diff --git a/.gitignore b/.gitignore
index e69de29b..b8d28fe0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+bins/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4b4c1f06..cf9ed1f0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,8 +17,6 @@ This version supports OneView appliances with version 4.2 using the OneView REST
- Support for Update Group in Logical Enclosure.
- Support for Update Compliance in Logical Interconnect.
-- Usecase for synergy infrastructure provisioning with Network.
-- Usecase for synergy infrastructure provisioning with i3s.
### Oneview Features supported
- Enclosure
@@ -49,11 +47,11 @@ This version supports OneView appliances with version 4.2 using the OneView REST
### Notes
- Patch changes to server profile to include boot order fixes.
- Usecase for infrastructure provisioning without storage and image streamer
-
+
# [v1.0.0]
### Notes
- This is the first release of the OneView modules in Terraform and it adds support to core features listed below.
- This version of the module supports OneView appliances with version 4.10, using the OneView REST API version 800.
+ This is the first release of the OneView modules in Terraform and it adds support to core features listed below.
+ This version of the module supports OneView appliances with version 4.10, using the OneView REST API version 800.
### Major changes:
- Support for Go 1.11
diff --git a/Dockerfile b/Dockerfile
index d159e452..ca44a68c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,28 +1,7 @@
-FROM golang:alpine
-MAINTAINER "Priyanka Sood "
+FROM alpine
-ENV TERRAFORM_VERSION=0.11.10
+# Add the recently compiled Go binaries
+ADD bins/linux/ /usr/local/terraform/
-ENV USER root
-RUN mkdir -p /usr/local/terraform
-RUN apk update && \
- apk add curl jq python bash ca-certificates git openssl unzip wget && \
- cd /tmp && \
- wget https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip && \
-unzip terraform_${TERRAFORM_VERSION}_linux_amd64.zip -d /usr/local/terraform/
-
-ENV GOROOT /usr/local/go
-ENV GOPATH /go
-ENV PATH /go/bin:$PATH
-
-RUN mkdir -p ${GOPATH}/src ${GOPATH}/bin
-WORKDIR /go/src/github.com/HewlettPackard/terraform-provider-oneview
-
-COPY . /go/src/github.com/HewlettPackard/terraform-provider-oneview
-
-RUN cd $GOPATH/src/github.com/HewlettPackard/terraform-provider-oneview
-RUN go build -o $GOPATH/bin/terraform-provider-oneview
-RUN mv $GOPATH/bin/terraform-provider-oneview /usr/local/terraform/
-
-RUN go get github.com/kardianos/govendor
+# Add the folder above to the path
ENV PATH $PATH:/usr/local/terraform/
diff --git a/README.md b/README.md
index 270f8c22..1f549f6f 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ A Terraform provider for oneview
## Installing `terraform-provider-oneview` with Go
* Install Go 1.11. For previous versions, you may have to set your `$GOPATH` manually, if you haven't done it yet.
-* Install Terraform 0.11.x [from here](https://www.terraform.io/downloads.html) and save it into `/usr/local/bin/terraform` folder (create it if it doesn't exists). This provider DOES NOT SUPPORT Terraform 0.12 or above.
+* Install Terraform 0.11.x or above [from here](https://www.terraform.io/downloads.html) and save it into `/usr/local/bin/terraform` folder (create it if it doesn't exist)
* Download the code by issuing a `go get` command.
```bash @@ -24,18 +24,18 @@ $ mv $GOPATH/bin/terraform-provider-oneview /usr/local/bin/terraform ## Using `terraform-provider-oneview` with Docker -We also provide a lightweight and easy way to test and execute `terraform-provider-oneview`. The `prisood/terraform-provider-oneview` docker image contains +We also provide a lightweight and easy way to test and execute `terraform-provider-oneview`. The `dciacoe/terraform-provider-oneview` docker image contains an installation of Terraform and our provider you can use by just pulling down the Docker Image: ```bash # Download and store a local copy of terraform-provider-oneview and # use it as a Docker image. -$ docker pull prisood/terraform-provider-oneview - +$ docker pull dciacoe/terraform-provider-oneview + # Run docker commands using the "ash" shell from Alpine, this will create # a sh session where you can create files, issue commands and execute both # terraform and the provider with ease. -$ docker run -it prisood/terraform-provider-oneview /bin/sh +$ docker run -it dciacoe/terraform-provider-oneview /bin/sh ``` @@ -51,7 +51,6 @@ provider "oneview" { ov_endpoint = "" ov_sslverify = true/false ov_apiversion = - ov_domain = "" ov_ifmatch = "*" } ``` diff --git a/docs/d/server_profile.html.markdown b/docs/d/server_profile.html.markdown index d73862c1..6e3336ae 100644 --- a/docs/d/server_profile.html.markdown +++ b/docs/d/server_profile.html.markdown @@ -55,33 +55,3 @@ In addition to the arguments listed above, the following computed attributes are * `ilo_ip` - The ILO ip address that is managing the server. * `hardware_uri` - The URI of the hardware the server is provisioned on. - -* `boot_order` - Defines the order in which boot will be attempted on the available devices. - -* `boot_mode` - Boot mode settings to be configured on the server. - -* `bios_option` - Server BIOS settings. - -* `server_hardware_type` - Identifies the server hardware type for which the Server Profile was designed. - -* `enclosure_group` - Identifies the enclosure group for which the Server Profile was designed. - -* `affinity` - This identifies the behavior of the server profile when the server hardware is removed or replaced. - -* `hide_unused_flex_nics` - This setting controls the enumeration of physical functions that do not correspond to connections in a profile. - -* `serial_number_type` - Specifies the type of Serial Number and UUID to be programmed into the server ROM. - -* `wwn_type` - Specifies the type of WWN address to be programmed into the IO devices. - -* `mac_type` - Specifies the type of MAC address to be programmed into the IO Devices. - -* `firmware` - Defines and enables firmware baseline management. - -* `local_storage` - Local storage settings to be configured on the server. - -* `logical_drives` - The list of logical drives associated with the controller. - -* `san_storage` - The profile san storage configuration. - -* `volume_attachments` - The list of storage volume attachments. \ No newline at end of file diff --git a/docs/index.html.markdown b/docs/index.html.markdown index f24c64e2..1687d22f 100644 --- a/docs/index.html.markdown +++ b/docs/index.html.markdown @@ -3,13 +3,13 @@ layout: "oneview" page_title: "Provider: OneView" sidebar_current: "docs-oneview-index" description: |- - The Oneview provider is used to interact with your on premise OneView system. The provider needs to be configured with the proper credentials before it can be used. + The Oneview provider is used to interact with your on premise OneView system. 
The provider needs to be configured with the proper credentials before it can be used. --- -# Oneview Provider +# Oneview Provider - The Oneview provider is used to interact with [OneView](https://www.hpe.com/us/en/integrated-systems/software.html). - The provider needs to be configured with the proper credentials before it can be used. + The Oneview provider is used to interact with [OneView](https://www.hpe.com/us/en/integrated-systems/software.html). + The provider needs to be configured with the proper credentials before it can be used. ## Example Usage ```js @@ -19,9 +19,7 @@ provider "oneview" { ov_password = "password123" ov_endpoint = oneview_url.com ov_sslverify = true - ov_apiversion = 800 - ov_domain = "Local" - ov_ifmatch = "*" + ov_apiversion = 200 } //Create a new ethernet network @@ -37,16 +35,16 @@ The Oneview provider supports static credentials and environment variables. The following keys can be used to configure the provider. -* `ov_username` - (Optional) This is the OneView username. +* `ov_username` - (Optional) This is the OneView username. It must be provided or sourced from ONEVIEW_OV_USER environment variable. -* `ov_password` - (Optional) This is the OneView password. +* `ov_password` - (Optional) This is the OneView password. It must be provided or sourced from ONEVIEW_OV_PASSWORD environment variable. - + * `ov_endpoint` - (Optional) This is the OneView URL. It must be provided or sourced from ONEVIEW_OV_ENDPOINT environment variable. -* `ov_sslverify` - (Optional) This is a boolean value for whether ssl is enabled. +* `ov_sslverify` - (Optional) This is a boolean value for whether ssl is enabled. It must be provided or sourced from ONEVIEW_OV_SSLVERIFY environment variable. * `ov_apiversion` - (Optional) This specifies what API version to use. @@ -54,9 +52,7 @@ The following keys can be used to configure the provider. * `ov_domain` - (Optional) This is the domain to use for the oneview system. It can be provided or sourced from ONEVIEW_OV_DOMAIN environment variable. - -* `ov_ifmatch` - (Optional) This is the if-Match(ETag) request header to use for the oneview system. - It can be provided or sourced from ONEVIEW_OV_IF_MATCH environment variable. - + + * `i3s_endpoint` - (Optional) This is the endpoint to connect to an image streamer instance It can be provided or sourced from ONEVIEW_I3S_ENDPOINT environment variable. diff --git a/docs/r/fcoe_network.html.markdown b/docs/r/fcoe_network.html.markdown index 4bbea6eb..acd0c477 100644 --- a/docs/r/fcoe_network.html.markdown +++ b/docs/r/fcoe_network.html.markdown @@ -21,23 +21,13 @@ resource "oneview_fcoe_network" "default" { ## Argument Reference -The following arguments are supported: +The following arguments are supported: * `name` - (Required) A unique name for the resource. -* `vlanId` - (Required) The Virtual LAN (VLAN) identification number (integer) assigned to the network. +* `vlanId` - (Required) The Virtual LAN (VLAN) identification number (integer) assigned to the network. Changing this forces a new resource -* `initial_scope_uris` - (Optional) A list of URIs of the scopes to which the resource shall be initially assigned. - -* `connection_template_uri` - The connection template URI that is associated with this Fibre Channel network. - -* `managed_san_uri` - The managed SAN URI that is associated with this Fibre Channel network. - -* `fabric_uri` - The URI of the fabric resource of which this resource is a member. - -* `description` - Brief description of the resource. 
- ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: diff --git a/docs/r/icsp_server.html.markdown b/docs/r/icsp_server.html.markdown new file mode 100644 index 00000000..cbd3a3e5 --- /dev/null +++ b/docs/r/icsp_server.html.markdown @@ -0,0 +1,65 @@ +--- +layout: "oneview" +page_title: "Oneview: icsp_server" +sidebar_current: "docs-oneview-icsp-server" +description: |- + Hooks a server profile created in OneView into ICSP. +--- + +# oneview\_icsp\_server + +Hooks a server profile created in OneView into ICSP. + +## Example Usage + +```js +resource "icsp_server" "default" { + ilo_ip = "15.x.x.x" + user_name = "ilo_user" + password = "ilo_password" + serial_number = "${oneview_server_profile.default.serial_number}" + build_plans = ["/rest/os-deployment-build-plans/1570001"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `ilo_ip` - (Required) The IP address of the iLO of the server. + +* `user_name` - (Required) The user name required to log into the server's iLO. + +* `password` - (Required) The password required to log into the server's iLO. + +* `serial_number` - (Required) The serial number assigned to the Server. + +- - - + +* `port` - (Optional) The iLO port to use when logging in. + This defaults to 443. + +* `build_plans` - (Optional) An array of build plan uris that you want to run on the server. + +* `public_mac` - (Optional) The MAC address of the NIC that will be the public network connection. + +* `public_slot_id` - (Optional) The slot id for the public network connection. + +* `custom_attribute` - (Optional) A key/value pair for a custom attribute you would like associated with +the server on icsp. Custom Attribute options specified below. + +Custom Attribute supports the following: + +* `key` - (Required) - The unique name of the attribute. + +* `value` - (Required) - The value of the attribute. + +* `scope` - (Optional) - The scope of the attribute. Defaults to `server`. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `mid` - A unique ID assigned to the Server by Server Automation. + +* `public_ipv4` - The public ip address if `public_mac` and `public_slot_id` were specified in the configuration. diff --git a/docs/r/logical_enclosure.html.markdown b/docs/r/logical_enclosure.html.markdown index d022ba53..99a0b567 100644 --- a/docs/r/logical_enclosure.html.markdown +++ b/docs/r/logical_enclosure.html.markdown @@ -25,74 +25,11 @@ resource "oneview_logical_enclosure" "default" { The following arguments are supported: -* `name` -(Required) A unique name for the resource. +* `name` -(Optional) A unique name for the resource. --- -* `enclosure_uris` -(Required) The set of uris associated with the enclosure. +* `enclosure_uris` -(Optional) The set of uris associated with the enclosure. -* `enclosure_group_uri` - (Required) The uri of the enclosure group. - ---- -description: |- - Imports a Logical Enclosure from appliance. ---- - -Import a logical enclosure. - -## Example Usage - -```js -resource "oneview_logical_enclosure" "default" { - # Empty body -} -``` -## Terraform Command to import - - terraform import oneview_logical_enclosure.default - -## Argument Reference - -The following arguments are supported: - -* `logical-enclosure-name` - (Required) A unique name for the resource as per the appliance. 
- -- - - -description: |- - Updates Logical Enclosure ---- - -# oneview\_logical\_enclosure - -Update logical enclosure. - -## Example Usage - -```js -resource "oneview_logical_enclosure" "default" { - name = "default-logical-enclosure" - enclosure_uris = ["${oneview_enclosure1.uri}", "${oneview_enclosure2.uri}"] - enclosure_group_uri = "${oneview_enclosure_group.uri}" - update_type = "update-type" -} -``` - -## Argument Reference - -The following arguments are supported: - -* `name` -(Required) A unique name for the resource. - ---- - -* `enclosure_uris` -(Required) The set of uris associated with the enclosure. - -* `enclosure_group_uri` - (Required) The uri of the enclosure group. - -* `update_type` - (Required) Type of update of Logical Enclosure. - - | NO | Type of Update | Update String | - |----|------------------------------------------------|-------------------------------| - | 1 |`UpdateLogicalEnclosure` |'update' | - | 2 |`UpdateFromGroupLogicalEnclosure` |'updateByGroup' | +* `enclosure_group_uri` - (Optional) The uri of the enclosure group. diff --git a/docs/r/network_set.html.markdown b/docs/r/network_set.html.markdown index 09a5cb3c..6d11302c 100644 --- a/docs/r/network_set.html.markdown +++ b/docs/r/network_set.html.markdown @@ -46,11 +46,3 @@ In addition to the arguments listed above, the following computed attributes are * `uri` - The URI of the created resource. * `eTag` - Entity tag/version ID of the resource. - -* `connection_template_uri` - The URI of the existing connection template associated with this object. - -* `scopesUri` - The URI for the resource scope assignments. - -* `description` - Brief description of the resource. - -* `network_set_type` - NetworkSet Type whose default value is "Regular". diff --git a/docs/r/server_profile.html.markdown b/docs/r/server_profile.html.markdown index a71d5d76..91b65367 100644 --- a/docs/r/server_profile.html.markdown +++ b/docs/r/server_profile.html.markdown @@ -12,7 +12,6 @@ Creates a server profile. et ## Example Usage -Create server profile using server profile template. ```js resource "oneview_server_profile" "default" { name = "test-server-profile" @@ -20,52 +19,6 @@ resource "oneview_server_profile" "default" { } ``` -Create server profile without server profile template. -```js -resource "oneview_server_profile" "default" { - name = "test-server-profile" - hardware_name = "00AT092 bay 2" - type = "ServerProfileV10" -} -``` -Update server profile -```js -resource "oneview_server_profile" "default" { - update_type = "patch" - options = [ - { - op = "replace" - path = "/refreshState" - value = "RefreshPending" - } - ] - name = "TestSP_Renamed" - type = "ServerProfileV10" - server_hardware_type = "SY 480 Gen9 3" - enclosure_group = "SYN03_EC" - hardware_name = "SYN03_Frame3, bay 1" -} -``` - -Patch request for server profile -```js -resource "oneview_server_profile" "default" { - update_type = "patch" - options = [ - { - op = "replace" - path = "/refreshState" - value = "RefreshPending" - } - ] - name = "TestSP" - type = "ServerProfileV10" - server_hardware_type = "SY 480 Gen9 3" - enclosure_group = "SYN03_EC" - hardware_name = "SYN03_Frame3, bay 1" -} -``` - ## Argument Reference The following arguments are supported: @@ -74,17 +27,6 @@ The following arguments are supported: * `template` - (Optional) The name of the template you will use for the server profile. -* `update_type` - (Required) Type of update of Server Profile. 
- - | NO | Type of Update | Update String | - |----|-------------------|-----------------| - | 1 |`Update` |'put' | - |----|-------------------|-----------------| - | 1 |`Patch` |'patch' | - |----|-------------------|-----------------| - -- - - - - - - * `public_connection` - (Optional) The name of the network that is going out to the public. @@ -116,35 +58,3 @@ In addition to the arguments listed above, the following computed attributes are * `ilo_ip` - The ILO ip address that is managing the server. * `hardware_uri` - The URI of the hardware the server is provisioned on. - -* `os_deployment_settings` - (Optional) OS Deployment settings applicable when deployment is invoked through a server profile. - -* `boot_order` - Defines the order in which boot will be attempted on the available devices. - -* `boot_mode` - Boot mode settings to be configured on the server. - -* `bios_option` - Server BIOS settings. - -* `server_hardware_type` - Identifies the server hardware type for which the Server Profile was designed. - -* `enclosure_group` - Identifies the enclosure group for which the Server Profile was designed. - -* `affinity` - This identifies the behavior of the server profile when the server hardware is removed or replaced. - -* `hide_unused_flex_nics` - This setting controls the enumeration of physical functions that do not correspond to connections in a profile. - -* `serial_number_type` - Specifies the type of Serial Number and UUID to be programmed into the server ROM. - -* `wwn_type` - Specifies the type of WWN address to be programmed into the IO devices. - -* `mac_type` - Specifies the type of MAC address to be programmed into the IO Devices. - -* `firmware` - Defines and enables firmware baseline management. - -* `local_storage` - Local storage settings to be configured on the server. - -* `logical_drives` - The list of logical drives associated with the controller. - -* `san_storage` - The profile san storage configuration. - -* `volume_attachments` - The list of storage volume attachments. \ No newline at end of file diff --git a/docs/r/server_profile_template.html.markdown b/docs/r/server_profile_template.html.markdown index 5d8fb600..ab4a7866 100644 --- a/docs/r/server_profile_template.html.markdown +++ b/docs/r/server_profile_template.html.markdown @@ -77,18 +77,6 @@ Changing this forces a new resoure. * `requested_mbps` - (Optional) The transmit throughput (mbps) that should be allocated to this connection. Defaults to `2500` -* `bios_option` - (Optional) Bios setting required for the server profile template. - -* `firmware` - (Optional) Firmware attributes required to configure firmware. - -* `local_storage` - (Optional) Local storage settings to be configured on the server. - -* `logical_drives` - (Optional) List of logical drives associated with the controller. - -* `san_storage` - (Optional)The profile SAN storage configuration. - -* `os_deployment_settings` - (Optional) OS Deployment settings applicable when deployment is invoked through a server profile. - ## Attributes Reference diff --git a/endpoints-support.md b/endpoints-support.md index 3360414e..6d9c261b 100644 --- a/endpoints-support.md +++ b/endpoints-support.md @@ -12,7 +12,7 @@ * If an endpoint is marked as implemented in a previous version of the API, it will likely already be working for newer API versions, however in these cases it is important to: 1. Specify the 'type' of the resource when using an untested API, as it will not get set by default -2. 
If an example is not working, verify the [HPE OneView REST API Documentation](http://h17007.www1.hpe.com/docs/enterprise/servers/oneview4.2/cic-api/en/index.html) for the respective API version being used, since the expected attributes for that resource might have changed. +2. If an example is not working, verify the [HPE OneView REST API Documentation](http://h17007.www1.hpe.com/docs/enterprise/servers/oneview3.1/cic-api/en/api-docs/current/index.html) for the API version being used, since the expected attributes for that resource might have changed.
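
As a concrete sketch of note 1 above — illustrative only, not taken from the repository — explicitly pinning the resource `type` in a configuration would look like the block below. The argument names `name` and `vlanId` follow the FCoE network documentation in this change set; the exact type string is an assumption and must be verified against the REST API documentation for the API version in use.

```js
// Hypothetical illustration of note 1: on an untested API version the
// provider does not set the resource type by default, so declare it.
resource "oneview_fcoe_network" "network" {
  name   = "test-fcoe-network"
  vlanId = 71
  type   = "fcoe-networkV4" // assumed type string; confirm in the API docs
}
```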
@@ -26,33 +26,31 @@ |/rest/enclosure/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/enclosure/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Enclosure Group** | -|/rest/enclosure-group | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-group/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-group/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-group | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/enclosure-group/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/enclosure-group/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **Ethernet Networks** | -|/rest/ethernet-networks | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/ethernet-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/ethernet-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **FC Networks** | -|/rest/fc-networks/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/fc-networks | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | 
:heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/fc-networks/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/fc-networks/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **FCoE Networks** | -|/rest/fcoe-networks/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fcoe-networks | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | | **Interconnect Types** | -|/rest/Interconnect-types/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/Interconnect-types/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **Interconnects** | -|/rest/interconnects/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/interconnects/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Logical Enclosure** | -|/rest/logical-enclosures/ | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id}/updateFromGroup | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/ | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | 
:heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Logical Interconnects** | |/rest/logical-interconnects/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | |/rest/logical-interconnects/{id}/compliance | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | @@ -71,11 +69,11 @@ |/rest/network-sets/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/network-sets/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Scope** | -|/rest/scopes | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/scopes/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/scopes/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/scopes | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/scopes/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/scopes/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **Server Hardware** | -|/rest/server-hardware/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/server-hardware/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Server Hardware Type** | |/rest/server-hardware-type/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Server Profile Templates** | @@ -90,43 +88,21 @@ |/rest/server-profiles | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/server-profiles | PATCH | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Storage System** -|/rest/storage-systems/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems | DELETE | :heavy_multiplication_x: | 
:heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -| **Storage Pool** -|/rest/storage-pools/ | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-pools/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-pools/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -| **Storage Volume** | -|/rest/storage-volumes/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -| **Storage Volume Attachment** -|/rest/storage-volume-attachments/ | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-attachments/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -| **Storage Volume Template** -|/rest/storage-volume-templates/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/storage-systems | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/storage-systems | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | +|/rest/storage-systems | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | | **Uplink Sets** | 
-|/rest/uplink-sets | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/uplink-sets | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/uplink-sets/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/uplink-sets/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/uplink-sets | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/uplink-sets | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/uplink-sets/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/uplink-sets/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | ## HPE Synergy Image Streamer -| Endpoints | Verb | V200 | V300 | V500 |V600 |V800 |V1000 -| --------------------------------------------------------------------------------------- | -------- | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | +| Endpoints | Verb | V200 | V300 | V500 |V600 |V800 +| --------------------------------------------------------------------------------------- | -------- | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | | **OS Deployment Plans** | |/rest/os-deployment-plans | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | |/rest/os-deployment-plans | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | /rest/os-deployment-plans | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | -| **Deployment Plans** | -/rest/deployment-plans | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/deployment-plans | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/deployment-plans | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -/rest/deployment-plans | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | diff --git a/examples/enclosure_group.tf b/examples/enclosure_group.tf index 32f4b255..63c44d4c 100644 --- a/examples/enclosure_group.tf +++ b/examples/enclosure_group.tf @@ -25,7 +25,7 @@ resource "oneview_enclosure_group" "eg_inst" { { interconnect_bay = 4 logical_interconnect_group_uri = "/rest/sas-logical-interconnect-groups/a14f144f-359d-4ce2-b2c1-446382a30ad0" - } + }, ] } @@ 
-44,7 +44,7 @@ resource "oneview_enclosure_group" "eg_inst" {
 {
 interconnect_bay = 4
 logical_interconnect_group_uri = "/rest/sas-logical-interconnect-groups/a14f144f-359d-4ce2-b2c1-446382a30ad0"
- }
+ },
 ]
 interconnect_bay_mapping_count = 2
 type = "EnclosureGroupV7"
diff --git a/examples/interconnects.tf b/examples/interconnects.tf
index 581e3043..157984a7 100644
--- a/examples/interconnects.tf
+++ b/examples/interconnects.tf
@@ -3,7 +3,7 @@ provider "oneview" {
 ov_password = ""
 ov_endpoint = ""
 ov_sslverify = false
- ov_apiversion = 1000
+ ov_apiversion = 800
}
data "oneview_interconnect" "interconnect" {
diff --git a/examples/logical_enclosure.tf b/examples/logical_enclosure.tf
index e909daec..e5689ba3 100644
--- a/examples/logical_enclosure.tf
+++ b/examples/logical_enclosure.tf
@@ -19,13 +19,6 @@ resource "oneview_logical_enclosure" "LogicalEnclosure" {
 name = "TESTLE"
 enclosure_uris = ["/rest/enclosures/0000000000A66101","/rest/enclosures/0000000000A66102","/rest/enclosures/0000000000A66103"]
 enclosure_group_uri = "/rest/enclosure-groups/4f7b2cf4-fdb5-4065-8886-54a9905659f9"
-}
-/* Update by Group
+}
-resource "oneview_logical_enclosure" "LogicalEnclosure" {
- name = "TESTLE"
- enclosure_uris = ["/rest/enclosures/0000000000A66101","/rest/enclosures/0000000000A66102","/rest/enclosures/0000000000A66103"]
- enclosure_group_uri = "/rest/enclosure-groups/4f7b2cf4-fdb5-4065-8886-54a9905659f9"
- update_type = "updateByGroup"
-*/
diff --git a/examples/logical_interconnect_group.tf b/examples/logical_interconnect_group.tf
index 4daa59c5..98744984 100644
--- a/examples/logical_interconnect_group.tf
+++ b/examples/logical_interconnect_group.tf
@@ -23,13 +23,3 @@ resource "oneview_logical_interconnect_group" "LIG" {
 },
 ]
}
-
-/* Test for data source
-
-data "oneview_logical_interconnect_group" "logical_interconnect_group" {
- name = "TestLIG"
-}
-
-output "lig_value" {
- value = "${data.oneview_logical_interconnect_group.logical_interconnect_group.redundancy_type}"
-}*/
diff --git a/examples/logical_interconnects.tf b/examples/logical_interconnects.tf
index 488bc4cf..33f95965 100644
--- a/examples/logical_interconnects.tf
+++ b/examples/logical_interconnects.tf
@@ -12,11 +12,9 @@ provider "oneview" {
 Import the existing resource from appliance into the terraform instance.
 Create an empty resource and execute the following command.
- terraform import oneview_logical_interconnect.<instance> <resource_id>
+ terraform import oneview_logical_interconnect.<instance> <resource_name>
 instance - instance name declared in the empty resource.
- resource_id - id of the logical interconnect as per the appliance.
-
- Example: terraform import oneview_logical_interconnect.li d4468f89-4442-4324-9c01-624c7382db2d
+ resource_name - name of the logical interconnect as per the appliance.
*/ /*resource "oneview_logical_interconnect" "li" { }*/ @@ -31,5 +29,6 @@ provider "oneview" { ------------------------------------------------------------------------------------ */ resource "oneview_logical_interconnect" "li" { + uri = "/rest/logical-interconnects/d4468f89-4442-4324-9c01-624c7382db2d" update_type = "updateComplianceById" } diff --git a/examples/network_set.tf b/examples/network_set.tf index 156343cd..7415f584 100644 --- a/examples/network_set.tf +++ b/examples/network_set.tf @@ -23,6 +23,3 @@ output "oneview_network_set_value" { value = "${data.oneview_network_set.network_set.uri}" } -//Importing an existing resource from appliance -resource "oneview_network_set" "import_ns" { -} diff --git a/examples/server_profile.tf b/examples/server_profile.tf index 2304922b..f7f92c11 100644 --- a/examples/server_profile.tf +++ b/examples/server_profile.tf @@ -15,43 +15,6 @@ resource "oneview_server_profile" "SP" { type = "ServerProfileV9" } -# Creation of Server Profile without template -resource "oneview_server_profile" "SP" { - name = "TestSP" - hardware_name = "SYN03_Frame3, bay 1" - type = "ServerProfileV10" - enclosure_group = "SYN03_EC" - initial_scope_uris = ["${data.oneview_scope.scope.uri}"] -} - -# Updating Server profile -resource "oneview_server_profile" "SP" { - name = "TestSP_Renamed" - hardware_name = "SYN03_Frame3, bay 1" - type = "ServerProfileV10" - enclosure_group = "SYN03_EC" - server_hardware_type = "SY 480 Gen9 3" - initial_scope_uris = ["${data.oneview_scope.scope.uri}"] - update_type = "put" -} - -# Patch request - Server profile Refresh -resource "oneview_server_profile" "SP" { - update_type = "patch" - options = [ - { - op = "replace" - path = "/refreshState" - value = "RefreshPending" - } - ] - name = "TestSP_Renamed" - type = "ServerProfileV10" - server_hardware_type = "SY 480 Gen9 3" - enclosure_group = "SYN03_EC" - hardware_name = "SYN03_Frame3, bay 1" -} - #Data source for server profile data "oneview_server_profile" "sp" { @@ -62,10 +25,3 @@ output "oneview_server_profile_value" { value = "${data.oneview_server_profile.sp.uri}" } -# To import an existing server profile to terraform, use the below code and run the following command: - -# terraform import . -# Eg: terraform import oneview_server_profile.serverProfile Test - -resource "oneview_server_profile" "serverProfile" { -} diff --git a/examples/storage_system.tf b/examples/storage_system.tf index 9a1ee12c..4795fe28 100644 --- a/examples/storage_system.tf +++ b/examples/storage_system.tf @@ -7,38 +7,53 @@ provider "oneview" { ov_ifmatch = "*" } + resource "oneview_storage_system" "ss_inst" { hostname = "" - username = "" + username = "" password = "" - family = "" + family = "StoreServ" } - - -/* // Uncomment the following resource to update. 
-resource "oneview_storage_system" "ss_inst"{ +/*resource "oneview_storage_system" "ss_inst"{ credentials = [ { - username = "" - password = "" + username = "" + password = "" } ] hostname = "" - name = "" + name = "ThreePAR-2" ports = [ { - mode = "" - id= "" + id = "ea0b2d3d-098c-4f95-ac08-aa6100a80de7" + mode = "AutoSelectExpectedSan" + partner_port = "1:1:1" + }, + { + id = "5a1469d8-a925-4b9a-a87b-aa6100a80de7" + mode = "AutoSelectExpectedSan" + partner_port = "1:1:2" + }, + { + id = "4f55584b-2abb-47f5-95cc-aa6100a80de7" + mode = "AutoSelectExpectedSan" + partner_port = "1:1:3" + }, + ] + managed_pool = [] + storage_system_device_specific_attributes = [ + { + managed_domain = "TestDomain" + firmware = "3.2.1.292" + model = "HP_3PAR 7200" } ] - managed_pool = [] - eTag = "--" + eTag = "10" description = "TestStorageSystem" - uri = "/rest/storage-systems/" -} -*/ + uri = "/rest/storage-systems/TXQ1010307" +}*/ /* Testing the data source data "oneview_storage_system" "storage_system" { diff --git a/main.go b/main.go index 2a9114a8..60711a8c 100644 --- a/main.go +++ b/main.go @@ -8,11 +8,13 @@ // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. +//Modifications by NST package main import ( - "github.com/HewlettPackard/terraform-provider-oneview/oneview" + "terraform-oneview/oneview" + "github.com/hashicorp/terraform/plugin" ) diff --git a/oneview/data_source_enclosure.go b/oneview/data_source_enclosure.go index 3cf9b03d..007766fc 100644 --- a/oneview/data_source_enclosure.go +++ b/oneview/data_source_enclosure.go @@ -110,8 +110,9 @@ func dataSourceEnclosure() *schema.Resource { Set: schema.HashString, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "enclosure_group_uri": { Type: schema.TypeString, diff --git a/oneview/data_source_enclosure_group.go b/oneview/data_source_enclosure_group.go index d25cd1c4..f6bd296d 100644 --- a/oneview/data_source_enclosure_group.go +++ b/oneview/data_source_enclosure_group.go @@ -132,8 +132,9 @@ func dataSourceEnclosureGroup() *schema.Resource { Computed: true, }, "Status": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "type": { Type: schema.TypeString, diff --git a/oneview/data_source_ethernet_network.go b/oneview/data_source_ethernet_network.go index a9dab9bf..8a20b57f 100644 --- a/oneview/data_source_ethernet_network.go +++ b/oneview/data_source_ethernet_network.go @@ -85,12 +85,14 @@ func dataSourceEthernetNetwork() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "scopesUri": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "initial_scope_uris": { Computed: true, diff --git a/oneview/data_source_fc_network.go b/oneview/data_source_fc_network.go index 0265b17c..444a4567 100644 --- a/oneview/data_source_fc_network.go +++ b/oneview/data_source_fc_network.go @@ -69,8 +69,9 @@ func dataSourceFCNetwork() *schema.Resource { Computed: true, }, "eTag": { - Type: 
schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "created": { Type: schema.TypeString, @@ -81,8 +82,9 @@ func dataSourceFCNetwork() *schema.Resource { Computed: true, }, "scopesUri": { - Computed: true, - Type: schema.TypeString, + Computed: true, + Type: schema.TypeString, + Deprecated: "Warning: Current value structure is deprecated", }, }, } diff --git a/oneview/data_source_interconnect_type.go b/oneview/data_source_interconnect_type.go index c4db2f32..168d362c 100644 --- a/oneview/data_source_interconnect_type.go +++ b/oneview/data_source_interconnect_type.go @@ -142,8 +142,9 @@ func dataSourceInterconnectType() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, }, } @@ -175,7 +176,7 @@ func dataSourceInterconnectTypeRead(d *schema.ResourceData, meta interface{}) er DownlinkPortCapability = append(DownlinkPortCapability, map[string]interface{}{ "created": interconnectType.DownlinkPortCapability.Created, "max_bandwidth_in_gbps": interconnectType.DownlinkPortCapability.MaxBandwidthInGbps, - "uri": interconnectType.DownlinkPortCapability.URI, + "uri": interconnectType.DownlinkPortCapability.URI, }) d.Set("downlink_port_capability", DownlinkPortCapability) diff --git a/oneview/data_source_network_set.go b/oneview/data_source_network_set.go index f455d94a..52c038d3 100644 --- a/oneview/data_source_network_set.go +++ b/oneview/data_source_network_set.go @@ -71,17 +71,14 @@ func dataSourceNetworkSet() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "scopes_uri": { Type: schema.TypeString, Computed: true, }, - "network_set_type": { - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -123,7 +120,6 @@ func dataSourceNetworkSetRead(d *schema.ResourceData, meta interface{}) error { } d.Set("network_uris", networkUris) d.Set("scopes_uri", netSet.ScopesUri) - d.Set("network_set_type", netSet.NetworkSetType) return nil diff --git a/oneview/data_source_server_hardware_type.go b/oneview/data_source_server_hardware_type.go index ff4d5204..6ebdd75d 100644 --- a/oneview/data_source_server_hardware_type.go +++ b/oneview/data_source_server_hardware_type.go @@ -66,14 +66,6 @@ func dataSourceServerHardwareType() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "family": { - Type: schema.TypeString, - Computed: true, - }, - "model": { - Type: schema.TypeString, - Computed: true, - }, }, } } @@ -94,8 +86,6 @@ func dataSourceServerHardwareTypeRead(d *schema.ResourceData, meta interface{}) d.Set("category", serverHardwareType.Category) d.Set("etag", serverHardwareType.ETAG) d.Set("uri", serverHardwareType.URI.String()) - d.Set("family", serverHardwareType.Family) - d.Set("model", serverHardwareType.Model) controllerModes := make([]interface{}, len(serverHardwareType.StorageCapabilities.ControllerModes)) for i, controllerMode := range serverHardwareType.StorageCapabilities.ControllerModes { diff --git a/oneview/data_source_server_profile.go b/oneview/data_source_server_profile.go index 2c3ed0e6..3d13d269 100644 --- a/oneview/data_source_server_profile.go +++ b/oneview/data_source_server_profile.go @@ -12,7 +12,6 @@ package oneview import ( - 
"github.com/HewlettPackard/oneview-golang/ov" "github.com/hashicorp/terraform/helper/schema" ) @@ -25,410 +24,6 @@ func dataSourceServerProfile() *schema.Resource { Type: schema.TypeString, Required: true, }, - "boot_order": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "boot_mode": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_mode": { - Type: schema.TypeBool, - Required: true, - }, - "mode": { - Type: schema.TypeString, - Required: true, - }, - "pxe_boot_policy": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "bios_option": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_bios": { - Type: schema.TypeBool, - Required: true, - }, - "overridden_settings": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "network": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "function_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "network_uri": { - Type: schema.TypeString, - Required: true, - }, - "port_id": { - Type: schema.TypeString, - Optional: true, - Default: "Lom 1:1-a", - }, - "requested_mbps": { - Type: schema.TypeString, - Optional: true, - Default: "2500", - }, - "id": { - Type: schema.TypeInt, - Optional: true, - }, - "boot": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "priority": { - Type: schema.TypeString, - Optional: true, - }, - "ethernet_boot_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_source": { - Type: schema.TypeString, - Optional: true, - }, - "iscsi": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "chap_level": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - "initiator_name_source": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "ipv4": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gateway": { - Type: schema.TypeString, - Optional: true, - }, - "ip_address_source": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "server_hardware_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "enclosure_group": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "affinity": { - Type: schema.TypeString, - Optional: true, - Default: "Bay", - }, - "hide_unused_flex_nics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "manage_connections": { - Type: schema.TypeBool, - Optional: true, - }, - "serial_number_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: 
true, - }, - "wwn_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: true, - }, - "mac_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: true, - }, - "firmware": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "force_install_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_baseline_uri": { - Type: schema.TypeString, - Optional: true, - }, - "manage_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_install_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "local_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_local_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "initialize": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "logical_drives": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bootable": { - Type: schema.TypeBool, - Optional: true, - }, - "raid_level": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "san_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host_os_type": { - Type: schema.TypeString, - Optional: true, - }, - "manage_san_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "server_hardware_type_uri": { - Type: schema.TypeString, - Optional: true, - }, - "server_hardware_uri": { - Type: schema.TypeString, - Optional: true, - }, - "serial_number": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - }, - "uri": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - // schema for ov.SanStorage.VolumeAttachments - "volume_attachments": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeInt, - Required: true, - }, - "lun": { - Type: schema.TypeString, - Required: true, - }, - "lun_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_priority": { - Type: schema.TypeString, - Optional: true, - }, - "permanent": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_storage_pool_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_storage_system_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_shareable": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_description": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provision_type": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provisioned_capacity_bytes": { - Type: schema.TypeString, - Optional: true, - }, - "volume_name": { - Type: schema.TypeString, - Optional: true, - }, - "storage_paths": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Optional: true, - }, - "storage_target_type": { - Type: schema.TypeString, - Optional: true, - }, - "target_selector": { - Type: schema.TypeString, - Optional: true, - }, - "is_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "connection_id": { - Type: schema.TypeInt, - Optional: true, - }, - "targets": { - Type: schema.TypeSet, - 
Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "tcp_port": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, "uri": { Type: schema.TypeString, Computed: true, @@ -502,55 +97,6 @@ func dataSourceServerProfileRead(d *schema.ResourceData, meta interface{}) error d.Set("public_slot_id", publicConnection.ID) } - enclosureGroup, err := config.ovClient.GetEnclosureGroupByUri(serverProfile.EnclosureGroupURI) - if err != nil { - return err - } - d.Set("enclosure_group", enclosureGroup.Name) - - serverHardwareType, err := config.ovClient.GetServerHardwareTypeByUri(serverProfile.ServerHardwareTypeURI) - if err != nil { - return err - } - d.Set("server_hardware_type", serverHardwareType.Name) - d.Set("affinity", serverProfile.Affinity) - d.Set("serial_number_type", serverProfile.SerialNumberType) - d.Set("wwn_type", serverProfile.WWNType) - d.Set("mac_type", serverProfile.MACType) - d.Set("hide_unused_flex_nics", serverProfile.HideUnusedFlexNics) - - var connections []ov.Connection - if len(serverProfile.ConnectionSettings.Connections) != 0 { - connections = serverProfile.ConnectionSettings.Connections - } else { - connections = serverProfile.Connections - } - if len(connections) != 0 { - networks := make([]map[string]interface{}, 0, len(connections)) - for _, rawNet := range connections { - networks = append(networks, map[string]interface{}{ - "name": rawNet.Name, - "function_type": rawNet.FunctionType, - "network_uri": rawNet.NetworkURI.String(), - "port_id": rawNet.PortID, - "requested_mbps": rawNet.RequestedMbps, - }) - } - d.Set("network", networks) - } - - if serverProfile.Boot.ManageBoot { - bootOrder := make([]interface{}, 0) - for _, currBoot := range serverProfile.Boot.Order { - rawBootOrder := d.Get("boot_order").(*schema.Set).List() - for _, raw := range rawBootOrder { - if raw == currBoot { - bootOrder = append(bootOrder, currBoot) - } - } - } - d.Set("boot_order", bootOrder) - } d.Set("name", serverProfile.Name) d.Set("type", serverProfile.Type) d.Set("uri", serverProfile.URI.String()) diff --git a/oneview/data_source_storage_system.go b/oneview/data_source_storage_system.go index a23c9ebf..cc735479 100644 --- a/oneview/data_source_storage_system.go +++ b/oneview/data_source_storage_system.go @@ -70,8 +70,9 @@ func dataSourceStorageSystem() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "type": { Type: schema.TypeString, diff --git a/oneview/data_source_storage_volume.go b/oneview/data_source_storage_volume.go new file mode 100644 index 00000000..68fde841 --- /dev/null +++ b/oneview/data_source_storage_volume.go @@ -0,0 +1,211 @@ +package oneview + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceStorageVolume() *schema.Resource { + return &schema.Resource{ + Read: dataSourceStorageVolumeRead, + Schema: map[string]*schema.Schema{ + "category": { + Type: schema.TypeString, + Computed: true, + }, + "eTag": { + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", + }, + "created": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + 
+				Required: true,
+			},
+			"state": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"status": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"type": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"device_volume_name": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"requesting_refresh": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"allocated_capacity": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"initial_scopes": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"device_specific_attributes": {
+				Computed: true,
+				Type:     schema.TypeSet,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"transport": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"iqn": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"number_of_replicas": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"data_protection_level": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"id": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"uri": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"copy_state": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"snapshot_pool_uri": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"volume_template_uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"is_shareable": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"storage_pool_uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"storage_system_uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"provisioned_capacity": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"properties": {
+				Type:     schema.TypeSet,
+				Computed: true,
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"name": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"storage_pool": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+						"size": {
+							Type:     schema.TypeInt,
+							Computed: true,
+						},
+						"provisioning_type": {
+							Type:     schema.TypeString,
+							Computed: true,
+						},
+					},
+				},
+			},
+			"template_uri": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"is_permanent": {
+				Type:     schema.TypeBool,
+				Computed: true,
+			},
+			"provisioning_type_for_update": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+		},
+	}
+}
+
+func dataSourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+
+	// Data sources are looked up by the required "name" attribute; d.Id() is
+	// empty until SetId has been called, so it cannot be used for the lookup.
+	storageVolume, err := config.ovClient.GetStorageVolumeByName(d.Get("name").(string))
+	if err != nil {
+		d.SetId("")
+		return nil
+	}
+
+	d.SetId(storageVolume.Name)
+	d.Set("category", storageVolume.Category)
+	d.Set("eTag", storageVolume.ETAG)
+	d.Set("created", storageVolume.Created)
+	d.Set("description", storageVolume.Description)
+	d.Set("name", storageVolume.Name)
+	d.Set("state", storageVolume.State)
+	d.Set("status", storageVolume.Status)
+	d.Set("type", storageVolume.Type)
+	d.Set("uri", storageVolume.URI)
+	d.Set("device_volume_name", storageVolume.DeviceVolumeName)
+	d.Set("requesting_refresh", storageVolume.RequestingRefresh)
+	d.Set("allocated_capacity", storageVolume.AllocatedCapacity)
+	d.Set("initial_scopes", storageVolume.InitialScopeUris)
+	deviceSpecificAttributes := make([]map[string]interface{}, 0)
+	deviceSpecificAttributes = append(deviceSpecificAttributes, map[string]interface{}{
+		"transport":             storageVolume.DeviceSpecificAttributes.Transport,
+		"iqn":                   storageVolume.DeviceSpecificAttributes.Iqn,
+		"number_of_replicas":    storageVolume.DeviceSpecificAttributes.NumberOfReplicas,
+		"data_protection_level": storageVolume.DeviceSpecificAttributes.DataProtectionLevel,
+		"id":                    storageVolume.DeviceSpecificAttributes.Id,
+		"uri":                   storageVolume.DeviceSpecificAttributes.Uri,
+		"copy_state":            storageVolume.DeviceSpecificAttributes.CopyState,
+		"snapshot_pool_uri":     storageVolume.DeviceSpecificAttributes.SnapshotPoolUri})
+	d.Set("device_specific_attributes", deviceSpecificAttributes)
+	d.Set("volume_template_uri", storageVolume.VolumeTemplateUri)
+	d.Set("is_shareable", storageVolume.IsShareable)
+	d.Set("storage_pool_uri", storageVolume.StoragePoolUri)
+	d.Set("storage_system_uri", storageVolume.StorageSystemUri)
+	d.Set("provisioned_capacity", storageVolume.ProvisionedCapacity)
+	properties := make([]map[string]interface{}, 0)
+	properties = append(properties, map[string]interface{}{
+		"name":              storageVolume.Properties.Name,
+		"storage_pool":      storageVolume.Properties.Storagepool,
+		"size":              storageVolume.Properties.Size,
+		"provisioning_type": storageVolume.Properties.ProvisioningType})
+	d.Set("properties", properties)
+	d.Set("template_uri", storageVolume.TemplateURI)
+	d.Set("is_permanent", storageVolume.IsPermanent)
+	d.Set("provisioning_type_for_update", storageVolume.ProvisioningTypeForUpdate)
+
+	return nil
+}
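A minimal configuration exercising this new data source might look like the following sketch; the volume name is a hypothetical placeholder and assumes a volume by that name already exists on the appliance. The data source is registered as `oneview_storage_volume` in the provider.go hunk below.

```hcl
# Hypothetical lookup of an existing storage volume by name; "DemoVolume"
# is a placeholder, and all other attributes are computed from the appliance.
data "oneview_storage_volume" "volume" {
  name = "DemoVolume"
}

# Computed attributes can then be interpolated elsewhere in the plan.
output "volume_uri" {
  value = "${data.oneview_storage_volume.volume.uri}"
}
```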
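The provider.go hunk that follows also registers six optional ICsp connection settings, each defaulting to a matching `ONEVIEW_ICSP_*` environment variable. As a sketch only, a provider block supplying them explicitly (alongside whatever `ov_*` settings the appliance connection already uses, omitted here) could read:

```hcl
# Sketch of the new ICsp settings; the endpoint and credentials are placeholders.
# Each value falls back to its ONEVIEW_ICSP_* environment variable when omitted.
provider "oneview" {
  icsp_endpoint   = "https://icsp.example.com"
  icsp_username   = "administrator"
  icsp_password   = "secret"
  icsp_domain     = "LOCAL"
  icsp_sslverify  = true
  icsp_apiversion = 200
}
```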
diff --git a/oneview/provider.go b/oneview/provider.go
index 7ca9c415..c7e7ac59 100644
--- a/oneview/provider.go
+++ b/oneview/provider.go
@@ -60,6 +60,36 @@ func Provider() terraform.ResourceProvider {
 				Optional:    true,
 				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_OV_IF_MATCH", "*"),
 			},
+			"icsp_domain": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_DOMAIN", ""),
+			},
+			"icsp_username": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_USER", ""),
+			},
+			"icsp_password": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_PASSWORD", ""),
+			},
+			"icsp_endpoint": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_ENDPOINT", ""),
+			},
+			"icsp_sslverify": {
+				Type:        schema.TypeBool,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_SSLVERIFY", true),
+			},
+			"icsp_apiversion": {
+				Type:        schema.TypeInt,
+				Optional:    true,
+				DefaultFunc: schema.EnvDefaultFunc("ONEVIEW_ICSP_API_VERSION", 200),
+			},
 			"i3s_endpoint": {
 				Type:     schema.TypeString,
 				Optional: true,
@@ -68,31 +98,26 @@ func Provider() terraform.ResourceProvider {
 			},
 		},
 		DataSourcesMap: map[string]*schema.Resource{
-			"oneview_fc_network":                 dataSourceFCNetwork(),
-			"oneview_fcoe_network":               dataSourceFCoENetwork(),
-			"oneview_enclosure":                  dataSourceEnclosure(),
-			"oneview_ethernet_network":           dataSourceEthernetNetwork(),
-			"oneview_interconnect_type":          dataSourceInterconnectType(),
-			"oneview_interconnect":               dataSourceInterconnects(),
-			"oneview_logical_interconnect":       dataSourceLogicalInterconnect(),
-			"oneview_logical_interconnect_group": dataSourceLogicalInterconnectGroup(),
-			"oneview_scope":                      dataSourceScope(),
-			"oneview_server_hardware":            dataSourceServerHardware(),
-			"oneview_server_hardware_type":       dataSourceServerHardwareType(),
-			"oneview_storage_attachment":         dataSourceStorageAttachment(),
-			"oneview_logical_enclosure":          dataSourceLogicalEnclosure(),
-			"oneview_enclosure_group":            dataSourceEnclosureGroup(),
-			"oneview_server_profile":             dataSourceServerProfile(),
-			"oneview_server_profile_template":    dataSourceServerProfileTemplate(),
-			"oneview_storage_system":             dataSourceStorageSystem(),
-			"oneview_uplink_set":                 dataSourceUplinkSet(),
-			"oneview_network_set":                dataSourceNetworkSet(),
-			"oneview_storage_volume_template":    dataSourceStorageVolumeTemplate(),
-			"oneview_volume":                     dataSourceVolume(),
+			"oneview_fc_network":              dataSourceFCNetwork(),
+			"oneview_enclosure":               dataSourceEnclosure(),
+			"oneview_ethernet_network":        dataSourceEthernetNetwork(),
+			"oneview_interconnect_type":       dataSourceInterconnectType(),
+			"oneview_interconnect":            dataSourceInterconnects(),
+			"oneview_logical_interconnect":    dataSourceLogicalInterconnect(),
+			"oneview_scope":                   dataSourceScope(),
+			"oneview_server_hardware":         dataSourceServerHardware(),
+			"oneview_server_hardware_type":    dataSourceServerHardwareType(),
+			"oneview_logical_enclosure":       dataSourceLogicalEnclosure(),
+			"oneview_enclosure_group":         dataSourceEnclosureGroup(),
+			"oneview_server_profile":          dataSourceServerProfile(),
+			"oneview_server_profile_template": dataSourceServerProfileTemplate(),
+			"oneview_storage_system":          dataSourceStorageSystem(),
+			"oneview_uplink_set":              dataSourceUplinkSet(),
+			"oneview_network_set":             dataSourceNetworkSet(),
+			"oneview_storage_volume":          dataSourceStorageVolume(),
 		},
 		ResourcesMap: map[string]*schema.Resource{
-			"oneview_deployment_plan":  resourceDeploymentPlan(),
 			"oneview_server_profile":   resourceServerProfile(),
 			"oneview_enclosure":        resourceEnclosure(),
 			"oneview_enclosure_group":  resourceEnclosureGroup(),
@@ -109,9 +134,6 @@ func Provider() terraform.ResourceProvider {
 			"oneview_uplink_set":        resourceUplinkSet(),
 			"oneview_i3s_plan":          resourceI3SPlan(),
 			"oneview_logical_enclosure": resourceLogicalEnclosure(),
-			"oneview_storage_pool":            resourceStoragePool(),
-			"oneview_storage_volume_template": resourceStorageVolumeTemplate(),
-			"oneview_volume":                  resourceVolume(),
 		},
 		ConfigureFunc: providerConfigure,
 	}
diff --git a/oneview/resource_enclosure.go b/oneview/resource_enclosure.go
index 03bbcb77..1431d471 100644
--- a/oneview/resource_enclosure.go
+++ b/oneview/resource_enclosure.go
@@ -12,6 +12,8 @@
 package oneview

 import (
+	"io/ioutil"
+
 	"github.com/HewlettPackard/oneview-golang/ov"
 	"github.com/HewlettPackard/oneview-golang/utils"
 	"github.com/hashicorp/terraform/helper/schema"
@@ -127,8 +129,9 @@ func resourceEnclosure() *schema.Resource {
 				Set: schema.HashString,
 			},
 			"eTag": {
-				Type:     schema.TypeString,
-				Computed: true,
+				Type:       schema.TypeString,
+				Computed:   true,
+				Deprecated: "Warning: Current value structure is deprecated",
 			},
 			"enclosure_group_uri": {
 				Type: schema.TypeString,
diff --git a/oneview/resource_enclosure_group.go b/oneview/resource_enclosure_group.go
index de671f42..74172ad2 100644
--- a/oneview/resource_enclosure_group.go
+++ b/oneview/resource_enclosure_group.go
@@ -109,7 +109,7 @@ func resourceEnclosureGroup() *schema.Resource {
 			},
 			"port_mapping_count": {
 				Type:     schema.TypeInt,
-				Computed: true,
+				Optional: true,
 			},
 			"port_mappings": {
 				Optional: true,
@@ -140,8 +140,9 @@ func resourceEnclosureGroup() *schema.Resource {
 				Optional: true,
 			},
 			"Status": {
-				Type:     schema.TypeString,
-				Optional: true,
+				Type:       schema.TypeString,
+				Optional:   true,
+				Deprecated: "Warning: Current value structure is deprecated",
 			},
 			"type": {
 				Type: schema.TypeString,
@@ -244,10 +245,10 @@ func resourceEnclosureGroupRead(d *schema.ResourceData, meta interface{}) error

 func resourceEnclosureGroupUpdate(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)
 	enclosureGroup := ov.EnclosureGroup{
-		URI: utils.NewNstring(d.Get("uri").(string)),
+		URI:                         utils.NewNstring(d.Get("uri").(string)),
 		InterconnectBayMappingCount: d.Get("interconnect_bay_mapping_count").(int),
-		Type:         d.Get("type").(string),
-		StackingMode:
d.Get("stacking_mode").(string), + Type: d.Get("type").(string), + StackingMode: d.Get("stacking_mode").(string), } rawInterconnectBayMappings := d.Get("interconnect_bay_mappings").(*schema.Set).List() diff --git a/oneview/resource_ethernet_network.go b/oneview/resource_ethernet_network.go index a05a0ee9..6fe8eb6b 100644 --- a/oneview/resource_ethernet_network.go +++ b/oneview/resource_ethernet_network.go @@ -99,13 +99,15 @@ func resourceEthernetNetwork() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "scopesUri": { - Optional: true, - Type: schema.TypeString, - Computed: true, + Optional: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "initial_scope_uris": { Optional: true, @@ -192,7 +194,7 @@ func resourceEthernetNetworkUpdate(d *schema.ResourceData, meta interface{}) err PrivateNetwork: d.Get("private_network").(bool), SmartLink: d.Get("smart_link").(bool), ConnectionTemplateUri: utils.NewNstring(d.Get("connection_template_uri").(string)), - Type: d.Get("type").(string), + Type: d.Get("type").(string), } err := config.ovClient.UpdateEthernetNetwork(newENet) diff --git a/oneview/resource_fc_network.go b/oneview/resource_fc_network.go index 8f0b9c95..177da189 100644 --- a/oneview/resource_fc_network.go +++ b/oneview/resource_fc_network.go @@ -82,8 +82,9 @@ func resourceFCNetwork() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "created": { Type: schema.TypeString, @@ -94,9 +95,10 @@ func resourceFCNetwork() *schema.Resource { Computed: true, }, "scopesUri": { - Optional: true, - Type: schema.TypeString, - Computed: true, + Optional: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "initial_scope_uris": { Optional: true, @@ -119,8 +121,8 @@ func resourceFCNetworkCreate(d *schema.ResourceData, meta interface{}) error { LinkStabilityTime: d.Get("link_stability_time").(int), ManagedSanURI: utils.NewNstring(d.Get("managed_san_uri").(string)), AutoLoginRedistribution: d.Get("auto_login_redistribution").(bool), - Type: d.Get("type").(string), - Description: utils.NewNstring(d.Get("description").(string)), + Type: d.Get("type").(string), + Description: utils.NewNstring(d.Get("description").(string)), } if val, ok := d.GetOk("initial_scope_uris"); ok { @@ -179,9 +181,9 @@ func resourceFCNetworkUpdate(d *schema.ResourceData, meta interface{}) error { FabricType: d.Get("fabric_type").(string), LinkStabilityTime: d.Get("link_stability_time").(int), AutoLoginRedistribution: d.Get("auto_login_redistribution").(bool), - Type: d.Get("type").(string), - ConnectionTemplateUri: utils.NewNstring(d.Get("connection_template_uri").(string)), - Description: utils.NewNstring(d.Get("description").(string)), + Type: d.Get("type").(string), + ConnectionTemplateUri: utils.NewNstring(d.Get("connection_template_uri").(string)), + Description: utils.NewNstring(d.Get("description").(string)), } err := config.ovClient.UpdateFcNetwork(fcNet) diff --git a/oneview/resource_fcoe_network.go b/oneview/resource_fcoe_network.go index 724780be..b83de985 100644 --- a/oneview/resource_fcoe_network.go +++ b/oneview/resource_fcoe_network.go @@ -33,13 +33,15 @@ func resourceFCoENetwork() 
*schema.Resource { Required: true, }, "vlanId": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Deprecated: "Warning: Current value structure is deprecated", }, "connectionTemplateUri": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "type": { Type: schema.TypeString, @@ -47,12 +49,14 @@ func resourceFCoENetwork() *schema.Resource { Default: "fcoe-network", }, "managedSanUri": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "fabricUri": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "description": { Type: schema.TypeString, @@ -67,8 +71,9 @@ func resourceFCoENetwork() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "modified": { Type: schema.TypeString, @@ -86,19 +91,6 @@ func resourceFCoENetwork() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "scopesUri": { - Optional: true, - Type: schema.TypeString, - Computed: true, - }, - "initial_scope_uris": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - Set: schema.HashString, - }, }, } } @@ -111,14 +103,7 @@ func resourceFCoENetworkCreate(d *schema.ResourceData, meta interface{}) error { VlanId: d.Get("vlanId").(int), Type: d.Get("type").(string), } - if val, ok := d.GetOk("initial_scope_uris"); ok { - rawInitialScopeUris := val.(*schema.Set).List() - initialScopeUris := make([]utils.Nstring, len(rawInitialScopeUris)) - for _, rawData := range rawInitialScopeUris { - initialScopeUris = append(initialScopeUris, utils.Nstring(rawData.(string))) - } - fcoeNet.InitialScopeUris = initialScopeUris - } + fcoeNetError := config.ovClient.CreateFCoENetwork(fcoeNet) d.SetId(d.Get("name").(string)) if fcoeNetError != nil { @@ -131,12 +116,11 @@ func resourceFCoENetworkCreate(d *schema.ResourceData, meta interface{}) error { func resourceFCoENetworkRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - fcoeNet, fcoeNetError := config.ovClient.GetFCoENetworkByName(d.Id()) + fcoeNet, fcoeNetError := config.ovClient.GetFCoENetworkByName(d.Get("name").(string)) if fcoeNetError != nil || fcoeNet.URI.IsNil() { d.SetId("") return nil } - d.Set("vlanId", fcoeNet.VlanId) d.Set("created", fcoeNet.Created) d.Set("modified", fcoeNet.Modified) d.Set("uri", fcoeNet.URI.String()) @@ -148,8 +132,7 @@ func resourceFCoENetworkRead(d *schema.ResourceData, meta interface{}) error { d.Set("eTag", fcoeNet.ETAG) d.Set("managedSanUri", fcoeNet.ManagedSanUri) d.Set("description", fcoeNet.Description) - d.Set("scopesUri", fcoeNet.ScopesUri.String()) - d.Set("initial_scope_uris", fcoeNet.InitialScopeUris) + return nil } @@ -157,12 +140,12 @@ func resourceFCoENetworkUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) newFCoENet := ov.FCoENetwork{ - ETAG: d.Get("eTag").(string), - URI: utils.NewNstring(d.Get("uri").(string)), - VlanId: d.Get("vlanId").(int), - Name: d.Get("name").(string), + ETAG: d.Get("eTag").(string), + URI: utils.NewNstring(d.Get("uri").(string)), + VlanId: d.Get("vlanId").(int), + 
Name:                  d.Get("name").(string),
 		ConnectionTemplateUri: utils.NewNstring(d.Get("connectionTemplateUri").(string)),
-		Type: d.Get("type").(string),
+		Type:                  d.Get("type").(string),
 	}

 	err := config.ovClient.UpdateFCoENetwork(newFCoENet)
diff --git a/oneview/resource_logical_enclosure.go b/oneview/resource_logical_enclosure.go
index 76dccdb3..312ea214 100644
--- a/oneview/resource_logical_enclosure.go
+++ b/oneview/resource_logical_enclosure.go
@@ -174,10 +174,6 @@ func resourceLogicalEnclosure() *schema.Resource {
 				Type:     schema.TypeString,
 				Optional: true,
 			},
-			"update_type": {
-				Type:     schema.TypeString,
-				Optional: true,
-			},
 		},
 	}

@@ -219,12 +215,12 @@ func resourceLogicalEnclosureCreate(d *schema.ResourceData, meta interface{}) er
 func resourceLogicalEnclosureRead(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)

-	logicalEnclosure, err := config.ovClient.GetLogicalEnclosureByName(d.Id())
+	logicalEnclosure, err := config.ovClient.GetLogicalEnclosureByName(d.Get("name").(string))
 	if err != nil {
 		d.SetId("")
 		return nil
 	}
 	d.SetId(logicalEnclosure.Name)
 	d.Set("ambient_temperature_mode", logicalEnclosure.AmbientTemperatureMode)
 	d.Set("category", logicalEnclosure.Category)
 	d.Set("created", logicalEnclosure.Created)
@@ -292,21 +288,6 @@ func resourceLogicalEnclosureRead(d *schema.ResourceData, meta interface{}) erro

 func resourceLogicalEnclosureUpdate(d *schema.ResourceData, meta interface{}) error {
 	config := meta.(*Config)

-	if val, ok := d.GetOk("update_type"); ok {
-		if val.(string) == "updateByGroup" {
-			id := d.Id()
-			logicalEnclosure, err := config.ovClient.GetLogicalEnclosureByName(id)
-			err = config.ovClient.UpdateFromGroupLogicalEnclosure(logicalEnclosure)
-
-			if err != nil {
-				return err
-			}
-			d.SetId(id)
-
-			return resourceLogicalEnclosureRead(d, meta)
-		}
-	}
-
 	logicalEnclosure := ov.LogicalEnclosure{
 		Name: d.Get("name").(string),
 		Type: d.Get("type").(string),
@@ -332,10 +313,6 @@ func resourceLogicalEnclosureUpdate(d *schema.ResourceData, meta interface{}) er
 		logicalEnclosure.ScopesUri = utils.NewNstring(val.(string))
 	}

-	if val, ok := d.GetOk("enclosure_group_uri"); ok {
-		logicalEnclosure.EnclosureGroupUri = utils.NewNstring(val.(string))
-	}
-
 	deploymentManagerSettingsList := d.Get("deployment_manager_settings").(*schema.Set).List()
 	for _, raw := range deploymentManagerSettingsList {
 		deploymentManagerSetting := raw.(map[string]interface{})
@@ -347,10 +324,7 @@ func resourceLogicalEnclosureUpdate(d *schema.ResourceData, meta interface{}) er
 			ManageOSDeployment:     deploymentManagerSetting["manage_os_deployment"].(bool),
 			DeploymentModeSettings: &deploymentModeSettings,
 		}
-		deploymentClusterUri := utils.NewNstring("")
-		if deploymentManagerSetting["deployment_cluster_uri"] != nil {
-			deploymentClusterUri = utils.NewNstring(deploymentManagerSetting["deployment_cluster_uri"].(string))
-		}
+		deploymentClusterUri := utils.NewNstring(deploymentManagerSetting["deployment_cluster_uri"].(string))
 		deploymentManagerSettings := ov.DeploymentManagerSettings{
 			DeploymentClusterUri: deploymentClusterUri,
 			OsDeploymentSettings: &leOsDeploymentSettings,
diff --git a/oneview/resource_logical_interconnect_group.go b/oneview/resource_logical_interconnect_group.go
index 081462f8..bbf1b3dd 100644
--- a/oneview/resource_logical_interconnect_group.go
+++ b/oneview/resource_logical_interconnect_group.go
@@ -612,8 +612,9 @@ func resourceLogicalInterconnectGroup() *schema.Resource {
 				Computed: true,
 			},
 			"eTag": {
-				Type:     schema.TypeString,
-				Computed: true,
+				Type:
schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, }, } @@ -1526,7 +1527,7 @@ func resourceLogicalInterconnectGroupRead(d *schema.ResourceData, meta interface qualityOfService := make([]map[string]interface{}, 0, 1) qualityOfService = append(qualityOfService, map[string]interface{}{ - "type": logicalInterconnectGroup.QosConfiguration.Type, + "type": logicalInterconnectGroup.QosConfiguration.Type, "active_qos_config_type": logicalInterconnectGroup.QosConfiguration.ActiveQosConfig.Type, "config_type": logicalInterconnectGroup.QosConfiguration.ActiveQosConfig.ConfigType, "uplink_classification_type": logicalInterconnectGroup.QosConfiguration.ActiveQosConfig.UplinkClassificationType, diff --git a/oneview/resource_logical_interconnects.go b/oneview/resource_logical_interconnects.go index 025e92b8..70af4a83 100644 --- a/oneview/resource_logical_interconnects.go +++ b/oneview/resource_logical_interconnects.go @@ -12,6 +12,9 @@ package oneview import ( + "errors" + "strings" + "github.com/hashicorp/terraform/helper/schema" ) @@ -19,6 +22,7 @@ func resourceLogicalInterconnect() *schema.Resource { return &schema.Resource{ Read: resourceLogicalInterconnectRead, Update: resourceLogicalInterconnectUpdate, + Delete: resourceLogicalInterconnectDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -380,7 +384,8 @@ func resourceLogicalInterconnectUpdate(d *schema.ResourceData, meta interface{}) updateType := d.Get("update_type").(string) if updateType == "updateComplianceById" { - id := d.Id() + id := strings.Replace(d.Get("uri").(string), "/rest/logical-interconnects/", "", -1) + err := config.ovClient.UpdateLogicalInterconnectConsistentStateById(id) if err != nil { return err @@ -390,3 +395,8 @@ func resourceLogicalInterconnectUpdate(d *schema.ResourceData, meta interface{}) return resourceLogicalInterconnectRead(d, meta) } + +func resourceLogicalInterconnectDelete(d *schema.ResourceData, meta interface{}) error { + + return errors.New("No logic implemented") +} diff --git a/oneview/resource_logical_switch_group.go b/oneview/resource_logical_switch_group.go index 4208d233..4b4a47a8 100644 --- a/oneview/resource_logical_switch_group.go +++ b/oneview/resource_logical_switch_group.go @@ -86,8 +86,9 @@ func resourceLogicalSwitchGroup() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "switch_type_uri": { Type: schema.TypeString, diff --git a/oneview/resource_network_set.go b/oneview/resource_network_set.go index 8351456c..8dabbed3 100644 --- a/oneview/resource_network_set.go +++ b/oneview/resource_network_set.go @@ -80,8 +80,9 @@ func resourceNetworkSet() *schema.Resource { Computed: true, }, "eTag": { - Type: schema.TypeString, - Computed: true, + Type: schema.TypeString, + Computed: true, + Deprecated: "Warning: Current value structure is deprecated", }, "scopes_uri": { Type: schema.TypeString, @@ -95,10 +96,6 @@ func resourceNetworkSet() *schema.Resource { }, Set: schema.HashString, }, - "network_set_type": { - Type: schema.TypeString, - Optional: true, - }, }, } } @@ -128,10 +125,6 @@ func resourceNetworkSetCreate(d *schema.ResourceData, meta interface{}) error { netSet.InitialScopeUris = initialScopeUris } - if val, ok := d.GetOk("network_set_type"); ok { - netSet.NetworkSetType = val.(string) - } - netSetError := config.ovClient.CreateNetworkSet(netSet) 
d.SetId(d.Get("name").(string)) if netSetError != nil { @@ -178,7 +171,6 @@ func resourceNetworkSetRead(d *schema.ResourceData, meta interface{}) error { d.Set("network_uris", networkUris) d.Set("initial_scope_uris", netSet.InitialScopeUris) d.Set("scopes_uri", netSet.ScopesUri) - d.Set("network_set_type", netSet.NetworkSetType) return nil @@ -193,17 +185,13 @@ func resourceNetworkSetUpdate(d *schema.ResourceData, meta interface{}) error { netUris[i] = utils.NewNstring(raw.(string)) } newNetSet := ov.NetworkSet{ - ETAG: d.Get("eTag").(string), - URI: utils.NewNstring(d.Get("uri").(string)), - Name: d.Get("name").(string), + ETAG: d.Get("eTag").(string), + URI: utils.NewNstring(d.Get("uri").(string)), + Name: d.Get("name").(string), ConnectionTemplateUri: utils.NewNstring(d.Get("connection_template_uri").(string)), - Type: d.Get("type").(string), - NativeNetworkUri: utils.NewNstring(d.Get("native_network_uri").(string)), - NetworkUris: netUris, - } - - if val, ok := d.GetOk("network_set_type"); ok { - newNetSet.NetworkSetType = val.(string) + Type: d.Get("type").(string), + NativeNetworkUri: utils.NewNstring(d.Get("native_network_uri").(string)), + NetworkUris: netUris, } err := config.ovClient.UpdateNetworkSet(newNetSet) diff --git a/oneview/resource_server_profile.go b/oneview/resource_server_profile.go index 7f44052c..88fc404f 100644 --- a/oneview/resource_server_profile.go +++ b/oneview/resource_server_profile.go @@ -36,416 +36,6 @@ func resourceServerProfile() *schema.Resource { Type: schema.TypeString, Required: true, }, - "boot_order": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "boot_mode": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_mode": { - Type: schema.TypeBool, - Required: true, - }, - "mode": { - Type: schema.TypeString, - Required: true, - }, - "pxe_boot_policy": { - Type: schema.TypeString, - Required: true, - }, - }, - }, - }, - "bios_option": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_bios": { - Type: schema.TypeBool, - Required: true, - }, - "overridden_settings": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "network": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Required: true, - }, - "function_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "network_uri": { - Type: schema.TypeString, - Required: true, - }, - "port_id": { - Type: schema.TypeString, - Optional: true, - Default: "Lom 1:1-a", - }, - "requested_mbps": { - Type: schema.TypeString, - Optional: true, - Default: "2500", - }, - "id": { - Type: schema.TypeInt, - Optional: true, - }, - "boot": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "priority": { - Type: schema.TypeString, - Optional: true, - }, - "ethernet_boot_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_source": { - Type: schema.TypeString, - Optional: true, - }, - "iscsi": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
"chap_level": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - "initiator_name_source": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "ipv4": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gateway": { - Type: schema.TypeString, - Optional: true, - }, - "ip_address_source": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "server_hardware_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "enclosure_group": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - "affinity": { - Type: schema.TypeString, - Optional: true, - Default: "Bay", - }, - "hide_unused_flex_nics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "manage_connections": { - Type: schema.TypeBool, - Optional: true, - }, - "initial_scope_uris": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - }, - "serial_number_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: true, - }, - "wwn_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: true, - }, - "mac_type": { - Type: schema.TypeString, - Optional: true, - Default: "Virtual", - ForceNew: true, - }, - "firmware": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "force_install_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_baseline_uri": { - Type: schema.TypeString, - Optional: true, - }, - "manage_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_install_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "local_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_local_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "initialize": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "logical_drives": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bootable": { - Type: schema.TypeBool, - Optional: true, - }, - "raid_level": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "san_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host_os_type": { - Type: schema.TypeString, - Optional: true, - }, - "manage_san_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "server_hardware_type_uri": { - Type: schema.TypeString, - Optional: true, - }, - "server_hardware_uri": { - Type: schema.TypeString, - Optional: true, - }, - "serial_number": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - }, - "uri": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - // schema for ov.SanStorage.VolumeAttachments - "volume_attachments": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "id": { - Type: schema.TypeInt, - Required: true, - }, - "lun": { - Type: schema.TypeString, - Required: true, - }, - "lun_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_priority": { - Type: schema.TypeString, - Optional: true, - }, - "permanent": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_storage_pool_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_storage_system_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_shareable": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_description": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provision_type": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provisioned_capacity_bytes": { - Type: schema.TypeString, - Optional: true, - }, - "volume_name": { - Type: schema.TypeString, - Optional: true, - }, - "storage_paths": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Optional: true, - }, - "storage_target_type": { - Type: schema.TypeString, - Optional: true, - }, - "target_selector": { - Type: schema.TypeString, - Optional: true, - }, - "is_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "connection_id": { - Type: schema.TypeInt, - Optional: true, - }, - "targets": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "tcp_port": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, "uri": { Type: schema.TypeString, Computed: true, @@ -492,66 +82,6 @@ func resourceServerProfile() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "power_state": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: func(v interface{}, k string) (warning []string, errors []error) { - val := v.(string) - if val != "on" && val != "off" { - errors = append(errors, fmt.Errorf("%q must be 'on' or 'off'", k)) - } - return - }, - }, - "options": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "op": { - Required: true, - Type: schema.TypeString, - }, - "path": { - Required: true, - Type: schema.TypeString, - }, - "value": { - Required: true, - Type: schema.TypeString, - }, - }, - }, - }, - "update_type": { - Type: schema.TypeString, - Optional: true, - }, - "os_deployment_settings": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "os_custom_attributes": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, }, } } @@ -576,10 +106,8 @@ func resourceServerProfileCreate(d *schema.ResourceData, meta interface{}) error serverProfile.Type = d.Get("type").(string) serverProfile.Name = d.Get("name").(string) - var serverHardware ov.ServerHardware if val, ok := d.GetOk("hardware_name"); ok { - var err error - serverHardware, err = config.ovClient.GetServerHardwareByName(val.(string)) + serverHardware, err := config.ovClient.GetServerHardwareByName(val.(string)) if err != nil { 
return err } @@ -589,314 +117,12 @@ func resourceServerProfileCreate(d *schema.ResourceData, meta interface{}) error serverProfile.ServerHardwareURI = serverHardware.URI } - if val, ok := d.GetOk("affinity"); ok { - serverProfile.Affinity = val.(string) - } - - if val, ok := d.GetOk("serial_number_type"); ok { - serverProfile.SerialNumberType = val.(string) - } - - if val, ok := d.GetOk("wwn_type"); ok { - serverProfile.WWNType = val.(string) - } - - if val, ok := d.GetOk("mac_type"); ok { - serverProfile.MACType = val.(string) - } - - if val, ok := d.GetOk("hide_unused_flex_nics"); ok { - serverProfile.HideUnusedFlexNics = val.(bool) - } - - if val, ok := d.GetOk("enclosure_group"); ok { - enclosureGroup, err := config.ovClient.GetEnclosureGroupByName(val.(string)) - if err != nil { - return err - } - serverProfile.EnclosureGroupURI = enclosureGroup.URI - } - - if val, ok := d.GetOk("server_hardware_type"); ok { - serverHardwareType, err := config.ovClient.GetServerHardwareTypeByName(val.(string)) - if err != nil { - return err - } - serverProfile.ServerHardwareTypeURI = serverHardwareType.URI - } - - rawNetwork := d.Get("network").(*schema.Set).List() - networks := make([]ov.Connection, 0) - for _, rawNet := range rawNetwork { - rawNetworkItem := rawNet.(map[string]interface{}) - - bootOptions := ov.BootOption{} - if rawNetworkItem["boot"] != nil { - rawBoots := rawNetworkItem["boot"].(*schema.Set).List() - for _, rawBoot := range rawBoots { - bootItem := rawBoot.(map[string]interface{}) - - iscsi := ov.BootIscsi{} - if bootItem["iscsi"] != nil { - rawIscsis := bootItem["iscsi"].(*schema.Set).List() - for _, rawIscsi := range rawIscsis { - rawIscsiItem := rawIscsi.(map[string]interface{}) - iscsi = ov.BootIscsi{ - Chaplevel: rawIscsiItem["chap_level"].(string), - FirstBootTargetIp: rawIscsiItem["first_boot_target_ip"].(string), - FirstBootTargetPort: rawIscsiItem["first_boot_target_ip"].(string), - SecondBootTargetIp: rawIscsiItem["second_boot_target_ip"].(string), - SecondBootTargetPort: rawIscsiItem["second_boot_target_port"].(string), - } - } - } - - bootOptions = ov.BootOption{ - Priority: bootItem["priority"].(string), - EthernetBootType: bootItem["ethernet_boot_type"].(string), - BootVolumeSource: bootItem["boot_volume_source"].(string), - Iscsi: &iscsi, - } - } - } - - ipv4 := ov.Ipv4Option{} - if rawNetworkItem["ipv4"] != nil { - rawIpv4s := rawNetworkItem["ipv4"].(*schema.Set).List() - for _, rawIpv4 := range rawIpv4s { - rawIpv4Item := rawIpv4.(map[string]interface{}) - ipv4 = ov.Ipv4Option{ - Gateway: rawIpv4Item["gateway"].(string), - IpAddressSource: rawIpv4Item["ip_address_source"].(string), - } - } - } - - networks = append(networks, ov.Connection{ - ID: rawNetworkItem["id"].(int), - Name: rawNetworkItem["name"].(string), - FunctionType: rawNetworkItem["function_type"].(string), - NetworkURI: utils.NewNstring(rawNetworkItem["network_uri"].(string)), - PortID: rawNetworkItem["port_id"].(string), - RequestedMbps: rawNetworkItem["requested_mbps"].(string), - Ipv4: &ipv4, - Boot: &bootOptions, - }) - } - if val, ok := d.GetOk("manage_connections"); ok { - serverProfile.ConnectionSettings.ManageConnections = val.(bool) - serverProfile.ConnectionSettings.Connections = networks - } else { - serverProfile.Connections = networks - } - - if val, ok := d.GetOk("boot_order"); ok { - rawBootOrder := val.(*schema.Set).List() - bootOrder := make([]string, len(rawBootOrder)) - for i, raw := range rawBootOrder { - bootOrder[i] = raw.(string) - } - serverProfile.Boot.ManageBoot = true - 
serverProfile.Boot.Order = bootOrder - rawBootMode := d.Get("boot_mode").(*schema.Set).List()[0].(map[string]interface{}) - serverProfile.BootMode = ov.BootModeOption{ - ManageMode: rawBootMode["manage_mode"].(bool), - Mode: rawBootMode["mode"].(string), - PXEBootPolicy: utils.Nstring(rawBootMode["pxe_boot_policy"].(string)), - } - - } - - if val, ok := d.GetOk("bios_option"); ok { - rawBiosOption := val.(*schema.Set).List() - biosOption := ov.BiosOption{} - for _, raw := range rawBiosOption { - rawBiosItem := raw.(map[string]interface{}) - - overriddenSettings := make([]ov.BiosSettings, 0) - rawOverriddenSetting := rawBiosItem["overridden_settings"].(*schema.Set).List() - - for _, raw2 := range rawOverriddenSetting { - rawOverriddenSettingItem := raw2.(map[string]interface{}) - overriddenSettings = append(overriddenSettings, ov.BiosSettings{ - ID: rawOverriddenSettingItem["id"].(string), - Value: rawOverriddenSettingItem["value"].(string), - }) - } - biosOption = ov.BiosOption{ - ManageBios: rawBiosItem["manage_bios"].(bool), - OverriddenSettings: overriddenSettings, - } - } - serverProfile.Bios = &biosOption - } - - if val, ok := d.GetOk("initial_scope_uris"); ok { - initialScopeUrisOrder := val.(*schema.Set).List() - initialScopeUris := make([]utils.Nstring, len(initialScopeUrisOrder)) - for i, raw := range initialScopeUrisOrder { - initialScopeUris[i] = utils.Nstring(raw.(string)) - } - serverProfile.InitialScopeUris = initialScopeUris - } - - // Get firmware details - rawFirmware := d.Get("firmware").(*schema.Set).List() - firmware := ov.FirmwareOption{} - for _, raw := range rawFirmware { - firmwareItem := raw.(map[string]interface{}) - firmware = ov.FirmwareOption{ - ForceInstallFirmware: firmwareItem["force_install_firmware"].(bool), - FirmwareBaselineUri: utils.NewNstring(firmwareItem["firmware_baseline_uri"].(string)), - ManageFirmware: firmwareItem["manage_firmware"].(bool), - FirmwareOptionv200: ov.FirmwareOptionv200{ - FirmwareInstallType: firmwareItem["firmware_install_type"].(string), - }, - } - } - serverProfile.Firmware = firmware - - // Get local storage data if provided - rawLocalStorage := d.Get("local_storage").(*schema.Set).List() - localStorage := ov.LocalStorageOptions{} - for _, raw := range rawLocalStorage { - localStorageItem := raw.(map[string]interface{}) - localStorage = ov.LocalStorageOptions{ - ManageLocalStorage: localStorageItem["manage_local_storage"].(bool), - Initialize: localStorageItem["initialize"].(bool), - } - } - serverProfile.LocalStorage = localStorage - - rawLogicalDrives := d.Get("logical_drives").(*schema.Set).List() - logicalDrives := make([]ov.LogicalDrive, 0) - for _, rawLogicalDrive := range rawLogicalDrives { - logicalDrivesItem := rawLogicalDrive.(map[string]interface{}) - logicalDrives = append(logicalDrives, ov.LogicalDrive{ - Bootable: logicalDrivesItem["bootable"].(bool), - RaidLevel: logicalDrivesItem["raid_level"].(string), - }) - } - serverProfile.LocalStorage.LogicalDrives = logicalDrives - - // get SAN storage data if provided - rawSanStorage := d.Get("san_storage").(*schema.Set).List() - sanStorage := ov.SanStorageOptions{} - for _, raw := range rawSanStorage { - sanStorageItem := raw.(map[string]interface{}) - sanStorage = ov.SanStorageOptions{ - HostOSType: sanStorageItem["host_os_type"].(string), - ManageSanStorage: sanStorageItem["manage_san_storage"].(bool), - ServerHardwareTypeURI: utils.NewNstring(sanStorageItem["server_hardware_type_uri"].(string)), - ServerHardwareURI: 
utils.NewNstring(sanStorageItem["server_hardware_uri"].(string)), - SerialNumber: sanStorageItem["serial_number"].(string), - Type: sanStorageItem["type"].(string), - URI: utils.NewNstring(sanStorageItem["uri"].(string)), - } - } - serverProfile.SanStorage = sanStorage - - // Get volume attachment data for san storage - rawVolumeAttachments := d.Get("volume_attachments").(*schema.Set).List() - volumeAttachments := make([]ov.VolumeAttachment, 0) - - for _, rawVolumeAttachment := range rawVolumeAttachments { - volumeAttachmentItem := rawVolumeAttachment.(map[string]interface{}) - - // get volumeAttachemts.storagepaths - storagePaths := make([]ov.StoragePath, 0) - if volumeAttachmentItem["storage_paths"] != nil { - rawStoragePaths := volumeAttachmentItem["storage_paths"].(*schema.Set).List() - - for _, rawStoragePath := range rawStoragePaths { - storagePathItem := rawStoragePath.(map[string]interface{}) - - // get volumeAttachemts.storagepaths.targets - targets := make([]ov.Target, 0) - if storagePathItem["targets"] != nil { - rawStorageTargets := storagePathItem["targets"].(*schema.Set).List() - for _, rawStorageTarget := range rawStorageTargets { - storageTargetItem := rawStorageTarget.(map[string]interface{}) - targets = append(targets, ov.Target{ - IpAddress: storageTargetItem["ip_address"].(string), - Name: storageTargetItem["name"].(string), - TcpPort: storageTargetItem["tcp_port"].(int), - }) - } - } - - storagePaths = append(storagePaths, ov.StoragePath{ - IsEnabled: storagePathItem["is_enabled"].(bool), - Status: storagePathItem["status"].(string), - ConnectionID: storagePathItem["connection_id"].(int), - StorageTargetType: storagePathItem["storage_target_type"].(string), - TargetSelector: storagePathItem["target_selector"].(string), - Targets: targets, - }) - } - } - - tempPermanent := volumeAttachmentItem["permanent"].(bool) - tempVolumeShareable := volumeAttachmentItem["volume_shareable"].(bool) - volumeAttachments = append(volumeAttachments, ov.VolumeAttachment{ - Permanent: &tempPermanent, - ID: volumeAttachmentItem["id"].(int), - LUN: volumeAttachmentItem["lun"].(string), - LUNType: volumeAttachmentItem["lun_type"].(string), - VolumeStoragePoolURI: utils.NewNstring(volumeAttachmentItem["volume_storage_pool_uri"].(string)), - VolumeURI: utils.NewNstring(volumeAttachmentItem["volume_uri"].(string)), - VolumeStorageSystemURI: utils.NewNstring(volumeAttachmentItem["volume_storage_system_uri"].(string)), - VolumeShareable: &tempVolumeShareable, - VolumeDescription: volumeAttachmentItem["volume_description"].(string), - VolumeProvisionType: volumeAttachmentItem["volume_provision_type"].(string), - VolumeProvisionedCapacityBytes: volumeAttachmentItem["volume_provisioned_capacity_bytes"].(string), - VolumeName: volumeAttachmentItem["volume_name"].(string), - StoragePaths: storagePaths, - BootVolumePriority: volumeAttachmentItem["boot_volume_priority"].(string), - }) - } - serverProfile.SanStorage.VolumeAttachments = volumeAttachments - - if val, ok := d.GetOk("os_deployment_settings"); ok { - rawOsDeploySetting := val.(*schema.Set).List() - for _, raw := range rawOsDeploySetting { - osDeploySettingItem := raw.(map[string]interface{}) - - osCustomAttributes := make([]ov.OSCustomAttribute, 0) - if osDeploySettingItem["os_custom_attributes"] != nil { - rawOsDeploySettings := osDeploySettingItem["os_custom_attributes"].(*schema.Set).List() - for _, rawDeploySetting := range rawOsDeploySettings { - rawOsDeploySetting := rawDeploySetting.(map[string]interface{}) - - osCustomAttributes = 
append(osCustomAttributes, ov.OSCustomAttribute{ - Name: rawOsDeploySetting["name"].(string), - Value: rawOsDeploySetting["value"].(string), - }) - } - } - - // If Name already imported from SPT, overwrite its value from SP - for _, temp1 := range osCustomAttributes { - for j, temp2 := range serverProfile.OSDeploymentSettings.OSCustomAttributes { - if temp1.Name == temp2.Name { - serverProfile.OSDeploymentSettings.OSCustomAttributes[j].Value = temp1.Value - } - } - } - - } - } - err := config.ovClient.SubmitNewProfile(serverProfile) d.SetId(d.Get("name").(string)) if err != nil { d.SetId("") return err - } else if d.Get("power_state").(string) == "on" { - if err := serverHardware.PowerOn(); err != nil { - return err - } } return resourceServerProfileRead(d, meta) @@ -905,7 +131,7 @@ func resourceServerProfileCreate(d *schema.ResourceData, meta interface{}) error func resourceServerProfileRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - serverProfile, err := config.ovClient.GetProfileByName(d.Id()) + serverProfile, err := config.ovClient.GetProfileByName(d.Get("name").(string)) if err != nil || serverProfile.URI.IsNil() { d.SetId("") return nil @@ -933,422 +159,42 @@ func resourceServerProfileRead(d *schema.ResourceData, meta interface{}) error { d.Set("type", serverProfile.Type) d.Set("uri", serverProfile.URI.String()) - enclosureGroup, err := config.ovClient.GetEnclosureGroupByUri(serverProfile.EnclosureGroupURI) - if err != nil { - return err - } - d.Set("enclosure_group", enclosureGroup.Name) - - serverHardwareType, err := config.ovClient.GetServerHardwareTypeByUri(serverProfile.ServerHardwareTypeURI) - if err != nil { - return err - } - d.Set("server_hardware_type", serverHardwareType.Name) - d.Set("affinity", serverProfile.Affinity) - d.Set("serial_number_type", serverProfile.SerialNumberType) - d.Set("wwn_type", serverProfile.WWNType) - d.Set("mac_type", serverProfile.MACType) - d.Set("hide_unused_flex_nics", serverProfile.HideUnusedFlexNics) - - var connections []ov.Connection - if len(serverProfile.ConnectionSettings.Connections) != 0 { - connections = serverProfile.ConnectionSettings.Connections - } else { - connections = serverProfile.Connections - } - if len(connections) != 0 { - networks := make([]map[string]interface{}, 0, len(connections)) - for _, rawNet := range connections { - networks = append(networks, map[string]interface{}{ - "name": rawNet.Name, - "function_type": rawNet.FunctionType, - "network_uri": rawNet.NetworkURI.String(), - "port_id": rawNet.PortID, - "requested_mbps": rawNet.RequestedMbps, - }) - } - d.Set("network", networks) - } - - if serverProfile.Boot.ManageBoot { - bootOrder := make([]interface{}, 0) - for _, currBoot := range serverProfile.Boot.Order { - rawBootOrder := d.Get("boot_order").(*schema.Set).List() - for _, raw := range rawBootOrder { - if raw == currBoot { - bootOrder = append(bootOrder, currBoot) - } - } - } - d.Set("boot_order", bootOrder) - } - return nil } func resourceServerProfileUpdate(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - updateType := d.Get("update_type").(string) - - if updateType == "patch" { - serverProfile := ov.ServerProfile{ - Name: d.Get("name").(string), - Type: d.Get("type").(string), - URI: utils.NewNstring(d.Get("uri").(string)), - } - - rawOptions := d.Get("options").(*schema.Set).List() - options := make([]ov.Options, len(rawOptions)) - for i, rawData := range rawOptions { - option := rawData.(map[string]interface{}) - options[i] = ov.Options{ - 
Op: option["op"].(string), - Path: option["path"].(string), - Value: option["value"].(string)} - } - - error := config.ovClient.PatchServerProfile(serverProfile, options) - d.SetId(d.Get("name").(string)) - if error != nil { - d.SetId("") - return error - } + serverProfile := ov.ServerProfile{ + Type: d.Get("type").(string), + Name: d.Get("name").(string), + URI: utils.NewNstring(d.Get("uri").(string)), } - if updateType == "put" { - serverProfile := ov.ServerProfile{ - Type: d.Get("type").(string), - Name: d.Get("name").(string), - URI: utils.NewNstring(d.Get("uri").(string)), - } - - var serverHardware ov.ServerHardware - if val, ok := d.GetOk("hardware_name"); ok { - var err error - serverHardware, err = config.ovClient.GetServerHardwareByName(val.(string)) - if err != nil { - return err - } - if !strings.EqualFold(serverHardware.PowerState, "off") { - return fmt.Errorf("Server Hardware must be powered off to assign to server profile") - } - serverProfile.ServerHardwareURI = serverHardware.URI - } - - if val, ok := d.GetOk("template"); ok { - serverProfileTemplate, err := config.ovClient.GetProfileTemplateByName(val.(string)) - if err != nil || serverProfileTemplate.URI.IsNil() { - return err - } - serverProfile.ServerProfileTemplateURI = serverProfileTemplate.URI - } - - if val, ok := d.GetOk("affinity"); ok { - serverProfile.Affinity = val.(string) - } - - if val, ok := d.GetOk("serial_number_type"); ok { - serverProfile.SerialNumberType = val.(string) - } - - if val, ok := d.GetOk("wwn_type"); ok { - serverProfile.WWNType = val.(string) - } - - if val, ok := d.GetOk("mac_type"); ok { - serverProfile.MACType = val.(string) - } - - if val, ok := d.GetOk("hide_unused_flex_nics"); ok { - serverProfile.HideUnusedFlexNics = val.(bool) - } - - if val, ok := d.GetOk("enclosure_group"); ok { - enclosureGroup, err := config.ovClient.GetEnclosureGroupByName(val.(string)) - if err != nil { - return err - } - serverProfile.EnclosureGroupURI = enclosureGroup.URI - } - - if val, ok := d.GetOk("server_hardware_type"); ok { - serverHardwareType, err := config.ovClient.GetServerHardwareTypeByName(val.(string)) - if err != nil { - return err - } - serverProfile.ServerHardwareTypeURI = serverHardwareType.URI - } - - rawNetwork := d.Get("network").(*schema.Set).List() - networks := make([]ov.Connection, 0) - for _, rawNet := range rawNetwork { - rawNetworkItem := rawNet.(map[string]interface{}) - - bootOptions := ov.BootOption{} - if rawNetworkItem["boot"] != nil { - rawBoots := rawNetworkItem["boot"].(*schema.Set).List() - for _, rawBoot := range rawBoots { - bootItem := rawBoot.(map[string]interface{}) - - iscsi := ov.BootIscsi{} - if bootItem["iscsi"] != nil { - rawIscsis := bootItem["iscsi"].(*schema.Set).List() - for _, rawIscsi := range rawIscsis { - rawIscsiItem := rawIscsi.(map[string]interface{}) - iscsi = ov.BootIscsi{ - Chaplevel: rawIscsiItem["chap_level"].(string), - FirstBootTargetIp: rawIscsiItem["first_boot_target_ip"].(string), - FirstBootTargetPort: rawIscsiItem["first_boot_target_ip"].(string), - SecondBootTargetIp: rawIscsiItem["second_boot_target_ip"].(string), - SecondBootTargetPort: rawIscsiItem["second_boot_target_port"].(string), - } - } - } - - bootOptions = ov.BootOption{ - Priority: bootItem["priority"].(string), - EthernetBootType: bootItem["ethernet_boot_type"].(string), - BootVolumeSource: bootItem["boot_volume_source"].(string), - Iscsi: &iscsi, - } - } - } - - ipv4 := ov.Ipv4Option{} - if rawNetworkItem["ipv4"] != nil { - rawIpv4s := 
rawNetworkItem["ipv4"].(*schema.Set).List() - for _, rawIpv4 := range rawIpv4s { - rawIpv4Item := rawIpv4.(map[string]interface{}) - ipv4 = ov.Ipv4Option{ - Gateway: rawIpv4Item["gateway"].(string), - IpAddressSource: rawIpv4Item["ip_address_source"].(string), - } - } - } - - networks = append(networks, ov.Connection{ - ID: rawNetworkItem["id"].(int), - Name: rawNetworkItem["name"].(string), - FunctionType: rawNetworkItem["function_type"].(string), - NetworkURI: utils.NewNstring(rawNetworkItem["network_uri"].(string)), - PortID: rawNetworkItem["port_id"].(string), - RequestedMbps: rawNetworkItem["requested_mbps"].(string), - Ipv4: &ipv4, - Boot: &bootOptions, - }) - } - if val, ok := d.GetOk("manage_connections"); ok { - serverProfile.ConnectionSettings.ManageConnections = val.(bool) - serverProfile.ConnectionSettings.Connections = networks - } else { - serverProfile.Connections = networks - } - - if val, ok := d.GetOk("boot_order"); ok { - rawBootOrder := val.(*schema.Set).List() - bootOrder := make([]string, len(rawBootOrder)) - for i, raw := range rawBootOrder { - bootOrder[i] = raw.(string) - } - serverProfile.Boot.ManageBoot = true - serverProfile.Boot.Order = bootOrder - rawBootMode := d.Get("boot_mode").(*schema.Set).List()[0].(map[string]interface{}) - serverProfile.BootMode = ov.BootModeOption{ - ManageMode: rawBootMode["manage_mode"].(bool), - Mode: rawBootMode["mode"].(string), - PXEBootPolicy: utils.Nstring(rawBootMode["pxe_boot_policy"].(string)), - } - - } - - if val, ok := d.GetOk("bios_option"); ok { - rawBiosOption := val.(*schema.Set).List() - biosOption := ov.BiosOption{} - for _, raw := range rawBiosOption { - rawBiosItem := raw.(map[string]interface{}) - - overriddenSettings := make([]ov.BiosSettings, 0) - rawOverriddenSetting := rawBiosItem["overridden_settings"].(*schema.Set).List() - - for _, raw2 := range rawOverriddenSetting { - rawOverriddenSettingItem := raw2.(map[string]interface{}) - overriddenSettings = append(overriddenSettings, ov.BiosSettings{ - ID: rawOverriddenSettingItem["id"].(string), - Value: rawOverriddenSettingItem["value"].(string), - }) - } - biosOption = ov.BiosOption{ - ManageBios: rawBiosItem["manage_bios"].(bool), - OverriddenSettings: overriddenSettings, - } - } - serverProfile.Bios = &biosOption - } - - if val, ok := d.GetOk("initial_scope_uris"); ok { - initialScopeUrisOrder := val.(*schema.Set).List() - initialScopeUris := make([]utils.Nstring, len(initialScopeUrisOrder)) - for i, raw := range initialScopeUrisOrder { - initialScopeUris[i] = utils.Nstring(raw.(string)) - } - serverProfile.InitialScopeUris = initialScopeUris - } - - // Get firmware details - rawFirmware := d.Get("firmware").(*schema.Set).List() - firmware := ov.FirmwareOption{} - for _, raw := range rawFirmware { - firmwareItem := raw.(map[string]interface{}) - firmware = ov.FirmwareOption{ - ForceInstallFirmware: firmwareItem["force_install_firmware"].(bool), - FirmwareBaselineUri: utils.NewNstring(firmwareItem["firmware_baseline_uri"].(string)), - ManageFirmware: firmwareItem["manage_firmware"].(bool), - FirmwareOptionv200: ov.FirmwareOptionv200{ - FirmwareInstallType: firmwareItem["firmware_install_type"].(string), - }, - } - } - serverProfile.Firmware = firmware - - // Get local storage data if provided - rawLocalStorage := d.Get("local_storage").(*schema.Set).List() - localStorage := ov.LocalStorageOptions{} - for _, raw := range rawLocalStorage { - localStorageItem := raw.(map[string]interface{}) - localStorage = ov.LocalStorageOptions{ - ManageLocalStorage: 
localStorageItem["manage_local_storage"].(bool), - Initialize: localStorageItem["initialize"].(bool), - } - } - serverProfile.LocalStorage = localStorage - - rawLogicalDrives := d.Get("logical_drives").(*schema.Set).List() - logicalDrives := make([]ov.LogicalDrive, 0) - for _, rawLogicalDrive := range rawLogicalDrives { - logicalDrivesItem := rawLogicalDrive.(map[string]interface{}) - logicalDrives = append(logicalDrives, ov.LogicalDrive{ - Bootable: logicalDrivesItem["bootable"].(bool), - RaidLevel: logicalDrivesItem["raid_level"].(string), - }) - } - serverProfile.LocalStorage.LogicalDrives = logicalDrives - - // get SAN storage data if provided - rawSanStorage := d.Get("san_storage").(*schema.Set).List() - sanStorage := ov.SanStorageOptions{} - for _, raw := range rawSanStorage { - sanStorageItem := raw.(map[string]interface{}) - sanStorage = ov.SanStorageOptions{ - HostOSType: sanStorageItem["host_os_type"].(string), - ManageSanStorage: sanStorageItem["manage_san_storage"].(bool), - ServerHardwareTypeURI: utils.NewNstring(sanStorageItem["server_hardware_type_uri"].(string)), - ServerHardwareURI: utils.NewNstring(sanStorageItem["server_hardware_uri"].(string)), - SerialNumber: sanStorageItem["serial_number"].(string), - Type: sanStorageItem["type"].(string), - URI: utils.NewNstring(sanStorageItem["uri"].(string)), - } - } - serverProfile.SanStorage = sanStorage - - // Get volume attachment data for san storage - rawVolumeAttachments := d.Get("volume_attachments").(*schema.Set).List() - volumeAttachments := make([]ov.VolumeAttachment, 0) - - for _, rawVolumeAttachment := range rawVolumeAttachments { - volumeAttachmentItem := rawVolumeAttachment.(map[string]interface{}) - - // get volumeAttachemts.storagepaths - storagePaths := make([]ov.StoragePath, 0) - if volumeAttachmentItem["storage_paths"] != nil { - rawStoragePaths := volumeAttachmentItem["storage_paths"].(*schema.Set).List() - - for _, rawStoragePath := range rawStoragePaths { - storagePathItem := rawStoragePath.(map[string]interface{}) - - // get volumeAttachemts.storagepaths.targets - targets := make([]ov.Target, 0) - if storagePathItem["targets"] != nil { - rawStorageTargets := storagePathItem["targets"].(*schema.Set).List() - for _, rawStorageTarget := range rawStorageTargets { - storageTargetItem := rawStorageTarget.(map[string]interface{}) - targets = append(targets, ov.Target{ - IpAddress: storageTargetItem["ip_address"].(string), - Name: storageTargetItem["name"].(string), - TcpPort: storageTargetItem["tcp_port"].(int), - }) - } - } - - storagePaths = append(storagePaths, ov.StoragePath{ - IsEnabled: storagePathItem["is_enabled"].(bool), - Status: storagePathItem["status"].(string), - ConnectionID: storagePathItem["connection_id"].(int), - StorageTargetType: storagePathItem["storage_target_type"].(string), - TargetSelector: storagePathItem["target_selector"].(string), - Targets: targets, - }) - } - } - - tempPermanent := volumeAttachmentItem["permanent"].(bool) - tempVolumeShareable := volumeAttachmentItem["volume_shareable"].(bool) - volumeAttachments = append(volumeAttachments, ov.VolumeAttachment{ - Permanent: &tempPermanent, - ID: volumeAttachmentItem["id"].(int), - LUN: volumeAttachmentItem["lun"].(string), - LUNType: volumeAttachmentItem["lun_type"].(string), - VolumeStoragePoolURI: utils.NewNstring(volumeAttachmentItem["volume_storage_pool_uri"].(string)), - VolumeURI: utils.NewNstring(volumeAttachmentItem["volume_uri"].(string)), - VolumeStorageSystemURI: 
utils.NewNstring(volumeAttachmentItem["volume_storage_system_uri"].(string)), - VolumeShareable: &tempVolumeShareable, - VolumeDescription: volumeAttachmentItem["volume_description"].(string), - VolumeProvisionType: volumeAttachmentItem["volume_provision_type"].(string), - VolumeProvisionedCapacityBytes: volumeAttachmentItem["volume_provisioned_capacity_bytes"].(string), - VolumeName: volumeAttachmentItem["volume_name"].(string), - StoragePaths: storagePaths, - BootVolumePriority: volumeAttachmentItem["boot_volume_priority"].(string), - }) + if val, ok := d.GetOk("hardware_name"); ok { + serverHardware, err := config.ovClient.GetServerHardwareByName(val.(string)) + if err != nil { + return err } - serverProfile.SanStorage.VolumeAttachments = volumeAttachments - - if val, ok := d.GetOk("os_deployment_settings"); ok { - rawOsDeploySetting := val.(*schema.Set).List() - for _, raw := range rawOsDeploySetting { - osDeploySettingItem := raw.(map[string]interface{}) - - osCustomAttributes := make([]ov.OSCustomAttribute, 0) - if osDeploySettingItem["os_custom_attributes"] != nil { - rawOsDeploySettings := osDeploySettingItem["os_custom_attributes"].(*schema.Set).List() - for _, rawDeploySetting := range rawOsDeploySettings { - rawOsDeploySetting := rawDeploySetting.(map[string]interface{}) - - osCustomAttributes = append(osCustomAttributes, ov.OSCustomAttribute{ - Name: rawOsDeploySetting["name"].(string), - Value: rawOsDeploySetting["value"].(string), - }) - } - } - - // If Name already imported from SPT, overwrite its value from SP - for _, temp1 := range osCustomAttributes { - for j, temp2 := range serverProfile.OSDeploymentSettings.OSCustomAttributes { - if temp1.Name == temp2.Name { - serverProfile.OSDeploymentSettings.OSCustomAttributes[j].Value = temp1.Value - } - } - } - - } + if !strings.EqualFold(serverHardware.PowerState, "off") { + return fmt.Errorf("Server Hardware must be powered off to assign to server profile") } + serverProfile.ServerHardwareURI = serverHardware.URI + } - err := config.ovClient.UpdateServerProfile(serverProfile) - if err != nil { + if val, ok := d.GetOk("template"); ok { + serverProfileTemplate, err := config.ovClient.GetProfileTemplateByName(val.(string)) + if err != nil || serverProfileTemplate.URI.IsNil() { return err } - d.SetId(d.Get("name").(string)) + serverProfile.ServerProfileTemplateURI = serverProfileTemplate.URI + } + err := config.ovClient.UpdateServerProfile(serverProfile) + if err != nil { + return err } + d.SetId(d.Get("name").(string)) return resourceServerProfileRead(d, meta) @@ -1377,7 +223,7 @@ func getServerHardware(config *Config, serverProfileTemplate ov.ServerProfile, f f = append(f, filters...) - if hwlist, err = config.ovClient.GetServerHardwareList(f, "name:desc", "", "", ""); err != nil { + if hwlist, err = config.ovClient.GetServerHardwareList(f, "name:desc"); err != nil { if _, ok := err.(*json.SyntaxError); ok && len(filters) > 0 { return hw, fmt.Errorf("%s. 
It's likely your hw_filter(s) are incorrectly formatted", err) } diff --git a/oneview/resource_server_profile_template.go b/oneview/resource_server_profile_template.go index 644443a0..7f0c376f 100644 --- a/oneview/resource_server_profile_template.go +++ b/oneview/resource_server_profile_template.go @@ -12,7 +12,6 @@ package oneview import ( - "fmt" "github.com/HewlettPackard/oneview-golang/ov" "github.com/HewlettPackard/oneview-golang/utils" "github.com/hashicorp/terraform/helper/schema" @@ -59,34 +58,6 @@ func resourceServerProfileTemplate() *schema.Resource { }, }, }, - "bios_option": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_bios": { - Type: schema.TypeBool, - Required: true, - }, - "overridden_settings": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, "network": { Optional: true, Type: schema.TypeSet, @@ -117,76 +88,7 @@ func resourceServerProfileTemplate() *schema.Resource { }, "id": { Type: schema.TypeInt, - Optional: true, - }, - "boot": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "priority": { - Type: schema.TypeString, - Optional: true, - }, - "ethernet_boot_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_source": { - Type: schema.TypeString, - Optional: true, - }, - "iscsi": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "chap_level": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "first_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - "initiator_name_source": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_ip": { - Type: schema.TypeString, - Optional: true, - }, - "second_boot_target_port": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "ipv4": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "gateway": { - Type: schema.TypeString, - Optional: true, - }, - "ip_address_source": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, + Computed: true, }, }, }, @@ -252,243 +154,6 @@ func resourceServerProfileTemplate() *schema.Resource { Default: "Virtual", ForceNew: true, }, - "firmware": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "force_install_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_baseline_uri": { - Type: schema.TypeString, - Optional: true, - }, - "manage_firmware": { - Type: schema.TypeBool, - Optional: true, - }, - "firmware_install_type": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "local_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "manage_local_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "initialize": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "logical_drives": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "bootable": { - Type: 
schema.TypeBool, - Optional: true, - }, - "raid_level": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - "san_storage": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host_os_type": { - Type: schema.TypeString, - Optional: true, - }, - "manage_san_storage": { - Type: schema.TypeBool, - Optional: true, - }, - "server_hardware_type_uri": { - Type: schema.TypeString, - Optional: true, - }, - "server_hardware_uri": { - Type: schema.TypeString, - Optional: true, - }, - "serial_number": { - Type: schema.TypeString, - Optional: true, - }, - "type": { - Type: schema.TypeString, - Optional: true, - }, - "uri": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - // schema for ov.SanStorage.VolumeAttachments - "volume_attachments": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeInt, - Required: true, - }, - "lun": { - Type: schema.TypeString, - Required: true, - }, - "lun_type": { - Type: schema.TypeString, - Optional: true, - }, - "boot_volume_priority": { - Type: schema.TypeString, - Optional: true, - }, - "permanent": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_storage_pool_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_storage_system_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_uri": { - Type: schema.TypeString, - Optional: true, - }, - "volume_shareable": { - Type: schema.TypeBool, - Optional: true, - }, - "volume_description": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provision_type": { - Type: schema.TypeString, - Optional: true, - }, - "volume_provisioned_capacity_bytes": { - Type: schema.TypeString, - Optional: true, - }, - "volume_name": { - Type: schema.TypeString, - Optional: true, - }, - "storage_paths": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "status": { - Type: schema.TypeString, - Optional: true, - }, - "storage_target_type": { - Type: schema.TypeString, - Optional: true, - }, - "target_selector": { - Type: schema.TypeString, - Optional: true, - }, - "is_enabled": { - Type: schema.TypeBool, - Optional: true, - }, - "connection_id": { - Type: schema.TypeInt, - Optional: true, - }, - "targets": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_address": { - Type: schema.TypeString, - Optional: true, - }, - "name": { - Type: schema.TypeString, - Optional: true, - }, - "tcp_port": { - Type: schema.TypeInt, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "os_deployment_settings": { - Optional: true, - Type: schema.TypeSet, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "os_deployment_plan_name": { - Type: schema.TypeString, - Optional: true, - }, - "os_volume_uri": { - Type: schema.TypeString, - Optional: true, - }, - "os_custom_attributes": { - Optional: true, - Type: schema.TypeSet, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Type: schema.TypeString, - Optional: true, - }, - "value": { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - }, - }, - }, }, } } @@ -522,58 +187,12 @@ func resourceServerProfileTemplateCreate(d *schema.ResourceData, meta interface{ networks := make([]ov.Connection, 0) for _, rawNet := range rawNetwork { rawNetworkItem := rawNet.(map[string]interface{}) 
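// Note: the create path now maps only the user-settable connection fields.
// The per-connection "boot" and "ipv4" blocks were dropped from this
// resource's schema above, and "id" is now computed by OneView rather than
// supplied in configuration, which is why the removed block below no longer
// populates ov.Connection.Boot and ov.Connection.Ipv4.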
- - bootOptions := ov.BootOption{} - if rawNetworkItem["boot"] != nil { - rawBoots := rawNetworkItem["boot"].(*schema.Set).List() - for _, rawBoot := range rawBoots { - bootItem := rawBoot.(map[string]interface{}) - - iscsi := ov.BootIscsi{} - if bootItem["iscsi"] != nil { - rawIscsis := bootItem["iscsi"].(*schema.Set).List() - for _, rawIscsi := range rawIscsis { - rawIscsiItem := rawIscsi.(map[string]interface{}) - iscsi = ov.BootIscsi{ - Chaplevel: rawIscsiItem["chap_level"].(string), - FirstBootTargetIp: rawIscsiItem["first_boot_target_ip"].(string), - FirstBootTargetPort: rawIscsiItem["first_boot_target_ip"].(string), - SecondBootTargetIp: rawIscsiItem["second_boot_target_ip"].(string), - SecondBootTargetPort: rawIscsiItem["second_boot_target_port"].(string), - } - } - } - - bootOptions = ov.BootOption{ - Priority: bootItem["priority"].(string), - EthernetBootType: bootItem["ethernet_boot_type"].(string), - BootVolumeSource: bootItem["boot_volume_source"].(string), - Iscsi: &iscsi, - } - } - } - - ipv4 := ov.Ipv4Option{} - if rawNetworkItem["ipv4"] != nil { - rawIpv4s := rawNetworkItem["ipv4"].(*schema.Set).List() - for _, rawIpv4 := range rawIpv4s { - rawIpv4Item := rawIpv4.(map[string]interface{}) - ipv4 = ov.Ipv4Option{ - Gateway: rawIpv4Item["gateway"].(string), - IpAddressSource: rawIpv4Item["ip_address_source"].(string), - } - } - } - networks = append(networks, ov.Connection{ - ID: rawNetworkItem["id"].(int), Name: rawNetworkItem["name"].(string), FunctionType: rawNetworkItem["function_type"].(string), NetworkURI: utils.NewNstring(rawNetworkItem["network_uri"].(string)), PortID: rawNetworkItem["port_id"].(string), RequestedMbps: rawNetworkItem["requested_mbps"].(string), - Ipv4: &ipv4, - Boot: &bootOptions, }) } if val, ok := d.GetOk("manage_connections"); ok { @@ -600,30 +219,6 @@ func resourceServerProfileTemplateCreate(d *schema.ResourceData, meta interface{ } - if val, ok := d.GetOk("bios_option"); ok { - rawBiosOption := val.(*schema.Set).List() - biosOption := ov.BiosOption{} - for _, raw := range rawBiosOption { - rawBiosItem := raw.(map[string]interface{}) - - overriddenSettings := make([]ov.BiosSettings, 0) - rawOverriddenSetting := rawBiosItem["overridden_settings"].(*schema.Set).List() - - for _, raw2 := range rawOverriddenSetting { - rawOverriddenSettingItem := raw2.(map[string]interface{}) - overriddenSettings = append(overriddenSettings, ov.BiosSettings{ - ID: rawOverriddenSettingItem["id"].(string), - Value: rawOverriddenSettingItem["value"].(string), - }) - } - biosOption = ov.BiosOption{ - ManageBios: rawBiosItem["manage_bios"].(bool), - OverriddenSettings: overriddenSettings, - } - } - serverProfileTemplate.Bios = &biosOption - } - if val, ok := d.GetOk("initial_scope_uris"); ok { initialScopeUrisOrder := val.(*schema.Set).List() initialScopeUris := make([]utils.Nstring, len(initialScopeUrisOrder)) @@ -633,156 +228,6 @@ func resourceServerProfileTemplateCreate(d *schema.ResourceData, meta interface{ serverProfileTemplate.InitialScopeUris = initialScopeUris } - // Get firmware details - rawFirmware := d.Get("firmware").(*schema.Set).List() - firmware := ov.FirmwareOption{} - for _, raw := range rawFirmware { - firmwareItem := raw.(map[string]interface{}) - firmware = ov.FirmwareOption{ - ForceInstallFirmware: firmwareItem["force_install_firmware"].(bool), - FirmwareBaselineUri: utils.NewNstring(firmwareItem["firmware_baseline_uri"].(string)), - ManageFirmware: firmwareItem["manage_firmware"].(bool), - FirmwareOptionv200: ov.FirmwareOptionv200{ - 
FirmwareInstallType: firmwareItem["firmware_install_type"].(string), - }, - } - } - serverProfileTemplate.Firmware = firmware - - // Get local storage data if provided - rawLocalStorage := d.Get("local_storage").(*schema.Set).List() - localStorage := ov.LocalStorageOptions{} - for _, raw := range rawLocalStorage { - localStorageItem := raw.(map[string]interface{}) - localStorage = ov.LocalStorageOptions{ - ManageLocalStorage: localStorageItem["manage_local_storage"].(bool), - Initialize: localStorageItem["initialize"].(bool), - } - } - serverProfileTemplate.LocalStorage = localStorage - - rawLogicalDrives := d.Get("logical_drives").(*schema.Set).List() - logicalDrives := make([]ov.LogicalDrive, 0) - for _, rawLogicalDrive := range rawLogicalDrives { - logicalDrivesItem := rawLogicalDrive.(map[string]interface{}) - logicalDrives = append(logicalDrives, ov.LogicalDrive{ - Bootable: logicalDrivesItem["bootable"].(bool), - RaidLevel: logicalDrivesItem["raid_level"].(string), - }) - } - serverProfileTemplate.LocalStorage.LogicalDrives = logicalDrives - - // get SAN storage data if provided - rawSanStorage := d.Get("san_storage").(*schema.Set).List() - sanStorage := ov.SanStorageOptions{} - for _, raw := range rawSanStorage { - sanStorageItem := raw.(map[string]interface{}) - sanStorage = ov.SanStorageOptions{ - HostOSType: sanStorageItem["host_os_type"].(string), - ManageSanStorage: sanStorageItem["manage_san_storage"].(bool), - ServerHardwareTypeURI: utils.NewNstring(sanStorageItem["server_hardware_type_uri"].(string)), - ServerHardwareURI: utils.NewNstring(sanStorageItem["server_hardware_uri"].(string)), - SerialNumber: sanStorageItem["serial_number"].(string), - Type: sanStorageItem["type"].(string), - URI: utils.NewNstring(sanStorageItem["uri"].(string)), - } - } - serverProfileTemplate.SanStorage = sanStorage - - // Get volume attachment data for san storage - rawVolumeAttachments := d.Get("volume_attachments").(*schema.Set).List() - volumeAttachments := make([]ov.VolumeAttachment, 0) - - for _, rawVolumeAttachment := range rawVolumeAttachments { - volumeAttachmentItem := rawVolumeAttachment.(map[string]interface{}) - - // get volumeAttachemts.storagepaths - storagePaths := make([]ov.StoragePath, 0) - if volumeAttachmentItem["storage_paths"] != nil { - rawStoragePaths := volumeAttachmentItem["storage_paths"].(*schema.Set).List() - - for _, rawStoragePath := range rawStoragePaths { - storagePathItem := rawStoragePath.(map[string]interface{}) - - // get volumeAttachemts.storagepaths.targets - targets := make([]ov.Target, 0) - if storagePathItem["targets"] != nil { - rawStorageTargets := storagePathItem["targets"].(*schema.Set).List() - for _, rawStorageTarget := range rawStorageTargets { - storageTargetItem := rawStorageTarget.(map[string]interface{}) - targets = append(targets, ov.Target{ - IpAddress: storageTargetItem["ip_address"].(string), - Name: storageTargetItem["name"].(string), - TcpPort: storageTargetItem["tcp_port"].(int), - }) - } - } - - storagePaths = append(storagePaths, ov.StoragePath{ - IsEnabled: storagePathItem["is_enabled"].(bool), - Status: storagePathItem["status"].(string), - ConnectionID: storagePathItem["connection_id"].(int), - StorageTargetType: storagePathItem["storage_target_type"].(string), - TargetSelector: storagePathItem["target_selector"].(string), - Targets: targets, - }) - } - } - - tempPermanent := volumeAttachmentItem["permanent"].(bool) - tempVolumeShareable := volumeAttachmentItem["volume_shareable"].(bool) - volumeAttachments = 
append(volumeAttachments, ov.VolumeAttachment{ - Permanent: &tempPermanent, - ID: volumeAttachmentItem["id"].(int), - LUN: volumeAttachmentItem["lun"].(string), - LUNType: volumeAttachmentItem["lun_type"].(string), - VolumeStoragePoolURI: utils.NewNstring(volumeAttachmentItem["volume_storage_pool_uri"].(string)), - VolumeURI: utils.NewNstring(volumeAttachmentItem["volume_uri"].(string)), - VolumeStorageSystemURI: utils.NewNstring(volumeAttachmentItem["volume_storage_system_uri"].(string)), - VolumeShareable: &tempVolumeShareable, - VolumeDescription: volumeAttachmentItem["volume_description"].(string), - VolumeProvisionType: volumeAttachmentItem["volume_provision_type"].(string), - VolumeProvisionedCapacityBytes: volumeAttachmentItem["volume_provisioned_capacity_bytes"].(string), - VolumeName: volumeAttachmentItem["volume_name"].(string), - StoragePaths: storagePaths, - BootVolumePriority: volumeAttachmentItem["boot_volume_priority"].(string), - }) - } - serverProfileTemplate.SanStorage.VolumeAttachments = volumeAttachments - - rawOsDeploySetting := d.Get("os_deployment_settings").(*schema.Set).List() - osDeploySetting := ov.OSDeploymentSettings{} - for _, raw := range rawOsDeploySetting { - osDeploySettingItem := raw.(map[string]interface{}) - osDeploymentPlan, err := config.ovClient.GetOSDeploymentPlanByName(osDeploySettingItem["os_deployment_plan_name"].(string)) - if err != nil { - return err - } - if osDeploymentPlan.URI == "" { - return fmt.Errorf("Could not find deployment plan by name: %s", osDeploySettingItem["os_deployment_plan_name"].(string)) - } - - osCustomAttributes := make([]ov.OSCustomAttribute, 0) - if osDeploySettingItem["os_custom_attributes"] != nil { - rawOsDeploySettings := osDeploySettingItem["os_custom_attributes"].(*schema.Set).List() - for _, rawDeploySetting := range rawOsDeploySettings { - rawOsDeploySetting := rawDeploySetting.(map[string]interface{}) - osCustomAttributes = append(osCustomAttributes, ov.OSCustomAttribute{ - Name: rawOsDeploySetting["name"].(string), - Value: rawOsDeploySetting["value"].(string), - }) - } - } - - osDeploySetting = ov.OSDeploymentSettings{ - OSDeploymentPlanUri: osDeploymentPlan.URI, - OSVolumeUri: utils.NewNstring(osDeploySettingItem["os_volume_uri"].(string)), - OSCustomAttributes: osCustomAttributes, - } - } - - serverProfileTemplate.OSDeploymentSettings = &osDeploySetting - sptError := config.ovClient.CreateProfileTemplate(serverProfileTemplate) d.SetId(d.Get("name").(string)) if sptError != nil { @@ -889,60 +334,15 @@ func resourceServerProfileTemplateUpdate(d *schema.ResourceData, meta interface{ networks := make([]ov.Connection, 0) for _, rawNet := range rawNetwork { rawNetworkItem := rawNet.(map[string]interface{}) - - bootOptions := ov.BootOption{} - if rawNetworkItem["boot"] != nil { - rawBoots := rawNetworkItem["boot"].(*schema.Set).List() - for _, rawBoot := range rawBoots { - bootItem := rawBoot.(map[string]interface{}) - - iscsi := ov.BootIscsi{} - if bootItem["iscsi"] != nil { - rawIscsis := bootItem["iscsi"].(*schema.Set).List() - for _, rawIscsi := range rawIscsis { - rawIscsiItem := rawIscsi.(map[string]interface{}) - iscsi = ov.BootIscsi{ - Chaplevel: rawIscsiItem["chap_level"].(string), - FirstBootTargetIp: rawIscsiItem["first_boot_target_ip"].(string), - FirstBootTargetPort: rawIscsiItem["first_boot_target_ip"].(string), - SecondBootTargetIp: rawIscsiItem["second_boot_target_ip"].(string), - SecondBootTargetPort: rawIscsiItem["second_boot_target_port"].(string), - } - } - } - - bootOptions = ov.BootOption{ - 
Priority: bootItem["priority"].(string), - EthernetBootType: bootItem["ethernet_boot_type"].(string), - BootVolumeSource: bootItem["boot_volume_source"].(string), - Iscsi: &iscsi, - } - } - } - - ipv4 := ov.Ipv4Option{} - if rawNetworkItem["ipv4"] != nil { - rawIpv4s := rawNetworkItem["ipv4"].(*schema.Set).List() - for _, rawIpv4 := range rawIpv4s { - rawIpv4Item := rawIpv4.(map[string]interface{}) - ipv4 = ov.Ipv4Option{ - Gateway: rawIpv4Item["gateway"].(string), - IpAddressSource: rawIpv4Item["ip_address_source"].(string), - } - } - } - networks = append(networks, ov.Connection{ - ID: rawNetworkItem["id"].(int), Name: rawNetworkItem["name"].(string), FunctionType: rawNetworkItem["function_type"].(string), NetworkURI: utils.NewNstring(rawNetworkItem["network_uri"].(string)), PortID: rawNetworkItem["port_id"].(string), RequestedMbps: rawNetworkItem["requested_mbps"].(string), - Ipv4: &ipv4, - Boot: &bootOptions, }) } + serverProfileTemplate.Connections = networks if val, ok := d.GetOk("boot_order"); ok { rawBootOrder := val.(*schema.Set).List() @@ -954,155 +354,6 @@ func resourceServerProfileTemplateUpdate(d *schema.ResourceData, meta interface{ serverProfileTemplate.Boot.Order = bootOrder } - rawFirmware := d.Get("firmware").(*schema.Set).List() - firmware := ov.FirmwareOption{} - for _, raw := range rawFirmware { - firmwareItem := raw.(map[string]interface{}) - firmware = ov.FirmwareOption{ - ForceInstallFirmware: firmwareItem["force_install_firmware"].(bool), - FirmwareBaselineUri: utils.NewNstring(firmwareItem["firmware_baseline_uri"].(string)), - ManageFirmware: firmwareItem["manage_firmware"].(bool), - FirmwareOptionv200: ov.FirmwareOptionv200{ - FirmwareInstallType: firmwareItem["firmware_install_type"].(string), - }, - } - } - serverProfileTemplate.Firmware = firmware - - // Get local storage data if provided - rawLocalStorage := d.Get("local_storage").(*schema.Set).List() - localStorage := ov.LocalStorageOptions{} - for _, raw := range rawLocalStorage { - localStorageItem := raw.(map[string]interface{}) - localStorage = ov.LocalStorageOptions{ - ManageLocalStorage: localStorageItem["manage_local_storage"].(bool), - Initialize: localStorageItem["initialize"].(bool), - } - } - serverProfileTemplate.LocalStorage = localStorage - - rawLogicalDrives := d.Get("logical_drives").(*schema.Set).List() - logicalDrives := make([]ov.LogicalDrive, 0) - for _, rawLogicalDrive := range rawLogicalDrives { - logicalDrivesItem := rawLogicalDrive.(map[string]interface{}) - logicalDrives = append(logicalDrives, ov.LogicalDrive{ - Bootable: logicalDrivesItem["bootable"].(bool), - RaidLevel: logicalDrivesItem["raid_level"].(string), - }) - } - serverProfileTemplate.LocalStorage.LogicalDrives = logicalDrives - - // get SAN storage data if provided - rawSanStorage := d.Get("san_storage").(*schema.Set).List() - sanStorage := ov.SanStorageOptions{} - for _, raw := range rawSanStorage { - sanStorageItem := raw.(map[string]interface{}) - sanStorage = ov.SanStorageOptions{ - HostOSType: sanStorageItem["host_os_type"].(string), - ManageSanStorage: sanStorageItem["manage_san_storage"].(bool), - ServerHardwareTypeURI: utils.NewNstring(sanStorageItem["server_hardware_type_uri"].(string)), - ServerHardwareURI: utils.NewNstring(sanStorageItem["server_hardware_uri"].(string)), - SerialNumber: sanStorageItem["serial_number"].(string), - Type: sanStorageItem["type"].(string), - URI: utils.NewNstring(sanStorageItem["uri"].(string)), - } - } - serverProfileTemplate.SanStorage = sanStorage - - // Get volume attachment 
data for san storage - rawVolumeAttachments := d.Get("volume_attachments").(*schema.Set).List() - volumeAttachments := make([]ov.VolumeAttachment, 0) - - for _, rawVolumeAttachment := range rawVolumeAttachments { - volumeAttachmentItem := rawVolumeAttachment.(map[string]interface{}) - - // get volumeAttachemts.storagepaths - storagePaths := make([]ov.StoragePath, 0) - if volumeAttachmentItem["storage_paths"] != nil { - rawStoragePaths := volumeAttachmentItem["storage_paths"].(*schema.Set).List() - - for _, rawStoragePath := range rawStoragePaths { - storagePathItem := rawStoragePath.(map[string]interface{}) - - // get volumeAttachemts.storagepaths.targets - targets := make([]ov.Target, 0) - if storagePathItem["targets"] != nil { - rawStorageTargets := storagePathItem["targets"].(*schema.Set).List() - for _, rawStorageTarget := range rawStorageTargets { - storageTargetItem := rawStorageTarget.(map[string]interface{}) - targets = append(targets, ov.Target{ - IpAddress: storageTargetItem["ip_address"].(string), - Name: storageTargetItem["name"].(string), - TcpPort: storageTargetItem["tcp_port"].(int), - }) - } - } - - storagePaths = append(storagePaths, ov.StoragePath{ - IsEnabled: storagePathItem["is_enabled"].(bool), - Status: storagePathItem["status"].(string), - ConnectionID: storagePathItem["connection_id"].(int), - StorageTargetType: storagePathItem["storage_target_type"].(string), - TargetSelector: storagePathItem["target_selector"].(string), - Targets: targets, - }) - } - } - - tempPermanent := volumeAttachmentItem["permanent"].(bool) - tempVolumeShareable := volumeAttachmentItem["volume_shareable"].(bool) - volumeAttachments = append(volumeAttachments, ov.VolumeAttachment{ - Permanent: &tempPermanent, - ID: volumeAttachmentItem["id"].(int), - LUN: volumeAttachmentItem["lun"].(string), - LUNType: volumeAttachmentItem["lun_type"].(string), - VolumeStoragePoolURI: utils.NewNstring(volumeAttachmentItem["volume_storage_pool_uri"].(string)), - VolumeURI: utils.NewNstring(volumeAttachmentItem["volume_uri"].(string)), - VolumeStorageSystemURI: utils.NewNstring(volumeAttachmentItem["volume_storage_system_uri"].(string)), - VolumeShareable: &tempVolumeShareable, - VolumeDescription: volumeAttachmentItem["volume_description"].(string), - VolumeProvisionType: volumeAttachmentItem["volume_provision_type"].(string), - VolumeProvisionedCapacityBytes: volumeAttachmentItem["volume_provisioned_capacity_bytes"].(string), - VolumeName: volumeAttachmentItem["volume_name"].(string), - StoragePaths: storagePaths, - BootVolumePriority: volumeAttachmentItem["boot_volume_priority"].(string), - }) - } - serverProfileTemplate.SanStorage.VolumeAttachments = volumeAttachments - - rawOsDeploySetting := d.Get("os_deployment_settings").(*schema.Set).List() - osDeploySetting := ov.OSDeploymentSettings{} - for _, raw := range rawOsDeploySetting { - osDeploySettingItem := raw.(map[string]interface{}) - osDeploymentPlan, err := config.ovClient.GetOSDeploymentPlanByName(osDeploySettingItem["os_deployment_plan_name"].(string)) - if err != nil { - return err - } - if osDeploymentPlan.URI == "" { - return fmt.Errorf("Could not find deployment plan by name: %s", osDeploySettingItem["os_deployment_plan_name"].(string)) - } - - osCustomAttributes := make([]ov.OSCustomAttribute, 0) - if osDeploySettingItem["os_custom_attributes"] != nil { - rawOsDeploySettings := osDeploySettingItem["os_custom_attributes"].(*schema.Set).List() - for _, rawDeploySetting := range rawOsDeploySettings { - rawOsDeploySetting := 
rawDeploySetting.(map[string]interface{})
- osCustomAttributes = append(osCustomAttributes, ov.OSCustomAttribute{
- Name: rawOsDeploySetting["name"].(string),
- Value: rawOsDeploySetting["value"].(string),
- })
- }
- }
-
- osDeploySetting = ov.OSDeploymentSettings{
- OSDeploymentPlanUri: osDeploymentPlan.URI,
- OSVolumeUri: utils.NewNstring(osDeploySettingItem["os_volume_uri"].(string)),
- OSCustomAttributes: osCustomAttributes,
- }
- }
-
- serverProfileTemplate.OSDeploymentSettings = &osDeploySetting
-
err = config.ovClient.UpdateProfileTemplate(serverProfileTemplate)
if err != nil {
return err
diff --git a/oneview/resource_storage_system.go b/oneview/resource_storage_system.go
index 36b94957..e28495d4 100644
--- a/oneview/resource_storage_system.go
+++ b/oneview/resource_storage_system.go
@@ -13,6 +13,7 @@ package oneview
import (
"fmt"
+
"github.com/HewlettPackard/oneview-golang/ov"
"github.com/HewlettPackard/oneview-golang/utils"
"github.com/hashicorp/terraform/helper/schema"
@@ -79,8 +80,9 @@ func resourceStorageSystem() *schema.Resource {
Computed: true,
},
"eTag": {
- Type: schema.TypeString,
- Optional: true,
+ Type: schema.TypeString,
+ Optional: true,
+ Deprecated: "Warning: Current value structure is deprecated",
},
"type": {
Type: schema.TypeString,
@@ -185,7 +187,7 @@ func resourceStorageSystemCreate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
- storageSystem := ov.StorageSystem{
+ storageSystem := ov.StorageSystemV4{
Hostname: d.Get("hostname").(string),
Username: d.Get("username").(string),
Password: d.Get("password").(string),
@@ -280,7 +282,7 @@ func resourceStorageSystemUpdate(d *schema.ResourceData, meta interface{}) error {
config := meta.(*Config)
- storageSystem := ov.StorageSystem{
+ storageSystem := ov.StorageSystemV4{
Hostname: d.Get("hostname").(string),
URI: utils.NewNstring(d.Get("uri").(string)),
Name: d.Get("name").(string),
diff --git a/oneview/resource_storage_system_test.go b/oneview/resource_storage_system_test.go
index aa7af451..07a1bc75 100644
--- a/oneview/resource_storage_system_test.go
+++ b/oneview/resource_storage_system_test.go
@@ -20,7 +20,7 @@ import (
)
func TestAccStorageSystem_1(t *testing.T) {
- var storageSystem ov.StorageSystem
+ var storageSystem ov.StorageSystemV4
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
@@ -56,7 +56,7 @@ func TestAccStorageSystem_1(t *testing.T) {
})
}
-func testAccCheckStorageSystemExists(n string, storageSystem *ov.StorageSystem) resource.TestCheckFunc {
+func testAccCheckStorageSystemExists(n string, storageSystem *ov.StorageSystemV4) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
diff --git a/oneview/resource_storage_volume.go b/oneview/resource_storage_volume.go
new file mode 100644
index 00000000..37d1e438
--- /dev/null
+++ b/oneview/resource_storage_volume.go
@@ -0,0 +1,210 @@
+package oneview
+
+import (
+ "github.com/hashicorp/terraform/helper/schema"
+)
+
+func resourceStorageVolume() *schema.Resource {
+ return &schema.Resource{
+ Read: resourceStorageVolumeRead,
+ Schema: map[string]*schema.Schema{
+ "category": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "eTag": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "created": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "description": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "state": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "status": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "type": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "uri": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "device_volume_name": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "requesting_refresh": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "allocated_capacity": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "initial_scopes": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "device_specific_attributes": {
+ Computed: true,
+ Type: schema.TypeSet,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "transport": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "iqn": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "number_of_replicas": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "data_protection_level": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "id": {
+ Type: schema.TypeInt,
+ Computed: true,
+ },
+ "uri": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "copy_state": {
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "snapshot_pool_uri": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ },
+ },
+ "volume_template_uri": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "is_shareable": {
+ Type: schema.TypeBool,
+ Optional: true,
+ },
+ "storage_pool_uri": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "storage_system_uri": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "provisioned_capacity": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ "properties": {
+ Type: schema.TypeSet,
+ Required: true,
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "storage_pool": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "size": {
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "provisioning_type": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ },
+ },
+ "template_uri": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ "is_permanent": {
+ Type: schema.TypeBool,
+ Required: true,
+ },
+ "provisioning_type_for_update": {
+ Type: schema.TypeString,
+ Optional: true,
+ },
+ },
+ }
+}
+
+// resourceStorageVolumeRead looks the volume up by the resource ID (the
+// volume name) and mirrors the API object into Terraform state, using the
+// snake_case keys declared in the schema above.
+func resourceStorageVolumeRead(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*Config)
+
+ storageVolume, err := config.ovClient.GetStorageVolumeByName(d.Id())
+ if err != nil {
+ d.SetId("")
+ return nil
+ }
+
+ d.SetId(d.Id())
+ d.Set("category", storageVolume.Category)
+ d.Set("eTag", storageVolume.ETAG)
+ d.Set("created", storageVolume.Created)
+ d.Set("description", storageVolume.Description)
+ d.Set("name", storageVolume.Name)
+ d.Set("state", storageVolume.State)
+ d.Set("status", storageVolume.Status)
+ d.Set("type", storageVolume.Type)
+ d.Set("uri", storageVolume.URI)
+ d.Set("device_volume_name", storageVolume.DeviceVolumeName)
+ d.Set("requesting_refresh", storageVolume.RequestingRefresh)
+ d.Set("allocated_capacity", storageVolume.AllocatedCapacity)
+ d.Set("initial_scopes", storageVolume.InitialScopeUris)
+ deviceSpecificAttributes := make([]map[string]interface{}, 0)
+ deviceSpecificAttributes = append(deviceSpecificAttributes, map[string]interface{}{
+ "transport": storageVolume.DeviceSpecificAttributes.Transport,
+ "iqn": storageVolume.DeviceSpecificAttributes.Iqn,
+ "number_of_replicas": storageVolume.DeviceSpecificAttributes.NumberOfReplicas,
+ "data_protection_level": storageVolume.DeviceSpecificAttributes.DataProtectionLevel,
+ "id": storageVolume.DeviceSpecificAttributes.Id,
+ "uri": storageVolume.DeviceSpecificAttributes.Uri,
+ "copy_state": storageVolume.DeviceSpecificAttributes.CopyState,
+ "snapshot_pool_uri": storageVolume.DeviceSpecificAttributes.SnapshotPoolUri})
+ d.Set("device_specific_attributes", deviceSpecificAttributes)
+ d.Set("volume_template_uri", storageVolume.VolumeTemplateUri)
+ d.Set("is_shareable", storageVolume.IsShareable)
+ d.Set("storage_pool_uri", storageVolume.StoragePoolUri)
+ d.Set("storage_system_uri", storageVolume.StorageSystemUri)
+ d.Set("provisioned_capacity", storageVolume.ProvisionedCapacity)
+ properties := make([]map[string]interface{}, 0)
+ properties = append(properties, map[string]interface{}{
+ "name": storageVolume.Properties.Name,
+ "storage_pool": storageVolume.Properties.Storagepool,
+ "size": storageVolume.Properties.Size,
+ "provisioning_type": storageVolume.Properties.ProvisioningType})
+ d.Set("properties", properties)
+ d.Set("template_uri", storageVolume.TemplateURI)
+ d.Set("is_permanent", storageVolume.IsPermanent)
+ d.Set("provisioning_type_for_update", storageVolume.ProvisioningTypeForUpdate)
+
+ return nil
+}
diff --git a/oneview/resource_uplink_set.go b/oneview/resource_uplink_set.go
index 3eaa82ab..d4ce5147 100644
--- a/oneview/resource_uplink_set.go
+++ b/oneview/resource_uplink_set.go
@@ -83,8 +83,9 @@ func resourceUplinkSet() *schema.Resource {
Optional: true,
},
"eTag": {
- Type: schema.TypeString,
- Computed: true,
+ Type: schema.TypeString,
+ Computed: true,
+ Deprecated: "Warning: Current value structure is deprecated",
},
"lacptimer": {
Type: schema.TypeString,
@@ -154,12 +155,12 @@ func resourceUplinkSetCreate(d *schema.ResourceData, meta interface{}) error {
const Port = "Port"
uplinkSet := ov.UplinkSet{
- Name: d.Get("name").(string),
- LogicalInterconnectURI: utils.NewNstring(d.Get("logical_interconnect_uri").(string)),
- ConnectionMode: d.Get("connection_mode").(string),
- NetworkType: d.Get("network_type").(string),
- EthernetNetworkType: d.Get("ethernet_network_type").(string),
- Type: d.Get("type").(string),
+ Name: d.Get("name").(string),
+ LogicalInterconnectURI: utils.NewNstring(d.Get("logical_interconnect_uri").(string)),
+ ConnectionMode: d.Get("connection_mode").(string),
+ NetworkType: d.Get("network_type").(string),
+ EthernetNetworkType: d.Get("ethernet_network_type").(string),
+ Type: d.Get("type").(string),
ManualLoginRedistributionState: d.Get("manual_login_redistribution_state").(string),
}
@@ -287,13 +288,13 @@ func resourceUplinkSetUpdate(d *schema.ResourceData, meta interface{}) error {
const Port = "Port"
uplinkSet := ov.UplinkSet{
- Name: d.Get("name").(string),
- LogicalInterconnectURI: utils.NewNstring(d.Get("logical_interconnect_uri").(string)),
- ConnectionMode: d.Get("connection_mode").(string),
- NetworkType: d.Get("network_type").(string),
- EthernetNetworkType: d.Get("ethernet_network_type").(string),
- Type: d.Get("type").(string),
- URI: utils.NewNstring(d.Get("uri").(string)),
+ Name: d.Get("name").(string),
+ LogicalInterconnectURI: utils.NewNstring(d.Get("logical_interconnect_uri").(string)),
+ ConnectionMode: d.Get("connection_mode").(string),
+ NetworkType: d.Get("network_type").(string),
+ EthernetNetworkType: d.Get("ethernet_network_type").(string),
+ Type: d.Get("type").(string),
+ URI: utils.NewNstring(d.Get("uri").(string)),
ManualLoginRedistributionState: d.Get("manual_login_redistribution_state").(string),
}
diff --git a/terraform-oneview.exe b/terraform-oneview.exe
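For reference, a minimal sketch of the lookup the new `resourceStorageVolumeRead` performs, assuming an authenticated oneview-golang client; the `printVolume` helper and `client` parameter are illustrative names, not part of this change:

```go
package main

import (
	"fmt"

	"github.com/HewlettPackard/oneview-golang/ov"
)

// printVolume fetches a storage volume by name, the same call the read
// function above issues with the resource ID, and prints a couple of the
// fields that get mapped into Terraform state.
func printVolume(client *ov.OVClient, name string) error {
	volume, err := client.GetStorageVolumeByName(name)
	if err != nil {
		return err
	}
	fmt.Println(volume.Name, volume.ProvisionedCapacity, volume.IsShareable)
	return nil
}
```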
new file mode 100644 index 00000000..65a2f576 Binary files /dev/null and b/terraform-oneview.exe differ diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. 
+ + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 00000000..125b7033 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,513 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. 
+type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/googleapis/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := defaultClient.hc.Do(req.WithContext(ctx)) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. 
+func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. 
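+	// For local testing one might therefore run a fake metadata server and
+	// point this client at it with, say, GCE_METADATA_HOST=127.0.0.1:8080
+	// (an illustrative address; nothing in this package chooses it for you).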
+ host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + u := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", u, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + if res.StatusCode != 200 { + return "", "", &Error{Code: res.StatusCode, Message: string(all)} + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". 
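+// (The metadata server returns the fully qualified form,
+// "projects/<num>/zones/<zone>"; Zone keeps only the final path segment,
+// as the implementation below shows.)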
+func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// Error contains an error response from the server. +type Error struct { + // Code is the HTTP response status code. 
+ Code int + // Message is the server response message. + Message string +} + +func (e *Error) Error() string { + return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message) +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 00000000..5232cb67 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,315 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "context" + "fmt" + "time" + + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// client abstracts the IAMPolicy API to allow multiple implementations. +type client interface { + Get(ctx context.Context, resource string) (*pb.Policy, error) + Set(ctx context.Context, resource string, p *pb.Policy) error + Test(ctx context.Context, resource string, perms []string) ([]string, error) +} + +// grpcClient implements client for the standard gRPC-based IAMPolicy service. 
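+// Calls made through it are retried on transient gRPC codes
+// (DeadlineExceeded, Unavailable) via the gax retryer configured just below.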
+type grpcClient struct { + c pb.IAMPolicyClient +} + +var withRetry = gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60 * time.Second, + Multiplier: 1.3, + }) +}) + +func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) { + var proto *pb.Policy + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource}) + return err + }, withRetry) + if err != nil { + return nil, err + } + return proto, nil +} + +func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + _, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: resource, + Policy: p, + }) + return err + }, withRetry) +} + +func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) { + var res *pb.TestIamPermissionsResponse + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource)) + ctx = insertMetadata(ctx, md) + + err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error { + var err error + res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: resource, + Permissions: perms, + }) + return err + }, withRetry) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A Handle provides IAM operations for a resource. +type Handle struct { + c client + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource) +} + +// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleGRPCClient returns a Handle for resource using the given +// gRPC service that implements IAM as a mixin. +func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle { + return InternalNewHandleClient(&grpcClient{c: c}, resource) +} + +// InternalNewHandleClient is for use by the Google Cloud Libraries only. +// +// InternalNewHandleClient returns a Handle for resource using the given +// client implementation. +func InternalNewHandleClient(c client, resource string) *Handle { + return &Handle{ + c: c, + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.Get(ctx, h.resource) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get.
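+//
+// A typical read-modify-write sequence is sketched below
+// ("user:jane@example.com" is a placeholder member string):
+//
+//	policy, err := handle.Policy(ctx)
+//	if err != nil {
+//		// ...handle the error...
+//	}
+//	policy.Add("user:jane@example.com", iam.Viewer)
+//	if err := handle.SetPolicy(ctx, policy); err != nil {
+//		// ...handle the error...
+//	}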
+func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + return h.c.Set(ctx, h.resource, policy.InternalProto) +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. +func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + return h.c.Test(ctx, h.resource, permissions) +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. +func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + bi := p.bindingIndex(r) + if bi < 0 { + return + } + bindings := p.InternalProto.Bindings + b := bindings[bi] + mi := memberIndex(member, b) + if mi < 0 { + return + } + // Order doesn't matter for bindings or members, so to remove, move the last item + // into the removed spot and shrink the slice. + if len(b.Members) == 1 { + // Remove binding. + last := len(bindings) - 1 + bindings[bi] = bindings[last] + bindings[last] = nil + p.InternalProto.Bindings = bindings[:last] + return + } + // Remove member. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[mi] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the supplied role, or nil if there isn't one.
+func (p *Policy) binding(r RoleName) *pb.Binding { + i := p.bindingIndex(r) + if i < 0 { + return nil + } + return p.InternalProto.Bindings[i] +} + +func (p *Policy) bindingIndex(r RoleName) int { + if p.InternalProto == nil { + return -1 + } + for i, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return i + } + } + return -1 +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} + +// insertMetadata inserts metadata into the given context +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} diff --git a/vendor/cloud.google.com/go/internal/annotate.go b/vendor/cloud.google.com/go/internal/annotate.go new file mode 100644 index 00000000..6435695b --- /dev/null +++ b/vendor/cloud.google.com/go/internal/annotate.go @@ -0,0 +1,54 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + + "google.golang.org/api/googleapi" + "google.golang.org/grpc/status" +) + +// Annotate prepends msg to the error message in err, attempting +// to preserve other information in err, like an error code. +// +// Annotate panics if err is nil. +// +// Annotate knows about these error types: +// - "google.golang.org/grpc/status".Status +// - "google.golang.org/api/googleapi".Error +// If the error is not one of these types, Annotate behaves +// like +// fmt.Errorf("%s: %v", msg, err) +func Annotate(err error, msg string) error { + if err == nil { + panic("Annotate called with nil") + } + if s, ok := status.FromError(err); ok { + p := s.Proto() + p.Message = msg + ": " + p.Message + return status.ErrorProto(p) + } + if g, ok := err.(*googleapi.Error); ok { + g.Message = msg + ": " + g.Message + return g + } + return fmt.Errorf("%s: %v", msg, err) +} + +// Annotatef uses format and args to format a string, then calls Annotate. +func Annotatef(err error, format string, args ...interface{}) error { + return Annotate(err, fmt.Sprintf(format, args...)) +} diff --git a/vendor/cloud.google.com/go/internal/optional/optional.go b/vendor/cloud.google.com/go/internal/optional/optional.go new file mode 100644 index 00000000..72780f76 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/optional/optional.go @@ -0,0 +1,108 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package optional provides versions of primitive types that can +// be nil. These are useful in methods that update some of an API object's +// fields. +package optional + +import ( + "fmt" + "strings" + "time" +) + +type ( + // Bool is either a bool or nil. + Bool interface{} + + // String is either a string or nil. + String interface{} + + // Int is either an int or nil. + Int interface{} + + // Uint is either a uint or nil. + Uint interface{} + + // Float64 is either a float64 or nil. + Float64 interface{} + + // Duration is either a time.Duration or nil. + Duration interface{} +) + +// ToBool returns its argument as a bool. +// It panics if its argument is nil or not a bool. +func ToBool(v Bool) bool { + x, ok := v.(bool) + if !ok { + doPanic("Bool", v) + } + return x +} + +// ToString returns its argument as a string. +// It panics if its argument is nil or not a string. +func ToString(v String) string { + x, ok := v.(string) + if !ok { + doPanic("String", v) + } + return x +} + +// ToInt returns its argument as an int. +// It panics if its argument is nil or not an int. +func ToInt(v Int) int { + x, ok := v.(int) + if !ok { + doPanic("Int", v) + } + return x +} + +// ToUint returns its argument as a uint. +// It panics if its argument is nil or not a uint. +func ToUint(v Uint) uint { + x, ok := v.(uint) + if !ok { + doPanic("Uint", v) + } + return x +} + +// ToFloat64 returns its argument as a float64. +// It panics if its argument is nil or not a float64. +func ToFloat64(v Float64) float64 { + x, ok := v.(float64) + if !ok { + doPanic("Float64", v) + } + return x +} + +// ToDuration returns its argument as a time.Duration. +// It panics if its argument is nil or not a time.Duration. +func ToDuration(v Duration) time.Duration { + x, ok := v.(time.Duration) + if !ok { + doPanic("Duration", v) + } + return x +} + +func doPanic(capType string, v interface{}) { + panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) +} diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 00000000..7a7b4c20 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,54 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "context" + "time" + + gax "github.com/googleapis/gax-go/v2" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. 
+// It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Err() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. + if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return Annotatef(lastErr, "retry failed with %v; last error", cerr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go new file mode 100644 index 00000000..66dc3911 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/trace/trace.go @@ -0,0 +1,109 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "context" + "fmt" + + "go.opencensus.io/trace" + "google.golang.org/api/googleapi" + "google.golang.org/genproto/googleapis/rpc/code" + "google.golang.org/grpc/status" +) + +// StartSpan adds a span to the trace with the given name. +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name) + return ctx +} + +// EndSpan ends a span with the given error. +func EndSpan(ctx context.Context, err error) { + span := trace.FromContext(ctx) + if err != nil { + span.SetStatus(toStatus(err)) + } + span.End() +} + +// toStatus interrogates an error and converts it to an appropriate +// OpenCensus status. +func toStatus(err error) trace.Status { + if err2, ok := err.(*googleapi.Error); ok { + return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message} + } else if s, ok := status.FromError(err); ok { + return trace.Status{Code: int32(s.Code()), Message: s.Message()} + } else { + return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()} + } +} + +// TODO(deklerk): switch to using OpenCensus function when it becomes available.
+// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto +func httpStatusCodeToOCCode(httpStatusCode int) int32 { + switch httpStatusCode { + case 200: + return int32(code.Code_OK) + case 499: + return int32(code.Code_CANCELLED) + case 500: + return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS + case 400: + return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE + case 504: + return int32(code.Code_DEADLINE_EXCEEDED) + case 404: + return int32(code.Code_NOT_FOUND) + case 409: + return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED + case 403: + return int32(code.Code_PERMISSION_DENIED) + case 401: + return int32(code.Code_UNAUTHENTICATED) + case 429: + return int32(code.Code_RESOURCE_EXHAUSTED) + case 501: + return int32(code.Code_UNIMPLEMENTED) + case 503: + return int32(code.Code_UNAVAILABLE) + default: + return int32(code.Code_UNKNOWN) + } +} + +// TODO: (odeke-em): perhaps just pass around spans due to the cost +// incurred from using trace.FromContext(ctx) yet we could avoid +// throwing away the work done by ctx, span := trace.StartSpan. +func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) { + var attrs []trace.Attribute + for k, v := range attrMap { + var a trace.Attribute + switch v := v.(type) { + case string: + a = trace.StringAttribute(k, v) + case bool: + a = trace.BoolAttribute(k, v) + case int: + a = trace.Int64Attribute(k, int64(v)) + case int64: + a = trace.Int64Attribute(k, v) + default: + a = trace.StringAttribute(k, fmt.Sprintf("%#v", v)) + } + attrs = append(attrs, a) + } + trace.FromContext(ctx).Annotatef(attrs, format, args...) +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100644 index 00000000..fecf1f03 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 00000000..4a2a8c19 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20180226" + +// Go returns the Go runtime version. The returned string +// has no whitespace. 
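+//
+// For example, the release toolchain "go1.11.4" is reported as "1.11.4"
+// (per goVer below), and devel toolchains drop the "devel +" prefix.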
+func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return !strings.ContainsRune("0123456789.", r) +} diff --git a/vendor/cloud.google.com/go/storage/README.md b/vendor/cloud.google.com/go/storage/README.md new file mode 100644 index 00000000..a2253c4b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/README.md @@ -0,0 +1,32 @@ +## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) + +- [About Cloud Storage](https://cloud.google.com/storage/) +- [API documentation](https://cloud.google.com/storage/docs) +- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) + +### Example Usage + +First create a `storage.Client` to use throughout your application: + +[snip]:# (storage-1) +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +[snip]:# (storage-2) +```go +// Read the object1 from bucket. +rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := ioutil.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 00000000..7855d110 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,335 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "net/http" + "reflect" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. +type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-", "user-", "group-", "group-", +// "domain-" and "project-team-". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. 
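+//
+// For example, ACLEntity("user-liz@example.com") or
+// ACLEntity("domain-example.com") (both addresses are placeholders).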
+type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a +// Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + EntityID string + Role ACLRole + Domain string + Email string + ProjectTeam *ProjectTeam +} + +// ProjectTeam is the project team associated with the entity, if any. +type ProjectTeam struct { + ProjectNumber string + Team string +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. +type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool + userProject string // for requester-pays buckets +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the role for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectSet(ctx, entity, role, false) + } + if a.isDefault { + return a.objectSet(ctx, entity, role, true) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries. +func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List") + defer func() { trace.EndSpan(ctx, err) }() + + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.List(a.bucket) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toBucketACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl) + a.configureCall(ctx, req) + _, err := req.Do() + return err + }) + if err != nil { + return err + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, 
entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object) + a.configureCall(ctx, req) + acls, err = req.Do() + return err + }) + if err != nil { + return nil, err + } + return toObjectACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error { + type setRequest interface { + Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error) + Header() http.Header + } + + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + var req setRequest + if isBucketDefault { + req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl) + } else { + req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl) + } + a.configureCall(ctx, req) + return runWithRetry(ctx, func() error { + _, err := req.Do() + return err + }) +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + return runWithRetry(ctx, func() error { + req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)) + a.configureCall(ctx, req) + return req.Do() + }) +} + +func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) { + vc := reflect.ValueOf(call) + vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)}) + if a.userProject != "" { + vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)}) + } + setClientHeader(call.Header()) +} + +func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toObjectACLRule(item)) + } + return rs +} + +func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule { + var rs []ACLRule + for _, item := range items { + rs = append(rs, toBucketACLRule(item)) + } + return rs +} + +func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toObjectProjectTeam(a.ProjectTeam), + } +} + +func toBucketACLRule(a *raw.BucketAccessControl) ACLRule { + return ACLRule{ + Entity: ACLEntity(a.Entity), + EntityID: a.EntityId, + Role: ACLRole(a.Role), + Domain: a.Domain, + Email: a.Email, + ProjectTeam: toBucketProjectTeam(a.ProjectTeam), + } +} + +func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.ObjectAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary + } + return r +} + +func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl { + if len(rules) == 0 { + return nil + } + r := make([]*raw.BucketAccessControl, 0, len(rules)) + for _, rule := range rules { + r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary + } + return r +} + +func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl { + return &raw.BucketAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other 
fields are not settable. + } +} + +func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl { + return &raw.ObjectAccessControl{ + Bucket: bucket, + Entity: string(r.Entity), + Role: string(r.Role), + // The other fields are not settable. + } +} + +func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} + +func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam { + if p == nil { + return nil + } + return &ProjectTeam{ + ProjectNumber: p.ProjectNumber, + Team: p.Team, + } +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 00000000..bbfc59b2 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,1186 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "fmt" + "net/http" + "reflect" + "time" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. +type BucketHandle struct { + c *Client + name string + acl ACLHandle + defaultObjectACL ACLHandle + conds *BucketConditions + userProject string // project for Requester Pays buckets +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + }, + } +} + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create") + defer func() { trace.EndSpan(ctx, err) }() + + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + // If there is lifecycle information but no location, explicitly set + // the location. This is a GCS quirk/bug. 
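+	// (The Location field's documentation below notes the service default
+	// of "US", which is what the check here fills in.)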
+ if bkt.Location == "" && bkt.Lifecycle != nil { + bkt.Location = "US" + } + req := b.c.raw.Buckets.Insert(projectID, bkt) + setClientHeader(req.Header()) + if attrs != nil && attrs.PredefinedACL != "" { + req.PredefinedAcl(attrs.PredefinedACL) + } + if attrs != nil && attrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) + } + return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newDeleteCall() + if err != nil { + return err + } + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) { + req := b.c.raw.Buckets.Delete(b.name) + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. +func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + userProject: b.userProject, + }, + gen: -1, + userProject: b.userProject, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newGetCall() + if err != nil { + return nil, err + } + var resp *raw.Bucket + err = runWithRetry(ctx, func() error { + resp, err = req.Context(ctx).Do() + return err + }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp) +} + +func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) { + req := b.c.raw.Buckets.Get(b.name).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// Update updates a bucket's attributes. 
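+//
+// A minimal sketch ("bkt" is a placeholder *BucketHandle and the field
+// value shown is illustrative):
+//
+//	attrs, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{
+//		VersioningEnabled: false,
+//	})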
+func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update") + defer func() { trace.EndSpan(ctx, err) }() + + req, err := b.newPatchCall(&uattrs) + if err != nil { + return nil, err + } + if uattrs.PredefinedACL != "" { + req.PredefinedAcl(uattrs.PredefinedACL) + } + if uattrs.PredefinedDefaultObjectACL != "" { + req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL) + } + // TODO(jba): retry iff metagen is set? + rb, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + return newBucket(rb) +} + +func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) { + rb := uattrs.toRawBucket() + req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full") + setClientHeader(req.Header()) + if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil { + return nil, err + } + if b.userProject != "" { + req.UserProject(b.userProject) + } + return req, nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +// Read-only fields are ignored by BucketHandle.Create. +type BucketAttrs struct { + // Name is the name of the bucket. + // This field is read-only. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // BucketPolicyOnly configures access checks to use only bucket-level IAM + // policies. + BucketPolicyOnly BucketPolicyOnly + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // DefaultEventBasedHold is the default value for event-based hold on + // newly created objects in this bucket. It defaults to false. + DefaultEventBasedHold bool + + // If not empty, applies a predefined set of access controls. It should be set + // only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedACL string + + // If not empty, applies a predefined set of default object access controls. + // It should be set only when creating a bucket. + // It is always empty for BucketAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert + // for valid values. + PredefinedDefaultObjectACL string + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + // This field is read-only. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "MULTI_REGIONAL", + // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which + // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on + // the bucket's location settings. + StorageClass string + + // Created is the creation time of the bucket. + // This field is read-only. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + VersioningEnabled bool + + // Labels are the bucket's labels. + Labels map[string]string + + // RequesterPays reports whether the bucket is a Requester Pays bucket.
+ // Clients performing operations on Requester Pays buckets must provide + // a user project (see BucketHandle.UserProject), which will be billed + // for the operations. + RequesterPays bool + + // Lifecycle is the lifecycle configuration for objects in the bucket. + Lifecycle Lifecycle + + // Retention policy enforces a minimum retention time for all objects + // contained in the bucket. A RetentionPolicy of nil implies the bucket + // has no minimum data retention. + // + // This feature is in private alpha release. It is not currently available to + // most customers. It might be changed in backwards-incompatible ways and is not + // subject to any SLA or deprecation policy. + RetentionPolicy *RetentionPolicy + + // The bucket's Cross-Origin Resource Sharing (CORS) configuration. + CORS []CORS + + // The encryption configuration used by default for newly inserted objects. + Encryption *BucketEncryption + + // The logging configuration. + Logging *BucketLogging + + // The website configuration. + Website *BucketWebsite + + // Etag is the HTTP/1.1 Entity tag for the bucket. + // This field is read-only. + Etag string +} + +// BucketPolicyOnly configures access checks to use only bucket-level IAM +// policies. +type BucketPolicyOnly struct { + // Enabled specifies whether access checks use only bucket-level IAM + // policies. Enabled may be disabled until the locked time. + Enabled bool + // LockedTime specifies the deadline for changing Enabled from true to + // false. + LockedTime time.Time +} + +// Lifecycle is the lifecycle configuration for objects in the bucket. +type Lifecycle struct { + Rules []LifecycleRule +} + +// RetentionPolicy enforces a minimum retention time for all objects +// contained in the bucket. +// +// Any attempt to overwrite or delete objects younger than the retention +// period will result in an error. An unlocked retention policy can be +// modified or removed from the bucket via the Update method. A +// locked retention policy cannot be removed or shortened in duration +// for the lifetime of the bucket. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +type RetentionPolicy struct { + // RetentionPeriod specifies the duration that objects need to be + // retained. Retention duration must be greater than zero and less than + // 100 years. Note that enforcement of retention periods less than a day + // is not guaranteed. Such periods should only be used for testing + // purposes. + RetentionPeriod time.Duration + + // EffectiveTime is the time from which the policy was enforced and + // effective. This field is read-only. + EffectiveTime time.Time + + // IsLocked describes whether the bucket is locked. Once locked, an + // object retention policy cannot be modified. + // This field is read-only. + IsLocked bool +} + +const ( + // RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule. + rfc3339Date = "2006-01-02" + + // DeleteAction is a lifecycle action that deletes a live and/or archived + // objects. Takes precedence over SetStorageClass actions. + DeleteAction = "Delete" + + // SetStorageClassAction changes the storage class of live and/or archived + // objects. + SetStorageClassAction = "SetStorageClass" +) + +// LifecycleRule is a lifecycle configuration rule. 
+// +// When all the configured conditions are met by an object in the bucket, the +// configured action will automatically be taken on that object. +type LifecycleRule struct { + // Action is the action to take when all of the associated conditions are + // met. + Action LifecycleAction + + // Condition is the set of conditions that must be met for the associated + // action to be taken. + Condition LifecycleCondition +} + +// LifecycleAction is a lifecycle configuration action. +type LifecycleAction struct { + // Type is the type of action to take on matching objects. + // + // Acceptable values are "Delete" to delete matching objects and + // "SetStorageClass" to set the storage class defined in StorageClass on + // matching objects. + Type string + + // StorageClass is the storage class to set on matching objects if the Action + // is "SetStorageClass". + StorageClass string +} + +// Liveness specifies whether the object is live or not. +type Liveness int + +const ( + // LiveAndArchived includes both live and archived objects. + LiveAndArchived Liveness = iota + // Live specifies that the object is still live. + Live + // Archived specifies that the object is archived. + Archived +) + +// LifecycleCondition is a set of conditions used to match objects and take an +// action automatically. +// +// All configured conditions must be met for the associated action to be taken. +type LifecycleCondition struct { + // AgeInDays is the age of the object in days. + AgeInDays int64 + + // CreatedBefore is the time the object was created. + // + // This condition is satisfied when an object is created before midnight of + // the specified date in UTC. + CreatedBefore time.Time + + // Liveness specifies the object's liveness. Relevant only for versioned objects + Liveness Liveness + + // MatchesStorageClasses is the condition matching the object's storage + // class. + // + // Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", + // "STANDARD", and "DURABLE_REDUCED_AVAILABILITY". + MatchesStorageClasses []string + + // NumNewerVersions is the condition matching objects with a number of newer versions. + // + // If the value is N, this condition is satisfied when there are at least N + // versions (including the live version) newer than this version of the + // object. + NumNewerVersions int64 +} + +// BucketLogging holds the bucket's logging configuration, which defines the +// destination bucket and optional name prefix for the current bucket's +// logs. +type BucketLogging struct { + // The destination bucket where the current bucket's logs + // should be placed. + LogBucket string + + // A prefix for log object names. + LogObjectPrefix string +} + +// BucketWebsite holds the bucket's website configuration, controlling how the +// service behaves when accessing bucket contents as a web site. See +// https://cloud.google.com/storage/docs/static-website for more information. +type BucketWebsite struct { + // If the requested object path is missing, the service will ensure the path has + // a trailing '/', append this suffix, and attempt to retrieve the resulting + // object. This allows the creation of index.html objects to represent directory + // pages. + MainPageSuffix string + + // If the requested object path is missing, and any mainPageSuffix object is + // missing, if applicable, the service will return the named object from this + // bucket as the content for a 404 Not Found result. 
+	NotFoundPage string
+}
+
+func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
+	if b == nil {
+		return nil, nil
+	}
+	rp, err := toRetentionPolicy(b.RetentionPolicy)
+	if err != nil {
+		return nil, err
+	}
+	return &BucketAttrs{
+		Name:                  b.Name,
+		Location:              b.Location,
+		MetaGeneration:        b.Metageneration,
+		DefaultEventBasedHold: b.DefaultEventBasedHold,
+		StorageClass:          b.StorageClass,
+		Created:               convertTime(b.TimeCreated),
+		VersioningEnabled:     b.Versioning != nil && b.Versioning.Enabled,
+		ACL:                   toBucketACLRules(b.Acl),
+		DefaultObjectACL:      toObjectACLRules(b.DefaultObjectAcl),
+		Labels:                b.Labels,
+		RequesterPays:         b.Billing != nil && b.Billing.RequesterPays,
+		Lifecycle:             toLifecycle(b.Lifecycle),
+		RetentionPolicy:       rp,
+		CORS:                  toCORS(b.Cors),
+		Encryption:            toBucketEncryption(b.Encryption),
+		Logging:               toBucketLogging(b.Logging),
+		Website:               toBucketWebsite(b.Website),
+		BucketPolicyOnly:      toBucketPolicyOnly(b.IamConfiguration),
+		Etag:                  b.Etag,
+	}, nil
+}
+
+// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
+func (b *BucketAttrs) toRawBucket() *raw.Bucket {
+	// Copy label map.
+	var labels map[string]string
+	if len(b.Labels) > 0 {
+		labels = make(map[string]string, len(b.Labels))
+		for k, v := range b.Labels {
+			labels[k] = v
+		}
+	}
+	// Ignore VersioningEnabled if it is false. This is OK because
+	// we only call this method when creating a bucket, and by default
+	// new buckets have versioning off.
+	var v *raw.BucketVersioning
+	if b.VersioningEnabled {
+		v = &raw.BucketVersioning{Enabled: true}
+	}
+	var bb *raw.BucketBilling
+	if b.RequesterPays {
+		bb = &raw.BucketBilling{RequesterPays: true}
+	}
+	var bktIAM *raw.BucketIamConfiguration
+	if b.BucketPolicyOnly.Enabled {
+		bktIAM = &raw.BucketIamConfiguration{
+			BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{
+				Enabled: true,
+			},
+		}
+	}
+	return &raw.Bucket{
+		Name:             b.Name,
+		Location:         b.Location,
+		StorageClass:     b.StorageClass,
+		Acl:              toRawBucketACL(b.ACL),
+		DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
+		Versioning:       v,
+		Labels:           labels,
+		Billing:          bb,
+		Lifecycle:        toRawLifecycle(b.Lifecycle),
+		RetentionPolicy:  b.RetentionPolicy.toRawRetentionPolicy(),
+		Cors:             toRawCORS(b.CORS),
+		Encryption:       b.Encryption.toRawBucketEncryption(),
+		Logging:          b.Logging.toRawBucketLogging(),
+		Website:          b.Website.toRawBucketWebsite(),
+		IamConfiguration: bktIAM,
+	}
+}
+
+// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
+type CORS struct {
+	// MaxAge is the value to return in the Access-Control-Max-Age
+	// header used in preflight responses.
+	MaxAge time.Duration
+
+	// Methods is the list of HTTP methods on which to include CORS response
+	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
+	// of methods, and means "any method".
+	Methods []string
+
+	// Origins is the list of Origins eligible to receive CORS response
+	// headers. Note: "*" is permitted in the list of origins, and means
+	// "any Origin".
+	Origins []string
+
+	// ResponseHeaders is the list of HTTP headers other than the simple
+	// response headers to give permission for the user-agent to share
+	// across domains.
+	ResponseHeaders []string
+}
+
+// BucketEncryption is a bucket's encryption configuration.
+type BucketEncryption struct {
+	// A Cloud KMS key name, in the form
+	// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
+	// objects inserted into this bucket, if no encryption method is specified.
+	// The key's location must be the same as the bucket's.
+	DefaultKMSKeyName string
+}
+
+// BucketAttrsToUpdate defines the attributes to update during an Update call.
+type BucketAttrsToUpdate struct {
+	// If set, updates whether the bucket uses versioning.
+	VersioningEnabled optional.Bool
+
+	// If set, updates whether the bucket is a Requester Pays bucket.
+	RequesterPays optional.Bool
+
+	// DefaultEventBasedHold is the default value for event-based hold on
+	// newly created objects in this bucket.
+	DefaultEventBasedHold optional.Bool
+
+	// BucketPolicyOnly configures access checks to use only bucket-level IAM
+	// policies.
+	BucketPolicyOnly *BucketPolicyOnly
+
+	// If set, updates the retention policy of the bucket. Using
+	// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
+	//
+	// This feature is in private alpha release. It is not currently available to
+	// most customers. It might be changed in backwards-incompatible ways and is not
+	// subject to any SLA or deprecation policy.
+	RetentionPolicy *RetentionPolicy
+
+	// If set, replaces the CORS configuration with a new configuration.
+	// An empty (rather than nil) slice causes all CORS policies to be removed.
+	CORS []CORS
+
+	// If set, replaces the encryption configuration of the bucket. Using
+	// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
+	// configuration.
+	Encryption *BucketEncryption
+
+	// If set, replaces the lifecycle configuration of the bucket.
+	Lifecycle *Lifecycle
+
+	// If set, replaces the logging configuration of the bucket.
+	Logging *BucketLogging
+
+	// If set, replaces the website configuration of the bucket.
+	Website *BucketWebsite
+
+	// If not empty, applies a predefined set of access controls.
+	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
+	PredefinedACL string
+
+	// If not empty, applies a predefined set of default object access controls.
+	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
+	PredefinedDefaultObjectACL string
+
+	setLabels    map[string]string
+	deleteLabels map[string]bool
+}
+
+// SetLabel causes a label to be added or modified when ua is used
+// in a call to Bucket.Update.
+func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
+	if ua.setLabels == nil {
+		ua.setLabels = map[string]string{}
+	}
+	ua.setLabels[name] = value
+}
+
+// DeleteLabel causes a label to be deleted when ua is used in a
+// call to Bucket.Update.
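+//
+// A minimal sketch of both label helpers (bkt and ctx are assumed to be an
+// existing *BucketHandle and context.Context):
+//
+//	var ua BucketAttrsToUpdate
+//	ua.SetLabel("owner", "team-a") // add or change a label
+//	ua.DeleteLabel("scratch")      // remove a label
+//	attrs, err := bkt.Update(ctx, ua)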
+func (ua *BucketAttrsToUpdate) DeleteLabel(name string) { + if ua.deleteLabels == nil { + ua.deleteLabels = map[string]bool{} + } + ua.deleteLabels[name] = true +} + +func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { + rb := &raw.Bucket{} + if ua.CORS != nil { + rb.Cors = toRawCORS(ua.CORS) + rb.ForceSendFields = append(rb.ForceSendFields, "Cors") + } + if ua.DefaultEventBasedHold != nil { + rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold) + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold") + } + if ua.RetentionPolicy != nil { + if ua.RetentionPolicy.RetentionPeriod == 0 { + rb.NullFields = append(rb.NullFields, "RetentionPolicy") + rb.RetentionPolicy = nil + } else { + rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy() + } + } + if ua.VersioningEnabled != nil { + rb.Versioning = &raw.BucketVersioning{ + Enabled: optional.ToBool(ua.VersioningEnabled), + ForceSendFields: []string{"Enabled"}, + } + } + if ua.RequesterPays != nil { + rb.Billing = &raw.BucketBilling{ + RequesterPays: optional.ToBool(ua.RequesterPays), + ForceSendFields: []string{"RequesterPays"}, + } + } + if ua.BucketPolicyOnly != nil { + rb.IamConfiguration = &raw.BucketIamConfiguration{ + BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{ + Enabled: ua.BucketPolicyOnly.Enabled, + }, + } + } + if ua.Encryption != nil { + if ua.Encryption.DefaultKMSKeyName == "" { + rb.NullFields = append(rb.NullFields, "Encryption") + rb.Encryption = nil + } else { + rb.Encryption = ua.Encryption.toRawBucketEncryption() + } + } + if ua.Lifecycle != nil { + rb.Lifecycle = toRawLifecycle(*ua.Lifecycle) + } + if ua.Logging != nil { + if *ua.Logging == (BucketLogging{}) { + rb.NullFields = append(rb.NullFields, "Logging") + rb.Logging = nil + } else { + rb.Logging = ua.Logging.toRawBucketLogging() + } + } + if ua.Website != nil { + if *ua.Website == (BucketWebsite{}) { + rb.NullFields = append(rb.NullFields, "Website") + rb.Website = nil + } else { + rb.Website = ua.Website.toRawBucketWebsite() + } + } + if ua.PredefinedACL != "" { + // Clear ACL or the call will fail. + rb.Acl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "Acl") + } + if ua.PredefinedDefaultObjectACL != "" { + // Clear ACLs or the call will fail. + rb.DefaultObjectAcl = nil + rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl") + } + if ua.setLabels != nil || ua.deleteLabels != nil { + rb.Labels = map[string]string{} + for k, v := range ua.setLabels { + rb.Labels[k] = v + } + if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 { + rb.ForceSendFields = append(rb.ForceSendFields, "Labels") + } + for l := range ua.deleteLabels { + rb.NullFields = append(rb.NullFields, "Labels."+l) + } + } + return rb +} + +// If returns a new BucketHandle that applies a set of preconditions. +// Preconditions already set on the BucketHandle are ignored. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. The only valid preconditions for buckets are MetagenerationMatch +// and MetagenerationNotMatch. +func (b *BucketHandle) If(conds BucketConditions) *BucketHandle { + b2 := *b + b2.conds = &conds + return &b2 +} + +// BucketConditions constrain bucket methods to act on specific metagenerations. +// +// The zero value is an empty set of constraints. +type BucketConditions struct { + // MetagenerationMatch specifies that the bucket must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. 
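+	//
+	// For illustration, a read-modify-write guard (bkt, ctx and ua assumed
+	// from earlier calls; error handling elided):
+	//
+	//	attrs, _ := bkt.Attrs(ctx)
+	//	b := bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration})
+	//	_, err := b.Update(ctx, ua)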
+ MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the bucket must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *BucketConditions) validate(method string) error { + if *c == (BucketConditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +// UserProject returns a new BucketHandle that passes the project ID as the user +// project for all subsequent calls. Calls with a user project will be billed to that +// project rather than to the bucket's owning project. +// +// A user project is required for all operations on Requester Pays buckets. +func (b *BucketHandle) UserProject(projectID string) *BucketHandle { + b2 := *b + b2.userProject = projectID + b2.acl.userProject = projectID + b2.defaultObjectACL.userProject = projectID + return &b2 +} + +// LockRetentionPolicy locks a bucket's retention policy until a previously-configured +// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less +// than a day, the retention policy is treated as a development configuration and locking +// will have no effect. The BucketHandle must have a metageneration condition that +// matches the bucket's metageneration. See BucketHandle.If. +// +// This feature is in private alpha release. It is not currently available to +// most customers. It might be changed in backwards-incompatible ways and is not +// subject to any SLA or deprecation policy. +func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error { + var metageneration int64 + if b.conds != nil { + metageneration = b.conds.MetagenerationMatch + } + req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration) + _, err := req.Context(ctx).Do() + return err +} + +// applyBucketConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
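+// Because the raw per-method call types do not share an interface for their
+// If* setters, the field is set via reflection (see setConditionField).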
+func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
+	if conds == nil {
+		return nil
+	}
+	if err := conds.validate(method); err != nil {
+		return err
+	}
+	cval := reflect.ValueOf(call)
+	switch {
+	case conds.MetagenerationMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
+		}
+	case conds.MetagenerationNotMatch != 0:
+		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
+		}
+	}
+	return nil
+}
+
+func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
+	if rp == nil {
+		return nil
+	}
+	return &raw.BucketRetentionPolicy{
+		RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
+	}
+}
+
+func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
+	if rp == nil {
+		return nil, nil
+	}
+	t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
+	if err != nil {
+		return nil, err
+	}
+	return &RetentionPolicy{
+		RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
+		EffectiveTime:   t,
+		IsLocked:        rp.IsLocked,
+	}, nil
+}
+
+func toRawCORS(c []CORS) []*raw.BucketCors {
+	var out []*raw.BucketCors
+	for _, v := range c {
+		out = append(out, &raw.BucketCors{
+			MaxAgeSeconds:  int64(v.MaxAge / time.Second),
+			Method:         v.Methods,
+			Origin:         v.Origins,
+			ResponseHeader: v.ResponseHeaders,
+		})
+	}
+	return out
+}
+
+func toCORS(rc []*raw.BucketCors) []CORS {
+	var out []CORS
+	for _, v := range rc {
+		out = append(out, CORS{
+			MaxAge:          time.Duration(v.MaxAgeSeconds) * time.Second,
+			Methods:         v.Method,
+			Origins:         v.Origin,
+			ResponseHeaders: v.ResponseHeader,
+		})
+	}
+	return out
+}
+
+func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
+	var rl raw.BucketLifecycle
+	if len(l.Rules) == 0 {
+		return nil
+	}
+	for _, r := range l.Rules {
+		rr := &raw.BucketLifecycleRule{
+			Action: &raw.BucketLifecycleRuleAction{
+				Type:         r.Action.Type,
+				StorageClass: r.Action.StorageClass,
+			},
+			Condition: &raw.BucketLifecycleRuleCondition{
+				Age:                 r.Condition.AgeInDays,
+				MatchesStorageClass: r.Condition.MatchesStorageClasses,
+				NumNewerVersions:    r.Condition.NumNewerVersions,
+			},
+		}
+
+		switch r.Condition.Liveness {
+		case LiveAndArchived:
+			rr.Condition.IsLive = nil
+		case Live:
+			rr.Condition.IsLive = googleapi.Bool(true)
+		case Archived:
+			rr.Condition.IsLive = googleapi.Bool(false)
+		}
+
+		if !r.Condition.CreatedBefore.IsZero() {
+			rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
+		}
+		rl.Rule = append(rl.Rule, rr)
+	}
+	return &rl
+}
+
+func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
+	var l Lifecycle
+	if rl == nil {
+		return l
+	}
+	for _, rr := range rl.Rule {
+		r := LifecycleRule{
+			Action: LifecycleAction{
+				Type:         rr.Action.Type,
+				StorageClass: rr.Action.StorageClass,
+			},
+			Condition: LifecycleCondition{
+				AgeInDays:             rr.Condition.Age,
+				MatchesStorageClasses: rr.Condition.MatchesStorageClass,
+				NumNewerVersions:      rr.Condition.NumNewerVersions,
+			},
+		}
+
+		// Map the raw IsLive pointer back onto the Liveness enum; a nil
+		// pointer means the rule applies to both live and archived objects.
+		switch {
+		case rr.Condition.IsLive == nil:
+			r.Condition.Liveness = LiveAndArchived
+		case *rr.Condition.IsLive:
+			r.Condition.Liveness = Live
+		default:
+			r.Condition.Liveness = Archived
+		}
+
+		if rr.Condition.CreatedBefore != "" {
+			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
+		}
+		l.Rules = append(l.Rules, r)
+ } + return l +} + +func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { + if e == nil { + return nil + } + return &raw.BucketEncryption{ + DefaultKmsKeyName: e.DefaultKMSKeyName, + } +} + +func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} +} + +func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging { + if b == nil { + return nil + } + return &raw.BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func toBucketLogging(b *raw.BucketLogging) *BucketLogging { + if b == nil { + return nil + } + return &BucketLogging{ + LogBucket: b.LogBucket, + LogObjectPrefix: b.LogObjectPrefix, + } +} + +func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite { + if w == nil { + return nil + } + return &raw.BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite { + if w == nil { + return nil + } + return &BucketWebsite{ + MainPageSuffix: w.MainPageSuffix, + NotFoundPage: w.NotFoundPage, + } +} + +func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly { + if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled { + return BucketPolicyOnly{} + } + lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime) + if err != nil { + return BucketPolicyOnly{ + Enabled: true, + } + } + return BucketPolicyOnly{ + Enabled: true, + LockedTime: lt, + } +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. +func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. 
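+//
+// A typical loop, sketched (assumes bkt is a *BucketHandle and that
+// google.golang.org/api/iterator is imported):
+//
+//	it := bkt.Objects(ctx, &Query{Prefix: "logs/"})
+//	for {
+//		attrs, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			// TODO: Handle error.
+//		}
+//		fmt.Println(attrs.Name)
+//	}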
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + setClientHeader(req.Header()) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if it.bucket.userProject != "" { + req.UserProject(it.bucket.userProject) + } + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. + Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
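+//
+// For example, a hedged sketch of page-at-a-time listing with
+// iterator.NewPager (the page size of 50 is arbitrary):
+//
+//	pager := iterator.NewPager(it, 50, "")
+//	var buckets []*BucketAttrs
+//	nextToken, err := pager.NextPage(&buckets)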
+func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) { + req := it.client.raw.Buckets.List(it.projectID) + setClientHeader(req.Header()) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + b, err := newBucket(item) + if err != nil { + return "", err + } + it.buckets = append(it.buckets, b) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 00000000..52162e72 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,228 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +// +// For Requester Pays buckets, the user project of dst is billed, unless it is empty, +// in which case the user project of src is billed. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and copying requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. 
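+	//
+	// A hypothetical example, logging progress (c is an assumed *Copier):
+	//
+	//	c.ProgressFunc = func(copied, total uint64) {
+	//		log.Printf("copied %d of %d bytes", copied, total)
+	//	}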
+ ProgressFunc func(copiedBytes, totalBytes uint64) + + // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, + // that will be used to encrypt the object. Overrides the object's KMSKeyName, if + // any. + // + // Providing both a DestinationKMSKeyName and a customer-supplied encryption key + // (via ObjectHandle.Key) on the destination object will result in an error when + // Run is called. + DestinationKMSKeyName string + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { + return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize)) + } + if res.Done { // Finished successfully. + return newObject(res.Resource), nil + } + } +} + +func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if c.DestinationKMSKeyName != "" { + call.DestinationKmsKeyName(c.DestinationKMSKeyName) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } else if c.src.userProject != "" { + call.UserProject(c.src.userProject) + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. 
+// +// For Requester Pays buckets, the user project of dst is billed. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. +func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run") + defer func() { trace.EndSpan(ctx, err) }() + + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if c.dst.userProject != "" { + call.UserProject(c.dst.userProject) + } + if c.PredefinedACL != "" { + call.DestinationPredefinedAcl(c.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 00000000..88f64590 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,176 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +See https://godoc.org/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. 
+ +All of the methods of this package use exponential backoff to retry calls that fail +with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues +indefinitely unless the controlling context is canceled or the client is closed. See +context.WithTimeout and context.WithCancel. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +The client will use your default application credentials. + +If you only wish to access public data, you can create +an unauthenticated client with + + client, err := storage.NewClient(ctx, option.WithoutAuthentication()) + +Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. To create a bucket in Google Cloud Storage, +call Create on the handle: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +BucketAttrs. The third argument to BucketHandle.Create allows you to set +the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use +Attrs: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets, but unlike buckets +you don't explicitly create an object. Instead, the first time you write +to an object it will be created. You can use the standard Go io.Reader +and io.Writer interfaces to read and write object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will either create the object or overwrite whatever is there already. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with Attrs: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see +https://cloud.google.com/storage/docs/access-control/iam). + +To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. 
+ } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. Conditions let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +Errors + +Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error). +These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example: + + if e, ok := err.(*googleapi.Error); ok { + if e.Code == 409 { ... } + } +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/go110.go b/vendor/cloud.google.com/go/storage/go110.go new file mode 100644 index 00000000..206813f0 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/go110.go @@ -0,0 +1,32 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.10 + +package storage + +import "google.golang.org/api/googleapi" + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/iam.go b/vendor/cloud.google.com/go/storage/iam.go new file mode 100644 index 00000000..9d936067 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/iam.go @@ -0,0 +1,130 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// IAM provides access to IAM access control for the bucket. +func (b *BucketHandle) IAM() *iam.Handle { + return iam.InternalNewHandleClient(&iamClient{ + raw: b.c.raw, + userProject: b.userProject, + }, b.name) +} + +// iamClient implements the iam.client interface. +type iamClient struct { + raw *raw.Service + userProject string +} + +func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.GetIamPolicy(resource) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var rp *raw.Policy + err = runWithRetry(ctx, func() error { + rp, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return iamFromStoragePolicy(rp), nil +} + +func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set") + defer func() { trace.EndSpan(ctx, err) }() + + rp := iamToStoragePolicy(p) + call := c.raw.Buckets.SetIamPolicy(resource, rp) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + return runWithRetry(ctx, func() error { + _, err := call.Context(ctx).Do() + return err + }) +} + +func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test") + defer func() { trace.EndSpan(ctx, err) }() + + call := c.raw.Buckets.TestIamPermissions(resource, perms) + setClientHeader(call.Header()) + if c.userProject != "" { + call.UserProject(c.userProject) + } + var res *raw.TestIamPermissionsResponse + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy { + return &raw.Policy{ + Bindings: iamToStorageBindings(ip.Bindings), + Etag: string(ip.Etag), + } +} + +func iamToStorageBindings(ibs []*iampb.Binding) []*raw.PolicyBindings { + var rbs []*raw.PolicyBindings + for _, ib := range ibs { + rbs = append(rbs, &raw.PolicyBindings{ + Role: ib.Role, + Members: ib.Members, + }) + } + return rbs +} + +func iamFromStoragePolicy(rp *raw.Policy) *iampb.Policy { + return &iampb.Policy{ + Bindings: iamFromStorageBindings(rp.Bindings), + Etag: []byte(rp.Etag), + } +} + +func iamFromStorageBindings(rbs []*raw.PolicyBindings) []*iampb.Binding { + var ibs []*iampb.Binding + for _, rb := range rbs { + ibs = append(ibs, &iampb.Binding{ + Role: rb.Role, + Members: rb.Members, + }) + } + return ibs +} diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 00000000..e755f197 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,37 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go/v2" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + if shouldRetry(err) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/not_go110.go b/vendor/cloud.google.com/go/storage/not_go110.go new file mode 100644 index 00000000..66fa45be --- /dev/null +++ b/vendor/cloud.google.com/go/storage/not_go110.go @@ -0,0 +1,42 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.10 + +package storage + +import ( + "net/url" + "strings" + + "google.golang.org/api/googleapi" +) + +func shouldRetry(err error) bool { + switch e := err.(type) { + case *googleapi.Error: + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. + return e.Code == 429 || (e.Code >= 500 && e.Code < 600) + case *url.Error: + // Retry on REFUSED_STREAM. + // Unfortunately the error type is unexported, so we resort to string + // matching. + return strings.Contains(e.Error(), "REFUSED_STREAM") + case interface{ Temporary() bool }: + return e.Temporary() + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go new file mode 100644 index 00000000..84619b6d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/notifications.go @@ -0,0 +1,188 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "regexp" + + "cloud.google.com/go/internal/trace" + raw "google.golang.org/api/storage/v1" +) + +// A Notification describes how to send Cloud PubSub messages when certain +// events occur in a bucket. 
+type Notification struct {
+	// The ID of the notification.
+	ID string
+
+	// The ID of the topic to which this subscription publishes.
+	TopicID string
+
+	// The ID of the project to which the topic belongs.
+	TopicProjectID string
+
+	// Only send notifications about listed event types. If empty, send notifications
+	// for all event types.
+	// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
+	EventTypes []string
+
+	// If present, only apply this notification configuration to object names that
+	// begin with this prefix.
+	ObjectNamePrefix string
+
+	// An optional list of additional attributes to attach to each Cloud PubSub
+	// message published for this notification subscription.
+	CustomAttributes map[string]string
+
+	// The contents of the message payload.
+	// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
+	PayloadFormat string
+}
+
+// Values for Notification.PayloadFormat.
+const (
+	// Send no payload with notification messages.
+	NoPayload = "NONE"
+
+	// Send object metadata as JSON with notification messages.
+	JSONPayload = "JSON_API_V1"
+)
+
+// Values for Notification.EventTypes.
+const (
+	// Event that occurs when an object is successfully created.
+	ObjectFinalizeEvent = "OBJECT_FINALIZE"
+
+	// Event that occurs when the metadata of an existing object changes.
+	ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
+
+	// Event that occurs when an object is permanently deleted.
+	ObjectDeleteEvent = "OBJECT_DELETE"
+
+	// Event that occurs when the live version of an object becomes an
+	// archived version.
+	ObjectArchiveEvent = "OBJECT_ARCHIVE"
+)
+
+func toNotification(rn *raw.Notification) *Notification {
+	n := &Notification{
+		ID:               rn.Id,
+		EventTypes:       rn.EventTypes,
+		ObjectNamePrefix: rn.ObjectNamePrefix,
+		CustomAttributes: rn.CustomAttributes,
+		PayloadFormat:    rn.PayloadFormat,
+	}
+	n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
+	return n
+}
+
+var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
+
+// parseNotificationTopic extracts the project and topic IDs from the full
+// resource name returned by the service. If the name is malformed, it returns
+// "?" for both IDs.
+func parseNotificationTopic(nt string) (projectID, topicID string) {
+	matches := topicRE.FindStringSubmatch(nt)
+	if matches == nil {
+		return "?", "?"
+	}
+	return matches[1], matches[2]
+}
+
+func toRawNotification(n *Notification) *raw.Notification {
+	return &raw.Notification{
+		Id: n.ID,
+		Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
+			n.TopicProjectID, n.TopicID),
+		EventTypes:       n.EventTypes,
+		ObjectNamePrefix: n.ObjectNamePrefix,
+		CustomAttributes: n.CustomAttributes,
+		PayloadFormat:    n.PayloadFormat,
+	}
+}
+
+// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
+// and PayloadFormat, and must not set its ID. The other fields are all optional. The
+// returned Notification's ID can be used to refer to it.
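+//
+// A hedged usage sketch (bkt, project and topic names are placeholders):
+//
+//	n, err := bkt.AddNotification(ctx, &Notification{
+//		TopicProjectID: "my-project",
+//		TopicID:        "my-topic",
+//		PayloadFormat:  JSONPayload,
+//	})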
+func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification") + defer func() { trace.EndSpan(ctx, err) }() + + if n.ID != "" { + return nil, errors.New("storage: AddNotification: ID must not be set") + } + if n.TopicProjectID == "" { + return nil, errors.New("storage: AddNotification: missing TopicProjectID") + } + if n.TopicID == "" { + return nil, errors.New("storage: AddNotification: missing TopicID") + } + call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n)) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + rn, err := call.Context(ctx).Do() + if err != nil { + return nil, err + } + return toNotification(rn), nil +} + +// Notifications returns all the Notifications configured for this bucket, as a map +// indexed by notification ID. +func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.List(b.name) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + var res *raw.Notifications + err = runWithRetry(ctx, func() error { + res, err = call.Context(ctx).Do() + return err + }) + if err != nil { + return nil, err + } + return notificationsToMap(res.Items), nil +} + +func notificationsToMap(rns []*raw.Notification) map[string]*Notification { + m := map[string]*Notification{} + for _, rn := range rns { + m[rn.Id] = toNotification(rn) + } + return m +} + +// DeleteNotification deletes the notification with the given ID. +func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification") + defer func() { trace.EndSpan(ctx, err) }() + + call := b.c.raw.Notifications.Delete(b.name, id) + setClientHeader(call.Header()) + if b.userProject != "" { + call.UserProject(b.userProject) + } + return call.Context(ctx).Do() +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 00000000..50f381f9 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,385 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "errors" + "fmt" + "hash/crc32" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/internal/trace" + "google.golang.org/api/googleapi" +) + +var crc32cTable = crc32.MakeTable(crc32.Castagnoli) + +// ReaderObjectAttrs are attributes about the object being read. These are populated +// during the New call. This struct only holds a subset of object attributes: to +// get the full set of attributes, use ObjectHandle.Attrs. 
+// +// Each field is read-only. +type ReaderObjectAttrs struct { + // Size is the length of the object's content. + Size int64 + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // CacheControl specifies whether and for how long browser and Internet + // caches are allowed to cache your objects. + CacheControl string + + // LastModified is the time that the object was last modified. + LastModified time.Time + + // Generation is the generation number of the object's content. + Generation int64 + + // Metageneration is the version of the metadata for this object at + // this generation. This field is used for preconditions and for + // detecting changes in metadata. A metageneration number is only + // meaningful in the context of a particular generation of a + // particular object. + Metageneration int64 +} + +// NewReader creates a new Reader to read the contents of the +// object. +// ErrObjectNotExist will be returned if the object is not found. +// +// The caller must call Close on the returned Reader when done reading. +func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) { + return o.NewRangeReader(ctx, 0, -1) +} + +// NewRangeReader reads part of an object, reading at most length bytes +// starting at the given offset. If length is negative, the object is read +// until the end. +func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + if offset < 0 { + return nil, fmt.Errorf("storage: invalid offset %d < 0", offset) + } + if o.conds != nil { + if err := o.conds.validate("NewRangeReader"); err != nil { + return nil, err + } + } + u := &url.URL{ + Scheme: "https", + Host: "storage.googleapis.com", + Path: fmt.Sprintf("/%s/%s", o.bucket, o.object), + } + verb := "GET" + if length == 0 { + verb = "HEAD" + } + req, err := http.NewRequest(verb, u.String(), nil) + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + if o.userProject != "" { + req.Header.Set("X-Goog-User-Project", o.userProject) + } + if o.readCompressed { + req.Header.Set("Accept-Encoding", "gzip") + } + if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { + return nil, err + } + + gen := o.gen + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + start := offset + seen + if length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if length > 0 { + // The end character isn't affected by how many bytes we've seen. + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) + } + // We wait to assign conditions here because the generation number can change in between reopen() runs. 
+		req.URL.RawQuery = conditionsQuery(gen, o.conds)
+		var res *http.Response
+		err = runWithRetry(ctx, func() error {
+			res, err = o.c.hc.Do(req)
+			if err != nil {
+				return err
+			}
+			if res.StatusCode == http.StatusNotFound {
+				res.Body.Close()
+				return ErrObjectNotExist
+			}
+			if res.StatusCode < 200 || res.StatusCode > 299 {
+				body, _ := ioutil.ReadAll(res.Body)
+				res.Body.Close()
+				return &googleapi.Error{
+					Code:   res.StatusCode,
+					Header: res.Header,
+					Body:   string(body),
+				}
+			}
+			if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
+				res.Body.Close()
+				return errors.New("storage: partial request not satisfied")
+			}
+			// If a generation hasn't been specified, and this is the first response we get, let's record the
+			// generation. In future requests we'll use this generation as a precondition to avoid data races.
+			if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
+				gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
+				if err != nil {
+					return err
+				}
+				gen = gen64
+			}
+			return nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		return res, nil
+	}
+
+	res, err := reopen(0)
+	if err != nil {
+		return nil, err
+	}
+	var (
+		size     int64 // total size of object, even if a range was requested.
+		checkCRC bool
+		crc      uint32
+	)
+	if res.StatusCode == http.StatusPartialContent {
+		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
+		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
+			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+		}
+		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
+		}
+	} else {
+		size = res.ContentLength
+		// Check the CRC iff all of the following hold:
+		// - We asked for content (length != 0).
+		// - We got all the content (status != PartialContent).
+		// - The server sent a CRC header.
+		// - The Go http stack did not uncompress the file.
+		// - We were not served compressed data that was uncompressed on download.
+		// The problem with the last two cases is that the CRC will not match -- GCS
+		// computes it on the compressed contents, but we compute it on the
+		// uncompressed contents.
+		if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
+			crc, checkCRC = parseCRC32c(res)
+		}
+	}
+
+	remain := res.ContentLength
+	body := res.Body
+	if length == 0 {
+		remain = 0
+		body.Close()
+		body = emptyBody
+	}
+	// Parse the metageneration header, if the service returned one.
+	var metaGen int64
+	if res.Header.Get("X-Goog-Metageneration") != "" {
+		metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var lm time.Time
+	if res.Header.Get("Last-Modified") != "" {
+		lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	attrs := ReaderObjectAttrs{
+		Size:            size,
+		ContentType:     res.Header.Get("Content-Type"),
+		ContentEncoding: res.Header.Get("Content-Encoding"),
+		CacheControl:    res.Header.Get("Cache-Control"),
+		LastModified:    lm,
+		Generation:      gen,
+		Metageneration:  metaGen,
+	}
+	return &Reader{
+		Attrs:    attrs,
+		body:     body,
+		size:     size,
+		remain:   remain,
+		wantCRC:  crc,
+		checkCRC: checkCRC,
+		reopen:   reopen,
+	}, nil
+}
+
+func uncompressedByServer(res *http.Response) bool {
+	// If the data is stored as gzip but is not encoded as gzip, then it
+	// was uncompressed by the server.
+ return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" && + res.Header.Get("Content-Encoding") != "gzip" +} + +func parseCRC32c(res *http.Response) (uint32, bool) { + const prefix = "crc32c=" + for _, spec := range res.Header["X-Goog-Hash"] { + if strings.HasPrefix(spec, prefix) { + c, err := decodeUint32(spec[len(prefix):]) + if err == nil { + return c, true + } + } + } + return 0, false +} + +var emptyBody = ioutil.NopCloser(strings.NewReader("")) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +// +// Typically, a Reader computes the CRC of the downloaded content and compares it to +// the stored CRC, returning an error from Read if there is a mismatch. This integrity check +// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. +type Reader struct { + Attrs ReaderObjectAttrs + body io.ReadCloser + seen, remain, size int64 + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc + reopen func(seen int64) (*http.Response, error) +} + +// Close closes the Reader. It must be called when done reading. +func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.readWithRetry(p) + if r.remain != -1 { + r.remain -= int64(n) + } + if r.checkCRC { + r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, p[:n]) + // Check CRC here. It would be natural to check it in Close, but + // everybody defers Close on the assumption that it doesn't return + // anything worth looking at. + if err == io.EOF { + if r.gotCRC != r.wantCRC { + return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d", + r.gotCRC, r.wantCRC) + } + } + } + return n, err +} + +func (r *Reader) readWithRetry(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if !shouldRetryRead(err) { + return n, err + } + // Read failed, but we will try again. Send a ranged read request that takes + // into account the number of bytes we've already seen. + res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func shouldRetryRead(err error) bool { + if err == nil { + return false + } + return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +// +// Deprecated: use Reader.Attrs.Size. +func (r *Reader) Size() int64 { + return r.Attrs.Size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +// +// Deprecated: use Reader.Attrs.ContentType. +func (r *Reader) ContentType() string { + return r.Attrs.ContentType +} + +// ContentEncoding returns the content encoding of the object. +// +// Deprecated: use Reader.Attrs.ContentEncoding. +func (r *Reader) ContentEncoding() string { + return r.Attrs.ContentEncoding +} + +// CacheControl returns the cache control of the object. +// +// Deprecated: use Reader.Attrs.CacheControl. +func (r *Reader) CacheControl() string { + return r.Attrs.CacheControl +} + +// LastModified returns the value of the Last-Modified header. +// +// Deprecated: use Reader.Attrs.LastModified. 
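+//
+// A minimal sketch of the preferred pattern (bucket and object names are
+// hypothetical):
+//
+//	r, err := client.Bucket("b").Object("o").NewReader(ctx)
+//	if err != nil {
+//		// handle err; ErrObjectNotExist reports a missing object
+//	}
+//	defer r.Close()
+//	lm := r.Attrs.LastModified // preferred over the deprecated accessors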
+func (r *Reader) LastModified() (time.Time, error) { + return r.Attrs.LastModified, nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 00000000..c5c5c59b --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1351 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "cloud.google.com/go/internal/optional" + "cloud.google.com/go/internal/trace" + "cloud.google.com/go/internal/version" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" + htransport "google.golang.org/api/transport/http" +) + +var ( + // ErrBucketNotExist indicates that the bucket does not exist. + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + // ErrObjectNotExist indicates that the object does not exist. + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo) + +func setClientHeader(headers http.Header) { + headers.Set("x-goog-api-client", xGoogHeader) +} + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := htransport.NewClient(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. 
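+//
+// A typical lifecycle, sketched (context and option setup elided):
+//
+//	client, err := NewClient(ctx)
+//	if err != nil {
+//		// handle err
+//	}
+//	defer client.Close()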
+func (c *Client) Close() error { + // Set fields to nil so that subsequent uses will panic. + c.hc = nil + c.raw = nil + return nil +} + +// SigningScheme determines the API version to use when signing URLs. +type SigningScheme int + +const ( + // SigningSchemeDefault is presently V2 and will change to V4 in the future. + SigningSchemeDefault SigningScheme = iota + + // SigningSchemeV2 uses the V2 scheme to sign URLs. + SigningSchemeV2 + + // SigningSchemeV4 uses the V4 scheme to sign URLs. + SigningSchemeV4 +) + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. For example, if + // your application is running on Google App Engine, you can use + // appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. For SigningSchemeV4, the expiration may be no + // more than seven days in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extension headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 string + + // Scheme determines the version of URL signing to use. Default is + // SigningSchemeV2. + Scheme SigningScheme +} + +var ( + tabRegex = regexp.MustCompile(`[\t]+`) + // I was tempted to call this spacex. 
:) + spaceRegex = regexp.MustCompile(` +`) + + canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`) + excludedCanonicalHeaders = map[string]bool{ + "x-goog-encryption-key": true, + "x-goog-encryption-key-sha256": true, + } +) + +// v2SanitizeHeaders applies the specifications for canonical extension headers at +// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +func v2SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var header, value string + // Only keep canonical headers, discard any others. + headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader) + if len(headerMatches) == 0 { + continue + } + header = headerMatches[1] + value = headerMatches[2] + + header = strings.ToLower(strings.TrimSpace(header)) + value = strings.TrimSpace(value) + + if excludedCanonicalHeaders[header] { + // Do not keep any deliberately excluded canonical headers when signing. + continue + } + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. + headerMap[header] = append(headerMap[header], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + sort.Strings(sanitizedHeaders) + return sanitizedHeaders +} + +// v4SanitizeHeaders applies the specifications for canonical extension headers +// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers. +// +// V4 does a couple things differently from V2: +// - Headers get sorted by key, instead of by key:value. We do this in +// signedURLV4. +// - There's no canonical regexp: we simply split headers on :. +// - We don't exclude canonical headers. +// - We replace leading and trailing spaces in header values, like v2, but also +// all intermediate space duplicates get stripped. That is, there's only ever +// a single consecutive space. +func v4SanitizeHeaders(hdrs []string) []string { + headerMap := map[string][]string{} + for _, hdr := range hdrs { + // No leading or trailing whitespaces. + sanitizedHeader := strings.TrimSpace(hdr) + + var key, value string + headerMatches := strings.Split(sanitizedHeader, ":") + if len(headerMatches) < 2 { + continue + } + + key = headerMatches[0] + value = headerMatches[1] + + key = strings.ToLower(strings.TrimSpace(key)) + value = strings.TrimSpace(value) + value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" "))) + value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t"))) + + if len(value) > 0 { + // Remove duplicate headers by appending the values of duplicates + // in their order of appearance. 
+ headerMap[key] = append(headerMap[key], value) + } + } + + var sanitizedHeaders []string + for header, values := range headerMap { + // There should be no spaces around the colon separating the header name + // from the header value or around the values themselves. The values + // should be separated by commas. + // + // NOTE: The semantics for headers without a value are not clear. + // However from specifications these should be edge-cases anyway and we + // should assume that there will be no canonical headers using empty + // values. Any such headers are discarded at the regexp stage above. + sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ","))) + } + return sanitizedHeaders +} + +// SignedURL returns a URL for the specified object. Signed URLs allow +// the users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. +func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + now := utcNow() + if err := validateOptions(opts, now); err != nil { + return "", err + } + + switch opts.Scheme { + case SigningSchemeV2: + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, name, opts) + case SigningSchemeV4: + opts.Headers = v4SanitizeHeaders(opts.Headers) + return signedURLV4(bucket, name, opts, now) + default: // SigningSchemeDefault + opts.Headers = v2SanitizeHeaders(opts.Headers) + return signedURLV2(bucket, name, opts) + } +} + +func validateOptions(opts *SignedURLOptions, now time.Time) error { + if opts == nil { + return errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return errors.New("storage: missing required expires option") + } + if opts.MD5 != "" { + md5, err := base64.StdEncoding.DecodeString(opts.MD5) + if err != nil || len(md5) != 16 { + return errors.New("storage: invalid MD5 checksum") + } + } + if opts.Scheme == SigningSchemeV4 { + cutoff := now.Add(604801 * time.Second) // 7 days + 1 second + if !opts.Expires.Before(cutoff) { + return errors.New("storage: expires must be within seven days from now") + } + } + return nil +} + +const ( + iso8601 = "20060102T150405Z" + yearMonthDay = "20060102" +) + +// utcNow returns the current time in UTC and is a variable to allow for +// reassignment in tests to provide deterministic signed URL values. +var utcNow = func() time.Time { + return time.Now().UTC() +} + +// extractHeaderNames takes in a series of key:value headers and returns the +// header names only. +func extractHeaderNames(kvs []string) []string { + var res []string + for _, header := range kvs { + nameValue := strings.Split(header, ":") + res = append(res, nameValue[0]) + } + return res +} + +// signedURLV4 creates a signed URL using the sigV4 algorithm. 
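+//
+// An illustrative call that exercises this path (credentials and names are
+// placeholders; pemBytes stands in for the contents of a PEM key file):
+//
+//	url, err := SignedURL("bucket", "object", &SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pemBytes,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(time.Hour),
+//		Scheme:         SigningSchemeV4,
+//	})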
+func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) { + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + u := &url.URL{Path: bucket} + if name != "" { + u.Path += "/" + name + } + + // Note: we have to add a / here because GCS does so auto-magically, despite + // Go's EscapedPath not doing so (and we have to exactly match their + // canonical query). + fmt.Fprintf(buf, "/%s\n", u.EscapedPath()) + + headerNames := append(extractHeaderNames(opts.Headers), "host") + if opts.ContentType != "" { + headerNames = append(headerNames, "content-type") + } + if opts.MD5 != "" { + headerNames = append(headerNames, "content-md5") + } + sort.Strings(headerNames) + signedHeaders := strings.Join(headerNames, ";") + timestamp := now.Format(iso8601) + credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay)) + canonicalQueryString := url.Values{ + "X-Goog-Algorithm": {"GOOG4-RSA-SHA256"}, + "X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)}, + "X-Goog-Date": {timestamp}, + "X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))}, + "X-Goog-SignedHeaders": {signedHeaders}, + } + fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode()) + + u.Host = "storage.googleapis.com" + + var headersWithValue []string + headersWithValue = append(headersWithValue, "host:"+u.Host) + headersWithValue = append(headersWithValue, opts.Headers...) + if opts.ContentType != "" { + headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType)) + } + if opts.MD5 != "" { + headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5)) + } + canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n") + fmt.Fprintf(buf, "%s\n\n", canonicalHeaders) + fmt.Fprintf(buf, "%s\n", signedHeaders) + fmt.Fprint(buf, "UNSIGNED-PAYLOAD") + + sum := sha256.Sum256(buf.Bytes()) + hexDigest := hex.EncodeToString(sum[:]) + signBuf := &bytes.Buffer{} + fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n") + fmt.Fprintf(signBuf, "%s\n", timestamp) + fmt.Fprintf(signBuf, "%s\n", credentialScope) + fmt.Fprintf(signBuf, "%s", hexDigest) + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + b, err := signBytes(signBuf.Bytes()) + if err != nil { + return "", err + } + signature := hex.EncodeToString(b) + canonicalQueryString.Set("X-Goog-Signature", string(signature)) + u.Scheme = "https" + u.RawQuery = canonicalQueryString.Encode() + return u.String(), nil +} + +// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header +// key. 
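+//
+// For example (values hypothetical):
+//
+//	in:  "host:example.com", "content-type:text/plain"
+//	out: "content-type:text/plain", "host:example.com"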
+func sortHeadersByKey(hdrs []string) []string { + headersMap := map[string]string{} + var headersKeys []string + for _, h := range hdrs { + parts := strings.Split(h, ":") + k := parts[0] + v := parts[1] + headersMap[k] = v + headersKeys = append(headersKeys, k) + } + sort.Strings(headersKeys) + var sorted []string + for _, k := range headersKeys { + v := headersMap[k] + sorted = append(sorted, fmt.Sprintf("%s:%s", k, v)) + } + return sorted +} + +func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) { + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + if len(opts.Headers) > 0 { + fmt.Fprintf(buf, "%s\n", strings.Join(opts.Headers, "\n")) + } + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key + userProject string // for requester-pays buckets + readCompressed bool // Accept-Encoding: gzip +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will return an error if the preconditions are not +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. 
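+//
+// A sketch of supplying a customer-supplied encryption key (key material
+// hypothetical; use a securely generated 32-byte key in practice):
+//
+//	key := make([]byte, 32)
+//	obj := bkt.Object("secret").Key(key)
+//	// Subsequent reads and writes through obj use the key.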
+func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) { + ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update") + defer func() { trace.EndSpan(ctx, err) }() + + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + // For ContentType, sending the empty string is a no-op. + // Instead we send a null. + if attrs.ContentType == "" { + nullFields = append(nullFields, "ContentType") + } else { + forceSendFields = append(forceSendFields, "ContentType") + } + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage it's an error to send the empty string. + // Instead we send a null. + if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentEncoding") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.EventBasedHold != nil { + attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold) + forceSendFields = append(forceSendFields, "EventBasedHold") + } + if uattrs.TemporaryHold != nil { + attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold) + forceSendFields = append(forceSendFields, "TemporaryHold") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. 
+ nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + if uattrs.PredefinedACL != "" { + call.PredefinedAcl(uattrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + setClientHeader(call.Header()) + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// BucketName returns the name of the bucket. +func (o *ObjectHandle) BucketName() string { + return o.bucket +} + +// ObjectName returns the name of the object. +func (o *ObjectHandle) ObjectName() string { + return o.object +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. +// Set a field to its zero value to delete it. +// +// For example, to change ContentType and delete ContentEncoding and +// Metadata, use +// ObjectAttrsToUpdate{ +// ContentType: "text/html", +// ContentEncoding: "", +// Metadata: map[string]string{}, +// } +type ObjectAttrsToUpdate struct { + EventBasedHold optional.Bool + TemporaryHold optional.Bool + ContentType optional.String + ContentLanguage optional.String + ContentEncoding optional.String + ContentDisposition optional.String + CacheControl optional.String + Metadata map[string]string // set to map[string]string{} to delete + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. ACL must be nil. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/patch. + PredefinedACL string +} + +// Delete deletes the single specified object. +func (o *ObjectHandle) Delete(ctx context.Context) error { + if err := o.validate(); err != nil { + return err + } + call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx) + if err := applyConds("Delete", o.gen, o.conds, call); err != nil { + return err + } + if o.userProject != "" { + call.UserProject(o.userProject) + } + // Encryption doesn't apply to Delete. + setClientHeader(call.Header()) + err := runWithRetry(ctx, func() error { return call.Do() }) + switch e := err.(type) { + case nil: + return nil + case *googleapi.Error: + if e.Code == http.StatusNotFound { + return ErrObjectNotExist + } + } + return err +} + +// ReadCompressed when true causes the read to happen without decompressing. +func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle { + o2 := *o + o2.readCompressed = compressed + return &o2 +} + +// NewWriter returns a storage Writer that writes to the GCS object +// associated with this ObjectHandle. +// +// A new object will be created unless an object with this name already exists. +// Otherwise any previous object with the same name will be replaced. 
+// The object will not be available (and any previous object will remain) +// until Close has been called. +// +// Attributes can be set on the object by modifying the returned Writer's +// ObjectAttrs field before the first call to Write. If no ContentType +// attribute is specified, the content type will be automatically sniffed +// using net/http.DetectContentType. +// +// It is the caller's responsibility to call Close when writing is done. To +// stop writing without saving the data, cancel the context. +func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer { + return &Writer{ + ctx: ctx, + o: o, + donec: make(chan struct{}), + ObjectAttrs: ObjectAttrs{Name: o.object}, + ChunkSize: googleapi.DefaultUploadChunkSize, + } +} + +func (o *ObjectHandle) validate() error { + if o.bucket == "" { + return errors.New("storage: bucket name is empty") + } + if o.object == "" { + return errors.New("storage: object name is empty") + } + if !utf8.ValidString(o.object) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object) + } + return nil +} + +// parseKey converts the binary contents of a private key file to an +// *rsa.PrivateKey. It detects whether the private key is in a PEM container or +// not. If so, it extracts the private key from PEM container before +// conversion. It only supports PEM containers with no passphrase. +func parseKey(key []byte) (*rsa.PrivateKey, error) { + if block, _ := pem.Decode(key); block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, err + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("oauth2: private key is invalid") + } + return parsed, nil +} + +// toRawObject copies the editable attributes from o to the raw library's Object type. +func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object { + var ret string + if !o.RetentionExpirationTime.IsZero() { + ret = o.RetentionExpirationTime.Format(time.RFC3339) + } + return &raw.Object{ + Bucket: bucket, + Name: o.Name, + EventBasedHold: o.EventBasedHold, + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: ret, + ContentType: o.ContentType, + ContentEncoding: o.ContentEncoding, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + ContentDisposition: o.ContentDisposition, + StorageClass: o.StorageClass, + Acl: toRawObjectACL(o.ACL), + Metadata: o.Metadata, + } +} + +// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object. +type ObjectAttrs struct { + // Bucket is the name of the bucket containing this GCS object. + // This field is read-only. + Bucket string + + // Name is the name of the object within the bucket. + // This field is read-only. + Name string + + // ContentType is the MIME type of the object's content. + ContentType string + + // ContentLanguage is the content language of the object's content. + ContentLanguage string + + // CacheControl is the Cache-Control header to be sent in the response + // headers when serving the object data. + CacheControl string + + // EventBasedHold specifies whether an object is under event-based hold. New + // objects created in a bucket whose DefaultEventBasedHold is set will + // default to that value. + EventBasedHold bool + + // TemporaryHold specifies whether an object is under temporary hold. While + // this flag is set to true, the object is protected against deletion and + // overwrites. 
+ TemporaryHold bool + + // RetentionExpirationTime is a server-determined value that specifies the + // earliest time that the object's retention period expires. + // This is a read-only field. + RetentionExpirationTime time.Time + + // ACL is the list of access control rules for the object. + ACL []ACLRule + + // If not empty, applies a predefined set of access controls. It should be set + // only when writing, copying or composing an object. When copying or composing, + // it acts as the destinationPredefinedAcl parameter. + // PredefinedACL is always empty for ObjectAttrs returned from the service. + // See https://cloud.google.com/storage/docs/json_api/v1/objects/insert + // for valid values. + PredefinedACL string + + // Owner is the owner of the object. This field is read-only. + // + // If non-zero, it is in the form of "user-". + Owner string + + // Size is the length of the object's content. This field is read-only. + Size int64 + + // ContentEncoding is the encoding of the object's content. + ContentEncoding string + + // ContentDisposition is the optional Content-Disposition header of the object + // sent in the response headers. + ContentDisposition string + + // MD5 is the MD5 hash of the object's content. This field is read-only, + // except when used from a Writer. If set on a Writer, the uploaded + // data is rejected if its MD5 hash does not match this field. + MD5 []byte + + // CRC32C is the CRC32 checksum of the object's content using + // the Castagnoli93 polynomial. This field is read-only, except when + // used from a Writer. If set on a Writer and Writer.SendCRC32C + // is true, the uploaded data is rejected if its CRC32c hash does not + // match this field. + CRC32C uint32 + + // MediaLink is an URL to the object's content. This field is read-only. + MediaLink string + + // Metadata represents user-provided metadata, in key/value pairs. + // It can be nil if no metadata is provided. + Metadata map[string]string + + // Generation is the generation number of the object's content. + // This field is read-only. + Generation int64 + + // Metageneration is the version of the metadata for this + // object at this generation. This field is used for preconditions + // and for detecting changes in metadata. A metageneration number + // is only meaningful in the context of a particular generation + // of a particular object. This field is read-only. + Metageneration int64 + + // StorageClass is the storage class of the object. + // This value defines how objects in the bucket are stored and + // determines the SLA and the cost of storage. Typical values are + // "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" + // and "DURABLE_REDUCED_AVAILABILITY". + // It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL" + // or "REGIONAL" depending on the bucket's location settings. + StorageClass string + + // Created is the time the object was created. This field is read-only. + Created time.Time + + // Deleted is the time the object was deleted. + // If not deleted, it is the zero value. This field is read-only. + Deleted time.Time + + // Updated is the creation or modification time of the object. + // For buckets with versioning enabled, changing an object's + // metadata does not change this property. This field is read-only. + Updated time.Time + + // CustomerKeySHA256 is the base64-encoded SHA-256 hash of the + // customer-supplied encryption key for the object. It is empty if there is + // no customer-supplied encryption key. 
+ // See // https://cloud.google.com/storage/docs/encryption for more about + // encryption in Google Cloud Storage. + CustomerKeySHA256 string + + // Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object, + // if the object is encrypted by such a key. + // + // Providing both a KMSKeyName and a customer-supplied encryption key (via + // ObjectHandle.Key) will result in an error when writing an object. + KMSKeyName string + + // Prefix is set only for ObjectAttrs which represent synthetic "directory + // entries" when iterating over buckets using Query.Delimiter. See + // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be + // populated. + Prefix string + + // Etag is the HTTP/1.1 Entity tag for the object. + // This field is read-only. + Etag string +} + +// convertTime converts a time in RFC3339 format to time.Time. +// If any error occurs in parsing, the zero-value time.Time is silently returned. +func convertTime(t string) time.Time { + var r time.Time + if t != "" { + r, _ = time.Parse(time.RFC3339, t) + } + return r +} + +func newObject(o *raw.Object) *ObjectAttrs { + if o == nil { + return nil + } + owner := "" + if o.Owner != nil { + owner = o.Owner.Entity + } + md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash) + crc32c, _ := decodeUint32(o.Crc32c) + var sha256 string + if o.CustomerEncryption != nil { + sha256 = o.CustomerEncryption.KeySha256 + } + return &ObjectAttrs{ + Bucket: o.Bucket, + Name: o.Name, + ContentType: o.ContentType, + ContentLanguage: o.ContentLanguage, + CacheControl: o.CacheControl, + EventBasedHold: o.EventBasedHold, + TemporaryHold: o.TemporaryHold, + RetentionExpirationTime: convertTime(o.RetentionExpirationTime), + ACL: toObjectACLRules(o.Acl), + Owner: owner, + ContentEncoding: o.ContentEncoding, + ContentDisposition: o.ContentDisposition, + Size: int64(o.Size), + MD5: md5, + CRC32C: crc32c, + MediaLink: o.MediaLink, + Metadata: o.Metadata, + Generation: o.Generation, + Metageneration: o.Metageneration, + StorageClass: o.StorageClass, + CustomerKeySHA256: sha256, + KMSKeyName: o.KmsKeyName, + Created: convertTime(o.TimeCreated), + Deleted: convertTime(o.TimeDeleted), + Updated: convertTime(o.Updated), + Etag: o.Etag, + } +} + +// Decode a uint32 encoded in Base64 in big-endian byte order. +func decodeUint32(b64 string) (uint32, error) { + d, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return 0, err + } + if len(d) != 4 { + return 0, fmt.Errorf("storage: %q does not encode a 32-bit value", d) + } + return uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]), nil +} + +// Encode a uint32 as Base64 in big-endian byte order. +func encodeUint32(u uint32) string { + b := []byte{byte(u >> 24), byte(u >> 16), byte(u >> 8), byte(u)} + return base64.StdEncoding.EncodeToString(b) +} + +// Query represents a query to filter objects from a bucket. +type Query struct { + // Delimiter returns results in a directory-like fashion. + // Results will contain only objects whose names, aside from the + // prefix, do not contain delimiter. Objects whose names, + // aside from the prefix, contain delimiter will have their name, + // truncated after the delimiter, returned in prefixes. + // Duplicate prefixes are omitted. + // Optional. + Delimiter string + + // Prefix is the prefix filter to query objects + // whose names begin with this prefix. + // Optional. 
+ Prefix string + + // Versions indicates whether multiple versions of the same + // object will be included in the results. + Versions bool +} + +// Conditions constrain methods to act on specific generations of +// objects. +// +// The zero value is an empty set of constraints. Not all conditions or +// combinations of conditions are applicable to all methods. +// See https://cloud.google.com/storage/docs/generations-preconditions +// for details on how these operate. +type Conditions struct { + // Generation constraints. + // At most one of the following can be set to a non-zero value. + + // GenerationMatch specifies that the object must have the given generation + // for the operation to occur. + // If GenerationMatch is zero, it has no effect. + // Use DoesNotExist to specify that the object does not exist in the bucket. + GenerationMatch int64 + + // GenerationNotMatch specifies that the object must not have the given + // generation for the operation to occur. + // If GenerationNotMatch is zero, it has no effect. + GenerationNotMatch int64 + + // DoesNotExist specifies that the object must not exist in the bucket for + // the operation to occur. + // If DoesNotExist is false, it has no effect. + DoesNotExist bool + + // Metadata generation constraints. + // At most one of the following can be set to a non-zero value. + + // MetagenerationMatch specifies that the object must have the given + // metageneration for the operation to occur. + // If MetagenerationMatch is zero, it has no effect. + MetagenerationMatch int64 + + // MetagenerationNotMatch specifies that the object must not have the given + // metageneration for the operation to occur. + // If MetagenerationNotMatch is zero, it has no effect. + MetagenerationNotMatch int64 +} + +func (c *Conditions) validate(method string) error { + if *c == (Conditions{}) { + return fmt.Errorf("storage: %s: empty conditions", method) + } + if !c.isGenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for generation", method) + } + if !c.isMetagenerationValid() { + return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method) + } + return nil +} + +func (c *Conditions) isGenerationValid() bool { + n := 0 + if c.GenerationMatch != 0 { + n++ + } + if c.GenerationNotMatch != 0 { + n++ + } + if c.DoesNotExist { + n++ + } + return n <= 1 +} + +func (c *Conditions) isMetagenerationValid() bool { + return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0 +} + +// applyConds modifies the provided call using the conditions in conds. +// call is something that quacks like a *raw.WhateverCall. 
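+//
+// Callers reach this through ObjectHandle.If; for example (names
+// hypothetical, attrs obtained from a prior Attrs call):
+//
+//	obj := bkt.Object("o").If(Conditions{GenerationMatch: attrs.Generation})
+//	err := obj.Delete(ctx) // fails unless the live generation still matches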
+func applyConds(method string, gen int64, conds *Conditions, call interface{}) error { + cval := reflect.ValueOf(call) + if gen >= 0 { + if !setConditionField(cval, "Generation", gen) { + return fmt.Errorf("storage: %s: generation not supported", method) + } + } + if conds == nil { + return nil + } + if err := conds.validate(method); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) { + return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method) + } + case conds.GenerationNotMatch != 0: + if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) { + return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method) + } + case conds.DoesNotExist: + if !setConditionField(cval, "IfGenerationMatch", int64(0)) { + return fmt.Errorf("storage: %s: DoesNotExist not supported", method) + } + } + switch { + case conds.MetagenerationMatch != 0: + if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method) + } + case conds.MetagenerationNotMatch != 0: + if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) { + return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method) + } + } + return nil +} + +func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error { + if gen >= 0 { + call.SourceGeneration(gen) + } + if conds == nil { + return nil + } + if err := conds.validate("CopyTo source"); err != nil { + return err + } + switch { + case conds.GenerationMatch != 0: + call.IfSourceGenerationMatch(conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + call.IfSourceGenerationNotMatch(conds.GenerationNotMatch) + case conds.DoesNotExist: + call.IfSourceGenerationMatch(0) + } + switch { + case conds.MetagenerationMatch != 0: + call.IfSourceMetagenerationMatch(conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch) + } + return nil +} + +// setConditionField sets a field on a *raw.WhateverCall. +// We can't use anonymous interfaces because the return type is +// different, since the field setters are builders. +func setConditionField(call reflect.Value, name string, value interface{}) bool { + m := call.MethodByName(name) + if !m.IsValid() { + return false + } + m.Call([]reflect.Value{reflect.ValueOf(value)}) + return true +} + +// conditionsQuery returns the generation and conditions as a URL query +// string suitable for URL.RawQuery. It assumes that the conditions +// have been validated. +func conditionsQuery(gen int64, conds *Conditions) string { + // URL escapes are elided because integer strings are URL-safe. + var buf []byte + + appendParam := func(s string, n int64) { + if len(buf) > 0 { + buf = append(buf, '&') + } + buf = append(buf, s...) 
+ buf = strconv.AppendInt(buf, n, 10) + } + + if gen >= 0 { + appendParam("generation=", gen) + } + if conds == nil { + return string(buf) + } + switch { + case conds.GenerationMatch != 0: + appendParam("ifGenerationMatch=", conds.GenerationMatch) + case conds.GenerationNotMatch != 0: + appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch) + case conds.DoesNotExist: + appendParam("ifGenerationMatch=", 0) + } + switch { + case conds.MetagenerationMatch != 0: + appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch) + case conds.MetagenerationNotMatch != 0: + appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch) + } + return string(buf) +} + +// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods +// that modifyCall searches for by name. +type composeSourceObj struct { + src *raw.ComposeRequestSourceObjects +} + +func (c composeSourceObj) Generation(gen int64) { + c.src.Generation = gen +} + +func (c composeSourceObj) IfGenerationMatch(gen int64) { + // It's safe to overwrite ObjectPreconditions, since its only field is + // IfGenerationMatch. + c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: gen, + } +} + +func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error { + if key == nil { + return nil + } + // TODO(jbd): Ask the API team to return a more user-friendly error + // and avoid doing this check at the client level. + if len(key) != 32 { + return errors.New("storage: not a 32-byte AES-256 key") + } + var cs string + if copySource { + cs = "copy-source-" + } + headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256") + headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key)) + keyHash := sha256.Sum256(key) + headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:])) + return nil +} + +// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account. 
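+//
+// For example (project ID hypothetical):
+//
+//	email, err := client.ServiceAccount(ctx, "my-project")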
+func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) { + r := c.raw.Projects.ServiceAccount.Get(projectID) + res, err := r.Context(ctx).Do() + if err != nil { + return "", err + } + return res.EmailAddress, nil +} diff --git a/vendor/cloud.google.com/go/storage/storage.replay b/vendor/cloud.google.com/go/storage/storage.replay new file mode 100644 index 00000000..07ed1bfa --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.replay @@ -0,0 +1,30067 @@ +{ + "Initial": "IjIwMTktMDUtMDJUMjI6MjM6NTMuNDAzNDMyMDEzWiI=", + "Version": "0.2", + "Converter": { + "ClearHeaders": [ + "^X-Goog-.*Encryption-Key$" + ], + "RemoveRequestHeaders": [ + "^Authorization$", + "^Proxy-Authorization$", + "^Connection$", + "^Content-Type$", + "^Date$", + "^Host$", + "^Transfer-Encoding$", + "^Via$", + "^X-Forwarded-.*$", + "^X-Cloud-Trace-Context$", + "^X-Goog-Api-Client$", + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "RemoveResponseHeaders": [ + "^X-Google-.*$", + "^X-Gfe-.*$" + ], + "ClearParams": null, + "RemoveParams": null + }, + "Entries": [ + { + "ID": "f5f231bed6e14b7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:54 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZvgYBWgsCwPaGI9bo1ccC0WCBc8kJgydTwioDtXR9xps4HiDoKXI-vjYUl876SMqF0JhmhaEBgvxrIL9Y989YCFrH65xGys_r1JbPdi9M9N0kS3M" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "9a9914424ef59619", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFvRYrCleVqpn0QshSvzW5I1-8o7N6vGYh8o5G1f-AHnsX2N_x-NKJrvlxnXqm9auw5gMoWFaJTSTtKL5y85WlQ_eAjmmlrkD4tbHYBZJ386xgaZw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "17f2abbdd781a33b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYoTmTG5mxpFGPvmECUTlGMlQwhfmqGsZtBtZ9xV89Pw3q-p5BBeX_3imdofr_7EBT7nBm4v5alpg45Zi8a8ET28qBH2xfNe4n15HR-1fhGou2wQU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU1LjEwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NS4xMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "6752f5a9a036af11", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4bFsk4ylv96GsTkuDKG--hVaCR_UEhZ_fAzMGt5Eu5ZKncHOLjU_f2PcNP9saFGW-UkH9jXwt_nuR0G2zXOBjMJmLdd7Ml61bGMMrJeVa0OtcGpM" + ] + }, + "Body": "" + } + }, + { + "ID": "9c25646df7aacad9", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq6CjN9PjjzT3LmHxW_tU_ciQ1rahetoQGbX_gX8EC5il7tPJi2yxi5VZxnDNrp1h14b7Ix8tnvtkHufAyO1-lMRutdHK5GSzonff78Nm6KPAKN5fU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "f795b9adcb1b546e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2-mWQyRDbFSpF6U96vQpaBYr74NgiUWh3-KZnLWaFYnhQti1tgKWNtL15YgK8blaRSnzGeACPA6jNuM34yhr7bxztrdN2tobEQAzD5RVgzpqx14w" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU2LjQwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1Ni40MDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "2ee3f84c4e4045fb", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0002?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UplJEr-Hxa3hDFT5ozLEHYhHfaxlYFpc9Vwm8AL831-w_7BBgxHjjEsU8Br_uLnLes0h9hz37iuE9V8uVZ2liHY7ZD4piNH31oyapjCtwyXrukIP94" + ] + }, + "Body": "" + } + }, + { + "ID": "16f19dbf8e206756", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:23:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqvp5XjvB8nhNuz-bTeN9OklTfiBGldYKkcY13JF6oUfpV0z_jwoEQD3B3Ss3wWpaSmZfePjo7fkkr-hP3jbrUazNHaqQliiHqOBSNmSoPmwJpzfOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "949f5ce411d6f672", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + 
"3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpVeVcmmuUyOt3Hbja89_Ewi6GRsJtRduqK93OT4Ys1aK5GqDWeGxyDbczUyRLeUYvZgtJzYLwVOOUszqAF4ipSXgZ1L_byd9cJ7ttVfQ_ceQBXxY4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1NC42MTBaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJva
mVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b75303fbdafb66d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "64" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:58 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDnNlC3m95GplHjE79aqzhtwgfJCWQjHaCFG4i7qmTFliz2gdE4OiOnKAPIoNqxEngE35065YXNYA65aMSSeEluDKmQ__rJXcS_DpRdoYP4rZIyPo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OC40MzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxhYmVscyI6eyJlbXB0eSI6IiIsImwxIjoidjEifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "831805b62d969707", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiYWJzZW50IjpudWxsLCJlbXB0eSI6bnVsbCwibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2495" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8yNb3V9Kq8zPa_pdVrJIYv83v4fu6xAHwktfTz_Cy4K1rpi8xrDzYmw5wICaazfMcAiYPhM8r4Y6WkeOyeCRInvkJ6ndduhNN_dgu1U59uI5F3Qc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS4wMzJaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InBy
b2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "8a816d061fe9e7e0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "77" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:23:59 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbjywEmDPkqxln7-Nx_8ngRxoWvncfCx1fGVpPZGEjjmg8OJgv0uaczxapjlNeEcvMnqWI_RVzG6_588QaO8nCnPVnE2kkIek3D_t6UNL9CCPYLRc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyMzo1OS41MzBaIiwibWV0YWdlbmVyYXRpb24iOiI0IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FRPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVE9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVE9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBUT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVE9In0=" + } + }, + { + "ID": "6a60397ebae323e7", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiYnVja2V0UG9saWN5T25seSJ9Cg==", + "dGVzdA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UokOvJnDKKHAQOa8mJLrxFpWWvQ2U_BC_3uI0Z4x870Q068evHio_t_YudbSq614h77-ofhBsyHpoknWnm_YrnXxkHzopreKoMBykFIcbsSB8TDKEE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRQb2xpY3lPbmx5LzE1NTY4MzU4NDAwNTUxNjEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5IiwibmFtZSI6ImJ1Y2tldFBvbGljeU9ubHkiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMC4wNTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDAuMDU0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAwLjA1NFoiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJDWTlyelVZaDAzUEszazZESmllMDlnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seT9nZW5lcmF0aW9uPTE1NTY4MzU4NDAwNTUxNjEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b
3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJocUJ5d0E9PSIsImV0YWciOiJDUG11NGJueC9lRUNFQUU9In0=" + } + }, + { + "ID": "32d392d1da32f27b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl/user-test%40example.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "111" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:00 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], 
+ "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoau0xgFp6ib5wM0bBWjRlklvDRPOu0VZ-LFCeENUWXutmkXSfgUbtr2Nuefb7Pm_yLvCNqtB9B6k_N1V7AlvkN4_JEz67ZSXQ_sAD5L1teQIpGiqA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3VzZXItdGVzdEBleGFtcGxlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InVzZXItdGVzdEBleGFtcGxlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJlbWFpbCI6InRlc3RAZXhhbXBsZS5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9" + } + }, + { + "ID": "6e3d0eda38a3ab6e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "59" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6dHJ1ZX19fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "663" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpSPFJHLzusxOr_OvhStJlWEpOs2EuthMO0Ys6pS9bsQeP0fthp_VUZfa8_sN8TX6PJYpxIdFlxB2QUaIujot1cUrssoU74XFrAwoqhlmiE5y9Aw-w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMS4yNDJaIiwibWV0YWdlbmVyYXRpb24iOiI1IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOnRydWUsImxvY2tlZFRpbWUiOiIyMDE5LTA3LTMxVDIyOjI0OjAxLjIzMFoifX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FVPSJ9" + } + }, + { + "ID": "8f8dbb687dc49edb", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGpW2zLlN7nAgV5IVkKU3kx4QWHCkzAgMa-QPC1PyCol8CP9W605bMUmFMeerbR4enzmeNMvtb4a2HzBPUZ296YfGdtkt_6Guq82E226xzC5TPq4w" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"invalid","message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.INVALID_VALUE, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, errorProtoCode=INVALID_VALUE, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., unnamedArguments=[]}, location=null, message=Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only., reason=invalid, rpcCode=400} Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INVALID_REQUEST_FOR_BUCKET_POLICY_ONLY_RESOURCE: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. Read more at https://cloud.google.com/storage/docs/bucket-policy-only.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Cannot get legacy ACLs for a bucket that has enabled Bucket Policy Only. 
Read more at https://cloud.google.com/storage/docs/bucket-policy-only."}}" + } + }, + { + "ID": "42ce6421b2c9fae4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13358" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJwxja3nYzWYbg_I5gWvOiow2ORuo8tNA-_Vzw7DX_YVhhb6_p1giUk3WjUHWt-lyDA13adhPGi4BDfIXzQyf-ZL3lsHoa2sNm29BIqRznw4mkAdE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., unnamedArguments=[]}, location=null, message=another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly., reason=forbidden, rpcCode=403} another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"another-thing@deklerk-sandbox.iam.gserviceaccount.com does not have storage.objects.get access to go-integration-test-20190502-80633403432013-0001/bucketPolicyOnly."}}" + } + }, + { + "ID": "0ca0bddf513110f4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "45" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnt9fX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "624" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpdDWPDU9-gPzHEieE0Rqx_40yf8fJLhwAP6fVsdS4F7I7sWj0h-Ti7VoDWciZgI_lgNUB7qyh08wjTAxrTLTsSiIYt2GR6ksxhDRupMoPWni5kXmE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FZPSJ9" + } + }, + { + "ID": "8a8bee740102593d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2964" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Etag": [ + "CPmu4bnx/eECEAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq59Mf8Ea4fgpDnUzupeIP3bGt3VpyI6HjL4KJtDKAD_h-Ua-AJSX3u3x4TCsx2MZcIVhMs9pW9SWsrIcsvsr3kGt2Je9W87LElbN5dlw02EItBcR4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldFBvbGljeU9ubHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0UG9saWN5T25seS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldFBvbGljeU9ubHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MDA1NTE2MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUG11NGJueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYnVja2V0UG9saWN5T25seS8xNTU2ODM1ODQwMDU1MTYxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0UG9saWN5T25seSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQwMDU1MTYxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BtdTRibngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldFBvbGljeU9ubHkvMTU1NjgzNTg0MDA1NTE2MS91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LT
IwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRQb2xpY3lPbmx5L2FjbC91c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJidWNrZXRQb2xpY3lPbmx5IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDAwNTUxNjEiLCJlbnRpdHkiOiJ1c2VyLXRlc3RAZXhhbXBsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJ0ZXN0QGV4YW1wbGUuY29tIiwiZXRhZyI6IkNQbXU0Ym54L2VFQ0VBST0ifV19" + } + }, + { + "ID": "91ba2c22c5f5de9a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketPolicyOnly?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrwotKay181GHzZsWfp6BzJA4FDOIfK2s1WlzB9p8QsIEX42AtvMhkgLWqSIyEhb-MSv9snqx0WRwUs5sDN3_5NVoFTzQeLkDBR9mbsixI-udc8SLI" + ] + }, + "Body": "" + } + }, + { + "ID": "43fec6c3a8c8cb63", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY29uZGRlbCJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3161" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Etag": [ + "CNHLq7vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrYnzZRz4GFV3BsHoF-vv9v2Lc13Wp6-P5iowSZFjqyykVhYZ6CkesIVxA2v2xNCbVeWgCBXCFFt9fje73cis1cECwwqrYLr5QF5bZCy4TsJ-LT8k" + ] + }, + "Body": 
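Annotation: the PATCH record above (`0ca0bddf513110f4`) is the flip side of the earlier failures — its request body decodes to `{"iamConfiguration":{"bucketPolicyOnly":{}}}`, switching Bucket Policy Only off, and the 200 response body confirms `"bucketPolicyOnly":{"enabled":false}`, after which the same ACL listing succeeds (the 200 record that follows it). A minimal sketch of the corresponding client-side update, assuming the same `cloud.google.com/go/storage` client:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	bkt := client.Bucket("go-integration-test-20190502-80633403432013-0001")

	// Issues the recorded PATCH .../b/<bucket>?projection=full request:
	// sending BucketPolicyOnly{Enabled: false} turns the feature off.
	attrs, err := bkt.Update(ctx, storage.BucketAttrsToUpdate{
		BucketPolicyOnly: &storage.BucketPolicyOnly{Enabled: false},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucketPolicyOnly enabled:", attrs.BucketPolicyOnly.Enabled)

	// With the policy disabled, the ACL listing that previously failed
	// now returns the recorded project-team and per-user entries.
	rules, err := bkt.Object("bucketPolicyOnly").ACL().List(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range rules {
		fmt.Println(r.Entity, r.Role)
	}
}
```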
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb25kZGVsLzE1NTY4MzU4NDMzNjg0MDEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsIiwibmFtZSI6ImNvbmRkZWwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMy4zNjhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDMuMzY4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjAzLjM2OFoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbD9nZW5lcmF0aW9uPTE1NTY4MzU4NDMzNjg0MDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbmRkZWwvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb25kZGVsL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbmRkZWwvMTU1NjgzNTg0MzM2ODQwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29uZGRlbC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbmRkZWwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0MzM2ODQwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29uZGRlbC8xNTU2ODM1ODQzMzY4NDAxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMD
EzLTAwMDEvby9jb25kZGVsL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29uZGRlbCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQzMzY4NDAxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05ITHE3dngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDTkhMcTd2eC9lRUNFQUU9In0=" + } + }, + { + "ID": "ba03d9efb1402903", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368400\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12249" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-mcvaL-eBgBoviwBg_r8LyEEK3JNyFaj0cFy2neOMo1pVkWpkGQ-3gz8vQNtAGoX-Q7_CMYLNv_I0pOoy5iRr-MdYKny5ICdC1s3ji5DwL7sqmn0" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/conddel, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/conddel: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/conddel\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/conddel"}}" + } + }, + { + "ID": "e26af2cd4673cc7c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationMatch=2\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12051" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uopfd5TJBzhGauzgyZW0h-TNFtDHz34k0kjbeuTeQPnMGBaiSFz9FdWA2gxp7qKp-V586voh7kqHgnVSW_QI4bWEuC35pj8NbCmmpNL_9pstpCgckw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Expected metadata generation to match 2, but actual value was 1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "857f8a30eb55b023", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026ifMetagenerationNotMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 304, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uprv3QMjU4zz49ScYI4YaY9FitNwO7wGMQ1KZ8Y99w4D7keznFfA8M80bMdKvSH55jsWsDoLKcQ1VvIqIEUw88NqSfUM7E5SR_y5zfDaCf_uHSlgPM" + ] + }, + "Body": "" + } 
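Annotation: the four `conddel` DELETE records (above and immediately below) walk the conditional-delete matrix: a stale `generation` → 404 `OBJECT_NOT_FOUND`, `ifMetagenerationMatch=2` against an object still at metageneration 1 → 412 Precondition Failed ("Expected metadata generation to match 2, but actual value was 1"), `ifMetagenerationNotMatch=1` → 304 Not Modified, and finally the correct `generation` → 204. A minimal sketch of driving those preconditions from Go, assuming the same client library; the generation values are copied from the recorded upload:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"log"
	"net/http"

	"cloud.google.com/go/storage"
	"google.golang.org/api/googleapi"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("go-integration-test-20190502-80633403432013-0001").
		Object("conddel")

	// Stale generation (one less than the real one): recorded as a 404.
	_ = obj.Generation(1556835843368400).Delete(ctx)

	// ifMetagenerationMatch=2 while the object is at metageneration 1:
	// the server answers 412 Precondition Failed, exactly as recorded.
	err = obj.If(storage.Conditions{MetagenerationMatch: 2}).Delete(ctx)
	var apiErr *googleapi.Error
	if errors.As(err, &apiErr) && apiErr.Code == http.StatusPreconditionFailed {
		fmt.Println("precondition failed, object kept")
	}

	// Deleting at the object's actual generation succeeds (the 204 below).
	if err := obj.Generation(1556835843368401).Delete(ctx); err != nil {
		log.Fatal(err)
	}
}
```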
+ }, + { + "ID": "4c69a7dc19302935", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/conddel?alt=json\u0026generation=1556835843368401\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoiZa1zqMV-8ZkeUrvT6Xi0uJul6yGWWth5s3YbFtA2p0-6vc_54sNtEbxbnsASZyMDXLelHaCSixgcVT6JDVELrXHDim9gHcjjlSTF6BsHWu181A" + ] + }, + "Body": "" + } + }, + { + "ID": "834f05b1eb2dbc32", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMSJ9Cg==", + "TuDshcL7vdCAXh8L42NvEQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozIFQVqWdhJxDLMyV7dfkeraSOw2Mw_AsI_aCWnAUudrz4HjQgZ4kbnvKXJfNF3SMQhxzxZHhk1Otmw7PgPxL7HyDkPDyK1DeHDc8GyfY1B3Q3YfA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "34a1730e8bbe1d09", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqMiJ9Cg==", + "55GZ37DvGFQS3PnkEKv3Jg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVrbP1vUH6TU_zfm_Aaca624z-91MoULnLIBcn9Hg4htv5T5Z6B4XYJMFZUuDjWWeBhR6EpG1UZL4iJe0TJybYQ2CCsB_k63CcOex39xaOIT8UYUA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "b7c2c8ec1e65fb6d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9Cg==", + "kT7fkMXrRdrhn2P2+EeT5g==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrEgFAbU2gAqyKMITI9vr9kAgIBfNY3ZGs1l2_X4e-fVtNb01M9N81N-83LdChVzrk8iB09yuKUKdxRc0nMYrgy7S1fqPwbJdsQ6TJfMB05JJj6qr4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "0dbae3d4b454f434", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3366" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Etag": [ + "COqurrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk7CzWGAqaT12X_fTymveyPtsPJIPHD814QaAOIQLA_AJo_4OtnQOOXJM2jJhIZ1Co4NgEmboDUG9su4SN5fDTHObNh9elLts9VpUJ9iSYrIsn-Os" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3
dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "3793882b91d38a07", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpzUH_BRjU20OGbDAW2dy0lx9Gk_1Ko7zks6KG6OEpq0pBPfCIBjkCeIZTxKxtvxnOd1hqlZF7SlbNoyNUqX5UFXpZY5NjP5GTeUkm512vaBR5vnDI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "39924c2826bcd33f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Etag": [ + "CJ2Xkrzx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgOr-dpWoY-JJi6vRigJX3Mpi_WQ4zWvWKK382DCP-mzWSnrpbaOzijhTCdNz6pmXskVVs30APwlqZgvb-S6xAwXqKJG5n6832Py4UlTdDR_INTew" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyIiwibmFtZSI6Im9iajIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiQ0Mxd2x3ck1PSXEwZHZNa015bFVoZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajI/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ1MDQ5MjQ1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoyLzE1NTY4MzU4NDUwNDkyNDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDUwNDkyNDUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKMlhrcnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoialY1QVZRPT0iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9" + } + }, + { + "ID": "89ea9ab5e21a635e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqj9dj5PbUhIU3baBv98vMViDP-BnVPs0APrFYL4jSbKc7fK6eAGoyDfsccGzxK-t0lGvozizW4ltrga8DXo_oxlZ9-k5a85v9i0PWTFIisPC7xuL8" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b27c106562362f6b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:06 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIo3JJa2rfg9_o3bXN_5QU6XU_iIrWYfuEuvKTQL0O5tjIHkODIDd4biyHxjbGNFnrQNbkiEUXWEBlo-3fTVgfFTOWuaPpgae-SafTBP8h5X5ddJ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImV
udGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "ca654b46531c4cb2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqINI0Ii418NlxoYErOrjhTKb3-G3n-8h1Ryc_4YT4Tksc7WXA9m4CcJWInyhJahYC-UUrM47O5EK-3FUt3toZOZxWlbwhE6Sfnra0H-CqjmvxWyRs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "0186889a60e651db", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFTK3QnFf4m9htpY4s3P9_NHuDiQl8InpJoWPOaQHo0XaYnSoSqkH7CcUbm0-sWamsCZbd4DHoXHLCrea3ff9rLpWlptkL0aMKV_Dleb1Xm-j14fQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "a253776f79c46fe1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3446" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoadifz-4B7kC7DvXOyrNa5omK5-DXUNJBtT_d5I1jciPUvGlRm1L1vkvVMU1Y5n9zFig2CF_qqBLlcdvoYCaUeaNUu706L0fKJhJkA8vV5JUvFq0E" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNoQnZZbW92ZDJsMGFDOXpiR0Z6YUdWeiIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmovd2l0aC9zbGFzaGVzLzE1NTY4MzU4NDU1MTEwMTgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcyIsIm5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuNTEwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjUxMFoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS41MTBaIiwic2l6ZSI6IjE2IiwibWQ1SGFzaCI6InVlei9oSjZ3QXJlRFFuY2NEVWR4Zmc9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcz9nZW5lcmF0aW9uPTE1NTY4MzU4NDU1MTEwMTgmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iaiUyRndpdGglMkZzbGFzaGVzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai93aXRoL3NsYXNoZXMvMTU1NjgzNTg0NTUxMTAxOC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqJTJGd2l0aCUyRnNsYXNoZXMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmovd2l0aC9zbGFzaGVzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDU1MTEwMTgiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3F1cnJ6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYm
plY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqL3dpdGgvc2xhc2hlcy8xNTU2ODM1ODQ1NTExMDE4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmolMkZ3aXRoJTJGc2xhc2hlcy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iai93aXRoL3NsYXNoZXMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTUxMTAxOCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPcXVycnp4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoib2VvK0ZBPT0iLCJldGFnIjoiQ09xdXJyengvZUVDRUFFPSJ9XX0=" + } + }, + { + "ID": "dc8959751769ee9c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=ChBvYmovd2l0aC9zbGFzaGVz\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLeV9n2aiAq15pl3CqEWOt3_OYHsxbG0lHFIKr7Emh5btUEXLXl4nZWPi27LinKW7i0LYvp-HZK5b9E742MR_PNFe87treTpz_QgUmchGPKLcdZVM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwibmV4dFBhZ2VUb2tlbiI6IkNnUnZZbW94IiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEiLCJuYW1lIjoib2JqMSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY2
9tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "23f7f167269db6b2", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=1\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4hY4jSbETgB5jf-AU8_b1sRvfTSrlt_Pt6UXiuTte4GtueVBgDwuMEliwE2-nOSiRE6juXOCbR1LQlRrpek1TLeRRf1cCVbz90YcCIGhWV_zFz0o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "518c5855f7f2035b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGaiKkmr1oBduBvG8APGBVoM_Ve7zHw4TBuyV3QYcFr9SYzEnATEwE5P6BH-yBsVXSmaRLej1a55x18Bra_ZAC7nYh2UKvWM66JGE3U1muaDBabyw" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "5887398d8cd8c906", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGAvr7ojM3PcmnrTMTX-TXgYKzfo-3gzwYU_l9OaCfpth_ad2lWUJf6w6B8pjEGsAFZJvNueCHYGSJdx6YJ7NAe71oah1xi6lGQvQkWEwAl0fXd7Y" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "81d53e304c7ed90d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "6581" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur9XEMeCefUS1K7QkIxrxZboQ1zQ5SkrEUZMzrQdNBNAuDQnpDczXzJvF-rZmxFVYSdMySScTTu-FICOdI91b-fQVRomrqAxJwgn60FoObIVVpgkog" + ] + }, + "Body": "{"kind":"storage#objects","nextPageToken":"CgRvYmox","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="}]}" + } + }, + { + "ID": "0be1bdae5ba86bfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=2\u0026pageToken=CgRvYmox\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3187" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UocXesX_BUCirPixQFUhYfAIoR0Os309HMGHarzTGrqH1PkmHuGaaiNSH_pJq8nnWUXEnjk1cvuPGJWCF21t7EJQtCIGBWQoPBNV6OpvALZGdpPXuI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RzIiwiaXRlbXMiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIiLCJuYW1lIjoib2JqMiIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA1LjA0OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNS4wNDhaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDUuMDQ4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJDQzF3bHdyTU9JcTBkdk1rTXlsVWhnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMj9nZW5lcmF0aW9uPTE1NTY4MzU4NDUwNDkyNDUmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajIvMTU1NjgzNTg0NTA0OTI0NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NTA0OTI0NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTg
wNjMzNDAzNDMyMDEzLTAwMDEvb2JqMi8xNTU2ODM1ODQ1MDQ5MjQ1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ1MDQ5MjQ1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0oyWGtyengvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJqVjVBVlE9PSIsImV0YWciOiJDSjJYa3J6eC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "592a3fdf8b5a83e5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoS2FM55Dkg1PRaNQBzsu5KBns-svmRrQ5byrUGRHgnsBQQDdE3ZznljVTrNq3wDAClddQ4zbCMQSLdqxPPNlom-n1tMg1hO8s6kS8_CTWy2SOM6As" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "b01846866b585241", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=3\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo22tWMx8uW6afXbfFrFT8UZzPbsqPItYMdRAPHNEuksgeqWI2US_xblUG6C8WgcHvfEeR_19eVmBTmW6QE7rmosaIm_raZxW9FuCHHiSi8TJnZ1CI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gs
erviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1
","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "2eeefee032c6e805", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + 
"Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq55nB00t16AHTo1gSxBFUNplz5SJcGYBOeCPsK-MD5OLO_kHKT3YNZq69AAHpozaWVrTU_jDQqiqMFSoPhhmlF2dR1mcq6Ihk_y8uuFY6FM4O_hmI" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@d
eklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gservicea
ccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "11bfe17bc978cdfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026maxResults=13\u0026pageToken=\u0026prefix=obj\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "9705" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotgN85lDspseXEgwnXhmfSmio9oOxSzhqtyabApVH0KQz_aVg3DbQ7m0Z0L9SzPzzmNEUfU6xhAT5xQAXf-wvY1NHDFxdf9IJ6YoVNhN0aHN-75hY" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:04.646Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLnS+bvx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="}]}" + } + }, + { + "ID": "c05bfa6f1a43381b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:11 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + 
], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVVjXVa2SMY6cjYeIgdcZOivQwE2Crz6UPZs2VXVBFMwReZs7kKbETuTMwuEMHKKm_LIrk-o6jS07MimubSWVxGMzT1BtCacU1X2Go_RckmyJPZso" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "f2932604385db16e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrADckAOw2vMHdy0qy02f20x-8s_Ue81-NYwSpKeqp6Zx6eCPReLM6qXUyxFz0xHNGob5WJA8J4ctl6ZFzL2fEVwBMNY8xxzpkKOAwSu9n9e0OE63E" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "8581afc6216ad2cb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + 
"success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrkoYpGTn57WC5t-sItsEdWimp47AtSuqw8autFOD3TirChrdQThg_Fh-kykfgvnTaOyiw1InKeYs0Z2MISmjUpu4uHUUGDKTX6W0zQEuUks7i2BqQ" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "ae129e597d5dd3e8", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"082d70970acc388ab476f32433295486\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845049245" + ], + "X-Goog-Hash": [ + "crc32c=jV5AVQ==", + "md5=CC1wlwrMOIq0dvMkMylUhg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoaoET13zBO6-kmfarxy5tCZhmqc2oO38xOd6m6OQ55e-MyyKYM35hNakm5xYdWaoUNsKwVCiklp4HtYE8afVObxcDERuTCgyTw-olewCN6VBwd-H0" + ] + }, + "Body": "55GZ37DvGFQS3PnkEKv3Jg==" + } + }, + { + "ID": "11acf26dc7680b3e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up_XtsS2bu4fnGI-Z85gqz1MECYqweRqin42arAyEfpJpZwmadI1-Op9V7n-q3UaYyRtUWFR7an7zKxWhJ_CB6PXz-C55C1nPoI7EWVEV2oXcS-q8c" + ] + }, + 
"Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "ff72204b0c538268", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj/with/slashes", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"b9ecff849eb002b78342771c0d47717e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:05 GMT" + ], + "X-Goog-Generation": [ + "1556835845511018" + ], + "X-Goog-Hash": [ + "crc32c=oeo+FA==", + "md5=uez/hJ6wAreDQnccDUdxfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrjLdnTX47-_XGNBlBZ0rEgsIkYJMzixBaVmhjn8IsK66UpQAUO-njMM-WznI-DBNIbXVTcPQKZBNe2ZHZEskfuRjsFwMjtooyUH_PqTfZnARVv8M" + ] + }, + "Body": "kT7fkMXrRdrhn2P2+EeT5g==" + } + }, + { + "ID": "7972502f866e015e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-15" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNiJIZYFETpngMkDItRU7PtN7Fv4bDU7Kfvndst1JR2eGmW-tOMTfng8Abx6EWwPvvodLkSk-1TM4Gywxzh3c24gPDN4NsQMuh40Mfp0Jvg1syOls" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "251a62254c59cbd1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + 
"bytes=0-7" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 0-7/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UobLFpkqodTM1bHuzS0GzRCKZypPQkf3nH1hbOexCjgjAcheZhV_zxJRyLhOYZoW8ABEaVMJLULNrzAm1VZs4JrTdtT46fYIcQe7OBeJp0aHI7Lr5s" + ] + }, + "Body": "TuDshcL7vdA=" + } + }, + { + "ID": "2482617229166e50", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-23" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uon8ZtKgOTqFm36bWHd3hf535jPGQBEX9j2yPrr11_ycAJjfI4A-mFWMTsFRR3nwSo68784B9TUPUjQd0cib0QIrfBjfDcz91p6oPJG3VpV9afywIY" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "74ff0c5848c7dba6", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + 
"Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:12 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKc-08ZJEWVbyaV84xKlnrHkl2PBuBUQP6xtx6TzZW0fsjyZ-SHshiy-QMGCuP1UF4x1ettHyDvRbAleHIXy4y7wMwoUWST2NKs_0oRA0oVqOoKtQ" + ] + }, + "Body": "" + } + }, + { + "ID": "90888164f5d2d40d", + "Request": { + "Method": "HEAD", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTSlg3r_LSoiqQyJVJDzL-0xNj6jlSNp9zLUmlPtu3xbhCLBDNAxY9CRbyEjw7UudDcsFOe43C3QlsVjoBJln1q179ZspYWXY-fHqjrztAfJkZUpU" + ] + }, + "Body": "" + } + }, + { + "ID": "7961f3dc74fa0663", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=8-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "8" + ], + "Content-Range": [ + "bytes 8-15/16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], 
+ "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJjC8HFLb8u_EaJVF0_TUOhNWhbooW0oANE8_LPjoIdLcoU42caZe7LjgbTIjCb0Ss3horP2noxirF3u3FGq0Yoh4rjuHBVhwZcemZHnKLgJUUbQQ" + ] + }, + "Body": "gF4fC+NjbxE=" + } + }, + { + "ID": "af8c1fcfa456592a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=0-31" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqq6ro5-i6q8pIDup0yW35uFEynuLK95P8OSFhj7b70DK33tjv3fkOdkobsfwplOAGuNME2bLjQuyy8mhd2xBbNjyjvXt13Nc48PB4M2t_46RoM3Ak" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d312c241da5bd348", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Range": [ + "bytes=32-41" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 416, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "167" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 
01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAvzLs-GgdTPEGM2kvABFTxehSb-DhZQ2Y31nQ9ad0RMmfIMtVGRBXvkXo7FwGAy78ZHMSGWRKG-DYMy6JU9PCM4Tpo8PDmm4-rHa_LVpH2dFN-JM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+SW52YWxpZFJhbmdlPC9Db2RlPjxNZXNzYWdlPlRoZSByZXF1ZXN0ZWQgcmFuZ2UgY2Fubm90IGJlIHNhdGlzZmllZC48L01lc3NhZ2U+PERldGFpbHM+Ynl0ZXM9MzItNDE8L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "2bbd010d0173c966", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZDScoUU4zzBoXc21jt6C1vcW1SrU6ELiWodrCID-OoNKVMbecv_DSgHZATQGs4GS-BebkzqdalqTq5C77aKXQMxiks-9A5kyrc8N4aY9L5YndaI8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMD
AxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFFPSJ9" + } + }, + { + "ID": "e3522e1cc7aef940", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2570" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Etag": [ + "CAY=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5x_r1PKMQrnKbsfUWRR6wBwFQTM8U9mGwyq_fK56rrHE_UoLDxLRpiuaFGLVi9L4Hj67UXH76drA6KQQQgOsMVo0YEl1pVS2P7OKQ64WF4NubRCU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowMi4zNjFaIiwibWV0YWdlbmVyYXRpb24iOiI2IiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FZPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQVk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBWT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQVk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBWT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6ZmFsc2V9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MiIsIm5ldyI6Im5ldyJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQVk9In0=" + } + }, + { + "ID": "77ed46692038573c", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpBnZgUt8GyAer5VG-_nEuf3YizRjF2t4MS6vOflI_Vid1-zVpQ99TuphB3pZjPZNI5ZoHJxCmDv3lyLwyhUDvk_p_NO9x8qZTEpjf1EOO5uxRsOxo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDE0NDc5MiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC4xNDRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuMTQ0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0LjE0NFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQxNDQ3OTImYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF
0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvcHktb2JqMS8xNTU2ODM1ODU0MTQ0NzkyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb3B5LW9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDE0NDc5MiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQxNDQ3OTIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0MTQ0NzkyIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ppcXZjRHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDSmlxdmNEeC9lRUNFQUU9In19" + } + }, + { + "ID": "9187dbb998c64d40", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb250ZW50RW5jb2RpbmciOiJpZGVudGl0eSJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3299" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:14 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpyrF7U9KSc3XcJb-T_KGf3Fg2yhFUjJqTl06wpqqhG5IT6chgLCWKnLTuamfHtVq8XcP7acZy4TGyKIEvZ4L36TGTfSM8Zp_JyF8K4rN5p375VBMc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTYiLCJvYmplY3RTaXplIjoiMTYiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMSIsIm5hbWUiOiJjb3B5LW9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NDc0NzU0MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNC43NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTQuNzQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE0Ljc0N1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NTQ3NDc1NDMmYWx0PW1lZGlhIiwiY29udGVudEVuY29kaW5nIjoiaWRlbnRpdHkiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb3B5LW9iajEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29weS1vYmoxLzE1NTY4MzU4NTQ3NDc1NDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvcHktb2JqMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvcHktb2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU0NzQ3NTQzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yY
WdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb3B5LW9iajEvMTU1NjgzNTg1NDc0NzU0My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29weS1vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29weS1vYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTQ3NDc1NDMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmVQNHNEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkNUNmRUQT09IiwiZXRhZyI6IkNKZVA0c0R4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "c199412d75fadc45", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "193" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOlt7ImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9XSwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm1ldGFkYXRhIjp7ImtleSI6InZhbHVlIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2046" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKO5A_yihnTzTSNbXxyLm-I3MkyC58APsD1U6RZ5_xpjjjL3nXVZIyACkeiDKXRZyU_oP1Yt0FeX23ONp6JeNyoy0J0kW8GwGMgPeN1ATdWGzCMXs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS4yMjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MDQuNjQ2WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJTbmEvVVd2N21jWkkyM29FNXRVYWJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMT9nZW5lcmF0aW9uPTE1NTY4MzU4NDQ2NDcyMjUmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJtZXRhZGF0YSI6eyJrZXkiOiJ2YWx1ZSJ9LCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6ImRvbWFpbi1nb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiIsImRvbWFpbiI6Imdvb2dsZS5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Im9iajEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMblMrYnZ4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQ1Q2ZFRBPT0iLCJldGFnIjoiQ0xuUytidngvZUVDRUFJPSJ9" + } + }, + { + "ID": "c845025158c6b7d0", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "120" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOm51bGwsImNvbnRlbnRUeXBlIjpudWxsLCJtZXRhZGF0YSI6bnVsbH0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, 
no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1970" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:15 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryOVdmGzCqBOS2ZZ0nw1SqbHF7qJ1CeTxgb6a1BJyoA42NTNZ8RdEVQGNF5u2hWdSQW79cBOonxjms_MoogyDJVAtKHl1rys87QHF6-750NNNSoEw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vYmoxLzE1NTY4MzU4NDQ2NDcyMjUiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxIiwibmFtZSI6Im9iajEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg0NDY0NzIyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDowNC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuNTI3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjA0LjY0NloiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiU25hL1VXdjdtY1pJMjNvRTV0VWFiUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajE/Z2VuZXJhdGlvbj0xNTU2ODM1ODQ0NjQ3MjI1JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iajEvMTU1NjgzNTg0NDY0NzIyNS9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL29iajEvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9vYmoxL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoib2JqMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODQ0NjQ3MjI1IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xuUytidngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJDVDZkVEE9PSIsImV0YWciOiJDTG5TK2J2eC9lRUNFQU09In0=" + } + }, + { + "ID": "811b0b9d4980b14f", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY2hlY2tzdW0tb2JqZWN0In0K", + "aGVsbG93b3JsZA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3305" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CIGhrMHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqJTh_P91pby-vQkRF12GexDihy7TZAlUPamDV_REldvXeqvbrWY_kB0Vcp1lYyuIY7EK2_eMBYYSRMMIVEYz5fxxt51-9Br_UmYvnuRgjhfC16AuA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jaGVja3N1bS1vYmplY3QvMTU1NjgzNTg1NTk2MjI0MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdCIsIm5hbWUiOiJjaGVja3N1bS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNS45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTUuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE1Ljk2MVoiLCJzaXplIjoiMTAiLCJtZDVIYXNoIjoiL0Y0RGpUaWxjRElJVkVIbi9uQVFzQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdD9nZW5lcmF0aW9uPTE1NTY4MzU4NTU5NjIyNDEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY2hlY2tzdW0tb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWlud
GVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NoZWNrc3VtLW9iamVjdC8xNTU2ODM1ODU1OTYyMjQxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jaGVja3N1bS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjaGVja3N1bS1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NTk2MjI0MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY2hlY2tzdW0tb2JqZWN0LzE1NTY4MzU4NTU5NjIyNDEvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NoZWNrc3VtLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNoZWNrc3VtLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODU1OTYyMjQxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0lHaHJNSHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJWc3UwZ0E9PSIsImV0YWciOiJDSUdock1IeC9lRUNFQUU9In0=" + } + }, + { + "ID": "69c77b8bdd7cc643", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVyby1vYmplY3QifQo=", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3240" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:16 GMT" + ], + "Etag": [ + "CLirz8Hx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2iXVwACD0yFKYjt0WT-lW1Tx6PtpOgDYttPBWBnJZ3CPf4cUK7heI_9SwdzYXoieaRHDj9n3w3M_SExedwQqQ-GTOY9qM9DPmrPy11hEny0WjECc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QiLCJuYW1lIjoiemVyby1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxNi41MzZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTYuNTM2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE2LjUzNloiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3Q/Z2VuZXJhdGlvbj0xNTU2ODM1ODU2NTM3MDE2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby1vYmplY3QvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLW9iamVjdC8xNTU2ODM1ODU2NTM3MDE2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvLW9iamVjdC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8tb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NTY1MzcwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9LHsi
a2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8tb2JqZWN0LzE1NTY4MzU4NTY1MzcwMTYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8tb2JqZWN0L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVyby1vYmplY3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1NjUzNzAxNiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMaXJ6OEh4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiQUFBQUFBPT0iLCJldGFnIjoiQ0xpcno4SHgvZUVDRUFFPSJ9" + } + }, + { + "ID": "1017883279ca634a", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "98" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "417" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "CLnS+bvx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKqYaaFUj6b4ezS4lnnQgYv4CHVSsqhVSlLP3QqpItNahh25KnLzbTccK_idrfjKV0gExzr17MvFmoRw7oUIWyJJINmpEXEw5EmHScxipsb3KbWZ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvb2JqMS8xNTU2ODM1ODQ0NjQ3MjI1L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vb2JqMS9hY2wvYWxsVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJvYmoxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NDQ2NDcyMjUiLCJlbnRpdHkiOiJhbGxVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0xuUytidngvZUVDRUFRPSJ9" + } + }, + { + "ID": "35f04bf2400be96f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Etag": [ + "\"4a76bf516bfb99c648db7a04e6d51a6d\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:04 GMT" + ], + "X-Goog-Generation": [ + "1556835844647225" + ], + "X-Goog-Hash": [ + "crc32c=CT6dTA==", + "md5=Sna/UWv7mcZI23oE5tUabQ==" + ], + "X-Goog-Metageneration": [ + "4" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqC9teasrZdeHJo8KLaQQ251d02ORMxXreH3TKcaQp4NWLVbpiAw1GpW9WeSdFgbpWtVDdyzfjE93gAs6uA0kdlDPZsIEfJVK3GFfaZE2aW5aFCn8Y" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEQ==" + } + }, + { + "ID": "d01040504d2a91c4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoib2JqMSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30343" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:17 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRhLZOcm0K7eNByUzCBgYfgiytuZ6Hj06jPkbkK77LS80OPgO_78AL530-2AcrlLe1fIy0jM0tonLLncLuOszrqKcGgVhqEJuE48-67vqBRACjqmY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/obj1."}}" + } + }, + { + "ID": "db327c92dac99584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoa0LrEhOIeq-iYERbwsOkXjr0QR1ANe3tDsleUqoFNU7fRIruaqYikdnU1svzhc8iGRwx0A5dAGYQ_mM045LN_oxU1c76ugXtnWS2PW8wBcmst7hk" + ] + }, + "Body": "" + } + }, + { + "ID": "e70c028bd3dc9250", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12275" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrvFq18HewCbAkJcD7BmKgE8_WOFqSNkcwbpbNqCDPIy5jLVf2fIaHdg76_1rHuAHTtFM84Zr6euptOehRZOqA38Zc_BrJUSZKjg686imvazehpBc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused 
by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "4d7120b41e583669", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/copy-obj1?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoJ1cGOQjoNiRk8qv-igyccVp3Vg_ut7NLSEO3A-yyQwgGnoXR5jPbi3tuomYWrS57GIvd6GpShkcAYSIJ2e94Jx_1SseYkYtlSF8a7qsDU0OVFYgM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, unnamedArguments=[]}, location=entity.resource_id.name, message=No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1, reason=notFound, rpcCode=404} No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::OBJECT_NOT_FOUND: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"No such object: go-integration-test-20190502-80633403432013-0001/copy-obj1"}}" + } + }, + { + "ID": "e7711da9b067a58f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "156" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6Im9iajEifSx7Im5hbWUiOiJvYmoyIn0seyJuYW1lIjoib2JqL3dpdGgvc2xhc2hlcyJ9XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "750" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CI2048Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoJ5Us6Rp03fUUqSAPFFGPuqW2qR4gCfzJ4w3W1Kx8C0JGfNDxte0QlBoVEi4K7rV5zkUY1IXQvIk6FOU3DxP8ECZdxkVJBW57Jf5iXo8kRyLVu7OA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDEvMTU1NjgzNTg1ODk2Mjk1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMSIsIm5hbWUiOiJjb21wb3NlZDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1ODk2Mjk1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOC45NjJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTguOTYyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE4Ljk2MloiLCJzaXplIjoiNDgiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29tcG9zZWQxP2dlbmVyYXRpb249MTU1NjgzNTg1ODk2Mjk1NyZhbHQ9bWVkaWEiLCJjcmMzMmMiOiJBYldCeVE9PSIsImNvbXBvbmVudENvdW50IjozLCJldGFnIjoiQ0kyMDQ4THgvZUVDRUFFPSJ9" + } + }, + { + "ID": "de5d78de4310ff60", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed1", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CI2048Lx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:18 GMT" + ], + "X-Goog-Generation": [ + "1556835858962957" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqprnxhGEtAMeMcwiKn43QTEdsHrB7k_jCjOqnvH984bk5CIjIhSalrDVNwlLb37IU67jIYyIY1Nq8uaSz5HGUROWwpE6SVmGvIGXnuAbTP7x8Ez5w" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "4b7c14a3f35089ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "182" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50VHlwZSI6InRleHQvanNvbiJ9LCJzb3VyY2VPYmplY3RzIjpbeyJuYW1lIjoib2JqMSJ9LHsibmFtZSI6Im9iajIifSx7Im5hbWUiOiJvYmovd2l0aC9zbGFzaGVzIn1dfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "776" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "CP+RiMPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrMYc2wH1kAYUthDQQhZ4gGQU6kYXq6KGs1msu9ifujWYQ2dwS5ifqZFWiTYf74Rq2y9VkzALfhjkTK6anDL-SwHxUH3IwMe8j1rrrbaMbv7eLQCR8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb21wb3NlZDIvMTU1NjgzNTg1OTU2NDc5OSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbXBvc2VkMiIsIm5hbWUiOiJjb21wb3NlZDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg1OTU2NDc5OSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9qc29uIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjE5LjU2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoxOS41NjRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MTkuNTY0WiIsInNpemUiOiI0OCIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb21wb3NlZDI/Z2VuZXJhdGlvbj0xNTU2ODM1ODU5NTY0Nzk5JmFsdD1tZWRpYSIsImNyYzMyYyI6IkFiV0J5UT09IiwiY29tcG9uZW50Q291bnQiOjMsImV0YWciOiJDUCtSaU1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "26a4cb091ed2f1bd", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/composed2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "48" + ], + "Content-Type": [ + "text/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Etag": [ + "\"-CP+RiMPx/eECEAE=\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "3" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:19 GMT" + ], + "X-Goog-Generation": [ + "1556835859564799" + ], + "X-Goog-Hash": [ + "crc32c=AbWByQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "48" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-t-yqapXjBzjIrFWFhaT080f06JahbskCp0DC_-izUjPK4fYn556m2qyRDgj9HjkkDPtKR6dauzF5afjNZZ61bN_dTvw15d82A206-SdS1Jv_YM4" + ] + }, + "Body": "TuDshcL7vdCAXh8L42NvEeeRmd+w7xhUEtz55BCr9yaRPt+QxetF2uGfY/b4R5Pm" + } + }, + { + "ID": "6b59ed7d3d098970", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwibmFtZSI6Imd6aXAtdGVzdCJ9Cg==", + "H4sIAAAAAAAA/2IgEgACAAD//7E97OkoAAAA" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3227" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Etag": [ + "CNuJssPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoXNmswaWh2Gfcz1pYPy7etwk09Nz_dmuYVq55M2p3ox38A3mC0QgoHf-hL1uCZxQvusJSIHeugTQvmCgZvRdXV-3juD0W8KI669m0EILAHSfnVoJ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nemlwLXRlc3QvMTU1NjgzNTg2MDI1MTg2NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdCIsIm5hbWUiOiJnemlwLXRlc3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24veC1nemlwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIwLjI1MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMC4yNTFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjAuMjUxWiIsInNpemUiOiIyNyIsIm1kNUhhc2giOiJPdEN3K2FSUklScUtHRkFFT2F4K3F3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0P2dlbmVyYXRpb249MTU1NjgzNTg2MDI1MTg2NyZhbHQ9bWVkaWEiLCJjb250ZW50RW5jb2RpbmciOiJnemlwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ3ppcC10ZXN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2d6aXAtdGVzdC8xNTU2ODM1ODYwMjUxODY3L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nemlwLXRlc3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJnemlwLXRlc3QiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MDI1MTg2NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ3ppcC10ZXN0LzE1NTY4MzU4NjAyNTE4NjcvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYX
Bpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2d6aXAtdGVzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imd6aXAtdGVzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYwMjUxODY3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ051SnNzUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiI5RGh3QkE9PSIsImV0YWciOiJDTnVKc3NQeC9lRUNFQUU9In0=" + } + }, + { + "ID": "070a9af78f7967bf", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/gzip-test", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "none" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Type": [ + "application/x-gzip" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:20 GMT" + ], + "X-Goog-Generation": [ + "1556835860251867" + ], + "X-Goog-Hash": [ + "crc32c=9DhwBA==", + "md5=OtCw+aRRIRqKGFAEOax+qw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "27" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosPALnXk-3j5nBlDDWX7hBP4xBNcCe7lGNkEU-sHHTWsRexFWLoB-1gn8RfnlqS6Q5ZTrR86NBxpP1T0bFxgHnlYUYGh-G3mThRCgGIAWjXggOeOU" + ] + }, + "Body": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==" + } + }, + { + "ID": "3ece6665583d65fb", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/obj-not-exists", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "225" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:20 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uph0zmuYk7ZmOWzbNkj24Iai85wz2vKj0mMnMkIHIoDUlDAUCA0e_CgFT5fV_XAvgUx06I9FspomkB_ImflSVCvj55GK6zEoY5iV1hN96nh4AmPBN0" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+Tm9TdWNoS2V5PC9Db2RlPjxNZXNzYWdlPlRoZSBzcGVjaWZpZWQga2V5IGRvZXMgbm90IGV4aXN0LjwvTWVzc2FnZT48RGV0YWlscz5ObyBzdWNoIG9iamVjdDogZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL29iai1ub3QtZXhpc3RzPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "602f4fdfff4e490c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic2lnbmVkVVJMIn0K", + "VGhpcyBpcyBhIHRlc3Qgb2YgU2lnbmVkVVJMLgo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3230" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:21 GMT" + ], + "Etag": [ + "CNat6MPx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWSi2VcVAHJJzhGSPjVT17kZYaCh3uZvWXBU0R4hVmPZJ10gvbv_Ucrfd2kwrBvRYoZaQcYkY5Q3M-oywiCNtLm6SM9InWHz2Omc-0pWJJjfQWPCM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zaWduZWRVUkwvMTU1NjgzNTg2MTE0MTIwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTCIsIm5hbWUiOiJzaWduZWRVUkwiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyMS4xNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjEuMTQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjIxLjE0MFoiLCJzaXplIjoiMjkiLCJtZDVIYXNoIjoiSnl4dmd3bTluMk1zckdUTVBiTWVZQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTD9nZW5lcmF0aW9uPTE1NTY4MzU4NjExNDEyMDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc2lnbmVkVVJML2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3NpZ25lZFVSTC8xNTU2ODM1ODYxMTQxMjA2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zaWduZWRVUkwvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzaWduZWRVUkwiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2MTE0MTIwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc2lnbmVkVVJMLzE1NTY4MzU4NjExNDEyMDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2
xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NpZ25lZFVSTC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNpZ25lZFVSTCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODYxMTQxMjA2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05hdDZNUHgvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJaVHFBTHc9PSIsImV0YWciOiJDTmF0Nk1QeC9lRUNFQUU9In0=" + } + }, + { + "ID": "4a1c9dc802f4427c", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrbFZENfdiDkKpD_H_zm44x7h5bTZ3VD6MEoiiXuMwxHHK-zCSSoPRA2ALNUXoR3EcS1c-huuyZBIiw7ULKii7BNqKz0RMu4lWRxrDRQBOjGwKDDgE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQWM9In0=" + } + }, + { + "ID": "e12cb7360f4fd8dd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "684" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Etag": [ + "CAc=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrVJI9g41JQEDEgaIAOXKSCe2B8xklFKSoIciM7uC0uod7t7NzXfxsIrI6mQeZGkWa2B12xdKpSJBndrNbBvygZ6aXxpBBvL8d80NJ42u8KQC7AaFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWM9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBYz0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBYz0ifV19" + } + }, + { + "ID": "d25b1dc2d7247768", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMSJ9Cg==", + "/pXmp0sD+azbBKjod9MhwQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosBTMUgCzCYlGDKqRCN7Kexl832zyi4FxoRqfYtmTr8yv63z4BAQBeCTLajyZqMbRyzjje7TtPPYomUSv_8zrQ8bFdXlNzbTSeKK8kXi2Ys82jus4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxIiwibmFtZSI6ImFjbDEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjI0N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC4yNDdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuMjQ3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiIwRTl0Rk5wWmowL1dLT0o2ZlY5cGF3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMT9nZW5lcmF0aW9uPTE1NTY4MzU4NjQyNDgxNzAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkZpRG1WZz09IiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "1d3df0d90130f5fe", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWNsMiJ9Cg==", + "HSHw5Wsm5u7iJL/jjKkPVQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:24 GMT" + ], + "Etag": [ + "CJrxw8Xx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMl0_NB41LujALQWmul06CCiuw7okuKlhYG_qilsoXlld2qoYgc2-ABYogriA37QOJMK5ZlJ5sYzP2Mp7CTSzK9uccFLi10TdrGFMUjUpD0OvcXrE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyIiwibmFtZSI6ImFjbDIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI0LjczN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyNC43MzdaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjQuNzM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJjOStPL3JnMjRIVEZCYytldFdqZWZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMj9nZW5lcmF0aW9uPTE1NTY4MzU4NjQ3Mzc5NDYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0NzM3OTQ2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0pyeHc4WHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDIvMTU1NjgzNTg2NDczNzk0Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDczNzk0NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMi8xNTU2ODM1ODY0NzM3OTQ2L2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMi9hY2wvZG9tYWluLWdvb2dsZS
5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wyLzE1NTY4MzU4NjQ3Mzc5NDYvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDIvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQ3Mzc5NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSnJ4dzhYeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IkF0TlJ0QT09IiwiZXRhZyI6IkNKcnh3OFh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a270daf4f72d9d3d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2767" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UranlihhkPcnPzTT2UYs4r6Hritk60lULu-pzO6YX-EWIlMlFiXzKmxaZVgOcPfnCgfJsRktKsL_TbFun1PfupraBREfYklCvRXKvJ_526gW-Jf9zs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hY2wxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYWNsMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY0MjQ4MTcwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09yK3BjWHgvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbDEvMTU1NjgzNTg2NDI0ODE3MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImFjbDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2NDI0ODE3MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsMS8xNTU2ODM1ODY0MjQ4MTcwL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYWNsMS9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPcitwY1h4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wxLzE1NTY4MzU4NjQyNDgxNzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2FjbDEvYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJhY2wxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjQyNDgxNzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZS
I6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3IrcGNYeC9lRUNFQUU9In1dfQ==" + } + }, + { + "ID": "328125b2bc991e95", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:25 GMT" + ], + "Etag": [ + "COr+pcXx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UosP78wuW4e1mPLJgcCbCxBC4Je5LWjKugd0fVK51u-P3qeXbQXj4_Amo25ZHyut2LZvoipvmK95xEvebSyVyzVydKyU2VSbHLDkwnRpR-nr9UGIf0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c7595376917851f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:26 GMT" + ], + "Etag": [ + "CAg=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqnZXiLrdCFoctnkKUqdvXLdSp9xumFk20gsRJc4xB1CTZ14aLZYwbDJmi90E28Q2_E4klaJBOaB1OCcpbDBoF1N9E9Bk3VEaNYYrHKMoECuO7RMPs" + ] + }, + "Body": "" + } + }, + { + "ID": "4183198b151a3557", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "109" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJ1c2VyLWpiZEBnb29nbGUuY29tIiwicm9sZSI6IlJFQURFUiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "386" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:27 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo361-YkcyyOvd5CL4q3LgTM1Bk4RdkPY3cn-qOm6Dn0vghTnmIE9VKkzGiOnjRNnD7SWXGMqpxaCGvCt6P44D55PqOwAtVTF8PNwmEK9HWCoVXABo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In0=" + } + }, + { + "ID": "e98f53a71d8839c9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1789" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAk=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqACfvLjoBUrCN_6ksyMdPlv7yPWQGo1D9u1UiCCgFDRlEwr6NOp18BSxaeTuMIabf2ciNj7_Ba1W6YCz64HlcYyQOoEXNfGYGKcAqGTubXsGHPVk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FrPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQWs9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvdXNlci1qYmRAZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvdXNlci1qYmRAZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImVudGl0eSI6InVzZXItamJkQGdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZW1haWwiOiJqYmRAZ29vZ2xlLmNvbSIsImV0YWciOiJDQWs9In1dfQ==" + } + }, + { + "ID": "28b9a29086758e65", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/acl/user-jbd%40google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:28 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UozsFdtI43Ltv91E18-CrdRZ4pQDVZXReJOVhpWDu3mxNA7rrWU7hNGl4kPMd0FgcVWDigZA5JBNsYbFV113Ew6Cp2uTsuk0uI1io_mhA6-Lmd2h18" + ] + }, + "Body": "" + } + }, + { + "ID": "8ef9f7b6caefd750", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiZ29waGVyIn0K", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3196" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CIXH3cfx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo97hrh9l4pmrJQqaH8Qu3axsdbEPH2Y8GSEso8-ExKuf4ot8o2uS79ROJcgCtql4vLQJ_4L8q0Z7E9E2WYbbl6vKUYKdQiCI0POisS9o_ViNh9-Yk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlciIsIm5hbWUiOiJnb3BoZXIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTM1MjgzNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS4zNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuMzUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5LjM1MloiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyP2dlbmVyYXRpb249MTU1NjgzNTg2OTM1MjgzNyZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9nb3BoZXIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vz
c0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvZ29waGVyLzE1NTY4MzU4NjkzNTI4MzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2dvcGhlci9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImdvcGhlciIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5MzUyODM3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9nb3BoZXIvMTU1NjgzNTg2OTM1MjgzNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vZ29waGVyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiZ29waGVyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NjkzNTI4MzciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSVhIM2NmeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InJ0aDkwUT09IiwiZXRhZyI6IkNJWEgzY2Z4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ae3a5920bc8a9c3c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3548" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:29 GMT" + ], + "Etag": [ + "CPzK+8fx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 
GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrPLZGQqU8j04JqxMmfsqas3HGTlrdx5NM99YR5nCedJYGMSaZ1ynvd2UbUcdwps_gfRo__0XapzHuD3hM_bshff65qlnw_i2cOwp97N2EhfUsZ1X4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS/Qk9C+0YTQtdGA0L7QstC4LzE1NTY4MzU4Njk4NDQ4NjAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgiLCJuYW1lIjoi0JPQvtGE0LXRgNC+0LLQuCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiY29udGVudFR5cGUiOiJ0ZXh0L3BsYWluOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjI5Ljg0NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOS44NDRaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MjkuODQ0WiIsInNpemUiOiI0IiwibWQ1SGFzaCI6ImpYZC9PRjA5L3NpQlhTRDNTV0FtM0E9PSIsIm1lZGlhTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL2Rvd25sb2FkL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjg/Z2VuZXJhdGlvbj0xNTU2ODM1ODY5ODQ0ODYwJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ItCT0L7RhNC10YDQvtCy0LgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg2OTg0NDg2MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEv0JPQvtGE0LXRgNC+0LLQuC8xNTU2ODM1ODY5ODQ0ODYwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ
2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby8lRDAlOTMlRDAlQkUlRDElODQlRDAlQjUlRDElODAlRDAlQkUlRDAlQjIlRDAlQjgvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiLQk9C+0YTQtdGA0L7QstC4IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Njk4NDQ4NjAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL9CT0L7RhNC10YDQvtCy0LgvMTU1NjgzNTg2OTg0NDg2MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vJUQwJTkzJUQwJUJFJUQxJTg0JUQwJUI1JUQxJTgwJUQwJUJFJUQwJUIyJUQwJUI4L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoi0JPQvtGE0LXRgNC+0LLQuCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODY5ODQ0ODYwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1B6Sys4ZngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDUHpLKzhmeC9lRUNFQUU9In0=" + } + }, + { + "ID": "0104a746dbe20580", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "COKVlMjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqY12miALkWDvx8PHEyL23oxWZlw8Jq7Cn6kS8E8_Tn1tTWWoTuS_pWb5pi9qevCIvqK6aTK56MzwrMuy9axIjpw5yyMZ42MaWK_YFkBr95OnK5IbM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hLzE1NTY4MzU4NzAyNDc2NTAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hIiwibmFtZSI6ImEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMC4yNDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzAuMjQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMwLjI0N1oiLCJzaXplIjoiNCIsIm1kNUhhc2giOiJqWGQvT0YwOS9zaUJYU0QzU1dBbTNBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYT9nZW5lcmF0aW9uPTE1NTY4MzU4NzAyNDc2NTAmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2EvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2EvMTU1NjgzNTg3MDI0NzY1MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MDI0NzY1MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYS8xNTU2ODM1ODcwMjQ3NjUwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9hL2FjbC91c2VyLWFub3RoZXItdGhpbm
dAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYSIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODcwMjQ3NjUwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09LVmxNangvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJydGg5MFE9PSIsImV0YWciOiJDT0tWbE1qeC9lRUNFQUU9In0=" + } + }, + { + "ID": "da19af085be0fb47", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYSJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "19484" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:30 GMT" + ], + "Etag": [ + "CLmmusjx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAJr55dqTDkY_jBKB0vAWyE0owtzB_kEoH91_FKTr9C3bHd8fWUiTrsruL5mgwXg2l8gbPiOXad-A9HIJ3Zt_AeXq_p0m-Q8LrHQqJ6sAKsbzV4Zw" + ] + }, + "Body": 
"{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","name":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835870872377","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:30.872Z","updated":"2019-05-02T22:24:30.872Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:30.872Z","size":"4","md5Hash":"jXd/OF09/siBXSD3SWAm3A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?generation=1556835870872377&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLmmusjx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/1556835870872377/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa","generation":"1556835870872377","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLmmusjx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"rth90Q==","etag":"CLmmusjx/eECEAE="}" + } + }, + { + "ID": "96f915707a76c50c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2948" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrQIwi81TFCNpDIZUjs_5tSUFqURLnaBF7g2l1sGMuHCTWZeLyr45VWkcc8fUDYnF-P84QixYQbTzJGuJzOOx7mw1qluYDqoORFGPICfvKmNxrZM3A" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdF
RyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "7a562d6e65ef8a7a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEifQo=", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "4785" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ups-3Re4_9njFMmkYwND6BuK5cgxs5tKVRwQ2CGkcks-K0aALbethXTYaEb6fLos6w_FSnM8YkPGM3TETCAoyYpVbQPlotXR9CH3DXngjQNv0yv_ZQ" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiVGhlIG1heGltdW0gb2JqZWN0IGxlbmd0aCBpcyAxMDI0IGNoYXJhY3RlcnMsIGJ1dCBnb3QgYSBuYW1lIHdpdGggMTAyNSBjaGFyYWN0ZXJzOiAnJ2FhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhLi4uJyciLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5pZC5uYW1lLCBtZXNzYWdlPVRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnLCB1bm5hbWVkQXJndW1lbnRzPVthYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1UaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJywgcmVhc29uPWludmFsaWQsIHJwY0NvZGU9NDAwfSBUaGUgbWF4aW11bSBvYmplY3QgbGVuZ3RoIGlzIDEwMjQgY2hhcmFjdGVycywgYnV0IGdvdCBhIG5hbWUgd2l0aCAxMDI1IGNoYXJhY3RlcnM6ICcnYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWEuLi4nJ1xuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5FcnJvckNvbGxlY3Rvci50b0ZhdWx0KEVycm9yQ29sbGVjdG9yLmphdmE6NTQpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5RXJyb3JDb252ZXJ0ZXIudG9GYXVsdChSb3N5RXJyb3JDb252ZXJ0ZXIuamF2YTo2Nylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbGVyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjI1OSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lIYW5kbG
VyJDIuY2FsbChSb3N5SGFuZGxlci5qYXZhOjIzOSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkRpcmVjdEV4ZWN1dG9yLmV4ZWN1dGUoRGlyZWN0RXhlY3V0b3IuamF2YTozMClcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmV4ZWN1dGVMaXN0ZW5lcihBYnN0cmFjdEZ1dHVyZS5qYXZhOjExNDMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5jb21wbGV0ZShBYnN0cmFjdEZ1dHVyZS5qYXZhOjk2Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLnNldChBYnN0cmFjdEZ1dHVyZS5qYXZhOjczMSlcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUudXRpbC5DYWxsYWJsZUZ1dHVyZS5ydW4oQ2FsbGFibGVGdXR1cmUuamF2YTo2Milcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnRocmVhZC5UaHJlYWRUcmFja2VycyRUaHJlYWRUcmFja2luZ1J1bm5hYmxlLnJ1bihUaHJlYWRUcmFja2Vycy5qYXZhOjEyNilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW5JbkNvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6NDUzKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuc2VydmVyLkNvbW1vbk1vZHVsZSRDb250ZXh0Q2FycnlpbmdFeGVjdXRvclNlcnZpY2UkMS5ydW5JbkNvbnRleHQoQ29tbW9uTW9kdWxlLmphdmE6ODAyKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlJDEucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ2MClcblx0YXQgaW8uZ3JwYy5Db250ZXh0LnJ1bihDb250ZXh0LmphdmE6NTY1KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuQ3VycmVudENvbnRleHQucnVuSW5Db250ZXh0KEN1cnJlbnRDb250ZXh0LmphdmE6MjA0KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0Tm9VbnJlZihUcmFjZUNvbnRleHQuamF2YTozMTkpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHQoVHJhY2VDb250ZXh0LmphdmE6MzExKVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NTcpXG5cdGF0IGNvbS5nb29nbGUuZ3NlLmludGVybmFsLkRpc3BhdGNoUXVldWVJbXBsJFdvcmtlclRocmVhZC5ydW4oRGlzcGF0Y2hRdWV1ZUltcGwuamF2YTo0MDMpXG4ifV0sImNvZGUiOjQwMCwibWVzc2FnZSI6IlRoZSBtYXhpbXVtIG9iamVjdCBsZW5ndGggaXMgMTAyNCBjaGFyYWN0ZXJzLCBidXQgZ290IGEgbmFtZSB3aXRoIDEwMjUgY2hhcmFjdGVyczogJydhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYWFhYS4uLicnIn19" + } + }, + { + "ID": "239b642e1919a84a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoibmV3XG5saW5lcyJ9Cg==", + "ZGF0YQ==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3270" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urk-XOPoshQkyIHYZp241R2W5lOuLwqPMx606rubzPOaGggM2z_sZO93dYCJHffXuPDOjy64lIxrBXIYW4QT04eM932jPtzPdVd-PFWssV9RYk83JE" + ] + }, + "Body": "eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiRGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJyIsImRlYnVnSW5mbyI6ImNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkZhdWx0OiBJbW11dGFibGVFcnJvckRlZmluaXRpb257YmFzZT1JTlZBTElEX1ZBTFVFLCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLklOVkFMSURfVkFMVUUsIGNyZWF0ZWRCeUJhY2tlbmQ9dHJ1ZSwgZGVidWdNZXNzYWdlPW51bGwsIGVycm9yUHJvdG9Db2RlPUlOVkFMSURfVkFMVUUsIGVycm9yUHJvdG9Eb21haW49Z2RhdGEuQ29yZUVycm9yRG9tYWluLCBmaWx0ZXJlZE1lc3NhZ2U9bnVsbCwgbG9jYXRpb249ZW50aXR5LnJlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9RGlzYWxsb3dlZCB1bmljb2RlIGNoYXJhY3RlcnMgcHJlc2VudCBpbiBvYmplY3QgbmFtZSAnJ25ld1xubGluZXMnJywgdW5uYW1lZEFyZ3VtZW50cz1bbmV3XG5saW5lc119LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1EaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IERpc2FsbG93ZWQgdW5pY29kZSBjaGFyYWN0ZXJzIHByZXNlbnQgaW4gb2JqZWN0IG5hbWUgJyduZXdcbmxpbmVzJydcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbG
UuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJEaXNhbGxvd2VkIHVuaWNvZGUgY2hhcmFjdGVycyBwcmVzZW50IGluIG9iamVjdCBuYW1lICcnbmV3XG5saW5lcycnIn19" + } + }, + { + "ID": "3cd795cb0b675f98", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpOXjD9T_c_FRByyyA-CjE753kXZefEzIwQdrNG6OewoHOvKC3Bb9LleziQ6-ymVDg-F1S1eeQeFNJH7nJxDsdA_VmxOjeBzTDB_F3--_1NtwMF6Do" + ] + }, + "Body": "" + } + }, + { + "ID": "361d4541e2cb460a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/a?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpomJFlpfOIJoFvdCmQgD2dRXSREzV1ToI2ntcHi8I9tSyzMCUk0ZrytllkwR_nU8WFZ0YuXVl0Kwsq1EhJL85RlBgCkBNc1P-yVMyNbdt5YrooKok" + ] + }, + "Body": "" + } + }, + { + "ID": "a4e929446e49411d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/%D0%93%D0%BE%D1%84%D0%B5%D1%80%D0%BE%D0%B2%D0%B8?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:31 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqmdInqhuXhUxVjtfRvXvHd6Rmo1ODUbeNFFTBW4diuT7HDjzmYnvd4TU6oJPqbck52yFAR99fMx4Tks1TllSUCB4GsMVbBNENfZU6te-hXIB8XyJs" + ] + }, + "Body": "" + } + }, + { + "ID": "89b2ef62a071f7b9", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gopher?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 
GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRgapcSgK1GOswXS37Vynb9BRj0go8bNPOBtrFZ3TDr-nzIdlvKBQuwHtXl4Tlb-mLl-A65zL5Iw3unCEuy6lwtWl5-V7REtcIVHkrWLK5AoLoHqA" + ] + }, + "Body": "" + } + }, + { + "ID": "ca91b2e4ee4555cb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovWW5HSrWbcvQoWGU3c_n64wwcau06tjEM-fzdADFSbmmylG3VLjae6MRNIM4B9gDitCKfjo1_xPQNAZEmMYzRplVmdI4cIrBC8ursFSUywoCSJeU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "eb678345630fe080", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3213" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:32 GMT" + ], + "Etag": [ + "CLGNmsnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrtPOErXGrnlQtXCV1PhgO5nz80m75Fzs7DoR-y8Ava3jvL9NDJ4L-i8SIV_tktR8VqCvv9uh-X1ltJCj3isa_oY8TlV-OsZeIiESuDrRTYueQWQP0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMi40NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzIuNDQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMyLjQ0MVoiLCJzaXplIjoiNTIiLCJtZDVIYXNoIjoiSzI4NUF3S1dXZlZSZEJjQ1VYaHpOZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODcyNDQyMDMzJmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzI0NDIwMzMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzI0NDIwMzMiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MjQ0MjAzMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG
9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MjQ0MjAzMyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNMR05tc254L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiRmNYTThRPT0iLCJldGFnIjoiQ0xHTm1zbngvZUVDRUFFPSJ9" + } + }, + { + "ID": "d76ba152a62e7dc1", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqk5XoKCyt74JBpeU8oREfzh3e0LLtXupBjcWtsFfHNZDEn4DjcmzWo6resyQ1gEisBp_STlU4hVaU3MbTyX3LM443_Gsu4ouZGdf3ZTvTzsLD_zNw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "f785ed7f0490bb6d", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3212" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CLPJv8nx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrCqpaad9mhxk7VZgTc8xIunote5-AHPp3vYkUbSU8uiVcq65rcnrbtIuCqJ63JTuVJvKw7pFwnLjyn5fL37JzfXLhmLAowAV67_Qqtxxhiy4WQars" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzMwNTU5MjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sOyBjaGFyc2V0PXV0Zi04IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjA1NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy4wNTVaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuMDU1WiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzMwNTU5MjMmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3MzA1NTkyMy9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3MzA1NTkyMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczMDU1OTIzL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3
JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczMDU1OTIzIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0xQSnY4bngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDTFBKdjhueC9lRUNFQUU9In0=" + } + }, + { + "ID": "096d156b863922d6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvaHRtbCIsIm5hbWUiOiJjb250ZW50In0K", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:33 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoivaSzjVdOm2KP4nXCF8VQDNHXmlNjm9rZwTaWzjvBmH3oK01QYTnHqZ5DhYRewu_1H0KoQgFpUIC-M4OKZoOhRHrq0-H8HLT__SqD1D5cmezrqHo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "0e96d304e9558094", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3197" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CPW+6cnx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc-8JwtQprLtec5Ft6q3z4p0ooDaCCLmO9oVgrOXvBpoFhApztj8anfFwEcHdzpB6FBlUFqou0viF_HLHJvHolsxeCCoDOhqyIggOOlWjnSRg8XzY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzM3NDI3MDkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9odG1sIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjMzLjc0MloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozMy43NDJaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzMuNzQyWiIsInNpemUiOiI1NCIsIm1kNUhhc2giOiJOOHA4L3M5RndkQUFubHZyL2xFQWpRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudD9nZW5lcmF0aW9uPTE1NTY4MzU4NzM3NDI3MDkmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3Mzc0MjcwOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3Mzc0MjcwOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY29udGVudC8xNTU2ODM1ODczNzQyNzA5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbn
RlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY29udGVudCIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODczNzQyNzA5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BXKzZjbngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJHb1Vic1E9PSIsImV0YWciOiJDUFcrNmNueC9lRUNFQUU9In0=" + } + }, + { + "ID": "b4c8dedc10a69e07", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6ImltYWdlL2pwZWciLCJuYW1lIjoiY29udGVudCJ9Cg==", + "PGh0bWw+PGhlYWQ+PHRpdGxlPk15IGZpcnN0IHBhZ2U8L3RpdGxlPjwvaGVhZD48L2h0bWw+" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpC3UBVkNENjfRoaL2M1fPRh-iTDTwCsgF8O3TOX_iBuXcDxMjOoTmrIhD4cE5g1s7PObP66TZB1lecFypnLz8hL-aowsOxUMuL0KXJvrgoGxKHUFg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "c45640ec17230f64", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3198" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:34 GMT" + ], + "Etag": [ + "CM/Yjsrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrvqLsOaMe2H6Xh3MyDCOuHmLzU1fn_Zh6kHLkpTpn4EWj3nFkeROu1g-cABD151cTh8YmY798iEku-Q3TnZIMZ5mexmHiCJKdUhAolbSd9b_apMK8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50IiwibmFtZSI6ImNvbnRlbnQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiaW1hZ2UvanBlZyIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNC4zNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzQuMzUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM0LjM1MVoiLCJzaXplIjoiNTQiLCJtZDVIYXNoIjoiTjhwOC9zOUZ3ZEFBbmx2ci9sRUFqUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQ/Z2VuZXJhdGlvbj0xNTU2ODM1ODc0MzUyMjA3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jb250ZW50L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jb250ZW50LzE1NTY4MzU4NzQzNTIyMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NvbnRlbnQvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjb250ZW50IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzQzNTIyMDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NvbnRlbnQvMTU1NjgzNTg3NDM1MjIwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY29udGVudC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNvbnRlbnQiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NDM1MjIwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNNL1lqc3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiR29VYnNRPT0iLCJldGFnIjoiQ00vWWpzcngvZUVDRUFFPSJ9" + } + }, + { + "ID": "752ab630b062d61e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiY3VzdG9tZXItZW5jcnlwdGlvbiJ9Cg==", + "dG9wIHNlY3JldC4=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaWw6jJMKNOoCNMXwJNVVsxqzPhGfMo-5g3krJ7A8_PSGTx4W2OakfmtDvzOpg1hexLJcsWunNHfyWVXsjBpM58dtQlM6VucQW4Uxndlb9RIp0UhM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "3f4d8fd0e94ca6de", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3425" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoO-NbHUptMgzQPx9zApWGCeqMgfkk2mt6PJl1iWbq8ocUYq_0ud8gPuj9bcBTBOBdqewKzSouddziC-BFLoOdQJ6ElkN136q5y39ZRDN4zq6UWLUs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW
5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "0355971107342253", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3482" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UryZM2k28xNlFIDnXCH6SJ8RA2lDVNIx-5_eYRExB4-D1-dg_3fWvU3AREwoow4DvlYN4eyzA1AthWs5y0tYZEzvamBoZufkrXPmD64aT8kzAatns8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuMDU4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbW
VyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoicjBOR3JnPT0iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "8f8c7cba5307c918", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3448" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upnljw7UWMuKFeJUJp_gu-lyfzDRVTwgInddze_9zF5waX4glKKpcRrGpe50cvS8dV_xbbKLQa-CqAMVxlEp7qIHsmTdUR1amjrq0w9U_qrn4gYwSw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzUuOTIwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbj9nZW5lcmF0aW9uPTE1NTY4MzU4NzUwNTg2ODAmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQam51Y3J4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLz
E1NTY4MzU4NzUwNTg2ODAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjdXN0b21lci1lbmNyeXB0aW9uIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzUwNTg2ODAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUGpudWNyeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQUk9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fc766ddbbfcd9d8a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3505" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "CPjnucrx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlTuEeQHkdcuHKu_aWtuYjopXaORKyG9TF597i1ybzIPgJHE_oiKH-xYwG_D2txhF_Sv6Jzfy44Dfga1bINCpPLseDEeD_Ez4rvxx8baTX1uJ7lJo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLzE1NTY4MzU4NzUwNTg2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uIiwibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNS4wNThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzYuMjI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM1LjA1OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24/Z2VuZXJhdGlvbj0xNTU2ODM1ODc1MDU4NjgwJmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2N1c3RvbWVyLWVuY3J5cHRpb24vMTU1NjgzNTg3NTA1ODY4MC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NTA1ODY4MCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDUGpudWNyeC9lRUNFQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNj
MzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi8xNTU2ODM1ODc1MDU4NjgwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc1MDU4NjgwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ1BqbnVjcngvZUVDRUFNPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJyME5Hcmc9PSIsImV0YWciOiJDUGpudWNyeC9lRUNFQU09IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJIK0xtblhoUm9lSTZUTVc1YnNWNkh5VWs2cHlHYzJJTWJxWWJBWEJjcHMwPSJ9fQ==" + } + }, + { + "ID": "fb1e502029272329", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urm5pF1ghApp-RiVg_TAipd-LdTbF-hcDPcVRYbmj7KUIQxnsMUF5WOcrMA1t7ZltdvPZhyfsELISuH6DGJlUUedaCNH-B5j580GjBYLUKCwmAADeM" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "c151ba4fab0c6499", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + 
"text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Etag": [ + "\"-CPjnucrx/eECEAM=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:35 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:35 GMT" + ], + "X-Goog-Generation": [ + "1556835875058680" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "3" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur68mIGQ8VAtTRZs4F6ixZZYKcqe1oTqWHWqmp7gNF-81XKf2xX6eS4PcegbKx7bYnb4i6dzTCoLlbaJRNHwwLVzNbYMkGNy_bTHbWYTgtlBSKVtWs" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "e09c6326cc60ad82", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:36 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqldLQvGKuu2fihIdwh7CvU7W0yE3vUXu8jqefTPSMhlGgvR1EfeYFqjk_Zxj7fEpv7AjugvHnn1DmkJxJWhrZyM8b5gS4uJID92D_018h2YHH1r30" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "6c83b36cc6fc482c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Expires": [ + 
"Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGOUzu40Jq-_GztkQxoFN64MHcSoZjph7ivV8tqrp6ENxFIB0c82xqEdKKfcpqbJ2YXtXQgwflZgX-uiVOgGY_1c8SLoV7geQMfErTz1Q0NDM4YDM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3NzEwODYxNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozNy4xMDhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzcuMTA4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM3LjEwOFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzcxMDg2MTQmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzcxMDg2MTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0c
HM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc3MTA4NjE0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3NzEwODYxNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzcxMDg2MTQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSWIzdHN2eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNJYjN0c3Z4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "e499f3b7077fb222", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Etag": [ + "\"c7058d15ad157573e6940c2b95c00972\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:37 GMT" + ], + "X-Goog-Generation": [ + "1556835877108614" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfEf1WUM8E5dSmOe1sOfgheagnlrcFUolvMMBiP6jDvPqoS7Sx0IwklM-DwnuAbBJPQ_EHhX-42njQu9vUjXqamH1U6np0Vmi0BngqFYOTc9MVP3M" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "9e67dfa901c4e619", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12563" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYPZ-j39Sr1Whn6F3PYeVaxqIIjS_fBgSNkBO7f7B8RMWih6iCCLf1cPDXB3Br-0XBkdm6i9N9fGeK84_laVYWuJc0uJwSCXydk0bSWezw0qec5yE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=The requested object is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: \n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "670f74ac2a0b734e", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, 
must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWwxnT2DHS1ymBhZJmnwxQiajAVYbOH62dyhX86syLNW_TlPnzYaM_7kACpN8ar5EeAHQ9fsMSHP4CqMcb6ZWg5KY3OkbPvfitdLalAA9MXxMFqoo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODE4MzI1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC4xODJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguMTgyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4LjE4MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4NzgxODMyNTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTlBDK012eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIi
wiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4NzgxODMyNTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4MTgzMjUxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODE4MzI1MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4NzgxODMyNTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTlBDK012eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNOUEMrTXZ4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkZuQnZmUTFkRHN5UzhrSEQrYUI2SEhJZ2xEb1E1SW03V1lEbTNYWVRHclE9In19fQ==" + } + }, + { + "ID": "9c6e7ad2d8e2c68a", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoSrQtDkljyQ-aUJoC8m6nY20wZiHLTwh4znEZ5hyKxLQUrZIi7PnU416twfTwfPXWvd4cPGZC3NVdpJWg332KnUKSEdFPY2BQwLJ-_9S5eVdwMrf8" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "b5fbec9f26148f95", + "Request": { + 
"Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-2", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Language": [ + "en" + ], + "Content-Length": [ + "11" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Etag": [ + "\"-CNPC+Mvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:38 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:38 GMT" + ], + "X-Goog-Generation": [ + "1556835878183251" + ], + "X-Goog-Hash": [ + "crc32c=r0NGrg==", + "md5=xwWNFa0VdXPmlAwrlcAJcg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "11" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTr8Qws5nL-el_ied6M7FYtryiLpFfhGNN-TxakIl1FCAOBJwMeq6_KBQ0l4UmT8VePQQ9h_S8r55NolBB1Ex12-iKvooCkTsNmvQ4rE4hOqQGbLc" + ] + }, + "Body": "dG9wIHNlY3JldC4=" + } + }, + { + "ID": "616de25561ebbffa", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "FnBvfQ1dDsyS8kHD+aB6HHIglDoQ5Im7WYDm3XYTGrQ=" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3640" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" 
+ ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyD8XYw6tgOBbcxdUIVAw_vXrrryw0bgh-L-jRW0hyJt0lCHU9K26isn6tykgml7UgX52dgw65xD_ht4yxbHkY_VFl6MkEY6H-mWRx0_52xj49RW8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3ODk5ODUxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOC45OThaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzguOTk4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM4Ljk5OFoiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4Nzg5OTg1MTEmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4Nzg5OTg1MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24
tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODc4OTk4NTExIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg3ODk5ODUxMS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4Nzg5OTg1MTEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTytqcXN6eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNPK2pxc3p4L2VFQ0VBRT0iLCJjdXN0b21lckVuY3J5cHRpb24iOnsiZW5jcnlwdGlvbkFsZ29yaXRobSI6IkFFUzI1NiIsImtleVNoYTI1NiI6IkgrTG1uWGhSb2VJNlRNVzVic1Y2SHlVazZweUdjMklNYnFZYkFYQmNwczA9In19fQ==" + } + }, + { + "ID": "2ddf1402c6efc3a8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13334" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrFitgTFIY6yt8kk3zprPGtWncWc7Ad6oUFVXVdE0O3QqK0qlYGB7RSw-dwN1AvMk4Sndi8gR22cF8qs88ZlmdAxB-1zYdUNqwArmpP2xni66xX-L4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceIsEncryptedWithCustomerEncryptionKey","message":"The target object is encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object 
(go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=null, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=null, message=The target object is encrypted by a customer-supplied encryption key., reason=resourceIsEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_IS_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption) is encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "eea75f5f6c3c7e89", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "160" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24ifSx7Im5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "911" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Etag": [ + "CPDp3szx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uotyvz71ZfU1J_hl3NsDz9ldw92nWlIp9muxpBUsdv0nTyfJEqz-yQFyEwE2UM11wv7tkpLQfffGlKhlx7CVK0S1UYJnS2XQu2P0Uiz5bbamHxE520" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTMvMTU1NjgzNTg3OTg1OTQ0MCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMyIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg3OTg1OTQ0MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDozOS44NTlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6MzkuODU5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjM5Ljg1OVoiLCJzaXplIjoiMjIiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0zP2dlbmVyYXRpb249MTU1NjgzNTg3OTg1OTQ0MCZhbHQ9bWVkaWEiLCJjcmMzMmMiOiI1ajF5cGc9PSIsImNvbXBvbmVudENvdW50IjoyLCJldGFnIjoiQ1BEcDNzengvZUVDRUFFPSIsImN1c3RvbWVyRW5jcnlwdGlvbiI6eyJlbmNyeXB0aW9uQWxnb3JpdGhtIjoiQUVTMjU2Iiwia2V5U2hhMjU2IjoiSCtMbW5YaFJvZUk2VE1XNWJzVjZIeVVrNnB5R2MySU1icVliQVhCY3BzMD0ifX0=" + } + }, + { + "ID": "b19fb82ecf450b51", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "277" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcj3TJ6B5h4Q0DT7G_ioI_Icu9iv65m57OlRH6h9mKD9zZZl320H3QD6A7fNd1UPBSGGDZ9I0TG4_lSFa8JgkJQEVRGVRvVQv36b7ArCuGmg0loV0" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+UmVzb3VyY2VJc0VuY3J5cHRlZFdpdGhDdXN0b21lckVuY3J5cHRpb25LZXk8L0NvZGU+PE1lc3NhZ2U+VGhlIHJlc291cmNlIGlzIGVuY3J5cHRlZCB3aXRoIGEgY3VzdG9tZXIgZW5jcnlwdGlvbiBrZXkuPC9NZXNzYWdlPjxEZXRhaWxzPlRoZSByZXF1ZXN0ZWQgb2JqZWN0IGlzIGVuY3J5cHRlZCBieSBhIGN1c3RvbWVyLXN1cHBsaWVkIGVuY3J5cHRpb24ga2V5LjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "1f0a12bd00a88965", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/customer-encryption-3", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "22" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Etag": [ + "\"-CPDp3szx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:39 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Component-Count": [ + "2" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:24:39 GMT" + ], + "X-Goog-Generation": [ + "1556835879859440" + ], + "X-Goog-Hash": [ + "crc32c=5j1ypg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "22" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7DoEkjdrkGGrYpsCDbd2Y6c_Mu8sEA7cN_k06l4WjY25UJsijDFWSlSIA9SCi-ouLuI3if4Rh33a5n5vKcN3oSYjzK92eSPVCczEpQdILKFWyQh4" + ] + }, + "Body": "dG9wIHNlY3JldC50b3Agc2VjcmV0Lg==" + } + }, + { + "ID": "f649a81672a92823", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3527" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:40 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrfhqrYGhW8ew5dxbfg_DcMhaJh6k2oY_JqMwPRSDtS3Ef5kljeo8oTONrxRIAP8I0ScqxFCy7okNejkqOeO4Vr1TBZ2mc4MDjwiJA52ERSgZMUyvM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMTEiLCJvYmplY3RTaXplIjoiMTEiLCJkb25lIjp0cnVlLCJyZXNvdXJjZSI6eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMiIsIm5hbWUiOiJjdXN0b21lci1lbmNyeXB0aW9uLTIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MDcxNzUwNiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MC43MTdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDAuNzE3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQwLjcxN1oiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoieHdXTkZhMFZkWFBtbEF3cmxjQUpjZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMj9nZW5lcmF0aW9uPTE1NTY4MzU4ODA3MTc1MDYmYWx0PW1lZGlhIiwiY29udGVudExhbmd1YWdlIjoiZW4iLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jdXN0b21lci1lbmNyeXB0aW9uLTIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3VzdG9tZXItZW5jcnlwdGlvbi0yLzE1NTY4MzU4ODA3MTc1MDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2N1c3RvbWVyLWVuY3J5cHRpb24tMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgwNzE3NTA2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5Nj
AxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jdXN0b21lci1lbmNyeXB0aW9uLTIvMTU1NjgzNTg4MDcxNzUwNi91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3VzdG9tZXItZW5jcnlwdGlvbi0yL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3VzdG9tZXItZW5jcnlwdGlvbi0yIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODA3MTc1MDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUtaazgzeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InIwTkdyZz09IiwiZXRhZyI6IkNNS1prODN4L2VFQ0VBRT0ifX0=" + } + }, + { + "ID": "61763fd57e906039", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "129" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImN1c3RvbWVyLWVuY3J5cHRpb24tMiJ9XX0K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13444" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7TRBQyfrSgWnsPL0CVfQCd5s_QJN9pRUHB1YttIoObh1YkzCEtuRxrwyKYtccpyodUCMmTVRyD6d0oDy-c2_Q7GK-_6OC5h_EF_1e9-t6E8VZCQ8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"resourceNotEncryptedWithCustomerEncryptionKey","message":"The target object is not encrypted by a customer-supplied encryption key.","extendedHelp":"https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=INVALID_VALUE, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.encryptionKey, message=Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key., unnamedArguments=[]}, location=entity.encryptionKey, message=The target object is not encrypted by a customer-supplied encryption key., reason=resourceNotEncryptedWithCustomerEncryptionKey, rpcCode=400} The target object is not encrypted by a customer-supplied encryption key.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RESOURCE_NOT_ENCRYPTED_WITH_CUSTOMER_ENCRYPTION_KEY: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Component object (go-integration-test-20190502-80633403432013-0001/customer-encryption-2) us not encrypted by a customer-supplied encryption key.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"The target object is not encrypted by a customer-supplied encryption key."}}" + } + }, + { + "ID": "b45cb452c78d7b50", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:41 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq2YnotZtU1JMnJw10vtgVDeukiWK_4DXx0QFWA91CaCYLPXLDKzCzY8xqb6EGkVxvw731As27REu_hYOqZTwSfJJ6SeHemliLV9JgQYjfngxL0HFA" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDoyOC44ODJaIiwibWV0YWdlbmVyYXRpb24iOiIxMCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBbz0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FvPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQW89In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0FvPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQW89In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7Im5ldyI6Im5ldyIsImwxIjoidjIifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJld
GFnIjoiQ0FvPSJ9" + } + }, + { + "ID": "6831287fd0edbcd4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYyJ9Cg==", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrXGhYaqbrodjXeCKtZGAt0hM0GrM8GBcZBkGkLJ7550-Iqaxp681SkhDDDOp0V3-xMmoq3rjWOC8A4XvJfAzgxGsO2VekCUJgUBMalL8hEAur49q8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDA
zNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "7588cdc84e3976f4", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Etag": [ + "CJek2c3x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGBHQWnSHLUYlW5nck2w_3QN4x5uYmOM-GIatNzD1tqhzK0JOf7rBe1BKILpdyEUPlSmrLwrukcqJwHgfezE50OQL7ha9n6_fc6qUMoVwkEGawOgA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODE4NjU3NTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjIiwibmFtZSI6InBvc2MiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0MS44NjVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDEuODY1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQxLjg2NVoiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYz9nZW5lcmF0aW9uPTE1NTY4MzU4ODE4NjU3NTEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4MTg2NTc1MS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MTg2NTc1MSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgxODY1NzUxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODgxODY1NzUxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0plazJjM3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJ6OFN1SFE9PSIsImV0YWciOiJDSmVrMmMzeC9lRUNFQUU9In0=" + } + }, + { + "ID": "974ed1b42608e806", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "34" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:42 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCtuc_GsAj6sf3zXpqT2_KZrdL0SEwsunv07k0DaaEHMj8gkX4kRCcm5r8AUrD9RNYjoKeceiCyY4pDN8nrHljshmEIXF2P29Oyd0KSuqDjsNeps" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiMyIsIm9iamVjdFNpemUiOiIzIiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcG9zYy8xNTU2ODM1ODgyNzYwNjA3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYyIsIm5hbWUiOiJwb3NjIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDIuNzYwWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQyLjc2MFoiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Mi43NjBaIiwic2l6ZSI6IjMiLCJtZDVIYXNoIjoickwwWTIwekMrRnp0NzJWUHpNU2syQT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2M/Z2VuZXJhdGlvbj0xNTU2ODM1ODgyNzYwNjA3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjLzE1NTY4MzU4ODI3NjA2MDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJwb3NjIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODI3NjA2MDciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MvMTU1NjgzNTg4Mjc2MDYwNy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW
50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYy9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4Mjc2MDYwNyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKL3pqODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ0ovemo4N3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "a2be30bf1e3cb539", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoicG9zYzIiLCJzdG9yYWdlQ2xhc3MiOiJNVUxUSV9SRUdJT05BTCJ9Cg==", + "eHh4" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3150" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CILrp87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqziYHpTkcbyMFRtHko_e04Rze8FHLbALw476U2bB9k_SqPwILBzxKRLzFvRN4q3RLt4xR1mpR2lgJ1Zq0BNYCefLOZsOeq4JbzJYLxGL5V799xkQ8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIiLCJuYW1lIjoicG9zYzIiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My4xNTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuMTUyWiIsInN0b3JhZ2VDbGFzcyI6Ik1VTFRJX1JFR0lPTkFMIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjE1MloiLCJzaXplIjoiMyIsIm1kNUhhc2giOiI5V0dxOXU4TDhVMUNDTHRHcE15enJRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzI/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzMTUyNzcwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vcG9zYzIvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3Bvc2MyL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9wb3NjMi8xNTU2ODM1ODgzMTUyNzcwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9wb3NjMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InBvc2MyIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODMxNTI3NzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Bvc2MyLzE1NTY4MzU4ODMxNTI3NzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS
9vL3Bvc2MyL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoicG9zYzIiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzE1Mjc3MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJTHJwODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMTdxQUJRPT0iLCJldGFnIjoiQ0lMcnA4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "d570e13411ade628", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMifQo=", + "Zm9v" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3336" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Etag": [ + "CPCDx87x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrltsLp6xgl5GQBA_ZoqyMKcRlP-MvWyo0epRXUAbOAkOUUpAOgezp4fFQRP5wEM1fq771AWUgtYvQ3HLXCnwxmDaCqJxEhICiYhxDp8RpvCnn3xuI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMiLCJuYW1lIjoiYnVja2V0SW5Db3B5QXR0cnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0My42NjNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDMuNjYzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQzLjY2M1oiLCJzaXplIjoiMyIsIm1kNUhhc2giOiJyTDBZMjB6QytGenQ3MlZQek1TazJBPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnM/Z2VuZXJhdGlvbj0xNTU2ODM1ODgzNjYzODU2JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vYnVja2V0SW5Db3B5QXR0cnMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NTYvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9idWNrZXRJbkNvcHlBdHRycy8xNTU2ODM1ODgzNjYzODU2L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9idWNrZXRJbkNvcHlBdHRycy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImJ1Y2tldEluQ29weUF0dHJzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODM2NjM4NTYiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2J1Y2tldEluQ29weUF0dHJzLzE1NTY4MzU4ODM2NjM4NT
YvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2J1Y2tldEluQ29weUF0dHJzL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiYnVja2V0SW5Db3B5QXR0cnMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4MzY2Mzg1NiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQQ0R4ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiejhTdUhRPT0iLCJldGFnIjoiQ1BDRHg4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "284a06aa5d3f4c0f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "2972" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:43 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqEuzT5vUHzBcQTT84B5lbxQRZ8Wazbvf7pARGdim0OKOWWM6MdR9KrH11f9w9bxrybc2YHzoveHnAwpEgFzwUcXlLN8xDttCt9pwOnPMX3My8utXg" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6InJlcXVpcmVkIiwibWVzc2FnZSI6IlJlcXVpcmVkIiwiZGVidWdJbmZvIjoiY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRmF1bHQ6IEltbXV0YWJsZUVycm9yRGVmaW5pdGlvbntiYXNlPVJFUVVJUkVELCBjYXRlZ29yeT1VU0VSX0VSUk9SLCBjYXVzZT1udWxsLCBkZWJ1Z0luZm89bnVsbCwgZG9tYWluPWdsb2JhbCwgZXh0ZW5kZWRIZWxwPW51bGwsIGh0dHBIZWFkZXJzPXt9LCBodHRwU3RhdHVzPWJhZFJlcXVlc3QsIGludGVybmFsUmVhc29uPVJlYXNvbnthcmd1bWVudHM9e30sIGNhdXNlPW51bGwsIGNvZGU9Z2RhdGEuQ29yZUVycm9yRG9tYWluLlJFUVVJUkVELCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1SRVFVSVJFRCwgZXJyb3JQcm90b0RvbWFpbj1nZGF0YS5Db3JlRXJyb3JEb21haW4sIGZpbHRlcmVkTWVzc2FnZT1udWxsLCBsb2NhdGlvbj1lbnRpdHkuZGVzdGluYXRpb25fcmVzb3VyY2UuaWQubmFtZSwgbWVzc2FnZT1udWxsLCB1bm5hbWVkQXJndW1lbnRzPVtdfSwgbG9jYXRpb249ZW50aXR5LmRlc3RpbmF0aW9uX3Jlc291cmNlLmlkLm5hbWUsIG1lc3NhZ2U9UmVxdWlyZWQsIHJlYXNvbj1yZXF1aXJlZCwgcnBjQ29kZT00MDB9IFJlcXVpcmVkXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW
5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUmVxdWlyZWQifX0=" + } + }, + { + "ID": "631a4dc25c1479df", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBK3c9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CIes587x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up5pzFgsk-trM6xfNGHCutAafrBhKStla4toQDjvEsTPe4TTesYnwc0KLg9WK95RXOKqdm_KUngd4hv6Tucfns_MALlJyx1s6A2cZR3vco6jKPTW00" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC4xOTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuMTkyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjE5MloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0MTkzMjg3JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQxOTMyODcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQxOTMyODciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDE5MzI4Ny91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDE5MzI4NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJZXM1ODd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0llczU4N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "e60d42eeeafea205", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjcmMzMmMiOiJjSCtBL0E9PSIsIm5hbWUiOiJoYXNoZXNPblVwbG9hZC0xIn0K", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3301" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ9QOOcNdGntWr097PRNnaDFbt2xarczbyHRwkwxSiwRZIhV26iZGbh5vHIpvl3Z5Dxc7hjtWuutHTMn8jpCTEowLJNAre2A6mZxVNtV52Vh6XDX4" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W2NIK0EvQT09XX0sIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5jcmMzMmMsIG1lc3NhZ2U9UHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi4sIHJlYXNvbj1pbnZhbGlkLCBycGNDb2RlPTQwMH0gUHJvdmlkZWQgQ1JDMzJDIFwiY0grQS9BPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgQ1JDMzJDIFwiY0grQSt3PT1cIi5cblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLmNvcmUuRXJyb3JDb2xsZWN0b3IudG9GYXVsdChFcnJvckNvbGxlY3Rvci5qYXZhOjU0KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUVycm9yQ29udmVydGVyLnRvRmF1bHQoUm9zeUVycm9yQ29udmVydGVyLmphdmE6NjcpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyNTkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5yZXN0LmFkYXB0ZXIucm9zeS5Sb3N5SGFuZGxlciQyLmNhbGwoUm9zeUhhbmRsZXIuamF2YToyMzkpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5EaXJlY3RFeGVjdXRvci5leGVjdXRlKERpcmVjdEV4ZWN1dG9yLmphdmE6MzApXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5leGVjdXRlTGlzdGVuZXIoQWJzdHJhY3RGdXR1cmUuamF2YToxMTQzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuY29tcGxldGUoQWJzdHJhY3RGdXR1cmUuamF2YTo5NjMpXG5cdGF0IGNvbS5nb29nbGUuY29tbW9uLnV0aWwuY29uY3VycmVudC5BYnN0cmFjdEZ1dHVyZS5zZXQoQWJzdHJhY3RGdXR1cmUuamF2YTo3MzEpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLnV0aWwuQ2FsbGFibGVGdXR1cmUucnVuKENhbGxhYmxlRnV0dXJlLmphdmE6NjIpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci50aHJlYWQuVGhyZWFkVHJhY2tlcnMkVGhyZWFkVHJhY2tpbmdSdW5uYWJsZS5ydW4oVGhyZWFkVHJhY2tlcnMuamF2YToxMjYpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuSW5Db250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjQ1Mylcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnNlcnZlci5Db21tb25Nb2R1bGUkQ29udGV4dENhcnJ5aW5nRXhlY3V0b3JTZXJ2aWNlJDEucnVuSW5Db250ZXh0KENvbW1vbk1vZHVsZS5qYXZhOjgwMilcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZSQxLnJ1bihUcmFjZUNvbnRleHQuamF2YTo0NjApXG
5cdGF0IGlvLmdycGMuQ29udGV4dC5ydW4oQ29udGV4dC5qYXZhOjU2NSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLkN1cnJlbnRDb250ZXh0LnJ1bkluQ29udGV4dChDdXJyZW50Q29udGV4dC5qYXZhOjIwNClcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dE5vVW5yZWYoVHJhY2VDb250ZXh0LmphdmE6MzE5KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JEFic3RyYWN0VHJhY2VDb250ZXh0Q2FsbGJhY2sucnVuSW5Jbmhlcml0ZWRDb250ZXh0KFRyYWNlQ29udGV4dC5qYXZhOjMxMSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRUcmFjZUNvbnRleHRSdW5uYWJsZS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDU3KVxuXHRhdCBjb20uZ29vZ2xlLmdzZS5pbnRlcm5hbC5EaXNwYXRjaFF1ZXVlSW1wbCRXb3JrZXJUaHJlYWQucnVuKERpc3BhdGNoUXVldWVJbXBsLmphdmE6NDAzKVxuIn1dLCJjb2RlIjo0MDAsIm1lc3NhZ2UiOiJQcm92aWRlZCBDUkMzMkMgXCJjSCtBL0E9PVwiIGRvZXNuJ3QgbWF0Y2ggY2FsY3VsYXRlZCBDUkMzMkMgXCJjSCtBK3c9PVwiLiJ9fQ==" + } + }, + { + "ID": "5a293e6162b30ef3", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiaGFzaGVzT25VcGxvYWQtMSJ9Cg==", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:44 GMT" + ], + "Etag": [ + "CJuDg8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgpXYBQOYHYXTQAVb7IGna0BRqHaB7oBdZ19frcgAVGErLbEHe3bESKZ7zKSO6r0AtvqwfCEuJty95EeD5MkwqIsXbr88YcTG0j7M-g4yTKkejukI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NC42NDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDQuNjQ2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ0LjY0NloiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg0NjQ2ODExJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODQ2NDY4MTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODQ2NDY4MTEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NDY0NjgxMS91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NDY0NjgxMSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKdURnOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0p1RGc4L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "19f19648f1196c10", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3321" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CKDem8/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAmKuvPxUsjTHDB5nMuNG92dU0gql0Yol2zlvPDWEDuXHQRLhOJzYBAH207Ot-_IB22pH5QSfTaTayqEWBUfkobCR9YGVa79W0LG4fv9fnAjxk5Cs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xIiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0NS4wNTFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NDUuMDUxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ1LjA1MVoiLCJzaXplIjoiMjciLCJtZDVIYXNoIjoib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTE/Z2VuZXJhdGlvbj0xNTU2ODM1ODg1MDUxNjgwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9oYXNoZXNPblVwbG9hZC0xL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9oYXNoZXNPblVwbG9hZC0xLzE1NTY4MzU4ODUwNTE2ODAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2hhc2hlc09uVXBsb2FkLTEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJoYXNoZXNPblVwbG9hZC0xIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4ODUwNTE2ODAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2hhc2hlc09uVXBsb2FkLTEvMTU1NjgzNTg4NTA1MTY4MC91c2VyLWFub3RoZX
ItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vaGFzaGVzT25VcGxvYWQtMS9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Imhhc2hlc09uVXBsb2FkLTEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg4NTA1MTY4MCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNLRGVtOC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiY0grQSt3PT0iLCJldGFnIjoiQ0tEZW04L3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "880416f9891fc25f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJtZDVIYXNoIjoib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09IiwibmFtZSI6Imhhc2hlc09uVXBsb2FkLTEifQo=", + "SSBjYW4ndCB3YWl0IHRvIGJlIHZlcmlmaWVk" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "3515" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFC5_DRlr9xktR_eWpfuFqiZRbenVOATtUEg3tfJC8JbCqagKQzn0_sYLvxvG52ordbl2Nwo0L6Y0AbwuenLAo1uoX4uzHnro-JiY8MkdQEsO3uSA" + ] + }, + "Body": 
"eyJlcnJvciI6eyJlcnJvcnMiOlt7ImRvbWFpbiI6Imdsb2JhbCIsInJlYXNvbiI6ImludmFsaWQiLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4iLCJkZWJ1Z0luZm8iOiJjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS5GYXVsdDogSW1tdXRhYmxlRXJyb3JEZWZpbml0aW9ue2Jhc2U9SU5WQUxJRF9WQUxVRSwgY2F0ZWdvcnk9VVNFUl9FUlJPUiwgY2F1c2U9bnVsbCwgZGVidWdJbmZvPW51bGwsIGRvbWFpbj1nbG9iYWwsIGV4dGVuZGVkSGVscD1udWxsLCBodHRwSGVhZGVycz17fSwgaHR0cFN0YXR1cz1iYWRSZXF1ZXN0LCBpbnRlcm5hbFJlYXNvbj1SZWFzb257YXJndW1lbnRzPXt9LCBjYXVzZT1udWxsLCBjb2RlPWdkYXRhLkNvcmVFcnJvckRvbWFpbi5JTlZBTElEX1ZBTFVFLCBjcmVhdGVkQnlCYWNrZW5kPXRydWUsIGRlYnVnTWVzc2FnZT1udWxsLCBlcnJvclByb3RvQ29kZT1JTlZBTElEX1ZBTFVFLCBlcnJvclByb3RvRG9tYWluPWdkYXRhLkNvcmVFcnJvckRvbWFpbiwgZmlsdGVyZWRNZXNzYWdlPW51bGwsIGxvY2F0aW9uPWVudGl0eS5yZXNvdXJjZS5tZDVfaGFzaF9iYXNlNjQsIG1lc3NhZ2U9UHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4sIHVubmFtZWRBcmd1bWVudHM9W292WmpHbGNYUEppR09BZktGYkpsMVE9PV19LCBsb2NhdGlvbj1lbnRpdHkucmVzb3VyY2UubWQ1X2hhc2hfYmFzZTY0LCBtZXNzYWdlPVByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuLCByZWFzb249aW52YWxpZCwgcnBjQ29kZT00MDB9IFByb3ZpZGVkIE1ENSBoYXNoIFwib3ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIgZG9lc24ndCBtYXRjaCBjYWxjdWxhdGVkIE1ENSBoYXNoIFwib2ZaakdsY1hQSmlHT0FmS0ZiSmwxUT09XCIuXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci5jb3JlLkVycm9yQ29sbGVjdG9yLnRvRmF1bHQoRXJyb3JDb2xsZWN0b3IuamF2YTo1NClcblx0YXQgY29tLmdvb2dsZS5hcGkuc2VydmVyLnJlc3QuYWRhcHRlci5yb3N5LlJvc3lFcnJvckNvbnZlcnRlci50b0ZhdWx0KFJvc3lFcnJvckNvbnZlcnRlci5qYXZhOjY3KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjU5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIucmVzdC5hZGFwdGVyLnJvc3kuUm9zeUhhbmRsZXIkMi5jYWxsKFJvc3lIYW5kbGVyLmphdmE6MjM5KVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuRGlyZWN0RXhlY3V0b3IuZXhlY3V0ZShEaXJlY3RFeGVjdXRvci5qYXZhOjMwKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuZXhlY3V0ZUxpc3RlbmVyKEFic3RyYWN0RnV0dXJlLmphdmE6MTE0Mylcblx0YXQgY29tLmdvb2dsZS5jb21tb24udXRpbC5jb25jdXJyZW50LkFic3RyYWN0RnV0dXJlLmNvbXBsZXRlKEFic3RyYWN0RnV0dXJlLmphdmE6OTYzKVxuXHRhdCBjb20uZ29vZ2xlLmNvbW1vbi51dGlsLmNvbmN1cnJlbnQuQWJzdHJhY3RGdXR1cmUuc2V0KEFic3RyYWN0RnV0dXJlLmphdmE6NzMxKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIuY29yZS51dGlsLkNhbGxhYmxlRnV0dXJlLnJ1bihDYWxsYWJsZUZ1dHVyZS5qYXZhOjYyKVxuXHRhdCBjb20uZ29vZ2xlLmFwaS5zZXJ2ZXIudGhyZWFkLlRocmVhZFRyYWNrZXJzJFRocmVhZFRyYWNraW5nUnVubmFibGUucnVuKFRocmVhZFRyYWNrZXJzLmphdmE6MTI2KVxuXHRhdCBjb20uZ29vZ2xlLnRyYWNpbmcuVHJhY2VDb250ZXh0JFRyYWNlQ29udGV4dFJ1bm5hYmxlLnJ1bkluQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTo0NTMpXG5cdGF0IGNvbS5nb29nbGUuYXBpLnNlcnZlci
5zZXJ2ZXIuQ29tbW9uTW9kdWxlJENvbnRleHRDYXJyeWluZ0V4ZWN1dG9yU2VydmljZSQxLnJ1bkluQ29udGV4dChDb21tb25Nb2R1bGUuamF2YTo4MDIpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUkMS5ydW4oVHJhY2VDb250ZXh0LmphdmE6NDYwKVxuXHRhdCBpby5ncnBjLkNvbnRleHQucnVuKENvbnRleHQuamF2YTo1NjUpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5DdXJyZW50Q29udGV4dC5ydW5JbkNvbnRleHQoQ3VycmVudENvbnRleHQuamF2YToyMDQpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkQWJzdHJhY3RUcmFjZUNvbnRleHRDYWxsYmFjay5ydW5JbkluaGVyaXRlZENvbnRleHROb1VucmVmKFRyYWNlQ29udGV4dC5qYXZhOjMxOSlcblx0YXQgY29tLmdvb2dsZS50cmFjaW5nLlRyYWNlQ29udGV4dCRBYnN0cmFjdFRyYWNlQ29udGV4dENhbGxiYWNrLnJ1bkluSW5oZXJpdGVkQ29udGV4dChUcmFjZUNvbnRleHQuamF2YTozMTEpXG5cdGF0IGNvbS5nb29nbGUudHJhY2luZy5UcmFjZUNvbnRleHQkVHJhY2VDb250ZXh0UnVubmFibGUucnVuKFRyYWNlQ29udGV4dC5qYXZhOjQ1Nylcblx0YXQgY29tLmdvb2dsZS5nc2UuaW50ZXJuYWwuRGlzcGF0Y2hRdWV1ZUltcGwkV29ya2VyVGhyZWFkLnJ1bihEaXNwYXRjaFF1ZXVlSW1wbC5qYXZhOjQwMylcbiJ9XSwiY29kZSI6NDAwLCJtZXNzYWdlIjoiUHJvdmlkZWQgTUQ1IGhhc2ggXCJvdlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIiBkb2Vzbid0IG1hdGNoIGNhbGN1bGF0ZWQgTUQ1IGhhc2ggXCJvZlpqR2xjWFBKaUdPQWZLRmJKbDFRPT1cIi4ifX0=" + } + }, + { + "ID": "e1a7bef7c2f0f7b9", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "341" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Etag": [ + "CAo=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:45 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDioJ0fktjtyI_mABE4-oH9KqDlurM5mWiYDhoDp_LRJYSPvDxjuoaYzNhjoTHdHPALySTzCxScTstm27R-praR27EAm4Gx9k3wHAAwKJUptqRnFE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfV0sImV0YWciOiJDQW89In0=" + } + }, + { + "ID": "df5371a2b0cd8c3b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "317" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + 
"eyJiaW5kaW5ncyI6W3sibWVtYmVycyI6WyJwcm9qZWN0RWRpdG9yOmRla2xlcmstc2FuZGJveCIsInByb2plY3RPd25lcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0T3duZXIifSx7Im1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXSwicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIn0seyJtZW1iZXJzIjpbInByb2plY3RWaWV3ZXI6ZGVrbGVyay1zYW5kYm94Il0sInJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciJ9XSwiZXRhZyI6IkNBbz0ifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrAVgetrpv1xjtKv2B7KFBdnbg20V-btiRn3sD4kQF7shWIQ1-FqJMRsbYfcYJyTMQHv_BhW_eVzvcc56z0LOcYfE-wbCRZpqYjen6c-bVcMzPet2A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "feaa4b1a4dda0450", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "423" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Etag": [ + "CAs=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwMVZVQQ8s-ZBUJDLKyC-qMMTANncoBhRQWlMCITVUXQrTnFGGxu4GdKyVcuudrSa-DWy5w5iLE-i4a407GopUNma3xIq1eNiyVAziPD7jQ4YluzA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNwb2xpY3kiLCJyZXNvdXJjZUlkIjoicHJvamVjdHMvXy9idWNrZXRzL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsImJpbmRpbmdzIjpbeyJyb2xlIjoicm9sZXMvc3RvcmFnZS5sZWdhY3lCdWNrZXRPd25lciIsIm1lbWJlcnMiOlsicHJvamVjdEVkaXRvcjpkZWtsZXJrLXNhbmRib3giLCJwcm9qZWN0T3duZXI6ZGVrbGVyay1zYW5kYm94Il19LHsicm9sZSI6InJvbGVzL3N0b3JhZ2UubGVnYWN5QnVja2V0UmVhZGVyIiwibWVtYmVycyI6WyJwcm9qZWN0Vmlld2VyOmRla2xlcmstc2FuZGJveCJdfSx7InJvbGUiOiJyb2xlcy9zdG9yYWdlLm9iamVjdFZpZXdlciIsIm1lbWJlcnMiOlsicHJvamVjdFZpZXdlcjpkZWtsZXJrLXNhbmRib3giXX1dLCJldGFnIjoiQ0FzPSJ9" + } + }, + { + "ID": "23d94c8a09c0c334", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/iam/testPermissions?alt=json\u0026permissions=storage.buckets.get\u0026permissions=storage.buckets.delete\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "108" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:46 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoR-XKWu9nOWmFZve2-NeDyDvZDo5rYpPD3avVEmZkZ-lODvHCSR0D7zdPeCa61L5EtOIYJBqmC0D9Xc229A1GDS5d91vgAGuzRoxRnNa9DjBw68xM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSN0ZXN0SWFtUGVybWlzc2lvbnNSZXNwb25zZSIsInBlcm1pc3Npb25zIjpbInN0b3JhZ2UuYnVja2V0cy5nZXQiLCJzdG9yYWdlLmJ1Y2tldHMuZGVsZXRlIl19" + } + }, + { + "ID": "3d0a13fe961ed15c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "93" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "518" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoIRjhruHo5rweyX35Zt-Mzm5UDD5vr4319gSNjtnJHD2RWtVLvQdjuZ0jv-XKT3s1xcJO7CvoU-qAehCknukI5ssvv2LgwazyhnkcuFws3isqM9uo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ni44MDNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImJpbGxpbmciOnsicmVxdWVzdGVyUGF5cyI6dHJ1ZX0sImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "c1f385994f4db8e3", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/user-integration%40gcloud-golang-firestore-tests.iam.gserviceaccount.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "159" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "589" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UozEPu-BKQnmBUxTwsOWB7mmIPC5lppmUt6lA150OiXgQttSVRetVysUp299p7PbjI08qchUQl2idgMbfBCScDL5SuoHi_u9ani0DXYX2OV9QXAovQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "ccb3141a79c55333", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoG9XpooOFPlHtC8fg7llzYYETQUMEm0PvG4TbxBF5KdfCdB_sxNi0Yy9KrAmMDPBVyq0Iuaq7mzQOhQe9lXe5PBOc8-CGTXZiEMkMMJe3OYxjExIM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlv
bkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "2d4244737318f2d5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn_UdEXhLLXE1QsYjwtyqQlzX3nDvpAO1WNpRkHhyV39RJKkxMBrcd8f6Xo3VZFDps7iKuQHnW49yRqjh42062lkisOH7otDXrdAKAih4Iz9fzERM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcn
MifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b63c3951975a766e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq-LP6kQLHNpjT5jYVkPzNXyfr838sGr4jGMVPq-FOq01ayCWvyWVAsvUbxp0NSdOYJcW-z0i2NkQLuSKGQYrX_3z8LW1Vb4uL2jxfC8kSiuz28hkA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "0764f2598b67c664", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "3054" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGddSv5GSaZB7qBmb0PigbMbJmWs91JB0W99oOTcbMeUk9QeBf-7QF7W_BpAn3wNxqVD5cBDPgGuKrZCfYv77SgCGxcPW-jrXsUY5h50q8eFL0_3o" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjQ2LjgwM1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo0Ny45MTlaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBST0ifV0sImRlZmF1bHRPYmplY3RBY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6I
nByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "bdb0ff507e4df251", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8t6bOWiJucB8KpVlIp8WjWLSUKssQaynZNguVy_oBM2ggBePUFnoteMcbw_L9aSHoD4hwi91dMaBC4aVc6VuaMSBSVuCe0L_0IymxhBIx3SRj_As" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "d2e66ae2cc0d11bc", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "COq52NHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqQ_qlqWPPh6t8-gwPlD5zRB96zsmFgzyez4LG3XN4uErivnyHcCeeHje_i95VLJcWYXn7_-knEn0faPBCCAIStf4fXGs_Lz_VzHK6CLBZjU-oBxcU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0NiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDI0MDc0NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC4yNDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuMjQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjI0MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDI0MDc0NiZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTAyNDA3NDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwMjQwNzQ2IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIj
oiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDI0MDc0Ni91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTAyNDA3NDYiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3E1Mk5IeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPcTUyTkh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "89b2bc9232a865a4", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3133" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:50 GMT" + ], + "Etag": [ + "CJTz9tHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrymnpCwS6vZ0kM33ZD4nBupWs9hAuaOGpwiNcS4GJctgaLPe9CQ0Ada06yQV1aEfh9FtX6lgYh9jHwbsT6qdgOYOYe9yGDdAF9f91aE4y4VUHQHts" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MDczOTYwNCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MC43MzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTAuNzM5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUwLjczOVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MDczOTYwNCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTA3Mzk2MDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkwNzM5NjA0IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MDczOTYwNC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2
VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTA3Mzk2MDQiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSlR6OXRIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNKVHo5dEh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a70634faba29225b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsIKUUc6WyIHUlDvAIDlXVeOofVG4sVV3Rus8ktfPMTUTI7e4PH4657AoDnNOKKy9TOgt8yUtUvCNvDFQC1xwVApFofRhWT3kcjsEYRgZPCDDsUqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat 
com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5539336327f2e0ba", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrH7pn9r8O1Zu1Nc2aNUO_CO75MyvOJyONGw27TUkfWRSSmFIWJUZ6F3e3OprVF5jKtAkQ73puZbyNCK6mozyHrmPRRa0mtyuBPhkYg4DzPGkXJOSU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0Iiw
idGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "029ec3d54709e7ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqcpItVCw5LaIWvDckOjfoChIifJGtuURBAmgtpzzna_iVOfDSaQOhxiMDVfYkV3mKG7__z0WTNRdVpa2Zs2IPcIAKwqwSeIMaLtz--MluSCHv9kc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "bc0d0ed808bc8e68", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + 
"X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpljfpVGJewuMZUyJZ8DU7j2k6JmfPqXXgkeeu8vGdK5j-C_DUgpubS4nFEp1z8EsN-fUdceCxfiy-FGIJiFpME38hWnztW_WIn6zeSh6LCbwIK9oM" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "b695481e00d2d6d9", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "deklerk-sandbox" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmYf7b7ig2ase_O8qHEsgSAXifAlxVdDy_Zh5Qaqx1IlL1wkTvy1BSblI6ZDz3M2X-Y6KrdJp1IaP6nDU1F_Yhyt84Y7dOG80r2g-x4E7NAyyjV0o" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "2fe3b6cc96ffa451", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "266" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-S2G_BUo7MzXUj-7vUDuVFWfBIA5GJYfTdaeruyelzRGFimnhov8wRB_ozWC2cGPQ-a2xQk8bw_cVV_D2Q7c7rdR1ujaTM3oIjr8vjsJBZXa1VeA" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RNaXNzaW5nPC9Db2RlPjxNZXNzYWdlPkJ1Y2tldCBpcyBhIHJlcXVlc3RlciBwYXlzIGJ1Y2tldCBidXQgbm8gdXNlciBwcm9qZWN0IHByb3ZpZGVkLjwvTWVzc2FnZT48RGV0YWlscz5CdWNrZXQgaXMgUmVxdWVzdGVyIFBheXMgYnVja2V0IGJ1dCBubyBiaWxsaW5nIHByb2plY3QgaWQgcHJvdmlkZWQgZm9yIG5vbi1vd25lci48L0RldGFpbHM+PC9FcnJvcj4=" + } + }, + { + "ID": "59bdcad91d723d0e", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "gcloud-golang-firestore-tests" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "5" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Etag": [ + "\"5d41402abc4b2a76b9719d911017c592\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:24:51 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1556835891548001" + ], + "X-Goog-Hash": [ + "crc32c=mnG7TA==", + "md5=XUFAKrxLKna5cZ2REBfFkg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "5" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3lrNfk5bvup2RRbcsT6CeDACQitaiF3BTMyQ7B23ACtTqwWweq9ooOfZn3CX7tsbzljhlY_009nsTXXQwm7V_xNNd8FZwT44CzSpul-SZIRODC2w" + ] + }, + "Body": "aGVsbG8=" + } + }, + { + "ID": "7b7b6a79cd2a53e3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0003/foo", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ], + "X-Goog-User-Project": [ + "veener-jba" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "342" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpT7Ge_QpcDrqajg6a4kZrmoA55ZSma9IM3pyz0Ow1bu5nLURc0V7cAThbvikg47_O-ox0uMdF6zBbtVtKL7olyMUGd9wqCQ9ubELVoJXiNMDehvXY" + ] + }, + "Body": 
"PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+VXNlclByb2plY3RBY2Nlc3NEZW5pZWQ8L0NvZGU+PE1lc3NhZ2U+UmVxdWVzdGVyIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBwZXJtaXNzaW9ucyBvbiB1c2VyIHByb2plY3QuPC9NZXNzYWdlPjxEZXRhaWxzPmludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIGRvZXMgbm90IGhhdmUgc2VydmljZXVzYWdlLnNlcnZpY2VzLnVzZSBhY2Nlc3MgdG8gcHJvamVjdCA2NDIwODA5MTgxMDEuPC9EZXRhaWxzPjwvRXJyb3I+" + } + }, + { + "ID": "914bca19df35cbed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM7lBsoxgNb-rh81Lfe0tWFGTkXQYfEB12ofkKZ8SK8kK5bPRUsQMVhm3aYB2N9e5_QSBZA25my-t5LpgR8TVW1lNTGd97OoD7bdBhP_CGo3xMbos" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "164b4ffce0c7e233", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqbRkPvid4Zo9mQsbMXlTlOQ1womp_CnHUJcNWRamG_lp4ABhgVPVOdL8lE11J3tmvVkYhzcEPpIJuA_RTvuAbssXFsNo9Fu14cQ44HwhSRYk7r_dU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2
8vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "d04d3cb2ac9d6376", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12183" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:53 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrcH6PdfDpm0th9SnZgYwMYIiYGYz2VvNs-Nb0VYFbZdA4QXfOJiRFg-xKqmDq09HRt7j8OwTDO7UMm2EiqwjPz7j_JPIvfcCzfCnVVoF3XiZFomuQ" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8b3aeb1f423d76c5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + 
"success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoX_zzYo9AYFb44YmIz49k5z8v8ITzEQsDrSxEE50-7AC-ndg18Yo5DFAZ9ZFCDtT5P7kch3c0-YlkhX-5oYCUIm-mmPmoTg0CKWVI0TJ9la9z9-vE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTEuNTQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmly
ZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJpbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "f32edc9a9263c0a1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13039" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok5lyOpE3YUZDwRz_bsA-E-xzWeqaApyadumOOFoA_YmOi_xnvFaJr6gfp9Gaktbpud9mv8qDCIvq1Yu7CyAVY0ScanSfNCke3naR_G7kPKYgrywY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to 
project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:309)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.GetObject.handleRequestReceived(GetObject.java:70)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.get(ObjectsDelegator.java:81)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "da73739950dbfa82", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:54 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdpofRnchjkCQz5LLoghzg-RHoGATH3xbgJVW0LKGBkH025w5EB5RlGmHHnmXuPtK_5Ffyl1bxjmoc_h7_vGvjcxWbHv9-NKe9fLO_oNYRmw-ffvU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTQuNzM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoi
Q09HZXFOTHgvZUVDRUFJPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBST0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFJPSJ9" + } + }, + { + "ID": "94e697431c9323b9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqhr0guIL00afW_qpaRvxEGRSJEWTtfrZP2Tv8Vqt2xas1ivvvMHVbSpT12ltU7FjytFhno9SFXTS8pW1S0S8qJdFctUCa83tkcQRXgN1uPUwhX1ws" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuMTMwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNT
AyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFNPSJ9" + } + }, + { + "ID": "3c5d694690a75054", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12375" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UogsU3nh3dfRYWq9YqTmJc7CfCX1U0b-IrWpDCM7M4YwfuUWgzzzcydWdHLrF0UPqRxNFQj3eMHBbDuHYcF0xuUNah4g5J_nt26feHRp-moh7uFXNw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat 
io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "e050063f79f64820", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3216" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:55 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq4hs_SY8_2HHPHu8NfVIkOdwmZz7h6XH0nWRSTQJSqzQZQGvv1p83Z6W209h3bXsbN9_ESiU3OIyLRwmEldfBP94hRhN5t1JnagcKPcMEOv8FtvHs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNDo1MS41NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjQ6NTUuODI0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI0OjUxLjU0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTg5MTU0ODAwMSZhbHQ9bWVkaWEiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTg5MTU0ODAwMS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9
LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFRPSJ9" + } + }, + { + "ID": "f7edef926cdb7d75", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJjb250ZW50TGFuZ3VhZ2UiOiJlbiJ9Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13231" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqyQ9DLrBDGkx7uOkrIKrzMMYA4YygdFsd1si16n2Lr4I5Rfp1lxwadC3jThiJbE-cDyuZATHtvM2bzpsRDPJzbmKgtxYydHykiB-oCsSFqfwy50gw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:428)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.UpdateAndPatchObject.handleRequestReceived(UpdateAndPatchObject.java:58)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.update(ObjectsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "2745d195a40b86f1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" 
+ ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpGeWJRYTNsu57knuRFhwHuoeUMIJhUtuQj4PLhGDoFcITB2Y--7JgaOCVhepJ6a5RRKvU9d3UrAyrtbGNOVHyW2WAbN_KpfkiFhYq8l7HPERpLDjg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5258a70437064146", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:57 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqImoOCe_iDYCV8XHpaaEXPn-jeAf0M1MyixGju1tNCSTBIMEKDzh_wxVxapMmXWPnOes7YX0T0Fr663x7eJCl85B0EGXggUgp8bqeDV3cJ-mqeDrY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "5cef2c464b7356c8", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + 
"StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpH5br-AVWN2g1nkkM_FJWZIjgOgh2NGLuiyYUS7QD2DSs6B-c0IQHgC2NUyy0eZq4McQhwxBGhUJ05-WeD_bX35i7skN7Jn_q3ELAaDwpi6_x3Z8c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "11cd5140a264e423", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "377" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", 
+ "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2_tsxxuZC3Ni-A0zL0E9w8OVne0sWfLi61Cv48eTWj2R4F6G0AmXZU7J8L-WnDJf4s-6GpZV0SyQqrjBA_Mp75pJfWb6cl9_7oimTy8HPnxAtMwc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQU09In0=" + } + }, + { + "ID": "dac6629e58d45469", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:58 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrTXUzCa6WOvWD6NqoHhkdwRtG0Z4h0QurHOuKU1gyAWzXOt8lc1NVsLqe-6m8siE76k7l8g0EY3Le273_4GFB81gHMhM4qBOOgbwSBgLl4zKHZspU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have 
serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "1c8663192ed534e8", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": 
[ + "AEnB2Uqfc2TzyVBAwGuVpWWQt7OYr3IQEmVj5R7LIr5LSRAhwFernkoD6TFVnJno_ria4LL8P9tSQgzCF7uJpStJTpYgTOiW5FqTxQ-AgbOjFmAMjah3o6U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "51858ae7ea77845a", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqc5V7vaPL-d4aMLJi5QXyxZYWWtGBx0QuvfeB8rFlhwPGPFLQUABxm_XXkR0AOeIqdZ-intSu7I5srWdFKX85aY8r-z4oRTuB-CjGUe5uMiXQzXU4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aW
NlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "8248d1314da9a068", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:24:59 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqrg4CYWnZtCkMXT5KHCcQXY-aSGc6JuuEoVkTtKgObB9qr-dE-5sMKCrbTfpFudhid3ulDpkNuOUmRmBy4k7QQoRxqgues_a8dB85cOybZKZTM864" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "8c1628782bcf7eaf", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2370" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uqmer5wTHG4zziRJUR_QqUbX_xgytHZkMg2KFntbVz8pn55RFN7idAyYcqz3AhqthLD-bHH2Lggdg4MuIdjHVk_kKHxvDdqI0p814avmZPYAhJdvsE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsImVudGl0eSI6InVzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI2J1Y2tldEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9kb21haW4tZ29vZ2xlLmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9hY2wvZG9tYWluLWdvb2dsZS5jb20iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNBTT0ifV19" + } + }, + { + "ID": "6813d64060667ba0", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsvYC9PE_CO5H9MncFfA0uYzQqFBOqu0_zdexcxDTJ22CO_vv1LiRY6ZSC49_FMeUZBA3KdMfOnS7DsSxBpqrLC9ZM6GAs2sFolmZKhp0RWJjvOzs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "76ac0290041d5a26", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Etag": [ + "CAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" 
+ ], + "X-Guploader-Uploadid": [ + "AEnB2Ur-ejQ-G_PN4CMllngdiEh1SqHQNy8TXBA-htpazVSOadp1oThRsYsQuflX2kwkDQHFpeo1UgDPXqi5BRq8qZUBsM2koJIMOp-APWnB8T_qsTHlY30" + ] + }, + "Body": "" + } + }, + { + "ID": "0de0d173f3f5c2cc", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:01 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLOtWcLW6yZk8n72e3qkdl4ft2-s7FJFYyRv_5REkk9Q6d7qk6Pcpcy_HMCJFgKTV5RZzzIU3k5hceknIV1XNGVWK7gOCFimyeb8sFjNZC7y7rypI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "eb2af5bce5b9e1df", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqgaldc1OJvgmfZxcZs4i5d6EXMyFK93ZUJLzSK8Vo-qPYKRwhvPzK8GFv3gqrdyjBbvdc2Uz57nCwy10LhC5AD2pscln8VbaQIOAHqxvkwsqV27Rw" + ] + }, + "Body": 
"{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "ad55ad01147a8c62", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoQpkkPNbb1CGY8lAzUfymkmZ7PNlRK6hK3deBeoMip13ARTb212DIgf4lgUDM8PVQsAqcnKwzNulbZICoGzHwrsBZfyi7p5TGX2yJHLXSAGEY0MBE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "1ef9ea9b9bc398a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur5WrZwygTHM0dDH6L1kypZzaAQtCwxt9wA7VXvgnjwrfwdRnTw0D7K3VLYLRTPDcf8mRFn5CetkcimFxfqUDoWLMF7bH1TLiBr38K6aBU74aRWUd4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5c1cadcffc6c95e0", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo_n__u-lZKgZqveqge9YBw7wQMldCd4t0io7l_L_nlcF4DSPBPvZd9jGPCstE1EYuQS9L-Z3Y53Q15FlqM1IcmRDv6bYEJQWqkpuzCRRa_TLh9P7A" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "4a615256ab8c55b4", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "119" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Etag": [ + "CAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpwGIlBhvugAIHxiObS0RPpJ40OY_R3exJfPh5IHLLB3SrdYtGMpD01QLdxc3E0IPGkBiTrwM-Z2X1gMcxuOqJWJUrhV4xTFe_n5G8bBxzievz39F4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDQVU9In0=" + } + }, + { + "ID": "63c0acd95aa571a1", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqNu5DFt-lpr-_4YX8LRIhoi3KVRYVgFAlG7WOPsY7IGJ2NLZSF3SQhmkvfL_IKZ0FWIwx1nGT6HBBe9t2RYXNE1kXis-9KgEaAWhrEByTud5QGx5c" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
[vendored test fixture, truncated: an HTTP-replay recording from the cloud.google.com/go storage integration tests. The entries in this hunk record PUT, GET, and DELETE requests against the defaultObjectAcl endpoint of bucket go-integration-test-20190502-80633403432013-0003 and exercise Requester Pays billing: requests without a userProject are rejected with 400 USER_PROJECT_MISSING for non-owners; requests with userProject=deklerk-sandbox or userProject=gcloud-golang-firestore-tests succeed (200/204); requests with userProject=veener-jba are rejected with 403 USER_PROJECT_ACCESS_DENIED because the calling service account lacks serviceusage.services.use on that project; and deleting the already-removed domain-google.com ACL entry returns 404 ACL_SCOPE_NOT_FOUND. Every error body embeds the same server-side Java stack trace (BigstoreException wrapped in RpcException), repeated verbatim in each response.]
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "fad4bdab588e0f3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:08 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUefKoF0asUvxzmAtX5z5m_5FRy9UfliKkjQN8cYyrHAotGwiQA72QN0lYfA0HXbe7gFRokwBAY8R5OpHcEdRq5K9sw8AwHuNtruFtBMxjN0D3hR4" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "23bb9ce53da2f525", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Upa2KE5JJOXq2FOn7RHasJpOeeHqWU2kk3BC4cQcGQ9jGzh6XIksN6Z6u1NVoCbxW7-3zGiZMdAop-oa_EuyPAauD__JgzNPdngeNeXO7lsGaVI6i8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "46482e47e655836d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/defaultObjectAcl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Urze42blCAS2Dskbf5jkbCkdo5IhfOjcTlcufV1ooZd5x_QD0zK1gRoXLXfvLjKHEKaJ7cuDeSRmNjuqgEoxJFMmXUoPOIetWLYbH0G-lJcM6tydew" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "856a1fb5ffdb000f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:09 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": 
[ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFSnYuPlv_JejZ0nEjoWRXeLwj0MS-ddTlIaSOjaK5BudyIcIOeVDEeyFYWsF-GCZzMC0F7v-UEw0_lbsYBXCMZcLp6WZu_s3OPDREhpz2s3Txw-s" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "a74c3e1b541a9ffc", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ury3MiMZho3xaSHmFgwHNvKIHTa0_gZzfKvX8FW6iVGNkdSbXvmVypIUuIoKXDjd4rOXpFghdoJlES9A_iXUvZyfWAZLvWqR1w6sDRymrIARVd0dPE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "913da4fa24ea5f1f", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKAxObyNBS_Mew44t3k1u80cLBGQW_I2ohrF96rIjZRxFscjGFrvsOHfLxw78rTRnpHmfA0uXnNz8T2FPc_Op16Lnn1YkfyOezSEhDBx11qZbinEE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "5dbdf24cae75e565", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "463" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:10 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpoJgRZ665WjiUif7SnQ-Vg4B0kJl-UQ_kIk5XgmcQIL5zmCBp3NCxjksybkvNKpGcQnIJSLevfyWd2fugRv5y5vxmEBgqFffIy8ibTIyBDH4b7R74" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvZG9tYWluLWdvb2dsZS5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL2RvbWFpbi1nb29nbGUuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU4OTE1NDgwMDEiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIiLCJkb21haW4iOiJnb29nbGUuY29tIiwiZXRhZyI6IkNPR2VxTkx4L2VFQ0VBVT0ifQ==" + } + }, + { + "ID": "f41892a142fe6d2b", + "Request": { + "Method": "PUT", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "107" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJlbnRpdHkiOiJkb21haW4tZ29vZ2xlLmNvbSIsInJvbGUiOiJSRUFERVIifQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpThInLTHhtepE7J4k-Rg6OoLRyOqnjPz4sZmfIH6wp5TSIyLrZQhExKAP52XbwUwMYp7x-xypSMY5kfR66I-dmpRWaFkfs0Lhy-Z6kqb5JUXSppqk" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat 
com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:90)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.UpdateAcls.handleRequestReceived(UpdateAcls.java:27)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.update(AccessControlsDelegator.java:103)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "5eb0710be2571dab", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UpbetUCGwYg4rhIR-DP3ntPqbkFwSqutNltVR55I4qedP-rZ5hC7ospMy6hZ4aQaNOhNv0frmst8MQZ0j6Mp8BcTC6qy7mU8bhEAiMGLycR5XxUWfI" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzd
C0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "56aaafb0a24b0644", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqQczDm-dzjgTlLFcJ2lXQrMTpQ_8UP-1hNbGj7eZEgZcbxWA-oyXqEe4tJXq3gYdZ3mEIcAzaDtNeBHWidpIev7kdXWank04_JSWbQtU2UomJqeU" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJi
dWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "3bbc40d778939612", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12203" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur5kJWpLwxz76BKPll75-nOsLv_uoh7X-TW0SbNHCYEzkGq8VZ-W8a5tKGUF6rKea4ejr4MhcqJVr1Zu7O-zNAfQWJMsHC9CTkOrPa5Iu1s0KBJDdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "a2868e9e376e356b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2800" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAU=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UoyZfAJB3TroGxzYv0gy7dHUwpU0jtIeU_EyQSFQKzHlHbyRa33r_a-B1aHw5tHaoawuDvv7skisdx10yhmm9ZRrPw0ibwpMkX5Cwhf0OK85DF84jM" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9scyIsIml0ZW1zIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJvYmplY3QiOiJmb28iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTg5MTU0ODAwMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU4OTE1NDgwMDEvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvdXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ09HZXFOTHgvZUVDRUFVPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2Zvby8xNTU2ODM1ODkxNTQ4MDAxL2RvbWFpbi1nb29nbGUuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzM
jAxMy0wMDAzL28vZm9vL2FjbC9kb21haW4tZ29vZ2xlLmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1ODkxNTQ4MDAxIiwiZW50aXR5IjoiZG9tYWluLWdvb2dsZS5jb20iLCJyb2xlIjoiUkVBREVSIiwiZG9tYWluIjoiZ29vZ2xlLmNvbSIsImV0YWciOiJDT0dlcU5MeC9lRUNFQVU9In1dfQ==" + } + }, + { + "ID": "ffb216dc74ccc27e", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13059" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:12 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqBumxgw_FTJlt7J3xLsBDxljAcEyC2G5aQqhEibawMttB-ogPabqDfz8ZN65WtOikIggZQ8OqW8zigV4Q5vfQkog7NMasAuivcMjhAsgqv_6qWv_E" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat 
com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:101)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.ListAcls.handleRequestReceived(ListAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.list(AccessControlsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "183157f40c6c1bcd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Etag": [ + "COGeqNLx/eECEAY=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrC6xxVQeIIjitNwDv5PhDUIBQbtGdHJDi_AmLs8vTbGzkby-5hguMIwI_UeHlCuGF1u5jBoVwCOgSqJBiJJ5fxh8oCzBn-nHGcGAdIOeQt7gTL0RQ" + ] + }, + "Body": "" + } + }, + { + "ID": "4b6b925053570053", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:13 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpiIBwh04pJH1-X9d1oJluF_2ZCJEFLzF4E7SI-easf2noIolxavCbpH7NP9GMmNXnVK_ZJm4kW8coV5ArXDr-Rwodjnd9Bn2hFiJ_puNeWWldJ6mA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was 
not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "9b4f9c448170e89d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHc1VVBdvXFq9aDPAprpXNP5xPt-ZtPdYXwCQPHX_s4gkCMom31AtHeDocV_hker5qryWoYnRD8PPu5c7JqDNQ75v7GgE1vO5xAt7AqWPoLU5MO5A" + ] + }, + 
"Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "692d41cf9ea91eb2", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11631" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpuFB_mdJg9Fd7Sj06Xdcrj7Up2xXVc_P-SgISEEtiv4v9doPXzNbzBz92Q2UfEchYWSQnikHhS34dUAOefj9J4vaBYqjc9RjFmrn8YcPNyS8aWKgI" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.scope, message=null, unnamedArguments=[]}, location=entity.resource_id.scope, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACL_SCOPE_NOT_FOUND: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Specified ACL scope was not found\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "db035a3cc51de007", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/acl/domain-google.com?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:14 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrGb7BqO44vNpZjBnLrSI6_jVCVRFevSqm5eux2DIE2zmUY3sH0T0st7OOsLPcBjvLSbDfijMcdJwsgpM1fQGcerB9bS8Ffd4Qgzdjth0I8FreV4Vs" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:86)\n\tat com.google.cloud.bigstore.api.json.handlers.acls.DeleteAcls.handleRequestReceived(DeleteAcls.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.AccessControlsDelegator.delete(AccessControlsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "e1d95794cb6be989", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVcH2L0r9wGab--fnRuKptLRoUoHCUHfRegh3rKNQExHVsYLnt1NFwOKFa_6hRbeYXO1aY8-F-KiD6IY3sWykIKXr5iqelEJHTWEMQ8wHuzIcWLO8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTUyNjYyMTQiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuMjY1WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1LjI2NVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS4yNjVaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1MjY2MjE0JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTI2NjIxNC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTI2NjIxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW
0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1MjY2MjE0L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1MjY2MjE0IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tieHo5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDS2J4ejkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "297f13e741cceb04", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3273" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:15 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UroLXQs6945-UOUyN7qOhICUNnGSzCVrNCv5DD9uMyvgD08zpuufBZj8WM8Fxe0DuOuQowqbPEn1UdJdbFTW7puyeY4zX8fM6UgT7SZIVB0Xi-B0OM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTU4NDE5OTciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTUuODQxWiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE1Ljg0MVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNS44NDFaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE1ODQxOTk3JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNTg0MTk5Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNTg0MTk5NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE1ODQxOTk3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaW
FtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE1ODQxOTk3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ00yRDg5M3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTTJEODkzeC9lRUNFQUU9In19" + } + }, + { + "ID": "9617f04d2dc95a7c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12683" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqeJiJeDLLZgn9vlRyNk1plY-7dcPXaz47gPNAhMqmSx5ZMvg6pqWj18PVUR65UGuWE7Amtua_zqsv0dVyDZE2FFceMeIQcQ2n6noklvRS9kxPXcEU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "2309e069a65c894f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3333" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + 
"X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxhvVZmlJMvR93BoI0JsL0TYbDN-kmlYHQOOlRfb-Nnqpc8iUImRtJT8Qvx0XWrOsALmLv4Oub_aeCCWkHPvqaF28oAazlhqRZYoxrPlaQlW0N1Qs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weSIsIm5hbWUiOiJjb3B5IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTY2MzcxNjUiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTYuNjM2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE2LjYzNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNi42MzZaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHk/Z2VuZXJhdGlvbj0xNTU2ODM1OTE2NjM3MTY1JmFsdD1tZWRpYSIsImNvbnRlbnRMYW5ndWFnZSI6ImVuIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2NvcHkvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL2NvcHkvMTU1NjgzNTkxNjYzNzE2NS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29weS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImNvcHkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNjYzNzE2NSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYw
MTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvY29weS8xNTU2ODM1OTE2NjM3MTY1L3VzZXItaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb3B5L2FjbC91c2VyLWludGVncmF0aW9uQGdjbG91ZC1nb2xhbmctZmlyZXN0b3JlLXRlc3RzLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiY29weSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE2NjM3MTY1IiwiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiaW50ZWdyYXRpb25AZ2Nsb3VkLWdvbGFuZy1maXJlc3RvcmUtdGVzdHMuaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ08zSG85N3gvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1pbnRlZ3JhdGlvbkBnY2xvdWQtZ29sYW5nLWZpcmVzdG9yZS10ZXN0cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTzNIbzk3eC9lRUNFQUU9In19" + } + }, + { + "ID": "d2fe101ea1da654d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo/rewriteTo/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026projection=full\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqjFigs6aYtqXW_7VucOIgJY2MQZwuLv9B12s2ffhWgWDxtJyjLinG4A8U9gKBCpiTt9jUvdHKtaG20qZfyfN9Q1eVAkUQh_zv7ESbqnWhsiheI7zw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat 
com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 18 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.rewrite(RewriteObject.java:200)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:193)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.RewriteObject.handleRequestReceived(RewriteObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.rewrite(ObjectsDelegator.java:121)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
18 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "6620f07aff04b6fd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:17 GMT" + ], + "Etag": [ + "CP/B0t7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + 
"no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpyWMaddvNz0IWnQ6KIl-tv6fJZCvrqTlGuaoHvKqXhxb3O1EKUIEirKSVcaedFZyxtjLhPD7Nj6qf1CRGWG1VnVigo5Wf3sYFDuuLvctxVpoHT0rY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTc0MDY0NjMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxNzQwNjQ2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxNy40MDZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTcuNDA2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE3LjQwNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTc0MDY0NjMmYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNQL0IwdDd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7f88847ad9c49799", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Etag": [ + "CKav+N7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urtn69BB0ytbCZpuCbus4w__tiLeBpqeNzD2gKl5KN2LgP9ZJOIjudGKpDOJRhyW_WQjVOCJBC1CSB_AgE0MdPzVDAKWt13VfQ8aKe3pMWh4y1kOlI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTgwMjY2NjIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODAyNjY2MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC4wMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguMDI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4LjAyNloiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTgwMjY2NjImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLYXYrTjd4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "a21f3ddca57ed424", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12267" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:18 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrE9jrLJ0W1PUHWT3ldYhVsL9nNhAeFPU2K8g2rCMimty07QlN95esxj7dBAySQ-nzvK17L3WA7vjmxjCpeQADa2bztj6ke8NZjb8fuWkQHsnnzKXA" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "08fda3fa173e6e11", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "742" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CKy2sd/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + 
"UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqfGXVJ2-MQaZWJ-JS6runpSycA3GDKIXYHDGf3VBOXnADbUzL2YhZVmlQcW_95jXY0eMkK4Z5tSRk3cxGP_84T_1t3YDYQS2p37ZouBk7GfX-4xxw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9jb21wb3NlLzE1NTY4MzU5MTg5NjE0NTIiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9jb21wb3NlIiwibmFtZSI6ImNvbXBvc2UiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxODk2MTQ1MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTguOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE4Ljk2MVoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vY29tcG9zZT9nZW5lcmF0aW9uPTE1NTY4MzU5MTg5NjE0NTImYWx0PW1lZGlhIiwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNLeTJzZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "0c9e19bd4ec37de2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose/compose?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "127" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6ImZvbyJ9LHsibmFtZSI6ImNvcHkifV19Cg==" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13123" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGREJ2AZDcrZiJzPX5ltwlB28vRYhZ_xv4LMIO3La2IgFWk4wd5IVrqBs3z2epgtg7wkPpZm4qPH9zTzSIsXNxdLLHcNMMYr2azidefxtJVgXqgDY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:199)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ComposeObject.handleRequestReceived(ComposeObject.java:47)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.compose(ObjectsDelegator.java:126)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "84daee26f7d989ce", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:19 GMT" + ], + "Etag": [ + "CLmF4d/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": 
[ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoLuw-z5wwTKy-X92_i7pyTrimolUNRUGrElhep6rUH_mmpgu_Vxp-1ncr8x8XNXHLs8NFRW5S7WVUsaDRRTrC5KBPea4EoFLANK8MFY0FpTkXr7_g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkxOTc0MTYyNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToxOS43NDFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MTkuNzQxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjE5Ljc0MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkxOTc0MTYyNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MTk3NDE2MjUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTE5NzQxNjI1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjd
EFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkxOTc0MTYyNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MTk3NDE2MjUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTG1GNGQveC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMbUY0ZC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "8457a5701f4ced08", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKF1M6PpAT354ON3809wMCor-B9KITRuEV34MQczP3OD4Co0sJYtNSub2FUzA8qanTDrtANRK0RuxPTz314o3HQro2HLOoyKZBow5CZ4sfsunKHv0" + ] + }, + "Body": "" + } + }, + { + "ID": "1c2ba8a7670e2218", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Etag": [ + "CP+SgODx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoq9hgjjwBbSH1zXN2UlNtWO31Qf5uk4ijjGCQlETB9_5H_CouvU3q4tyHAKEbIBlaW1_mNk3Fsp6LgXqsPPKvFzs3-WHeADAgc2-GpD-0inoYUHMs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2MyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDI1MTI2MyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC4yNTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuMjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjI1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDI1MTI2MyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjAyNTEyNjMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwMjUxMjYzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDI1MTI2My91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5
kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjAyNTEyNjMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUCtTZ09EeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNQK1NnT0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7e6d2f97bd881584", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohQH2nTuRg_FtGhlfr9zC26C6lKFA-aZTf4GbiV5kWZCJ9E4YLHfIUKLpCLIgEfX9TAvi4oPNWfEBpbY-orlescLxrxYcB1U5fLmzzHQR2R0teR-s" + ] + }, + "Body": "" + } + }, + { + "ID": "48e189abff98ea93", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CM3Aq+Dx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uq_NscG9s3iq-8NZq8vQhzdc9FJMaAfq_0GborrX84b8gG2zpbUNtEVnrjFjLyOXqBeJwthPgPUPcsGbIcD597LaPlxt-VNaetM2BLtMtYry8pPeQ8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMDk2MTYxMyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMC45NjFaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjAuOTYxWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIwLjk2MVoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMDk2MTYxMyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjA5NjE2MTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIwOTYxNjEzIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMDk2MTYxMy91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nb
GVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjA5NjE2MTMiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTTNBcStEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNNM0FxK0R4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "02d6922c26c06cbd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 400, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "12243" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrmjSJHhErkbzi8feJ_-8XOMqsIzGrEe6frQzWXQ7dPl0D8MW1mMHClKdwb2zBOTvpx7mXQWy2STZYSqx2o7O1Tr2DB1RZfwCZ3xp1jgw3NCICi7Xw" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Bucket is requester pays bucket but no user project provided.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=badRequest, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Bucket is requester pays bucket but no user project provided., unnamedArguments=[]}, location=null, message=Bucket is requester pays bucket but no user project provided., reason=required, rpcCode=400} Bucket is requester pays bucket but no user project provided.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_MISSING: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Bucket is Requester Pays bucket but no billing project id provided for non-owner.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":400,"message":"Bucket is requester pays bucket but no user project provided."}}" + } + }, + { + "ID": "3fee93fca0ef933c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:21 GMT" + ], + "Etag": [ + "CLfH1eDx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + 
"apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLO91a0iSaTsK7EA2iN621UX-atvJ-8m8BeqeR8_o_CQLB8Pco5FMXznwQY7DZ8u0GPd1BvLE98s5S7T1Z6yL66OKL0CFFUntLBqNAKlji2Dcme6g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMTY1MDYxNSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMS42NTBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjEuNjUwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIxLjY1MFoiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMTY1MDYxNSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjE2NTA2MTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIxNjUwNjE1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA
1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMTY1MDYxNS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjE2NTA2MTUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTGZIMWVEeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNMZkgxZUR4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "5c26cf93373a296a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=gcloud-golang-firestore-tests", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urx3fvI3sQc5JiIazK1OQw5W12poVdjRyBKxIBM6N98jEJli9F0O33KVHHcHaP-QybHub31QmLNngZpzcVKJq33DSaVMW2Vpi5BUaVAAUL2jMoyOxs" + ] + }, + "Body": "" + } + }, + { + "ID": "f8b5020ef71e4047", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJuYW1lIjoiZm9vIn0K", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3112" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Etag": [ + "CIungeHx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ 
+ "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq8QEz45d-uPa7qZ6Nl_SMqw2iepQXoM4aAkTx4YCqArXnSKI1UO3ZhJ5-_PDg3Uq6PPa2JDBTbxckiZCRMQvf0Cv6E_8Cwk3jKXGkneWFJYSeMAts" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2ZvbyIsIm5hbWUiOiJmb28iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkyMjM2NzM3MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyMi4zNjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MjIuMzY2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjIyLjM2NloiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vP2dlbmVyYXRpb249MTU1NjgzNTkyMjM2NzM3MSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvby9mb28vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDMvZm9vLzE1NTY4MzU5MjIzNjczNzEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9vL2Zvby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMyIsIm9iamVjdCI6ImZvbyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTIyMzY3MzcxIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMy9mb28vMTU1NjgzNTkyMjM2NzM3MS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bn
QuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzL28vZm9vL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAzIiwib2JqZWN0IjoiZm9vIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MjIzNjczNzEiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSXVuZ2VIeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Im1uRzdUQT09IiwiZXRhZyI6IkNJdW5nZUh4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "7cfadd8c1e7c0bd8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false\u0026userProject=veener-jba", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13099" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:22 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpjVnPaklKmbO8qV2lapwEIAwPjvf4HSJs3kNHHO_xZH4bV65pVPLKCb2qjMPCYSwEKRZzH6NpYjm6WlxsBdJnfBvpyRpIR017q9iJDyBcVPNUoDn8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat 
com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., unnamedArguments=[]}, location=null, message=integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101., reason=forbidden, rpcCode=403} integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::USER_PROJECT_ACCESS_DENIED: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"integration@gcloud-golang-firestore-tests.iam.gserviceaccount.com does not have serviceusage.services.use access to project 642080918101."}}" + } + }, + { + "ID": "b1381b9995cf35f5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/foo?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrD1hmfKK3yAwFXKqIwvCsEVMb_e7MWysJc7zbyVjZ_sbD9P41lzc0gYSef9yl8CRa6B0RtPq14V5MBSdy6C0i_cAkgANDN1da3O5QKELvACU11moU" + ] + }, + "Body": "" + } + }, + { + "ID": "5e06ccad06bbd110", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/copy?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2FRcf4QGZH2KhkP3kATb9eOy4S9lgrA3NQXYqhc1KBZDbcZN5NHF4MB6WiTxOraNE0HhkW3SOg8sp1gS4kAcWRZ2G0EGWmLgL6RUcwr3a2O-WY10" + ] + }, + "Body": "" + } + }, + { + "ID": "b637ccaa36faa55b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003/o/compose?alt=json\u0026prettyPrint=false\u0026userProject=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:23 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGzSAdUK8pPs53JjaSJVQJObMJRsfQ5FIpSL0D4tNfft68cfE8Xb5fjSsAQG_PfmKZyX1Ind-s6dW6FgwREeHzu_OnEeOwCOaT8EZi2N4vNeWLvac" + ] + }, + "Body": "" + } + }, + { + "ID": "15055c361faad5c1", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0003?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" 
+ ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqwmXVRTmXX3P5iId4-EjuA1oviIQD-Vbdr7M5rgd7d5IxNvmPVkZz_zFyMBNoHd_DzXnPMN5R7grAe1yDdOzbyafIBwRcFsLh4r0a7ghp4LkNSLZk" + ] + }, + "Body": "" + } + }, + { + "ID": "307f8a44d4ce6e9f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpaHmcEJBKGI7CtW9fK8-j5Z-oRk_dTwMS40wVaoNH1PN7k6x6d_9aD3w3ce6WGZJF5OYxkdoifp6L2vVz5lIGjzcmmIZ4Wxp1vktBOv8hLa417Mp4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "f8df88961018be68", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "121" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "297" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:24 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqWiAA6LvnU4rExe2rVTwwFd51SI93o_d2tOj9OY1jk_qe1Yd1eiTXIa2eNecJQsmgwuFr4BUQoWy8ndutrCqGucmVIRL0jA6-i-vaGGVRF8uEe0AM" + ] + }, + "Body": 
"eyJpZCI6IjExIiwidG9waWMiOiIvL3B1YnN1Yi5nb29nbGVhcGlzLmNvbS9wcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvdG9waWNzL2dvLXN0b3JhZ2Utbm90aWZpY2F0aW9uLXRlc3QiLCJwYXlsb2FkX2Zvcm1hdCI6Ik5PTkUiLCJldGFnIjoiMTEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm90aWZpY2F0aW9uQ29uZmlncy8xMSIsImtpbmQiOiJzdG9yYWdlI25vdGlmaWNhdGlvbiJ9" + } + }, + { + "ID": "45f44fa6c3668273", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "340" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFWgB9V-ilyDIBP7KEF3SY7uTg-rikYE1ooEgWYkiXb2cW6oOwwtH1W7oe24WU2A9oyJIGOSskz3_icHqmMn7CfPoZQ-Lu5QdtZzv_5062_-UzAt0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIiwiaXRlbXMiOlt7ImlkIjoiMTEiLCJ0b3BpYyI6Ii8vcHVic3ViLmdvb2dsZWFwaXMuY29tL3Byb2plY3RzL2Rla2xlcmstc2FuZGJveC90b3BpY3MvZ28tc3RvcmFnZS1ub3RpZmljYXRpb24tdGVzdCIsInBheWxvYWRfZm9ybWF0IjoiTk9ORSIsImV0YWciOiIxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9ub3RpZmljYXRpb25Db25maWdzLzExIiwia2luZCI6InN0b3JhZ2Ujbm90aWZpY2F0aW9uIn1dfQ==" + } + }, + { + "ID": "08c2546e2c262ee7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs/11?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmXr5TZZPRnABz_XbbZpjwU3iPz2e802RkcmNFr3sLzUx205tdmAb6vVWMNlYMUGZ5tGoddlqQ_oPnz0Xdvpz8CiD2eDLMDyrMbxVFjc3BinwBo0" + ] + }, + "Body": "" + } + }, + { + "ID": "efc8b67f9c2f4b40", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/notificationConfigs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "32" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UraMBYT56qFvKLe_OPJfpJRhTy2qSpQyRjP6CScOnnijgaQ88eQtVahktX_Vsvy-G7J11d7GNT64FnpqZrewu1sUmuNNLiEtmkqVFILdNcWed6i1w4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNub3RpZmljYXRpb25zIn0=" + } + }, + { + "ID": "6dd92f9ce3b15a57", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:25 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:25 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpedRCcxIOhXCWdipyV2E0R3z00CUlOK6rPlof1gpKuQbLeJmvMoPFn28o8zqmmeVJ5rbX41bB6Hp116-_ISgEXl4Htmc1VS0Aq41lJQiN_mIvozbY" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "914cb60452456134", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2F\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "12632" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpLE4EijIrGqoHRLrCqmprZtxBU2JtHLLsZ-nIakblnuZX8s6FZA1SEaczyybdjB32loN1-gVCf83PFM4x06S24ATKp9-xRgQ9QNC9fwVH_ec4e9JQ" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF/1475599144579000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B1.TIF","bucket":"gcp-public-data-landsat","generation":"1475599144579000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:04.545Z","updated":"2016-10-04T16:39:04.545Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:04.545Z","size":"74721736","md5Hash":"835L6B5frB0zCB6s22r2Sw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B1.TIF?generation=1475599144579000&alt=media","crc32c":"934Brg==","etag":"CLjf35bLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF/1475599310042000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B10.TIF","bucket":"gcp-public-data-landsat","generation":"1475599310042000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:50.002Z","updated":"2016-10-04T16:41:50.002Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:50.002Z","size":"58681228","md5Hash":"BW623xHg15IhV24mbrL+Aw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B10.TIF?generation=1475599310042000&alt=media","crc32c":"xzV2fg==","etag":"CJDn0uXLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF/1475599319188000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/
LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B11.TIF","bucket":"gcp-public-data-landsat","generation":"1475599319188000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:59.149Z","updated":"2016-10-04T16:41:59.149Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:59.149Z","size":"56796439","md5Hash":"FOxiyxJXqAflRT8lFnSdOg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B11.TIF?generation=1475599319188000&alt=media","crc32c":"p/HFVw==","etag":"CKCEgerLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF/1475599161224000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B2.TIF","bucket":"gcp-public-data-landsat","generation":"1475599161224000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:21.160Z","updated":"2016-10-04T16:39:21.160Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:21.160Z","size":"77149771","md5Hash":"MP22zjOo2Ns0iY4MTPJRwA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B2.TIF?generation=1475599161224000&alt=media","crc32c":"rI8YRg==","etag":"CMDW157Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF/1475599178435000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B3.TIF","bucket":"gcp-public-data-landsat","generation":"1475599178435000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:38.376Z","updated":"2016-10-04T16:39:38.376Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:38.376Z","size":"80293687","md5Hash":"vQMiGeDuBg6cr3XsfIEjoQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B3.TIF?generation=1475599178435000&alt=media","crc32c":"uZBrnA==","etag":"CLiT8qbLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF/1475599194268000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B4.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B4.TIF","bucket":"gcp-public-data-landsat","generation":"1475599194268000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:39:54.211Z","updated":"2016-10-04T16:39:54.211Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:39:54.211Z","size":"84494375","md5Hash":"FWeVA01ZO0+mA+ERFczuhA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC804403420162
59LGN00_B4.TIF?generation=1475599194268000&alt=media","crc32c":"Wes5oQ==","etag":"CODCuK7Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF/1475599202979000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B5.TIF","bucket":"gcp-public-data-landsat","generation":"1475599202979000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:02.937Z","updated":"2016-10-04T16:40:02.937Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:02.937Z","size":"89318467","md5Hash":"p4oyKHAGo5Ky3Kg1TK1ZQw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B5.TIF?generation=1475599202979000&alt=media","crc32c":"pTYuuw==","etag":"CLiZzLLLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF/1475599233481000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B6.TIF","bucket":"gcp-public-data-landsat","generation":"1475599233481000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:33.349Z","updated":"2016-10-04T16:40:33.349Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:33.349Z","size":"89465767","md5Hash":"2Z72GUOKtlgzT9VRSGYXjA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B6.TIF?generation=1475599233481000&alt=media","crc32c":"INXHbQ==","etag":"CKjykcHLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF/1475599241055000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B7.TIF","bucket":"gcp-public-data-landsat","generation":"1475599241055000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:40:41.021Z","updated":"2016-10-04T16:40:41.021Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:40:41.021Z","size":"86462614","md5Hash":"8gPNQ7QZoF2CNZZ9Emrlog==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B7.TIF?generation=1475599241055000&alt=media","crc32c":"uwCD+A==","etag":"CJiW4MTLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF/1475599281338000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B8.TIF","bucket":"gcp-public-data-landsat","generation":"1475599281338000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:21.300Z","upda
ted":"2016-10-04T16:41:21.300Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:21.300Z","size":"318887774","md5Hash":"y795LrUzBwk2tL6PM01cEA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B8.TIF?generation=1475599281338000&alt=media","crc32c":"Z3+ZhQ==","etag":"CJDt+tfLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF/1475599291425000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_B9.TIF","bucket":"gcp-public-data-landsat","generation":"1475599291425000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:41:31.361Z","updated":"2016-10-04T16:41:31.361Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:41:31.361Z","size":"44308205","md5Hash":"5B41E2DBbY52pYPUGVh95g==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_B9.TIF?generation=1475599291425000&alt=media","crc32c":"a0ODQw==","etag":"COjB4tzLwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF/1475599327222000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_BQA.TIF","bucket":"gcp-public-data-landsat","generation":"1475599327222000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.159Z","updated":"2016-10-04T16:42:07.159Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.159Z","size":"3354719","md5Hash":"zqigvl5Envmi/GLc8yH51A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_BQA.TIF?generation=1475599327222000&alt=media","crc32c":"WOBgKA==","etag":"CPCx6+3Lwc8CEAE="},{"kind":"storage#object","id":"gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt/1475599327662000","selfLink":"https://www.googleapis.com/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt","name":"LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt","bucket":"gcp-public-data-landsat","generation":"1475599327662000","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2016-10-04T16:42:07.618Z","updated":"2016-10-04T16:42:07.618Z","storageClass":"STANDARD","timeStorageClassUpdated":"2016-10-04T16:42:07.618Z","size":"7903","md5Hash":"el/UdDvWR0hfiElvrbBcUQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/gcp-public-data-landsat/o/LC08%2FPRE%2F044%2F034%2FLC80440342016259LGN00%2FLC80440342016259LGN00_MTL.txt?generation=1475599327662000&alt=media","crc32c":"PWBt8g==","etag":"CLCfhu7Lwc8CEAE="}]}" + } + }, + { + "ID": "3f3f45cd2b718e17", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/noauth", + "Header": { + "Accept-Encoding": [ + "gzip" + ], 
+ "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "247" + ], + "Content-Type": [ + "application/xml; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqsZlmkTyf2_fqITeaIiM8s2MLUvz5qFiP_rzA4Mf6Q9LMxsiQeP-GBRwHON_XvnG2qef3XL1EzgTLA_GxtLKZRjdzOBndJPf9XbJa9KjJCIPlducQ" + ] + }, + "Body": "PD94bWwgdmVyc2lvbj0nMS4wJyBlbmNvZGluZz0nVVRGLTgnPz48RXJyb3I+PENvZGU+QWNjZXNzRGVuaWVkPC9Db2RlPjxNZXNzYWdlPkFjY2VzcyBkZW5pZWQuPC9NZXNzYWdlPjxEZXRhaWxzPkFub255bW91cyBjYWxsZXIgZG9lcyBub3QgaGF2ZSBzdG9yYWdlLm9iamVjdHMuZ2V0IGFjY2VzcyB0byBnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvbm9hdXRoLjwvRGV0YWlscz48L0Vycm9yPg==" + } + }, + { + "ID": "110cdd2daefc6162", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoibm9hdXRoIn0K", + "Yg==" + ] + }, + "Response": { + "StatusCode": 401, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "30405" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:26 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "Www-Authenticate": [ + "Bearer realm=\"https://accounts.google.com/\"" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrGgfg36ZmqdKbc0NqBivwXULVnF2NuBAD31rJqzC5Rj9xgHFbRnwxJCbdxCuUdlAhGW_-H88mBadNGmqch0fmAyAd80HPANBkUmIgkolsL4HK7XAU" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"required","message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.","locationType":"header","location":"Authorization","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat 
com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={WWW-Authenticate=[Bearer realm=\"https://accounts.google.com/\"]}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.auth.AuthenticatorInterceptor.addChallengeHeader(AuthenticatorInterceptor.java:269)\n\tat com.google.api.server.auth.AuthenticatorInterceptor.processErrorResponse(AuthenticatorInterceptor.java:236)\n\tat com.google.api.server.auth.GaiaMintInterceptor.processErrorResponse(GaiaMintInterceptor.java:768)\n\tat com.google.api.server.core.intercept.AroundInterceptorWrapper.processErrorResponse(AroundInterceptorWrapper.java:28)\n\tat com.google.api.server.stats.StatsBootstrap$InterceptorStatsRecorder.processErrorResponse(StatsBootstrap.java:315)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.handleErrorResponse(Interceptions.java:202)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception.access$200(Interceptions.java:103)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:144)\n\tat com.google.api.server.core.intercept.Interceptions$AroundInterception$1.call(Interceptions.java:137)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.setException(AbstractFuture.java:753)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:68)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\nCaused by: com.google.api.server.core.Fault: ImmutableErrorDefinition{base=LOGIN_REQUIRED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: 
cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=unauthorized, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.REQUIRED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=REQUIRED, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.authenticated_user, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., unnamedArguments=[]}, location=headers.Authorization, message=Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth., reason=required, rpcCode=401} Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.InsertObject.handleRequestReceived(InsertObject.java:44)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.insert(ObjectsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\t... 
20 more\n"}],"code":401,"message":"Anonymous caller does not have storage.objects.create access to go-integration-test-20190502-80633403432013-0001/noauth."}}" + } + }, + { + "ID": "848c7013eb1665b1", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqbrgn51bDFbwZ1c2_BxCHhog7If9w6ooKAtb2YCerQcObpiFJZqT3-Jn7zTXEEPVuysmxKw4PmvEOmkbCAJVkmbEVZm6z877JKFhrXTrkWqWYooq8" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "a5f818071a0f22da", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrozB-0-kEtkjITLFZA2uQpw77J_Zc6GErrYrIyxkPTWeUHJNBRLw4JXhyIEFG8szc7bhqblQVKoKYiZ4myOcfL5zIM9KYGIbCyAP9e4sEEdx2pBe0" + ] + }, + "Body": "GROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "55eeb942e5603431", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=1-" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "7902" + ], + "Content-Range": [ + "bytes 1-7902/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqlAytrsQn43Os8JxSOx4C8v9ApqqtqwEE7kZ-voKAcmcYe32lG7ANHxzNrwkqN8bbLLohoAHd88brZDVaC3U6Q01dhBBoeDFnlkCzHKUJjA8ZWrgM" + ] + }, + "Body": "ROUP = L1_METADATA_FILE
  GROUP = METADATA_FILE_INFO
    ORIGIN = "Image courtesy of the U.S. Geological Survey"
    REQUEST_ID = "0701609191051_00004"
    LANDSAT_SCENE_ID = "LC80440342016259LGN00"
    FILE_DATE = 2016-09-20T03:13:02Z
    STATION_ID = "LGN"
    PROCESSING_SOFTWARE_VERSION = "LPGS_2.6.2"
  END_GROUP = METADATA_FILE_INFO
  GROUP = PRODUCT_METADATA
    DATA_TYPE = "L1T"
    ELEVATION_SOURCE = "GLS2000"
    OUTPUT_FORMAT = "GEOTIFF"
    SPACECRAFT_ID = "LANDSAT_8"
    SENSOR_ID = "OLI_TIRS"
    WRS_PATH = 44
    WRS_ROW = 34
    NADIR_OFFNADIR = "NADIR"
    TARGET_WRS_PATH = 44
    TARGET_WRS_ROW = 34
    DATE_ACQUIRED = 2016-09-15
    SCENE_CENTER_TIME = "18:46:18.6867380Z"
    CORNER_UL_LAT_PRODUCT = 38.52819
    CORNER_UL_LON_PRODUCT = -123.40843
    CORNER_UR_LAT_PRODUCT = 38.50765
    CORNER_UR_LON_PRODUCT = -120.76933
    CORNER_LL_LAT_PRODUCT = 36.41633
    CORNER_LL_LON_PRODUCT = -123.39709
    CORNER_LR_LAT_PRODUCT = 36.39729
    CORNER_LR_LON_PRODUCT = -120.83117
    CORNER_UL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_UL_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_UR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_UR_PROJECTION_Y_PRODUCT = 4264500.000
    CORNER_LL_PROJECTION_X_PRODUCT = 464400.000
    CORNER_LL_PROJECTION_Y_PRODUCT = 4030200.000
    CORNER_LR_PROJECTION_X_PRODUCT = 694500.000
    CORNER_LR_PROJECTION_Y_PRODUCT = 4030200.000
    PANCHROMATIC_LINES = 15621
    PANCHROMATIC_SAMPLES = 15341
    REFLECTIVE_LINES = 7811
    REFLECTIVE_SAMPLES = 7671
    THERMAL_LINES = 7811
    THERMAL_SAMPLES = 7671
    FILE_NAME_BAND_1 = "LC80440342016259LGN00_B1.TIF"
    FILE_NAME_BAND_2 = "LC80440342016259LGN00_B2.TIF"
    FILE_NAME_BAND_3 = "LC80440342016259LGN00_B3.TIF"
    FILE_NAME_BAND_4 = "LC80440342016259LGN00_B4.TIF"
    FILE_NAME_BAND_5 = "LC80440342016259LGN00_B5.TIF"
    FILE_NAME_BAND_6 = "LC80440342016259LGN00_B6.TIF"
    FILE_NAME_BAND_7 = "LC80440342016259LGN00_B7.TIF"
    FILE_NAME_BAND_8 = "LC80440342016259LGN00_B8.TIF"
    FILE_NAME_BAND_9 = "LC80440342016259LGN00_B9.TIF"
    FILE_NAME_BAND_10 = "LC80440342016259LGN00_B10.TIF"
    FILE_NAME_BAND_11 = "LC80440342016259LGN00_B11.TIF"
    FILE_NAME_BAND_QUALITY = "LC80440342016259LGN00_BQA.TIF"
    METADATA_FILE_NAME = "LC80440342016259LGN00_MTL.txt"
    BPF_NAME_OLI = "LO8BPF20160915183057_20160915200950.01"
    BPF_NAME_TIRS = "LT8BPF20160902084122_20160917074027.02"
    CPF_NAME = "L8CPF20160701_20160930.02"
    RLUT_FILE_NAME = "L8RLUT20150303_20431231v11.h5"
  END_GROUP = PRODUCT_METADATA
  GROUP = IMAGE_ATTRIBUTES
    CLOUD_COVER = 29.56
    CLOUD_COVER_LAND = 3.33
    IMAGE_QUALITY_OLI = 9
    IMAGE_QUALITY_TIRS = 9
    TIRS_SSM_MODEL = "FINAL"
    TIRS_SSM_POSITION_STATUS = "ESTIMATED"
    ROLL_ANGLE = -0.001
    SUN_AZIMUTH = 148.48049396
    SUN_ELEVATION = 50.93768399
    EARTH_SUN_DISTANCE = 1.0053752
    GROUND_CONTROL_POINTS_VERSION = 4
    GROUND_CONTROL_POINTS_MODEL = 548
    GEOMETRIC_RMSE_MODEL = 5.857
    GEOMETRIC_RMSE_MODEL_Y = 3.841
    GEOMETRIC_RMSE_MODEL_X = 4.422
    GROUND_CONTROL_POINTS_VERIFY = 228
    GEOMETRIC_RMSE_VERIFY = 3.382
  END_GROUP = IMAGE_ATTRIBUTES
  GROUP = MIN_MAX_RADIANCE
    RADIANCE_MAXIMUM_BAND_1 = 751.95709
    RADIANCE_MINIMUM_BAND_1 = -62.09686
    RADIANCE_MAXIMUM_BAND_2 = 770.01318
    RADIANCE_MINIMUM_BAND_2 = -63.58794
    RADIANCE_MAXIMUM_BAND_3 = 709.56061
    RADIANCE_MINIMUM_BAND_3 = -58.59575
    RADIANCE_MAXIMUM_BAND_4 = 598.34149
    RADIANCE_MINIMUM_BAND_4 = -49.41123
    RADIANCE_MAXIMUM_BAND_5 = 366.15515
    RADIANCE_MINIMUM_BAND_5 = -30.23721
    RADIANCE_MAXIMUM_BAND_6 = 91.05946
    RADIANCE_MINIMUM_BAND_6 = -7.51972
    RADIANCE_MAXIMUM_BAND_7 = 30.69191
    RADIANCE_MINIMUM_BAND_7 = -2.53455
    RADIANCE_MAXIMUM_BAND_8 = 677.15784
    RADIANCE_MINIMUM_BAND_8 = -55.91992
    RADIANCE_MAXIMUM_BAND_9 = 143.10173
    RADIANCE_MINIMUM_BAND_9 = -11.81739
    RADIANCE_MAXIMUM_BAND_10 = 22.00180
    RADIANCE_MINIMUM_BAND_10 = 0.10033
    RADIANCE_MAXIMUM_BAND_11 = 22.00180
    RADIANCE_MINIMUM_BAND_11 = 0.10033
  END_GROUP = MIN_MAX_RADIANCE
  GROUP = MIN_MAX_REFLECTANCE
    REFLECTANCE_MAXIMUM_BAND_1 = 1.210700
    REFLECTANCE_MINIMUM_BAND_1 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_2 = 1.210700
    REFLECTANCE_MINIMUM_BAND_2 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_3 = 1.210700
    REFLECTANCE_MINIMUM_BAND_3 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_4 = 1.210700
    REFLECTANCE_MINIMUM_BAND_4 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_5 = 1.210700
    REFLECTANCE_MINIMUM_BAND_5 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_6 = 1.210700
    REFLECTANCE_MINIMUM_BAND_6 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_7 = 1.210700
    REFLECTANCE_MINIMUM_BAND_7 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_8 = 1.210700
    REFLECTANCE_MINIMUM_BAND_8 = -0.099980
    REFLECTANCE_MAXIMUM_BAND_9 = 1.210700
    REFLECTANCE_MINIMUM_BAND_9 = -0.099980
  END_GROUP = MIN_MAX_REFLECTANCE
  GROUP = MIN_MAX_PIXEL_VALUE
    QUANTIZE_CAL_MAX_BAND_1 = 65535
    QUANTIZE_CAL_MIN_BAND_1 = 1
    QUANTIZE_CAL_MAX_BAND_2 = 65535
    QUANTIZE_CAL_MIN_BAND_2 = 1
    QUANTIZE_CAL_MAX_BAND_3 = 65535
    QUANTIZE_CAL_MIN_BAND_3 = 1
    QUANTIZE_CAL_MAX_BAND_4 = 65535
    QUANTIZE_CAL_MIN_BAND_4 = 1
    QUANTIZE_CAL_MAX_BAND_5 = 65535
    QUANTIZE_CAL_MIN_BAND_5 = 1
    QUANTIZE_CAL_MAX_BAND_6 = 65535
    QUANTIZE_CAL_MIN_BAND_6 = 1
    QUANTIZE_CAL_MAX_BAND_7 = 65535
    QUANTIZE_CAL_MIN_BAND_7 = 1
    QUANTIZE_CAL_MAX_BAND_8 = 65535
    QUANTIZE_CAL_MIN_BAND_8 = 1
    QUANTIZE_CAL_MAX_BAND_9 = 65535
    QUANTIZE_CAL_MIN_BAND_9 = 1
    QUANTIZE_CAL_MAX_BAND_10 = 65535
    QUANTIZE_CAL_MIN_BAND_10 = 1
    QUANTIZE_CAL_MAX_BAND_11 = 65535
    QUANTIZE_CAL_MIN_BAND_11 = 1
  END_GROUP = MIN_MAX_PIXEL_VALUE
  GROUP = RADIOMETRIC_RESCALING
    RADIANCE_MULT_BAND_1 = 1.2422E-02
    RADIANCE_MULT_BAND_2 = 1.2720E-02
    RADIANCE_MULT_BAND_3 = 1.1721E-02
    RADIANCE_MULT_BAND_4 = 9.8842E-03
    RADIANCE_MULT_BAND_5 = 6.0487E-03
    RADIANCE_MULT_BAND_6 = 1.5042E-03
    RADIANCE_MULT_BAND_7 = 5.0701E-04
    RADIANCE_MULT_BAND_8 = 1.1186E-02
    RADIANCE_MULT_BAND_9 = 2.3640E-03
    RADIANCE_MULT_BAND_10 = 3.3420E-04
    RADIANCE_MULT_BAND_11 = 3.3420E-04
    RADIANCE_ADD_BAND_1 = -62.10928
    RADIANCE_ADD_BAND_2 = -63.60066
    RADIANCE_ADD_BAND_3 = -58.60747
    RADIANCE_ADD_BAND_4 = -49.42112
    RADIANCE_ADD_BAND_5 = -30.24326
    RADIANCE_ADD_BAND_6 = -7.52122
    RADIANCE_ADD_BAND_7 = -2.53505
    RADIANCE_ADD_BAND_8 = -55.93110
    RADIANCE_ADD_BAND_9 = -11.81975
    RADIANCE_ADD_BAND_10 = 0.10000
    RADIANCE_ADD_BAND_11 = 0.10000
    REFLECTANCE_MULT_BAND_1 = 2.0000E-05
    REFLECTANCE_MULT_BAND_2 = 2.0000E-05
    REFLECTANCE_MULT_BAND_3 = 2.0000E-05
    REFLECTANCE_MULT_BAND_4 = 2.0000E-05
    REFLECTANCE_MULT_BAND_5 = 2.0000E-05
    REFLECTANCE_MULT_BAND_6 = 2.0000E-05
    REFLECTANCE_MULT_BAND_7 = 2.0000E-05
    REFLECTANCE_MULT_BAND_8 = 2.0000E-05
    REFLECTANCE_MULT_BAND_9 = 2.0000E-05
    REFLECTANCE_ADD_BAND_1 = -0.100000
    REFLECTANCE_ADD_BAND_2 = -0.100000
    REFLECTANCE_ADD_BAND_3 = -0.100000
    REFLECTANCE_ADD_BAND_4 = -0.100000
    REFLECTANCE_ADD_BAND_5 = -0.100000
    REFLECTANCE_ADD_BAND_6 = -0.100000
    REFLECTANCE_ADD_BAND_7 = -0.100000
    REFLECTANCE_ADD_BAND_8 = -0.100000
    REFLECTANCE_ADD_BAND_9 = -0.100000
  END_GROUP = RADIOMETRIC_RESCALING
  GROUP = TIRS_THERMAL_CONSTANTS
    K1_CONSTANT_BAND_10 = 774.8853
    K1_CONSTANT_BAND_11 = 480.8883
    K2_CONSTANT_BAND_10 = 1321.0789
    K2_CONSTANT_BAND_11 = 1201.1442
  END_GROUP = TIRS_THERMAL_CONSTANTS
  GROUP = PROJECTION_PARAMETERS
    MAP_PROJECTION = "UTM"
    DATUM = "WGS84"
    ELLIPSOID = "WGS84"
    UTM_ZONE = 10
    GRID_CELL_SIZE_PANCHROMATIC = 15.00
    GRID_CELL_SIZE_REFLECTIVE = 30.00
    GRID_CELL_SIZE_THERMAL = 30.00
    ORIENTATION = "NORTH_UP"
    RESAMPLING_OPTION = "CUBIC_CONVOLUTION"
  END_GROUP = PROJECTION_PARAMETERS
END_GROUP = L1_METADATA_FILE
END
" + } + }, + { + "ID": "320dc27adc377057", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/gcp-public-data-landsat/LC08/PRE/044/034/LC80440342016259LGN00/LC80440342016259LGN00_MTL.txt", + "Header": { + "Range": [ + "bytes=0-17" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "18" + ], + "Content-Range": [ + "bytes 0-17/7903" + ], + "Content-Type": [ + "application/octet-stream" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"7a5fd4743bd647485f88496fadb05c51\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 04 Oct 2016 16:42:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1475599327662000" + ], + "X-Goog-Hash": [ + "crc32c=PWBt8g==", + "md5=el/UdDvWR0hfiElvrbBcUQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "7903" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UovperPufphUvaf55r54Wd-USAbL2ZQVTseICyulStqI633iJcFBLryyqecsHQcoU2cXp4MsKgB8uQu979IXcnv-aGNm6viDydIrqmPqA7SmPElPGI" + ] + }, + "Body": "R1JPVVAgPSBMMV9NRVRBREFU" + } + }, + { + "ID": "a20a4e3b35dfd271", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq7491k1eCc7fe_JApsP1zDcSyo759KmHvh-9YHm9ekpOGaG8v1bZNPjaMEkikJSDYt_LkVMHrb9HTDx9vvDGy3Zm1kPrlxS4933Sw-Wdh35lDomi4" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "d1941f2e08f3bc52", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + 
"User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoYIJ-m0VbRdjw7ZGI_0TiIfj6fcuhJkomWPdQGApFxH2_LnekHIdv7igEpJAM3a-zrTOzR20bzvc-JBunTe_-f_Hsyxz_VPxJNrQY7PSrQ3OMfEhQ" + ] + }, + "Body": "H4sIAAAAAAAAC8tIzcnJVyjPL8pJAQCFEUoNCwAAAA==" + } + }, + { + "ID": "2451a87df39e1241", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "W/\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "Warning": [ + "214 UploadServer gunzipped" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Response-Body-Transformations": [ + "gunzipped" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNfvi75DBdUL5KpenQbqbsYr5A8YSQp2lGAPIhe4FlijJP95WctTUrdrLIyq3riprP50HzntCuw7zh5ycZfqbJBkFbibeIwEp5bVDj7yVpJbqctiI" + ] + }, + "Body": "aGVsbG8gd29ybGQ=" + } + }, + { + "ID": "f0e47a86731e8924", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/gzipped-text.txt", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Range": [ + "bytes=1-8" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 206, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Encoding": [ + "gzip" + ], + "Content-Range": [ + "bytes 1-8/31" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:25:27 GMT" + ], + "Etag": [ + "\"c6117833aa4d1510d09ef69144d56790\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:25:27 GMT" + ], + "Last-Modified": [ + "Tue, 14 Nov 2017 13:07:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Accept-Encoding" + ], + "X-Goog-Generation": [ + "1510664852486988" + ], + "X-Goog-Hash": [ + "crc32c=T1s5RQ==", + "md5=xhF4M6pNFRDQnvaRRNVnkA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "gzip" + ], + "X-Goog-Stored-Content-Length": [ + "31" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upzmwe6UNntf6SxdYLZTz0aZiFJ7UANU6y7I2YKJbADGSVoCRe63OoxcC4uh-9n2JEnkvQgq0dwCHCbQ3qmYG4cWFEovG_fIMKFx6O11iPQUhLmeFw" + ] + }, + "Body": "iwgAAAAAAAA=" + } + }, + { + "ID": "2a14c414736e344d", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgiftU7BDllYqI1jhKj6RbZME3LoakRb653ThaSSD_kzX1O5odabBcDFfG-oswR6YOcDZTW174qxbUaa2G1EvajUt_wJAD3ViGgoMwT_YjoTvObkc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC4wMDlaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "b03cb69547c1a6de", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "99" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiR0VUIl0sIm9yaWdpbiI6WyIqIl0sInJlc3BvbnNlSGVhZGVyIjpbInNvbWUtaGVhZGVyIl19XX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:28 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urw36CL-_RvF58GoVX1bJlTZ1YUUdWXJKQDIac7eTOYj5s65KYFsk2ndonwA5ro_9nGPfo4qRIYyt7J1Xxb01TiJUQPRXzH-z9T_S5UQE6fxOssN0g" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "735da673f622341d", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2528" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urb-FeCpjdYOEj6HdkFXRMfVXffmMB8xjLqLwPvjgs3FghXs7r94UrXEpX0rW_CgEpjG6v6iHCWCkwxib31kFkLPMZMMZxHTy66dPT2AdBDsIE8Y8k" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI4LjAwOVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOC43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyIqIl0sIm1ldGhvZCI6WyJHRVQiXSwicmVzcG9uc2VIZWFkZXIiOlsic29tZS1oZWFkZXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "97ce23a7211781ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrqrjU-mu7dGgY6-CuNd9AwgGuKtQ_O589el9eyWSAb54cDYnQN74dl8HxRVzIUayStgIinEuuux2afAlvkUGm9lhrefX8nugBXNL2lGluNcfQKfRo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyOS43MjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "a8f5b1c42d7a3c5f", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "12" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:30 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ4q-BeojZ4WgF5ZHNzJG7sIvdOxmprH5GqoQ3DM-QllSzHWOwAfhVdJZH7WmVK5Dfs1TQH-pKgneNLb_9uYM-rDFb_5Hh9vs89o4pOPT9ob9ezTE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2
OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "cab9472f01ed4bd5", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo0__TFFuzUsEi6Eck0G1lblGy_1abmyPHERWBavjN2GHncyWWnKQ6yYaFDpDVQ-kfBS15M6H9NoxXUDS0hZ9VX8S9hK4rUGAAFWXznHjOntSK5ldY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjI5LjcyNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMC42MjhaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "7a196e6b35872d48", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "168" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJjb3JzIjpbeyJtYXhBZ2VTZWNvbmRzIjozNjAwLCJtZXRob2QiOlsiUE9TVCJdLCJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXX1dLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "593" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpJhdZl7X4YXxzKdcr7SJ3C6Xadtlo0ixoS8DNO1jaDHs5MeGKu9nx2pHWPpnUWd13HZ8inxG0pMh3un84TucDfyLu4Z-5Gp7Ka3HBfPnt_w_VTiuE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJjb3JzIjpbeyJvcmlnaW4iOlsic29tZS1vcmlnaW4uY29tIl0sIm1ldGhvZCI6WyJQT1NUIl0sInJlc3BvbnNlSGVhZGVyIjpbImZvby1iYXIiXSwibWF4QWdlU2Vjb25kcyI6MzYwMH1dLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "24b6a3b712ccab17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:31 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqe_Q9NPVf3WTO_It8-A42wh0FD3KyF4kopi6posCEX6l28cnowG8O7WSYwETklwvpp0Uk1fCXiVmgwqLlWCSc3ez-nYllKp5m_UVtxup1E1UNadGg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ebed90fad884e1cd", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2539" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrjLjhrtR5T2TdsRM6X3HPy_6vnZjhUfpjytCivwknDLKhiMkND-f1qeZ4te6AnGrv_qtnGI4OGajQJaydHJvQZGWpnBQN9VOXHqYjSBTCxnNbcbC4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMxLjQwN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMS40MDdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImNvcnMiOlt7Im9yaWdpbiI6WyJzb21lLW9yaWdpbi5jb20iXSwibWV0aG9kIjpbIlBPU1QiXSwicmVzcG9uc2VIZWFkZXIiOlsiZm9vLWJhciJdLCJtYXhBZ2VTZWNvbmRzIjozNjAwfV0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "31ee7f558375c66a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0006?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up70C3kZ6tTqgyRHbTyjXA5pLWMohouKPHCPhexU8UuWw0cAmLxjHcwaa2xBT-Soq-CLyVxoWeEkxFMhRqqK87xaM38Q2hSjGkeXEhmhoZ3iy5s6D8" + ] + }, + "Body": "" + } + }, + { + "ID": "72458186a1839a5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0005?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:32 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsiYUGF50kaeS-bMsAR51mlDVCwH9HwmqU1hCFSwzrMDgCmGF46ZuQGfqWx4SJprlBWIxaNwT_EQMKQrTQARvFyee5ZOJq21xZUsVEc1iBW0KkiVk" + ] + }, + 
"Body": "" + } + }, + { + "ID": "20fcdf5cb9502a64", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0004?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up8ALydoTkq1rLUaKiwrh6YEhczTwI3VX8iKuCNoqr6N2BSBvtxD2Qc9De99Svk_4EX82rynlWctsS2F9Ffr4WuDby30_W1Fl7TMWFZGWfo5Wc_nfI" + ] + }, + "Body": "" + } + }, + { + "ID": "ace99589753f3389", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotVHCMZG4hcbTx20gUPBV1LsUxIAVuDym9d21c2In0VVLzBxOM_DAULuLEd9LTMmpNg6A7yW4yqEsnRlHhAaMbpe7MwAWbadF2gJJ2M7uGdA-rcLQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "466aeae01d2d2933", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVuGt1j5ranQuT0nS5bzOIcVM5M42RwIHcF2mKK_H1ctJEqC_djWDMm00OBFXY4yxsTFdQ1ZYjWVnFq8GhUAWUsvsj4_B9Ur8MzJQcbAzG9vHgIjs" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozMy43MDVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "34d8505e483be050", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "31" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZWZhdWx0RXZlbnRCYXNlZEhvbGQiOnRydWV9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo5A5V7O5JcpElASqEqUZ_UiEjPa-39PCu3SwJk8EE9cHfyeVL5DWQkAss8k9iRx7YjD4ZC7WnzE_ENz95PuKdccjgzeC2GLFAUzA-sR2iEf-lNil4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "728568d994e275c7", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + 
"gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2460" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrOVH8W8OREkTrF0UKlK3dOp_AAbRF7Rsx16Zbjdl5caBObFjXETVORm7o-d-YYcPIjcrx1mKcQwxHCIXpdE5P6baH6WeY3Wc3JVF7UcQnKna-a_U4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNC42MzJaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ
vd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b7230d8a6b23fd8b", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "35" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uouwpmid1Y6D0uXWBb_WrmlZVHKYnw0RGbreddchrmPzQUE1H_DeclZnbl6Yb2346L4YNt44ZeZWEM2u46h7Zx45lw9rUFxZDQIvlXRn4vT6fW4WQQ" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ec9da9e78b750503", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2493" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq25oUSnJMHsIEARHohSGtkz5vw7RCXuQupRBliEjCXMeivgUIK0y9C3U4yWEJ-_182SvGUQLvo5rXxymcoyahwAEhfe21FJU_M2IijdiWsSv83eow" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjMzLjcwNVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNS4zMzNaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsImRlZmF1bHRFdmVudEJhc2VkSG9sZCI6dHJ1ZSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJiaWxsaW5nIjp7InJlcXVlc3RlclBheXMiOnRydWV9LCJldGFnIjoiQ0FNPSJ9" + } + }, + { + "ID": "ad49d37f07d631a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0007?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq02fbr_iko_zBCbJljbMwyW7mOaz1cqR6AWHm2ac3m9BcqrUXNkmRqxk0R2rZW6SAUC_KzbOze-1wUGY2E0g6wbYLUqcb8IOdqN2-ROvYO49uxzjE" + ] + }, + "Body": "" + } + }, + { + "ID": "7b04c14c4e323488", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:36 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrJrrmhbKyKRTvA3_mNqzRsaaMOAAlvn8UMwBiOR77-6X6m5InqoXMb4qoollPzgpcFOmchQ54jFNic0xJf-tiGu0tlrzSdbjh5Hzw8Rl3RnJy_jPw" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM2LjUwMVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNi41MDFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "ae3e9c648aaff158", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJuYW1lIjoic29tZS1vYmoifQo=", + "X7Xb+/Xtxt2fLTn7y+yBvw==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrJv7oOcHCzf9atnIPo70j3xSldrZtXTb5CmV1iuM4mMPU4hiBgpDqCAdUDI3i9hlR2qRie9qT30AJNshzO5CSsuzoTQ88W255VO8VVumaCSZ0KXZs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "e7a12fdbeb636933", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo-mQ6Bnw1qbAyHbMdumiv5YlTBrFQbHo-9tXn02VufGPTp3_Q8nXXp9VIcdRmzH2CJbhaVLu5fUFYZ6VKgcitBwcE-dCwxWrzaljgyT6zmM7x01ls" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "142eb6c42bc6f90e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:37 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrBTG1gAXfMDmjm_clsDlv04E6VRtZUsUKnaVr4bC3AIRXILH2f0UebFf1_SmXPSf6Go8sw7TXti9yO112PAG5QNin3LwaSjRuFDSH_unnMZJwRceM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "12487e4f20538041", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3215" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqxyG_OCKjwh7TR9zL9nbOGPP1ttxni9cP68b18nTZ7wZ8WqVE0MAGh1XFphNYZEYr-2kdgNgmqQrOvFiNbg6hJi-RfjCWYg0KHPLKfzyWc-gejszc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozNy4xMzdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuODI1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiZExyZG9WZ2p4bEFBR05hWVgxQm1idz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTkzNzEzNzQzMCZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvc29tZS1vYmovMTU1NjgzNTkzNzEzNzQzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTM3MTM3NDMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDSmJtaHVqeC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6Ik01ZUcvQT09IiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBST0iLCJldmVudEJhc2VkSG9sZCI6dHJ1ZX0=" + } + }, + { + "ID": "e3a625bb7418cea9", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVf0XeyD5sC9KvHEILX2e7x7VyJ9d9xoyrP3NqGFlsMgvK3zgeOEOXDvSpftUSy4opndA74-LNRkmAi8Q_6wc91iC8OWV7X3VR8kENWwclNK30V_U" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "2290ac6bb34b705b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:38 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoKagfJuJDLVLgIcrGtlV_UXlXLSqfEFj2-knllMBWWLktTv22ZfKoeSo8lY6gSfE1LKlvn87WiyJv75QWpcWdXzvablAMBOAZ_XMe-1D8dKZ5ipHs" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC40MjBaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFNPSIsImV2ZW50QmFzZWRIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "572d50dc9b5ce8a4", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "85" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJldmVudEJhc2VkSG9sZCI6ZmFsc2V9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3194" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Etag": [ + "CJbmhujx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrWVb2L8OGfosYWTjk_F-zmyNwltYI93f2tqyP4EkOx7hIRpsSG2iGDFXB7SP7DKn-teczhb9tiBKRM7rYG5vcJ5jVinideFoX44khld5BhtRaQsUc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjM3LjEzN1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTozOC45MTlaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6MzcuMTM3WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJkTHJkb1ZnanhsQUFHTmFZWDFCbWJ3PT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTM3MTM3NDMwJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOC9zb21lLW9iai8xNTU2ODM1OTM3MTM3NDMwL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDgvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5MzcxMzc0MzAiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4L3NvbWUtb2JqLzE1NTY4MzU5MzcxMzc0MzAvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA4Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTkzNzEzNzQzMCIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNKYm1odWp4L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiTTVlRy9BPT0iLCJldGFnIjoiQ0pibWh1angvZUVDRUFRPSIsImV2ZW50QmFzZWRIb2xkIjpmYWxzZX0=" + } + }, + { + "ID": "bda340f0117c1fb4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotG32AgmOwDD_bLLd_W64JCOZQ2-UgeY_cOZwomlkWb6Vx5JSE54tdsuCPvawuyJ8eNuCf8EM0qXlIjtAyg6QQiuN9t72vHrQLsrSs6WtJBDGZziU" + ] + }, + "Body": "" + } + }, + { + "ID": "9a0b7379af024a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0008?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:39 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur7S22mcdvPtDq_Z9iUbo4v0ApOQXNFgXFNZuacd8yZUq2FhZKco01Z2T4vJSv8s1yZkGXzoUrAuUKnCBx0vZuE-MDGnmTF_YxhQoXobyvAyktswdE" + ] + }, + "Body": "" + } + }, + { + "ID": "affb380cf86f173c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5In0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:40 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrF4q2mbuFoVeV9q22libc-FjfB1eDXSMYhNGZNU6sbaMWEKdc-8eXu1gtMLC7Ia9bSRU0oxgpIRR9516KZRcg7o_VGdrU1lliVPuSY9rYa4OsJymE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjA4MVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC4wODFaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "88123486ecd710ed", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJuYW1lIjoic29tZS1vYmoifQo=", + "cGCusp668ZMwY59j94Srfg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrixhGvSHn6UzWmshHjh8Ny2NE4LW-gcPPW4dCm9dZTDfBwtENYMJra88jZlphQFcyabnQwTegZW9_6bL8KYNOsyYQYgdtEDvFFMQeJ9B_IXQbrsgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "042e51e8b073dc93", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTH44ERv_v2A5OmhnLVBFOkQ_bPdeDI-MNir5xjrZt1ybrczlk9GwseHZ-kt566XrMnuYGTaCkr4ImF__dpbt98CqMpmYkiJaR1vKcI1DC7o3Dbl8" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ed898d656784527a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "83" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urh-jh9w3q8AC6TmG1NF_6JgV-Js74mV6EkB4ccrlZ1ZCQ9TlA4AhJ-J6rkqtJ9fKiy4YjT_l-TN6dQhnCwZvTUqoaJC-W-z6QQZywuQ1uX9a90PhU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9ddea19a0c44c449", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3214" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:41 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoZVnUhVRVPu1unMjxNHQzZrlqTsVtPMZHc_YPTkvy18AAD3d_TOg9qTNAEv5NXUC5n0XF1sPedifaWR19a5w7dCMltRYfuJUfGxaMzyrvpqKbGjx4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0MC42MzhaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDEuNTE5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoiUjhVSExIMG84ZjFlTVhVRHRyMTZLZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqP2dlbmVyYXRpb249MTU1NjgzNTk0MDYzODY3MiZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvc29tZS1vYmovMTU1NjgzNTk0MDYzODY3Mi9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJvYmplY3QiOiJzb21lLW9iaiIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTQwNjM4NjcyIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdG
lvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTkMvM09ueC9lRUNFQUk9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IjB6R3NGUT09IiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBST0iLCJ0ZW1wb3JhcnlIb2xkIjp0cnVlfQ==" + } + }, + { + "ID": "9b6c4955472bdb51", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "82" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJjb250ZW50VHlwZSI6ImZvbyJ9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoybPBMseeVzkBPlzTObhT-eKnJ9TbsC15btCmXKMDvqgD5Uz4JclrgL9lBqMq09UjP8GL2_3zCDkfA0gJ8SHCDZbHEi1XYcuAUT7hmXazBL3IIMqc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "0227238a512229dc", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3192" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6PS1r6NaUKQ9158DD3cKWHqyParXLdcW8k7sToMskeTU6BjpajWENp3qNHqLMFvX9NrK78TIw31h_ANYkltj3vAQoYq2RG5C6ZB2LCvUIch96SMo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiMyIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi4xMjNaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBTT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFNPSIsInRlbXBvcmFyeUhvbGQiOnRydWV9" + } + }, + { + "ID": "7a81946424736215", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "84" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJ0ZW1wb3JhcnlIb2xkIjpmYWxzZX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3193" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:42 GMT" + ], + "Etag": [ + "CNC/3Onx/eECEAQ=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1klBHpEl6qb-hfnVYr2KJLmo_3_1fgqlMYMBpBt23b-U9pF7bPcNEiJaMtrGEhZPQb9-QnqHiU8qPfmpNpSdy9KR41FwZm-89gyu3EtE5gq3J9-0" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsIm1ldGFnZW5lcmF0aW9uIjoiNCIsImNvbnRlbnRUeXBlIjoiZm9vIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQwLjYzOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Mi43MjFaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDAuNjM4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJSOFVITEgwbzhmMWVNWFVEdHIxNktnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQwNjM4NjcyJmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOS9zb21lLW9iai8xNTU2ODM1OTQwNjM4NjcyL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDkvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwOSIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDA2Mzg2NzIiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5L3NvbWUtb2JqLzE1NTY4MzU5NDA2Mzg2NzIvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2Mz
M0MDM0MzIwMTMtMDAwOS9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDA5Iiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0MDYzODY3MiIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNOQy8zT254L2VFQ0VBUT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiMHpHc0ZRPT0iLCJldGFnIjoiQ05DLzNPbngvZUVDRUFRPSIsInRlbXBvcmFyeUhvbGQiOmZhbHNlfQ==" + } + }, + { + "ID": "7865e37b40b9ec66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2nfd81k4cBaY8ZFRGIu8G6DHyGUVAdIJCB65E8ZqqO9ADJIV4K22sQaOA8fqGL3jyi3tIszjBVsXHOFrCgca1vLXSpA1-3s2cn-MVhTKpzZ5tzY4" + ] + }, + "Body": "" + } + }, + { + "ID": "fd9104ab049c0173", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0009?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:43 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMqIYvjYuUxhnSLyoY-p5BYUiuZIX7cs418asOvABObfrQc8eXZpo-V4LSZVWWgt4CNoviMvFwcdC9sCTKCZU1H-9Ifcuf8lptTMZpiWMJjZfD-DY" + ] + }, + "Body": "" + } + }, + { + "ID": "e735515a80a67931", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "105" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjM2MDAifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "573" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqxx-CzGd9qn-QCqeE_iYOkeO93A0IpWx1voocTxhTrdkkBJonYBIWXeaEq3g-LNThw7j85IKVotWyBNZlHzqYSgBchb0DeXXetMqS8WBkY603Sa60" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0My44MDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "94bfaea89e1f47c2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJuYW1lIjoic29tZS1vYmoifQo=", + "29UJUEl2/QvM9FnDBFnPRA==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqLSIzBtq8Qn_3f51_hlfoIkcpamafGwB3U8er3K_mzANwxXyscBHWZhaxb2-3M2wOT6GZER6-zLAtdeeSNtSlUMHkHX77gocrZGYj103HA-AGjVSM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "7db6d4640ce21307", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3245" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:44 GMT" + ], + "Etag": [ + "CPXIv+vx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoUK3qzYL0xUcMKfWD6mmDZqRfnd8q7O14gBrKng7OYalNXZR3ZegpD1VyqjzW6bgQqFvLrtFjWXwPoJ4E78_nThYByR2n3Hm33ms2fvCdaRSOy1Qc" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmoiLCJuYW1lIjoic29tZS1vYmoiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoiYXBwbGljYXRpb24vb2N0ZXQtc3RyZWFtIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ0LjM1OFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NC4zNThaIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJ0aW1lU3RvcmFnZUNsYXNzVXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NDQuMzU4WiIsInNpemUiOiIxNiIsIm1kNUhhc2giOiJQYzFTWFEyUUk3MGxNNlZIN1F0NUFnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmo/Z2VuZXJhdGlvbj0xNTU2ODM1OTQ0MzU5MDI5JmFsdD1tZWRpYSIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL28vc29tZS1vYmovYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9zb21lLW9iai8xNTU2ODM1OTQ0MzU5MDI5L3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvby9zb21lLW9iai9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsIm9iamVjdCI6InNvbWUtb2JqIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NDQzNTkwMjkiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3NvbWUtb2JqLzE1NTY4MzU5NDQzNTkwMjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9vL3NvbWUtb2JqL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwib2JqZWN0Ijoic29tZS1vYmoiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk0NDM1OTAyOSIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNQWEl2K3Z4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoidUpwZFdRPT0iLCJldGFnIjoiQ1BYSXYrdngvZUVDRUFFPSIsInJldGVudGlvbkV4cGlyYXRpb25UaW1lIjoiMjAxOS0wNS0wMlQyMzoyNTo0NC4zNThaIn0=" + } + }, + { + "ID": "cec800ec28205dab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAm9jtf7nMSv2Bbn_pFGaLCmlq2CtqUdGeEOyYrkllqiADBk9_xsJS3BufmzfGIdARRHN7jVwbeHKev2CHMyzWoGA8UxNGNIldPCC4oZ1dMPARKHk" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQzLjgwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0NS4yMjZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTAvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "c787f6d5b23123a3", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010/o/some-obj?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrgP5ghUeBxvXks4XlillwzLwfwyRGxt5y-FImGHSwZ-Meht6M0-keE0YTUG2Qy5hH5i1gpWPyGHXweFDyrk8a3QbDPnG5shDEoxzNOlMEsX6V8-4I" + ] + }, + "Body": "" + } + }, + { + "ID": "144726e64e401a23", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0010?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:45 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxuxvNJ3APW11H4y-qweAzn3ChIZ0eRjSetoSHkGwVvO7nMR17h5Hbj8I8zgOGjR7HPA201lDCw47ejD6kj1wCz3ZTIrUz-r-qjdm0vGGszQYFidQ" + ] + }, + "Body": "" + } + }, + { + "ID": "eebfbd37f51a5cc6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2Uox0foiLUu9KuEF-KVU8lIN_yUKAJsYEuwVqgSXvzFRGmzJtSZvFeNvMDS8W0aQ7c8YoI89qS_MxU8zBSBqcljYpIZe1NPIvkEFEG5K22RJkAuwmh8" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni4xNzNaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "c7ddc04b5d29113a", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:46 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpD4zsQLTflaR7g-z0FszOHvw5oXvxqdiyjlAnMwMcBcUEjidrpIGgpvmrz1EWi2AyWkdDCSY0l8ItQxYnWwSZt24jOxXBlzq-gCNrCCE6j0fJVGKw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e08a38dca4dda51f", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrsphQdkHT0ctCFLyPV1JRkq7cyQ7w1tvYmAWv-6tYsuThaMN-vkUJoTNz2gPKGy6jV0yqdbXaAlRlD_5n6GiQa-ULOyKLi-C0q8y25lfk7L6zROE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ2LjE3M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ni44NjNaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDExL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTEvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDYuMTczWiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "fe586a68284be237", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:47 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpAjkPxULicnoY5wPnfrg-D7yzTbTzTZcUShEN8JSsqVMN758lI4DlkvFibNK9URhYVBH0xclTheAOO_CEDoVb5egvcF-WJqJ3SDisU2YPaZsohRro" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0Ny41NjVaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "732c3421adfb99d8", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": 
[ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UohfaTh8JFEjT7TTMwV7NkrcAsgkdpDyovvbjHJLNbXalzOWX3_HwhowBimaqZXhkuvBx1D38zsUZ5dEzigoyoXRJtgHfy7ClUrshBL_yMfPkZ4JE4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMt
NDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "aebe46d0a31a6e95", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2519" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:48 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uop7c4AxllIcwFmLfqhIIaGP3Q9uHfmQoAm9LttwjQIvzHiJvu4fg_Q3UfNP8YbDuP97jLsG78JbCK68eSQv8ZHPp8kpzfdJcvOER0cs75wQ9gC7NA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ3LjU2NVoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OC4yNzBaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiIzNjAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjU6NDcuNTY1WiJ9LCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "bea48b3667650bdd", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + 
"Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo1p1nEuvoUAqYHYa8wuU6qF64gN2JPCxLqqFEUiKXLgo9rwPH_btm63155v5hbuS6sow8cYEvsXVJthsJ1Zn8qfb05USodp1EvTTrt_m1rpJnIxdo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS4wMTRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "a951cae4048b1267", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZAHhwxqSSY5MYFK1fdLLirRH41rc7W2YWSnn4CQZkbdR_98fRYjddtqZVfpEizqsZ1BNa5A7ENxQz7BwQSamASc8XAC5gyZp_pwi-Yu8l0XLSCpU" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "e7f0506408261db3", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:49 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UotOOFJlGACBcheGxpS8qxLlGsDT0xOhT_Cg1M_6DxUI16IiOu-YHaPvwzxilOKOrTvL8kRzcVUiNuFeS6rQExTmdK70I1IOH2d2BLOrMm1L5Int1g" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjQ5LjAxNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo0OS42MjdaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDEzL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTMvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxMyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "b2c4589e9b9b5ad2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:50 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoFdLKiYfqS-J3PXjvHGxpXtJwVnUB7Li8XrR-DTYkmyed-ZlUmUcmZFgGyIayuACtUWVYBcRbhYOKvgkfl8srE0OdJbc15OHWBttYqenraJ0MdXj8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "836baafb4e98da80", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:51 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UpNuySlLo4SanUdFJK3SGoCidNgWEW4mUuFDGMLLSzY-UUA3TJtN5_FLoLq7et7d1YSUDlOoGZY4a_GdewGQtsUwGdbHLz-bopObpzPy160Wm3UMpg" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "281ffb73e66efd1f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:52 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Uo8PK9JysFiApLjDSbbo9GloKqKjLO8vr85RWGkeC_Q0iEQf7zu2mwQVeiuJ81pfdJCUja0HrhNG13sczI2XoflqlsWNFvdxHGjCBrvDObr-s6Y32U" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "515ecb8616d48a7f", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + 
], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqF93J_Bpwg-_yefVpKWteSyWBSx9FqNm8MP5eZKqVfvDWpSF6EPZsISss7hNOkKMM5OuV8stlJIW2UAkzsSgkYOHx5fCwOwylbtqAyb8OlYJCHz6Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS4yMDRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "771e55d617425f26", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:55 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up6Slob_akPo2UWNmzCe-LD4cb-9DVTq_FPlNxA7mKVF6fBFks6gFnZ83BSajEU2qD8g14Y-pX20kbfoKf8XGR9pIW2xXH_oHCxsjspHZ2ARGePTMA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "9526aab2be8b214c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UopNhdzHbHv4sXyk0r1owz7bkkPQwYIGbb3gRingbq7vgESKltWLIr9PDDIUNREfumEvZDfpxMineWYNzZ0qdOaI1QNYqXjnuxO6sCUDjBYEhr76uk" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU1LjIwNFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1NS44MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE0L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTQvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNC9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx
7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "0aabff8aa1cc1e9c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "103" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjYwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpTj6zYv1NMHDI_ktA1d-p7HKq4YnJMxxI9nXubPfCMyIspOgya8HYibmvOMFwLtCPagnzRHe0HbmTCSqJpYMay0xYZstGSE_IUWRqI9lUhzq0Iqe4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiNjAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "05147b5b5fdd7882", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + 
"Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:56 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoshhz3RuOTlNgZ3gOLis_dSFBsqr7khyJCdWUD8BGk0_j-AHxA5etttVHuHTrUcDksF4LdtfJLhlVrAJe9PrGiYZ-Wy57j6tb7L6vc0qHoTvolIRg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjo
ic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "7de8d497e7e1a76c", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2517" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up3mjbvLxwRDDhjg8GTGBNcJM7EraDydPsmD77oQcyYYGVlQnUqg0NQbezf4KQOoOHoEnC9jMR052e0olW3qi9KdAi4-qJEiKGI1rGivb5hnJnmU5o" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1Ni4zNzRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE1L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI2MCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI1OjU2LjM3NFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "3876040740117531", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0015?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqGdmh-XzrU5d5VwRugwWr452hENxpWf1sEtNCDaGksLJ1taCgnEjL_0oAlELqirqyXLlw-frE8LYo1V37Pk1G3qvWGk9E1iSZV7uECOjsdE3JagK8" + ] + }, + "Body": "" + } + }, + { + "ID": "39fb456e528c7081", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0014?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:57 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqKcAQsTdqHfiXj71c5kivJTwCNhY6xUQpZtaxywsmr1kNG5_MdJBsOvuLVbDSHKRIsG9tDxk_9Gz-7zaqP_pqke1aqKOFg9NeCU4ZqfjD0SBy0Q5Q" + ] + }, + "Body": "" + } + }, + { + "ID": "3fdacef865a6fc39", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0013?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Ur0PIkQTUvOl8-6tC08mwiS3nBPydo1OYUeD6Q46ZrU6VkmpRwxc9MikGqJlsejuGZkyY_aaxUVKYGREjOF363tfkbII3of6pGFuQ8rINOf8uFV5ts" + ] + }, + "Body": "" + } + }, + { + "ID": "2da48fa864dd9efe", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0012?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:58 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoWkbW_2n8KvRkdk7Lh_5eOVBjW6JvoW0i9LNQUHM-Df2H1QIbUYIwFnUGLt-bPfFa46Ur-v8Q76mU24KpjW7WIC_HVq4Q5H96pRYk7I6ggUsckTs0" + ] + }, + "Body": "" + } + }, + { + "ID": "3ef26f9548e0aec4", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0011?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqZfOfnhYy6nk2TxHz3h1eMu_0KlczCaOb-xcAIBRoRgxk7Q67BqsT5fvb-vAky2phYzQWNHLTk-0T8V2tpPY4Hg23Ub9evuo6_YEndEVdiBRhTcFI" + ] + }, + "Body": "" + } + }, + { + "ID": "8f3abbeca307cc5a", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; 
charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:25:59 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqdSQK1TuFGAgs143LV6RLNjzfrsAl5vt-Jb_RQ4dcVGDzK8I29sKKrPmcY0rLIJuVIQar3mdRGhNLhqXp2xCKli28TFq4rtsIgiA4DxCtdVbajIhY" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS4zNjRaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "bb92a1c4ecd9e884", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "aGVsbG8gd29ybGQ=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3315" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "COup9/Lx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrRoCa6t1nTWVUrdfIXfyxZBI6PKBkzO24svoJRDEYMEsduIrzBpdDiX8mIUWSxcL8-pSRZgcDvy9_iHKpoVbiUNt4QhlG7zosGpa3WOk--YlBxjeM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk1OTk1MjYxOSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNTo1OS45NTJaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjU6NTkuOTUyWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5Ljk1MloiLCJzaXplIjoiMTEiLCJtZDVIYXNoIjoiWHJZN3UrQWU3dENUeXlLN2oxck53dz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk1OTk1MjYxOSZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvc29tZS1vYmplY3QvMTU1NjgzNTk1OTk1MjYxOS9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTU5OTUyNjE5IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9zb21lLW9iamVjdC8xNTU2ODM1OTU5OTUyNjE5L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NTk5NTI2MTkiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDT3VwOS9MeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6InlaUmxxZz09IiwiZXRhZyI6IkNPdXA5L0x4L2VFQ0VBRT0iLCJyZXRlbnRpb25FeHBpcmF0aW9uVGltZSI6IjIwMTktMDUtMDNUMjM6MjU6NTkuOTUyWiJ9" + } + }, + { + "ID": "8f2ac73db961f1d6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "13884" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uok63dRFSkGv0Fum7yPhHf_AtYTWceX5_zkck648-jhWkyym1ezg2Og-LqKJ6nnZCW_gPZuU01OkZErKAQbL7mvQroCq8nEtNTEAosMxk9fJRKpYlc" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: 
com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, unnamedArguments=[]}, location=null, message=Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00, reason=forbidden, rpcCode=403} Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::RETENTION_POLICY_NOT_MET: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:88)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.DeleteObject.handleRequestReceived(DeleteObject.java:25)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.delete(ObjectsDelegator.java:110)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Object 'go-integration-test-20190502-80633403432013-0016/some-object' is subject to bucket's retention policy and cannot be deleted, overwritten or archived until 2019-05-03T16:25:59.952208428-07:00"}}" + } + }, + { + "ID": "5a7a0988789dfb17", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "25" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOm51bGx9Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:00 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + 
], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrZIBPbBTYIUYYaBFrcXzNnp7uuC8DoPry45bX57PyfL7aM5a83NLxYnvWsXD62w3LrRVksNhJbrLnSL5e8ZS6b0Cto8ufh-V00gNrGI-CaTc4achc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI1OjU5LjM2NFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMC43NTZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTYvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQ
U5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "8410bda61aa0b6fd", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2MOVFr2ZwvkwYm32qtqndRBhJnBc28nVBEBowKjIIxpOxiYcSef4wtz9sLTJ23PbXr1F8L_TfGwyShv5j1eTsQ-91OXU8Q8JEZtui9VVyK96j2D4" + ] + }, + "Body": "" + } + }, + { + "ID": "7b024f33e16d850f", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0016?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:01 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpDkcsqcqPLSySgf13o9jWNafbKdVjSRqGNu3ZedvF7TlL9TBswdMMpcl0zQOHtvXWIrhrcZODr7RjRhHuvS8gJdMbnYeQDHrBCbiH6x9WrnaZ5uIQ" + ] + }, + "Body": "" + } + }, + { + "ID": "33b0efc1b91cfb42", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": 
[ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpItvsNld0b1FGsjQgaVCDxW22UEra8vRGfkWtbDS7R2kMbHRPr0I6iPjzaQBOaa9-xUJxGO-JV6RhMlWwH2NY-FXY2-0lrkZiAzIL-tdFwkCwlF-Q" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "632b9a0387d9cff1", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2520" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:02 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEVtrO8ed8TEHdpzyw857ZykloGKTT56atylmGnu682Y0VxI_T6SWN0JLFupJ_2Cu80ntiZttFSCBqwqFGfxVrRqsNnf0KBDyzqOH8eI-BaWPBXvE" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMS45MDhaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "a32a5924f876577b", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=1\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "639" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpsYL8RxAKClLw1KRdSSIORQHYKKf_HMml2vtwDOFrh0kTYDzhGEgX2RJAPQKOP2-70q7ZZ2VfkkGz12tmTiRgSuwB2dM3xxZxZucASROVeoit9Shg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIiwiZWZmZWN0aXZlVGltZSI6IjIwMTktMDUtMDJUMjI6MjY6MDEuOTA4WiIsImlzTG9ja2VkIjp0cnVlfSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "718775f2f8320cfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2536" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:03 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprHupyIuGJCYeUCPKigfkARZRkCc2Jz051KFrXK7YS0FbdKxNe3r20jon1ejkooHsAtcXQOi1tDV41Ob9dJ3Zoyj7stDImJ0eQriMO0WLLQ__-V54" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjowMy41NDRaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxNyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInJldGVudGlvblBvbGljeSI6eyJyZXRlbnRpb25QZXJpb2QiOiI5MDAwMCIsImVmZmVjdGl2ZVRpbWUiOiIyMDE5LTA1LTAyVDIyOjI2OjAxLjkwOFoiLCJpc0xvY2tlZCI6dHJ1ZX0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBST0ifQ==" + } + }, + { + "ID": "2f4b1104fa94954b", + "Request": { + "Method": "PATCH", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "47" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiMzYwMCJ9fQo=" + ] + }, + "Response": { + "StatusCode": 403, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "13774" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrafLpm39Fg6hBYYi9lSEizIWUdMduEDQykOeT9Bch6uf9BNQUO9sOAnWpTklSpNac1yW_kSNj-JR8LMmVLirC2kqG2DcQdE-Hg5dtVBkVZ4Xhjlc8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"forbidden","message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat 
java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 19 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=FORBIDDEN, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=forbidden, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.FORBIDDEN, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n, errorProtoCode=FORBIDDEN, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., unnamedArguments=[]}, location=null, message=Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'., reason=forbidden, rpcCode=403} Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::ACCESS_DENIED: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.UpdateAndPatchBucket.updateBucket(UpdateAndPatchBucket.java:119)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.patchBucket(PatchBucket.java:196)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:141)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.PatchBucket.handleRequestReceived(PatchBucket.java:46)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.update(BucketsDelegator.java:102)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'.\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
19 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":403,"message":"Cannot reduce retention duration of a locked Retention Policy for bucket 'go-integration-test-20190502-80633403432013-0017'."}}" + } + }, + { + "ID": "a8964565736a4805", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:04 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + 
"X-Guploader-Uploadid": [ + "AEnB2UrNbiNCNtu8DhS-gNXfoe4R9DFKEVzknZ0g6YoI0JdvwH6P199oVJ2oaJwh9WXMte6frhDr_vJo1_PlxLhezMnUTJzMYZgPlPWdU1QN9imk3hXgawE" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "49648eb4e4e2ddda", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:05 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UplUHmGCg4wOKtpKVz2KH0cn-th8s-KXQR62bOkAF1pV1zIZJvsoI_tURcFttkyHks8g05Hln5fT4Ej_O-Lx6JCTovYYwhhHIK7AAcialHMj5fPHk4" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "42cf12714cf13289", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:07 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UrcjWbo0pc_i_GQTU2WE6YR0XhWYMGRPJutu1MQXo6XbRKJAYhzNK4p2-_gCqZ6BvNGncUP1UZNoTrXsFFGt1RRTRR3DS9XNiws0gt9YQdp_I8dqqM" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "4f0fdb3eb607f33b", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 429, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12201" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:11 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2Up0EsbUXQesAgz95sFt6OzWRDj5WAXqEpYaqHgGvCClYnBGvjX8eQmor80UQOCIDzT6v9ZAtOPVhFIqkJG6rV0-CDXiLAnr4gwDrPkqgezvm8PQTaY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"usageLimits","reason":"rateLimitExceeded","message":"The project exceeded the rate limit for creating and deleting buckets.","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=TOO_MANY_REQUESTS, category=QUOTA_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=usageLimits, extendedHelp=null, httpHeaders={}, httpStatus=tooManyRequests, internalReason=Reason{arguments={}, cause=null, code=cloud.bigstore.api.BigstoreErrorDomain.CLIENT_QUOTA_EXCEEDED, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CLIENT_QUOTA_EXCEEDED, errorProtoDomain=cloud.bigstore.api.BigstoreErrorDomain, filteredMessage=null, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., unnamedArguments=[]}, location=entity.resource.bucket.name, message=The project exceeded the rate limit for creating and deleting buckets., reason=rateLimitExceeded, rpcCode=429} The project exceeded the rate limit for creating and deleting buckets.: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::PROJECT_BUCKET_OP_RATE_TOO_HIGH: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:184)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.InsertBucket.handleRequestReceived(InsertBucket.java:42)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.insert(BucketsDelegator.java:95)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: Creating buckets too quickly, please slow down\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":429,"message":"The project exceeded the rate limit for creating and deleting buckets."}}" + } + }, + { + "ID": "a5a8b8af13be21cf", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "106" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwicmV0ZW50aW9uUG9saWN5Ijp7InJldGVudGlvblBlcmlvZCI6IjkwMDAwIn19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "574" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:15 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" 
+ ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpF7Lqxvm1dNC6q_v81ZB1Ui9c2ZLJ3GXLX6Y6UNZN1nbkF78c0J3NwXavHF1cMFZ5oP8ufCnmCLg9vQHM6tfs2ih8YxQSfE4D3AxGsvBaGNP3VUMo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE4IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE1LjA5N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJyZXRlbnRpb25Qb2xpY3kiOnsicmV0ZW50aW9uUGVyaW9kIjoiOTAwMDAiLCJlZmZlY3RpdmVUaW1lIjoiMjAxOS0wNS0wMlQyMjoyNjoxNS4wOTdaIn0sInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "e1dbaeed8c6d1ffb", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/lockRetentionPolicy?alt=json\u0026ifMetagenerationMatch=0\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "0" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 412, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Content-Length": [ + "12155" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4dRXLSTXYf9VXr3eOCMfuLBwDLvNQMXwIYjwEuSFQO_p-SxZQXQJ7I_VA0T5Hn8gnnEZIqLV3vLCnurXr0cyHDEMpCdxWOiIiqsXlgkbcLKf78to" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"conditionNotMet","message":"Precondition Failed","locationType":"header","location":"If-Match","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat 
com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=PRECONDITION_FAILED, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat 
com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=preconditionFailed, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.CONDITION_NOT_MET, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=CONDITION_NOT_MET, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=null, message=null, unnamedArguments=[]}, location=headers.If-Match, message=Precondition Failed, reason=conditionNotMet, rpcCode=412} Precondition Failed: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::INCORRECT_META_GENERATION_SPECIFIED: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:204)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.LockRetentionPolicy.handleRequestReceived(LockRetentionPolicy.java:57)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.lockRetentionPolicy(BucketsDelegator.java:109)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: expected BucketMetadata.metadata_generation: 0 actual: 1\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":412,"message":"Precondition Failed"}}" + } + }, + { + "ID": "814dffcceacb3bd8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026kmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + 
"Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upis-8CwJYvr7glFAMnec_3T3YDojfTz3O_uICK5tQQmfJF2_rFtNnbqopcxBL5L3aKyoYe4zOLoqN4_MFFrhYKDm6Tmrl3rytuh0ymlCV15VZKyrQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI
29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "9b05110f2b59351b", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "\"-CMGB+Prx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:16 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:16 GMT" + ], + "X-Goog-Generation": [ + "1556835976741057" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpZ6aSiGPoeZKANPoMPgXhL1QD5nbILvzv8v3sOMId95Y65wOPssqesNA1MXzp86JA4Qa5gAMxxx5cnY4z3-tCUnvsKoIHn3NXOK2U6ZSRLzYNeQeU" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "011b81453e7060ac", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CMGB+Prx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uq5t8aexqBqqxnLJFK9mWmGdVrFfd3gaCGOKfGJMO1phB3ATnxt67-cLVpELRy1gNnjNOK7-zMzWCR6yV1v5PQcv3lccINA--Q92RoECpvBON-QKtc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1NyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3Njc0MTA1NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNi43NDBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTYuNzQwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE2Ljc0MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3Njc0MTA1NyZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEva21zLzE1NTY4MzU5NzY3NDEwNTcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2ttcy9hY2wvc
HJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc2NzQxMDU3IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9rbXMvMTU1NjgzNTk3Njc0MTA1Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzY3NDEwNTciLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTUdCK1ByeC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNNR0IrUHJ4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "0dbd0a37c735be56", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur_RD6un7NLXQpzwRBFEfZdp982qhfEzyvw8f_P1Lx1wKv8sUid99lQ7Nba5Rs73IJfv3qHXjQfUDHjWclvCAcS91nMboiexn-FwiMrUuKqCQ_d__4" + ] + }, + "Body": "" + } + }, + { + "ID": "1b81fc72e201f2a2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + 
"eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoiY3NlayJ9Cg==", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:17 GMT" + ], + "Etag": [ + "CKCxsfvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqRTb3RtqG1gIL-ENbVZC8nWKRHapQP78uVzv4yyL2t7N8cA5jEBWa014iJALicWO06KthvAMPLctgZCJ2UvPqo72IO4_LheEfTr13-_h43M-roUyc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jc2VrLzE1NTY4MzU5Nzc2ODEwNTYiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrIiwibmFtZSI6ImNzZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxNy42ODBaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTcuNjgwWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE3LjY4MFoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlaz9nZW5lcmF0aW9uPTE1NTY4MzU5Nzc2ODEwNTYmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NzZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1
iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NzZWsvMTU1NjgzNTk3NzY4MTA1Ni9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY3Nlay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNzZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3NzY4MTA1NiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY3Nlay8xNTU2ODM1OTc3NjgxMDU2L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jc2VrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY3NlayIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc3NjgxMDU2IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0tDeHNmdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDS0N4c2Z2eC9lRUNFQUU9IiwiY3VzdG9tZXJFbmNyeXB0aW9uIjp7ImVuY3J5cHRpb25BbGdvcml0aG0iOiJBRVMyNTYiLCJrZXlTaGEyNTYiOiJJbzRsbk9QVStFVGhPMFgwbnE3bU5FWEIxcld4WnNCSTRMMzdwQm15ZkRjPSJ9fQ==" + } + }, + { + "ID": "6da27052d0dabd22", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek/rewriteTo/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026destinationKmsKeyName=projects%2Fdeklerk-sandbox%2Flocations%2Fglobal%2FkeyRings%2Fgo-integration-test%2FcryptoKeys%2Fkey1\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ], + "X-Goog-Copy-Source-Encryption-Algorithm": [ + "AES256" + ], + "X-Goog-Copy-Source-Encryption-Key": [ + "CLEARED" + ], + "X-Goog-Copy-Source-Encryption-Key-Sha256": [ + "Io4lnOPU+EThO0X0nq7mNEXB1rWxZsBI4L37pBmyfDc=" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3372" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpKmOV156VWN4edUkrNip1urAj8WTpDcgGTS520rVGilFT71KrZLZQxUMKloxtxKx5R6V9pUvIqYym68P8lQ8RSc5rEE_D1KlH-NyPgybiPERbxYZ4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiOSIsIm9iamVjdFNpemUiOiI5IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21layIsIm5hbWUiOiJjbWVrIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwic2l6ZSI6IjkiLCJtZDVIYXNoIjoiQUFQUVM0NlRybk1ZbnFpS0FiYWd0UT09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWs/Z2VuZXJhdGlvbj0xNTU2ODM1OTc4MTI2NDc3JmFsdD1tZWRpYSIsImNhY2hlQ29udHJvbCI6InB1YmxpYywgbWF4LWFnZT02MCIsImFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6ImVkaXRvcnMifSwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzcvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJjbWVrIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5NzgxMjY0NzciLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN
0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJyb2xlIjoiT1dORVIiLCJlbWFpbCI6ImFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiZXRhZyI6IkNJM0p6UHZ4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiVUk3ODVBPT0iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSIsImttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MS9jcnlwdG9LZXlWZXJzaW9ucy8xIn19" + } + }, + { + "ID": "6ff8a4be1ed6bda3", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/cmek", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "\"-CI3JzPvx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:18 GMT" + ], + "X-Goog-Generation": [ + "1556835978126477" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqDR7RHLf_CQraIBM5Z3vCUxSJndE0E6FBOLFBqmYvMntnrQyVIfxwuTE6BtHL4b8oQszU6rIycc72JMUjiGRM_XoGdjsk-WhsbBmL_3c3x5H1s4bs" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "51f1995364e644ed", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3271" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Etag": [ + "CI3JzPvx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpM1hbkOXC-oSxvoqjP8s2MpxREJEpzVp2TTYj-6B8467MLYSA-yrWs9UoGwRGQUCJTR76e-qAmQnbzGsFGT31Kc3qjevb8SM0MynLHL9GhOMkm72w" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9jbWVrLzE1NTY4MzU5NzgxMjY0NzciLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrIiwibmFtZSI6ImNtZWsiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOC4xMjZaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTguMTI2WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE4LjEyNloiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21laz9nZW5lcmF0aW9uPTE1NTY4MzU5NzgxMjY0NzcmYWx0PW1lZGlhIiwiY2FjaGVDb250cm9sIjoicHVibGljLCBtYXgtYWdlPTYwIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL2NtZWsvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2NtZWsvMTU1NjgzNTk3ODEyNjQ3Ny9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwM
i04MDYzMzQwMzQzMjAxMy0wMDAxL28vY21lay9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6ImNtZWsiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3ODEyNjQ3NyIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDSTNKelB2eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvY21lay8xNTU2ODM1OTc4MTI2NDc3L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9jbWVrL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiY21layIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc4MTI2NDc3IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0kzSnpQdngvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJVSTc4NUE9PSIsImV0YWciOiJDSTNKelB2eC9lRUNFQUU9Iiwia21zS2V5TmFtZSI6InByb2plY3RzL2Rla2xlcmstc2FuZGJveC9sb2NhdGlvbnMvZ2xvYmFsL2tleVJpbmdzL2dvLWludGVncmF0aW9uLXRlc3QvY3J5cHRvS2V5cy9rZXkxL2NyeXB0b0tleVZlcnNpb25zLzEifQ==" + } + }, + { + "ID": "8972a840dc3d95a5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/csek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoTmNCzrFh_7cFaryet35nLbKqM6dyo7nvsyZApHIj6fLKcMDb9ZA-_TRNjGOLNFjjoD0IZ1YOz9P-ftNchBFkjWDTaFzBjbHmryCc9JInU3wd_DLw" + ] + }, + "Body": "" + } + }, + { + "ID": "1e28b278fe3e0a66", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/cmek?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ 
+ "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:18 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqrX5OILXadm6r5e8fLyuLEhQncM5jrmmDaOesSqraquvQbXXKhTqNJ63RYU3qj90M3FLjwnAOz8oOBTQQDOq2sRqUkbpBbJzYj82Rp3Rd9VLpNns0" + ] + }, + "Body": "" + } + }, + { + "ID": "d1ac5d123ee4bae6", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "200" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwibG9jYXRpb24iOiJVUyIsIm5hbWUiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "609" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpbajYTiCBBelYuwcqOs7_O-SyJbcV6WGwYhC4JCMmeImLlFLb93JyaiYrSp-201iYZTIdQ2VViFR_Emxe2Z7b4jV_G-0o2xzkqnkSnz0-Qcw_q0Wc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImVuY3J5cHRpb24iOnsiZGVmYXVsdEttc0tleU5hbWUiOiJwcm9qZWN0cy9kZWtsZXJrLXNhbmRib3gvbG9jYXRpb25zL2dsb2JhbC9rZXlSaW5ncy9nby1pbnRlZ3JhdGlvbi10ZXN0L2NyeXB0b0tleXMva2V5MSJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FFPSJ9" + } + }, + { + "ID": "d035cdb991d0fabb", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; 
v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoi5SKd4TdRXg-INW-U3pKcPA2ioBWIc2zdC5W3J4efS9iZ0EKFNqprjQ-NGEK_3wSDGezk4Al3Dd4JovGxXSnH73rIw22S_R__XRrpTQmYpTiNFGQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS4wNjdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3
RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "672cb2bcc1ed4ee2", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJuYW1lIjoia21zIn0K", + "bXkgc2VjcmV0" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upyst1aH69iaN9J4055oX7H2yReN5TADIX11Vul8KakNuYZbJ7yXm1GBdvCDVB-IPTvbhzELJWgfurnIHxGpzKJszLO70n58xMnVbSLsHX1J3W02Lw" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbG
Vyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "c037ecafa6249395", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0019/kms", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "9" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "\"-CLHMt/zx/eECEAE=\"" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:19 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Encryption-Kms-Key-Name": [ + "projects/deklerk-sandbox/locations/global/keyRings/go-integration-test/cryptoKeys/key1/cryptoKeyVersions/1" + ], + "X-Goog-Generation": [ + "1556835979879985" + ], + "X-Goog-Hash": [ + "crc32c=UI785A==", + "md5=AAPQS46TrnMYnqiKAbagtQ==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "9" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrrQOFSgCkxAaKVPAQhz0ohMN6irPryraHLjWDEQgV3dNEwc5cxqcDDRChzx6_HR1Z_NPlm3vmjFCJcNK-2rAz-tmvVS8w36Q5U9Lc2wAGUgL_joX4" + ] + }, + "Body": "bXkgc2VjcmV0" + } + }, + { + "ID": "fc3eda648104f042", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3234" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Etag": [ + "CLHMt/zx/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + 
"X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqHPSbutJRXX-6i05QnH_4UizrPft0fsGc3_ISDNmXQ4C3RPbEAnNfS1Ren0j3k2z8zxk9AV1WZQCWiKG9D5A8gZAZESFxbSZyjbBH8jkLEsEX54NQ" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcyIsIm5hbWUiOiJrbXMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk3OTg3OTk4NSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoxOS44NzlaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MTkuODc5WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5Ljg3OVoiLCJzaXplIjoiOSIsIm1kNUhhc2giOiJBQVBRUzQ2VHJuTVlucWlLQWJhZ3RRPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zP2dlbmVyYXRpb249MTU1NjgzNTk3OTg3OTk4NSZhbHQ9bWVkaWEiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvby9rbXMvYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkva21zLzE1NTY4MzU5Nzk4Nzk5ODUvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9vL2ttcy9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsIm9iamVjdCI6ImttcyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTc5ODc5OTg1IiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW
9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9rbXMvMTU1NjgzNTk3OTg3OTk4NS91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L28va21zL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5Iiwib2JqZWN0Ijoia21zIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5Nzk4Nzk5ODUiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDTEhNdC96eC9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlVJNzg1QT09IiwiZXRhZyI6IkNMSE10L3p4L2VFQ0VBRT0iLCJrbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTEvY3J5cHRvS2V5VmVyc2lvbnMvMSJ9" + } + }, + { + "ID": "13283bc1ba20f019", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019/o/kms?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:20 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo2b3OtHWqpsRWxurpcGgrEtM10ZDdsrIa2z0nFsO-P4G3RAGYMTVMR0MCtwLDePrKyhfnsK-d9T5AIcRpeaIe8Z40Qm7gkZP4Fdi9jXd6ceYe5Iwc" + ] + }, + "Body": "" + } + }, + { + "ID": "ca2255586aa4aeab", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "126" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + 
"X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur05CMGbsLFprbwWYFSeE3I5dKFdFUzO--ImyAzkLt6ueNGpMDviyX6c1S4F6t1bqXTSnyQXXQzgugu8JoecrL3-1eJOPsqnN-OBMubntJCblkyWqE" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9i
YWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "e1c336910a43425f", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2555" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:21 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Urcwbg5Z9C0rFvSTWYj4FxF27ooV0028Cm-yme-BrfmvK0dWsRkfRQEMWylWwokcmM2kIvLKEiucJlvDLHvnbJPB_HjH3zF2Lm8xCwV_9gPN42QDeo" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMS4zMzZaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y
2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUk9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBST0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUk9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBST0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJlbmNyeXB0aW9uIjp7ImRlZmF1bHRLbXNLZXlOYW1lIjoicHJvamVjdHMvZGVrbGVyay1zYW5kYm94L2xvY2F0aW9ucy9nbG9iYWwva2V5UmluZ3MvZ28taW50ZWdyYXRpb24tdGVzdC9jcnlwdG9LZXlzL2tleTIifSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUk9In0=" + } + }, + { + "ID": "581a002b653f595d", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "20" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJlbmNyeXB0aW9uIjpudWxsfQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Etag": [ + "CAM=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upv2LawTqc_eDVGR_hv2jSCyEA77H33uMkPevVxUUDKj1u4a9IWOH2f1oMXpU52-49Jg17SUSoJUJai1Ueff3ahN58GC9FCxwJt5v5xOHI3SytFjpo" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5IiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjE5LjA2N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMi4xMjRaIiwibWV0YWdlbmVyYXRpb24iOiIzIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FNPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDE5L3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMTkvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOS9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAxOSIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQU09In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBTT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQU09In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBTT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBTT0ifQ==" + } + }, + { + "ID": "a0db97c641f3f17c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0019?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:22 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo15eoN1OrQFM6eqxJcuNFl7Wfr6x_VvJ0ENBlhhC_Hedf5al2OplGnNslWnRQT9RVs92RVLevZ_F_ohutgnNsUbK1PVsR04UnQ_A6ea3C0ZLy8FkI" + ] + }, + "Body": "" + } + }, + { + "ID": "ca84fc3a71a55ea8", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026predefinedAcl=authenticatedRead\u0026predefinedDefaultObjectAcl=publicRead\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnCCRKNh19fdBAL3pyCzbtLnG7z20pSwSZKl9uTerD6o6-B5mB79mqZRIbXNcUmcpTdpxTreKjvsHK3Y9EKq_5fkzBPInDj8YkjbGViIpd1ldSots" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "ed54d82686b4d390", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "1468" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:23 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpXJh1_aAMuDVjYxLZJdwqAt7OwZdmTkc0_5wf_LshhNm_So0q87o7gJ31T58vp1b0_CucSecAyyyOLUbYTDrWDDTZGMkZBpfx1iBArjRtBcud1IgI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyMy4xMDZaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9hY2wvYWxsQXV0aGVudGljYXRlZFVzZXJzIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZW50aXR5IjoiYWxsQXV0aGVudGljYXRlZFVzZXJzIiwicm9sZSI6IlJFQURFUiIsImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "224aee5425ffb86e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026predefinedAcl=private\u0026predefinedDefaultObjectAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "33" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJhY2wiOltdLCJkZWZhdWx0T2JqZWN0QWNsIjpbXX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1113" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:24 GMT" + ], + "Etag": [ + "CAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoknWPZdEFnvKZGCGO_Z26ZaKeXNdBzc6k6qqlbGdQXVEtPvLPR2oYtKCXsqDgFBjMw4wfWVXwDfmcc5u5kBAxshpuzKGgCp8UOxv-i_Rml56mzrOI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjIzLjEwNloiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC40MjVaIiwibWV0YWdlbmVyYXRpb24iOiIyIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FJPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6ImFsbEF1dGhlbnRpY2F0ZWRVc2VycyIsInJvbGUiOiJSRUFERVIiLCJldGFnIjoiQ0FJPSJ9XSwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sIm93bmVyIjp7ImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCJ9LCJsb2NhdGlvbiI6IlVTIiwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0FJPSJ9" + } + }, + { + "ID": "b6a80796e20baf03", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o?alt=json\u0026predefinedAcl=authenticatedRead\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJuYW1lIjoicHJpdmF0ZSJ9Cg==", + "aGVsbG8=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1995" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqMfGdFW2xElqOJcP4q5XeDjVehJr4FmjcFPij-Y31f_168_Jgb6zibcWxCOJzrV1aUYYTXZW9DMX-BcvfJi6YXoBCPpN23EhrGQyY7tHSb0SVurGY" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjQuODQ3WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL3ByaXZhdGUvMTU1NjgzNTk4NDg0ODIwOC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJwcml2YXRlIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODQ4NDgyMDgiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNORHE1djd4L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFFPSJ9" + } + }, + { + "ID": "4812f1e0e907256e", + "Request": { + "Method": "PATCH", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026predefinedAcl=private\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "62" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifQo=" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1529" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Etag": [ + "CNDq5v7x/eECEAI=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqTjsMMIlet4CWUmGoB3TuGzhTHXlDwuUe1SAQO8H2vmbTo2QYYV_WozfDO2NyZpsA9LCtgwwthwxeEeEG182WQTTe3xLWnsVjqPZx-iho8JAgI66U" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9wcml2YXRlLzE1NTY4MzU5ODQ4NDgyMDgiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlIiwibmFtZSI6InByaXZhdGUiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NDg0ODIwOCIsIm1ldGFnZW5lcmF0aW9uIjoiMiIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNC44NDdaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuNDIzWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI0Ljg0N1oiLCJzaXplIjoiNSIsIm1kNUhhc2giOiJYVUZBS3J4TEtuYTVjWjJSRUJmRmtnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vcHJpdmF0ZT9nZW5lcmF0aW9uPTE1NTY4MzU5ODQ4NDgyMDgmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvcHJpdmF0ZS8xNTU2ODM1OTg0ODQ4MjA4L3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9wcml2YXRlL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoicHJpdmF0ZSIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg0ODQ4MjA4IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ05EcTV2N3gvZUVDRUFJPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJtbkc3VEE9PSIsImV0YWciOiJDTkRxNXY3eC9lRUNFQUk9In0=" + } + }, + { + "ID": "969763dc18755620", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private/rewriteTo/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026destinationPredefinedAcl=publicRead\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "3" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "e30K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "2017" + ], + "Content-Type": [ + 
"application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:25 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoNk9xKdyuh-I4RkIXqQxMF4A82DZ9b2HYZOdVK7ELOsnMYBqOmrqsY5hzZJsLXMZCZ6zOSzCeefQZuTgYHwyd-jAttJ4QoA1lDk0HTwZqigIfpjCc" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNyZXdyaXRlUmVzcG9uc2UiLCJ0b3RhbEJ5dGVzUmV3cml0dGVuIjoiNSIsIm9iamVjdFNpemUiOiI1IiwiZG9uZSI6dHJ1ZSwicmVzb3VyY2UiOnsia2luZCI6InN0b3JhZ2Ujb2JqZWN0IiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9kc3QiLCJuYW1lIjoiZHN0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODU4NTAyNjkiLCJtZXRhZ2VuZXJhdGlvbiI6IjEiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW47IGNoYXJzZXQ9dXRmLTgiLCJ0aW1lQ3JlYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjUuODQ5WiIsInVwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI1Ljg0OVoiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsInRpbWVTdG9yYWdlQ2xhc3NVcGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNS44NDlaIiwic2l6ZSI6IjUiLCJtZDVIYXNoIjoiWFVGQUtyeExLbmE1Y1oyUkVCZkZrZz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdD9nZW5lcmF0aW9uPTE1NTY4MzU5ODU4NTAyNjkmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvZHN0LzE1NTY4MzU5ODU4NTAyNjkvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9vL2RzdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2RzdC8xNTU2ODM1OTg1ODUwMjY5L2FsbFVzZXJzIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vZHN0L2FjbC9hbGxVc2VycyIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMCIsIm9iamVjdCI6ImRzdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg1ODUwMjY5IiwiZW50aXR5IjoiYWxsVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKMy9vLy94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoibW5HN1RBPT0iLCJldGFnIjoiQ0ozL28vL3gvZUVDRUFFPSJ9fQ==" + } + }, + { + "ID": "8db7e2a6b620e794", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp/compose?alt=json\u0026destinationPredefinedAcl=authenticatedRead\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "130" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJkZXN0aW5hdGlvbiI6eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAifSwic291cmNlT2JqZWN0cyI6W3sibmFtZSI6InByaXZhdGUifSx7Im5hbWUiOiJkc3QifV19Cg==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "1906" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Etag": [ + "CJGwwP/x/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpqjCYxNFNTu3V1H_kV5WJxkbEYE9I3H5fYjfGMPO7n6C9NJstYZGcIh6xVA5RAxMCwP1PdzXITgTI1m2vm5xP5nhzLVRHKhder2qzYFGsAooH-kG0" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMC9jb21wLzE1NTY4MzU5ODYzMTUyODEiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wIiwibmFtZSI6ImNvbXAiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4NjMxNTI4MSIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyNi4zMTRaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjYuMzE0WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI2LjMxNFoiLCJzaXplIjoiMTAiLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL28vY29tcD9nZW5lcmF0aW9uPTE1NTY4MzU5ODYzMTUyODEmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvY29tcC8xNTU2ODM1OTg2MzE1MjgxL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC91c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwIiwib2JqZWN0IjoiY29tcCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg2MzE1MjgxIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0pHd3dQL3gvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIwL2NvbXAvMTU1NjgzNTk4NjMxNTI4MS9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzN
DAzNDMyMDEzLTAwMjAvby9jb21wL2FjbC9hbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjAiLCJvYmplY3QiOiJjb21wIiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODYzMTUyODEiLCJlbnRpdHkiOiJhbGxBdXRoZW50aWNhdGVkVXNlcnMiLCJyb2xlIjoiUkVBREVSIiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifV0sIm93bmVyIjp7ImVudGl0eSI6InVzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20ifSwiY3JjMzJjIjoiL1JDT2dnPT0iLCJjb21wb25lbnRDb3VudCI6MiwiZXRhZyI6IkNKR3d3UC94L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "72a0ce58c513f6d5", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/comp?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpWOQtHF0cKNME8sTEm9vbS9KKKLLK9afxka1b6TTVGxDjCQ4qAdnA9zshsfVFVKK5hNS10aABAQog_gjYCZTXbbsF4bQNBM0i4R7bK2r7saocPtFo" + ] + }, + "Body": "" + } + }, + { + "ID": "111b5a296b726631", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/dst?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:26 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqPsYXABoYOnGueMXiIvH9AntIPEf4xc6i18Nnr_XEYBxy-3LSK9klIcNyfSB7we4lgbctsiYA2e2WWBJlfqk1GnK6YA2fj-e3IiPbKwBwTSLjr1Jk" + ] + }, + "Body": "" + } + }, + { + "ID": "27e4065cd2000d43", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020/o/private?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + 
"quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpMPK_b_vcRkSIj1t37L47as9idfpbrNWqzirdNBLwoaGDMPlbjatMzjLThnYRNwk9CqTNQ5H8eoKFbunpe5TMktUo_FHyI7rryoUhbCpkbytRrTeM" + ] + }, + "Body": "" + } + }, + { + "ID": "cccb109f5fba2a3a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0020?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:27 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoVAiJOaePqfycNhiwbkPOptOqgpe9Tvo0jo1aW2nviozj4-QcjTtnGGyhcdrZ8QTxT-KHPo-saYS9ESC_6osYhChJOCi8b9nXtuqH0BqlvywXs9FE" + ] + }, + "Body": "" + } + }, + { + "ID": "cfab6af30981f62b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/projects/deklerk-sandbox/serviceAccount?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "116" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrDBTBzZwm7GH6Jklv6hzuaZaO3JdgliJk8zLZkVNDqwDO7tYP-c06rzrTj03llkWuV_l1k9w2tbUcGG2Rtv-1MdozY6pktTutDMLfyhyHe1DaEIys" + ] + }, + "Body": "eyJlbWFpbF9hZGRyZXNzIjoic2VydmljZS00OTYxNjk2MDE3MTRAZ3MtcHJvamVjdC1hY2NvdW50cy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImtpbmQiOiJzdG9yYWdlI3NlcnZpY2VBY2NvdW50In0=" + } + }, + { + "ID": "677e783b9aea8c15", + "Request": { + "Method": "POST", + "URL": 
"https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJjb250ZW50VHlwZSI6InRleHQvcGxhaW4iLCJuYW1lIjoic29tZS1vYmplY3QifQo=", + "vcVTjdWWssp9XFk0D3Nk5w==" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrdWRt6oMzDM98VpA0VDmWRuNvox2ZBw6ZpjbyOFDKCFKgJ92jsLkF8v4M4UUxaTUVpmr8iElUTld7Q2g5_MZSbRatDE2zVgaY-769Ppq7Nx6f0HAg" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZ
WxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "ff3c05fe02cd38ac", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/some-object", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=60" + ], + "Content-Length": [ + "16" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "\"b4769c7d229f0720beffe7251bbabe8f\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:27:28 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:28 GMT" + ], + "X-Goog-Generation": [ + "1556835988273530" + ], + "X-Goog-Hash": [ + "crc32c=Sm1gKw==", + "md5=tHacfSKfByC+/+clG7q+jw==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + 
"X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "16" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqn6PsKCJsyZAhI_JtSOcVKs7SWuqYsoprQTUagoZXqu1CHWyBc1TzWfUyF9iv5VBS4iZPP5qz0Lf3b51p8O36Sud2-QS2zDg9FKhtm1uBFKLpcTOs" + ] + }, + "Body": "vcVTjdWWssp9XFk0D3Nk5w==" + } + }, + { + "ID": "d1c7cc7c0e1d2829", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3262" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CPryt4Dy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqluzdkA1sUAJQJZodwWmM7SP7tNoX2JJjDiqHTy3XtjEqiVDNpI0whWTFJJH4caozRYjJPjTb6_ywm82dnAiYEo-YCknhlvkWexfTq1BM9MRWwQeg" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwIiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QiLCJuYW1lIjoic29tZS1vYmplY3QiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4ODI3MzUzMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbiIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOC4yNzNaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjguMjczWiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI4LjI3M1oiLCJzaXplIjoiMTYiLCJtZDVIYXNoIjoidEhhY2ZTS2ZCeUMrLytjbEc3cStqdz09IiwibWVkaWFMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vZG93bmxvYWQvc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0P2dlbmVyYXRpb249MTU1NjgzNTk4ODI3MzUzMCZhbHQ9bWVkaWEiLCJjYWNoZUNvbnRyb2wiOiJwdWJsaWMsIG1heC1hZ2U9NjAiLCJhY2wiOlt7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3NvbWUtb2JqZWN0L2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvcHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvc29tZS1vYmplY3QvMTU1NjgzNTk4ODI3MzUzMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vc29tZS1vYmplY3QvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJvYmplY3QiOiJzb21lLW9iamVjdCIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg4MjczNTMwIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9zb21lLW9iamVjdC8xNTU2ODM1OTg4MjczNTMwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLC
JzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby9zb21lLW9iamVjdC9hY2wvdXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6InNvbWUtb2JqZWN0IiwiZ2VuZXJhdGlvbiI6IjE1NTY4MzU5ODgyNzM1MzAiLCJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwicm9sZSI6Ik9XTkVSIiwiZW1haWwiOiJhbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsImV0YWciOiJDUHJ5dDREeS9lRUNFQUU9In1dLCJvd25lciI6eyJlbnRpdHkiOiJ1c2VyLWFub3RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIn0sImNyYzMyYyI6IlNtMWdLdz09IiwiZXRhZyI6IkNQcnl0NER5L2VFQ0VBRT0ifQ==" + } + }, + { + "ID": "951f74cbe6b30b67", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:28 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoMRpk7F0qXPvUV4uNPdV7VhfUO2Mm-qPBbx3VV92y1wRyElPF1kbRNEGJ1KqbIW5w2XozD7IzjxBx4KevqcPxZr0_E1iU1_NASedJ4lZj4EFoP9UI" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "aadc7916a3fc24ad", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11805" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhoAeZtt8ffiWaPr6J0LEaVR3vjerqtJ7gMM9BObOkz_6MtctDuS_s8-DZRxtTZsGfMDtekIKG2v8p2_sb3MxvTgeLhgO4vqvJNIszTaMjjyQcEdY" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.resource_id.name, message=null, unnamedArguments=[]}, location=entity.resource_id.name, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:104)\n\tat com.google.cloud.bigstore.api.json.handlers.buckets.GetBucket.handleRequestReceived(GetBucket.java:33)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.BucketsDelegator.get(BucketsDelegator.java:83)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "03b45b7eac939177", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Caf%C3%A9", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"ade43306cb39336d630e101af5fb51b4\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:38 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385878535828" + ], + "X-Goog-Hash": [ + "crc32c=fN3yZg==", + "md5=reQzBss5M21jDhAa9ftRtA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFU2KyS5OA-bAy_MovHXHg5zJZ2kznc6tgTTrSjilrNHiw0NimPHZApcRXOS_ejrSSy8nIn02xipxUWxvHM3ru5IYhJRTeBZxhlq6_XQVsATkvink" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEM=" + } + }, + { + "ID": "725883eeccd4b42c", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/upload/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026prettyPrint=false\u0026projection=full\u0026uploadType=multipart", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "multipart/related", + "BodyParts": [ + "eyJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJuYW1lIjoiemVybyJ9Cg==", + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "3128" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "CKyp9oDy/eECEAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_single_post_uploads" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UruPh9tBADr1LSQWFlj1C4tjlS6uwlcy_MVRYaTGD4e_qEUlq0iXJx6ns3JHbH7I9olH7aSJpDW6VQcV5EdLwjQMecgA7qKhxzLoYFW4BZdEXbUrro" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNvYmplY3QiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS96ZXJvLzE1NTY4MzU5ODkyOTYzMDAiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvIiwibmFtZSI6Inplcm8iLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsIm1ldGFnZW5lcmF0aW9uIjoiMSIsImNvbnRlbnRUeXBlIjoidGV4dC9wbGFpbjsgY2hhcnNldD11dGYtOCIsInRpbWVDcmVhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjoyOS4yOTVaIiwidXBkYXRlZCI6IjIwMTktMDUtMDJUMjI6MjY6MjkuMjk1WiIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwidGltZVN0b3JhZ2VDbGFzc1VwZGF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjI5LjI5NVoiLCJzaXplIjoiMCIsIm1kNUhhc2giOiIxQjJNMlk4QXNnVHBnQW1ZN1BoQ2ZnPT0iLCJtZWRpYUxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9kb3dubG9hZC9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVybz9nZW5lcmF0aW9uPTE1NTY4MzU5ODkyOTYzMDAmYWx0PW1lZGlhIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMS9vL3plcm8vYWNsL3Byb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC9wcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0IiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3plcm8vMTU1NjgzNTk4OTI5NjMwMC9wcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL28vemVyby9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsIm9iamVjdCI6Inplcm8iLCJnZW5lcmF0aW9uIjoiMTU1NjgzNTk4OTI5NjMwMCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvemVyby8xNTU2ODM1OTg5Mjk2MzAwL3VzZXItYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvby96ZXJvL2FjbC91c2VyLWFub3
RoZXItdGhpbmdAZGVrbGVyay1zYW5kYm94LmlhbS5nc2VydmljZWFjY291bnQuY29tIiwiYnVja2V0IjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwib2JqZWN0IjoiemVybyIsImdlbmVyYXRpb24iOiIxNTU2ODM1OTg5Mjk2MzAwIiwiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIsInJvbGUiOiJPV05FUiIsImVtYWlsIjoiYW5vdGhlci10aGluZ0BkZWtsZXJrLXNhbmRib3guaWFtLmdzZXJ2aWNlYWNjb3VudC5jb20iLCJldGFnIjoiQ0t5cDlvRHkvZUVDRUFFPSJ9XSwib3duZXIiOnsiZW50aXR5IjoidXNlci1hbm90aGVyLXRoaW5nQGRla2xlcmstc2FuZGJveC5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSJ9LCJjcmMzMmMiOiJBQUFBQUE9PSIsImV0YWciOiJDS3lwOW9EeS9lRUNFQUU9In0=" + } + }, + { + "ID": "036f06773f6d6306", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0021/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 404, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "11821" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "agent_rejected" + ], + "X-Guploader-Upload-Result": [ + "agent_rejected" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqs9uKpqdPwYRv420tAk0KmBT6DS2POPsIj39ROT-r3ymrd2SB-oOJEXuaHGOfih1ckOqp-YrvHHAh7l7A-6rwlFjoBSb4qCtWzXGPjekN0pUBIok8" + ] + }, + "Body": "{"error":{"errors":[{"domain":"global","reason":"notFound","message":"Not Found","debugInfo":"com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat 
com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 17 more\n\ncom.google.api.server.core.Fault: ImmutableErrorDefinition{base=NOT_FOUND, category=USER_ERROR, cause=null, debugInfo=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, domain=global, extendedHelp=null, httpHeaders={}, httpStatus=notFound, internalReason=Reason{arguments={}, cause=null, code=gdata.CoreErrorDomain.NOT_FOUND, createdByBackend=true, debugMessage=com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n, errorProtoCode=NOT_FOUND, errorProtoDomain=gdata.CoreErrorDomain, filteredMessage=null, location=entity.bucket, message=null, unnamedArguments=[]}, location=entity.bucket, message=Not Found, reason=notFound, rpcCode=404} Not Found: com.google.net.rpc3.RpcException: cloud.bigstore.ResponseCode.ErrorCode::BUCKET_NOT_FOUND: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.toRpc3Exception(BigstoreException.java:147)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:322)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:171)\n\tat com.google.cloud.bigstore.api.json.handlers.objects.ListObjects.handleRequestReceived(ListObjects.java:41)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handleRequestReceived(RequestHandler.java:310)\n\tat com.google.cloud.bigstore.api.json.framework.RequestHandler.handle(RequestHandler.java:256)\n\tat com.google.cloud.bigstore.api.json.ObjectsDelegator.list(ObjectsDelegator.java:89)\n\tat com.google.cloud.bigstore.isolation.RpcReceiver.lambda$processRequestAsync$4(RpcReceiver.java:202)\n\tat com.google.cloud.bigstore.isolation.AsyncExecutor.lambda$submit$0(AsyncExecutor.java:253)\n\tat com.google.common.context.ContextRunnable.runInContext(ContextRunnable.java:50)\n\tat com.google.common.context.ContextRunnable$1.run(ContextRunnable.java:39)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContextNoUnref(GenericContextCallback.java:72)\n\tat com.google.tracing.GenericContextCallback.runInInheritedContext(GenericContextCallback.java:64)\n\tat com.google.common.context.ContextRunnable.run(ContextRunnable.java:35)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: com.google.cloud.bigstore.common.BigstoreException: No such bucket: go-integration-test-20190502-80633403432013-0021\n\tat com.google.cloud.bigstore.common.BigstoreException.throwOnError(BigstoreException.java:312)\n\tat com.google.cloud.bigstore.common.BigstoreException.throwRpc3OnError(BigstoreException.java:320)\n\t... 
17 more\n\n\tat com.google.api.server.core.ErrorCollector.toFault(ErrorCollector.java:54)\n\tat com.google.api.server.rest.adapter.rosy.RosyErrorConverter.toFault(RosyErrorConverter.java:67)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:259)\n\tat com.google.api.server.rest.adapter.rosy.RosyHandler$2.call(RosyHandler.java:239)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.common.util.concurrent.DirectExecutor.execute(DirectExecutor.java:30)\n\tat com.google.common.util.concurrent.AbstractFuture.executeListener(AbstractFuture.java:1143)\n\tat com.google.common.util.concurrent.AbstractFuture.complete(AbstractFuture.java:963)\n\tat com.google.common.util.concurrent.AbstractFuture.set(AbstractFuture.java:731)\n\tat com.google.api.server.core.util.CallableFuture.run(CallableFuture.java:62)\n\tat com.google.api.server.thread.ThreadTrackers$ThreadTrackingRunnable.run(ThreadTrackers.java:126)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.runInContext(TraceContext.java:453)\n\tat com.google.api.server.server.CommonModule$ContextCarryingExecutorService$1.runInContext(CommonModule.java:802)\n\tat com.google.tracing.TraceContext$TraceContextRunnable$1.run(TraceContext.java:460)\n\tat io.grpc.Context.run(Context.java:565)\n\tat com.google.tracing.CurrentContext.runInContext(CurrentContext.java:204)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContextNoUnref(TraceContext.java:319)\n\tat com.google.tracing.TraceContext$AbstractTraceContextCallback.runInInheritedContext(TraceContext.java:311)\n\tat com.google.tracing.TraceContext$TraceContextRunnable.run(TraceContext.java:457)\n\tat com.google.gse.internal.DispatchQueueImpl$WorkerThread.run(DispatchQueueImpl.java:403)\n"}],"code":404,"message":"Not Found"}}" + } + }, + { + "ID": "6b9dc540a02ed56f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/storage-library-test-bucket/Cafe%CC%81", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "public, max-age=3600" + ], + "Content-Length": [ + "20" + ], + "Content-Type": [ + "text/plain" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"df597679bac7c6150429ad80a1a05680\"" + ], + "Expires": [ + "Thu, 02 May 2019 23:26:29 GMT" + ], + "Last-Modified": [ + "Fri, 24 Mar 2017 20:04:37 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Generation": [ + "1490385877705600" + ], + "X-Goog-Hash": [ + "crc32c=qBeWjQ==", + "md5=31l2ebrHxhUEKa2AoaBWgA==" + ], + "X-Goog-Metageneration": [ + "2" + ], + "X-Goog-Storage-Class": [ + "MULTI_REGIONAL" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "20" + ], + 
"X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrK69XFh4eDvl0RvSevMYIwMVs-nlDelMhoacS-4faSvuEateXMEqzdMvHClWdzkyqcEk7IQ1N0JMcv5uEPt6V98chYK-p9otm8puF4Uf846EBUEmg" + ] + }, + "Body": "Tm9ybWFsaXphdGlvbiBGb3JtIEQ=" + } + }, + { + "ID": "09f71762da168c2f", + "Request": { + "Method": "GET", + "URL": "https://storage.googleapis.com/go-integration-test-20190502-80633403432013-0001/zero", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "Go-http-client/1.1" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Accept-Ranges": [ + "bytes" + ], + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "text/plain; charset=utf-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Etag": [ + "\"d41d8cd98f00b204e9800998ecf8427e\"" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Last-Modified": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Server": [ + "UploadServer" + ], + "X-Goog-Expiration": [ + "Sat, 01 Jun 2019 22:26:29 GMT" + ], + "X-Goog-Generation": [ + "1556835989296300" + ], + "X-Goog-Hash": [ + "crc32c=AAAAAA==", + "md5=1B2M2Y8AsgTpgAmY7PhCfg==" + ], + "X-Goog-Metageneration": [ + "1" + ], + "X-Goog-Storage-Class": [ + "STANDARD" + ], + "X-Goog-Stored-Content-Encoding": [ + "identity" + ], + "X-Goog-Stored-Content-Length": [ + "0" + ], + "X-Guploader-Customer": [ + "cloud-storage" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrL3cJhuZXF-arxiYeo4MSf1to_dAoiyMisikOum5rqnrSnFa0OZ25Sl8hUU7lcIeW6P3-dwXK6qv0yHGFYMFnUB0VYpI9BRT16gMo8ikQ_L3gBmew" + ] + }, + "Body": "" + } + }, + { + "ID": "681729281517b9c0", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:29 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrKCNWRWyBsgMgWxsrSjXZMKW22GWVttfUWHw_UNSF7X7hnnz4h-cD7vFzaLO7OVWvnuMyW9qpJAA4eJaERT9hSjGfv41XVw_9ouvBOeIK9ykq-DrE" + ] + }, + "Body": "" + } + }, + { + "ID": "39261fa7e6e9aede", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "60" + ], + "User-Agent": 
[ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIn0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "485" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:33 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpEt-jSVBZqHHBwfNkeP4NXxMdC1ISsFp2E_M6JiSw9rDj2gTnmTZhrvOn-bo_f9Lx_HMxMDgsXfO0ftWqvUjGwdmHAYNnYK5JSJozLfuosPNxv_S4" + ] + }, + "Body": "eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJzdG9yYWdlQ2xhc3MiOiJTVEFOREFSRCIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "3e70965b0aa1111b", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2431" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrB-4r560VjwZdR64GS_7_mZDefc-iM5ma_H8KjYfrdNXt-IlkgvzShsy7iNLseN6coOnkJwWT5tLbP54_M7nMUjord_jmpb1i19SBRA_3RQ_64yys" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjMzLjU1N1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozMy41NTdaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInN0b3JhZ2VDbGFzcyI6IlNUQU5EQVJEIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "b80f9a99661cd572", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + 
"google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur4ItFRDN2_vB0_Ej095pAE061aywdVAoU_00t3fazm21R5ZLxUHpQL4ifTJ58218EkOeDBSWNaOaWuuzTnTM0DhXTe4l2X3pytOdfcZdIeyLV1rWA" + ] + }, + "Body": "" + } + }, + { + "ID": "979ffb3f3450c3d5", + "Request": { + "Method": "POST", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026prettyPrint=false\u0026project=deklerk-sandbox", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "Content-Length": [ + "543" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "application/json", + "BodyParts": [ + "eyJsYWJlbHMiOnsiZW1wdHkiOiIiLCJsMSI6InYxIn0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsic3RvcmFnZUNsYXNzIjoiTkVBUkxJTkUiLCJ0eXBlIjoiU2V0U3RvcmFnZUNsYXNzIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6IkRlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsb2NhdGlvbiI6IlVTIiwibmFtZSI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwidmVyc2lvbmluZyI6eyJlbmFibGVkIjp0cnVlfX0K" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "926" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqM_YYHcsYolLFJJwWhO9AUYmnc3Vp3J6C5AydYOy-0RVAmBxObG8QpwZOsn28vDVllo_OZ1-4bASi_RGi4wdply2CPhzUOCVjP11XNzVYZ4_EiFb4" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiaWFtQ29uZmlndXJhdGlvbiI6eyJidWNrZXRQb2xpY3lPbmx5Ijp7ImVuYWJsZWQiOmZhbHNlfX0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOnRydWV9LCJsaWZlY3ljbGUiOnsicnVsZSI6W3siYWN0aW9uIjp7InR5cGUiOiJTZXRTdG9yYWdlQ2xhc3MiLCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSJ9LCJjb25kaXRpb24iOnsiYWdlIjoxMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOmZhbHNlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk1VTFRJX1JFR0lPTkFMIiwiU1RBTkRBUkQiXSwibnVtTmV3ZXJWZXJzaW9ucyI6M319LHsiYWN0aW9uIjp7InR5cGUiOiJEZWxldGUifSwiY29uZGl0aW9uIjp7ImFnZSI6MzAsImNyZWF0ZWRCZWZvcmUiOiIyMDE3LTAxLTAxIiwiaXNMaXZlIjp0cnVlLCJtYXRjaGVzU3RvcmFnZUNsYXNzIjpbIk5FQVJMSU5FIl0sIm51bU5ld2VyVmVyc2lvbnMiOjEwfX1dfSwibGFiZWxzIjp7ImwxIjoidjEiLCJlbXB0eSI6IiJ9LCJzdG9yYWdlQ2xhc3MiOiJORUFSTElORSIsImV0YWciOiJDQUU9In0=" + } + }, + { + "ID": "4931edd6fbd7e278", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2872" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Etag": [ + "CAE=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:34 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpxOq-rTQ-0CFxAKezuPpbibtEmD_yASKzhW5dOiUdwVORx6a7_e1dmDnNCdamDPwLsBOpLueQD8iDbwlracb2uxGwwZ6yz92QD-t-JSKM-jSNp2xA" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjI2OjM0LjU2M1oiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNjozNC41NjNaIiwibWV0YWdlbmVyYXRpb24iOiIxIiwiYWNsIjpbeyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0FFPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDIyL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMjIvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMi9hY2wvcHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsImJ1Y2tldCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAyMiIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQUU9In1dLCJkZWZhdWx0T2JqZWN0QWNsIjpbeyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJvd25lcnMifSwiZXRhZyI6IkNBRT0ifSx7ImtpbmQiOiJzdG9yYWdlI29iamVjdEFjY2Vzc0NvbnRyb2wiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQUU9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC12aWV3ZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJSRUFERVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6InZpZXdlcnMifSwiZXRhZyI6IkNBRT0ifV0sImlhbUNvbmZpZ3VyYXRpb24iOnsiYnVja2V0UG9saWN5T25seSI6eyJlbmFibGVkIjpmYWxzZX19LCJvd25lciI6eyJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQifSwibG9jYXRpb24iOiJVUyIsInZlcnNpb25pbmciOnsiZW5hYmxlZCI6dHJ1ZX0sImxpZmVjeWNsZSI6eyJydWxlIjpbeyJhY3Rpb24iOnsidHlwZSI6IlNldFN0b3JhZ2VDbGFzcyIsInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjEwLCJjcmVhdGVkQmVmb3JlIjoiMjAxNy0wMS0wMSIsImlzTGl2ZSI6ZmFsc2UsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTVVMVElfUkVHSU9OQUwiLCJTVEFOREFSRCJdLCJudW1OZXdlclZlcnNpb25zIjozfX0seyJhY3Rpb24iOnsidHlwZSI6Ik
RlbGV0ZSJ9LCJjb25kaXRpb24iOnsiYWdlIjozMCwiY3JlYXRlZEJlZm9yZSI6IjIwMTctMDEtMDEiLCJpc0xpdmUiOnRydWUsIm1hdGNoZXNTdG9yYWdlQ2xhc3MiOlsiTkVBUkxJTkUiXSwibnVtTmV3ZXJWZXJzaW9ucyI6MTB9fV19LCJsYWJlbHMiOnsibDEiOiJ2MSIsImVtcHR5IjoiIn0sInN0b3JhZ2VDbGFzcyI6Ik5FQVJMSU5FIiwiZXRhZyI6IkNBRT0ifQ==" + } + }, + { + "ID": "12c06b5419a1fa81", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0022?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uoc24sLEBwU00YKpSYT4V6iJu3WxME2LgbyRWaYj4kX8s6Gjatzl0AWcCwPRhj_Bq0BOoUhKQzS8L0GJlhHC36eyEWEIs2rk2BcUyo0aE3U8XbuHuM" + ] + }, + "Body": "" + } + }, + { + "ID": "bf24dffbe6cb8609", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "2571" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Etag": [ + "CA0=" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqVkn03LSqmt8RiOPdW3JO4dpsS9vRp7VK8Qc0HwhmS3L9KDOgmyZIqLnvAwuAyvoIz1VsbqeoLwMMtz9pCIX216P4K3OpEs-x_bf_VSwMiFehgZiM" + ] + }, + "Body": 
"eyJraW5kIjoic3RvcmFnZSNidWNrZXQiLCJpZCI6ImdvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInNlbGZMaW5rIjoiaHR0cHM6Ly93d3cuZ29vZ2xlYXBpcy5jb20vc3RvcmFnZS92MS9iL2dvLWludGVncmF0aW9uLXRlc3QtMjAxOTA1MDItODA2MzM0MDM0MzIwMTMtMDAwMSIsInByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJuYW1lIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxIiwidGltZUNyZWF0ZWQiOiIyMDE5LTA1LTAyVDIyOjIzOjU0LjYxMFoiLCJ1cGRhdGVkIjoiMjAxOS0wNS0wMlQyMjoyNToyNS41MjdaIiwibWV0YWdlbmVyYXRpb24iOiIxMyIsImFjbCI6W3sia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3QtZWRpdG9ycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LWVkaXRvcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6Ik9XTkVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJlZGl0b3JzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNidWNrZXRBY2Nlc3NDb250cm9sIiwiaWQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvcHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0Iiwic2VsZkxpbmsiOiJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9zdG9yYWdlL3YxL2IvZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL2FjbC9wcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LW93bmVycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiT1dORVIiLCJwcm9qZWN0VGVhbSI6eyJwcm9qZWN0TnVtYmVyIjoiNDk2MTY5NjAxNzE0IiwidGVhbSI6Im93bmVycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2UjYnVja2V0QWNjZXNzQ29udHJvbCIsImlkIjoiZ28taW50ZWdyYXRpb24tdGVzdC0yMDE5MDUwMi04MDYzMzQwMzQzMjAxMy0wMDAxL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJzZWxmTGluayI6Imh0dHBzOi8vd3d3Lmdvb2dsZWFwaXMuY29tL3N0b3JhZ2UvdjEvYi9nby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEvYWNsL3Byb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJidWNrZXQiOiJnby1pbnRlZ3JhdGlvbi10ZXN0LTIwMTkwNTAyLTgwNjMzNDAzNDMyMDEzLTAwMDEiLCJlbnRpdHkiOiJwcm9qZWN0LXZpZXdlcnMtNDk2MTY5NjAxNzE0Iiwicm9sZSI6IlJFQURFUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoidmlld2VycyJ9LCJldGFnIjoiQ0EwPSJ9XSwiZGVmYXVsdE9iamVjdEFjbCI6W3sia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtb3duZXJzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoib3duZXJzIn0sImV0YWciOiJDQTA9In0seyJraW5kIjoic3RvcmFnZSNvYmplY3RBY2Nlc3NDb250cm9sIiwiZW50aXR5IjoicHJvamVjdC1lZGl0b3JzLTQ5NjE2OTYwMTcxNCIsInJvbGUiOiJPV05FUiIsInByb2plY3RUZWFtIjp7InByb2plY3ROdW1iZXIiOiI0OTYxNjk2MDE3MTQiLCJ0ZWFtIjoiZWRpdG9ycyJ9LCJldGFnIjoiQ0EwPSJ9LHsia2luZCI6InN0b3JhZ2Ujb2JqZWN0QWNjZXNzQ29udHJvbCIsImVudGl0eSI6InByb2plY3Qtdmlld2Vycy00OTYxNjk2MDE3MTQiLCJyb2xlIjoiUkVBREVSIiwicHJvamVjdFRlYW0iOnsicHJvamVjdE51bWJlciI6IjQ5NjE2OTYwMTcxNCIsInRlYW0iOiJ2aWV3ZXJzIn0sImV0YWciOiJDQTA9In1dLCJpYW1Db25maWd1cmF0aW9uIjp7ImJ1Y2tldFBvbGljeU9ubHkiOnsiZW5hYmxlZCI6ZmFsc2V9fSwib3duZXIiOnsiZW50aXR5IjoicHJvamVjdC1vd25lcnMtNDk2MTY5NjAxNzE0In0sImxvY2F0aW9uIjoiVVMiLCJ2ZXJzaW9uaW5nIjp7ImVuYWJsZWQiOmZhbHNlfSwibGlmZWN5Y2xlIjp7InJ1bGUiOlt7ImFjdGlvbiI6eyJ0eXBlIjoiRGVsZXRlIn0sImNvbmRpdGlvbiI6eyJhZ2UiOjMwfX1dfSwibGFiZWxzIjp7ImwxIjoidjIiLCJuZXciOiJuZXcifSwic3RvcmFnZUNsYXNzIjoiU1RBTkRBUkQiLCJldGFnIjoiQ0EwPSJ9" + } + }, + { + "ID": "2e41eac5e693f3e1", + "Request": { + "Method": "GET", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o?alt=json\u0026delimiter=\u0026pageToken=\u0026prefix=\u0026prettyPrint=false\u0026projection=full\u0026versions=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "64746" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqgGJECa6hRJfdjE3ErHERxS3HPU_SALrzjf-HIDuv4lfxMvOwrkF2pfY2jUJ3b8TcIjWcnaa85Rs4LBucKffiSWfFXcfLZMF0EuiVRj9oH6Kp4tOg" + ] + }, + "Body": "{"kind":"storage#objects","items":[{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1","name":"acl1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864248170","metageneration":"2","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.247Z","updated":"2019-05-02T22:24:25.629Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.247Z","size":"16","md5Hash":"0E9tFNpZj0/WKOJ6fV9paw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?generation=1556835864248170&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COr+pcXx/eECEAI="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl1/1556835864248170/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl1","generation":"1556835864248170","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COr+pcXx/eECEAI="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"FiDmVg==","etag":"COr+pcXx/eECEAI="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2","name":"acl2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835864737946","metageneration":"1","contentType":"application/octet-stream","timeCreated":"2019-05-02T22:24:24.737Z","updated":"2019-05-02T22:24:24.737Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:24.737Z","size":"16","md5Hash":"c9+O/rg24HTFBc+etWjefg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?generation=1556835864737946&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/acl2/1556835864737946/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"acl2","generation":"1556835864737946","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJrxw8Xx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AtNRtA==","etag":"CJrxw8Xx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs","name":"bucketInCopyAttrs","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883663856","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.663Z","updated":"2019-05-02T22:24:43.663Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:43.663Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?generation=1556835883663856&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPCDx87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/bucketInCopyAttrs/1556835883663856/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"bucketInCopyAttrs","generation":"1556835883663856","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPCDx87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CPCDx87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object","name":"checksum-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835855962241","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:15.961Z","updated":"2019-05-02T22:24:15.961Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:15.961Z","size":"10","md5Hash":"/F4DjTilcDIIVEHn/nAQsA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?generation=1556835855962241&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CIGhrMHx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/checksum-object/1556835855962241/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"checksum-object","generation":"1556835855962241","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CIGhrMHx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Vsu0gA==","etag":"CIGhrMHx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1","name":"composed1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835858962957","metageneration":"1","timeCreated":"2019-05-02T22:24:18.962Z","updated":"2019-05-02T22:24:18.962Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:18.962Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?ge
neration=1556835858962957&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CI2048Lx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed1/1556835858962957/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed1","generation":"1556835858962957","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CI2048Lx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CI2048Lx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2","name":"composed2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835859564799","metageneration":"1","contentType":"text/json","timeCreated":"2019-05-02T22:24:19.564Z","updated":"2019-05-02T22:24:19.564Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:19.564Z","size":"48","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?generation=1556835859564799&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-8
0633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/composed2/1556835859564799/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"composed2","generation":"1556835859564799","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CP+RiMPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AbWByQ==","componentCount":3,"etag":"CP+RiMPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content","name":"content","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835874352207","metageneration":"1","contentType":"image/jpeg","timeCreated":"2019-05-02T22:24:34.351Z","updated":"2019-05-02T22:24:34.351Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:34.351Z","size":"54","md5Hash":"N8p8/s9FwdAAnlvr/lEAjQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?generation=1556835874352207&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/content/1556835874352207/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"content","generation":"1556835874352207","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CM/Yjsrx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"GoUbsQ==","etag":"CM/Yjsrx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption","name":"customer-encryption","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835875058680","metageneration":"3","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:35.058Z","updated":"2019-05-02T22:24:36.225Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:35.058Z","size":"11","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?generation=1556835875058680&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPjnucrx/eECEAM="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption/1556835875058680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption","generation":"1556835875058680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPjnucrx/eECEAM="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"etag":"CPjnucrx/eECEAM=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2","name":"customer-encryption-2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835880717506","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:40.717Z","updated":"2019-05-02T22:24:40.717Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:40.717Z","size":"11","md5Hash":"xwWNFa0VdXPmlAwrlcAJcg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?generation=1556835880717506&alt=media","contentLanguage":"en","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CMKZk83x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-2/1556835880717506/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-2","generation":"1556835880717506","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CMKZk83x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"r0NGrg==","etag":"CMKZk83x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3","name":"customer-encryption-3","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835879859440","metageneration":"1","timeCreated":"2019-05-02T22:24:39.859Z","updated":"2019-05-02T22:24:39.859Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:39.859Z",
"size":"22","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?generation=1556835879859440&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPDp3szx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/customer-encryption-3/1556835879859440/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"customer-encryption-3","generation":"1556835879859440","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPDp3szx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"componentCount":2,"etag":"CPDp3szx/eECEAE=","customerEncryption":{"encryptionAlgorithm":"AES256","keySha256":"H+LmnXhRoeI6TMW5bsV6HyUk6pyGc2IMbqYbAXBcps0="}},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test","name":"gzip-test","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835860251867","metageneration":"1","contentType":"application/x-gzip","timeCreated":"2019-05-02T22:24:20.251Z","updated":"2019-05-02T22:24:20.251Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:20.251Z","size":"27","md5Hash":"OtCw+aRRIRqKGFAEOax+qw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integra
tion-test-20190502-80633403432013-0001/o/gzip-test?generation=1556835860251867&alt=media","contentEncoding":"gzip","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNuJssPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/gzip-test/1556835860251867/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"gzip-test","generation":"1556835860251867","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNuJssPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"9DhwBA==","etag":"CNuJssPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1","name":"hashesOnUpload-1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835885051680","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:45.051Z","updated":"2019-05-02T22:24:45.051Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:45.051Z","size":"27","md5Hash":"ofZjGlcXPJiGOAfKFbJl1Q==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?generation=1556835885051680&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CKDem8/x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/hashesOnUpload-1/1556835885051680/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"hashesOnUpload-1","generation":"1556835885051680","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CKDem8/x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"cH+A+w==","etag":"CKDem8/x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes","name":"obj/with/slashes","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845511018","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.510Z","updated":"2019-05-02T22:24:05.510Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.510Z","size":"16","md5Hash":"uez/hJ6wAreDQnccDUdxfg==","mediaLink":"https://ww
w.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?generation=1556835845511018&alt=media","cacheControl":"public, max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"COqurrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj/with/slashes/1556835845511018/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj/with/slashes","generation":"1556835845511018","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"COqurrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"oeo+FA==","etag":"COqurrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1","name":"obj1","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835844647225","metageneration":"4","timeCreated":"2019-05-02T22:24:04.646Z","updated":"2019-05-02T22:24:17.121Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:04.646Z","size":"16","md5Hash":"Sna/UWv7mcZI23oE5tUabQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?generation=1556835844647225&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/domain-google.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/domain-google.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"domain-google.com","role":"READER","domain":"google.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj1/1556835844647225/allUsers","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1/acl/allUsers","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj1","generation":"1556835844647225","entity":"allUsers","role":"READER","etag":"CLnS+bvx/eECEAQ="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"CT6dTA==","etag":"CLnS+bvx/eECEAQ="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2","name":"obj2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835845049245","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:05.048Z","updated":"2019-05-02T22:24:05.048Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:05.048Z","size":"16","md5Hash":"CC1wlwrMOIq0dvMkMylUhg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?generation=1556835845049245&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/obj2/1556835845049245/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"obj2","generation":"1556835845049245","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ2Xkrzx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"jV5AVQ==","etag":"CJ2Xkrzx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc","name":"posc","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835882760607","metageneration":"1","timeCreated":"2019-05-02T22:24:42.760Z","updated":"2019-05-02T22:24:42.760Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:42.760Z","size":"3","md5Hash":"rL0Y20zC+Fzt72VPzMSk2A==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?generation=1556835882760607&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-owners-496169601714","role":"OWN
ER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CJ/zj87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc/1556835882760607/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc","generation":"1556835882760607","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CJ/zj87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"z8SuHQ==","etag":"CJ/zj87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2","name":"posc2","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835883152770","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:43.152Z","updated":"2019-05-02T22:24:43.152Z","storageClass":"MULTI_REGIONAL","timeStorageClassUpdated":"2019-05-02T22:24:43.152Z","size":"3","md5Hash":"9WGq9u8L8U1CCLtGpMyzrQ==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?generation=1556835883152770&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CILrp87x/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/posc2/1556835883152770/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"posc2","generation":"1556835883152770","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CILrp87x/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"17qABQ==","etag":"CILrp87x/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL","name":"signedURL","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835861141206","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:24:21.140Z","updated":"2019-05-02T22:24:21.140Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:21.140Z","size":"29","md5Hash":"Jyxvgwm9n2MsrGTMPbMeYA==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?generation=1556835861141206&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CNat6MPx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/signedURL/1556835861141206/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"signedURL","generation":"1556835861141206","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CNat6MPx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"ZTqALw==","etag":"CNat6MPx/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object","name":"some-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835988273530","metageneration":"1","contentType":"text/plain","timeCreated":"2019-05-02T22:26:28.273Z","updated":"2019-05-02T22:26:28.273Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:26:28.273Z","size":"16","md5Hash":"tHacfSKfByC+/+clG7q+jw==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?generation=1556835988273530&alt=media","cacheControl":"public, 
max-age=60","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CPryt4Dy/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/some-object/1556835988273530/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"some-object","generation":"1556835988273530","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CPryt4Dy/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"Sm1gKw==","etag":"CPryt4Dy/eECEAE="},{"kind":"storage#object","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object","name":"zero-object","bucket":"go-integration-test-20190502-80633403432013-0001","generation":"1556835856537016","metageneration":"1","contentType":"text/plain; 
charset=utf-8","timeCreated":"2019-05-02T22:24:16.536Z","updated":"2019-05-02T22:24:16.536Z","storageClass":"STANDARD","timeStorageClassUpdated":"2019-05-02T22:24:16.536Z","size":"0","md5Hash":"1B2M2Y8AsgTpgAmY7PhCfg==","mediaLink":"https://www.googleapis.com/download/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?generation=1556835856537016&alt=media","acl":[{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CLirz8Hx/eECEAE="},{"kind":"storage#objectAccessControl","id":"go-integration-test-20190502-80633403432013-0001/zero-object/1556835856537016/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object/acl/user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","bucket":"go-integration-test-20190502-80633403432013-0001","object":"zero-object","generation":"1556835856537016","entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com","role":"OWNER","email":"another-thing@deklerk-sandbox.iam.gserviceaccount.com","etag":"CLirz8Hx/eECEAE="}],"owner":{"entity":"user-another-thing@deklerk-sandbox.iam.gserviceaccount.com"},"crc32c":"AAAAAA==","etag":"CLirz8Hx/eECEAE="}]}" + } + }, + { + "ID": "744cbe6970c9623b", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + 
"application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpfI-4jHQ7K_XJuo8cMIfOvPLKerfuQA1KlRDBi4Fq11Uc9i--oKNuFL_MbH0CCxK8PeI4r4fmZILF9hdSGAMMpMmyX0w67j7t84C4tRDx2LnbzG4I" + ] + }, + "Body": "" + } + }, + { + "ID": "a2bea52380600211", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/acl2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:35 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrzTcVhZWJuBZITVX_K6haiVlOGb5fEYRdiiH8ODa4XsVSPl4FCDs7zPpKMTfoZA1veIMd4ccdcj8rUqWDMcRfDU5l4cOGvNhGZV-h9S2AVJfl6GdE" + ] + }, + "Body": "" + } + }, + { + "ID": "e187777c49170a6d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/bucketInCopyAttrs?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UouPwITheFghtKlzs2vxoAGnEm_V7OZZnYkk_0pLhV8g5YN6TPRS2n1h0iAtzkuFXYtjovp3ifqt_351LO6-CobCar6VgRnJ-_EQ9WgOWhIC9DzYTs" + ] + }, + "Body": "" + } + }, + { + "ID": "653fa745ed683364", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/checksum-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + 
"Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UoBLZLK-ePmvYxO2ByvMQ1dXb_4-5yhOR-1nYkxjkINZf5fY9ptO2H4RfRtbIrRQ-QQNZHqZGgCOGJakKLkNgVI2_ExobQLkJ-JaJ-ViDWL9GZCEgU" + ] + }, + "Body": "" + } + }, + { + "ID": "91230665f80c46f7", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upasg8dCwbL7dWntUy3O1t_wWVeDNNT3YCz1uVNL8QUNh4z75y0p9OfL57wFX4CUv3NV06Oi9f7a85m42BzgDZl-kqmv4com_INYmRYjKbwrTdFB8A" + ] + }, + "Body": "" + } + }, + { + "ID": "dc68e1f94de4fa2a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/composed2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqXdUtFVSSFIrK72haTC8pKLogl9pB4lDLpARc8oDKdNt1yfbHj4VAQH0M_Doqj1mLY2LVmThFxvG_1mvtVBm-N4x_J-c5Izq46lZh_xx3wOQEYZKQ" + ] + }, + "Body": "" + } + }, + { + "ID": "db5b5c31ef8e503e", + "Request": { + "Method": "DELETE", + "URL": 
"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/content?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Ur2nVuDhUpL1-5n7rzvboeHuOBlvh-59eoy59Y5ZIyvUy6FXYj2eCphVTBFkaIPXK7XIS6DJXkPzZABjiFi4hMA7Ewdgp860sYu44qguzhTfOymXYY" + ] + }, + "Body": "" + } + }, + { + "ID": "f44103066254972d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqul73HMDr4p6XCqdfKz99RILwd68VT50DnynneOl5m5E_a7ky2mbMA8mdM6xyAWCQtiJm8dCxvIuKYtjLavA8JUlkGIqqCLGfLbB2EvWahd55Fw_A" + ] + }, + "Body": "" + } + }, + { + "ID": "99aa9c86d4998414", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:36 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + 
"X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uqfk5JfeEeFo80cy1Y-OxXgzSIbt_x7qDWML2yJopIrt9DsPmRq4cDVFqg6sUFStj6aMrjxP7iu_tbwZzS_3BtnwqfPrJZvHllZ7tdeFTXLmto742o" + ] + }, + "Body": "" + } + }, + { + "ID": "24bd829460fb4df6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/customer-encryption-3?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UprF0LO3CObAApP8iwUO6IvC-Yh-5whtr0F_mMzaj0rltEl4C-AZjNuBMGuQUue-v9oBqb-nQU2ykIenSZ3UYnK4XVV7moFIS43VyP7pByYqsHPkMo" + ] + }, + "Body": "" + } + }, + { + "ID": "acdf7531eddfd72c", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/gzip-test?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqAEWwZD_lXrdbrjMg-PYbmLP1-zSzjESAs-osetXJrFTueJ6ufxVfA3xkMG5KjtEz-68YryfKl9gAGsgfseX6H5COwltvyW6pJSdxDdA-dTV_S8eU" + ] + }, + "Body": "" + } + }, + { + "ID": "8facd7ede0dc71a6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/hashesOnUpload-1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], 
+ "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrInkaBVPO1ip6mFSpF0Nihd1wYE9HNfIz8HfFsZkiEVG1mX7eKMItsbmNwWMehWZ0qaKAf081imDi7DmKqs6mEy7vyTokMNXVUp5LrFoQ-yDfVRuc" + ] + }, + "Body": "" + } + }, + { + "ID": "6affac5f0a8dea5a", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj%2Fwith%2Fslashes?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqhbUYzZ-vUqGcyfzGFaQrr89Q05cXGCB0EbWA2Dy8dmQxB3uWJ7_FUwi6Ish-VMpOrZv78bT9qAHd5Tr5b3s8bTCX6QR9HlelvxkKdyrUv2G8QQuY" + ] + }, + "Body": "" + } + }, + { + "ID": "8258a6e1b5dc73aa", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj1?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Upola6CIBa9ONjl53PxMpFGQkHvYEau96KG2mgCmUDWDhxZKCw0T58QQRx8OIJW5sisH_acrPcg2o-LLdJgOxCtlVSw-t6wGisVL2-TSopEuezgmu4" + ] + }, + "Body": "" + } + }, + { + "ID": "12186a37ecd09333", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/obj2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + 
"Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UpFxLaOTSsPe7hDbzBznGCoZvAUmeNgWBwS45X2j3rpLlk8_MIFFuCa-aBpAM83d23ixudKpklnhiqCcrQQotCUZhkrMLpWLHBJdmcM0d5rCKL6opY" + ] + }, + "Body": "" + } + }, + { + "ID": "e7a92beca78e4ec6", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:37 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrpITV4ogt1-_eCsp0KFz7tVeM2A4FzSevs8Q0BPZ36nEPrP9aAaB4nfWASzLZgi2hNM1l9eFkf5UbYYWDT5J-nQFCapVNcm_4Nr43yd94Ce95SVps" + ] + }, + "Body": "" + } + }, + { + "ID": "5bb0506f3ca9135d", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/posc2?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UqFBt86Wz7Oqelsb0pPpIskBYdMYLR3XuBmD2F5DwyXt84Q1AUyL0Q94YtqTJP1yzgohHCkJWSGibqUARgP8Ilv9v4lVipsdfjSdFHgIjFJ1H-vuU4" + ] + }, + "Body": "" + } + }, + { + "ID": "3ea05ac204d7a112", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/signedURL?alt=json\u0026prettyPrint=false", + "Header": { + 
"Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Up2mLLaTTO-0iqVf4m8WXPEsP-mHlaS6hz_gavjGI5yzU0jCjBNDRw1gj6Wa_cQIBzfjytwP2XYhBIJeAglwa6Uqv87WhzeqgGXt4u820FuZKVgY-w" + ] + }, + "Body": "" + } + }, + { + "ID": "bed8580dc40e850e", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/some-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UodR7nshMeihBskPB6loBZoNDRFEUxojhFCKhQ0NpiEMRHY0ZHU8ncI4Sr7FHUmIDXyK8xVg_I7Qt5ESrqnGmRzW0h1eM8OGfEM-3weHyzMqN-VMik" + ] + }, + "Body": "" + } + }, + { + "ID": "57489f1d9de0ddb8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001/o/zero-object?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + 
"AEnB2UqxQ_NSeD-rrAbZXERIaDr3_1wDC_RZ4MyuFZnOhdDqLkn4qub7rAIamqSQKDD8-jbn0c3XmxvowTghxhq4tZaxxHTmYJtB_FPvJxRZtCt3Leugx4g" + ] + }, + "Body": "" + } + }, + { + "ID": "006b3176688c0ac8", + "Request": { + "Method": "DELETE", + "URL": "https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0001?alt=json\u0026prettyPrint=false", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 204, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "no-cache, no-store, max-age=0, must-revalidate" + ], + "Content-Length": [ + "0" + ], + "Content-Type": [ + "application/json" + ], + "Date": [ + "Thu, 02 May 2019 22:26:38 GMT" + ], + "Expires": [ + "Mon, 01 Jan 1990 00:00:00 GMT" + ], + "Pragma": [ + "no-cache" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2Uo4l8LfaJI78MH37sa7nOmY2A8EB4paZccrewnIFFud-pwGLeVf067ZiRaBAOAPsPYlaxxtsr6gtECA6UY0xiUBHPTLFOe4VNnNQpniNWYJEfZ1-I8" + ] + }, + "Body": "" + } + }, + { + "ID": "a853ce25e734dbfd", + "Request": { + "Method": "GET", + "URL": "https://www.googleapis.com/storage/v1/b?alt=json\u0026pageToken=\u0026prefix=go-integration-test\u0026prettyPrint=false\u0026project=deklerk-sandbox\u0026projection=full", + "Header": { + "Accept-Encoding": [ + "gzip" + ], + "User-Agent": [ + "google-api-go-client/0.5" + ] + }, + "MediaType": "", + "BodyParts": [ + "" + ] + }, + "Response": { + "StatusCode": 200, + "Proto": "HTTP/1.1", + "ProtoMajor": 1, + "ProtoMinor": 1, + "Header": { + "Alt-Svc": [ + "quic=\":443\"; ma=2592000; v=\"46,44,43,39\"" + ], + "Cache-Control": [ + "private, max-age=0, must-revalidate, no-transform" + ], + "Content-Length": [ + "47300" + ], + "Content-Type": [ + "application/json; charset=UTF-8" + ], + "Date": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Expires": [ + "Thu, 02 May 2019 22:26:39 GMT" + ], + "Server": [ + "UploadServer" + ], + "Vary": [ + "Origin", + "X-Origin" + ], + "X-Guploader-Customer": [ + "apiary_cloudstorage_metadata" + ], + "X-Guploader-Request-Result": [ + "success" + ], + "X-Guploader-Upload-Result": [ + "success" + ], + "X-Guploader-Uploadid": [ + "AEnB2UrnXhEY-kWgvl-XqP1kXnVDpQphHD9znE-EyBBlFT-kHZtXEsAW5QG5PuzlaJPfQUwZqnTHP0wxu6YDbRCgDMB31UR760_lfEiX88PtVKIdTgCWm5A" + ] + }, + "Body": 
"{"kind":"storage#buckets","items":[{"kind":"storage#bucket","id":"go-integration-test-20190502-66597705133146-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66597705133146-0001","timeCreated":"2019-05-02T18:29:58.601Z","updated":"2019-05-02T18:29:58.601Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66597705133146-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66597705133146-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66597705133146-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66611506907602-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66611506907602-0001","timeCreated":"2019-05-02T18:30:12.264Z","updated":"2019-05-02T18:30:12.264Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-editors-496169601714","bu
cket":"go-integration-test-20190502-66611506907602-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66611506907602-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66611506907602-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66611506907602-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-66633965420818-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-66633965420818-0001","timeCreated":"2019-05-02T18:30:34.662Z","updated":"2019-05-02T18:30:34.662Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-66633965420818-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-66633965420818-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-66633965420818-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projec
tNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-78294113514519-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-78294113514519-0001","timeCreated":"2019-05-02T21:44:54.960Z","updated":"2019-05-02T21:44:54.960Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-78294113514519-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-78294113514519-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-78294113514519-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0017","timeCreated":"2019-05-02T22:05:34.375Z","updated":"2019-05-02T22:05:36.341Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79
385133382825-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:34.375Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79385133382825-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79385133382825-0018","timeCreated":"2019-05-02T22:05:37.733Z","updated":"2019-05-02T22:05:37.733Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79385133382825-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79385133382825-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79385133382825-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE=
"},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:05:37.733Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79911595924903-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79911595924903-0001","timeCreated":"2019-05-02T22:11:52.358Z","updated":"2019-05-02T22:11:52.358Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79911595924903-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79911595924903-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79911595924903-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0001","timeCreated":"2019-05-02T22:12:09.706Z","updated":"2019-05-02T22:13:41.634Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-editors-496169601714","selfLink":"https:/
/www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"l1":"v2","new":"new"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0017","timeCreated":"2019-05-02T22:14:17.486Z","updated":"2019-05-02T22:14:18.905Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0017/ac
l/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:17.486Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-79928727982393-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-79928727982393-0018","timeCreated":"2019-05-02T22:14:20.105Z","updated":"2019-05-02T22:14:20.105Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-79928727982393-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-79928727982393-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-79928727982393-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:14:20.105Z"},"storageClass":"STANDARD","etag":"CAE="},{
"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0001","timeCreated":"2019-05-02T22:18:47.398Z","updated":"2019-05-02T22:18:47.398Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80326589403446-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80326589403446-0002","timeCreated":"2019-05-02T22:18:48.114Z","updated":"2019-05-02T22:18:48.114Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-8
0326589403446-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80326589403446-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80326589403446-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80326589403446-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0001","timeCreated":"2019-05-02T22:18:54.701Z","updated":"2019-05-02T22:18:54.701Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewe
rs"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80333955303539-0002","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002","projectNumber":"496169601714","name":"go-integration-test-20190502-80333955303539-0002","timeCreated":"2019-05-02T22:18:55.293Z","updated":"2019-05-02T22:18:55.293Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80333955303539-0002/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80333955303539-0002/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80333955303539-0002","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0001","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0001","timeCreated":"2019-05-02T22:20:46.897Z","updated":"2019-05-02T22:22:07.429Z","metageneration":"13","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-owners
-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0001/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0001/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0001","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CA0="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CA0="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","versioning":{"enabled":false},"lifecycle":{"rule":[{"action":{"type":"Delete"},"condition":{"age":30}}]},"labels":{"new":"new","l1":"v2"},"storageClass":"STANDARD","etag":"CA0="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0017","timeCreated":"2019-05-02T22:22:50.428Z","updated":"2019-05-02T22:22:51.935Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="}
,{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:50.428Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80445830430808-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80445830430808-0018","timeCreated":"2019-05-02T22:22:52.962Z","updated":"2019-05-02T22:22:52.962Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80445830430808-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80445830430808-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80445830430808-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:22:52.962Z"},"storageClass":"STANDARD","etag":"CAE="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0017","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0017","timeCreated":"2019-05-02T22:26:01.908Z","updated":"2019-05-02T22:26:03.544Z","metageneration":"2","acl":[{"kind":"storage#bucketAccessControl","i
d":"go-integration-test-20190502-80633403432013-0017/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0017/project-viewers-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0017/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0017","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAI="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAI="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:01.908Z","isLocked":true},"storageClass":"STANDARD","etag":"CAI="},{"kind":"storage#bucket","id":"go-integration-test-20190502-80633403432013-0018","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018","projectNumber":"496169601714","name":"go-integration-test-20190502-80633403432013-0018","timeCreated":"2019-05-02T22:26:15.097Z","updated":"2019-05-02T22:26:15.097Z","metageneration":"1","acl":[{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-owners-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-owners-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-editors-496169601714","selfLink":"https://www.googleapis.com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-editors-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#bucketAccessControl","id":"go-integration-test-20190502-80633403432013-0018/project-viewers-496169601714","selfLink":"https://www.googleapis.
com/storage/v1/b/go-integration-test-20190502-80633403432013-0018/acl/project-viewers-496169601714","bucket":"go-integration-test-20190502-80633403432013-0018","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"defaultObjectAcl":[{"kind":"storage#objectAccessControl","entity":"project-owners-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"owners"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-editors-496169601714","role":"OWNER","projectTeam":{"projectNumber":"496169601714","team":"editors"},"etag":"CAE="},{"kind":"storage#objectAccessControl","entity":"project-viewers-496169601714","role":"READER","projectTeam":{"projectNumber":"496169601714","team":"viewers"},"etag":"CAE="}],"iamConfiguration":{"bucketPolicyOnly":{"enabled":false}},"owner":{"entity":"project-owners-496169601714"},"location":"US","retentionPolicy":{"retentionPeriod":"90000","effectiveTime":"2019-05-02T22:26:15.097Z"},"storageClass":"STANDARD","etag":"CAE="}]}" + } + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 00000000..3a58c404 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,261 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "unicode/utf8" + + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + // SendCRC32C specifies whether to transmit a CRC32C field. It should be set + // to true in addition to setting the Writer's CRC32C field, because zero + // is a valid CRC and normally a zero would not be transmitted. + // If a CRC32C is sent, and the data written does not match the checksum, + // the write will be rejected. + SendCRC32C bool + + // ChunkSize controls the maximum number of bytes of the object that the + // Writer will attempt to send to the server in a single request. Objects + // smaller than the size will be sent in a single request, while larger + // objects will be split over multiple requests. The size will be rounded up + // to the nearest multiple of 256K. If zero, chunking will be disabled and + // the object will be uploaded in a single request. + // + // ChunkSize will default to a reasonable value. If you perform many concurrent + // writes of small objects, you may wish to set ChunkSize to a value that matches + // your objects' sizes to avoid consuming large amounts of memory. + // + // ChunkSize must be set before the first Write call.
+ ChunkSize int + + // ProgressFunc can be used to monitor the progress of a large write + // operation. If ProgressFunc is not nil and writing requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload), + // then ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far. + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(int64) + + ctx context.Context + o *ObjectHandle + + opened bool + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + obj *ObjectAttrs + + mu sync.Mutex + err error +} + +func (w *Writer) open() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). + if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } + pr, pw := io.Pipe() + w.pw = pw + w.opened = true + + go w.monitorCancel() + + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must be non-negative") + } + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(w.ChunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + + go func() { + defer close(w.donec) + + rawObj := attrs.toRawObject(w.o.bucket) + if w.SendCRC32C { + rawObj.Crc32c = encodeUint32(attrs.CRC32C) + } + if w.MD5 != nil { + rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5) + } + call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj). + Media(pr, mediaOpts...). + Projection("full"). + Context(w.ctx) + if w.ProgressFunc != nil { + call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) + } + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } + if attrs.PredefinedACL != "" { + call.PredefinedAcl(attrs.PredefinedACL) + } + if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { + w.mu.Lock() + w.err = err + w.mu.Unlock() + pr.CloseWithError(err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", w.o.gen, w.o.conds, call) + if err == nil { + if w.o.userProject != "" { + call.UserProject(w.o.userProject) + } + setClientHeader(call.Header()) + // If the chunk size is zero, then no chunking is done on the Reader, + // which means we cannot retry: the first call will read the data, and if + // it fails, there is no way to re-read. + if w.ChunkSize == 0 { + resp, err = call.Do() + } else { + // We will only retry here if the initial POST, which obtains a URI for + // the resumable upload, fails with a retryable error. The upload itself + // has its own retry logic. + err = runWithRetry(w.ctx, func() error { + var err2 error + resp, err2 = call.Do() + return err2 + }) + } + } + if err != nil { + w.mu.Lock() + w.err = err + w.mu.Unlock() + pr.CloseWithError(err) + return + } + w.obj = newObject(resp) + }() + return nil +} + +// Write appends to w. It implements the io.Writer interface. +// +// Since writes happen asynchronously, Write may return a nil +// error even though the write failed (or will fail).
Always +// use the error returned from Writer.Close to determine if +// the upload was successful. +func (w *Writer) Write(p []byte) (n int, err error) { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + if werr != nil { + return 0, werr + } + if !w.opened { + if err := w.open(); err != nil { + return 0, err + } + } + n, err = w.pw.Write(p) + if err != nil { + w.mu.Lock() + werr := w.err + w.mu.Unlock() + // Preserve existing functionality that when context is canceled, Write will return + // context.Canceled instead of "io: read/write on closed pipe". This hides the + // pipe implementation detail from users and makes Write seem as though it's an RPC. + if werr == context.Canceled || werr == context.DeadlineExceeded { + return n, werr + } + } + return n, err +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.open(); err != nil { + return err + } + } + + // Closing either the read or write causes the entire pipe to close. + if err := w.pw.Close(); err != nil { + return err + } + + <-w.donec + w.mu.Lock() + defer w.mu.Unlock() + return w.err +} + +// monitorCancel is intended to be used as a background goroutine. It monitors +// the context, and when it observes that the context has been canceled, it manually +// closes things that do not take a context. +func (w *Writer) monitorCancel() { + select { + case <-w.ctx.Done(): + w.mu.Lock() + werr := w.ctx.Err() + w.err = werr + w.mu.Unlock() + + // Closing either the read or write causes the entire pipe to close. + w.CloseWithError(werr) + case <-w.donec: + } +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. +// +// Deprecated: cancel the context passed to NewWriter instead. +func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md new file mode 100644 index 00000000..0786fdf4 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests.
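
Stepping back to the `storage.Writer` vendored above: it has two usage subtleties that are easy to miss. The checksum and chunking knobs must be set before the first `Write`, and because writes happen asynchronously, only the error from `Close` tells you whether the upload actually succeeded. A minimal usage sketch follows; the bucket and object names are placeholders, and it assumes Application Default Credentials are available in the environment:

```go
package main

import (
	"context"
	"hash/crc32"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()

	// Assumes Application Default Credentials are configured.
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	data := []byte("hello, world")

	// "my-bucket" and "my-object" are placeholder names.
	w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)

	// ChunkSize must be set before the first Write; it is rounded up to a
	// multiple of 256K. Zero disables chunking (single-request upload).
	w.ChunkSize = 256 << 10

	// Opt in to server-side CRC32C verification: set both SendCRC32C and the
	// CRC32C attribute, since a zero checksum is valid and would otherwise
	// not be transmitted.
	w.SendCRC32C = true
	w.CRC32C = crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))

	// Write may return nil even if the upload later fails...
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// ...so the error from Close is the one that decides success.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %q (generation %d)", w.Attrs().Name, w.Attrs().Generation)
}
```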
+ +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md new file mode 100644 index 00000000..3b9e908f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md @@ -0,0 +1,61 @@ +# OpenCensus Agent Go Exporter + +[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] + + +This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. +OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from +the OpenCensus Library, export them to other backends and possibly push configurations back to +the Library. See more details on [OC-Agent Readme][OCAgentReadme]. + +Note: This is an experimental repository and is likely to get backwards-incompatible changes. +Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. + +## Installation + +```bash +$ go get -u contrib.go.opencensus.io/exporter/ocagent +``` + +## Usage + +```go +import ( + "context" + "fmt" + "log" + "time" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/trace" +) + +func Example() { + exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) + if err != nil { + log.Fatalf("Failed to create the agent exporter: %v", err) + } + defer exp.Stop() + + // Now register it as a trace exporter. + trace.RegisterExporter(exp) + + // Then use the OpenCensus tracing library, like we normally would.
+ ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") + defer span.End() + + for i := 0; i < 10; i++ { + _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) + <-time.After(6 * time.Millisecond) + iSpan.End() + } +} +``` + +[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto +[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent +[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master +[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent + diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go new file mode 100644 index 00000000..297e44b6 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "math/rand" + "time" +) + +var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) + +// retries function fn up to n times if fn keeps returning an error, otherwise it returns nil early. +// It applies exponential backoff in units of (1<<n) + jitter milliseconds. + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + traceExporter, err := traceSvcClient.Export(ctx) + if err != nil { + return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) + } + + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := traceExporter.Send(firstTraceMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + ae.mu.Lock() + ae.traceExporter = traceExporter + ae.mu.Unlock() + + // Initiate the config service by sending over node identifier info. + configStream, err := traceSvcClient.Config(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) + } + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} + if err := configStream.Send(firstCfgMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + // In the background, handle trace configurations that are beamed down + // by the agent, but also reply to it with the applied configuration.
+ go ae.handleConfigStreaming(configStream) + + return nil +} + +func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { + metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) + metricsExporter, err := metricsSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) + } + // Initiate the metrics service by sending over the first message just containing the Node and Resource. + firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := metricsExporter.Send(firstMetricsMessage); err != nil { + return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) + } + + ae.mu.Lock() + ae.metricsExporter = metricsExporter + ae.mu.Unlock() + + // With that we are good to go and can start sending metrics + return nil +} + +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + var dialOpts []grpc.DialOption + if ae.clientTransportCredentials != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) + } else if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + if ae.compressor != "" { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) + } + dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + if len(ae.grpcDialOptions) != 0 { + dialOpts = append(dialOpts, ae.grpcDialOptions...) + } + + ctx := context.Background() + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + return grpc.DialContext(ctx, addr, dialOpts...) +} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + // Note: We haven't yet implemented configuration sending so we + // should NOT be changing connection states within this function for now. + for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. + return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.RLock() + cc := ae.grpcClientConn + started := ae.started + stopped := ae.stopped + ae.mu.RUnlock() + + if !started { + return errNotStarted + } + if stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. 
+ var err error + if cc != nil { + err = cc.Close() + } + + // At this point we can change the state variables: started and stopped + ae.mu.Lock() + ae.started = false + ae.stopped = true + ae.mu.Unlock() + close(ae.stopCh) + + // Ensure that the backgroundConnector returns + <-ae.backgroundConnectionDoneCh + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, 1) +} + +func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { + if batch == nil || len(batch.Spans) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.traceExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. + // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.traceExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func (ae *Exporter) ExportView(vd *view.Data) { + if vd == nil { + return + } + _ = ae.viewDataBundler.Add(vd, 1) +} + +func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { + if len(sdl) == 0 { + return nil + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) + } + } + return protoSpans +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoSpans := ocSpanDataToPbSpans(sdl) + if len(protoSpans) == 0 { + return + } + ae.senderMu.Lock() + err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + }) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { + if len(vdl) == 0 { + return nil + } + metrics := make([]*metricspb.Metric, 0, len(vdl)) + for _, vd := range vdl { + if vd != nil { + vmetric, err := viewDataToMetric(vd) + // TODO: (@odeke-em) somehow report this error, if it is non-nil. + if err == nil && vmetric != nil { + metrics = append(metrics, vmetric) + } + } + } + return metrics +} + +func (ae *Exporter) uploadViewData(vdl []*view.Data) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoMetrics := ocViewDataToPbMetrics(vdl) + if len(protoMetrics) == 0 { + return + } + err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ + Metrics: protoMetrics, + // TODO:(@odeke-em) + // a) Figure out how to derive a Node from the environment + // b) Figure out how to derive a Resource from the environment + // or better letting users of the exporter configure it. 
+ }) + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() + ae.viewDataBundler.Flush() +} + +func resourceProtoFromEnv() *resourcepb.Resource { + rs, _ := resource.FromEnv(context.Background()) + if rs == nil { + return nil + } + return resourceToResourcePb(rs) +} + +func resourceToResourcePb(rs *resource.Resource) *resourcepb.Resource { + rprs := &resourcepb.Resource{ + Type: rs.Type, + } + if rs.Labels != nil { + rprs.Labels = make(map[string]string) + for k, v := range rs.Labels { + rprs.Labels[k] = v + } + } + return rprs +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 00000000..6820216f --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,161 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "go.opencensus.io/resource" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type resourceDetector resource.Detector + +var _ ExporterOption = (*resourceDetector)(nil) + +func (rd resourceDetector) withExporter(e *Exporter) { + e.resourceDetector = resource.Detector(rd) +} + +// WithResourceDetector allows one to register a resource detector. Resource Detector is used +// to detect resources associated with the application. Detected resource is exported +// along with the metrics. If the detector fails then it panics. +// If a resource detector is not provided then by default it detects from the environment. +func WithResourceDetector(rd resource.Detector) ExporterOption { + return resourceDetector(rd) +} + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. +func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. 
If unset, it will instead try to +// connect to DefaultAgentHost:DefaultAgentPort. +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. +func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} + +type reconnectionPeriod time.Duration + +func (rp reconnectionPeriod) withExporter(e *Exporter) { + e.reconnectionPeriod = time.Duration(rp) +} + +// WithReconnectionPeriod allows one to set the period between attempts +// to reconnect to the agent after the connection is lost. +func WithReconnectionPeriod(rp time.Duration) ExporterOption { + return reconnectionPeriod(rp) +} + +type compressorSetter string + +func (c compressorSetter) withExporter(e *Exporter) { + e.compressor = string(c) +} + +// UseCompressor will set the compressor for the gRPC client to use when sending requests. +// It is the responsibility of the caller to ensure that the compressor set has been registered +// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some +// compressors auto-register on import, such as gzip, which can be registered with +// `import _ "google.golang.org/grpc/encoding/gzip"` +func UseCompressor(compressorName string) ExporterOption { + return compressorSetter(compressorName) +} + +type headerSetter map[string]string + +func (h headerSetter) withExporter(e *Exporter) { + e.headers = map[string]string(h) +} + +// WithHeaders will send the provided headers when the gRPC stream connection +// is instantiated. +func WithHeaders(headers map[string]string) ExporterOption { + return headerSetter(headers) +} + +type clientCredentials struct { + credentials.TransportCredentials +} + +var _ ExporterOption = (*clientCredentials)(nil) + +// WithTLSCredentials allows the connection to use TLS credentials +// when talking to the server. It takes in credentials.TransportCredentials instead +// of, say, a certificate file or a tls.Certificate, because retrieving +// these credentials can be done in many ways, e.g. from a plain file, an in-code tls.Config, +// or by certificate rotation, so it is up to the caller to decide what to use. +func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { + return &clientCredentials{TransportCredentials: creds} +} + +func (cc *clientCredentials) withExporter(e *Exporter) { + e.clientTransportCredentials = cc.TransportCredentials +} + +type grpcDialOptions []grpc.DialOption + +var _ ExporterOption = (*grpcDialOptions)(nil) + +// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts +// with other gRPC configuration specified for the agent connection, the options here will +// take precedence since they are set last. +func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption { + return grpcDialOptions(opts) +} + +func (opts grpcDialOptions) withExporter(e *Exporter) { + e.grpcDialOptions = opts +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 00000000..983ebe7b --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,248 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "math" + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 +) + +func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. 
+ ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +// This code is mostly copied from +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 +func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { + if len(as) == 0 && len(es) == 0 { + return nil + } + + timeEvents := &tracepb.Span_TimeEvents{} + var annotations, droppedAnnotationsCount int + var messageEvents, droppedMessageEventsCount int + + // Transform annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotations++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(a.Time), + Value: transformAnnotationToTimeEvent(&a), + }, + ) + } + + // Transform message events + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + timeEvents.TimeEvent = append(timeEvents.TimeEvent, + &tracepb.Span_TimeEvent{ + Time: timeToTimestamp(e.Time), + Value: transformMessageEventToTimeEvent(&e), + }, + ) + } + + // Process dropped counter + timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) + timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) + + return timeEvents +} + +func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ { + return &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: &tracepb.TruncatableString{Value: a.Message}, + Attributes: ocAttributesToProtoAttributes(a.Attributes), + }, + } +} + +func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ { + return &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), + Id: uint64(e.MessageID), + UncompressedSize: uint64(e.UncompressedByteSize), + CompressedSize: uint64(e.CompressedByteSize), + }, + } +} + +// clip32 clips an int to the range of an int32. 
+func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} + +func timeToTimestamp(t time.Time) *timestamp.Timestamp { + nanoTime := t.UnixNano() + return &timestamp.Timestamp{ + Seconds: nanoTime / 1e9, + Nanos: int32(nanoTime % 1e9), + } +} + +func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind { + switch kind { + case trace.SpanKindClient: + return tracepb.Span_CLIENT + case trace.SpanKindServer: + return tracepb.Span_SERVER + default: + return tracepb.Span_SPAN_KIND_UNSPECIFIED + } +} + +func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate { + if ts == nil { + return nil + } + return &tracepb.Span_Tracestate{ + Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()), + } +} + +func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry { + protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries)) + for _, entry := range entries { + protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{ + Key: entry.Key, + Value: entry.Value, + }) + } + return protoEntries +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go new file mode 100644 index 00000000..43f18dec --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go @@ -0,0 +1,274 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
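
The two helpers at the end of transform_spans.go above encode small but easy-to-miss conventions: a protobuf Timestamp carries whole seconds plus a nanosecond remainder, and counts narrowed to int32 saturate at the bounds rather than wrap. A standalone sketch of the same arithmetic (not part of the vendored file; names here are illustrative only):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// splitUnixNano mirrors the arithmetic in the vendored timeToTimestamp: a
// Unix nanosecond reading splits into whole seconds and a nanosecond remainder.
func splitUnixNano(t time.Time) (secs int64, nanos int32) {
	n := t.UnixNano()
	return n / 1e9, int32(n % 1e9)
}

// clip32 mirrors the vendored helper: values outside the int32 range
// saturate at the bounds instead of wrapping around.
func clip32(x int) int32 {
	if x < math.MinInt32 {
		return math.MinInt32
	}
	if x > math.MaxInt32 {
		return math.MaxInt32
	}
	return int32(x)
}

func main() {
	t := time.Date(2019, 5, 2, 22, 26, 15, 250000000, time.UTC)
	secs, nanos := splitUnixNano(t)
	fmt.Println(secs, nanos) // seconds since the epoch, then 250000000

	big := math.MaxInt32
	// On 64-bit platforms big+1 exceeds int32, so clip32 saturates to 2147483647.
	fmt.Println(clip32(big + 1))
}
```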
+ +package ocagent + +import ( + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/golang/protobuf/ptypes/timestamp" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" +) + +var ( + errNilMeasure = errors.New("expecting a non-nil stats.Measure") + errNilView = errors.New("expecting a non-nil view.View") + errNilViewData = errors.New("expecting a non-nil view.Data") +) + +func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { + if vd == nil { + return nil, errNilViewData + } + + descriptor, err := viewToMetricDescriptor(vd.View) + if err != nil { + return nil, err + } + + timeseries, err := viewDataToTimeseries(vd) + if err != nil { + return nil, err + } + + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: timeseries, + } + return metric, nil +} + +func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { + if v == nil { + return nil, errNilView + } + if v.Measure == nil { + return nil, errNilMeasure + } + + desc := &metricspb.MetricDescriptor{ + Name: stringOrCall(v.Name, v.Measure.Name), + Description: stringOrCall(v.Description, v.Measure.Description), + Unit: v.Measure.Unit(), + Type: aggregationToMetricDescriptorType(v), + LabelKeys: tagKeysToLabelKeys(v.TagKeys), + } + return desc, nil +} + +func stringOrCall(first string, call func() string) string { + if first != "" { + return first + } + return call() +} + +type measureType uint + +const ( + measureUnknown measureType = iota + measureInt64 + measureFloat64 +) + +func measureTypeFromMeasure(m stats.Measure) measureType { + switch m.(type) { + default: + return measureUnknown + case *stats.Float64Measure: + return measureFloat64 + case *stats.Int64Measure: + return measureInt64 + } +} + +func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { + if v == nil || v.Aggregation == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + if v.Measure == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + + switch v.Aggregation.Type { + case view.AggTypeCount: + // Cumulative on int64 + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + + case view.AggTypeDistribution: + // Cumulative types + return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION + + case view.AggTypeLastValue: + // Gauge types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_GAUGE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_GAUGE_INT64 + } + + case view.AggTypeSum: + // Cumulative types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + } + } + + // For all other cases, return unspecified. + return metricspb.MetricDescriptor_UNSPECIFIED +} + +func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey { + labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys)) + for _, tagKey := range tagKeys { + labelKeys = append(labelKeys, &metricspb.LabelKey{ + Key: tagKey.Name(), + }) + } + return labelKeys +} + +func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) { + if vd == nil || len(vd.Rows) == 0 { + return nil, nil + } + + // Given that view.Data only contains Start and End, + // the timestamps for all the row data will be the exact same + // per aggregation. However, the values will differ. + // Each row has its own tags. + startTimestamp := timeToProtoTimestamp(vd.Start) + endTimestamp := timeToProtoTimestamp(vd.End) + + mType := measureTypeFromMeasure(vd.View.Measure) + timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows)) + // It is imperative that the ordering of "LabelValues" matches those + // of the Label keys in the metric descriptor. + for _, row := range vd.Rows { + labelValues := labelValuesFromTags(row.Tags) + point := rowToPoint(vd.View, row, endTimestamp, mType) + timeseries = append(timeseries, &metricspb.TimeSeries{ + StartTimestamp: startTimestamp, + LabelValues: labelValues, + Points: []*metricspb.Point{point}, + }) + } + + if len(timeseries) == 0 { + return nil, nil + } + + return timeseries, nil +} + +func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp { + unixNano := t.UnixNano() + return &timestamp.Timestamp{ + Seconds: int64(unixNano / 1e9), + Nanos: int32(unixNano % 1e9), + } +} + +func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point { + pt := &metricspb.Point{ + Timestamp: endTimestamp, + } + + switch data := row.Data.(type) { + case *view.CountData: + pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value} + + case *view.DistributionData: + pt.Value = &metricspb.Point_DistributionValue{ + DistributionValue: &metricspb.DistributionValue{ + Count: data.Count, + Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count + // TODO: Add Exemplar + Buckets: bucketsToProtoBuckets(data.CountPerBucket), + BucketOptions: &metricspb.DistributionValue_BucketOptions{ + Type: &metricspb.DistributionValue_BucketOptions_Explicit_{ + Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{ + Bounds: v.Aggregation.Buckets, + }, + }, + }, + SumOfSquaredDeviation: data.SumOfSquaredDev, + }} + + case *view.LastValueData: + setPointValue(pt, data.Value, mType) + + case *view.SumData: + setPointValue(pt, data.Value, mType) + } + + return pt +} + +// Not returning anything from this function because metricspb.Point.is_Value is an unexported +// interface, hence we just have to set its value by pointer. +func setPointValue(pt *metricspb.Point, value float64, mType measureType) { + if mType == measureInt64 { + pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} + } else { + pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} + } +} + +func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { + distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) + for i := 0; i < len(countPerBucket); i++ { + count := countPerBucket[i] + + distBuckets[i] = &metricspb.DistributionValue_Bucket{ + Count: count, + } + } + + return distBuckets +} + +func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { + if len(tags) == 0 { + return nil + } + + labelValues := make([]*metricspb.LabelValue, 0, len(tags)) + for _, tag_ := range tags { + labelValues = append(labelValues, &metricspb.LabelValue{ + Value: tag_.Value, + + // It is imperative that we set the "HasValue" attribute, + // in order to distinguish missing a label from the empty string.
+ // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue + // + // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, + // so the best case that we can use to distinguish missing labels/tags from the + // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have + // a value. + HasValue: tag_.Key.Name() != "", + }) + } + return labelValues +} diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 00000000..68be4c75 --- /dev/null +++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +const Version = "0.0.1" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 00000000..af39a91e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000..2d1d7260 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go new file mode 100644 index 00000000..c791818c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources/models.go @@ -0,0 +1,203 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package resources + +import ( + "context" + + original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources" +) + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type DeploymentMode = original.DeploymentMode + +const ( + Complete DeploymentMode = original.Complete + Incremental DeploymentMode = original.Incremental +) + +type ResourceIdentityType = original.ResourceIdentityType + +const ( + SystemAssigned ResourceIdentityType = original.SystemAssigned +) + +type AliasPathType = original.AliasPathType +type AliasType = original.AliasType +type BaseClient = original.BaseClient +type BasicDependency = original.BasicDependency +type Client = original.Client +type DebugSetting = original.DebugSetting +type Dependency = original.Dependency +type Deployment = original.Deployment +type DeploymentExportResult = original.DeploymentExportResult +type DeploymentExtended = original.DeploymentExtended +type DeploymentExtendedFilter = original.DeploymentExtendedFilter +type DeploymentListResult = original.DeploymentListResult +type DeploymentListResultIterator = original.DeploymentListResultIterator +type DeploymentListResultPage = original.DeploymentListResultPage +type DeploymentOperation = original.DeploymentOperation +type DeploymentOperationProperties = original.DeploymentOperationProperties +type DeploymentOperationsClient = original.DeploymentOperationsClient +type DeploymentOperationsListResult = original.DeploymentOperationsListResult +type DeploymentOperationsListResultIterator = original.DeploymentOperationsListResultIterator +type DeploymentOperationsListResultPage = original.DeploymentOperationsListResultPage +type DeploymentProperties = original.DeploymentProperties +type DeploymentPropertiesExtended = original.DeploymentPropertiesExtended +type DeploymentValidateResult = original.DeploymentValidateResult +type DeploymentsClient = original.DeploymentsClient +type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture +type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture +type ExportTemplateRequest = original.ExportTemplateRequest +type GenericResource = original.GenericResource +type GenericResourceFilter = original.GenericResourceFilter +type Group = original.Group +type GroupExportResult = original.GroupExportResult +type GroupFilter = original.GroupFilter +type GroupListResult = 
original.GroupListResult +type GroupListResultIterator = original.GroupListResultIterator +type GroupListResultPage = original.GroupListResultPage +type GroupProperties = original.GroupProperties +type GroupsClient = original.GroupsClient +type GroupsDeleteFuture = original.GroupsDeleteFuture +type HTTPMessage = original.HTTPMessage +type Identity = original.Identity +type ListResult = original.ListResult +type ListResultIterator = original.ListResultIterator +type ListResultPage = original.ListResultPage +type ManagementErrorWithDetails = original.ManagementErrorWithDetails +type MoveInfo = original.MoveInfo +type MoveResourcesFuture = original.MoveResourcesFuture +type ParametersLink = original.ParametersLink +type Plan = original.Plan +type Provider = original.Provider +type ProviderListResult = original.ProviderListResult +type ProviderListResultIterator = original.ProviderListResultIterator +type ProviderListResultPage = original.ProviderListResultPage +type ProviderOperationDisplayProperties = original.ProviderOperationDisplayProperties +type ProviderResourceType = original.ProviderResourceType +type ProvidersClient = original.ProvidersClient +type Resource = original.Resource +type Sku = original.Sku +type SubResource = original.SubResource +type TagCount = original.TagCount +type TagDetails = original.TagDetails +type TagValue = original.TagValue +type TagsClient = original.TagsClient +type TagsListResult = original.TagsListResult +type TagsListResultIterator = original.TagsListResultIterator +type TagsListResultPage = original.TagsListResultPage +type TargetResource = original.TargetResource +type TemplateLink = original.TemplateLink +type UpdateFuture = original.UpdateFuture + +func New(subscriptionID string) BaseClient { + return original.New(subscriptionID) +} +func NewClient(subscriptionID string) Client { + return original.NewClient(subscriptionID) +} +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return original.NewClientWithBaseURI(baseURI, subscriptionID) +} +func NewDeploymentListResultIterator(page DeploymentListResultPage) DeploymentListResultIterator { + return original.NewDeploymentListResultIterator(page) +} +func NewDeploymentListResultPage(getNextPage func(context.Context, DeploymentListResult) (DeploymentListResult, error)) DeploymentListResultPage { + return original.NewDeploymentListResultPage(getNextPage) +} +func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { + return original.NewDeploymentOperationsClient(subscriptionID) +} +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return original.NewDeploymentOperationsClientWithBaseURI(baseURI, subscriptionID) +} +func NewDeploymentOperationsListResultIterator(page DeploymentOperationsListResultPage) DeploymentOperationsListResultIterator { + return original.NewDeploymentOperationsListResultIterator(page) +} +func NewDeploymentOperationsListResultPage(getNextPage func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error)) DeploymentOperationsListResultPage { + return original.NewDeploymentOperationsListResultPage(getNextPage) +} +func NewDeploymentsClient(subscriptionID string) DeploymentsClient { + return original.NewDeploymentsClient(subscriptionID) +} +func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { + return original.NewDeploymentsClientWithBaseURI(baseURI, subscriptionID) +} +func 
NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator { + return original.NewGroupListResultIterator(page) +} +func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage { + return original.NewGroupListResultPage(getNextPage) +} +func NewGroupsClient(subscriptionID string) GroupsClient { + return original.NewGroupsClient(subscriptionID) +} +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return original.NewGroupsClientWithBaseURI(baseURI, subscriptionID) +} +func NewListResultIterator(page ListResultPage) ListResultIterator { + return original.NewListResultIterator(page) +} +func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage { + return original.NewListResultPage(getNextPage) +} +func NewProviderListResultIterator(page ProviderListResultPage) ProviderListResultIterator { + return original.NewProviderListResultIterator(page) +} +func NewProviderListResultPage(getNextPage func(context.Context, ProviderListResult) (ProviderListResult, error)) ProviderListResultPage { + return original.NewProviderListResultPage(getNextPage) +} +func NewProvidersClient(subscriptionID string) ProvidersClient { + return original.NewProvidersClient(subscriptionID) +} +func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return original.NewProvidersClientWithBaseURI(baseURI, subscriptionID) +} +func NewTagsClient(subscriptionID string) TagsClient { + return original.NewTagsClient(subscriptionID) +} +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return original.NewTagsClientWithBaseURI(baseURI, subscriptionID) +} +func NewTagsListResultIterator(page TagsListResultPage) TagsListResultIterator { + return original.NewTagsListResultIterator(page) +} +func NewTagsListResultPage(getNextPage func(context.Context, TagsListResult) (TagsListResult, error)) TagsListResultPage { + return original.NewTagsListResultPage(getNextPage) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, subscriptionID) +} +func PossibleDeploymentModeValues() []DeploymentMode { + return original.PossibleDeploymentModeValues() +} +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return original.PossibleResourceIdentityTypeValues() +} +func UserAgent() string { + return original.UserAgent() + " profiles/2017-03-09" +} +func Version() string { + return original.Version() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage/models.go new file mode 100644 index 00000000..1af1d6eb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage/models.go @@ -0,0 +1,176 @@ +// +build go1.9 + +// Copyright 2019 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This code was auto-generated by: +// github.com/Azure/azure-sdk-for-go/tools/profileBuilder + +package storage + +import original "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage" + +const ( + DefaultBaseURI = original.DefaultBaseURI +) + +type AccessTier = original.AccessTier + +const ( + Cool AccessTier = original.Cool + Hot AccessTier = original.Hot +) + +type AccountStatus = original.AccountStatus + +const ( + Available AccountStatus = original.Available + Unavailable AccountStatus = original.Unavailable +) + +type KeyPermission = original.KeyPermission + +const ( + FULL KeyPermission = original.FULL + READ KeyPermission = original.READ +) + +type Kind = original.Kind + +const ( + BlobStorage Kind = original.BlobStorage + Storage Kind = original.Storage +) + +type ProvisioningState = original.ProvisioningState + +const ( + Creating ProvisioningState = original.Creating + ResolvingDNS ProvisioningState = original.ResolvingDNS + Succeeded ProvisioningState = original.Succeeded +) + +type Reason = original.Reason + +const ( + AccountNameInvalid Reason = original.AccountNameInvalid + AlreadyExists Reason = original.AlreadyExists +) + +type SkuName = original.SkuName + +const ( + PremiumLRS SkuName = original.PremiumLRS + StandardGRS SkuName = original.StandardGRS + StandardLRS SkuName = original.StandardLRS + StandardRAGRS SkuName = original.StandardRAGRS + StandardZRS SkuName = original.StandardZRS +) + +type SkuTier = original.SkuTier + +const ( + Premium SkuTier = original.Premium + Standard SkuTier = original.Standard +) + +type UsageUnit = original.UsageUnit + +const ( + Bytes UsageUnit = original.Bytes + BytesPerSecond UsageUnit = original.BytesPerSecond + Count UsageUnit = original.Count + CountsPerSecond UsageUnit = original.CountsPerSecond + Percent UsageUnit = original.Percent + Seconds UsageUnit = original.Seconds +) + +type Account = original.Account +type AccountCheckNameAvailabilityParameters = original.AccountCheckNameAvailabilityParameters +type AccountCreateParameters = original.AccountCreateParameters +type AccountKey = original.AccountKey +type AccountListKeysResult = original.AccountListKeysResult +type AccountListResult = original.AccountListResult +type AccountProperties = original.AccountProperties +type AccountPropertiesCreateParameters = original.AccountPropertiesCreateParameters +type AccountPropertiesUpdateParameters = original.AccountPropertiesUpdateParameters +type AccountRegenerateKeyParameters = original.AccountRegenerateKeyParameters +type AccountUpdateParameters = original.AccountUpdateParameters +type AccountsClient = original.AccountsClient +type AccountsCreateFuture = original.AccountsCreateFuture +type BaseClient = original.BaseClient +type CheckNameAvailabilityResult = original.CheckNameAvailabilityResult +type CustomDomain = original.CustomDomain +type Encryption = original.Encryption +type EncryptionService = original.EncryptionService +type EncryptionServices = original.EncryptionServices +type Endpoints = original.Endpoints +type Resource = original.Resource +type Sku = original.Sku +type Usage = original.Usage +type UsageClient = original.UsageClient +type UsageListResult = original.UsageListResult +type UsageName = original.UsageName + +func New(subscriptionID string) BaseClient { + return original.New(subscriptionID) +} +func NewAccountsClient(subscriptionID string) AccountsClient { + return 
original.NewAccountsClient(subscriptionID) +} +func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return original.NewAccountsClientWithBaseURI(baseURI, subscriptionID) +} +func NewUsageClient(subscriptionID string) UsageClient { + return original.NewUsageClient(subscriptionID) +} +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return original.NewUsageClientWithBaseURI(baseURI, subscriptionID) +} +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return original.NewWithBaseURI(baseURI, subscriptionID) +} +func PossibleAccessTierValues() []AccessTier { + return original.PossibleAccessTierValues() +} +func PossibleAccountStatusValues() []AccountStatus { + return original.PossibleAccountStatusValues() +} +func PossibleKeyPermissionValues() []KeyPermission { + return original.PossibleKeyPermissionValues() +} +func PossibleKindValues() []Kind { + return original.PossibleKindValues() +} +func PossibleProvisioningStateValues() []ProvisioningState { + return original.PossibleProvisioningStateValues() +} +func PossibleReasonValues() []Reason { + return original.PossibleReasonValues() +} +func PossibleSkuNameValues() []SkuName { + return original.PossibleSkuNameValues() +} +func PossibleSkuTierValues() []SkuTier { + return original.PossibleSkuTierValues() +} +func PossibleUsageUnitValues() []UsageUnit { + return original.PossibleUsageUnitValues() +} +func UserAgent() string { + return original.UserAgent() + " profiles/2017-03-09" +} +func Version() string { + return original.Version() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/client.go new file mode 100644 index 00000000..a25b93f1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/client.go @@ -0,0 +1,51 @@ +// Package resources implements the Azure ARM Resources service API version 2016-02-01. +// +// +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Resources + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Resources. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. 
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deploymentoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deploymentoperations.go new file mode 100644 index 00000000..67d7f6a3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deploymentoperations.go @@ -0,0 +1,256 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DeploymentOperationsClient is the client for the DeploymentOperations methods of the Resources service. +type DeploymentOperationsClient struct { + BaseClient +} + +// NewDeploymentOperationsClient creates an instance of the DeploymentOperationsClient client. +func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { + return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentOperationsClientWithBaseURI creates an instance of the DeploymentOperationsClient client. +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a single deployment operation. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment. +// operationID - operation Id.
+func (client DeploymentOperationsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, deploymentName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentOperationsClient) GetPreparer(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of deployment operations. +// Parameters: +// resourceGroupName - the name of the resource group.
The name is case insensitive. +// deploymentName - the name of the deployment. +// top - query parameters. +func (client DeploymentOperationsClient) List(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List") + defer func() { + sc := -1 + if result.dolr.Response.Response != nil { + sc = result.dolr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, deploymentName, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dolr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure sending request") + return + } + + result.dolr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentOperationsClient) ListPreparer(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client DeploymentOperationsClient) listNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) { + req, err := lastResults.deploymentOperationsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DeploymentOperationsClient) ListComplete(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, deploymentName, top) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deployments.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deployments.go new file mode 100644 index 00000000..54ddcb2c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/deployments.go @@ -0,0 +1,783 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DeploymentsClient is the client for the Deployments methods of the Resources service. +type DeploymentsClient struct { + BaseClient +} + +// NewDeploymentsClient creates an instance of the DeploymentsClient client. +func NewDeploymentsClient(subscriptionID string) DeploymentsClient { + return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentsClientWithBaseURI creates an instance of the DeploymentsClient client. +func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { + return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancels a currently running template deployment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment. +func (client DeploymentsClient) Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Cancel") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Cancel", err.Error()) + } + + req, err := client.CancelPreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", nil, "Failure preparing request") + return + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure sending request") + return + } + + result, err = client.CancelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request.
+func (client DeploymentsClient) CancelPreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistence checks whether a deployment exists. +// Parameters: +// resourceGroupName - the name of the resource group to check. The name is case insensitive. +// deploymentName - the name of the deployment. +func (client DeploymentsClient) CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request.
+func (client DeploymentsClient) CheckExistencePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate creates a named template deployment using a template. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment. +// parameters - additional parameters supplied to the operation.
+func (client DeploymentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, deploymentName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DeploymentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future DeploymentsCreateOrUpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a deployment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment to be deleted. +func (client DeploymentsClient) Delete(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DeploymentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error.
+func (client DeploymentsClient) DeleteSender(req *http.Request) (future DeploymentsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// ExportTemplate exports a deployment template. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment. +func (client DeploymentsClient) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentExportResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ExportTemplate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "ExportTemplate", err.Error()) + } + + req, err := client.ExportTemplatePreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", nil, "Failure preparing request") + return + } + + resp, err := client.ExportTemplateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure sending request") + return + } + + result, err = client.ExportTemplateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure responding to request") + } + + return +} + +// ExportTemplatePreparer prepares the ExportTemplate request. 
+func (client DeploymentsClient) ExportTemplatePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportTemplateSender sends the ExportTemplate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ExportTemplateResponder handles the response to the ExportTemplate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ExportTemplateResponder(resp *http.Response) (result DeploymentExportResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets a deployment. +// Parameters: +// resourceGroupName - the name of the resource group to get. The name is case insensitive. +// deploymentName - the name of the deployment. +func (client DeploymentsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentExtended, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request.
+func (client DeploymentsClient) GetPreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of deployments. +// Parameters: +// resourceGroupName - the name of the resource group to filter by. The name is case insensitive. +// filter - the filter to apply on the operation. +// top - query parameters. If null is passed returns all deployments.
+func (client DeploymentsClient) List(ctx context.Context, resourceGroupName string, filter string, top *int32) (result DeploymentListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.List") + defer func() { + sc := -1 + if result.dlr.Response.Response != nil { + sc = result.dlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, filter, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure sending request") + return + } + + result.dlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DeploymentsClient) ListPreparer(ctx context.Context, resourceGroupName string, filter string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ListResponder(resp *http.Response) (result DeploymentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client DeploymentsClient) listNextResults(ctx context.Context, lastResults DeploymentListResult) (result DeploymentListResult, err error) {
+	req, err := lastResults.deploymentListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DeploymentsClient) ListComplete(ctx context.Context, resourceGroupName string, filter string, top *int32) (result DeploymentListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.List")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.List(ctx, resourceGroupName, filter, top)
+	return
+}
+
+// Validate validates a deployment template.
+// Parameters:
+// resourceGroupName - the name of the resource group. The name is case insensitive.
+// deploymentName - the name of the deployment.
+// parameters - the deployment to validate.
+func (client DeploymentsClient) Validate(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Validate")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}},
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: false,
+				Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false,
+					Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}},
+					{Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}},
+				}}}}}); err != nil {
+		return result, validation.NewError("resources.DeploymentsClient", "Validate", err.Error())
+	}
+
+	req, err := client.ValidatePreparer(ctx, resourceGroupName, deploymentName, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", nil, "Failure preparing request")
+		return
+	}
+
+	resp,
err := client.ValidateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure sending request") + return + } + + result, err = client.ValidateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure responding to request") + } + + return +} + +// ValidatePreparer prepares the Validate request. +func (client DeploymentsClient) ValidatePreparer(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateSender sends the Validate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ValidateResponder handles the response to the Validate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/groups.go new file mode 100644 index 00000000..b8fb3d07 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/groups.go @@ -0,0 +1,805 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GroupsClient is the client for the Groups methods of the Resources service. +type GroupsClient struct { + BaseClient +} + +// NewGroupsClient creates an instance of the GroupsClient client. +func NewGroupsClient(subscriptionID string) GroupsClient { + return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource group exists. +// Parameters: +// resourceGroupName - the name of the resource group to check. The name is case insensitive. +func (client GroupsClient) CheckExistence(ctx context.Context, resourceGroupName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. +func (client GroupsClient) CheckExistencePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. 
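+//
+// Callers normally go through CheckExistence, which chains the preparer, this
+// sender and the responder. A minimal sketch; the subscription ID and group
+// name are placeholders, and authorizer setup and error handling are elided
+// (with this API version a 204 typically indicates the group exists and a
+// 404 that it does not):
+//
+//	client := NewGroupsClient("00000000-0000-0000-0000-000000000000")
+//	resp, err := client.CheckExistence(context.Background(), "my-resource-group")
+//	if err == nil && resp.StatusCode == http.StatusNoContent {
+//		fmt.Println("resource group exists")
+//	}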
+func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource group. +// Parameters: +// resourceGroupName - the name of the resource group to be created or updated. +// parameters - parameters supplied to the create or update resource group service operation. +func (client GroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters Group) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
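+//
+// A minimal CreateOrUpdate sketch built on this preparer; the group name and
+// location are placeholders, to.StringPtr comes from
+// github.com/Azure/go-autorest/autorest/to, and error handling is elided:
+//
+//	group, err := client.CreateOrUpdate(context.Background(), "my-resource-group",
+//		Group{Location: to.StringPtr("westus")})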
+func (client GroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, parameters Group) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete resource group. +// Parameters: +// resourceGroupName - the name of the resource group to be deleted. The name is case insensitive. +func (client GroupsClient) Delete(ctx context.Context, resourceGroupName string) (result GroupsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
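+//
+// Delete itself is long-running. A sketch of blocking until it finishes,
+// assuming a go-autorest version that provides Future.WaitForCompletionRef;
+// error handling is elided:
+//
+//	future, err := client.Delete(context.Background(), "my-resource-group")
+//	if err == nil {
+//		err = future.WaitForCompletionRef(context.Background(), client.Client)
+//	}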
+func (client GroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ExportTemplate captures the specified resource group as a template. +// Parameters: +// resourceGroupName - the name of the resource group to be created or updated. +// parameters - parameters supplied to the export template resource group operation. +func (client GroupsClient) ExportTemplate(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (result GroupExportResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ExportTemplate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "ExportTemplate", err.Error()) + } + + req, err := client.ExportTemplatePreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", nil, "Failure preparing request") + return + } + + resp, err := client.ExportTemplateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure sending request") + return + } + + result, err = client.ExportTemplateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure responding to request") + } + + return +} + +// ExportTemplatePreparer prepares the ExportTemplate request. 
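+//
+// A sketch of exporting every resource in a group; the '*' wildcard follows
+// the ExportTemplateRequest documentation in models.go, and error handling
+// is elided:
+//
+//	res, err := client.ExportTemplate(context.Background(), "my-resource-group",
+//		ExportTemplateRequest{ResourcesProperty: &[]string{"*"}})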
+func (client GroupsClient) ExportTemplatePreparer(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportTemplateSender sends the ExportTemplate request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ExportTemplateResponder handles the response to the ExportTemplate request. The method always +// closes the http.Response Body. +func (client GroupsClient) ExportTemplateResponder(resp *http.Response) (result GroupExportResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get get a resource group. +// Parameters: +// resourceGroupName - the name of the resource group to get. The name is case insensitive. +func (client GroupsClient) Get(ctx context.Context, resourceGroupName string) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
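+//
+// A minimal Get sketch; the group name is a placeholder and error handling
+// is elided:
+//
+//	group, err := client.Get(context.Background(), "my-resource-group")
+//	if err == nil && group.Location != nil {
+//		fmt.Println(*group.Location)
+//	}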
+func (client GroupsClient) GetPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client GroupsClient) GetResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a collection of resource groups. +// Parameters: +// filter - the filter to apply on the operation. +// top - query parameters. If null is passed returns all resource groups. +func (client GroupsClient) List(ctx context.Context, filter string, top *int32) (result GroupListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List") + defer func() { + sc := -1 + if result.glr.Response.Response != nil { + sc = result.glr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.glr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure sending request") + return + } + + result.glr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
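+//
+// A sketch of listing groups by tag; the tagName/tagValue filter syntax is an
+// assumption for this API version, and error handling is elided:
+//
+//	page, err := client.List(context.Background(),
+//		"tagName eq 'environment' and tagValue eq 'dev'", nil)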
+func (client GroupsClient) ListPreparer(ctx context.Context, filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2016-02-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListResponder(resp *http.Response) (result GroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client GroupsClient) listNextResults(ctx context.Context, lastResults GroupListResult) (result GroupListResult, err error) {
+	req, err := lastResults.groupListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client GroupsClient) ListComplete(ctx context.Context, filter string, top *int32) (result GroupListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.List(ctx, filter, top)
+	return
+}
+
+// ListResources gets all of the resources under the given resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group whose resources are listed. The name is case insensitive.
+// filter - the filter to apply on the operation.
+// expand - the $expand query parameter.
+// top - query parameters. If null is passed returns all resources.
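+//
+// A sketch of listing only one resource type in a group; the filter value is
+// illustrative and error handling is elided:
+//
+//	filter := "resourceType eq 'Microsoft.Compute/virtualMachines'"
+//	page, err := client.ListResources(context.Background(),
+//		"my-resource-group", filter, "", nil)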
+func (client GroupsClient) ListResources(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ListResources") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "ListResources", err.Error()) + } + + result.fn = client.listResourcesNextResults + req, err := client.ListResourcesPreparer(ctx, resourceGroupName, filter, expand, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", nil, "Failure preparing request") + return + } + + resp, err := client.ListResourcesSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure sending request") + return + } + + result.lr, err = client.ListResourcesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", resp, "Failure responding to request") + } + + return +} + +// ListResourcesPreparer prepares the ListResources request. +func (client GroupsClient) ListResourcesPreparer(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListResourcesSender sends the ListResources request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) ListResourcesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResourcesResponder handles the response to the ListResources request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) ListResourcesResponder(resp *http.Response) (result ListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listResourcesNextResults retrieves the next set of results, if any.
+func (client GroupsClient) listResourcesNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) {
+	req, err := lastResults.listResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listResourcesNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListResourcesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listResourcesNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResourcesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "listResourcesNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListResourcesComplete enumerates all values, automatically crossing page boundaries as required.
+func (client GroupsClient) ListResourcesComplete(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (result ListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ListResources")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListResources(ctx, resourceGroupName, filter, expand, top)
+	return
+}
+
+// Patch resource groups can be updated through a simple PATCH operation to a group address. The format of the request
+// is the same as that for creating a resource group, though if a field is unspecified the current value will be
+// carried over.
+// Parameters:
+// resourceGroupName - the name of the resource group to be created or updated. The name is case insensitive.
+// parameters - parameters supplied to the update state resource group service operation.
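+//
+// A sketch of patching only the tags on an existing group; the values are
+// placeholders and error handling is elided:
+//
+//	group, err := client.Patch(context.Background(), "my-resource-group",
+//		Group{Tags: map[string]*string{"environment": to.StringPtr("dev")}})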
+func (client GroupsClient) Patch(ctx context.Context, resourceGroupName string, parameters Group) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Patch") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Patch", err.Error()) + } + + req, err := client.PatchPreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", nil, "Failure preparing request") + return + } + + resp, err := client.PatchSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", resp, "Failure sending request") + return + } + + result, err = client.PatchResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Patch", resp, "Failure responding to request") + } + + return +} + +// PatchPreparer prepares the Patch request. +func (client GroupsClient) PatchPreparer(ctx context.Context, resourceGroupName string, parameters Group) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PatchSender sends the Patch request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) PatchSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// PatchResponder handles the response to the Patch request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) PatchResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/models.go new file mode 100644 index 00000000..8a30b722 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/models.go @@ -0,0 +1,1566 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources" + +// DeploymentMode enumerates the values for deployment mode. +type DeploymentMode string + +const ( + // Complete ... + Complete DeploymentMode = "Complete" + // Incremental ... + Incremental DeploymentMode = "Incremental" +) + +// PossibleDeploymentModeValues returns an array of possible values for the DeploymentMode const type. +func PossibleDeploymentModeValues() []DeploymentMode { + return []DeploymentMode{Complete, Incremental} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // SystemAssigned ... + SystemAssigned ResourceIdentityType = "SystemAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{SystemAssigned} +} + +// AliasPathType ... +type AliasPathType struct { + // Path - The path of an alias. + Path *string `json:"path,omitempty"` + // APIVersions - The api versions. + APIVersions *[]string `json:"apiVersions,omitempty"` +} + +// AliasType ... +type AliasType struct { + // Name - The alias name. + Name *string `json:"name,omitempty"` + // Paths - The paths for an alias. + Paths *[]AliasPathType `json:"paths,omitempty"` +} + +// BasicDependency deployment dependency information. +type BasicDependency struct { + // ID - The ID of the dependency. + ID *string `json:"id,omitempty"` + // ResourceType - The dependency resource type. 
+ ResourceType *string `json:"resourceType,omitempty"` + // ResourceName - The dependency resource name. + ResourceName *string `json:"resourceName,omitempty"` +} + +// DebugSetting ... +type DebugSetting struct { + // DetailLevel - The debug detail level. + DetailLevel *string `json:"detailLevel,omitempty"` +} + +// Dependency deployment dependency information. +type Dependency struct { + // DependsOn - The list of dependencies. + DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` + // ID - The ID of the dependency. + ID *string `json:"id,omitempty"` + // ResourceType - The dependency resource type. + ResourceType *string `json:"resourceType,omitempty"` + // ResourceName - The dependency resource name. + ResourceName *string `json:"resourceName,omitempty"` +} + +// Deployment deployment operation parameters. +type Deployment struct { + // Properties - The deployment properties. + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentExportResult ... +type DeploymentExportResult struct { + autorest.Response `json:"-"` + // Template - The template content. + Template interface{} `json:"template,omitempty"` +} + +// DeploymentExtended deployment information. +type DeploymentExtended struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the deployment. + ID *string `json:"id,omitempty"` + // Name - The name of the deployment. + Name *string `json:"name,omitempty"` + // Properties - Deployment properties. + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// DeploymentExtendedFilter deployment filter. +type DeploymentExtendedFilter struct { + // ProvisioningState - The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DeploymentListResult list of deployments. +type DeploymentListResult struct { + autorest.Response `json:"-"` + // Value - The list of deployments. + Value *[]DeploymentExtended `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentListResultIterator provides access to a complete listing of DeploymentExtended values. +type DeploymentListResultIterator struct { + i int + page DeploymentListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeploymentListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeploymentListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
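+//
+// Together with Value and NextWithContext it supports the idiomatic paging
+// loop (error handling elided):
+//
+//	for iter.NotDone() {
+//		fmt.Println(to.String(iter.Value().Name))
+//		if err := iter.NextWithContext(context.Background()); err != nil {
+//			break
+//		}
+//	}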
+func (iter DeploymentListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeploymentListResultIterator) Response() DeploymentListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeploymentListResultIterator) Value() DeploymentExtended { + if !iter.page.NotDone() { + return DeploymentExtended{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeploymentListResultIterator type. +func NewDeploymentListResultIterator(page DeploymentListResultPage) DeploymentListResultIterator { + return DeploymentListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dlr DeploymentListResult) IsEmpty() bool { + return dlr.Value == nil || len(*dlr.Value) == 0 +} + +// deploymentListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dlr DeploymentListResult) deploymentListResultPreparer(ctx context.Context) (*http.Request, error) { + if dlr.NextLink == nil || len(to.String(dlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dlr.NextLink))) +} + +// DeploymentListResultPage contains a page of DeploymentExtended values. +type DeploymentListResultPage struct { + fn func(context.Context, DeploymentListResult) (DeploymentListResult, error) + dlr DeploymentListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeploymentListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dlr) + if err != nil { + return err + } + page.dlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeploymentListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeploymentListResultPage) NotDone() bool { + return !page.dlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeploymentListResultPage) Response() DeploymentListResult { + return page.dlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeploymentListResultPage) Values() []DeploymentExtended { + if page.dlr.IsEmpty() { + return nil + } + return *page.dlr.Value +} + +// Creates a new instance of the DeploymentListResultPage type. 
+func NewDeploymentListResultPage(getNextPage func(context.Context, DeploymentListResult) (DeploymentListResult, error)) DeploymentListResultPage { + return DeploymentListResultPage{fn: getNextPage} +} + +// DeploymentOperation deployment operation information. +type DeploymentOperation struct { + autorest.Response `json:"-"` + // ID - Full deployment operation id. + ID *string `json:"id,omitempty"` + // OperationID - Deployment operation id. + OperationID *string `json:"operationId,omitempty"` + // Properties - Deployment properties. + Properties *DeploymentOperationProperties `json:"properties,omitempty"` +} + +// DeploymentOperationProperties deployment operation properties. +type DeploymentOperationProperties struct { + // ProvisioningState - The state of the provisioning. + ProvisioningState *string `json:"provisioningState,omitempty"` + // Timestamp - The date and time of the operation. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ServiceRequestID - Deployment operation service request id. + ServiceRequestID *string `json:"serviceRequestId,omitempty"` + // StatusCode - Operation status code. + StatusCode *string `json:"statusCode,omitempty"` + // StatusMessage - Operation status message. + StatusMessage interface{} `json:"statusMessage,omitempty"` + // TargetResource - The target resource. + TargetResource *TargetResource `json:"targetResource,omitempty"` + // Request - The HTTP request message. + Request *HTTPMessage `json:"request,omitempty"` + // Response - The HTTP response message. + Response *HTTPMessage `json:"response,omitempty"` +} + +// DeploymentOperationsListResult list of deployment operations. +type DeploymentOperationsListResult struct { + autorest.Response `json:"-"` + // Value - The list of deployments. + Value *[]DeploymentOperation `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentOperationsListResultIterator provides access to a complete listing of DeploymentOperation +// values. +type DeploymentOperationsListResultIterator struct { + i int + page DeploymentOperationsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DeploymentOperationsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeploymentOperationsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeploymentOperationsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. 
+func (iter DeploymentOperationsListResultIterator) Response() DeploymentOperationsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeploymentOperationsListResultIterator) Value() DeploymentOperation { + if !iter.page.NotDone() { + return DeploymentOperation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeploymentOperationsListResultIterator type. +func NewDeploymentOperationsListResultIterator(page DeploymentOperationsListResultPage) DeploymentOperationsListResultIterator { + return DeploymentOperationsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dolr DeploymentOperationsListResult) IsEmpty() bool { + return dolr.Value == nil || len(*dolr.Value) == 0 +} + +// deploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dolr DeploymentOperationsListResult) deploymentOperationsListResultPreparer(ctx context.Context) (*http.Request, error) { + if dolr.NextLink == nil || len(to.String(dolr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dolr.NextLink))) +} + +// DeploymentOperationsListResultPage contains a page of DeploymentOperation values. +type DeploymentOperationsListResultPage struct { + fn func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error) + dolr DeploymentOperationsListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DeploymentOperationsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dolr) + if err != nil { + return err + } + page.dolr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeploymentOperationsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeploymentOperationsListResultPage) NotDone() bool { + return !page.dolr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeploymentOperationsListResultPage) Response() DeploymentOperationsListResult { + return page.dolr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeploymentOperationsListResultPage) Values() []DeploymentOperation { + if page.dolr.IsEmpty() { + return nil + } + return *page.dolr.Value +} + +// Creates a new instance of the DeploymentOperationsListResultPage type. 
+func NewDeploymentOperationsListResultPage(getNextPage func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error)) DeploymentOperationsListResultPage { + return DeploymentOperationsListResultPage{fn: getNextPage} +} + +// DeploymentProperties deployment properties. +type DeploymentProperties struct { + // Template - The template content. It can be a JObject or a well formed JSON string. Use only one of Template or TemplateLink. + Template interface{} `json:"template,omitempty"` + // TemplateLink - The template URI. Use only one of Template or TemplateLink. + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + // Parameters - Deployment parameters. It can be a JObject or a well formed JSON string. Use only one of Parameters or ParametersLink. + Parameters interface{} `json:"parameters,omitempty"` + // ParametersLink - The parameters URI. Use only one of Parameters or ParametersLink. + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + // Mode - The deployment mode. Possible values include: 'Incremental', 'Complete' + Mode DeploymentMode `json:"mode,omitempty"` + // DebugSetting - The debug setting of the deployment. + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` +} + +// DeploymentPropertiesExtended deployment properties with additional details. +type DeploymentPropertiesExtended struct { + // ProvisioningState - The state of the provisioning. + ProvisioningState *string `json:"provisioningState,omitempty"` + // CorrelationID - The correlation ID of the deployment. + CorrelationID *string `json:"correlationId,omitempty"` + // Timestamp - The timestamp of the template deployment. + Timestamp *date.Time `json:"timestamp,omitempty"` + // Outputs - Key/value pairs that represent deployment output. + Outputs interface{} `json:"outputs,omitempty"` + // Providers - The list of resource providers needed for the deployment. + Providers *[]Provider `json:"providers,omitempty"` + // Dependencies - The list of deployment dependencies. + Dependencies *[]Dependency `json:"dependencies,omitempty"` + // Template - The template content. Use only one of Template or TemplateLink. + Template interface{} `json:"template,omitempty"` + // TemplateLink - The URI referencing the template. Use only one of Template or TemplateLink. + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + // Parameters - Deployment parameters. Use only one of Parameters or ParametersLink. + Parameters interface{} `json:"parameters,omitempty"` + // ParametersLink - The URI referencing the parameters. Use only one of Parameters or ParametersLink. + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + // Mode - The deployment mode. Possible values include: 'Incremental', 'Complete' + Mode DeploymentMode `json:"mode,omitempty"` + // DebugSetting - The debug setting of the deployment. + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` +} + +// DeploymentsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DeploymentsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
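+//
+// A sketch of the usual polling pattern around this future, assuming the
+// DeploymentsClient.CreateOrUpdate method defined earlier in deployments.go
+// returns it and that deployment is a Deployment value prepared by the
+// caller; error handling is elided:
+//
+//	ctx := context.Background()
+//	future, err := client.CreateOrUpdate(ctx, "my-resource-group", "my-deployment", deployment)
+//	if err == nil {
+//		if err = future.WaitForCompletionRef(ctx, client.Client); err == nil {
+//			extended, _ := future.Result(client)
+//			fmt.Println(to.String(extended.Name))
+//		}
+//	}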
+func (future *DeploymentsCreateOrUpdateFuture) Result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + +// DeploymentsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type DeploymentsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DeploymentsDeleteFuture) Result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DeploymentValidateResult information from validate template deployment response. +type DeploymentValidateResult struct { + autorest.Response `json:"-"` + // Error - Validation error. + Error *ManagementErrorWithDetails `json:"error,omitempty"` + // Properties - The template deployment properties. + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// ExportTemplateRequest export resource group template request parameters. +type ExportTemplateRequest struct { + // ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'. + ResourcesProperty *[]string `json:"resources,omitempty"` + // Options - The export template options. A CSV-formatted list containing zero or more of the following: 'IncludeParameterDefaultValue', 'IncludeComments', 'SkipResourceNameParameterization', 'SkipAllParameterization' + Options *string `json:"options,omitempty"` +} + +// GenericResource resource information. +type GenericResource struct { + autorest.Response `json:"-"` + // Plan - The plan of the resource. + Plan *Plan `json:"plan,omitempty"` + // Properties - The resource properties. + Properties interface{} `json:"properties,omitempty"` + // Kind - The kind of the resource. + Kind *string `json:"kind,omitempty"` + // ManagedBy - Id of the resource that manages this resource. + ManagedBy *string `json:"managedBy,omitempty"` + // Sku - The sku of the resource. + Sku *Sku `json:"sku,omitempty"` + // Identity - The identity of the resource. 
+ Identity *Identity `json:"identity,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GenericResource. +func (gr GenericResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gr.Plan != nil { + objectMap["plan"] = gr.Plan + } + if gr.Properties != nil { + objectMap["properties"] = gr.Properties + } + if gr.Kind != nil { + objectMap["kind"] = gr.Kind + } + if gr.ManagedBy != nil { + objectMap["managedBy"] = gr.ManagedBy + } + if gr.Sku != nil { + objectMap["sku"] = gr.Sku + } + if gr.Identity != nil { + objectMap["identity"] = gr.Identity + } + if gr.Location != nil { + objectMap["location"] = gr.Location + } + if gr.Tags != nil { + objectMap["tags"] = gr.Tags + } + return json.Marshal(objectMap) +} + +// GenericResourceFilter resource filter. +type GenericResourceFilter struct { + // ResourceType - The resource type. + ResourceType *string `json:"resourceType,omitempty"` + // Tagname - The tag name. + Tagname *string `json:"tagname,omitempty"` + // Tagvalue - The tag value. + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// Group resource group information. +type Group struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the resource group. + ID *string `json:"id,omitempty"` + // Name - The Name of the resource group. + Name *string `json:"name,omitempty"` + Properties *GroupProperties `json:"properties,omitempty"` + // Location - The location of the resource group. It cannot be changed after the resource group has been created. Has to be one of the supported Azure Locations, such as West US, East US, West Europe, East Asia, etc. + Location *string `json:"location,omitempty"` + // Tags - The tags attached to the resource group. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Group. +func (g Group) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.Name != nil { + objectMap["name"] = g.Name + } + if g.Properties != nil { + objectMap["properties"] = g.Properties + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// GroupExportResult ... +type GroupExportResult struct { + autorest.Response `json:"-"` + // Template - The template content. + Template interface{} `json:"template,omitempty"` + // Error - The error. + Error *ManagementErrorWithDetails `json:"error,omitempty"` +} + +// GroupFilter resource group filter. +type GroupFilter struct { + // TagName - The tag name. + TagName *string `json:"tagName,omitempty"` + // TagValue - The tag value. + TagValue *string `json:"tagValue,omitempty"` +} + +// GroupListResult list of resource groups. +type GroupListResult struct { + autorest.Response `json:"-"` + // Value - The list of resource groups. + Value *[]Group `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// GroupListResultIterator provides access to a complete listing of Group values. 
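Note what the custom marshalers for `GenericResource` and `Group` leave out: fields documented as READ-ONLY (`ID`, `Name`, and `Type` on `GenericResource`; `ID` on `Group`) are never written, so server-assigned identifiers cannot accidentally be sent back on create or update. A quick illustration:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	gr := resources.GenericResource{
		// Set even though READ-ONLY; the custom marshaler drops it.
		ID:       to.StringPtr("/subscriptions/xxxx/resourceGroups/example-rg"),
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
	}
	b, err := json.Marshal(gr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"location":"eastus","tags":{"env":"dev"}}
}
```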
+type GroupListResultIterator struct { + i int + page GroupListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GroupListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *GroupListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GroupListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GroupListResultIterator) Response() GroupListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GroupListResultIterator) Value() Group { + if !iter.page.NotDone() { + return Group{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the GroupListResultIterator type. +func NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator { + return GroupListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (glr GroupListResult) IsEmpty() bool { + return glr.Value == nil || len(*glr.Value) == 0 +} + +// groupListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (glr GroupListResult) groupListResultPreparer(ctx context.Context) (*http.Request, error) { + if glr.NextLink == nil || len(to.String(glr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(glr.NextLink))) +} + +// GroupListResultPage contains a page of Group values. +type GroupListResultPage struct { + fn func(context.Context, GroupListResult) (GroupListResult, error) + glr GroupListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GroupListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.glr) + if err != nil { + return err + } + page.glr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+// Deprecated: Use NextWithContext() instead. +func (page *GroupListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GroupListResultPage) NotDone() bool { + return !page.glr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GroupListResultPage) Response() GroupListResult { + return page.glr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GroupListResultPage) Values() []Group { + if page.glr.IsEmpty() { + return nil + } + return *page.glr.Value +} + +// Creates a new instance of the GroupListResultPage type. +func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage { + return GroupListResultPage{fn: getNextPage} +} + +// GroupProperties the resource group properties. +type GroupProperties struct { + // ProvisioningState - READ-ONLY; The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// GroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GroupsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GroupsDeleteFuture) Result(client GroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.GroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// HTTPMessage ... +type HTTPMessage struct { + // Content - HTTP message content. + Content interface{} `json:"content,omitempty"` +} + +// Identity identity for the resource. +type Identity struct { + // PrincipalID - READ-ONLY; The principal id of resource identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant id of resource. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identity type. Possible values include: 'SystemAssigned' + Type ResourceIdentityType `json:"type,omitempty"` +} + +// ListResult list of resource groups. +type ListResult struct { + autorest.Response `json:"-"` + // Value - The list of resources. + Value *[]GenericResource `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ListResultIterator provides access to a complete listing of GenericResource values. +type ListResultIterator struct { + i int + page ListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
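The `*Future` types follow autorest's long-running-operation contract: the initial call returns immediately, and `Result` may only be consumed once polling reports the operation done. A sketch of the usual blocking pattern, assuming the `GroupsClient` vendored elsewhere in this change:

```go
// deleteGroupAndWait starts the long-running delete, blocks until the service
// reports a terminal state, then surfaces the final response.
func deleteGroupAndWait(ctx context.Context, groups resources.GroupsClient, name string) error {
	future, err := groups.Delete(ctx, name)
	if err != nil {
		return err
	}
	// WaitForCompletionRef polls using the client's retry settings.
	if err := future.WaitForCompletionRef(ctx, groups.Client); err != nil {
		return err
	}
	// Safe now; calling Result before completion yields the
	// azure.NewAsyncOpIncompleteError seen in the code above.
	_, err = future.Result(groups)
	return err
}
```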
+func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListResultIterator) Response() ListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListResultIterator) Value() GenericResource { + if !iter.page.NotDone() { + return GenericResource{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListResultIterator type. +func NewListResultIterator(page ListResultPage) ListResultIterator { + return ListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lr ListResult) IsEmpty() bool { + return lr.Value == nil || len(*lr.Value) == 0 +} + +// listResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) { + if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lr.NextLink))) +} + +// ListResultPage contains a page of GenericResource values. +type ListResultPage struct { + fn func(context.Context, ListResult) (ListResult, error) + lr ListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lr) + if err != nil { + return err + } + page.lr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
+func (page ListResultPage) NotDone() bool { + return !page.lr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListResultPage) Response() ListResult { + return page.lr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListResultPage) Values() []GenericResource { + if page.lr.IsEmpty() { + return nil + } + return *page.lr.Value +} + +// Creates a new instance of the ListResultPage type. +func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage { + return ListResultPage{fn: getNextPage} +} + +// ManagementErrorWithDetails ... +type ManagementErrorWithDetails struct { + // Code - The error code returned from the server. + Code *string `json:"code,omitempty"` + // Message - The error message returned from the server. + Message *string `json:"message,omitempty"` + // Target - The target of the error. + Target *string `json:"target,omitempty"` + // Details - Validation error. + Details *[]ManagementErrorWithDetails `json:"details,omitempty"` +} + +// MoveInfo parameters of move resources. +type MoveInfo struct { + // ResourcesProperty - The ids of the resources. + ResourcesProperty *[]string `json:"resources,omitempty"` + // TargetResourceGroup - The target resource group. + TargetResourceGroup *string `json:"targetResourceGroup,omitempty"` +} + +// MoveResourcesFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type MoveResourcesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *MoveResourcesFuture) Result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.MoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.MoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} + +// ParametersLink entity representing the reference to the deployment parameters. +type ParametersLink struct { + // URI - URI referencing the template. + URI *string `json:"uri,omitempty"` + // ContentVersion - If included it must match the ContentVersion in the template. + ContentVersion *string `json:"contentVersion,omitempty"` +} + +// Plan plan for the resource. +type Plan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - The offer ID. + Product *string `json:"product,omitempty"` + // PromotionCode - The promotion code. + PromotionCode *string `json:"promotionCode,omitempty"` +} + +// Provider resource provider information. +type Provider struct { + autorest.Response `json:"-"` + // ID - The provider id. + ID *string `json:"id,omitempty"` + // Namespace - The namespace of the provider. + Namespace *string `json:"namespace,omitempty"` + // RegistrationState - The registration state of the provider. + RegistrationState *string `json:"registrationState,omitempty"` + // ResourceTypes - The collection of provider resource types. + ResourceTypes *[]ProviderResourceType `json:"resourceTypes,omitempty"` +} + +// ProviderListResult list of resource providers. 
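`ManagementErrorWithDetails` nests arbitrarily through its `Details` slice, so a deployment failure is often best reported by walking the tree. A small helper sketch:

```go
// printErrorTree prints an error and its nested details, indenting one level
// per depth; nil fields are common in service responses and must be guarded.
func printErrorTree(e *resources.ManagementErrorWithDetails, indent string) {
	if e == nil {
		return
	}
	code, msg := "<no code>", "<no message>"
	if e.Code != nil {
		code = *e.Code
	}
	if e.Message != nil {
		msg = *e.Message
	}
	fmt.Printf("%s%s: %s\n", indent, code, msg)
	if e.Details != nil {
		for i := range *e.Details {
			printErrorTree(&(*e.Details)[i], indent+"  ")
		}
	}
}
```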
+type ProviderListResult struct { + autorest.Response `json:"-"` + // Value - The list of resource providers. + Value *[]Provider `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ProviderListResultIterator provides access to a complete listing of Provider values. +type ProviderListResultIterator struct { + i int + page ProviderListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProviderListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ProviderListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ProviderListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ProviderListResultIterator) Response() ProviderListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ProviderListResultIterator) Value() Provider { + if !iter.page.NotDone() { + return Provider{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ProviderListResultIterator type. +func NewProviderListResultIterator(page ProviderListResultPage) ProviderListResultIterator { + return ProviderListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (plr ProviderListResult) IsEmpty() bool { + return plr.Value == nil || len(*plr.Value) == 0 +} + +// providerListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (plr ProviderListResult) providerListResultPreparer(ctx context.Context) (*http.Request, error) { + if plr.NextLink == nil || len(to.String(plr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(plr.NextLink))) +} + +// ProviderListResultPage contains a page of Provider values. +type ProviderListResultPage struct { + fn func(context.Context, ProviderListResult) (ProviderListResult, error) + plr ProviderListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ProviderListResultPage) NextWithContext(ctx context.Context) (err error) {
+    if tracing.IsEnabled() {
+        ctx = tracing.StartSpan(ctx, fqdn+"/ProviderListResultPage.NextWithContext")
+        defer func() {
+            sc := -1
+            if page.Response().Response.Response != nil {
+                sc = page.Response().Response.Response.StatusCode
+            }
+            tracing.EndSpan(ctx, sc, err)
+        }()
+    }
+    next, err := page.fn(ctx, page.plr)
+    if err != nil {
+        return err
+    }
+    page.plr = next
+    return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ProviderListResultPage) Next() error {
+    return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ProviderListResultPage) NotDone() bool {
+    return !page.plr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ProviderListResultPage) Response() ProviderListResult {
+    return page.plr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ProviderListResultPage) Values() []Provider {
+    if page.plr.IsEmpty() {
+        return nil
+    }
+    return *page.plr.Value
+}
+
+// Creates a new instance of the ProviderListResultPage type.
+func NewProviderListResultPage(getNextPage func(context.Context, ProviderListResult) (ProviderListResult, error)) ProviderListResultPage {
+    return ProviderListResultPage{fn: getNextPage}
+}
+
+// ProviderOperationDisplayProperties resource provider operation's display properties.
+type ProviderOperationDisplayProperties struct {
+    // Publisher - Operation publisher.
+    Publisher *string `json:"publisher,omitempty"`
+    // Provider - Operation provider.
+    Provider *string `json:"provider,omitempty"`
+    // Resource - Operation resource.
+    Resource *string `json:"resource,omitempty"`
+    // Operation - Operation.
+    Operation *string `json:"operation,omitempty"`
+    // Description - Operation description.
+    Description *string `json:"description,omitempty"`
+}
+
+// ProviderResourceType resource type managed by the resource provider.
+type ProviderResourceType struct {
+    // ResourceType - The resource type.
+    ResourceType *string `json:"resourceType,omitempty"`
+    // Locations - The collection of locations where this resource type can be created.
+    Locations *[]string `json:"locations,omitempty"`
+    // Aliases - The aliases that are supported by this resource type.
+    Aliases *[]AliasType `json:"aliases,omitempty"`
+    // APIVersions - The supported API versions.
+    APIVersions *[]string `json:"apiVersions,omitempty"`
+    // Properties - The properties.
+    Properties map[string]*string `json:"properties"`
+}
+
+// MarshalJSON is the custom marshaler for ProviderResourceType.
+func (prt ProviderResourceType) MarshalJSON() ([]byte, error) {
+    objectMap := make(map[string]interface{})
+    if prt.ResourceType != nil {
+        objectMap["resourceType"] = prt.ResourceType
+    }
+    if prt.Locations != nil {
+        objectMap["locations"] = prt.Locations
+    }
+    if prt.Aliases != nil {
+        objectMap["aliases"] = prt.Aliases
+    }
+    if prt.APIVersions != nil {
+        objectMap["apiVersions"] = prt.APIVersions
+    }
+    if prt.Properties != nil {
+        objectMap["properties"] = prt.Properties
+    }
+    return json.Marshal(objectMap)
+}
+
+// Resource ...
+type Resource struct { + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// Sku sku for the resource. +type Sku struct { + // Name - The sku name. + Name *string `json:"name,omitempty"` + // Tier - The sku tier. + Tier *string `json:"tier,omitempty"` + // Size - The sku size. + Size *string `json:"size,omitempty"` + // Family - The sku family. + Family *string `json:"family,omitempty"` + // Model - The sku model. + Model *string `json:"model,omitempty"` + // Capacity - The sku capacity. + Capacity *int32 `json:"capacity,omitempty"` +} + +// SubResource ... +type SubResource struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// TagCount tag count. +type TagCount struct { + // Type - Type of count. + Type *string `json:"type,omitempty"` + // Value - Value of count. + Value *string `json:"value,omitempty"` +} + +// TagDetails tag details. +type TagDetails struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The tag ID. + ID *string `json:"id,omitempty"` + // TagName - The tag name. + TagName *string `json:"tagName,omitempty"` + // Count - The tag count. + Count *TagCount `json:"count,omitempty"` + // Values - The list of tag values. + Values *[]TagValue `json:"values,omitempty"` +} + +// TagsListResult list of subscription tags. +type TagsListResult struct { + autorest.Response `json:"-"` + // Value - The list of tags. + Value *[]TagDetails `json:"value,omitempty"` + // NextLink - The URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// TagsListResultIterator provides access to a complete listing of TagDetails values. +type TagsListResultIterator struct { + i int + page TagsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *TagsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *TagsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. 
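`TagDetails` pairs each tag name with usage counts and the values seen across the subscription; note that `TagCount.Value` is a string rather than a number. An iterator-driven sketch, assuming the `TagsClient` vendored elsewhere in this change:

```go
// printTags walks every subscription tag, crossing page boundaries via the
// iterator defined above.
func printTags(ctx context.Context, tags resources.TagsClient) error {
	it, err := tags.ListComplete(ctx)
	if err != nil {
		return err
	}
	for it.NotDone() {
		td := it.Value()
		if td.TagName != nil && td.Count != nil && td.Count.Value != nil {
			fmt.Printf("%s (used %s times)\n", *td.TagName, *td.Count.Value)
		}
		if err := it.NextWithContext(ctx); err != nil {
			return err
		}
	}
	return nil
}
```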
+func (iter TagsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter TagsListResultIterator) Response() TagsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter TagsListResultIterator) Value() TagDetails { + if !iter.page.NotDone() { + return TagDetails{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the TagsListResultIterator type. +func NewTagsListResultIterator(page TagsListResultPage) TagsListResultIterator { + return TagsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (tlr TagsListResult) IsEmpty() bool { + return tlr.Value == nil || len(*tlr.Value) == 0 +} + +// tagsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (tlr TagsListResult) tagsListResultPreparer(ctx context.Context) (*http.Request, error) { + if tlr.NextLink == nil || len(to.String(tlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(tlr.NextLink))) +} + +// TagsListResultPage contains a page of TagDetails values. +type TagsListResultPage struct { + fn func(context.Context, TagsListResult) (TagsListResult, error) + tlr TagsListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *TagsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.tlr) + if err != nil { + return err + } + page.tlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *TagsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page TagsListResultPage) NotDone() bool { + return !page.tlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page TagsListResultPage) Response() TagsListResult { + return page.tlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page TagsListResultPage) Values() []TagDetails { + if page.tlr.IsEmpty() { + return nil + } + return *page.tlr.Value +} + +// Creates a new instance of the TagsListResultPage type. +func NewTagsListResultPage(getNextPage func(context.Context, TagsListResult) (TagsListResult, error)) TagsListResultPage { + return TagsListResultPage{fn: getNextPage} +} + +// TagValue tag information. +type TagValue struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The tag ID. + ID *string `json:"id,omitempty"` + // TagValue - The tag value. 
+ TagValue *string `json:"tagValue,omitempty"` + // Count - The tag value count. + Count *TagCount `json:"count,omitempty"` +} + +// TargetResource target resource. +type TargetResource struct { + // ID - The ID of the resource. + ID *string `json:"id,omitempty"` + // ResourceName - The name of the resource. + ResourceName *string `json:"resourceName,omitempty"` + // ResourceType - The type of the resource. + ResourceType *string `json:"resourceType,omitempty"` +} + +// TemplateLink entity representing the reference to the template. +type TemplateLink struct { + // URI - URI referencing the template. + URI *string `json:"uri,omitempty"` + // ContentVersion - If included it must match the ContentVersion in the template. + ContentVersion *string `json:"contentVersion,omitempty"` +} + +// UpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type UpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *UpdateFuture) Result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.UpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/providers.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/providers.go new file mode 100644 index 00000000..66a4c831 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/providers.go @@ -0,0 +1,391 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ProvidersClient is the client for the Providers methods of the Resources service. 
+type ProvidersClient struct { + BaseClient +} + +// NewProvidersClient creates an instance of the ProvidersClient client. +func NewProvidersClient(subscriptionID string) ProvidersClient { + return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client. +func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a resource provider. +// Parameters: +// resourceProviderNamespace - namespace of the resource provider. +// expand - the $expand query parameter. e.g. To include property aliases in response, use +// $expand=resourceTypes/aliases. +func (client ProvidersClient) Get(ctx context.Context, resourceProviderNamespace string, expand string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceProviderNamespace, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ProvidersClient) GetPreparer(ctx context.Context, resourceProviderNamespace string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of resource providers. +// Parameters: +// top - query parameters. 
If null is passed, returns all resource providers.
+// expand - the $expand query parameter. e.g. To include property aliases in response, use
+// $expand=resourceTypes/aliases.
+func (client ProvidersClient) List(ctx context.Context, top *int32, expand string) (result ProviderListResultPage, err error) {
+    if tracing.IsEnabled() {
+        ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List")
+        defer func() {
+            sc := -1
+            if result.plr.Response.Response != nil {
+                sc = result.plr.Response.Response.StatusCode
+            }
+            tracing.EndSpan(ctx, sc, err)
+        }()
+    }
+    result.fn = client.listNextResults
+    req, err := client.ListPreparer(ctx, top, expand)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request")
+        return
+    }
+
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.plr.Response = autorest.Response{Response: resp}
+        err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending request")
+        return
+    }
+
+    result.plr, err = client.ListResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to request")
+    }
+
+    return
+}
+
+// ListPreparer prepares the List request.
+func (client ProvidersClient) ListPreparer(ctx context.Context, top *int32, expand string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+    }
+
+    const APIVersion = "2016-02-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+    if top != nil {
+        queryParameters["$top"] = autorest.Encode("query", *top)
+    }
+    if len(expand) > 0 {
+        queryParameters["$expand"] = autorest.Encode("query", expand)
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsGet(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
+    return autorest.SendWithSender(client, req,
+        azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// listNextResults retrieves the next set of results, if any.
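Everything needed for a provider lookup is now in place. A short sketch using only what this file defines, with a placeholder subscription ID:

```go
func showProvider(ctx context.Context, subscriptionID string) error {
	pc := resources.NewProvidersClient(subscriptionID)
	// pc.Authorizer must be set before use, e.g. via
	// auth.NewAuthorizerFromEnvironment().

	// "resourceTypes/aliases" pulls alias definitions into the response;
	// pass "" to omit them.
	p, err := pc.Get(ctx, "Microsoft.Compute", "resourceTypes/aliases")
	if err != nil {
		return err
	}
	if p.RegistrationState != nil {
		fmt.Println(*p.RegistrationState)
	}
	return nil
}
```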
+func (client ProvidersClient) listNextResults(ctx context.Context, lastResults ProviderListResult) (result ProviderListResult, err error) { + req, err := lastResults.providerListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ProvidersClient) ListComplete(ctx context.Context, top *int32, expand string) (result ProviderListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, top, expand) + return +} + +// Register registers provider to be used with a subscription. +// Parameters: +// resourceProviderNamespace - namespace of the resource provider. +func (client ProvidersClient) Register(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Register") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RegisterPreparer(ctx, resourceProviderNamespace) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", nil, "Failure preparing request") + return + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure sending request") + return + } + + result, err = client.RegisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. +func (client ProvidersClient) RegisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegisterSender sends the Register request. 
The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body. +func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Unregister unregisters provider from a subscription. +// Parameters: +// resourceProviderNamespace - namespace of the resource provider. +func (client ProvidersClient) Unregister(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Unregister") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UnregisterPreparer(ctx, resourceProviderNamespace) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", nil, "Failure preparing request") + return + } + + resp, err := client.UnregisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure sending request") + return + } + + result, err = client.UnregisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure responding to request") + } + + return +} + +// UnregisterPreparer prepares the Unregister request. +func (client ProvidersClient) UnregisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UnregisterSender sends the Unregister request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UnregisterResponder handles the response to the Unregister request. The method always +// closes the http.Response Body. 
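Registration is asynchronous on the service side even though the call itself is not: the `Provider` returned by `Register` typically still reports a transitional `RegistrationState`, so callers usually poll `Get` until it settles. A minimal sketch, with the state strings taken from typical ARM behavior rather than from this file:

```go
// ensureRegistered kicks off registration and returns the state reported by
// the service; "Registered" is the settled value, "Registering" transitional.
func ensureRegistered(ctx context.Context, pc resources.ProvidersClient, ns string) (string, error) {
	p, err := pc.Register(ctx, ns)
	if err != nil {
		return "", err
	}
	return to.String(p.RegistrationState), nil
}
```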
+func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/resources.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/resources.go new file mode 100644 index 00000000..c197f530 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/resources.go @@ -0,0 +1,710 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// Client is the client for the Resources methods of the Resources service. +type Client struct { + BaseClient +} + +// NewClient creates an instance of the Client client. +func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether resource exists. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// resourceProviderNamespace - resource identity. +// parentResourcePath - resource identity. +// resourceType - resource identity. +// resourceName - resource identity. 
+func (client Client) CheckExistence(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. +func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. 
+func (client Client) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate create a resource. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// resourceProviderNamespace - resource identity. +// parentResourcePath - resource identity. +// resourceType - resource identity. +// resourceName - resource identity. +// parameters - create or update resource parameters. +func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result GenericResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
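Because `CheckExistenceResponder` accepts 404 as a valid, non-error response alongside 200 and 204, existence is decided from the status code rather than from `err`. A sketch combining it with `CreateOrUpdate`; the resource type and trimmed-down body are illustrative only:

```go
// ensureResource creates the resource only when the HEAD check says it is
// absent; a real virtualNetworks body would also need Properties.
func ensureResource(ctx context.Context, rc resources.Client) error {
	resp, err := rc.CheckExistence(ctx, "example-rg", "Microsoft.Network", "", "virtualNetworks", "example-vnet")
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusNotFound {
		return nil // already exists (200 or 204)
	}
	_, err = rc.CreateOrUpdate(ctx, "example-rg", "Microsoft.Network", "", "virtualNetworks", "example-vnet",
		resources.GenericResource{Location: to.StringPtr("eastus")})
	return err
}
```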
+func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client Client) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete resource and all of its resources. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// resourceProviderNamespace - resource identity. +// parentResourcePath - resource identity. +// resourceType - resource identity. +// resourceName - resource identity. 
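All of these operations share one URL template, and the preparers deliberately leave `parentResourcePath` and `resourceType` out of `autorest.Encode`: those two segments may themselves contain slashes, which is how the same template addresses both top-level and nested resources. An illustrative fragment, continuing the earlier sketch and using the `Get` operation defined further down (the Microsoft.Network names are assumptions, not taken from this diff):

```go
// The unencoded segments expand into
// .../providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/my-subnet
subnet, err := client.Get(context.Background(),
	"my-rg",
	"Microsoft.Network",       // resourceProviderNamespace
	"virtualNetworks/my-vnet", // parentResourcePath: may contain a slash
	"subnets",                 // resourceType
	"my-subnet")               // resourceName
if err != nil {
	panic(err)
}
fmt.Println(*subnet.ID) // subnet is a resources.GenericResource
```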
+func (client Client) Delete(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client Client) DeletePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a resource belonging to a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// resourceProviderNamespace - resource identity. +// parentResourcePath - resource identity. +// resourceType - resource identity. +// resourceName - resource identity. +func (client Client) Get(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result GenericResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client Client) GetPreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client Client) GetResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all of the resources under a subscription. +// Parameters: +// filter - the filter to apply on the operation. +// expand - the $expand query parameter. +// top - query parameters. If null is passed returns all resource groups. +func (client Client) List(ctx context.Context, filter string, expand string, top *int32) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.List") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, expand, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure sending request") + return + } + + result.lr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(ctx context.Context, filter string, expand string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client Client) ListResponder(resp *http.Response) (result ListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client Client) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { + req, err := lastResults.listResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client Client) ListComplete(ctx context.Context, filter string, expand string, top *int32) (result ListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, expand, top) + return +} + +// MoveResources move resources from one resource group to another. The resources being moved should all be in the same +// resource group. +// Parameters: +// sourceResourceGroupName - source resource group name. +// parameters - move resources' parameters. +func (client Client) MoveResources(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (result MoveResourcesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.MoveResources") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: sourceResourceGroupName, + Constraints: []validation.Constraint{{Target: "sourceResourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "MoveResources", err.Error()) + } + + req, err := client.MoveResourcesPreparer(ctx, sourceResourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "MoveResources", nil, "Failure preparing request") + return + } + + result, err = client.MoveResourcesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "MoveResources", result.Response(), "Failure sending request") + return + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. 
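Stepping back to the `List`/`ListComplete` pair above: they show the two pagination styles the generator emits. `List` returns a single `ListResultPage` whose `fn` field (`listNextResults`) fetches the next page on demand, while `ListComplete` wraps the same call in an iterator that crosses page boundaries automatically. The iterator's `NotDone`, `Value` and `NextWithContext` methods live in the package's models.go, which this diff does not include, so the sketch below assumes their conventional generated signatures:

```go
// Continuing the earlier sketch: enumerate every resource in the subscription.
ctx := context.Background()
iter, err := client.ListComplete(ctx, "", "", nil) // no $filter, $expand or $top
if err != nil {
	panic(err)
}
for iter.NotDone() {
	res := iter.Value() // a resources.GenericResource
	fmt.Printf("%s (%s)\n", *res.Name, *res.Type)
	if err := iter.NextWithContext(ctx); err != nil {
		panic(err)
	}
}
```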
+func (client Client) MoveResourcesPreparer(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": autorest.Encode("path", sourceResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. +func (client Client) MoveResourcesSender(req *http.Request) (future MoveResourcesFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client Client) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates a resource. +// Parameters: +// resourceGroupName - the name of the resource group for the resource. The name is case insensitive. +// resourceProviderNamespace - the namespace of the resource provider. +// parentResourcePath - the parent resource identity. +// resourceType - the resource type of the resource to update. +// resourceName - the name of the resource to update. +// parameters - parameters for updating the resource. 
+func (client Client) Update(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result UpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client Client) UpdateSender(req *http.Request) (future UpdateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
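`MoveResources` and `Update` are long-running operations: their senders do not wait for the service but wrap the initial response in an `azure.Future` (`MoveResourcesFuture`, `UpdateFuture`) for the caller to poll. A sketch of driving the PATCH above to completion, assuming go-autorest's `WaitForCompletionRef` (older go-autorest versions spell this `WaitForCompletion`), the future's generated `Result` method, and the `go-autorest/autorest/to` pointer helpers:

```go
// Continuing the earlier sketch: patch a resource's tags, then block until done.
future, err := client.Update(ctx, "my-rg", "Microsoft.Network", "",
	"virtualNetworks", "my-vnet",
	resources.GenericResource{
		Tags: map[string]*string{"env": to.StringPtr("dev")},
	})
if err != nil {
	panic(err)
}
// Polls the operation URL until the service reports a terminal state.
if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
	panic(err)
}
updated, err := future.Result(client) // unmarshals the final GenericResource
if err != nil {
	panic(err)
}
fmt.Println(*updated.ID)
```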
+func (client Client) UpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/tags.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/tags.go new file mode 100644 index 00000000..7d5b286c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/tags.go @@ -0,0 +1,453 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TagsClient is the client for the Tags methods of the Resources service. +type TagsClient struct { + BaseClient +} + +// NewTagsClient creates an instance of the TagsClient client. +func NewTagsClient(subscriptionID string) TagsClient { + return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTagsClientWithBaseURI creates an instance of the TagsClient client. +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a subscription resource tag. +// Parameters: +// tagName - the name of the tag. +func (client TagsClient) CreateOrUpdate(ctx context.Context, tagName string) (result TagDetails, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, tagName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client TagsClient) CreateOrUpdatePreparer(ctx context.Context, tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateValue create a subscription resource tag value. +// Parameters: +// tagName - the name of the tag. +// tagValue - the value of the tag. +func (client TagsClient) CreateOrUpdateValue(ctx context.Context, tagName string, tagValue string) (result TagValue, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdateValue") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdateValuePreparer(ctx, tagName, tagValue) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateValueSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request. 
+func (client TagsClient) CreateOrUpdateValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete a subscription resource tag. +// Parameters: +// tagName - the name of the tag. +func (client TagsClient) Delete(ctx context.Context, tagName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, tagName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TagsClient) DeletePreparer(ctx context.Context, tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteValue delete a subscription resource tag value. +// Parameters: +// tagName - the name of the tag. +// tagValue - the value of the tag. +func (client TagsClient) DeleteValue(ctx context.Context, tagName string, tagValue string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.DeleteValue") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteValuePreparer(ctx, tagName, tagValue) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteValueSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure sending request") + return + } + + result, err = client.DeleteValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure responding to request") + } + + return +} + +// DeleteValuePreparer prepares the DeleteValue request. +func (client TagsClient) DeleteValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteValueSender sends the DeleteValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteValueResponder handles the response to the DeleteValue request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List get a list of subscription resource tags. 
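`TagsClient` manages the subscription-wide tag taxonomy rather than tags on individual resources: `CreateOrUpdate` and `Delete` act on tag names, while `CreateOrUpdateValue` and `DeleteValue` act on the values registered under a name. A sketch under the same assumptions as the earlier ones (the "environment"/"staging" pair is illustrative, and the delete ordering reflects ARM's documented rule that a tag name cannot be removed while values remain under it):

```go
// Continuing the earlier sketch: maintain the subscription's tag taxonomy.
tags := resources.NewTagsClient("00000000-0000-0000-0000-000000000000")
tags.Authorizer = authorizer

if _, err := tags.CreateOrUpdate(ctx, "environment"); err != nil {
	panic(err)
}
if _, err := tags.CreateOrUpdateValue(ctx, "environment", "staging"); err != nil {
	panic(err)
}

// Values must be deleted before the tag name itself can be deleted.
if _, err := tags.DeleteValue(ctx, "environment", "staging"); err != nil {
	panic(err)
}
if _, err := tags.Delete(ctx, "environment"); err != nil {
	panic(err)
}
```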
+func (client TagsClient) List(ctx context.Context) (result TagsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List") + defer func() { + sc := -1 + if result.tlr.Response.Response != nil { + sc = result.tlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.tlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure sending request") + return + } + + result.tlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TagsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-02-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client TagsClient) listNextResults(ctx context.Context, lastResults TagsListResult) (result TagsListResult, err error) { + req, err := lastResults.tagsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client TagsClient) ListComplete(ctx context.Context) (result TagsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/version.go new file mode 100644 index 00000000..b7b50288 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-02-01/resources/version.go @@ -0,0 +1,30 @@ +package resources + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " resources/2016-02-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/accounts.go new file mode 100644 index 00000000..f381b9db --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/accounts.go @@ -0,0 +1,813 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AccountsClient is the the Storage Management Client. 
+type AccountsClient struct { + BaseClient +} + +// NewAccountsClient creates an instance of the AccountsClient client. +func NewAccountsClient(subscriptionID string) AccountsClient { + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient client. +func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckNameAvailability checks that the storage account name is valid and is not already in use. +// Parameters: +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) CheckNameAvailability(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (result CheckNameAvailabilityResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.CheckNameAvailability") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "accountName.Type", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "CheckNameAvailability", err.Error()) + } + + req, err := client.CheckNameAvailabilityPreparer(ctx, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", nil, "Failure preparing request") + return + } + + resp, err := client.CheckNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure sending request") + return + } + + result, err = client.CheckNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "CheckNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckNameAvailabilityPreparer prepares the CheckNameAvailability request. +func (client AccountsClient) CheckNameAvailabilityPreparer(ctx context.Context, accountName AccountCheckNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability", pathParameters), + autorest.WithJSON(accountName), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the +// http.Response Body if it receives an error. 
+func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always +// closes the http.Response Body. +func (client AccountsClient) CheckNameAvailabilityResponder(resp *http.Response) (result CheckNameAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Create asynchronously creates a new storage account with the specified parameters. If an account is already created +// and a subsequent create request is issued with different properties, the account properties will be updated. If an +// account is already created and a subsequent create or update request is issued with the exact same set of +// properties, the request will succeed. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide for the created account. +func (client AccountsClient) Create(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (result AccountsCreateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Create") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Sku", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AccountPropertiesCreateParameters", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.CustomDomain.Name", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.AccountPropertiesCreateParameters.Encryption", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.AccountPropertiesCreateParameters.Encryption.KeySource", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Create", err.Error()) + } + + req, err := client.CreatePreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", nil, "Failure preparing request") + return + } + + result, err = client.CreateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Create", 
result.Response(), "Failure sending request") + return + } + + return +} + +// CreatePreparer prepares the Create request. +func (client AccountsClient) CreatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountCreateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateSender sends the Create request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) CreateSender(req *http.Request) (future AccountsCreateFuture, err error) { + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateResponder handles the response to the Create request. The method always +// closes the http.Response Body. +func (client AccountsClient) CreateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a storage account in Microsoft Azure. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
+func (client AccountsClient) Delete(ctx context.Context, resourceGroupName string, accountName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AccountsClient) DeletePreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AccountsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetProperties returns the properties for the specified storage account including but not limited to name, SKU name, +// location, and account status. The ListKeys operation should be used to retrieve storage keys. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. 
Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +func (client AccountsClient) GetProperties(ctx context.Context, resourceGroupName string, accountName string) (result Account, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.GetProperties") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "GetProperties", err.Error()) + } + + req, err := client.GetPropertiesPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", nil, "Failure preparing request") + return + } + + resp, err := client.GetPropertiesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure sending request") + return + } + + result, err = client.GetPropertiesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "GetProperties", resp, "Failure responding to request") + } + + return +} + +// GetPropertiesPreparer prepares the GetProperties request. +func (client AccountsClient) GetPropertiesPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetPropertiesSender sends the GetProperties request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetPropertiesResponder handles the response to the GetProperties request. The method always +// closes the http.Response Body. +func (client AccountsClient) GetPropertiesResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the storage accounts available under the subscription. Note that storage keys are not returned; use +// the ListKeys operation for this. 
+func (client AccountsClient) List(ctx context.Context) (result AccountListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client AccountsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists all the storage accounts available under the given resource group. Note that storage keys +// are not returned; use the ListKeys operation for this. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. 
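+//
+// A minimal usage sketch (the resource group name is illustrative):
+//   res, err := accounts.ListByResourceGroup(ctx, "myResourceGroup")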
+func (client AccountsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AccountListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AccountsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListByResourceGroupResponder(resp *http.Response) (result AccountListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListKeys lists the access keys for the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. 
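+//
+// A minimal usage sketch (names illustrative; the keys are returned in
+// result.Keys):
+//   keys, err := accounts.ListKeys(ctx, "myResourceGroup", "mystorageacct")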
+func (client AccountsClient) ListKeys(ctx context.Context, resourceGroupName string, accountName string) (result AccountListKeysResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.ListKeys") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "ListKeys", err.Error()) + } + + req, err := client.ListKeysPreparer(ctx, resourceGroupName, accountName) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", nil, "Failure preparing request") + return + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure sending request") + return + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client AccountsClient) ListKeysPreparer(ctx context.Context, resourceGroupName string, accountName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client AccountsClient) ListKeysResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKey regenerates one of the access keys for the specified storage account. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. 
Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// regenerateKey - specifies name of the key which should be regenerated -- key1 or key2. +func (client AccountsClient) RegenerateKey(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (result AccountListKeysResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.RegenerateKey") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}, + {TargetValue: regenerateKey, + Constraints: []validation.Constraint{{Target: "regenerateKey.KeyName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "RegenerateKey", err.Error()) + } + + req, err := client.RegenerateKeyPreparer(ctx, resourceGroupName, accountName, regenerateKey) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", nil, "Failure preparing request") + return + } + + resp, err := client.RegenerateKeySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure sending request") + return + } + + result, err = client.RegenerateKeyResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "RegenerateKey", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeyPreparer prepares the RegenerateKey request. +func (client AccountsClient) RegenerateKeyPreparer(ctx context.Context, resourceGroupName string, accountName string, regenerateKey AccountRegenerateKeyParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey", pathParameters), + autorest.WithJSON(regenerateKey), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegenerateKeySender sends the RegenerateKey request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// RegenerateKeyResponder handles the response to the RegenerateKey request. The method always +// closes the http.Response Body. 
+func (client AccountsClient) RegenerateKeyResponder(resp *http.Response) (result AccountListKeysResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update the update operation can be used to update the SKU, encryption, access tier, or tags for a storage account. +// It can also be used to map the account to a custom domain. Only one custom domain is supported per storage account; +// the replacement/change of custom domain is not supported. In order to replace an old custom domain, the old value +// must be cleared/unregistered before a new value can be set. The update of multiple properties is supported. This +// call does not change the storage keys for the account. If you want to change the storage account keys, use the +// regenerate keys operation. The location and name of the storage account cannot be changed after creation. +// Parameters: +// resourceGroupName - the name of the resource group within the user's subscription. +// accountName - the name of the storage account within the specified resource group. Storage account names +// must be between 3 and 24 characters in length and use numbers and lower-case letters only. +// parameters - the parameters to provide for the updated account. +func (client AccountsClient) Update(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (result Account, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AccountsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: accountName, + Constraints: []validation.Constraint{{Target: "accountName", Name: validation.MaxLength, Rule: 24, Chain: nil}, + {Target: "accountName", Name: validation.MinLength, Rule: 3, Chain: nil}}}}); err != nil { + return result, validation.NewError("storage.AccountsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, accountName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client AccountsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, accountName string, parameters AccountUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "accountName": autorest.Encode("path", accountName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client AccountsClient) UpdateResponder(resp *http.Response) (result Account, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/client.go new file mode 100644 index 00000000..483dfc6d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/client.go @@ -0,0 +1,51 @@ +// Package storage implements the Azure ARM Storage service API version 2016-01-01. +// +// The Storage Management Client. +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Storage + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Storage. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. 
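+//
+// For example (subscription ID illustrative):
+//   base := storage.New("<subscriptionID>")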
+func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/models.go new file mode 100644 index 00000000..f8dbdc07 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/models.go @@ -0,0 +1,690 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage" + +// AccessTier enumerates the values for access tier. +type AccessTier string + +const ( + // Cool ... + Cool AccessTier = "Cool" + // Hot ... + Hot AccessTier = "Hot" +) + +// PossibleAccessTierValues returns an array of possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return []AccessTier{Cool, Hot} +} + +// AccountStatus enumerates the values for account status. +type AccountStatus string + +const ( + // Available ... + Available AccountStatus = "Available" + // Unavailable ... + Unavailable AccountStatus = "Unavailable" +) + +// PossibleAccountStatusValues returns an array of possible values for the AccountStatus const type. +func PossibleAccountStatusValues() []AccountStatus { + return []AccountStatus{Available, Unavailable} +} + +// KeyPermission enumerates the values for key permission. +type KeyPermission string + +const ( + // FULL ... + FULL KeyPermission = "FULL" + // READ ... + READ KeyPermission = "READ" +) + +// PossibleKeyPermissionValues returns an array of possible values for the KeyPermission const type. +func PossibleKeyPermissionValues() []KeyPermission { + return []KeyPermission{FULL, READ} +} + +// Kind enumerates the values for kind. +type Kind string + +const ( + // BlobStorage ... + BlobStorage Kind = "BlobStorage" + // Storage ... + Storage Kind = "Storage" +) + +// PossibleKindValues returns an array of possible values for the Kind const type. +func PossibleKindValues() []Kind { + return []Kind{BlobStorage, Storage} +} + +// ProvisioningState enumerates the values for provisioning state. 
+type ProvisioningState string + +const ( + // Creating ... + Creating ProvisioningState = "Creating" + // ResolvingDNS ... + ResolvingDNS ProvisioningState = "ResolvingDNS" + // Succeeded ... + Succeeded ProvisioningState = "Succeeded" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{Creating, ResolvingDNS, Succeeded} +} + +// Reason enumerates the values for reason. +type Reason string + +const ( + // AccountNameInvalid ... + AccountNameInvalid Reason = "AccountNameInvalid" + // AlreadyExists ... + AlreadyExists Reason = "AlreadyExists" +) + +// PossibleReasonValues returns an array of possible values for the Reason const type. +func PossibleReasonValues() []Reason { + return []Reason{AccountNameInvalid, AlreadyExists} +} + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // PremiumLRS ... + PremiumLRS SkuName = "Premium_LRS" + // StandardGRS ... + StandardGRS SkuName = "Standard_GRS" + // StandardLRS ... + StandardLRS SkuName = "Standard_LRS" + // StandardRAGRS ... + StandardRAGRS SkuName = "Standard_RAGRS" + // StandardZRS ... + StandardZRS SkuName = "Standard_ZRS" +) + +// PossibleSkuNameValues returns an array of possible values for the SkuName const type. +func PossibleSkuNameValues() []SkuName { + return []SkuName{PremiumLRS, StandardGRS, StandardLRS, StandardRAGRS, StandardZRS} +} + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // Premium ... + Premium SkuTier = "Premium" + // Standard ... + Standard SkuTier = "Standard" +) + +// PossibleSkuTierValues returns an array of possible values for the SkuTier const type. +func PossibleSkuTierValues() []SkuTier { + return []SkuTier{Premium, Standard} +} + +// UsageUnit enumerates the values for usage unit. +type UsageUnit string + +const ( + // Bytes ... + Bytes UsageUnit = "Bytes" + // BytesPerSecond ... + BytesPerSecond UsageUnit = "BytesPerSecond" + // Count ... + Count UsageUnit = "Count" + // CountsPerSecond ... + CountsPerSecond UsageUnit = "CountsPerSecond" + // Percent ... + Percent UsageUnit = "Percent" + // Seconds ... + Seconds UsageUnit = "Seconds" +) + +// PossibleUsageUnitValues returns an array of possible values for the UsageUnit const type. +func PossibleUsageUnitValues() []UsageUnit { + return []UsageUnit{Bytes, BytesPerSecond, Count, CountsPerSecond, Percent, Seconds} +} + +// Account the storage account. +type Account struct { + autorest.Response `json:"-"` + // Sku - READ-ONLY; Gets the SKU. + Sku *Sku `json:"sku,omitempty"` + // Kind - READ-ONLY; Gets the Kind. Possible values include: 'Storage', 'BlobStorage' + Kind Kind `json:"kind,omitempty"` + *AccountProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Tags assigned to a resource; can be used for viewing and grouping a resource (across resource groups). + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Account. 
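+// Only the mutable fields (properties, location, and tags) are emitted;
+// READ-ONLY fields such as Sku, Kind, ID, Name, and Type are omitted.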
+func (a Account) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if a.AccountProperties != nil { + objectMap["properties"] = a.AccountProperties + } + if a.Location != nil { + objectMap["location"] = a.Location + } + if a.Tags != nil { + objectMap["tags"] = a.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for Account struct. +func (a *Account) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + a.Sku = &sku + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + a.Kind = kind + } + case "properties": + if v != nil { + var accountProperties AccountProperties + err = json.Unmarshal(*v, &accountProperties) + if err != nil { + return err + } + a.AccountProperties = &accountProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + a.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + a.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + a.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + a.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + a.Tags = tags + } + } + } + + return nil +} + +// AccountCheckNameAvailabilityParameters ... +type AccountCheckNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// AccountCreateParameters the parameters used when creating a storage account. +type AccountCreateParameters struct { + // Sku - Required. Gets or sets the sku name. + Sku *Sku `json:"sku,omitempty"` + // Kind - Required. Indicates the type of storage account. Possible values include: 'Storage', 'BlobStorage' + Kind Kind `json:"kind,omitempty"` + // Location - Required. Gets or sets the location of the resource. This will be one of the supported and registered Azure Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once it is created, but if an identical geo region is specified on update, the request will succeed. + Location *string `json:"location,omitempty"` + // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used for viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key with a length no greater than 128 characters and a value with a length no greater than 256 characters. + Tags map[string]*string `json:"tags"` + *AccountPropertiesCreateParameters `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountCreateParameters. 
+func (acp AccountCreateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if acp.Sku != nil { + objectMap["sku"] = acp.Sku + } + if acp.Kind != "" { + objectMap["kind"] = acp.Kind + } + if acp.Location != nil { + objectMap["location"] = acp.Location + } + if acp.Tags != nil { + objectMap["tags"] = acp.Tags + } + if acp.AccountPropertiesCreateParameters != nil { + objectMap["properties"] = acp.AccountPropertiesCreateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountCreateParameters struct. +func (acp *AccountCreateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + acp.Sku = &sku + } + case "kind": + if v != nil { + var kind Kind + err = json.Unmarshal(*v, &kind) + if err != nil { + return err + } + acp.Kind = kind + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + acp.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + acp.Tags = tags + } + case "properties": + if v != nil { + var accountPropertiesCreateParameters AccountPropertiesCreateParameters + err = json.Unmarshal(*v, &accountPropertiesCreateParameters) + if err != nil { + return err + } + acp.AccountPropertiesCreateParameters = &accountPropertiesCreateParameters + } + } + } + + return nil +} + +// AccountKey an access key for the storage account. +type AccountKey struct { + // KeyName - READ-ONLY; Name of the key. + KeyName *string `json:"keyName,omitempty"` + // Value - READ-ONLY; Base 64-encoded value of the key. + Value *string `json:"value,omitempty"` + // Permissions - READ-ONLY; Permissions for the key -- read-only or full permissions. Possible values include: 'READ', 'FULL' + Permissions KeyPermission `json:"permissions,omitempty"` +} + +// AccountListKeysResult the response from the ListKeys operation. +type AccountListKeysResult struct { + autorest.Response `json:"-"` + // Keys - READ-ONLY; Gets the list of storage account keys and their properties for the specified storage account. + Keys *[]AccountKey `json:"keys,omitempty"` +} + +// AccountListResult the response from the List Storage Accounts operation. +type AccountListResult struct { + autorest.Response `json:"-"` + // Value - READ-ONLY; Gets the list of storage accounts and their properties. + Value *[]Account `json:"value,omitempty"` +} + +// AccountProperties ... +type AccountProperties struct { + // ProvisioningState - READ-ONLY; Gets the status of the storage account at the time the operation was called. Possible values include: 'Creating', 'ResolvingDNS', 'Succeeded' + ProvisioningState ProvisioningState `json:"provisioningState,omitempty"` + // PrimaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object. Note that Standard_ZRS and Premium_LRS accounts only return the blob endpoint. + PrimaryEndpoints *Endpoints `json:"primaryEndpoints,omitempty"` + // PrimaryLocation - READ-ONLY; Gets the location of the primary data center for the storage account. 
+ PrimaryLocation *string `json:"primaryLocation,omitempty"` + // StatusOfPrimary - READ-ONLY; Gets the status indicating whether the primary location of the storage account is available or unavailable. Possible values include: 'Available', 'Unavailable' + StatusOfPrimary AccountStatus `json:"statusOfPrimary,omitempty"` + // LastGeoFailoverTime - READ-ONLY; Gets the timestamp of the most recent instance of a failover to the secondary location. Only the most recent timestamp is retained. This element is not returned if there has never been a failover instance. Only available if the accountType is Standard_GRS or Standard_RAGRS. + LastGeoFailoverTime *date.Time `json:"lastGeoFailoverTime,omitempty"` + // SecondaryLocation - READ-ONLY; Gets the location of the geo-replicated secondary for the storage account. Only available if the accountType is Standard_GRS or Standard_RAGRS. + SecondaryLocation *string `json:"secondaryLocation,omitempty"` + // StatusOfSecondary - READ-ONLY; Gets the status indicating whether the secondary location of the storage account is available or unavailable. Only available if the SKU name is Standard_GRS or Standard_RAGRS. Possible values include: 'Available', 'Unavailable' + StatusOfSecondary AccountStatus `json:"statusOfSecondary,omitempty"` + // CreationTime - READ-ONLY; Gets the creation date and time of the storage account in UTC. + CreationTime *date.Time `json:"creationTime,omitempty"` + // CustomDomain - READ-ONLY; Gets the custom domain the user assigned to this storage account. + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // SecondaryEndpoints - READ-ONLY; Gets the URLs that are used to perform a retrieval of a public blob, queue, or table object from the secondary location of the storage account. Only available if the SKU name is Standard_RAGRS. + SecondaryEndpoints *Endpoints `json:"secondaryEndpoints,omitempty"` + // Encryption - READ-ONLY; Gets the encryption settings on the account. If unspecified, the account is unencrypted. + Encryption *Encryption `json:"encryption,omitempty"` + // AccessTier - READ-ONLY; Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool' + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountPropertiesCreateParameters ... +type AccountPropertiesCreateParameters struct { + // CustomDomain - User domain assigned to the storage account. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. + CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // Encryption - Provides the encryption settings on the account. If left unspecified the account encryption settings will remain the same. The default setting is unencrypted. + Encryption *Encryption `json:"encryption,omitempty"` + // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool' + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountPropertiesUpdateParameters ... +type AccountPropertiesUpdateParameters struct { + // CustomDomain - Custom domain assigned to the storage account by the user. Name is the CNAME source. Only one custom domain is supported per storage account at this time. To clear the existing custom domain, use an empty string for the custom domain name property. 
+ CustomDomain *CustomDomain `json:"customDomain,omitempty"` + // Encryption - Provides the encryption settings on the account. The default setting is unencrypted. + Encryption *Encryption `json:"encryption,omitempty"` + // AccessTier - Required for storage accounts where kind = BlobStorage. The access tier used for billing. Possible values include: 'Hot', 'Cool' + AccessTier AccessTier `json:"accessTier,omitempty"` +} + +// AccountRegenerateKeyParameters ... +type AccountRegenerateKeyParameters struct { + KeyName *string `json:"keyName,omitempty"` +} + +// AccountsCreateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type AccountsCreateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *AccountsCreateFuture) Result(client AccountsClient) (a Account, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("storage.AccountsCreateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if a.Response.Response, err = future.GetResult(sender); err == nil && a.Response.Response.StatusCode != http.StatusNoContent { + a, err = client.CreateResponder(a.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "storage.AccountsCreateFuture", "Result", a.Response.Response, "Failure responding to request") + } + } + return +} + +// AccountUpdateParameters the parameters that can be provided when updating the storage account +// properties. +type AccountUpdateParameters struct { + // Sku - Gets or sets the SKU name. Note that the SKU name cannot be updated to Standard_ZRS or Premium_LRS, nor can accounts of those sku names be updated to any other value. + Sku *Sku `json:"sku,omitempty"` + // Tags - Gets or sets a list of key value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater in length than 128 characters and a value no greater in length than 256 characters. + Tags map[string]*string `json:"tags"` + *AccountPropertiesUpdateParameters `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for AccountUpdateParameters. +func (aup AccountUpdateParameters) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if aup.Sku != nil { + objectMap["sku"] = aup.Sku + } + if aup.Tags != nil { + objectMap["tags"] = aup.Tags + } + if aup.AccountPropertiesUpdateParameters != nil { + objectMap["properties"] = aup.AccountPropertiesUpdateParameters + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AccountUpdateParameters struct. 
+func (aup *AccountUpdateParameters) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "sku": + if v != nil { + var sku Sku + err = json.Unmarshal(*v, &sku) + if err != nil { + return err + } + aup.Sku = &sku + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + aup.Tags = tags + } + case "properties": + if v != nil { + var accountPropertiesUpdateParameters AccountPropertiesUpdateParameters + err = json.Unmarshal(*v, &accountPropertiesUpdateParameters) + if err != nil { + return err + } + aup.AccountPropertiesUpdateParameters = &accountPropertiesUpdateParameters + } + } + } + + return nil +} + +// CheckNameAvailabilityResult the CheckNameAvailability operation response. +type CheckNameAvailabilityResult struct { + autorest.Response `json:"-"` + // NameAvailable - READ-ONLY; Gets a boolean value that indicates whether the name is available for you to use. If true, the name is available. If false, the name has already been taken or is invalid and cannot be used. + NameAvailable *bool `json:"nameAvailable,omitempty"` + // Reason - READ-ONLY; Gets the reason that a storage account name could not be used. The Reason element is only returned if NameAvailable is false. Possible values include: 'AccountNameInvalid', 'AlreadyExists' + Reason Reason `json:"reason,omitempty"` + // Message - READ-ONLY; Gets an error message explaining the Reason value in more detail. + Message *string `json:"message,omitempty"` +} + +// CustomDomain the custom domain assigned to this storage account. This can be set via Update. +type CustomDomain struct { + // Name - Gets or sets the custom domain name assigned to the storage account. Name is the CNAME source. + Name *string `json:"name,omitempty"` + // UseSubDomainName - Indicates whether indirect CName validation is enabled. Default value is false. This should only be set on updates. + UseSubDomainName *bool `json:"useSubDomainName,omitempty"` +} + +// Encryption the encryption settings on the storage account. +type Encryption struct { + // Services - List of services which support encryption. + Services *EncryptionServices `json:"services,omitempty"` + // KeySource - The encryption keySource (provider). Possible values (case-insensitive): Microsoft.Storage + KeySource *string `json:"keySource,omitempty"` +} + +// EncryptionService a service that allows server-side encryption to be used. +type EncryptionService struct { + // Enabled - A boolean indicating whether or not the service encrypts the data as it is stored. + Enabled *bool `json:"enabled,omitempty"` + // LastEnabledTime - READ-ONLY; Gets a rough estimate of the date/time when the encryption was last enabled by the user. Only returned when encryption is enabled. There might be some unencrypted blobs which were written after this time, as it is just a rough estimate. + LastEnabledTime *date.Time `json:"lastEnabledTime,omitempty"` +} + +// EncryptionServices a list of services that support encryption. +type EncryptionServices struct { + // Blob - The encryption function of the blob storage service. + Blob *EncryptionService `json:"blob,omitempty"` +} + +// Endpoints the URIs that are used to perform a retrieval of a public blob, queue, or table object. +type Endpoints struct { + // Blob - READ-ONLY; Gets the blob endpoint. 
+ Blob *string `json:"blob,omitempty"` + // Queue - READ-ONLY; Gets the queue endpoint. + Queue *string `json:"queue,omitempty"` + // Table - READ-ONLY; Gets the table endpoint. + Table *string `json:"table,omitempty"` + // File - READ-ONLY; Gets the file endpoint. + File *string `json:"file,omitempty"` +} + +// Resource ... +type Resource struct { + // ID - READ-ONLY; Resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Tags assigned to a resource; can be used for viewing and grouping a resource (across resource groups). + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// Sku the SKU of the storage account. +type Sku struct { + // Name - Gets or sets the sku name. Required for account creation; optional for update. Note that in older versions, sku name was called accountType. Possible values include: 'StandardLRS', 'StandardGRS', 'StandardRAGRS', 'StandardZRS', 'PremiumLRS' + Name SkuName `json:"name,omitempty"` + // Tier - READ-ONLY; Gets the sku tier. This is based on the SKU name. Possible values include: 'Standard', 'Premium' + Tier SkuTier `json:"tier,omitempty"` +} + +// Usage describes Storage Resource Usage. +type Usage struct { + // Unit - READ-ONLY; Gets the unit of measurement. Possible values include: 'Count', 'Bytes', 'Seconds', 'Percent', 'CountsPerSecond', 'BytesPerSecond' + Unit UsageUnit `json:"unit,omitempty"` + // CurrentValue - READ-ONLY; Gets the current count of the allocated resources in the subscription. + CurrentValue *int32 `json:"currentValue,omitempty"` + // Limit - READ-ONLY; Gets the maximum count of the resources that can be allocated in the subscription. + Limit *int32 `json:"limit,omitempty"` + // Name - READ-ONLY; Gets the name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// UsageListResult the response from the List Usages operation. +type UsageListResult struct { + autorest.Response `json:"-"` + // Value - Gets or sets the list of Storage Resource Usages. + Value *[]Usage `json:"value,omitempty"` +} + +// UsageName the usage names that can be used; currently limited to StorageAccount. +type UsageName struct { + // Value - READ-ONLY; Gets a string describing the resource name. + Value *string `json:"value,omitempty"` + // LocalizedValue - READ-ONLY; Gets a localized string describing the resource name. + LocalizedValue *string `json:"localizedValue,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/usage.go new file mode 100644 index 00000000..d9558309 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/usage.go @@ -0,0 +1,113 @@ +package storage + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// UsageClient is the Storage Management Client.
+type UsageClient struct {
+	BaseClient
+}
+
+// NewUsageClient creates an instance of the UsageClient client.
+func NewUsageClient(subscriptionID string) UsageClient {
+	return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewUsageClientWithBaseURI creates an instance of the UsageClient client.
+func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient {
+	return UsageClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the current usage count and the limit for the resources under the subscription.
+func (client UsageClient) List(ctx context.Context) (result UsageListResult, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/UsageClient.List")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.ListPreparer(ctx)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "storage.UsageClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client UsageClient) ListPreparer(ctx context.Context) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2016-01-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Storage/usages", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client UsageClient) ListResponder(resp *http.Response) (result UsageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/version.go new file mode 100644 index 00000000..b7c1881b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2016-01-01/storage/version.go @@ -0,0 +1,30 @@ +package storage + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " storage/2016-01-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md new file mode 100644 index 00000000..459b4583 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -0,0 +1,22 @@ +# Azure Storage SDK for Go (Preview) + +:exclamation: IMPORTANT: This package is in maintenance only and will be deprecated in the +future. Please use one of the following packages instead. + +| Service | Import Path/Repo | +|---------|------------------| +| Storage - Blobs | [github.com/Azure/azure-storage-blob-go](https://github.com/Azure/azure-storage-blob-go) | +| Storage - Files | [github.com/Azure/azure-storage-file-go](https://github.com/Azure/azure-storage-file-go) | +| Storage - Queues | [github.com/Azure/azure-storage-queue-go](https://github.com/Azure/azure-storage-queue-go) | + +The `github.com/Azure/azure-sdk-for-go/storage` package is used to manage +[Azure Storage](https://docs.microsoft.com/en-us/azure/storage/) data plane +resources: containers, blobs, tables, and queues. + +To manage storage *accounts* use Azure Resource Manager (ARM) via the packages +at [github.com/Azure/azure-sdk-for-go/services/storage](https://github.com/Azure/azure-sdk-for-go/tree/master/services/storage). + +This package also supports the [Azure Storage +Emulator](https://azure.microsoft.com/documentation/articles/storage-use-emulator/) +(Windows only). 
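+
+For illustration, a minimal sketch of connecting with the classic shared-key
+client (the account name and base64-encoded key below are placeholders):
+
+```go
+package main
+
+import "github.com/Azure/azure-sdk-for-go/storage"
+
+func main() {
+	// NewBasicClient takes the storage account name and a base64-encoded key.
+	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
+	if err != nil {
+		panic(err)
+	}
+	// Service clients for blobs, queues, tables, and files hang off the client.
+	_ = client.GetBlobService()
+}
+```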
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
new file mode 100644
index 00000000..8b5b96d4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/appendblob.go
@@ -0,0 +1,91 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// PutAppendBlob initializes an empty append blob with the specified name. An
+// append blob must be created using this method before appending blocks.
+//
+// See CreateBlockBlobFromReader for more info on creating blobs.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
+func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
+	params := url.Values{}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypeAppend)
+	headers = mergeHeaders(headers, headersFromStruct(b.Properties))
+	headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	return b.respondCreation(resp, BlobTypeAppend)
+}
+
+// AppendBlockOptions includes the options for an append block operation
+type AppendBlockOptions struct {
+	Timeout           uint
+	LeaseID           string     `header:"x-ms-lease-id"`
+	MaxSize           *uint      `header:"x-ms-blob-condition-maxsize"`
+	AppendPosition    *uint      `header:"x-ms-blob-condition-appendpos"`
+	IfModifiedSince   *time.Time `header:"If-Modified-Since"`
+	IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"`
+	IfMatch           string     `header:"If-Match"`
+	IfNoneMatch       string     `header:"If-None-Match"`
+	RequestID         string     `header:"x-ms-client-request-id"`
+	ContentMD5        bool
+}
+
+// AppendBlock appends a block to an append blob.
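+// A minimal usage sketch (the container reference and blob name are
+// illustrative):
+//   blob := container.GetBlobReference("log.txt")
+//   if err := blob.PutAppendBlob(nil); err != nil {
+//           return err
+//   }
+//   err := blob.AppendBlock([]byte("entry\n"), nil)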
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Append-Block +func (b *Blob) AppendBlock(chunk []byte, options *AppendBlockOptions) error { + params := url.Values{"comp": {"appendblock"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeAppend) + headers["Content-Length"] = fmt.Sprintf("%v", len(chunk)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.ContentMD5 { + md5sum := md5.Sum(chunk) + headers[headerContentMD5] = base64.StdEncoding.EncodeToString(md5sum[:]) + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes.NewReader(chunk), b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeAppend) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go new file mode 100644 index 00000000..76794c30 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/authorization.go @@ -0,0 +1,246 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
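+
+// Shared key authorization, as implemented below, builds a canonicalized
+// string from the request (verb, selected standard headers, canonicalized
+// x-ms-* headers, and the canonicalized resource), signs it with the account
+// key using HMAC-SHA256, and places the result in the Authorization header.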
+ +import ( + "bytes" + "fmt" + "net/url" + "sort" + "strings" +) + +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/authentication-for-the-azure-storage-services + +type authentication string + +const ( + sharedKey authentication = "sharedKey" + sharedKeyForTable authentication = "sharedKeyTable" + sharedKeyLite authentication = "sharedKeyLite" + sharedKeyLiteForTable authentication = "sharedKeyLiteTable" + + // headers + headerAcceptCharset = "Accept-Charset" + headerAuthorization = "Authorization" + headerContentLength = "Content-Length" + headerDate = "Date" + headerXmsDate = "x-ms-date" + headerXmsVersion = "x-ms-version" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" + headerDataServiceVersion = "DataServiceVersion" + headerMaxDataServiceVersion = "MaxDataServiceVersion" + headerContentTransferEncoding = "Content-Transfer-Encoding" +) + +func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]string, auth authentication) (map[string]string, error) { + if !c.sasClient { + authHeader, err := c.getSharedKey(verb, url, headers, auth) + if err != nil { + return nil, err + } + headers[headerAuthorization] = authHeader + } + return headers, nil +} + +func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { + canRes, err := c.buildCanonicalizedResource(url, auth, false) + if err != nil { + return "", err + } + + canString, err := buildCanonicalizedString(verb, headers, canRes, auth) + if err != nil { + return "", err + } + return c.createAuthorizationHeader(canString, auth), nil +} + +func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) { + errMsg := "buildCanonicalizedResource error: %s" + u, err := url.Parse(uri) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + cr := bytes.NewBufferString("") + if c.accountName != StorageEmulatorAccountName || !sas { + cr.WriteString("/") + cr.WriteString(c.getCanonicalizedAccountName()) + } + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } + + params, err := url.ParseQuery(u.RawQuery) + if err != nil { + return "", fmt.Errorf(errMsg, err.Error()) + } + + // See https://github.com/Azure/azure-storage-net/blob/master/Lib/Common/Core/Util/AuthenticationUtility.cs#L277 + if auth == sharedKey { + if len(params) > 0 { + cr.WriteString("\n") + + keys := []string{} + for key := range params { + keys = append(keys, key) + } + sort.Strings(keys) + + completeParams := []string{} + for _, key := range keys { + if len(params[key]) > 1 { + sort.Strings(params[key]) + } + + completeParams = append(completeParams, fmt.Sprintf("%s:%s", key, strings.Join(params[key], ","))) + } + cr.WriteString(strings.Join(completeParams, "\n")) + } + } else { + // search for "comp" parameter, if exists then add it to canonicalizedresource + if v, ok := params["comp"]; ok { + cr.WriteString("?comp=" + v[0]) + } + } + + return string(cr.Bytes()), nil +} + +func (c *Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + +func buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string, auth authentication) (string, error) { + contentLength := headers[headerContentLength] + if contentLength == "0" { + contentLength = "" + } + date := headers[headerDate] + if v, ok := headers[headerXmsDate]; ok { + if auth == sharedKey || auth == sharedKeyLite { + date = "" + } else { + date = v + } + } + var canString string + switch auth { + case sharedKey: + canString = strings.Join([]string{ + verb, + headers[headerContentEncoding], + headers[headerContentLanguage], + contentLength, + headers[headerContentMD5], + headers[headerContentType], + date, + headers[headerIfModifiedSince], + headers[headerIfMatch], + headers[headerIfNoneMatch], + headers[headerIfUnmodifiedSince], + headers[headerRange], + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyForTable: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + canonicalizedResource, + }, "\n") + case sharedKeyLite: + canString = strings.Join([]string{ + verb, + headers[headerContentMD5], + headers[headerContentType], + date, + buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + case sharedKeyLiteForTable: + canString = strings.Join([]string{ + date, + canonicalizedResource, + }, "\n") + default: + return "", fmt.Errorf("%s authentication is not supported yet", auth) + } + return canString, nil +} + +func buildCanonicalizedHeader(headers map[string]string) string { + cm := make(map[string]string) + + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v + } + } + + if len(cm) == 0 { + return "" + } + + keys := []string{} + for key := range cm { + keys = append(keys, key) + } + + sort.Strings(keys) + + ch := bytes.NewBufferString("") + + for _, key := range keys { + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(cm[key]) + ch.WriteRune('\n') + } + + return strings.TrimSuffix(string(ch.Bytes()), "\n") +} + +func (c *Client) createAuthorizationHeader(canonicalizedString string, auth authentication) string { + signature := c.computeHmac256(canonicalizedString) + var key string + switch 
auth { + case sharedKey, sharedKeyForTable: + key = "SharedKey" + case sharedKeyLite, sharedKeyLiteForTable: + key = "SharedKeyLite" + } + return fmt.Sprintf("%s %s:%s", key, c.getCanonicalizedAccountName(), signature) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go new file mode 100644 index 00000000..1d224862 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -0,0 +1,632 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// A Blob is an entry in BlobListResponse. +type Blob struct { + Container *Container + Name string `xml:"Name"` + Snapshot time.Time `xml:"Snapshot"` + Properties BlobProperties `xml:"Properties"` + Metadata BlobMetadata `xml:"Metadata"` +} + +// PutBlobOptions includes the options any put blob operation +// (page, block, append) +type PutBlobOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// BlobMetadata is a set of custom name/value pairs. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179404.aspx +type BlobMetadata map[string]string + +type blobMetadataEntries struct { + Entries []blobMetadataEntry `xml:",any"` +} +type blobMetadataEntry struct { + XMLName xml.Name + Value string `xml:",chardata"` +} + +// UnmarshalXML converts the xml:Metadata into Metadata map +func (bm *BlobMetadata) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var entries blobMetadataEntries + if err := d.DecodeElement(&entries, &start); err != nil { + return err + } + for _, entry := range entries.Entries { + if *bm == nil { + *bm = make(BlobMetadata) + } + (*bm)[strings.ToLower(entry.XMLName.Local)] = entry.Value + } + return nil +} + +// MarshalXML implements the xml.Marshaler interface. It encodes +// metadata name/value pairs as they would appear in an Azure +// ListBlobs response. +func (bm BlobMetadata) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + entries := make([]blobMetadataEntry, 0, len(bm)) + for k, v := range bm { + entries = append(entries, blobMetadataEntry{ + XMLName: xml.Name{Local: http.CanonicalHeaderKey(k)}, + Value: v, + }) + } + return enc.EncodeElement(blobMetadataEntries{ + Entries: entries, + }, start) +} + +// BlobProperties contains various properties of a blob +// returned in various endpoints like ListBlobs or GetBlobProperties. 
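+//
+// Note: the `header:"x-ms-blob-..."` tags below are consumed by
+// headersFromStruct when properties are sent on write operations (see
+// SetProperties), while the xml tags are used when parsing list responses.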
+type BlobProperties struct { + LastModified TimeRFC1123 `xml:"Last-Modified"` + Etag string `xml:"Etag"` + ContentMD5 string `xml:"Content-MD5" header:"x-ms-blob-content-md5"` + ContentLength int64 `xml:"Content-Length"` + ContentType string `xml:"Content-Type" header:"x-ms-blob-content-type"` + ContentEncoding string `xml:"Content-Encoding" header:"x-ms-blob-content-encoding"` + CacheControl string `xml:"Cache-Control" header:"x-ms-blob-cache-control"` + ContentLanguage string `xml:"Cache-Language" header:"x-ms-blob-content-language"` + ContentDisposition string `xml:"Content-Disposition" header:"x-ms-blob-content-disposition"` + BlobType BlobType `xml:"BlobType"` + SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` + CopyID string `xml:"CopyId"` + CopyStatus string `xml:"CopyStatus"` + CopySource string `xml:"CopySource"` + CopyProgress string `xml:"CopyProgress"` + CopyCompletionTime TimeRFC1123 `xml:"CopyCompletionTime"` + CopyStatusDescription string `xml:"CopyStatusDescription"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + ServerEncrypted bool `xml:"ServerEncrypted"` + IncrementalCopy bool `xml:"IncrementalCopy"` +} + +// BlobType defines the type of the Azure Blob. +type BlobType string + +// Types of page blobs +const ( + BlobTypeBlock BlobType = "BlockBlob" + BlobTypePage BlobType = "PageBlob" + BlobTypeAppend BlobType = "AppendBlob" +) + +func (b *Blob) buildPath() string { + return b.Container.buildPath() + "/" + b.Name +} + +// Exists returns true if a blob with given name exists on the specified +// container of the storage account. +func (b *Blob) Exists() (bool, error) { + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), nil) + headers := b.Container.bsc.client.getStandardHeaders() + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, nil + } + } + return false, err +} + +// GetURL gets the canonical URL to the blob with the specified name in the +// specified container. +// This method does not create a publicly accessible URL if the blob or container +// is private and this method does not check if the blob exists. 
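+//
+// A hedged example of the shape of the result (account, container and blob
+// names are illustrative):
+//
+//	https://myaccount.blob.core.windows.net/mycontainer/events.log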
+func (b *Blob) GetURL() string { + container := b.Container.Name + if container == "" { + container = "$root" + } + return b.Container.bsc.client.getEndpoint(blobServiceName, pathForResource(container, b.Name), nil) +} + +// GetBlobRangeOptions includes the options for a get blob range operation +type GetBlobRangeOptions struct { + Range *BlobRange + GetRangeContentMD5 bool + *GetBlobOptions +} + +// GetBlobOptions includes the options for a get blob operation +type GetBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// BlobRange represents the bytes range to be get +type BlobRange struct { + Start uint64 + End uint64 +} + +func (br BlobRange) String() string { + if br.End == 0 { + return fmt.Sprintf("bytes=%d-", br.Start) + } + return fmt.Sprintf("bytes=%d-%d", br.Start, br.End) +} + +// Get returns a stream to read the blob. Caller must call both Read and Close() +// to correctly close the underlying connection. +// +// See the GetRange method for use with a Range header. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob +func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { + rangeOptions := GetBlobRangeOptions{ + GetBlobOptions: options, + } + resp, err := b.getRange(&rangeOptions) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + if err := b.writeProperties(resp.Header, true); err != nil { + return resp.Body, err + } + return resp.Body, nil +} + +// GetRange reads the specified range of a blob to a stream. The bytesRange +// string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec. +// Caller must call both Read and Close()// to correctly close the underlying +// connection. 
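+//
+// A hedged sketch reading the first 1 KiB of a blob:
+//
+//	rc, err := blob.GetRange(&storage.GetBlobRangeOptions{
+//		Range: &storage.BlobRange{Start: 0, End: 1023},
+//	})
+//	if err == nil {
+//		defer rc.Close()
+//	}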
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob +func (b *Blob) GetRange(options *GetBlobRangeOptions) (io.ReadCloser, error) { + resp, err := b.getRange(options) + if err != nil { + return nil, err + } + + if err := checkRespCode(resp, []int{http.StatusPartialContent}); err != nil { + return nil, err + } + // Content-Length header should not be updated, as the service returns the range length + // (which is not alwys the full blob length) + if err := b.writeProperties(resp.Header, false); err != nil { + return resp.Body, err + } + return resp.Body, nil +} + +func (b *Blob) getRange(options *GetBlobRangeOptions) (*http.Response, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + if options.Range != nil { + headers["Range"] = options.Range.String() + if options.GetRangeContentMD5 { + headers["x-ms-range-get-content-md5"] = "true" + } + } + if options.GetBlobOptions != nil { + headers = mergeHeaders(headers, headersFromStruct(*options.GetBlobOptions)) + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + return resp, err +} + +// SnapshotOptions includes the options for a snapshot blob operation +type SnapshotOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// CreateSnapshot creates a snapshot for a blob +// See https://msdn.microsoft.com/en-us/library/azure/ee691971.aspx +func (b *Blob) CreateSnapshot(options *SnapshotOptions) (snapshotTimestamp *time.Time, err error) { + params := url.Values{"comp": {"snapshot"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil || resp == nil { + return nil, err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { + return nil, err + } + + snapshotResponse := resp.Header.Get(http.CanonicalHeaderKey("x-ms-snapshot")) + if snapshotResponse != "" { + snapshotTimestamp, err := time.Parse(time.RFC3339, snapshotResponse) + if err != nil { + return nil, err + } + return &snapshotTimestamp, nil + } + + return nil, errors.New("Snapshot not created") +} + +// GetBlobPropertiesOptions includes the options for a get blob properties operation +type GetBlobPropertiesOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetProperties provides 
various information about the specified blob. +// See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx +func (b *Blob) GetProperties(options *GetBlobPropertiesOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodHead, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + return b.writeProperties(resp.Header, true) +} + +func (b *Blob) writeProperties(h http.Header, includeContentLen bool) error { + var err error + + contentLength := b.Properties.ContentLength + if includeContentLen { + contentLengthStr := h.Get("Content-Length") + if contentLengthStr != "" { + contentLength, err = strconv.ParseInt(contentLengthStr, 0, 64) + if err != nil { + return err + } + } + } + + var sequenceNum int64 + sequenceNumStr := h.Get("x-ms-blob-sequence-number") + if sequenceNumStr != "" { + sequenceNum, err = strconv.ParseInt(sequenceNumStr, 0, 64) + if err != nil { + return err + } + } + + lastModified, err := getTimeFromHeaders(h, "Last-Modified") + if err != nil { + return err + } + + copyCompletionTime, err := getTimeFromHeaders(h, "x-ms-copy-completion-time") + if err != nil { + return err + } + + b.Properties = BlobProperties{ + LastModified: TimeRFC1123(*lastModified), + Etag: h.Get("Etag"), + ContentMD5: h.Get("Content-MD5"), + ContentLength: contentLength, + ContentEncoding: h.Get("Content-Encoding"), + ContentType: h.Get("Content-Type"), + ContentDisposition: h.Get("Content-Disposition"), + CacheControl: h.Get("Cache-Control"), + ContentLanguage: h.Get("Content-Language"), + SequenceNumber: sequenceNum, + CopyCompletionTime: TimeRFC1123(*copyCompletionTime), + CopyStatusDescription: h.Get("x-ms-copy-status-description"), + CopyID: h.Get("x-ms-copy-id"), + CopyProgress: h.Get("x-ms-copy-progress"), + CopySource: h.Get("x-ms-copy-source"), + CopyStatus: h.Get("x-ms-copy-status"), + BlobType: BlobType(h.Get("x-ms-blob-type")), + LeaseStatus: h.Get("x-ms-lease-status"), + LeaseState: h.Get("x-ms-lease-state"), + } + b.writeMetadata(h) + return nil +} + +// SetBlobPropertiesOptions contains various properties of a blob and is an entry +// in SetProperties +type SetBlobPropertiesOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + Origin string `header:"Origin"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + SequenceNumberAction *SequenceNumberAction + RequestID string `header:"x-ms-client-request-id"` +} + +// SequenceNumberAction defines how the blob's sequence number should be modified +type SequenceNumberAction string + +// Options for sequence number action +const ( + SequenceNumberActionMax SequenceNumberAction = "max" + SequenceNumberActionUpdate SequenceNumberAction = "update" + SequenceNumberActionIncrement SequenceNumberAction = "increment" +) + +// SetProperties replaces the BlobHeaders for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. 
All keys +// are returned in lower case by GetBlobProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Blob-Properties +func (b *Blob) SetProperties(options *SetBlobPropertiesOptions) error { + params := url.Values{"comp": {"properties"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + if b.Properties.BlobType == BlobTypePage { + headers = addToHeaders(headers, "x-ms-blob-content-length", fmt.Sprintf("%v", b.Properties.ContentLength)) + if options != nil && options.SequenceNumberAction != nil { + headers = addToHeaders(headers, "x-ms-sequence-number-action", string(*options.SequenceNumberAction)) + if *options.SequenceNumberAction != SequenceNumberActionIncrement { + headers = addToHeaders(headers, "x-ms-blob-sequence-number", fmt.Sprintf("%v", b.Properties.SequenceNumber)) + } + } + } + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} + +// SetBlobMetadataOptions includes the options for a set blob metadata operation +type SetBlobMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b *Blob) SetMetadata(options *SetBlobMetadataOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := b.Container.bsc.client.getStandardHeaders() + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} + +// GetBlobMetadataOptions includes the options for a get blob metadata operation +type GetBlobMetadataOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetMetadata returns all user-defined metadata for the specified blob. 
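+//
+// A hedged round-trip sketch (the key/value pair is illustrative):
+//
+//	blob.Metadata = storage.BlobMetadata{"owner": "ops"}
+//	if err := blob.SetMetadata(nil); err != nil {
+//		return err
+//	}
+//	err := blob.GetMetadata(nil) // repopulates blob.Metadata from the service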
+// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx +func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { + params := url.Values{"comp": {"metadata"}} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + + b.writeMetadata(resp.Header) + return nil +} + +func (b *Blob) writeMetadata(h http.Header) { + b.Metadata = BlobMetadata(writeMetadata(h)) +} + +// DeleteBlobOptions includes the options for a delete blob operation +type DeleteBlobOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + DeleteSnapshots *bool + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the given blob from the specified container. +// If the blob does not exists at the time of the Delete Blob operation, it +// returns error. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) Delete(options *DeleteBlobOptions) error { + resp, err := b.delete(options) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the given blob from the specified container If the +// blob is deleted with this call, returns true. Otherwise returns false. 
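+//
+// A hedged sketch:
+//
+//	deleted, err := blob.DeleteIfExists(nil)
+//	// deleted == false with a nil error means the blob was already gone.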
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Blob +func (b *Blob) DeleteIfExists(options *DeleteBlobOptions) (bool, error) { + resp, err := b.delete(options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (b *Blob) delete(options *DeleteBlobOptions) (*http.Response, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + if options.DeleteSnapshots != nil { + if *options.DeleteSnapshots { + headers["x-ms-delete-snapshots"] = "include" + } else { + headers["x-ms-delete-snapshots"] = "only" + } + } + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + return b.Container.bsc.client.exec(http.MethodDelete, uri, headers, nil, b.Container.bsc.auth) +} + +// helper method to construct the path to either a blob or container +func pathForResource(container, name string) string { + if name != "" { + return fmt.Sprintf("/%s/%s", container, name) + } + return fmt.Sprintf("/%s", container) +} + +func (b *Blob) respondCreation(resp *http.Response, bt BlobType) error { + defer drainRespBody(resp) + err := checkRespCode(resp, []int{http.StatusCreated}) + if err != nil { + return err + } + b.Properties.BlobType = bt + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go new file mode 100644 index 00000000..62e461a5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobsasuri.go @@ -0,0 +1,179 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/url" + "strings" + "time" +) + +// OverrideHeaders defines overridable response heaedrs in +// a request using a SAS URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type OverrideHeaders struct { + CacheControl string + ContentDisposition string + ContentEncoding string + ContentLanguage string + ContentType string +} + +// BlobSASOptions are options to construct a blob SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type BlobSASOptions struct { + BlobServiceSASPermissions + OverrideHeaders + SASOptions +} + +// BlobServiceSASPermissions includes the available permissions for +// blob service SAS URI. 
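+//
+// A hedged sketch of read-only SAS options for GetSASURI below (the expiry
+// is illustrative):
+//
+//	opts := storage.BlobSASOptions{}
+//	opts.Read = true
+//	opts.Expiry = time.Now().Add(time.Hour)
+//	sasURL, err := blob.GetSASURI(opts)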
+type BlobServiceSASPermissions struct { + Read bool + Add bool + Create bool + Write bool + Delete bool +} + +func (p BlobServiceSASPermissions) buildString() string { + permissions := "" + if p.Read { + permissions += "r" + } + if p.Add { + permissions += "a" + } + if p.Create { + permissions += "c" + } + if p.Write { + permissions += "w" + } + if p.Delete { + permissions += "d" + } + return permissions +} + +// GetSASURI creates an URL to the blob which contains the Shared +// Access Signature with the specified options. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (b *Blob) GetSASURI(options BlobSASOptions) (string, error) { + uri := b.GetURL() + signedResource := "b" + canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(uri, b.Container.bsc.auth, true) + if err != nil { + return "", err + } + + permissions := options.BlobServiceSASPermissions.buildString() + return b.Container.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +func (c *Client) blobAndFileSASURI(options SASOptions, uri, permissions, canonicalizedResource, signedResource string, headers OverrideHeaders) (string, error) { + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + expiry := options.Expiry.UTC().Format(time.RFC3339) + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). + canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + canonicalizedResource, err := url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + + protocols := "" + if options.UseHTTPS { + protocols = "https" + } + stringToSign, err := blobSASStringToSign(permissions, start, expiry, canonicalizedResource, options.Identifier, options.IP, protocols, c.apiVersion, signedResource, "", headers) + if err != nil { + return "", err + } + + sig := c.computeHmac256(stringToSign) + sasParams := url.Values{ + "sv": {c.apiVersion}, + "se": {expiry}, + "sr": {signedResource}, + "sp": {permissions}, + "sig": {sig}, + } + + if start != "" { + sasParams.Add("st", start) + } + + if c.apiVersion >= "2015-04-05" { + if protocols != "" { + sasParams.Add("spr", protocols) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) + } + } + + // Add override response hedaers + addQueryParameter(sasParams, "rscc", headers.CacheControl) + addQueryParameter(sasParams, "rscd", headers.ContentDisposition) + addQueryParameter(sasParams, "rsce", headers.ContentEncoding) + addQueryParameter(sasParams, "rscl", headers.ContentLanguage) + addQueryParameter(sasParams, "rsct", headers.ContentType) + + sasURL, err := url.Parse(uri) + if err != nil { + return "", err + } + sasURL.RawQuery = sasParams.Encode() + return sasURL.String(), nil +} + +func blobSASStringToSign(signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime string, headers OverrideHeaders) (string, error) { + rscc := headers.CacheControl + rscd := headers.ContentDisposition + rsce := headers.ContentEncoding + rscl := headers.ContentLanguage + rsct := headers.ContentType + + if signedVersion >= "2015-02-21" { + canonicalizedResource = "/blob" + canonicalizedResource + } + + // 
https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas + if signedVersion >= "2018-11-09" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, signedResource, signedSnapshotTime, rscc, rscd, rsce, rscl, rsct), nil + } + + // https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12 + if signedVersion >= "2015-04-05" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedIP, protocols, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + if signedVersion >= "2013-08-15" { + return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil + } + + return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go new file mode 100644 index 00000000..02fa5929 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -0,0 +1,186 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" +) + +// BlobStorageClient contains operations for Microsoft Azure Blob Storage +// Service. +type BlobStorageClient struct { + client Client + auth authentication +} + +// GetServiceProperties gets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-blob-service-properties +func (b *BlobStorageClient) GetServiceProperties() (*ServiceProperties, error) { + return b.client.getServiceProperties(blobServiceName, b.auth) +} + +// SetServiceProperties sets the properties of your storage account's blob service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-blob-service-properties +func (b *BlobStorageClient) SetServiceProperties(props ServiceProperties) error { + return b.client.setServiceProperties(props, blobServiceName, b.auth) +} + +// ListContainersParameters defines the set of customizable parameters to make a +// List Containers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ListContainersParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// GetContainerReference returns a Container object for the specified container name. 
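+//
+// This performs no network call; it only builds a local reference. A hedged
+// sketch ("mycontainer" is illustrative):
+//
+//	cnt := bsc.GetContainerReference("mycontainer")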
+func (b *BlobStorageClient) GetContainerReference(name string) *Container { + return &Container{ + bsc: b, + Name: name, + } +} + +// GetContainerReferenceFromSASURI returns a Container object for the specified +// container SASURI +func GetContainerReferenceFromSASURI(sasuri url.URL) (*Container, error) { + path := strings.Split(sasuri.Path, "/") + if len(path) <= 1 { + return nil, fmt.Errorf("could not find a container in URI: %s", sasuri.String()) + } + c, err := newSASClientFromURL(&sasuri) + if err != nil { + return nil, err + } + cli := c.GetBlobService() + return &Container{ + bsc: &cli, + Name: path[1], + sasuri: sasuri, + }, nil +} + +// ListContainers returns the list of containers in a storage account along with +// pagination token and other response details. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*ContainerListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + uri := b.client.getEndpoint(blobServiceName, "", q) + headers := b.client.getStandardHeaders() + + type ContainerAlias struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata BlobMetadata + sasuri url.URL + } + type ContainerListResponseAlias struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []ContainerAlias `xml:"Containers>Container"` + } + + var outAlias ContainerListResponseAlias + resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &outAlias) + if err != nil { + return nil, err + } + + out := ContainerListResponse{ + XMLName: outAlias.XMLName, + Xmlns: outAlias.Xmlns, + Prefix: outAlias.Prefix, + Marker: outAlias.Marker, + NextMarker: outAlias.NextMarker, + MaxResults: outAlias.MaxResults, + Containers: make([]Container, len(outAlias.Containers)), + } + for i, cnt := range outAlias.Containers { + out.Containers[i] = Container{ + bsc: &b, + Name: cnt.Name, + Properties: cnt.Properties, + Metadata: map[string]string(cnt.Metadata), + sasuri: cnt.sasuri, + } + } + + return &out, err +} + +func (p ListContainersParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func writeMetadata(h http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range h { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
+ k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go new file mode 100644 index 00000000..c9c62d79 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blockblob.go @@ -0,0 +1,270 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// BlockListType is used to filter out types of blocks in a Get Blocks List call +// for a block blob. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all +// block types. +type BlockListType string + +// Filters for listing blocks in block blobs +const ( + BlockListTypeAll BlockListType = "all" + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" +) + +// Maximum sizes (per REST API) for various concepts +const ( + MaxBlobBlockSize = 100 * 1024 * 1024 + MaxBlobPageSize = 4 * 1024 * 1024 +) + +// BlockStatus defines states a block for a block blob can +// be in. +type BlockStatus string + +// List of statuses that can be used to refer to a block in a block list +const ( + BlockStatusUncommitted BlockStatus = "Uncommitted" + BlockStatusCommitted BlockStatus = "Committed" + BlockStatusLatest BlockStatus = "Latest" +) + +// Block is used to create Block entities for Put Block List +// call. +type Block struct { + ID string + Status BlockStatus +} + +// BlockListResponse contains the response fields from Get Block List call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx +type BlockListResponse struct { + XMLName xml.Name `xml:"BlockList"` + CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` + UncommittedBlocks []BlockResponse `xml:"UncommittedBlocks>Block"` +} + +// BlockResponse contains the block information returned +// in the GetBlockListCall. +type BlockResponse struct { + Name string `xml:"Name"` + Size int64 `xml:"Size"` +} + +// CreateBlockBlob initializes an empty block blob with no blocks. +// +// See CreateBlockBlobFromReader for more info on creating blobs. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { + return b.CreateBlockBlobFromReader(nil, options) +} + +// CreateBlockBlobFromReader initializes a block blob using data from +// reader. Size must be the number of bytes read from reader. To +// create an empty blob, use size==0 and reader==nil. 
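+//
+// A hedged sketch (the payload is illustrative):
+//
+//	data := bytes.NewReader([]byte("hello, blob"))
+//	err := blob.CreateBlockBlobFromReader(data, nil)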
+// +// Any headers set in blob.Properties or metadata in blob.Metadata +// will be set on the blob. +// +// The API rejects requests with size > 256 MiB (but this limit is not +// checked by the SDK). To write a larger blob, use CreateBlockBlob, +// PutBlock, and PutBlockList. +// +// To create a blob from scratch, call container.GetBlobReference() to +// get an empty blob, fill in blob.Properties and blob.Metadata as +// appropriate then call this method. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypeBlock) + + headers["Content-Length"] = "0" + var n int64 + var err error + if blob != nil { + type lener interface { + Len() int + } + // TODO(rjeczalik): handle io.ReadSeeker, in case blob is *os.File etc. + if l, ok := blob.(lener); ok { + n = int64(l.Len()) + } else { + var buf bytes.Buffer + n, err = io.Copy(&buf, blob) + if err != nil { + return err + } + blob = &buf + } + + headers["Content-Length"] = strconv.FormatInt(n, 10) + } + b.Properties.ContentLength = n + + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + +// PutBlockOptions includes the options for a put block operation +type PutBlockOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + ContentMD5 string `header:"Content-MD5"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlock saves the given data chunk to the specified block blob with +// given ID. +// +// The API rejects chunks larger than 100 MiB (but this limit is not +// checked by the SDK). +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlock(blockID string, chunk []byte, options *PutBlockOptions) error { + return b.PutBlockWithLength(blockID, uint64(len(chunk)), bytes.NewReader(chunk), options) +} + +// PutBlockWithLength saves the given data stream of exactly specified size to +// the block blob with given ID. It is an alternative to PutBlocks where data +// comes as stream but the length is known in advance. +// +// The API rejects requests with size > 100 MiB (but this limit is not +// checked by the SDK). 
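+//
+// A hedged sketch of the manual block workflow this method belongs to (the
+// block ID is illustrative; the service expects base64 block IDs of equal
+// length within a blob):
+//
+//	id := base64.StdEncoding.EncodeToString([]byte("block-000001"))
+//	if err := blob.PutBlock(id, chunk, nil); err != nil {
+//		return err
+//	}
+//	return blob.PutBlockList([]storage.Block{{ID: id, Status: storage.BlockStatusLatest}}, nil)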
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block +func (b *Blob) PutBlockWithLength(blockID string, size uint64, blob io.Reader, options *PutBlockOptions) error { + query := url.Values{ + "comp": {"block"}, + "blockid": {blockID}, + } + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", size) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), query) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, blob, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypeBlock) +} + +// PutBlockListOptions includes the options for a put block list operation +type PutBlockListOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// PutBlockList saves list of blocks to the specified block blob. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Block-List +func (b *Blob) PutBlockList(blocks []Block, options *PutBlockListOptions) error { + params := url.Values{"comp": {"blocklist"}} + blockListXML := prepareBlockListRequest(blocks) + headers := b.Container.bsc.client.getStandardHeaders() + headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML)) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, strings.NewReader(blockListXML), b.Container.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// GetBlockListOptions includes the options for a get block list operation +type GetBlockListOptions struct { + Timeout uint + Snapshot *time.Time + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetBlockList retrieves list of blocks in the specified block blob. 
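+//
+// A hedged sketch listing only committed blocks:
+//
+//	resp, err := blob.GetBlockList(storage.BlockListTypeCommitted, nil)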
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Block-List +func (b *Blob) GetBlockList(blockType BlockListType, options *GetBlockListOptions) (BlockListResponse, error) { + params := url.Values{ + "comp": {"blocklist"}, + "blocklisttype": {string(blockType)}, + } + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out BlockListResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go new file mode 100644 index 00000000..99702eff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -0,0 +1,991 @@ +// Package storage provides clients for Microsoft Azure Storage Services. +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "mime/multipart" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/version" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" +) + +const ( + // DefaultBaseURL is the domain name used for storage requests in the + // public cloud when a default client is created. + DefaultBaseURL = "core.windows.net" + + // DefaultAPIVersion is the Azure Storage API version string used when a + // basic client is created. 
+ DefaultAPIVersion = "2018-03-28" + + defaultUseHTTPS = true + defaultRetryAttempts = 5 + defaultRetryDuration = time.Second * 5 + + // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountName = "devstoreaccount1" + + // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + + blobServiceName = "blob" + tableServiceName = "table" + queueServiceName = "queue" + fileServiceName = "file" + + storageEmulatorBlob = "127.0.0.1:10000" + storageEmulatorTable = "127.0.0.1:10002" + storageEmulatorQueue = "127.0.0.1:10001" + + userAgentHeader = "User-Agent" + + userDefinedMetadataHeaderPrefix = "x-ms-meta-" + + connectionStringAccountName = "accountname" + connectionStringAccountKey = "accountkey" + connectionStringEndpointSuffix = "endpointsuffix" + connectionStringEndpointProtocol = "defaultendpointsprotocol" + + connectionStringBlobEndpoint = "blobendpoint" + connectionStringFileEndpoint = "fileendpoint" + connectionStringQueueEndpoint = "queueendpoint" + connectionStringTableEndpoint = "tableendpoint" + connectionStringSAS = "sharedaccesssignature" +) + +var ( + validStorageAccount = regexp.MustCompile("^[0-9a-z]{3,24}$") + defaultValidStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +// Sender sends a request +type Sender interface { + Send(*Client, *http.Request) (*http.Response, error) +} + +// DefaultSender is the default sender for the client. It implements +// an automatic retry strategy. +type DefaultSender struct { + RetryAttempts int + RetryDuration time.Duration + ValidStatusCodes []int + attempts int // used for testing +} + +// Send is the default retry strategy in the client +func (ds *DefaultSender) Send(c *Client, req *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(req) + for attempts := 0; attempts < ds.RetryAttempts; attempts++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = c.HTTPClient.Do(rr.Request()) + if err != nil || !autorest.ResponseHasStatusCode(resp, ds.ValidStatusCodes...) { + return resp, err + } + drainRespBody(resp) + autorest.DelayForBackoff(ds.RetryDuration, attempts, req.Cancel) + ds.attempts = attempts + } + ds.attempts++ + return resp, err +} + +// Client is the object that needs to be constructed to perform +// operations on the storage account. +type Client struct { + // HTTPClient is the http.Client used to initiate API + // requests. http.DefaultClient is used when creating a + // client. + HTTPClient *http.Client + + // Sender is an interface that sends the request. Clients are + // created with a DefaultSender. The DefaultSender has an + // automatic retry strategy built in. The Sender can be customized. + Sender Sender + + accountName string + accountKey []byte + useHTTPS bool + UseSharedKeyLite bool + baseURL string + apiVersion string + userAgent string + sasClient bool + accountSASToken url.Values +} + +type odataResponse struct { + resp *http.Response + odata odataErrorWrapper +} + +// AzureStorageServiceError contains fields of the error response from +// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx +// Some fields might be specific to certain calls. 
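+//
+// A hedged sketch of inspecting a failed call, assuming the error is
+// returned as this value type:
+//
+//	if serr, ok := err.(storage.AzureStorageServiceError); ok {
+//		fmt.Printf("HTTP %d: %s\n", serr.StatusCode, serr.Code)
+//	}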
+type AzureStorageServiceError struct { + Code string `xml:"Code"` + Message string `xml:"Message"` + AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` + QueryParameterName string `xml:"QueryParameterName"` + QueryParameterValue string `xml:"QueryParameterValue"` + Reason string `xml:"Reason"` + Lang string + StatusCode int + RequestID string + Date string + APIVersion string +} + +type odataErrorMessage struct { + Lang string `json:"lang"` + Value string `json:"value"` +} + +type odataError struct { + Code string `json:"code"` + Message odataErrorMessage `json:"message"` +} + +type odataErrorWrapper struct { + Err odataError `json:"odata.error"` +} + +// UnexpectedStatusCodeError is returned when a storage service responds with neither an error +// nor with an HTTP status code indicating success. +type UnexpectedStatusCodeError struct { + allowed []int + got int + inner error +} + +func (e UnexpectedStatusCodeError) Error() string { + s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) } + + got := s(e.got) + expected := []string{} + for _, v := range e.allowed { + expected = append(expected, s(v)) + } + return fmt.Sprintf("storage: status code from service response is %s; was expecting %s. Inner error: %+v", got, strings.Join(expected, " or "), e.inner) +} + +// Got is the actual status code returned by Azure. +func (e UnexpectedStatusCodeError) Got() int { + return e.got +} + +// Inner returns any inner error info. +func (e UnexpectedStatusCodeError) Inner() error { + return e.inner +} + +// NewClientFromConnectionString creates a Client from the connection string. +func NewClientFromConnectionString(input string) (Client, error) { + // build a map of connection string key/value pairs + parts := map[string]string{} + for _, pair := range strings.Split(input, ";") { + if pair == "" { + continue + } + + equalDex := strings.IndexByte(pair, '=') + if equalDex <= 0 { + return Client{}, fmt.Errorf("Invalid connection segment %q", pair) + } + + value := strings.TrimSpace(pair[equalDex+1:]) + key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) + parts[key] = value + } + + // TODO: validate parameter sets? + + if parts[connectionStringAccountName] == StorageEmulatorAccountName { + return NewEmulatorClient() + } + + if parts[connectionStringSAS] != "" { + endpoint := "" + if parts[connectionStringBlobEndpoint] != "" { + endpoint = parts[connectionStringBlobEndpoint] + } else if parts[connectionStringFileEndpoint] != "" { + endpoint = parts[connectionStringFileEndpoint] + } else if parts[connectionStringQueueEndpoint] != "" { + endpoint = parts[connectionStringQueueEndpoint] + } else { + endpoint = parts[connectionStringTableEndpoint] + } + + return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS]) + } + + useHTTPS := defaultUseHTTPS + if parts[connectionStringEndpointProtocol] != "" { + useHTTPS = parts[connectionStringEndpointProtocol] == "https" + } + + return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey], + parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS) +} + +// NewBasicClient constructs a Client with given storage service name and +// key. 
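+//
+// A hedged sketch (the account name and base64 key are illustrative, and
+// GetBlobService is assumed from elsewhere in this package):
+//
+//	cli, err := storage.NewBasicClient("myaccount", "bXlmYWtla2V5")
+//	if err != nil {
+//		return err
+//	}
+//	bsc := cli.GetBlobService()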
+func NewBasicClient(accountName, accountKey string) (Client, error) {
+	if accountName == StorageEmulatorAccountName {
+		return NewEmulatorClient()
+	}
+	return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewBasicClientOnSovereignCloud constructs a Client with given storage service name and
+// key in the referenced cloud.
+func NewBasicClientOnSovereignCloud(accountName, accountKey string, env azure.Environment) (Client, error) {
+	if accountName == StorageEmulatorAccountName {
+		return NewEmulatorClient()
+	}
+	return NewClient(accountName, accountKey, env.StorageEndpointSuffix, DefaultAPIVersion, defaultUseHTTPS)
+}
+
+// NewEmulatorClient constructs a Client intended to only work with the Azure
+// Storage Emulator.
+func NewEmulatorClient() (Client, error) {
+	return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false)
+}
+
+// NewClient constructs a Client. This should be used if the caller wants
+// to specify whether to use HTTPS, a specific REST API version or a custom
+// storage endpoint other than Azure Public Cloud.
+func NewClient(accountName, accountKey, serviceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
+	var c Client
+	if !IsValidStorageAccount(accountName) {
+		return c, fmt.Errorf("azure: account name is not valid: it must be between 3 and 24 characters, and may only contain numbers and lowercase letters: %v", accountName)
+	} else if accountKey == "" {
+		return c, fmt.Errorf("azure: account key required")
+	} else if serviceBaseURL == "" {
+		return c, fmt.Errorf("azure: base storage service url required")
+	}
+
+	key, err := base64.StdEncoding.DecodeString(accountKey)
+	if err != nil {
+		return c, fmt.Errorf("azure: malformed storage account key: %v", err)
+	}
+
+	c = Client{
+		HTTPClient:       http.DefaultClient,
+		accountName:      accountName,
+		accountKey:       key,
+		useHTTPS:         useHTTPS,
+		baseURL:          serviceBaseURL,
+		apiVersion:       apiVersion,
+		sasClient:        false,
+		UseSharedKeyLite: false,
+		Sender: &DefaultSender{
+			RetryAttempts:    defaultRetryAttempts,
+			ValidStatusCodes: defaultValidStatusCodes,
+			RetryDuration:    defaultRetryDuration,
+		},
+	}
+	c.userAgent = c.getDefaultUserAgent()
+	return c, nil
+}
+
+// IsValidStorageAccount checks if the storage account name is valid.
+// See https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account
+func IsValidStorageAccount(account string) bool {
+	return validStorageAccount.MatchString(account)
+}
+
+// NewAccountSASClient constructs a client that uses accountSAS authorization
+// for its operations.
+func NewAccountSASClient(account string, token url.Values, env azure.Environment) Client {
+	return newSASClient(account, env.StorageEndpointSuffix, token)
+}
+
+// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization
+// for its operations using the specified endpoint and SAS token.
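+//
+// For example (the endpoint and token are illustrative values only):
+//
+//	cli, err := storage.NewAccountSASClientFromEndpointToken(
+//		"https://myaccount.blob.core.windows.net",
+//		"sv=2018-03-28&ss=b&srt=sco&sp=rl&se=2019-01-01&sig=REDACTED")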
+func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return Client{}, err
+	}
+	_, err = url.ParseQuery(sasToken)
+	if err != nil {
+		return Client{}, err
+	}
+	u.RawQuery = sasToken
+	return newSASClientFromURL(u)
+}
+
+func newSASClient(accountName, baseURL string, sasToken url.Values) Client {
+	c := Client{
+		HTTPClient: http.DefaultClient,
+		apiVersion: DefaultAPIVersion,
+		sasClient:  true,
+		Sender: &DefaultSender{
+			RetryAttempts:    defaultRetryAttempts,
+			ValidStatusCodes: defaultValidStatusCodes,
+			RetryDuration:    defaultRetryDuration,
+		},
+		accountName:     accountName,
+		baseURL:         baseURL,
+		accountSASToken: sasToken,
+		useHTTPS:        defaultUseHTTPS,
+	}
+	c.userAgent = c.getDefaultUserAgent()
+	// Get API version and protocol from token
+	c.apiVersion = sasToken.Get("sv")
+	if spr := sasToken.Get("spr"); spr != "" {
+		c.useHTTPS = spr == "https"
+	}
+	return c
+}
+
+func newSASClientFromURL(u *url.URL) (Client, error) {
+	// the host name will look something like this
+	// - foo.blob.core.windows.net
+	// "foo" is the account name
+	// "core.windows.net" is the baseURL
+
+	// find the first dot to get account name
+	i1 := strings.IndexByte(u.Host, '.')
+	if i1 < 0 {
+		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host)
+	}
+
+	// now find the second dot to get the base URL
+	i2 := strings.IndexByte(u.Host[i1+1:], '.')
+	if i2 < 0 {
+		return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:])
+	}
+
+	sasToken := u.Query()
+	c := newSASClient(u.Host[:i1], u.Host[i1+i2+2:], sasToken)
+	if spr := sasToken.Get("spr"); spr == "" {
+		// infer from URL if not in the query params set
+		c.useHTTPS = u.Scheme == "https"
+	}
+	return c, nil
+}
+
+func (c Client) isServiceSASClient() bool {
+	return c.sasClient && c.accountSASToken == nil
+}
+
+func (c Client) isAccountSASClient() bool {
+	return c.sasClient && c.accountSASToken != nil
+}
+
+func (c Client) getDefaultUserAgent() string {
+	return fmt.Sprintf("Go/%s (%s-%s) azure-storage-go/%s api-version/%s",
+		runtime.Version(),
+		runtime.GOARCH,
+		runtime.GOOS,
+		version.Number,
+		c.apiVersion,
+	)
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.userAgent = fmt.Sprintf("%s %s", c.userAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.userAgent)
+}
+
+// protectUserAgent is used in funcs that include extraheaders as a parameter.
+// It prevents the User-Agent header from being overwritten; if extraheaders
+// happens to include one, its value is appended to the current User-Agent
+// instead. Use it before getStandardHeaders.
+func (c *Client) protectUserAgent(extraheaders map[string]string) map[string]string {
+	if v, ok := extraheaders[userAgentHeader]; ok {
+		c.AddToUserAgent(v)
+		delete(extraheaders, userAgentHeader)
+	}
+	return extraheaders
+}
+
+func (c Client) getBaseURL(service string) *url.URL {
+	scheme := "http"
+	if c.useHTTPS {
+		scheme = "https"
+	}
+	host := ""
+	if c.accountName == StorageEmulatorAccountName {
+		switch service {
+		case blobServiceName:
+			host = storageEmulatorBlob
+		case tableServiceName:
+			host = storageEmulatorTable
+		case queueServiceName:
+			host = storageEmulatorQueue
+		}
+	} else {
+		host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
+	}
+
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+}
+
+func (c Client) getEndpoint(service, path string, params url.Values) string {
+	u := c.getBaseURL(service)
+
+	// API doesn't accept path segments not starting with '/'
+	if !strings.HasPrefix(path, "/") {
+		path = fmt.Sprintf("/%v", path)
+	}
+
+	if c.accountName == StorageEmulatorAccountName {
+		path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path)
+	}
+
+	u.Path = path
+	u.RawQuery = params.Encode()
+	return u.String()
+}
+
+// AccountSASTokenOptions includes options for constructing
+// an account SAS token.
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+type AccountSASTokenOptions struct {
+	APIVersion    string
+	Services      Services
+	ResourceTypes ResourceTypes
+	Permissions   Permissions
+	Start         time.Time
+	Expiry        time.Time
+	IP            string
+	UseHTTPS      bool
+}
+
+// Services specify services accessible with an account SAS.
+type Services struct {
+	Blob  bool
+	Queue bool
+	Table bool
+	File  bool
+}
+
+// ResourceTypes specify the resources accessible with an
+// account SAS.
+type ResourceTypes struct {
+	Service   bool
+	Container bool
+	Object    bool
+}
+
+// Permissions specifies permissions for an accountSAS.
+type Permissions struct {
+	Read    bool
+	Write   bool
+	Delete  bool
+	List    bool
+	Add     bool
+	Create  bool
+	Update  bool
+	Process bool
+}
+
+// GetAccountSASToken creates an account SAS token
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-an-account-sas
+func (c Client) GetAccountSASToken(options AccountSASTokenOptions) (url.Values, error) {
+	if options.APIVersion == "" {
+		options.APIVersion = c.apiVersion
+	}
+
+	if options.APIVersion < "2015-04-05" {
+		return url.Values{}, fmt.Errorf("account SAS does not support API versions prior to 2015-04-05. 
API version : %s", options.APIVersion) + } + + // build services string + services := "" + if options.Services.Blob { + services += "b" + } + if options.Services.Queue { + services += "q" + } + if options.Services.Table { + services += "t" + } + if options.Services.File { + services += "f" + } + + // build resources string + resources := "" + if options.ResourceTypes.Service { + resources += "s" + } + if options.ResourceTypes.Container { + resources += "c" + } + if options.ResourceTypes.Object { + resources += "o" + } + + // build permissions string + permissions := "" + if options.Permissions.Read { + permissions += "r" + } + if options.Permissions.Write { + permissions += "w" + } + if options.Permissions.Delete { + permissions += "d" + } + if options.Permissions.List { + permissions += "l" + } + if options.Permissions.Add { + permissions += "a" + } + if options.Permissions.Create { + permissions += "c" + } + if options.Permissions.Update { + permissions += "u" + } + if options.Permissions.Process { + permissions += "p" + } + + // build start time, if exists + start := "" + if options.Start != (time.Time{}) { + start = options.Start.UTC().Format(time.RFC3339) + } + + // build expiry time + expiry := options.Expiry.UTC().Format(time.RFC3339) + + protocol := "https,http" + if options.UseHTTPS { + protocol = "https" + } + + stringToSign := strings.Join([]string{ + c.accountName, + permissions, + services, + resources, + start, + expiry, + options.IP, + protocol, + options.APIVersion, + "", + }, "\n") + signature := c.computeHmac256(stringToSign) + + sasParams := url.Values{ + "sv": {options.APIVersion}, + "ss": {services}, + "srt": {resources}, + "sp": {permissions}, + "se": {expiry}, + "spr": {protocol}, + "sig": {signature}, + } + if start != "" { + sasParams.Add("st", start) + } + if options.IP != "" { + sasParams.Add("sip", options.IP) + } + + return sasParams, nil +} + +// GetBlobService returns a BlobStorageClient which can operate on the blob +// service of the storage account. +func (c Client) GetBlobService() BlobStorageClient { + b := BlobStorageClient{ + client: c, + } + b.client.AddToUserAgent(blobServiceName) + b.auth = sharedKey + if c.UseSharedKeyLite { + b.auth = sharedKeyLite + } + return b +} + +// GetQueueService returns a QueueServiceClient which can operate on the queue +// service of the storage account. +func (c Client) GetQueueService() QueueServiceClient { + q := QueueServiceClient{ + client: c, + } + q.client.AddToUserAgent(queueServiceName) + q.auth = sharedKey + if c.UseSharedKeyLite { + q.auth = sharedKeyLite + } + return q +} + +// GetTableService returns a TableServiceClient which can operate on the table +// service of the storage account. +func (c Client) GetTableService() TableServiceClient { + t := TableServiceClient{ + client: c, + } + t.client.AddToUserAgent(tableServiceName) + t.auth = sharedKeyForTable + if c.UseSharedKeyLite { + t.auth = sharedKeyLiteForTable + } + return t +} + +// GetFileService returns a FileServiceClient which can operate on the file +// service of the storage account. 
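+//
+// As with the other Get*Service helpers above, the returned client is
+// normally the starting point for resource references; a sketch (assuming
+// the usual GetShareReference helper on FileServiceClient):
+//
+//	share := cli.GetFileService().GetShareReference("myshare")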
+func (c Client) GetFileService() FileServiceClient { + f := FileServiceClient{ + client: c, + } + f.client.AddToUserAgent(fileServiceName) + f.auth = sharedKey + if c.UseSharedKeyLite { + f.auth = sharedKeyLite + } + return f +} + +func (c Client) getStandardHeaders() map[string]string { + return map[string]string{ + userAgentHeader: c.userAgent, + "x-ms-version": c.apiVersion, + "x-ms-date": currentTimeRfc1123Formatted(), + } +} + +func (c Client) exec(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*http.Response, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, err + } + + req, err := http.NewRequest(verb, url, body) + if err != nil { + return nil, errors.New("azure/storage: error creating request: " + err.Error()) + } + + // http.NewRequest() will automatically set req.ContentLength for a handful of types + // otherwise we will handle here. + if req.ContentLength < 1 { + if clstr, ok := headers["Content-Length"]; ok { + if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { + req.ContentLength = cl + } + } + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645 + } + + if c.isAccountSASClient() { + // append the SAS token to the query params + v := req.URL.Query() + v = mergeParams(v, c.accountSASToken) + req.URL.RawQuery = v.Encode() + } + + resp, err := c.Sender.Send(&c, req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= 400 && resp.StatusCode <= 505 { + return resp, getErrorFromResponse(resp) + } + + return resp, nil +} + +func (c Client) execInternalJSONCommon(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, *http.Request, *http.Response, error) { + headers, err := c.addAuthorizationHeader(verb, url, headers, auth) + if err != nil { + return nil, nil, nil, err + } + + req, err := http.NewRequest(verb, url, body) + for k, v := range headers { + req.Header.Add(k, v) + } + + resp, err := c.Sender.Send(&c, req) + if err != nil { + return nil, nil, nil, err + } + + respToRet := &odataResponse{resp: resp} + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + var respBody []byte + respBody, err = readAndCloseBody(resp.Body) + if err != nil { + return nil, nil, nil, err + } + + requestID, date, version := getDebugHeaders(resp.Header) + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + return respToRet, req, resp, err + } + // try unmarshal as odata.error json + err = json.Unmarshal(respBody, &respToRet.odata) + } + + return respToRet, req, resp, err +} + +func (c Client) execInternalJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + respToRet, _, _, err := c.execInternalJSONCommon(verb, url, headers, body, auth) + return respToRet, err +} + +func (c Client) execBatchOperationJSON(verb, url string, headers map[string]string, body io.Reader, auth authentication) (*odataResponse, error) { + // execute common query, get back generated request, response etc... for more processing. 
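+	// The batch response body is a nested MIME multipart document: an outer
+	// "batch" part whose boundary is carried in the Content-Type header,
+	// wrapping an inner "changeset" part with its own boundary. The helpers
+	// called below peel off both layers before the changeset HTTP response
+	// is decoded into the odataResponse.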
+	respToRet, req, resp, err := c.execInternalJSONCommon(verb, url, headers, body, auth)
+	if err != nil {
+		return nil, err
+	}
+
+	// return the OData in the case of executing batch commands.
+	// In this case we need to read the outer batch boundary and contents.
+	// Then we read the changeset information within the batch
+	var respBody []byte
+	respBody, err = readAndCloseBody(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	// outer multipart body
+	_, batchHeader, err := mime.ParseMediaType(resp.Header["Content-Type"][0])
+	if err != nil {
+		return nil, err
+	}
+
+	// batch details.
+	batchBoundary := batchHeader["boundary"]
+	batchPartBuf, changesetBoundary, err := genBatchReader(batchBoundary, respBody)
+	if err != nil {
+		return nil, err
+	}
+
+	// changeset details.
+	err = genChangesetReader(req, respToRet, batchPartBuf, changesetBoundary)
+	if err != nil {
+		return nil, err
+	}
+
+	return respToRet, nil
+}
+
+func genChangesetReader(req *http.Request, respToRet *odataResponse, batchPartBuf io.Reader, changesetBoundary string) error {
+	changesetMultiReader := multipart.NewReader(batchPartBuf, changesetBoundary)
+	changesetPart, err := changesetMultiReader.NextPart()
+	if err != nil {
+		return err
+	}
+
+	changesetPartBufioReader := bufio.NewReader(changesetPart)
+	changesetResp, err := http.ReadResponse(changesetPartBufioReader, req)
+	if err != nil {
+		return err
+	}
+
+	if changesetResp.StatusCode != http.StatusNoContent {
+		changesetBody, err := readAndCloseBody(changesetResp.Body)
+		err = json.Unmarshal(changesetBody, &respToRet.odata)
+		if err != nil {
+			return err
+		}
+		respToRet.resp = changesetResp
+	}
+
+	return nil
+}
+
+func genBatchReader(batchBoundary string, respBody []byte) (io.Reader, string, error) {
+	respBodyString := string(respBody)
+	respBodyReader := strings.NewReader(respBodyString)
+
+	// reading batchresponse
+	batchMultiReader := multipart.NewReader(respBodyReader, batchBoundary)
+	batchPart, err := batchMultiReader.NextPart()
+	if err != nil {
+		return nil, "", err
+	}
+	batchPartBufioReader := bufio.NewReader(batchPart)
+
+	_, changesetHeader, err := mime.ParseMediaType(batchPart.Header.Get("Content-Type"))
+	if err != nil {
+		return nil, "", err
+	}
+	changesetBoundary := changesetHeader["boundary"]
+	return batchPartBufioReader, changesetBoundary, nil
+}
+
+func readAndCloseBody(body io.ReadCloser) ([]byte, error) {
+	defer body.Close()
+	out, err := ioutil.ReadAll(body)
+	if err == io.EOF {
+		err = nil
+	}
+	return out, err
+}
+
+// reads the response body then closes it
+func drainRespBody(resp *http.Response) {
+	io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
+}
+
+func serviceErrFromXML(body []byte, storageErr *AzureStorageServiceError) error {
+	if err := xml.Unmarshal(body, storageErr); err != nil {
+		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. Body: %v.", err, string(body))
+		return err
+	}
+	return nil
+}
+
+func serviceErrFromJSON(body []byte, storageErr *AzureStorageServiceError) error {
+	odataError := odataErrorWrapper{}
+	if err := json.Unmarshal(body, &odataError); err != nil {
+		storageErr.Message = fmt.Sprintf("Response body could not be unmarshaled: %v. 
Body: %v.", err, string(body)) + return err + } + storageErr.Code = odataError.Err.Code + storageErr.Message = odataError.Err.Message.Value + storageErr.Lang = odataError.Err.Message.Lang + return nil +} + +func serviceErrFromStatusCode(code int, status string, requestID, date, version string) AzureStorageServiceError { + return AzureStorageServiceError{ + StatusCode: code, + Code: status, + RequestID: requestID, + Date: date, + APIVersion: version, + Message: "no response body was available for error status code", + } +} + +func (e AzureStorageServiceError) Error() string { + return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestInitiated=%s, RequestId=%s, API Version=%s, QueryParameterName=%s, QueryParameterValue=%s", + e.StatusCode, e.Code, e.Message, e.Date, e.RequestID, e.APIVersion, e.QueryParameterName, e.QueryParameterValue) +} + +// checkRespCode returns UnexpectedStatusError if the given response code is not +// one of the allowed status codes; otherwise nil. +func checkRespCode(resp *http.Response, allowed []int) error { + for _, v := range allowed { + if resp.StatusCode == v { + return nil + } + } + err := getErrorFromResponse(resp) + return UnexpectedStatusCodeError{ + allowed: allowed, + got: resp.StatusCode, + inner: err, + } +} + +func (c Client) addMetadataToHeaders(h map[string]string, metadata map[string]string) map[string]string { + metadata = c.protectUserAgent(metadata) + for k, v := range metadata { + h[userDefinedMetadataHeaderPrefix+k] = v + } + return h +} + +func getDebugHeaders(h http.Header) (requestID, date, version string) { + requestID = h.Get("x-ms-request-id") + version = h.Get("x-ms-version") + date = h.Get("Date") + return +} + +func getErrorFromResponse(resp *http.Response) error { + respBody, err := readAndCloseBody(resp.Body) + if err != nil { + return err + } + + requestID, date, version := getDebugHeaders(resp.Header) + if len(respBody) == 0 { + // no error in response body, might happen in HEAD requests + err = serviceErrFromStatusCode(resp.StatusCode, resp.Status, requestID, date, version) + } else { + storageErr := AzureStorageServiceError{ + StatusCode: resp.StatusCode, + RequestID: requestID, + Date: date, + APIVersion: version, + } + // response contains storage service error object, unmarshal + if resp.Header.Get("Content-Type") == "application/xml" { + errIn := serviceErrFromXML(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } else { + errIn := serviceErrFromJSON(respBody, &storageErr) + if err != nil { // error unmarshaling the error response + err = errIn + } + } + err = storageErr + } + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go new file mode 100644 index 00000000..e898e9bf --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/commonsasuri.go @@ -0,0 +1,38 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/url" + "time" +) + +// SASOptions includes options used by SAS URIs for different +// services and resources. +type SASOptions struct { + APIVersion string + Start time.Time + Expiry time.Time + IP string + UseHTTPS bool + Identifier string +} + +func addQueryParameter(query url.Values, key, value string) url.Values { + if value != "" { + query.Add(key, value) + } + return query +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go new file mode 100644 index 00000000..056473d4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -0,0 +1,640 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +// Container represents an Azure container. +type Container struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata map[string]string + sasuri url.URL +} + +// Client returns the HTTP client used by the Container reference. +func (c *Container) Client() *Client { + return &c.bsc.client +} + +func (c *Container) buildPath() string { + return fmt.Sprintf("/%s", c.Name) +} + +// GetURL gets the canonical URL to the container. +// This method does not create a publicly accessible URL if the container +// is private and this method does not check if the blob exists. +func (c *Container) GetURL() string { + container := c.Name + if container == "" { + container = "$root" + } + return c.bsc.client.getEndpoint(blobServiceName, pathForResource(container, ""), nil) +} + +// ContainerSASOptions are options to construct a container SAS +// URI. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +type ContainerSASOptions struct { + ContainerSASPermissions + OverrideHeaders + SASOptions +} + +// ContainerSASPermissions includes the available permissions for +// a container SAS URI. +type ContainerSASPermissions struct { + BlobServiceSASPermissions + List bool +} + +// GetSASURI creates an URL to the container which contains the Shared +// Access Signature with the specified options. 
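+//
+// A sketch of building a read/list URI that expires in an hour (the field
+// values are illustrative; Read is promoted from the embedded permissions
+// structs):
+//
+//	opts := storage.ContainerSASOptions{}
+//	opts.Read = true
+//	opts.List = true
+//	opts.Expiry = time.Now().Add(time.Hour)
+//	uri, err := cnt.GetSASURI(opts)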
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas +func (c *Container) GetSASURI(options ContainerSASOptions) (string, error) { + uri := c.GetURL() + signedResource := "c" + canonicalizedResource, err := c.bsc.client.buildCanonicalizedResource(uri, c.bsc.auth, true) + if err != nil { + return "", err + } + + // build permissions string + permissions := options.BlobServiceSASPermissions.buildString() + if options.List { + permissions += "l" + } + + return c.bsc.client.blobAndFileSASURI(options.SASOptions, uri, permissions, canonicalizedResource, signedResource, options.OverrideHeaders) +} + +// ContainerProperties contains various properties of a container returned from +// various endpoints like ListContainers. +type ContainerProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` + LeaseStatus string `xml:"LeaseStatus"` + LeaseState string `xml:"LeaseState"` + LeaseDuration string `xml:"LeaseDuration"` + PublicAccess ContainerAccessType `xml:"PublicAccess"` +} + +// ContainerListResponse contains the response fields from +// ListContainers call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx +type ContainerListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []Container `xml:"Containers>Container"` +} + +// BlobListResponse contains the response fields from ListBlobs call. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type BlobListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Blobs []Blob `xml:"Blobs>Blob"` + + // BlobPrefix is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + // The list here can be thought of as "folders" that may contain + // other folders or blobs. + BlobPrefixes []string `xml:"Blobs>BlobPrefix>Name"` + + // Delimiter is used to traverse blobs as if it were a file system. + // It is returned if ListBlobsParameters.Delimiter is specified. + Delimiter string `xml:"Delimiter"` +} + +// IncludeBlobDataset has options to include in a list blobs operation +type IncludeBlobDataset struct { + Snapshots bool + Metadata bool + UncommittedBlobs bool + Copy bool +} + +// ListBlobsParameters defines the set of customizable +// parameters to make a List Blobs call. 
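+//
+// For example, listing a "folder" of blobs page by page (values are
+// illustrative):
+//
+//	params := storage.ListBlobsParameters{Prefix: "logs/", MaxResults: 100}
+//	resp, err := cnt.ListBlobs(params)
+//	// resp.NextMarker, when non-empty, becomes Marker for the next call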
+// +// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx +type ListBlobsParameters struct { + Prefix string + Delimiter string + Marker string + Include *IncludeBlobDataset + MaxResults uint + Timeout uint + RequestID string +} + +func (p ListBlobsParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Delimiter != "" { + out.Set("delimiter", p.Delimiter) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != nil { + include := []string{} + include = addString(include, p.Include.Snapshots, "snapshots") + include = addString(include, p.Include.Metadata, "metadata") + include = addString(include, p.Include.UncommittedBlobs, "uncommittedblobs") + include = addString(include, p.Include.Copy, "copy") + fullInclude := strings.Join(include, ",") + out.Set("include", fullInclude) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func addString(datasets []string, include bool, text string) []string { + if include { + datasets = append(datasets, text) + } + return datasets +} + +// ContainerAccessType defines the access level to the container from a public +// request. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms- +// blob-public-access" header. +type ContainerAccessType string + +// Access options for containers +const ( + ContainerAccessTypePrivate ContainerAccessType = "" + ContainerAccessTypeBlob ContainerAccessType = "blob" + ContainerAccessTypeContainer ContainerAccessType = "container" +) + +// ContainerAccessPolicy represents each access policy in the container ACL. +type ContainerAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanWrite bool + CanDelete bool +} + +// ContainerPermissions represents the container ACLs. +type ContainerPermissions struct { + AccessType ContainerAccessType + AccessPolicies []ContainerAccessPolicy +} + +// ContainerAccessHeader references header used when setting/getting container ACL +const ( + ContainerAccessHeader string = "x-ms-blob-public-access" +) + +// GetBlobReference returns a Blob object for the specified blob name. +func (c *Container) GetBlobReference(name string) *Blob { + return &Blob{ + Container: c, + Name: name, + } +} + +// CreateContainerOptions includes the options for a create container operation +type CreateContainerOptions struct { + Timeout uint + Access ContainerAccessType `header:"x-ms-blob-public-access"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Create creates a blob container within the storage account +// with given name and access level. Returns error if container already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Container +func (c *Container) Create(options *CreateContainerOptions) error { + resp, err := c.create(options) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// CreateIfNotExists creates a blob container if it does not exist. Returns +// true if container is newly created or false if container already exists. 
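+//
+// Sketch (GetContainerReference is the usual helper on the blob service
+// client, assumed here):
+//
+//	cnt := cli.GetBlobService().GetContainerReference("mycontainer")
+//	created, err := cnt.CreateIfNotExists(&storage.CreateContainerOptions{
+//		Access: storage.ContainerAccessTypePrivate,
+//	})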
+func (c *Container) CreateIfNotExists(options *CreateContainerOptions) (bool, error) { + resp, err := c.create(options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { + return resp.StatusCode == http.StatusCreated, nil + } + } + return false, err +} + +func (c *Container) create(options *CreateContainerOptions) (*http.Response, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) +} + +// Exists returns true if a container with given name exists +// on the storage account, otherwise returns false. +func (c *Container) Exists() (bool, error) { + q := url.Values{"restype": {"container"}} + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + + } else { + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } + headers := c.bsc.client.getStandardHeaders() + + resp, err := c.bsc.client.exec(http.MethodHead, uri, headers, nil, c.bsc.auth) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, nil + } + } + return false, err +} + +// SetContainerPermissionOptions includes options for a set container permissions operation +type SetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetPermissions sets up container permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Container-ACL +func (c *Container) SetPermissions(permissions ContainerPermissions, options *SetContainerPermissionOptions) error { + body, length, err := generateContainerACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, ContainerAccessHeader, string(permissions.AccessType)) + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, body, c.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} + +// GetContainerPermissionOptions includes options for a get container permissions operation +type GetContainerPermissionOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// GetPermissions gets the container permissions as per https://msdn.microsoft.com/en-us/library/azure/dd179469.aspx +// If timeout is 0 then it will not be passed to Azure +// leaseID will 
only be passed to Azure if populated +func (c *Container) GetPermissions(options *GetContainerPermissionOptions) (*ContainerPermissions, error) { + params := url.Values{ + "restype": {"container"}, + "comp": {"acl"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var ap AccessPolicy + err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return buildAccessPolicy(ap, &resp.Header), nil +} + +func buildAccessPolicy(ap AccessPolicy, headers *http.Header) *ContainerPermissions { + // containerAccess. Blob, Container, empty + containerAccess := headers.Get(http.CanonicalHeaderKey(ContainerAccessHeader)) + permissions := ContainerPermissions{ + AccessType: ContainerAccessType(containerAccess), + AccessPolicies: []ContainerAccessPolicy{}, + } + + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + capd := ContainerAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + capd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + capd.CanWrite = updatePermissions(policy.AccessPolicy.Permission, "w") + capd.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + permissions.AccessPolicies = append(permissions.AccessPolicies, capd) + } + return &permissions +} + +// DeleteContainerOptions includes options for a delete container operation +type DeleteContainerOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// Delete deletes the container with given name on the storage +// account. If the container does not exist returns error. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) Delete(options *DeleteContainerOptions) error { + resp, err := c.delete(options) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) +} + +// DeleteIfExists deletes the container with given name on the storage +// account if it exists. Returns true if container is deleted with this call, or +// false if the container did not exist at the time of the Delete Container +// operation. 
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-container +func (c *Container) DeleteIfExists(options *DeleteContainerOptions) (bool, error) { + resp, err := c.delete(options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil + } + } + return false, err +} + +func (c *Container) delete(options *DeleteContainerOptions) (*http.Response, error) { + query := url.Values{"restype": {"container"}} + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), query) + + return c.bsc.client.exec(http.MethodDelete, uri, headers, nil, c.bsc.auth) +} + +// ListBlobs returns an object that contains list of blobs in the container, +// pagination token and other information in the response of List Blobs call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Blobs +func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{ + "restype": {"container"}, + "comp": {"list"}, + }) + var uri string + if c.bsc.client.isServiceSASClient() { + q = mergeParams(q, c.sasuri.Query()) + newURI := c.sasuri + newURI.RawQuery = q.Encode() + uri = newURI.String() + } else { + uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) + } + + headers := c.bsc.client.getStandardHeaders() + headers = addToHeaders(headers, "x-ms-client-request-id", params.RequestID) + + var out BlobListResponse + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + for i := range out.Blobs { + out.Blobs[i].Container = c + } + return out, err +} + +// ContainerMetadataOptions includes options for container metadata operations +type ContainerMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified container. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata +func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusOK}) +} + +// GetMetadata returns all user-defined metadata for the specified container. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) 
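+//
+// Sketch: a successful call refreshes the Metadata map in place:
+//
+//	if err := cnt.GetMetadata(nil); err == nil {
+//		owner := cnt.Metadata["owner"] // keys arrive lower-cased
+//	}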
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata
+func (c *Container) GetMetadata(options *ContainerMetadataOptions) error {
+	params := url.Values{
+		"comp":    {"metadata"},
+		"restype": {"container"},
+	}
+	headers := c.bsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+
+	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
+
+	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	c.writeMetadata(resp.Header)
+	return nil
+}
+
+func (c *Container) writeMetadata(h http.Header) {
+	c.Metadata = writeMetadata(h)
+}
+
+func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) {
+	sil := SignedIdentifiers{
+		SignedIdentifiers: []SignedIdentifier{},
+	}
+	for _, capd := range policies {
+		permission := capd.generateContainerPermissions()
+		signedIdentifier := convertAccessPolicyToXMLStructs(capd.ID, capd.StartTime, capd.ExpiryTime, permission)
+		sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier)
+	}
+	return xmlMarshal(sil)
+}
+
+func (capd *ContainerAccessPolicy) generateContainerPermissions() (permissions string) {
+	// generate the permissions string (rwd).
+	// still want the end user API to have bool flags.
+	permissions = ""
+
+	if capd.CanRead {
+		permissions += "r"
+	}
+
+	if capd.CanWrite {
+		permissions += "w"
+	}
+
+	if capd.CanDelete {
+		permissions += "d"
+	}
+
+	return permissions
+}
+
+// GetProperties updates the properties of the container from the service.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-properties
+func (c *Container) GetProperties() error {
+	params := url.Values{
+		"restype": {"container"},
+	}
+	headers := c.bsc.client.getStandardHeaders()
+
+	uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params)
+
+	resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	// update properties
+	c.Properties.Etag = resp.Header.Get(headerEtag)
+	c.Properties.LeaseStatus = resp.Header.Get("x-ms-lease-status")
+	c.Properties.LeaseState = resp.Header.Get("x-ms-lease-state")
+	c.Properties.LeaseDuration = resp.Header.Get("x-ms-lease-duration")
+	c.Properties.LastModified = resp.Header.Get("Last-Modified")
+	c.Properties.PublicAccess = ContainerAccessType(resp.Header.Get(ContainerAccessHeader))
+
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
new file mode 100644
index 00000000..151e9a51
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/copyblob.go
@@ -0,0 +1,237 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + blobCopyStatusPending = "pending" + blobCopyStatusSuccess = "success" + blobCopyStatusAborted = "aborted" + blobCopyStatusFailed = "failed" +) + +// CopyOptions includes the options for a copy blob operation +type CopyOptions struct { + Timeout uint + Source CopyOptionsConditions + Destiny CopyOptionsConditions + RequestID string +} + +// IncrementalCopyOptions includes the options for an incremental copy blob operation +type IncrementalCopyOptions struct { + Timeout uint + Destination IncrementalCopyOptionsConditions + RequestID string +} + +// CopyOptionsConditions includes some conditional options in a copy blob operation +type CopyOptionsConditions struct { + LeaseID string + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// IncrementalCopyOptionsConditions includes some conditional options in a copy blob operation +type IncrementalCopyOptionsConditions struct { + IfModifiedSince *time.Time + IfUnmodifiedSince *time.Time + IfMatch string + IfNoneMatch string +} + +// Copy starts a blob copy operation and waits for the operation to +// complete. sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) There is no SLA on blob copy and therefore +// this helper method works faster on smaller files. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error { + copyID, err := b.StartCopy(sourceBlob, options) + if err != nil { + return err + } + + return b.WaitForCopy(copyID) +} + +// StartCopy starts a blob copy operation. +// sourceBlob parameter must be a canonical URL to the blob (can be +// obtained using the GetURL method.) 
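+//
+// A sketch of the typical call (src and dst are existing *Blob references):
+//
+//	copyID, err := dst.StartCopy(src.GetURL(), nil)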
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob +func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = sourceBlob + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + // source + headers = addToHeaders(headers, "x-ms-source-lease-id", options.Source.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-source-if-modified-since", options.Source.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-source-if-unmodified-since", options.Source.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-source-if-match", options.Source.IfMatch) + headers = addToHeaders(headers, "x-ms-source-if-none-match", options.Source.IfNoneMatch) + //destiny + headers = addToHeaders(headers, "x-ms-lease-id", options.Destiny.LeaseID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destiny.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destiny.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destiny.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destiny.IfNoneMatch) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusAccepted, http.StatusCreated}); err != nil { + return "", err + } + + copyID := resp.Header.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} + +// AbortCopyOptions includes the options for an abort blob operation +type AbortCopyOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AbortCopy aborts a BlobCopy which has already been triggered by the StartBlobCopy function. +// copyID is generated from StartBlobCopy function. +// currentLeaseID is required IF the destination blob has an active lease on it. 
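+// A sketch of aborting an in-flight copy (leaseID is illustrative and only
+// needed when the destination holds an active lease):
+//
+//	err := dst.AbortCopy(copyID, &storage.AbortCopyOptions{LeaseID: leaseID})
+//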
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Abort-Copy-Blob
+func (b *Blob) AbortCopy(copyID string, options *AbortCopyOptions) error {
+	params := url.Values{
+		"comp":   {"copy"},
+		"copyid": {copyID},
+	}
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-copy-action"] = "abort"
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+	return checkRespCode(resp, []int{http.StatusNoContent})
+}
+
+// WaitForCopy loops until a BlobCopy operation is completed (or fails with error)
+func (b *Blob) WaitForCopy(copyID string) error {
+	for {
+		err := b.GetProperties(nil)
+		if err != nil {
+			return err
+		}
+
+		if b.Properties.CopyID != copyID {
+			return errBlobCopyIDMismatch
+		}
+
+		switch b.Properties.CopyStatus {
+		case blobCopyStatusSuccess:
+			return nil
+		case blobCopyStatusPending:
+			continue
+		case blobCopyStatusAborted:
+			return errBlobCopyAborted
+		case blobCopyStatusFailed:
+			return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", b.Properties.CopyID, b.Properties.CopyStatusDescription)
+		default:
+			return fmt.Errorf("storage: unhandled blob copy status: '%s'", b.Properties.CopyStatus)
+		}
+	}
+}
+
+// IncrementalCopyBlob copies a snapshot of a source blob and copies it to the referring blob.
+// sourceBlob parameter must be a valid snapshot URL of the original blob.
+// The original blob must be public, or use a Shared Access Signature.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/incremental-copy-blob .
+func (b *Blob) IncrementalCopyBlob(sourceBlobURL string, snapshotTime time.Time, options *IncrementalCopyOptions) (string, error) { + params := url.Values{"comp": {"incrementalcopy"}} + + // need formatting to 7 decimal places so it's friendly to Windows and *nix + snapshotTimeFormatted := snapshotTime.Format("2006-01-02T15:04:05.0000000Z") + u, err := url.Parse(sourceBlobURL) + if err != nil { + return "", err + } + query := u.Query() + query.Add("snapshot", snapshotTimeFormatted) + encodedQuery := query.Encode() + encodedQuery = strings.Replace(encodedQuery, "%3A", ":", -1) + u.RawQuery = encodedQuery + snapshotURL := u.String() + + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-copy-source"] = snapshotURL + + if options != nil { + addTimeout(params, options.Timeout) + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + headers = addTimeToHeaders(headers, "x-ms-if-modified-since", options.Destination.IfModifiedSince) + headers = addTimeToHeaders(headers, "x-ms-if-unmodified-since", options.Destination.IfUnmodifiedSince) + headers = addToHeaders(headers, "x-ms-if-match", options.Destination.IfMatch) + headers = addToHeaders(headers, "x-ms-if-none-match", options.Destination.IfNoneMatch) + } + + // get URI of destination blob + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return "", err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{http.StatusAccepted}); err != nil { + return "", err + } + + copyID := resp.Header.Get("x-ms-copy-id") + if copyID == "" { + return "", errors.New("Got empty copy id header") + } + return copyID, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go new file mode 100644 index 00000000..2e805e7d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/directory.go @@ -0,0 +1,238 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "net/http" + "net/url" + "sync" +) + +// Directory represents a directory on a share. +type Directory struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties DirectoryProperties + share *Share +} + +// DirectoryProperties contains various properties of a directory. +type DirectoryProperties struct { + LastModified string `xml:"Last-Modified"` + Etag string `xml:"Etag"` +} + +// ListDirsAndFilesParameters defines the set of customizable parameters to +// make a List Files and Directories call. 
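+//
+// For example (sketch; dir is a *Directory obtained via GetDirectoryReference):
+//
+//	resp, err := dir.ListDirsAndFiles(storage.ListDirsAndFilesParameters{
+//		MaxResults: 50,
+//	})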
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
+type ListDirsAndFilesParameters struct {
+	Prefix     string
+	Marker     string
+	MaxResults uint
+	Timeout    uint
+}
+
+// DirsAndFilesListResponse contains the response fields from
+// a List Files and Directories call.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files
+type DirsAndFilesListResponse struct {
+	XMLName     xml.Name    `xml:"EnumerationResults"`
+	Xmlns       string      `xml:"xmlns,attr"`
+	Marker      string      `xml:"Marker"`
+	MaxResults  int64       `xml:"MaxResults"`
+	Directories []Directory `xml:"Entries>Directory"`
+	Files       []File      `xml:"Entries>File"`
+	NextMarker  string      `xml:"NextMarker"`
+}
+
+// builds the complete directory path for this directory object.
+func (d *Directory) buildPath() string {
+	path := ""
+	current := d
+	for current.Name != "" {
+		path = "/" + current.Name + path
+		current = current.parent
+	}
+	return d.share.buildPath() + path
+}
+
+// Create this directory in the associated share.
+// If a directory with the same name already exists, the operation fails.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
+func (d *Directory) Create(options *FileRequestOptions) error {
+	// if this is the root directory exit early
+	if d.parent == nil {
+		return nil
+	}
+
+	params := prepareOptions(options)
+	headers, err := d.fsc.createResource(d.buildPath(), resourceDirectory, params, mergeMDIntoExtraHeaders(d.Metadata, nil), []int{http.StatusCreated})
+	if err != nil {
+		return err
+	}
+
+	d.updateEtagAndLastModified(headers)
+	return nil
+}
+
+// CreateIfNotExists creates this directory under the associated share if the
+// directory does not exist. Returns true if the directory is newly created or
+// false if the directory already exists.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Directory
+func (d *Directory) CreateIfNotExists(options *FileRequestOptions) (bool, error) {
+	// if this is the root directory exit early
+	if d.parent == nil {
+		return false, nil
+	}
+
+	params := prepareOptions(options)
+	resp, err := d.fsc.createResourceNoClose(d.buildPath(), resourceDirectory, params, nil)
+	if resp != nil {
+		defer drainRespBody(resp)
+		if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict {
+			if resp.StatusCode == http.StatusCreated {
+				d.updateEtagAndLastModified(resp.Header)
+				return true, nil
+			}
+
+			return false, d.FetchAttributes(nil)
+		}
+	}
+
+	return false, err
+}
+
+// Delete removes this directory. It must be empty in order to be deleted.
+// If the directory does not exist the operation fails.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory
+func (d *Directory) Delete(options *FileRequestOptions) error {
+	return d.fsc.deleteResource(d.buildPath(), resourceDirectory, options)
+}
+
+// DeleteIfExists removes this directory if it exists.
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Directory +func (d *Directory) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := d.fsc.deleteResourceNoClose(d.buildPath(), resourceDirectory, options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this directory exists. +func (d *Directory) Exists() (bool, error) { + exists, headers, err := d.fsc.resourceExists(d.buildPath(), resourceDirectory) + if exists { + d.updateEtagAndLastModified(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata for this directory. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-directory-properties +func (d *Directory) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := d.fsc.getResourceHeaders(d.buildPath(), compNone, resourceDirectory, params, http.MethodHead) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + d.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetDirectoryReference returns a child Directory object for this directory. +func (d *Directory) GetDirectoryReference(name string) *Directory { + return &Directory{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + } +} + +// GetFileReference returns a child File object for this directory. +func (d *Directory) GetFileReference(name string) *File { + return &File{ + fsc: d.fsc, + Name: name, + parent: d, + share: d.share, + mutex: &sync.Mutex{}, + } +} + +// ListDirsAndFiles returns a list of files and directories under this directory. +// It also contains a pagination token and other response details. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Directories-and-Files +func (d *Directory) ListDirsAndFiles(params ListDirsAndFilesParameters) (*DirsAndFilesListResponse, error) { + q := mergeParams(params.getParameters(), getURLInitValues(compList, resourceDirectory)) + + resp, err := d.fsc.listContent(d.buildPath(), q, nil) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + var out DirsAndFilesListResponse + err = xmlUnmarshal(resp.Body, &out) + return &out, err +} + +// SetMetadata replaces the metadata for this directory. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetDirectoryMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Directory-Metadata +func (d *Directory) SetMetadata(options *FileRequestOptions) error { + headers, err := d.fsc.setResourceHeaders(d.buildPath(), compMetadata, resourceDirectory, mergeMDIntoExtraHeaders(d.Metadata, nil), options) + if err != nil { + return err + } + + d.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (d *Directory) updateEtagAndLastModified(headers http.Header) { + d.Properties.Etag = headers.Get("Etag") + d.Properties.LastModified = headers.Get("Last-Modified") +} + +// URL gets the canonical URL to this directory. 
+// This method does not create a publicly accessible URL if the directory +// is private and this method does not check if the directory exists. +func (d *Directory) URL() string { + return d.fsc.client.getEndpoint(fileServiceName, d.buildPath(), url.Values{}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go new file mode 100644 index 00000000..38525352 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -0,0 +1,466 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + uuid "github.com/satori/go.uuid" +) + +// Annotating as secure for gas scanning +/* #nosec */ +const ( + partitionKeyNode = "PartitionKey" + rowKeyNode = "RowKey" + etagErrorTemplate = "Etag didn't match: %v" +) + +var ( + errEmptyPayload = errors.New("Empty payload is not a valid metadata level for this operation") + errNilPreviousResult = errors.New("The previous results page is nil") + errNilNextLink = errors.New("There are no more pages in this query results") +) + +// Entity represents an entity inside an Azure table. +type Entity struct { + Table *Table + PartitionKey string + RowKey string + TimeStamp time.Time + OdataMetadata string + OdataType string + OdataID string + OdataEtag string + OdataEditLink string + Properties map[string]interface{} +} + +// GetEntityReference returns an Entity object with the specified +// partition key and row key. +func (t *Table) GetEntityReference(partitionKey, rowKey string) *Entity { + return &Entity{ + PartitionKey: partitionKey, + RowKey: rowKey, + Table: t, + } +} + +// EntityOptions includes options for entity operations. +type EntityOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// GetEntityOptions includes options for a get entity operation +type GetEntityOptions struct { + Select []string + RequestID string `header:"x-ms-client-request-id"` +} + +// Get gets the referenced entity. Which properties to get can be +// specified using the select option. 
+// See: +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities +func (e *Entity) Get(timeout uint, ml MetadataLevel, options *GetEntityOptions) error { + if ml == EmptyPayload { + return errEmptyPayload + } + // RowKey and PartitionKey could be lost if not included in the query + // As those are the entity identifiers, it is best if they are not lost + rk := e.RowKey + pk := e.PartitionKey + + query := url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + } + headers := e.Table.tsc.client.getStandardHeaders() + headers[headerAccept] = string(ml) + + if options != nil { + if len(options.Select) > 0 { + query.Add("$select", strings.Join(options.Select, ",")) + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodGet, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return err + } + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + err = json.Unmarshal(respBody, e) + if err != nil { + return err + } + e.PartitionKey = pk + e.RowKey = rk + + return nil +} + +// Insert inserts the referenced entity in its table. +// The function fails if there is an entity with the same +// PartitionKey and RowKey in the table. +// ml determines the level of detail of metadata in the operation response, +// or no data at all. +// See: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-entity +func (e *Entity) Insert(ml MetadataLevel, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, ml) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.Table.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodPost, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if ml != EmptyPayload { + if err = checkRespCode(resp, []int{http.StatusCreated}); err != nil { + return err + } + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + if err = e.UnmarshalJSON(data); err != nil { + return err + } + } else { + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + } + + return nil +} + +// Update updates the contents of an entity. The function fails if there is no entity +// with the same PartitionKey and RowKey in the table or if the ETag is different +// than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/update-entity2 +func (e *Entity) Update(force bool, options *EntityOptions) error { + return e.updateMerge(force, http.MethodPut, options) +} + +// Merge merges the contents of entity specified with PartitionKey and RowKey +// with the content specified in Properties. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. 
+// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/merge-entity +func (e *Entity) Merge(force bool, options *EntityOptions) error { + return e.updateMerge(force, "MERGE", options) +} + +// Delete deletes the entity. +// The function fails if there is no entity with the same PartitionKey and +// RowKey in the table or if the ETag is different than the one in Azure. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/delete-entity1 +func (e *Entity) Delete(force bool, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(http.MethodDelete, uri, headers, nil, e.Table.tsc.auth) + if err != nil { + if resp.StatusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateTimestamp(resp.Header) +} + +// InsertOrReplace inserts an entity or replaces the existing one. +// Read more: https://docs.microsoft.com/rest/api/storageservices/fileservices/insert-or-replace-entity +func (e *Entity) InsertOrReplace(options *EntityOptions) error { + return e.insertOr(http.MethodPut, options) +} + +// InsertOrMerge inserts an entity or merges the existing one. +// Read more: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/insert-or-merge-entity +func (e *Entity) InsertOrMerge(options *EntityOptions) error { + return e.insertOr("MERGE", options) +} + +func (e *Entity) buildPath() string { + return fmt.Sprintf("%s(PartitionKey='%s', RowKey='%s')", e.Table.buildPath(), e.PartitionKey, e.RowKey) +} + +// MarshalJSON is a custom marshaller for entity +func (e *Entity) MarshalJSON() ([]byte, error) { + completeMap := map[string]interface{}{} + completeMap[partitionKeyNode] = e.PartitionKey + completeMap[rowKeyNode] = e.RowKey + for k, v := range e.Properties { + typeKey := strings.Join([]string{k, OdataTypeSuffix}, "") + switch t := v.(type) { + case []byte: + completeMap[typeKey] = OdataBinary + completeMap[k] = t + case time.Time: + completeMap[typeKey] = OdataDateTime + completeMap[k] = t.Format(time.RFC3339Nano) + case uuid.UUID: + completeMap[typeKey] = OdataGUID + completeMap[k] = t.String() + case int64: + completeMap[typeKey] = OdataInt64 + completeMap[k] = fmt.Sprintf("%v", v) + case float32, float64: + completeMap[typeKey] = OdataDouble + completeMap[k] = fmt.Sprintf("%v", v) + default: + completeMap[k] = v + } + if strings.HasSuffix(k, OdataTypeSuffix) { + if !(completeMap[k] == OdataBinary || + completeMap[k] == OdataDateTime || + completeMap[k] == OdataGUID || + completeMap[k] == OdataInt64 || + completeMap[k] == OdataDouble) { + return nil, fmt.Errorf("Odata.type annotation %v value is not valid", k) + } + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + if _, ok := completeMap[valueKey]; !ok { + return nil, fmt.Errorf("Odata.type annotation %v defined without value defined", k) + } + } + } + return json.Marshal(completeMap) +} + +// UnmarshalJSON is a custom unmarshaller for entities +func (e *Entity) UnmarshalJSON(data []byte) error { + errorTemplate := "Deserializing error: %v" + + props := 
map[string]interface{}{} + err := json.Unmarshal(data, &props) + if err != nil { + return err + } + + // deserialize metadata + e.OdataMetadata = stringFromMap(props, "odata.metadata") + e.OdataType = stringFromMap(props, "odata.type") + e.OdataID = stringFromMap(props, "odata.id") + e.OdataEtag = stringFromMap(props, "odata.etag") + e.OdataEditLink = stringFromMap(props, "odata.editLink") + e.PartitionKey = stringFromMap(props, partitionKeyNode) + e.RowKey = stringFromMap(props, rowKeyNode) + + // deserialize timestamp + timeStamp, ok := props["Timestamp"] + if ok { + str, ok := timeStamp.(string) + if !ok { + return fmt.Errorf(errorTemplate, "Timestamp casting error") + } + t, err := time.Parse(time.RFC3339Nano, str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + e.TimeStamp = t + } + delete(props, "Timestamp") + delete(props, "Timestamp@odata.type") + + // deserialize entity (user defined fields) + for k, v := range props { + if strings.HasSuffix(k, OdataTypeSuffix) { + valueKey := strings.TrimSuffix(k, OdataTypeSuffix) + str, ok := props[valueKey].(string) + if !ok { + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v casting error", v)) + } + switch v { + case OdataBinary: + props[valueKey], err = base64.StdEncoding.DecodeString(str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + case OdataDateTime: + t, err := time.Parse("2006-01-02T15:04:05Z", str) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = t + case OdataGUID: + props[valueKey] = uuid.FromStringOrNil(str) + case OdataInt64: + i, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = i + case OdataDouble: + f, err := strconv.ParseFloat(str, 64) + if err != nil { + return fmt.Errorf(errorTemplate, err) + } + props[valueKey] = f + default: + return fmt.Errorf(errorTemplate, fmt.Sprintf("%v is not supported", v)) + } + delete(props, k) + } + } + + e.Properties = props + return nil +} + +func getAndDelete(props map[string]interface{}, key string) interface{} { + if value, ok := props[key]; ok { + delete(props, key) + return value + } + return nil +} + +func addIfMatchHeader(h map[string]string, force bool, etag string) map[string]string { + if force { + h[headerIfMatch] = "*" + } else { + h[headerIfMatch] = etag + } + return h +} + +// updates Etag and timestamp +func (e *Entity) updateEtagAndTimestamp(headers http.Header) error { + e.OdataEtag = headers.Get(headerEtag) + return e.updateTimestamp(headers) +} + +func (e *Entity) updateTimestamp(headers http.Header) error { + str := headers.Get(headerDate) + t, err := time.Parse(time.RFC1123, str) + if err != nil { + return fmt.Errorf("Update timestamp error: %v", err) + } + e.TimeStamp = t + return nil +} + +func (e *Entity) insertOr(verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.Header) +}
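+ +// Usage sketch, not part of the upstream file: a minimal round trip with the +// Entity API above. The storage.Client named cli (e.g. from +// storage.NewBasicClient) and the table name "people" are illustrative +// assumptions. +// +// ts := cli.GetTableService() +// table := ts.GetTableReference("people") +// person := table.GetEntityReference("partition1", "row1") +// person.Properties = map[string]interface{}{"Age": int64(30)} +// if err := person.Insert(storage.FullMetadata, nil); err != nil { +// // handle error +// } +// person.Properties["Age"] = int64(31) +// if err := person.Update(true, nil); err != nil { +// // handle error +// } +// if err := person.Delete(true, nil); err != nil { +// // handle error +// }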
+ +func (e *Entity) updateMerge(force bool, verb string, options *EntityOptions) error { + query, headers := options.getParameters() + headers = mergeHeaders(headers, e.Table.tsc.client.getStandardHeaders()) + + body, err := json.Marshal(e) + if err != nil { + return err + } + headers = addBodyRelatedHeaders(headers, len(body)) + headers = addIfMatchHeader(headers, force, e.OdataEtag) + headers = addReturnContentHeaders(headers, EmptyPayload) + + uri := e.Table.tsc.client.getEndpoint(tableServiceName, e.buildPath(), query) + resp, err := e.Table.tsc.client.exec(verb, uri, headers, bytes.NewReader(body), e.Table.tsc.auth) + if err != nil { + if resp.StatusCode == http.StatusPreconditionFailed { + return fmt.Errorf(etagErrorTemplate, err) + } + return err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + + return e.updateEtagAndTimestamp(resp.Header) +} + +func stringFromMap(props map[string]interface{}, key string) string { + value := getAndDelete(props, key) + if value != nil { + return value.(string) + } + return "" +} + +func (options *EntityOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + query = addTimeout(query, options.Timeout) + headers = headersFromStruct(*options) + } + return query, headers +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go new file mode 100644 index 00000000..06bbe4ba --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -0,0 +1,480 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "sync" +) + +const fourMB = uint64(4194304) +const oneTB = uint64(1099511627776) + +// Export maximum range and file sizes +const MaxRangeSize = fourMB +const MaxFileSize = oneTB + +// File represents a file on a share. +type File struct { + fsc *FileServiceClient + Metadata map[string]string + Name string `xml:"Name"` + parent *Directory + Properties FileProperties `xml:"Properties"` + share *Share + FileCopyProperties FileCopyState + mutex *sync.Mutex +} + +// FileProperties contains various properties of a file. +type FileProperties struct { + CacheControl string `header:"x-ms-cache-control"` + Disposition string `header:"x-ms-content-disposition"` + Encoding string `header:"x-ms-content-encoding"` + Etag string + Language string `header:"x-ms-content-language"` + LastModified string + Length uint64 `xml:"Content-Length" header:"x-ms-content-length"` + MD5 string `header:"x-ms-content-md5"` + Type string `header:"x-ms-content-type"` +} + +// FileCopyState contains various properties of a file copy operation. 
+type FileCopyState struct { + CompletionTime string + ID string `header:"x-ms-copy-id"` + Progress string + Source string + Status string `header:"x-ms-copy-status"` + StatusDesc string +} + +// FileStream contains file data returned from a call to GetFile. +type FileStream struct { + Body io.ReadCloser + ContentMD5 string +} + +// FileRequestOptions will be passed to misc file operations. +// Currently just Timeout (in seconds) but could expand. +type FileRequestOptions struct { + Timeout uint // timeout duration in seconds. +} + +func prepareOptions(options *FileRequestOptions) url.Values { + params := url.Values{} + if options != nil { + params = addTimeout(params, options.Timeout) + } + return params +} + +// FileRanges contains a list of file range information for a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +type FileRanges struct { + ContentLength uint64 + LastModified string + ETag string + FileRanges []FileRange `xml:"Range"` +} + +// FileRange contains range information for a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +type FileRange struct { + Start uint64 `xml:"Start"` + End uint64 `xml:"End"` +} + +func (fr FileRange) String() string { + return fmt.Sprintf("bytes=%d-%d", fr.Start, fr.End) +} + +// builds the complete file path for this file object +func (f *File) buildPath() string { + return f.parent.buildPath() + "/" + f.Name +} + +// ClearRange releases the specified range of space in a file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) ClearRange(fileRange FileRange, options *FileRequestOptions) error { + var timeout *uint + if options != nil { + timeout = &options.Timeout + } + headers, err := f.modifyRange(nil, fileRange, timeout, nil) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// Create creates a new file or replaces an existing one. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-File +func (f *File) Create(maxSize uint64, options *FileRequestOptions) error { + if maxSize > oneTB { + return fmt.Errorf("max file size is 1TB") + } + params := prepareOptions(options) + headers := headersFromStruct(f.Properties) + headers["x-ms-content-length"] = strconv.FormatUint(maxSize, 10) + headers["x-ms-type"] = "file" + + outputHeaders, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, headers), []int{http.StatusCreated}) + if err != nil { + return err + } + + f.Properties.Length = maxSize + f.updateEtagAndLastModified(outputHeaders) + return nil +} + +// CopyFile operation copies a file/blob from the sourceURL to the path provided. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/copy-file +func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error { + extraHeaders := map[string]string{ + "x-ms-type": "file", + "x-ms-copy-source": sourceURL, + } + params := prepareOptions(options) + + headers, err := f.fsc.createResource(f.buildPath(), resourceFile, params, mergeMDIntoExtraHeaders(f.Metadata, extraHeaders), []int{http.StatusAccepted}) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id") + f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status") + return nil +} + +// Delete immediately removes this file from the storage account.
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) Delete(options *FileRequestOptions) error { + return f.fsc.deleteResource(f.buildPath(), resourceFile, options) +} + +// DeleteIfExists removes this file if it exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-File2 +func (f *File) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := f.fsc.deleteResourceNoClose(f.buildPath(), resourceFile, options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// GetFileOptions includes options for a get file operation +type GetFileOptions struct { + Timeout uint + GetContentMD5 bool +} + +// DownloadToStream operation downloads the file. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadToStream(options *FileRequestOptions) (io.ReadCloser, error) { + params := prepareOptions(options) + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, nil) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + drainRespBody(resp) + return nil, err + } + return resp.Body, nil +} + +// DownloadRangeToStream operation downloads the specified range of this file with optional MD5 hash. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file +func (f *File) DownloadRangeToStream(fileRange FileRange, options *GetFileOptions) (fs FileStream, err error) { + extraHeaders := map[string]string{ + "Range": fileRange.String(), + } + params := url.Values{} + if options != nil { + if options.GetContentMD5 { + if isRangeTooBig(fileRange) { + return fs, fmt.Errorf("must specify a range less than or equal to 4MB when getContentMD5 is true") + } + extraHeaders["x-ms-range-get-content-md5"] = "true" + } + params = addTimeout(params, options.Timeout) + } + + resp, err := f.fsc.getResourceNoClose(f.buildPath(), compNone, resourceFile, params, http.MethodGet, extraHeaders) + if err != nil { + return fs, err + } + + if err = checkRespCode(resp, []int{http.StatusOK, http.StatusPartialContent}); err != nil { + drainRespBody(resp) + return fs, err + } + + fs.Body = resp.Body + if options != nil && options.GetContentMD5 { + fs.ContentMD5 = resp.Header.Get("Content-MD5") + } + return fs, nil +} + +// Exists returns true if this file exists. +func (f *File) Exists() (bool, error) { + exists, headers, err := f.fsc.resourceExists(f.buildPath(), resourceFile) + if exists { + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + } + return exists, err +} + +// FetchAttributes updates metadata and properties for this file. 
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-properties +func (f *File) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := f.fsc.getResourceHeaders(f.buildPath(), compNone, resourceFile, params, http.MethodHead) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + f.updateProperties(headers) + f.Metadata = getMetadataFromHeaders(headers) + return nil +} + +// returns true if the range is larger than 4MB +func isRangeTooBig(fileRange FileRange) bool { + if fileRange.End-fileRange.Start > fourMB { + return true + } + + return false +} + +// ListRangesOptions includes options for a list file ranges operation +type ListRangesOptions struct { + Timeout uint + ListRange *FileRange +} + +// ListRanges returns the list of valid ranges for this file. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Ranges +func (f *File) ListRanges(options *ListRangesOptions) (*FileRanges, error) { + params := url.Values{"comp": {"rangelist"}} + + // add optional range to list + var headers map[string]string + if options != nil { + params = addTimeout(params, options.Timeout) + if options.ListRange != nil { + headers = make(map[string]string) + headers["Range"] = options.ListRange.String() + } + } + + resp, err := f.fsc.listContent(f.buildPath(), params, headers) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + var cl uint64 + cl, err = strconv.ParseUint(resp.Header.Get("x-ms-content-length"), 10, 64) + if err != nil { + ioutil.ReadAll(resp.Body) + return nil, err + } + + var out FileRanges + out.ContentLength = cl + out.ETag = resp.Header.Get("ETag") + out.LastModified = resp.Header.Get("Last-Modified") + + err = xmlUnmarshal(resp.Body, &out) + return &out, err +} + +// modifies a range of bytes in this file +func (f *File) modifyRange(bytes io.Reader, fileRange FileRange, timeout *uint, contentMD5 *string) (http.Header, error) { + if err := f.fsc.checkForStorageEmulator(); err != nil { + return nil, err + } + if fileRange.End < fileRange.Start { + return nil, errors.New("the value for rangeEnd must be greater than or equal to rangeStart") + } + if bytes != nil && isRangeTooBig(fileRange) { + return nil, errors.New("range cannot exceed 4MB in size") + } + + params := url.Values{"comp": {"range"}} + if timeout != nil { + params = addTimeout(params, *timeout) + } + + uri := f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), params) + + // default to clear + write := "clear" + cl := uint64(0) + + // if bytes is not nil then this is an update operation + if bytes != nil { + write = "update" + cl = (fileRange.End - fileRange.Start) + 1 + } + + extraHeaders := map[string]string{ + "Content-Length": strconv.FormatUint(cl, 10), + "Range": fileRange.String(), + "x-ms-write": write, + } + + if contentMD5 != nil { + extraHeaders["Content-MD5"] = *contentMD5 + } + + headers := mergeHeaders(f.fsc.client.getStandardHeaders(), extraHeaders) + resp, err := f.fsc.client.exec(http.MethodPut, uri, headers, bytes, f.fsc.auth) + if err != nil { + return nil, err + } + defer drainRespBody(resp) + return resp.Header, checkRespCode(resp, []int{http.StatusCreated}) +} + +// SetMetadata replaces the metadata for this file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetFileMetadata. 
HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Metadata +func (f *File) SetMetadata(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compMetadata, resourceFile, mergeMDIntoExtraHeaders(f.Metadata, nil), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties on this file. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetFileProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-File-Properties +func (f *File) SetProperties(options *FileRequestOptions) error { + headers, err := f.fsc.setResourceHeaders(f.buildPath(), compProperties, resourceFile, headersFromStruct(f.Properties), options) + if err != nil { + return err + } + + f.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (f *File) updateEtagAndLastModified(headers http.Header) { + f.Properties.Etag = headers.Get("Etag") + f.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates file properties from the specified HTTP header +func (f *File) updateProperties(header http.Header) { + size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) + if err == nil { + f.Properties.Length = size + } + + f.updateEtagAndLastModified(header) + f.Properties.CacheControl = header.Get("Cache-Control") + f.Properties.Disposition = header.Get("Content-Disposition") + f.Properties.Encoding = header.Get("Content-Encoding") + f.Properties.Language = header.Get("Content-Language") + f.Properties.MD5 = header.Get("Content-MD5") + f.Properties.Type = header.Get("Content-Type") +} + +// URL gets the canonical URL to this file. +// This method does not create a publicly accessible URL if the file +// is private and this method does not check if the file exists. +func (f *File) URL() string { + return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) +} + +// WriteRangeOptions includes options for a write file range operation +type WriteRangeOptions struct { + Timeout uint + ContentMD5 string +} + +// WriteRange writes a range of bytes to this file with an optional MD5 hash of the content (inside +// options parameter). Note that the length of bytes must match (rangeEnd - rangeStart) + 1 with +// a maximum size of 4MB. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Range +func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRangeOptions) error { + if bytes == nil { + return errors.New("bytes cannot be nil") + } + var timeout *uint + var md5 *string + if options != nil { + timeout = &options.Timeout + md5 = &options.ContentMD5 + } + + headers, err := f.modifyRange(bytes, fileRange, timeout, md5) + if err != nil { + return err + } + // it's perfectly legal for multiple go routines to call WriteRange + // on the same *File (e.g. concurrently writing non-overlapping ranges) + // so we must take the file mutex before updating our properties. 
+ f.mutex.Lock() + f.updateEtagAndLastModified(headers) + f.mutex.Unlock() + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go new file mode 100644 index 00000000..1db8e7da --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/fileserviceclient.go @@ -0,0 +1,338 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" +) + +// FileServiceClient contains operations for Microsoft Azure File Service. +type FileServiceClient struct { + client Client + auth authentication +} + +// ListSharesParameters defines the set of customizable parameters to make a +// List Shares call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ListSharesParameters struct { + Prefix string + Marker string + Include string + MaxResults uint + Timeout uint +} + +// ShareListResponse contains the response fields from +// ListShares call. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/List-Shares +type ShareListResponse struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Shares []Share `xml:"Shares>Share"` +} + +type compType string + +const ( + compNone compType = "" + compList compType = "list" + compMetadata compType = "metadata" + compProperties compType = "properties" + compRangeList compType = "rangelist" +) + +func (ct compType) String() string { + return string(ct) +} + +type resourceType string + +const ( + resourceDirectory resourceType = "directory" + resourceFile resourceType = "" + resourceShare resourceType = "share" +) + +func (rt resourceType) String() string { + return string(rt) +} + +func (p ListSharesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.Include != "" { + out.Set("include", p.Include) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + if p.Timeout != 0 { + out.Set("timeout", strconv.FormatUint(uint64(p.Timeout), 10)) + } + + return out +} + +func (p ListDirsAndFilesParameters) getParameters() url.Values { + out := url.Values{} + + if p.Prefix != "" { + out.Set("prefix", p.Prefix) + } + if p.Marker != "" { + out.Set("marker", p.Marker) + } + if p.MaxResults != 0 { + out.Set("maxresults", strconv.FormatUint(uint64(p.MaxResults), 10)) + } + out = addTimeout(out, p.Timeout) + + return out +} + +// returns url.Values for the specified types +func getURLInitValues(comp compType, res resourceType) url.Values { + values := url.Values{} + if comp != compNone { + 
values.Set("comp", comp.String()) + } + if res != resourceFile { + values.Set("restype", res.String()) + } + return values +} + +// GetShareReference returns a Share object for the specified share name. +func (f *FileServiceClient) GetShareReference(name string) *Share { + return &Share{ + fsc: f, + Name: name, + Properties: ShareProperties{ + Quota: -1, + }, + } +} + +// ListShares returns the list of shares in a storage account along with +// pagination token and other response details. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/list-shares +func (f FileServiceClient) ListShares(params ListSharesParameters) (*ShareListResponse, error) { + q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) + + var out ShareListResponse + resp, err := f.listContent("", q, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + err = xmlUnmarshal(resp.Body, &out) + + // assign our client to the newly created Share objects + for i := range out.Shares { + out.Shares[i].fsc = &f + } + return &out, err +} + +// GetServiceProperties gets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-file-service-properties +func (f *FileServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return f.client.getServiceProperties(fileServiceName, f.auth) +} + +// SetServiceProperties sets the properties of your storage account's file service. +// File service does not support logging +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-file-service-properties +func (f *FileServiceClient) SetServiceProperties(props ServiceProperties) error { + return f.client.setServiceProperties(props, fileServiceName, f.auth) +} + +// retrieves directory or share content +func (f FileServiceClient) listContent(path string, params url.Values, extraHeaders map[string]string) (*http.Response, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodGet, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + drainRespBody(resp) + return nil, err + } + + return resp, nil +} + +// returns true if the specified resource exists +func (f FileServiceClient) resourceExists(path string, res resourceType) (bool, http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return false, nil, err + } + + uri := f.client.getEndpoint(fileServiceName, path, getURLInitValues(compNone, res)) + headers := f.client.getStandardHeaders() + + resp, err := f.client.exec(http.MethodHead, uri, headers, nil, f.auth) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, resp.Header, nil + } + } + return false, nil, err +} + +// creates a resource depending on the specified resource type +func (f FileServiceClient) createResource(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string, expectedResponseCodes []int) (http.Header, error) { + resp, err := f.createResourceNoClose(path, res, urlParams, extraHeaders) + if err != nil { 
+ return nil, err + } + defer drainRespBody(resp) + return resp.Header, checkRespCode(resp, expectedResponseCodes) +} + +// creates a resource depending on the specified resource type, doesn't close the response body +func (f FileServiceClient) createResourceNoClose(path string, res resourceType, urlParams url.Values, extraHeaders map[string]string) (*http.Response, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := getURLInitValues(compNone, res) + combinedParams := mergeParams(values, urlParams) + uri := f.client.getEndpoint(fileServiceName, path, combinedParams) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(http.MethodPut, uri, headers, nil, f.auth) +} + +// returns HTTP header data for the specified directory or share +func (f FileServiceClient) getResourceHeaders(path string, comp compType, res resourceType, params url.Values, verb string) (http.Header, error) { + resp, err := f.getResourceNoClose(path, comp, res, params, verb, nil) + if err != nil { + return nil, err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + return resp.Header, nil +} + +// gets the specified resource, doesn't close the response body +func (f FileServiceClient) getResourceNoClose(path string, comp compType, res resourceType, params url.Values, verb string, extraHeaders map[string]string) (*http.Response, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params = mergeParams(params, getURLInitValues(comp, res)) + uri := f.client.getEndpoint(fileServiceName, path, params) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + return f.client.exec(verb, uri, headers, nil, f.auth) +} + +// deletes the resource and returns the response +func (f FileServiceClient) deleteResource(path string, res resourceType, options *FileRequestOptions) error { + resp, err := f.deleteResourceNoClose(path, res, options) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) +} + +// deletes the resource and returns the response, doesn't close the response body +func (f FileServiceClient) deleteResourceNoClose(path string, res resourceType, options *FileRequestOptions) (*http.Response, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + values := mergeParams(getURLInitValues(compNone, res), prepareOptions(options)) + uri := f.client.getEndpoint(fileServiceName, path, values) + return f.client.exec(http.MethodDelete, uri, f.client.getStandardHeaders(), nil, f.auth) +} + +// merges metadata into extraHeaders and returns extraHeaders +func mergeMDIntoExtraHeaders(metadata, extraHeaders map[string]string) map[string]string { + if metadata == nil && extraHeaders == nil { + return nil + } + if extraHeaders == nil { + extraHeaders = make(map[string]string) + } + for k, v := range metadata { + extraHeaders[userDefinedMetadataHeaderPrefix+k] = v + } + return extraHeaders +} + +// sets extra header data for the specified resource +func (f FileServiceClient) setResourceHeaders(path string, comp compType, res resourceType, extraHeaders map[string]string, options *FileRequestOptions) (http.Header, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } + + params := mergeParams(getURLInitValues(comp, res), prepareOptions(options)) 
+ uri := f.client.getEndpoint(fileServiceName, path, params) + extraHeaders = f.client.protectUserAgent(extraHeaders) + headers := mergeHeaders(f.client.getStandardHeaders(), extraHeaders) + + resp, err := f.client.exec(http.MethodPut, uri, headers, nil, f.auth) + if err != nil { + return nil, err + } + defer drainRespBody(resp) + + return resp.Header, checkRespCode(resp, []int{http.StatusOK}) +} + +// checkForStorageEmulator determines if the client is set up for use with +// Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go new file mode 100644 index 00000000..5b4a6514 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/leaseblob.go @@ -0,0 +1,201 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "net/http" + "net/url" + "strconv" + "time" +) + +// lease constants. +const ( + leaseHeaderPrefix = "x-ms-lease-" + headerLeaseID = "x-ms-lease-id" + leaseAction = "x-ms-lease-action" + leaseBreakPeriod = "x-ms-lease-break-period" + leaseDuration = "x-ms-lease-duration" + leaseProposedID = "x-ms-proposed-lease-id" + leaseTime = "x-ms-lease-time" + + acquireLease = "acquire" + renewLease = "renew" + changeLease = "change" + releaseLease = "release" + breakLease = "break" +) + +// leaseCommonPut is common PUT code for the various acquire/release/break etc functions.
+func (b *Blob) leaseCommonPut(headers map[string]string, expectedStatus int, options *LeaseOptions) (http.Header, error) { + params := url.Values{"comp": {"lease"}} + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return nil, err + } + defer drainRespBody(resp) + + if err := checkRespCode(resp, []int{expectedStatus}); err != nil { + return nil, err + } + + return resp.Header, nil +} + +// LeaseOptions includes options for all operations regarding leasing blobs +type LeaseOptions struct { + Timeout uint + Origin string `header:"Origin"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + RequestID string `header:"x-ms-client-request-id"` +} + +// AcquireLease creates a lease for a blob +// returns leaseID acquired +// In API Versions starting on 2012-02-12, the minimum leaseTimeInSeconds is 15, the maximum +// non-infinite leaseTimeInSeconds is 60. To specify an infinite lease, provide the value -1. +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) AcquireLease(leaseTimeInSeconds int, proposedLeaseID string, options *LeaseOptions) (returnedLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = acquireLease + + if leaseTimeInSeconds == -1 { + // Do nothing, but don't trigger the following clauses. + } else if leaseTimeInSeconds > 60 || b.Container.bsc.client.apiVersion < "2012-02-12" { + leaseTimeInSeconds = 60 + } else if leaseTimeInSeconds < 15 { + leaseTimeInSeconds = 15 + } + + headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds) + + if proposedLeaseID != "" { + headers[leaseProposedID] = proposedLeaseID + } + + respHeaders, err := b.leaseCommonPut(headers, http.StatusCreated, options) + if err != nil { + return "", err + } + + returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + + if returnedLeaseID != "" { + return returnedLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// BreakLease breaks the lease for a blob +// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLease(options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + return b.breakLeaseCommon(headers, options) +} + +// BreakLeaseWithBreakPeriod breaks the lease for a blob +// breakPeriodInSeconds is used to determine how long until new lease can be created. 
+// Returns the timeout remaining in the lease in seconds +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) BreakLeaseWithBreakPeriod(breakPeriodInSeconds int, options *LeaseOptions) (breakTimeout int, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = breakLease + headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds) + return b.breakLeaseCommon(headers, options) +} + +// breakLeaseCommon is common code for both versions of BreakLease (with and without break period) +func (b *Blob) breakLeaseCommon(headers map[string]string, options *LeaseOptions) (breakTimeout int, err error) { + + respHeaders, err := b.leaseCommonPut(headers, http.StatusAccepted, options) + if err != nil { + return 0, err + } + + breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime)) + if breakTimeoutStr != "" { + breakTimeout, err = strconv.Atoi(breakTimeoutStr) + if err != nil { + return 0, err + } + } + + return breakTimeout, nil +} + +// ChangeLease changes a lease ID for a blob +// Returns the new LeaseID acquired +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ChangeLease(currentLeaseID string, proposedLeaseID string, options *LeaseOptions) (newLeaseID string, err error) { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = changeLease + headers[headerLeaseID] = currentLeaseID + headers[leaseProposedID] = proposedLeaseID + + respHeaders, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return "", err + } + + newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(headerLeaseID)) + if newLeaseID != "" { + return newLeaseID, nil + } + + return "", errors.New("LeaseID not returned") +} + +// ReleaseLease releases the lease for a blob +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Lease-Blob +func (b *Blob) ReleaseLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = releaseLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} + +// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx +func (b *Blob) RenewLease(currentLeaseID string, options *LeaseOptions) error { + headers := b.Container.bsc.client.getStandardHeaders() + headers[leaseAction] = renewLease + headers[headerLeaseID] = currentLeaseID + + _, err := b.leaseCommonPut(headers, http.StatusOK, options) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go new file mode 100644 index 00000000..ffc183be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/message.go @@ -0,0 +1,171 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "fmt" + "net/http" + "net/url" + "strconv" + "time" +) + +// Message represents an Azure message. +type Message struct { + Queue *Queue + Text string `xml:"MessageText"` + ID string `xml:"MessageId"` + Insertion TimeRFC1123 `xml:"InsertionTime"` + Expiration TimeRFC1123 `xml:"ExpirationTime"` + PopReceipt string `xml:"PopReceipt"` + NextVisible TimeRFC1123 `xml:"TimeNextVisible"` + DequeueCount int `xml:"DequeueCount"` +} + +func (m *Message) buildPath() string { + return fmt.Sprintf("%s/%s", m.Queue.buildPathMessages(), m.ID) +} + +// PutMessageOptions is the set of options that can be specified for the Put +// Message operation. A zero struct does not use any preferences for the request. +type PutMessageOptions struct { + Timeout uint + VisibilityTimeout int + MessageTTL int + RequestID string `header:"x-ms-client-request-id"` +} + +// Put operation adds a new message to the back of the message queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Message +func (m *Message) Put(options *PutMessageOptions) error { + query := url.Values{} + headers := m.Queue.qsc.client.getStandardHeaders() + + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + if options.MessageTTL != 0 { + query.Set("messagettl", strconv.Itoa(options.MessageTTL)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.Queue.buildPathMessages(), query) + resp, err := m.Queue.qsc.client.exec(http.MethodPost, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + err = checkRespCode(resp, []int{http.StatusCreated}) + if err != nil { + return err + } + err = xmlUnmarshal(resp.Body, m) + if err != nil { + return err + } + return nil +} + +// UpdateMessageOptions is the set of options that can be specified for the +// Update Message operation. A zero struct does not use any preferences for the request. +type UpdateMessageOptions struct { + Timeout uint + VisibilityTimeout int + RequestID string `header:"x-ms-client-request-id"` +} + +// Update operation updates the specified message.
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Update-Message +func (m *Message) Update(options *UpdateMessageOptions) error { + query := url.Values{} + if m.PopReceipt != "" { + query.Set("popreceipt", m.PopReceipt) + } + + headers := m.Queue.qsc.client.getStandardHeaders() + req := putMessageRequest{MessageText: m.Text} + body, nn, err := xmlMarshal(req) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(nn) + // visibilitytimeout is required for Update (zero or greater) so set the default here + query.Set("visibilitytimeout", "0") + if options != nil { + if options.VisibilityTimeout != 0 { + query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout)) + } + query = addTimeout(query, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), query) + + resp, err := m.Queue.qsc.client.exec(http.MethodPut, uri, headers, body, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + m.PopReceipt = resp.Header.Get("x-ms-popreceipt") + nextTimeStr := resp.Header.Get("x-ms-time-next-visible") + if nextTimeStr != "" { + nextTime, err := time.Parse(time.RFC1123, nextTimeStr) + if err != nil { + return err + } + m.NextVisible = TimeRFC1123(nextTime) + } + + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// Delete operation deletes the specified message. +// +// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx +func (m *Message) Delete(options *QueueServiceOptions) error { + params := url.Values{"popreceipt": {m.PopReceipt}} + headers := m.Queue.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := m.Queue.qsc.client.getEndpoint(queueServiceName, m.buildPath(), params) + + resp, err := m.Queue.qsc.client.exec(http.MethodDelete, uri, headers, nil, m.Queue.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +type putMessageRequest struct { + XMLName xml.Name `xml:"QueueMessage"` + MessageText string `xml:"MessageText"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go new file mode 100644 index 00000000..0690e85a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/odata.go @@ -0,0 +1,48 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// MetadataLevel determines if operations should return a payload, +// and its level of detail.
+type MetadataLevel string + +// These consts are meant to help with Odata supported operations +const ( + OdataTypeSuffix = "@odata.type" + + // Types + + OdataBinary = "Edm.Binary" + OdataDateTime = "Edm.DateTime" + OdataDouble = "Edm.Double" + OdataGUID = "Edm.Guid" + OdataInt64 = "Edm.Int64" + + // Query options + + OdataFilter = "$filter" + OdataOrderBy = "$orderby" + OdataTop = "$top" + OdataSkip = "$skip" + OdataCount = "$count" + OdataExpand = "$expand" + OdataSelect = "$select" + OdataSearch = "$search" + + EmptyPayload MetadataLevel = "" + NoMetadata MetadataLevel = "application/json;odata=nometadata" + MinimalMetadata MetadataLevel = "application/json;odata=minimalmetadata" + FullMetadata MetadataLevel = "application/json;odata=fullmetadata" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go new file mode 100644 index 00000000..7ffd6382 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -0,0 +1,203 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/xml" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "time" +) + +// GetPageRangesResponse contains the response fields from +// Get Page Ranges call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type GetPageRangesResponse struct { + XMLName xml.Name `xml:"PageList"` + PageList []PageRange `xml:"PageRange"` +} + +// PageRange contains information about a page of a page blob from +// Get Pages Range call. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx +type PageRange struct { + Start int64 `xml:"Start"` + End int64 `xml:"End"` +} + +var ( + errBlobCopyAborted = errors.New("storage: blob copy is aborted") + errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch") +) + +// PutPageOptions includes the options for a put page operation +type PutPageOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + IfSequenceNumberLessThanOrEqualTo *int `header:"x-ms-if-sequence-number-le"` + IfSequenceNumberLessThan *int `header:"x-ms-if-sequence-number-lt"` + IfSequenceNumberEqualTo *int `header:"x-ms-if-sequence-number-eq"` + IfModifiedSince *time.Time `header:"If-Modified-Since"` + IfUnmodifiedSince *time.Time `header:"If-Unmodified-Since"` + IfMatch string `header:"If-Match"` + IfNoneMatch string `header:"If-None-Match"` + RequestID string `header:"x-ms-client-request-id"` +} + +// WriteRange writes a range of pages to a page blob. +// Ranges must be aligned with 512-byte boundaries and the chunk size must be +// a multiple of 512.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
+func (b *Blob) WriteRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
+	if bytes == nil {
+		return errors.New("bytes cannot be nil")
+	}
+	return b.modifyRange(blobRange, bytes, options)
+}
+
+// ClearRange clears the given range in a page blob.
+// Ranges must be aligned to 512-byte boundaries, and the chunk size must be
+// a multiple of 512 bytes.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Page
+func (b *Blob) ClearRange(blobRange BlobRange, options *PutPageOptions) error {
+	return b.modifyRange(blobRange, nil, options)
+}
+
+func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPageOptions) error {
+	if blobRange.End < blobRange.Start {
+		return errors.New("the value for rangeEnd must be greater than or equal to rangeStart")
+	}
+	if blobRange.Start%512 != 0 {
+		return errors.New("the value for rangeStart must be a multiple of 512")
+	}
+	if blobRange.End%512 != 511 {
+		return errors.New("the value for rangeEnd must be a multiple of 512 - 1")
+	}
+
+	params := url.Values{"comp": {"page"}}
+
+	// default to clear
+	write := "clear"
+	var cl uint64
+
+	// if bytes is not nil then this is an update operation
+	if bytes != nil {
+		write = "update"
+		cl = (blobRange.End - blobRange.Start) + 1
+	}
+
+	headers := b.Container.bsc.client.getStandardHeaders()
+	headers["x-ms-blob-type"] = string(BlobTypePage)
+	headers["x-ms-page-write"] = write
+	headers["x-ms-range"] = blobRange.String()
+	headers["Content-Length"] = fmt.Sprintf("%v", cl)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params)
+
+	resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, bytes, b.Container.bsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+	return checkRespCode(resp, []int{http.StatusCreated})
+}
+
+// GetPageRangesOptions includes the options for a get page ranges operation
+type GetPageRangesOptions struct {
+	Timeout          uint
+	Snapshot         *time.Time
+	PreviousSnapshot *time.Time
+	Range            *BlobRange
+	LeaseID          string `header:"x-ms-lease-id"`
+	RequestID        string `header:"x-ms-client-request-id"`
+}
+
+// GetPageRanges returns the list of valid page ranges for a page blob.
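+//
+// For example (a sketch; assumes an existing page blob b):
+//
+//	ranges, err := b.GetPageRanges(nil)
+//	if err == nil {
+//		for _, pr := range ranges.PageList {
+//			fmt.Printf("valid bytes: %d-%d\n", pr.Start, pr.End)
+//		}
+//	}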
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Page-Ranges +func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesResponse, error) { + params := url.Values{"comp": {"pagelist"}} + headers := b.Container.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + params = addSnapshot(params, options.Snapshot) + if options.PreviousSnapshot != nil { + params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) + } + if options.Range != nil { + headers["Range"] = options.Range.String() + } + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + var out GetPageRangesResponse + resp, err := b.Container.bsc.client.exec(http.MethodGet, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return out, err + } + defer drainRespBody(resp) + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return out, err + } + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// PutPageBlob initializes an empty page blob with specified name and maximum +// size in bytes (size must be aligned to a 512-byte boundary). A page blob must +// be created using this method before writing pages. +// +// See CreateBlockBlobFromReader for more info on creating blobs. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob +func (b *Blob) PutPageBlob(options *PutBlobOptions) error { + if b.Properties.ContentLength%512 != 0 { + return errors.New("Content length must be aligned to a 512-byte boundary") + } + + params := url.Values{} + headers := b.Container.bsc.client.getStandardHeaders() + headers["x-ms-blob-type"] = string(BlobTypePage) + headers["x-ms-blob-content-length"] = fmt.Sprintf("%v", b.Properties.ContentLength) + headers["x-ms-blob-sequence-number"] = fmt.Sprintf("%v", b.Properties.SequenceNumber) + headers = mergeHeaders(headers, headersFromStruct(b.Properties)) + headers = b.Container.bsc.client.addMetadataToHeaders(headers, b.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := b.Container.bsc.client.getEndpoint(blobServiceName, b.buildPath(), params) + + resp, err := b.Container.bsc.client.exec(http.MethodPut, uri, headers, nil, b.Container.bsc.auth) + if err != nil { + return err + } + return b.respondCreation(resp, BlobTypePage) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go new file mode 100644 index 00000000..f90050cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queue.go @@ -0,0 +1,436 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/xml" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + "time" +) + +const ( + // casing is per Golang's http.Header canonicalizing the header names. + approximateMessagesCountHeader = "X-Ms-Approximate-Messages-Count" +) + +// QueueAccessPolicy represents each access policy in the queue ACL. +type QueueAccessPolicy struct { + ID string + StartTime time.Time + ExpiryTime time.Time + CanRead bool + CanAdd bool + CanUpdate bool + CanProcess bool +} + +// QueuePermissions represents the queue ACLs. +type QueuePermissions struct { + AccessPolicies []QueueAccessPolicy +} + +// SetQueuePermissionOptions includes options for a set queue permissions operation +type SetQueuePermissionOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// Queue represents an Azure queue. +type Queue struct { + qsc *QueueServiceClient + Name string + Metadata map[string]string + AproxMessageCount uint64 +} + +func (q *Queue) buildPath() string { + return fmt.Sprintf("/%s", q.Name) +} + +func (q *Queue) buildPathMessages() string { + return fmt.Sprintf("%s/messages", q.buildPath()) +} + +// QueueServiceOptions includes options for some queue service operations +type QueueServiceOptions struct { + Timeout uint + RequestID string `header:"x-ms-client-request-id"` +} + +// Create operation creates a queue under the given account. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Queue4 +func (q *Queue) Create(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusCreated}) +} + +// Delete operation permanently deletes the specified queue. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Queue3 +func (q *Queue) Delete(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// Exists returns true if a queue with given name exists. +func (q *Queue) Exists() (bool, error) { + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}}) + resp, err := q.qsc.client.exec(http.MethodGet, uri, q.qsc.client.getStandardHeaders(), nil, q.qsc.auth) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusOK, nil + } + err = getErrorFromResponse(resp) + } + return false, err +} + +// SetMetadata operation sets user-defined metadata on the specified queue. +// Metadata is associated with the queue as name-value pairs. 
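+//
+// For example (a sketch; assumes a queue q obtained from GetQueueReference):
+//
+//	q.Metadata = map[string]string{"owner": "backend-team"}
+//	err := q.SetMetadata(nil)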
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Queue-Metadata
+func (q *Queue) SetMetadata(options *QueueServiceOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := q.qsc.client.getStandardHeaders()
+	headers = q.qsc.client.addMetadataToHeaders(headers, q.Metadata)
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+
+	resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+	return checkRespCode(resp, []int{http.StatusNoContent})
+}
+
+// GetMetadata operation retrieves user-defined metadata and queue
+// properties on the specified queue. Metadata is associated with
+// the queue as name-value pairs.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Queue-Metadata
+//
+// Because of the way Go's http client (and http.Header in particular)
+// canonicalizes header names, the returned metadata names will always
+// be all lower case.
func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
+	params := url.Values{"comp": {"metadata"}}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp)
+
+	if err := checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	aproxMessagesStr := resp.Header.Get(http.CanonicalHeaderKey(approximateMessagesCountHeader))
+	if aproxMessagesStr != "" {
+		aproxMessages, err := strconv.ParseUint(aproxMessagesStr, 10, 64)
+		if err != nil {
+			return err
+		}
+		q.AproxMessageCount = aproxMessages
+	}
+
+	q.Metadata = getMetadataFromHeaders(resp.Header)
+	return nil
+}
+
+// GetMessageReference returns a message object with the specified text.
+func (q *Queue) GetMessageReference(text string) *Message {
+	return &Message{
+		Queue: q,
+		Text:  text,
+	}
+}
+
+// GetMessagesOptions is the set of options that can be specified for the Get
+// Messages operation. A zero struct does not use any preferences for the
+// request.
+type GetMessagesOptions struct {
+	Timeout           uint
+	NumOfMessages     int
+	VisibilityTimeout int
+	RequestID         string `header:"x-ms-client-request-id"`
+}
+
+type messages struct {
+	XMLName  xml.Name  `xml:"QueueMessagesList"`
+	Messages []Message `xml:"QueueMessage"`
+}
+
+// GetMessages operation retrieves one or more messages from the front of the
+// queue.
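+//
+// For example, fetching up to 10 messages and hiding them from other
+// consumers for 30 seconds (a sketch; assumes a queue q):
+//
+//	msgs, err := q.GetMessages(&GetMessagesOptions{
+//		NumOfMessages:     10,
+//		VisibilityTimeout: 30,
+//	})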
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Messages
+func (q *Queue) GetMessages(options *GetMessagesOptions) ([]Message, error) {
+	query := url.Values{}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		if options.VisibilityTimeout != 0 {
+			query.Set("visibilitytimeout", strconv.Itoa(options.VisibilityTimeout))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer resp.Body.Close()
+
+	var out messages
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, err
+}
+
+// PeekMessagesOptions is the set of options that can be specified for the
+// Peek Messages operation. A zero struct does not use any preferences for the
+// request.
+type PeekMessagesOptions struct {
+	Timeout       uint
+	NumOfMessages int
+	RequestID     string `header:"x-ms-client-request-id"`
+}
+
+// PeekMessages retrieves one or more messages from the front of the queue, but
+// does not alter the visibility of the message.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Peek-Messages
+func (q *Queue) PeekMessages(options *PeekMessagesOptions) ([]Message, error) {
+	query := url.Values{"peekonly": {"true"}} // Required for peek operation
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		if options.NumOfMessages != 0 {
+			query.Set("numofmessages", strconv.Itoa(options.NumOfMessages))
+		}
+		query = addTimeout(query, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), query)
+
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return []Message{}, err
+	}
+	defer resp.Body.Close()
+
+	var out messages
+	err = xmlUnmarshal(resp.Body, &out)
+	if err != nil {
+		return []Message{}, err
+	}
+	for i := range out.Messages {
+		out.Messages[i].Queue = q
+	}
+	return out.Messages, err
+}
+
+// ClearMessages operation deletes all messages from the specified queue.
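+//
+// For example (a sketch; assumes a queue q):
+//
+//	if err := q.ClearMessages(nil); err != nil {
+//		// handle the error
+//	}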
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Clear-Messages +func (q *Queue) ClearMessages(options *QueueServiceOptions) error { + params := url.Values{} + headers := q.qsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPathMessages(), params) + + resp, err := q.qsc.client.exec(http.MethodDelete, uri, headers, nil, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// SetPermissions sets up queue permissions +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-acl +func (q *Queue) SetPermissions(permissions QueuePermissions, options *SetQueuePermissionOptions) error { + body, length, err := generateQueueACLpayload(permissions.AccessPolicies) + if err != nil { + return err + } + + params := url.Values{ + "comp": {"acl"}, + } + headers := q.qsc.client.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params) + resp, err := q.qsc.client.exec(http.MethodPut, uri, headers, body, q.qsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +func generateQueueACLpayload(policies []QueueAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, qapd := range policies { + permission := qapd.generateQueuePermissions() + signedIdentifier := convertAccessPolicyToXMLStructs(qapd.ID, qapd.StartTime, qapd.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +func (qapd *QueueAccessPolicy) generateQueuePermissions() (permissions string) { + // generate the permissions string (raup). + // still want the end user API to have bool flags. 
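+	// e.g. CanRead plus CanProcess produces "rp"; the order is always r, a, u, p.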
+	permissions = ""
+
+	if qapd.CanRead {
+		permissions += "r"
+	}
+
+	if qapd.CanAdd {
+		permissions += "a"
+	}
+
+	if qapd.CanUpdate {
+		permissions += "u"
+	}
+
+	if qapd.CanProcess {
+		permissions += "p"
+	}
+
+	return permissions
+}
+
+// GetQueuePermissionOptions includes options for a get queue permissions operation
+type GetQueuePermissionOptions struct {
+	Timeout   uint
+	RequestID string `header:"x-ms-client-request-id"`
+}
+
+// GetPermissions gets the queue permissions as per https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-acl
+// If timeout is 0 then it will not be passed to Azure
+func (q *Queue) GetPermissions(options *GetQueuePermissionOptions) (*QueuePermissions, error) {
+	params := url.Values{
+		"comp": {"acl"},
+	}
+	headers := q.qsc.client.getStandardHeaders()
+
+	if options != nil {
+		params = addTimeout(params, options.Timeout)
+		headers = mergeHeaders(headers, headersFromStruct(*options))
+	}
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)
+	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var ap AccessPolicy
+	err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList)
+	if err != nil {
+		return nil, err
+	}
+	return buildQueueAccessPolicy(ap, &resp.Header), nil
+}
+
+func buildQueueAccessPolicy(ap AccessPolicy, headers *http.Header) *QueuePermissions {
+	permissions := QueuePermissions{
+		AccessPolicies: []QueueAccessPolicy{},
+	}
+
+	for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers {
+		qapd := QueueAccessPolicy{
+			ID:         policy.ID,
+			StartTime:  policy.AccessPolicy.StartTime,
+			ExpiryTime: policy.AccessPolicy.ExpiryTime,
+		}
+		qapd.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r")
+		qapd.CanAdd = updatePermissions(policy.AccessPolicy.Permission, "a")
+		qapd.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u")
+		qapd.CanProcess = updatePermissions(policy.AccessPolicy.Permission, "p")
+
+		permissions.AccessPolicies = append(permissions.AccessPolicies, qapd)
+	}
+	return &permissions
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
new file mode 100644
index 00000000..28d9ab93
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queuesasuri.go
@@ -0,0 +1,146 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// QueueSASOptions are options to construct a queue SAS
+// URI.
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+type QueueSASOptions struct {
+	QueueSASPermissions
+	SASOptions
+}
+
+// QueueSASPermissions includes the available permissions for
+// a queue SAS URI.
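+//
+// For example, a read-and-process SAS could be requested like this (a sketch;
+// assumes a queue q and an expiry time exp; the SASOptions fields are used as
+// in GetSASURI below):
+//
+//	opts := QueueSASOptions{
+//		QueueSASPermissions: QueueSASPermissions{Read: true, Process: true},
+//		SASOptions:          SASOptions{Expiry: exp, UseHTTPS: true},
+//	}
+//	sasURL, err := q.GetSASURI(opts)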
+type QueueSASPermissions struct {
+	Read    bool
+	Add     bool
+	Update  bool
+	Process bool
+}
+
+func (q QueueSASPermissions) buildString() string {
+	permissions := ""
+
+	if q.Read {
+		permissions += "r"
+	}
+	if q.Add {
+		permissions += "a"
+	}
+	if q.Update {
+		permissions += "u"
+	}
+	if q.Process {
+		permissions += "p"
+	}
+	return permissions
+}
+
+// GetSASURI creates a URL to the specified queue which contains the Shared
+// Access Signature with the specified permissions and expiration time.
+//
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+func (q *Queue) GetSASURI(options QueueSASOptions) (string, error) {
+	canonicalizedResource, err := q.qsc.client.buildCanonicalizedResource(q.buildPath(), q.qsc.auth, true)
+	if err != nil {
+		return "", err
+	}
+
+	// "The canonicalizedresource portion of the string is a canonical path to the signed resource.
+	// It must include the service name (blob, table, queue or file) for version 2015-02-21 or
+	// later, the storage account name, and the resource name, and must be URL-decoded.
+	// -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	// We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component).
+	canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1)
+	canonicalizedResource, err = url.QueryUnescape(canonicalizedResource)
+	if err != nil {
+		return "", err
+	}
+
+	signedStart := ""
+	if options.Start != (time.Time{}) {
+		signedStart = options.Start.UTC().Format(time.RFC3339)
+	}
+	signedExpiry := options.Expiry.UTC().Format(time.RFC3339)
+
+	protocols := "https,http"
+	if options.UseHTTPS {
+		protocols = "https"
+	}
+
+	permissions := options.QueueSASPermissions.buildString()
+	stringToSign, err := queueSASStringToSign(q.qsc.client.apiVersion, canonicalizedResource, signedStart, signedExpiry, options.IP, permissions, protocols, options.Identifier)
+	if err != nil {
+		return "", err
+	}
+
+	sig := q.qsc.client.computeHmac256(stringToSign)
+	sasParams := url.Values{
+		"sv":  {q.qsc.client.apiVersion},
+		"se":  {signedExpiry},
+		"sp":  {permissions},
+		"sig": {sig},
+	}
+
+	if q.qsc.client.apiVersion >= "2015-04-05" {
+		sasParams.Add("spr", protocols)
+		addQueryParameter(sasParams, "sip", options.IP)
+	}
+
+	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), nil)
+	sasURL, err := url.Parse(uri)
+	if err != nil {
+		return "", err
+	}
+	sasURL.RawQuery = sasParams.Encode()
+	return sasURL.String(), nil
+}
+
+func queueSASStringToSign(signedVersion, canonicalizedResource, signedStart, signedExpiry, signedIP, signedPermissions, protocols, signedIdentifier string) (string, error) {
+
+	if signedVersion >= "2015-02-21" {
+		canonicalizedResource = "/queue" + canonicalizedResource
+	}
+
+	// https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx#Anchor_12
+	if signedVersion >= "2015-04-05" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
+			signedPermissions,
+			signedStart,
+			signedExpiry,
+			canonicalizedResource,
+			signedIdentifier,
+			signedIP,
+			protocols,
+			signedVersion), nil
+
+	}
+
+	// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
+	if signedVersion >= "2013-08-15" {
+		return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion), nil
+	}
+
+	return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
new file mode 100644
index 00000000..29febe14
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/queueserviceclient.go
@@ -0,0 +1,42 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// QueueServiceClient contains operations for Microsoft Azure Queue Storage
+// Service.
+type QueueServiceClient struct {
+	client Client
+	auth   authentication
+}
+
+// GetServiceProperties gets the properties of your storage account's queue service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-queue-service-properties
+func (q *QueueServiceClient) GetServiceProperties() (*ServiceProperties, error) {
+	return q.client.getServiceProperties(queueServiceName, q.auth)
+}
+
+// SetServiceProperties sets the properties of your storage account's queue service.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-queue-service-properties
+func (q *QueueServiceClient) SetServiceProperties(props ServiceProperties) error {
+	return q.client.setServiceProperties(props, queueServiceName, q.auth)
+}
+
+// GetQueueReference returns a Queue object for the specified queue name.
+func (q *QueueServiceClient) GetQueueReference(name string) *Queue {
+	return &Queue{
+		qsc:  q,
+		Name: name,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
new file mode 100644
index 00000000..cf75a265
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/share.go
@@ -0,0 +1,216 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+)
+
+// Share represents an Azure file share.
+type Share struct {
+	fsc        *FileServiceClient
+	Name       string          `xml:"Name"`
+	Properties ShareProperties `xml:"Properties"`
+	Metadata   map[string]string
+}
+
+// ShareProperties contains various properties of a share.
+type ShareProperties struct {
+	LastModified string `xml:"Last-Modified"`
+	Etag         string `xml:"Etag"`
+	Quota        int    `xml:"Quota"`
+}
+
+// builds the complete path for this share object.
+func (s *Share) buildPath() string { + return fmt.Sprintf("/%s", s.Name) +} + +// Create this share under the associated account. +// If a share with the same name already exists, the operation fails. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) Create(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + headers, err := s.fsc.createResource(s.buildPath(), resourceShare, params, mergeMDIntoExtraHeaders(s.Metadata, extraheaders), []int{http.StatusCreated}) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// CreateIfNotExists creates this share under the associated account if +// it does not exist. Returns true if the share is newly created or false if +// the share already exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Create-Share +func (s *Share) CreateIfNotExists(options *FileRequestOptions) (bool, error) { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + params := prepareOptions(options) + resp, err := s.fsc.createResourceNoClose(s.buildPath(), resourceShare, params, extraheaders) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusConflict { + if resp.StatusCode == http.StatusCreated { + s.updateEtagAndLastModified(resp.Header) + return true, nil + } + return false, s.FetchAttributes(nil) + } + } + + return false, err +} + +// Delete marks this share for deletion. The share along with any files +// and directories contained within it are later deleted during garbage +// collection. If the share does not exist the operation fails +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) Delete(options *FileRequestOptions) error { + return s.fsc.deleteResource(s.buildPath(), resourceShare, options) +} + +// DeleteIfExists operation marks this share for deletion if it exists. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Delete-Share +func (s *Share) DeleteIfExists(options *FileRequestOptions) (bool, error) { + resp, err := s.fsc.deleteResourceNoClose(s.buildPath(), resourceShare, options) + if resp != nil { + defer drainRespBody(resp) + if resp.StatusCode == http.StatusAccepted || resp.StatusCode == http.StatusNotFound { + return resp.StatusCode == http.StatusAccepted, nil + } + } + return false, err +} + +// Exists returns true if this share already exists +// on the storage account, otherwise returns false. +func (s *Share) Exists() (bool, error) { + exists, headers, err := s.fsc.resourceExists(s.buildPath(), resourceShare) + if exists { + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + } + return exists, err +} + +// FetchAttributes retrieves metadata and properties for this share. 
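+//
+// For example (a sketch; assumes a share s obtained from GetShareReference):
+//
+//	if err := s.FetchAttributes(nil); err == nil {
+//		fmt.Println("share quota:", s.Properties.Quota)
+//	}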
+// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-share-properties +func (s *Share) FetchAttributes(options *FileRequestOptions) error { + params := prepareOptions(options) + headers, err := s.fsc.getResourceHeaders(s.buildPath(), compNone, resourceShare, params, http.MethodHead) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + s.updateQuota(headers) + s.Metadata = getMetadataFromHeaders(headers) + + return nil +} + +// GetRootDirectoryReference returns a Directory object at the root of this share. +func (s *Share) GetRootDirectoryReference() *Directory { + return &Directory{ + fsc: s.fsc, + share: s, + } +} + +// ServiceClient returns the FileServiceClient associated with this share. +func (s *Share) ServiceClient() *FileServiceClient { + return s.fsc +} + +// SetMetadata replaces the metadata for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetShareMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-share-metadata +func (s *Share) SetMetadata(options *FileRequestOptions) error { + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compMetadata, resourceShare, mergeMDIntoExtraHeaders(s.Metadata, nil), options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// SetProperties sets system properties for this share. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by SetShareProperties. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Set-Share-Properties +func (s *Share) SetProperties(options *FileRequestOptions) error { + extraheaders := map[string]string{} + if s.Properties.Quota > 0 { + if s.Properties.Quota > 5120 { + return fmt.Errorf("invalid value %v for quota, valid values are [1, 5120]", s.Properties.Quota) + } + extraheaders["x-ms-share-quota"] = strconv.Itoa(s.Properties.Quota) + } + + headers, err := s.fsc.setResourceHeaders(s.buildPath(), compProperties, resourceShare, extraheaders, options) + if err != nil { + return err + } + + s.updateEtagAndLastModified(headers) + return nil +} + +// updates Etag and last modified date +func (s *Share) updateEtagAndLastModified(headers http.Header) { + s.Properties.Etag = headers.Get("Etag") + s.Properties.LastModified = headers.Get("Last-Modified") +} + +// updates quota value +func (s *Share) updateQuota(headers http.Header) { + quota, err := strconv.Atoi(headers.Get("x-ms-share-quota")) + if err == nil { + s.Properties.Quota = quota + } +} + +// URL gets the canonical URL to this share. This method does not create a publicly accessible +// URL if the share is private and this method does not check if the share exists. 
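+//
+// For example (a sketch), a share named "backups" on account "myacct"
+// would typically yield https://myacct.file.core.windows.net/backups.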
+func (s *Share) URL() string {
+	return s.fsc.client.getEndpoint(fileServiceName, s.buildPath(), url.Values{})
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
new file mode 100644
index 00000000..056ab398
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storagepolicy.go
@@ -0,0 +1,61 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"strings"
+	"time"
+)
+
+// AccessPolicyDetailsXML has specifics about an access policy
+// annotated with XML details.
+type AccessPolicyDetailsXML struct {
+	StartTime  time.Time `xml:"Start"`
+	ExpiryTime time.Time `xml:"Expiry"`
+	Permission string    `xml:"Permission"`
+}
+
+// SignedIdentifier is a wrapper for a specific policy
+type SignedIdentifier struct {
+	ID           string                 `xml:"Id"`
+	AccessPolicy AccessPolicyDetailsXML `xml:"AccessPolicy"`
+}
+
+// SignedIdentifiers part of the response from GetPermissions call.
+type SignedIdentifiers struct {
+	SignedIdentifiers []SignedIdentifier `xml:"SignedIdentifier"`
+}
+
+// AccessPolicy is the response type from the GetPermissions call.
+type AccessPolicy struct {
+	SignedIdentifiersList SignedIdentifiers `xml:"SignedIdentifiers"`
+}
+
+// convertAccessPolicyToXMLStructs converts the supplied access policy details,
+// which are a friendlier shape for API usage, into the SignedIdentifier struct
+// that will later be marshaled to XML.
+func convertAccessPolicyToXMLStructs(id string, startTime time.Time, expiryTime time.Time, permissions string) SignedIdentifier {
+	return SignedIdentifier{
+		ID: id,
+		AccessPolicy: AccessPolicyDetailsXML{
+			StartTime:  startTime.UTC().Round(time.Second),
+			ExpiryTime: expiryTime.UTC().Round(time.Second),
+			Permission: permissions,
+		},
+	}
+}
+
+func updatePermissions(permissions, permission string) bool {
+	return strings.Contains(permissions, permission)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
new file mode 100644
index 00000000..dc419922
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/storageservice.go
@@ -0,0 +1,150 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +import ( + "net/http" + "net/url" + "strconv" +) + +// ServiceProperties represents the storage account service properties +type ServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + DeleteRetentionPolicy *RetentionPolicy // blob storage only + StaticWebsite *StaticWebsite // blob storage only +} + +// Logging represents the Azure Analytics Logging settings +type Logging struct { + Version string + Delete bool + Read bool + Write bool + RetentionPolicy *RetentionPolicy +} + +// RetentionPolicy indicates if retention is enabled and for how many days +type RetentionPolicy struct { + Enabled bool + Days *int +} + +// Metrics provide request statistics. +type Metrics struct { + Version string + Enabled bool + IncludeAPIs *bool + RetentionPolicy *RetentionPolicy +} + +// Cors includes all the CORS rules +type Cors struct { + CorsRule []CorsRule +} + +// CorsRule includes all settings for a Cors rule +type CorsRule struct { + AllowedOrigins string + AllowedMethods string + MaxAgeInSeconds int + ExposedHeaders string + AllowedHeaders string +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // Enabled - Indicates whether this account is hosting a static website + Enabled bool + // IndexDocument - The default name of the index page under each directory + IndexDocument *string + // ErrorDocument404Path - The absolute path of the custom 404 page + ErrorDocument404Path *string +} + +func (c Client) getServiceProperties(service string, auth authentication) (*ServiceProperties, error) { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + headers := c.getStandardHeaders() + + resp, err := c.exec(http.MethodGet, uri, headers, nil, auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + var out ServiceProperties + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return nil, err + } + + return &out, nil +} + +func (c Client) setServiceProperties(props ServiceProperties, service string, auth authentication) error { + query := url.Values{ + "restype": {"service"}, + "comp": {"properties"}, + } + uri := c.getEndpoint(service, "", query) + + // Ideally, StorageServiceProperties would be the output struct + // This is to avoid golint stuttering, while generating the correct XML + type StorageServiceProperties struct { + Logging *Logging + HourMetrics *Metrics + MinuteMetrics *Metrics + Cors *Cors + DeleteRetentionPolicy *RetentionPolicy + StaticWebsite *StaticWebsite + } + input := StorageServiceProperties{ + Logging: props.Logging, + HourMetrics: props.HourMetrics, + MinuteMetrics: props.MinuteMetrics, + Cors: props.Cors, + } + // only set these fields for blob storage else it's invalid XML + if service == blobServiceName { + input.DeleteRetentionPolicy = props.DeleteRetentionPolicy + input.StaticWebsite = props.StaticWebsite + } + + body, length, err := xmlMarshal(input) + if err != nil { + return err + } + + headers := c.getStandardHeaders() + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := c.exec(http.MethodPut, uri, headers, body, auth) + if err != nil { + return err + } + defer drainRespBody(resp) + return checkRespCode(resp, []int{http.StatusAccepted}) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go 
b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
new file mode 100644
index 00000000..22d9b4f5
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table.go
@@ -0,0 +1,419 @@
+package storage
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	tablesURIPath                  = "/Tables"
+	nextTableQueryParameter        = "NextTableName"
+	headerNextPartitionKey         = "x-ms-continuation-NextPartitionKey"
+	headerNextRowKey               = "x-ms-continuation-NextRowKey"
+	nextPartitionKeyQueryParameter = "NextPartitionKey"
+	nextRowKeyQueryParameter       = "NextRowKey"
+)
+
+// TableAccessPolicy is used for setting table policies
+type TableAccessPolicy struct {
+	ID         string
+	StartTime  time.Time
+	ExpiryTime time.Time
+	CanRead    bool
+	CanAppend  bool
+	CanUpdate  bool
+	CanDelete  bool
+}
+
+// Table represents an Azure table.
+type Table struct {
+	tsc           *TableServiceClient
+	Name          string `json:"TableName"`
+	OdataEditLink string `json:"odata.editLink"`
+	OdataID       string `json:"odata.id"`
+	OdataMetadata string `json:"odata.metadata"`
+	OdataType     string `json:"odata.type"`
+}
+
+// EntityQueryResult contains the response from
+// ExecuteQuery and ExecuteQueryNextResults functions.
+type EntityQueryResult struct {
+	OdataMetadata string    `json:"odata.metadata"`
+	Entities      []*Entity `json:"value"`
+	QueryNextLink
+	table *Table
+}
+
+type continuationToken struct {
+	NextPartitionKey string
+	NextRowKey       string
+}
+
+func (t *Table) buildPath() string {
+	return fmt.Sprintf("/%s", t.Name)
+}
+
+func (t *Table) buildSpecificPath() string {
+	return fmt.Sprintf("%s('%s')", tablesURIPath, t.Name)
+}
+
+// Get gets the referenced table.
+// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/querying-tables-and-entities
+func (t *Table) Get(timeout uint, ml MetadataLevel) error {
+	if ml == EmptyPayload {
+		return errEmptyPayload
+	}
+
+	query := url.Values{
+		"timeout": {strconv.FormatUint(uint64(timeout), 10)},
+	}
+	headers := t.tsc.client.getStandardHeaders()
+	headers[headerAccept] = string(ml)
+
+	uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), query)
+	resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if err = checkRespCode(resp, []int{http.StatusOK}); err != nil {
+		return err
+	}
+
+	respBody, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+	err = json.Unmarshal(respBody, t)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Create creates the referenced table.
+// This function fails if the name is not compliant
+// with the specification or the table already exists.
+// ml determines the level of detail of metadata in the operation response,
+// or no data at all.
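+//
+// For example (a sketch; assumes a TableServiceClient tsc and that
+// GetTableReference returns a *Table for the given name):
+//
+//	tbl := tsc.GetTableReference("employees")
+//	err := tbl.Create(30, EmptyPayload, nil)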
+// See https://docs.microsoft.com/rest/api/storageservices/fileservices/create-table +func (t *Table) Create(timeout uint, ml MetadataLevel, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, tablesURIPath, url.Values{ + "timeout": {strconv.FormatUint(uint64(timeout), 10)}, + }) + + type createTableRequest struct { + TableName string `json:"TableName"` + } + req := createTableRequest{TableName: t.Name} + buf := new(bytes.Buffer) + if err := json.NewEncoder(buf).Encode(req); err != nil { + return err + } + + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, ml) + headers = addBodyRelatedHeaders(headers, buf.Len()) + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodPost, uri, headers, buf, t.tsc.auth) + if err != nil { + return err + } + defer resp.Body.Close() + + if ml == EmptyPayload { + if err := checkRespCode(resp, []int{http.StatusNoContent}); err != nil { + return err + } + } else { + if err := checkRespCode(resp, []int{http.StatusCreated}); err != nil { + return err + } + } + + if ml != EmptyPayload { + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + err = json.Unmarshal(data, t) + if err != nil { + return err + } + } + + return nil +} + +// Delete deletes the referenced table. +// This function fails if the table is not present. +// Be advised: Delete deletes all the entries that may be present. +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/delete-table +func (t *Table) Delete(timeout uint, options *TableOptions) error { + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildSpecificPath(), url.Values{ + "timeout": {strconv.Itoa(int(timeout))}, + }) + + headers := t.tsc.client.getStandardHeaders() + headers = addReturnContentHeaders(headers, EmptyPayload) + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodDelete, uri, headers, nil, t.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +// QueryOptions includes options for a query entities operation. +// Top, filter and select are OData query options. +type QueryOptions struct { + Top uint + Filter string + Select []string + RequestID string +} + +func (options *QueryOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + if len(options.Select) > 0 { + query.Add(OdataSelect, strings.Join(options.Select, ",")) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryEntities returns the entities in the table. +// You can use query options defined by the OData Protocol specification. 
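+//
+// For example, fetching at most five entities with two properties each
+// (a sketch; assumes a *Table tbl):
+//
+//	opts := &QueryOptions{
+//		Top:    5,
+//		Filter: "Age gt 30",
+//		Select: []string{"Name", "Age"},
+//	}
+//	result, err := tbl.QueryEntities(30, MinimalMetadata, opts)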
+// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +func (t *Table) QueryEntities(timeout uint, ml MetadataLevel, options *QueryOptions) (*EntityQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + query, headers := options.getParameters() + query = addTimeout(query, timeout) + uri := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), query) + return t.queryEntities(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryEntities or NextResults operation. +// +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-entities +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (eqr *EntityQueryResult) NextResults(options *TableOptions) (*EntityQueryResult, error) { + if eqr == nil { + return nil, errNilPreviousResult + } + if eqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + return eqr.table.queryEntities(*eqr.NextLink, headers, eqr.ml) +} + +// SetPermissions sets up table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/Set-Table-ACL +func (t *Table) SetPermissions(tap []TableAccessPolicy, timeout uint, options *TableOptions) error { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + body, length, err := generateTableACLPayload(tap) + if err != nil { + return err + } + headers["Content-Length"] = strconv.Itoa(length) + + resp, err := t.tsc.client.exec(http.MethodPut, uri, headers, body, t.tsc.auth) + if err != nil { + return err + } + defer drainRespBody(resp) + + return checkRespCode(resp, []int{http.StatusNoContent}) +} + +func generateTableACLPayload(policies []TableAccessPolicy) (io.Reader, int, error) { + sil := SignedIdentifiers{ + SignedIdentifiers: []SignedIdentifier{}, + } + for _, tap := range policies { + permission := generateTablePermissions(&tap) + signedIdentifier := convertAccessPolicyToXMLStructs(tap.ID, tap.StartTime, tap.ExpiryTime, permission) + sil.SignedIdentifiers = append(sil.SignedIdentifiers, signedIdentifier) + } + return xmlMarshal(sil) +} + +// GetPermissions gets the table ACL permissions +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/get-table-acl +func (t *Table) GetPermissions(timeout int, options *TableOptions) ([]TableAccessPolicy, error) { + params := url.Values{"comp": {"acl"}, + "timeout": {strconv.Itoa(int(timeout))}, + } + + uri := t.tsc.client.getEndpoint(tableServiceName, t.Name, params) + headers := t.tsc.client.getStandardHeaders() + headers = options.addToHeaders(headers) + + resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + var ap AccessPolicy + err = xmlUnmarshal(resp.Body, &ap.SignedIdentifiersList) + if err != nil { + return nil, err + } + return updateTableAccessPolicy(ap), nil +} + +func (t *Table) queryEntities(uri string, headers map[string]string, ml MetadataLevel) (*EntityQueryResult, error) { + headers = mergeHeaders(headers, t.tsc.client.getStandardHeaders()) + if ml != EmptyPayload { + headers[headerAccept] = string(ml) + } + + 
resp, err := t.tsc.client.exec(http.MethodGet, uri, headers, nil, t.tsc.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err = checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var entities EntityQueryResult + err = json.Unmarshal(data, &entities) + if err != nil { + return nil, err + } + + for i := range entities.Entities { + entities.Entities[i].Table = t + } + entities.table = t + + contToken := extractContinuationTokenFromHeaders(resp.Header) + if contToken == nil { + entities.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextPartitionKeyQueryParameter, contToken.NextPartitionKey) + v.Set(nextRowKeyQueryParameter, contToken.NextRowKey) + newURI := t.tsc.client.getEndpoint(tableServiceName, t.buildPath(), v) + entities.NextLink = &newURI + entities.ml = ml + } + + return &entities, nil +} + +func extractContinuationTokenFromHeaders(h http.Header) *continuationToken { + ct := continuationToken{ + NextPartitionKey: h.Get(headerNextPartitionKey), + NextRowKey: h.Get(headerNextRowKey), + } + + if ct.NextPartitionKey != "" && ct.NextRowKey != "" { + return &ct + } + return nil +} + +func updateTableAccessPolicy(ap AccessPolicy) []TableAccessPolicy { + taps := []TableAccessPolicy{} + for _, policy := range ap.SignedIdentifiersList.SignedIdentifiers { + tap := TableAccessPolicy{ + ID: policy.ID, + StartTime: policy.AccessPolicy.StartTime, + ExpiryTime: policy.AccessPolicy.ExpiryTime, + } + tap.CanRead = updatePermissions(policy.AccessPolicy.Permission, "r") + tap.CanAppend = updatePermissions(policy.AccessPolicy.Permission, "a") + tap.CanUpdate = updatePermissions(policy.AccessPolicy.Permission, "u") + tap.CanDelete = updatePermissions(policy.AccessPolicy.Permission, "d") + + taps = append(taps, tap) + } + return taps +} + +func generateTablePermissions(tap *TableAccessPolicy) (permissions string) { + // generate the permissions string (raud). + // still want the end user API to have bool flags. + permissions = "" + + if tap.CanRead { + permissions += "r" + } + + if tap.CanAppend { + permissions += "a" + } + + if tap.CanUpdate { + permissions += "u" + } + + if tap.CanDelete { + permissions += "d" + } + return permissions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go new file mode 100644 index 00000000..5b05e3e2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -0,0 +1,325 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "mime/multipart" + "net/http" + "net/textproto" + "sort" + "strings" +) + +// Operation type. Insert, Delete, Replace etc. 
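+//
+// A typical batch combines several operations against one partition
+// (a sketch; assumes a *Table tbl and *Entity values e1 and e2):
+//
+//	batch := tbl.NewBatch()
+//	batch.InsertEntity(e1)
+//	batch.ReplaceEntity(e2)
+//	err := batch.ExecuteBatch()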
+type Operation int
+
+// consts for batch operations.
+const (
+	InsertOp          = Operation(1)
+	DeleteOp          = Operation(2)
+	ReplaceOp         = Operation(3)
+	MergeOp           = Operation(4)
+	InsertOrReplaceOp = Operation(5)
+	InsertOrMergeOp   = Operation(6)
+)
+
+// BatchEntity is used for tracking Entities to operate on and
+// whether operations (replace/merge etc) should be forced.
+// Wrapper for regular Entity with additional data specific for the entity.
+type BatchEntity struct {
+	*Entity
+	Force bool
+	Op    Operation
+}
+
+// TableBatch stores all the entities that will be operated on during a batch process.
+// Entities can be inserted, replaced or deleted.
+type TableBatch struct {
+	BatchEntitySlice []BatchEntity
+
+	// reference to table we're operating on.
+	Table *Table
+}
+
+// defaultChangesetHeaders for changeSets
+var defaultChangesetHeaders = map[string]string{
+	"Accept":       "application/json;odata=minimalmetadata",
+	"Content-Type": "application/json",
+	"Prefer":       "return-no-content",
+}
+
+// NewBatch return new TableBatch for populating.
+func (t *Table) NewBatch() *TableBatch {
+	return &TableBatch{
+		Table: t,
+	}
+}
+
+// InsertEntity adds an entity in preparation for a batch insert.
+func (t *TableBatch) InsertEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: InsertOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntity adds an entity in preparation for a batch insert or replace.
+func (t *TableBatch) InsertOrReplaceEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrReplaceEntityByForce adds an entity in preparation for a batch insert or replace. Forces regardless of ETag
+func (t *TableBatch) InsertOrReplaceEntityByForce(entity *Entity) {
+	t.InsertOrReplaceEntity(entity, true)
+}
+
+// InsertOrMergeEntity adds an entity in preparation for a batch insert or merge.
+func (t *TableBatch) InsertOrMergeEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: InsertOrMergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// InsertOrMergeEntityByForce adds an entity in preparation for a batch insert or merge. Forces regardless of ETag
+func (t *TableBatch) InsertOrMergeEntityByForce(entity *Entity) {
+	t.InsertOrMergeEntity(entity, true)
+}
+
+// ReplaceEntity adds an entity in preparation for a batch replace.
+func (t *TableBatch) ReplaceEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: ReplaceOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntity adds an entity in preparation for a batch delete
+func (t *TableBatch) DeleteEntity(entity *Entity, force bool) {
+	be := BatchEntity{Entity: entity, Force: force, Op: DeleteOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// DeleteEntityByForce adds an entity in preparation for a batch delete. Forces regardless of ETag
+func (t *TableBatch) DeleteEntityByForce(entity *Entity, force bool) {
+	t.DeleteEntity(entity, true)
+}
+
+// MergeEntity adds an entity in preparation for a batch merge
+func (t *TableBatch) MergeEntity(entity *Entity) {
+	be := BatchEntity{Entity: entity, Force: false, Op: MergeOp}
+	t.BatchEntitySlice = append(t.BatchEntitySlice, be)
+}
+
+// ExecuteBatch executes many table operations in one request to Azure.
+// The operations can be combinations of Insert, Delete, Replace and Merge.
+// It creates the inner changeset body (the various operations: Insert, Delete etc.) and then the outer request packet that encompasses
+// the changesets.
+// As per the document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions
+func (t *TableBatch) ExecuteBatch() error {
+
+	id, err := newUUID()
+	if err != nil {
+		return err
+	}
+
+	changesetBoundary := fmt.Sprintf("changeset_%s", id.String())
+	uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil)
+	changesetBody, err := t.generateChangesetBody(changesetBoundary)
+	if err != nil {
+		return err
+	}
+
+	id, err = newUUID()
+	if err != nil {
+		return err
+	}
+
+	boundary := fmt.Sprintf("batch_%s", id.String())
+	body, err := generateBody(changesetBody, changesetBoundary, boundary)
+	if err != nil {
+		return err
+	}
+
+	headers := t.Table.tsc.client.getStandardHeaders()
+	headers[headerContentType] = fmt.Sprintf("multipart/mixed; boundary=%s", boundary)
+
+	resp, err := t.Table.tsc.client.execBatchOperationJSON(http.MethodPost, uri, headers, bytes.NewReader(body.Bytes()), t.Table.tsc.auth)
+	if err != nil {
+		return err
+	}
+	defer drainRespBody(resp.resp)
+
+	if err = checkRespCode(resp.resp, []int{http.StatusAccepted}); err != nil {
+
+		// check which operation in the batch failed.
+		operationFailedMessage := t.getFailedOperation(resp.odata.Err.Message.Value)
+		requestID, date, version := getDebugHeaders(resp.resp.Header)
+		return AzureStorageServiceError{
+			StatusCode: resp.resp.StatusCode,
+			Code:       resp.odata.Err.Code,
+			RequestID:  requestID,
+			Date:       date,
+			APIVersion: version,
+			Message:    operationFailedMessage,
+		}
+	}
+
+	return nil
+}
+
+// getFailedOperation parses the original Azure error string, determines which operation failed
+// and generates an appropriate message.
+func (t *TableBatch) getFailedOperation(errorMessage string) string {
+	// errorMessage consists of "number:string"; we just need the number.
+	sp := strings.Split(errorMessage, ":")
+	if len(sp) > 1 {
+		msg := fmt.Sprintf("Element %s in the batch returned an unexpected response code.\n%s", sp[0], errorMessage)
+		return msg
+	}
+
+	// can't parse the message; just return the original message to the client
+	return errorMessage
+}
+
+// generateBody generates the complete body for the batch request.
+func generateBody(changeSetBody *bytes.Buffer, changesetBoundary string, boundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(boundary)
+	h := make(textproto.MIMEHeader)
+	h.Set(headerContentType, fmt.Sprintf("multipart/mixed; boundary=%s\r\n", changesetBoundary))
+	batchWriter, err := writer.CreatePart(h)
+	if err != nil {
+		return nil, err
+	}
+	batchWriter.Write(changeSetBody.Bytes())
+	writer.Close()
+	return body, nil
+}
+
+// generateChangesetBody generates the individual changesets for the various operations within the batch request.
+// There is a changeset for Insert, Delete, Merge etc.
+func (t *TableBatch) generateChangesetBody(changesetBoundary string) (*bytes.Buffer, error) {
+
+	body := new(bytes.Buffer)
+	writer := multipart.NewWriter(body)
+	writer.SetBoundary(changesetBoundary)
+
+	for _, be := range t.BatchEntitySlice {
+		t.generateEntitySubset(&be, writer)
+	}
+
+	writer.Close()
+	return body, nil
+}
+
+// generateVerb generates the HTTP request VERB required for each changeset.
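+// Note that the Table service expects the non-standard MERGE verb for merge
+// operations; the remaining operations map onto standard HTTP methods.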
+func generateVerb(op Operation) (string, error) { + switch op { + case InsertOp: + return http.MethodPost, nil + case DeleteOp: + return http.MethodDelete, nil + case ReplaceOp, InsertOrReplaceOp: + return http.MethodPut, nil + case MergeOp, InsertOrMergeOp: + return "MERGE", nil + default: + return "", errors.New("Unable to detect operation") + } +} + +// generateQueryPath generates the query path for within the changesets +// For inserts it will just be a table query path (table name) +// but for other operations (modifying an existing entity) then +// the partition/row keys need to be generated. +func (t *TableBatch) generateQueryPath(op Operation, entity *Entity) string { + if op == InsertOp { + return entity.Table.buildPath() + } + return entity.buildPath() +} + +// generateGenericOperationHeaders generates common headers for a given operation. +func generateGenericOperationHeaders(be *BatchEntity) map[string]string { + retval := map[string]string{} + + for k, v := range defaultChangesetHeaders { + retval[k] = v + } + + if be.Op == DeleteOp || be.Op == ReplaceOp || be.Op == MergeOp { + if be.Force || be.Entity.OdataEtag == "" { + retval["If-Match"] = "*" + } else { + retval["If-Match"] = be.Entity.OdataEtag + } + } + + return retval +} + +// generateEntitySubset generates body payload for particular batch entity +func (t *TableBatch) generateEntitySubset(batchEntity *BatchEntity, writer *multipart.Writer) error { + + h := make(textproto.MIMEHeader) + h.Set(headerContentType, "application/http") + h.Set(headerContentTransferEncoding, "binary") + + verb, err := generateVerb(batchEntity.Op) + if err != nil { + return err + } + + genericOpHeadersMap := generateGenericOperationHeaders(batchEntity) + queryPath := t.generateQueryPath(batchEntity.Op, batchEntity.Entity) + uri := t.Table.tsc.client.getEndpoint(tableServiceName, queryPath, nil) + + operationWriter, err := writer.CreatePart(h) + if err != nil { + return err + } + + urlAndVerb := fmt.Sprintf("%s %s HTTP/1.1\r\n", verb, uri) + operationWriter.Write([]byte(urlAndVerb)) + writeHeaders(genericOpHeadersMap, &operationWriter) + operationWriter.Write([]byte("\r\n")) // additional \r\n is needed per changeset separating the "headers" and the body. + + // delete operation doesn't need a body. + if batchEntity.Op != DeleteOp { + //var e Entity = batchEntity.Entity + body, err := json.Marshal(batchEntity.Entity) + if err != nil { + return err + } + operationWriter.Write(body) + } + + return nil +} + +func writeHeaders(h map[string]string, writer *io.Writer) { + // This way it is guaranteed the headers will be written in a sorted order + var keys []string + for k := range h { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + (*writer).Write([]byte(fmt.Sprintf("%s: %s\r\n", k, h[k]))) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go new file mode 100644 index 00000000..1f063a39 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/tableserviceclient.go @@ -0,0 +1,204 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +const ( + headerAccept = "Accept" + headerEtag = "Etag" + headerPrefer = "Prefer" + headerXmsContinuation = "x-ms-Continuation-NextTableName" +) + +// TableServiceClient contains operations for Microsoft Azure Table Storage +// Service. +type TableServiceClient struct { + client Client + auth authentication +} + +// TableOptions includes options for some table operations +type TableOptions struct { + RequestID string +} + +func (options *TableOptions) addToHeaders(h map[string]string) map[string]string { + if options != nil { + h = addToHeaders(h, "x-ms-client-request-id", options.RequestID) + } + return h +} + +// QueryNextLink includes information for getting the next page of +// results in query operations +type QueryNextLink struct { + NextLink *string + ml MetadataLevel +} + +// GetServiceProperties gets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/get-table-service-properties +func (t *TableServiceClient) GetServiceProperties() (*ServiceProperties, error) { + return t.client.getServiceProperties(tableServiceName, t.auth) +} + +// SetServiceProperties sets the properties of your storage account's table service. +// See: https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/set-table-service-properties +func (t *TableServiceClient) SetServiceProperties(props ServiceProperties) error { + return t.client.setServiceProperties(props, tableServiceName, t.auth) +} + +// GetTableReference returns a Table object for the specified table name. +func (t *TableServiceClient) GetTableReference(name string) *Table { + return &Table{ + tsc: t, + Name: name, + } +} + +// QueryTablesOptions includes options for some table operations +type QueryTablesOptions struct { + Top uint + Filter string + RequestID string +} + +func (options *QueryTablesOptions) getParameters() (url.Values, map[string]string) { + query := url.Values{} + headers := map[string]string{} + if options != nil { + if options.Top > 0 { + query.Add(OdataTop, strconv.FormatUint(uint64(options.Top), 10)) + } + if options.Filter != "" { + query.Add(OdataFilter, options.Filter) + } + headers = addToHeaders(headers, "x-ms-client-request-id", options.RequestID) + } + return query, headers +} + +// QueryTables returns the tables in the storage account. +// You can use query options defined by the OData Protocol specification. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/query-tables +func (t *TableServiceClient) QueryTables(ml MetadataLevel, options *QueryTablesOptions) (*TableQueryResult, error) { + query, headers := options.getParameters() + uri := t.client.getEndpoint(tableServiceName, tablesURIPath, query) + return t.queryTables(uri, headers, ml) +} + +// NextResults returns the next page of results +// from a QueryTables or a NextResults operation. 
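+// A typical pagination loop from the caller's side (an illustrative sketch;
+// tsc is assumed to be a *TableServiceClient) keeps following NextLink until
+// it is exhausted:
+//
+//	result, err := tsc.QueryTables(storage.MinimalMetadata, nil)
+//	for err == nil && result.NextLink != nil {
+//		result, err = result.NextResults(nil)
+//	}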
+// +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-tables +// See https://docs.microsoft.com/rest/api/storageservices/fileservices/query-timeout-and-pagination +func (tqr *TableQueryResult) NextResults(options *TableOptions) (*TableQueryResult, error) { + if tqr == nil { + return nil, errNilPreviousResult + } + if tqr.NextLink == nil { + return nil, errNilNextLink + } + headers := options.addToHeaders(map[string]string{}) + + return tqr.tsc.queryTables(*tqr.NextLink, headers, tqr.ml) +} + +// TableQueryResult contains the response from +// QueryTables and QueryTablesNextResults functions. +type TableQueryResult struct { + OdataMetadata string `json:"odata.metadata"` + Tables []Table `json:"value"` + QueryNextLink + tsc *TableServiceClient +} + +func (t *TableServiceClient) queryTables(uri string, headers map[string]string, ml MetadataLevel) (*TableQueryResult, error) { + if ml == EmptyPayload { + return nil, errEmptyPayload + } + headers = mergeHeaders(headers, t.client.getStandardHeaders()) + headers[headerAccept] = string(ml) + + resp, err := t.client.exec(http.MethodGet, uri, headers, nil, t.auth) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkRespCode(resp, []int{http.StatusOK}); err != nil { + return nil, err + } + + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var out TableQueryResult + err = json.Unmarshal(respBody, &out) + if err != nil { + return nil, err + } + + for i := range out.Tables { + out.Tables[i].tsc = t + } + out.tsc = t + + nextLink := resp.Header.Get(http.CanonicalHeaderKey(headerXmsContinuation)) + if nextLink == "" { + out.NextLink = nil + } else { + originalURI, err := url.Parse(uri) + if err != nil { + return nil, err + } + v := originalURI.Query() + v.Set(nextTableQueryParameter, nextLink) + newURI := t.client.getEndpoint(tableServiceName, tablesURIPath, v) + out.NextLink = &newURI + out.ml = ml + } + + return &out, nil +} + +func addBodyRelatedHeaders(h map[string]string, length int) map[string]string { + h[headerContentType] = "application/json" + h[headerContentLength] = fmt.Sprintf("%v", length) + h[headerAcceptCharset] = "UTF-8" + return h +} + +func addReturnContentHeaders(h map[string]string, ml MetadataLevel) map[string]string { + if ml != EmptyPayload { + h[headerPrefer] = "return-content" + h[headerAccept] = string(ml) + } else { + h[headerPrefer] = "return-no-content" + // From API version 2015-12-11 onwards, Accept header is required + h[headerAccept] = string(NoMetadata) + } + return h +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go new file mode 100644 index 00000000..67739479 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -0,0 +1,260 @@ +package storage + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	uuid "github.com/satori/go.uuid"
+)
+
+var (
+	fixedTime         = time.Date(2050, time.December, 20, 21, 55, 0, 0, time.FixedZone("GMT", -6))
+	accountSASOptions = AccountSASTokenOptions{
+		Services: Services{
+			Blob: true,
+		},
+		ResourceTypes: ResourceTypes{
+			Service:   true,
+			Container: true,
+			Object:    true,
+		},
+		Permissions: Permissions{
+			Read:    true,
+			Write:   true,
+			Delete:  true,
+			List:    true,
+			Add:     true,
+			Create:  true,
+			Update:  true,
+			Process: true,
+		},
+		Expiry:   fixedTime,
+		UseHTTPS: true,
+	}
+)
+
+func (c Client) computeHmac256(message string) string {
+	h := hmac.New(sha256.New, c.accountKey)
+	h.Write([]byte(message))
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func currentTimeRfc1123Formatted() string {
+	return timeRfc1123Formatted(time.Now().UTC())
+}
+
+func timeRfc1123Formatted(t time.Time) string {
+	return t.Format(http.TimeFormat)
+}
+
+func timeRFC3339Formatted(t time.Time) string {
+	return t.Format("2006-01-02T15:04:05.0000000Z")
+}
+
+func mergeParams(v1, v2 url.Values) url.Values {
+	out := url.Values{}
+	for k, v := range v1 {
+		out[k] = v
+	}
+	for k, v := range v2 {
+		vals, ok := out[k]
+		if ok {
+			vals = append(vals, v...)
+			out[k] = vals
+		} else {
+			out[k] = v
+		}
+	}
+	return out
+}
+
+func prepareBlockListRequest(blocks []Block) string {
+	s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
+	for _, v := range blocks {
+		s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
+	}
+	s += `</BlockList>`
+	return s
+}
+
+func xmlUnmarshal(body io.Reader, v interface{}) error {
+	data, err := ioutil.ReadAll(body)
+	if err != nil {
+		return err
+	}
+	return xml.Unmarshal(data, v)
+}
+
+func xmlMarshal(v interface{}) (io.Reader, int, error) {
+	b, err := xml.Marshal(v)
+	if err != nil {
+		return nil, 0, err
+	}
+	return bytes.NewReader(b), len(b), nil
+}
+
+func headersFromStruct(v interface{}) map[string]string {
+	headers := make(map[string]string)
+	value := reflect.ValueOf(v)
+	for i := 0; i < value.NumField(); i++ {
+		key := value.Type().Field(i).Tag.Get("header")
+		if key != "" {
+			reflectedValue := reflect.Indirect(value.Field(i))
+			var val string
+			if reflectedValue.IsValid() {
+				switch reflectedValue.Type() {
+				case reflect.TypeOf(fixedTime):
+					val = timeRfc1123Formatted(reflectedValue.Interface().(time.Time))
+				case reflect.TypeOf(uint64(0)), reflect.TypeOf(uint(0)):
+					val = strconv.FormatUint(reflectedValue.Uint(), 10)
+				case reflect.TypeOf(int(0)):
+					val = strconv.FormatInt(reflectedValue.Int(), 10)
+				default:
+					val = reflectedValue.String()
+				}
+			}
+			if val != "" {
+				headers[key] = val
+			}
+		}
+	}
+	return headers
+}
+
+// merges extraHeaders into headers and returns headers
+func mergeHeaders(headers, extraHeaders map[string]string) map[string]string {
+	for k, v := range extraHeaders {
+		headers[k] = v
+	}
+	return headers
+}
+
+func addToHeaders(h map[string]string, key, value string) map[string]string {
+	if value != "" {
+		h[key] = value
+	}
+	return h
+}
+
+func addTimeToHeaders(h map[string]string, key string, value *time.Time) map[string]string {
+	if value != nil {
+		h = addToHeaders(h, key, timeRfc1123Formatted(*value))
+	}
+	return h
+}
+
+func addTimeout(params url.Values, timeout uint) url.Values {
+	if timeout > 0 {
+		params.Add("timeout", fmt.Sprintf("%v", timeout))
+	}
+	return params
+}
+
+func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
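+	// add the parameter only when a snapshot time was supplied; the service
+	// expects the RFC3339 layout produced by timeRFC3339Formatted above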
+ if snapshot != nil { + params.Add("snapshot", timeRFC3339Formatted(*snapshot)) + } + return params +} + +func getTimeFromHeaders(h http.Header, key string) (*time.Time, error) { + var out time.Time + var err error + outStr := h.Get(key) + if outStr != "" { + out, err = time.Parse(time.RFC1123, outStr) + if err != nil { + return nil, err + } + } + return &out, nil +} + +// TimeRFC1123 is an alias for time.Time needed for custom Unmarshalling +type TimeRFC1123 time.Time + +// UnmarshalXML is a custom unmarshaller that overrides the default time unmarshal which uses a different time layout. +func (t *TimeRFC1123) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var value string + d.DecodeElement(&value, &start) + parse, err := time.Parse(time.RFC1123, value) + if err != nil { + return err + } + *t = TimeRFC1123(parse) + return nil +} + +// MarshalXML marshals using time.RFC1123. +func (t *TimeRFC1123) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(time.Time(*t).Format(time.RFC1123), start) +} + +// returns a map of custom metadata values from the specified HTTP header +func getMetadataFromHeaders(header http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range header { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". + k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + + if len(metadata) == 0 { + return nil + } + + return metadata +} + +// newUUID returns a new uuid using RFC 4122 algorithm. +func newUUID() (uuid.UUID, error) { + u := [16]byte{} + // Set all bits to randomly (or pseudo-randomly) chosen values. + _, err := rand.Read(u[:]) + if err != nil { + return uuid.UUID{}, err + } + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) // u.setVariant(ReservedRFC4122) + u[6] = (u[6] & 0xF) | (uuid.V4 << 4) // u.setVersion(V4) + return uuid.FromBytes(u[:]) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 00000000..d8c3113d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. 
+// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. +const Number = "v31.1.0" diff --git a/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000..b9d6a27e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000..7b0c4bc4
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,292 @@
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+      --display-name example-app \
+      --homepage https://example-app/home \
+      --identifier-uris https://example-app/app \
+      --key-usage Verify --end-date 2018-01-01 \
+      --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
+
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace the `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom role definitions.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
+
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+	oauthConfig,
+	applicationID,
+	username,
+	password,
+	resource,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+	oauthConfig,
+	applicationID,
+	clientSecret,
+	authorizationCode,
+	redirectURI,
+	resource,
+	callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFC application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oath token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquire a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+
+```
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000..fa596474
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+const (
+	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+	TokenEndpoint      url.URL `json:"tokenEndpoint"`
+	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+	return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+	if len(param) == 0 {
+		return fmt.Errorf("parameter '%s' cannot be empty", name)
+	}
+	return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant specific urls
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+	apiVer := "1.0"
+	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+		return nil, err
+	}
+	api := ""
+	// it's legal for tenantID to be empty so don't validate it
+	if apiVersion != nil {
+		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+			return nil, err
+		}
+		api = fmt.Sprintf("?api-version=%s", *apiVersion)
+	}
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorityEndpoint:  *authorityURL,
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+// apiVersion returns the bare API version to use, defaulting to 1.0;
+// NewOAuthConfigWithAPIVersion adds the "?api-version=" query string itself.
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		return c.APIVersion
+	}
+	return "1.0"
+}
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
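+// A minimal call looks like the following sketch (the endpoint and the tenant
+// IDs are placeholders):
+//
+//	cfg, err := adal.NewMultiTenantOAuthConfig(
+//		"https://login.microsoftonline.com/", "PRIMARY_TENANT_ID",
+//		[]string{"AUX_TENANT_ID"}, adal.OAuthOptions{})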
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) { + if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 { + return nil, errors.New("must specify one to three auxiliary tenants") + } + mtCfg := multiTenantOAuthConfig{ + cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1), + } + apiVer := options.apiVersion() + pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err) + } + mtCfg.cfgs[0] = pri + for i := range auxiliaryTenantIDs { + aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i]) + if err != nil { + return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err) + } + mtCfg.cfgs[i+1] = aux + } + return mtCfg, nil +} + +type multiTenantOAuthConfig struct { + // first config in the slice is the primary tenant + cfgs []*OAuthConfig +} + +func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig { + return m.cfgs[0] +} + +func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig { + return m.cfgs[1:] +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 00000000..b38f4c24 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,242 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+/*
+	This file is largely based on rjw57/oauth2device's code, with the following differences:
+	 * scope -> resource, and only allow a single one
+	 * receive "Message" in the DeviceCode struct and show it to users as the prompt
+	 * azure-xplat-cli has the following behavior that this emulates:
+	   - does not send client_secret during the token exchange
+	   - sends resource again in the token exchange request
+*/
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+	errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint
+// It contains information to instruct the user to complete the auth flow
+type DeviceCode struct {
+	DeviceCode      *string `json:"device_code,omitempty"`
+	UserCode        *string `json:"user_code,omitempty"`
+	VerificationURL *string `json:"verification_url,omitempty"`
+	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+	Interval        *int64  `json:"interval,string,omitempty"`
+
+	Message     *string `json:"message"` // Azure specific
+	Resource    string  // store the following, stored when initiating, used when exchanging
+	OAuthConfig OAuthConfig
+	ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss
+type TokenError struct {
+	Error            *string `json:"error,omitempty"`
+	ErrorCodes       []int   `json:"error_codes,omitempty"`
+	ErrorDescription *string `json:"error_description,omitempty"`
+	Timestamp        *string `json:"timestamp,omitempty"`
+	TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// DeviceToken is the object returned by the token exchange endpoint
+// 
It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + 
return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 00000000..5c95dbfe --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/date v0.1.0 + github.com/Azure/go-autorest/autorest/mocks v0.1.0 + github.com/Azure/go-autorest/tracing v0.1.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 00000000..ef9d1016 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,139 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter 
v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 00000000..9e15f275 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,73 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. 
+func LoadToken(path string) (*Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var token Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&token); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err)
+	}
+	return &token, nil
+}
+
+// SaveToken persists an oauth token at the given location on disk.
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000..834401e0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,60 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"net/http"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
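+//
+// An illustrative sketch (editor's addition, not part of the upstream file): a
+// SendDecorator that logs each request before handing it to the wrapped
+// Sender. The logf parameter is a hypothetical stand-in for the caller's
+// logger.
+//
+//	func withLogging(logf func(format string, args ...interface{})) SendDecorator {
+//		return func(s Sender) Sender {
+//			return SenderFunc(func(r *http.Request) (*http.Response, error) {
+//				logf("adal: sending %s %s", r.Method, r.URL)
+//				return s.Do(r)
+//			})
+//		}
+//	}
+//
+// A client built with CreateSender(withLogging(log.Printf)) would then log
+// every token request; decorators are applied in the order they are passed.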
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000..4083e76e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1055 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/tracing"
+	"github.com/dgrijalva/jwt-go"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well-known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire within the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret
+// meaning it is not valid for fetching a fresh token. This is used by manually
+// created tokens (see NewServicePrincipalTokenFromManualToken).
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret
+// It only returns an error for the ServicePrincipalNoSecret type
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
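+// For this secret type the marshalled form carries only a type tag, e.g.
+//
+//	{"type": "ServicePrincipalNoSecret"}
+//
+// (an editor's illustration; the shape follows from the tokenType struct in
+// the method body below).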
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
+func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	secret := raw["secret"].(map[string]interface{})
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have already been set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = &http.Client{Transport: tracing.Transport}
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: &http.Client{Transport: tracing.Transport}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
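+//
+// An illustrative sketch (editor's addition, not upstream documentation): a
+// typical client-credentials setup. NewOAuthConfig is defined elsewhere in
+// this package; the endpoint, tenant, client, and secret values here are
+// placeholders.
+//
+//	cfg, err := NewOAuthConfig("https://login.microsoftonline.com/", "<tenant-id>")
+//	if err != nil {
+//		// handle error
+//	}
+//	spt, err := NewServicePrincipalToken(*cfg, "<client-id>", "<client-secret>",
+//		"https://management.azure.com/")
+//	if err != nil {
+//		// handle error
+//	}
+//	if err := spt.EnsureFresh(); err != nil {
+//		// handle error
+//	}
+//	accessToken := spt.OAuthToken()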
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if userAssignedID != nil {
+		if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+			return nil, err
+		}
+	}
+	// We set the oauth config token endpoint to be MSI's endpoint
+	msiEndpointURL, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	v := url.Values{}
+	v.Set("resource", resource)
+	v.Set("api-version", "2018-02-01")
+	if userAssignedID != nil {
+		v.Set("client_id", *userAssignedID)
+	}
+	msiEndpointURL.RawQuery = v.Encode()
+
+	spt := &ServicePrincipalToken{
+		inner: servicePrincipalToken{
+			Token: newToken(),
+			OauthConfig: OAuthConfig{
+				TokenEndpoint: *msiEndpointURL,
+			},
+			Secret:        &ServicePrincipalMSISecret{},
+			Resource:      resource,
+			AutoRefresh:   true,
+			RefreshWithin: defaultRefresh,
+		},
+		refreshLock:           &sync.RWMutex{},
+		sender:                &http.Client{Transport: tracing.Transport},
+		refreshCallbacks:      callbacks,
+		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
+	}
+
+	if userAssignedID != nil {
+		spt.inner.ClientID = *userAssignedID
+	}
+
+	return spt, nil
+}
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+	message string
+	resp    *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+	return tre.message
+}
+
+// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+	return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+	return tokenRefreshError{message: message, resp: resp}
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+	return spt.EnsureFreshWithContext(context.Background())
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+		// take the write lock then check to see if the token was already refreshed
+		spt.refreshLock.Lock()
+		defer spt.refreshLock.Unlock()
+		if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+			return spt.refreshInternal(ctx, spt.inner.Resource)
+		}
+	}
+	return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+	if spt.refreshCallbacks != nil {
+		for _, callback := range spt.refreshCallbacks {
+			err := callback(spt.inner.Token)
+			if err != nil {
+				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+			}
+		}
+	}
+	return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
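+//
+// An illustrative sketch (editor's addition, not upstream documentation):
+// since every successful refresh invokes the registered
+// TokenRefreshCallbacks, a caller can persist tokens across restarts by
+// pairing the callbacks with SaveToken; the cache path is a placeholder.
+//
+//	saveOnRefresh := func(t Token) error {
+//		return SaveToken("/var/lib/myapp/token.json", 0600, t)
+//	}
+//	spt, err := NewServicePrincipalToken(*cfg, clientID, secret, resource, saveOnRefresh)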
+func (spt *ServicePrincipalToken) Refresh() error {
+	return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, spt.inner.Resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+	return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.inner.Secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func isIMDS(u url.URL) bool {
+	imds, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return false
+	}
+	return u.Host == imds.Host && u.Path == imds.Path
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+	}
+	req.Header.Add("User-Agent", UserAgent())
+	req = req.WithContext(ctx)
+	if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		v := url.Values{}
+		v.Set("client_id", spt.inner.ClientID)
+		v.Set("resource", resource)
+
+		if spt.inner.Token.RefreshToken != "" {
+			v.Set("grant_type", OAuthGrantTypeRefreshToken)
+			v.Set("refresh_token", spt.inner.Token.RefreshToken)
+			// web apps must specify client_secret when refreshing tokens
+			// see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+			if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+				err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			v.Set("grant_type", spt.getGrantType())
+			err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+			if err != nil {
+				return err
+			}
+		}
+
+		s := v.Encode()
+		body := ioutil.NopCloser(strings.NewReader(s))
+		req.ContentLength = int64(len(s))
+		req.Header.Set(contentType, mimeTypeFormPost)
+		req.Body = body
+	}
+
+	if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+		req.Method = http.MethodGet
+		req.Header.Set(metadataHeader, "true")
+	}
+
+	var resp *http.Response
+	if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+	} else {
+		resp, err = spt.sender.Do(req)
+	}
+	if err != nil {
+		return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request.
Error = '%v'", err), nil) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // retry on temporary network errors, e.g. transient network failures. + // if we don't receive a response then assume we can't connect to the + // endpoint so we're likely not running on an Azure VM so don't retry. + if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +// returns true if the specified error is a temporary network error or false if it's not. +// if the error doesn't implement the net.Error interface the return value is true. 
+func isTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
+		return true
+	}
+	return false
+}
+
+// returns true if slice ints contains the value n
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.inner.AutoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.inner.RefreshWithin = d
+	return
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token
+}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+	PrimaryToken    *ServicePrincipalToken
+	AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+	return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+	tokens := make([]string, len(mt.AuxiliaryTokens))
+	for i := range mt.AuxiliaryTokens {
+		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+	}
+	return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+		return fmt.Errorf("failed to refresh primary token: %v", err)
+	}
+	for _, aux := range mt.AuxiliaryTokens {
+		if err := aux.EnsureFreshWithContext(ctx); err != nil {
+			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+		}
+	}
+	return nil
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000..c867b348 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 00000000..380865cd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,336 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	bearerChallengeHeader       = "Www-Authenticate"
+	bearer                      = "Bearer"
+	tenantID                    = "tenantID"
+	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
+	bingAPISdkHeader            = "X-BingApis-SDK-Client"
+	golangBingAPISdkHeaderValue = "Go-SDK"
+	authorization               = "Authorization"
+	basic                       = "Basic"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the given headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key header.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+// +// By default, the token will be automatically refreshed through the Refresher interface. +func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{Transport: tracing.Transport} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
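A sketch of supplying that callback: the callback receives the tenant ID and resource parsed from the service's bearer challenge and returns a matching BearerAuthorizer. Credential values are placeholders, and the function name is illustrative.

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func challengeAuthorizedRequest() (*http.Request, error) {
	cb := autorest.NewBearerAuthorizerCallback(nil, // nil sender: a default tracing client is used
		func(tenantID, resource string) (*autorest.BearerAuthorizer, error) {
			cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com/", tenantID)
			if err != nil {
				return nil, err
			}
			spt, err := adal.NewServicePrincipalToken(*cfg, "<client-id>", "<client-secret>", resource)
			if err != nil {
				return nil, err
			}
			return autorest.NewBearerAuthorizer(spt), nil
		})
	return autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://management.azure.com/"),
		cb.WithAuthorization())
}
```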
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header +// with the value "Basic <token>" where <token> is a base64-encoded username:password tuple. +type BasicAuthorizer struct { + userName string + password string +} + +// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password. +func NewBasicAuthorizer(userName, password string) *BasicAuthorizer { + return &BasicAuthorizer{ + userName: userName, + password: password, + } +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Basic " followed by the base64-encoded username:password tuple.
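A short sketch of the two simpler authorizers above; the URLs, credentials, and function name are illustrative.

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func demoRequests() (basicReq, egReq *http.Request, err error) {
	// Basic auth: the header value is "Basic " plus base64("user:password").
	basicReq, err = autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://appliance.example.com/rest/login-sessions"),
		autorest.NewBasicAuthorizer("<user>", "<password>").WithAuthorization())
	if err != nil {
		return nil, nil, err
	}
	// Event Grid: the topic key travels in the aeg-sas-key header.
	egReq, err = autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://topic.westus2-1.eventgrid.azure.net/api/events"),
		autorest.NewEventGridKeyAuthorizer("<topic-key>").WithAuthorization())
	return basicReq, egReq, err
}
```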
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator { + headers := make(map[string]interface{}) + headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password))) + + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} + +// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants. +type MultiTenantServicePrincipalTokenAuthorizer interface { + WithAuthorization() PrepareDecorator +} + +// NewMultiTenantServicePrincipalTokenAuthorizer creates a MultiTenantServicePrincipalTokenAuthorizer using the given token provider. +func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer { + return &multiTenantSPTAuthorizer{tp: tp} +} + +type multiTenantSPTAuthorizer struct { + tp adal.MultitenantOAuthTokenProvider +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the +// primary token along with the auxiliary authorization header using the auxiliary tokens. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + if refresher, ok := mt.tp.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp, + "Failed to refresh one or more Tokens for request to %s", r.URL) + } + } + r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken()))) + if err != nil { + return r, err + } + auxTokens := mt.tp.AuxiliaryOAuthTokens() + for i := range auxTokens { + auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i]) + } + return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; "))) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 00000000..aafdf021 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided.
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
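Pulling the three phases together, a compilable sketch of the pattern the package comment describes; the endpoint and path are placeholders.

```go
package main

import (
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func fetchStatus() error {
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.com/"),
		autorest.WithPath("status"))
	if err != nil {
		return err
	}
	resp, err := autorest.Send(req,
		autorest.DoErrorIfStatusCode(http.StatusInternalServerError),
		autorest.DoCloseIfError(),
		autorest.DoRetryForAttempts(3, time.Second))
	if err != nil {
		return err
	}
	// Drain and close the body so the underlying connection can be reused.
	return autorest.Respond(resp,
		autorest.ByDiscardingBody(),
		autorest.ByClosing())
}

func main() { _ = fetchStatus() }
```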
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000..02d01196 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,919 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. 
+func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// DoneWithContext queries the service to see if the operation has completed. +func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(ctx, sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// If no deadline is specified in the context then the client.PollingDuration will be +// used to determine if a default deadline should be used. +// If PollingDuration is greater than zero the value will be used as the context's timeout. +// If PollingDuration is zero then no default deadline will be used. 
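In practice the polling loop is driven through WaitForCompletionRef. A minimal sketch, assuming resp holds the service's initial asynchronous response and ctx is an inherited context; the client settings and user agent are illustrative.

```go
package main

import (
	"context"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func waitForOperation(ctx context.Context, resp *http.Response) (*http.Response, error) {
	client := autorest.NewClientWithUserAgent("example")
	client.PollingDuration = 15 * time.Minute // becomes the default deadline when ctx has none

	future, err := azure.NewFutureFromResponse(resp)
	if err != nil {
		return nil, err
	}
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	// autorest.Client satisfies autorest.Sender, so it can issue the final GET.
	return future.GetResult(client)
}
```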
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) { + ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef") + defer func() { + sc := -1 + resp := f.Response() + if resp != nil { + sc = resp.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + cancelCtx := ctx + // if the provided context already has a deadline don't override it + _, hasDeadline := ctx.Deadline() + if d := client.PollingDuration; !hasDeadline && d != 0 { + var cancel context.CancelFunc + cancelCtx, cancel = context.WithTimeout(ctx, d) + defer cancel() + } + + done, err := f.DoneWithContext(ctx, client) + for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. +func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs.
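Because Future implements json.Marshaler and json.Unmarshaler, an in-flight operation can be persisted and resumed later, which is useful for callers that may restart between polls. A sketch, assuming ctx, client, and future come from the caller; the function name is illustrative.

```go
package main

import (
	"context"
	"encoding/json"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func checkpointAndResume(ctx context.Context, client autorest.Client, future azure.Future) (bool, error) {
	saved, err := json.Marshal(future) // serializes the tracker, including its "method" property
	if err != nil {
		return false, err
	}
	var resumed azure.Future
	if err := json.Unmarshal(saved, &resumed); err != nil {
		return false, err // fails if "method" is missing or not a supported verb
	}
	return resumed.DoneWithContext(ctx, client)
}
```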
in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + return sender.Do(req) +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. +func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. 
+ if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. 
+func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absence of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt
pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. 
+ PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of an azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified future type. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 00000000..3a0a439f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,326 @@ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +// It adheres to the OData v4 specification for error responses.
+type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } else { + result += fmt.Sprintf(" Details=%v", string(d)) + } + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } else { + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } else { + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // per the OData v4 spec the details field must be an array of JSON objects. + // unfortunately not all services adhere to the spec and just return a single + // object instead of an array with one object. so we have to perform some + // shenanigans to accommodate both cases. + // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceError1 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + type serviceError2 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + se1 := serviceError1{} + err := json.Unmarshal(b, &se1) + if err == nil { + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) + return nil + } + + se2 := serviceError2{} + err = json.Unmarshal(b, &se2) + if err == nil { + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) + se.Details = append(se.Details, se2.Details) + return nil + } + return err +} + +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { + se.Code = code + se.Message = message + se.Target = target + se.Details = details + se.InnerError = inner + se.AdditionalInfo = additional +} + +// RequestError describes an error response returned by an Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from the service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error.
Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a ResourceDetails struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. 
+func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. + // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { + return err + } + } + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body as raw JSON in hopes of getting something. 
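A sketch of the decorator in a Responder chain: on a status outside the allowed set, the returned error can be asserted back to *RequestError to inspect the parsed ServiceError. The target map and function name are illustrative.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

func handle(resp *http.Response) (map[string]interface{}, error) {
	var out map[string]interface{}
	err := autorest.Respond(resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
		autorest.ByUnmarshallingJSON(&out),
		autorest.ByClosing())
	if err != nil {
		if reqErr, ok := err.(*azure.RequestError); ok && reqErr.ServiceError != nil {
			fmt.Printf("service failure %s: %s\n", reqErr.ServiceError.Code, reqErr.ServiceError.Message)
		}
		return nil, err
	}
	return out, nil
}
```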
+ rawBody := map[string]interface{}{} + if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { + return err + } + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod new file mode 100644 index 00000000..a147222b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -0,0 +1,10 @@ +module github.com/Azure/go-autorest/autorest/azure/cli + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.1.0 + github.com/Azure/go-autorest/autorest/date v0.1.0 + github.com/dimchansky/utfbom v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 +) diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum new file mode 100644 index 00000000..1d098cff --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -0,0 +1,144 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= 
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 00000000..a336b958 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,79 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/dimchansky/utfbom"
+	"github.com/mitchellh/go-homedir"
+)
+
+// Profile represents a Profile from the Azure CLI
+type Profile struct {
+	InstallationID string         `json:"installationId"`
+	Subscriptions  []Subscription `json:"subscriptions"`
+}
+
+// Subscription represents a Subscription from the Azure CLI
+type Subscription struct {
+	EnvironmentName string `json:"environmentName"`
+	ID              string `json:"id"`
+	IsDefault       bool   `json:"isDefault"`
+	Name            string `json:"name"`
+	State           string `json:"state"`
+	TenantID        string `json:"tenantId"`
+	User            *User  `json:"user"`
+}
+
+// User represents a User from the Azure CLI
+type User struct {
+	Name string `json:"name"`
+	Type string `json:"type"`
+}
+
+const azureProfileJSON = "azureProfile.json"
+
+// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI
+func ProfilePath() (string, error) {
+	if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
+		return filepath.Join(cfgDir, azureProfileJSON), nil
+	}
+	return homedir.Expand("~/.azure/" + azureProfileJSON)
+}
+
+// LoadProfile restores a Profile object from a file located at 'path'.
+func LoadProfile(path string) (result Profile, err error) {
+	var contents []byte
+	contents, err = ioutil.ReadFile(path)
+	if err != nil {
+		err = fmt.Errorf("failed to open file (%s) while loading profile: %v", path, err)
+		return
+	}
+	reader := utfbom.SkipOnly(bytes.NewReader(contents))
+
+	dec := json.NewDecoder(reader)
+	if err = dec.Decode(&result); err != nil {
+		err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err)
+		return
+	}
+
+	return
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
new file mode 100644
index 00000000..810075ba
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
@@ -0,0 +1,170 @@
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
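A minimal usage sketch (not part of the vendored sources) for the `ProfilePath` and `LoadProfile` helpers above, assuming an Azure CLI profile already exists at the default location:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Resolve azureProfile.json, honoring AZURE_CONFIG_DIR when it is set.
	path, err := cli.ProfilePath()
	if err != nil {
		log.Fatalf("resolving profile path: %v", err)
	}

	// LoadProfile skips a UTF-8 BOM, if present, before decoding the JSON.
	profile, err := cli.LoadProfile(path)
	if err != nil {
		log.Fatalf("loading profile: %v", err)
	}

	// Print the subscription the CLI currently treats as the default, if any.
	for _, sub := range profile.Subscriptions {
		if sub.IsDefault {
			fmt.Printf("default subscription: %s (%s)\n", sub.Name, sub.ID)
		}
	}
}
```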
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"regexp"
+	"runtime"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/mitchellh/go-homedir"
+)
+
+// Token represents an AccessToken from the Azure CLI
+type Token struct {
+	AccessToken      string `json:"accessToken"`
+	Authority        string `json:"_authority"`
+	ClientID         string `json:"_clientId"`
+	ExpiresOn        string `json:"expiresOn"`
+	IdentityProvider string `json:"identityProvider"`
+	IsMRRT           bool   `json:"isMRRT"`
+	RefreshToken     string `json:"refreshToken"`
+	Resource         string `json:"resource"`
+	TokenType        string `json:"tokenType"`
+	UserID           string `json:"userId"`
+}
+
+// ToADALToken converts an Azure CLI `Token` to an `adal.Token`
+func (t Token) ToADALToken() (converted adal.Token, err error) {
+	tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
+	if err != nil {
+		err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+		return
+	}
+
+	difference := tokenExpirationDate.Sub(date.UnixEpoch())
+
+	converted = adal.Token{
+		AccessToken:  t.AccessToken,
+		Type:         t.TokenType,
+		ExpiresIn:    "3600",
+		ExpiresOn:    json.Number(strconv.Itoa(int(difference.Seconds()))),
+		RefreshToken: t.RefreshToken,
+		Resource:     t.Resource,
+	}
+	return
+}
+
+// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
+// TODO(#199): add unit test.
+func AccessTokensPath() (string, error) {
+	// The Azure CLI allows the user to customize the path of access tokens through an environment variable.
+	var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
+	var err error
+
+	// Fallback logic to default path on non-cloud-shell environment.
+	// TODO(#200): remove the dependency on hard-coding path.
+	if accessTokenPath == "" {
+		accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
+	}
+
+	return accessTokenPath, err
+}
+
+// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object
+func ParseExpirationDate(input string) (*time.Time, error) {
+	// CloudShell (and potentially the Azure CLI in future)
+	expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+	if cloudShellErr != nil {
+		// Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+		const cliFormat = "2006-01-02 15:04:05.999999"
+		expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+		if cliErr == nil {
+			return &expirationDate, nil
+		}
+
+		return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+	}
+
+	return &expirationDate, nil
+}
+
+// LoadTokens restores a set of Token objects from a file located at 'path'.
+func LoadTokens(path string) ([]Token, error) {
+	file, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+	}
+	defer file.Close()
+
+	var tokens []Token
+
+	dec := json.NewDecoder(file)
+	if err = dec.Decode(&tokens); err != nil {
+		return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
+	}
+
+	return tokens, nil
+}
+
+// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
+func GetTokenFromCLI(resource string) (*Token, error) {
+	// This is the name of the environment variable a developer can set to tell this package what the install path for Azure CLI is.
+ const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000..6c20b817 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,244 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. 
+ NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + 
USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: 
"https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000..507f9e95 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+type audience []string
+
+type authentication struct {
+	LoginEndpoint string   `json:"loginEndpoint"`
+	Audiences     audience `json:"audiences"`
+}
+
+type environmentMetadataInfo struct {
+	GalleryEndpoint string         `json:"galleryEndpoint"`
+	GraphEndpoint   string         `json:"graphEndpoint"`
+	PortalEndpoint  string         `json:"portalEndpoint"`
+	Authentication  authentication `json:"authentication"`
+}
+
+// EnvironmentProperty represents property names that clients can override
+type EnvironmentProperty string
+
+const (
+	// EnvironmentName ...
+	EnvironmentName EnvironmentProperty = "name"
+	// EnvironmentManagementPortalURL ...
+	EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL"
+	// EnvironmentPublishSettingsURL ...
+	EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL"
+	// EnvironmentServiceManagementEndpoint ...
+	EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint"
+	// EnvironmentResourceManagerEndpoint ...
+	EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint"
+	// EnvironmentActiveDirectoryEndpoint ...
+	EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint"
+	// EnvironmentGalleryEndpoint ...
+	EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint"
+	// EnvironmentKeyVaultEndpoint ...
+	EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint"
+	// EnvironmentGraphEndpoint ...
+	EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint"
+	// EnvironmentServiceBusEndpoint ...
+	EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint"
+	// EnvironmentBatchManagementEndpoint ...
+	EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint"
+	// EnvironmentStorageEndpointSuffix ...
+	EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix"
+	// EnvironmentSQLDatabaseDNSSuffix ...
+	EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix"
+	// EnvironmentTrafficManagerDNSSuffix ...
+	EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix"
+	// EnvironmentKeyVaultDNSSuffix ...
+	EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix"
+	// EnvironmentServiceBusEndpointSuffix ...
+	EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix"
+	// EnvironmentServiceManagementVMDNSSuffix ...
+	EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix"
+	// EnvironmentResourceManagerVMDNSSuffix ...
+	EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix"
+	// EnvironmentContainerRegistryDNSSuffix ...
+	EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix"
+	// EnvironmentTokenAudience ...
+	EnvironmentTokenAudience EnvironmentProperty = "tokenAudience"
+)
+
+// OverrideProperty represents a property name and value that clients can override
+type OverrideProperty struct {
+	Key   EnvironmentProperty
+	Value string
+}
+
+// EnvironmentFromURL loads an Environment from a URL.
+// This function is particularly useful in the Hybrid Cloud model, where one may define their own
+// endpoints.
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case 
EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000..86ce9f2b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,200 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + var re RequestError + err = autorest.Respond( + resp, + autorest.ByUnmarshallingJSON(&re), + ) + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. 
Original error: %s", regErr, err) + } + } + } + return resp, err + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0]["target"].(string), nil + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if 
provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 00000000..92da6adb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,324 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/tracing" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. 
+func (r Response) IsHTTPStatus(statusCode int) bool {
+	if r.Response == nil {
+		return false
+	}
+	return r.Response.StatusCode == statusCode
+}
+
+// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes.
+// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided
+// the return value is false.
+func (r Response) HasHTTPStatus(statusCodes ...int) bool {
+	return ResponseHasStatusCode(r.Response, statusCodes...)
+}
+
+// LoggingInspector implements request and response inspectors that log the full request and
+// response to a supplied log.
+type LoggingInspector struct {
+	Logger *log.Logger
+}
+
+// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) WithInspection() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			var body, b bytes.Buffer
+
+			defer r.Body.Close()
+
+			r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
+			if err := r.Write(&b); err != nil {
+				return nil, fmt.Errorf("Failed to write request: %v", err)
+			}
+
+			li.Logger.Printf(requestFormat, b.String())
+
+			r.Body = ioutil.NopCloser(&body)
+			return p.Prepare(r)
+		})
+	}
+}
+
+// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The
+// body is restored after being emitted.
+//
+// Note: Since it reads the entire Body, this decorator should not be used where body streaming is
+// important. It is best used to trace JSON or similar body values.
+func (li LoggingInspector) ByInspecting() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			var body, b bytes.Buffer
+			defer resp.Body.Close()
+			resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
+			if err := resp.Write(&b); err != nil {
+				return fmt.Errorf("Failed to write response: %v", err)
+			}
+
+			li.Logger.Printf(responseFormat, b.String())
+
+			resp.Body = ioutil.NopCloser(&body)
+			return r.Respond(resp)
+		})
+	}
+}
+
+// Client is the base for autorest generated clients. It provides default, "do nothing"
+// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the
+// standard, undecorated http.Client as a default Sender.
+//
+// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and
+// return responses that compose with Response.
+//
+// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom
+// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit
+// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence
+// sending the request by providing a decorated Sender.
+type Client struct {
+	Authorizer        Authorizer
+	Sender            Sender
+	RequestInspector  PrepareDecorator
+	ResponseInspector RespondDecorator
+
+	// PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header
+	PollingDelay time.Duration
+
+	// PollingDuration sets the maximum polling time after which an error is returned.
+	// Setting this to zero will use the provided context to control the duration.
+	PollingDuration time.Duration
+
+	// RetryAttempts sets the default number of retry attempts for client.
+	RetryAttempts int
+
+	// RetryDuration sets the delay duration for retries.
+	RetryDuration time.Duration
+
+	// UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent
+	// through the Do method.
+	UserAgent string
+
+	Jar http.CookieJar
+
+	// Set to true to skip attempted registration of resource providers (false by default).
+	SkipResourceProviderRegistration bool
+}
+
+// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
+// string.
+func NewClientWithUserAgent(ua string) Client {
+	return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+	// UserAgent is an optional user-agent string to append to the default user agent.
+	UserAgent string
+
+	// Renegotiation is an optional setting to control client-side TLS renegotiation.
+	Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+	return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
+	c := Client{
+		PollingDelay:    DefaultPollingDelay,
+		PollingDuration: DefaultPollingDuration,
+		RetryAttempts:   DefaultRetryAttempts,
+		RetryDuration:   DefaultRetryDuration,
+		UserAgent:       UserAgent(),
+	}
+	c.Sender = c.sender(renegotiation)
+	c.AddToUserAgent(ua)
+	return c
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func (c *Client) AddToUserAgent(extension string) error {
+	if extension != "" {
+		c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension)
+		return nil
+	}
+	return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)
+}
+
+// Do implements the Sender interface by invoking the active Sender after applying authorization.
+// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent
+// is set, set the User-Agent header.
+func (c Client) Do(r *http.Request) (*http.Response, error) {
+	if r.UserAgent() == "" {
+		r, _ = Prepare(r,
+			WithUserAgent(c.UserAgent))
+	}
+	// NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations
+	r, err := Prepare(r,
+		c.WithAuthorization(),
+		c.WithInspection())
+	if err != nil {
+		var resp *http.Response
+		if detErr, ok := err.(DetailedError); ok {
+			// if the authorization failed (e.g. invalid credentials) there will
+			// be a response associated with the error, be sure to return it.
+			resp = detErr.Response
+		}
+		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
+	}
+	logger.Instance.WriteRequest(r, logger.Filter{
+		Header: func(k string, v []string) (bool, []string) {
+			// remove the auth token from the log
+			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
+				v = []string{"**REDACTED**"}
+			}
+			return true, v
+		},
+	})
+	resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
+	logger.Instance.WriteResponse(resp, logger.Filter{})
+	Respond(resp, c.ByInspecting())
+	return resp, err
+}
+
+// sender returns the Sender to which to send requests.
+func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender {
+    if c.Sender == nil {
+        // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+        var defaultTransport = http.DefaultTransport.(*http.Transport)
+        transport := tracing.Transport
+        // for non-default values of TLS renegotiation create a new tracing transport.
+        // updating tracing.Transport affects all clients which is not what we want.
+        if renegotiation != tls.RenegotiateNever {
+            transport = tracing.NewTransport()
+        }
+        transport.Base = &http.Transport{
+            Proxy:                 defaultTransport.Proxy,
+            DialContext:           defaultTransport.DialContext,
+            MaxIdleConns:          defaultTransport.MaxIdleConns,
+            IdleConnTimeout:       defaultTransport.IdleConnTimeout,
+            TLSHandshakeTimeout:   defaultTransport.TLSHandshakeTimeout,
+            ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+            TLSClientConfig: &tls.Config{
+                MinVersion:    tls.VersionTLS12,
+                Renegotiation: renegotiation,
+            },
+        }
+        j, _ := cookiejar.New(nil)
+        return &http.Client{Jar: j, Transport: transport}
+    }
+
+    return c.Sender
+}
+
+// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator
+// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer.
+func (c Client) WithAuthorization() PrepareDecorator {
+    return c.authorizer().WithAuthorization()
+}
+
+// authorizer returns the Authorizer to use.
+func (c Client) authorizer() Authorizer {
+    if c.Authorizer == nil {
+        return NullAuthorizer{}
+    }
+    return c.Authorizer
+}
+
+// WithInspection is a convenience method that passes the request to the supplied RequestInspector,
+// if present, or returns the WithNothing PrepareDecorator otherwise.
+func (c Client) WithInspection() PrepareDecorator {
+    if c.RequestInspector == nil {
+        return WithNothing()
+    }
+    return c.RequestInspector
+}
+
+// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector,
+// if present, or returns the ByIgnoring RespondDecorator otherwise.
+func (c Client) ByInspecting() RespondDecorator {
+    if c.ResponseInspector == nil {
+        return ByIgnoring()
+    }
+    return c.ResponseInspector
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
new file mode 100644
index 00000000..c4571065
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go
@@ -0,0 +1,96 @@
+/*
+Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/)
+defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of
+time.Time types. And both convert to time.Time through a ToTime method.
+*/
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
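+
+// Illustrative sketch (the literal date is an example, not from the upstream
+// source): a Date round-trips through JSON as a quoted RFC3339 full-date,
+// assuming encoding/json on the caller's side:
+//
+//    d, _ := ParseDate("2001-02-03")
+//    b, _ := json.Marshal(d) // yields "2001-02-03"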
+
+import (
+    "fmt"
+    "time"
+)
+
+const (
+    fullDate     = "2006-01-02"
+    fullDateJSON = `"2006-01-02"`
+    dateFormat   = "%04d-%02d-%02d"
+    jsonFormat   = `"%04d-%02d-%02d"`
+)
+
+// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e.,
+// 2006-01-02).
+type Date struct {
+    time.Time
+}
+
+// ParseDate creates a new Date from the passed string.
+func ParseDate(date string) (d Date, err error) {
+    return parseDate(date, fullDate)
+}
+
+func parseDate(date string, format string) (Date, error) {
+    d, err := time.Parse(format, date)
+    return Date{Time: d}, err
+}
+
+// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalBinary() ([]byte, error) {
+    return d.MarshalText()
+}
+
+// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalBinary(data []byte) error {
+    return d.UnmarshalText(data)
+}
+
+// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalJSON() (json []byte, err error) {
+    return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalJSON(data []byte) (err error) {
+    d.Time, err = time.Parse(fullDateJSON, string(data))
+    return err
+}
+
+// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d Date) MarshalText() (text []byte, err error) {
+    return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil
+}
+
+// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e.,
+// 2006-01-02).
+func (d *Date) UnmarshalText(data []byte) (err error) {
+    d.Time, err = time.Parse(fullDate, string(data))
+    return err
+}
+
+// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02).
+func (d Date) String() string {
+    return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())
+}
+
+// ToTime returns a Date as a time.Time
+func (d Date) ToTime() time.Time {
+    return d.Time
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
new file mode 100644
index 00000000..13a1e980
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/date
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go
new file mode 100644
index 00000000..b453fad0
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go
@@ -0,0 +1,103 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
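+
+// Illustrative sketch (the timestamp literals are examples, not from the
+// upstream source): Time unmarshals RFC3339 date-times with or without a
+// trailing time zone, since Azure sometimes omits the 'Z' suffix:
+//
+//    var t Time
+//    _ = t.UnmarshalJSON([]byte(`"2001-02-03T04:05:06Z"`)) // RFC3339
+//    _ = t.UnmarshalJSON([]byte(`"2001-02-03T04:05:06"`))  // Azure UTC, no 'Z'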
+ +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000..48fb39ba --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 00000000..7073959b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
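+
+// Illustrative sketch (the literal is an example, not from the upstream
+// source): UnixTime marshals to a JSON number of seconds since the Unix Epoch,
+// assuming encoding/json on the caller's side:
+//
+//    t := NewUnixTimeFromSeconds(1.5)
+//    b, _ := json.Marshal(t) // yields 1.5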
+
+import (
+    "bytes"
+    "encoding/binary"
+    "encoding/json"
+    "time"
+)
+
+// unixEpoch is the moment in time that should be treated as timestamp 0.
+var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC)
+
+// UnixTime marshals and unmarshals a time that is represented as the number
+// of seconds (ignoring leap seconds) since the Unix Epoch.
+type UnixTime time.Time
+
+// Duration returns the time as a Duration since the UnixEpoch.
+func (t UnixTime) Duration() time.Duration {
+    return time.Time(t).Sub(unixEpoch)
+}
+
+// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch.
+func NewUnixTimeFromSeconds(seconds float64) UnixTime {
+    return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second)))
+}
+
+// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch.
+func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime {
+    return NewUnixTimeFromDuration(time.Duration(nanoseconds))
+}
+
+// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch.
+func NewUnixTimeFromDuration(dur time.Duration) UnixTime {
+    return UnixTime(unixEpoch.Add(dur))
+}
+
+// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0'.
+func UnixEpoch() time.Time {
+    return unixEpoch
+}
+
+// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements.
+// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.)
+func (t UnixTime) MarshalJSON() ([]byte, error) {
+    buffer := &bytes.Buffer{}
+    enc := json.NewEncoder(buffer)
+    err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9)
+    if err != nil {
+        return nil, err
+    }
+    return buffer.Bytes(), nil
+}
+
+// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of seconds since
+// midnight January 1st, 1970.
+func (t *UnixTime) UnmarshalJSON(text []byte) error {
+    dec := json.NewDecoder(bytes.NewReader(text))
+
+    var secondsSinceEpoch float64
+    if err := dec.Decode(&secondsSinceEpoch); err != nil {
+        return err
+    }
+
+    *t = NewUnixTimeFromSeconds(secondsSinceEpoch)
+
+    return nil
+}
+
+// MarshalText stores the UnixTime as RFC3339 text (via time.Time.MarshalText).
+func (t UnixTime) MarshalText() ([]byte, error) {
+    cast := time.Time(t)
+    return cast.MarshalText()
+}
+
+// UnmarshalText populates a UnixTime from a value stored as RFC3339 text (via
+// time.Time.UnmarshalText).
+func (t *UnixTime) UnmarshalText(raw []byte) error {
+    var unmarshaled time.Time
+
+    if err := unmarshaled.UnmarshalText(raw); err != nil {
+        return err
+    }
+
+    *t = UnixTime(unmarshaled)
+    return nil
+}
+
+// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch.
+func (t UnixTime) MarshalBinary() ([]byte, error) {
+    buf := &bytes.Buffer{}
+
+    payload := int64(t.Duration())
+
+    if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil {
+        return nil, err
+    }
+
+    return buf.Bytes(), nil
+}
+
+// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime.
+func (t *UnixTime) UnmarshalBinary(raw []byte) error {
+    var nanosecondsSinceEpoch int64
+
+    if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil {
+        return err
+    }
+    *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch)
+    return nil
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
new file mode 100644
index 00000000..12addf0e
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go
@@ -0,0 +1,25 @@
+package date
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "strings"
+    "time"
+)
+
+// ParseTime parses a time string using the specified format.
+func ParseTime(format string, t string) (d time.Time, err error) {
+    return time.Parse(format, strings.ToUpper(t))
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go
new file mode 100644
index 00000000..f724f333
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/error.go
@@ -0,0 +1,98 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "fmt"
+    "net/http"
+)
+
+const (
+    // UndefinedStatusCode is used when HTTP status code is not available for an error.
+    UndefinedStatusCode = 0
+)
+
+// DetailedError encloses an error with details of the package, method, and associated HTTP
+// status code (if any).
+type DetailedError struct {
+    Original error
+
+    // PackageType is the package type of the object emitting the error. For types, the value
+    // matches that produced by the '%T' format specifier of the fmt package. For other elements,
+    // such as functions, it is just the package name (e.g., "autorest").
+    PackageType string
+
+    // Method is the name of the method raising the error.
+    Method string
+
+    // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error.
+    StatusCode interface{}
+
+    // Message is the error message.
+    Message string
+
+    // ServiceError is the response body of the failed API call, in bytes.
+    ServiceError []byte
+
+    // Response is the response object that was returned during failure if applicable.
+    Response *http.Response
+}
+
+// NewError creates a new Error conforming object from the passed packageType, method, and
+// message. message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError {
+    return NewErrorWithError(nil, packageType, method, nil, message, args...)
+}
+
+// NewErrorWithResponse creates a new Error conforming object from the passed
+// packageType, method, statusCode of the given resp (UndefinedStatusCode if
+// resp is nil), and message. message is treated as a format string to which the
+// optional args apply.
+func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+    return NewErrorWithError(nil, packageType, method, resp, message, args...)
+}
+
+// NewErrorWithError creates a new Error conforming object from the
+// passed packageType, method, statusCode of the given resp (UndefinedStatusCode
+// if resp is nil), message, and original error. message is treated as a format
+// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError {
+    if v, ok := original.(DetailedError); ok {
+        return v
+    }
+
+    statusCode := UndefinedStatusCode
+    if resp != nil {
+        statusCode = resp.StatusCode
+    }
+
+    return DetailedError{
+        Original:    original,
+        PackageType: packageType,
+        Method:      method,
+        StatusCode:  statusCode,
+        Message:     fmt.Sprintf(message, args...),
+        Response:    resp,
+    }
+}
+
+// Error returns a formatted string containing all available details (i.e., PackageType, Method,
+// StatusCode, Message, and original error (if any)).
+func (e DetailedError) Error() string {
+    if e.Original == nil {
+        return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode)
+    }
+    return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/Azure/go-autorest/autorest/go.mod
new file mode 100644
index 00000000..44a76708
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.mod
@@ -0,0 +1,12 @@
+module github.com/Azure/go-autorest/autorest
+
+go 1.12
+
+require (
+    github.com/Azure/go-autorest/autorest/adal v0.1.0
+    github.com/Azure/go-autorest/autorest/mocks v0.1.0
+    github.com/Azure/go-autorest/logger v0.1.0
+    github.com/Azure/go-autorest/tracing v0.1.0
+    go.opencensus.io v0.20.2
+    golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/Azure/go-autorest/autorest/go.sum
new file mode 100644
index 00000000..a5908324
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/go.sum
@@ -0,0 +1,143 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc=
+contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
+github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU=
+github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E=
+github.com/Azure/go-autorest/autorest/date
v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common 
v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 
h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools 
v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
new file mode 100644
index 00000000..48914022
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go
@@ -0,0 +1,526 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "bytes"
+    "encoding/json"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "mime/multipart"
+    "net/http"
+    "net/url"
+    "strings"
+)
+
+const (
+    mimeTypeJSON        = "application/json"
+    mimeTypeOctetStream = "application/octet-stream"
+    mimeTypeFormPost    = "application/x-www-form-urlencoded"
+
+    headerAuthorization    = "Authorization"
+    headerAuxAuthorization = "x-ms-authorization-auxiliary"
+    headerContentType      = "Content-Type"
+    headerUserAgent        = "User-Agent"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations
+// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used.
+type Preparer interface {
+    Prepare(*http.Request) (*http.Request, error)
+}
+
+// PreparerFunc is a function that implements the Preparer interface.
+type PreparerFunc func(*http.Request) (*http.Request, error)
+
+// Prepare implements the Preparer interface on PreparerFunc.
+func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) {
+    return pf(r)
+}
+
+// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then affect the result.
+type PrepareDecorator func(Preparer) Preparer
+
+// CreatePreparer creates, decorates, and returns a Preparer.
+// Without decorators, the returned Preparer returns the passed http.Request unmodified.
+// Preparers are safe to share and re-use.
+func CreatePreparer(decorators ...PrepareDecorator) Preparer {
+    return DecoratePreparer(
+        Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })),
+        decorators...)
+}
+
+// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it
+// applies to the Preparer. Decorators are applied in the order received, but their effect upon the
+// request depends on whether they are a pre-decorator (change the http.Request and then pass it
+// along) or a post-decorator (pass the http.Request along and alter it on return).
+func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer {
+    for _, decorate := range decorators {
+        p = decorate(p)
+    }
+    return p
+}
+
+// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators.
+// It creates a Preparer from the decorators which it then applies to the passed http.Request.
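+//
+// For illustration only (a sketch; the URL, path, and query values are
+// hypothetical), decorators compose over a base request:
+//
+//    req, err := Prepare(&http.Request{},
+//        AsGet(),
+//        WithBaseURL("https://example.com"),
+//        WithPath("things"),
+//        WithQueryParameters(map[string]interface{}{"filter": "name"}))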
+func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) {
+    if r == nil {
+        return nil, NewError("autorest", "Prepare", "Invoked without an http.Request")
+    }
+    return CreatePreparer(decorators...).Prepare(r)
+}
+
+// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed
+// http.Request.
+func WithNothing() PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            return p.Prepare(r)
+        })
+    }
+}
+
+// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to
+// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before
+// adding the header.
+func WithHeader(header string, value string) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                if r.Header == nil {
+                    r.Header = make(http.Header)
+                }
+                r.Header.Set(http.CanonicalHeaderKey(header), value)
+            }
+            return r, err
+        })
+    }
+}
+
+// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to
+// the passed values. It canonicalizes the passed header names (via http.CanonicalHeaderKey) before
+// adding them.
+func WithHeaders(headers map[string]interface{}) PrepareDecorator {
+    h := ensureValueStrings(headers)
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                if r.Header == nil {
+                    r.Header = make(http.Header)
+                }
+
+                for name, value := range h {
+                    r.Header.Set(http.CanonicalHeaderKey(name), value)
+                }
+            }
+            return r, err
+        })
+    }
+}
+
+// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the supplied token.
+func WithBearerAuthorization(token string) PrepareDecorator {
+    return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token))
+}
+
+// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value
+// is the passed contentType.
+func AsContentType(contentType string) PrepareDecorator {
+    return WithHeader(headerContentType, contentType)
+}
+
+// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the
+// passed string.
+func WithUserAgent(ua string) PrepareDecorator {
+    return WithHeader(headerUserAgent, ua)
+}
+
+// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/x-www-form-urlencoded".
+func AsFormURLEncoded() PrepareDecorator {
+    return AsContentType(mimeTypeFormPost)
+}
+
+// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is
+// "application/json".
+func AsJSON() PrepareDecorator {
+    return AsContentType(mimeTypeJSON)
+}
+
+// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header.
+func AsOctetStream() PrepareDecorator {
+    return AsContentType(mimeTypeOctetStream)
+}
+
+// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The
+// decorator does not validate that the passed method string is a known HTTP method.
+func WithMethod(method string) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r.Method = method
+            return p.Prepare(r)
+        })
+    }
+}
+
+// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE.
+func AsDelete() PrepareDecorator { return WithMethod("DELETE") }
+
+// AsGet returns a PrepareDecorator that sets the HTTP method to GET.
+func AsGet() PrepareDecorator { return WithMethod("GET") }
+
+// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD.
+func AsHead() PrepareDecorator { return WithMethod("HEAD") }
+
+// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE.
+func AsMerge() PrepareDecorator { return WithMethod("MERGE") }
+
+// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS.
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                var u *url.URL
+                if u, err = url.Parse(baseURL); err != nil {
+                    return r, err
+                }
+                if u.Scheme == "" {
+                    err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+                }
+                if err == nil {
+                    r.URL = u
+                }
+            }
+            return r, err
+        })
+    }
+}
+
+// WithBytes returns a PrepareDecorator that writes the passed bytes directly into the
+// request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                if input == nil {
+                    return r, fmt.Errorf("Input Bytes was nil")
+                }
+
+                r.ContentLength = int64(len(*input))
+                r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+            }
+            return r, err
+        })
+    }
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+    parameters := ensureValueStrings(urlParameters)
+    for key, value := range parameters {
+        baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+    }
+    return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that URL-encodes the given url.Values
+// (e.g., bar=baz&foo=quux) into the http.Request body.
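+//
+// For illustration only (a sketch; the field name and value are hypothetical):
+//
+//    req, err := Prepare(&http.Request{},
+//        AsPost(),
+//        WithBaseURL("https://example.com"),
+//        WithFormData(url.Values{"name": []string{"example"}}))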
+func WithFormData(v url.Values) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                s := v.Encode()
+
+                if r.Header == nil {
+                    r.Header = make(http.Header)
+                }
+                r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+                r.ContentLength = int64(len(s))
+                r.Body = ioutil.NopCloser(strings.NewReader(s))
+            }
+            return r, err
+        })
+    }
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that writes the supplied form parameters
+// into the http.Request body as multipart/form-data (io.ReadCloser values are written as
+// file parts, all other values as form fields).
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                var body bytes.Buffer
+                writer := multipart.NewWriter(&body)
+                for key, value := range formDataParameters {
+                    if rc, ok := value.(io.ReadCloser); ok {
+                        var fd io.Writer
+                        if fd, err = writer.CreateFormFile(key, key); err != nil {
+                            return r, err
+                        }
+                        if _, err = io.Copy(fd, rc); err != nil {
+                            return r, err
+                        }
+                    } else {
+                        if err = writer.WriteField(key, ensureValueString(value)); err != nil {
+                            return r, err
+                        }
+                    }
+                }
+                if err = writer.Close(); err != nil {
+                    return r, err
+                }
+                if r.Header == nil {
+                    r.Header = make(http.Header)
+                }
+                r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
+                r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
+                r.ContentLength = int64(body.Len())
+                return r, err
+            }
+            return r, err
+        })
+    }
+}
+
+// WithFile returns a PrepareDecorator that sends the contents of the passed io.ReadCloser
+// in the request body.
+func WithFile(f io.ReadCloser) PrepareDecorator {
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                b, err := ioutil.ReadAll(f)
+                if err != nil {
+                    return r, err
+                }
+                r.Body = ioutil.NopCloser(bytes.NewReader(b))
+                r.ContentLength = int64(len(b))
+            }
+            return r, err
+        })
+    }
+}
+
+// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request
+// and sets the Content-Length header.
+func WithBool(v bool) PrepareDecorator {
+    return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the
+// request and sets the Content-Length header.
+func WithFloat32(v float32) PrepareDecorator {
+    return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the
+// request and sets the Content-Length header.
+func WithFloat64(v float64) PrepareDecorator {
+    return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request
+// and sets the Content-Length header.
+func WithInt32(v int32) PrepareDecorator {
+    return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request
+// and sets the Content-Length header.
+func WithInt64(v int64) PrepareDecorator {
+    return WithString(fmt.Sprintf("%v", v))
+}
+
+// WithString returns a PrepareDecorator that encodes the passed string into the body of the request
+// and sets the Content-Length header.
+func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
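+//
+// For illustration only (a sketch; the template and value are hypothetical),
+// "/resources/{id}" becomes "/resources/42":
+//
+//    req, err = Prepare(req,
+//        WithPathParameters("/resources/{id}", map[string]interface{}{"id": "42"}))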
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+    parameters := ensureValueStrings(pathParameters)
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                if r.URL == nil {
+                    return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+                }
+                for key, value := range parameters {
+                    path = strings.Replace(path, "{"+key+"}", value, -1)
+                }
+
+                if r.URL, err = parseURL(r.URL, path); err != nil {
+                    return r, err
+                }
+            }
+            return r, err
+        })
+    }
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+    p := strings.TrimRight(u.String(), "/")
+    if !strings.HasPrefix(path, "/") {
+        path = "/" + path
+    }
+    return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+    parameters := ensureValueStrings(queryParameters)
+    return func(p Preparer) Preparer {
+        return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+            r, err := p.Prepare(r)
+            if err == nil {
+                if r.URL == nil {
+                    return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+                }
+
+                v := r.URL.Query()
+                for key, value := range parameters {
+                    d, err := url.QueryUnescape(value)
+                    if err != nil {
+                        return r, err
+                    }
+                    v.Add(key, d)
+                }
+                r.URL.RawQuery = v.Encode()
+            }
+            return r, err
+        })
+    }
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 00000000..349e1963
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+    "bytes"
+    "encoding/json"
+    "encoding/xml"
+    "fmt"
+    "io"
+    "io/ioutil"
+    "net/http"
+    "strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold
+// state since Responders may be shared and re-used.
+type Responder interface {
+    Respond(*http.Response) error
+}
+
+// ResponderFunc is a function that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+    return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned
+// Responder returns the passed http.Response unmodified.
+// Responders may or may not be safe to share and re-use: It depends on the applied decorators. For
+// example, a standard decorator that closes the response body is fine to share whereas a decorator
+// that reads the body into a passed struct is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators, which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
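+//
+// A minimal usage sketch (resp stands in for an *http.Response obtained
+// elsewhere; the decorator names are the ones defined in this file):
+//
+//	var v struct{ Name string }
+//	err := Respond(resp,
+//		ByUnmarshallingJSON(&v),
+//		ByClosing())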
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 00000000..fa11dbed
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 00000000..7143cc61
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 00000000..ae15c6bf
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
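+//
+// A rough usage sketch (client, req, resp, err, and the fixed attempt count
+// are placeholders; this mirrors how the retry decorators in sender.go drive
+// the type):
+//
+//	rr := NewRetriableRequest(req)
+//	for attempt := 0; attempt < 3; attempt++ {
+//		if err = rr.Prepare(); err != nil {
+//			break
+//		}
+//		if resp, err = client.Do(rr.Request()); err == nil {
+//			break
+//		}
+//	}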
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 00000000..b918833f
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,382 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
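+//
+// An illustrative custom decorator (the header name and value are arbitrary):
+//
+//	withHeader := func(s Sender) Sender {
+//		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+//			r.Header.Set("X-Example", "value") // pre-decorate the request
+//			return s.Do(r)
+//		})
+//	}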
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by cancelling the context on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
+func DoCloseIfError() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err != nil {
+				Respond(resp, ByDiscardingBody(), ByClosing())
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is
+// among the set passed. Since these are artificial errors, the response body may still require
+// closing.
+func DoErrorIfStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response
+// StatusCode is among the set passed. Since these are artificial errors, the response body
+// may still require closing.
+func DoErrorUnlessStatusCode(codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			resp, err := s.Do(r)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the
+// passed status codes. It expects the http.Response to contain a Location header providing the
+// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than
+// the supplied duration. It will delay between requests for the duration specified in the
+// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by
+// cancelling the context on the http.Request.
+func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			resp, err = s.Do(r)
+
+			if err == nil && ResponseHasStatusCode(resp, codes...) {
+				r, err = NewPollingRequestWithContext(r.Context(), resp)
+
+				for err == nil && ResponseHasStatusCode(resp, codes...) {
+					Respond(resp,
+						ByDiscardingBody(),
+						ByClosing())
+					resp, err = SendWithSender(s, r,
+						AfterDelay(GetRetryAfter(resp, delay)))
+				}
+			}
+
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the
+// http.Request.
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			for attempt := 0; attempt < attempts; attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified
+// number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request.
+// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts.
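+//
+// A usage sketch (req is a placeholder; three retries with a one second base
+// backoff, retrying on 500 and 503):
+//
+//	resp, err := SendWithSender(&http.Client{}, req,
+//		DoRetryForStatusCodes(3, time.Second,
+//			http.StatusInternalServerError, http.StatusServiceUnavailable))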
+func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...)
+		})
+	}
+}
+
+// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the
+// specified number of attempts, exponentially backing off between requests using the supplied backoff
+// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater
+// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request.
+func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...)
+		})
+	}
+}
+
+func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) {
+	rr := NewRetriableRequest(r)
+	// Increment to add the first call (attempts denotes number of retries)
+	for attempt := 0; attempt < attempts+1; {
+		err = rr.Prepare()
+		if err != nil {
+			return
+		}
+		resp, err = s.Do(rr.Request())
+		// if the error isn't temporary don't bother retrying
+		if err != nil && !IsTemporaryNetworkError(err) {
+			return
+		}
+		// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+		// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+		if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
+			return resp, err
+		}
+		delayed := DelayWithRetryAfter(resp, r.Context().Done())
+		if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) {
+			return resp, r.Context().Err()
+		}
+		// when count429 == false don't count a 429 against the number
+		// of attempts so that we continue to retry until it succeeds
+		if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) {
+			attempt++
+		}
+	}
+	return resp, err
+}
+
+// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header.
+// The value of Retry-After can be either the number of seconds or a date in RFC1123 format.
+// The function returns true after successfully waiting for the specified duration. If there is
+// no Retry-After header or the wait is cancelled the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by cancelling the
+// context on the http.Request.
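+//
+// As with DelayForBackoff below, the delay grows as backoff * 2^attempt; for
+// example, a backoff of 2s yields delays of 2s, 4s, 8s, ... for attempts
+// 0, 1, 2, ...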
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early,
+// returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. To cap the maximum possible delay specify a value greater than zero for cap.
+// The delay may be canceled by closing the passed channel. If terminated early, returns false.
+// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt
+// count.
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool {
+	d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second
+	if cap > 0 && d > cap {
+		d = cap
+	}
+	select {
+	case <-time.After(d):
+		return true
+	case <-cancel:
+		return false
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
new file mode 100644
index 00000000..86694bd2
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go
@@ -0,0 +1,152 @@
+/*
+Package to provides helpers to ease working with pointer values of marshalled structures.
+*/
+package to
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+	if s != nil {
+		return *s
+	}
+	return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
+// slice if the pointer is nil.
+func StringSlice(s *[]string) []string {
+	if s != nil {
+		return *s
+	}
+	return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
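+//
+// A short sketch of the pointer helpers in this package (values are
+// illustrative):
+//
+//	p := StringPtr("demo") // *string
+//	s := String(p)         // "demo"
+//	s = String(nil)        // "" for a nil pointer
+//	bp := ByteSlicePtr([]byte(s))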
+func ByteSlicePtr(b []byte) *[]byte {
+	return &b
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
new file mode 100644
index 00000000..a2054be3
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/to
+
+go 1.12
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 00000000..08cf11c1
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,228 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+// EncodedAs is a series of constants specifying various data encodings
+type EncodedAs string
+
+const (
+	// EncodedAsJSON states that data is encoded as JSON
+	EncodedAsJSON EncodedAs = "JSON"
+
+	// EncodedAsXML states that data is encoded as XML
+	EncodedAsXML EncodedAs = "XML"
+)
+
+// Decoder defines the decoding method json.Decoder and xml.Decoder share
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// NewDecoder creates a new decoder appropriate to the passed encoding.
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
+// encoded data.
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
+	if encodedAs == EncodedAsJSON {
+		return json.NewDecoder(r)
+	} else if encodedAs == EncodedAsXML {
+		return xml.NewDecoder(r)
+	}
+	return nil
+}
+
+// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
+// is especially useful if there is a chance the data will fail to decode.
+// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
+// is the decoding destination.
+func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
+	b := bytes.Buffer{}
+	return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
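+//
+// For example (resp stands in for an *http.Response):
+//
+//	var buf bytes.Buffer
+//	resp.Body = TeeReadCloser(resp.Body, &buf)
+//	// reading resp.Body now also writes the bytes read into buf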
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+	return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+type teeReadCloser struct {
+	rc io.ReadCloser
+	r  io.Reader
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+	return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+	return t.rc.Close()
+}
+
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+func escapeValueStrings(m map[string]string) map[string]string {
+	for key, value := range m {
+		m[key] = url.QueryEscape(value)
+	}
+	return m
+}
+
+func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
+	mapOfStrings := make(map[string]string)
+	for key, value := range mapOfInterface {
+		mapOfStrings[key] = ensureValueString(value)
+	}
+	return mapOfStrings
+}
+
+func ensureValueString(value interface{}) string {
+	if value == nil {
+		return ""
+	}
+	switch v := value.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+// MapToValues method converts map[string]interface{} to url.Values.
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// AsStringSlice method converts interface{} to []string. It expects the parameter passed
+// to be a slice or array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, v.Index(i).String())
+	}
+	return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+	if len(sep) == 0 {
+		return ensureValueString(v)
+	}
+	stringSlice, ok := v.([]string)
+	if !ok {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+	if _, ok := err.(adal.TokenRefreshError); ok {
+		return true
+	}
+	if de, ok := err.(DetailedError); ok {
+		return IsTokenRefreshError(de.Original)
+	}
+	return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
+func IsTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 00000000..fed156db
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+)
+
+// Error is the type that's returned when the validation of an API's argument constraints fails.
+type Error struct {
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// Message is the error message.
+	Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
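+//
+// For example (values illustrative):
+//
+//	err := NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+//	// err.Error() == "autorest#WithPathParameters: Invalid input: Invoked with a nil URL"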
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+	return Error{
+		PackageType: packageType,
+		Method:      method,
+		Message:     fmt.Sprintf(message, args...),
+	}
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
new file mode 100644
index 00000000..c44c51ff
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/validation
+
+go 1.12
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
new file mode 100644
index 00000000..4347755a
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
new file mode 100644
index 00000000..65899b69
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -0,0 +1,400 @@
+/*
+Package validation provides methods for validating parameter values using reflection.
+*/
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+// Constraint stores the constraint name, the target field name, the rule,
+// and any chained validations.
+type Constraint struct {
+
+	// Target field name for validation.
+	Target string
+
+	// Constraint name e.g. minLength, MaxLength, Pattern, etc.
+	Name string
+
+	// Rule for constraint e.g. greater than 10, less than 5 etc.
+	Rule interface{}
+
+	// Chain Validations for struct type
+	Chain []Constraint
+}
+
+// Validation stores parameter-wise validation.
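+//
+// A minimal sketch of how the two types drive Validate (the target name and
+// rule are illustrative):
+//
+//	err := Validate([]Validation{{
+//		TargetValue: "some-name",
+//		Constraints: []Constraint{{Target: "name", Name: MinLength, Rule: 1, Chain: nil}},
+//	}})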
+type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := toInt64(v.Rule) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%r != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, 
fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys 
:= x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +func toInt64(v interface{}) (int64, bool) { + if i64, ok := v.(int64); ok { + return i64, true + } + // older generators emit max constants as int, so if int64 fails fall back to int + if i32, ok := v.(int); ok { + return int64(i32), true + } + return 0, false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000..ec7e8481 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v12.3.0" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 00000000..f22ed56b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 00000000..da09f394 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. 
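+//
+// Matching is case-insensitive; for example:
+//
+//	lt, err := ParseLevel("info") // lt == LogInfo, err == nil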
+func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. 
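+// For example, a custom Writer might use it to skip expensive formatting
+// (a sketch; payload is illustrative):
+//
+//	if logger.Level() >= logger.LogDebug {
+//		logger.Instance.Writef(logger.LogDebug, "payload: %#v\n", payload)
+//	}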
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 00000000..b066b3e6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,8 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require ( + contrib.go.opencensus.io/exporter/ocagent v0.4.12 + go.opencensus.io v0.20.2 +) diff --git a/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 00000000..4e5548c9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,130 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 
v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= 
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000..28951c28 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,195 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "fmt" + "net/http" + "os" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +var ( + // Transport is the default tracing RoundTripper. The custom options setter will control + // if traces are being emitted or not. + Transport = NewTransport() + + // enabled is the flag for marking if tracing is enabled. + enabled = false + + // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise + // it will be using the parent sampler or the default. + sampler = trace.NeverSample() + + // Views for metric instrumentation. + views = map[string]*view.View{} + + // the trace exporter + traceExporter trace.Exporter +) + +func init() { + enableFromEnv() +} + +func enableFromEnv() { + _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") + _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") + if ok || legacyOk { + agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") + + if ok { + EnableWithAIForwarding(agentEndpoint) + } else { + Enable() + } + } +} + +// NewTransport returns a new instance of a tracing-aware RoundTripper. +func NewTransport() *ochttp.Transport { + return &ochttp.Transport{ + Propagation: &tracecontext.HTTPFormat{}, + GetStartOptions: getStartOptions, + } +} + +// IsEnabled returns true if monitoring is enabled for the sdk. +func IsEnabled() bool { + return enabled +} + +// Enable will start instrumentation for metrics and traces. +func Enable() error { + enabled = true + sampler = nil + + err := initStats() + return err +} + +// Disable will disable instrumentation for metrics and traces. 
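+// A typical caller pairs it with Enable around a traced block (a sketch;
+// Enable's error handling is elided):
+//
+//	tracing.Enable()
+//	defer tracing.Disable()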
+func Disable() {
+	disableStats()
+	sampler = trace.NeverSample()
+	if traceExporter != nil {
+		trace.UnregisterExporter(traceExporter)
+	}
+	enabled = false
+}
+
+// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
+// exporter making the metrics and traces available in app insights.
+func EnableWithAIForwarding(agentEndpoint string) (err error) {
+	err = Enable()
+	if err != nil {
+		return err
+	}
+
+	// assign to the package-level exporter so that Disable can unregister it later
+	traceExporter, err = ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
+	if err != nil {
+		return err
+	}
+	trace.RegisterExporter(traceExporter)
+	return
+}
+
+// getStartOptions is the custom options setter for the ochttp package.
+func getStartOptions(*http.Request) trace.StartOptions {
+	return trace.StartOptions{
+		Sampler: sampler,
+	}
+}
+
+// initStats registers the views for the http metrics
+func initStats() (err error) {
+	clientViews := []*view.View{
+		ochttp.ClientCompletedCount,
+		ochttp.ClientRoundtripLatencyDistribution,
+		ochttp.ClientReceivedBytesDistribution,
+		ochttp.ClientSentBytesDistribution,
+	}
+	for _, cv := range clientViews {
+		vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
+		views[vn] = cv.WithName(vn)
+		err = view.Register(views[vn])
+		if err != nil {
+			return err
+		}
+	}
+	return
+}
+
+// disableStats will unregister the previously registered metrics
+func disableStats() {
+	for _, v := range views {
+		view.Unregister(v)
+	}
+}
+
+// StartSpan starts a trace span
+func StartSpan(ctx context.Context, name string) context.Context {
+	ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
+	return ctx
+}
+
+// EndSpan ends a previously started span stored in the context
+func EndSpan(ctx context.Context, httpStatusCode int, err error) {
+	span := trace.FromContext(ctx)
+
+	if span == nil {
+		return
+	}
+
+	if err != nil {
+		span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
+	}
+	span.End()
+}
+
+// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
+// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
+func toTraceStatusCode(httpStatusCode int) int32 {
+	switch {
+	case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
+		return trace.StatusCodeOK
+	case httpStatusCode == http.StatusBadRequest:
+		return trace.StatusCodeInvalidArgument
+	case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
+ return trace.StatusCodeUnauthenticated + case httpStatusCode == http.StatusForbidden: + return trace.StatusCodePermissionDenied + case httpStatusCode == http.StatusNotFound: + return trace.StatusCodeNotFound + case httpStatusCode == http.StatusTooManyRequests: + return trace.StatusCodeResourceExhausted + case httpStatusCode == 499: + return trace.StatusCodeCancelled + case httpStatusCode == http.StatusNotImplemented: + return trace.StatusCodeUnimplemented + case httpStatusCode == http.StatusServiceUnavailable: + return trace.StatusCodeUnavailable + case httpStatusCode == http.StatusGatewayTimeout: + return trace.StatusCodeDeadlineExceeded + default: + return trace.StatusCodeUnknown + } +} diff --git a/vendor/github.com/Azure/go-ntlmssp/LICENSE b/vendor/github.com/Azure/go-ntlmssp/LICENSE new file mode 100644 index 00000000..dc1cf39d --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/Azure/go-ntlmssp/README.md b/vendor/github.com/Azure/go-ntlmssp/README.md new file mode 100644 index 00000000..55cdcefa --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/README.md @@ -0,0 +1,29 @@ +# go-ntlmssp +Golang package that provides NTLM/Negotiate authentication over HTTP + +[![GoDoc](https://godoc.org/github.com/Azure/go-ntlmssp?status.svg)](https://godoc.org/github.com/Azure/go-ntlmssp) [![Build Status](https://travis-ci.org/Azure/go-ntlmssp.svg?branch=dev)](https://travis-ci.org/Azure/go-ntlmssp) + +Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx +Implementation hints from http://davenport.sourceforge.net/ntlm.html + +This package only implements authentication, no key exchange or encryption. It +only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +This package implements NTLMv2. + +# Usage + +``` +url, user, password := "http://www.example.com/secrets", "robpike", "pw123" +client := &http.Client{ + Transport: ntlmssp.Negotiator{ + RoundTripper:&http.Transport{}, + }, +} + +req, _ := http.NewRequest("GET", url, nil) +req.SetBasicAuth(user, password) +res, _ := client.Do(req) +``` + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go new file mode 100644 index 00000000..c6fbe444 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authenticate_message.go @@ -0,0 +1,128 @@ +package ntlmssp + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "errors" + "time" +) + +type authenicateMessage struct { + LmChallengeResponse []byte + NtChallengeResponse []byte + + TargetName string + UserName string + + // only set if negotiateFlag_NTLMSSP_NEGOTIATE_KEY_EXCH + EncryptedRandomSessionKey []byte + + NegotiateFlags negotiateFlags + + MIC []byte +} + +type authenticateMessageFields struct { + messageHeader + LmChallengeResponse varField + NtChallengeResponse varField + TargetName varField + UserName varField + Workstation varField + _ [8]byte + NegotiateFlags negotiateFlags +} + +func (m authenicateMessage) MarshalBinary() ([]byte, error) { + if !m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE) { + return nil, errors.New("Only unicode is supported") + } + + target, user := toUnicode(m.TargetName), toUnicode(m.UserName) + workstation := toUnicode("go-ntlmssp") + + ptr := binary.Size(&authenticateMessageFields{}) + f := authenticateMessageFields{ + messageHeader: newMessageHeader(3), + NegotiateFlags: m.NegotiateFlags, + LmChallengeResponse: newVarField(&ptr, len(m.LmChallengeResponse)), + NtChallengeResponse: newVarField(&ptr, len(m.NtChallengeResponse)), + TargetName: newVarField(&ptr, len(target)), + UserName: newVarField(&ptr, len(user)), + Workstation: newVarField(&ptr, len(workstation)), + } + + f.NegotiateFlags.Unset(negotiateFlagNTLMSSPNEGOTIATEVERSION) + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &f); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.LmChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &m.NtChallengeResponse); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &target); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &user); err != nil { + return nil, err + } + if err := binary.Write(&b, binary.LittleEndian, &workstation); err != nil { + return nil, err + } + + return b.Bytes(), nil +} + +//ProcessChallenge crafts an AUTHENTICATE message in response to the CHALLENGE message +//that was received from the server +func ProcessChallenge(challengeMessageData []byte, user, password string) ([]byte, error) { + if user == "" && password == "" { + return nil, errors.New("Anonymous authentication not supported") + } + + var cm challengeMessage + if err := cm.UnmarshalBinary(challengeMessageData); err != nil { + return nil, err + } + + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATELMKEY) { + return nil, errors.New("Only NTLM v2 is supported, but server requested v1 (NTLMSSP_NEGOTIATE_LM_KEY)") + } + if cm.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEKEYEXCH) { + return nil, errors.New("Key exchange requested but not supported (NTLMSSP_NEGOTIATE_KEY_EXCH)") + } + + am := authenicateMessage{ + UserName: user, + TargetName: cm.TargetName, + NegotiateFlags: cm.NegotiateFlags, + } + + timestamp := cm.TargetInfo[avIDMsvAvTimestamp] + if 
timestamp == nil { // no time sent, take current time + ft := uint64(time.Now().UnixNano()) / 100 + ft += 116444736000000000 // add time between unix & windows offset + timestamp = make([]byte, 8) + binary.LittleEndian.PutUint64(timestamp, ft) + } + + clientChallenge := make([]byte, 8) + rand.Reader.Read(clientChallenge) + + ntlmV2Hash := getNtlmV2Hash(password, user, cm.TargetName) + + am.NtChallengeResponse = computeNtlmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge, timestamp, cm.TargetInfoRaw) + + if cm.TargetInfoRaw == nil { + am.LmChallengeResponse = computeLmV2Response(ntlmV2Hash, + cm.ServerChallenge[:], clientChallenge) + } + + return am.MarshalBinary() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/authheader.go b/vendor/github.com/Azure/go-ntlmssp/authheader.go new file mode 100644 index 00000000..aac3f77d --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/authheader.go @@ -0,0 +1,37 @@ +package ntlmssp + +import ( + "encoding/base64" + "strings" +) + +type authheader string + +func (h authheader) IsBasic() bool { + return strings.HasPrefix(string(h), "Basic ") +} + +func (h authheader) IsNegotiate() bool { + return strings.HasPrefix(string(h), "Negotiate") +} + +func (h authheader) IsNTLM() bool { + return strings.HasPrefix(string(h), "NTLM") +} + +func (h authheader) GetData() ([]byte, error) { + p := strings.Split(string(h), " ") + if len(p) < 2 { + return nil, nil + } + return base64.StdEncoding.DecodeString(string(p[1])) +} + +func (h authheader) GetBasicCreds() (username, password string, err error) { + d, err := h.GetData() + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(d), ":", 2) + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/avids.go b/vendor/github.com/Azure/go-ntlmssp/avids.go new file mode 100644 index 00000000..196b5f13 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/avids.go @@ -0,0 +1,17 @@ +package ntlmssp + +type avID uint16 + +const ( + avIDMsvAvEOL avID = iota + avIDMsvAvNbComputerName + avIDMsvAvNbDomainName + avIDMsvAvDNSComputerName + avIDMsvAvDNSDomainName + avIDMsvAvDNSTreeName + avIDMsvAvFlags + avIDMsvAvTimestamp + avIDMsvAvSingleHost + avIDMsvAvTargetName + avIDMsvChannelBindings +) diff --git a/vendor/github.com/Azure/go-ntlmssp/challenge_message.go b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go new file mode 100644 index 00000000..053b55e4 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/challenge_message.go @@ -0,0 +1,82 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "fmt" +) + +type challengeMessageFields struct { + messageHeader + TargetName varField + NegotiateFlags negotiateFlags + ServerChallenge [8]byte + _ [8]byte + TargetInfo varField +} + +func (m challengeMessageFields) IsValid() bool { + return m.messageHeader.IsValid() && m.MessageType == 2 +} + +type challengeMessage struct { + challengeMessageFields + TargetName string + TargetInfo map[avID][]byte + TargetInfoRaw []byte +} + +func (m *challengeMessage) UnmarshalBinary(data []byte) error { + r := bytes.NewReader(data) + err := binary.Read(r, binary.LittleEndian, &m.challengeMessageFields) + if err != nil { + return err + } + if !m.challengeMessageFields.IsValid() { + return fmt.Errorf("Message is not a valid challenge message: %+v", m.challengeMessageFields.messageHeader) + } + + if m.challengeMessageFields.TargetName.Len > 0 { + m.TargetName, err = 
m.challengeMessageFields.TargetName.ReadStringFrom(data, m.NegotiateFlags.Has(negotiateFlagNTLMSSPNEGOTIATEUNICODE)) + if err != nil { + return err + } + } + + if m.challengeMessageFields.TargetInfo.Len > 0 { + d, err := m.challengeMessageFields.TargetInfo.ReadFrom(data) + m.TargetInfoRaw = d + if err != nil { + return err + } + m.TargetInfo = make(map[avID][]byte) + r := bytes.NewReader(d) + for { + var id avID + var l uint16 + err = binary.Read(r, binary.LittleEndian, &id) + if err != nil { + return err + } + if id == avIDMsvAvEOL { + break + } + + err = binary.Read(r, binary.LittleEndian, &l) + if err != nil { + return err + } + value := make([]byte, l) + n, err := r.Read(value) + if err != nil { + return err + } + if n != int(l) { + return fmt.Errorf("Expected to read %d bytes, got only %d", l, n) + } + m.TargetInfo[id] = value + } + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/messageheader.go b/vendor/github.com/Azure/go-ntlmssp/messageheader.go new file mode 100644 index 00000000..247e2846 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/messageheader.go @@ -0,0 +1,21 @@ +package ntlmssp + +import ( + "bytes" +) + +var signature = [8]byte{'N', 'T', 'L', 'M', 'S', 'S', 'P', 0} + +type messageHeader struct { + Signature [8]byte + MessageType uint32 +} + +func (h messageHeader) IsValid() bool { + return bytes.Equal(h.Signature[:], signature[:]) && + h.MessageType > 0 && h.MessageType < 4 +} + +func newMessageHeader(messageType uint32) messageHeader { + return messageHeader{signature, messageType} +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go new file mode 100644 index 00000000..5905c023 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_flags.go @@ -0,0 +1,52 @@ +package ntlmssp + +type negotiateFlags uint32 + +const ( + /*A*/ negotiateFlagNTLMSSPNEGOTIATEUNICODE negotiateFlags = 1 << 0 + /*B*/ negotiateFlagNTLMNEGOTIATEOEM = 1 << 1 + /*C*/ negotiateFlagNTLMSSPREQUESTTARGET = 1 << 2 + + /*D*/ + negotiateFlagNTLMSSPNEGOTIATESIGN = 1 << 4 + /*E*/ negotiateFlagNTLMSSPNEGOTIATESEAL = 1 << 5 + /*F*/ negotiateFlagNTLMSSPNEGOTIATEDATAGRAM = 1 << 6 + /*G*/ negotiateFlagNTLMSSPNEGOTIATELMKEY = 1 << 7 + + /*H*/ + negotiateFlagNTLMSSPNEGOTIATENTLM = 1 << 9 + + /*J*/ + negotiateFlagANONYMOUS = 1 << 11 + /*K*/ negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED = 1 << 12 + /*L*/ negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED = 1 << 13 + + /*M*/ + negotiateFlagNTLMSSPNEGOTIATEALWAYSSIGN = 1 << 15 + /*N*/ negotiateFlagNTLMSSPTARGETTYPEDOMAIN = 1 << 16 + /*O*/ negotiateFlagNTLMSSPTARGETTYPESERVER = 1 << 17 + + /*P*/ + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY = 1 << 19 + /*Q*/ negotiateFlagNTLMSSPNEGOTIATEIDENTIFY = 1 << 20 + + /*R*/ + negotiateFlagNTLMSSPREQUESTNONNTSESSIONKEY = 1 << 22 + /*S*/ negotiateFlagNTLMSSPNEGOTIATETARGETINFO = 1 << 23 + + /*T*/ + negotiateFlagNTLMSSPNEGOTIATEVERSION = 1 << 25 + + /*U*/ + negotiateFlagNTLMSSPNEGOTIATE128 = 1 << 29 + /*V*/ negotiateFlagNTLMSSPNEGOTIATEKEYEXCH = 1 << 30 + /*W*/ negotiateFlagNTLMSSPNEGOTIATE56 = 1 << 31 +) + +func (field negotiateFlags) Has(flags negotiateFlags) bool { + return field&flags == flags +} + +func (field *negotiateFlags) Unset(flags negotiateFlags) { + *field = *field ^ (*field & flags) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go new file mode 100644 
index 00000000..e466a986 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiate_message.go @@ -0,0 +1,64 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "strings" +) + +const expMsgBodyLen = 40 + +type negotiateMessageFields struct { + messageHeader + NegotiateFlags negotiateFlags + + Domain varField + Workstation varField + + Version +} + +var defaultFlags = negotiateFlagNTLMSSPNEGOTIATETARGETINFO | + negotiateFlagNTLMSSPNEGOTIATE56 | + negotiateFlagNTLMSSPNEGOTIATE128 | + negotiateFlagNTLMSSPNEGOTIATEUNICODE | + negotiateFlagNTLMSSPNEGOTIATEEXTENDEDSESSIONSECURITY + +//NewNegotiateMessage creates a new NEGOTIATE message with the +//flags that this package supports. +func NewNegotiateMessage(domainName, workstationName string) ([]byte, error) { + payloadOffset := expMsgBodyLen + flags := defaultFlags + + if domainName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMDOMAINSUPPLIED + } + + if workstationName != "" { + flags |= negotiateFlagNTLMSSPNEGOTIATEOEMWORKSTATIONSUPPLIED + } + + msg := negotiateMessageFields{ + messageHeader: newMessageHeader(1), + NegotiateFlags: flags, + Domain: newVarField(&payloadOffset, len(domainName)), + Workstation: newVarField(&payloadOffset, len(workstationName)), + Version: DefaultVersion(), + } + + b := bytes.Buffer{} + if err := binary.Write(&b, binary.LittleEndian, &msg); err != nil { + return nil, err + } + if b.Len() != expMsgBodyLen { + return nil, errors.New("incorrect body length") + } + + payload := strings.ToUpper(domainName + workstationName) + if _, err := b.WriteString(payload); err != nil { + return nil, err + } + + return b.Bytes(), nil +} diff --git a/vendor/github.com/Azure/go-ntlmssp/negotiator.go b/vendor/github.com/Azure/go-ntlmssp/negotiator.go new file mode 100644 index 00000000..6e304544 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/negotiator.go @@ -0,0 +1,144 @@ +package ntlmssp + +import ( + "bytes" + "encoding/base64" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// GetDomain : parse domain name from based on slashes in the input +func GetDomain(user string) (string, string) { + domain := "" + + if strings.Contains(user, "\\") { + ucomponents := strings.SplitN(user, "\\", 2) + domain = ucomponents[0] + user = ucomponents[1] + } + return user, domain +} + +//Negotiator is a http.Roundtripper decorator that automatically +//converts basic authentication to NTLM/Negotiate authentication when appropriate. +type Negotiator struct{ http.RoundTripper } + +//RoundTrip sends the request to the server, handling any authentication +//re-sends as needed. 
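+//
+//The exchange below follows the NTLM handshake: an anonymous retry first, then
+//a NEGOTIATE message, and finally an AUTHENTICATE message computed from the
+//server's CHALLENGE.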
+func (l Negotiator) RoundTrip(req *http.Request) (res *http.Response, err error) { + // Use default round tripper if not provided + rt := l.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + // If it is not basic auth, just round trip the request as usual + reqauth := authheader(req.Header.Get("Authorization")) + if !reqauth.IsBasic() { + return rt.RoundTrip(req) + } + // Save request body + body := bytes.Buffer{} + if req.Body != nil { + _, err = body.ReadFrom(req.Body) + if err != nil { + return nil, err + } + + req.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + } + // first try anonymous, in case the server still finds us + // authenticated from previous traffic + req.Header.Del("Authorization") + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + + resauth := authheader(res.Header.Get("Www-Authenticate")) + if !resauth.IsNegotiate() && !resauth.IsNTLM() { + // Unauthorized, Negotiate not requested, let's try with basic auth + req.Header.Set("Authorization", string(reqauth)) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode != http.StatusUnauthorized { + return res, err + } + resauth = authheader(res.Header.Get("Www-Authenticate")) + } + + if resauth.IsNegotiate() || resauth.IsNTLM() { + // 401 with request:Basic and response:Negotiate + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // recycle credentials + u, p, err := reqauth.GetBasicCreds() + if err != nil { + return nil, err + } + + // get domain from username + domain := "" + u, domain = GetDomain(u) + + // send negotiate + negotiateMessage, err := NewNegotiateMessage(domain, "") + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(negotiateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + if err != nil { + return nil, err + } + + // receive challenge? 
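+	// a Negotiate/NTLM 401 carries the server's CHALLENGE (type 2) message,
+	// base64-encoded, in its Www-Authenticate header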
+ resauth = authheader(res.Header.Get("Www-Authenticate")) + challengeMessage, err := resauth.GetData() + if err != nil { + return nil, err + } + if !(resauth.IsNegotiate() || resauth.IsNTLM()) || len(challengeMessage) == 0 { + // Negotiation failed, let client deal with response + return res, nil + } + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + // send authenticate + authenticateMessage, err := ProcessChallenge(challengeMessage, u, p) + if err != nil { + return nil, err + } + if resauth.IsNTLM() { + req.Header.Set("Authorization", "NTLM "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } else { + req.Header.Set("Authorization", "Negotiate "+base64.StdEncoding.EncodeToString(authenticateMessage)) + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + + res, err = rt.RoundTrip(req) + } + + return res, err +} diff --git a/vendor/github.com/Azure/go-ntlmssp/nlmp.go b/vendor/github.com/Azure/go-ntlmssp/nlmp.go new file mode 100644 index 00000000..1e65abe8 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/nlmp.go @@ -0,0 +1,51 @@ +// Package ntlmssp provides NTLM/Negotiate authentication over HTTP +// +// Protocol details from https://msdn.microsoft.com/en-us/library/cc236621.aspx, +// implementation hints from http://davenport.sourceforge.net/ntlm.html . +// This package only implements authentication, no key exchange or encryption. It +// only supports Unicode (UTF16LE) encoding of protocol strings, no OEM encoding. +// This package implements NTLMv2. +package ntlmssp + +import ( + "crypto/hmac" + "crypto/md5" + "golang.org/x/crypto/md4" + "strings" +) + +func getNtlmV2Hash(password, username, target string) []byte { + return hmacMd5(getNtlmHash(password), toUnicode(strings.ToUpper(username)+target)) +} + +func getNtlmHash(password string) []byte { + hash := md4.New() + hash.Write(toUnicode(password)) + return hash.Sum(nil) +} + +func computeNtlmV2Response(ntlmV2Hash, serverChallenge, clientChallenge, + timestamp, targetInfo []byte) []byte { + + temp := []byte{1, 1, 0, 0, 0, 0, 0, 0} + temp = append(temp, timestamp...) + temp = append(temp, clientChallenge...) + temp = append(temp, 0, 0, 0, 0) + temp = append(temp, targetInfo...) + temp = append(temp, 0, 0, 0, 0) + + NTProofStr := hmacMd5(ntlmV2Hash, serverChallenge, temp) + return append(NTProofStr, temp...) +} + +func computeLmV2Response(ntlmV2Hash, serverChallenge, clientChallenge []byte) []byte { + return append(hmacMd5(ntlmV2Hash, serverChallenge, clientChallenge), clientChallenge...) 
+} + +func hmacMd5(key []byte, data ...[]byte) []byte { + mac := hmac.New(md5.New, key) + for _, d := range data { + mac.Write(d) + } + return mac.Sum(nil) +} diff --git a/vendor/github.com/Azure/go-ntlmssp/unicode.go b/vendor/github.com/Azure/go-ntlmssp/unicode.go new file mode 100644 index 00000000..7b4f4716 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/unicode.go @@ -0,0 +1,29 @@ +package ntlmssp + +import ( + "bytes" + "encoding/binary" + "errors" + "unicode/utf16" +) + +// helper func's for dealing with Windows Unicode (UTF16LE) + +func fromUnicode(d []byte) (string, error) { + if len(d)%2 > 0 { + return "", errors.New("Unicode (UTF 16 LE) specified, but uneven data length") + } + s := make([]uint16, len(d)/2) + err := binary.Read(bytes.NewReader(d), binary.LittleEndian, &s) + if err != nil { + return "", err + } + return string(utf16.Decode(s)), nil +} + +func toUnicode(s string) []byte { + uints := utf16.Encode([]rune(s)) + b := bytes.Buffer{} + binary.Write(&b, binary.LittleEndian, &uints) + return b.Bytes() +} diff --git a/vendor/github.com/Azure/go-ntlmssp/varfield.go b/vendor/github.com/Azure/go-ntlmssp/varfield.go new file mode 100644 index 00000000..15f9aa11 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/varfield.go @@ -0,0 +1,40 @@ +package ntlmssp + +import ( + "errors" +) + +type varField struct { + Len uint16 + MaxLen uint16 + BufferOffset uint32 +} + +func (f varField) ReadFrom(buffer []byte) ([]byte, error) { + if len(buffer) < int(f.BufferOffset+uint32(f.Len)) { + return nil, errors.New("Error reading data, varField extends beyond buffer") + } + return buffer[f.BufferOffset : f.BufferOffset+uint32(f.Len)], nil +} + +func (f varField) ReadStringFrom(buffer []byte, unicode bool) (string, error) { + d, err := f.ReadFrom(buffer) + if err != nil { + return "", err + } + if unicode { // UTF-16LE encoding scheme + return fromUnicode(d) + } + // OEM encoding, close enough to ASCII, since no code page is specified + return string(d), err +} + +func newVarField(ptr *int, fieldsize int) varField { + f := varField{ + Len: uint16(fieldsize), + MaxLen: uint16(fieldsize), + BufferOffset: uint32(*ptr), + } + *ptr += fieldsize + return f +} diff --git a/vendor/github.com/Azure/go-ntlmssp/version.go b/vendor/github.com/Azure/go-ntlmssp/version.go new file mode 100644 index 00000000..6d848921 --- /dev/null +++ b/vendor/github.com/Azure/go-ntlmssp/version.go @@ -0,0 +1,20 @@ +package ntlmssp + +// Version is a struct representing https://msdn.microsoft.com/en-us/library/cc236654.aspx +type Version struct { + ProductMajorVersion uint8 + ProductMinorVersion uint8 + ProductBuild uint16 + _ [3]byte + NTLMRevisionCurrent uint8 +} + +// DefaultVersion returns a Version with "sensible" defaults (Windows 7) +func DefaultVersion() Version { + return Version{ + ProductMajorVersion: 6, + ProductMinorVersion: 1, + ProductBuild: 7601, + NTLMRevisionCurrent: 15, + } +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE b/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE new file mode 100644 index 00000000..2afe88a6 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 ChrisTrenkamp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the 
rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/README.md b/vendor/github.com/ChrisTrenkamp/goxpath/README.md new file mode 100644 index 00000000..deb49624 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/README.md @@ -0,0 +1,2 @@ +# goxpath [![GoDoc](https://godoc.org/gopkg.in/src-d/go-git.v2?status.svg)](https://godoc.org/github.com/ChrisTrenkamp/goxpath) [![Build Status](https://travis-ci.org/ChrisTrenkamp/goxpath.svg?branch=master)](https://travis-ci.org/ChrisTrenkamp/goxpath) [![codecov.io](https://codecov.io/github/ChrisTrenkamp/goxpath/coverage.svg?branch=master)](https://codecov.io/github/ChrisTrenkamp/goxpath?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/ChrisTrenkamp/goxpath)](https://goreportcard.com/report/github.com/ChrisTrenkamp/goxpath) +An XPath 1.0 implementation written in Go. See the [wiki](https://github.com/ChrisTrenkamp/goxpath/wiki) for more information. diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh b/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh new file mode 100644 index 00000000..0e35d7de --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/coverage.sh @@ -0,0 +1,17 @@ +#!/bin/bash +cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +go get github.com/ChrisTrenkamp/goxpath/cmd/goxpath +if [ $? != 0 ]; then + exit 1 +fi +go test >/dev/null 2>&1 +if [ $? != 0 ]; then + go test + exit 1 +fi +gometalinter --deadline=1m ./... +go list -f '{{if gt (len .TestGoFiles) 0}}"go test -covermode count -coverprofile {{.Name}}.coverprofile -coverpkg ./... {{.ImportPath}}"{{end}} >/dev/null' ./... | xargs -I {} bash -c {} 2>/dev/null +gocovmerge `ls *.coverprofile` > coverage.txt +go tool cover -html=coverage.txt -o coverage.html +firefox coverage.html +rm coverage.html coverage.txt *.coverprofile diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go new file mode 100644 index 00000000..189ebb9c --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/goxpath.go @@ -0,0 +1,117 @@ +package goxpath + +import ( + "encoding/xml" + "fmt" + + "github.com/ChrisTrenkamp/goxpath/internal/execxp" + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Opts defines namespace mappings and custom functions for XPath expressions. +type Opts struct { + NS map[string]string + Funcs map[xml.Name]tree.Wrap + Vars map[string]tree.Result +} + +//FuncOpts is a function wrapper for Opts. 
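+//For example, an option can bind a namespace prefix for use in the expression
+//(a sketch; xp is a parsed XPathExec, t a document tree, and the URI is
+//illustrative):
+//
+//	res, err := xp.Exec(t, func(o *goxpath.Opts) { o.NS["x"] = "http://example.com/ns" })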
+type FuncOpts func(*Opts) + +//XPathExec is the XPath executor, compiled from an XPath string +type XPathExec struct { + n *parser.Node +} + +//Parse parses the XPath expression, xp, returning an XPath executor. +func Parse(xp string) (XPathExec, error) { + n, err := parser.Parse(xp) + return XPathExec{n: n}, err +} + +//MustParse is like Parse, but panics instead of returning an error. +func MustParse(xp string) XPathExec { + ret, err := Parse(xp) + if err != nil { + panic(err) + } + return ret +} + +//Exec executes the XPath expression, xp, against the tree, t, with the +//namespace mappings, ns, and returns the result as a stringer. +func (xp XPathExec) Exec(t tree.Node, opts ...FuncOpts) (tree.Result, error) { + o := &Opts{ + NS: make(map[string]string), + Funcs: make(map[xml.Name]tree.Wrap), + Vars: make(map[string]tree.Result), + } + for _, i := range opts { + i(o) + } + return execxp.Exec(xp.n, t, o.NS, o.Funcs, o.Vars) +} + +//ExecBool is like Exec, except it will attempt to convert the result to its boolean value. +func (xp XPathExec) ExecBool(t tree.Node, opts ...FuncOpts) (bool, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return false, err + } + + b, ok := res.(tree.IsBool) + if !ok { + return false, fmt.Errorf("Cannot convert result to a boolean") + } + + return bool(b.Bool()), nil +} + +//ExecNum is like Exec, except it will attempt to convert the result to its number value. +func (xp XPathExec) ExecNum(t tree.Node, opts ...FuncOpts) (float64, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return 0, err + } + + n, ok := res.(tree.IsNum) + if !ok { + return 0, fmt.Errorf("Cannot convert result to a number") + } + + return float64(n.Num()), nil +} + +//ExecNode is like Exec, except it will attempt to return the result as a node-set. +func (xp XPathExec) ExecNode(t tree.Node, opts ...FuncOpts) (tree.NodeSet, error) { + res, err := xp.Exec(t, opts...) + if err != nil { + return nil, err + } + + n, ok := res.(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert result to a node-set") + } + + return n, nil +} + +//MustExec is like Exec, but panics instead of returning an error. +func (xp XPathExec) MustExec(t tree.Node, opts ...FuncOpts) tree.Result { + res, err := xp.Exec(t, opts...) + if err != nil { + panic(err) + } + return res +} + +//ParseExec parses the XPath string, xpstr, and runs Exec. +func ParseExec(xpstr string, t tree.Node, opts ...FuncOpts) (tree.Result, error) { + xp, err := Parse(xpstr) + if err != nil { + return nil, err + } + return xp.Exec(t, opts...) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go new file mode 100644 index 00000000..7b325156 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/execxp.go @@ -0,0 +1,27 @@ +package execxp + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Exec executes the XPath expression, xp, against the tree, t, with the +//namespace mappings, ns. 
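+//fns supplies custom functions and v supplies variable bindings for the
+//evaluation context.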
+func Exec(n *parser.Node, t tree.Node, ns map[string]string, fns map[xml.Name]tree.Wrap, v map[string]tree.Result) (tree.Result, error) { + f := xpFilt{ + t: t, + ns: ns, + ctx: tree.NodeSet{t}, + fns: fns, + variables: v, + } + + return exec(&f, n) +} + +func exec(f *xpFilt, n *parser.Node) (tree.Result, error) { + err := xfExec(f, n) + return f.ctx, err +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go new file mode 100644 index 00000000..058f79a0 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil/findUtil.go @@ -0,0 +1,307 @@ +package findutil + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +const ( + wildcard = "*" +) + +type findFunc func(tree.Node, *pathexpr.PathExpr, *[]tree.Node) + +var findMap = map[string]findFunc{ + xconst.AxisAncestor: findAncestor, + xconst.AxisAncestorOrSelf: findAncestorOrSelf, + xconst.AxisAttribute: findAttribute, + xconst.AxisChild: findChild, + xconst.AxisDescendent: findDescendent, + xconst.AxisDescendentOrSelf: findDescendentOrSelf, + xconst.AxisFollowing: findFollowing, + xconst.AxisFollowingSibling: findFollowingSibling, + xconst.AxisNamespace: findNamespace, + xconst.AxisParent: findParent, + xconst.AxisPreceding: findPreceding, + xconst.AxisPrecedingSibling: findPrecedingSibling, + xconst.AxisSelf: findSelf, +} + +//Find finds nodes based on the pathexpr.PathExpr +func Find(x tree.Node, p pathexpr.PathExpr) []tree.Node { + ret := []tree.Node{} + + if p.Axis == "" { + findChild(x, &p, &ret) + return ret + } + + f := findMap[p.Axis] + f(x, &p, &ret) + + return ret +} + +func findAncestor(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + + addNode(x.GetParent(), p, ret) + findAncestor(x.GetParent(), p, ret) +} + +func findAncestorOrSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + findSelf(x, p, ret) + findAncestor(x, p, ret) +} + +func findAttribute(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + for _, i := range ele.GetAttrs() { + addNode(i, p, ret) + } + } +} + +func findChild(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + ch := ele.GetChildren() + for i := range ch { + addNode(ch[i], p, ret) + } + } +} + +func findDescendent(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.Elem); ok { + ch := ele.GetChildren() + for i := range ch { + addNode(ch[i], p, ret) + findDescendent(ch[i], p, ret) + } + } +} + +func findDescendentOrSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + findSelf(x, p, ret) + findDescendent(x, p, ret) +} + +func findFollowing(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := 0 + for x != ch[i] { + i++ + } + i++ + for i < len(ch) { + findDescendentOrSelf(ch[i], p, ret) + i++ + } + findFollowing(par, p, ret) +} + +func findFollowingSibling(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := 0 + for x != ch[i] { + i++ + } + i++ + for i < len(ch) { + findSelf(ch[i], p, ret) + i++ + } +} + 
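+//Every axis walker in this file follows the same contract: visit each node on its axis relative to x, and let addNode apply the node test from p. A sketch of that shape (nodesOnAxis is hypothetical, not a function in this package): +// +//	func findAxisSketch(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { +//		for _, candidate := range nodesOnAxis(x) { +//			addNode(candidate, p, ret) +//		} +//	} +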
+func findNamespace(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if ele, ok := x.(tree.NSElem); ok { + ns := tree.BuildNS(ele) + for _, i := range ns { + addNode(i, p, ret) + } + } +} + +func findParent(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() != tree.NtRoot { + addNode(x.GetParent(), p, ret) + } +} + +func findPreceding(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := len(ch) - 1 + for x != ch[i] { + i-- + } + i-- + for i >= 0 { + findDescendentOrSelf(ch[i], p, ret) + i-- + } + findPreceding(par, p, ret) +} + +func findPrecedingSibling(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + if x.GetNodeType() == tree.NtRoot { + return + } + par := x.GetParent() + ch := par.GetChildren() + i := len(ch) - 1 + for x != ch[i] { + i-- + } + i-- + for i >= 0 { + findSelf(ch[i], p, ret) + i-- + } +} + +func findSelf(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + addNode(x, p, ret) +} + +func addNode(x tree.Node, p *pathexpr.PathExpr, ret *[]tree.Node) { + add := false + tok := x.GetToken() + + switch x.GetNodeType() { + case tree.NtAttr: + add = evalAttr(p, tok.(xml.Attr)) + case tree.NtChd: + add = evalChd(p) + case tree.NtComm: + add = evalComm(p) + case tree.NtElem, tree.NtRoot: + add = evalEle(p, tok.(xml.StartElement)) + case tree.NtNs: + add = evalNS(p, tok.(xml.Attr)) + case tree.NtPi: + add = evalPI(p) + } + + if add { + *ret = append(*ret, x) + } +} + +func evalAttr(p *pathexpr.PathExpr, a xml.Attr) bool { + if p.NodeType == "" { + if p.Name.Space != wildcard { + if a.Name.Space != p.NS[p.Name.Space] { + return false + } + } + + if p.Name.Local == wildcard && p.Axis == xconst.AxisAttribute { + return true + } + + if p.Name.Local == a.Name.Local { + return true + } + } else { + if p.NodeType == xconst.NodeTypeNode { + return true + } + } + + return false +} + +func evalChd(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeText || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func evalComm(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeComment || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func evalEle(p *pathexpr.PathExpr, ele xml.StartElement) bool { + if p.NodeType == "" { + return checkNameAndSpace(p, ele) + } + + if p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} + +func checkNameAndSpace(p *pathexpr.PathExpr, ele xml.StartElement) bool { + if p.Name.Local == wildcard && p.Name.Space == "" { + return true + } + + if p.Name.Space != wildcard && ele.Name.Space != p.NS[p.Name.Space] { + return false + } + + if p.Name.Local == wildcard && p.Axis != xconst.AxisAttribute && p.Axis != xconst.AxisNamespace { + return true + } + + if p.Name.Local == ele.Name.Local { + return true + } + + return false +} + +func evalNS(p *pathexpr.PathExpr, ns xml.Attr) bool { + if p.NodeType == "" { + if p.Name.Space != "" && p.Name.Space != wildcard { + return false + } + + if p.Name.Local == wildcard && p.Axis == xconst.AxisNamespace { + return true + } + + if p.Name.Local == ns.Name.Local { + return true + } + } else { + if p.NodeType == xconst.NodeTypeNode { + return true + } + } + + return false +} + +func evalPI(p *pathexpr.PathExpr) bool { + if p.NodeType == xconst.NodeTypeProcInst || p.NodeType == xconst.NodeTypeNode { + return true + } + + return false +} diff --git 
a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go new file mode 100644 index 00000000..5480ebcc --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/boolfns.go @@ -0,0 +1,74 @@ +package intfns + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/tree" + "golang.org/x/text/language" +) + +func boolean(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if b, ok := args[0].(tree.IsBool); ok { + return b.Bool(), nil + } + + return nil, fmt.Errorf("Cannot convert object to a boolean") +} + +func not(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + b, ok := args[0].(tree.IsBool) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a boolean") + } + return !b.Bool(), nil +} + +func _true(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(true), nil +} + +func _false(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(false), nil +} + +func lang(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + lStr := args[0].String() + + var n tree.Elem + + for _, i := range c.NodeSet { + if i.GetNodeType() == tree.NtElem { + n = i.(tree.Elem) + } else { + n = i.GetParent() + } + + for n.GetNodeType() != tree.NtRoot { + if attr, ok := tree.GetAttribute(n, "lang", tree.XMLSpace); ok { + return checkLang(lStr, attr.Value), nil + } + n = n.GetParent() + } + } + + return tree.Bool(false), nil +} + +func checkLang(srcStr, targStr string) tree.Bool { + srcLang := language.Make(srcStr) + srcRegion, srcRegionConf := srcLang.Region() + + targLang := language.Make(targStr) + targRegion, targRegionConf := targLang.Region() + + if srcRegionConf == language.Exact && targRegionConf != language.Exact { + return tree.Bool(false) + } + + if srcRegion != targRegion && srcRegionConf == language.Exact && targRegionConf == language.Exact { + return tree.Bool(false) + } + + _, _, conf := language.NewMatcher([]language.Tag{srcLang}).Match(targLang) + return tree.Bool(conf >= language.High) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go new file mode 100644 index 00000000..8b09fa99 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/intfns.go @@ -0,0 +1,41 @@ +package intfns + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//BuiltIn contains the list of built-in XPath functions +var BuiltIn = map[xml.Name]tree.Wrap{ + //String functions + {Local: "string"}: {Fn: _string, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "concat"}: {Fn: concat, NArgs: 3, LastArgOpt: tree.Variadic}, + {Local: "starts-with"}: {Fn: startsWith, NArgs: 2}, + {Local: "contains"}: {Fn: contains, NArgs: 2}, + {Local: "substring-before"}: {Fn: substringBefore, NArgs: 2}, + {Local: "substring-after"}: {Fn: substringAfter, NArgs: 2}, + {Local: "substring"}: {Fn: substring, NArgs: 3, LastArgOpt: tree.Optional}, + {Local: "string-length"}: {Fn: stringLength, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "normalize-space"}: {Fn: normalizeSpace, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "translate"}: {Fn: translate, NArgs: 3}, + //Node set functions + {Local: "last"}: {Fn: last}, + {Local: "position"}: {Fn: position}, + {Local: "count"}: {Fn: count, NArgs: 1}, + {Local: "local-name"}: {Fn: localName, NArgs: 1, 
LastArgOpt: tree.Optional}, + {Local: "namespace-uri"}: {Fn: namespaceURI, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "name"}: {Fn: name, NArgs: 1, LastArgOpt: tree.Optional}, + //boolean functions + {Local: "boolean"}: {Fn: boolean, NArgs: 1}, + {Local: "not"}: {Fn: not, NArgs: 1}, + {Local: "true"}: {Fn: _true}, + {Local: "false"}: {Fn: _false}, + {Local: "lang"}: {Fn: lang, NArgs: 1}, + //number functions + {Local: "number"}: {Fn: number, NArgs: 1, LastArgOpt: tree.Optional}, + {Local: "sum"}: {Fn: sum, NArgs: 1}, + {Local: "floor"}: {Fn: floor, NArgs: 1}, + {Local: "ceiling"}: {Fn: ceiling, NArgs: 1}, + {Local: "round"}: {Fn: round, NArgs: 1}, +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go new file mode 100644 index 00000000..7286a304 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/nodesetfns.go @@ -0,0 +1,131 @@ +package intfns + +import ( + "encoding/xml" + "fmt" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func last(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Num(c.Size), nil +} + +func position(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Num(c.Pos), nil +} + +func count(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + return tree.Num(len(n)), nil +} + +func localName(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + tok := node.GetToken() + + switch node.GetNodeType() { + case tree.NtElem: + ret = tok.(xml.StartElement).Name.Local + case tree.NtAttr: + ret = tok.(xml.Attr).Name.Local + case tree.NtPi: + ret = tok.(xml.ProcInst).Target + } + + return tree.String(ret), nil +} + +func namespaceURI(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + tok := node.GetToken() + + switch node.GetNodeType() { + case tree.NtElem: + ret = tok.(xml.StartElement).Name.Space + case tree.NtAttr: + ret = tok.(xml.Attr).Name.Space + } + + return tree.String(ret), nil +} + +func name(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var n tree.NodeSet + ok := true + if len(args) == 1 { + n, ok = args[0].(tree.NodeSet) + } else { + n = c.NodeSet + } + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := "" + if len(n) == 0 { + return tree.String(ret), nil + } + node := n[0] + + switch node.GetNodeType() { + case tree.NtElem: + t := node.GetToken().(xml.StartElement) + space := "" + + if t.Name.Space != "" { + space = fmt.Sprintf("{%s}", t.Name.Space) + } + + ret = fmt.Sprintf("%s%s", space, t.Name.Local) + case tree.NtAttr: + t := node.GetToken().(xml.Attr) + space := "" + + if t.Name.Space != "" { + space = fmt.Sprintf("{%s}", t.Name.Space) + } + + ret = fmt.Sprintf("%s%s", space, t.Name.Local) + case tree.NtPi: + 
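// a processing instruction has no expanded-name, so name() reduces to the PI target + 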
ret = fmt.Sprintf("%s", node.GetToken().(xml.ProcInst).Target) + } + + return tree.String(ret), nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go new file mode 100644 index 00000000..464403f9 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/numfns.go @@ -0,0 +1,71 @@ +package intfns + +import ( + "fmt" + "math" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func number(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if b, ok := args[0].(tree.IsNum); ok { + return b.Num(), nil + } + + return nil, fmt.Errorf("Cannot convert object to a number") +} + +func sum(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.NodeSet) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a node-set") + } + + ret := 0.0 + for _, i := range n { + ret += float64(tree.GetNodeNum(i)) + } + + return tree.Num(ret), nil +} + +func floor(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + return tree.Num(math.Floor(float64(n.Num()))), nil +} + +func ceiling(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + n, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + return tree.Num(math.Ceil(float64(n.Num()))), nil +} + +func round(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + isn, ok := args[0].(tree.IsNum) + if !ok { + return nil, fmt.Errorf("Cannot convert object to a number") + } + + n := isn.Num() + + if math.IsNaN(float64(n)) || math.IsInf(float64(n), 0) { + return n, nil + } + + if n < -0.5 { + n = tree.Num(int(n - 0.5)) + } else if n > 0.5 { + n = tree.Num(int(n + 0.5)) + } else { + n = 0 + } + + return n, nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go new file mode 100644 index 00000000..4e6a73a0 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns/stringfns.go @@ -0,0 +1,141 @@ +package intfns + +import ( + "math" + "regexp" + "strings" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func _string(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + if len(args) == 1 { + return tree.String(args[0].String()), nil + } + + return tree.String(c.NodeSet.String()), nil +} + +func concat(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ret := "" + + for _, i := range args { + ret += i.String() + } + + return tree.String(ret), nil +} + +func startsWith(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(strings.Index(args[0].String(), args[1].String()) == 0), nil +} + +func contains(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + return tree.Bool(strings.Contains(args[0].String(), args[1].String())), nil +} + +func substringBefore(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ind := strings.Index(args[0].String(), args[1].String()) + if ind == -1 { + return tree.String(""), nil + } + + return tree.String(args[0].String()[:ind]), nil +} + +func substringAfter(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ind := strings.Index(args[0].String(), args[1].String()) + if ind == -1 { + return tree.String(""), nil + } + + return 
tree.String(args[0].String()[ind+len(args[1].String()):]), nil +} + +func substring(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + str := args[0].String() + + bNum, bErr := round(c, args[1]) + if bErr != nil { + return nil, bErr + } + + b := bNum.(tree.Num).Num() + + if float64(b-1) >= float64(len(str)) || math.IsNaN(float64(b)) { + return tree.String(""), nil + } + + if len(args) == 2 { + if b <= 1 { + b = 1 + } + + return tree.String(str[int(b)-1:]), nil + } + + eNum, eErr := round(c, args[2]) + if eErr != nil { + return nil, eErr + } + + e := eNum.(tree.Num).Num() + + if e <= 0 || math.IsNaN(float64(e)) || (math.IsInf(float64(b), 0) && math.IsInf(float64(e), 0)) { + return tree.String(""), nil + } + + if b <= 1 { + e = b + e - 1 + b = 1 + } + + if float64(b+e-1) >= float64(len(str)) { + e = tree.Num(len(str)) - b + 1 + } + + return tree.String(str[int(b)-1 : int(b+e)-1]), nil +} + +func stringLength(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var str string + if len(args) == 1 { + str = args[0].String() + } else { + str = c.NodeSet.String() + } + + return tree.Num(len(str)), nil +} + +var spaceTrim = regexp.MustCompile(`\s+`) + +func normalizeSpace(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + var str string + if len(args) == 1 { + str = args[0].String() + } else { + str = c.NodeSet.String() + } + + str = strings.TrimSpace(str) + + return tree.String(spaceTrim.ReplaceAllString(str, " ")), nil +} + +func translate(c tree.Ctx, args ...tree.Result) (tree.Result, error) { + ret := args[0].String() + src := args[1].String() + repl := args[2].String() + + for i := range src { + r := "" + if i < len(repl) { + r = string(repl[i]) + } + + ret = strings.Replace(ret, string(src[i]), r, -1) + } + + return tree.String(ret), nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go new file mode 100644 index 00000000..d41ba7fc --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/operators.go @@ -0,0 +1,212 @@ +package execxp + +import ( + "fmt" + "math" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +func bothNodeOperator(left tree.NodeSet, right tree.NodeSet, f *xpFilt, op string) error { + var err error + for _, l := range left { + for _, r := range right { + lStr := l.ResValue() + rStr := r.ResValue() + + if eqOps[op] { + err = equalsOperator(tree.String(lStr), tree.String(rStr), f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } else { + err = numberOperator(tree.String(lStr), tree.String(rStr), f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func leftNodeOperator(left tree.NodeSet, right tree.Result, f *xpFilt, op string) error { + var err error + for _, l := range left { + lStr := l.ResValue() + + if eqOps[op] { + err = equalsOperator(tree.String(lStr), right, f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } else { + err = numberOperator(tree.String(lStr), right, f, op) + if err == nil && f.ctx.String() == tree.True { + return nil + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func rightNodeOperator(left tree.Result, right tree.NodeSet, f *xpFilt, op string) error { + var err error + for _, r := range right { + rStr := r.ResValue() + + if eqOps[op] { + err = equalsOperator(left, tree.String(rStr), f, op) + if err == nil && f.ctx.String() == "true" { 
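+ // node-set comparisons are existential in XPath: a single matching pair decides the result, so return early 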
+ return nil + } + } else { + err = numberOperator(left, tree.String(rStr), f, op) + if err == nil && f.ctx.String() == "true" { + return nil + } + } + } + + f.ctx = tree.Bool(false) + + return nil +} + +func equalsOperator(left, right tree.Result, f *xpFilt, op string) error { + _, lOK := left.(tree.Bool) + _, rOK := right.(tree.Bool) + + if lOK || rOK { + lTest, lt := left.(tree.IsBool) + rTest, rt := right.(tree.IsBool) + if !lt || !rt { + return fmt.Errorf("Cannot convert argument to boolean") + } + + if op == "=" { + f.ctx = tree.Bool(lTest.Bool() == rTest.Bool()) + } else { + f.ctx = tree.Bool(lTest.Bool() != rTest.Bool()) + } + + return nil + } + + _, lOK = left.(tree.Num) + _, rOK = right.(tree.Num) + if lOK || rOK { + return numberOperator(left, right, f, op) + } + + lStr := left.String() + rStr := right.String() + + if op == "=" { + f.ctx = tree.Bool(lStr == rStr) + } else { + f.ctx = tree.Bool(lStr != rStr) + } + + return nil +} + +func numberOperator(left, right tree.Result, f *xpFilt, op string) error { + lt, lOK := left.(tree.IsNum) + rt, rOK := right.(tree.IsNum) + if !lOK || !rOK { + return fmt.Errorf("Cannot convert data type to number") + } + + ln, rn := lt.Num(), rt.Num() + + switch op { + case "*": + f.ctx = ln * rn + case "div": + if rn != 0 { + f.ctx = ln / rn + } else { + if ln == 0 { + f.ctx = tree.Num(math.NaN()) + } else { + if math.Signbit(float64(ln)) == math.Signbit(float64(rn)) { + f.ctx = tree.Num(math.Inf(1)) + } else { + f.ctx = tree.Num(math.Inf(-1)) + } + } + } + case "mod": + f.ctx = tree.Num(int(ln) % int(rn)) + case "+": + f.ctx = ln + rn + case "-": + f.ctx = ln - rn + case "=": + f.ctx = tree.Bool(ln == rn) + case "!=": + f.ctx = tree.Bool(ln != rn) + case "<": + f.ctx = tree.Bool(ln < rn) + case "<=": + f.ctx = tree.Bool(ln <= rn) + case ">": + f.ctx = tree.Bool(ln > rn) + case ">=": + f.ctx = tree.Bool(ln >= rn) + } + + return nil +} + +func andOrOperator(left, right tree.Result, f *xpFilt, op string) error { + lt, lOK := left.(tree.IsBool) + rt, rOK := right.(tree.IsBool) + + if !lOK || !rOK { + return fmt.Errorf("Cannot convert argument to boolean") + } + + l, r := lt.Bool(), rt.Bool() + + if op == "and" { + f.ctx = l && r + } else { + f.ctx = l || r + } + + return nil +} + +func unionOperator(left, right tree.Result, f *xpFilt, op string) error { + lNode, lOK := left.(tree.NodeSet) + rNode, rOK := right.(tree.NodeSet) + + if !lOK || !rOK { + return fmt.Errorf("Cannot convert data type to node-set") + } + + uniq := make(map[int]tree.Node) + for _, i := range lNode { + uniq[i.Pos()] = i + } + for _, i := range rNode { + uniq[i.Pos()] = i + } + + res := make(tree.NodeSet, 0, len(uniq)) + for _, v := range uniq { + res = append(res, v) + } + + f.ctx = res + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go new file mode 100644 index 00000000..5a78138e --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/execxp/paths.go @@ -0,0 +1,396 @@ +package execxp + +import ( + "encoding/xml" + "fmt" + "strconv" + "strings" + + "github.com/ChrisTrenkamp/goxpath/internal/execxp/findutil" + "github.com/ChrisTrenkamp/goxpath/internal/execxp/intfns" + "github.com/ChrisTrenkamp/goxpath/internal/xsort" + "github.com/ChrisTrenkamp/goxpath/lexer" + "github.com/ChrisTrenkamp/goxpath/parser" + "github.com/ChrisTrenkamp/goxpath/parser/pathexpr" + 
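// tree supplies the Node, Result, and NodeSet types the evaluators below operate on + 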
"github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +type xpFilt struct { + t tree.Node + ctx tree.Result + expr pathexpr.PathExpr + ns map[string]string + ctxPos int + ctxSize int + proxPos map[int]int + fns map[xml.Name]tree.Wrap + variables map[string]tree.Result +} + +type xpExecFn func(*xpFilt, string) + +var xpFns = map[lexer.XItemType]xpExecFn{ + lexer.XItemAbsLocPath: xfAbsLocPath, + lexer.XItemAbbrAbsLocPath: xfAbbrAbsLocPath, + lexer.XItemRelLocPath: xfRelLocPath, + lexer.XItemAbbrRelLocPath: xfAbbrRelLocPath, + lexer.XItemAxis: xfAxis, + lexer.XItemAbbrAxis: xfAbbrAxis, + lexer.XItemNCName: xfNCName, + lexer.XItemQName: xfQName, + lexer.XItemNodeType: xfNodeType, + lexer.XItemProcLit: xfProcInstLit, + lexer.XItemStrLit: xfStrLit, + lexer.XItemNumLit: xfNumLit, +} + +func xfExec(f *xpFilt, n *parser.Node) (err error) { + for n != nil { + if fn, ok := xpFns[n.Val.Typ]; ok { + fn(f, n.Val.Val) + n = n.Left + } else if n.Val.Typ == lexer.XItemPredicate { + if err = xfPredicate(f, n.Left); err != nil { + return + } + + n = n.Right + } else if n.Val.Typ == lexer.XItemFunction { + if err = xfFunction(f, n); err != nil { + return + } + + n = n.Right + } else if n.Val.Typ == lexer.XItemOperator { + lf := xpFilt{ + t: f.t, + ns: f.ns, + ctx: f.ctx, + ctxPos: f.ctxPos, + ctxSize: f.ctxSize, + proxPos: f.proxPos, + fns: f.fns, + variables: f.variables, + } + left, err := exec(&lf, n.Left) + if err != nil { + return err + } + + rf := xpFilt{ + t: f.t, + ns: f.ns, + ctx: f.ctx, + fns: f.fns, + variables: f.variables, + } + right, err := exec(&rf, n.Right) + if err != nil { + return err + } + + return xfOperator(left, right, f, n.Val.Val) + } else if n.Val.Typ == lexer.XItemVariable { + if res, ok := f.variables[n.Val.Val]; ok { + f.ctx = res + return nil + } + return fmt.Errorf("Invalid variable '%s'", n.Val.Val) + } else if string(n.Val.Typ) == "" { + n = n.Left + //} else { + // return fmt.Errorf("Cannot process " + string(n.Val.Typ)) + } + } + + return +} + +func xfPredicate(f *xpFilt, n *parser.Node) (err error) { + res := f.ctx.(tree.NodeSet) + newRes := make(tree.NodeSet, 0, len(res)) + + for i := range res { + pf := xpFilt{ + t: f.t, + ns: f.ns, + ctxPos: i, + ctxSize: f.ctxSize, + ctx: tree.NodeSet{res[i]}, + fns: f.fns, + variables: f.variables, + } + + predRes, err := exec(&pf, n) + if err != nil { + return err + } + + ok, err := checkPredRes(predRes, f, res[i]) + if err != nil { + return err + } + + if ok { + newRes = append(newRes, res[i]) + } + } + + f.proxPos = make(map[int]int) + for pos, j := range newRes { + f.proxPos[j.Pos()] = pos + 1 + } + + f.ctx = newRes + f.ctxSize = len(newRes) + + return +} + +func checkPredRes(ret tree.Result, f *xpFilt, node tree.Node) (bool, error) { + if num, ok := ret.(tree.Num); ok { + if float64(f.proxPos[node.Pos()]) == float64(num) { + return true, nil + } + return false, nil + } + + if b, ok := ret.(tree.IsBool); ok { + return bool(b.Bool()), nil + } + + return false, fmt.Errorf("Cannot convert argument to boolean") +} + +func xfFunction(f *xpFilt, n *parser.Node) error { + spl := strings.Split(n.Val.Val, ":") + var name xml.Name + if len(spl) == 1 { + name.Local = spl[0] + } else { + name.Space = f.ns[spl[0]] + name.Local = spl[1] + } + fn, ok := intfns.BuiltIn[name] + if !ok { + fn, ok = f.fns[name] + } + + if ok { + args := []tree.Result{} + param := n.Left + + for param != nil { + pf := xpFilt{ + t: f.t, + ctx: f.ctx, + ns: f.ns, + ctxPos: f.ctxPos, + ctxSize: f.ctxSize, + fns: f.fns, 
+ variables: f.variables, + } + res, err := exec(&pf, param.Left) + if err != nil { + return err + } + + args = append(args, res) + param = param.Right + } + + filt, err := fn.Call(tree.Ctx{NodeSet: f.ctx.(tree.NodeSet), Size: f.ctxSize, Pos: f.ctxPos + 1}, args...) + f.ctx = filt + return err + } + + return fmt.Errorf("Unknown function: %s", n.Val.Val) +} + +var eqOps = map[string]bool{ + "=": true, + "!=": true, +} + +var booleanOps = map[string]bool{ + "=": true, + "!=": true, + "<": true, + "<=": true, + ">": true, + ">=": true, +} + +var numOps = map[string]bool{ + "*": true, + "div": true, + "mod": true, + "+": true, + "-": true, + "=": true, + "!=": true, + "<": true, + "<=": true, + ">": true, + ">=": true, +} + +var andOrOps = map[string]bool{ + "and": true, + "or": true, +} + +func xfOperator(left, right tree.Result, f *xpFilt, op string) error { + if booleanOps[op] { + lNode, lOK := left.(tree.NodeSet) + rNode, rOK := right.(tree.NodeSet) + if lOK && rOK { + return bothNodeOperator(lNode, rNode, f, op) + } + + if lOK { + return leftNodeOperator(lNode, right, f, op) + } + + if rOK { + return rightNodeOperator(left, rNode, f, op) + } + + if eqOps[op] { + return equalsOperator(left, right, f, op) + } + } + + if numOps[op] { + return numberOperator(left, right, f, op) + } + + if andOrOps[op] { + return andOrOperator(left, right, f, op) + } + + //if op == "|" { + return unionOperator(left, right, f, op) + //} + + //return fmt.Errorf("Unknown operator " + op) +} + +func xfAbsLocPath(f *xpFilt, val string) { + i := f.t + for i.GetNodeType() != tree.NtRoot { + i = i.GetParent() + } + f.ctx = tree.NodeSet{i} +} + +func xfAbbrAbsLocPath(f *xpFilt, val string) { + i := f.t + for i.GetNodeType() != tree.NtRoot { + i = i.GetParent() + } + f.ctx = tree.NodeSet{i} + f.expr = abbrPathExpr() + find(f) +} + +func xfRelLocPath(f *xpFilt, val string) { +} + +func xfAbbrRelLocPath(f *xpFilt, val string) { + f.expr = abbrPathExpr() + find(f) +} + +func xfAxis(f *xpFilt, val string) { + f.expr.Axis = val +} + +func xfAbbrAxis(f *xpFilt, val string) { + f.expr.Axis = xconst.AxisAttribute +} + +func xfNCName(f *xpFilt, val string) { + f.expr.Name.Space = val +} + +func xfQName(f *xpFilt, val string) { + f.expr.Name.Local = val + find(f) +} + +func xfNodeType(f *xpFilt, val string) { + f.expr.NodeType = val + find(f) +} + +func xfProcInstLit(f *xpFilt, val string) { + filt := tree.NodeSet{} + for _, i := range f.ctx.(tree.NodeSet) { + if i.GetToken().(xml.ProcInst).Target == val { + filt = append(filt, i) + } + } + f.ctx = filt +} + +func xfStrLit(f *xpFilt, val string) { + f.ctx = tree.String(val) +} + +func xfNumLit(f *xpFilt, val string) { + num, _ := strconv.ParseFloat(val, 64) + f.ctx = tree.Num(num) +} + +func abbrPathExpr() pathexpr.PathExpr { + return pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisDescendentOrSelf, + NodeType: xconst.NodeTypeNode, + } +} + +func find(f *xpFilt) { + dupFilt := make(map[int]tree.Node) + f.proxPos = make(map[int]int) + + if f.expr.Axis == "" && f.expr.NodeType == "" && f.expr.Name.Space == "" { + if f.expr.Name.Local == "." { + f.expr = pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisSelf, + NodeType: xconst.NodeTypeNode, + } + } + + if f.expr.Name.Local == ".." 
{ + f.expr = pathexpr.PathExpr{ + Name: xml.Name{}, + Axis: xconst.AxisParent, + NodeType: xconst.NodeTypeNode, + } + } + } + + f.expr.NS = f.ns + + for _, i := range f.ctx.(tree.NodeSet) { + for pos, j := range findutil.Find(i, f.expr) { + dupFilt[j.Pos()] = j + f.proxPos[j.Pos()] = pos + 1 + } + } + + res := make(tree.NodeSet, 0, len(dupFilt)) + for _, i := range dupFilt { + res = append(res, i) + } + + xsort.SortNodes(res) + + f.expr = pathexpr.PathExpr{} + f.ctxSize = len(res) + f.ctx = res +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go b/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go new file mode 100644 index 00000000..95f57c24 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/internal/xsort/xsort.go @@ -0,0 +1,20 @@ +package xsort + +import ( + "sort" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +type nodeSort []tree.Node + +func (ns nodeSort) Len() int { return len(ns) } +func (ns nodeSort) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } +func (ns nodeSort) Less(i, j int) bool { + return ns[i].Pos() < ns[j].Pos() +} + +//SortNodes sorts the array by the node document order +func SortNodes(res []tree.Node) { + sort.Sort(nodeSort(res)) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go new file mode 100644 index 00000000..29f53c42 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/lexer.go @@ -0,0 +1,419 @@ +package lexer + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" +) + +const ( + //XItemError is an error with the parser input + XItemError XItemType = "Error" + //XItemAbsLocPath is an absolute path + XItemAbsLocPath = "Absolute path" + //XItemAbbrAbsLocPath represents an abbreviated absolute path + XItemAbbrAbsLocPath = "Abbreviated absolute path" + //XItemAbbrRelLocPath marks the start of a path expression + XItemAbbrRelLocPath = "Abbreviated relative path" + //XItemRelLocPath represents a relative location path + XItemRelLocPath = "Relative path" + //XItemEndPath marks the end of a path + XItemEndPath = "End path instruction" + //XItemAxis marks an axis specifier of a path + XItemAxis = "Axis" + //XItemAbbrAxis marks an abbreviated axis specifier (just @ at this point) + XItemAbbrAxis = "Abbreviated attribute axis" + //XItemNCName marks a namespace name in a node test + XItemNCName = "Namespace" + //XItemQName marks the local name in an a node test + XItemQName = "Local name" + //XItemNodeType marks a node type in a node test + XItemNodeType = "Node type" + //XItemProcLit marks a processing-instruction literal + XItemProcLit = "processing-instruction" + //XItemFunction marks a function call + XItemFunction = "function" + //XItemArgument marks a function argument + XItemArgument = "function argument" + //XItemEndFunction marks the end of a function + XItemEndFunction = "end of function" + //XItemPredicate marks a predicate in an axis + XItemPredicate = "predicate" + //XItemEndPredicate marks a predicate in an axis + XItemEndPredicate = "end of predicate" + //XItemStrLit marks a string literal + XItemStrLit = "string literal" + //XItemNumLit marks a numeric literal + XItemNumLit = "numeric literal" + //XItemOperator marks an operator + XItemOperator = "operator" + //XItemVariable marks a variable reference + XItemVariable = "variable" +) + +const ( + eof = -(iota + 1) +) + +//XItemType is the parser token types +type XItemType string + +//XItem is the token 
emitted from the parser +type XItem struct { + Typ XItemType + Val string +} + +type stateFn func(*Lexer) stateFn + +//Lexer lexes out XPath expressions +type Lexer struct { + input string + start int + pos int + width int + items chan XItem +} + +//Lex an XPath expresion on the io.Reader +func Lex(xpath string) chan XItem { + l := &Lexer{ + input: xpath, + items: make(chan XItem), + } + go l.run() + return l.items +} + +func (l *Lexer) run() { + for state := startState; state != nil; { + state = state(l) + } + + if l.peek() != eof { + l.errorf("Malformed XPath expression") + } + + close(l.items) +} + +func (l *Lexer) emit(t XItemType) { + l.items <- XItem{t, l.input[l.start:l.pos]} + l.start = l.pos +} + +func (l *Lexer) emitVal(t XItemType, val string) { + l.items <- XItem{t, val} + l.start = l.pos +} + +func (l *Lexer) next() (r rune) { + if l.pos >= len(l.input) { + l.width = 0 + return eof + } + + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + + l.pos += l.width + + return r +} + +func (l *Lexer) ignore() { + l.start = l.pos +} + +func (l *Lexer) backup() { + l.pos -= l.width +} + +func (l *Lexer) peek() rune { + r := l.next() + + l.backup() + return r +} + +func (l *Lexer) peekAt(n int) rune { + if n <= 1 { + return l.peek() + } + + width := 0 + var ret rune + + for count := 0; count < n; count++ { + r, s := utf8.DecodeRuneInString(l.input[l.pos+width:]) + width += s + + if l.pos+width > len(l.input) { + return eof + } + + ret = r + } + + return ret +} + +func (l *Lexer) accept(valid string) bool { + if strings.ContainsRune(valid, l.next()) { + return true + } + + l.backup() + return false +} + +func (l *Lexer) acceptRun(valid string) { + for strings.ContainsRune(valid, l.next()) { + } + l.backup() +} + +func (l *Lexer) skip(num int) { + for i := 0; i < num; i++ { + l.next() + } + l.ignore() +} + +func (l *Lexer) skipWS(ig bool) { + for { + n := l.next() + + if n == eof || !unicode.IsSpace(n) { + break + } + } + + l.backup() + + if ig { + l.ignore() + } +} + +func (l *Lexer) errorf(format string, args ...interface{}) stateFn { + l.items <- XItem{ + XItemError, + fmt.Sprintf(format, args...), + } + + return nil +} + +func isElemChar(r rune) bool { + return string(r) != ":" && string(r) != "/" && + (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") && + r != eof +} + +func startState(l *Lexer) stateFn { + l.skipWS(true) + + if string(l.peek()) == "/" { + l.next() + l.ignore() + + if string(l.next()) == "/" { + l.ignore() + return abbrAbsLocPathState + } + + l.backup() + return absLocPathState + } else if string(l.peek()) == `'` || string(l.peek()) == `"` { + if err := getStrLit(l, XItemStrLit); err != nil { + return l.errorf(err.Error()) + } + + if l.peek() != eof { + return startState + } + } else if getNumLit(l) { + l.skipWS(true) + if l.peek() != eof { + return startState + } + } else if string(l.peek()) == "$" { + l.next() + l.ignore() + r := l.peek() + for unicode.Is(first, r) || unicode.Is(second, r) { + l.next() + r = l.peek() + } + tok := l.input[l.start:l.pos] + if len(tok) == 0 { + return l.errorf("Empty variable name") + } + l.emit(XItemVariable) + l.skipWS(true) + if l.peek() != eof { + return startState + } + } else if st := findOperatorState(l); st != nil { + return st + } else { + if isElemChar(l.peek()) { + colons := 0 + + for { + if isElemChar(l.peek()) { + l.next() + } else if string(l.peek()) == ":" { + l.next() + colons++ + } else { + break + } + } + + if string(l.peek()) == "(" && colons <= 1 { + tok := l.input[l.start:l.pos] + err := 
procFunc(l, tok) + if err != nil { + return l.errorf(err.Error()) + } + + l.skipWS(true) + + if string(l.peek()) == "/" { + l.next() + l.ignore() + + if string(l.next()) == "/" { + l.ignore() + return abbrRelLocPathState + } + + l.backup() + return relLocPathState + } + + return startState + } + + l.pos = l.start + return relLocPathState + } else if string(l.peek()) == "@" { + return relLocPathState + } + } + + return nil +} + +func strPeek(str string, l *Lexer) bool { + for i := 0; i < len(str); i++ { + if string(l.peekAt(i+1)) != string(str[i]) { + return false + } + } + return true +} + +func findOperatorState(l *Lexer) stateFn { + l.skipWS(true) + + switch string(l.peek()) { + case ">", "<", "!": + l.next() + if string(l.peek()) == "=" { + l.next() + } + l.emit(XItemOperator) + return startState + case "|", "+", "-", "*", "=": + l.next() + l.emit(XItemOperator) + return startState + case "(": + l.next() + l.emit(XItemOperator) + for state := startState; state != nil; { + state = state(l) + } + l.skipWS(true) + if string(l.next()) != ")" { + return l.errorf("Missing end )") + } + l.emit(XItemOperator) + return startState + } + + if strPeek("and", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("or", l) { + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("mod", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + if strPeek("div", l) { + l.next() + l.next() + l.next() + l.emit(XItemOperator) + return startState + } + + return nil +} + +func getStrLit(l *Lexer, tok XItemType) error { + q := l.next() + var r rune + + l.ignore() + + for r != q { + r = l.next() + if r == eof { + return fmt.Errorf("Unexpected end of string literal.") + } + } + + l.backup() + l.emit(tok) + l.next() + l.ignore() + + return nil +} + +func getNumLit(l *Lexer) bool { + const dig = "0123456789" + l.accept("-") + start := l.pos + l.acceptRun(dig) + + if l.pos == start { + return false + } + + if l.accept(".") { + l.acceptRun(dig) + } + + l.emit(XItemNumLit) + return true +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go new file mode 100644 index 00000000..dac30d6b --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/paths.go @@ -0,0 +1,219 @@ +package lexer + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/xconst" +) + +func absLocPathState(l *Lexer) stateFn { + l.emit(XItemAbsLocPath) + return stepState +} + +func abbrAbsLocPathState(l *Lexer) stateFn { + l.emit(XItemAbbrAbsLocPath) + return stepState +} + +func relLocPathState(l *Lexer) stateFn { + l.emit(XItemRelLocPath) + return stepState +} + +func abbrRelLocPathState(l *Lexer) stateFn { + l.emit(XItemAbbrRelLocPath) + return stepState +} + +func stepState(l *Lexer) stateFn { + l.skipWS(true) + r := l.next() + + for isElemChar(r) { + r = l.next() + } + + l.backup() + tok := l.input[l.start:l.pos] + + state, err := parseSeparators(l, tok) + if err != nil { + return l.errorf(err.Error()) + } + + return getNextPathState(l, state) +} + +func parseSeparators(l *Lexer, tok string) (XItemType, error) { + l.skipWS(false) + state := XItemType(XItemQName) + r := l.peek() + + if string(r) == ":" && string(l.peekAt(2)) == ":" { + var err error + if state, err = getAxis(l, tok); err != nil { + return state, fmt.Errorf(err.Error()) + } + } else if string(r) == ":" { + state = XItemNCName + l.emitVal(state, tok) + l.skip(1) 
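+ // the prefix and its ":" are now consumed; skip any whitespace before lexing the local part 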
+ l.skipWS(true) + } else if string(r) == "@" { + state = XItemAbbrAxis + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + } else if string(r) == "(" { + var err error + if state, err = getNT(l, tok); err != nil { + return state, fmt.Errorf(err.Error()) + } + } else if len(tok) > 0 { + l.emitVal(state, tok) + } + + return state, nil +} + +func getAxis(l *Lexer, tok string) (XItemType, error) { + var state XItemType + for i := range xconst.AxisNames { + if tok == xconst.AxisNames[i] { + state = XItemAxis + } + } + if state != XItemAxis { + return state, fmt.Errorf("Invalid Axis specifier, %s", tok) + } + l.emitVal(state, tok) + l.skip(2) + l.skipWS(true) + return state, nil +} + +func getNT(l *Lexer, tok string) (XItemType, error) { + isNT := false + for _, i := range xconst.NodeTypes { + if tok == i { + isNT = true + break + } + } + + if isNT { + return procNT(l, tok) + } + + return XItemError, fmt.Errorf("Invalid node-type " + tok) +} + +func procNT(l *Lexer, tok string) (XItemType, error) { + state := XItemType(XItemNodeType) + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + n := l.peek() + if tok == xconst.NodeTypeProcInst && (string(n) == `"` || string(n) == `'`) { + if err := getStrLit(l, XItemProcLit); err != nil { + return state, fmt.Errorf(err.Error()) + } + l.skipWS(true) + n = l.next() + } + + if string(n) != ")" { + return state, fmt.Errorf("Missing ) at end of NodeType declaration.") + } + + l.skip(1) + return state, nil +} + +func procFunc(l *Lexer, tok string) error { + state := XItemType(XItemFunction) + l.emitVal(state, tok) + l.skip(1) + l.skipWS(true) + if string(l.peek()) != ")" { + l.emit(XItemArgument) + for { + for state := startState; state != nil; { + state = state(l) + } + l.skipWS(true) + + if string(l.peek()) == "," { + l.emit(XItemArgument) + l.skip(1) + } else if string(l.peek()) == ")" { + l.emit(XItemEndFunction) + l.skip(1) + break + } else if l.peek() == eof { + return fmt.Errorf("Missing ) at end of function declaration.") + } + } + } else { + l.emit(XItemEndFunction) + l.skip(1) + } + + return nil +} + +func getNextPathState(l *Lexer, state XItemType) stateFn { + isMultiPart := state == XItemAxis || state == XItemAbbrAxis || state == XItemNCName + + l.skipWS(true) + + for string(l.peek()) == "[" { + if err := getPred(l); err != nil { + return l.errorf(err.Error()) + } + } + + if string(l.peek()) == "/" && !isMultiPart { + l.skip(1) + if string(l.peek()) == "/" { + l.skip(1) + return abbrRelLocPathState + } + l.skipWS(true) + return relLocPathState + } else if isMultiPart && isElemChar(l.peek()) { + return stepState + } + + if isMultiPart { + return l.errorf("Step is not complete") + } + + l.emit(XItemEndPath) + return findOperatorState +} + +func getPred(l *Lexer) error { + l.emit(XItemPredicate) + l.skip(1) + l.skipWS(true) + + if string(l.peek()) == "]" { + return fmt.Errorf("Missing content in predicate.") + } + + for state := startState; state != nil; { + state = state(l) + } + + l.skipWS(true) + if string(l.peek()) != "]" { + return fmt.Errorf("Missing ] at end of predicate.") + } + l.skip(1) + l.emit(XItemEndPredicate) + l.skipWS(true) + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go new file mode 100644 index 00000000..4e6bc8f6 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/lexer/xmlchars.go @@ -0,0 +1,316 @@ +package lexer + +import "unicode" + +//first and second was copied from src/encoding/xml/xml.go 
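+//"first" holds the runes that may begin an XML name (NameStartChar in the XML 1.0 grammar); "second" holds the additional runes permitted after the first position (NameChar).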
+var first = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x003A, 0x003A, 1}, + {0x0041, 0x005A, 1}, + {0x005F, 0x005F, 1}, + {0x0061, 0x007A, 1}, + {0x00C0, 0x00D6, 1}, + {0x00D8, 0x00F6, 1}, + {0x00F8, 0x00FF, 1}, + {0x0100, 0x0131, 1}, + {0x0134, 0x013E, 1}, + {0x0141, 0x0148, 1}, + {0x014A, 0x017E, 1}, + {0x0180, 0x01C3, 1}, + {0x01CD, 0x01F0, 1}, + {0x01F4, 0x01F5, 1}, + {0x01FA, 0x0217, 1}, + {0x0250, 0x02A8, 1}, + {0x02BB, 0x02C1, 1}, + {0x0386, 0x0386, 1}, + {0x0388, 0x038A, 1}, + {0x038C, 0x038C, 1}, + {0x038E, 0x03A1, 1}, + {0x03A3, 0x03CE, 1}, + {0x03D0, 0x03D6, 1}, + {0x03DA, 0x03E0, 2}, + {0x03E2, 0x03F3, 1}, + {0x0401, 0x040C, 1}, + {0x040E, 0x044F, 1}, + {0x0451, 0x045C, 1}, + {0x045E, 0x0481, 1}, + {0x0490, 0x04C4, 1}, + {0x04C7, 0x04C8, 1}, + {0x04CB, 0x04CC, 1}, + {0x04D0, 0x04EB, 1}, + {0x04EE, 0x04F5, 1}, + {0x04F8, 0x04F9, 1}, + {0x0531, 0x0556, 1}, + {0x0559, 0x0559, 1}, + {0x0561, 0x0586, 1}, + {0x05D0, 0x05EA, 1}, + {0x05F0, 0x05F2, 1}, + {0x0621, 0x063A, 1}, + {0x0641, 0x064A, 1}, + {0x0671, 0x06B7, 1}, + {0x06BA, 0x06BE, 1}, + {0x06C0, 0x06CE, 1}, + {0x06D0, 0x06D3, 1}, + {0x06D5, 0x06D5, 1}, + {0x06E5, 0x06E6, 1}, + {0x0905, 0x0939, 1}, + {0x093D, 0x093D, 1}, + {0x0958, 0x0961, 1}, + {0x0985, 0x098C, 1}, + {0x098F, 0x0990, 1}, + {0x0993, 0x09A8, 1}, + {0x09AA, 0x09B0, 1}, + {0x09B2, 0x09B2, 1}, + {0x09B6, 0x09B9, 1}, + {0x09DC, 0x09DD, 1}, + {0x09DF, 0x09E1, 1}, + {0x09F0, 0x09F1, 1}, + {0x0A05, 0x0A0A, 1}, + {0x0A0F, 0x0A10, 1}, + {0x0A13, 0x0A28, 1}, + {0x0A2A, 0x0A30, 1}, + {0x0A32, 0x0A33, 1}, + {0x0A35, 0x0A36, 1}, + {0x0A38, 0x0A39, 1}, + {0x0A59, 0x0A5C, 1}, + {0x0A5E, 0x0A5E, 1}, + {0x0A72, 0x0A74, 1}, + {0x0A85, 0x0A8B, 1}, + {0x0A8D, 0x0A8D, 1}, + {0x0A8F, 0x0A91, 1}, + {0x0A93, 0x0AA8, 1}, + {0x0AAA, 0x0AB0, 1}, + {0x0AB2, 0x0AB3, 1}, + {0x0AB5, 0x0AB9, 1}, + {0x0ABD, 0x0AE0, 0x23}, + {0x0B05, 0x0B0C, 1}, + {0x0B0F, 0x0B10, 1}, + {0x0B13, 0x0B28, 1}, + {0x0B2A, 0x0B30, 1}, + {0x0B32, 0x0B33, 1}, + {0x0B36, 0x0B39, 1}, + {0x0B3D, 0x0B3D, 1}, + {0x0B5C, 0x0B5D, 1}, + {0x0B5F, 0x0B61, 1}, + {0x0B85, 0x0B8A, 1}, + {0x0B8E, 0x0B90, 1}, + {0x0B92, 0x0B95, 1}, + {0x0B99, 0x0B9A, 1}, + {0x0B9C, 0x0B9C, 1}, + {0x0B9E, 0x0B9F, 1}, + {0x0BA3, 0x0BA4, 1}, + {0x0BA8, 0x0BAA, 1}, + {0x0BAE, 0x0BB5, 1}, + {0x0BB7, 0x0BB9, 1}, + {0x0C05, 0x0C0C, 1}, + {0x0C0E, 0x0C10, 1}, + {0x0C12, 0x0C28, 1}, + {0x0C2A, 0x0C33, 1}, + {0x0C35, 0x0C39, 1}, + {0x0C60, 0x0C61, 1}, + {0x0C85, 0x0C8C, 1}, + {0x0C8E, 0x0C90, 1}, + {0x0C92, 0x0CA8, 1}, + {0x0CAA, 0x0CB3, 1}, + {0x0CB5, 0x0CB9, 1}, + {0x0CDE, 0x0CDE, 1}, + {0x0CE0, 0x0CE1, 1}, + {0x0D05, 0x0D0C, 1}, + {0x0D0E, 0x0D10, 1}, + {0x0D12, 0x0D28, 1}, + {0x0D2A, 0x0D39, 1}, + {0x0D60, 0x0D61, 1}, + {0x0E01, 0x0E2E, 1}, + {0x0E30, 0x0E30, 1}, + {0x0E32, 0x0E33, 1}, + {0x0E40, 0x0E45, 1}, + {0x0E81, 0x0E82, 1}, + {0x0E84, 0x0E84, 1}, + {0x0E87, 0x0E88, 1}, + {0x0E8A, 0x0E8D, 3}, + {0x0E94, 0x0E97, 1}, + {0x0E99, 0x0E9F, 1}, + {0x0EA1, 0x0EA3, 1}, + {0x0EA5, 0x0EA7, 2}, + {0x0EAA, 0x0EAB, 1}, + {0x0EAD, 0x0EAE, 1}, + {0x0EB0, 0x0EB0, 1}, + {0x0EB2, 0x0EB3, 1}, + {0x0EBD, 0x0EBD, 1}, + {0x0EC0, 0x0EC4, 1}, + {0x0F40, 0x0F47, 1}, + {0x0F49, 0x0F69, 1}, + {0x10A0, 0x10C5, 1}, + {0x10D0, 0x10F6, 1}, + {0x1100, 0x1100, 1}, + {0x1102, 0x1103, 1}, + {0x1105, 0x1107, 1}, + {0x1109, 0x1109, 1}, + {0x110B, 0x110C, 1}, + {0x110E, 0x1112, 1}, + {0x113C, 0x1140, 2}, + {0x114C, 0x1150, 2}, + {0x1154, 0x1155, 1}, + {0x1159, 0x1159, 1}, + {0x115F, 0x1161, 1}, + {0x1163, 0x1169, 2}, + {0x116D, 0x116E, 1}, + {0x1172, 0x1173, 1}, + {0x1175, 0x119E, 
0x119E - 0x1175}, + {0x11A8, 0x11AB, 0x11AB - 0x11A8}, + {0x11AE, 0x11AF, 1}, + {0x11B7, 0x11B8, 1}, + {0x11BA, 0x11BA, 1}, + {0x11BC, 0x11C2, 1}, + {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, + {0x11F9, 0x11F9, 1}, + {0x1E00, 0x1E9B, 1}, + {0x1EA0, 0x1EF9, 1}, + {0x1F00, 0x1F15, 1}, + {0x1F18, 0x1F1D, 1}, + {0x1F20, 0x1F45, 1}, + {0x1F48, 0x1F4D, 1}, + {0x1F50, 0x1F57, 1}, + {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, + {0x1F5D, 0x1F5D, 1}, + {0x1F5F, 0x1F7D, 1}, + {0x1F80, 0x1FB4, 1}, + {0x1FB6, 0x1FBC, 1}, + {0x1FBE, 0x1FBE, 1}, + {0x1FC2, 0x1FC4, 1}, + {0x1FC6, 0x1FCC, 1}, + {0x1FD0, 0x1FD3, 1}, + {0x1FD6, 0x1FDB, 1}, + {0x1FE0, 0x1FEC, 1}, + {0x1FF2, 0x1FF4, 1}, + {0x1FF6, 0x1FFC, 1}, + {0x2126, 0x2126, 1}, + {0x212A, 0x212B, 1}, + {0x212E, 0x212E, 1}, + {0x2180, 0x2182, 1}, + {0x3007, 0x3007, 1}, + {0x3021, 0x3029, 1}, + {0x3041, 0x3094, 1}, + {0x30A1, 0x30FA, 1}, + {0x3105, 0x312C, 1}, + {0x4E00, 0x9FA5, 1}, + {0xAC00, 0xD7A3, 1}, + }, +} + +var second = &unicode.RangeTable{ + R16: []unicode.Range16{ + {0x002D, 0x002E, 1}, + {0x0030, 0x0039, 1}, + {0x00B7, 0x00B7, 1}, + {0x02D0, 0x02D1, 1}, + {0x0300, 0x0345, 1}, + {0x0360, 0x0361, 1}, + {0x0387, 0x0387, 1}, + {0x0483, 0x0486, 1}, + {0x0591, 0x05A1, 1}, + {0x05A3, 0x05B9, 1}, + {0x05BB, 0x05BD, 1}, + {0x05BF, 0x05BF, 1}, + {0x05C1, 0x05C2, 1}, + {0x05C4, 0x0640, 0x0640 - 0x05C4}, + {0x064B, 0x0652, 1}, + {0x0660, 0x0669, 1}, + {0x0670, 0x0670, 1}, + {0x06D6, 0x06DC, 1}, + {0x06DD, 0x06DF, 1}, + {0x06E0, 0x06E4, 1}, + {0x06E7, 0x06E8, 1}, + {0x06EA, 0x06ED, 1}, + {0x06F0, 0x06F9, 1}, + {0x0901, 0x0903, 1}, + {0x093C, 0x093C, 1}, + {0x093E, 0x094C, 1}, + {0x094D, 0x094D, 1}, + {0x0951, 0x0954, 1}, + {0x0962, 0x0963, 1}, + {0x0966, 0x096F, 1}, + {0x0981, 0x0983, 1}, + {0x09BC, 0x09BC, 1}, + {0x09BE, 0x09BF, 1}, + {0x09C0, 0x09C4, 1}, + {0x09C7, 0x09C8, 1}, + {0x09CB, 0x09CD, 1}, + {0x09D7, 0x09D7, 1}, + {0x09E2, 0x09E3, 1}, + {0x09E6, 0x09EF, 1}, + {0x0A02, 0x0A3C, 0x3A}, + {0x0A3E, 0x0A3F, 1}, + {0x0A40, 0x0A42, 1}, + {0x0A47, 0x0A48, 1}, + {0x0A4B, 0x0A4D, 1}, + {0x0A66, 0x0A6F, 1}, + {0x0A70, 0x0A71, 1}, + {0x0A81, 0x0A83, 1}, + {0x0ABC, 0x0ABC, 1}, + {0x0ABE, 0x0AC5, 1}, + {0x0AC7, 0x0AC9, 1}, + {0x0ACB, 0x0ACD, 1}, + {0x0AE6, 0x0AEF, 1}, + {0x0B01, 0x0B03, 1}, + {0x0B3C, 0x0B3C, 1}, + {0x0B3E, 0x0B43, 1}, + {0x0B47, 0x0B48, 1}, + {0x0B4B, 0x0B4D, 1}, + {0x0B56, 0x0B57, 1}, + {0x0B66, 0x0B6F, 1}, + {0x0B82, 0x0B83, 1}, + {0x0BBE, 0x0BC2, 1}, + {0x0BC6, 0x0BC8, 1}, + {0x0BCA, 0x0BCD, 1}, + {0x0BD7, 0x0BD7, 1}, + {0x0BE7, 0x0BEF, 1}, + {0x0C01, 0x0C03, 1}, + {0x0C3E, 0x0C44, 1}, + {0x0C46, 0x0C48, 1}, + {0x0C4A, 0x0C4D, 1}, + {0x0C55, 0x0C56, 1}, + {0x0C66, 0x0C6F, 1}, + {0x0C82, 0x0C83, 1}, + {0x0CBE, 0x0CC4, 1}, + {0x0CC6, 0x0CC8, 1}, + {0x0CCA, 0x0CCD, 1}, + {0x0CD5, 0x0CD6, 1}, + {0x0CE6, 0x0CEF, 1}, + {0x0D02, 0x0D03, 1}, + {0x0D3E, 0x0D43, 1}, + {0x0D46, 0x0D48, 1}, + {0x0D4A, 0x0D4D, 1}, + {0x0D57, 0x0D57, 1}, + {0x0D66, 0x0D6F, 1}, + {0x0E31, 0x0E31, 1}, + {0x0E34, 0x0E3A, 1}, + {0x0E46, 0x0E46, 1}, + {0x0E47, 0x0E4E, 1}, + {0x0E50, 0x0E59, 1}, + {0x0EB1, 0x0EB1, 1}, + {0x0EB4, 0x0EB9, 1}, + {0x0EBB, 0x0EBC, 1}, + {0x0EC6, 0x0EC6, 1}, + {0x0EC8, 0x0ECD, 1}, + {0x0ED0, 0x0ED9, 1}, + {0x0F18, 0x0F19, 1}, + {0x0F20, 0x0F29, 1}, + {0x0F35, 0x0F39, 2}, + {0x0F3E, 0x0F3F, 1}, + {0x0F71, 0x0F84, 1}, + {0x0F86, 0x0F8B, 1}, + {0x0F90, 0x0F95, 1}, + {0x0F97, 0x0F97, 1}, + {0x0F99, 0x0FAD, 1}, + {0x0FB1, 0x0FB7, 1}, + {0x0FB9, 0x0FB9, 1}, + {0x20D0, 0x20DC, 1}, + {0x20E1, 0x3005, 0x3005 - 0x20E1}, + {0x302A, 0x302F, 1}, + {0x3031, 0x3035, 1}, + {0x3099, 
0x309A, 1}, + {0x309D, 0x309E, 1}, + {0x30FC, 0x30FE, 1}, + }, +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go b/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go new file mode 100644 index 00000000..e1045f20 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/marshal.go @@ -0,0 +1,105 @@ +package goxpath + +import ( + "bytes" + "encoding/xml" + "io" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//Marshal prints the result tree, r, in XML form to w. +func Marshal(n tree.Node, w io.Writer) error { + return marshal(n, w) +} + +//MarshalStr is like Marhal, but returns a string. +func MarshalStr(n tree.Node) (string, error) { + ret := bytes.NewBufferString("") + err := marshal(n, ret) + + return ret.String(), err +} + +func marshal(n tree.Node, w io.Writer) error { + e := xml.NewEncoder(w) + err := encTok(n, e) + if err != nil { + return err + } + + return e.Flush() +} + +func encTok(n tree.Node, e *xml.Encoder) error { + switch n.GetNodeType() { + case tree.NtAttr: + return encAttr(n.GetToken().(xml.Attr), e) + case tree.NtElem: + return encEle(n.(tree.Elem), e) + case tree.NtNs: + return encNS(n.GetToken().(xml.Attr), e) + case tree.NtRoot: + for _, i := range n.(tree.Elem).GetChildren() { + err := encTok(i, e) + if err != nil { + return err + } + } + return nil + } + + //case tree.NtChd, tree.NtComm, tree.NtPi: + return e.EncodeToken(n.GetToken()) +} + +func encAttr(a xml.Attr, e *xml.Encoder) error { + str := a.Name.Local + `="` + a.Value + `"` + + if a.Name.Space != "" { + str += ` xmlns="` + a.Name.Space + `"` + } + + pi := xml.ProcInst{ + Target: "attribute", + Inst: ([]byte)(str), + } + + return e.EncodeToken(pi) +} + +func encNS(ns xml.Attr, e *xml.Encoder) error { + pi := xml.ProcInst{ + Target: "namespace", + Inst: ([]byte)(ns.Value), + } + return e.EncodeToken(pi) +} + +func encEle(n tree.Elem, e *xml.Encoder) error { + ele := xml.StartElement{ + Name: n.GetToken().(xml.StartElement).Name, + } + + attrs := n.GetAttrs() + ele.Attr = make([]xml.Attr, len(attrs)) + for i := range attrs { + ele.Attr[i] = attrs[i].GetToken().(xml.Attr) + } + + err := e.EncodeToken(ele) + if err != nil { + return err + } + + if x, ok := n.(tree.Elem); ok { + for _, i := range x.GetChildren() { + err := encTok(i, e) + if err != nil { + return err + } + } + } + + return e.EncodeToken(ele.End()) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go new file mode 100644 index 00000000..08196c62 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/ast.go @@ -0,0 +1,113 @@ +package parser + +import "github.com/ChrisTrenkamp/goxpath/lexer" + +//NodeType enumerations +const ( + Empty lexer.XItemType = "" +) + +//Node builds an AST tree for operating on XPath expressions +type Node struct { + Val lexer.XItem + Left *Node + Right *Node + Parent *Node + next *Node +} + +var beginPathType = map[lexer.XItemType]bool{ + lexer.XItemAbsLocPath: true, + lexer.XItemAbbrAbsLocPath: true, + lexer.XItemAbbrRelLocPath: true, + lexer.XItemRelLocPath: true, + lexer.XItemFunction: true, +} + +func (n *Node) add(i lexer.XItem) { + if n.Val.Typ == Empty { + n.Val = i + } else if n.Left == nil && n.Right == nil { + n.Left = &Node{Val: n.Val, Parent: n} + n.Val = i + } else if beginPathType[n.Val.Typ] { + next := &Node{Val: n.Val, Left: n.Left, Right: n.Right, Parent: n} + n.Left, n.Right = next, nil + n.Val = i + } else if n.Right == nil { + n.Right = 
&Node{Val: i, Parent: n} + } else { + next := &Node{Val: n.Val, Left: n.Left, Right: n.Right, Parent: n} + n.Left, n.Right = next, nil + n.Val = i + } + n.next = n +} + +func (n *Node) push(i lexer.XItem) { + if n.Left == nil { + n.Left = &Node{Val: i, Parent: n} + n.next = n.Left + } else if n.Right == nil { + n.Right = &Node{Val: i, Parent: n} + n.next = n.Right + } else { + next := &Node{Val: i, Left: n.Right, Parent: n} + n.Right = next + n.next = n.Right + } +} + +func (n *Node) pushNotEmpty(i lexer.XItem) { + if n.Val.Typ == Empty { + n.add(i) + } else { + n.push(i) + } +} + +/* +func (n *Node) prettyPrint(depth, width int) { + nodes := []*Node{} + n.getLine(depth, &nodes) + fmt.Printf("%*s", (width-depth)*2, "") + toggle := true + if len(nodes) > 1 { + for _, i := range nodes { + if i != nil { + if toggle { + fmt.Print("/ ") + } else { + fmt.Print("\\ ") + } + } + toggle = !toggle + } + fmt.Println() + fmt.Printf("%*s", (width-depth)*2, "") + } + for _, i := range nodes { + if i != nil { + fmt.Print(i.Val.Val, " ") + } + } + fmt.Println() +} + +func (n *Node) getLine(depth int, ret *[]*Node) { + if depth <= 0 && n != nil { + *ret = append(*ret, n) + return + } + if n.Left != nil { + n.Left.getLine(depth-1, ret) + } else if depth-1 <= 0 { + *ret = append(*ret, nil) + } + if n.Right != nil { + n.Right.getLine(depth-1, ret) + } else if depth-1 <= 0 { + *ret = append(*ret, nil) + } +} +*/ diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go new file mode 100644 index 00000000..e5ce8222 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/parser.go @@ -0,0 +1,200 @@ +package parser + +import ( + "fmt" + + "github.com/ChrisTrenkamp/goxpath/lexer" +) + +type stateType int + +const ( + defState stateType = iota + xpathState + funcState + paramState + predState + parenState +) + +type parseStack struct { + stack []*Node + stateTypes []stateType + cur *Node +} + +func (p *parseStack) push(t stateType) { + p.stack = append(p.stack, p.cur) + p.stateTypes = append(p.stateTypes, t) +} + +func (p *parseStack) pop() { + stackPos := len(p.stack) - 1 + + p.cur = p.stack[stackPos] + p.stack = p.stack[:stackPos] + p.stateTypes = p.stateTypes[:stackPos] +} + +func (p *parseStack) curState() stateType { + if len(p.stateTypes) == 0 { + return defState + } + return p.stateTypes[len(p.stateTypes)-1] +} + +type lexFn func(*parseStack, lexer.XItem) + +var parseMap = map[lexer.XItemType]lexFn{ + lexer.XItemAbsLocPath: xiXPath, + lexer.XItemAbbrAbsLocPath: xiXPath, + lexer.XItemAbbrRelLocPath: xiXPath, + lexer.XItemRelLocPath: xiXPath, + lexer.XItemEndPath: xiEndPath, + lexer.XItemAxis: xiXPath, + lexer.XItemAbbrAxis: xiXPath, + lexer.XItemNCName: xiXPath, + lexer.XItemQName: xiXPath, + lexer.XItemNodeType: xiXPath, + lexer.XItemProcLit: xiXPath, + lexer.XItemFunction: xiFunc, + lexer.XItemArgument: xiFuncArg, + lexer.XItemEndFunction: xiEndFunc, + lexer.XItemPredicate: xiPred, + lexer.XItemEndPredicate: xiEndPred, + lexer.XItemStrLit: xiValue, + lexer.XItemNumLit: xiValue, + lexer.XItemOperator: xiOp, + lexer.XItemVariable: xiValue, +} + +var opPrecedence = map[string]int{ + "|": 1, + "*": 2, + "div": 2, + "mod": 2, + "+": 3, + "-": 3, + "=": 4, + "!=": 4, + "<": 4, + "<=": 4, + ">": 4, + ">=": 4, + "and": 5, + "or": 6, +} + +//Parse creates an AST tree for XPath expressions. 
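+// +//Lexing and parsing are pipelined: lexer.Lex streams XItem tokens over a channel while Parse folds them into the AST through parseMap. A minimal sketch (the expression is illustrative): +// +//	ast, err := parser.Parse("/bookstore/book/title") +//	if err != nil { +//		// a lex or parse failure surfaces as a single error here +//	} +//	_ = ast // evaluated by walking Left/Right, as execxp does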
+func Parse(xp string) (*Node, error) { + var err error + c := lexer.Lex(xp) + n := &Node{} + p := &parseStack{cur: n} + + for next := range c { + if next.Typ != lexer.XItemError { + parseMap[next.Typ](p, next) + } else if err == nil { + err = fmt.Errorf(next.Val) + } + } + + return n, err +} + +func xiXPath(p *parseStack, i lexer.XItem) { + if p.curState() == xpathState { + p.cur.push(i) + p.cur = p.cur.next + return + } + + if p.cur.Val.Typ == lexer.XItemFunction { + p.cur.Right = &Node{Val: i, Parent: p.cur} + p.cur.next = p.cur.Right + p.push(xpathState) + p.cur = p.cur.next + return + } + + p.cur.pushNotEmpty(i) + p.push(xpathState) + p.cur = p.cur.next +} + +func xiEndPath(p *parseStack, i lexer.XItem) { + p.pop() +} + +func xiFunc(p *parseStack, i lexer.XItem) { + if p.cur.Val.Typ == Empty { + p.cur.pushNotEmpty(i) + p.push(funcState) + p.cur = p.cur.next + return + } + p.cur.push(i) + p.cur = p.cur.next + p.push(funcState) +} + +func xiFuncArg(p *parseStack, i lexer.XItem) { + if p.curState() != funcState { + p.pop() + } + + p.cur.push(i) + p.cur = p.cur.next + p.push(paramState) + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.cur = p.cur.next +} + +func xiEndFunc(p *parseStack, i lexer.XItem) { + if p.curState() == paramState { + p.pop() + } + p.pop() +} + +func xiPred(p *parseStack, i lexer.XItem) { + p.cur.push(i) + p.cur = p.cur.next + p.push(predState) + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.cur = p.cur.next +} + +func xiEndPred(p *parseStack, i lexer.XItem) { + p.pop() +} + +func xiValue(p *parseStack, i lexer.XItem) { + p.cur.add(i) +} + +func xiOp(p *parseStack, i lexer.XItem) { + if i.Val == "(" { + p.cur.push(lexer.XItem{Typ: Empty, Val: ""}) + p.push(parenState) + p.cur = p.cur.next + return + } + + if i.Val == ")" { + p.pop() + return + } + + if p.cur.Val.Typ == lexer.XItemOperator { + if opPrecedence[p.cur.Val.Val] <= opPrecedence[i.Val] { + p.cur.add(i) + } else { + p.cur.push(i) + } + } else { + p.cur.add(i) + } + p.cur = p.cur.next +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go b/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go new file mode 100644 index 00000000..7fe69eb4 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/parser/pathexpr/pathexpr.go @@ -0,0 +1,11 @@ +package pathexpr + +import "encoding/xml" + +//PathExpr represents XPath steps. xmltree.XMLTree uses it to find nodes. +type PathExpr struct { + Name xml.Name + Axis string + NodeType string + NS map[string]string +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go new file mode 100644 index 00000000..a6e64257 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/interfaces.go @@ -0,0 +1,18 @@ +package tree + +import "fmt" + +//Result is used for all data types. +type Result interface { + fmt.Stringer +} + +//IsBool is used for the XPath boolean function. It turns the data type into a bool. +type IsBool interface { + Bool() Bool +} + +//IsNum is used for the XPath number function. It turns the data type into a number.
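+// +//Note: Bool, Num, String, and NodeSet (see xtypes.go) all provide Num, so any +//of the package's data types can be used where an IsNum is expected.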
+type IsNum interface { + Num() Num +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go new file mode 100644 index 00000000..c5d6333d --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/tree.go @@ -0,0 +1,221 @@ +package tree + +import ( + "encoding/xml" + "sort" +) + +//XMLSpace is the W3C XML namespace +const XMLSpace = "http://www.w3.org/XML/1998/namespace" + +//NodePos is a helper for representing the node's document order +type NodePos int + +//Pos returns the node's document order position +func (n NodePos) Pos() int { + return int(n) +} + +//NodeType is a safer way to determine a node's type than type assertions. +type NodeType int + +//GetNodeType returns the node's type. +func (t NodeType) GetNodeType() NodeType { + return t +} + +//These are all the possible node types +const ( + NtAttr NodeType = iota + NtChd + NtComm + NtElem + NtNs + NtRoot + NtPi +) + +//Node is an XPath result that is any node except an element +type Node interface { + //ResValue returns the node's string value + ResValue() string + //Pos returns the node's position in the document order + Pos() int + //GetToken returns the xml.Token representation of the node + GetToken() xml.Token + //GetParent returns the parent node, which will always be an XML element + GetParent() Elem + //GetNodeType returns the node's type + GetNodeType() NodeType +} + +//Elem is an XPath result that is an element node +type Elem interface { + Node + //GetChildren returns the element's children. + GetChildren() []Node + //GetAttrs returns the attributes of the element + GetAttrs() []Node +} + +//NSElem is a node that keeps track of namespaces. +type NSElem interface { + Elem + GetNS() map[xml.Name]string +} + +//NSBuilder is a helper-struct for satisfying the NSElem interface +type NSBuilder struct { + NS map[xml.Name]string +} + +//GetNS returns the namespaces found on the current element. It should not be +//confused with BuildNS, which actually resolves the namespace nodes. +func (ns NSBuilder) GetNS() map[xml.Name]string { + return ns.NS +} + +type nsValueSort []NS + +func (ns nsValueSort) Len() int { return len(ns) } +func (ns nsValueSort) Swap(i, j int) { + ns[i], ns[j] = ns[j], ns[i] +} +func (ns nsValueSort) Less(i, j int) bool { + return ns[i].Value < ns[j].Value +} + +//BuildNS resolves all the namespace nodes of the element and returns them +func BuildNS(t Elem) (ret []NS) { + vals := make(map[xml.Name]string) + + if nselem, ok := t.(NSElem); ok { + buildNS(nselem, vals) + + ret = make([]NS, 0, len(vals)) + i := 1 + + for k, v := range vals { + if !(k.Local == "xmlns" && k.Space == "" && v == "") { + ret = append(ret, NS{ + Attr: xml.Attr{Name: k, Value: v}, + Parent: t, + NodeType: NtNs, + }) + i++ + } + } + + sort.Sort(nsValueSort(ret)) + for i := range ret { + ret[i].NodePos = NodePos(t.Pos() + i + 1) + } + } + + return ret +} + +func buildNS(x NSElem, ret map[xml.Name]string) { + if x.GetNodeType() == NtRoot { + return + } + + if nselem, ok := x.GetParent().(NSElem); ok { + buildNS(nselem, ret) + } + + for k, v := range x.GetNS() { + ret[k] = v + } +} + +//NS is a namespace node. +type NS struct { + xml.Attr + Parent Elem + NodePos + NodeType +} + +//GetToken returns the xml.Token representation of the namespace. +func (ns NS) GetToken() xml.Token { + return ns.Attr +} + +//GetParent returns the parent node of the namespace.
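+// +//(BuildNS sets Parent to the element the namespace node was resolved on.)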
+func (ns NS) GetParent() Elem { + return ns.Parent +} + +//ResValue returns the string value of the namespace +func (ns NS) ResValue() string { + return ns.Attr.Value +} + +//GetAttribute is a convenience function for getting the specified attribute from an element. +//false is returned if the attribute is not found. +func GetAttribute(n Elem, local, space string) (xml.Attr, bool) { + attrs := n.GetAttrs() + for _, i := range attrs { + attr := i.GetToken().(xml.Attr) + if local == attr.Name.Local && space == attr.Name.Space { + return attr, true + } + } + return xml.Attr{}, false +} + +//GetAttributeVal is like GetAttribute, except it returns the attribute's value. +func GetAttributeVal(n Elem, local, space string) (string, bool) { + attr, ok := GetAttribute(n, local, space) + return attr.Value, ok +} + +//GetAttrValOrEmpty is like GetAttributeVal, except it returns an empty string if +//the attribute is not found instead of false. +func GetAttrValOrEmpty(n Elem, local, space string) string { + val, ok := GetAttributeVal(n, local, space) + if !ok { + return "" + } + return val +} + +//FindNodeByPos finds a node from the given position. Returns nil if the node +//is not found. +func FindNodeByPos(n Node, pos int) Node { + if n.Pos() == pos { + return n + } + + if elem, ok := n.(Elem); ok { + chldrn := elem.GetChildren() + for i := 1; i < len(chldrn); i++ { + if chldrn[i-1].Pos() <= pos && chldrn[i].Pos() > pos { + return FindNodeByPos(chldrn[i-1], pos) + } + } + + if len(chldrn) > 0 { + if chldrn[len(chldrn)-1].Pos() <= pos { + return FindNodeByPos(chldrn[len(chldrn)-1], pos) + } + } + + attrs := elem.GetAttrs() + for _, i := range attrs { + if i.Pos() == pos { + return i + } + } + + ns := BuildNS(elem) + for _, i := range ns { + if i.Pos() == pos { + return i + } + } + } + + return nil +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go new file mode 100644 index 00000000..7a940bb3 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xfn.go @@ -0,0 +1,52 @@ +package tree + +import ( + "fmt" +) + +//Ctx represents the current context position, size, node, and the current filtered result +type Ctx struct { + NodeSet + Pos int + Size int +} + +//Fn is an XPath function, written in Go +type Fn func(c Ctx, args ...Result) (Result, error) + +//LastArgOpt sets whether the last argument in a function is optional, variadic, or neither +type LastArgOpt int + +//LastArgOpt options +const ( + None LastArgOpt = iota + Optional + Variadic +) + +//Wrap interfaces XPath function calls with Go +type Wrap struct { + Fn Fn + //NArgs represents the number of arguments to the XPath function. -1 represents a single optional argument + NArgs int + LastArgOpt LastArgOpt +} + +//Call checks the arguments and calls Fn if they are valid +func (w Wrap) Call(c Ctx, args ...Result) (Result, error) { + switch w.LastArgOpt { + case Optional: + if len(args) == w.NArgs || len(args) == w.NArgs-1 { + return w.Fn(c, args...) + } + case Variadic: + if len(args) >= w.NArgs-1 { + return w.Fn(c, args...)
+ } + default: + if len(args) == w.NArgs { + return w.Fn(c, args...) + } + } + return nil, fmt.Errorf("Invalid number of arguments") +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go new file mode 100644 index 00000000..625713c9 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder/xmlbuilder.go @@ -0,0 +1,25 @@ +package xmlbuilder + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//BuilderOpts supplies all the information needed to create an XML node. +type BuilderOpts struct { + Dec *xml.Decoder + Tok xml.Token + NodeType tree.NodeType + NS map[xml.Name]string + Attrs []*xml.Attr + NodePos int + AttrStartPos int +} + +//XMLBuilder is an interface for creating XML structures. +type XMLBuilder interface { + tree.Node + CreateNode(*BuilderOpts) XMLBuilder + EndElem() XMLBuilder +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go new file mode 100644 index 00000000..85e3445d --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele/xmlele.go @@ -0,0 +1,106 @@ +package xmlele + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode" +) + +//XMLEle is an implementation of xmlbuilder.XMLBuilder and tree.Elem for XML elements +type XMLEle struct { + xml.StartElement + tree.NSBuilder + Attrs []tree.Node + Children []tree.Node + Parent tree.Elem + tree.NodePos + tree.NodeType +} + +//Root is the default root node builder for xmltree.ParseXML +func Root() xmlbuilder.XMLBuilder { + return &XMLEle{NodeType: tree.NtRoot} +} + +//CreateNode is an implementation of xmlbuilder.XMLBuilder. It appends the node +//specified in opts and returns the child if it is an element. Otherwise, it returns x. +func (x *XMLEle) CreateNode(opts *xmlbuilder.BuilderOpts) xmlbuilder.XMLBuilder { + if opts.NodeType == tree.NtElem { + ele := &XMLEle{ + StartElement: opts.Tok.(xml.StartElement), + NSBuilder: tree.NSBuilder{NS: opts.NS}, + Attrs: make([]tree.Node, len(opts.Attrs)), + Parent: x, + NodePos: tree.NodePos(opts.NodePos), + NodeType: opts.NodeType, + } + for i := range opts.Attrs { + ele.Attrs[i] = xmlnode.XMLNode{ + Token: opts.Attrs[i], + NodePos: tree.NodePos(opts.AttrStartPos + i), + NodeType: tree.NtAttr, + Parent: ele, + } + } + x.Children = append(x.Children, ele) + return ele + } + + node := xmlnode.XMLNode{ + Token: opts.Tok, + NodePos: tree.NodePos(opts.NodePos), + NodeType: opts.NodeType, + Parent: x, + } + x.Children = append(x.Children, node) + return x +} + +//EndElem is an implementation of xmlbuilder.XMLBuilder. It returns x's parent.
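+// +//Note: the root builder returned by Root has no parent, so this assumes the +//decoder only reports an xml.EndElement for an element it previously opened.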
+func (x *XMLEle) EndElem() xmlbuilder.XMLBuilder { + return x.Parent.(*XMLEle) +} + +//GetToken returns the xml.Token representation of the node +func (x *XMLEle) GetToken() xml.Token { + return x.StartElement +} + +//GetParent returns the parent node, or itself if it's the root +func (x *XMLEle) GetParent() tree.Elem { + return x.Parent +} + +//GetChildren returns all child nodes of the element +func (x *XMLEle) GetChildren() []tree.Node { + ret := make([]tree.Node, len(x.Children)) + + for i := range x.Children { + ret[i] = x.Children[i] + } + + return ret +} + +//GetAttrs returns all attributes of the element +func (x *XMLEle) GetAttrs() []tree.Node { + ret := make([]tree.Node, len(x.Attrs)) + for i := range x.Attrs { + ret[i] = x.Attrs[i] + } + return ret +} + +//ResValue returns the string value of the element and its children +func (x *XMLEle) ResValue() string { + ret := "" + for i := range x.Children { + switch x.Children[i].GetNodeType() { + case tree.NtChd, tree.NtElem, tree.NtRoot: + ret += x.Children[i].ResValue() + } + } + return ret +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go new file mode 100644 index 00000000..9be76985 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlnode/xmlnode.go @@ -0,0 +1,43 @@ +package xmlnode + +import ( + "encoding/xml" + + "github.com/ChrisTrenkamp/goxpath/tree" +) + +//XMLNode will represent an attribute, character data, comment, or processing instruction node +type XMLNode struct { + xml.Token + tree.NodePos + tree.NodeType + Parent tree.Elem +} + +//GetToken returns the xml.Token representation of the node +func (a XMLNode) GetToken() xml.Token { + if a.NodeType == tree.NtAttr { + ret := a.Token.(*xml.Attr) + return *ret + } + return a.Token +} + +//GetParent returns the parent node +func (a XMLNode) GetParent() tree.Elem { + return a.Parent +} + +//ResValue returns the string value of the node +func (a XMLNode) ResValue() string { + switch a.NodeType { + case tree.NtAttr: + return a.Token.(*xml.Attr).Value + case tree.NtChd: + return string(a.Token.(xml.CharData)) + case tree.NtComm: + return string(a.Token.(xml.Comment)) + } + //case tree.NtPi: + return string(a.Token.(xml.ProcInst).Inst) +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go new file mode 100644 index 00000000..2ec066c5 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmltree.go @@ -0,0 +1,158 @@ +package xmltree + +import ( + "encoding/xml" + "io" + + "golang.org/x/net/html/charset" + + "github.com/ChrisTrenkamp/goxpath/tree" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlbuilder" + "github.com/ChrisTrenkamp/goxpath/tree/xmltree/xmlele" +) + +//ParseOptions is a set of methods and function pointers that alter +//the way the XML decoder works and the Node types that are created. +//Options that are not set will default to what is set in internal/defoverride.go +type ParseOptions struct { + Strict bool + XMLRoot func() xmlbuilder.XMLBuilder +} + +//DirectiveParser is an optional interface extended from XMLBuilder that handles +//XML directives.
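+// +//xmltree.ParseXML checks for this interface when it encounters an +//xml.Directive token (for example, a <!DOCTYPE ...> declaration) and, when the +//builder implements it, hands the copied directive and the active decoder to +//Directive.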
+type DirectiveParser interface { + xmlbuilder.XMLBuilder + Directive(xml.Directive, *xml.Decoder) +} + +//ParseSettings is a function for setting the ParseOptions you want when +//parsing an XML tree. +type ParseSettings func(s *ParseOptions) + +//MustParseXML is like ParseXML, but panics instead of returning an error. +func MustParseXML(r io.Reader, op ...ParseSettings) tree.Node { + ret, err := ParseXML(r, op...) + + if err != nil { + panic(err) + } + + return ret +} + +//ParseXML creates an XMLTree structure from an io.Reader. +func ParseXML(r io.Reader, op ...ParseSettings) (tree.Node, error) { + ov := ParseOptions{ + Strict: true, + XMLRoot: xmlele.Root, + } + for _, i := range op { + i(&ov) + } + + dec := xml.NewDecoder(r) + dec.CharsetReader = charset.NewReaderLabel + dec.Strict = ov.Strict + + ordrPos := 1 + xmlTree := ov.XMLRoot() + + t, err := dec.Token() + + if err != nil { + return nil, err + } + + if head, ok := t.(xml.ProcInst); ok && head.Target == "xml" { + t, err = dec.Token() + } + + opts := xmlbuilder.BuilderOpts{ + Dec: dec, + } + + for err == nil { + switch xt := t.(type) { + case xml.StartElement: + setEle(&opts, xmlTree, xt, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.CharData: + setNode(&opts, xmlTree, xt, tree.NtChd, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.Comment: + setNode(&opts, xmlTree, xt, tree.NtComm, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.ProcInst: + setNode(&opts, xmlTree, xt, tree.NtPi, &ordrPos) + xmlTree = xmlTree.CreateNode(&opts) + case xml.EndElement: + xmlTree = xmlTree.EndElem() + case xml.Directive: + if dp, ok := xmlTree.(DirectiveParser); ok { + dp.Directive(xt.Copy(), dec) + } + } + + t, err = dec.Token() + } + + if err == io.EOF { + err = nil + } + + return xmlTree, err +} + +func setEle(opts *xmlbuilder.BuilderOpts, xmlTree xmlbuilder.XMLBuilder, ele xml.StartElement, ordrPos *int) { + opts.NodePos = *ordrPos + opts.Tok = ele + opts.Attrs = opts.Attrs[0:0:cap(opts.Attrs)] + opts.NS = make(map[xml.Name]string) + opts.NodeType = tree.NtElem + + for i := range ele.Attr { + attr := ele.Attr[i].Name + val := ele.Attr[i].Value + + if (attr.Local == "xmlns" && attr.Space == "") || attr.Space == "xmlns" { + opts.NS[attr] = val + } else { + opts.Attrs = append(opts.Attrs, &ele.Attr[i]) + } + } + + if nstree, ok := xmlTree.(tree.NSElem); ok { + ns := make(map[xml.Name]string) + + for _, i := range tree.BuildNS(nstree) { + ns[i.Name] = i.Value + } + + for k, v := range opts.NS { + ns[k] = v + } + + if ns[xml.Name{Local: "xmlns"}] == "" { + delete(ns, xml.Name{Local: "xmlns"}) + } + + for k, v := range ns { + opts.NS[k] = v + } + + if xmlTree.GetNodeType() == tree.NtRoot { + opts.NS[xml.Name{Space: "xmlns", Local: "xml"}] = tree.XMLSpace + } + } + + opts.AttrStartPos = len(opts.NS) + len(opts.Attrs) + *ordrPos + *ordrPos = opts.AttrStartPos + 1 +} + +func setNode(opts *xmlbuilder.BuilderOpts, xmlTree xmlbuilder.XMLBuilder, tok xml.Token, nt tree.NodeType, ordrPos *int) { + opts.Tok = xml.CopyToken(tok) + opts.NodeType = nt + opts.NodePos = *ordrPos + *ordrPos++ +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go new file mode 100644 index 00000000..b9704109 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/tree/xtypes.go @@ -0,0 +1,113 @@ +package tree + +import ( + "fmt" + "math" + "strconv" + "strings" +) + +//Boolean strings +const ( + True = "true" + False = "false" +) + +//Bool is a 
boolean XPath type +type Bool bool + +//String satisfies the Result interface for Bool +func (b Bool) String() string { + if b { + return True + } + + return False +} + +//Bool satisfies the IsBool interface for Bool +func (b Bool) Bool() Bool { + return b +} + +//Num satisfies the IsNum interface for Bool +func (b Bool) Num() Num { + if b { + return Num(1) + } + + return Num(0) +} + +//Num is a number XPath type +type Num float64 + +//String satisfies the Result interface for Num +func (n Num) String() string { + if math.IsInf(float64(n), 0) { + if math.IsInf(float64(n), 1) { + return "Infinity" + } + return "-Infinity" + } + return fmt.Sprintf("%g", float64(n)) +} + +//Bool satisfies the IsBool interface for Num +func (n Num) Bool() Bool { + return n != 0 +} + +//Num satisfies the IsNum interface for Num +func (n Num) Num() Num { + return n +} + +//String is the string XPath type +type String string + +//String satisfies the Result interface for String +func (s String) String() string { + return string(s) +} + +//Bool satisfies the IsBool interface for String +func (s String) Bool() Bool { + return Bool(len(s) > 0) +} + +//Num satisfies the IsNum interface for String +func (s String) Num() Num { + num, err := strconv.ParseFloat(strings.TrimSpace(string(s)), 64) + if err != nil { + return Num(math.NaN()) + } + return Num(num) +} + +//NodeSet is a node-set XPath type +type NodeSet []Node + +//GetNodeNum converts the node to a string-value and to a number +func GetNodeNum(n Node) Num { + return String(n.ResValue()).Num() +} + +//String satisfies the Result interface for NodeSet +func (n NodeSet) String() string { + if len(n) == 0 { + return "" + } + + return n[0].ResValue() +} + +//Bool satisfies the IsBool interface for NodeSet +func (n NodeSet) Bool() Bool { + return Bool(len(n) > 0) +} + +//Num satisfies the IsNum interface for NodeSet +func (n NodeSet) Num() Num { + return String(n.String()).Num() +} diff --git a/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go b/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go new file mode 100644 index 00000000..65099127 --- /dev/null +++ b/vendor/github.com/ChrisTrenkamp/goxpath/xconst/xconst.go @@ -0,0 +1,66 @@ +package xconst + +const ( + //AxisAncestor represents the "ancestor" axis + AxisAncestor = "ancestor" + //AxisAncestorOrSelf represents the "ancestor-or-self" axis + AxisAncestorOrSelf = "ancestor-or-self" + //AxisAttribute represents the "attribute" axis + AxisAttribute = "attribute" + //AxisChild represents the "child" axis + AxisChild = "child" + //AxisDescendent represents the "descendant" axis + AxisDescendent = "descendant" + //AxisDescendentOrSelf represents the "descendant-or-self" axis + AxisDescendentOrSelf = "descendant-or-self" + //AxisFollowing represents the "following" axis + AxisFollowing = "following" + //AxisFollowingSibling represents the "following-sibling" axis + AxisFollowingSibling = "following-sibling" + //AxisNamespace represents the "namespace" axis + AxisNamespace = "namespace" + //AxisParent represents the "parent" axis + AxisParent = "parent" + //AxisPreceding represents the "preceding" axis + AxisPreceding = "preceding" + //AxisPrecedingSibling represents the "preceding-sibling" axis + AxisPrecedingSibling = "preceding-sibling" + //AxisSelf represents the "self" axis + AxisSelf = "self" +) + +//AxisNames is all the possible Axis identifiers wrapped in an array for convenience +var AxisNames = []string{ + AxisAncestor, + AxisAncestorOrSelf, + AxisAttribute,
AxisChild, + AxisDescendent, + AxisDescendentOrSelf, + AxisFollowing, + AxisFollowingSibling, + AxisNamespace, + AxisParent, + AxisPreceding, + AxisPrecedingSibling, + AxisSelf, +} + +const ( + //NodeTypeComment represents the "comment" node test + NodeTypeComment = "comment" + //NodeTypeText represents the "text" node test + NodeTypeText = "text" + //NodeTypeProcInst represents the "processing-instruction" node test + NodeTypeProcInst = "processing-instruction" + //NodeTypeNode represents the "node" node test + NodeTypeNode = "node" +) + +//NodeTypes is all the possible node tests wrapped in an array for convenience +var NodeTypes = []string{ + NodeTypeComment, + NodeTypeText, + NodeTypeProcInst, + NodeTypeNode, +} diff --git a/vendor/github.com/HewlettPackard/oneview-golang/CHANGELOG.md b/vendor/github.com/HewlettPackard/oneview-golang/CHANGELOG.md index 96339792..7ef6c186 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/CHANGELOG.md +++ b/vendor/github.com/HewlettPackard/oneview-golang/CHANGELOG.md @@ -18,9 +18,6 @@ This project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html - Server Profile support with Server Profile Template extended to 1000 - Support for update compliance in Logical Interconnect. -### Major changes: -- Extended support of SDK to OneView API1000. - #### Bug fixes & Enhancements: - [#153](https://github.com/HewlettPackard/oneview-golang/issues/153) change in the enclosure.go file for editing wrong fields diff --git a/vendor/github.com/HewlettPackard/oneview-golang/endpoints-support.md b/vendor/github.com/HewlettPackard/oneview-golang/endpoints-support.md old mode 100755 new mode 100644 index 9f4ea76e..b7c7fdf6 --- a/vendor/github.com/HewlettPackard/oneview-golang/endpoints-support.md +++ b/vendor/github.com/HewlettPackard/oneview-golang/endpoints-support.md @@ -12,7 +12,7 @@ * If an endpoint is marked as implemented in a previous version of the API, it will likely already be working for newer API versions; however, in these cases it is important to: 1. Specify the 'type' of the resource when using an untested API, as it will not get set by default -2. If an example is not working, verify the [HPE OneView REST API Documentation](http://h17007.www1.hpe.com/docs/enterprise/servers/oneview4.2/cic-api/en/index.html) for the respective API version being used, since the expected attributes for that resource might have changed. +2. If an example is not working, verify the [HPE OneView REST API Documentation](http://h17007.www1.hpe.com/docs/enterprise/servers/oneview3.1/cic-api/en/api-docs/current/index.html) for the API version being used, since the expected attributes for that resource might have changed.
@@ -26,50 +26,49 @@ |/rest/enclosures/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/enclosures/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Enclosure Groups** | -|/rest/enclosure-groups | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-groups | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-groups/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-groups/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-groups/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/enclosure-groups/{id}/script | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | -|/rest/enclosure-groups/{id}/script | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/enclosure-groups | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups/{id}/script | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/enclosure-groups/{id}/script | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Ethernet Networks** | -|/rest/ethernet-networks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/bulk | POST | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | 
-|/rest/ethernet-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | -|/rest/ethernet-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id}/associatedProfiles | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/ethernet-networks/{id}/associatedUplinkGroups | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/bulk | POST | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | +|/rest/ethernet-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/{id}/associatedProfiles | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/ethernet-networks/{id}/associatedUplinkGroups | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **FC Networks** | -|/rest/fc-networks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | -|/rest/fc-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fc-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :heavy_minus_sign: | 
:heavy_minus_sign: | +|/rest/fc-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fc-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **FCoE Networks** | -|/rest/fcoe-networks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | -|/rest/fcoe-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/fcoe-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/fcoe-networks | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | PATCH | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +|/rest/fcoe-networks/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | | **Interconnects** | -|/rest/interconnects | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/interconnects/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/interconnects | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/interconnects/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Interconnect Types** | -|/rest/interconnect-types | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/interconnect-types/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/interconnect-types | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/interconnect-types/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Logical Enclosures** | -|/rest/logical-enclosures | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | 
:white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id}/updateFromGroup | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/logical-enclosures/{id} | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/logical-enclosures/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Logical Interconnect Groups** | |/rest/logical-interconnect-groups | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | |/rest/logical-interconnect-groups | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | @@ -140,25 +139,25 @@ |/rest/fc-sans/device-managers/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | |/rest/fc-sans/providers/{id}/device-managers | POST | :white_check_mark: | :heavy_minus_sign: | :heavy_multiplication_x: | | **Scope** | -|/rest/scope | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/scope/{id} | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/scope/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/scope | POST | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/scope/{id} | DELETE | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/scope | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | +|/rest/scope/{id} | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | +|/rest/scope/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | +|/rest/scope | POST | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | +|/rest/scope/{id} | DELETE | :heavy_minus_sign: | :heavy_minus_sign: | 
:heavy_minus_sign: | :white_check_mark: | :white_check_mark: | | **Server Hardware** | |/rest/server-hardware | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/server-hardware/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/server-hardware/{id}/powerState | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/server-hardware/{id}/firmware | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Server Hardware Types** | -|/rest/server-hardware-types | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/server-hardware-types/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/server-hardware-types/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | -|/rest/server-hardware-types/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | +|/rest/server-hardware-types | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/server-hardware-types/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/server-hardware-types/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_multiplication_x: | :heavy_multiplication_x: | +|/rest/server-hardware-types/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | | **Server Profile Templates** | |/rest/server-profile-templates | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | |/rest/server-profile-templates | POST | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | -|/rest/server-profile-templates/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | +|/rest/server-profile-templates/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/server-profile-templates/{id} | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | |/rest/server-profile-templates/{id} | DELETE | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | | **Server Profiles** | @@ -169,29 +168,22 @@ /rest/server-profiles | PUT | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | /rest/server-profiles | PATCH | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Storage Pools** | -|/rest/storage-pools | GET | 
:heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-pools/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-pools/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-pools | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-pools/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-pools/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Storage Systems** | -|/rest/storage-systems | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems/{id} | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems/{id}/reachable-ports | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems | POST | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems | DELETE | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -/rest/storage-systems/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-systems/{id}/storage-volume-sets | GET | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | -| **Storage Volume Attachments** | -|/rest/storage-volume-attachments | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-attachments/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| **Storage Volume Templates** | -|/rest/storage-volume-templates | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates/{id} | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates | POST | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volume-templates | DELETE | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -/rest/storage-volume-templates/{id} | PUT | :heavy_multiplication_x: | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems | GET | 
:heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems/{id} | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems/{id}/reachable-ports | GET | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems | POST | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-systems | DELETE | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +/rest/storage-systems/{id} | PUT | :heavy_minus_sign: | :heavy_minus_sign: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Switch Types** | |/rest/switch-types | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | |/rest/switch-types/{id} | GET | :white_check_mark: | :white_check_mark: | :heavy_multiplication_x: | +| **Storage Volume Attachments** | +|/rest/storage-volume-attachments | GET | :heavy_multiplication_x: | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volume-attachments/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Tasks** | |/rest/tasks | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Version** | @@ -203,19 +195,8 @@ |/rest/uplink-sets/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | |/rest/uplink-sets/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | | **Volumes** | -|/rest/storage-volumes | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -|/rest/storage-volumes/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | - -## HPE Synergy Image Streamer - -| Endpoints | Verb | V300 | V500 | V600 | V800 | V1000 -| --------------------------------------------------------------------------------------- | -------- | :------------------: | :------------------: | :------------------: | :------------------: | :------------------: | -| **Deployment Plans** | -| /rest/deployment-plans | POST | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| /rest/deployment-plans | GET | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| /rest/deployment-plans/{id} | GET | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| 
/rest/deployment-plans/{id} | PUT | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | -| /rest/deployment-plans/{id} | DELETE | :heavy_multiplication_x: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volumes | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volumes/{id} | GET | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volumes | POST | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volumes/{id} | PUT | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +|/rest/storage-volumes/{id} | DELETE | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | diff --git a/vendor/github.com/HewlettPackard/oneview-golang/i3s/deployment_plan.go b/vendor/github.com/HewlettPackard/oneview-golang/i3s/deployment_plan.go index 44d999c3..2162eb7a 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/i3s/deployment_plan.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/i3s/deployment_plan.go @@ -28,14 +28,14 @@ type DeploymentPlan struct { Category string `json:"category,omitempty"` // "category": "oe-deployment-plan", Created string `json:"created,omitempty"` // "created": "20150831T154835.250Z", CustomAttributes []CustomAttribute `json:"customAttributes,omitempty"` // "customAttributes": [], - Description utils.Nstring `json:"description,omitempty"` // "description": "Deployment Plan 1", + Description string `json:"description,omitempty"` // "description": "Deployment Plan 1", ETAG string `json:"eTag,omitempty"` // "eTag": "1441036118675/8", - GoldenImageUri utils.Nstring `json:"goldenImageURI,omitempty"` // "goldenImageUri": "", - HPProvided bool `json:"hpProvided"` // "hpProvided": false, + GoldenImageUri utils.Nstring `json:"goldenImageUri,omitempty"` // "goldenImageUri": "", + HPProvided bool `json:"hpProvided,omitempty"` // "hpProvided": false, ID string `json:"id,omitempty"` // "id": "1", Modified string `json:"modified,omitempty"` // "modified": "20150831T154835.250Z", Name string `json:"name,omitempty"` // "name": "Deployment Plan 1", - OEBuildPlanURI utils.Nstring `json:"oeBuildPlanURI,omitempty"` // "oeBuildPlanUri": "/rest/build-plans/4f907ab5-7133-4081-bb5a-4a6dea398046", + OEBuildPlanURI utils.Nstring `json:"oeBuildPlanUri,omitempty"` // "oeBuildPlanUri": "/rest/build-plans/4f907ab5-7133-4081-bb5a-4a6dea398046", State string `json:"state,omitempty"` // "state": "Normal", Status string `json:"status,omitempty"` // "status": "Critical", Type string `json:"type,omitempty"` // "type": "OEDeploymentPlan", @@ -67,7 +67,7 @@ func (c *I3SClient) GetDeploymentPlanByName(name string) (DeploymentPlan, error) var ( depPlan DeploymentPlan ) - depPlans, err := c.GetDeploymentPlans("", fmt.Sprintf("name matches '%s'", name), "", "name:asc", "") + depPlans, err := c.GetDeploymentPlans(fmt.Sprintf("name matches '%s'", name), "name:asc") if err != nil { return depPlan, err } @@ -78,33 +78,21 @@ func (c *I3SClient) GetDeploymentPlanByName(name string) (DeploymentPlan, error) } } -func (c *I3SClient) GetDeploymentPlans(count string, filter string, query string, sort string, start string) (DeploymentPlanList, error) { +func (c 
*I3SClient) GetDeploymentPlans(filter string, sort string) (DeploymentPlanList, error) { var ( uri = "/rest/deployment-plans" q map[string]interface{} deploymentPlans DeploymentPlanList ) q = make(map[string]interface{}) - if len(count) > 0 { - q["count"] = count - } - if len(filter) > 0 { q["filter"] = filter } - if len(query) > 0 { - q["query"] = query - } - if sort != "" { q["sort"] = sort } - if len(start) > 0 { - q["start"] = start - } - c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) // Setup query if len(q) > 0 { @@ -124,21 +112,37 @@ func (c *I3SClient) GetDeploymentPlans(count string, filter string, query string } func (c *I3SClient) CreateDeploymentPlan(deploymentPlan DeploymentPlan) error { - log.Infof("Initializing creation of deploymentPlan for %s.", deploymentPlan.Name) var ( uri = "/rest/deployment-plans" + t *Task ) c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) + t = t.NewTask(c) + t.ResetTask() log.Debugf("REST : %s \n %+v\n", uri, deploymentPlan) - _, err := c.RestAPICall(rest.POST, uri, deploymentPlan) + log.Debugf("task -> %+v", t) + data, err := c.RestAPICall(rest.POST, uri, deploymentPlan) if err != nil { + t.TaskIsDone = true log.Errorf("Error submitting new deployment plan request: %s", err) return err } + log.Debugf("Response New DeploymentPlan %s", data) + if err := json.Unmarshal([]byte(data), &t); err != nil { + t.TaskIsDone = true + log.Errorf("Error with task un-marshal: %s", err) + return err + } + + err = t.Wait() + if err != nil { + return err + } + return nil } @@ -146,6 +150,7 @@ func (c *I3SClient) DeleteDeploymentPlan(name string) error { var ( deploymentPlan DeploymentPlan err error + t *Task uri string ) @@ -154,18 +159,33 @@ func (c *I3SClient) DeleteDeploymentPlan(name string) error { return err } if deploymentPlan.Name != "" { + t = t.NewTask(c) + t.ResetTask() log.Debugf("REST : %s \n %+v\n", deploymentPlan.URI, deploymentPlan) + log.Debugf("task -> %+v", t) uri = deploymentPlan.URI.String() if uri == "" { log.Warn("Unable to post delete, no uri found.") + t.TaskIsDone = true return err } - _, err := c.RestAPICall(rest.DELETE, uri, nil) + data, err := c.RestAPICall(rest.DELETE, uri, nil) if err != nil { log.Errorf("Error submitting delete deployment plan request: %s", err) + t.TaskIsDone = true return err } + log.Debugf("Response delete deployment plan %s", data) + if err := json.Unmarshal([]byte(data), &t); err != nil { + t.TaskIsDone = true + log.Errorf("Error with task un-marshal: %s", err) + return err + } + err = t.Wait() + if err != nil { + return err + } return nil } else { log.Infof("DeploymentPlan could not be found to delete, %s, skipping delete ...", name) @@ -177,16 +197,33 @@ func (c *I3SClient) UpdateDeploymentPlan(deploymentPlan DeploymentPlan) error { log.Infof("Initializing update of deployment plan for %s.", deploymentPlan.Name) var ( uri = deploymentPlan.URI.String() + t *Task ) c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) + t = t.NewTask(c) + t.ResetTask() log.Debugf("REST : %s \n %+v\n", uri, deploymentPlan) - _, err := c.RestAPICall(rest.PUT, uri, deploymentPlan) + log.Debugf("task -> %+v", t) + data, err := c.RestAPICall(rest.PUT, uri, deploymentPlan) if err != nil { + t.TaskIsDone = true log.Errorf("Error submitting update deployment plan request: %s", err) return err } + log.Debugf("Response update DeploymentPlan %s", data) + if err := json.Unmarshal([]byte(data), &t); err != nil { + t.TaskIsDone = true + log.Errorf("Error with task un-marshal: %s", err) + return err + } + + err = t.Wait() + if err != nil { + return err + } + 
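+ //the PUT above returned a task resource; t.Wait() has polled it to completion at this point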
return nil } diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/connection.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/connection.go index c17240ee..05ccef98 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/connection.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/connection.go @@ -27,35 +27,18 @@ type BootTarget struct { LUN string `json:"lun,omitempty"` // lun(string,required), The LUN of the Boot Volume presented by the target device. This value can be either 1 to 3 decimal digits in the range 0 to 255, or 13 to 16 hex digits with no other characters } -type Ipv4Option struct { - Gateway string `json:"gateway,omitempty"` - IpAddressSource string `json:"ipAddressSource,omitempty"` -} - -type BootIscsi struct { - Chaplevel string `json:"chapLevel,omitempty"` - FirstBootTargetIp string `json:"firstBootTargetIp,omitempty"` - FirstBootTargetPort string `json:"firstBootTargetPort,omitempty"` - SecondBootTargetIp string `json:"secondBootTargetIp,omitempty"` - SecondBootTargetPort string `json:"secondBootTargetPort,omitempty"` -} - // BootOption - type BootOption struct { BootOptionV3 - Priority string `json:"priority,omitempty"` // priority(const_string), indicates the boot priority for this device. PXE and Fibre Channel connections are treated separately; an Ethernet connection and a Fibre Channel connection can both be marked as Primary. The 'order' attribute controls ordering among the different device types. - Targets []BootTarget `json:"targets,omitempty"` // targets {BootTarget} - EthernetBootType string `json:"ethernetBootType,omitempty"` - BootVolumeSource string `json:"bootVolumeSource,omitempty"` - Iscsi *BootIscsi `json:"iscsi,omitempty"` + Priority string `json:"priority,omitempty"` // priority(const_string), indicates the boot priority for this device. PXE and Fibre Channel connections are treated separately; an Ethernet connection and a Fibre Channel connection can both be marked as Primary. The 'order' attribute controls ordering among the different device types. + Targets []BootTarget `json:"targets,omitempty"` // targets {BootTarget} } // Connection server profile object for ov type Connection struct { Connectionv200 - AllocatedMbps int `json:"allocatedMbps,omitempty"` // allocatedMbps(int:read), The transmit throughput (mbps) currently allocated to this connection. When Fibre Channel connections are set to Auto for requested bandwidth, the value can be set to -2000 to indicate that the actual value is unknown until OneView is able to negotiate the actual speed. - Boot *BootOption `json:"boot,omitempty"` // boot {} - Ipv4 *Ipv4Option `json:"ipv4,omitempty"` + AllocatedMbps int `json:"allocatedMbps,omitempty"` // allocatedMbps(int:read), The transmit throughput (mbps) currently allocated to this connection. When Fibre Channel connections are set to Auto for requested bandwidth, the value can be set to -2000 to indicate that the actual value is unknown until OneView is able to negotiate the actual speed. + Boot *BootOption `json:"boot,omitempty"` // boot {} DeploymentStatus string `json:"deploymentStatus,omitempty"` // deploymentStatus(const_string:read), The deployment status of the connection. The value can be 'Undefined', 'Reserved', or 'Deployed'. FunctionType string `json:"functionType,omitempty"` // functionType(const_string), Type of function required for the connection. functionType cannot be modified after the connection is created. 
'Ethernet', 'FibreChannel' ID int `json:"id,omitempty"` // id(int), A unique identifier for this connection. When creating or editing a profile, an id is automatically assigned if the attribute is omitted or 0 is specified. When editing a profile, a connection is created if the id does not identify an existing connection. diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/enclosure.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/enclosure.go index dee7ebb2..35b35987 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/enclosure.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/enclosure.go @@ -3,7 +3,6 @@ package ov import ( "encoding/json" "fmt" - "github.com/HewlettPackard/oneview-golang/rest" "github.com/HewlettPackard/oneview-golang/utils" "github.com/docker/machine/libmachine/log" @@ -248,7 +247,7 @@ type Partition struct { MemoryMb int `json:"memoryMb,omitempty"` // "memoryMb": 1, MonarchDevice int `json:"monarchDevice,omitempty"` // "monarchDevice": 1, PartitionHealth string `json:"partitionHealth,omitempty"` // "partitionHealth": "", - PartitionID int `json:"partitionID,omitempty"` // "partitionID": "", + PartitionID string `json:"partitionID,omitempty"` // "partitionID": "", PartitionName string `json:"partitionName,omitempty"` // "partitionName": "", PartitionStatus string `json:"partitionStatus,omitempty"` // "partitionStatus": "", ParttionUUID string `json:"parttionUUID,omitempty"` // "parttionUUID": "", diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/fcoe_network.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/fcoe_network.go index 9ca0e00e..b58844d2 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/fcoe_network.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/fcoe_network.go @@ -9,22 +9,20 @@ import ( ) type FCoENetwork struct { - Type string `json:"type,omitempty"` - VlanId int `json:"vlanId,omitempty"` - ConnectionTemplateUri utils.Nstring `json:"connectionTemplateUri,omitempty"` - ManagedSanUri utils.Nstring `json:"managedSanUri,omitempty"` - FabricUri utils.Nstring `json:"fabricUri,omitempty"` - Description utils.Nstring `json:"description,omitempty"` - Name string `json:"name,omitempty"` - State string `json:"state,omitempty"` - Status string `json:"status,omitempty"` - ETAG string `json:"eTag,omitempty"` - Modified string `json:"modified,omitempty"` - Created string `json:"created,omitempty"` - Category string `json:"category,omitempty"` - URI utils.Nstring `json:"uri,omitempty"` - ScopesUri utils.Nstring `json:"scopesUri,omitempty"` - InitialScopeUris []utils.Nstring `json:"initialScopeUris,omitempty"` // "initialScopeUris":[] + Type string `json:"type,omitempty"` + VlanId int `json:"vlanId,omitempty"` + ConnectionTemplateUri utils.Nstring `json:"connectionTemplateUri,omitempty"` + ManagedSanUri utils.Nstring `json:"managedSanUri,omitempty"` + FabricUri utils.Nstring `json:"fabricUri,omitempty"` + Description utils.Nstring `json:"description,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + ETAG string `json:"eTag,omitempty"` + Modified string `json:"modified,omitempty"` + Created string `json:"created,omitempty"` + Category string `json:"category,omitempty"` + URI utils.Nstring `json:"uri,omitempty"` } type FCoENetworkList struct { @@ -41,7 +39,7 @@ func (c *OVClient) GetFCoENetworkByName(name string) 
(FCoENetwork, error) { var ( fcoeNet FCoENetwork ) - fcoeNets, err := c.GetFCoENetworks(fmt.Sprintf("name matches '%s'", name), "name:asc", "", "") + fcoeNets, err := c.GetFCoENetworks(fmt.Sprintf("name matches '%s'", name), "name:asc") if fcoeNets.Total > 0 { return fcoeNets.Members[0], err } else { @@ -49,12 +47,13 @@ func (c *OVClient) GetFCoENetworkByName(name string) (FCoENetwork, error) { } } -func (c *OVClient) GetFCoENetworks(filter string, sort string, start string, count string) (FCoENetworkList, error) { +func (c *OVClient) GetFCoENetworks(filter string, sort string) (FCoENetworkList, error) { var ( uri = "/rest/fcoe-networks" - q = make(map[string]interface{}) + q map[string]interface{} fcoeNetworks FCoENetworkList ) + q = make(map[string]interface{}) if len(filter) > 0 { q["filter"] = filter } @@ -63,14 +62,6 @@ func (c *OVClient) GetFCoENetworks(filter string, sort string, start string, cou q["sort"] = sort } - if start != "" { - q["start"] = start - } - - if count != "" { - q["count"] = count - } - // refresh login c.RefreshLogin() c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) @@ -84,7 +75,7 @@ func (c *OVClient) GetFCoENetworks(filter string, sort string, start string, cou } log.Debugf("GetfcoeNetworks %s", data) - if err := json.Unmarshal(data, &fcoeNetworks); err != nil { + if err := json.Unmarshal([]byte(data), &fcoeNetworks); err != nil { return fcoeNetworks, err } return fcoeNetworks, nil diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/logical_enclosure.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/logical_enclosure.go index ae956908..04b1c877 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/logical_enclosure.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/logical_enclosure.go @@ -245,9 +245,6 @@ func (c *OVClient) UpdateLogicalEnclosure(logEn LogicalEnclosure) error { c.RefreshLogin() c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) - // reset query - c.SetQueryString(make(map[string]interface{})) - t = t.NewProfileTask(c) t.ResetTask() log.Debugf("REST : %s \n %+v\n", uri, logEn) @@ -273,42 +270,3 @@ func (c *OVClient) UpdateLogicalEnclosure(logEn LogicalEnclosure) error { return nil } - -func (c *OVClient) UpdateFromGroupLogicalEnclosure(logEn LogicalEnclosure) error { - log.Infof("Initializing updateFromGroup of logical enclosure for %s.", logEn.Name) - var ( - uri = logEn.URI.String() + "/updateFromGroup" - t *Task - ) - // refresh login - c.RefreshLogin() - c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) - - // reset query - c.SetQueryString(make(map[string]interface{})) - - t = t.NewProfileTask(c) - t.ResetTask() - log.Debugf("REST : %s \n %+v\n", uri, nil) - log.Debugf("task -> %+v", t) - data, err := c.RestAPICall(rest.PUT, uri, nil) - if err != nil { - t.TaskIsDone = true - log.Errorf("Error submitting updateFromGroup logical enclosure request: %s", err) - return err - } - - log.Debugf("Response updateFromGroup LogicalEnclosure %s", data) - if err := json.Unmarshal([]byte(data), &t); err != nil { - t.TaskIsDone = true - log.Errorf("Error with task un-marshal: %s", err) - return err - } - - err = t.Wait() - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/network_set.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/network_set.go index d4bf2085..666e766f 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/network_set.go +++ 
b/vendor/github.com/HewlettPackard/oneview-golang/ov/network_set.go @@ -18,7 +18,6 @@ type NetworkSet struct { Modified string `json:"modified,omitempty"` // "modified": "20150831T154835.250Z", Name string `json:"name"` // "name": "Network Set 1", NativeNetworkUri utils.Nstring `json:"nativeNetworkUri,omitempty"` - NetworkSetType string `json:"networkSetType,omitempty"` // "networkSetType": "Regular" NetworkUris []utils.Nstring `json:"networkUris"` ScopesUri utils.Nstring `json:"scopesUri,omitempty"` State string `json:"state,omitempty"` // "state": "Normal", diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/profiles.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/profiles.go index 993d5bca..f3e4496c 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/profiles.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/profiles.go @@ -65,16 +65,10 @@ type ConnectionSettings struct { Connections []Connection `json:"connections,omitempty"` } -type Options struct { - Op string `json:"op,omitempty"` // "op": "replace", - Path string `json:"path,omitempty"` // "path": "/templateCompliance", - Value string `json:"value,omitempty"` // "value": "Compliant", -} - // ServerProfile - server profile object for ov type ServerProfile struct { ServerProfilev200 - ServerProfilev300 + *ServerProfilev300 Affinity string `json:"affinity,omitempty"` // "affinity": "Bay", AssociatedServer utils.Nstring `json:"associatedServer,omitempty"` // "associatedServer": null, Bios *BiosOption `json:"bios,omitempty"` // "bios": { }, @@ -459,40 +453,3 @@ func (c *OVClient) UpdateServerProfile(p ServerProfile) error { return nil } - -func (c *OVClient) PatchServerProfile(p ServerProfile, request []Options) error { - - log.Infof("Initializing update of server profile for %s.", p.Name) - - var ( - uri = p.URI.String() - t *Task - ) - - // refresh login - c.RefreshLogin() - c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) - t = t.NewProfileTask(c) - t.ResetTask() - log.Debugf("REST : %s \n %+v\n", uri, request) - log.Debugf("task -> %+v", t) - data, err := c.RestAPICall(rest.PATCH, uri, request) - if err != nil { - t.TaskIsDone = true - log.Errorf("Error submitting update server profile request: %s", err) - return err - } - log.Debugf("Response update ServerProfile %s", data) - if err := json.Unmarshal([]byte(data), &t); err != nil { - t.TaskIsDone = true - log.Errorf("Error with task un-marshal: %s", err) - return err - } - - err = t.Wait() - if err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/profilesv3.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/profilesv3.go index 14144ba1..323c27e1 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/profilesv3.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/profilesv3.go @@ -45,9 +45,9 @@ type BootOptionV3 struct { } type ServerProfilev300 struct { - IscsiInitiatorName string `json:"iscsiInitiatorName,omitempty"` // "iscsiInitiatorName": "name of iscsi initiator name", - IscsiInitiatorNameType string `json:"iscsiInitiatorNameType,omitempty"` // "iscsiInitiatorNameType": "UserDefined", - OSDeploymentSettings *OSDeploymentSettings `json:"osDeploymentSettings,omitempty"` // "osDeploymentSettings": {...}, + IscsiInitiatorName string `json:"iscsiInitiatorName,omitempty"` // "iscsiInitiatorName": "name of iscsi initiator name", + IscsiInitiatorNameType string 
`json:"iscsiInitiatorNameType,omitempty"` // "iscsiInitiatorNameType": "UserDefined", + OSDeploymentSettings OSDeploymentSettings `json:"osDeploymentSettings,omitempty"` // "osDeploymentSettings": {...}, } type OSDeploymentSettings struct { diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware.go index 82838029..5a68bd8d 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware.go @@ -72,33 +72,30 @@ func (h HardwareState) Equal(s string) bool { // ServerHardware get server hardware from ov type ServerHardware struct { ServerHardwarev200 - AssetTag string `json:"assetTag,omitempty"` // "assetTag": "[Unknown]", - Category string `json:"category,omitempty"` // "category": "server-hardware", - Created string `json:"created,omitempty"` // "created": "2015-08-14T21:02:01.537Z", - Description utils.Nstring `json:"description,omitempty"` // "description": null, - ETAG string `json:"eTag,omitempty"` // "eTag": "1441147370086", - FormFactor string `json:"formFactor,omitempty"` // "formFactor": "HalfHeight", - Generation string `json:"generation,omitempty"` // "generation":, - HostOsType int `json:"hostOsType,omitempty"` - LicensingIntent string `json:"licensingIntent,omitempty"` // "licensingIntent": "OneView", - LocationURI utils.Nstring `json:"locationUri,omitempty"` // "locationUri": "/rest/enclosures/092SN51207RR", - MemoryMb int `json:"memoryMb,omitempty"` // "memoryMb": 262144, - Model string `json:"model,omitempty"` // "model": "ProLiant BL460c Gen9", - Modified string `json:"modified,omitempty"` // "modified": "2015-09-01T22:42:50.086Z", - MpFirwareVersion string `json:"mpFirmwareVersion,omitempty"` // "mpFirmwareVersion": "2.03 Nov 07 2014", - MpModel string `json:"mpModel,omitempty"` // "mpModel": "iLO4", - Name string `json:"name,omitempty"` // "name": "se05, bay 16", - PartNumber string `json:"partNumber,omitempty"` // "partNumber": "727021-B21", - Position int `json:"position,omitempty"` // "position": 16, - PowerLock bool `json:"powerLock,omitempty"` // "powerLock": false, - PowerState string `json:"powerState,omitempty"` // "powerState": "Off", - ProcessorCoreCount int `json:"processorCoreCount,omitempty"` // "processorCoreCount": 14, - ProcessorCount int `json:"processorCount,omitempty"` // "processorCount": 2, - ProcessorSpeedMhz int `json:"processorSpeedMhz,omitempty"` // "processorSpeedMhz": 2300, - ProcessorType string `json:"processorType,omitempty"` // "processorType": "Intel(R) Xeon(R) CPU E5-2695 v3 @ 2.30GHz", - RefreshState string `json:"refreshState,omitempty"` // "refreshState": "NotRefreshing", - RomVersion string `json:"romVersion,omitempty"` // "romVersion": "I36 11/03/2014", - ScopesUri string `json:"scopesUri,omitempty"` + AssetTag string `json:"assetTag,omitempty"` // "assetTag": "[Unknown]", + Category string `json:"category,omitempty"` // "category": "server-hardware", + Created string `json:"created,omitempty"` // "created": "2015-08-14T21:02:01.537Z", + Description utils.Nstring `json:"description,omitempty"` // "description": null, + ETAG string `json:"eTag,omitempty"` // "eTag": "1441147370086", + FormFactor string `json:"formFactor,omitempty"` // "formFactor": "HalfHeight", + LicensingIntent string `json:"licensingIntent,omitempty"` // "licensingIntent": "OneView", + LocationURI utils.Nstring `json:"locationUri,omitempty"` // 
"locationUri": "/rest/enclosures/092SN51207RR", + MemoryMb int `json:"memoryMb,omitempty"` // "memoryMb": 262144, + Model string `json:"model,omitempty"` // "model": "ProLiant BL460c Gen9", + Modified string `json:"modified,omitempty"` // "modified": "2015-09-01T22:42:50.086Z", + MpFirwareVersion string `json:"mpFirmwareVersion,omitempty"` // "mpFirmwareVersion": "2.03 Nov 07 2014", + MpModel string `json:"mpModel,omitempty"` // "mpModel": "iLO4", + Name string `json:"name,omitempty"` // "name": "se05, bay 16", + PartNumber string `json:"partNumber,omitempty"` // "partNumber": "727021-B21", + Position int `json:"position,omitempty"` // "position": 16, + PowerLock bool `json:"powerLock,omitempty"` // "powerLock": false, + PowerState string `json:"powerState,omitempty"` // "powerState": "Off", + ProcessorCoreCount int `json:"processorCoreCount,omitempty"` // "processorCoreCount": 14, + ProcessorCount int `json:"processorCount,omitempty"` // "processorCount": 2, + ProcessorSpeedMhz int `json:"processorSpeedMhz,omitempty"` // "processorSpeedMhz": 2300, + ProcessorType string `json:"processorType,omitempty"` // "processorType": "Intel(R) Xeon(R) CPU E5-2695 v3 @ 2.30GHz", + RefreshState string `json:"refreshState,omitempty"` // "refreshState": "NotRefreshing", + RomVersion string `json:"romVersion,omitempty"` // "romVersion": "I36 11/03/2014", SerialNumber utils.Nstring `json:"serialNumber,omitempty"` // "serialNumber": "2M25090RMW", ServerGroupURI utils.Nstring `json:"serverGroupUri,omitempty"` // "serverGroupUri": "/rest/enclosure-groups/56ad0069-8362-42fd-b4e3-f5c5a69af039", ServerHardwareTypeURI utils.Nstring `json:"serverHardwareTypeUri,omitempty"` // "serverHardwareTypeUri": "/rest/server-hardware-types/DB7726F7-F601-4EA8-B4A6-D1EE1B32C07C", @@ -236,7 +233,7 @@ func (c *OVClient) GetServerHardwareByName(name string) (ServerHardware, error) ) filters := []string{fmt.Sprintf("name matches '%s'", name)} - serverHardwareList, err := c.GetServerHardwareList(filters, "name:asc", "", "", "") + serverHardwareList, err := c.GetServerHardwareList(filters, "name:asc") if serverHardwareList.Total > 0 { serverHardwareList.Members[0].Client = c return serverHardwareList.Members[0], err @@ -246,7 +243,7 @@ func (c *OVClient) GetServerHardwareByName(name string) (ServerHardware, error) } // GetServerHardwareList gets a server hardware with filters -func (c *OVClient) GetServerHardwareList(filters []string, sort string, start string, count string, expand string) (ServerHardwareList, error) { +func (c *OVClient) GetServerHardwareList(filters []string, sort string) (ServerHardwareList, error) { var ( uri = "/rest/server-hardware" q map[string]interface{} @@ -261,18 +258,6 @@ func (c *OVClient) GetServerHardwareList(filters []string, sort string, start st q["sort"] = sort } - if start != "" { - q["start"] = start - } - - if count != "" { - q["count"] = count - } - - if expand != "" { - q["expand"] = expand - } - // refresh login c.RefreshLogin() c.SetAuthHeaderOptions(c.GetAuthHeaderMap()) @@ -301,7 +286,7 @@ func (c *OVClient) GetAvailableHardware(hardwaretype_uri utils.Nstring, servergr f = []string{"serverHardwareTypeUri='" + hardwaretype_uri.String() + "'", "serverGroupUri='" + servergroup_uri.String() + "'"} ) - if hwlist, err = c.GetServerHardwareList(f, "name:desc", "", "", ""); err != nil { + if hwlist, err = c.GetServerHardwareList(f, "name:desc"); err != nil { return hw, err } if !(len(hwlist.Members) > 0) { diff --git 
a/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware_type.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware_type.go index 0657578e..606ea222 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware_type.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/server_hardware_type.go @@ -71,9 +71,7 @@ type BiosSetting struct { } type StorageCapability struct { - ConotrollerCapabilities []string `json:"controllerCapabilities,omitempty"` // "controllerCapabilities":{} ControllerModes []string `json:"controllerModes,omitempty"` // "controllerModes":{} - DedicatedSpareSupported bool `json:"dedicatedSpareSupported,omitempty"` // "dedicatedSpareSupported":false DriveTechnologies []string `json:"driveTechnologies,omitempty"` // "driveTechnologies":{} DriveWriteCacheSupported bool `json:"driveWriteCacheSupported,omitempty"` // "driveWriteCacheSupported":false MaximumDrives int `json:"maximumDrives,omitempty"` // "maximumDrives":5 @@ -98,7 +96,7 @@ type ServerHardwareType struct { Name string `json:"name,omitempty"` // "name": "ServerHardware 1", PxeBootPolicies []string `json:"pxeBootPolicies,omitempty"` // "pxeBootPolicies":{}, StorageCapabilities *StorageCapability `json:"storageCapabilities,omitempty"` // "storageCapabilities":{..,}, - Type string `json:"type,omitempty"` // "type": "server-hardware-type-10", + Type string `json:"type,omitempty"` // "type": "server-hardware-type-4", URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/server-hardware-types/e2f0031b-52bd-4223-9ac1-d91cb519d548" UefiClass string `json:"uefiClass,omitempty"` // "uefiClass":"2" } diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage.go index 735b0101..8dfce979 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage.go @@ -52,14 +52,7 @@ type StoragePath struct { IsEnabled bool `json:"isEnabled"` // isEnabled (required), Identifies whether the storage path is enabled. Status string `json:"status,omitempty"` // status (read only), The overall health status of the storage path. StorageTargetType string `json:"storageTargetType,omitempty"` // storageTargetType ('Auto', 'TargetPorts') - Targets []Target `json:"targets,omitempty"` // only set when storageTargetType is TargetPorts - TargetSelector string `json:"targetSelector,omitempty"` -} - -type Target struct { - IpAddress string `json:"IpAddress,omitempty"` - Name string `json:"name,omitempty"` - TcpPort int `json:"tcpPort,omitempty"` + StorageTargets []string `json:"storageTargets,omitempty"` // only set when storageTargetType is TargetPorts } // Clone - @@ -68,18 +61,17 @@ func (c StoragePath) Clone() StoragePath { ConnectionID: c.ConnectionID, IsEnabled: c.IsEnabled, StorageTargetType: c.StorageTargetType, - Targets: c.Targets, + StorageTargets: c.StorageTargets, } } // VolumeAttachment volume attachment type VolumeAttachment struct { VolumeAttachmentV3 - ID int `json:"id,omitempty"` // id, The ID of the attached storage volume. - LUN string `json:"lun,omitempty"` // lun, The logical unit number. - LUNType string `json:"lunType,omitempty"` // lunType(required), The logical unit number type: Auto or Manual. - Permanent *bool `json:"permanent,omitempty"` // permanent, If true, indicates that the volume will persist when the profile is deleted. 
If false, then the volume will be deleted when the profile is deleted.
-	BootVolumePriority string `json:"bootVolumePriority,omitempty"`
+	ID           int           `json:"id,omitempty"`           // id, The ID of the attached storage volume.
+	LUN          string        `json:"lun,omitempty"`          // lun, The logical unit number.
+	LUNType      string        `json:"lunType,omitempty"`      // lunType(required), The logical unit number type: Auto or Manual.
+	Permanent    bool          `json:"permanent"`              // permanent, If true, indicates that the volume will persist when the profile is deleted. If false, then the volume will be deleted when the profile is deleted.
 	State        string        `json:"state,omitempty"`        // state(read only), current state of the attachment
 	Status       string        `json:"status,omitempty"`       // status(read only), The current status of the attachment.
 	StoragePaths []StoragePath `json:"storagePaths,omitempty"` // A list of host-to-target path associations.
@@ -87,10 +79,10 @@ type VolumeAttachment struct {
 	VolumeName                     string        `json:"volumeName,omitempty"`                     // The name of the volume. This attribute is required when creating a volume.
 	VolumeProvisionType            string        `json:"volumeProvisionType,omitempty"`            // The provisioning type of the new volume: Thin or Thick. This attribute is required when creating a volume.
 	VolumeProvisionedCapacityBytes string        `json:"volumeProvisionedCapacityBytes,omitempty"` // The requested provisioned capacity of the storage volume in bytes. This attribute is required when creating a volume.
-	VolumeShareable                *bool         `json:"volumeShareable,omitempty"`                // Identifies whether the storage volume is shared or private. If false, then the volume will be private. If true, then the volume will be shared. This attribute is required when creating a volume.
+	VolumeShareable                bool          `json:"volumeShareable"`                          // Identifies whether the storage volume is shared or private. If false, then the volume will be private. If true, then the volume will be shared. This attribute is required when creating a volume.
 	VolumeStoragePoolURI           utils.Nstring `json:"volumeStoragePoolUri,omitempty"`           // The URI of the storage pool associated with this volume attachment's volume. Use GET /rest/server-profiles/available-storage-systems to retrieve the URI of the storage pool associated with a volume.
 	VolumeStorageSystemURI         utils.Nstring `json:"volumeStorageSystemUri,omitempty"`         // The URI of the storage system associated with this volume attachment. Use GET /rest/server-profiles/available-storage-systems to retrieve the URI of the storage system associated with a volume.
 	VolumeURI                      utils.Nstring `json:"volumeUri,omitempty"`                      // The URI of the storage volume associated with this volume attachment. Use GET /rest/server-profiles/available-storage-systems to retrieve the URIs of available storage volumes.
 }

 // Clone clone volume attachment for submits
diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_pool.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_pool.go
index bcd47c3a..8f143847 100644
--- a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_pool.go
+++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_pool.go
@@ -11,68 +11,38 @@ import (
 // The Marshal() will omit the bool attributes below if they are false.
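The pointer-to-bool fields above (`Permanent *bool`, `VolumeShareable *bool`) were switched to plain `bool` fields without `omitempty` for the reason this header comment (continued just below) spells out: `encoding/json` drops a `false` bool when `omitempty` is set, so an explicit `false` could never reach the appliance. A small, self-contained illustration of that pitfall; the `flags` type and its fields are invented for the demo and are not part of the library:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flags is a demo type, not part of the library: one bool with omitempty,
// one without, and one pointer that can distinguish "unset" from "false".
type flags struct {
	Omitted  bool  `json:"omitted,omitempty"`
	Always   bool  `json:"always"`
	Explicit *bool `json:"explicit,omitempty"`
}

func main() {
	f := false
	out, _ := json.Marshal(flags{Omitted: false, Always: false, Explicit: &f})
	// Prints {"always":false,"explicit":false}: the omitempty bool vanishes
	// when false, which is why the diff drops omitempty from fields such as
	// "permanent" and "volumeShareable".
	fmt.Println(string(out))
}
```

Keeping a `*bool` is the alternative when the API must distinguish "not specified" from "false"; dropping `omitempty`, as this diff does, is the simpler fix when the field should always be sent.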
// Please remove the omitempty option and use it as and when required. -type StoragePool struct { - Category string `json:"category,omitempty"` - Created string `json:"created,omitempty"` - Description utils.Nstring `json:"description"` - ETAG string `json:"eTag,omitempty"` - Name string `json:"name,omitempty"` - State string `json:"state,omitempty"` - Status string `json:"status,omitempty"` - Type string `json:"type,omitempty"` - URI utils.Nstring `json:"uri,omitempty"` - AllocatedCapacity string `json:"allocatedCapacity,omitempty"` - InitialScopeUris utils.Nstring `json:"initialScopeUris,omitempty"` - DeviceSpecificAttributes *DeviceSpecificAttributesStoragePool `json:"deviceSpecificAttributes,omitempty"` - StorageSystemUri utils.Nstring `json:"storageSystemUri,omitempty"` - TotalCapacity string `json:"totalCapacity,omitempty"` - FreeCapacity string `json:"freeCapacity,omitempty"` - IsManaged bool `json:"isManaged"` +type StoragePoolV3 struct { + Category string `json:"category,omitempty"` + Created string `json:"created,omitempty"` + Description utils.Nstring `json:"description,omitempty"` + ETAG string `json:"eTag,omitempty"` + Name string `json:"name,omitempty"` + State string `json:"state,omitempty"` + Status string `json:"status,omitempty"` + Type string `json:"type,omitempty"` + URI utils.Nstring `json:"uri,omitempty"` + AllocatedCapacity string `json:"allocatedCapacity,omitempty"` + InitialScopeUris utils.Nstring `json:"initialScopeUris,omitempty"` + DeviceSpecificAttributes *DeviceSpecificAttributes `json:"deviceSpecificAttributes,omitempty"` + StorageSystemUri utils.Nstring `json:"storageSystemUri,omitempty"` + TotalCapacity string `json:"totalCapacity,omitempty"` + FreeCapacity string `json:"freeCapacity,omitempty"` + IsManaged bool `json:"isManaged"` } -type DeviceSpecificAttributesStoragePool struct { - DeviceID string `json:"deviceId,omitempty"` - Folders []Folders `json:"folders,omitempty"` - IsDeduplicationCapable bool `json:"isDeduplicationCapable,omitempty"` - AllocatedCapacity *AllocatedCapacity `json:"allocatedCapacity,omitempty"` - CapacityLimit string `json:"capacityLimit,omitempty"` - CapacityWarningLimit string `json:"capacityWarningLimit,omitempty"` - DeviceSpeed string `json:"deviceSpeed,omitempty"` - Domain string `json:"domain,omitempty"` - SupportedRaidLevel string `json:"supportedRaidLevel,omitempty"` - Uuid string `json:"uuid,omitempty"` - VolumeCreationSpace []VolumeCreationSpace `json:"volumeCreationSpace,omitempty"` +type StoragePoolsListV3 struct { + Total int `json:"total,omitempty"` // "total": 1, + Count int `json:"count,omitempty"` // "count": 1, + Start int `json:"start,omitempty"` // "start": 0, + PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, + NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, + URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/storage-pools?filter=connectionTemplateUri%20matches%7769cae0-b680-435b-9b87-9b864c81657fsort=name:asc" + Members []StoragePoolV3 `json:"members,omitempty"` // "members":[] } -type Folders struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type AllocatedCapacity struct { - SnapshotAllocatedCapacity string `json:"snapshotAllocatedCapacity,omitempty"` - TotalAllocatedCapacity string `json:"totalAllocatedCapacity,omitempty"` - VolumeAllocatedCapacity string `json:"volumeAllocatedCapacity,omitempty"` -} - -type VolumeCreationSpace struct { - AvailableSpace int `json:"availableSpace,omitempty"` - 
ReplicationLevel int `json:"replicationLevel,omitempty"` -} - -type StoragePoolsList struct { - Total int `json:"total,omitempty"` // "total": 1, - Count int `json:"count,omitempty"` // "count": 1, - Start int `json:"start,omitempty"` // "start": 0, - PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, - NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, - URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/storage-pools?filter=connectionTemplateUri%20matches%7769cae0-b680-435b-9b87-9b864c81657fsort=name:asc" - Members []StoragePool `json:"members,omitempty"` // "members":[] -} - -func (c *OVClient) GetStoragePoolByName(name string) (StoragePool, error) { +func (c *OVClient) GetStoragePoolByName(name string) (StoragePoolV3, error) { var ( - sPool StoragePool + sPool StoragePoolV3 ) sPools, err := c.GetStoragePools(fmt.Sprintf("name matches '%s'", name), "name:asc", "", "") if sPools.Total > 0 { @@ -82,11 +52,11 @@ func (c *OVClient) GetStoragePoolByName(name string) (StoragePool, error) { } } -func (c *OVClient) GetStoragePools(filter string, sort string, start string, count string) (StoragePoolsList, error) { +func (c *OVClient) GetStoragePools(filter string, sort string, start string, count string) (StoragePoolsListV3, error) { var ( uri = "/rest/storage-pools" q map[string]interface{} - sPools StoragePoolsList + sPools StoragePoolsListV3 ) q = make(map[string]interface{}) if len(filter) > 0 { @@ -125,7 +95,7 @@ func (c *OVClient) GetStoragePools(filter string, sort string, start string, cou return sPools, nil } -func (c *OVClient) UpdateStoragePool(sPool StoragePool) error { +func (c *OVClient) UpdateStoragePool(sPool StoragePoolV3) error { log.Infof("Initializing update of storage volume for %s.", sPool.Name) var ( uri = sPool.URI.String() diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_system.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_system.go index 58ce2571..6792c003 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_system.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_system.go @@ -21,7 +21,7 @@ import ( "github.com/docker/machine/libmachine/log" ) -type StorageSystem struct { +type StorageSystemV4 struct { Hostname string `json:"hostname,omitempty"` Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` @@ -58,28 +58,10 @@ type PortDeviceSpecificAttributes struct { } type StorageSystemDeviceSpecificAttributes struct { - Firmware string `json:"firmware,omitempty"` - Model string `json:"model,omitempty"` - ManagedPools []ManagedPools `json:"managedPools,omitempty"` - ManagedDomain string `json:"managedDomain,omitempty"` - DefaultEncryptionCipher string `json:"defaultEncryptionCipher,omitempty"` - IsDefaultEncryptionForced bool `json:"isDefaultEncryptionForced,omitempty"` - PerformancePolicies []PerformancePolicies `json:"performancePolicies,omitempty"` - ProtectionTemplates []ProtectionTemplates `json:"protectionTemplates,omitempty"` - SoftwareVersion string `json:"softwareVersion,omitempty"` -} - -type PerformancePolicies struct { - ApplicationCategory string `json:"applicationCategory,omitempty"` - BlockSize int `json:",blockSize,omitempty"` - Name string `json:"name,omitempty"` - IsCompressed bool `json:"isCompressed,omitempty"` - SpacePolicy string `json:"spacePolicy,omitempty"` -} - -type ProtectionTemplates struct { - AppSync string 
`json:"appSync,omitempty"` - Name string `json:"name,omitempty"` + Firmware string `json:"firmware,omitempty"` + Model string `json:"model,omitempty"` + ManagedPools []ManagedPools `json:"managedPools,omitempty"` + ManagedDomain string `json:"managedDomain,omitempty"` } type ManagedPools struct { @@ -91,14 +73,14 @@ type ManagedPools struct { Totalcapacity string `json:"totalCapacity,omitempty"` } -type StorageSystemsList struct { - Total int `json:"total,omitempty"` // "total": 1, - Count int `json:"count,omitempty"` // "count": 1, - Start int `json:"start,omitempty"` // "start": 0, - PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, - NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, - URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/storage-systems" - Members []StorageSystem `json:"members,omitempty"` // "members":[] +type StorageSystemsListV4 struct { + Total int `json:"total,omitempty"` // "total": 1, + Count int `json:"count,omitempty"` // "count": 1, + Start int `json:"start,omitempty"` // "start": 0, + PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, + NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, + URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/storage-systems" + Members []StorageSystemV4 `json:"members,omitempty"` // "members":[] } type ReachablePortsList struct { @@ -116,26 +98,9 @@ type ReachablePorts struct { ReachableNetworks utils.Nstring `json:"reachableNetworks,omitempty"` } -type VolumeSetList struct { - Category string `json:"category,omitempty"` - Members []VolumeSet `json:"members,omitempty"` - Total int `json:"total,omitempty"` - Count int `json:"count,omitempty"` - Start int `json:"start,omitempty"` - PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` - NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` - URI utils.Nstring `json:"uri,omitempty"` -} - -type VolumeSet struct { - TotalVolumes int `json:"totalVolumes,omitempty"` - VolumeURIs []string `json:"volumeUris,omitempty"` - State string `json:"state,omitempty"` -} - -func (c *OVClient) GetStorageSystemByName(name string) (StorageSystem, error) { +func (c *OVClient) GetStorageSystemByName(name string) (StorageSystemV4, error) { var ( - sSystem StorageSystem + sSystem StorageSystemV4 ) sSystems, err := c.GetStorageSystems(fmt.Sprintf("name matches '%s'", name), "name:asc") if sSystems.Total > 0 { @@ -145,11 +110,11 @@ func (c *OVClient) GetStorageSystemByName(name string) (StorageSystem, error) { } } -func (c *OVClient) GetStorageSystems(filter string, sort string) (StorageSystemsList, error) { +func (c *OVClient) GetStorageSystems(filter string, sort string) (StorageSystemsListV4, error) { var ( uri = "/rest/storage-systems" q map[string]interface{} - sSystem StorageSystemsList + sSystem StorageSystemsListV4 ) q = make(map[string]interface{}) if len(filter) > 0 { @@ -180,7 +145,7 @@ func (c *OVClient) GetStorageSystems(filter string, sort string) (StorageSystems return sSystem, nil } -func (c *OVClient) CreateStorageSystem(sSystem StorageSystem) error { +func (c *OVClient) CreateStorageSystem(sSystem StorageSystemV4) error { log.Infof("Initializing creation of storage volume for %s.", sSystem.Name) var ( uri = "/rest/storage-systems" @@ -219,7 +184,7 @@ func (c *OVClient) CreateStorageSystem(sSystem StorageSystem) error { func (c *OVClient) DeleteStorageSystem(name string) error { var ( - sSystem StorageSystem + sSystem StorageSystemV4 err error t *Task 
uri     string
@@ -264,7 +229,7 @@ func (c *OVClient) DeleteStorageSystem(name string) error {
 	return nil
 }

-func (c *OVClient) UpdateStorageSystem(sSystem StorageSystem) error {
+func (c *OVClient) UpdateStorageSystem(sSystem StorageSystemV4) error {
 	log.Infof("Initializing update of storage volume for %s.", sSystem.Name)
 	var (
 		uri = sSystem.URI.String()
@@ -319,23 +284,3 @@ func (c *OVClient) GetReachablePorts(uri utils.Nstring) (ReachablePortsList, err
 	}
 	return reachable_ports, nil
 }
-
-func (c *OVClient) GetVolumeSets(uri utils.Nstring) (VolumeSetList, error) {
-	var (
-		volume_sets VolumeSetList
-		main_uri    = uri.String()
-	)
-	c.RefreshLogin()
-	c.SetAuthHeaderOptions(c.GetAuthHeaderMap())
-	main_uri = main_uri + "/storage-volume-sets"
-	data, err := c.RestAPICall(rest.GET, main_uri, nil)
-	if err != nil {
-		log.Errorf("Error in getting volume sets: %s", err)
-		return volume_sets, err
-	}
-	log.Debugf("Volume Sets %s", data)
-	if err := json.Unmarshal([]byte(data), &volume_sets); err != nil {
-		return volume_sets, err
-	}
-	return volume_sets, nil
-}
diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume.go
index b6f41e1e..75b3afb6 100644
--- a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume.go
+++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume.go
@@ -11,7 +11,7 @@ import (
 // The Marshal() will omit the bool attributes below if they are false.
 // Please remove the omitempty option and use it as and when required.
-type StorageVolume struct {
+type StorageVolumeV3 struct {
 	Category    string        `json:"category,omitempty"`
 	Created     string        `json:"created,omitempty"`
 	Description utils.Nstring `json:"description,omitempty"`
@@ -24,29 +24,28 @@ type StorageVolume struct {
 	DeviceVolumeName         string                    `json:"deviceVolumeName,omitempty"`
 	RequestingRefresh        bool                      `json:"requestingRefresh,omitempty"`
 	AllocatedCapacity        string                    `json:"allocatedCapacity,omitempty"`
-	InitialScopeUris         []utils.Nstring           `json:"initialScopeUris,omitempty"`
+	InitialScopeUris         utils.Nstring             `json:"initialScopeUris,omitempty"`
 	DeviceSpecificAttributes *DeviceSpecificAttributes `json:"deviceSpecificAttributes,omitempty"`
 	VolumeTemplateUri        utils.Nstring             `json:"volumeTemplateUri,omitempty"`
-	IsShareable              *bool                     `json:"isShareable,omitempty"`
+	IsShareable              bool                      `json:"isShareable,omitempty"`
 	StoragePoolUri           utils.Nstring             `json:"storagePoolUri,omitempty"`
 	StorageSystemUri         utils.Nstring             `json:"storageSystemUri,omitempty"`
 	ProvisionedCapacity      string                    `json:"provisionedCapacity,omitempty"`
 	Properties               *Properties               `json:"properties,omitempty"`
 	TemplateURI              utils.Nstring             `json:"templateURI,omitempty"`
-	IsPermanent              *bool                     `json:"isPermanent,omitempty"`
+	IsPermanent              bool                      `json:"isPermanent,omitempty"`
 	ProvisioningTypeForUpdate string                   `json:"provisioningType,omitempty"`
-	TemplateVersion          string                    `json:"templateVersion,omitempty"`
 	// Wwn string `json:""`
 	/*
-		Category    string        `json:"category,omitempty"`    // "type": "StorageVolume",
+		Category    string        `json:"category,omitempty"`    // "type": "StorageVolumeV3",
 		Created     string        `json:"created,omitempty"`     // "created": "2016-06-20T22:48:14.422Z"
 		Description string        `json:"description,omitempty"` // "description": "Integration test volume 2",
 		ETAG        string        `json:"eTag,omitempty"`        // "eTag": "2016-06-20T22:48:17.704Z",
 		Modified    string        `json:"modified,omitempty"`    // "modified": "20150831T154835.250Z",
 		Name        string        `json:"name,omitempty"`        // "name": "Volume_2",
 		State
string `json:"state,omitempty"` // "state": "Configured", Status string `json:"status,omitempty"` // "status": "OK", - Type string `json:"type,omitempty"` // "type": "StorageVolume", + Type string `json:"type,omitempty"` // "type": "StorageVolumeV3", URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/storage-volumes/527801AC-B6B6-4A63-8510-D32906C9C57B" Shareable string `json:"shareable,omitempty"` // "shareable": false, AllocatedCapacity string `json:""` // "allocatedCapacity": "1073741824", @@ -78,55 +77,36 @@ type ProvisioningParameters struct { } type Properties struct { - Name string `json:"name,omitempty"` - Storagepool utils.Nstring `json:"storagePool,omitempty"` - Size int `json:"size,omitempty"` - ProvisioningType string `json:"provisioningType,omitempty"` - DataTransferLimit int `json:"dataTransferLimit,omitempty"` - IsDeduplicated bool `json:"isDeduplicated,omitempty"` - IsEncrypted bool `json:"isEncrypted,omitempty"` - IsPinned bool `json:"isPinned,omitempty"` - IsCompressed bool `json:"isCompressed,omitempty"` - IsShareable bool `json:"isShareable,omitempty"` - DataProtectionLevel string `json:"dataProtectionLevel,omitempty"` + Name string `json:"name,omitempty"` + Storagepool utils.Nstring `json:"storagePool,omitempty"` + Size int `json:"size,omitempty"` + ProvisioningType string `json:"provisioningType,omitempty"` } type DeviceSpecificAttributes struct { - ApplicationCategory string `json:"applicationCategory,omitempty"` - BlockSize int `json:"blockSize,omitempty"` - DataTransferLimit int `json:"dataTransferLimit,omitempty"` - FolderId string `json:"folderId,omitempty"` - FolderName string `json:"folderName,omitempty"` - IopsLimit int `json:"iopsLimit,omitempty"` - IsDeduplicated bool `json:"isDeduplicated,omitempty"` - IsEncrypted bool `json:"isEncrypted,omitempty"` - IsPinned bool `json:"isPinned,omitempty"` - PreviouslyDeduplicated bool `json:"previouslyDeduplicated,omitempty"` - IsCompressed bool `json:"isCompressed,omitempty"` - Transport string `json:"transport,omitempty"` - Iqn string `json:"iqn,omitempty"` - NumberOfReplicas int `json:"numberOfReplicas,omitempty"` - DataProtectionLevel string `json:"dataProtectionLevel,omitempty"` - Id int `json:"id,omitempty"` - Uri utils.Nstring `json:"uri,omitempty"` - CopyState string `json:"copyState,omitempty"` - SnapshotPoolUri utils.Nstring `json:"snapshotPoolUri,omitempty"` - IsAdaptiveOptimizationEnabled bool `json:"isAdaptiveOptimizationEnabled,omitempty"` + Transport string `json:"transport,omitempty"` + Iqn string `json:"iqn,omitempty"` + NumberOfReplicas int `json:"numberOfReplicas,omitempty"` + DataProtectionLevel string `json:"dataProtectionLevel,omitempty"` + Id int `json:"id,omitempty"` + Uri utils.Nstring `json:"uri,omitempty"` + CopyState string `json:"copyState,omitempty"` + SnapshotPoolUri utils.Nstring `json:"snapshotPoolUri,omitempty"` } -type StorageVolumesList struct { - Total int `json:"total,omitempty"` // "total": 1, - Count int `json:"count,omitempty"` // "count": 1, - Start int `json:"start,omitempty"` // "start": 0, - PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, - NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, - URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/server-profiles?filter=connectionTemplateUri%20matches%7769cae0-b680-435b-9b87-9b864c81657fsort=name:asc" - Members []StorageVolume `json:"members,omitempty"` // "members":[] +type StorageVolumesListV3 struct { + Total int `json:"total,omitempty"` // "total": 1, 
+ Count int `json:"count,omitempty"` // "count": 1, + Start int `json:"start,omitempty"` // "start": 0, + PrevPageURI utils.Nstring `json:"prevPageUri,omitempty"` // "prevPageUri": null, + NextPageURI utils.Nstring `json:"nextPageUri,omitempty"` // "nextPageUri": null, + URI utils.Nstring `json:"uri,omitempty"` // "uri": "/rest/server-profiles?filter=connectionTemplateUri%20matches%7769cae0-b680-435b-9b87-9b864c81657fsort=name:asc" + Members []StorageVolumeV3 `json:"members,omitempty"` // "members":[] } -func (c *OVClient) GetStorageVolumeByName(name string) (StorageVolume, error) { +func (c *OVClient) GetStorageVolumeByName(name string) (StorageVolumeV3, error) { var ( - sVol StorageVolume + sVol StorageVolumeV3 ) sVols, err := c.GetStorageVolumes(fmt.Sprintf("name matches '%s'", name), "name:asc") if sVols.Total > 0 { @@ -136,11 +116,11 @@ func (c *OVClient) GetStorageVolumeByName(name string) (StorageVolume, error) { } } -func (c *OVClient) GetStorageVolumes(filter string, sort string) (StorageVolumesList, error) { +func (c *OVClient) GetStorageVolumes(filter string, sort string) (StorageVolumesListV3, error) { var ( uri = "/rest/storage-volumes" q map[string]interface{} - sVols StorageVolumesList + sVols StorageVolumesListV3 ) q = make(map[string]interface{}) if len(filter) > 0 { @@ -171,7 +151,7 @@ func (c *OVClient) GetStorageVolumes(filter string, sort string) (StorageVolumes return sVols, nil } -func (c *OVClient) CreateStorageVolume(sVol StorageVolume) error { +func (c *OVClient) CreateStorageVolume(sVol StorageVolumeV3) error { log.Infof("Initializing creation of storage volume for %s.", sVol.Name) var ( uri = "/rest/storage-volumes" @@ -210,7 +190,7 @@ func (c *OVClient) CreateStorageVolume(sVol StorageVolume) error { func (c *OVClient) DeleteStorageVolume(name string) error { var ( - sVol StorageVolume + sVol StorageVolumeV3 err error t *Task uri string @@ -255,7 +235,7 @@ func (c *OVClient) DeleteStorageVolume(name string) error { return nil } -func (c *OVClient) UpdateStorageVolume(sVol StorageVolume) error { +func (c *OVClient) UpdateStorageVolume(sVol StorageVolumeV3) error { log.Infof("Initializing update of storage volume for %s.", sVol.Name) var ( uri = sVol.URI.String() diff --git a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume_attachment.go b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume_attachment.go index 1e109e9c..aa76009c 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume_attachment.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/ov/storage_volume_attachment.go @@ -17,7 +17,7 @@ type StorageAttachment struct { Description utils.Nstring `json:"description,omitempty"` ETAG string `json:"eTag,omitempty"` Host *Host `json:"host,omitempty"` - Paths []Paths `json:"paths,omitempty"` + Paths *Paths `json:"paths,omitempty"` Name string `json:"name,omitempty"` State string `json:"state,omitempty"` Status string `json:"status,omitempty"` @@ -25,7 +25,6 @@ type StorageAttachment struct { URI utils.Nstring `json:"uri,omitempty"` StorageSystemUri utils.Nstring `json:"storageSystemUri,omitempty"` StorageVolumeUri utils.Nstring `json:"storageVolumeUri,omitempty"` - OwnerUri utils.Nstring `json:"ownerUri,omitempty"` } type Host struct { @@ -57,7 +56,7 @@ type MutualChap struct { secret string `json:"secret,omitempty"` } -type StorageAttachmentsList struct { +type StorageAttachmentsListV2 struct { Total int `json:"total,omitempty"` // "total": 1, Count int 
`json:"count,omitempty"` // "count": 1, Start int `json:"start,omitempty"` // "start": 0, @@ -79,11 +78,11 @@ func (c *OVClient) GetStorageAttachmentByName(name string) (StorageAttachment, e } } -func (c *OVClient) GetStorageAttachments(filter string, sort string, start string, count string) (StorageAttachmentsList, error) { +func (c *OVClient) GetStorageAttachments(filter string, sort string, start string, count string) (StorageAttachmentsListV2, error) { var ( uri = "/rest/storage-volume-attachments" q map[string]interface{} - sAttachments StorageAttachmentsList + sAttachments StorageAttachmentsListV2 ) q = make(map[string]interface{}) if len(filter) > 0 { diff --git a/vendor/github.com/HewlettPackard/oneview-golang/rest/netutil.go b/vendor/github.com/HewlettPackard/oneview-golang/rest/netutil.go index 65ee09b4..b15603c2 100644 --- a/vendor/github.com/HewlettPackard/oneview-golang/rest/netutil.go +++ b/vendor/github.com/HewlettPackard/oneview-golang/rest/netutil.go @@ -190,9 +190,7 @@ func (c *Client) RestAPICall(method Method, path string, options interface{}) ([ // Added the condition to accomodate the response where only the response header is returned. if len(data) == 0 { - if resp.Header["Location"] != nil { - data = []byte(`{"URI":"` + resp.Header["Location"][0] + `"}`) - } + data = []byte(`{"URI":"` + resp.Header["Location"][0] + `"}`) } return data, nil } diff --git a/vendor/github.com/Unknwon/com/LICENSE b/vendor/github.com/Unknwon/com/LICENSE new file mode 100644 index 00000000..8405e89a --- /dev/null +++ b/vendor/github.com/Unknwon/com/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). 
+ +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/Unknwon/com/README.md b/vendor/github.com/Unknwon/com/README.md
new file mode 100644
index 00000000..8d821abd
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/README.md
@@ -0,0 +1,20 @@
+Common Functions
+================
+
+[![Build Status](https://travis-ci.org/Unknwon/com.svg)](https://travis-ci.org/Unknwon/com) [![Go Walker](http://gowalker.org/api/v1/badge)](http://gowalker.org/github.com/Unknwon/com)
+
+This is an open source project for commonly used functions for the Go programming language.
+
+This package needs >= **go 1.2**.
+
+Code Convention: based on [Go Code Convention](https://github.com/Unknwon/go-code-convention).
+
+## Contribute
+
+Your contribution is welcome, but you have to check the following steps after you add functions and commit them:
+
+1. Make sure you wrote user-friendly comments for **all functions**.
+2. Make sure you wrote test cases covering all possible conditions for **all functions** in file `*_test.go`.
+3. Make sure you wrote benchmarks for **all functions** in file `*_test.go`.
+4. Make sure you wrote useful examples for **all functions** in file `example_test.go`.
+5. Make sure you ran `go test` and got **PASS**.
diff --git a/vendor/github.com/Unknwon/com/cmd.go b/vendor/github.com/Unknwon/com/cmd.go
new file mode 100644
index 00000000..dc7086d8
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/cmd.go
@@ -0,0 +1,161 @@
+// +build go1.2
+
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package com is an open source project for commonly used functions for the Go programming language.
+package com
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"runtime"
+	"strings"
+)
+
+// ExecCmdDirBytes executes a system command in the given directory
+// and returns stdout and stderr in bytes type, along with a possible error.
+func ExecCmdDirBytes(dir, cmdName string, args ...string) ([]byte, []byte, error) {
+	bufOut := new(bytes.Buffer)
+	bufErr := new(bytes.Buffer)
+
+	cmd := exec.Command(cmdName, args...)
+	cmd.Dir = dir
+	cmd.Stdout = bufOut
+	cmd.Stderr = bufErr
+
+	err := cmd.Run()
+	return bufOut.Bytes(), bufErr.Bytes(), err
+}
+
+// ExecCmdBytes executes a system command
+// and returns stdout and stderr in bytes type, along with a possible error.
+func ExecCmdBytes(cmdName string, args ...string) ([]byte, []byte, error) {
+	return ExecCmdDirBytes("", cmdName, args...)
+}
+
+// ExecCmdDir executes a system command in the given directory
+// and returns stdout and stderr in string type, along with a possible error.
+func ExecCmdDir(dir, cmdName string, args ...string) (string, string, error) {
+	bufOut, bufErr, err := ExecCmdDirBytes(dir, cmdName, args...)
+	return string(bufOut), string(bufErr), err
+}
+
+// ExecCmd executes a system command
+// and returns stdout and stderr in string type, along with a possible error.
+func ExecCmd(cmdName string, args ...string) (string, string, error) {
+	return ExecCmdDir("", cmdName, args...)
+}
+
+// _________        .__               .____
+// \_   ___ \  ____ |  |   ___________|    |    ____   ____
+// /    \  \/ /  _ \|  |  /  _ \_  __ \    |   /  _ \ / ___\
+// \     \___(  <_> )  |_(  <_> )  |  \/    |__(  <_> ) /_/  >
+//  \______  /\____/|____/\____/|__|  |_______ \____/\___  /
+//         \/                                 \/    /_____/
+
+// Color number constants.
+const (
+	Gray = uint8(iota + 90)
+	Red
+	Green
+	Yellow
+	Blue
+	Magenta
+	//NRed = uint8(31) // Normal
+	EndColor = "\033[0m"
+)
+
+// getColorLevel returns colored level string by given level.
+func getColorLevel(level string) string {
+	level = strings.ToUpper(level)
+	switch level {
+	case "TRAC":
+		return fmt.Sprintf("\033[%dm%s\033[0m", Blue, level)
+	case "ERRO":
+		return fmt.Sprintf("\033[%dm%s\033[0m", Red, level)
+	case "WARN":
+		return fmt.Sprintf("\033[%dm%s\033[0m", Magenta, level)
+	case "SUCC":
+		return fmt.Sprintf("\033[%dm%s\033[0m", Green, level)
+	default:
+		return level
+	}
+}
+
+// ColorLogS colors log and returns colored content.
+// Log format: <level> <content [highlight][path]> [ error ].
+// Level: TRAC -> blue; ERRO -> red; WARN -> Magenta; SUCC -> green; others -> default.
+// Content: default; path: yellow; error -> red.
+// Level has to be surrounded by "[" and "]".
+// Highlights have to be surrounded by "# " and " #"(space), "#" will be deleted.
+// Paths have to be surrounded by "( " and " )"(space).
+// Errors have to be surrounded by "[ " and " ]"(space).
+// Note: it does not support Windows yet; contributions are welcome.
+func ColorLogS(format string, a ...interface{}) string {
+	log := fmt.Sprintf(format, a...)
+
+	var clog string
+
+	if runtime.GOOS != "windows" {
+		// Level.
+		i := strings.Index(log, "]")
+		if log[0] == '[' && i > -1 {
+			clog += "[" + getColorLevel(log[1:i]) + "]"
+		}
+
+		log = log[i+1:]
+
+		// Error.
+		log = strings.Replace(log, "[ ", fmt.Sprintf("[\033[%dm", Red), -1)
+		log = strings.Replace(log, " ]", EndColor+"]", -1)
+
+		// Path.
+		log = strings.Replace(log, "( ", fmt.Sprintf("(\033[%dm", Yellow), -1)
+		log = strings.Replace(log, " )", EndColor+")", -1)
+
+		// Highlights.
+		log = strings.Replace(log, "# ", fmt.Sprintf("\033[%dm", Gray), -1)
+		log = strings.Replace(log, " #", EndColor, -1)
+
+	} else {
+		// Level.
+		i := strings.Index(log, "]")
+		if log[0] == '[' && i > -1 {
+			clog += "[" + log[1:i] + "]"
+		}
+
+		log = log[i+1:]
+
+		// Error.
+		log = strings.Replace(log, "[ ", "[", -1)
+		log = strings.Replace(log, " ]", "]", -1)
+
+		// Path.
+		log = strings.Replace(log, "( ", "(", -1)
+		log = strings.Replace(log, " )", ")", -1)
+
+		// Highlights.
+		log = strings.Replace(log, "# ", "", -1)
+		log = strings.Replace(log, " #", "", -1)
+	}
+	return clog + log
+}
+
+// ColorLog prints colored log to stdout.
+// See color rules in function 'ColorLogS'.
+func ColorLog(format string, a ...interface{}) {
+	fmt.Print(ColorLogS(format, a...))
+}
diff --git a/vendor/github.com/Unknwon/com/convert.go b/vendor/github.com/Unknwon/com/convert.go
new file mode 100644
index 00000000..bf24aa8b
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/convert.go
@@ -0,0 +1,167 @@
+// Copyright 2014 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// StrTo converts a string to a specified type.
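Before the `StrTo` conversion helpers that follow, here is a minimal sketch of how the `ColorLog` markup above is meant to be used, together with the `ExecCmd` family from the same file. The command and log strings are illustrative only; the sketch assumes the vendored `github.com/Unknwon/com` package shown in this diff is importable.

```go
package main

import "github.com/Unknwon/com"

func main() {
	// On non-Windows systems "[TRAC]" renders blue, "( /tmp )" yellow,
	// and "[ EOF ]" red, per the markup rules documented on ColorLogS.
	com.ColorLog("[TRAC] scanning ( /tmp ) hit [ EOF ]\n")

	// ExecCmd runs a command and returns stdout, stderr, and an error.
	stdout, stderr, err := com.ExecCmd("go", "version")
	if err != nil {
		com.ColorLog("[ERRO] %s\n", stderr)
		return
	}
	com.ColorLog("[SUCC] %s", stdout)
}
```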
+type StrTo string + +func (f StrTo) Exist() bool { + return string(f) != string(0x1E) +} + +func (f StrTo) Uint8() (uint8, error) { + v, err := strconv.ParseUint(f.String(), 10, 8) + return uint8(v), err +} + +func (f StrTo) Int() (int, error) { + v, err := strconv.ParseInt(f.String(), 10, 0) + return int(v), err +} + +func (f StrTo) Int64() (int64, error) { + v, err := strconv.ParseInt(f.String(), 10, 64) + return int64(v), err +} + +func (f StrTo) Float64() (float64, error) { + v, err := strconv.ParseFloat(f.String(), 64) + return float64(v), err +} + +func (f StrTo) MustUint8() uint8 { + v, _ := f.Uint8() + return v +} + +func (f StrTo) MustInt() int { + v, _ := f.Int() + return v +} + +func (f StrTo) MustInt64() int64 { + v, _ := f.Int64() + return v +} + +func (f StrTo) MustFloat64() float64 { + v, _ := f.Float64() + return v +} + +func (f StrTo) String() string { + if f.Exist() { + return string(f) + } + return "" +} + +// Convert any type to string. +func ToStr(value interface{}, args ...int) (s string) { + switch v := value.(type) { + case bool: + s = strconv.FormatBool(v) + case float32: + s = strconv.FormatFloat(float64(v), 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 32)) + case float64: + s = strconv.FormatFloat(v, 'f', argInt(args).Get(0, -1), argInt(args).Get(1, 64)) + case int: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int8: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int16: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int32: + s = strconv.FormatInt(int64(v), argInt(args).Get(0, 10)) + case int64: + s = strconv.FormatInt(v, argInt(args).Get(0, 10)) + case uint: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint8: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint16: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint32: + s = strconv.FormatUint(uint64(v), argInt(args).Get(0, 10)) + case uint64: + s = strconv.FormatUint(v, argInt(args).Get(0, 10)) + case string: + s = v + case []byte: + s = string(v) + default: + s = fmt.Sprintf("%v", v) + } + return s +} + +type argInt []int + +func (a argInt) Get(i int, args ...int) (r int) { + if i >= 0 && i < len(a) { + r = a[i] + } else if len(args) > 0 { + r = args[0] + } + return +} + +// HexStr2int converts hex format string to decimal number. +func HexStr2int(hexStr string) (int, error) { + num := 0 + length := len(hexStr) + for i := 0; i < length; i++ { + char := hexStr[length-i-1] + factor := -1 + + switch { + case char >= '0' && char <= '9': + factor = int(char) - '0' + case char >= 'a' && char <= 'f': + factor = int(char) - 'a' + 10 + default: + return -1, fmt.Errorf("invalid hex: %s", string(char)) + } + + num += factor * PowInt(16, i) + } + return num, nil +} + +// Int2HexStr converts decimal number to hex format string. +func Int2HexStr(num int) (hex string) { + if num == 0 { + return "0" + } + + for num > 0 { + r := num % 16 + + c := "?" + if r >= 0 && r <= 9 { + c = string(r + '0') + } else { + c = string(r + 'a' - 10) + } + hex = c + hex + num = num / 16 + } + return hex +} diff --git a/vendor/github.com/Unknwon/com/dir.go b/vendor/github.com/Unknwon/com/dir.go new file mode 100644 index 00000000..c16e9de3 --- /dev/null +++ b/vendor/github.com/Unknwon/com/dir.go @@ -0,0 +1,218 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "errors" + "fmt" + "os" + "path" + "strings" +) + +// IsDir returns true if given path is a directory, +// or returns false when it's a file or does not exist. +func IsDir(dir string) bool { + f, e := os.Stat(dir) + if e != nil { + return false + } + return f.IsDir() +} + +func statDir(dirPath, recPath string, includeDir, isDirOnly, followSymlinks bool) ([]string, error) { + dir, err := os.Open(dirPath) + if err != nil { + return nil, err + } + defer dir.Close() + + fis, err := dir.Readdir(0) + if err != nil { + return nil, err + } + + statList := make([]string, 0) + for _, fi := range fis { + if strings.Contains(fi.Name(), ".DS_Store") { + continue + } + + relPath := path.Join(recPath, fi.Name()) + curPath := path.Join(dirPath, fi.Name()) + if fi.IsDir() { + if includeDir { + statList = append(statList, relPath+"/") + } + s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks) + if err != nil { + return nil, err + } + statList = append(statList, s...) + } else if !isDirOnly { + statList = append(statList, relPath) + } else if followSymlinks && fi.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(curPath) + if err != nil { + return nil, err + } + + if IsDir(link) { + if includeDir { + statList = append(statList, relPath+"/") + } + s, err := statDir(curPath, relPath, includeDir, isDirOnly, followSymlinks) + if err != nil { + return nil, err + } + statList = append(statList, s...) + } + } + } + return statList, nil +} + +// StatDir gathers information of given directory by depth-first. +// It returns slice of file list and includes subdirectories if enabled; +// it returns error and nil slice when error occurs in underlying functions, +// or given path is not a directory or does not exist. +// +// Slice does not include given path itself. +// If subdirectories is enabled, they will have suffix '/'. +func StatDir(rootPath string, includeDir ...bool) ([]string, error) { + if !IsDir(rootPath) { + return nil, errors.New("not a directory or does not exist: " + rootPath) + } + + isIncludeDir := false + if len(includeDir) >= 1 { + isIncludeDir = includeDir[0] + } + return statDir(rootPath, "", isIncludeDir, false, false) +} + +// LstatDir gathers information of given directory by depth-first. +// It returns slice of file list, follows symbolic links and includes subdirectories if enabled; +// it returns error and nil slice when error occurs in underlying functions, +// or given path is not a directory or does not exist. +// +// Slice does not include given path itself. +// If subdirectories is enabled, they will have suffix '/'. +func LstatDir(rootPath string, includeDir ...bool) ([]string, error) { + if !IsDir(rootPath) { + return nil, errors.New("not a directory or does not exist: " + rootPath) + } + + isIncludeDir := false + if len(includeDir) >= 1 { + isIncludeDir = includeDir[0] + } + return statDir(rootPath, "", isIncludeDir, false, true) +} + +// GetAllSubDirs returns all subdirectories of given root path. +// Slice does not include given path itself. 
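Before `GetAllSubDirs` itself follows, the recursive `statDir` walk above is easiest to grasp from the caller's side. A minimal sketch, assuming the vendored package is importable and that a local `testdata` directory exists (both are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	// Pass true to also include subdirectories; they come back with a
	// trailing "/", and all paths are relative to the root ("testdata").
	entries, err := com.StatDir("testdata", true)
	if err != nil {
		fmt.Println(err) // e.g. not a directory or does not exist: testdata
		return
	}
	for _, e := range entries {
		fmt.Println(e)
	}
}
```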
+func GetAllSubDirs(rootPath string) ([]string, error) {
+	if !IsDir(rootPath) {
+		return nil, errors.New("not a directory or does not exist: " + rootPath)
+	}
+	return statDir(rootPath, "", true, true, false)
+}
+
+// LgetAllSubDirs returns all subdirectories of given root path, including
+// following symbolic links, if any.
+// Slice does not include given path itself.
+func LgetAllSubDirs(rootPath string) ([]string, error) {
+	if !IsDir(rootPath) {
+		return nil, errors.New("not a directory or does not exist: " + rootPath)
+	}
+	return statDir(rootPath, "", true, true, true)
+}
+
+// GetFileListBySuffix returns an ordered list of file paths.
+// It recognizes if the given path is a file, and does not search recursively.
+func GetFileListBySuffix(dirPath, suffix string) ([]string, error) {
+	if !IsExist(dirPath) {
+		return nil, fmt.Errorf("given path does not exist: %s", dirPath)
+	} else if IsFile(dirPath) {
+		return []string{dirPath}, nil
+	}
+
+	// Given path is a directory.
+	dir, err := os.Open(dirPath)
+	if err != nil {
+		return nil, err
+	}
+
+	fis, err := dir.Readdir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	files := make([]string, 0, len(fis))
+	for _, fi := range fis {
+		if strings.HasSuffix(fi.Name(), suffix) {
+			files = append(files, path.Join(dirPath, fi.Name()))
+		}
+	}
+
+	return files, nil
+}
+
+// CopyDir copies files recursively from the source to the target directory.
+//
+// The filter accepts a function that processes the path info
+// and should return true if the path needs to be filtered out.
+//
+// It returns an error when an error occurs in underlying functions.
+func CopyDir(srcPath, destPath string, filters ...func(filePath string) bool) error {
+	// Check if target directory exists.
+	if IsExist(destPath) {
+		return errors.New("file or directory already exists: " + destPath)
+	}
+
+	err := os.MkdirAll(destPath, os.ModePerm)
+	if err != nil {
+		return err
+	}
+
+	// Gather directory info.
+	infos, err := StatDir(srcPath, true)
+	if err != nil {
+		return err
+	}
+
+	var filter func(filePath string) bool
+	if len(filters) > 0 {
+		filter = filters[0]
+	}
+
+	for _, info := range infos {
+		if filter != nil && filter(info) {
+			continue
+		}
+
+		curPath := path.Join(destPath, info)
+		if strings.HasSuffix(info, "/") {
+			err = os.MkdirAll(curPath, os.ModePerm)
+		} else {
+			err = Copy(path.Join(srcPath, info), curPath)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Unknwon/com/file.go b/vendor/github.com/Unknwon/com/file.go
new file mode 100644
index 00000000..b51502c9
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/file.go
@@ -0,0 +1,145 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+)
+
+// Storage unit constants.
+const (
+	Byte  = 1
+	KByte = Byte * 1024
+	MByte = KByte * 1024
+	GByte = MByte * 1024
+	TByte = GByte * 1024
+	PByte = TByte * 1024
+	EByte = PByte * 1024
+)
+
+func logn(n, b float64) float64 {
+	return math.Log(n) / math.Log(b)
+}
+
+func humanateBytes(s uint64, base float64, sizes []string) string {
+	if s < 10 {
+		return fmt.Sprintf("%dB", s)
+	}
+	e := math.Floor(logn(float64(s), base))
+	suffix := sizes[int(e)]
+	val := float64(s) / math.Pow(base, math.Floor(e))
+	f := "%.0f"
+	if val < 10 {
+		f = "%.1f"
+	}
+
+	return fmt.Sprintf(f+"%s", val, suffix)
+}
+
+// HumaneFileSize calculates the file size and generates a user-friendly string.
+func HumaneFileSize(s uint64) string {
+	sizes := []string{"B", "KB", "MB", "GB", "TB", "PB", "EB"}
+	return humanateBytes(s, 1024, sizes)
+}
+
+// FileMTime returns file modified time and possible error.
+func FileMTime(file string) (int64, error) {
+	f, err := os.Stat(file)
+	if err != nil {
+		return 0, err
+	}
+	return f.ModTime().Unix(), nil
+}
+
+// FileSize returns file size in bytes and possible error.
+func FileSize(file string) (int64, error) {
+	f, err := os.Stat(file)
+	if err != nil {
+		return 0, err
+	}
+	return f.Size(), nil
+}
+
+// Copy copies file from source to target path.
+func Copy(src, dest string) error {
+	// Gather file information to set back later.
+	si, err := os.Lstat(src)
+	if err != nil {
+		return err
+	}
+
+	// Handle symbolic link.
+	if si.Mode()&os.ModeSymlink != 0 {
+		target, err := os.Readlink(src)
+		if err != nil {
+			return err
+		}
+		// NOTE: os.Chmod and os.Chtimes don't recognize symbolic links,
+		// which will lead to a "no such file or directory" error.
+		return os.Symlink(target, dest)
+	}
+
+	sr, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer sr.Close()
+
+	dw, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	defer dw.Close()
+
+	if _, err = io.Copy(dw, sr); err != nil {
+		return err
+	}
+
+	// Set back file information.
+	if err = os.Chtimes(dest, si.ModTime(), si.ModTime()); err != nil {
+		return err
+	}
+	return os.Chmod(dest, si.Mode())
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it
+// and its upper level paths.
+func WriteFile(filename string, data []byte) error {
+	os.MkdirAll(path.Dir(filename), os.ModePerm)
+	return ioutil.WriteFile(filename, data, 0655)
+}
+
+// IsFile returns true if given path is a file,
+// or returns false when it's a directory or does not exist.
+func IsFile(filePath string) bool {
+	f, e := os.Stat(filePath)
+	if e != nil {
+		return false
+	}
+	return !f.IsDir()
+}
+
+// IsExist checks whether a file or directory exists.
+// It returns false when the file or directory does not exist.
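Before `IsExist` below, note the arithmetic in `humanateBytes` above: the suffix is picked via a base-1024 logarithm, and values under 10 keep one decimal. A quick sketch of the resulting output (vendored import assumed as before):

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	for _, n := range []uint64{5, 1024, 2048, 3*1024*1024 + 512*1024} {
		// Prints: 5B, 1.0KB, 2.0KB, 3.5MB
		fmt.Println(com.HumaneFileSize(n))
	}
}
```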
+func IsExist(path string) bool { + _, err := os.Stat(path) + return err == nil || os.IsExist(err) +} diff --git a/vendor/github.com/Unknwon/com/go.mod b/vendor/github.com/Unknwon/com/go.mod new file mode 100644 index 00000000..0bb87b46 --- /dev/null +++ b/vendor/github.com/Unknwon/com/go.mod @@ -0,0 +1,8 @@ +module github.com/Unknwon/com + +require ( + github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect + github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c +) diff --git a/vendor/github.com/Unknwon/com/go.sum b/vendor/github.com/Unknwon/com/go.sum new file mode 100644 index 00000000..3fcc3035 --- /dev/null +++ b/vendor/github.com/Unknwon/com/go.sum @@ -0,0 +1,8 @@ +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= +github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY= +github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= +github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= diff --git a/vendor/github.com/Unknwon/com/html.go b/vendor/github.com/Unknwon/com/html.go new file mode 100644 index 00000000..8a99df4e --- /dev/null +++ b/vendor/github.com/Unknwon/com/html.go @@ -0,0 +1,60 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "html" + "regexp" + "strings" +) + +// Html2JS converts []byte type of HTML content into JS format. 
+func Html2JS(data []byte) []byte {
+	s := string(data)
+	s = strings.Replace(s, `\`, `\\`, -1)
+	s = strings.Replace(s, "\n", `\n`, -1)
+	s = strings.Replace(s, "\r", "", -1)
+	s = strings.Replace(s, "\"", `\"`, -1)
+	s = strings.Replace(s, "<table>", "&lt;table>", -1)
+	return []byte(s)
+}
+
+// HtmlEncode encodes html chars to string.
+func HtmlEncode(str string) string {
+	return html.EscapeString(str)
+}
+
+// HtmlDecode decodes string to html chars.
+func HtmlDecode(str string) string {
+	return html.UnescapeString(str)
+}
+
+// StripTags strips tags in html string.
+func StripTags(src string) string {
+	// remove style/script blocks, other HTML tags, and HTML comments
+	re := regexp.MustCompile(`(?s)<(?:style|script)[^<>]*>.*?</(?:style|script)>|<[^<>]*>|<!--.*?-->`)
+	src = re.ReplaceAllString(src, "")
+
+	// collapse all runs of whitespace (2+) into \n
+	re = regexp.MustCompile(`\s{2,}`)
+	src = re.ReplaceAllString(src, "\n")
+
+	return strings.TrimSpace(src)
+}
+
+// Nl2br changes \n to <br/>.
+func Nl2br(str string) string {
+	return strings.Replace(str, "\n", "<br/>", -1)
", -1) +} diff --git a/vendor/github.com/Unknwon/com/http.go b/vendor/github.com/Unknwon/com/http.go new file mode 100644 index 00000000..cf0820f3 --- /dev/null +++ b/vendor/github.com/Unknwon/com/http.go @@ -0,0 +1,201 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "path" +) + +type NotFoundError struct { + Message string +} + +func (e NotFoundError) Error() string { + return e.Message +} + +type RemoteError struct { + Host string + Err error +} + +func (e *RemoteError) Error() string { + return e.Err.Error() +} + +var UserAgent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1541.0 Safari/537.36" + +// HttpCall makes HTTP method call. +func HttpCall(client *http.Client, method, url string, header http.Header, body io.Reader) (io.ReadCloser, error) { + req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", UserAgent) + for k, vs := range header { + req.Header[k] = vs + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode == 200 { + return resp.Body, nil + } + resp.Body.Close() + if resp.StatusCode == 404 { // 403 can be rate limit error. || resp.StatusCode == 403 { + err = fmt.Errorf("resource not found: %s", url) + } else { + err = fmt.Errorf("%s %s -> %d", method, url, resp.StatusCode) + } + return nil, err +} + +// HttpGet gets the specified resource. +// ErrNotFound is returned if the server responds with status 404. +func HttpGet(client *http.Client, url string, header http.Header) (io.ReadCloser, error) { + return HttpCall(client, "GET", url, header, nil) +} + +// HttpPost posts the specified resource. +// ErrNotFound is returned if the server responds with status 404. +func HttpPost(client *http.Client, url string, header http.Header, body []byte) (io.ReadCloser, error) { + return HttpCall(client, "POST", url, header, bytes.NewBuffer(body)) +} + +// HttpGetToFile gets the specified resource and writes to file. +// ErrNotFound is returned if the server responds with status 404. +func HttpGetToFile(client *http.Client, url string, header http.Header, fileName string) error { + rc, err := HttpGet(client, url, header) + if err != nil { + return err + } + defer rc.Close() + + os.MkdirAll(path.Dir(fileName), os.ModePerm) + f, err := os.Create(fileName) + if err != nil { + return err + } + defer f.Close() + _, err = io.Copy(f, rc) + return err +} + +// HttpGetBytes gets the specified resource. ErrNotFound is returned if the server +// responds with status 404. +func HttpGetBytes(client *http.Client, url string, header http.Header) ([]byte, error) { + rc, err := HttpGet(client, url, header) + if err != nil { + return nil, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// HttpGetJSON gets the specified resource and mapping to struct. 
+// ErrNotFound is returned if the server responds with status 404. +func HttpGetJSON(client *http.Client, url string, v interface{}) error { + rc, err := HttpGet(client, url, nil) + if err != nil { + return err + } + defer rc.Close() + err = json.NewDecoder(rc).Decode(v) + if _, ok := err.(*json.SyntaxError); ok { + return fmt.Errorf("JSON syntax error at %s", url) + } + return nil +} + +// HttpPostJSON posts the specified resource with struct values, +// and maps results to struct. +// ErrNotFound is returned if the server responds with status 404. +func HttpPostJSON(client *http.Client, url string, body, v interface{}) error { + data, err := json.Marshal(body) + if err != nil { + return err + } + rc, err := HttpPost(client, url, http.Header{"content-type": []string{"application/json"}}, data) + if err != nil { + return err + } + defer rc.Close() + err = json.NewDecoder(rc).Decode(v) + if _, ok := err.(*json.SyntaxError); ok { + return fmt.Errorf("JSON syntax error at %s", url) + } + return nil +} + +// A RawFile describes a file that can be downloaded. +type RawFile interface { + Name() string + RawUrl() string + Data() []byte + SetData([]byte) +} + +// FetchFiles fetches files specified by the rawURL field in parallel. +func FetchFiles(client *http.Client, files []RawFile, header http.Header) error { + ch := make(chan error, len(files)) + for i := range files { + go func(i int) { + p, err := HttpGetBytes(client, files[i].RawUrl(), nil) + if err != nil { + ch <- err + return + } + files[i].SetData(p) + ch <- nil + }(i) + } + for _ = range files { + if err := <-ch; err != nil { + return err + } + } + return nil +} + +// FetchFilesCurl uses command `curl` to fetch files specified by the rawURL field in parallel. +func FetchFilesCurl(files []RawFile, curlOptions ...string) error { + ch := make(chan error, len(files)) + for i := range files { + go func(i int) { + stdout, _, err := ExecCmd("curl", append(curlOptions, files[i].RawUrl())...) + if err != nil { + ch <- err + return + } + + files[i].SetData([]byte(stdout)) + ch <- nil + }(i) + } + for _ = range files { + if err := <-ch; err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/Unknwon/com/math.go b/vendor/github.com/Unknwon/com/math.go new file mode 100644 index 00000000..62b77e87 --- /dev/null +++ b/vendor/github.com/Unknwon/com/math.go @@ -0,0 +1,29 @@ +// Copyright 2014 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +// PowInt is int type of math.Pow function. 
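Before `PowInt`'s body below, a minimal sketch of the JSON helpers just defined, which wrap `HttpGet` with a decoder. The URL and field name are placeholders, and the vendored import path is assumed:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/Unknwon/com"
)

func main() {
	var v struct {
		Name string `json:"name"`
	}
	// Hypothetical endpoint returning e.g. {"name": "gopher"}.
	err := com.HttpGetJSON(http.DefaultClient, "https://example.com/user.json", &v)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(v.Name)
}
```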
+func PowInt(x int, y int) int {
+	if y <= 0 {
+		return 1
+	} else {
+		if y%2 == 0 {
+			sqrt := PowInt(x, y/2)
+			return sqrt * sqrt
+		} else {
+			return PowInt(x, y-1) * x
+		}
+	}
+}
diff --git a/vendor/github.com/Unknwon/com/path.go b/vendor/github.com/Unknwon/com/path.go
new file mode 100644
index 00000000..b1e860de
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/path.go
@@ -0,0 +1,80 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+// GetGOPATHs returns all paths in the GOPATH variable.
+func GetGOPATHs() []string {
+	gopath := os.Getenv("GOPATH")
+	var paths []string
+	if runtime.GOOS == "windows" {
+		gopath = strings.Replace(gopath, "\\", "/", -1)
+		paths = strings.Split(gopath, ";")
+	} else {
+		paths = strings.Split(gopath, ":")
+	}
+	return paths
+}
+
+// GetSrcPath returns the app's source code path.
+// It only works when you have a src folder in GOPATH;
+// it returns an error when it is not able to locate the source folder path.
+func GetSrcPath(importPath string) (appPath string, err error) {
+	paths := GetGOPATHs()
+	for _, p := range paths {
+		if IsExist(p + "/src/" + importPath + "/") {
+			appPath = p + "/src/" + importPath + "/"
+			break
+		}
+	}
+
+	if len(appPath) == 0 {
+		return "", errors.New("Unable to locate source folder path")
+	}
+
+	appPath = filepath.Dir(appPath) + "/"
+	if runtime.GOOS == "windows" {
+		// Replace all '\' to '/'.
+		appPath = strings.Replace(appPath, "\\", "/", -1)
+	}
+
+	return appPath, nil
+}
+
+// HomeDir returns the home directory: the value of '~' on Linux,
+// or USERPROFILE (falling back to HOMEDRIVE+HOMEPATH) on Windows;
+// it returns an error when the variable is empty.
+func HomeDir() (home string, err error) {
+	if runtime.GOOS == "windows" {
+		home = os.Getenv("USERPROFILE")
+		if len(home) == 0 {
+			home = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
+		}
+	} else {
+		home = os.Getenv("HOME")
+	}
+
+	if len(home) == 0 {
+		return "", errors.New("Cannot specify home directory because it's empty")
+	}
+
+	return home, nil
+}
diff --git a/vendor/github.com/Unknwon/com/regex.go b/vendor/github.com/Unknwon/com/regex.go
new file mode 100644
index 00000000..14926474
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/regex.go
@@ -0,0 +1,56 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
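Before the regex helpers begin, a short sketch of the path utilities defined above; the printed values are illustrative and depend entirely on the local environment:

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	// Splits $GOPATH on ':' (on Windows: ';', with backslashes normalized).
	fmt.Println(com.GetGOPATHs()) // e.g. [/home/me/go]

	if home, err := com.HomeDir(); err == nil {
		fmt.Println(home) // e.g. /home/me
	}
}
```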
+ +package com + +import "regexp" + +const ( + regex_email_pattern = `(?i)[A-Z0-9._%+-]+@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}` + regex_strict_email_pattern = `(?i)[A-Z0-9!#$%&'*+/=?^_{|}~-]+` + + `(?:\.[A-Z0-9!#$%&'*+/=?^_{|}~-]+)*` + + `@(?:[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?\.)+` + + `[A-Z0-9](?:[A-Z0-9-]*[A-Z0-9])?` + regex_url_pattern = `(ftp|http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?` +) + +var ( + regex_email *regexp.Regexp + regex_strict_email *regexp.Regexp + regex_url *regexp.Regexp +) + +func init() { + regex_email = regexp.MustCompile(regex_email_pattern) + regex_strict_email = regexp.MustCompile(regex_strict_email_pattern) + regex_url = regexp.MustCompile(regex_url_pattern) +} + +// IsEmail validates string is an email address, if not return false +// basically validation can match 99% cases +func IsEmail(email string) bool { + return regex_email.MatchString(email) +} + +// IsEmailRFC validates string is an email address, if not return false +// this validation omits RFC 2822 +func IsEmailRFC(email string) bool { + return regex_strict_email.MatchString(email) +} + +// IsUrl validates string is a url link, if not return false +// simple validation can match 99% cases +func IsUrl(url string) bool { + return regex_url.MatchString(url) +} diff --git a/vendor/github.com/Unknwon/com/slice.go b/vendor/github.com/Unknwon/com/slice.go new file mode 100644 index 00000000..c3c9ab2e --- /dev/null +++ b/vendor/github.com/Unknwon/com/slice.go @@ -0,0 +1,87 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "strings" +) + +// AppendStr appends string to slice with no duplicates. +func AppendStr(strs []string, str string) []string { + for _, s := range strs { + if s == str { + return strs + } + } + return append(strs, str) +} + +// CompareSliceStr compares two 'string' type slices. +// It returns true if elements and order are both the same. +func CompareSliceStr(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + + return true +} + +// CompareSliceStrU compares two 'string' type slices. +// It returns true if elements are the same, and ignores the order. +func CompareSliceStrU(s1, s2 []string) bool { + if len(s1) != len(s2) { + return false + } + + for i := range s1 { + for j := len(s2) - 1; j >= 0; j-- { + if s1[i] == s2[j] { + s2 = append(s2[:j], s2[j+1:]...) + break + } + } + } + if len(s2) > 0 { + return false + } + return true +} + +// IsSliceContainsStr returns true if the string exists in given slice, ignore case. +func IsSliceContainsStr(sl []string, str string) bool { + str = strings.ToLower(str) + for _, s := range sl { + if strings.ToLower(s) == str { + return true + } + } + return false +} + +// IsSliceContainsInt64 returns true if the int64 exists in given slice. 
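The validators above are substring matchers over the compiled patterns, so they deliberately trade strictness for convenience, as their comments note. A small sketch (vendored import assumed):

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	fmt.Println(com.IsEmail("gopher@golang.org")) // true
	fmt.Println(com.IsEmail("not-an-email"))      // false
	fmt.Println(com.IsUrl("https://golang.org"))  // true
}
```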
+func IsSliceContainsInt64(sl []int64, i int64) bool { + for _, s := range sl { + if s == i { + return true + } + } + return false +} diff --git a/vendor/github.com/Unknwon/com/string.go b/vendor/github.com/Unknwon/com/string.go new file mode 100644 index 00000000..7080d174 --- /dev/null +++ b/vendor/github.com/Unknwon/com/string.go @@ -0,0 +1,253 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "errors" + r "math/rand" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// AESGCMEncrypt encrypts plaintext with the given key using AES in GCM mode. +func AESGCMEncrypt(key, plaintext []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + nonce := make([]byte, gcm.NonceSize()) + if _, err := rand.Read(nonce); err != nil { + return nil, err + } + + ciphertext := gcm.Seal(nil, nonce, plaintext, nil) + return append(nonce, ciphertext...), nil +} + +// AESGCMDecrypt decrypts ciphertext with the given key using AES in GCM mode. +func AESGCMDecrypt(key, ciphertext []byte) ([]byte, error) { + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + gcm, err := cipher.NewGCM(block) + if err != nil { + return nil, err + } + + size := gcm.NonceSize() + if len(ciphertext)-size <= 0 { + return nil, errors.New("Ciphertext is empty") + } + + nonce := ciphertext[:size] + ciphertext = ciphertext[size:] + + plainText, err := gcm.Open(nil, nonce, ciphertext, nil) + if err != nil { + return nil, err + } + + return plainText, nil +} + +// IsLetter returns true if the 'l' is an English letter. +func IsLetter(l uint8) bool { + n := (l | 0x20) - 'a' + if n >= 0 && n < 26 { + return true + } + return false +} + +// Expand replaces {k} in template with match[k] or subs[atoi(k)] if k is not in match. +func Expand(template string, match map[string]string, subs ...string) string { + var p []byte + var i int + for { + i = strings.Index(template, "{") + if i < 0 { + break + } + p = append(p, template[:i]...) + template = template[i+1:] + i = strings.Index(template, "}") + if s, ok := match[template[:i]]; ok { + p = append(p, s...) + } else { + j, _ := strconv.Atoi(template[:i]) + if j >= len(subs) { + p = append(p, []byte("Missing")...) + } else { + p = append(p, subs[j]...) + } + } + template = template[i+1:] + } + p = append(p, template...) + return string(p) +} + +// Reverse s string, support unicode +func Reverse(s string) string { + n := len(s) + runes := make([]rune, n) + for _, rune := range s { + n-- + runes[n] = rune + } + return string(runes[n:]) +} + +// RandomCreateBytes generate random []byte by specify chars. 
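Note how `AESGCMEncrypt` above prepends the random nonce to the returned ciphertext, and `AESGCMDecrypt` strips it off again before opening. The round trip below sketches that contract with a throwaway 32-byte key (AES-256); the key and plaintext are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	key := []byte("0123456789abcdef0123456789abcdef") // 32 bytes => AES-256
	ct, err := com.AESGCMEncrypt(key, []byte("hello"))
	if err != nil {
		panic(err)
	}
	pt, err := com.AESGCMDecrypt(key, ct) // reads the nonce from the prefix
	if err != nil {
		panic(err)
	}
	fmt.Println(string(pt)) // hello
}
```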
+func RandomCreateBytes(n int, alphabets ...byte) []byte { + const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + var randby bool + if num, err := rand.Read(bytes); num != n || err != nil { + r.Seed(time.Now().UnixNano()) + randby = true + } + for i, b := range bytes { + if len(alphabets) == 0 { + if randby { + bytes[i] = alphanum[r.Intn(len(alphanum))] + } else { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + } else { + if randby { + bytes[i] = alphabets[r.Intn(len(alphabets))] + } else { + bytes[i] = alphabets[b%byte(len(alphabets))] + } + } + } + return bytes +} + +// ToSnakeCase can convert all upper case characters in a string to +// underscore format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// +// From https://github.com/huandu/xstrings +func ToSnakeCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &bytes.Buffer{} + var prev, r0, r1 rune + var size int + + r0 = '_' + + for len(str) > 0 { + prev = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + switch { + case r0 == utf8.RuneError: + buf.WriteByte(byte(str[0])) + + case unicode.IsUpper(r0): + if prev != '_' { + buf.WriteRune('_') + } + + buf.WriteRune(unicode.ToLower(r0)) + + if len(str) == 0 { + break + } + + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !unicode.IsUpper(r0) { + buf.WriteRune(r0) + break + } + + // find next non-upper-case character and insert `_` properly. + // it's designed to convert `HTTPServer` to `http_server`. + // if there are more than 2 adjacent upper case characters in a word, + // treat them as an abbreviation plus a normal word. + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if r0 == utf8.RuneError { + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteByte(byte(str[0])) + break + } + + if !unicode.IsUpper(r0) { + if r0 == '_' || r0 == ' ' || r0 == '-' { + r0 = '_' + + buf.WriteRune(unicode.ToLower(r1)) + } else { + buf.WriteRune('_') + buf.WriteRune(unicode.ToLower(r1)) + buf.WriteRune(r0) + } + + break + } + + buf.WriteRune(unicode.ToLower(r1)) + } + + if len(str) == 0 || r0 == '_' { + buf.WriteRune(unicode.ToLower(r0)) + break + } + + default: + if r0 == ' ' || r0 == '-' { + r0 = '_' + } + + buf.WriteRune(r0) + } + } + + return buf.String() +} diff --git a/vendor/github.com/Unknwon/com/time.go b/vendor/github.com/Unknwon/com/time.go new file mode 100644 index 00000000..dd1cdbcf --- /dev/null +++ b/vendor/github.com/Unknwon/com/time.go @@ -0,0 +1,115 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
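The `ToSnakeCase` state machine above treats a run of upper-case letters as an abbreviation followed by a normal word. A sketch that reproduces the samples from its own comment (vendored import assumed):

```go
package main

import (
	"fmt"

	"github.com/Unknwon/com"
)

func main() {
	for _, s := range []string{"FirstName", "HTTPServer", "NoHTTPS", "GO-PATH"} {
		// Prints: first_name, http_server, no_https, go_path
		fmt.Println(com.ToSnakeCase(s))
	}
}
```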
+ +package com + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Format unix time int64 to string +func Date(ti int64, format string) string { + t := time.Unix(int64(ti), 0) + return DateT(t, format) +} + +// Format unix time string to string +func DateS(ts string, format string) string { + i, _ := strconv.ParseInt(ts, 10, 64) + return Date(i, format) +} + +// Format time.Time struct to string +// MM - month - 01 +// M - month - 1, single bit +// DD - day - 02 +// D - day 2 +// YYYY - year - 2006 +// YY - year - 06 +// HH - 24 hours - 03 +// H - 24 hours - 3 +// hh - 12 hours - 03 +// h - 12 hours - 3 +// mm - minute - 04 +// m - minute - 4 +// ss - second - 05 +// s - second = 5 +func DateT(t time.Time, format string) string { + res := strings.Replace(format, "MM", t.Format("01"), -1) + res = strings.Replace(res, "M", t.Format("1"), -1) + res = strings.Replace(res, "DD", t.Format("02"), -1) + res = strings.Replace(res, "D", t.Format("2"), -1) + res = strings.Replace(res, "YYYY", t.Format("2006"), -1) + res = strings.Replace(res, "YY", t.Format("06"), -1) + res = strings.Replace(res, "HH", fmt.Sprintf("%02d", t.Hour()), -1) + res = strings.Replace(res, "H", fmt.Sprintf("%d", t.Hour()), -1) + res = strings.Replace(res, "hh", t.Format("03"), -1) + res = strings.Replace(res, "h", t.Format("3"), -1) + res = strings.Replace(res, "mm", t.Format("04"), -1) + res = strings.Replace(res, "m", t.Format("4"), -1) + res = strings.Replace(res, "ss", t.Format("05"), -1) + res = strings.Replace(res, "s", t.Format("5"), -1) + return res +} + +// DateFormat pattern rules. +var datePatterns = []string{ + // year + "Y", "2006", // A full numeric representation of a year, 4 digits Examples: 1999 or 2003 + "y", "06", //A two digit representation of a year Examples: 99 or 03 + + // month + "m", "01", // Numeric representation of a month, with leading zeros 01 through 12 + "n", "1", // Numeric representation of a month, without leading zeros 1 through 12 + "M", "Jan", // A short textual representation of a month, three letters Jan through Dec + "F", "January", // A full textual representation of a month, such as January or March January through December + + // day + "d", "02", // Day of the month, 2 digits with leading zeros 01 to 31 + "j", "2", // Day of the month without leading zeros 1 to 31 + + // week + "D", "Mon", // A textual representation of a day, three letters Mon through Sun + "l", "Monday", // A full textual representation of the day of the week Sunday through Saturday + + // time + "g", "3", // 12-hour format of an hour without leading zeros 1 through 12 + "G", "15", // 24-hour format of an hour without leading zeros 0 through 23 + "h", "03", // 12-hour format of an hour with leading zeros 01 through 12 + "H", "15", // 24-hour format of an hour with leading zeros 00 through 23 + + "a", "pm", // Lowercase Ante meridiem and Post meridiem am or pm + "A", "PM", // Uppercase Ante meridiem and Post meridiem AM or PM + + "i", "04", // Minutes with leading zeros 00 to 59 + "s", "05", // Seconds, with leading zeros 00 through 59 + + // time zone + "T", "MST", + "P", "-07:00", + "O", "-0700", + + // RFC 2822 + "r", time.RFC1123Z, +} + +// Parse Date use PHP time format. +func DateParse(dateString, format string) (time.Time, error) { + replacer := strings.NewReplacer(datePatterns...) 
+ format = replacer.Replace(format) + return time.ParseInLocation(format, dateString, time.Local) +} diff --git a/vendor/github.com/Unknwon/com/url.go b/vendor/github.com/Unknwon/com/url.go new file mode 100644 index 00000000..b0b7c0e1 --- /dev/null +++ b/vendor/github.com/Unknwon/com/url.go @@ -0,0 +1,41 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "encoding/base64" + "net/url" +) + +// url encode string, is + not %20 +func UrlEncode(str string) string { + return url.QueryEscape(str) +} + +// url decode string +func UrlDecode(str string) (string, error) { + return url.QueryUnescape(str) +} + +// base64 encode +func Base64Encode(str string) string { + return base64.StdEncoding.EncodeToString([]byte(str)) +} + +// base64 decode +func Base64Decode(str string) (string, error) { + s, e := base64.StdEncoding.DecodeString(str) + return string(s), e +} diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO new file mode 100644 index 00000000..716561d5 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/DCO @@ -0,0 +1,36 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/agext/levenshtein/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS new file mode 100644 index 00000000..726c2afb --- /dev/null +++ b/vendor/github.com/agext/levenshtein/MAINTAINERS @@ -0,0 +1 @@ +Alex Bucataru (@AlexBucataru) diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE new file mode 100644 index 00000000..eaffaab9 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/NOTICE @@ -0,0 +1,5 @@ +Alrux Go EXTensions (AGExt) - package levenshtein +Copyright 2016 ALRUX Inc. + +This product includes software developed at ALRUX Inc. +(http://www.alrux.com/). diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md new file mode 100644 index 00000000..9e425587 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/README.md @@ -0,0 +1,38 @@ +# A Go package for calculating the Levenshtein distance between two strings + +[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)  +[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein) +[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein) +[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein) + + +This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org). + +## Project Status + +v1.2.2 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis. + +This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome. + +## Overview + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. 
All values under this threshold are rounded down to 0. + +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. + +## Installation + +``` +go get github.com/agext/levenshtein +``` + +## License + +Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/agext/levenshtein/go.mod b/vendor/github.com/agext/levenshtein/go.mod new file mode 100644 index 00000000..545d432b --- /dev/null +++ b/vendor/github.com/agext/levenshtein/go.mod @@ -0,0 +1 @@ +module github.com/agext/levenshtein diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go new file mode 100644 index 00000000..df69ce70 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/levenshtein.go @@ -0,0 +1,290 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure. + +The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0. + +A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded. + +The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0. 
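+
+A short usage sketch (illustrative only; the values shown assume the default
+parameters from NewParams):
+
+	d := levenshtein.Distance("kitten", "sitting", nil)   // 3 with the default costs
+	s := levenshtein.Similarity("kitten", "sitting", nil) // 1 - 3/7, about 0.57
+	m := levenshtein.Match("kitten", "sitting", nil)      // same as Similarity here: no common prefix, so no bonus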
+ +The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest. + +The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed. +*/ +package levenshtein + +// Calculate determines the Levenshtein distance between two strings, using +// the given costs for each edit operation. It returns the distance along with +// the lengths of the longest common prefix and suffix. +// +// If maxCost is non-zero, the calculation stops as soon as the distance is determined +// to be greater than maxCost. Therefore, any return value higher than maxCost is a +// lower bound for the actual distance. +func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) { + l1, l2 := len(str1), len(str2) + // trim common prefix, if any, as it doesn't affect the distance + for ; prefixLen < l1 && prefixLen < l2; prefixLen++ { + if str1[prefixLen] != str2[prefixLen] { + break + } + } + str1, str2 = str1[prefixLen:], str2[prefixLen:] + l1 -= prefixLen + l2 -= prefixLen + // trim common suffix, if any, as it doesn't affect the distance + for 0 < l1 && 0 < l2 { + if str1[l1-1] != str2[l2-1] { + str1, str2 = str1[:l1], str2[:l2] + break + } + l1-- + l2-- + suffixLen++ + } + // if the first string is empty, the distance is the length of the second string times the cost of insertion + if l1 == 0 { + dist = l2 * insCost + return + } + // if the second string is empty, the distance is the length of the first string times the cost of deletion + if l2 == 0 { + dist = l1 * delCost + return + } + + // variables used in inner "for" loops + var y, dy, c, l int + + // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited' + if maxCost > 0 { + if subCost < delCost+insCost { + if maxCost >= l1*subCost+(l2-l1)*insCost { + maxCost = 0 + } + } else { + if maxCost >= l1*delCost+l2*insCost { + maxCost = 0 + } + } + } + + if maxCost > 0 { + // prefer the longer string first, to minimize time; + // a swap also transposes the meanings of insertion and deletion. + if l1 < l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + + // the length differential times cost of deletion is a lower bound for the cost; + // if it is higher than the maxCost, there is no point going into the main calculation. 
+ if dist = (l1 - l2) * delCost; dist > maxCost { + return + } + + d := make([]int, l1+1) + + // offset and length of d in the current row + doff, dlen := 0, 1 + for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ { + d[y] = dy + y++ + dy = y * delCost + } + // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + + for x := 0; x < l2; x++ { + dy, d[doff] = d[doff], d[doff]+insCost + for d[doff] > maxCost && dlen > 0 { + if str1[doff] != str2[x] { + dy += subCost + } + doff++ + dlen-- + if c = d[doff] + insCost; c < dy { + dy = c + } + dy, d[doff] = d[doff], dy + } + for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + if y < l1 { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy { + y++ + dlen++ + } + } + // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen]) + if dlen == 0 { + dist = maxCost + 1 + return + } + } + if doff+dlen-1 < l1 { + dist = maxCost + 1 + return + } + dist = d[l1] + } else { + // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is + // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space + // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html + + // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway; + // a swap also transposes the meanings of insertion and deletion. + if l1 > l2 { + str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost + } + d := make([]int, l1+1) + + for y = 1; y <= l1; y++ { + d[y] = y * delCost + } + for x := 0; x < l2; x++ { + dy, d[0] = d[0], d[0]+insCost + for y = 0; y < l1; dy, d[y] = d[y], dy { + if str1[y] != str2[x] { + dy += subCost + } + if c = d[y] + delCost; c < dy { + dy = c + } + y++ + if c = d[y] + insCost; c < dy { + dy = c + } + } + } + dist = d[l1] + } + + return +} + +// Distance returns the Levenshtein distance between str1 and str2, using the +// default or provided cost values. Pass nil for the third argument to use the +// default cost of 1 for all three operations, with no maximum. +func Distance(str1, str2 string, p *Params) int { + if p == nil { + p = defaultParams + } + dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost) + return dist +} + +// Similarity returns a score in the range of 0..1 for how similar the two strings are. +// A score of 1 means the strings are identical, and 0 means they have nothing in common. +// +// A nil third argument uses the default cost of 1 for all three operations. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Similarity(str1, str2 string, p *Params) float64 { + return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus +} + +// Match returns a similarity score adjusted by the same method as proposed by Winkler for +// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their +// similarity score is already over a threshold. +// +// The score is in the range of 0..1, with 1 meaning the strings are identical, +// and 0 meaning they have nothing in common. 
+// +// A nil third argument uses the default cost of 1 for all three operations, maximum length of +// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7. +// +// If a non-zero MinScore value is provided in the parameters, scores lower than it +// will be returned as 0. +func Match(str1, str2 string, p *Params) float64 { + s1, s2 := []rune(str1), []rune(str2) + l1, l2 := len(s1), len(s2) + // two empty strings are identical; shortcut also avoids divByZero issues later on. + if l1 == 0 && l2 == 0 { + return 1 + } + + if p == nil { + p = defaultParams + } + + // a min over 1 can never be satisfied, so the score is 0. + if p.minScore > 1 { + return 0 + } + + insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0 + if l1 > l2 { + l1, l2, insCost, delCost = l2, l1, delCost, insCost + } + + if p.subCost < delCost+insCost { + maxDist = l1*p.subCost + (l2-l1)*insCost + } else { + maxDist = l1*delCost + l2*insCost + } + + // a zero min is always satisfied, so no need to set a max cost. + if p.minScore > 0 { + // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula + // for the max cost, because a sim score below min cannot receive a bonus. + if p.minScore < p.bonusThreshold { + // round down the max - a cost equal to a rounded up max would already be under min. + max = int((1 - p.minScore) * float64(maxDist)) + } else { + // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim) + // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist)) + // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist + // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist + // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist + max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale)) + } + } + + dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost) + if max > 0 && dist > max { + return 0 + } + sim := 1 - float64(dist)/float64(maxDist) + + if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 { + if pl > p.bonusPrefix { + pl = p.bonusPrefix + } + sim += float64(pl) * p.bonusScale * (1 - sim) + } + + if sim < p.minScore { + return 0 + } + + return sim +} diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go new file mode 100644 index 00000000..a85727b3 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/params.go @@ -0,0 +1,152 @@ +// Copyright 2016 ALRUX Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package levenshtein + +// Params represents a set of parameter values for the various formulas involved +// in the calculation of the Levenshtein string metrics. 
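+//
+// Construct a Params value with NewParams and adjust the defaults through the
+// chainable setters below, for example (an illustrative sketch):
+//
+//	p := NewParams().InsCost(2).DelCost(2).MinScore(0.8)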
+type Params struct { + insCost int + subCost int + delCost int + maxCost int + minScore float64 + bonusPrefix int + bonusScale float64 + bonusThreshold float64 +} + +var ( + defaultParams = NewParams() +) + +// NewParams creates a new set of parameters and initializes it with the default values. +func NewParams() *Params { + return &Params{ + insCost: 1, + subCost: 1, + delCost: 1, + maxCost: 0, + minScore: 0, + bonusPrefix: 4, + bonusScale: .1, + bonusThreshold: .7, + } +} + +// Clone returns a pointer to a copy of the receiver parameter set, or of a new +// default parameter set if the receiver is nil. +func (p *Params) Clone() *Params { + if p == nil { + return NewParams() + } + return &Params{ + insCost: p.insCost, + subCost: p.subCost, + delCost: p.delCost, + maxCost: p.maxCost, + minScore: p.minScore, + bonusPrefix: p.bonusPrefix, + bonusScale: p.bonusScale, + bonusThreshold: p.bonusThreshold, + } +} + +// InsCost overrides the default value of 1 for the cost of insertion. +// The new value must be zero or positive. +func (p *Params) InsCost(v int) *Params { + if v >= 0 { + p.insCost = v + } + return p +} + +// SubCost overrides the default value of 1 for the cost of substitution. +// The new value must be zero or positive. +func (p *Params) SubCost(v int) *Params { + if v >= 0 { + p.subCost = v + } + return p +} + +// DelCost overrides the default value of 1 for the cost of deletion. +// The new value must be zero or positive. +func (p *Params) DelCost(v int) *Params { + if v >= 0 { + p.delCost = v + } + return p +} + +// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost. +// The calculation of Distance() stops when the result is guaranteed to exceed +// this maximum, returning a lower-bound rather than exact value. +// The new value must be zero or positive. +func (p *Params) MaxCost(v int) *Params { + if v >= 0 { + p.maxCost = v + } + return p +} + +// MinScore overrides the default value of 0 for the minimum similarity score. +// Scores below this threshold are returned as 0 by Similarity() and Match(). +// The new value must be zero or positive. Note that a minimum greater than 1 +// can never be satisfied, resulting in a score of 0 for any pair of strings. +func (p *Params) MinScore(v float64) *Params { + if v >= 0 { + p.minScore = v + } + return p +} + +// BonusPrefix overrides the default value for the maximum length of +// common prefix to be considered for bonus by Match(). +// The new value must be zero or positive. +func (p *Params) BonusPrefix(v int) *Params { + if v >= 0 { + p.bonusPrefix = v + } + return p +} + +// BonusScale overrides the default value for the scaling factor used by Match() +// in calculating the bonus. +// The new value must be zero or positive. To guarantee that the similarity score +// remains in the interval 0..1, this scaling factor is not allowed to exceed +// 1 / BonusPrefix. +func (p *Params) BonusScale(v float64) *Params { + if v >= 0 { + p.bonusScale = v + } + + // the bonus cannot exceed (1-sim), or the score may become greater than 1. + if float64(p.bonusPrefix)*p.bonusScale > 1 { + p.bonusScale = 1 / float64(p.bonusPrefix) + } + + return p +} + +// BonusThreshold overrides the default value for the minimum similarity score +// for which Match() can assign a bonus. +// The new value must be zero or positive. Note that a threshold greater than 1 +// effectively makes Match() become the equivalent of Similarity(). 
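+//
+// For example (an illustrative sketch), a threshold above 1 disables the
+// prefix bonus entirely, which is exactly how Similarity() reuses Match():
+//
+//	p := NewParams().BonusThreshold(1.1) // guaranteed no bonus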
+func (p *Params) BonusThreshold(v float64) *Params { + if v >= 0 { + p.bonusThreshold = v + } + return p +} diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh new file mode 100644 index 00000000..c5ed7246 --- /dev/null +++ b/vendor/github.com/agext/levenshtein/test.sh @@ -0,0 +1,10 @@ +set -ev + +if [[ "$1" == "goveralls" ]]; then + echo "Testing with goveralls..." + go get github.com/mattn/goveralls + $HOME/gopath/bin/goveralls -service=travis-ci +else + echo "Testing with go test..." + go test -v ./... +fi diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go new file mode 100644 index 00000000..d7968dab --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/api_timeout.go @@ -0,0 +1,249 @@ +package sdk + +import ( + "encoding/json" + "strings" + "time" +) + +var apiTimeouts = `{ + "ecs": { + "ActivateRouterInterface": 10, + "AddTags": 61, + "AllocateDedicatedHosts": 10, + "AllocateEipAddress": 17, + "AllocatePublicIpAddress": 36, + "ApplyAutoSnapshotPolicy": 10, + "AssignIpv6Addresses": 10, + "AssignPrivateIpAddresses": 10, + "AssociateEipAddress": 17, + "AttachClassicLinkVpc": 14, + "AttachDisk": 36, + "AttachInstanceRamRole": 11, + "AttachKeyPair": 16, + "AttachNetworkInterface": 16, + "AuthorizeSecurityGroupEgress": 16, + "AuthorizeSecurityGroup": 16, + "CancelAutoSnapshotPolicy": 10, + "CancelCopyImage": 10, + "CancelPhysicalConnection": 10, + "CancelSimulatedSystemEvents": 10, + "CancelTask": 10, + "ConnectRouterInterface": 10, + "ConvertNatPublicIpToEip": 12, + "CopyImage": 10, + "CreateAutoSnapshotPolicy": 10, + "CreateCommand": 16, + "CreateDeploymentSet": 16, + "CreateDisk": 36, + "CreateHpcCluster": 10, + "CreateImage": 36, + "CreateInstance": 86, + "CreateKeyPair": 10, + "CreateLaunchTemplate": 10, + "CreateLaunchTemplateVersion": 10, + "CreateNatGateway": 36, + "CreateNetworkInterfacePermission": 13, + "CreateNetworkInterface": 16, + "CreatePhysicalConnection": 10, + "CreateRouteEntry": 17, + "CreateRouterInterface": 10, + "CreateSecurityGroup": 86, + "CreateSimulatedSystemEvents": 10, + "CreateSnapshot": 86, + "CreateVirtualBorderRouter": 10, + "CreateVpc": 16, + "CreateVSwitch": 17, + "DeactivateRouterInterface": 10, + "DeleteAutoSnapshotPolicy": 10, + "DeleteBandwidthPackage": 10, + "DeleteCommand": 16, + "DeleteDeploymentSet": 12, + "DeleteDisk": 16, + "DeleteHpcCluster": 10, + "DeleteImage": 36, + "DeleteInstance": 66, + "DeleteKeyPairs": 10, + "DeleteLaunchTemplate": 10, + "DeleteLaunchTemplateVersion": 10, + "DeleteNatGateway": 10, + "DeleteNetworkInterfacePermission": 10, + "DeleteNetworkInterface": 16, + "DeletePhysicalConnection": 10, + "DeleteRouteEntry": 16, + "DeleteRouterInterface": 10, + "DeleteSecurityGroup": 87, + "DeleteSnapshot": 17, + "DeleteVirtualBorderRouter": 10, + 
"DeleteVpc": 17, + "DeleteVSwitch": 17, + "DescribeAccessPoints": 10, + "DescribeAccountAttributes": 10, + "DescribeAutoSnapshotPolicyEx": 16, + "DescribeAvailableResource": 10, + "DescribeBandwidthLimitation": 16, + "DescribeBandwidthPackages": 10, + "DescribeClassicLinkInstances": 15, + "DescribeCloudAssistantStatus": 16, + "DescribeClusters": 10, + "DescribeCommands": 16, + "DescribeDedicatedHosts": 10, + "DescribeDedicatedHostTypes": 10, + "DescribeDeploymentSets": 26, + "DescribeDiskMonitorData": 16, + "DescribeDisksFullStatus": 14, + "DescribeDisks": 19, + "DescribeEipAddresses": 16, + "DescribeEipMonitorData": 16, + "DescribeEniMonitorData": 10, + "DescribeHaVips": 10, + "DescribeHpcClusters": 16, + "DescribeImageSharePermission": 10, + "DescribeImages": 38, + "DescribeImageSupportInstanceTypes": 16, + "DescribeInstanceAttribute": 36, + "DescribeInstanceAutoRenewAttribute": 17, + "DescribeInstanceHistoryEvents": 19, + "DescribeInstanceMonitorData": 19, + "DescribeInstancePhysicalAttribute": 10, + "DescribeInstanceRamRole": 11, + "DescribeInstancesFullStatus": 14, + "DescribeInstances": 10, + "DescribeInstanceStatus": 26, + "DescribeInstanceTopology": 12, + "DescribeInstanceTypeFamilies": 17, + "DescribeInstanceTypes": 17, + "DescribeInstanceVncPasswd": 10, + "DescribeInstanceVncUrl": 36, + "DescribeInvocationResults": 16, + "DescribeInvocations": 16, + "DescribeKeyPairs": 12, + "DescribeLaunchTemplates": 16, + "DescribeLaunchTemplateVersions": 16, + "DescribeLimitation": 36, + "DescribeNatGateways": 10, + "DescribeNetworkInterfacePermissions": 13, + "DescribeNetworkInterfaces": 16, + "DescribeNewProjectEipMonitorData": 16, + "DescribePhysicalConnections": 10, + "DescribePrice": 16, + "DescribeRecommendInstanceType": 10, + "DescribeRegions": 19, + "DescribeRenewalPrice": 16, + "DescribeResourceByTags": 10, + "DescribeResourcesModification": 17, + "DescribeRouterInterfaces": 10, + "DescribeRouteTables": 17, + "DescribeSecurityGroupAttribute": 133, + "DescribeSecurityGroupReferences": 16, + "DescribeSecurityGroups": 25, + "DescribeSnapshotLinks": 17, + "DescribeSnapshotMonitorData": 12, + "DescribeSnapshotPackage": 10, + "DescribeSnapshots": 26, + "DescribeSnapshotsUsage": 26, + "DescribeSpotPriceHistory": 22, + "DescribeTags": 17, + "DescribeTaskAttribute": 10, + "DescribeTasks": 11, + "DescribeUserBusinessBehavior": 13, + "DescribeUserData": 10, + "DescribeVirtualBorderRoutersForPhysicalConnection": 10, + "DescribeVirtualBorderRouters": 10, + "DescribeVpcs": 41, + "DescribeVRouters": 17, + "DescribeVSwitches": 17, + "DescribeZones": 103, + "DetachClassicLinkVpc": 14, + "DetachDisk": 17, + "DetachInstanceRamRole": 10, + "DetachKeyPair": 10, + "DetachNetworkInterface": 16, + "EipFillParams": 19, + "EipFillProduct": 13, + "EipNotifyPaid": 10, + "EnablePhysicalConnection": 10, + "ExportImage": 10, + "GetInstanceConsoleOutput": 14, + "GetInstanceScreenshot": 14, + "ImportImage": 29, + "ImportKeyPair": 10, + "InstallCloudAssistant": 10, + "InvokeCommand": 16, + "JoinResourceGroup": 10, + "JoinSecurityGroup": 66, + "LeaveSecurityGroup": 66, + "ModifyAutoSnapshotPolicyEx": 10, + "ModifyBandwidthPackageSpec": 11, + "ModifyCommand": 10, + "ModifyDeploymentSetAttribute": 10, + "ModifyDiskAttribute": 16, + "ModifyDiskChargeType": 13, + "ModifyEipAddressAttribute": 14, + "ModifyImageAttribute": 10, + "ModifyImageSharePermission": 16, + "ModifyInstanceAttribute": 22, + "ModifyInstanceAutoReleaseTime": 15, + "ModifyInstanceAutoRenewAttribute": 16, + "ModifyInstanceChargeType": 22, + 
"ModifyInstanceDeployment": 10, + "ModifyInstanceNetworkSpec": 36, + "ModifyInstanceSpec": 62, + "ModifyInstanceVncPasswd": 35, + "ModifyInstanceVpcAttribute": 15, + "ModifyLaunchTemplateDefaultVersion": 10, + "ModifyNetworkInterfaceAttribute": 10, + "ModifyPhysicalConnectionAttribute": 10, + "ModifyPrepayInstanceSpec": 13, + "ModifyRouterInterfaceAttribute": 10, + "ModifySecurityGroupAttribute": 10, + "ModifySecurityGroupEgressRule": 10, + "ModifySecurityGroupPolicy": 10, + "ModifySecurityGroupRule": 16, + "ModifySnapshotAttribute": 10, + "ModifyUserBusinessBehavior": 10, + "ModifyVirtualBorderRouterAttribute": 10, + "ModifyVpcAttribute": 10, + "ModifyVRouterAttribute": 10, + "ModifyVSwitchAttribute": 10, + "ReActivateInstances": 10, + "RebootInstance": 27, + "RedeployInstance": 14, + "ReInitDisk": 16, + "ReleaseDedicatedHost": 10, + "ReleaseEipAddress": 16, + "ReleasePublicIpAddress": 10, + "RemoveTags": 10, + "RenewInstance": 19, + "ReplaceSystemDisk": 36, + "ResetDisk": 36, + "ResizeDisk": 11, + "RevokeSecurityGroupEgress": 13, + "RevokeSecurityGroup": 16, + "RunInstances": 86, + "StartInstance": 46, + "StopInstance": 27, + "StopInvocation": 10, + "TerminatePhysicalConnection": 10, + "TerminateVirtualBorderRouter": 10, + "UnassignIpv6Addresses": 10, + "UnassignPrivateIpAddresses": 10, + "UnassociateEipAddress": 16 + } +} +` + +func getAPIMaxTimeout(product, actionName string) (time.Duration, bool) { + timeout := make(map[string]map[string]int) + err := json.Unmarshal([]byte(apiTimeouts), &timeout) + if err != nil { + return 0 * time.Millisecond, false + } + + obj := timeout[strings.ToLower(product)] + if obj != nil && obj[actionName] != 0 { + return time.Duration(obj[actionName]) * time.Second, true + } + + return 0 * time.Millisecond, false +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go new file mode 100644 index 00000000..7f20b7a4 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credential.go @@ -0,0 +1,18 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +type Credential interface { +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go new file mode 100644 index 00000000..68f82263 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/access_key_credential.go @@ -0,0 +1,34 @@ +package credentials + +// Deprecated: Use AccessKeyCredential in this package instead. +type BaseCredential struct { + AccessKeyId string + AccessKeySecret string +} + +type AccessKeyCredential struct { + AccessKeyId string + AccessKeySecret string +} + +// Deprecated: Use NewAccessKeyCredential in this package instead. 
+func NewBaseCredential(accessKeyId, accessKeySecret string) *BaseCredential {
+	return &BaseCredential{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+	}
+}
+
+func (baseCred *BaseCredential) ToAccessKeyCredential() *AccessKeyCredential {
+	return &AccessKeyCredential{
+		AccessKeyId:     baseCred.AccessKeyId,
+		AccessKeySecret: baseCred.AccessKeySecret,
+	}
+}
+
+func NewAccessKeyCredential(accessKeyId, accessKeySecret string) *AccessKeyCredential {
+	return &AccessKeyCredential{
+		AccessKeyId:     accessKeyId,
+		AccessKeySecret: accessKeySecret,
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go
new file mode 100644
index 00000000..6d4763e6
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/bearer_token_credential.go
@@ -0,0 +1,12 @@
+package credentials
+
+type BearerTokenCredential struct {
+	BearerToken string
+}
+
+// NewBearerTokenCredential returns a BearerTokenCredential object.
+func NewBearerTokenCredential(token string) *BearerTokenCredential {
+	return &BearerTokenCredential{
+		BearerToken: token,
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go
new file mode 100644
index 00000000..55a5c2da
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/ecs_ram_role.go
@@ -0,0 +1,29 @@
+package credentials
+
+func (oldCred *StsRoleNameOnEcsCredential) ToEcsRamRoleCredential() *EcsRamRoleCredential {
+	return &EcsRamRoleCredential{
+		RoleName: oldCred.RoleName,
+	}
+}
+
+type EcsRamRoleCredential struct {
+	RoleName string
+}
+
+func NewEcsRamRoleCredential(roleName string) *EcsRamRoleCredential {
+	return &EcsRamRoleCredential{
+		RoleName: roleName,
+	}
+}
+
+// Deprecated: Use EcsRamRoleCredential in this package instead.
+type StsRoleNameOnEcsCredential struct {
+	RoleName string
+}
+
+// Deprecated: Use NewEcsRamRoleCredential in this package instead.
+func NewStsRoleNameOnEcsCredential(roleName string) *StsRoleNameOnEcsCredential {
+	return &StsRoleNameOnEcsCredential{
+		RoleName: roleName,
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go
new file mode 100644
index 00000000..3cd0d020
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/env.go
@@ -0,0 +1,30 @@
+package provider
+
+import (
+	"errors"
+	"os"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+type EnvProvider struct{}
+
+var ProviderEnv = new(EnvProvider)
+
+func NewEnvProvider() Provider {
+	return &EnvProvider{}
+}
+
+func (p *EnvProvider) Resolve() (auth.Credential, error) {
+	accessKeyID, ok1 := os.LookupEnv(ENVAccessKeyID)
+	accessKeySecret, ok2 := os.LookupEnv(ENVAccessKeySecret)
+	if !ok1 || !ok2 {
+		return nil, nil
+	}
+	if accessKeyID == "" || accessKeySecret == "" {
+		return nil, errors.New("Environment variable (ALIBABA_CLOUD_ACCESS_KEY_ID or ALIBABA_CLOUD_ACCESS_KEY_SECRET) is empty")
+	}
+	return credentials.NewAccessKeyCredential(accessKeyID, accessKeySecret), nil
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go
new file mode 100644
index 00000000..1906d21f
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/instance_credentials.go
@@ -0,0 +1,92 @@
+package provider
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+)
+
+var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/"
+
+type InstanceCredentialsProvider struct{}
+
+var ProviderInstance = new(InstanceCredentialsProvider)
+
+var HookGet = func(fn func(string) (int, []byte, error)) func(string) (int, []byte, error) {
+	return fn
+}
+
+func NewInstanceCredentialsProvider() Provider {
+	return &InstanceCredentialsProvider{}
+}
+
+func (p *InstanceCredentialsProvider) Resolve() (auth.Credential, error) {
+	roleName, ok := os.LookupEnv(ENVEcsMetadata)
+	if !ok {
+		return nil, nil
+	}
+	if roleName == "" {
+		return nil, errors.New("Environment variable 'ALIBABA_CLOUD_ECS_METADATA' is empty")
+	}
+	status, content, err := HookGet(get)(securityCredURL + roleName)
+	if err != nil {
+		return nil, err
+	}
+	if status != 200 {
+		if status == 404 {
+			return nil, fmt.Errorf("The role was not found in the instance")
+		}
+		return nil, fmt.Errorf("Received %d when getting security credentials for %s", status, roleName)
+	}
+	body := make(map[string]interface{})
+
+	if err := json.Unmarshal(content, &body); err != nil {
+		return nil, err
+	}
+
+	accessKeyID, err := extractString(body, "AccessKeyId")
+	if err != nil {
+		return nil, err
+	}
+	accessKeySecret, err := extractString(body, "AccessKeySecret")
+	if err != nil {
+		return nil, err
+	}
+	securityToken, err := extractString(body, "SecurityToken")
+	if err != nil {
+		return nil, err
+	}
+
+	return credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, securityToken), nil
+}
+
+func get(url string) (status int, content []byte, err error) {
[]byte, err error) { + httpClient := http.DefaultClient + httpClient.Timeout = time.Second * 1 + resp, err := httpClient.Get(url) + if err != nil { + return + } + defer resp.Body.Close() + content, err = ioutil.ReadAll(resp.Body) + return resp.StatusCode, content, err +} + +func extractString(m map[string]interface{}, key string) (string, error) { + raw, ok := m[key] + if !ok { + return "", fmt.Errorf("%s not in map", key) + } + str, ok := raw.(string) + if !ok { + return "", fmt.Errorf("%s is not a string in map", key) + } + return str, nil +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go new file mode 100644 index 00000000..8d525c37 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/profile_credentials.go @@ -0,0 +1,158 @@ +package provider + +import ( + "bufio" + "errors" + "os" + "runtime" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + + ini "gopkg.in/ini.v1" +) + +type ProfileProvider struct { + Profile string +} + +var ProviderProfile = NewProfileProvider() + +// NewProfileProvider receive zero or more parameters, +// when length of name is 0, the value of field Profile will be "default", +// and when there are multiple inputs, the function will take the +// first one and discard the other values. +func NewProfileProvider(name ...string) Provider { + p := new(ProfileProvider) + if len(name) == 0 { + p.Profile = "default" + } else { + p.Profile = name[0] + } + return p +} + +// Resolve implements the Provider interface +// when credential type is rsa_key_pair, the content of private_key file +// must be able to be parsed directly into the required string +// that NewRsaKeyPairCredential function needed +func (p *ProfileProvider) Resolve() (auth.Credential, error) { + path, ok := os.LookupEnv(ENVCredentialFile) + if !ok { + path, err := checkDefaultPath() + if err != nil { + return nil, err + } + if path == "" { + return nil, nil + } + } else if path == "" { + return nil, errors.New("Environment variable '" + ENVCredentialFile + "' cannot be empty") + } + + ini, err := ini.Load(path) + if err != nil { + return nil, errors.New("ERROR: Can not open file" + err.Error()) + } + + section, err := ini.GetSection(p.Profile) + if err != nil { + return nil, errors.New("ERROR: Can not load section" + err.Error()) + } + + value, err := section.GetKey("type") + if err != nil { + return nil, errors.New("ERROR: Can not find credential type" + err.Error()) + } + + switch value.String() { + case "access_key": + value1, err1 := section.GetKey("access_key_id") + value2, err2 := section.GetKey("access_key_secret") + if err1 != nil || err2 != nil { + return nil, errors.New("ERROR: Failed to get value") + } + if value1.String() == "" || value2.String() == "" { + return nil, errors.New("ERROR: Value can't be empty") + } + return credentials.NewAccessKeyCredential(value1.String(), value2.String()), nil + case "ecs_ram_role": + value1, err1 := section.GetKey("role_name") + if err1 != nil { + return nil, errors.New("ERROR: Failed to get value") + } + if value1.String() == "" { + return nil, errors.New("ERROR: Value can't be empty") + } + return credentials.NewEcsRamRoleCredential(value1.String()), nil + case "ram_role_arn": + value1, err1 := section.GetKey("access_key_id") + 
value2, err2 := section.GetKey("access_key_secret") + value3, err3 := section.GetKey("role_arn") + value4, err4 := section.GetKey("role_session_name") + if err1 != nil || err2 != nil || err3 != nil || err4 != nil { + return nil, errors.New("ERROR: Failed to get value") + } + if value1.String() == "" || value2.String() == "" || value3.String() == "" || value4.String() == "" { + return nil, errors.New("ERROR: Value can't be empty") + } + return credentials.NewRamRoleArnCredential(value1.String(), value2.String(), value3.String(), value4.String(), 3600), nil + case "rsa_key_pair": + value1, err1 := section.GetKey("public_key_id") + value2, err2 := section.GetKey("private_key_file") + if err1 != nil || err2 != nil { + return nil, errors.New("ERROR: Failed to get value") + } + if value1.String() == "" || value2.String() == "" { + return nil, errors.New("ERROR: Value can't be empty") + } + file, err := os.Open(value2.String()) + if err != nil { + return nil, errors.New("ERROR: Can not get private_key") + } + defer file.Close() + var privateKey string + scan := bufio.NewScanner(file) + var data string + for scan.Scan() { + if strings.HasPrefix(scan.Text(), "----") { + continue + } + data += scan.Text() + "\n" + } + return credentials.NewRsaKeyPairCredential(privateKey, value1.String(), 3600), nil + default: + return nil, errors.New("ERROR: Failed to get credential") + } +} + +// GetHomePath return home directory according to the system. +// if the environmental virables does not exist, will return empty +func GetHomePath() string { + if runtime.GOOS == "windows" { + path, ok := os.LookupEnv("USERPROFILE") + if !ok { + return "" + } + return path + } + path, ok := os.LookupEnv("HOME") + if !ok { + return "" + } + return path +} + +func checkDefaultPath() (path string, err error) { + path = GetHomePath() + if path == "" { + return "", errors.New("The default credential file path is invalid") + } + path = strings.Replace("~/.alibabacloud/credentials", "~", path, 1) + _, err = os.Stat(path) + if err != nil { + return "", nil + } + return path, nil +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go new file mode 100644 index 00000000..ae4e168e --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider.go @@ -0,0 +1,19 @@ +package provider + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" +) + +//Environmental virables that may be used by the provider +const ( + ENVAccessKeyID = "ALIBABA_CLOUD_ACCESS_KEY_ID" + ENVAccessKeySecret = "ALIBABA_CLOUD_ACCESS_KEY_SECRET" + ENVCredentialFile = "ALIBABA_CLOUD_CREDENTIALS_FILE" + ENVEcsMetadata = "ALIBABA_CLOUD_ECS_METADATA" + PATHCredentialFile = "~/.alibabacloud/credentials" +) + +// When you want to customize the provider, you only need to implement the method of the interface. 
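+//
+// An illustrative sketch only (staticProvider is a hypothetical name, not
+// part of this SDK): a fixed-credential provider that defers to the next
+// provider in a chain when it has nothing to offer:
+//
+//	type staticProvider struct{ id, secret string }
+//
+//	func (p *staticProvider) Resolve() (auth.Credential, error) {
+//		if p.id == "" || p.secret == "" {
+//			return nil, nil // nil, nil means "try the next provider"
+//		}
+//		return credentials.NewAccessKeyCredential(p.id, p.secret), nil
+//	}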
+type Provider interface {
+	Resolve() (auth.Credential, error)
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go
new file mode 100644
index 00000000..3f9315d1
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider/provider_chain.go
@@ -0,0 +1,34 @@
+package provider
+
+import (
+	"errors"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+)
+
+type ProviderChain struct {
+	Providers []Provider
+}
+
+var defaultproviders = []Provider{ProviderEnv, ProviderProfile, ProviderInstance}
+var DefaultChain = NewProviderChain(defaultproviders)
+
+func NewProviderChain(providers []Provider) Provider {
+	return &ProviderChain{
+		Providers: providers,
+	}
+}
+
+// Resolve walks the chain in order; a provider that returns (nil, nil)
+// simply defers to the next one.
+func (p *ProviderChain) Resolve() (auth.Credential, error) {
+	for _, provider := range p.Providers {
+		creds, err := provider.Resolve()
+		if err != nil {
+			return nil, err
+		}
+		if creds == nil {
+			continue
+		}
+		return creds, nil
+	}
+	return nil, errors.New("No credential found")
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go
new file mode 100644
index 00000000..00d688eb
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/rsa_key_pair_credential.go
@@ -0,0 +1,15 @@
+package credentials
+
+type RsaKeyPairCredential struct {
+	PrivateKey        string
+	PublicKeyId       string
+	SessionExpiration int
+}
+
+func NewRsaKeyPairCredential(privateKey, publicKeyId string, sessionExpiration int) *RsaKeyPairCredential {
+	return &RsaKeyPairCredential{
+		PrivateKey:        privateKey,
+		PublicKeyId:       publicKeyId,
+		SessionExpiration: sessionExpiration,
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go
new file mode 100644
index 00000000..554431ff
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_credential.go
@@ -0,0 +1,15 @@
+package credentials
+
+type StsTokenCredential struct {
+	AccessKeyId       string
+	AccessKeySecret   string
+	AccessKeyStsToken string
+}
+
+func NewStsTokenCredential(accessKeyId, accessKeySecret, accessKeyStsToken string) *StsTokenCredential {
+	return &StsTokenCredential{
+		AccessKeyId:       accessKeyId,
+		AccessKeySecret:   accessKeySecret,
+		AccessKeyStsToken: accessKeyStsToken,
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go
new file mode 100644
index 00000000..27602fd7
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/sts_role_arn_credential.go
@@ -0,0 +1,61 @@
+package credentials
+
+// Deprecated: Use RamRoleArnCredential in this package instead.
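+// As an illustrative migration sketch (all values are placeholders),
+// existing code can convert the deprecated type with ToRamRoleArnCredential:
+//
+//	old := NewStsRoleArnCredential("<ak>", "<sk>", "acs:ram::<account>:role/<name>", "session", 3600)
+//	cred := old.ToRamRoleArnCredential() // yields a *RamRoleArnCredential
+//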
+type StsRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int +} + +type RamRoleArnCredential struct { + AccessKeyId string + AccessKeySecret string + RoleArn string + RoleSessionName string + RoleSessionExpiration int + Policy string +} + +// Deprecated: Use RamRoleArnCredential in this package instead. +func NewStsRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *StsRoleArnCredential { + return &StsRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} + +func (oldCred *StsRoleArnCredential) ToRamRoleArnCredential() *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: oldCred.AccessKeyId, + AccessKeySecret: oldCred.AccessKeySecret, + RoleArn: oldCred.RoleArn, + RoleSessionName: oldCred.RoleSessionName, + RoleSessionExpiration: oldCred.RoleSessionExpiration, + } +} + +func NewRamRoleArnCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName string, roleSessionExpiration int) *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + } +} + +func NewRamRoleArnWithPolicyCredential(accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string, roleSessionExpiration int) *RamRoleArnCredential { + return &RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + RoleSessionExpiration: roleSessionExpiration, + Policy: policy, + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go new file mode 100644 index 00000000..8b4037a0 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/roa_signature_composer.go @@ -0,0 +1,136 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package auth + +import ( + "bytes" + "sort" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +var debug utils.Debug + +var hookGetDate = func(fn func() string) string { + return fn() +} + +func init() { + debug = utils.Init("sdk") +} + +func signRoaRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) { + completeROASignParams(request, signer, regionId) + stringToSign := buildRoaStringToSign(request) + request.SetStringToSign(stringToSign) + accessKeyId, err := signer.GetAccessKeyId() + if err != nil { + return err + } + + signature := signer.Sign(stringToSign, "") + request.GetHeaders()["Authorization"] = "acs " + accessKeyId + ":" + signature + + return +} + +func completeROASignParams(request requests.AcsRequest, signer Signer, regionId string) { + headerParams := request.GetHeaders() + + // complete query params + queryParams := request.GetQueryParams() + //if _, ok := queryParams["RegionId"]; !ok { + // queryParams["RegionId"] = regionId + //} + if extraParam := signer.GetExtraParam(); extraParam != nil { + for key, value := range extraParam { + if key == "SecurityToken" { + headerParams["x-acs-security-token"] = value + continue + } + if key == "BearerToken" { + headerParams["x-acs-bearer-token"] = value + continue + } + queryParams[key] = value + } + } + + // complete header params + headerParams["Date"] = hookGetDate(utils.GetTimeInFormatRFC2616) + headerParams["x-acs-signature-method"] = signer.GetName() + headerParams["x-acs-signature-version"] = signer.GetVersion() + if request.GetFormParams() != nil && len(request.GetFormParams()) > 0 { + formString := utils.GetUrlFormedMap(request.GetFormParams()) + request.SetContent([]byte(formString)) + headerParams["Content-Type"] = requests.Form + } + contentMD5 := utils.GetMD5Base64(request.GetContent()) + headerParams["Content-MD5"] = contentMD5 + if _, contains := headerParams["Content-Type"]; !contains { + headerParams["Content-Type"] = requests.Raw + } + switch format := request.GetAcceptFormat(); format { + case "JSON": + headerParams["Accept"] = requests.Json + case "XML": + headerParams["Accept"] = requests.Xml + default: + headerParams["Accept"] = requests.Raw + } +} + +func buildRoaStringToSign(request requests.AcsRequest) (stringToSign string) { + + headers := request.GetHeaders() + + stringToSignBuilder := bytes.Buffer{} + stringToSignBuilder.WriteString(request.GetMethod()) + stringToSignBuilder.WriteString(requests.HeaderSeparator) + + // append header keys for sign + appendIfContain(headers, &stringToSignBuilder, "Accept", requests.HeaderSeparator) + appendIfContain(headers, &stringToSignBuilder, "Content-MD5", requests.HeaderSeparator) + appendIfContain(headers, &stringToSignBuilder, "Content-Type", requests.HeaderSeparator) + appendIfContain(headers, &stringToSignBuilder, "Date", requests.HeaderSeparator) + + // sort and append headers witch starts with 'x-acs-' + var acsHeaders []string + for key := range headers { + if strings.HasPrefix(key, "x-acs-") { + acsHeaders = append(acsHeaders, key) + } + } + sort.Strings(acsHeaders) + for _, key := range acsHeaders { + stringToSignBuilder.WriteString(key + ":" + headers[key]) + stringToSignBuilder.WriteString(requests.HeaderSeparator) + } + + // append query params + stringToSignBuilder.WriteString(request.BuildQueries()) + stringToSign = stringToSignBuilder.String() + debug("stringToSign: %s", stringToSign) + return +} + +func 
appendIfContain(sourceMap map[string]string, target *bytes.Buffer, key, separator string) { + if value, contain := sourceMap[key]; contain && len(value) > 0 { + target.WriteString(sourceMap[key]) + target.WriteString(separator) + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go new file mode 100644 index 00000000..33967b9e --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/rpc_signature_composer.go @@ -0,0 +1,94 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +import ( + "net/url" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +var hookGetNonce = func(fn func() string) string { + return fn() +} + +func signRpcRequest(request requests.AcsRequest, signer Signer, regionId string) (err error) { + err = completeRpcSignParams(request, signer, regionId) + if err != nil { + return + } + // remove while retry + if _, containsSign := request.GetQueryParams()["Signature"]; containsSign { + delete(request.GetQueryParams(), "Signature") + } + stringToSign := buildRpcStringToSign(request) + request.SetStringToSign(stringToSign) + signature := signer.Sign(stringToSign, "&") + request.GetQueryParams()["Signature"] = signature + + return +} + +func completeRpcSignParams(request requests.AcsRequest, signer Signer, regionId string) (err error) { + queryParams := request.GetQueryParams() + queryParams["Version"] = request.GetVersion() + queryParams["Action"] = request.GetActionName() + queryParams["Format"] = request.GetAcceptFormat() + queryParams["Timestamp"] = hookGetDate(utils.GetTimeInFormatISO8601) + queryParams["SignatureMethod"] = signer.GetName() + queryParams["SignatureType"] = signer.GetType() + queryParams["SignatureVersion"] = signer.GetVersion() + queryParams["SignatureNonce"] = hookGetNonce(utils.GetUUID) + queryParams["AccessKeyId"], err = signer.GetAccessKeyId() + + if err != nil { + return + } + + if _, contains := queryParams["RegionId"]; !contains { + queryParams["RegionId"] = regionId + } + if extraParam := signer.GetExtraParam(); extraParam != nil { + for key, value := range extraParam { + queryParams[key] = value + } + } + + request.GetHeaders()["Content-Type"] = requests.Form + formString := utils.GetUrlFormedMap(request.GetFormParams()) + request.SetContent([]byte(formString)) + + return +} + +func buildRpcStringToSign(request requests.AcsRequest) (stringToSign string) { + signParams := make(map[string]string) + for key, value := range request.GetQueryParams() { + signParams[key] = value + } + for key, value := range request.GetFormParams() { + signParams[key] = value + } + + stringToSign = utils.GetUrlFormedMap(signParams) + stringToSign = strings.Replace(stringToSign, "+", "%20", -1) + stringToSign = strings.Replace(stringToSign, "*", "%2A", -1) + stringToSign = 
strings.Replace(stringToSign, "%7E", "~", -1) + stringToSign = url.QueryEscape(stringToSign) + stringToSign = request.GetMethod() + "&%2F&" + stringToSign + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go new file mode 100644 index 00000000..cbbc3cef --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signer.go @@ -0,0 +1,98 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package auth + +import ( + "fmt" + "reflect" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +type Signer interface { + GetName() string + GetType() string + GetVersion() string + GetAccessKeyId() (string, error) + GetExtraParam() map[string]string + Sign(stringToSign, secretSuffix string) string +} + +func NewSignerWithCredential(credential Credential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer Signer, err error) { + switch instance := credential.(type) { + case *credentials.AccessKeyCredential: + { + signer = signers.NewAccessKeySigner(instance) + } + case *credentials.StsTokenCredential: + { + signer = signers.NewStsTokenSigner(instance) + } + case *credentials.BearerTokenCredential: + { + signer = signers.NewBearerTokenSigner(instance) + } + case *credentials.RamRoleArnCredential: + { + signer, err = signers.NewRamRoleArnSigner(instance, commonApi) + } + case *credentials.RsaKeyPairCredential: + { + signer, err = signers.NewSignerKeyPair(instance, commonApi) + } + case *credentials.EcsRamRoleCredential: + { + signer = signers.NewEcsRamRoleSigner(instance, commonApi) + } + case *credentials.BaseCredential: // deprecated user interface + { + signer = signers.NewAccessKeySigner(instance.ToAccessKeyCredential()) + } + case *credentials.StsRoleArnCredential: // deprecated user interface + { + signer, err = signers.NewRamRoleArnSigner(instance.ToRamRoleArnCredential(), commonApi) + } + case *credentials.StsRoleNameOnEcsCredential: // deprecated user interface + { + signer = signers.NewEcsRamRoleSigner(instance.ToEcsRamRoleCredential(), commonApi) + } + default: + message := fmt.Sprintf(errors.UnsupportedCredentialErrorMessage, reflect.TypeOf(credential)) + err = errors.NewClientError(errors.UnsupportedCredentialErrorCode, message, nil) + } + return +} + +func Sign(request requests.AcsRequest, signer Signer, regionId string) (err error) { + switch request.GetStyle() { + case requests.ROA: + { + err = signRoaRequest(request, signer, regionId) + } + case requests.RPC: + { + err = signRpcRequest(request, signer, regionId) + } + default: + message := 
fmt.Sprintf(errors.UnknownRequestTypeErrorMessage, reflect.TypeOf(request)) + err = errors.NewClientError(errors.UnknownRequestTypeErrorCode, message, nil) + } + + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go new file mode 100644 index 00000000..887f5020 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/algorithms.go @@ -0,0 +1,57 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signers + +import ( + "crypto" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" +) + +func ShaHmac1(source, secret string) string { + key := []byte(secret) + hmac := hmac.New(sha1.New, key) + hmac.Write([]byte(source)) + signedBytes := hmac.Sum(nil) + signedString := base64.StdEncoding.EncodeToString(signedBytes) + return signedString +} + +func Sha256WithRsa(source, secret string) string { + // block, _ := pem.Decode([]byte(secret)) + decodeString, err := base64.StdEncoding.DecodeString(secret) + if err != nil { + panic(err) + } + private, err := x509.ParsePKCS8PrivateKey(decodeString) + if err != nil { + panic(err) + } + + h := crypto.Hash.New(crypto.SHA256) + h.Write([]byte(source)) + hashed := h.Sum(nil) + signature, err := rsa.SignPKCS1v15(rand.Reader, private.(*rsa.PrivateKey), + crypto.SHA256, hashed) + if err != nil { + panic(err) + } + + return base64.StdEncoding.EncodeToString(signature) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go new file mode 100644 index 00000000..ba291a41 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/credential_updater.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +const defaultInAdvanceScale = 0.95 + +type credentialUpdater struct { + credentialExpiration int + lastUpdateTimestamp int64 + inAdvanceScale float64 + buildRequestMethod func() (*requests.CommonRequest, error) + responseCallBack func(response *responses.CommonResponse) error + refreshApi func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) +} + +func (updater *credentialUpdater) needUpdateCredential() (result bool) { + if updater.inAdvanceScale == 0 { + updater.inAdvanceScale = defaultInAdvanceScale + } + return time.Now().Unix()-updater.lastUpdateTimestamp >= int64(float64(updater.credentialExpiration)*updater.inAdvanceScale) +} + +func (updater *credentialUpdater) updateCredential() (err error) { + request, err := updater.buildRequestMethod() + if err != nil { + return + } + response, err := updater.refreshApi(request) + if err != nil { + return + } + updater.lastUpdateTimestamp = time.Now().Unix() + err = updater.responseCallBack(response) + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go new file mode 100644 index 00000000..99c624c8 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/session_credential.go @@ -0,0 +1,7 @@ +package signers + +type SessionCredential struct { + AccessKeyId string + AccessKeySecret string + StsToken string +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go new file mode 100644 index 00000000..bc4f35b8 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_access_key.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type AccessKeySigner struct { + credential *credentials.AccessKeyCredential +} + +func (signer *AccessKeySigner) GetExtraParam() map[string]string { + return nil +} + +func NewAccessKeySigner(credential *credentials.AccessKeyCredential) *AccessKeySigner { + return &AccessKeySigner{ + credential: credential, + } +} + +func (*AccessKeySigner) GetName() string { + return "HMAC-SHA1" +} + +func (*AccessKeySigner) GetType() string { + return "" +} + +func (*AccessKeySigner) GetVersion() string { + return "1.0" +} + +func (signer *AccessKeySigner) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.AccessKeyId, nil +} + +func (signer *AccessKeySigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go new file mode 100644 index 00000000..75b78433 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_bearer_token.go @@ -0,0 +1,35 @@ +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type BearerTokenSigner struct { + credential *credentials.BearerTokenCredential +} + +func NewBearerTokenSigner(credential *credentials.BearerTokenCredential) *BearerTokenSigner { + return &BearerTokenSigner{ + credential: credential, + } +} + +func (signer *BearerTokenSigner) GetExtraParam() map[string]string { + return map[string]string{"BearerToken": signer.credential.BearerToken} +} + +func (*BearerTokenSigner) GetName() string { + return "" +} +func (*BearerTokenSigner) GetType() string { + return "BEARERTOKEN" +} +func (*BearerTokenSigner) GetVersion() string { + return "1.0" +} +func (signer *BearerTokenSigner) GetAccessKeyId() (accessKeyId string, err error) { + return "", nil +} +func (signer *BearerTokenSigner) Sign(stringToSign, secretSuffix string) string { + return "" +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go new file mode 100644 index 00000000..73788429 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ecs_ram_role.go @@ -0,0 +1,167 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +var securityCredURL = "http://100.100.100.200/latest/meta-data/ram/security-credentials/" + +type EcsRamRoleSigner struct { + *credentialUpdater + sessionCredential *SessionCredential + credential *credentials.EcsRamRoleCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewEcsRamRoleSigner(credential *credentials.EcsRamRoleCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *EcsRamRoleSigner) { + signer = &EcsRamRoleSigner{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: defaultDurationSeconds / 60, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + return signer +} + +func (*EcsRamRoleSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*EcsRamRoleSigner) GetType() string { + return "" +} + +func (*EcsRamRoleSigner) GetVersion() string { + return "1.0" +} + +func (signer *EcsRamRoleSigner) GetAccessKeyId() (accessKeyId string, err error) { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + err = signer.updateCredential() + if err != nil { + return + } + } + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + return "", nil + } + return signer.sessionCredential.AccessKeyId, nil +} + +func (signer *EcsRamRoleSigner) GetExtraParam() map[string]string { + if signer.sessionCredential == nil { + return make(map[string]string) + } + if len(signer.sessionCredential.StsToken) <= 0 { + return make(map[string]string) + } + return map[string]string{"SecurityToken": signer.sessionCredential.StsToken} +} + +func (signer *EcsRamRoleSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *EcsRamRoleSigner) buildCommonRequest() (request *requests.CommonRequest, err error) { + return +} + +func (signer *EcsRamRoleSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + requestUrl := securityCredURL + signer.credential.RoleName + httpRequest, err := http.NewRequest(requests.GET, requestUrl, strings.NewReader("")) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + httpClient := &http.Client{} + httpResponse, err := httpClient.Do(httpRequest) + if err != nil { + err = fmt.Errorf("refresh Ecs sts token err: %s", err.Error()) + return + } + + response = responses.NewCommonResponse() + err = responses.Unmarshal(response, httpResponse, "") + return +} + +func (signer *EcsRamRoleSigner) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + return fmt.Errorf("refresh Ecs sts token err, httpStatus: %d, message = %s", response.GetHttpStatus(), response.GetHttpContentString()) + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return 
fmt.Errorf("refresh Ecs sts token err, json.Unmarshal fail: %s", err.Error()) + } + code, err := jmespath.Search("Code", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get Code: %s", err.Error()) + } + if code.(string) != "Success" { + return fmt.Errorf("refresh Ecs sts token err, Code is not Success") + } + accessKeyId, err := jmespath.Search("AccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("AccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get AccessKeySecret: %s", err.Error()) + } + securityToken, err := jmespath.Search("SecurityToken", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get SecurityToken: %s", err.Error()) + } + expiration, err := jmespath.Search("Expiration", data) + if err != nil { + return fmt.Errorf("refresh Ecs sts token err, fail to get Expiration: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil || expiration == nil { + return + } + + expirationTime, err := time.Parse("2006-01-02T15:04:05Z", expiration.(string)) + signer.credentialExpiration = int(expirationTime.Unix() - time.Now().Unix()) + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + StsToken: securityToken.(string), + } + + return +} + +func (signer *EcsRamRoleSigner) GetSessionCredential() *SessionCredential { + return signer.sessionCredential +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go new file mode 100644 index 00000000..19273d5a --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_key_pair.go @@ -0,0 +1,148 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +type SignerKeyPair struct { + *credentialUpdater + sessionCredential *SessionCredential + credential *credentials.RsaKeyPairCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewSignerKeyPair(credential *credentials.RsaKeyPairCredential, commonApi func(*requests.CommonRequest, interface{}) (response *responses.CommonResponse, err error)) (signer *SignerKeyPair, err error) { + signer = &SignerKeyPair{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: credential.SessionExpiration, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + if credential.SessionExpiration > 0 { + if credential.SessionExpiration >= 900 && credential.SessionExpiration <= 3600 { + signer.credentialExpiration = credential.SessionExpiration + } else { + err = errors.NewClientError(errors.InvalidParamErrorCode, "Key Pair session duration should be in the range of 15min - 1Hr", nil) + } + } else { + signer.credentialExpiration = defaultDurationSeconds + } + return +} + +func (*SignerKeyPair) GetName() string { + return "HMAC-SHA1" +} + +func (*SignerKeyPair) GetType() string { + return "" +} + +func (*SignerKeyPair) GetVersion() string { + return "1.0" +} + +func (signer *SignerKeyPair) ensureCredential() error { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + return signer.updateCredential() + } + return nil +} + +func (signer *SignerKeyPair) GetAccessKeyId() (accessKeyId string, err error) { + err = signer.ensureCredential() + if err != nil { + return + } + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + accessKeyId = "" + return + } + + accessKeyId = signer.sessionCredential.AccessKeyId + return +} + +func (signer *SignerKeyPair) GetExtraParam() map[string]string { + return make(map[string]string) +} + +func (signer *SignerKeyPair) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *SignerKeyPair) buildCommonRequest() (request *requests.CommonRequest, err error) { + request = requests.NewCommonRequest() + request.Product = "Sts" + request.Version = "2015-04-01" + request.ApiName = "GenerateSessionAccessKey" + request.Scheme = requests.HTTPS + request.SetDomain("sts.ap-northeast-1.aliyuncs.com") + request.QueryParams["PublicKeyId"] = signer.credential.PublicKeyId + request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration) + return +} + +func (signer *SignerKeyPair) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + signerV2 := NewSignerV2(signer.credential) + return signer.commonApi(request, signerV2) +} + +func (signer *SignerKeyPair) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + message := "refresh session AccessKey failed" + err = 
errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, json.Unmarshal fail: %s", err.Error()) + } + accessKeyId, err := jmespath.Search("SessionAccessKey.SessionAccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("SessionAccessKey.SessionAccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh KeyPair err, fail to get SessionAccessKeySecret: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil { + return + } + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go new file mode 100644 index 00000000..c945c8ae --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_ram_role_arn.go @@ -0,0 +1,175 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" + jmespath "github.com/jmespath/go-jmespath" +) + +const ( + defaultDurationSeconds = 3600 +) + +type RamRoleArnSigner struct { + *credentialUpdater + roleSessionName string + sessionCredential *SessionCredential + credential *credentials.RamRoleArnCredential + commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error) +} + +func NewRamRoleArnSigner(credential *credentials.RamRoleArnCredential, commonApi func(request *requests.CommonRequest, signer interface{}) (response *responses.CommonResponse, err error)) (signer *RamRoleArnSigner, err error) { + signer = &RamRoleArnSigner{ + credential: credential, + commonApi: commonApi, + } + + signer.credentialUpdater = &credentialUpdater{ + credentialExpiration: credential.RoleSessionExpiration, + buildRequestMethod: signer.buildCommonRequest, + responseCallBack: signer.refreshCredential, + refreshApi: signer.refreshApi, + } + + if len(credential.RoleSessionName) > 0 { + signer.roleSessionName = credential.RoleSessionName + } else { + signer.roleSessionName = "aliyun-go-sdk-" + strconv.FormatInt(time.Now().UnixNano()/1000, 10) + } + if credential.RoleSessionExpiration > 0 { + if credential.RoleSessionExpiration >= 900 && credential.RoleSessionExpiration <= 3600 { + signer.credentialExpiration = credential.RoleSessionExpiration + } else { + err = errors.NewClientError(errors.InvalidParamErrorCode, "Assume Role session duration should be in the range of 15min - 1Hr", nil) + } + } else { + signer.credentialExpiration = defaultDurationSeconds + } + return +} + +func (*RamRoleArnSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*RamRoleArnSigner) GetType() string { + return "" +} + +func (*RamRoleArnSigner) GetVersion() string { + return "1.0" +} + +func (signer *RamRoleArnSigner) GetAccessKeyId() (accessKeyId string, err error) { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + err = signer.updateCredential() + if err != nil { + return + } + } + + if signer.sessionCredential == nil || len(signer.sessionCredential.AccessKeyId) <= 0 { + return "", err + } + + return signer.sessionCredential.AccessKeyId, nil +} + +func (signer *RamRoleArnSigner) GetExtraParam() map[string]string { + if signer.sessionCredential == nil || signer.needUpdateCredential() { + signer.updateCredential() + } + if signer.sessionCredential == nil || len(signer.sessionCredential.StsToken) <= 0 { + return make(map[string]string) + } + return map[string]string{"SecurityToken": signer.sessionCredential.StsToken} +} + +func (signer *RamRoleArnSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.sessionCredential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} + +func (signer *RamRoleArnSigner) buildCommonRequest() (request *requests.CommonRequest, err error) { + request = requests.NewCommonRequest() + request.Product = "Sts" + request.Version = "2015-04-01" + request.ApiName = "AssumeRole" + request.Scheme = requests.HTTPS + request.QueryParams["RoleArn"] = signer.credential.RoleArn + if signer.credential.Policy != "" { + request.QueryParams["Policy"] = signer.credential.Policy + } + 
request.QueryParams["RoleSessionName"] = signer.credential.RoleSessionName + request.QueryParams["DurationSeconds"] = strconv.Itoa(signer.credentialExpiration) + return +} + +func (signer *RamRoleArnSigner) refreshApi(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + credential := &credentials.AccessKeyCredential{ + AccessKeyId: signer.credential.AccessKeyId, + AccessKeySecret: signer.credential.AccessKeySecret, + } + signerV1 := NewAccessKeySigner(credential) + return signer.commonApi(request, signerV1) +} + +func (signer *RamRoleArnSigner) refreshCredential(response *responses.CommonResponse) (err error) { + if response.GetHttpStatus() != http.StatusOK { + message := "refresh session token failed" + err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), message) + return + } + var data interface{} + err = json.Unmarshal(response.GetHttpContentBytes(), &data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, json.Unmarshal fail: %s", err.Error()) + } + accessKeyId, err := jmespath.Search("Credentials.AccessKeyId", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeyId: %s", err.Error()) + } + accessKeySecret, err := jmespath.Search("Credentials.AccessKeySecret", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get AccessKeySecret: %s", err.Error()) + } + securityToken, err := jmespath.Search("Credentials.SecurityToken", data) + if err != nil { + return fmt.Errorf("refresh RoleArn sts token err, fail to get SecurityToken: %s", err.Error()) + } + if accessKeyId == nil || accessKeySecret == nil || securityToken == nil { + return + } + signer.sessionCredential = &SessionCredential{ + AccessKeyId: accessKeyId.(string), + AccessKeySecret: accessKeySecret.(string), + StsToken: securityToken.(string), + } + return +} + +func (signer *RamRoleArnSigner) GetSessionCredential() *SessionCredential { + return signer.sessionCredential +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go new file mode 100644 index 00000000..d0ce36c3 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_sts_token.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type StsTokenSigner struct { + credential *credentials.StsTokenCredential +} + +func NewStsTokenSigner(credential *credentials.StsTokenCredential) *StsTokenSigner { + return &StsTokenSigner{ + credential: credential, + } +} + +func (*StsTokenSigner) GetName() string { + return "HMAC-SHA1" +} + +func (*StsTokenSigner) GetType() string { + return "" +} + +func (*StsTokenSigner) GetVersion() string { + return "1.0" +} + +func (signer *StsTokenSigner) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.AccessKeyId, nil +} + +func (signer *StsTokenSigner) GetExtraParam() map[string]string { + return map[string]string{"SecurityToken": signer.credential.AccessKeyStsToken} +} + +func (signer *StsTokenSigner) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.AccessKeySecret + secretSuffix + return ShaHmac1(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go new file mode 100644 index 00000000..97348529 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/signers/signer_v2.go @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package signers + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" +) + +type SignerV2 struct { + credential *credentials.RsaKeyPairCredential +} + +func (signer *SignerV2) GetExtraParam() map[string]string { + return nil +} + +func NewSignerV2(credential *credentials.RsaKeyPairCredential) *SignerV2 { + return &SignerV2{ + credential: credential, + } +} + +func (*SignerV2) GetName() string { + return "SHA256withRSA" +} + +func (*SignerV2) GetType() string { + return "PRIVATEKEY" +} + +func (*SignerV2) GetVersion() string { + return "1.0" +} + +func (signer *SignerV2) GetAccessKeyId() (accessKeyId string, err error) { + return signer.credential.PublicKeyId, err +} + +func (signer *SignerV2) Sign(stringToSign, secretSuffix string) string { + secret := signer.credential.PrivateKey + return Sha256WithRsa(stringToSign, secret) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go new file mode 100644 index 00000000..aef8bdc0 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/client.go @@ -0,0 +1,766 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sdk
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+func init() {
+	debug = utils.Init("sdk")
+}
+
+// Version will be replaced at build time: -ldflags="-X sdk.version=x.x.x"
+var Version = "0.0.1"
+var defaultConnectTimeout = 5 * time.Second
+var defaultReadTimeout = 10 * time.Second
+
+var DefaultUserAgent = fmt.Sprintf("AlibabaCloud (%s; %s) Golang/%s Core/%s", runtime.GOOS, runtime.GOARCH, strings.Trim(runtime.Version(), "go"), Version)
+
+var hookDo = func(fn func(req *http.Request) (*http.Response, error)) func(req *http.Request) (*http.Response, error) {
+	return fn
+}
+
+// Client is the base client shared by all service clients in the SDK
+type Client struct {
+	isInsecure     bool
+	regionId       string
+	config         *Config
+	httpProxy      string
+	httpsProxy     string
+	noProxy        string
+	logger         *Logger
+	userAgent      map[string]string
+	signer         auth.Signer
+	httpClient     *http.Client
+	asyncTaskQueue chan func()
+	readTimeout    time.Duration
+	connectTimeout time.Duration
+	EndpointMap    map[string]string
+	EndpointType   string
+	Network        string
+
+	debug     bool
+	isRunning bool
+	// avoid "panic: send on closed channel" caused by calling addAsync()
+	// after Shutdown()
+	asyncChanLock *sync.RWMutex
+}
+
+func (client *Client) Init() (err error) {
+	panic("not support yet")
+}
+
+func (client *Client) SetEndpointRules(endpointMap map[string]string, endpointType string, netWork string) {
+	client.EndpointMap = endpointMap
+	client.Network = netWork
+	client.EndpointType = endpointType
+}
+
+func (client *Client) SetHTTPSInsecure(isInsecure bool) {
+	client.isInsecure = isInsecure
+}
+
+func (client *Client) GetHTTPSInsecure() bool {
+	return client.isInsecure
+}
+
+func (client *Client) SetHttpsProxy(httpsProxy string) {
+	client.httpsProxy = httpsProxy
+}
+
+func (client *Client) GetHttpsProxy() string {
+	return client.httpsProxy
+}
+
+func (client *Client) SetHttpProxy(httpProxy string) {
+	client.httpProxy = httpProxy
+}
+
+func (client *Client) GetHttpProxy() string {
+	return client.httpProxy
+}
+
+func (client *Client) SetNoProxy(noProxy string) {
+	client.noProxy = noProxy
+}
+
+func (client *Client) GetNoProxy() string {
+	return client.noProxy
+}
+
+// InitWithProviderChain gets a credential from the providerChain.
+// RsaKeyPairCredential is only applicable to regionID `ap-northeast-1`;
+// if your providerChain may return an RsaKeyPairCredential, please ensure
+// your regionID is `ap-northeast-1`.
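+//
+// An illustrative sketch (the region is a placeholder): resolving a
+// credential through the default chain (environment variables, then the
+// shared credentials file, then ECS instance metadata) before use:
+//
+//	client := &sdk.Client{}
+//	if err := client.InitWithProviderChain("cn-hangzhou", provider.DefaultChain); err != nil {
+//		panic(err)
+//	}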
+func (client *Client) InitWithProviderChain(regionId string, provider provider.Provider) (err error) { + config := client.InitClientConfig() + credential, err := provider.Resolve() + if err != nil { + return + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithOptions(regionId string, config *Config, credential auth.Credential) (err error) { + client.isRunning = true + client.asyncChanLock = new(sync.RWMutex) + client.regionId = regionId + client.config = config + client.httpClient = &http.Client{} + + if config.HttpTransport != nil { + client.httpClient.Transport = config.HttpTransport + } + + if config.Timeout > 0 { + client.httpClient.Timeout = config.Timeout + } + + if config.EnableAsync { + client.EnableAsync(config.GoRoutinePoolSize, config.MaxTaskQueueSize) + } + + client.signer, err = auth.NewSignerWithCredential(credential, client.ProcessCommonRequestWithSigner) + + return +} + +func (client *Client) SetReadTimeout(readTimeout time.Duration) { + client.readTimeout = readTimeout +} + +func (client *Client) SetConnectTimeout(connectTimeout time.Duration) { + client.connectTimeout = connectTimeout +} + +func (client *Client) GetReadTimeout() time.Duration { + return client.readTimeout +} + +func (client *Client) GetConnectTimeout() time.Duration { + return client.connectTimeout +} + +func (client *Client) getHttpProxy(scheme string) (proxy *url.URL, err error) { + if scheme == "https" { + if client.GetHttpsProxy() != "" { + proxy, err = url.Parse(client.httpsProxy) + } else if rawurl := os.Getenv("HTTPS_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("https_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } else { + if client.GetHttpProxy() != "" { + proxy, err = url.Parse(client.httpProxy) + } else if rawurl := os.Getenv("HTTP_PROXY"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } else if rawurl := os.Getenv("http_proxy"); rawurl != "" { + proxy, err = url.Parse(rawurl) + } + } + + return proxy, err +} + +func (client *Client) getNoProxy(scheme string) []string { + var urls []string + if client.GetNoProxy() != "" { + urls = strings.Split(client.noProxy, ",") + } else if rawurl := os.Getenv("NO_PROXY"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } else if rawurl := os.Getenv("no_proxy"); rawurl != "" { + urls = strings.Split(rawurl, ",") + } + + return urls +} + +// EnableAsync enable the async task queue +func (client *Client) EnableAsync(routinePoolSize, maxTaskQueueSize int) { + client.asyncTaskQueue = make(chan func(), maxTaskQueueSize) + for i := 0; i < routinePoolSize; i++ { + go func() { + for client.isRunning { + select { + case task, notClosed := <-client.asyncTaskQueue: + if notClosed { + task() + } + } + } + }() + } +} + +func (client *Client) InitWithAccessKey(regionId, accessKeyId, accessKeySecret string) (err error) { + config := client.InitClientConfig() + credential := &credentials.BaseCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithStsToken(regionId, accessKeyId, accessKeySecret, securityToken string) (err error) { + config := client.InitClientConfig() + credential := &credentials.StsTokenCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + AccessKeyStsToken: securityToken, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRamRoleArn(regionId, 
accessKeyId, accessKeySecret, roleArn, roleSessionName string) (err error) { + config := client.InitClientConfig() + credential := &credentials.RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (err error) { + config := client.InitClientConfig() + credential := &credentials.RamRoleArnCredential{ + AccessKeyId: accessKeyId, + AccessKeySecret: accessKeySecret, + RoleArn: roleArn, + RoleSessionName: roleSessionName, + Policy: policy, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithRsaKeyPair(regionId, publicKeyId, privateKey string, sessionExpiration int) (err error) { + config := client.InitClientConfig() + credential := &credentials.RsaKeyPairCredential{ + PrivateKey: privateKey, + PublicKeyId: publicKeyId, + SessionExpiration: sessionExpiration, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithEcsRamRole(regionId, roleName string) (err error) { + config := client.InitClientConfig() + credential := &credentials.EcsRamRoleCredential{ + RoleName: roleName, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitWithBearerToken(regionId, bearerToken string) (err error) { + config := client.InitClientConfig() + credential := &credentials.BearerTokenCredential{ + BearerToken: bearerToken, + } + return client.InitWithOptions(regionId, config, credential) +} + +func (client *Client) InitClientConfig() (config *Config) { + if client.config != nil { + return client.config + } else { + return NewConfig() + } +} + +func (client *Client) DoAction(request requests.AcsRequest, response responses.AcsResponse) (err error) { + return client.DoActionWithSigner(request, response, nil) +} + +func (client *Client) GetEndpointRules(regionId string, product string) (endpointRaw string) { + if client.EndpointType == "regional" { + endpointRaw = strings.Replace("..aliyuncs.com", "", regionId, 1) + } else { + endpointRaw = ".aliyuncs.com" + } + endpointRaw = strings.Replace(endpointRaw, "", strings.ToLower(product), 1) + if client.Network == "" || client.Network == "public" { + endpointRaw = strings.Replace(endpointRaw, "", "", 1) + } else { + endpointRaw = strings.Replace(endpointRaw, "", "-"+client.Network, 1) + } + return endpointRaw +} + +func (client *Client) buildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (httpRequest *http.Request, err error) { + // add clientVersion + request.GetHeaders()["x-sdk-core-version"] = Version + + regionId := client.regionId + if len(request.GetRegionId()) > 0 { + regionId = request.GetRegionId() + } + + // resolve endpoint + endpoint := request.GetDomain() + if endpoint == "" && client.EndpointType != "" { + if client.EndpointMap != nil && client.Network == "" || client.Network == "public" { + endpoint = client.EndpointMap[regionId] + } + + if endpoint == "" { + endpoint = client.GetEndpointRules(regionId, request.GetProduct()) + } + } + + if endpoint == "" { + resolveParam := &endpoints.ResolveParam{ + Domain: request.GetDomain(), + Product: request.GetProduct(), + RegionId: regionId, + LocationProduct: request.GetLocationServiceCode(), + LocationEndpointType: request.GetLocationEndpointType(), + CommonApi: 
client.ProcessCommonRequest, + } + endpoint, err = endpoints.Resolve(resolveParam) + if err != nil { + return + } + } + + request.SetDomain(endpoint) + if request.GetScheme() == "" { + request.SetScheme(client.config.Scheme) + } + // init request params + err = requests.InitParams(request) + if err != nil { + return + } + + // signature + var finalSigner auth.Signer + if signer != nil { + finalSigner = signer + } else { + finalSigner = client.signer + } + httpRequest, err = buildHttpRequest(request, finalSigner, regionId) + if err == nil { + userAgent := DefaultUserAgent + getSendUserAgent(client.config.UserAgent, client.userAgent, request.GetUserAgent()) + httpRequest.Header.Set("User-Agent", userAgent) + } + + return +} + +func getSendUserAgent(configUserAgent string, clientUserAgent, requestUserAgent map[string]string) string { + realUserAgent := "" + for key1, value1 := range clientUserAgent { + for key2, _ := range requestUserAgent { + if key1 == key2 { + key1 = "" + } + } + if key1 != "" { + realUserAgent += fmt.Sprintf(" %s/%s", key1, value1) + + } + } + for key, value := range requestUserAgent { + realUserAgent += fmt.Sprintf(" %s/%s", key, value) + } + if configUserAgent != "" { + return realUserAgent + fmt.Sprintf(" Extra/%s", configUserAgent) + } + return realUserAgent +} + +func (client *Client) AppendUserAgent(key, value string) { + newkey := true + + if client.userAgent == nil { + client.userAgent = make(map[string]string) + } + if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" { + for tag, _ := range client.userAgent { + if tag == key { + client.userAgent[tag] = value + newkey = false + } + } + if newkey { + client.userAgent[key] = value + } + } +} + +func (client *Client) BuildRequestWithSigner(request requests.AcsRequest, signer auth.Signer) (err error) { + _, err = client.buildRequestWithSigner(request, signer) + return +} + +func (client *Client) getTimeout(request requests.AcsRequest) (time.Duration, time.Duration) { + readTimeout := defaultReadTimeout + connectTimeout := defaultConnectTimeout + + reqReadTimeout := request.GetReadTimeout() + reqConnectTimeout := request.GetConnectTimeout() + if reqReadTimeout != 0*time.Millisecond { + readTimeout = reqReadTimeout + } else if client.readTimeout != 0*time.Millisecond { + readTimeout = client.readTimeout + } else if client.httpClient.Timeout != 0 { + readTimeout = client.httpClient.Timeout + } else if timeout, ok := getAPIMaxTimeout(request.GetProduct(), request.GetActionName()); ok { + readTimeout = timeout + } + + if reqConnectTimeout != 0*time.Millisecond { + connectTimeout = reqConnectTimeout + } else if client.connectTimeout != 0*time.Millisecond { + connectTimeout = client.connectTimeout + } + return readTimeout, connectTimeout +} + +func Timeout(connectTimeout time.Duration) func(cxt context.Context, net, addr string) (c net.Conn, err error) { + return func(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{ + Timeout: connectTimeout, + DualStack: true, + }).DialContext(ctx, network, address) + } +} + +func (client *Client) setTimeout(request requests.AcsRequest) { + readTimeout, connectTimeout := client.getTimeout(request) + client.httpClient.Timeout = readTimeout + if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil { + trans.DialContext = Timeout(connectTimeout) + client.httpClient.Transport = trans + } else { + client.httpClient.Transport = &http.Transport{ + DialContext: Timeout(connectTimeout), + } + } +} + +func (client 
*Client) getHTTPSInsecure(request requests.AcsRequest) (insecure bool) { + if request.GetHTTPSInsecure() != nil { + insecure = *request.GetHTTPSInsecure() + } else { + insecure = client.GetHTTPSInsecure() + } + return insecure +} + +func (client *Client) DoActionWithSigner(request requests.AcsRequest, response responses.AcsResponse, signer auth.Signer) (err error) { + + fieldMap := make(map[string]string) + initLogMsg(fieldMap) + defer func() { + client.printLog(fieldMap, err) + }() + httpRequest, err := client.buildRequestWithSigner(request, signer) + if err != nil { + return + } + + client.setTimeout(request) + proxy, err := client.getHttpProxy(httpRequest.URL.Scheme) + if err != nil { + return err + } + + noProxy := client.getNoProxy(httpRequest.URL.Scheme) + + var flag bool + for _, value := range noProxy { + if value == httpRequest.Host { + flag = true + break + } + } + + // Set whether to ignore certificate validation. + // Default InsecureSkipVerify is false. + if trans, ok := client.httpClient.Transport.(*http.Transport); ok && trans != nil { + trans.TLSClientConfig = &tls.Config{ + InsecureSkipVerify: client.getHTTPSInsecure(request), + } + if proxy != nil && !flag { + trans.Proxy = http.ProxyURL(proxy) + } + client.httpClient.Transport = trans + } + + var httpResponse *http.Response + for retryTimes := 0; retryTimes <= client.config.MaxRetryTime; retryTimes++ { + if proxy != nil && proxy.User != nil { + if password, passwordSet := proxy.User.Password(); passwordSet { + httpRequest.SetBasicAuth(proxy.User.Username(), password) + } + } + if retryTimes > 0 { + client.printLog(fieldMap, err) + initLogMsg(fieldMap) + } + putMsgToMap(fieldMap, httpRequest) + debug("> %s %s %s", httpRequest.Method, httpRequest.URL.RequestURI(), httpRequest.Proto) + debug("> Host: %s", httpRequest.Host) + for key, value := range httpRequest.Header { + debug("> %s: %v", key, strings.Join(value, "")) + } + debug(">") + debug(" Retry Times: %d.", retryTimes) + + startTime := time.Now() + fieldMap["{start_time}"] = startTime.Format("2006-01-02 15:04:05") + httpResponse, err = hookDo(client.httpClient.Do)(httpRequest) + fieldMap["{cost}"] = time.Now().Sub(startTime).String() + if err == nil { + fieldMap["{code}"] = strconv.Itoa(httpResponse.StatusCode) + fieldMap["{res_headers}"] = TransToString(httpResponse.Header) + debug("< %s %s", httpResponse.Proto, httpResponse.Status) + for key, value := range httpResponse.Header { + debug("< %s: %v", key, strings.Join(value, "")) + } + } + debug("<") + // receive error + if err != nil { + debug(" Error: %s.", err.Error()) + if !client.config.AutoRetry { + return + } else if retryTimes >= client.config.MaxRetryTime { + // timeout but reached the max retry times, return + times := strconv.Itoa(retryTimes + 1) + timeoutErrorMsg := fmt.Sprintf(errors.TimeoutErrorMessage, times, times) + if strings.Contains(err.Error(), "Client.Timeout") { + timeoutErrorMsg += " Read timeout. Please set a valid ReadTimeout." + } else { + timeoutErrorMsg += " Connect timeout. Please set a valid ConnectTimeout." 
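+ // The "Client.Timeout" substring above is emitted by net/http when the + // response deadline expires, so any other error on the final retry is + // reported as a connect-side timeout.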
+ } + err = errors.NewClientError(errors.TimeoutErrorCode, timeoutErrorMsg, err) + return + } + } + // if status code >= 500 or timeout, will trigger retry + if client.config.AutoRetry && (err != nil || isServerError(httpResponse)) { + client.setTimeout(request) + // rewrite signatureNonce and signature + httpRequest, err = client.buildRequestWithSigner(request, signer) + // buildHttpRequest(request, finalSigner, regionId) + if err != nil { + return + } + continue + } + break + } + + err = responses.Unmarshal(response, httpResponse, request.GetAcceptFormat()) + fieldMap["{res_body}"] = response.GetHttpContentString() + debug("%s", response.GetHttpContentString()) + // wrap server errors + if serverErr, ok := err.(*errors.ServerError); ok { + var wrapInfo = map[string]string{} + wrapInfo["StringToSign"] = request.GetStringToSign() + err = errors.WrapServerError(serverErr, wrapInfo) + } + return +} + +func putMsgToMap(fieldMap map[string]string, request *http.Request) { + fieldMap["{host}"] = request.Host + fieldMap["{method}"] = request.Method + fieldMap["{uri}"] = request.URL.RequestURI() + fieldMap["{pid}"] = strconv.Itoa(os.Getpid()) + fieldMap["{version}"] = strings.Split(request.Proto, "/")[1] + hostname, _ := os.Hostname() + fieldMap["{hostname}"] = hostname + fieldMap["{req_headers}"] = TransToString(request.Header) + fieldMap["{target}"] = request.URL.Path + request.URL.RawQuery +} + +func buildHttpRequest(request requests.AcsRequest, signer auth.Signer, regionId string) (httpRequest *http.Request, err error) { + err = auth.Sign(request, signer, regionId) + if err != nil { + return + } + requestMethod := request.GetMethod() + requestUrl := request.BuildUrl() + body := request.GetBodyReader() + httpRequest, err = http.NewRequest(requestMethod, requestUrl, body) + if err != nil { + return + } + for key, value := range request.GetHeaders() { + httpRequest.Header[key] = []string{value} + } + // host is a special case + if host, containsHost := request.GetHeaders()["Host"]; containsHost { + httpRequest.Host = host + } + return +} + +func isServerError(httpResponse *http.Response) bool { + return httpResponse.StatusCode >= http.StatusInternalServerError +} + +/** +only block when any one of the following occurs: +1. the asyncTaskQueue is full, increase the queue size to avoid this +2.
Shutdown() is in progress, the client is being closed +**/ +func (client *Client) AddAsyncTask(task func()) (err error) { + if client.asyncTaskQueue != nil { + client.asyncChanLock.RLock() + defer client.asyncChanLock.RUnlock() + if client.isRunning { + client.asyncTaskQueue <- task + } + } else { + err = errors.NewClientError(errors.AsyncFunctionNotEnabledCode, errors.AsyncFunctionNotEnabledMessage, nil) + } + return +} + +func (client *Client) GetConfig() *Config { + return client.config +} + +func NewClient() (client *Client, err error) { + client = &Client{} + err = client.Init() + return +} + +func NewClientWithProvider(regionId string, providers ...provider.Provider) (client *Client, err error) { + client = &Client{} + var pc provider.Provider + if len(providers) == 0 { + pc = provider.DefaultChain + } else { + pc = provider.NewProviderChain(providers) + } + err = client.InitWithProviderChain(regionId, pc) + return +} + +func NewClientWithOptions(regionId string, config *Config, credential auth.Credential) (client *Client, err error) { + client = &Client{} + err = client.InitWithOptions(regionId, config, credential) + return +} + +func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) { + client = &Client{} + err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret) + return +} + +func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken) + return +} + +func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) + return +} + +func NewClientWithRamRoleArnAndPolicy(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArnAndPolicy(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName, policy) + return +} + +func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithEcsRamRole(regionId, roleName) + return +} + +func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) { + client = &Client{} + err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration) + return +} + +func NewClientWithBearerToken(regionId, bearerToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithBearerToken(regionId, bearerToken) + return +} + +func (client *Client) ProcessCommonRequest(request *requests.CommonRequest) (response *responses.CommonResponse, err error) { + request.TransToAcsRequest() + response = responses.NewCommonResponse() + err = client.DoAction(request, response) + return +} + +func (client *Client) ProcessCommonRequestWithSigner(request *requests.CommonRequest, signerInterface interface{}) (response *responses.CommonResponse, err error) { + if signer, isSigner := signerInterface.(auth.Signer); isSigner { + request.TransToAcsRequest() + response = responses.NewCommonResponse() + err = client.DoActionWithSigner(request, response, signer) + return + } + panic("should not be here") +} + +func (client *Client) Shutdown() { + // lock the addAsync() +
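// AddAsyncTask sends on asyncTaskQueue while holding the read lock, so + // acquiring the write lock here guarantees no sender is mid-send when the + // channel is closed below. +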
client.asyncChanLock.Lock() + defer client.asyncChanLock.Unlock() + if client.asyncTaskQueue != nil { + close(client.asyncTaskQueue) + } + client.isRunning = false +} + +// Deprecated: Use NewClientWithRamRoleArn in this package instead. +func NewClientWithStsRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + return NewClientWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) +} + +// Deprecated: Use NewClientWithEcsRamRole in this package instead. +func NewClientWithStsRoleNameOnEcs(regionId string, roleName string) (client *Client, err error) { + return NewClientWithEcsRamRole(regionId, roleName) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go new file mode 100644 index 00000000..5e50166f --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/config.go @@ -0,0 +1,91 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sdk + +import ( + "net/http" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type Config struct { + AutoRetry bool `default:"true"` + MaxRetryTime int `default:"3"` + UserAgent string `default:""` + Debug bool `default:"false"` + HttpTransport *http.Transport `default:""` + EnableAsync bool `default:"false"` + MaxTaskQueueSize int `default:"1000"` + GoRoutinePoolSize int `default:"5"` + Scheme string `default:"HTTP"` + Timeout time.Duration +} + +func NewConfig() (config *Config) { + config = &Config{} + utils.InitStructWithDefaultTag(config) + return +} + +func (c *Config) WithAutoRetry(isAutoRetry bool) *Config { + c.AutoRetry = isAutoRetry + return c +} + +func (c *Config) WithMaxRetryTime(maxRetryTime int) *Config { + c.MaxRetryTime = maxRetryTime + return c +} + +func (c *Config) WithUserAgent(userAgent string) *Config { + c.UserAgent = userAgent + return c +} + +func (c *Config) WithDebug(isDebug bool) *Config { + c.Debug = isDebug + return c +} + +func (c *Config) WithTimeout(timeout time.Duration) *Config { + c.Timeout = timeout + return c +} + +func (c *Config) WithHttpTransport(httpTransport *http.Transport) *Config { + c.HttpTransport = httpTransport + return c +} + +func (c *Config) WithEnableAsync(isEnableAsync bool) *Config { + c.EnableAsync = isEnableAsync + return c +} + +func (c *Config) WithMaxTaskQueueSize(maxTaskQueueSize int) *Config { + c.MaxTaskQueueSize = maxTaskQueueSize + return c +} + +func (c *Config) WithGoRoutinePoolSize(goRoutinePoolSize int) *Config { + c.GoRoutinePoolSize = goRoutinePoolSize + return c +} + +func (c *Config) WithScheme(scheme string) *Config { + c.Scheme = scheme + return c +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go new file mode 100644 index 00000000..a4fc4709 --- /dev/null +++ 
b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/endpoints_config.go @@ -0,0 +1,4126 @@ + +package endpoints + +import ( + "encoding/json" + "fmt" + "sync" +) + +const endpointsJson =`{ + "products": [ + { + "code": "emr", + "document_id": "28140", + "location_service_code": "emr", + "regional_endpoints": [ + { + "region": "cn-qingdao", + "endpoint": "emr.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "emr.eu-west-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "emr.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "emr.ap-northeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "emr.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "emr.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "emr.ap-south-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "emr.us-east-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "emr.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "emr.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "emr.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "emr.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "emr.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "emr.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "emr.aliyuncs.com" + } + ], + "global_endpoint": "emr.aliyuncs.com", + "regional_endpoint_pattern": "emr.[RegionId].aliyuncs.com" + }, + { + "code": "petadata", + "document_id": "", + "location_service_code": "petadata", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "petadata.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "petadata.ap-southeast-2.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "petadata.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "petadata.ap-southeast-5.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "petadata.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "petadata.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "petadata.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "petadata.aliyuncs.com" + } + ], + "global_endpoint": "petadata.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "dbs", + "document_id": "", + "location_service_code": "dbs", + "regional_endpoints": [ + { + "region": "cn-shenzhen", + "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + 
"endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "dbs-api.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "dbs-api.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "dbs-api.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "alidnsgtm", + "document_id": "", + "location_service_code": "alidnsgtm", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "alidns.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "alidns.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "elasticsearch", + "document_id": "", + "location_service_code": "elasticsearch", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "elasticsearch.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "elasticsearch.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "elasticsearch.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "elasticsearch.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "elasticsearch.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "elasticsearch.ap-southeast-3.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "elasticsearch.us-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "elasticsearch.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "elasticsearch.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "elasticsearch.eu-central-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "elasticsearch.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "elasticsearch.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "elasticsearch.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "elasticsearch.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "elasticsearch.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "baas", + "document_id": "", + "location_service_code": "baas", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "baas.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "baas.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "baas.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "baas.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "baas.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "baas.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cr", + "document_id": "60716", + "location_service_code": "cr", + "regional_endpoints": null, + "global_endpoint": "cr.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudap", + "document_id": "", + "location_service_code": "cloudap", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": 
"cloudwf.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "imagesearch", + "document_id": "", + "location_service_code": "imagesearch", + "regional_endpoints": [ + { + "region": "ap-southeast-2", + "endpoint": "imagesearch.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "imagesearch.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "imagesearch.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "imagesearch.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "pts", + "document_id": "", + "location_service_code": "pts", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "pts.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ehs", + "document_id": "", + "location_service_code": "ehs", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "ehpc.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ehpc.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ehpc.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ehpc.cn-qingdao.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ehpc.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ehpc.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ehpc.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ehpc.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ehpc.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ehpc.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ehpc.ap-southeast-2.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "polardb", + "document_id": "58764", + "location_service_code": "polardb", + "regional_endpoints": [ + { + "region": "ap-south-1", + "endpoint": "polardb.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "polardb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "polardb.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "polardb.ap-southeast-5.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "polardb.aliyuncs.com" + }, + { + "code": "r-kvstore", + "document_id": "60831", + "location_service_code": "redisa", + "regional_endpoints": [ + { + "region": "cn-shenzhen", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "r-kvstore.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "r-kvstore.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "r-kvstore.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "r-kvstore.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "r-kvstore.eu-central-1.aliyuncs.com" + }, + { + "region": 
"cn-shanghai", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "r-kvstore.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "r-kvstore.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "r-kvstore.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "r-kvstore.eu-west-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "r-kvstore.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "r-kvstore.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "r-kvstore.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "r-kvstore.ap-southeast-5.aliyuncs.com" + } + ], + "global_endpoint": "r-kvstore.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "xianzhi", + "document_id": "", + "location_service_code": "xianzhi", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "xianzhi.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "pcdn", + "document_id": "", + "location_service_code": "pcdn", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "pcdn.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cdn", + "document_id": "27148", + "location_service_code": "cdn", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cdn.aliyuncs.com" + } + ], + "global_endpoint": "cdn.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudauth", + "document_id": "60687", + "location_service_code": "cloudauth", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cloudauth.aliyuncs.com" + } + ], + "global_endpoint": "cloudauth.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "nas", + "document_id": "62598", + "location_service_code": "nas", + "regional_endpoints": [ + { + "region": "ap-southeast-2", + "endpoint": "nas.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "nas.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "nas.eu-central-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "nas.us-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "nas.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "nas.cn-qingdao.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "nas.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "nas.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "nas.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "nas.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "nas.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "nas.ap-northeast-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "nas.us-east-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "nas.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "nas.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-hongkong", + 
"endpoint": "nas.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "nas.ap-southeast-3.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "alidns", + "document_id": "29739", + "location_service_code": "alidns", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "alidns.aliyuncs.com" + } + ], + "global_endpoint": "alidns.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "dts", + "document_id": "", + "location_service_code": "dts", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "dts.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "dts.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "emas", + "document_id": "", + "location_service_code": "emas", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "mhub.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "mhub.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "dysmsapi", + "document_id": "", + "location_service_code": "dysmsapi", + "regional_endpoints": [ + { + "region": "ap-southeast-3", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "cn-chengdu", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "dysmsapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "dysmsapi.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "dysmsapi.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudwf", + "document_id": "58111", + "location_service_code": "cloudwf", + "regional_endpoints": null, + "global_endpoint": "cloudwf.aliyuncs.com", + 
"regional_endpoint_pattern": "" + }, + { + "code": "fc", + "document_id": "", + "location_service_code": "fc", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "cn-beijing.fc.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ap-southeast-2.fc.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "cn-huhehaote.fc.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "cn-shanghai.fc.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "cn-hangzhou.fc.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "cn-shenzhen.fc.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "saf", + "document_id": "", + "location_service_code": "saf", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "saf.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "saf.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "saf.cn-shenzhen.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "rds", + "document_id": "26223", + "location_service_code": "rds", + "regional_endpoints": [ + { + "region": "ap-northeast-1", + "endpoint": "rds.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "rds.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "rds.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "rds.eu-west-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "rds.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "rds.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "rds.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "rds.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "rds.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "rds.me-east-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "rds.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "rds.aliyuncs.com" + } + ], + "global_endpoint": "rds.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "vpc", + "document_id": "34962", + "location_service_code": "vpc", + "regional_endpoints": [ + { + "region": "ap-south-1", + "endpoint": "vpc.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "vpc.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "vpc.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-hongkong", + 
"endpoint": "vpc.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "vpc.cn-huhehaote.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "vpc.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "vpc.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "vpc.ap-southeast-3.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "vpc.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "vpc.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "vpc.eu-west-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "vpc.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "vpc.aliyuncs.com" + } + ], + "global_endpoint": "vpc.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "gpdb", + "document_id": "", + "location_service_code": "gpdb", + "regional_endpoints": [ + { + "region": "ap-southeast-3", + "endpoint": "gpdb.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "gpdb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "gpdb.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "gpdb.ap-southeast-2.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "gpdb.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "gpdb.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "gpdb.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "gpdb.ap-northeast-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "gpdb.eu-west-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "gpdb.ap-south-1.aliyuncs.com" + } + ], + "global_endpoint": "gpdb.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "yunmarket", + "document_id": "", + "location_service_code": "yunmarket", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "market.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "pvtz", + "document_id": "", + "location_service_code": "pvtz", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "pvtz.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "pvtz.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "oss", + "document_id": "", + "location_service_code": "oss", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "oss-cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "oss-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "oss-cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "oss-cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "oss-cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "oss-ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "oss-us-west-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + 
"endpoint": "oss-cn-qingdao.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "foas", + "document_id": "", + "location_service_code": "foas", + "regional_endpoints": [ + { + "region": "cn-qingdao", + "endpoint": "foas.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "foas.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "foas.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "foas.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "foas.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "foas.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "foas.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ddos", + "document_id": "", + "location_service_code": "ddos", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ddospro.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ddospro.cn-hongkong.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cbn", + "document_id": "", + "location_service_code": "cbn", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "cbn.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "cbn.aliyuncs.com" + } + ], + "global_endpoint": "cbn.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "nlp", + "document_id": "", + "location_service_code": "nlp", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "nlp.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "hsm", + "document_id": "", + "location_service_code": "hsm", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "hsm.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "hsm.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "hsm.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "hsm.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "hsm.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "hsm.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, 
+ { + "code": "ons", + "document_id": "44416", + "location_service_code": "ons", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "ons.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ons.cn-huhehaote.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ons.us-east-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ons.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ons.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ons.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ons.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ons.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ons.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "ons.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ons.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ons.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ons.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ons.eu-central-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ons.eu-west-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ons.us-west-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ons.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ons.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "kms", + "document_id": "", + "location_service_code": "kms", + "regional_endpoints": [ + { + "region": "cn-hongkong", + "endpoint": "kms.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "kms.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "kms.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "kms.cn-qingdao.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "kms.eu-west-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "kms.us-east-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "kms.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "kms.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "kms.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "kms.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "kms.cn-huhehaote.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "kms.me-east-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "kms.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "kms.ap-southeast-3.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "kms.us-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "kms.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "kms.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "kms.eu-central-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "kms.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cps", + "document_id": "", + "location_service_code": "cps", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cloudpush.aliyuncs.com" + } + ], 
+ "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ensdisk", + "document_id": "", + "location_service_code": "ensdisk", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ens.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudapi", + "document_id": "43590", + "location_service_code": "apigateway", + "regional_endpoints": [ + { + "region": "ap-southeast-2", + "endpoint": "apigateway.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "apigateway.ap-south-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "apigateway.us-east-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "apigateway.me-east-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "apigateway.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "apigateway.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "apigateway.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "apigateway.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "apigateway.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "apigateway.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "apigateway.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "apigateway.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "apigateway.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "apigateway.us-west-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "apigateway.cn-shenzhen.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "apigateway.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "apigateway.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "apigateway.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "apigateway.cn-hongkong.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "apigateway.[RegionId].aliyuncs.com" + }, + { + "code": "eci", + "document_id": "", + "location_service_code": "eci", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "eci.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "eci.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "eci.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "eci.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "eci.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "eci.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "onsvip", + "document_id": "", + "location_service_code": "onsvip", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "ons.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ons.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ons.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ons.cn-shenzhen.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ons.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ons.cn-qingdao.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "linkwan", + "document_id": "", + "location_service_code": "linkwan", 
+ "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "linkwan.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "linkwan.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ddosdip", + "document_id": "", + "location_service_code": "ddosdip", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "ddosdip.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "batchcompute", + "document_id": "44717", + "location_service_code": "batchcompute", + "regional_endpoints": [ + { + "region": "us-west-1", + "endpoint": "batchcompute.us-west-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "batchcompute.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "batchcompute.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "batchcompute.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "batchcompute.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "batchcompute.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "batchcompute.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "batchcompute.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "batchcompute.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "batchcompute.[RegionId].aliyuncs.com" + }, + { + "code": "aegis", + "document_id": "28449", + "location_service_code": "vipaegis", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "aegis.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "aegis.ap-southeast-3.aliyuncs.com" + } + ], + "global_endpoint": "aegis.cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "arms", + "document_id": "42924", + "location_service_code": "arms", + "regional_endpoints": [ + { + "region": "cn-hongkong", + "endpoint": "arms.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "arms.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "arms.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "arms.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "arms.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "arms.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "arms.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "arms.[RegionId].aliyuncs.com" + }, + { + "code": "live", + "document_id": "48207", + "location_service_code": "live", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "live.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "live.aliyuncs.com" + } + ], + "global_endpoint": "live.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "alimt", + "document_id": "", + "location_service_code": "alimt", + "regional_endpoints": [ + { + "region": 
"cn-hangzhou", + "endpoint": "mt.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "actiontrail", + "document_id": "", + "location_service_code": "actiontrail", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "actiontrail.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "actiontrail.cn-qingdao.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "actiontrail.us-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "actiontrail.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "actiontrail.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "actiontrail.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "actiontrail.ap-south-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "actiontrail.me-east-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "actiontrail.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "actiontrail.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "actiontrail.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "actiontrail.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "actiontrail.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "actiontrail.ap-northeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "actiontrail.us-west-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "actiontrail.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "actiontrail.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "actiontrail.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "actiontrail.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "smartag", + "document_id": "", + "location_service_code": "smartag", + "regional_endpoints": [ + { + "region": "ap-southeast-3", + "endpoint": "smartag.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "smartag.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "smartag.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "smartag.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "smartag.cn-hongkong.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "smartag.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "smartag.ap-southeast-2.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "vod", + "document_id": "60574", + "location_service_code": "vod", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "vod.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "vod.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "vod.eu-central-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "domain", + "document_id": "42875", + 
"location_service_code": "domain", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "domain-intl.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "domain.aliyuncs.com" + } + ], + "global_endpoint": "domain.aliyuncs.com", + "regional_endpoint_pattern": "domain.aliyuncs.com" + }, + { + "code": "ros", + "document_id": "28899", + "location_service_code": "ros", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ros.aliyuncs.com" + } + ], + "global_endpoint": "ros.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudphoto", + "document_id": "59902", + "location_service_code": "cloudphoto", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "cloudphoto.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "cloudphoto.[RegionId].aliyuncs.com" + }, + { + "code": "rtc", + "document_id": "", + "location_service_code": "rtc", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "rtc.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "odpsmayi", + "document_id": "", + "location_service_code": "odpsmayi", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "bsb.cloud.alipay.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "bsb.cloud.alipay.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ims", + "document_id": "", + "location_service_code": "ims", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ims.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "csb", + "document_id": "64837", + "location_service_code": "csb", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "csb.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "csb.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "csb.[RegionId].aliyuncs.com" + }, + { + "code": "cds", + "document_id": "62887", + "location_service_code": "codepipeline", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "cds.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "cds.cn-beijing.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "ddosbgp", + "document_id": "", + "location_service_code": "ddosbgp", + "regional_endpoints": [ + { + "region": "cn-huhehaote", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ddosbgp.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ddosbgp.us-west-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ddosbgp.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "dybaseapi", + "document_id": "", + "location_service_code": "dybaseapi", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "cn-chengdu", + "endpoint": "dybaseapi.aliyuncs.com" + }, + 
{ + "region": "ap-southeast-3", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "dybaseapi.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "dybaseapi.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ecs", + "document_id": "25484", + "location_service_code": "ecs", + "regional_endpoints": [ + { + "region": "cn-huhehaote", + "endpoint": "ecs.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ecs.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ecs.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ecs.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ecs.ap-southeast-3.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ecs.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ecs.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ecs.eu-west-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "ecs.me-east-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "ecs.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ecs-cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "ecs-cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "ccc", + "document_id": "63027", + "location_service_code": "ccc", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ccc.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ccc.cn-shanghai.aliyuncs.com" + } + ], 
+ "global_endpoint": "", + "regional_endpoint_pattern": "ccc.[RegionId].aliyuncs.com" + }, + { + "code": "cs", + "document_id": "26043", + "location_service_code": "cs", + "regional_endpoints": null, + "global_endpoint": "cs.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "drdspre", + "document_id": "", + "location_service_code": "drdspre", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "drds.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "drds.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "drds.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "drds.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "drds.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "drds.cn-qingdao.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "drds.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "dcdn", + "document_id": "", + "location_service_code": "dcdn", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "dcdn.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "dcdn.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "linkedmall", + "document_id": "", + "location_service_code": "linkedmall", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "linkedmall.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "linkedmall.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "trademark", + "document_id": "", + "location_service_code": "trademark", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "trademark.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "openanalytics", + "document_id": "", + "location_service_code": "openanalytics", + "regional_endpoints": [ + { + "region": "cn-shenzhen", + "endpoint": "openanalytics.cn-shenzhen.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "openanalytics.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "openanalytics.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "openanalytics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "openanalytics.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "openanalytics.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "openanalytics.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "openanalytics.cn-zhangjiakou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "sts", + "document_id": "28756", + "location_service_code": "sts", + "regional_endpoints": null, + "global_endpoint": "sts.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "waf", + "document_id": "62847", + "location_service_code": "waf", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "wafopenapi.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ots", + "document_id": "", + "location_service_code": "ots", + "regional_endpoints": [ + { + "region": "me-east-1", + "endpoint": "ots.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": 
"ots.ap-southeast-5.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ots.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "ots.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ots.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ots.ap-southeast-2.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ots.us-west-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ots.us-east-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ots.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ots.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ots.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ots.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ots.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ots.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ots.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ots.ap-southeast-3.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cloudfirewall", + "document_id": "", + "location_service_code": "cloudfirewall", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cloudfw.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "dm", + "document_id": "29434", + "location_service_code": "dm", + "regional_endpoints": [ + { + "region": "ap-southeast-2", + "endpoint": "dm.ap-southeast-2.aliyuncs.com" + } + ], + "global_endpoint": "dm.aliyuncs.com", + "regional_endpoint_pattern": "dm.[RegionId].aliyuncs.com" + }, + { + "code": "oas", + "document_id": "", + "location_service_code": "oas", + "regional_endpoints": [ + { + "region": "cn-shenzhen", + "endpoint": "cn-shenzhen.oas.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "cn-beijing.oas.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "cn-hangzhou.oas.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ddoscoo", + "document_id": "", + "location_service_code": "ddoscoo", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ddoscoo.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "jaq", + "document_id": "35037", + "location_service_code": "jaq", + "regional_endpoints": null, + "global_endpoint": "jaq.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "iovcc", + "document_id": "", + "location_service_code": "iovcc", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "iovcc.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "sas-api", + "document_id": "28498", + "location_service_code": "sas", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "sas.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "chatbot", + "document_id": "60760", + "location_service_code": "beebot", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "chatbot.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "chatbot.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": 
"chatbot.[RegionId].aliyuncs.com" + }, + { + "code": "airec", + "document_id": "", + "location_service_code": "airec", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "airec.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "airec.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "airec.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "dmsenterprise", + "document_id": "", + "location_service_code": "dmsenterprise", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "dms-enterprise.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "dms-enterprise.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "dms-enterprise.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "dms-enterprise.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "dms-enterprise.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "dms-enterprise.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ivision", + "document_id": "", + "location_service_code": "ivision", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ivision.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ivision.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "odpsplusmayi", + "document_id": "", + "location_service_code": "odpsplusmayi", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "bsb.cloud.alipay.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "bsb.cloud.alipay.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "bsb.cloud.alipay.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "gameshield", + "document_id": "", + "location_service_code": "gameshield", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "gameshield.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "gameshield.cn-zhangjiakou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "scdn", + "document_id": "", + "location_service_code": "scdn", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "scdn.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "hitsdb", + "document_id": "", + "location_service_code": "hitsdb", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "hitsdb.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "hdm", + "document_id": "", + "location_service_code": "hdm", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "hdm-api.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "slb", + "document_id": "27565", + "location_service_code": "slb", + "regional_endpoints": [ + { + "region": "cn-shenzhen", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "slb.eu-west-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "slb.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "slb.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": 
"cn-huhehaote", + "endpoint": "slb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "slb.ap-south-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "slb.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "slb.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "slb.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "slb.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "slb.me-east-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "slb.cn-zhangjiakou.aliyuncs.com" + } + ], + "global_endpoint": "slb.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "green", + "document_id": "28427", + "location_service_code": "green", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "green.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "green.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "green.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "green.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "green.us-west-1.aliyuncs.com" + } + ], + "global_endpoint": "green.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cccvn", + "document_id": "", + "location_service_code": "cccvn", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "voicenavigator.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ddosrewards", + "document_id": "", + "location_service_code": "ddosrewards", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ddosright.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "iot", + "document_id": "30557", + "location_service_code": "iot", + "regional_endpoints": [ + { + "region": "us-east-1", + "endpoint": "iot.us-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "iot.ap-northeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "iot.us-west-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "iot.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "iot.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "iot.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "iot.[RegionId].aliyuncs.com" + }, + { + "code": "bssopenapi", + "document_id": "", + "location_service_code": "bssopenapi", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": 
"me-east-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "business.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "business.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "sca", + "document_id": "", + "location_service_code": "sca", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "qualitycheck.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "luban", + "document_id": "", + "location_service_code": "luban", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "luban.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "luban.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "drdspost", + "document_id": "", + "location_service_code": "drdspost", + "regional_endpoints": [ + { + "region": "cn-shanghai", + "endpoint": "drds.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "drds.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "drds.ap-southeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "drds", + "document_id": "51111", + "location_service_code": "drds", + "regional_endpoints": [ + { + "region": "ap-southeast-1", + "endpoint": "drds.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "drds.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "drds.aliyuncs.com", + "regional_endpoint_pattern": "drds.aliyuncs.com" + }, + { + "code": "httpdns", + "document_id": "52679", + "location_service_code": "httpdns", + "regional_endpoints": null, + "global_endpoint": "httpdns-api.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "cas", + "document_id": "", + "location_service_code": "cas", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "cas.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "cas.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "cas.ap-northeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "cas.eu-central-1.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "cas.me-east-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "hpc", + "document_id": "35201", + "location_service_code": "hpc", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "hpc.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "hpc.aliyuncs.com" + } 
+ ], + "global_endpoint": "hpc.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "ddosbasic", + "document_id": "", + "location_service_code": "ddosbasic", + "regional_endpoints": [ + { + "region": "ap-south-1", + "endpoint": "antiddos-openapi.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "antiddos-openapi.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "antiddos-openapi.ap-southeast-3.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "antiddos-openapi.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "antiddos-openapi.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "antiddos-openapi.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "antiddos-openapi.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "antiddos-openapi.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "antiddos-openapi.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "antiddos.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "antiddos-openapi.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "antiddos.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "clouddesktop", + "document_id": "", + "location_service_code": "clouddesktop", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "clouddesktop.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "clouddesktop.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "clouddesktop.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "clouddesktop.cn-shenzhen.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "uis", + "document_id": "", + "location_service_code": "uis", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "uis.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "imm", + "document_id": "", + "location_service_code": "imm", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "imm.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "imm.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "imm.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "imm.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "imm.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "imm.cn-shenzhen.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ens", + "document_id": "", + "location_service_code": "ens", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ens.aliyuncs.com" + } + ], + 
"global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ram", + "document_id": "28672", + "location_service_code": "ram", + "regional_endpoints": null, + "global_endpoint": "ram.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "hcs_mgw", + "document_id": "", + "location_service_code": "hcs_mgw", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "mgw.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "mgw.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "mgw.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "itaas", + "document_id": "55759", + "location_service_code": "itaas", + "regional_endpoints": null, + "global_endpoint": "itaas.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "qualitycheck", + "document_id": "50807", + "location_service_code": "qualitycheck", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "qualitycheck.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "alikafka", + "document_id": "", + "location_service_code": "alikafka", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "alikafka.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "alikafka.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "alikafka.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "alikafka.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "alikafka.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "alikafka.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "alikafka.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "alikafka.cn-qingdao.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "faas", + "document_id": "", + "location_service_code": "faas", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "faas.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "faas.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "faas.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "faas.cn-beijing.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "alidfs", + "document_id": "", + "location_service_code": "alidfs", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "dfs.cn-beijing.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "dfs.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "cms", + "document_id": "28615", + "location_service_code": "cms", + "regional_endpoints": [ + { + "region": "ap-southeast-3", + "endpoint": "metrics.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "metrics.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "metrics.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "metrics.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": 
"metrics.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "metrics.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "metrics.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "metrics.cn-hangzhou.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "domain-intl", + "document_id": "", + "location_service_code": "domain-intl", + "regional_endpoints": null, + "global_endpoint": "domain-intl.aliyuncs.com", + "regional_endpoint_pattern": "domain-intl.aliyuncs.com" + }, + { + "code": "kvstore", + "document_id": "", + "location_service_code": "kvstore", + "regional_endpoints": [ + { + "region": "ap-northeast-1", + "endpoint": "r-kvstore.ap-northeast-1.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ccs", + "document_id": "", + "location_service_code": "ccs", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "ccs.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "ess", + "document_id": "25925", + "location_service_code": "ess", + "regional_endpoints": [ + { + "region": "cn-huhehaote", + "endpoint": "ess.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "ess.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "ess.eu-west-1.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "ess.ap-southeast-5.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "ess.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "ess.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "ess.me-east-1.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "ess.ap-northeast-1.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "ess.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "ess.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "ess.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "ess.aliyuncs.com" + } + ], + "global_endpoint": "ess.aliyuncs.com", + "regional_endpoint_pattern": 
"ess.[RegionId].aliyuncs.com" + }, + { + "code": "dds", + "document_id": "61715", + "location_service_code": "dds", + "regional_endpoints": [ + { + "region": "me-east-1", + "endpoint": "mongodb.me-east-1.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "mongodb.ap-southeast-3.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "mongodb.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "mongodb.ap-northeast-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "mongodb.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "mongodb.eu-west-1.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-huhehaote", + "endpoint": "mongodb.cn-huhehaote.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "mongodb.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "mongodb.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "mongodb.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "mongodb.ap-south-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "mongodb.aliyuncs.com" + } + ], + "global_endpoint": "mongodb.aliyuncs.com", + "regional_endpoint_pattern": "mongodb.[RegionId].aliyuncs.com" + }, + { + "code": "mts", + "document_id": "29212", + "location_service_code": "mts", + "regional_endpoints": [ + { + "region": "cn-beijing", + "endpoint": "mts.cn-beijing.aliyuncs.com" + }, + { + "region": "ap-northeast-1", + "endpoint": "mts.ap-northeast-1.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "mts.cn-hongkong.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "mts.cn-shenzhen.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "mts.cn-zhangjiakou.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "mts.ap-south-1.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "mts.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "mts.ap-southeast-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "mts.us-west-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "mts.eu-central-1.aliyuncs.com" + }, + { + "region": "eu-west-1", + "endpoint": "mts.eu-west-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "mts.cn-hangzhou.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "push", + "document_id": "30074", + "location_service_code": "push", + "regional_endpoints": null, + "global_endpoint": "cloudpush.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "hcs_sgw", + "document_id": "", + "location_service_code": "hcs_sgw", + "regional_endpoints": [ + { + "region": "eu-central-1", + "endpoint": "sgw.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-zhangjiakou", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": 
"sgw.ap-southeast-1.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-hongkong", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "sgw.ap-southeast-2.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "sgw.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "hbase", + "document_id": "", + "location_service_code": "hbase", + "regional_endpoints": [ + { + "region": "cn-huhehaote", + "endpoint": "hbase.cn-huhehaote.aliyuncs.com" + }, + { + "region": "ap-south-1", + "endpoint": "hbase.ap-south-1.aliyuncs.com" + }, + { + "region": "us-west-1", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "me-east-1", + "endpoint": "hbase.me-east-1.aliyuncs.com" + }, + { + "region": "eu-central-1", + "endpoint": "hbase.eu-central-1.aliyuncs.com" + }, + { + "region": "cn-qingdao", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "cn-shenzhen", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "ap-southeast-2", + "endpoint": "hbase.ap-southeast-2.aliyuncs.com" + }, + { + "region": "ap-southeast-3", + "endpoint": "hbase.ap-southeast-3.aliyuncs.com" + }, + { + "region": "cn-hangzhou", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "us-east-1", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "ap-southeast-5", + "endpoint": "hbase.ap-southeast-5.aliyuncs.com" + }, + { + "region": "cn-beijing", + "endpoint": "hbase.aliyuncs.com" + }, + { + "region": "ap-southeast-1", + "endpoint": "hbase.aliyuncs.com" + } + ], + "global_endpoint": "hbase.aliyuncs.com", + "regional_endpoint_pattern": "" + }, + { + "code": "bastionhost", + "document_id": "", + "location_service_code": "bastionhost", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "yundun-bastionhost.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + }, + { + "code": "vs", + "document_id": "", + "location_service_code": "vs", + "regional_endpoints": [ + { + "region": "cn-hangzhou", + "endpoint": "vs.cn-hangzhou.aliyuncs.com" + }, + { + "region": "cn-shanghai", + "endpoint": "vs.cn-shanghai.aliyuncs.com" + } + ], + "global_endpoint": "", + "regional_endpoint_pattern": "" + } + ] +}` +var initOnce sync.Once +var data interface{} + +func getEndpointConfigData() interface{} { + initOnce.Do(func() { + err := json.Unmarshal([]byte(endpointsJson), &data) + if err != nil { + panic(fmt.Sprintf("init endpoint config data failed. %s", err)) + } + }) + return data +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go new file mode 100644 index 00000000..160e62cb --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_global_resolver.go @@ -0,0 +1,43 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/jmespath/go-jmespath"
+)
+
+type LocalGlobalResolver struct {
+}
+
+func (resolver *LocalGlobalResolver) GetName() (name string) {
+    name = "local global resolver"
+    return
+}
+
+func (resolver *LocalGlobalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+    // get the global endpoints configs
+    endpointExpression := fmt.Sprintf("products[?code=='%s'].global_endpoint", strings.ToLower(param.Product))
+    endpointData, err := jmespath.Search(endpointExpression, getEndpointConfigData())
+    if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 {
+        endpoint = endpointData.([]interface{})[0].(string)
+        support = len(endpoint) > 0
+        return
+    }
+    support = false
+    return
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go
new file mode 100644
index 00000000..7fee64d4
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/local_regional_resolver.go
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+    "fmt"
+    "strings"
+
+    "github.com/jmespath/go-jmespath"
+)
+
+type LocalRegionalResolver struct {
+}
+
+func (resolver *LocalRegionalResolver) GetName() (name string) {
+    name = "local regional resolver"
+    return
+}
+
+func (resolver *LocalRegionalResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+    // get the regional endpoints configs
+    regionalExpression := fmt.Sprintf("products[?code=='%s'].regional_endpoints", strings.ToLower(param.Product))
+    regionalData, err := jmespath.Search(regionalExpression, getEndpointConfigData())
+    if err == nil && regionalData != nil && len(regionalData.([]interface{})) > 0 {
+        endpointExpression := fmt.Sprintf("[0][?region=='%s'].endpoint", strings.ToLower(param.RegionId))
+        var endpointData interface{}
+        endpointData, err = jmespath.Search(endpointExpression, regionalData)
+        if err == nil && endpointData != nil && len(endpointData.([]interface{})) > 0 {
+            endpoint = endpointData.([]interface{})[0].(string)
+            support = len(endpoint) > 0
+            return
+        }
+    }
+    support = false
+    return
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go
new file mode 100644
index 00000000..cc354cc4
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/location_resolver.go
@@ -0,0 +1,176 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package endpoints
+
+import (
+    "encoding/json"
+    "sync"
+    "time"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+)
+
+const (
+    // EndpointCacheExpireTime ...
+    EndpointCacheExpireTime = 3600 //Seconds
+)
+
+// Cache caches endpoint for specific product and region
+type Cache struct {
+    sync.RWMutex
+    cache map[string]interface{}
+}
+
+// Get ...
+func (c *Cache) Get(k string) (v interface{}) {
+    c.RLock()
+    v = c.cache[k]
+    c.RUnlock()
+    return
+}
+
+// Set ...
+func (c *Cache) Set(k string, v interface{}) {
+    c.Lock()
+    c.cache[k] = v
+    c.Unlock()
+}
+
+var lastClearTimePerProduct = &Cache{cache: make(map[string]interface{})}
+var endpointCache = &Cache{cache: make(map[string]interface{})}
+
+// LocationResolver ...
+type LocationResolver struct {
+}
+
+func (resolver *LocationResolver) GetName() (name string) {
+    name = "location resolver"
+    return
+}
+
+// TryResolve resolves endpoint giving product and region
+func (resolver *LocationResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+    if len(param.LocationProduct) <= 0 {
+        support = false
+        return
+    }
+
+    //get from cache
+    cacheKey := param.Product + "#" + param.RegionId
+    var ok bool
+    endpoint, ok = endpointCache.Get(cacheKey).(string)
+
+    if ok && len(endpoint) > 0 && !CheckCacheIsExpire(cacheKey) {
+        support = true
+        return
+    }
+
+    //get from remote
+    getEndpointRequest := requests.NewCommonRequest()
+
+    getEndpointRequest.Product = "Location"
+    getEndpointRequest.Version = "2015-06-12"
+    getEndpointRequest.ApiName = "DescribeEndpoints"
+    getEndpointRequest.Domain = "location-readonly.aliyuncs.com"
+    getEndpointRequest.Method = "GET"
+    getEndpointRequest.Scheme = requests.HTTPS
+
+    getEndpointRequest.QueryParams["Id"] = param.RegionId
+    getEndpointRequest.QueryParams["ServiceCode"] = param.LocationProduct
+    if len(param.LocationEndpointType) > 0 {
+        getEndpointRequest.QueryParams["Type"] = param.LocationEndpointType
+    } else {
+        getEndpointRequest.QueryParams["Type"] = "openAPI"
+    }
+
+    response, err := param.CommonApi(getEndpointRequest)
+    if err != nil {
+        support = false
+        return
+    }
+
+    if !response.IsSuccess() {
+        support = false
+        return
+    }
+
+    var getEndpointResponse GetEndpointResponse
+    err = json.Unmarshal([]byte(response.GetHttpContentString()), &getEndpointResponse)
+    if err != nil {
+        support = false
+        return
+    }
+
+    if !getEndpointResponse.Success || getEndpointResponse.Endpoints == nil {
+        support = false
+        return
+    }
+    if len(getEndpointResponse.Endpoints.Endpoint) <= 0 {
+        support = false
+        return
+    }
+    if len(getEndpointResponse.Endpoints.Endpoint[0].Endpoint) > 0 {
+        endpoint = getEndpointResponse.Endpoints.Endpoint[0].Endpoint
+        endpointCache.Set(cacheKey, endpoint)
+        lastClearTimePerProduct.Set(cacheKey, time.Now().Unix())
+        support = true
+        return
+    }
+
+    support = false
+    return
+}
+
+// CheckCacheIsExpire ...
+func CheckCacheIsExpire(cacheKey string) bool {
+    lastClearTime, ok := lastClearTimePerProduct.Get(cacheKey).(int64)
+    if !ok {
+        return true
+    }
+
+    if lastClearTime <= 0 {
+        lastClearTime = time.Now().Unix()
+        lastClearTimePerProduct.Set(cacheKey, lastClearTime)
+    }
+
+    now := time.Now().Unix()
+    elapsedTime := now - lastClearTime
+    if elapsedTime > EndpointCacheExpireTime {
+        return true
+    }
+
+    return false
+}
+
+// GetEndpointResponse ...
+type GetEndpointResponse struct {
+    Endpoints *EndpointsObj
+    RequestId string
+    Success   bool
+}
+
+// EndpointsObj ...
+type EndpointsObj struct {
+    Endpoint []EndpointObj
+}
+
+// EndpointObj ...
+type EndpointObj struct {
+    // Protocols map[string]string
+    Type        string
+    Namespace   string
+    Id          string
+    SerivceCode string
+    Endpoint    string
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
new file mode 100644
index 00000000..e39f5336
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/mapping_resolver.go
@@ -0,0 +1,48 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+    "fmt"
+    "strings"
+)
+
+const keyFormatter = "%s::%s"
+
+var endpointMapping = make(map[string]string)
+
+// AddEndpointMapping Use product id and region id as key to store the endpoint into inner map
+func AddEndpointMapping(regionId, productId, endpoint string) (err error) {
+    key := fmt.Sprintf(keyFormatter, strings.ToLower(regionId), strings.ToLower(productId))
+    endpointMapping[key] = endpoint
+    return nil
+}
+
+// MappingResolver the mapping resolver type
+type MappingResolver struct {
+}
+
+// GetName get the resolver name: "mapping resolver"
+func (resolver *MappingResolver) GetName() (name string) {
+    name = "mapping resolver"
+    return
+}
+
+// TryResolve use Product and RegionId as key to find endpoint from inner map
+func (resolver *MappingResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+    key := fmt.Sprintf(keyFormatter, strings.ToLower(param.RegionId), strings.ToLower(param.Product))
+    endpoint, contains := endpointMapping[key]
+    return endpoint, contains, nil
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
new file mode 100644
index 00000000..5e1e3053
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/resolver.go
@@ -0,0 +1,98 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+import (
+    "encoding/json"
+    "fmt"
+    "sync"
+
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
+    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+)
+
+var debug utils.Debug
+
+func init() {
+    debug = utils.Init("sdk")
+}
+
+const (
+    ResolveEndpointUserGuideLink = ""
+)
+
+var once sync.Once
+var resolvers []Resolver
+
+type Resolver interface {
+    TryResolve(param *ResolveParam) (endpoint string, support bool, err error)
+    GetName() (name string)
+}
+
+// Resolve resolve endpoint with params
+// It will resolve with each supported resolver until anyone resolved
+func Resolve(param *ResolveParam) (endpoint string, err error) {
+    supportedResolvers := getAllResolvers()
+    var lastErr error
+    for _, resolver := range supportedResolvers {
+        endpoint, supported, resolveErr := resolver.TryResolve(param)
+        if resolveErr != nil {
+            lastErr = resolveErr
+        }
+
+        if supported {
+            debug("resolve endpoint with %s\n", param)
+            debug("\t%s by resolver(%s)\n", endpoint, resolver.GetName())
+            return endpoint, nil
+        }
+    }
+
+    // not support
+    errorMsg := fmt.Sprintf(errors.CanNotResolveEndpointErrorMessage, param, ResolveEndpointUserGuideLink)
+    err = errors.NewClientError(errors.CanNotResolveEndpointErrorCode, errorMsg, lastErr)
+    return
+}
+
+func getAllResolvers() []Resolver {
+    once.Do(func() {
+        resolvers = []Resolver{
+            &SimpleHostResolver{},
+            &MappingResolver{},
+            &LocationResolver{},
+            &LocalRegionalResolver{},
+            &LocalGlobalResolver{},
+        }
+    })
+    return resolvers
+}
+
+type ResolveParam struct {
+    Domain               string
+    Product              string
+    RegionId             string
+    LocationProduct      string
+    LocationEndpointType string
+    CommonApi            func(request *requests.CommonRequest) (response *responses.CommonResponse, err error) `json:"-"`
+}
+
+func (param *ResolveParam) String() string {
+    jsonBytes, err := json.Marshal(param)
+    if err != nil {
+        return fmt.Sprint("ResolveParam.String() process error:", err)
+    }
+    return string(jsonBytes)
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
new file mode 100644
index 00000000..9ba2346c
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/endpoints/simple_host_resolver.go
@@ -0,0 +1,33 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package endpoints
+
+// SimpleHostResolver the simple host resolver type
+type SimpleHostResolver struct {
+}
+
+// GetName get the resolver name: "simple host resolver"
+func (resolver *SimpleHostResolver) GetName() (name string) {
+    name = "simple host resolver"
+    return
+}
+
+// TryResolve if the Domain exist in param, use it as endpoint
+func (resolver *SimpleHostResolver) TryResolve(param *ResolveParam) (endpoint string, support bool, err error) {
+    if support = len(param.Domain) > 0; support {
+        endpoint = param.Domain
+    }
+    return
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go
new file mode 100644
index 00000000..1e2d9c00
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/client_error.go
@@ -0,0 +1,92 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package errors
+
+import "fmt"
+
+const (
+    DefaultClientErrorStatus = 400
+    DefaultClientErrorCode   = "SDK.ClientError"
+
+    UnsupportedCredentialErrorCode    = "SDK.UnsupportedCredential"
+    UnsupportedCredentialErrorMessage = "Specified credential (type = %s) is not supported, please check"
+
+    CanNotResolveEndpointErrorCode    = "SDK.CanNotResolveEndpoint"
+    CanNotResolveEndpointErrorMessage = "Can not resolve endpoint(param = %s), please check your accessKey with secret, and read the user guide\n %s"
+
+    UnsupportedParamPositionErrorCode    = "SDK.UnsupportedParamPosition"
+    UnsupportedParamPositionErrorMessage = "Specified param position (%s) is not supported, please upgrade sdk and retry"
+
+    AsyncFunctionNotEnabledCode    = "SDK.AsyncFunctionNotEnabled"
+    AsyncFunctionNotEnabledMessage = "Async function is not enabled in client, please invoke 'client.EnableAsync' function"
+
+    UnknownRequestTypeErrorCode    = "SDK.UnknownRequestType"
+    UnknownRequestTypeErrorMessage = "Unknown Request Type: %s"
+
+    MissingParamErrorCode = "SDK.MissingParam"
+    InvalidParamErrorCode = "SDK.InvalidParam"
+
+    JsonUnmarshalErrorCode    = "SDK.JsonUnmarshalError"
+    JsonUnmarshalErrorMessage = "Failed to unmarshal response, but you can get the data via response.GetHttpStatusCode() and response.GetHttpContentString()"
+
+    TimeoutErrorCode    = "SDK.TimeoutError"
+    TimeoutErrorMessage = "The request timed out %s times(%s for retry), perhaps we should have the threshold raised a little?"
+) + +type ClientError struct { + errorCode string + message string + originError error +} + +func NewClientError(errorCode, message string, originErr error) Error { + return &ClientError{ + errorCode: errorCode, + message: message, + originError: originErr, + } +} + +func (err *ClientError) Error() string { + clientErrMsg := fmt.Sprintf("[%s] %s", err.ErrorCode(), err.message) + if err.originError != nil { + return clientErrMsg + "\ncaused by:\n" + err.originError.Error() + } + return clientErrMsg +} + +func (err *ClientError) OriginError() error { + return err.originError +} + +func (*ClientError) HttpStatus() int { + return DefaultClientErrorStatus +} + +func (err *ClientError) ErrorCode() string { + if err.errorCode == "" { + return DefaultClientErrorCode + } else { + return err.errorCode + } +} + +func (err *ClientError) Message() string { + return err.message +} + +func (err *ClientError) String() string { + return err.Error() +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go new file mode 100644 index 00000000..49962f3b --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/error.go @@ -0,0 +1,23 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package errors + +type Error interface { + error + HttpStatus() int + ErrorCode() string + Message() string + OriginError() error +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go new file mode 100644 index 00000000..1b781041 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/server_error.go @@ -0,0 +1,123 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package errors + +import ( + "encoding/json" + "fmt" + + "github.com/jmespath/go-jmespath" +) + +var wrapperList = []ServerErrorWrapper{ + &SignatureDostNotMatchWrapper{}, +} + +type ServerError struct { + httpStatus int + requestId string + hostId string + errorCode string + recommend string + message string + comment string +} + +type ServerErrorWrapper interface { + tryWrap(error *ServerError, wrapInfo map[string]string) bool +} + +func (err *ServerError) Error() string { + return fmt.Sprintf("SDK.ServerError\nErrorCode: %s\nRecommend: %s\nRequestId: %s\nMessage: %s", + err.errorCode, err.comment+err.recommend, err.requestId, err.message) +} + +func NewServerError(httpStatus int, responseContent, comment string) Error { + result := &ServerError{ + httpStatus: httpStatus, + message: responseContent, + comment: comment, + } + + var data interface{} + err := json.Unmarshal([]byte(responseContent), &data) + if err == nil { + requestId, _ := jmespath.Search("RequestId", data) + hostId, _ := jmespath.Search("HostId", data) + errorCode, _ := jmespath.Search("Code", data) + recommend, _ := jmespath.Search("Recommend", data) + message, _ := jmespath.Search("Message", data) + + if requestId != nil { + result.requestId = requestId.(string) + } + if hostId != nil { + result.hostId = hostId.(string) + } + if errorCode != nil { + result.errorCode = errorCode.(string) + } + if recommend != nil { + result.recommend = recommend.(string) + } + if message != nil { + result.message = message.(string) + } + } + + return result +} + +func WrapServerError(originError *ServerError, wrapInfo map[string]string) *ServerError { + for _, wrapper := range wrapperList { + ok := wrapper.tryWrap(originError, wrapInfo) + if ok { + return originError + } + } + return originError +} + +func (err *ServerError) HttpStatus() int { + return err.httpStatus +} + +func (err *ServerError) ErrorCode() string { + return err.errorCode +} + +func (err *ServerError) Message() string { + return err.message +} + +func (err *ServerError) OriginError() error { + return nil +} + +func (err *ServerError) HostId() string { + return err.hostId +} + +func (err *ServerError) RequestId() string { + return err.requestId +} + +func (err *ServerError) Recommend() string { + return err.recommend +} + +func (err *ServerError) Comment() string { + return err.comment +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go new file mode 100644 index 00000000..4b09d7d7 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors/signature_does_not_match_wrapper.go @@ -0,0 +1,45 @@ +package errors + +import ( + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +const SignatureDostNotMatchErrorCode = "SignatureDoesNotMatch" +const IncompleteSignatureErrorCode = "IncompleteSignature" +const MessageContain = "server string to sign is:" + +var debug utils.Debug + +func init() { + debug = utils.Init("sdk") +} + +type SignatureDostNotMatchWrapper struct { +} + +func (*SignatureDostNotMatchWrapper) tryWrap(error *ServerError, wrapInfo map[string]string) (ok bool) { + clientStringToSign := wrapInfo["StringToSign"] + if (error.errorCode == SignatureDostNotMatchErrorCode || error.errorCode == IncompleteSignatureErrorCode) && clientStringToSign != "" { + message := error.message + if strings.Contains(message, MessageContain) { + str := 
strings.Split(message, MessageContain)
+			serverStringToSign := str[1]
+
+			if clientStringToSign == serverStringToSign {
+				// the user's AccessKeySecret is wrong
+				error.recommend = "InvalidAccessKeySecret: Please check your AccessKeySecret"
+			} else {
+				debug("Client StringToSign: %s", clientStringToSign)
+				debug("Server StringToSign: %s", serverStringToSign)
+				error.recommend = "This may be a bug with the SDK and we hope you can submit this question in the " +
+					"github issue(https://github.com/aliyun/alibaba-cloud-sdk-go/issues), thanks very much"
+			}
+		}
+		ok = true
+		return
+	}
+	ok = false
+	return
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go
new file mode 100644
index 00000000..a01a7bbc
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/logger.go
@@ -0,0 +1,116 @@
+package sdk
+
+import (
+	"encoding/json"
+	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils"
+	"io"
+	"log"
+	"os"
+	"strings"
+	"time"
+)
+
+var logChannel string
+var defaultChannel = "AlibabaCloud"
+
+type Logger struct {
+	*log.Logger
+	formatTemplate string
+	isOpen         bool
+	lastLogMsg     string
+}
+
+var defaultLoggerTemplate = `{time} {channel}: "{method} {uri} HTTP/{version}" {code} {cost} {hostname}`
+var loggerParam = []string{"{time}", "{start_time}", "{ts}", "{channel}", "{pid}", "{host}", "{method}", "{uri}", "{version}", "{target}", "{hostname}", "{code}", "{error}", "{req_headers}", "{res_body}", "{res_headers}", "{cost}"}
+
+func initLogMsg(fieldMap map[string]string) {
+	for _, value := range loggerParam {
+		fieldMap[value] = ""
+	}
+}
+
+func (client *Client) GetLogger() *Logger {
+	return client.logger
+}
+
+func (client *Client) GetLoggerMsg() string {
+	if client.logger == nil {
+		client.SetLogger("", "", os.Stdout, "")
+	}
+	return client.logger.lastLogMsg
+}
+
+func (client *Client) SetLogger(level string, channel string, out io.Writer, template string) {
+	if level == "" {
+		level = "info"
+	}
+
+	logChannel = "AlibabaCloud"
+	if channel != "" {
+		logChannel = channel
+	}
+	log := log.New(out, "["+strings.ToUpper(level)+"]", log.Lshortfile)
+	if template == "" {
+		template = defaultLoggerTemplate
+	}
+
+	client.logger = &Logger{
+		Logger:         log,
+		formatTemplate: template,
+		isOpen:         true,
+	}
+}
+
+func (client *Client) OpenLogger() {
+	if client.logger == nil {
+		client.SetLogger("", "", os.Stdout, "")
+	}
+	client.logger.isOpen = true
+}
+
+func (client *Client) CloseLogger() {
+	if client.logger != nil {
+		client.logger.isOpen = false
+	}
+}
+
+func (client *Client) SetTemplate(template string) {
+	if client.logger == nil {
+		client.SetLogger("", "", os.Stdout, "")
+	}
+	client.logger.formatTemplate = template
+}
+
+func (client *Client) GetTemplate() string {
+	if client.logger == nil {
+		client.SetLogger("", "", os.Stdout, "")
+	}
+	return client.logger.formatTemplate
+}
+
+func TransToString(object interface{}) string {
+	byt, err := json.Marshal(object)
+	if err != nil {
+		return ""
+	}
+	return string(byt)
+}
+
+func (client *Client) printLog(fieldMap map[string]string, err error) {
+	if err != nil {
+		fieldMap["{error}"] = err.Error()
+	}
+	fieldMap["{time}"] = time.Now().Format("2006-01-02 15:04:05")
+	fieldMap["{ts}"] = utils.GetTimeInFormatISO8601()
+	fieldMap["{channel}"] = logChannel
+	if client.logger != nil {
+		logMsg := client.logger.formatTemplate
+		for key, value := range fieldMap {
+			logMsg = strings.Replace(logMsg, key, value, -1)
+		}
+
client.logger.lastLogMsg = logMsg + if client.logger.isOpen == true { + client.logger.Output(2, logMsg) + } + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go new file mode 100644 index 00000000..fa22db1f --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/acs_request.go @@ -0,0 +1,378 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package requests + +import ( + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" +) + +const ( + RPC = "RPC" + ROA = "ROA" + + HTTP = "HTTP" + HTTPS = "HTTPS" + + DefaultHttpPort = "80" + + GET = "GET" + PUT = "PUT" + POST = "POST" + DELETE = "DELETE" + HEAD = "HEAD" + OPTIONS = "OPTIONS" + + Json = "application/json" + Xml = "application/xml" + Raw = "application/octet-stream" + Form = "application/x-www-form-urlencoded" + + Header = "Header" + Query = "Query" + Body = "Body" + Path = "Path" + + HeaderSeparator = "\n" +) + +// interface +type AcsRequest interface { + GetScheme() string + GetMethod() string + GetDomain() string + GetPort() string + GetRegionId() string + GetHeaders() map[string]string + GetQueryParams() map[string]string + GetFormParams() map[string]string + GetContent() []byte + GetBodyReader() io.Reader + GetStyle() string + GetProduct() string + GetVersion() string + SetVersion(version string) + GetActionName() string + GetAcceptFormat() string + GetLocationServiceCode() string + GetLocationEndpointType() string + GetReadTimeout() time.Duration + GetConnectTimeout() time.Duration + SetReadTimeout(readTimeout time.Duration) + SetConnectTimeout(connectTimeout time.Duration) + SetHTTPSInsecure(isInsecure bool) + GetHTTPSInsecure() *bool + + GetUserAgent() map[string]string + + SetStringToSign(stringToSign string) + GetStringToSign() string + + SetDomain(domain string) + SetContent(content []byte) + SetScheme(scheme string) + BuildUrl() string + BuildQueries() string + + addHeaderParam(key, value string) + addQueryParam(key, value string) + addFormParam(key, value string) + addPathParam(key, value string) +} + +// base class +type baseRequest struct { + Scheme string + Method string + Domain string + Port string + RegionId string + ReadTimeout time.Duration + ConnectTimeout time.Duration + isInsecure *bool + + userAgent map[string]string + product string + version string + + actionName string + + AcceptFormat string + + QueryParams map[string]string + Headers map[string]string + FormParams map[string]string + Content []byte + + locationServiceCode string + locationEndpointType string + + queries string + + stringToSign string +} + +func (request *baseRequest) GetQueryParams() map[string]string { + return request.QueryParams +} + +func (request *baseRequest) GetFormParams() map[string]string { + return request.FormParams +} + +func (request *baseRequest) GetReadTimeout() 
time.Duration { + return request.ReadTimeout +} + +func (request *baseRequest) GetConnectTimeout() time.Duration { + return request.ConnectTimeout +} + +func (request *baseRequest) SetReadTimeout(readTimeout time.Duration) { + request.ReadTimeout = readTimeout +} + +func (request *baseRequest) SetConnectTimeout(connectTimeout time.Duration) { + request.ConnectTimeout = connectTimeout +} + +func (request *baseRequest) GetHTTPSInsecure() *bool { + return request.isInsecure +} + +func (request *baseRequest) SetHTTPSInsecure(isInsecure bool) { + request.isInsecure = &isInsecure +} + +func (request *baseRequest) GetContent() []byte { + return request.Content +} + +func (request *baseRequest) SetVersion(version string) { + request.version = version +} + +func (request *baseRequest) GetVersion() string { + return request.version +} + +func (request *baseRequest) GetActionName() string { + return request.actionName +} + +func (request *baseRequest) SetContent(content []byte) { + request.Content = content +} + +func (request *baseRequest) GetUserAgent() map[string]string { + return request.userAgent +} + +func (request *baseRequest) AppendUserAgent(key, value string) { + newkey := true + if request.userAgent == nil { + request.userAgent = make(map[string]string) + } + if strings.ToLower(key) != "core" && strings.ToLower(key) != "go" { + for tag, _ := range request.userAgent { + if tag == key { + request.userAgent[tag] = value + newkey = false + } + } + if newkey { + request.userAgent[key] = value + } + } +} + +func (request *baseRequest) addHeaderParam(key, value string) { + request.Headers[key] = value +} + +func (request *baseRequest) addQueryParam(key, value string) { + request.QueryParams[key] = value +} + +func (request *baseRequest) addFormParam(key, value string) { + request.FormParams[key] = value +} + +func (request *baseRequest) GetAcceptFormat() string { + return request.AcceptFormat +} + +func (request *baseRequest) GetLocationServiceCode() string { + return request.locationServiceCode +} + +func (request *baseRequest) GetLocationEndpointType() string { + return request.locationEndpointType +} + +func (request *baseRequest) GetProduct() string { + return request.product +} + +func (request *baseRequest) GetScheme() string { + return request.Scheme +} + +func (request *baseRequest) SetScheme(scheme string) { + request.Scheme = scheme +} + +func (request *baseRequest) GetMethod() string { + return request.Method +} + +func (request *baseRequest) GetDomain() string { + return request.Domain +} + +func (request *baseRequest) SetDomain(host string) { + request.Domain = host +} + +func (request *baseRequest) GetPort() string { + return request.Port +} + +func (request *baseRequest) GetRegionId() string { + return request.RegionId +} + +func (request *baseRequest) GetHeaders() map[string]string { + return request.Headers +} + +func (request *baseRequest) SetContentType(contentType string) { + request.addHeaderParam("Content-Type", contentType) +} + +func (request *baseRequest) GetContentType() (contentType string, contains bool) { + contentType, contains = request.Headers["Content-Type"] + return +} + +func (request *baseRequest) SetStringToSign(stringToSign string) { + request.stringToSign = stringToSign +} + +func (request *baseRequest) GetStringToSign() string { + return request.stringToSign +} + +func defaultBaseRequest() (request *baseRequest) { + request = &baseRequest{ + Scheme: "", + AcceptFormat: "JSON", + Method: GET, + QueryParams: make(map[string]string), + Headers: 
map[string]string{ + "x-sdk-client": "golang/1.0.0", + "x-sdk-invoke-type": "normal", + "Accept-Encoding": "identity", + }, + FormParams: make(map[string]string), + } + return +} + +func InitParams(request AcsRequest) (err error) { + requestValue := reflect.ValueOf(request).Elem() + err = flatRepeatedList(requestValue, request, "", "") + return +} + +func flatRepeatedList(dataValue reflect.Value, request AcsRequest, position, prefix string) (err error) { + dataType := dataValue.Type() + for i := 0; i < dataType.NumField(); i++ { + field := dataType.Field(i) + name, containsNameTag := field.Tag.Lookup("name") + fieldPosition := position + if fieldPosition == "" { + fieldPosition, _ = field.Tag.Lookup("position") + } + typeTag, containsTypeTag := field.Tag.Lookup("type") + if containsNameTag { + if !containsTypeTag { + // simple param + key := prefix + name + value := dataValue.Field(i).String() + if dataValue.Field(i).Kind().String() == "map" { + byt, _ := json.Marshal(dataValue.Field(i).Interface()) + value = string(byt) + } + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else if typeTag == "Repeated" { + // repeated param + repeatedFieldValue := dataValue.Field(i) + if repeatedFieldValue.Kind() != reflect.Slice { + // possible value: {"[]string", "*[]struct"}, we must call Elem() in the last condition + repeatedFieldValue = repeatedFieldValue.Elem() + } + if repeatedFieldValue.IsValid() && !repeatedFieldValue.IsNil() { + for m := 0; m < repeatedFieldValue.Len(); m++ { + elementValue := repeatedFieldValue.Index(m) + key := prefix + name + "." + strconv.Itoa(m+1) + if elementValue.Type().Kind().String() == "string" { + value := elementValue.String() + err = addParam(request, fieldPosition, key, value) + if err != nil { + return + } + } else { + err = flatRepeatedList(elementValue, request, fieldPosition, key+".") + if err != nil { + return + } + } + } + } + } + } + } + return +} + +func addParam(request AcsRequest, position, name, value string) (err error) { + if len(value) > 0 { + switch position { + case Header: + request.addHeaderParam(name, value) + case Query: + request.addQueryParam(name, value) + case Path: + request.addPathParam(name, value) + case Body: + request.addFormParam(name, value) + default: + errMsg := fmt.Sprintf(errors.UnsupportedParamPositionErrorMessage, position) + err = errors.NewClientError(errors.UnsupportedParamPositionErrorCode, errMsg, nil) + } + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go new file mode 100644 index 00000000..80c17009 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/common_request.go @@ -0,0 +1,108 @@ +package requests + +import ( + "bytes" + "fmt" + "io" + "sort" + "strings" +) + +type CommonRequest struct { + *baseRequest + + Version string + ApiName string + Product string + ServiceCode string + + // roa params + PathPattern string + PathParams map[string]string + + Ontology AcsRequest +} + +func NewCommonRequest() (request *CommonRequest) { + request = &CommonRequest{ + baseRequest: defaultBaseRequest(), + } + request.Headers["x-sdk-invoke-type"] = "common" + request.PathParams = make(map[string]string) + return +} + +func (request *CommonRequest) String() string { + request.TransToAcsRequest() + + resultBuilder := bytes.Buffer{} + + mapOutput := func(m map[string]string) { + if len(m) > 0 { + sortedKeys := 
make([]string, 0) + for k := range m { + sortedKeys = append(sortedKeys, k) + } + + // sort 'string' key in increasing order + sort.Strings(sortedKeys) + + for _, key := range sortedKeys { + resultBuilder.WriteString(key + ": " + m[key] + "\n") + } + } + } + + // Request Line + resultBuilder.WriteString(fmt.Sprintf("%s %s %s/1.1\n", request.Method, request.BuildQueries(), strings.ToUpper(request.Scheme))) + + // Headers + resultBuilder.WriteString("Host" + ": " + request.Domain + "\n") + mapOutput(request.Headers) + + resultBuilder.WriteString("\n") + // Body + if len(request.Content) > 0 { + resultBuilder.WriteString(string(request.Content) + "\n") + } else { + mapOutput(request.FormParams) + } + + return resultBuilder.String() +} + +func (request *CommonRequest) TransToAcsRequest() { + if len(request.PathPattern) > 0 { + roaRequest := &RoaRequest{} + roaRequest.initWithCommonRequest(request) + request.Ontology = roaRequest + } else { + rpcRequest := &RpcRequest{} + rpcRequest.baseRequest = request.baseRequest + rpcRequest.product = request.Product + rpcRequest.version = request.Version + rpcRequest.locationServiceCode = request.ServiceCode + rpcRequest.actionName = request.ApiName + request.Ontology = rpcRequest + } +} + +func (request *CommonRequest) BuildUrl() string { + return request.Ontology.BuildUrl() +} + +func (request *CommonRequest) BuildQueries() string { + return request.Ontology.BuildQueries() +} + +func (request *CommonRequest) GetBodyReader() io.Reader { + return request.Ontology.GetBodyReader() +} + +func (request *CommonRequest) GetStyle() string { + return request.Ontology.GetStyle() +} + +func (request *CommonRequest) addPathParam(key, value string) { + request.PathParams[key] = value +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go new file mode 100644 index 00000000..70b856e3 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/roa_request.go @@ -0,0 +1,152 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package requests + +import ( + "bytes" + "fmt" + "io" + "net/url" + "sort" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type RoaRequest struct { + *baseRequest + pathPattern string + PathParams map[string]string +} + +func (*RoaRequest) GetStyle() string { + return ROA +} + +func (request *RoaRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } else if len(request.Content) > 0 { + return bytes.NewReader(request.Content) + } else { + return nil + } +} + +// for sign method, need not url encoded +func (request *RoaRequest) BuildQueries() string { + return request.buildQueries() +} + +func (request *RoaRequest) buildPath() string { + path := request.pathPattern + for key, value := range request.PathParams { + path = strings.Replace(path, "["+key+"]", value, 1) + } + return path +} + +func (request *RoaRequest) buildQueries() string { + // replace path params with value + path := request.buildPath() + queryParams := request.QueryParams + // sort QueryParams by key + var queryKeys []string + for key := range queryParams { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // append urlBuilder + urlBuilder := bytes.Buffer{} + urlBuilder.WriteString(path) + if len(queryKeys) > 0 { + urlBuilder.WriteString("?") + } + for i := 0; i < len(queryKeys); i++ { + queryKey := queryKeys[i] + urlBuilder.WriteString(queryKey) + if value := queryParams[queryKey]; len(value) > 0 { + urlBuilder.WriteString("=") + urlBuilder.WriteString(value) + } + if i < len(queryKeys)-1 { + urlBuilder.WriteString("&") + } + } + result := urlBuilder.String() + result = popStandardUrlencode(result) + return result +} + +func (request *RoaRequest) buildQueryString() string { + queryParams := request.QueryParams + // sort QueryParams by key + q := url.Values{} + for key, value := range queryParams { + q.Add(key, value) + } + return q.Encode() +} + +func popStandardUrlencode(stringToSign string) (result string) { + result = strings.Replace(stringToSign, "+", "%20", -1) + result = strings.Replace(result, "*", "%2A", -1) + result = strings.Replace(result, "%7E", "~", -1) + return +} + +func (request *RoaRequest) BuildUrl() string { + // for network trans, need url encoded + scheme := strings.ToLower(request.Scheme) + domain := request.Domain + port := request.Port + path := request.buildPath() + url := fmt.Sprintf("%s://%s:%s%s", scheme, domain, port, path) + querystring := request.buildQueryString() + if len(querystring) > 0 { + url = fmt.Sprintf("%s?%s", url, querystring) + } + return url +} + +func (request *RoaRequest) addPathParam(key, value string) { + request.PathParams[key] = value +} + +func (request *RoaRequest) InitWithApiInfo(product, version, action, uriPattern, serviceCode, endpointType string) { + request.baseRequest = defaultBaseRequest() + request.PathParams = make(map[string]string) + request.Headers["x-acs-version"] = version + request.pathPattern = uriPattern + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType + request.product = product + //request.version = version + request.actionName = action +} + +func (request *RoaRequest) initWithCommonRequest(commonRequest *CommonRequest) { + request.baseRequest = commonRequest.baseRequest + request.PathParams = commonRequest.PathParams + request.product = commonRequest.Product + //request.version = commonRequest.Version + 
request.Headers["x-acs-version"] = commonRequest.Version + request.actionName = commonRequest.ApiName + request.pathPattern = commonRequest.PathPattern + request.locationServiceCode = commonRequest.ServiceCode + request.locationEndpointType = "" +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go new file mode 100644 index 00000000..01be6fd0 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/rpc_request.go @@ -0,0 +1,79 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package requests + +import ( + "fmt" + "io" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils" +) + +type RpcRequest struct { + *baseRequest +} + +func (request *RpcRequest) init() { + request.baseRequest = defaultBaseRequest() + request.Method = POST +} + +func (*RpcRequest) GetStyle() string { + return RPC +} + +func (request *RpcRequest) GetBodyReader() io.Reader { + if request.FormParams != nil && len(request.FormParams) > 0 { + formString := utils.GetUrlFormedMap(request.FormParams) + return strings.NewReader(formString) + } else { + return strings.NewReader("") + } +} + +func (request *RpcRequest) BuildQueries() string { + request.queries = "/?" 
+ utils.GetUrlFormedMap(request.QueryParams) + return request.queries +} + +func (request *RpcRequest) BuildUrl() string { + url := fmt.Sprintf("%s://%s", strings.ToLower(request.Scheme), request.Domain) + if len(request.Port) > 0 { + url = fmt.Sprintf("%s:%s", url, request.Port) + } + return url + request.BuildQueries() +} + +func (request *RpcRequest) GetVersion() string { + return request.version +} + +func (request *RpcRequest) GetActionName() string { + return request.actionName +} + +func (request *RpcRequest) addPathParam(key, value string) { + panic("not support") +} + +func (request *RpcRequest) InitWithApiInfo(product, version, action, serviceCode, endpointType string) { + request.init() + request.product = product + request.version = version + request.actionName = action + request.locationServiceCode = serviceCode + request.locationEndpointType = endpointType +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go new file mode 100644 index 00000000..28af63ea --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests/types.go @@ -0,0 +1,53 @@ +package requests + +import "strconv" + +type Integer string + +func NewInteger(integer int) Integer { + return Integer(strconv.Itoa(integer)) +} + +func (integer Integer) HasValue() bool { + return integer != "" +} + +func (integer Integer) GetValue() (int, error) { + return strconv.Atoi(string(integer)) +} + +func NewInteger64(integer int64) Integer { + return Integer(strconv.FormatInt(integer, 10)) +} + +func (integer Integer) GetValue64() (int64, error) { + return strconv.ParseInt(string(integer), 10, 0) +} + +type Boolean string + +func NewBoolean(bool bool) Boolean { + return Boolean(strconv.FormatBool(bool)) +} + +func (boolean Boolean) HasValue() bool { + return boolean != "" +} + +func (boolean Boolean) GetValue() (bool, error) { + return strconv.ParseBool(string(boolean)) +} + +type Float string + +func NewFloat(f float64) Float { + return Float(strconv.FormatFloat(f, 'f', 6, 64)) +} + +func (float Float) HasValue() bool { + return float != "" +} + +func (float Float) GetValue() (float64, error) { + return strconv.ParseFloat(string(float), 64) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go new file mode 100644 index 00000000..36604fe8 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/json_parser.go @@ -0,0 +1,328 @@ +package responses + +import ( + "encoding/json" + "io" + "math" + "strconv" + "strings" + "unsafe" + + jsoniter "github.com/json-iterator/go" +) + +const maxUint = ^uint(0) +const maxInt = int(maxUint >> 1) +const minInt = -maxInt - 1 + +var jsonParser jsoniter.API + +func init() { + registerBetterFuzzyDecoder() + jsonParser = jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() +} + +func registerBetterFuzzyDecoder() { + jsoniter.RegisterTypeDecoder("string", &nullableFuzzyStringDecoder{}) + jsoniter.RegisterTypeDecoder("bool", &fuzzyBoolDecoder{}) + jsoniter.RegisterTypeDecoder("float32", &nullableFuzzyFloat32Decoder{}) + jsoniter.RegisterTypeDecoder("float64", &nullableFuzzyFloat64Decoder{}) + jsoniter.RegisterTypeDecoder("int", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + 
if isFloat { + val := iter.ReadFloat64() + if val > float64(maxInt) || val < float64(minInt) { + iter.ReportError("fuzzy decode int", "exceed range") + return + } + *((*int)(ptr)) = int(val) + } else { + *((*int)(ptr)) = iter.ReadInt() + } + }}) + jsoniter.RegisterTypeDecoder("uint", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(maxUint) || val < 0 { + iter.ReportError("fuzzy decode uint", "exceed range") + return + } + *((*uint)(ptr)) = uint(val) + } else { + *((*uint)(ptr)) = iter.ReadUint() + } + }}) + jsoniter.RegisterTypeDecoder("int8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt8) || val < float64(math.MinInt8) { + iter.ReportError("fuzzy decode int8", "exceed range") + return + } + *((*int8)(ptr)) = int8(val) + } else { + *((*int8)(ptr)) = iter.ReadInt8() + } + }}) + jsoniter.RegisterTypeDecoder("uint8", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint8) || val < 0 { + iter.ReportError("fuzzy decode uint8", "exceed range") + return + } + *((*uint8)(ptr)) = uint8(val) + } else { + *((*uint8)(ptr)) = iter.ReadUint8() + } + }}) + jsoniter.RegisterTypeDecoder("int16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt16) || val < float64(math.MinInt16) { + iter.ReportError("fuzzy decode int16", "exceed range") + return + } + *((*int16)(ptr)) = int16(val) + } else { + *((*int16)(ptr)) = iter.ReadInt16() + } + }}) + jsoniter.RegisterTypeDecoder("uint16", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint16) || val < 0 { + iter.ReportError("fuzzy decode uint16", "exceed range") + return + } + *((*uint16)(ptr)) = uint16(val) + } else { + *((*uint16)(ptr)) = iter.ReadUint16() + } + }}) + jsoniter.RegisterTypeDecoder("int32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt32) || val < float64(math.MinInt32) { + iter.ReportError("fuzzy decode int32", "exceed range") + return + } + *((*int32)(ptr)) = int32(val) + } else { + *((*int32)(ptr)) = iter.ReadInt32() + } + }}) + jsoniter.RegisterTypeDecoder("uint32", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint32) || val < 0 { + iter.ReportError("fuzzy decode uint32", "exceed range") + return + } + *((*uint32)(ptr)) = uint32(val) + } else { + *((*uint32)(ptr)) = iter.ReadUint32() + } + }}) + jsoniter.RegisterTypeDecoder("int64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxInt64) || val < float64(math.MinInt64) { + iter.ReportError("fuzzy decode int64", "exceed range") + return + } + *((*int64)(ptr)) = int64(val) + } else { + *((*int64)(ptr)) = iter.ReadInt64() + } + }}) + jsoniter.RegisterTypeDecoder("uint64", &nullableFuzzyIntegerDecoder{func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) { + if 
isFloat { + val := iter.ReadFloat64() + if val > float64(math.MaxUint64) || val < 0 { + iter.ReportError("fuzzy decode uint64", "exceed range") + return + } + *((*uint64)(ptr)) = uint64(val) + } else { + *((*uint64)(ptr)) = iter.ReadUint64() + } + }}) +} + +type nullableFuzzyStringDecoder struct { +} + +func (decoder *nullableFuzzyStringDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + *((*string)(ptr)) = string(number) + case jsoniter.StringValue: + *((*string)(ptr)) = iter.ReadString() + case jsoniter.BoolValue: + *((*string)(ptr)) = strconv.FormatBool(iter.ReadBool()) + case jsoniter.NilValue: + iter.ReadNil() + *((*string)(ptr)) = "" + default: + iter.ReportError("fuzzyStringDecoder", "not number or string or bool") + } +} + +type fuzzyBoolDecoder struct { +} + +func (decoder *fuzzyBoolDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + switch valueType { + case jsoniter.BoolValue: + *((*bool)(ptr)) = iter.ReadBool() + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + num, err := number.Int64() + if err != nil { + iter.ReportError("fuzzyBoolDecoder", "get value from json.number failed") + } + if num == 0 { + *((*bool)(ptr)) = false + } else { + *((*bool)(ptr)) = true + } + case jsoniter.StringValue: + strValue := strings.ToLower(iter.ReadString()) + if strValue == "true" { + *((*bool)(ptr)) = true + } else if strValue == "false" || strValue == "" { + *((*bool)(ptr)) = false + } else { + iter.ReportError("fuzzyBoolDecoder", "unsupported bool value: "+strValue) + } + case jsoniter.NilValue: + iter.ReadNil() + *((*bool)(ptr)) = false + default: + iter.ReportError("fuzzyBoolDecoder", "not number or string or nil") + } +} + +type nullableFuzzyIntegerDecoder struct { + fun func(isFloat bool, ptr unsafe.Pointer, iter *jsoniter.Iterator) +} + +func (decoder *nullableFuzzyIntegerDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + var number json.Number + iter.ReadVal(&number) + str = string(number) + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + str = "0" + } + case jsoniter.BoolValue: + if iter.ReadBool() { + str = "1" + } else { + str = "0" + } + case jsoniter.NilValue: + iter.ReadNil() + str = "0" + default: + iter.ReportError("fuzzyIntegerDecoder", "not number or string") + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + isFloat := strings.IndexByte(str, '.') != -1 + decoder.fun(isFloat, ptr, newIter) + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } +} + +type nullableFuzzyFloat32Decoder struct { +} + +func (decoder *nullableFuzzyFloat32Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + valueType := iter.WhatIsNext() + var str string + switch valueType { + case jsoniter.NumberValue: + *((*float32)(ptr)) = iter.ReadFloat32() + case jsoniter.StringValue: + str = iter.ReadString() + // support empty string + if str == "" { + *((*float32)(ptr)) = 0 + return + } + newIter := iter.Pool().BorrowIterator([]byte(str)) + defer iter.Pool().ReturnIterator(newIter) + *((*float32)(ptr)) = newIter.ReadFloat32() + if newIter.Error != nil && newIter.Error != io.EOF { + iter.Error = newIter.Error + } + case 
jsoniter.BoolValue:
+		// support bool to float32
+		if iter.ReadBool() {
+			*((*float32)(ptr)) = 1
+		} else {
+			*((*float32)(ptr)) = 0
+		}
+	case jsoniter.NilValue:
+		iter.ReadNil()
+		*((*float32)(ptr)) = 0
+	default:
+		iter.ReportError("nullableFuzzyFloat32Decoder", "not number or string")
+	}
+}
+
+type nullableFuzzyFloat64Decoder struct {
+}
+
+func (decoder *nullableFuzzyFloat64Decoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+	valueType := iter.WhatIsNext()
+	var str string
+	switch valueType {
+	case jsoniter.NumberValue:
+		*((*float64)(ptr)) = iter.ReadFloat64()
+	case jsoniter.StringValue:
+		str = iter.ReadString()
+		// support empty string
+		if str == "" {
+			*((*float64)(ptr)) = 0
+			return
+		}
+		newIter := iter.Pool().BorrowIterator([]byte(str))
+		defer iter.Pool().ReturnIterator(newIter)
+		*((*float64)(ptr)) = newIter.ReadFloat64()
+		if newIter.Error != nil && newIter.Error != io.EOF {
+			iter.Error = newIter.Error
+		}
+	case jsoniter.BoolValue:
+		// support bool to float64
+		if iter.ReadBool() {
+			*((*float64)(ptr)) = 1
+		} else {
+			*((*float64)(ptr)) = 0
+		}
+	case jsoniter.NilValue:
+		// support null value
+		iter.ReadNil()
+		*((*float64)(ptr)) = 0
+	default:
+		iter.ReportError("nullableFuzzyFloat64Decoder", "not number or string")
+	}
+}
diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go
new file mode 100644
index 00000000..53a156b7
--- /dev/null
+++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses/response.go
@@ -0,0 +1,144 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package responses + +import ( + "bytes" + "encoding/xml" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" +) + +type AcsResponse interface { + IsSuccess() bool + GetHttpStatus() int + GetHttpHeaders() map[string][]string + GetHttpContentString() string + GetHttpContentBytes() []byte + GetOriginHttpResponse() *http.Response + parseFromHttpResponse(httpResponse *http.Response) error +} + +// Unmarshal object from http response body to target Response +func Unmarshal(response AcsResponse, httpResponse *http.Response, format string) (err error) { + err = response.parseFromHttpResponse(httpResponse) + if err != nil { + return + } + if !response.IsSuccess() { + err = errors.NewServerError(response.GetHttpStatus(), response.GetHttpContentString(), "") + return + } + + if _, isCommonResponse := response.(*CommonResponse); isCommonResponse { + // common response need not unmarshal + return + } + + if len(response.GetHttpContentBytes()) == 0 { + return + } + + if strings.ToUpper(format) == "JSON" { + err = jsonParser.Unmarshal(response.GetHttpContentBytes(), response) + if err != nil { + err = errors.NewClientError(errors.JsonUnmarshalErrorCode, errors.JsonUnmarshalErrorMessage, err) + } + } else if strings.ToUpper(format) == "XML" { + err = xml.Unmarshal(response.GetHttpContentBytes(), response) + } + return +} + +type BaseResponse struct { + httpStatus int + httpHeaders map[string][]string + httpContentString string + httpContentBytes []byte + originHttpResponse *http.Response +} + +func (baseResponse *BaseResponse) GetHttpStatus() int { + return baseResponse.httpStatus +} + +func (baseResponse *BaseResponse) GetHttpHeaders() map[string][]string { + return baseResponse.httpHeaders +} + +func (baseResponse *BaseResponse) GetHttpContentString() string { + return baseResponse.httpContentString +} + +func (baseResponse *BaseResponse) GetHttpContentBytes() []byte { + return baseResponse.httpContentBytes +} + +func (baseResponse *BaseResponse) GetOriginHttpResponse() *http.Response { + return baseResponse.originHttpResponse +} + +func (baseResponse *BaseResponse) IsSuccess() bool { + if baseResponse.GetHttpStatus() >= 200 && baseResponse.GetHttpStatus() < 300 { + return true + } + + return false +} + +func (baseResponse *BaseResponse) parseFromHttpResponse(httpResponse *http.Response) (err error) { + defer httpResponse.Body.Close() + body, err := ioutil.ReadAll(httpResponse.Body) + if err != nil { + return + } + baseResponse.httpStatus = httpResponse.StatusCode + baseResponse.httpHeaders = httpResponse.Header + baseResponse.httpContentBytes = body + baseResponse.httpContentString = string(body) + baseResponse.originHttpResponse = httpResponse + return +} + +func (baseResponse *BaseResponse) String() string { + resultBuilder := bytes.Buffer{} + // statusCode + // resultBuilder.WriteString("\n") + resultBuilder.WriteString(fmt.Sprintf("%s %s\n", baseResponse.originHttpResponse.Proto, baseResponse.originHttpResponse.Status)) + // httpHeaders + //resultBuilder.WriteString("Headers:\n") + for key, value := range baseResponse.httpHeaders { + resultBuilder.WriteString(key + ": " + strings.Join(value, ";") + "\n") + } + resultBuilder.WriteString("\n") + // content + //resultBuilder.WriteString("Content:\n") + resultBuilder.WriteString(baseResponse.httpContentString + "\n") + return resultBuilder.String() +} + +type CommonResponse struct { + *BaseResponse +} + +func NewCommonResponse() (response *CommonResponse) { + return &CommonResponse{ + 
BaseResponse: &BaseResponse{}, + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go new file mode 100644 index 00000000..09440d27 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/debug.go @@ -0,0 +1,36 @@ +package utils + +import ( + "fmt" + "os" + "strings" +) + +type Debug func(format string, v ...interface{}) + +var hookGetEnv = func() string { + return os.Getenv("DEBUG") +} + +var hookPrint = func(input string) { + fmt.Println(input) +} + +func Init(flag string) Debug { + enable := false + + env := hookGetEnv() + parts := strings.Split(env, ",") + for _, part := range parts { + if part == flag { + enable = true + break + } + } + + return func(format string, v ...interface{}) { + if enable { + hookPrint(fmt.Sprintf(format, v...)) + } + } +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go new file mode 100644 index 00000000..f8a3ad38 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/sdk/utils/utils.go @@ -0,0 +1,141 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package utils + +import ( + "crypto/md5" + "crypto/rand" + "encoding/base64" + "encoding/hex" + "hash" + rand2 "math/rand" + "net/url" + "reflect" + "strconv" + "time" +) + +type UUID [16]byte + +const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + +func GetUUID() (uuidHex string) { + uuid := NewUUID() + uuidHex = hex.EncodeToString(uuid[:]) + return +} + +func RandStringBytes(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = letterBytes[rand2.Intn(len(letterBytes))] + } + return string(b) +} + +func GetMD5Base64(bytes []byte) (base64Value string) { + md5Ctx := md5.New() + md5Ctx.Write(bytes) + md5Value := md5Ctx.Sum(nil) + base64Value = base64.StdEncoding.EncodeToString(md5Value) + return +} + +func GetTimeInFormatISO8601() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("2006-01-02T15:04:05Z") +} + +func GetTimeInFormatRFC2616() (timeStr string) { + gmt := time.FixedZone("GMT", 0) + + return time.Now().In(gmt).Format("Mon, 02 Jan 2006 15:04:05 GMT") +} + +func GetUrlFormedMap(source map[string]string) (urlEncoded string) { + urlEncoder := url.Values{} + for key, value := range source { + urlEncoder.Add(key, value) + } + urlEncoded = urlEncoder.Encode() + return +} + +func InitStructWithDefaultTag(bean interface{}) { + configType := reflect.TypeOf(bean) + for i := 0; i < configType.Elem().NumField(); i++ { + field := configType.Elem().Field(i) + defaultValue := field.Tag.Get("default") + if defaultValue == "" { + continue + } + setter := reflect.ValueOf(bean).Elem().Field(i) + switch field.Type.String() { + case "int": + intValue, _ := strconv.ParseInt(defaultValue, 10, 64) + setter.SetInt(intValue) + case "time.Duration": + intValue, _ := strconv.ParseInt(defaultValue, 10, 64) + setter.SetInt(intValue) + case "string": + setter.SetString(defaultValue) + case "bool": + boolValue, _ := strconv.ParseBool(defaultValue) + setter.SetBool(boolValue) + } + } +} + +func NewUUID() UUID { + ns := UUID{} + safeRandom(ns[:]) + u := newFromHash(md5.New(), ns, RandStringBytes(16)) + u[6] = (u[6] & 0x0f) | (byte(2) << 4) + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + + return u +} + +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +func safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go new file mode 100644 index 00000000..64ae84c9 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/client.go @@ -0,0 +1,81 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth" +) + +// Client is the sdk client struct, each func corresponds to an OpenAPI +type Client struct { + sdk.Client +} + +// NewClient creates a sdk client with environment variables +func NewClient() (client *Client, err error) { + client = &Client{} + err = client.Init() + return +} + +// NewClientWithOptions creates a sdk client with regionId/sdkConfig/credential +// this is the common api to create a sdk client +func NewClientWithOptions(regionId string, config *sdk.Config, credential auth.Credential) (client *Client, err error) { + client = &Client{} + err = client.InitWithOptions(regionId, config, credential) + return +} + +// NewClientWithAccessKey is a shortcut to create sdk client with accesskey +// usage: https://help.aliyun.com/document_detail/66217.html +func NewClientWithAccessKey(regionId, accessKeyId, accessKeySecret string) (client *Client, err error) { + client = &Client{} + err = client.InitWithAccessKey(regionId, accessKeyId, accessKeySecret) + return +} + +// NewClientWithStsToken is a shortcut to create sdk client with sts token +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken string) (client *Client, err error) { + client = &Client{} + err = client.InitWithStsToken(regionId, stsAccessKeyId, stsAccessKeySecret, stsToken) + return +} + +// NewClientWithRamRoleArn is a shortcut to create sdk client with ram roleArn +// usage: https://help.aliyun.com/document_detail/66222.html +func NewClientWithRamRoleArn(regionId string, accessKeyId, accessKeySecret, roleArn, roleSessionName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithRamRoleArn(regionId, accessKeyId, accessKeySecret, roleArn, roleSessionName) + return +} + +// NewClientWithEcsRamRole is a shortcut to create sdk client with ecs ram role +// usage: https://help.aliyun.com/document_detail/66223.html +func NewClientWithEcsRamRole(regionId string, roleName string) (client *Client, err error) { + client = &Client{} + err = client.InitWithEcsRamRole(regionId, roleName) + return +} + +// NewClientWithRsaKeyPair is a shortcut to create sdk client with rsa key pair +// attention: rsa key pair auth is only Japan regions available +func NewClientWithRsaKeyPair(regionId string, publicKeyId, privateKey string, sessionExpiration int) (client *Client, err error) { + client = &Client{} + err = client.InitWithRsaKeyPair(regionId, publicKeyId, privateKey, sessionExpiration) + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go new file mode 100644 index 00000000..06b3d6ed --- /dev/null +++ 
b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoint.go @@ -0,0 +1,111 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeEndpoint invokes the location.DescribeEndpoint API synchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +func (client *Client) DescribeEndpoint(request *DescribeEndpointRequest) (response *DescribeEndpointResponse, err error) { + response = CreateDescribeEndpointResponse() + err = client.DoAction(request, response) + return +} + +// DescribeEndpointWithChan invokes the location.DescribeEndpoint API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointWithChan(request *DescribeEndpointRequest) (<-chan *DescribeEndpointResponse, <-chan error) { + responseChan := make(chan *DescribeEndpointResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeEndpoint(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeEndpointWithCallback invokes the location.DescribeEndpoint API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoint.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointWithCallback(request *DescribeEndpointRequest, callback func(response *DescribeEndpointResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeEndpointResponse + var err error + defer close(result) + response, err = client.DescribeEndpoint(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeEndpointRequest is the request struct for api DescribeEndpoint +type DescribeEndpointRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` + ServiceCode string `position:"Query" name:"ServiceCode"` + Id string `position:"Query" name:"Id"` +} + +// DescribeEndpointResponse is the response struct for api DescribeEndpoint +type DescribeEndpointResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Id string `json:"Id" xml:"Id"` + Namespace string `json:"Namespace" xml:"Namespace"` + SerivceCode 
string `json:"SerivceCode" xml:"SerivceCode"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInDescribeEndpoint `json:"Protocols" xml:"Protocols"` +} + +// CreateDescribeEndpointRequest creates a request to invoke DescribeEndpoint API +func CreateDescribeEndpointRequest() (request *DescribeEndpointRequest) { + request = &DescribeEndpointRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeEndpoint", "location", "openAPI") + return +} + +// CreateDescribeEndpointResponse creates a response to parse from DescribeEndpoint response +func CreateDescribeEndpointResponse() (response *DescribeEndpointResponse) { + response = &DescribeEndpointResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go new file mode 100644 index 00000000..95c6e787 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_endpoints.go @@ -0,0 +1,107 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
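For reference, a minimal synchronous call against the generated client above; `NewClientWithAccessKey` is the constructor generated in the package's client.go (not part of this hunk), and the region and credentials are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aliyun/alibaba-cloud-sdk-go/services/location"
)

func main() {
	// Placeholder region and credentials.
	client, err := location.NewClientWithAccessKey("cn-hangzhou", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		log.Fatal(err)
	}

	request := location.CreateDescribeEndpointRequest()
	request.ServiceCode = "oss" // product whose endpoint to resolve
	request.Id = "cn-hangzhou"  // region id

	response, err := client.DescribeEndpoint(request)
	if err != nil {
		log.Fatal(err)
	}
	// Note: SerivceCode (sic) is the SDK's own field spelling.
	fmt.Printf("%s endpoint: %s\n", response.SerivceCode, response.Endpoint)
}
```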
+ +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeEndpoints invokes the location.DescribeEndpoints API synchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +func (client *Client) DescribeEndpoints(request *DescribeEndpointsRequest) (response *DescribeEndpointsResponse, err error) { + response = CreateDescribeEndpointsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeEndpointsWithChan invokes the location.DescribeEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointsWithChan(request *DescribeEndpointsRequest) (<-chan *DescribeEndpointsResponse, <-chan error) { + responseChan := make(chan *DescribeEndpointsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeEndpoints(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeEndpointsWithCallback invokes the location.DescribeEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/describeendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeEndpointsWithCallback(request *DescribeEndpointsRequest, callback func(response *DescribeEndpointsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeEndpointsResponse + var err error + defer close(result) + response, err = client.DescribeEndpoints(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeEndpointsRequest is the request struct for api DescribeEndpoints +type DescribeEndpointsRequest struct { + *requests.RpcRequest + ServiceCode string `position:"Query" name:"ServiceCode"` + Id string `position:"Query" name:"Id"` + Type string `position:"Query" name:"Type"` +} + +// DescribeEndpointsResponse is the response struct for api DescribeEndpoints +type DescribeEndpointsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + Endpoints Endpoints `json:"Endpoints" xml:"Endpoints"` +} + +// CreateDescribeEndpointsRequest creates a request to invoke DescribeEndpoints API +func CreateDescribeEndpointsRequest() (request *DescribeEndpointsRequest) { + request = &DescribeEndpointsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeEndpoints", "location", "openAPI") + return +} + +// CreateDescribeEndpointsResponse creates a response to parse from DescribeEndpoints response +func CreateDescribeEndpointsResponse() (response *DescribeEndpointsResponse) { + response = &DescribeEndpointsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go new file mode 100644 index 
00000000..824829dc --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_regions.go @@ -0,0 +1,105 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeRegions invokes the location.DescribeRegions API synchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +func (client *Client) DescribeRegions(request *DescribeRegionsRequest) (response *DescribeRegionsResponse, err error) { + response = CreateDescribeRegionsResponse() + err = client.DoAction(request, response) + return +} + +// DescribeRegionsWithChan invokes the location.DescribeRegions API asynchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeRegionsWithChan(request *DescribeRegionsRequest) (<-chan *DescribeRegionsResponse, <-chan error) { + responseChan := make(chan *DescribeRegionsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeRegions(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeRegionsWithCallback invokes the location.DescribeRegions API asynchronously +// api document: https://help.aliyun.com/api/location/describeregions.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeRegionsWithCallback(request *DescribeRegionsRequest, callback func(response *DescribeRegionsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeRegionsResponse + var err error + defer close(result) + response, err = client.DescribeRegions(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeRegionsRequest is the request struct for api DescribeRegions +type DescribeRegionsRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` +} + +// DescribeRegionsResponse is the response struct for api DescribeRegions +type DescribeRegionsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + TotalCount int `json:"TotalCount" xml:"TotalCount"` + RegionIds RegionIds `json:"RegionIds" xml:"RegionIds"` +} + +// CreateDescribeRegionsRequest creates a request to invoke DescribeRegions API +func CreateDescribeRegionsRequest() (request 
*DescribeRegionsRequest) { + request = &DescribeRegionsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeRegions", "location", "openAPI") + return +} + +// CreateDescribeRegionsResponse creates a response to parse from DescribeRegions response +func CreateDescribeRegionsResponse() (response *DescribeRegionsResponse) { + response = &DescribeRegionsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go new file mode 100644 index 00000000..af185ecf --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/describe_services.go @@ -0,0 +1,105 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// DescribeServices invokes the location.DescribeServices API synchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +func (client *Client) DescribeServices(request *DescribeServicesRequest) (response *DescribeServicesResponse, err error) { + response = CreateDescribeServicesResponse() + err = client.DoAction(request, response) + return +} + +// DescribeServicesWithChan invokes the location.DescribeServices API asynchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeServicesWithChan(request *DescribeServicesRequest) (<-chan *DescribeServicesResponse, <-chan error) { + responseChan := make(chan *DescribeServicesResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.DescribeServices(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// DescribeServicesWithCallback invokes the location.DescribeServices API asynchronously +// api document: https://help.aliyun.com/api/location/describeservices.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) DescribeServicesWithCallback(request *DescribeServicesRequest, callback func(response *DescribeServicesResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *DescribeServicesResponse + var err error + defer close(result) + response, err = client.DescribeServices(request) + callback(response, err) + result <- 1 + }) 
+ if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// DescribeServicesRequest is the request struct for api DescribeServices +type DescribeServicesRequest struct { + *requests.RpcRequest + Password string `position:"Query" name:"Password"` +} + +// DescribeServicesResponse is the response struct for api DescribeServices +type DescribeServicesResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + TotalCount int `json:"TotalCount" xml:"TotalCount"` + Services Services `json:"Services" xml:"Services"` +} + +// CreateDescribeServicesRequest creates a request to invoke DescribeServices API +func CreateDescribeServicesRequest() (request *DescribeServicesRequest) { + request = &DescribeServicesRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "DescribeServices", "location", "openAPI") + return +} + +// CreateDescribeServicesResponse creates a response to parse from DescribeServices response +func CreateDescribeServicesResponse() (response *DescribeServicesResponse) { + response = &DescribeServicesResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go new file mode 100644 index 00000000..6b11d030 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints.go @@ -0,0 +1,107 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
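`DescribeRegions` and `DescribeServices` take no required parameters beyond the request object. A short sketch, reusing the `client` from the previous example:

```go
// Enumerate the region ids the location service knows about.
regionsResp, err := client.DescribeRegions(location.CreateDescribeRegionsRequest())
if err != nil {
	log.Fatal(err)
}
fmt.Println("regions:", regionsResp.TotalCount)
for _, id := range regionsResp.RegionIds.RegionIds {
	fmt.Println(" -", id)
}

// Enumerate the service codes that can be resolved to endpoints.
servicesResp, err := client.DescribeServices(location.CreateDescribeServicesRequest())
if err != nil {
	log.Fatal(err)
}
fmt.Println("services:", servicesResp.Services.Services)
```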
+ +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// ListEndpoints invokes the location.ListEndpoints API synchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +func (client *Client) ListEndpoints(request *ListEndpointsRequest) (response *ListEndpointsResponse, err error) { + response = CreateListEndpointsResponse() + err = client.DoAction(request, response) + return +} + +// ListEndpointsWithChan invokes the location.ListEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsWithChan(request *ListEndpointsRequest) (<-chan *ListEndpointsResponse, <-chan error) { + responseChan := make(chan *ListEndpointsResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ListEndpoints(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ListEndpointsWithCallback invokes the location.ListEndpoints API asynchronously +// api document: https://help.aliyun.com/api/location/listendpoints.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsWithCallback(request *ListEndpointsRequest, callback func(response *ListEndpointsResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ListEndpointsResponse + var err error + defer close(result) + response, err = client.ListEndpoints(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ListEndpointsRequest is the request struct for api ListEndpoints +type ListEndpointsRequest struct { + *requests.RpcRequest + Namespace string `position:"Query" name:"Namespace"` + Id string `position:"Query" name:"Id"` + SerivceCode string `position:"Query" name:"SerivceCode"` +} + +// ListEndpointsResponse is the response struct for api ListEndpoints +type ListEndpointsResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + EndpointList EndpointListInListEndpoints `json:"EndpointList" xml:"EndpointList"` +} + +// CreateListEndpointsRequest creates a request to invoke ListEndpoints API +func CreateListEndpointsRequest() (request *ListEndpointsRequest) { + request = &ListEndpointsRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "ListEndpoints", "location", "openAPI") + return +} + +// CreateListEndpointsResponse creates a response to parse from ListEndpoints response +func CreateListEndpointsResponse() (response *ListEndpointsResponse) { + response = &ListEndpointsResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go new file mode 100644 index 00000000..e5d75303 --- /dev/null +++ 
b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/list_endpoints_by_ip.go @@ -0,0 +1,105 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses" +) + +// ListEndpointsByIp invokes the location.ListEndpointsByIp API synchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +func (client *Client) ListEndpointsByIp(request *ListEndpointsByIpRequest) (response *ListEndpointsByIpResponse, err error) { + response = CreateListEndpointsByIpResponse() + err = client.DoAction(request, response) + return +} + +// ListEndpointsByIpWithChan invokes the location.ListEndpointsByIp API asynchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsByIpWithChan(request *ListEndpointsByIpRequest) (<-chan *ListEndpointsByIpResponse, <-chan error) { + responseChan := make(chan *ListEndpointsByIpResponse, 1) + errChan := make(chan error, 1) + err := client.AddAsyncTask(func() { + defer close(responseChan) + defer close(errChan) + response, err := client.ListEndpointsByIp(request) + if err != nil { + errChan <- err + } else { + responseChan <- response + } + }) + if err != nil { + errChan <- err + close(responseChan) + close(errChan) + } + return responseChan, errChan +} + +// ListEndpointsByIpWithCallback invokes the location.ListEndpointsByIp API asynchronously +// api document: https://help.aliyun.com/api/location/listendpointsbyip.html +// asynchronous document: https://help.aliyun.com/document_detail/66220.html +func (client *Client) ListEndpointsByIpWithCallback(request *ListEndpointsByIpRequest, callback func(response *ListEndpointsByIpResponse, err error)) <-chan int { + result := make(chan int, 1) + err := client.AddAsyncTask(func() { + var response *ListEndpointsByIpResponse + var err error + defer close(result) + response, err = client.ListEndpointsByIp(request) + callback(response, err) + result <- 1 + }) + if err != nil { + defer close(result) + callback(nil, err) + result <- 0 + } + return result +} + +// ListEndpointsByIpRequest is the request struct for api ListEndpointsByIp +type ListEndpointsByIpRequest struct { + *requests.RpcRequest + Ip string `position:"Query" name:"Ip"` +} + +// ListEndpointsByIpResponse is the response struct for api ListEndpointsByIp +type ListEndpointsByIpResponse struct { + *responses.BaseResponse + RequestId string `json:"RequestId" xml:"RequestId"` + Success bool `json:"Success" xml:"Success"` + EndpointList EndpointListInListEndpointsByIp `json:"EndpointList" xml:"EndpointList"` +} + +// CreateListEndpointsByIpRequest creates a request to invoke ListEndpointsByIp API +func 
CreateListEndpointsByIpRequest() (request *ListEndpointsByIpRequest) { + request = &ListEndpointsByIpRequest{ + RpcRequest: &requests.RpcRequest{}, + } + request.InitWithApiInfo("Location", "2015-06-12", "ListEndpointsByIp", "location", "openAPI") + return +} + +// CreateListEndpointsByIpResponse creates a response to parse from ListEndpointsByIp response +func CreateListEndpointsByIpResponse() (response *ListEndpointsByIpResponse) { + response = &ListEndpointsByIpResponse{ + BaseResponse: &responses.BaseResponse{}, + } + return +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go new file mode 100644 index 00000000..1856a795 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint.go @@ -0,0 +1,26 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Endpoint is a nested struct in location response +type Endpoint struct { + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Id string `json:"Id" xml:"Id"` + Namespace string `json:"Namespace" xml:"Namespace"` + SerivceCode string `json:"SerivceCode" xml:"SerivceCode"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInDescribeEndpoints `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go new file mode 100644 index 00000000..b41f43f5 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
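The `*WithChan` variants buffer a single result on each channel and close both when the task completes, so a receive on the response channel with `ok == false` means the error channel holds the cause. A sketch with `ListEndpointsByIp` (placeholder IP from the documentation range, `client` as before):

```go
req := location.CreateListEndpointsByIpRequest()
req.Ip = "203.0.113.10" // placeholder address

respChan, errChan := client.ListEndpointsByIpWithChan(req)
if resp, ok := <-respChan; ok {
	for _, ep := range resp.EndpointList.ItemEndpoint {
		fmt.Println(ep.Product, "=>", ep.Endpoint)
	}
} else {
	log.Fatal(<-errChan)
}
```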
+ +// EndpointListInListEndpoints is a nested struct in location response +type EndpointListInListEndpoints struct { + ItemEndpoint []ItemEndpoint `json:"ItemEndpoint" xml:"ItemEndpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go new file mode 100644 index 00000000..75a1ea0c --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoint_list_in_list_endpoints_by_ip.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// EndpointListInListEndpointsByIp is a nested struct in location response +type EndpointListInListEndpointsByIp struct { + ItemEndpoint []ItemEndpoint `json:"ItemEndpoint" xml:"ItemEndpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go new file mode 100644 index 00000000..e55882a6 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Endpoints is a nested struct in location response +type Endpoints struct { + Endpoint []Endpoint `json:"Endpoint" xml:"Endpoint"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go new file mode 100644 index 00000000..099768f1 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_item_endpoint.go @@ -0,0 +1,26 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ItemEndpoint is a nested struct in location response +type ItemEndpoint struct { + Endpoint string `json:"Endpoint" xml:"Endpoint"` + Product string `json:"Product" xml:"Product"` + Namespace string `json:"Namespace" xml:"Namespace"` + Id string `json:"Id" xml:"Id"` + Type string `json:"Type" xml:"Type"` + Protocols ProtocolsInListEndpointsByIp `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go new file mode 100644 index 00000000..d1d2d794 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoint.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInDescribeEndpoint is a nested struct in location response +type ProtocolsInDescribeEndpoint struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go new file mode 100644 index 00000000..c79d4da8 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_describe_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +// ProtocolsInDescribeEndpoints is a nested struct in location response +type ProtocolsInDescribeEndpoints struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go new file mode 100644 index 00000000..61006914 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInListEndpoints is a nested struct in location response +type ProtocolsInListEndpoints struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go new file mode 100644 index 00000000..962d2c04 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_protocols_in_list_endpoints_by_ip.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// ProtocolsInListEndpointsByIp is a nested struct in location response +type ProtocolsInListEndpointsByIp struct { + Protocols []string `json:"Protocols" xml:"Protocols"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go new file mode 100644 index 00000000..0f1fbe3b --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_region_ids.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// RegionIds is a nested struct in location response +type RegionIds struct { + RegionIds []string `json:"RegionIds" xml:"RegionIds"` +} diff --git a/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go new file mode 100644 index 00000000..b3e934c5 --- /dev/null +++ b/vendor/github.com/aliyun/alibaba-cloud-sdk-go/services/location/struct_services.go @@ -0,0 +1,21 @@ +package location + +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. +// +// Code generated by Alibaba Cloud SDK Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Services is a nested struct in location response +type Services struct { + Services []string `json:"Services" xml:"Services"` +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go new file mode 100644 index 00000000..34750b7f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/auth.go @@ -0,0 +1,144 @@ +package oss + +import ( + "bytes" + "crypto/hmac" + "crypto/sha1" + "encoding/base64" + "fmt" + "hash" + "io" + "net/http" + "sort" + "strconv" + "strings" +) + +// headerSorter defines the key-value structure for storing the sorted data in signHeader. +type headerSorter struct { + Keys []string + Vals []string +} + +// signHeader signs the header and sets it as the authorization header. 
+func (conn Conn) signHeader(req *http.Request, canonicalizedResource string) { + + akIf := conn.config.GetCredentials() + + // Get the final authorization string + authorizationStr := "OSS " + akIf.GetAccessKeyID() + ":" + conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + + // Give the parameter "Authorization" value + req.Header.Set(HTTPHeaderAuthorization, authorizationStr) +} + +func (conn Conn) getSignedStr(req *http.Request, canonicalizedResource string, keySecret string) string { + // Find out the "x-oss-"'s address in header of the request + temp := make(map[string]string) + + for k, v := range req.Header { + if strings.HasPrefix(strings.ToLower(k), "x-oss-") { + temp[strings.ToLower(k)] = v[0] + } + } + hs := newHeaderSorter(temp) + + // Sort the temp by the ascending order + hs.Sort() + + // Get the canonicalizedOSSHeaders + canonicalizedOSSHeaders := "" + for i := range hs.Keys { + canonicalizedOSSHeaders += hs.Keys[i] + ":" + hs.Vals[i] + "\n" + } + + // Give other parameters values + // when sign URL, date is expires + date := req.Header.Get(HTTPHeaderDate) + contentType := req.Header.Get(HTTPHeaderContentType) + contentMd5 := req.Header.Get(HTTPHeaderContentMD5) + + signStr := req.Method + "\n" + contentMd5 + "\n" + contentType + "\n" + date + "\n" + canonicalizedOSSHeaders + canonicalizedResource + + // convert sign to log for easy to view + if conn.config.LogLevel >= Debug { + var signBuf bytes.Buffer + for i := 0; i < len(signStr); i++ { + if signStr[i] != '\n' { + signBuf.WriteByte(signStr[i]) + } else { + signBuf.WriteString("\\n") + } + } + conn.config.WriteLog(Debug, "[Req:%p]signStr:%s\n", req, signBuf.String()) + } + + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + return signedStr +} + +func (conn Conn) getRtmpSignedStr(bucketName, channelName, playlistName string, expiration int64, keySecret string, params map[string]interface{}) string { + if params[HTTPParamAccessKeyID] == nil { + return "" + } + + canonResource := fmt.Sprintf("/%s/%s", bucketName, channelName) + canonParamsKeys := []string{} + for key := range params { + if key != HTTPParamAccessKeyID && key != HTTPParamSignature && key != HTTPParamExpires && key != HTTPParamSecurityToken { + canonParamsKeys = append(canonParamsKeys, key) + } + } + + sort.Strings(canonParamsKeys) + canonParamsStr := "" + for _, key := range canonParamsKeys { + canonParamsStr = fmt.Sprintf("%s%s:%s\n", canonParamsStr, key, params[key].(string)) + } + + expireStr := strconv.FormatInt(expiration, 10) + signStr := expireStr + "\n" + canonParamsStr + canonResource + + h := hmac.New(func() hash.Hash { return sha1.New() }, []byte(keySecret)) + io.WriteString(h, signStr) + signedStr := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return signedStr +} + +// newHeaderSorter is an additional function for function SignHeader. +func newHeaderSorter(m map[string]string) *headerSorter { + hs := &headerSorter{ + Keys: make([]string, 0, len(m)), + Vals: make([]string, 0, len(m)), + } + + for k, v := range m { + hs.Keys = append(hs.Keys, k) + hs.Vals = append(hs.Vals, v) + } + return hs +} + +// Sort is an additional function for function SignHeader. +func (hs *headerSorter) Sort() { + sort.Sort(hs) +} + +// Len is an additional function for function SignHeader. +func (hs *headerSorter) Len() int { + return len(hs.Vals) +} + +// Less is an additional function for function SignHeader. 
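The canonical string built by `getSignedStr` above is: verb, body MD5, content type, and date on separate lines, followed by the lower-cased and sorted `x-oss-` headers as `key:value` lines, then the canonicalized resource. A standalone sketch of the same HMAC-SHA1 plus Base64 computation (illustrative only, placeholder values):

```go
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
	"sort"
	"strings"
)

// signOSS mirrors the shape of getSignedStr: it canonicalizes the request
// and signs it with HMAC-SHA1, returning the Base64-encoded signature.
func signOSS(method, contentMD5, contentType, date string, ossHeaders map[string]string, resource, secret string) string {
	lowered := make(map[string]string, len(ossHeaders))
	keys := make([]string, 0, len(ossHeaders))
	for k, v := range ossHeaders {
		lk := strings.ToLower(k)
		lowered[lk] = v
		keys = append(keys, lk)
	}
	sort.Strings(keys)

	var sb strings.Builder
	sb.WriteString(method + "\n" + contentMD5 + "\n" + contentType + "\n" + date + "\n")
	for _, k := range keys {
		sb.WriteString(k + ":" + lowered[k] + "\n")
	}
	sb.WriteString(resource)

	mac := hmac.New(sha1.New, []byte(secret))
	mac.Write([]byte(sb.String()))
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}

func main() {
	sig := signOSS("PUT", "", "text/plain", "Wed, 01 Jan 2020 00:00:00 GMT",
		map[string]string{"X-OSS-Meta-Author": "demo"}, "/my-bucket/my-object", "<accessKeySecret>")
	fmt.Println("Authorization: OSS <accessKeyId>:" + sig)
}
```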
+func (hs *headerSorter) Less(i, j int) bool { + return bytes.Compare([]byte(hs.Keys[i]), []byte(hs.Keys[j])) < 0 +} + +// Swap is an additional function for function SignHeader. +func (hs *headerSorter) Swap(i, j int) { + hs.Vals[i], hs.Vals[j] = hs.Vals[j], hs.Vals[i] + hs.Keys[i], hs.Keys[j] = hs.Keys[j], hs.Keys[i] +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go new file mode 100644 index 00000000..e96fb47b --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/bucket.go @@ -0,0 +1,1198 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/xml" + "fmt" + "hash" + "hash/crc64" + "io" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" +) + +// Bucket implements the operations of object. +type Bucket struct { + Client Client + BucketName string +} + +// PutObject creates a new object and it will overwrite the original one if it exists already. +// +// objectKey the object key in UTF-8 encoding. The length must be between 1 and 1023, and cannot start with "/" or "\". +// reader io.Reader instance for reading the data for uploading +// options the options for uploading the object. The valid options here are CacheControl, ContentDisposition, ContentEncoding +// Expires, ServerSideEncryption, ObjectACL and Meta. Refer to the link below for more details. +// https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObject(objectKey string, reader io.Reader, options ...Option) error { + opts := addContentType(options, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// PutObjectFromFile creates a new object from the local file. +// +// objectKey object key. +// filePath the local file path to upload. +// options the options for uploading the object. Refer to the parameter options in PutObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) PutObjectFromFile(objectKey, filePath string, options ...Option) error { + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + opts := addContentType(options, filePath, objectKey) + + request := &PutObjectRequest{ + ObjectKey: objectKey, + Reader: fd, + } + resp, err := bucket.DoPutObject(request, opts) + if err != nil { + return err + } + defer resp.Body.Close() + + return err +} + +// DoPutObject does the actual upload work. +// +// request the request instance for uploading an object. +// options the options for uploading an object. +// +// Response the response from OSS. +// error it's nil if no error, otherwise it's an error object. 
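A minimal upload sketch for the two entry points above; `oss.New` and `Client.Bucket` come from the SDK's client.go (not part of this hunk), and the endpoint, credentials, bucket name, and paths are placeholders:

```go
package main

import (
	"log"
	"strings"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	client, err := oss.New("https://oss-cn-hangzhou.aliyuncs.com", "<accessKeyId>", "<accessKeySecret>")
	if err != nil {
		log.Fatal(err)
	}
	bucket, err := client.Bucket("my-bucket")
	if err != nil {
		log.Fatal(err)
	}

	// Upload from memory; the Content-Type is inferred from the key.
	if err := bucket.PutObject("hello.txt", strings.NewReader("hello, OSS")); err != nil {
		log.Fatal(err)
	}

	// Upload a local file with an explicit Content-Type option.
	if err := bucket.PutObjectFromFile("backup/config.json", "/tmp/config.json",
		oss.ContentType("application/json")); err != nil {
		log.Fatal(err)
	}
}
```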
+// +func (bucket Bucket) DoPutObject(request *PutObjectRequest, options []Option) (*Response, error) { + isOptSet, _, _ := isOptionSet(options, HTTPHeaderContentType) + if !isOptSet { + options = addContentType(options, request.ObjectKey) + } + + listener := getProgressListener(options) + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", request.ObjectKey, params, options, request.Reader, listener) + if err != nil { + return nil, err + } + + if bucket.getConfig().IsEnableCRC { + err = checkCRC(resp, "DoPutObject") + if err != nil { + return resp, err + } + } + + err = checkRespCode(resp.StatusCode, []int{http.StatusOK}) + + return resp, err +} + +// GetObject downloads the object. +// +// objectKey the object key. +// options the options for downloading the object. The valid values are: Range, IfModifiedSince, IfUnmodifiedSince, IfMatch, +// IfNoneMatch, AcceptEncoding. For more details, please check out: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser reader instance for reading data from response. It must be called close() after the usage and only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObject(objectKey string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return nil, err + } + + return result.Response, nil +} + +// GetObjectToFile downloads the data to a local file. +// +// objectKey the object key to download. +// filePath the local file to store the object data. +// options the options for downloading the object. Refer to the parameter options in method GetObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFile(objectKey, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Calls the API to actually download the object. Returns the result instance. + result, err := bucket.DoGetObject(&GetObjectRequest{objectKey}, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the local file does not exist, create a new one. If it exists, overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Copy the data to the local file path. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compares the CRC value + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = checkCRC(result.Response, "GetObjectToFile") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObject is the actual API that gets the object. It's the internal function called by other public APIs. +// +// request the request to download the object. +// options the options for downloading the file. Checks out the parameter options in method GetObject. +// +// GetObjectResult the result instance of getting the object. +// error it's nil if no error, otherwise it's an error object. 
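And the matching download sketch, reusing `bucket` from the previous example plus `fmt`, `io/ioutil`, and `log` (`oss.Range` is an option constructor defined elsewhere in the SDK):

```go
// Stream the object; the caller owns the ReadCloser and must close it.
body, err := bucket.GetObject("hello.txt")
if err != nil {
	log.Fatal(err)
}
data, err := ioutil.ReadAll(body)
body.Close()
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%s\n", data)

// Or let the SDK write to disk: it downloads into a temp file and renames
// it on success. A byte range can be requested via oss.Range.
if err := bucket.GetObjectToFile("hello.txt", "/tmp/hello.txt", oss.Range(0, 9)); err != nil {
	log.Fatal(err)
}
```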
+// +func (bucket Bucket) DoGetObject(request *GetObjectRequest, options []Option) (*GetObjectResult, error) { + params, _ := getRawParams(options) + resp, err := bucket.do("GET", request.ObjectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(crcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := getProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// CopyObject copies the object inside the bucket. +// +// srcObjectKey the source object to copy. +// destObjectKey the target object to copy. +// options options for copying an object. You can specify the conditions of copy. The valid conditions are CopySourceIfMatch, +// CopySourceIfNoneMatch, CopySourceIfModifiedSince, CopySourceIfUnmodifiedSince, MetadataDirective. +// Also you can specify the target object's attributes, such as CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, ObjectACL, Meta. Refer to the link below for more details : +// https://help.aliyun.com/document_detail/oss/api-reference/object/CopyObject.html +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObject(srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := findOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = deleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + params := map[string]interface{}{} + resp, err := bucket.do("PUT", destObjectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// CopyObjectTo copies the object to another bucket. +// +// srcObjectKey source object key. The source bucket is Bucket.BucketName . +// destBucketName target bucket name. +// destObjectKey target object name. +// options copy options, check out parameter options in function CopyObject for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) CopyObjectTo(destBucketName, destObjectKey, srcObjectKey string, options ...Option) (CopyObjectResult, error) { + return bucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +// +// CopyObjectFrom copies the object to another bucket. +// +// srcBucketName source bucket name. +// srcObjectKey source object name. +// destObjectKey target object name. The target bucket name is Bucket.BucketName. +// options copy options. Check out parameter options in function CopyObject. +// +// error it's nil if no error, otherwise it's an error object. 
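A copy sketch; note that `CopyObjectTo` takes the destination bucket and key before the source key. `oss.MetadataDirective`, `oss.MetaReplace`, and `oss.Meta` are defined elsewhere in the SDK:

```go
// Copy within the same bucket, replacing the metadata on the target.
if _, err := bucket.CopyObject("hello.txt", "hello-copy.txt",
	oss.MetadataDirective(oss.MetaReplace),
	oss.Meta("author", "demo")); err != nil {
	log.Fatal(err)
}

// Copy into another bucket: (destBucketName, destObjectKey, srcObjectKey).
if _, err := bucket.CopyObjectTo("other-bucket", "hello.txt", "hello.txt"); err != nil {
	log.Fatal(err)
}
```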
+// +func (bucket Bucket) CopyObjectFrom(srcBucketName, srcObjectKey, destObjectKey string, options ...Option) (CopyObjectResult, error) { + destBucketName := bucket.BucketName + var out CopyObjectResult + srcBucket, err := bucket.Client.Bucket(srcBucketName) + if err != nil { + return out, err + } + + return srcBucket.copy(srcObjectKey, destBucketName, destObjectKey, options...) +} + +func (bucket Bucket) copy(srcObjectKey, destBucketName, destObjectKey string, options ...Option) (CopyObjectResult, error) { + var out CopyObjectResult + + //first find version id + versionIdKey := "versionId" + versionId, _ := findOption(options, versionIdKey, nil) + if versionId == nil { + options = append(options, CopySource(bucket.BucketName, url.QueryEscape(srcObjectKey))) + } else { + options = deleteOption(options, versionIdKey) + options = append(options, CopySourceVersion(bucket.BucketName, url.QueryEscape(srcObjectKey), versionId.(string))) + } + + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return out, err + } + params := map[string]interface{}{} + resp, err := bucket.Client.Conn.Do("PUT", destBucketName, destObjectKey, params, headers, nil, 0, nil) + + // get response header + respHeader, _ := findOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AppendObject uploads the data in the way of appending an existing or new object. +// +// AppendObject the parameter appendPosition specifies which postion (in the target object) to append. For the first append (to a non-existing file), +// the appendPosition should be 0. The appendPosition in the subsequent calls will be the current object length. +// For example, the first appendObject's appendPosition is 0 and it uploaded 65536 bytes data, then the second call's position is 65536. +// The response header x-oss-next-append-position after each successful request also specifies the next call's append position (so the caller need not to maintain this information). +// +// objectKey the target object to append to. +// reader io.Reader. The read instance for reading the data to append. +// appendPosition the start position to append. +// destObjectProperties the options for the first appending, such as CacheControl, ContentDisposition, ContentEncoding, +// Expires, ServerSideEncryption, ObjectACL. +// +// int64 the next append position, it's valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) AppendObject(objectKey string, reader io.Reader, appendPosition int64, options ...Option) (int64, error) { + request := &AppendObjectRequest{ + ObjectKey: objectKey, + Reader: reader, + Position: appendPosition, + } + + result, err := bucket.DoAppendObject(request, options) + if err != nil { + return appendPosition, err + } + + return result.NextPosition, err +} + +// DoAppendObject is the actual API that does the object append. +// +// request the request object for appending object. +// options the options for appending object. +// +// AppendObjectResult the result object for appending object. +// error it's nil if no error, otherwise it's an error object. 
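The append-position bookkeeping described above looks like this in practice (a sketch; the target key must not already exist as a normal, non-appendable object before the first append):

```go
// The first append to a new object starts at position 0; each call
// returns the position to pass to the next one.
var pos int64
for _, chunk := range []string{"part-1|", "part-2|", "part-3"} {
	var err error
	pos, err = bucket.AppendObject("journal.log", strings.NewReader(chunk), pos)
	if err != nil {
		log.Fatal(err)
	}
}
```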
+// +func (bucket Bucket) DoAppendObject(request *AppendObjectRequest, options []Option) (*AppendObjectResult, error) { + params := map[string]interface{}{} + params["append"] = nil + params["position"] = strconv.FormatInt(request.Position, 10) + headers := make(map[string]string) + + opts := addContentType(options, request.ObjectKey) + handleOptions(headers, opts) + + var initCRC uint64 + isCRCSet, initCRCOpt, _ := isOptionSet(options, initCRC64) + if isCRCSet { + initCRC = initCRCOpt.(uint64) + } + + listener := getProgressListener(options) + + handleOptions(headers, opts) + resp, err := bucket.Client.Conn.Do("POST", bucket.BucketName, request.ObjectKey, params, headers, + request.Reader, initCRC, listener) + + // get response header + respHeader, _ := findOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + if err != nil { + return nil, err + } + defer resp.Body.Close() + + nextPosition, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderOssNextAppendPosition), 10, 64) + result := &AppendObjectResult{ + NextPosition: nextPosition, + CRC: resp.ServerCRC, + } + + if bucket.getConfig().IsEnableCRC && isCRCSet { + err = checkCRC(resp, "AppendObject") + if err != nil { + return result, err + } + } + + return result, nil +} + +// DeleteObject deletes the object. +// +// objectKey the object key to delete. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObject(objectKey string, options ...Option) error { + params, _ := getRawParams(options) + resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// DeleteObjects deletes multiple objects. +// +// objectKeys the object keys to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectsResult the result object. +// error it's nil if no error, otherwise it's an error object. 
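Deletion sketch for both entry points (`bucket` as before):

```go
// Delete a single key; OSS replies 204 whether or not the key existed.
if err := bucket.DeleteObject("hello-copy.txt"); err != nil {
	log.Fatal(err)
}

// Batch delete; the result lists the keys that were removed.
res, err := bucket.DeleteObjects([]string{"a.txt", "b.txt"})
if err != nil {
	log.Fatal(err)
}
fmt.Println("deleted:", res.DeletedObjects)
```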
+// +func (bucket Bucket) DeleteObjects(objectKeys []string, options ...Option) (DeleteObjectsResult, error) { + out := DeleteObjectsResult{} + dxml := deleteXML{} + for _, key := range objectKeys { + dxml.Objects = append(dxml.Objects, DeleteObject{Key: key}) + } + + isQuiet, _ := findOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + deletedResult := DeleteObjectVersionsResult{} + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &deletedResult); err == nil { + err = decodeDeleteObjectsResult(&deletedResult) + } + } + + // Keep compatibility:need convert to struct DeleteObjectsResult + out.XMLName = deletedResult.XMLName + for _, v := range deletedResult.DeletedObjectsDetail { + out.DeletedObjects = append(out.DeletedObjects, v.Key) + } + + return out, err +} + +// DeleteObjectVersions deletes multiple object versions. +// +// objectVersions the object keys and versions to delete. +// options the options for deleting objects. +// Supported option is DeleteObjectsQuiet which means it will not return error even deletion failed (not recommended). By default it's not used. +// +// DeleteObjectVersionsResult the result object. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DeleteObjectVersions(objectVersions []DeleteObject, options ...Option) (DeleteObjectVersionsResult, error) { + out := DeleteObjectVersionsResult{} + dxml := deleteXML{} + dxml.Objects = objectVersions + + isQuiet, _ := findOption(options, deleteObjectsQuiet, false) + dxml.Quiet = isQuiet.(bool) + + bs, err := xml.Marshal(dxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + options = append(options, ContentType(contentType)) + sum := md5.Sum(bs) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + options = append(options, ContentMD5(b64)) + + params := map[string]interface{}{} + params["delete"] = nil + params["encoding-type"] = "url" + + resp, err := bucket.do("POST", "", params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + if !dxml.Quiet { + if err = xmlUnmarshal(resp.Body, &out); err == nil { + err = decodeDeleteObjectsResult(&out) + } + } + return out, err +} + +// IsObjectExist checks if the object exists. +// +// bool flag of object's existence (true:exists; false:non-exist) when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) IsObjectExist(objectKey string, options ...Option) (bool, error) { + _, err := bucket.GetObjectMeta(objectKey, options...) + if err == nil { + return true, nil + } + + switch err.(type) { + case ServiceError: + if err.(ServiceError).StatusCode == 404 { + return false, nil + } + } + + return false, err +} + +// ListObjects lists the objects under the current bucket. +// +// options it contains all the filters for listing objects. 
+// It could specify a prefix filter on object keys, the max keys count to return and the object key marker and the delimiter for grouping object names. +// The key marker means the returned objects' key must be greater than it in lexicographic order. +// +// For example, if the bucket has 8 objects, my-object-1, my-object-11, my-object-2, my-object-21, +// my-object-22, my-object-3, my-object-31, my-object-32. If the prefix is my-object-2 (no other filters), then it returns +// my-object-2, my-object-21, my-object-22 three objects. If the marker is my-object-22 (no other filters), then it returns +// my-object-3, my-object-31, my-object-32 three objects. If the max keys is 5, then it returns 5 objects. +// The three filters could be used together to achieve filter and paging functionality. +// If the prefix is the folder name, then it could list all files under this folder (including the files under its subfolders). +// But if the delimiter is specified with '/', then it only returns that folder's files (no subfolder's files). The direct subfolders are in the commonPrefixes properties. +// For example, if the bucket has three objects fun/test.jpg, fun/movie/001.avi, fun/movie/007.avi. And if the prefix is "fun/", then it returns all three objects. +// But if the delimiter is '/', then only "fun/test.jpg" is returned as files and fun/movie/ is returned as common prefix. +// +// For common usage scenario, check out sample/list_object.go. +// +// ListObjectsResponse the return value after operation succeeds (only valid when error is nil). +// +func (bucket Bucket) ListObjects(options ...Option) (ListObjectsResult, error) { + var out ListObjectsResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectsResult(&out) + return out, err +} + +// ListObjectVersions lists objects of all versions under the current bucket. +func (bucket Bucket) ListObjectVersions(options ...Option) (ListObjectVersionsResult, error) { + var out ListObjectVersionsResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + params["versions"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + + err = decodeListObjectVersionsResult(&out) + return out, err +} + +// SetObjectMeta sets the metadata of the Object. +// +// objectKey object +// options options for setting the metadata. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires, +// ServerSideEncryption, and custom metadata. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectMeta(objectKey string, options ...Option) error { + options = append(options, MetadataDirective(MetaReplace)) + _, err := bucket.CopyObject(objectKey, objectKey, options...) + return err +} + +// GetObjectDetailedMeta gets the object's detailed metadata +// +// objectKey object key. +// options the constraints of the object. Only when the object meets the requirements this method will return the metadata. Otherwise returns error. 
Valid options are IfModifiedSince, IfUnmodifiedSince, +// IfMatch, IfNoneMatch. For more details check out https://help.aliyun.com/document_detail/oss/api-reference/object/HeadObject.html +// +// http.Header object meta when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectDetailedMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := getRawParams(options) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// GetObjectMeta gets object metadata. +// +// GetObjectMeta is more lightweight than GetObjectDetailedMeta as it only returns basic metadata including ETag +// size, LastModified. The size information is in the HTTP header Content-Length. +// +// objectKey object key +// +// http.Header the object's metadata, valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectMeta(objectKey string, options ...Option) (http.Header, error) { + params, _ := getRawParams(options) + params["objectMeta"] = nil + //resp, err := bucket.do("GET", objectKey, "?objectMeta", "", nil, nil, nil) + resp, err := bucket.do("HEAD", objectKey, params, options, nil, nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return resp.Headers, nil +} + +// SetObjectACL updates the object's ACL. +// +// Only the bucket's owner could update object's ACL which priority is higher than bucket's ACL. +// For example, if the bucket ACL is private and object's ACL is public-read-write. +// Then object's ACL is used and it means all users could read or write that object. +// When the object's ACL is not set, then bucket's ACL is used as the object's ACL. +// +// Object read operations include GetObject, HeadObject, CopyObject and UploadPartCopy on the source object; +// Object write operations include PutObject, PostObject, AppendObject, DeleteObject, DeleteMultipleObjects, +// CompleteMultipartUpload and CopyObject on target object. +// +// objectKey the target object key (to set the ACL on) +// objectAcl object ACL. Valid options are PrivateACL, PublicReadACL, PublicReadWriteACL. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) SetObjectACL(objectKey string, objectACL ACLType, options ...Option) error { + options = append(options, ObjectACL(objectACL)) + params, _ := getRawParams(options) + params["acl"] = nil + resp, err := bucket.do("PUT", objectKey, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetObjectACL gets object's ACL +// +// objectKey the object to get ACL from. +// +// GetObjectACLResult the result object when error is nil. GetObjectACLResult.Acl is the object ACL. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectACL(objectKey string, options ...Option) (GetObjectACLResult, error) { + var out GetObjectACLResult + params, _ := getRawParams(options) + params["acl"] = nil + resp, err := bucket.do("GET", objectKey, params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// PutSymlink creates a symlink (to point to an existing object) +// +// Symlink cannot point to another symlink. 
+// When creating a symlink, it does not check the existence of the target file, and it does not check whether the target file is itself a symlink.
+// Nor does it check the caller's permission on the target file. All these checks are deferred to the actual GetObject call via this symlink.
+// If the symlink key already exists and the caller has write permission, the existing object will be overwritten.
+// If an x-oss-meta-* header is specified, it will be added as the metadata of the symlink file.
+//
+// symObjectKey	the symlink object's key.
+// targetObjectKey	the target object key to point to.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutSymlink(symObjectKey string, targetObjectKey string, options ...Option) error {
+	options = append(options, symlinkTarget(url.QueryEscape(targetObjectKey)))
+	params, _ := getRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("PUT", symObjectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetSymlink gets the symlink object with the specified key.
+// If the symlink object does not exist, it returns 404.
+//
+// objectKey	the symlink object's key.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//	When error is nil, the target file key is in the X-Oss-Symlink-Target header of the returned object.
+//
+func (bucket Bucket) GetSymlink(objectKey string, options ...Option) (http.Header, error) {
+	params, _ := getRawParams(options)
+	params["symlink"] = nil
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	targetObjectKey := resp.Headers.Get(HTTPHeaderOssSymlinkTarget)
+	targetObjectKey, err = url.QueryUnescape(targetObjectKey)
+	if err != nil {
+		return resp.Headers, err
+	}
+	resp.Headers.Set(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+	return resp.Headers, err
+}
+
+// RestoreObject restores the object from the archive storage.
+//
+// An archive object is in cold status by default and it cannot be accessed.
+// When restore is called on a cold object, it becomes available for access after some time.
+// If restore is called again while the object is being restored, the server does nothing for the additional calls and returns success.
+// By default, the restored object is available for access for one day. After that it becomes unavailable again.
+// But if RestoreObject is called again after the object is restored, its availability is extended by one more day, up to 7 days in total.
+//
+// objectKey	object key to restore.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) RestoreObject(objectKey string, options ...Option) error {
+	params, _ := getRawParams(options)
+	params["restore"] = nil
+	resp, err := bucket.do("POST", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusAccepted})
+}
+
+// SignURL signs the URL. Users could access the object directly with this URL without needing the access key.
+//
+// objectKey	the target object to sign.
+// method	the HTTP method the signed URL will be used with, such as HTTPGet or HTTPPut.
+// expiredInSec	the number of seconds the signed URL stays valid; it must not be negative.
+// options	the options for signing the URL.
+//
+// string	returns the signed URL, when error is nil.
+// error	it's nil if no error, otherwise it's an error object.
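+//
+// A short sketch (the object key and expiry are placeholders; it assumes the
+// package's HTTPGet method constant):
+//
+//	signedURL, err := bucket.SignURL("my-object.txt", oss.HTTPGet, 300)
+//	if err != nil {
+//		// handle error
+//	}
+//	rc, err := bucket.GetObjectWithURL(signedURL)
+//	if err == nil {
+//		defer rc.Close()
+//	}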
+//
+func (bucket Bucket) SignURL(objectKey string, method HTTPMethod, expiredInSec int64, options ...Option) (string, error) {
+	if expiredInSec < 0 {
+		return "", fmt.Errorf("invalid expires: %d, expires must not be negative", expiredInSec)
+	}
+	expiration := time.Now().Unix() + expiredInSec
+
+	params, err := getRawParams(options)
+	if err != nil {
+		return "", err
+	}
+
+	headers := make(map[string]string)
+	err = handleOptions(headers, options)
+	if err != nil {
+		return "", err
+	}
+
+	return bucket.Client.Conn.signURL(method, bucket.BucketName, objectKey, expiration, params, headers), nil
+}
+
+// PutObjectWithURL uploads an object with the signed URL. If the object exists, it will be overwritten.
+// PutObjectWithURL does not infer the mimetype from the key name.
+//
+// signedURL	signed URL.
+// reader	io.Reader the read instance for reading the data for the upload.
+// options	the options for uploading the data. The valid options are CacheControl, ContentDisposition, ContentEncoding,
+//	Expires, ServerSideEncryption, ObjectACL and custom metadata. Check out the following link for details:
+//	https://help.aliyun.com/document_detail/oss/api-reference/object/PutObject.html
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectWithURL(signedURL string, reader io.Reader, options ...Option) error {
+	resp, err := bucket.DoPutObjectWithURL(signedURL, reader, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// PutObjectFromFileWithURL uploads an object from a local file with the signed URL.
+// PutObjectFromFileWithURL does not infer the mimetype from the object key or the local file name.
+//
+// signedURL	the signed URL.
+// filePath	local file path, such as dir/file.txt, for uploading.
+// options	options for uploading, same as the options in the PutObject function.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) PutObjectFromFileWithURL(signedURL, filePath string, options ...Option) error {
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer fd.Close()
+
+	resp, err := bucket.DoPutObjectWithURL(signedURL, fd, options)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return err
+}
+
+// DoPutObjectWithURL is the actual API that does the upload with a signed URL (internal for the SDK).
+//
+// signedURL	the signed URL.
+// reader	io.Reader the read instance for getting the data to upload.
+// options	options for uploading.
+//
+// Response	the response object which contains the HTTP response.
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) DoPutObjectWithURL(signedURL string, reader io.Reader, options []Option) (*Response, error) {
+	listener := getProgressListener(options)
+
+	params := map[string]interface{}{}
+	resp, err := bucket.doURL("PUT", signedURL, params, options, reader, listener)
+	if err != nil {
+		return nil, err
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoPutObjectWithURL")
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	err = checkRespCode(resp.StatusCode, []int{http.StatusOK})
+
+	return resp, err
+}
+
+// GetObjectWithURL downloads the object and returns the reader instance, with the signed URL.
+//
+// signedURL	the signed URL.
+// options	options for downloading the object. Valid options are IfModifiedSince, IfUnmodifiedSince, IfMatch,
+//	IfNoneMatch, AcceptEncoding.
For more information, check out the following link: +// https://help.aliyun.com/document_detail/oss/api-reference/object/GetObject.html +// +// io.ReadCloser the reader object for getting the data from response. It needs be closed after the usage. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectWithURL(signedURL string, options ...Option) (io.ReadCloser, error) { + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return nil, err + } + return result.Response, nil +} + +// GetObjectToFileWithURL downloads the object into a local file with the signed URL. +// +// signedURL the signed URL +// filePath the local file path to download to. +// options the options for downloading object. Check out the parameter options in function GetObject for the reference. +// +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) GetObjectToFileWithURL(signedURL, filePath string, options ...Option) error { + tempFilePath := filePath + TempFileSuffix + + // Get the object's content + result, err := bucket.DoGetObjectWithURL(signedURL, options) + if err != nil { + return err + } + defer result.Response.Close() + + // If the file does not exist, create one. If exists, then overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, FilePermMode) + if err != nil { + return err + } + + // Save the data to the file. + _, err = io.Copy(fd, result.Response.Body) + fd.Close() + if err != nil { + return err + } + + // Compare the CRC value. If CRC values do not match, return error. + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + encodeOpt, _ := findOption(options, HTTPHeaderAcceptEncoding, nil) + acceptEncoding := "" + if encodeOpt != nil { + acceptEncoding = encodeOpt.(string) + } + + if bucket.getConfig().IsEnableCRC && !hasRange && acceptEncoding != "gzip" { + result.Response.ClientCRC = result.ClientCRC.Sum64() + err = checkCRC(result.Response, "GetObjectToFileWithURL") + if err != nil { + os.Remove(tempFilePath) + return err + } + } + + return os.Rename(tempFilePath, filePath) +} + +// DoGetObjectWithURL is the actual API that downloads the file with the signed URL. +// +// signedURL the signed URL. +// options the options for getting object. Check out parameter options in GetObject for the reference. +// +// GetObjectResult the result object when the error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (bucket Bucket) DoGetObjectWithURL(signedURL string, options []Option) (*GetObjectResult, error) { + params, _ := getRawParams(options) + resp, err := bucket.doURL("GET", signedURL, params, options, nil, nil) + if err != nil { + return nil, err + } + + result := &GetObjectResult{ + Response: resp, + } + + // CRC + var crcCalc hash.Hash64 + hasRange, _, _ := isOptionSet(options, HTTPHeaderRange) + if bucket.getConfig().IsEnableCRC && !hasRange { + crcCalc = crc64.New(crcTable()) + result.ServerCRC = resp.ServerCRC + result.ClientCRC = crcCalc + } + + // Progress + listener := getProgressListener(options) + + contentLen, _ := strconv.ParseInt(resp.Headers.Get(HTTPHeaderContentLength), 10, 64) + resp.Body = TeeReader(resp.Body, crcCalc, contentLen, listener, nil) + + return result, nil +} + +// +// ProcessObject apply process on the specified image file. +// +// The supported process includes resize, rotate, crop, watermark, format, +// udf, customized style, etc. 
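+//
+// For example, a minimal sketch (the object key and the style string are
+// placeholders):
+//
+//	out, err := bucket.ProcessObject("photo.jpg", "image/resize,w_100")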
+//
+//
+// objectKey	object key to process.
+// process	process string, such as "image/resize,w_100|sys/saveas,o_dGVzdC5qcGc,b_dGVzdA"
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (bucket Bucket) ProcessObject(objectKey string, process string, options ...Option) (ProcessObjectResult, error) {
+	var out ProcessObjectResult
+	params, _ := getRawParams(options)
+	params["x-oss-process"] = nil
+	processData := fmt.Sprintf("%v=%v", "x-oss-process", process)
+	data := strings.NewReader(processData)
+	// forward the caller's options so that headers derived from them are not dropped
+	resp, err := bucket.do("POST", objectKey, params, options, data, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = jsonUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// PutObjectTagging adds tagging to the object
+//
+// objectKey	object key to add tagging to
+// tagging	tagging to be added
+//
+// error	nil if success, otherwise error
+//
+func (bucket Bucket) PutObjectTagging(objectKey string, tagging Tagging, options ...Option) error {
+	bs, err := xml.Marshal(tagging)
+	if err != nil {
+		return err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	params, _ := getRawParams(options)
+	params["tagging"] = nil
+	resp, err := bucket.do("PUT", objectKey, params, options, buffer, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return nil
+}
+
+//
+// GetObjectTagging gets the tagging of the object
+//
+// objectKey	object key to get tagging of
+//
+// GetObjectTaggingResult	the object's tagging, valid when error is nil
+// error	nil if success, otherwise error
+//
+func (bucket Bucket) GetObjectTagging(objectKey string, options ...Option) (GetObjectTaggingResult, error) {
+	var out GetObjectTaggingResult
+	params, _ := getRawParams(options)
+	params["tagging"] = nil
+
+	resp, err := bucket.do("GET", objectKey, params, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// DeleteObjectTagging deletes the object's tagging
+//
+// objectKey	object key to delete tagging of
+//
+// error	nil if success, otherwise error
+//
+func (bucket Bucket) DeleteObjectTagging(objectKey string, options ...Option) error {
+	if objectKey == "" {
+		return fmt.Errorf("invalid argument: object name is empty")
+	}
+
+	params, _ := getRawParams(options)
+	params["tagging"] = nil
+
+	resp, err := bucket.do("DELETE", objectKey, params, options, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// OptionsMethod sends an HTTP OPTIONS request (CORS preflight) for the object
+// and returns the response headers.
+func (bucket Bucket) OptionsMethod(objectKey string, options ...Option) (http.Header, error) {
+	var out http.Header
+	resp, err := bucket.do("OPTIONS", objectKey, nil, options, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+	out = resp.Headers
+	return out, nil
+}
+
+// Private
+func (bucket Bucket) do(method, objectName string, params map[string]interface{}, options []Option,
+	data io.Reader, listener ProgressListener) (*Response, error) {
+	headers := make(map[string]string)
+	err := handleOptions(headers, options)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := bucket.Client.Conn.Do(method, bucket.BucketName, objectName,
+		params, headers, data, 0, listener)
+
+	// get response header; guard against a nil response when the request itself failed
+	respHeader, _ := findOption(options, responseHeader, nil)
+	if respHeader != nil && resp != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
+}
+
+func (bucket Bucket) doURL(method HTTPMethod, signedURL string, params map[string]interface{}, options []Option,
+	data
io.Reader, listener ProgressListener) (*Response, error) { + headers := make(map[string]string) + err := handleOptions(headers, options) + if err != nil { + return nil, err + } + + resp, err := bucket.Client.Conn.DoURL(method, signedURL, headers, data, 0, listener) + + // get response header + respHeader, _ := findOption(options, responseHeader, nil) + if respHeader != nil { + pRespHeader := respHeader.(*http.Header) + *pRespHeader = resp.Headers + } + + return resp, err +} + +func (bucket Bucket) getConfig() *Config { + return bucket.Client.Config +} + +func addContentType(options []Option, keys ...string) []Option { + typ := TypeByExtension("") + for _, key := range keys { + typ = TypeByExtension(key) + if typ != "" { + break + } + } + + if typ == "" { + typ = "application/octet-stream" + } + + opts := []Option{ContentType(typ)} + opts = append(opts, options...) + + return opts +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go new file mode 100644 index 00000000..da277386 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/client.go @@ -0,0 +1,1300 @@ +// Package oss implements functions for access oss service. +// It has two main struct Client and Bucket. +package oss + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "strings" + "time" +) + +// Client SDK's entry point. It's for bucket related options such as create/delete/set bucket (such as set/get ACL/lifecycle/referer/logging/website). +// Object related operations are done by Bucket class. +// Users use oss.New to create Client instance. +// +type ( + // Client OSS client + Client struct { + Config *Config // OSS client configuration + Conn *Conn // Send HTTP request + HTTPClient *http.Client //http.Client to use - if nil will make its own + } + + // ClientOption client option such as UseCname, Timeout, SecurityToken. + ClientOption func(*Client) +) + +// New creates a new client. +// +// endpoint the OSS datacenter endpoint such as http://oss-cn-hangzhou.aliyuncs.com . +// accessKeyId access key Id. +// accessKeySecret access key secret. +// +// Client creates the new client instance, the returned value is valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func New(endpoint, accessKeyID, accessKeySecret string, options ...ClientOption) (*Client, error) { + // Configuration + config := getDefaultOssConfig() + config.Endpoint = endpoint + config.AccessKeyID = accessKeyID + config.AccessKeySecret = accessKeySecret + + // URL parse + url := &urlMaker{} + url.Init(config.Endpoint, config.IsCname, config.IsUseProxy) + + // HTTP connect + conn := &Conn{config: config, url: url} + + // OSS client + client := &Client{ + Config: config, + Conn: conn, + } + + // Client options parse + for _, option := range options { + option(client) + } + + // Create HTTP connection + err := conn.init(config, url, client.HTTPClient) + + return client, err +} + +// Bucket gets the bucket instance. +// +// bucketName the bucket name. +// Bucket the bucket object, when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) Bucket(bucketName string) (*Bucket, error) { + return &Bucket{ + client, + bucketName, + }, nil +} + +// CreateBucket creates a bucket. +// +// bucketName the bucket name, it's globably unique and immutable. The bucket name can only consist of lowercase letters, numbers and dash ('-'). 
+// It must start with lowercase letter or number and the length can only be between 3 and 255. +// options options for creating the bucket, with optional ACL. The ACL could be ACLPrivate, ACLPublicRead, and ACLPublicReadWrite. By default it's ACLPrivate. +// It could also be specified with StorageClass option, which supports StorageStandard, StorageIA(infrequent access), StorageArchive. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) CreateBucket(bucketName string, options ...Option) error { + headers := make(map[string]string) + handleOptions(headers, options) + + buffer := new(bytes.Buffer) + + isOptSet, val, _ := isOptionSet(options, storageClass) + if isOptSet { + cbConfig := createBucketConfiguration{StorageClass: val.(StorageClassType)} + bs, err := xml.Marshal(cbConfig) + if err != nil { + return err + } + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers[HTTPHeaderContentType] = contentType + } + + params := map[string]interface{}{} + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// ListBuckets lists buckets of the current account under the given endpoint, with optional filters. +// +// options specifies the filters such as Prefix, Marker and MaxKeys. Prefix is the bucket name's prefix filter. +// And marker makes sure the returned buckets' name are greater than it in lexicographic order. +// Maxkeys limits the max keys to return, and by default it's 100 and up to 1000. +// For the common usage scenario, please check out list_bucket.go in the sample. +// ListBucketsResponse the response object if error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) ListBuckets(options ...Option) (ListBucketsResult, error) { + var out ListBucketsResult + + params, err := getRawParams(options) + if err != nil { + return out, err + } + + resp, err := client.do("GET", "", params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// IsBucketExist checks if the bucket exists +// +// bucketName the bucket name. +// +// bool true if it exists, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) IsBucketExist(bucketName string) (bool, error) { + listRes, err := client.ListBuckets(Prefix(bucketName), MaxKeys(1)) + if err != nil { + return false, err + } + + if len(listRes.Buckets) == 1 && listRes.Buckets[0].Name == bucketName { + return true, nil + } + return false, nil +} + +// DeleteBucket deletes the bucket. Only empty bucket can be deleted (no object and parts). +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucket(bucketName string) error { + params := map[string]interface{}{} + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLocation gets the bucket location. 
+// +// Checks out the following link for more information : +// https://help.aliyun.com/document_detail/oss/user_guide/oss_concept/endpoint.html +// +// bucketName the bucket name +// +// string bucket's datacenter location +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLocation(bucketName string) (string, error) { + params := map[string]interface{}{} + params["location"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var LocationConstraint string + err = xmlUnmarshal(resp.Body, &LocationConstraint) + return LocationConstraint, err +} + +// SetBucketACL sets bucket's ACL. +// +// bucketName the bucket name +// bucketAcl the bucket ACL: ACLPrivate, ACLPublicRead and ACLPublicReadWrite. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketACL(bucketName string, bucketACL ACLType) error { + headers := map[string]string{HTTPHeaderOssACL: string(bucketACL)} + params := map[string]interface{}{} + params["acl"] = nil + resp, err := client.do("PUT", bucketName, params, headers, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketACL gets the bucket ACL. +// +// bucketName the bucket name. +// +// GetBucketAclResponse the result object, and it's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketACL(bucketName string) (GetBucketACLResult, error) { + var out GetBucketACLResult + params := map[string]interface{}{} + params["acl"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLifecycle sets the bucket's lifecycle. +// +// For more information, checks out following link: +// https://help.aliyun.com/document_detail/oss/user_guide/manage_object/object_lifecycle.html +// +// bucketName the bucket name. +// rules the lifecycle rules. There're two kind of rules: absolute time expiration and relative time expiration in days and day/month/year respectively. +// Check out sample/bucket_lifecycle.go for more details. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketLifecycle(bucketName string, rules []LifecycleRule) error { + err := verifyLifecycleRules(rules) + if err != nil { + return err + } + lifecycleCfg := LifecycleConfiguration{Rules: rules} + bs, err := xml.Marshal(lifecycleCfg) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLifecycle deletes the bucket's lifecycle. +// +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. 
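+//
+// For example, a minimal sketch ("my-bucket" is a placeholder):
+//
+//	if err := client.DeleteBucketLifecycle("my-bucket"); err != nil {
+//		// handle error
+//	}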
+// +func (client Client) DeleteBucketLifecycle(bucketName string) error { + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLifecycle gets the bucket's lifecycle settings. +// +// bucketName the bucket name. +// +// GetBucketLifecycleResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLifecycle(bucketName string) (GetBucketLifecycleResult, error) { + var out GetBucketLifecycleResult + params := map[string]interface{}{} + params["lifecycle"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketReferer sets the bucket's referer whitelist and the flag if allowing empty referrer. +// +// To avoid stealing link on OSS data, OSS supports the HTTP referrer header. A whitelist referrer could be set either by API or web console, as well as +// the allowing empty referrer flag. Note that this applies to requests from webbrowser only. +// For example, for a bucket os-example and its referrer http://www.aliyun.com, all requests from this URL could access the bucket. +// For more information, please check out this link : +// https://help.aliyun.com/document_detail/oss/user_guide/security_management/referer.html +// +// bucketName the bucket name. +// referers the referrer white list. A bucket could have a referrer list and each referrer supports one '*' and multiple '?' as wildcards. +// The sample could be found in sample/bucket_referer.go +// allowEmptyReferer the flag of allowing empty referrer. By default it's true. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketReferer(bucketName string, referers []string, allowEmptyReferer bool) error { + rxml := RefererXML{} + rxml.AllowEmptyReferer = allowEmptyReferer + if referers == nil { + rxml.RefererList = append(rxml.RefererList, "") + } else { + for _, referer := range referers { + rxml.RefererList = append(rxml.RefererList, referer) + } + } + + bs, err := xml.Marshal(rxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketReferer gets the bucket's referrer white list. +// +// bucketName the bucket name. +// +// GetBucketRefererResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. 
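+//
+// A short sketch ("my-bucket" is a placeholder; the result fields are assumed
+// to mirror the RefererXML layout used above):
+//
+//	res, err := client.GetBucketReferer("my-bucket")
+//	if err == nil {
+//		fmt.Println(res.AllowEmptyReferer, res.RefererList)
+//	}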
+// +func (client Client) GetBucketReferer(bucketName string) (GetBucketRefererResult, error) { + var out GetBucketRefererResult + params := map[string]interface{}{} + params["referer"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketLogging sets the bucket logging settings. +// +// OSS could automatically store the access log. Only the bucket owner could enable the logging. +// Once enabled, OSS would save all the access log into hourly log files in a specified bucket. +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/logging.html +// +// bucketName bucket name to enable the log. +// targetBucket the target bucket name to store the log files. +// targetPrefix the log files' prefix. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketLogging(bucketName, targetBucket, targetPrefix string, + isEnable bool) error { + var err error + var bs []byte + if isEnable { + lxml := LoggingXML{} + lxml.LoggingEnabled.TargetBucket = targetBucket + lxml.LoggingEnabled.TargetPrefix = targetPrefix + bs, err = xml.Marshal(lxml) + } else { + lxml := loggingXMLEmpty{} + bs, err = xml.Marshal(lxml) + } + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketLogging deletes the logging configuration to disable the logging on the bucket. +// +// bucketName the bucket name to disable the logging. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketLogging(bucketName string) error { + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketLogging gets the bucket's logging settings +// +// bucketName the bucket name +// GetBucketLoggingResponse the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketLogging(bucketName string) (GetBucketLoggingResult, error) { + var out GetBucketLoggingResult + params := map[string]interface{}{} + params["logging"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketWebsite sets the bucket's static website's index and error page. +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// indexDocument index page. 
+// errorDocument error page. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketWebsite(bucketName, indexDocument, errorDocument string) error { + wxml := WebsiteXML{} + wxml.IndexDocument.Suffix = indexDocument + wxml.ErrorDocument.Key = errorDocument + + bs, err := xml.Marshal(wxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// SetBucketWebsiteDetail sets the bucket's static website's detail +// +// OSS supports static web site hosting for the bucket data. When the bucket is enabled with that, you can access the file in the bucket like the way to access a static website. +// For more information, please check out: https://help.aliyun.com/document_detail/oss/user_guide/static_host_website.html +// +// bucketName the bucket name to enable static web site. +// +// wxml the website's detail +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketWebsiteDetail(bucketName string, wxml WebsiteXML, options ...Option) error { + bs, err := xml.Marshal(wxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := make(map[string]string) + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketWebsite deletes the bucket's static web site settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketWebsite(bucketName string) error { + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketWebsite gets the bucket's default page (index page) and the error page. +// +// bucketName the bucket name +// +// GetBucketWebsiteResponse the result object upon successful request. It's only valid when error is nil. +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketWebsite(bucketName string) (GetBucketWebsiteResult, error) { + var out GetBucketWebsiteResult + params := map[string]interface{}{} + params["website"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketCORS sets the bucket's CORS rules +// +// For more information, please check out https://help.aliyun.com/document_detail/oss/user_guide/security_management/cors.html +// +// bucketName the bucket name +// corsRules the CORS rules to set. The related sample code is in sample/bucket_cors.go. 
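+//
+// For example, a minimal sketch using the CORSRule fields copied in the loop
+// below ("my-bucket" and the rule values are placeholders):
+//
+//	rule := oss.CORSRule{
+//		AllowedOrigin: []string{"*"},
+//		AllowedMethod: []string{"GET", "HEAD"},
+//		MaxAgeSeconds: 3600,
+//	}
+//	err := client.SetBucketCORS("my-bucket", []oss.CORSRule{rule})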
+// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketCORS(bucketName string, corsRules []CORSRule) error { + corsxml := CORSXML{} + for _, v := range corsRules { + cr := CORSRule{} + cr.AllowedMethod = v.AllowedMethod + cr.AllowedOrigin = v.AllowedOrigin + cr.AllowedHeader = v.AllowedHeader + cr.ExposeHeader = v.ExposeHeader + cr.MaxAgeSeconds = v.MaxAgeSeconds + corsxml.CORSRules = append(corsxml.CORSRules, cr) + } + + bs, err := xml.Marshal(corsxml) + if err != nil { + return err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketCORS deletes the bucket's static website settings. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketCORS(bucketName string) error { + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketCORS gets the bucket's CORS settings. +// +// bucketName the bucket name. +// GetBucketCORSResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketCORS(bucketName string) (GetBucketCORSResult, error) { + var out GetBucketCORSResult + params := map[string]interface{}{} + params["cors"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetBucketInfo gets the bucket information. +// +// bucketName the bucket name. +// GetBucketInfoResult the result object upon successful request. It's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketInfo(bucketName string) (GetBucketInfoResult, error) { + var out GetBucketInfoResult + params := map[string]interface{}{} + params["bucketInfo"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + + // convert None to "" + if err == nil { + if out.BucketInfo.SseRule.KMSMasterKeyID == "None" { + out.BucketInfo.SseRule.KMSMasterKeyID = "" + } + + if out.BucketInfo.SseRule.SSEAlgorithm == "None" { + out.BucketInfo.SseRule.SSEAlgorithm = "" + } + } + return out, err +} + +// SetBucketVersioning set bucket versioning:Enabled、Suspended +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. 
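+//
+// A short sketch ("my-bucket" is a placeholder; VersioningConfig is assumed to
+// carry the status in its Status field):
+//
+//	err := client.SetBucketVersioning("my-bucket", oss.VersioningConfig{Status: "Enabled"})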
+func (client Client) SetBucketVersioning(bucketName string, versioningConfig VersioningConfig, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(versioningConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["versioning"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketVersioning get bucket versioning status:Enabled、Suspended +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketVersioning(bucketName string, options ...Option) (GetBucketVersioningResult, error) { + var out GetBucketVersioningResult + params := map[string]interface{}{} + params["versioning"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketEncryption set bucket encryption config +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) SetBucketEncryption(bucketName string, encryptionRule ServerEncryptionRule, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(encryptionRule) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketEncryption get bucket encryption +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketEncryption(bucketName string, options ...Option) (GetBucketEncryptionResult, error) { + var out GetBucketEncryptionResult + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// DeleteBucketEncryption delete bucket encryption config +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error bucket +func (client Client) DeleteBucketEncryption(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["encryption"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) 
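+	// A successful DELETE of the ?encryption subresource returns 204 No Content,
+	// which checkRespCode verifies below.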
+ + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// +// SetBucketTagging add tagging to bucket +// bucketName name of bucket +// tagging tagging to be added +// error nil if success, otherwise error +func (client Client) SetBucketTagging(bucketName string, tagging Tagging, options ...Option) error { + var err error + var bs []byte + bs, err = xml.Marshal(tagging) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketTagging get tagging of the bucket +// bucketName name of bucket +// error nil if success, otherwise error +func (client Client) GetBucketTagging(bucketName string, options ...Option) (GetBucketTaggingResult, error) { + var out GetBucketTaggingResult + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// +// DeleteBucketTagging delete bucket tagging +// bucketName name of bucket +// error nil if success, otherwise error +// +func (client Client) DeleteBucketTagging(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["tagging"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// GetBucketStat get bucket stat +// bucketName the bucket name. +// error it's nil if no error, otherwise it's an error object. +func (client Client) GetBucketStat(bucketName string) (GetBucketStatResult, error) { + var out GetBucketStatResult + params := map[string]interface{}{} + params["stat"] = nil + resp, err := client.do("GET", bucketName, params, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetBucketPolicy API operation for Object Storage Service. +// +// Get the policy from the bucket. +// +// bucketName the bucket name. +// +// string return the bucket's policy, and it's only valid when error is nil. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketPolicy(bucketName string, options ...Option) (string, error) { + params := map[string]interface{}{} + params["policy"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + + out := string(body) + return out, err +} + +// SetBucketPolicy API operation for Object Storage Service. +// +// Set the policy from the bucket. +// +// bucketName the bucket name. +// +// policy the bucket policy. +// +// error it's nil if no error, otherwise it's an error object. 
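+//
+// A short sketch (the bucket name and the policy document are placeholders,
+// not a verified policy syntax):
+//
+//	policy := `{"Version":"1","Statement":[{"Effect":"Allow","Action":["oss:GetObject"],"Resource":["acs:oss:*:*:my-bucket/*"]}]}`
+//	err := client.SetBucketPolicy("my-bucket", policy)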
+// +func (client Client) SetBucketPolicy(bucketName string, policy string, options ...Option) error { + params := map[string]interface{}{} + params["policy"] = nil + + buffer := strings.NewReader(policy) + + resp, err := client.do("PUT", bucketName, params, nil, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// DeleteBucketPolicy API operation for Object Storage Service. +// +// Deletes the policy from the bucket. +// +// bucketName the bucket name. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) DeleteBucketPolicy(bucketName string, options ...Option) error { + params := map[string]interface{}{} + params["policy"] = nil + resp, err := client.do("DELETE", bucketName, params, nil, nil, options...) + if err != nil { + return err + } + + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// SetBucketRequestPayment API operation for Object Storage Service. +// +// Set the requestPayment of bucket +// +// bucketName the bucket name. +// +// paymentConfig the payment configuration +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) SetBucketRequestPayment(bucketName string, paymentConfig RequestPaymentConfiguration, options ...Option) error { + params := map[string]interface{}{} + params["requestPayment"] = nil + + var bs []byte + bs, err := xml.Marshal(paymentConfig) + + if err != nil { + return err + } + + buffer := new(bytes.Buffer) + buffer.Write(bs) + + contentType := http.DetectContentType(buffer.Bytes()) + headers := map[string]string{} + headers[HTTPHeaderContentType] = contentType + + resp, err := client.do("PUT", bucketName, params, headers, buffer, options...) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusOK}) +} + +// GetBucketRequestPayment API operation for Object Storage Service. +// +// Get bucket requestPayment +// +// bucketName the bucket name. +// +// RequestPaymentConfiguration the payment configuration +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetBucketRequestPayment(bucketName string, options ...Option) (RequestPaymentConfiguration, error) { + var out RequestPaymentConfiguration + params := map[string]interface{}{} + params["requestPayment"] = nil + + resp, err := client.do("GET", bucketName, params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// GetUserQoSInfo API operation for Object Storage Service. +// +// Get user qos. +// +// UserQoSConfiguration the User Qos and range Information. +// +// error it's nil if no error, otherwise it's an error object. +// +func (client Client) GetUserQoSInfo(options ...Option) (UserQoSConfiguration, error) { + var out UserQoSConfiguration + params := map[string]interface{}{} + params["qosInfo"] = nil + + resp, err := client.do("GET", "", params, nil, nil, options...) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// SetBucketQoSInfo API operation for Object Storage Service. +// +// Set Bucket Qos information. +// +// bucketName tht bucket name. +// +// qosConf the qos configuration. +// +// error it's nil if no error, otherwise it's an error object. 
+//
+func (client Client) SetBucketQoSInfo(bucketName string, qosConf BucketQoSConfiguration, options ...Option) error {
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	var bs []byte
+	bs, err := xml.Marshal(qosConf)
+	if err != nil {
+		return err
+	}
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	contentType := http.DetectContentType(buffer.Bytes())
+	headers := map[string]string{}
+	headers[HTTPHeaderContentType] = contentType
+
+	resp, err := client.do("PUT", bucketName, params, headers, buffer, options...)
+	if err != nil {
+		return err
+	}
+
+	defer resp.Body.Close()
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetBucketQosInfo API operation for Object Storage Service.
+//
+// Get Bucket QoS information.
+//
+// bucketName	the bucket name.
+//
+// BucketQoSConfiguration	the returned QoS configuration.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (client Client) GetBucketQosInfo(bucketName string, options ...Option) (BucketQoSConfiguration, error) {
+	var out BucketQoSConfiguration
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("GET", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+// DeleteBucketQosInfo API operation for Object Storage Service.
+//
+// Delete Bucket QoS information.
+//
+// bucketName	the bucket name.
+//
+// error	it's nil if no error, otherwise it's an error object.
+//
+func (client Client) DeleteBucketQosInfo(bucketName string, options ...Option) error {
+	params := map[string]interface{}{}
+	params["qosInfo"] = nil
+
+	resp, err := client.do("DELETE", bucketName, params, nil, nil, options...)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+// LimitUploadSpeed sets the upload bandwidth limit. The default is 0 (unlimited).
+//
+// upSpeed	the limit in KB/s; 0 means unlimited, which is the default.
+//
+// error	it's nil if success, otherwise failure
+func (client Client) LimitUploadSpeed(upSpeed int) error {
+	if client.Config == nil {
+		return fmt.Errorf("client config is nil")
+	}
+	return client.Config.LimitUploadSpeed(upSpeed)
+}
+
+// UseCname sets the flag of using CName. By default it's false.
+//
+// isUseCname	true: the endpoint has the CName, false: the endpoint does not have a CName. Default is false.
+//
+func UseCname(isUseCname bool) ClientOption {
+	return func(client *Client) {
+		client.Config.IsCname = isUseCname
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+// Timeout sets the HTTP timeout in seconds.
+//
+// connectTimeoutSec	HTTP connect timeout in seconds. Default is 10 seconds. 0 means infinite (not recommended).
+// readWriteTimeout	HTTP read or write timeout in seconds. Default is 20 seconds. 0 means infinite.
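+//
+// For example, a minimal sketch (endpoint and credentials are placeholders),
+// passed to oss.New as a ClientOption:
+//
+//	client, err := oss.New(endpoint, accessKeyID, accessKeySecret,
+//		oss.Timeout(30, 120))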
+// +func Timeout(connectTimeoutSec, readWriteTimeout int64) ClientOption { + return func(client *Client) { + client.Config.HTTPTimeout.ConnectTimeout = + time.Second * time.Duration(connectTimeoutSec) + client.Config.HTTPTimeout.ReadWriteTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.HeaderTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.IdleConnTimeout = + time.Second * time.Duration(readWriteTimeout) + client.Config.HTTPTimeout.LongTimeout = + time.Second * time.Duration(readWriteTimeout*10) + } +} + +// SecurityToken sets the temporary user's SecurityToken. +// +// token STS token +// +func SecurityToken(token string) ClientOption { + return func(client *Client) { + client.Config.SecurityToken = strings.TrimSpace(token) + } +} + +// EnableMD5 enables MD5 validation. +// +// isEnableMD5 true: enable MD5 validation; false: disable MD5 validation. +// +func EnableMD5(isEnableMD5 bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableMD5 = isEnableMD5 + } +} + +// MD5ThresholdCalcInMemory sets the memory usage threshold for computing the MD5, default is 16MB. +// +// threshold the memory threshold in bytes. When the uploaded content is more than 16MB, the temp file is used for computing the MD5. +// +func MD5ThresholdCalcInMemory(threshold int64) ClientOption { + return func(client *Client) { + client.Config.MD5Threshold = threshold + } +} + +// EnableCRC enables the CRC checksum. Default is true. +// +// isEnableCRC true: enable CRC checksum; false: disable the CRC checksum. +// +func EnableCRC(isEnableCRC bool) ClientOption { + return func(client *Client) { + client.Config.IsEnableCRC = isEnableCRC + } +} + +// UserAgent specifies UserAgent. The default is aliyun-sdk-go/1.2.0 (windows/-/amd64;go1.5.2). +// +// userAgent the user agent string. +// +func UserAgent(userAgent string) ClientOption { + return func(client *Client) { + client.Config.UserAgent = userAgent + } +} + +// Proxy sets the proxy (optional). The default is not using proxy. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// +func Proxy(proxyHost string) ClientOption { + return func(client *Client) { + client.Config.IsUseProxy = true + client.Config.ProxyHost = proxyHost + client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy) + } +} + +// AuthProxy sets the proxy information with user name and password. +// +// proxyHost the proxy host in the format "host:port". For example, proxy.com:80 . +// proxyUser the proxy user name. +// proxyPassword the proxy password. 
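+//
+// Sketch (hypothetical proxy host and credentials; oss.New assumed as above);
+// the option composes with any other ClientOption:
+//
+//	client, err := oss.New(endpoint, ak, sk,
+//		oss.AuthProxy("proxy.example.com:80", "user", "password"))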
+//
+func AuthProxy(proxyHost, proxyUser, proxyPassword string) ClientOption {
+	return func(client *Client) {
+		client.Config.IsUseProxy = true
+		client.Config.ProxyHost = proxyHost
+		client.Config.IsAuthProxy = true
+		client.Config.ProxyUser = proxyUser
+		client.Config.ProxyPassword = proxyPassword
+		client.Conn.url.Init(client.Config.Endpoint, client.Config.IsCname, client.Config.IsUseProxy)
+	}
+}
+
+//
+// HTTPClient sets the http.Client in use to the one passed in.
+//
+func HTTPClient(HTTPClient *http.Client) ClientOption {
+	return func(client *Client) {
+		client.HTTPClient = HTTPClient
+	}
+}
+
+//
+// SetLogLevel sets the oss sdk log level.
+//
+func SetLogLevel(LogLevel int) ClientOption {
+	return func(client *Client) {
+		client.Config.LogLevel = LogLevel
+	}
+}
+
+//
+// SetLogger sets the oss sdk logger.
+//
+func SetLogger(Logger *log.Logger) ClientOption {
+	return func(client *Client) {
+		client.Config.Logger = Logger
+	}
+}
+
+// SetCredentialsProvider sets the function used to obtain the user's access credentials.
+//
+func SetCredentialsProvider(provider CredentialsProvider) ClientOption {
+	return func(client *Client) {
+		client.Config.CredentialsProvider = provider
+	}
+}
+
+// Private
+func (client Client) do(method, bucketName string, params map[string]interface{},
+	headers map[string]string, data io.Reader, options ...Option) (*Response, error) {
+	resp, err := client.Conn.Do(method, bucketName, "", params,
+		headers, data, 0, nil)
+
+	// Get the response header; guard against a nil response when the request failed.
+	respHeader, _ := findOption(options, responseHeader, nil)
+	if respHeader != nil && resp != nil {
+		pRespHeader := respHeader.(*http.Header)
+		*pRespHeader = resp.Headers
+	}
+
+	return resp, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
new file mode 100644
index 00000000..ec72d56b
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conf.go
@@ -0,0 +1,176 @@
+package oss
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"os"
+	"time"
+)
+
+// Define the level of the output log
+const (
+	LogOff = iota
+	Error
+	Warn
+	Info
+	Debug
+)
+
+// LogTag Tag for each level of log
+var LogTag = []string{"[error]", "[warn]", "[info]", "[debug]"}
+
+// HTTPTimeout defines HTTP timeout.
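+// Note that Timeout() above maps its two arguments onto these fields:
+// ConnectTimeout gets connectTimeoutSec, while ReadWriteTimeout, HeaderTimeout
+// and IdleConnTimeout all get readWriteTimeout, and LongTimeout gets ten times
+// readWriteTimeout.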
+type HTTPTimeout struct {
+	ConnectTimeout   time.Duration
+	ReadWriteTimeout time.Duration
+	HeaderTimeout    time.Duration
+	LongTimeout      time.Duration
+	IdleConnTimeout  time.Duration
+}
+
+// HTTPMaxConns defines max idle connections and max idle connections per host
+type HTTPMaxConns struct {
+	MaxIdleConns        int
+	MaxIdleConnsPerHost int
+}
+
+// Credentials is the interface for getting AccessKeyID, AccessKeySecret and SecurityToken.
+type Credentials interface {
+	GetAccessKeyID() string
+	GetAccessKeySecret() string
+	GetSecurityToken() string
+}
+
+// CredentialsProvider is the interface for getting Credentials.
+type CredentialsProvider interface {
+	GetCredentials() Credentials
+}
+
+type defaultCredentials struct {
+	config *Config
+}
+
+func (defCre *defaultCredentials) GetAccessKeyID() string {
+	return defCre.config.AccessKeyID
+}
+
+func (defCre *defaultCredentials) GetAccessKeySecret() string {
+	return defCre.config.AccessKeySecret
+}
+
+func (defCre *defaultCredentials) GetSecurityToken() string {
+	return defCre.config.SecurityToken
+}
+
+type defaultCredentialsProvider struct {
+	config *Config
+}
+
+func (defBuild *defaultCredentialsProvider) GetCredentials() Credentials {
+	return &defaultCredentials{config: defBuild.config}
+}
+
+// Config defines oss configuration
+type Config struct {
+	Endpoint        string       // OSS endpoint
+	AccessKeyID     string       // AccessId
+	AccessKeySecret string       // AccessKey
+	RetryTimes      uint         // Retry count. By default it's 5.
+	UserAgent       string       // SDK name/version/system information
+	IsDebug         bool         // Enable debug mode. Default is false.
+	Timeout         uint         // Timeout in seconds. By default it's 60.
+	SecurityToken   string       // STS Token
+	IsCname         bool         // If cname is in the endpoint.
+	HTTPTimeout     HTTPTimeout  // HTTP timeout
+	HTTPMaxConns    HTTPMaxConns // HTTP max connections
+	IsUseProxy      bool         // Flag of using proxy.
+	ProxyHost       string       // Proxy host.
+	IsAuthProxy     bool         // Flag of needing authentication.
+	ProxyUser       string       // Proxy user
+	ProxyPassword   string       // Proxy password
+	IsEnableMD5     bool         // Flag of enabling MD5 for upload.
+	MD5Threshold    int64        // Memory footprint threshold for each MD5 computation (16MB is the default), in bytes. When the data is larger than that, a temp file is used.
+	IsEnableCRC     bool         // Flag of enabling CRC for upload.
+ LogLevel int // Log level + Logger *log.Logger // For write log + UploadLimitSpeed int // Upload limit speed:KB/s, 0 is unlimited + UploadLimiter *OssLimiter // Bandwidth limit reader for upload + CredentialsProvider CredentialsProvider // User provides interface to get AccessKeyID, AccessKeySecret, SecurityToken +} + +// LimitUploadSpeed uploadSpeed:KB/s, 0 is unlimited,default is 0 +func (config *Config) LimitUploadSpeed(uploadSpeed int) error { + if uploadSpeed < 0 { + return fmt.Errorf("invalid argument, the value of uploadSpeed is less than 0") + } else if uploadSpeed == 0 { + config.UploadLimitSpeed = 0 + config.UploadLimiter = nil + return nil + } + + var err error + config.UploadLimiter, err = GetOssLimiter(uploadSpeed) + if err == nil { + config.UploadLimitSpeed = uploadSpeed + } + return err +} + +// WriteLog output log function +func (config *Config) WriteLog(LogLevel int, format string, a ...interface{}) { + if config.LogLevel < LogLevel || config.Logger == nil { + return + } + + var logBuffer bytes.Buffer + logBuffer.WriteString(LogTag[LogLevel-1]) + logBuffer.WriteString(fmt.Sprintf(format, a...)) + config.Logger.Printf("%s", logBuffer.String()) +} + +// for get Credentials +func (config *Config) GetCredentials() Credentials { + return config.CredentialsProvider.GetCredentials() +} + +// getDefaultOssConfig gets the default configuration. +func getDefaultOssConfig() *Config { + config := Config{} + + config.Endpoint = "" + config.AccessKeyID = "" + config.AccessKeySecret = "" + config.RetryTimes = 5 + config.IsDebug = false + config.UserAgent = userAgent() + config.Timeout = 60 // Seconds + config.SecurityToken = "" + config.IsCname = false + + config.HTTPTimeout.ConnectTimeout = time.Second * 30 // 30s + config.HTTPTimeout.ReadWriteTimeout = time.Second * 60 // 60s + config.HTTPTimeout.HeaderTimeout = time.Second * 60 // 60s + config.HTTPTimeout.LongTimeout = time.Second * 300 // 300s + config.HTTPTimeout.IdleConnTimeout = time.Second * 50 // 50s + config.HTTPMaxConns.MaxIdleConns = 100 + config.HTTPMaxConns.MaxIdleConnsPerHost = 100 + + config.IsUseProxy = false + config.ProxyHost = "" + config.IsAuthProxy = false + config.ProxyUser = "" + config.ProxyPassword = "" + + config.MD5Threshold = 16 * 1024 * 1024 // 16MB + config.IsEnableMD5 = false + config.IsEnableCRC = true + + config.LogLevel = LogOff + config.Logger = log.New(os.Stdout, "", log.LstdFlags) + + provider := &defaultCredentialsProvider{config: &config} + config.CredentialsProvider = provider + + return &config +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go new file mode 100644 index 00000000..3bdbbb22 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/conn.go @@ -0,0 +1,751 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "hash" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strconv" + "strings" + "time" +) + +// Conn defines OSS Conn +type Conn struct { + config *Config + url *urlMaker + client *http.Client +} + +var signKeyList = []string{"acl", "uploads", "location", "cors", + "logging", "website", "referer", "lifecycle", + "delete", "append", "tagging", "objectMeta", + "uploadId", "partNumber", "security-token", + "position", "img", "style", "styleName", + "replication", "replicationProgress", + "replicationLocation", "cname", "bucketInfo", + "comp", "qos", "live", "status", "vod", + 
"startTime", "endTime", "symlink", + "x-oss-process", "response-content-type", "x-oss-traffic-limit", + "response-content-language", "response-expires", + "response-cache-control", "response-content-disposition", + "response-content-encoding", "udf", "udfName", "udfImage", + "udfId", "udfImageDesc", "udfApplication", "comp", + "udfApplicationLog", "restore", "callback", "callback-var", "qosInfo", + "policy", "stat", "encryption", "versions", "versioning", "versionId", "requestPayment"} + +// init initializes Conn +func (conn *Conn) init(config *Config, urlMaker *urlMaker, client *http.Client) error { + if client == nil { + // New transport + transport := newTransport(conn, config) + + // Proxy + if conn.config.IsUseProxy { + proxyURL, err := url.Parse(config.ProxyHost) + if err != nil { + return err + } + transport.Proxy = http.ProxyURL(proxyURL) + } + client = &http.Client{Transport: transport} + } + + conn.config = config + conn.url = urlMaker + conn.client = client + + return nil +} + +// Do sends request and returns the response +func (conn Conn) Do(method, bucketName, objectName string, params map[string]interface{}, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + urlParams := conn.getURLParams(params) + subResource := conn.getSubResource(params) + uri := conn.url.getURL(bucketName, objectName, urlParams) + resource := conn.url.getResource(bucketName, objectName, subResource) + return conn.doRequest(method, uri, resource, headers, data, initCRC, listener) +} + +// DoURL sends the request with signed URL and returns the response result. +func (conn Conn) DoURL(method HTTPMethod, signedURL string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + // Get URI from signedURL + uri, err := url.ParseRequestURI(signedURL) + if err != nil { + return nil, err + } + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) getURLParams(params map[string]interface{}) string { + // Sort + keys := 
make([]string, 0, len(params)) + for k := range params { + keys = append(keys, k) + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(url.QueryEscape(k)) + if params[k] != nil { + buf.WriteString("=" + url.QueryEscape(params[k].(string))) + } + } + + return buf.String() +} + +func (conn Conn) getSubResource(params map[string]interface{}) string { + // Sort + keys := make([]string, 0, len(params)) + for k := range params { + if conn.isParamSign(k) { + keys = append(keys, k) + } + } + sort.Strings(keys) + + // Serialize + var buf bytes.Buffer + for _, k := range keys { + if buf.Len() > 0 { + buf.WriteByte('&') + } + buf.WriteString(k) + if params[k] != nil { + buf.WriteString("=" + params[k].(string)) + } + } + + return buf.String() +} + +func (conn Conn) isParamSign(paramKey string) bool { + for _, k := range signKeyList { + if paramKey == k { + return true + } + } + return false +} + +func (conn Conn) doRequest(method string, uri *url.URL, canonicalizedResource string, headers map[string]string, + data io.Reader, initCRC uint64, listener ProgressListener) (*Response, error) { + method = strings.ToUpper(method) + req := &http.Request{ + Method: method, + URL: uri, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: uri.Host, + } + + tracker := &readerTracker{completedBytes: 0} + fd, crc := conn.handleBody(req, data, initCRC, listener, tracker) + if fd != nil { + defer func() { + fd.Close() + os.Remove(fd.Name()) + }() + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + date := time.Now().UTC().Format(http.TimeFormat) + req.Header.Set(HTTPHeaderDate, date) + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + req.Header.Set(HTTPHeaderOssSecurityToken, akIf.GetSecurityToken()) + } + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + conn.signHeader(req, canonicalizedResource) + + // Transfer started + event := newProgressEvent(TransferStartedEvent, 0, req.ContentLength, 0) + publishProgress(listener, event) + + if conn.config.LogLevel >= Debug { + conn.LoggerHTTPReq(req) + } + + resp, err := conn.client.Do(req) + + if err != nil { + // Transfer failed + event = newProgressEvent(TransferFailedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + return nil, err + } + + if conn.config.LogLevel >= Debug { + //print out http resp + conn.LoggerHTTPResp(req, resp) + } + + // Transfer completed + event = newProgressEvent(TransferCompletedEvent, tracker.completedBytes, req.ContentLength, 0) + publishProgress(listener, event) + + return conn.handleResponse(resp, crc) +} + +func (conn Conn) signURL(method HTTPMethod, bucketName, objectName string, expiration int64, params map[string]interface{}, headers map[string]string) string { + akIf := conn.config.GetCredentials() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + subResource := conn.getSubResource(params) + canonicalizedResource := conn.url.getResource(bucketName, objectName, subResource) + + m := strings.ToUpper(string(method)) + req := &http.Request{ + Method: m, + 
Header: make(http.Header), + } + + if conn.config.IsAuthProxy { + auth := conn.config.ProxyUser + ":" + conn.config.ProxyPassword + basic := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) + req.Header.Set("Proxy-Authorization", basic) + } + + req.Header.Set(HTTPHeaderDate, strconv.FormatInt(expiration, 10)) + req.Header.Set(HTTPHeaderHost, conn.config.Endpoint) + req.Header.Set(HTTPHeaderUserAgent, conn.config.UserAgent) + + if headers != nil { + for k, v := range headers { + req.Header.Set(k, v) + } + } + + signedStr := conn.getSignedStr(req, canonicalizedResource, akIf.GetAccessKeySecret()) + + params[HTTPParamExpires] = strconv.FormatInt(expiration, 10) + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + params[HTTPParamSignature] = signedStr + + urlParams := conn.getURLParams(params) + return conn.url.getSignURL(bucketName, objectName, urlParams) +} + +func (conn Conn) signRtmpURL(bucketName, channelName, playlistName string, expiration int64) string { + params := map[string]interface{}{} + if playlistName != "" { + params[HTTPParamPlaylistName] = playlistName + } + expireStr := strconv.FormatInt(expiration, 10) + params[HTTPParamExpires] = expireStr + + akIf := conn.config.GetCredentials() + if akIf.GetAccessKeyID() != "" { + params[HTTPParamAccessKeyID] = akIf.GetAccessKeyID() + if akIf.GetSecurityToken() != "" { + params[HTTPParamSecurityToken] = akIf.GetSecurityToken() + } + signedStr := conn.getRtmpSignedStr(bucketName, channelName, playlistName, expiration, akIf.GetAccessKeySecret(), params) + params[HTTPParamSignature] = signedStr + } + + urlParams := conn.getURLParams(params) + return conn.url.getSignRtmpURL(bucketName, channelName, urlParams) +} + +// handleBody handles request body +func (conn Conn) handleBody(req *http.Request, body io.Reader, initCRC uint64, + listener ProgressListener, tracker *readerTracker) (*os.File, hash.Hash64) { + var file *os.File + var crc hash.Hash64 + reader := body + + // Length + switch v := body.(type) { + case *bytes.Buffer: + req.ContentLength = int64(v.Len()) + case *bytes.Reader: + req.ContentLength = int64(v.Len()) + case *strings.Reader: + req.ContentLength = int64(v.Len()) + case *os.File: + req.ContentLength = tryGetFileSize(v) + case *io.LimitedReader: + req.ContentLength = int64(v.N) + } + req.Header.Set(HTTPHeaderContentLength, strconv.FormatInt(req.ContentLength, 10)) + + // MD5 + if body != nil && conn.config.IsEnableMD5 && req.Header.Get(HTTPHeaderContentMD5) == "" { + md5 := "" + reader, md5, file, _ = calcMD5(body, req.ContentLength, conn.config.MD5Threshold) + req.Header.Set(HTTPHeaderContentMD5, md5) + } + + // CRC + if reader != nil && conn.config.IsEnableCRC { + crc = NewCRC(crcTable(), initCRC) + reader = TeeReader(reader, crc, req.ContentLength, listener, tracker) + } + + // HTTP body + rc, ok := reader.(io.ReadCloser) + if !ok && reader != nil { + rc = ioutil.NopCloser(reader) + } + + if conn.isUploadLimitReq(req) { + limitReader := &LimitSpeedReader{ + reader: rc, + ossLimiter: conn.config.UploadLimiter, + } + req.Body = limitReader + } else { + req.Body = rc + } + return file, crc +} + +// isUploadLimitReq: judge limit upload speed or not +func (conn Conn) isUploadLimitReq(req *http.Request) bool { + if conn.config.UploadLimitSpeed == 0 || conn.config.UploadLimiter == nil { + return false + } + + if req.Method != "GET" && req.Method != "DELETE" && req.Method != "HEAD" { + if req.ContentLength > 0 { + return true + } + } + return false +} + +func tryGetFileSize(f *os.File) int64 { + fInfo, _ := f.Stat() 
+ return fInfo.Size() +} + +// handleResponse handles response +func (conn Conn) handleResponse(resp *http.Response, crc hash.Hash64) (*Response, error) { + var cliCRC uint64 + var srvCRC uint64 + + statusCode := resp.StatusCode + if statusCode >= 400 && statusCode <= 505 { + // 4xx and 5xx indicate that the operation has error occurred + var respBody []byte + respBody, err := readResponseBody(resp) + if err != nil { + return nil, err + } + + if len(respBody) == 0 { + err = ServiceError{ + StatusCode: statusCode, + RequestID: resp.Header.Get(HTTPHeaderOssRequestID), + } + } else { + // Response contains storage service error object, unmarshal + srvErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, + resp.Header.Get(HTTPHeaderOssRequestID)) + if errIn != nil { // error unmarshaling the error response + err = fmt.Errorf("oss: service returned invalid response body, status = %s, RequestId = %s", resp.Status, resp.Header.Get(HTTPHeaderOssRequestID)) + } else { + err = srvErr + } + } + + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: ioutil.NopCloser(bytes.NewReader(respBody)), // restore the body + }, err + } else if statusCode >= 300 && statusCode <= 307 { + // OSS use 3xx, but response has no body + err := fmt.Errorf("oss: service returned %d,%s", resp.StatusCode, resp.Status) + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: resp.Body, + }, err + } + + if conn.config.IsEnableCRC && crc != nil { + cliCRC = crc.Sum64() + } + srvCRC, _ = strconv.ParseUint(resp.Header.Get(HTTPHeaderOssCRC64), 10, 64) + + // 2xx, successful + return &Response{ + StatusCode: resp.StatusCode, + Headers: resp.Header, + Body: resp.Body, + ClientCRC: cliCRC, + ServerCRC: srvCRC, + }, nil +} + +// LoggerHTTPReq Print the header information of the http request +func (conn Conn) LoggerHTTPReq(req *http.Request) { + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("[Req:%p]Method:%s\t", req, req.Method)) + logBuffer.WriteString(fmt.Sprintf("Host:%s\t", req.URL.Host)) + logBuffer.WriteString(fmt.Sprintf("Path:%s\t", req.URL.Path)) + logBuffer.WriteString(fmt.Sprintf("Query:%s\t", req.URL.RawQuery)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + + for k, v := range req.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +// LoggerHTTPResp Print Response to http request +func (conn Conn) LoggerHTTPResp(req *http.Request, resp *http.Response) { + var logBuffer bytes.Buffer + logBuffer.WriteString(fmt.Sprintf("[Resp:%p]StatusCode:%d\t", req, resp.StatusCode)) + logBuffer.WriteString(fmt.Sprintf("Header info:")) + for k, v := range resp.Header { + var valueBuffer bytes.Buffer + for j := 0; j < len(v); j++ { + if j > 0 { + valueBuffer.WriteString(" ") + } + valueBuffer.WriteString(v[j]) + } + logBuffer.WriteString(fmt.Sprintf("\t%s:%s", k, valueBuffer.String())) + } + conn.config.WriteLog(Debug, "%s\n", logBuffer.String()) +} + +func calcMD5(body io.Reader, contentLen, md5Threshold int64) (reader io.Reader, b64 string, tempFile *os.File, err error) { + if contentLen == 0 || contentLen > md5Threshold { + // Huge body, use temporary file + tempFile, err = ioutil.TempFile(os.TempDir(), TempFilePrefix) + if tempFile != nil { + io.Copy(tempFile, body) + tempFile.Seek(0, os.SEEK_SET) + md5 := 
md5.New() + io.Copy(md5, tempFile) + sum := md5.Sum(nil) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + tempFile.Seek(0, os.SEEK_SET) + reader = tempFile + } + } else { + // Small body, use memory + buf, _ := ioutil.ReadAll(body) + sum := md5.Sum(buf) + b64 = base64.StdEncoding.EncodeToString(sum[:]) + reader = bytes.NewReader(buf) + } + return +} + +func readResponseBody(resp *http.Response) ([]byte, error) { + defer resp.Body.Close() + out, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + return out, err +} + +func serviceErrFromXML(body []byte, statusCode int, requestID string) (ServiceError, error) { + var storageErr ServiceError + + if err := xml.Unmarshal(body, &storageErr); err != nil { + return storageErr, err + } + + storageErr.StatusCode = statusCode + storageErr.RequestID = requestID + storageErr.RawMessage = string(body) + return storageErr, nil +} + +func xmlUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return xml.Unmarshal(data, v) +} + +func jsonUnmarshal(body io.Reader, v interface{}) error { + data, err := ioutil.ReadAll(body) + if err != nil { + return err + } + return json.Unmarshal(data, v) +} + +// timeoutConn handles HTTP timeout +type timeoutConn struct { + conn net.Conn + timeout time.Duration + longTimeout time.Duration +} + +func newTimeoutConn(conn net.Conn, timeout time.Duration, longTimeout time.Duration) *timeoutConn { + conn.SetReadDeadline(time.Now().Add(longTimeout)) + return &timeoutConn{ + conn: conn, + timeout: timeout, + longTimeout: longTimeout, + } +} + +func (c *timeoutConn) Read(b []byte) (n int, err error) { + c.SetReadDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Read(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Write(b []byte) (n int, err error) { + c.SetWriteDeadline(time.Now().Add(c.timeout)) + n, err = c.conn.Write(b) + c.SetReadDeadline(time.Now().Add(c.longTimeout)) + return n, err +} + +func (c *timeoutConn) Close() error { + return c.conn.Close() +} + +func (c *timeoutConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *timeoutConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *timeoutConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *timeoutConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *timeoutConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} + +// UrlMaker builds URL and resource +const ( + urlTypeCname = 1 + urlTypeIP = 2 + urlTypeAliyun = 3 +) + +type urlMaker struct { + Scheme string // HTTP or HTTPS + NetLoc string // Host or IP + Type int // 1 CNAME, 2 IP, 3 ALIYUN + IsProxy bool // Proxy +} + +// Init parses endpoint +func (um *urlMaker) Init(endpoint string, isCname bool, isProxy bool) { + if strings.HasPrefix(endpoint, "http://") { + um.Scheme = "http" + um.NetLoc = endpoint[len("http://"):] + } else if strings.HasPrefix(endpoint, "https://") { + um.Scheme = "https" + um.NetLoc = endpoint[len("https://"):] + } else { + um.Scheme = "http" + um.NetLoc = endpoint + } + + host, _, err := net.SplitHostPort(um.NetLoc) + if err != nil { + host = um.NetLoc + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + } + + ip := net.ParseIP(host) + if ip != nil { + um.Type = urlTypeIP + } else if isCname { + um.Type = urlTypeCname + } else { + um.Type = urlTypeAliyun + } + um.IsProxy = isProxy +} + 
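+// How Init classifies endpoints (illustrative values, not from this file):
+// "oss-cn-hangzhou.aliyuncs.com" parses as urlTypeAliyun, so buildURL later in
+// this file prefixes the bucket as a subdomain; "127.0.0.1:8080" parses as
+// urlTypeIP and the bucket goes into the path; a custom domain passed with
+// isCname=true parses as urlTypeCname and is used as-is.
+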
+// getURL gets URL
+func (um urlMaker) getURL(bucket, object, params string) *url.URL {
+	host, path := um.buildURL(bucket, object)
+	addr := ""
+	if params == "" {
+		addr = fmt.Sprintf("%s://%s%s", um.Scheme, host, path)
+	} else {
+		addr = fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+	}
+	uri, _ := url.ParseRequestURI(addr)
+	return uri
+}
+
+// getSignURL gets sign URL
+func (um urlMaker) getSignURL(bucket, object, params string) string {
+	host, path := um.buildURL(bucket, object)
+	return fmt.Sprintf("%s://%s%s?%s", um.Scheme, host, path, params)
+}
+
+// getSignRtmpURL builds a signed RTMP URL
+func (um urlMaker) getSignRtmpURL(bucket, channelName, params string) string {
+	host, path := um.buildURL(bucket, "live")
+
+	channelName = url.QueryEscape(channelName)
+	channelName = strings.Replace(channelName, "+", "%20", -1)
+
+	return fmt.Sprintf("rtmp://%s%s/%s?%s", host, path, channelName, params)
+}
+
+// buildURL builds URL
+func (um urlMaker) buildURL(bucket, object string) (string, string) {
+	var host = ""
+	var path = ""
+
+	object = url.QueryEscape(object)
+	object = strings.Replace(object, "+", "%20", -1)
+
+	if um.Type == urlTypeCname {
+		host = um.NetLoc
+		path = "/" + object
+	} else if um.Type == urlTypeIP {
+		if bucket == "" {
+			host = um.NetLoc
+			path = "/"
+		} else {
+			host = um.NetLoc
+			path = fmt.Sprintf("/%s/%s", bucket, object)
+		}
+	} else {
+		if bucket == "" {
+			host = um.NetLoc
+			path = "/"
+		} else {
+			host = bucket + "." + um.NetLoc
+			path = "/" + object
+		}
+	}
+
+	return host, path
+}
+
+// getResource gets canonicalized resource
+func (um urlMaker) getResource(bucketName, objectName, subResource string) string {
+	if subResource != "" {
+		subResource = "?" + subResource
+	}
+	if bucketName == "" {
+		return fmt.Sprintf("/%s%s", bucketName, subResource)
+	}
+	return fmt.Sprintf("/%s/%s%s", bucketName, objectName, subResource)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
new file mode 100644
index 00000000..a10f3bde
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/const.go
@@ -0,0 +1,186 @@
+package oss
+
+import "os"
+
+// ACLType bucket/object ACL
+type ACLType string
+
+const (
+	// ACLPrivate definition : private read and write
+	ACLPrivate ACLType = "private"
+
+	// ACLPublicRead definition : public read and private write
+	ACLPublicRead ACLType = "public-read"
+
+	// ACLPublicReadWrite definition : public read and public write
+	ACLPublicReadWrite ACLType = "public-read-write"
+
+	// ACLDefault Object. It's only applicable for object.
+	ACLDefault ACLType = "default"
+)
+
+// VersioningStatus defines the bucket versioning status
+type VersioningStatus string
+
+const (
+	// VersionEnabled Versioning Status definition: Enabled
+	VersionEnabled VersioningStatus = "Enabled"
+
+	// VersionSuspended Versioning Status definition: Suspended
+	VersionSuspended VersioningStatus = "Suspended"
+)
+
+// MetadataDirectiveType specifies whether to use the metadata of the source object when copying an object.
+type MetadataDirectiveType string
+
+const (
+	// MetaCopy the target object's metadata is copied from the source one
+	MetaCopy MetadataDirectiveType = "COPY"
+
+	// MetaReplace the target object's metadata is created as part of the copy request (not the same as the source one)
+	MetaReplace MetadataDirectiveType = "REPLACE"
+)
+
+// TaggingDirectiveType specifies whether to use the tagging of the source object when copying an object.
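+// (Sketch of intent, not shown in this file: the directive is sent on copy
+// requests via the HTTPHeaderOssTaggingDirective header defined below, so
+// TaggingReplace makes the destination take the tagging supplied in the
+// request instead of the source object's.)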
+type TaggingDirectiveType string
+
+const (
+	// TaggingCopy the target object's tagging is copied from the source one
+	TaggingCopy TaggingDirectiveType = "COPY"
+
+	// TaggingReplace the target object's tagging is created as part of the copy request (not the same as the source one)
+	TaggingReplace TaggingDirectiveType = "REPLACE"
+)
+
+// AlgorithmType specifies the server side encryption algorithm name
+type AlgorithmType string
+
+const (
+	KMSAlgorithm AlgorithmType = "KMS"
+	AESAlgorithm AlgorithmType = "AES256"
+)
+
+// StorageClassType bucket storage type
+type StorageClassType string
+
+const (
+	// StorageStandard standard
+	StorageStandard StorageClassType = "Standard"
+
+	// StorageIA infrequent access
+	StorageIA StorageClassType = "IA"
+
+	// StorageArchive archive
+	StorageArchive StorageClassType = "Archive"
+)
+
+// PayerType the type of request payer
+type PayerType string
+
+const (
+	// Requester the requester who sends the request pays
+	Requester PayerType = "Requester"
+
+	// BucketOwner the bucket owner pays
+	BucketOwner PayerType = "BucketOwner"
+)
+
+// HTTPMethod HTTP request method
+type HTTPMethod string
+
+const (
+	// HTTPGet HTTP GET
+	HTTPGet HTTPMethod = "GET"
+
+	// HTTPPut HTTP PUT
+	HTTPPut HTTPMethod = "PUT"
+
+	// HTTPHead HTTP HEAD
+	HTTPHead HTTPMethod = "HEAD"
+
+	// HTTPPost HTTP POST
+	HTTPPost HTTPMethod = "POST"
+
+	// HTTPDelete HTTP DELETE
+	HTTPDelete HTTPMethod = "DELETE"
+)
+
+// HTTP headers
+const (
+	HTTPHeaderAcceptEncoding     string = "Accept-Encoding"
+	HTTPHeaderAuthorization             = "Authorization"
+	HTTPHeaderCacheControl              = "Cache-Control"
+	HTTPHeaderContentDisposition        = "Content-Disposition"
+	HTTPHeaderContentEncoding           = "Content-Encoding"
+	HTTPHeaderContentLength             = "Content-Length"
+	HTTPHeaderContentMD5                = "Content-MD5"
+	HTTPHeaderContentType               = "Content-Type"
+	HTTPHeaderContentLanguage           = "Content-Language"
+	HTTPHeaderDate                      = "Date"
+	HTTPHeaderEtag                      = "ETag"
+	HTTPHeaderExpires                   = "Expires"
+	HTTPHeaderHost                      = "Host"
+	HTTPHeaderLastModified              = "Last-Modified"
+	HTTPHeaderRange                     = "Range"
+	HTTPHeaderLocation                  = "Location"
+	HTTPHeaderOrigin                    = "Origin"
+	HTTPHeaderServer                    = "Server"
+	HTTPHeaderUserAgent                 = "User-Agent"
+	HTTPHeaderIfModifiedSince           = "If-Modified-Since"
+	HTTPHeaderIfUnmodifiedSince         = "If-Unmodified-Since"
+	HTTPHeaderIfMatch                   = "If-Match"
+	HTTPHeaderIfNoneMatch               = "If-None-Match"
+	HTTPHeaderACReqMethod               = "Access-Control-Request-Method"
+	HTTPHeaderACReqHeaders              = "Access-Control-Request-Headers"
+
+	HTTPHeaderOssACL                         = "X-Oss-Acl"
+	HTTPHeaderOssMetaPrefix                  = "X-Oss-Meta-"
+	HTTPHeaderOssObjectACL                   = "X-Oss-Object-Acl"
+	HTTPHeaderOssSecurityToken               = "X-Oss-Security-Token"
+	HTTPHeaderOssServerSideEncryption        = "X-Oss-Server-Side-Encryption"
+	HTTPHeaderOssServerSideEncryptionKeyID   = "X-Oss-Server-Side-Encryption-Key-Id"
+	HTTPHeaderOssCopySource                  = "X-Oss-Copy-Source"
+	HTTPHeaderOssCopySourceRange             = "X-Oss-Copy-Source-Range"
+	HTTPHeaderOssCopySourceIfMatch           = "X-Oss-Copy-Source-If-Match"
+	HTTPHeaderOssCopySourceIfNoneMatch       = "X-Oss-Copy-Source-If-None-Match"
+	HTTPHeaderOssCopySourceIfModifiedSince   = "X-Oss-Copy-Source-If-Modified-Since"
+	HTTPHeaderOssCopySourceIfUnmodifiedSince = "X-Oss-Copy-Source-If-Unmodified-Since"
+	HTTPHeaderOssMetadataDirective           = "X-Oss-Metadata-Directive"
+	HTTPHeaderOssNextAppendPosition          = "X-Oss-Next-Append-Position"
+	HTTPHeaderOssRequestID                   = "X-Oss-Request-Id"
+	HTTPHeaderOssCRC64                       = "X-Oss-Hash-Crc64ecma"
+	HTTPHeaderOssSymlinkTarget               = "X-Oss-Symlink-Target"
+	HTTPHeaderOssStorageClass                =
"X-Oss-Storage-Class" + HTTPHeaderOssCallback = "X-Oss-Callback" + HTTPHeaderOssCallbackVar = "X-Oss-Callback-Var" + HTTPHeaderOssRequester = "X-Oss-Request-Payer" + HTTPHeaderOssTagging = "X-Oss-Tagging" + HTTPHeaderOssTaggingDirective = "X-Oss-Tagging-Directive" + HTTPHeaderOssTrafficLimit = "X-Oss-Traffic-Limit" +) + +// HTTP Param +const ( + HTTPParamExpires = "Expires" + HTTPParamAccessKeyID = "OSSAccessKeyId" + HTTPParamSignature = "Signature" + HTTPParamSecurityToken = "security-token" + HTTPParamPlaylistName = "playlistName" +) + +// Other constants +const ( + MaxPartSize = 5 * 1024 * 1024 * 1024 // Max part size, 5GB + MinPartSize = 100 * 1024 // Min part size, 100KB + + FilePermMode = os.FileMode(0664) // Default file permission + + TempFilePrefix = "oss-go-temp-" // Temp file prefix + TempFileSuffix = ".temp" // Temp file suffix + + CheckpointFileSuffix = ".cp" // Checkpoint file suffix + + NullVersion = "null" + + Version = "v2.0.1" // Go SDK version +) diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go new file mode 100644 index 00000000..c96694f2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/crc.go @@ -0,0 +1,123 @@ +package oss + +import ( + "hash" + "hash/crc64" +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + crc uint64 + tab *crc64.Table +} + +// NewCRC creates a new hash.Hash64 computing the CRC64 checksum +// using the polynomial represented by the Table. +func NewCRC(tab *crc64.Table, init uint64) hash.Hash64 { return &digest{init, tab} } + +// Size returns the number of bytes sum will return. +func (d *digest) Size() int { return crc64.Size } + +// BlockSize returns the hash's underlying block size. +// The Write method must be able to accept any amount +// of data, but it may operate more efficiently if all writes +// are a multiple of the block size. +func (d *digest) BlockSize() int { return 1 } + +// Reset resets the hash to its initial state. +func (d *digest) Reset() { d.crc = 0 } + +// Write (via the embedded io.Writer interface) adds more data to the running hash. +// It never returns an error. +func (d *digest) Write(p []byte) (n int, err error) { + d.crc = crc64.Update(d.crc, d.tab, p) + return len(p), nil +} + +// Sum64 returns CRC64 value. +func (d *digest) Sum64() uint64 { return d.crc } + +// Sum returns hash value. 
+func (d *digest) Sum(in []byte) []byte { + s := d.Sum64() + return append(in, byte(s>>56), byte(s>>48), byte(s>>40), byte(s>>32), byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) +} + +// gf2Dim dimension of GF(2) vectors (length of CRC) +const gf2Dim int = 64 + +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + for i := 0; vec != 0; i++ { + if vec&1 != 0 { + sum ^= mat[i] + } + + vec >>= 1 + } + return sum +} + +func gf2MatrixSquare(square []uint64, mat []uint64) { + for n := 0; n < gf2Dim; n++ { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// CRC64Combine combines CRC64 +func CRC64Combine(crc1 uint64, crc2 uint64, len2 uint64) uint64 { + var even [gf2Dim]uint64 // Even-power-of-two zeros operator + var odd [gf2Dim]uint64 // Odd-power-of-two zeros operator + + // Degenerate case + if len2 == 0 { + return crc1 + } + + // Put operator for one zero bit in odd + odd[0] = crc64.ECMA // CRC64 polynomial + var row uint64 = 1 + for n := 1; n < gf2Dim; n++ { + odd[n] = row + row <<= 1 + } + + // Put operator for two zero bits in even + gf2MatrixSquare(even[:], odd[:]) + + // Put operator for four zero bits in odd + gf2MatrixSquare(odd[:], even[:]) + + // Apply len2 zeros to crc1, first square will put the operator for one zero byte, eight zero bits, in even + for { + // Apply zeros operator for this bit of len2 + gf2MatrixSquare(even[:], odd[:]) + + if len2&1 != 0 { + crc1 = gf2MatrixTimes(even[:], crc1) + } + + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + + // Another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd[:], even[:]) + if len2&1 != 0 { + crc1 = gf2MatrixTimes(odd[:], crc1) + } + len2 >>= 1 + + // If no more bits set, then done + if len2 == 0 { + break + } + } + + // Return combined CRC + crc1 ^= crc2 + return crc1 +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go new file mode 100644 index 00000000..13ea0173 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/download.go @@ -0,0 +1,563 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "hash/crc64" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// DownloadFile downloads files with multipart download. +// +// objectKey the object key. +// filePath the local file to download from objectKey in OSS. +// partSize the part size in bytes. +// options object's constraints, check out GetObject for the reference. +// +// error it's nil when the call succeeds, otherwise it's an error object. 
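+//
+// Usage sketch (object key, local path and option values are hypothetical;
+// Routines and Checkpoint are assumed to be option constructors from this
+// SDK's option set, matching getRoutines/getCpConfig below):
+//
+//	err := bucket.DownloadFile("my-object", "/tmp/my-object", 1024*1024,
+//		oss.Routines(3), oss.Checkpoint(true, "/tmp/my-object.cp"))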
+// +func (bucket Bucket) DownloadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < 1 { + return errors.New("oss: part size smaller than 1") + } + + uRange, err := getRangeConfig(options) + if err != nil { + return err + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getDownloadCpFilePath(cpConf, bucket.BucketName, objectKey, filePath) + if cpFilePath != "" { + return bucket.downloadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines, uRange) + } + } + + return bucket.downloadFile(objectKey, filePath, partSize, options, routines, uRange) +} + +func getDownloadCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destFile string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject) + absPath, _ := filepath.Abs(destFile) + cpFileName := getCpFileName(src, absPath) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// getRangeConfig gets the download range from the options. +func getRangeConfig(options []Option) (*unpackedRange, error) { + rangeOpt, err := findOption(options, HTTPHeaderRange, nil) + if err != nil || rangeOpt == nil { + return nil, err + } + return parseRange(rangeOpt.(string)) +} + +// ----- concurrent download without checkpoint ----- + +// downloadWorkerArg is download worker's parameters +type downloadWorkerArg struct { + bucket *Bucket + key string + filePath string + options []Option + hook downloadPartHook + enableCRC bool +} + +// downloadPartHook is hook for test +type downloadPartHook func(part downloadPart) error + +var downloadPartHooker downloadPartHook = defaultDownloadPartHook + +func defaultDownloadPartHook(part downloadPart) error { + return nil +} + +// defaultDownloadProgressListener defines default ProgressListener, shields the ProgressListener in options of GetObject. +type defaultDownloadProgressListener struct { +} + +// ProgressChanged no-ops +func (listener *defaultDownloadProgressListener) ProgressChanged(event *ProgressEvent) { +} + +// downloadWorker +func downloadWorker(id int, arg downloadWorkerArg, jobs <-chan downloadPart, results chan<- downloadPart, failed chan<- error, die <-chan bool) { + for part := range jobs { + if err := arg.hook(part); err != nil { + failed <- err + break + } + + // Resolve options + r := Range(part.Start, part.End) + p := Progress(&defaultDownloadProgressListener{}) + opts := make([]Option, len(arg.options)+2) + // Append orderly, can not be reversed! + opts = append(opts, arg.options...) + opts = append(opts, r, p) + + rd, err := arg.bucket.GetObject(arg.key, opts...) 
+ if err != nil { + failed <- err + break + } + defer rd.Close() + + var crcCalc hash.Hash64 + if arg.enableCRC { + crcCalc = crc64.New(crcTable()) + contentLen := part.End - part.Start + 1 + rd = ioutil.NopCloser(TeeReader(rd, crcCalc, contentLen, nil, nil)) + } + defer rd.Close() + + select { + case <-die: + return + default: + } + + fd, err := os.OpenFile(arg.filePath, os.O_WRONLY, FilePermMode) + if err != nil { + failed <- err + break + } + + _, err = fd.Seek(part.Start-part.Offset, os.SEEK_SET) + if err != nil { + fd.Close() + failed <- err + break + } + + _, err = io.Copy(fd, rd) + if err != nil { + fd.Close() + failed <- err + break + } + + if arg.enableCRC { + part.CRC64 = crcCalc.Sum64() + } + + fd.Close() + results <- part + } +} + +// downloadScheduler +func downloadScheduler(jobs chan downloadPart, parts []downloadPart) { + for _, part := range parts { + jobs <- part + } + close(jobs) +} + +// downloadPart defines download part +type downloadPart struct { + Index int // Part number, starting from 0 + Start int64 // Start index + End int64 // End index + Offset int64 // Offset + CRC64 uint64 // CRC check value of part +} + +// getDownloadParts gets download parts +func getDownloadParts(objectSize, partSize int64, uRange *unpackedRange) []downloadPart { + parts := []downloadPart{} + part := downloadPart{} + i := 0 + start, end := adjustRange(uRange, objectSize) + for offset := start; offset < end; offset += partSize { + part.Index = i + part.Start = offset + part.End = GetPartEnd(offset, end, partSize) + part.Offset = start + part.CRC64 = 0 + parts = append(parts, part) + i++ + } + return parts +} + +// getObjectBytes gets object bytes length +func getObjectBytes(parts []downloadPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// combineCRCInParts caculates the total CRC of continuous parts +func combineCRCInParts(dps []downloadPart) uint64 { + if dps == nil || len(dps) == 0 { + return 0 + } + + crc := dps[0].CRC64 + for i := 1; i < len(dps); i++ { + crc = CRC64Combine(crc, dps[i].CRC64, (uint64)(dps[i].End-dps[i].Start+1)) + } + + return crc +} + +// downloadFile downloads file concurrently without checkpoint. +func (bucket Bucket) downloadFile(objectKey, filePath string, partSize int64, options []Option, routines int, uRange *unpackedRange) error { + tempFilePath := filePath + TempFileSuffix + listener := getProgressListener(options) + + // If the file does not exist, create one. If exists, the download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := deleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) 
+	if err != nil {
+		return err
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	enableCRC := false
+	expectedCRC := (uint64)(0)
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			enableCRC = true
+			expectedCRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Get the parts of the file
+	parts := getDownloadParts(objectSize, partSize, uRange)
+	jobs := make(chan downloadPart, len(parts))
+	results := make(chan downloadPart, len(parts))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	var completedBytes int64
+	totalBytes := getObjectBytes(parts)
+	event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0)
+	publishProgress(listener, event)
+
+	// Start the download workers
+	arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, enableCRC}
+	for w := 1; w <= routines; w++ {
+		go downloadWorker(w, arg, jobs, results, failed, die)
+	}
+
+	// Download parts concurrently
+	go downloadScheduler(jobs, parts)
+
+	// Waiting for parts download finished
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			downBytes := (part.End - part.Start + 1)
+			completedBytes += downBytes
+			parts[part.Index].CRC64 = part.CRC64
+			event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, downBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0)
+	publishProgress(listener, event)
+
+	if enableCRC {
+		actualCRC := combineCRCInParts(parts)
+		err = checkDownloadCRC(actualCRC, expectedCRC)
+		if err != nil {
+			return err
+		}
+	}
+
+	return os.Rename(tempFilePath, filePath)
+}
+
+// ----- Concurrent download with checkpoint -----
+
+const downloadCpMagic = "92611BED-89E2-46B6-89E5-72F273D4B0A3"
+
+type downloadCheckpoint struct {
+	Magic     string         // Magic
+	MD5       string         // Checkpoint content MD5
+	FilePath  string         // Local file
+	Object    string         // Key
+	ObjStat   objectStat     // Object status
+	Parts     []downloadPart // All download parts
+	PartStat  []bool         // Parts' download status
+	Start     int64          // Start point of the file
+	End       int64          // End point of the file
+	enableCRC bool           // Whether it has CRC check
+	CRC       uint64         // CRC check value
+}
+
+type objectStat struct {
+	Size         int64  // Object size
+	LastModified string // Last modified time
+	Etag         string // Etag
+}
+
+// isValid checks whether the checkpoint data is valid. It returns true when the data is valid, the checkpoint file is intact, and the object has not been updated.
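+// Validity here means three things, mirroring the checks in the body below:
+// the recomputed MD5 of the checkpoint content matches the stored MD5, the
+// object's size/last-modified/etag still match the recorded objectStat, and
+// any requested download range is unchanged.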
+func (cp downloadCheckpoint) isValid(meta http.Header, uRange *unpackedRange) (bool, error) {
+	// Compare the CP's Magic and the MD5
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != downloadCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the object size, last modified time and etag
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	// Check the download range
+	if uRange != nil {
+		start, end := adjustRange(uRange, objectSize)
+		if start != cp.Start || end != cp.End {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// load loads the checkpoint from the local file
+func (cp *downloadCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// dump dumps the checkpoint to the local file
+func (cp *downloadCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Calculate MD5
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialize
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Dump
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts gets unfinished parts
+func (cp downloadCheckpoint) todoParts() []downloadPart {
+	dps := []downloadPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// getCompletedBytes gets completed size
+func (cp downloadCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for i, part := range cp.Parts {
+		if cp.PartStat[i] {
+			completedBytes += (part.End - part.Start + 1)
+		}
+	}
+	return completedBytes
+}
+
+// prepare initializes the download tasks
+func (cp *downloadCheckpoint) prepare(meta http.Header, bucket *Bucket, objectKey, filePath string, partSize int64, uRange *unpackedRange) error {
+	// CP
+	cp.Magic = downloadCpMagic
+	cp.FilePath = filePath
+	cp.Object = objectKey
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	if bucket.getConfig().IsEnableCRC && meta.Get(HTTPHeaderOssCRC64) != "" {
+		if uRange == nil || (!uRange.hasStart && !uRange.hasEnd) {
+			cp.enableCRC = true
+			cp.CRC, _ = strconv.ParseUint(meta.Get(HTTPHeaderOssCRC64), 10, 0)
+		}
+	}
+
+	// Parts
+	cp.Parts = getDownloadParts(objectSize, partSize, uRange)
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+	}
+
+	return nil
+}
+
+func (cp *downloadCheckpoint) complete(cpFilePath, downFilepath string) error {
+	os.Remove(cpFilePath)
+	return os.Rename(downFilepath, cp.FilePath)
+}
+
+// downloadFileWithCp downloads files with checkpoint.
+func (bucket Bucket) downloadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int, uRange *unpackedRange) error {
+	tempFilePath := filePath + TempFileSuffix
+	listener := getProgressListener(options)
+
+	// Load checkpoint data.
+ dcp := downloadCheckpoint{} + err := dcp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Get the object detailed meta for object whole size + // must delete header:range to get whole object size + skipOptions := deleteOption(options, HTTPHeaderRange) + meta, err := bucket.GetObjectDetailedMeta(objectKey, skipOptions...) + if err != nil { + return err + } + + // Load error or data invalid. Re-initialize the download. + valid, err := dcp.isValid(meta, uRange) + if err != nil || !valid { + if err = dcp.prepare(meta, &bucket, objectKey, filePath, partSize, uRange); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Create the file if not exists. Otherwise the parts download will overwrite it. + fd, err := os.OpenFile(tempFilePath, os.O_WRONLY|os.O_CREATE, FilePermMode) + if err != nil { + return err + } + fd.Close() + + // Unfinished parts + parts := dcp.todoParts() + jobs := make(chan downloadPart, len(parts)) + results := make(chan downloadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := dcp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the download workers routine + arg := downloadWorkerArg{&bucket, objectKey, tempFilePath, options, downloadPartHooker, dcp.enableCRC} + for w := 1; w <= routines; w++ { + go downloadWorker(w, arg, jobs, results, failed, die) + } + + // Concurrently downloads parts + go downloadScheduler(jobs, parts) + + // Wait for the parts download finished + completed := 0 + for completed < len(parts) { + select { + case part := <-results: + completed++ + dcp.PartStat[part.Index] = true + dcp.Parts[part.Index].CRC64 = part.CRC64 + dcp.dump(cpFilePath) + downBytes := (part.End - part.Start + 1) + completedBytes += downBytes + event = newProgressEvent(TransferDataEvent, completedBytes, dcp.ObjStat.Size, downBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, dcp.ObjStat.Size, 0) + publishProgress(listener, event) + + if dcp.enableCRC { + actualCRC := combineCRCInParts(dcp.Parts) + err = checkDownloadCRC(actualCRC, dcp.CRC) + if err != nil { + return err + } + } + + return dcp.complete(cpFilePath, tempFilePath) +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go new file mode 100644 index 00000000..6d7b4e0f --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/error.go @@ -0,0 +1,94 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/http" + "strings" +) + +// ServiceError contains fields of the error response from Oss Service REST API. 
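+// A typical error body from the service, which serviceErrFromXML unmarshals
+// into this struct, looks like the following (values are illustrative only):
+//
+//	<Error>
+//	  <Code>NoSuchBucket</Code>
+//	  <Message>The specified bucket does not exist.</Message>
+//	  <RequestId>5C0000000000000000000000</RequestId>
+//	  <HostId>my-bucket.oss-cn-hangzhou.aliyuncs.com</HostId>
+//	</Error>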
+type ServiceError struct {
+	XMLName    xml.Name `xml:"Error"`
+	Code       string   `xml:"Code"`      // The error code returned from OSS to the caller
+	Message    string   `xml:"Message"`   // The detail error message from OSS
+	RequestID  string   `xml:"RequestId"` // The UUID used to uniquely identify the request
+	HostID     string   `xml:"HostId"`    // The OSS server cluster's Id
+	Endpoint   string   `xml:"Endpoint"`
+	RawMessage string   // The raw messages from OSS
+	StatusCode int      // HTTP status code
+}
+
+// Error implements interface error
+func (e ServiceError) Error() string {
+	if e.Endpoint == "" {
+		return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s",
+			e.StatusCode, e.Code, e.Message, e.RequestID)
+	}
+	return fmt.Sprintf("oss: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=\"%s\", RequestId=%s, Endpoint=%s",
+		e.StatusCode, e.Code, e.Message, e.RequestID, e.Endpoint)
+}
+
+// UnexpectedStatusCodeError is returned when a storage service responds with neither an error
+// nor with an HTTP status code indicating success.
+type UnexpectedStatusCodeError struct {
+	allowed []int // The expected HTTP status codes returned from OSS
+	got     int   // The actual HTTP status code from OSS
+}
+
+// Error implements interface error
+func (e UnexpectedStatusCodeError) Error() string {
+	s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
+
+	got := s(e.got)
+	expected := []string{}
+	for _, v := range e.allowed {
+		expected = append(expected, s(v))
+	}
+	return fmt.Sprintf("oss: status code from service response is %s; was expecting %s",
+		got, strings.Join(expected, " or "))
+}
+
+// Got is the actual status code returned by oss.
+func (e UnexpectedStatusCodeError) Got() int {
+	return e.got
+}
+
+// checkRespCode returns UnexpectedStatusCodeError if the given response code is not
+// one of the allowed status codes; otherwise nil.
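+//
+// For example (mirroring the callers earlier in this diff):
+//
+//	err := checkRespCode(resp.StatusCode, []int{http.StatusOK, http.StatusNoContent})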
+func checkRespCode(respCode int, allowed []int) error {
+	for _, v := range allowed {
+		if respCode == v {
+			return nil
+		}
+	}
+	return UnexpectedStatusCodeError{allowed, respCode}
+}
+
+// CRCCheckError is returned when the CRC check between client and server is inconsistent
+type CRCCheckError struct {
+	clientCRC uint64 // Calculated CRC64 on the client
+	serverCRC uint64 // Calculated CRC64 on the server
+	operation string // Upload operations such as PutObject/AppendObject/UploadPart, etc
+	requestID string // The request id of this operation
+}
+
+// Error implements the error interface
+func (e CRCCheckError) Error() string {
+	return fmt.Sprintf("oss: the crc of %s is inconsistent, client %d but server %d; request id is %s",
+		e.operation, e.clientCRC, e.serverCRC, e.requestID)
+}
+
+func checkDownloadCRC(clientCRC, serverCRC uint64) error {
+	if clientCRC == serverCRC {
+		return nil
+	}
+	return CRCCheckError{clientCRC, serverCRC, "DownloadFile", ""}
+}
+
+func checkCRC(resp *Response, operation string) error {
+	if resp.Headers.Get(HTTPHeaderOssCRC64) == "" || resp.ClientCRC == resp.ServerCRC {
+		return nil
+	}
+	return CRCCheckError{resp.ClientCRC, resp.ServerCRC, operation, resp.Headers.Get(HTTPHeaderOssRequestID)}
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
new file mode 100644
index 00000000..943dc8fd
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_6.go
@@ -0,0 +1,28 @@
+// +build !go1.7
+
+// "golang.org/x/time/rate" depends on the context package, which is in the
+// standard library from go1.7 onward. This file exists only so the package
+// builds on older Go versions; it does not support limiting the upload speed.
+package oss
+
+import (
+	"fmt"
+	"io"
+)
+
+const (
+	perTokenBandwidthSize int = 1024
+)
+
+type OssLimiter struct {
+}
+
+type LimitSpeedReader struct {
+	io.ReadCloser
+	reader     io.Reader
+	ossLimiter *OssLimiter
+}
+
+func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
+	err = fmt.Errorf("rate.Limiter is not supported below version go1.7")
+	return nil, err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
new file mode 100644
index 00000000..f6baf298
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/limit_reader_1_7.go
@@ -0,0 +1,90 @@
+// +build go1.7
+
+package oss
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"time"
+
+	"golang.org/x/time/rate"
+)
+
+const (
+	perTokenBandwidthSize int = 1024
+)
+
+// OssLimiter wraps rate.Limiter
+type OssLimiter struct {
+	limiter *rate.Limiter
+}
+
+// GetOssLimiter creates an OssLimiter.
+// uploadSpeed is in KB/s.
+func GetOssLimiter(uploadSpeed int) (ossLimiter *OssLimiter, err error) {
+	limiter := rate.NewLimiter(rate.Limit(uploadSpeed), uploadSpeed)
+
+	// Consume the limiter's initial full burst of tokens up front so it behaves more accurately.
+	limiter.AllowN(time.Now(), uploadSpeed)
+
+	return &OssLimiter{
+		limiter: limiter,
+	}, nil
+}
+
+// LimitSpeedReader limits the bandwidth of an upload
+type LimitSpeedReader struct {
+	io.ReadCloser
+	reader     io.Reader
+	ossLimiter *OssLimiter
+}
+
+// Read reads from the underlying reader, throttled by ossLimiter.
+func (r *LimitSpeedReader) Read(p []byte) (n int, err error) {
+	n = 0
+	err = nil
+	start := 0
+	burst := r.ossLimiter.limiter.Burst()
+	var end int
+	var tmpN int
+	var tc int
+	for start < len(p) {
+		if start+burst*perTokenBandwidthSize < len(p) {
+			end = start + burst*perTokenBandwidthSize
+		} else {
+			end = len(p)
+		}
+
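+		// Read at most one burst's worth of bytes (Burst() tokens, 1 KB per
+		// token), so the ReserveN call below can always succeed for this chunk.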
+		tmpN, err = r.reader.Read(p[start:end])
+		if tmpN > 0 {
+			n += tmpN
+			start = n
+		}
+
+		if err != nil {
+			return
+		}
+
+		tc = int(math.Ceil(float64(tmpN) / float64(perTokenBandwidthSize)))
+		now := time.Now()
+		re := r.ossLimiter.limiter.ReserveN(now, tc)
+		if !re.OK() {
+			err = fmt.Errorf("LimitSpeedReader.Read() failure,ReserveN error,start:%d,end:%d,burst:%d,perTokenBandwidthSize:%d",
+				start, end, burst, perTokenBandwidthSize)
+			return
+		}
+		timeDelay := re.Delay()
+		time.Sleep(timeDelay)
+	}
+	return
+}
+
+// Close closes the underlying reader if it implements io.ReadCloser.
+func (r *LimitSpeedReader) Close() error {
+	rc, ok := r.reader.(io.ReadCloser)
+	if ok {
+		return rc.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
new file mode 100644
index 00000000..00393061
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/livechannel.go
@@ -0,0 +1,257 @@
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+//
+// CreateLiveChannel creates a live-channel
+//
+// channelName the name of the channel
+// config configuration of the channel
+//
+// CreateLiveChannelResult the result of creating the live-channel
+// error nil if success, otherwise error
+//
+func (bucket Bucket) CreateLiveChannel(channelName string, config LiveChannelConfiguration) (CreateLiveChannelResult, error) {
+	var out CreateLiveChannelResult
+
+	bs, err := xml.Marshal(config)
+	if err != nil {
+		return out, err
+	}
+
+	buffer := new(bytes.Buffer)
+	buffer.Write(bs)
+
+	params := map[string]interface{}{}
+	params["live"] = nil
+	resp, err := bucket.do("PUT", channelName, params, nil, buffer, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// PutLiveChannelStatus sets the status of the live-channel: enabled/disabled
+//
+// channelName the name of the channel
+// status enabled/disabled
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) PutLiveChannelStatus(channelName, status string) error {
+	params := map[string]interface{}{}
+	params["live"] = nil
+	params["status"] = status
+
+	resp, err := bucket.do("PUT", channelName, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// PostVodPlaylist creates a playlist based on the specified playlist name, startTime and endTime
+//
+// channelName the name of the channel
+// playlistName the name of the playlist, must end with ".m3u8"
+// startTime the start time of the playlist
+// endTime the end time of the playlist
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) PostVodPlaylist(channelName, playlistName string, startTime, endTime time.Time) error {
+	params := map[string]interface{}{}
+	params["vod"] = nil
+	params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
+	params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
+
+	key := fmt.Sprintf("%s/%s", channelName, playlistName)
+	resp, err := bucket.do("POST", key, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return checkRespCode(resp.StatusCode, []int{http.StatusOK})
+}
+
+// GetVodPlaylist gets the playlist based on the specified channelName, startTime and endTime
+//
+// channelName the name of the channel
+// startTime the start time of the playlist
+// endTime the end time of the playlist
+//
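+// Both times are sent to the service as Unix seconds (startTime.Unix() below),
+// so any sub-second precision is discarded.
+//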
+// io.ReadCloser reader instance for reading data from the response. The caller must call Close() on it after use; it is only valid when error is nil.
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetVodPlaylist(channelName string, startTime, endTime time.Time) (io.ReadCloser, error) {
+	params := map[string]interface{}{}
+	params["vod"] = nil
+	params["startTime"] = strconv.FormatInt(startTime.Unix(), 10)
+	params["endTime"] = strconv.FormatInt(endTime.Unix(), 10)
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return resp.Body, nil
+}
+
+//
+// GetLiveChannelStat gets the state of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelStat the state of the live-channel
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelStat(channelName string) (LiveChannelStat, error) {
+	var out LiveChannelStat
+	params := map[string]interface{}{}
+	params["live"] = nil
+	params["comp"] = "stat"
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// GetLiveChannelInfo gets the configuration info of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelConfiguration the configuration info of the live-channel
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelInfo(channelName string) (LiveChannelConfiguration, error) {
+	var out LiveChannelConfiguration
+	params := map[string]interface{}{}
+	params["live"] = nil
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// GetLiveChannelHistory gets the push records of the live-channel
+//
+// channelName the name of the channel
+//
+// LiveChannelHistory push records
+// error nil if success, otherwise error
+//
+func (bucket Bucket) GetLiveChannelHistory(channelName string) (LiveChannelHistory, error) {
+	var out LiveChannelHistory
+	params := map[string]interface{}{}
+	params["live"] = nil
+	params["comp"] = "history"
+
+	resp, err := bucket.do("GET", channelName, params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// ListLiveChannel lists the live-channels
+//
+// options Prefix: filter by channel names that start with the value of Prefix
+//         MaxKeys: the maximum number of entries returned
+//         Marker: the cursor from which to start listing
+//
+// ListLiveChannelResult live-channel list
+// error nil if success, otherwise error
+//
+func (bucket Bucket) ListLiveChannel(options ...Option) (ListLiveChannelResult, error) {
+	var out ListLiveChannelResult
+
+	params, err := getRawParams(options)
+	if err != nil {
+		return out, err
+	}
+
+	params["live"] = nil
+
+	resp, err := bucket.do("GET", "", params, nil, nil, nil)
+	if err != nil {
+		return out, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &out)
+	return out, err
+}
+
+//
+// DeleteLiveChannel deletes the live-channel. The operation fails while a client is still streaming to the channel. Only the live-channel itself is deleted; the objects generated by the channel are not.
+//
+// channelName the name of the channel
+//
+// error nil if success, otherwise error
+//
+func (bucket Bucket) DeleteLiveChannel(channelName string) error {
+	params := map[string]interface{}{}
+	params["live"] = nil
+
+	if channelName == "" {
+		return fmt.Errorf("invalid argument: channel name is empty")
+	}
+
+	resp, err := bucket.do("DELETE", channelName, params, nil, nil, nil)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	return checkRespCode(resp.StatusCode, []int{http.StatusNoContent})
+}
+
+//
+// SignRtmpURL generates a signed RTMP push-stream URL for a trusted user to push an RTMP stream to the live-channel.
+//
+// channelName the name of the channel
+// playlistName the name of the playlist, must end with ".m3u8"
+// expires expiration (in seconds)
+//
+// string signed RTMP push-stream URL
+// error nil if success, otherwise error
+//
+func (bucket Bucket) SignRtmpURL(channelName, playlistName string, expires int64) (string, error) {
+	if expires <= 0 {
+		return "", fmt.Errorf("invalid argument: %d, expires must be greater than 0", expires)
+	}
+	expiration := time.Now().Unix() + expires
+
+	return bucket.Client.Conn.signRtmpURL(bucket.BucketName, channelName, playlistName, expiration), nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
new file mode 100644
index 00000000..42368458
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/mime.go
@@ -0,0 +1,246 @@
+package oss
+
+import (
+	"mime"
+	"path"
+	"strings"
+)
+
+var extToMimeType = map[string]string{
+	".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+	".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template",
+	".potx": "application/vnd.openxmlformats-officedocument.presentationml.template",
+	".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow",
+	".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+	".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide",
+	".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+	".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template",
+	".xlam": "application/vnd.ms-excel.addin.macroEnabled.12",
+	".xlsb": "application/vnd.ms-excel.sheet.binary.macroEnabled.12",
+	".apk":  "application/vnd.android.package-archive",
+	".hqx":  "application/mac-binhex40",
+	".cpt":  "application/mac-compactpro",
+	".doc":  "application/msword",
+	".ogg":  "application/ogg",
+	".pdf":  "application/pdf",
+	".rtf":  "text/rtf",
+	".mif":  "application/vnd.mif",
+	".xls":  "application/vnd.ms-excel",
+	".ppt":  "application/vnd.ms-powerpoint",
+	".odc":  "application/vnd.oasis.opendocument.chart",
+	".odb":  "application/vnd.oasis.opendocument.database",
+	".odf":  "application/vnd.oasis.opendocument.formula",
+	".odg":  "application/vnd.oasis.opendocument.graphics",
+	".otg":  "application/vnd.oasis.opendocument.graphics-template",
+	".odi":  "application/vnd.oasis.opendocument.image",
+	".odp":  "application/vnd.oasis.opendocument.presentation",
+	".otp":  "application/vnd.oasis.opendocument.presentation-template",
+	".ods":  "application/vnd.oasis.opendocument.spreadsheet",
+	".ots":  "application/vnd.oasis.opendocument.spreadsheet-template",
+	".odt":  "application/vnd.oasis.opendocument.text",
+	".odm":  "application/vnd.oasis.opendocument.text-master",
+	".ott":
"application/vnd.oasis.opendocument.text-template", + ".oth": "application/vnd.oasis.opendocument.text-web", + ".sxw": "application/vnd.sun.xml.writer", + ".stw": "application/vnd.sun.xml.writer.template", + ".sxc": "application/vnd.sun.xml.calc", + ".stc": "application/vnd.sun.xml.calc.template", + ".sxd": "application/vnd.sun.xml.draw", + ".std": "application/vnd.sun.xml.draw.template", + ".sxi": "application/vnd.sun.xml.impress", + ".sti": "application/vnd.sun.xml.impress.template", + ".sxg": "application/vnd.sun.xml.writer.global", + ".sxm": "application/vnd.sun.xml.math", + ".sis": "application/vnd.symbian.install", + ".wbxml": "application/vnd.wap.wbxml", + ".wmlc": "application/vnd.wap.wmlc", + ".wmlsc": "application/vnd.wap.wmlscriptc", + ".bcpio": "application/x-bcpio", + ".torrent": "application/x-bittorrent", + ".bz2": "application/x-bzip2", + ".vcd": "application/x-cdlink", + ".pgn": "application/x-chess-pgn", + ".cpio": "application/x-cpio", + ".csh": "application/x-csh", + ".dvi": "application/x-dvi", + ".spl": "application/x-futuresplash", + ".gtar": "application/x-gtar", + ".hdf": "application/x-hdf", + ".jar": "application/x-java-archive", + ".jnlp": "application/x-java-jnlp-file", + ".js": "application/x-javascript", + ".ksp": "application/x-kspread", + ".chrt": "application/x-kchart", + ".kil": "application/x-killustrator", + ".latex": "application/x-latex", + ".rpm": "application/x-rpm", + ".sh": "application/x-sh", + ".shar": "application/x-shar", + ".swf": "application/x-shockwave-flash", + ".sit": "application/x-stuffit", + ".sv4cpio": "application/x-sv4cpio", + ".sv4crc": "application/x-sv4crc", + ".tar": "application/x-tar", + ".tcl": "application/x-tcl", + ".tex": "application/x-tex", + ".man": "application/x-troff-man", + ".me": "application/x-troff-me", + ".ms": "application/x-troff-ms", + ".ustar": "application/x-ustar", + ".src": "application/x-wais-source", + ".zip": "application/zip", + ".m3u": "audio/x-mpegurl", + ".ra": "audio/x-pn-realaudio", + ".wav": "audio/x-wav", + ".wma": "audio/x-ms-wma", + ".wax": "audio/x-ms-wax", + ".pdb": "chemical/x-pdb", + ".xyz": "chemical/x-xyz", + ".bmp": "image/bmp", + ".gif": "image/gif", + ".ief": "image/ief", + ".png": "image/png", + ".wbmp": "image/vnd.wap.wbmp", + ".ras": "image/x-cmu-raster", + ".pnm": "image/x-portable-anymap", + ".pbm": "image/x-portable-bitmap", + ".pgm": "image/x-portable-graymap", + ".ppm": "image/x-portable-pixmap", + ".rgb": "image/x-rgb", + ".xbm": "image/x-xbitmap", + ".xpm": "image/x-xpixmap", + ".xwd": "image/x-xwindowdump", + ".css": "text/css", + ".rtx": "text/richtext", + ".tsv": "text/tab-separated-values", + ".jad": "text/vnd.sun.j2me.app-descriptor", + ".wml": "text/vnd.wap.wml", + ".wmls": "text/vnd.wap.wmlscript", + ".etx": "text/x-setext", + ".mxu": "video/vnd.mpegurl", + ".flv": "video/x-flv", + ".wm": "video/x-ms-wm", + ".wmv": "video/x-ms-wmv", + ".wmx": "video/x-ms-wmx", + ".wvx": "video/x-ms-wvx", + ".avi": "video/x-msvideo", + ".movie": "video/x-sgi-movie", + ".ice": "x-conference/x-cooltalk", + ".3gp": "video/3gpp", + ".ai": "application/postscript", + ".aif": "audio/x-aiff", + ".aifc": "audio/x-aiff", + ".aiff": "audio/x-aiff", + ".asc": "text/plain", + ".atom": "application/atom+xml", + ".au": "audio/basic", + ".bin": "application/octet-stream", + ".cdf": "application/x-netcdf", + ".cgm": "image/cgm", + ".class": "application/octet-stream", + ".dcr": "application/x-director", + ".dif": "video/x-dv", + ".dir": "application/x-director", + ".djv": "image/vnd.djvu", + 
".djvu": "image/vnd.djvu", + ".dll": "application/octet-stream", + ".dmg": "application/octet-stream", + ".dms": "application/octet-stream", + ".dtd": "application/xml-dtd", + ".dv": "video/x-dv", + ".dxr": "application/x-director", + ".eps": "application/postscript", + ".exe": "application/octet-stream", + ".ez": "application/andrew-inset", + ".gram": "application/srgs", + ".grxml": "application/srgs+xml", + ".gz": "application/x-gzip", + ".htm": "text/html", + ".html": "text/html", + ".ico": "image/x-icon", + ".ics": "text/calendar", + ".ifb": "text/calendar", + ".iges": "model/iges", + ".igs": "model/iges", + ".jp2": "image/jp2", + ".jpe": "image/jpeg", + ".jpeg": "image/jpeg", + ".jpg": "image/jpeg", + ".kar": "audio/midi", + ".lha": "application/octet-stream", + ".lzh": "application/octet-stream", + ".m4a": "audio/mp4a-latm", + ".m4p": "audio/mp4a-latm", + ".m4u": "video/vnd.mpegurl", + ".m4v": "video/x-m4v", + ".mac": "image/x-macpaint", + ".mathml": "application/mathml+xml", + ".mesh": "model/mesh", + ".mid": "audio/midi", + ".midi": "audio/midi", + ".mov": "video/quicktime", + ".mp2": "audio/mpeg", + ".mp3": "audio/mpeg", + ".mp4": "video/mp4", + ".mpe": "video/mpeg", + ".mpeg": "video/mpeg", + ".mpg": "video/mpeg", + ".mpga": "audio/mpeg", + ".msh": "model/mesh", + ".nc": "application/x-netcdf", + ".oda": "application/oda", + ".ogv": "video/ogv", + ".pct": "image/pict", + ".pic": "image/pict", + ".pict": "image/pict", + ".pnt": "image/x-macpaint", + ".pntg": "image/x-macpaint", + ".ps": "application/postscript", + ".qt": "video/quicktime", + ".qti": "image/x-quicktime", + ".qtif": "image/x-quicktime", + ".ram": "audio/x-pn-realaudio", + ".rdf": "application/rdf+xml", + ".rm": "application/vnd.rn-realmedia", + ".roff": "application/x-troff", + ".sgm": "text/sgml", + ".sgml": "text/sgml", + ".silo": "model/mesh", + ".skd": "application/x-koan", + ".skm": "application/x-koan", + ".skp": "application/x-koan", + ".skt": "application/x-koan", + ".smi": "application/smil", + ".smil": "application/smil", + ".snd": "audio/basic", + ".so": "application/octet-stream", + ".svg": "image/svg+xml", + ".t": "application/x-troff", + ".texi": "application/x-texinfo", + ".texinfo": "application/x-texinfo", + ".tif": "image/tiff", + ".tiff": "image/tiff", + ".tr": "application/x-troff", + ".txt": "text/plain", + ".vrml": "model/vrml", + ".vxml": "application/voicexml+xml", + ".webm": "video/webm", + ".wrl": "model/vrml", + ".xht": "application/xhtml+xml", + ".xhtml": "application/xhtml+xml", + ".xml": "application/xml", + ".xsl": "application/xml", + ".xslt": "application/xslt+xml", + ".xul": "application/vnd.mozilla.xul+xml", + ".webp": "image/webp", +} + +// TypeByExtension returns the MIME type associated with the file extension ext. 
+// The result is used for the HTTP Content-Type header.
+func TypeByExtension(filePath string) string {
+	typ := mime.TypeByExtension(path.Ext(filePath))
+	if typ == "" {
+		typ = extToMimeType[strings.ToLower(path.Ext(filePath))]
+	}
+	return typ
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
new file mode 100644
index 00000000..b0b4a502
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/model.go
@@ -0,0 +1,69 @@
+package oss
+
+import (
+	"hash"
+	"io"
+	"net/http"
+)
+
+// Response defines HTTP response from OSS
+type Response struct {
+	StatusCode int
+	Headers    http.Header
+	Body       io.ReadCloser
+	ClientCRC  uint64
+	ServerCRC  uint64
+}
+
+func (r *Response) Read(p []byte) (n int, err error) {
+	return r.Body.Read(p)
+}
+
+// Close closes the http response body
+func (r *Response) Close() error {
+	return r.Body.Close()
+}
+
+// PutObjectRequest is the request of DoPutObject
+type PutObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+}
+
+// GetObjectRequest is the request of DoGetObject
+type GetObjectRequest struct {
+	ObjectKey string
+}
+
+// GetObjectResult is the result of DoGetObject
+type GetObjectResult struct {
+	Response  *Response
+	ClientCRC hash.Hash64
+	ServerCRC uint64
+}
+
+// AppendObjectRequest is the request of DoAppendObject
+type AppendObjectRequest struct {
+	ObjectKey string
+	Reader    io.Reader
+	Position  int64
+}
+
+// AppendObjectResult is the result of DoAppendObject
+type AppendObjectResult struct {
+	NextPosition int64
+	CRC          uint64
+}
+
+// UploadPartRequest is the request of DoUploadPart
+type UploadPartRequest struct {
+	InitResult *InitiateMultipartUploadResult
+	Reader     io.Reader
+	PartSize   int64
+	PartNumber int
+}
+
+// UploadPartResult is the result of DoUploadPart
+type UploadPartResult struct {
+	Part UploadPart
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
new file mode 100644
index 00000000..acca2b43
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multicopy.go
@@ -0,0 +1,462 @@
+package oss
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+)
+
+// CopyFile copies an object in parts (multipart copy)
+//
+// srcBucketName source bucket name
+// srcObjectKey source object name
+// destObjectKey target object name; the destination bucket is the method's receiver
+// partSize the part size in bytes.
+// options the object's constraints. Check out the function InitiateMultipartUpload.
+//
+// error it's nil if the operation succeeds, otherwise it's an error object.
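+//
+// A hedged usage sketch (bucket names and paths are illustrative): copy with
+// 1 MB parts, three worker routines and a resumable checkpoint directory:
+//   err := bucket.CopyFile("src-bucket", "src/key", "dst/key", 1024*1024,
+//       oss.Routines(3), oss.CheckpointDir(true, "/tmp/oss-cp"))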
+//
+func (bucket Bucket) CopyFile(srcBucketName, srcObjectKey, destObjectKey string, partSize int64, options ...Option) error {
+	destBucketName := bucket.BucketName
+	if partSize < MinPartSize || partSize > MaxPartSize {
+		return errors.New("oss: part size invalid range (1024KB, 5GB]")
+	}
+
+	cpConf := getCpConfig(options)
+	routines := getRoutines(options)
+
+	if cpConf != nil && cpConf.IsEnable {
+		cpFilePath := getCopyCpFilePath(cpConf, srcBucketName, srcObjectKey, destBucketName, destObjectKey)
+		if cpFilePath != "" {
+			return bucket.copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey, partSize, options, cpFilePath, routines)
+		}
+	}
+
+	return bucket.copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey,
+		partSize, options, routines)
+}
+
+func getCopyCpFilePath(cpConf *cpConfig, srcBucket, srcObject, destBucket, destObject string) string {
+	if cpConf.FilePath == "" && cpConf.DirPath != "" {
+		dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject)
+		src := fmt.Sprintf("oss://%v/%v", srcBucket, srcObject)
+		cpFileName := getCpFileName(src, dest)
+		cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName
+	}
+	return cpConf.FilePath
+}
+
+// ----- Concurrent copy without checkpoint ---------
+
+// copyWorkerArg defines the copy worker arguments
+type copyWorkerArg struct {
+	bucket        *Bucket
+	imur          InitiateMultipartUploadResult
+	srcBucketName string
+	srcObjectKey  string
+	options       []Option
+	hook          copyPartHook
+}
+
+// copyPartHook is the hook for testing purposes
+type copyPartHook func(part copyPart) error

+var copyPartHooker copyPartHook = defaultCopyPartHook
+
+func defaultCopyPartHook(part copyPart) error {
+	return nil
+}
+
+// copyWorker is the worker routine that copies individual parts
+func copyWorker(id int, arg copyWorkerArg, jobs <-chan copyPart, results chan<- UploadPart, failed chan<- error, die <-chan bool) {
+	for chunk := range jobs {
+		if err := arg.hook(chunk); err != nil {
+			failed <- err
+			break
+		}
+		chunkSize := chunk.End - chunk.Start + 1
+		part, err := arg.bucket.UploadPartCopy(arg.imur, arg.srcBucketName, arg.srcObjectKey,
+			chunk.Start, chunkSize, chunk.Number, arg.options...)
+		if err != nil {
+			failed <- err
+			break
+		}
+		select {
+		case <-die:
+			return
+		default:
+		}
+		results <- part
+	}
+}
+
+// copyScheduler feeds all parts into the jobs channel and then closes it
+func copyScheduler(jobs chan copyPart, parts []copyPart) {
+	for _, part := range parts {
+		jobs <- part
+	}
+	close(jobs)
+}
+
+// copyPart structure
+type copyPart struct {
+	Number int   // Part number (from 1 to 10,000)
+	Start  int64 // The start index in the source file.
+ End int64 // The end index in the source file +} + +// getCopyParts calculates copy parts +func getCopyParts(objectSize, partSize int64) []copyPart { + parts := []copyPart{} + part := copyPart{} + i := 0 + for offset := int64(0); offset < objectSize; offset += partSize { + part.Number = i + 1 + part.Start = offset + part.End = GetPartEnd(offset, objectSize, partSize) + parts = append(parts, part) + i++ + } + return parts +} + +// getSrcObjectBytes gets the source file size +func getSrcObjectBytes(parts []copyPart) int64 { + var ob int64 + for _, part := range parts { + ob += (part.End - part.Start + 1) + } + return ob +} + +// copyFile is a concurrently copy without checkpoint +func (bucket Bucket) copyFile(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := getProgressListener(options) + + // for get whole length + skipOptions := deleteOption(options, HTTPHeaderRange) + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, skipOptions...) + if err != nil { + return err + } + + objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0) + if err != nil { + return err + } + + // Get copy parts + parts := getCopyParts(objectSize, partSize) + // Initialize the multipart upload + imur, err := descBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getSrcObjectBytes(parts) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start to copy workers + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // Start the scheduler + go copyScheduler(jobs, parts) + + // Wait for the parts finished. + completed := 0 + ups := make([]UploadPart, len(parts)) + for completed < len(parts) { + select { + case part := <-results: + completed++ + ups[part.PartNumber-1] = part + copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1) + completedBytes += copyBytes + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, copyBytes) + publishProgress(listener, event) + case err := <-failed: + close(die) + descBucket.AbortMultipartUpload(imur, options...) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + return err + } + + if completed >= len(parts) { + break + } + } + + event = newProgressEvent(TransferCompletedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + // Complete the multipart upload + _, err = descBucket.CompleteMultipartUpload(imur, ups, options...) + if err != nil { + bucket.AbortMultipartUpload(imur, options...) 
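+		// Abort the multipart upload so its already-copied parts do not linger
+		// on the service; the abort error is ignored and the original error is
+		// returned below.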
+		return err
+	}
+	return nil
+}
+
+// ----- Concurrent copy with checkpoint -----
+
+const copyCpMagic = "84F1F18C-FF1D-403B-A1D8-9DEB5F65910A"
+
+type copyCheckpoint struct {
+	Magic          string       // Magic
+	MD5            string       // CP content MD5
+	SrcBucketName  string       // Source bucket
+	SrcObjectKey   string       // Source object
+	DestBucketName string       // Target bucket
+	DestObjectKey  string       // Target object
+	CopyID         string       // Copy ID
+	ObjStat        objectStat   // Object stat
+	Parts          []copyPart   // Copy parts
+	CopyParts      []UploadPart // The uploaded parts
+	PartStat       []bool       // The part status
+}
+
+// isValid checks that the CP data is valid and that the object has not been updated.
+func (cp copyCheckpoint) isValid(meta http.Header) (bool, error) {
+	// Compare CP's magic number and the MD5.
+	cpb := cp
+	cpb.MD5 = ""
+	js, _ := json.Marshal(cpb)
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+
+	if cp.Magic != copyCpMagic || b64 != cp.MD5 {
+		return false, nil
+	}
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return false, err
+	}
+
+	// Compare the object size and last modified time and etag.
+	if cp.ObjStat.Size != objectSize ||
+		cp.ObjStat.LastModified != meta.Get(HTTPHeaderLastModified) ||
+		cp.ObjStat.Etag != meta.Get(HTTPHeaderEtag) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+// load loads from the checkpoint file
+func (cp *copyCheckpoint) load(filePath string) error {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return err
+	}
+
+	err = json.Unmarshal(contents, cp)
+	return err
+}
+
+// update updates the parts status
+func (cp *copyCheckpoint) update(part UploadPart) {
+	cp.CopyParts[part.PartNumber-1] = part
+	cp.PartStat[part.PartNumber-1] = true
+}
+
+// dump dumps the CP to the file
+func (cp *copyCheckpoint) dump(filePath string) error {
+	bcp := *cp
+
+	// Calculate MD5
+	bcp.MD5 = ""
+	js, err := json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+	sum := md5.Sum(js)
+	b64 := base64.StdEncoding.EncodeToString(sum[:])
+	bcp.MD5 = b64
+
+	// Serialization
+	js, err = json.Marshal(bcp)
+	if err != nil {
+		return err
+	}
+
+	// Dump
+	return ioutil.WriteFile(filePath, js, FilePermMode)
+}
+
+// todoParts returns unfinished parts
+func (cp copyCheckpoint) todoParts() []copyPart {
+	dps := []copyPart{}
+	for i, ps := range cp.PartStat {
+		if !ps {
+			dps = append(dps, cp.Parts[i])
+		}
+	}
+	return dps
+}
+
+// getCompletedBytes returns finished bytes count
+func (cp copyCheckpoint) getCompletedBytes() int64 {
+	var completedBytes int64
+	for i, part := range cp.Parts {
+		if cp.PartStat[i] {
+			completedBytes += (part.End - part.Start + 1)
+		}
+	}
+	return completedBytes
+}
+
+// prepare initializes the multipart upload
+func (cp *copyCheckpoint) prepare(meta http.Header, srcBucket *Bucket, srcObjectKey string, destBucket *Bucket, destObjectKey string,
+	partSize int64, options []Option) error {
+	// CP
+	cp.Magic = copyCpMagic
+	cp.SrcBucketName = srcBucket.BucketName
+	cp.SrcObjectKey = srcObjectKey
+	cp.DestBucketName = destBucket.BucketName
+	cp.DestObjectKey = destObjectKey
+
+	objectSize, err := strconv.ParseInt(meta.Get(HTTPHeaderContentLength), 10, 0)
+	if err != nil {
+		return err
+	}
+
+	cp.ObjStat.Size = objectSize
+	cp.ObjStat.LastModified = meta.Get(HTTPHeaderLastModified)
+	cp.ObjStat.Etag = meta.Get(HTTPHeaderEtag)
+
+	// Parts
+	cp.Parts = getCopyParts(objectSize, partSize)
+	cp.PartStat = make([]bool, len(cp.Parts))
+	for i := range cp.PartStat {
+		cp.PartStat[i] = false
+ } + cp.CopyParts = make([]UploadPart, len(cp.Parts)) + + // Init copy + imur, err := destBucket.InitiateMultipartUpload(destObjectKey, options...) + if err != nil { + return err + } + cp.CopyID = imur.UploadID + + return nil +} + +func (cp *copyCheckpoint) complete(bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { + imur := InitiateMultipartUploadResult{Bucket: cp.DestBucketName, + Key: cp.DestObjectKey, UploadID: cp.CopyID} + _, err := bucket.CompleteMultipartUpload(imur, parts, options...) + if err != nil { + return err + } + os.Remove(cpFilePath) + return err +} + +// copyFileWithCp is concurrently copy with checkpoint +func (bucket Bucket) copyFileWithCp(srcBucketName, srcObjectKey, destBucketName, destObjectKey string, + partSize int64, options []Option, cpFilePath string, routines int) error { + descBucket, err := bucket.Client.Bucket(destBucketName) + srcBucket, err := bucket.Client.Bucket(srcBucketName) + listener := getProgressListener(options) + + // Load CP data + ccp := copyCheckpoint{} + err = ccp.load(cpFilePath) + if err != nil { + os.Remove(cpFilePath) + } + + // Make sure the object is not updated. + // get whole length + skipOptions := deleteOption(options, HTTPHeaderRange) + meta, err := srcBucket.GetObjectDetailedMeta(srcObjectKey, skipOptions...) + if err != nil { + return err + } + + // Load error or the CP data is invalid---reinitialize + valid, err := ccp.isValid(meta) + if err != nil || !valid { + if err = ccp.prepare(meta, srcBucket, srcObjectKey, descBucket, destObjectKey, partSize, options); err != nil { + return err + } + os.Remove(cpFilePath) + } + + // Unfinished parts + parts := ccp.todoParts() + imur := InitiateMultipartUploadResult{ + Bucket: destBucketName, + Key: destObjectKey, + UploadID: ccp.CopyID} + + jobs := make(chan copyPart, len(parts)) + results := make(chan UploadPart, len(parts)) + failed := make(chan error) + die := make(chan bool) + + completedBytes := ccp.getCompletedBytes() + event := newProgressEvent(TransferStartedEvent, completedBytes, ccp.ObjStat.Size, 0) + publishProgress(listener, event) + + // Start the worker coroutines + arg := copyWorkerArg{descBucket, imur, srcBucketName, srcObjectKey, options, copyPartHooker} + for w := 1; w <= routines; w++ { + go copyWorker(w, arg, jobs, results, failed, die) + } + + // Start the scheduler + go copyScheduler(jobs, parts) + + // Wait for the parts completed. 
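+	// Each finished part updates the checkpoint file (ccp.dump below), which is
+	// what makes an interrupted copy resumable.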
+	completed := 0
+	for completed < len(parts) {
+		select {
+		case part := <-results:
+			completed++
+			ccp.update(part)
+			ccp.dump(cpFilePath)
+			copyBytes := (parts[part.PartNumber-1].End - parts[part.PartNumber-1].Start + 1)
+			completedBytes += copyBytes
+			event = newProgressEvent(TransferDataEvent, completedBytes, ccp.ObjStat.Size, copyBytes)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ccp.ObjStat.Size, 0)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(parts) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ccp.ObjStat.Size, 0)
+	publishProgress(listener, event)
+
+	return ccp.complete(descBucket, ccp.CopyParts, cpFilePath, options)
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
new file mode 100644
index 00000000..657cb68f
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/multipart.go
@@ -0,0 +1,302 @@
+package oss
+
+import (
+	"bytes"
+	"encoding/xml"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"sort"
+	"strconv"
+)
+
+// InitiateMultipartUpload initializes multipart upload
+//
+// objectKey object name
+// options the object constraints for upload. The valid options are CacheControl, ContentDisposition, ContentEncoding, Expires,
+// ServerSideEncryption, Meta, check out the following link:
+// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/InitiateMultipartUpload.html
+//
+// InitiateMultipartUploadResult the return value of the InitiateMultipartUpload, which is used for later calls such as UploadPartFromFile, UploadPartCopy.
+// error it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) InitiateMultipartUpload(objectKey string, options ...Option) (InitiateMultipartUploadResult, error) {
+	var imur InitiateMultipartUploadResult
+	opts := addContentType(options, objectKey)
+	params := map[string]interface{}{}
+	params["uploads"] = nil
+	resp, err := bucket.do("POST", objectKey, params, opts, nil, nil)
+	if err != nil {
+		return imur, err
+	}
+	defer resp.Body.Close()
+
+	err = xmlUnmarshal(resp.Body, &imur)
+	return imur, err
+}
+
+// UploadPart uploads a part
+//
+// After initializing a multipart upload, the upload Id and object key can be used for uploading the parts.
+// Each part has a part number (ranging from 1 to 10,000). For a given upload Id, the part number identifies the position of the part in the whole file.
+// Uploading another part with the same part number and upload Id will therefore overwrite the data.
+// Except for the last one, the minimum part size is 100KB. There's no limit on the last part size.
+//
+// imur the returned value of InitiateMultipartUpload.
+// reader io.Reader the reader for the part's data.
+// size the part size.
+// partNumber the part number (ranges from 1 to 10,000). An invalid part number will lead to an InvalidArgument error.
+//
+// UploadPart the return value of the upload part. It consists of PartNumber and ETag. It's valid when error is nil.
+// error it's nil if the operation succeeds, otherwise it's an error object.
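+//
+// A minimal flow sketch (error handling elided; names illustrative):
+//   imur, _ := bucket.InitiateMultipartUpload("big/object")
+//   part, _ := bucket.UploadPart(imur, partReader, partSize, 1)
+//   _, _ = bucket.CompleteMultipartUpload(imur, []oss.UploadPart{part})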
+//
+func (bucket Bucket) UploadPart(imur InitiateMultipartUploadResult, reader io.Reader,
+	partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     reader,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request, options)
+
+	return result.Part, err
+}
+
+// UploadPartFromFile uploads a part from the file.
+//
+// imur the return value of a successful InitiateMultipartUpload.
+// filePath the local file path to upload.
+// startPosition the start position in the local file.
+// partSize the part size.
+// partNumber the part number (from 1 to 10,000)
+//
+// UploadPart the return value consists of PartNumber and ETag.
+// error it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) UploadPartFromFile(imur InitiateMultipartUploadResult, filePath string,
+	startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) {
+	var part = UploadPart{}
+	fd, err := os.Open(filePath)
+	if err != nil {
+		return part, err
+	}
+	defer fd.Close()
+	fd.Seek(startPosition, os.SEEK_SET)
+
+	request := &UploadPartRequest{
+		InitResult: &imur,
+		Reader:     fd,
+		PartSize:   partSize,
+		PartNumber: partNumber,
+	}
+
+	result, err := bucket.DoUploadPart(request, options)
+
+	return result.Part, err
+}
+
+// DoUploadPart does the actual part upload.
+//
+// request part upload request
+//
+// UploadPartResult the result of uploading part.
+// error it's nil if the operation succeeds, otherwise it's an error object.
+//
+func (bucket Bucket) DoUploadPart(request *UploadPartRequest, options []Option) (*UploadPartResult, error) {
+	listener := getProgressListener(options)
+	options = append(options, ContentLength(request.PartSize))
+	params := map[string]interface{}{}
+	params["partNumber"] = strconv.Itoa(request.PartNumber)
+	params["uploadId"] = request.InitResult.UploadID
+	resp, err := bucket.do("PUT", request.InitResult.Key, params, options,
+		&io.LimitedReader{R: request.Reader, N: request.PartSize}, listener)
+	if err != nil {
+		return &UploadPartResult{}, err
+	}
+	defer resp.Body.Close()
+
+	part := UploadPart{
+		ETag:       resp.Headers.Get(HTTPHeaderEtag),
+		PartNumber: request.PartNumber,
+	}
+
+	if bucket.getConfig().IsEnableCRC {
+		err = checkCRC(resp, "DoUploadPart")
+		if err != nil {
+			return &UploadPartResult{part}, err
+		}
+	}
+
+	return &UploadPartResult{part}, nil
+}
+
+// UploadPartCopy uploads a part copied from an existing object
+//
+// imur the return value of InitiateMultipartUpload
+// srcBucketName, srcObjectKey the source bucket and object name
+// startPosition the part's start index in the source file
+// partSize the part size
+// partNumber the part number, ranges from 1 to 10,000. If it exceeds the range OSS returns an InvalidArgument error.
+// options the constraints of the source object for the copy. The copy happens only when these constraints are met. Otherwise it returns an error.
+// CopySourceIfNoneMatch, CopySourceIfModifiedSince CopySourceIfUnmodifiedSince, check out the following link for the detail
+// https://help.aliyun.com/document_detail/oss/api-reference/multipart-upload/UploadPartCopy.html
+//
+// UploadPart the return value consists of PartNumber and ETag.
+// error it's nil if the operation succeeds, otherwise it's an error object.
+// +func (bucket Bucket) UploadPartCopy(imur InitiateMultipartUploadResult, srcBucketName, srcObjectKey string, + startPosition, partSize int64, partNumber int, options ...Option) (UploadPart, error) { + var out UploadPartCopyResult + var part UploadPart + var opts []Option + + //first find version id + versionIdKey := "versionId" + versionId, _ := findOption(options, versionIdKey, nil) + if versionId == nil { + opts = []Option{CopySource(srcBucketName, url.QueryEscape(srcObjectKey)), + CopySourceRange(startPosition, partSize)} + } else { + opts = []Option{CopySourceVersion(srcBucketName, url.QueryEscape(srcObjectKey), versionId.(string)), + CopySourceRange(startPosition, partSize)} + options = deleteOption(options, versionIdKey) + } + + opts = append(opts, options...) + + params := map[string]interface{}{} + params["partNumber"] = strconv.Itoa(partNumber) + params["uploadId"] = imur.UploadID + resp, err := bucket.do("PUT", imur.Key, params, opts, nil, nil) + if err != nil { + return part, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return part, err + } + part.ETag = out.ETag + part.PartNumber = partNumber + + return part, nil +} + +// CompleteMultipartUpload completes the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// parts the array of return value of UploadPart/UploadPartFromFile/UploadPartCopy. +// +// CompleteMultipartUploadResponse the return value when the call succeeds. Only valid when the error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) CompleteMultipartUpload(imur InitiateMultipartUploadResult, + parts []UploadPart, options ...Option) (CompleteMultipartUploadResult, error) { + var out CompleteMultipartUploadResult + + sort.Sort(uploadParts(parts)) + cxml := completeMultipartUploadXML{} + cxml.Part = parts + bs, err := xml.Marshal(cxml) + if err != nil { + return out, err + } + buffer := new(bytes.Buffer) + buffer.Write(bs) + + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("POST", imur.Key, params, options, buffer, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + return out, err +} + +// AbortMultipartUpload aborts the multipart upload. +// +// imur the return value of InitiateMultipartUpload. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) AbortMultipartUpload(imur InitiateMultipartUploadResult, options ...Option) error { + params := map[string]interface{}{} + params["uploadId"] = imur.UploadID + resp, err := bucket.do("DELETE", imur.Key, params, options, nil, nil) + if err != nil { + return err + } + defer resp.Body.Close() + return checkRespCode(resp.StatusCode, []int{http.StatusNoContent}) +} + +// ListUploadedParts lists the uploaded parts. +// +// imur the return value of InitiateMultipartUpload. +// +// ListUploadedPartsResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. 
+// +func (bucket Bucket) ListUploadedParts(imur InitiateMultipartUploadResult, options ...Option) (ListUploadedPartsResult, error) { + var out ListUploadedPartsResult + options = append(options, EncodingType("url")) + + params := map[string]interface{}{} + params, err := getRawParams(options) + if err != nil { + return out, err + } + + params["uploadId"] = imur.UploadID + resp, err := bucket.do("GET", imur.Key, params, nil, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListUploadedPartsResult(&out) + return out, err +} + +// ListMultipartUploads lists all ongoing multipart upload tasks +// +// options listObject's filter. Prefix specifies the returned object's prefix; KeyMarker specifies the returned object's start point in lexicographic order; +// MaxKeys specifies the max entries to return; Delimiter is the character for grouping object keys. +// +// ListMultipartUploadResponse the return value if it succeeds, only valid when error is nil. +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) ListMultipartUploads(options ...Option) (ListMultipartUploadResult, error) { + var out ListMultipartUploadResult + + options = append(options, EncodingType("url")) + params, err := getRawParams(options) + if err != nil { + return out, err + } + params["uploads"] = nil + + resp, err := bucket.do("GET", "", params, options, nil, nil) + if err != nil { + return out, err + } + defer resp.Body.Close() + + err = xmlUnmarshal(resp.Body, &out) + if err != nil { + return out, err + } + err = decodeListMultipartUploadResult(&out) + return out, err +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go new file mode 100644 index 00000000..c2146f0e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/option.go @@ -0,0 +1,547 @@ +package oss + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +type optionType string + +const ( + optionParam optionType = "HTTPParameter" // URL parameter + optionHTTP optionType = "HTTPHeader" // HTTP header + optionArg optionType = "FuncArgument" // Function argument +) + +const ( + deleteObjectsQuiet = "delete-objects-quiet" + routineNum = "x-routine-num" + checkpointConfig = "x-cp-config" + initCRC64 = "init-crc64" + progressListener = "x-progress-listener" + storageClass = "storage-class" + responseHeader = "x-response-header" +) + +type ( + optionValue struct { + Value interface{} + Type optionType + } + + // Option HTTP option + Option func(map[string]optionValue) error +) + +// ACL is an option to set X-Oss-Acl header +func ACL(acl ACLType) Option { + return setHeader(HTTPHeaderOssACL, string(acl)) +} + +// ContentType is an option to set Content-Type header +func ContentType(value string) Option { + return setHeader(HTTPHeaderContentType, value) +} + +// ContentLength is an option to set Content-Length header +func ContentLength(length int64) Option { + return setHeader(HTTPHeaderContentLength, strconv.FormatInt(length, 10)) +} + +// CacheControl is an option to set Cache-Control header +func CacheControl(value string) Option { + return setHeader(HTTPHeaderCacheControl, value) +} + +// ContentDisposition is an option to set Content-Disposition header +func ContentDisposition(value string) Option { + return setHeader(HTTPHeaderContentDisposition, value) +} + 
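+// Options compose: each one contributes a header or query parameter to a bucket
+// operation. A hedged sketch (PutObject is defined elsewhere in this package;
+// the names below are illustrative only):
+//   err := bucket.PutObject("logs/app.txt", reader,
+//       oss.ContentType("text/plain"), oss.Meta("owner", "ops"))
+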
+// ContentEncoding is an option to set Content-Encoding header +func ContentEncoding(value string) Option { + return setHeader(HTTPHeaderContentEncoding, value) +} + +// ContentLanguage is an option to set Content-Language header +func ContentLanguage(value string) Option { + return setHeader(HTTPHeaderContentLanguage, value) +} + +// ContentMD5 is an option to set Content-MD5 header +func ContentMD5(value string) Option { + return setHeader(HTTPHeaderContentMD5, value) +} + +// Expires is an option to set Expires header +func Expires(t time.Time) Option { + return setHeader(HTTPHeaderExpires, t.Format(http.TimeFormat)) +} + +// Meta is an option to set Meta header +func Meta(key, value string) Option { + return setHeader(HTTPHeaderOssMetaPrefix+key, value) +} + +// Range is an option to set Range header, [start, end] +func Range(start, end int64) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%d-%d", start, end)) +} + +// NormalizedRange is an option to set Range header, such as 1024-2048 or 1024- or -2048 +func NormalizedRange(nr string) Option { + return setHeader(HTTPHeaderRange, fmt.Sprintf("bytes=%s", strings.TrimSpace(nr))) +} + +// AcceptEncoding is an option to set Accept-Encoding header +func AcceptEncoding(value string) Option { + return setHeader(HTTPHeaderAcceptEncoding, value) +} + +// IfModifiedSince is an option to set If-Modified-Since header +func IfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfModifiedSince, t.Format(http.TimeFormat)) +} + +// IfUnmodifiedSince is an option to set If-Unmodified-Since header +func IfUnmodifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderIfUnmodifiedSince, t.Format(http.TimeFormat)) +} + +// IfMatch is an option to set If-Match header +func IfMatch(value string) Option { + return setHeader(HTTPHeaderIfMatch, value) +} + +// IfNoneMatch is an option to set IfNoneMatch header +func IfNoneMatch(value string) Option { + return setHeader(HTTPHeaderIfNoneMatch, value) +} + +// CopySource is an option to set X-Oss-Copy-Source header +func CopySource(sourceBucket, sourceObject string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject) +} + +// CopySourceVersion is an option to set X-Oss-Copy-Source header,include versionId +func CopySourceVersion(sourceBucket, sourceObject string, versionId string) Option { + return setHeader(HTTPHeaderOssCopySource, "/"+sourceBucket+"/"+sourceObject+"?"+"versionId="+versionId) +} + +// CopySourceRange is an option to set X-Oss-Copy-Source header +func CopySourceRange(startPosition, partSize int64) Option { + val := "bytes=" + strconv.FormatInt(startPosition, 10) + "-" + + strconv.FormatInt((startPosition+partSize-1), 10) + return setHeader(HTTPHeaderOssCopySourceRange, val) +} + +// CopySourceIfMatch is an option to set X-Oss-Copy-Source-If-Match header +func CopySourceIfMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfMatch, value) +} + +// CopySourceIfNoneMatch is an option to set X-Oss-Copy-Source-If-None-Match header +func CopySourceIfNoneMatch(value string) Option { + return setHeader(HTTPHeaderOssCopySourceIfNoneMatch, value) +} + +// CopySourceIfModifiedSince is an option to set X-Oss-CopySource-If-Modified-Since header +func CopySourceIfModifiedSince(t time.Time) Option { + return setHeader(HTTPHeaderOssCopySourceIfModifiedSince, t.Format(http.TimeFormat)) +} + +// CopySourceIfUnmodifiedSince is an option to set X-Oss-Copy-Source-If-Unmodified-Since header +func CopySourceIfUnmodifiedSince(t 
time.Time) Option {
+	return setHeader(HTTPHeaderOssCopySourceIfUnmodifiedSince, t.Format(http.TimeFormat))
+}
+
+// MetadataDirective is an option to set X-Oss-Metadata-Directive header
+func MetadataDirective(directive MetadataDirectiveType) Option {
+	return setHeader(HTTPHeaderOssMetadataDirective, string(directive))
+}
+
+// ServerSideEncryption is an option to set X-Oss-Server-Side-Encryption header
+func ServerSideEncryption(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryption, value)
+}
+
+// ServerSideEncryptionKeyID is an option to set X-Oss-Server-Side-Encryption-Key-Id header
+func ServerSideEncryptionKeyID(value string) Option {
+	return setHeader(HTTPHeaderOssServerSideEncryptionKeyID, value)
+}
+
+// ObjectACL is an option to set X-Oss-Object-Acl header
+func ObjectACL(acl ACLType) Option {
+	return setHeader(HTTPHeaderOssObjectACL, string(acl))
+}
+
+// symlinkTarget is an option to set X-Oss-Symlink-Target
+func symlinkTarget(targetObjectKey string) Option {
+	return setHeader(HTTPHeaderOssSymlinkTarget, targetObjectKey)
+}
+
+// Origin is an option to set Origin header
+func Origin(value string) Option {
+	return setHeader(HTTPHeaderOrigin, value)
+}
+
+// ObjectStorageClass is an option to set the storage class of object
+func ObjectStorageClass(storageClass StorageClassType) Option {
+	return setHeader(HTTPHeaderOssStorageClass, string(storageClass))
+}
+
+// Callback is an option to set callback values
+func Callback(callback string) Option {
+	return setHeader(HTTPHeaderOssCallback, callback)
+}
+
+// CallbackVar is an option to set callback user defined values
+func CallbackVar(callbackVar string) Option {
+	return setHeader(HTTPHeaderOssCallbackVar, callbackVar)
+}
+
+// RequestPayer is an option to set the payer who pays for the request
+func RequestPayer(payerType PayerType) Option {
+	return setHeader(HTTPHeaderOssRequester, strings.ToLower(string(payerType)))
+}
+
+// SetTagging is an option to set object tagging
+func SetTagging(tagging Tagging) Option {
+	if len(tagging.Tags) == 0 {
+		return nil
+	}
+
+	taggingValue := ""
+	for index, tag := range tagging.Tags {
+		if index != 0 {
+			taggingValue += "&"
+		}
+		taggingValue += url.QueryEscape(tag.Key) + "=" + url.QueryEscape(tag.Value)
+	}
+	return setHeader(HTTPHeaderOssTagging, taggingValue)
+}
+
+// TaggingDirective is an option to set X-Oss-Tagging-Directive header
+func TaggingDirective(directive TaggingDirectiveType) Option {
+	return setHeader(HTTPHeaderOssTaggingDirective, string(directive))
+}
+
+// ACReqMethod is an option to set Access-Control-Request-Method header
+func ACReqMethod(value string) Option {
+	return setHeader(HTTPHeaderACReqMethod, value)
+}
+
+// ACReqHeaders is an option to set Access-Control-Request-Headers header
+func ACReqHeaders(value string) Option {
+	return setHeader(HTTPHeaderACReqHeaders, value)
+}
+
+// TrafficLimitHeader is an option to set X-Oss-Traffic-Limit
+func TrafficLimitHeader(value int64) Option {
+	return setHeader(HTTPHeaderOssTrafficLimit, strconv.FormatInt(value, 10))
+}
+
+// Delimiter is an option to set delimiter parameter
+func Delimiter(value string) Option {
+	return addParam("delimiter", value)
+}
+
+// Marker is an option to set marker parameter
+func Marker(value string) Option {
+	return addParam("marker", value)
+}
+
+// MaxKeys is an option to set max-keys parameter
+func MaxKeys(value int) Option {
+	return addParam("max-keys", strconv.Itoa(value))
+}
+
+// Prefix is an option to set prefix parameter
+func Prefix(value string)
Option {
+	return addParam("prefix", value)
+}
+
+// EncodingType is an option to set encoding-type parameter
+func EncodingType(value string) Option {
+	return addParam("encoding-type", value)
+}
+
+// MaxUploads is an option to set max-uploads parameter
+func MaxUploads(value int) Option {
+	return addParam("max-uploads", strconv.Itoa(value))
+}
+
+// KeyMarker is an option to set key-marker parameter
+func KeyMarker(value string) Option {
+	return addParam("key-marker", value)
+}
+
+// VersionIdMarker is an option to set version-id-marker parameter
+func VersionIdMarker(value string) Option {
+	return addParam("version-id-marker", value)
+}
+
+// VersionId is an option to set versionId parameter
+func VersionId(value string) Option {
+	return addParam("versionId", value)
+}
+
+// TagKey is an option to set tag key parameter
+func TagKey(value string) Option {
+	return addParam("tag-key", value)
+}
+
+// TagValue is an option to set tag value parameter
+func TagValue(value string) Option {
+	return addParam("tag-value", value)
+}
+
+// UploadIDMarker is an option to set upload-id-marker parameter
+func UploadIDMarker(value string) Option {
+	return addParam("upload-id-marker", value)
+}
+
+// MaxParts is an option to set max-parts parameter
+func MaxParts(value int) Option {
+	return addParam("max-parts", strconv.Itoa(value))
+}
+
+// PartNumberMarker is an option to set part-number-marker parameter
+func PartNumberMarker(value int) Option {
+	return addParam("part-number-marker", strconv.Itoa(value))
+}
+
+// DeleteObjectsQuiet false: DeleteObjects in verbose mode; true: DeleteObjects in quiet mode. Default is false.
+func DeleteObjectsQuiet(isQuiet bool) Option {
+	return addArg(deleteObjectsQuiet, isQuiet)
+}
+
+// StorageClass sets the bucket storage class
+func StorageClass(value StorageClassType) Option {
+	return addArg(storageClass, value)
+}
+
+// Checkpoint configuration
+type cpConfig struct {
+	IsEnable bool
+	FilePath string
+	DirPath  string
+}
+
+// Checkpoint sets the isEnable flag and checkpoint file path for DownloadFile/UploadFile.
+func Checkpoint(isEnable bool, filePath string) Option {
+	return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, FilePath: filePath})
+}
+
+// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile.
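+// When only a directory is given, the checkpoint file name is derived from the
+// transfer's source and destination (see getCopyCpFilePath in multicopy.go), so
+// one directory can be shared by many transfers.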
+ +// CheckpointDir sets the isEnable flag and checkpoint dir path for DownloadFile/UploadFile. +func CheckpointDir(isEnable bool, dirPath string) Option { + return addArg(checkpointConfig, &cpConfig{IsEnable: isEnable, DirPath: dirPath}) +} + +// Routines sets the routine count for DownloadFile/UploadFile +func Routines(n int) Option { + return addArg(routineNum, n) +} + +// InitCRC sets the initial CRC for AppendObject +func InitCRC(initCRC uint64) Option { + return addArg(initCRC64, initCRC) +} + +// Progress sets the progress listener +func Progress(listener ProgressListener) Option { + return addArg(progressListener, listener) +} + +// GetResponseHeader is an option to get the response HTTP header +func GetResponseHeader(respHeader *http.Header) Option { + return addArg(responseHeader, respHeader) +} + +// ResponseContentType is an option to set the response-content-type param +func ResponseContentType(value string) Option { + return addParam("response-content-type", value) +} + +// ResponseContentLanguage is an option to set the response-content-language param +func ResponseContentLanguage(value string) Option { + return addParam("response-content-language", value) +} + +// ResponseExpires is an option to set the response-expires param +func ResponseExpires(value string) Option { + return addParam("response-expires", value) +} + +// ResponseCacheControl is an option to set the response-cache-control param +func ResponseCacheControl(value string) Option { + return addParam("response-cache-control", value) +} + +// ResponseContentDisposition is an option to set the response-content-disposition param +func ResponseContentDisposition(value string) Option { + return addParam("response-content-disposition", value) +} + +// ResponseContentEncoding is an option to set the response-content-encoding param +func ResponseContentEncoding(value string) Option { + return addParam("response-content-encoding", value) +} + +// Process is an option to set the x-oss-process param +func Process(value string) Option { + return addParam("x-oss-process", value) +} + +// TrafficLimitParam is an option to set x-oss-traffic-limit +func TrafficLimitParam(value int64) Option { + return addParam("x-oss-traffic-limit", strconv.FormatInt(value, 10)) +} + +func setHeader(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionHTTP} + return nil + } +} + +func addParam(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionParam} + return nil + } +} + +func addArg(key string, value interface{}) Option { + return func(params map[string]optionValue) error { + if value == nil { + return nil + } + params[key] = optionValue{value, optionArg} + return nil + } +} + +func handleOptions(headers map[string]string, options []Option) error { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return err + } + } + } + + for k, v := range params { + if v.Type == optionHTTP { + headers[k] = v.Value.(string) + } + } + return nil +} + +func getRawParams(options []Option) (map[string]interface{}, error) { + // Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + paramsm := map[string]interface{}{} + // Serialize + for k, v := range params { + if v.Type == optionParam { + vs := params[k] + paramsm[k] = vs.Value.(string) + } + } + + return paramsm, nil +}
+ +func findOption(options []Option, param string, defaultVal interface{}) (interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return nil, err + } + } + } + + if val, ok := params[param]; ok { + return val.Value, nil + } + return defaultVal, nil +} + +func isOptionSet(options []Option, option string) (bool, interface{}, error) { + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + if err := option(params); err != nil { + return false, nil, err + } + } + } + + if val, ok := params[option]; ok { + return true, val.Value, nil + } + return false, nil, nil +} + +func deleteOption(options []Option, strKey string) []Option { + var outOption []Option + params := map[string]optionValue{} + for _, option := range options { + if option != nil { + option(params) + _, exist := params[strKey] + if !exist { + outOption = append(outOption, option) + } else { + delete(params, strKey) + } + } + } + return outOption +} + +func GetRequestId(header http.Header) string { + return header.Get("x-oss-request-id") +} + +func GetVersionId(header http.Header) string { + return header.Get("x-oss-version-id") +} + +func GetCopySrcVersionId(header http.Header) string { + return header.Get("x-oss-copy-source-version-id") +} + +func GetDeleteMark(header http.Header) bool { + value := header.Get("x-oss-delete-marker") + if strings.ToUpper(value) == "TRUE" { + return true + } + return false +} + +func GetQosDelayTime(header http.Header) string { + return header.Get("x-oss-qos-delay-time") +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go new file mode 100644 index 00000000..33cbebd5 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/progress.go @@ -0,0 +1,114 @@ +package oss + +import "io" + +// ProgressEventType defines transfer progress event type +type ProgressEventType int + +const ( + // TransferStartedEvent transfer started, set TotalBytes + TransferStartedEvent ProgressEventType = 1 + iota + // TransferDataEvent transfer data, set ConsumedBytes and TotalBytes + TransferDataEvent + // TransferCompletedEvent transfer completed + TransferCompletedEvent + // TransferFailedEvent transfer encounters an error + TransferFailedEvent +) + +// ProgressEvent defines progress event +type ProgressEvent struct { + ConsumedBytes int64 + TotalBytes int64 + RwBytes int64 + EventType ProgressEventType +} + +// ProgressListener listens for progress changes +type ProgressListener interface { + ProgressChanged(event *ProgressEvent) +} + +// -------------------- Private -------------------- + +func newProgressEvent(eventType ProgressEventType, consumed, total int64, rwBytes int64) *ProgressEvent { + return &ProgressEvent{ + ConsumedBytes: consumed, + TotalBytes: total, + RwBytes: rwBytes, + EventType: eventType} +} + +// publishProgress +func publishProgress(listener ProgressListener, event *ProgressEvent) { + if listener != nil && event != nil { + listener.ProgressChanged(event) + } +} + +type readerTracker struct { + completedBytes int64 +} + +type teeReader struct { + reader io.Reader + writer io.Writer + listener ProgressListener + consumedBytes int64 + totalBytes int64 + tracker *readerTracker +}
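Before the TeeReader plumbing below, it may help to see the listener side. The ProgressEvent struct, the event-type constants and the ProgressListener interface are all defined above; the listener implementation and the direct call in main are illustrative sketches, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

// percentListener is a hypothetical implementation of
// oss.ProgressListener that prints transfer progress.
type percentListener struct{}

func (l *percentListener) ProgressChanged(event *oss.ProgressEvent) {
	switch event.EventType {
	case oss.TransferStartedEvent:
		fmt.Printf("started, total %d bytes\n", event.TotalBytes)
	case oss.TransferDataEvent:
		if event.TotalBytes > 0 {
			fmt.Printf("%d%% done\n", event.ConsumedBytes*100/event.TotalBytes)
		}
	case oss.TransferCompletedEvent:
		fmt.Println("completed")
	case oss.TransferFailedEvent:
		fmt.Println("failed")
	}
}

func main() {
	// Normally the listener is passed to an operation via
	// oss.Progress(&percentListener{}); here we feed one synthetic
	// event to show the callback shape.
	l := &percentListener{}
	l.ProgressChanged(&oss.ProgressEvent{
		ConsumedBytes: 512,
		TotalBytes:    1024,
		EventType:     oss.TransferDataEvent,
	})
}
```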
+ +// TeeReader returns a Reader that writes to w what it reads from r. +// All reads from r performed through it are matched with +// corresponding writes to w. There is no internal buffering - +// the write must complete before the read completes. +// Any error encountered while writing is reported as a read error. +func TeeReader(reader io.Reader, writer io.Writer, totalBytes int64, listener ProgressListener, tracker *readerTracker) io.ReadCloser { + return &teeReader{ + reader: reader, + writer: writer, + listener: listener, + consumedBytes: 0, + totalBytes: totalBytes, + tracker: tracker, + } +} + +func (t *teeReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + + // Read encountered error + if err != nil && err != io.EOF { + event := newProgressEvent(TransferFailedEvent, t.consumedBytes, t.totalBytes, 0) + publishProgress(t.listener, event) + } + + if n > 0 { + t.consumedBytes += int64(n) + // CRC + if t.writer != nil { + if n, err := t.writer.Write(p[:n]); err != nil { + return n, err + } + } + // Progress + if t.listener != nil { + event := newProgressEvent(TransferDataEvent, t.consumedBytes, t.totalBytes, int64(n)) + publishProgress(t.listener, event) + } + // Track + if t.tracker != nil { + t.tracker.completedBytes = t.consumedBytes + } + } + + return +} + +func (t *teeReader) Close() error { + if rc, ok := t.reader.(io.ReadCloser); ok { + return rc.Close() + } + return nil +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go new file mode 100644 index 00000000..e6de4cdd --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_6.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +package oss + +import ( + "net" + "net/http" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go new file mode 100644 index 00000000..006ea47a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/transport_1_7.go @@ -0,0 +1,28 @@ +// +build go1.7 + +package oss + +import ( + "net" + "net/http" +) + +func newTransport(conn *Conn, config *Config) *http.Transport { + httpTimeOut := conn.config.HTTPTimeout + httpMaxConns := conn.config.HTTPMaxConns + // New Transport + transport := &http.Transport{ + Dial: func(netw, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(netw, addr, httpTimeOut.ConnectTimeout) + if err != nil { + return nil, err + } + return newTimeoutConn(conn, httpTimeOut.ReadWriteTimeout, httpTimeOut.LongTimeout), nil + }, + MaxIdleConns: httpMaxConns.MaxIdleConns, + MaxIdleConnsPerHost: httpMaxConns.MaxIdleConnsPerHost, + IdleConnTimeout: httpTimeOut.IdleConnTimeout, + ResponseHeaderTimeout: httpTimeOut.HeaderTimeout, + } + return transport +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go new file mode 100644 index 00000000..1aac3330 --- /dev/null +++
b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/type.go @@ -0,0 +1,860 @@ +package oss + +import ( + "encoding/xml" + "fmt" + "net/url" + "time" +) + +// ListBucketsResult defines the result object from ListBuckets request +type ListBucketsResult struct { + XMLName xml.Name `xml:"ListAllMyBucketsResult"` + Prefix string `xml:"Prefix"` // The prefix in this query + Marker string `xml:"Marker"` // The marker filter + MaxKeys int `xml:"MaxKeys"` // The max entry count to return. This information is returned when IsTruncated is true. + IsTruncated bool `xml:"IsTruncated"` // True means there are remaining buckets to return. + NextMarker string `xml:"NextMarker"` // The marker filter for the next list call + Owner Owner `xml:"Owner"` // The owner information + Buckets []BucketProperties `xml:"Buckets>Bucket"` // The bucket list +} + +// BucketProperties defines bucket properties +type BucketProperties struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket create time + StorageClass string `xml:"StorageClass"` // Bucket storage class +} + +// GetBucketACLResult defines GetBucketACL request's result +type GetBucketACLResult struct { + XMLName xml.Name `xml:"AccessControlPolicy"` + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + Owner Owner `xml:"Owner"` // Bucket owner +} + +// LifecycleConfiguration is the Bucket Lifecycle configuration +type LifecycleConfiguration struct { + XMLName xml.Name `xml:"LifecycleConfiguration"` + Rules []LifecycleRule `xml:"Rule"` +} + +// LifecycleRule defines Lifecycle rules +type LifecycleRule struct { + XMLName xml.Name `xml:"Rule"` + ID string `xml:"ID,omitempty"` // The rule ID + Prefix string `xml:"Prefix"` // The object key prefix + Status string `xml:"Status"` // The rule status (enabled or not) + Tags []Tag `xml:"Tag,omitempty"` // The tags property + Expiration *LifecycleExpiration `xml:"Expiration,omitempty"` // The expiration property + Transitions []LifecycleTransition `xml:"Transition,omitempty"` // The transition property + AbortMultipartUpload *LifecycleAbortMultipartUpload `xml:"AbortMultipartUpload,omitempty"` // The AbortMultipartUpload property +} + +// LifecycleExpiration defines the rule's expiration property +type LifecycleExpiration struct { + XMLName xml.Name `xml:"Expiration"` + Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time + Date string `xml:"Date,omitempty"` // Absolute expiration time: The expiration time in date, not recommended + CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // Objects created before the date will be expired +} + +// LifecycleTransition defines the rule's transition property +type LifecycleTransition struct { + XMLName xml.Name `xml:"Transition"` + Days int `xml:"Days,omitempty"` // Relative transition time: The transition time in days after the last modified time + CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // Objects created before the date will be expired + StorageClass StorageClassType `xml:"StorageClass,omitempty"` // Specifies the target storage type +} + +// LifecycleAbortMultipartUpload defines the rule's abort multipart upload property +type LifecycleAbortMultipartUpload struct { + XMLName xml.Name `xml:"AbortMultipartUpload"` + Days int `xml:"Days,omitempty"` // Relative expiration time: The expiration time in days after the last modified time +
CreatedBeforeDate string `xml:"CreatedBeforeDate,omitempty"` // Objects created before the date will be expired +} + +const iso8601DateFormat = "2006-01-02T15:04:05.000Z" + +// BuildLifecycleRuleByDays builds a lifecycle rule where objects expire the given number of days after their last modified time +func BuildLifecycleRuleByDays(id, prefix string, status bool, days int) LifecycleRule { + var statusStr = "Enabled" + if !status { + statusStr = "Disabled" + } + return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, + Expiration: &LifecycleExpiration{Days: days}} +} + +// BuildLifecycleRuleByDate builds a lifecycle rule where objects expire on the specified date +func BuildLifecycleRuleByDate(id, prefix string, status bool, year, month, day int) LifecycleRule { + var statusStr = "Enabled" + if !status { + statusStr = "Disabled" + } + date := time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC).Format(iso8601DateFormat) + return LifecycleRule{ID: id, Prefix: prefix, Status: statusStr, + Expiration: &LifecycleExpiration{Date: date}} +} + +// verifyLifecycleRules determines whether lifecycle rules are valid; it returns an error if they are not. +func verifyLifecycleRules(rules []LifecycleRule) error { + if len(rules) == 0 { + return fmt.Errorf("invalid rules, the length of rules is zero") + } + for _, rule := range rules { + if rule.Status != "Enabled" && rule.Status != "Disabled" { + return fmt.Errorf("invalid rule, the value of status must be Enabled or Disabled") + } + + expiration := rule.Expiration + if expiration != nil { + if (expiration.Days != 0 && expiration.CreatedBeforeDate != "") || (expiration.Days != 0 && expiration.Date != "") || (expiration.CreatedBeforeDate != "" && expiration.Date != "") || (expiration.Days == 0 && expiration.CreatedBeforeDate == "" && expiration.Date == "") { + return fmt.Errorf("invalid expiration lifecycle, exactly one of CreatedBeforeDate, Days and Date must be set") + } + } + + abortMPU := rule.AbortMultipartUpload + if abortMPU != nil { + if (abortMPU.Days != 0 && abortMPU.CreatedBeforeDate != "") || (abortMPU.Days == 0 && abortMPU.CreatedBeforeDate == "") { + return fmt.Errorf("invalid abort multipart upload lifecycle, exactly one of CreatedBeforeDate and Days must be set") + } + } + + transitions := rule.Transitions + if len(transitions) > 0 { + if len(transitions) > 2 { + return fmt.Errorf("invalid count of transition lifecycles, the count must be less than 3") + } + + for _, transition := range transitions { + if (transition.Days != 0 && transition.CreatedBeforeDate != "") || (transition.Days == 0 && transition.CreatedBeforeDate == "") { + return fmt.Errorf("invalid transition lifecycle, exactly one of CreatedBeforeDate and Days must be set") + } + if transition.StorageClass != StorageIA && transition.StorageClass != StorageArchive { + return fmt.Errorf("invalid transition lifecycle, the value of storage class must be IA or Archive") + } + } + } else if expiration == nil && abortMPU == nil { + return fmt.Errorf("invalid rule, at least one of Expiration, AbortMultipartUpload and Transitions must be set") + } + } + + return nil +} + +// GetBucketLifecycleResult defines GetBucketLifecycle's result object +type GetBucketLifecycleResult LifecycleConfiguration
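Putting the lifecycle types and the validation above together, here is a hedged sketch that builds one expiration rule and one transition rule satisfying verifyLifecycleRules: exactly one of Days/CreatedBeforeDate per element, at most two transitions, and a storage class of IA or Archive. The StorageIA and StorageArchive constants are referenced by the validation code above; actually applying the rules to a bucket is done through a lifecycle setter outside this diff:

```go
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-oss-go-sdk/oss"
)

func main() {
	// Expire objects under logs/ 30 days after their last modification.
	expire := oss.BuildLifecycleRuleByDays("expire-logs", "logs/", true, 30)

	// Transition objects under archive/ to IA after 30 days and to
	// Archive after 60 days; each transition sets exactly one of
	// Days/CreatedBeforeDate, as verifyLifecycleRules requires.
	transition := oss.LifecycleRule{
		ID:     "tier-archive",
		Prefix: "archive/",
		Status: "Enabled",
		Transitions: []oss.LifecycleTransition{
			{Days: 30, StorageClass: oss.StorageIA},
			{Days: 60, StorageClass: oss.StorageArchive},
		},
	}

	fmt.Println(expire.ID, transition.ID)
}
```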
+ +// RefererXML defines Referer configuration +type RefererXML struct { + XMLName xml.Name `xml:"RefererConfiguration"` + AllowEmptyReferer bool `xml:"AllowEmptyReferer"` // Allow empty referrer + RefererList []string `xml:"RefererList>Referer"` // Referer whitelist +} + +// GetBucketRefererResult defines the result object for GetBucketReferer request +type GetBucketRefererResult RefererXML + +// LoggingXML defines logging configuration +type LoggingXML struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` + LoggingEnabled LoggingEnabled `xml:"LoggingEnabled"` // The logging configuration information +} + +type loggingXMLEmpty struct { + XMLName xml.Name `xml:"BucketLoggingStatus"` +} + +// LoggingEnabled defines the logging configuration information +type LoggingEnabled struct { + XMLName xml.Name `xml:"LoggingEnabled"` + TargetBucket string `xml:"TargetBucket"` // The bucket name for storing the log files + TargetPrefix string `xml:"TargetPrefix"` // The log file prefix +} + +// GetBucketLoggingResult defines the result from GetBucketLogging request +type GetBucketLoggingResult LoggingXML + +// WebsiteXML defines Website configuration +type WebsiteXML struct { + XMLName xml.Name `xml:"WebsiteConfiguration"` + IndexDocument IndexDocument `xml:"IndexDocument,omitempty"` // The index page + ErrorDocument ErrorDocument `xml:"ErrorDocument,omitempty"` // The error page + RoutingRules []RoutingRule `xml:"RoutingRules>RoutingRule,omitempty"` // The routing rule list +} + +// IndexDocument defines the index page info +type IndexDocument struct { + XMLName xml.Name `xml:"IndexDocument"` + Suffix string `xml:"Suffix"` // The file name for the index page +} + +// ErrorDocument defines the 404 error page info +type ErrorDocument struct { + XMLName xml.Name `xml:"ErrorDocument"` + Key string `xml:"Key"` // 404 error file name +} + +// RoutingRule defines the routing rules +type RoutingRule struct { + XMLName xml.Name `xml:"RoutingRule"` + RuleNumber int `xml:"RuleNumber,omitempty"` // The routing number + Condition Condition `xml:"Condition,omitempty"` // The routing condition + Redirect Redirect `xml:"Redirect,omitempty"` // The routing redirect + +} + +// Condition defines the condition in the RoutingRule +type Condition struct { + XMLName xml.Name `xml:"Condition"` + KeyPrefixEquals string `xml:"KeyPrefixEquals,omitempty"` // Matching object prefix + HTTPErrorCodeReturnedEquals int `xml:"HttpErrorCodeReturnedEquals,omitempty"` // Applies when access to the specified object returns this HTTP error code + IncludeHeader []IncludeHeader `xml:"IncludeHeader"` // Applies to requests that include these headers +} + +// IncludeHeader defines includeHeader in the RoutingRule's Condition +type IncludeHeader struct { + XMLName xml.Name `xml:"IncludeHeader"` + Key string `xml:"Key,omitempty"` // The include header key + Equals string `xml:"Equals,omitempty"` // The include header value +} + +// Redirect defines redirect in the RoutingRule +type Redirect struct { + XMLName xml.Name `xml:"Redirect"` + RedirectType string `xml:"RedirectType,omitempty"` // The redirect type, one of Mirror, External, Internal, AliCDN + PassQueryString *bool `xml:"PassQueryString"` // Whether to pass the request's query string to the redirect target, true or false + MirrorURL string `xml:"MirrorURL,omitempty"` // The mirror URL of the origin site + MirrorPassQueryString *bool `xml:"MirrorPassQueryString"` // Whether to pass the request's query string to the mirror origin, true or false + MirrorFollowRedirect *bool `xml:"MirrorFollowRedirect"` // Whether to follow the redirect location if the mirror returns a 3XX response + MirrorCheckMd5 *bool `xml:"MirrorCheckMd5"` // Whether to check the MD5 of the mirrored body.
+ MirrorHeaders MirrorHeaders `xml:"MirrorHeaders,omitempty"` // Mirror headers + Protocol string `xml:"Protocol,omitempty"` // The redirect protocol + HostName string `xml:"HostName,omitempty"` // The redirect host name + ReplaceKeyPrefixWith string `xml:"ReplaceKeyPrefixWith,omitempty"` // The value that replaces the object key prefix + HttpRedirectCode int `xml:"HttpRedirectCode,omitempty"` // The HTTP redirect code + ReplaceKeyWith string `xml:"ReplaceKeyWith,omitempty"` // The value that replaces the object key +} + +// MirrorHeaders defines MirrorHeaders in the Redirect +type MirrorHeaders struct { + XMLName xml.Name `xml:"MirrorHeaders"` + PassAll *bool `xml:"PassAll"` // Whether to pass all headers through to the origin site + Pass []string `xml:"Pass"` // Headers to pass through to the origin site + Remove []string `xml:"Remove"` // Headers that must not be passed through to the origin site + Set []MirrorHeaderSet `xml:"Set"` // Headers to set on requests sent to the origin site +} + +// MirrorHeaderSet defines Set for Redirect's MirrorHeaders +type MirrorHeaderSet struct { + XMLName xml.Name `xml:"Set"` + Key string `xml:"Key,omitempty"` // The mirror header key + Value string `xml:"Value,omitempty"` // The mirror header value +} + +// GetBucketWebsiteResult defines the result from GetBucketWebsite request. +type GetBucketWebsiteResult WebsiteXML + +// CORSXML defines CORS configuration +type CORSXML struct { + XMLName xml.Name `xml:"CORSConfiguration"` + CORSRules []CORSRule `xml:"CORSRule"` // CORS rules +} + +// CORSRule defines CORS rules +type CORSRule struct { + XMLName xml.Name `xml:"CORSRule"` + AllowedOrigin []string `xml:"AllowedOrigin"` // Allowed origins. By default it's wildcard '*' + AllowedMethod []string `xml:"AllowedMethod"` // Allowed methods + AllowedHeader []string `xml:"AllowedHeader"` // Allowed headers + ExposeHeader []string `xml:"ExposeHeader"` // Allowed response headers + MaxAgeSeconds int `xml:"MaxAgeSeconds"` // Max cache age in seconds +} + +// GetBucketCORSResult defines the result from GetBucketCORS request. +type GetBucketCORSResult CORSXML + +// GetBucketInfoResult defines the result from GetBucketInfo request. +type GetBucketInfoResult struct { + XMLName xml.Name `xml:"BucketInfo"` + BucketInfo BucketInfo `xml:"Bucket"` +} + +// BucketInfo defines Bucket information +type BucketInfo struct { + XMLName xml.Name `xml:"Bucket"` + Name string `xml:"Name"` // Bucket name + Location string `xml:"Location"` // Bucket datacenter + CreationDate time.Time `xml:"CreationDate"` // Bucket creation time + ExtranetEndpoint string `xml:"ExtranetEndpoint"` // Bucket external endpoint + IntranetEndpoint string `xml:"IntranetEndpoint"` // Bucket internal endpoint + ACL string `xml:"AccessControlList>Grant"` // Bucket ACL + Owner Owner `xml:"Owner"` // Bucket owner + StorageClass string `xml:"StorageClass"` // Bucket storage class + SseRule SSERule `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + Versioning string `xml:"Versioning"` // Bucket Versioning +} + +type SSERule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` // Bucket ServerSideEncryptionRule + KMSMasterKeyID string `xml:"KMSMasterKeyID"` // Bucket KMSMasterKeyID + SSEAlgorithm string `xml:"SSEAlgorithm"` // Bucket SSEAlgorithm +} + +// ListObjectsResult defines the result from ListObjects request +type ListObjectsResult struct { + XMLName xml.Name `xml:"ListBucketResult"` + Prefix string `xml:"Prefix"` // The object prefix + Marker string `xml:"Marker"` // The marker filter.
+ MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextMarker string `xml:"NextMarker"` // The start point of the next query + Objects []ObjectProperties `xml:"Contents"` // Object list + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter +} + +// ObjectProperties defines Objecct properties +type ObjectProperties struct { + XMLName xml.Name `xml:"Contents"` + Key string `xml:"Key"` // Object key + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + Owner Owner `xml:"Owner"` // Object owner information + LastModified time.Time `xml:"LastModified"` // Object last modified time + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) +} + +// ListObjectVersionsResult defines the result from ListObjectVersions request +type ListObjectVersionsResult struct { + XMLName xml.Name `xml:"ListVersionsResult"` + Name string `xml:"Name"` // The Bucket Name + Owner Owner `xml:"Owner"` // The owner of bucket + Prefix string `xml:"Prefix"` // The object prefix + KeyMarker string `xml:"KeyMarker"` // The start marker filter. + VersionIdMarker string `xml:"VersionIdMarker"` // The start VersionIdMarker filter. + MaxKeys int `xml:"MaxKeys"` // Max keys to return + Delimiter string `xml:"Delimiter"` // The delimiter for grouping objects' name + IsTruncated bool `xml:"IsTruncated"` // Flag indicates if all results are returned (when it's false) + NextKeyMarker string `xml:"NextKeyMarker"` // The start point of the next query + NextVersionIdMarker string `xml:"NextVersionIdMarker"` // The start point of the next query + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // You can think of commonprefixes as "folders" whose names end with the delimiter + ObjectDeleteMarkers []ObjectDeleteMarkerProperties `xml:"DeleteMarker"` // DeleteMarker list + ObjectVersions []ObjectVersionProperties `xml:"Version"` // version list +} + +type ObjectDeleteMarkerProperties struct { + XMLName xml.Name `xml:"DeleteMarker"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is current version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Owner Owner `xml:"Owner"` // bucket owner element +} + +type ObjectVersionProperties struct { + XMLName xml.Name `xml:"Version"` + Key string `xml:"Key"` // The Object Key + VersionId string `xml:"VersionId"` // The Object VersionId + IsLatest bool `xml:"IsLatest"` // is latest version or not + LastModified time.Time `xml:"LastModified"` // Object last modified time + Type string `xml:"Type"` // Object type + Size int64 `xml:"Size"` // Object size + ETag string `xml:"ETag"` // Object ETag + StorageClass string `xml:"StorageClass"` // Object storage class (Standard, IA, Archive) + Owner Owner `xml:"Owner"` // bucket owner element +} + +// Owner defines Bucket/Object's owner +type Owner struct { + XMLName xml.Name `xml:"Owner"` + ID string `xml:"ID"` // Owner ID + DisplayName string `xml:"DisplayName"` // Owner's display name +} + +// CopyObjectResult defines result object of CopyObject +type CopyObjectResult struct { + XMLName xml.Name `xml:"CopyObjectResult"` + LastModified time.Time 
`xml:"LastModified"` // New object's last modified time. + ETag string `xml:"ETag"` // New object's ETag +} + +// GetObjectACLResult defines result of GetObjectACL request +type GetObjectACLResult GetBucketACLResult + +type deleteXML struct { + XMLName xml.Name `xml:"Delete"` + Objects []DeleteObject `xml:"Object"` // Objects to delete + Quiet bool `xml:"Quiet"` // Flag of quiet mode. +} + +// DeleteObject defines the struct for deleting object +type DeleteObject struct { + XMLName xml.Name `xml:"Object"` + Key string `xml:"Key"` // Object name + VersionId string `xml:"VersionId,omitempty"` // Object VersionId +} + +// DeleteObjectsResult defines result of DeleteObjects request +type DeleteObjectsResult struct { + XMLName xml.Name + DeletedObjects []string // Deleted object key list +} + +// DeleteObjectsResult_inner defines result of DeleteObjects request +type DeleteObjectVersionsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjectsDetail []DeletedKeyInfo `xml:"Deleted"` // Deleted object detail info +} + +// DeleteKeyInfo defines object delete info +type DeletedKeyInfo struct { + XMLName xml.Name `xml:"Deleted"` + Key string `xml:"Key"` // Object key + VersionId string `xml:"VersionId"` // VersionId + DeleteMarker bool `xml:"DeleteMarker"` // Object DeleteMarker + DeleteMarkerVersionId string `xml:"DeleteMarkerVersionId"` // Object DeleteMarkerVersionId +} + +// InitiateMultipartUploadResult defines result of InitiateMultipartUpload request +type InitiateMultipartUploadResult struct { + XMLName xml.Name `xml:"InitiateMultipartUploadResult"` + Bucket string `xml:"Bucket"` // Bucket name + Key string `xml:"Key"` // Object name to upload + UploadID string `xml:"UploadId"` // Generated UploadId +} + +// UploadPart defines the upload/copy part +type UploadPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` // Part number + ETag string `xml:"ETag"` // ETag value of the part's data +} + +type uploadParts []UploadPart + +func (slice uploadParts) Len() int { + return len(slice) +} + +func (slice uploadParts) Less(i, j int) bool { + return slice[i].PartNumber < slice[j].PartNumber +} + +func (slice uploadParts) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +// UploadPartCopyResult defines result object of multipart copy request. +type UploadPartCopyResult struct { + XMLName xml.Name `xml:"CopyPartResult"` + LastModified time.Time `xml:"LastModified"` // Last modified time + ETag string `xml:"ETag"` // ETag +} + +type completeMultipartUploadXML struct { + XMLName xml.Name `xml:"CompleteMultipartUpload"` + Part []UploadPart `xml:"Part"` +} + +// CompleteMultipartUploadResult defines result object of CompleteMultipartUploadRequest +type CompleteMultipartUploadResult struct { + XMLName xml.Name `xml:"CompleteMultipartUploadResult"` + Location string `xml:"Location"` // Object URL + Bucket string `xml:"Bucket"` // Bucket name + ETag string `xml:"ETag"` // Object ETag + Key string `xml:"Key"` // Object name +} + +// ListUploadedPartsResult defines result object of ListUploadedParts +type ListUploadedPartsResult struct { + XMLName xml.Name `xml:"ListPartsResult"` + Bucket string `xml:"Bucket"` // Bucket name + Key string `xml:"Key"` // Object name + UploadID string `xml:"UploadId"` // Upload ID + NextPartNumberMarker string `xml:"NextPartNumberMarker"` // Next part number + MaxParts int `xml:"MaxParts"` // Max parts count + IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries returned.false: all entries returned. 
+ UploadedParts []UploadedPart `xml:"Part"` // Uploaded parts +} + +// UploadedPart defines uploaded part +type UploadedPart struct { + XMLName xml.Name `xml:"Part"` + PartNumber int `xml:"PartNumber"` // Part number + LastModified time.Time `xml:"LastModified"` // Last modified time + ETag string `xml:"ETag"` // ETag cache + Size int `xml:"Size"` // Part size +} + +// ListMultipartUploadResult defines result object of ListMultipartUpload +type ListMultipartUploadResult struct { + XMLName xml.Name `xml:"ListMultipartUploadsResult"` + Bucket string `xml:"Bucket"` // Bucket name + Delimiter string `xml:"Delimiter"` // Delimiter for grouping object. + Prefix string `xml:"Prefix"` // Object prefix + KeyMarker string `xml:"KeyMarker"` // Object key marker + UploadIDMarker string `xml:"UploadIdMarker"` // UploadId marker + NextKeyMarker string `xml:"NextKeyMarker"` // Next key marker, if not all entries returned. + NextUploadIDMarker string `xml:"NextUploadIdMarker"` // Next uploadId marker, if not all entries returned. + MaxUploads int `xml:"MaxUploads"` // Max uploads to return + IsTruncated bool `xml:"IsTruncated"` // Flag indicates all entries are returned. + Uploads []UncompletedUpload `xml:"Upload"` // Ongoing uploads (not completed, not aborted) + CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` // Common prefixes list. +} + +// UncompletedUpload structure wraps an uncompleted upload task +type UncompletedUpload struct { + XMLName xml.Name `xml:"Upload"` + Key string `xml:"Key"` // Object name + UploadID string `xml:"UploadId"` // The UploadId + Initiated time.Time `xml:"Initiated"` // Initialization time in the format such as 2012-02-23T04:18:23.000Z +} + +// ProcessObjectResult defines result object of ProcessObject +type ProcessObjectResult struct { + Bucket string `json:"bucket"` + FileSize int `json:"fileSize"` + Object string `json:"object"` + Status string `json:"status"` +} + +// decodeDeleteObjectsResult decodes deleting objects result in URL encoding +func decodeDeleteObjectsResult(result *DeleteObjectVersionsResult) error { + var err error + for i := 0; i < len(result.DeletedObjectsDetail); i++ { + result.DeletedObjectsDetail[i].Key, err = url.QueryUnescape(result.DeletedObjectsDetail[i].Key) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectsResult decodes list objects result in URL encoding +func decodeListObjectsResult(result *ListObjectsResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Marker, err = url.QueryUnescape(result.Marker) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.NextMarker, err = url.QueryUnescape(result.NextMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Objects); i++ { + result.Objects[i].Key, err = url.QueryUnescape(result.Objects[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// decodeListObjectVersionsResult decodes list version objects result in URL encoding +func decodeListObjectVersionsResult(result *ListObjectVersionsResult) error { + var err error + + // decode:Delimiter + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + + // decode Prefix + result.Prefix, err = 
url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + + // decode KeyMarker + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + + // decode VersionIdMarker + result.VersionIdMarker, err = url.QueryUnescape(result.VersionIdMarker) + if err != nil { + return err + } + + // decode NextKeyMarker + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + + // decode NextVersionIdMarker + result.NextVersionIdMarker, err = url.QueryUnescape(result.NextVersionIdMarker) + if err != nil { + return err + } + + // decode CommonPrefixes + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + + // decode deleteMarker + for i := 0; i < len(result.ObjectDeleteMarkers); i++ { + result.ObjectDeleteMarkers[i].Key, err = url.QueryUnescape(result.ObjectDeleteMarkers[i].Key) + if err != nil { + return err + } + } + + // decode ObjectVersions + for i := 0; i < len(result.ObjectVersions); i++ { + result.ObjectVersions[i].Key, err = url.QueryUnescape(result.ObjectVersions[i].Key) + if err != nil { + return err + } + } + + return nil +} + +// decodeListUploadedPartsResult decodes +func decodeListUploadedPartsResult(result *ListUploadedPartsResult) error { + var err error + result.Key, err = url.QueryUnescape(result.Key) + if err != nil { + return err + } + return nil +} + +// decodeListMultipartUploadResult decodes list multipart upload result in URL encoding +func decodeListMultipartUploadResult(result *ListMultipartUploadResult) error { + var err error + result.Prefix, err = url.QueryUnescape(result.Prefix) + if err != nil { + return err + } + result.Delimiter, err = url.QueryUnescape(result.Delimiter) + if err != nil { + return err + } + result.KeyMarker, err = url.QueryUnescape(result.KeyMarker) + if err != nil { + return err + } + result.NextKeyMarker, err = url.QueryUnescape(result.NextKeyMarker) + if err != nil { + return err + } + for i := 0; i < len(result.Uploads); i++ { + result.Uploads[i].Key, err = url.QueryUnescape(result.Uploads[i].Key) + if err != nil { + return err + } + } + for i := 0; i < len(result.CommonPrefixes); i++ { + result.CommonPrefixes[i], err = url.QueryUnescape(result.CommonPrefixes[i]) + if err != nil { + return err + } + } + return nil +} + +// createBucketConfiguration defines the configuration for creating a bucket. 
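A note on the decode helpers above: when a caller passes EncodingType("url"), OSS percent-encodes keys, prefixes and markers in list responses, and the decode* functions reverse that field by field with url.QueryUnescape. A quick standard-library illustration of the round trip (the key below is just an example):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// With EncodingType("url") the service percent-encodes keys in
	// list responses; the decode* helpers above reverse it per field.
	encoded := "photos%2F2019%2F%E5%9B%BE%E7%89%87.png"
	key, err := url.QueryUnescape(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(key) // photos/2019/图片.png
}
```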
+type createBucketConfiguration struct { + XMLName xml.Name `xml:"CreateBucketConfiguration"` + StorageClass StorageClassType `xml:"StorageClass,omitempty"` +} + +// LiveChannelConfiguration defines the configuration for live-channel +type LiveChannelConfiguration struct { + XMLName xml.Name `xml:"LiveChannelConfiguration"` + Description string `xml:"Description,omitempty"` //Description of live-channel, up to 128 bytes + Status string `xml:"Status,omitempty"` //Specifies the status of the live-channel + Target LiveChannelTarget `xml:"Target"` //Target configuration of live-channel + // use a pointer instead of a struct so an empty Snapshot can be omitted + Snapshot *LiveChannelSnapshot `xml:"Snapshot,omitempty"` //Snapshot configuration of live-channel +} + +// LiveChannelTarget target configuration of live-channel +type LiveChannelTarget struct { + XMLName xml.Name `xml:"Target"` + Type string `xml:"Type"` //The type of object, only supports HLS + FragDuration int `xml:"FragDuration,omitempty"` //The length of each ts object (in seconds), in the range [1,100] + FragCount int `xml:"FragCount,omitempty"` //The number of ts objects in the m3u8 object, in the range of [1,100] + PlaylistName string `xml:"PlaylistName,omitempty"` //The name of the m3u8 object, which must end with ".m3u8" and the length range is [6,128] +} + +// LiveChannelSnapshot snapshot configuration of live-channel +type LiveChannelSnapshot struct { + XMLName xml.Name `xml:"Snapshot"` + RoleName string `xml:"RoleName,omitempty"` //The role used for snapshot operations; it should have write permission on DestBucket and permission to send messages to the NotifyTopic + DestBucket string `xml:"DestBucket,omitempty"` //Bucket the snapshots will be written to; it should have the same owner as the source bucket + NotifyTopic string `xml:"NotifyTopic,omitempty"` //MNS topic for notifying users of high-frequency snapshot operation results + Interval int `xml:"Interval,omitempty"` //Interval of snapshots; there is no snapshot if there is no I-frame during the interval +} + +// CreateLiveChannelResult the result of creating a live-channel +type CreateLiveChannelResult struct { + XMLName xml.Name `xml:"CreateLiveChannelResult"` + PublishUrls []string `xml:"PublishUrls>Url"` //Push URLs list + PlayUrls []string `xml:"PlayUrls>Url"` //Play URLs list +} + +// LiveChannelStat the result of getting live-channel state +type LiveChannelStat struct { + XMLName xml.Name `xml:"LiveChannelStat"` + Status string `xml:"Status"` //Current push status of live-channel: Disabled, Live, Idle + ConnectedTime time.Time `xml:"ConnectedTime"` //The time when the client starts pushing, format: ISO8601 + RemoteAddr string `xml:"RemoteAddr"` //The IP address of the client + Video LiveChannelVideo `xml:"Video"` //Video stream information + Audio LiveChannelAudio `xml:"Audio"` //Audio stream information +} + +// LiveChannelVideo video stream information +type LiveChannelVideo struct { + XMLName xml.Name `xml:"Video"` + Width int `xml:"Width"` //Width (unit: pixels) + Height int `xml:"Height"` //Height (unit: pixels) + FrameRate int `xml:"FrameRate"` //Frame rate + Bandwidth int `xml:"Bandwidth"` //Bandwidth (unit: B/s) +} + +// LiveChannelAudio audio stream information +type LiveChannelAudio struct { + XMLName xml.Name `xml:"Audio"` + SampleRate int `xml:"SampleRate"` //Sample rate + Bandwidth int `xml:"Bandwidth"` //Bandwidth (unit: B/s) + Codec string `xml:"Codec"` //Encoding format +} + +// LiveChannelHistory the result of GetLiveChannelHistory; returns at most the latest 10 push records
+type LiveChannelHistory struct { + XMLName xml.Name `xml:"LiveChannelHistory"` + Record []LiveRecord `xml:"LiveRecord"` //Push records list +} + +// LiveRecord defines a push record +type LiveRecord struct { + XMLName xml.Name `xml:"LiveRecord"` + StartTime time.Time `xml:"StartTime"` //Start time, format: ISO8601 + EndTime time.Time `xml:"EndTime"` //End time, format: ISO8601 + RemoteAddr string `xml:"RemoteAddr"` //The IP address of the remote client +} + +// ListLiveChannelResult the result of ListLiveChannel +type ListLiveChannelResult struct { + XMLName xml.Name `xml:"ListLiveChannelResult"` + Prefix string `xml:"Prefix"` //Filter by names starting with the value of "Prefix" + Marker string `xml:"Marker"` //The cursor from which the listing starts + MaxKeys int `xml:"MaxKeys"` //The maximum count returned. The default value is 100; it cannot be greater than 1000. + IsTruncated bool `xml:"IsTruncated"` //Indicates whether all results have been returned; "true" indicates partial results were returned while "false" indicates all results have been returned + NextMarker string `xml:"NextMarker"` //NextMarker indicates the Marker value of the next request + LiveChannel []LiveChannelInfo `xml:"LiveChannel"` //The information of the live-channels +} + +// LiveChannelInfo the information of a live-channel +type LiveChannelInfo struct { + XMLName xml.Name `xml:"LiveChannel"` + Name string `xml:"Name"` //The name of the live-channel + Description string `xml:"Description"` //Description of the live-channel + Status string `xml:"Status"` //Status: disabled or enabled + LastModified time.Time `xml:"LastModified"` //Last modification time, format: ISO8601 + PublishUrls []string `xml:"PublishUrls>Url"` //Push URLs list + PlayUrls []string `xml:"PlayUrls>Url"` //Play URLs list +} + +// Tag a tag for the object +type Tag struct { + XMLName xml.Name `xml:"Tag"` + Key string `xml:"Key"` + Value string `xml:"Value"` +} + +// Tagging tagset for the object +type Tagging struct { + XMLName xml.Name `xml:"Tagging"` + Tags []Tag `xml:"TagSet>Tag,omitempty"` +} + +// GetObjectTaggingResult defines the result of GetObjectTagging +type GetObjectTaggingResult Tagging + +// VersioningConfig for the bucket +type VersioningConfig struct { + XMLName xml.Name `xml:"VersioningConfiguration"` + Status string `xml:"Status"` +} + +type GetBucketVersioningResult VersioningConfig + +// Server-side encryption rule for the bucket +type ServerEncryptionRule struct { + XMLName xml.Name `xml:"ServerSideEncryptionRule"` + SSEDefault SSEDefaultRule `xml:"ApplyServerSideEncryptionByDefault"` +} + +// Server-side encryption default rule for the bucket +type SSEDefaultRule struct { + XMLName xml.Name `xml:"ApplyServerSideEncryptionByDefault"` + SSEAlgorithm string `xml:"SSEAlgorithm"` + KMSMasterKeyID string `xml:"KMSMasterKeyID"` +} + +type GetBucketEncryptionResult ServerEncryptionRule +type GetBucketTaggingResult Tagging + +type BucketStat struct { + XMLName xml.Name `xml:"BucketStat"` + Storage int64 `xml:"Storage"` + ObjectCount int64 `xml:"ObjectCount"` + MultipartUploadCount int64 `xml:"MultipartUploadCount"` +} +type GetBucketStatResult BucketStat + +// RequestPaymentConfiguration defines the request payment configuration +type RequestPaymentConfiguration struct { + XMLName xml.Name `xml:"RequestPaymentConfiguration"` + Payer string `xml:"Payer,omitempty"` +} + +// BucketQoSConfiguration defines QoS configuration +type BucketQoSConfiguration struct { + XMLName xml.Name `xml:"QoSConfiguration"` + TotalUploadBandwidth *int `xml:"TotalUploadBandwidth"` // Total upload bandwidth + IntranetUploadBandwidth *int
`xml:"IntranetUploadBandwidth"` // Intranet upload bandwidth + ExtranetUploadBandwidth *int `xml:"ExtranetUploadBandwidth"` // Extranet upload bandwidth + TotalDownloadBandwidth *int `xml:"TotalDownloadBandwidth"` // Total download bandwidth + IntranetDownloadBandwidth *int `xml:"IntranetDownloadBandwidth"` // Intranet download bandwidth + ExtranetDownloadBandwidth *int `xml:"ExtranetDownloadBandwidth"` // Extranet download bandwidth + TotalQPS *int `xml:"TotalQps"` // Total Qps + IntranetQPS *int `xml:"IntranetQps"` // Intranet Qps + ExtranetQPS *int `xml:"ExtranetQps"` // Extranet Qps +} + +// UserQoSConfiguration define QoS and Range configuration +type UserQoSConfiguration struct { + XMLName xml.Name `xml:"QoSConfiguration"` + Region string `xml:"Region,omitempty"` // Effective area of Qos configuration + BucketQoSConfiguration +} diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go new file mode 100644 index 00000000..4ae85473 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/upload.go @@ -0,0 +1,520 @@ +package oss + +import ( + "crypto/md5" + "encoding/base64" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" +) + +// UploadFile is multipart file upload. +// +// objectKey the object name. +// filePath the local file path to upload. +// partSize the part size in byte. +// options the options for uploading object. +// +// error it's nil if the operation succeeds, otherwise it's an error object. +// +func (bucket Bucket) UploadFile(objectKey, filePath string, partSize int64, options ...Option) error { + if partSize < MinPartSize || partSize > MaxPartSize { + return errors.New("oss: part size invalid range (100KB, 5GB]") + } + + cpConf := getCpConfig(options) + routines := getRoutines(options) + + if cpConf != nil && cpConf.IsEnable { + cpFilePath := getUploadCpFilePath(cpConf, filePath, bucket.BucketName, objectKey) + if cpFilePath != "" { + return bucket.uploadFileWithCp(objectKey, filePath, partSize, options, cpFilePath, routines) + } + } + + return bucket.uploadFile(objectKey, filePath, partSize, options, routines) +} + +func getUploadCpFilePath(cpConf *cpConfig, srcFile, destBucket, destObject string) string { + if cpConf.FilePath == "" && cpConf.DirPath != "" { + dest := fmt.Sprintf("oss://%v/%v", destBucket, destObject) + absPath, _ := filepath.Abs(srcFile) + cpFileName := getCpFileName(absPath, dest) + cpConf.FilePath = cpConf.DirPath + string(os.PathSeparator) + cpFileName + } + return cpConf.FilePath +} + +// ----- concurrent upload without checkpoint ----- + +// getCpConfig gets checkpoint configuration +func getCpConfig(options []Option) *cpConfig { + cpcOpt, err := findOption(options, checkpointConfig, nil) + if err != nil || cpcOpt == nil { + return nil + } + + return cpcOpt.(*cpConfig) +} + +// getCpFileName return the name of the checkpoint file +func getCpFileName(src, dest string) string { + md5Ctx := md5.New() + md5Ctx.Write([]byte(src)) + srcCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + md5Ctx.Reset() + md5Ctx.Write([]byte(dest)) + destCheckSum := hex.EncodeToString(md5Ctx.Sum(nil)) + + return fmt.Sprintf("%v-%v.cp", srcCheckSum, destCheckSum) +} + +// getRoutines gets the routine count. by default it's 1. 
+func getRoutines(options []Option) int { + rtnOpt, err := findOption(options, routineNum, nil) + if err != nil || rtnOpt == nil { + return 1 + } + + rs := rtnOpt.(int) + if rs < 1 { + rs = 1 + } else if rs > 100 { + rs = 100 + } + + return rs +} + +// getPayer return the payer of the request +func getPayer(options []Option) string { + payerOpt, err := findOption(options, HTTPHeaderOssRequester, nil) + if err != nil || payerOpt == nil { + return "" + } + + return payerOpt.(string) +} + +// getProgressListener gets the progress callback +func getProgressListener(options []Option) ProgressListener { + isSet, listener, _ := isOptionSet(options, progressListener) + if !isSet { + return nil + } + return listener.(ProgressListener) +} + +// uploadPartHook is for testing usage +type uploadPartHook func(id int, chunk FileChunk) error + +var uploadPartHooker uploadPartHook = defaultUploadPart + +func defaultUploadPart(id int, chunk FileChunk) error { + return nil +} + +// workerArg defines worker argument structure +type workerArg struct { + bucket *Bucket + filePath string + imur InitiateMultipartUploadResult + options []Option + hook uploadPartHook +} + +// worker is the worker coroutine function +func worker(id int, arg workerArg, jobs <-chan FileChunk, results chan<- UploadPart, failed chan<- error, die <-chan bool) { + for chunk := range jobs { + if err := arg.hook(id, chunk); err != nil { + failed <- err + break + } + part, err := arg.bucket.UploadPartFromFile(arg.imur, arg.filePath, chunk.Offset, chunk.Size, chunk.Number, arg.options...) + if err != nil { + failed <- err + break + } + select { + case <-die: + return + default: + } + results <- part + } +} + +// scheduler function +func scheduler(jobs chan FileChunk, chunks []FileChunk) { + for _, chunk := range chunks { + jobs <- chunk + } + close(jobs) +} + +func getTotalBytes(chunks []FileChunk) int64 { + var tb int64 + for _, chunk := range chunks { + tb += chunk.Size + } + return tb +} + +// uploadFile is a concurrent upload, without checkpoint +func (bucket Bucket) uploadFile(objectKey, filePath string, partSize int64, options []Option, routines int) error { + listener := getProgressListener(options) + + chunks, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + // Initialize the multipart upload + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) + if err != nil { + return err + } + + jobs := make(chan FileChunk, len(chunks)) + results := make(chan UploadPart, len(chunks)) + failed := make(chan error) + die := make(chan bool) + + var completedBytes int64 + totalBytes := getTotalBytes(chunks) + event := newProgressEvent(TransferStartedEvent, 0, totalBytes, 0) + publishProgress(listener, event) + + // Start the worker coroutine + arg := workerArg{&bucket, filePath, imur, options, uploadPartHooker} + for w := 1; w <= routines; w++ { + go worker(w, arg, jobs, results, failed, die) + } + + // Schedule the jobs + go scheduler(jobs, chunks) + + // Waiting for the upload finished + completed := 0 + parts := make([]UploadPart, len(chunks)) + for completed < len(chunks) { + select { + case part := <-results: + completed++ + parts[part.PartNumber-1] = part + completedBytes += chunks[part.PartNumber-1].Size + + // why RwBytes in ProgressEvent is 0 ? 
+ // because read or write event has been notified in teeReader.Read() + event = newProgressEvent(TransferDataEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + case err := <-failed: + close(die) + event = newProgressEvent(TransferFailedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + bucket.AbortMultipartUpload(imur, options...) + return err + } + + if completed >= len(chunks) { + break + } + } + + event = newProgressEvent(TransferStartedEvent, completedBytes, totalBytes, 0) + publishProgress(listener, event) + + // Complete the multpart upload + _, err = bucket.CompleteMultipartUpload(imur, parts, options...) + if err != nil { + bucket.AbortMultipartUpload(imur, options...) + return err + } + return nil +} + +// ----- concurrent upload with checkpoint ----- +const uploadCpMagic = "FE8BB4EA-B593-4FAC-AD7A-2459A36E2E62" + +type uploadCheckpoint struct { + Magic string // Magic + MD5 string // Checkpoint file content's MD5 + FilePath string // Local file path + FileStat cpStat // File state + ObjectKey string // Key + UploadID string // Upload ID + Parts []cpPart // All parts of the local file +} + +type cpStat struct { + Size int64 // File size + LastModified time.Time // File's last modified time + MD5 string // Local file's MD5 +} + +type cpPart struct { + Chunk FileChunk // File chunk + Part UploadPart // Uploaded part + IsCompleted bool // Upload complete flag +} + +// isValid checks if the uploaded data is valid---it's valid when the file is not updated and the checkpoint data is valid. +func (cp uploadCheckpoint) isValid(filePath string) (bool, error) { + // Compare the CP's magic number and MD5. + cpb := cp + cpb.MD5 = "" + js, _ := json.Marshal(cpb) + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + + if cp.Magic != uploadCpMagic || b64 != cp.MD5 { + return false, nil + } + + // Make sure if the local file is updated. 
+ fd, err := os.Open(filePath) + if err != nil { + return false, err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return false, err + } + + md, err := calcFileMD5(filePath) + if err != nil { + return false, err + } + + // Compare the file size, file's last modified time and file's MD5 + if cp.FileStat.Size != st.Size() || + cp.FileStat.LastModified != st.ModTime() || + cp.FileStat.MD5 != md { + return false, nil + } + + return true, nil +} + +// load loads from the file +func (cp *uploadCheckpoint) load(filePath string) error { + contents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + err = json.Unmarshal(contents, cp) + return err +} + +// dump dumps to the local file +func (cp *uploadCheckpoint) dump(filePath string) error { + bcp := *cp + + // Calculate MD5 + bcp.MD5 = "" + js, err := json.Marshal(bcp) + if err != nil { + return err + } + sum := md5.Sum(js) + b64 := base64.StdEncoding.EncodeToString(sum[:]) + bcp.MD5 = b64 + + // Serialization + js, err = json.Marshal(bcp) + if err != nil { + return err + } + + // Dump + return ioutil.WriteFile(filePath, js, FilePermMode) +} + +// updatePart updates the part status +func (cp *uploadCheckpoint) updatePart(part UploadPart) { + cp.Parts[part.PartNumber-1].Part = part + cp.Parts[part.PartNumber-1].IsCompleted = true +} + +// todoParts returns unfinished parts +func (cp *uploadCheckpoint) todoParts() []FileChunk { + fcs := []FileChunk{} + for _, part := range cp.Parts { + if !part.IsCompleted { + fcs = append(fcs, part.Chunk) + } + } + return fcs +} + +// allParts returns all parts +func (cp *uploadCheckpoint) allParts() []UploadPart { + ps := []UploadPart{} + for _, part := range cp.Parts { + ps = append(ps, part.Part) + } + return ps +} + +// getCompletedBytes returns completed bytes count +func (cp *uploadCheckpoint) getCompletedBytes() int64 { + var completedBytes int64 + for _, part := range cp.Parts { + if part.IsCompleted { + completedBytes += part.Chunk.Size + } + } + return completedBytes +} + +// calcFileMD5 calculates the MD5 for the specified local file +func calcFileMD5(filePath string) (string, error) { + return "", nil +} + +// prepare initializes the multipart upload +func prepare(cp *uploadCheckpoint, objectKey, filePath string, partSize int64, bucket *Bucket, options []Option) error { + // CP + cp.Magic = uploadCpMagic + cp.FilePath = filePath + cp.ObjectKey = objectKey + + // Local file + fd, err := os.Open(filePath) + if err != nil { + return err + } + defer fd.Close() + + st, err := fd.Stat() + if err != nil { + return err + } + cp.FileStat.Size = st.Size() + cp.FileStat.LastModified = st.ModTime() + md, err := calcFileMD5(filePath) + if err != nil { + return err + } + cp.FileStat.MD5 = md + + // Chunks + parts, err := SplitFileByPartSize(filePath, partSize) + if err != nil { + return err + } + + cp.Parts = make([]cpPart, len(parts)) + for i, part := range parts { + cp.Parts[i].Chunk = part + cp.Parts[i].IsCompleted = false + } + + // Init load + imur, err := bucket.InitiateMultipartUpload(objectKey, options...) + if err != nil { + return err + } + cp.UploadID = imur.UploadID + + return nil +} + +// complete completes the multipart upload and deletes the local CP files +func complete(cp *uploadCheckpoint, bucket *Bucket, parts []UploadPart, cpFilePath string, options []Option) error { + imur := InitiateMultipartUploadResult{Bucket: bucket.BucketName, + Key: cp.ObjectKey, UploadID: cp.UploadID} + _, err := bucket.CompleteMultipartUpload(imur, parts, options...) 
+	if err != nil {
+		return err
+	}
+	os.Remove(cpFilePath)
+	return err
+}
+
+// uploadFileWithCp handles the concurrent upload with checkpoint
+func (bucket Bucket) uploadFileWithCp(objectKey, filePath string, partSize int64, options []Option, cpFilePath string, routines int) error {
+	listener := getProgressListener(options)
+
+	// Load the CP data
+	ucp := uploadCheckpoint{}
+	err := ucp.load(cpFilePath)
+	if err != nil {
+		os.Remove(cpFilePath)
+	}
+
+	// If the load failed or the CP data is invalid, re-initialize the upload.
+	valid, err := ucp.isValid(filePath)
+	if err != nil || !valid {
+		if err = prepare(&ucp, objectKey, filePath, partSize, &bucket, options); err != nil {
+			return err
+		}
+		os.Remove(cpFilePath)
+	}
+
+	chunks := ucp.todoParts()
+	imur := InitiateMultipartUploadResult{
+		Bucket:   bucket.BucketName,
+		Key:      objectKey,
+		UploadID: ucp.UploadID}
+
+	jobs := make(chan FileChunk, len(chunks))
+	results := make(chan UploadPart, len(chunks))
+	failed := make(chan error)
+	die := make(chan bool)
+
+	completedBytes := ucp.getCompletedBytes()
+
+	// RwBytes in the ProgressEvent is 0 because the read/write events have
+	// already been notified in teeReader.Read()
+	event := newProgressEvent(TransferStartedEvent, completedBytes, ucp.FileStat.Size, 0)
+	publishProgress(listener, event)
+
+	// Start the workers
+	arg := workerArg{&bucket, filePath, imur, options, uploadPartHooker}
+	for w := 1; w <= routines; w++ {
+		go worker(w, arg, jobs, results, failed, die)
+	}
+
+	// Schedule the jobs
+	go scheduler(jobs, chunks)
+
+	// Wait for the jobs to finish
+	completed := 0
+	for completed < len(chunks) {
+		select {
+		case part := <-results:
+			completed++
+			ucp.updatePart(part)
+			ucp.dump(cpFilePath)
+			completedBytes += ucp.Parts[part.PartNumber-1].Chunk.Size
+			event = newProgressEvent(TransferDataEvent, completedBytes, ucp.FileStat.Size, 0)
+			publishProgress(listener, event)
+		case err := <-failed:
+			close(die)
+			event = newProgressEvent(TransferFailedEvent, completedBytes, ucp.FileStat.Size, 0)
+			publishProgress(listener, event)
+			return err
+		}
+
+		if completed >= len(chunks) {
+			break
+		}
+	}
+
+	event = newProgressEvent(TransferCompletedEvent, completedBytes, ucp.FileStat.Size, 0)
+	publishProgress(listener, event)
+
+	// Complete the multipart upload
+	err = complete(&ucp, &bucket, ucp.allParts(), cpFilePath, options)
+	return err
+}
diff --git a/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
new file mode 100644
index 00000000..c0e7b2b1
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-oss-go-sdk/oss/utils.go
@@ -0,0 +1,265 @@
+package oss
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"hash/crc64"
+	"net/http"
+	"os"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// userAgent builds the user agent string.
+// It carries the SDK version information, OS information and Go version.
+func userAgent() string {
+	sys := getSysInfo()
+	return fmt.Sprintf("aliyun-sdk-go/%s (%s/%s/%s;%s)", Version, sys.name,
+		sys.release, sys.machine, runtime.Version())
+}
+
+type sysInfo struct {
+	name    string // OS name such as windows/Linux
+	release string // OS version such as 2.6.32-220.23.2.ali1089.el5.x86_64
+	machine string // CPU type such as amd64/x86_64
+}
+
+// getSysInfo gets the system info: OS name, OS release and CPU type
+func getSysInfo() sysInfo {
+	name := runtime.GOOS
+	release := "-"
+	machine := runtime.GOARCH
+	if out, err := exec.Command("uname", "-s").CombinedOutput(); err == nil {
+		name = string(bytes.TrimSpace(out))
+	}
exec.Command("uname", "-r").CombinedOutput(); err == nil { + release = string(bytes.TrimSpace(out)) + } + if out, err := exec.Command("uname", "-m").CombinedOutput(); err == nil { + machine = string(bytes.TrimSpace(out)) + } + return sysInfo{name: name, release: release, machine: machine} +} + +// unpackedRange +type unpackedRange struct { + hasStart bool // Flag indicates if the start point is specified + hasEnd bool // Flag indicates if the end point is specified + start int64 // Start point + end int64 // End point +} + +// invalidRangeError returns invalid range error +func invalidRangeError(r string) error { + return fmt.Errorf("InvalidRange %s", r) +} + +// parseRange parse various styles of range such as bytes=M-N +func parseRange(normalizedRange string) (*unpackedRange, error) { + var err error + hasStart := false + hasEnd := false + var start int64 + var end int64 + + // Bytes==M-N or ranges=M-N + nrSlice := strings.Split(normalizedRange, "=") + if len(nrSlice) != 2 || nrSlice[0] != "bytes" { + return nil, invalidRangeError(normalizedRange) + } + + // Bytes=M-N,X-Y + rSlice := strings.Split(nrSlice[1], ",") + rStr := rSlice[0] + + if strings.HasSuffix(rStr, "-") { // M- + startStr := rStr[:len(rStr)-1] + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasStart = true + } else if strings.HasPrefix(rStr, "-") { // -N + len := rStr[1:] + end, err = strconv.ParseInt(len, 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + if end == 0 { // -0 + return nil, invalidRangeError(normalizedRange) + } + hasEnd = true + } else { // M-N + valSlice := strings.Split(rStr, "-") + if len(valSlice) != 2 { + return nil, invalidRangeError(normalizedRange) + } + start, err = strconv.ParseInt(valSlice[0], 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasStart = true + end, err = strconv.ParseInt(valSlice[1], 10, 64) + if err != nil { + return nil, invalidRangeError(normalizedRange) + } + hasEnd = true + } + + return &unpackedRange{hasStart, hasEnd, start, end}, nil +} + +// adjustRange returns adjusted range, adjust the range according to the length of the file +func adjustRange(ur *unpackedRange, size int64) (start, end int64) { + if ur == nil { + return 0, size + } + + if ur.hasStart && ur.hasEnd { + start = ur.start + end = ur.end + 1 + if ur.start < 0 || ur.start >= size || ur.end > size || ur.start > ur.end { + start = 0 + end = size + } + } else if ur.hasStart { + start = ur.start + end = size + if ur.start < 0 || ur.start >= size { + start = 0 + } + } else if ur.hasEnd { + start = size - ur.end + end = size + if ur.end < 0 || ur.end > size { + start = 0 + end = size + } + } + return +} + +// GetNowSec returns Unix time, the number of seconds elapsed since January 1, 1970 UTC. +// gets the current time in Unix time, in seconds. +func GetNowSec() int64 { + return time.Now().Unix() +} + +// GetNowNanoSec returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. The result is undefined if the Unix time +// in nanoseconds cannot be represented by an int64. Note that this +// means the result of calling UnixNano on the zero Time is undefined. +// gets the current time in Unix time, in nanoseconds. +func GetNowNanoSec() int64 { + return time.Now().UnixNano() +} + +// GetNowGMT gets the current time in GMT format. 
+func GetNowGMT() string { + return time.Now().UTC().Format(http.TimeFormat) +} + +// FileChunk is the file chunk definition +type FileChunk struct { + Number int // Chunk number + Offset int64 // Chunk offset + Size int64 // Chunk size. +} + +// SplitFileByPartNum splits big file into parts by the num of parts. +// Split the file with specified parts count, returns the split result when error is nil. +func SplitFileByPartNum(fileName string, chunkNum int) ([]FileChunk, error) { + if chunkNum <= 0 || chunkNum > 10000 { + return nil, errors.New("chunkNum invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + + if int64(chunkNum) > stat.Size() { + return nil, errors.New("oss: chunkNum invalid") + } + + var chunks []FileChunk + var chunk = FileChunk{} + var chunkN = (int64)(chunkNum) + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * (stat.Size() / chunkN) + if i == chunkN-1 { + chunk.Size = stat.Size()/chunkN + stat.Size()%chunkN + } else { + chunk.Size = stat.Size() / chunkN + } + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// SplitFileByPartSize splits big file into parts by the size of parts. +// Splits the file by the part size. Returns the FileChunk when error is nil. +func SplitFileByPartSize(fileName string, chunkSize int64) ([]FileChunk, error) { + if chunkSize <= 0 { + return nil, errors.New("chunkSize invalid") + } + + file, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return nil, err + } + var chunkN = stat.Size() / chunkSize + if chunkN >= 10000 { + return nil, errors.New("Too many parts, please increase part size") + } + + var chunks []FileChunk + var chunk = FileChunk{} + for i := int64(0); i < chunkN; i++ { + chunk.Number = int(i + 1) + chunk.Offset = i * chunkSize + chunk.Size = chunkSize + chunks = append(chunks, chunk) + } + + if stat.Size()%chunkSize > 0 { + chunk.Number = len(chunks) + 1 + chunk.Offset = int64(len(chunks)) * chunkSize + chunk.Size = stat.Size() % chunkSize + chunks = append(chunks, chunk) + } + + return chunks, nil +} + +// GetPartEnd calculates the end position +func GetPartEnd(begin int64, total int64, per int64) int64 { + if begin+per > total { + return total - 1 + } + return begin + per - 1 +} + +// crcTable returns the table constructed from the specified polynomial +var crcTable = func() *crc64.Table { + return crc64.MakeTable(crc64.ECMA) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE new file mode 100644 index 00000000..b463b291 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 1999-2017 Alibaba Group Holding Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go
new file mode 100644
index 00000000..a13ddc25
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/api.go
@@ -0,0 +1,1277 @@
+package tablestore
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
+	"github.com/golang/protobuf/proto"
+	"io"
+	"math/rand"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+)
+
+const (
+	userAgent = "aliyun-tablestore-sdk-golang/4.0.2"
+
+	createTableUri                     = "/CreateTable"
+	listTableUri                       = "/ListTable"
+	deleteTableUri                     = "/DeleteTable"
+	describeTableUri                   = "/DescribeTable"
+	updateTableUri                     = "/UpdateTable"
+	putRowUri                          = "/PutRow"
+	deleteRowUri                       = "/DeleteRow"
+	getRowUri                          = "/GetRow"
+	updateRowUri                       = "/UpdateRow"
+	batchGetRowUri                     = "/BatchGetRow"
+	batchWriteRowUri                   = "/BatchWriteRow"
+	getRangeUri                        = "/GetRange"
+	listStreamUri                      = "/ListStream"
+	describeStreamUri                  = "/DescribeStream"
+	getShardIteratorUri                = "/GetShardIterator"
+	getStreamRecordUri                 = "/GetStreamRecord"
+	computeSplitPointsBySizeRequestUri = "/ComputeSplitPointsBySize"
+	searchUri                          = "/Search"
+	createSearchIndexUri               = "/CreateSearchIndex"
+	listSearchIndexUri                 = "/ListSearchIndex"
+	deleteSearchIndexUri               = "/DeleteSearchIndex"
+	describeSearchIndexUri             = "/DescribeSearchIndex"
+
+	createIndexUri = "/CreateIndex"
+	dropIndexUri   = "/DropIndex"
+
+	createlocaltransactionuri = "/StartLocalTransaction"
+	committransactionuri      = "/CommitTransaction"
+	aborttransactionuri       = "/AbortTransaction"
+)
+
+// Constructor: creates a client of the TableStore service.
+//
+// @param endPoint The address of the TableStore service.
+// @param instanceName The name of the TableStore instance.
+// @param accessKeyId The Access Key ID used to identify the caller.
+// @param accessKeySecret The Access Key Secret used for signing and verification.
+// @param options Optional client configuration.
+func NewClient(endPoint, instanceName, accessKeyId, accessKeySecret string, options ...ClientOption) *TableStoreClient {
+	client := NewClientWithConfig(endPoint, instanceName, accessKeyId, accessKeySecret, "", nil)
+	// Apply the client options
+	for _, option := range options {
+		option(client)
+	}
+
+	return client
+}
+
+type GetHttpClient func() IHttpClient
+
+var currentGetHttpClientFunc GetHttpClient = func() IHttpClient {
+	return &TableStoreHttpClient{}
+}
+
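+// An illustrative construction of the client (a sketch, not part of the SDK;
+// the endpoint, instance name and credentials below are hypothetical
+// placeholders):
+//
+//	client := NewClient("https://myinstance.cn-hangzhou.ots.aliyuncs.com",
+//		"myinstance", "yourAccessKeyId", "yourAccessKeySecret")
+//	resp, err := client.ListTable()
+//	if err == nil {
+//		fmt.Println(resp.TableNames)
+//	}
+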
+// Constructor: creates a client of the OTS (TableStore) service, taking an
+// explicitly supplied TableStoreConfig.
+func NewClientWithConfig(endPoint, instanceName, accessKeyId, accessKeySecret string, securityToken string, config *TableStoreConfig) *TableStoreClient {
+	tableStoreClient := new(TableStoreClient)
+	tableStoreClient.endPoint = endPoint
+	tableStoreClient.instanceName = instanceName
+	tableStoreClient.accessKeyId = accessKeyId
+	tableStoreClient.accessKeySecret = accessKeySecret
+	tableStoreClient.securityToken = securityToken
+	if config == nil {
+		config = NewDefaultTableStoreConfig()
+	}
+	tableStoreClient.config = config
+	var tableStoreTransportProxy http.RoundTripper
+	if config.Transport != nil {
+		tableStoreTransportProxy = config.Transport
+	} else {
+		tableStoreTransportProxy = &http.Transport{
+			MaxIdleConnsPerHost: config.MaxIdleConnections,
+			Dial: (&net.Dialer{
+				Timeout: config.HTTPTimeout.ConnectionTimeout,
+			}).Dial,
+		}
+	}
+
+	tableStoreClient.httpClient = currentGetHttpClientFunc()
+
+	httpClient := &http.Client{
+		Transport: tableStoreTransportProxy,
+		Timeout:   tableStoreClient.config.HTTPTimeout.RequestTimeout,
+	}
+	tableStoreClient.httpClient.New(httpClient)
+
+	tableStoreClient.random = rand.New(rand.NewSource(time.Now().Unix()))
+
+	return tableStoreClient
+}
+
+func NewClientWithExternalHeader(endPoint, instanceName, accessKeyId, accessKeySecret string, securityToken string, config *TableStoreConfig, header map[string]string) *TableStoreClient {
+	tableStoreClient := NewClientWithConfig(endPoint, instanceName, accessKeyId, accessKeySecret, securityToken, config)
+	tableStoreClient.externalHeader = header
+	return tableStoreClient
+}
+
+// doRequestWithRetry sends a request to the server and retries it when the
+// error is considered retriable.
+func (tableStoreClient *TableStoreClient) doRequestWithRetry(uri string, req, resp proto.Message, responseInfo *ResponseInfo) error {
+	end := time.Now().Add(tableStoreClient.config.MaxRetryTime)
+	url := fmt.Sprintf("%s%s", tableStoreClient.endPoint, uri)
+	/* request body */
+	var body []byte
+	var err error
+	if req != nil {
+		body, err = proto.Marshal(req)
+		if err != nil {
+			return err
+		}
+	} else {
+		body = nil
+	}
+
+	var value int64
+	var i uint
+	var respBody []byte
+	var requestId string
+	for i = 0; ; i++ {
+		respBody, err, requestId = tableStoreClient.doRequest(url, uri, body, resp)
+		responseInfo.RequestId = requestId
+
+		if err == nil {
+			break
+		} else {
+			value = getNextPause(tableStoreClient, err, i, end, value, uri)
+
+			// fmt.Println("hit retry", uri, err, *e.Code, value)
+			if value <= 0 {
+				return err
+			}
+
+			time.Sleep(time.Duration(value) * time.Millisecond)
+		}
+	}
+
+	if respBody == nil || len(respBody) == 0 {
+		return nil
+	}
+
+	err = proto.Unmarshal(respBody, resp)
+	if err != nil {
+		return fmt.Errorf("decode resp failed: %s", err)
+	}
+
+	return nil
+}
+
+func getNextPause(tableStoreClient *TableStoreClient, err error, count uint, end time.Time, lastInterval int64, action string) int64 {
+	if tableStoreClient.config.RetryTimes <= count || time.Now().After(end) {
+		return 0
+	}
+	var retry bool
+	if otsErr, ok := err.(*OtsError); ok {
+		retry = shouldRetry(tableStoreClient, otsErr.Code, otsErr.Message, action, otsErr.HttpStatusCode)
+	} else {
+		if err == io.EOF || err == io.ErrUnexpectedEOF || // retry on special net errors that contain EOF or reset
+			strings.Contains(err.Error(), io.EOF.Error()) ||
+			strings.Contains(err.Error(), "Connection reset by peer") ||
+			strings.Contains(err.Error(), "connection reset by peer") {
+			retry = true
+		} else if nErr, ok := err.(net.Error); ok {
+			retry = nErr.Temporary()
+		}
+	}
+
+	if retry {
+		value := lastInterval*2 + tableStoreClient.random.Int63n(DefaultRetryInterval-1) + 1
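+		// The pause above roughly doubles the previous interval and adds
+		// random jitter; the check below caps it at MaxRetryInterval.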
+		if value > MaxRetryInterval {
+			value = MaxRetryInterval
+		}
+
+		return value
+	}
+	return 0
+}
+
+func shouldRetry(tableStoreClient *TableStoreClient, errorCode string, errorMsg string, action string, statusCode int) bool {
+	if tableStoreClient.CustomizedRetryFunc != nil {
+		if tableStoreClient.CustomizedRetryFunc(errorCode, errorMsg, action, statusCode) == true {
+			return true
+		}
+	}
+
+	if retryNotMatterActions(errorCode, errorMsg) == true {
+		return true
+	}
+
+	if isIdempotent(action) &&
+		(errorCode == STORAGE_TIMEOUT || errorCode == INTERNAL_SERVER_ERROR || errorCode == SERVER_UNAVAILABLE) {
+		return true
+	}
+	return false
+}
+
+type CustomizedRetryNotMatterActions func(errorCode string, errorMsg string, action string, httpStatus int) bool
+
+func retryNotMatterActions(errorCode string, errorMsg string) bool {
+	if errorCode == ROW_OPERATION_CONFLICT || errorCode == NOT_ENOUGH_CAPACITY_UNIT ||
+		errorCode == TABLE_NOT_READY || errorCode == PARTITION_UNAVAILABLE ||
+		errorCode == SERVER_BUSY || errorCode == STORAGE_SERVER_BUSY || (errorCode == QUOTA_EXHAUSTED && errorMsg == "Too frequent table operations.") {
+		return true
+	} else {
+		return false
+	}
+}
+
+func isIdempotent(action string) bool {
+	if action == batchGetRowUri || action == describeTableUri ||
+		action == getRangeUri || action == getRowUri ||
+		action == listTableUri || action == listStreamUri ||
+		action == getStreamRecordUri || action == describeStreamUri {
+		return true
+	} else {
+		return false
+	}
+}
+
+func (tableStoreClient *TableStoreClient) doRequest(url string, uri string, body []byte, resp proto.Message) ([]byte, error, string) {
+	hreq, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
+	if err != nil {
+		return nil, err, ""
+	}
+	/* set headers */
+	hreq.Header.Set("User-Agent", userAgent)
+
+	date := time.Now().UTC().Format(xOtsDateFormat)
+
+	hreq.Header.Set(xOtsDate, date)
+	hreq.Header.Set(xOtsApiversion, ApiVersion)
+	hreq.Header.Set(xOtsAccesskeyid, tableStoreClient.accessKeyId)
+	hreq.Header.Set(xOtsInstanceName, tableStoreClient.instanceName)
+	for key, value := range tableStoreClient.externalHeader {
+		hreq.Header[key] = []string{value}
+	}
+
+	md5Byte := md5.Sum(body)
+	md5Base64 := base64.StdEncoding.EncodeToString(md5Byte[:16])
+	hreq.Header.Set(xOtsContentmd5, md5Base64)
+
+	otshead := createOtsHeaders(tableStoreClient.accessKeySecret)
+	otshead.set(xOtsDate, date)
+	otshead.set(xOtsApiversion, ApiVersion)
+	otshead.set(xOtsAccesskeyid, tableStoreClient.accessKeyId)
+	if tableStoreClient.securityToken != "" {
+		hreq.Header.Set(xOtsHeaderStsToken, tableStoreClient.securityToken)
+		otshead.set(xOtsHeaderStsToken, tableStoreClient.securityToken)
+	}
+	otshead.set(xOtsContentmd5, md5Base64)
+	otshead.set(xOtsInstanceName, tableStoreClient.instanceName)
+	for key, value := range tableStoreClient.externalHeader {
+		if strings.HasPrefix(key, xOtsPrefix) {
+			otshead.set(key, value)
+		}
+	}
+	sign, err := otshead.signature(uri, "POST", tableStoreClient.accessKeySecret)
+
+	if err != nil {
+		return nil, err, ""
+	}
+	hreq.Header.Set(xOtsSignature, sign)
+
+	/* end set headers */
+	return tableStoreClient.postReq(hreq, url)
+}
+
+// table API
+// Create a table with the CreateTableRequest, in which the table name and
+// primary keys are required.
+//
+// @param request The CreateTableRequest.
+// @return The CreateTableResponse, or an error.
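+//
+// A minimal illustrative request (a sketch; "sample_table" and "pk0" are
+// hypothetical names, and AddPrimaryKeyColumn is the TableMeta helper defined
+// in this SDK's model code):
+//
+//	meta := new(TableMeta)
+//	meta.TableName = "sample_table"
+//	meta.AddPrimaryKeyColumn("pk0", PrimaryKeyType_STRING)
+//	req := &CreateTableRequest{
+//		TableMeta:          meta,
+//		TableOption:        &TableOption{TimeToAlive: -1, MaxVersion: 1},
+//		ReservedThroughput: &ReservedThroughput{Readcap: 0, Writecap: 0},
+//	}
+//	_, err := client.CreateTable(req)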
+func (tableStoreClient *TableStoreClient) CreateTable(request *CreateTableRequest) (*CreateTableResponse, error) {
+	if len(request.TableMeta.TableName) > maxTableNameLength {
+		return nil, errTableNameTooLong(request.TableMeta.TableName)
+	}
+
+	if len(request.TableMeta.SchemaEntry) > maxPrimaryKeyNum {
+		return nil, errPrimaryKeyTooMuch
+	}
+
+	if len(request.TableMeta.SchemaEntry) == 0 {
+		return nil, errCreateTableNoPrimaryKey
+	}
+
+	req := new(otsprotocol.CreateTableRequest)
+	req.TableMeta = new(otsprotocol.TableMeta)
+	req.TableMeta.TableName = proto.String(request.TableMeta.TableName)
+
+	if len(request.TableMeta.DefinedColumns) > 0 {
+		for _, value := range request.TableMeta.DefinedColumns {
+			req.TableMeta.DefinedColumn = append(req.TableMeta.DefinedColumn, &otsprotocol.DefinedColumnSchema{Name: &value.Name, Type: value.ColumnType.ConvertToPbDefinedColumnType().Enum()})
+		}
+	}
+
+	if len(request.IndexMetas) > 0 {
+		for _, value := range request.IndexMetas {
+			req.IndexMetas = append(req.IndexMetas, value.ConvertToPbIndexMeta())
+		}
+	}
+
+	for _, key := range request.TableMeta.SchemaEntry {
+		keyType := otsprotocol.PrimaryKeyType(*key.Type)
+		if key.Option != nil {
+			keyOption := otsprotocol.PrimaryKeyOption(*key.Option)
+			req.TableMeta.PrimaryKey = append(req.TableMeta.PrimaryKey, &otsprotocol.PrimaryKeySchema{Name: key.Name, Type: &keyType, Option: &keyOption})
+		} else {
+			req.TableMeta.PrimaryKey = append(req.TableMeta.PrimaryKey, &otsprotocol.PrimaryKeySchema{Name: key.Name, Type: &keyType})
+		}
+	}
+
+	req.ReservedThroughput = new(otsprotocol.ReservedThroughput)
+	req.ReservedThroughput.CapacityUnit = new(otsprotocol.CapacityUnit)
+	req.ReservedThroughput.CapacityUnit.Read = proto.Int32(int32(request.ReservedThroughput.Readcap))
+	req.ReservedThroughput.CapacityUnit.Write = proto.Int32(int32(request.ReservedThroughput.Writecap))
+
+	req.TableOptions = new(otsprotocol.TableOptions)
+	req.TableOptions.TimeToLive = proto.Int32(int32(request.TableOption.TimeToAlive))
+	req.TableOptions.MaxVersions = proto.Int32(int32(request.TableOption.MaxVersion))
+
+	if request.TableOption.DeviationCellVersionInSec > 0 {
+		req.TableOptions.DeviationCellVersionInSec = proto.Int64(request.TableOption.DeviationCellVersionInSec)
+	}
+
+	if request.StreamSpec != nil {
+		var ss otsprotocol.StreamSpecification
+		if request.StreamSpec.EnableStream {
+			ss = otsprotocol.StreamSpecification{
+				EnableStream:   &request.StreamSpec.EnableStream,
+				ExpirationTime: &request.StreamSpec.ExpirationTime}
+		} else {
+			ss = otsprotocol.StreamSpecification{
+				EnableStream: &request.StreamSpec.EnableStream}
+		}
+
+		req.StreamSpec = &ss
+	}
+
+	resp := new(otsprotocol.CreateTableResponse)
+	response := &CreateTableResponse{}
+	if err := tableStoreClient.doRequestWithRetry(createTableUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+func (tableStoreClient *TableStoreClient) CreateIndex(request *CreateIndexRequest) (*CreateIndexResponse, error) {
+	if len(request.MainTableName) > maxTableNameLength {
+		return nil, errTableNameTooLong(request.MainTableName)
+	}
+
+	req := new(otsprotocol.CreateIndexRequest)
+	req.IndexMeta = request.IndexMeta.ConvertToPbIndexMeta()
+	req.IncludeBaseData = proto.Bool(request.IncludeBaseData)
+	req.MainTableName = proto.String(request.MainTableName)
+
+	resp := new(otsprotocol.CreateIndexResponse)
+	response := &CreateIndexResponse{}
+	if err := tableStoreClient.doRequestWithRetry(createIndexUri, req, resp, &response.ResponseInfo); err != nil {
&response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} + +func (tableStoreClient *TableStoreClient) DeleteIndex(request *DeleteIndexRequest) (*DeleteIndexResponse, error) { + if len(request.MainTableName) > maxTableNameLength { + return nil, errTableNameTooLong(request.MainTableName) + } + + req := new(otsprotocol.DropIndexRequest) + req.IndexName = proto.String(request.IndexName) + req.MainTableName = proto.String(request.MainTableName) + + resp := new(otsprotocol.DropIndexResponse) + response := &DeleteIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(dropIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} + +// List all tables. If done, all table names will be returned. +// 列出所有的表,如果操作成功,将返回所有表的名称。 +// +// @param tableNames The returned table names. 返回的表名集合。 +// @return Void. 无返回值。 +func (tableStoreClient *TableStoreClient) ListTable() (*ListTableResponse, error) { + resp := new(otsprotocol.ListTableResponse) + response := &ListTableResponse{} + if err := tableStoreClient.doRequestWithRetry(listTableUri, nil, resp, &response.ResponseInfo); err != nil { + return response, err + } + + response.TableNames = resp.TableNames + return response, nil +} + +// Delete a table and all its views will be deleted. +// 删除一个表 +// +// @param tableName The table name. 表名。 +// @return Void. 无返回值。 +func (tableStoreClient *TableStoreClient) DeleteTable(request *DeleteTableRequest) (*DeleteTableResponse, error) { + req := new(otsprotocol.DeleteTableRequest) + req.TableName = proto.String(request.TableName) + + response := &DeleteTableResponse{} + if err := tableStoreClient.doRequestWithRetry(deleteTableUri, req, nil, &response.ResponseInfo); err != nil { + return nil, err + } + return response, nil +} + +// Query the tablemeta, tableoption and reservedthroughtputdetails +// @param DescribeTableRequest +// @param DescribeTableResponse +func (tableStoreClient *TableStoreClient) DescribeTable(request *DescribeTableRequest) (*DescribeTableResponse, error) { + req := new(otsprotocol.DescribeTableRequest) + req.TableName = proto.String(request.TableName) + + resp := new(otsprotocol.DescribeTableResponse) + response := new(DescribeTableResponse) + + if err := tableStoreClient.doRequestWithRetry(describeTableUri, req, resp, &response.ResponseInfo); err != nil { + return &DescribeTableResponse{}, err + } + + response.ReservedThroughput = &ReservedThroughput{Readcap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Read)), Writecap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Write))} + + responseTableMeta := new(TableMeta) + responseTableMeta.TableName = *resp.TableMeta.TableName + + for _, key := range resp.TableMeta.PrimaryKey { + keyType := PrimaryKeyType(*key.Type) + + // enable it when we support kep option in describe table + if key.Option != nil { + keyOption := PrimaryKeyOption(*key.Option) + responseTableMeta.SchemaEntry = append(responseTableMeta.SchemaEntry, &PrimaryKeySchema{Name: key.Name, Type: &keyType, Option: &keyOption}) + } else { + responseTableMeta.SchemaEntry = append(responseTableMeta.SchemaEntry, &PrimaryKeySchema{Name: key.Name, Type: &keyType}) + } + } + response.TableMeta = responseTableMeta + response.TableOption = &TableOption{TimeToAlive: int(*resp.TableOptions.TimeToLive), MaxVersion: int(*resp.TableOptions.MaxVersions), DeviationCellVersionInSec: *resp.TableOptions.DeviationCellVersionInSec} + if resp.StreamDetails != nil && *resp.StreamDetails.EnableStream { + 
response.StreamDetails = &StreamDetails{ + EnableStream: *resp.StreamDetails.EnableStream, + StreamId: (*StreamId)(resp.StreamDetails.StreamId), + ExpirationTime: *resp.StreamDetails.ExpirationTime, + LastEnableTime: *resp.StreamDetails.LastEnableTime} + } else { + response.StreamDetails = &StreamDetails{ + EnableStream: false} + } + + for _, meta := range resp.IndexMetas { + response.IndexMetas = append(response.IndexMetas, ConvertPbIndexMetaToIndexMeta(meta)) + } + + return response, nil +} + +// Update the table info includes tableoptions and reservedthroughput +// @param UpdateTableRequest +// @param UpdateTableResponse +func (tableStoreClient *TableStoreClient) UpdateTable(request *UpdateTableRequest) (*UpdateTableResponse, error) { + req := new(otsprotocol.UpdateTableRequest) + req.TableName = proto.String(request.TableName) + + if request.ReservedThroughput != nil { + req.ReservedThroughput = new(otsprotocol.ReservedThroughput) + req.ReservedThroughput.CapacityUnit = new(otsprotocol.CapacityUnit) + req.ReservedThroughput.CapacityUnit.Read = proto.Int32(int32(request.ReservedThroughput.Readcap)) + req.ReservedThroughput.CapacityUnit.Write = proto.Int32(int32(request.ReservedThroughput.Writecap)) + } + + if request.TableOption != nil { + req.TableOptions = new(otsprotocol.TableOptions) + req.TableOptions.TimeToLive = proto.Int32(int32(request.TableOption.TimeToAlive)) + req.TableOptions.MaxVersions = proto.Int32(int32(request.TableOption.MaxVersion)) + + if request.TableOption.DeviationCellVersionInSec > 0 { + req.TableOptions.DeviationCellVersionInSec = proto.Int64(request.TableOption.DeviationCellVersionInSec) + } + } + + if request.StreamSpec != nil { + if request.StreamSpec.EnableStream == true { + req.StreamSpec = &otsprotocol.StreamSpecification{ + EnableStream: &request.StreamSpec.EnableStream, + ExpirationTime: &request.StreamSpec.ExpirationTime} + } else { + req.StreamSpec = &otsprotocol.StreamSpecification{EnableStream: &request.StreamSpec.EnableStream} + } + } + + resp := new(otsprotocol.UpdateTableResponse) + response := new(UpdateTableResponse) + + if err := tableStoreClient.doRequestWithRetry(updateTableUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.ReservedThroughput = &ReservedThroughput{ + Readcap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Read)), + Writecap: int(*(resp.ReservedThroughputDetails.CapacityUnit.Write))} + response.TableOption = &TableOption{ + TimeToAlive: int(*resp.TableOptions.TimeToLive), + MaxVersion: int(*resp.TableOptions.MaxVersions), + DeviationCellVersionInSec: *resp.TableOptions.DeviationCellVersionInSec} + + if *resp.StreamDetails.EnableStream { + response.StreamDetails = &StreamDetails{ + EnableStream: *resp.StreamDetails.EnableStream, + StreamId: (*StreamId)(resp.StreamDetails.StreamId), + ExpirationTime: *resp.StreamDetails.ExpirationTime, + LastEnableTime: *resp.StreamDetails.LastEnableTime} + } else { + response.StreamDetails = &StreamDetails{ + EnableStream: false} + } + return response, nil +} + +// Put or update a row in a table. The operation is determined by CheckingType, +// which has three options: NO, UPDATE, INSERT. The transaction id is optional. +// 插入或更新行数据。操作针对数据的存在性包含三种检查类型:NO(不检查),UPDATE +// (更新,数据必须存在)和INSERT(插入,数据必须不存在)。事务ID是可选项。 +// +// @param builder The builder for putting a row. 插入或更新数据的Builder。 +// @return Void. 
+func (tableStoreClient *TableStoreClient) PutRow(request *PutRowRequest) (*PutRowResponse, error) {
+	if request == nil {
+		return nil, nil
+	}
+
+	if request.PutRowChange == nil {
+		return nil, nil
+	}
+
+	req := new(otsprotocol.PutRowRequest)
+	req.TableName = proto.String(request.PutRowChange.TableName)
+	req.Row = request.PutRowChange.Serialize()
+
+	condition := new(otsprotocol.Condition)
+	condition.RowExistence = request.PutRowChange.Condition.buildCondition()
+	if request.PutRowChange.Condition.ColumnCondition != nil {
+		condition.ColumnCondition = request.PutRowChange.Condition.ColumnCondition.Serialize()
+	}
+
+	if request.PutRowChange.ReturnType == ReturnType_RT_PK {
+		content := otsprotocol.ReturnContent{ReturnType: otsprotocol.ReturnType_RT_PK.Enum()}
+		req.ReturnContent = &content
+	}
+
+	if request.PutRowChange.TransactionId != nil {
+		req.TransactionId = request.PutRowChange.TransactionId
+	}
+
+	req.Condition = condition
+
+	resp := new(otsprotocol.PutRowResponse)
+	response := &PutRowResponse{}
+	if err := tableStoreClient.doRequestWithRetry(putRowUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	response.ConsumedCapacityUnit = &ConsumedCapacityUnit{}
+	response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read
+	response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write
+
+	if request.PutRowChange.ReturnType == ReturnType_RT_PK {
+		rows, err := readRowsWithHeader(bytes.NewReader(resp.Row))
+		if err != nil {
+			return response, err
+		}
+
+		for _, pk := range rows[0].primaryKey {
+			pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value}
+			response.PrimaryKey.PrimaryKeys = append(response.PrimaryKey.PrimaryKeys, pkColumn)
+		}
+	}
+
+	return response, nil
+}
+
+// Delete a row with the specified primary key.
+// @param request The DeleteRowRequest.
+// @return The DeleteRowResponse, or an error.
+func (tableStoreClient *TableStoreClient) DeleteRow(request *DeleteRowRequest) (*DeleteRowResponse, error) {
+	req := new(otsprotocol.DeleteRowRequest)
+	req.TableName = proto.String(request.DeleteRowChange.TableName)
+	req.Condition = request.DeleteRowChange.getCondition()
+	req.PrimaryKey = request.DeleteRowChange.PrimaryKey.Build(true)
+
+	if request.DeleteRowChange.TransactionId != nil {
+		req.TransactionId = request.DeleteRowChange.TransactionId
+	}
+
+	resp := new(otsprotocol.DeleteRowResponse)
+	response := &DeleteRowResponse{}
+	if err := tableStoreClient.doRequestWithRetry(deleteRowUri, req, resp, &response.ResponseInfo); err != nil {
+		return nil, err
+	}
+
+	response.ConsumedCapacityUnit = &ConsumedCapacityUnit{}
+	response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read
+	response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write
+	return response, nil
+}
+
+// row API
+// Get the data of a row or some columns.
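+//
+// A minimal illustrative query (a sketch; names are hypothetical):
+//
+//	criteria := new(SingleRowQueryCriteria)
+//	criteria.TableName = "sample_table"
+//	pk := new(PrimaryKey)
+//	pk.AddPrimaryKeyColumn("pk0", "key1")
+//	criteria.PrimaryKey = pk
+//	criteria.MaxVersion = 1
+//	resp, err := client.GetRow(&GetRowRequest{SingleRowQueryCriteria: criteria})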
+// +// @param getrowrequest +func (tableStoreClient *TableStoreClient) GetRow(request *GetRowRequest) (*GetRowResponse, error) { + req := new(otsprotocol.GetRowRequest) + resp := new(otsprotocol.GetRowResponse) + + req.TableName = proto.String(request.SingleRowQueryCriteria.TableName) + + if (request.SingleRowQueryCriteria.getColumnsToGet() != nil) && len(request.SingleRowQueryCriteria.getColumnsToGet()) > 0 { + req.ColumnsToGet = request.SingleRowQueryCriteria.getColumnsToGet() + } + + req.PrimaryKey = request.SingleRowQueryCriteria.PrimaryKey.Build(false) + + if request.SingleRowQueryCriteria.MaxVersion != 0 { + req.MaxVersions = proto.Int32(int32(request.SingleRowQueryCriteria.MaxVersion)) + } + + if request.SingleRowQueryCriteria.TransactionId != nil { + req.TransactionId = request.SingleRowQueryCriteria.TransactionId + } + + if request.SingleRowQueryCriteria.StartColumn != nil { + req.StartColumn = request.SingleRowQueryCriteria.StartColumn + } + + if request.SingleRowQueryCriteria.EndColumn != nil { + req.EndColumn = request.SingleRowQueryCriteria.EndColumn + } + + if request.SingleRowQueryCriteria.TimeRange != nil { + if request.SingleRowQueryCriteria.TimeRange.Specific != 0 { + req.TimeRange = &otsprotocol.TimeRange{SpecificTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.Specific)} + } else { + req.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.Start), EndTime: proto.Int64(request.SingleRowQueryCriteria.TimeRange.End)} + } + } else if request.SingleRowQueryCriteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + if request.SingleRowQueryCriteria.Filter != nil { + req.Filter = request.SingleRowQueryCriteria.Filter.Serialize() + } + + response := &GetRowResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + if err := tableStoreClient.doRequestWithRetry(getRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + + if len(resp.Row) == 0 { + return response, nil + } + + rows, err := readRowsWithHeader(bytes.NewReader(resp.Row)) + if err != nil { + return nil, err + } + + for _, pk := range rows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + response.PrimaryKey.PrimaryKeys = append(response.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range rows[0].cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + response.Columns = append(response.Columns, dataColumn) + } + + return response, nil +} + +// Update row +// @param UpdateRowRequest +func (tableStoreClient *TableStoreClient) UpdateRow(request *UpdateRowRequest) (*UpdateRowResponse, error) { + req := new(otsprotocol.UpdateRowRequest) + resp := new(otsprotocol.UpdateRowResponse) + + req.TableName = proto.String(request.UpdateRowChange.TableName) + req.Condition = request.UpdateRowChange.getCondition() + req.RowChange = request.UpdateRowChange.Serialize() + if request.UpdateRowChange.TransactionId != nil { + req.TransactionId = request.UpdateRowChange.TransactionId + } + + response := &UpdateRowResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + + if request.UpdateRowChange.ReturnType == ReturnType_RT_AFTER_MODIFY { + content := otsprotocol.ReturnContent{ReturnType: otsprotocol.ReturnType_RT_AFTER_MODIFY.Enum()} + for _, column := range 
request.UpdateRowChange.ColumnNamesToReturn { + content.ReturnColumnNames = append(content.ReturnColumnNames, column) + } + req.ReturnContent = &content + } + + if err := tableStoreClient.doRequestWithRetry(updateRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + if request.UpdateRowChange.ReturnType == ReturnType_RT_AFTER_MODIFY { + plainbufferRow, err := readRowsWithHeader(bytes.NewReader(resp.Row)) + if err != nil { + return response, err + } + for _, cell := range plainbufferRow[0].cells { + attribute := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + response.Columns = append(response.Columns, attribute) + } + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + return response, nil +} + +// Batch Get Row +// @param BatchGetRowRequest +func (tableStoreClient *TableStoreClient) BatchGetRow(request *BatchGetRowRequest) (*BatchGetRowResponse, error) { + req := new(otsprotocol.BatchGetRowRequest) + + var tablesInBatch []*otsprotocol.TableInBatchGetRowRequest + + for _, Criteria := range request.MultiRowQueryCriteria { + table := new(otsprotocol.TableInBatchGetRowRequest) + table.TableName = proto.String(Criteria.TableName) + table.ColumnsToGet = Criteria.ColumnsToGet + + if Criteria.StartColumn != nil { + table.StartColumn = Criteria.StartColumn + } + + if Criteria.EndColumn != nil { + table.EndColumn = Criteria.EndColumn + } + + if Criteria.Filter != nil { + table.Filter = Criteria.Filter.Serialize() + } + + if Criteria.MaxVersion != 0 { + table.MaxVersions = proto.Int32(int32(Criteria.MaxVersion)) + } + + if Criteria.TimeRange != nil { + if Criteria.TimeRange.Specific != 0 { + table.TimeRange = &otsprotocol.TimeRange{SpecificTime: proto.Int64(Criteria.TimeRange.Specific)} + } else { + table.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(Criteria.TimeRange.Start), EndTime: proto.Int64(Criteria.TimeRange.End)} + } + } else if Criteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + for _, pk := range Criteria.PrimaryKey { + pkWithBytes := pk.Build(false) + table.PrimaryKey = append(table.PrimaryKey, pkWithBytes) + } + + tablesInBatch = append(tablesInBatch, table) + } + + req.Tables = tablesInBatch + resp := new(otsprotocol.BatchGetRowResponse) + + response := &BatchGetRowResponse{TableToRowsResult: make(map[string][]RowResult)} + if err := tableStoreClient.doRequestWithRetry(batchGetRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + for _, table := range resp.Tables { + index := int32(0) + for _, row := range table.Rows { + rowResult := &RowResult{TableName: *table.TableName, IsSucceed: *row.IsOk, ConsumedCapacityUnit: &ConsumedCapacityUnit{}, Index: index} + index++ + if *row.IsOk == false { + rowResult.Error = Error{Code: *row.Error.Code, Message: *row.Error.Message} + } else { + // len == 0 means row not exist + if len(row.Row) > 0 { + rows, err := readRowsWithHeader(bytes.NewReader(row.Row)) + if err != nil { + return nil, err + } + + for _, pk := range rows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + rowResult.PrimaryKey.PrimaryKeys = append(rowResult.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range rows[0].cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + rowResult.Columns 
= append(rowResult.Columns, dataColumn) + } + } + + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + } + + response.TableToRowsResult[*table.TableName] = append(response.TableToRowsResult[*table.TableName], *rowResult) + } + + } + return response, nil +} + +// Batch Write Row +// @param BatchWriteRowRequest +func (tableStoreClient *TableStoreClient) BatchWriteRow(request *BatchWriteRowRequest) (*BatchWriteRowResponse, error) { + req := new(otsprotocol.BatchWriteRowRequest) + + var tablesInBatch []*otsprotocol.TableInBatchWriteRowRequest + + for key, value := range request.RowChangesGroupByTable { + table := new(otsprotocol.TableInBatchWriteRowRequest) + table.TableName = proto.String(key) + + for _, row := range value { + rowInBatch := &otsprotocol.RowInBatchWriteRowRequest{} + rowInBatch.Condition = row.getCondition() + rowInBatch.RowChange = row.Serialize() + rowInBatch.Type = row.getOperationType().Enum() + table.Rows = append(table.Rows, rowInBatch) + } + + tablesInBatch = append(tablesInBatch, table) + } + + req.Tables = tablesInBatch + + resp := new(otsprotocol.BatchWriteRowResponse) + response := &BatchWriteRowResponse{TableToRowsResult: make(map[string][]RowResult)} + + if err := tableStoreClient.doRequestWithRetry(batchWriteRowUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + for _, table := range resp.Tables { + index := int32(0) + for _, row := range table.Rows { + rowResult := &RowResult{TableName: *table.TableName, IsSucceed: *row.IsOk, ConsumedCapacityUnit: &ConsumedCapacityUnit{}, Index: index} + index++ + if *row.IsOk == false { + rowResult.Error = Error{Code: *row.Error.Code, Message: *row.Error.Message} + } else { + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + } /*else { + rows, err := readRowsWithHeader(bytes.NewReader(row.Row)) + if err != nil { + return nil, err + } + + for _, pk := range (rows[0].primaryKey) { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + rowResult.PrimaryKey.PrimaryKeys = append(rowResult.PrimaryKey.PrimaryKeys, pkColumn) + } + + for _, cell := range (rows[0].cells) { + dataColumn := &DataColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value} + rowResult.Columns = append(rowResult.Columns, dataColumn) + } + + rowResult.ConsumedCapacityUnit.Read = *row.Consumed.CapacityUnit.Read + rowResult.ConsumedCapacityUnit.Write = *row.Consumed.CapacityUnit.Write + }*/ + + response.TableToRowsResult[*table.TableName] = append(response.TableToRowsResult[*table.TableName], *rowResult) + } + } + return response, nil +} + +// Get Range +// @param GetRangeRequest +func (tableStoreClient *TableStoreClient) GetRange(request *GetRangeRequest) (*GetRangeResponse, error) { + req := new(otsprotocol.GetRangeRequest) + req.TableName = proto.String(request.RangeRowQueryCriteria.TableName) + req.Direction = request.RangeRowQueryCriteria.Direction.ToDirection().Enum() + + if request.RangeRowQueryCriteria.MaxVersion != 0 { + req.MaxVersions = proto.Int32(request.RangeRowQueryCriteria.MaxVersion) + } + + if request.RangeRowQueryCriteria.TransactionId != nil { + req.TransactionId = request.RangeRowQueryCriteria.TransactionId + } + + if request.RangeRowQueryCriteria.TimeRange != nil { + if request.RangeRowQueryCriteria.TimeRange.Specific != 0 { + req.TimeRange = 
&otsprotocol.TimeRange{SpecificTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.Specific)} + } else { + req.TimeRange = &otsprotocol.TimeRange{StartTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.Start), EndTime: proto.Int64(request.RangeRowQueryCriteria.TimeRange.End)} + } + } else if request.RangeRowQueryCriteria.MaxVersion == 0 { + return nil, errInvalidInput + } + + if request.RangeRowQueryCriteria.Limit != 0 { + req.Limit = proto.Int32(request.RangeRowQueryCriteria.Limit) + } + + if (request.RangeRowQueryCriteria.ColumnsToGet != nil) && len(request.RangeRowQueryCriteria.ColumnsToGet) > 0 { + req.ColumnsToGet = request.RangeRowQueryCriteria.ColumnsToGet + } + + if request.RangeRowQueryCriteria.Filter != nil { + req.Filter = request.RangeRowQueryCriteria.Filter.Serialize() + } + + if request.RangeRowQueryCriteria.StartColumn != nil { + req.StartColumn = request.RangeRowQueryCriteria.StartColumn + } + + if request.RangeRowQueryCriteria.EndColumn != nil { + req.EndColumn = request.RangeRowQueryCriteria.EndColumn + } + + req.InclusiveStartPrimaryKey = request.RangeRowQueryCriteria.StartPrimaryKey.Build(false) + req.ExclusiveEndPrimaryKey = request.RangeRowQueryCriteria.EndPrimaryKey.Build(false) + + resp := new(otsprotocol.GetRangeResponse) + response := &GetRangeResponse{ConsumedCapacityUnit: &ConsumedCapacityUnit{}} + if err := tableStoreClient.doRequestWithRetry(getRangeUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.ConsumedCapacityUnit.Read = *resp.Consumed.CapacityUnit.Read + response.ConsumedCapacityUnit.Write = *resp.Consumed.CapacityUnit.Write + if len(resp.NextStartPrimaryKey) != 0 { + currentRows, err := readRowsWithHeader(bytes.NewReader(resp.NextStartPrimaryKey)) + if err != nil { + return nil, err + } + + response.NextStartPrimaryKey = &PrimaryKey{} + for _, pk := range currentRows[0].primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + response.NextStartPrimaryKey.PrimaryKeys = append(response.NextStartPrimaryKey.PrimaryKeys, pkColumn) + } + } + + if len(resp.Rows) == 0 { + return response, nil + } + + rows, err := readRowsWithHeader(bytes.NewReader(resp.Rows)) + if err != nil { + return response, err + } + + for _, row := range rows { + currentRow := &Row{} + currentpk := new(PrimaryKey) + for _, pk := range row.primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + currentpk.PrimaryKeys = append(currentpk.PrimaryKeys, pkColumn) + } + + currentRow.PrimaryKey = currentpk + + for _, cell := range row.cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + currentRow.Columns = append(currentRow.Columns, dataColumn) + } + + response.Rows = append(response.Rows, currentRow) + } + + return response, nil + +} + +func (client *TableStoreClient) ListStream(req *ListStreamRequest) (*ListStreamResponse, error) { + pbReq := &otsprotocol.ListStreamRequest{} + pbReq.TableName = req.TableName + + pbResp := otsprotocol.ListStreamResponse{} + resp := ListStreamResponse{} + if err := client.doRequestWithRetry(listStreamUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + streams := make([]Stream, len(pbResp.Streams)) + for i, pbStream := range pbResp.Streams { + streams[i] = Stream{ + Id: (*StreamId)(pbStream.StreamId), + TableName: pbStream.TableName, + CreationTime: *pbStream.CreationTime} + } + resp.Streams = 
streams[:] + return &resp, nil +} + +func (client *TableStoreClient) DescribeStream(req *DescribeStreamRequest) (*DescribeStreamResponse, error) { + pbReq := &otsprotocol.DescribeStreamRequest{} + { + pbReq.StreamId = (*string)(req.StreamId) + pbReq.InclusiveStartShardId = (*string)(req.InclusiveStartShardId) + pbReq.ShardLimit = req.ShardLimit + } + pbResp := otsprotocol.DescribeStreamResponse{} + resp := DescribeStreamResponse{} + if err := client.doRequestWithRetry(describeStreamUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + resp.StreamId = (*StreamId)(pbResp.StreamId) + resp.ExpirationTime = *pbResp.ExpirationTime + resp.TableName = pbResp.TableName + resp.CreationTime = *pbResp.CreationTime + Assert(pbResp.StreamStatus != nil, "StreamStatus in DescribeStreamResponse is required.") + switch *pbResp.StreamStatus { + case otsprotocol.StreamStatus_STREAM_ENABLING: + resp.Status = SS_Enabling + case otsprotocol.StreamStatus_STREAM_ACTIVE: + resp.Status = SS_Active + } + resp.NextShardId = (*ShardId)(pbResp.NextShardId) + shards := make([]*StreamShard, len(pbResp.Shards)) + for i, pbShard := range pbResp.Shards { + shards[i] = &StreamShard{ + SelfShard: (*ShardId)(pbShard.ShardId), + FatherShard: (*ShardId)(pbShard.ParentId), + MotherShard: (*ShardId)(pbShard.ParentSiblingId)} + } + resp.Shards = shards[:] + return &resp, nil +} + +func (client *TableStoreClient) GetShardIterator(req *GetShardIteratorRequest) (*GetShardIteratorResponse, error) { + pbReq := &otsprotocol.GetShardIteratorRequest{ + StreamId: (*string)(req.StreamId), + ShardId: (*string)(req.ShardId)} + + if req.Timestamp != nil { + pbReq.Timestamp = req.Timestamp + } + + if req.Token != nil { + pbReq.Token = req.Token + } + + pbResp := otsprotocol.GetShardIteratorResponse{} + resp := GetShardIteratorResponse{} + if err := client.doRequestWithRetry(getShardIteratorUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + resp.ShardIterator = (*ShardIterator)(pbResp.ShardIterator) + resp.Token = pbResp.NextToken + return &resp, nil +} + +func (client TableStoreClient) GetStreamRecord(req *GetStreamRecordRequest) (*GetStreamRecordResponse, error) { + pbReq := &otsprotocol.GetStreamRecordRequest{ + ShardIterator: (*string)(req.ShardIterator)} + if req.Limit != nil { + pbReq.Limit = req.Limit + } + + pbResp := otsprotocol.GetStreamRecordResponse{} + resp := GetStreamRecordResponse{} + if err := client.doRequestWithRetry(getStreamRecordUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + if pbResp.NextShardIterator != nil { + resp.NextShardIterator = (*ShardIterator)(pbResp.NextShardIterator) + } + records := make([]*StreamRecord, len(pbResp.StreamRecords)) + for i, pbRecord := range pbResp.StreamRecords { + record := StreamRecord{} + records[i] = &record + + switch *pbRecord.ActionType { + case otsprotocol.ActionType_PUT_ROW: + record.Type = AT_Put + case otsprotocol.ActionType_UPDATE_ROW: + record.Type = AT_Update + case otsprotocol.ActionType_DELETE_ROW: + record.Type = AT_Delete + } + + plainRows, err := readRowsWithHeader(bytes.NewReader(pbRecord.Record)) + if err != nil { + return nil, err + } + Assert(len(plainRows) == 1, + "There must be exactly one row in a StreamRecord.") + plainRow := plainRows[0] + pkey := PrimaryKey{} + record.PrimaryKey = &pkey + pkey.PrimaryKeys = make([]*PrimaryKeyColumn, len(plainRow.primaryKey)) + for i, pk := range plainRow.primaryKey { + pkc := PrimaryKeyColumn{ + ColumnName: string(pk.cellName), + Value: 
pk.cellValue.Value} + pkey.PrimaryKeys[i] = &pkc + } + Assert(plainRow.extension != nil, + "extension in a stream record is required.") + record.Info = plainRow.extension + record.Columns = make([]*RecordColumn, len(plainRow.cells)) + for i, plainCell := range plainRow.cells { + cell := RecordColumn{} + record.Columns[i] = &cell + + name := string(plainCell.cellName) + cell.Name = &name + if plainCell.cellValue != nil { + cell.Type = RCT_Put + } else { + if plainCell.cellTimestamp > 0 { + cell.Type = RCT_DeleteOneVersion + } else { + cell.Type = RCT_DeleteAllVersions + } + } + switch cell.Type { + case RCT_Put: + cell.Value = plainCell.cellValue.Value + fallthrough + case RCT_DeleteOneVersion: + cell.Timestamp = &plainCell.cellTimestamp + case RCT_DeleteAllVersions: + break + } + } + } + resp.Records = records + return &resp, nil +} + +func (client TableStoreClient) ComputeSplitPointsBySize(req *ComputeSplitPointsBySizeRequest) (*ComputeSplitPointsBySizeResponse, error) { + pbReq := &otsprotocol.ComputeSplitPointsBySizeRequest{ + TableName: &(req.TableName), + SplitSize: &(req.SplitSize), + } + + pbResp := otsprotocol.ComputeSplitPointsBySizeResponse{} + resp := ComputeSplitPointsBySizeResponse{} + if err := client.doRequestWithRetry(computeSplitPointsBySizeRequestUri, pbReq, &pbResp, &resp.ResponseInfo); err != nil { + return nil, err + } + + beginPk := &PrimaryKey{} + endPk := &PrimaryKey{} + for _, pkSchema := range pbResp.Schema { + beginPk.AddPrimaryKeyColumnWithMinValue(*pkSchema.Name) + endPk.AddPrimaryKeyColumnWithMaxValue(*pkSchema.Name) + } + lastPk := beginPk + nowPk := endPk + + for _, pbRecord := range pbResp.SplitPoints { + plainRows, err := readRowsWithHeader(bytes.NewReader(pbRecord)) + if err != nil { + return nil, err + } + + nowPk = &PrimaryKey{} + for _, pk := range plainRows[0].primaryKey { + nowPk.AddPrimaryKeyColumn(string(pk.cellName), pk.cellValue.Value) + } + + if len(pbResp.Schema) > 1 { + for i := 1; i < len(pbResp.Schema); i++ { + nowPk.AddPrimaryKeyColumnWithMinValue(*pbResp.Schema[i].Name) + } + } + + newSplit := &Split{LowerBound: lastPk, UpperBound: nowPk} + resp.Splits = append(resp.Splits, newSplit) + lastPk = nowPk + + } + + newSplit := &Split{LowerBound: lastPk, UpperBound: endPk} + resp.Splits = append(resp.Splits, newSplit) + + index := 0 + for _, pbLocation := range pbResp.Locations { + count := *pbLocation.Repeat + value := *pbLocation.Location + + for i := int64(0); i < count; i++ { + resp.Splits[index].Location = value + index++ + } + } + return &resp, nil +} + +func (client *TableStoreClient) StartLocalTransaction(request *StartLocalTransactionRequest) (*StartLocalTransactionResponse, error) { + req := new(otsprotocol.StartLocalTransactionRequest) + resp := new(otsprotocol.StartLocalTransactionResponse) + + req.TableName = proto.String(request.TableName) + req.Key = request.PrimaryKey.Build(false) + + response := &StartLocalTransactionResponse{} + if err := client.doRequestWithRetry(createlocaltransactionuri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + response.TransactionId = resp.TransactionId + return response, nil +} + +func (client *TableStoreClient) CommitTransaction(request *CommitTransactionRequest) (*CommitTransactionResponse, error) { + req := new(otsprotocol.CommitTransactionRequest) + resp := new(otsprotocol.CommitTransactionResponse) + + req.TransactionId = request.TransactionId + + response := &CommitTransactionResponse{} + if err := client.doRequestWithRetry(committransactionuri, req, resp, 
&response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} + +func (client *TableStoreClient) AbortTransaction(request *AbortTransactionRequest) (*AbortTransactionResponse, error) { + req := new(otsprotocol.AbortTransactionRequest) + resp := new(otsprotocol.AbortTransactionResponse) + + req.TransactionId = request.TransactionId + + response := &AbortTransactionResponse{} + if err := client.doRequestWithRetry(aborttransactionuri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + + return response, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go new file mode 100644 index 00000000..2c4b1c72 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/error.go @@ -0,0 +1,54 @@ +package tablestore + +import ( + "errors" + "fmt" +) + +var ( + errMissMustHeader = func(header string) error { + return errors.New("[tablestore] miss must header: " + header) + } + errTableNameTooLong = func(name string) error { + return errors.New("[tablestore] table name: \"" + name + "\" too long") + } + + errInvalidPartitionType = errors.New("[tablestore] invalid partition key") + errMissPrimaryKey = errors.New("[tablestore] missing primary key") + errPrimaryKeyTooMuch = errors.New("[tablestore] primary key too much") + errMultiDeleteRowsTooMuch = errors.New("[tablestore] multi delete rows too much") + errCreateTableNoPrimaryKey = errors.New("[tablestore] create table no primary key") + errUnexpectIoEnd = errors.New("[tablestore] unexpect io end") + errTag = errors.New("[tablestore] unexpect tag") + errNoChecksum = errors.New("[tablestore] expect checksum") + errChecksum = errors.New("[tablestore] checksum failed") + errInvalidInput = errors.New("[tablestore] invalid input") +) + +const ( + OTS_CLIENT_UNKNOWN = "OTSClientUnknownError" + + ROW_OPERATION_CONFLICT = "OTSRowOperationConflict" + NOT_ENOUGH_CAPACITY_UNIT = "OTSNotEnoughCapacityUnit" + TABLE_NOT_READY = "OTSTableNotReady" + PARTITION_UNAVAILABLE = "OTSPartitionUnavailable" + SERVER_BUSY = "OTSServerBusy" + STORAGE_SERVER_BUSY = "OTSStorageServerBusy" + QUOTA_EXHAUSTED = "OTSQuotaExhausted" + + STORAGE_TIMEOUT = "OTSTimeout" + SERVER_UNAVAILABLE = "OTSServerUnavailable" + INTERNAL_SERVER_ERROR = "OTSInternalServerError" +) + +type OtsError struct { + Code string + Message string + RequestId string + + HttpStatusCode int +} + +func (e *OtsError) Error() string { + return fmt.Sprintf("%s %s %s", e.Code, e.Message, e.RequestId) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go new file mode 100644 index 00000000..7e0e3b26 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/interface.go @@ -0,0 +1,22 @@ +package tablestore + +type TableStoreApi interface { + CreateTable(request *CreateTableRequest) (*CreateTableResponse, error) + ListTable() (*ListTableResponse, error) + DeleteTable(request *DeleteTableRequest) (*DeleteTableResponse, error) + DescribeTable(request *DescribeTableRequest) (*DescribeTableResponse, error) + UpdateTable(request *UpdateTableRequest) (*UpdateTableResponse, error) + PutRow(request *PutRowRequest) (*PutRowResponse, error) + DeleteRow(request *DeleteRowRequest) (*DeleteRowResponse, error) + GetRow(request *GetRowRequest) (*GetRowResponse, error) + 
UpdateRow(request *UpdateRowRequest) (*UpdateRowResponse, error)
+	BatchGetRow(request *BatchGetRowRequest) (*BatchGetRowResponse, error)
+	BatchWriteRow(request *BatchWriteRowRequest) (*BatchWriteRowResponse, error)
+	GetRange(request *GetRangeRequest) (*GetRangeResponse, error)
+
+	// stream related
+	ListStream(request *ListStreamRequest) (*ListStreamResponse, error)
+	DescribeStream(request *DescribeStreamRequest) (*DescribeStreamResponse, error)
+	GetShardIterator(request *GetShardIteratorRequest) (*GetShardIteratorResponse, error)
+	GetStreamRecord(request *GetStreamRecordRequest) (*GetStreamRecordResponse, error)
+}
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go
new file mode 100644
index 00000000..3161a51e
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/model.go
@@ -0,0 +1,860 @@
+package tablestore
+
+import (
+	"fmt"
+	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
+	"github.com/golang/protobuf/proto"
+	"math/rand"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// @class TableStoreClient
+// TableStoreClient connects to the OTS (Table Store) service, authenticates
+// requests, and exposes operations to create/list/delete tables and table
+// groups and to get/put/update/delete rows.
+// Note: TableStoreClient is thread-safe.
+type TableStoreClient struct {
+	endPoint        string
+	instanceName    string
+	accessKeyId     string
+	accessKeySecret string
+	securityToken   string
+
+	httpClient IHttpClient
+	config     *TableStoreConfig
+	random     *rand.Rand
+
+	externalHeader      map[string]string
+	CustomizedRetryFunc CustomizedRetryNotMatterActions
+}
+
+type ClientOption func(*TableStoreClient)
+
+type TableStoreHttpClient struct {
+	httpClient *http.Client
+}
+
+// Use this to mock http.Client in tests.
+type IHttpClient interface {
+	Do(*http.Request) (*http.Response, error)
+	New(*http.Client)
+}
+
+func (httpClient *TableStoreHttpClient) Do(req *http.Request) (*http.Response, error) {
+	return httpClient.httpClient.Do(req)
+}
+
+func (httpClient *TableStoreHttpClient) New(client *http.Client) {
+	httpClient.httpClient = client
+}
+
+type HTTPTimeout struct {
+	ConnectionTimeout time.Duration
+	RequestTimeout    time.Duration
+}
+
+type TableStoreConfig struct {
+	RetryTimes         uint
+	MaxRetryTime       time.Duration
+	HTTPTimeout        HTTPTimeout
+	MaxIdleConnections int
+	Transport          http.RoundTripper
+}
+
+func NewDefaultTableStoreConfig() *TableStoreConfig {
+	httpTimeout := &HTTPTimeout{
+		ConnectionTimeout: time.Second * 15,
+		RequestTimeout:    time.Second * 30}
+	config := &TableStoreConfig{
+		RetryTimes:         10,
+		HTTPTimeout:        *httpTimeout,
+		MaxRetryTime:       time.Second * 5,
+		MaxIdleConnections: 2000}
+	return config
+}
+
+type CreateTableRequest struct {
+	TableMeta          *TableMeta
+	TableOption        *TableOption
+	ReservedThroughput *ReservedThroughput
+	StreamSpec         *StreamSpecification
+	IndexMetas         []*IndexMeta
+}
+
+type CreateIndexRequest struct {
+	MainTableName   string
+	IndexMeta       *IndexMeta
+	IncludeBaseData bool
+}
+
+type DeleteIndexRequest struct {
+	MainTableName string
+	IndexName     string
+}
+
+type ResponseInfo struct {
+	RequestId string
+}
+
+type CreateTableResponse struct {
+	ResponseInfo
+}
+
+type CreateIndexResponse struct {
+	ResponseInfo
+}
+
+type DeleteIndexResponse struct {
+	ResponseInfo
+}
+
+type 
DeleteTableResponse struct { + ResponseInfo +} + +type TableMeta struct { + TableName string + SchemaEntry []*PrimaryKeySchema + DefinedColumns []*DefinedColumnSchema +} + +type PrimaryKeySchema struct { + Name *string + Type *PrimaryKeyType + Option *PrimaryKeyOption +} + +type PrimaryKey struct { + PrimaryKeys []*PrimaryKeyColumn +} + +type TableOption struct { + TimeToAlive, MaxVersion int + DeviationCellVersionInSec int64 +} + +type ReservedThroughput struct { + Readcap, Writecap int +} + +type ListTableResponse struct { + TableNames []string + ResponseInfo +} + +type DeleteTableRequest struct { + TableName string +} + +type DescribeTableRequest struct { + TableName string +} + +type DescribeTableResponse struct { + TableMeta *TableMeta + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamDetails *StreamDetails + IndexMetas []*IndexMeta + ResponseInfo +} + +type UpdateTableRequest struct { + TableName string + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamSpec *StreamSpecification +} + +type UpdateTableResponse struct { + TableOption *TableOption + ReservedThroughput *ReservedThroughput + StreamDetails *StreamDetails + ResponseInfo +} + +type ConsumedCapacityUnit struct { + Read int32 + Write int32 +} + +type PutRowResponse struct { + ConsumedCapacityUnit *ConsumedCapacityUnit + PrimaryKey PrimaryKey + ResponseInfo +} + +type DeleteRowResponse struct { + ConsumedCapacityUnit *ConsumedCapacityUnit + ResponseInfo +} + +type UpdateRowResponse struct { + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + ResponseInfo +} + +type PrimaryKeyType int32 + +const ( + PrimaryKeyType_INTEGER PrimaryKeyType = 1 + PrimaryKeyType_STRING PrimaryKeyType = 2 + PrimaryKeyType_BINARY PrimaryKeyType = 3 +) + +const ( + DefaultRetryInterval = 10 + MaxRetryInterval = 320 +) + +type PrimaryKeyOption int32 + +const ( + NONE PrimaryKeyOption = 0 + AUTO_INCREMENT PrimaryKeyOption = 1 + MIN PrimaryKeyOption = 2 + MAX PrimaryKeyOption = 3 +) + +type PrimaryKeyColumn struct { + ColumnName string + Value interface{} + PrimaryKeyOption PrimaryKeyOption +} + +func (this *PrimaryKeyColumn) String() string { + xs := make([]string, 0) + xs = append(xs, fmt.Sprintf("\"Name\": \"%s\"", this.ColumnName)) + switch this.PrimaryKeyOption { + case NONE: + xs = append(xs, fmt.Sprintf("\"Value\": \"%s\"", this.Value)) + case MIN: + xs = append(xs, "\"Value\": -inf") + case MAX: + xs = append(xs, "\"Value\": +inf") + case AUTO_INCREMENT: + xs = append(xs, "\"Value\": auto-incr") + } + return fmt.Sprintf("{%s}", strings.Join(xs, ", ")) +} + +type AttributeColumn struct { + ColumnName string + Value interface{} + Timestamp int64 +} + +type TimeRange struct { + Start int64 + End int64 + Specific int64 +} + +type ColumnToUpdate struct { + ColumnName string + Type byte + Timestamp int64 + HasType bool + HasTimestamp bool + IgnoreValue bool + Value interface{} +} + +type RowExistenceExpectation int + +const ( + RowExistenceExpectation_IGNORE RowExistenceExpectation = 0 + RowExistenceExpectation_EXPECT_EXIST RowExistenceExpectation = 1 + RowExistenceExpectation_EXPECT_NOT_EXIST RowExistenceExpectation = 2 +) + +type ComparatorType int32 + +const ( + CT_EQUAL ComparatorType = 1 + CT_NOT_EQUAL ComparatorType = 2 + CT_GREATER_THAN ComparatorType = 3 + CT_GREATER_EQUAL ComparatorType = 4 + CT_LESS_THAN ComparatorType = 5 + CT_LESS_EQUAL ComparatorType = 6 +) + +type LogicalOperator int32 + +const ( + LO_NOT LogicalOperator = 1 + LO_AND LogicalOperator = 2 + LO_OR 
LogicalOperator = 3 +) + +type FilterType int32 + +const ( + FT_SINGLE_COLUMN_VALUE FilterType = 1 + FT_COMPOSITE_COLUMN_VALUE FilterType = 2 + FT_COLUMN_PAGINATION FilterType = 3 +) + +type ColumnFilter interface { + Serialize() []byte + ToFilter() *otsprotocol.Filter +} + +type VariantType int32 + +const ( + Variant_INTEGER VariantType = 0; + Variant_DOUBLE VariantType = 1; + //VT_BOOLEAN = 2; + Variant_STRING VariantType = 3; +) + +type ValueTransferRule struct { + Regex string + Cast_type VariantType +} + +type SingleColumnCondition struct { + Comparator *ComparatorType + ColumnName *string + ColumnValue interface{} //[]byte + FilterIfMissing bool + LatestVersionOnly bool + TransferRule *ValueTransferRule +} + +type ReturnType int32 + +const ( + ReturnType_RT_NONE ReturnType = 0 + ReturnType_RT_PK ReturnType = 1 + ReturnType_RT_AFTER_MODIFY ReturnType = 2 +) + +type PaginationFilter struct { + Offset int32 + Limit int32 +} + +type CompositeColumnValueFilter struct { + Operator LogicalOperator + Filters []ColumnFilter +} + +func (ccvfilter *CompositeColumnValueFilter) Serialize() []byte { + result, _ := proto.Marshal(ccvfilter.ToFilter()) + return result +} + +func (ccvfilter *CompositeColumnValueFilter) ToFilter() *otsprotocol.Filter { + compositefilter := NewCompositeFilter(ccvfilter.Filters, ccvfilter.Operator) + compositeFilterToBytes, _ := proto.Marshal(compositefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_COMPOSITE_COLUMN_VALUE.Enum() + filter.Filter = compositeFilterToBytes + return filter +} + +func (ccvfilter *CompositeColumnValueFilter) AddFilter(filter ColumnFilter) { + ccvfilter.Filters = append(ccvfilter.Filters, filter) +} + +func (condition *SingleColumnCondition) ToFilter() *otsprotocol.Filter { + singlefilter := NewSingleColumnValueFilter(condition) + singleFilterToBytes, _ := proto.Marshal(singlefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_SINGLE_COLUMN_VALUE.Enum() + filter.Filter = singleFilterToBytes + return filter +} + +func (condition *SingleColumnCondition) Serialize() []byte { + result, _ := proto.Marshal(condition.ToFilter()) + return result +} + +func (pageFilter *PaginationFilter) ToFilter() *otsprotocol.Filter { + compositefilter := NewPaginationFilter(pageFilter) + compositeFilterToBytes, _ := proto.Marshal(compositefilter) + filter := new(otsprotocol.Filter) + filter.Type = otsprotocol.FilterType_FT_COLUMN_PAGINATION.Enum() + filter.Filter = compositeFilterToBytes + return filter +} + +func (pageFilter *PaginationFilter) Serialize() []byte { + result, _ := proto.Marshal(pageFilter.ToFilter()) + return result +} + +func NewTableOptionWithMaxVersion(maxVersion int) *TableOption { + tableOption := new(TableOption) + tableOption.TimeToAlive = -1 + tableOption.MaxVersion = maxVersion + return tableOption +} + +func NewTableOption(timeToAlive int, maxVersion int) *TableOption { + tableOption := new(TableOption) + tableOption.TimeToAlive = timeToAlive + tableOption.MaxVersion = maxVersion + return tableOption +} + +type RowCondition struct { + RowExistenceExpectation RowExistenceExpectation + ColumnCondition ColumnFilter +} + +type PutRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Columns []AttributeColumn + Condition *RowCondition + ReturnType ReturnType + TransactionId *string +} + +type PutRowRequest struct { + PutRowChange *PutRowChange +} + +type DeleteRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Condition *RowCondition + TransactionId 
*string +} + +type DeleteRowRequest struct { + DeleteRowChange *DeleteRowChange +} + +type SingleRowQueryCriteria struct { + ColumnsToGet []string + TableName string + PrimaryKey *PrimaryKey + MaxVersion int32 + TimeRange *TimeRange + Filter ColumnFilter + StartColumn *string + EndColumn *string + TransactionId *string +} + +type UpdateRowChange struct { + TableName string + PrimaryKey *PrimaryKey + Columns []ColumnToUpdate + Condition *RowCondition + TransactionId *string + ReturnType ReturnType + ColumnNamesToReturn []string +} + +type UpdateRowRequest struct { + UpdateRowChange *UpdateRowChange +} + +func (rowQueryCriteria *SingleRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *SingleRowQueryCriteria) SetStartColumn(columnName string) { + rowQueryCriteria.StartColumn = &columnName +} + +func (rowQueryCriteria *SingleRowQueryCriteria) SetEndtColumn(columnName string) { + rowQueryCriteria.EndColumn = &columnName +} + +func (rowQueryCriteria *SingleRowQueryCriteria) getColumnsToGet() []string { + return rowQueryCriteria.ColumnsToGet +} + +func (rowQueryCriteria *MultiRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *RangeRowQueryCriteria) AddColumnToGet(columnName string) { + rowQueryCriteria.ColumnsToGet = append(rowQueryCriteria.ColumnsToGet, columnName) +} + +func (rowQueryCriteria *MultiRowQueryCriteria) AddRow(pk *PrimaryKey) { + rowQueryCriteria.PrimaryKey = append(rowQueryCriteria.PrimaryKey, pk) +} + +type GetRowRequest struct { + SingleRowQueryCriteria *SingleRowQueryCriteria +} + +type MultiRowQueryCriteria struct { + PrimaryKey []*PrimaryKey + ColumnsToGet []string + TableName string + MaxVersion int + TimeRange *TimeRange + Filter ColumnFilter + StartColumn *string + EndColumn *string +} + +type BatchGetRowRequest struct { + MultiRowQueryCriteria []*MultiRowQueryCriteria +} + +type ColumnMap struct { + Columns map[string][]*AttributeColumn + columnsKey []string +} + +type GetRowResponse struct { + PrimaryKey PrimaryKey + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + columnMap *ColumnMap + ResponseInfo +} + +type Error struct { + Code string + Message string +} + +type RowResult struct { + TableName string + IsSucceed bool + Error Error + PrimaryKey PrimaryKey + Columns []*AttributeColumn + ConsumedCapacityUnit *ConsumedCapacityUnit + Index int32 +} + +type RowChange interface { + Serialize() []byte + getOperationType() otsprotocol.OperationType + getCondition() *otsprotocol.Condition + GetTableName() string +} + +type BatchGetRowResponse struct { + TableToRowsResult map[string][]RowResult + ResponseInfo +} + +type BatchWriteRowRequest struct { + RowChangesGroupByTable map[string][]RowChange +} + +type BatchWriteRowResponse struct { + TableToRowsResult map[string][]RowResult + ResponseInfo +} + +type Direction int32 + +const ( + FORWARD Direction = 0 + BACKWARD Direction = 1 +) + +type RangeRowQueryCriteria struct { + TableName string + StartPrimaryKey *PrimaryKey + EndPrimaryKey *PrimaryKey + ColumnsToGet []string + MaxVersion int32 + TimeRange *TimeRange + Filter ColumnFilter + Direction Direction + Limit int32 + StartColumn *string + EndColumn *string + TransactionId *string +} + +type GetRangeRequest struct { + RangeRowQueryCriteria *RangeRowQueryCriteria +} + +type Row struct { + PrimaryKey *PrimaryKey + Columns 
[]*AttributeColumn
+}
+
+type GetRangeResponse struct {
+	Rows                 []*Row
+	ConsumedCapacityUnit *ConsumedCapacityUnit
+	NextStartPrimaryKey  *PrimaryKey
+	ResponseInfo
+}
+
+type ListStreamRequest struct {
+	TableName *string
+}
+
+type Stream struct {
+	Id           *StreamId
+	TableName    *string
+	CreationTime int64
+}
+
+type ListStreamResponse struct {
+	Streams []Stream
+	ResponseInfo
+}
+
+type StreamSpecification struct {
+	EnableStream   bool
+	ExpirationTime int32 // must be positive. in hours
+}
+
+type StreamDetails struct {
+	EnableStream   bool
+	StreamId       *StreamId // nil when stream is disabled.
+	ExpirationTime int32     // in hours
+	LastEnableTime int64     // the last time the stream was enabled, in usec
+}
+
+type DescribeStreamRequest struct {
+	StreamId              *StreamId // required
+	InclusiveStartShardId *ShardId  // optional
+	ShardLimit            *int32    // optional
+}
+
+type DescribeStreamResponse struct {
+	StreamId       *StreamId    // required
+	ExpirationTime int32        // in hours
+	TableName      *string      // required
+	CreationTime   int64        // in usec
+	Status         StreamStatus // required
+	Shards         []*StreamShard
+	NextShardId    *ShardId // optional. nil means "no more shards"
+	ResponseInfo
+}
+
+type GetShardIteratorRequest struct {
+	StreamId  *StreamId // required
+	ShardId   *ShardId  // required
+	Timestamp *int64
+	Token     *string
+}
+
+type GetShardIteratorResponse struct {
+	ShardIterator *ShardIterator // required
+	Token         *string
+	ResponseInfo
+}
+
+type GetStreamRecordRequest struct {
+	ShardIterator *ShardIterator // required
+	Limit         *int32         // optional. max records which will reside in response
+}
+
+type GetStreamRecordResponse struct {
+	Records           []*StreamRecord
+	NextShardIterator *ShardIterator // optional. an indicator to be used to read more records in this shard
+	ResponseInfo
+}
+
+type ComputeSplitPointsBySizeRequest struct {
+	TableName string
+	SplitSize int64
+}
+
+type ComputeSplitPointsBySizeResponse struct {
+	SchemaEntry []*PrimaryKeySchema
+	Splits      []*Split
+	ResponseInfo
+}
+
+type Split struct {
+	LowerBound *PrimaryKey
+	UpperBound *PrimaryKey
+	Location   string
+}
+
+type StreamId string
+type ShardId string
+type ShardIterator string
+type StreamStatus int
+
+const (
+	SS_Enabling StreamStatus = iota
+	SS_Active
+)
+
+/*
+ * A shard may be split into two shards, or merged from two shards.
+ * After a split, both newly generated shards have the same FatherShard.
+ * After a merge, the newly generated shard has both a FatherShard and a MotherShard.
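+ * For example: if shard A is split into B and C, both B and C have
+ * FatherShard = A; if two shards are later merged into D, D records one
+ * parent in FatherShard and the other in MotherShard.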
+ */
+type StreamShard struct {
+	SelfShard   *ShardId // required
+	FatherShard *ShardId // optional
+	MotherShard *ShardId // optional
+}
+
+type StreamRecord struct {
+	Type       ActionType
+	Info       *RecordSequenceInfo // required
+	PrimaryKey *PrimaryKey         // required
+	Columns    []*RecordColumn
+}
+
+func (this *StreamRecord) String() string {
+	return fmt.Sprintf(
+		"{\"Type\":%s, \"PrimaryKey\":%s, \"Info\":%s, \"Columns\":%s}",
+		this.Type,
+		*this.PrimaryKey,
+		this.Info,
+		this.Columns)
+}
+
+type ActionType int
+
+const (
+	AT_Put ActionType = iota
+	AT_Update
+	AT_Delete
+)
+
+func (this ActionType) String() string {
+	switch this {
+	case AT_Put:
+		return "\"PutRow\""
+	case AT_Update:
+		return "\"UpdateRow\""
+	case AT_Delete:
+		return "\"DeleteRow\""
+	default:
+		panic(fmt.Sprintf("unknown action type: %d", int(this)))
+	}
+}
+
+type RecordSequenceInfo struct {
+	Epoch     int32
+	Timestamp int64
+	RowIndex  int32
+}
+
+func (this *RecordSequenceInfo) String() string {
+	return fmt.Sprintf(
+		"{\"Epoch\":%d, \"Timestamp\": %d, \"RowIndex\": %d}",
+		this.Epoch,
+		this.Timestamp,
+		this.RowIndex)
+}
+
+type RecordColumn struct {
+	Type      RecordColumnType
+	Name      *string     // required
+	Value     interface{} // optional. present when Type is RCT_Put
+	Timestamp *int64      // optional, in msec. present when Type is RCT_Put or RCT_DeleteOneVersion
+}
+
+func (this *RecordColumn) String() string {
+	xs := make([]string, 0)
+	xs = append(xs, fmt.Sprintf("\"Name\":%s", strconv.Quote(*this.Name)))
+	switch this.Type {
+	case RCT_DeleteAllVersions:
+		xs = append(xs, "\"Type\":\"DeleteAllVersions\"")
+	case RCT_DeleteOneVersion:
+		xs = append(xs, "\"Type\":\"DeleteOneVersion\"")
+		xs = append(xs, fmt.Sprintf("\"Timestamp\":%d", *this.Timestamp))
+	case RCT_Put:
+		xs = append(xs, "\"Type\":\"Put\"")
+		xs = append(xs, fmt.Sprintf("\"Timestamp\":%d", *this.Timestamp))
+		xs = append(xs, fmt.Sprintf("\"Value\":%s", this.Value))
+	}
+	return fmt.Sprintf("{%s}", strings.Join(xs, ", "))
+}
+
+type RecordColumnType int
+
+const (
+	RCT_Put RecordColumnType = iota
+	RCT_DeleteOneVersion
+	RCT_DeleteAllVersions
+)
+
+type IndexMeta struct {
+	IndexName      string
+	Primarykey     []string
+	DefinedColumns []string
+	IndexType      IndexType
+}
+
+type DefinedColumnSchema struct {
+	Name       string
+	ColumnType DefinedColumnType
+}
+
+type IndexType int32
+
+const (
+	IT_GLOBAL_INDEX IndexType = 1
+	IT_LOCAL_INDEX  IndexType = 2
+)
+
+type DefinedColumnType int32
+
+const (
+	/**
+	 * A 64-bit integer.
+	 */
+	DefinedColumn_INTEGER DefinedColumnType = 1
+
+	/**
+	 * A double-precision floating-point number.
+	 */
+	DefinedColumn_DOUBLE DefinedColumnType = 2
+
+	/**
+	 * A boolean value.
+	 */
+	DefinedColumn_BOOLEAN DefinedColumnType = 3
+
+	/**
+	 * A string.
+	 */
+	DefinedColumn_STRING DefinedColumnType = 4
+
+	/**
+	 * Binary data.
+	 */
+	DefinedColumn_BINARY DefinedColumnType = 5
+)
+
+type StartLocalTransactionRequest struct {
+	PrimaryKey *PrimaryKey
+	TableName  string
+}
+
+type StartLocalTransactionResponse struct {
+	TransactionId *string
+	ResponseInfo
+}
+
+type CommitTransactionRequest struct {
+	TransactionId *string
+}
+
+type CommitTransactionResponse struct {
+	ResponseInfo
+}
+
+type AbortTransactionRequest struct {
+	TransactionId *string
+}
+
+type AbortTransactionResponse struct {
+	ResponseInfo
+}
\ No newline at end of file
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go
new file mode 100644
index 00000000..5c302a35
--- /dev/null
+++ 
b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/ots_header.go
@@ -0,0 +1,127 @@
+package tablestore
+
+import (
+	"crypto/hmac"
+	"crypto/sha1"
+	"encoding/base64"
+	"hash"
+	"sort"
+	"strings"
+)
+
+const (
+	xOtsDate                = "x-ots-date"
+	xOtsApiversion          = "x-ots-apiversion"
+	xOtsAccesskeyid         = "x-ots-accesskeyid"
+	xOtsContentmd5          = "x-ots-contentmd5"
+	xOtsHeaderStsToken      = "x-ots-ststoken"
+	xOtsHeaderChargeAdmin   = "x-ots-charge-for-admin"
+	xOtsSignature           = "x-ots-signature"
+	xOtsRequestCompressType = "x-ots-request-compress-type"
+	xOtsRequestCompressSize = "x-ots-request-compress-size"
+	xOtsResponseCompressTye = "x-ots-response-compress-type"
+	xOtsPrefix              = "x-ots"
+)
+
+type otsHeader struct {
+	name  string
+	value string
+	must  bool
+}
+
+type otsHeaders struct {
+	headers  []*otsHeader
+	hmacSha1 hash.Hash
+}
+
+func createOtsHeaders(accessKey string) *otsHeaders {
+	h := new(otsHeaders)
+
+	h.headers = []*otsHeader{
+		&otsHeader{name: xOtsDate, must: true},
+		&otsHeader{name: xOtsApiversion, must: true},
+		&otsHeader{name: xOtsAccesskeyid, must: true},
+		&otsHeader{name: xOtsContentmd5, must: true},
+		&otsHeader{name: xOtsInstanceName, must: true},
+		&otsHeader{name: xOtsSignature, must: true},
+		&otsHeader{name: xOtsRequestCompressSize, must: false},
+		&otsHeader{name: xOtsResponseCompressTye, must: false},
+		&otsHeader{name: xOtsRequestCompressType, must: false},
+		&otsHeader{name: xOtsHeaderStsToken, must: false},
+		&otsHeader{name: xOtsHeaderChargeAdmin, must: false},
+	}
+
+	sort.Sort(h)
+
+	h.hmacSha1 = hmac.New(sha1.New, []byte(accessKey))
+	return h
+}
+
+func (h *otsHeaders) Len() int {
+	return len(h.headers)
+}
+
+func (h *otsHeaders) Swap(i, j int) {
+	h.headers[i], h.headers[j] = h.headers[j], h.headers[i]
+}
+
+func (h *otsHeaders) Less(i, j int) bool {
+	if h.headers[i].name == xOtsSignature {
+		return false
+	}
+
+	if h.headers[j].name == xOtsSignature {
+		return true
+	}
+
+	return h.headers[i].name < h.headers[j].name
+}
+
+func (h *otsHeaders) search(name string) *otsHeader {
+	index := sort.Search(len(h.headers)-1, func(i int) bool {
+		return h.headers[i].name >= name
+	})
+
+	if index >= len(h.headers) {
+		return nil
+	}
+
+	return h.headers[index]
+}
+
+func (h *otsHeaders) set(name, value string) {
+	header := h.search(name)
+	if header == nil {
+		return
+	}
+
+	header.value = value
+}
+
+func (h *otsHeaders) signature(uri, method, accessKey string) (string, error) {
+	for _, header := range h.headers[:len(h.headers)-1] {
+		if header.must && header.value == "" {
+			return "", errMissMustHeader(header.name)
+		}
+	}
+
+	// StringToSign = CanonicalURI + '\n' + HTTPRequestMethod + '\n' + CanonicalQueryString + '\n' + CanonicalHeaders + '\n'
+	// TODO: CanonicalQueryString is currently always empty.
+	stringToSign := uri + "\n" + method + "\n" + "\n"
+
+	// The last header is xOtsSignature; it is excluded from the string to sign.
+	for _, header := range h.headers[:len(h.headers)-1] {
+		if header.value != "" {
+			stringToSign = stringToSign + header.name + ":" + strings.TrimSpace(header.value) + "\n"
+		}
+	}
+
+	h.hmacSha1.Reset()
+	h.hmacSha1.Write([]byte(stringToSign))
+
+	sign := base64.StdEncoding.EncodeToString(h.hmacSha1.Sum(nil))
+	h.set(xOtsSignature, sign)
+	return sign, nil
+}
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh
new file mode 100644
index 00000000..18cb5079
--- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/build_proto.sh @@ -0,0 +1 @@ +protoc --go_out=. search.proto ots_filter.proto table_store.proto diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go new file mode 100644 index 00000000..4172901c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.pb.go @@ -0,0 +1,390 @@ +// Code generated by protoc-gen-go. +// source: ots_filter.proto +// DO NOT EDIT! + +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type VariantType int32 + +const ( + VariantType_VT_INTEGER VariantType = 0 + VariantType_VT_DOUBLE VariantType = 1 + // VT_BOOLEAN = 2; + VariantType_VT_STRING VariantType = 3 + VariantType_VT_NULL VariantType = 6 + VariantType_VT_BLOB VariantType = 7 +) + +var VariantType_name = map[int32]string{ + 0: "VT_INTEGER", + 1: "VT_DOUBLE", + 3: "VT_STRING", + 6: "VT_NULL", + 7: "VT_BLOB", +} +var VariantType_value = map[string]int32{ + "VT_INTEGER": 0, + "VT_DOUBLE": 1, + "VT_STRING": 3, + "VT_NULL": 6, + "VT_BLOB": 7, +} + +func (x VariantType) Enum() *VariantType { + p := new(VariantType) + *p = x + return p +} +func (x VariantType) String() string { + return proto.EnumName(VariantType_name, int32(x)) +} +func (x *VariantType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(VariantType_value, data, "VariantType") + if err != nil { + return err + } + *x = VariantType(value) + return nil +} +func (VariantType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +type FilterType int32 + +const ( + FilterType_FT_SINGLE_COLUMN_VALUE FilterType = 1 + FilterType_FT_COMPOSITE_COLUMN_VALUE FilterType = 2 + FilterType_FT_COLUMN_PAGINATION FilterType = 3 +) + +var FilterType_name = map[int32]string{ + 1: "FT_SINGLE_COLUMN_VALUE", + 2: "FT_COMPOSITE_COLUMN_VALUE", + 3: "FT_COLUMN_PAGINATION", +} +var FilterType_value = map[string]int32{ + "FT_SINGLE_COLUMN_VALUE": 1, + "FT_COMPOSITE_COLUMN_VALUE": 2, + "FT_COLUMN_PAGINATION": 3, +} + +func (x FilterType) Enum() *FilterType { + p := new(FilterType) + *p = x + return p +} +func (x FilterType) String() string { + return proto.EnumName(FilterType_name, int32(x)) +} +func (x *FilterType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FilterType_value, data, "FilterType") + if err != nil { + return err + } + *x = FilterType(value) + return nil +} +func (FilterType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type ComparatorType int32 + +const ( + ComparatorType_CT_EQUAL ComparatorType = 1 + ComparatorType_CT_NOT_EQUAL ComparatorType = 2 + ComparatorType_CT_GREATER_THAN ComparatorType = 3 + ComparatorType_CT_GREATER_EQUAL ComparatorType = 4 + ComparatorType_CT_LESS_THAN ComparatorType = 5 + ComparatorType_CT_LESS_EQUAL ComparatorType = 6 +) + +var ComparatorType_name = map[int32]string{ + 1: "CT_EQUAL", + 2: "CT_NOT_EQUAL", + 3: "CT_GREATER_THAN", + 4: "CT_GREATER_EQUAL", + 5: "CT_LESS_THAN", + 6: "CT_LESS_EQUAL", +} +var ComparatorType_value = map[string]int32{ + "CT_EQUAL": 1, + "CT_NOT_EQUAL": 2, + "CT_GREATER_THAN": 3, + "CT_GREATER_EQUAL": 
4, + "CT_LESS_THAN": 5, + "CT_LESS_EQUAL": 6, +} + +func (x ComparatorType) Enum() *ComparatorType { + p := new(ComparatorType) + *p = x + return p +} +func (x ComparatorType) String() string { + return proto.EnumName(ComparatorType_name, int32(x)) +} +func (x *ComparatorType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ComparatorType_value, data, "ComparatorType") + if err != nil { + return err + } + *x = ComparatorType(value) + return nil +} +func (ComparatorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +type LogicalOperator int32 + +const ( + LogicalOperator_LO_NOT LogicalOperator = 1 + LogicalOperator_LO_AND LogicalOperator = 2 + LogicalOperator_LO_OR LogicalOperator = 3 +) + +var LogicalOperator_name = map[int32]string{ + 1: "LO_NOT", + 2: "LO_AND", + 3: "LO_OR", +} +var LogicalOperator_value = map[string]int32{ + "LO_NOT": 1, + "LO_AND": 2, + "LO_OR": 3, +} + +func (x LogicalOperator) Enum() *LogicalOperator { + p := new(LogicalOperator) + *p = x + return p +} +func (x LogicalOperator) String() string { + return proto.EnumName(LogicalOperator_name, int32(x)) +} +func (x *LogicalOperator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogicalOperator_value, data, "LogicalOperator") + if err != nil { + return err + } + *x = LogicalOperator(value) + return nil +} +func (LogicalOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +type ValueTransferRule struct { + Regex *string `protobuf:"bytes,1,req,name=regex" json:"regex,omitempty"` + CastType *VariantType `protobuf:"varint,2,opt,name=cast_type,enum=otsprotocol.VariantType" json:"cast_type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ValueTransferRule) Reset() { *m = ValueTransferRule{} } +func (m *ValueTransferRule) String() string { return proto.CompactTextString(m) } +func (*ValueTransferRule) ProtoMessage() {} +func (*ValueTransferRule) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *ValueTransferRule) GetRegex() string { + if m != nil && m.Regex != nil { + return *m.Regex + } + return "" +} + +func (m *ValueTransferRule) GetCastType() VariantType { + if m != nil && m.CastType != nil { + return *m.CastType + } + return VariantType_VT_INTEGER +} + +type SingleColumnValueFilter struct { + Comparator *ComparatorType `protobuf:"varint,1,req,name=comparator,enum=otsprotocol.ComparatorType" json:"comparator,omitempty"` + ColumnName *string `protobuf:"bytes,2,req,name=column_name" json:"column_name,omitempty"` + ColumnValue []byte `protobuf:"bytes,3,req,name=column_value" json:"column_value,omitempty"` + FilterIfMissing *bool `protobuf:"varint,4,req,name=filter_if_missing" json:"filter_if_missing,omitempty"` + LatestVersionOnly *bool `protobuf:"varint,5,req,name=latest_version_only" json:"latest_version_only,omitempty"` + ValueTransRule *ValueTransferRule `protobuf:"bytes,6,opt,name=value_trans_rule" json:"value_trans_rule,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SingleColumnValueFilter) Reset() { *m = SingleColumnValueFilter{} } +func (m *SingleColumnValueFilter) String() string { return proto.CompactTextString(m) } +func (*SingleColumnValueFilter) ProtoMessage() {} +func (*SingleColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *SingleColumnValueFilter) GetComparator() ComparatorType { + if m != nil && m.Comparator != nil { + return *m.Comparator + } + return ComparatorType_CT_EQUAL +} + +func (m 
*SingleColumnValueFilter) GetColumnName() string { + if m != nil && m.ColumnName != nil { + return *m.ColumnName + } + return "" +} + +func (m *SingleColumnValueFilter) GetColumnValue() []byte { + if m != nil { + return m.ColumnValue + } + return nil +} + +func (m *SingleColumnValueFilter) GetFilterIfMissing() bool { + if m != nil && m.FilterIfMissing != nil { + return *m.FilterIfMissing + } + return false +} + +func (m *SingleColumnValueFilter) GetLatestVersionOnly() bool { + if m != nil && m.LatestVersionOnly != nil { + return *m.LatestVersionOnly + } + return false +} + +func (m *SingleColumnValueFilter) GetValueTransRule() *ValueTransferRule { + if m != nil { + return m.ValueTransRule + } + return nil +} + +type CompositeColumnValueFilter struct { + Combinator *LogicalOperator `protobuf:"varint,1,req,name=combinator,enum=otsprotocol.LogicalOperator" json:"combinator,omitempty"` + SubFilters []*Filter `protobuf:"bytes,2,rep,name=sub_filters" json:"sub_filters,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CompositeColumnValueFilter) Reset() { *m = CompositeColumnValueFilter{} } +func (m *CompositeColumnValueFilter) String() string { return proto.CompactTextString(m) } +func (*CompositeColumnValueFilter) ProtoMessage() {} +func (*CompositeColumnValueFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *CompositeColumnValueFilter) GetCombinator() LogicalOperator { + if m != nil && m.Combinator != nil { + return *m.Combinator + } + return LogicalOperator_LO_NOT +} + +func (m *CompositeColumnValueFilter) GetSubFilters() []*Filter { + if m != nil { + return m.SubFilters + } + return nil +} + +type ColumnPaginationFilter struct { + Offset *int32 `protobuf:"varint,1,req,name=offset" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnPaginationFilter) Reset() { *m = ColumnPaginationFilter{} } +func (m *ColumnPaginationFilter) String() string { return proto.CompactTextString(m) } +func (*ColumnPaginationFilter) ProtoMessage() {} +func (*ColumnPaginationFilter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *ColumnPaginationFilter) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *ColumnPaginationFilter) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type Filter struct { + Type *FilterType `protobuf:"varint,1,req,name=type,enum=otsprotocol.FilterType" json:"type,omitempty"` + Filter []byte `protobuf:"bytes,2,req,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Filter) Reset() { *m = Filter{} } +func (m *Filter) String() string { return proto.CompactTextString(m) } +func (*Filter) ProtoMessage() {} +func (*Filter) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *Filter) GetType() FilterType { + if m != nil && m.Type != nil { + return *m.Type + } + return FilterType_FT_SINGLE_COLUMN_VALUE +} + +func (m *Filter) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func init() { + proto.RegisterType((*ValueTransferRule)(nil), "otsprotocol.ValueTransferRule") + proto.RegisterType((*SingleColumnValueFilter)(nil), "otsprotocol.SingleColumnValueFilter") + proto.RegisterType((*CompositeColumnValueFilter)(nil), "otsprotocol.CompositeColumnValueFilter") + proto.RegisterType((*ColumnPaginationFilter)(nil), 
"otsprotocol.ColumnPaginationFilter") + proto.RegisterType((*Filter)(nil), "otsprotocol.Filter") + proto.RegisterEnum("otsprotocol.VariantType", VariantType_name, VariantType_value) + proto.RegisterEnum("otsprotocol.FilterType", FilterType_name, FilterType_value) + proto.RegisterEnum("otsprotocol.ComparatorType", ComparatorType_name, ComparatorType_value) + proto.RegisterEnum("otsprotocol.LogicalOperator", LogicalOperator_name, LogicalOperator_value) +} + +func init() { proto.RegisterFile("ots_filter.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 585 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x92, 0x51, 0x6b, 0xdb, 0x30, + 0x14, 0x85, 0x67, 0xa7, 0x71, 0x9b, 0xeb, 0x34, 0x55, 0x95, 0xd2, 0xba, 0xed, 0x36, 0x42, 0x60, + 0x60, 0x32, 0xe8, 0x46, 0x18, 0x6c, 0x6f, 0xc3, 0x75, 0xdd, 0x2c, 0xe0, 0xda, 0x5d, 0xa2, 0xf8, + 0x55, 0xb8, 0x41, 0x09, 0x02, 0xc7, 0x0a, 0x96, 0x52, 0xda, 0xb7, 0xfd, 0xdb, 0xfd, 0x8d, 0x61, + 0xd9, 0x1d, 0x69, 0xe8, 0x9b, 0x75, 0xef, 0xd5, 0x77, 0xee, 0xd1, 0x31, 0x20, 0xa1, 0x24, 0x5d, + 0xf0, 0x4c, 0xb1, 0xe2, 0x6a, 0x5d, 0x08, 0x25, 0xb0, 0x2d, 0x94, 0xd4, 0x5f, 0x73, 0x91, 0xf5, + 0x63, 0x38, 0x4e, 0xd2, 0x6c, 0xc3, 0x48, 0x91, 0xe6, 0x72, 0xc1, 0x8a, 0xc9, 0x26, 0x63, 0xf8, + 0x10, 0x9a, 0x05, 0x5b, 0xb2, 0x27, 0xc7, 0xe8, 0x99, 0x6e, 0x0b, 0x7f, 0x86, 0xd6, 0x3c, 0x95, + 0x8a, 0xaa, 0xe7, 0x35, 0x73, 0xcc, 0x9e, 0xe1, 0x76, 0x86, 0xce, 0xd5, 0x16, 0xe4, 0x2a, 0x49, + 0x0b, 0x9e, 0xe6, 0x8a, 0x3c, 0xaf, 0x59, 0xff, 0xaf, 0x01, 0x67, 0x53, 0x9e, 0x2f, 0x33, 0xe6, + 0x8b, 0x6c, 0xb3, 0xca, 0x35, 0xfd, 0x56, 0xeb, 0xe3, 0x2f, 0x00, 0x73, 0xb1, 0x5a, 0xa7, 0x45, + 0xaa, 0x44, 0xa1, 0xe1, 0x9d, 0xe1, 0xe5, 0x2b, 0x92, 0xff, 0xbf, 0x5d, 0xc2, 0x70, 0x17, 0xec, + 0xb9, 0xa6, 0xd0, 0x3c, 0x5d, 0x95, 0xda, 0xe5, 0x3a, 0x27, 0xd0, 0xae, 0x8b, 0x8f, 0x25, 0xdb, + 0x69, 0xf4, 0x4c, 0xb7, 0x8d, 0xcf, 0xe1, 0xb8, 0x72, 0x49, 0xf9, 0x82, 0xae, 0xb8, 0x94, 0x3c, + 0x5f, 0x3a, 0x7b, 0x3d, 0xd3, 0x3d, 0xc0, 0x97, 0xd0, 0xcd, 0x52, 0xc5, 0xa4, 0xa2, 0x8f, 0xac, + 0x90, 0x5c, 0xe4, 0x54, 0xe4, 0xd9, 0xb3, 0xd3, 0xd4, 0xcd, 0x1f, 0x80, 0x34, 0x86, 0xaa, 0xf2, + 0x05, 0x68, 0xb1, 0xc9, 0x98, 0x63, 0xf5, 0x0c, 0xd7, 0x1e, 0x7e, 0xdc, 0xf1, 0xb8, 0xf3, 0x4a, + 0xfd, 0x27, 0xb8, 0x28, 0xd7, 0x15, 0x92, 0xab, 0x37, 0xbc, 0x7e, 0xd5, 0x5e, 0x1f, 0x78, 0xbe, + 0xe5, 0xf5, 0xfd, 0x2b, 0x62, 0x28, 0x96, 0x7c, 0x9e, 0x66, 0xf1, 0x9a, 0x69, 0xc3, 0xd8, 0x05, + 0x5b, 0x6e, 0x1e, 0xea, 0xac, 0xa4, 0x63, 0xf6, 0x1a, 0xae, 0x3d, 0xec, 0xbe, 0xba, 0x52, 0xb1, + 0xfb, 0xdf, 0xe1, 0xb4, 0x12, 0xbc, 0x4f, 0x97, 0xa5, 0x00, 0x17, 0x79, 0xad, 0xda, 0x01, 0x4b, + 0x2c, 0x16, 0x92, 0x29, 0xad, 0xd8, 0x2c, 0x93, 0xcc, 0xf8, 0x8a, 0x2b, 0xfd, 0x74, 0xcd, 0xfe, + 0x4f, 0xb0, 0xea, 0xc1, 0x4f, 0xb0, 0xa7, 0xe3, 0xac, 0x16, 0x3b, 0x7b, 0x43, 0x45, 0x07, 0xd0, + 0x01, 0xab, 0xda, 0x47, 0x03, 0xda, 0x83, 0x19, 0xd8, 0x5b, 0x61, 0xe3, 0x0e, 0x40, 0x42, 0xe8, + 0x38, 0x22, 0xc1, 0x28, 0x98, 0xa0, 0x77, 0xf8, 0x10, 0x5a, 0x09, 0xa1, 0x37, 0xf1, 0xec, 0x3a, + 0x0c, 0x90, 0x51, 0x1f, 0xa7, 0x64, 0x32, 0x8e, 0x46, 0xa8, 0x81, 0x6d, 0xd8, 0x4f, 0x08, 0x8d, + 0x66, 0x61, 0x88, 0xac, 0xfa, 0x70, 0x1d, 0xc6, 0xd7, 0x68, 0x7f, 0x90, 0x02, 0x6c, 0x89, 0x5e, + 0xc0, 0xe9, 0x2d, 0xa1, 0xd3, 0x71, 0x34, 0x0a, 0x03, 0xea, 0xc7, 0xe1, 0xec, 0x2e, 0xa2, 0x89, + 0x17, 0xce, 0x4a, 0xe4, 0x07, 0x38, 0xbf, 0x25, 0xd4, 0x8f, 0xef, 0xee, 0xe3, 0xe9, 0x98, 0xec, + 0xb4, 0x4d, 0xec, 0xc0, 0x89, 0x6e, 0xeb, 0xe2, 0xbd, 0x37, 0x1a, 0x47, 0x1e, 0x19, 0xc7, 0x11, 
+ 0x6a, 0x0c, 0xfe, 0x18, 0xd0, 0xd9, 0xf9, 0xbb, 0xda, 0x70, 0xe0, 0x13, 0x1a, 0xfc, 0x9e, 0x79, + 0x21, 0x32, 0x30, 0x82, 0xb6, 0x4f, 0x68, 0x14, 0xbf, 0x54, 0x4c, 0xdc, 0x85, 0x23, 0x9f, 0xd0, + 0xd1, 0x24, 0xf0, 0x48, 0x30, 0xa1, 0xe4, 0x97, 0x17, 0xa1, 0x06, 0x3e, 0x01, 0xb4, 0x55, 0xac, + 0x46, 0xf7, 0xea, 0xcb, 0x61, 0x30, 0x9d, 0x56, 0x73, 0x4d, 0x7c, 0x0c, 0x87, 0x2f, 0x95, 0x6a, + 0xc8, 0x1a, 0x7c, 0x83, 0xa3, 0xdd, 0xcc, 0x01, 0xac, 0x30, 0x2e, 0x45, 0x91, 0x51, 0x7f, 0x7b, + 0xd1, 0x0d, 0x32, 0x71, 0x0b, 0x9a, 0x61, 0x4c, 0xe3, 0x09, 0x6a, 0xfc, 0x0b, 0x00, 0x00, 0xff, + 0xff, 0xb3, 0x10, 0x19, 0xa7, 0xc1, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto new file mode 100644 index 00000000..172d6b47 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/ots_filter.proto @@ -0,0 +1,61 @@ +syntax = "proto2"; +package otsprotocol; + +enum VariantType { + VT_INTEGER = 0; + VT_DOUBLE = 1; + //VT_BOOLEAN = 2; + VT_STRING = 3; + VT_NULL = 6; + VT_BLOB = 7; +} + +message ValueTransferRule { + required string regex = 1; + optional VariantType cast_type = 2; +} + +enum FilterType { + FT_SINGLE_COLUMN_VALUE = 1; + FT_COMPOSITE_COLUMN_VALUE = 2; + FT_COLUMN_PAGINATION = 3; +} + +enum ComparatorType { + CT_EQUAL = 1; + CT_NOT_EQUAL = 2; + CT_GREATER_THAN = 3; + CT_GREATER_EQUAL = 4; + CT_LESS_THAN = 5; + CT_LESS_EQUAL = 6; +} + +message SingleColumnValueFilter { + required ComparatorType comparator = 1; + required string column_name = 2; + required bytes column_value = 3; // Serialized SQLVariant + required bool filter_if_missing = 4; + required bool latest_version_only = 5; + optional ValueTransferRule value_trans_rule = 6; +} + +enum LogicalOperator { + LO_NOT = 1; + LO_AND = 2; + LO_OR = 3; +} + +message CompositeColumnValueFilter { + required LogicalOperator combinator = 1; + repeated Filter sub_filters = 2; +} + +message ColumnPaginationFilter { + required int32 offset = 1; + required int32 limit = 2; +} + +message Filter { + required FilterType type = 1; + required bytes filter = 2; // Serialized string of filter of the type +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go new file mode 100644 index 00000000..b7892ff0 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.pb.go @@ -0,0 +1,2020 @@ +// Code generated by protoc-gen-go. +// source: search.proto +// DO NOT EDIT! + +/* +Package otsprotocol is a generated protocol buffer package. 
+ +It is generated from these files: + search.proto + ots_filter.proto + table_store.proto + +It has these top-level messages: + MatchQuery + MatchPhraseQuery + MatchAllQuery + TermQuery + TermsQuery + RangeQuery + PrefixQuery + WildcardQuery + BoolQuery + ConstScoreQuery + FieldValueFactor + FunctionScoreQuery + NestedQuery + GeoBoundingBoxQuery + GeoDistanceQuery + GeoPolygonQuery + ExistsQuery + Query + Collapse + NestedFilter + ScoreSort + FieldSort + GeoDistanceSort + PrimaryKeySort + Sorter + Sort + SearchQuery + ColumnsToGet + SearchRequest + SearchResponse + FieldSchema + IndexSchema + IndexSetting + CreateSearchIndexRequest + CreateSearchIndexResponse + IndexInfo + ListSearchIndexRequest + ListSearchIndexResponse + DeleteSearchIndexRequest + DeleteSearchIndexResponse + SyncStat + DescribeSearchIndexRequest + DescribeSearchIndexResponse + ValueTransferRule + SingleColumnValueFilter + CompositeColumnValueFilter + ColumnPaginationFilter + Filter + Error + PrimaryKeySchema + PartitionRange + TableOptions + TableMeta + Condition + CapacityUnit + ReservedThroughputDetails + ReservedThroughput + ConsumedCapacity + StreamSpecification + StreamDetails + CreateTableRequest + CreateTableResponse + UpdateTableRequest + UpdateTableResponse + DescribeTableRequest + DescribeTableResponse + ListTableRequest + ListTableResponse + DeleteTableRequest + DeleteTableResponse + LoadTableRequest + LoadTableResponse + UnloadTableRequest + UnloadTableResponse + TimeRange + ReturnContent + GetRowRequest + GetRowResponse + UpdateRowRequest + UpdateRowResponse + PutRowRequest + PutRowResponse + DeleteRowRequest + DeleteRowResponse + TableInBatchGetRowRequest + BatchGetRowRequest + RowInBatchGetRowResponse + TableInBatchGetRowResponse + BatchGetRowResponse + RowInBatchWriteRowRequest + TableInBatchWriteRowRequest + BatchWriteRowRequest + RowInBatchWriteRowResponse + TableInBatchWriteRowResponse + BatchWriteRowResponse + GetRangeRequest + GetRangeResponse + ListStreamRequest + Stream + ListStreamResponse + StreamShard + DescribeStreamRequest + DescribeStreamResponse + GetShardIteratorRequest + GetShardIteratorResponse + GetStreamRecordRequest + GetStreamRecordResponse + ComputeSplitPointsBySizeRequest + ComputeSplitPointsBySizeResponse + DefinedColumnSchema + IndexMeta + CreateIndexRequest + CreateIndexResponse + DropIndexRequest + DropIndexResponse + StartLocalTransactionRequest + StartLocalTransactionResponse + CommitTransactionRequest + CommitTransactionResponse + AbortTransactionRequest + AbortTransactionResponse +*/ +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
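+// (The blank-identifier constant below references proto.ProtoPackageIsVersion2,
+// a symbol that only exists in sufficiently new versions of the proto runtime,
+// so building against an older runtime fails at this line.)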
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type QueryType int32 + +const ( + QueryType_MATCH_QUERY QueryType = 1 + QueryType_MATCH_PHRASE_QUERY QueryType = 2 + QueryType_TERM_QUERY QueryType = 3 + QueryType_RANGE_QUERY QueryType = 4 + QueryType_PREFIX_QUERY QueryType = 5 + QueryType_BOOL_QUERY QueryType = 6 + QueryType_CONST_SCORE_QUERY QueryType = 7 + QueryType_FUNCTION_SCORE_QUERY QueryType = 8 + QueryType_NESTED_QUERY QueryType = 9 + QueryType_WILDCARD_QUERY QueryType = 10 + QueryType_MATCH_ALL_QUERY QueryType = 11 + QueryType_GEO_BOUNDING_BOX_QUERY QueryType = 12 + QueryType_GEO_DISTANCE_QUERY QueryType = 13 + QueryType_GEO_POLYGON_QUERY QueryType = 14 + QueryType_TERMS_QUERY QueryType = 15 + QueryType_EXISTS_QUERY QueryType = 16 +) + +var QueryType_name = map[int32]string{ + 1: "MATCH_QUERY", + 2: "MATCH_PHRASE_QUERY", + 3: "TERM_QUERY", + 4: "RANGE_QUERY", + 5: "PREFIX_QUERY", + 6: "BOOL_QUERY", + 7: "CONST_SCORE_QUERY", + 8: "FUNCTION_SCORE_QUERY", + 9: "NESTED_QUERY", + 10: "WILDCARD_QUERY", + 11: "MATCH_ALL_QUERY", + 12: "GEO_BOUNDING_BOX_QUERY", + 13: "GEO_DISTANCE_QUERY", + 14: "GEO_POLYGON_QUERY", + 15: "TERMS_QUERY", + 16: "EXISTS_QUERY", +} +var QueryType_value = map[string]int32{ + "MATCH_QUERY": 1, + "MATCH_PHRASE_QUERY": 2, + "TERM_QUERY": 3, + "RANGE_QUERY": 4, + "PREFIX_QUERY": 5, + "BOOL_QUERY": 6, + "CONST_SCORE_QUERY": 7, + "FUNCTION_SCORE_QUERY": 8, + "NESTED_QUERY": 9, + "WILDCARD_QUERY": 10, + "MATCH_ALL_QUERY": 11, + "GEO_BOUNDING_BOX_QUERY": 12, + "GEO_DISTANCE_QUERY": 13, + "GEO_POLYGON_QUERY": 14, + "TERMS_QUERY": 15, + "EXISTS_QUERY": 16, +} + +func (x QueryType) Enum() *QueryType { + p := new(QueryType) + *p = x + return p +} +func (x QueryType) String() string { + return proto.EnumName(QueryType_name, int32(x)) +} +func (x *QueryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryType_value, data, "QueryType") + if err != nil { + return err + } + *x = QueryType(value) + return nil +} +func (QueryType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type QueryOperator int32 + +const ( + QueryOperator_OR QueryOperator = 1 + QueryOperator_AND QueryOperator = 2 +) + +var QueryOperator_name = map[int32]string{ + 1: "OR", + 2: "AND", +} +var QueryOperator_value = map[string]int32{ + "OR": 1, + "AND": 2, +} + +func (x QueryOperator) Enum() *QueryOperator { + p := new(QueryOperator) + *p = x + return p +} +func (x QueryOperator) String() string { + return proto.EnumName(QueryOperator_name, int32(x)) +} +func (x *QueryOperator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(QueryOperator_value, data, "QueryOperator") + if err != nil { + return err + } + *x = QueryOperator(value) + return nil +} +func (QueryOperator) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +type ScoreMode int32 + +const ( + ScoreMode_SCORE_MODE_NONE ScoreMode = 1 + ScoreMode_SCORE_MODE_AVG ScoreMode = 2 + ScoreMode_SCORE_MODE_MAX ScoreMode = 3 + ScoreMode_SCORE_MODE_TOTAL ScoreMode = 4 + ScoreMode_SCORE_MODE_MIN ScoreMode = 5 +) + +var ScoreMode_name = map[int32]string{ + 1: "SCORE_MODE_NONE", + 2: "SCORE_MODE_AVG", + 3: "SCORE_MODE_MAX", + 4: "SCORE_MODE_TOTAL", + 5: "SCORE_MODE_MIN", +} +var ScoreMode_value = map[string]int32{ + "SCORE_MODE_NONE": 1, + "SCORE_MODE_AVG": 2, + "SCORE_MODE_MAX": 3, + "SCORE_MODE_TOTAL": 4, + "SCORE_MODE_MIN": 5, +} + +func (x ScoreMode) Enum() *ScoreMode { + p := new(ScoreMode) + *p = x + return p +} +func (x 
ScoreMode) String() string { + return proto.EnumName(ScoreMode_name, int32(x)) +} +func (x *ScoreMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ScoreMode_value, data, "ScoreMode") + if err != nil { + return err + } + *x = ScoreMode(value) + return nil +} +func (ScoreMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type SortOrder int32 + +const ( + SortOrder_SORT_ORDER_ASC SortOrder = 0 + SortOrder_SORT_ORDER_DESC SortOrder = 1 +) + +var SortOrder_name = map[int32]string{ + 0: "SORT_ORDER_ASC", + 1: "SORT_ORDER_DESC", +} +var SortOrder_value = map[string]int32{ + "SORT_ORDER_ASC": 0, + "SORT_ORDER_DESC": 1, +} + +func (x SortOrder) Enum() *SortOrder { + p := new(SortOrder) + *p = x + return p +} +func (x SortOrder) String() string { + return proto.EnumName(SortOrder_name, int32(x)) +} +func (x *SortOrder) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SortOrder_value, data, "SortOrder") + if err != nil { + return err + } + *x = SortOrder(value) + return nil +} +func (SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +type SortMode int32 + +const ( + SortMode_SORT_MODE_MIN SortMode = 0 + SortMode_SORT_MODE_MAX SortMode = 1 + SortMode_SORT_MODE_AVG SortMode = 2 +) + +var SortMode_name = map[int32]string{ + 0: "SORT_MODE_MIN", + 1: "SORT_MODE_MAX", + 2: "SORT_MODE_AVG", +} +var SortMode_value = map[string]int32{ + "SORT_MODE_MIN": 0, + "SORT_MODE_MAX": 1, + "SORT_MODE_AVG": 2, +} + +func (x SortMode) Enum() *SortMode { + p := new(SortMode) + *p = x + return p +} +func (x SortMode) String() string { + return proto.EnumName(SortMode_name, int32(x)) +} +func (x *SortMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SortMode_value, data, "SortMode") + if err != nil { + return err + } + *x = SortMode(value) + return nil +} +func (SortMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type GeoDistanceType int32 + +const ( + GeoDistanceType_GEO_DISTANCE_ARC GeoDistanceType = 0 + GeoDistanceType_GEO_DISTANCE_PLANE GeoDistanceType = 1 +) + +var GeoDistanceType_name = map[int32]string{ + 0: "GEO_DISTANCE_ARC", + 1: "GEO_DISTANCE_PLANE", +} +var GeoDistanceType_value = map[string]int32{ + "GEO_DISTANCE_ARC": 0, + "GEO_DISTANCE_PLANE": 1, +} + +func (x GeoDistanceType) Enum() *GeoDistanceType { + p := new(GeoDistanceType) + *p = x + return p +} +func (x GeoDistanceType) String() string { + return proto.EnumName(GeoDistanceType_name, int32(x)) +} +func (x *GeoDistanceType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(GeoDistanceType_value, data, "GeoDistanceType") + if err != nil { + return err + } + *x = GeoDistanceType(value) + return nil +} +func (GeoDistanceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type ColumnReturnType int32 + +const ( + ColumnReturnType_RETURN_ALL ColumnReturnType = 1 + ColumnReturnType_RETURN_SPECIFIED ColumnReturnType = 2 + ColumnReturnType_RETURN_NONE ColumnReturnType = 3 +) + +var ColumnReturnType_name = map[int32]string{ + 1: "RETURN_ALL", + 2: "RETURN_SPECIFIED", + 3: "RETURN_NONE", +} +var ColumnReturnType_value = map[string]int32{ + "RETURN_ALL": 1, + "RETURN_SPECIFIED": 2, + "RETURN_NONE": 3, +} + +func (x ColumnReturnType) Enum() *ColumnReturnType { + p := new(ColumnReturnType) + *p = x + return p +} +func (x ColumnReturnType) String() string { + return proto.EnumName(ColumnReturnType_name, int32(x)) +} +func (x 
*ColumnReturnType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ColumnReturnType_value, data, "ColumnReturnType") + if err != nil { + return err + } + *x = ColumnReturnType(value) + return nil +} +func (ColumnReturnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type IndexOptions int32 + +const ( + IndexOptions_DOCS IndexOptions = 1 + IndexOptions_FREQS IndexOptions = 2 + IndexOptions_POSITIONS IndexOptions = 3 + IndexOptions_OFFSETS IndexOptions = 4 +) + +var IndexOptions_name = map[int32]string{ + 1: "DOCS", + 2: "FREQS", + 3: "POSITIONS", + 4: "OFFSETS", +} +var IndexOptions_value = map[string]int32{ + "DOCS": 1, + "FREQS": 2, + "POSITIONS": 3, + "OFFSETS": 4, +} + +func (x IndexOptions) Enum() *IndexOptions { + p := new(IndexOptions) + *p = x + return p +} +func (x IndexOptions) String() string { + return proto.EnumName(IndexOptions_name, int32(x)) +} +func (x *IndexOptions) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexOptions_value, data, "IndexOptions") + if err != nil { + return err + } + *x = IndexOptions(value) + return nil +} +func (IndexOptions) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type FieldType int32 + +const ( + FieldType_LONG FieldType = 1 + FieldType_DOUBLE FieldType = 2 + FieldType_BOOLEAN FieldType = 3 + FieldType_KEYWORD FieldType = 4 + FieldType_TEXT FieldType = 5 + FieldType_NESTED FieldType = 6 + FieldType_GEO_POINT FieldType = 7 +) + +var FieldType_name = map[int32]string{ + 1: "LONG", + 2: "DOUBLE", + 3: "BOOLEAN", + 4: "KEYWORD", + 5: "TEXT", + 6: "NESTED", + 7: "GEO_POINT", +} +var FieldType_value = map[string]int32{ + "LONG": 1, + "DOUBLE": 2, + "BOOLEAN": 3, + "KEYWORD": 4, + "TEXT": 5, + "NESTED": 6, + "GEO_POINT": 7, +} + +func (x FieldType) Enum() *FieldType { + p := new(FieldType) + *p = x + return p +} +func (x FieldType) String() string { + return proto.EnumName(FieldType_name, int32(x)) +} +func (x *FieldType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldType_value, data, "FieldType") + if err != nil { + return err + } + *x = FieldType(value) + return nil +} +func (FieldType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +type SyncPhase int32 + +const ( + SyncPhase_FULL SyncPhase = 1 + SyncPhase_INCR SyncPhase = 2 +) + +var SyncPhase_name = map[int32]string{ + 1: "FULL", + 2: "INCR", +} +var SyncPhase_value = map[string]int32{ + "FULL": 1, + "INCR": 2, +} + +func (x SyncPhase) Enum() *SyncPhase { + p := new(SyncPhase) + *p = x + return p +} +func (x SyncPhase) String() string { + return proto.EnumName(SyncPhase_name, int32(x)) +} +func (x *SyncPhase) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(SyncPhase_value, data, "SyncPhase") + if err != nil { + return err + } + *x = SyncPhase(value) + return nil +} +func (SyncPhase) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type MatchQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Text *string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` + MinimumShouldMatch *int32 `protobuf:"varint,3,opt,name=minimum_should_match" json:"minimum_should_match,omitempty"` + Operator *QueryOperator `protobuf:"varint,4,opt,name=operator,enum=otsprotocol.QueryOperator" json:"operator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchQuery) Reset() { *m = MatchQuery{} } +func (m *MatchQuery) String() 
string { return proto.CompactTextString(m) } +func (*MatchQuery) ProtoMessage() {} +func (*MatchQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *MatchQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *MatchQuery) GetText() string { + if m != nil && m.Text != nil { + return *m.Text + } + return "" +} + +func (m *MatchQuery) GetMinimumShouldMatch() int32 { + if m != nil && m.MinimumShouldMatch != nil { + return *m.MinimumShouldMatch + } + return 0 +} + +func (m *MatchQuery) GetOperator() QueryOperator { + if m != nil && m.Operator != nil { + return *m.Operator + } + return QueryOperator_OR +} + +type MatchPhraseQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Text *string `protobuf:"bytes,2,opt,name=text" json:"text,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchPhraseQuery) Reset() { *m = MatchPhraseQuery{} } +func (m *MatchPhraseQuery) String() string { return proto.CompactTextString(m) } +func (*MatchPhraseQuery) ProtoMessage() {} +func (*MatchPhraseQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *MatchPhraseQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *MatchPhraseQuery) GetText() string { + if m != nil && m.Text != nil { + return *m.Text + } + return "" +} + +type MatchAllQuery struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *MatchAllQuery) Reset() { *m = MatchAllQuery{} } +func (m *MatchAllQuery) String() string { return proto.CompactTextString(m) } +func (*MatchAllQuery) ProtoMessage() {} +func (*MatchAllQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type TermQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Term []byte `protobuf:"bytes,2,opt,name=term" json:"term,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TermQuery) Reset() { *m = TermQuery{} } +func (m *TermQuery) String() string { return proto.CompactTextString(m) } +func (*TermQuery) ProtoMessage() {} +func (*TermQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *TermQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *TermQuery) GetTerm() []byte { + if m != nil { + return m.Term + } + return nil +} + +type TermsQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Terms [][]byte `protobuf:"bytes,2,rep,name=terms" json:"terms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TermsQuery) Reset() { *m = TermsQuery{} } +func (m *TermsQuery) String() string { return proto.CompactTextString(m) } +func (*TermsQuery) ProtoMessage() {} +func (*TermsQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *TermsQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *TermsQuery) GetTerms() [][]byte { + if m != nil { + return m.Terms + } + return nil +} + +type RangeQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + RangeFrom []byte `protobuf:"bytes,2,opt,name=range_from" json:"range_from,omitempty"` + RangeTo []byte `protobuf:"bytes,3,opt,name=range_to" json:"range_to,omitempty"` + IncludeLower *bool 
`protobuf:"varint,4,opt,name=include_lower" json:"include_lower,omitempty"` + IncludeUpper *bool `protobuf:"varint,5,opt,name=include_upper" json:"include_upper,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RangeQuery) Reset() { *m = RangeQuery{} } +func (m *RangeQuery) String() string { return proto.CompactTextString(m) } +func (*RangeQuery) ProtoMessage() {} +func (*RangeQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *RangeQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *RangeQuery) GetRangeFrom() []byte { + if m != nil { + return m.RangeFrom + } + return nil +} + +func (m *RangeQuery) GetRangeTo() []byte { + if m != nil { + return m.RangeTo + } + return nil +} + +func (m *RangeQuery) GetIncludeLower() bool { + if m != nil && m.IncludeLower != nil { + return *m.IncludeLower + } + return false +} + +func (m *RangeQuery) GetIncludeUpper() bool { + if m != nil && m.IncludeUpper != nil { + return *m.IncludeUpper + } + return false +} + +type PrefixQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Prefix *string `protobuf:"bytes,2,opt,name=prefix" json:"prefix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrefixQuery) Reset() { *m = PrefixQuery{} } +func (m *PrefixQuery) String() string { return proto.CompactTextString(m) } +func (*PrefixQuery) ProtoMessage() {} +func (*PrefixQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *PrefixQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *PrefixQuery) GetPrefix() string { + if m != nil && m.Prefix != nil { + return *m.Prefix + } + return "" +} + +type WildcardQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WildcardQuery) Reset() { *m = WildcardQuery{} } +func (m *WildcardQuery) String() string { return proto.CompactTextString(m) } +func (*WildcardQuery) ProtoMessage() {} +func (*WildcardQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *WildcardQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *WildcardQuery) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type BoolQuery struct { + MustQueries []*Query `protobuf:"bytes,1,rep,name=must_queries" json:"must_queries,omitempty"` + MustNotQueries []*Query `protobuf:"bytes,2,rep,name=must_not_queries" json:"must_not_queries,omitempty"` + FilterQueries []*Query `protobuf:"bytes,3,rep,name=filter_queries" json:"filter_queries,omitempty"` + ShouldQueries []*Query `protobuf:"bytes,4,rep,name=should_queries" json:"should_queries,omitempty"` + MinimumShouldMatch *int32 `protobuf:"varint,5,opt,name=minimum_should_match" json:"minimum_should_match,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BoolQuery) Reset() { *m = BoolQuery{} } +func (m *BoolQuery) String() string { return proto.CompactTextString(m) } +func (*BoolQuery) ProtoMessage() {} +func (*BoolQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *BoolQuery) GetMustQueries() []*Query { + if m != nil { + return m.MustQueries + } + return nil +} + +func (m *BoolQuery) 
GetMustNotQueries() []*Query { + if m != nil { + return m.MustNotQueries + } + return nil +} + +func (m *BoolQuery) GetFilterQueries() []*Query { + if m != nil { + return m.FilterQueries + } + return nil +} + +func (m *BoolQuery) GetShouldQueries() []*Query { + if m != nil { + return m.ShouldQueries + } + return nil +} + +func (m *BoolQuery) GetMinimumShouldMatch() int32 { + if m != nil && m.MinimumShouldMatch != nil { + return *m.MinimumShouldMatch + } + return 0 +} + +type ConstScoreQuery struct { + Filter *Query `protobuf:"bytes,1,opt,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConstScoreQuery) Reset() { *m = ConstScoreQuery{} } +func (m *ConstScoreQuery) String() string { return proto.CompactTextString(m) } +func (*ConstScoreQuery) ProtoMessage() {} +func (*ConstScoreQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ConstScoreQuery) GetFilter() *Query { + if m != nil { + return m.Filter + } + return nil +} + +type FieldValueFactor struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldValueFactor) Reset() { *m = FieldValueFactor{} } +func (m *FieldValueFactor) String() string { return proto.CompactTextString(m) } +func (*FieldValueFactor) ProtoMessage() {} +func (*FieldValueFactor) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *FieldValueFactor) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +type FunctionScoreQuery struct { + Query *Query `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + FieldValueFactor *FieldValueFactor `protobuf:"bytes,2,opt,name=field_value_factor" json:"field_value_factor,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FunctionScoreQuery) Reset() { *m = FunctionScoreQuery{} } +func (m *FunctionScoreQuery) String() string { return proto.CompactTextString(m) } +func (*FunctionScoreQuery) ProtoMessage() {} +func (*FunctionScoreQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FunctionScoreQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *FunctionScoreQuery) GetFieldValueFactor() *FieldValueFactor { + if m != nil { + return m.FieldValueFactor + } + return nil +} + +type NestedQuery struct { + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Query *Query `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + ScoreMode *ScoreMode `protobuf:"varint,3,opt,name=score_mode,enum=otsprotocol.ScoreMode" json:"score_mode,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NestedQuery) Reset() { *m = NestedQuery{} } +func (m *NestedQuery) String() string { return proto.CompactTextString(m) } +func (*NestedQuery) ProtoMessage() {} +func (*NestedQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *NestedQuery) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *NestedQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *NestedQuery) GetScoreMode() ScoreMode { + if m != nil && m.ScoreMode != nil { + return *m.ScoreMode + } + return ScoreMode_SCORE_MODE_NONE +} + +type GeoBoundingBoxQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + TopLeft *string `protobuf:"bytes,2,opt,name=top_left" 
json:"top_left,omitempty"` + BottomRight *string `protobuf:"bytes,3,opt,name=bottom_right" json:"bottom_right,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoBoundingBoxQuery) Reset() { *m = GeoBoundingBoxQuery{} } +func (m *GeoBoundingBoxQuery) String() string { return proto.CompactTextString(m) } +func (*GeoBoundingBoxQuery) ProtoMessage() {} +func (*GeoBoundingBoxQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *GeoBoundingBoxQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoBoundingBoxQuery) GetTopLeft() string { + if m != nil && m.TopLeft != nil { + return *m.TopLeft + } + return "" +} + +func (m *GeoBoundingBoxQuery) GetBottomRight() string { + if m != nil && m.BottomRight != nil { + return *m.BottomRight + } + return "" +} + +type GeoDistanceQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + CenterPoint *string `protobuf:"bytes,2,opt,name=center_point" json:"center_point,omitempty"` + Distance *float64 `protobuf:"fixed64,3,opt,name=distance" json:"distance,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoDistanceQuery) Reset() { *m = GeoDistanceQuery{} } +func (m *GeoDistanceQuery) String() string { return proto.CompactTextString(m) } +func (*GeoDistanceQuery) ProtoMessage() {} +func (*GeoDistanceQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *GeoDistanceQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoDistanceQuery) GetCenterPoint() string { + if m != nil && m.CenterPoint != nil { + return *m.CenterPoint + } + return "" +} + +func (m *GeoDistanceQuery) GetDistance() float64 { + if m != nil && m.Distance != nil { + return *m.Distance + } + return 0 +} + +type GeoPolygonQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Points []string `protobuf:"bytes,2,rep,name=points" json:"points,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoPolygonQuery) Reset() { *m = GeoPolygonQuery{} } +func (m *GeoPolygonQuery) String() string { return proto.CompactTextString(m) } +func (*GeoPolygonQuery) ProtoMessage() {} +func (*GeoPolygonQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *GeoPolygonQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoPolygonQuery) GetPoints() []string { + if m != nil { + return m.Points + } + return nil +} + +type ExistsQuery struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExistsQuery) Reset() { *m = ExistsQuery{} } +func (m *ExistsQuery) String() string { return proto.CompactTextString(m) } +func (*ExistsQuery) ProtoMessage() {} +func (*ExistsQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ExistsQuery) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +type Query struct { + Type *QueryType `protobuf:"varint,1,opt,name=type,enum=otsprotocol.QueryType" json:"type,omitempty"` + Query []byte `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return 
proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *Query) GetType() QueryType { + if m != nil && m.Type != nil { + return *m.Type + } + return QueryType_MATCH_QUERY +} + +func (m *Query) GetQuery() []byte { + if m != nil { + return m.Query + } + return nil +} + +type Collapse struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Collapse) Reset() { *m = Collapse{} } +func (m *Collapse) String() string { return proto.CompactTextString(m) } +func (*Collapse) ProtoMessage() {} +func (*Collapse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *Collapse) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +type NestedFilter struct { + Path *string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Filter *Query `protobuf:"bytes,2,opt,name=filter" json:"filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *NestedFilter) Reset() { *m = NestedFilter{} } +func (m *NestedFilter) String() string { return proto.CompactTextString(m) } +func (*NestedFilter) ProtoMessage() {} +func (*NestedFilter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *NestedFilter) GetPath() string { + if m != nil && m.Path != nil { + return *m.Path + } + return "" +} + +func (m *NestedFilter) GetFilter() *Query { + if m != nil { + return m.Filter + } + return nil +} + +type ScoreSort struct { + Order *SortOrder `protobuf:"varint,1,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ScoreSort) Reset() { *m = ScoreSort{} } +func (m *ScoreSort) String() string { return proto.CompactTextString(m) } +func (*ScoreSort) ProtoMessage() {} +func (*ScoreSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ScoreSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +type FieldSort struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Order *SortOrder `protobuf:"varint,2,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + Mode *SortMode `protobuf:"varint,3,opt,name=mode,enum=otsprotocol.SortMode" json:"mode,omitempty"` + NestedFilter *NestedFilter `protobuf:"bytes,4,opt,name=nested_filter" json:"nested_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSort) Reset() { *m = FieldSort{} } +func (m *FieldSort) String() string { return proto.CompactTextString(m) } +func (*FieldSort) ProtoMessage() {} +func (*FieldSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *FieldSort) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *FieldSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +func (m *FieldSort) GetMode() SortMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return SortMode_SORT_MODE_MIN +} + +func (m *FieldSort) GetNestedFilter() *NestedFilter { + if m != nil { + return m.NestedFilter + } + return nil +} + +type GeoDistanceSort struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + Points []string 
`protobuf:"bytes,2,rep,name=points" json:"points,omitempty"` + Order *SortOrder `protobuf:"varint,3,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + Mode *SortMode `protobuf:"varint,4,opt,name=mode,enum=otsprotocol.SortMode" json:"mode,omitempty"` + DistanceType *GeoDistanceType `protobuf:"varint,5,opt,name=distance_type,enum=otsprotocol.GeoDistanceType" json:"distance_type,omitempty"` + NestedFilter *NestedFilter `protobuf:"bytes,6,opt,name=nested_filter" json:"nested_filter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeoDistanceSort) Reset() { *m = GeoDistanceSort{} } +func (m *GeoDistanceSort) String() string { return proto.CompactTextString(m) } +func (*GeoDistanceSort) ProtoMessage() {} +func (*GeoDistanceSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *GeoDistanceSort) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *GeoDistanceSort) GetPoints() []string { + if m != nil { + return m.Points + } + return nil +} + +func (m *GeoDistanceSort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +func (m *GeoDistanceSort) GetMode() SortMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return SortMode_SORT_MODE_MIN +} + +func (m *GeoDistanceSort) GetDistanceType() GeoDistanceType { + if m != nil && m.DistanceType != nil { + return *m.DistanceType + } + return GeoDistanceType_GEO_DISTANCE_ARC +} + +func (m *GeoDistanceSort) GetNestedFilter() *NestedFilter { + if m != nil { + return m.NestedFilter + } + return nil +} + +type PrimaryKeySort struct { + Order *SortOrder `protobuf:"varint,1,opt,name=order,enum=otsprotocol.SortOrder" json:"order,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrimaryKeySort) Reset() { *m = PrimaryKeySort{} } +func (m *PrimaryKeySort) String() string { return proto.CompactTextString(m) } +func (*PrimaryKeySort) ProtoMessage() {} +func (*PrimaryKeySort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *PrimaryKeySort) GetOrder() SortOrder { + if m != nil && m.Order != nil { + return *m.Order + } + return SortOrder_SORT_ORDER_ASC +} + +type Sorter struct { + FieldSort *FieldSort `protobuf:"bytes,1,opt,name=field_sort" json:"field_sort,omitempty"` + GeoDistanceSort *GeoDistanceSort `protobuf:"bytes,2,opt,name=geo_distance_sort" json:"geo_distance_sort,omitempty"` + ScoreSort *ScoreSort `protobuf:"bytes,3,opt,name=score_sort" json:"score_sort,omitempty"` + PkSort *PrimaryKeySort `protobuf:"bytes,4,opt,name=pk_sort" json:"pk_sort,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Sorter) Reset() { *m = Sorter{} } +func (m *Sorter) String() string { return proto.CompactTextString(m) } +func (*Sorter) ProtoMessage() {} +func (*Sorter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *Sorter) GetFieldSort() *FieldSort { + if m != nil { + return m.FieldSort + } + return nil +} + +func (m *Sorter) GetGeoDistanceSort() *GeoDistanceSort { + if m != nil { + return m.GeoDistanceSort + } + return nil +} + +func (m *Sorter) GetScoreSort() *ScoreSort { + if m != nil { + return m.ScoreSort + } + return nil +} + +func (m *Sorter) GetPkSort() *PrimaryKeySort { + if m != nil { + return m.PkSort + } + return nil +} + +type Sort struct { + Sorter []*Sorter `protobuf:"bytes,1,rep,name=sorter" json:"sorter,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Sort) Reset() { *m 
= Sort{} } +func (m *Sort) String() string { return proto.CompactTextString(m) } +func (*Sort) ProtoMessage() {} +func (*Sort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *Sort) GetSorter() []*Sorter { + if m != nil { + return m.Sorter + } + return nil +} + +type SearchQuery struct { + Offset *int32 `protobuf:"varint,1,opt,name=offset" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + Query *Query `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"` + Collapse *Collapse `protobuf:"bytes,5,opt,name=collapse" json:"collapse,omitempty"` + Sort *Sort `protobuf:"bytes,6,opt,name=sort" json:"sort,omitempty"` + GetTotalCount *bool `protobuf:"varint,8,opt,name=getTotalCount" json:"getTotalCount,omitempty"` + Token []byte `protobuf:"bytes,9,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchQuery) Reset() { *m = SearchQuery{} } +func (m *SearchQuery) String() string { return proto.CompactTextString(m) } +func (*SearchQuery) ProtoMessage() {} +func (*SearchQuery) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *SearchQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return 0 +} + +func (m *SearchQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *SearchQuery) GetQuery() *Query { + if m != nil { + return m.Query + } + return nil +} + +func (m *SearchQuery) GetCollapse() *Collapse { + if m != nil { + return m.Collapse + } + return nil +} + +func (m *SearchQuery) GetSort() *Sort { + if m != nil { + return m.Sort + } + return nil +} + +func (m *SearchQuery) GetGetTotalCount() bool { + if m != nil && m.GetTotalCount != nil { + return *m.GetTotalCount + } + return false +} + +func (m *SearchQuery) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +type ColumnsToGet struct { + ReturnType *ColumnReturnType `protobuf:"varint,1,opt,name=return_type,enum=otsprotocol.ColumnReturnType" json:"return_type,omitempty"` + ColumnNames []string `protobuf:"bytes,2,rep,name=column_names" json:"column_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ColumnsToGet) Reset() { *m = ColumnsToGet{} } +func (m *ColumnsToGet) String() string { return proto.CompactTextString(m) } +func (*ColumnsToGet) ProtoMessage() {} +func (*ColumnsToGet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *ColumnsToGet) GetReturnType() ColumnReturnType { + if m != nil && m.ReturnType != nil { + return *m.ReturnType + } + return ColumnReturnType_RETURN_ALL +} + +func (m *ColumnsToGet) GetColumnNames() []string { + if m != nil { + return m.ColumnNames + } + return nil +} + +type SearchRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + ColumnsToGet *ColumnsToGet `protobuf:"bytes,3,opt,name=columns_to_get" json:"columns_to_get,omitempty"` + SearchQuery []byte `protobuf:"bytes,4,opt,name=search_query" json:"search_query,omitempty"` + RoutingValues [][]byte `protobuf:"bytes,5,rep,name=routing_values" json:"routing_values,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchRequest) Reset() { *m = SearchRequest{} } +func (m *SearchRequest) String() string { return proto.CompactTextString(m) } +func (*SearchRequest) ProtoMessage() {} +func (*SearchRequest) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *SearchRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *SearchRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *SearchRequest) GetColumnsToGet() *ColumnsToGet { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *SearchRequest) GetSearchQuery() []byte { + if m != nil { + return m.SearchQuery + } + return nil +} + +func (m *SearchRequest) GetRoutingValues() [][]byte { + if m != nil { + return m.RoutingValues + } + return nil +} + +type SearchResponse struct { + TotalHits *int64 `protobuf:"varint,1,opt,name=total_hits" json:"total_hits,omitempty"` + Rows [][]byte `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + IsAllSucceeded *bool `protobuf:"varint,3,opt,name=is_all_succeeded" json:"is_all_succeeded,omitempty"` + NextToken []byte `protobuf:"bytes,6,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SearchResponse) Reset() { *m = SearchResponse{} } +func (m *SearchResponse) String() string { return proto.CompactTextString(m) } +func (*SearchResponse) ProtoMessage() {} +func (*SearchResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *SearchResponse) GetTotalHits() int64 { + if m != nil && m.TotalHits != nil { + return *m.TotalHits + } + return 0 +} + +func (m *SearchResponse) GetRows() [][]byte { + if m != nil { + return m.Rows + } + return nil +} + +func (m *SearchResponse) GetIsAllSucceeded() bool { + if m != nil && m.IsAllSucceeded != nil { + return *m.IsAllSucceeded + } + return false +} + +func (m *SearchResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type FieldSchema struct { + FieldName *string `protobuf:"bytes,1,opt,name=field_name" json:"field_name,omitempty"` + FieldType *FieldType `protobuf:"varint,2,opt,name=field_type,enum=otsprotocol.FieldType" json:"field_type,omitempty"` + IndexOptions *IndexOptions `protobuf:"varint,3,opt,name=index_options,enum=otsprotocol.IndexOptions" json:"index_options,omitempty"` + Analyzer *string `protobuf:"bytes,4,opt,name=analyzer" json:"analyzer,omitempty"` + Index *bool `protobuf:"varint,5,opt,name=index" json:"index,omitempty"` + DocValues *bool `protobuf:"varint,6,opt,name=doc_values" json:"doc_values,omitempty"` + Store *bool `protobuf:"varint,7,opt,name=store" json:"store,omitempty"` + FieldSchemas []*FieldSchema `protobuf:"bytes,8,rep,name=field_schemas" json:"field_schemas,omitempty"` + IsArray *bool `protobuf:"varint,9,opt,name=is_array" json:"is_array,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldSchema) Reset() { *m = FieldSchema{} } +func (m *FieldSchema) String() string { return proto.CompactTextString(m) } +func (*FieldSchema) ProtoMessage() {} +func (*FieldSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +func (m *FieldSchema) GetFieldName() string { + if m != nil && m.FieldName != nil { + return *m.FieldName + } + return "" +} + +func (m *FieldSchema) GetFieldType() FieldType { + if m != nil && m.FieldType != nil { + return *m.FieldType + } + return FieldType_LONG +} + +func (m *FieldSchema) GetIndexOptions() IndexOptions { + if m != nil && m.IndexOptions != nil { + return *m.IndexOptions + } + return IndexOptions_DOCS +} + +func (m *FieldSchema) GetAnalyzer() string { + if m != nil && m.Analyzer != 
nil { + return *m.Analyzer + } + return "" +} + +func (m *FieldSchema) GetIndex() bool { + if m != nil && m.Index != nil { + return *m.Index + } + return false +} + +func (m *FieldSchema) GetDocValues() bool { + if m != nil && m.DocValues != nil { + return *m.DocValues + } + return false +} + +func (m *FieldSchema) GetStore() bool { + if m != nil && m.Store != nil { + return *m.Store + } + return false +} + +func (m *FieldSchema) GetFieldSchemas() []*FieldSchema { + if m != nil { + return m.FieldSchemas + } + return nil +} + +func (m *FieldSchema) GetIsArray() bool { + if m != nil && m.IsArray != nil { + return *m.IsArray + } + return false +} + +type IndexSchema struct { + FieldSchemas []*FieldSchema `protobuf:"bytes,1,rep,name=field_schemas" json:"field_schemas,omitempty"` + IndexSetting *IndexSetting `protobuf:"bytes,2,opt,name=index_setting" json:"index_setting,omitempty"` + IndexSort *Sort `protobuf:"bytes,3,opt,name=index_sort" json:"index_sort,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSchema) Reset() { *m = IndexSchema{} } +func (m *IndexSchema) String() string { return proto.CompactTextString(m) } +func (*IndexSchema) ProtoMessage() {} +func (*IndexSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *IndexSchema) GetFieldSchemas() []*FieldSchema { + if m != nil { + return m.FieldSchemas + } + return nil +} + +func (m *IndexSchema) GetIndexSetting() *IndexSetting { + if m != nil { + return m.IndexSetting + } + return nil +} + +func (m *IndexSchema) GetIndexSort() *Sort { + if m != nil { + return m.IndexSort + } + return nil +} + +type IndexSetting struct { + NumberOfShards *int32 `protobuf:"varint,1,opt,name=number_of_shards" json:"number_of_shards,omitempty"` + RoutingFields []string `protobuf:"bytes,2,rep,name=routing_fields" json:"routing_fields,omitempty"` + RoutingPartitionSize *int32 `protobuf:"varint,3,opt,name=routing_partition_size" json:"routing_partition_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexSetting) Reset() { *m = IndexSetting{} } +func (m *IndexSetting) String() string { return proto.CompactTextString(m) } +func (*IndexSetting) ProtoMessage() {} +func (*IndexSetting) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *IndexSetting) GetNumberOfShards() int32 { + if m != nil && m.NumberOfShards != nil { + return *m.NumberOfShards + } + return 0 +} + +func (m *IndexSetting) GetRoutingFields() []string { + if m != nil { + return m.RoutingFields + } + return nil +} + +func (m *IndexSetting) GetRoutingPartitionSize() int32 { + if m != nil && m.RoutingPartitionSize != nil { + return *m.RoutingPartitionSize + } + return 0 +} + +type CreateSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,req,name=index_name" json:"index_name,omitempty"` + Schema *IndexSchema `protobuf:"bytes,3,opt,name=schema" json:"schema,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSearchIndexRequest) Reset() { *m = CreateSearchIndexRequest{} } +func (m *CreateSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*CreateSearchIndexRequest) ProtoMessage() {} +func (*CreateSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *CreateSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *CreateSearchIndexRequest) 
GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CreateSearchIndexRequest) GetSchema() *IndexSchema { + if m != nil { + return m.Schema + } + return nil +} + +type CreateSearchIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateSearchIndexResponse) Reset() { *m = CreateSearchIndexResponse{} } +func (m *CreateSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*CreateSearchIndexResponse) ProtoMessage() {} +func (*CreateSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type IndexInfo struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexInfo) Reset() { *m = IndexInfo{} } +func (m *IndexInfo) String() string { return proto.CompactTextString(m) } +func (*IndexInfo) ProtoMessage() {} +func (*IndexInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *IndexInfo) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *IndexInfo) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type ListSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListSearchIndexRequest) Reset() { *m = ListSearchIndexRequest{} } +func (m *ListSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*ListSearchIndexRequest) ProtoMessage() {} +func (*ListSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *ListSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type ListSearchIndexResponse struct { + Indices []*IndexInfo `protobuf:"bytes,1,rep,name=indices" json:"indices,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListSearchIndexResponse) Reset() { *m = ListSearchIndexResponse{} } +func (m *ListSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*ListSearchIndexResponse) ProtoMessage() {} +func (*ListSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +func (m *ListSearchIndexResponse) GetIndices() []*IndexInfo { + if m != nil { + return m.Indices + } + return nil +} + +type DeleteSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteSearchIndexRequest) Reset() { *m = DeleteSearchIndexRequest{} } +func (m *DeleteSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteSearchIndexRequest) ProtoMessage() {} +func (*DeleteSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *DeleteSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DeleteSearchIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DeleteSearchIndexResponse struct { + XXX_unrecognized []byte 
`json:"-"` +} + +func (m *DeleteSearchIndexResponse) Reset() { *m = DeleteSearchIndexResponse{} } +func (m *DeleteSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteSearchIndexResponse) ProtoMessage() {} +func (*DeleteSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type SyncStat struct { + SyncPhase *SyncPhase `protobuf:"varint,1,opt,name=sync_phase,enum=otsprotocol.SyncPhase" json:"sync_phase,omitempty"` + CurrentSyncTimestamp *int64 `protobuf:"varint,2,opt,name=current_sync_timestamp" json:"current_sync_timestamp,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SyncStat) Reset() { *m = SyncStat{} } +func (m *SyncStat) String() string { return proto.CompactTextString(m) } +func (*SyncStat) ProtoMessage() {} +func (*SyncStat) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *SyncStat) GetSyncPhase() SyncPhase { + if m != nil && m.SyncPhase != nil { + return *m.SyncPhase + } + return SyncPhase_FULL +} + +func (m *SyncStat) GetCurrentSyncTimestamp() int64 { + if m != nil && m.CurrentSyncTimestamp != nil { + return *m.CurrentSyncTimestamp + } + return 0 +} + +type DescribeSearchIndexRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeSearchIndexRequest) Reset() { *m = DescribeSearchIndexRequest{} } +func (m *DescribeSearchIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeSearchIndexRequest) ProtoMessage() {} +func (*DescribeSearchIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *DescribeSearchIndexRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DescribeSearchIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DescribeSearchIndexResponse struct { + Schema *IndexSchema `protobuf:"bytes,1,opt,name=schema" json:"schema,omitempty"` + SyncStat *SyncStat `protobuf:"bytes,2,opt,name=sync_stat" json:"sync_stat,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeSearchIndexResponse) Reset() { *m = DescribeSearchIndexResponse{} } +func (m *DescribeSearchIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeSearchIndexResponse) ProtoMessage() {} +func (*DescribeSearchIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *DescribeSearchIndexResponse) GetSchema() *IndexSchema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *DescribeSearchIndexResponse) GetSyncStat() *SyncStat { + if m != nil { + return m.SyncStat + } + return nil +} + +func init() { + proto.RegisterType((*MatchQuery)(nil), "otsprotocol.MatchQuery") + proto.RegisterType((*MatchPhraseQuery)(nil), "otsprotocol.MatchPhraseQuery") + proto.RegisterType((*MatchAllQuery)(nil), "otsprotocol.MatchAllQuery") + proto.RegisterType((*TermQuery)(nil), "otsprotocol.TermQuery") + proto.RegisterType((*TermsQuery)(nil), "otsprotocol.TermsQuery") + proto.RegisterType((*RangeQuery)(nil), "otsprotocol.RangeQuery") + proto.RegisterType((*PrefixQuery)(nil), "otsprotocol.PrefixQuery") + proto.RegisterType((*WildcardQuery)(nil), "otsprotocol.WildcardQuery") + proto.RegisterType((*BoolQuery)(nil), 
"otsprotocol.BoolQuery") + proto.RegisterType((*ConstScoreQuery)(nil), "otsprotocol.ConstScoreQuery") + proto.RegisterType((*FieldValueFactor)(nil), "otsprotocol.FieldValueFactor") + proto.RegisterType((*FunctionScoreQuery)(nil), "otsprotocol.FunctionScoreQuery") + proto.RegisterType((*NestedQuery)(nil), "otsprotocol.NestedQuery") + proto.RegisterType((*GeoBoundingBoxQuery)(nil), "otsprotocol.GeoBoundingBoxQuery") + proto.RegisterType((*GeoDistanceQuery)(nil), "otsprotocol.GeoDistanceQuery") + proto.RegisterType((*GeoPolygonQuery)(nil), "otsprotocol.GeoPolygonQuery") + proto.RegisterType((*ExistsQuery)(nil), "otsprotocol.ExistsQuery") + proto.RegisterType((*Query)(nil), "otsprotocol.Query") + proto.RegisterType((*Collapse)(nil), "otsprotocol.Collapse") + proto.RegisterType((*NestedFilter)(nil), "otsprotocol.NestedFilter") + proto.RegisterType((*ScoreSort)(nil), "otsprotocol.ScoreSort") + proto.RegisterType((*FieldSort)(nil), "otsprotocol.FieldSort") + proto.RegisterType((*GeoDistanceSort)(nil), "otsprotocol.GeoDistanceSort") + proto.RegisterType((*PrimaryKeySort)(nil), "otsprotocol.PrimaryKeySort") + proto.RegisterType((*Sorter)(nil), "otsprotocol.Sorter") + proto.RegisterType((*Sort)(nil), "otsprotocol.Sort") + proto.RegisterType((*SearchQuery)(nil), "otsprotocol.SearchQuery") + proto.RegisterType((*ColumnsToGet)(nil), "otsprotocol.ColumnsToGet") + proto.RegisterType((*SearchRequest)(nil), "otsprotocol.SearchRequest") + proto.RegisterType((*SearchResponse)(nil), "otsprotocol.SearchResponse") + proto.RegisterType((*FieldSchema)(nil), "otsprotocol.FieldSchema") + proto.RegisterType((*IndexSchema)(nil), "otsprotocol.IndexSchema") + proto.RegisterType((*IndexSetting)(nil), "otsprotocol.IndexSetting") + proto.RegisterType((*CreateSearchIndexRequest)(nil), "otsprotocol.CreateSearchIndexRequest") + proto.RegisterType((*CreateSearchIndexResponse)(nil), "otsprotocol.CreateSearchIndexResponse") + proto.RegisterType((*IndexInfo)(nil), "otsprotocol.IndexInfo") + proto.RegisterType((*ListSearchIndexRequest)(nil), "otsprotocol.ListSearchIndexRequest") + proto.RegisterType((*ListSearchIndexResponse)(nil), "otsprotocol.ListSearchIndexResponse") + proto.RegisterType((*DeleteSearchIndexRequest)(nil), "otsprotocol.DeleteSearchIndexRequest") + proto.RegisterType((*DeleteSearchIndexResponse)(nil), "otsprotocol.DeleteSearchIndexResponse") + proto.RegisterType((*SyncStat)(nil), "otsprotocol.SyncStat") + proto.RegisterType((*DescribeSearchIndexRequest)(nil), "otsprotocol.DescribeSearchIndexRequest") + proto.RegisterType((*DescribeSearchIndexResponse)(nil), "otsprotocol.DescribeSearchIndexResponse") + proto.RegisterEnum("otsprotocol.QueryType", QueryType_name, QueryType_value) + proto.RegisterEnum("otsprotocol.QueryOperator", QueryOperator_name, QueryOperator_value) + proto.RegisterEnum("otsprotocol.ScoreMode", ScoreMode_name, ScoreMode_value) + proto.RegisterEnum("otsprotocol.SortOrder", SortOrder_name, SortOrder_value) + proto.RegisterEnum("otsprotocol.SortMode", SortMode_name, SortMode_value) + proto.RegisterEnum("otsprotocol.GeoDistanceType", GeoDistanceType_name, GeoDistanceType_value) + proto.RegisterEnum("otsprotocol.ColumnReturnType", ColumnReturnType_name, ColumnReturnType_value) + proto.RegisterEnum("otsprotocol.IndexOptions", IndexOptions_name, IndexOptions_value) + proto.RegisterEnum("otsprotocol.FieldType", FieldType_name, FieldType_value) + proto.RegisterEnum("otsprotocol.SyncPhase", SyncPhase_name, SyncPhase_value) +} + +func init() { proto.RegisterFile("search.proto", fileDescriptor0) } + +var 
fileDescriptor0 = []byte{ + // 1950 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x58, 0xd9, 0x72, 0xdb, 0xd6, + 0x19, 0x0e, 0xb8, 0x89, 0xfc, 0xb9, 0xe8, 0x18, 0x96, 0x55, 0x3a, 0xce, 0xa2, 0x20, 0x4d, 0xa3, + 0x61, 0x5d, 0xa7, 0x51, 0xd2, 0xc9, 0x74, 0xa6, 0x33, 0x29, 0x45, 0x82, 0x0c, 0x27, 0x12, 0x40, + 0x03, 0x94, 0x2d, 0x5f, 0xc1, 0x30, 0x78, 0x28, 0xa2, 0x01, 0x71, 0xe8, 0x83, 0xc3, 0x46, 0xf4, + 0x03, 0xf4, 0x15, 0xda, 0x9b, 0x3e, 0x4b, 0x2f, 0x7b, 0xdd, 0xcb, 0xbe, 0x41, 0x1f, 0xa3, 0x73, + 0x16, 0x70, 0xa7, 0xac, 0x4e, 0xef, 0xcc, 0x1f, 0xdf, 0xbf, 0x7d, 0xff, 0x72, 0x7e, 0x0b, 0x2a, + 0x09, 0xf6, 0x69, 0x30, 0x7e, 0x36, 0xa5, 0x84, 0x11, 0xbd, 0x4c, 0x58, 0x22, 0xfe, 0x15, 0x90, + 0xc8, 0x78, 0x07, 0x70, 0xe9, 0xb3, 0x60, 0xfc, 0x7c, 0x86, 0xe9, 0x5c, 0xd7, 0x01, 0x46, 0x21, + 0x8e, 0x86, 0x5e, 0xec, 0x4f, 0x70, 0x5d, 0x3b, 0xd1, 0x4e, 0x4b, 0x7a, 0x05, 0x72, 0x0c, 0xdf, + 0xb2, 0x7a, 0x46, 0xfc, 0xfa, 0x08, 0x8e, 0x26, 0x61, 0x1c, 0x4e, 0x66, 0x13, 0x2f, 0x19, 0x93, + 0x59, 0x34, 0xf4, 0x26, 0x5c, 0xbd, 0x9e, 0x3d, 0xd1, 0x4e, 0xf3, 0xfa, 0x53, 0x28, 0x92, 0x29, + 0xa6, 0x3e, 0x23, 0xb4, 0x9e, 0x3b, 0xd1, 0x4e, 0x6b, 0x67, 0x1f, 0x3e, 0x5b, 0xf1, 0xf6, 0x4c, + 0x78, 0xb1, 0x15, 0xc2, 0xf8, 0x16, 0x90, 0xf0, 0xdd, 0x1f, 0x53, 0x3f, 0xc1, 0xf7, 0x8c, 0xc0, + 0x38, 0x84, 0xaa, 0xd0, 0x6a, 0x46, 0x91, 0x50, 0x31, 0x7e, 0x03, 0xa5, 0x01, 0xa6, 0x93, 0xf7, + 0xe8, 0xd3, 0x89, 0xd0, 0xaf, 0x18, 0x5f, 0x01, 0x70, 0x78, 0xb2, 0x1f, 0x5f, 0x85, 0x3c, 0xc7, + 0x27, 0xf5, 0xcc, 0x49, 0xf6, 0xb4, 0x62, 0x30, 0x00, 0xc7, 0x8f, 0x6f, 0xee, 0x08, 0x50, 0x07, + 0xa0, 0x1c, 0xe1, 0x8d, 0x28, 0x51, 0x6e, 0x74, 0x04, 0x45, 0x29, 0x63, 0x44, 0x90, 0x53, 0xd1, + 0x1f, 0x41, 0x35, 0x8c, 0x83, 0x68, 0x36, 0xc4, 0x5e, 0x44, 0x7e, 0xc6, 0x92, 0xa1, 0xe2, 0xaa, + 0x78, 0x36, 0x9d, 0x62, 0x5a, 0xcf, 0x73, 0xb1, 0xf1, 0x35, 0x94, 0xfb, 0x14, 0x8f, 0xc2, 0xdb, + 0xfd, 0x6e, 0x6b, 0x50, 0x98, 0x0a, 0x88, 0x62, 0xe6, 0x0c, 0xaa, 0x2f, 0xc3, 0x68, 0x18, 0xf8, + 0x74, 0x78, 0x67, 0x72, 0x7f, 0xf6, 0xa3, 0x19, 0x56, 0x3a, 0xff, 0xd6, 0xa0, 0x74, 0x4e, 0x88, + 0xa4, 0x52, 0x3f, 0x85, 0xca, 0x64, 0x96, 0x30, 0xef, 0xed, 0x0c, 0xd3, 0x10, 0x27, 0x75, 0xed, + 0x24, 0x7b, 0x5a, 0x3e, 0xd3, 0xb7, 0x6b, 0xa8, 0x3f, 0x05, 0x24, 0x90, 0x31, 0x59, 0xa2, 0x33, + 0x7b, 0xd1, 0x0d, 0xa8, 0x8d, 0xc2, 0x88, 0x61, 0xba, 0xc0, 0x66, 0xef, 0xc2, 0xaa, 0xce, 0x4a, + 0xb1, 0xb9, 0xbd, 0xd8, 0x7d, 0xdd, 0xc8, 0x29, 0xcc, 0x1b, 0xbf, 0x83, 0xc3, 0x16, 0x89, 0x13, + 0xe6, 0x06, 0x84, 0xaa, 0xea, 0x19, 0x50, 0x90, 0x81, 0x08, 0x36, 0x76, 0x1a, 0x35, 0x7e, 0x05, + 0xa8, 0xc3, 0x59, 0x7b, 0xc1, 0x69, 0xea, 0xf8, 0x01, 0x23, 0x74, 0x17, 0x93, 0x06, 0x05, 0xbd, + 0x33, 0x8b, 0x03, 0x16, 0x92, 0x78, 0xc5, 0xc3, 0x67, 0x90, 0xe7, 0x71, 0xcf, 0xf7, 0x3b, 0xd0, + 0x7f, 0x0f, 0xba, 0x34, 0x26, 0x0a, 0xe1, 0x8d, 0x84, 0x0b, 0x51, 0x8f, 0xf2, 0xd9, 0xc7, 0x6b, + 0xf8, 0xcd, 0x38, 0x8c, 0x3f, 0x41, 0xd9, 0xc2, 0x09, 0xc3, 0xaa, 0xc0, 0x15, 0xc8, 0x4d, 0x7d, + 0x36, 0x56, 0xa5, 0x5d, 0xb8, 0xce, 0xec, 0x75, 0xdd, 0x00, 0x48, 0x78, 0xac, 0xde, 0x84, 0x0c, + 0xb1, 0xe8, 0xcb, 0xda, 0xd9, 0xf1, 0x1a, 0x4e, 0xa4, 0x72, 0x49, 0x86, 0xd8, 0x78, 0x0e, 0x0f, + 0xbb, 0x98, 0x9c, 0x93, 0x59, 0x3c, 0x0c, 0xe3, 0x9b, 0x73, 0x72, 0x47, 0x27, 0x22, 0x28, 0x32, + 0x32, 0xf5, 0x22, 0x3c, 0x4a, 0xf7, 0xc4, 0x11, 0x54, 0xde, 0x10, 0xc6, 0xc8, 0xc4, 0xa3, 0xe1, + 0xcd, 0x98, 0x09, 0x57, 0x25, 0xc3, 0x02, 0xd4, 0xc5, 0xa4, 0x1d, 0x26, 0xcc, 0x8f, 0x83, 0x3b, + 0x06, 0xea, 0x08, 0x2a, 0x01, 0x8e, 0x79, 0xbf, 
0x4c, 0x49, 0x18, 0xa7, 0x36, 0x11, 0x14, 0x87, + 0x4a, 0x55, 0xd8, 0xd3, 0x78, 0x85, 0xbb, 0x98, 0xf4, 0x49, 0x34, 0xbf, 0x21, 0xf1, 0xdd, 0x83, + 0xc2, 0xed, 0xc8, 0x16, 0x2d, 0x19, 0x9f, 0x41, 0xd9, 0xbc, 0x0d, 0x13, 0xb6, 0x7f, 0x07, 0x18, + 0x7f, 0x80, 0xbc, 0xfc, 0xf8, 0x4b, 0xc8, 0xb1, 0xf9, 0x54, 0x8a, 0x37, 0xb9, 0x12, 0x88, 0xc1, + 0x7c, 0x8a, 0xf9, 0x54, 0x2d, 0xa9, 0xaf, 0x18, 0x9f, 0x40, 0xb1, 0x45, 0xa2, 0xc8, 0x9f, 0x26, + 0x78, 0xa7, 0xf5, 0x3f, 0x42, 0x45, 0x96, 0xb1, 0x23, 0x9a, 0x71, 0xa3, 0x8e, 0xcb, 0x26, 0xdd, + 0x5b, 0x48, 0xe3, 0x0c, 0x4a, 0xa2, 0x52, 0x2e, 0xa1, 0x4c, 0xff, 0x02, 0xf2, 0x84, 0x0e, 0x55, + 0x53, 0x6f, 0x15, 0x94, 0x50, 0x66, 0xf3, 0xaf, 0xc6, 0xdf, 0x35, 0x28, 0x89, 0x8e, 0x12, 0x4a, + 0xbb, 0x88, 0x5a, 0x18, 0xca, 0xdc, 0x65, 0x48, 0xff, 0x1c, 0x72, 0x2b, 0xfd, 0xf3, 0x68, 0x0b, + 0xc5, 0xdb, 0x47, 0xff, 0x2d, 0x54, 0x63, 0x91, 0xa3, 0xa7, 0x92, 0xc9, 0x89, 0x64, 0x1e, 0xaf, + 0xa1, 0x57, 0x59, 0x30, 0xfe, 0xa3, 0x89, 0x72, 0xa6, 0xed, 0xb1, 0x37, 0xca, 0x8d, 0x72, 0x2e, + 0xa3, 0xce, 0xde, 0x2b, 0xea, 0xdc, 0x5d, 0x51, 0x7f, 0x03, 0xd5, 0xb4, 0xc7, 0x3c, 0x51, 0xf7, + 0xbc, 0x40, 0x7f, 0xb4, 0x86, 0x5e, 0x09, 0x52, 0x54, 0x7f, 0x2b, 0xd5, 0xc2, 0xfb, 0x52, 0xfd, + 0x0e, 0x6a, 0x7d, 0x1a, 0x4e, 0x7c, 0x3a, 0xff, 0x11, 0xcf, 0xff, 0x97, 0x1a, 0xfe, 0x53, 0x83, + 0x02, 0xff, 0x85, 0x29, 0x9f, 0x65, 0x49, 0x4d, 0x42, 0x28, 0x53, 0xeb, 0xe6, 0x78, 0x7b, 0x7d, + 0x08, 0xeb, 0xdf, 0xc1, 0x83, 0x1b, 0x4c, 0xbc, 0x45, 0x6a, 0x42, 0x45, 0x76, 0xd7, 0xde, 0xd4, + 0x84, 0xe2, 0x62, 0x61, 0x08, 0x8d, 0xec, 0x0e, 0x27, 0xcb, 0x36, 0x7c, 0x0a, 0x07, 0xd3, 0x9f, + 0x24, 0x50, 0xd6, 0xfa, 0xc9, 0x1a, 0x70, 0x3d, 0x61, 0xe3, 0xd7, 0x90, 0x13, 0x5a, 0x9f, 0x43, + 0x21, 0x11, 0x09, 0xa9, 0xd7, 0xe6, 0xe1, 0x56, 0xe6, 0x98, 0x1a, 0xff, 0xd0, 0xa0, 0xec, 0x8a, + 0x23, 0x46, 0x4e, 0x65, 0x0d, 0x0a, 0x64, 0x34, 0x4a, 0xb0, 0xcc, 0x3b, 0xcf, 0xe7, 0x2f, 0x0a, + 0x27, 0xa1, 0xcc, 0x29, 0xbf, 0xdc, 0x84, 0xb9, 0xbd, 0x9b, 0xf0, 0x4b, 0x28, 0x06, 0x6a, 0x44, + 0x45, 0x8d, 0xcb, 0x1b, 0x1d, 0xb1, 0x98, 0xdf, 0x4f, 0x21, 0x27, 0x52, 0x92, 0x35, 0x7d, 0xb0, + 0x15, 0x1d, 0x7f, 0xc0, 0x6f, 0x30, 0x1b, 0x10, 0xe6, 0x47, 0x2d, 0x32, 0x8b, 0x59, 0xbd, 0x28, + 0xde, 0x75, 0x7e, 0x45, 0x90, 0x9f, 0x70, 0x5c, 0x2f, 0x89, 0x95, 0x70, 0x0d, 0x95, 0x16, 0x89, + 0x66, 0x93, 0x38, 0x19, 0x90, 0x2e, 0x66, 0xfa, 0x19, 0x94, 0x29, 0x66, 0x33, 0x1a, 0x7b, 0x2b, + 0xeb, 0xe5, 0xe3, 0xcd, 0x10, 0x66, 0x93, 0xd8, 0x11, 0x28, 0xd1, 0x67, 0x7c, 0x2d, 0x0a, 0x99, + 0x98, 0x86, 0x74, 0x9b, 0xfd, 0x4d, 0x83, 0xaa, 0xe4, 0xc6, 0xc1, 0x6f, 0x67, 0x38, 0x11, 0x43, + 0xc3, 0xfc, 0x37, 0x11, 0xde, 0xb8, 0x51, 0xc2, 0x78, 0x88, 0x6f, 0xa5, 0x4c, 0x2e, 0xd4, 0xaf, + 0xa1, 0x26, 0xed, 0x25, 0x1e, 0x23, 0xde, 0x0d, 0x4e, 0x0b, 0xfc, 0x78, 0x47, 0x18, 0x2a, 0xec, + 0xa3, 0xf4, 0x98, 0xf4, 0x96, 0x04, 0x57, 0xf4, 0x63, 0xa8, 0x51, 0x32, 0x63, 0x61, 0x7c, 0x23, + 0xdf, 0xb4, 0xa4, 0x9e, 0x17, 0xa7, 0xd3, 0x6b, 0xa8, 0xa5, 0x91, 0x25, 0x53, 0x12, 0xcb, 0x6d, + 0xc8, 0x38, 0x53, 0xde, 0x38, 0x64, 0x89, 0x08, 0x2d, 0xcb, 0xb7, 0x1f, 0x25, 0x3f, 0xab, 0x73, + 0x4b, 0xaf, 0x03, 0x0a, 0x13, 0xcf, 0x8f, 0x22, 0x2f, 0x99, 0x05, 0x01, 0xc6, 0x43, 0x3c, 0x14, + 0x61, 0x15, 0xb9, 0x6e, 0x8c, 0x6f, 0x99, 0x27, 0x69, 0x2d, 0x08, 0x5a, 0xff, 0x92, 0x81, 0xb2, + 0x6c, 0xf3, 0x60, 0x8c, 0x27, 0xfe, 0xce, 0x7d, 0xb1, 0x18, 0x14, 0xc1, 0xf4, 0xae, 0xd5, 0x26, + 0x2c, 0xa4, 0xa3, 0x2c, 0x69, 0x22, 0x53, 0xfe, 0xb0, 0x27, 0x6a, 0xa7, 0xac, 0x33, 0xd2, 0xe3, + 0x08, 0x5b, 0x02, 0xf8, 0xab, 0xe4, 0xc7, 0x7e, 0x34, 0x7f, 0xa7, 0x56, 
0x9c, 0x38, 0xb1, 0x84, + 0x0d, 0x79, 0xc9, 0xf1, 0x90, 0x86, 0x24, 0x48, 0x89, 0x29, 0xa4, 0xcd, 0x91, 0x30, 0x42, 0x71, + 0xfd, 0x40, 0xfc, 0xfc, 0x0a, 0xaa, 0x6a, 0x94, 0x45, 0x16, 0x49, 0xbd, 0x28, 0x46, 0xa1, 0xbe, + 0x63, 0x9a, 0x65, 0x9a, 0x08, 0x8a, 0x9c, 0x24, 0x4a, 0xfd, 0xb9, 0xe8, 0xaf, 0xa2, 0xf1, 0x57, + 0x0d, 0xca, 0x22, 0x2e, 0x85, 0xd8, 0x32, 0xa9, 0xbd, 0xc7, 0xe4, 0x22, 0xf3, 0x04, 0x33, 0x5e, + 0x49, 0xb5, 0x1e, 0x76, 0x64, 0xee, 0x4a, 0x80, 0xfe, 0x45, 0xda, 0x52, 0x2b, 0xbb, 0x61, 0x7b, + 0x3e, 0x8c, 0xd7, 0x50, 0x59, 0x53, 0xab, 0x03, 0x8a, 0x67, 0x93, 0x37, 0x98, 0x7a, 0x64, 0xe4, + 0x25, 0x63, 0x9f, 0x0e, 0x13, 0x35, 0xc5, 0x2b, 0x6d, 0x24, 0x62, 0x4f, 0x17, 0xfc, 0x27, 0x70, + 0x9c, 0xca, 0xa7, 0x3e, 0x65, 0x21, 0x27, 0xde, 0x4b, 0xc2, 0x77, 0xf2, 0x05, 0xca, 0x1b, 0x11, + 0xd4, 0x5b, 0x14, 0xfb, 0x0c, 0xcb, 0x66, 0x13, 0xde, 0xf6, 0xcd, 0x42, 0x66, 0xc7, 0x2c, 0x70, + 0xd9, 0x29, 0x14, 0x24, 0x53, 0x2a, 0x91, 0xfa, 0x8e, 0xbc, 0xc5, 0x77, 0xe3, 0x09, 0x3c, 0xde, + 0xe1, 0x4d, 0xf6, 0xb7, 0xf1, 0x0d, 0x94, 0x84, 0xa0, 0x17, 0x8f, 0xc8, 0x7d, 0xe7, 0xd0, 0x78, + 0x0a, 0xc7, 0x17, 0x61, 0xc2, 0xee, 0x11, 0x3d, 0x47, 0x9f, 0xc3, 0x2f, 0xb6, 0xd0, 0x6a, 0xba, + 0xbe, 0x84, 0x83, 0x30, 0x1e, 0x86, 0xc1, 0xe2, 0x74, 0x3f, 0xde, 0xce, 0x82, 0x47, 0x66, 0x9c, + 0x43, 0xbd, 0x8d, 0x23, 0x7c, 0x2f, 0xc6, 0xf6, 0x45, 0xfd, 0x04, 0x1e, 0xef, 0xb0, 0xa1, 0x78, + 0x78, 0x01, 0x45, 0x77, 0x1e, 0x07, 0x2e, 0xf3, 0xe5, 0x1b, 0x32, 0x8f, 0x03, 0x6f, 0x3a, 0xf6, + 0x93, 0xdd, 0x87, 0x14, 0x87, 0xf6, 0xf9, 0x57, 0x5e, 0xea, 0x60, 0x46, 0x29, 0x8e, 0x99, 0x27, + 0x74, 0x58, 0x38, 0xc1, 0x09, 0xf3, 0x27, 0x53, 0xe1, 0x34, 0x6b, 0xb4, 0xe1, 0xc3, 0x36, 0x4e, + 0x02, 0x1a, 0xbe, 0xf9, 0x7f, 0x42, 0x7f, 0x0b, 0x4f, 0x76, 0x5a, 0x51, 0x34, 0x2e, 0x7b, 0x41, + 0xbb, 0xbb, 0x17, 0xf4, 0x53, 0x28, 0x89, 0x30, 0x13, 0xe6, 0xa7, 0xef, 0xe9, 0xa3, 0xad, 0xcc, + 0x38, 0x09, 0x8d, 0x7f, 0x65, 0xa0, 0xb4, 0xbc, 0x17, 0x0f, 0xa1, 0x7c, 0xd9, 0x1c, 0xb4, 0x7e, + 0xf0, 0x9e, 0x5f, 0x99, 0xce, 0x2b, 0xa4, 0xe9, 0xc7, 0xa0, 0x4b, 0x41, 0xff, 0x07, 0xa7, 0xe9, + 0x9a, 0x4a, 0x9e, 0xd1, 0x6b, 0x00, 0x03, 0xd3, 0xb9, 0x54, 0xbf, 0xb3, 0x5c, 0xd1, 0x69, 0x5a, + 0xdd, 0x14, 0x90, 0xd3, 0x11, 0x54, 0xfa, 0x8e, 0xd9, 0xe9, 0x5d, 0x2b, 0x49, 0x9e, 0xab, 0x9c, + 0xdb, 0xf6, 0x85, 0xfa, 0x5d, 0xd0, 0x1f, 0xc1, 0x83, 0x96, 0x6d, 0xb9, 0x03, 0xcf, 0x6d, 0xd9, + 0x4e, 0xaa, 0x78, 0xa0, 0xd7, 0xe1, 0xa8, 0x73, 0x65, 0xb5, 0x06, 0x3d, 0xdb, 0x5a, 0xfb, 0x52, + 0xe4, 0x26, 0x2d, 0xd3, 0x1d, 0x98, 0x6d, 0x25, 0xe1, 0x1c, 0xd6, 0x5e, 0xf6, 0x2e, 0xda, 0xad, + 0xa6, 0x93, 0xca, 0x40, 0x7f, 0x08, 0x87, 0x32, 0xe2, 0xe6, 0x45, 0xea, 0xab, 0xac, 0x7f, 0x08, + 0xc7, 0x5d, 0xd3, 0xf6, 0xce, 0xed, 0x2b, 0xab, 0xdd, 0xb3, 0xba, 0xde, 0xb9, 0x9d, 0xc6, 0xc5, + 0x1f, 0x09, 0x9d, 0x7f, 0x6b, 0xf7, 0xdc, 0x41, 0xd3, 0x6a, 0xa5, 0xee, 0xaa, 0x3c, 0x3e, 0x2e, + 0xef, 0xdb, 0x17, 0xaf, 0xba, 0xb6, 0xa5, 0xc4, 0x35, 0x9e, 0x29, 0xcf, 0xdc, 0x55, 0x82, 0x43, + 0x1e, 0x96, 0x79, 0xdd, 0x73, 0x07, 0xa9, 0x04, 0x35, 0x4e, 0xa0, 0xba, 0xf6, 0x17, 0x05, 0xbd, + 0x00, 0x19, 0xdb, 0x41, 0x9a, 0x7e, 0x00, 0xd9, 0xa6, 0xd5, 0x46, 0x99, 0x06, 0x55, 0x67, 0xb2, + 0xb8, 0xed, 0x1e, 0xc2, 0xa1, 0x4c, 0xf4, 0xd2, 0x6e, 0x9b, 0x9e, 0x65, 0x5b, 0x26, 0xd2, 0x78, + 0x6a, 0x2b, 0xc2, 0xe6, 0x8b, 0x2e, 0xca, 0x6c, 0xc8, 0x2e, 0x9b, 0xd7, 0x28, 0xab, 0x1f, 0x01, + 0x5a, 0x91, 0x0d, 0xec, 0x41, 0xf3, 0x02, 0xe5, 0x36, 0x91, 0x3d, 0x0b, 0xe5, 0x1b, 0xdf, 0x42, + 0x69, 0x79, 0x74, 0x72, 0x80, 0xed, 0x0c, 0x3c, 0xdb, 0x69, 0x9b, 0x8e, 0xd7, 0x74, 0x5b, 0xe8, + 
0x03, 0x11, 0xc7, 0x52, 0xd6, 0x36, 0xdd, 0x16, 0xd2, 0x1a, 0x2d, 0x28, 0x2e, 0x8e, 0xd0, 0x07, + 0x50, 0x15, 0x80, 0x85, 0xd1, 0x0f, 0x36, 0x44, 0xcd, 0x6b, 0xa4, 0xad, 0x8b, 0x44, 0xe0, 0x8d, + 0xef, 0xd7, 0x0e, 0x68, 0x75, 0x33, 0xa0, 0x35, 0xd6, 0x9b, 0x0e, 0x0f, 0x61, 0xb3, 0x16, 0xfd, + 0x8b, 0x26, 0x67, 0xa3, 0xd1, 0x03, 0xb4, 0x75, 0x75, 0xd4, 0x00, 0x1c, 0x73, 0x70, 0xe5, 0x58, + 0xbc, 0xd2, 0x48, 0xe3, 0x16, 0xd5, 0x6f, 0xb7, 0x6f, 0xb6, 0x7a, 0x9d, 0x9e, 0xd9, 0x46, 0x19, + 0xd1, 0x98, 0x52, 0x2a, 0x88, 0xcd, 0x36, 0xbe, 0x57, 0x6b, 0x3f, 0x7d, 0x27, 0x8b, 0x90, 0x6b, + 0xdb, 0x2d, 0x17, 0x69, 0x7a, 0x09, 0xf2, 0x1d, 0xc7, 0x7c, 0xee, 0xa2, 0x8c, 0x5e, 0x85, 0x52, + 0xdf, 0x76, 0x7b, 0xbc, 0x09, 0x5d, 0x94, 0xd5, 0xcb, 0x70, 0x60, 0x77, 0x3a, 0xae, 0x39, 0x70, + 0x51, 0xae, 0xf1, 0x5a, 0xfd, 0x6f, 0x45, 0x04, 0x51, 0x84, 0xdc, 0x85, 0x6d, 0x75, 0x91, 0xa6, + 0x03, 0x14, 0xda, 0xf6, 0xd5, 0xf9, 0x85, 0x89, 0x32, 0x1c, 0xcf, 0x5b, 0xdd, 0x6c, 0x5a, 0x52, + 0xf9, 0x47, 0xf3, 0xd5, 0x4b, 0xdb, 0x69, 0xa3, 0x1c, 0xc7, 0x0f, 0xcc, 0xeb, 0x01, 0xca, 0x73, + 0xbc, 0xec, 0x66, 0x54, 0xe0, 0xee, 0x64, 0xab, 0xf5, 0xac, 0x01, 0x3a, 0x68, 0x7c, 0x0a, 0xa5, + 0xe5, 0xe6, 0x29, 0x42, 0xae, 0x73, 0x25, 0x12, 0x2c, 0x42, 0xae, 0x67, 0xb5, 0x1c, 0x94, 0xf9, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0x5e, 0x78, 0x2f, 0x39, 0x13, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto new file mode 100644 index 00000000..35bc1224 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/search.proto @@ -0,0 +1,328 @@ +syntax = "proto2"; +package otsprotocol; + +enum QueryType { + MATCH_QUERY = 1; + MATCH_PHRASE_QUERY = 2; + TERM_QUERY = 3; + RANGE_QUERY = 4; + PREFIX_QUERY = 5; + BOOL_QUERY = 6; + CONST_SCORE_QUERY = 7; + FUNCTION_SCORE_QUERY = 8; + NESTED_QUERY = 9; + WILDCARD_QUERY = 10; + MATCH_ALL_QUERY = 11; + GEO_BOUNDING_BOX_QUERY = 12; + GEO_DISTANCE_QUERY = 13; + GEO_POLYGON_QUERY = 14; + TERMS_QUERY = 15; + EXISTS_QUERY = 16; +} + +enum QueryOperator { + OR = 1; + AND = 2; +} + +message MatchQuery { + optional string field_name = 1; + optional string text = 2; + optional int32 minimum_should_match = 3; + optional QueryOperator operator = 4; +} + +message MatchPhraseQuery { + optional string field_name = 1; + optional string text = 2; +} + +message MatchAllQuery { +} + +message TermQuery { + optional string field_name = 1; + optional bytes term = 2; +} + +message TermsQuery { + optional string field_name = 1; + repeated bytes terms = 2; +} + +message RangeQuery { + optional string field_name = 1; + optional bytes range_from = 2; // variant value + optional bytes range_to = 3; // variant value + optional bool include_lower = 4; + optional bool include_upper = 5; +} + +message PrefixQuery { + optional string field_name = 1; + optional string prefix = 2; +} + +message WildcardQuery { + optional string field_name = 1; + optional string value = 2; +} + +message BoolQuery { + repeated Query must_queries = 1; + repeated Query must_not_queries = 2; + repeated Query filter_queries = 3; + repeated Query should_queries = 4; + optional int32 minimum_should_match = 5; +} + +message ConstScoreQuery { + optional Query filter = 1; +} + +message FieldValueFactor { + optional string field_name = 1; +} + +message FunctionScoreQuery { + optional Query query = 1; + optional FieldValueFactor 
field_value_factor = 2; +} + +enum ScoreMode { + SCORE_MODE_NONE = 1; + SCORE_MODE_AVG = 2; + SCORE_MODE_MAX = 3; + SCORE_MODE_TOTAL = 4; + SCORE_MODE_MIN = 5; +} + +message NestedQuery { + optional string path = 1; + optional Query query = 2; + optional ScoreMode score_mode = 3; +} + +message GeoBoundingBoxQuery { + optional string field_name = 1; + optional string top_left = 2; + optional string bottom_right = 3; +} + +message GeoDistanceQuery { + optional string field_name = 1; + optional string center_point = 2; + optional double distance = 3; +} + +message GeoPolygonQuery { + optional string field_name = 1; + repeated string points = 2; +} + +message ExistsQuery { + optional string field_name = 1; +} + +message Query { + optional QueryType type = 1; + optional bytes query = 2; +} + +message Collapse { + optional string field_name = 1; +} + +message NestedFilter { + optional string path = 1; + optional Query filter = 2; +} + +enum SortOrder { + SORT_ORDER_ASC = 0; + SORT_ORDER_DESC = 1; +} + +enum SortMode { + SORT_MODE_MIN = 0; + SORT_MODE_MAX = 1; + SORT_MODE_AVG = 2; +} + +message ScoreSort { + optional SortOrder order = 1; +} + +message FieldSort { + optional string field_name = 1; + optional SortOrder order = 2; + optional SortMode mode = 3; + optional NestedFilter nested_filter = 4; +} + +enum GeoDistanceType { + GEO_DISTANCE_ARC = 0; + GEO_DISTANCE_PLANE = 1; +} + +message GeoDistanceSort { + optional string field_name = 1; + repeated string points = 2; + optional SortOrder order = 3; + optional SortMode mode = 4; + optional GeoDistanceType distance_type = 5; + optional NestedFilter nested_filter = 6; +} + +message PrimaryKeySort { + optional SortOrder order = 1; +} + +message Sorter { + optional FieldSort field_sort = 1; + optional GeoDistanceSort geo_distance_sort = 2; + optional ScoreSort score_sort = 3; + optional PrimaryKeySort pk_sort = 4; +} + +message Sort { + repeated Sorter sorter = 1; +} + +message SearchQuery { + optional int32 offset = 1; + optional int32 limit = 2; + optional Query query = 4; + optional Collapse collapse = 5; + optional Sort sort = 6; + optional bool getTotalCount = 8; + optional bytes token = 9; +} + +enum ColumnReturnType { + RETURN_ALL = 1; + RETURN_SPECIFIED = 2; + RETURN_NONE = 3; +} + +message ColumnsToGet { + optional ColumnReturnType return_type = 1; + repeated string column_names = 2; +} + +message SearchRequest { + optional string table_name = 1; + optional string index_name = 2; + optional ColumnsToGet columns_to_get = 3; + optional bytes search_query = 4; + repeated bytes routing_values = 5; +} + +/** + * Response section: + **/ + +message SearchResponse { + optional int64 total_hits = 1; + repeated bytes rows = 2; + optional bool is_all_succeeded = 3; + optional bytes next_token = 6; +} + +/* Create Search Index */ + +enum IndexOptions { + DOCS = 1; + FREQS = 2; + POSITIONS = 3; + OFFSETS = 4; +} + +enum FieldType { + LONG = 1; + DOUBLE = 2; + BOOLEAN = 3; + KEYWORD = 4; + TEXT = 5; + NESTED = 6; + GEO_POINT = 7; +} + +message FieldSchema { + optional string field_name = 1; + optional FieldType field_type = 2; + optional IndexOptions index_options = 3; + optional string analyzer = 4; + optional bool index = 5; + optional bool doc_values = 6; + optional bool store = 7; + repeated FieldSchema field_schemas = 8; // only for nested type + optional bool is_array = 9; +} + +message IndexSchema { + repeated FieldSchema field_schemas = 1; + optional IndexSetting index_setting = 2; + optional Sort index_sort = 3; +} + +message IndexSetting { + 
optional int32 number_of_shards = 1; + repeated string routing_fields = 2; + optional int32 routing_partition_size = 3; +} + +message CreateSearchIndexRequest { + required string table_name = 1; + required string index_name = 2; + optional IndexSchema schema = 3; +} + +message CreateSearchIndexResponse { +} + +/* List Search Index */ + +message IndexInfo { + optional string table_name = 1; + optional string index_name = 2; +} + +message ListSearchIndexRequest { + optional string table_name = 1; +} +message ListSearchIndexResponse { + repeated IndexInfo indices = 1; +} + +/* Delete Search Index */ + +message DeleteSearchIndexRequest { + optional string table_name = 1; + optional string index_name = 2; +} + +message DeleteSearchIndexResponse { +} + +/* Describe Search Index */ + +enum SyncPhase { + FULL = 1; + INCR = 2; +} + +message SyncStat { + optional SyncPhase sync_phase = 1; + optional int64 current_sync_timestamp = 2; // Sync progress; see TunnelService. +} + +message DescribeSearchIndexRequest { + optional string table_name = 1; + optional string index_name = 2; +} + +message DescribeSearchIndexResponse { + optional IndexSchema schema = 1; + optional SyncStat sync_stat = 2; +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go new file mode 100644 index 00000000..de5d507d --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.pb.go @@ -0,0 +1,3141 @@ +// Code generated by protoc-gen-go. +// source: table_store.proto +// DO NOT EDIT! + +package otsprotocol + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type PrimaryKeyType int32 + +const ( + PrimaryKeyType_INTEGER PrimaryKeyType = 1 + PrimaryKeyType_STRING PrimaryKeyType = 2 + PrimaryKeyType_BINARY PrimaryKeyType = 3 +) + +var PrimaryKeyType_name = map[int32]string{ + 1: "INTEGER", + 2: "STRING", + 3: "BINARY", +} +var PrimaryKeyType_value = map[string]int32{ + "INTEGER": 1, + "STRING": 2, + "BINARY": 3, +} + +func (x PrimaryKeyType) Enum() *PrimaryKeyType { + p := new(PrimaryKeyType) + *p = x + return p +} +func (x PrimaryKeyType) String() string { + return proto.EnumName(PrimaryKeyType_name, int32(x)) +} +func (x *PrimaryKeyType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PrimaryKeyType_value, data, "PrimaryKeyType") + if err != nil { + return err + } + *x = PrimaryKeyType(value) + return nil +} +func (PrimaryKeyType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +type PrimaryKeyOption int32 + +const ( + PrimaryKeyOption_AUTO_INCREMENT PrimaryKeyOption = 1 +) + +var PrimaryKeyOption_name = map[int32]string{ + 1: "AUTO_INCREMENT", +} +var PrimaryKeyOption_value = map[string]int32{ + "AUTO_INCREMENT": 1, +} + +func (x PrimaryKeyOption) Enum() *PrimaryKeyOption { + p := new(PrimaryKeyOption) + *p = x + return p +} +func (x PrimaryKeyOption) String() string { + return proto.EnumName(PrimaryKeyOption_name, int32(x)) +} +func (x *PrimaryKeyOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PrimaryKeyOption_value, data, "PrimaryKeyOption") + if err != nil { + return err + } + *x = PrimaryKeyOption(value) + return nil +} +func (PrimaryKeyOption) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +type BloomFilterType int32 + +const ( + BloomFilterType_NONE BloomFilterType = 1 + BloomFilterType_CELL BloomFilterType = 2 + BloomFilterType_ROW BloomFilterType = 3 +) + +var BloomFilterType_name = map[int32]string{ + 1: "NONE", + 2: "CELL", + 3: "ROW", +} +var BloomFilterType_value = map[string]int32{ + "NONE": 1, + "CELL": 2, + "ROW": 3, +} + +func (x BloomFilterType) Enum() *BloomFilterType { + p := new(BloomFilterType) + *p = x + return p +} +func (x BloomFilterType) String() string { + return proto.EnumName(BloomFilterType_name, int32(x)) +} +func (x *BloomFilterType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BloomFilterType_value, data, "BloomFilterType") + if err != nil { + return err + } + *x = BloomFilterType(value) + return nil +} +func (BloomFilterType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +// * +// Table status changes correspond only to user operations; internal events such as machine failover do not change the table status. +// There are three considerations: +// 1. In typical scenarios, users check the table status only after they have performed a modification on the table. +// 2. There is a delay between an access failure caused by an internal machine failover and the moment the user can observe a table status change, so the table's unavailable state cannot be matched exactly to the status the user sees. +// 3. After an internal machine failover it is not really the whole table's status that changes but a partition's; the corresponding table status would be PARTIAL_FAILOVER, and since that partial granularity cannot be expressed, it would only confuse users further. +type TableStatus int32 + +const ( + TableStatus_ACTIVE TableStatus = 1 + TableStatus_INACTIVE TableStatus = 2 + TableStatus_LOADING TableStatus = 3 + TableStatus_UNLOADING TableStatus = 4 + TableStatus_UPDATING TableStatus = 5 +) + +var TableStatus_name = map[int32]string{ + 1: "ACTIVE", + 2: "INACTIVE", + 3: "LOADING", + 4: "UNLOADING", + 5: "UPDATING", +} +var TableStatus_value = map[string]int32{ + "ACTIVE": 1, + "INACTIVE": 2, + "LOADING": 3, + "UNLOADING": 4, + "UPDATING": 5, +} + +func (x TableStatus) Enum() *TableStatus { + p := new(TableStatus) + *p = x + return p +} +func (x TableStatus) String() string { + return 
proto.EnumName(TableStatus_name, int32(x)) +} +func (x *TableStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(TableStatus_value, data, "TableStatus") + if err != nil { + return err + } + *x = TableStatus(value) + return nil +} +func (TableStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +type RowExistenceExpectation int32 + +const ( + RowExistenceExpectation_IGNORE RowExistenceExpectation = 0 + RowExistenceExpectation_EXPECT_EXIST RowExistenceExpectation = 1 + RowExistenceExpectation_EXPECT_NOT_EXIST RowExistenceExpectation = 2 +) + +var RowExistenceExpectation_name = map[int32]string{ + 0: "IGNORE", + 1: "EXPECT_EXIST", + 2: "EXPECT_NOT_EXIST", +} +var RowExistenceExpectation_value = map[string]int32{ + "IGNORE": 0, + "EXPECT_EXIST": 1, + "EXPECT_NOT_EXIST": 2, +} + +func (x RowExistenceExpectation) Enum() *RowExistenceExpectation { + p := new(RowExistenceExpectation) + *p = x + return p +} +func (x RowExistenceExpectation) String() string { + return proto.EnumName(RowExistenceExpectation_name, int32(x)) +} +func (x *RowExistenceExpectation) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RowExistenceExpectation_value, data, "RowExistenceExpectation") + if err != nil { + return err + } + *x = RowExistenceExpectation(value) + return nil +} +func (RowExistenceExpectation) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +type ReturnType int32 + +const ( + ReturnType_RT_NONE ReturnType = 0 + ReturnType_RT_PK ReturnType = 1 + ReturnType_RT_AFTER_MODIFY ReturnType = 2 +) + +var ReturnType_name = map[int32]string{ + 0: "RT_NONE", + 1: "RT_PK", + 2: "RT_AFTER_MODIFY", +} +var ReturnType_value = map[string]int32{ + "RT_NONE": 0, + "RT_PK": 1, + "RT_AFTER_MODIFY": 2, +} + +func (x ReturnType) Enum() *ReturnType { + p := new(ReturnType) + *p = x + return p +} +func (x ReturnType) String() string { + return proto.EnumName(ReturnType_name, int32(x)) +} +func (x *ReturnType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ReturnType_value, data, "ReturnType") + if err != nil { + return err + } + *x = ReturnType(value) + return nil +} +func (ReturnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +type OperationType int32 + +const ( + OperationType_PUT OperationType = 1 + OperationType_UPDATE OperationType = 2 + OperationType_DELETE OperationType = 3 +) + +var OperationType_name = map[int32]string{ + 1: "PUT", + 2: "UPDATE", + 3: "DELETE", +} +var OperationType_value = map[string]int32{ + "PUT": 1, + "UPDATE": 2, + "DELETE": 3, +} + +func (x OperationType) Enum() *OperationType { + p := new(OperationType) + *p = x + return p +} +func (x OperationType) String() string { + return proto.EnumName(OperationType_name, int32(x)) +} +func (x *OperationType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(OperationType_value, data, "OperationType") + if err != nil { + return err + } + *x = OperationType(value) + return nil +} +func (OperationType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +// ############################################# GetRange ############################################# +type Direction int32 + +const ( + Direction_FORWARD Direction = 0 + Direction_BACKWARD Direction = 1 +) + +var Direction_name = map[int32]string{ + 0: "FORWARD", + 1: "BACKWARD", +} +var Direction_value = map[string]int32{ + "FORWARD": 0, + "BACKWARD": 1, +} + +func (x Direction) Enum() *Direction { 
+ p := new(Direction) + *p = x + return p +} +func (x Direction) String() string { + return proto.EnumName(Direction_name, int32(x)) +} +func (x *Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Direction_value, data, "Direction") + if err != nil { + return err + } + *x = Direction(value) + return nil +} +func (Direction) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +type StreamStatus int32 + +const ( + StreamStatus_STREAM_ENABLING StreamStatus = 1 + StreamStatus_STREAM_ACTIVE StreamStatus = 2 +) + +var StreamStatus_name = map[int32]string{ + 1: "STREAM_ENABLING", + 2: "STREAM_ACTIVE", +} +var StreamStatus_value = map[string]int32{ + "STREAM_ENABLING": 1, + "STREAM_ACTIVE": 2, +} + +func (x StreamStatus) Enum() *StreamStatus { + p := new(StreamStatus) + *p = x + return p +} +func (x StreamStatus) String() string { + return proto.EnumName(StreamStatus_name, int32(x)) +} +func (x *StreamStatus) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(StreamStatus_value, data, "StreamStatus") + if err != nil { + return err + } + *x = StreamStatus(value) + return nil +} +func (StreamStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +type ActionType int32 + +const ( + ActionType_PUT_ROW ActionType = 1 + ActionType_UPDATE_ROW ActionType = 2 + ActionType_DELETE_ROW ActionType = 3 +) + +var ActionType_name = map[int32]string{ + 1: "PUT_ROW", + 2: "UPDATE_ROW", + 3: "DELETE_ROW", +} +var ActionType_value = map[string]int32{ + "PUT_ROW": 1, + "UPDATE_ROW": 2, + "DELETE_ROW": 3, +} + +func (x ActionType) Enum() *ActionType { + p := new(ActionType) + *p = x + return p +} +func (x ActionType) String() string { + return proto.EnumName(ActionType_name, int32(x)) +} +func (x *ActionType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ActionType_value, data, "ActionType") + if err != nil { + return err + } + *x = ActionType(value) + return nil +} +func (ActionType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +type DefinedColumnType int32 + +const ( + DefinedColumnType_DCT_INTEGER DefinedColumnType = 1 + DefinedColumnType_DCT_DOUBLE DefinedColumnType = 2 + DefinedColumnType_DCT_BOOLEAN DefinedColumnType = 3 + DefinedColumnType_DCT_STRING DefinedColumnType = 4 + // field 5 is reserved for date type, not supported yet + // field 6 is reserved for decimal type, not supported yet + DefinedColumnType_DCT_BLOB DefinedColumnType = 7 +) + +var DefinedColumnType_name = map[int32]string{ + 1: "DCT_INTEGER", + 2: "DCT_DOUBLE", + 3: "DCT_BOOLEAN", + 4: "DCT_STRING", + 7: "DCT_BLOB", +} +var DefinedColumnType_value = map[string]int32{ + "DCT_INTEGER": 1, + "DCT_DOUBLE": 2, + "DCT_BOOLEAN": 3, + "DCT_STRING": 4, + "DCT_BLOB": 7, +} + +func (x DefinedColumnType) Enum() *DefinedColumnType { + p := new(DefinedColumnType) + *p = x + return p +} +func (x DefinedColumnType) String() string { + return proto.EnumName(DefinedColumnType_name, int32(x)) +} +func (x *DefinedColumnType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(DefinedColumnType_value, data, "DefinedColumnType") + if err != nil { + return err + } + *x = DefinedColumnType(value) + return nil +} +func (DefinedColumnType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +type IndexUpdateMode int32 + +const ( + IndexUpdateMode_IUM_ASYNC_INDEX IndexUpdateMode = 0 + IndexUpdateMode_IUM_SYNC_INDEX IndexUpdateMode = 1 +) + +var IndexUpdateMode_name = 
map[int32]string{ + 0: "IUM_ASYNC_INDEX", + 1: "IUM_SYNC_INDEX", +} +var IndexUpdateMode_value = map[string]int32{ + "IUM_ASYNC_INDEX": 0, + "IUM_SYNC_INDEX": 1, +} + +func (x IndexUpdateMode) Enum() *IndexUpdateMode { + p := new(IndexUpdateMode) + *p = x + return p +} +func (x IndexUpdateMode) String() string { + return proto.EnumName(IndexUpdateMode_name, int32(x)) +} +func (x *IndexUpdateMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexUpdateMode_value, data, "IndexUpdateMode") + if err != nil { + return err + } + *x = IndexUpdateMode(value) + return nil +} +func (IndexUpdateMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +type IndexType int32 + +const ( + IndexType_IT_GLOBAL_INDEX IndexType = 0 + IndexType_IT_LOCAL_INDEX IndexType = 1 +) + +var IndexType_name = map[int32]string{ + 0: "IT_GLOBAL_INDEX", + 1: "IT_LOCAL_INDEX", +} +var IndexType_value = map[string]int32{ + "IT_GLOBAL_INDEX": 0, + "IT_LOCAL_INDEX": 1, +} + +func (x IndexType) Enum() *IndexType { + p := new(IndexType) + *p = x + return p +} +func (x IndexType) String() string { + return proto.EnumName(IndexType_name, int32(x)) +} +func (x *IndexType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(IndexType_value, data, "IndexType") + if err != nil { + return err + } + *x = IndexType(value) + return nil +} +func (IndexType) EnumDescriptor() ([]byte, []int) { return fileDescriptor2, []int{12} } + +type Error struct { + Code *string `protobuf:"bytes,1,req,name=code" json:"code,omitempty"` + Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Error) GetCode() string { + if m != nil && m.Code != nil { + return *m.Code + } + return "" +} + +func (m *Error) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type PrimaryKeySchema struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type *PrimaryKeyType `protobuf:"varint,2,req,name=type,enum=otsprotocol.PrimaryKeyType" json:"type,omitempty"` + Option *PrimaryKeyOption `protobuf:"varint,3,opt,name=option,enum=otsprotocol.PrimaryKeyOption" json:"option,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PrimaryKeySchema) Reset() { *m = PrimaryKeySchema{} } +func (m *PrimaryKeySchema) String() string { return proto.CompactTextString(m) } +func (*PrimaryKeySchema) ProtoMessage() {} +func (*PrimaryKeySchema) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *PrimaryKeySchema) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *PrimaryKeySchema) GetType() PrimaryKeyType { + if m != nil && m.Type != nil { + return *m.Type + } + return PrimaryKeyType_INTEGER +} + +func (m *PrimaryKeySchema) GetOption() PrimaryKeyOption { + if m != nil && m.Option != nil { + return *m.Option + } + return PrimaryKeyOption_AUTO_INCREMENT +} + +type PartitionRange struct { + Begin []byte `protobuf:"bytes,1,req,name=begin" json:"begin,omitempty"` + End []byte `protobuf:"bytes,2,req,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PartitionRange) Reset() { *m = PartitionRange{} } +func (m *PartitionRange) String() 
string { return proto.CompactTextString(m) } +func (*PartitionRange) ProtoMessage() {} +func (*PartitionRange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{2} } + +func (m *PartitionRange) GetBegin() []byte { + if m != nil { + return m.Begin + } + return nil +} + +func (m *PartitionRange) GetEnd() []byte { + if m != nil { + return m.End + } + return nil +} + +type TableOptions struct { + TimeToLive *int32 `protobuf:"varint,1,opt,name=time_to_live" json:"time_to_live,omitempty"` + MaxVersions *int32 `protobuf:"varint,2,opt,name=max_versions" json:"max_versions,omitempty"` + BloomFilterType *BloomFilterType `protobuf:"varint,3,opt,name=bloom_filter_type,enum=otsprotocol.BloomFilterType" json:"bloom_filter_type,omitempty"` + BlockSize *int32 `protobuf:"varint,4,opt,name=block_size" json:"block_size,omitempty"` + DeviationCellVersionInSec *int64 `protobuf:"varint,5,opt,name=deviation_cell_version_in_sec" json:"deviation_cell_version_in_sec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (m *TableOptions) String() string { return proto.CompactTextString(m) } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{3} } + +func (m *TableOptions) GetTimeToLive() int32 { + if m != nil && m.TimeToLive != nil { + return *m.TimeToLive + } + return 0 +} + +func (m *TableOptions) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *TableOptions) GetBloomFilterType() BloomFilterType { + if m != nil && m.BloomFilterType != nil { + return *m.BloomFilterType + } + return BloomFilterType_NONE +} + +func (m *TableOptions) GetBlockSize() int32 { + if m != nil && m.BlockSize != nil { + return *m.BlockSize + } + return 0 +} + +func (m *TableOptions) GetDeviationCellVersionInSec() int64 { + if m != nil && m.DeviationCellVersionInSec != nil { + return *m.DeviationCellVersionInSec + } + return 0 +} + +type TableMeta struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey []*PrimaryKeySchema `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + DefinedColumn []*DefinedColumnSchema `protobuf:"bytes,3,rep,name=defined_column" json:"defined_column,omitempty"` + IndexMeta []*IndexMeta `protobuf:"bytes,4,rep,name=index_meta" json:"index_meta,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableMeta) Reset() { *m = TableMeta{} } +func (m *TableMeta) String() string { return proto.CompactTextString(m) } +func (*TableMeta) ProtoMessage() {} +func (*TableMeta) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{4} } + +func (m *TableMeta) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableMeta) GetPrimaryKey() []*PrimaryKeySchema { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *TableMeta) GetDefinedColumn() []*DefinedColumnSchema { + if m != nil { + return m.DefinedColumn + } + return nil +} + +func (m *TableMeta) GetIndexMeta() []*IndexMeta { + if m != nil { + return m.IndexMeta + } + return nil +} + +type Condition struct { + RowExistence *RowExistenceExpectation `protobuf:"varint,1,req,name=row_existence,enum=otsprotocol.RowExistenceExpectation" json:"row_existence,omitempty"` + ColumnCondition []byte `protobuf:"bytes,2,opt,name=column_condition" json:"column_condition,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + 
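+// exampleExpectExistCondition is an editor's illustrative sketch, not part of
+// the upstream generated file: it shows how a caller typically builds a
+// Condition for a conditional write using the generated Enum() helper defined
+// above, so that a PutRow/UpdateRow/DeleteRow applies only when the row
+// already exists.
+func exampleExpectExistCondition() *Condition {
+	return &Condition{
+		RowExistence: RowExistenceExpectation_EXPECT_EXIST.Enum(),
+	}
+}
+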
+func (m *Condition) Reset() { *m = Condition{} } +func (m *Condition) String() string { return proto.CompactTextString(m) } +func (*Condition) ProtoMessage() {} +func (*Condition) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{5} } + +func (m *Condition) GetRowExistence() RowExistenceExpectation { + if m != nil && m.RowExistence != nil { + return *m.RowExistence + } + return RowExistenceExpectation_IGNORE +} + +func (m *Condition) GetColumnCondition() []byte { + if m != nil { + return m.ColumnCondition + } + return nil +} + +type CapacityUnit struct { + Read *int32 `protobuf:"varint,1,opt,name=read" json:"read,omitempty"` + Write *int32 `protobuf:"varint,2,opt,name=write" json:"write,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CapacityUnit) Reset() { *m = CapacityUnit{} } +func (m *CapacityUnit) String() string { return proto.CompactTextString(m) } +func (*CapacityUnit) ProtoMessage() {} +func (*CapacityUnit) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{6} } + +func (m *CapacityUnit) GetRead() int32 { + if m != nil && m.Read != nil { + return *m.Read + } + return 0 +} + +func (m *CapacityUnit) GetWrite() int32 { + if m != nil && m.Write != nil { + return *m.Write + } + return 0 +} + +type ReservedThroughputDetails struct { + CapacityUnit *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"` + LastIncreaseTime *int64 `protobuf:"varint,2,req,name=last_increase_time" json:"last_increase_time,omitempty"` + LastDecreaseTime *int64 `protobuf:"varint,3,opt,name=last_decrease_time" json:"last_decrease_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReservedThroughputDetails) Reset() { *m = ReservedThroughputDetails{} } +func (m *ReservedThroughputDetails) String() string { return proto.CompactTextString(m) } +func (*ReservedThroughputDetails) ProtoMessage() {} +func (*ReservedThroughputDetails) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{7} } + +func (m *ReservedThroughputDetails) GetCapacityUnit() *CapacityUnit { + if m != nil { + return m.CapacityUnit + } + return nil +} + +func (m *ReservedThroughputDetails) GetLastIncreaseTime() int64 { + if m != nil && m.LastIncreaseTime != nil { + return *m.LastIncreaseTime + } + return 0 +} + +func (m *ReservedThroughputDetails) GetLastDecreaseTime() int64 { + if m != nil && m.LastDecreaseTime != nil { + return *m.LastDecreaseTime + } + return 0 +} + +type ReservedThroughput struct { + CapacityUnit *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReservedThroughput) Reset() { *m = ReservedThroughput{} } +func (m *ReservedThroughput) String() string { return proto.CompactTextString(m) } +func (*ReservedThroughput) ProtoMessage() {} +func (*ReservedThroughput) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{8} } + +func (m *ReservedThroughput) GetCapacityUnit() *CapacityUnit { + if m != nil { + return m.CapacityUnit + } + return nil +} + +type ConsumedCapacity struct { + CapacityUnit *CapacityUnit `protobuf:"bytes,1,req,name=capacity_unit" json:"capacity_unit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConsumedCapacity) Reset() { *m = ConsumedCapacity{} } +func (m *ConsumedCapacity) String() string { return proto.CompactTextString(m) } +func (*ConsumedCapacity) ProtoMessage() {} +func (*ConsumedCapacity) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{9} } + +func (m *ConsumedCapacity) 
GetCapacityUnit() *CapacityUnit { + if m != nil { + return m.CapacityUnit + } + return nil +} + +type StreamSpecification struct { + EnableStream *bool `protobuf:"varint,1,req,name=enable_stream" json:"enable_stream,omitempty"` + ExpirationTime *int32 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamSpecification) Reset() { *m = StreamSpecification{} } +func (m *StreamSpecification) String() string { return proto.CompactTextString(m) } +func (*StreamSpecification) ProtoMessage() {} +func (*StreamSpecification) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{10} } + +func (m *StreamSpecification) GetEnableStream() bool { + if m != nil && m.EnableStream != nil { + return *m.EnableStream + } + return false +} + +func (m *StreamSpecification) GetExpirationTime() int32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +type StreamDetails struct { + EnableStream *bool `protobuf:"varint,1,req,name=enable_stream" json:"enable_stream,omitempty"` + StreamId *string `protobuf:"bytes,2,opt,name=stream_id" json:"stream_id,omitempty"` + ExpirationTime *int32 `protobuf:"varint,3,opt,name=expiration_time" json:"expiration_time,omitempty"` + LastEnableTime *int64 `protobuf:"varint,4,opt,name=last_enable_time" json:"last_enable_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamDetails) Reset() { *m = StreamDetails{} } +func (m *StreamDetails) String() string { return proto.CompactTextString(m) } +func (*StreamDetails) ProtoMessage() {} +func (*StreamDetails) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{11} } + +func (m *StreamDetails) GetEnableStream() bool { + if m != nil && m.EnableStream != nil { + return *m.EnableStream + } + return false +} + +func (m *StreamDetails) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *StreamDetails) GetExpirationTime() int32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +func (m *StreamDetails) GetLastEnableTime() int64 { + if m != nil && m.LastEnableTime != nil { + return *m.LastEnableTime + } + return 0 +} + +// * +// table_meta stores the immutable schema attributes of a table; the mutable ReservedThroughput and TableOptions are split out as parameters of UpdateTable. +// After GlobalIndex and LocalIndex are added, the structure becomes: +// message CreateTableRequest { +// required TableMeta table_meta = 1; +// required ReservedThroughput reserved_throughput = 2; +// required TableOptions table_options = 3; +// repeated LocalIndex local_indexes = 4; // A LocalIndex no longer carries its own ReservedThroughput and TableOptions; it shares the primary table's configuration. +// repeated GlobalIndex global_indexes = 5; // A GlobalIndex carries its own ReservedThroughput and TableOptions. +// } +type CreateTableRequest struct { + TableMeta *TableMeta `protobuf:"bytes,1,req,name=table_meta" json:"table_meta,omitempty"` + ReservedThroughput *ReservedThroughput `protobuf:"bytes,2,req,name=reserved_throughput" json:"reserved_throughput,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,3,opt,name=table_options" json:"table_options,omitempty"` + Partitions []*PartitionRange `protobuf:"bytes,4,rep,name=partitions" json:"partitions,omitempty"` + StreamSpec *StreamSpecification `protobuf:"bytes,5,opt,name=stream_spec" json:"stream_spec,omitempty"` + IndexMetas []*IndexMeta `protobuf:"bytes,7,rep,name=index_metas" json:"index_metas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateTableRequest) Reset() { *m = CreateTableRequest{} } +func (m 
*CreateTableRequest) String() string { return proto.CompactTextString(m) } +func (*CreateTableRequest) ProtoMessage() {} +func (*CreateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{12} } + +func (m *CreateTableRequest) GetTableMeta() *TableMeta { + if m != nil { + return m.TableMeta + } + return nil +} + +func (m *CreateTableRequest) GetReservedThroughput() *ReservedThroughput { + if m != nil { + return m.ReservedThroughput + } + return nil +} + +func (m *CreateTableRequest) GetTableOptions() *TableOptions { + if m != nil { + return m.TableOptions + } + return nil +} + +func (m *CreateTableRequest) GetPartitions() []*PartitionRange { + if m != nil { + return m.Partitions + } + return nil +} + +func (m *CreateTableRequest) GetStreamSpec() *StreamSpecification { + if m != nil { + return m.StreamSpec + } + return nil +} + +func (m *CreateTableRequest) GetIndexMetas() []*IndexMeta { + if m != nil { + return m.IndexMetas + } + return nil +} + +type CreateTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateTableResponse) Reset() { *m = CreateTableResponse{} } +func (m *CreateTableResponse) String() string { return proto.CompactTextString(m) } +func (*CreateTableResponse) ProtoMessage() {} +func (*CreateTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{13} } + +// ############################################# UpdateTable ############################################# +type UpdateTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + ReservedThroughput *ReservedThroughput `protobuf:"bytes,2,opt,name=reserved_throughput" json:"reserved_throughput,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,3,opt,name=table_options" json:"table_options,omitempty"` + StreamSpec *StreamSpecification `protobuf:"bytes,4,opt,name=stream_spec" json:"stream_spec,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateTableRequest) Reset() { *m = UpdateTableRequest{} } +func (m *UpdateTableRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateTableRequest) ProtoMessage() {} +func (*UpdateTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{14} } + +func (m *UpdateTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *UpdateTableRequest) GetReservedThroughput() *ReservedThroughput { + if m != nil { + return m.ReservedThroughput + } + return nil +} + +func (m *UpdateTableRequest) GetTableOptions() *TableOptions { + if m != nil { + return m.TableOptions + } + return nil +} + +func (m *UpdateTableRequest) GetStreamSpec() *StreamSpecification { + if m != nil { + return m.StreamSpec + } + return nil +} + +type UpdateTableResponse struct { + ReservedThroughputDetails *ReservedThroughputDetails `protobuf:"bytes,1,req,name=reserved_throughput_details" json:"reserved_throughput_details,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,2,req,name=table_options" json:"table_options,omitempty"` + StreamDetails *StreamDetails `protobuf:"bytes,3,opt,name=stream_details" json:"stream_details,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateTableResponse) Reset() { *m = UpdateTableResponse{} } +func (m *UpdateTableResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateTableResponse) ProtoMessage() {} +func (*UpdateTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{15} } + +func (m 
*UpdateTableResponse) GetReservedThroughputDetails() *ReservedThroughputDetails { + if m != nil { + return m.ReservedThroughputDetails + } + return nil +} + +func (m *UpdateTableResponse) GetTableOptions() *TableOptions { + if m != nil { + return m.TableOptions + } + return nil +} + +func (m *UpdateTableResponse) GetStreamDetails() *StreamDetails { + if m != nil { + return m.StreamDetails + } + return nil +} + +// ############################################# DescribeTable ############################################# +type DescribeTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeTableRequest) Reset() { *m = DescribeTableRequest{} } +func (m *DescribeTableRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeTableRequest) ProtoMessage() {} +func (*DescribeTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{16} } + +func (m *DescribeTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type DescribeTableResponse struct { + TableMeta *TableMeta `protobuf:"bytes,1,req,name=table_meta" json:"table_meta,omitempty"` + ReservedThroughputDetails *ReservedThroughputDetails `protobuf:"bytes,2,req,name=reserved_throughput_details" json:"reserved_throughput_details,omitempty"` + TableOptions *TableOptions `protobuf:"bytes,3,req,name=table_options" json:"table_options,omitempty"` + TableStatus *TableStatus `protobuf:"varint,4,req,name=table_status,enum=otsprotocol.TableStatus" json:"table_status,omitempty"` + StreamDetails *StreamDetails `protobuf:"bytes,5,opt,name=stream_details" json:"stream_details,omitempty"` + ShardSplits [][]byte `protobuf:"bytes,6,rep,name=shard_splits" json:"shard_splits,omitempty"` + IndexMetas []*IndexMeta `protobuf:"bytes,8,rep,name=index_metas" json:"index_metas,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeTableResponse) Reset() { *m = DescribeTableResponse{} } +func (m *DescribeTableResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeTableResponse) ProtoMessage() {} +func (*DescribeTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{17} } + +func (m *DescribeTableResponse) GetTableMeta() *TableMeta { + if m != nil { + return m.TableMeta + } + return nil +} + +func (m *DescribeTableResponse) GetReservedThroughputDetails() *ReservedThroughputDetails { + if m != nil { + return m.ReservedThroughputDetails + } + return nil +} + +func (m *DescribeTableResponse) GetTableOptions() *TableOptions { + if m != nil { + return m.TableOptions + } + return nil +} + +func (m *DescribeTableResponse) GetTableStatus() TableStatus { + if m != nil && m.TableStatus != nil { + return *m.TableStatus + } + return TableStatus_ACTIVE +} + +func (m *DescribeTableResponse) GetStreamDetails() *StreamDetails { + if m != nil { + return m.StreamDetails + } + return nil +} + +func (m *DescribeTableResponse) GetShardSplits() [][]byte { + if m != nil { + return m.ShardSplits + } + return nil +} + +func (m *DescribeTableResponse) GetIndexMetas() []*IndexMeta { + if m != nil { + return m.IndexMetas + } + return nil +} + +// ############################################# ListTable ############################################# +type ListTableRequest struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableRequest) Reset() { *m = ListTableRequest{} } +func (m *ListTableRequest) String() string 
{ return proto.CompactTextString(m) } +func (*ListTableRequest) ProtoMessage() {} +func (*ListTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{18} } + +// * +// Currently only a plain list of table names is returned; whether any business scenario needs information beyond the table name is still to be discussed. +// Such information could include the reserved throughput and the table status, but only as a rough summary; detailed table information would still be fetched via DescribeTable. +type ListTableResponse struct { + TableNames []string `protobuf:"bytes,1,rep,name=table_names" json:"table_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListTableResponse) Reset() { *m = ListTableResponse{} } +func (m *ListTableResponse) String() string { return proto.CompactTextString(m) } +func (*ListTableResponse) ProtoMessage() {} +func (*ListTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{19} } + +func (m *ListTableResponse) GetTableNames() []string { + if m != nil { + return m.TableNames + } + return nil +} + +// ############################################# DeleteTable ############################################# +type DeleteTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteTableRequest) Reset() { *m = DeleteTableRequest{} } +func (m *DeleteTableRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteTableRequest) ProtoMessage() {} +func (*DeleteTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{20} } + +func (m *DeleteTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type DeleteTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteTableResponse) Reset() { *m = DeleteTableResponse{} } +func (m *DeleteTableResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteTableResponse) ProtoMessage() {} +func (*DeleteTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{21} } + +// ############################################# LoadTable ############################################# +type LoadTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LoadTableRequest) Reset() { *m = LoadTableRequest{} } +func (m *LoadTableRequest) String() string { return proto.CompactTextString(m) } +func (*LoadTableRequest) ProtoMessage() {} +func (*LoadTableRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{22} } + +func (m *LoadTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type LoadTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *LoadTableResponse) Reset() { *m = LoadTableResponse{} } +func (m *LoadTableResponse) String() string { return proto.CompactTextString(m) } +func (*LoadTableResponse) ProtoMessage() {} +func (*LoadTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{23} } + +// ############################################# UnloadTable ############################################# +type UnloadTableRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UnloadTableRequest) Reset() { *m = UnloadTableRequest{} } +func (m *UnloadTableRequest) String() string { return proto.CompactTextString(m) } +func (*UnloadTableRequest) ProtoMessage() {} +func (*UnloadTableRequest) Descriptor() ([]byte, []int) { return 
fileDescriptor2, []int{24} } + +func (m *UnloadTableRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type UnloadTableResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *UnloadTableResponse) Reset() { *m = UnloadTableResponse{} } +func (m *UnloadTableResponse) String() string { return proto.CompactTextString(m) } +func (*UnloadTableResponse) ProtoMessage() {} +func (*UnloadTableResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{25} } + +// * +// Timestamps range from a minimum of 0 to a maximum of INT64.MAX. +// 1. To query a range, specify start_time and end_time. +// 2. To query one specific timestamp, specify specific_time. +type TimeRange struct { + StartTime *int64 `protobuf:"varint,1,opt,name=start_time" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,2,opt,name=end_time" json:"end_time,omitempty"` + SpecificTime *int64 `protobuf:"varint,3,opt,name=specific_time" json:"specific_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TimeRange) Reset() { *m = TimeRange{} } +func (m *TimeRange) String() string { return proto.CompactTextString(m) } +func (*TimeRange) ProtoMessage() {} +func (*TimeRange) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{26} } + +func (m *TimeRange) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *TimeRange) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *TimeRange) GetSpecificTime() int64 { + if m != nil && m.SpecificTime != nil { + return *m.SpecificTime + } + return 0 +} + +type ReturnContent struct { + ReturnType *ReturnType `protobuf:"varint,1,opt,name=return_type,enum=otsprotocol.ReturnType" json:"return_type,omitempty"` + ReturnColumnNames []string `protobuf:"bytes,2,rep,name=return_column_names" json:"return_column_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ReturnContent) Reset() { *m = ReturnContent{} } +func (m *ReturnContent) String() string { return proto.CompactTextString(m) } +func (*ReturnContent) ProtoMessage() {} +func (*ReturnContent) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{27} } + +func (m *ReturnContent) GetReturnType() ReturnType { + if m != nil && m.ReturnType != nil { + return *m.ReturnType + } + return ReturnType_RT_NONE +} + +func (m *ReturnContent) GetReturnColumnNames() []string { + if m != nil { + return m.ReturnColumnNames + } + return nil +} + +// * +// 1. Users may specify a version-timestamp range, or one specific version time, to read particular versions of a column.
// 2. Resuming from a breakpoint within a row is not supported yet. +type GetRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey []byte `protobuf:"bytes,2,req,name=primary_key" json:"primary_key,omitempty"` + ColumnsToGet []string `protobuf:"bytes,3,rep,name=columns_to_get" json:"columns_to_get,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,4,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *int32 `protobuf:"varint,5,opt,name=max_versions" json:"max_versions,omitempty"` + CacheBlocks *bool `protobuf:"varint,6,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + Filter []byte `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"` + StartColumn *string `protobuf:"bytes,8,opt,name=start_column" json:"start_column,omitempty"` + EndColumn *string `protobuf:"bytes,9,opt,name=end_column" json:"end_column,omitempty"` + Token []byte `protobuf:"bytes,10,opt,name=token" json:"token,omitempty"` + TransactionId *string `protobuf:"bytes,11,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRowRequest) Reset() { *m = GetRowRequest{} } +func (m *GetRowRequest) String() string { return proto.CompactTextString(m) } +func (*GetRowRequest) ProtoMessage() {} +func (*GetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{28} } + +const Default_GetRowRequest_CacheBlocks bool = true + +func (m *GetRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *GetRowRequest) GetPrimaryKey() []byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *GetRowRequest) GetColumnsToGet() []string { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *GetRowRequest) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *GetRowRequest) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *GetRowRequest) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_GetRowRequest_CacheBlocks +} + +func (m *GetRowRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func (m *GetRowRequest) GetStartColumn() string { + if m != nil && m.StartColumn != nil { + return *m.StartColumn + } + return "" +} + +func (m *GetRowRequest) GetEndColumn() string { + if m != nil && m.EndColumn != nil { + return *m.EndColumn + } + return "" +} + +func (m *GetRowRequest) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *GetRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type GetRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,req,name=row" json:"row,omitempty"` + NextToken []byte `protobuf:"bytes,3,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRowResponse) Reset() { *m = GetRowResponse{} } +func (m *GetRowResponse) String() string { return proto.CompactTextString(m) } +func (*GetRowResponse) ProtoMessage() {} +func (*GetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{29} } + +func (m *GetRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *GetRowResponse) GetRow() []byte { + if 
m != nil { + return m.Row + } + return nil +} + +func (m *GetRowResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +// ############################################# UpdateRow ############################################# +type UpdateRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + RowChange []byte `protobuf:"bytes,2,req,name=row_change" json:"row_change,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateRowRequest) Reset() { *m = UpdateRowRequest{} } +func (m *UpdateRowRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateRowRequest) ProtoMessage() {} +func (*UpdateRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{30} } + +func (m *UpdateRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *UpdateRowRequest) GetRowChange() []byte { + if m != nil { + return m.RowChange + } + return nil +} + +func (m *UpdateRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *UpdateRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *UpdateRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type UpdateRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UpdateRowResponse) Reset() { *m = UpdateRowResponse{} } +func (m *UpdateRowResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateRowResponse) ProtoMessage() {} +func (*UpdateRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{31} } + +func (m *UpdateRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *UpdateRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +// * +// Users are allowed to set a timestamp per column here rather than being forced into one uniform timestamp for the whole row. +// The reason is that all columns use one common structure, and that structure already carries a timestamp; moreover, forcing a uniform timestamp would add regularity but lose flexibility, and that regularity has no clear benefit while complicating the structure. +type PutRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Row []byte `protobuf:"bytes,2,req,name=row" json:"row,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRowRequest) Reset() { *m = PutRowRequest{} } +func (m *PutRowRequest) String() string { return proto.CompactTextString(m) } +func (*PutRowRequest) ProtoMessage() {} +func (*PutRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{32} } + +func (m *PutRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func 
(m *PutRowRequest) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *PutRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *PutRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *PutRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type PutRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *PutRowResponse) Reset() { *m = PutRowResponse{} } +func (m *PutRowResponse) String() string { return proto.CompactTextString(m) } +func (*PutRowResponse) ProtoMessage() {} +func (*PutRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{33} } + +func (m *PutRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *PutRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +// * +// OTS only supports deleting all versions of all columns of the row; it does not support: +// 1. deleting, for every column, all versions less than or equal to a given version. +type DeleteRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey []byte `protobuf:"bytes,2,req,name=primary_key" json:"primary_key,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + TransactionId *string `protobuf:"bytes,5,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRowRequest) Reset() { *m = DeleteRowRequest{} } +func (m *DeleteRowRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRowRequest) ProtoMessage() {} +func (*DeleteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{34} } + +func (m *DeleteRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DeleteRowRequest) GetPrimaryKey() []byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *DeleteRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *DeleteRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +func (m *DeleteRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type DeleteRowResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,2,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DeleteRowResponse) Reset() { *m = DeleteRowResponse{} } +func (m *DeleteRowResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteRowResponse) ProtoMessage() {} +func (*DeleteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{35} } + +func (m *DeleteRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *DeleteRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +// * +// In HBase, each row in a Batch operation may carry its own query parameters; OTS does not support that. +type TableInBatchGetRowRequest 
struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + PrimaryKey [][]byte `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + Token [][]byte `protobuf:"bytes,3,rep,name=token" json:"token,omitempty"` + ColumnsToGet []string `protobuf:"bytes,4,rep,name=columns_to_get" json:"columns_to_get,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,5,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *int32 `protobuf:"varint,6,opt,name=max_versions" json:"max_versions,omitempty"` + CacheBlocks *bool `protobuf:"varint,7,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + Filter []byte `protobuf:"bytes,8,opt,name=filter" json:"filter,omitempty"` + StartColumn *string `protobuf:"bytes,9,opt,name=start_column" json:"start_column,omitempty"` + EndColumn *string `protobuf:"bytes,10,opt,name=end_column" json:"end_column,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchGetRowRequest) Reset() { *m = TableInBatchGetRowRequest{} } +func (m *TableInBatchGetRowRequest) String() string { return proto.CompactTextString(m) } +func (*TableInBatchGetRowRequest) ProtoMessage() {} +func (*TableInBatchGetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{36} } + +const Default_TableInBatchGetRowRequest_CacheBlocks bool = true + +func (m *TableInBatchGetRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchGetRowRequest) GetPrimaryKey() [][]byte { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetToken() [][]byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetColumnsToGet() []string { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *TableInBatchGetRowRequest) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_TableInBatchGetRowRequest_CacheBlocks +} + +func (m *TableInBatchGetRowRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func (m *TableInBatchGetRowRequest) GetStartColumn() string { + if m != nil && m.StartColumn != nil { + return *m.StartColumn + } + return "" +} + +func (m *TableInBatchGetRowRequest) GetEndColumn() string { + if m != nil && m.EndColumn != nil { + return *m.EndColumn + } + return "" +} + +type BatchGetRowRequest struct { + Tables []*TableInBatchGetRowRequest `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchGetRowRequest) Reset() { *m = BatchGetRowRequest{} } +func (m *BatchGetRowRequest) String() string { return proto.CompactTextString(m) } +func (*BatchGetRowRequest) ProtoMessage() {} +func (*BatchGetRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{37} } + +func (m *BatchGetRowRequest) GetTables() []*TableInBatchGetRowRequest { + if m != nil { + return m.Tables + } + return nil +} + +type RowInBatchGetRowResponse struct { + IsOk *bool `protobuf:"varint,1,req,name=is_ok" json:"is_ok,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + Consumed *ConsumedCapacity 
`protobuf:"bytes,3,opt,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,4,opt,name=row" json:"row,omitempty"` + NextToken []byte `protobuf:"bytes,5,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchGetRowResponse) Reset() { *m = RowInBatchGetRowResponse{} } +func (m *RowInBatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*RowInBatchGetRowResponse) ProtoMessage() {} +func (*RowInBatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{38} } + +func (m *RowInBatchGetRowResponse) GetIsOk() bool { + if m != nil && m.IsOk != nil { + return *m.IsOk + } + return false +} + +func (m *RowInBatchGetRowResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +func (m *RowInBatchGetRowResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type TableInBatchGetRowResponse struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows []*RowInBatchGetRowResponse `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchGetRowResponse) Reset() { *m = TableInBatchGetRowResponse{} } +func (m *TableInBatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*TableInBatchGetRowResponse) ProtoMessage() {} +func (*TableInBatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{39} } + +func (m *TableInBatchGetRowResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchGetRowResponse) GetRows() []*RowInBatchGetRowResponse { + if m != nil { + return m.Rows + } + return nil +} + +type BatchGetRowResponse struct { + Tables []*TableInBatchGetRowResponse `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchGetRowResponse) Reset() { *m = BatchGetRowResponse{} } +func (m *BatchGetRowResponse) String() string { return proto.CompactTextString(m) } +func (*BatchGetRowResponse) ProtoMessage() {} +func (*BatchGetRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{40} } + +func (m *BatchGetRowResponse) GetTables() []*TableInBatchGetRowResponse { + if m != nil { + return m.Tables + } + return nil +} + +type RowInBatchWriteRowRequest struct { + Type *OperationType `protobuf:"varint,1,req,name=type,enum=otsprotocol.OperationType" json:"type,omitempty"` + RowChange []byte `protobuf:"bytes,2,req,name=row_change" json:"row_change,omitempty"` + Condition *Condition `protobuf:"bytes,3,req,name=condition" json:"condition,omitempty"` + ReturnContent *ReturnContent `protobuf:"bytes,4,opt,name=return_content" json:"return_content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchWriteRowRequest) Reset() { *m = RowInBatchWriteRowRequest{} } +func (m *RowInBatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*RowInBatchWriteRowRequest) ProtoMessage() {} +func (*RowInBatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{41} } + +func (m *RowInBatchWriteRowRequest) GetType() OperationType { + if m != nil && 
m.Type != nil { + return *m.Type + } + return OperationType_PUT +} + +func (m *RowInBatchWriteRowRequest) GetRowChange() []byte { + if m != nil { + return m.RowChange + } + return nil +} + +func (m *RowInBatchWriteRowRequest) GetCondition() *Condition { + if m != nil { + return m.Condition + } + return nil +} + +func (m *RowInBatchWriteRowRequest) GetReturnContent() *ReturnContent { + if m != nil { + return m.ReturnContent + } + return nil +} + +type TableInBatchWriteRowRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows []*RowInBatchWriteRowRequest `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchWriteRowRequest) Reset() { *m = TableInBatchWriteRowRequest{} } +func (m *TableInBatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*TableInBatchWriteRowRequest) ProtoMessage() {} +func (*TableInBatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{42} } + +func (m *TableInBatchWriteRowRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchWriteRowRequest) GetRows() []*RowInBatchWriteRowRequest { + if m != nil { + return m.Rows + } + return nil +} + +type BatchWriteRowRequest struct { + Tables []*TableInBatchWriteRowRequest `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + TransactionId *string `protobuf:"bytes,2,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchWriteRowRequest) Reset() { *m = BatchWriteRowRequest{} } +func (m *BatchWriteRowRequest) String() string { return proto.CompactTextString(m) } +func (*BatchWriteRowRequest) ProtoMessage() {} +func (*BatchWriteRowRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{43} } + +func (m *BatchWriteRowRequest) GetTables() []*TableInBatchWriteRowRequest { + if m != nil { + return m.Tables + } + return nil +} + +func (m *BatchWriteRowRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type RowInBatchWriteRowResponse struct { + IsOk *bool `protobuf:"varint,1,req,name=is_ok" json:"is_ok,omitempty"` + Error *Error `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + Consumed *ConsumedCapacity `protobuf:"bytes,3,opt,name=consumed" json:"consumed,omitempty"` + Row []byte `protobuf:"bytes,4,opt,name=row" json:"row,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *RowInBatchWriteRowResponse) Reset() { *m = RowInBatchWriteRowResponse{} } +func (m *RowInBatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*RowInBatchWriteRowResponse) ProtoMessage() {} +func (*RowInBatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{44} } + +func (m *RowInBatchWriteRowResponse) GetIsOk() bool { + if m != nil && m.IsOk != nil { + return *m.IsOk + } + return false +} + +func (m *RowInBatchWriteRowResponse) GetError() *Error { + if m != nil { + return m.Error + } + return nil +} + +func (m *RowInBatchWriteRowResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *RowInBatchWriteRowResponse) GetRow() []byte { + if m != nil { + return m.Row + } + return nil +} + +type TableInBatchWriteRowResponse struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Rows 
[]*RowInBatchWriteRowResponse `protobuf:"bytes,2,rep,name=rows" json:"rows,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TableInBatchWriteRowResponse) Reset() { *m = TableInBatchWriteRowResponse{} } +func (m *TableInBatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*TableInBatchWriteRowResponse) ProtoMessage() {} +func (*TableInBatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{45} } + +func (m *TableInBatchWriteRowResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *TableInBatchWriteRowResponse) GetRows() []*RowInBatchWriteRowResponse { + if m != nil { + return m.Rows + } + return nil +} + +type BatchWriteRowResponse struct { + Tables []*TableInBatchWriteRowResponse `protobuf:"bytes,1,rep,name=tables" json:"tables,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *BatchWriteRowResponse) Reset() { *m = BatchWriteRowResponse{} } +func (m *BatchWriteRowResponse) String() string { return proto.CompactTextString(m) } +func (*BatchWriteRowResponse) ProtoMessage() {} +func (*BatchWriteRowResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{46} } + +func (m *BatchWriteRowResponse) GetTables() []*TableInBatchWriteRowResponse { + if m != nil { + return m.Tables + } + return nil +} + +// * +// HBase supports the following parameters: +// 1. a TimeRange or a specific time +// 2. a Filter (filtering on column values or column names) +// We only support selection conditions on the same version. +type GetRangeRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Direction *Direction `protobuf:"varint,2,req,name=direction,enum=otsprotocol.Direction" json:"direction,omitempty"` + ColumnsToGet []string `protobuf:"bytes,3,rep,name=columns_to_get" json:"columns_to_get,omitempty"` + TimeRange *TimeRange `protobuf:"bytes,4,opt,name=time_range" json:"time_range,omitempty"` + MaxVersions *int32 `protobuf:"varint,5,opt,name=max_versions" json:"max_versions,omitempty"` + Limit *int32 `protobuf:"varint,6,opt,name=limit" json:"limit,omitempty"` + InclusiveStartPrimaryKey []byte `protobuf:"bytes,7,req,name=inclusive_start_primary_key" json:"inclusive_start_primary_key,omitempty"` + ExclusiveEndPrimaryKey []byte `protobuf:"bytes,8,req,name=exclusive_end_primary_key" json:"exclusive_end_primary_key,omitempty"` + CacheBlocks *bool `protobuf:"varint,9,opt,name=cache_blocks,def=1" json:"cache_blocks,omitempty"` + Filter []byte `protobuf:"bytes,10,opt,name=filter" json:"filter,omitempty"` + StartColumn *string `protobuf:"bytes,11,opt,name=start_column" json:"start_column,omitempty"` + EndColumn *string `protobuf:"bytes,12,opt,name=end_column" json:"end_column,omitempty"` + Token []byte `protobuf:"bytes,13,opt,name=token" json:"token,omitempty"` + TransactionId *string `protobuf:"bytes,14,opt,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRangeRequest) Reset() { *m = GetRangeRequest{} } +func (m *GetRangeRequest) String() string { return proto.CompactTextString(m) } +func (*GetRangeRequest) ProtoMessage() {} +func (*GetRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{47} } + +const Default_GetRangeRequest_CacheBlocks bool = true + +func (m *GetRangeRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *GetRangeRequest) GetDirection() Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Direction_FORWARD +} + +func (m 
*GetRangeRequest) GetColumnsToGet() []string { + if m != nil { + return m.ColumnsToGet + } + return nil +} + +func (m *GetRangeRequest) GetTimeRange() *TimeRange { + if m != nil { + return m.TimeRange + } + return nil +} + +func (m *GetRangeRequest) GetMaxVersions() int32 { + if m != nil && m.MaxVersions != nil { + return *m.MaxVersions + } + return 0 +} + +func (m *GetRangeRequest) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *GetRangeRequest) GetInclusiveStartPrimaryKey() []byte { + if m != nil { + return m.InclusiveStartPrimaryKey + } + return nil +} + +func (m *GetRangeRequest) GetExclusiveEndPrimaryKey() []byte { + if m != nil { + return m.ExclusiveEndPrimaryKey + } + return nil +} + +func (m *GetRangeRequest) GetCacheBlocks() bool { + if m != nil && m.CacheBlocks != nil { + return *m.CacheBlocks + } + return Default_GetRangeRequest_CacheBlocks +} + +func (m *GetRangeRequest) GetFilter() []byte { + if m != nil { + return m.Filter + } + return nil +} + +func (m *GetRangeRequest) GetStartColumn() string { + if m != nil && m.StartColumn != nil { + return *m.StartColumn + } + return "" +} + +func (m *GetRangeRequest) GetEndColumn() string { + if m != nil && m.EndColumn != nil { + return *m.EndColumn + } + return "" +} + +func (m *GetRangeRequest) GetToken() []byte { + if m != nil { + return m.Token + } + return nil +} + +func (m *GetRangeRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type GetRangeResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Rows []byte `protobuf:"bytes,2,req,name=rows" json:"rows,omitempty"` + NextStartPrimaryKey []byte `protobuf:"bytes,3,opt,name=next_start_primary_key" json:"next_start_primary_key,omitempty"` + NextToken []byte `protobuf:"bytes,4,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetRangeResponse) Reset() { *m = GetRangeResponse{} } +func (m *GetRangeResponse) String() string { return proto.CompactTextString(m) } +func (*GetRangeResponse) ProtoMessage() {} +func (*GetRangeResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{48} } + +func (m *GetRangeResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *GetRangeResponse) GetRows() []byte { + if m != nil { + return m.Rows + } + return nil +} + +func (m *GetRangeResponse) GetNextStartPrimaryKey() []byte { + if m != nil { + return m.NextStartPrimaryKey + } + return nil +} + +func (m *GetRangeResponse) GetNextToken() []byte { + if m != nil { + return m.NextToken + } + return nil +} + +type ListStreamRequest struct { + TableName *string `protobuf:"bytes,1,opt,name=table_name" json:"table_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListStreamRequest) Reset() { *m = ListStreamRequest{} } +func (m *ListStreamRequest) String() string { return proto.CompactTextString(m) } +func (*ListStreamRequest) ProtoMessage() {} +func (*ListStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{49} } + +func (m *ListStreamRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +type Stream struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + TableName *string `protobuf:"bytes,2,req,name=table_name" json:"table_name,omitempty"` + CreationTime *int64 
`protobuf:"varint,3,req,name=creation_time" json:"creation_time,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Stream) Reset() { *m = Stream{} } +func (m *Stream) String() string { return proto.CompactTextString(m) } +func (*Stream) ProtoMessage() {} +func (*Stream) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{50} } + +func (m *Stream) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *Stream) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *Stream) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return 0 +} + +type ListStreamResponse struct { + Streams []*Stream `protobuf:"bytes,1,rep,name=streams" json:"streams,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ListStreamResponse) Reset() { *m = ListStreamResponse{} } +func (m *ListStreamResponse) String() string { return proto.CompactTextString(m) } +func (*ListStreamResponse) ProtoMessage() {} +func (*ListStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{51} } + +func (m *ListStreamResponse) GetStreams() []*Stream { + if m != nil { + return m.Streams + } + return nil +} + +type StreamShard struct { + ShardId *string `protobuf:"bytes,1,req,name=shard_id" json:"shard_id,omitempty"` + ParentId *string `protobuf:"bytes,2,opt,name=parent_id" json:"parent_id,omitempty"` + ParentSiblingId *string `protobuf:"bytes,3,opt,name=parent_sibling_id" json:"parent_sibling_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StreamShard) Reset() { *m = StreamShard{} } +func (m *StreamShard) String() string { return proto.CompactTextString(m) } +func (*StreamShard) ProtoMessage() {} +func (*StreamShard) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{52} } + +func (m *StreamShard) GetShardId() string { + if m != nil && m.ShardId != nil { + return *m.ShardId + } + return "" +} + +func (m *StreamShard) GetParentId() string { + if m != nil && m.ParentId != nil { + return *m.ParentId + } + return "" +} + +func (m *StreamShard) GetParentSiblingId() string { + if m != nil && m.ParentSiblingId != nil { + return *m.ParentSiblingId + } + return "" +} + +type DescribeStreamRequest struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + InclusiveStartShardId *string `protobuf:"bytes,2,opt,name=inclusive_start_shard_id" json:"inclusive_start_shard_id,omitempty"` + ShardLimit *int32 `protobuf:"varint,3,opt,name=shard_limit" json:"shard_limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeStreamRequest) Reset() { *m = DescribeStreamRequest{} } +func (m *DescribeStreamRequest) String() string { return proto.CompactTextString(m) } +func (*DescribeStreamRequest) ProtoMessage() {} +func (*DescribeStreamRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{53} } + +func (m *DescribeStreamRequest) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *DescribeStreamRequest) GetInclusiveStartShardId() string { + if m != nil && m.InclusiveStartShardId != nil { + return *m.InclusiveStartShardId + } + return "" +} + +func (m *DescribeStreamRequest) GetShardLimit() int32 { + if m != nil && m.ShardLimit != nil { + return *m.ShardLimit + } + return 0 +} + +type DescribeStreamResponse struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" 
json:"stream_id,omitempty"` + ExpirationTime *int32 `protobuf:"varint,2,req,name=expiration_time" json:"expiration_time,omitempty"` + TableName *string `protobuf:"bytes,3,req,name=table_name" json:"table_name,omitempty"` + CreationTime *int64 `protobuf:"varint,4,req,name=creation_time" json:"creation_time,omitempty"` + StreamStatus *StreamStatus `protobuf:"varint,5,req,name=stream_status,enum=otsprotocol.StreamStatus" json:"stream_status,omitempty"` + Shards []*StreamShard `protobuf:"bytes,6,rep,name=shards" json:"shards,omitempty"` + NextShardId *string `protobuf:"bytes,7,opt,name=next_shard_id" json:"next_shard_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescribeStreamResponse) Reset() { *m = DescribeStreamResponse{} } +func (m *DescribeStreamResponse) String() string { return proto.CompactTextString(m) } +func (*DescribeStreamResponse) ProtoMessage() {} +func (*DescribeStreamResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{54} } + +func (m *DescribeStreamResponse) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *DescribeStreamResponse) GetExpirationTime() int32 { + if m != nil && m.ExpirationTime != nil { + return *m.ExpirationTime + } + return 0 +} + +func (m *DescribeStreamResponse) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *DescribeStreamResponse) GetCreationTime() int64 { + if m != nil && m.CreationTime != nil { + return *m.CreationTime + } + return 0 +} + +func (m *DescribeStreamResponse) GetStreamStatus() StreamStatus { + if m != nil && m.StreamStatus != nil { + return *m.StreamStatus + } + return StreamStatus_STREAM_ENABLING +} + +func (m *DescribeStreamResponse) GetShards() []*StreamShard { + if m != nil { + return m.Shards + } + return nil +} + +func (m *DescribeStreamResponse) GetNextShardId() string { + if m != nil && m.NextShardId != nil { + return *m.NextShardId + } + return "" +} + +type GetShardIteratorRequest struct { + StreamId *string `protobuf:"bytes,1,req,name=stream_id" json:"stream_id,omitempty"` + ShardId *string `protobuf:"bytes,2,req,name=shard_id" json:"shard_id,omitempty"` + Timestamp *int64 `protobuf:"varint,3,opt,name=timestamp" json:"timestamp,omitempty"` + Token *string `protobuf:"bytes,4,opt,name=token" json:"token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetShardIteratorRequest) Reset() { *m = GetShardIteratorRequest{} } +func (m *GetShardIteratorRequest) String() string { return proto.CompactTextString(m) } +func (*GetShardIteratorRequest) ProtoMessage() {} +func (*GetShardIteratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{55} } + +func (m *GetShardIteratorRequest) GetStreamId() string { + if m != nil && m.StreamId != nil { + return *m.StreamId + } + return "" +} + +func (m *GetShardIteratorRequest) GetShardId() string { + if m != nil && m.ShardId != nil { + return *m.ShardId + } + return "" +} + +func (m *GetShardIteratorRequest) GetTimestamp() int64 { + if m != nil && m.Timestamp != nil { + return *m.Timestamp + } + return 0 +} + +func (m *GetShardIteratorRequest) GetToken() string { + if m != nil && m.Token != nil { + return *m.Token + } + return "" +} + +type GetShardIteratorResponse struct { + ShardIterator *string `protobuf:"bytes,1,req,name=shard_iterator" json:"shard_iterator,omitempty"` + NextToken *string `protobuf:"bytes,2,opt,name=next_token" json:"next_token,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + 
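Taken together, the shard messages above form a small paging protocol: GetShardIteratorRequest exchanges a (stream_id, shard_id) pair for an iterator, and GetStreamRecordRequest (defined next) consumes that iterator page by page, with each response carrying the iterator for the following call. A minimal sketch of that loop over these generated types; the callAPI helper is hypothetical, standing in for the SDK's real HTTP transport, and the conventional github.com import path is assumed for this vendored package:

package example

import (
	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol"
	"github.com/golang/protobuf/proto"
)

// callAPI is a hypothetical stand-in for the SDK's HTTP/protobuf transport:
// it would marshal req, POST it to the OTS endpoint, and decode the reply into resp.
func callAPI(name string, req, resp proto.Message) error {
	return nil
}

// readShard drains one stream shard, page by page.
func readShard(streamID, shardID string) error {
	itReq := &otsprotocol.GetShardIteratorRequest{
		StreamId: proto.String(streamID),
		ShardId:  proto.String(shardID),
	}
	itResp := &otsprotocol.GetShardIteratorResponse{}
	if err := callAPI("GetShardIterator", itReq, itResp); err != nil {
		return err
	}
	for it := itResp.GetShardIterator(); it != ""; {
		req := &otsprotocol.GetStreamRecordRequest{
			ShardIterator: proto.String(it),
			Limit:         proto.Int32(100),
		}
		resp := &otsprotocol.GetStreamRecordResponse{}
		if err := callAPI("GetStreamRecord", req, resp); err != nil {
			return err
		}
		for _, r := range resp.GetStreamRecords() {
			_ = r.GetActionType() // e.g. ActionType_PUT_ROW
			_ = r.GetRecord()     // the encoded row change
		}
		// GetNextShardIterator returns "" when the field is unset; this
		// sketch assumes that marks the end of the shard.
		it = resp.GetNextShardIterator()
	}
	return nil
}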
+func (m *GetShardIteratorResponse) Reset() { *m = GetShardIteratorResponse{} } +func (m *GetShardIteratorResponse) String() string { return proto.CompactTextString(m) } +func (*GetShardIteratorResponse) ProtoMessage() {} +func (*GetShardIteratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{56} } + +func (m *GetShardIteratorResponse) GetShardIterator() string { + if m != nil && m.ShardIterator != nil { + return *m.ShardIterator + } + return "" +} + +func (m *GetShardIteratorResponse) GetNextToken() string { + if m != nil && m.NextToken != nil { + return *m.NextToken + } + return "" +} + +type GetStreamRecordRequest struct { + ShardIterator *string `protobuf:"bytes,1,req,name=shard_iterator" json:"shard_iterator,omitempty"` + Limit *int32 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordRequest) Reset() { *m = GetStreamRecordRequest{} } +func (m *GetStreamRecordRequest) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordRequest) ProtoMessage() {} +func (*GetStreamRecordRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{57} } + +func (m *GetStreamRecordRequest) GetShardIterator() string { + if m != nil && m.ShardIterator != nil { + return *m.ShardIterator + } + return "" +} + +func (m *GetStreamRecordRequest) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +type GetStreamRecordResponse struct { + StreamRecords []*GetStreamRecordResponse_StreamRecord `protobuf:"bytes,1,rep,name=stream_records" json:"stream_records,omitempty"` + NextShardIterator *string `protobuf:"bytes,2,opt,name=next_shard_iterator" json:"next_shard_iterator,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordResponse) Reset() { *m = GetStreamRecordResponse{} } +func (m *GetStreamRecordResponse) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordResponse) ProtoMessage() {} +func (*GetStreamRecordResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{58} } + +func (m *GetStreamRecordResponse) GetStreamRecords() []*GetStreamRecordResponse_StreamRecord { + if m != nil { + return m.StreamRecords + } + return nil +} + +func (m *GetStreamRecordResponse) GetNextShardIterator() string { + if m != nil && m.NextShardIterator != nil { + return *m.NextShardIterator + } + return "" +} + +type GetStreamRecordResponse_StreamRecord struct { + ActionType *ActionType `protobuf:"varint,1,req,name=action_type,enum=otsprotocol.ActionType" json:"action_type,omitempty"` + Record []byte `protobuf:"bytes,2,req,name=record" json:"record,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GetStreamRecordResponse_StreamRecord) Reset() { *m = GetStreamRecordResponse_StreamRecord{} } +func (m *GetStreamRecordResponse_StreamRecord) String() string { return proto.CompactTextString(m) } +func (*GetStreamRecordResponse_StreamRecord) ProtoMessage() {} +func (*GetStreamRecordResponse_StreamRecord) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{58, 0} +} + +func (m *GetStreamRecordResponse_StreamRecord) GetActionType() ActionType { + if m != nil && m.ActionType != nil { + return *m.ActionType + } + return ActionType_PUT_ROW +} + +func (m *GetStreamRecordResponse_StreamRecord) GetRecord() []byte { + if m != nil { + return m.Record + } + return nil +} + +// +++++ ComputeSplitPointsBySize +++++ +type ComputeSplitPointsBySizeRequest struct { + TableName *string 
`protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + SplitSize *int64 `protobuf:"varint,2,req,name=split_size" json:"split_size,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeRequest) Reset() { *m = ComputeSplitPointsBySizeRequest{} } +func (m *ComputeSplitPointsBySizeRequest) String() string { return proto.CompactTextString(m) } +func (*ComputeSplitPointsBySizeRequest) ProtoMessage() {} +func (*ComputeSplitPointsBySizeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{59} +} + +func (m *ComputeSplitPointsBySizeRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *ComputeSplitPointsBySizeRequest) GetSplitSize() int64 { + if m != nil && m.SplitSize != nil { + return *m.SplitSize + } + return 0 +} + +type ComputeSplitPointsBySizeResponse struct { + Consumed *ConsumedCapacity `protobuf:"bytes,1,req,name=consumed" json:"consumed,omitempty"` + Schema []*PrimaryKeySchema `protobuf:"bytes,2,rep,name=schema" json:"schema,omitempty"` + // * + // Split points between splits, in increasing order + // + // A split is a consecutive range of primary keys, + // whose data size is about the split_size specified in the request. + // The size may not be precise. + // + // A split point is an array of primary-key columns w.r.t. the table schema, + // and is never longer than the table schema. + // Trailing -inf will be omitted to reduce transmission payloads. + SplitPoints [][]byte `protobuf:"bytes,3,rep,name=split_points" json:"split_points,omitempty"` + Locations []*ComputeSplitPointsBySizeResponse_SplitLocation `protobuf:"bytes,4,rep,name=locations" json:"locations,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeResponse) Reset() { *m = ComputeSplitPointsBySizeResponse{} } +func (m *ComputeSplitPointsBySizeResponse) String() string { return proto.CompactTextString(m) } +func (*ComputeSplitPointsBySizeResponse) ProtoMessage() {} +func (*ComputeSplitPointsBySizeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{60} +} + +func (m *ComputeSplitPointsBySizeResponse) GetConsumed() *ConsumedCapacity { + if m != nil { + return m.Consumed + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetSchema() []*PrimaryKeySchema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetSplitPoints() [][]byte { + if m != nil { + return m.SplitPoints + } + return nil +} + +func (m *ComputeSplitPointsBySizeResponse) GetLocations() []*ComputeSplitPointsBySizeResponse_SplitLocation { + if m != nil { + return m.Locations + } + return nil +} + +// * +// Locations where the splits lie. +// +// Given the managed nature of TableStore, these locations are no more than hints. +// If a location is not suitable to be exposed, an empty string will be placed. 
+type ComputeSplitPointsBySizeResponse_SplitLocation struct { + Location *string `protobuf:"bytes,1,req,name=location" json:"location,omitempty"` + Repeat *int64 `protobuf:"zigzag64,2,req,name=repeat" json:"repeat,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) Reset() { + *m = ComputeSplitPointsBySizeResponse_SplitLocation{} +} +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) String() string { + return proto.CompactTextString(m) +} +func (*ComputeSplitPointsBySizeResponse_SplitLocation) ProtoMessage() {} +func (*ComputeSplitPointsBySizeResponse_SplitLocation) Descriptor() ([]byte, []int) { + return fileDescriptor2, []int{60, 0} +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) GetLocation() string { + if m != nil && m.Location != nil { + return *m.Location + } + return "" +} + +func (m *ComputeSplitPointsBySizeResponse_SplitLocation) GetRepeat() int64 { + if m != nil && m.Repeat != nil { + return *m.Repeat + } + return 0 +} + +type DefinedColumnSchema struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + Type *DefinedColumnType `protobuf:"varint,2,req,name=type,enum=otsprotocol.DefinedColumnType" json:"type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DefinedColumnSchema) Reset() { *m = DefinedColumnSchema{} } +func (m *DefinedColumnSchema) String() string { return proto.CompactTextString(m) } +func (*DefinedColumnSchema) ProtoMessage() {} +func (*DefinedColumnSchema) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{61} } + +func (m *DefinedColumnSchema) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DefinedColumnSchema) GetType() DefinedColumnType { + if m != nil && m.Type != nil { + return *m.Type + } + return DefinedColumnType_DCT_INTEGER +} + +type IndexMeta struct { + Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` + PrimaryKey []string `protobuf:"bytes,2,rep,name=primary_key" json:"primary_key,omitempty"` + DefinedColumn []string `protobuf:"bytes,3,rep,name=defined_column" json:"defined_column,omitempty"` + IndexUpdateMode *IndexUpdateMode `protobuf:"varint,4,req,name=index_update_mode,enum=otsprotocol.IndexUpdateMode" json:"index_update_mode,omitempty"` + IndexType *IndexType `protobuf:"varint,5,req,name=index_type,enum=otsprotocol.IndexType" json:"index_type,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *IndexMeta) Reset() { *m = IndexMeta{} } +func (m *IndexMeta) String() string { return proto.CompactTextString(m) } +func (*IndexMeta) ProtoMessage() {} +func (*IndexMeta) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{62} } + +func (m *IndexMeta) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *IndexMeta) GetPrimaryKey() []string { + if m != nil { + return m.PrimaryKey + } + return nil +} + +func (m *IndexMeta) GetDefinedColumn() []string { + if m != nil { + return m.DefinedColumn + } + return nil +} + +func (m *IndexMeta) GetIndexUpdateMode() IndexUpdateMode { + if m != nil && m.IndexUpdateMode != nil { + return *m.IndexUpdateMode + } + return IndexUpdateMode_IUM_ASYNC_INDEX +} + +func (m *IndexMeta) GetIndexType() IndexType { + if m != nil && m.IndexType != nil { + return *m.IndexType + } + return IndexType_IT_GLOBAL_INDEX +} + +type CreateIndexRequest struct { + MainTableName *string `protobuf:"bytes,1,req,name=main_table_name" json:"main_table_name,omitempty"` + IndexMeta 
*IndexMeta `protobuf:"bytes,2,req,name=index_meta" json:"index_meta,omitempty"` + IncludeBaseData *bool `protobuf:"varint,3,opt,name=include_base_data" json:"include_base_data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateIndexRequest) Reset() { *m = CreateIndexRequest{} } +func (m *CreateIndexRequest) String() string { return proto.CompactTextString(m) } +func (*CreateIndexRequest) ProtoMessage() {} +func (*CreateIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{63} } + +func (m *CreateIndexRequest) GetMainTableName() string { + if m != nil && m.MainTableName != nil { + return *m.MainTableName + } + return "" +} + +func (m *CreateIndexRequest) GetIndexMeta() *IndexMeta { + if m != nil { + return m.IndexMeta + } + return nil +} + +func (m *CreateIndexRequest) GetIncludeBaseData() bool { + if m != nil && m.IncludeBaseData != nil { + return *m.IncludeBaseData + } + return false +} + +type CreateIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CreateIndexResponse) Reset() { *m = CreateIndexResponse{} } +func (m *CreateIndexResponse) String() string { return proto.CompactTextString(m) } +func (*CreateIndexResponse) ProtoMessage() {} +func (*CreateIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{64} } + +type DropIndexRequest struct { + MainTableName *string `protobuf:"bytes,1,req,name=main_table_name" json:"main_table_name,omitempty"` + IndexName *string `protobuf:"bytes,2,req,name=index_name" json:"index_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DropIndexRequest) Reset() { *m = DropIndexRequest{} } +func (m *DropIndexRequest) String() string { return proto.CompactTextString(m) } +func (*DropIndexRequest) ProtoMessage() {} +func (*DropIndexRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{65} } + +func (m *DropIndexRequest) GetMainTableName() string { + if m != nil && m.MainTableName != nil { + return *m.MainTableName + } + return "" +} + +func (m *DropIndexRequest) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +type DropIndexResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *DropIndexResponse) Reset() { *m = DropIndexResponse{} } +func (m *DropIndexResponse) String() string { return proto.CompactTextString(m) } +func (*DropIndexResponse) ProtoMessage() {} +func (*DropIndexResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{66} } + +// ########################################### LocalTransaction ########################################### +type StartLocalTransactionRequest struct { + TableName *string `protobuf:"bytes,1,req,name=table_name" json:"table_name,omitempty"` + Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartLocalTransactionRequest) Reset() { *m = StartLocalTransactionRequest{} } +func (m *StartLocalTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*StartLocalTransactionRequest) ProtoMessage() {} +func (*StartLocalTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{67} } + +func (m *StartLocalTransactionRequest) GetTableName() string { + if m != nil && m.TableName != nil { + return *m.TableName + } + return "" +} + +func (m *StartLocalTransactionRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type StartLocalTransactionResponse struct { + TransactionId *string 
`protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StartLocalTransactionResponse) Reset() { *m = StartLocalTransactionResponse{} } +func (m *StartLocalTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*StartLocalTransactionResponse) ProtoMessage() {} +func (*StartLocalTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{68} } + +func (m *StartLocalTransactionResponse) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type CommitTransactionRequest struct { + TransactionId *string `protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitTransactionRequest) Reset() { *m = CommitTransactionRequest{} } +func (m *CommitTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*CommitTransactionRequest) ProtoMessage() {} +func (*CommitTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{69} } + +func (m *CommitTransactionRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type CommitTransactionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *CommitTransactionResponse) Reset() { *m = CommitTransactionResponse{} } +func (m *CommitTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*CommitTransactionResponse) ProtoMessage() {} +func (*CommitTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{70} } + +type AbortTransactionRequest struct { + TransactionId *string `protobuf:"bytes,1,req,name=transaction_id" json:"transaction_id,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *AbortTransactionRequest) Reset() { *m = AbortTransactionRequest{} } +func (m *AbortTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*AbortTransactionRequest) ProtoMessage() {} +func (*AbortTransactionRequest) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{71} } + +func (m *AbortTransactionRequest) GetTransactionId() string { + if m != nil && m.TransactionId != nil { + return *m.TransactionId + } + return "" +} + +type AbortTransactionResponse struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *AbortTransactionResponse) Reset() { *m = AbortTransactionResponse{} } +func (m *AbortTransactionResponse) String() string { return proto.CompactTextString(m) } +func (*AbortTransactionResponse) ProtoMessage() {} +func (*AbortTransactionResponse) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{72} } + +func init() { + proto.RegisterType((*Error)(nil), "otsprotocol.Error") + proto.RegisterType((*PrimaryKeySchema)(nil), "otsprotocol.PrimaryKeySchema") + proto.RegisterType((*PartitionRange)(nil), "otsprotocol.PartitionRange") + proto.RegisterType((*TableOptions)(nil), "otsprotocol.TableOptions") + proto.RegisterType((*TableMeta)(nil), "otsprotocol.TableMeta") + proto.RegisterType((*Condition)(nil), "otsprotocol.Condition") + proto.RegisterType((*CapacityUnit)(nil), "otsprotocol.CapacityUnit") + proto.RegisterType((*ReservedThroughputDetails)(nil), "otsprotocol.ReservedThroughputDetails") + proto.RegisterType((*ReservedThroughput)(nil), "otsprotocol.ReservedThroughput") + proto.RegisterType((*ConsumedCapacity)(nil), "otsprotocol.ConsumedCapacity") + 
proto.RegisterType((*StreamSpecification)(nil), "otsprotocol.StreamSpecification") + proto.RegisterType((*StreamDetails)(nil), "otsprotocol.StreamDetails") + proto.RegisterType((*CreateTableRequest)(nil), "otsprotocol.CreateTableRequest") + proto.RegisterType((*CreateTableResponse)(nil), "otsprotocol.CreateTableResponse") + proto.RegisterType((*UpdateTableRequest)(nil), "otsprotocol.UpdateTableRequest") + proto.RegisterType((*UpdateTableResponse)(nil), "otsprotocol.UpdateTableResponse") + proto.RegisterType((*DescribeTableRequest)(nil), "otsprotocol.DescribeTableRequest") + proto.RegisterType((*DescribeTableResponse)(nil), "otsprotocol.DescribeTableResponse") + proto.RegisterType((*ListTableRequest)(nil), "otsprotocol.ListTableRequest") + proto.RegisterType((*ListTableResponse)(nil), "otsprotocol.ListTableResponse") + proto.RegisterType((*DeleteTableRequest)(nil), "otsprotocol.DeleteTableRequest") + proto.RegisterType((*DeleteTableResponse)(nil), "otsprotocol.DeleteTableResponse") + proto.RegisterType((*LoadTableRequest)(nil), "otsprotocol.LoadTableRequest") + proto.RegisterType((*LoadTableResponse)(nil), "otsprotocol.LoadTableResponse") + proto.RegisterType((*UnloadTableRequest)(nil), "otsprotocol.UnloadTableRequest") + proto.RegisterType((*UnloadTableResponse)(nil), "otsprotocol.UnloadTableResponse") + proto.RegisterType((*TimeRange)(nil), "otsprotocol.TimeRange") + proto.RegisterType((*ReturnContent)(nil), "otsprotocol.ReturnContent") + proto.RegisterType((*GetRowRequest)(nil), "otsprotocol.GetRowRequest") + proto.RegisterType((*GetRowResponse)(nil), "otsprotocol.GetRowResponse") + proto.RegisterType((*UpdateRowRequest)(nil), "otsprotocol.UpdateRowRequest") + proto.RegisterType((*UpdateRowResponse)(nil), "otsprotocol.UpdateRowResponse") + proto.RegisterType((*PutRowRequest)(nil), "otsprotocol.PutRowRequest") + proto.RegisterType((*PutRowResponse)(nil), "otsprotocol.PutRowResponse") + proto.RegisterType((*DeleteRowRequest)(nil), "otsprotocol.DeleteRowRequest") + proto.RegisterType((*DeleteRowResponse)(nil), "otsprotocol.DeleteRowResponse") + proto.RegisterType((*TableInBatchGetRowRequest)(nil), "otsprotocol.TableInBatchGetRowRequest") + proto.RegisterType((*BatchGetRowRequest)(nil), "otsprotocol.BatchGetRowRequest") + proto.RegisterType((*RowInBatchGetRowResponse)(nil), "otsprotocol.RowInBatchGetRowResponse") + proto.RegisterType((*TableInBatchGetRowResponse)(nil), "otsprotocol.TableInBatchGetRowResponse") + proto.RegisterType((*BatchGetRowResponse)(nil), "otsprotocol.BatchGetRowResponse") + proto.RegisterType((*RowInBatchWriteRowRequest)(nil), "otsprotocol.RowInBatchWriteRowRequest") + proto.RegisterType((*TableInBatchWriteRowRequest)(nil), "otsprotocol.TableInBatchWriteRowRequest") + proto.RegisterType((*BatchWriteRowRequest)(nil), "otsprotocol.BatchWriteRowRequest") + proto.RegisterType((*RowInBatchWriteRowResponse)(nil), "otsprotocol.RowInBatchWriteRowResponse") + proto.RegisterType((*TableInBatchWriteRowResponse)(nil), "otsprotocol.TableInBatchWriteRowResponse") + proto.RegisterType((*BatchWriteRowResponse)(nil), "otsprotocol.BatchWriteRowResponse") + proto.RegisterType((*GetRangeRequest)(nil), "otsprotocol.GetRangeRequest") + proto.RegisterType((*GetRangeResponse)(nil), "otsprotocol.GetRangeResponse") + proto.RegisterType((*ListStreamRequest)(nil), "otsprotocol.ListStreamRequest") + proto.RegisterType((*Stream)(nil), "otsprotocol.Stream") + proto.RegisterType((*ListStreamResponse)(nil), "otsprotocol.ListStreamResponse") + proto.RegisterType((*StreamShard)(nil), 
"otsprotocol.StreamShard") + proto.RegisterType((*DescribeStreamRequest)(nil), "otsprotocol.DescribeStreamRequest") + proto.RegisterType((*DescribeStreamResponse)(nil), "otsprotocol.DescribeStreamResponse") + proto.RegisterType((*GetShardIteratorRequest)(nil), "otsprotocol.GetShardIteratorRequest") + proto.RegisterType((*GetShardIteratorResponse)(nil), "otsprotocol.GetShardIteratorResponse") + proto.RegisterType((*GetStreamRecordRequest)(nil), "otsprotocol.GetStreamRecordRequest") + proto.RegisterType((*GetStreamRecordResponse)(nil), "otsprotocol.GetStreamRecordResponse") + proto.RegisterType((*GetStreamRecordResponse_StreamRecord)(nil), "otsprotocol.GetStreamRecordResponse.StreamRecord") + proto.RegisterType((*ComputeSplitPointsBySizeRequest)(nil), "otsprotocol.ComputeSplitPointsBySizeRequest") + proto.RegisterType((*ComputeSplitPointsBySizeResponse)(nil), "otsprotocol.ComputeSplitPointsBySizeResponse") + proto.RegisterType((*ComputeSplitPointsBySizeResponse_SplitLocation)(nil), "otsprotocol.ComputeSplitPointsBySizeResponse.SplitLocation") + proto.RegisterType((*DefinedColumnSchema)(nil), "otsprotocol.DefinedColumnSchema") + proto.RegisterType((*IndexMeta)(nil), "otsprotocol.IndexMeta") + proto.RegisterType((*CreateIndexRequest)(nil), "otsprotocol.CreateIndexRequest") + proto.RegisterType((*CreateIndexResponse)(nil), "otsprotocol.CreateIndexResponse") + proto.RegisterType((*DropIndexRequest)(nil), "otsprotocol.DropIndexRequest") + proto.RegisterType((*DropIndexResponse)(nil), "otsprotocol.DropIndexResponse") + proto.RegisterType((*StartLocalTransactionRequest)(nil), "otsprotocol.StartLocalTransactionRequest") + proto.RegisterType((*StartLocalTransactionResponse)(nil), "otsprotocol.StartLocalTransactionResponse") + proto.RegisterType((*CommitTransactionRequest)(nil), "otsprotocol.CommitTransactionRequest") + proto.RegisterType((*CommitTransactionResponse)(nil), "otsprotocol.CommitTransactionResponse") + proto.RegisterType((*AbortTransactionRequest)(nil), "otsprotocol.AbortTransactionRequest") + proto.RegisterType((*AbortTransactionResponse)(nil), "otsprotocol.AbortTransactionResponse") + proto.RegisterEnum("otsprotocol.PrimaryKeyType", PrimaryKeyType_name, PrimaryKeyType_value) + proto.RegisterEnum("otsprotocol.PrimaryKeyOption", PrimaryKeyOption_name, PrimaryKeyOption_value) + proto.RegisterEnum("otsprotocol.BloomFilterType", BloomFilterType_name, BloomFilterType_value) + proto.RegisterEnum("otsprotocol.TableStatus", TableStatus_name, TableStatus_value) + proto.RegisterEnum("otsprotocol.RowExistenceExpectation", RowExistenceExpectation_name, RowExistenceExpectation_value) + proto.RegisterEnum("otsprotocol.ReturnType", ReturnType_name, ReturnType_value) + proto.RegisterEnum("otsprotocol.OperationType", OperationType_name, OperationType_value) + proto.RegisterEnum("otsprotocol.Direction", Direction_name, Direction_value) + proto.RegisterEnum("otsprotocol.StreamStatus", StreamStatus_name, StreamStatus_value) + proto.RegisterEnum("otsprotocol.ActionType", ActionType_name, ActionType_value) + proto.RegisterEnum("otsprotocol.DefinedColumnType", DefinedColumnType_name, DefinedColumnType_value) + proto.RegisterEnum("otsprotocol.IndexUpdateMode", IndexUpdateMode_name, IndexUpdateMode_value) + proto.RegisterEnum("otsprotocol.IndexType", IndexType_name, IndexType_value) +} + +func init() { proto.RegisterFile("table_store.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 2751 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 
0x59, 0x5b, 0x6f, 0xe3, 0xc6, + 0xf5, 0x0f, 0x49, 0x5d, 0xac, 0xa3, 0x8b, 0x29, 0x6a, 0x6d, 0xcb, 0xf6, 0x26, 0x71, 0xf8, 0x4f, + 0x36, 0x5a, 0x65, 0xe3, 0x24, 0xfe, 0x27, 0xd9, 0x5c, 0x0a, 0x04, 0xb2, 0x24, 0xbb, 0xc2, 0xca, + 0x92, 0x23, 0xd3, 0x4d, 0xb6, 0x2f, 0x2c, 0x4d, 0xce, 0xda, 0xc4, 0x4a, 0xa4, 0x4a, 0x8e, 0xd6, + 0xf6, 0xbe, 0x16, 0x79, 0x2b, 0xd0, 0x0f, 0x50, 0xa0, 0xaf, 0x7d, 0x28, 0x50, 0xa0, 0x7d, 0x6a, + 0x3f, 0x40, 0x81, 0xbe, 0xe5, 0x3b, 0xf4, 0xad, 0x5f, 0xa2, 0x28, 0xe6, 0x42, 0x89, 0xa4, 0x28, + 0xcb, 0xbb, 0x49, 0xba, 0x6f, 0xe4, 0xcc, 0x9c, 0xdb, 0xef, 0x9c, 0x39, 0x73, 0xe6, 0x0c, 0x94, + 0xb1, 0x71, 0x36, 0x44, 0xba, 0x8f, 0x5d, 0x0f, 0xed, 0x8e, 0x3d, 0x17, 0xbb, 0x4a, 0xde, 0xc5, + 0x3e, 0xfd, 0x32, 0xdd, 0xa1, 0x7a, 0x0f, 0xd2, 0x6d, 0xcf, 0x73, 0x3d, 0xa5, 0x00, 0x29, 0xd3, + 0xb5, 0x50, 0x55, 0xd8, 0x11, 0x6b, 0x39, 0x65, 0x15, 0xb2, 0x23, 0xe4, 0xfb, 0xc6, 0x39, 0xaa, + 0x8a, 0x3b, 0x42, 0x2d, 0xa7, 0x3e, 0x07, 0xf9, 0xd8, 0xb3, 0x47, 0x86, 0x77, 0xfd, 0x08, 0x5d, + 0x9f, 0x98, 0x17, 0x68, 0x64, 0x10, 0x12, 0xc7, 0x18, 0x05, 0x24, 0xf7, 0x21, 0x85, 0xaf, 0xc7, + 0x64, 0xbd, 0x58, 0x2b, 0xed, 0x6d, 0xef, 0x86, 0xa4, 0xec, 0xce, 0x48, 0xb5, 0xeb, 0x31, 0x52, + 0xde, 0x87, 0x8c, 0x3b, 0xc6, 0xb6, 0xeb, 0x54, 0xa5, 0x1d, 0xa1, 0x56, 0xda, 0x7b, 0x7d, 0xc1, + 0xe2, 0x3e, 0x5d, 0xa4, 0x3e, 0x80, 0xd2, 0xb1, 0xe1, 0x61, 0x9b, 0xfc, 0x0c, 0x0c, 0xe7, 0x1c, + 0x29, 0x45, 0x48, 0x9f, 0xa1, 0x73, 0xdb, 0xa1, 0xa2, 0x0b, 0x4a, 0x1e, 0x24, 0xe4, 0x58, 0x54, + 0x72, 0x41, 0xfd, 0xb3, 0x00, 0x05, 0x8d, 0x18, 0xcd, 0xa8, 0x7d, 0xe5, 0x0e, 0x14, 0xb0, 0x3d, + 0x42, 0x3a, 0x76, 0xf5, 0xa1, 0xfd, 0x8c, 0xa8, 0x2b, 0xd4, 0xd2, 0x64, 0x74, 0x64, 0x5c, 0xe9, + 0xcf, 0x90, 0xe7, 0x93, 0x55, 0xd4, 0xcc, 0xb4, 0xf2, 0x10, 0xca, 0x67, 0x43, 0xd7, 0x1d, 0xe9, + 0x4f, 0xec, 0x21, 0x46, 0x9e, 0x4e, 0x2d, 0x62, 0x4a, 0xde, 0x8d, 0x28, 0xb9, 0x4f, 0x56, 0x1d, + 0xd0, 0x45, 0xd4, 0x24, 0x05, 0xe0, 0x6c, 0xe8, 0x9a, 0x4f, 0x75, 0xdf, 0x7e, 0x8e, 0xaa, 0x29, + 0xca, 0xec, 0x1d, 0x78, 0xdd, 0x42, 0xcf, 0x6c, 0x83, 0xa8, 0xa1, 0x9b, 0x68, 0x38, 0x0c, 0xa4, + 0xe9, 0xb6, 0xa3, 0xfb, 0xc8, 0xac, 0xa6, 0x77, 0x84, 0x9a, 0xa4, 0xfe, 0x5d, 0x80, 0x1c, 0x55, + 0xf8, 0x08, 0x61, 0x83, 0x30, 0x62, 0x2e, 0x0b, 0x41, 0xbb, 0x07, 0xf9, 0x31, 0x03, 0x45, 0x7f, + 0x8a, 0xae, 0xab, 0xe2, 0x8e, 0x54, 0xcb, 0x2f, 0x04, 0x8d, 0x3b, 0xe7, 0x33, 0x28, 0x59, 0xe8, + 0x89, 0xed, 0x20, 0x4b, 0x37, 0xdd, 0xe1, 0x64, 0x44, 0xb0, 0x26, 0x64, 0x3b, 0x11, 0xb2, 0x16, + 0x5b, 0xd2, 0xa4, 0x2b, 0x38, 0x65, 0x1d, 0xc0, 0x76, 0x2c, 0x74, 0xa5, 0x8f, 0x10, 0x36, 0xaa, + 0x29, 0x4a, 0xb5, 0x1e, 0xa1, 0xea, 0x90, 0x69, 0xa2, 0xad, 0x7a, 0x06, 0xb9, 0xa6, 0xeb, 0x58, + 0xd4, 0x35, 0xca, 0x97, 0x50, 0xf4, 0xdc, 0x4b, 0x1d, 0x5d, 0xd9, 0x3e, 0x46, 0x8e, 0xc9, 0xb4, + 0x2f, 0xed, 0xbd, 0x1d, 0xa1, 0x1d, 0xb8, 0x97, 0xed, 0x60, 0x41, 0xfb, 0x6a, 0x8c, 0x4c, 0x4c, + 0xf1, 0x51, 0xaa, 0x20, 0x33, 0x3d, 0x75, 0x33, 0x60, 0x48, 0x7d, 0x52, 0x50, 0xdf, 0x83, 0x42, + 0xd3, 0x18, 0x1b, 0xa6, 0x8d, 0xaf, 0x4f, 0x1d, 0x1b, 0x93, 0xb0, 0xf3, 0x90, 0x61, 0x71, 0x3f, + 0x16, 0x21, 0x7d, 0xe9, 0xd9, 0x98, 0xc5, 0x69, 0x5a, 0xfd, 0x4e, 0x80, 0xcd, 0x01, 0xf2, 0x91, + 0xf7, 0x0c, 0x59, 0xda, 0x85, 0xe7, 0x4e, 0xce, 0x2f, 0xc6, 0x13, 0xdc, 0x42, 0xd8, 0xb0, 0x87, + 0xbe, 0xf2, 0x21, 0x14, 0x4d, 0xce, 0x4a, 0x9f, 0x38, 0x36, 0xa6, 0x1a, 0xe6, 0xf7, 0x36, 0x23, + 0x1a, 0x46, 0x84, 0x6d, 0x81, 0x32, 0x34, 0x7c, 0xac, 0xdb, 0x8e, 0xe9, 0x21, 0xc3, 0x47, 0x3a, + 0x09, 0x25, 0x1a, 0x69, 0xd2, 0x74, 0xce, 0x42, 0xe1, 0x39, 0x89, 0x3a, 0xf5, 0x00, 0x94, 
0x79, + 0x35, 0x5e, 0x5c, 0xbe, 0xda, 0x02, 0xb9, 0xe9, 0x3a, 0xfe, 0x64, 0x84, 0xac, 0x60, 0xfc, 0x25, + 0xb8, 0xb4, 0xa1, 0x72, 0x82, 0x3d, 0x64, 0x8c, 0x4e, 0xc6, 0xc8, 0xb4, 0x9f, 0xd8, 0x26, 0xc3, + 0x7c, 0x0d, 0x8a, 0xc8, 0xe1, 0xf9, 0x81, 0xcc, 0x52, 0x46, 0x2b, 0xca, 0x06, 0xac, 0xa2, 0xab, + 0xb1, 0xed, 0xb1, 0xc0, 0xe5, 0x06, 0x13, 0x70, 0x87, 0x50, 0x64, 0x6c, 0x02, 0x3c, 0x17, 0x30, + 0x28, 0x43, 0x8e, 0xfd, 0xeb, 0xb6, 0xc5, 0xf2, 0x47, 0x12, 0x4f, 0x89, 0xfa, 0xaf, 0x0a, 0x32, + 0x05, 0x91, 0xf3, 0xa1, 0x33, 0x29, 0x0a, 0xe1, 0x3f, 0x44, 0x50, 0x9a, 0x1e, 0x32, 0x30, 0xa2, + 0xbb, 0x63, 0x80, 0x7e, 0x3d, 0x41, 0x3e, 0x26, 0xe1, 0xc9, 0x36, 0x08, 0x0d, 0x4f, 0x66, 0x7a, + 0x34, 0x3c, 0x67, 0x9b, 0xe9, 0x67, 0x50, 0xf1, 0xb8, 0x17, 0x74, 0x3c, 0x75, 0x03, 0x75, 0x5f, + 0x7e, 0xef, 0xcd, 0x68, 0x5c, 0x26, 0x7a, 0x8b, 0x49, 0x62, 0xc9, 0xca, 0xa7, 0x1a, 0xc7, 0x71, + 0x8e, 0xa4, 0x9a, 0x0f, 0x00, 0xc6, 0x41, 0xa6, 0xf2, 0xf9, 0xd6, 0x89, 0x65, 0xc2, 0x68, 0x22, + 0xfb, 0x04, 0xf2, 0x1c, 0x29, 0x7f, 0xcc, 0x13, 0x42, 0x7c, 0x8b, 0x26, 0x39, 0xee, 0x3d, 0xc8, + 0xcf, 0xb6, 0xa8, 0x5f, 0xcd, 0xde, 0xb8, 0x47, 0xd7, 0xa0, 0x12, 0x81, 0xd1, 0x1f, 0xbb, 0x8e, + 0x8f, 0xd4, 0xef, 0x05, 0x50, 0x4e, 0xc7, 0x56, 0x1c, 0xde, 0xa4, 0xfc, 0xb3, 0x10, 0x46, 0xe1, + 0xa7, 0x81, 0x31, 0x86, 0x4a, 0xea, 0x76, 0xa8, 0xa8, 0xff, 0x14, 0xa0, 0x12, 0xb1, 0x88, 0x59, + 0xaa, 0x3c, 0x82, 0xed, 0x04, 0xf5, 0x75, 0x8b, 0x05, 0x31, 0x0f, 0xa1, 0x7b, 0x4b, 0xcc, 0x08, + 0xa5, 0x90, 0xa8, 0x35, 0x62, 0xc2, 0xe6, 0x8b, 0x58, 0xb3, 0x07, 0x25, 0x6e, 0x4d, 0x20, 0x91, + 0x01, 0xb0, 0x95, 0x60, 0x10, 0x97, 0xa2, 0xd6, 0xe1, 0x4e, 0x0b, 0xf9, 0xa6, 0x67, 0x9f, 0x2d, + 0xf5, 0x8e, 0xfa, 0x6f, 0x11, 0xd6, 0x62, 0x8b, 0xb9, 0xe1, 0x2f, 0xb2, 0x55, 0x96, 0x80, 0x24, + 0xfe, 0x30, 0x90, 0xa4, 0x65, 0x20, 0xed, 0x42, 0x21, 0xa8, 0x54, 0x0c, 0x3c, 0x21, 0x7b, 0x87, + 0x1c, 0x1d, 0xd5, 0x79, 0x82, 0x13, 0x3a, 0x9f, 0x00, 0x6a, 0x7a, 0x19, 0xa8, 0xe4, 0xc8, 0xf7, + 0x2f, 0x0c, 0xcf, 0xd2, 0xfd, 0xf1, 0xd0, 0xc6, 0x7e, 0x35, 0xb3, 0x23, 0xd5, 0x0a, 0xf1, 0xbd, + 0xb4, 0x72, 0xe3, 0x5e, 0x52, 0x40, 0xee, 0xda, 0x3e, 0x0e, 0xfb, 0x44, 0xad, 0x41, 0x39, 0x34, + 0xc6, 0xa1, 0xaf, 0x40, 0x7e, 0xe6, 0x28, 0x12, 0x63, 0x52, 0x2d, 0xa7, 0xd6, 0x40, 0x69, 0xa1, + 0x21, 0x5a, 0xbe, 0xe3, 0xc8, 0x9e, 0x8d, 0xac, 0xe4, 0x7b, 0xf6, 0x1e, 0xc8, 0x5d, 0xd7, 0xb0, + 0x96, 0x92, 0x57, 0xa0, 0x1c, 0x5a, 0xc7, 0x89, 0x6b, 0xa0, 0x9c, 0x3a, 0xc3, 0xdb, 0x90, 0xaf, + 0x41, 0x25, 0xb2, 0x92, 0x33, 0xf8, 0x39, 0xe4, 0x34, 0x7b, 0x84, 0x58, 0xe6, 0x52, 0x00, 0x7c, + 0x6c, 0x78, 0x98, 0x65, 0x6c, 0x72, 0x16, 0x4b, 0x8a, 0x0c, 0x2b, 0xc8, 0xb1, 0x66, 0x27, 0x86, + 0x44, 0x0e, 0x08, 0x9f, 0xef, 0xd1, 0xf0, 0xe9, 0xf8, 0x4b, 0x28, 0x0e, 0x10, 0x9e, 0x78, 0x4e, + 0xd3, 0x75, 0x30, 0x72, 0xb0, 0xf2, 0x00, 0xf2, 0x1e, 0x1d, 0x60, 0x15, 0x97, 0x40, 0x2b, 0xae, + 0x8d, 0x58, 0xb4, 0x91, 0x79, 0x5a, 0x6c, 0x6d, 0x93, 0x7c, 0x44, 0x57, 0xf3, 0x92, 0x81, 0x81, + 0x2c, 0x52, 0x90, 0x7f, 0x27, 0x42, 0xf1, 0x10, 0xe1, 0x81, 0x7b, 0x79, 0x53, 0x4a, 0xab, 0xc4, + 0x4b, 0x2a, 0x52, 0x47, 0xae, 0x43, 0x89, 0x31, 0xf4, 0x49, 0xb1, 0x78, 0x8e, 0x30, 0xad, 0x99, + 0x72, 0x74, 0x1f, 0x91, 0x0a, 0xd2, 0x23, 0x96, 0xf3, 0x74, 0x14, 0xdb, 0x47, 0x53, 0x5c, 0xe2, + 0x75, 0x65, 0x9a, 0x9e, 0x72, 0x5b, 0x50, 0x30, 0x0d, 0xf3, 0x02, 0xe9, 0xb4, 0x48, 0x24, 0xa1, + 0x27, 0xd4, 0x56, 0xbe, 0x48, 0x61, 0x6f, 0x82, 0x94, 0x12, 0x64, 0x58, 0xb5, 0x59, 0xcd, 0x92, + 0x7a, 0x87, 0x86, 0x29, 0x45, 0x96, 0xd7, 0x6d, 0x2b, 0xf4, 0x00, 0x55, 0x00, 0x08, 0xb6, 0x7c, + 0x2c, 0x47, 0xc7, 
0x8a, 0x90, 0xc6, 0xee, 0x53, 0xe4, 0x54, 0x81, 0x12, 0xae, 0x43, 0x09, 0x7b, + 0x86, 0xe3, 0x1b, 0x26, 0x3d, 0x64, 0x6d, 0xab, 0x9a, 0xa7, 0xb5, 0xfb, 0x19, 0x94, 0x02, 0x40, + 0x78, 0x74, 0x7e, 0x00, 0x2b, 0x26, 0xaf, 0x2a, 0x78, 0x5a, 0x88, 0x56, 0x93, 0x73, 0x25, 0x47, + 0x1e, 0x24, 0xcf, 0xbd, 0xe4, 0x30, 0x29, 0x00, 0x0e, 0xba, 0xc2, 0x3a, 0x93, 0x2d, 0xd1, 0x22, + 0xed, 0x2f, 0x02, 0xc8, 0x2c, 0xf7, 0x2e, 0x01, 0x5e, 0x01, 0x20, 0x45, 0xa2, 0x79, 0x41, 0xb1, + 0x64, 0x0c, 0xef, 0x43, 0x6e, 0x56, 0xf4, 0x49, 0x09, 0x69, 0x6a, 0x56, 0x63, 0xee, 0x41, 0x69, + 0xea, 0x7a, 0x1a, 0x3a, 0xdc, 0x1d, 0x5b, 0x09, 0xb1, 0x12, 0x04, 0xd7, 0x3c, 0x2e, 0x69, 0x8a, + 0xcb, 0xd7, 0x50, 0x0e, 0xa9, 0xfc, 0x83, 0xa1, 0x21, 0x30, 0xfc, 0x51, 0x80, 0xe2, 0xf1, 0x64, + 0x59, 0xf0, 0x45, 0xd0, 0x7c, 0x45, 0xc6, 0xf7, 0xa0, 0x14, 0x28, 0xfa, 0xa3, 0x58, 0xfe, 0x57, + 0x01, 0x64, 0x96, 0xb2, 0x5e, 0x66, 0xe7, 0xbd, 0xba, 0x08, 0x08, 0xe9, 0xfc, 0xa3, 0xe0, 0xf0, + 0x1b, 0x11, 0x36, 0x69, 0xda, 0xec, 0x38, 0xfb, 0x06, 0x36, 0x2f, 0x5e, 0x22, 0x15, 0x91, 0x53, + 0x69, 0xba, 0xb5, 0x25, 0xfa, 0x3b, 0x9f, 0x99, 0x52, 0x09, 0x99, 0x29, 0xfd, 0x42, 0x99, 0x29, + 0x93, 0x98, 0x99, 0xb2, 0x89, 0x99, 0x69, 0x25, 0x31, 0x33, 0xe5, 0x12, 0x32, 0x13, 0x50, 0x60, + 0xbb, 0xa0, 0x24, 0x58, 0xff, 0x29, 0x64, 0xa8, 0xf5, 0xec, 0x3c, 0x8c, 0x97, 0x13, 0x0b, 0x51, + 0x53, 0xff, 0x20, 0x40, 0x75, 0xe0, 0x5e, 0xc6, 0xe6, 0xb8, 0xbb, 0x8a, 0x90, 0xb6, 0x7d, 0xdd, + 0x7d, 0xca, 0xef, 0x1e, 0x6f, 0x41, 0x1a, 0x79, 0x9e, 0xeb, 0xf1, 0xea, 0x54, 0x89, 0x88, 0x60, + 0xad, 0x8e, 0xb0, 0x83, 0x59, 0x29, 0x76, 0x3b, 0x07, 0xa7, 0x28, 0x08, 0xd1, 0xec, 0x97, 0xa6, + 0x4e, 0x47, 0xb0, 0x95, 0xa4, 0x3d, 0xd7, 0x30, 0xc9, 0xe9, 0xff, 0x0f, 0x29, 0xcf, 0xbd, 0xf4, + 0xf9, 0x5d, 0xfe, 0x9d, 0xf8, 0x15, 0x39, 0x91, 0x91, 0xda, 0x83, 0x4a, 0x12, 0xff, 0x87, 0x31, + 0x58, 0xdf, 0x5d, 0x0a, 0x2b, 0xe7, 0xf7, 0x37, 0x72, 0x59, 0x9e, 0x0a, 0xfb, 0x86, 0x5c, 0xa3, + 0x43, 0xde, 0xaa, 0xf1, 0x86, 0x0e, 0xbb, 0xc5, 0x47, 0xb7, 0x57, 0x7f, 0x8c, 0xd8, 0x55, 0x2e, + 0x68, 0x7e, 0xfc, 0x8f, 0x73, 0xba, 0x7a, 0x0e, 0xdb, 0x61, 0xc3, 0xe2, 0xba, 0x27, 0x41, 0xfe, + 0x71, 0x04, 0xf2, 0x7b, 0x0b, 0x20, 0x8f, 0x71, 0x52, 0x2f, 0xe0, 0x4e, 0xa2, 0x84, 0xcf, 0x62, + 0xa0, 0xd7, 0x16, 0x82, 0x1e, 0xa7, 0x9c, 0x4f, 0x46, 0xac, 0xc5, 0xf6, 0x5b, 0x01, 0xb6, 0x92, + 0xf4, 0x78, 0x35, 0x71, 0xae, 0xda, 0x70, 0x37, 0xd9, 0x8a, 0x1b, 0xa2, 0xfa, 0x93, 0x08, 0xc4, + 0xef, 0x2e, 0x85, 0x98, 0xc7, 0xe1, 0x00, 0xd6, 0x92, 0x65, 0x7c, 0x1e, 0x03, 0xf9, 0xfe, 0x2d, + 0x40, 0xe6, 0x3c, 0xff, 0x23, 0xc2, 0x2a, 0x09, 0x77, 0x12, 0x92, 0x37, 0x45, 0xc5, 0x7d, 0xc8, + 0x59, 0xb6, 0x87, 0x4c, 0xde, 0x70, 0x22, 0xa1, 0x1e, 0x8d, 0xd3, 0x56, 0x30, 0xfb, 0x13, 0x96, + 0x87, 0x45, 0x48, 0x0f, 0xed, 0x91, 0x8d, 0x79, 0x4e, 0xfe, 0x3f, 0xd8, 0xb6, 0x1d, 0x73, 0x38, + 0xf1, 0xed, 0x67, 0xf4, 0x42, 0xe4, 0x61, 0x3d, 0x7c, 0x42, 0x64, 0xe9, 0x06, 0x7b, 0x0b, 0x36, + 0xd1, 0x55, 0xb0, 0x88, 0x24, 0xe0, 0xf0, 0x92, 0x15, 0xba, 0x24, 0x9e, 0xdb, 0x73, 0x89, 0xb9, + 0x1d, 0x12, 0x73, 0x7b, 0x3e, 0x21, 0xb7, 0x17, 0xa2, 0x55, 0x67, 0x71, 0x41, 0xd5, 0x59, 0xa2, + 0xe1, 0xfc, 0x9d, 0x00, 0xf2, 0xcc, 0x01, 0x2f, 0x7b, 0xb6, 0x16, 0xa6, 0x11, 0x45, 0x0c, 0x7a, + 0x03, 0xd6, 0x69, 0xee, 0x9d, 0xc7, 0x44, 0x4a, 0xc8, 0xcd, 0x2c, 0x8e, 0xdf, 0x65, 0xd7, 0x33, + 0x76, 0x15, 0x5c, 0x14, 0x09, 0x44, 0xe1, 0x03, 0xc8, 0xb0, 0x45, 0xd1, 0xfe, 0xd5, 0xb4, 0x6c, + 0x0d, 0x11, 0x88, 0x74, 0x6c, 0x0d, 0x8a, 0xa6, 0x87, 0x22, 0x1d, 0x2d, 0xb1, 0x26, 0xa9, 0x5f, + 0x80, 0x12, 0x16, 0xc8, 0x2d, 0x7f, 0x1b, 
0xb2, 0x8c, 0x67, 0x10, 0xcb, 0x95, 0x84, 0x9b, 0xaa, + 0x7a, 0x04, 0x79, 0xde, 0xd9, 0x20, 0x17, 0x55, 0x72, 0xa1, 0x62, 0x37, 0xd6, 0xa9, 0x1e, 0x65, + 0xc8, 0x8d, 0x0d, 0x0f, 0x39, 0x78, 0xd6, 0x5a, 0xdb, 0x84, 0x32, 0x1f, 0xf2, 0xed, 0xb3, 0xa1, + 0xed, 0x9c, 0x93, 0x29, 0x89, 0x9a, 0x64, 0xcc, 0x3a, 0x03, 0x51, 0xfb, 0x13, 0x2c, 0xdc, 0x81, + 0x6a, 0x3c, 0xe8, 0xa6, 0xb2, 0x99, 0xa0, 0x0a, 0xe4, 0xd9, 0x08, 0x8b, 0x55, 0xda, 0xbf, 0x53, + 0xff, 0x25, 0xc0, 0x7a, 0x5c, 0x06, 0x37, 0x39, 0x41, 0x48, 0x62, 0x6b, 0x51, 0xac, 0xa5, 0x63, + 0xf8, 0x4a, 0xc9, 0xf8, 0xa6, 0x68, 0xdb, 0xf5, 0x43, 0x28, 0x06, 0xdd, 0x21, 0xd6, 0x2b, 0x48, + 0xd3, 0x5d, 0xbb, 0x99, 0xd4, 0x1f, 0x62, 0xcd, 0x82, 0x1a, 0x64, 0xa8, 0xe2, 0xec, 0xca, 0x9f, + 0x8f, 0xb5, 0x15, 0xc2, 0x80, 0xaf, 0x41, 0x91, 0x05, 0x58, 0x60, 0x79, 0x96, 0xe2, 0xf8, 0x2b, + 0xd8, 0x38, 0x44, 0x98, 0x2e, 0xe9, 0x60, 0x72, 0xf2, 0xb9, 0xde, 0x0d, 0x48, 0x86, 0xbd, 0x26, + 0x06, 0x5e, 0x23, 0x06, 0xf8, 0xd8, 0x18, 0x8d, 0xd9, 0x15, 0x78, 0xb6, 0x8b, 0x52, 0x3c, 0xf8, + 0xaa, 0xf3, 0x12, 0x38, 0x8e, 0xeb, 0x50, 0xe2, 0xfc, 0xf8, 0xcc, 0x2c, 0x26, 0x43, 0xd1, 0xce, + 0x0e, 0x91, 0xaf, 0x60, 0x9d, 0xf0, 0xe1, 0x8e, 0x30, 0x5d, 0xcf, 0x0a, 0x1d, 0x3b, 0x89, 0x5c, + 0xa6, 0xb9, 0x87, 0xf5, 0x78, 0xbf, 0x17, 0x98, 0xad, 0x11, 0x0e, 0x5c, 0x91, 0xce, 0xb4, 0xe9, + 0xe2, 0xd1, 0x89, 0x20, 0x94, 0x3f, 0x8a, 0xe0, 0xb9, 0x80, 0x7a, 0x37, 0x3c, 0x48, 0xae, 0xf0, + 0x61, 0xa0, 0x03, 0x95, 0xa8, 0x11, 0x5b, 0x5d, 0x28, 0x44, 0x16, 0x3f, 0x80, 0x3c, 0xcf, 0x2e, + 0xa1, 0x82, 0x24, 0xda, 0x1d, 0x68, 0x98, 0xd3, 0x6a, 0xa4, 0x04, 0x19, 0xa6, 0x1e, 0x7f, 0x10, + 0xea, 0xc0, 0x9b, 0x4d, 0x77, 0x34, 0x9e, 0x60, 0x74, 0x32, 0x1e, 0xda, 0xf8, 0xd8, 0xb5, 0x1d, + 0xec, 0xef, 0x5f, 0x9f, 0xd8, 0xcf, 0xd1, 0x92, 0x8b, 0x2a, 0xed, 0x13, 0xb1, 0x17, 0x1d, 0xda, + 0xf1, 0x57, 0x7f, 0x2f, 0xc2, 0xce, 0x62, 0x5e, 0x2f, 0x9b, 0xe3, 0xde, 0x87, 0x8c, 0x4f, 0x9f, + 0x5e, 0x6e, 0xf7, 0xb2, 0x43, 0x32, 0x35, 0x55, 0x6c, 0x4c, 0xa5, 0xf3, 0x1b, 0x42, 0x0f, 0x72, + 0x43, 0x97, 0x35, 0x42, 0x83, 0xce, 0xf3, 0x97, 0x31, 0xb1, 0x37, 0xeb, 0xbd, 0x4b, 0x67, 0xba, + 0x9c, 0xc7, 0xd6, 0x47, 0x50, 0x8c, 0x0c, 0x90, 0xa8, 0x0e, 0x04, 0x70, 0x84, 0x28, 0xd0, 0x63, + 0x64, 0xb0, 0x86, 0xba, 0xa2, 0x7e, 0x0d, 0x95, 0xa4, 0xf7, 0xa4, 0xe8, 0x33, 0xe1, 0x83, 0xc8, + 0x33, 0xe1, 0x1b, 0x8b, 0x5f, 0xa3, 0x88, 0x2f, 0xd5, 0x3f, 0x09, 0x90, 0x9b, 0x76, 0xdf, 0x62, + 0x9c, 0x12, 0xee, 0x4d, 0x39, 0x12, 0xe5, 0x09, 0xcf, 0x5e, 0x39, 0xe5, 0x21, 0x94, 0x59, 0x97, + 0x6f, 0x42, 0x6f, 0xfc, 0xfa, 0xc8, 0xb5, 0x10, 0x6f, 0x32, 0xde, 0x9d, 0xef, 0xf5, 0xb1, 0xb6, + 0xc0, 0x91, 0x6b, 0xa1, 0xd9, 0x6b, 0x18, 0xd5, 0x3a, 0x9d, 0x50, 0x20, 0x50, 0x0a, 0xaa, 0x2d, + 0x0e, 0x1e, 0x2c, 0xe8, 0x50, 0x10, 0x5c, 0x1b, 0xb0, 0x3a, 0x32, 0x6c, 0x47, 0x9f, 0x8b, 0xb0, + 0xe8, 0x43, 0x9b, 0x98, 0x50, 0x23, 0xcf, 0x4c, 0xdf, 0x24, 0xfa, 0x9b, 0xc3, 0x89, 0x85, 0xf4, + 0x33, 0xc3, 0x47, 0xba, 0x65, 0x60, 0x83, 0x66, 0x92, 0x95, 0x59, 0x7f, 0x9f, 0x4b, 0xe5, 0x05, + 0xd0, 0x57, 0x20, 0xb7, 0x3c, 0x77, 0x7c, 0x3b, 0x55, 0x94, 0x40, 0x95, 0xd9, 0xf1, 0xa6, 0x56, + 0xa0, 0x1c, 0x62, 0x30, 0xe5, 0x7a, 0xf7, 0x84, 0x9c, 0x0d, 0x24, 0x2c, 0x86, 0xda, 0xec, 0xdc, + 0x5f, 0xd2, 0xee, 0x98, 0xde, 0xf4, 0xd5, 0x87, 0xf0, 0xfa, 0x02, 0x06, 0xb3, 0x6c, 0x17, 0xab, + 0x27, 0x58, 0x53, 0x72, 0x0f, 0xaa, 0x4d, 0x77, 0x34, 0xb2, 0x71, 0x82, 0xd4, 0x45, 0x34, 0xdb, + 0xb0, 0x99, 0x40, 0xc3, 0x4d, 0xf9, 0x08, 0x36, 0x1a, 0x67, 0xae, 0xf7, 0x22, 0xfc, 0xb6, 0xa0, + 0x3a, 0x4f, 0xc2, 0xd8, 0xd5, 0x3f, 0x81, 0x52, 0xec, 0x99, 0x3b, 
0x0f, 0xd9, 0x4e, 0x4f, 0x6b, + 0x1f, 0xb6, 0x07, 0xb2, 0xa0, 0x00, 0x64, 0x4e, 0xb4, 0x41, 0xa7, 0x77, 0x28, 0x8b, 0xe4, 0x7b, + 0xbf, 0xd3, 0x6b, 0x0c, 0x1e, 0xcb, 0x52, 0xfd, 0x5e, 0xf8, 0x61, 0x9d, 0x75, 0xc3, 0x15, 0x05, + 0x4a, 0x8d, 0x53, 0xad, 0xaf, 0x77, 0x7a, 0xcd, 0x41, 0xfb, 0xa8, 0xdd, 0xd3, 0x64, 0xa1, 0xbe, + 0x0b, 0xab, 0xf1, 0x37, 0xe7, 0x15, 0x48, 0xf5, 0xfa, 0xbd, 0xb6, 0x2c, 0x90, 0xaf, 0x66, 0xbb, + 0xdb, 0x95, 0x45, 0x25, 0x0b, 0xd2, 0xa0, 0xff, 0x8d, 0x2c, 0xd5, 0xbf, 0x86, 0x7c, 0xb8, 0x5f, + 0x0e, 0x90, 0x69, 0x34, 0xb5, 0xce, 0x2f, 0xc8, 0xea, 0x02, 0xac, 0x74, 0x7a, 0xfc, 0x4f, 0x24, + 0x5a, 0x76, 0xfb, 0x8d, 0x16, 0xd1, 0x8c, 0x9c, 0x4a, 0xb9, 0xd3, 0x5e, 0xf0, 0x9b, 0x22, 0x2b, + 0x4f, 0x8f, 0x5b, 0x0d, 0x8d, 0xfc, 0xa5, 0xeb, 0x47, 0xb0, 0xb1, 0xe8, 0xf5, 0x16, 0x20, 0xd3, + 0x39, 0xec, 0xf5, 0x07, 0x6d, 0xf9, 0x35, 0x45, 0x86, 0x42, 0xfb, 0xdb, 0xe3, 0x76, 0x53, 0xd3, + 0xdb, 0xdf, 0x76, 0x4e, 0x34, 0x59, 0x50, 0xee, 0x80, 0xcc, 0x47, 0x7a, 0xfd, 0x60, 0x54, 0xac, + 0x7f, 0x0e, 0x10, 0xea, 0xe9, 0xe6, 0x21, 0x3b, 0x20, 0xf3, 0x3d, 0xc2, 0x22, 0x07, 0xe9, 0x81, + 0xa6, 0x1f, 0x3f, 0x92, 0x05, 0xa5, 0x02, 0xab, 0x03, 0x4d, 0x6f, 0x1c, 0x68, 0xed, 0x81, 0x7e, + 0xd4, 0x6f, 0x75, 0x0e, 0x1e, 0xcb, 0x62, 0xfd, 0x43, 0x28, 0x46, 0x6f, 0xa0, 0x59, 0x90, 0x8e, + 0x4f, 0x35, 0x06, 0x33, 0xd5, 0xb8, 0xcd, 0x60, 0x6e, 0xb5, 0xbb, 0x6d, 0xad, 0x4d, 0x61, 0xce, + 0xcd, 0x0a, 0xf9, 0x3c, 0x64, 0x0f, 0xfa, 0x83, 0x6f, 0x1a, 0x83, 0x96, 0xfc, 0x1a, 0xb1, 0x71, + 0xbf, 0xd1, 0x7c, 0x44, 0xff, 0x84, 0xfa, 0xa7, 0xc1, 0xd1, 0xc3, 0x71, 0xab, 0xc0, 0xea, 0x89, + 0x36, 0x68, 0x37, 0x8e, 0xf4, 0x76, 0xaf, 0xb1, 0xdf, 0x25, 0x40, 0x08, 0x4a, 0x19, 0x8a, 0x7c, + 0x30, 0x40, 0x91, 0x18, 0x13, 0x3a, 0x82, 0xf2, 0x90, 0x3d, 0x3e, 0xd5, 0x74, 0xe2, 0x09, 0x41, + 0x29, 0x01, 0x30, 0x95, 0xe8, 0xbf, 0x48, 0xfe, 0x99, 0x5a, 0x3a, 0xf3, 0x94, 0x09, 0xe5, 0xb9, + 0xc4, 0xa7, 0xac, 0x42, 0xbe, 0xd5, 0xd4, 0xf4, 0x59, 0xfc, 0x10, 0xaa, 0xa6, 0xa6, 0xb7, 0xfa, + 0xa7, 0xfb, 0x5d, 0x62, 0x1c, 0x5f, 0xb0, 0xdf, 0xef, 0x77, 0xdb, 0x8d, 0x9e, 0x2c, 0x05, 0x0b, + 0x78, 0x90, 0x51, 0xdf, 0xd1, 0x05, 0xdd, 0xfe, 0xbe, 0x9c, 0xad, 0x7f, 0x01, 0xab, 0xf1, 0xcc, + 0x56, 0x81, 0xd5, 0xce, 0xe9, 0x91, 0xde, 0x38, 0x79, 0xdc, 0x6b, 0xea, 0x9d, 0x5e, 0xab, 0xfd, + 0xad, 0xfc, 0x1a, 0x09, 0x3d, 0x32, 0x18, 0x1a, 0x13, 0xea, 0x1f, 0xf3, 0x1c, 0x4c, 0x15, 0x23, + 0x54, 0x9a, 0x7e, 0xd8, 0xed, 0xef, 0x37, 0xba, 0x11, 0x2a, 0x4d, 0xef, 0xf6, 0x9b, 0xd3, 0x31, + 0xe1, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0x12, 0x13, 0xc3, 0x7a, 0x22, 0x00, 0x00, +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto new file mode 100644 index 00000000..a2fab70a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol/table_store.proto @@ -0,0 +1,616 @@ +syntax = "proto2"; + +package otsprotocol; + +message Error { + required string code = 1; + optional string message = 2; +} + +enum PrimaryKeyType { + INTEGER = 1; + STRING = 2; + BINARY = 3; +} + +enum PrimaryKeyOption { + AUTO_INCREMENT = 1; +} + +message PrimaryKeySchema { + required string name = 1; + required PrimaryKeyType type = 2; + optional PrimaryKeyOption option = 3; +} + +message PartitionRange { + required bytes begin = 1; // encoded as SQLVariant + required bytes end = 2; // encoded as SQLVariant +} + +enum BloomFilterType { + NONE = 1; + CELL = 2; + ROW = 3; +} 
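+/**
+ * A minimal sketch of how the schema messages above compose (purely illustrative values):
+ * a table keyed by a string partition key plus an auto-incrementing integer column would
+ * carry two PrimaryKeySchema entries, e.g.
+ *   { name: "user_id", type: STRING }
+ *   { name: "seq",     type: INTEGER, option: AUTO_INCREMENT }
+ */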
+
+message TableOptions {
+    optional int32 time_to_live = 1; // may be changed dynamically
+    optional int32 max_versions = 2; // may be changed dynamically
+    optional BloomFilterType bloom_filter_type = 3; // may be changed dynamically
+    optional int32 block_size = 4; // may be changed dynamically
+    optional int64 deviation_cell_version_in_sec = 5; // may be changed dynamically
+}
+
+message TableMeta {
+    required string table_name = 1;
+    repeated PrimaryKeySchema primary_key = 2;
+    repeated DefinedColumnSchema defined_column = 3;
+    repeated IndexMeta index_meta = 4;
+}
+
+/**
+ * Table status changes correspond only to user operations; internal events such as machine
+ * failover do not change the table status. Three considerations drive this:
+ * 1. In typical scenarios, users only check the table status after performing a modification
+ *    on the table.
+ * 2. There is a delay between an internal machine failover causing access errors and the moment
+ *    the user could observe a status change, so the table's unavailable state cannot be matched
+ *    exactly to the status the user sees.
+ * 3. After an internal machine failover it is not the whole table whose state changes but a
+ *    partition, which would correspond to a PARTIAL_FAILOVER table status; that partition
+ *    granularity cannot be expressed here and would only confuse users.
+ */
+enum TableStatus {
+    ACTIVE = 1;    // The table is serviceable.
+    INACTIVE = 2;  // The table has been disabled by the user via UnloadTable.
+    LOADING = 3;   // The table is being created and not all partitions are loaded yet; or the table has just been enabled from the INACTIVE state.
+    UNLOADING = 4; // The table is being deleted (from delete table until its partitions are fully unloaded), or is being unloaded from the ACTIVE state.
+    UPDATING = 5;  // The table is being updated (table attribute or reserved-throughput changes).
+}
+
+enum RowExistenceExpectation {
+    IGNORE = 0;
+    EXPECT_EXIST = 1;
+    EXPECT_NOT_EXIST = 2;
+}
+
+message Condition {
+    required RowExistenceExpectation row_existence = 1;
+    optional bytes column_condition = 2;
+}
+
+message CapacityUnit {
+    optional int32 read = 1;
+    optional int32 write = 2;
+}
+
+message ReservedThroughputDetails {
+    required CapacityUnit capacity_unit = 1; // The table's current reserved throughput.
+    required int64 last_increase_time = 2;   // The time the reserved throughput was last increased.
+    optional int64 last_decrease_time = 3;   // The time the reserved throughput was last decreased.
+}
+
+message ReservedThroughput {
+    required CapacityUnit capacity_unit = 1;
+}
+
+message ConsumedCapacity {
+    required CapacityUnit capacity_unit = 1;
+}
+
+message StreamSpecification {
+    required bool enable_stream = 1;
+    optional int32 expiration_time = 2;
+}
+
+message StreamDetails {
+    required bool enable_stream = 1;
+    optional string stream_id = 2;
+    optional int32 expiration_time = 3;
+    optional int64 last_enable_time = 4;
+}
+
+/* ############################################# CreateTable ############################################# */
+/**
+ * table_meta stores the immutable schema attributes of the table; the mutable ReservedThroughput
+ * and TableOptions are split out so they can serve as parameters to UpdateTable.
+ * Once GlobalIndex and LocalIndex are added, the structure will become:
+ * message CreateTableRequest {
+ *     required TableMeta table_meta = 1;
+ *     required ReservedThroughput reserved_throughput = 2;
+ *     required TableOptions table_options = 3;
+ *     repeated LocalIndex local_indexes = 4;   // A LocalIndex no longer carries its own ReservedThroughput and TableOptions; it shares the main table's configuration.
+ *     repeated GlobalIndex global_indexes = 5; // A GlobalIndex carries its own ReservedThroughput and TableOptions.
+ * }
+ */
+message CreateTableRequest {
+    required TableMeta table_meta = 1;
+    required ReservedThroughput reserved_throughput = 2; // Not placed inside TableOptions, because UpdateTableResponse returns ReservedThroughputDetails while TableOptions has no comparable return structure.
+    optional TableOptions table_options = 3;
+    repeated PartitionRange partitions = 4;
+    optional StreamSpecification stream_spec = 5;
+    repeated IndexMeta index_metas = 7;
+}
+
+message CreateTableResponse {
+}
+
+/* ######################################################################################################### */
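+/**
+ * A minimal sketch of composing a CreateTableRequest (field values are illustrative, not
+ * normative): assuming a single string primary-key column and default throughput,
+ *   table_meta          = { table_name: "sample", primary_key: [{ name: "pk0", type: STRING }] }
+ *   reserved_throughput = { capacity_unit: { read: 0, write: 0 } }
+ *   table_options       = { max_versions: 1 }
+ * Everything else may be left unset; only the mutable pieces above can later be changed via
+ * UpdateTable below.
+ */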
+/* ############################################# UpdateTable ############################################# */
+message UpdateTableRequest {
+    required string table_name = 1;
+    optional ReservedThroughput reserved_throughput = 2;
+    optional TableOptions table_options = 3;
+    optional StreamSpecification stream_spec = 4;
+}
+
+message UpdateTableResponse {
+    required ReservedThroughputDetails reserved_throughput_details = 1;
+    required TableOptions table_options = 2;
+    optional StreamDetails stream_details = 3;
+}
+/* ######################################################################################################### */
+
+/* ############################################# DescribeTable ############################################# */
+message DescribeTableRequest {
+    required string table_name = 1;
+}
+
+message DescribeTableResponse {
+    required TableMeta table_meta = 1;
+    required ReservedThroughputDetails reserved_throughput_details = 2;
+    required TableOptions table_options = 3;
+    required TableStatus table_status = 4;
+    optional StreamDetails stream_details = 5;
+    repeated bytes shard_splits = 6;
+    repeated IndexMeta index_metas = 8;
+}
+/* ########################################################################################################### */
+
+/* ############################################# ListTable ############################################# */
+message ListTableRequest {
+}
+
+/**
+ * Currently only a plain list of names is returned; whether any business scenario needs more than
+ * the table names is open for discussion. Such extra information could include reserved throughput
+ * and table status, but it would only ever be a rough summary; detailed table information still has
+ * to be fetched via DescribeTable.
+ */
+message ListTableResponse {
+    repeated string table_names = 1;
+}
+/* ####################################################################################################### */
+
+/* ############################################# DeleteTable ############################################# */
+message DeleteTableRequest {
+    required string table_name = 1;
+}
+
+message DeleteTableResponse {
+}
+/* ######################################################################################################### */
+
+/* ############################################# LoadTable ############################################# */
+message LoadTableRequest {
+    required string table_name = 1;
+}
+
+message LoadTableResponse {
+}
+/* ######################################################################################################### */
+
+/* ############################################# UnloadTable ############################################# */
+message UnloadTableRequest {
+    required string table_name = 1;
+}
+
+message UnloadTableResponse {
+
+}
+/* ########################################################################################################## */
+
+/**
+ * Timestamps range from 0 to INT64.MAX.
+ * 1. To query a range, specify start_time and end_time.
+ * 2. To query one specific timestamp, specify specific_time.
+ */
+message TimeRange {
+    optional int64 start_time = 1;
+    optional int64 end_time = 2;
+    optional int64 specific_time = 3;
+}
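+/**
+ * A worked example of the two TimeRange modes above (timestamps illustrative, in milliseconds;
+ * the end-exclusive reading is an assumption of the usual convention):
+ *   { start_time: 1546300800000, end_time: 1577836800000 } // every version written within [start_time, end_time)
+ *   { specific_time: 1546300800000 }                       // only the version written at exactly this time
+ * The two modes are alternatives: set either the range pair or specific_time, not both.
+ */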
+/* ############################################# GetRow ############################################# */
+
+enum ReturnType {
+    RT_NONE = 0;
+    RT_PK = 1;
+    RT_AFTER_MODIFY = 2;
+}
+
+message ReturnContent {
+    optional ReturnType return_type = 1;
+    repeated string return_column_names = 2;
+}
+
+/**
+ * 1. Users may specify a version-timestamp range, or one specific version time, to read the designated versions of a column.
+ * 2. Resuming from a breakpoint inside a row is not supported yet.
+ */
+message GetRowRequest {
+    required string table_name = 1;
+    required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    repeated string columns_to_get = 3; // if unspecified, all columns are read
+    optional TimeRange time_range = 4;
+    optional int32 max_versions = 5;
+    optional bool cache_blocks = 6 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 7;
+    optional string start_column = 8;
+    optional string end_column = 9;
+    optional bytes token = 10;
+    optional string transaction_id = 11;
+}
+
+message GetRowResponse {
+    required ConsumedCapacity consumed = 1;
+    required bytes row = 2; // encoded as InplaceRowChangeSet
+    optional bytes next_token = 3;
+}
+/* #################################################################################################### */
+
+/* ############################################# UpdateRow ############################################# */
+message UpdateRowRequest {
+    required string table_name = 1;
+    required bytes row_change = 2;
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message UpdateRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+
+/* ####################################################################################################### */
+
+/* ############################################# PutRow ############################################# */
+
+
+/**
+ * Users are allowed to set a timestamp per column here, rather than being forced to use one
+ * uniform timestamp for the whole row. The reason is that columns all share a common structure,
+ * and that structure already carries a timestamp; furthermore, forcing a uniform timestamp would
+ * add regularity at the cost of flexibility, and that regularity has no obvious benefit while
+ * complicating the structure.
+ */
+message PutRowRequest {
+    required string table_name = 1;
+    required bytes row = 2; // encoded as InplaceRowChangeSet
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message PutRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+/* #################################################################################################### */
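+/**
+ * Sketch of the per-column timestamp model above (cell contents are illustrative): a single
+ * encoded row may carry cells such as
+ *   { name: "temperature", value: 21.5, timestamp: 1546300800000 }
+ *   { name: "status",      value: "ok", timestamp: 1546300805000 }
+ * so two columns of the same row can legitimately hold versions with different timestamps.
+ */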
+/* ############################################# DeleteRow ############################################# */
+/**
+ * OTS only supports deleting all versions of all columns of the row; it does not support:
+ * 1. deleting, across all columns, every version less than or equal to some given version.
+ */
+message DeleteRowRequest {
+    required string table_name = 1;
+    required bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+    optional string transaction_id = 5;
+}
+
+message DeleteRowResponse {
+    required ConsumedCapacity consumed = 1;
+    optional bytes row = 2;
+}
+/* ####################################################################################################### */
+
+/* ############################################# BatchGetRow ############################################# */
+/**
+ * HBase allows every row in a batch operation to carry its own query parameters; OTS does not.
+ */
+message TableInBatchGetRowRequest {
+    required string table_name = 1;
+    repeated bytes primary_key = 2; // encoded as InplaceRowChangeSet, but only has primary key
+    repeated bytes token = 3;
+    repeated string columns_to_get = 4; // if unspecified, all columns are read
+    optional TimeRange time_range = 5;
+    optional int32 max_versions = 6;
+    optional bool cache_blocks = 7 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 8;
+    optional string start_column = 9;
+    optional string end_column = 10;
+}
+
+message BatchGetRowRequest {
+    repeated TableInBatchGetRowRequest tables = 1;
+}
+
+message RowInBatchGetRowResponse {
+    required bool is_ok = 1;
+    optional Error error = 2;
+    optional ConsumedCapacity consumed = 3;
+    optional bytes row = 4; // encoded as InplaceRowChangeSet
+    optional bytes next_token = 5;
+}
+
+message TableInBatchGetRowResponse {
+    required string table_name = 1;
+    repeated RowInBatchGetRowResponse rows = 2;
+}
+
+message BatchGetRowResponse {
+    repeated TableInBatchGetRowResponse tables = 1;
+}
+/* ######################################################################################################### */
+
+/* ############################################# BatchWriteRow ############################################# */
+
+enum OperationType {
+    PUT = 1;
+    UPDATE = 2;
+    DELETE = 3;
+}
+
+message RowInBatchWriteRowRequest {
+    required OperationType type = 1;
+    required bytes row_change = 2; // encoded as InplaceRowChangeSet
+    required Condition condition = 3;
+    optional ReturnContent return_content = 4;
+}
+
+message TableInBatchWriteRowRequest {
+    required string table_name = 1;
+    repeated RowInBatchWriteRowRequest rows = 2;
+}
+
+message BatchWriteRowRequest {
+    repeated TableInBatchWriteRowRequest tables = 1;
+    optional string transaction_id = 2;
+}
+
+message RowInBatchWriteRowResponse {
+    required bool is_ok = 1;
+    optional Error error = 2;
+    optional ConsumedCapacity consumed = 3;
+    optional bytes row = 4;
+}
+
+message TableInBatchWriteRowResponse {
+    required string table_name = 1;
+    repeated RowInBatchWriteRowResponse rows = 2;
+}
+
+message BatchWriteRowResponse {
+    repeated TableInBatchWriteRowResponse tables = 1;
+}
+/* ########################################################################################################### */
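+/**
+ * Illustrative failure handling for the batch messages above (the retry policy is an assumed
+ * usage pattern, not mandated by this file): each RowInBatchWriteRowResponse carries its own
+ * is_ok/error pair, so one BatchWriteRowResponse can mix successes and failures, and a caller
+ * would typically resubmit only the rows whose is_ok is false rather than the whole batch.
+ */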
+/* ############################################# GetRange ############################################# */
+enum Direction {
+    FORWARD = 0;
+    BACKWARD = 1;
+}
+
+/**
+ * HBase supports the following parameters:
+ * 1. TimeRange or a specific time
+ * 2. Filter (filtering on column values or column names)
+ * We only support selection conditions that apply to a single version.
+ */
+message GetRangeRequest {
+    required string table_name = 1;
+    required Direction direction = 2;
+    repeated string columns_to_get = 3; // if unspecified, all columns are read
+    optional TimeRange time_range = 4;
+    optional int32 max_versions = 5;
+    optional int32 limit = 6;
+    required bytes inclusive_start_primary_key = 7; // encoded as InplaceRowChangeSet, but only has primary key
+    required bytes exclusive_end_primary_key = 8; // encoded as InplaceRowChangeSet, but only has primary key
+    optional bool cache_blocks = 9 [default = true]; // whether the data read by this request enters the BlockCache
+    optional bytes filter = 10;
+    optional string start_column = 11;
+    optional string end_column = 12;
+    optional bytes token = 13;
+    optional string transaction_id = 14;
+}
+
+message GetRangeResponse {
+    required ConsumedCapacity consumed = 1;
+    required bytes rows = 2; // encoded as InplaceRowChangeSet
+    optional bytes next_start_primary_key = 3; // empty means all data has been read. encoded as InplaceRowChangeSet, but only has primary key
+    optional bytes next_token = 4;
+}
+/* ###################################################################################################### */
+/* ############################################# Stream ############################################# */
+
+message ListStreamRequest {
+    optional string table_name = 1;
+}
+
+message Stream {
+    required string stream_id = 1;
+    required string table_name = 2;
+    required int64 creation_time = 3;
+}
+
+message ListStreamResponse {
+    repeated Stream streams = 1;
+}
+
+message StreamShard {
+    required string shard_id = 1;
+    optional string parent_id = 2;
+    optional string parent_sibling_id = 3;
+}
+
+enum StreamStatus {
+    STREAM_ENABLING = 1;
+    STREAM_ACTIVE = 2;
+}
+
+message DescribeStreamRequest {
+    required string stream_id = 1;
+    optional string inclusive_start_shard_id = 2;
+    optional int32 shard_limit = 3;
+}
+
+message DescribeStreamResponse {
+    required string stream_id = 1;
+    required int32 expiration_time = 2;
+    required string table_name = 3;
+    required int64 creation_time = 4;
+    required StreamStatus stream_status = 5;
+    repeated StreamShard shards = 6;
+    optional string next_shard_id = 7;
+}
+
+message GetShardIteratorRequest {
+    required string stream_id = 1;
+    required string shard_id = 2;
+    optional int64 timestamp = 3;
+    optional string token = 4;
+}
+
+message GetShardIteratorResponse {
+    required string shard_iterator = 1;
+    optional string next_token = 2;
+}
+
+message GetStreamRecordRequest {
+    required string shard_iterator = 1;
+    optional int32 limit = 2;
+}
+
+enum ActionType {
+    PUT_ROW = 1;
+    UPDATE_ROW = 2;
+    DELETE_ROW = 3;
+}
+
+message GetStreamRecordResponse {
+    message StreamRecord {
+        required ActionType action_type = 1;
+        required bytes record = 2;
+    }
+    repeated StreamRecord stream_records = 1;
+    optional string next_shard_iterator = 2;
+}
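+/**
+ * A sketch of how the stream messages above chain together (an assumed reading loop, not
+ * normative): ListStream yields a stream_id; DescribeStream lists its shards; for each shard,
+ * GetShardIterator returns a shard_iterator, and repeated GetStreamRecord calls consume records,
+ * feeding each response's next_shard_iterator back in as the next request's shard_iterator.
+ */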
+/* +++++ ComputeSplitPointsBySize +++++ */
+message ComputeSplitPointsBySizeRequest {
+    required string table_name = 1;
+    required int64 split_size = 2; // in units of 100 MB
+}
+
+message ComputeSplitPointsBySizeResponse {
+    required ConsumedCapacity consumed = 1;
+    repeated PrimaryKeySchema schema = 2;
+
+    /**
+     * Split points between splits, in increasing order.
+     *
+     * A split is a consecutive range of primary keys,
+     * whose data size is about the split_size specified in the request.
+     * The size may not be precise.
+     *
+     * A split point is an array of primary-key columns w.r.t. the table schema,
+     * which is never longer than that of the table schema.
+     * Trailing -inf entries will be omitted to reduce transmission payloads.
+     */
+    repeated bytes split_points = 3;
+
+    /**
+     * Locations where splits lie.
+     *
+     * By the managed nature of TableStore, these locations are no more than hints.
+     * If a location is not suitable to be seen, an empty string will be placed.
+     */
+    message SplitLocation {
+        required string location = 1;
+        required sint64 repeat = 2;
+    }
+    repeated SplitLocation locations = 4;
+}
+/* -------------------------------------- */
+
+enum DefinedColumnType {
+    DCT_INTEGER = 1;
+    DCT_DOUBLE = 2;
+    DCT_BOOLEAN = 3;
+    DCT_STRING = 4;
+    // field 5 is reserved for date type, not supported yet
+    // field 6 is reserved for decimal type, not supported yet
+    DCT_BLOB = 7;
+}
+
+message DefinedColumnSchema {
+    required string name = 1;
+    required DefinedColumnType type = 2;
+}
+
+enum IndexUpdateMode {
+    IUM_ASYNC_INDEX = 0;
+    IUM_SYNC_INDEX = 1;
+}
+
+enum IndexType {
+    IT_GLOBAL_INDEX = 0;
+    IT_LOCAL_INDEX = 1;
+}
+
+message IndexMeta {
+    required string name = 1;
+    repeated string primary_key = 2;
+    repeated string defined_column = 3;
+    required IndexUpdateMode index_update_mode = 4;
+    required IndexType index_type = 5;
+}
+
+message CreateIndexRequest {
+    required string main_table_name = 1;
+    required IndexMeta index_meta = 2;
+    optional bool include_base_data = 3;
+}
+
+message CreateIndexResponse {
+}
+
+message DropIndexRequest {
+    required string main_table_name = 1;
+    required string index_name = 2;
+}
+
+message DropIndexResponse {
+}
+
+/* ########################################### LocalTransaction ########################################### */
+message StartLocalTransactionRequest {
+    required string table_name = 1;
+    required bytes key = 2; // encoded as SQLVariant
+}
+
+message StartLocalTransactionResponse {
+    required string transaction_id = 1;
+};
+
+message CommitTransactionRequest {
+    required string transaction_id = 1;
+}
+
+message CommitTransactionResponse {
+};
+
+message AbortTransactionRequest {
+    required string transaction_id = 1;
+}
+
+message AbortTransactionResponse {
+};
+
+/* ######################################################################################################### */
\ No newline at end of file
diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go
new file mode 100644
index 00000000..681d3b08
--- /dev/null
+++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/plain_buffer.go
@@ -0,0 +1,473 @@
+package tablestore
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	HEADER = 0x75
+
+	// tag type
+	TAG_ROW_PK             = 0x1
+	TAG_ROW_DATA           = 0x2
+	TAG_CELL               = 0x3
+	TAG_CELL_NAME          = 0x4
+	TAG_CELL_VALUE         = 0x5
+	TAG_CELL_TYPE          = 0x6
+	TAG_CELL_TIMESTAMP     = 0x7
+	TAG_DELETE_ROW_MARKER  = 0x8
+	TAG_ROW_CHECKSUM       = 0x9
+	TAG_CELL_CHECKSUM      = 0x0A
+	TAG_EXTENSION          = 0x0B
+	TAG_SEQ_INFO           = 0x0C
+	TAG_SEQ_INFO_EPOCH     = 0x0D
+	TAG_SEQ_INFO_TS        = 0x0E
+	TAG_SEQ_INFO_ROW_INDEX = 0x0F
+
+	// cell op type
+	DELETE_ALL_VERSION = 0x1
+	DELETE_ONE_VERSION = 0x3
+	INCREMENT          = 0x4
+
+	// variant type
+	VT_INTEGER = 0x0
+	VT_DOUBLE  = 0x1
+	VT_BOOLEAN = 0x2
+	VT_STRING  = 0x3
+
+	//public final static byte VT_NULL = 0x6;
+	VT_BLOB           = 0x7
+	VT_INF_MIN        = 0x9
+	VT_INF_MAX        = 0xa
+	VT_AUTO_INCREMENT = 0xb
+
+	LITTLE_ENDIAN_32_SIZE = 4
+	LITTLE_ENDIAN_64_SIZE = 8
+)
+
+const spaceSize = 256
+
+var crc8Table = make([]byte, spaceSize)
+
+func init() {
+	for i := 
0; i < spaceSize; i++ { + x := byte(i) + for j := 8; j > 0; j-- { + if (x & 0x80) != 0 { + x = (x << 1) ^ 0x07 + } else { + x = (x << 1) ^ 0 + } + } + crc8Table[i] = x + } +} + +func crc8Byte(crc, in byte) byte { + return crc8Table[(crc^in)&0xff] +} + +func crc8Int32(crc byte, in int32) byte { + for i := 0; i < 4; i++ { + crc = crc8Byte(crc, byte((in & 0xff))) + in >>= 8 + } + + return crc +} + +func crc8Int64(crc byte, in int64) byte { + for i := 0; i < 8; i++ { + crc = crc8Byte(crc, byte((in & 0xff))) + in >>= 8 + } + + return crc +} + +func crc8Bytes(crc byte, in []byte) byte { + for i := 0; i < len(in); i++ { + crc = crc8Byte(crc, in[i]) + } + + return crc +} + +func writeRawByte(w io.Writer, value byte) { + w.Write([]byte{value}) +} + +/*func writeRawByteInt8(w io.Writer, value int) { + w.Write([]byte{byte(value)}) +}*/ + +func writeRawLittleEndian32(w io.Writer, value int32) { + w.Write([]byte{byte((value) & 0xFF)}) + w.Write([]byte{byte((value >> 8) & 0xFF)}) + w.Write([]byte{byte((value >> 16) & 0xFF)}) + w.Write([]byte{byte((value >> 24) & 0xFF)}) +} + +func writeRawLittleEndian64(w io.Writer, value int64) { + w.Write([]byte{byte((value) & 0xFF)}) + w.Write([]byte{byte((value >> 8) & 0xFF)}) + w.Write([]byte{byte((value >> 16) & 0xFF)}) + w.Write([]byte{byte((value >> 24) & 0xFF)}) + w.Write([]byte{byte((value >> 32) & 0xFF)}) + w.Write([]byte{byte((value >> 40) & 0xFF)}) + w.Write([]byte{byte((value >> 48) & 0xFF)}) + w.Write([]byte{byte((value >> 56) & 0xFF)}) +} + +func writeDouble(w io.Writer, value float64) { + writeRawLittleEndian64(w, int64(math.Float64bits(value))) +} + +func writeBoolean(w io.Writer, value bool) { + if value { + w.Write([]byte{byte(1)}) + } else { + w.Write([]byte{byte(0)}) + } +} + +func writeBytes(w io.Writer, value []byte) { + w.Write(value) +} + +func writeHeader(w io.Writer) { + writeRawLittleEndian32(w, HEADER) +} + +func writeTag(w io.Writer, tag byte) { + writeRawByte(w, tag) +} + +func writeCellName(w io.Writer, name []byte) { + writeTag(w, TAG_CELL_NAME) + writeRawLittleEndian32(w, int32(len(name))) + writeBytes(w, name) +} + +type PlainBufferCell struct { + cellName []byte + cellValue *ColumnValue + cellTimestamp int64 + cellType byte + ignoreValue bool + hasCellTimestamp bool + hasCellType bool +} + +func (cell *PlainBufferCell) writeCell(w io.Writer) { + writeTag(w, TAG_CELL) + writeCellName(w, cell.cellName) + if cell.ignoreValue == false { + cell.cellValue.writeCellValue(w) + } + + if cell.hasCellType { + writeTag(w, TAG_CELL_TYPE) + writeRawByte(w, cell.cellType) + } + + if cell.hasCellTimestamp { + writeTag(w, TAG_CELL_TIMESTAMP) + writeRawLittleEndian64(w, cell.cellTimestamp) + } + + writeTag(w, TAG_CELL_CHECKSUM) + writeRawByte(w, cell.getCheckSum(byte(0x0))) +} + +func (cell *PlainBufferCell) getCheckSum(crc byte) byte { + crc = crc8Bytes(crc, cell.cellName) + if cell.ignoreValue == false { + crc = cell.cellValue.getCheckSum(crc) + } + + if cell.hasCellTimestamp { + crc = crc8Int64(crc, cell.cellTimestamp) + } + if cell.hasCellType { + crc = crc8Byte(crc, cell.cellType) + } + return crc +} + +type PlainBufferRow struct { + primaryKey []*PlainBufferCell + cells []*PlainBufferCell + hasDeleteMarker bool + extension *RecordSequenceInfo // optional +} + +func (row *PlainBufferRow) writeRow(w io.Writer) { + /* pk */ + writeTag(w, TAG_ROW_PK) + for _, pk := range row.primaryKey { + pk.writeCell(w) + } + + if len(row.cells) > 0 { + writeTag(w, TAG_ROW_DATA) + for _, cell := range row.cells { + cell.writeCell(w) + } + } + + writeTag(w, 
TAG_ROW_CHECKSUM) + writeRawByte(w, row.getCheckSum(byte(0x0))) +} + +func (row *PlainBufferRow) writeRowWithHeader(w io.Writer) { + writeHeader(w) + row.writeRow(w) +} + +func (row *PlainBufferRow) getCheckSum(crc byte) byte { + for _, cell := range row.primaryKey { + crcCell := cell.getCheckSum(byte(0x0)) + crc = crc8Byte(crc, crcCell) + } + + for _, cell := range row.cells { + crcCell := cell.getCheckSum(byte(0x0)) + crc = crc8Byte(crc, crcCell) + } + + del := byte(0x0) + if row.hasDeleteMarker { + del = byte(0x1) + } + + crc = crc8Byte(crc, del) + + return crc +} + +func readRawByte(r *bytes.Reader) byte { + if r.Len() == 0 { + panic(errUnexpectIoEnd) + } + + b, _ := r.ReadByte() + + return b +} + +func readTag(r *bytes.Reader) int { + return int(readRawByte(r)) +} + +func readRawLittleEndian64(r *bytes.Reader) int64 { + if r.Len() < 8 { + panic(errUnexpectIoEnd) + } + + var v int64 + binary.Read(r, binary.LittleEndian, &v) + + return v +} + +func readRawLittleEndian32(r *bytes.Reader) int32 { + if r.Len() < 4 { + panic(errUnexpectIoEnd) + } + + var v int32 + binary.Read(r, binary.LittleEndian, &v) + + return v +} + +func readBoolean(r *bytes.Reader) bool { + return readRawByte(r) != 0 +} + +func readBytes(r *bytes.Reader, size int32) []byte { + if int32(r.Len()) < size { + panic(errUnexpectIoEnd) + } + v := make([]byte, size) + r.Read(v) + return v +} + +func readCellValue(r *bytes.Reader) *ColumnValue { + value := new(ColumnValue) + readRawLittleEndian32(r) + tp := readRawByte(r) + switch tp { + case VT_INTEGER: + value.Type = ColumnType_INTEGER + value.Value = readRawLittleEndian64(r) + case VT_DOUBLE: + value.Type = ColumnType_DOUBLE + value.Value = math.Float64frombits(uint64(readRawLittleEndian64(r))) + case VT_BOOLEAN: + value.Type = ColumnType_BOOLEAN + value.Value = readBoolean(r) + case VT_STRING: + value.Type = ColumnType_STRING + value.Value = string(readBytes(r, readRawLittleEndian32(r))) + case VT_BLOB: + value.Type = ColumnType_BINARY + value.Value = []byte(readBytes(r, readRawLittleEndian32(r))) + } + return value +} + +func readCell(r *bytes.Reader) *PlainBufferCell { + cell := new(PlainBufferCell) + tag := readTag(r) + if tag != TAG_CELL_NAME { + panic(errTag) + } + + cell.cellName = readBytes(r, readRawLittleEndian32(r)) + tag = readTag(r) + + if tag == TAG_CELL_VALUE { + cell.cellValue = readCellValue(r) + tag = readTag(r) + } + if tag == TAG_CELL_TYPE { + readRawByte(r) + tag = readTag(r) + } + + if tag == TAG_CELL_TIMESTAMP { + cell.cellTimestamp = readRawLittleEndian64(r) + tag = readTag(r) + } + + if tag == TAG_CELL_CHECKSUM { + readRawByte(r) + } else { + panic(errNoChecksum) + } + + return cell +} + +func readRowPk(r *bytes.Reader) []*PlainBufferCell { + primaryKeyColumns := make([]*PlainBufferCell, 0, 4) + + tag := readTag(r) + for tag == TAG_CELL { + primaryKeyColumns = append(primaryKeyColumns, readCell(r)) + tag = readTag(r) + } + + r.Seek(-1, 1) + + return primaryKeyColumns +} + +func readRowData(r *bytes.Reader) []*PlainBufferCell { + columns := make([]*PlainBufferCell, 0, 10) + + tag := readTag(r) + for tag == TAG_CELL { + columns = append(columns, readCell(r)) + tag = readTag(r) + } + + r.Seek(-1, 1) + + return columns +} + +func readRow(r *bytes.Reader) *PlainBufferRow { + row := new(PlainBufferRow) + tag := readTag(r) + if tag == TAG_ROW_PK { + row.primaryKey = readRowPk(r) + tag = readTag(r) + } + + if tag == TAG_ROW_DATA { + row.cells = readRowData(r) + tag = readTag(r) + } + + if tag == TAG_DELETE_ROW_MARKER { + row.hasDeleteMarker = true + tag = 
readTag(r) + } + + if tag == TAG_EXTENSION { + row.extension = readRowExtension(r) + tag = readTag(r) + } + + if tag == TAG_ROW_CHECKSUM { + readRawByte(r) + } else { + panic(errNoChecksum) + } + return row +} + +func readRowsWithHeader(r *bytes.Reader) (rows []*PlainBufferRow, err error) { + defer func() { + if err2 := recover(); err2 != nil { + if _, ok := err2.(error); ok { + err = err2.(error) + } + return + } + }() + + // TODO: panic + if readRawLittleEndian32(r) != HEADER { + return nil, fmt.Errorf("Invalid header from plain buffer") + } + + rows = make([]*PlainBufferRow, 0, 10) + + for r.Len() > 0 { + rows = append(rows, readRow(r)) + } + + return rows, nil +} + +func readRowExtension(r *bytes.Reader) *RecordSequenceInfo { + readRawLittleEndian32(r) // useless + tag := readTag(r) + if tag != TAG_SEQ_INFO { + panic(errTag) + } + + readRawLittleEndian32(r) // useless + tag = readTag(r) + if tag != TAG_SEQ_INFO_EPOCH { + panic(errTag) + } + epoch := readRawLittleEndian32(r) + + tag = readTag(r) + if tag != TAG_SEQ_INFO_TS { + panic(errTag) + } + ts := readRawLittleEndian64(r) + + tag = readTag(r) + if tag != TAG_SEQ_INFO_ROW_INDEX { + panic(errTag) + } + rowIndex := readRawLittleEndian32(r) + + ext := RecordSequenceInfo{} + ext.Epoch = epoch + ext.Timestamp = ts + ext.RowIndex = rowIndex + return &ext +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go new file mode 100644 index 00000000..60b19cba --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/collapse.go @@ -0,0 +1,14 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type Collapse struct { + FieldName string +} + +func (c *Collapse) ProtoBuffer() (*otsprotocol.Collapse, error) { + pb := &otsprotocol.Collapse{ + FieldName: &c.FieldName, + } + return pb, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go new file mode 100644 index 00000000..3612c2c2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query.go @@ -0,0 +1,88 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type QueryType int + +const ( + QueryType_None QueryType = 0 + QueryType_MatchQuery QueryType = 1 + QueryType_MatchPhraseQuery QueryType = 2 + QueryType_TermQuery QueryType = 3 + QueryType_RangeQuery QueryType = 4 + QueryType_PrefixQuery QueryType = 5 + QueryType_BoolQuery QueryType = 6 + QueryType_ConstScoreQuery QueryType = 7 + QueryType_FunctionScoreQuery QueryType = 8 + QueryType_NestedQuery QueryType = 9 + QueryType_WildcardQuery QueryType = 10 + QueryType_MatchAllQuery QueryType = 11 + QueryType_GeoBoundingBoxQuery QueryType = 12 + QueryType_GeoDistanceQuery QueryType = 13 + QueryType_GeoPolygonQuery QueryType = 14 + QueryType_TermsQuery QueryType = 15 + QueryType_ExistsQuery QueryType = 16 +) + +func (q QueryType) Enum() *QueryType { + newQuery := q + return &newQuery +} + +func (q QueryType) ToPB() *otsprotocol.QueryType { + switch q { + case QueryType_None: + return nil + case QueryType_MatchQuery: + return otsprotocol.QueryType_MATCH_QUERY.Enum() + case QueryType_MatchPhraseQuery: + return otsprotocol.QueryType_MATCH_PHRASE_QUERY.Enum() + case QueryType_TermQuery: + return 
otsprotocol.QueryType_TERM_QUERY.Enum() + case QueryType_RangeQuery: + return otsprotocol.QueryType_RANGE_QUERY.Enum() + case QueryType_PrefixQuery: + return otsprotocol.QueryType_PREFIX_QUERY.Enum() + case QueryType_BoolQuery: + return otsprotocol.QueryType_BOOL_QUERY.Enum() + case QueryType_ConstScoreQuery: + return otsprotocol.QueryType_CONST_SCORE_QUERY.Enum() + case QueryType_FunctionScoreQuery: + return otsprotocol.QueryType_FUNCTION_SCORE_QUERY.Enum() + case QueryType_NestedQuery: + return otsprotocol.QueryType_NESTED_QUERY.Enum() + case QueryType_WildcardQuery: + return otsprotocol.QueryType_WILDCARD_QUERY.Enum() + case QueryType_MatchAllQuery: + return otsprotocol.QueryType_MATCH_ALL_QUERY.Enum() + case QueryType_GeoBoundingBoxQuery: + return otsprotocol.QueryType_GEO_BOUNDING_BOX_QUERY.Enum() + case QueryType_GeoDistanceQuery: + return otsprotocol.QueryType_GEO_DISTANCE_QUERY.Enum() + case QueryType_GeoPolygonQuery: + return otsprotocol.QueryType_GEO_POLYGON_QUERY.Enum() + case QueryType_TermsQuery: + return otsprotocol.QueryType_TERMS_QUERY.Enum() + case QueryType_ExistsQuery: + return otsprotocol.QueryType_EXISTS_QUERY.Enum() + default: + panic("unexpected") + } +} + +type Query interface { + Type() QueryType + Serialize() ([]byte, error) + ProtoBuffer() (*otsprotocol.Query, error) +} + +func BuildPBForQuery(q Query) (*otsprotocol.Query, error) { + query := &otsprotocol.Query{} + query.Type = q.Type().ToPB() + data, err := q.Serialize() + if err != nil { + return nil, err + } + query.Query = data + return query, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go new file mode 100644 index 00000000..230d2e00 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_bool.go @@ -0,0 +1,75 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type BoolQuery struct { + MustQueries []Query + MustNotQueries []Query + FilterQueries []Query + ShouldQueries []Query + MinimumShouldMatch *int32 +} + +func (q *BoolQuery) Type() QueryType { + return QueryType_BoolQuery +} + +func (q *BoolQuery) Serialize() ([]byte, error) { + query := &otsprotocol.BoolQuery{} + if q.MustQueries != nil { + pbMustQs := make([]*otsprotocol.Query, 0) + for _, mustQ := range q.MustQueries { + pbQ, err := mustQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbMustQs = append(pbMustQs, pbQ) + } + query.MustQueries = pbMustQs + } + if q.MustNotQueries != nil { + pbMustNotQs := make([]*otsprotocol.Query, 0) + for _, mustNotQ := range q.MustNotQueries { + pbQ, err := mustNotQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbMustNotQs = append(pbMustNotQs, pbQ) + } + query.MustNotQueries = pbMustNotQs + } + if q.FilterQueries != nil { + pbFilterQs := make([]*otsprotocol.Query, 0) + for _, filterQ := range q.FilterQueries { + pbQ, err := filterQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbFilterQs = append(pbFilterQs, pbQ) + } + query.FilterQueries = pbFilterQs + } + if q.ShouldQueries != nil { + pbShouldQs := make([]*otsprotocol.Query, 0) + for _, shouldQ := range q.ShouldQueries { + pbQ, err := shouldQ.ProtoBuffer() + if err != nil { + return nil, err + } + pbShouldQs = append(pbShouldQs, pbQ) + } + query.ShouldQueries = pbShouldQs + } + if (q.MinimumShouldMatch != nil) { + 
query.MinimumShouldMatch = q.MinimumShouldMatch + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *BoolQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go new file mode 100644 index 00000000..124d8262 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_const_score.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type ConstScoreQuery struct { + Filter Query +} + +func (q *ConstScoreQuery) Type() QueryType { + return QueryType_ConstScoreQuery +} + +func (q *ConstScoreQuery) Serialize() ([]byte, error) { + query := &otsprotocol.ConstScoreQuery{} + pbQ, err := q.Filter.ProtoBuffer() + if err != nil { + return nil, err + } + query.Filter = pbQ + data, err := proto.Marshal(query) + return data, err +} + +func (q *ConstScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_exists.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_exists.go new file mode 100644 index 00000000..7710e418 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_exists.go @@ -0,0 +1,25 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type ExistsQuery struct { + FieldName string +} + +func (q *ExistsQuery) Type() QueryType { + return QueryType_ExistsQuery +} + +func (q *ExistsQuery) Serialize() ([]byte, error) { + query := &otsprotocol.ExistsQuery{} + query.FieldName = &q.FieldName + data, err := proto.Marshal(query) + return data, err +} + +func (q *ExistsQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} \ No newline at end of file diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go new file mode 100644 index 00000000..c1115305 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_function_score.go @@ -0,0 +1,49 @@ +package search + +import ( + "errors" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type FieldValueFactor struct { + FieldName string +} + +func (f *FieldValueFactor) ProtoBuffer() (*otsprotocol.FieldValueFactor, error) { + pb := &otsprotocol.FieldValueFactor{} + pb.FieldName = &f.FieldName + return pb, nil +} + +type FunctionScoreQuery struct { + Query Query + FieldValueFactor *FieldValueFactor +} + +func (q *FunctionScoreQuery) Type() QueryType { + return QueryType_FunctionScoreQuery +} + +func (q *FunctionScoreQuery) Serialize() ([]byte, error) { + if q.Query == nil || q.FieldValueFactor == nil { + return nil, errors.New("FunctionScoreQuery: Query or FieldValueFactor is nil") + } + query := &otsprotocol.FunctionScoreQuery{} + pbQ, err := q.Query.ProtoBuffer() + if err != nil { + return nil, err + } + query.Query = pbQ + pbF, err := 
q.FieldValueFactor.ProtoBuffer() + if err != nil { + return nil, err + } + query.FieldValueFactor = pbF + data, err := proto.Marshal(query) + return data, err +} + +func (q *FunctionScoreQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go new file mode 100644 index 00000000..9dcbfe81 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_bounding_box.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoBoundingBoxQuery struct { + FieldName string + TopLeft string + BottomRight string +} + +func (q *GeoBoundingBoxQuery) Type() QueryType { + return QueryType_GeoBoundingBoxQuery +} + +func (q *GeoBoundingBoxQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoBoundingBoxQuery{} + query.FieldName = &q.FieldName + query.TopLeft = &q.TopLeft + query.BottomRight = &q.BottomRight + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoBoundingBoxQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go new file mode 100644 index 00000000..5906cb69 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_distance.go @@ -0,0 +1,29 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoDistanceQuery struct { + FieldName string + CenterPoint string + DistanceInMeter float64 +} + +func (q *GeoDistanceQuery) Type() QueryType { + return QueryType_GeoDistanceQuery +} + +func (q *GeoDistanceQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoDistanceQuery{} + query.FieldName = &q.FieldName + query.CenterPoint = &q.CenterPoint + query.Distance = &q.DistanceInMeter + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoDistanceQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go new file mode 100644 index 00000000..f38efe7a --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_geo_polygon.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type GeoPolygonQuery struct { + FieldName string + Points []string +} + +func (q *GeoPolygonQuery) Type() QueryType { + return QueryType_GeoPolygonQuery +} + +func (q *GeoPolygonQuery) Serialize() ([]byte, error) { + query := &otsprotocol.GeoPolygonQuery{} + query.FieldName = &q.FieldName + query.Points = q.Points + data, err := proto.Marshal(query) + return data, err +} + +func (q *GeoPolygonQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git 
a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go new file mode 100644 index 00000000..d02f85c0 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match.go @@ -0,0 +1,68 @@ +package search + +import ( + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type QueryOperator int8 + +const ( + QueryOperator_OR QueryOperator = 0 + QueryOperator_AND QueryOperator = 1 +) + +func (x QueryOperator) Enum() *QueryOperator { + p := new(QueryOperator) + *p = x + return p +} + +func (o *QueryOperator) ProtoBuffer() (*otsprotocol.QueryOperator, error) { + if o == nil { + return nil, errors.New("query operator is nil") + } + if *o == QueryOperator_OR { + return otsprotocol.QueryOperator_OR.Enum(), nil + } else if *o == QueryOperator_AND { + return otsprotocol.QueryOperator_AND.Enum(), nil + } else { + return nil, errors.New("unknown query operator: " + fmt.Sprintf("%#v", *o)) + } +} + +type MatchQuery struct { + FieldName string + Text string + MinimumShouldMatch *int32 + Operator *QueryOperator +} + +func (q *MatchQuery) Type() QueryType { + return QueryType_MatchQuery +} + +func (q *MatchQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchQuery{} + query.FieldName = &q.FieldName + query.Text = &q.Text + if q.MinimumShouldMatch != nil { + query.MinimumShouldMatch = q.MinimumShouldMatch + } + if q.Operator != nil { + pbOperator, err := q.Operator.ProtoBuffer() + if err != nil { + return nil, err + } + query.Operator = pbOperator + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go new file mode 100644 index 00000000..fc4511bd --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_match_phrase.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type MatchPhraseQuery struct { + FieldName string + Text string +} + +func (q *MatchPhraseQuery) Type() QueryType { + return QueryType_MatchPhraseQuery +} + +func (q *MatchPhraseQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchPhraseQuery{} + query.FieldName = &q.FieldName + query.Text = &q.Text + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchPhraseQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go new file mode 100644 index 00000000..778dbecd --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_matchall.go @@ -0,0 +1,23 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type MatchAllQuery struct { +} + +func (q *MatchAllQuery) Type() QueryType { + return 
QueryType_MatchAllQuery +} + +func (q *MatchAllQuery) Serialize() ([]byte, error) { + query := &otsprotocol.MatchAllQuery{} + data, err := proto.Marshal(query) + return data, err +} + +func (q *MatchAllQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go new file mode 100644 index 00000000..15a9bad2 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_nested.go @@ -0,0 +1,54 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type ScoreModeType int + +const ( + ScoreMode_None ScoreModeType = 1 + ScoreMode_Avg ScoreModeType = 2 + ScoreMode_Max ScoreModeType = 3 + ScoreMode_Total ScoreModeType = 4 + ScoreMode_Min ScoreModeType = 5 +) + +type NestedQuery struct { + Path string + Query Query + ScoreMode ScoreModeType +} + +func (q *NestedQuery) Type() QueryType { + return QueryType_NestedQuery +} + +func (q *NestedQuery) Serialize() ([]byte, error) { + query := &otsprotocol.NestedQuery{} + pbQ, err := q.Query.ProtoBuffer() + if err != nil { + return nil, err + } + query.Query = pbQ + query.Path = &q.Path + switch q.ScoreMode { + case ScoreMode_None: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_NONE.Enum() + case ScoreMode_Avg: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_AVG.Enum() + case ScoreMode_Max: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MAX.Enum() + case ScoreMode_Min: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_MIN.Enum() + case ScoreMode_Total: + query.ScoreMode = otsprotocol.ScoreMode_SCORE_MODE_TOTAL.Enum() + } + data, err := proto.Marshal(query) + return data, err +} + +func (q *NestedQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go new file mode 100644 index 00000000..a94d0cdb --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_prefix.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type PrefixQuery struct { + FieldName string + Prefix string +} + +func (q *PrefixQuery) Type() QueryType { + return QueryType_PrefixQuery +} + +func (q *PrefixQuery) Serialize() ([]byte, error) { + query := &otsprotocol.PrefixQuery{} + query.FieldName = &q.FieldName + query.Prefix = &q.Prefix + data, err := proto.Marshal(query) + return data, err +} + +func (q *PrefixQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go new file mode 100644 index 00000000..28e3353b --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_range.go @@ -0,0 +1,75 @@ +package search + +import ( + "errors" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type 
RangeQuery struct { + FieldName string + From interface{} + To interface{} + IncludeLower bool + IncludeUpper bool +} + +func (q *RangeQuery) GT(value interface{}) { + q.from(value, false) +} + +func (q *RangeQuery) GTE(value interface{}) { + q.from(value, true) +} + +func (q *RangeQuery) LT(value interface{}) { + q.to(value, false) +} + +func (q *RangeQuery) LTE(value interface{}) { + q.to(value, true) +} + +func (q *RangeQuery) from(value interface{}, includeLower bool) { + q.From = value + q.IncludeLower = includeLower +} + +func (q *RangeQuery) to(value interface{}, includeUpper bool) { + q.To = value + q.IncludeUpper = includeUpper +} + +func (q *RangeQuery) Type() QueryType { + return QueryType_RangeQuery +} + +func (q *RangeQuery) Serialize() ([]byte, error) { + if q.FieldName == "" { + return nil, errors.New("RangeQuery: fieldName not set.") + } + query := &otsprotocol.RangeQuery{} + query.FieldName = &q.FieldName + if q.From != nil { + vFrom, err := ToVariantValue(q.From) + if err != nil { + return nil, err + } + query.RangeFrom = ([]byte)(vFrom) + } + if q.To != nil { + vTo, err := ToVariantValue(q.To) + if err != nil { + return nil, err + } + query.RangeTo = ([]byte)(vTo) + } + query.IncludeLower = &q.IncludeLower + query.IncludeUpper = &q.IncludeUpper + data, err := proto.Marshal(query) + return data, err +} + +func (q *RangeQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go new file mode 100644 index 00000000..1aac7180 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_term.go @@ -0,0 +1,31 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type TermQuery struct { + FieldName string + Term interface{} +} + +func (q *TermQuery) Type() QueryType { + return QueryType_TermQuery +} + +func (q *TermQuery) Serialize() ([]byte, error) { + term := &otsprotocol.TermQuery{} + term.FieldName = &q.FieldName + vt, err := ToVariantValue(q.Term) + if err != nil { + return nil, err + } + term.Term = []byte(vt) + data, err := proto.Marshal(term) + return data, err +} + +func (q *TermQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go new file mode 100644 index 00000000..1401ff48 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_terms.go @@ -0,0 +1,35 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type TermsQuery struct { + FieldName string + Terms []interface{} +} + +func (q *TermsQuery) Type() QueryType { + return QueryType_TermsQuery +} + +func (q *TermsQuery) Serialize() ([]byte, error) { + term := &otsprotocol.TermsQuery{} + term.FieldName = &q.FieldName + term.Terms = make([][]byte, 0) + + for _, value := range q.Terms { + vt, err := ToVariantValue(value) + if err != nil { + return nil, err + } + term.Terms = append(term.Terms, []byte(vt)) + } + data, err := proto.Marshal(term) + return data, err +} + +func (q *TermsQuery) 
ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go new file mode 100644 index 00000000..f885ca6e --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/query_wildcard.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type WildcardQuery struct { + FieldName string + Value string +} + +func (q *WildcardQuery) Type() QueryType { + return QueryType_WildcardQuery +} + +func (q *WildcardQuery) Serialize() ([]byte, error) { + query := &otsprotocol.WildcardQuery{} + query.FieldName = &q.FieldName + query.Value = &q.Value + data, err := proto.Marshal(query) + return data, err +} + +func (q *WildcardQuery) ProtoBuffer() (*otsprotocol.Query, error) { + return BuildPBForQuery(q) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go new file mode 100644 index 00000000..9bc47ebe --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/search_query.go @@ -0,0 +1,101 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +type SearchQuery interface { + Serialize() ([]byte, error) +} + +type searchQuery struct { + Offset int32 + Limit int32 + Query Query + Collapse *Collapse + Sort *Sort + GetTotalCount bool + Token []byte +} + +func NewSearchQuery() *searchQuery { + return &searchQuery{ + Offset: -1, + Limit: -1, + GetTotalCount: false, + } +} + +func (s *searchQuery) SetOffset(offset int32) *searchQuery { + s.Offset = offset + return s +} + +func (s *searchQuery) SetLimit(limit int32) *searchQuery { + s.Limit = limit + return s +} + +func (s *searchQuery) SetQuery(query Query) *searchQuery { + s.Query = query + return s +} + +func (s *searchQuery) SetCollapse(collapse *Collapse) *searchQuery { + s.Collapse = collapse + return s +} + +func (s *searchQuery) SetSort(sort *Sort) *searchQuery { + s.Sort = sort + return s +} + +func (s *searchQuery) SetGetTotalCount(getTotalCount bool) *searchQuery { + s.GetTotalCount = getTotalCount + return s +} + +func (s *searchQuery) SetToken(token []byte) *searchQuery { + s.Token = token + s.Sort = nil + return s +} + +func (s *searchQuery) Serialize() ([]byte, error) { + search_query := &otsprotocol.SearchQuery{} + if s.Offset >= 0 { + search_query.Offset = &s.Offset + } + if s.Limit >= 0 { + search_query.Limit = &s.Limit + } + if s.Query != nil { + pbQuery, err := s.Query.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Query = pbQuery + } + if s.Collapse != nil { + pbCollapse, err := s.Collapse.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Collapse = pbCollapse + } + if s.Sort != nil { + pbSort, err := s.Sort.ProtoBuffer() + if err != nil { + return nil, err + } + search_query.Sort = pbSort + } + search_query.GetTotalCount = &s.GetTotalCount + if s.Token != nil && len(s.Token) > 0 { + search_query.Token = s.Token + } + data, err := proto.Marshal(search_query) + return data, err +} diff --git 
a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go new file mode 100644 index 00000000..3b3e4a11 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort.go @@ -0,0 +1,27 @@ +package search + +import ( + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type Sorter interface { + ProtoBuffer() (*otsprotocol.Sorter, error) +} + +type Sort struct { + Sorters []Sorter +} + +func (s *Sort) ProtoBuffer() (*otsprotocol.Sort, error) { + pbSort := &otsprotocol.Sort{} + pbSortors := make([]*otsprotocol.Sorter, 0) + for _, fs := range s.Sorters { + pbFs, err := fs.ProtoBuffer() + if err != nil { + return nil, err + } + pbSortors = append(pbSortors, pbFs) + } + pbSort.Sorter = pbSortors + return pbSort, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go new file mode 100644 index 00000000..c4272a86 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_field.go @@ -0,0 +1,67 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type NestedFilter struct { + Path string + Filter Query +} + +func (f *NestedFilter) ProtoBuffer() (*otsprotocol.NestedFilter, error) { + pbF := &otsprotocol.NestedFilter{ + Path: &f.Path, + } + pbQ, err := f.Filter.ProtoBuffer() + if err != nil { + return nil, err + } + pbF.Filter = pbQ + return pbF, err +} + +type FieldSort struct { + FieldName string + Order *SortOrder + Mode *SortMode + NestedFilter *NestedFilter +} + +func NewFieldSort(fieldName string, order SortOrder) *FieldSort { + return &FieldSort{ + FieldName: fieldName, + Order: order.Enum(), + } +} + +func (s *FieldSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbFieldSort := &otsprotocol.FieldSort{ + FieldName: &s.FieldName, + } + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbFieldSort.Order = pbOrder + } + if s.Mode != nil { + pbMode, err := s.Mode.ProtoBuffer() + if err != nil { + return nil, err + } + if pbMode != nil { + pbFieldSort.Mode = pbMode + } + } + if s.NestedFilter != nil { + pbFilter, err := s.NestedFilter.ProtoBuffer() + if err != nil { + return nil, err + } + pbFieldSort.NestedFilter = pbFilter + } + pbSorter := &otsprotocol.Sorter{ + FieldSort: pbFieldSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go new file mode 100644 index 00000000..de4b07c3 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_geo_distance.go @@ -0,0 +1,77 @@ +package search + +import ( + "errors" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type GeoDistanceType int8 + +const ( + GeoDistanceType_ARC GeoDistanceType = 0 + GeoDistanceType_PLANE GeoDistanceType = 1 +)
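The Sort container above just fans out to each Sorter's ProtoBuffer, so a sort specification is an ordered list of sorters. A minimal, illustrative sketch of composing one (the field name "price" is a made-up example, not something the SDK defines):

```go
// Illustrative only: compose a Sort from the Sorter implementations above.
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search"
)

func main() {
	// Sort by "price" descending, then by primary key as a tie-breaker.
	s := &search.Sort{
		Sorters: []search.Sorter{
			search.NewFieldSort("price", search.SortOrder_DESC),
			search.NewPrimaryKeySort(),
		},
	}
	pb, err := s.ProtoBuffer() // serializes every Sorter in order
	if err != nil {
		panic(err)
	}
	fmt.Println(len(pb.Sorter)) // 2
}
```

+ +func (t *GeoDistanceType) ProtoBuffer() (*otsprotocol.GeoDistanceType, error) { + if t == nil { + return nil, errors.New("type is nil") + } + if *t == GeoDistanceType_ARC { + return otsprotocol.GeoDistanceType_GEO_DISTANCE_ARC.Enum(), nil + } else if *t == 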
GeoDistanceType_PLANE { + return otsprotocol.GeoDistanceType_GEO_DISTANCE_PLANE.Enum(), nil + } else { + return nil, errors.New("unknown distance type: " + fmt.Sprintf("%#v", *t)) + } +} + +type GeoDistanceSort struct { + FieldName string + Points []string + Order *SortOrder + Mode *SortMode + GeoDistanceType *GeoDistanceType + NestedFilter *NestedFilter +} + +func (s *GeoDistanceSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbGeoDistanceSort := &otsprotocol.GeoDistanceSort{ + FieldName: &s.FieldName, + Points: s.Points, + } + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.Order = pbOrder + } + if s.Mode != nil { + pbMode, err := s.Mode.ProtoBuffer() + if err != nil { + return nil, err + } + if pbMode != nil { + pbGeoDistanceSort.Mode = pbMode + } + } + if s.GeoDistanceType != nil { + pbGeoDisType, err := s.GeoDistanceType.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.DistanceType = pbGeoDisType + } + if s.NestedFilter != nil { + pbFilter, err := s.NestedFilter.ProtoBuffer() + if err != nil { + return nil, err + } + pbGeoDistanceSort.NestedFilter = pbFilter + } + pbSorter := &otsprotocol.Sorter{ + GeoDistanceSort: pbGeoDistanceSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go new file mode 100644 index 00000000..4b04ac1b --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_mode.go @@ -0,0 +1,36 @@ +package search + +import ( + "errors" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type SortMode int8 + +const ( + SortMode_Min SortMode = 0 + SortMode_Max SortMode = 1 + SortMode_Avg SortMode = 2 +) + +func (x SortMode) Enum() *SortMode { + p := new(SortMode) + *p = x + return p +} + +func (m *SortMode) ProtoBuffer() (*otsprotocol.SortMode, error) { + if m == nil { + return nil, errors.New("sort mode is nil") + } + if *m == SortMode_Min { + return otsprotocol.SortMode_SORT_MODE_MIN.Enum(), nil + } else if *m == SortMode_Max { + return otsprotocol.SortMode_SORT_MODE_MAX.Enum(), nil + } else if *m == SortMode_Avg { + return otsprotocol.SortMode_SORT_MODE_AVG.Enum(), nil + } else { + return nil, errors.New("unknown sort mode: " + fmt.Sprintf("%#v", *m)) + } +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go new file mode 100644 index 00000000..b936bfe7 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_order.go @@ -0,0 +1,47 @@ +package search + +import ( + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" +) + +type SortOrder int8 + +const ( + SortOrder_ASC SortOrder = 0 + SortOrder_DESC SortOrder = 1 +) + +func (x SortOrder) Enum() *SortOrder { + p := new(SortOrder) + *p = x + return p +} + +func (o *SortOrder) ProtoBuffer() (*otsprotocol.SortOrder, error) { + if o == nil { + return nil, errors.New("sort order is nil") + } + if *o == SortOrder_ASC { + return otsprotocol.SortOrder_SORT_ORDER_ASC.Enum(), nil + } else if *o == SortOrder_DESC { + return otsprotocol.SortOrder_SORT_ORDER_DESC.Enum(), nil + } else { + return nil, errors.New("unknown sort order: " + fmt.Sprintf("%#v", *o)) 
+ } +} + +func ParseSortOrder(order *otsprotocol.SortOrder) *SortOrder { + if order == nil { + return nil + } + if *order == otsprotocol.SortOrder_SORT_ORDER_ASC { + return SortOrder_ASC.Enum() + } else if *order == otsprotocol.SortOrder_SORT_ORDER_DESC { + return SortOrder_DESC.Enum() + } else { + return nil + } +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go new file mode 100644 index 00000000..4a54a352 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_primary_key.go @@ -0,0 +1,28 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type PrimaryKeySort struct { + Order *SortOrder +} + +func NewPrimaryKeySort() *PrimaryKeySort { + return &PrimaryKeySort{ + Order: SortOrder_ASC.Enum(), + } +} + +func (s *PrimaryKeySort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbPrimaryKeySort := &otsprotocol.PrimaryKeySort{} + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbPrimaryKeySort.Order = pbOrder + } + pbSorter := &otsprotocol.Sorter{ + PkSort: pbPrimaryKeySort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go new file mode 100644 index 00000000..00eef31d --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/sort_score.go @@ -0,0 +1,28 @@ +package search + +import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + +type ScoreSort struct { + Order *SortOrder +} + +func NewScoreSort() *ScoreSort { + return &ScoreSort{ + Order: SortOrder_DESC.Enum(), + } +} + +func (s *ScoreSort) ProtoBuffer() (*otsprotocol.Sorter, error) { + pbScoreSort := &otsprotocol.ScoreSort{} + if s.Order != nil { + pbOrder, err := s.Order.ProtoBuffer() + if err != nil { + return nil, err + } + pbScoreSort.Order = pbOrder + } + pbSorter := &otsprotocol.Sorter{ + ScoreSort: pbScoreSort, + } + return pbSorter, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go new file mode 100644 index 00000000..7ec743f7 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search/variant_types.go @@ -0,0 +1,74 @@ +package search + +import ( + "encoding/binary" + "errors" + "math" + "reflect" +) + +type VariantValue []byte +type VariantType byte + +const ( + // variant type + VT_INTEGER VariantType = 0x0 + VT_DOUBLE VariantType = 0x1 + VT_BOOLEAN VariantType = 0x2 + VT_STRING VariantType = 0x3 +) + +func ToVariantValue(value interface{}) (VariantValue, error) { + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + return VTString(value.(string)), nil + case reflect.Int: + return VTInteger(int64(value.(int))), nil + case reflect.Int64: + return VTInteger(value.(int64)), nil + case reflect.Float64: + return VTDouble(value.(float64)), nil + case reflect.Bool: + return VTBoolean(value.(bool)), nil + default: + return nil, errors.New("interface{} type must be string/int64/float64.") + } +} + +func (v *VariantValue) GetType() VariantType { + return VariantType(([]byte)(*v)[0]) +} 
+ +func VTInteger(v int64) VariantValue { + buf := make([]byte, 9) + buf[0] = byte(VT_INTEGER) + binary.LittleEndian.PutUint64(buf[1:9], uint64(v)) + return (VariantValue)(buf) +} + +func VTDouble(v float64) VariantValue { + buf := make([]byte, 9) + buf[0] = byte(VT_DOUBLE) + binary.LittleEndian.PutUint64(buf[1:9], math.Float64bits(v)) + return (VariantValue)(buf) +} + +func VTString(v string) VariantValue { + buf := make([]byte, 5+len(v)) + buf[0] = byte(VT_STRING) + binary.LittleEndian.PutUint32(buf[1:5], uint32(len(v))) + copy(buf[5:], v) + return (VariantValue)(buf) +} + +func VTBoolean(b bool) VariantValue { + buf := make([]byte, 2) + buf[0] = byte(VT_BOOLEAN) + if b { + buf[1] = 1 + } else { + buf[1] = 0 + } + return (VariantValue)(buf) +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go new file mode 100644 index 00000000..29e0c1ec --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_api.go @@ -0,0 +1,136 @@ +package tablestore + +import ( + "bytes" + "errors" + "fmt" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" +) + +func (tableStoreClient *TableStoreClient) CreateSearchIndex(request *CreateSearchIndexRequest) (*CreateSearchIndexResponse, error) { + req := new(otsprotocol.CreateSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + var err error + req.Schema, err = convertToPbSchema(request.IndexSchema) + if err != nil { + return nil, err + } + resp := new(otsprotocol.CreateSearchIndexResponse) + response := &CreateSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(createSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) DeleteSearchIndex(request *DeleteSearchIndexRequest) (*DeleteSearchIndexResponse, error) { + req := new(otsprotocol.DeleteSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + + resp := new(otsprotocol.DeleteSearchIndexResponse) + response := &DeleteSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(deleteSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) ListSearchIndex(request *ListSearchIndexRequest) (*ListSearchIndexResponse, error) { + req := new(otsprotocol.ListSearchIndexRequest) + req.TableName = proto.String(request.TableName) + + resp := new(otsprotocol.ListSearchIndexResponse) + response := &ListSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(listSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + indexs := make([]*IndexInfo, 0) + for _, info := range resp.Indices { + indexs = append(indexs, &IndexInfo{ + TableName: *info.TableName, + IndexName: *info.IndexName, + }) + } + response.IndexInfo = indexs + return response, nil +}
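CreateSearchIndex above converts the index schema to protobuf and issues the request through doRequestWithRetry. A hedged usage sketch: the endpoint, credentials, table, index and field names are all placeholders, and NewClient is assumed to be the SDK's usual client constructor.

```go
// Illustrative only: create a search index over two attribute columns.
// All names and credentials below are placeholders.
package main

import (
	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
	"github.com/golang/protobuf/proto"
)

func main() {
	client := tablestore.NewClient("<endpoint>", "<instance>", "<accessKeyId>", "<accessKeySecret>")

	request := &tablestore.CreateSearchIndexRequest{
		TableName: "sample_table",
		IndexName: "sample_index",
		IndexSchema: &tablestore.IndexSchema{
			FieldSchemas: []*tablestore.FieldSchema{
				{FieldName: proto.String("title"), FieldType: tablestore.FieldType_TEXT},
				{FieldName: proto.String("price"), FieldType: tablestore.FieldType_DOUBLE},
			},
		},
	}
	if _, err := client.CreateSearchIndex(request); err != nil {
		panic(err)
	}
}
```

+ +func (tableStoreClient *TableStoreClient) DescribeSearchIndex(request *DescribeSearchIndexRequest) (*DescribeSearchIndexResponse, error) { + req := new(otsprotocol.DescribeSearchIndexRequest) + req.TableName = proto.String(request.TableName) + req.IndexName = proto.String(request.IndexName) + + resp := 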
new(otsprotocol.DescribeSearchIndexResponse) + response := &DescribeSearchIndexResponse{} + if err := tableStoreClient.doRequestWithRetry(describeSearchIndexUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + schema, err := parseFromPbSchema(resp.Schema) + if err != nil { + return nil, err + } + response.Schema = schema + if resp.SyncStat != nil { + response.SyncStat = &SyncStat{ + CurrentSyncTimestamp: resp.SyncStat.CurrentSyncTimestamp, + } + syncPhase := resp.SyncStat.SyncPhase + if syncPhase == nil { + return nil, errors.New("missing [SyncPhase] in DescribeSearchIndexResponse") + } else if *syncPhase == otsprotocol.SyncPhase_FULL { + response.SyncStat.SyncPhase = SyncPhase_FULL + } else if *syncPhase == otsprotocol.SyncPhase_INCR { + response.SyncStat.SyncPhase = SyncPhase_INCR + } else { + return nil, errors.New(fmt.Sprintf("unknown SyncPhase: %v", syncPhase)) + } + } + return response, nil +} + +func (tableStoreClient *TableStoreClient) Search(request *SearchRequest) (*SearchResponse, error) { + req, err := request.ProtoBuffer() + if err != nil { + return nil, err + } + resp := new(otsprotocol.SearchResponse) + response := &SearchResponse{} + if err := tableStoreClient.doRequestWithRetry(searchUri, req, resp, &response.ResponseInfo); err != nil { + return nil, err + } + response.TotalCount = *resp.TotalHits + + rows := make([]*PlainBufferRow, 0) + for _, buf := range resp.Rows { + row, err := readRowsWithHeader(bytes.NewReader(buf)) + if err != nil { + return nil, err + } + rows = append(rows, row[0]) + } + + for _, row := range rows { + currentRow := &Row{} + currentPk := new(PrimaryKey) + for _, pk := range row.primaryKey { + pkColumn := &PrimaryKeyColumn{ColumnName: string(pk.cellName), Value: pk.cellValue.Value} + currentPk.PrimaryKeys = append(currentPk.PrimaryKeys, pkColumn) + } + currentRow.PrimaryKey = currentPk + for _, cell := range row.cells { + dataColumn := &AttributeColumn{ColumnName: string(cell.cellName), Value: cell.cellValue.Value, Timestamp: cell.cellTimestamp} + currentRow.Columns = append(currentRow.Columns, dataColumn) + } + response.Rows = append(response.Rows, currentRow) + } + + response.IsAllSuccess = *resp.IsAllSucceeded + if resp.NextToken != nil && len(resp.NextToken) > 0 { + response.NextToken = resp.NextToken + } + return response, nil +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go new file mode 100644 index 00000000..6ec022e5 --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search_model.go @@ -0,0 +1,327 @@ +package tablestore + +import ( + "encoding/json" + "errors" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/search" + "github.com/golang/protobuf/proto" +) + +type ColumnsToGet struct { + Columns []string + ReturnAll bool +} + +type SearchRequest struct { + TableName string + IndexName string + SearchQuery search.SearchQuery + ColumnsToGet *ColumnsToGet + RoutingValues []*PrimaryKey +} + +func (r *SearchRequest) SetTableName(tableName string) *SearchRequest { + r.TableName = tableName + return r +} + +func (r *SearchRequest) SetIndexName(indexName string) *SearchRequest { + r.IndexName = indexName + return r +} + +func (r *SearchRequest) SetSearchQuery(searchQuery search.SearchQuery) *SearchRequest { + r.SearchQuery = searchQuery + return r 
+} + +func (r *SearchRequest) SetColumnsToGet(columnToGet *ColumnsToGet) *SearchRequest { + r.ColumnsToGet = columnToGet + return r +} + +func (r *SearchRequest) SetRoutingValues(routingValues []*PrimaryKey) *SearchRequest { + r.RoutingValues = routingValues + return r +} + +func (r *SearchRequest) AddRoutingValue(routingValue *PrimaryKey) *SearchRequest { + r.RoutingValues = append(r.RoutingValues, routingValue) + return r +} + +func (r *SearchRequest) ProtoBuffer() (*otsprotocol.SearchRequest, error) { + req := &otsprotocol.SearchRequest{} + req.TableName = &r.TableName + req.IndexName = &r.IndexName + query, err := r.SearchQuery.Serialize() + if err != nil { + return nil, err + } + req.SearchQuery = query + pbColumns := &otsprotocol.ColumnsToGet{} + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_NONE.Enum() + if r.ColumnsToGet != nil { + if r.ColumnsToGet.ReturnAll { + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_ALL.Enum() + } else if len(r.ColumnsToGet.Columns) > 0 { + pbColumns.ReturnType = otsprotocol.ColumnReturnType_RETURN_SPECIFIED.Enum() + pbColumns.ColumnNames = r.ColumnsToGet.Columns + } + } + req.ColumnsToGet = pbColumns + if r.RoutingValues != nil { + for _, routingValue := range r.RoutingValues { + req.RoutingValues = append(req.RoutingValues, routingValue.Build(false)) + } + } + return req, err +} + +type SearchResponse struct { + TotalCount int64 + Rows []*Row + IsAllSuccess bool + NextToken []byte + ResponseInfo +} + +func convertFieldSchemaToPBFieldSchema(fieldSchemas []*FieldSchema) []*otsprotocol.FieldSchema { + var schemas []*otsprotocol.FieldSchema + for _, value := range fieldSchemas { + field := new(otsprotocol.FieldSchema) + + field.FieldName = proto.String(*value.FieldName) + field.FieldType = otsprotocol.FieldType(int32(value.FieldType)).Enum() + + if value.Index != nil { + field.Index = proto.Bool(*value.Index) + } else if value.FieldType != FieldType_NESTED { + field.Index = proto.Bool(true) + } + if value.IndexOptions != nil { + field.IndexOptions = otsprotocol.IndexOptions(int32(*value.IndexOptions)).Enum() + } + if value.Analyzer != nil { + field.Analyzer = proto.String(string(*value.Analyzer)) + } + if value.EnableSortAndAgg != nil { + field.DocValues = proto.Bool(*value.EnableSortAndAgg) + } + if value.Store != nil { + field.Store = proto.Bool(*value.Store) + } else if value.FieldType != FieldType_NESTED { + if *field.FieldType == otsprotocol.FieldType_TEXT { + field.Store = proto.Bool(false) + } else { + field.Store = proto.Bool(true) + } + } + if value.IsArray != nil { + field.IsArray = proto.Bool(*value.IsArray) + } + if value.FieldType == FieldType_NESTED { + field.FieldSchemas = convertFieldSchemaToPBFieldSchema(value.FieldSchemas) + } + + schemas = append(schemas, field) + } + + return schemas +} + +func convertToPbSchema(schema *IndexSchema) (*otsprotocol.IndexSchema, error) { + indexSchema := new(otsprotocol.IndexSchema) + indexSchema.FieldSchemas = convertFieldSchemaToPBFieldSchema(schema.FieldSchemas) + indexSchema.IndexSetting = new(otsprotocol.IndexSetting) + var defaultNumberOfShards int32 = 1 + indexSchema.IndexSetting.NumberOfShards = &defaultNumberOfShards + if schema.IndexSetting != nil { + indexSchema.IndexSetting.RoutingFields = schema.IndexSetting.RoutingFields + } + if schema.IndexSort != nil { + pbSort, err := schema.IndexSort.ProtoBuffer() + if err != nil { + return nil, err + } + indexSchema.IndexSort = pbSort + } + return indexSchema, nil +} + +func parseFieldSchemaFromPb(pbFieldSchemas 
[]*otsprotocol.FieldSchema) []*FieldSchema { + var schemas []*FieldSchema + for _, value := range pbFieldSchemas { + field := new(FieldSchema) + field.FieldName = value.FieldName + field.FieldType = FieldType(*value.FieldType) + field.Index = value.Index + if value.IndexOptions != nil { + indexOption := IndexOptions(*value.IndexOptions) + field.IndexOptions = &indexOption + } + field.Analyzer = (*Analyzer)(value.Analyzer) + field.EnableSortAndAgg = value.DocValues + field.Store = value.Store + field.IsArray = value.IsArray + if field.FieldType == FieldType_NESTED { + field.FieldSchemas = parseFieldSchemaFromPb(value.FieldSchemas) + } + schemas = append(schemas, field) + } + return schemas +} + +func parseIndexSortFromPb(pbIndexSort *otsprotocol.Sort) (*search.Sort, error) { + indexSort := &search.Sort{ + Sorters: make([]search.Sorter, 0), + } + for _, sorter := range pbIndexSort.GetSorter() { + if sorter.GetFieldSort() != nil { + fieldSort := &search.FieldSort{ + FieldName: *sorter.GetFieldSort().FieldName, + Order: search.ParseSortOrder(sorter.GetFieldSort().Order), + } + indexSort.Sorters = append(indexSort.Sorters, fieldSort) + } else if sorter.GetPkSort() != nil { + pkSort := &search.PrimaryKeySort{ + Order: search.ParseSortOrder(sorter.GetPkSort().Order), + } + indexSort.Sorters = append(indexSort.Sorters, pkSort) + } else { + return nil, errors.New("unknown index sort type") + } + } + return indexSort, nil +} + +func parseFromPbSchema(pbSchema *otsprotocol.IndexSchema) (*IndexSchema, error) { + schema := &IndexSchema{ + IndexSetting: &IndexSetting{ + RoutingFields: pbSchema.IndexSetting.RoutingFields, + }, + } + schema.FieldSchemas = parseFieldSchemaFromPb(pbSchema.GetFieldSchemas()) + indexSort, err := parseIndexSortFromPb(pbSchema.GetIndexSort()) + if err != nil { + return nil, err + } + schema.IndexSort = indexSort + return schema, nil +} + +type IndexSchema struct { + IndexSetting *IndexSetting + FieldSchemas []*FieldSchema + IndexSort *search.Sort +} + +type FieldType int32 + +const ( + FieldType_LONG FieldType = 1 + FieldType_DOUBLE FieldType = 2 + FieldType_BOOLEAN FieldType = 3 + FieldType_KEYWORD FieldType = 4 + FieldType_TEXT FieldType = 5 + FieldType_NESTED FieldType = 6 + FieldType_GEO_POINT FieldType = 7 +) + +type IndexOptions int32 + +const ( + IndexOptions_DOCS IndexOptions = 1 + IndexOptions_FREQS IndexOptions = 2 + IndexOptions_POSITIONS IndexOptions = 3 + IndexOptions_OFFSETS IndexOptions = 4 +) + +type Analyzer string + +const ( + Analyzer_SingleWord Analyzer = "single_word" + Analyzer_MaxWord Analyzer = "max_word" +) + +type FieldSchema struct { + FieldName *string + FieldType FieldType + Index *bool + IndexOptions *IndexOptions + Analyzer *Analyzer + EnableSortAndAgg *bool + Store *bool + IsArray *bool + FieldSchemas []*FieldSchema +} + +func (fs *FieldSchema) String() string { + out, err := json.Marshal(fs) + if err != nil { + panic(err) + } + return string(out) +} + +type IndexSetting struct { + RoutingFields []string +} + +type CreateSearchIndexRequest struct { + TableName string + IndexName string + IndexSchema *IndexSchema +} + +type CreateSearchIndexResponse struct { + ResponseInfo ResponseInfo +} + +type DescribeSearchIndexRequest struct { + TableName string + IndexName string +} + +type SyncPhase int32 + +const ( + SyncPhase_FULL SyncPhase = 1 + SyncPhase_INCR SyncPhase = 2 +) + +type SyncStat struct { + SyncPhase SyncPhase + CurrentSyncTimestamp *int64 +} + +type DescribeSearchIndexResponse struct { + Schema *IndexSchema + SyncStat *SyncStat + 
ResponseInfo ResponseInfo +} + +type ListSearchIndexRequest struct { + TableName string +} + +type IndexInfo struct { + TableName string + IndexName string +} + +type ListSearchIndexResponse struct { + IndexInfo []*IndexInfo + ResponseInfo ResponseInfo +} + +type DeleteSearchIndexRequest struct { + TableName string + IndexName string +} + +type DeleteSearchIndexResponse struct { + ResponseInfo ResponseInfo +} diff --git a/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go new file mode 100644 index 00000000..a2fc847c --- /dev/null +++ b/vendor/github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/util.go @@ -0,0 +1,984 @@ +package tablestore + +import ( + "bytes" + "fmt" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore/otsprotocol" + "github.com/golang/protobuf/proto" + "io" + "io/ioutil" + "math" + "net/http" + "reflect" + "sort" +) + +const ( + maxTableNameLength = 100 + maxPrimaryKeyLength = 255 + maxPrimaryKeyNum = 10 + maxMultiDeleteRows = 100 +) + +type ColumnType int32 + +const ( + ColumnType_STRING ColumnType = 1 + ColumnType_INTEGER ColumnType = 2 + ColumnType_BOOLEAN ColumnType = 3 + ColumnType_DOUBLE ColumnType = 4 + ColumnType_BINARY ColumnType = 5 +) + +const ( + Version = "1.0" + ApiVersion = "2015-12-31" + xOtsDateFormat = "2006-01-02T15:04:05.123Z" + xOtsInstanceName = "x-ots-instancename" + xOtsRequestId = "x-ots-requestid" +) + +type ColumnValue struct { + Type ColumnType + Value interface{} +} + +func (cv *ColumnValue) writeCellValue(w io.Writer) { + writeTag(w, TAG_CELL_VALUE) + if cv == nil { + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_AUTO_INCREMENT) + return + } + + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_32_SIZE+1+len(v))) // length + type + value + writeRawByte(w, VT_STRING) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, []byte(v)) + + case ColumnType_INTEGER: + v := cv.Value.(int64) + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_64_SIZE+1)) + writeRawByte(w, VT_INTEGER) + writeRawLittleEndian64(w, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + writeRawLittleEndian32(w, 2) + writeRawByte(w, VT_BOOLEAN) + writeBoolean(w, v) + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + + writeRawLittleEndian32(w, LITTLE_ENDIAN_64_SIZE+1) + writeRawByte(w, VT_DOUBLE) + writeDouble(w, v) + + case ColumnType_BINARY: + v := cv.Value.([]byte) + + writeRawLittleEndian32(w, int32(LITTLE_ENDIAN_32_SIZE+1+len(v))) // length + type + value + writeRawByte(w, VT_BLOB) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, v) + } +} + +func (cv *ColumnValue) writeCellValueWithoutLengthPrefix() []byte { + var b bytes.Buffer + w := &b + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + + writeRawByte(w, VT_STRING) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, []byte(v)) + + case ColumnType_INTEGER: + v := cv.Value.(int64) + writeRawByte(w, VT_INTEGER) + writeRawLittleEndian64(w, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + writeRawByte(w, VT_BOOLEAN) + writeBoolean(w, v) + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + + writeRawByte(w, VT_DOUBLE) + writeDouble(w, v) + + case ColumnType_BINARY: + v := cv.Value.([]byte) + + writeRawByte(w, VT_BLOB) + writeRawLittleEndian32(w, int32(len(v))) + writeBytes(w, v) + } + + return b.Bytes() +} + +func (cv *ColumnValue) getCheckSum(crc byte) byte { + if 
cv == nil { + return crc8Byte(crc, VT_AUTO_INCREMENT) + } + + switch cv.Type { + case ColumnType_STRING: + v := cv.Value.(string) + crc = crc8Byte(crc, VT_STRING) + crc = crc8Int32(crc, int32(len(v))) + crc = crc8Bytes(crc, []byte(v)) + case ColumnType_INTEGER: + v := cv.Value.(int64) + crc = crc8Byte(crc, VT_INTEGER) + crc = crc8Int64(crc, v) + case ColumnType_BOOLEAN: + v := cv.Value.(bool) + crc = crc8Byte(crc, VT_BOOLEAN) + if v { + crc = crc8Byte(crc, 0x1) + } else { + crc = crc8Byte(crc, 0x0) + } + + case ColumnType_DOUBLE: + v := cv.Value.(float64) + crc = crc8Byte(crc, VT_DOUBLE) + crc = crc8Int64(crc, int64(math.Float64bits(v))) + case ColumnType_BINARY: + v := cv.Value.([]byte) + crc = crc8Byte(crc, VT_BLOB) + crc = crc8Int32(crc, int32(len(v))) + crc = crc8Bytes(crc, v) + } + + return crc +} + +type Column struct { + Name []byte + Value ColumnValue + Type byte + Timestamp int64 + HasType bool + HasTimestamp bool + IgnoreValue bool +} + +func NewColumn(name []byte, value interface{}) *Column { + + v := &Column{} + v.Name = name + + if value != nil { + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + v.Value.Type = ColumnType_STRING + + case reflect.Int64: + v.Value.Type = ColumnType_INTEGER + + case reflect.Bool: + v.Value.Type = ColumnType_BOOLEAN + + case reflect.Float64: + v.Value.Type = ColumnType_DOUBLE + + case reflect.Slice: + v.Value.Type = ColumnType_BINARY + default: + panic(errInvalidInput) + } + + v.Value.Value = value + } + + return v +} + +func (c *Column) toPlainBufferCell(ignoreValue bool) *PlainBufferCell { + cell := &PlainBufferCell{} + cell.cellName = c.Name + cell.ignoreValue = ignoreValue + if ignoreValue == false { + cell.cellValue = &c.Value + } + + if c.HasType { + cell.hasCellType = c.HasType + cell.cellType = byte(c.Type) + } + + if c.HasTimestamp { + cell.hasCellTimestamp = c.HasTimestamp + cell.cellTimestamp = c.Timestamp + } + + return cell +} + +type PrimaryKeyColumnInner struct { + Name []byte + Type otsprotocol.PrimaryKeyType + Value interface{} +} + +func NewPrimaryKeyColumnINF_MAX(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "INF_MAX" + + return v +} + +func NewPrimaryKeyColumnINF_MIN(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "INF_MIN" + + return v +} + +func NewPrimaryKeyColumnAuto_Increment(name []byte) *PrimaryKeyColumnInner { + v := &PrimaryKeyColumnInner{} + v.Name = name + v.Type = 0 + v.Value = "AUTO_INCRMENT" + return v +} + +func NewPrimaryKeyColumn(name []byte, value interface{}, option PrimaryKeyOption) *PrimaryKeyColumnInner { + + if option == NONE { + v := &PrimaryKeyColumnInner{} + v.Name = name + + t := reflect.TypeOf(value) + switch t.Kind() { + case reflect.String: + v.Type = otsprotocol.PrimaryKeyType_STRING + + case reflect.Int64: + v.Type = otsprotocol.PrimaryKeyType_INTEGER + + case reflect.Slice: + v.Type = otsprotocol.PrimaryKeyType_BINARY + + default: + panic(errInvalidInput) + } + + v.Value = value + + return v + } else if option == AUTO_INCREMENT { + return NewPrimaryKeyColumnAuto_Increment(name) + } else if option == MIN { + return NewPrimaryKeyColumnINF_MIN(name) + } else { + return NewPrimaryKeyColumnINF_MAX(name) + } +} + +func (pkc *PrimaryKeyColumnInner) toColumnValue() *ColumnValue { + switch pkc.Type { + case otsprotocol.PrimaryKeyType_INTEGER: + return &ColumnValue{ColumnType_INTEGER, pkc.Value} + case otsprotocol.PrimaryKeyType_STRING: + return 
&ColumnValue{ColumnType_STRING, pkc.Value} + case otsprotocol.PrimaryKeyType_BINARY: + return &ColumnValue{ColumnType_BINARY, pkc.Value} + } + + return nil +} + +func (pkc *PrimaryKeyColumnInner) toPlainBufferCell() *PlainBufferCell { + cell := &PlainBufferCell{} + cell.cellName = pkc.Name + cell.cellValue = pkc.toColumnValue() + return cell +} + +func (pkc *PrimaryKeyColumnInner) isInfMin() bool { + if pkc.Type == 0 && pkc.Value.(string) == "INF_MIN" { + return true + } + + return false +} + +func (pkc *PrimaryKeyColumnInner) isInfMax() bool { + if pkc.Type == 0 && pkc.Value.(string) == "INF_MAX" { + return true + } + + return false +} + +func (pkc *PrimaryKeyColumnInner) isAutoInc() bool { + if pkc.Type == 0 && pkc.Value.(string) == "AUTO_INCRMENT" { + return true + } + return false +} + +func (pkc *PrimaryKeyColumnInner) getCheckSum(crc byte) byte { + if pkc.isInfMin() { + return crc8Byte(crc, VT_INF_MIN) + } + if pkc.isInfMax() { + return crc8Byte(crc, VT_INF_MAX) + } + if pkc.isAutoInc() { + return crc8Byte(crc, VT_AUTO_INCREMENT) + } + + return pkc.toColumnValue().getCheckSum(crc) +} + +func (pkc *PrimaryKeyColumnInner) writePrimaryKeyColumn(w io.Writer) { + writeTag(w, TAG_CELL) + writeCellName(w, []byte(pkc.Name)) + if pkc.isInfMin() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_INF_MIN) + return + } + if pkc.isInfMax() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_INF_MAX) + return + } + if pkc.isAutoInc() { + writeTag(w, TAG_CELL_VALUE) + writeRawLittleEndian32(w, 1) + writeRawByte(w, VT_AUTO_INCREMENT) + return + } + pkc.toColumnValue().writeCellValue(w) +} + +type PrimaryKey2 struct { + primaryKey []*PrimaryKeyColumnInner +} + +func (pk *PrimaryKey) Build(isDelete bool) []byte { + var b bytes.Buffer + writeHeader(&b) + writeTag(&b, TAG_ROW_PK) + + rowChecksum := byte(0x0) + var cellChecksum byte + + for _, column := range pk.PrimaryKeys { + primaryKeyColumn := NewPrimaryKeyColumn([]byte(column.ColumnName), column.Value, column.PrimaryKeyOption) + + cellChecksum = crc8Bytes(byte(0x0), []byte(primaryKeyColumn.Name)) + cellChecksum = primaryKeyColumn.getCheckSum(cellChecksum) + rowChecksum = crc8Byte(rowChecksum, cellChecksum) + primaryKeyColumn.writePrimaryKeyColumn(&b) + + writeTag(&b, TAG_CELL_CHECKSUM) + writeRawByte(&b, cellChecksum) + } + + // If there is no delete marker, the row checksum still has to be CRC'd with 0x0.
+ if isDelete { + writeTag(&b, TAG_DELETE_ROW_MARKER) + rowChecksum = crc8Byte(rowChecksum, byte(0x1)) + } else { + rowChecksum = crc8Byte(rowChecksum, byte(0x0)) + } + writeTag(&b, TAG_ROW_CHECKSUM) + writeRawByte(&b, rowChecksum) + + return b.Bytes() +} + +type RowPutChange struct { + primaryKey []*PrimaryKeyColumnInner + columnsToPut []*Column +} + +type RowUpdateChange struct { + primaryKey []*PrimaryKeyColumnInner + columnsToUpdate []*Column +} + +func (rpc *RowPutChange) Build() []byte { + pkCells := make([]*PlainBufferCell, len(rpc.primaryKey)) + for i, pkc := range rpc.primaryKey { + pkCells[i] = pkc.toPlainBufferCell() + } + + cells := make([]*PlainBufferCell, len(rpc.columnsToPut)) + for i, c := range rpc.columnsToPut { + cells[i] = c.toPlainBufferCell(false) + } + + row := &PlainBufferRow{ + primaryKey: pkCells, + cells: cells} + var b bytes.Buffer + row.writeRowWithHeader(&b) + + return b.Bytes() +} + +func (ruc *RowUpdateChange) Build() []byte { + pkCells := make([]*PlainBufferCell, len(ruc.primaryKey)) + for i, pkc := range ruc.primaryKey { + pkCells[i] = pkc.toPlainBufferCell() + } + + cells := make([]*PlainBufferCell, len(ruc.columnsToUpdate)) + for i, c := range ruc.columnsToUpdate { + cells[i] = c.toPlainBufferCell(c.IgnoreValue) + } + + row := &PlainBufferRow{ + primaryKey: pkCells, + cells: cells} + var b bytes.Buffer + row.writeRowWithHeader(&b) + + return b.Bytes() +} + +const ( + MaxValue = "_get_range_max" + MinValue = "_get_range_min" +) + +func (comparatorType *ComparatorType) ConvertToPbComparatorType() otsprotocol.ComparatorType { + switch *comparatorType { + case CT_EQUAL: + return otsprotocol.ComparatorType_CT_EQUAL + case CT_NOT_EQUAL: + return otsprotocol.ComparatorType_CT_NOT_EQUAL + case CT_GREATER_THAN: + return otsprotocol.ComparatorType_CT_GREATER_THAN + case CT_GREATER_EQUAL: + return otsprotocol.ComparatorType_CT_GREATER_EQUAL + case CT_LESS_THAN: + return otsprotocol.ComparatorType_CT_LESS_THAN + default: + return otsprotocol.ComparatorType_CT_LESS_EQUAL + } +} + +func (columnType DefinedColumnType) ConvertToPbDefinedColumnType() otsprotocol.DefinedColumnType { + switch columnType { + case DefinedColumn_INTEGER: + return otsprotocol.DefinedColumnType_DCT_INTEGER + case DefinedColumn_DOUBLE: + return otsprotocol.DefinedColumnType_DCT_DOUBLE + case DefinedColumn_BOOLEAN: + return otsprotocol.DefinedColumnType_DCT_BOOLEAN + case DefinedColumn_STRING: + return otsprotocol.DefinedColumnType_DCT_STRING + default: + return otsprotocol.DefinedColumnType_DCT_BLOB + } +} + +func (loType *LogicalOperator) ConvertToPbLoType() otsprotocol.LogicalOperator { + switch *loType { + case LO_NOT: + return otsprotocol.LogicalOperator_LO_NOT + case LO_AND: + return otsprotocol.LogicalOperator_LO_AND + default: + return otsprotocol.LogicalOperator_LO_OR + } +} + +func ConvertToPbCastType(variantType VariantType) *otsprotocol.VariantType { + switch variantType { + case Variant_INTEGER: + return otsprotocol.VariantType_VT_INTEGER.Enum() + case Variant_DOUBLE: + return otsprotocol.VariantType_VT_DOUBLE.Enum() + case Variant_STRING: + return otsprotocol.VariantType_VT_STRING.Enum() + default: + panic("invalid VariantType") + } +} + +func NewValueTransferRule(regex string, vt VariantType) *ValueTransferRule{ + return &ValueTransferRule{Regex: regex, Cast_type: vt} +} + +func NewSingleColumnValueRegexFilter(columnName string, comparator ComparatorType, rule *ValueTransferRule, value interface{}) *SingleColumnCondition { + return &SingleColumnCondition{ColumnName: &columnName, 
Comparator: &comparator, ColumnValue: value, TransferRule: rule} +} + +func NewSingleColumnValueFilter(condition *SingleColumnCondition) *otsprotocol.SingleColumnValueFilter { + filter := new(otsprotocol.SingleColumnValueFilter) + + comparatorType := condition.Comparator.ConvertToPbComparatorType() + filter.Comparator = &comparatorType + filter.ColumnName = condition.ColumnName + col := NewColumn([]byte(*condition.ColumnName), condition.ColumnValue) + filter.ColumnValue = col.toPlainBufferCell(false).cellValue.writeCellValueWithoutLengthPrefix() + filter.FilterIfMissing = proto.Bool(condition.FilterIfMissing) + filter.LatestVersionOnly = proto.Bool(condition.LatestVersionOnly) + if condition.TransferRule != nil { + filter.ValueTransRule = &otsprotocol.ValueTransferRule{ Regex: proto.String(condition.TransferRule.Regex), CastType: ConvertToPbCastType(condition.TransferRule.Cast_type) } + } + return filter +} + +func NewCompositeFilter(filters []ColumnFilter, lo LogicalOperator) *otsprotocol.CompositeColumnValueFilter { + ccvfilter := new(otsprotocol.CompositeColumnValueFilter) + combinator := lo.ConvertToPbLoType() + ccvfilter.Combinator = &combinator + for _, cf := range filters { + filter := cf.ToFilter() + ccvfilter.SubFilters = append(ccvfilter.SubFilters, filter) + } + + return ccvfilter +} + +func NewPaginationFilter(filter *PaginationFilter) *otsprotocol.ColumnPaginationFilter { + pageFilter := new(otsprotocol.ColumnPaginationFilter) + pageFilter.Offset = proto.Int32(filter.Offset) + pageFilter.Limit = proto.Int32(filter.Limit) + return pageFilter +} + +func (otsClient *TableStoreClient) postReq(req *http.Request, url string) ([]byte, error, string) { + resp, err := otsClient.httpClient.Do(req) + if err != nil { + return nil, err, "" + } + defer resp.Body.Close() + + reqId := getRequestId(resp) + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err, reqId + } + + if (resp.StatusCode >= 200 && resp.StatusCode < 300) == false { + var retErr *OtsError + perr := new(otsprotocol.Error) + errUm := proto.Unmarshal(body, perr) + if errUm != nil { + retErr = rawHttpToOtsError(resp.StatusCode, body, reqId) + } else { + retErr = pbErrToOtsError(resp.StatusCode, perr, reqId) + } + return nil, retErr, reqId + } + + return body, nil, reqId +} + +func rawHttpToOtsError(code int, body []byte, reqId string) *OtsError { + oerr := &OtsError{ + Message: string(body), + RequestId: reqId, + HttpStatusCode: code, + } + if code >= 500 && code < 600 { + oerr.Code = SERVER_UNAVAILABLE + } else { + oerr.Code = OTS_CLIENT_UNKNOWN + } + return oerr +} + +func pbErrToOtsError(statusCode int, pbErr *otsprotocol.Error, reqId string) *OtsError { + return &OtsError{ + Code: pbErr.GetCode(), + Message: pbErr.GetMessage(), + RequestId: reqId, + HttpStatusCode : statusCode, + } +} + +func getRequestId(response *http.Response) string { + if response == nil || response.Header == nil { + return "" + } + + return response.Header.Get(xOtsRequestId) +} + +func buildRowPutChange(primarykey *PrimaryKey, columns []AttributeColumn) *RowPutChange { + row := new(RowPutChange) + row.primaryKey = make([]*PrimaryKeyColumnInner, len(primarykey.PrimaryKeys)) + for i, p := range primarykey.PrimaryKeys { + row.primaryKey[i] = NewPrimaryKeyColumn([]byte(p.ColumnName), p.Value, p.PrimaryKeyOption) + } + + row.columnsToPut = make([]*Column, len(columns)) + for i, p := range columns { + row.columnsToPut[i] = NewColumn([]byte(p.ColumnName), p.Value) + if p.Timestamp != 0 { + row.columnsToPut[i].HasTimestamp = true + 
row.columnsToPut[i].Timestamp = p.Timestamp + } + } + + return row +} + +func buildRowUpdateChange(primarykey *PrimaryKey, columns []ColumnToUpdate) *RowUpdateChange { + row := new(RowUpdateChange) + row.primaryKey = make([]*PrimaryKeyColumnInner, len(primarykey.PrimaryKeys)) + for i, p := range primarykey.PrimaryKeys { + row.primaryKey[i] = NewPrimaryKeyColumn([]byte(p.ColumnName), p.Value, p.PrimaryKeyOption) + } + + row.columnsToUpdate = make([]*Column, len(columns)) + for i, p := range columns { + row.columnsToUpdate[i] = NewColumn([]byte(p.ColumnName), p.Value) + row.columnsToUpdate[i].HasTimestamp = p.HasTimestamp + row.columnsToUpdate[i].HasType = p.HasType + row.columnsToUpdate[i].Type = p.Type + row.columnsToUpdate[i].Timestamp = p.Timestamp + row.columnsToUpdate[i].IgnoreValue = p.IgnoreValue + } + + return row +} + +func (condition *RowCondition) buildCondition() *otsprotocol.RowExistenceExpectation { + switch condition.RowExistenceExpectation { + case RowExistenceExpectation_IGNORE: + return otsprotocol.RowExistenceExpectation_IGNORE.Enum() + case RowExistenceExpectation_EXPECT_EXIST: + return otsprotocol.RowExistenceExpectation_EXPECT_EXIST.Enum() + case RowExistenceExpectation_EXPECT_NOT_EXIST: + return otsprotocol.RowExistenceExpectation_EXPECT_NOT_EXIST.Enum() + } + + panic(errInvalidInput) +} + +// build primary key for create table, put row, delete row and update row +// the value only supports int64, string and []byte; any other type will panic +func buildPrimaryKey(primaryKeyName string, value interface{}) *PrimaryKeyColumn { + // Todo: validate the input + return &PrimaryKeyColumn{ColumnName: primaryKeyName, Value: value, PrimaryKeyOption: NONE} +} + +// the value only supports int64, string, bool, float64 and []byte; any other type will panic +func (rowchange *PutRowChange) AddColumn(columnName string, value interface{}) { + // Todo: validate the input + column := &AttributeColumn{ColumnName: columnName, Value: value} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *PutRowChange) SetReturnPk() { + rowchange.ReturnType = ReturnType(ReturnType_RT_PK) +} + +func (rowchange *UpdateRowChange) SetReturnIncrementValue() { + rowchange.ReturnType = ReturnType(ReturnType_RT_AFTER_MODIFY) +} + +func (rowchange *UpdateRowChange) AppendIncrementColumnToReturn(name string) { + rowchange.ColumnNamesToReturn = append(rowchange.ColumnNamesToReturn, name) +} + +// the value only supports int64, string, bool, float64 and []byte; any other type will panic +func (rowchange *PutRowChange) AddColumnWithTimestamp(columnName string, value interface{}, timestamp int64) { + // Todo: validate the input + column := &AttributeColumn{ColumnName: columnName, Value: value} + column.Timestamp = timestamp + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumn(primaryKeyName string, value interface{}) { + pk.PrimaryKeys = append(pk.PrimaryKeys, buildPrimaryKey(primaryKeyName, value)) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumnWithAutoIncrement(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: AUTO_INCREMENT}) +} + +func (pk *PrimaryKey) AddPrimaryKeyColumnWithMinValue(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: MIN}) +} + +// Only used for range query +func (pk *PrimaryKey) AddPrimaryKeyColumnWithMaxValue(primaryKeyName string) { + pk.PrimaryKeys = append(pk.PrimaryKeys, &PrimaryKeyColumn{ColumnName: primaryKeyName, PrimaryKeyOption: MAX}) +} + +func (rowchange *PutRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (rowchange *DeleteRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (Criteria *SingleRowQueryCriteria) SetFilter(filter ColumnFilter) { + Criteria.Filter = filter +} + +func (Criteria *MultiRowQueryCriteria) SetFilter(filter ColumnFilter) { + Criteria.Filter = filter +} + +func NewSingleColumnCondition(columnName string, comparator ComparatorType, value interface{}) *SingleColumnCondition { + return &SingleColumnCondition{ColumnName: &columnName, Comparator: &comparator, ColumnValue: value} +} + +func NewCompositeColumnCondition(lo LogicalOperator) *CompositeColumnValueFilter { + return &CompositeColumnValueFilter{Operator: lo} +} + +func (rowchange *PutRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (rowchange *UpdateRowChange) SetCondition(rowExistenceExpectation RowExistenceExpectation) { + rowchange.Condition = &RowCondition{RowExistenceExpectation: rowExistenceExpectation} +} + +func (rowchange *UpdateRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (rowchange *DeleteRowChange) SetColumnCondition(condition ColumnFilter) { + rowchange.Condition.ColumnCondition = condition +} + +func (meta *TableMeta) AddPrimaryKeyColumn(name string, keyType PrimaryKeyType) { + meta.SchemaEntry = append(meta.SchemaEntry, &PrimaryKeySchema{Name: &name, Type: &keyType}) +} + +func (meta *TableMeta) AddPrimaryKeyColumnOption(name string, keyType PrimaryKeyType, keyOption PrimaryKeyOption) { + meta.SchemaEntry = append(meta.SchemaEntry, &PrimaryKeySchema{Name: &name, Type: &keyType, Option: &keyOption}) +} + +// the value only supports int64, string, bool, float64 and []byte; any other type will panic +func (rowchange *UpdateRowChange) PutColumn(columnName string, value interface{}) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: value} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) DeleteColumn(columnName string) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: nil, Type: DELETE_ALL_VERSION, HasType: true, IgnoreValue: true} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) DeleteColumnWithTimestamp(columnName string, timestamp int64) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: nil, Type: DELETE_ONE_VERSION, HasType: true, HasTimestamp: true, Timestamp: timestamp, IgnoreValue: true} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *UpdateRowChange) IncrementColumn(columnName string, value int64) { + // Todo: validate the input + column := &ColumnToUpdate{ColumnName: columnName, Value: value, Type: INCREMENT, HasType: true, IgnoreValue: false} + rowchange.Columns = append(rowchange.Columns, *column) +} + +func (rowchange *DeleteRowChange) Serialize() []byte { + return rowchange.PrimaryKey.Build(true) +} + +func (rowchange *PutRowChange) Serialize() []byte { + row := buildRowPutChange(rowchange.PrimaryKey, rowchange.Columns) + return row.Build() +} + +func (rowchange *UpdateRowChange) Serialize() []byte { + row := buildRowUpdateChange(rowchange.PrimaryKey, rowchange.Columns) + return row.Build() +} + +func (rowchange *DeleteRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *PutRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *UpdateRowChange) GetTableName() string { + return rowchange.TableName +} + +func (rowchange *DeleteRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_DELETE +} + +func (rowchange *PutRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_PUT +} + +func (rowchange *UpdateRowChange) getOperationType() otsprotocol.OperationType { + return otsprotocol.OperationType_UPDATE +} + +func (rowchange *DeleteRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +} + +func (rowchange *UpdateRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +} + +func (rowchange *PutRowChange) getCondition() *otsprotocol.Condition { + condition := new(otsprotocol.Condition) + condition.RowExistence = rowchange.Condition.buildCondition() + if rowchange.Condition.ColumnCondition != nil { + condition.ColumnCondition = rowchange.Condition.ColumnCondition.Serialize() + } + return condition +}
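The UpdateRowChange helpers above (PutColumn, DeleteColumn, DeleteColumnWithTimestamp, IncrementColumn) accumulate ColumnToUpdate entries that Serialize later encodes into the plain buffer format. A hedged sketch of composing one change; the table, key and column names are placeholders:

```go
// Illustrative only; table, key and column names are placeholders.
package main

import "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"

func buildUpdate() *tablestore.UpdateRowChange {
	change := new(tablestore.UpdateRowChange)
	change.TableName = "sample_table"

	pk := new(tablestore.PrimaryKey)
	pk.AddPrimaryKeyColumn("pk0", "row-1")
	change.PrimaryKey = pk

	change.SetCondition(tablestore.RowExistenceExpectation_IGNORE)
	change.PutColumn("title", "hello") // write or overwrite a column
	change.DeleteColumn("obsolete")    // remove every version of a column
	change.IncrementColumn("views", 1) // atomic increment (int64 only)
	return change
}

func main() { _ = buildUpdate() }
```

+ +func (request *BatchWriteRowRequest) AddRowChange(change RowChange) { + if request.RowChangesGroupByTable == nil { + request.RowChangesGroupByTable = make(map[string][]RowChange) + } + request.RowChangesGroupByTable[change.GetTableName()] = 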
append(request.RowChangesGroupByTable[change.GetTableName()], change) +} + +func (direction Direction) ToDirection() otsprotocol.Direction { + if direction == FORWARD { + return otsprotocol.Direction_FORWARD + } else { + return otsprotocol.Direction_BACKWARD + } +} + +func (columnMap *ColumnMap) GetRange(start int, count int) ([]*AttributeColumn, error) { + columns := []*AttributeColumn{} + + end := start + count + if len(columnMap.columnsKey) < end { + return nil, fmt.Errorf("invalid argument") + } + + for i := start; i < end; i++ { + subColumns := columnMap.Columns[columnMap.columnsKey[i]] + for _, column := range subColumns { + columns = append(columns, column) + } + } + + return columns, nil +} + +func (response *GetRowResponse) GetColumnMap() *ColumnMap { + if response == nil { + return nil + } + if response.columnMap != nil { + return response.columnMap + } else { + response.columnMap = &ColumnMap{} + response.columnMap.Columns = make(map[string][]*AttributeColumn) + + if len(response.Columns) == 0 { + return response.columnMap + } else { + for _, column := range response.Columns { + if _, ok := response.columnMap.Columns[column.ColumnName]; ok { + response.columnMap.Columns[column.ColumnName] = append(response.columnMap.Columns[column.ColumnName], column) + } else { + response.columnMap.columnsKey = append(response.columnMap.columnsKey, column.ColumnName) + value := []*AttributeColumn{} + value = append(value, column) + response.columnMap.Columns[column.ColumnName] = value + } + } + + sort.Strings(response.columnMap.columnsKey) + return response.columnMap + } + } +} + +func Assert(cond bool, msg string) { + if !cond { + panic(msg) + } +} + +func (meta *TableMeta) AddDefinedColumn(name string, definedType DefinedColumnType) { + meta.DefinedColumns = append(meta.DefinedColumns, &DefinedColumnSchema{Name: name, ColumnType: definedType}) +} + +func (meta *IndexMeta) AddDefinedColumn(name string) { + meta.DefinedColumns = append(meta.DefinedColumns, name) +} + +func (meta *IndexMeta) AddPrimaryKeyColumn(name string) { + meta.Primarykey = append(meta.Primarykey, name) +} + +func (request *CreateTableRequest) AddIndexMeta(meta *IndexMeta) { + request.IndexMetas = append(request.IndexMetas, meta) +} + +func (meta *IndexMeta) ConvertToPbIndexMeta() *otsprotocol.IndexMeta { + return &otsprotocol.IndexMeta { + Name: &meta.IndexName, + PrimaryKey: meta.Primarykey, + DefinedColumn: meta.DefinedColumns, + IndexUpdateMode: otsprotocol.IndexUpdateMode_IUM_ASYNC_INDEX.Enum(), + IndexType: otsprotocol.IndexType_IT_GLOBAL_INDEX.Enum(), + } +} + +func ConvertPbIndexTypeToIndexType(indexType *otsprotocol.IndexType) IndexType { + switch *indexType { + case otsprotocol.IndexType_IT_GLOBAL_INDEX: + return IT_GLOBAL_INDEX + default: + return IT_LOCAL_INDEX + } +} +func ConvertPbIndexMetaToIndexMeta(meta *otsprotocol.IndexMeta) *IndexMeta { + indexmeta := &IndexMeta { + IndexName: *meta.Name, + IndexType: ConvertPbIndexTypeToIndexType(meta.IndexType), + } + + for _, pk := range meta.PrimaryKey { + indexmeta.Primarykey = append(indexmeta.Primarykey, pk) + } + + for _, col := range meta.DefinedColumn { + indexmeta.DefinedColumns = append(indexmeta.DefinedColumns, col) + } + + return indexmeta +}
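GetColumnMap above lazily groups a row's attribute columns by name, keeping a sorted key list that GetRange pages through. A small, illustrative way to consume it; the response is assumed to come from an earlier GetRow call, and the nil-safe invocation relies on the receiver check shown above:

```go
// Illustrative only: walk a GetRowResponse's columns grouped by name.
package main

import (
	"fmt"

	"github.com/aliyun/aliyun-tablestore-go-sdk/tablestore"
)

func printColumns(resp *tablestore.GetRowResponse) {
	cmap := resp.GetColumnMap() // returns nil for a nil response
	if cmap == nil {
		return
	}
	// Each key holds every returned version of that attribute column.
	for name, versions := range cmap.Columns {
		for _, col := range versions {
			fmt.Printf("%s=%v @%d\n", name, col.Value, col.Timestamp)
		}
	}
}

func main() { printColumns(nil) }
```

diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index 1583d638..0c9c3d39 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -61,11 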
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index 1583d638..0c9c3d39 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -61,11 +61,23 @@ func Host(base *net.IPNet, num int) (net.IP, error) { hostLen := addrLen - parentLen maxHostNum := uint64(1<<uint64(hostLen)) - 1 - if uint64(num) > maxHostNum { - return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) + + numUint64 := uint64(num) + if num < 0 { + numUint64 = uint64(-num) - 1 + num = int(maxHostNum - numUint64) } - return insertNumIntoIP(ip, num, 32), nil + if numUint64 > maxHostNum { + return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) + } + var bitlength int + if ip.To4() != nil { + bitlength = 32 + } else { + bitlength = 128 + } + return insertNumIntoIP(ip, num, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. @@ -103,3 +115,100 @@ func AddressCount(network *net.IPNet) uint64 { prefixLen, bits := network.Mask.Size() return 1 << (uint64(bits) - uint64(prefixLen)) } + +//VerifyNoOverlap takes a list of subnets and a supernet (CIDRBlock) and verifies +//that none of the subnets overlap and that all subnets are contained in the supernet. +//It returns an error if either of those conditions is not satisfied. +func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { + firstLastIP := make([][]net.IP, len(subnets)) + for i, s := range subnets { + first, last := AddressRange(s) + firstLastIP[i] = []net.IP{first, last} + } + for i, s := range subnets { + if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { + return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) + } + for j := 0; j < len(subnets); j++ { + if i == j { + continue + } + + first := firstLastIP[j][0] + last := firstLastIP[j][1] + if s.Contains(first) || s.Contains(last) { + return fmt.Errorf("%s overlaps with %s", subnets[j].String(), s.String()) + } + } + } + return nil +} + +// PreviousSubnet returns the subnet of the desired mask in the IP space +// just lower than the start of IPNet provided.
If the IP space rolls over +// then the second return value is true. +func PreviousSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + startIP := checkIPv4(network.IP) + previousIP := make(net.IP, len(startIP)) + copy(previousIP, startIP) + cMask := net.CIDRMask(prefixLen, 8*len(previousIP)) + previousIP = Dec(previousIP) + previous := &net.IPNet{IP: previousIP.Mask(cMask), Mask: cMask} + if startIP.Equal(net.IPv4zero) || startIP.Equal(net.IPv6zero) { + return previous, true + } + return previous, false +} + +// NextSubnet returns the next available subnet of the desired mask size, +// starting from the maximum IP of the offset subnet. +// If the IP exceeds the maximum IP then the second return value is true. +func NextSubnet(network *net.IPNet, prefixLen int) (*net.IPNet, bool) { + _, currentLast := AddressRange(network) + mask := net.CIDRMask(prefixLen, 8*len(currentLast)) + currentSubnet := &net.IPNet{IP: currentLast.Mask(mask), Mask: mask} + _, last := AddressRange(currentSubnet) + last = Inc(last) + next := &net.IPNet{IP: last.Mask(mask), Mask: mask} + if last.Equal(net.IPv4zero) || last.Equal(net.IPv6zero) { + return next, true + } + return next, false +} + +//Inc increases the IP by one; it returns a new []byte for the IP. +func Inc(IP net.IP) net.IP { + IP = checkIPv4(IP) + incIP := make([]byte, len(IP)) + copy(incIP, IP) + for j := len(incIP) - 1; j >= 0; j-- { + incIP[j]++ + if incIP[j] > 0 { + break + } + } + return incIP +} + +//Dec decreases the IP by one; it returns a new []byte for the IP. +func Dec(IP net.IP) net.IP { + IP = checkIPv4(IP) + decIP := make([]byte, len(IP)) + copy(decIP, IP) + decIP = checkIPv4(decIP) + for j := len(decIP) - 1; j >= 0; j-- { + decIP[j]-- + if decIP[j] < 255 { + break + } + } + return decIP +} + +func checkIPv4(ip net.IP) net.IP { + // Go for some reason allocs IPv6len for IPv4 so we have to correct it + if v4 := ip.To4(); v4 != nil { + return v4 + } + return ip +}
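A quick sketch of what the updated go-cidr surface above enables; this is illustrative only, using the library's upstream import path for the vendored copy. Host now accepts negative indexes that count back from the end of the block, and the new helpers validate and walk subnet layouts:

```go
package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, super, _ := net.ParseCIDR("10.0.0.0/16")

	// Negative host numbers count back from the end: -1 is the last address.
	last, _ := cidr.Host(super, -1)
	fmt.Println(last) // 10.0.255.255

	// Verify two carved-out /24s sit inside the /16 without overlapping.
	_, a, _ := net.ParseCIDR("10.0.1.0/24")
	_, b, _ := net.ParseCIDR("10.0.2.0/24")
	if err := cidr.VerifyNoOverlap([]*net.IPNet{a, b}, super); err != nil {
		fmt.Println("layout error:", err)
	}

	// NextSubnet walks to the /24 after b; the bool flags address-space rollover.
	next, rolled := cidr.NextSubnet(b, 24)
	fmt.Println(next, rolled) // 10.0.3.0/24 false
}
```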
diff --git a/vendor/github.com/apparentlymart/go-textseg/LICENSE b/vendor/github.com/apparentlymart/go-textseg/LICENSE new file mode 100644 index 00000000..684b03b4 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/LICENSE @@ -0,0 +1,95 @@ +Copyright (c) 2017 Martin Atkins + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------- + +Unicode table generation programs are under a separate copyright and license: + +Copyright (c) 2014 Couchbase, Inc. +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file +except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the +License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +either express or implied. See the License for the specific language governing permissions +and limitations under the License. + +--------- + +Grapheme break data is provided as part of the Unicode character database, +copyright 2016 Unicode, Inc, which is provided with the following license: + +Unicode Data Files include all data files under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +Unicode Data Files do not include PDF online code charts under the +directory http://www.unicode.org/Public/. + +Software includes any source code published in the Unicode Standard +or under the directories +http://www.unicode.org/Public/, http://www.unicode.org/reports/, +http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and +http://www.unicode.org/utility/trac/browser/. + +NOTICE TO USER: Carefully read the following legal agreement. +BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S +DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"), +YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE +TERMS AND CONDITIONS OF THIS AGREEMENT. +IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE +THE DATA FILES OR SOFTWARE. + +COPYRIGHT AND PERMISSION NOTICE + +Copyright © 1991-2017 Unicode, Inc. All rights reserved. +Distributed under the Terms of Use in http://www.unicode.org/copyright.html. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of the Unicode data files and any associated documentation +(the "Data Files") or Unicode software and any associated documentation +(the "Software") to deal in the Data Files or Software +without restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, and/or sell copies of +the Data Files or Software, and to permit persons to whom the Data Files +or Software are furnished to do so, provided that either +(a) this copyright and permission notice appear with all copies +of the Data Files or Software, or +(b) this copyright and permission notice appear in associated +Documentation. + +THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT OF THIRD PARTY RIGHTS. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS +NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL +DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, +DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER +TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THE DATA FILES OR SOFTWARE. + +Except as contained in this notice, the name of a copyright holder +shall not be used in advertising or otherwise to promote the sale, +use or other dealings in these Data Files or Software without prior +written authorization of the copyright holder.
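The textseg files vendored below expose two small helpers, AllTokens and TokenCount, which drive any bufio.SplitFunc over a buffer. A minimal sketch of using them with the ScanGraphemeClusters split function defined in grapheme_clusters.go (whose generated tables follow); the import path and printed output are illustrative:

```go
package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/textseg"
)

func main() {
	// "e" plus a combining acute accent is two code points but one cluster.
	input := []byte("cafe\u0301")

	clusters, err := textseg.AllTokens(input, textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	for _, c := range clusters {
		fmt.Printf("%q\n", c) // "c", "a", "f", then the accented "e" cluster
	}

	n, _ := textseg.TokenCount(input, textseg.ScanGraphemeClusters)
	fmt.Println(n, "clusters in", len(input), "bytes") // 4 clusters in 6 bytes
}
```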
diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go new file mode 100644 index 00000000..5752e9ef --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/all_tokens.go @@ -0,0 +1,30 @@ +package textseg + +import ( + "bufio" + "bytes" +) + +// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of +// all of the recognized tokens in the given buffer. +func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret [][]byte + for scanner.Scan() { + ret = append(ret, scanner.Bytes()) + } + return ret, scanner.Err() +} + +// TokenCount is a utility that uses a bufio.SplitFunc to count the number of +// recognized tokens in the given buffer. +func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) { + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Split(splitFunc) + var ret int + for scanner.Scan() { + ret++ + } + return ret, scanner.Err() +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go new file mode 100644 index 00000000..81f3a747 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/generate.go @@ -0,0 +1,7 @@ +package textseg + +//go:generate go run make_tables.go -output tables.go +//go:generate go run make_test_tables.go -output tables_test.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,E_Base,E_Modifier,ZWJ,Glue_After_Zwj,E_Base_GAZ" -o grapheme_clusters_table.rl +//go:generate ragel -Z grapheme_clusters.rl +//go:generate gofmt -w grapheme_clusters.go diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go new file mode 100644 index 00000000..012bc690 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.go @@ -0,0 +1,5276 @@ + +// line 1 "grapheme_clusters.rl" +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. 
DO NOT EDIT + +// line 13 "grapheme_clusters.go" +var _graphclust_actions []byte = []byte{ + 0, 1, 0, 1, 4, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 21, 2, 1, + 7, 2, 1, 8, 2, 2, 3, 2, + 5, 1, 3, 0, 1, 8, 3, 5, + 0, 1, 3, 5, 1, 6, +} + +var _graphclust_key_offsets []int16 = []int16{ + 0, 0, 1, 3, 5, 7, 10, 15, + 17, 20, 28, 31, 33, 35, 37, 67, + 75, 77, 81, 84, 89, 94, 104, 116, + 122, 127, 137, 140, 147, 151, 159, 169, + 173, 181, 183, 191, 194, 196, 201, 203, + 210, 212, 220, 221, 242, 246, 252, 257, + 259, 263, 267, 269, 273, 275, 278, 282, + 284, 291, 293, 297, 301, 305, 307, 309, + 318, 322, 327, 329, 335, 337, 338, 340, + 341, 343, 345, 347, 349, 364, 368, 370, + 372, 377, 381, 385, 387, 389, 393, 397, + 399, 403, 410, 415, 419, 422, 423, 427, + 434, 439, 440, 441, 443, 452, 454, 477, + 481, 483, 487, 491, 492, 496, 500, 503, + 505, 510, 523, 525, 527, 529, 531, 535, + 539, 541, 543, 545, 549, 553, 557, 559, + 561, 563, 565, 566, 568, 574, 580, 586, + 588, 592, 596, 601, 604, 614, 616, 618, + 621, 623, 625, 627, 629, 632, 637, 639, + 642, 650, 653, 655, 657, 659, 690, 698, + 700, 704, 711, 723, 730, 744, 750, 768, + 779, 785, 797, 800, 809, 814, 824, 830, + 844, 850, 862, 874, 878, 880, 886, 888, + 895, 898, 906, 907, 928, 937, 945, 951, + 953, 957, 961, 966, 972, 974, 977, 990, + 995, 1009, 1011, 1020, 1027, 1038, 1048, 1056, + 1067, 1071, 1076, 1078, 1080, 1082, 1083, 1085, + 1087, 1089, 1091, 1106, 1110, 1112, 1114, 1122, + 1130, 1132, 1136, 1147, 1150, 1160, 1164, 1171, + 1179, 1185, 1188, 1189, 1193, 1200, 1205, 1206, + 1207, 1209, 1218, 1220, 1243, 1248, 1250, 1259, + 1264, 1265, 1274, 1280, 1290, 1295, 1302, 1316, + 1320, 1325, 1336, 1339, 1349, 1353, 1362, 1364, + 1372, 1379, 1385, 1392, 1396, 1398, 1400, 1402, + 1403, 1405, 1411, 1419, 1425, 1427, 1431, 1435, + 1440, 1443, 1453, 1455, 1457, 1458, 1460, 1461, + 1467, 1469, 1471, 1471, 1472, 1473, 1474, 1480, + 1482, 1484, 1484, 1490, 1492, 1497, 1502, 1504, + 1506, 1508, 1511, 1516, 1518, 1521, 1529, 1532, + 1534, 1536, 1538, 1568, 1576, 1578, 1582, 1585, + 1590, 1595, 1605, 1617, 1623, 1628, 1638, 1641, + 1648, 1652, 1660, 1670, 1674, 1682, 1684, 1692, + 1695, 1697, 1702, 1704, 1711, 1713, 1721, 1722, + 1743, 1747, 1753, 1758, 1760, 1764, 1768, 1770, + 1774, 1776, 1779, 1783, 1785, 1792, 1794, 1798, + 1802, 1806, 1808, 1810, 1819, 1823, 1828, 1830, + 1836, 1838, 1839, 1841, 1842, 1844, 1846, 1848, + 1850, 1865, 1869, 1871, 1873, 1878, 1882, 1886, + 1888, 1890, 1894, 1898, 1900, 1904, 1911, 1916, + 1920, 1923, 1924, 1928, 1935, 1940, 1941, 1942, + 1944, 1953, 1955, 1978, 1982, 1984, 1988, 1992, + 1993, 1997, 2001, 2004, 2006, 2011, 2024, 2026, + 2028, 2030, 2032, 2036, 2040, 2042, 2044, 2046, + 2050, 2054, 2058, 2060, 2062, 2064, 2066, 2067, + 2069, 2075, 2081, 2087, 2089, 2093, 2097, 2102, + 2105, 2115, 2117, 2119, 2122, 2124, 2126, 2128, + 2130, 2133, 2138, 2140, 2143, 2151, 2154, 2156, + 2158, 2160, 2191, 2199, 2201, 2205, 2212, 2224, + 2231, 2245, 2251, 2269, 2280, 2286, 2298, 2301, + 2310, 2315, 2325, 2331, 2345, 2351, 2363, 2375, + 2379, 2381, 2387, 2389, 2396, 2399, 2407, 2408, + 2429, 2438, 2446, 2452, 2454, 2458, 2462, 2467, + 2473, 2475, 2478, 2491, 2496, 2510, 2512, 2521, + 2528, 2539, 2549, 2557, 2568, 2572, 2577, 2579, + 2581, 2583, 2584, 2586, 2588, 2590, 2592, 2607, + 2611, 2613, 2615, 2623, 2631, 2633, 2637, 2648, + 2651, 2661, 2665, 2672, 2680, 2686, 2689, 2690, + 2694, 2701, 2706, 2707, 2708, 2710, 2719, 2721, + 2744, 2749, 2751, 2760, 2765, 2766, 2775, 
2781, + 2791, 2796, 2803, 2817, 2821, 2826, 2837, 2840, + 2850, 2854, 2863, 2865, 2873, 2880, 2886, 2893, + 2897, 2899, 2901, 2903, 2904, 2906, 2912, 2920, + 2926, 2928, 2932, 2936, 2941, 2944, 2954, 2956, + 2958, 2959, 2961, 2962, 2968, 2970, 2972, 2972, + 2973, 2974, 2975, 2981, 2983, 2985, 2985, 2991, + 2993, 2997, 3003, 3006, 3009, 3013, 3016, 3019, + 3026, 3028, 3052, 3054, 3078, 3080, 3082, 3105, + 3107, 3109, 3110, 3112, 3114, 3116, 3122, 3124, + 3156, 3160, 3165, 3188, 3190, 3192, 3194, 3196, + 3199, 3201, 3203, 3207, 3207, 3263, 3319, 3350, + 3355, 3359, 3366, 3374, 3378, 3381, 3384, 3390, + 3392, 3412, 3418, 3423, 3425, 3427, 3430, 3432, + 3434, 3438, 3494, 3550, 3581, 3586, 3594, 3598, + 3600, 3605, 3611, 3615, 3618, 3624, 3627, 3631, + 3634, 3638, 3651, 3655, 3662, 3663, 3665, 3668, + 3678, 3698, 3705, 3709, 3716, 3726, 3733, 3736, + 3751, 3753, 3756, 3761, 3763, 3766, 3769, 3773, + 3776, 3779, 3786, 3788, 3790, 3792, 3794, 3797, + 3802, 3804, 3807, 3815, 3818, 3820, 3822, 3824, + 3854, 3862, 3864, 3868, 3871, 3876, 3881, 3891, + 3903, 3909, 3914, 3924, 3927, 3934, 3938, 3946, + 3956, 3960, 3968, 3970, 3978, 3981, 3983, 3988, + 3990, 3997, 3999, 4007, 4008, 4029, 4033, 4039, + 4044, 4046, 4050, 4054, 4056, 4060, 4062, 4065, + 4069, 4071, 4078, 4080, 4084, 4088, 4092, 4094, + 4096, 4105, 4109, 4114, 4116, 4122, 4124, 4125, + 4127, 4128, 4130, 4132, 4134, 4136, 4151, 4155, + 4157, 4159, 4164, 4168, 4172, 4174, 4176, 4180, + 4184, 4186, 4190, 4197, 4202, 4206, 4209, 4210, + 4214, 4221, 4226, 4227, 4228, 4230, 4239, 4241, + 4264, 4268, 4270, 4274, 4278, 4279, 4283, 4287, + 4290, 4292, 4297, 4310, 4312, 4314, 4316, 4318, + 4322, 4326, 4328, 4330, 4332, 4336, 4340, 4344, + 4346, 4348, 4350, 4352, 4353, 4355, 4361, 4367, + 4373, 4375, 4379, 4383, 4388, 4391, 4401, 4403, + 4405, 4408, 4410, 4412, 4414, 4416, 4419, 4424, + 4426, 4429, 4437, 4440, 4442, 4444, 4446, 4477, + 4485, 4487, 4491, 4498, 4510, 4517, 4531, 4537, + 4555, 4566, 4572, 4584, 4587, 4596, 4601, 4611, + 4617, 4631, 4637, 4649, 4661, 4665, 4667, 4673, + 4675, 4682, 4685, 4693, 4694, 4715, 4724, 4732, + 4738, 4740, 4744, 4748, 4753, 4759, 4761, 4764, + 4777, 4782, 4796, 4798, 4807, 4814, 4825, 4835, + 4843, 4854, 4858, 4863, 4865, 4867, 4869, 4870, + 4872, 4874, 4876, 4878, 4893, 4897, 4899, 4901, + 4909, 4917, 4919, 4923, 4934, 4937, 4947, 4951, + 4958, 4966, 4972, 4975, 4976, 4980, 4987, 4992, + 4993, 4994, 4996, 5005, 5007, 5030, 5035, 5037, + 5046, 5051, 5052, 5061, 5067, 5077, 5082, 5089, + 5103, 5107, 5112, 5123, 5126, 5136, 5140, 5149, + 5151, 5159, 5166, 5172, 5179, 5183, 5185, 5187, + 5189, 5190, 5192, 5198, 5206, 5212, 5214, 5218, + 5222, 5227, 5230, 5240, 5242, 5244, 5245, 5247, + 5248, 5254, 5256, 5258, 5258, 5259, 5260, 5261, + 5267, 5269, 5271, 5271, 5277, 5301, 5303, 5327, + 5329, 5331, 5354, 5356, 5358, 5359, 5361, 5363, + 5365, 5371, 5373, 5405, 5409, 5414, 5437, 5439, + 5441, 5443, 5445, 5448, 5450, 5452, 5456, 5456, + 5512, 5568, 5599, 5604, 5607, 5614, 5626, 5628, + 5630, 5632, 5635, 5640, 5642, 5645, 5653, 5656, + 5658, 5660, 5662, 5692, 5700, 5702, 5706, 5709, + 5714, 5719, 5729, 5741, 5747, 5752, 5762, 5765, + 5772, 5776, 5784, 5794, 5798, 5806, 5808, 5816, + 5819, 5821, 5826, 5828, 5835, 5837, 5845, 5846, + 5867, 5871, 5877, 5882, 5884, 5888, 5892, 5894, + 5898, 5900, 5903, 5907, 5909, 5916, 5918, 5922, + 5926, 5930, 5932, 5934, 5943, 5947, 5952, 5954, + 5956, 5958, 5959, 5961, 5963, 5965, 5967, 5982, + 5986, 5988, 5990, 5995, 5999, 6003, 6005, 6007, + 6011, 6015, 6017, 6021, 6028, 6033, 6037, 
6040, + 6041, 6045, 6051, 6056, 6057, 6058, 6060, 6069, + 6071, 6094, 6098, 6100, 6104, 6108, 6109, 6113, + 6117, 6120, 6122, 6127, 6140, 6142, 6144, 6146, + 6148, 6152, 6156, 6158, 6160, 6162, 6166, 6170, + 6174, 6176, 6178, 6180, 6182, 6183, 6185, 6191, + 6197, 6203, 6205, 6209, 6213, 6218, 6221, 6231, + 6233, 6235, 6236, 6242, 6244, 6246, 6246, 6252, + 6253, 6260, 6263, 6265, 6267, 6269, 6271, 6274, + 6279, 6281, 6284, 6292, 6295, 6297, 6299, 6301, + 6332, 6340, 6342, 6346, 6353, 6365, 6372, 6386, + 6392, 6410, 6421, 6427, 6439, 6442, 6451, 6456, + 6466, 6472, 6486, 6492, 6504, 6516, 6520, 6522, + 6528, 6530, 6537, 6540, 6548, 6549, 6570, 6579, + 6587, 6593, 6595, 6599, 6603, 6608, 6614, 6616, + 6619, 6632, 6637, 6651, 6653, 6662, 6669, 6680, + 6690, 6698, 6709, 6713, 6718, 6720, 6722, 6724, + 6725, 6727, 6729, 6731, 6733, 6748, 6752, 6754, + 6756, 6764, 6772, 6774, 6778, 6789, 6792, 6802, + 6806, 6813, 6821, 6827, 6830, 6831, 6835, 6842, + 6847, 6848, 6849, 6851, 6860, 6862, 6885, 6890, + 6892, 6901, 6906, 6907, 6916, 6922, 6932, 6937, + 6944, 6958, 6962, 6967, 6978, 6981, 6991, 6995, + 7004, 7006, 7014, 7021, 7027, 7034, 7038, 7040, + 7042, 7044, 7045, 7047, 7053, 7061, 7067, 7069, + 7073, 7077, 7082, 7085, 7095, 7097, 7099, 7100, + 7102, 7103, 7109, 7111, 7113, 7113, 7114, 7115, + 7121, 7124, 7126, 7128, 7130, 7133, 7138, 7140, + 7143, 7151, 7154, 7156, 7158, 7160, 7191, 7199, + 7201, 7205, 7212, 7214, 7216, 7218, 7221, 7226, + 7228, 7231, 7239, 7242, 7244, 7246, 7248, 7278, + 7286, 7288, 7292, 7295, 7300, 7305, 7315, 7327, + 7333, 7338, 7348, 7351, 7358, 7362, 7370, 7380, + 7384, 7392, 7394, 7402, 7405, 7407, 7412, 7414, + 7421, 7423, 7431, 7432, 7453, 7457, 7463, 7468, + 7470, 7474, 7478, 7480, 7484, 7486, 7489, 7493, + 7495, 7502, 7504, 7508, 7512, 7516, 7518, 7520, + 7529, 7533, 7538, 7540, 7546, 7548, 7549, 7551, + 7552, 7554, 7556, 7558, 7560, 7575, 7579, 7581, + 7583, 7588, 7592, 7596, 7598, 7600, 7604, 7608, + 7610, 7614, 7621, 7626, 7630, 7633, 7634, 7638, + 7645, 7650, 7651, 7652, 7654, 7663, 7665, 7688, + 7692, 7694, 7698, 7702, 7703, 7707, 7711, 7714, + 7716, 7721, 7734, 7736, 7738, 7740, 7742, 7746, + 7750, 7752, 7754, 7756, 7760, 7764, 7768, 7770, + 7772, 7774, 7776, 7777, 7779, 7785, 7791, 7797, + 7799, 7803, 7807, 7812, 7815, 7825, 7827, 7829, + 7832, 7834, 7835, 7836, 7837, 7843, 7845, 7847, + 7847, 7853, 7865, 7872, 7886, 7892, 7910, 7921, + 7927, 7939, 7942, 7951, 7956, 7966, 7972, 7986, + 7992, 8004, 8016, 8020, 8022, 8028, 8030, 8037, + 8040, 8048, 8049, 8070, 8079, 8087, 8093, 8095, + 8099, 8103, 8108, 8114, 8116, 8119, 8132, 8137, + 8151, 8153, 8162, 8169, 8180, 8190, 8198, 8209, + 8213, 8218, 8220, 8222, 8224, 8225, 8227, 8229, + 8231, 8233, 8248, 8252, 8254, 8256, 8264, 8272, + 8274, 8278, 8289, 8292, 8302, 8306, 8313, 8321, + 8327, 8330, 8331, 8335, 8342, 8347, 8348, 8349, + 8351, 8360, 8362, 8385, 8390, 8392, 8401, 8406, + 8407, 8416, 8422, 8432, 8437, 8444, 8458, 8462, + 8467, 8478, 8481, 8491, 8495, 8504, 8506, 8514, + 8521, 8527, 8534, 8538, 8540, 8542, 8544, 8545, + 8547, 8553, 8561, 8567, 8569, 8573, 8577, 8582, + 8585, 8595, 8597, 8599, 8600, 8602, 8603, 8609, + 8611, 8613, 8613, 8616, 8622, 8624, 8644, 8650, + 8655, 8657, 8659, 8662, 8664, 8666, 8670, 8726, + 8782, 8817, 8822, 8830, 8832, 8832, 8834, 8838, + 8841, 8848, 8854, 8858, 8861, 8867, 8870, 8876, + 8879, 8885, 8898, 8902, 8904, 8906, 8908, 8911, + 8916, 8918, 8921, 8929, 8932, 8934, 8936, 8938, + 8968, 8976, 8978, 8982, 8985, 8990, 8995, 9005, + 9017, 9023, 9028, 9038, 9041, 9048, 9052, 
9060, + 9070, 9074, 9082, 9084, 9092, 9095, 9097, 9102, + 9104, 9111, 9113, 9121, 9122, 9143, 9147, 9153, + 9158, 9160, 9164, 9168, 9170, 9174, 9176, 9179, + 9183, 9185, 9192, 9194, 9198, 9202, 9206, 9208, + 9210, 9219, 9223, 9228, 9230, 9236, 9238, 9239, + 9241, 9242, 9244, 9246, 9248, 9250, 9265, 9269, + 9271, 9273, 9278, 9282, 9286, 9288, 9290, 9294, + 9298, 9300, 9304, 9311, 9316, 9320, 9323, 9324, + 9328, 9335, 9340, 9341, 9342, 9344, 9353, 9355, + 9378, 9382, 9384, 9388, 9392, 9393, 9397, 9401, + 9404, 9406, 9411, 9424, 9426, 9428, 9430, 9432, + 9436, 9440, 9442, 9444, 9446, 9450, 9454, 9458, + 9460, 9462, 9464, 9466, 9467, 9469, 9475, 9481, + 9487, 9489, 9493, 9497, 9502, 9505, 9515, 9517, + 9519, 9522, 9524, 9526, 9528, 9530, 9533, 9538, + 9540, 9543, 9551, 9554, 9556, 9558, 9560, 9591, + 9599, 9601, 9605, 9612, 9624, 9631, 9645, 9651, + 9669, 9680, 9686, 9698, 9701, 9710, 9715, 9725, + 9731, 9745, 9751, 9763, 9775, 9779, 9781, 9787, + 9789, 9796, 9799, 9807, 9808, 9829, 9838, 9846, + 9852, 9854, 9858, 9862, 9867, 9873, 9875, 9878, + 9891, 9896, 9910, 9912, 9921, 9928, 9939, 9949, + 9957, 9968, 9972, 9977, 9979, 9981, 9983, 9984, + 9986, 9988, 9990, 9992, 10007, 10011, 10013, 10015, + 10023, 10031, 10033, 10037, 10048, 10051, 10061, 10065, + 10072, 10080, 10086, 10089, 10090, 10094, 10101, 10106, + 10107, 10108, 10110, 10119, 10121, 10144, 10149, 10151, + 10160, 10165, 10166, 10175, 10181, 10191, 10196, 10203, + 10217, 10221, 10226, 10237, 10240, 10250, 10254, 10263, + 10265, 10273, 10280, 10286, 10293, 10297, 10299, 10301, + 10303, 10304, 10306, 10312, 10320, 10326, 10328, 10332, + 10336, 10341, 10344, 10354, 10356, 10358, 10359, 10361, + 10362, 10368, 10370, 10372, 10372, 10373, 10374, 10375, + 10381, 10383, 10385, 10385, 10391, 10398, 10399, 10401, + 10404, 10414, 10434, 10441, 10445, 10452, 10462, 10469, + 10472, 10487, 10489, 10492, 10501, 10505, 10509, 10538, + 10558, 10578, 10598, 10620, 10640, 10660, 10680, 10703, + 10724, 10745, 10766, 10786, 10809, 10829, 10849, 10869, + 10890, 10911, 10932, 10952, 10972, 10992, 11012, 11032, + 11052, 11072, 11092, 11112, +} + +var _graphclust_trans_keys []byte = []byte{ + 10, 128, 255, 176, 255, 131, 137, 191, + 145, 189, 135, 129, 130, 132, 133, 144, + 154, 176, 139, 159, 150, 156, 159, 164, + 167, 168, 170, 173, 145, 176, 255, 139, + 255, 166, 176, 171, 179, 160, 161, 163, + 164, 165, 167, 169, 171, 173, 174, 175, + 176, 177, 179, 180, 181, 182, 183, 184, + 185, 186, 187, 188, 189, 190, 191, 166, + 170, 172, 178, 150, 153, 155, 163, 165, + 167, 169, 173, 153, 155, 148, 161, 163, + 255, 189, 132, 185, 144, 152, 161, 164, + 255, 188, 129, 131, 190, 255, 133, 134, + 137, 138, 142, 150, 152, 161, 164, 255, + 131, 134, 137, 138, 142, 144, 146, 175, + 178, 180, 182, 255, 134, 138, 142, 161, + 164, 255, 188, 129, 131, 190, 191, 128, + 132, 135, 136, 139, 141, 150, 151, 162, + 163, 130, 190, 191, 151, 128, 130, 134, + 136, 138, 141, 128, 131, 190, 255, 133, + 137, 142, 148, 151, 161, 164, 255, 128, + 132, 134, 136, 138, 141, 149, 150, 162, + 163, 129, 131, 190, 255, 133, 137, 142, + 150, 152, 161, 164, 255, 130, 131, 138, + 150, 143, 148, 152, 159, 178, 179, 177, + 179, 186, 135, 142, 177, 179, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 177, 191, 128, 132, 134, 135, + 141, 151, 153, 188, 134, 128, 129, 130, + 141, 156, 157, 158, 159, 160, 162, 164, + 168, 169, 170, 172, 173, 174, 175, 176, + 179, 183, 173, 183, 185, 190, 150, 153, + 158, 160, 177, 180, 130, 141, 157, 132, + 134, 157, 159, 146, 148, 178, 180, 146, + 147, 
178, 179, 180, 255, 148, 156, 158, + 255, 139, 141, 169, 133, 134, 160, 171, + 176, 187, 151, 155, 160, 162, 191, 149, + 158, 165, 188, 176, 190, 128, 132, 180, + 255, 133, 170, 180, 255, 128, 130, 161, + 173, 166, 179, 164, 183, 173, 144, 146, + 148, 168, 178, 180, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 128, 131, 157, 179, 181, 183, 144, + 176, 164, 175, 177, 191, 160, 191, 128, + 130, 170, 175, 153, 154, 153, 154, 155, + 160, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 175, 175, 178, 180, 189, + 158, 159, 176, 177, 130, 134, 139, 163, + 167, 128, 129, 180, 255, 134, 159, 178, + 255, 166, 173, 135, 147, 128, 131, 179, + 255, 129, 164, 166, 255, 169, 182, 131, + 188, 140, 141, 176, 178, 180, 183, 184, + 190, 191, 129, 171, 175, 181, 182, 163, + 170, 172, 173, 172, 184, 190, 158, 128, + 143, 160, 175, 144, 145, 150, 155, 157, + 158, 159, 135, 139, 141, 168, 171, 189, + 160, 182, 186, 191, 129, 131, 133, 134, + 140, 143, 184, 186, 165, 166, 128, 129, + 130, 132, 133, 134, 135, 136, 139, 140, + 141, 144, 145, 146, 147, 150, 151, 152, + 153, 154, 156, 176, 178, 128, 130, 184, + 255, 135, 190, 131, 175, 187, 255, 128, + 130, 167, 180, 179, 128, 130, 179, 255, + 129, 137, 141, 255, 190, 172, 183, 159, + 170, 188, 128, 131, 190, 191, 151, 128, + 132, 135, 136, 139, 141, 162, 163, 166, + 172, 176, 180, 181, 191, 128, 134, 176, + 255, 132, 255, 175, 181, 184, 255, 129, + 155, 158, 255, 129, 255, 171, 183, 157, + 171, 175, 182, 184, 191, 146, 167, 169, + 182, 171, 172, 189, 190, 176, 180, 176, + 182, 145, 190, 143, 146, 178, 157, 158, + 133, 134, 137, 168, 169, 170, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 128, 182, 187, 255, + 173, 180, 182, 255, 132, 155, 159, 161, + 175, 128, 163, 165, 128, 134, 136, 152, + 155, 161, 163, 164, 166, 170, 144, 150, + 132, 138, 145, 146, 151, 166, 169, 0, + 127, 176, 255, 131, 137, 191, 145, 189, + 135, 129, 130, 132, 133, 144, 154, 176, + 139, 159, 150, 156, 159, 164, 167, 168, + 170, 173, 145, 176, 255, 139, 255, 166, + 176, 171, 179, 160, 161, 163, 164, 165, + 166, 167, 169, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 168, 170, 150, 153, 155, 163, 165, 167, + 169, 173, 153, 155, 148, 161, 163, 255, + 131, 187, 189, 132, 185, 190, 255, 141, + 144, 129, 136, 145, 151, 152, 161, 162, + 163, 164, 255, 129, 188, 190, 130, 131, + 191, 255, 141, 151, 129, 132, 133, 134, + 137, 138, 142, 161, 162, 163, 164, 255, + 131, 188, 129, 130, 190, 255, 145, 181, + 129, 130, 131, 134, 135, 136, 137, 138, + 139, 141, 142, 175, 176, 177, 178, 255, + 134, 138, 141, 129, 136, 142, 161, 162, + 163, 164, 255, 129, 188, 130, 131, 190, + 191, 128, 141, 129, 132, 135, 136, 139, + 140, 150, 151, 162, 163, 130, 190, 191, + 128, 141, 151, 129, 130, 134, 136, 138, + 140, 128, 129, 131, 190, 255, 133, 137, + 129, 132, 142, 148, 151, 161, 164, 255, + 129, 188, 190, 191, 130, 131, 130, 134, + 128, 132, 135, 136, 138, 139, 140, 141, + 149, 150, 162, 163, 129, 190, 130, 131, + 191, 255, 133, 137, 141, 151, 129, 132, + 142, 161, 162, 163, 164, 255, 138, 143, + 150, 159, 144, 145, 146, 148, 152, 158, + 178, 179, 177, 179, 180, 186, 135, 142, + 177, 179, 180, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 191, + 177, 190, 128, 132, 134, 135, 141, 151, + 153, 188, 134, 128, 129, 130, 141, 156, + 157, 158, 159, 160, 162, 164, 168, 169, + 170, 172, 173, 174, 175, 176, 179, 183, + 177, 173, 183, 185, 186, 187, 188, 189, + 190, 150, 151, 152, 153, 158, 
160, 177, + 180, 130, 132, 141, 157, 133, 134, 157, + 159, 146, 148, 178, 180, 146, 147, 178, + 179, 182, 180, 189, 190, 255, 134, 157, + 137, 147, 148, 255, 139, 141, 169, 133, + 134, 178, 160, 162, 163, 166, 167, 168, + 169, 171, 176, 184, 185, 187, 155, 151, + 152, 153, 154, 150, 160, 162, 191, 149, + 151, 152, 158, 165, 172, 173, 178, 179, + 188, 176, 190, 132, 181, 187, 128, 131, + 180, 188, 189, 255, 130, 133, 170, 171, + 179, 180, 255, 130, 161, 170, 128, 129, + 162, 165, 166, 167, 168, 173, 167, 173, + 166, 169, 170, 174, 175, 177, 178, 179, + 164, 171, 172, 179, 180, 181, 182, 183, + 161, 173, 180, 144, 146, 148, 168, 178, + 179, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 144, 176, + 175, 177, 191, 160, 191, 128, 130, 170, + 175, 153, 154, 153, 154, 155, 160, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 175, 175, 178, 180, 189, 158, 159, + 176, 177, 130, 134, 139, 167, 163, 164, + 165, 166, 132, 133, 134, 159, 160, 177, + 178, 255, 166, 173, 135, 145, 146, 147, + 131, 179, 188, 128, 130, 180, 181, 182, + 185, 186, 255, 165, 129, 255, 169, 174, + 175, 176, 177, 178, 179, 180, 181, 182, + 131, 140, 141, 188, 176, 178, 180, 183, + 184, 190, 191, 129, 171, 181, 182, 172, + 173, 174, 175, 165, 168, 172, 173, 163, + 170, 172, 184, 190, 158, 128, 143, 160, + 175, 144, 145, 150, 155, 157, 158, 159, + 135, 139, 141, 168, 171, 189, 160, 182, + 186, 191, 129, 131, 133, 134, 140, 143, + 184, 186, 165, 166, 128, 129, 130, 132, + 133, 134, 135, 136, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 156, 176, 178, 129, 128, 130, 184, 255, + 135, 190, 130, 131, 175, 176, 178, 183, + 184, 187, 255, 172, 128, 130, 167, 180, + 179, 130, 128, 129, 179, 181, 182, 190, + 191, 255, 129, 137, 138, 140, 141, 255, + 180, 190, 172, 174, 175, 177, 178, 181, + 182, 183, 159, 160, 162, 163, 170, 188, + 190, 191, 128, 129, 130, 131, 128, 151, + 129, 132, 135, 136, 139, 141, 162, 163, + 166, 172, 176, 180, 181, 183, 184, 191, + 133, 128, 129, 130, 134, 176, 185, 189, + 177, 178, 179, 186, 187, 190, 191, 255, + 129, 132, 255, 175, 190, 176, 177, 178, + 181, 184, 187, 188, 255, 129, 155, 158, + 255, 189, 176, 178, 179, 186, 187, 190, + 191, 255, 129, 255, 172, 182, 171, 173, + 174, 175, 176, 183, 166, 157, 159, 160, + 161, 162, 171, 175, 190, 176, 182, 184, + 191, 169, 177, 180, 146, 167, 170, 182, + 171, 172, 189, 190, 176, 180, 176, 182, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 166, 173, 165, 169, 174, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 128, 182, 187, 255, 173, + 180, 182, 255, 132, 155, 159, 161, 175, + 128, 163, 165, 128, 134, 136, 152, 155, + 161, 163, 164, 166, 170, 144, 150, 132, + 138, 143, 187, 191, 160, 128, 129, 132, + 135, 133, 134, 160, 255, 192, 255, 139, + 168, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 144, 145, 150, 155, + 157, 158, 128, 191, 173, 128, 159, 160, + 191, 156, 128, 133, 134, 191, 0, 127, + 176, 255, 131, 137, 191, 145, 189, 135, + 129, 130, 132, 133, 144, 154, 176, 139, + 159, 150, 156, 159, 164, 167, 168, 170, + 173, 145, 176, 255, 139, 255, 166, 176, + 171, 179, 160, 161, 163, 164, 165, 167, + 169, 171, 173, 174, 175, 176, 177, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 166, 170, 172, 178, + 150, 153, 155, 163, 165, 167, 169, 173, + 153, 155, 148, 161, 163, 255, 189, 132, + 185, 144, 152, 161, 164, 255, 188, 129, + 131, 190, 255, 133, 134, 137, 138, 142, + 150, 152, 161, 164, 255, 131, 134, 137, + 138, 142, 144, 146, 175, 178, 180, 182, + 255, 134, 138, 
142, 161, 164, 255, 188, + 129, 131, 190, 191, 128, 132, 135, 136, + 139, 141, 150, 151, 162, 163, 130, 190, + 191, 151, 128, 130, 134, 136, 138, 141, + 128, 131, 190, 255, 133, 137, 142, 148, + 151, 161, 164, 255, 128, 132, 134, 136, + 138, 141, 149, 150, 162, 163, 129, 131, + 190, 255, 133, 137, 142, 150, 152, 161, + 164, 255, 130, 131, 138, 150, 143, 148, + 152, 159, 178, 179, 177, 179, 186, 135, + 142, 177, 179, 185, 187, 188, 136, 141, + 181, 183, 185, 152, 153, 190, 191, 177, + 191, 128, 132, 134, 135, 141, 151, 153, + 188, 134, 128, 129, 130, 141, 156, 157, + 158, 159, 160, 162, 164, 168, 169, 170, + 172, 173, 174, 175, 176, 179, 183, 173, + 183, 185, 190, 150, 153, 158, 160, 177, + 180, 130, 141, 157, 132, 134, 157, 159, + 146, 148, 178, 180, 146, 147, 178, 179, + 180, 255, 148, 156, 158, 255, 139, 141, + 169, 133, 134, 160, 171, 176, 187, 151, + 155, 160, 162, 191, 149, 158, 165, 188, + 176, 190, 128, 132, 180, 255, 133, 170, + 180, 255, 128, 130, 161, 173, 166, 179, + 164, 183, 173, 144, 146, 148, 168, 178, + 180, 184, 185, 128, 181, 187, 191, 128, + 131, 179, 181, 183, 140, 141, 128, 131, + 157, 179, 181, 183, 144, 176, 164, 175, + 177, 191, 160, 191, 128, 130, 170, 175, + 153, 154, 153, 154, 155, 160, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 175, 175, 178, 180, 189, 158, 159, 176, + 177, 130, 134, 139, 163, 167, 128, 129, + 180, 255, 134, 159, 178, 255, 166, 173, + 135, 147, 128, 131, 179, 255, 129, 164, + 166, 255, 169, 182, 131, 188, 140, 141, + 176, 178, 180, 183, 184, 190, 191, 129, + 171, 175, 181, 182, 163, 170, 172, 173, + 172, 184, 190, 158, 128, 143, 160, 175, + 144, 145, 150, 155, 157, 158, 159, 135, + 139, 141, 168, 171, 189, 160, 182, 186, + 191, 129, 131, 133, 134, 140, 143, 184, + 186, 165, 166, 128, 129, 130, 132, 133, + 134, 135, 136, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 176, 178, 128, 130, 184, 255, 135, 190, + 131, 175, 187, 255, 128, 130, 167, 180, + 179, 128, 130, 179, 255, 129, 137, 141, + 255, 190, 172, 183, 159, 170, 188, 128, + 131, 190, 191, 151, 128, 132, 135, 136, + 139, 141, 162, 163, 166, 172, 176, 180, + 181, 191, 128, 134, 176, 255, 132, 255, + 175, 181, 184, 255, 129, 155, 158, 255, + 129, 255, 171, 183, 157, 171, 175, 182, + 184, 191, 146, 167, 169, 182, 171, 172, + 189, 190, 176, 180, 176, 182, 145, 190, + 143, 146, 178, 157, 158, 133, 134, 137, + 168, 169, 170, 165, 169, 173, 178, 187, + 255, 131, 132, 140, 169, 174, 255, 130, + 132, 128, 182, 187, 255, 173, 180, 182, + 255, 132, 155, 159, 161, 175, 128, 163, + 165, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 144, 150, 132, 138, 145, + 146, 151, 166, 169, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 166, 167, 169, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 168, 170, 150, + 153, 155, 163, 165, 167, 169, 173, 153, + 155, 148, 161, 163, 255, 131, 187, 189, + 132, 185, 190, 255, 141, 144, 129, 136, + 145, 151, 152, 161, 162, 163, 164, 255, + 129, 188, 190, 130, 131, 191, 255, 141, + 151, 129, 132, 133, 134, 137, 138, 142, + 161, 162, 163, 164, 255, 131, 188, 129, + 130, 190, 255, 145, 181, 129, 130, 131, + 134, 135, 136, 137, 138, 139, 141, 142, + 175, 176, 177, 178, 255, 134, 138, 141, + 129, 136, 142, 161, 162, 163, 164, 255, + 129, 188, 130, 131, 190, 191, 128, 141, + 129, 132, 135, 136, 139, 140, 150, 151, + 
162, 163, 130, 190, 191, 128, 141, 151, + 129, 130, 134, 136, 138, 140, 128, 129, + 131, 190, 255, 133, 137, 129, 132, 142, + 148, 151, 161, 164, 255, 129, 188, 190, + 191, 130, 131, 130, 134, 128, 132, 135, + 136, 138, 139, 140, 141, 149, 150, 162, + 163, 129, 190, 130, 131, 191, 255, 133, + 137, 141, 151, 129, 132, 142, 161, 162, + 163, 164, 255, 138, 143, 150, 159, 144, + 145, 146, 148, 152, 158, 178, 179, 177, + 179, 180, 186, 135, 142, 177, 179, 180, + 185, 187, 188, 136, 141, 181, 183, 185, + 152, 153, 190, 191, 191, 177, 190, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 177, 173, 183, + 185, 186, 187, 188, 189, 190, 150, 151, + 152, 153, 158, 160, 177, 180, 130, 132, + 141, 157, 133, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 182, 180, + 189, 190, 255, 134, 157, 137, 147, 148, + 255, 139, 141, 169, 133, 134, 178, 160, + 162, 163, 166, 167, 168, 169, 171, 176, + 184, 185, 187, 155, 151, 152, 153, 154, + 150, 160, 162, 191, 149, 151, 152, 158, + 165, 172, 173, 178, 179, 188, 176, 190, + 132, 181, 187, 128, 131, 180, 188, 189, + 255, 130, 133, 170, 171, 179, 180, 255, + 130, 161, 170, 128, 129, 162, 165, 166, + 167, 168, 173, 167, 173, 166, 169, 170, + 174, 175, 177, 178, 179, 164, 171, 172, + 179, 180, 181, 182, 183, 161, 173, 180, + 144, 146, 148, 168, 178, 179, 184, 185, + 128, 181, 187, 191, 128, 131, 179, 181, + 183, 140, 141, 144, 176, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 167, 163, 164, 165, 166, 132, + 133, 134, 159, 160, 177, 178, 255, 166, + 173, 135, 145, 146, 147, 131, 179, 188, + 128, 130, 180, 181, 182, 185, 186, 255, + 165, 129, 255, 169, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 131, 140, 141, + 188, 176, 178, 180, 183, 184, 190, 191, + 129, 171, 181, 182, 172, 173, 174, 175, + 165, 168, 172, 173, 163, 170, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 129, 128, 130, 184, 255, 135, 190, 130, + 131, 175, 176, 178, 183, 184, 187, 255, + 172, 128, 130, 167, 180, 179, 130, 128, + 129, 179, 181, 182, 190, 191, 255, 129, + 137, 138, 140, 141, 255, 180, 190, 172, + 174, 175, 177, 178, 181, 182, 183, 159, + 160, 162, 163, 170, 188, 190, 191, 128, + 129, 130, 131, 128, 151, 129, 132, 135, + 136, 139, 141, 162, 163, 166, 172, 176, + 180, 181, 183, 184, 191, 133, 128, 129, + 130, 134, 176, 185, 189, 177, 178, 179, + 186, 187, 190, 191, 255, 129, 132, 255, + 175, 190, 176, 177, 178, 181, 184, 187, + 188, 255, 129, 155, 158, 255, 189, 176, + 178, 179, 186, 187, 190, 191, 255, 129, + 255, 172, 182, 171, 173, 174, 175, 176, + 183, 166, 157, 159, 160, 161, 162, 171, + 175, 190, 176, 182, 184, 191, 169, 177, + 180, 146, 167, 170, 182, 171, 172, 189, + 190, 176, 180, 176, 182, 143, 146, 178, + 157, 158, 133, 134, 137, 168, 169, 170, + 166, 173, 165, 169, 174, 178, 187, 255, + 131, 132, 140, 169, 174, 255, 130, 132, + 128, 182, 187, 255, 173, 180, 182, 255, + 132, 155, 159, 161, 175, 128, 163, 165, + 128, 134, 136, 152, 155, 161, 163, 164, + 166, 170, 144, 150, 132, 138, 143, 187, + 191, 160, 128, 129, 132, 135, 133, 134, + 160, 255, 192, 255, 139, 
168, 160, 128, + 129, 132, 135, 133, 134, 160, 255, 192, + 255, 144, 145, 150, 155, 157, 158, 128, + 191, 160, 172, 174, 191, 128, 133, 134, + 155, 157, 191, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 128, 255, 128, 129, 130, 132, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 160, 255, 128, 129, + 130, 133, 134, 135, 141, 156, 157, 158, + 159, 160, 162, 164, 168, 169, 170, 172, + 173, 174, 175, 176, 179, 183, 160, 255, + 168, 255, 128, 129, 130, 134, 135, 141, + 156, 157, 158, 159, 160, 162, 164, 168, + 169, 170, 172, 173, 174, 175, 176, 179, + 183, 168, 255, 192, 255, 159, 139, 187, + 158, 159, 176, 255, 135, 138, 139, 187, + 188, 255, 168, 255, 153, 154, 155, 160, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 175, 177, 178, 179, 180, 181, + 182, 184, 185, 186, 187, 188, 189, 191, + 176, 190, 192, 255, 135, 147, 160, 188, + 128, 156, 184, 129, 255, 128, 129, 130, + 133, 134, 141, 156, 157, 158, 159, 160, + 162, 164, 168, 169, 170, 172, 173, 174, + 175, 176, 179, 183, 158, 159, 135, 255, + 148, 176, 140, 168, 132, 160, 188, 152, + 180, 144, 172, 136, 164, 192, 255, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 160, 161, 162, 164, 165, 166, + 167, 168, 169, 171, 172, 173, 174, 175, + 176, 178, 179, 180, 181, 182, 183, 185, + 186, 187, 188, 189, 190, 128, 191, 129, + 130, 131, 132, 133, 134, 136, 137, 138, + 139, 140, 141, 143, 144, 145, 146, 147, + 148, 150, 151, 152, 153, 154, 155, 157, + 158, 159, 128, 156, 160, 255, 136, 164, + 175, 176, 255, 128, 141, 143, 191, 128, + 129, 152, 155, 156, 130, 191, 140, 141, + 128, 138, 144, 167, 175, 191, 128, 159, + 176, 191, 157, 128, 191, 185, 128, 191, + 128, 137, 138, 141, 142, 191, 128, 191, + 165, 177, 178, 179, 180, 181, 182, 184, + 185, 186, 187, 188, 189, 191, 128, 175, + 176, 190, 192, 255, 128, 159, 160, 188, + 189, 191, 128, 156, 184, 129, 255, 148, + 176, 140, 168, 132, 160, 188, 152, 180, + 144, 172, 136, 164, 192, 255, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 160, 161, 162, 164, 165, 166, 167, + 168, 169, 171, 172, 173, 174, 175, 176, + 178, 179, 180, 181, 182, 183, 185, 186, + 187, 188, 189, 190, 128, 191, 129, 130, + 131, 132, 133, 134, 136, 137, 138, 139, + 140, 141, 143, 144, 145, 146, 147, 148, + 150, 151, 152, 153, 154, 155, 157, 158, + 159, 128, 156, 160, 255, 136, 164, 175, + 176, 255, 135, 138, 139, 187, 188, 191, + 192, 255, 187, 191, 128, 190, 128, 190, + 188, 128, 175, 190, 191, 145, 155, 157, + 159, 128, 191, 130, 135, 128, 191, 189, + 128, 191, 128, 129, 130, 131, 132, 191, + 178, 128, 191, 128, 159, 164, 191, 133, + 128, 191, 128, 178, 187, 191, 135, 142, + 143, 145, 146, 149, 150, 153, 154, 155, + 164, 128, 
191, 128, 165, 166, 191, 144, + 145, 150, 155, 157, 158, 159, 135, 166, + 191, 133, 128, 191, 128, 130, 131, 132, + 133, 137, 138, 139, 140, 191, 174, 188, + 128, 129, 130, 131, 132, 133, 134, 144, + 145, 165, 166, 169, 170, 175, 176, 184, + 185, 191, 128, 132, 170, 129, 135, 136, + 191, 181, 186, 128, 191, 144, 128, 148, + 149, 150, 151, 191, 128, 132, 133, 135, + 136, 138, 139, 143, 144, 191, 163, 128, + 179, 180, 182, 183, 191, 128, 129, 191, + 166, 176, 191, 128, 151, 152, 158, 159, + 178, 179, 185, 186, 187, 188, 190, 128, + 191, 160, 128, 191, 128, 129, 135, 132, + 134, 128, 175, 157, 128, 191, 143, 128, + 191, 163, 181, 128, 191, 162, 128, 191, + 142, 128, 191, 132, 133, 134, 135, 160, + 128, 191, 0, 127, 128, 255, 176, 255, + 131, 137, 191, 145, 189, 135, 129, 130, + 132, 133, 144, 154, 176, 139, 159, 150, + 156, 159, 164, 167, 168, 170, 173, 145, + 176, 255, 139, 255, 166, 176, 171, 179, + 160, 161, 163, 164, 165, 167, 169, 171, + 173, 174, 175, 176, 177, 179, 180, 181, + 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 166, 170, 172, 178, 150, 153, + 155, 163, 165, 167, 169, 173, 153, 155, + 148, 161, 163, 255, 189, 132, 185, 144, + 152, 161, 164, 255, 188, 129, 131, 190, + 255, 133, 134, 137, 138, 142, 150, 152, + 161, 164, 255, 131, 134, 137, 138, 142, + 144, 146, 175, 178, 180, 182, 255, 134, + 138, 142, 161, 164, 255, 188, 129, 131, + 190, 191, 128, 132, 135, 136, 139, 141, + 150, 151, 162, 163, 130, 190, 191, 151, + 128, 130, 134, 136, 138, 141, 128, 131, + 190, 255, 133, 137, 142, 148, 151, 161, + 164, 255, 128, 132, 134, 136, 138, 141, + 149, 150, 162, 163, 129, 131, 190, 255, + 133, 137, 142, 150, 152, 161, 164, 255, + 130, 131, 138, 150, 143, 148, 152, 159, + 178, 179, 177, 179, 186, 135, 142, 177, + 179, 185, 187, 188, 136, 141, 181, 183, + 185, 152, 153, 190, 191, 177, 191, 128, + 132, 134, 135, 141, 151, 153, 188, 134, + 128, 129, 130, 141, 156, 157, 158, 159, + 160, 162, 164, 168, 169, 170, 172, 173, + 174, 175, 176, 179, 183, 173, 183, 185, + 190, 150, 153, 158, 160, 177, 180, 130, + 141, 157, 132, 134, 157, 159, 146, 148, + 178, 180, 146, 147, 178, 179, 180, 255, + 148, 156, 158, 255, 139, 141, 169, 133, + 134, 160, 171, 176, 187, 151, 155, 160, + 162, 191, 149, 158, 165, 188, 176, 190, + 128, 132, 180, 255, 133, 170, 180, 255, + 128, 130, 161, 173, 166, 179, 164, 183, + 173, 144, 146, 148, 168, 178, 180, 184, + 185, 128, 181, 187, 191, 128, 131, 179, + 181, 183, 140, 141, 128, 131, 157, 179, + 181, 183, 144, 176, 164, 175, 177, 191, + 160, 191, 128, 130, 170, 175, 153, 154, + 153, 154, 155, 160, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 175, 175, + 178, 180, 189, 158, 159, 176, 177, 130, + 134, 139, 163, 167, 128, 129, 180, 255, + 134, 159, 178, 255, 166, 173, 135, 147, + 128, 131, 179, 255, 129, 164, 166, 255, + 169, 182, 131, 188, 140, 141, 176, 178, + 180, 183, 184, 190, 191, 129, 171, 175, + 181, 182, 163, 170, 172, 173, 172, 184, + 190, 158, 128, 143, 160, 175, 144, 145, + 150, 155, 157, 158, 159, 135, 139, 141, + 168, 171, 189, 160, 182, 186, 191, 129, + 131, 133, 134, 140, 143, 184, 186, 165, + 166, 128, 129, 130, 132, 133, 134, 135, + 136, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 176, 178, + 128, 130, 184, 255, 135, 190, 131, 175, + 187, 255, 128, 130, 167, 180, 179, 128, + 130, 179, 255, 129, 137, 141, 255, 190, + 172, 183, 159, 170, 188, 128, 131, 190, + 191, 151, 128, 132, 135, 136, 139, 141, + 162, 163, 166, 172, 176, 180, 181, 191, + 128, 134, 176, 255, 132, 255, 175, 181, + 184, 255, 129, 155, 158, 255, 129, 
255, + 171, 183, 157, 171, 175, 182, 184, 191, + 146, 167, 169, 182, 171, 172, 189, 190, + 176, 180, 176, 182, 145, 190, 143, 146, + 178, 157, 158, 133, 134, 137, 168, 169, + 170, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 163, 165, 128, + 134, 136, 152, 155, 161, 163, 164, 166, + 170, 144, 150, 132, 138, 145, 146, 151, + 166, 169, 128, 255, 176, 255, 131, 137, + 191, 145, 189, 135, 129, 130, 132, 133, + 144, 154, 176, 139, 159, 150, 156, 159, + 164, 167, 168, 170, 173, 145, 176, 255, + 139, 255, 166, 176, 171, 179, 160, 161, + 163, 164, 165, 166, 167, 169, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 168, 170, 150, 153, 155, + 163, 165, 167, 169, 173, 153, 155, 148, + 161, 163, 255, 131, 187, 189, 132, 185, + 190, 255, 141, 144, 129, 136, 145, 151, + 152, 161, 162, 163, 164, 255, 129, 188, + 190, 130, 131, 191, 255, 141, 151, 129, + 132, 133, 134, 137, 138, 142, 161, 162, + 163, 164, 255, 131, 188, 129, 130, 190, + 255, 145, 181, 129, 130, 131, 134, 135, + 136, 137, 138, 139, 141, 142, 175, 176, + 177, 178, 255, 134, 138, 141, 129, 136, + 142, 161, 162, 163, 164, 255, 129, 188, + 130, 131, 190, 191, 128, 141, 129, 132, + 135, 136, 139, 140, 150, 151, 162, 163, + 130, 190, 191, 128, 141, 151, 129, 130, + 134, 136, 138, 140, 128, 129, 131, 190, + 255, 133, 137, 129, 132, 142, 148, 151, + 161, 164, 255, 129, 188, 190, 191, 130, + 131, 130, 134, 128, 132, 135, 136, 138, + 139, 140, 141, 149, 150, 162, 163, 129, + 190, 130, 131, 191, 255, 133, 137, 141, + 151, 129, 132, 142, 161, 162, 163, 164, + 255, 138, 143, 150, 159, 144, 145, 146, + 148, 152, 158, 178, 179, 177, 179, 180, + 186, 135, 142, 177, 179, 180, 185, 187, + 188, 136, 141, 181, 183, 185, 152, 153, + 190, 191, 191, 177, 190, 128, 132, 134, + 135, 141, 151, 153, 188, 134, 128, 129, + 130, 141, 156, 157, 158, 159, 160, 162, + 164, 168, 169, 170, 172, 173, 174, 175, + 176, 179, 183, 177, 173, 183, 185, 186, + 187, 188, 189, 190, 150, 151, 152, 153, + 158, 160, 177, 180, 130, 132, 141, 157, + 133, 134, 157, 159, 146, 148, 178, 180, + 146, 147, 178, 179, 182, 180, 189, 190, + 255, 134, 157, 137, 147, 148, 255, 139, + 141, 169, 133, 134, 178, 160, 162, 163, + 166, 167, 168, 169, 171, 176, 184, 185, + 187, 155, 151, 152, 153, 154, 150, 160, + 162, 191, 149, 151, 152, 158, 165, 172, + 173, 178, 179, 188, 176, 190, 132, 181, + 187, 128, 131, 180, 188, 189, 255, 130, + 133, 170, 171, 179, 180, 255, 130, 161, + 170, 128, 129, 162, 165, 166, 167, 168, + 173, 167, 173, 166, 169, 170, 174, 175, + 177, 178, 179, 164, 171, 172, 179, 180, + 181, 182, 183, 161, 173, 180, 144, 146, + 148, 168, 178, 179, 184, 185, 128, 181, + 187, 191, 128, 131, 179, 181, 183, 140, + 141, 144, 176, 175, 177, 191, 160, 191, + 128, 130, 170, 175, 153, 154, 153, 154, + 155, 160, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 175, 175, 178, 180, + 189, 158, 159, 176, 177, 130, 134, 139, + 167, 163, 164, 165, 166, 132, 133, 134, + 159, 160, 177, 178, 255, 166, 173, 135, + 145, 146, 147, 131, 179, 188, 128, 130, + 180, 181, 182, 185, 186, 255, 165, 129, + 255, 169, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 131, 140, 141, 188, 176, + 178, 180, 183, 184, 190, 191, 129, 171, + 181, 182, 172, 173, 174, 175, 165, 168, + 172, 173, 163, 170, 172, 184, 190, 158, + 128, 143, 160, 175, 144, 145, 150, 155, + 157, 158, 159, 135, 139, 141, 168, 171, + 189, 160, 182, 186, 191, 129, 131, 133, + 134, 140, 143, 184, 
+	// … remainder of the machine-generated UTF-8 transition-key table
+	// elided. The _graphclust_* arrays below are Ragel-emitted DFA tables
+	// for grapheme-cluster segmentation: per state, _single_lengths and
+	// _range_lengths count the single keys and lo/hi byte ranges held in
+	// the preceding key table, while _index_offsets locates that state's
+	// block of transition indexes in _graphclust_indicies. …
+}
+
+var _graphclust_single_lengths []byte = []byte{
+	// … machine-generated table data elided …
+}
+
+var _graphclust_range_lengths []byte = []byte{
+	// … machine-generated table data elided …
+}
+
+var _graphclust_index_offsets []int16 = []int16{
+	// … machine-generated table data elided …
+}
+
+var _graphclust_indicies []int16 = []int16{
+	// … machine-generated table data continues …
553, 554, 548, 549, 551, 552, 553, 554, + 548, 549, 550, 555, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 552, 553, 554, 548, 549, 550, + 552, 553, 554, 548, 549, 550, 552, 553, + 554, 548, 549, 550, 552, 553, 554, 548, + 549, 550, 551, 555, 553, 554, 548, 549, + 550, 551, 553, 554, 548, 549, 550, 551, + 553, 554, 548, 549, 550, 551, 553, 554, + 548, 549, 550, 551, 553, 558, 557, 552, + 267, 555, 556, 267, 540, 542, 268, 268, + 267, 559, 560, 561, 562, 563, 530, 267, + 268, 323, 268, 268, 268, 267, 268, 268, + 267, 395, 268, 267, 395, 268, 267, 268, + 395, 268, 267, 530, 267, 564, 566, 567, + 568, 569, 570, 571, 566, 567, 568, 569, + 570, 571, 566, 530, 565, 555, 267, 268, + 538, 268, 267, 540, 540, 540, 542, 267, + 540, 540, 542, 540, 540, 542, 540, 540, + 540, 542, 540, 540, 542, 540, 540, 542, + 540, 540, 267, 542, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 568, 569, 570, 571, 565, 566, + 568, 569, 570, 571, 565, 566, 568, 569, + 570, 571, 565, 566, 568, 569, 570, 571, + 565, 566, 567, 555, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 569, 570, 571, 565, 566, 567, + 569, 570, 571, 565, 566, 567, 569, 570, + 571, 565, 566, 567, 569, 570, 571, 565, + 566, 567, 568, 555, 570, 571, 565, 566, + 567, 568, 570, 571, 565, 566, 567, 568, + 570, 571, 565, 566, 567, 568, 570, 571, + 565, 566, 567, 568, 570, 572, 573, 569, + 267, 555, 540, 268, 540, 542, 268, 542, + 268, 267, 540, 574, 575, 530, 267, 268, + 267, 268, 268, 268, 267, 577, 578, 579, + 580, 576, 267, 581, 582, 530, 267, 266, + 268, 267, 268, 266, 268, 267, 583, 530, + 267, 268, 268, 267, 584, 530, 267, 268, + 268, 267, 585, 586, 587, 588, 589, 590, + 591, 592, 593, 594, 595, 530, 267, 268, + 596, 267, 344, 345, 346, 347, 348, 349, + 597, 267, 598, 267, 268, 267, 395, 268, + 267, 268, 395, 268, 395, 268, 267, 395, + 395, 268, 395, 268, 395, 268, 395, 268, + 395, 268, 267, 268, 268, 395, 395, 268, + 267, 395, 395, 268, 267, 395, 268, 395, + 268, 267, 268, 395, 268, 395, 268, 267, + 395, 268, 395, 268, 267, 395, 268, 267, + 395, 395, 268, 268, 395, 268, 395, 268, + 395, 267, 576, 267, 599, 576, 267, 322, + 530, 600, 530, 267, 268, 267, 266, 3, + 1, 266, 3, 1, 602, 603, 601, 1, + 266, 3, 1, 266, 3, 1, 604, 605, + 606, 607, 608, 601, 1, 609, 610, 612, + 611, 611, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 611, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 611, 611, 612, 612, 611, 612, 611, 613, + 614, 615, 616, 617, 619, 620, 621, 623, + 624, 625, 626, 627, 628, 629, 630, 631, + 632, 633, 634, 635, 636, 637, 638, 639, + 640, 618, 622, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 611, 611, + 612, 611, 611, 611, 612, 612, 612, 612, + 611, 611, 611, 611, 611, 611, 612, 611, + 611, 611, 611, 611, 611, 612, 611, 611, + 611, 611, 612, 612, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 612, 612, 612, 611, 612, 612, 611, + 611, 611, 611, 611, 611, 612, 612, 612, + 612, 612, 612, 611, 612, 612, 611, 611, + 611, 611, 611, 611, 612, 612, 611, 612, + 612, 612, 612, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 611, 612, 611, 641, + 642, 643, 644, 645, 646, 647, 648, 649, + 650, 
651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 612, 612, 611, + 612, 612, 612, 611, 612, 612, 612, 612, + 611, 612, 611, 612, 612, 611, 612, 612, + 611, 612, 611, 611, 611, 612, 612, 611, + 612, 612, 611, 612, 612, 611, 612, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 612, 611, 611, 611, 612, 612, 612, + 611, 612, 611, 612, 611, 612, 612, 612, + 612, 612, 611, 612, 612, 611, 662, 663, + 664, 665, 666, 611, 612, 667, 611, 662, + 663, 668, 664, 665, 666, 611, 612, 611, + 612, 611, 612, 611, 612, 611, 612, 611, + 669, 670, 611, 612, 611, 612, 611, 671, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 683, 684, 685, 611, 612, + 612, 611, 612, 611, 612, 611, 612, 612, + 612, 612, 611, 612, 612, 611, 611, 611, + 612, 612, 611, 612, 611, 612, 612, 611, + 611, 611, 612, 612, 611, 612, 612, 612, + 611, 612, 612, 612, 612, 611, 612, 612, + 612, 611, 612, 612, 611, 686, 687, 672, + 611, 612, 611, 612, 612, 611, 688, 689, + 690, 691, 692, 693, 694, 611, 695, 696, + 697, 698, 699, 611, 612, 611, 612, 611, + 612, 611, 612, 612, 612, 612, 612, 611, + 612, 611, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, + 714, 715, 716, 713, 717, 718, 719, 720, + 721, 611, 612, 612, 611, 611, 612, 611, + 611, 612, 612, 612, 611, 612, 611, 612, + 612, 611, 611, 611, 612, 612, 612, 611, + 612, 611, 612, 612, 612, 611, 612, 612, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 612, 611, 611, 612, 612, 612, + 611, 611, 611, 612, 611, 612, 612, 611, + 612, 611, 612, 612, 611, 612, 612, 611, + 722, 723, 724, 725, 611, 612, 611, 612, + 611, 612, 611, 612, 611, 726, 611, 612, + 611, 727, 728, 729, 730, 731, 732, 611, + 612, 612, 612, 611, 611, 611, 611, 612, + 612, 611, 612, 612, 611, 611, 611, 612, + 612, 612, 612, 611, 733, 734, 735, 611, + 612, 612, 612, 612, 612, 611, 612, 611, + 612, 611, 736, 737, 738, 611, 739, 611, + 739, 611, 611, 739, 739, 611, 739, 739, + 611, 739, 739, 739, 611, 739, 611, 739, + 739, 611, 739, 739, 739, 739, 611, 739, + 739, 611, 611, 739, 739, 611, 739, 611, + 740, 741, 742, 743, 744, 745, 746, 748, + 749, 750, 751, 752, 753, 754, 755, 756, + 757, 758, 759, 631, 760, 761, 762, 763, + 764, 765, 766, 767, 768, 747, 611, 739, + 739, 739, 739, 611, 739, 611, 739, 739, + 611, 612, 612, 611, 611, 612, 739, 739, + 611, 739, 739, 611, 739, 611, 612, 739, + 739, 739, 612, 612, 611, 739, 739, 739, + 611, 611, 611, 739, 611, 612, 612, 739, + 739, 612, 611, 739, 739, 739, 611, 739, + 611, 739, 611, 739, 611, 612, 611, 611, + 739, 739, 611, 739, 611, 612, 739, 739, + 612, 739, 611, 612, 739, 739, 612, 612, + 739, 739, 611, 739, 739, 612, 611, 739, + 739, 739, 612, 612, 612, 611, 739, 612, + 739, 611, 611, 611, 612, 611, 611, 611, + 739, 739, 739, 612, 739, 612, 611, 739, + 739, 612, 612, 612, 739, 739, 739, 611, + 739, 739, 612, 612, 611, 611, 611, 739, + 739, 739, 611, 739, 611, 612, 739, 739, + 739, 739, 612, 739, 612, 612, 611, 739, + 612, 739, 611, 739, 611, 739, 612, 739, + 739, 611, 739, 611, 739, 739, 739, 739, + 612, 611, 612, 739, 611, 739, 739, 739, + 739, 611, 739, 611, 769, 770, 771, 772, + 773, 774, 775, 776, 777, 778, 779, 780, + 781, 782, 783, 784, 785, 786, 787, 788, + 789, 611, 612, 739, 739, 612, 739, 611, + 612, 739, 739, 739, 611, 739, 612, 739, + 739, 739, 611, 739, 611, 739, 739, 611, + 739, 739, 611, 612, 739, 612, 611, 739, + 739, 739, 611, 612, 739, 611, 739, 739, + 611, 739, 739, 612, 739, 612, 612, 739, + 611, 739, 739, 612, 611, 739, 739, 739, + 739, 612, 739, 739, 612, 739, 611, 739, + 611, 612, 612, 612, 739, 739, 
612, 611, + 739, 611, 739, 611, 612, 612, 612, 612, + 739, 739, 612, 739, 611, 612, 739, 739, + 612, 739, 612, 611, 612, 739, 612, 739, + 611, 612, 739, 739, 739, 739, 612, 739, + 611, 739, 739, 611, 790, 791, 792, 793, + 794, 611, 739, 667, 611, 739, 611, 739, + 611, 739, 611, 739, 611, 795, 796, 611, + 739, 611, 739, 611, 797, 798, 799, 800, + 675, 801, 802, 803, 804, 805, 806, 807, + 808, 809, 810, 611, 739, 739, 611, 739, + 611, 739, 611, 739, 739, 739, 612, 612, + 739, 611, 739, 611, 739, 611, 612, 739, + 611, 739, 612, 611, 612, 739, 739, 739, + 612, 739, 612, 611, 739, 611, 612, 739, + 612, 739, 612, 739, 611, 739, 739, 612, + 739, 611, 739, 739, 739, 739, 611, 739, + 612, 612, 739, 739, 612, 611, 739, 739, + 612, 739, 612, 611, 811, 812, 798, 611, + 739, 611, 739, 739, 611, 813, 814, 815, + 816, 817, 818, 819, 611, 820, 821, 822, + 823, 824, 611, 739, 611, 739, 611, 739, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 825, 826, 827, 828, 829, 830, 831, + 832, 833, 834, 835, 836, 837, 838, 839, + 840, 841, 842, 843, 844, 845, 846, 847, + 611, 739, 612, 739, 611, 611, 739, 612, + 611, 612, 612, 611, 739, 612, 739, 739, + 611, 739, 611, 612, 739, 612, 739, 612, + 611, 611, 739, 611, 612, 739, 739, 612, + 739, 612, 739, 611, 739, 612, 739, 611, + 739, 739, 612, 739, 612, 611, 739, 739, + 612, 612, 612, 612, 739, 739, 611, 612, + 739, 611, 612, 612, 739, 611, 739, 612, + 739, 612, 739, 612, 739, 611, 612, 611, + 739, 739, 612, 612, 739, 612, 739, 611, + 611, 611, 739, 739, 612, 739, 612, 739, + 611, 611, 739, 612, 612, 739, 612, 739, + 611, 612, 739, 612, 739, 611, 612, 612, + 739, 739, 611, 612, 612, 612, 739, 739, + 611, 848, 849, 724, 850, 611, 739, 611, + 739, 611, 739, 611, 851, 611, 739, 611, + 852, 853, 854, 855, 856, 857, 611, 612, + 612, 739, 739, 739, 611, 611, 611, 611, + 739, 739, 611, 739, 739, 611, 611, 611, + 739, 739, 739, 739, 611, 858, 859, 860, + 611, 739, 739, 739, 739, 739, 611, 739, + 611, 739, 611, 861, 611, 612, 611, 862, + 611, 863, 864, 865, 867, 866, 611, 739, + 611, 611, 739, 739, 612, 611, 612, 611, + 868, 611, 869, 870, 871, 873, 872, 611, + 612, 611, 611, 612, 612, 688, 689, 690, + 691, 692, 693, 611, 641, 642, 643, 604, + 605, 874, 644, 645, 646, 647, 648, 649, + 650, 651, 652, 653, 654, 655, 656, 657, + 658, 659, 660, 661, 611, 875, 610, 641, + 642, 643, 876, 606, 607, 644, 645, 646, + 647, 648, 649, 650, 651, 652, 653, 654, + 655, 656, 657, 658, 659, 660, 661, 611, + 875, 611, 877, 875, 641, 642, 643, 878, + 607, 644, 645, 646, 647, 648, 649, 650, + 651, 652, 653, 654, 655, 656, 657, 658, + 659, 660, 661, 611, 877, 611, 609, 877, + 879, 611, 877, 611, 880, 881, 611, 875, + 611, 611, 877, 611, 875, 611, 875, 671, + 672, 673, 674, 675, 676, 677, 882, 679, + 680, 681, 682, 683, 684, 685, 884, 885, + 886, 887, 888, 889, 884, 885, 886, 887, + 888, 889, 884, 883, 890, 611, 612, 610, + 611, 891, 891, 891, 877, 611, 641, 642, + 643, 876, 874, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 657, 658, 659, 660, 661, 611, 880, 892, + 611, 611, 875, 891, 891, 877, 891, 891, + 877, 891, 891, 891, 877, 891, 891, 877, + 891, 891, 877, 891, 891, 611, 877, 877, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 886, 887, + 888, 889, 883, 884, 886, 887, 888, 889, + 883, 884, 886, 887, 888, 889, 883, 884, + 886, 887, 888, 889, 883, 884, 885, 890, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 
888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 887, 888, + 889, 883, 884, 885, 887, 888, 889, 883, + 884, 885, 887, 888, 889, 883, 884, 885, + 887, 888, 889, 883, 884, 885, 886, 890, + 888, 889, 883, 884, 885, 886, 888, 889, + 883, 884, 885, 886, 888, 889, 883, 884, + 885, 886, 888, 889, 883, 884, 885, 886, + 888, 893, 892, 887, 611, 890, 891, 611, + 875, 877, 265, 3, 1, 894, 895, 896, + 897, 898, 601, 1, 265, 899, 3, 265, + 3, 265, 3, 1, 901, 900, 900, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 900, 901, 901, 900, 901, 901, + 901, 901, 900, 901, 901, 900, 900, 901, + 901, 900, 901, 900, 902, 903, 904, 905, + 906, 908, 909, 910, 912, 913, 914, 915, + 916, 917, 918, 919, 920, 921, 922, 923, + 924, 925, 926, 927, 928, 929, 907, 911, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 900, 900, 901, 900, 900, + 900, 901, 901, 901, 901, 900, 900, 900, + 900, 900, 900, 901, 900, 900, 900, 900, + 900, 900, 901, 900, 900, 900, 900, 901, + 901, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 900, 900, 900, 900, + 900, 900, 901, 901, 901, 901, 901, 901, + 900, 901, 901, 900, 900, 900, 900, 900, + 900, 901, 901, 900, 901, 901, 901, 901, + 901, 900, 901, 901, 900, 901, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 900, 901, 900, 930, 931, 932, 933, + 934, 935, 936, 937, 938, 939, 940, 941, + 942, 943, 944, 945, 946, 947, 948, 949, + 950, 900, 901, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 900, 901, 900, + 901, 901, 900, 901, 901, 900, 901, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 901, 900, + 901, 901, 900, 951, 952, 953, 954, 955, + 900, 901, 899, 900, 901, 900, 901, 900, + 901, 900, 901, 900, 956, 957, 900, 901, + 900, 901, 900, 958, 959, 960, 961, 962, + 963, 964, 965, 966, 967, 968, 969, 970, + 971, 972, 900, 901, 901, 900, 901, 900, + 901, 900, 901, 901, 901, 901, 900, 901, + 901, 900, 900, 900, 901, 901, 900, 901, + 900, 901, 901, 900, 900, 900, 901, 901, + 900, 901, 901, 901, 900, 901, 901, 901, + 901, 900, 901, 901, 901, 900, 901, 901, + 900, 973, 974, 959, 900, 901, 900, 901, + 901, 900, 975, 976, 977, 978, 979, 980, + 900, 981, 982, 983, 984, 985, 900, 901, + 900, 901, 900, 901, 900, 901, 901, 901, + 901, 901, 900, 901, 900, 986, 987, 988, + 989, 990, 991, 992, 993, 994, 995, 996, + 997, 998, 999, 1000, 1001, 1002, 999, 1003, + 1004, 1005, 1006, 1007, 900, 901, 901, 900, + 900, 901, 900, 900, 901, 901, 901, 900, + 901, 900, 901, 901, 900, 900, 900, 901, + 901, 901, 900, 901, 900, 901, 901, 901, + 900, 901, 901, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 901, 900, 900, + 901, 901, 901, 900, 900, 900, 901, 900, + 901, 901, 900, 901, 900, 901, 901, 900, + 901, 901, 900, 1008, 1009, 1010, 1011, 900, + 901, 900, 901, 900, 901, 900, 901, 900, + 1012, 900, 901, 900, 1013, 1014, 1015, 1016, + 1017, 1018, 900, 901, 901, 901, 900, 900, + 900, 900, 901, 901, 900, 901, 901, 900, + 900, 900, 901, 901, 901, 901, 900, 1019, + 1020, 1021, 900, 901, 901, 901, 901, 901, + 900, 901, 900, 901, 900, 1022, 900, 1023, + 1024, 1025, 1027, 1026, 900, 901, 900, 900, + 901, 901, 951, 952, 1028, 953, 954, 955, + 900, 901, 900, 975, 976, 977, 978, 979, + 980, 1029, 900, 1030, 1031, 1032, 900, 1033, + 900, 1033, 900, 900, 1033, 1033, 900, 1033, + 1033, 900, 1033, 1033, 1033, 900, 1033, 900, + 1033, 1033, 
900, 1033, 1033, 1033, 1033, 900, + 1033, 1033, 900, 900, 1033, 1033, 900, 1033, + 900, 1034, 1035, 1036, 1037, 1038, 1039, 1040, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 920, 1054, 1055, 1056, + 1057, 1058, 1059, 1060, 1061, 1062, 1041, 900, + 1033, 1033, 1033, 1033, 900, 1033, 900, 1033, + 1033, 900, 901, 901, 900, 900, 901, 1033, + 1033, 900, 1033, 1033, 900, 1033, 900, 901, + 1033, 1033, 1033, 901, 901, 900, 1033, 1033, + 1033, 900, 900, 900, 1033, 900, 901, 901, + 1033, 1033, 901, 900, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 901, 900, + 900, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 901, 1033, 900, 901, 1033, 1033, 901, + 901, 1033, 1033, 900, 1033, 1033, 901, 900, + 1033, 1033, 1033, 901, 901, 901, 900, 1033, + 901, 1033, 900, 900, 900, 901, 900, 900, + 900, 1033, 1033, 1033, 901, 1033, 901, 900, + 1033, 1033, 901, 901, 901, 1033, 1033, 1033, + 900, 1033, 1033, 901, 901, 900, 900, 900, + 1033, 1033, 1033, 900, 1033, 900, 901, 1033, + 1033, 1033, 1033, 901, 1033, 901, 901, 900, + 1033, 901, 1033, 900, 1033, 900, 1033, 901, + 1033, 1033, 900, 1033, 900, 1033, 1033, 1033, + 1033, 901, 900, 901, 1033, 900, 1033, 1033, + 1033, 1033, 900, 1033, 900, 1063, 1064, 1065, + 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, + 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, + 1082, 1083, 900, 901, 1033, 1033, 901, 1033, + 900, 901, 1033, 1033, 1033, 900, 1033, 901, + 1033, 1033, 1033, 900, 1033, 900, 1033, 1033, + 900, 1033, 1033, 900, 901, 1033, 901, 900, + 1033, 1033, 1033, 900, 901, 1033, 900, 1033, + 1033, 900, 1033, 1033, 901, 1033, 901, 901, + 1033, 900, 1033, 1033, 901, 900, 1033, 1033, + 1033, 1033, 901, 1033, 1033, 901, 1033, 900, + 1033, 900, 901, 901, 901, 1033, 1033, 901, + 900, 1033, 900, 1033, 900, 901, 901, 901, + 901, 1033, 1033, 901, 1033, 900, 901, 1033, + 1033, 901, 1033, 901, 900, 901, 1033, 901, + 1033, 900, 901, 1033, 1033, 1033, 1033, 901, + 1033, 900, 1033, 1033, 900, 1084, 1085, 1086, + 1087, 1088, 900, 1033, 899, 900, 1033, 900, + 1033, 900, 1033, 900, 1033, 900, 1089, 1090, + 900, 1033, 900, 1033, 900, 1091, 1092, 1093, + 1094, 962, 1095, 1096, 1097, 1098, 1099, 1100, + 1101, 1102, 1103, 1104, 900, 1033, 1033, 900, + 1033, 900, 1033, 900, 1033, 1033, 1033, 901, + 901, 1033, 900, 1033, 900, 1033, 900, 901, + 1033, 900, 1033, 901, 900, 901, 1033, 1033, + 1033, 901, 1033, 901, 900, 1033, 900, 901, + 1033, 901, 1033, 901, 1033, 900, 1033, 1033, + 901, 1033, 900, 1033, 1033, 1033, 1033, 900, + 1033, 901, 901, 1033, 1033, 901, 900, 1033, + 1033, 901, 1033, 901, 900, 1105, 1106, 1092, + 900, 1033, 900, 1033, 1033, 900, 1107, 1108, + 1109, 1110, 1111, 1112, 1113, 900, 1114, 1115, + 1116, 1117, 1118, 900, 1033, 900, 1033, 900, + 1033, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1119, 1120, 1121, 1122, 1123, 1124, + 1125, 1126, 1127, 1128, 1129, 1130, 1131, 1132, + 1133, 1134, 1135, 1136, 1137, 1138, 1139, 1140, + 1141, 900, 1033, 901, 1033, 900, 900, 1033, + 901, 900, 901, 901, 900, 1033, 901, 1033, + 1033, 900, 1033, 900, 901, 1033, 901, 1033, + 901, 900, 900, 1033, 900, 901, 1033, 1033, + 901, 1033, 901, 1033, 900, 1033, 901, 1033, + 900, 1033, 1033, 901, 1033, 901, 900, 1033, + 1033, 901, 901, 901, 901, 1033, 1033, 900, + 901, 1033, 900, 901, 901, 1033, 900, 1033, + 901, 1033, 901, 1033, 901, 1033, 900, 901, + 900, 1033, 1033, 901, 901, 1033, 901, 1033, + 900, 900, 900, 1033, 1033, 901, 1033, 901, + 1033, 900, 900, 1033, 901, 901, 1033, 901, + 1033, 900, 901, 1033, 901, 1033, 900, 901, + 901, 1033, 1033, 900, 
901, 901, 901, 1033, + 1033, 900, 1142, 1143, 1010, 1144, 900, 1033, + 900, 1033, 900, 1033, 900, 1145, 900, 1033, + 900, 1146, 1147, 1148, 1149, 1150, 1151, 900, + 901, 901, 1033, 1033, 1033, 900, 900, 900, + 900, 1033, 1033, 900, 1033, 1033, 900, 900, + 900, 1033, 1033, 1033, 1033, 900, 1152, 1153, + 1154, 900, 1033, 1033, 1033, 1033, 1033, 900, + 1033, 900, 1033, 900, 1155, 900, 901, 900, + 1156, 900, 1157, 1158, 1159, 1161, 1160, 900, + 1033, 900, 900, 1033, 1033, 901, 900, 901, + 900, 3, 265, 3, 1, 1162, 3, 1, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1162, 1163, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1162, 1162, 1163, 1162, + 1162, 1163, 1163, 1162, 1162, 1163, 1162, 1163, + 1164, 1165, 1166, 1167, 1168, 1169, 1170, 1172, + 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, + 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, + 1189, 1190, 1191, 1192, 1193, 1171, 1163, 1162, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1194, 1194, 1163, 1163, 1194, 1162, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1163, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1163, 1163, 1194, 1194, 1163, 1194, 1163, 1195, + 1196, 1197, 1198, 1199, 1201, 1202, 1203, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1184, + 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, + 1221, 1200, 1204, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1194, + 1163, 1163, 1163, 1163, 1163, 1163, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1163, 1194, 1194, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1163, 1163, 1163, 1194, 1194, 1194, + 1194, 1194, 1194, 1163, 1194, 1194, 1163, 1163, + 1163, 1163, 1163, 1163, 1194, 1194, 1163, 1194, + 1194, 1194, 1194, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1163, 1194, 1163, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1163, 1194, 1194, 1163, + 1194, 1194, 1194, 1163, 1194, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1194, 1163, 1194, 1194, + 1163, 1194, 1163, 1163, 1163, 1194, 1194, 1163, + 1194, 1194, 1163, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1194, 1163, 1163, 1163, 1194, 1194, 1194, + 1163, 1194, 1163, 1194, 1163, 1194, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1243, 1244, + 1245, 1246, 1247, 1163, 1194, 1248, 1163, 1243, + 1244, 1249, 1245, 1246, 1247, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1163, + 1250, 1251, 1163, 1194, 1163, 1194, 1163, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1262, 1263, 1264, 1265, 1266, 1163, 1194, + 1194, 1163, 1194, 1163, 1194, 1163, 1194, 1194, + 1194, 1194, 1163, 1194, 1194, 1163, 1163, 1163, + 1194, 1194, 1163, 1194, 1163, 1194, 1194, 1163, + 1163, 1163, 1194, 1194, 1163, 1194, 1194, 1194, + 1163, 1194, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1163, 1194, 1194, 1163, 1267, 1268, 1253, + 1163, 1194, 1163, 1194, 1194, 1163, 1269, 1270, + 1271, 1272, 1273, 1274, 1275, 1163, 1276, 1277, + 1278, 1279, 1280, 1163, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1194, 1194, 1163, + 1194, 1163, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 
1292, 1293, 1294, + 1295, 1296, 1297, 1294, 1298, 1299, 1300, 1301, + 1302, 1163, 1194, 1194, 1163, 1163, 1194, 1163, + 1163, 1194, 1194, 1194, 1163, 1194, 1163, 1194, + 1194, 1163, 1163, 1163, 1194, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1194, 1163, 1194, 1194, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1194, 1163, 1163, 1194, 1194, 1194, + 1163, 1163, 1163, 1194, 1163, 1194, 1194, 1163, + 1194, 1163, 1194, 1194, 1163, 1194, 1194, 1163, + 1303, 1304, 1305, 1306, 1163, 1194, 1163, 1194, + 1163, 1194, 1163, 1194, 1163, 1307, 1163, 1194, + 1163, 1308, 1309, 1310, 1311, 1312, 1313, 1163, + 1194, 1194, 1194, 1163, 1163, 1163, 1163, 1194, + 1194, 1163, 1194, 1194, 1163, 1163, 1163, 1194, + 1194, 1194, 1194, 1163, 1314, 1315, 1316, 1163, + 1194, 1194, 1194, 1194, 1194, 1163, 1194, 1163, + 1194, 1163, 1317, 1318, 1319, 1163, 1162, 1163, + 1194, 1163, 1194, 1163, 1320, 1163, 1321, 1322, + 1323, 1325, 1324, 1163, 1194, 1163, 1163, 1194, + 1194, 1269, 1270, 1271, 1272, 1273, 1274, 1163, + 1162, 1163, 1162, 1162, 1163, 1162, 1163, 1194, + 1162, 1162, 1162, 1194, 1194, 1163, 1162, 1162, + 1162, 1163, 1163, 1163, 1162, 1163, 1194, 1194, + 1162, 1162, 1194, 1163, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1194, 1163, + 1163, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1163, 1194, 1162, 1162, 1194, + 1194, 1162, 1162, 1163, 1162, 1162, 1194, 1163, + 1162, 1162, 1162, 1194, 1194, 1194, 1163, 1162, + 1194, 1162, 1163, 1163, 1163, 1194, 1163, 1163, + 1163, 1162, 1162, 1162, 1194, 1162, 1194, 1163, + 1162, 1162, 1194, 1194, 1194, 1162, 1162, 1162, + 1163, 1162, 1162, 1194, 1194, 1163, 1163, 1163, + 1162, 1162, 1162, 1163, 1162, 1163, 1194, 1162, + 1162, 1162, 1162, 1194, 1162, 1194, 1194, 1163, + 1162, 1194, 1162, 1163, 1162, 1163, 1162, 1194, + 1162, 1162, 1163, 1162, 1163, 1162, 1162, 1162, + 1162, 1194, 1163, 1194, 1162, 1163, 1162, 1162, + 1162, 1162, 1163, 1162, 1163, 1326, 1327, 1328, + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1163, 1194, 1162, 1162, 1194, 1162, + 1163, 1194, 1162, 1162, 1162, 1163, 1162, 1194, + 1162, 1162, 1162, 1163, 1162, 1163, 1162, 1162, + 1163, 1162, 1162, 1163, 1194, 1162, 1194, 1163, + 1162, 1162, 1162, 1163, 1194, 1162, 1163, 1162, + 1162, 1163, 1162, 1162, 1194, 1162, 1194, 1194, + 1162, 1163, 1162, 1162, 1194, 1163, 1162, 1162, + 1162, 1162, 1194, 1162, 1162, 1194, 1162, 1163, + 1162, 1163, 1194, 1194, 1194, 1162, 1162, 1194, + 1163, 1162, 1163, 1162, 1163, 1194, 1194, 1194, + 1194, 1162, 1162, 1194, 1162, 1163, 1194, 1162, + 1162, 1194, 1162, 1194, 1163, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1162, 1162, 1162, 1194, + 1162, 1163, 1162, 1162, 1163, 1347, 1348, 1349, + 1350, 1351, 1163, 1162, 1248, 1163, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1163, 1352, 1353, + 1163, 1162, 1163, 1162, 1163, 1354, 1355, 1356, + 1357, 1256, 1358, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1163, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1162, 1162, 1162, 1194, + 1194, 1162, 1163, 1162, 1163, 1162, 1163, 1194, + 1162, 1163, 1162, 1194, 1163, 1194, 1162, 1162, + 1162, 1194, 1162, 1194, 1163, 1162, 1163, 1194, + 1162, 1194, 1162, 1194, 1162, 1163, 1162, 1162, + 1194, 1162, 1163, 1162, 1162, 1162, 1162, 1163, + 1162, 1194, 1194, 1162, 1162, 1194, 1163, 1162, + 1162, 1194, 1162, 1194, 1163, 1368, 1369, 1355, + 1163, 1162, 1163, 1162, 1162, 1163, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1163, 1377, 1378, + 1379, 1380, 1381, 1163, 1162, 
1163, 1162, 1163, + 1162, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1382, 1383, 1384, 1385, 1386, 1387, + 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, + 1396, 1397, 1398, 1399, 1400, 1401, 1402, 1403, + 1404, 1163, 1162, 1194, 1162, 1163, 1163, 1162, + 1194, 1163, 1194, 1194, 1163, 1162, 1194, 1162, + 1162, 1163, 1162, 1163, 1194, 1162, 1194, 1162, + 1194, 1163, 1163, 1162, 1163, 1194, 1162, 1162, + 1194, 1162, 1194, 1162, 1163, 1162, 1194, 1162, + 1163, 1162, 1162, 1194, 1162, 1194, 1163, 1162, + 1162, 1194, 1194, 1194, 1194, 1162, 1162, 1163, + 1194, 1162, 1163, 1194, 1194, 1162, 1163, 1162, + 1194, 1162, 1194, 1162, 1194, 1162, 1163, 1194, + 1163, 1162, 1162, 1194, 1194, 1162, 1194, 1162, + 1163, 1163, 1163, 1162, 1162, 1194, 1162, 1194, + 1162, 1163, 1163, 1162, 1194, 1194, 1162, 1194, + 1162, 1163, 1194, 1162, 1194, 1162, 1163, 1194, + 1194, 1162, 1162, 1163, 1194, 1194, 1194, 1162, + 1162, 1163, 1405, 1406, 1305, 1407, 1163, 1162, + 1163, 1162, 1163, 1162, 1163, 1408, 1163, 1162, + 1163, 1409, 1410, 1411, 1412, 1413, 1414, 1163, + 1194, 1194, 1162, 1162, 1162, 1163, 1163, 1163, + 1163, 1162, 1162, 1163, 1162, 1162, 1163, 1163, + 1163, 1162, 1162, 1162, 1162, 1163, 1415, 1416, + 1417, 1163, 1162, 1162, 1162, 1162, 1162, 1163, + 1162, 1163, 1162, 1163, 1418, 1163, 1194, 1163, + 1419, 1163, 1420, 1421, 1422, 1424, 1423, 1163, + 1162, 1163, 1163, 1162, 1162, 1162, 3, 1, + 3, 1162, 3, 1, 601, 1, 1425, 1427, + 1428, 1429, 1430, 1431, 1432, 1427, 1428, 1429, + 1430, 1431, 1432, 1427, 601, 1426, 890, 1, + 3, 610, 3, 1, 875, 875, 875, 877, + 1, 875, 875, 877, 875, 875, 877, 875, + 875, 875, 877, 875, 875, 877, 875, 875, + 877, 875, 875, 1, 877, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1429, 1430, 1431, 1432, 1426, + 1427, 1429, 1430, 1431, 1432, 1426, 1427, 1429, + 1430, 1431, 1432, 1426, 1427, 1429, 1430, 1431, + 1432, 1426, 1427, 1428, 890, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1430, 1431, 1432, 1426, 1427, + 1428, 1430, 1431, 1432, 1426, 1427, 1428, 1430, + 1431, 1432, 1426, 1427, 1428, 1430, 1431, 1432, + 1426, 1427, 1428, 1429, 890, 1431, 1432, 1426, + 1427, 1428, 1429, 1431, 1432, 1426, 1427, 1428, + 1429, 1431, 1432, 1426, 1427, 1428, 1429, 1431, + 1432, 1426, 1427, 1428, 1429, 1431, 1433, 1434, + 1435, 1437, 1430, 1436, 1, 890, 875, 3, + 875, 877, 3, 877, 3, 1, 875, 1, + 265, 265, 1, 265, 1438, 1439, 601, 1, + 265, 3, 1, 3, 3, 265, 3, 1, + 1441, 1442, 1443, 1444, 1440, 1, 1445, 1446, + 601, 1, 266, 3, 1, 3, 266, 3, + 1, 1447, 601, 1, 3, 265, 3, 1, + 1448, 601, 1, 3, 265, 3, 1, 1449, + 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, + 1458, 1459, 601, 1, 3, 1460, 1, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1461, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1461, 1461, 1462, 1462, 1461, 1462, 1461, 1463, + 1464, 1465, 1466, 1467, 1469, 1470, 1471, 1473, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1468, 1472, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1462, + 1461, 1461, 1461, 1461, 1461, 1461, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1461, 1461, + 1461, 1461, 
1462, 1462, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1461, 1461, 1461, 1462, 1462, 1462, + 1462, 1462, 1462, 1461, 1462, 1462, 1461, 1461, + 1461, 1461, 1461, 1461, 1462, 1462, 1461, 1462, + 1462, 1462, 1462, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1461, 1462, 1461, 1491, + 1492, 1493, 1494, 1495, 1496, 1497, 1498, 1499, + 1500, 1501, 1502, 1503, 1504, 1505, 1506, 1507, + 1508, 1509, 1510, 1511, 1461, 1462, 1462, 1461, + 1462, 1462, 1462, 1461, 1462, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1462, 1461, 1462, 1462, + 1461, 1462, 1461, 1461, 1461, 1462, 1462, 1461, + 1462, 1462, 1461, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1462, 1461, 1461, 1461, 1462, 1462, 1462, + 1461, 1462, 1461, 1462, 1461, 1462, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1512, 1513, + 1514, 1515, 1516, 1461, 1462, 1517, 1461, 1512, + 1513, 1518, 1514, 1515, 1516, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1461, + 1519, 1520, 1461, 1462, 1461, 1462, 1461, 1521, + 1522, 1523, 1524, 1525, 1526, 1527, 1528, 1529, + 1530, 1531, 1532, 1533, 1534, 1535, 1461, 1462, + 1462, 1461, 1462, 1461, 1462, 1461, 1462, 1462, + 1462, 1462, 1461, 1462, 1462, 1461, 1461, 1461, + 1462, 1462, 1461, 1462, 1461, 1462, 1462, 1461, + 1461, 1461, 1462, 1462, 1461, 1462, 1462, 1462, + 1461, 1462, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1461, 1462, 1462, 1461, 1536, 1537, 1522, + 1461, 1462, 1461, 1462, 1462, 1461, 1538, 1539, + 1540, 1541, 1542, 1543, 1544, 1461, 1545, 1546, + 1547, 1548, 1549, 1461, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1462, 1462, 1461, + 1462, 1461, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1563, 1567, 1568, 1569, 1570, + 1571, 1461, 1462, 1462, 1461, 1461, 1462, 1461, + 1461, 1462, 1462, 1462, 1461, 1462, 1461, 1462, + 1462, 1461, 1461, 1461, 1462, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1462, 1461, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1462, 1461, 1461, 1462, 1462, 1462, + 1461, 1461, 1461, 1462, 1461, 1462, 1462, 1461, + 1462, 1461, 1462, 1462, 1461, 1462, 1462, 1461, + 1572, 1573, 1574, 1575, 1461, 1462, 1461, 1462, + 1461, 1462, 1461, 1462, 1461, 1576, 1461, 1462, + 1461, 1577, 1578, 1579, 1580, 1581, 1582, 1461, + 1462, 1462, 1462, 1461, 1461, 1461, 1461, 1462, + 1462, 1461, 1462, 1462, 1461, 1461, 1461, 1462, + 1462, 1462, 1462, 1461, 1583, 1584, 1585, 1461, + 1462, 1462, 1462, 1462, 1462, 1461, 1462, 1461, + 1462, 1461, 1586, 1587, 1588, 1461, 1589, 1461, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1589, 1461, 1589, 1461, 1589, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1589, 1461, 1461, 1589, 1589, 1461, 1589, 1461, + 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1598, + 1599, 1600, 1601, 1602, 1603, 1604, 1605, 1606, + 1607, 1608, 1609, 1481, 1610, 1611, 1612, 1613, + 1614, 1615, 1616, 1617, 1618, 1597, 1461, 1589, + 1589, 1589, 1589, 1461, 1589, 1461, 1589, 1589, + 1461, 1462, 1462, 1461, 1461, 1462, 1589, 1589, + 1461, 1589, 1589, 1461, 1589, 1461, 1462, 1589, + 1589, 1589, 1462, 1462, 1461, 1589, 1589, 1589, + 1461, 1461, 1461, 1589, 1461, 1462, 1462, 1589, + 1589, 1462, 1461, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1462, 1461, 1461, + 1589, 1589, 
1461, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1461, 1462, 1589, 1589, 1462, 1462, + 1589, 1589, 1461, 1589, 1589, 1462, 1461, 1589, + 1589, 1589, 1462, 1462, 1462, 1461, 1589, 1462, + 1589, 1461, 1461, 1461, 1462, 1461, 1461, 1461, + 1589, 1589, 1589, 1462, 1589, 1462, 1461, 1589, + 1589, 1462, 1462, 1462, 1589, 1589, 1589, 1461, + 1589, 1589, 1462, 1462, 1461, 1461, 1461, 1589, + 1589, 1589, 1461, 1589, 1461, 1462, 1589, 1589, + 1589, 1589, 1462, 1589, 1462, 1462, 1461, 1589, + 1462, 1589, 1461, 1589, 1461, 1589, 1462, 1589, + 1589, 1461, 1589, 1461, 1589, 1589, 1589, 1589, + 1462, 1461, 1462, 1589, 1461, 1589, 1589, 1589, + 1589, 1461, 1589, 1461, 1619, 1620, 1621, 1622, + 1623, 1624, 1625, 1626, 1627, 1628, 1629, 1630, + 1631, 1632, 1633, 1634, 1635, 1636, 1637, 1638, + 1639, 1461, 1462, 1589, 1589, 1462, 1589, 1461, + 1462, 1589, 1589, 1589, 1461, 1589, 1462, 1589, + 1589, 1589, 1461, 1589, 1461, 1589, 1589, 1461, + 1589, 1589, 1461, 1462, 1589, 1462, 1461, 1589, + 1589, 1589, 1461, 1462, 1589, 1461, 1589, 1589, + 1461, 1589, 1589, 1462, 1589, 1462, 1462, 1589, + 1461, 1589, 1589, 1462, 1461, 1589, 1589, 1589, + 1589, 1462, 1589, 1589, 1462, 1589, 1461, 1589, + 1461, 1462, 1462, 1462, 1589, 1589, 1462, 1461, + 1589, 1461, 1589, 1461, 1462, 1462, 1462, 1462, + 1589, 1589, 1462, 1589, 1461, 1462, 1589, 1589, + 1462, 1589, 1462, 1461, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1589, 1589, 1589, 1462, 1589, + 1461, 1589, 1589, 1461, 1640, 1641, 1642, 1643, + 1644, 1461, 1589, 1517, 1461, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1461, 1645, 1646, 1461, + 1589, 1461, 1589, 1461, 1647, 1648, 1649, 1650, + 1525, 1651, 1652, 1653, 1654, 1655, 1656, 1657, + 1658, 1659, 1660, 1461, 1589, 1589, 1461, 1589, + 1461, 1589, 1461, 1589, 1589, 1589, 1462, 1462, + 1589, 1461, 1589, 1461, 1589, 1461, 1462, 1589, + 1461, 1589, 1462, 1461, 1462, 1589, 1589, 1589, + 1462, 1589, 1462, 1461, 1589, 1461, 1462, 1589, + 1462, 1589, 1462, 1589, 1461, 1589, 1589, 1462, + 1589, 1461, 1589, 1589, 1589, 1589, 1461, 1589, + 1462, 1462, 1589, 1589, 1462, 1461, 1589, 1589, + 1462, 1589, 1462, 1461, 1661, 1662, 1648, 1461, + 1589, 1461, 1589, 1589, 1461, 1663, 1664, 1665, + 1666, 1667, 1668, 1669, 1461, 1670, 1671, 1672, + 1673, 1674, 1461, 1589, 1461, 1589, 1461, 1589, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1675, 1676, 1677, 1678, 1679, 1680, 1681, + 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, + 1690, 1691, 1692, 1693, 1694, 1695, 1696, 1697, + 1461, 1589, 1462, 1589, 1461, 1461, 1589, 1462, + 1461, 1462, 1462, 1461, 1589, 1462, 1589, 1589, + 1461, 1589, 1461, 1462, 1589, 1462, 1589, 1462, + 1461, 1461, 1589, 1461, 1462, 1589, 1589, 1462, + 1589, 1462, 1589, 1461, 1589, 1462, 1589, 1461, + 1589, 1589, 1462, 1589, 1462, 1461, 1589, 1589, + 1462, 1462, 1462, 1462, 1589, 1589, 1461, 1462, + 1589, 1461, 1462, 1462, 1589, 1461, 1589, 1462, + 1589, 1462, 1589, 1462, 1589, 1461, 1462, 1461, + 1589, 1589, 1462, 1462, 1589, 1462, 1589, 1461, + 1461, 1461, 1589, 1589, 1462, 1589, 1462, 1589, + 1461, 1461, 1589, 1462, 1462, 1589, 1462, 1589, + 1461, 1462, 1589, 1462, 1589, 1461, 1462, 1462, + 1589, 1589, 1461, 1462, 1462, 1462, 1589, 1589, + 1461, 1698, 1699, 1574, 1700, 1461, 1589, 1461, + 1589, 1461, 1589, 1461, 1701, 1461, 1589, 1461, + 1702, 1703, 1704, 1705, 1706, 1707, 1461, 1462, + 1462, 1589, 1589, 1589, 1461, 1461, 1461, 1461, + 1589, 1589, 1461, 1589, 1589, 1461, 1461, 1461, + 1589, 1589, 1589, 1589, 1461, 1708, 1709, 1710, + 1461, 1589, 1589, 1589, 1589, 1589, 1461, 1589, + 1461, 1589, 
1461, 1711, 1461, 1462, 1461, 1712, + 1461, 1713, 1714, 1715, 1717, 1716, 1461, 1589, + 1461, 1461, 1589, 1589, 1462, 1461, 1462, 1461, + 1718, 1461, 1719, 1720, 1721, 1723, 1722, 1461, + 1462, 1461, 1461, 1462, 1462, 1538, 1539, 1540, + 1541, 1542, 1543, 1461, 1538, 1539, 1540, 1541, + 1542, 1543, 1724, 1461, 1725, 1461, 1462, 1461, + 1162, 3, 1, 3, 1162, 3, 1162, 3, + 1, 1162, 1162, 3, 1162, 3, 1162, 3, + 1162, 3, 1162, 3, 1, 3, 3, 1162, + 1162, 3, 1, 1162, 1162, 3, 1, 1162, + 3, 1162, 3, 1, 3, 1162, 3, 1162, + 3, 1, 1162, 3, 1162, 3, 1, 1162, + 3, 1, 1162, 1162, 3, 3, 1162, 3, + 1162, 3, 1162, 1, 1440, 1, 1726, 1440, + 1, 1727, 1435, 1437, 1728, 1437, 601, 1436, + 1, 265, 3, 1, 3, 265, 1, 1, + 1730, 1729, 1733, 1734, 1735, 1736, 1737, 1738, + 1739, 1741, 1742, 1743, 1744, 1745, 1746, 1748, + 1729, 1, 1732, 1740, 1747, 1, 1731, 262, + 264, 1750, 1751, 1752, 1753, 1754, 1755, 1756, + 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, + 1765, 1766, 1767, 1749, 262, 264, 1750, 1751, + 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, + 1760, 1761, 1768, 1763, 1764, 1765, 1769, 1767, + 1749, 256, 258, 1770, 1771, 1772, 1773, 1774, + 1775, 1776, 1777, 1778, 1779, 1780, 1781, 1782, + 1783, 1784, 1785, 1786, 1787, 1749, 1789, 1790, + 1791, 1792, 1793, 1794, 1795, 1796, 1797, 1798, + 1799, 1800, 1801, 1803, 268, 530, 576, 1802, + 1788, 527, 529, 1804, 1805, 1806, 1807, 1808, + 1809, 1810, 1811, 1812, 1813, 1814, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1815, 1822, 1817, 1818, 1819, + 1823, 1821, 1788, 521, 523, 1824, 1825, 1826, + 1827, 1828, 1829, 1830, 1831, 1832, 1833, 1834, + 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1842, 1816, 1817, + 1843, 1844, 1845, 1846, 1819, 1820, 1821, 1788, + 527, 529, 1804, 1805, 1806, 1807, 1808, 1809, + 1810, 1811, 1812, 1813, 1814, 1847, 1816, 1817, + 1818, 1848, 1819, 1820, 1821, 1788, 527, 529, + 1804, 1805, 1806, 1807, 1808, 1809, 1810, 1811, + 1812, 1813, 1814, 1849, 1816, 1817, 1818, 1850, + 1819, 1820, 1821, 1788, 527, 529, 1804, 1805, + 1806, 1807, 1808, 1809, 1810, 1811, 1812, 1813, + 1814, 1851, 1816, 1817, 1818, 1852, 1819, 1820, + 1821, 1788, 527, 529, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1815, + 1816, 1817, 1818, 1819, 1853, 1821, 1788, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, + 1870, 1871, 1872, 1873, 1874, 1875, 1854, 871, + 873, 1855, 1856, 1857, 1858, 1859, 1860, 1861, + 1862, 1863, 1864, 1865, 1876, 1867, 1868, 1877, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1876, 1878, 1868, 1877, 1873, 1879, 1875, + 1854, 865, 867, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, + 1893, 1894, 1895, 1896, 1897, 1854, 871, 873, + 1855, 1856, 1857, 1858, 1859, 1860, 1861, 1862, + 1863, 1864, 1865, 1898, 1867, 1868, 1877, 1899, + 1873, 1874, 1875, 1854, 871, 873, 1855, 1856, + 1857, 1858, 1859, 1860, 1861, 1862, 1863, 1864, + 1865, 1900, 1867, 1868, 1877, 1901, 1873, 1874, + 1875, 1854, 871, 873, 1855, 1856, 1857, 1858, + 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1902, + 1867, 1868, 1877, 1903, 1873, 1874, 1875, 1854, + 1025, 1027, 1905, 1906, 1907, 1908, 1909, 1910, + 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, + 1919, 1920, 1921, 1922, 1904, 1025, 1027, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 
1913, + 1914, 1915, 1916, 1923, 1918, 1919, 1920, 1924, + 1922, 1904, 1159, 1161, 1925, 1926, 1927, 1928, + 1929, 1930, 1931, 1932, 1933, 1934, 1935, 1936, + 1937, 1938, 1939, 1940, 1941, 1942, 1904, 1422, + 1424, 1944, 1945, 1946, 1947, 1948, 1949, 1950, + 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, + 1959, 1960, 1961, 1943, 1323, 1325, 1962, 1963, + 1964, 1965, 1966, 1967, 1968, 1969, 1970, 1971, + 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, + 1943, 1323, 1325, 1962, 1963, 1964, 1965, 1966, + 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1980, + 1975, 1976, 1977, 1981, 1979, 1943, 1721, 1723, + 1983, 1984, 1985, 1986, 1987, 1988, 1989, 1990, + 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, + 1999, 2000, 1982, 1721, 1723, 1983, 1984, 1985, + 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, + 1994, 1995, 1996, 1997, 1998, 2001, 2000, 1982, + 1721, 1723, 1983, 1984, 1985, 1986, 1987, 1988, + 1989, 1990, 1991, 1992, 1993, 1994, 2002, 1996, + 1997, 1998, 2003, 2000, 1982, 1715, 1717, 2004, + 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, + 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, + 2021, 1982, +} + +var _graphclust_trans_targs []int16 = []int16{ + 1974, 0, 1974, 1975, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, + 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 66, 68, 70, + 71, 72, 1976, 69, 74, 75, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 93, 94, 96, + 102, 125, 130, 132, 139, 143, 97, 98, + 99, 100, 101, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, + 116, 117, 118, 119, 120, 121, 122, 123, + 124, 126, 127, 128, 129, 131, 133, 134, + 135, 136, 137, 138, 140, 141, 142, 144, + 291, 292, 1977, 158, 159, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 188, 189, 190, 191, 192, 193, 194, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 206, 207, 208, 210, 211, 212, + 213, 214, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 234, 235, 237, 243, 267, 271, + 273, 280, 284, 238, 239, 240, 241, 242, + 244, 245, 246, 247, 248, 249, 250, 251, + 252, 253, 254, 255, 256, 257, 258, 259, + 260, 261, 262, 263, 264, 265, 266, 268, + 269, 270, 272, 274, 275, 276, 277, 278, + 279, 281, 282, 283, 285, 287, 288, 289, + 145, 290, 146, 294, 295, 296, 2, 297, + 3, 1974, 1978, 1974, 1979, 315, 316, 317, + 318, 319, 320, 321, 322, 323, 324, 325, + 326, 327, 328, 329, 330, 331, 332, 333, + 334, 335, 336, 337, 338, 339, 340, 341, + 342, 344, 345, 346, 347, 348, 349, 350, + 351, 352, 353, 354, 355, 356, 357, 358, + 359, 360, 361, 362, 363, 364, 366, 368, + 370, 371, 372, 1980, 369, 374, 375, 377, + 378, 379, 380, 381, 382, 383, 384, 385, + 386, 387, 388, 389, 390, 391, 393, 394, + 396, 402, 425, 430, 432, 439, 443, 397, + 398, 399, 400, 401, 403, 404, 405, 406, + 407, 408, 409, 410, 411, 412, 413, 414, + 415, 416, 417, 418, 419, 420, 421, 422, + 423, 424, 426, 427, 428, 429, 431, 433, + 434, 435, 436, 437, 438, 440, 441, 442, + 444, 591, 592, 1981, 458, 459, 460, 461, + 462, 463, 464, 465, 466, 467, 468, 469, + 470, 471, 472, 473, 474, 475, 476, 477, + 478, 479, 480, 481, 482, 483, 484, 485, + 486, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 504, 505, 506, 507, 508, 510, 511, + 512, 513, 514, 516, 517, 519, 520, 521, + 522, 523, 524, 525, 526, 527, 528, 529, + 530, 531, 532, 
534, 535, 537, 543, 567, + 571, 573, 580, 584, 538, 539, 540, 541, + 542, 544, 545, 546, 547, 548, 549, 550, + 551, 552, 553, 554, 555, 556, 557, 558, + 559, 560, 561, 562, 563, 564, 565, 566, + 568, 569, 570, 572, 574, 575, 576, 577, + 578, 579, 581, 582, 583, 585, 587, 588, + 589, 445, 590, 446, 594, 595, 596, 302, + 597, 303, 599, 605, 606, 608, 610, 613, + 616, 640, 1982, 622, 1983, 612, 1984, 615, + 618, 620, 621, 624, 625, 629, 630, 631, + 632, 633, 634, 635, 1985, 628, 639, 642, + 643, 644, 645, 646, 649, 650, 651, 652, + 653, 654, 655, 656, 660, 661, 663, 664, + 647, 666, 669, 671, 673, 667, 668, 670, + 672, 674, 678, 679, 680, 681, 682, 683, + 684, 685, 686, 687, 1986, 676, 677, 690, + 691, 299, 695, 696, 698, 997, 1000, 1003, + 1027, 1974, 1987, 1974, 1988, 712, 713, 714, + 715, 716, 717, 718, 719, 720, 721, 722, + 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 733, 734, 735, 736, 737, 738, + 739, 741, 742, 743, 744, 745, 746, 747, + 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 763, 765, + 767, 768, 769, 1989, 766, 771, 772, 774, + 775, 776, 777, 778, 779, 780, 781, 782, + 783, 784, 785, 786, 787, 788, 790, 791, + 793, 799, 822, 827, 829, 836, 840, 794, + 795, 796, 797, 798, 800, 801, 802, 803, + 804, 805, 806, 807, 808, 809, 810, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 823, 824, 825, 826, 828, 830, + 831, 832, 833, 834, 835, 837, 838, 839, + 841, 988, 989, 1990, 855, 856, 857, 858, + 859, 860, 861, 862, 863, 864, 865, 866, + 867, 868, 869, 870, 871, 872, 873, 874, + 875, 876, 877, 878, 879, 880, 881, 882, + 883, 885, 886, 887, 888, 889, 890, 891, + 892, 893, 894, 895, 896, 897, 898, 899, + 900, 901, 902, 903, 904, 905, 907, 908, + 909, 910, 911, 913, 914, 916, 917, 918, + 919, 920, 921, 922, 923, 924, 925, 926, + 927, 928, 929, 931, 932, 934, 940, 964, + 968, 970, 977, 981, 935, 936, 937, 938, + 939, 941, 942, 943, 944, 945, 946, 947, + 948, 949, 950, 951, 952, 953, 954, 955, + 956, 957, 958, 959, 960, 961, 962, 963, + 965, 966, 967, 969, 971, 972, 973, 974, + 975, 976, 978, 979, 980, 982, 984, 985, + 986, 842, 987, 843, 991, 992, 993, 699, + 994, 700, 1009, 1991, 999, 1992, 1002, 1005, + 1007, 1008, 1011, 1012, 1016, 1017, 1018, 1019, + 1020, 1021, 1022, 1993, 1015, 1026, 1029, 1327, + 1328, 1626, 1627, 1994, 1974, 1995, 1043, 1044, + 1045, 1046, 1047, 1048, 1049, 1050, 1051, 1052, + 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, + 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, + 1069, 1070, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1094, + 1095, 1096, 1097, 1098, 1100, 1101, 1103, 1104, + 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1119, 1120, 1122, + 1128, 1151, 1156, 1158, 1165, 1123, 1124, 1125, + 1126, 1127, 1129, 1130, 1131, 1132, 1133, 1134, + 1135, 1136, 1137, 1138, 1139, 1140, 1141, 1142, + 1143, 1144, 1145, 1146, 1147, 1148, 1149, 1150, + 1152, 1153, 1154, 1155, 1157, 1159, 1160, 1161, + 1162, 1163, 1164, 1166, 1167, 1168, 1170, 1171, + 1172, 1030, 1173, 1031, 1175, 1177, 1178, 1325, + 1326, 1996, 1192, 1193, 1194, 1195, 1196, 1197, + 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, + 1206, 1207, 1208, 1209, 1210, 1211, 1212, 1213, + 1214, 1215, 1216, 1217, 1218, 1219, 1220, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1238, + 1239, 1240, 1241, 1242, 1244, 1245, 1246, 1247, + 1248, 1250, 1251, 1253, 1254, 1255, 1256, 1257, + 1258, 1259, 
1260, 1261, 1262, 1263, 1264, 1265, + 1266, 1268, 1269, 1271, 1277, 1301, 1305, 1307, + 1314, 1318, 1272, 1273, 1274, 1275, 1276, 1278, + 1279, 1280, 1281, 1282, 1283, 1284, 1285, 1286, + 1287, 1288, 1289, 1290, 1291, 1292, 1293, 1294, + 1295, 1296, 1297, 1298, 1299, 1300, 1302, 1303, + 1304, 1306, 1308, 1309, 1310, 1311, 1312, 1313, + 1315, 1316, 1317, 1319, 1321, 1322, 1323, 1179, + 1324, 1180, 1997, 1974, 1342, 1343, 1344, 1345, + 1497, 1498, 1499, 1500, 1501, 1502, 1503, 1504, + 1505, 1506, 1507, 1508, 1509, 1510, 1511, 1512, + 1377, 1513, 1514, 1515, 1516, 1517, 1518, 1519, + 1520, 1521, 1998, 1359, 1360, 1361, 1362, 1363, + 1364, 1365, 1366, 1367, 1368, 1369, 1370, 1371, + 1372, 1373, 1374, 1375, 1376, 1378, 1379, 1380, + 1381, 1382, 1383, 1384, 1385, 1386, 1388, 1389, + 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, + 1398, 1399, 1400, 1401, 1402, 1403, 1404, 1405, + 1406, 1407, 1408, 1410, 1412, 1414, 1415, 1416, + 1999, 1413, 1418, 1419, 1421, 1422, 1423, 1424, + 1425, 1426, 1427, 1428, 1429, 1430, 1431, 1432, + 1433, 1434, 1435, 1437, 1438, 1440, 1446, 1469, + 1474, 1476, 1483, 1487, 1441, 1442, 1443, 1444, + 1445, 1447, 1448, 1449, 1450, 1451, 1452, 1453, + 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, + 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1470, + 1471, 1472, 1473, 1475, 1477, 1478, 1479, 1480, + 1481, 1482, 1484, 1485, 1486, 1488, 1489, 1490, + 1492, 1493, 1494, 1346, 1495, 1347, 1523, 1524, + 1525, 1526, 1527, 1528, 1529, 1530, 1531, 1532, + 1533, 1534, 1535, 1536, 1537, 1538, 1539, 1540, + 1541, 1542, 1543, 1545, 1546, 1547, 1548, 1549, + 1551, 1552, 1554, 1555, 1556, 1557, 1558, 1559, + 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, + 1569, 1570, 1572, 1578, 1602, 1606, 1608, 1615, + 1619, 1573, 1574, 1575, 1576, 1577, 1579, 1580, + 1581, 1582, 1583, 1584, 1585, 1586, 1587, 1588, + 1589, 1590, 1591, 1592, 1593, 1594, 1595, 1596, + 1597, 1598, 1599, 1600, 1601, 1603, 1604, 1605, + 1607, 1609, 1610, 1611, 1612, 1613, 1614, 1616, + 1617, 1618, 1620, 1622, 1623, 1624, 1329, 1625, + 1330, 1630, 1631, 1632, 1633, 1634, 1635, 1636, + 1637, 1641, 1642, 1643, 1644, 1645, 1647, 1648, + 1628, 1650, 1653, 1655, 1657, 1651, 1652, 1654, + 1656, 1658, 1959, 1960, 1961, 1962, 1963, 1964, + 1965, 1966, 1967, 1968, 2000, 1974, 2001, 1672, + 1673, 1674, 1675, 1676, 1677, 1678, 1679, 1680, + 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, + 1689, 1690, 1691, 1692, 1693, 1694, 1695, 1696, + 1697, 1698, 1699, 1701, 1702, 1703, 1704, 1705, + 1706, 1707, 1708, 1709, 1710, 1711, 1712, 1713, + 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, + 1723, 1725, 1727, 1728, 1729, 2002, 1726, 1731, + 1732, 1734, 1735, 1736, 1737, 1738, 1739, 1740, + 1741, 1742, 1743, 1744, 1745, 1746, 1747, 1748, + 1750, 1751, 1753, 1759, 1782, 1787, 1789, 1796, + 1800, 1754, 1755, 1756, 1757, 1758, 1760, 1761, + 1762, 1763, 1764, 1765, 1766, 1767, 1768, 1769, + 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, + 1778, 1779, 1780, 1781, 1783, 1784, 1785, 1786, + 1788, 1790, 1791, 1792, 1793, 1794, 1795, 1797, + 1798, 1799, 1801, 1948, 1949, 2003, 1815, 1816, + 1817, 1818, 1819, 1820, 1821, 1822, 1823, 1824, + 1825, 1826, 1827, 1828, 1829, 1830, 1831, 1832, + 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, + 1841, 1842, 1843, 1845, 1846, 1847, 1848, 1849, + 1850, 1851, 1852, 1853, 1854, 1855, 1856, 1857, + 1858, 1859, 1860, 1861, 1862, 1863, 1864, 1865, + 1867, 1868, 1869, 1870, 1871, 1873, 1874, 1876, + 1877, 1878, 1879, 1880, 1881, 1882, 1883, 1884, + 1885, 1886, 1887, 1888, 1889, 1891, 1892, 1894, + 1900, 1924, 
1928, 1930, 1937, 1941, 1895, 1896, + 1897, 1898, 1899, 1901, 1902, 1903, 1904, 1905, + 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, + 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1921, + 1922, 1923, 1925, 1926, 1927, 1929, 1931, 1932, + 1933, 1934, 1935, 1936, 1938, 1939, 1940, 1942, + 1944, 1945, 1946, 1802, 1947, 1803, 1951, 1952, + 1953, 1659, 1954, 1660, 1957, 1958, 1971, 1972, + 1973, 1974, 1, 1975, 299, 300, 301, 692, + 693, 694, 697, 1028, 1628, 1629, 1638, 1639, + 1640, 1646, 1649, 1969, 1970, 1974, 4, 5, + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 43, 65, 73, 76, 92, 298, 293, + 67, 95, 147, 148, 149, 150, 151, 152, + 153, 154, 155, 156, 157, 187, 209, 215, + 218, 233, 236, 286, 1974, 600, 601, 602, + 603, 604, 607, 641, 648, 657, 658, 659, + 662, 665, 688, 689, 304, 305, 306, 307, + 308, 309, 310, 311, 312, 313, 314, 343, + 365, 373, 376, 392, 598, 593, 367, 395, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 487, 509, 515, 518, 533, + 536, 586, 609, 623, 636, 637, 638, 611, + 619, 614, 617, 626, 627, 675, 1974, 701, + 702, 703, 704, 705, 706, 707, 708, 709, + 710, 711, 996, 762, 770, 1010, 1023, 1024, + 1025, 789, 995, 990, 740, 773, 764, 792, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 884, 906, 912, 915, 930, + 933, 983, 998, 1006, 1001, 1004, 1013, 1014, + 1974, 1032, 1033, 1034, 1035, 1036, 1037, 1038, + 1039, 1040, 1041, 1042, 1071, 1174, 1099, 1102, + 1118, 1176, 1169, 1093, 1121, 1181, 1182, 1183, + 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, + 1221, 1243, 1249, 1252, 1267, 1270, 1320, 1974, + 1331, 1332, 1333, 1334, 1335, 1336, 1337, 1338, + 1339, 1340, 1341, 1522, 1544, 1550, 1553, 1568, + 1571, 1621, 1348, 1349, 1350, 1351, 1352, 1353, + 1354, 1355, 1356, 1357, 1358, 1387, 1409, 1417, + 1420, 1436, 1496, 1491, 1411, 1439, 1974, 1661, + 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, + 1670, 1671, 1700, 1722, 1730, 1733, 1749, 1956, + 1950, 1955, 1724, 1752, 1804, 1805, 1806, 1807, + 1808, 1809, 1810, 1811, 1812, 1813, 1814, 1844, + 1866, 1872, 1875, 1890, 1893, 1943, +} + +var _graphclust_trans_actions []byte = []byte{ + 31, 0, 27, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 34, 40, 25, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 
0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 40, 0, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 29, 51, 17, 40, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 51, 0, 51, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 40, 0, 0, 0, 0, + 0, 0, 0, 40, 21, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 40, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 19, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 40, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 40, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 40, 23, 40, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 
0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 40, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 43, 1, 47, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 15, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 13, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 9, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _graphclust_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 
0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 37, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 3, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, +} + +var _graphclust_eof_trans []int16 = []int16{ + 0, 0, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 0, 0, 0, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 268, 268, 268, 268, + 
268, 268, 268, 268, 268, 268, 268, 268, + 268, 268, 268, 268, 0, 0, 0, 0, + 0, 0, 610, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 610, 612, 612, + 610, 612, 612, 610, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 612, 612, + 612, 612, 612, 612, 612, 612, 610, 612, + 612, 612, 612, 0, 0, 0, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 901, + 901, 901, 901, 901, 901, 901, 901, 0, + 0, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 
1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 1164, 1164, 1164, 1164, 1164, 1164, + 1164, 1164, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 
1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 1462, + 1462, 1462, 1462, 1462, 1462, 1462, 1462, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 1750, + 1750, 1750, 1789, 1789, 1789, 1789, 1789, 1789, + 1789, 1789, 1789, 1855, 1855, 1855, 1855, 1855, + 1855, 1855, 1905, 1905, 1905, 1944, 1944, 1944, + 1983, 1983, 1983, 1983, +} + +const graphclust_start int = 1974 +const graphclust_first_final int = 1974 +const graphclust_error int = 0 + +const graphclust_en_main int = 1974 + + +// line 14 "grapheme_clusters.rl" + + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. +func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + +// line 4976 "grapheme_clusters.go" + { + cs = graphclust_start + ts = 0 + te = 0 + act = 0 + } + +// line 4984 "grapheme_clusters.go" + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } +_resume: + _acts = int(_graphclust_from_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts - 1] { + case 4: +// line 1 "NONE" + +ts = p + +// line 5008 "grapheme_clusters.go" + } + } + + _keys = int(_graphclust_key_offsets[cs]) + _trans = int(_graphclust_index_offsets[cs]) + + _klen = int(_graphclust_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _graphclust_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_graphclust_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _graphclust_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _graphclust_trans_keys[_mid + 1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + +_match: + _trans = int(_graphclust_indicies[_trans]) +_eof_trans: + cs = int(_graphclust_trans_targs[_trans]) + + if _graphclust_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_graphclust_trans_actions[_trans]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 0: +// line 46 "grapheme_clusters.rl" + + + startPos = p + + case 1: +// line 50 "grapheme_clusters.rl" + + + endPos = p + + case 5: +// line 1 "NONE" + +te = p+1 + + case 6: +// line 54 "grapheme_clusters.rl" + +act = 3; + case 7: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 8: +// line 54 "grapheme_clusters.rl" + +te = p+1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 9: +// 
line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 10: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 11: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 12: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 13: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 14: +// line 54 "grapheme_clusters.rl" + +te = p +p-- +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 15: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 16: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 17: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 18: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 19: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 20: +// line 54 "grapheme_clusters.rl" + +p = (te) - 1 +{ + return endPos+1, data[startPos:endPos+1], nil + } + case 21: +// line 1 "NONE" + + switch act { + case 0: + {cs = 0 +goto _again +} + case 3: + {p = (te) - 1 + + return endPos+1, data[startPos:endPos+1], nil + } + } + +// line 5218 "grapheme_clusters.go" + } + } + +_again: + _acts = int(_graphclust_to_state_actions[cs]) + _nacts = uint(_graphclust_actions[_acts]); _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _graphclust_actions[_acts-1] { + case 2: +// line 1 "NONE" + +ts = 0 + + case 3: +// line 1 "NONE" + +act = 0 + +// line 5238 "grapheme_clusters.go" + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: {} + if p == eof { + if _graphclust_eof_trans[cs] > 0 { + _trans = int(_graphclust_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: {} + } + +// line 116 "grapheme_clusters.rl" + + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. + _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl new file mode 100644 index 00000000..003ffbf5 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters.rl @@ -0,0 +1,132 @@ +package textseg + +import ( + "errors" + "unicode/utf8" +) + +// Generated from grapheme_clusters.rl. DO NOT EDIT +%%{ + # (except you are actually in grapheme_clusters.rl here, so edit away!) + + machine graphclust; + write data; +}%% + +var Error = errors.New("invalid UTF8 text") + +// ScanGraphemeClusters is a split function for bufio.Scanner that splits +// on grapheme cluster boundaries. 
+func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) { + if len(data) == 0 { + return 0, nil, nil + } + + // Ragel state + cs := 0 // Current State + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + startPos := 0 + endPos := 0 + + %%{ + include GraphemeCluster "grapheme_clusters_table.rl"; + + action start { + startPos = p + } + + action end { + endPos = p + } + + action emit { + return endPos+1, data[startPos:endPos+1], nil + } + + ZWJGlue = ZWJ (Glue_After_Zwj | E_Base_GAZ Extend* E_Modifier?)?; + AnyExtender = Extend | ZWJGlue | SpacingMark; + Extension = AnyExtender*; + ReplacementChar = (0xEF 0xBF 0xBD); + + CRLFSeq = CR LF; + ControlSeq = Control | ReplacementChar; + HangulSeq = ( + L+ (((LV? V+ | LVT) T*)?|LV?) | + LV V* T* | + V+ T* | + LVT T* | + T+ + ) Extension; + EmojiSeq = (E_Base | E_Base_GAZ) Extend* E_Modifier? Extension; + ZWJSeq = ZWJGlue Extension; + EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + + # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension + OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|E_Base|E_Base_GAZ|ZWJ|Regional_Indicator|Prepend)) Extension; + + # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break + PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?; + + CRLFTok = CRLFSeq >start @end; + ControlTok = ControlSeq >start @end; + HangulTok = HangulSeq >start @end; + EmojiTok = EmojiSeq >start @end; + ZWJTok = ZWJSeq >start @end; + EmojiFlagTok = EmojiFlagSeq >start @end; + OtherTok = OtherSeq >start @end; + PrependTok = PrependSeq >start @end; + + main := |* + CRLFTok => emit; + ControlTok => emit; + HangulTok => emit; + EmojiTok => emit; + ZWJTok => emit; + EmojiFlagTok => emit; + PrependTok => emit; + OtherTok => emit; + + # any single valid UTF-8 character would also be valid per spec, + # but we'll handle that separately after the loop so we can deal + # with requesting more bytes if we're not at EOF. + *|; + + write init; + write exec; + }%% + + // If we fall out here then we were unable to complete a sequence. + // If we weren't able to complete a sequence then either we've + // reached the end of a partial buffer (so there's more data to come) + // or we have an isolated symbol that would normally be part of a + // grapheme cluster but has appeared in isolation here. + + if !atEOF { + // Request more + return 0, nil, nil + } + + // Just take the first UTF-8 sequence and return that. 
+ _, seqLen := utf8.DecodeRune(data) + return seqLen, data[:seqLen], nil +} diff --git a/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl new file mode 100644 index 00000000..fb451182 --- /dev/null +++ b/vendor/github.com/apparentlymart/go-textseg/textseg/grapheme_clusters_table.rl @@ -0,0 +1,1583 @@ +# The following Ragel file was autogenerated with unicode2ragel.rb +# from: http://www.unicode.org/Public/9.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +# +# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "E_Base", "E_Modifier", "ZWJ", "Glue_After_Zwj", "E_Base_GAZ"]. +# +# To use this, make sure that your alphtype is set to byte, +# and that your input is in utf8. + +%%{ + machine GraphemeCluster; + + Prepend = + 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ... + | 0xDB 0x9D #Cf ARABIC END OF AYAH + | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK + | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH + | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH + | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN + | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA... + ; + + CR = + 0x0D #Cc + ; + + LF = + 0x0A #Cc + ; + + Control = + 0x00..0x09 #Cc [10] .. + | 0x0B..0x0C #Cc [2] .. + | 0x0E..0x1F #Cc [18] .. + | 0x7F #Cc [33] .. + | 0xC2 0x80..0x9F # + | 0xC2 0xAD #Cf SOFT HYPHEN + | 0xD8 0x9C #Cf ARABIC LETTER MARK + | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR + | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE + | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ... + | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR + | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR + | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-... + | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS + | 0xE2 0x81 0xA5 #Cn + | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG... + | 0xED 0xA0 0x80..0xFF #Cs [2048] .... + | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT... + | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP... + | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI... + | 0xF3 0xA0 0x80 0x80 #Cn + | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG + | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] .. + | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] .. + | 0xF3 0xA0 0x83 0x00..0xBF # + | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] .. +# +# This script uses the unicode spec to generate a Ragel state machine +# that recognizes unicode alphanumeric characters. It generates 5 +# character classes: uupper, ulower, ualpha, udigit, and ualnum. +# Currently supported encodings are UTF-8 [default] and UCS-4. +# +# Usage: unicode2ragel.rb [options] +# -e, --encoding [ucs4 | utf8] Data encoding +# -h, --help Show this message +# +# This script was originally written as part of the Ferret search +# engine library. 
+# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. 
+
+def utf8_ranges( range )
+  ranges = []
+  UTF8_BOUNDARIES.each do |max|
+    if range.begin <= max
+      if range.end <= max
+        ranges << range
+        return ranges
+      end
+
+      ranges << (range.begin .. max)
+      range = (max + 1) .. range.end
+    end
+  end
+  ranges
+end
+
+def build_range( start, stop )
+  size = start.size/2
+  left = size - 1
+  return [""] if size < 1
+
+  a = start[0..1]
+  b = stop[0..1]
+
+  ###
+  # Shared prefix
+
+  if a == b
+    return build_range(start[2..-1], stop[2..-1]).map do |elt|
+      "0x#{a} " + elt
+    end
+  end
+
+  ###
+  # Unshared prefix, end of run
+
+  return ["0x#{a}..0x#{b} "] if left.zero?
+
+  ###
+  # Unshared prefix, not end of run
+  # Range can be 0x123456..0x56789A
+  # Which is equivalent to:
+  #   0x123456 .. 0x12FFFF
+  #   0x130000 .. 0x55FFFF
+  #   0x560000 .. 0x56789A
+
+  ret = []
+  ret << build_range(start, a + "FF" * left)
+
+  ###
+  # Only generate middle range if need be.
+
+  if a.hex+1 != b.hex
+    max = to_hex(b.hex - 1)
+    max = "FF" if b == "FF"
+    ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
+  end
+
+  ###
+  # Don't generate last range if it is covered by first range
+
+  ret << build_range(b + "00" * left, stop) unless b == "FF"
+  ret.flatten!
+end
+
+def to_utf8( range )
+  utf8_ranges( range ).map do |r|
+    begin_enc = to_utf8_enc(r.begin)
+    end_enc = to_utf8_enc(r.end)
+    build_range begin_enc, end_enc
+  end.flatten!
+end
+
+##
+# Perform a 3-way comparison of the number of codepoints advertised by
+# the unicode spec for the given range, the originally parsed range,
+# and the resulting utf8 encoded range.
+
+def count_codepoints( code )
+  code.split(' ').inject(1) do |acc, elt|
+    if elt =~ /0x(.+)\.\.0x(.+)/
+      if @encoding == :utf8
+        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
+      else
+        acc * ($2.hex - $1.hex + 1)
+      end
+    else
+      acc
+    end
+  end
+end
+
+def is_valid?( range, desc, codes )
+  spec_count = 1
+  spec_count = $1.to_i if desc =~ /\[(\d+)\]/
+  range_count = range.end - range.begin + 1
+
+  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
+  sum == spec_count and sum == range_count
+end
+
+##
+# Generate the state machine to stdout
+
+def generate_machine( name, property )
+  pipe = " "
+  @output.puts "    #{name} = "
+  each_alpha( @chart_url, property ) do |range, desc|
+
+    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
+
+    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
+    #  is_valid? range, desc, codes
+
+    range_width = codes.map { |a| a.size }.max
+    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
+
+    desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
+    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
+
+    if desc.size > desc_width
+      desc = desc[0..desc_width - 4] + "..."
+    end
+
+    codes.each_with_index do |r, idx|
+      desc = "" unless idx.zero?
+ code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts < b.size { + buf = buf[int64(n)-b.size:] + } + + // Copy in place + remain := b.size - b.writeCursor + copy(b.data[b.writeCursor:], buf) + if int64(len(buf)) > remain { + copy(b.data, buf[remain:]) + } + + // Update location of the cursor + b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size) + return n, nil +} + +// Size returns the size of the buffer +func (b *Buffer) Size() int64 { + return b.size +} + +// TotalWritten provides the total number of bytes written +func (b *Buffer) TotalWritten() int64 { + return b.written +} + +// Bytes provides a slice of the bytes written. This +// slice should not be written to. +func (b *Buffer) Bytes() []byte { + switch { + case b.written >= b.size && b.writeCursor == 0: + return b.data + case b.written > b.size: + out := make([]byte, b.size) + copy(out, b.data[b.writeCursor:]) + copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor]) + return out + default: + return b.data[:b.writeCursor] + } +} + +// Reset resets the buffer so it has no content. +func (b *Buffer) Reset() { + b.writeCursor = 0 + b.written = 0 +} + +// String returns the contents of the buffer as a string +func (b *Buffer) String() string { + return string(b.Bytes()) +} diff --git a/vendor/github.com/armon/circbuf/go.mod b/vendor/github.com/armon/circbuf/go.mod new file mode 100644 index 00000000..23362b19 --- /dev/null +++ b/vendor/github.com/armon/circbuf/go.mod @@ -0,0 +1 @@ +module github.com/armon/circbuf diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 00000000..106569e5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md new file mode 100644 index 00000000..aa73348c --- /dev/null +++ b/vendor/github.com/armon/go-metrics/README.md @@ -0,0 +1,91 @@ +go-metrics +========== + +This library provides a `metrics` package which can be used to instrument code, +expose application metrics, and profile runtime performance in a flexible manner. 
+
+Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
+
+Sinks
+-----
+
+The `metrics` package makes use of a `MetricSink` interface to support delivery
+to any type of backend. Currently the following sinks are provided:
+
+* StatsiteSink: Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
+* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
+* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
+* InmemSink: Provides in-memory aggregation, can be used to export stats
+* FanoutSink: Sinks to multiple sinks. Enables writing to multiple statsite instances, for example.
+* BlackholeSink: Sinks to nowhere
+
+In addition to the sinks, the `InmemSignal` can be used to catch a signal
+and dump a formatted output of recent metrics. For example, when a process gets
+a SIGUSR1, it can dump recent performance metrics to stderr for debugging.
+
+Labels
+------
+
+Most metrics have an equivalent ending in `WithLabels`; these methods
+allow you to push metrics with labels and use some features of the underlying sinks
+(e.g. translation into Prometheus labels).
+
+Since some of these labels may greatly increase the cardinality of metrics, the
+library allows you to filter labels using a blacklist/whitelist filtering system
+which is global to all metrics.
+
+* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to the underlying sink; otherwise, all labels are sent by default.
+* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to the underlying sinks.
+
+By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
+no tags are filtered at all, but this lets a user globally block some tags with high
+cardinality at the application level.
+
+Examples
+--------
+
+Here is an example of using the package:
+
+```go
+func SlowMethod() {
+    // Profiling the runtime of a method
+    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
+}
+
+// Configure a statsite sink as the global metrics sink
+sink, _ := metrics.NewStatsiteSink("statsite:8125")
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
+
+// Emit a Key/Value pair
+metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
+```
+
+Here is an example of setting up a signal handler:
+
+```go
+// Setup the inmem sink and signal handler
+inm := metrics.NewInmemSink(10*time.Second, time.Minute)
+sig := metrics.DefaultInmemSignal(inm)
+metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
+
+// Run some code
+inm.SetGauge([]string{"foo"}, 42)
+inm.EmitKey([]string{"bar"}, 30)
+
+inm.IncrCounter([]string{"baz"}, 42)
+inm.IncrCounter([]string{"baz"}, 1)
+inm.IncrCounter([]string{"baz"}, 80)
+
+inm.AddSample([]string{"method", "wow"}, 42)
+inm.AddSample([]string{"method", "wow"}, 100)
+inm.AddSample([]string{"method", "wow"}, 22)
+
+....
+``` + +When a signal comes in, output like the following will be dumped to stderr: + + [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 + [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 + [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 + [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 \ No newline at end of file diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 00000000..31098dd5 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 00000000..38136af3 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod new file mode 100644 index 00000000..88e1e98f --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.mod @@ -0,0 +1,16 @@ +module github.com/armon/go-metrics + +go 1.12 + +require ( + github.com/DataDog/datadog-go v2.2.0+incompatible + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/hashicorp/go-immutable-radix v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.3 // indirect + github.com/pascaldekloe/goe v0.1.0 + github.com/pkg/errors v0.8.1 // indirect + github.com/prometheus/client_golang v0.9.2 + github.com/stretchr/testify v1.3.0 // indirect + github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect +) diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum new file mode 100644 index 00000000..5ffd8329 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/go.sum @@ -0,0 +1,46 @@ +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/davecgh/go-spew v1.1.0 
h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= 
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 00000000..93b0e0ad --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. + maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a 
*AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. +func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make its own copy for current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent 
:= intervals[n-1] + current.RLock() + *copyCurrent = *current + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will not change, so it is safe to copy only the slice reference + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v.deepCopy() + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v.deepCopy() + } + current.RUnlock() + + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 00000000..5fac958d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,131 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string 
`json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// deepCopy allocates a new instance of AggregateSample +func (source *SampledValue) deepCopy() SampledValue { + dest := *source + if source.AggregateSample != nil { + dest.AggregateSample = &AggregateSample{} + *dest.AggregateSample = *source.AggregateSample + } + return dest +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. +func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + interval.RLock() + defer interval.RUnlock() + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary, nil +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 00000000..0937f4ae --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan 
struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr. Windows uses SIGBREAK +func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { + return NewInmemSignal(inmem, DefaultSignal, os.Stderr) +} + +// Stop is used to stop the InmemSignal from listening +func (i *InmemSignal) Stop() { + i.stopLock.Lock() + defer i.stopLock.Unlock() + + if i.stop { + return + } + i.stop = true + close(i.stopCh) + signal.Stop(i.sigCh) +} + +// run is a long running routine that handles signals +func (i *InmemSignal) run() { + for { + select { + case <-i.sigCh: + i.dumpStats() + case <-i.stopCh: + return + } + } +} + +// dumpStats is used to dump the data to output writer +func (i *InmemSignal) dumpStats() { + buf := bytes.NewBuffer(nil) + + data := i.inm.Data() + // Skip the last period which is still being aggregated + for j := 0; j < len(data)-1; j++ { + intv := data[j] + intv.RLock() + for _, val := range intv.Gauges { + name := i.flattenLabels(val.Name, val.Labels) + fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value) + } + for name, vals := range intv.Points { + for _, val := range vals { + fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) + } + } + for _, agg := range intv.Counters { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + for _, agg := range intv.Samples { + name := i.flattenLabels(agg.Name, agg.Labels) + fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample) + } + intv.RUnlock() + } + + // Write out the bytes + i.w.Write(buf.Bytes()) +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSignal) flattenLabels(name string, labels []Label) string { + buf := bytes.NewBufferString(name) + replacer := strings.NewReplacer(" ", "_", ":", "_") + + for _, label := range labels { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, label.Value) + } + + return buf.String() +} diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go new file mode 100644 index 00000000..4920d683 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -0,0 +1,278 @@ +package metrics + +import ( + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +type Label struct { + Name string + Value string +} + +func (m *Metrics) SetGauge(key []string, val float32) { + m.SetGaugeWithLabels(key, val, nil) +} + +func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" { + if m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } else if m.EnableHostname { + key = insert(0, m.HostName, key) + } + } + if m.EnableTypePrefix { + key = insert(0, "gauge", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := 
m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.SetGaugeWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) EmitKey(key []string, val float32) { + if m.EnableTypePrefix { + key = insert(0, "kv", key) + } + if m.ServiceName != "" { + key = insert(0, m.ServiceName, key) + } + allowed, _ := m.allowMetric(key, nil) + if !allowed { + return + } + m.sink.EmitKey(key, val) +} + +func (m *Metrics) IncrCounter(key []string, val float32) { + m.IncrCounterWithLabels(key, val, nil) +} + +func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "counter", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.IncrCounterWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) AddSample(key []string, val float32) { + m.AddSampleWithLabels(key, val, nil) +} + +func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "sample", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + m.sink.AddSampleWithLabels(key, val, labelsFiltered) +} + +func (m *Metrics) MeasureSince(key []string, start time.Time) { + m.MeasureSinceWithLabels(key, start, nil) +} + +func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + if m.HostName != "" && m.EnableHostnameLabel { + labels = append(labels, Label{"host", m.HostName}) + } + if m.EnableTypePrefix { + key = insert(0, "timer", key) + } + if m.ServiceName != "" { + if m.EnableServiceLabel { + labels = append(labels, Label{"service", m.ServiceName}) + } else { + key = insert(0, m.ServiceName, key) + } + } + allowed, labelsFiltered := m.allowMetric(key, labels) + if !allowed { + return + } + now := time.Now() + elapsed := now.Sub(start) + msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) + m.sink.AddSampleWithLabels(key, msec, labelsFiltered) +} + +// UpdateFilter overwrites the existing filter with the given rules. +func (m *Metrics) UpdateFilter(allow, block []string) { + m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels) +} + +// UpdateFilterAndLabels overwrites the existing filter with the given rules. 
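To make the filter semantics concrete, here is a minimal, hypothetical wiring of these APIs (the `myapp` service name, the sink choice, and the key names are illustrative assumptions, not values from this change):

```go
package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// In-memory sink: aggregate in 10-second intervals, retain one minute of history.
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	m, _ := metrics.New(metrics.DefaultConfig("myapp"), sink)

	// Allow everything under "myapp.api" except the "myapp.api.debug" subtree;
	// allowMetric matches the longest filter prefix against the '.'-joined key.
	m.UpdateFilter([]string{"myapp.api"}, []string{"myapp.api.debug"})

	m.IncrCounter([]string{"api", "requests"}, 1)      // key "myapp.api.requests": emitted
	m.IncrCounter([]string{"api", "debug", "hits"}, 1) // key "myapp.api.debug.hits": dropped
}
```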
+func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + m.filterLock.Lock() + defer m.filterLock.Unlock() + + m.AllowedPrefixes = allow + m.BlockedPrefixes = block + + if allowedLabels == nil { + // Having a white list means we take only elements from it + m.allowedLabels = nil + } else { + m.allowedLabels = make(map[string]bool) + for _, v := range allowedLabels { + m.allowedLabels[v] = true + } + } + m.blockedLabels = make(map[string]bool) + for _, v := range blockedLabels { + m.blockedLabels[v] = true + } + m.AllowedLabels = allowedLabels + m.BlockedLabels = blockedLabels + + m.filter = iradix.New() + for _, prefix := range m.AllowedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), true) + } + for _, prefix := range m.BlockedPrefixes { + m.filter, _, _ = m.filter.Insert([]byte(prefix), false) + } +} + +// labelIsAllowed returns true if a label should be included in the metric; +// the caller should lock m.filterLock while calling this method +func (m *Metrics) labelIsAllowed(label *Label) bool { + labelName := (*label).Name + if m.blockedLabels != nil { + _, ok := m.blockedLabels[labelName] + if ok { + // If present, let's remove this label + return false + } + } + if m.allowedLabels != nil { + _, ok := m.allowedLabels[labelName] + return ok + } + // Allow by default + return true +} + +// filterLabels returns only the allowed labels; +// the caller should lock m.filterLock while calling this method +func (m *Metrics) filterLabels(labels []Label) []Label { + if labels == nil { + return nil + } + toReturn := []Label{} + for _, label := range labels { + if m.labelIsAllowed(&label) { + toReturn = append(toReturn, label) + } + } + return toReturn +} + +// Returns whether the metric should be allowed based on configured prefix filters. +// Also returns the applicable labels +func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { + m.filterLock.RLock() + defer m.filterLock.RUnlock() + + if m.filter == nil || m.filter.Len() == 0 { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + _, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, "."))) + if !ok { + return m.Config.FilterDefault, m.filterLabels(labels) + } + + return allowed.(bool), m.filterLabels(labels) +} + +// Periodically collects runtime stats to publish +func (m *Metrics) collectStats() { + for { + time.Sleep(m.ProfileInterval) + m.emitRuntimeStats() + } +} + +// Emits various runtime statistics +func (m *Metrics) emitRuntimeStats() { + // Export number of Goroutines + numRoutines := runtime.NumGoroutine() + m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) + + // Export memory stats + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) + m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) + m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) + m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) + m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) + m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) + m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) + + // Export info about the last few GC runs + num := stats.NumGC + + // Handle wrap around + if num < m.lastNumGC { + m.lastNumGC = 0 + } + + // Ensure we don't scan more than 256 + if num-m.lastNumGC >= 256 { + m.lastNumGC = num - 255 + } + + for i := m.lastNumGC; i < num; 
i++ { + pause := stats.PauseNs[i%256] + m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) + } + m.lastNumGC = num +} + +// Inserts a string value at an index into the slice +func insert(i int, v string, s []string) []string { + s = append(s, "") + copy(s[i+1:], s[i:]) + s[i] = v + return s +} diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go new file mode 100644 index 00000000..0b7d6e4b --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink.go @@ -0,0 +1,115 @@ +package metrics + +import ( + "fmt" + "net/url" +) + +// The MetricSink interface is used to transmit metrics information +// to an external system +type MetricSink interface { + // A Gauge should retain the last value it is set to + SetGauge(key []string, val float32) + SetGaugeWithLabels(key []string, val float32, labels []Label) + + // Should emit a Key/Value pair for each call + EmitKey(key []string, val float32) + + // Counters should accumulate values + IncrCounter(key []string, val float32) + IncrCounterWithLabels(key []string, val float32, labels []Label) + + // Samples are for timing information, where quantiles are used + AddSample(key []string, val float32) + AddSampleWithLabels(key []string, val float32, labels []Label) +} + +// BlackholeSink is used to just blackhole messages +type BlackholeSink struct{} + +func (*BlackholeSink) SetGauge(key []string, val float32) {} +func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) EmitKey(key []string, val float32) {} +func (*BlackholeSink) IncrCounter(key []string, val float32) {} +func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {} +func (*BlackholeSink) AddSample(key []string, val float32) {} +func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {} + +// FanoutSink is used to fan out values to multiple sinks +type FanoutSink []MetricSink + +func (fh FanoutSink) SetGauge(key []string, val float32) { + fh.SetGaugeWithLabels(key, val, nil) +} + +func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.SetGaugeWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) EmitKey(key []string, val float32) { + for _, s := range fh { + s.EmitKey(key, val) + } +} + +func (fh FanoutSink) IncrCounter(key []string, val float32) { + fh.IncrCounterWithLabels(key, val, nil) +} + +func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.IncrCounterWithLabels(key, val, labels) + } +} + +func (fh FanoutSink) AddSample(key []string, val float32) { + fh.AddSampleWithLabels(key, val, nil) +} + +func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + for _, s := range fh { + s.AddSampleWithLabels(key, val, labels) + } +} + +// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided +// by each sink type +type sinkURLFactoryFunc func(*url.URL) (MetricSink, error) + +// sinkRegistry supports the generic NewMetricSinkFromURL function by mapping URL +// schemes to metric sink factory functions +var sinkRegistry = map[string]sinkURLFactoryFunc{ + "statsd": NewStatsdSinkFromURL, + "statsite": NewStatsiteSinkFromURL, + "inmem": NewInmemSinkFromURL, +} + +// NewMetricSinkFromURL allows a generic URL input to configure any of the +// supported sinks. 
The scheme of the URL identifies the type of the sink, and the +// query parameters are used to set options. +// +// "statsd://" - Initializes a StatsdSink. The host and port are passed through +// as the "addr" of the sink +// +// "statsite://" - Initializes a StatsiteSink. The host and port become the +// "addr" of the sink +// +// "inmem://" - Initializes an InmemSink. The host and port are ignored. The +// "interval" and "retain" query parameters must be specified with valid +// durations, see NewInmemSink for details. +func NewMetricSinkFromURL(urlStr string) (MetricSink, error) { + u, err := url.Parse(urlStr) + if err != nil { + return nil, err + } + + sinkURLFactoryFunc := sinkRegistry[u.Scheme] + if sinkURLFactoryFunc == nil { + return nil, fmt.Errorf( + "cannot create metric sink, unrecognized sink name: %q", u.Scheme) + } + + return sinkURLFactoryFunc(u) +} diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go new file mode 100644 index 00000000..32a28c48 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start.go @@ -0,0 +1,141 @@ +package metrics + +import ( + "os" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-immutable-radix" +) + +// Config is used to configure metrics settings +type Config struct { + ServiceName string // Prefixed with keys to separate services + HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + AllowedLabels []string // A list of metric labels to allow, with '.' as the separator + BlockedLabels []string // A list of metric labels to block, with '.' 
as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + allowedLabels map[string]bool + blockedLabels map[string]bool + filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. +func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { + metrics, err := New(conf, sink) + if err == nil { + globalMetrics.Store(metrics) + } + return metrics, err +} + +// Proxy all the methods to the globalMetrics instance +func SetGauge(key []string, val float32) { + globalMetrics.Load().(*Metrics).SetGauge(key, val) +} + +func SetGaugeWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels) +} + +func EmitKey(key []string, val float32) { + globalMetrics.Load().(*Metrics).EmitKey(key, val) +} + +func IncrCounter(key []string, val float32) { + globalMetrics.Load().(*Metrics).IncrCounter(key, val) +} + +func IncrCounterWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels) +} + +func AddSample(key []string, val float32) { + globalMetrics.Load().(*Metrics).AddSample(key, val) +} + +func AddSampleWithLabels(key []string, val float32, labels []Label) { + globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels) +} + +func MeasureSince(key []string, start time.Time) { + globalMetrics.Load().(*Metrics).MeasureSince(key, start) +} + +func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) { + globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels) +} + +func UpdateFilter(allow, block []string) { + globalMetrics.Load().(*Metrics).UpdateFilter(allow, block) +} + +// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels +// and blockedLabels - when not nil - allow filtering of labels in order to +// block/allow globally labels (especially useful when having large number of +// values for a given label). 
See README.md for more information about usage. +func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) { + globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels) +} diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go new file mode 100644 index 00000000..1bfffce4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd.go @@ -0,0 +1,184 @@ +package metrics + +import ( + "bytes" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // statsdMaxLen is the maximum size of a packet + // to send to statsd + statsdMaxLen = 1400 +) + +// StatsdSink provides a MetricSink that can be used +// with a statsite or statsd metrics server. It uses +// only UDP packets, while StatsiteSink uses TCP. +type StatsdSink struct { + addr string + metricQueue chan string +} + +// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer 
ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 00000000..6c0d284d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. 
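As a sketch of how the URL-based constructor below ties into NewMetricSinkFromURL from sink.go, a caller might wire a statsite sink up like this (the address and service name are placeholder assumptions):

```go
package main

import (
	"log"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// The "statsite://" scheme routes through NewStatsiteSinkFromURL;
	// host:port becomes the TCP address the sink flushes to.
	sink, err := metrics.NewMetricSinkFromURL("statsite://127.0.0.1:8125")
	if err != nil {
		log.Fatal(err)
	}

	// Install the sink as the shared global instance and emit through it.
	if _, err := metrics.NewGlobal(metrics.DefaultConfig("myapp"), sink); err != nil {
		log.Fatal(err)
	}
	metrics.IncrCounter([]string{"requests"}, 1)
}
```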
+func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 00000000..a5df10e6 --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/README.md b/vendor/github.com/armon/go-radix/README.md new file mode 100644 index 00000000..26f42a28 --- /dev/null +++ b/vendor/github.com/armon/go-radix/README.md @@ -0,0 +1,38 @@ +go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) +========= + +Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +For an immutable variant, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
+ +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := radix.New() +r.Insert("foo", 1) +r.Insert("bar", 2) +r.Insert("foobar", 2) + +// Find the longest prefix match +m, _, _ := r.LongestPrefix("foozip") +if m != "foo" { + panic("should be foo") +} +``` + diff --git a/vendor/github.com/armon/go-radix/go.mod b/vendor/github.com/armon/go-radix/go.mod new file mode 100644 index 00000000..4336aa29 --- /dev/null +++ b/vendor/github.com/armon/go-radix/go.mod @@ -0,0 +1 @@ +module github.com/armon/go-radix diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 00000000..e2bb22eb --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,540 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *node) isLeaf() bool { + return n.leaf != nil +} + +func (n *node) addEdge(e edge) { + n.edges = append(n.edges, e) + n.edges.Sort() +} + +func (n *node) updateEdge(label byte, node *node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + n.edges[idx].node = node + return + } + panic("replacing missing edge") +} + +func (n *node) getEdge(label byte) *node { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return n.edges[idx].node + } + return nil +} + +func (n *node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} + +// Tree implements a radix tree. This can be treated as a +// Dictionary abstract data type. 
The main advantage over +// a standard hash map is prefix-based lookups and +// ordered iteration. +type Tree struct { + root *node + size int +} + +// New returns an empty Tree +func New() *Tree { + return NewFromMap(nil) +} + +// NewFromMap returns a new tree containing the keys +// from an existing map +func NewFromMap(m map[string]interface{}) *Tree { + t := &Tree{root: &node{}} + for k, v := range m { + t.Insert(k, v) + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// longestPrefix finds the length of the shared prefix +// of two strings +func longestPrefix(k1, k2 string) int { + max := len(k1) + if l := len(k2); l < max { + max = l + } + var i int + for i = 0; i < max; i++ { + if k1[i] != k2[i] { + break + } + } + return i +} + +// Insert is used to add a new entry or update +// an existing entry. Returns whether an existing entry was updated. +func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { + var parent *node + n := t.root + search := s + for { + // Handle key exhaustion + if len(search) == 0 { + if n.isLeaf() { + old := n.leaf.val + n.leaf.val = v + return old, true + } + + n.leaf = &leafNode{ + key: s, + val: v, + } + t.size++ + return nil, false + } + + // Look for the edge + parent = n + n = n.getEdge(search[0]) + + // No edge, create one + if n == nil { + e := edge{ + label: search[0], + node: &node{ + leaf: &leafNode{ + key: s, + val: v, + }, + prefix: search, + }, + } + parent.addEdge(e) + t.size++ + return nil, false + } + + // Determine longest prefix of the search key on match + commonPrefix := longestPrefix(search, n.prefix) + if commonPrefix == len(n.prefix) { + search = search[commonPrefix:] + continue + } + + // Split the node + t.size++ + child := &node{ + prefix: search[:commonPrefix], + } + parent.updateEdge(search[0], child) + + // Restore the existing node + child.addEdge(edge{ + label: n.prefix[commonPrefix], + node: n, + }) + n.prefix = n.prefix[commonPrefix:] + + // Create a new leaf node + leaf := &leafNode{ + key: s, + val: v, + } + + // If the new key is a subset, add it to this node + search = search[commonPrefix:] + if len(search) == 0 { + child.leaf = leaf + return nil, false + } + + // Create a new edge for the node + child.addEdge(edge{ + label: search[0], + node: &node{ + leaf: leaf, + prefix: search, + }, + }) + return nil, false + } +} + +// Delete is used to delete a key, returning the previous +// value and if it was deleted +func (t *Tree) Delete(s string) (interface{}, bool) { + var parent *node + var label byte + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if !n.isLeaf() { + break + } + goto DELETE + } + + // Look for an edge + parent = n + label = search[0] + n = n.getEdge(label) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false + +DELETE: + // Delete the leaf + leaf := n.leaf + n.leaf = nil + t.size-- + + // Check if we should delete this node from the parent + if parent != nil && len(n.edges) == 0 { + parent.delEdge(label) + } + + // Check if we should merge this node + if n != t.root && len(n.edges) == 1 { + n.mergeChild() + } + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + + return leaf.val, true +} + +// DeletePrefix is used to delete the subtree under a prefix +// 
Returns how many nodes were deleted +// Use this to delete large subtrees efficiently +func (t *Tree) DeletePrefix(s string) int { + return t.deletePrefix(nil, t.root, s) +} + +// deletePrefix does a recursive deletion +func (t *Tree) deletePrefix(parent, n *node, prefix string) int { + // Check for key exhaustion + if len(prefix) == 0 { + // Remove the leaf node + subTreeSize := 0 + // recursively walk from all edges of the node to be deleted + recursiveWalk(n, func(s string, v interface{}) bool { + subTreeSize++ + return false + }) + if n.isLeaf() { + n.leaf = nil + } + n.edges = nil // deletes the entire subtree + + // Check if we should merge the parent's other child + if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() { + parent.mergeChild() + } + t.size -= subTreeSize + return subTreeSize + } + + // Look for an edge + label := prefix[0] + child := n.getEdge(label) + if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) { + return 0 + } + + // Consume the search prefix + if len(child.prefix) > len(prefix) { + prefix = prefix[len(prefix):] + } else { + prefix = prefix[len(child.prefix):] + } + return t.deletePrefix(n, child, prefix) +} + +func (n *node) mergeChild() { + e := n.edges[0] + child := e.node + n.prefix = n.prefix + child.prefix + n.leaf = child.leaf + n.edges = child.edges +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Tree) Get(s string) (interface{}, bool) { + n := t.root + search := s + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.val, true + } + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return nil, false +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
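Before the longest-prefix lookup below, a short hypothetical session showing how Get, WalkPrefix, and DeletePrefix combine (the keys and values are illustrative only):

```go
package main

import (
	"fmt"

	radix "github.com/armon/go-radix"
)

func main() {
	r := radix.NewFromMap(map[string]interface{}{
		"alpha":  1,
		"alpine": 2,
		"beta":   3,
	})

	if v, ok := r.Get("alpine"); ok {
		fmt.Println("alpine =", v) // alpine = 2
	}

	// Visit every key under "alp" in sorted order; returning false keeps walking.
	r.WalkPrefix("alp", func(k string, v interface{}) bool {
		fmt.Println(k, v)
		return false
	})

	// Drop the whole "alp" subtree in one call; the return value is the
	// number of entries removed.
	fmt.Println(r.DeletePrefix("alp"), "deleted,", r.Len(), "remaining")
}
```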
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { + var last *leafNode + n := t.root + search := s + for { + // Look for a leaf node + if n.isLeaf() { + last = n.leaf + } + + // Check for key exhaustion + if len(search) == 0 { + break + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + if last != nil { + return last.key, last.val, true + } + return "", nil, false +} + +// Minimum is used to return the minimum value in the tree +func (t *Tree) Minimum() (string, interface{}, bool) { + n := t.root + for { + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + if len(n.edges) > 0 { + n = n.edges[0].node + } else { + break + } + } + return "", nil, false +} + +// Maximum is used to return the maximum value in the tree +func (t *Tree) Maximum() (string, interface{}, bool) { + n := t.root + for { + if num := len(n.edges); num > 0 { + n = n.edges[num-1].node + continue + } + if n.isLeaf() { + return n.leaf.key, n.leaf.val, true + } + break + } + return "", nil, false +} + +// Walk is used to walk the tree +func (t *Tree) Walk(fn WalkFn) { + recursiveWalk(t.root, fn) +} + +// WalkPrefix is used to walk the tree under a prefix +func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { + n := t.root + search := prefix + for { + // Check for key exhaustion + if len(search) == 0 { + recursiveWalk(n, fn) + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + break + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + + } else if strings.HasPrefix(n.prefix, search) { + // Child may be under our search prefix + recursiveWalk(n, fn) + return + } else { + break + } + } + +} + +// WalkPath is used to walk the tree, but only visiting nodes +// from the root down to a given leaf. Where WalkPrefix walks +// all the entries *under* the given prefix, this walks the +// entries *above* the given prefix. +func (t *Tree) WalkPath(path string, fn WalkFn) { + n := t.root + search := path + for { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return + } + + // Check for key exhaustion + if len(search) == 0 { + return + } + + // Look for an edge + n = n.getEdge(search[0]) + if n == nil { + return + } + + // Consume the search prefix + if strings.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } +} + +// recursiveWalk is used to do a pre-order walk of a node +// recursively. 
Returns true if the walk should be aborted +func recursiveWalk(n *node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} + +// ToMap is used to walk the tree and convert it into a map +func (t *Tree) ToMap() map[string]interface{} { + out := make(map[string]interface{}, t.size) + t.Walk(func(k string, v interface{}) bool { + out[k] = v + return false + }) + return out +} diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt index 5f14d116..899129ec 100644 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 00000000..44aa125a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,86 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allow paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. 
+// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index 56fdfc2b..99849c0e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -138,8 +138,27 @@ type RequestFailure interface { RequestID() string } -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. +// NewRequestFailure returns a wrapped error with additional information for +// the request status code and service request ID. +// +// Should be used to wrap all requests which involve service requests, even if +// the request failed without a service response but had an HTTP status code +// that may be meaningful. func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { return newRequestError(err, statusCode, reqID) } + +// UnmarshalError provides the interface for the SDK failing to unmarshal data. +type UnmarshalError interface { + awsError + Bytes() []byte +} + +// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding +// the bytes that fail to unmarshal to the error. +func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError { + return &unmarshalError{ + awsError: New("UnmarshalError", msg, err), + bytes: bytes, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 0202a008..a2c5817c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -1,6 +1,9 @@ package awserr -import "fmt" +import ( + "encoding/hex" + "fmt" +) // SprintError returns a string of the formatted error code. // @@ -119,6 +122,7 @@ type requestError struct { awsError statusCode int requestID string + bytes []byte } // newRequestError returns a wrapped error with additional information for @@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error { return []error{r.OrigErr()} } +type unmarshalError struct { + awsError + bytes []byte +} + +// Error returns the string representation of the error. +// Satisfies the error interface. +func (e unmarshalError) Error() string { + extra := hex.Dump(e.bytes) + return SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +// Alias for Error to satisfy the stringer interface. 
+func (e unmarshalError) String() string { + return e.Error() +} + +// Bytes returns the bytes that failed to unmarshal. +func (e unmarshalError) Bytes() []byte { + return e.bytes +} + // An error list that satisfies the golang interface type errorList []error diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go index 59fa4a55..142a7a01 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -15,7 +15,7 @@ func DeepEqual(a, b interface{}) bool { rb := reflect.Indirect(reflect.ValueOf(b)) if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal + // If the elements are both nil, and of the same type they are equal // If they are of different types they are not equal return reflect.TypeOf(a) == reflect.TypeOf(b) } else if raValid != rbValid { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go index b6432f1a..645df245 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -23,28 +23,27 @@ func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { case reflect.Struct: buf.WriteString("{\n") - names := []string{} for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { + ft := v.Type().Field(i) + fv := v.Field(i) + + if ft.Name[0:1] == strings.ToLower(ft.Name[0:1]) { continue // ignore unexported fields } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + if (fv.Kind() == reflect.Ptr || fv.Kind() == reflect.Slice) && fv.IsNil() { continue // ignore unset fields } - names = append(names, name) - } - for i, n := range names { - val := v.FieldByName(n) buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) + buf.WriteString(ft.Name + ": ") - if i < len(names)-1 { - buf.WriteString(",\n") + if tag := ft.Tag.Get("sensitive"); tag == "true" { + buf.WriteString("<sensitive>") + } else { + stringValue(fv, indent+2, buf) } + + buf.WriteString(",\n") } buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 17fc76a0..70960538 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -2,7 +2,6 @@ package client import ( "fmt" - "net/http/httputil" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/client/metadata" @@ -16,6 +15,12 @@ type Config struct { Endpoint string SigningRegion string SigningName string + + // States that the signing name did not come from a modeled source but + // was derived based on other data. Used by service client constructors + // to determine if the signing name can be overridden based on metadata the + // service has. 
+ SigningNameDerived bool } // ConfigProvider provides a generic way for a service client to receive @@ -46,7 +51,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op svc := &Client{ Config: cfg, ClientInfo: info, - Handlers: handlers, + Handlers: handlers.Copy(), } switch retryer, ok := cfg.Retryer.(request.Retryer); { @@ -86,61 +91,6 @@ func (c *Client) AddDebugHandlers() { return } - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -const logReqErrMsg = `DEBUG ERROR: Request %s/%s: ----[ REQUEST DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.ResetBody() - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -const logRespErrMsg = `DEBUG ERROR: Response %s/%s: ----[ RESPONSE DUMP ERROR ]----------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody) - if err != nil { - r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) - return - } - - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) + c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler) + c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go index 43a3676b..a397b0d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -1,11 +1,11 @@ package client import ( - "math/rand" - "sync" + "strconv" "time" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkrand" ) // DefaultRetryer implements basic retry logic using exponential backoff for @@ -15,11 +15,11 @@ import ( // the MaxRetries method: // // type retryer struct { -// service.DefaultRetryer +// client.DefaultRetryer // } // // // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } +// func (d retryer) MaxRetries() int { return 100 } type DefaultRetryer struct { NumMaxRetries int } @@ -30,31 +30,39 @@ func (d DefaultRetryer) MaxRetries() int { return d.NumMaxRetries } -var seededRand = 
rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - // RetryRules returns the delay duration before retrying this request again func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { // Set the upper limit of delay in retrying at ~five minutes minTime := 30 throttle := d.shouldThrottle(r) if throttle { + if delay, ok := getRetryDelay(r); ok { + return delay + } + minTime = 500 } retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { + if throttle && retryCount > 8 { retryCount = 8 + } else if retryCount > 13 { + retryCount = 13 } - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + delay := (1 << uint(retryCount)) * (sdkrand.SeededRand.Intn(minTime) + minTime) return time.Duration(delay) * time.Millisecond } // ShouldRetry returns true if the request should be retried. func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - if r.HTTPResponse.StatusCode >= 500 { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 && r.HTTPResponse.StatusCode != 501 { return true } return r.IsErrorRetryable() || d.shouldThrottle(r) @@ -62,29 +70,47 @@ func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { // ShouldThrottle returns true if the request should be throttled. func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true + switch r.HTTPResponse.StatusCode { + case 429: + case 502: + case 503: + case 504: + default: + return r.IsErrorThrottle() } - return r.IsErrorThrottle() -} -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source + return true } -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return +// This will look in the Retry-After header, RFC 7231, for how long +// it will wait before attempting another request +func getRetryDelay(r *request.Request) (time.Duration, bool) { + if !canUseRetryAfterHeader(r) { + return 0, false + } + + delayStr := r.HTTPResponse.Header.Get("Retry-After") + if len(delayStr) == 0 { + return 0, false + } + + delay, err := strconv.Atoi(delayStr) + if err != nil { + return 0, false + } + + return time.Duration(delay) * time.Second, true } -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() +// Will look at the status code to see if the retry header pertains to +// the status code. 
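The doc comment on DefaultRetryer above sketches the embedding pattern for overriding MaxRetries; expanded into a runnable form (the retryer type is the hypothetical wrapper from that comment):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/client"
    )

    // retryer embeds client.DefaultRetryer and overrides one method.
    type retryer struct {
        client.DefaultRetryer
    }

    // This implementation always reports 100 max retries.
    func (d retryer) MaxRetries() int { return 100 }

    func main() {
        r := retryer{client.DefaultRetryer{NumMaxRetries: 3}}
        fmt.Println(r.MaxRetries()) // 100; the override shadows the embedded value
    }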
+func canUseRetryAfterHeader(r *request.Request) bool { + switch r.HTTPResponse.StatusCode { + case 429: + case 503: + default: + return false + } + + return true } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 00000000..7b5e1276 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,190 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. + Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent +// to a service. Will include the HTTP request body if the LogLevel of the +// request matches LogDebugWithHTTPBody. +var LogHTTPRequestHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequest", + Fn: logRequest, +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + bodySeekable := aws.IsReaderSeekable(r.Body) + + b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + if !bodySeekable { + r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body)) + } + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent +// to a service. Will only log the HTTP request's headers. The request payload +// will not be read. 
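A minimal sketch of how a caller would switch these logging handlers on; it assumes only the standard session package, and relies on AddDebugHandlers wiring the handlers in when the configured log level matches:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // LogDebugWithHTTPBody makes the handlers above dump bodies as well
        // as headers; plain LogDebug logs the wire messages without bodies.
        sess := session.Must(session.NewSession(&aws.Config{
            LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
        }))

        fmt.Println(sess.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)) // true
    }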
+var LogHTTPRequestHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogRequestHeader", + Fn: logRequestHeader, +} + +func logRequestHeader(r *request.Request) { + b, err := httputil.DumpRequestOut(r.HTTPRequest, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +// LogHTTPResponseHandler is a SDK request handler to log the HTTP response +// received from a service. Will include the HTTP response body if the LogLevel +// of the request matches LogDebugWithHTTPBody. +var LogHTTPResponseHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponse", + Fn: logResponse, +} + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + + if r.HTTPResponse == nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil")) + return + } + + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + if logBody { + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + } + + handlerFn := func(req *request.Request) { + b, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(fmt.Sprintf(logRespMsg, + req.ClientInfo.ServiceName, req.Operation.Name, string(b))) + + if logBody { + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, + req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} + +// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP +// response received from a service. Will only log the HTTP response's headers. +// The response payload will not be read. 
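The teeReaderCloser above is the standard io.TeeReader pattern: every byte the consumer reads from the response body is mirrored into the logger's buffer. A stand-alone sketch of the same idea, with illustrative values:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        body := ioutil.NopCloser(strings.NewReader("response payload"))
        var logged bytes.Buffer

        // Reads from tee consume body and copy into logged at the same time,
        // so logging does not steal the payload from the real consumer.
        tee := io.TeeReader(body, &logged)

        out, _ := ioutil.ReadAll(tee)
        fmt.Printf("consumer saw: %q\n", out)
        fmt.Printf("logger saw:   %q\n", logged.String())
    }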
+var LogHTTPResponseHeaderHandler = request.NamedHandler{ + Name: "awssdk.client.LogResponseHeader", + Fn: logResponseHeader, +} + +func logResponseHeader(r *request.Request) { + if r.Config.Logger == nil { + return + } + + b, err := httputil.DumpResponse(r.HTTPResponse, false) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, + r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + r.Config.Logger.Log(fmt.Sprintf(logRespMsg, + r.ClientInfo.ServiceName, r.Operation.Name, string(b))) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go index 4778056d..920e9fdd 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -3,6 +3,7 @@ package metadata // ClientInfo wraps immutable data from the client.Client structure. type ClientInfo struct { ServiceName string + ServiceID string APIVersion string Endpoint string SigningName string diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index f5a7c379..10634d17 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -18,7 +18,7 @@ const UseServiceDefaultRetries = -1 type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the defaults.DefaultConfig tructure. +// all clients will use the defaults.DefaultConfig structure. // // // Create Session with MaxRetry configuration to be shared by multiple // // service clients. @@ -45,21 +45,28 @@ type Config struct { // that overrides the default generated endpoint for a client. Set this // to `""` to use the default generated endpoint. // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. + // Note: You must still provide a `Region` value when specifying an + // endpoint for a client. Endpoint *string // The resolver to use for looking up endpoints for AWS service clients // to use based on region. EndpointResolver endpoints.Resolver + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. A full list of regions is found in the "Regions and Endpoints" // document. // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints + // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS + // Regions and Endpoints. Region *string // Set this to `true` to disable SSL when sending requests. Defaults @@ -88,7 +95,7 @@ type Config struct { // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. + // the client.DefaultRetryer will be used. 
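As the Retryer field docs here and just below note, a custom retryer set on the Config takes precedence over MaxRetries; a small sketch using the request.WithRetryer helper:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        // request.WithRetryer sets Config.Retryer; the MaxRetries value set
        // first is then ignored, per the field docs.
        cfg := request.WithRetryer(
            aws.NewConfig().WithMaxRetries(10),
            client.DefaultRetryer{NumMaxRetries: 2},
        )

        if retryer, ok := cfg.Retryer.(request.Retryer); ok {
            fmt.Println(retryer.MaxRetries()) // 2
        }
    }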
// // When both Retryer and MaxRetries are non-nil, the former is used and // the latter ignored. @@ -113,9 +120,10 @@ type Config struct { // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets + // Note: This configuration option is specific to the Amazon S3 service. + // + // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // for Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` @@ -144,6 +152,15 @@ type Config struct { // with accelerate. S3UseAccelerate *bool + // S3DisableContentMD5Validation config option is temporarily disabled, + // For S3 GetObject API calls, #1837. + // + // Set this to `true` to disable the S3 service client from automatically + // adding the ContentMD5 to S3 Object Put and Upload API calls. This option + // will also disable the SDK from performing object ContentMD5 validation + // on GetObject API calls. + S3DisableContentMD5Validation *bool + // Set this to `true` to disable the EC2Metadata client from overriding the // default http.Client's Timeout. This is helpful if you do not want the // EC2Metadata client to create a new http.Client. This options is only @@ -161,7 +178,7 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool - // Instructs the endpiont to be generated for a service client to + // Instructs the endpoint to be generated for a service client to // be the dual stack endpoint. The dual stack endpoint will support // both IPv4 and IPv6 addressing. // @@ -187,6 +204,10 @@ type Config struct { // request delays. This value should only be used for testing. To adjust // the delay of a request see the aws/client.DefaultRetryer and // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. SleepDelay func(time.Duration) // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. @@ -203,6 +224,28 @@ type Config struct { // Key: aws.String("//foo//bar//moo"), // }) DisableRestProtocolURICleaning *bool + + // EnableEndpointDiscovery will allow for endpoint discovery on operations that + // have the definition in its model. By default, endpoint discovery is off. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // EnableEndpointDiscovery: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("/foo/bar/moo"), + // }) + EnableEndpointDiscovery *bool + + // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing + // request endpoint hosts with modeled information. + // + // Disabling this feature is useful when you want to use local endpoints + // for testing that do not support the modeled host prefix pattern. 
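A sketch of the two new options set together through the chaining helpers defined a little further down in this file; the values are illustrative only:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        cfg := aws.NewConfig().
            WithEndpointDiscovery(true).        // opt in; discovery is off by default
            WithDisableEndpointHostPrefix(true) // e.g. for local test endpoints

        fmt.Println(*cfg.EnableEndpointDiscovery, *cfg.DisableEndpointHostPrefix)
    }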
+ DisableEndpointHostPrefix *bool } // NewConfig returns a new Config pointer that can be chained with builder @@ -325,6 +368,15 @@ func (c *Config) WithS3Disable100Continue(disable bool) *Config { func (c *Config) WithS3UseAccelerate(enable bool) *Config { c.S3UseAccelerate = &enable return c + +} + +// WithS3DisableContentMD5Validation sets a config +// S3DisableContentMD5Validation value returning a Config pointer for chaining. +func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config { + c.S3DisableContentMD5Validation = &enable + return c + } // WithUseDualStack sets a config UseDualStack value returning a Config @@ -348,6 +400,19 @@ func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { return c } +// WithEndpointDiscovery will set whether or not to use endpoint discovery. +func (c *Config) WithEndpointDiscovery(t bool) *Config { + c.EnableEndpointDiscovery = &t + return c +} + +// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix +// when making requests. +func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config { + c.DisableEndpointHostPrefix = &t + return c +} + // MergeIn merges the passed in configs into the existing config object. func (c *Config) MergeIn(cfgs ...*Config) { for _, other := range cfgs { @@ -424,6 +489,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.S3DisableContentMD5Validation != nil { + dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation + } + if other.UseDualStack != nil { dst.UseDualStack = other.UseDualStack } @@ -439,6 +508,18 @@ func mergeInConfig(dst *Config, other *Config) { if other.DisableRestProtocolURICleaning != nil { dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } + + if other.EnableEndpointDiscovery != nil { + dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery + } + + if other.DisableEndpointHostPrefix != nil { + dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go new file mode 100644 index 00000000..2866f9a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go @@ -0,0 +1,37 @@ +// +build !go1.9 + +package aws + +import "time" + +// Context is a copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. 
Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go new file mode 100644 index 00000000..3718b26e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go @@ -0,0 +1,11 @@ +// +build go1.9 + +package aws + +import "context" + +// Context is an alias of the Go stdlib's context.Context interface. +// It can be used within the SDK's API operation "WithContext" methods. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context = context.Context diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go new file mode 100644 index 00000000..66c5945d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go @@ -0,0 +1,56 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to +// provide a 1.6 and 1.5 safe version of context that is compatible with Go +// 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go new file mode 100644 index 00000000..9c29f29a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go @@ -0,0 +1,20 @@ +// +build go1.7 + +package aws + +import "context" + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. 
+// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background() +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return context.Background() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go new file mode 100644 index 00000000..304fd156 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go @@ -0,0 +1,24 @@ +package aws + +import ( + "time" +) + +// SleepWithContext will wait until the timer duration expires, or until the +// context is canceled, whichever happens first. If the context is canceled the +// Context's error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index 3b73a7da..ff5d58e0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// SecondsTimeValue converts an int64 pointer to a time.Time value +// representing seconds since Epoch or time.Time{} if the pointer is nil. +func SecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix((*v / 1000), 0) + } + return time.Time{} +} + +// MillisecondsTimeValue converts an int64 pointer to a time.Time value +// representing milliseconds since Epoch or time.Time{} if the pointer is nil. +func MillisecondsTimeValue(v *int64) time.Time { + if v != nil { + return time.Unix(0, (*v * 1000000)) + } + return time.Time{} +} + // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // The result is undefined if the Unix time cannot be represented by an int64. // Which includes calling TimeUnixMilli on a zero Time is undefined. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go index 8a7bafc7..f8853d78 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -3,12 +3,10 @@ package corehandlers import ( "bytes" "fmt" - "io" "io/ioutil" "net/http" "net/url" "regexp" - "runtime" "strconv" "time" @@ -27,7 +25,7 @@ type lener interface { // or will use the HTTPRequest.Header's "Content-Length" if defined. If unable // to determine request body length and no "Content-Length" was specified it will panic. // -// The Content-Length will only be aded to the request if the length of the body +// The Content-Length will only be added to the request if the length of the body // is greater than 0. If the body is empty or the current `Content-Length` // header is <= 0, the header will also be stripped. 
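A runnable sketch of the aws.SleepWithContext helper added above; the 50ms deadline is an arbitrary example value:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        // A one second "retry delay" loses the race against the 50ms deadline,
        // so the sleep is cut short and the context's error comes back.
        err := aws.SleepWithContext(ctx, time.Second)
        fmt.Println(err) // context deadline exceeded
    }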
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { @@ -36,18 +34,13 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { length, _ = strconv.ParseInt(slength, 10, 64) } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") + if r.Body != nil { + var err error + length, err = aws.SeekerLen(r.Body) + if err != nil { + r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err) + return + } } } @@ -60,19 +53,12 @@ var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLen } }} -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. -var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - var reStatusCode = regexp.MustCompile(`^(\d{3})`) // ValidateReqSigHandler is a request handler to ensure that the request's // signature doesn't expire before it is sent. This can happen when a request -// is built and signed signficantly before it is sent. Or significant delays -// occur whne retrying requests that would cause the signature to expire. +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. var ValidateReqSigHandler = request.NamedHandler{ Name: "core.ValidateReqSigHandler", Fn: func(r *request.Request) { @@ -86,9 +72,9 @@ var ValidateReqSigHandler = request.NamedHandler{ signedTime = r.LastSignedAt } - // 10 minutes to allow for some clock skew/delays in transmission. + // 5 minutes to allow for some clock skew/delays in transmission. // Would be improved with aws/aws-sdk-go#423 - if signedTime.Add(10 * time.Minute).After(time.Now()) { + if signedTime.Add(5 * time.Minute).After(time.Now()) { return } @@ -98,44 +84,95 @@ var ValidateReqSigHandler = request.NamedHandler{ } // SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. 
- if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), + StatusCode: int(code), + Status: http.StatusText(int(code)), Body: ioutil.NopCloser(bytes.NewReader([]byte{})), } + return } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable } -}} + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} // ValidateResponseHandler is a request handler to validate service response. 
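The reworked AfterRetryHandler just below consults Config.EnforceShouldRetryCheck; a minimal sketch of opting in:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        cfg := aws.NewConfig()
        // Force AfterRetryHandler to consult the retryer's ShouldRetry even
        // when an earlier handler already set request.Retryable.
        cfg.EnforceShouldRetryCheck = aws.Bool(true)

        fmt.Println(aws.BoolValue(cfg.EnforceShouldRetryCheck)) // true
    }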
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { @@ -150,13 +187,22 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { // If one of the other handlers already set the retry state // we don't want to override it based on the service's state - if r.Retryable == nil { + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { r.Retryable = aws.Bool(r.ShouldRetry(r)) } if r.WillRetry() { r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } // when the expired token exception occurs the credentials // need to be expired locally so that the next request to diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go new file mode 100644 index 00000000..ab69c7a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go @@ -0,0 +1,37 @@ +package corehandlers + +import ( + "os" + "runtime" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version +// to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +const execEnvVar = `AWS_EXECUTION_ENV` +const execEnvUAKey = `exec-env` + +// AddHostExecEnvUserAgentHander is a request handler appending the SDK's +// execution environment to the user agent. +// +// If the environment variable AWS_EXECUTION_ENV is set, its value will be +// appended to the user agent string. +var AddHostExecEnvUserAgentHander = request.NamedHandler{ + Name: "core.AddHostExecEnvUserAgentHander", + Fn: func(r *request.Request) { + v := os.Getenv(execEnvVar) + if len(v) == 0 { + return + } + + request.AddToUserAgent(r, execEnvUAKey+"/"+v) + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 6efc77bf..3ad1e798 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -9,11 +9,9 @@ var ( // providers in the ChainProvider. // // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly + // aws.Config.CredentialsChainVerboseErrors to true. ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. + `no valid providers in chain. Deprecated. 
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, nil) ) @@ -39,16 +37,18 @@ var ( // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain // -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{ +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ // Client: ec2metadata.New(sess), // }, // }) // // // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) // type ChainProvider struct { Providers []Provider diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 7b8ebf5f..83bbc311 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -14,7 +14,7 @@ // // Example of using the environment variable credentials. // -// creds := NewEnvCredentials() +// creds := credentials.NewEnvCredentials() // // // Retrieve the credentials value // credValue, err := creds.Get() @@ -26,7 +26,7 @@ // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewCredentials(&EC2RoleProvider{}) +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) // creds.Expire() // credsValue, err := creds.Get() // // New credentials will be retrieved instead of from cache. @@ -43,14 +43,17 @@ // func (m *MyProvider) Retrieve() (Value, error) {...} // func (m *MyProvider) IsExpired() bool {...} // -// creds := NewCredentials(&MyProvider{}) +// creds := credentials.NewCredentials(&MyProvider{}) // credValue, err := creds.Get() // package credentials import ( + "fmt" "sync" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" ) // AnonymousCredentials is an empty Credential object that can be used as @@ -60,10 +63,10 @@ import ( // when making service API calls. For example, when accessing public // s3 buckets. // -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) // // Access public S3 buckets. -// -// @readonly var AnonymousCredentials = NewStaticCredentials("", "", "") // A Value is the AWS credentials value for individual credential fields. @@ -88,7 +91,7 @@ type Value struct { // The Provider should not need to implement its own mutexes, because // that will be managed by Credentials. type Provider interface { - // Refresh returns nil if it successfully retrieved the value. + // Retrieve returns nil if it successfully retrieved the value. // Error is returned if the value were not obtainable, or empty. Retrieve() (Value, error) @@ -97,6 +100,35 @@ type Provider interface { IsExpired() bool } +// An Expirer is an interface that Providers can implement to expose the expiration +// time, if known. If the Provider cannot accurately provide this info, +// it should not implement this interface. 
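A sketch of a custom provider that implements both Provider and the Expirer interface defined just below; staticExpiringProvider and its field values are hypothetical:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    type staticExpiringProvider struct {
        creds   credentials.Value
        expires time.Time
    }

    func (p *staticExpiringProvider) Retrieve() (credentials.Value, error) {
        p.creds.ProviderName = "staticExpiringProvider"
        return p.creds, nil
    }

    func (p *staticExpiringProvider) IsExpired() bool {
        return time.Now().After(p.expires)
    }

    // ExpiresAt satisfies credentials.Expirer.
    func (p *staticExpiringProvider) ExpiresAt() time.Time { return p.expires }

    func main() {
        creds := credentials.NewCredentials(&staticExpiringProvider{
            creds:   credentials.Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"},
            expires: time.Now().Add(15 * time.Minute),
        })
        if _, err := creds.Get(); err == nil {
            at, _ := creds.ExpiresAt()
            fmt.Println("expires at:", at)
        }
    }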
+type Expirer interface { + // The time at which the credentials are no longer valid + ExpiresAt() time.Time +} + +// An ErrorProvider is a stub credentials provider that always returns an error. +// It is used by the SDK when constructing a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Value returned from Retrieve + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + // A Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. // @@ -135,13 +167,19 @@ func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { // IsExpired returns if the credentials are expired. func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now + curTime := e.CurrentTime + if curTime == nil { + curTime = time.Now } - return e.expiration.Before(e.CurrentTime()) + return e.expiration.Before(curTime()) } -// A Credentials provides synchronous safe retrieval of AWS credentials Value. +// ExpiresAt returns the expiration time of the credential +func (e *Expiry) ExpiresAt() time.Time { + return e.expiration +} + +// A Credentials provides concurrency safe retrieval of AWS credentials Value. // Credentials will cache the credentials value until they expire. Once the value // expires the next Get will attempt to retrieve valid credentials. // @@ -155,7 +193,8 @@ func (e *Expiry) IsExpired() bool { type Credentials struct { creds Value forceRefresh bool - m sync.Mutex + + m sync.RWMutex provider Provider } @@ -178,6 +217,17 @@ func NewCredentials(provider Provider) *Credentials { // If Credentials.Expire() was called the credentials Value will be force // expired, and the next call to Get() will cause them to be refreshed. func (c *Credentials) Get() (Value, error) { + // Check the cached credentials first with just the read lock. + c.m.RLock() + if !c.isExpired() { + creds := c.creds + c.m.RUnlock() + return creds, nil + } + c.m.RUnlock() + + // Credentials are expired, need to retrieve the credentials taking the full + // lock. c.m.Lock() defer c.m.Unlock() @@ -211,8 +261,8 @@ func (c *Credentials) Expire() { // If the Credentials were forced to be expired with Expire() this will // reflect that override. func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() + c.m.RLock() + defer c.m.RUnlock() return c.isExpired() } @@ -221,3 +271,23 @@ func (c *Credentials) IsExpired() bool { func (c *Credentials) isExpired() bool { return c.forceRefresh || c.provider.IsExpired() } + +// ExpiresAt provides access to the functionality of the Expirer interface of +// the underlying Provider, if it supports that interface. Otherwise, it returns +// an error. 
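The new ErrorProvider above is meant to stand in when constructing a real provider fails; a small sketch of how the deferred error surfaces (the error text is illustrative):

    package main

    import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Stand-in for a provider whose construction failed; the error only
        // surfaces when the credentials are first used.
        creds := credentials.NewCredentials(credentials.ErrorProvider{
            Err:          errors.New("profile not found"),
            ProviderName: "ExampleErrorProvider",
        })

        _, err := creds.Get()
        fmt.Println(err) // profile not found
    }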
+func (c *Credentials) ExpiresAt() (time.Time, error) { + c.m.RLock() + defer c.m.RUnlock() + + expirer, ok := c.provider.(Expirer) + if !ok { + return time.Time{}, awserr.New("ProviderNotExpirer", + fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.ProviderName), + nil) + } + if c.forceRefresh { + // set expiration time to the distant past + return time.Time{}, nil + } + return expirer.ExpiresAt(), nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index c3974952..43d4ed38 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -4,7 +4,6 @@ import ( "bufio" "encoding/json" "fmt" - "path" "strings" "time" @@ -12,6 +11,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // ProviderName provides a name of EC2Role provider @@ -125,7 +126,7 @@ type ec2RoleCredRespBody struct { Message string } -const iamSecurityCredsPath = "/iam/security-credentials" +const iamSecurityCredsPath = "iam/security-credentials/" // requestCredList requests a list of credentials from the EC2 service. // If there are no credentials, or there is an error making or receiving the request @@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { } if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + return nil, awserr.New(request.ErrCodeSerialization, + "failed to read EC2 instance role from metadata service", err) } return credsList, nil @@ -153,7 +155,7 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { // If the credentials cannot be found, or there is an error reading the response // and error will be returned. 
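A sketch of the EC2 role provider in use through the credentials chain, following the ChainProvider doc example earlier in this diff; it only yields credentials on an EC2 instance or when environment credentials are set:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Environment variables win; the instance role is the fallback.
        creds := credentials.NewChainCredentials([]credentials.Provider{
            &credentials.EnvProvider{},
            &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(sess)},
        })

        if v, err := creds.Get(); err == nil {
            fmt.Println("credentials from:", v.ProviderName)
        }
    }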
func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + resp, err := client.GetMetadata(sdkuri.PathJoin(iamSecurityCredsPath, credsName)) if err != nil { return ec2RoleCredRespBody{}, awserr.New("EC2RoleRequestError", @@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred respCreds := ec2RoleCredRespBody{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { return ec2RoleCredRespBody{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), err) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go index a4cec5c5..c2b2c5d6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -39,6 +39,7 @@ import ( "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" ) // ProviderName is the name of the credentials provider. @@ -65,6 +66,10 @@ type Provider struct { // // If ExpiryWindow is 0 or less it will be ignored. ExpiryWindow time.Duration + + // Optional authorization token value if set will be used as the value of + // the Authorization header of the endpoint credential request. + AuthorizationToken string } // NewProviderClient returns a credentials Provider for retrieving AWS credentials @@ -152,6 +157,9 @@ func (p *Provider) getCredentials() (*getCredentialsOutput, error) { out := &getCredentialsOutput{} req := p.Client.NewRequest(op, nil, out) req.HTTPRequest.Header.Set("Accept", "application/json") + if authToken := p.AuthorizationToken; len(authToken) != 0 { + req.HTTPRequest.Header.Set("Authorization", authToken) + } return out, req.Send() } @@ -167,7 +175,7 @@ func unmarshalHandler(r *request.Request) { out := r.Data.(*getCredentialsOutput) if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode endpoint credentials", err, ) @@ -178,11 +186,15 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() var errOut errorOutput - if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { - r.Error = awserr.New("SerializationError", - "failed to decode endpoint credentials", - err, + err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, ) + return } // Response body format is not consistent between metadata endpoints. 
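A sketch of the endpoint credentials provider with the new AuthorizationToken option; the endpoint URL and token are placeholders:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        creds := endpointcreds.NewCredentialsClient(
            *defaults.Config(),
            defaults.Handlers(),
            "http://127.0.0.1:8080/creds", // placeholder endpoint
            func(p *endpointcreds.Provider) {
                // Sent as the Authorization header per the change above.
                p.AuthorizationToken = "example-token"
            },
        )

        _, err := creds.Get()
        fmt.Println(err) // fails unless something answers at the endpoint
    }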
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index 96655bc4..54c5cf73 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -12,14 +12,10 @@ const EnvProviderName = "EnvProvider" var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. - // - // @readonly ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key // can't be found in the process's environment. - // - // @readonly ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) ) @@ -29,6 +25,7 @@ var ( // Environment variables used: // // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY type EnvProvider struct { retrieved bool diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go new file mode 100644 index 00000000..1980c8c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go @@ -0,0 +1,425 @@ +/* +Package processcreds is a credential Provider to retrieve `credential_process` +credentials. + +WARNING: The following describes a method of sourcing credentials from an external +process. This can potentially be dangerous, so proceed with caution. Other +credential providers should be preferred if at all possible. If using this +option, you should make sure that the config file is as locked down as possible +using security best practices for your operating system. + +You can use credentials from a `credential_process` in a variety of ways. + +One way is to setup your shared config file, located in the default +location, with the `credential_process` key and the command you want to be +called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable +(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file. + + [default] + credential_process = /command/to/call + +Creating a new session will use the credential process to retrieve credentials. +NOTE: If there are credentials in the profile you are using, the credential +process will not be used. + + // Initialize a session to load credentials. + sess, _ := session.NewSession(&aws.Config{ + Region: aws.String("us-east-1")}, + ) + + // Create S3 service client to use the credentials. + svc := s3.New(sess) + +Another way to use the `credential_process` method is by using +`credentials.NewCredentials()` and providing a command to be executed to +retrieve credentials: + + // Create credentials using the ProcessProvider. + creds := processcreds.NewCredentials("/path/to/command") + + // Create service client value configured for credentials. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +You can set a non-default timeout for the `credential_process` with another +constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To +set a one minute timeout: + + // Create credentials using the ProcessProvider. 
+	creds := processcreds.NewCredentialsTimeout(
+		"/path/to/command",
+		time.Duration(1) * time.Minute)
+
+If you need more control, you can set any configurable options in the
+credentials using one or more option functions. For example, you can set a two
+minute timeout, a credential duration of 60 minutes, and a maximum stdout
+buffer size of 2k.
+
+	creds := processcreds.NewCredentials(
+		"/path/to/command",
+		func(opt *ProcessProvider) {
+			opt.Timeout = time.Duration(2) * time.Minute
+			opt.Duration = time.Duration(60) * time.Minute
+			opt.MaxBufSize = 2048
+		})
+
+You can also use your own `exec.Cmd`:
+
+	// Create an exec.Cmd
+	myCommand := exec.Command("/path/to/command")
+
+	// Create credentials using your exec.Cmd and custom timeout
+	creds := processcreds.NewCredentialsCommand(
+		myCommand,
+		func(opt *processcreds.ProcessProvider) {
+			opt.Timeout = time.Duration(1) * time.Second
+		})
+*/
+package processcreds
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+const (
+	// ProviderName is the name this credentials provider will label any
+	// returned credentials Value with.
+	ProviderName = `ProcessProvider`
+
+	// ErrCodeProcessProviderParse error parsing process output
+	ErrCodeProcessProviderParse = "ProcessProviderParseError"
+
+	// ErrCodeProcessProviderVersion version error in output
+	ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
+
+	// ErrCodeProcessProviderRequired required attribute missing in output
+	ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
+
+	// ErrCodeProcessProviderExecution execution of command failed
+	ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
+
+	// errMsgProcessProviderTimeout process took longer than allowed
+	errMsgProcessProviderTimeout = "credential process timed out"
+
+	// errMsgProcessProviderProcess process error
+	errMsgProcessProviderProcess = "error in credential_process"
+
+	// errMsgProcessProviderParse problem parsing output
+	errMsgProcessProviderParse = "parse failed of credential_process output"
+
+	// errMsgProcessProviderVersion version error in output
+	errMsgProcessProviderVersion = "wrong version in process output (not 1)"
+
+	// errMsgProcessProviderMissKey missing access key id in output
+	errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
+
+	// errMsgProcessProviderMissSecret missing secret access key in output
+	errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
+
+	// errMsgProcessProviderPrepareCmd prepare of command failed
+	errMsgProcessProviderPrepareCmd = "failed to prepare command"
+
+	// errMsgProcessProviderEmptyCmd command must not be empty
+	errMsgProcessProviderEmptyCmd = "command must not be empty"
+
+	// errMsgProcessProviderPipe failed to initialize pipe
+	errMsgProcessProviderPipe = "failed to initialize pipe"
+
+	// DefaultDuration is the default amount of time in minutes that the
+	// credentials will be valid for.
+	DefaultDuration = time.Duration(15) * time.Minute
+
+	// DefaultBufSize limits buffer size from growing to an enormous
+	// amount due to a faulty process.
+	DefaultBufSize = 1024
+
+	// DefaultTimeout is the default limit on the time a process can run.
+ DefaultTimeout = time.Duration(1) * time.Minute +) + +// ProcessProvider satisfies the credentials.Provider interface, and is a +// client to retrieve credentials from a process. +type ProcessProvider struct { + staticCreds bool + credentials.Expiry + originalCommand []string + + // Expiry duration of the credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration + + // A string representing an os command that should return a JSON with + // credential information. + command *exec.Cmd + + // MaxBufSize limits memory usage from growing to an enormous + // amount due to a faulty process. + MaxBufSize int + + // Timeout limits the time a process can run. + Timeout time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// ProcessProvider. The credentials will expire every 15 minutes by default. +func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: exec.Command(command), + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsTimeout returns a pointer to a new Credentials object with +// the specified command and timeout, and default duration and max buffer size. +func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials { + p := NewCredentials(command, func(opt *ProcessProvider) { + opt.Timeout = timeout + }) + + return p +} + +// NewCredentialsCommand returns a pointer to a new Credentials object with +// the specified command, and default timeout, duration and max buffer size. +func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials { + p := &ProcessProvider{ + command: command, + Duration: DefaultDuration, + Timeout: DefaultTimeout, + MaxBufSize: DefaultBufSize, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +type credentialProcessResponse struct { + Version int + AccessKeyID string `json:"AccessKeyId"` + SecretAccessKey string + SessionToken string + Expiration *time.Time +} + +// Retrieve executes the 'credential_process' and returns the credentials. 
+func (p *ProcessProvider) Retrieve() (credentials.Value, error) { + out, err := p.executeCredentialProcess() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // Serialize and validate response + resp := &credentialProcessResponse{} + if err = json.Unmarshal(out, resp); err != nil { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderParse, + fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)), + err) + } + + if resp.Version != 1 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderVersion, + errMsgProcessProviderVersion, + nil) + } + + if len(resp.AccessKeyID) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissKey, + nil) + } + + if len(resp.SecretAccessKey) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New( + ErrCodeProcessProviderRequired, + errMsgProcessProviderMissSecret, + nil) + } + + // Handle expiration + p.staticCreds = resp.Expiration == nil + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } + + return credentials.Value{ + ProviderName: ProviderName, + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.SessionToken, + }, nil +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *ProcessProvider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// prepareCommand prepares the command to be executed. +func (p *ProcessProvider) prepareCommand() error { + + var cmdArgs []string + if runtime.GOOS == "windows" { + cmdArgs = []string{"cmd.exe", "/C"} + } else { + cmdArgs = []string{"sh", "-c"} + } + + if len(p.originalCommand) == 0 { + p.originalCommand = make([]string, len(p.command.Args)) + copy(p.originalCommand, p.command.Args) + + // check for empty command because it succeeds + if len(strings.TrimSpace(p.originalCommand[0])) < 1 { + return awserr.New( + ErrCodeProcessProviderExecution, + fmt.Sprintf( + "%s: %s", + errMsgProcessProviderPrepareCmd, + errMsgProcessProviderEmptyCmd), + nil) + } + } + + cmdArgs = append(cmdArgs, p.originalCommand...) + p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...) + p.command.Env = os.Environ() + + return nil +} + +// executeCredentialProcess starts the credential process on the OS and +// returns the results or an error. 
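
For reference while reading the execution helper below: this is the JSON shape `Retrieve` expects on the process's stdout, per the `credentialProcessResponse` struct above. The values are illustrative; `SessionToken` and `Expiration` are optional, and `Expiration` is RFC 3339 formatted (a nil expiration marks the credentials as static).

```go
// Illustrative only: what a conforming credential_process must print.
// Version must be 1; AccessKeyId and SecretAccessKey are required.
const exampleProcessOutput = `{
	"Version": 1,
	"AccessKeyId": "AKIDEXAMPLE",
	"SecretAccessKey": "wJalrXUtnFEMI/EXAMPLEKEY",
	"SessionToken": "optional-session-token",
	"Expiration": "2019-06-01T15:04:05Z"
}`
```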
+func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) { + + if err := p.prepareCommand(); err != nil { + return nil, err + } + + // Setup the pipes + outReadPipe, outWritePipe, err := os.Pipe() + if err != nil { + return nil, awserr.New( + ErrCodeProcessProviderExecution, + errMsgProcessProviderPipe, + err) + } + + p.command.Stderr = os.Stderr // display stderr on console for MFA + p.command.Stdout = outWritePipe // get creds json on process's stdout + p.command.Stdin = os.Stdin // enable stdin for MFA + + output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize)) + + stdoutCh := make(chan error, 1) + go readInput( + io.LimitReader(outReadPipe, int64(p.MaxBufSize)), + output, + stdoutCh) + + execCh := make(chan error, 1) + go executeCommand(*p.command, execCh) + + finished := false + var errors []error + for !finished { + select { + case readError := <-stdoutCh: + errors = appendError(errors, readError) + finished = true + case execError := <-execCh: + err := outWritePipe.Close() + errors = appendError(errors, err) + errors = appendError(errors, execError) + if errors != nil { + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderProcess, + errors) + } + case <-time.After(p.Timeout): + finished = true + return output.Bytes(), awserr.NewBatchError( + ErrCodeProcessProviderExecution, + errMsgProcessProviderTimeout, + errors) // errors can be nil + } + } + + out := output.Bytes() + + if runtime.GOOS == "windows" { + // windows adds slashes to quotes + out = []byte(strings.Replace(string(out), `\"`, `"`, -1)) + } + + return out, nil +} + +// appendError conveniently checks for nil before appending slice +func appendError(errors []error, err error) []error { + if err != nil { + return append(errors, err) + } + return errors +} + +func executeCommand(cmd exec.Cmd, exec chan error) { + // Start the command + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + exec <- err +} + +func readInput(r io.Reader, w io.Writer, read chan error) { + tee := io.TeeReader(r, w) + + _, err := ioutil.ReadAll(tee) + + if err == io.EOF { + err = nil + } + + read <- err // will only arrive here when write end of pipe is closed +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index 7fb7cbf0..e1551495 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -3,11 +3,10 @@ package credentials import ( "fmt" "os" - "path/filepath" - - "github.com/go-ini/ini" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/internal/ini" + "github.com/aws/aws-sdk-go/internal/shareddefaults" ) // SharedCredsProviderName provides a name of SharedCreds provider @@ -15,8 +14,6 @@ const SharedCredsProviderName = "SharedCredentialsProvider" var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - // - // @readonly ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) ) @@ -79,36 +76,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool { // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. 
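
As context for `loadProfile` below, a hedged sketch of the provider this file implements. The path and profile are illustrative; leaving `Filename` empty falls back to `AWS_SHARED_CREDENTIALS_FILE` and then the platform default resolved through `shareddefaults`.

```go
// Assumed import: aws/credentials. Values are illustrative.
creds := credentials.NewCredentials(&credentials.SharedCredentialsProvider{
	Filename: "/home/user/.aws/credentials",
	Profile:  "default",
})
```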
 func loadProfile(filename, profile string) (Value, error) {
-	config, err := ini.Load(filename)
+	config, err := ini.OpenFile(filename)
 	if err != nil {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
 	}
-	iniProfile, err := config.GetSection(profile)
-	if err != nil {
-		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+
+	iniProfile, ok := config.GetSection(profile)
+	if !ok {
+		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
 	}
 
-	id, err := iniProfile.GetKey("aws_access_key_id")
-	if err != nil {
+	id := iniProfile.String("aws_access_key_id")
+	if len(id) == 0 {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
-			err)
+			nil)
 	}
 
-	secret, err := iniProfile.GetKey("aws_secret_access_key")
-	if err != nil {
+	secret := iniProfile.String("aws_secret_access_key")
+	if len(secret) == 0 {
 		return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
 			fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
 			nil)
 	}
 
 	// Default to empty string if not found
-	token := iniProfile.Key("aws_session_token")
+	token := iniProfile.String("aws_session_token")
 
 	return Value{
-		AccessKeyID:     id.String(),
-		SecretAccessKey: secret.String(),
-		SessionToken:    token.String(),
+		AccessKeyID:     id,
+		SecretAccessKey: secret,
+		SessionToken:    token,
 		ProviderName:    SharedCredsProviderName,
 	}, nil
 }
@@ -117,22 +115,23 @@ func loadProfile(filename, profile string) (Value, error) {
 //
 // Will return an error if the user's home directory path cannot be found.
 func (p *SharedCredentialsProvider) filename() (string, error) {
-	if p.Filename == "" {
-		if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
-			return p.Filename, nil
-		}
-
-		homeDir := os.Getenv("HOME") // *nix
-		if homeDir == "" { // Windows
-			homeDir = os.Getenv("USERPROFILE")
-		}
-		if homeDir == "" {
-			return "", ErrSharedCredentialsHomeNotFound
-		}
-
-		p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+	if len(p.Filename) != 0 {
+		return p.Filename, nil
+	}
+
+	if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+		return p.Filename, nil
 	}
 
+	if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+		// Backwards compatibility of the home directory not found error being
+		// returned. This error is too verbose; a failure when opening the file
+		// would have been a better error to return.
+		return "", ErrSharedCredentialsHomeNotFound
+	}
+
+	p.Filename = shareddefaults.SharedCredentialsFilename()
+
 	return p.Filename, nil
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
index 4f5dab3f..531139e3 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -9,8 +9,6 @@ const StaticProviderName = "StaticProvider"
 
 var (
 	// ErrStaticCredentialsEmpty is emitted when static credentials are empty.
-	//
-	// @readonly
 	ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
 )
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index b8406233..2e528d13 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -12,7 +12,7 @@ between multiple Credentials, Sessions or service clients.
 Assume Role
 
 To assume an IAM role using STS with the SDK you can create a new Credentials
-with the SDK's stscreds package.
+with the SDK's stscreds package.
 
 	// Initial credentials loaded from SDK's default credential chain. Such as
 	// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
@@ -80,16 +80,18 @@ package stscreds
 
 import (
 	"fmt"
+	"os"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/internal/sdkrand"
 	"github.com/aws/aws-sdk-go/service/sts"
 )
 
-// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
 // An error is returned if reading from stdin fails.
 //
 // Use this function to read MFA tokens from stdin. The function makes no attempt
@@ -102,7 +104,7 @@ import (
 // Will wait forever until something is provided on the stdin.
 func StdinTokenProvider() (string, error) {
 	var v string
-	fmt.Printf("Assume Role MFA token code: ")
+	fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
 	_, err := fmt.Scanln(&v)
 
 	return v, err
@@ -193,6 +195,18 @@ type AssumeRoleProvider struct {
 	//
 	// If ExpiryWindow is 0 or less it will be ignored.
 	ExpiryWindow time.Duration
+
+	// MaxJitterFrac reduces the effective Duration of each credential requested
+	// by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
+	// have a value between 0 and 1. Any other value may lead to unexpected
+	// behavior. With a MaxJitterFrac value of 0 (the default), no jitter will
+	// be used.
+	//
+	// For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
+	// AssumeRole call will be made with an arbitrary Duration between 27m and
+	// 30m.
+	//
+	// MaxJitterFrac should not be negative.
+	MaxJitterFrac float64
 }
 
 // NewCredentials returns a pointer to a new Credentials object wrapping the
@@ -244,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
 	// Apply defaults where parameters are not set.
 	if p.RoleSessionName == "" {
 		// Try to work out a role name that will hopefully end up unique.
@@ -254,8 +267,9 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
 		// Expire as often as AWS permits.
 		p.Duration = DefaultDuration
 	}
 
+	jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
 	input := &sts.AssumeRoleInput{
-		DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+		DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
 		RoleArn:         aws.String(p.RoleARN),
 		RoleSessionName: aws.String(p.RoleSessionName),
 		ExternalId:      p.ExternalID,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
new file mode 100644
index 00000000..a00ab6c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
@@ -0,0 +1,119 @@
+package crr
+
+import (
+	"sync/atomic"
+)
+
+// EndpointCache is an LRU cache that holds a series of endpoints
+// based on some key. The data structure makes use of a read write
+// mutex to enable asynchronous use.
+type EndpointCache struct {
+	endpoints     syncMap
+	endpointLimit int64
+	// size is used to count the number of elements in the cache.
+	// The atomic package is used to ensure this size is accurate when
+	// using multiple goroutines.
+	size int64
+}
+
+// NewEndpointCache will return a newly initialized cache with a limit
+// of endpointLimit entries.
+func NewEndpointCache(endpointLimit int64) *EndpointCache {
+	return &EndpointCache{
+		endpointLimit: endpointLimit,
+		endpoints:     newSyncMap(),
+	}
+}
+
+// get is a concurrent safe get operation that will retrieve an endpoint
+// based on endpointKey. A boolean will also be returned to illustrate whether
+// or not the endpoint had been found.
+func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) {
+	endpoint, ok := c.endpoints.Load(endpointKey)
+	if !ok {
+		return Endpoint{}, false
+	}
+
+	c.endpoints.Store(endpointKey, endpoint)
+	return endpoint.(Endpoint), true
+}
+
+// Has returns if the endpoint cache contains a valid entry for the endpoint
+// key provided.
+func (c *EndpointCache) Has(endpointKey string) bool {
+	endpoint, ok := c.get(endpointKey)
+	_, found := endpoint.GetValidAddress()
+
+	return ok && found
+}
+
+// Get will retrieve a weighted address based off of the endpoint key. If an
+// endpoint should be retrieved, due to it not existing or the current endpoint
+// having expired, the Discoverer object that was passed in will attempt to
+// discover a new endpoint and add that to the cache.
+func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) {
+	var err error
+	endpoint, ok := c.get(endpointKey)
+	weighted, found := endpoint.GetValidAddress()
+	shouldGet := !ok || !found
+
+	if required && shouldGet {
+		if endpoint, err = c.discover(d, endpointKey); err != nil {
+			return WeightedAddress{}, err
+		}
+
+		weighted, _ = endpoint.GetValidAddress()
+	} else if shouldGet {
+		go c.discover(d, endpointKey)
+	}
+
+	return weighted, nil
+}
+
+// Add is a concurrent safe operation that will allow new endpoints to be added
+// to the cache. If the cache is full, that is the number of endpoints equals
+// endpointLimit, then this will remove the oldest entry before adding the new
+// endpoint.
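
The `Add` method below rounds out the cache API. A hedged sketch of how the pieces of this new `crr` package compose, using a stub `Discoverer` (real implementations would call a service's endpoint-discovery API; the URL and key below are illustrative):

```go
// Assumed imports: net/url, time, aws/crr. The type and values are
// hypothetical, for illustration only.
type staticDiscoverer struct{ addr string }

func (d staticDiscoverer) Discover() (crr.Endpoint, error) {
	u, err := url.Parse(d.addr)
	if err != nil {
		return crr.Endpoint{}, err
	}
	e := crr.Endpoint{} // Key is filled in by the cache when stored
	e.Add(crr.WeightedAddress{URL: u, Expired: time.Now().Add(5 * time.Minute)})
	return e, nil
}

// Inside a function: cache up to 10 keys; with required=true, Get discovers
// synchronously on a miss instead of in a background goroutine.
cache := crr.NewEndpointCache(10)
addr, err := cache.Get(staticDiscoverer{"https://cell-1.example.com"}, "my-key", true)
```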
+func (c *EndpointCache) Add(endpoint Endpoint) {
+	// de-dups multiple adds of an endpoint with a pre-existing key
+	if iface, ok := c.endpoints.Load(endpoint.Key); ok {
+		e := iface.(Endpoint)
+		if e.Len() > 0 {
+			return
+		}
+	}
+	c.endpoints.Store(endpoint.Key, endpoint)
+
+	size := atomic.AddInt64(&c.size, 1)
+	if size > 0 && size > c.endpointLimit {
+		c.deleteRandomKey()
+	}
+}
+
+// deleteRandomKey will delete a random key from the cache. If
+// no key was deleted false will be returned.
+func (c *EndpointCache) deleteRandomKey() bool {
+	atomic.AddInt64(&c.size, -1)
+	found := false
+
+	c.endpoints.Range(func(key, value interface{}) bool {
+		found = true
+		c.endpoints.Delete(key)
+
+		return false
+	})
+
+	return found
+}
+
+// discover will get and store an endpoint using the Discoverer.
+func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) {
+	endpoint, err := d.Discover()
+	if err != nil {
+		return Endpoint{}, err
+	}
+
+	endpoint.Key = endpointKey
+	c.Add(endpoint)
+
+	return endpoint, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
new file mode 100644
index 00000000..d5599188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
@@ -0,0 +1,99 @@
+package crr
+
+import (
+	"net/url"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+)
+
+// Endpoint represents an endpoint used in endpoint discovery.
+type Endpoint struct {
+	Key       string
+	Addresses WeightedAddresses
+}
+
+// WeightedAddresses represents a list of WeightedAddress.
+type WeightedAddresses []WeightedAddress
+
+// WeightedAddress represents an address with a given weight.
+type WeightedAddress struct {
+	URL     *url.URL
+	Expired time.Time
+}
+
+// HasExpired will return whether or not the endpoint has expired, with
+// the exception of a zero expiry meaning it does not expire.
+func (e WeightedAddress) HasExpired() bool {
+	return e.Expired.Before(time.Now())
+}
+
+// Add will add a given WeightedAddress to the address list of Endpoint.
+func (e *Endpoint) Add(addr WeightedAddress) {
+	e.Addresses = append(e.Addresses, addr)
+}
+
+// Len returns the number of valid endpoints, where valid means the endpoint
+// has not expired.
+func (e *Endpoint) Len() int {
+	validEndpoints := 0
+	for _, endpoint := range e.Addresses {
+		if endpoint.HasExpired() {
+			continue
+		}
+
+		validEndpoints++
+	}
+	return validEndpoints
+}
+
+// GetValidAddress will return a non-expired weighted address.
+func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) {
+	for i := 0; i < len(e.Addresses); i++ {
+		we := e.Addresses[i]
+
+		if we.HasExpired() {
+			e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...)
+			i--
+			continue
+		}
+
+		return we, true
+	}
+
+	return WeightedAddress{}, false
+}
+
+// Discoverer is an interface used to discover which endpoint to hit. This
+// allows for specifics about what parameters need to be used to be contained
+// in the Discoverer implementor.
+type Discoverer interface {
+	Discover() (Endpoint, error)
+}
+
+// BuildEndpointKey will sort the keys in alphabetical order and then retrieve
+// the values in that order. Those values are then concatenated together to form
+// the endpoint key.
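
To make the key construction of the function below concrete, a worked illustration (not from the diff; the parameter names and values are hypothetical):

```go
// Keys sort to ["Operation", "TableName"]; values are joined with ".".
key := crr.BuildEndpointKey(map[string]*string{
	"TableName": aws.String("orders"),
	"Operation": aws.String("Query"),
})
// key == "Query.orders"
```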
+func BuildEndpointKey(params map[string]*string) string { + keys := make([]string, len(params)) + i := 0 + + for k := range params { + keys[i] = k + i++ + } + sort.Strings(keys) + + values := make([]string, len(params)) + for i, k := range keys { + if params[k] == nil { + continue + } + + values[i] = aws.StringValue(params[k]) + } + + return strings.Join(values, ".") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go new file mode 100644 index 00000000..e414eaac --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go @@ -0,0 +1,29 @@ +// +build go1.9 + +package crr + +import ( + "sync" +) + +type syncMap sync.Map + +func newSyncMap() syncMap { + return syncMap{} +} + +func (m *syncMap) Load(key interface{}) (interface{}, bool) { + return (*sync.Map)(m).Load(key) +} + +func (m *syncMap) Store(key interface{}, value interface{}) { + (*sync.Map)(m).Store(key, value) +} + +func (m *syncMap) Delete(key interface{}) { + (*sync.Map)(m).Delete(key) +} + +func (m *syncMap) Range(f func(interface{}, interface{}) bool) { + (*sync.Map)(m).Range(f) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go new file mode 100644 index 00000000..e0b12200 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go @@ -0,0 +1,48 @@ +// +build !go1.9 + +package crr + +import ( + "sync" +) + +type syncMap struct { + container map[interface{}]interface{} + lock sync.RWMutex +} + +func newSyncMap() syncMap { + return syncMap{ + container: map[interface{}]interface{}{}, + } +} + +func (m *syncMap) Load(key interface{}) (interface{}, bool) { + m.lock.RLock() + defer m.lock.RUnlock() + + v, ok := m.container[key] + return v, ok +} + +func (m *syncMap) Store(key interface{}, value interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + + m.container[key] = value +} + +func (m *syncMap) Delete(key interface{}) { + m.lock.Lock() + defer m.lock.Unlock() + + delete(m.container, key) +} + +func (m *syncMap) Range(f func(interface{}, interface{}) bool) { + for k, v := range m.container { + if !f(k, v) { + return + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go new file mode 100644 index 00000000..25a66d1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go @@ -0,0 +1,69 @@ +// Package csm provides the Client Side Monitoring (CSM) client which enables +// sending metrics via UDP connection to the CSM agent. This package provides +// control options, and configuration for the CSM client. The client can be +// controlled manually, or automatically via the SDK's Session configuration. +// +// Enabling CSM client via SDK's Session configuration +// +// The CSM client can be enabled automatically via SDK's Session configuration. +// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT +// environment variable is set to a non-empty value. +// +// The configuration options for the CSM client via the SDK's session +// configuration are: +// +// * AWS_CSM_PORT= +// The port number the CSM agent will receive metrics on. +// +// * AWS_CSM_HOST= +// The hostname, or IP address the CSM agent will receive metrics on. +// Without port number. +// +// Manually enabling the CSM client +// +// The CSM client can be started, paused, and resumed manually. 
The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with different ClientID or address it will panic.
+//
+//	r, err := csm.Start("clientID", ":31000")
+//	if err != nil {
+//		panic(fmt.Errorf("failed starting CSM: %v", err))
+//	}
+//
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
+//	sess, err := session.NewSession(&aws.Config{})
+//	if err != nil {
+//		panic(fmt.Errorf("failed loading session: %v", err))
+//	}
+//
+//	// Add CSM client's metric publishing request handlers to the SDK's
+//	// Session Configuration.
+//	r.InjectHandlers(&sess.Handlers)
+//
+// Controlling CSM client
+//
+// Once the CSM client has been enabled, the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with the
+// Start function or via the SDK's Session configuration, nil will be returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//	// Get the CSM client Reporter.
+//	r := csm.Get()
+//
+//	// Will pause monitoring
+//	r.Pause()
+//	resp, err = client.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("bucket"),
+//		Key: aws.String("key"),
+//	})
+//
+//	// Resume monitoring
+//	r.Continue()
+package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
new file mode 100644
index 00000000..4b19e280
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
@@ -0,0 +1,89 @@
+package csm
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+var (
+	lock sync.Mutex
+)
+
+const (
+	// DefaultPort is used when no port is specified.
+	DefaultPort = "31000"
+
+	// DefaultHost is the host that will be used when none is specified.
+	DefaultHost = "127.0.0.1"
+)
+
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+	if len(host) == 0 || strings.EqualFold(host, "localhost") {
+		host = DefaultHost
+	}
+
+	if len(port) == 0 {
+		port = DefaultPort
+	}
+
+	// Only an IPv6 host can contain a colon
+	if strings.Contains(host, ":") {
+		return "[" + host + "]:" + port
+	}
+
+	return host + ":" + port
+}
+
+// Start will start a long-running goroutine to capture
+// client side metrics. Calling Start multiple times will only
+// start the metric listener once, and it will panic if a different
+// client ID or port is passed in.
+// +// r, err := csm.Start("clientID", "127.0.0.1:31000") +// if err != nil { +// panic(fmt.Errorf("expected no error, but received %v", err)) +// } +// sess := session.NewSession() +// r.InjectHandlers(sess.Handlers) +// +// svc := s3.New(sess) +// out, err := svc.GetObject(&s3.GetObjectInput{ +// Bucket: aws.String("bucket"), +// Key: aws.String("key"), +// }) +func Start(clientID string, url string) (*Reporter, error) { + lock.Lock() + defer lock.Unlock() + + if sender == nil { + sender = newReporter(clientID, url) + } else { + if sender.clientID != clientID { + panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID)) + } + + if sender.url != url { + panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url)) + } + } + + if err := connect(url); err != nil { + sender = nil + return nil, err + } + + return sender, nil +} + +// Get will return a reporter if one exists, if one does not exist, nil will +// be returned. +func Get() *Reporter { + lock.Lock() + defer lock.Unlock() + + return sender +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go new file mode 100644 index 00000000..5bacc791 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go @@ -0,0 +1,109 @@ +package csm + +import ( + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" +) + +type metricTime time.Time + +func (t metricTime) MarshalJSON() ([]byte, error) { + ns := time.Duration(time.Time(t).UnixNano()) + return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil +} + +type metric struct { + ClientID *string `json:"ClientId,omitempty"` + API *string `json:"Api,omitempty"` + Service *string `json:"Service,omitempty"` + Timestamp *metricTime `json:"Timestamp,omitempty"` + Type *string `json:"Type,omitempty"` + Version *int `json:"Version,omitempty"` + + AttemptCount *int `json:"AttemptCount,omitempty"` + Latency *int `json:"Latency,omitempty"` + + Fqdn *string `json:"Fqdn,omitempty"` + UserAgent *string `json:"UserAgent,omitempty"` + AttemptLatency *int `json:"AttemptLatency,omitempty"` + + SessionToken *string `json:"SessionToken,omitempty"` + Region *string `json:"Region,omitempty"` + AccessKey *string `json:"AccessKey,omitempty"` + HTTPStatusCode *int `json:"HttpStatusCode,omitempty"` + XAmzID2 *string `json:"XAmzId2,omitempty"` + XAmzRequestID *string `json:"XAmznRequestId,omitempty"` + + AWSException *string `json:"AwsException,omitempty"` + AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"` + SDKException *string `json:"SdkException,omitempty"` + SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"` + + FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"` + FinalAWSException *string `json:"FinalAwsException,omitempty"` + FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"` + FinalSDKException *string `json:"FinalSdkException,omitempty"` + FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"` + + DestinationIP *string `json:"DestinationIp,omitempty"` + ConnectionReused *int `json:"ConnectionReused,omitempty"` + + AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"` + ConnectLatency *int `json:"ConnectLatency,omitempty"` + RequestLatency *int `json:"RequestLatency,omitempty"` + DNSLatency *int `json:"DnsLatency,omitempty"` + TCPLatency *int `json:"TcpLatency,omitempty"` + SSLLatency *int 
`json:"SslLatency,omitempty"` + + MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"` +} + +func (m *metric) TruncateFields() { + m.ClientID = truncateString(m.ClientID, 255) + m.UserAgent = truncateString(m.UserAgent, 256) + + m.AWSException = truncateString(m.AWSException, 128) + m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512) + + m.SDKException = truncateString(m.SDKException, 128) + m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512) + + m.FinalAWSException = truncateString(m.FinalAWSException, 128) + m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512) + + m.FinalSDKException = truncateString(m.FinalSDKException, 128) + m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512) +} + +func truncateString(v *string, l int) *string { + if v != nil && len(*v) > l { + nv := (*v)[:l] + return &nv + } + + return v +} + +func (m *metric) SetException(e metricException) { + switch te := e.(type) { + case awsException: + m.AWSException = aws.String(te.exception) + m.AWSExceptionMessage = aws.String(te.message) + case sdkException: + m.SDKException = aws.String(te.exception) + m.SDKExceptionMessage = aws.String(te.message) + } +} + +func (m *metric) SetFinalException(e metricException) { + switch te := e.(type) { + case awsException: + m.FinalAWSException = aws.String(te.exception) + m.FinalAWSExceptionMessage = aws.String(te.message) + case sdkException: + m.FinalSDKException = aws.String(te.exception) + m.FinalSDKExceptionMessage = aws.String(te.message) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go new file mode 100644 index 00000000..514fc373 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go @@ -0,0 +1,54 @@ +package csm + +import ( + "sync/atomic" +) + +const ( + runningEnum = iota + pausedEnum +) + +var ( + // MetricsChannelSize of metrics to hold in the channel + MetricsChannelSize = 100 +) + +type metricChan struct { + ch chan metric + paused int64 +} + +func newMetricChan(size int) metricChan { + return metricChan{ + ch: make(chan metric, size), + } +} + +func (ch *metricChan) Pause() { + atomic.StoreInt64(&ch.paused, pausedEnum) +} + +func (ch *metricChan) Continue() { + atomic.StoreInt64(&ch.paused, runningEnum) +} + +func (ch *metricChan) IsPaused() bool { + v := atomic.LoadInt64(&ch.paused) + return v == pausedEnum +} + +// Push will push metrics to the metric channel if the channel +// is not paused +func (ch *metricChan) Push(m metric) bool { + if ch.IsPaused() { + return false + } + + select { + case ch.ch <- m: + return true + default: + return false + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go new file mode 100644 index 00000000..54a99280 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go @@ -0,0 +1,26 @@ +package csm + +type metricException interface { + Exception() string + Message() string +} + +type requestException struct { + exception string + message string +} + +func (e requestException) Exception() string { + return e.exception +} +func (e requestException) Message() string { + return e.message +} + +type awsException struct { + requestException +} + +type sdkException struct { + requestException +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go 
b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go new file mode 100644 index 00000000..0d368491 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go @@ -0,0 +1,265 @@ +package csm + +import ( + "encoding/json" + "net" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Reporter will gather metrics of API requests made and +// send those metrics to the CSM endpoint. +type Reporter struct { + clientID string + url string + conn net.Conn + metricsCh metricChan + done chan struct{} +} + +var ( + sender *Reporter +) + +func connect(url string) error { + const network = "udp" + if err := sender.connect(network, url); err != nil { + return err + } + + if sender.done == nil { + sender.done = make(chan struct{}) + go sender.start() + } + + return nil +} + +func newReporter(clientID, url string) *Reporter { + return &Reporter{ + clientID: clientID, + url: url, + metricsCh: newMetricChan(MetricsChannelSize), + } +} + +func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + creds, _ := r.Config.Credentials.Get() + + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Region: r.Config.Region, + Type: aws.String("ApiCallAttempt"), + Version: aws.Int(1), + + XAmzRequestID: aws.String(r.RequestID), + + AttemptCount: aws.Int(r.RetryCount + 1), + AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))), + AccessKey: aws.String(creds.AccessKeyID), + } + + if r.HTTPResponse != nil { + m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetException(getMetricException(awserr)) + } + } + + m.TruncateFields() + rep.metricsCh.Push(m) +} + +func getMetricException(err awserr.Error) metricException { + msg := err.Error() + code := err.Code() + + switch code { + case "RequestError", + request.ErrCodeSerialization, + request.CanceledErrorCode: + return sdkException{ + requestException{exception: code, message: msg}, + } + default: + return awsException{ + requestException{exception: code, message: msg}, + } + } +} + +func (rep *Reporter) sendAPICallMetric(r *request.Request) { + if rep == nil { + return + } + + now := time.Now() + m := metric{ + ClientID: aws.String(rep.clientID), + API: aws.String(r.Operation.Name), + Service: aws.String(r.ClientInfo.ServiceID), + Timestamp: (*metricTime)(&now), + UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")), + Type: aws.String("ApiCall"), + AttemptCount: aws.Int(r.RetryCount + 1), + Region: r.Config.Region, + Latency: aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)), + XAmzRequestID: aws.String(r.RequestID), + MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())), + } + + if r.HTTPResponse != nil { + m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode) + } + + if r.Error != nil { + if awserr, ok := r.Error.(awserr.Error); ok { + m.SetFinalException(getMetricException(awserr)) + } + } + + m.TruncateFields() + + // TODO: Probably want to figure something out for logging dropped + // metrics + rep.metricsCh.Push(m) +} + +func (rep *Reporter) connect(network, url string) error { + if rep.conn != nil { + rep.conn.Close() + } + + 
conn, err := net.Dial(network, url)
+	if err != nil {
+		return awserr.New("UDPError", "Could not connect", err)
+	}
+
+	rep.conn = conn
+
+	return nil
+}
+
+func (rep *Reporter) close() {
+	if rep.done != nil {
+		close(rep.done)
+	}
+
+	rep.metricsCh.Pause()
+}
+
+func (rep *Reporter) start() {
+	defer func() {
+		rep.metricsCh.Pause()
+	}()
+
+	for {
+		select {
+		case <-rep.done:
+			rep.done = nil
+			return
+		case m := <-rep.metricsCh.ch:
+			// TODO: What to do with this error? Probably should just log
+			b, err := json.Marshal(m)
+			if err != nil {
+				continue
+			}
+
+			rep.conn.Write(b)
+		}
+	}
+}
+
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but if
+// called concurrently with Continue it can lead to unexpected state.
+func (rep *Reporter) Pause() {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if rep == nil {
+		return
+	}
+
+	rep.close()
+}
+
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// if called concurrently with Pause it can lead to unexpected state.
+func (rep *Reporter) Continue() {
+	lock.Lock()
+	defer lock.Unlock()
+	if rep == nil {
+		return
+	}
+
+	if !rep.metricsCh.IsPaused() {
+		return
+	}
+
+	rep.metricsCh.Continue()
+}
+
+// Client side metric handler names
+const (
+	APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+	APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
+// InjectHandlers will enable client side metrics and inject the proper
+// handlers to handle how metrics are sent.
+//
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
+//	// Start must be called in order to inject the correct handlers
+//	r, err := csm.Start("clientID", "127.0.0.1:8094")
+//	if err != nil {
+//		panic(fmt.Errorf("expected no error, but received %v", err))
+//	}
+//
+//	sess := session.NewSession()
+//	r.InjectHandlers(&sess.Handlers)
+//
+//	// create a new service client with our client side metric session
+//	svc := s3.New(sess)
+func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
+	if rep == nil {
+		return
+	}
+
+	handlers.Complete.PushFrontNamed(request.NamedHandler{
+		Name: APICallMetricHandlerName,
+		Fn:   rep.sendAPICallMetric,
+	})
+
+	handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
+		Name: APICallAttemptMetricHandlerName,
+		Fn:   rep.sendAPICallAttemptMetric,
+	})
+}
+
+// boolIntValue returns 1 for true and 0 for false.
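
Before the final helper below, a quick illustration of how enable.go's `AddressWithDefaults` resolves addresses. These are worked examples, not from the diff:

```go
csm.AddressWithDefaults("", "")          // "127.0.0.1:31000" (all defaults)
csm.AddressWithDefaults("localhost", "") // "127.0.0.1:31000" (localhost rewritten)
csm.AddressWithDefaults("::1", "8094")   // "[::1]:8094" (IPv6 host is bracketed)
```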
+func boolIntValue(b bool) int {
+	if b {
+		return 1
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 0ef55040..23bb639e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -9,11 +9,14 @@ package defaults
 
 import (
 	"fmt"
+	"net"
 	"net/http"
+	"net/url"
 	"os"
 	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
@@ -21,6 +24,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/shareddefaults"
 )
 
 // A Defaults provides a collection of default values for SDK clients.
@@ -56,7 +60,6 @@ func Config() *aws.Config {
 		WithMaxRetries(aws.UseServiceDefaultRetries).
 		WithLogger(aws.NewDefaultLogger()).
 		WithLogLevel(aws.LogOff).
-		WithSleepDelay(time.Sleep).
 		WithEndpointResolver(endpoints.DefaultResolver())
 }
 
@@ -71,6 +74,7 @@ func Handlers() request.Handlers {
 	handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
 	handlers.Validate.AfterEachFn = request.HandlerListStopOnError
 	handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+	handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
 	handlers.Build.AfterEachFn = request.HandlerListStopOnError
 	handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
 	handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
@@ -89,33 +93,102 @@ func Handlers() request.Handlers {
 func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
 	return credentials.NewCredentials(&credentials.ChainProvider{
 		VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
-		Providers: []credentials.Provider{
-			&credentials.EnvProvider{},
-			&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
-			RemoteCredProvider(*cfg, handlers),
-		},
+		Providers:     CredProviders(cfg, handlers),
 	})
 }
 
-// RemoteCredProvider returns a credenitials provider for the default remote
+// CredProviders returns the slice of providers used in
+// the default credential chain.
+//
+// This is useful for applications that need to use some other provider (for
+// example, use different environment variables for legacy reasons) but still
+// fall back on the default chain of providers. This allows that default chain
+// to be automatically updated.
+func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
+	return []credentials.Provider{
+		&credentials.EnvProvider{},
+		&credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+		RemoteCredProvider(*cfg, handlers),
+	}
+}
+
+const (
+	httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+	httpProviderEnvVar              = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
 // endpoints such as EC2 or ECS Roles.
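
The `CredProviders` split-out above enables the fallback pattern its comment describes. A hedged sketch follows; `myLegacyEnvProvider` is a hypothetical type that would implement `credentials.Provider`:

```go
// Assumed imports: aws/credentials, aws/defaults.
cfg := defaults.Config()
handlers := defaults.Handlers()

// Try a custom provider first, then fall back to the SDK's default chain,
// which picks up future additions to CredProviders automatically.
providers := append(
	[]credentials.Provider{&myLegacyEnvProvider{}}, // hypothetical custom provider
	defaults.CredProviders(cfg, handlers)...,
)
creds := credentials.NewChainCredentials(providers)
```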
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { - ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if u := os.Getenv(httpProviderEnvVar); len(u) > 0 { + return localHTTPCredProvider(cfg, handlers, u) + } - if len(ecsCredURI) > 0 { - return ecsCredProvider(cfg, handlers, ecsCredURI) + if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 { + u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri) + return httpCredProvider(cfg, handlers, u) } return ec2RoleProvider(cfg, handlers) } -func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { - const host = `169.254.170.2` +var lookupHostFn = net.LookupHost + +func isLoopbackHost(host string) (bool, error) { + ip := net.ParseIP(host) + if ip != nil { + return ip.IsLoopback(), nil + } + + // Host is not an ip, perform lookup + addrs, err := lookupHostFn(host) + if err != nil { + return false, err + } + for _, addr := range addrs { + if !net.ParseIP(addr).IsLoopback() { + return false, nil + } + } + + return true, nil +} + +func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + var errMsg string + + parsed, err := url.Parse(u) + if err != nil { + errMsg = fmt.Sprintf("invalid URL, %v", err) + } else { + host := aws.URLHostname(parsed) + if len(host) == 0 { + errMsg = "unable to parse host from local HTTP cred provider URL" + } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil { + errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, loopbackErr) + } else if !isLoopback { + errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback hosts are allowed.", host) + } + } + + if len(errMsg) > 0 { + if cfg.Logger != nil { + cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err) + } + return credentials.ErrorProvider{ + Err: awserr.New("CredentialsEndpointError", errMsg, err), + ProviderName: endpointcreds.ProviderName, + } + } + + return httpCredProvider(cfg, handlers, u) +} - return endpointcreds.NewProviderClient(cfg, handlers, - fmt.Sprintf("http://%s%s", host, uri), +func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider { + return endpointcreds.NewProviderClient(cfg, handlers, u, func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute + p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar) }, ) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go new file mode 100644 index 00000000..ca0ee1dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go @@ -0,0 +1,27 @@ +package defaults + +import ( + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return shareddefaults.SharedCredentialsFilename() +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. 
+//
+//  - Linux/Unix: $HOME/.aws/config
+//  - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+	return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 00000000..4fcb6161
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy to
+// get a pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//	var strPtr *string
+//
+//	// Without the SDK's conversion functions
+//	str := "my string"
+//	strPtr = &str
+//
+//	// With the SDK's conversion functions
+//	strPtr = aws.String("my string")
+//
+//	// Convert *string to string value
+//	str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern as the scalar conversion
+// functions.
+//
+//	var strPtrs []*string
+//	var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//	// Convert []string to []*string
+//	strPtrs = aws.StringSlice(strs)
+//
+//	// Convert []*string to []string
+//	strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
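
A hedged sketch of that last recommendation (imports of `net/http`, `time`, `aws`, and `aws/session` are assumed; the timeout value is illustrative):

```go
// Share one explicitly configured client instead of mutating http.DefaultClient.
httpClient := &http.Client{Timeout: 30 * time.Second}
sess, err := session.NewSession(&aws.Config{
	HTTPClient: httpClient,
})
```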
+package aws diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 984407a5..2c8d5f56 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -4,12 +4,12 @@ import ( "encoding/json" "fmt" "net/http" - "path" "strings" "time" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkuri" ) // GetMetadata uses the path provided to request information from the EC2 @@ -19,13 +19,14 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { op := &request.Operation{ Name: "GetMetadata", HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), + HTTPPath: sdkuri.PathJoin("/meta-data", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetUserData returns the userdata that was configured for the service. If @@ -35,7 +36,7 @@ func (c *EC2Metadata) GetUserData() (string, error) { op := &request.Operation{ Name: "GetUserData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "user-data"), + HTTPPath: "/user-data", } output := &metadataOutput{} @@ -45,8 +46,9 @@ func (c *EC2Metadata) GetUserData() (string, error) { r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) } }) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetDynamicData uses the path provided to request information from the EC2 @@ -56,13 +58,14 @@ func (c *EC2Metadata) GetDynamicData(p string) (string, error) { op := &request.Operation{ Name: "GetDynamicData", HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), + HTTPPath: sdkuri.PathJoin("/dynamic", p), } output := &metadataOutput{} req := c.NewRequest(op, nil, output) + err := req.Send() - return output.Content, req.Send() + return output.Content, err } // GetInstanceIdentityDocument retrieves an identity document describing an @@ -79,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument doc := EC2InstanceIdentityDocument{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 instance identity document", err) } @@ -98,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { info := EC2IAMInfo{} if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { return EC2IAMInfo{}, - awserr.New("SerializationError", + awserr.New(request.ErrCodeSerialization, "failed to decode EC2 IAM info", err) } @@ -118,6 +121,10 @@ func (c *EC2Metadata) Region() (string, error) { return "", err } + if len(resp) == 0 { + return "", awserr.New("EC2MetadataError", "invalid Region response", nil) + } + // returns region without the suffix. 
Eg: us-west-2a becomes us-west-2 return resp[:len(resp)-1], nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index 5b4379db..f0c1d31e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -1,5 +1,10 @@ // Package ec2metadata provides the client for making API calls to the // EC2 Metadata service. +// +// This package's client can be disabled completely by setting the environment +// variable "AWS_EC2_METADATA_DISABLED=true". Setting this environment variable to +// true instructs the SDK to disable the EC2 Metadata client. The client cannot +// be used while the environment variable is set to true (case insensitive). package ec2metadata import ( @@ -7,17 +12,21 @@ import ( "errors" "io" "net/http" + "os" + "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/request" ) // ServiceName is the name of the service. const ServiceName = "ec2metadata" +const disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED" // A EC2Metadata is an EC2 Metadata service Client. type EC2Metadata struct { @@ -63,6 +72,7 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceName, Endpoint: endpoint, APIVersion: "latest", }, @@ -75,6 +85,24 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio svc.Handlers.Validate.Clear() svc.Handlers.Validate.PushBack(validateEndpointHandler) + // Disable the EC2 Metadata service if the environment variable is set. + // This short circuits the service's functionality to always fail to send + // requests.
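Before the handler swap that follows, a usage note: once the variable is set, every call through the ec2metadata client fails immediately with a RequestCanceled error and performs no network I/O. A minimal sketch, assuming the standard session and ec2metadata packages vendored in this diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Value is matched case-insensitively by the handler swap shown below;
	// it must be set before the client is constructed.
	os.Setenv("AWS_EC2_METADATA_DISABLED", "true")

	sess := session.Must(session.NewSession())
	svc := ec2metadata.New(sess)

	// The swapped Send handler fails the request before any network I/O.
	if _, err := svc.GetMetadata("instance-id"); err != nil {
		fmt.Println("IMDS disabled:", err) // awserr with code "RequestCanceled"
	}
}
```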
+ if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" { + svc.Handlers.Send.SwapNamed(request.NamedHandler{ + Name: corehandlers.SendHandler.Name, + Fn: func(r *request.Request) { + r.HTTPResponse = &http.Response{ + Header: http.Header{}, + } + r.Error = awserr.New( + request.CanceledErrorCode, + "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var", + nil) + }, + }) + } + // Add additional options to the service config for _, option := range opts { option(svc.Client) @@ -95,7 +123,7 @@ func unmarshalHandler(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err) return } @@ -108,7 +136,7 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() b := &bytes.Buffer{} if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) + r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err) return } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go index 74f72de0..87b9ff3f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -84,6 +84,8 @@ func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resol custAddEC2Metadata(p) custAddS3DualStack(p) custRmIotDataService(p) + custFixAppAutoscalingChina(p) + custFixAppAutoscalingUsGov(p) } return ps, nil @@ -94,7 +96,12 @@ func custAddS3DualStack(p *partition) { return } - s, ok := p.Services["s3"] + custAddDualstack(p, "s3") + custAddDualstack(p, "s3-control") +} + +func custAddDualstack(p *partition, svcName string) { + s, ok := p.Services[svcName] if !ok { return } @@ -102,7 +109,7 @@ s.Defaults.HasDualStack = boxedTrue s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" - p.Services["s3"] = s + p.Services[svcName] = s } func custAddEC2Metadata(p *partition) { @@ -122,6 +129,54 @@ func custRmIotDataService(p *partition) { delete(p.Services, "data.iot") } +func custFixAppAutoscalingChina(p *partition) { + if p.ID != "aws-cn" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + const expectHostname = `autoscaling.{region}.amazonaws.com` + if e, a := expectHostname, s.Defaults.Hostname; e != a { + fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a) + return + } + + s.Defaults.Hostname = expectHostname + ".cn" + p.Services[serviceName] = s +} + +func custFixAppAutoscalingUsGov(p *partition) { + if p.ID != "aws-us-gov" { + return + } + + const serviceName = "application-autoscaling" + s, ok := p.Services[serviceName] + if !ok { + return + } + + if a := s.Defaults.CredentialScope.Service; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a) + return + } + + if a := s.Defaults.Hostname; a != "" { + fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a) + return + } + +
s.Defaults.CredentialScope.Service = "application-autoscaling" + s.Defaults.Hostname = "autoscaling.{region}.amazonaws.com" + + p.Services[serviceName] = s +} + type decodeModelError struct { awsError } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 5f0f6dd3..371dfa6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. package endpoints @@ -15,6 +15,7 @@ const ( // AWS Standard partition's regions. const ( + ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). @@ -22,8 +23,10 @@ const ( ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). CaCentral1RegionID = "ca-central-1" // Canada (Central). EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuNorth1RegionID = "eu-north-1" // EU (Stockholm). EuWest1RegionID = "eu-west-1" // EU (Ireland). EuWest2RegionID = "eu-west-2" // EU (London). + EuWest3RegionID = "eu-west-3" // EU (Paris). SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). UsEast1RegionID = "us-east-1" // US East (N. Virginia). UsEast2RegionID = "us-east-2" // US East (Ohio). @@ -33,125 +36,33 @@ const ( // AWS China partition's regions. const ( - CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorth1RegionID = "cn-north-1" // China (Beijing). + CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). ) // AWS GovCloud (US) partition's regions. const ( + UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). ) -// Service identifiers -const ( - AcmServiceID = "acm" // Acm. - ApigatewayServiceID = "apigateway" // Apigateway. - ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. - Appstream2ServiceID = "appstream2" // Appstream2. - AutoscalingServiceID = "autoscaling" // Autoscaling. - BatchServiceID = "batch" // Batch. - BudgetsServiceID = "budgets" // Budgets. - ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. - CloudformationServiceID = "cloudformation" // Cloudformation. - CloudfrontServiceID = "cloudfront" // Cloudfront. - CloudhsmServiceID = "cloudhsm" // Cloudhsm. - CloudsearchServiceID = "cloudsearch" // Cloudsearch. - CloudtrailServiceID = "cloudtrail" // Cloudtrail. - CodebuildServiceID = "codebuild" // Codebuild. - CodecommitServiceID = "codecommit" // Codecommit. - CodedeployServiceID = "codedeploy" // Codedeploy. - CodepipelineServiceID = "codepipeline" // Codepipeline. - CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. - CognitoIdpServiceID = "cognito-idp" // CognitoIdp. - CognitoSyncServiceID = "cognito-sync" // CognitoSync. - ConfigServiceID = "config" // Config. - CurServiceID = "cur" // Cur. - DatapipelineServiceID = "datapipeline" // Datapipeline. - DevicefarmServiceID = "devicefarm" // Devicefarm. - DirectconnectServiceID = "directconnect" // Directconnect. - DiscoveryServiceID = "discovery" // Discovery. - DmsServiceID = "dms" // Dms. - DsServiceID = "ds" // Ds. - DynamodbServiceID = "dynamodb" // Dynamodb. - Ec2ServiceID = "ec2" // Ec2. 
- Ec2metadataServiceID = "ec2metadata" // Ec2metadata. - EcrServiceID = "ecr" // Ecr. - EcsServiceID = "ecs" // Ecs. - ElasticacheServiceID = "elasticache" // Elasticache. - ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. - ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. - ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. - ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. - ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. - EmailServiceID = "email" // Email. - EsServiceID = "es" // Es. - EventsServiceID = "events" // Events. - FirehoseServiceID = "firehose" // Firehose. - GameliftServiceID = "gamelift" // Gamelift. - GlacierServiceID = "glacier" // Glacier. - HealthServiceID = "health" // Health. - IamServiceID = "iam" // Iam. - ImportexportServiceID = "importexport" // Importexport. - InspectorServiceID = "inspector" // Inspector. - IotServiceID = "iot" // Iot. - KinesisServiceID = "kinesis" // Kinesis. - KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. - KmsServiceID = "kms" // Kms. - LambdaServiceID = "lambda" // Lambda. - LightsailServiceID = "lightsail" // Lightsail. - LogsServiceID = "logs" // Logs. - MachinelearningServiceID = "machinelearning" // Machinelearning. - MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. - MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. - MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. - MonitoringServiceID = "monitoring" // Monitoring. - MturkRequesterServiceID = "mturk-requester" // MturkRequester. - OpsworksServiceID = "opsworks" // Opsworks. - OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. - OrganizationsServiceID = "organizations" // Organizations. - PinpointServiceID = "pinpoint" // Pinpoint. - PollyServiceID = "polly" // Polly. - RdsServiceID = "rds" // Rds. - RedshiftServiceID = "redshift" // Redshift. - RekognitionServiceID = "rekognition" // Rekognition. - Route53ServiceID = "route53" // Route53. - Route53domainsServiceID = "route53domains" // Route53domains. - RuntimeLexServiceID = "runtime.lex" // RuntimeLex. - S3ServiceID = "s3" // S3. - SdbServiceID = "sdb" // Sdb. - ServicecatalogServiceID = "servicecatalog" // Servicecatalog. - ShieldServiceID = "shield" // Shield. - SmsServiceID = "sms" // Sms. - SnowballServiceID = "snowball" // Snowball. - SnsServiceID = "sns" // Sns. - SqsServiceID = "sqs" // Sqs. - SsmServiceID = "ssm" // Ssm. - StatesServiceID = "states" // States. - StoragegatewayServiceID = "storagegateway" // Storagegateway. - StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. - StsServiceID = "sts" // Sts. - SupportServiceID = "support" // Support. - SwfServiceID = "swf" // Swf. - WafServiceID = "waf" // Waf. - WafRegionalServiceID = "waf-regional" // WafRegional. - WorkdocsServiceID = "workdocs" // Workdocs. - WorkspacesServiceID = "workspaces" // Workspaces. - XrayServiceID = "xray" // Xray. -) - // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). // -// Casting the return value of this func to a EnumPartitions will -// allow you to get a list of the partitions in the order the endpoints -// will be resolved in. +// Use DefaultPartitions() to get the list of the default partitions. 
+func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). // -// resolver := endpoints.DefaultResolver() -// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... inspect partitions // } -func DefaultResolver() Resolver { - return defaultPartitions +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ @@ -181,6 +92,9 @@ var awsPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ + "ap-east-1": region{ + Description: "Asia Pacific (Hong Kong)", + }, "ap-northeast-1": region{ Description: "Asia Pacific (Tokyo)", }, @@ -202,12 +116,18 @@ var awsPartition = partition{ "eu-central-1": region{ Description: "EU (Frankfurt)", }, + "eu-north-1": region{ + Description: "EU (Stockholm)", + }, "eu-west-1": region{ Description: "EU (Ireland)", }, "eu-west-2": region{ Description: "EU (London)", }, + "eu-west-3": region{ + Description: "EU (Paris)", + }, "sa-east-1": region{ Description: "South America (Sao Paulo)", }, @@ -225,9 +145,16 @@ var awsPartition = partition{ }, }, Services: services{ + "a4b": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "acm": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -235,8 +162,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -244,65 +173,159 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "apigateway": service{ - + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "application-autoscaling": service{ - Defaults: endpoint{ - Hostname: "autoscaling.{region}.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Service: "application-autoscaling", + "api.ecr": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "api.ecr.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{ + Hostname: "api.ecr.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "api.ecr.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "api.ecr.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "api.ecr.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + 
}, + }, + "ap-southeast-2": endpoint{ + Hostname: "api.ecr.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "api.ecr.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "api.ecr.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "api.ecr.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "api.ecr.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "api.ecr.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "api.ecr.eu-west-3.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "api.ecr.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.ecr.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "api.ecr.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "api.ecr.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "api.ecr.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, }, + }, + "api.mediatailor": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "appstream2": service{ + "api.pricing": service{ Defaults: endpoint{ - Protocols: []string{"https"}, CredentialScope: credentialScope{ - Service: "appstream", + Service: "pricing", }, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, }, }, - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "api.sagemaker": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -313,46 +336,68 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "batch": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "budgets": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "budgets.amazonaws.com", + "us-east-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: 
"api-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "clouddirectory": service{ + "apigateway": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudformation": service{ - + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -360,8 +405,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -369,37 +416,31 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cloudfront": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "cloudfront.amazonaws.com", - Protocols: []string{"http", "https"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "cloudhsm": service{ + "appmesh": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudsearch": service{ - + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -407,13 +448,11 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cloudtrail": service{ + "appsync": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -421,36 +460,15 @@ var awsPartition = partition{ "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": 
endpoint{}, }, }, - "codebuild": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codecommit": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "codedeploy": service{ + "athena": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -460,74 +478,174 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "codepipeline": service{ - + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-identity": service{ - + "autoscaling-plans": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "autoscaling-plans", + }, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-idp": service{ + "backup": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "cognito-sync": service{ + "batch": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "config": service{ + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "ce": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "ce.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + 
"chime": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "service.chime.aws.amazon.com", + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloud9": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -535,8 +653,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -544,31 +664,43 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "cur": service{ + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-east-1": endpoint{}, + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, }, }, - "datapipeline": service{ + "cloudhsm": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "devicefarm": service{ - - Endpoints: endpoints{ - "us-west-2": endpoint{}, + "cloudhsmv2": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, }, - }, - "directconnect": service{ - Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -576,24 +708,35 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "discovery": service{ + "cloudsearch": service{ Endpoints: endpoints{ - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "dms": service{ + "cloudtrail": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, 
@@ -601,8 +744,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -610,22 +755,53 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ds": service{ + "codebuild": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "codebuild-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codebuild-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "codebuild-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codebuild-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "codecommit": service{ + Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -636,11 +812,11 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "codecommit-fips.ca-central-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Region: "ca-central-1", }, }, "sa-east-1": endpoint{}, @@ -650,11 +826,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "codedeploy": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -662,46 +837,66 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "us-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: 
"codedeploy-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "codedeploy-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, }, }, }, - "ecr": service{ + "codepipeline": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "ecs": service{ + "codestar": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, @@ -714,7 +909,7 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "elasticache": service{ + "cognito-identity": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -726,14 +921,12 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticbeanstalk": service{ + "cognito-idp": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, @@ -745,100 +938,70 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticfilesystem": service{ + "cognito-sync": service{ - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "elasticmapreduce": service{ + "comprehend": service{ Defaults: endpoint{ - SSLCommonName: "{region}.{service}.{dnsSuffix}", - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, - "eu-central-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{region}.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "elastictranscoder": service{ + "comprehendmedical": service{ Endpoints: endpoints{ - "ap-northeast-1": 
endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, - "us-west-1": endpoint{}, + "us-east-2": endpoint{}, "us-west-2": endpoint{}, }, }, - "email": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "es": service{ + "config": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -846,18 +1009,59 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "events": service{ + "cur": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "data.mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datasync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dax": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -865,115 +1069,201 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "firehose": service{ + "devicefarm": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "gamelift": service{ + "directconnect": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, }, + }, + "dms": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": 
endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "health": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "iam": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "docdb": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "iam.amazonaws.com", + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - }, - }, - "importexport": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "importexport.amazonaws.com", - SignatureVersions: []string{"v2", "v4"}, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", - Service: "IngestionService", + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", }, }, }, }, - "inspector": service{ + "ds": service{ Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "iot": service{ + "dynamodb": service{ Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "execute-api", - }, + Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: "dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, }, }, - "kinesis": service{ - + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -981,8 +1271,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -990,17 +1282,21 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "kinesisanalytics": service{ + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, }, }, - "kms": service{ + "ecs": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1008,8 +1304,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1017,31 +1315,38 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "lambda": service{ + "elasticache": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "lightsail": service{ - - Endpoints: endpoints{ + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "elasticache-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "logs": service{ + "elasticbeanstalk": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1049,8 +1354,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": 
endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1058,25 +1365,8 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "machinelearning": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - }, - }, - "marketplacecommerceanalytics": service{ + "elasticfilesystem": service{ - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "metering.marketplace": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "aws-marketplace", - }, - }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1087,24 +1377,19 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-3": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "mobileanalytics": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "monitoring": service{ + "elasticloadbalancing": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1112,8 +1397,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1121,77 +1408,73 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "mturk-requester": service{ - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "sandbox": endpoint{ - Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", - }, - "us-east-1": endpoint{}, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"https"}, }, - }, - "opsworks": service{ - Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "opsworks-cm": service{ + "elastictranscoder": service{ Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "organizations": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "email": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "organizations.us-east-1.amazonaws.com", - CredentialScope: credentialScope{ - 
Region: "us-east-1", - }, - }, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "pinpoint": service{ + "entitlement.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ - Service: "mobiletargeting", + Service: "aws-marketplace", }, }, Endpoints: endpoints{ "us-east-1": endpoint{}, }, }, - "polly": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "rds": service{ + "es": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1199,20 +1482,27 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "{service}.{dnsSuffix}", + "eu-west-3": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "redshift": service{ + "events": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1220,8 +1510,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1229,172 +1521,83 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "rekognition": service{ - - Endpoints: endpoints{ - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, - }, - }, - "route53": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, - - Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "route53.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - }, - }, - "route53domains": service{ - - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "runtime.lex": service{ - Defaults: endpoint{ - CredentialScope: credentialScope{ - Service: "lex", - }, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "s3": service{ - PartitionEndpoint: "us-east-1", - IsRegionalized: boxedTrue, - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + "firehose": service{ - HasDualStack: boxedTrue, - DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", - }, Endpoints: endpoints{ - "ap-northeast-1": endpoint{ - Hostname: "s3-ap-northeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, + "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{ - Hostname: "s3-ap-southeast-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ap-southeast-2": endpoint{ - Hostname: "s3-ap-southeast-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{ - Hostname: "s3-eu-west-1.amazonaws.com", - 
SignatureVersions: []string{"s3", "s3v4"}, - }, - "eu-west-2": endpoint{}, - "s3-external-1": endpoint{ - Hostname: "s3-external-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - CredentialScope: credentialScope{ - Region: "us-east-1", - }, - }, - "sa-east-1": endpoint{ - Hostname: "s3-sa-east-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-1": endpoint{ - Hostname: "s3.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{ - Hostname: "s3-us-west-1.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - "us-west-2": endpoint{ - Hostname: "s3-us-west-2.amazonaws.com", - SignatureVersions: []string{"s3", "s3v4"}, - }, - }, - }, - "sdb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"v2"}, - }, - Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - Hostname: "sdb.amazonaws.com", - }, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "servicecatalog": service{ - + "fms": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "shield": service{ - IsRegionalized: boxedFalse, - Defaults: endpoint{ - SSLCommonName: "Shield.us-east-1.amazonaws.com", - Protocols: []string{"https"}, - }, - Endpoints: endpoints{ - "us-east-1": endpoint{}, - }, - }, - "sms": service{ + "fsx": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "snowball": service{ + "gamelift": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "sns": service{ + "glacier": service{ Defaults: endpoint{ Protocols: []string{"http", "https"}, }, Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1402,8 +1605,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1411,12 +1616,10 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: 
"{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, - }, + "glue": service{ + Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1424,55 +1627,60 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, - "us-east-1": endpoint{ - SSLCommonName: "queue.{dnsSuffix}", - }, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, - "ssm": service{ - + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, + "eu-west-2": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, - "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, - "states": service{ + "groundstation": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-2": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "storagegateway": service{ - + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1480,61 +1688,137 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", - }, - }, + "health": service{ + Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "ca-central-1": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "eu-west-2": endpoint{}, - "local": endpoint{ - Hostname: "localhost:8000", - Protocols: []string{"http"}, + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", CredentialScope: credentialScope{ Region: "us-east-1", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, }, }, - "sts": service{ + "importexport": service{ PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + 
}, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ Defaults: endpoint{ - Hostname: "sts.amazonaws.com", CredentialScope: credentialScope{ - Region: "us-east-1", + Service: "execute-api", }, }, Endpoints: endpoints{ "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{ - Hostname: "sts.ap-northeast-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-northeast-2", - }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotanalytics": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iotthingsgraph": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "iotthingsgraph", }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kafka": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-1": endpoint{}, "ap-southeast-2": endpoint{}, - "aws-global": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1542,15 +1826,36 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "support": service{ + "kinesisanalytics": service{ Endpoints: endpoints{ - "us-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, }, }, - "swf": service{ + "kinesisvideo": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1558,8 +1863,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, 
"ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1567,83 +1874,2112 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, - "waf": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "lambda": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "waf.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-1", - }, + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "mediaconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "medialive": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediapackage": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mediastore": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mgh": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mq": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": 
endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "neptune": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "rds.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "rds.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "rds.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "rds.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "rds.ap-southeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "eu-central-1": endpoint{ + Hostname: "rds.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "rds.eu-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "rds.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "rds.eu-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "us-east-1": endpoint{ + Hostname: "rds.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "rds.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{ + Hostname: "rds.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + 
"eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "projects.iot1click": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ram": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "resource-groups": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "robomaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + 
}, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "route53resolver": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "runtime.sagemaker": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{ + Hostname: "s3.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + 
Hostname: "s3.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-control.ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + "ap-northeast-2": endpoint{ + Hostname: "s3-control.ap-northeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{ + Hostname: "s3-control.ap-south-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, + "ap-southeast-1": endpoint{ + Hostname: "s3-control.ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-control.ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ap-southeast-2", + }, + }, + "ca-central-1": endpoint{ + Hostname: "s3-control.ca-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{ + Hostname: "s3-control.eu-central-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + "eu-north-1": endpoint{ + Hostname: "s3-control.eu-north-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-north-1", + }, + }, + "eu-west-1": endpoint{ + Hostname: "s3-control.eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "eu-west-2": endpoint{ + Hostname: "s3-control.eu-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-2", + }, + }, + "eu-west-3": endpoint{ + Hostname: "s3-control.eu-west-3.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "eu-west-3", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-control.sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "s3-control.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{ + Hostname: "s3-control.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-east-2-fips": endpoint{ + Hostname: "s3-control-fips.us-east-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{ + Hostname: "s3-control.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + 
}, + "us-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{ + Hostname: "s3-control.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "us-west-2-fips": endpoint{ + Hostname: "s3-control-fips.us-west-2.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "secretsmanager-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "securityhub": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-northeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ap-south-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-1": endpoint{ + Protocols: []string{"https"}, + }, + "ap-southeast-2": endpoint{ + Protocols: []string{"https"}, + }, + "ca-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-central-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-north-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-2": endpoint{ + Protocols: []string{"https"}, + }, + "eu-west-3": endpoint{ + Protocols: []string{"https"}, + }, + "sa-east-1": endpoint{ + Protocols: 
[]string{"https"}, + }, + "us-east-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-east-2": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-1": endpoint{ + Protocols: []string{"https"}, + }, + "us-west-2": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "servicecatalog-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "servicediscovery": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": 
endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "sqs-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "sqs-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "sqs-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "sqs-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "ca-central-1-fips": endpoint{ + Hostname: 
"dynamodb-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + }, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "dynamodb-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-east-1": endpoint{ + Hostname: "sts.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + PartitionEndpoint: "aws-global", + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "support.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, 
+ "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "transfer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "translate-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "translate-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "translate-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workmail": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-east-1": endpoint{}, + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": 
endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. +func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + "cn-northwest-1": region{ + Description: "China (Ningxia)", + }, + }, + Services: services{ + "api.ecr": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "api.ecr.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2": service{ + 
Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "license-manager": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "mediaconvert": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": 
endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{ + Hostname: "s3-control.cn-north-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + "cn-northwest-1": endpoint{ + Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", }, }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, }, - "waf-regional": service{ + "sts": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, - "workdocs": service{ + "support": service{ + PartitionEndpoint: "aws-cn-global", Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "aws-cn-global": endpoint{ + Hostname: "support.cn-north-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, }, }, - "workspaces": service{ + "swf": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "us-east-1": endpoint{}, - "us-west-2": endpoint{}, + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, }, }, - "xray": service{ + "tagging": service{ Endpoints: endpoints{ - "ap-northeast-1": endpoint{}, - "ap-northeast-2": endpoint{}, - "ap-south-1": endpoint{}, - "ap-southeast-1": endpoint{}, - "ap-southeast-2": endpoint{}, - "eu-central-1": endpoint{}, - "eu-west-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "cn-north-1": endpoint{}, + 
"cn-northwest-1": endpoint{}, }, }, }, } -// AwsCnPartition returns the Resolver for AWS China. -func AwsCnPartition() Partition { - return awscnPartition.Partition() +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() } -var awscnPartition = partition{ - ID: "aws-cn", - Name: "AWS China", - DNSSuffix: "amazonaws.com.cn", +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", RegionRegex: regionRegex{ Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") return reg }(), }, @@ -1653,376 +3989,489 @@ var awscnPartition = partition{ SignatureVersions: []string{"v4"}, }, Regions: regions{ - "cn-north-1": region{ - Description: "China (Beijing)", + "us-gov-east-1": region{ + Description: "AWS GovCloud (US-East)", + }, + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", }, }, Services: services{ - "autoscaling": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "acm": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "cloudformation": service{ - + "acm-pca": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "cloudtrail": service{ + "api.ecr": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{ + Hostname: "api.ecr.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "api.ecr.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "config": service{ + "api.sagemaker": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "directconnect": service{ + "apigateway": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "dynamodb": service{ + "application-autoscaling": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + Hostname: "autoscaling.{region}.amazonaws.com", + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "ec2": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - }, + "athena": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "autoscaling": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, }, }, }, - "elasticache": service{ + "clouddirectory": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticbeanstalk": service{ + "cloudformation": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticloadbalancing": service{ - Defaults: endpoint{ - Protocols: 
[]string{"http", "https"}, - }, + "cloudhsm": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticmapreduce": service{ + "cloudhsmv2": service{ Defaults: endpoint{ - Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "cloudhsm", + }, }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "events": service{ + "cloudtrail": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "glacier": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "codebuild": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, + }, + "codecommit": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "iam": service{ - PartitionEndpoint: "aws-cn-global", - IsRegionalized: boxedFalse, + "codedeploy": service{ Endpoints: endpoints{ - "aws-cn-global": endpoint{ - Hostname: "iam.cn-north-1.amazonaws.com.cn", + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ - Region: "cn-north-1", + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", }, }, }, }, - "kinesis": service{ - + "comprehend": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "logs": service{ + "config": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "monitoring": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "datasync": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, }, + }, + "directconnect": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "rds": service{ + "dms": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "redshift": service{ + "ds": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "s3": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, - SignatureVersions: []string{"s3v4"}, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, + }, + "ec2": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "sns": service{ - Defaults: endpoint{ - Protocols: []string{"http", "https"}, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: 
[]string{"http"}, + }, }, + }, + "ecs": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "sqs": service{ - Defaults: endpoint{ - SSLCommonName: "{region}.queue.{dnsSuffix}", - Protocols: []string{"http", "https"}, + "elasticache": service{ + + Endpoints: endpoints{ + "fips": endpoint{ + Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, + }, + "elasticbeanstalk": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "storagegateway": service{ + "elasticfilesystem": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "streams.dynamodb": service{ - Defaults: endpoint{ - Protocols: []string{"http", "http", "https", "https"}, - CredentialScope: credentialScope{ - Service: "dynamodb", + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, }, }, + }, + "elasticmapreduce": service{ + Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, }, }, - "sts": service{ + "es": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "fips": endpoint{ + Hostname: "es-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "swf": service{ + "events": service{ Endpoints: endpoints{ - "cn-north-1": endpoint{}, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - }, -} - -// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). 
-func AwsUsGovPartition() Partition { - return awsusgovPartition.Partition() -} + "firehose": service{ -var awsusgovPartition = partition{ - ID: "aws-us-gov", - Name: "AWS GovCloud (US)", - DNSSuffix: "amazonaws.com", - RegionRegex: regionRegex{ - Regexp: func() *regexp.Regexp { - reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") - return reg - }(), - }, - Defaults: endpoint{ - Hostname: "{service}.{region}.{dnsSuffix}", - Protocols: []string{"https"}, - SignatureVersions: []string{"v4"}, - }, - Regions: regions{ - "us-gov-west-1": region{ - Description: "AWS GovCloud (US)", + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, }, - }, - Services: services{ - "autoscaling": service{ + "glacier": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, }, }, - "cloudformation": service{ + "glue": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "cloudhsm": service{ - + "greengrass": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "cloudtrail": service{ - + "guardduty": service{ + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"https"}, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "config": service{ + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, Endpoints: endpoints{ - "us-gov-west-1": endpoint{}, + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, - "directconnect": service{ + "inspector": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "dynamodb": service{ - + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "ec2": service{ + "kinesis": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "ec2metadata": service{ - PartitionEndpoint: "aws-global", - IsRegionalized: boxedFalse, + "kms": service{ Endpoints: endpoints{ - "aws-global": endpoint{ - Hostname: "169.254.169.254/latest", - Protocols: []string{"http"}, + "ProdFips": endpoint{ + Hostname: "kms-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticache": service{ + "lambda": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "elasticloadbalancing": service{ + "license-manager": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "elasticmapreduce": service{ + "logs": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, - }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, }, }, - "glacier": service{ + "mediaconvert": service{ Endpoints: endpoints{ - "us-gov-west-1": endpoint{ - Protocols: []string{"http", "https"}, + "us-gov-west-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", }, }, + Endpoints: endpoints{ + 
"us-gov-west-1": endpoint{}, + }, }, - "iam": service{ + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "organizations": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, Endpoints: endpoints{ "aws-us-gov-global": endpoint{ - Hostname: "iam.us-gov.amazonaws.com", + Hostname: "organizations.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, }, }, - "kinesis": service{ + "polly": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "kms": service{ + "ram": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "logs": service{ + "rds": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "monitoring": service{ + "redshift": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, - "rds": service{ + "rekognition": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, }, - "redshift": service{ + "runtime.sagemaker": service{ Endpoints: endpoints{ "us-gov-west-1": endpoint{}, @@ -2039,21 +4488,92 @@ var awsusgovPartition = partition{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{ + Hostname: "s3.us-gov-east-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, "us-gov-west-1": endpoint{ - Hostname: "s3-us-gov-west-1.amazonaws.com", + Hostname: "s3.us-gov-west-1.amazonaws.com", Protocols: []string{"http", "https"}, }, }, }, + "s3-control": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "us-gov-east-1": endpoint{ + Hostname: "s3-control.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-control.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1-fips": endpoint{ + Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com", + SignatureVersions: []string{"s3v4"}, + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "serverlessrepo": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"https"}, + }, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "sns": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Protocols: []string{"http", "https"}, }, @@ -2062,12 +4582,33 @@ var awsusgovPartition = partition{ "sqs": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ SSLCommonName: "{region}.queue.{dnsSuffix}", Protocols: []string{"http", "https"}, }, }, }, + 
"ssm": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, "streams.dynamodb": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2075,17 +4616,65 @@ var awsusgovPartition = partition{ }, }, Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "dynamodb.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sts": service{ Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, }, "swf": service{ + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, + "translate": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + "us-gov-west-1-fips": endpoint{ + Hostname: "translate-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "workspaces": service{ + Endpoints: endpoints{ "us-gov-west-1": endpoint{}, }, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go new file mode 100644 index 00000000..ca8fc828 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go @@ -0,0 +1,141 @@ +package endpoints + +// Service identifiers +// +// Deprecated: Use client package's EndpointsID value instead of these +// ServiceIDs. These IDs are not maintained, and are out of date. +const ( + A4bServiceID = "a4b" // A4b. + AcmServiceID = "acm" // Acm. + AcmPcaServiceID = "acm-pca" // AcmPca. + ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor. + ApiPricingServiceID = "api.pricing" // ApiPricing. + ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AppsyncServiceID = "appsync" // Appsync. + AthenaServiceID = "athena" // Athena. + AutoscalingServiceID = "autoscaling" // Autoscaling. + AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + CeServiceID = "ce" // Ce. + ChimeServiceID = "chime" // Chime. + Cloud9ServiceID = "cloud9" // Cloud9. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. 
+ CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ComprehendServiceID = "comprehend" // Comprehend. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DaxServiceID = "dax" // Dax. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + FmsServiceID = "fms" // Fms. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + GlueServiceID = "glue" // Glue. + GreengrassServiceID = "greengrass" // Greengrass. + GuarddutyServiceID = "guardduty" // Guardduty. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + IotanalyticsServiceID = "iotanalytics" // Iotanalytics. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MediaconvertServiceID = "mediaconvert" // Mediaconvert. + MedialiveServiceID = "medialive" // Medialive. + MediapackageServiceID = "mediapackage" // Mediapackage. + MediastoreServiceID = "mediastore" // Mediastore. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MghServiceID = "mgh" // Mgh. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. + MturkRequesterServiceID = "mturk-requester" // MturkRequester. + NeptuneServiceID = "neptune" // Neptune. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. 
+ RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + ResourceGroupsServiceID = "resource-groups" // ResourceGroups. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker. + S3ServiceID = "s3" // S3. + S3ControlServiceID = "s3-control" // S3Control. + SagemakerServiceID = "api.sagemaker" // Sagemaker. + SdbServiceID = "sdb" // Sdb. + SecretsmanagerServiceID = "secretsmanager" // Secretsmanager. + ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ServicediscoveryServiceID = "servicediscovery" // Servicediscovery. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + TransferServiceID = "transfer" // Transfer. + TranslateServiceID = "translate" // Translate. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkmailServiceID = "workmail" // Workmail. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go index a0e9bc45..84316b92 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -21,12 +21,12 @@ // partitions := resolver.(endpoints.EnumPartitions).Partitions() // // for _, p := range partitions { -// fmt.Println("Regions for", p.Name) +// fmt.Println("Regions for", p.ID()) // for id, _ := range p.Regions() { // fmt.Println("*", id) // } // -// fmt.Println("Services for", p.Name) +// fmt.Println("Services for", p.ID()) // for id, _ := range p.Services() { // fmt.Println("*", id) // } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go index 37e19ab0..f82babf6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -35,7 +35,7 @@ type Options struct { // // If resolving an endpoint on the partition list the provided region will // be used to determine which partition's domain name pattern to the service - // endpoint ID with. If both the service and region are unkonwn and resolving + // endpoint ID with. If both the service and region are unknown and resolving // the endpoint on partition list an UnknownEndpointError error will be returned. // // If resolving and endpoint on a partition specific resolver that partition's @@ -124,6 +124,49 @@ type EnumPartitions interface { Partitions() []Partition } +// RegionsForService returns a map of regions for the partition and service. 
+// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + // A Partition provides the ability to enumerate the partition's regions // and services. type Partition struct { @@ -132,7 +175,7 @@ type Partition struct { } // ID returns the identifier of the partition. -func (p *Partition) ID() string { return p.id } +func (p Partition) ID() string { return p.id } // EndpointFor attempts to resolve the endpoint based on service and region. // See Options for information on configuring how the endpoint is resolved. @@ -155,18 +198,19 @@ func (p *Partition) ID() string { return p.id } // Errors that can be returned. // * UnknownServiceError // * UnknownEndpointError -func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return p.p.EndpointFor(service, region, opts...) } // Regions returns a map of Regions indexed by their ID. This is useful for // enumerating over the regions in a partition. -func (p *Partition) Regions() map[string]Region { +func (p Partition) Regions() map[string]Region { rs := map[string]Region{} - for id := range p.p.Regions { + for id, r := range p.p.Regions { rs[id] = Region{ - id: id, - p: p.p, + id: id, + desc: r.Description, + p: p.p, } } @@ -175,7 +219,7 @@ func (p *Partition) Regions() map[string]Region { // Services returns a map of Service indexed by their ID. This is useful for // enumerating over the services in a partition. -func (p *Partition) Services() map[string]Service { +func (p Partition) Services() map[string]Service { ss := map[string]Service{} for id := range p.p.Services { ss[id] = Service{ @@ -195,16 +239,20 @@ type Region struct { } // ID returns the region's identifier. -func (r *Region) ID() string { return r.id } +func (r Region) ID() string { return r.id } + +// Description returns the region's description. The region description +// is free text, it can be empty, and it may change between SDK releases. +func (r Region) Description() string { return r.desc } // ResolveEndpoint resolves an endpoint from the context of the region given // a service. 
See Partition.EndpointFor for usage and errors that can be returned. -func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { return r.p.EndpointFor(service, r.id, opts...) } // Services returns a list of all services that are known to be in this region. -func (r *Region) Services() map[string]Service { +func (r Region) Services() map[string]Service { ss := map[string]Service{} for id, s := range r.p.Services { if _, ok := s.Endpoints[r.id]; ok { @@ -226,17 +274,39 @@ type Service struct { } // ID returns the identifier for the service. -func (s *Service) ID() string { return s.id } +func (s Service) ID() string { return s.id } // ResolveEndpoint resolves an endpoint from the context of a service given // a region. See Partition.EndpointFor for usage and errors that can be returned. -func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { return s.p.EndpointFor(s.id, region, opts...) } +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in, whereas an Endpoint is +// a URL that can be resolved to an instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if r, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + desc: r.Description, + p: s.p, + } + } + } + + return rs +} + // Endpoints returns a map of Endpoints indexed by their ID for all known // endpoints for a service. -func (s *Service) Endpoints() map[string]Endpoint { +// +// A region is the AWS region the service exists in, whereas an Endpoint is +// a URL that can be resolved to an instance of a service. +func (s Service) Endpoints() map[string]Endpoint { es := map[string]Endpoint{} for id := range s.p.Services[s.id].Endpoints { es[id] = Endpoint{ @@ -259,15 +329,15 @@ type Endpoint struct { } // ID returns the identifier for an endpoint. -func (e *Endpoint) ID() string { return e.id } +func (e Endpoint) ID() string { return e.id } // ServiceID returns the identifier the endpoint belongs to. -func (e *Endpoint) ServiceID() string { return e.serviceID } +func (e Endpoint) ServiceID() string { return e.serviceID } // ResolveEndpoint resolves an endpoint from the context of a service and // region the endpoint represents. See Partition.EndpointFor for usage and // errors that can be returned. -func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { return e.p.EndpointFor(e.serviceID, e.id, opts...) } @@ -283,6 +353,10 @@ type ResolvedEndpoint struct { // The service name that should be used for signing requests. SigningName string + // States that the signing name for this endpoint was derived from metadata + // passed in, but was not explicitly modeled. + SigningNameDerived bool + // The signing method that should be used for signing requests. SigningMethod string } @@ -300,28 +374,6 @@ type EndpointNotFoundError struct { Region string } -//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError. 
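The endpoints.go hunks above convert the Partition, Region, Service, and Endpoint methods to value receivers and add the RegionsForService, PartitionForRegion, Region.Description, and Service.Regions helpers. A minimal usage sketch of the updated API, assuming the aws-sdk-go import path as vendored here; the service and region IDs chosen are illustrative, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Enumerate the regions DynamoDB is modeled in for the standard AWS
	// partition via the new RegionsForService helper.
	rs, ok := endpoints.RegionsForService(
		endpoints.DefaultPartitions(),
		endpoints.AwsPartitionID,
		endpoints.DynamodbServiceID,
	)
	if ok {
		for id, r := range rs {
			fmt.Printf("%s: %s\n", id, r.Description())
		}
	}

	// Map a region ID back to its partition; PartitionForRegion also matches
	// regions known only through the partition's region regex.
	if p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "us-gov-east-1"); ok {
		ep, err := p.EndpointFor(endpoints.Ec2ServiceID, "us-gov-east-1")
		if err == nil {
			fmt.Println(p.ID(), ep.URL, ep.SigningRegion)
		}
	}
}
```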
-//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError { -// return EndpointNotFoundError{ -// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil), -// Partition: p, -// Service: s, -// Region: r, -// } -//} -// -//// Error returns string representation of the error. -//func (e EndpointNotFoundError) Error() string { -// extra := fmt.Sprintf("partition: %q, service: %q, region: %q", -// e.Partition, e.Service, e.Region) -// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) -//} -// -//// String returns the string representation of the error. -//func (e EndpointNotFoundError) String() string { -// return e.Error() -//} - // A UnknownServiceError is returned when the service does not resolve to an // endpoint. Includes a list of all known services for the partition. Returned // when a partition does not support the service. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go index 13d968a2..ff6f76db 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -226,16 +226,20 @@ func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, op if len(signingRegion) == 0 { signingRegion = region } + signingName := e.CredentialScope.Service + var signingNameDerived bool if len(signingName) == 0 { signingName = service + signingNameDerived = true } return ResolvedEndpoint{ - URL: u, - SigningRegion: signingRegion, - SigningName: signingName, - SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningNameDerived: signingNameDerived, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go index 1e7369db..0fdfcc56 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -16,6 +16,10 @@ import ( type CodeGenOptions struct { // Options for how the model will be decoded. DecodeModelOptions DecodeModelOptions + + // Disables code generation of the service endpoint prefix IDs defined in + // the model. + DisableGenerateServiceIDs bool } // Set combines all of the option functions together @@ -39,8 +43,16 @@ func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGe return err } + v := struct { + Resolver + CodeGenOptions + }{ + Resolver: resolver, + CodeGenOptions: opts, + } + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) - if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil { return fmt.Errorf("failed to execute template, %v", err) } @@ -158,7 +170,7 @@ var funcMap = template.FuncMap{ const v3Tmpl = ` {{ define "defaults" -}} -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. package endpoints @@ -166,15 +178,17 @@ import ( "regexp" ) - {{ template "partition consts" . }} + {{ template "partition consts" $.Resolver }} - {{ range $_, $partition := . 
}} + {{ range $_, $partition := $.Resolver }} {{ template "partition region consts" $partition }} {{ end }} - {{ template "service consts" . }} + {{ if not $.DisableGenerateServiceIDs -}} + {{ template "service consts" $.Resolver }} + {{- end }} - {{ template "endpoint resolvers" . }} + {{ template "endpoint resolvers" $.Resolver }} {{- end }} {{ define "partition consts" }} @@ -209,17 +223,20 @@ import ( // DefaultResolver returns an Endpoint resolver that will be able // to resolve endpoints for: {{ ListPartitionNames . }}. // - // Casting the return value of this func to a EnumPartitions will - // allow you to get a list of the partitions in the order the endpoints - // will be resolved in. + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. // - // resolver := endpoints.DefaultResolver() - // partitions := resolver.(endpoints.EnumPartitions).Partitions() + // partitions := endpoints.DefaultPartitions // for _, p := range partitions { // // ... inspect partitions // } - func DefaultResolver() Resolver { - return defaultPartitions + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() } var defaultPartitions = partitions{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go index 57663616..fa06f7a8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -5,13 +5,9 @@ import "github.com/aws/aws-sdk-go/aws/awserr" var ( // ErrMissingRegion is an error that is returned if region configuration is // not found. - // - // @readonly ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) // ErrMissingEndpoint is an error that is returned if an endpoint cannot be // resolved for a service. - // - // @readonly ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) ) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 00000000..91a6f277 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go index db87188e..6ed15b2e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType { // Matches returns true if the v LogLevel is enabled by this LogLevel. Should be // used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. +// LogLevel is nil, will default to LogOff comparison. func (l *LogLevelType) Matches(v LogLevelType) bool { c := l.Value() return c&v == v } // AtLeast returns true if this LogLevel is at least high enough to satisfies v. 
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default +// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default // to LogOff comparison. func (l *LogLevelType) AtLeast(v LogLevelType) bool { c := l.Value() @@ -71,6 +71,12 @@ const ( // LogDebugWithRequestErrors states the SDK should log when service requests fail // to build, send, validate, or unmarshal. LogDebugWithRequestErrors + + // LogDebugWithEventStreamBody states the SDK should log EventStream + // request and response bodys. This should be used to log the EventStream + // wire unmarshaled message content of requests and responses made while + // using the SDK Will also enable LogDebug. + LogDebugWithEventStreamBody ) // A Logger is a minimalistic interface for the SDK to log messages to. Should diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 00000000..d9b37f4d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,18 @@ +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + if strings.Contains(err.Error(), "read: connection reset") { + return false + } + + if strings.Contains(err.Error(), "connection reset") || + strings.Contains(err.Error(), "broken pipe") { + return true + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go index 5279c19c..627ec722 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -14,10 +14,13 @@ type Handlers struct { Send HandlerList ValidateResponse HandlerList Unmarshal HandlerList + UnmarshalStream HandlerList UnmarshalMeta HandlerList UnmarshalError HandlerList Retry HandlerList AfterRetry HandlerList + CompleteAttempt HandlerList + Complete HandlerList } // Copy returns of this handler's lists. @@ -29,10 +32,13 @@ func (h *Handlers) Copy() Handlers { Send: h.Send.copy(), ValidateResponse: h.ValidateResponse.copy(), Unmarshal: h.Unmarshal.copy(), + UnmarshalStream: h.UnmarshalStream.copy(), UnmarshalError: h.UnmarshalError.copy(), UnmarshalMeta: h.UnmarshalMeta.copy(), Retry: h.Retry.copy(), AfterRetry: h.AfterRetry.copy(), + CompleteAttempt: h.CompleteAttempt.copy(), + Complete: h.Complete.copy(), } } @@ -43,11 +49,59 @@ func (h *Handlers) Clear() { h.Send.Clear() h.Sign.Clear() h.Unmarshal.Clear() + h.UnmarshalStream.Clear() h.UnmarshalMeta.Clear() h.UnmarshalError.Clear() h.ValidateResponse.Clear() h.Retry.Clear() h.AfterRetry.Clear() + h.CompleteAttempt.Clear() + h.Complete.Clear() +} + +// IsEmpty returns if there are no handlers in any of the handlerlists. 
+func (h *Handlers) IsEmpty() bool { + if h.Validate.Len() != 0 { + return false + } + if h.Build.Len() != 0 { + return false + } + if h.Send.Len() != 0 { + return false + } + if h.Sign.Len() != 0 { + return false + } + if h.Unmarshal.Len() != 0 { + return false + } + if h.UnmarshalStream.Len() != 0 { + return false + } + if h.UnmarshalMeta.Len() != 0 { + return false + } + if h.UnmarshalError.Len() != 0 { + return false + } + if h.ValidateResponse.Len() != 0 { + return false + } + if h.Retry.Len() != 0 { + return false + } + if h.AfterRetry.Len() != 0 { + return false + } + if h.CompleteAttempt.Len() != 0 { + return false + } + if h.Complete.Len() != 0 { + return false + } + + return true } // A HandlerListRunItem represents an entry in the HandlerList which @@ -85,13 +139,17 @@ func (l *HandlerList) copy() HandlerList { n := HandlerList{ AfterEachFn: l.AfterEachFn, } - n.list = append([]NamedHandler{}, l.list...) + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) return n } // Clear clears the handler list. func (l *HandlerList) Clear() { - l.list = []NamedHandler{} + l.list = l.list[0:0] } // Len returns the number of handlers in the list. @@ -101,33 +159,100 @@ func (l *HandlerList) Len() int { // PushBack pushes handler f to the back of the handler list. func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) + l.PushBackNamed(NamedHandler{"__anonymous", f}) } // PushBackNamed pushes named handler f to the back of the handler list. func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } l.list = append(l.list, n) } +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + // PushFrontNamed pushes named handler f to the front of the handler list. func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } } // Remove removes a NamedHandler n func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. +func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. 
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// Swap will swap out all handlers matching the name passed in. The matched +// handlers will be swapped in. True is returned if the handlers were swapped. +func (l *HandlerList) Swap(name string, replace NamedHandler) bool { + var swapped bool + + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == name { + l.list[i] = replace + swapped = true } } - l.list = newlist + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } } // Run executes all handlers in the list with a given request object. @@ -163,6 +288,16 @@ func HandlerListStopOnError(item HandlerListRunItem) bool { return item.Request.Error == nil } +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + // MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request // header. If the extra parameters are provided they will be added as metadata to the // name/version pair resulting in the following format. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index 02f07f4a..b0c2ef4f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -3,6 +3,8 @@ package request import ( "io" "sync" + + "github.com/aws/aws-sdk-go/internal/sdkio" ) // offsetReader is a thread-safe io.ReadCloser to prevent racing @@ -15,7 +17,7 @@ type offsetReader struct { func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { reader := &offsetReader{} - buf.Seek(offset, 0) + buf.Seek(offset, sdkio.SeekStart) reader.buf = buf return reader diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 77312bb6..2f0c4a90 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -14,6 +14,29 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is received + // during body reads. 
+ ErrCodeResponseTimeout = "ResponseTimeout" + + // ErrCodeInvalidPresignExpire is returned when the expire time provided to + // presign is invalid + ErrCodeInvalidPresignExpire = "InvalidPresignExpireError" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" ) // A Request is the service request to be made. @@ -23,30 +46,38 @@ type Request struct { Handlers Handlers Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - LastSignedAt time.Time + AttemptTime time.Time + Time time.Time + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + // A value greater than 0 instructs the request to be signed as Presigned URL + // You should not set this field directly. Instead use Request's + // Presign or PresignRequest methods. + ExpireTime time.Duration + + context aws.Context built bool - // Need to persist an intermideant body betweend the input Body and HTTP + // Need to persist an intermediate body between the input Body and HTTP // request body because the HTTP Client's transport can maintain a reference // to the HTTP request's body after the client has returned. This value is - // safe to use concurrently and rewraps the input Body for each HTTP request. + // safe to use concurrently and wrap the input Body for each HTTP request. safeBody *offsetReader } @@ -60,14 +91,6 @@ type Operation struct { BeforePresignFn func(r *Request) error } -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - // New returns a new Request pointer for the service API // operation and parameters. // @@ -91,6 +114,8 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) } + SanitizeHostForHeader(httpReq) + r := &Request{ Config: cfg, ClientInfo: clientInfo, @@ -111,11 +136,106 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, return r } +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. 
+// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. +// +// var headers http.Header +// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers)) +func WithGetResponseHeaders(headers *http.Header) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *headers = req.HTTPResponse.Header + }) + } +} + +// WithLogLevel is a request option that will set the request to use a specific +// log level when the request is made. +// +// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)) +func WithLogLevel(l aws.LogLevelType) Option { + return func(r *Request) { + r.Config.LogLevel = aws.LogLevel(l) + } +} + +// ApplyOptions will apply each option to the request calling them in the order +// they were provided. +func (r *Request) ApplyOptions(opts ...Option) { + for _, opt := range opts { + opt(r) + } +} + +// Context will always return a non-nil context. If Request does not have a +// context aws.BackgroundContext will be returned. +func (r *Request) Context() aws.Context { + if r.context != nil { + return r.context + } + return aws.BackgroundContext() +} + +// SetContext adds a Context to the current request that can be used to cancel +// an in-flight request. The Context value must not be nil, or this method will +// panic. +// +// Unlike http.Request.WithContext, SetContext does not return a copy of the +// Request. It is not safe to use a single Request value for multiple +// requests. A new Request should be created for each API operation request. +// +// Go 1.6 and below: +// The http.Request's Cancel field will be set to the Done() value of +// the context. This will overwrite the Cancel field's value. +// +// Go 1.7 and above: +// The http.Request.WithContext will be used to set the context on the underlying +// http.Request. This will create a shallow copy of the http.Request. The SDK +// may create sub contexts in the future for nested requests such as retries. +func (r *Request) SetContext(ctx aws.Context) { + if ctx == nil { + panic("context cannot be nil") + } + setRequestContext(r, ctx) +} + // WillRetry returns if the request can be retried. func (r *Request) WillRetry() bool { + if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody { + return false + } return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() } +func fmtAttemptCount(retryCount, maxRetries int) string { + return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries) +} + // ParamsFilled returns if the request's parameters have been populated // and the parameters are valid. False is returned if no parameters are // provided or invalid. @@ -144,59 +264,95 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { r.Body = reader + r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Body's current offset. 
 r.ResetBody()
}

 // Presign returns the request's signed URL. Error will be returned
-// if the signing fails.
-func (r *Request) Presign(expireTime time.Duration) (string, error) {
- r.ExpireTime = expireTime
+// if the signing fails. The expire parameter is only used for presigned Amazon
+// S3 API requests. All other AWS services will use a fixed expiration
+// time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or less.
+// An error is returned if the expire duration is 0 or less.
+func (r *Request) Presign(expire time.Duration) (string, error) {
+ r = r.copy()
+
+ // Presign requires all headers be hoisted. There is no way to retrieve
+ // the signed headers if they are not hoisted, which would make the
+ // presigned URL useless.
 r.NotHoist = false
+ u, _, err := getPresignedURL(r, expire)
+ return u, err
+}
+
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed. The expire parameter is only used for
+// presigned Amazon S3 API requests. All other AWS services will use a fixed
+// expiration time of 15 minutes.
+//
+// It is invalid to create a presigned URL with an expire duration of 0 or less.
+// An error is returned if the expire duration is 0 or less.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
+func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
+ r = r.copy()
+ return getPresignedURL(r, expire)
+}
+
+// IsPresigned returns true if the request represents a presigned API URL.
+func (r *Request) IsPresigned() bool {
+ return r.ExpireTime != 0
+}
+
+func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
+ if expire <= 0 {
+ return "", nil, awserr.New(
+ ErrCodeInvalidPresignExpire,
+ "presigned URL requires an expire duration greater than 0",
+ nil,
+ )
+ }
+
+ r.ExpireTime = expire
+
 if r.Operation.BeforePresignFn != nil {
- r = r.copy()
- err := r.Operation.BeforePresignFn(r)
- if err != nil {
- return "", err
+ if err := r.Operation.BeforePresignFn(r); err != nil {
+ return "", nil, err
 }
 }
- r.Sign()
- if r.Error != nil {
- return "", r.Error
+ if err := r.Sign(); err != nil {
+ return "", nil, err
 }
- return r.HTTPRequest.URL.String(), nil
-}
-// PresignRequest behaves just like presign, but hoists all headers and signs them.
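An illustrative sketch of the presign APIs above, assuming a hypothetical S3 client and object names; this is not part of the diff.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical names
		Key:    aws.String("example-key"),
	})

	// Presign hoists all signed headers into the query string.
	urlStr, err := req.Presign(15 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("URL:", urlStr)

	// PresignRequest also reports which headers must accompany the URL.
	urlStr2, signedHeaders, err := req.PresignRequest(15 * time.Minute)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("URL:", urlStr2, "headers:", signedHeaders)
}
```

Because both methods now operate on an internal copy of the request, presigning no longer mutates the original Request value.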
-// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil } -func debugLogReqError(r *Request, stage string, retrying bool, err error) { +const ( + willRetry = "will retry" + notRetrying = "not retrying" + retryCount = "retry %v/%v" +) + +func debugLogReqError(r *Request, stage, retryStr string, err error) { if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { return } - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) } // Build will build the request's object so it can be signed and sent // to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run +// Any additional build Handlers set on this request will be run // in the order they were set. // // The request will only be built once. Multiple calls to build will have @@ -208,12 +364,12 @@ func (r *Request) Build() error { if !r.built { r.Handlers.Validate.Run(r) if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) + debugLogReqError(r, "Validate Request", notRetrying, r.Error) return r.Error } r.Handlers.Build.Run(r) if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } r.built = true @@ -222,14 +378,14 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request returning error if errors are encountered. +// Sign will sign the request, returning error if errors are encountered. // -// Send will build the request prior to signing. All Sign Handlers will +// Sign will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. func (r *Request) Sign() error { r.Build() if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) + debugLogReqError(r, "Build Request", notRetrying, r.Error) return r.Error } @@ -237,10 +393,7 @@ func (r *Request) Sign() error { return r.Error } -// ResetBody rewinds the request body backto its starting position, and -// set's the HTTP Request body reference. When the body is read prior -// to being sent in the HTTP request it will need to be rewound. -func (r *Request) ResetBody() { +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { if r.safeBody != nil { r.safeBody.Close() } @@ -260,16 +413,16 @@ func (r *Request) ResetBody() { // of the SDK if they used that field. // // Related golang/go#18257 - l, err := computeBodyLength(r.Body) + l, err := aws.SeekerLen(r.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to compute request body size", err) - return + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) } + var body io.ReadCloser if l == 0 { - r.HTTPRequest.Body = noBodyReader + body = NoBody } else if l > 0 { - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } else { // Hack to prevent sending bodies for methods where the body // should be ignored by the server. Sending bodies on these @@ -278,50 +431,17 @@ func (r *Request) ResetBody() { // Transfer-Encoding: chunked bodies for these methods. 
// // This would only happen if a aws.ReaderSeekerCloser was used with - // a io.Reader that was not also an io.Seeker. + // a io.Reader that was not also an io.Seeker, or did not implement + // Len() method. switch r.Operation.HTTPMethod { case "GET", "HEAD", "DELETE": - r.HTTPRequest.Body = noBodyReader + body = NoBody default: - r.HTTPRequest.Body = r.safeBody + body = r.safeBody } } -} - -// Attempts to compute the length of the body of the reader using the -// io.Seeker interface. If the value is not seekable because of being -// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. -// If no error occurs the length of the body will be returned. -func computeBodyLength(r io.ReadSeeker) (int64, error) { - seekable := true - // Determine if the seeker is actually seekable. ReaderSeekerCloser - // hides the fact that a io.Readers might not actually be seekable. - switch v := r.(type) { - case aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - case *aws.ReaderSeekerCloser: - seekable = v.IsSeeker() - } - if !seekable { - return -1, nil - } - - curOffset, err := r.Seek(0, 1) - if err != nil { - return 0, err - } - endOffset, err := r.Seek(0, 2) - if err != nil { - return 0, err - } - - _, err = r.Seek(curOffset, 0) - if err != nil { - return 0, err - } - - return endOffset - curOffset, nil + return body, nil } // GetBody will return an io.ReadSeeker of the Request's underlying @@ -330,7 +450,7 @@ func (r *Request) GetBody() io.ReadSeeker { return r.safeBody } -// Send will send the request returning error if errors are encountered. +// Send will send the request, returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will // be executed in the order they were set. @@ -344,78 +464,90 @@ func (r *Request) GetBody() io.ReadSeeker { // // Send will not close the request.Request's body. func (r *Request) Send() error { - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - // The previous http.Request will have a reference to the r.Body - // and the HTTP Client's Transport may still be reading from - // the request's body even though the Client's Do returned. - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) - r.ResetBody() + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + if err := r.Error; err != nil { + return err + } - // Closing response body to ensure that no response body is leaked - // between retry attempts. 
- if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - r.HTTPResponse.Body.Close() - } - } + for { + r.Error = nil + r.AttemptTime = time.Now() - r.Sign() - if r.Error != nil { - return r.Error + if err := r.Sign(); err != nil { + debugLogReqError(r, "Sign Request", notRetrying, err) + return err } - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if !shouldRetryCancel(r) { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.UnmarshalError.Run(r) + if err := r.sendRequest(); err == nil { + return nil + } else if !shouldRetryError(r.Error) { + return err + } else { r.Handlers.Retry.Run(r) r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) + if r.Error != nil || !aws.BoolValue(r.Retryable) { return r.Error } - debugLogReqError(r, "Unmarshal Response", true, err) + + r.prepareRetry() continue } + } +} - break +func (r *Request) prepareRetry() { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } +} + +func (r *Request) sendRequest() (sendErr error) { + defer r.Handlers.CompleteAttempt.Run(r) + + r.Retryable = nil + r.Handlers.Send.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + debugLogReqError(r, "Validate Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", + fmtAttemptCount(r.RetryCount, r.MaxRetries()), + r.Error) + return r.Error } return nil @@ -441,25 +573,116 @@ func AddToUserAgent(r *Request, s string) { r.HTTPRequest.Header.Set("User-Agent", s) } -func shouldRetryCancel(r *Request) bool { - awsErr, ok := r.Error.(awserr.Error) - timeoutErr := false - errStr := r.Error.Error() - if ok { - err := awsErr.OrigErr() - netErr, netOK := err.(net.Error) - timeoutErr = netOK && netErr.Temporary() - if urlErr, ok := err.(*url.Error); !timeoutErr && ok { - errStr = urlErr.Err.Error() +type temporary interface { + Temporary() bool +} + +func shouldRetryError(origErr error) bool { + switch err := origErr.(type) { + case awserr.Error: + if err.Code() == CanceledErrorCode { + return false + } + return shouldRetryError(err.OrigErr()) + case *url.Error: + if strings.Contains(err.Error(), "connection refused") { + // Refused connections should be retried as the service may not yet + // be running on the port. Go TCP dial considers refused + // connections as not temporary. + return true + } + // *url.Error only implements Temporary after golang 1.6 but since + // url.Error only wraps the error: + return shouldRetryError(err.Err) + case temporary: + if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" { + return true } + // If the error is temporary, we want to allow continuation of the + // retry process + return err.Temporary() || isErrConnectionReset(origErr) + case nil: + // `awserr.Error.OrigErr()` can be nil, meaning there was an error but + // because we don't know the cause, it is marked as retryable. See + // TestRequest4xxUnretryable for an example. + return true + default: + switch err.Error() { + case "net/http: request canceled", + "net/http: request canceled while waiting for connection": + // known 1.5 error case when an http request is cancelled + return false + } + // here we don't know the error; so we allow a retry. + return true } +} - // There can be two types of canceled errors here. - // The first being a net.Error and the other being an error. - // If the request was timed out, we want to continue the retry - // process. Otherwise, return the canceled error. - return timeoutErr || - (errStr != "net/http: request canceled" && - errStr != "net/http: request canceled while waiting for connection") +// SanitizeHostForHeader removes default port from host and updates request.Host +func SanitizeHostForHeader(r *http.Request) { + host := getHost(r) + port := portOnly(host) + if port != "" && isDefaultPort(r.URL.Scheme, port) { + r.Host = stripPort(host) + } +} + +// Returns host from request +func getHost(r *http.Request) string { + if r.Host != "" { + return r.Host + } + + return r.URL.Host +} + +// Hostname returns u.Host, without any port number. 
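A quick illustrative sketch (not part of this diff) of what SanitizeHostForHeader does: default ports are stripped from the Host header so the signature is computed over the same host value the server will see.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	r, _ := http.NewRequest("GET", "https://example.com:443/bucket/key", nil)

	// 443 is the default HTTPS port, so it is dropped from the Host header.
	// A non-default port such as :9443 would be left untouched.
	request.SanitizeHostForHeader(r)
	fmt.Println(r.Host) // example.com
}
```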
+// +// If Host is an IPv6 literal with a port number, Hostname returns the +// IPv6 literal without the square brackets. IPv6 literals may include +// a zone identifier. +// +// Copied from the Go 1.8 standard library (net/url) +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} + +// Port returns the port part of u.Host, without the leading colon. +// If u.Host doesn't contain a port, Port returns an empty string. +// +// Copied from the Go 1.8 standard library (net/url) +func portOnly(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return "" + } + if i := strings.Index(hostport, "]:"); i != -1 { + return hostport[i+len("]:"):] + } + if strings.Contains(hostport, "]") { + return "" + } + return hostport[colon+len(":"):] +} + +// Returns true if the specified URI is using the standard port +// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs) +func isDefaultPort(scheme, port string) bool { + if port == "" { + return true + } + + lowerCaseScheme := strings.ToLower(scheme) + if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") { + return true + } + return false } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go index 1323af90..e36e468b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -16,6 +16,24 @@ func (noBody) Read([]byte) (int, error) { return 0, io.EOF } func (noBody) Close() error { return nil } func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } -// Is an empty reader that will trigger the Go HTTP client to not include +// NoBody is an empty reader that will trigger the Go HTTP client to not include // and body in the HTTP request. -var noBodyReader = noBody{} +var NoBody = noBody{} + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go index 8b963f4d..7c6a8000 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -2,8 +2,32 @@ package request -import "net/http" +import ( + "net/http" +) -// Is a http.NoBody reader instructing Go HTTP client to not include +// NoBody is a http.NoBody reader instructing Go HTTP client to not include // and body in the HTTP request. -var noBodyReader = http.NoBody +var NoBody = http.NoBody + +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. 
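A sketch of the body-rewind plumbing above, driving it by hand rather than through the normal input.Body path; the bucket, key, and offset are hypothetical and this is not part of the diff.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	body := strings.NewReader("0123456789")
	body.Seek(4, io.SeekStart) // pretend the payload starts mid-stream

	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	req.SetReaderBody(aws.ReadSeekCloser(body)) // records BodyStart = 4

	// Each retry (and, on Go 1.8+, each redirect via http.Request.GetBody)
	// rewinds the body to offset 4 rather than to 0.
	req.ResetBody()
	fmt.Println(req.Error) // <nil> when the body length could be computed
}
```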
When the body is read prior +// to being sent in the HTTP request it will need to be rewound. +// +// ResetBody will automatically be called by the SDK's build handler, but if +// the request is being used directly ResetBody must be called before the request +// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically +// call ResetBody. +// +// Will also set the Go 1.8's http.Request.GetBody member to allow retrying +// PUT/POST redirects. +func (r *Request) ResetBody() { + body, err := r.getNextRequestBody() + if err != nil { + r.Error = err + return + } + + r.HTTPRequest.Body = body + r.HTTPRequest.GetBody = r.getNextRequestBody +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go new file mode 100644 index 00000000..a7365cd1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go @@ -0,0 +1,14 @@ +// +build go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest = r.HTTPRequest.WithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go new file mode 100644 index 00000000..307fa070 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go @@ -0,0 +1,14 @@ +// +build !go1.7 + +package request + +import "github.com/aws/aws-sdk-go/aws" + +// setContext updates the Request to use the passed in context for cancellation. +// Context will also be used for request retry delay. +// +// Creates shallow copy of the http.Request with the WithContext method. +func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go index 2939ec47..a633ed5a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -2,29 +2,138 @@ package request import ( "reflect" + "sync/atomic" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" ) -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. 
+// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + // EndPageOnSameToken, when enabled, will allow the paginator to stop on + // token that are the same as its previous tokens. + EndPageOnSameToken bool -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 + started bool + prevTokens []interface{} + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + if !p.started { + return true + } + + hasNextPage := len(p.nextTokens) != 0 + if p.EndPageOnSameToken { + return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens) + } + return hasNextPage +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err } -// nextPageTokens returns the tokens to use when asking for the next page of -// data. +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. +func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.prevTokens = p.nextTokens + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. 
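An illustrative sketch of driving Pagination directly; the generated "Pages" methods are built exactly this way, so the client and bucket names here are hypothetical stand-ins.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)
	ctx := context.Background()

	p := request.Pagination{
		// A fresh Request is built for every page; the paginator injects
		// the next-page tokens into its Params before sending.
		NewRequest: func() (*request.Request, error) {
			req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
				Bucket: aws.String("example-bucket"), // hypothetical bucket
			})
			req.SetContext(ctx)
			return req, nil
		},
	}

	for p.Next() {
		page := p.Page().(*s3.ListObjectsOutput)
		fmt.Println("keys in page:", len(page.Contents))
	}
	if err := p.Err(); err != nil {
		log.Fatal(err)
	}
}
```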
+type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. func (r *Request) nextPageTokens() []interface{} { if r.Operation.Paginator == nil { return nil } - if r.Operation.TruncationToken != "" { tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) if len(tr) == 0 { @@ -46,13 +155,28 @@ func (r *Request) nextPageTokens() []interface{} { tokens := []interface{}{} tokenAdded := false for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { + vs, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(vs) == 0 { tokens = append(tokens, nil) + continue + } + v := vs[0] + + switch tv := v.(type) { + case *string: + if len(aws.StringValue(tv)) == 0 { + tokens = append(tokens, nil) + continue + } + case string: + if len(tv) == 0 { + tokens = append(tokens, nil) + continue + } } + + tokenAdded = true + tokens = append(tokens, v) } if !tokenAdded { return nil @@ -61,9 +185,40 @@ func (r *Request) nextPageTokens() []interface{} { return tokens } +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + // NextPage returns a new Request that can be executed to return the next // page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + tokens := r.nextPageTokens() if len(tokens) == 0 { return nil @@ -90,7 +245,12 @@ func (r *Request) NextPage() *Request { // as the structure "T". The lastPage value represents whether the page is // the last page of data or not. The return value of this function should // return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. 
Use Pagination type for configurable pagination of API operations") + for page := r; page != nil; page = page.NextPage() { if err := page.Send(); err != nil { return err diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index ebd60ccc..d0aa54c6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -8,7 +8,7 @@ import ( ) // Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer +// The default implementation used by most services is the client.DefaultRetryer // structure, which contains basic retry logic using exponential backoff. type Retryer interface { RetryRules(*Request) time.Duration @@ -26,8 +26,10 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { // retryableCodes is a collection of service response codes which are retry-able // without any further action. var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout } var throttleCodes = map[string]struct{}{ @@ -36,9 +38,10 @@ var throttleCodes = map[string]struct{}{ "ThrottlingException": {}, "RequestLimitExceeded": {}, "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once + "RequestThrottledException": {}, "TooManyRequestsException": {}, // Lambda functions "PriorRequestNotComplete": {}, // Route53 + "TransactionInProgressException": {}, } // credsExpiredCodes is a collection of error codes which signify the credentials @@ -68,35 +71,93 @@ func isCodeExpiredCreds(code string) bool { return ok } +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: {}, + ErrCodeRead: {}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() || isErrConnectionReset(err) + } + + return isErrConnectionReset(err) +} + // IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) } } return false } // IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) +// Returns false if error is nil. 
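A short sketch (not part of this diff) of plugging in a custom retryer with WithRetryer; the retry-code tables above still decide which errors are retryable, while the retryer controls how often and how long to back off.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// client.DefaultRetryer is the SDK's standard implementation; only the
	// retry budget is raised here.
	cfg := request.WithRetryer(
		aws.NewConfig().WithRegion("us-west-2"),
		client.DefaultRetryer{NumMaxRetries: 10},
	)
	sess := session.Must(session.NewSession(cfg))
	_ = sess
}
```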
+func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) } } return false } -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) } } return false } + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 00000000..09a44eb9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. +type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. 
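A hedged usage sketch for the WithResponseReadTimeout option defined just below; the S3 call and names are hypothetical and this is not part of the diff.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// Fail any single read of the response body that stalls for 30 seconds.
	_, err := svc.GetObjectWithContext(context.Background(),
		&s3.GetObjectInput{
			Bucket: aws.String("example-bucket"),
			Key:    aws.String("example-key"),
		},
		request.WithResponseReadTimeout(30*time.Second),
	)
	if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.ErrCodeResponseTimeout {
		log.Println("body read timed out; the retry tables treat this code as retryable")
	}
}
```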
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, we will return the
+// ErrCodeResponseTimeout.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
index 2520286b..8630683f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -17,6 +17,12 @@ const (
 ParamMinValueErrCode = "ParamMinValueError"
 // ParamMinLenErrCode is the error code for fields without enough elements.
 ParamMinLenErrCode = "ParamMinLenError"
+ // ParamMaxLenErrCode is the error code for a value being too long.
+ ParamMaxLenErrCode = "ParamMaxLenError"
+
+ // ParamFormatErrCode is the error code for a field with invalid
+ // format or characters.
+ ParamFormatErrCode = "ParamFormatInvalidError"
 )

 // Validator provides a way for types to perform validation logic on their
@@ -220,7 +226,7 @@ type ErrParamMinLen struct {
 func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
 return &ErrParamMinLen{
 errInvalidParam: errInvalidParam{
- code: ParamMinValueErrCode,
+ code: ParamMinLenErrCode,
 field: field,
 msg: fmt.Sprintf("minimum field size of %v", min),
 },
@@ -232,3 +238,49 @@ func (e *ErrParamMinLen) MinLen() int {
 return e.min
 }
+
+// An ErrParamMaxLen represents a maximum length parameter error.
+type ErrParamMaxLen struct {
+ errInvalidParam
+ max int
+}
+
+// NewErrParamMaxLen creates a new maximum length parameter error.
+func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
+ return &ErrParamMaxLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMaxLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("maximum size of %v, %v", max, value),
+ },
+ max: max,
+ }
+}
+
+// MaxLen returns the field's required maximum length.
+func (e *ErrParamMaxLen) MaxLen() int {
+ return e.max
+}
+
+// An ErrParamFormat represents an invalid format parameter error.
+type ErrParamFormat struct {
+ errInvalidParam
+ format string
+}
+
+// NewErrParamFormat creates a new invalid format parameter error.
+func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
+ return &ErrParamFormat{
+ errInvalidParam: errInvalidParam{
+ code: ParamFormatErrCode,
+ field: field,
+ msg: fmt.Sprintf("format %v, %v", format, value),
+ },
+ format: format,
+ }
+}
+
+// Format returns the field's required format.
+func (e *ErrParamFormat) Format() string { + return e.format +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 00000000..4601f883 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,295 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. +func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) + SleepWithContext func(aws.Context, time.Duration) error +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. 
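An illustrative sketch of tuning a generated waiter with the options above; the WaitUntil method, input, and bucket name are hypothetical stand-ins and this is not part of the diff.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	// Poll every 5 seconds, up to 60 attempts, instead of the generated defaults.
	err := svc.WaitUntilBucketExistsWithContext(context.Background(),
		&s3.HeadBucketInput{Bucket: aws.String("example-bucket")},
		request.WithWaiterMaxAttempts(60),
		request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
	)
	if err != nil {
		log.Fatal(err)
	}
}
```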
+func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. +// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. 
+ if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else { + sleepCtxFn := w.SleepWithContext + if sleepCtxFn == nil { + sleepCtxFn = aws.SleepWithContext + } + + if err := sleepCtxFn(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. + return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go new file mode 100644 index 00000000..ea9ebb6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go @@ -0,0 +1,26 @@ +// +build go1.7 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. 
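A sketch of the acceptor table a generated waiter might use with the Waiter type above: succeed on HTTP 200, keep retrying while the service reports NotFound. The waiter name, operation, and bucket are hypothetical; this is not part of the diff.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
	svc := s3.New(sess)

	w := request.Waiter{
		Name:        "WaitUntilExampleBucketReady", // hypothetical waiter
		MaxAttempts: 20,
		Delay:       request.ConstantWaiterDelay(5 * time.Second),
		Acceptors: []request.WaiterAcceptor{
			{State: request.SuccessWaiterState, Matcher: request.StatusWaiterMatch, Expected: 200},
			{State: request.RetryWaiterState, Matcher: request.ErrorWaiterMatch, Expected: "NotFound"},
		},
		NewRequest: func(opts []request.Option) (*request.Request, error) {
			req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("example-bucket")})
			req.SetContext(context.Background())
			req.ApplyOptions(opts...)
			return req, nil
		},
	}
	fmt.Println(w.WaitWithContext(context.Background()))
}
```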
+func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go new file mode 100644 index 00000000..fec39dfc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go @@ -0,0 +1,22 @@ +// +build !go1.6,go1.5 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go new file mode 100644 index 00000000..1c5a5391 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go @@ -0,0 +1,23 @@ +// +build !go1.7,go1.6 + +package session + +import ( + "net" + "net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCABundleTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go new file mode 100644 index 00000000..9fd66368 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go @@ -0,0 +1,207 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/processcreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/shareddefaults" +) + +// valid credential source values +const ( + credSourceEc2Metadata = "Ec2InstanceMetadata" + credSourceEnvironment = "Environment" + credSourceECSContainer = "EcsContainer" +) + +func resolveCredentials(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + // Credentials from Assume Role with specific credentials source. + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + return resolveCredsFromSource(cfg, envCfg, sharedCfg, handlers, sessOpts) + } + + // Credentials from environment variables + if len(envCfg.Creds.AccessKeyID) > 0 { + return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil + } + + // Fallback to the "default" credential resolution chain. 
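A hedged sketch of the session option that triggers these CA bundle transports; the PEM path is hypothetical and this is not part of the diff.

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// When a custom CA bundle is supplied, the session layer builds its
	// HTTP client from the getCABundleTransport variant matching the Go version.
	bundle, err := os.Open("/etc/pki/example-ca-bundle.pem") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer bundle.Close()

	sess := session.Must(session.NewSessionWithOptions(session.Options{
		Config:         aws.Config{Region: aws.String("us-west-2")},
		CustomCABundle: bundle,
	}))
	_ = sess
}
```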
+ return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts) +} + +func resolveCredsFromProfile(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + + if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + // Assume IAM role with credentials source from a different profile. + cred, err := resolveCredsFromProfile(cfg, envCfg, *sharedCfg.AssumeRoleSource, handlers, sessOpts) + if err != nil { + return nil, err + } + + cfgCp := *cfg + cfgCp.Credentials = cred + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + // Static Credentials from Shared Config/Credentials file. + return credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ), nil + + } else if len(sharedCfg.CredentialProcess) > 0 { + // Get credentials from CredentialProcess + cred := processcreds.NewCredentials(sharedCfg.CredentialProcess) + // if RoleARN is provided, so the obtained cred from the Credential Process to assume the role using RoleARN + if len(sharedCfg.AssumeRole.RoleARN) > 0 { + cfgCp := *cfg + cfgCp.Credentials = cred + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) + } + return cred, nil + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 { + // Assume IAM Role with specific credential source. + return resolveCredsFromSource(cfg, envCfg, sharedCfg, handlers, sessOpts) + } + + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{ + Err: awserr.New("EnvAccessKeyNotFound", + "failed to find credentials in the environment.", nil), + }, + &credProviderError{ + Err: awserr.New("SharedCredsLoad", + fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil), + }, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }), nil +} + +func resolveCredsFromSource(cfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) (*credentials.Credentials, error) { + // if both credential_source and source_profile have been set, return an + // error as this is undefined behavior. Only one can be used at a time + // within a profile. 
+ if len(sharedCfg.AssumeRole.SourceProfile) > 0 { + return nil, ErrSharedConfigSourceCollision + } + + cfgCp := *cfg + switch sharedCfg.AssumeRole.CredentialSource { + case credSourceEc2Metadata: + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + case credSourceEnvironment: + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(envCfg.Creds) + + case credSourceECSContainer: + if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 { + return nil, ErrSharedConfigECSContainerEnvVarEmpty + } + + p := defaults.RemoteCredProvider(cfgCp, handlers) + cfgCp.Credentials = credentials.NewCredentials(p) + + default: + return nil, ErrSharedConfigInvalidCredSource + } + + return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts) +} + +func credsFromAssumeRole(cfg aws.Config, + handlers request.Handlers, + sharedCfg sharedConfig, + sessOpts Options, +) (*credentials.Credentials, error) { + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return nil, AssumeRoleTokenProviderNotSetError{} + } + + return stscreds.NewCredentials( + &Session{ + Config: &cfg, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + opt.Duration = sessOpts.AssumeRoleDuration + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ), nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// MFAToken option is not set when shared config is configured load assume a +// role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index 660d9bef..38a7b05a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. Alternatively you can explicitly create a Session with shared config enabled. 
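A sketch of satisfying credsFromAssumeRole's MFA requirement from the session options; the profile name is hypothetical and this is not part of the diff.

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// With a shared-config profile that sets role_arn plus mfa_serial,
	// a token provider must be supplied. StdinTokenProvider prompts on
	// standard input, which suits CLIs but not long-running services.
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState:       session.SharedConfigEnable,
		Profile:                 "example-mfa-profile", // hypothetical profile
		AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
	}))
	_ = sess
}
```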
To do this you can use NewSessionWithOptions to configure how the Session will
be created. Using the NewSessionWithOptions with SharedConfigState set to
-SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
 environment variable was set.
 
 Creating Sessions
@@ -84,7 +84,7 @@ override the shared config state (AWS_SDK_LOAD_CONFIG).
 	// Force enable Shared Config support
 	sess := session.Must(session.NewSessionWithOptions(session.Options{
-		SharedConfigState: SharedConfigEnable,
+		SharedConfigState: session.SharedConfigEnable,
 	}))
 
 Adding Handlers
@@ -99,7 +99,7 @@ handler logs every request and its payload made by a service client:
 	sess.Handlers.Send.PushFront(func(r *request.Request) {
 		// Log every request made and its payload
-		logger.Println("Request: %s/%s, Payload: %s",
+		logger.Printf("Request: %s/%s, Payload: %s",
 			r.ClientInfo.ServiceName, r.Operation, r.Params)
 	})
 
@@ -124,12 +124,11 @@ file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
 files have the same format.
 
 If both config files are present the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared credentials
-file (~/.aws/config).
+read. The Session will be created from configuration values from the shared
+credentials file (~/.aws/credentials) over those in the shared config file (~/.aws/config).
 
 Credentials are the values the SDK should use for authenticating requests with
-AWS Services. They arfrom a configuration file will need to include both
+AWS Services. They are from a configuration file will need to include both
 aws_access_key_id and aws_secret_access_key must be provided together in the
 same file to be considered valid. The values will be ignored if not a complete
 group. aws_session_token is an optional field that can be provided if both of
@@ -184,7 +183,7 @@ be returned when creating the session.
 	// from assumed role.
 	svc := s3.New(sess)
 
-To setup assume role outside of a session see the stscrds.AssumeRoleProvider
+To setup assume role outside of a session see the stscreds.AssumeRoleProvider
 documentation.
 
 Environment Variables
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index e6278a78..cdb42497 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -2,12 +2,16 @@ package session
 
 import (
 	"os"
-	"path/filepath"
 	"strconv"
 
+	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/defaults"
 )
 
+// EnvProviderName provides a name of the provider when config is loaded from environment.
+const EnvProviderName = "EnvConfigCredentials"
+
 // envConfig is a collection of environment values the SDK will read
 // setup config from. All environment values are optional. But some values
 // such as credentials require multiple values to be complete or the values
@@ -76,8 +80,8 @@ type envConfig struct {
 	// AWS_CONFIG_FILE=$HOME/my_shared_config
 	SharedConfigFile string
 
-	// Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
-	// that the SDK will use instead of the the system's root CA bundle.
+ // Sets the path to a custom Credentials Authority (CA) Bundle PEM file + // that the SDK will use instead of the system's root CA bundle. // Only use this if you want to configure the SDK to use a custom set // of CAs. // @@ -93,9 +97,33 @@ type envConfig struct { // // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle CustomCABundle string + + csmEnabled string + CSMEnabled bool + CSMPort string + CSMClientID string + CSMHost string + + enableEndpointDiscovery string + // Enables endpoint discovery via environment variables. + // + // AWS_ENABLE_ENDPOINT_DISCOVERY=true + EnableEndpointDiscovery *bool } var ( + csmEnabledEnvKey = []string{ + "AWS_CSM_ENABLED", + } + csmHostEnvKey = []string{ + "AWS_CSM_HOST", + } + csmPortEnvKey = []string{ + "AWS_CSM_PORT", + } + csmClientIDEnvKey = []string{ + "AWS_CSM_CLIENT_ID", + } credAccessEnvKey = []string{ "AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", @@ -108,6 +136,10 @@ var ( "AWS_SESSION_TOKEN", } + enableEndpointDiscoveryEnvKey = []string{ + "AWS_ENABLE_ENDPOINT_DISCOVERY", + } + regionEnvKeys = []string{ "AWS_REGION", "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set @@ -116,6 +148,12 @@ var ( "AWS_PROFILE", "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set } + sharedCredsFileEnvKey = []string{ + "AWS_SHARED_CREDENTIALS_FILE", + } + sharedConfigFileEnvKey = []string{ + "AWS_CONFIG_FILE", + } ) // loadEnvConfig retrieves the SDK's environment configuration. @@ -148,11 +186,18 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + // CSM environment variables + setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey) + setFromEnvVal(&cfg.CSMHost, csmHostEnvKey) + setFromEnvVal(&cfg.CSMPort, csmPortEnvKey) + setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey) + cfg.CSMEnabled = len(cfg.csmEnabled) > 0 + // Require logical grouping of credentials if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { cfg.Creds = credentials.Value{} } else { - cfg.Creds.ProviderName = "EnvConfigCredentials" + cfg.Creds.ProviderName = EnvProviderName } regionKeys := regionEnvKeys @@ -165,8 +210,21 @@ func envConfigLoad(enableSharedConfig bool) envConfig { setFromEnvVal(&cfg.Region, regionKeys) setFromEnvVal(&cfg.Profile, profileKeys) - cfg.SharedCredentialsFile = sharedCredentialsFilename() - cfg.SharedConfigFile = sharedConfigFilename() + // endpoint discovery is in reference to it being enabled. 
+	setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
+	if len(cfg.enableEndpointDiscovery) > 0 {
+		cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
+	}
+
+	setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+	setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
+
+	if len(cfg.SharedCredentialsFile) == 0 {
+		cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+	}
+	if len(cfg.SharedConfigFile) == 0 {
+		cfg.SharedConfigFile = defaults.SharedConfigFilename()
+	}
 
 	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
 
@@ -181,28 +239,3 @@ func setFromEnvVal(dst *string, keys []string) {
 		}
 	}
 }
-
-func sharedCredentialsFilename() string {
-	if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
-		return name
-	}
-
-	return filepath.Join(userHomeDir(), ".aws", "credentials")
-}
-
-func sharedConfigFilename() string {
-	if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
-		return name
-	}
-
-	return filepath.Join(userHomeDir(), ".aws", "config")
-}
-
-func userHomeDir() string {
-	homeDir := os.Getenv("HOME") // *nix
-	if len(homeDir) == 0 { // windows
-		homeDir = os.Getenv("USERPROFILE")
-	}
-
-	return homeDir
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 96c740d0..5da98abe 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -8,25 +8,43 @@ import (
 	"io/ioutil"
 	"net/http"
 	"os"
+	"time"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
 	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+	"github.com/aws/aws-sdk-go/aws/csm"
 	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/endpoints"
 	"github.com/aws/aws-sdk-go/aws/request"
 )
 
+const (
+	// ErrCodeSharedConfig represents an error that occurs in the shared
+	// configuration logic
+	ErrCodeSharedConfig = "SharedConfigErr"
+)
+
+// ErrSharedConfigSourceCollision will be returned if a section contains both
+// source_profile and credential_source
+var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only source profile or credential source can be specified, not both", nil)
+
+// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
+// environment variable is empty and EcsContainer was specified as the credential source
+var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
+
+// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
+var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
+
 // A Session provides a central location to create service clients from and
 // store configurations and request handlers for those services.
 //
 // Sessions are safe to create service clients concurrently, but it is not safe
 // to mutate the Session concurrently.
 //
-// The Session satisfies the service client's client.ClientConfigProvider.
+// The Session satisfies the service client's client.ConfigProvider.
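Both the legacy `New` constructor and `newSession` now consult `envCfg.CSMEnabled` and wire the client-side metrics handlers into the session. A rough sketch of turning the feature on from application code; the port and client ID values are illustrative assumptions, not defaults this diff guarantees:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// These env keys match csmEnabledEnvKey and friends in env_config.go and
	// are read once, at session creation.
	os.Setenv("AWS_CSM_ENABLED", "true")
	os.Setenv("AWS_CSM_PORT", "31000") // assumed local agent port
	os.Setenv("AWS_CSM_CLIENT_ID", "demo-app")

	// NewSession reports configuration problems immediately; the legacy New
	// constructor defers them to the first request's Validate handler.
	sess, err := session.NewSession(&aws.Config{
		Region: aws.String("us-west-2"),
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "session error:", err)
		os.Exit(1)
	}
	fmt.Println("region:", aws.StringValue(sess.Config.Region))
}
```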
type Session struct { Config *aws.Config Handlers request.Handlers @@ -58,7 +76,12 @@ func New(cfgs ...*aws.Config) *Session { envCfg := loadEnvConfig() if envCfg.EnableSharedConfig { - s, err := newSession(Options{}, envCfg, cfgs...) + var cfg aws.Config + cfg.MergeIn(cfgs...) + s, err := NewSessionWithOptions(Options{ + Config: cfg, + SharedConfigState: SharedConfigEnable, + }) if err != nil { // Old session.New expected all errors to be discovered when // a request is made, and would report the errors then. This @@ -76,10 +99,24 @@ func New(cfgs ...*aws.Config) *Session { r.Error = err }) } + return s } - return deprecatedNewSession(cfgs...) + s := deprecatedNewSession(cfgs...) + if envCfg.CSMEnabled { + err := enableCSM(&s.Handlers, envCfg.CSMClientID, + envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger) + if err != nil { + err = fmt.Errorf("failed to enable CSM, %v", err) + s.Config.Logger.Log("ERROR:", err.Error()) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + } + + return s } // NewSession returns a new Session created from SDK defaults, config files, @@ -155,6 +192,10 @@ type Options struct { // and enable or disable the shared config functionality. SharedConfigState SharedConfigState + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + // When the SDK's shared config is configured to assume a role with MFA // this option is required in order to provide the mechanism that will // retrieve the MFA token. There is no default value for this field. If @@ -175,6 +216,12 @@ type Options struct { // the config enables assume role wit MFA via the mfa_serial field. AssumeRoleTokenProvider func() (string, error) + // When the SDK's shared config is configured to assume a role this option + // may be provided to set the expiry duration of the STS credentials. + // Defaults to 15 minutes if not set as documented in the + // stscreds.AssumeRoleProvider. + AssumeRoleDuration time.Duration + // Reader for a custom Credentials Authority (CA) bundle in PEM format that // the SDK will use instead of the default system's root CA bundle. Use this // only if you want to replace the CA bundle the SDK uses for TLS requests. @@ -189,6 +236,12 @@ type Options struct { // to also enable this feature. CustomCABundle session option field has priority // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. CustomCABundle io.Reader + + // The handlers that the session and all API clients will be created with. + // This must be a complete set of handlers. Use the defaults.Handlers() + // function to initialize this value before changing the handlers to be + // used by the SDK. 
+	Handlers request.Handlers
 }
 
 // NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -218,7 +271,7 @@ type Options struct {
 //
 //	// Force enable Shared Config support
 //	sess := session.Must(session.NewSessionWithOptions(session.Options{
-//		SharedConfigState: SharedConfigEnable,
+//		SharedConfigState: session.SharedConfigEnable,
 //	}))
 func NewSessionWithOptions(opts Options) (*Session, error) {
 	var envCfg envConfig
@@ -291,26 +344,51 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
 	}
 
 	initHandlers(s)
-
 	return s
 }
 
+func enableCSM(handlers *request.Handlers,
+	clientID, host, port string,
+	logger aws.Logger,
+) error {
+	if logger != nil {
+		logger.Log("Enabling CSM")
+	}
+
+	r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port))
+	if err != nil {
+		return err
+	}
+	r.InjectHandlers(handlers)
+
+	return nil
+}
+
 func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
 	cfg := defaults.Config()
-	handlers := defaults.Handlers()
+
+	handlers := opts.Handlers
+	if handlers.IsEmpty() {
+		handlers = defaults.Handlers()
+	}
 
 	// Get a merged version of the user provided config to determine if
 	// credentials were.
 	userCfg := &aws.Config{}
 	userCfg.MergeIn(cfgs...)
 
-	// Order config files will be loaded in with later files overwriting
+	// Ordered config files will be loaded in with later files overwriting
 	// previous config file values.
-	cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
-	if !envCfg.EnableSharedConfig {
-		// The shared config file (~/.aws/config) is only loaded if instructed
-		// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
-		cfgFiles = cfgFiles[1:]
+	var cfgFiles []string
+	if opts.SharedConfigFiles != nil {
+		cfgFiles = opts.SharedConfigFiles
+	} else {
+		cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+		if !envCfg.EnableSharedConfig {
+			// The shared config file (~/.aws/config) is only loaded if instructed
+			// to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+			cfgFiles = cfgFiles[1:]
+		}
 	}
 
 	// Load additional config from file(s)
@@ -329,6 +407,13 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 	}
 
 	initHandlers(s)
+	if envCfg.CSMEnabled {
+		err := enableCSM(&s.Handlers, envCfg.CSMClientID,
+			envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
 
 	// Setup HTTP client with custom cert bundle if enabled
 	if opts.CustomCABundle != nil {
@@ -352,7 +437,10 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
 		}
 	}
 	if t == nil {
-		t = &http.Transport{}
+		// Nil transport implies `http.DefaultTransport` should be used. Since
+		// the SDK cannot modify nor copy the `DefaultTransport`, specifying
+		// these values is the next closest behavior.
+ t = getCABundleTransport() } p, err := loadCertPool(bundle) @@ -385,7 +473,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) { return p, nil } -func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { +func mergeConfigSrcs(cfg, userCfg *aws.Config, + envCfg envConfig, sharedCfg sharedConfig, + handlers request.Handlers, + sessOpts Options, +) error { // Merge in user provided configuration cfg.MergeIn(userCfg) @@ -398,103 +490,27 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share } } - // Configure credentials if not already set - if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { - if len(envCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - envCfg.Creds, - ) - } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { - cfgCp := *cfg - cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.AssumeRoleSource.Creds, - ) - if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { - // AssumeRole Token provider is required if doing Assume Role - // with MFA. - return AssumeRoleTokenProviderNotSetError{} - } - cfg.Credentials = stscreds.NewCredentials( - &Session{ - Config: &cfgCp, - Handlers: handlers.Copy(), - }, - sharedCfg.AssumeRole.RoleARN, - func(opt *stscreds.AssumeRoleProvider) { - opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName - - // Assume role with external ID - if len(sharedCfg.AssumeRole.ExternalID) > 0 { - opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) - } - - // Assume role with MFA - if len(sharedCfg.AssumeRole.MFASerial) > 0 { - opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) - opt.TokenProvider = sessOpts.AssumeRoleTokenProvider - } - }, - ) - } else if len(sharedCfg.Creds.AccessKeyID) > 0 { - cfg.Credentials = credentials.NewStaticCredentialsFromCreds( - sharedCfg.Creds, - ) - } else { - // Fallback to default credentials provider, include mock errors - // for the credential chain so user can identify why credentials - // failed to be retrieved. - cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, - &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, - defaults.RemoteCredProvider(*cfg, handlers), - }, - }) + if cfg.EnableEndpointDiscovery == nil { + if envCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery) + } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil { + cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery) } } - return nil -} - -// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the -// MFAToken option is not set when shared config is configured load assume a -// role with an MFA token. -type AssumeRoleTokenProviderNotSetError struct{} - -// Code is the short id of the error. 
-func (e AssumeRoleTokenProviderNotSetError) Code() string { - return "AssumeRoleTokenProviderNotSetError" -} - -// Message is the description of the error -func (e AssumeRoleTokenProviderNotSetError) Message() string { - return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") -} + // Configure credentials if not already set by the user when creating the + // Session. + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts) + if err != nil { + return err + } + cfg.Credentials = creds + } -// OrigErr is the underlying error that caused the failure. -func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { return nil } -// Error satisfies the error interface. -func (e AssumeRoleTokenProviderNotSetError) Error() string { - return awserr.SprintError(e.Code(), e.Message(), "", nil) -} - -type credProviderError struct { - Err error -} - -var emptyCreds = credentials.Value{} - -func (c credProviderError) Retrieve() (credentials.Value, error) { - return credentials.Value{}, c.Err -} -func (c credProviderError) IsExpired() bool { - return true -} - func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -557,11 +573,12 @@ func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) ( } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, }, err } @@ -581,10 +598,11 @@ func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Conf } return client.Config{ - Config: s.Config, - Handlers: s.Handlers, - Endpoint: resolved.URL, - SigningRegion: resolved.SigningRegion, - SigningName: resolved.SigningName, + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningNameDerived: resolved.SigningNameDerived, + SigningName: resolved.SigningName, } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index b58076f5..324927f5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -2,11 +2,10 @@ package session import ( "fmt" - "io/ioutil" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-ini/ini" + "github.com/aws/aws-sdk-go/internal/ini" ) const ( @@ -16,15 +15,21 @@ const ( sessionTokenKey = `aws_session_token` // optional // Assume Role Credentials group - roleArnKey = `role_arn` // group required - sourceProfileKey = `source_profile` // group required - externalIDKey = `external_id` // optional - mfaSerialKey = `mfa_serial` // optional - roleSessionNameKey = `role_session_name` // optional + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required (or credential_source) + credentialSourceKey = `credential_source` // group required (or source_profile) + externalIDKey = `external_id` // optional + mfaSerialKey = 
`mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional // Additional Config fields regionKey = `region` + // endpoint discovery group + enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional + // External Credential Process + credentialProcessKey = `credential_process` + // DefaultSharedConfigProfile is the default profile to be used when // loading configuration from the config files if another profile name // is not provided. @@ -32,11 +37,12 @@ const ( ) type assumeRoleConfig struct { - RoleARN string - SourceProfile string - ExternalID string - MFASerial string - RoleSessionName string + RoleARN string + SourceProfile string + CredentialSource string + ExternalID string + MFASerial string + RoleSessionName string } // sharedConfig represents the configuration fields of the SDK config files. @@ -55,16 +61,25 @@ type sharedConfig struct { AssumeRole assumeRoleConfig AssumeRoleSource *sharedConfig + // An external process to request credentials + CredentialProcess string + // Region is the region the SDK should use for looking up AWS service endpoints // and signing requests. // // region Region string + + // EnableEndpointDiscovery can be enabled in the shared config by setting + // endpoint_discovery_enabled to true + // + // endpoint_discovery_enabled = true + EnableEndpointDiscovery *bool } type sharedConfigFile struct { Filename string - IniData *ini.File + IniData ini.Sections } // loadSharedConfig retrieves the configuration from the list of files @@ -105,19 +120,16 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { files := make([]sharedConfigFile, 0, len(filenames)) for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { + sections, err := ini.OpenFile(filename) + if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile { // Skip files which can't be opened and read for whatever reason continue - } - - f, err := ini.Load(b) - if err != nil { - return nil, SharedConfigLoadError{Filename: filename} + } else if err != nil { + return nil, SharedConfigLoadError{Filename: filename, Err: err} } files = append(files, sharedConfigFile{ - Filename: filename, IniData: f, + Filename: filename, IniData: sections, }) } @@ -127,6 +139,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { var assumeRoleSrc sharedConfig + if len(cfg.AssumeRole.CredentialSource) > 0 { + // setAssumeRoleSource is only called when source_profile is found. 
+		// If both source_profile and credential_source are set, then
+		// ErrSharedConfigSourceCollision will be returned
+		return ErrSharedConfigSourceCollision
+	}
+
 	// Multiple level assume role chains are not support
 	if cfg.AssumeRole.SourceProfile == origProfile {
 		assumeRoleSrc = *cfg
@@ -136,10 +155,21 @@
 		if err != nil {
 			return err
 		}
+
+		// Chain if profile depends on other profiles
+		if len(assumeRoleSrc.AssumeRole.SourceProfile) > 0 {
+			err := assumeRoleSrc.setAssumeRoleSource(cfg.AssumeRole.SourceProfile, files)
+			if err != nil {
+				return err
+			}
+		}
 	}
 
-	if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
-		return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+	if cfg.AssumeRole.SourceProfile == origProfile || len(assumeRoleSrc.AssumeRole.SourceProfile) == 0 {
+		// Check that at least one of credential source, static creds, or credential process is set so credentials can be retrieved.
+		if len(assumeRoleSrc.AssumeRole.CredentialSource) == 0 && len(assumeRoleSrc.Creds.AccessKeyID) == 0 && len(assumeRoleSrc.CredentialProcess) == 0 {
+			return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+		}
 	}
 
 	cfg.AssumeRoleSource = &assumeRoleSrc
@@ -171,45 +201,61 @@
 // if a config file only includes aws_access_key_id but no aws_secret_access_key
 // the aws_access_key_id will be ignored.
 func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
-	section, err := file.IniData.GetSection(profile)
-	if err != nil {
+	section, ok := file.IniData.GetSection(profile)
+	if !ok {
 		// Fallback to to alternate profile name: profile
-		section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
-		if err != nil {
-			return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+		section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+		if !ok {
+			return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
 		}
 	}
 
 	// Shared Credentials
-	akid := section.Key(accessKeyIDKey).String()
-	secret := section.Key(secretAccessKey).String()
+	akid := section.String(accessKeyIDKey)
+	secret := section.String(secretAccessKey)
 	if len(akid) > 0 && len(secret) > 0 {
 		cfg.Creds = credentials.Value{
 			AccessKeyID:     akid,
 			SecretAccessKey: secret,
-			SessionToken:    section.Key(sessionTokenKey).String(),
+			SessionToken:    section.String(sessionTokenKey),
 			ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
 		}
 	}
 
 	// Assume Role
-	roleArn := section.Key(roleArnKey).String()
-	srcProfile := section.Key(sourceProfileKey).String()
-	if len(roleArn) > 0 && len(srcProfile) > 0 {
+	roleArn := section.String(roleArnKey)
+	srcProfile := section.String(sourceProfileKey)
+	credentialSource := section.String(credentialSourceKey)
+	credentialProcess := section.String(credentialProcessKey)
+	// hasSource makes sure the Assume Role profile has at least one of source_profile, credential_source, or credential_process.
+ hasSource := len(srcProfile) > 0 || len(credentialSource) > 0 || len(credentialProcess) > 0 + if len(roleArn) > 0 && hasSource { cfg.AssumeRole = assumeRoleConfig{ - RoleARN: roleArn, - SourceProfile: srcProfile, - ExternalID: section.Key(externalIDKey).String(), - MFASerial: section.Key(mfaSerialKey).String(), - RoleSessionName: section.Key(roleSessionNameKey).String(), + RoleARN: roleArn, + SourceProfile: srcProfile, + CredentialSource: credentialSource, + ExternalID: section.String(externalIDKey), + MFASerial: section.String(mfaSerialKey), + RoleSessionName: section.String(roleSessionNameKey), } } + // `credential_process` + if credProc := section.String(credentialProcessKey); len(credProc) > 0 { + cfg.CredentialProcess = credProc + } + // Region - if v := section.Key(regionKey).String(); len(v) > 0 { + if v := section.String(regionKey); len(v) > 0 { cfg.Region = v } + // Endpoint discovery + if section.Has(enableEndpointDiscoveryKey) { + v := section.Bool(enableEndpointDiscoveryKey) + cfg.EnableEndpointDiscovery = &v + } + return nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 00000000..6aa2ed24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. +func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index 98bfe742..523db79f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -45,7 +45,7 @@ // If signing a request intended for HTTP2 server, and you're using Go 1.6.2 // through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the // request URL. https://github.com/golang/go/issues/16847 points to a bug in -// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP // message. URL.Opaque generally will force Go to make requests with absolute URL. // URL.RawPath does not do this, but RawPath must be a valid escaping of Path // or url.EscapedPath will ignore the RawPath escaping. 
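The signer documentation above describes signing requests outside of a service client. A minimal sketch of that standalone flow follows; the static credentials and the SQS endpoint are placeholders for illustration, not recommendations:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/credentials"
	v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
	// Placeholder static credentials; real code would draw from a session's
	// credential chain instead.
	signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))

	body := strings.NewReader("Action=ListQueues&Version=2012-11-05")
	req, err := http.NewRequest("POST", "https://sqs.us-west-2.amazonaws.com/", body)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	// Sign for the "sqs" service in us-west-2 at the current time. The body
	// must be an io.ReadSeeker so its SHA-256 digest can be computed.
	if _, err := signer.Sign(req, body, "sqs", "us-west-2", time.Now()); err != nil {
		panic(err)
	}
	fmt.Println("Authorization:", req.Header.Get("Authorization"))
}
```

Presigning follows the same shape through `signer.Presign`, which additionally takes an expiry duration; per the digest logic later in this diff, S3 presigned payloads are left unsigned (`UNSIGNED-PAYLOAD`).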
@@ -55,7 +55,6 @@
 package v4
 
 import (
-	"bytes"
 	"crypto/hmac"
 	"crypto/sha256"
 	"encoding/hex"
@@ -72,6 +71,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/internal/sdkio"
 	"github.com/aws/aws-sdk-go/private/protocol/rest"
 )
 
@@ -98,25 +98,25 @@ var ignoredHeaders = rules{
 var requiredSignedHeaders = rules{
 	whitelist{
 		mapRule{
-			"Cache-Control": struct{}{},
-			"Content-Disposition": struct{}{},
-			"Content-Encoding": struct{}{},
-			"Content-Language": struct{}{},
-			"Content-Md5": struct{}{},
-			"Content-Type": struct{}{},
-			"Expires": struct{}{},
-			"If-Match": struct{}{},
-			"If-Modified-Since": struct{}{},
-			"If-None-Match": struct{}{},
-			"If-Unmodified-Since": struct{}{},
-			"Range": struct{}{},
-			"X-Amz-Acl": struct{}{},
-			"X-Amz-Copy-Source": struct{}{},
-			"X-Amz-Copy-Source-If-Match": struct{}{},
-			"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
-			"X-Amz-Copy-Source-If-None-Match": struct{}{},
-			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
-			"X-Amz-Copy-Source-Range": struct{}{},
+			"Cache-Control": struct{}{},
+			"Content-Disposition": struct{}{},
+			"Content-Encoding": struct{}{},
+			"Content-Language": struct{}{},
+			"Content-Md5": struct{}{},
+			"Content-Type": struct{}{},
+			"Expires": struct{}{},
+			"If-Match": struct{}{},
+			"If-Modified-Since": struct{}{},
+			"If-None-Match": struct{}{},
+			"If-Unmodified-Since": struct{}{},
+			"Range": struct{}{},
+			"X-Amz-Acl": struct{}{},
+			"X-Amz-Copy-Source": struct{}{},
+			"X-Amz-Copy-Source-If-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+			"X-Amz-Copy-Source-If-None-Match": struct{}{},
+			"X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+			"X-Amz-Copy-Source-Range": struct{}{},
 			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
 			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
 			"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
@@ -134,7 +134,9 @@ var requiredSignedHeaders = rules{
 			"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
 			"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
 			"X-Amz-Storage-Class": struct{}{},
+			"X-Amz-Tagging": struct{}{},
 			"X-Amz-Website-Redirect-Location": struct{}{},
+			"X-Amz-Content-Sha256": struct{}{},
 		},
 	},
 	patterns{"X-Amz-Meta-"},
@@ -180,7 +182,7 @@ type Signer struct {
 	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
 	DisableURIPathEscaping bool
 
-	// Disales the automatical setting of the HTTP request's Body field with the
+	// Disables the automatic setting of the HTTP request's Body field with the
 	// io.ReadSeeker passed in to the signer. This is useful if you're using a
 	// custom wrapper around the body for the io.ReadSeeker and want to preserve
 	// the Body value on the Request.Body.
@@ -194,6 +196,10 @@ type Signer struct {
 	// This value should only be used for testing. If it is nil the default
 	// time.Now will be used.
 	currentTimeFn func() time.Time
+
+	// UnsignedPayload will prevent signing of the payload. This will only
+	// work for services that have support for this.
+ UnsignedPayload bool } // NewSigner returns a Signer pointer configured with the credentials and optional @@ -227,6 +233,7 @@ type signingCtx struct { isPresign bool formattedTime string formattedShortTime string + unsignedPayload bool bodyDigest string signedHeaders string @@ -264,7 +271,7 @@ type signingCtx struct { // "X-Amz-Content-Sha256" header with a precomputed value. The signer will // only compute the hash if the request header value is empty. func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, 0, signTime) + return v4.signWithBody(r, body, service, region, 0, false, signTime) } // Presign signs AWS v4 requests with the provided body, service name, region @@ -298,10 +305,10 @@ func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region strin // presigned request's signature you can set the "X-Amz-Content-Sha256" // HTTP header and that will be included in the request's signature. func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { - return v4.signWithBody(r, body, service, region, exp, signTime) + return v4.signWithBody(r, body, service, region, exp, true, signTime) } -func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) { currentTimeFn := v4.currentTimeFn if currentTimeFn == nil { currentTimeFn = time.Now @@ -313,10 +320,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi Query: r.URL.Query(), Time: signTime, ExpireTime: exp, - isPresign: exp != 0, + isPresign: isPresign, ServiceName: service, Region: region, DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, } for key := range ctx.Query { @@ -334,8 +342,11 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return http.Header{}, err } + ctx.sanitizeHostForHeader() ctx.assignAmzQueryValues() - ctx.build(v4.DisableHeaderHoisting) + if err := ctx.build(v4.DisableHeaderHoisting); err != nil { + return nil, err + } // If the request is not presigned the body should be attached to it. This // prevents the confusion of wanting to send a signed request without @@ -358,6 +369,10 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi return ctx.SignedHeaderVals, nil } +func (ctx *signingCtx) sanitizeHostForHeader() { + request.SanitizeHostForHeader(ctx.Request) +} + func (ctx *signingCtx) handlePresignRemoval() { if !ctx.isPresign { return @@ -396,7 +411,7 @@ var SignRequestHandler = request.NamedHandler{ } // SignSDKRequest signs an AWS request with the V4 signature. This -// request handler is bested used only with the SDK's built in service client's +// request handler should only be used with the SDK's built in service client's // API operation requests. // // This function should not be used on its on its own, but in conjunction with @@ -407,9 +422,23 @@ var SignRequestHandler = request.NamedHandler{ // If the credentials of the request's config are set to // credentials.AnonymousCredentials the request will not be signed. 
func SignSDKRequest(req *request.Request) { - signSDKRequestWithCurrTime(req, time.Now) + SignSDKRequestWithCurrentTime(req, time.Now) } -func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + +// BuildNamedHandler will build a generic handler for signing. +func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + SignSDKRequestWithCurrentTime(req, time.Now, opts...) + }, + } +} + +// SignSDKRequestWithCurrentTime will sign the SDK's request using the time +// function passed in. Behaves the same as SignSDKRequest with the exception +// the request is signed with the value returned by the current time function. +func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { // If the request does not need to be signed ignore the signing of the // request if the AnonymousCredentials object is used. if req.Config.Credentials == credentials.AnonymousCredentials { @@ -441,13 +470,13 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time v4.DisableRequestBodyOverwrite = true }) - signingTime := req.Time - if !req.LastSignedAt.IsZero() { - signingTime = req.LastSignedAt + for _, opt := range opts { + opt(v4) } + curTime := curTimeFn() signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), - name, region, req.ExpireTime, signingTime, + name, region, req.ExpireTime, req.ExpireTime > 0, curTime, ) if err != nil { req.Error = err @@ -456,7 +485,7 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time } req.SignedHeaderVals = signedHeaders - req.LastSignedAt = curTimeFn() + req.LastSignedAt = curTime } const logSignInfoMsg = `DEBUG: Request Signature: @@ -478,10 +507,14 @@ func (v4 *Signer) logSigningInfo(ctx *signingCtx) { v4.Logger.Log(msg) } -func (ctx *signingCtx) build(disableHeaderHoisting bool) { +func (ctx *signingCtx) build(disableHeaderHoisting bool) error { ctx.buildTime() // no depends ctx.buildCredentialString() // no depends + if err := ctx.buildBodyDigest(); err != nil { + return err + } + unsignedHeaders := ctx.Request.Header if ctx.isPresign { if !disableHeaderHoisting { @@ -493,7 +526,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } } - ctx.buildBodyDigest() ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildStringToSign() // depends on canon string @@ -509,6 +541,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) { } ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) } + + return nil } func (ctx *signingCtx) buildTime() { @@ -583,14 +617,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { headerValues := make([]string, len(headers)) for i, k := range headers { if k == "host" { - headerValues[i] = "host:" + ctx.Request.URL.Host + if ctx.Request.Host != "" { + headerValues[i] = "host:" + ctx.Request.Host + } else { + headerValues[i] = "host:" + ctx.Request.URL.Host + } } else { headerValues[i] = k + ":" + strings.Join(ctx.SignedHeaderVals[k], ",") } } - - ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") + stripExcessSpaces(headerValues) + ctx.canonicalHeaders = strings.Join(headerValues, "\n") } func (ctx *signingCtx) buildCanonicalString() { @@ -631,21 +669,34 @@ func (ctx *signingCtx) buildSignature() { ctx.signature = 
hex.EncodeToString(signature) } -func (ctx *signingCtx) buildBodyDigest() { +func (ctx *signingCtx) buildBodyDigest() error { hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") if hash == "" { - if ctx.isPresign && ctx.ServiceName == "s3" { + includeSHA256Header := ctx.unsignedPayload || + ctx.ServiceName == "s3" || + ctx.ServiceName == "glacier" + + s3Presign := ctx.isPresign && ctx.ServiceName == "s3" + + if ctx.unsignedPayload || s3Presign { hash = "UNSIGNED-PAYLOAD" + includeSHA256Header = !s3Presign } else if ctx.Body == nil { hash = emptyStringSHA256 } else { + if !aws.IsReaderSeekable(ctx.Body) { + return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body) + } hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + + if includeSHA256Header { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } ctx.bodyDigest = hash + + return nil } // isRequestSigned returns if the request is currently signed or presigned @@ -685,56 +736,61 @@ func makeSha256(data []byte) []byte { func makeSha256Reader(reader io.ReadSeeker) []byte { hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) + start, _ := reader.Seek(0, sdkio.SeekCurrent) + defer reader.Seek(start, sdkio.SeekStart) + + // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies + // smaller than 32KB. Fall back to io.Copy if we fail to determine the size. + size, err := aws.SeekerLen(reader) + if err != nil { + io.Copy(hash, reader) + } else { + io.CopyN(hash, reader, size) + } - io.Copy(hash, reader) return hash.Sum(nil) } -const doubleSpaces = " " +const doubleSpace = " " -var doubleSpaceBytes = []byte(doubleSpaces) - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - // Trim leading and trailing spaces - trimmed := strings.TrimSpace(str) +// stripExcessSpaces will rewrite the passed in slice's string values to not +// contain multiple side-by-side spaces. +func stripExcessSpaces(vals []string) { + var j, k, l, m, spaces int + for i, str := range vals { + // Trim trailing spaces + for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- { + } - idx := strings.Index(trimmed, doubleSpaces) - var buf []byte - for idx > -1 { - // Multiple adjacent spaces found - if buf == nil { - // first time create the buffer - buf = []byte(trimmed) - } + // Trim leading spaces + for k = 0; k < j && str[k] == ' '; k++ { + } + str = str[k : j+1] - stripToIdx := -1 - for j := idx + 1; j < len(buf); j++ { - if buf[j] != ' ' { - buf = append(buf[:idx+1], buf[j:]...) - stripToIdx = j - break - } - } + // Strip multiple spaces. + j = strings.Index(str, doubleSpace) + if j < 0 { + vals[i] = str + continue + } - if stripToIdx >= 0 { - idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) - if idx >= 0 { - idx += stripToIdx + buf := []byte(str) + for k, m, l = j, j, len(buf); k < l; k++ { + if buf[k] == ' ' { + if spaces == 0 { + // First space. + buf[m] = buf[k] + m++ } + spaces++ } else { - idx = -1 + // End of multiple spaces. 
+				spaces = 0
+				buf[m] = buf[k]
+				m++
 			}
 		}
 
-		if buf != nil {
-			vals[i] = string(buf)
-		} else {
-			vals[i] = trimmed
-		}
+		vals[i] = string(buf[:m])
 	}
-	return vals
 }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
index 0e2d864e..45509154 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -3,15 +3,22 @@ package aws
 import (
 	"io"
 	"sync"
+
+	"github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
-// only be used with an io.Reader that is also an io.Seeker. Doing so may
-// cause request signature errors, or request body's not sent for GET, HEAD
-// and DELETE HTTP methods.
+// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
+// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
+// streaming payload API operations.
+//
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation being retried in the case of
+// network errors, and cause operation requests to fail if the operation
+// requires payload signing.
 //
-// Deprecated: Should only be used with io.ReadSeeker. If using for
-// S3 PutObject to stream content use s3manager.Uploader instead.
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3
+// Upload manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
 func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
 	return ReaderSeekerCloser{r}
 }
@@ -22,10 +29,27 @@ type ReaderSeekerCloser struct {
 	r io.Reader
 }
 
+// IsReaderSeekable returns if the underlying reader type can be seeked. A
+// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
+// type.
+func IsReaderSeekable(r io.Reader) bool {
+	switch v := r.(type) {
+	case ReaderSeekerCloser:
+		return v.IsSeeker()
+	case *ReaderSeekerCloser:
+		return v.IsSeeker()
+	case io.ReadSeeker:
+		return true
+	default:
+		return false
+	}
+}
+
 // Read reads from the reader up to size of p. The number of bytes read, and
 // error if it occurred will be returned.
 //
-// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+// If the reader is not an io.Reader zero bytes read, and nil error will be
+// returned.
 //
 // Performs the same functionality as io.Reader Read
 func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
@@ -56,6 +80,71 @@ func (r ReaderSeekerCloser) IsSeeker() bool {
 	return ok
 }
 
+// HasLen returns the length of the underlying reader if the value implements
+// the Len() int method.
+func (r ReaderSeekerCloser) HasLen() (int, bool) {
+	type lenner interface {
+		Len() int
+	}
+
+	if lr, ok := r.r.(lenner); ok {
+		return lr.Len(), true
+	}
+
+	return 0, false
+}
+
+// GetLen returns the length of the bytes remaining in the underlying reader.
+// Checks first for Len(), then io.Seeker to determine the size of the
+// underlying reader.
+//
+// Will return -1 if the length cannot be determined.
+func (r ReaderSeekerCloser) GetLen() (int64, error) {
+	if l, ok := r.HasLen(); ok {
+		return int64(l), nil
+	}
+
+	if s, ok := r.r.(io.Seeker); ok {
+		return seekerLen(s)
+	}
+
+	return -1, nil
+}
+
+// SeekerLen attempts to get the number of bytes remaining at the seeker's
+// current position. Returns the number of bytes remaining or error.
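Taken together, `HasLen`, `GetLen`, and the `SeekerLen` function defined next let the SDK measure a body without consuming it, while `IsReaderSeekable` reports whether retries and signing can rely on seeking. A small sketch of the observable behavior (the expected outputs in the comments assume the readers start at offset zero):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// bytes.Reader implements io.Seeker, so SeekerLen can measure the
	// remaining bytes and then restore the original offset.
	n, err := aws.SeekerLen(bytes.NewReader([]byte("hello world")))
	fmt.Println(n, err) // 11 <nil>

	// strings.Reader is seekable, so the wrapper still reports seekable.
	wrapped := aws.ReadSeekCloser(strings.NewReader("hello"))
	fmt.Println(aws.IsReaderSeekable(wrapped)) // true

	// bytes.Buffer is not an io.Seeker; wrapping it hides the concrete type,
	// but IsReaderSeekable sees through the ReaderSeekerCloser.
	buffered := aws.ReadSeekCloser(bytes.NewBufferString("hi"))
	fmt.Println(aws.IsReaderSeekable(buffered)) // false
}
```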
+func SeekerLen(s io.Seeker) (int64, error) {
+	// Determine if the seeker is actually seekable. ReaderSeekerCloser
+	// hides the fact that an io.Reader might not actually be seekable.
+	switch v := s.(type) {
+	case ReaderSeekerCloser:
+		return v.GetLen()
+	case *ReaderSeekerCloser:
+		return v.GetLen()
+	}
+
+	return seekerLen(s)
+}
+
+func seekerLen(s io.Seeker) (int64, error) {
+	curOffset, err := s.Seek(0, sdkio.SeekCurrent)
+	if err != nil {
+		return 0, err
+	}
+
+	endOffset, err := s.Seek(0, sdkio.SeekEnd)
+	if err != nil {
+		return 0, err
+	}
+
+	_, err = s.Seek(curOffset, sdkio.SeekStart)
+	if err != nil {
+		return 0, err
+	}
+
+	return endOffset - curOffset, nil
+}
+
 // Close closes the ReaderSeekerCloser.
 //
 // If the ReaderSeekerCloser is not an io.Closer nothing will be done.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 00000000..6192b245
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+	return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 00000000..0210d272
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+	"net/url"
+	"strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+	return stripPort(url.Host)
+
+}
+
+// stripPort is a copy of the Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 438506bf..adbd0ac6 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.7.9" +const SDKVersion = "1.20.18" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go new file mode 100644 index 00000000..e83a9988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go @@ -0,0 +1,120 @@ +package ini + +// ASTKind represents different states in the parse table +// and the type of AST that is being constructed +type ASTKind int + +// ASTKind* is used in the parse table to transition between +// the different states +const ( + ASTKindNone = ASTKind(iota) + ASTKindStart + ASTKindExpr + ASTKindEqualExpr + ASTKindStatement + ASTKindSkipStatement + ASTKindExprStatement + ASTKindSectionStatement + ASTKindNestedSectionStatement + ASTKindCompletedNestedSectionStatement + ASTKindCommentStatement + ASTKindCompletedSectionStatement +) + +func (k ASTKind) String() string { + switch k { + case ASTKindNone: + return "none" + case ASTKindStart: + return "start" + case ASTKindExpr: + return "expr" + case ASTKindStatement: + return "stmt" + case ASTKindSectionStatement: + return "section_stmt" + case ASTKindExprStatement: + return "expr_stmt" + case ASTKindCommentStatement: + return "comment" + case ASTKindNestedSectionStatement: + return "nested_section_stmt" + case ASTKindCompletedSectionStatement: + return "completed_stmt" + case ASTKindSkipStatement: + return "skip" + default: + return "" + } +} + +// AST interface allows us to determine what kind of node we +// are on and casting may not need to be necessary. +// +// The root is always the first node in Children +type AST struct { + Kind ASTKind + Root Token + RootToken bool + Children []AST +} + +func newAST(kind ASTKind, root AST, children ...AST) AST { + return AST{ + Kind: kind, + Children: append([]AST{root}, children...), + } +} + +func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST { + return AST{ + Kind: kind, + Root: root, + RootToken: true, + Children: children, + } +} + +// AppendChild will append to the list of children an AST has. +func (a *AST) AppendChild(child AST) { + a.Children = append(a.Children, child) +} + +// GetRoot will return the root AST which can be the first entry +// in the children list or a token. +func (a *AST) GetRoot() AST { + if a.RootToken { + return *a + } + + if len(a.Children) == 0 { + return AST{} + } + + return a.Children[0] +} + +// GetChildren will return the current AST's list of children +func (a *AST) GetChildren() []AST { + if len(a.Children) == 0 { + return []AST{} + } + + if a.RootToken { + return a.Children + } + + return a.Children[1:] +} + +// SetChildren will set and override all children of the AST. +func (a *AST) SetChildren(children []AST) { + if a.RootToken { + a.Children = children + } else { + a.Children = append(a.Children[:1], children...) 
+ } +} + +// Start is used to indicate the starting state of the parse table. +var Start = newAST(ASTKindStart, AST{}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go new file mode 100644 index 00000000..0895d53c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go @@ -0,0 +1,11 @@ +package ini + +var commaRunes = []rune(",") + +func isComma(b rune) bool { + return b == ',' +} + +func newCommaToken() Token { + return newToken(TokenComma, commaRunes, NoneType) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go new file mode 100644 index 00000000..0b76999b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go @@ -0,0 +1,35 @@ +package ini + +// isComment will return whether or not the next byte(s) is a +// comment. +func isComment(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case ';': + return true + case '#': + return true + } + + return false +} + +// newCommentToken will create a comment token and +// return how many bytes were read. +func newCommentToken(b []rune) (Token, int, error) { + i := 0 + for ; i < len(b); i++ { + if b[i] == '\n' { + break + } + + if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' { + break + } + } + + return newToken(TokenComment, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go new file mode 100644 index 00000000..25ce0fe1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go @@ -0,0 +1,29 @@ +// Package ini is an LL(1) parser for configuration files. +// +// Example: +// sections, err := ini.OpenFile("/path/to/file") +// if err != nil { +// panic(err) +// } +// +// profile := "foo" +// section, ok := sections.GetSection(profile) +// if !ok { +// fmt.Printf("section %q could not be found", profile) +// } +// +// Below is the BNF that describes this parser +// Grammar: +// stmt -> value stmt' +// stmt' -> epsilon | op stmt +// value -> number | string | boolean | quoted_string +// +// section -> [ section' +// section' -> value section_close +// section_close -> ] +// +// SkipState will skip (NL WS)+ +// +// comment -> # comment' | ; comment' +// comment' -> epsilon | value +package ini diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go new file mode 100644 index 00000000..04345a54 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go @@ -0,0 +1,4 @@ +package ini + +// emptyToken is used to satisfy the Token interface +var emptyToken = newToken(TokenNone, []rune{}, NoneType) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go new file mode 100644 index 00000000..91ba2a59 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go @@ -0,0 +1,24 @@ +package ini + +// newExpression will return an expression AST. 
+// Expr represents an expression +// +// grammar: +// expr -> string | number +func newExpression(tok Token) AST { + return newASTWithRootToken(ASTKindExpr, tok) +} + +func newEqualExpr(left AST, tok Token) AST { + return newASTWithRootToken(ASTKindEqualExpr, tok, left) +} + +// EqualExprKey will return a LHS value in the equal expr +func EqualExprKey(ast AST) string { + children := ast.GetChildren() + if len(children) == 0 || ast.Kind != ASTKindEqualExpr { + return "" + } + + return string(children[0].Root.Raw()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go new file mode 100644 index 00000000..8d462f77 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package ini + +import ( + "bytes" +) + +func Fuzz(data []byte) int { + b := bytes.NewReader(data) + + if _, err := Parse(b); err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go new file mode 100644 index 00000000..3b0ca7af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go @@ -0,0 +1,51 @@ +package ini + +import ( + "io" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// OpenFile takes a path to a given file, and will open and parse +// that file. +func OpenFile(path string) (Sections, error) { + f, err := os.Open(path) + if err != nil { + return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err) + } + defer f.Close() + + return Parse(f) +} + +// Parse will parse the given file using the shared config +// visitor. +func Parse(f io.Reader) (Sections, error) { + tree, err := ParseAST(f) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} + +// ParseBytes will parse the given bytes and return the parsed sections. +func ParseBytes(b []byte) (Sections, error) { + tree, err := ParseASTBytes(b) + if err != nil { + return Sections{}, err + } + + v := NewDefaultVisitor() + if err = Walk(tree, v); err != nil { + return Sections{}, err + } + + return v.Sections, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go new file mode 100644 index 00000000..582c024a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go @@ -0,0 +1,165 @@ +package ini + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // ErrCodeUnableToReadFile is used when a file is failed to be + // opened or read from. 
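`OpenFile` and `ParseBytes` above wrap the lexer/parser pipeline the remainder of this diff defines. Since `internal/ini` cannot be imported from outside the SDK module, the following is purely an illustrative sketch of the intended flow, mirroring the package's own doc example:

```go
package main

// Illustrative only: github.com/aws/aws-sdk-go/internal/ini is an internal
// package and is importable only from within the aws-sdk-go module itself.
import (
	"fmt"

	"github.com/aws/aws-sdk-go/internal/ini"
)

func main() {
	cfg := []byte("[profile dev]\nregion = us-west-2\nendpoint_discovery_enabled = true\n")

	sections, err := ini.ParseBytes(cfg)
	if err != nil {
		panic(err)
	}

	// Profiles from a config file are stored under the "profile <name>" key,
	// matching the fallback lookup in shared_config.go above.
	section, ok := sections.GetSection("profile dev")
	if !ok {
		panic("profile not found")
	}
	fmt.Println(section.String("region"))                   // us-west-2
	fmt.Println(section.Bool("endpoint_discovery_enabled")) // true
}
```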
+ ErrCodeUnableToReadFile = "FailedRead" +) + +// TokenType represents the various different tokens types +type TokenType int + +func (t TokenType) String() string { + switch t { + case TokenNone: + return "none" + case TokenLit: + return "literal" + case TokenSep: + return "sep" + case TokenOp: + return "op" + case TokenWS: + return "ws" + case TokenNL: + return "newline" + case TokenComment: + return "comment" + case TokenComma: + return "comma" + default: + return "" + } +} + +// TokenType enums +const ( + TokenNone = TokenType(iota) + TokenLit + TokenSep + TokenComma + TokenOp + TokenWS + TokenNL + TokenComment +) + +type iniLexer struct{} + +// Tokenize will return a list of tokens during lexical analysis of the +// io.Reader. +func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err) + } + + return l.tokenize(b) +} + +func (l *iniLexer) tokenize(b []byte) ([]Token, error) { + runes := bytes.Runes(b) + var err error + n := 0 + tokenAmount := countTokens(runes) + tokens := make([]Token, tokenAmount) + count := 0 + + for len(runes) > 0 && count < tokenAmount { + switch { + case isWhitespace(runes[0]): + tokens[count], n, err = newWSToken(runes) + case isComma(runes[0]): + tokens[count], n = newCommaToken(), 1 + case isComment(runes): + tokens[count], n, err = newCommentToken(runes) + case isNewline(runes): + tokens[count], n, err = newNewlineToken(runes) + case isSep(runes): + tokens[count], n, err = newSepToken(runes) + case isOp(runes): + tokens[count], n, err = newOpToken(runes) + default: + tokens[count], n, err = newLitToken(runes) + } + + if err != nil { + return nil, err + } + + count++ + + runes = runes[n:] + } + + return tokens[:count], nil +} + +func countTokens(runes []rune) int { + count, n := 0, 0 + var err error + + for len(runes) > 0 { + switch { + case isWhitespace(runes[0]): + _, n, err = newWSToken(runes) + case isComma(runes[0]): + _, n = newCommaToken(), 1 + case isComment(runes): + _, n, err = newCommentToken(runes) + case isNewline(runes): + _, n, err = newNewlineToken(runes) + case isSep(runes): + _, n, err = newSepToken(runes) + case isOp(runes): + _, n, err = newOpToken(runes) + default: + _, n, err = newLitToken(runes) + } + + if err != nil { + return 0 + } + + count++ + runes = runes[n:] + } + + return count + 1 +} + +// Token indicates a metadata about a given value. 
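+//
+// For example, the input "key = value" is lexed (illustratively, per the
+// tokenize loop above) into the sequence:
+//	TokenLit("key"), TokenWS, TokenOp("="), TokenWS, TokenLit("value")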
+type Token struct { + t TokenType + ValueType ValueType + base int + raw []rune +} + +var emptyValue = Value{} + +func newToken(t TokenType, raw []rune, v ValueType) Token { + return Token{ + t: t, + raw: raw, + ValueType: v, + } +} + +// Raw return the raw runes that were consumed +func (tok Token) Raw() []rune { + return tok.raw +} + +// Type returns the token type +func (tok Token) Type() TokenType { + return tok.t +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go new file mode 100644 index 00000000..e56dcee2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go @@ -0,0 +1,349 @@ +package ini + +import ( + "fmt" + "io" +) + +// State enums for the parse table +const ( + InvalidState = iota + // stmt -> value stmt' + StatementState + // stmt' -> MarkComplete | op stmt + StatementPrimeState + // value -> number | string | boolean | quoted_string + ValueState + // section -> [ section' + OpenScopeState + // section' -> value section_close + SectionState + // section_close -> ] + CloseScopeState + // SkipState will skip (NL WS)+ + SkipState + // SkipTokenState will skip any token and push the previous + // state onto the stack. + SkipTokenState + // comment -> # comment' | ; comment' + // comment' -> MarkComplete | value + CommentState + // MarkComplete state will complete statements and move that + // to the completed AST list + MarkCompleteState + // TerminalState signifies that the tokens have been fully parsed + TerminalState +) + +// parseTable is a state machine to dictate the grammar above. +var parseTable = map[ASTKind]map[TokenType]int{ + ASTKindStart: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: TerminalState, + }, + ASTKindCommentStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExpr: map[TokenType]int{ + TokenOp: StatementPrimeState, + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenWS: ValueState, + TokenNL: SkipState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindEqualExpr: map[TokenType]int{ + TokenLit: ValueState, + TokenWS: SkipTokenState, + TokenNL: SkipState, + }, + ASTKindStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenSep: CloseScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindExprStatement: map[TokenType]int{ + TokenLit: ValueState, + TokenSep: OpenScopeState, + TokenOp: ValueState, + TokenWS: ValueState, + TokenNL: MarkCompleteState, + TokenComment: CommentState, + TokenNone: TerminalState, + TokenComma: SkipState, + }, + ASTKindSectionStatement: map[TokenType]int{ + TokenLit: SectionState, + TokenOp: SectionState, + TokenSep: CloseScopeState, + TokenWS: SectionState, + TokenNL: SkipTokenState, + }, + ASTKindCompletedSectionStatement: map[TokenType]int{ + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenComment: CommentState, + TokenNone: MarkCompleteState, + }, + ASTKindSkipStatement: map[TokenType]int{ + TokenLit: StatementState, + TokenSep: OpenScopeState, + TokenWS: SkipTokenState, + TokenNL: SkipTokenState, + TokenComment: 
CommentState, + TokenNone: TerminalState, + }, +} + +// ParseAST will parse input from an io.Reader using +// an LL(1) parser. +func ParseAST(r io.Reader) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.Tokenize(r) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +// ParseASTBytes will parse input from a byte slice using +// an LL(1) parser. +func ParseASTBytes(b []byte) ([]AST, error) { + lexer := iniLexer{} + tokens, err := lexer.tokenize(b) + if err != nil { + return []AST{}, err + } + + return parse(tokens) +} + +func parse(tokens []Token) ([]AST, error) { + start := Start + stack := newParseStack(3, len(tokens)) + + stack.Push(start) + s := newSkipper() + +loop: + for stack.Len() > 0 { + k := stack.Pop() + + var tok Token + if len(tokens) == 0 { + // this occurs when all the tokens have been processed + // but reduction of what's left on the stack needs to + // occur. + tok = emptyToken + } else { + tok = tokens[0] + } + + step := parseTable[k.Kind][tok.Type()] + if s.ShouldSkip(tok) { + // being in a skip state with no tokens will break out of + // the parse loop since there is nothing left to process. + if len(tokens) == 0 { + break loop + } + + step = SkipTokenState + } + + switch step { + case TerminalState: + // Finished parsing. Push what should be the last + // statement to the stack. If there is anything left + // on the stack, an error in parsing has occurred. + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + break loop + case SkipTokenState: + // When skipping a token, the previous state was popped off the stack. + // To maintain the correct state, the previous state will be pushed + // onto the stack. + stack.Push(k) + case StatementState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + expr := newExpression(tok) + stack.Push(expr) + case StatementPrimeState: + if tok.Type() != TokenOp { + stack.MarkComplete(k) + continue + } + + if k.Kind != ASTKindExpr { + return nil, NewParseError( + fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k), + ) + } + + k = trimSpaces(k) + expr := newEqualExpr(k, tok) + stack.Push(expr) + case ValueState: + // ValueState requires the previous state to either be an equal expression + // or an expression statement. + // + // This grammar occurs when the RHS is a number, word, or quoted string. + // equal_expr -> lit op equal_expr' + // equal_expr' -> number | string | quoted_string + // quoted_string -> " quoted_string' + // quoted_string' -> string quoted_string_end + // quoted_string_end -> " + // + // otherwise + // expr_stmt -> equal_expr (expr_stmt')* + // expr_stmt' -> ws S | op S | MarkComplete + // S -> equal_expr' expr_stmt' + switch k.Kind { + case ASTKindEqualExpr: + // assiging a value to some key + k.AppendChild(newExpression(tok)) + stack.Push(newExprStatement(k)) + case ASTKindExpr: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stack.Push(k) + case ASTKindExprStatement: + root := k.GetRoot() + children := root.GetChildren() + if len(children) == 0 { + return nil, NewParseError( + fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind), + ) + } + + rhs := children[len(children)-1] + + if rhs.Root.ValueType != QuotedStringType { + rhs.Root.ValueType = StringType + rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...) 
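+					// An unquoted token is folded into the existing RHS value, so a
+					// value lexed as several tokens is stored as one string.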
+ + } + + children[len(children)-1] = rhs + k.SetChildren(children) + + stack.Push(k) + } + case OpenScopeState: + if !runeCompare(tok.Raw(), openBrace) { + return nil, NewParseError("expected '['") + } + + stmt := newStatement() + stack.Push(stmt) + case CloseScopeState: + if !runeCompare(tok.Raw(), closeBrace) { + return nil, NewParseError("expected ']'") + } + + k = trimSpaces(k) + stack.Push(newCompletedSectionStatement(k)) + case SectionState: + var stmt AST + + switch k.Kind { + case ASTKindStatement: + // If there are multiple literals inside of a scope declaration, + // then the current token's raw value will be appended to the Name. + // + // This handles cases like [ profile default ] + // + // k will represent a SectionStatement with the children representing + // the label of the section + stmt = newSectionStatement(tok) + case ASTKindSectionStatement: + k.Root.raw = append(k.Root.raw, tok.Raw()...) + stmt = k + default: + return nil, NewParseError( + fmt.Sprintf("invalid statement: expected statement: %v", k.Kind), + ) + } + + stack.Push(stmt) + case MarkCompleteState: + if k.Kind != ASTKindStart { + stack.MarkComplete(k) + } + + if stack.Len() == 0 { + stack.Push(start) + } + case SkipState: + stack.Push(newSkipStatement(k)) + s.Skip() + case CommentState: + if k.Kind == ASTKindStart { + stack.Push(k) + } else { + stack.MarkComplete(k) + } + + stmt := newCommentStatement(tok) + stack.Push(stmt) + default: + return nil, NewParseError( + fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", + k, tok.Type())) + } + + if len(tokens) > 0 { + tokens = tokens[1:] + } + } + + // this occurs when a statement has not been completed + if stack.top > 1 { + return nil, NewParseError(fmt.Sprintf("incomplete ini expression")) + } + + // returns a sublist which excludes the start symbol + return stack.List(), nil +} + +// trimSpaces will trim spaces on the left and right hand side of +// the literal. +func trimSpaces(k AST) AST { + // trim left hand side of spaces + for i := 0; i < len(k.Root.raw); i++ { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[1:] + i-- + } + + // trim right hand side of spaces + for i := len(k.Root.raw) - 1; i >= 0; i-- { + if !isWhitespace(k.Root.raw[i]) { + break + } + + k.Root.raw = k.Root.raw[:len(k.Root.raw)-1] + } + + return k +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go new file mode 100644 index 00000000..24df543d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -0,0 +1,324 @@ +package ini + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + runesTrue = []rune("true") + runesFalse = []rune("false") +) + +var literalValues = [][]rune{ + runesTrue, + runesFalse, +} + +func isBoolValue(b []rune) bool { + for _, lv := range literalValues { + if isLitValue(lv, b) { + return true + } + } + return false +} + +func isLitValue(want, have []rune) bool { + if len(have) < len(want) { + return false + } + + for i := 0; i < len(want); i++ { + if want[i] != have[i] { + return false + } + } + + return true +} + +// isNumberValue will return whether not the leading characters in +// a byte slice is a number. A number is delimited by whitespace or +// the newline token. +// +// A number is defined to be in a binary, octal, decimal (int | float), hex format, +// or in scientific notation. 
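+//
+// Illustrative examples, per the checks below: "42", "-1e-4", "0b101",
+// "0o17", and "0x2F" are numbers; "abc" and "1e" are not.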
+func isNumberValue(b []rune) bool { + negativeIndex := 0 + helper := numberHelper{} + needDigit := false + + for i := 0; i < len(b); i++ { + negativeIndex++ + + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return false + } + helper.Determine(b[i]) + needDigit = true + continue + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return false + } + negativeIndex = 0 + needDigit = true + continue + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + needDigit = true + if i == 0 { + return false + } + + fallthrough + case '.': + if err := helper.Determine(b[i]); err != nil { + return false + } + needDigit = true + continue + } + + if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { + return !needDigit + } + + if !helper.CorrectByte(b[i]) { + return false + } + needDigit = false + } + + return !needDigit +} + +func isValid(b []rune) (bool, int, error) { + if len(b) == 0 { + // TODO: should probably return an error + return false, 0, nil + } + + return isValidRune(b[0]), 1, nil +} + +func isValidRune(r rune) bool { + return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n' +} + +// ValueType is an enum that will signify what type +// the Value is +type ValueType int + +func (v ValueType) String() string { + switch v { + case NoneType: + return "NONE" + case DecimalType: + return "FLOAT" + case IntegerType: + return "INT" + case StringType: + return "STRING" + case BoolType: + return "BOOL" + } + + return "" +} + +// ValueType enums +const ( + NoneType = ValueType(iota) + DecimalType + IntegerType + StringType + QuotedStringType + BoolType +) + +// Value is a union container +type Value struct { + Type ValueType + raw []rune + + integer int64 + decimal float64 + boolean bool + str string +} + +func newValue(t ValueType, base int, raw []rune) (Value, error) { + v := Value{ + Type: t, + raw: raw, + } + var err error + + switch t { + case DecimalType: + v.decimal, err = strconv.ParseFloat(string(raw), 64) + case IntegerType: + if base != 10 { + raw = raw[2:] + } + + v.integer, err = strconv.ParseInt(string(raw), base, 64) + case StringType: + v.str = string(raw) + case QuotedStringType: + v.str = string(raw[1 : len(raw)-1]) + case BoolType: + v.boolean = runeCompare(v.raw, runesTrue) + } + + // issue 2253 + // + // if the value trying to be parsed is too large, then we will use + // the 'StringType' and raw value instead. + if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { + v.Type = StringType + v.str = string(raw) + err = nil + } + + return v, err +} + +// Append will append values and change the type to a string +// type. +func (v *Value) Append(tok Token) { + r := tok.Raw() + if v.Type != QuotedStringType { + v.Type = StringType + r = tok.raw[1 : len(tok.raw)-1] + } + if tok.Type() != TokenLit { + v.raw = append(v.raw, tok.Raw()...) + } else { + v.raw = append(v.raw, r...) 
+ } +} + +func (v Value) String() string { + switch v.Type { + case DecimalType: + return fmt.Sprintf("decimal: %f", v.decimal) + case IntegerType: + return fmt.Sprintf("integer: %d", v.integer) + case StringType: + return fmt.Sprintf("string: %s", string(v.raw)) + case QuotedStringType: + return fmt.Sprintf("quoted string: %s", string(v.raw)) + case BoolType: + return fmt.Sprintf("bool: %t", v.boolean) + default: + return "union not set" + } +} + +func newLitToken(b []rune) (Token, int, error) { + n := 0 + var err error + + token := Token{} + if b[0] == '"' { + n, err = getStringValue(b) + if err != nil { + return token, n, err + } + + token = newToken(TokenLit, b[:n], QuotedStringType) + } else if isNumberValue(b) { + var base int + base, n, err = getNumericalValue(b) + if err != nil { + return token, 0, err + } + + value := b[:n] + vType := IntegerType + if contains(value, '.') || hasExponent(value) { + vType = DecimalType + } + token = newToken(TokenLit, value, vType) + token.base = base + } else if isBoolValue(b) { + n, err = getBoolValue(b) + + token = newToken(TokenLit, b[:n], BoolType) + } else { + n, err = getValue(b) + token = newToken(TokenLit, b[:n], StringType) + } + + return token, n, err +} + +// IntValue returns an integer value +func (v Value) IntValue() int64 { + return v.integer +} + +// FloatValue returns a float value +func (v Value) FloatValue() float64 { + return v.decimal +} + +// BoolValue returns a bool value +func (v Value) BoolValue() bool { + return v.boolean +} + +func isTrimmable(r rune) bool { + switch r { + case '\n', ' ': + return true + } + return false +} + +// StringValue returns the string value +func (v Value) StringValue() string { + switch v.Type { + case StringType: + return strings.TrimFunc(string(v.raw), isTrimmable) + case QuotedStringType: + // preserve all characters in the quotes + return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1])) + default: + return strings.TrimFunc(string(v.raw), isTrimmable) + } +} + +func contains(runes []rune, c rune) bool { + for i := 0; i < len(runes); i++ { + if runes[i] == c { + return true + } + } + + return false +} + +func runeCompare(v1 []rune, v2 []rune) bool { + if len(v1) != len(v2) { + return false + } + + for i := 0; i < len(v1); i++ { + if v1[i] != v2[i] { + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go new file mode 100644 index 00000000..e52ac399 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go @@ -0,0 +1,30 @@ +package ini + +func isNewline(b []rune) bool { + if len(b) == 0 { + return false + } + + if b[0] == '\n' { + return true + } + + if len(b) < 2 { + return false + } + + return b[0] == '\r' && b[1] == '\n' +} + +func newNewlineToken(b []rune) (Token, int, error) { + i := 1 + if b[0] == '\r' && isNewline(b[1:]) { + i++ + } + + if !isNewline([]rune(b[:i])) { + return emptyToken, 0, NewParseError("invalid new line token") + } + + return newToken(TokenNL, b[:i], NoneType), i, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go new file mode 100644 index 00000000..a45c0bc5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go @@ -0,0 +1,152 @@ +package ini + +import ( + "bytes" + "fmt" + "strconv" +) + +const ( + none = numberFormat(iota) + binary + octal 
+ decimal + hex + exponent +) + +type numberFormat int + +// numberHelper is used to dictate what format a number is in +// and what to do for negative values. Since -1e-4 is a valid +// number, we cannot just simply check for duplicate negatives. +type numberHelper struct { + numberFormat numberFormat + + negative bool + negativeExponent bool +} + +func (b numberHelper) Exists() bool { + return b.numberFormat != none +} + +func (b numberHelper) IsNegative() bool { + return b.negative || b.negativeExponent +} + +func (b *numberHelper) Determine(c rune) error { + if b.Exists() { + return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) + } + + switch c { + case 'b': + b.numberFormat = binary + case 'o': + b.numberFormat = octal + case 'x': + b.numberFormat = hex + case 'e', 'E': + b.numberFormat = exponent + case '-': + if b.numberFormat != exponent { + b.negative = true + } else { + b.negativeExponent = true + } + case '.': + b.numberFormat = decimal + default: + return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) + } + + return nil +} + +func (b numberHelper) CorrectByte(c rune) bool { + switch { + case b.numberFormat == binary: + if !isBinaryByte(c) { + return false + } + case b.numberFormat == octal: + if !isOctalByte(c) { + return false + } + case b.numberFormat == hex: + if !isHexByte(c) { + return false + } + case b.numberFormat == decimal: + if !isDigit(c) { + return false + } + case b.numberFormat == exponent: + if !isDigit(c) { + return false + } + case b.negativeExponent: + if !isDigit(c) { + return false + } + case b.negative: + if !isDigit(c) { + return false + } + default: + if !isDigit(c) { + return false + } + } + + return true +} + +func (b numberHelper) Base() int { + switch b.numberFormat { + case binary: + return 2 + case octal: + return 8 + case hex: + return 16 + default: + return 10 + } +} + +func (b numberHelper) String() string { + buf := bytes.Buffer{} + i := 0 + + switch b.numberFormat { + case binary: + i++ + buf.WriteString(strconv.Itoa(i) + ": binary format\n") + case octal: + i++ + buf.WriteString(strconv.Itoa(i) + ": octal format\n") + case hex: + i++ + buf.WriteString(strconv.Itoa(i) + ": hex format\n") + case exponent: + i++ + buf.WriteString(strconv.Itoa(i) + ": exponent format\n") + default: + i++ + buf.WriteString(strconv.Itoa(i) + ": integer format\n") + } + + if b.negative { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative format\n") + } + + if b.negativeExponent { + i++ + buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go new file mode 100644 index 00000000..8a84c7cb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go @@ -0,0 +1,39 @@ +package ini + +import ( + "fmt" +) + +var ( + equalOp = []rune("=") + equalColonOp = []rune(":") +) + +func isOp(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '=': + return true + case ':': + return true + default: + return false + } +} + +func newOpToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '=': + tok = newToken(TokenOp, equalOp, NoneType) + case ':': + tok = newToken(TokenOp, equalColonOp, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0])) + } + return tok, 1, nil +} diff --git 
a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go new file mode 100644 index 00000000..45728701 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go @@ -0,0 +1,43 @@ +package ini + +import "fmt" + +const ( + // ErrCodeParseError is returned when a parsing error + // has occurred. + ErrCodeParseError = "INIParseError" +) + +// ParseError is an error which is returned during any part of +// the parsing process. +type ParseError struct { + msg string +} + +// NewParseError will return a new ParseError where message +// is the description of the error. +func NewParseError(message string) *ParseError { + return &ParseError{ + msg: message, + } +} + +// Code will return the ErrCodeParseError +func (err *ParseError) Code() string { + return ErrCodeParseError +} + +// Message returns the error's message +func (err *ParseError) Message() string { + return err.msg +} + +// OrigError return nothing since there will never be any +// original error. +func (err *ParseError) OrigError() error { + return nil +} + +func (err *ParseError) Error() string { + return fmt.Sprintf("%s: %s", err.Code(), err.Message()) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go new file mode 100644 index 00000000..7f01cf7c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go @@ -0,0 +1,60 @@ +package ini + +import ( + "bytes" + "fmt" +) + +// ParseStack is a stack that contains a container, the stack portion, +// and the list which is the list of ASTs that have been successfully +// parsed. +type ParseStack struct { + top int + container []AST + list []AST + index int +} + +func newParseStack(sizeContainer, sizeList int) ParseStack { + return ParseStack{ + container: make([]AST, sizeContainer), + list: make([]AST, sizeList), + } +} + +// Pop will return and truncate the last container element. 
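+//
+// Pop assumes a matching Push has occurred; the parse loop guards this by
+// only popping while Len() > 0.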
+func (s *ParseStack) Pop() AST { + s.top-- + return s.container[s.top] +} + +// Push will add the new AST to the container +func (s *ParseStack) Push(ast AST) { + s.container[s.top] = ast + s.top++ +} + +// MarkComplete will append the AST to the list of completed statements +func (s *ParseStack) MarkComplete(ast AST) { + s.list[s.index] = ast + s.index++ +} + +// List will return the completed statements +func (s ParseStack) List() []AST { + return s.list[:s.index] +} + +// Len will return the length of the container +func (s *ParseStack) Len() int { + return s.top +} + +func (s ParseStack) String() string { + buf := bytes.Buffer{} + for i, node := range s.list { + buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node)) + } + + return buf.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go new file mode 100644 index 00000000..f82095ba --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go @@ -0,0 +1,41 @@ +package ini + +import ( + "fmt" +) + +var ( + emptyRunes = []rune{} +) + +func isSep(b []rune) bool { + if len(b) == 0 { + return false + } + + switch b[0] { + case '[', ']': + return true + default: + return false + } +} + +var ( + openBrace = []rune("[") + closeBrace = []rune("]") +) + +func newSepToken(b []rune) (Token, int, error) { + tok := Token{} + + switch b[0] { + case '[': + tok = newToken(TokenSep, openBrace, NoneType) + case ']': + tok = newToken(TokenSep, closeBrace, NoneType) + default: + return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0])) + } + return tok, 1, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go new file mode 100644 index 00000000..6bb69644 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go @@ -0,0 +1,45 @@ +package ini + +// skipper is used to skip certain blocks of an ini file. +// Currently skipper is used to skip nested blocks of ini +// files. See example below +// +// [ foo ] +// nested = ; this section will be skipped +// a=b +// c=d +// bar=baz ; this will be included +type skipper struct { + shouldSkip bool + TokenSet bool + prevTok Token +} + +func newSkipper() skipper { + return skipper{ + prevTok: emptyToken, + } +} + +func (s *skipper) ShouldSkip(tok Token) bool { + if s.shouldSkip && + s.prevTok.Type() == TokenNL && + tok.Type() != TokenWS { + + s.Continue() + return false + } + s.prevTok = tok + + return s.shouldSkip +} + +func (s *skipper) Skip() { + s.shouldSkip = true + s.prevTok = emptyToken +} + +func (s *skipper) Continue() { + s.shouldSkip = false + s.prevTok = emptyToken +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go new file mode 100644 index 00000000..18f3fe89 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go @@ -0,0 +1,35 @@ +package ini + +// Statement is an empty AST mostly used for transitioning states. 
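+//
+// A plain statement is pushed when an opening '[' token is seen (see
+// OpenScopeState in ini_parser.go).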
+func newStatement() AST { + return newAST(ASTKindStatement, AST{}) +} + +// SectionStatement represents a section AST +func newSectionStatement(tok Token) AST { + return newASTWithRootToken(ASTKindSectionStatement, tok) +} + +// ExprStatement represents a completed expression AST +func newExprStatement(ast AST) AST { + return newAST(ASTKindExprStatement, ast) +} + +// CommentStatement represents a comment in the ini definition. +// +// grammar: +// comment -> #comment' | ;comment' +// comment' -> epsilon | value +func newCommentStatement(tok Token) AST { + return newAST(ASTKindCommentStatement, newExpression(tok)) +} + +// CompletedSectionStatement represents a completed section +func newCompletedSectionStatement(ast AST) AST { + return newAST(ASTKindCompletedSectionStatement, ast) +} + +// SkipStatement is used to skip whole statements +func newSkipStatement(ast AST) AST { + return newAST(ASTKindSkipStatement, ast) +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go new file mode 100644 index 00000000..305999d2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go @@ -0,0 +1,284 @@ +package ini + +import ( + "fmt" +) + +// getStringValue will return a quoted string and the amount +// of bytes read +// +// an error will be returned if the string is not properly formatted +func getStringValue(b []rune) (int, error) { + if b[0] != '"' { + return 0, NewParseError("strings must start with '\"'") + } + + endQuote := false + i := 1 + + for ; i < len(b) && !endQuote; i++ { + if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped { + endQuote = true + break + } else if escaped { + /*c, err := getEscapedByte(b[i]) + if err != nil { + return 0, err + } + + b[i-1] = c + b = append(b[:i], b[i+1:]...) 
+ i--*/ + + continue + } + } + + if !endQuote { + return 0, NewParseError("missing '\"' in string value") + } + + return i + 1, nil +} + +// getBoolValue will return a boolean and the amount +// of bytes read +// +// an error will be returned if the boolean is not of a correct +// value +func getBoolValue(b []rune) (int, error) { + if len(b) < 4 { + return 0, NewParseError("invalid boolean value") + } + + n := 0 + for _, lv := range literalValues { + if len(lv) > len(b) { + continue + } + + if isLitValue(lv, b) { + n = len(lv) + } + } + + if n == 0 { + return 0, NewParseError("invalid boolean value") + } + + return n, nil +} + +// getNumericalValue will return a numerical string, the amount +// of bytes read, and the base of the number +// +// an error will be returned if the number is not of a correct +// value +func getNumericalValue(b []rune) (int, int, error) { + if !isDigit(b[0]) { + return 0, 0, NewParseError("invalid digit value") + } + + i := 0 + helper := numberHelper{} + +loop: + for negativeIndex := 0; i < len(b); i++ { + negativeIndex++ + + if !isDigit(b[i]) { + switch b[i] { + case '-': + if helper.IsNegative() || negativeIndex != 1 { + return 0, 0, NewParseError("parse error '-'") + } + + n := getNegativeNumber(b[i:]) + i += (n - 1) + helper.Determine(b[i]) + continue + case '.': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + case 'e', 'E': + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + + negativeIndex = 0 + case 'b': + if helper.numberFormat == hex { + break + } + fallthrough + case 'o', 'x': + if i == 0 && b[i] != '0' { + return 0, 0, NewParseError("incorrect base format, expected leading '0'") + } + + if i != 1 { + return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) + } + + if err := helper.Determine(b[i]); err != nil { + return 0, 0, err + } + default: + if isWhitespace(b[i]) { + break loop + } + + if isNewline(b[i:]) { + break loop + } + + if !(helper.numberFormat == hex && isHexByte(b[i])) { + if i+2 < len(b) && !isNewline(b[i:i+2]) { + return 0, 0, NewParseError("invalid numerical character") + } else if !isNewline([]rune{b[i]}) { + return 0, 0, NewParseError("invalid numerical character") + } + + break loop + } + } + } + } + + return helper.Base(), i, nil +} + +// isDigit will return whether or not something is an integer +func isDigit(b rune) bool { + return b >= '0' && b <= '9' +} + +func hasExponent(v []rune) bool { + return contains(v, 'e') || contains(v, 'E') +} + +func isBinaryByte(b rune) bool { + switch b { + case '0', '1': + return true + default: + return false + } +} + +func isOctalByte(b rune) bool { + switch b { + case '0', '1', '2', '3', '4', '5', '6', '7': + return true + default: + return false + } +} + +func isHexByte(b rune) bool { + if isDigit(b) { + return true + } + return (b >= 'A' && b <= 'F') || + (b >= 'a' && b <= 'f') +} + +func getValue(b []rune) (int, error) { + i := 0 + + for i < len(b) { + if isNewline(b[i:]) { + break + } + + if isOp(b[i:]) { + break + } + + valid, n, err := isValid(b[i:]) + if err != nil { + return 0, err + } + + if !valid { + break + } + + i += n + } + + return i, nil +} + +// getNegativeNumber will return a negative number from a +// byte slice. This will iterate through all characters until +// a non-digit has been found. 
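+//
+// For example (illustrative): getNegativeNumber([]rune("-42x")) returns 3,
+// the length of the leading "-42".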
+func getNegativeNumber(b []rune) int {
+	if b[0] != '-' {
+		return 0
+	}
+
+	i := 1
+	for ; i < len(b); i++ {
+		if !isDigit(b[i]) {
+			return i
+		}
+	}
+
+	return i
+}
+
+// isEscaped will return whether or not the character is an escaped
+// character.
+func isEscaped(value []rune, b rune) bool {
+	if len(value) == 0 {
+		return false
+	}
+
+	switch b {
+	case '\'': // single quote
+	case '"': // quote
+	case 'n': // newline
+	case 't': // tab
+	case '\\': // backslash
+	default:
+		return false
+	}
+
+	return value[len(value)-1] == '\\'
+}
+
+func getEscapedByte(b rune) (rune, error) {
+	switch b {
+	case '\'': // single quote
+		return '\'', nil
+	case '"': // quote
+		return '"', nil
+	case 'n': // newline
+		return '\n', nil
+	case 't': // tab
+		return '\t', nil
+	case '\\': // backslash
+		return '\\', nil
+	default:
+		return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
+	}
+}
+
+func removeEscapedCharacters(b []rune) []rune {
+	for i := 0; i < len(b); i++ {
+		if isEscaped(b[:i], b[i]) {
+			c, err := getEscapedByte(b[i])
+			if err != nil {
+				return b
+			}
+
+			b[i-1] = c
+			b = append(b[:i], b[i+1:]...)
+			i--
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
new file mode 100644
index 00000000..94841c32
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
@@ -0,0 +1,166 @@
+package ini
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Visitor is an interface used by walkers that will
+// traverse an array of ASTs.
+type Visitor interface {
+	VisitExpr(AST) error
+	VisitStatement(AST) error
+}
+
+// DefaultVisitor is used to visit statements and expressions
+// and ensure that they are both of the correct format.
+// In addition, upon visiting it will build sections and populate
+// the Sections field, which can be used to retrieve profile
+// configuration.
+type DefaultVisitor struct {
+	scope    string
+	Sections Sections
+}
+
+// NewDefaultVisitor returns a DefaultVisitor.
+func NewDefaultVisitor() *DefaultVisitor {
+	return &DefaultVisitor{
+		Sections: Sections{
+			container: map[string]Section{},
+		},
+	}
+}
+
+// VisitExpr visits expressions and records each key/value pair in the
+// current section scope.
+func (v *DefaultVisitor) VisitExpr(expr AST) error {
+	t := v.Sections.container[v.scope]
+	if t.values == nil {
+		t.values = values{}
+	}
+
+	switch expr.Kind {
+	case ASTKindExprStatement:
+		opExpr := expr.GetRoot()
+		switch opExpr.Kind {
+		case ASTKindEqualExpr:
+			children := opExpr.GetChildren()
+			if len(children) <= 1 {
+				return NewParseError("unexpected token type")
+			}
+
+			rhs := children[1]
+
+			if rhs.Root.Type() != TokenLit {
+				return NewParseError("unexpected token type")
+			}
+
+			key := EqualExprKey(opExpr)
+			v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
+			if err != nil {
+				return err
+			}
+
+			t.values[key] = v
+		default:
+			return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+		}
+	default:
+		return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
+	}
+
+	v.Sections.container[v.scope] = t
+	return nil
+}
+
+// VisitStatement visits statements; a completed section statement opens a
+// new section scope that subsequent expressions are recorded under.
+func (v *DefaultVisitor) VisitStatement(stmt AST) error {
+	switch stmt.Kind {
+	case ASTKindCompletedSectionStatement:
+		child := stmt.GetRoot()
+		if child.Kind != ASTKindSectionStatement {
+			return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
+		}
+
+		name := string(child.Root.Raw())
+		v.Sections.container[name] = Section{}
+		v.scope = name
+	default:
+		return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
+	}
+
+	return nil
+}
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+	container map[string]Section
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+	v, ok := t.container[p]
+	return v, ok
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+	keys := make([]string, len(t.container))
+	i := 0
+	for k := range t.container {
+		keys[i] = k
+		i++
+	}
+
+	sort.Strings(keys)
+	return keys
+}
+
+// Section contains a name and values. This represents
+// a sectioned entry in a configuration file.
+type Section struct {
+	Name   string
+	values values
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+	_, ok := t.values[k]
+	return ok
+}
+
+// ValueType will return what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+	v, ok := t.values[k]
+	return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) bool {
+	return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) int64 {
+	return t.values[k].IntValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) float64 {
+	return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+	_, ok := t.values[k]
+	if !ok {
+		return ""
+	}
+	return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
new file mode 100644
index 00000000..99915f7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
@@ -0,0 +1,25 @@
+package ini
+
+// Walk will traverse the AST using v, the Visitor.
+func Walk(tree []AST, v Visitor) error {
+	for _, node := range tree {
+		switch node.Kind {
+		case ASTKindExpr,
+			ASTKindExprStatement:
+
+			if err := v.VisitExpr(node); err != nil {
+				return err
+			}
+		case ASTKindStatement,
+			ASTKindCompletedSectionStatement,
+			ASTKindNestedSectionStatement,
+			ASTKindCompletedNestedSectionStatement:
+
+			if err := v.VisitStatement(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
new file mode 100644
index 00000000..7ffb4ae0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
@@ -0,0 +1,24 @@
+package ini
+
+import (
+	"unicode"
+)
+
+// isWhitespace will return whether or not the character is
+// a whitespace character.
+//
+// Whitespace is defined as a space or tab.
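+//
+// Note that unicode.IsSpace also matches runes such as '\v' and '\f'; only
+// '\n' and '\r' are excluded so they can be lexed as newline tokens.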
+func isWhitespace(c rune) bool {
+	return unicode.IsSpace(c) && c != '\n' && c != '\r'
+}
+
+func newWSToken(b []rune) (Token, int, error) {
+	i := 0
+	for ; i < len(b); i++ {
+		if !isWhitespace(b[i]) {
+			break
+		}
+	}
+
+	return newToken(TokenWS, b[:i], NoneType), i, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
new file mode 100644
index 00000000..0b9b0dfc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/s3err/error.go
@@ -0,0 +1,57 @@
+package s3err
+
+import (
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RequestFailure provides additional S3 specific metadata for the request
+// failure.
+type RequestFailure struct {
+	awserr.RequestFailure
+
+	hostID string
+}
+
+// NewRequestFailure returns a request failure error decorated with S3
+// specific metadata.
+func NewRequestFailure(err awserr.RequestFailure, hostID string) *RequestFailure {
+	return &RequestFailure{RequestFailure: err, hostID: hostID}
+}
+
+func (r RequestFailure) Error() string {
+	extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+		r.StatusCode(), r.RequestID(), r.hostID)
+	return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r RequestFailure) String() string {
+	return r.Error()
+}
+
+// HostID returns the HostID request response value.
+func (r RequestFailure) HostID() string {
+	return r.hostID
+}
+
+// RequestFailureWrapperHandler returns a handler to wrap an
+// awserr.RequestFailure with the S3 request ID 2 (host ID) from the response.
+func RequestFailureWrapperHandler() request.NamedHandler {
+	return request.NamedHandler{
+		Name: "awssdk.s3.errorHandler",
+		Fn: func(req *request.Request) {
+			reqErr, ok := req.Error.(awserr.RequestFailure)
+			if !ok || reqErr == nil {
+				return
+			}
+
+			hostID := req.HTTPResponse.Header.Get("X-Amz-Id-2")
+			if req.Error == nil {
+				return
+			}
+
+			req.Error = NewRequestFailure(reqErr, hostID)
+		},
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
new file mode 100644
index 00000000..5aa9137e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
@@ -0,0 +1,10 @@
+// +build !go1.7
+
+package sdkio
+
+// Copy of Go 1.7 io package's Seeker constants.
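+// Declared locally so the package still builds under the "!go1.7" build tag
+// above, where io.SeekStart and friends do not yet exist.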
+const ( + SeekStart = 0 // seek relative to the origin of the file + SeekCurrent = 1 // seek relative to the current offset + SeekEnd = 2 // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go new file mode 100644 index 00000000..e5f00561 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go @@ -0,0 +1,12 @@ +// +build go1.7 + +package sdkio + +import "io" + +// Alias for Go 1.7 io package Seeker constants +const ( + SeekStart = io.SeekStart // seek relative to the origin of the file + SeekCurrent = io.SeekCurrent // seek relative to the current offset + SeekEnd = io.SeekEnd // seek relative to the end +) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go new file mode 100644 index 00000000..0c9802d8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go @@ -0,0 +1,29 @@ +package sdkrand + +import ( + "math/rand" + "sync" + "time" +) + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// SeededRand is a new RNG using a thread safe implementation of rand.Source +var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go new file mode 100644 index 00000000..38ea61af --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go @@ -0,0 +1,23 @@ +package sdkuri + +import ( + "path" + "strings" +) + +// PathJoin will join the elements of the path delimited by the "/" +// character. Similar to path.Join with the exception the trailing "/" +// character is preserved if present. +func PathJoin(elems ...string) string { + if len(elems) == 0 { + return "" + } + + hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/") + str := path.Join(elems...) + if hasTrailing && str != "/" { + str += "/" + } + + return str +} diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go new file mode 100644 index 00000000..7da8a49c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go @@ -0,0 +1,12 @@ +package shareddefaults + +const ( + // ECSCredsProviderEnvVar is an environmental variable key used to + // determine which path needs to be hit. + ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// ECSContainerCredentialsURI is the endpoint to retrieve container +// credentials. This can be overridden to test to ensure the credential process +// is behaving correctly. 
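+//
+// For example, a test could point it at a local stub (hypothetical address):
+//	ECSContainerCredentialsURI = "http://127.0.0.1:8080"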
+var ECSContainerCredentialsURI = "http://169.254.170.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go new file mode 100644 index 00000000..ebcbc2b4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go @@ -0,0 +1,40 @@ +package shareddefaults + +import ( + "os" + "path/filepath" + "runtime" +) + +// SharedCredentialsFilename returns the SDK's default file path +// for the shared credentials file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/credentials +// - Windows: %USERPROFILE%\.aws\credentials +func SharedCredentialsFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "credentials") +} + +// SharedConfigFilename returns the SDK's default file path for +// the shared config file. +// +// Builds the shared config file path based on the OS's platform. +// +// - Linux/Unix: $HOME/.aws/config +// - Windows: %USERPROFILE%\.aws\config +func SharedConfigFilename() string { + return filepath.Join(UserHomeDir(), ".aws", "config") +} + +// UserHomeDir returns the home directory for the user the process is +// running under. +func UserHomeDir() string { + if runtime.GOOS == "windows" { // Windows + return os.Getenv("USERPROFILE") + } + + // *nix + return os.Getenv("HOME") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go new file mode 100644 index 00000000..ecc7bf82 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/debug.go @@ -0,0 +1,144 @@ +package eventstream + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" +) + +type decodedMessage struct { + rawMessage + Headers decodedHeaders `json:"headers"` +} +type jsonMessage struct { + Length json.Number `json:"total_length"` + HeadersLen json.Number `json:"headers_length"` + PreludeCRC json.Number `json:"prelude_crc"` + Headers decodedHeaders `json:"headers"` + Payload []byte `json:"payload"` + CRC json.Number `json:"message_crc"` +} + +func (d *decodedMessage) UnmarshalJSON(b []byte) (err error) { + var jsonMsg jsonMessage + if err = json.Unmarshal(b, &jsonMsg); err != nil { + return err + } + + d.Length, err = numAsUint32(jsonMsg.Length) + if err != nil { + return err + } + d.HeadersLen, err = numAsUint32(jsonMsg.HeadersLen) + if err != nil { + return err + } + d.PreludeCRC, err = numAsUint32(jsonMsg.PreludeCRC) + if err != nil { + return err + } + d.Headers = jsonMsg.Headers + d.Payload = jsonMsg.Payload + d.CRC, err = numAsUint32(jsonMsg.CRC) + if err != nil { + return err + } + + return nil +} + +func (d *decodedMessage) MarshalJSON() ([]byte, error) { + jsonMsg := jsonMessage{ + Length: json.Number(strconv.Itoa(int(d.Length))), + HeadersLen: json.Number(strconv.Itoa(int(d.HeadersLen))), + PreludeCRC: json.Number(strconv.Itoa(int(d.PreludeCRC))), + Headers: d.Headers, + Payload: d.Payload, + CRC: json.Number(strconv.Itoa(int(d.CRC))), + } + + return json.Marshal(jsonMsg) +} + +func numAsUint32(n json.Number) (uint32, error) { + v, err := n.Int64() + if err != nil { + return 0, fmt.Errorf("failed to get int64 json number, %v", err) + } + + return uint32(v), nil +} + +func (d decodedMessage) Message() Message { + return Message{ + Headers: Headers(d.Headers), + Payload: d.Payload, + } +} + +type 
decodedHeaders Headers + +func (hs *decodedHeaders) UnmarshalJSON(b []byte) error { + var jsonHeaders []struct { + Name string `json:"name"` + Type valueType `json:"type"` + Value interface{} `json:"value"` + } + + decoder := json.NewDecoder(bytes.NewReader(b)) + decoder.UseNumber() + if err := decoder.Decode(&jsonHeaders); err != nil { + return err + } + + var headers Headers + for _, h := range jsonHeaders { + value, err := valueFromType(h.Type, h.Value) + if err != nil { + return err + } + headers.Set(h.Name, value) + } + (*hs) = decodedHeaders(headers) + + return nil +} + +func valueFromType(typ valueType, val interface{}) (Value, error) { + switch typ { + case trueValueType: + return BoolValue(true), nil + case falseValueType: + return BoolValue(false), nil + case int8ValueType: + v, err := val.(json.Number).Int64() + return Int8Value(int8(v)), err + case int16ValueType: + v, err := val.(json.Number).Int64() + return Int16Value(int16(v)), err + case int32ValueType: + v, err := val.(json.Number).Int64() + return Int32Value(int32(v)), err + case int64ValueType: + v, err := val.(json.Number).Int64() + return Int64Value(v), err + case bytesValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return BytesValue(v), err + case stringValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + return StringValue(string(v)), err + case timestampValueType: + v, err := val.(json.Number).Int64() + return TimestampValue(timeFromEpochMilli(v)), err + case uuidValueType: + v, err := base64.StdEncoding.DecodeString(val.(string)) + var tv UUIDValue + copy(tv[:], v) + return tv, err + default: + panic(fmt.Sprintf("unknown type, %s, %T", typ.String(), val)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go new file mode 100644 index 00000000..4b972b2d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/decode.go @@ -0,0 +1,199 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "hash/crc32" + "io" + + "github.com/aws/aws-sdk-go/aws" +) + +// Decoder provides decoding of an Event Stream messages. +type Decoder struct { + r io.Reader + logger aws.Logger +} + +// NewDecoder initializes and returns a Decoder for decoding event +// stream messages from the reader provided. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + r: r, + } +} + +// Decode attempts to decode a single message from the event stream reader. +// Will return the event stream message, or error if Decode fails to read +// the message from the stream. 
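+//
+// Illustrative call (assumption): m, err := NewDecoder(r).Decode(nil); a nil
+// payload buffer is simply grown as needed.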
+func (d *Decoder) Decode(payloadBuf []byte) (m Message, err error) { + reader := d.r + if d.logger != nil { + debugMsgBuf := bytes.NewBuffer(nil) + reader = io.TeeReader(reader, debugMsgBuf) + defer func() { + logMessageDecode(d.logger, debugMsgBuf, m, err) + }() + } + + crc := crc32.New(crc32IEEETable) + hashReader := io.TeeReader(reader, crc) + + prelude, err := decodePrelude(hashReader, crc) + if err != nil { + return Message{}, err + } + + if prelude.HeadersLen > 0 { + lr := io.LimitReader(hashReader, int64(prelude.HeadersLen)) + m.Headers, err = decodeHeaders(lr) + if err != nil { + return Message{}, err + } + } + + if payloadLen := prelude.PayloadLen(); payloadLen > 0 { + buf, err := decodePayload(payloadBuf, io.LimitReader(hashReader, int64(payloadLen))) + if err != nil { + return Message{}, err + } + m.Payload = buf + } + + msgCRC := crc.Sum32() + if err := validateCRC(reader, msgCRC); err != nil { + return Message{}, err + } + + return m, nil +} + +// UseLogger specifies the Logger that that the decoder should use to log the +// message decode to. +func (d *Decoder) UseLogger(logger aws.Logger) { + d.logger = logger +} + +func logMessageDecode(logger aws.Logger, msgBuf *bytes.Buffer, msg Message, decodeErr error) { + w := bytes.NewBuffer(nil) + defer func() { logger.Log(w.String()) }() + + fmt.Fprintf(w, "Raw message:\n%s\n", + hex.Dump(msgBuf.Bytes())) + + if decodeErr != nil { + fmt.Fprintf(w, "Decode error: %v\n", decodeErr) + return + } + + rawMsg, err := msg.rawMessage() + if err != nil { + fmt.Fprintf(w, "failed to create raw message, %v\n", err) + return + } + + decodedMsg := decodedMessage{ + rawMessage: rawMsg, + Headers: decodedHeaders(msg.Headers), + } + + fmt.Fprintf(w, "Decoded message:\n") + encoder := json.NewEncoder(w) + if err := encoder.Encode(decodedMsg); err != nil { + fmt.Fprintf(w, "failed to generate decoded message, %v\n", err) + } +} + +func decodePrelude(r io.Reader, crc hash.Hash32) (messagePrelude, error) { + var p messagePrelude + + var err error + p.Length, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + p.HeadersLen, err = decodeUint32(r) + if err != nil { + return messagePrelude{}, err + } + + if err := p.ValidateLens(); err != nil { + return messagePrelude{}, err + } + + preludeCRC := crc.Sum32() + if err := validateCRC(r, preludeCRC); err != nil { + return messagePrelude{}, err + } + + p.PreludeCRC = preludeCRC + + return p, nil +} + +func decodePayload(buf []byte, r io.Reader) ([]byte, error) { + w := bytes.NewBuffer(buf[0:0]) + + _, err := io.Copy(w, r) + return w.Bytes(), err +} + +func decodeUint8(r io.Reader) (uint8, error) { + type byteReader interface { + ReadByte() (byte, error) + } + + if br, ok := r.(byteReader); ok { + v, err := br.ReadByte() + return uint8(v), err + } + + var b [1]byte + _, err := io.ReadFull(r, b[:]) + return uint8(b[0]), err +} +func decodeUint16(r io.Reader) (uint16, error) { + var b [2]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint16(bs), nil +} +func decodeUint32(r io.Reader) (uint32, error) { + var b [4]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint32(bs), nil +} +func decodeUint64(r io.Reader) (uint64, error) { + var b [8]byte + bs := b[:] + _, err := io.ReadFull(r, bs) + if err != nil { + return 0, err + } + return binary.BigEndian.Uint64(bs), nil +} + +func validateCRC(r io.Reader, expect uint32) error { + msgCRC, err := decodeUint32(r) + 
if err != nil { + return err + } + + if msgCRC != expect { + return ChecksumError{} + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go new file mode 100644 index 00000000..150a6098 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/encode.go @@ -0,0 +1,114 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash" + "hash/crc32" + "io" +) + +// Encoder provides EventStream message encoding. +type Encoder struct { + w io.Writer + + headersBuf *bytes.Buffer +} + +// NewEncoder initializes and returns an Encoder to encode Event Stream +// messages to an io.Writer. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + headersBuf: bytes.NewBuffer(nil), + } +} + +// Encode encodes a single EventStream message to the io.Writer the Encoder +// was created with. An error is returned if writing the message fails. +func (e *Encoder) Encode(msg Message) error { + e.headersBuf.Reset() + + err := encodeHeaders(e.headersBuf, msg.Headers) + if err != nil { + return err + } + + crc := crc32.New(crc32IEEETable) + hashWriter := io.MultiWriter(e.w, crc) + + headersLen := uint32(e.headersBuf.Len()) + payloadLen := uint32(len(msg.Payload)) + + if err := encodePrelude(hashWriter, crc, headersLen, payloadLen); err != nil { + return err + } + + if headersLen > 0 { + if _, err := io.Copy(hashWriter, e.headersBuf); err != nil { + return err + } + } + + if payloadLen > 0 { + if _, err := hashWriter.Write(msg.Payload); err != nil { + return err + } + } + + msgCRC := crc.Sum32() + return binary.Write(e.w, binary.BigEndian, msgCRC) +} + +func encodePrelude(w io.Writer, crc hash.Hash32, headersLen, payloadLen uint32) error { + p := messagePrelude{ + Length: minMsgLen + headersLen + payloadLen, + HeadersLen: headersLen, + } + if err := p.ValidateLens(); err != nil { + return err + } + + err := binaryWriteFields(w, binary.BigEndian, + p.Length, + p.HeadersLen, + ) + if err != nil { + return err + } + + p.PreludeCRC = crc.Sum32() + err = binary.Write(w, binary.BigEndian, p.PreludeCRC) + if err != nil { + return err + } + + return nil +} + +func encodeHeaders(w io.Writer, headers Headers) error { + for _, h := range headers { + hn := headerName{ + Len: uint8(len(h.Name)), + } + copy(hn.Name[:hn.Len], h.Name) + if err := hn.encode(w); err != nil { + return err + } + + if err := h.Value.encode(w); err != nil { + return err + } + } + + return nil +} + +func binaryWriteFields(w io.Writer, order binary.ByteOrder, vs ...interface{}) error { + for _, v := range vs { + if err := binary.Write(w, order, v); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go new file mode 100644 index 00000000..5481ef30 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/error.go @@ -0,0 +1,23 @@ +package eventstream + +import "fmt" + +// LengthError provides the error for items being larger than a maximum length. +type LengthError struct { + Part string + Want int + Have int + Value interface{} +} + +func (e LengthError) Error() string { + return fmt.Sprintf("%s length invalid, %d/%d, %v", + e.Part, e.Want, e.Have, e.Value) +} + +// ChecksumError provides the error for message checksum invalidation errors. 
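+// It is returned by Decode when the CRC32 computed over a message does not
+// match the message's trailing CRC value.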
+type ChecksumError struct{}
+
+func (e ChecksumError) Error() string {
+	return "message checksum mismatch"
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
new file mode 100644
index 00000000..97937c8e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/api.go
@@ -0,0 +1,196 @@
+package eventstreamapi
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/private/protocol"
+	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
+)
+
+// Unmarshaler provides the interface for unmarshaling an EventStream
+// message into an SDK type.
+type Unmarshaler interface {
+	UnmarshalEvent(protocol.PayloadUnmarshaler, eventstream.Message) error
+}
+
+// EventStream headers with specific meaning to async API functionality.
+const (
+	MessageTypeHeader = `:message-type` // Identifies type of message.
+	EventMessageType = `event`
+	ErrorMessageType = `error`
+	ExceptionMessageType = `exception`
+
+	// Message Events
+	EventTypeHeader = `:event-type` // Identifies message event type e.g. "Stats".
+
+	// Message Error
+	ErrorCodeHeader = `:error-code`
+	ErrorMessageHeader = `:error-message`
+
+	// Message Exception
+	ExceptionTypeHeader = `:exception-type`
+)
+
+// EventReader provides reading from the EventStream of a reader.
+type EventReader struct {
+	reader io.ReadCloser
+	decoder *eventstream.Decoder
+
+	unmarshalerForEventType func(string) (Unmarshaler, error)
+	payloadUnmarshaler protocol.PayloadUnmarshaler
+
+	payloadBuf []byte
+}
+
+// NewEventReader returns an EventReader built from the reader and unmarshaler
+// provided. Use the ReadEvent method to start reading from the EventStream.
+func NewEventReader(
+	reader io.ReadCloser,
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	unmarshalerForEventType func(string) (Unmarshaler, error),
+) *EventReader {
+	return &EventReader{
+		reader: reader,
+		decoder: eventstream.NewDecoder(reader),
+		payloadUnmarshaler: payloadUnmarshaler,
+		unmarshalerForEventType: unmarshalerForEventType,
+		payloadBuf: make([]byte, 10*1024),
+	}
+}
+
+// UseLogger instructs the EventReader to use the logger and log level
+// specified.
+func (r *EventReader) UseLogger(logger aws.Logger, logLevel aws.LogLevelType) {
+	if logger != nil && logLevel.Matches(aws.LogDebugWithEventStreamBody) {
+		r.decoder.UseLogger(logger)
+	}
+}
+
+// ReadEvent attempts to read a message from the EventStream and return the
+// unmarshaled event value that the message is for.
+//
+// For EventStream API errors check if the returned error satisfies the
+// awserr.Error interface to get the error's Code and Message components.
+//
+// EventUnmarshalers called with EventStream messages must take copies of the
+// message's Payload. The payload is reused between events read.
+func (r *EventReader) ReadEvent() (event interface{}, err error) {
+	msg, err := r.decoder.Decode(r.payloadBuf)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		// Reclaim payload buffer for next message read.
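+		// Re-slicing to zero length keeps the backing array, letting the next Decode call reuse it without reallocating.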
+ r.payloadBuf = msg.Payload[0:0] + }() + + typ, err := GetHeaderString(msg, MessageTypeHeader) + if err != nil { + return nil, err + } + + switch typ { + case EventMessageType: + return r.unmarshalEventMessage(msg) + case ExceptionMessageType: + err = r.unmarshalEventException(msg) + return nil, err + case ErrorMessageType: + return nil, r.unmarshalErrorMessage(msg) + default: + return nil, fmt.Errorf("unknown eventstream message type, %v", typ) + } +} + +func (r *EventReader) unmarshalEventMessage( + msg eventstream.Message, +) (event interface{}, err error) { + eventType, err := GetHeaderString(msg, EventTypeHeader) + if err != nil { + return nil, err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return nil, err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return nil, err + } + + return ev, nil +} + +func (r *EventReader) unmarshalEventException( + msg eventstream.Message, +) (err error) { + eventType, err := GetHeaderString(msg, ExceptionTypeHeader) + if err != nil { + return err + } + + ev, err := r.unmarshalerForEventType(eventType) + if err != nil { + return err + } + + err = ev.UnmarshalEvent(r.payloadUnmarshaler, msg) + if err != nil { + return err + } + + var ok bool + err, ok = ev.(error) + if !ok { + err = messageError{ + code: "SerializationError", + msg: fmt.Sprintf( + "event stream exception %s mapped to non-error %T, %v", + eventType, ev, ev, + ), + } + } + + return err +} + +func (r *EventReader) unmarshalErrorMessage(msg eventstream.Message) (err error) { + var msgErr messageError + + msgErr.code, err = GetHeaderString(msg, ErrorCodeHeader) + if err != nil { + return err + } + + msgErr.msg, err = GetHeaderString(msg, ErrorMessageHeader) + if err != nil { + return err + } + + return msgErr +} + +// Close closes the EventReader's EventStream reader. +func (r *EventReader) Close() error { + return r.reader.Close() +} + +// GetHeaderString returns the value of the header as a string. If the header +// is not set or the value is not a string an error will be returned. 
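+// For example, ReadEvent inspects the message type with: typ, err := GetHeaderString(msg, MessageTypeHeader).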
+func GetHeaderString(msg eventstream.Message, headerName string) (string, error) { + headerVal := msg.Headers.Get(headerName) + if headerVal == nil { + return "", fmt.Errorf("error header %s not present", headerName) + } + + v, ok := headerVal.Get().(string) + if !ok { + return "", fmt.Errorf("error header value is not a string, %T", headerVal) + } + + return v, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go new file mode 100644 index 00000000..5ea5a988 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi/error.go @@ -0,0 +1,24 @@ +package eventstreamapi + +import "fmt" + +type messageError struct { + code string + msg string +} + +func (e messageError) Code() string { + return e.code +} + +func (e messageError) Message() string { + return e.msg +} + +func (e messageError) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.msg) +} + +func (e messageError) OrigErr() error { + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go new file mode 100644 index 00000000..3b44dde2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header.go @@ -0,0 +1,166 @@ +package eventstream + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Headers are a collection of EventStream header values. +type Headers []Header + +// Header is a single EventStream Key Value header pair. +type Header struct { + Name string + Value Value +} + +// Set associates the name with a value. If the header name already exists in +// the Headers the value will be replaced with the new one. +func (hs *Headers) Set(name string, value Value) { + var i int + for ; i < len(*hs); i++ { + if (*hs)[i].Name == name { + (*hs)[i].Value = value + return + } + } + + *hs = append(*hs, Header{ + Name: name, Value: value, + }) +} + +// Get returns the Value associated with the header. Nil is returned if the +// value does not exist. +func (hs Headers) Get(name string) Value { + for i := 0; i < len(hs); i++ { + if h := hs[i]; h.Name == name { + return h.Value + } + } + return nil +} + +// Del deletes the value in the Headers if it exists. 
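+// Matching entries are removed in place by shifting the headers that follow down one position.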
+func (hs *Headers) Del(name string) { + for i := 0; i < len(*hs); i++ { + if (*hs)[i].Name == name { + copy((*hs)[i:], (*hs)[i+1:]) + (*hs) = (*hs)[:len(*hs)-1] + } + } +} + +func decodeHeaders(r io.Reader) (Headers, error) { + hs := Headers{} + + for { + name, err := decodeHeaderName(r) + if err != nil { + if err == io.EOF { + // EOF while getting header name means no more headers + break + } + return nil, err + } + + value, err := decodeHeaderValue(r) + if err != nil { + return nil, err + } + + hs.Set(name, value) + } + + return hs, nil +} + +func decodeHeaderName(r io.Reader) (string, error) { + var n headerName + + var err error + n.Len, err = decodeUint8(r) + if err != nil { + return "", err + } + + name := n.Name[:n.Len] + if _, err := io.ReadFull(r, name); err != nil { + return "", err + } + + return string(name), nil +} + +func decodeHeaderValue(r io.Reader) (Value, error) { + var raw rawValue + + typ, err := decodeUint8(r) + if err != nil { + return nil, err + } + raw.Type = valueType(typ) + + var v Value + + switch raw.Type { + case trueValueType: + v = BoolValue(true) + case falseValueType: + v = BoolValue(false) + case int8ValueType: + var tv Int8Value + err = tv.decode(r) + v = tv + case int16ValueType: + var tv Int16Value + err = tv.decode(r) + v = tv + case int32ValueType: + var tv Int32Value + err = tv.decode(r) + v = tv + case int64ValueType: + var tv Int64Value + err = tv.decode(r) + v = tv + case bytesValueType: + var tv BytesValue + err = tv.decode(r) + v = tv + case stringValueType: + var tv StringValue + err = tv.decode(r) + v = tv + case timestampValueType: + var tv TimestampValue + err = tv.decode(r) + v = tv + case uuidValueType: + var tv UUIDValue + err = tv.decode(r) + v = tv + default: + panic(fmt.Sprintf("unknown value type %d", raw.Type)) + } + + // Error could be EOF, let caller deal with it + return v, err +} + +const maxHeaderNameLen = 255 + +type headerName struct { + Len uint8 + Name [maxHeaderNameLen]byte +} + +func (v headerName) encode(w io.Writer) error { + if err := binary.Write(w, binary.BigEndian, v.Len); err != nil { + return err + } + + _, err := w.Write(v.Name[:v.Len]) + return err +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go new file mode 100644 index 00000000..e3fc0766 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/header_value.go @@ -0,0 +1,501 @@ +package eventstream + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "io" + "strconv" + "time" +) + +const maxHeaderValueLen = 1<<15 - 1 // 2^15-1 or 32KB - 1 + +// valueType is the EventStream header value type. 
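+// It is written as the first byte of every encoded header value and selects how the bytes that follow are decoded.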
+type valueType uint8 + +// Header value types +const ( + trueValueType valueType = iota + falseValueType + int8ValueType // Byte + int16ValueType // Short + int32ValueType // Integer + int64ValueType // Long + bytesValueType + stringValueType + timestampValueType + uuidValueType +) + +func (t valueType) String() string { + switch t { + case trueValueType: + return "bool" + case falseValueType: + return "bool" + case int8ValueType: + return "int8" + case int16ValueType: + return "int16" + case int32ValueType: + return "int32" + case int64ValueType: + return "int64" + case bytesValueType: + return "byte_array" + case stringValueType: + return "string" + case timestampValueType: + return "timestamp" + case uuidValueType: + return "uuid" + default: + return fmt.Sprintf("unknown value type %d", uint8(t)) + } +} + +type rawValue struct { + Type valueType + Len uint16 // Only set for variable length slices + Value []byte // byte representation of value, BigEndian encoding. +} + +func (r rawValue) encodeScalar(w io.Writer, v interface{}) error { + return binaryWriteFields(w, binary.BigEndian, + r.Type, + v, + ) +} + +func (r rawValue) encodeFixedSlice(w io.Writer, v []byte) error { + binary.Write(w, binary.BigEndian, r.Type) + + _, err := w.Write(v) + return err +} + +func (r rawValue) encodeBytes(w io.Writer, v []byte) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + _, err = w.Write(v) + return err +} + +func (r rawValue) encodeString(w io.Writer, v string) error { + if len(v) > maxHeaderValueLen { + return LengthError{ + Part: "header value", + Want: maxHeaderValueLen, Have: len(v), + Value: v, + } + } + r.Len = uint16(len(v)) + + type stringWriter interface { + WriteString(string) (int, error) + } + + err := binaryWriteFields(w, binary.BigEndian, + r.Type, + r.Len, + ) + if err != nil { + return err + } + + if sw, ok := w.(stringWriter); ok { + _, err = sw.WriteString(v) + } else { + _, err = w.Write([]byte(v)) + } + + return err +} + +func decodeFixedBytesValue(r io.Reader, buf []byte) error { + _, err := io.ReadFull(r, buf) + return err +} + +func decodeBytesValue(r io.Reader) ([]byte, error) { + var raw rawValue + var err error + raw.Len, err = decodeUint16(r) + if err != nil { + return nil, err + } + + buf := make([]byte, raw.Len) + _, err = io.ReadFull(r, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeStringValue(r io.Reader) (string, error) { + v, err := decodeBytesValue(r) + return string(v), err +} + +// Value represents the abstract header value. +type Value interface { + Get() interface{} + String() string + valueType() valueType + encode(io.Writer) error +} + +// An BoolValue provides eventstream encoding, and representation +// of a Go bool value. +type BoolValue bool + +// Get returns the underlying type +func (v BoolValue) Get() interface{} { + return bool(v) +} + +// valueType returns the EventStream header value type value. +func (v BoolValue) valueType() valueType { + if v { + return trueValueType + } + return falseValueType +} + +func (v BoolValue) String() string { + return strconv.FormatBool(bool(v)) +} + +// encode encodes the BoolValue into an eventstream binary value +// representation. 
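+// Booleans carry no value bytes; the type byte alone (trueValueType or falseValueType) encodes the value.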
+func (v BoolValue) encode(w io.Writer) error { + return binary.Write(w, binary.BigEndian, v.valueType()) +} + +// An Int8Value provides eventstream encoding, and representation of a Go +// int8 value. +type Int8Value int8 + +// Get returns the underlying value. +func (v Int8Value) Get() interface{} { + return int8(v) +} + +// valueType returns the EventStream header value type value. +func (Int8Value) valueType() valueType { + return int8ValueType +} + +func (v Int8Value) String() string { + return fmt.Sprintf("0x%02x", int8(v)) +} + +// encode encodes the Int8Value into an eventstream binary value +// representation. +func (v Int8Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeScalar(w, v) +} + +func (v *Int8Value) decode(r io.Reader) error { + n, err := decodeUint8(r) + if err != nil { + return err + } + + *v = Int8Value(n) + return nil +} + +// An Int16Value provides eventstream encoding, and representation of a Go +// int16 value. +type Int16Value int16 + +// Get returns the underlying value. +func (v Int16Value) Get() interface{} { + return int16(v) +} + +// valueType returns the EventStream header value type value. +func (Int16Value) valueType() valueType { + return int16ValueType +} + +func (v Int16Value) String() string { + return fmt.Sprintf("0x%04x", int16(v)) +} + +// encode encodes the Int16Value into an eventstream binary value +// representation. +func (v Int16Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int16Value) decode(r io.Reader) error { + n, err := decodeUint16(r) + if err != nil { + return err + } + + *v = Int16Value(n) + return nil +} + +// An Int32Value provides eventstream encoding, and representation of a Go +// int32 value. +type Int32Value int32 + +// Get returns the underlying value. +func (v Int32Value) Get() interface{} { + return int32(v) +} + +// valueType returns the EventStream header value type value. +func (Int32Value) valueType() valueType { + return int32ValueType +} + +func (v Int32Value) String() string { + return fmt.Sprintf("0x%08x", int32(v)) +} + +// encode encodes the Int32Value into an eventstream binary value +// representation. +func (v Int32Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int32Value) decode(r io.Reader) error { + n, err := decodeUint32(r) + if err != nil { + return err + } + + *v = Int32Value(n) + return nil +} + +// An Int64Value provides eventstream encoding, and representation of a Go +// int64 value. +type Int64Value int64 + +// Get returns the underlying value. +func (v Int64Value) Get() interface{} { + return int64(v) +} + +// valueType returns the EventStream header value type value. +func (Int64Value) valueType() valueType { + return int64ValueType +} + +func (v Int64Value) String() string { + return fmt.Sprintf("0x%016x", int64(v)) +} + +// encode encodes the Int64Value into an eventstream binary value +// representation. +func (v Int64Value) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + return raw.encodeScalar(w, v) +} + +func (v *Int64Value) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = Int64Value(n) + return nil +} + +// An BytesValue provides eventstream encoding, and representation of a Go +// byte slice. +type BytesValue []byte + +// Get returns the underlying value. 
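+// The returned slice shares its backing array with the header value; copy it if it must outlive the message.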
+func (v BytesValue) Get() interface{} { + return []byte(v) +} + +// valueType returns the EventStream header value type value. +func (BytesValue) valueType() valueType { + return bytesValueType +} + +func (v BytesValue) String() string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +// encode encodes the BytesValue into an eventstream binary value +// representation. +func (v BytesValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeBytes(w, []byte(v)) +} + +func (v *BytesValue) decode(r io.Reader) error { + buf, err := decodeBytesValue(r) + if err != nil { + return err + } + + *v = BytesValue(buf) + return nil +} + +// An StringValue provides eventstream encoding, and representation of a Go +// string. +type StringValue string + +// Get returns the underlying value. +func (v StringValue) Get() interface{} { + return string(v) +} + +// valueType returns the EventStream header value type value. +func (StringValue) valueType() valueType { + return stringValueType +} + +func (v StringValue) String() string { + return string(v) +} + +// encode encodes the StringValue into an eventstream binary value +// representation. +func (v StringValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeString(w, string(v)) +} + +func (v *StringValue) decode(r io.Reader) error { + s, err := decodeStringValue(r) + if err != nil { + return err + } + + *v = StringValue(s) + return nil +} + +// An TimestampValue provides eventstream encoding, and representation of a Go +// timestamp. +type TimestampValue time.Time + +// Get returns the underlying value. +func (v TimestampValue) Get() interface{} { + return time.Time(v) +} + +// valueType returns the EventStream header value type value. +func (TimestampValue) valueType() valueType { + return timestampValueType +} + +func (v TimestampValue) epochMilli() int64 { + nano := time.Time(v).UnixNano() + msec := nano / int64(time.Millisecond) + return msec +} + +func (v TimestampValue) String() string { + msec := v.epochMilli() + return strconv.FormatInt(msec, 10) +} + +// encode encodes the TimestampValue into an eventstream binary value +// representation. +func (v TimestampValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + msec := v.epochMilli() + return raw.encodeScalar(w, msec) +} + +func (v *TimestampValue) decode(r io.Reader) error { + n, err := decodeUint64(r) + if err != nil { + return err + } + + *v = TimestampValue(timeFromEpochMilli(int64(n))) + return nil +} + +func timeFromEpochMilli(t int64) time.Time { + secs := t / 1e3 + msec := t % 1e3 + return time.Unix(secs, msec*int64(time.Millisecond)).UTC() +} + +// An UUIDValue provides eventstream encoding, and representation of a UUID +// value. +type UUIDValue [16]byte + +// Get returns the underlying value. +func (v UUIDValue) Get() interface{} { + return v[:] +} + +// valueType returns the EventStream header value type value. +func (UUIDValue) valueType() valueType { + return uuidValueType +} + +func (v UUIDValue) String() string { + return fmt.Sprintf(`%X-%X-%X-%X-%X`, v[0:4], v[4:6], v[6:8], v[8:10], v[10:]) +} + +// encode encodes the UUIDValue into an eventstream binary value +// representation. 
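+// UUIDs are a fixed 16 bytes, so unlike byte slices and strings no length prefix is written.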
+func (v UUIDValue) encode(w io.Writer) error { + raw := rawValue{ + Type: v.valueType(), + } + + return raw.encodeFixedSlice(w, v[:]) +} + +func (v *UUIDValue) decode(r io.Reader) error { + tv := (*v)[:] + return decodeFixedBytesValue(r, tv) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go new file mode 100644 index 00000000..2dc012a6 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/eventstream/message.go @@ -0,0 +1,103 @@ +package eventstream + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +const preludeLen = 8 +const preludeCRCLen = 4 +const msgCRCLen = 4 +const minMsgLen = preludeLen + preludeCRCLen + msgCRCLen +const maxPayloadLen = 1024 * 1024 * 16 // 16MB +const maxHeadersLen = 1024 * 128 // 128KB +const maxMsgLen = minMsgLen + maxHeadersLen + maxPayloadLen + +var crc32IEEETable = crc32.MakeTable(crc32.IEEE) + +// A Message provides the eventstream message representation. +type Message struct { + Headers Headers + Payload []byte +} + +func (m *Message) rawMessage() (rawMessage, error) { + var raw rawMessage + + if len(m.Headers) > 0 { + var headers bytes.Buffer + if err := encodeHeaders(&headers, m.Headers); err != nil { + return rawMessage{}, err + } + raw.Headers = headers.Bytes() + raw.HeadersLen = uint32(len(raw.Headers)) + } + + raw.Length = raw.HeadersLen + uint32(len(m.Payload)) + minMsgLen + + hash := crc32.New(crc32IEEETable) + binaryWriteFields(hash, binary.BigEndian, raw.Length, raw.HeadersLen) + raw.PreludeCRC = hash.Sum32() + + binaryWriteFields(hash, binary.BigEndian, raw.PreludeCRC) + + if raw.HeadersLen > 0 { + hash.Write(raw.Headers) + } + + // Read payload bytes and update hash for it as well. + if len(m.Payload) > 0 { + raw.Payload = m.Payload + hash.Write(raw.Payload) + } + + raw.CRC = hash.Sum32() + + return raw, nil +} + +type messagePrelude struct { + Length uint32 + HeadersLen uint32 + PreludeCRC uint32 +} + +func (p messagePrelude) PayloadLen() uint32 { + return p.Length - p.HeadersLen - minMsgLen +} + +func (p messagePrelude) ValidateLens() error { + if p.Length == 0 || p.Length > maxMsgLen { + return LengthError{ + Part: "message prelude", + Want: maxMsgLen, + Have: int(p.Length), + } + } + if p.HeadersLen > maxHeadersLen { + return LengthError{ + Part: "message headers", + Want: maxHeadersLen, + Have: int(p.HeadersLen), + } + } + if payloadLen := p.PayloadLen(); payloadLen > maxPayloadLen { + return LengthError{ + Part: "message payload", + Want: maxPayloadLen, + Have: int(payloadLen), + } + } + + return nil +} + +type rawMessage struct { + messagePrelude + + Headers []byte + Payload []byte + + CRC uint32 +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go new file mode 100644 index 00000000..d7d42db0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go @@ -0,0 +1,68 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// ValidateEndpointHostHandler is a request handler that will validate the +// request endpoint's hosts is a valid RFC 3986 host. 
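+// On failure the handler sets the request error, failing the request before it is sent.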
+var ValidateEndpointHostHandler = request.NamedHandler{ + Name: "awssdk.protocol.ValidateEndpointHostHandler", + Fn: func(r *request.Request) { + err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host) + if err != nil { + r.Error = err + } + }, +} + +// ValidateEndpointHost validates that the host string passed in is a valid RFC +// 3986 host. Returns error if the host is not valid. +func ValidateEndpointHost(opName, host string) error { + paramErrs := request.ErrInvalidParams{Context: opName} + labels := strings.Split(host, ".") + + for i, label := range labels { + if i == len(labels)-1 && len(label) == 0 { + // Allow trailing dot for FQDN hosts. + continue + } + + if !ValidHostLabel(label) { + paramErrs.Add(request.NewErrParamFormat( + "endpoint host label", "[a-zA-Z0-9-]{1,63}", label)) + } + } + + if len(host) > 255 { + paramErrs.Add(request.NewErrParamMaxLen( + "endpoint host", 255, host, + )) + } + + if paramErrs.Len() > 0 { + return paramErrs + } + return nil +} + +// ValidHostLabel returns if the label is a valid RFC 3986 host label. +func ValidHostLabel(label string) bool { + if l := len(label); l == 0 || l > 63 { + return false + } + for _, r := range label { + switch { + case r >= '0' && r <= '9': + case r >= 'A' && r <= 'Z': + case r >= 'a' && r <= 'z': + case r == '-': + default: + return false + } + } + + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go new file mode 100644 index 00000000..915b0fca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go @@ -0,0 +1,54 @@ +package protocol + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// HostPrefixHandlerName is the handler name for the host prefix request +// handler. +const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler" + +// NewHostPrefixHandler constructs a build handler +func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler { + builder := HostPrefixBuilder{ + Prefix: prefix, + LabelsFn: labelsFn, + } + + return request.NamedHandler{ + Name: HostPrefixHandlerName, + Fn: builder.Build, + } +} + +// HostPrefixBuilder provides the request handler to expand and prepend +// the host prefix into the operation's request endpoint host. +type HostPrefixBuilder struct { + Prefix string + LabelsFn func() map[string]string +} + +// Build updates the passed in Request with the HostPrefix template expanded. +func (h HostPrefixBuilder) Build(r *request.Request) { + if aws.BoolValue(r.Config.DisableEndpointHostPrefix) { + return + } + + var labels map[string]string + if h.LabelsFn != nil { + labels = h.LabelsFn() + } + + prefix := h.Prefix + for name, value := range labels { + prefix = strings.Replace(prefix, "{"+name+"}", value, -1) + } + + r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host + if len(r.HTTPRequest.Host) > 0 { + r.HTTPRequest.Host = prefix + r.HTTPRequest.Host + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go new file mode 100644 index 00000000..864fb670 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -0,0 +1,296 @@ +// Package jsonutil provides JSON serialization of AWS requests and responses. 
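+// Marshaling is driven by the SDK struct tags (type, locationName, payload, timestampFormat) rather than by the standard encoding/json rules.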
+package jsonutil + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/private/protocol" +) + +var timeType = reflect.ValueOf(time.Time{}).Type() +var byteSliceType = reflect.ValueOf([]byte{}).Type() + +// BuildJSON builds a JSON string for a given object v. +func BuildJSON(v interface{}) ([]byte, error) { + var buf bytes.Buffer + + err := buildAny(reflect.ValueOf(v), &buf, "") + return buf.Bytes(), err +} + +func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + origVal := value + value = reflect.Indirect(value) + if !value.IsValid() { + return nil + } + + vtype := value.Type() + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if value.Type() != timeType { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return buildStruct(value, buf, tag) + case "list": + return buildList(value, buf, tag) + case "map": + return buildMap(value, buf, tag) + default: + return buildScalar(origVal, buf, tag) + } +} + +func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + if !value.IsValid() { + return nil + } + + // unwrap payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := value.Type().FieldByName(payload) + tag = field.Tag + value = elemOf(value.FieldByName(payload)) + + if !value.IsValid() { + return nil + } + } + + buf.WriteByte('{') + + t := value.Type() + first := true + for i := 0; i < t.NumField(); i++ { + member := value.Field(i) + + // This allocates the most memory. + // Additionally, we cannot skip nil fields due to + // idempotency auto filling. 
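+		// The idempotency-token check below may populate a nil field, so the nil test has to come after it.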
+ field := t.Field(i) + + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("json") == "-" { + continue + } + if field.Tag.Get("location") != "" { + continue // ignore non-body elements + } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(member, field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(&token) + } + + if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() { + continue // ignore unset fields + } + + if first { + first = false + } else { + buf.WriteByte(',') + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + writeString(name, buf) + buf.WriteString(`:`) + + err := buildAny(member, buf, field.Tag) + if err != nil { + return err + } + + } + + buf.WriteString("}") + + return nil +} + +func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("[") + + for i := 0; i < value.Len(); i++ { + buildAny(value.Index(i), buf, "") + + if i < value.Len()-1 { + buf.WriteString(",") + } + } + + buf.WriteString("]") + + return nil +} + +type sortedValues []reflect.Value + +func (sv sortedValues) Len() int { return len(sv) } +func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } + +func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + buf.WriteString("{") + + sv := sortedValues(value.MapKeys()) + sort.Sort(sv) + + for i, k := range sv { + if i > 0 { + buf.WriteByte(',') + } + + writeString(k.String(), buf) + buf.WriteString(`:`) + + buildAny(value.MapIndex(k), buf, "") + } + + buf.WriteString("}") + + return nil +} + +func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { + // prevents allocation on the heap. + scratch := [64]byte{} + switch value := reflect.Indirect(v); value.Kind() { + case reflect.String: + writeString(value.String(), buf) + case reflect.Bool: + if value.Bool() { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + case reflect.Int64: + buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10)) + case reflect.Float64: + f := value.Float() + if math.IsInf(f, 0) || math.IsNaN(f) { + return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)} + } + buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64)) + default: + switch converted := value.Interface().(type) { + case time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.UnixTimeFormatName + } + + ts := protocol.FormatTime(format, converted) + if format != protocol.UnixTimeFormatName { + ts = `"` + ts + `"` + } + + buf.WriteString(ts) + case []byte: + if !value.IsNil() { + buf.WriteByte('"') + if len(converted) < 1024 { + // for small buffers, using Encode directly is much faster. + dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) + base64.StdEncoding.Encode(dst, converted) + buf.Write(dst) + } else { + // for large buffers, avoid unnecessary extra temporary + // buffer space. 
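+					// The streaming encoder writes base64 output directly into buf; Close flushes the final partial block.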
+					enc := base64.NewEncoder(base64.StdEncoding, buf)
+					enc.Write(converted)
+					enc.Close()
+				}
+				buf.WriteByte('"')
+			}
+		case aws.JSONValue:
+			str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+			if err != nil {
+				return fmt.Errorf("unable to encode JSONValue, %v", err)
+			}
+			buf.WriteString(str)
+		default:
+			return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+		}
+	}
+	return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+	buf.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		if s[i] == '"' {
+			buf.WriteString(`\"`)
+		} else if s[i] == '\\' {
+			buf.WriteString(`\\`)
+		} else if s[i] == '\b' {
+			buf.WriteString(`\b`)
+		} else if s[i] == '\f' {
+			buf.WriteString(`\f`)
+		} else if s[i] == '\r' {
+			buf.WriteString(`\r`)
+		} else if s[i] == '\t' {
+			buf.WriteString(`\t`)
+		} else if s[i] == '\n' {
+			buf.WriteString(`\n`)
+		} else if s[i] < 32 {
+			buf.WriteString("\\u00")
+			buf.WriteByte(hex[s[i]>>4])
+			buf.WriteByte(hex[s[i]&0xF])
+		} else {
+			buf.WriteByte(s[i])
+		}
+	}
+	buf.WriteByte('"')
+}
+
+// Returns the reflection element of a value, if it is a pointer.
+func elemOf(value reflect.Value) reflect.Value {
+	for value.Kind() == reflect.Ptr {
+		value = value.Elem()
+	}
+	return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644
index 00000000..ea0da79a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
@@ -0,0 +1,250 @@
+package jsonutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed-in
+// type. The value to unmarshal the JSON document into must be a pointer to the
+// type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+	var errBuf bytes.Buffer
+	body := io.TeeReader(stream, &errBuf)
+
+	err := json.NewDecoder(body).Decode(v)
+	if err != nil {
+		msg := "failed decoding error message"
+		if err == io.EOF {
+			msg = "error message missing"
+			err = nil
+		}
+		return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+	}
+
+	return nil
+}
+
+// UnmarshalJSON reads a stream and unmarshals the result into object v.
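+// The stream is first decoded into untyped interface{} values, which are then walked reflectively against v's struct tags.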
+func UnmarshalJSON(v interface{}, stream io.Reader) error { + var out interface{} + + err := json.NewDecoder(stream).Decode(&out) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + return unmarshalAny(reflect.ValueOf(v), out, "") +} + +func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { + vtype := value.Type() + if vtype.Kind() == reflect.Ptr { + vtype = vtype.Elem() // check kind of actual element type + } + + t := tag.Get("type") + if t == "" { + switch vtype.Kind() { + case reflect.Struct: + // also it can't be a time object + if _, ok := value.Interface().(*time.Time); !ok { + t = "structure" + } + case reflect.Slice: + // also it can't be a byte slice + if _, ok := value.Interface().([]byte); !ok { + t = "list" + } + case reflect.Map: + // cannot be a JSONValue map + if _, ok := value.Interface().(aws.JSONValue); !ok { + t = "map" + } + } + } + + switch t { + case "structure": + if field, ok := vtype.FieldByName("_"); ok { + tag = field.Tag + } + return unmarshalStruct(value, data, tag) + case "list": + return unmarshalList(value, data, tag) + case "map": + return unmarshalMap(value, data, tag) + default: + return unmarshalScalar(value, data, tag) + } +} + +func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a structure (%#v)", data) + } + + t := value.Type() + if value.Kind() == reflect.Ptr { + if value.IsNil() { // create the structure if it's nil + s := reflect.New(value.Type().Elem()) + value.Set(s) + value = s + } + + value = value.Elem() + t = t.Elem() + } + + // unwrap any payloads + if payload := tag.Get("payload"); payload != "" { + field, _ := t.FieldByName(payload) + return unmarshalAny(value.FieldByName(payload), data, field.Tag) + } + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath != "" { + continue // ignore unexported fields + } + + // figure out what this field is called + name := field.Name + if locName := field.Tag.Get("locationName"); locName != "" { + name = locName + } + + member := value.FieldByIndex(field.Index) + err := unmarshalAny(member, mapData[name], field.Tag) + if err != nil { + return err + } + } + return nil +} + +func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + listData, ok := data.([]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a list (%#v)", data) + } + + if value.IsNil() { + l := len(listData) + value.Set(reflect.MakeSlice(value.Type(), l, l)) + } + + for i, c := range listData { + err := unmarshalAny(value.Index(i), c, "") + if err != nil { + return err + } + } + + return nil +} + +func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error { + if data == nil { + return nil + } + mapData, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("JSON value is not a map (%#v)", data) + } + + if value.IsNil() { + value.Set(reflect.MakeMap(value.Type())) + } + + for k, v := range mapData { + kvalue := reflect.ValueOf(k) + vvalue := reflect.New(value.Type().Elem()).Elem() + + unmarshalAny(vvalue, v, "") + value.SetMapIndex(kvalue, vvalue) + } + + return nil +} + +func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { + + switch d := data.(type) { + case nil: + return nil // nothing to do here + case string: + switch 
value.Interface().(type) { + case *string: + value.Set(reflect.ValueOf(&d)) + case []byte: + b, err := base64.StdEncoding.DecodeString(d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(b)) + case *time.Time: + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, d) + if err != nil { + return err + } + value.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + // No need to use escaping as the value is a non-quoted string. + v, err := protocol.DecodeJSONValue(d, protocol.NoEscape) + if err != nil { + return err + } + value.Set(reflect.ValueOf(v)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case float64: + switch value.Interface().(type) { + case *int64: + di := int64(d) + value.Set(reflect.ValueOf(&di)) + case *float64: + value.Set(reflect.ValueOf(&d)) + case *time.Time: + // Time unmarshaled from a float64 can only be epoch seconds + t := time.Unix(int64(d), 0).UTC() + value.Set(reflect.ValueOf(&t)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + case bool: + switch value.Interface().(type) { + case *bool: + value.Set(reflect.ValueOf(&d)) + default: + return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) + } + default: + return fmt.Errorf("unsupported JSON value (%v)", data) + } + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go new file mode 100644 index 00000000..bfedc9fd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -0,0 +1,110 @@ +// Package jsonrpc provides JSON RPC utilities for serialization of AWS +// requests and responses. +package jsonrpc + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +var emptyJSON = []byte("{}") + +// BuildHandler is a named request handler for building jsonrpc protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} + +// Build builds a JSON payload for a JSON RPC request. 
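+// When no parameters are set an empty JSON body ({}) is used, and the X-Amz-Target header is formed from the service target prefix and the operation name.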
+func Build(req *request.Request) { + var buf []byte + var err error + if req.ParamsFilled() { + buf, err = jsonutil.BuildJSON(req.Params) + if err != nil { + req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err) + return + } + } else { + buf = emptyJSON + } + + if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { + req.SetBufferBody(buf) + } + + if req.ClientInfo.TargetPrefix != "" { + target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name + req.HTTPRequest.Header.Add("X-Amz-Target", target) + } + + // Only set the content type if one is not already specified and an + // JSONVersion is specified. + if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 { + jsonVersion := req.ClientInfo.JSONVersion + req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion) + } +} + +// Unmarshal unmarshals a response for a JSON RPC service. +func Unmarshal(req *request.Request) { + defer req.HTTPResponse.Body.Close() + if req.DataFilled() { + err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + } + } + return +} + +// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. +func UnmarshalMeta(req *request.Request) { + rest.UnmarshalMeta(req) +} + +// UnmarshalError unmarshals an error response for a JSON RPC service. +func UnmarshalError(req *request.Request) { + defer req.HTTPResponse.Body.Close() + + var jsonErr jsonErrorResponse + err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body) + if err != nil { + req.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + req.HTTPResponse.StatusCode, + req.RequestID, + ) + return + } + + codes := strings.SplitN(jsonErr.Code, "#", 2) + req.Error = awserr.NewRequestFailure( + awserr.New(codes[len(codes)-1], jsonErr.Message, nil), + req.HTTPResponse.StatusCode, + req.RequestID, + ) +} + +type jsonErrorResponse struct { + Code string `json:"__type"` + Message string `json:"message"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go new file mode 100644 index 00000000..776d1101 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go @@ -0,0 +1,76 @@ +package protocol + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" +) + +// EscapeMode is the mode that should be use for escaping a value +type EscapeMode uint + +// The modes for escaping a value before it is marshaled, and unmarshaled. +const ( + NoEscape EscapeMode = iota + Base64Escape + QuotedEscape +) + +// EncodeJSONValue marshals the value into a JSON string, and optionally base64 +// encodes the string before returning it. +// +// Will panic if the escape mode is unknown. 
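+// For example, aws.JSONValue{"k": "v"} encodes to {"k":"v"} with NoEscape and to the quoted form "{\"k\":\"v\"}" with QuotedEscape.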
+func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
+	b, err := json.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+
+	switch escape {
+	case NoEscape:
+		return string(b), nil
+	case Base64Escape:
+		return base64.StdEncoding.EncodeToString(b), nil
+	case QuotedEscape:
+		return strconv.Quote(string(b)), nil
+	}
+
+	panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
+}
+
+// DecodeJSONValue will attempt to decode the string input as a JSONValue,
+// optionally base64 decoding the value first before JSON unmarshaling.
+//
+// Will panic if the escape mode is unknown.
+func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
+	var b []byte
+	var err error
+
+	switch escape {
+	case NoEscape:
+		b = []byte(v)
+	case Base64Escape:
+		b, err = base64.StdEncoding.DecodeString(v)
+	case QuotedEscape:
+		var u string
+		u, err = strconv.Unquote(v)
+		b = []byte(u)
+	default:
+		panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	m := aws.JSONValue{}
+	err = json.Unmarshal(b, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
new file mode 100644
index 00000000..e21614a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
@@ -0,0 +1,81 @@
+package protocol
+
+import (
+	"io"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// PayloadUnmarshaler provides the interface for unmarshaling a payload's
+// reader into an SDK shape.
+type PayloadUnmarshaler interface {
+	UnmarshalPayload(io.Reader, interface{}) error
+}
+
+// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
+// HandlerList. This provides support for unmarshaling a payload reader to
+// a shape without needing an SDK request first.
+type HandlerPayloadUnmarshal struct {
+	Unmarshalers request.HandlerList
+}
+
+// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
+// the Unmarshalers HandlerList provided. Returns an error if unmarshaling
+// fails.
+func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
+	req := &request.Request{
+		HTTPRequest: &http.Request{},
+		HTTPResponse: &http.Response{
+			StatusCode: 200,
+			Header: http.Header{},
+			Body: ioutil.NopCloser(r),
+		},
+		Data: v,
+	}
+
+	h.Unmarshalers.Run(req)
+
+	return req.Error
+}
+
+// PayloadMarshaler provides the interface for marshaling an SDK shape into an
+// io.Writer.
+type PayloadMarshaler interface {
+	MarshalPayload(io.Writer, interface{}) error
+}
+
+// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
+// This provides support for marshaling an SDK shape into an io.Writer without
+// needing an SDK request first.
+type HandlerPayloadMarshal struct {
+	Marshalers request.HandlerList
+}
+
+// MarshalPayload marshals the SDK shape into the io.Writer using the
+// Marshalers HandlerList provided. Returns an error if marshaling fails.
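+// A synthetic request is constructed only so the Marshalers handler list can run; no HTTP call is made.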
+func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error { + req := request.New( + aws.Config{}, + metadata.ClientInfo{}, + request.Handlers{}, + nil, + &request.Operation{HTTPMethod: "GET"}, + v, + nil, + ) + + h.Marshalers.Run(req) + + if req.Error != nil { + return req.Error + } + + io.Copy(w, req.GetBody()) + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 18169f0f..0cb99eb5 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -21,11 +21,11 @@ func Build(r *request.Request) { "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { - r.Error = awserr.New("SerializationError", "failed encoding Query request", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err) return } - if r.ExpireTime == 0 { + if !r.IsPresigned() { r.HTTPRequest.Method = "POST" r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") r.SetBufferBody([]byte(body.Encode())) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 524ca952..75866d01 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string return nil } + if _, ok := value.Interface().([]byte); ok { + return q.parseScalar(v, value, prefix, tag) + } + // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { if listName := tag.Get("locationNameList"); listName == "" { @@ -229,7 +233,12 @@ func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, ta v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32)) case time.Time: const ISO8601UTC = "2006-01-02T15:04:05Z" - v.Set(name, value.UTC().Format(ISO8601UTC)) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + v.Set(name, protocol.FormatTime(format, value)) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name()) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go index e0f4d5a5..f69c1efc 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -23,7 +23,11 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go 
b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go index f2142961..831b0110 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -2,65 +2,68 @@ package query import ( "encoding/xml" - "io/ioutil" + "fmt" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` } -type xmlServiceUnavailableResponse struct { - XMLName xml.Name `xml:"ServiceUnavailableException"` +type xmlResponseError struct { + xmlErrorResponse } -// UnmarshalErrorHandler is a name request handler to unmarshal request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} +func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + const svcUnavailableTagName = "ServiceUnavailableException" + const errorResponseTagName = "ErrorResponse" + + switch start.Name.Local { + case svcUnavailableTagName: + e.Code = svcUnavailableTagName + e.Message = "service is unavailable" + return d.Skip() + + case errorResponseTagName: + return d.DecodeElement(&e.xmlErrorResponse, &start) + + default: + return fmt.Errorf("unknown error response tag, %v", start) + } +} // UnmarshalError unmarshals an error response for an AWS Query service. 
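+// Both ErrorResponse documents and bare ServiceUnavailableException documents are handled by xmlResponseError's custom UnmarshalXML above.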
func UnmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + var respErr xmlResponseError + err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) - return - } - - // First check for specific error - resp := xmlErrorResponse{} - decodeErr := xml.Unmarshal(bodyBytes, &resp) - if decodeErr == nil { - reqID := resp.RequestID - if reqID == "" { - reqID = r.RequestID - } r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), r.HTTPResponse.StatusCode, - reqID, + r.RequestID, ) return } - // Check for unhandled error - servUnavailResp := xmlServiceUnavailableResponse{} - unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) - if unavailErr == nil { - r.Error = awserr.NewRequestFailure( - awserr.New("ServiceUnavailableException", "service is unavailable", nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return + reqID := respErr.RequestID + if len(reqID) == 0 { + reqID = r.RequestID } - // Failed to retrieve any error message from the response body - r.Error = awserr.New("SerializationError", - "failed to decode query XML error response", decodeErr) + r.Error = awserr.NewRequestFailure( + awserr.New(respErr.Code, respErr.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go index 20a41d46..1301b149 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -17,16 +17,16 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - // Whether the byte value can be sent without escaping in AWS URLs var noEscape [256]bool var errValueNotSet = fmt.Errorf("value not set") +var byteSliceType = reflect.TypeOf([]byte{}) + func init() { for i := 0; i < len(noEscape); i++ { // AWS expects every character except these to be escaped @@ -82,8 +82,12 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo if name == "" { name = field.Name } - if m.Kind() == reflect.Ptr { + if kind := m.Kind(); kind == reflect.Ptr { m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } } if !m.IsValid() { continue @@ -92,19 +96,27 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo continue } + // Support the ability to customize values to be marshaled as a + // blob even though they were modeled as a string. Required for S3 + // API operations like SSECustomerKey is modeled as stirng but + // required to be base64 encoded in request. 
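+			// Converting to []byte routes the value through convertType's byte-slice handling so it is base64 encoded as required.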
+ if field.Tag.Get("marshal-as") == "blob" { + m = m.Convert(byteSliceType) + } + var err error switch field.Tag.Get("location") { case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) case "querystring": - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) default: if buildGETQuery { - err = buildQueryString(query, m, name) + err = buildQueryString(query, m, name, field.Tag) } } r.Error = err @@ -135,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) { case string: r.SetStringBody(reader) default: - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to encode REST request", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -145,40 +157,46 @@ func buildBody(r *request.Request, v reflect.Value) { } } -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + name = strings.TrimSpace(name) + str = strings.TrimSpace(str) + header.Add(name, str) return nil } -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) + str, err := convertType(v.MapIndex(key), tag) if err == errValueNotSet { continue } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } + keyStr := strings.TrimSpace(key.String()) + str = strings.TrimSpace(str) - header.Add(prefix+key.String(), str) + header.Add(prefix+keyStr, str) } return nil } -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) if err == errValueNotSet { return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) @@ -190,7 +208,7 @@ func buildURI(u *url.URL, v reflect.Value, name string) error { return nil } -func buildQueryString(query url.Values, v reflect.Value, name string) error { +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { switch value := v.Interface().(type) { case []*string: for _, item := range value { @@ -207,11 +225,11 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error { } } default: - str, err := convertType(v) + str, err := convertType(v, tag) if err == errValueNotSet { 
return nil } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) + return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err) } query.Set(name, str) } @@ -246,13 +264,12 @@ func EscapePath(path string, encodeSep bool) string { return buf.String() } -func convertType(v reflect.Value) (string, error) { +func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) { v = reflect.Indirect(v) if !v.IsValid() { return "", errValueNotSet } - var str string switch value := v.Interface().(type) { case string: str = value @@ -265,9 +282,28 @@ func convertType(v reflect.Value) (string, error) { case float64: str = strconv.FormatFloat(value, 'f', -1, 64) case time.Time: - str = value.UTC().Format(RFC822) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + if tag.Get("location") == "querystring" { + format = protocol.ISO8601TimeFormatName + } + } + str = protocol.FormatTime(format, value) + case aws.JSONValue: + if len(value) == 0 { + return "", errValueNotSet + } + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + str, err = protocol.EncodeJSONValue(value, escaping) + if err != nil { + return "", fmt.Errorf("unable to encode JSONValue, %v", err) + } default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type()) return "", err } return str, nil diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 9c00921c..de021367 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -12,8 +12,10 @@ import ( "strings" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" ) // UnmarshalHandler is a named request handler for unmarshaling rest protocol requests @@ -55,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { payload.Set(reflect.ValueOf(b)) } @@ -63,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) } else { str := string(b) payload.Set(reflect.ValueOf(&str)) @@ -75,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { case "io.ReadSeeker": b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to read response body", err) return } @@ -83,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) { default: io.Copy(ioutil.Discard, r.HTTPResponse.Body) defer r.HTTPResponse.Body.Close() - r.Error = 
awserr.New("SerializationError", + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) } @@ -111,16 +113,16 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) { case "statusCode": unmarshalStatusCode(m, r.HTTPResponse.StatusCode) case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } case "headers": prefix := field.Tag.Get("locationName") err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) + r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err) break } } @@ -158,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err return nil } -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue := tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { return nil } @@ -191,11 +198,25 @@ func unmarshalHeader(v reflect.Value, header string) error { } v.Set(reflect.ValueOf(&f)) case *time.Time: - t, err := time.Parse(RFC822, header) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.RFC822TimeFormatName + } + t, err := protocol.ParseTime(format, header) if err != nil { return err } v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + escaping := protocol.NoEscape + if tag.Get("location") == "header" { + escaping = protocol.Base64Escape + } + m, err := protocol.DecodeJSONValue(header, escaping) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index 7bdf4c85..cf569645 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -36,7 +36,12 @@ func Build(r *request.Request) { var buf bytes.Buffer err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) if err != nil { - r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to encode rest XML request", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } r.SetBufferBody(buf.Bytes()) @@ -50,7 +55,12 @@ func Unmarshal(r *request.Request) { decoder := xml.NewDecoder(r.HTTPResponse.Body) err := xmlutil.UnmarshalXML(r.Data, decoder, "") if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to decode REST XML response", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } } else { diff --git 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go new file mode 100644 index 00000000..b7ed6c6f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go @@ -0,0 +1,72 @@ +package protocol + +import ( + "strconv" + "time" +) + +// Names of time formats supported by the SDK +const ( + RFC822TimeFormatName = "rfc822" + ISO8601TimeFormatName = "iso8601" + UnixTimeFormatName = "unixTimestamp" +) + +// Time formats supported by the SDK +const ( + // RFC 7231#section-7.1.1.1 timestamp format, e.g. Tue, 29 Apr 2014 18:30:38 GMT + RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT" + + // RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z + ISO8601TimeFormat = "2006-01-02T15:04:05Z" +) + +// IsKnownTimestampFormat returns whether the timestamp format name +// is known to the SDK's protocols. +func IsKnownTimestampFormat(name string) bool { + switch name { + case RFC822TimeFormatName: + fallthrough + case ISO8601TimeFormatName: + fallthrough + case UnixTimeFormatName: + return true + default: + return false + } +} + +// FormatTime returns a string value of the time. +func FormatTime(name string, t time.Time) string { + t = t.UTC() + + switch name { + case RFC822TimeFormatName: + return t.Format(RFC822TimeFormat) + case ISO8601TimeFormatName: + return t.Format(ISO8601TimeFormat) + case UnixTimeFormatName: + return strconv.FormatInt(t.Unix(), 10) + default: + panic("unknown timestamp format name, " + name) + } +} + +// ParseTime attempts to parse the time given the format. Returns +// the time if it was able to be parsed, and fails otherwise. +func ParseTime(formatName, value string) (time.Time, error) { + switch formatName { + case RFC822TimeFormatName: + return time.Parse(RFC822TimeFormat, value) + case ISO8601TimeFormatName: + return time.Parse(ISO8601TimeFormat, value) + case UnixTimeFormatName: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return time.Time{}, err + } + return time.Unix(int64(v), 0), nil + default: + panic("unknown timestamp format name, " + formatName) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index c74c1919..cf981fe9 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -13,9 +13,13 @@ import ( "github.com/aws/aws-sdk-go/private/protocol" ) -// BuildXML will serialize params into an xml.Encoder. -// Error will be returned if the serialization of any of the params or nested values fails. +// BuildXML will serialize params into an xml.Encoder. Error will be returned +// if the serialization of any of the params or nested values fails.
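The new timestamp.go helpers above centralize the time formats that were previously duplicated as per-package constants (such as the RFC822 constant removed from rest/build.go earlier in this diff). A hedged sketch of how a caller might exercise them; the vendored import path only resolves from code inside this repository:

```go
package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/private/protocol"
)

func main() {
	t := time.Date(2014, 4, 29, 18, 30, 38, 0, time.UTC)

	// One instant rendered in each of the three named formats.
	fmt.Println(protocol.FormatTime(protocol.RFC822TimeFormatName, t))  // Tue, 29 Apr 2014 18:30:38 GMT
	fmt.Println(protocol.FormatTime(protocol.ISO8601TimeFormatName, t)) // 2014-04-29T18:30:38Z
	fmt.Println(protocol.FormatTime(protocol.UnixTimeFormatName, t))    // 1398796238

	// FormatTime and ParseTime panic on unknown names, so guard first.
	if protocol.IsKnownTimestampFormat(protocol.ISO8601TimeFormatName) {
		back, err := protocol.ParseTime(protocol.ISO8601TimeFormatName, "2014-04-29T18:30:38Z")
		fmt.Println(back, err) // 2014-04-29 18:30:38 +0000 UTC <nil>
	}
}
```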
func BuildXML(params interface{}, e *xml.Encoder) error { + return buildXML(params, e, false) +} + +func buildXML(params interface{}, e *xml.Encoder, sorted bool) error { b := xmlBuilder{encoder: e, namespaces: map[string]string{}} root := NewXMLElement(xml.Name{}) if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil { @@ -23,7 +27,7 @@ func BuildXML(params interface{}, e *xml.Encoder) error { } for _, c := range root.Children { for _, v := range c { - return StructToXML(e, v, false) + return StructToXML(e, v, sorted) } } return nil @@ -83,15 +87,13 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle } } -// buildStruct adds a struct and its fields to the current XMLNode. All fields any any nested +// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested // types are converted to XMLNodes also. func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error { if !value.IsValid() { return nil } - fieldAdded := false - // unwrap payloads if payload := tag.Get("payload"); payload != "" { field, _ := value.Type().FieldByName(payload) @@ -119,6 +121,8 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl child.Attr = append(child.Attr, ns) } + var payloadFields, nonPayloadFields int + t := value.Type() for i := 0; i < value.NumField(); i++ { member := elemOf(value.Field(i)) @@ -131,11 +135,12 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl continue } - mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members + nonPayloadFields++ continue } + payloadFields++ if protocol.CanSetIdempotencyToken(value.Field(i), field) { token := protocol.GetIdempotencyToken() @@ -150,11 +155,11 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl if err := b.buildValue(member, child, mTag); err != nil { return err } - - fieldAdded = true } - if fieldAdded { // only append this child if we have one ore more valid members + // The only case where the child shape is not added is if the shape only contains + // non-payload fields, e.g. headers/query.
+ if !(payloadFields == 0 && nonPayloadFields > 0) { current.AddChild(child) } @@ -279,8 +284,12 @@ func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag refl case float32: str = strconv.FormatFloat(float64(converted), 'f', -1, 32) case time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - str = converted.UTC().Format(ISO8601UTC) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + str = protocol.FormatTime(format, converted) default: return fmt.Errorf("unsupported value for param %s: %v (%s)", tag.Get("locationName"), value.Interface(), value.Type().Name()) diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 64b6ddd3..7108d380 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -1,6 +1,7 @@ package xmlutil import ( + "bytes" "encoding/base64" "encoding/xml" "fmt" @@ -9,13 +10,36 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/private/protocol" ) +// UnmarshalXMLError unmarshals the XML error from the stream into the value +// type specified. The value must be a pointer. If the message fails to +// unmarshal, the message content will be included in the returned error as an +// awserr.UnmarshalError. +func UnmarshalXMLError(v interface{}, stream io.Reader) error { + var errBuf bytes.Buffer + body := io.TeeReader(stream, &errBuf) + + err := xml.NewDecoder(body).Decode(v) + if err != nil && err != io.EOF { + return awserr.NewUnmarshalError(err, + "failed to unmarshal error message", errBuf.Bytes()) + } + + return nil +} + // UnmarshalXML deserializes an xml.Decoder into the container v. V // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail.
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } if n.Children != nil { for _, root := range n.Children { for _, c := range root { @@ -23,7 +47,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { c = wrappedChild[0] // pull out wrapped element } - err := parse(reflect.ValueOf(v), c, "") + err = parse(reflect.ValueOf(v), c, "") if err != nil { if err == io.EOF { return nil @@ -49,9 +73,15 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { if t == "" { switch rtype.Kind() { case reflect.Struct: - t = "structure" + // also it can't be a time object + if _, ok := r.Interface().(*time.Time); !ok { + t = "structure" + } case reflect.Slice: - t = "list" + // also it can't be a byte slice + if _, ok := r.Interface().([]byte); !ok { + t = "list" + } case reflect.Map: t = "map" } @@ -244,8 +274,12 @@ func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { } r.Set(reflect.ValueOf(&v)) case *time.Time: - const ISO8601UTC = "2006-01-02T15:04:05Z" - t, err := time.Parse(ISO8601UTC, node.Text) + format := tag.Get("timestampFormat") + if len(format) == 0 { + format = protocol.ISO8601TimeFormatName + } + + t, err := protocol.ParseTime(format, node.Text) if err != nil { return err } diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 3112512a..515ce152 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -29,6 +29,7 @@ func NewXMLElement(name xml.Name) *XMLNode { // AddChild adds child to the XMLNode. func (n *XMLNode) AddChild(child *XMLNode) { + child.parent = n if _, ok := n.Children[child.Name.Local]; !ok { n.Children[child.Name.Local] = []*XMLNode{} } @@ -40,11 +41,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { out := &XMLNode{} for { tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } if err != nil { - return out, err + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break } switch typed := tok.(type) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go new file mode 100644 index 00000000..8c889ff3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -0,0 +1,16231 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "fmt" + "net/url" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +const opBatchGetItem = "BatchGetItem" + +// BatchGetItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See BatchGetItem for more information on using the BatchGetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { + op := &request.Operation{ + Name: opBatchGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"RequestItems"}, + OutputTokens: []string{"UnprocessedKeys"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &BatchGetItemInput{} + } + + output = &BatchGetItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// BatchGetItem API operation for Amazon DynamoDB. +// +// The BatchGetItem operation returns the attributes of one or more items from +// one or more tables. You identify requested items by primary key. +// +// A single operation can retrieve up to 16 MB of data, which can contain as +// many as 100 items. BatchGetItem returns a partial result if the response +// size limit is exceeded, the table's provisioned throughput is exceeded, or +// an internal processing failure occurs. If a partial result is returned, the +// operation returns a value for UnprocessedKeys. You can use this value to +// retry the operation starting with the next item to get. +// +// If you request more than 100 items, BatchGetItem returns a ValidationException +// with the message "Too many items requested for the BatchGetItem call." +// +// For example, if you ask to retrieve 100 items, but each individual item is +// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB +// limit). It also returns an appropriate UnprocessedKeys value so you can get +// the next page of results. If desired, your application can include its own +// logic to assemble the pages of results into one dataset. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException. +// If at least one of the items is successfully processed, then BatchGetItem +// completes successfully, while returning the keys of the unread items in UnprocessedKeys. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. 
If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// By default, BatchGetItem performs eventually consistent reads on every table +// in the request. If you want strongly consistent reads instead, you can set +// ConsistentRead to true for any or all tables. +// +// In order to minimize response latency, BatchGetItem retrieves items in parallel. +// +// When designing your application, keep in mind that DynamoDB does not return +// items in any particular order. To help parse the response by item, include +// the primary key values for the items in your request in the ProjectionExpression +// parameter. +// +// If a requested item does not exist, it is not returned in the result. Requests +// for nonexistent items consume the minimum read capacity units according to +// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchGetItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem +func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + return out, req.Send() +} + +// BatchGetItemWithContext is the same as BatchGetItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchGetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) { + req, out := c.BatchGetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// BatchGetItemPages iterates over the pages of a BatchGetItem operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetItem method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetItem operation. +// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error { + return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// BatchGetItemPagesWithContext same as BatchGetItemPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *BatchGetItemInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.BatchGetItemRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opBatchWriteItem = "BatchWriteItem" + +// BatchWriteItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchWriteItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchWriteItem for more information on using the BatchWriteItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchWriteItemRequest method. 
+// req, resp := client.BatchWriteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem +func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { + op := &request.Operation{ + Name: opBatchWriteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchWriteItemInput{} + } + + output = &BatchWriteItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// BatchWriteItem API operation for Amazon DynamoDB. +// +// The BatchWriteItem operation puts or deletes multiple items in one or more +// tables. A single call to BatchWriteItem can write up to 16 MB of data, which +// can comprise as many as 25 put or delete requests. Individual items to be +// written can be as large as 400 KB. +// +// BatchWriteItem cannot update items. To update items, use the UpdateItem action. +// +// The individual PutItem and DeleteItem operations specified in BatchWriteItem +// are atomic; however BatchWriteItem as a whole is not. If any requested operations +// fail because the table's provisioned throughput is exceeded or an internal +// processing failure occurs, the failed operations are returned in the UnprocessedItems +// response parameter. You can investigate and optionally resend the requests. +// Typically, you would call BatchWriteItem in a loop. Each iteration would +// check for unprocessed items and submit a new BatchWriteItem request with +// those unprocessed items until all items have been processed. +// +// If none of the items can be processed due to insufficient provisioned throughput +// on all of the tables in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException. +// +// If DynamoDB returns any unprocessed items, you should retry the batch operation +// on those items. However, we strongly recommend that you use an exponential +// backoff algorithm. If you retry the batch operation immediately, the underlying +// read or write requests can still fail due to throttling on the individual +// tables. If you delay the batch operation using exponential backoff, the individual +// requests in the batch are much more likely to succeed. +// +// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations) +// in the Amazon DynamoDB Developer Guide. +// +// With BatchWriteItem, you can efficiently write or delete large amounts of +// data, such as from Amazon EMR, or copy data from another database into DynamoDB. +// In order to improve performance with these large-scale operations, BatchWriteItem +// does not behave in the same way as individual PutItem and DeleteItem calls +// would. For example, you cannot specify conditions on individual put and delete +// requests, and BatchWriteItem does not return deleted items in the response. 
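To make the BatchWriteItem semantics above concrete, here is a sketch of a request mixing one put and one delete against a single table. Table, key, and attribute names are invented, and the two requests deliberately target different items, since the documented rules forbid putting and deleting the same item in one call:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// buildBatchWrite shows the shape of a BatchWriteItem request: one map entry
// per table, each holding up to 25 put/delete WriteRequests.
func buildBatchWrite() *dynamodb.BatchWriteItemInput {
	return &dynamodb.BatchWriteItemInput{
		RequestItems: map[string][]*dynamodb.WriteRequest{
			"Music": {
				{PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("No One You Know")},
					"SongTitle": {S: aws.String("Call Me Today")},
				}}},
				{DeleteRequest: &dynamodb.DeleteRequest{Key: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("Old Artist")},
					"SongTitle": {S: aws.String("Retired Song")},
				}}},
			},
		},
	}
}

func main() { _ = buildBatchWrite() }
```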
+// +// If you use a programming language that supports concurrency, you can use +// threads to write items in parallel. Your application must include the necessary +// logic to manage the threads. With languages that don't support threading, +// you must update or delete the specified items one at a time. In both situations, +// BatchWriteItem performs the specified put and delete operations in parallel, +// giving you the power of the thread pool approach without having to introduce +// complexity into your application. +// +// Parallel processing reduces latency, but each specified put and delete request +// consumes the same number of write capacity units whether it is processed +// in parallel or not. Delete operations on nonexistent items consume one write +// capacity unit. +// +// If one or more of the following is true, DynamoDB rejects the entire batch +// write operation: +// +// * One or more tables specified in the BatchWriteItem request does not +// exist. +// +// * Primary key attributes specified on an item in the request do not match +// those in the corresponding table's primary key schema. +// +// * You try to perform multiple operations on the same item in the same +// BatchWriteItem request. For example, you cannot put and delete the same +// item in the same BatchWriteItem request. +// +// * Your request contains at least two items with identical hash and range +// keys (which essentially is two put operations). +// +// * There are more than 25 requests in the batch. +// +// * Any individual item in a batch exceeds 400 KB. +// +// * The total request size exceeds 16 MB. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchWriteItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException" +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem +func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + return out, req.Send() +} + +// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of +// the ability to pass a context and additional request options. +// +// See BatchWriteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) { + req, out := c.BatchWriteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBackup = "CreateBackup" + +// CreateBackupRequest generates a "aws/request.Request" representing the +// client's request for the CreateBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateBackup for more information on using the CreateBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateBackupRequest method. +// req, resp := client.CreateBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup +func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) { + op := &request.Operation{ + Name: opCreateBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateBackupInput{} + } + + output = &CreateBackupOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// CreateBackup API operation for Amazon DynamoDB. +// +// Creates a backup for an existing table. +// +// Each time you create an on-demand backup, the entire table data is backed +// up. There is no limit to the number of on-demand backups that can be taken. +// +// When you create an on-demand backup, a time marker of the request is cataloged, +// and the backup is created asynchronously, by applying all changes until the +// time of the request to the last full table snapshot. Backup requests are +// processed instantaneously and become available for restore within minutes. +// +// You can call CreateBackup at a maximum rate of 50 times per second. 
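A hedged sketch of calling CreateBackup as documented above; the client setup, table name, and backup name are illustrative:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	// Assumes default credentials and region from the environment.
	svc := dynamodb.New(session.Must(session.NewSession()))
	out, err := svc.CreateBackup(&dynamodb.CreateBackupInput{
		TableName:  aws.String("Music"),
		BackupName: aws.String("Music-2018-12-14"),
	})
	if err != nil {
		fmt.Println("CreateBackup failed:", err)
		return
	}
	fmt.Println("backup ARN:", aws.StringValue(out.BackupDetails.BackupArn))
}
```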
+// +// All backups in DynamoDB work without consuming any provisioned throughput +// on the table. +// +// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed +// to contain all data committed to the table up to 14:24:00, and data committed +// after 14:26:00 will not be. The backup might contain data modifications made +// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency. +// +// Along with data, the following are also included on the backups: +// +// * Global secondary indexes (GSIs) +// +// * Local secondary indexes (LSIs) +// +// * Streams +// +// * Provisioned read and write capacity +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateBackup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTableNotFoundException "TableNotFoundException" +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * ErrCodeTableInUseException "TableInUseException" +// A target table with the specified name is either being created or deleted. +// +// * ErrCodeContinuousBackupsUnavailableException "ContinuousBackupsUnavailableException" +// Backups have not yet been enabled for this table. +// +// * ErrCodeBackupInUseException "BackupInUseException" +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup +func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + return out, req.Send() +} + +// CreateBackupWithContext is the same as CreateBackup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) { + req, out := c.CreateBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opCreateGlobalTable = "CreateGlobalTable" + +// CreateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGlobalTable for more information on using the CreateGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGlobalTableRequest method. +// req, resp := client.CreateGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable +func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) { + op := &request.Operation{ + Name: opCreateGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGlobalTableInput{} + } + + output = &CreateGlobalTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// CreateGlobalTable API operation for Amazon DynamoDB. +// +// Creates a global table from an existing table. A global table creates a replication +// relationship between two or more DynamoDB tables with the same table name +// in the provided Regions. +// +// If you want to add a new replica table to a global table, each of the following +// conditions must be true: +// +// * The table must have the same primary key as all of the other replicas. +// +// * The table must have the same name as all of the other replicas. +// +// * The table must have DynamoDB Streams enabled, with the stream containing +// both the new and the old images of the item. +// +// * None of the replica tables in the global table can contain any data. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// * The global secondary indexes must have the same name. +// +// * The global secondary indexes must have the same hash key and sort key +// (if present). +// +// Write capacity settings should be set consistently across your replica tables +// and secondary indexes. DynamoDB strongly recommends enabling auto scaling +// to manage the write capacity settings for all of your global tables replicas +// and indexes. +// +// If you prefer to manage write capacity settings manually, you should provision +// equal replicated write capacity units to your replica tables. You should +// also provision equal replicated write capacity units to matching secondary +// indexes across your global table. 
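The replica rules above boil down to a small request shape: one ReplicationGroup entry per Region that already hosts the identically named, stream-enabled table. An illustrative sketch, with the table name and Regions invented:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// globalTableInput assumes a table named "Music" already exists in both
// Regions with DynamoDB Streams enabled (new and old images) and no data.
func globalTableInput() *dynamodb.CreateGlobalTableInput {
	return &dynamodb.CreateGlobalTableInput{
		GlobalTableName: aws.String("Music"),
		ReplicationGroup: []*dynamodb.Replica{
			{RegionName: aws.String("us-east-1")},
			{RegionName: aws.String("eu-west-1")},
		},
	}
}

func main() { _ = globalTableInput() }
```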
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateGlobalTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeGlobalTableAlreadyExistsException "GlobalTableAlreadyExistsException" +// The specified global table already exists. +// +// * ErrCodeTableNotFoundException "TableNotFoundException" +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable +func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + return out, req.Send() +} + +// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) { + req, out := c.CreateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateTable = "CreateTable" + +// CreateTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateTable for more information on using the CreateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateTableRequest method. 
+// req, resp := client.CreateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { + op := &request.Operation{ + Name: opCreateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateTableInput{} + } + + output = &CreateTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// CreateTable API operation for Amazon DynamoDB. +// +// The CreateTable operation adds a new table to your account. In an AWS account, +// table names must be unique within each Region. That is, you can have two +// tables with same name if you create the tables in different Regions. +// +// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, +// DynamoDB immediately returns a response with a TableStatus of CREATING. After +// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform +// read and write operations only on an ACTIVE table. +// +// You can optionally define secondary indexes on the new table, as part of +// the CreateTable operation. If you want to create multiple tables with secondary +// indexes on them, you must create the tables sequentially. Only one table +// with secondary indexes can be in the CREATING state at any given time. +// +// You can use the DescribeTable action to check the table status. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation CreateTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. 
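Since CreateTable is asynchronous, a caller typically polls until the table leaves CREATING, as the documentation above notes. A sketch using the SDK's DescribeTable-based waiter; the table definition itself is invented:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("Music"),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
		},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			ReadCapacityUnits:  aws.Int64(5),
			WriteCapacityUnits: aws.Int64(5),
		},
	})
	if err != nil {
		fmt.Println("CreateTable failed:", err)
		return
	}
	// WaitUntilTableExists polls DescribeTable until TableStatus is ACTIVE.
	err = svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{TableName: aws.String("Music")})
	fmt.Println("table ready:", err == nil)
}
```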
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable +func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + return out, req.Send() +} + +// CreateTableWithContext is the same as CreateTable with the addition of +// the ability to pass a context and additional request options. +// +// See CreateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) { + req, out := c.CreateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBackup = "DeleteBackup" + +// DeleteBackupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBackup for more information on using the DeleteBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBackupRequest method. +// req, resp := client.DeleteBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup +func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) { + op := &request.Operation{ + Name: opDeleteBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteBackupInput{} + } + + output = &DeleteBackupOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DeleteBackup API operation for Amazon DynamoDB. +// +// Deletes an existing backup of a table. +// +// You can call DeleteBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteBackup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBackupNotFoundException "BackupNotFoundException" +// Backup not found for the given BackupARN. 
+//
+// * ErrCodeBackupInUseException "BackupInUseException"
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted, or restored to a table.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
+func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) {
+ req, out := c.DeleteBackupRequest(input)
+ return out, req.Send()
+}
+
+// DeleteBackupWithContext is the same as DeleteBackup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBackup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) {
+ req, out := c.DeleteBackupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opDeleteItem = "DeleteItem"
+
+// DeleteItemRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteItem operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteItem for more information on using the DeleteItem
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the DeleteItemRequest method.
+// req, resp := client.DeleteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { + op := &request.Operation{ + Name: opDeleteItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteItemInput{} + } + + output = &DeleteItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DeleteItem API operation for Amazon DynamoDB. +// +// Deletes a single item in a table by primary key. You can perform a conditional +// delete operation that deletes the item if it exists, or if it has an expected +// attribute value. +// +// In addition to deleting an item, you can also return the item's attribute +// values in the same operation, using the ReturnValues parameter. +// +// Unless you specify conditions, the DeleteItem is an idempotent operation; +// running it multiple times on the same item or attribute does not result in +// an error response. +// +// Conditional deletes are useful for deleting items only if specific conditions +// are met. If those conditions are met, DynamoDB performs the delete. Otherwise, +// the item is not deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DeleteItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException" +// A condition specified in the operation could not be evaluated. +// +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException" +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * ErrCodeTransactionConflictException "TransactionConflictException" +// Operation was rejected because there is an ongoing transaction for the item. 
+// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem +func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + return out, req.Send() +} + +// DeleteItemWithContext is the same as DeleteItem with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) { + req, out := c.DeleteItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteTable = "DeleteTable" + +// DeleteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteTable for more information on using the DeleteTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteTableRequest method. +// req, resp := client.DeleteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable +func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { + op := &request.Operation{ + Name: opDeleteTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteTableInput{} + } + + output = &DeleteTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DeleteTable API operation for Amazon DynamoDB. +// +// The DeleteTable operation deletes a table and all of its items. After a DeleteTable +// request, the specified table is in the DELETING state until DynamoDB completes +// the deletion. If the table is in the ACTIVE state, you can delete it. 
If
+// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException.
+// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
+// If the table is already in the DELETING state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such as
+// GetItem and PutItem, on a table in the DELETING state until the table deletion
+// is complete.
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is automatically
+// deleted after 24 hours.
+//
+// Use the DescribeTable action to check the status of the table.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation DeleteTable for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
+func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
+ req, out := c.DeleteTableRequest(input)
+ return out, req.Send()
+}
+
+// DeleteTableWithContext is the same as DeleteTable with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteTable for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) {
+ req, out := c.DeleteTableRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
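+ // The supplied options are applied to the request before Send executes it;
+ // the returned output is only valid when the error from Send is nil.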
+ return out, req.Send() +} + +const opDescribeBackup = "DescribeBackup" + +// DescribeBackupRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeBackup for more information on using the DescribeBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeBackupRequest method. +// req, resp := client.DescribeBackupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) { + op := &request.Operation{ + Name: opDescribeBackup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeBackupInput{} + } + + output = &DescribeBackupOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeBackup API operation for Amazon DynamoDB. +// +// Describes an existing backup of a table. +// +// You can call DescribeBackup at a maximum rate of 10 times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeBackup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBackupNotFoundException "BackupNotFoundException" +// Backup not found for the given BackupARN. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup +func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + return out, req.Send() +} + +// DescribeBackupWithContext is the same as DescribeBackup with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
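+//
+// A minimal sketch of calling this variant with a deadline, using the standard
+// context and time packages; the client value and the backup ARN placeholder
+// are illustrative only:
+//
+// ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 10*time.Second)
+// defer cancel()
+// out, err := client.DescribeBackupWithContext(ctx, &dynamodb.DescribeBackupInput{
+//     BackupArn: aws.String("arn:aws:dynamodb:..."), // illustrative ARN
+// })
+// if err == nil {
+//     fmt.Println(out.BackupDescription)
+// }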
+func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) { + req, out := c.DescribeBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeContinuousBackups = "DescribeContinuousBackups" + +// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeContinuousBackupsRequest method. +// req, resp := client.DescribeContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) { + op := &request.Operation{ + Name: opDescribeContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeContinuousBackupsInput{} + } + + output = &DescribeContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeContinuousBackups API operation for Amazon DynamoDB. +// +// Checks the status of continuous backups and point in time recovery on the +// specified table. Continuous backups are ENABLED on all tables at table creation. +// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set +// to ENABLED. +// +// After continuous backups and point in time recovery are enabled, you can +// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// You can call DescribeContinuousBackups at a maximum rate of 10 times per +// second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeContinuousBackups for usage and error information. 
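+//
+// A minimal sketch; the table name is illustrative only:
+//
+// out, err := client.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
+//     TableName: aws.String("Music"),
+// })
+// if err == nil {
+//     fmt.Println(out.ContinuousBackupsDescription)
+// }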
+// +// Returned Error Codes: +// * ErrCodeTableNotFoundException "TableNotFoundException" +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups +func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + return out, req.Send() +} + +// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) { + req, out := c.DescribeContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeEndpoints = "DescribeEndpoints" + +// DescribeEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEndpoints operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeEndpoints for more information on using the DescribeEndpoints +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeEndpointsRequest method. +// req, resp := client.DescribeEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) { + op := &request.Operation{ + Name: opDescribeEndpoints, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeEndpointsInput{} + } + + output = &DescribeEndpointsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DescribeEndpoints API operation for Amazon DynamoDB. +// +// Returns the regional endpoint information. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeEndpoints for usage and error information. 
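+//
+// A minimal sketch of reading the discovered endpoints; the field names follow
+// the discoverer code below:
+//
+// out, err := client.DescribeEndpoints(&dynamodb.DescribeEndpointsInput{})
+// if err == nil {
+//     for _, e := range out.Endpoints {
+//         fmt.Println(aws.StringValue(e.Address), aws.Int64Value(e.CachePeriodInMinutes))
+//     }
+// }
+//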
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints +func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + return out, req.Send() +} + +// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeEndpoints for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) { + req, out := c.DescribeEndpointsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +type discovererDescribeEndpoints struct { + Client *DynamoDB + Required bool + EndpointCache *crr.EndpointCache + Params map[string]*string + Key string +} + +func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) { + input := &DescribeEndpointsInput{} + + resp, err := d.Client.DescribeEndpoints(input) + if err != nil { + return crr.Endpoint{}, err + } + + endpoint := crr.Endpoint{ + Key: d.Key, + } + + for _, e := range resp.Endpoints { + if e.Address == nil { + continue + } + + cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes) + u, err := url.Parse(*e.Address) + if err != nil { + continue + } + + addr := crr.WeightedAddress{ + URL: u, + Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute), + } + + endpoint.Add(addr) + } + + d.EndpointCache.Add(endpoint) + + return endpoint, nil +} + +func (d *discovererDescribeEndpoints) Handler(r *request.Request) { + endpointKey := crr.BuildEndpointKey(d.Params) + d.Key = endpointKey + + endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required) + if err != nil { + r.Error = err + return + } + + if endpoint.URL != nil && len(endpoint.URL.String()) > 0 { + r.HTTPRequest.URL = endpoint.URL + } +} + +const opDescribeGlobalTable = "DescribeGlobalTable" + +// DescribeGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTable for more information on using the DescribeGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeGlobalTableRequest method. 
+// req, resp := client.DescribeGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableInput{} + } + + output = &DescribeGlobalTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeGlobalTable API operation for Amazon DynamoDB. +// +// Returns information about the specified global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException" +// The specified global table does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable +func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) { + req, out := c.DescribeGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings" + +// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeGlobalTableSettingsRequest method. +// req, resp := client.DescribeGlobalTableSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) { + op := &request.Operation{ + Name: opDescribeGlobalTableSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeGlobalTableSettingsInput{} + } + + output = &DescribeGlobalTableSettingsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeGlobalTableSettings API operation for Amazon DynamoDB. +// +// Describes Region-specific settings for a global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeGlobalTableSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException" +// The specified global table does not exist. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings +func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) { + req, out := c.DescribeGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeLimits = "DescribeLimits" + +// DescribeLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLimits operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeLimits for more information on using the DescribeLimits +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { + op := &request.Operation{ + Name: opDescribeLimits, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLimitsInput{} + } + + output = &DescribeLimitsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeLimits API operation for Amazon DynamoDB. +// +// Returns the current provisioned-capacity limits for your AWS account in a +// Region, both for the Region as a whole and for any one DynamoDB table that +// you create there. +// +// When you establish an AWS account, the account has initial limits on the +// maximum read capacity units and write capacity units that you can provision +// across all of your DynamoDB tables in a given Region. Also, there are per-table +// limits that apply when you create a table there. For more information, see +// Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// page in the Amazon DynamoDB Developer Guide. +// +// Although you can increase these limits by filing a case at AWS Support Center +// (https://console.aws.amazon.com/support/home#/), obtaining the increase is +// not instantaneous. The DescribeLimits action lets you write code to compare +// the capacity you are currently using to those limits imposed by your account +// so that you have enough time to apply for an increase before you hit a limit. +// +// For example, you could use one of the AWS SDKs to do the following: +// +// Call DescribeLimits for a particular Region to obtain your current account +// limits on provisioned capacity there. +// +// Create a variable to hold the aggregate read capacity units provisioned for +// all your tables in that Region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// Call ListTables to obtain a list of all your DynamoDB tables. +// +// For each table name listed by ListTables, do the following: +// +// * Call DescribeTable with the table name. +// +// * Use the data returned by DescribeTable to add the read capacity units +// and write capacity units provisioned for the table itself to your variables. 
+// +// * If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables +// as well. +// +// Report the account limits for that Region returned by DescribeLimits, along +// with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// limits. +// +// The per-table limits apply only when you are creating a new table. They restrict +// the sum of the provisioned capacity of the new table itself and all its global +// secondary indexes. +// +// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned +// capacity extremely rapidly. But the only upper limit that applies is that +// the aggregate provisioned capacity over all your tables and GSIs cannot exceed +// either of the per-account limits. +// +// DescribeLimits should only be called periodically. You can expect throttling +// errors if you call it more than once in a minute. +// +// The DescribeLimits Request element has no content. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeLimits for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits +func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + return out, req.Send() +} + +// DescribeLimitsWithContext is the same as DescribeLimits with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeLimits for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) { + req, out := c.DescribeLimitsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTable = "DescribeTable" + +// DescribeTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTable for more information on using the DescribeTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTableRequest method. 
+// req, resp := client.DescribeTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { + op := &request.Operation{ + Name: opDescribeTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTableInput{} + } + + output = &DescribeTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeTable API operation for Amazon DynamoDB. +// +// Returns information about the table, including the current status of the +// table, when it was created, the primary key schema, and any indexes on the +// table. +// +// If you issue a DescribeTable request immediately after a CreateTable request, +// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable +// uses an eventually consistent query, and the metadata for your table might +// not be available at that moment. Wait for a few seconds, and then try the +// DescribeTable request again. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable +func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + return out, req.Send() +} + +// DescribeTableWithContext is the same as DescribeTable with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) { + req, out := c.DescribeTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDescribeTimeToLive = "DescribeTimeToLive" + +// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTimeToLive operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeTimeToLive for more information on using the DescribeTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeTimeToLiveRequest method. +// req, resp := client.DescribeTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) { + op := &request.Operation{ + Name: opDescribeTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeTimeToLiveInput{} + } + + output = &DescribeTimeToLiveOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// DescribeTimeToLive API operation for Amazon DynamoDB. +// +// Gives a description of the Time to Live (TTL) status on the specified table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeTimeToLive for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive +func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + return out, req.Send() +} + +// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) { + req, out := c.DescribeTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetItem = "GetItem" + +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetItem for more information on using the GetItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetItemRequest method. +// req, resp := client.GetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { + op := &request.Operation{ + Name: opGetItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetItemInput{} + } + + output = &GetItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// GetItem API operation for Amazon DynamoDB. +// +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data +// and there will be no Item element in the response. +// +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation GetItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. 
For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + return out, req.Send() +} + +// GetItemWithContext is the same as GetItem with the addition of +// the ability to pass a context and additional request options. +// +// See GetItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBackups = "ListBackups" + +// ListBackupsRequest generates a "aws/request.Request" representing the +// client's request for the ListBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBackups for more information on using the ListBackups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBackupsRequest method. 
+// req, resp := client.ListBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { + op := &request.Operation{ + Name: opListBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListBackupsInput{} + } + + output = &ListBackupsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// ListBackups API operation for Amazon DynamoDB. +// +// List backups associated with an AWS account. To list backups for a given +// table, specify TableName. ListBackups returns a paginated list of results +// with at most 1 MB worth of items in a page. You can also specify a limit +// for the maximum number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. Note +// that these limits are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListBackups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + return out, req.Send() +} + +// ListBackupsWithContext is the same as ListBackups with the addition of +// the ability to pass a context and additional request options. +// +// See ListBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { + op := &request.Operation{ + Name: opListGlobalTables, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListGlobalTablesInput{} + } + + output = &ListGlobalTablesOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// ListGlobalTables API operation for Amazon DynamoDB. +// +// Lists all global tables that have a replica in the specified Region. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListGlobalTables for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + return out, req.Send() +} + +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListGlobalTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListTables = "ListTables" + +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See ListTables for more information on using the ListTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { + op := &request.Operation{ + Name: opListTables, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListTablesInput{} + } + + output = &ListTablesOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// ListTables API operation for Amazon DynamoDB. +// +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTables for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + return out, req.Send() +} + +// ListTablesWithContext is the same as ListTables with the addition of +// the ability to pass a context and additional request options. +// +// See ListTables for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. +// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { + return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListTablesPagesWithContext same as ListTablesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListTablesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListTablesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListTagsOfResource = "ListTagsOfResource" + +// ListTagsOfResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsOfResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListTagsOfResource for more information on using the ListTagsOfResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListTagsOfResourceRequest method. +// req, resp := client.ListTagsOfResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { + op := &request.Operation{ + Name: opListTagsOfResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsOfResourceInput{} + } + + output = &ListTagsOfResourceOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// ListTagsOfResource API operation for Amazon DynamoDB. +// +// List all tags on an Amazon DynamoDB resource. 
You can call ListTagsOfResource +// up to 10 times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListTagsOfResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + return out, req.Send() +} + +// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of +// the ability to pass a context and additional request options. +// +// See ListTagsOfResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutItem = "PutItem" + +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutItem for more information on using the PutItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutItemRequest method. 
+// req, resp := client.PutItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { + op := &request.Operation{ + Name: opPutItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutItemInput{} + } + + output = &PutItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// PutItem API operation for Amazon DynamoDB. +// +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. You can return the item's attribute values in the same operation, +// using the ReturnValues parameter. +// +// This topic provides general information about the PutItem API. +// +// For information on how to call the PutItem API using the AWS SDK in specific +// languages, see the following: +// +// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) +// +// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) +// +// When you add an item, the primary key attributes are the only required attributes. +// Attribute values cannot be null. String and Binary type attributes must have +// lengths greater than zero. Set type attributes cannot be empty. Requests +// with empty values will be rejected with a ValidationException exception. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name +// of the attribute being used as the partition key for the table. Since every +// record must contain that attribute, the attribute_not_exists function will +// only succeed if no matching item exists. 
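+//
+// As a minimal sketch (the table "Music" and its partition key "Artist" are
+// assumptions for the example), such a conditional put could look like:
+//
+//    input := &dynamodb.PutItemInput{
+//        TableName: aws.String("Music"),
+//        Item: map[string]*dynamodb.AttributeValue{
+//            "Artist":    {S: aws.String("No One You Know")},
+//            "SongTitle": {S: aws.String("Call Me Today")},
+//        },
+//        // Reject the put if an item with this partition key already exists.
+//        ConditionExpression: aws.String("attribute_not_exists(Artist)"),
+//    }
+//    _, err := client.PutItem(input)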
+// +// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation PutItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException" +// A condition specified in the operation could not be evaluated. +// +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException" +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * ErrCodeTransactionConflictException "TransactionConflictException" +// Operation was rejected because there is an ongoing transaction for the item. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + return out, req.Send() +} + +// PutItemWithContext is the same as PutItem with the addition of +// the ability to pass a context and additional request options. +// +// See PutItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opQuery = "Query" + +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See Query for more information on using the Query +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { + op := &request.Operation{ + Name: opQuery, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"ExclusiveStartKey"}, + OutputTokens: []string{"LastEvaluatedKey"}, + LimitToken: "Limit", + TruncationToken: "", + }, + } + + if input == nil { + input = &QueryInput{} + } + + output = &QueryOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// Query API operation for Amazon DynamoDB. +// +// The Query operation finds items based on primary key values. You can query +// any table or secondary index that has a composite primary key (a partition +// key and a sort key). +// +// Use the KeyConditionExpression parameter to provide a specific value for +// the partition key. The Query operation will return all of the items from +// the table or index with that partition key value. You can optionally narrow +// the scope of the Query operation by specifying a sort key value and a comparison +// operator in KeyConditionExpression. To further refine the Query results, +// you can optionally provide a FilterExpression. A FilterExpression determines +// which items within the results should be returned to you. All of the other +// results are discarded. +// +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume +// the minimum number of read capacity units for that type of read operation. +// +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a FilterExpression. +// +// Query results are always sorted by the sort key value. If the data type of +// the sort key is Number, the results are returned in numeric order; otherwise, +// the results are returned in order of UTF-8 bytes. By default, the sort order +// is ascending. To reverse the order, set the ScanIndexForward parameter to +// false. 
+// +// A single Query operation will read up to the maximum number of items set +// (if using the Limit parameter) or a maximum of 1 MB of data and then apply +// any filtering to the results using FilterExpression. If LastEvaluatedKey +// is present in the response, you will need to paginate the result set. For +// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) +// in the Amazon DynamoDB Developer Guide. +// +// FilterExpression is applied after a Query finishes, but before the results +// are returned. A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression. +// +// A Query operation can return an empty result set and a LastEvaluatedKey if +// all the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Query for usage and error information. +// +// Returned Error Codes: +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + return out, req.Send() +} + +// QueryWithContext is the same as Query with the addition of +// the ability to pass a context and additional request options. +// +// See Query for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { + req, out := c.QueryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// QueryPages iterates over the pages of a Query operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Query method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Query operation. +// pageNum := 0 +// err := client.QueryPages(params, +// func(page *dynamodb.QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { + return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// QueryPagesWithContext same as QueryPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *QueryInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.QueryRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*QueryOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opRestoreTableFromBackup = "RestoreTableFromBackup" + +// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromBackup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreTableFromBackupRequest method. 
+// req, resp := client.RestoreTableFromBackupRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
+func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) {
+ op := &request.Operation{
+ Name: opRestoreTableFromBackup,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &RestoreTableFromBackupInput{}
+ }
+
+ output = &RestoreTableFromBackupOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// RestoreTableFromBackup API operation for Amazon DynamoDB.
+//
+// Creates a new table from an existing backup. Any number of users can execute
+// up to 4 concurrent restores (any type of restore) in a given account.
+//
+// You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Amazon CloudWatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation RestoreTableFromBackup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableAlreadyExistsException "TableAlreadyExistsException"
+// A target table with the specified name already exists.
+//
+// * ErrCodeTableInUseException "TableInUseException"
+// A target table with the specified name is either being created or deleted.
+//
+// * ErrCodeBackupNotFoundException "BackupNotFoundException"
+// Backup not found for the given BackupARN.
+//
+// * ErrCodeBackupInUseException "BackupInUseException"
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + return out, req.Send() +} + +// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreTableFromBackup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreTableToPointInTime = "RestoreTableToPointInTime" + +// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreTableToPointInTimeRequest method. +// req, resp := client.RestoreTableToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { + op := &request.Operation{ + Name: opRestoreTableToPointInTime, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreTableToPointInTimeInput{} + } + + output = &RestoreTableToPointInTimeOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// RestoreTableToPointInTime API operation for Amazon DynamoDB. +// +// Restores the specified table to the specified point in time within EarliestRestorableDateTime +// and LatestRestorableDateTime. You can restore your table to any point in +// time during the last 35 days. Any number of users can execute up to 4 concurrent +// restores (any type of restore) in a given account. 
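+//
+// As a minimal sketch (both table names are assumptions for the example), a
+// restore to the latest restorable time could be requested as:
+//
+//    input := &dynamodb.RestoreTableToPointInTimeInput{
+//        SourceTableName:         aws.String("Music"),
+//        TargetTableName:         aws.String("MusicRestored"),
+//        UseLatestRestorableTime: aws.Bool(true),
+//    }
+//    _, err := client.RestoreTableToPointInTime(input)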
+//
+// When you restore using point in time recovery, DynamoDB restores your table
+// data to the state based on the selected date and time (day:hour:minute:second)
+// to a new table.
+//
+// Along with data, the following are also included on the new restored table
+// using point in time recovery:
+//
+// * Global secondary indexes (GSIs)
+//
+// * Local secondary indexes (LSIs)
+//
+// * Provisioned read and write capacity
+//
+// * Encryption settings
+//
+// All these settings come from the current settings
+// of the source table at the time of restore.
+//
+// You must manually set up the following on the restored table:
+//
+// * Auto scaling policies
+//
+// * IAM policies
+//
+// * Amazon CloudWatch metrics and alarms
+//
+// * Tags
+//
+// * Stream settings
+//
+// * Time to Live (TTL) settings
+//
+// * Point in time recovery settings
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation RestoreTableToPointInTime for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeTableAlreadyExistsException "TableAlreadyExistsException"
+// A target table with the specified name already exists.
+//
+// * ErrCodeTableNotFoundException "TableNotFoundException"
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
+//
+// * ErrCodeTableInUseException "TableInUseException"
+// A target table with the specified name is either being created or deleted.
+//
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeInvalidRestoreTimeException "InvalidRestoreTimeException"
+// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
+// and LatestRestorableDateTime.
+//
+// * ErrCodePointInTimeRecoveryUnavailableException "PointInTimeRecoveryUnavailableException"
+// Point in time recovery has not yet been enabled for this source table.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
+func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) {
+ req, out := c.RestoreTableToPointInTimeRequest(input)
+ return out, req.Send()
+}
+
+// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreTableToPointInTime for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur.
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) {
+ req, out := c.RestoreTableToPointInTimeRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opScan = "Scan"
+
+// ScanRequest generates a "aws/request.Request" representing the
+// client's request for the Scan operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See Scan for more information on using the Scan
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the ScanRequest method.
+// req, resp := client.ScanRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
+func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
+ op := &request.Operation{
+ Name: opScan,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"ExclusiveStartKey"},
+ OutputTokens: []string{"LastEvaluatedKey"},
+ LimitToken: "Limit",
+ TruncationToken: "",
+ },
+ }
+
+ if input == nil {
+ input = &ScanInput{}
+ }
+
+ output = &ScanOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// Scan API operation for Amazon DynamoDB.
+//
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a FilterExpression operation.
+//
+// If the total number of scanned items exceeds the maximum dataset size limit
+// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey
+// value to continue the scan in a subsequent operation. The results also include
+// the number of items exceeding the limit. A scan can result in no table data
+// meeting the filter criteria.
+//
+// A single Scan operation reads up to the maximum number of items set (if using
+// the Limit parameter) or a maximum of 1 MB of data and then applies any filtering
+// to the results using FilterExpression. If LastEvaluatedKey is present in
+// the response, you need to paginate the result set. For more information,
+// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
+// in the Amazon DynamoDB Developer Guide.
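+//
+// As a minimal sketch (the table name is an assumption for the example), manual
+// pagination follows LastEvaluatedKey until it is no longer returned:
+//
+//    input := &dynamodb.ScanInput{TableName: aws.String("Music")}
+//    for {
+//        out, err := client.Scan(input)
+//        if err != nil {
+//            break // handle the error in real code
+//        }
+//        // ... process out.Items ...
+//        if len(out.LastEvaluatedKey) == 0 {
+//            break // no more pages
+//        }
+//        input.ExclusiveStartKey = out.LastEvaluatedKey
+//    }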
+// +// Scan operations proceed sequentially; however, for faster performance on +// a large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) +// in the Amazon DynamoDB Developer Guide. +// +// Scan uses eventually consistent reads when accessing the data in a table; +// therefore, the result set might not include the changes to data in the table +// immediately before the operation began. If you need a consistent copy of +// the data, as of the time that the Scan begins, you can set the ConsistentRead +// parameter to true. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation Scan for usage and error information. +// +// Returned Error Codes: +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + return out, req.Send() +} + +// ScanWithContext is the same as Scan with the addition of +// the ability to pass a context and additional request options. +// +// See Scan for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { + req, out := c.ScanRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { + return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ScanPagesWithContext same as ScanPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ScanInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ScanRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ScanOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagResource for more information on using the TagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagResourceRequest method. +// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagResourceInput{} + } + + output = &TagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// TagResource API operation for Amazon DynamoDB. +// +// Associate a set of tags with an Amazon DynamoDB resource. You can then activate +// these user-defined tags so that they appear on the Billing and Cost Management +// console for cost allocation tracking. 
You can call TagResource up to five
+// times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
+// in the Amazon DynamoDB Developer Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TagResource for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeLimitExceededException "LimitExceededException"
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account limit of 256 tables.
+//
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeInternalServerError "InternalServerError"
+// An error occurred on the server side.
+//
+// * ErrCodeResourceInUseException "ResourceInUseException"
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
+func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ return out, req.Send()
+}
+
+// TagResourceWithContext is the same as TagResource with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TagResource for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
+ req, out := c.TagResourceRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opTransactGetItems = "TransactGetItems"
+
+// TransactGetItemsRequest generates a "aws/request.Request" representing the
+// client's request for the TransactGetItems operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See TransactGetItems for more information on using the TransactGetItems
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the TransactGetItemsRequest method.
+// req, resp := client.TransactGetItemsRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
+func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) {
+ op := &request.Operation{
+ Name: opTransactGetItems,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TransactGetItemsInput{}
+ }
+
+ output = &TransactGetItemsOutput{}
+ req = c.newRequest(op, input, output)
+ if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+ de := discovererDescribeEndpoints{
+ Required: false,
+ EndpointCache: c.endpointCache,
+ Params: map[string]*string{
+ "op": aws.String(req.Operation.Name),
+ },
+ Client: c,
+ }
+
+ for k, v := range de.Params {
+ if v == nil {
+ delete(de.Params, k)
+ }
+ }
+
+ req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+ Name: "crr.endpointdiscovery",
+ Fn: de.Handler,
+ })
+ }
+ return
+}
+
+// TransactGetItems API operation for Amazon DynamoDB.
+//
+// TransactGetItems is a synchronous operation that atomically retrieves multiple
+// items from one or more tables (but not from indexes) in a single account
+// and Region. A TransactGetItems call can contain up to 25 TransactGetItem
+// objects, each of which contains a Get structure that specifies an item to
+// retrieve from a table in the account and Region. A call to TransactGetItems
+// cannot retrieve items from tables in more than one AWS account or Region.
+// The aggregate size of the items in the transaction cannot exceed 4 MB.
+//
+// All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction
+// with up to 4 MB of data, except the following AWS Regions:
+//
+// * China (Beijing)
+//
+// * China (Ningxia)
+//
+// The China (Beijing) and China (Ningxia) Regions support up to 10 items per
+// transaction with up to 4 MB of data.
+//
+// DynamoDB rejects the entire TransactGetItems request if any of the following
+// is true:
+//
+// * A conflicting operation is in the process of updating an item to be
+// read.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * There is a user error, such as an invalid data format.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactGetItems for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+//
+// * ErrCodeTransactionCanceledException "TransactionCanceledException"
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// * A condition in one of the condition expressions is not met.
+//
+// * A table in the TransactWriteItems request is in a different account
+// or region.
+//
+// * More than one action in the TransactWriteItems operation targets the
+// same item.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// * There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// * A table in the TransactGetItems request is in a different account or
+// region.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// * There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of requested items; if an item has no error,
+// it will have a NONE code and a null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+//      * Validation Error: Code: ValidationError Messages: One or more parameter
+//      values were invalid. The update expression attempted to update the secondary
+//      index key beyond allowed size limits. The update expression attempted
+//      to update the secondary index key to an unsupported type. An operand in the
+//      update expression has an incorrect data type. Item size to update has
+//      exceeded the maximum allowed size. Number overflow. Attempting to store
+//      a number with magnitude larger than supported range. Type mismatch for
+//      attribute to update. Nesting levels have exceeded supported limits. The
+//      document path provided in the update expression is invalid for update.
+//      The provided expression refers to an attribute that does not exist in
+//      the item.
+//
+//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+//   Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+//   requests that receive this exception. Your request is eventually successful,
+//   unless your retry queue is too large to finish. Reduce the frequency of requests
+//   and use exponential backoff. For more information, go to Error Retries and
+//   Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
+//   in the Amazon DynamoDB Developer Guide.
+//
+//   * ErrCodeInternalServerError "InternalServerError"
+//   An error occurred on the server side.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
+func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) {
+	req, out := c.TransactGetItemsRequest(input)
+	return out, req.Send()
+}
+
+// TransactGetItemsWithContext is the same as TransactGetItems with the addition of
+// the ability to pass a context and additional request options.
+//
+// See TransactGetItems for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) {
+	req, out := c.TransactGetItemsRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opTransactWriteItems = "TransactWriteItems"
+
+// TransactWriteItemsRequest generates a "aws/request.Request" representing the
+// client's request for the TransactWriteItems operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See TransactWriteItems for more information on using the TransactWriteItems
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the TransactWriteItemsRequest method.
+//    req, resp := client.TransactWriteItemsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
+func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) {
+	op := &request.Operation{
+		Name:       opTransactWriteItems,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &TransactWriteItemsInput{}
+	}
+
+	output = &TransactWriteItemsOutput{}
+	req = c.newRequest(op, input, output)
+	if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
+		de := discovererDescribeEndpoints{
+			Required:      false,
+			EndpointCache: c.endpointCache,
+			Params: map[string]*string{
+				"op": aws.String(req.Operation.Name),
+			},
+			Client: c,
+		}
+
+		for k, v := range de.Params {
+			if v == nil {
+				delete(de.Params, k)
+			}
+		}
+
+		req.Handlers.Build.PushFrontNamed(request.NamedHandler{
+			Name: "crr.endpointdiscovery",
+			Fn:   de.Handler,
+		})
+	}
+	return
+}
+
+// TransactWriteItems API operation for Amazon DynamoDB.
+//
+// TransactWriteItems is a synchronous write operation that groups up to 25
+// action requests. These actions can target items in different tables, but
+// not in different AWS accounts or Regions, and no two actions can target the
+// same item. For example, you cannot both ConditionCheck and Update the same
+// item. The aggregate size of the items in the transaction cannot exceed 4
+// MB.
+//
+// All AWS Regions and AWS GovCloud (US) support up to 25 items per transaction
+// with up to 4 MB of data, except the following AWS Regions:
+//
+//    * China (Beijing)
+//
+//    * China (Ningxia)
+//
+// The China (Beijing) and China (Ningxia) Regions support up to 10 items per
+// transaction with up to 4 MB of data.
+//
+// The actions are completed atomically so that either all of them succeed,
+// or all of them fail. They are defined by the following objects:
+//
+//    * Put — Initiates a PutItem operation to write a new item. This structure
+//    specifies the primary key of the item to be written, the name of the table
+//    to write it in, an optional condition expression that must be satisfied
+//    for the write to succeed, a list of the item's attributes, and a field
+//    indicating whether to retrieve the item's attributes if the condition
+//    is not met.
+//
+//    * Update — Initiates an UpdateItem operation to update an existing item.
+//    This structure specifies the primary key of the item to be updated, the
+//    name of the table where it resides, an optional condition expression that
+//    must be satisfied for the update to succeed, an expression that defines
+//    one or more attributes to be updated, and a field indicating whether to
+//    retrieve the item's attributes if the condition is not met.
+//
+//    * Delete — Initiates a DeleteItem operation to delete an existing item.
+//    This structure specifies the primary key of the item to be deleted, the
+//    name of the table where it resides, an optional condition expression that
+//    must be satisfied for the deletion to succeed, and a field indicating
+//    whether to retrieve the item's attributes if the condition is not met.
+//
+//    * ConditionCheck — Applies a condition to an item that is not being
+//    modified by the transaction. This structure specifies the primary key
+//    of the item to be checked, the name of the table where it resides, a condition
+//    expression that must be satisfied for the transaction to succeed, and
+//    a field indicating whether to retrieve the item's attributes if the condition
+//    is not met.
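+//
+// A minimal illustrative call with a single conditional Put. The "Music"
+// table, its "Artist" key, and the condition expression are assumptions for
+// this sketch:
+//
+//    out, err := client.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
+//        TransactItems: []*dynamodb.TransactWriteItem{{
+//            Put: &dynamodb.Put{
+//                TableName: aws.String("Music"),
+//                Item: map[string]*dynamodb.AttributeValue{
+//                    "Artist": {S: aws.String("No One You Know")},
+//                },
+//                ConditionExpression: aws.String("attribute_not_exists(Artist)"),
+//            },
+//        }},
+//    })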
+//
+// DynamoDB rejects the entire TransactWriteItems request if any of the following
+// is true:
+//
+//    * A condition in one of the condition expressions is not met.
+//
+//    * An ongoing operation is in the process of updating the same item.
+//
+//    * There is insufficient provisioned capacity for the transaction to be
+//    completed.
+//
+//    * An item size becomes too large (larger than 400 KB), a local secondary
+//    index (LSI) becomes too large, or a similar validation error occurs because
+//    of changes made by the transaction.
+//
+//    * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+//    * There is a user error, such as an invalid data format.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation TransactWriteItems for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
+//   The operation tried to access a nonexistent table or index. The resource
+//   might not be specified correctly, or its status might not be ACTIVE.
+//
+//   * ErrCodeTransactionCanceledException "TransactionCanceledException"
+//   The entire transaction request was canceled.
+//
+//   DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+//      * A condition in one of the condition expressions is not met.
+//
+//      * A table in the TransactWriteItems request is in a different account
+//      or Region.
+//
+//      * More than one action in the TransactWriteItems operation targets the
+//      same item.
+//
+//      * There is insufficient provisioned capacity for the transaction to be
+//      completed.
+//
+//      * An item size becomes too large (larger than 400 KB), or a local secondary
+//      index (LSI) becomes too large, or a similar validation error occurs because
+//      of changes made by the transaction.
+//
+//      * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+//      * There is a user error, such as an invalid data format.
+//
+//   DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+//      * There is an ongoing TransactGetItems operation that conflicts with a
+//      concurrent PutItem, UpdateItem, DeleteItem, or TransactWriteItems request.
+//      In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+//      * A table in the TransactGetItems request is in a different account or
+//      Region.
+//
+//      * There is insufficient provisioned capacity for the transaction to be
+//      completed.
+//
+//      * The aggregate size of the items in the transaction exceeds 4 MB.
+//
+//      * There is a user error, such as an invalid data format.
+//
+//   If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+//   property. This property is not set for other languages. Transaction cancellation
+//   reasons are ordered according to the requested items; if an item has no error,
+//   it will have the code NONE and a null message.
+//
+//   Cancellation reason codes and possible error messages:
+//
+//      * No Errors: Code: NONE Message: null
+//
+//      * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+//      conditional request failed.
+//
+//      * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+//      Message: Collection size exceeded.
+//
+//      * Transaction Conflict: Code: TransactionConflict Message: Transaction
+//      is ongoing for the item.
+//
+//      * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+//      Messages: The level of configured provisioned throughput for the table
+//      was exceeded. Consider increasing your provisioning level with the UpdateTable
+//      API. This message is returned when provisioned throughput is exceeded
+//      on a provisioned DynamoDB table. The level of configured provisioned
+//      throughput for one or more global secondary indexes of the table was exceeded.
+//      Consider increasing your provisioning level for the under-provisioned
+//      global secondary indexes with the UpdateTable API. This message is returned
+//      when provisioned throughput is exceeded on a provisioned GSI.
+//
+//      * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+//      the current capacity of your table or index. DynamoDB is automatically
+//      scaling your table or index so please try again shortly. If exceptions
+//      persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+//      This message is returned when writes get throttled on an On-Demand table
+//      as DynamoDB is automatically scaling the table. Throughput exceeds the
+//      current capacity for one or more global secondary indexes. DynamoDB is
+//      automatically scaling your index so please try again shortly. This message
+//      is returned when writes get throttled on an On-Demand GSI as DynamoDB
+//      is automatically scaling the GSI.
+//
+//      * Validation Error: Code: ValidationError Messages: One or more parameter
+//      values were invalid. The update expression attempted to update the secondary
+//      index key beyond allowed size limits. The update expression attempted
+//      to update the secondary index key to an unsupported type. An operand in the
+//      update expression has an incorrect data type. Item size to update has
+//      exceeded the maximum allowed size. Number overflow. Attempting to store
+//      a number with magnitude larger than supported range. Type mismatch for
+//      attribute to update. Nesting levels have exceeded supported limits. The
+//      document path provided in the update expression is invalid for update.
+//      The provided expression refers to an attribute that does not exist in
+//      the item.
+//
+//   * ErrCodeTransactionInProgressException "TransactionInProgressException"
+//   The transaction with the given request token is already in progress.
+//
+//   * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
+//   DynamoDB rejected the request because you retried a request with a different
+//   payload but with an idempotent token that was already used.
+//
+//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
+//   Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+//   requests that receive this exception. Your request is eventually successful,
+//   unless your retry queue is too large to finish. Reduce the frequency of requests
+//   and use exponential backoff.
For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) + return out, req.Send() +} + +// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of +// the ability to pass a context and additional request options. +// +// See TransactWriteItems for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagResource = "UntagResource" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagResource for more information on using the UntagResource +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagResourceInput{} + } + + output = &UntagResourceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UntagResource API operation for Amazon DynamoDB. +// +// Removes the association of tags from an Amazon DynamoDB resource. 
You can +// call UntagResource up to five times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UntagResource for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + return out, req.Send() +} + +// UntagResourceWithContext is the same as UntagResource with the addition of +// the ability to pass a context and additional request options. +// +// See UntagResource for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateContinuousBackups = "UpdateContinuousBackups" + +// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContinuousBackups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateContinuousBackupsRequest method. +// req, resp := client.UpdateContinuousBackupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { + op := &request.Operation{ + Name: opUpdateContinuousBackups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateContinuousBackupsInput{} + } + + output = &UpdateContinuousBackupsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateContinuousBackups API operation for Amazon DynamoDB. +// +// UpdateContinuousBackups enables or disables point in time recovery for the +// specified table. A successful UpdateContinuousBackups call returns the current +// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables +// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus +// will be set to ENABLED. +// +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateContinuousBackups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeTableNotFoundException "TableNotFoundException" +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * ErrCodeContinuousBackupsUnavailableException "ContinuousBackupsUnavailableException" +// Backups have not yet been enabled for this table. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + return out, req.Send() +} + +// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateContinuousBackups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTable = "UpdateGlobalTable" + +// UpdateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTable for more information on using the UpdateGlobalTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGlobalTableRequest method. +// req, resp := client.UpdateGlobalTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { + op := &request.Operation{ + Name: opUpdateGlobalTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGlobalTableInput{} + } + + output = &UpdateGlobalTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateGlobalTable API operation for Amazon DynamoDB. +// +// Adds or removes replicas in the specified global table. The global table +// must already exist to be able to use this operation. Any replica to be added +// must be empty, have the same name as the global table, have the same key +// schema, have DynamoDB Streams enabled, and have the same provisioned and +// maximum write capacity units. +// +// Although you can use UpdateGlobalTable to add replicas and remove replicas +// in a single request, for simplicity we recommend that you issue separate +// requests for adding or removing replicas. +// +// If global secondary indexes are specified, then the following conditions +// must also be met: +// +// * The global secondary indexes must have the same name. +// +// * The global secondary indexes must have the same hash key and sort key +// (if present). +// +// * The global secondary indexes must have the same provisioned and maximum +// write capacity units. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException" +// The specified global table does not exist. +// +// * ErrCodeReplicaAlreadyExistsException "ReplicaAlreadyExistsException" +// The specified replica is already part of the global table. +// +// * ErrCodeReplicaNotFoundException "ReplicaNotFoundException" +// The specified replica is no longer part of the global table. +// +// * ErrCodeTableNotFoundException "TableNotFoundException" +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateGlobalTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" + +// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTableSettings operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGlobalTableSettingsRequest method. 
+// req, resp := client.UpdateGlobalTableSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { + op := &request.Operation{ + Name: opUpdateGlobalTableSettings, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGlobalTableSettingsInput{} + } + + output = &UpdateGlobalTableSettingsOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateGlobalTableSettings API operation for Amazon DynamoDB. +// +// Updates settings for a global table. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTableSettings for usage and error information. +// +// Returned Error Codes: +// * ErrCodeGlobalTableNotFoundException "GlobalTableNotFoundException" +// The specified global table does not exist. +// +// * ErrCodeReplicaNotFoundException "ReplicaNotFoundException" +// The specified replica is no longer part of the global table. +// +// * ErrCodeIndexNotFoundException "IndexNotFoundException" +// The operation tried to access a nonexistent index. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeResourceInUseException "ResourceInUseException" +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + return out, req.Send() +} + +// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of +// the ability to pass a context and additional request options. 
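+//
+// A minimal illustrative use with a deadline; the 30-second timeout and the
+// prepared input value are assumptions for this sketch (it relies on the
+// standard context and time packages):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    out, err := client.UpdateGlobalTableSettingsWithContext(ctx, input)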
+// +// See UpdateGlobalTableSettings for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateItem = "UpdateItem" + +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateItem for more information on using the UpdateItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateItemRequest method. +// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateItemInput{} + } + + output = &UpdateItemOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateItem API operation for Amazon DynamoDB. +// +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateItem for usage and error information. +// +// Returned Error Codes: +// * ErrCodeConditionalCheckFailedException "ConditionalCheckFailedException" +// A condition specified in the operation could not be evaluated. 
+// +// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeItemCollectionSizeLimitExceededException "ItemCollectionSizeLimitExceededException" +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * ErrCodeTransactionConflictException "TransactionConflictException" +// Operation was rejected because there is an ongoing transaction for the item. +// +// * ErrCodeRequestLimitExceeded "RequestLimitExceeded" +// Throughput exceeds the current throughput limit for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a limit increase. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + return out, req.Send() +} + +// UpdateItemWithContext is the same as UpdateItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTable for more information on using the UpdateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTableRequest method. 
+// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + output = &UpdateTableOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateTable API operation for Amazon DynamoDB. +// +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// * Modify the provisioned throughput settings of the table. +// +// * Enable or disable DynamoDB Streams on the table. +// +// * Remove a global secondary index from the table. +// +// * Create a new global secondary index on the table. After the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the table +// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTable for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. 
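+//
+// A minimal illustrative call that changes the table's provisioned throughput;
+// the table name and capacity values are assumptions for this sketch:
+//
+//    out, err := client.UpdateTable(&dynamodb.UpdateTableInput{
+//        TableName: aws.String("Music"),
+//        ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
+//            ReadCapacityUnits:  aws.Int64(10),
+//            WriteCapacityUnits: aws.Int64(10),
+//        },
+//    })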
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + return out, req.Send() +} + +// UpdateTableWithContext is the same as UpdateTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTimeToLive = "UpdateTimeToLive" + +// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTimeToLive operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTimeToLive for more information on using the UpdateTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTimeToLiveRequest method. +// req, resp := client.UpdateTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) { + op := &request.Operation{ + Name: opUpdateTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTimeToLiveInput{} + } + + output = &UpdateTimeToLiveOutput{} + req = c.newRequest(op, input, output) + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + return +} + +// UpdateTimeToLive API operation for Amazon DynamoDB. +// +// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the +// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. +// It can take up to one hour for the change to fully process. Any additional +// UpdateTimeToLive calls for the same table during this one hour duration result +// in a ValidationException. +// +// TTL compares the current time in epoch time format to the time stored in +// the TTL attribute of an item. 
If the epoch time value stored in the attribute +// is less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability +// of throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific +// to the nature of the workload. Items that have expired and not been deleted +// will still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and +// global secondary index immediately in the same eventually consistent way +// as a standard delete operation. +// +// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTimeToLive for usage and error information. +// +// Returned Error Codes: +// * ErrCodeResourceInUseException "ResourceInUseException" +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ErrCodeResourceNotFoundException "ResourceNotFoundException" +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ErrCodeLimitExceededException "LimitExceededException" +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account limit of 256 tables. +// +// * ErrCodeInternalServerError "InternalServerError" +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + return out, req.Send() +} + +// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
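+//
+// A minimal illustrative call that enables TTL; the table name, the "ExpiresAt"
+// attribute, and the context value are assumptions for this sketch:
+//
+//    ctx := context.Background()
+//    out, err := client.UpdateTimeToLiveWithContext(ctx, &dynamodb.UpdateTimeToLiveInput{
+//        TableName: aws.String("Music"),
+//        TimeToLiveSpecification: &dynamodb.TimeToLiveSpecification{
+//            AttributeName: aws.String("ExpiresAt"),
+//            Enabled:       aws.Bool(true),
+//        },
+//    })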
+func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // The data type for the attribute, where: + // + // * S - the attribute is of type String + // + // * N - the attribute is of type Number + // + // * B - the attribute is of type Binary + // + // AttributeType is a required field + AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` +} + +// String returns the string representation +func (s AttributeDefinition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeDefinition) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttributeDefinition) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.AttributeType == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition { + s.AttributeName = &v + return s +} + +// SetAttributeType sets the AttributeType field's value. +func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition { + s.AttributeType = &v + return s +} + +// Represents the data for an attribute. +// +// Each attribute value is described as a name-value pair. The name is the data +// type, and the value is the data itself. +// +// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) +// in the Amazon DynamoDB Developer Guide. +type AttributeValue struct { + _ struct{} `type:"structure"` + + // An attribute of type Binary. For example: + // + // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk" + // + // B is automatically base64 encoded/decoded by the SDK. + B []byte `type:"blob"` + + // An attribute of type Boolean. For example: + // + // "BOOL": true + BOOL *bool `type:"boolean"` + + // An attribute of type Binary Set. For example: + // + // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="] + BS [][]byte `type:"list"` + + // An attribute of type List. For example: + // + // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N", "3.14159"}] + L []*AttributeValue `type:"list"` + + // An attribute of type Map. For example: + // + // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}} + M map[string]*AttributeValue `type:"map"` + + // An attribute of type Number. For example: + // + // "N": "123.45" + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. 
However, DynamoDB treats them as number type + // attributes for mathematical operations. + N *string `type:"string"` + + // An attribute of type Number Set. For example: + // + // "NS": ["42.2", "-19", "7.5", "3.14"] + // + // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility + // across languages and libraries. However, DynamoDB treats them as number type + // attributes for mathematical operations. + NS []*string `type:"list"` + + // An attribute of type Null. For example: + // + // "NULL": true + NULL *bool `type:"boolean"` + + // An attribute of type String. For example: + // + // "S": "Hello" + S *string `type:"string"` + + // An attribute of type String Set. For example: + // + // "SS": ["Giraffe", "Hippo" ,"Zebra"] + SS []*string `type:"list"` +} + +// String returns the string representation +func (s AttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValue) GoString() string { + return s.String() +} + +// SetB sets the B field's value. +func (s *AttributeValue) SetB(v []byte) *AttributeValue { + s.B = v + return s +} + +// SetBOOL sets the BOOL field's value. +func (s *AttributeValue) SetBOOL(v bool) *AttributeValue { + s.BOOL = &v + return s +} + +// SetBS sets the BS field's value. +func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue { + s.BS = v + return s +} + +// SetL sets the L field's value. +func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue { + s.L = v + return s +} + +// SetM sets the M field's value. +func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue { + s.M = v + return s +} + +// SetN sets the N field's value. +func (s *AttributeValue) SetN(v string) *AttributeValue { + s.N = &v + return s +} + +// SetNS sets the NS field's value. +func (s *AttributeValue) SetNS(v []*string) *AttributeValue { + s.NS = v + return s +} + +// SetNULL sets the NULL field's value. +func (s *AttributeValue) SetNULL(v bool) *AttributeValue { + s.NULL = &v + return s +} + +// SetS sets the S field's value. +func (s *AttributeValue) SetS(v string) *AttributeValue { + s.S = &v + return s +} + +// SetSS sets the SS field's value. +func (s *AttributeValue) SetSS(v []*string) *AttributeValue { + s.SS = v + return s +} + +// For the UpdateItem operation, represents the attributes to be modified, the +// action to perform on each, and the new value for each. +// +// You cannot use UpdateItem to update any primary key attributes. Instead, +// you will need to delete the item, and then use PutItem to create a new item +// with new attributes. +// +// Attribute values cannot be null; string and binary type attributes must have +// lengths greater than zero; and set type attributes must not be empty. Requests +// with empty values will be rejected with a ValidationException exception. +type AttributeValueUpdate struct { + _ struct{} `type:"structure"` + + // Specifies how to perform the update. Valid values are PUT (default), DELETE, + // and ADD. The behavior depends on whether the specified primary key already + // exists in the table. + // + // If an item with the specified Key is found in the table: + // + // * PUT - Adds the specified attribute to the item. If the attribute already + // exists, it is replaced by the new value. + // + // * DELETE - If no value is specified, the attribute and its value are removed + // from the item. The data type of the specified value must match the existing + // value's data type. 
If a set of values is specified, then those values + // are subtracted from the old set. For example, if the attribute value was + // the set [a,b,c] and the DELETE action specified [a,c], then the final + // attribute value would be [b]. Specifying an empty set is an error. + // + // * ADD - If the attribute does not already exist, then the attribute and + // its values are added to the item. If the attribute does exist, then the + // behavior of ADD depends on the data type of the attribute: If the existing + // attribute is a number, and if Value is also a number, then the Value is + // mathematically added to the existing attribute. If Value is a negative + // number, then it is subtracted from the existing attribute. If you use + // ADD to increment or decrement a number value for an item that doesn't + // exist before the update, DynamoDB uses 0 as the initial value. In addition, + // if you use ADD to update an existing item, and intend to increment or + // decrement an attribute value which does not yet exist, DynamoDB uses 0 + // as the initial value. For example, suppose that the item you want to update + // does not yet have an attribute named itemcount, but you decide to ADD + // the number 3 to this attribute anyway, even though it currently does not + // exist. DynamoDB will create the itemcount attribute, set its initial value + // to 0, and finally add 3 to it. The result will be a new itemcount attribute + // in the item, with a value of 3. If the existing data type is a set, and + // if the Value is also a set, then the Value is added to the existing set. + // (This is a set operation, not mathematical addition.) For example, if + // the attribute value was the set [1,2], and the ADD action specified [3], + // then the final attribute value would be [1,2,3]. An error occurs if an + // Add action is specified for a set attribute and the attribute type specified + // does not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The same holds true for number + // sets and binary sets. This action is only valid for an existing attribute + // whose data type is number or is a set. Do not use ADD for any other data + // types. + // + // If no item with the specified Key is found: + // + // * PUT - DynamoDB creates a new item with the specified primary key, and + // then adds the attribute. + // + // * DELETE - Nothing happens; there is no attribute to delete. + // + // * ADD - DynamoDB creates an item with the supplied primary key and number + // (or set of numbers) for the attribute value. The only data types allowed + // are number and number set; no other data types can be specified. + Action *string `type:"string" enum:"AttributeAction"` + + // Represents the data for an attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. 
+ Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s AttributeValueUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttributeValueUpdate) GoString() string { + return s.String() +} + +// SetAction sets the Action field's value. +func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate { + s.Action = &v + return s +} + +// SetValue sets the Value field's value. +func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate { + s.Value = v + return s +} + +// Represents the properties of the scaling policy. +type AutoScalingPolicyDescription struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"` +} + +// String returns the string representation +func (s AutoScalingPolicyDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingPolicyDescription) GoString() string { + return s.String() +} + +// SetPolicyName sets the PolicyName field's value. +func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. +func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the autoscaling policy to be modified. +type AutoScalingPolicyUpdate struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string"` + + // Represents a target tracking scaling policy configuration. + // + // TargetTrackingScalingPolicyConfiguration is a required field + TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AutoScalingPolicyUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingPolicyUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingPolicyUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"} + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.TargetTrackingScalingPolicyConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration")) + } + if s.TargetTrackingScalingPolicyConfiguration != nil { + if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil { + invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. 
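// Illustrative sketch only, not generated SDK code: this shows the ADD
// semantics documented above by calling UpdateItem with the legacy
// AttributeUpdates map to atomically increment a numeric attribute. The
// table, key, and attribute names are hypothetical.
func exampleAddToCounter(c *DynamoDB) error {
	_, err := c.UpdateItem(&UpdateItemInput{
		TableName: aws.String("Sessions"),
		Key: map[string]*AttributeValue{
			"SessionId": {S: aws.String("abc-123")},
		},
		AttributeUpdates: map[string]*AttributeValueUpdate{
			// ADD on a Number attribute performs arithmetic addition, and
			// DynamoDB starts from 0 when the attribute does not exist yet.
			"HitCount": {
				Action: aws.String(AttributeActionAdd),
				Value:  &AttributeValue{N: aws.String("1")},
			},
		},
	})
	return err
}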
+func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate { + s.PolicyName = &v + return s +} + +// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. +func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate { + s.TargetTrackingScalingPolicyConfiguration = v + return s +} + +// Represents the autoscaling settings for a global table or global secondary +// index. +type AutoScalingSettingsDescription struct { + _ struct{} `type:"structure"` + + // Disabled autoscaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring autoScaling policy. + AutoScalingRoleArn *string `type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // Information about the scaling policies. + ScalingPolicies []*AutoScalingPolicyDescription `type:"list"` +} + +// String returns the string representation +func (s AutoScalingSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingSettingsDescription) GoString() string { + return s.String() +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. +func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicies sets the ScalingPolicies field's value. +func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription { + s.ScalingPolicies = v + return s +} + +// Represents the autoscaling settings to be modified for a global table or +// global secondary index. +type AutoScalingSettingsUpdate struct { + _ struct{} `type:"structure"` + + // Disabled autoscaling for this global table or global secondary index. + AutoScalingDisabled *bool `type:"boolean"` + + // Role ARN used for configuring autoscaling policy. + AutoScalingRoleArn *string `min:"1" type:"string"` + + // The maximum capacity units that a global table or global secondary index + // should be scaled up to. + MaximumUnits *int64 `min:"1" type:"long"` + + // The minimum capacity units that a global table or global secondary index + // should be scaled down to. + MinimumUnits *int64 `min:"1" type:"long"` + + // The scaling policy to apply for scaling target global table or global secondary + // index capacity units. 
+ ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"` +} + +// String returns the string representation +func (s AutoScalingSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"} + if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1)) + } + if s.MaximumUnits != nil && *s.MaximumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1)) + } + if s.MinimumUnits != nil && *s.MinimumUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1)) + } + if s.ScalingPolicyUpdate != nil { + if err := s.ScalingPolicyUpdate.Validate(); err != nil { + invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAutoScalingDisabled sets the AutoScalingDisabled field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate { + s.AutoScalingDisabled = &v + return s +} + +// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value. +func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate { + s.AutoScalingRoleArn = &v + return s +} + +// SetMaximumUnits sets the MaximumUnits field's value. +func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate { + s.MaximumUnits = &v + return s +} + +// SetMinimumUnits sets the MinimumUnits field's value. +func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate { + s.MinimumUnits = &v + return s +} + +// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value. +func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate { + s.ScalingPolicyUpdate = v + return s +} + +// Represents the properties of a target tracking scaling policy. +type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // autoscaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. 
While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. + ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string { + return s.String() +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription { + s.TargetValue = &v + return s +} + +// Represents the settings of a target tracking scaling policy that will be +// modified. +type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct { + _ struct{} `type:"structure"` + + // Indicates whether scale in by the target tracking policy is disabled. If + // the value is true, scale in is disabled and the target tracking policy won't + // remove capacity from the scalable resource. Otherwise, scale in is enabled + // and the target tracking policy can remove capacity from the scalable resource. + // The default value is false. + DisableScaleIn *bool `type:"boolean"` + + // The amount of time, in seconds, after a scale in activity completes before + // another scale in activity can start. The cooldown period is used to block + // subsequent scale in requests until it has expired. You should scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the cooldown period after a scale-in, application + // autoscaling scales out your scalable target immediately. + ScaleInCooldown *int64 `type:"integer"` + + // The amount of time, in seconds, after a scale out activity completes before + // another scale out activity can start. While the cooldown period is in effect, + // the capacity that has been added by the previous scale out event that initiated + // the cooldown is calculated as part of the desired capacity for the next scale + // out. You should continuously (but not excessively) scale out. 
+ ScaleOutCooldown *int64 `type:"integer"` + + // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 + // (Base 10) or 2e-360 to 2e360 (Base 2). + // + // TargetValue is a required field + TargetValue *float64 `type:"double" required:"true"` +} + +// String returns the string representation +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"} + if s.TargetValue == nil { + invalidParams.Add(request.NewErrParamRequired("TargetValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisableScaleIn sets the DisableScaleIn field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.DisableScaleIn = &v + return s +} + +// SetScaleInCooldown sets the ScaleInCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleInCooldown = &v + return s +} + +// SetScaleOutCooldown sets the ScaleOutCooldown field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.ScaleOutCooldown = &v + return s +} + +// SetTargetValue sets the TargetValue field's value. +func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate { + s.TargetValue = &v + return s +} + +// Contains the description of the backup created for the table. +type BackupDescription struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails `type:"structure"` + + // Contains the details of the table when the backup was created. + SourceTableDetails *SourceTableDetails `type:"structure"` + + // Contains the details of the features enabled on the table when the backup + // was created. For example, LSIs, GSIs, streams, TTL. + SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"` +} + +// String returns the string representation +func (s BackupDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupDescription) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. +func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription { + s.BackupDetails = v + return s +} + +// SetSourceTableDetails sets the SourceTableDetails field's value. +func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription { + s.SourceTableDetails = v + return s +} + +// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value. 
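// Illustrative sketch only, not generated SDK code: it assembles an
// AutoScalingSettingsUpdate with a target tracking policy that aims for a
// target value of 70, leaves scale-in enabled, and validates the structure
// client-side before any request is sent. The policy name and numeric limits
// are hypothetical.
func exampleAutoScalingSettings() (*AutoScalingSettingsUpdate, error) {
	update := &AutoScalingSettingsUpdate{
		MinimumUnits: aws.Int64(5),
		MaximumUnits: aws.Int64(500),
		ScalingPolicyUpdate: &AutoScalingPolicyUpdate{
			PolicyName: aws.String("table-read-tracking"),
			TargetTrackingScalingPolicyConfiguration: &AutoScalingTargetTrackingScalingPolicyConfigurationUpdate{
				TargetValue:      aws.Float64(70.0),
				DisableScaleIn:   aws.Bool(false),
				ScaleInCooldown:  aws.Int64(60),
				ScaleOutCooldown: aws.Int64(60),
			},
		},
	}
	// Validate reports missing required fields (for example TargetValue)
	// before the update is ever sent to DynamoDB.
	if err := update.Validate(); err != nil {
		return nil, err
	}
	return update, nil
}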
+func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription { + s.SourceTableFeatureDetails = v + return s +} + +// Contains the details of the backup created for the table. +type BackupDetails struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // Time at which the backup was created. This is the request time of the backup. + // + // BackupCreationDateTime is a required field + BackupCreationDateTime *time.Time `type:"timestamp" required:"true"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the requested backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // Size of the backup in bytes. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + // + // BackupStatus is a required field + BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from AWS Backup service. + // + // BackupType is a required field + BackupType *string `type:"string" required:"true" enum:"BackupType"` +} + +// String returns the string representation +func (s BackupDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupDetails) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupDetails) SetBackupArn(v string) *BackupDetails { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. +func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupDetails) SetBackupName(v string) *BackupDetails { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupDetails) SetBackupType(v string) *BackupDetails { + s.BackupType = &v + return s +} + +// Contains details for the backup. +type BackupSummary struct { + _ struct{} `type:"structure"` + + // ARN associated with the backup. + BackupArn *string `min:"37" type:"string"` + + // Time at which the backup was created. 
+ BackupCreationDateTime *time.Time `type:"timestamp"` + + // Time at which the automatic on-demand backup created by DynamoDB will expire. + // This SYSTEM on-demand backup expires automatically 35 days after its creation. + BackupExpiryDateTime *time.Time `type:"timestamp"` + + // Name of the specified backup. + BackupName *string `min:"3" type:"string"` + + // Size of the backup in bytes. + BackupSizeBytes *int64 `type:"long"` + + // Backup can be in one of the following states: CREATING, ACTIVE, DELETED. + BackupStatus *string `type:"string" enum:"BackupStatus"` + + // BackupType: + // + // * USER - You create and manage these using the on-demand backup feature. + // + // * SYSTEM - If you delete a table with point-in-time recovery enabled, + // a SYSTEM backup is automatically created and is retained for 35 days (at + // no additional cost). System backups allow you to restore the deleted table + // to the state it was in just before the point of deletion. + // + // * AWS_BACKUP - On-demand backup created by you from AWS Backup service. + BackupType *string `type:"string" enum:"BackupType"` + + // ARN associated with the table. + TableArn *string `type:"string"` + + // Unique identifier for the table. + TableId *string `type:"string"` + + // Name of the table. + TableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s BackupSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BackupSummary) GoString() string { + return s.String() +} + +// SetBackupArn sets the BackupArn field's value. +func (s *BackupSummary) SetBackupArn(v string) *BackupSummary { + s.BackupArn = &v + return s +} + +// SetBackupCreationDateTime sets the BackupCreationDateTime field's value. +func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary { + s.BackupCreationDateTime = &v + return s +} + +// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value. +func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary { + s.BackupExpiryDateTime = &v + return s +} + +// SetBackupName sets the BackupName field's value. +func (s *BackupSummary) SetBackupName(v string) *BackupSummary { + s.BackupName = &v + return s +} + +// SetBackupSizeBytes sets the BackupSizeBytes field's value. +func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary { + s.BackupSizeBytes = &v + return s +} + +// SetBackupStatus sets the BackupStatus field's value. +func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary { + s.BackupStatus = &v + return s +} + +// SetBackupType sets the BackupType field's value. +func (s *BackupSummary) SetBackupType(v string) *BackupSummary { + s.BackupType = &v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *BackupSummary) SetTableArn(v string) *BackupSummary { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *BackupSummary) SetTableId(v string) *BackupSummary { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *BackupSummary) SetTableName(v string) *BackupSummary { + s.TableName = &v + return s +} + +// Represents the input of a BatchGetItem operation. +type BatchGetItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a map that describes + // one or more items to retrieve from that table. Each table name can be used + // only once per BatchGetItem request. 
+ // + // Each element in the map of items to retrieve consists of the following: + // + // * ConsistentRead - If true, a strongly consistent read is used; if false + // (the default), an eventually consistent read is used. + // + // * ExpressionAttributeNames - One or more substitution tokens for attribute + // names in the ProjectionExpression parameter. The following are some use + // cases for using ExpressionAttributeNames: To access an attribute whose + // name conflicts with a DynamoDB reserved word. To create a placeholder + // for repeating occurrences of an attribute name in an expression. To prevent + // special characters in an attribute name from being misinterpreted in an + // expression. Use the # character in an expression to dereference an attribute + // name. For example, consider the following attribute name: Percentile The + // name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could + // specify the following for ExpressionAttributeNames: {"#P":"Percentile"} + // You could then use this substitution in an expression, as in this example: + // #P = :val Tokens that begin with the : character are expression attribute + // values, which are placeholders for the actual value at runtime. For more + // information about expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. For each primary key, you must provide all of the + // key attributes. For example, with a simple primary key, you only need + // to provide the partition key value. For a composite key, you must provide + // both the partition key value and the sort key value. + // + // * ProjectionExpression - A string that identifies one or more attributes + // to retrieve from the table. These attributes can include scalars, sets, + // or elements of a JSON document. The attributes in the expression must + // be separated by commas. If no attribute names are specified, then all + // attributes are returned. If any of the requested attributes are not found, + // they do not appear in the result. For more information, see Accessing + // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + // + // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression + // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + // + // RequestItems is a required field + RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. 
Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +} + +// String returns the string representation +func (s BatchGetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchGetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"} + if s.RequestItems == nil { + invalidParams.Add(request.NewErrParamRequired("RequestItems")) + } + if s.RequestItems != nil && len(s.RequestItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1)) + } + if s.RequestItems != nil { + for i, v := range s.RequestItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRequestItems sets the RequestItems field's value. +func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput { + s.RequestItems = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// Represents the output of a BatchGetItem operation. +type BatchGetItemOutput struct { + _ struct{} `type:"structure"` + + // The read capacity units consumed by the entire BatchGetItem operation. + // + // Each element consists of: + // + // * TableName - The table that consumed the provisioned throughput. + // + // * CapacityUnits - The total number of capacity units consumed. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. + // + // * ProjectionExpression - One or more attributes to be retrieved from the + // table or index. By default, all attributes are returned. If a requested + // attribute is not found, it does not appear in the result. + // + // * ConsistentRead - The consistency of a read operation. If set to true, + // then a strongly consistent read is used; otherwise, an eventually consistent + // read is used. + // + // If there are no unprocessed keys remaining, the response contains an empty + // UnprocessedKeys map. 
+ UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchGetItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchGetItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput { + s.Responses = v + return s +} + +// SetUnprocessedKeys sets the UnprocessedKeys field's value. +func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput { + s.UnprocessedKeys = v + return s +} + +// Represents the input of a BatchWriteItem operation. +type BatchWriteItemInput struct { + _ struct{} `type:"structure"` + + // A map of one or more table names and, for each table, a list of operations + // to be performed (DeleteRequest or PutRequest). Each element in the map consists + // of the following: + // + // * DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // For each primary key, you must provide all of the key attributes. For + // example, with a simple primary key, you only need to provide a value for + // the partition key. For a composite primary key, you must provide values + // for both the partition key and the sort key. + // + // * PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values are rejected + // with a ValidationException exception. If you specify any attributes that + // are part of an index key, then the data types for those attributes must + // match those of the schema in the table's attribute definition. + // + // RequestItems is a required field + RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. 
If set to SIZE, statistics about item collections, if any, that were
+ // modified during the operation are returned in the response. If set to NONE
+ // (the default), no statistics are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+}
+
+// String returns the string representation
+func (s BatchWriteItemInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchWriteItemInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchWriteItemInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"}
+ if s.RequestItems == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestItems"))
+ }
+ if s.RequestItems != nil && len(s.RequestItems) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRequestItems sets the RequestItems field's value.
+func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput {
+ s.RequestItems = v
+ return s
+}
+
+// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
+func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput {
+ s.ReturnConsumedCapacity = &v
+ return s
+}
+
+// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
+func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput {
+ s.ReturnItemCollectionMetrics = &v
+ return s
+}
+
+// Represents the output of a BatchWriteItem operation.
+type BatchWriteItemOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The capacity units consumed by the entire BatchWriteItem operation.
+ //
+ // Each element consists of:
+ //
+ // * TableName - The table that consumed the provisioned throughput.
+ //
+ // * CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []*ConsumedCapacity `type:"list"`
+
+ // A list of tables that were processed by BatchWriteItem and, for each table,
+ // information about any item collections that were affected by individual DeleteItem
+ // or PutItem operations.
+ //
+ // Each entry consists of the following subelements:
+ //
+ // * ItemCollectionKey - The partition key value of the item collection.
+ // This is the same as the partition key value of the item.
+
+ // * SizeEstimateRangeGB - An estimate of item collection size, expressed
+ // in GB. This is a two-element array containing a lower bound and an upper
+ // bound for the estimate. The estimate includes the size of all the items
+ // in the table, plus the size of all attributes projected into all of the
+ // local secondary indexes on the table. Use this estimate to measure whether
+ // a local secondary index is approaching its size limit. The estimate is
+ // subject to change over time; therefore, do not rely on the precision or
+ // accuracy of the estimate.
+ ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
+
+ // A map of tables and requests against those tables that were not processed.
+ // The UnprocessedItems value is in the same form as RequestItems, so you can
+ // provide this value directly to a subsequent BatchWriteItem operation. For more
+ // information, see RequestItems in the Request Parameters section.
+ // + // Each UnprocessedItems entry consists of a table name and, for that table, + // a list of operations to perform (DeleteRequest or PutRequest). + // + // * DeleteRequest - Perform a DeleteItem operation on the specified item. + // The item to be deleted is identified by a Key subelement: Key - A map + // of primary key attribute values that uniquely identify the item. Each + // entry in this map consists of an attribute name and an attribute value. + // + // * PutRequest - Perform a PutItem operation on the specified item. The + // item to be put is identified by an Item subelement: Item - A map of attributes + // and their values. Each entry in this map consists of an attribute name + // and an attribute value. Attribute values must not be null; string and + // binary type attributes must have lengths greater than zero; and set type + // attributes must not be empty. Requests that contain empty values will + // be rejected with a ValidationException exception. If you specify any attributes + // that are part of an index key, then the data types for those attributes + // must match those of the schema in the table's attribute definition. + // + // If there are no unprocessed items remaining, the response contains an empty + // UnprocessedItems map. + UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"` +} + +// String returns the string representation +func (s BatchWriteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchWriteItemOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// SetUnprocessedItems sets the UnprocessedItems field's value. +func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput { + s.UnprocessedItems = v + return s +} + +// Contains the details for the read/write capacity mode. +type BillingModeSummary struct { + _ struct{} `type:"structure"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend + // using PROVISIONED for predictable workloads. + // + // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST. + // We recommend using PAY_PER_REQUEST for unpredictable workloads. + BillingMode *string `type:"string" enum:"BillingMode"` + + // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity + // mode. + LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s BillingModeSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BillingModeSummary) GoString() string { + return s.String() +} + +// SetBillingMode sets the BillingMode field's value. +func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary { + s.BillingMode = &v + return s +} + +// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value. 
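// Illustrative sketch only, not generated SDK code: it drains a batch write
// by feeding UnprocessedItems back into the next request, which works because
// UnprocessedItems has the same shape as RequestItems, as documented above.
// A production caller would add exponential backoff between iterations; the
// table name is hypothetical.
func exampleBatchWriteAll(c *DynamoDB, requests []*WriteRequest) error {
	items := map[string][]*WriteRequest{"Music": requests}
	for len(items) > 0 {
		out, err := c.BatchWriteItem(&BatchWriteItemInput{RequestItems: items})
		if err != nil {
			return err
		}
		// Resubmit whatever DynamoDB throttled; an empty map ends the loop.
		items = out.UnprocessedItems
	}
	return nil
}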
+func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary { + s.LastUpdateToPayPerRequestDateTime = &v + return s +} + +// An ordered list of errors for each item in the request which caused the transaction +// to get cancelled. The values of the list are ordered according to the ordering +// of the TransactWriteItems request parameter. If no error occurred for the +// associated item an error with a Null code and Null message will be present. +type CancellationReason struct { + _ struct{} `type:"structure"` + + // Status code for the result of the cancelled transaction. + Code *string `type:"string"` + + // Item in the request which caused the transaction to get cancelled. + Item map[string]*AttributeValue `type:"map"` + + // Cancellation reason message description. + Message *string `type:"string"` +} + +// String returns the string representation +func (s CancellationReason) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CancellationReason) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *CancellationReason) SetCode(v string) *CancellationReason { + s.Code = &v + return s +} + +// SetItem sets the Item field's value. +func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason { + s.Item = v + return s +} + +// SetMessage sets the Message field's value. +func (s *CancellationReason) SetMessage(v string) *CancellationReason { + s.Message = &v + return s +} + +// Represents the amount of provisioned throughput capacity consumed on a table +// or an index. +type Capacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed on a table or an index. + CapacityUnits *float64 `type:"double"` + + // The total number of read capacity units consumed on a table or an index. + ReadCapacityUnits *float64 `type:"double"` + + // The total number of write capacity units consumed on a table or an index. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation +func (s Capacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Capacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. +func (s *Capacity) SetCapacityUnits(v float64) *Capacity { + s.CapacityUnits = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the selection criteria for a Query or Scan operation: +// +// * For a Query operation, Condition is used for specifying the KeyConditions +// to use when querying a table or an index. For KeyConditions, only the +// following comparison operators are supported: EQ | LE | LT | GE | GT | +// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates +// the query results and returns only the desired values. +// +// * For a Scan operation, Condition is used in a ScanFilter, which evaluates +// the scan results and returns only the desired values. +type Condition struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. 
The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes. For example, equals, greater than, + // less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. 
Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). + // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). 
A target attribute matches if the target value is greater than,
+ // or equal to, the first element and less than, or equal to, the second
+ // element. If an item contains an AttributeValue element of a different
+ // type than the one provided in the request, the value does not match. For
+ // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
+ // not compare to {"NS":["6", "2", "1"]}
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator, see Legacy
+ // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // ComparisonOperator is a required field
+ ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Condition) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Condition"}
+ if s.ComparisonOperator == nil {
+ invalidParams.Add(request.NewErrParamRequired("ComparisonOperator"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeValueList sets the AttributeValueList field's value.
+func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition {
+ s.AttributeValueList = v
+ return s
+}
+
+// SetComparisonOperator sets the ComparisonOperator field's value.
+func (s *Condition) SetComparisonOperator(v string) *Condition {
+ s.ComparisonOperator = &v
+ return s
+}
+
+// Represents a request to perform a check that an item exists or to check the
+// condition of specific attributes of the item.
+type ConditionCheck struct {
+ _ struct{} `type:"structure"`
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // ConditionExpression is a required field
+ ConditionExpression *string `type:"string" required:"true"`
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // The primary key of the item to be checked. Each element consists of an attribute
+ // name and a value for that attribute.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure,
+ // the valid values are: NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
+
+ // Name of the table for the check item request.
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ConditionCheck) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ConditionCheck) GoString() string {
+ return s.String()
+}
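// Illustrative sketch only, not generated SDK code: a legacy ScanFilter built
// from Condition values, using the BETWEEN operator described above. The table
// and attribute names are hypothetical, and new code would normally prefer
// FilterExpression over these legacy parameters.
func exampleScanBetween(c *DynamoDB) (*ScanOutput, error) {
	return c.Scan(&ScanInput{
		TableName: aws.String("Orders"),
		ScanFilter: map[string]*Condition{
			// BETWEEN requires exactly two AttributeValue elements of the
			// same type: the inclusive lower and upper bounds.
			"Total": {
				ComparisonOperator: aws.String(ComparisonOperatorBetween),
				AttributeValueList: []*AttributeValue{
					{N: aws.String("25")},
					{N: aws.String("200")},
				},
			},
		},
	})
}

+// Validate inspects the fields of the type to determine if they are valid.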
+func (s *ConditionCheck) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"} + if s.ConditionExpression == nil { + invalidParams.Add(request.NewErrParamRequired("ConditionExpression")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConditionCheck) SetTableName(v string) *ConditionCheck { + s.TableName = &v + return s +} + +// The capacity units consumed by an operation. The data returned includes the +// total provisioned throughput consumed, along with statistics for the table +// and any indexes involved in the operation. ConsumedCapacity is only returned +// if the request asked for it. For more information, see Provisioned Throughput +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) +// in the Amazon DynamoDB Developer Guide. +type ConsumedCapacity struct { + _ struct{} `type:"structure"` + + // The total number of capacity units consumed by the operation. + CapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on each global index affected by the operation. + GlobalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The amount of throughput consumed on each local index affected by the operation. + LocalSecondaryIndexes map[string]*Capacity `type:"map"` + + // The total number of read capacity units consumed by the operation. + ReadCapacityUnits *float64 `type:"double"` + + // The amount of throughput consumed on the table affected by the operation. + Table *Capacity `type:"structure"` + + // The name of the table that was affected by the operation. + TableName *string `min:"3" type:"string"` + + // The total number of write capacity units consumed by the operation. + WriteCapacityUnits *float64 `type:"double"` +} + +// String returns the string representation +func (s ConsumedCapacity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ConsumedCapacity) GoString() string { + return s.String() +} + +// SetCapacityUnits sets the CapacityUnits field's value. 
+func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity { + s.CapacityUnits = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity { + s.LocalSecondaryIndexes = v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity { + s.ReadCapacityUnits = &v + return s +} + +// SetTable sets the Table field's value. +func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity { + s.Table = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity { + s.TableName = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity { + s.WriteCapacityUnits = &v + return s +} + +// Represents the continuous backups and point in time recovery settings on +// the table. +type ContinuousBackupsDescription struct { + _ struct{} `type:"structure"` + + // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED + // + // ContinuousBackupsStatus is a required field + ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"` + + // The description of the point in time recovery settings applied to the table. + PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"` +} + +// String returns the string representation +func (s ContinuousBackupsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContinuousBackupsDescription) GoString() string { + return s.String() +} + +// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value. +func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription { + s.ContinuousBackupsStatus = &v + return s +} + +// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value. +func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription { + s.PointInTimeRecoveryDescription = v + return s +} + +type CreateBackupInput struct { + _ struct{} `type:"structure"` + + // Specified name for the backup. + // + // BackupName is a required field + BackupName *string `min:"3" type:"string" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
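
As the `ConsumedCapacity` doc comment above notes, the structure is only returned when the request asks for it. A short sketch under the same assumptions as the earlier example (hypothetical `Music` table, default credentials):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String("Music"), // hypothetical table
		Key: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Call Me Today")},
		},
		// Without this, ConsumedCapacity in the response stays nil.
		ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
	})
	if err != nil {
		log.Fatal(err)
	}
	if cc := out.ConsumedCapacity; cc != nil {
		fmt.Printf("%s consumed %.1f capacity units\n",
			aws.StringValue(cc.TableName), aws.Float64Value(cc.CapacityUnits))
	}
}
```
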
+func (s *CreateBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"} + if s.BackupName == nil { + invalidParams.Add(request.NewErrParamRequired("BackupName")) + } + if s.BackupName != nil && len(*s.BackupName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("BackupName", 3)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupName sets the BackupName field's value. +func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput { + s.BackupName = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput { + s.TableName = &v + return s +} + +type CreateBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the backup created for the table. + BackupDetails *BackupDetails `type:"structure"` +} + +// String returns the string representation +func (s CreateBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDetails sets the BackupDetails field's value. +func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput { + s.BackupDetails = v + return s +} + +// Represents a new global secondary index to be added to an existing table. +type CreateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be created. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The key schema for the global secondary index. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // Represents attributes that are copied (projected) from the table into an + // index. These are in addition to the primary key attributes and index key + // attributes, which are automatically projected. + // + // Projection is a required field + Projection *Projection `type:"structure" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` +} + +// String returns the string representation +func (s CreateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
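
`CreateBackupInput` and `CreateBackupOutput` together form the on-demand backup round trip. A hedged sketch of the call (table and backup names are hypothetical):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	// Both fields are required and must be at least 3 characters,
	// matching the min:"3" tags validated above.
	out, err := svc.CreateBackup(&dynamodb.CreateBackupInput{
		TableName:  aws.String("Music"),        // hypothetical table
		BackupName: aws.String("Music-backup"), // hypothetical backup name
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("backup ARN:", aws.StringValue(out.BackupDetails.BackupArn))
}
```
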
+func (s *CreateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type CreateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // The Regions where the global table needs to be created. + // + // ReplicationGroup is a required field + ReplicationGroup []*Replica `type:"list" required:"true"` +} + +// String returns the string representation +func (s CreateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicationGroup == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationGroup")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. 
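
A `CreateGlobalSecondaryIndexAction` is submitted through `UpdateTable` as one element of `GlobalSecondaryIndexUpdates`. The following is an illustrative sketch only; the table name, `Genre` attribute, index name, and throughput figures are assumptions:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
		TableName: aws.String("Music"), // hypothetical table
		// Every attribute used in the new index key schema must also be
		// declared in AttributeDefinitions.
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Genre"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{{
			Create: &dynamodb.CreateGlobalSecondaryIndexAction{
				IndexName: aws.String("GenreIndex"), // hypothetical index
				KeySchema: []*dynamodb.KeySchemaElement{
					{AttributeName: aws.String("Genre"), KeyType: aws.String(dynamodb.KeyTypeHash)},
				},
				Projection: &dynamodb.Projection{
					ProjectionType: aws.String(dynamodb.ProjectionTypeAll),
				},
				ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
					ReadCapacityUnits:  aws.Int64(5),
					WriteCapacityUnits: aws.Int64(5),
				},
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
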
+func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput { + s.ReplicationGroup = v + return s +} + +type CreateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +// Represents a replica to be added. +type CreateReplicaAction struct { + _ struct{} `type:"structure"` + + // The region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction { + s.RegionName = &v + return s +} + +// Represents the input of a CreateTable operation. +type CreateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // + // AttributeDefinitions is a required field + AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. This setting can be changed later. + // + // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using + // PROVISIONED for predictable workloads. + // + // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend + // using PAY_PER_REQUEST for unpredictable workloads. + BillingMode *string `type:"string" enum:"BillingMode"` + + // One or more global secondary indexes (the maximum is 20) to be created on + // the table. Each global secondary index in the array includes the following: + // + // * IndexName - The name of the global secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the global secondary index. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. 
Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units. + GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` + + // Specifies the attributes that make up the primary key for a table or an index. + // The attributes in KeySchema must also be defined in the AttributeDefinitions + // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html) + // in the Amazon DynamoDB Developer Guide. + // + // Each KeySchemaElement in the array is composed of: + // + // * AttributeName - The name of this key attribute. + // + // * KeyType - The role that the key attribute will assume: HASH - partition + // key RANGE - sort key + // + // The partition key of an item is also known as its hash attribute. The term + // "hash attribute" derives from the DynamoDB usage of an internal hash function + // to evenly distribute data items across partitions, based on their partition + // key values. + // + // The sort key of an item is also known as its range attribute. The term "range + // attribute" derives from the way DynamoDB stores items with the same partition + // key physically close together, in sorted order by the sort key value. + // + // For a simple primary key (partition key), you must provide exactly one element + // with a KeyType of HASH. + // + // For a composite primary key (partition key and sort key), you must provide + // exactly two elements, in this order: The first element must have a KeyType + // of HASH, and the second element must have a KeyType of RANGE. + // + // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key) + // in the Amazon DynamoDB Developer Guide. + // + // KeySchema is a required field + KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"` + + // One or more local secondary indexes (the maximum is 5) to be created on the + // table. Each index is scoped to a given partition key value. There is a 10 + // GB size limit per partition key value; otherwise, the size of a local secondary + // index is unconstrained. + // + // Each local secondary index in the array includes the following: + // + // * IndexName - The name of the local secondary index. Must be unique only + // for this table. + // + // * KeySchema - Specifies the key schema for the local secondary index. + // The key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. 
These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes is in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 100. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"` + + // Represents the provisioned throughput settings for a specified table or index. + // The settings can be modified using the UpdateTable operation. + // + // If you set BillingMode as PROVISIONED, you must specify this property. If + // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // Represents the settings used to enable server-side encryption. + SSESpecification *SSESpecification `type:"structure"` + + // The settings for DynamoDB Streams on the table. These settings consist of: + // + // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled + // (true) or disabled (false). + // + // * StreamViewType - When an item in the table is modified, StreamViewType + // determines what information is written to the table's stream. Valid values + // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified + // item are written to the stream. NEW_IMAGE - The entire item, as it appears + // after it was modified, is written to the stream. OLD_IMAGE - The entire + // item, as it appeared before it was modified, is written to the stream. + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to create. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // A list of key-value pairs to label the table. For more information, see Tagging + // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html). + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
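
The `CreateTableInput` documentation above covers a lot of ground; the essential shape is a composite HASH/RANGE key plus matching attribute definitions. A minimal sketch assuming a hypothetical `Music` table and on-demand billing (so `ProvisionedThroughput` is omitted, as the `BillingMode` doc requires):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("Music"), // hypothetical table
		// PAY_PER_REQUEST means ProvisionedThroughput must not be set.
		BillingMode: aws.String(dynamodb.BillingModePayPerRequest),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
			{AttributeName: aws.String("SongTitle"), AttributeType: aws.String(dynamodb.ScalarAttributeTypeS)},
		},
		// Composite primary key: exactly one HASH element first, then RANGE.
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String(dynamodb.KeyTypeHash)},
			{AttributeName: aws.String("SongTitle"), KeyType: aws.String(dynamodb.KeyTypeRange)},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("table status:", aws.StringValue(out.TableDescription.TableStatus))
}
```
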
+func (s *CreateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"} + if s.AttributeDefinitions == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions")) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexes != nil { + for i, v := range s.GlobalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.LocalSecondaryIndexes != nil { + for i, v := range s.LocalSecondaryIndexes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { + s.KeySchema = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. 
+func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateTableInput) SetTableName(v string) *CreateTableInput { + s.TableName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput { + s.Tags = v + return s +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput { + s.TableDescription = v + return s +} + +// Represents a request to perform a DeleteItem operation. +type Delete struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional delete to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be deleted. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which the item to be deleted resides. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Delete) SetConditionExpression(v string) *Delete { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. 
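
The `Delete` type above is the transactional counterpart of `DeleteItemInput`: it is carried inside a `TransactWriteItem`. A hedged sketch (table, key values, and the `Protected` attribute are assumptions):

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	_, err := svc.TransactWriteItems(&dynamodb.TransactWriteItemsInput{
		TransactItems: []*dynamodb.TransactWriteItem{{
			Delete: &dynamodb.Delete{
				TableName: aws.String("Music"), // hypothetical table
				Key: map[string]*dynamodb.AttributeValue{
					"Artist":    {S: aws.String("No One You Know")},
					"SongTitle": {S: aws.String("Call Me Today")},
				},
				// The whole transaction fails unless this condition holds.
				ConditionExpression: aws.String("attribute_not_exists(Protected)"),
			},
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```
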
+func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Delete) SetTableName(v string) *Delete { + s.TableName = &v + return s +} + +type DeleteBackupInput struct { + _ struct{} `type:"structure"` + + // The ARN associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { + s.BackupArn = &v + return s +} + +type DeleteBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation +func (s DeleteBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput { + s.BackupDescription = v + return s +} + +// Represents a global secondary index to be deleted from an existing table. +type DeleteGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be deleted. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// Represents the input of a DeleteItem operation. +type DeleteItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional DeleteItem + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. 
+ //
+ // For more information on expression attribute names, see Specifying Item Attributes
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeNames map[string]*string `type:"map"`
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute
+ // value. For example, suppose that you wanted to check whether the value of
+ // the ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+ // }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see Condition Expressions
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to delete.
+ //
+ // For the primary key, you must provide all of the attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // Key is a required field
+ Key map[string]*AttributeValue `type:"map" required:"true"`
+
+ // Determines the level of detail about provisioned throughput consumption that
+ // is returned in the response:
+ //
+ // * INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary
+ // index that was accessed. Note that some operations, such as GetItem and
+ // BatchGetItem, do not access any indexes at all. In these cases, specifying
+ // INDEXES will only return ConsumedCapacity information for table(s).
+ //
+ // * TOTAL - The response includes only the aggregate ConsumedCapacity for
+ // the operation.
+ //
+ // * NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+ // Determines whether item collection metrics are returned. If set to SIZE,
+ // the response includes statistics about item collections, if any, that were
+ // modified during the operation. If set to NONE (the default), no statistics
+ // are returned.
+ ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+ // Use ReturnValues if you want to get the item attributes as they appeared
+ // before they were deleted. For DeleteItem, the valid values are:
+ //
+ // * NONE - If ReturnValues is not specified, or if its value is NONE, then
+ // nothing is returned. (This setting is the default for ReturnValues.)
+ //
+ // * ALL_OLD - The content of the old item is returned.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // DeleteItem does not recognize any values other than NONE or ALL_OLD.
+ ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+ // The name of the table from which to delete the item.
+ // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the DeleteItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. 
ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the DeleteItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s DeleteItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a replica to be removed. +type DeleteReplicaAction struct { + _ struct{} `type:"structure"` + + // The region of the replica to be removed. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReplicaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReplicaAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction { + s.RegionName = &v + return s +} + +// Represents a request to perform a DeleteItem operation on an item. 
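
Tying the `DeleteItemInput` pieces together, here is an illustrative sketch of a conditional delete that exercises the expression substitution and `ReturnValues` behavior documented above; the table, key values, `Rating` attribute, and threshold are all assumptions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DeleteItem(&dynamodb.DeleteItemInput{
		TableName: aws.String("Music"), // hypothetical table
		Key: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Scared of My Shadow")},
		},
		// #R and :low demonstrate the name/value substitution described
		// above; the attribute and threshold are illustrative.
		ConditionExpression:      aws.String("#R <= :low"),
		ExpressionAttributeNames: map[string]*string{"#R": aws.String("Rating")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":low": {N: aws.String("3")},
		},
		// ALL_OLD returns the deleted item's attributes in the response.
		ReturnValues: aws.String(dynamodb.ReturnValueAllOld),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deleted:", out.Attributes)
}
```
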
+type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRequest) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { + s.Key = v + return s +} + +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { + s.TableDescription = v + return s +} + +type DescribeBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
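
Since `DeleteTable` is asynchronous (the table transitions through a DELETING state), callers typically pair it with the SDK's waiter. A hedged sketch, again with a hypothetical table name:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	if _, err := svc.DeleteTable(&dynamodb.DeleteTableInput{
		TableName: aws.String("Music"), // hypothetical table
	}); err != nil {
		log.Fatal(err)
	}

	// Deletion is asynchronous; block until the table is actually gone.
	if err := svc.WaitUntilTableNotExists(&dynamodb.DescribeTableInput{
		TableName: aws.String("Music"),
	}); err != nil {
		log.Fatal(err)
	}
}
```
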
+func (s *DescribeBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { + s.BackupArn = &v + return s +} + +type DescribeBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { + s.BackupDescription = v + return s +} + +type DescribeContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Name of the table for which the customer wants to check the continuous backups + // and point in time recovery settings. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { + s.TableName = &v + return s +} + +type DescribeContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. 
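
`DescribeContinuousBackups` is the read side of the continuous backup settings described earlier. An illustrative sketch that reports whether point-in-time recovery is enabled (table name is an assumption):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.Must(session.NewSession()))

	out, err := svc.DescribeContinuousBackups(&dynamodb.DescribeContinuousBackupsInput{
		TableName: aws.String("Music"), // hypothetical table
	})
	if err != nil {
		log.Fatal(err)
	}
	desc := out.ContinuousBackupsDescription
	fmt.Println("continuous backups:", aws.StringValue(desc.ContinuousBackupsStatus))
	if pitr := desc.PointInTimeRecoveryDescription; pitr != nil {
		fmt.Println("point-in-time recovery:", aws.StringValue(pitr.PointInTimeRecoveryStatus))
	}
}
```
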
+func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +type DescribeEndpointsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointsInput) GoString() string { + return s.String() +} + +type DescribeEndpointsOutput struct { + _ struct{} `type:"structure"` + + // List of endpoints. + // + // Endpoints is a required field + Endpoints []*Endpoint `type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeEndpointsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointsOutput) GoString() string { + return s.String() +} + +// SetEndpoints sets the Endpoints field's value. +func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { + s.Endpoints = v + return s +} + +type DescribeGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type DescribeGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The name of the global table to describe. 
+ // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeGlobalTableSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGlobalTableSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +type DescribeGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation +func (s DescribeGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +// Represents the input of a DescribeLimits operation. Has no content. +type DescribeLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum total write capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum read capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the read + // capacity units provisioned for its global secondary indexes (GSIs). 
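+ //
+ // Illustrative read (assumes out is a *dynamodb.DescribeLimitsOutput returned
+ // by a DescribeLimits call; the fields may be nil until the call succeeds):
+ //
+ //   fmt.Println(*out.TableMaxReadCapacityUnits, *out.AccountMaxReadCapacityUnits)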
+ TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum write capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the write + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DescribeLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsOutput) GoString() string { + return s.String() +} + +// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxReadCapacityUnits = &v + return s +} + +// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxWriteCapacityUnits = &v + return s +} + +// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxReadCapacityUnits = &v + return s +} + +// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxWriteCapacityUnits = &v + return s +} + +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { + _ struct{} `type:"structure"` + + // The properties of the table. + Table *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeTableOutput) GoString() string { + return s.String() +} + +// SetTable sets the Table field's value. +func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { + s.Table = v + return s +} + +type DescribeTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be described. 
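+ // For example (illustrative; svc is assumed to be a configured client and
+ // "Music" an existing table):
+ //
+ //   ttl, err := svc.DescribeTimeToLive(&dynamodb.DescribeTimeToLiveInput{
+ //       TableName: aws.String("Music"),
+ //   })
+ //   if err == nil {
+ //       fmt.Println(ttl.TimeToLiveDescription)
+ //   }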
+ //
+ // TableName is a required field
+ TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DescribeTimeToLiveInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"}
+ if s.TableName == nil {
+ invalidParams.Add(request.NewErrParamRequired("TableName"))
+ }
+ if s.TableName != nil && len(*s.TableName) < 3 {
+ invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetTableName sets the TableName field's value.
+func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput {
+ s.TableName = &v
+ return s
+}
+
+type DescribeTimeToLiveOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The description of the Time to Live (TTL) status on the specified table.
+ TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s DescribeTimeToLiveOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DescribeTimeToLiveOutput) GoString() string {
+ return s.String()
+}
+
+// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
+func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput {
+ s.TimeToLiveDescription = v
+ return s
+}
+
+// Information about an endpoint.
+type Endpoint struct {
+ _ struct{} `type:"structure"`
+
+ // IP address of the endpoint.
+ //
+ // Address is a required field
+ Address *string `type:"string" required:"true"`
+
+ // Endpoint cache time to live (TTL) value.
+ //
+ // CachePeriodInMinutes is a required field
+ CachePeriodInMinutes *int64 `type:"long" required:"true"`
+}
+
+// String returns the string representation
+func (s Endpoint) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Endpoint) GoString() string {
+ return s.String()
+}
+
+// SetAddress sets the Address field's value.
+func (s *Endpoint) SetAddress(v string) *Endpoint {
+ s.Address = &v
+ return s
+}
+
+// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
+func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
+ s.CachePeriodInMinutes = &v
+ return s
+}
+
+// Represents a condition to be compared with an attribute value. This condition
+// can be used with DeleteItem, PutItem or UpdateItem operations; if the comparison
+// evaluates to true, the operation succeeds; if not, the operation fails. You
+// can use ExpectedAttributeValue in one of two different ways:
+//
+// * Use AttributeValueList to specify one or more values to compare against
+// an attribute. Use ComparisonOperator to specify how you want to perform
+// the comparison. If the comparison evaluates to true, then the conditional
+// operation succeeds.
+//
+// * Use Value to specify a value that DynamoDB will compare against an attribute.
+// If the values match, then ExpectedAttributeValue evaluates to true and
+// the conditional operation succeeds.
Optionally, you can also set Exists +// to false, indicating that you do not expect to find the attribute value +// in the table. In this case, the conditional operation succeeds only if +// the comparison evaluates to false. +// +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Note that if you use both sets of parameters at once, DynamoDB will return +// a ValidationException exception. +type ExpectedAttributeValue struct { + _ struct{} `type:"structure"` + + // One or more values to evaluate against the supplied attribute. The number + // of values in the list depends on the ComparisonOperator being used. + // + // For type Number, value comparisons are numeric. + // + // String value comparisons for greater than, equals, or less than are based + // on ASCII character code values. For example, a is greater than A, and a is + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). + // + // For Binary, DynamoDB treats each byte of the binary data as unsigned when + // it compares binary values. + // + // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) + // in the Amazon DynamoDB Developer Guide. + AttributeValueList []*AttributeValue `type:"list"` + + // A comparator for evaluating attributes in the AttributeValueList. For example, + // equals, greater than, less than, etc. + // + // The following comparison operators are available: + // + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS | + // BEGINS_WITH | IN | BETWEEN + // + // The following are descriptions of each comparison operator. + // + // * EQ : Equal. EQ is supported for all data types, including lists and + // maps. AttributeValueList can contain only one AttributeValue element of + // type String, Number, Binary, String Set, Number Set, or Binary Set. If + // an item contains an AttributeValue element of a different type than the + // one provided in the request, the value does not match. For example, {"S":"6"} + // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", + // "1"]}. + // + // * NE : Not equal. NE is supported for all data types, including lists + // and maps. AttributeValueList can contain only one AttributeValue of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // + // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * LT : Less than. AttributeValueList can contain only one AttributeValue + // of type String, Number, or Binary (not a set type). If an item contains + // an AttributeValue element of a different type than the one provided in + // the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. 
Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * GE : Greater than or equal. AttributeValueList can contain only one + // AttributeValue element of type String, Number, or Binary (not a set type). + // If an item contains an AttributeValue element of a different type than + // the one provided in the request, the value does not match. For example, + // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to + // {"NS":["6", "2", "1"]}. + // + // * GT : Greater than. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does + // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2", + // "1"]}. + // + // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data + // types, including lists and maps. This operator tests for the existence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NOT_NULL, the result is a Boolean true. + // This result is because the attribute "a" exists; its data type is not + // relevant to the NOT_NULL comparison operator. + // + // * NULL : The attribute does not exist. NULL is supported for all data + // types, including lists and maps. This operator tests for the nonexistence + // of an attribute, not its data type. If the data type of attribute "a" + // is null, and you evaluate it using NULL, the result is a Boolean false. + // This is because the attribute "a" exists; its data type is not relevant + // to the NULL comparison operator. + // + // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList + // can contain only one AttributeValue element of type String, Number, or + // Binary (not a set type). If the target attribute of the comparison is + // of type String, then the operator checks for a substring match. If the + // target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. + // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can + // be a list; however, "b" cannot be a set, a map, or a list. + // + // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a + // value in a set. AttributeValueList can contain only one AttributeValue + // element of type String, Number, or Binary (not a set type). If the target + // attribute of the comparison is a String, then the operator checks for + // the absence of a substring match. If the target attribute of the comparison + // is Binary, then the operator checks for the absence of a subsequence of + // the target that matches the input. If the target attribute of the comparison + // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if + // it does not find an exact match with any member of the set. NOT_CONTAINS + // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be + // a list; however, "b" cannot be a set, a map, or a list. + // + // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only + // one AttributeValue of type String or Binary (not a Number or a set type). 
+ // The target attribute of the comparison must be of type String or Binary + // (not a Number or a set type). + // + // * IN : Checks for matching elements in a list. AttributeValueList can + // contain one or more AttributeValue elements of type String, Number, or + // Binary. These attributes are compared against an existing attribute of + // an item. If any elements of the input are equal to the item attribute, + // the expression evaluates to true. + // + // * BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. AttributeValueList must contain two AttributeValue + // elements of the same type, either String, Number, or Binary (not a set + // type). A target attribute matches if the target value is greater than, + // or equal to, the first element and less than, or equal to, the second + // element. If an item contains an AttributeValue element of a different + // type than the one provided in the request, the value does not match. For + // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does + // not compare to {"NS":["6", "2", "1"]} + ComparisonOperator *string `type:"string" enum:"ComparisonOperator"` + + // Causes DynamoDB to evaluate the value before attempting a conditional operation: + // + // * If Exists is true, DynamoDB will check to see if that attribute value + // already exists in the table. If it is found, then the operation succeeds. + // If it is not found, the operation fails with a ConditionCheckFailedException. + // + // * If Exists is false, DynamoDB assumes that the attribute value does not + // exist in the table. If in fact the value does not exist, then the assumption + // is valid and the operation succeeds. If the value is found, despite the + // assumption that it does not exist, the operation fails with a ConditionCheckFailedException. + // + // The default setting for Exists is true. If you supply a Value all by itself, + // DynamoDB assumes the attribute exists: You don't have to set Exists to true, + // because it is implied. + // + // DynamoDB returns a ValidationException if: + // + // * Exists is true but there is no Value to check. (You expect a value to + // exist, but don't specify what that value is.) + // + // * Exists is false but you also provide a Value. (You cannot expect an + // attribute to have a value, while also expecting it not to exist.) + Exists *bool `type:"boolean"` + + // Represents the data for the expected attribute. + // + // Each attribute value is described as a name-value pair. The name is the data + // type, and the value is the data itself. + // + // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes) + // in the Amazon DynamoDB Developer Guide. + Value *AttributeValue `type:"structure"` +} + +// String returns the string representation +func (s ExpectedAttributeValue) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExpectedAttributeValue) GoString() string { + return s.String() +} + +// SetAttributeValueList sets the AttributeValueList field's value. +func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue { + s.AttributeValueList = v + return s +} + +// SetComparisonOperator sets the ComparisonOperator field's value. 
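+//
+// Illustrative sketch (table and attribute names are examples; svc is an
+// assumed configured client and item an assumed map[string]*dynamodb.AttributeValue):
+// a legacy conditional PutItem that succeeds only if "Price" does not already
+// exist on the item:
+//
+//   _, err := svc.PutItem(&dynamodb.PutItemInput{
+//       TableName: aws.String("ProductCatalog"),
+//       Item:      item,
+//       Expected: map[string]*dynamodb.ExpectedAttributeValue{
+//           "Price": {Exists: aws.Bool(false)},
+//       },
+//   })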
+func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue { + s.ComparisonOperator = &v + return s +} + +// SetExists sets the Exists field's value. +func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue { + s.Exists = &v + return s +} + +// SetValue sets the Value field's value. +func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue { + s.Value = v + return s +} + +// Specifies an item and related attribute values to retrieve in a TransactGetItem +// object. +type Get struct { + _ struct{} `type:"structure"` + + // One or more substitution tokens for attribute names in the ProjectionExpression + // parameter. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects that specifies the primary + // key of the item to retrieve. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes of the specified item to + // retrieve from the table. The attributes in the expression must be separated + // by commas. If no attribute names are specified, then all attributes of the + // specified item are returned. If any of the requested attributes are not found, + // they do not appear in the result. + ProjectionExpression *string `type:"string"` + + // The name of the table from which to retrieve the specified item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s Get) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Get) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Get) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Get"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get { + s.ExpressionAttributeNames = v + return s +} + +// SetKey sets the Key field's value. +func (s *Get) SetKey(v map[string]*AttributeValue) *Get { + s.Key = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *Get) SetProjectionExpression(v string) *Get { + s.ProjectionExpression = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Get) SetTableName(v string) *Get { + s.TableName = &v + return s +} + +// Represents the input of a GetItem operation. +type GetItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. 
+ AttributesToGet []*string `min:"1" type:"list"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to retrieve. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes are returned. If + // any of the requested attributes are not found, they do not appear in the + // result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. 
In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // The name of the table containing the requested item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetItemInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput { + s.AttributesToGet = v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput { + s.ConsistentRead = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetKey sets the Key field's value. +func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput { + s.Key = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput { + s.ProjectionExpression = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *GetItemInput) SetTableName(v string) *GetItemInput { + s.TableName = &v + return s +} + +// Represents the output of a GetItem operation. +type GetItemOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the GetItem operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression. 
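+ //
+ // For example (illustrative; svc is assumed to be a configured client and the
+ // table, key, and value are examples):
+ //
+ //   out, err := svc.GetItem(&dynamodb.GetItemInput{
+ //       TableName: aws.String("Music"),
+ //       Key: map[string]*dynamodb.AttributeValue{
+ //           "Artist": {S: aws.String("No One You Know")},
+ //       },
+ //   })
+ //   if err == nil {
+ //       fmt.Println(out.Item)
+ //   }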
+ Item map[string]*AttributeValue `type:"map"`
+}
+
+// String returns the string representation
+func (s GetItemOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetItemOutput) GoString() string {
+ return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput {
+ s.ConsumedCapacity = v
+ return s
+}
+
+// SetItem sets the Item field's value.
+func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput {
+ s.Item = v
+ return s
+}
+
+// Represents the properties of a global secondary index.
+type GlobalSecondaryIndex struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index. The name must be unique among all
+ // other indexes on this table.
+ //
+ // IndexName is a required field
+ IndexName *string `min:"3" type:"string" required:"true"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeySchema is a required field
+ KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ //
+ // Projection is a required field
+ Projection *Projection `type:"structure" required:"true"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndex) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndex) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
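+//
+// Construction sketch (illustrative; the index, attribute, and projection
+// values are examples, not defaults):
+//
+//   gsi := &dynamodb.GlobalSecondaryIndex{
+//       IndexName: aws.String("GenreIndex"),
+//       KeySchema: []*dynamodb.KeySchemaElement{
+//           {AttributeName: aws.String("Genre"), KeyType: aws.String("HASH")},
+//       },
+//       Projection: &dynamodb.Projection{ProjectionType: aws.String("ALL")},
+//   }
+//   _ = gsi.Validate() // returns nil once the required fields above are set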
+func (s *GlobalSecondaryIndex) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.KeySchema == nil { + invalidParams.Add(request.NewErrParamRequired("KeySchema")) + } + if s.KeySchema != nil && len(s.KeySchema) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1)) + } + if s.Projection == nil { + invalidParams.Add(request.NewErrParamRequired("Projection")) + } + if s.KeySchema != nil { + for i, v := range s.KeySchema { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Projection != nil { + if err := s.Projection.Validate(); err != nil { + invalidParams.AddNested("Projection", err.(request.ErrInvalidParams)) + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex { + s.IndexName = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex { + s.KeySchema = v + return s +} + +// SetProjection sets the Projection field's value. +func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex { + s.Projection = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex { + s.ProvisionedThroughput = v + return s +} + +// Represents the properties of a global secondary index. +type GlobalSecondaryIndexDescription struct { + _ struct{} `type:"structure"` + + // Indicates whether the index is currently backfilling. Backfilling is the + // process of reading items from the table and determining whether they can + // be added to the index. (Not all items will qualify: For example, a partition + // key cannot have any duplicate values.) If an item can be added to the index, + // DynamoDB will do so. After all items have been processed, the backfilling + // operation is complete and Backfilling is false. + // + // For indexes that were created during a CreateTable operation, the Backfilling + // attribute does not appear in the DescribeTable output. + Backfilling *bool `type:"boolean"` + + // The Amazon Resource Name (ARN) that uniquely identifies the index. + IndexArn *string `type:"string"` + + // The name of the global secondary index. + IndexName *string `min:"3" type:"string"` + + // The total size of the specified index, in bytes. DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + IndexSizeBytes *int64 `type:"long"` + + // The current state of the global secondary index: + // + // * CREATING - The index is being created. + // + // * UPDATING - The index is being updated. + // + // * DELETING - The index is being deleted. + // + // * ACTIVE - The index is ready for use. 
+ IndexStatus *string `type:"string" enum:"IndexStatus"`
+
+ // The number of items in the specified index. DynamoDB updates this value approximately
+ // every six hours. Recent changes might not be reflected in this value.
+ ItemCount *int64 `type:"long"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ //
+ // For current minimum and maximum provisioned throughput values, see Limits
+ // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
+ // in the Amazon DynamoDB Developer Guide.
+ ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexDescription) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexDescription) GoString() string {
+ return s.String()
+}
+
+// SetBackfilling sets the Backfilling field's value.
+func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription {
+ s.Backfilling = &v
+ return s
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription {
+ s.IndexArn = &v
+ return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription {
+ s.IndexName = &v
+ return s
+}
+
+// SetIndexSizeBytes sets the IndexSizeBytes field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription {
+ s.IndexSizeBytes = &v
+ return s
+}
+
+// SetIndexStatus sets the IndexStatus field's value.
+func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription {
+ s.IndexStatus = &v
+ return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription {
+ s.ItemCount = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
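+//
+// Reading sketch (illustrative; assumes out is a *dynamodb.DescribeTableOutput
+// and that the dereferenced pointers are non-nil):
+//
+//   for _, gsi := range out.Table.GlobalSecondaryIndexes {
+//       fmt.Println(*gsi.IndexName, *gsi.IndexStatus)
+//   }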
+func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// Represents the properties of a global secondary index for the table when
+// the backup was created.
+type GlobalSecondaryIndexInfo struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the global secondary index.
+ IndexName *string `min:"3" type:"string"`
+
+ // The complete key schema for a global secondary index, which consists of one
+ // or more pairs of attribute names and key types:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes
+ // and index key attributes, which are automatically projected.
+ Projection *Projection `type:"structure"`
+
+ // Represents the provisioned throughput settings for the specified global secondary
+ // index.
+ ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
+}
+
+// String returns the string representation
+func (s GlobalSecondaryIndexInfo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlobalSecondaryIndexInfo) GoString() string {
+ return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo {
+ s.IndexName = &v
+ return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo {
+ s.KeySchema = v
+ return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo {
+ s.Projection = v
+ return s
+}
+
+// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
+func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo {
+ s.ProvisionedThroughput = v
+ return s
+}
+
+// Represents one of the following:
+//
+// * A new global secondary index to be added to an existing table.
+//
+// * New provisioned throughput parameters for an existing global secondary
+// index.
+//
+// * An existing global secondary index to be removed from an existing table.
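+//
+// For example (illustrative; svc and the index and table names are assumptions),
+// removing an index through UpdateTable:
+//
+//   update := &dynamodb.GlobalSecondaryIndexUpdate{
+//       Delete: &dynamodb.DeleteGlobalSecondaryIndexAction{
+//           IndexName: aws.String("GenreIndex"),
+//       },
+//   }
+//   _, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
+//       TableName:                   aws.String("Music"),
+//       GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{update},
+//   })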
+type GlobalSecondaryIndexUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a global secondary index on an existing + // table: + // + // * IndexName + // + // * KeySchema + // + // * AttributeDefinitions + // + // * Projection + // + // * ProvisionedThroughput + Create *CreateGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index to be removed. + Delete *DeleteGlobalSecondaryIndexAction `type:"structure"` + + // The name of an existing global secondary index, along with new provisioned + // throughput settings to be applied to that index. + Update *UpdateGlobalSecondaryIndexAction `type:"structure"` +} + +// String returns the string representation +func (s GlobalSecondaryIndexUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalSecondaryIndexUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlobalSecondaryIndexUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + if s.Update != nil { + if err := s.Update.Validate(); err != nil { + invalidParams.AddNested("Update", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Delete = v + return s +} + +// SetUpdate sets the Update field's value. +func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate { + s.Update = v + return s +} + +// Represents the properties of a global table. +type GlobalTable struct { + _ struct{} `type:"structure"` + + // The global table name. + GlobalTableName *string `min:"3" type:"string"` + + // The regions where the global table has replicas. + ReplicationGroup []*Replica `type:"list"` +} + +// String returns the string representation +func (s GlobalTable) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalTable) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable { + s.GlobalTableName = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable { + s.ReplicationGroup = v + return s +} + +// Contains details about the global table. +type GlobalTableDescription struct { + _ struct{} `type:"structure"` + + // The creation time of the global table. + CreationDateTime *time.Time `type:"timestamp"` + + // The unique identifier of the global table. + GlobalTableArn *string `type:"string"` + + // The global table name. 
+ GlobalTableName *string `min:"3" type:"string"` + + // The current state of the global table: + // + // * CREATING - The global table is being created. + // + // * UPDATING - The global table is being updated. + // + // * DELETING - The global table is being deleted. + // + // * ACTIVE - The global table is ready for use. + GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"` + + // The regions where the global table has replicas. + ReplicationGroup []*ReplicaDescription `type:"list"` +} + +// String returns the string representation +func (s GlobalTableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalTableDescription) GoString() string { + return s.String() +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription { + s.CreationDateTime = &v + return s +} + +// SetGlobalTableArn sets the GlobalTableArn field's value. +func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription { + s.GlobalTableArn = &v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableStatus sets the GlobalTableStatus field's value. +func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription { + s.GlobalTableStatus = &v + return s +} + +// SetReplicationGroup sets the ReplicationGroup field's value. +func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription { + s.ReplicationGroup = v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type GlobalTableGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // AutoScaling settings for managing a global secondary index's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
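+//
+// Illustrative sketch (the index name and capacity value are examples):
+// raising an index's write capacity on a global table, to be passed to
+// UpdateGlobalTableSettings:
+//
+//   u := &dynamodb.GlobalTableGlobalSecondaryIndexSettingsUpdate{
+//       IndexName:                     aws.String("GenreIndex"),
+//       ProvisionedWriteCapacityUnits: aws.Int64(10),
+//   }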
+func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1)) + } + if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// Information about item collections, if any, that were affected by the operation. +// ItemCollectionMetrics is only returned if the request asked for it. If the +// table does not have any local secondary indexes, this information is not +// returned in the response. +type ItemCollectionMetrics struct { + _ struct{} `type:"structure"` + + // The partition key value of the item collection. This value is the same as + // the partition key value of the item. + ItemCollectionKey map[string]*AttributeValue `type:"map"` + + // An estimate of item collection size, in gigabytes. This value is a two-element + // array containing a lower bound and an upper bound for the estimate. The estimate + // includes the size of all the items in the table, plus the size of all attributes + // projected into all of the local secondary indexes on that table. Use this + // estimate to measure whether a local secondary index is approaching its size + // limit. + // + // The estimate is subject to change over time; therefore, do not rely on the + // precision or accuracy of the estimate. + SizeEstimateRangeGB []*float64 `type:"list"` +} + +// String returns the string representation +func (s ItemCollectionMetrics) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ItemCollectionMetrics) GoString() string { + return s.String() +} + +// SetItemCollectionKey sets the ItemCollectionKey field's value. +func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics { + s.ItemCollectionKey = v + return s +} + +// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value. 
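+//
+// Reading sketch (illustrative; assumes m is a *dynamodb.ItemCollectionMetrics
+// returned by a write operation with ReturnItemCollectionMetrics set to "SIZE"):
+//
+//   if m != nil && len(m.SizeEstimateRangeGB) == 2 {
+//       fmt.Printf("estimate: %.1f-%.1f GB\n", *m.SizeEstimateRangeGB[0], *m.SizeEstimateRangeGB[1])
+//   }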
+func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics {
+ s.SizeEstimateRangeGB = v
+ return s
+}
+
+// Details for the requested item.
+type ItemResponse struct {
+ _ struct{} `type:"structure"`
+
+ // Map of attribute data consisting of the data type and attribute value.
+ Item map[string]*AttributeValue `type:"map"`
+}
+
+// String returns the string representation
+func (s ItemResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ItemResponse) GoString() string {
+ return s.String()
+}
+
+// SetItem sets the Item field's value.
+func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse {
+ s.Item = v
+ return s
+}
+
+// Represents a single element of a key schema. A key schema specifies the attributes
+// that make up the primary key of a table, or the key attributes of an index.
+//
+// A KeySchemaElement represents exactly one attribute of the primary key. For
+// example, a simple primary key would be represented by one KeySchemaElement
+// (for the partition key). A composite primary key would require one KeySchemaElement
+// for the partition key, and another KeySchemaElement for the sort key.
+//
+// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).
+// The data type must be one of String, Number, or Binary. The attribute cannot
+// be nested within a List or a Map.
+type KeySchemaElement struct {
+ _ struct{} `type:"structure"`
+
+ // The name of a key attribute.
+ //
+ // AttributeName is a required field
+ AttributeName *string `min:"1" type:"string" required:"true"`
+
+ // The role that this key attribute will assume:
+ //
+ // * HASH - partition key
+ //
+ // * RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function
+ // to evenly distribute data items across partitions, based on their partition
+ // key values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // KeyType is a required field
+ KeyType *string `type:"string" required:"true" enum:"KeyType"`
+}
+
+// String returns the string representation
+func (s KeySchemaElement) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeySchemaElement) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *KeySchemaElement) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"}
+ if s.AttributeName == nil {
+ invalidParams.Add(request.NewErrParamRequired("AttributeName"))
+ }
+ if s.AttributeName != nil && len(*s.AttributeName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
+ }
+ if s.KeyType == nil {
+ invalidParams.Add(request.NewErrParamRequired("KeyType"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetAttributeName sets the AttributeName field's value.
+func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement {
+ s.AttributeName = &v
+ return s
+}
+
+// SetKeyType sets the KeyType field's value.
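+//
+// For example (illustrative; the attribute names are examples), the key schema
+// for a composite primary key:
+//
+//   schema := []*dynamodb.KeySchemaElement{
+//       {AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},
+//       {AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")},
+//   }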
+func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement { + s.KeyType = &v + return s +} + +// Represents a set of primary keys and, for each key, the attributes to retrieve +// from the table. +// +// For each primary key, you must provide all of the key attributes. For example, +// with a simple primary key, you only need to provide the partition key. For +// a composite primary key, you must provide both the partition key and the +// sort key. +type KeysAndAttributes struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Accessing Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // The primary key attribute values that define the items and the attributes + // associated with the items. + // + // Keys is a required field + Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the ProjectionExpression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. 
+ ProjectionExpression *string `type:"string"` +} + +// String returns the string representation +func (s KeysAndAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeysAndAttributes) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *KeysAndAttributes) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.Keys == nil { + invalidParams.Add(request.NewErrParamRequired("Keys")) + } + if s.Keys != nil && len(s.Keys) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Keys", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes { + s.AttributesToGet = v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes { + s.ConsistentRead = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes { + s.ExpressionAttributeNames = v + return s +} + +// SetKeys sets the Keys field's value. +func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes { + s.Keys = v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes { + s.ProjectionExpression = &v + return s +} + +type ListBackupsInput struct { + _ struct{} `type:"structure"` + + // The backups from the table specified by BackupType are listed. + // + // Where BackupType can be: + // + // * USER - On-demand backup created by you. + // + // * SYSTEM - On-demand backup automatically created by DynamoDB. + // + // * ALL - All types of on-demand backups (USER and SYSTEM). + BackupType *string `type:"string" enum:"BackupTypeFilter"` + + // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last + // evaluated when the current page of results was returned, inclusive of the + // current page of results. This value may be specified as the ExclusiveStartBackupArn + // of a new ListBackups operation in order to fetch the next page of results. + ExclusiveStartBackupArn *string `min:"37" type:"string"` + + // Maximum number of backups to return at once. + Limit *int64 `min:"1" type:"integer"` + + // The backups from the table specified by TableName are listed. + TableName *string `min:"3" type:"string"` + + // Only backups created after this time are listed. TimeRangeLowerBound is inclusive. + TimeRangeLowerBound *time.Time `type:"timestamp"` + + // Only backups created before this time are listed. TimeRangeUpperBound is + // exclusive. + TimeRangeUpperBound *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ListBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"} + if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupType sets the BackupType field's value. +func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput { + s.BackupType = &v + return s +} + +// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value. +func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput { + s.ExclusiveStartBackupArn = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput { + s.Limit = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput { + s.TableName = &v + return s +} + +// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value. +func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput { + s.TimeRangeLowerBound = &v + return s +} + +// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value. +func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput { + s.TimeRangeUpperBound = &v + return s +} + +type ListBackupsOutput struct { + _ struct{} `type:"structure"` + + // List of BackupSummary objects. + BackupSummaries []*BackupSummary `type:"list"` + + // The ARN of the backup last evaluated when the current page of results was + // returned, inclusive of the current page of results. This value may be specified + // as the ExclusiveStartBackupArn of a new ListBackups operation in order to + // fetch the next page of results. + // + // If LastEvaluatedBackupArn is empty, then the last page of results has been + // processed and there are no more results to be retrieved. + // + // If LastEvaluatedBackupArn is not empty, this may or may not indicate that + // there is more data to be returned. All results are guaranteed to have been + // returned if and only if no value for LastEvaluatedBackupArn is returned. + LastEvaluatedBackupArn *string `min:"37" type:"string"` +} + +// String returns the string representation +func (s ListBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBackupsOutput) GoString() string { + return s.String() +} + +// SetBackupSummaries sets the BackupSummaries field's value. +func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput { + s.BackupSummaries = v + return s +} + +// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value. +func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput { + s.LastEvaluatedBackupArn = &v + return s +} + +type ListGlobalTablesInput struct { + _ struct{} `type:"structure"` + + // The first global table name that this operation will evaluate. + ExclusiveStartGlobalTableName *string `min:"3" type:"string"` + + // The maximum number of table names to return. + Limit *int64 `min:"1" type:"integer"` + + // Lists the global tables in a specific Region. 
+ RegionName *string `type:"string"` +} + +// String returns the string representation +func (s ListGlobalTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGlobalTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGlobalTablesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"} + if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value. +func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput { + s.ExclusiveStartGlobalTableName = &v + return s +} + +// SetLimit sets the Limit field's value. +func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput { + s.Limit = &v + return s +} + +// SetRegionName sets the RegionName field's value. +func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput { + s.RegionName = &v + return s +} + +type ListGlobalTablesOutput struct { + _ struct{} `type:"structure"` + + // List of global table names. + GlobalTables []*GlobalTable `type:"list"` + + // Last evaluated global table name. + LastEvaluatedGlobalTableName *string `min:"3" type:"string"` +} + +// String returns the string representation +func (s ListGlobalTablesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGlobalTablesOutput) GoString() string { + return s.String() +} + +// SetGlobalTables sets the GlobalTables field's value. +func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput { + s.GlobalTables = v + return s +} + +// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value. +func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput { + s.LastEvaluatedGlobalTableName = &v + return s +} + +// Represents the input of a ListTables operation. +type ListTablesInput struct { + _ struct{} `type:"structure"` + + // The first table name that this operation will evaluate. Use the value that + // was returned for LastEvaluatedTableName in a previous operation, so that + // you can obtain the next page of results. + ExclusiveStartTableName *string `min:"3" type:"string"` + + // A maximum number of table names to return. If this parameter is not specified, + // the limit is 100. + Limit *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListTablesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTablesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListTablesInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"}
+	if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3))
+	}
+	if s.Limit != nil && *s.Limit < 1 {
+		invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value.
+func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput {
+	s.ExclusiveStartTableName = &v
+	return s
+}
+
+// SetLimit sets the Limit field's value.
+func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput {
+	s.Limit = &v
+	return s
+}
+
+// Represents the output of a ListTables operation.
+type ListTablesOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the last table in the current page of results. Use this value
+	// as the ExclusiveStartTableName in a new request to obtain the next page of
+	// results, until all the table names are returned.
+	//
+	// If you do not receive a LastEvaluatedTableName value in the response, this
+	// means that there are no more table names to be retrieved.
+	LastEvaluatedTableName *string `min:"3" type:"string"`
+
+	// The names of the tables associated with the current account at the current
+	// endpoint. The maximum size of this array is 100.
+	//
+	// If LastEvaluatedTableName also appears in the output, you can use this value
+	// as the ExclusiveStartTableName parameter in a subsequent ListTables request
+	// and obtain the next page of results.
+	TableNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTablesOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTablesOutput) GoString() string {
+	return s.String()
+}
+
+// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value.
+func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput {
+	s.LastEvaluatedTableName = &v
+	return s
+}
+
+// SetTableNames sets the TableNames field's value.
+func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput {
+	s.TableNames = v
+	return s
+}
+
+type ListTagsOfResourceInput struct {
+	_ struct{} `type:"structure"`
+
+	// An optional string that, if supplied, must be copied from the output of a
+	// previous call to ListTagsOfResource. When provided in this manner, this API
+	// fetches the next page of results.
+	NextToken *string `type:"string"`
+
+	// The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+	// Resource Name (ARN).
+	//
+	// ResourceArn is a required field
+	ResourceArn *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceInput) GoString() string {
+	return s.String()
+}
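+
+// Example (an illustrative sketch, not part of the generated API): paging through
+// all table names with ListTables, where svc is an assumed *dynamodb.DynamoDB client:
+//
+//	input := &dynamodb.ListTablesInput{}
+//	for {
+//		out, err := svc.ListTables(input)
+//		if err != nil {
+//			return err
+//		}
+//		for _, name := range out.TableNames {
+//			fmt.Println(*name)
+//		}
+//		if out.LastEvaluatedTableName == nil {
+//			break // last page reached
+//		}
+//		input.ExclusiveStartTableName = out.LastEvaluatedTableName
+//	}
+
+// Validate inspects the fields of the type to determine if they are valid.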
+func (s *ListTagsOfResourceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"}
+	if s.ResourceArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
+	}
+	if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput {
+	s.NextToken = &v
+	return s
+}
+
+// SetResourceArn sets the ResourceArn field's value.
+func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput {
+	s.ResourceArn = &v
+	return s
+}
+
+type ListTagsOfResourceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If this value is returned, there are additional results to be displayed.
+	// To retrieve them, call ListTagsOfResource again, with NextToken set to this
+	// value.
+	NextToken *string `type:"string"`
+
+	// The tags currently associated with the Amazon DynamoDB resource.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s ListTagsOfResourceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListTagsOfResourceOutput) GoString() string {
+	return s.String()
+}
+
+// SetNextToken sets the NextToken field's value.
+func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput {
+	s.NextToken = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput {
+	s.Tags = v
+	return s
+}
+
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndex struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the local secondary index. The name must be unique among all
+	// other indexes on this table.
+	//
+	// IndexName is a required field
+	IndexName *string `min:"3" type:"string" required:"true"`
+
+	// The complete key schema for the local secondary index, consisting of one
+	// or more pairs of attribute names and key types:
+	//
+	//    * HASH - partition key
+	//
+	//    * RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function
+	// to evenly distribute data items across partitions, based on their partition
+	// key values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	//
+	// KeySchema is a required field
+	KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+	// Represents attributes that are copied (projected) from the table into the
+	// local secondary index. These are in addition to the primary key attributes
+	// and index key attributes, which are automatically projected.
+	//
+	// Projection is a required field
+	Projection *Projection `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndex) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndex) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LocalSecondaryIndex) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"}
+	if s.IndexName == nil {
+		invalidParams.Add(request.NewErrParamRequired("IndexName"))
+	}
+	if s.IndexName != nil && len(*s.IndexName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+	}
+	if s.KeySchema == nil {
+		invalidParams.Add(request.NewErrParamRequired("KeySchema"))
+	}
+	if s.KeySchema != nil && len(s.KeySchema) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
+	}
+	if s.Projection == nil {
+		invalidParams.Add(request.NewErrParamRequired("Projection"))
+	}
+	if s.KeySchema != nil {
+		for i, v := range s.KeySchema {
+			if v == nil {
+				continue
+			}
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
+			}
+		}
+	}
+	if s.Projection != nil {
+		if err := s.Projection.Validate(); err != nil {
+			invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex {
+	s.IndexName = &v
+	return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex {
+	s.KeySchema = v
+	return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex {
+	s.Projection = v
+	return s
+}
+
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndexDescription struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) that uniquely identifies the index.
+	IndexArn *string `type:"string"`
+
+	// Represents the name of the local secondary index.
+	IndexName *string `min:"3" type:"string"`
+
+	// The total size of the specified index, in bytes. DynamoDB updates this value
+	// approximately every six hours. Recent changes might not be reflected in this
+	// value.
+	IndexSizeBytes *int64 `type:"long"`
+
+	// The number of items in the specified index. DynamoDB updates this value approximately
+	// every six hours. Recent changes might not be reflected in this value.
+	ItemCount *int64 `type:"long"`
+
+	// The complete key schema for the local secondary index, consisting of one
+	// or more pairs of attribute names and key types:
+	//
+	//    * HASH - partition key
+	//
+	//    * RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function
+	// to evenly distribute data items across partitions, based on their partition
+	// key values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+	// Represents attributes that are copied (projected) from the table into the
+	// global secondary index. These are in addition to the primary key attributes
+	// and index key attributes, which are automatically projected.
+	Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexDescription) GoString() string {
+	return s.String()
+}
+
+// SetIndexArn sets the IndexArn field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription {
+	s.IndexArn = &v
+	return s
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription {
+	s.IndexName = &v
+	return s
+}
+
+// SetIndexSizeBytes sets the IndexSizeBytes field's value.
+func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription {
+	s.IndexSizeBytes = &v
+	return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription {
+	s.ItemCount = &v
+	return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription {
+	s.KeySchema = v
+	return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription {
+	s.Projection = v
+	return s
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+type LocalSecondaryIndexInfo struct {
+	_ struct{} `type:"structure"`
+
+	// Represents the name of the local secondary index.
+	IndexName *string `min:"3" type:"string"`
+
+	// The complete key schema for a local secondary index, which consists of one
+	// or more pairs of attribute names and key types:
+	//
+	//    * HASH - partition key
+	//
+	//    * RANGE - sort key
+	//
+	// The partition key of an item is also known as its hash attribute. The term
+	// "hash attribute" derives from DynamoDB's usage of an internal hash function
+	// to evenly distribute data items across partitions, based on their partition
+	// key values.
+	//
+	// The sort key of an item is also known as its range attribute. The term "range
+	// attribute" derives from the way DynamoDB stores items with the same partition
+	// key physically close together, in sorted order by the sort key value.
+	KeySchema []*KeySchemaElement `min:"1" type:"list"`
+
+	// Represents attributes that are copied (projected) from the table into the
+	// global secondary index. These are in addition to the primary key attributes
+	// and index key attributes, which are automatically projected.
+	Projection *Projection `type:"structure"`
+}
+
+// String returns the string representation
+func (s LocalSecondaryIndexInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LocalSecondaryIndexInfo) GoString() string {
+	return s.String()
+}
+
+// SetIndexName sets the IndexName field's value.
+func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo {
+	s.IndexName = &v
+	return s
+}
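+
+// Example (an illustrative sketch, not part of the generated API): defining a
+// local secondary index with a keys-only projection, using hypothetical names:
+//
+//	lsi := &dynamodb.LocalSecondaryIndex{
+//		IndexName: aws.String("OrderDateIndex"),
+//		KeySchema: []*dynamodb.KeySchemaElement{
+//			{AttributeName: aws.String("UserId"), KeyType: aws.String(dynamodb.KeyTypeHash)},
+//			{AttributeName: aws.String("OrderDate"), KeyType: aws.String(dynamodb.KeyTypeRange)},
+//		},
+//		Projection: &dynamodb.Projection{
+//			ProjectionType: aws.String(dynamodb.ProjectionTypeKeysOnly),
+//		},
+//	}
+
+// SetKeySchema sets the KeySchema field's value.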
+func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo {
+	s.KeySchema = v
+	return s
+}
+
+// SetProjection sets the Projection field's value.
+func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo {
+	s.Projection = v
+	return s
+}
+
+// The description of the point in time settings applied to the table.
+type PointInTimeRecoveryDescription struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the earliest point in time you can restore your table to. You can
+	// restore your table to any point in time during the last 35 days.
+	EarliestRestorableDateTime *time.Time `type:"timestamp"`
+
+	// LatestRestorableDateTime is typically 5 minutes before the current time.
+	LatestRestorableDateTime *time.Time `type:"timestamp"`
+
+	// The current state of point in time recovery:
+	//
+	//    * ENABLING - Point in time recovery is being enabled.
+	//
+	//    * ENABLED - Point in time recovery is enabled.
+	//
+	//    * DISABLED - Point in time recovery is disabled.
+	PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoveryDescription) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoveryDescription) GoString() string {
+	return s.String()
+}
+
+// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value.
+func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
+	s.EarliestRestorableDateTime = &v
+	return s
+}
+
+// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value.
+func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
+	s.LatestRestorableDateTime = &v
+	return s
+}
+
+// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value.
+func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription {
+	s.PointInTimeRecoveryStatus = &v
+	return s
+}
+
+// Represents the settings used to enable point in time recovery.
+type PointInTimeRecoverySpecification struct {
+	_ struct{} `type:"structure"`
+
+	// Indicates whether point in time recovery is enabled (true) or disabled (false)
+	// on the table.
+	//
+	// PointInTimeRecoveryEnabled is a required field
+	PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"`
+}
+
+// String returns the string representation
+func (s PointInTimeRecoverySpecification) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PointInTimeRecoverySpecification) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PointInTimeRecoverySpecification) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
+	if s.PointInTimeRecoveryEnabled == nil {
+		invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
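+
+// Example (an illustrative sketch, not part of the generated API): enabling point
+// in time recovery on a hypothetical table via the UpdateContinuousBackups API,
+// where svc is an assumed *dynamodb.DynamoDB client:
+//
+//	_, err := svc.UpdateContinuousBackups(&dynamodb.UpdateContinuousBackupsInput{
+//		TableName: aws.String("Orders"),
+//		PointInTimeRecoverySpecification: &dynamodb.PointInTimeRecoverySpecification{
+//			PointInTimeRecoveryEnabled: aws.Bool(true),
+//		},
+//	})
+
+// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value.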
+func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification { + s.PointInTimeRecoveryEnabled = &v + return s +} + +// Represents attributes that are copied (projected) from the table into an +// index. These are in addition to the primary key attributes and index key +// attributes, which are automatically projected. +type Projection struct { + _ struct{} `type:"structure"` + + // Represents the non-key attribute names which will be projected into the index. + // + // For local secondary indexes, the total count of NonKeyAttributes summed across + // all of the local secondary indexes, must not exceed 20. If you project the + // same attribute into two different indexes, this counts as two distinct attributes + // when determining the total. + NonKeyAttributes []*string `min:"1" type:"list"` + + // The set of attributes that are projected into the index: + // + // * KEYS_ONLY - Only the index and primary keys are projected into the index. + // + // * INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. + // + // * ALL - All of the table attributes are projected into the index. + ProjectionType *string `type:"string" enum:"ProjectionType"` +} + +// String returns the string representation +func (s Projection) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Projection) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Projection) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Projection"} + if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNonKeyAttributes sets the NonKeyAttributes field's value. +func (s *Projection) SetNonKeyAttributes(v []*string) *Projection { + s.NonKeyAttributes = v + return s +} + +// SetProjectionType sets the ProjectionType field's value. +func (s *Projection) SetProjectionType(v string) *Projection { + s.ProjectionType = &v + return s +} + +// Represents the provisioned throughput settings for a specified table or index. +// The settings can be modified using the UpdateTable operation. +// +// For current minimum and maximum provisioned throughput values, see Limits +// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) +// in the Amazon DynamoDB Developer Guide. +type ProvisionedThroughput struct { + _ struct{} `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // ReadCapacityUnits is a required field + ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. 
For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + // + // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0. + // + // WriteCapacityUnits is a required field + WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"` +} + +// String returns the string representation +func (s ProvisionedThroughput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ProvisionedThroughput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"} + if s.ReadCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits")) + } + if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1)) + } + if s.WriteCapacityUnits == nil { + invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits")) + } + if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput { + s.WriteCapacityUnits = &v + return s +} + +// Represents the provisioned throughput settings for the table, consisting +// of read and write capacity units, along with data about increases and decreases. +type ProvisionedThroughputDescription struct { + _ struct{} `type:"structure"` + + // The date and time of the last provisioned throughput decrease for this table. + LastDecreaseDateTime *time.Time `type:"timestamp"` + + // The date and time of the last provisioned throughput increase for this table. + LastIncreaseDateTime *time.Time `type:"timestamp"` + + // The number of provisioned throughput decreases for this table during this + // UTC calendar day. For current maximums on provisioned throughput decreases, + // see Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. + NumberOfDecreasesToday *int64 `min:"1" type:"long"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. Eventually consistent reads require + // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits + // per second provides 100 eventually consistent ReadCapacityUnits per second. + ReadCapacityUnits *int64 `type:"long"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. 
+ WriteCapacityUnits *int64 `type:"long"` +} + +// String returns the string representation +func (s ProvisionedThroughputDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedThroughputDescription) GoString() string { + return s.String() +} + +// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastDecreaseDateTime = &v + return s +} + +// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value. +func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription { + s.LastIncreaseDateTime = &v + return s +} + +// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value. +func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription { + s.NumberOfDecreasesToday = &v + return s +} + +// SetReadCapacityUnits sets the ReadCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.ReadCapacityUnits = &v + return s +} + +// SetWriteCapacityUnits sets the WriteCapacityUnits field's value. +func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription { + s.WriteCapacityUnits = &v + return s +} + +// Represents a request to perform a PutItem operation. +type Put struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to be written by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item that are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values + // are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which to write the item. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s Put) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Put) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *Put) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Put"} + if s.Item == nil { + invalidParams.Add(request.NewErrParamRequired("Item")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Put) SetConditionExpression(v string) *Put { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put { + s.ExpressionAttributeValues = v + return s +} + +// SetItem sets the Item field's value. +func (s *Put) SetItem(v map[string]*AttributeValue) *Put { + s.Item = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Put) SetTableName(v string) *Put { + s.TableName = &v + return s +} + +// Represents the input of a PutItem operation. +type PutItemInput struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional PutItem operation + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information on condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. 
+	//
+	// Use the # character in an expression to dereference an attribute name. For
+	// example, consider the following attribute name:
+	//
+	//    * Percentile
+	//
+	// The name of this attribute conflicts with a reserved word, so it cannot be
+	// used directly in an expression. (For the complete list of reserved words,
+	// see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+	// in the Amazon DynamoDB Developer Guide). To work around this, you could specify
+	// the following for ExpressionAttributeNames:
+	//
+	//    * {"#P":"Percentile"}
+	//
+	// You could then use this substitution in an expression, as in this example:
+	//
+	//    * #P = :val
+	//
+	// Tokens that begin with the : character are expression attribute values, which
+	// are placeholders for the actual value at runtime.
+	//
+	// For more information on expression attribute names, see Specifying Item Attributes
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
+	// in the Amazon DynamoDB Developer Guide.
+	ExpressionAttributeNames map[string]*string `type:"map"`
+
+	// One or more values that can be substituted in an expression.
+	//
+	// Use the : (colon) character in an expression to dereference an attribute
+	// value. For example, suppose that you wanted to check whether the value of
+	// the ProductStatus attribute was one of the following:
+	//
+	// Available | Backordered | Discontinued
+	//
+	// You would first need to specify ExpressionAttributeValues as follows:
+	//
+	// { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
+	// }
+	//
+	// You could then use these values in an expression, such as this:
+	//
+	// ProductStatus IN (:avail, :back, :disc)
+	//
+	// For more information on expression attribute values, see Condition Expressions
+	// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
+	// in the Amazon DynamoDB Developer Guide.
+	ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
+
+	// A map of attribute name/value pairs, one for each attribute. Only the primary
+	// key attributes are required; you can optionally provide other attribute name-value
+	// pairs for the item.
+	//
+	// You must provide all of the attributes for the primary key. For example,
+	// with a simple primary key, you only need to provide a value for the partition
+	// key. For a composite primary key, you must provide values for both the partition
+	// key and the sort key.
+	//
+	// If you specify any attributes that are part of an index key, then the data
+	// types for those attributes must match those of the schema in the table's
+	// attribute definition.
+	//
+	// For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey)
+	// in the Amazon DynamoDB Developer Guide.
+	//
+	// Each element in the Item map is an AttributeValue object.
+	//
+	// Item is a required field
+	Item map[string]*AttributeValue `type:"map" required:"true"`
+
+	// Determines the level of detail about provisioned throughput consumption that
+	// is returned in the response:
+	//
+	//    * INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//    operation, together with ConsumedCapacity for each table and secondary
+	//    index that was accessed. Note that some operations, such as GetItem and
+	//    BatchGetItem, do not access any indexes at all. In these cases, specifying
+	//    INDEXES will only return ConsumedCapacity information for table(s).
+	//
+	//    * TOTAL - The response includes only the aggregate ConsumedCapacity for
+	//    the operation.
+	//
+	//    * NONE - No ConsumedCapacity details are included in the response.
+	ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
+
+	// Determines whether item collection metrics are returned. If set to SIZE,
+	// the response includes statistics about item collections, if any, that were
+	// modified during the operation. If set to NONE (the default), no statistics
+	// are returned.
+	ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
+
+	// Use ReturnValues if you want to get the item attributes as they appeared
+	// before they were updated with the PutItem request. For PutItem, the valid
+	// values are:
+	//
+	//    * NONE - If ReturnValues is not specified, or if its value is NONE, then
+	//    nothing is returned. (This setting is the default for ReturnValues.)
+	//
+	//    * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+	//    content of the old item is returned.
+	//
+	// The ReturnValues parameter is used by several DynamoDB operations; however,
+	// PutItem does not recognize any values other than NONE or ALL_OLD.
+	ReturnValues *string `type:"string" enum:"ReturnValue"`
+
+	// The name of the table to contain the item.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutItemInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutItemInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutItemInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutItemInput"}
+	if s.Item == nil {
+		invalidParams.Add(request.NewErrParamRequired("Item"))
+	}
+	if s.TableName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TableName"))
+	}
+	if s.TableName != nil && len(*s.TableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetConditionExpression sets the ConditionExpression field's value.
+func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput {
+	s.ConditionExpression = &v
+	return s
+}
+
+// SetConditionalOperator sets the ConditionalOperator field's value.
+func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput {
+	s.ConditionalOperator = &v
+	return s
+}
+
+// SetExpected sets the Expected field's value.
+func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput {
+	s.Expected = v
+	return s
+}
+
+// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
+func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput {
+	s.ExpressionAttributeNames = v
+	return s
+}
+
+// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
+func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput {
+	s.ExpressionAttributeValues = v
+	return s
+}
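+
+// Example (an illustrative sketch, not part of the generated API): a conditional
+// PutItem that only writes when no item with the same key exists; svc is an
+// assumed *dynamodb.DynamoDB client and the table/attribute names are hypothetical:
+//
+//	_, err := svc.PutItem(&dynamodb.PutItemInput{
+//		TableName: aws.String("Orders"),
+//		Item: map[string]*dynamodb.AttributeValue{
+//			"UserId":    {S: aws.String("u-1234")},
+//			"OrderDate": {S: aws.String("2019-04-01")},
+//		},
+//		ConditionExpression: aws.String("attribute_not_exists(UserId)"),
+//	})
+
+// SetItem sets the Item field's value.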
+func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput { + s.Item = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *PutItemInput) SetReturnValues(v string) *PutItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *PutItemInput) SetTableName(v string) *PutItemInput { + s.TableName = &v + return s +} + +// Represents the output of a PutItem operation. +type PutItemOutput struct { + _ struct{} `type:"structure"` + + // The attribute values as they appeared before the PutItem operation, but only + // if ReturnValues is specified as ALL_OLD in the request. Each element consists + // of an attribute name and an attribute value. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the PutItem operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Read/Write Capacity Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the PutItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s PutItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
+func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents a request to perform a PutItem operation on an item. +type PutRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of an item to be processed by PutItem. All of the table's primary key attributes + // must be specified, and their data types must match those of the table's key + // schema. If any attributes are present in the item which are part of an index + // key schema for the table, their types must match the index key schema. + // + // Item is a required field + Item map[string]*AttributeValue `type:"map" required:"true"` +} + +// String returns the string representation +func (s PutRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRequest) GoString() string { + return s.String() +} + +// SetItem sets the Item field's value. +func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest { + s.Item = v + return s +} + +// Represents the input of a Query operation. +type QueryInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // Determines the read consistency model: If set to true, then the operation + // uses strongly consistent reads; otherwise, the operation uses eventually + // consistent reads. + // + // Strongly consistent reads are not supported on global secondary indexes. + // If you query a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number, or Binary. No + // set data types are allowed. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. 
For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Query operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. + // + // A FilterExpression does not allow key attributes. You cannot define a filter + // expression based on a partition key or a sort key. + // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of an index to query. This index can be any local secondary index + // or global secondary index on the table. Note that if you use the IndexName + // parameter, you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The condition that specifies the key values for items to be retrieved by + // the Query action. + // + // The condition must perform an equality test on a single partition key value. + // + // The condition can optionally perform one of several comparison tests on a + // single sort key value. This allows Query to retrieve one item with a given + // partition key value and sort key value, or several items that have the same + // partition key value but different sort key values. 
+	//
+	// The partition key equality test is required, and must be specified in the
+	// following format:
+	//
+	// partitionKeyName = :partitionkeyval
+	//
+	// If you also want to provide a condition for the sort key, it must be combined
+	// using AND with the condition for the partition key. Following is an example,
+	// using the = comparison operator for the sort key:
+	//
+	// partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
+	//
+	// Valid comparisons for the sort key condition are as follows:
+	//
+	// * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
+	//
+	// * sortKeyName < :sortkeyval - true if the sort key value is less than
+	// :sortkeyval.
+	//
+	// * sortKeyName <= :sortkeyval - true if the sort key value is less than
+	// or equal to :sortkeyval.
+	//
+	// * sortKeyName > :sortkeyval - true if the sort key value is greater than
+	// :sortkeyval.
+	//
+	// * sortKeyName >= :sortkeyval - true if the sort key value is greater than
+	// or equal to :sortkeyval.
+	//
+	// * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
+	// key value is greater than or equal to :sortkeyval1, and less than or equal
+	// to :sortkeyval2.
+	//
+	// * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
+	// begins with a particular operand. (You cannot use this function with a
+	// sort key that is of type Number.) Note that the function name begins_with
+	// is case-sensitive.
+	//
+	// Use the ExpressionAttributeValues parameter to replace tokens such as :partitionkeyval
+	// and :sortkeyval with actual values at runtime.
+	//
+	// You can optionally use the ExpressionAttributeNames parameter to replace
+	// the names of the partition key and sort key with placeholder tokens. This
+	// option might be necessary if an attribute name conflicts with a DynamoDB
+	// reserved word. For example, the following KeyConditionExpression parameter
+	// causes an error because Size is a reserved word:
+	//
+	// * Size = :myval
+	//
+	// To work around this, define a placeholder (such as #S) to represent the attribute
+	// name Size. KeyConditionExpression then is as follows:
+	//
+	// * #S = :myval
+	//
+	// For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
+	// in the Amazon DynamoDB Developer Guide.
+	//
+	// For more information on ExpressionAttributeNames and ExpressionAttributeValues,
+	// see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
+	// in the Amazon DynamoDB Developer Guide.
+	KeyConditionExpression *string `type:"string"`
+
+	// This is a legacy parameter. Use KeyConditionExpression instead. For more
+	// information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
+	// in the Amazon DynamoDB Developer Guide.
+	KeyConditions map[string]*Condition `type:"map"`
+
+	// The maximum number of items to evaluate (not necessarily the number of matching
+	// items). If DynamoDB processes the number of items up to the limit while processing
+	// the results, it stops the operation and returns the matching values up to
+	// that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
+	// so that you can pick up where you left off.
Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the table. + // These attributes can include scalars, sets, or elements of a JSON document. + // The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html) + // in the Amazon DynamoDB Developer Guide. + QueryFilter map[string]*Condition `type:"map"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Specifies the order for index traversal: If true (default), the traversal + // is performed in ascending order; if false, the traversal is performed in + // descending order. + // + // Items with the same partition key value are stored in sorted order by sort + // key. If the sort key data type is Number, the results are stored in numeric + // order. For type String, the results are stored in order of UTF-8 bytes. For + // type Binary, DynamoDB treats each byte of the binary data as unsigned. + // + // If ScanIndexForward is true, DynamoDB returns the results in the order in + // which they are stored (by sort key value). This is the default behavior. + // If ScanIndexForward is false, DynamoDB reads the results in reverse order + // by sort key value, and then returns the results to the client. + ScanIndexForward *bool `type:"boolean"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. 
If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation will read only the index and not the table. If any of the + // requested attributes are not projected into the local secondary index, + // DynamoDB fetches each of these attributes from the parent table. This + // extra fetching incurs additional throughput cost and latency. If you query + // or scan a global secondary index, you can only request attributes that + // are projected into the index. Global secondary index queries cannot fetch + // attributes from the parent table. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults to + // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
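+// Example (editor's illustrative sketch, not generated SDK code): a Query that
+// uses ExpressionAttributeNames and ExpressionAttributeValues as described in
+// the field documentation above. The names svc, "Music", "Artist", and
+// "SongTitle" are hypothetical.
+//
+//	input := &QueryInput{
+//		TableName:              aws.String("Music"),
+//		KeyConditionExpression: aws.String("#A = :artist AND begins_with(SongTitle, :prefix)"),
+//		ExpressionAttributeNames: map[string]*string{
+//			"#A": aws.String("Artist"), // placeholder for a name that might clash with a reserved word
+//		},
+//		ExpressionAttributeValues: map[string]*AttributeValue{
+//			":artist": {S: aws.String("No One You Know")},
+//			":prefix": {S: aws.String("Call")},
+//		},
+//	}
+//	out, err := svc.Query(input)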
+func (s *QueryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueryInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.KeyConditions != nil { + for i, v := range s.KeyConditions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueryFilter != nil { + for i, v := range s.QueryFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *QueryInput) SetConditionalOperator(v string) *QueryInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *QueryInput) SetConsistentRead(v bool) *QueryInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *QueryInput) SetFilterExpression(v string) *QueryInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *QueryInput) SetIndexName(v string) *QueryInput { + s.IndexName = &v + return s +} + +// SetKeyConditionExpression sets the KeyConditionExpression field's value. +func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput { + s.KeyConditionExpression = &v + return s +} + +// SetKeyConditions sets the KeyConditions field's value. +func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput { + s.KeyConditions = v + return s +} + +// SetLimit sets the Limit field's value. +func (s *QueryInput) SetLimit(v int64) *QueryInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *QueryInput) SetProjectionExpression(v string) *QueryInput { + s.ProjectionExpression = &v + return s +} + +// SetQueryFilter sets the QueryFilter field's value. 
+func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput { + s.QueryFilter = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanIndexForward sets the ScanIndexForward field's value. +func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput { + s.ScanIndexForward = &v + return s +} + +// SetSelect sets the Select field's value. +func (s *QueryInput) SetSelect(v string) *QueryInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *QueryInput) SetTableName(v string) *QueryInput { + s.TableName = &v + return s +} + +// Represents the output of a Query operation. +type QueryOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Query operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you used a QueryFilter in the request, then Count is the number of items + // returned after the filter was applied, and ScannedCount is the number of + // matching items before the filter was applied. + // + // If you did not use a filter in the request, then Count and ScannedCount are + // the same. + Count *int64 `type:"integer"` + + // An array of item attributes that match the query criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any QueryFilter is applied. A high + // ScannedCount value with few, or no, Count results indicates an inefficient + // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. + ScannedCount *int64 `type:"integer"` +} + +// String returns the string representation +func (s QueryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueryOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. 
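+// Example (editor's illustrative sketch, not generated SDK code): paging
+// through Query results by feeding LastEvaluatedKey back in as
+// ExclusiveStartKey, per the QueryOutput field documentation above. An empty
+// LastEvaluatedKey signals the last page. Assumes a previously built
+// *QueryInput named input and a hypothetical handler process.
+//
+//	for {
+//		page, err := svc.Query(input)
+//		if err != nil {
+//			return err
+//		}
+//		process(page.Items)
+//		if len(page.LastEvaluatedKey) == 0 {
+//			break // the "last page" of results has been processed
+//		}
+//		input.ExclusiveStartKey = page.LastEvaluatedKey
+//	}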
+func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput { + s.ConsumedCapacity = v + return s +} + +// SetCount sets the Count field's value. +func (s *QueryOutput) SetCount(v int64) *QueryOutput { + s.Count = &v + return s +} + +// SetItems sets the Items field's value. +func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput { + s.Items = v + return s +} + +// SetLastEvaluatedKey sets the LastEvaluatedKey field's value. +func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput { + s.LastEvaluatedKey = v + return s +} + +// SetScannedCount sets the ScannedCount field's value. +func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput { + s.ScannedCount = &v + return s +} + +// Represents the properties of a replica. +type Replica struct { + _ struct{} `type:"structure"` + + // The region where the replica needs to be created. + RegionName *string `type:"string"` +} + +// String returns the string representation +func (s Replica) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Replica) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *Replica) SetRegionName(v string) *Replica { + s.RegionName = &v + return s +} + +// Contains the details of the replica. +type ReplicaDescription struct { + _ struct{} `type:"structure"` + + // The name of the region. + RegionName *string `type:"string"` +} + +// String returns the string representation +func (s ReplicaDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaDescription) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription { + s.RegionName = &v + return s +} + +// Represents the properties of a global secondary index. +type ReplicaGlobalSecondaryIndexSettingsDescription struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // The current status of the global secondary index: + // + // * CREATING - The global secondary index is being created. + // + // * UPDATING - The global secondary index is being updated. + // + // * DELETING - The global secondary index is being deleted. + // + // * ACTIVE - The global secondary index is ready for use. + IndexStatus *string `type:"string" enum:"IndexStatus"` + + // Autoscaling settings for a global secondary index replica's read capacity + // units. + ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` + + // AutoScaling settings for a global secondary index replica's write capacity + // units. + ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. 
+ ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string { + return s.String() +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexName = &v + return s +} + +// SetIndexStatus sets the IndexStatus field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.IndexStatus = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedReadCapacityUnits = &v + return s +} + +// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription { + s.ProvisionedWriteCapacityUnits = &v + return s +} + +// Represents the settings of a global secondary index for a global table that +// will be modified. +type ReplicaGlobalSecondaryIndexSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index. The name must be unique among all + // other indexes on this table. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Autoscaling settings for managing a global secondary index replica's read + // capacity units. + ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. + ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
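+// Example (editor's illustrative sketch, not generated SDK code): the Set*
+// methods throughout this file return their receiver, so inputs can be built
+// fluently and checked with Validate before being sent. The index name is
+// hypothetical.
+//
+//	update := (&ReplicaGlobalSecondaryIndexSettingsUpdate{}).
+//		SetIndexName("TitleIndex").
+//		SetProvisionedReadCapacityUnits(10)
+//	if err := update.Validate(); err != nil {
+//		return err
+//	}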
+func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1)) + } + if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.IndexName = &v + return s +} + +// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value. +func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate { + s.ProvisionedReadCapacityUnits = &v + return s +} + +// Represents the properties of a replica. +type ReplicaSettingsDescription struct { + _ struct{} `type:"structure"` + + // The region name of the replica. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // The read/write capacity mode of the replica. + ReplicaBillingModeSummary *BillingModeSummary `type:"structure"` + + // Replica global secondary index settings for the global table. + ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"` + + // Autoscaling settings for a global table replica's read capacity units. + ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `type:"long"` + + // AutoScaling settings for a global table replica's write capacity units. + ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. For more information, see Specifying Read and Write + // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. 
+ ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"` + + // The current state of the region: + // + // * CREATING - The region is being created. + // + // * UPDATING - The region is being updated. + // + // * DELETING - The region is being deleted. + // + // * ACTIVE - The region is ready for use. + ReplicaStatus *string `type:"string" enum:"ReplicaStatus"` +} + +// String returns the string representation +func (s ReplicaSettingsDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaSettingsDescription) GoString() string { + return s.String() +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription { + s.RegionName = &v + return s +} + +// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value. +func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription { + s.ReplicaBillingModeSummary = v + return s +} + +// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaGlobalSecondaryIndexSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v + return s +} + +// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value. +func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription { + s.ReplicaProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaStatus sets the ReplicaStatus field's value. +func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription { + s.ReplicaStatus = &v + return s +} + +// Represents the settings for a global table in a region that will be modified. +type ReplicaSettingsUpdate struct { + _ struct{} `type:"structure"` + + // The region of the replica to be added. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` + + // Represents the settings of a global secondary index for a global table that + // will be modified. + ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` + + // Autoscaling settings for managing a global table replica's read capacity + // units. 
+ ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of strongly consistent reads consumed per second before + // DynamoDB returns a ThrottlingException. For more information, see Specifying + // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput) + // in the Amazon DynamoDB Developer Guide. + ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s ReplicaSettingsUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaSettingsUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaSettingsUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1)) + } + if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil { + if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate { + s.RegionName = &v + return s +} + +// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value. +func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate { + s.ReplicaProvisionedReadCapacityUnits = &v + return s +} + +// Represents one of the following: +// +// * A new replica to be added to an existing global table. +// +// * New parameters for an existing replica. 
+// +// * An existing replica to be removed from an existing global table. +type ReplicaUpdate struct { + _ struct{} `type:"structure"` + + // The parameters required for creating a replica on an existing global table. + Create *CreateReplicaAction `type:"structure"` + + // The name of the existing replica to be removed. + Delete *DeleteReplicaAction `type:"structure"` +} + +// String returns the string representation +func (s ReplicaUpdate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaUpdate) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaUpdate) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"} + if s.Create != nil { + if err := s.Create.Validate(); err != nil { + invalidParams.AddNested("Create", err.(request.ErrInvalidParams)) + } + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCreate sets the Create field's value. +func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate { + s.Create = v + return s +} + +// SetDelete sets the Delete field's value. +func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate { + s.Delete = v + return s +} + +// Contains details for the restore. +type RestoreSummary struct { + _ struct{} `type:"structure"` + + // Point in time or source backup time. + // + // RestoreDateTime is a required field + RestoreDateTime *time.Time `type:"timestamp" required:"true"` + + // Indicates if a restore is in progress or not. + // + // RestoreInProgress is a required field + RestoreInProgress *bool `type:"boolean" required:"true"` + + // ARN of the backup from which the table was restored. + SourceBackupArn *string `min:"37" type:"string"` + + // ARN of the source table of the backup that is being restored. + SourceTableArn *string `type:"string"` +} + +// String returns the string representation +func (s RestoreSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreSummary) GoString() string { + return s.String() +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary { + s.RestoreDateTime = &v + return s +} + +// SetRestoreInProgress sets the RestoreInProgress field's value. +func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary { + s.RestoreInProgress = &v + return s +} + +// SetSourceBackupArn sets the SourceBackupArn field's value. +func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary { + s.SourceBackupArn = &v + return s +} + +// SetSourceTableArn sets the SourceTableArn field's value. +func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary { + s.SourceTableArn = &v + return s +} + +type RestoreTableFromBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` + + // The name of the new table to which the backup must be restored. 
+	//
+	// TargetTableName is a required field
+	TargetTableName *string `min:"3" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreTableFromBackupInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"}
+	if s.BackupArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("BackupArn"))
+	}
+	if s.BackupArn != nil && len(*s.BackupArn) < 37 {
+		invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
+	}
+	if s.TargetTableName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
+	}
+	if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBackupArn sets the BackupArn field's value.
+func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput {
+	s.BackupArn = &v
+	return s
+}
+
+// SetTargetTableName sets the TargetTableName field's value.
+func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput {
+	s.TargetTableName = &v
+	return s
+}
+
+type RestoreTableFromBackupOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The description of the table created from an existing backup.
+	TableDescription *TableDescription `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreTableFromBackupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableFromBackupOutput) GoString() string {
+	return s.String()
+}
+
+// SetTableDescription sets the TableDescription field's value.
+func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput {
+	s.TableDescription = v
+	return s
+}
+
+type RestoreTableToPointInTimeInput struct {
+	_ struct{} `type:"structure"`
+
+	// Time in the past to restore the table to.
+	RestoreDateTime *time.Time `type:"timestamp"`
+
+	// Name of the source table that is being restored.
+	//
+	// SourceTableName is a required field
+	SourceTableName *string `min:"3" type:"string" required:"true"`
+
+	// The name of the new table to which the source table must be restored.
+	//
+	// TargetTableName is a required field
+	TargetTableName *string `min:"3" type:"string" required:"true"`
+
+	// Restore the table to the latest possible time. LatestRestorableDateTime is
+	// typically 5 minutes before the current time.
+	UseLatestRestorableTime *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s RestoreTableToPointInTimeInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreTableToPointInTimeInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
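+// Example (editor's illustrative sketch, not generated SDK code): restoring a
+// table to the latest restorable time rather than an explicit RestoreDateTime.
+// The table names and svc are hypothetical; RestoreTableToPointInTime is the
+// corresponding service operation.
+//
+//	input := &RestoreTableToPointInTimeInput{
+//		SourceTableName:         aws.String("Music"),
+//		TargetTableName:         aws.String("MusicRestored"),
+//		UseLatestRestorableTime: aws.Bool(true),
+//	}
+//	out, err := svc.RestoreTableToPointInTime(input)
+//	// out.TableDescription describes the table being created by the restore.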
+func (s *RestoreTableToPointInTimeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"} + if s.SourceTableName == nil { + invalidParams.Add(request.NewErrParamRequired("SourceTableName")) + } + if s.SourceTableName != nil && len(*s.SourceTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3)) + } + if s.TargetTableName == nil { + invalidParams.Add(request.NewErrParamRequired("TargetTableName")) + } + if s.TargetTableName != nil && len(*s.TargetTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRestoreDateTime sets the RestoreDateTime field's value. +func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput { + s.RestoreDateTime = &v + return s +} + +// SetSourceTableName sets the SourceTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput { + s.SourceTableName = &v + return s +} + +// SetTargetTableName sets the TargetTableName field's value. +func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput { + s.TargetTableName = &v + return s +} + +// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value. +func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput { + s.UseLatestRestorableTime = &v + return s +} + +type RestoreTableToPointInTimeOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s RestoreTableToPointInTimeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreTableToPointInTimeOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput { + s.TableDescription = v + return s +} + +// The description of the server-side encryption status on the specified table. +type SSEDescription struct { + _ struct{} `type:"structure"` + + // The KMS customer master key (CMK) ARN used for the KMS encryption. + KMSMasterKeyArn *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption which uses AWS Key Management Service. + // Key is stored in your account and is managed by AWS KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` + + // Represents the current state of server-side encryption. The only supported + // values are: + // + // * ENABLED - Server-side encryption is enabled. + // + // * UPDATING - Server-side encryption is being updated. + Status *string `type:"string" enum:"SSEStatus"` +} + +// String returns the string representation +func (s SSEDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEDescription) GoString() string { + return s.String() +} + +// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value. +func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription { + s.KMSMasterKeyArn = &v + return s +} + +// SetSSEType sets the SSEType field's value. 
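+// Example (editor's illustrative sketch, not generated SDK code): inspecting a
+// table's server-side encryption status. This assumes DescribeTable returns a
+// TableDescription carrying an SSEDescription, which is not shown in this
+// excerpt; svc and the table name are hypothetical.
+//
+//	out, err := svc.DescribeTable(&DescribeTableInput{TableName: aws.String("Music")})
+//	if err == nil && out.Table.SSEDescription != nil {
+//		fmt.Println(aws.StringValue(out.Table.SSEDescription.Status))
+//	}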
+func (s *SSEDescription) SetSSEType(v string) *SSEDescription { + s.SSEType = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SSEDescription) SetStatus(v string) *SSEDescription { + s.Status = &v + return s +} + +// Represents the settings used to enable server-side encryption. +type SSESpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether server-side encryption is done using an AWS managed CMK + // or an AWS owned CMK. If enabled (true), server-side encryption type is set + // to KMS and an AWS managed CMK is used (AWS KMS charges apply). If disabled + // (false) or not specified, server-side encryption is set to AWS owned CMK. + Enabled *bool `type:"boolean"` + + // The KMS Customer Master Key (CMK) which should be used for the KMS encryption. + // To specify a CMK, use its key ID, Amazon Resource Name (ARN), alias name, + // or alias ARN. Note that you should only provide this parameter if the key + // is different from the default DynamoDB Customer Master Key alias/aws/dynamodb. + KMSMasterKeyId *string `type:"string"` + + // Server-side encryption type. The only supported value is: + // + // * KMS - Server-side encryption which uses AWS Key Management Service. + // Key is stored in your account and is managed by AWS KMS (KMS charges apply). + SSEType *string `type:"string" enum:"SSEType"` +} + +// String returns the string representation +func (s SSESpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSESpecification) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *SSESpecification) SetEnabled(v bool) *SSESpecification { + s.Enabled = &v + return s +} + +// SetKMSMasterKeyId sets the KMSMasterKeyId field's value. +func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification { + s.KMSMasterKeyId = &v + return s +} + +// SetSSEType sets the SSEType field's value. +func (s *SSESpecification) SetSSEType(v string) *SSESpecification { + s.SSEType = &v + return s +} + +// Represents the input of a Scan operation. +type ScanInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use ProjectionExpression instead. For more information, + // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html) + // in the Amazon DynamoDB Developer Guide. + AttributesToGet []*string `min:"1" type:"list"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // A Boolean value that determines the read consistency model during the scan: + // + // * If ConsistentRead is false, then the data returned from Scan might not + // contain the results from other recently completed write operations (PutItem, + // UpdateItem, or DeleteItem). + // + // * If ConsistentRead is true, then all of the write operations that completed + // before the Scan began are guaranteed to be contained in the Scan response. + // + // The default setting for ConsistentRead is false. + // + // The ConsistentRead parameter is not supported on global secondary indexes. 
+ // If you scan a global secondary index with ConsistentRead set to true, you + // will receive a ValidationException. + ConsistentRead *bool `type:"boolean"` + + // The primary key of the first item that this operation will evaluate. Use + // the value that was returned for LastEvaluatedKey in the previous operation. + // + // The data type for ExclusiveStartKey must be String, Number or Binary. No + // set data types are allowed. + // + // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify + // the same segment whose previous Scan returned the corresponding value of + // LastEvaluatedKey. + ExclusiveStartKey map[string]*AttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // A string that contains conditions that DynamoDB applies after the Scan operation, + // but before the data is returned to you. Items that do not satisfy the FilterExpression + // criteria are not returned. 
+ // + // A FilterExpression is applied after the items have already been read; the + // process of filtering does not consume any additional read capacity units. + // + // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // in the Amazon DynamoDB Developer Guide. + FilterExpression *string `type:"string"` + + // The name of a secondary index to scan. This index can be any local secondary + // index or global secondary index. Note that if you use the IndexName parameter, + // you must also provide TableName. + IndexName *string `min:"3" type:"string"` + + // The maximum number of items to evaluate (not necessarily the number of matching + // items). If DynamoDB processes the number of items up to the limit while processing + // the results, it stops the operation and returns the matching values up to + // that point, and a key in LastEvaluatedKey to apply in a subsequent operation, + // so that you can pick up where you left off. Also, if the processed dataset + // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation + // and returns the matching values up to the limit, and a key in LastEvaluatedKey + // to apply in a subsequent operation to continue the operation. For more information, + // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. + Limit *int64 `min:"1" type:"integer"` + + // A string that identifies one or more attributes to retrieve from the specified + // table or index. These attributes can include scalars, sets, or elements of + // a JSON document. The attributes in the expression must be separated by commas. + // + // If no attribute names are specified, then all attributes will be returned. + // If any of the requested attributes are not found, they will not appear in + // the result. + // + // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ProjectionExpression *string `type:"string"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // This is a legacy parameter. Use FilterExpression instead. For more information, + // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html) + // in the Amazon DynamoDB Developer Guide. + ScanFilter map[string]*Condition `type:"map"` + + // For a parallel Scan request, Segment identifies an individual segment to + // be scanned by an application worker. + // + // Segment IDs are zero-based, so the first segment is always 0. 
For example, + // if you want to use four application threads to scan a table or an index, + // then the first thread specifies a Segment value of 0, the second thread specifies + // 1, and so on. + // + // The value of LastEvaluatedKey returned from a parallel Scan request must + // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan + // operation. + // + // The value for Segment must be greater than or equal to 0, and less than the + // value provided for TotalSegments. + // + // If you provide Segment, you must also provide TotalSegments. + Segment *int64 `type:"integer"` + + // The attributes to be returned in the result. You can retrieve all item attributes, + // specific item attributes, the count of matching items, or in the case of + // an index, some or all of the attributes projected into the index. + // + // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // table or index. If you query a local secondary index, then for each matching + // item in the index, DynamoDB fetches the entire item from the parent table. + // If the index is configured to project all item attributes, then all of + // the data can be obtained from the local secondary index, and no fetching + // is required. + // + // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is + // configured to project all attributes, this return value is equivalent + // to specifying ALL_ATTRIBUTES. + // + // * COUNT - Returns the number of matching items, rather than the matching + // items themselves. + // + // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. + // This return value is equivalent to specifying AttributesToGet without + // specifying any value for Select. If you query or scan a local secondary + // index and request only attributes that are projected into that index, + // the operation reads only the index and not the table. If any of the requested + // attributes are not projected into the local secondary index, DynamoDB + // fetches each of these attributes from the parent table. This extra fetching + // incurs additional throughput cost and latency. If you query or scan a + // global secondary index, you can only request attributes that are projected + // into the index. Global secondary index queries cannot fetch attributes + // from the parent table. + // + // If neither Select nor AttributesToGet are specified, DynamoDB defaults to + // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when + // accessing an index. You cannot use both Select and AttributesToGet together + // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES. + // (This usage is equivalent to specifying AttributesToGet without any value + // for Select.) + // + // If you use the ProjectionExpression parameter, then the value for Select + // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an + // error. + Select *string `type:"string" enum:"Select"` + + // The name of the table containing the requested items; or, if you provide + // IndexName, the name of the table to which that index belongs. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // For a parallel Scan request, TotalSegments represents the total number of + // segments into which the Scan operation will be divided. 
The value of TotalSegments + // corresponds to the number of application workers that will perform the parallel + // scan. For example, if you want to use four application threads to scan a + // table or an index, specify a TotalSegments value of 4. + // + // The value for TotalSegments must be greater than or equal to 1, and less + // than or equal to 1000000. If you specify a TotalSegments value of 1, the + // Scan operation will be sequential rather than parallel. + // + // If you specify TotalSegments, you must also specify Segment. + TotalSegments *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ScanInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScanInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScanInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScanInput"} + if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1)) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.Limit != nil && *s.Limit < 1 { + invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TotalSegments != nil && *s.TotalSegments < 1 { + invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1)) + } + if s.ScanFilter != nil { + for i, v := range s.ScanFilter { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributesToGet sets the AttributesToGet field's value. +func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput { + s.AttributesToGet = v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *ScanInput) SetConditionalOperator(v string) *ScanInput { + s.ConditionalOperator = &v + return s +} + +// SetConsistentRead sets the ConsistentRead field's value. +func (s *ScanInput) SetConsistentRead(v bool) *ScanInput { + s.ConsistentRead = &v + return s +} + +// SetExclusiveStartKey sets the ExclusiveStartKey field's value. +func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput { + s.ExclusiveStartKey = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput { + s.ExpressionAttributeValues = v + return s +} + +// SetFilterExpression sets the FilterExpression field's value. +func (s *ScanInput) SetFilterExpression(v string) *ScanInput { + s.FilterExpression = &v + return s +} + +// SetIndexName sets the IndexName field's value. +func (s *ScanInput) SetIndexName(v string) *ScanInput { + s.IndexName = &v + return s +} + +// SetLimit sets the Limit field's value. 
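+// Setters return the receiver, so they can be chained. As an illustrative
+// sketch (the table name and client variable are hypothetical), a paginated
+// Scan could look like:
+//
+//	input := (&ScanInput{}).SetTableName("Music").SetLimit(25)
+//	for {
+//		out, err := svc.Scan(input) // svc is a *DynamoDB client
+//		if err != nil {
+//			break
+//		}
+//		// ... process out.Items ...
+//		if len(out.LastEvaluatedKey) == 0 {
+//			break // an empty LastEvaluatedKey marks the last page
+//		}
+//		input.SetExclusiveStartKey(out.LastEvaluatedKey)
+//	}
+//
+// The client's ScanPages method packages the same loop as a callback helper.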
+func (s *ScanInput) SetLimit(v int64) *ScanInput { + s.Limit = &v + return s +} + +// SetProjectionExpression sets the ProjectionExpression field's value. +func (s *ScanInput) SetProjectionExpression(v string) *ScanInput { + s.ProjectionExpression = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetScanFilter sets the ScanFilter field's value. +func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput { + s.ScanFilter = v + return s +} + +// SetSegment sets the Segment field's value. +func (s *ScanInput) SetSegment(v int64) *ScanInput { + s.Segment = &v + return s +} + +// SetSelect sets the Select field's value. +func (s *ScanInput) SetSelect(v string) *ScanInput { + s.Select = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *ScanInput) SetTableName(v string) *ScanInput { + s.TableName = &v + return s +} + +// SetTotalSegments sets the TotalSegments field's value. +func (s *ScanInput) SetTotalSegments(v int64) *ScanInput { + s.TotalSegments = &v + return s +} + +// Represents the output of a Scan operation. +type ScanOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the Scan operation. The data returned includes + // the total provisioned throughput consumed, along with statistics for the + // table and any indexes involved in the operation. ConsumedCapacity is only + // returned if the ReturnConsumedCapacity parameter was specified. For more + // information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // The number of items in the response. + // + // If you set ScanFilter in the request, then Count is the number of items returned + // after the filter was applied, and ScannedCount is the number of matching + // items before the filter was applied. + // + // If you did not use a filter in the request, then Count is the same as ScannedCount. + Count *int64 `type:"integer"` + + // An array of item attributes that match the scan criteria. Each element in + // this array consists of an attribute name and the value for that attribute. + Items []map[string]*AttributeValue `type:"list"` + + // The primary key of the item where the operation stopped, inclusive of the + // previous result set. Use this value to start a new operation, excluding this + // value in the new request. + // + // If LastEvaluatedKey is empty, then the "last page" of results has been processed + // and there is no more data to be retrieved. + // + // If LastEvaluatedKey is not empty, it does not necessarily mean that there + // is more data in the result set. The only way to know when you have reached + // the end of the result set is when LastEvaluatedKey is empty. + LastEvaluatedKey map[string]*AttributeValue `type:"map"` + + // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount + // value with few, or no, Count results indicates an inefficient Scan operation. + // For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count) + // in the Amazon DynamoDB Developer Guide. + // + // If you did not use a filter in the request, then ScannedCount is the same + // as Count. 
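+	// (As an illustrative note: with a filter that matches 10 of 100 evaluated
+	// items, ScannedCount is 100 and Count is 10.)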
+	ScannedCount *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ScanOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ScanOutput) GoString() string {
+	return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput {
+	s.ConsumedCapacity = v
+	return s
+}
+
+// SetCount sets the Count field's value.
+func (s *ScanOutput) SetCount(v int64) *ScanOutput {
+	s.Count = &v
+	return s
+}
+
+// SetItems sets the Items field's value.
+func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput {
+	s.Items = v
+	return s
+}
+
+// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
+func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput {
+	s.LastEvaluatedKey = v
+	return s
+}
+
+// SetScannedCount sets the ScannedCount field's value.
+func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput {
+	s.ScannedCount = &v
+	return s
+}
+
+// Contains the details of the table when the backup was created.
+type SourceTableDetails struct {
+	_ struct{} `type:"structure"`
+
+	// Controls how you are charged for read and write throughput and how you manage
+	// capacity. This setting can be changed later.
+	//
+	//    * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
+	//    using PROVISIONED for predictable workloads.
+	//
+	//    * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
+	//    We recommend using PAY_PER_REQUEST for unpredictable workloads.
+	BillingMode *string `type:"string" enum:"BillingMode"`
+
+	// Number of items in the table. Please note this is an approximate value.
+	ItemCount *int64 `type:"long"`
+
+	// Schema of the table.
+	//
+	// KeySchema is a required field
+	KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
+
+	// Read IOPS and Write IOPS on the table when the backup was created.
+	//
+	// ProvisionedThroughput is a required field
+	ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
+
+	// ARN of the table for which the backup was created.
+	TableArn *string `type:"string"`
+
+	// Time when the source table was created.
+	//
+	// TableCreationDateTime is a required field
+	TableCreationDateTime *time.Time `type:"timestamp" required:"true"`
+
+	// Unique identifier for the table for which the backup was created.
+	//
+	// TableId is a required field
+	TableId *string `type:"string" required:"true"`
+
+	// The name of the table for which the backup was created.
+	//
+	// TableName is a required field
+	TableName *string `min:"3" type:"string" required:"true"`
+
+	// Size of the table in bytes. Please note this is an approximate value.
+	TableSizeBytes *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s SourceTableDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SourceTableDetails) GoString() string {
+	return s.String()
+}
+
+// SetBillingMode sets the BillingMode field's value.
+func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails {
+	s.BillingMode = &v
+	return s
+}
+
+// SetItemCount sets the ItemCount field's value.
+func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails {
+	s.ItemCount = &v
+	return s
+}
+
+// SetKeySchema sets the KeySchema field's value.
+func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails { + s.KeySchema = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails { + s.ProvisionedThroughput = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails { + s.TableArn = &v + return s +} + +// SetTableCreationDateTime sets the TableCreationDateTime field's value. +func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails { + s.TableCreationDateTime = &v + return s +} + +// SetTableId sets the TableId field's value. +func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails { + s.TableSizeBytes = &v + return s +} + +// Contains the details of the features enabled on the table when the backup +// was created. For example, LSIs, GSIs, streams, TTL. +type SourceTableFeatureDetails struct { + _ struct{} `type:"structure"` + + // Represents the GSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema, Projection and ProvisionedThroughput + // for the GSIs on the table at the time of backup. + GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"` + + // Represents the LSI properties for the table when the backup was created. + // It includes the IndexName, KeySchema and Projection for the LSIs on the table + // at the time of backup. + LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"` + + // The description of the server-side encryption status on the table when the + // backup was created. + SSEDescription *SSEDescription `type:"structure"` + + // Stream settings on the table when the backup was created. + StreamDescription *StreamSpecification `type:"structure"` + + // Time to Live settings on the table when the backup was created. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` +} + +// String returns the string representation +func (s SourceTableFeatureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SourceTableFeatureDetails) GoString() string { + return s.String() +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.GlobalSecondaryIndexes = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails { + s.LocalSecondaryIndexes = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails { + s.SSEDescription = v + return s +} + +// SetStreamDescription sets the StreamDescription field's value. 
+func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails { + s.StreamDescription = v + return s +} + +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. +func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails { + s.TimeToLiveDescription = v + return s +} + +// Represents the DynamoDB Streams configuration for a table in DynamoDB. +type StreamSpecification struct { + _ struct{} `type:"structure"` + + // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) + // on the table. + StreamEnabled *bool `type:"boolean"` + + // When an item in the table is modified, StreamViewType determines what information + // is written to the stream for this table. Valid values for StreamViewType + // are: + // + // * KEYS_ONLY - Only the key attributes of the modified item are written + // to the stream. + // + // * NEW_IMAGE - The entire item, as it appears after it was modified, is + // written to the stream. + // + // * OLD_IMAGE - The entire item, as it appeared before it was modified, + // is written to the stream. + // + // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. + StreamViewType *string `type:"string" enum:"StreamViewType"` +} + +// String returns the string representation +func (s StreamSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamSpecification) GoString() string { + return s.String() +} + +// SetStreamEnabled sets the StreamEnabled field's value. +func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification { + s.StreamEnabled = &v + return s +} + +// SetStreamViewType sets the StreamViewType field's value. +func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification { + s.StreamViewType = &v + return s +} + +// Represents the properties of a table. +type TableDescription struct { + _ struct{} `type:"structure"` + + // An array of AttributeDefinition objects. Each of these objects describes + // one attribute in the table and index key schema. + // + // Each AttributeDefinition object in this array is composed of: + // + // * AttributeName - The name of the attribute. + // + // * AttributeType - The data type for the attribute. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // Contains the details for the read/write capacity mode. + BillingModeSummary *BillingModeSummary `type:"structure"` + + // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) + // format. + CreationDateTime *time.Time `type:"timestamp"` + + // The global secondary indexes, if any, on the table. Each index is scoped + // to a given partition key value. Each element is composed of: + // + // * Backfilling - If true, then the index is currently in the backfilling + // phase. Backfilling occurs only when a new global secondary index is added + // to the table; it is the process by which DynamoDB populates the new index + // with data from the table. (This attribute does not appear for indexes + // that were created during a CreateTable operation.) + // + // * IndexName - The name of the global secondary index. + // + // * IndexSizeBytes - The total size of the global secondary index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. 
+ // + // * IndexStatus - The current status of the global secondary index: CREATING + // - The index is being created. UPDATING - The index is being updated. DELETING + // - The index is being deleted. ACTIVE - The index is ready for use. + // + // * ItemCount - The number of items in the global secondary index. DynamoDB + // updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // * KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes are in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * ProvisionedThroughput - The provisioned throughput settings for the + // global secondary index, consisting of read and write capacity units, along + // with data about increases and decreases. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"` + + // The number of items in the specified table. DynamoDB updates this value approximately + // every six hours. Recent changes might not be reflected in this value. + ItemCount *int64 `type:"long"` + + // The primary key structure for the table. Each KeySchemaElement consists of: + // + // * AttributeName - The name of the attribute. + // + // * KeyType - The role of the attribute: HASH - partition key RANGE - sort + // key The partition key of an item is also known as its hash attribute. + // The term "hash attribute" derives from DynamoDB's usage of an internal + // hash function to evenly distribute data items across partitions, based + // on their partition key values. The sort key of an item is also known as + // its range attribute. The term "range attribute" derives from the way DynamoDB + // stores items with the same partition key physically close together, in + // sorted order by the sort key value. + // + // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // in the Amazon DynamoDB Developer Guide. + KeySchema []*KeySchemaElement `min:"1" type:"list"` + + // The Amazon Resource Name (ARN) that uniquely identifies the latest stream + // for this table. + LatestStreamArn *string `min:"37" type:"string"` + + // A timestamp, in ISO 8601 format, for this stream. 
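+	// An illustrative value is 2015-05-11T21:21:33.291 (a hypothetical label,
+	// shown only to indicate the format).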
+ // + // Note that LatestStreamLabel is not a unique identifier for the stream, because + // it is possible that a stream from another table might have the same timestamp. + // However, the combination of the following three elements is guaranteed to + // be unique: + // + // * the AWS customer ID. + // + // * the table name. + // + // * the StreamLabel. + LatestStreamLabel *string `type:"string"` + + // Represents one or more local secondary indexes on the table. Each index is + // scoped to a given partition key value. Tables with one or more local secondary + // indexes are subject to an item collection size limit, where the amount of + // data within a given item collection cannot exceed 10 GB. Each element is + // composed of: + // + // * IndexName - The name of the local secondary index. + // + // * KeySchema - Specifies the complete index key schema. The attribute names + // in the key schema must be between 1 and 255 characters (inclusive). The + // key schema must begin with the same partition key as the table. + // + // * Projection - Specifies attributes that are copied (projected) from the + // table into the index. These are in addition to the primary key attributes + // and index key attributes, which are automatically projected. Each attribute + // specification is composed of: ProjectionType - One of the following: KEYS_ONLY + // - Only the index and primary keys are projected into the index. INCLUDE + // - Only the specified table attributes are projected into the index. The + // list of projected attributes are in NonKeyAttributes. ALL - All of the + // table attributes are projected into the index. NonKeyAttributes - A list + // of one or more non-key attribute names that are projected into the secondary + // index. The total count of attributes provided in NonKeyAttributes, summed + // across all of the secondary indexes, must not exceed 20. If you project + // the same attribute into two different indexes, this counts as two distinct + // attributes when determining the total. + // + // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB + // updates this value approximately every six hours. Recent changes might + // not be reflected in this value. + // + // * ItemCount - Represents the number of items in the index. DynamoDB updates + // this value approximately every six hours. Recent changes might not be + // reflected in this value. + // + // If the table is in the DELETING state, no information about indexes will + // be returned. + LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"` + + // The provisioned throughput settings for the table, consisting of read and + // write capacity units, along with data about increases and decreases. + ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"` + + // Contains details for the restore. + RestoreSummary *RestoreSummary `type:"structure"` + + // The description of the server-side encryption status on the specified table. + SSEDescription *SSEDescription `type:"structure"` + + // The current DynamoDB Streams configuration for the table. + StreamSpecification *StreamSpecification `type:"structure"` + + // The Amazon Resource Name (ARN) that uniquely identifies the table. + TableArn *string `type:"string"` + + // Unique identifier for the table for which the backup was created. + TableId *string `type:"string"` + + // The name of the table. + TableName *string `min:"3" type:"string"` + + // The total size of the specified table, in bytes. 
DynamoDB updates this value + // approximately every six hours. Recent changes might not be reflected in this + // value. + TableSizeBytes *int64 `type:"long"` + + // The current state of the table: + // + // * CREATING - The table is being created. + // + // * UPDATING - The table is being updated. + // + // * DELETING - The table is being deleted. + // + // * ACTIVE - The table is ready for use. + TableStatus *string `type:"string" enum:"TableStatus"` +} + +// String returns the string representation +func (s TableDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TableDescription) GoString() string { + return s.String() +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription { + s.AttributeDefinitions = v + return s +} + +// SetBillingModeSummary sets the BillingModeSummary field's value. +func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription { + s.BillingModeSummary = v + return s +} + +// SetCreationDateTime sets the CreationDateTime field's value. +func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription { + s.CreationDateTime = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription { + s.GlobalSecondaryIndexes = v + return s +} + +// SetItemCount sets the ItemCount field's value. +func (s *TableDescription) SetItemCount(v int64) *TableDescription { + s.ItemCount = &v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription { + s.KeySchema = v + return s +} + +// SetLatestStreamArn sets the LatestStreamArn field's value. +func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription { + s.LatestStreamArn = &v + return s +} + +// SetLatestStreamLabel sets the LatestStreamLabel field's value. +func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription { + s.LatestStreamLabel = &v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription { + s.ProvisionedThroughput = v + return s +} + +// SetRestoreSummary sets the RestoreSummary field's value. +func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription { + s.RestoreSummary = v + return s +} + +// SetSSEDescription sets the SSEDescription field's value. +func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription { + s.SSEDescription = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription { + s.StreamSpecification = v + return s +} + +// SetTableArn sets the TableArn field's value. +func (s *TableDescription) SetTableArn(v string) *TableDescription { + s.TableArn = &v + return s +} + +// SetTableId sets the TableId field's value. 
+func (s *TableDescription) SetTableId(v string) *TableDescription { + s.TableId = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *TableDescription) SetTableName(v string) *TableDescription { + s.TableName = &v + return s +} + +// SetTableSizeBytes sets the TableSizeBytes field's value. +func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription { + s.TableSizeBytes = &v + return s +} + +// SetTableStatus sets the TableStatus field's value. +func (s *TableDescription) SetTableStatus(v string) *TableDescription { + s.TableStatus = &v + return s +} + +// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to +// a single DynamoDB table. +// +// AWS-assigned tag names and values are automatically assigned the aws: prefix, +// which the user cannot assign. AWS-assigned tag names do not count towards +// the tag limit of 50. User-assigned tag names have the prefix user: in the +// Cost Allocation Report. You cannot backdate the application of a tag. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can + // only have up to one tag with the same key. If you try to add an existing + // tag (same key), the existing tag value will be updated to the new value. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value of the tag. Tag values are case-sensitive and can be null. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagResourceInput struct { + _ struct{} `type:"structure"` + + // Identifies the Amazon DynamoDB resource to which tags should be added. This + // value is an Amazon Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // The tags to be assigned to the Amazon DynamoDB resource. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
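+// As a usage note (not generated documentation): validation is performed
+// client-side, before any request is sent, and the SDK's request handlers
+// invoke Validate automatically for inputs that implement it.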
+func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput { + s.Tags = v + return s +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// The description of the Time to Live (TTL) status on the specified table. +type TimeToLiveDescription struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute for items in the table. + AttributeName *string `min:"1" type:"string"` + + // The TTL status for the table. + TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"` +} + +// String returns the string representation +func (s TimeToLiveDescription) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeToLiveDescription) GoString() string { + return s.String() +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription { + s.AttributeName = &v + return s +} + +// SetTimeToLiveStatus sets the TimeToLiveStatus field's value. +func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription { + s.TimeToLiveStatus = &v + return s +} + +// Represents the settings used to enable or disable Time to Live (TTL) for +// the specified table. +type TimeToLiveSpecification struct { + _ struct{} `type:"structure"` + + // The name of the TTL attribute used to store the expiration time for items + // in the table. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + + // Indicates whether TTL is to be enabled (true) or disabled (false) on the + // table. + // + // Enabled is a required field + Enabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s TimeToLiveSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TimeToLiveSpecification) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
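+// As an illustrative sketch, a specification that would pass validation (the
+// attribute name "expiresAt" is hypothetical):
+//
+//	&TimeToLiveSpecification{
+//		AttributeName: aws.String("expiresAt"),
+//		Enabled:       aws.Bool(true),
+//	}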
+func (s *TimeToLiveSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.AttributeName != nil && len(*s.AttributeName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1)) + } + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeName sets the AttributeName field's value. +func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification { + s.AttributeName = &v + return s +} + +// SetEnabled sets the Enabled field's value. +func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification { + s.Enabled = &v + return s +} + +// Specifies an item to be retrieved as part of the transaction. +type TransactGetItem struct { + _ struct{} `type:"structure"` + + // Contains the primary key that identifies the item to get, together with the + // name of the table that contains the item, and optionally the specific attributes + // of the item to retrieve. + // + // Get is a required field + Get *Get `type:"structure" required:"true"` +} + +// String returns the string representation +func (s TransactGetItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransactGetItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactGetItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"} + if s.Get == nil { + invalidParams.Add(request.NewErrParamRequired("Get")) + } + if s.Get != nil { + if err := s.Get.Validate(); err != nil { + invalidParams.AddNested("Get", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGet sets the Get field's value. +func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem { + s.Get = v + return s +} + +type TransactGetItemsInput struct { + _ struct{} `type:"structure"` + + // A value of TOTAL causes consumed capacity information to be returned, and + // a value of NONE prevents that information from being returned. No other value + // is valid. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // An ordered array of up to 25 TransactGetItem objects, each of which contains + // a Get structure. + // + // TransactItems is a required field + TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TransactGetItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransactGetItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
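+// As an illustrative sketch, an input that would pass this validation (table
+// and key names are hypothetical):
+//
+//	&TransactGetItemsInput{
+//		TransactItems: []*TransactGetItem{{
+//			Get: &Get{
+//				TableName: aws.String("Music"),
+//				Key: map[string]*AttributeValue{
+//					"Artist": {S: aws.String("No One You Know")},
+//				},
+//			},
+//		}},
+//	}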
+func (s *TransactGetItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"} + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput { + s.TransactItems = v + return s +} + +type TransactGetItemsOutput struct { + _ struct{} `type:"structure"` + + // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity + // objects, one for each table addressed by TransactGetItem objects in the TransactItems + // parameter. These ConsumedCapacity objects report the read-capacity units + // consumed by the TransactGetItems call in that table. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // An ordered array of up to 25 ItemResponse objects, each of which corresponds + // to the TransactGetItem object in the same position in the TransactItems array. + // Each ItemResponse object contains a Map of the name-value pairs that are + // the projected attributes of the requested item. + // + // If a requested item could not be retrieved, the corresponding ItemResponse + // object is Null, or if the requested item has no projected attributes, the + // corresponding ItemResponse object is an empty Map. + Responses []*ItemResponse `min:"1" type:"list"` +} + +// String returns the string representation +func (s TransactGetItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransactGetItemsOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput { + s.ConsumedCapacity = v + return s +} + +// SetResponses sets the Responses field's value. +func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput { + s.Responses = v + return s +} + +// A list of requests that can perform update, put, delete, or check operations +// on multiple items in one or more tables atomically. +type TransactWriteItem struct { + _ struct{} `type:"structure"` + + // A request to perform a check item operation. + ConditionCheck *ConditionCheck `type:"structure"` + + // A request to perform a DeleteItem operation. + Delete *Delete `type:"structure"` + + // A request to perform a PutItem operation. + Put *Put `type:"structure"` + + // A request to perform an UpdateItem operation. 
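+	//
+	// (Editorial note: a TransactWriteItem carries exactly one of ConditionCheck,
+	// Delete, Put, or Update; the generated Validate below checks the nested
+	// structures but does not itself enforce that constraint.)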
+	Update *Update `type:"structure"`
+}
+
+// String returns the string representation
+func (s TransactWriteItem) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TransactWriteItem) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TransactWriteItem) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"}
+	if s.ConditionCheck != nil {
+		if err := s.ConditionCheck.Validate(); err != nil {
+			invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Delete != nil {
+		if err := s.Delete.Validate(); err != nil {
+			invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Put != nil {
+		if err := s.Put.Validate(); err != nil {
+			invalidParams.AddNested("Put", err.(request.ErrInvalidParams))
+		}
+	}
+	if s.Update != nil {
+		if err := s.Update.Validate(); err != nil {
+			invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetConditionCheck sets the ConditionCheck field's value.
+func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem {
+	s.ConditionCheck = v
+	return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem {
+	s.Delete = v
+	return s
+}
+
+// SetPut sets the Put field's value.
+func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem {
+	s.Put = v
+	return s
+}
+
+// SetUpdate sets the Update field's value.
+func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem {
+	s.Update = v
+	return s
+}
+
+type TransactWriteItemsInput struct {
+	_ struct{} `type:"structure"`
+
+	// Providing a ClientRequestToken makes the call to TransactWriteItems idempotent,
+	// meaning that multiple identical calls have the same effect as one single
+	// call.
+	//
+	// Although multiple identical calls using the same client request token produce
+	// the same result on the server (no side effects), the responses to the calls
+	// might not be the same. If the ReturnConsumedCapacity parameter is set, then
+	// the initial TransactWriteItems call returns the amount of write capacity
+	// units consumed in making the changes. Subsequent TransactWriteItems calls
+	// with the same client token return the number of read capacity units consumed
+	// in reading the item.
+	//
+	// A client request token is valid for 10 minutes after the first request that
+	// uses it is completed. After 10 minutes, any request with the same client
+	// token is treated as a new request. Do not resubmit the same request with
+	// the same client token for more than 10 minutes, or the result might not be
+	// idempotent.
+	//
+	// If you submit a request with the same client token but a change in other
+	// parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch
+	// exception.
+	ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
+
+	// Determines the level of detail about provisioned throughput consumption that
+	// is returned in the response:
+	//
+	//    * INDEXES - The response includes the aggregate ConsumedCapacity for the
+	//    operation, together with ConsumedCapacity for each table and secondary
+	//    index that was accessed.
Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // the response includes statistics about item collections (if any), that were + // modified during the operation and are returned in the response. If set to + // NONE (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // An ordered array of up to 25 TransactWriteItem objects, each of which contains + // a ConditionCheck, Put, Update, or Delete object. These can operate on items + // in different tables, but the tables must reside in the same AWS account and + // Region, and no two of them can operate on the same item. + // + // TransactItems is a required field + TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s TransactWriteItemsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransactWriteItemsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TransactWriteItemsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) + } + if s.TransactItems == nil { + invalidParams.Add(request.NewErrParamRequired("TransactItems")) + } + if s.TransactItems != nil && len(s.TransactItems) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1)) + } + if s.TransactItems != nil { + for i, v := range s.TransactItems { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput { + s.ClientRequestToken = &v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetTransactItems sets the TransactItems field's value. +func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput { + s.TransactItems = v + return s +} + +type TransactWriteItemsOutput struct { + _ struct{} `type:"structure"` + + // The capacity units consumed by the entire TransactWriteItems operation. 
The + // values of the list are ordered according to the ordering of the TransactItems + // request parameter. + ConsumedCapacity []*ConsumedCapacity `type:"list"` + + // A list of tables that were processed by TransactWriteItems and, for each + // table, information about any item collections that were affected by individual + // UpdateItem, PutItem, or DeleteItem operations. + ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"` +} + +// String returns the string representation +func (s TransactWriteItemsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TransactWriteItemsOutput) GoString() string { + return s.String() +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput { + s.ItemCollectionMetrics = v + return s +} + +type UntagResourceInput struct { + _ struct{} `type:"structure"` + + // The DynamoDB resource that the tags will be removed from. This value is an + // Amazon Resource Name (ARN). + // + // ResourceArn is a required field + ResourceArn *string `min:"1" type:"string" required:"true"` + + // A list of tag keys. Existing tags of the resource whose keys are members + // of this list will be removed from the DynamoDB resource. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.ResourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceArn")) + } + if s.ResourceArn != nil && len(*s.ResourceArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetResourceArn sets the ResourceArn field's value. +func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput { + s.ResourceArn = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput { + s.TagKeys = v + return s +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + +// Represents a request to perform an UpdateItem operation. +type Update struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional update to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. 
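+	// For example (an illustrative sketch), with an UpdateExpression of
+	// "SET #S = :s", ExpressionAttributeNames would map {"#S":"Status"} and
+	// ExpressionAttributeValues would supply :s.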
+ ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE, ALL_OLD, UPDATED_OLD, ALL_NEW, UPDATED_NEW. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table for the UpdateItem request. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new value(s) for them. + // + // UpdateExpression is a required field + UpdateExpression *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Update) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Update) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Update) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Update"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.UpdateExpression == nil { + invalidParams.Add(request.NewErrParamRequired("UpdateExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Update) SetConditionExpression(v string) *Update { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Update) SetKey(v map[string]*AttributeValue) *Update { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Update) SetTableName(v string) *Update { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *Update) SetUpdateExpression(v string) *Update { + s.UpdateExpression = &v + return s +} + +type UpdateContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Represents the settings used to enable point in time recovery. 
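+	// As an illustrative sketch, enabling point in time recovery (the table
+	// name is hypothetical):
+	//
+	//	&UpdateContinuousBackupsInput{
+	//		TableName: aws.String("Music"),
+	//		PointInTimeRecoverySpecification: &PointInTimeRecoverySpecification{
+	//			PointInTimeRecoveryEnabled: aws.Bool(true),
+	//		},
+	//	}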
+ // + // PointInTimeRecoverySpecification is a required field + PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"` + + // The name of the table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"} + if s.PointInTimeRecoverySpecification == nil { + invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.PointInTimeRecoverySpecification != nil { + if err := s.PointInTimeRecoverySpecification.Validate(); err != nil { + invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value. +func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput { + s.PointInTimeRecoverySpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput { + s.TableName = &v + return s +} + +type UpdateContinuousBackupsOutput struct { + _ struct{} `type:"structure"` + + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateContinuousBackupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateContinuousBackupsOutput) GoString() string { + return s.String() +} + +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. +func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput { + s.ContinuousBackupsDescription = v + return s +} + +// Represents the new provisioned throughput settings to be applied to a global +// secondary index. +type UpdateGlobalSecondaryIndexAction struct { + _ struct{} `type:"structure"` + + // The name of the global secondary index to be updated. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` + + // Represents the provisioned throughput settings for the specified global secondary + // index. + // + // For current minimum and maximum provisioned throughput values, see Limits + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html) + // in the Amazon DynamoDB Developer Guide. 
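+
+// Editorial sketch (not part of the generated file): enabling point in time
+// recovery with UpdateContinuousBackups, as described above. The table name
+// is a placeholder and the UpdateContinuousBackups client method is assumed
+// from earlier in this file.
+func examplePointInTimeRecovery(svc *DynamoDB) error {
+	spec := &PointInTimeRecoverySpecification{}
+	spec.SetPointInTimeRecoveryEnabled(true)
+
+	input := &UpdateContinuousBackupsInput{}
+	input.SetTableName("Music")
+	input.SetPointInTimeRecoverySpecification(spec)
+
+	out, err := svc.UpdateContinuousBackups(input)
+	if err != nil {
+		return err
+	}
+	// The response echoes the resulting continuous backups state.
+	fmt.Println(out.ContinuousBackupsDescription)
+	return nil
+}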
+ // + // ProvisionedThroughput is a required field + ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateGlobalSecondaryIndexAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalSecondaryIndexAction) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + if s.ProvisionedThroughput == nil { + invalidParams.Add(request.NewErrParamRequired("ProvisionedThroughput")) + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction { + s.IndexName = &v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction { + s.ProvisionedThroughput = v + return s +} + +type UpdateGlobalTableInput struct { + _ struct{} `type:"structure"` + + // The global table name. + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // A list of Regions that should be added or removed from the global table. + // + // ReplicaUpdates is a required field + ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateGlobalTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.ReplicaUpdates == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates")) + } + if s.ReplicaUpdates != nil { + for i, v := range s.ReplicaUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput { + s.GlobalTableName = &v + return s +} + +// SetReplicaUpdates sets the ReplicaUpdates field's value. 
+func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput { + s.ReplicaUpdates = v + return s +} + +type UpdateGlobalTableOutput struct { + _ struct{} `type:"structure"` + + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateGlobalTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalTableOutput) GoString() string { + return s.String() +} + +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput { + s.GlobalTableDescription = v + return s +} + +type UpdateGlobalTableSettingsInput struct { + _ struct{} `type:"structure"` + + // The billing mode of the global table. If GlobalTableBillingMode is not specified, + // the global table defaults to PROVISIONED capacity billing mode. + GlobalTableBillingMode *string `type:"string" enum:"BillingMode"` + + // Represents the settings of a global secondary index for a global table that + // will be modified. + GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"` + + // The name of the global table + // + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` + + // Auto scaling settings for managing provisioned write capacity for the global + // table. + GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"` + + // The maximum number of writes consumed per second before DynamoDB returns + // a ThrottlingException. + GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"` + + // Represents the settings for a global table in a Region that will be modified. + ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"` +} + +// String returns the string representation +func (s UpdateGlobalTableSettingsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalTableSettingsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
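+
+// Editorial sketch (not part of the generated file): adding a replica Region
+// to an existing global table. The table name and Region are illustrative;
+// ReplicaUpdate, CreateReplicaAction, and the UpdateGlobalTable client
+// method are assumed from earlier in this file.
+func exampleAddReplica(svc *DynamoDB) error {
+	replica := &ReplicaUpdate{}
+	replica.SetCreate(&CreateReplicaAction{RegionName: aws.String("eu-west-1")})
+
+	input := &UpdateGlobalTableInput{}
+	input.SetGlobalTableName("Music")
+	input.SetReplicaUpdates([]*ReplicaUpdate{replica})
+
+	_, err := svc.UpdateGlobalTable(input)
+	return err
+}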
+func (s *UpdateGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"} + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1)) + } + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) + } + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + } + if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 { + invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1)) + } + if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1)) + } + if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil { + for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil { + if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil { + invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams)) + } + } + if s.ReplicaSettingsUpdate != nil { + for i, v := range s.ReplicaSettingsUpdate { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableBillingMode = &v + return s +} + +// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v + return s +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput { + s.GlobalTableName = &v + return s +} + +// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v + return s +} + +// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value. 
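+
+// Editorial sketch (not part of the generated file): raising the provisioned
+// write capacity of a global table. The table name and the value 10 are
+// arbitrary; the UpdateGlobalTableSettings client method is assumed from
+// earlier in this file.
+func exampleGlobalTableSettings(svc *DynamoDB) error {
+	input := &UpdateGlobalTableSettingsInput{}
+	input.SetGlobalTableName("Music")
+	input.SetGlobalTableProvisionedWriteCapacityUnits(10)
+
+	_, err := svc.UpdateGlobalTableSettings(input)
+	return err
+}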
+func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput { + s.GlobalTableProvisionedWriteCapacityUnits = &v + return s +} + +// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value. +func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput { + s.ReplicaSettingsUpdate = v + return s +} + +type UpdateGlobalTableSettingsOutput struct { + _ struct{} `type:"structure"` + + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` +} + +// String returns the string representation +func (s UpdateGlobalTableSettingsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGlobalTableSettingsOutput) GoString() string { + return s.String() +} + +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput { + s.GlobalTableName = &v + return s +} + +// SetReplicaSettings sets the ReplicaSettings field's value. +func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} + +// Represents the input of an UpdateItem operation. +type UpdateItemInput struct { + _ struct{} `type:"structure"` + + // This is a legacy parameter. Use UpdateExpression instead. For more information, + // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html) + // in the Amazon DynamoDB Developer Guide. + AttributeUpdates map[string]*AttributeValueUpdate `type:"map"` + + // A condition that must be satisfied in order for a conditional update to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Specifying Conditions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ConditionExpression *string `type:"string"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. 
+ // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information about expression attribute names, see Specifying Item + // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be updated. Each element consists of an attribute + // name and a value for that attribute. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. 
+ ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` + + // Determines whether item collection metrics are returned. If set to SIZE, + // statistics about item collections, if any, that were modified during the + // operation are returned in the response. If set to NONE (the default), no + // statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appear before + // or after they are updated. For UpdateItem, the valid values are: + // + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // * ALL_OLD - Returns all of the attributes of the item, as they appeared + // before the UpdateItem operation. + // + // * UPDATED_OLD - Returns only the updated attributes, as they appeared + // before the UpdateItem operation. + // + // * ALL_NEW - Returns all of the attributes of the item, as they appear + // after the UpdateItem operation. + // + // * UPDATED_NEW - Returns only the updated attributes, as they appear after + // the UpdateItem operation. + // + // There is no additional cost associated with requesting a return value aside + // from the small network and processing overhead of receiving a larger response. + // No read capacity units are consumed. + // + // The values returned are strongly consistent. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table containing the item to update. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // An expression that defines one or more attributes to be updated, the action + // to be performed on them, and new values for them. + // + // The following action values are available for UpdateExpression. + // + // * SET - Adds one or more attributes and values to an item. If any of these + // attributes already exist, they are replaced by the new values. You can + // also use SET to add or subtract from an attribute that is of type Number. + // For example: SET myNum = myNum + :val SET supports the following functions: + // if_not_exists (path, operand) - if the item does not contain an attribute + // at the specified path, then if_not_exists evaluates to operand; otherwise, + // it evaluates to path. You can use this function to avoid overwriting an + // attribute that may already be present in the item. list_append (operand, + // operand) - evaluates to a list with a new element added to it. You can + // append the new element to the start or the end of the list by reversing + // the order of the operands. These function names are case-sensitive. + // + // * REMOVE - Removes one or more attributes from an item. + // + // * ADD - Adds the specified value to the item, if the attribute does not + // already exist. If the attribute does exist, then the behavior of ADD depends + // on the data type of the attribute: If the existing attribute is a number, + // and if Value is also a number, then Value is mathematically added to the + // existing attribute. If Value is a negative number, then it is subtracted + // from the existing attribute. If you use ADD to increment or decrement + // a number value for an item that doesn't exist before the update, DynamoDB + // uses 0 as the initial value.
Similarly, if you use ADD for an existing + // item to increment or decrement an attribute value that doesn't exist before + // the update, DynamoDB uses 0 as the initial value. For example, suppose + // that the item you want to update doesn't have an attribute named itemcount, + // but you decide to ADD the number 3 to this attribute anyway. DynamoDB + // will create the itemcount attribute, set its initial value to 0, and finally + // add 3 to it. The result will be a new itemcount attribute in the item, + // with a value of 3. If the existing data type is a set and if Value is + // also a set, then Value is added to the existing set. For example, if the + // attribute value is the set [1,2], and the ADD action specified [3], then + // the final attribute value is [1,2,3]. An error occurs if an ADD action + // is specified for a set attribute and the attribute type specified does + // not match the existing set type. Both sets must have the same primitive + // data type. For example, if the existing data type is a set of strings, + // the Value must also be a set of strings. The ADD action only supports + // Number and set data types. In addition, ADD can only be used on top-level + // attributes, not nested attributes. + // + // * DELETE - Deletes an element from a set. If a set of values is specified, + // then those values are subtracted from the old set. For example, if the + // attribute value was the set [a,b,c] and the DELETE action specifies [a,c], + // then the final attribute value is [b]. Specifying an empty set is an error. + // The DELETE action only supports set data types. In addition, DELETE can + // only be used on top-level attributes, not nested attributes. + // + // You can have many actions in a single expression, such as the following: + // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5 + // + // For more information on update expressions, see Modifying Items and Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) + // in the Amazon DynamoDB Developer Guide. + UpdateExpression *string `type:"string"` +} + +// String returns the string representation +func (s UpdateItemInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeUpdates sets the AttributeUpdates field's value. +func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput { + s.AttributeUpdates = v + return s +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. 
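+
+// Editorial sketch (not part of the generated file): an UpdateItem call that
+// exercises the #name / :value placeholder mechanics documented above.
+// "Percentile" is the reserved-word example from the field documentation;
+// the table and key are placeholders, and the UpdateItem client method is
+// assumed from earlier in this file.
+func exampleUpdateItem(svc *DynamoDB) error {
+	input := &UpdateItemInput{}
+	input.SetTableName("GameScores")
+	input.SetKey(map[string]*AttributeValue{
+		"PlayerId": {S: aws.String("player-1")},
+	})
+	// SET the reserved-word attribute through the #P name placeholder and
+	// ADD 1 to a counter in the same expression.
+	input.SetUpdateExpression("SET #P = :val ADD GamesPlayed :inc")
+	input.SetExpressionAttributeNames(map[string]*string{
+		"#P": aws.String("Percentile"),
+	})
+	input.SetExpressionAttributeValues(map[string]*AttributeValue{
+		":val": {N: aws.String("90.5")},
+		":inc": {N: aws.String("1")},
+	})
+	input.SetReturnValues(ReturnValueUpdatedNew)
+
+	out, err := svc.UpdateItem(input)
+	if err != nil {
+		return err
+	}
+	// With UPDATED_NEW, Attributes holds only the attributes this call changed.
+	fmt.Println(out.Attributes)
+	return nil
+}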
+func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput { + s.Expected = v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput { + s.Key = v + return s +} + +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput { + s.ReturnValues = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput { + s.TableName = &v + return s +} + +// SetUpdateExpression sets the UpdateExpression field's value. +func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput { + s.UpdateExpression = &v + return s +} + +// Represents the output of an UpdateItem operation. +type UpdateItemOutput struct { + _ struct{} `type:"structure"` + + // A map of attribute values as they appear before or after the UpdateItem operation, + // as determined by the ReturnValues parameter. + // + // The Attributes map is only present if ReturnValues was specified as something + // other than NONE in the request. Each element represents one attribute. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the UpdateItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Throughput (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the UpdateItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. + // + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. 
+ // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. + ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` +} + +// String returns the string representation +func (s UpdateItemOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateItemOutput) GoString() string { + return s.String() +} + +// SetAttributes sets the Attributes field's value. +func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput { + s.Attributes = v + return s +} + +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput { + s.ConsumedCapacity = v + return s +} + +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput { + s.ItemCollectionMetrics = v + return s +} + +// Represents the input of an UpdateTable operation. +type UpdateTableInput struct { + _ struct{} `type:"structure"` + + // An array of attributes that describe the key schema for the table and indexes. + // If you are adding a new global secondary index to the table, AttributeDefinitions + // must include the key element(s) of the new index. + AttributeDefinitions []*AttributeDefinition `type:"list"` + + // Controls how you are charged for read and write throughput and how you manage + // capacity. When switching from pay-per-request to provisioned capacity, initial + // provisioned capacity values must be set. The initial provisioned capacity + // values are estimated based on the consumed read and write capacity of your + // table and global secondary indexes over the past 30 minutes. + // + // * PROVISIONED - Sets the billing mode to PROVISIONED. We recommend using + // PROVISIONED for predictable workloads. + // + // * PAY_PER_REQUEST - Sets the billing mode to PAY_PER_REQUEST. We recommend + // using PAY_PER_REQUEST for unpredictable workloads. + BillingMode *string `type:"string" enum:"BillingMode"` + + // An array of one or more global secondary indexes for the table. For each + // index in the array, you can request one action: + // + // * Create - add a new global secondary index to the table. + // + // * Update - modify the provisioned throughput settings of an existing global + // secondary index. + // + // * Delete - remove a global secondary index from the table. + // + // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // in the Amazon DynamoDB Developer Guide. + GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` + + // The new provisioned throughput settings for the specified table or index. + ProvisionedThroughput *ProvisionedThroughput `type:"structure"` + + // The new server-side encryption settings for the specified table. + SSESpecification *SSESpecification `type:"structure"` + + // Represents the DynamoDB Streams configuration for the table. 
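+
+// Editorial sketch (not part of the generated file): requesting and reading
+// consumed capacity on an UpdateItem response, per the ConsumedCapacity and
+// ItemCollectionMetrics notes above. The input is assumed to be populated as
+// in the previous sketch.
+func exampleConsumedCapacity(svc *DynamoDB, input *UpdateItemInput) error {
+	input.SetReturnConsumedCapacity(ReturnConsumedCapacityTotal)
+	out, err := svc.UpdateItem(input)
+	if err != nil {
+		return err
+	}
+	if out.ConsumedCapacity != nil {
+		fmt.Println("capacity units:", aws.Float64Value(out.ConsumedCapacity.CapacityUnits))
+	}
+	return nil
+}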
+ // + // You receive a ResourceInUseException if you try to enable a stream on a table + // that already has a stream, or if you try to disable a stream on a table that + // doesn't have a stream. + StreamSpecification *StreamSpecification `type:"structure"` + + // The name of the table to be updated. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateTableInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.AttributeDefinitions != nil { + for i, v := range s.AttributeDefinitions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.GlobalSecondaryIndexUpdates != nil { + for i, v := range s.GlobalSecondaryIndexUpdates { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams)) + } + } + } + if s.ProvisionedThroughput != nil { + if err := s.ProvisionedThroughput.Validate(); err != nil { + invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value. +func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput { + s.GlobalSecondaryIndexUpdates = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput { + s.TableName = &v + return s +} + +// Represents the output of an UpdateTable operation. +type UpdateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. 
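+
+// Editorial sketch (not part of the generated file): switching a table to
+// on-demand billing and enabling a stream in one UpdateTable call. The table
+// name is a placeholder; StreamSpecification and the UpdateTable client
+// method are assumed from earlier in this file. As documented above,
+// enabling a stream on a table that already has one returns
+// ResourceInUseException.
+func exampleUpdateTable(svc *DynamoDB) error {
+	stream := &StreamSpecification{}
+	stream.SetStreamEnabled(true)
+	stream.SetStreamViewType(StreamViewTypeNewAndOldImages)
+
+	input := &UpdateTableInput{}
+	input.SetTableName("Music")
+	input.SetBillingMode(BillingModePayPerRequest)
+	input.SetStreamSpecification(stream)
+
+	_, err := svc.UpdateTable(input)
+	return err
+}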
+ TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s UpdateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput { + s.TableDescription = v + return s +} + +// Represents the input of an UpdateTimeToLive operation. +type UpdateTimeToLiveInput struct { + _ struct{} `type:"structure"` + + // The name of the table to be configured. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` + + // Represents the settings used to enable or disable Time to Live for the specified + // table. + // + // TimeToLiveSpecification is a required field + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UpdateTimeToLiveInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTimeToLiveInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + if s.TimeToLiveSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification")) + } + if s.TimeToLiveSpecification != nil { + if err := s.TimeToLiveSpecification.Validate(); err != nil { + invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput { + s.TableName = &v + return s +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput { + s.TimeToLiveSpecification = v + return s +} + +type UpdateTimeToLiveOutput struct { + _ struct{} `type:"structure"` + + // Represents the output of an UpdateTimeToLive operation. + TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"` +} + +// String returns the string representation +func (s UpdateTimeToLiveOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateTimeToLiveOutput) GoString() string { + return s.String() +} + +// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value. +func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput { + s.TimeToLiveSpecification = v + return s +} + +// Represents an operation to perform - either DeleteItem or PutItem. You can +// only request one of these operations, not both, in a single WriteRequest. +// If you do need to perform both of these operations, you will need to provide +// two separate WriteRequest objects. 
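+
+// Editorial sketch (not part of the generated file): enabling TTL on a
+// table. The table and the epoch-seconds attribute name are placeholders;
+// TimeToLiveSpecification and the UpdateTimeToLive client method are assumed
+// from earlier in this file.
+func exampleEnableTTL(svc *DynamoDB) error {
+	ttl := &TimeToLiveSpecification{}
+	ttl.SetAttributeName("ExpiresAt")
+	ttl.SetEnabled(true)
+
+	input := &UpdateTimeToLiveInput{}
+	input.SetTableName("Sessions")
+	input.SetTimeToLiveSpecification(ttl)
+
+	_, err := svc.UpdateTimeToLive(input)
+	return err
+}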
+type WriteRequest struct { + _ struct{} `type:"structure"` + + // A request to perform a DeleteItem operation. + DeleteRequest *DeleteRequest `type:"structure"` + + // A request to perform a PutItem operation. + PutRequest *PutRequest `type:"structure"` +} + +// String returns the string representation +func (s WriteRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WriteRequest) GoString() string { + return s.String() +} + +// SetDeleteRequest sets the DeleteRequest field's value. +func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest { + s.DeleteRequest = v + return s +} + +// SetPutRequest sets the PutRequest field's value. +func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest { + s.PutRequest = v + return s +} + +const ( + // AttributeActionAdd is a AttributeAction enum value + AttributeActionAdd = "ADD" + + // AttributeActionPut is a AttributeAction enum value + AttributeActionPut = "PUT" + + // AttributeActionDelete is a AttributeAction enum value + AttributeActionDelete = "DELETE" +) + +const ( + // BackupStatusCreating is a BackupStatus enum value + BackupStatusCreating = "CREATING" + + // BackupStatusDeleted is a BackupStatus enum value + BackupStatusDeleted = "DELETED" + + // BackupStatusAvailable is a BackupStatus enum value + BackupStatusAvailable = "AVAILABLE" +) + +const ( + // BackupTypeUser is a BackupType enum value + BackupTypeUser = "USER" + + // BackupTypeSystem is a BackupType enum value + BackupTypeSystem = "SYSTEM" + + // BackupTypeAwsBackup is a BackupType enum value + BackupTypeAwsBackup = "AWS_BACKUP" +) + +const ( + // BackupTypeFilterUser is a BackupTypeFilter enum value + BackupTypeFilterUser = "USER" + + // BackupTypeFilterSystem is a BackupTypeFilter enum value + BackupTypeFilterSystem = "SYSTEM" + + // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value + BackupTypeFilterAwsBackup = "AWS_BACKUP" + + // BackupTypeFilterAll is a BackupTypeFilter enum value + BackupTypeFilterAll = "ALL" +) + +const ( + // BillingModeProvisioned is a BillingMode enum value + BillingModeProvisioned = "PROVISIONED" + + // BillingModePayPerRequest is a BillingMode enum value + BillingModePayPerRequest = "PAY_PER_REQUEST" +) + +const ( + // ComparisonOperatorEq is a ComparisonOperator enum value + ComparisonOperatorEq = "EQ" + + // ComparisonOperatorNe is a ComparisonOperator enum value + ComparisonOperatorNe = "NE" + + // ComparisonOperatorIn is a ComparisonOperator enum value + ComparisonOperatorIn = "IN" + + // ComparisonOperatorLe is a ComparisonOperator enum value + ComparisonOperatorLe = "LE" + + // ComparisonOperatorLt is a ComparisonOperator enum value + ComparisonOperatorLt = "LT" + + // ComparisonOperatorGe is a ComparisonOperator enum value + ComparisonOperatorGe = "GE" + + // ComparisonOperatorGt is a ComparisonOperator enum value + ComparisonOperatorGt = "GT" + + // ComparisonOperatorBetween is a ComparisonOperator enum value + ComparisonOperatorBetween = "BETWEEN" + + // ComparisonOperatorNotNull is a ComparisonOperator enum value + ComparisonOperatorNotNull = "NOT_NULL" + + // ComparisonOperatorNull is a ComparisonOperator enum value + ComparisonOperatorNull = "NULL" + + // ComparisonOperatorContains is a ComparisonOperator enum value + ComparisonOperatorContains = "CONTAINS" + + // ComparisonOperatorNotContains is a ComparisonOperator enum value + ComparisonOperatorNotContains = "NOT_CONTAINS" + + // ComparisonOperatorBeginsWith is a ComparisonOperator enum value 
+ ComparisonOperatorBeginsWith = "BEGINS_WITH" +) + +const ( + // ConditionalOperatorAnd is a ConditionalOperator enum value + ConditionalOperatorAnd = "AND" + + // ConditionalOperatorOr is a ConditionalOperator enum value + ConditionalOperatorOr = "OR" +) + +const ( + // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusEnabled = "ENABLED" + + // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value + ContinuousBackupsStatusDisabled = "DISABLED" +) + +const ( + // GlobalTableStatusCreating is a GlobalTableStatus enum value + GlobalTableStatusCreating = "CREATING" + + // GlobalTableStatusActive is a GlobalTableStatus enum value + GlobalTableStatusActive = "ACTIVE" + + // GlobalTableStatusDeleting is a GlobalTableStatus enum value + GlobalTableStatusDeleting = "DELETING" + + // GlobalTableStatusUpdating is a GlobalTableStatus enum value + GlobalTableStatusUpdating = "UPDATING" +) + +const ( + // IndexStatusCreating is a IndexStatus enum value + IndexStatusCreating = "CREATING" + + // IndexStatusUpdating is a IndexStatus enum value + IndexStatusUpdating = "UPDATING" + + // IndexStatusDeleting is a IndexStatus enum value + IndexStatusDeleting = "DELETING" + + // IndexStatusActive is a IndexStatus enum value + IndexStatusActive = "ACTIVE" +) + +const ( + // KeyTypeHash is a KeyType enum value + KeyTypeHash = "HASH" + + // KeyTypeRange is a KeyType enum value + KeyTypeRange = "RANGE" +) + +const ( + // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusEnabled = "ENABLED" + + // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value + PointInTimeRecoveryStatusDisabled = "DISABLED" +) + +const ( + // ProjectionTypeAll is a ProjectionType enum value + ProjectionTypeAll = "ALL" + + // ProjectionTypeKeysOnly is a ProjectionType enum value + ProjectionTypeKeysOnly = "KEYS_ONLY" + + // ProjectionTypeInclude is a ProjectionType enum value + ProjectionTypeInclude = "INCLUDE" +) + +const ( + // ReplicaStatusCreating is a ReplicaStatus enum value + ReplicaStatusCreating = "CREATING" + + // ReplicaStatusUpdating is a ReplicaStatus enum value + ReplicaStatusUpdating = "UPDATING" + + // ReplicaStatusDeleting is a ReplicaStatus enum value + ReplicaStatusDeleting = "DELETING" + + // ReplicaStatusActive is a ReplicaStatus enum value + ReplicaStatusActive = "ACTIVE" +) + +// Determines the level of detail about provisioned throughput consumption that +// is returned in the response: +// +// * INDEXES - The response includes the aggregate ConsumedCapacity for the +// operation, together with ConsumedCapacity for each table and secondary +// index that was accessed. Note that some operations, such as GetItem and +// BatchGetItem, do not access any indexes at all. In these cases, specifying +// INDEXES will only return ConsumedCapacity information for table(s). +// +// * TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. +// +// * NONE - No ConsumedCapacity details are included in the response. 
+const ( + // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityIndexes = "INDEXES" + + // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityTotal = "TOTAL" + + // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value + ReturnConsumedCapacityNone = "NONE" +) + +const ( + // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsSize = "SIZE" + + // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value + ReturnItemCollectionMetricsNone = "NONE" +) + +const ( + // ReturnValueNone is a ReturnValue enum value + ReturnValueNone = "NONE" + + // ReturnValueAllOld is a ReturnValue enum value + ReturnValueAllOld = "ALL_OLD" + + // ReturnValueUpdatedOld is a ReturnValue enum value + ReturnValueUpdatedOld = "UPDATED_OLD" + + // ReturnValueAllNew is a ReturnValue enum value + ReturnValueAllNew = "ALL_NEW" + + // ReturnValueUpdatedNew is a ReturnValue enum value + ReturnValueUpdatedNew = "UPDATED_NEW" +) + +const ( + // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD" + + // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value + ReturnValuesOnConditionCheckFailureNone = "NONE" +) + +const ( + // SSEStatusEnabling is a SSEStatus enum value + SSEStatusEnabling = "ENABLING" + + // SSEStatusEnabled is a SSEStatus enum value + SSEStatusEnabled = "ENABLED" + + // SSEStatusDisabling is a SSEStatus enum value + SSEStatusDisabling = "DISABLING" + + // SSEStatusDisabled is a SSEStatus enum value + SSEStatusDisabled = "DISABLED" + + // SSEStatusUpdating is a SSEStatus enum value + SSEStatusUpdating = "UPDATING" +) + +const ( + // SSETypeAes256 is a SSEType enum value + SSETypeAes256 = "AES256" + + // SSETypeKms is a SSEType enum value + SSETypeKms = "KMS" +) + +const ( + // ScalarAttributeTypeS is a ScalarAttributeType enum value + ScalarAttributeTypeS = "S" + + // ScalarAttributeTypeN is a ScalarAttributeType enum value + ScalarAttributeTypeN = "N" + + // ScalarAttributeTypeB is a ScalarAttributeType enum value + ScalarAttributeTypeB = "B" +) + +const ( + // SelectAllAttributes is a Select enum value + SelectAllAttributes = "ALL_ATTRIBUTES" + + // SelectAllProjectedAttributes is a Select enum value + SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES" + + // SelectSpecificAttributes is a Select enum value + SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES" + + // SelectCount is a Select enum value + SelectCount = "COUNT" +) + +const ( + // StreamViewTypeNewImage is a StreamViewType enum value + StreamViewTypeNewImage = "NEW_IMAGE" + + // StreamViewTypeOldImage is a StreamViewType enum value + StreamViewTypeOldImage = "OLD_IMAGE" + + // StreamViewTypeNewAndOldImages is a StreamViewType enum value + StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES" + + // StreamViewTypeKeysOnly is a StreamViewType enum value + StreamViewTypeKeysOnly = "KEYS_ONLY" +) + +const ( + // TableStatusCreating is a TableStatus enum value + TableStatusCreating = "CREATING" + + // TableStatusUpdating is a TableStatus enum value + TableStatusUpdating = "UPDATING" + + // TableStatusDeleting is a TableStatus enum value + TableStatusDeleting = "DELETING" + + // TableStatusActive is a TableStatus enum value + TableStatusActive = "ACTIVE" +) + +const ( + // TimeToLiveStatusEnabling is a TimeToLiveStatus enum value + 
TimeToLiveStatusEnabling = "ENABLING" + + // TimeToLiveStatusDisabling is a TimeToLiveStatus enum value + TimeToLiveStatusDisabling = "DISABLING" + + // TimeToLiveStatusEnabled is a TimeToLiveStatus enum value + TimeToLiveStatusEnabled = "ENABLED" + + // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value + TimeToLiveStatusDisabled = "DISABLED" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go new file mode 100644 index 00000000..333e61bf --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go @@ -0,0 +1,109 @@ +package dynamodb + +import ( + "bytes" + "hash/crc32" + "io" + "io/ioutil" + "math" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +type retryer struct { + client.DefaultRetryer +} + +func (d retryer) RetryRules(r *request.Request) time.Duration { + delay := time.Duration(math.Pow(2, float64(r.RetryCount))) * 50 + return delay * time.Millisecond +} + +func init() { + initClient = func(c *client.Client) { + if c.Config.Retryer == nil { + // Only override the retryer with a custom one if the config + // does not already contain a retryer + setCustomRetryer(c) + } + + c.Handlers.Build.PushBack(disableCompression) + c.Handlers.Unmarshal.PushFront(validateCRC32) + } +} + +func setCustomRetryer(c *client.Client) { + maxRetries := aws.IntValue(c.Config.MaxRetries) + if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 10 + } + + c.Retryer = retryer{ + DefaultRetryer: client.DefaultRetryer{ + NumMaxRetries: maxRetries, + }, + } +} + +func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) { + if length < 0 { + length = 0 + } + buf := bytes.NewBuffer(make([]byte, 0, length)) + + if _, err = buf.ReadFrom(b); err != nil { + return nil, err + } + if err = b.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func disableCompression(r *request.Request) { + r.HTTPRequest.Header.Set("Accept-Encoding", "identity") +} + +func validateCRC32(r *request.Request) { + if r.Error != nil { + return // already have an error, no need to verify CRC + } + + // Checksum validation is off, skip + if aws.BoolValue(r.Config.DisableComputeChecksums) { + return + } + + // Try to get CRC from response + header := r.HTTPResponse.Header.Get("X-Amz-Crc32") + if header == "" { + return // No header, skip + } + + expected, err := strconv.ParseUint(header, 10, 32) + if err != nil { + return // Could not determine CRC value, skip + } + + buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength) + if err != nil { // failed to read the response body, skip + return + } + + // Reset body for subsequent reads + r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes())) + + // Compute the CRC checksum + crc := crc32.ChecksumIEEE(buf.Bytes()) + + if crc != uint32(expected) { + // CRC does not match, set a retryable error + r.Retryable = aws.Bool(true) + r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go new file mode 100644 index 00000000..f244a733 --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go @@ -0,0 +1,45 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package dynamodb provides the client and types for making API +// requests to Amazon DynamoDB. +// +// Amazon DynamoDB is a fully managed NoSQL database service that provides fast +// and predictable performance with seamless scalability. DynamoDB lets you +// offload the administrative burdens of operating and scaling a distributed +// database, so that you don't have to worry about hardware provisioning, setup +// and configuration, replication, software patching, or cluster scaling. +// +// With DynamoDB, you can create database tables that can store and retrieve +// any amount of data, and serve any level of request traffic. You can scale +// up or scale down your tables' throughput capacity without downtime or performance +// degradation, and use the AWS Management Console to monitor resource utilization +// and performance metrics. +// +// DynamoDB automatically spreads the data and traffic for your tables over +// a sufficient number of servers to handle your throughput and storage requirements, +// while maintaining consistent and fast performance. All of your data is stored +// on solid state disks (SSDs) and automatically replicated across multiple +// Availability Zones in an AWS region, providing built-in high availability +// and data durability. +// +// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service. +// +// See dynamodb package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/ +// +// Using the Client +// +// To contact Amazon DynamoDB with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon DynamoDB client DynamoDB for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New +package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go new file mode 100644 index 00000000..013e9b1d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go @@ -0,0 +1,27 @@ +/* +AttributeValue Marshaling and Unmarshaling Helpers + +Utility helpers to marshal and unmarshal AttributeValue to and +from Go types can be found in the dynamodbattribute sub package. This package +provides specialized functions for the common ways of working with +AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and +directly with *AttributeValue. This is helpful for marshaling Go types for API +operations such as PutItem, and unmarshaling Query and Scan APIs' responses. + +See the dynamodbattribute package documentation for more information. 
+https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/ + +Expression Builders + +The expression package provides utility types and functions to build DynamoDB +expressions for type-safe construction of API ExpressionAttributeNames and +ExpressionAttributeValues. + +The package represents the various DynamoDB Expressions as structs named +accordingly. For example, ConditionBuilder represents a DynamoDB Condition +Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on. + +See the expression package documentation for more information. +https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/ +*/ +package dynamodb diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go new file mode 100644 index 00000000..71f3e7d3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -0,0 +1,270 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +const ( + + // ErrCodeBackupInUseException for service response error code + // "BackupInUseException". + // + // There is another ongoing conflicting backup control plane operation on the + // table. The backup is either being created, deleted or restored to a table. + ErrCodeBackupInUseException = "BackupInUseException" + + // ErrCodeBackupNotFoundException for service response error code + // "BackupNotFoundException". + // + // Backup not found for the given BackupARN. + ErrCodeBackupNotFoundException = "BackupNotFoundException" + + // ErrCodeConditionalCheckFailedException for service response error code + // "ConditionalCheckFailedException". + // + // A condition specified in the operation could not be evaluated. + ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException" + + // ErrCodeContinuousBackupsUnavailableException for service response error code + // "ContinuousBackupsUnavailableException". + // + // Backups have not yet been enabled for this table. + ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + + // ErrCodeGlobalTableAlreadyExistsException for service response error code + // "GlobalTableAlreadyExistsException". + // + // The specified global table already exists. + ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException" + + // ErrCodeGlobalTableNotFoundException for service response error code + // "GlobalTableNotFoundException". + // + // The specified global table does not exist. + ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException" + + // ErrCodeIdempotentParameterMismatchException for service response error code + // "IdempotentParameterMismatchException". + // + // DynamoDB rejected the request because you retried a request with a different + // payload but with an idempotent token that was already used. + ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException" + + // ErrCodeIndexNotFoundException for service response error code + // "IndexNotFoundException". + // + // The operation tried to access a nonexistent index. + ErrCodeIndexNotFoundException = "IndexNotFoundException" + + // ErrCodeInternalServerError for service response error code + // "InternalServerError". + // + // An error occurred on the server side. + ErrCodeInternalServerError = "InternalServerError" + + // ErrCodeInvalidRestoreTimeException for service response error code + // "InvalidRestoreTimeException".
+ // + // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime + // and LatestRestorableDateTime. + ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException" + + // ErrCodeItemCollectionSizeLimitExceededException for service response error code + // "ItemCollectionSizeLimitExceededException". + // + // An item collection is too large. This exception is only returned for tables + // that have one or more local secondary indexes. + ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceededException". + // + // There is no limit to the number of daily on-demand backups that can be taken. + // + // Up to 50 simultaneous table operations are allowed per account. These operations + // include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup, + // and RestoreTableToPointInTime. + // + // The only exception is when you are creating a table with one or more secondary + // indexes. You can have up to 25 such requests running at a time; however, + // if the table or index specifications are complex, DynamoDB might temporarily + // reduce the number of concurrent operations. + // + // There is a soft account limit of 256 tables. + ErrCodeLimitExceededException = "LimitExceededException" + + // ErrCodePointInTimeRecoveryUnavailableException for service response error code + // "PointInTimeRecoveryUnavailableException". + // + // Point in time recovery has not yet been enabled for this source table. + ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException" + + // ErrCodeProvisionedThroughputExceededException for service response error code + // "ProvisionedThroughputExceededException". + // + // Your request rate is too high. The AWS SDKs for DynamoDB automatically retry + // requests that receive this exception. Your request is eventually successful, + // unless your retry queue is too large to finish. Reduce the frequency of requests + // and use exponential backoff. For more information, go to Error Retries and + // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) + // in the Amazon DynamoDB Developer Guide. + ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException" + + // ErrCodeReplicaAlreadyExistsException for service response error code + // "ReplicaAlreadyExistsException". + // + // The specified replica is already part of the global table. + ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException" + + // ErrCodeReplicaNotFoundException for service response error code + // "ReplicaNotFoundException". + // + // The specified replica is no longer part of the global table. + ErrCodeReplicaNotFoundException = "ReplicaNotFoundException" + + // ErrCodeRequestLimitExceeded for service response error code + // "RequestLimitExceeded". + // + // Throughput exceeds the current throughput limit for your account. Please + // contact AWS Support (https://aws.amazon.com/support) to request + // a limit increase. + ErrCodeRequestLimitExceeded = "RequestLimitExceeded" + + // ErrCodeResourceInUseException for service response error code + // "ResourceInUseException". + // + // The operation conflicts with the resource's availability.
For example, you + // attempted to recreate an existing table, or tried to delete a table currently + // in the CREATING state. + ErrCodeResourceInUseException = "ResourceInUseException" + + // ErrCodeResourceNotFoundException for service response error code + // "ResourceNotFoundException". + // + // The operation tried to access a nonexistent table or index. The resource + // might not be specified correctly, or its status might not be ACTIVE. + ErrCodeResourceNotFoundException = "ResourceNotFoundException" + + // ErrCodeTableAlreadyExistsException for service response error code + // "TableAlreadyExistsException". + // + // A target table with the specified name already exists. + ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException" + + // ErrCodeTableInUseException for service response error code + // "TableInUseException". + // + // A target table with the specified name is either being created or deleted. + ErrCodeTableInUseException = "TableInUseException" + + // ErrCodeTableNotFoundException for service response error code + // "TableNotFoundException". + // + // A source table with the name TableName does not currently exist within the + // subscriber's account. + ErrCodeTableNotFoundException = "TableNotFoundException" + + // ErrCodeTransactionCanceledException for service response error code + // "TransactionCanceledException". + // + // The entire transaction request was canceled. + // + // DynamoDB cancels a TransactWriteItems request under the following circumstances: + // + // * A condition in one of the condition expressions is not met. + // + // * A table in the TransactWriteItems request is in a different account + // or region. + // + // * More than one action in the TransactWriteItems operation targets the + // same item. + // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * An item size becomes too large (larger than 400 KB), or a local secondary + // index (LSI) becomes too large, or a similar validation error occurs because + // of changes made by the transaction. + // + // * The aggregate size of the items in the transaction exceeds 4 MBs. + // + // * There is a user error, such as an invalid data format. + // + // DynamoDB cancels a TransactGetItems request under the following circumstances: + // + // * There is an ongoing TransactGetItems operation that conflicts with a + // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. + // In this case the TransactGetItems operation fails with a TransactionCanceledException. + // + // * A table in the TransactGetItems request is in a different account or + // region. + // + // * There is insufficient provisioned capacity for the transaction to be + // completed. + // + // * The aggregate size of the items in the transaction exceeds 4 MBs. + // + // * There is a user error, such as an invalid data format. + // + // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons + // property. This property is not set for other languages. Transaction cancellation + // reasons are ordered to match the requested items; if an item has no error, + // it will have the code NONE and a null message. + // + // Cancellation reason codes and possible error messages: + // + // * No Errors: Code: NONE Message: null + // + // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The + // conditional request failed.
+ // + // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded + // Message: Collection size exceeded. + // + // * Transaction Conflict: Code: TransactionConflict Message: Transaction + // is ongoing for the item. + // + // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded + // Messages: The level of configured provisioned throughput for the table + // was exceeded. Consider increasing your provisioning level with the UpdateTable + // API. This message is returned when provisioned throughput is exceeded + // on a provisioned DynamoDB table. The level of configured provisioned + // throughput for one or more global secondary indexes of the table was exceeded. + // Consider increasing your provisioning level for the under-provisioned + // global secondary indexes with the UpdateTable API. This message is returned + // when provisioned throughput is exceeded on a provisioned GSI. + // + // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds + // the current capacity of your table or index. DynamoDB is automatically + // scaling your table or index so please try again shortly. If exceptions + // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. + // This message is returned when writes get throttled on an On-Demand table + // as DynamoDB is automatically scaling the table. Throughput exceeds the + // current capacity for one or more global secondary indexes. DynamoDB is + // automatically scaling your index so please try again shortly. This message + // is returned when writes get throttled on an On-Demand GSI as DynamoDB + // is automatically scaling the GSI. + // + // * Validation Error: Code: ValidationError Messages: One or more parameter + // values were invalid. The update expression attempted to update the secondary + // index key beyond allowed size limits. The update expression attempted + // to update the secondary index key to an unsupported type. An operand in the + // update expression has an incorrect data type. Item size to update has + // exceeded the maximum allowed size. Number overflow. Attempting to store + // a number with a magnitude larger than the supported range. Type mismatch for + // attribute to update. Nesting Levels have exceeded supported limits. The + // document path provided in the update expression is invalid for update. + // The provided expression refers to an attribute that does not exist in + // the item. + ErrCodeTransactionCanceledException = "TransactionCanceledException" + + // ErrCodeTransactionConflictException for service response error code + // "TransactionConflictException". + // + // Operation was rejected because there is an ongoing transaction for the item. + ErrCodeTransactionConflictException = "TransactionConflictException" + + // ErrCodeTransactionInProgressException for service response error code + // "TransactionInProgressException". + // + // The transaction with the given request token is already in progress. + ErrCodeTransactionInProgressException = "TransactionInProgressException" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go new file mode 100644 index 00000000..edcb5b85 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -0,0 +1,100 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ + package dynamodb + + import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/crr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// DynamoDB provides the API operation methods for making requests to +// Amazon DynamoDB. See this package's package overview docs +// for details on the service. +// +// DynamoDB methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. +type DynamoDB struct { + *client.Client + endpointCache *crr.EndpointCache +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "dynamodb" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service. +) + +// New creates a new instance of the DynamoDB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a DynamoDB client from just a session. +// svc := dynamodb.New(mySession) +// +// // Create a DynamoDB client with additional configuration +// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *DynamoDB { + svc := &DynamoDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2012-08-10", + JSONVersion: "1.0", + TargetPrefix: "DynamoDB_20120810", + }, + handlers, + ), + } + svc.endpointCache = crr.NewEndpointCache(10) + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a DynamoDB operation and runs any +// custom request initialization.
+func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go new file mode 100644 index 00000000..ae515f7d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package dynamodb + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilTableExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { + return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus", + Expected: "ACTIVE", + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilTableNotExists uses the DynamoDB API operation +// DescribeTable to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { + return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilTableNotExists", + MaxAttempts: 25, + Delay: request.ConstantWaiterDelay(20 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "ResourceNotFoundException", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *DescribeTableInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.DescribeTableRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go new file mode 100644 index 00000000..ddefa47b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -0,0 +1,33124 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" + +// AddClientIDToOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the AddClientIDToOpenIDConnectProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddClientIDToOpenIDConnectProvider for more information on using the AddClientIDToOpenIDConnectProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddClientIDToOpenIDConnectProviderRequest method. +// req, resp := client.AddClientIDToOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddClientIDToOpenIDConnectProvider +func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) (req *request.Request, output *AddClientIDToOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opAddClientIDToOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddClientIDToOpenIDConnectProviderInput{} + } + + output = &AddClientIDToOpenIDConnectProviderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddClientIDToOpenIDConnectProvider API operation for AWS Identity and Access Management. +// +// Adds a new client ID (also known as audience) to the list of client IDs already +// registered for the specified IAM OpenID Connect (OIDC) provider resource. 
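+// +// A minimal usage sketch (an illustrative addition, not generated output); +// mySession, the client ID, and the provider ARN below are placeholder values: +// +// svc := iam.New(mySession) +// _, err := svc.AddClientIDToOpenIDConnectProvider(&iam.AddClientIDToOpenIDConnectProviderInput{ +// ClientID: aws.String("my-application-id"), +// OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"), +// })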
+// +// This operation is idempotent; it does not fail or return an error if you +// add an existing client ID to the provider. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AddClientIDToOpenIDConnectProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddClientIDToOpenIDConnectProvider +func (c *IAM) AddClientIDToOpenIDConnectProvider(input *AddClientIDToOpenIDConnectProviderInput) (*AddClientIDToOpenIDConnectProviderOutput, error) { + req, out := c.AddClientIDToOpenIDConnectProviderRequest(input) + return out, req.Send() +} + +// AddClientIDToOpenIDConnectProviderWithContext is the same as AddClientIDToOpenIDConnectProvider with the addition of +// the ability to pass a context and additional request options. +// +// See AddClientIDToOpenIDConnectProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AddClientIDToOpenIDConnectProviderWithContext(ctx aws.Context, input *AddClientIDToOpenIDConnectProviderInput, opts ...request.Option) (*AddClientIDToOpenIDConnectProviderOutput, error) { + req, out := c.AddClientIDToOpenIDConnectProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" + +// AddRoleToInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the AddRoleToInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddRoleToInstanceProfile for more information on using the AddRoleToInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddRoleToInstanceProfileRequest method. 
+// req, resp := client.AddRoleToInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddRoleToInstanceProfile +func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) (req *request.Request, output *AddRoleToInstanceProfileOutput) { + op := &request.Operation{ + Name: opAddRoleToInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddRoleToInstanceProfileInput{} + } + + output = &AddRoleToInstanceProfileOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddRoleToInstanceProfile API operation for AWS Identity and Access Management. +// +// Adds the specified IAM role to the specified instance profile. An instance +// profile can contain only one role, and this limit cannot be increased. You +// can remove the existing role and then add a different role to an instance +// profile. You must then wait for the change to appear across all of AWS because +// of eventual consistency (https://en.wikipedia.org/wiki/Eventual_consistency). +// To force the change, you must disassociate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisassociateIamInstanceProfile.html) +// and then associate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html), +// or you can stop your instance and then restart it. +// +// The caller of this API must be granted the PassRole permission on the IAM +// role by a permissions policy. +// +// For more information about roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AddRoleToInstanceProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
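+// +// A hedged usage sketch; mySession and the instance profile and role names +// below are placeholder values: +// +// svc := iam.New(mySession) +// _, err := svc.AddRoleToInstanceProfile(&iam.AddRoleToInstanceProfileInput{ +// InstanceProfileName: aws.String("Webserver"), +// RoleName: aws.String("S3Access"), +// })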
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddRoleToInstanceProfile +func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*AddRoleToInstanceProfileOutput, error) { + req, out := c.AddRoleToInstanceProfileRequest(input) + return out, req.Send() +} + +// AddRoleToInstanceProfileWithContext is the same as AddRoleToInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See AddRoleToInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AddRoleToInstanceProfileWithContext(ctx aws.Context, input *AddRoleToInstanceProfileInput, opts ...request.Option) (*AddRoleToInstanceProfileOutput, error) { + req, out := c.AddRoleToInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAddUserToGroup = "AddUserToGroup" + +// AddUserToGroupRequest generates a "aws/request.Request" representing the +// client's request for the AddUserToGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AddUserToGroup for more information on using the AddUserToGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AddUserToGroupRequest method. +// req, resp := client.AddUserToGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddUserToGroup +func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Request, output *AddUserToGroupOutput) { + op := &request.Operation{ + Name: opAddUserToGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUserToGroupInput{} + } + + output = &AddUserToGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AddUserToGroup API operation for AWS Identity and Access Management. +// +// Adds the specified user to the specified group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AddUserToGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddUserToGroup +func (c *IAM) AddUserToGroup(input *AddUserToGroupInput) (*AddUserToGroupOutput, error) { + req, out := c.AddUserToGroupRequest(input) + return out, req.Send() +} + +// AddUserToGroupWithContext is the same as AddUserToGroup with the addition of +// the ability to pass a context and additional request options. +// +// See AddUserToGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AddUserToGroupWithContext(ctx aws.Context, input *AddUserToGroupInput, opts ...request.Option) (*AddUserToGroupOutput, error) { + req, out := c.AddUserToGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachGroupPolicy = "AttachGroupPolicy" + +// AttachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachGroupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachGroupPolicy for more information on using the AttachGroupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachGroupPolicyRequest method. +// req, resp := client.AttachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachGroupPolicy +func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *request.Request, output *AttachGroupPolicyOutput) { + op := &request.Operation{ + Name: opAttachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachGroupPolicyInput{} + } + + output = &AttachGroupPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AttachGroupPolicy API operation for AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified IAM group. +// +// You use this API to attach a managed policy to a group. To embed an inline +// policy in a group, use PutGroupPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AttachGroupPolicy for usage and error information. 
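+// +// A minimal sketch, assuming an existing session; the group name and managed +// policy ARN below are placeholder values: +// +// svc := iam.New(mySession) +// _, err := svc.AttachGroupPolicy(&iam.AttachGroupPolicyInput{ +// GroupName: aws.String("Finance"), +// PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"), +// })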
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachGroupPolicy +func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPolicyOutput, error) { + req, out := c.AttachGroupPolicyRequest(input) + return out, req.Send() +} + +// AttachGroupPolicyWithContext is the same as AttachGroupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See AttachGroupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AttachGroupPolicyWithContext(ctx aws.Context, input *AttachGroupPolicyInput, opts ...request.Option) (*AttachGroupPolicyOutput, error) { + req, out := c.AttachGroupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachRolePolicy = "AttachRolePolicy" + +// AttachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachRolePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachRolePolicy for more information on using the AttachRolePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachRolePolicyRequest method. 
+// req, resp := client.AttachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachRolePolicy +func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *request.Request, output *AttachRolePolicyOutput) { + op := &request.Operation{ + Name: opAttachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachRolePolicyInput{} + } + + output = &AttachRolePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AttachRolePolicy API operation for AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified IAM role. When you +// attach a managed policy to a role, the managed policy becomes part of the +// role's permission (access) policy. +// +// You cannot use a managed policy as the role's trust policy. The role's trust +// policy is created at the same time as the role, using CreateRole. You can +// update a role's trust policy using UpdateAssumeRolePolicy. +// +// Use this API to attach a managed policy to a role. To embed an inline policy +// in a role, use PutRolePolicy. For more information about policies, see Managed +// Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AttachRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
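+// +// A hedged usage sketch; the role name and policy ARN below are placeholder +// values: +// +// svc := iam.New(mySession) +// _, err := svc.AttachRolePolicy(&iam.AttachRolePolicyInput{ +// RoleName: aws.String("ReadOnlyRole"), +// PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"), +// })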
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachRolePolicy +func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyOutput, error) { + req, out := c.AttachRolePolicyRequest(input) + return out, req.Send() +} + +// AttachRolePolicyWithContext is the same as AttachRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See AttachRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AttachRolePolicyWithContext(ctx aws.Context, input *AttachRolePolicyInput, opts ...request.Option) (*AttachRolePolicyOutput, error) { + req, out := c.AttachRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAttachUserPolicy = "AttachUserPolicy" + +// AttachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachUserPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See AttachUserPolicy for more information on using the AttachUserPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the AttachUserPolicyRequest method. +// req, resp := client.AttachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachUserPolicy +func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *request.Request, output *AttachUserPolicyOutput) { + op := &request.Operation{ + Name: opAttachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachUserPolicyInput{} + } + + output = &AttachUserPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// AttachUserPolicy API operation for AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified user. +// +// You use this API to attach a managed policy to a user. To embed an inline +// policy in a user, use PutUserPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation AttachUserPolicy for usage and error information. 
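+// +// A minimal sketch, assuming an existing session; the user name and policy +// ARN below are placeholder values: +// +// svc := iam.New(mySession) +// _, err := svc.AttachUserPolicy(&iam.AttachUserPolicyInput{ +// UserName: aws.String("Alice"), +// PolicyArn: aws.String("arn:aws:iam::aws:policy/AdministratorAccess"), +// })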
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachUserPolicy +func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyOutput, error) { + req, out := c.AttachUserPolicyRequest(input) + return out, req.Send() +} + +// AttachUserPolicyWithContext is the same as AttachUserPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See AttachUserPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) AttachUserPolicyWithContext(ctx aws.Context, input *AttachUserPolicyInput, opts ...request.Option) (*AttachUserPolicyOutput, error) { + req, out := c.AttachUserPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest generates a "aws/request.Request" representing the +// client's request for the ChangePassword operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ChangePassword for more information on using the ChangePassword +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ChangePasswordRequest method. 
+// req, resp := client.ChangePasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ChangePassword +func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { + op := &request.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + output = &ChangePasswordOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ChangePassword API operation for AWS Identity and Access Management. +// +// Changes the password of the IAM user who is calling this operation. The AWS +// account root user password is not affected by this operation. +// +// To change the password for a different user, see UpdateLoginProfile. For +// more information about modifying passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ChangePassword for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidUserTypeException "InvalidUserType" +// The request was rejected because the type of user for the transaction was +// incorrect. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" +// The request was rejected because it referenced an entity that is temporarily +// unmodifiable, such as a user name that was deleted and then recreated. The +// error indicates that the request is likely to succeed if you try again after +// waiting several minutes. The error message describes the entity. +// +// * ErrCodePasswordPolicyViolationException "PasswordPolicyViolation" +// The request was rejected because the provided password did not meet the requirements +// imposed by the account password policy. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ChangePassword +func (c *IAM) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + return out, req.Send() +} + +// ChangePasswordWithContext is the same as ChangePassword with the addition of +// the ability to pass a context and additional request options. +// +// See ChangePassword for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ChangePasswordWithContext(ctx aws.Context, input *ChangePasswordInput, opts ...request.Option) (*ChangePasswordOutput, error) { + req, out := c.ChangePasswordRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAccessKey = "CreateAccessKey" + +// CreateAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccessKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAccessKey for more information on using the CreateAccessKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAccessKeyRequest method. +// req, resp := client.CreateAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccessKey +func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request.Request, output *CreateAccessKeyOutput) { + op := &request.Operation{ + Name: opCreateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccessKeyInput{} + } + + output = &CreateAccessKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateAccessKey API operation for AWS Identity and Access Management. +// +// Creates a new AWS secret access key and corresponding AWS access key ID for +// the specified user. The default status for new keys is Active. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials. This is true even if the AWS +// account has no associated users. +// +// For information about limits on the number of keys you can create, see Limitations +// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. You must save the key (for example, in +// a text file) if you want to be able to access it again. If a secret key is +// lost, you can delete the access keys for the associated user and then create +// new keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateAccessKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccessKey +func (c *IAM) CreateAccessKey(input *CreateAccessKeyInput) (*CreateAccessKeyOutput, error) { + req, out := c.CreateAccessKeyRequest(input) + return out, req.Send() +} + +// CreateAccessKeyWithContext is the same as CreateAccessKey with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAccessKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateAccessKeyWithContext(ctx aws.Context, input *CreateAccessKeyInput, opts ...request.Option) (*CreateAccessKeyOutput, error) { + req, out := c.CreateAccessKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateAccountAlias = "CreateAccountAlias" + +// CreateAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccountAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateAccountAlias for more information on using the CreateAccountAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateAccountAliasRequest method. +// req, resp := client.CreateAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccountAlias +func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *request.Request, output *CreateAccountAliasOutput) { + op := &request.Operation{ + Name: opCreateAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccountAliasInput{} + } + + output = &CreateAccountAliasOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// CreateAccountAlias API operation for AWS Identity and Access Management. +// +// Creates an alias for your AWS account. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
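For readers skimming this vendored hunk, a minimal sketch (not part of the generated SDK source) of calling the CreateAccessKey operation documented above through its one-shot wrapper. The user name is a placeholder, and a default session configurable from the environment is assumed:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Assumes credentials/region are resolvable from the environment.
	sess := session.Must(session.NewSession())
	svc := iam.New(sess)

	// CreateAccessKey wraps CreateAccessKeyRequest + Send, as the
	// comments above describe.
	out, err := svc.CreateAccessKey(&iam.CreateAccessKeyInput{
		UserName: aws.String("example-user"), // hypothetical user name
	})
	if err != nil {
		log.Fatal(err)
	}
	// The secret access key is only available at creation time, per the
	// operation notes above; persist it securely before discarding out.
	fmt.Println(aws.StringValue(out.AccessKey.AccessKeyId))
}
```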
+// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateAccountAlias for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccountAlias +func (c *IAM) CreateAccountAlias(input *CreateAccountAliasInput) (*CreateAccountAliasOutput, error) { + req, out := c.CreateAccountAliasRequest(input) + return out, req.Send() +} + +// CreateAccountAliasWithContext is the same as CreateAccountAlias with the addition of +// the ability to pass a context and additional request options. +// +// See CreateAccountAlias for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateAccountAliasWithContext(ctx aws.Context, input *CreateAccountAliasInput, opts ...request.Option) (*CreateAccountAliasOutput, error) { + req, out := c.CreateAccountAliasRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateGroup = "CreateGroup" + +// CreateGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateGroup for more information on using the CreateGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateGroupRequest method. +// req, resp := client.CreateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateGroup +func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, output *CreateGroupOutput) { + op := &request.Operation{ + Name: opCreateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGroupInput{} + } + + output = &CreateGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateGroup API operation for AWS Identity and Access Management. +// +// Creates a new group. +// +// For information about the number of groups you can create, see Limitations +// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateGroup +func (c *IAM) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { + req, out := c.CreateGroupRequest(input) + return out, req.Send() +} + +// CreateGroupWithContext is the same as CreateGroup with the addition of +// the ability to pass a context and additional request options. +// +// See CreateGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateGroupWithContext(ctx aws.Context, input *CreateGroupInput, opts ...request.Option) (*CreateGroupOutput, error) { + req, out := c.CreateGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateInstanceProfile = "CreateInstanceProfile" + +// CreateInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateInstanceProfile for more information on using the CreateInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateInstanceProfileRequest method. 
+// req, resp := client.CreateInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateInstanceProfile +func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (req *request.Request, output *CreateInstanceProfileOutput) { + op := &request.Operation{ + Name: opCreateInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceProfileInput{} + } + + output = &CreateInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateInstanceProfile API operation for AWS Identity and Access Management. +// +// Creates a new instance profile. For information about instance profiles, +// go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// For information about the number of instance profiles you can create, see +// Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateInstanceProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateInstanceProfile +func (c *IAM) CreateInstanceProfile(input *CreateInstanceProfileInput) (*CreateInstanceProfileOutput, error) { + req, out := c.CreateInstanceProfileRequest(input) + return out, req.Send() +} + +// CreateInstanceProfileWithContext is the same as CreateInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See CreateInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateInstanceProfileWithContext(ctx aws.Context, input *CreateInstanceProfileInput, opts ...request.Option) (*CreateInstanceProfileOutput, error) { + req, out := c.CreateInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateLoginProfile = "CreateLoginProfile" + +// CreateLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoginProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See CreateLoginProfile for more information on using the CreateLoginProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateLoginProfileRequest method. +// req, resp := client.CreateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateLoginProfile +func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *request.Request, output *CreateLoginProfileOutput) { + op := &request.Operation{ + Name: opCreateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoginProfileInput{} + } + + output = &CreateLoginProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateLoginProfile API operation for AWS Identity and Access Management. +// +// Creates a password for the specified user, giving the user the ability to +// access AWS services through the AWS Management Console. For more information +// about managing passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateLoginProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodePasswordPolicyViolationException "PasswordPolicyViolation" +// The request was rejected because the provided password did not meet the requirements +// imposed by the account password policy. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateLoginProfile +func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginProfileOutput, error) { + req, out := c.CreateLoginProfileRequest(input) + return out, req.Send() +} + +// CreateLoginProfileWithContext is the same as CreateLoginProfile with the addition of +// the ability to pass a context and additional request options. +// +// See CreateLoginProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateLoginProfileWithContext(ctx aws.Context, input *CreateLoginProfileInput, opts ...request.Option) (*CreateLoginProfileOutput, error) { + req, out := c.CreateLoginProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" + +// CreateOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the CreateOpenIDConnectProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateOpenIDConnectProvider for more information on using the CreateOpenIDConnectProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateOpenIDConnectProviderRequest method. +// req, resp := client.CreateOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateOpenIDConnectProvider +func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) (req *request.Request, output *CreateOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opCreateOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpenIDConnectProviderInput{} + } + + output = &CreateOpenIDConnectProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateOpenIDConnectProvider API operation for AWS Identity and Access Management. +// +// Creates an IAM entity to describe an identity provider (IdP) that supports +// OpenID Connect (OIDC) (http://openid.net/connect/). +// +// The OIDC provider that you create with this operation can be used as a principal +// in a role's trust policy. Such a policy establishes a trust relationship +// between AWS and the OIDC provider. +// +// When you create the IAM OIDC provider, you specify the following: +// +// * The URL of the OIDC identity provider (IdP) to trust +// +// * A list of client IDs (also known as audiences) that identify the application +// or applications that are allowed to authenticate using the OIDC provider +// +// * A list of thumbprints of the server certificate(s) that the IdP uses +// +// You get all of this information from the OIDC IdP that you want to use to +// access AWS. +// +// The trust for the OIDC provider is derived from the IAM provider that this +// operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider +// operation to highly privileged users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateOpenIDConnectProvider for usage and error information. 
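A sketch of the CreateOpenIDConnectProvider call just described, reusing the `svc` client from the first sketch; the issuer URL, client ID, and thumbprint are placeholder values shaped like what the operation expects:

```go
// All three values below are placeholders, not real IdP data.
out, err := svc.CreateOpenIDConnectProvider(&iam.CreateOpenIDConnectProviderInput{
	Url:          aws.String("https://server.example.com"),   // IdP issuer URL to trust
	ClientIDList: []*string{aws.String("my-application-id")}, // audiences
	ThumbprintList: []*string{
		aws.String("3768084dfb3d2b68b7897bf5f565da8efEXAMPLE"), // server cert thumbprint
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.OpenIDConnectProviderArn))
```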
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateOpenIDConnectProvider +func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInput) (*CreateOpenIDConnectProviderOutput, error) { + req, out := c.CreateOpenIDConnectProviderRequest(input) + return out, req.Send() +} + +// CreateOpenIDConnectProviderWithContext is the same as CreateOpenIDConnectProvider with the addition of +// the ability to pass a context and additional request options. +// +// See CreateOpenIDConnectProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateOpenIDConnectProviderWithContext(ctx aws.Context, input *CreateOpenIDConnectProviderInput, opts ...request.Option) (*CreateOpenIDConnectProviderOutput, error) { + req, out := c.CreateOpenIDConnectProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePolicy for more information on using the CreatePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePolicyRequest method. +// req, resp := client.CreatePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicy +func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { + op := &request.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + output = &CreatePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePolicy API operation for AWS Identity and Access Management. +// +// Creates a new managed policy for your AWS account. +// +// This operation creates a policy version with a version identifier of v1 and +// sets v1 as the policy's default version. 
For more information about policy +// versions, see Versioning for Managed Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// For more information about managed policies in general, see Managed Policies +// and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreatePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicy +func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + return out, req.Send() +} + +// CreatePolicyWithContext is the same as CreatePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreatePolicyWithContext(ctx aws.Context, input *CreatePolicyInput, opts ...request.Option) (*CreatePolicyOutput, error) { + req, out := c.CreatePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicyVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreatePolicyVersion for more information on using the CreatePolicyVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreatePolicyVersionRequest method. 
+// req, resp := client.CreatePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyVersion +func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { + op := &request.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + output = &CreatePolicyVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreatePolicyVersion API operation for AWS Identity and Access Management. +// +// Creates a new version of the specified managed policy. To update a managed +// policy, you create a new policy version. A managed policy can have up to +// five versions. If the policy has five versions, you must delete an existing +// version using DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the version that is in effect for the IAM users, groups, +// and roles to which the policy is attached. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreatePolicyVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyVersion +func (c *IAM) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + return out, req.Send() +} + +// CreatePolicyVersionWithContext is the same as CreatePolicyVersion with the addition of +// the ability to pass a context and additional request options. +// +// See CreatePolicyVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
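A sketch of CreatePolicyVersion under the versioning rules described above (at most five versions per managed policy); the policy ARN and document are placeholders, and `svc` is the client from the first sketch:

```go
// Hypothetical policy document; real documents come from your own policy source.
policyDoc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"iam:GetUser","Resource":"*"}]}`

out, err := svc.CreatePolicyVersion(&iam.CreatePolicyVersionInput{
	PolicyArn:      aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"), // placeholder ARN
	PolicyDocument: aws.String(policyDoc),
	SetAsDefault:   aws.Bool(true), // make the new version the one in effect
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.PolicyVersion.VersionId))
```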
+func (c *IAM) CreatePolicyVersionWithContext(ctx aws.Context, input *CreatePolicyVersionInput, opts ...request.Option) (*CreatePolicyVersionOutput, error) { + req, out := c.CreatePolicyVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateRole = "CreateRole" + +// CreateRoleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateRole for more information on using the CreateRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateRoleRequest method. +// req, resp := client.CreateRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateRole +func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, output *CreateRoleOutput) { + op := &request.Operation{ + Name: opCreateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRoleInput{} + } + + output = &CreateRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateRole API operation for AWS Identity and Access Management. +// +// Creates a new role for your AWS account. For more information about roles, +// go to IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For information about limitations on role names and the number of roles you +// can create, go to Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateRole +func (c *IAM) CreateRole(input *CreateRoleInput) (*CreateRoleOutput, error) { + req, out := c.CreateRoleRequest(input) + return out, req.Send() +} + +// CreateRoleWithContext is the same as CreateRole with the addition of +// the ability to pass a context and additional request options. +// +// See CreateRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateRoleWithContext(ctx aws.Context, input *CreateRoleInput, opts ...request.Option) (*CreateRoleOutput, error) { + req, out := c.CreateRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateSAMLProvider = "CreateSAMLProvider" + +// CreateSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the CreateSAMLProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateSAMLProvider for more information on using the CreateSAMLProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateSAMLProviderRequest method. +// req, resp := client.CreateSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateSAMLProvider +func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *request.Request, output *CreateSAMLProviderOutput) { + op := &request.Operation{ + Name: opCreateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSAMLProviderInput{} + } + + output = &CreateSAMLProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateSAMLProvider API operation for AWS Identity and Access Management. +// +// Creates an IAM resource that describes an identity provider (IdP) that supports +// SAML 2.0. +// +// The SAML provider resource that you create with this operation can be used +// as a principal in an IAM role's trust policy. Such a policy can enable federated +// users who sign in using the SAML IdP to assume the role. You can create an +// IAM role that supports Web-based single sign-on (SSO) to the AWS Management +// Console or one that supports API access to AWS. +// +// When you create the SAML provider resource, you upload a SAML metadata document +// that you get from your IdP. That document includes the issuer's name, expiration +// information, and keys that can be used to validate the SAML authentication +// response (assertions) that the IdP sends. 
You must generate the metadata +// document using the identity management software that is used as your organization's +// IdP. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// For more information, see Enabling SAML 2.0 Federated Users to Access the +// AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) +// and About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateSAMLProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateSAMLProvider +func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLProviderOutput, error) { + req, out := c.CreateSAMLProviderRequest(input) + return out, req.Send() +} + +// CreateSAMLProviderWithContext is the same as CreateSAMLProvider with the addition of +// the ability to pass a context and additional request options. +// +// See CreateSAMLProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateSAMLProviderWithContext(ctx aws.Context, input *CreateSAMLProviderInput, opts ...request.Option) (*CreateSAMLProviderOutput, error) { + req, out := c.CreateSAMLProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateServiceLinkedRole = "CreateServiceLinkedRole" + +// CreateServiceLinkedRoleRequest generates a "aws/request.Request" representing the +// client's request for the CreateServiceLinkedRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateServiceLinkedRole for more information on using the CreateServiceLinkedRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the CreateServiceLinkedRoleRequest method. +// req, resp := client.CreateServiceLinkedRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceLinkedRole +func (c *IAM) CreateServiceLinkedRoleRequest(input *CreateServiceLinkedRoleInput) (req *request.Request, output *CreateServiceLinkedRoleOutput) { + op := &request.Operation{ + Name: opCreateServiceLinkedRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceLinkedRoleInput{} + } + + output = &CreateServiceLinkedRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateServiceLinkedRole API operation for AWS Identity and Access Management. +// +// Creates an IAM role that is linked to a specific AWS service. The service +// controls the attached policies and when the role can be deleted. This helps +// ensure that the service is not broken by an unexpectedly changed or deleted +// role, which could put your AWS resources into an unknown state. Allowing +// the service to control the role helps improve service stability and proper +// cleanup when a service and its role are no longer needed. For more information, +// see Using Service-Linked Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html) +// in the IAM User Guide. +// +// To attach a policy to this service-linked role, you must make the request +// using the AWS service that depends on this role. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateServiceLinkedRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceLinkedRole +func (c *IAM) CreateServiceLinkedRole(input *CreateServiceLinkedRoleInput) (*CreateServiceLinkedRoleOutput, error) { + req, out := c.CreateServiceLinkedRoleRequest(input) + return out, req.Send() +} + +// CreateServiceLinkedRoleWithContext is the same as CreateServiceLinkedRole with the addition of +// the ability to pass a context and additional request options. +// +// See CreateServiceLinkedRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
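A sketch of the `*WithContext` variant the comments above document, using CreateServiceLinkedRole as the example: the request is cancelled if the context deadline elapses. Assumes `context` and `time` are imported alongside the earlier sketch; the service principal is a placeholder:

```go
// A non-nil context is required; a nil context panics, per the docs above.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

out, err := svc.CreateServiceLinkedRoleWithContext(ctx, &iam.CreateServiceLinkedRoleInput{
	AWSServiceName: aws.String("elasticbeanstalk.amazonaws.com"), // placeholder service principal
	Description:    aws.String("Example service-linked role"),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.Role.Arn))
```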
+func (c *IAM) CreateServiceLinkedRoleWithContext(ctx aws.Context, input *CreateServiceLinkedRoleInput, opts ...request.Option) (*CreateServiceLinkedRoleOutput, error) { + req, out := c.CreateServiceLinkedRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateServiceSpecificCredential = "CreateServiceSpecificCredential" + +// CreateServiceSpecificCredentialRequest generates a "aws/request.Request" representing the +// client's request for the CreateServiceSpecificCredential operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateServiceSpecificCredential for more information on using the CreateServiceSpecificCredential +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateServiceSpecificCredentialRequest method. +// req, resp := client.CreateServiceSpecificCredentialRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceSpecificCredential +func (c *IAM) CreateServiceSpecificCredentialRequest(input *CreateServiceSpecificCredentialInput) (req *request.Request, output *CreateServiceSpecificCredentialOutput) { + op := &request.Operation{ + Name: opCreateServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceSpecificCredentialInput{} + } + + output = &CreateServiceSpecificCredentialOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateServiceSpecificCredential API operation for AWS Identity and Access Management. +// +// Generates a set of credentials consisting of a user name and password that +// can be used to access the service specified in the request. These credentials +// are generated by IAM, and can be used only for the specified service. +// +// You can have a maximum of two sets of service-specific credentials for each +// supported service per user. +// +// The only supported service at this time is AWS CodeCommit. +// +// You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential. +// +// For more information about service-specific credentials, see Using IAM with +// AWS CodeCommit: Git Credentials, SSH Keys, and AWS Access Keys (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_ssh-keys.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateServiceSpecificCredential for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceNotSupportedException "NotSupportedService" +// The specified service does not support service-specific credentials. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceSpecificCredential +func (c *IAM) CreateServiceSpecificCredential(input *CreateServiceSpecificCredentialInput) (*CreateServiceSpecificCredentialOutput, error) { + req, out := c.CreateServiceSpecificCredentialRequest(input) + return out, req.Send() +} + +// CreateServiceSpecificCredentialWithContext is the same as CreateServiceSpecificCredential with the addition of +// the ability to pass a context and additional request options. +// +// See CreateServiceSpecificCredential for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateServiceSpecificCredentialWithContext(ctx aws.Context, input *CreateServiceSpecificCredentialInput, opts ...request.Option) (*CreateServiceSpecificCredentialOutput, error) { + req, out := c.CreateServiceSpecificCredentialRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest generates a "aws/request.Request" representing the +// client's request for the CreateUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateUser for more information on using the CreateUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateUserRequest method. +// req, resp := client.CreateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateUser +func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { + op := &request.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + output = &CreateUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateUser API operation for AWS Identity and Access Management. +// +// Creates a new IAM user for your AWS account. +// +// For information about limitations on the number of IAM users you can create, +// see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
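A sketch of the error-handling pattern these comments repeatedly prescribe: assert the returned error to `awserr.Error` and switch on the documented error codes. Shown here for CreateUser, assuming `github.com/aws/aws-sdk-go/aws/awserr` is imported and `svc` is the client from the first sketch:

```go
out, err := svc.CreateUser(&iam.CreateUserInput{
	UserName: aws.String("example-user"), // placeholder
})
if err != nil {
	if aerr, ok := err.(awserr.Error); ok {
		switch aerr.Code() {
		case iam.ErrCodeEntityAlreadyExistsException:
			fmt.Println("user already exists:", aerr.Message())
		case iam.ErrCodeLimitExceededException:
			fmt.Println("account limit reached:", aerr.Message())
		default:
			fmt.Println(aerr.Code(), aerr.Message())
		}
		return
	}
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.User.Arn))
```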
+// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateUser +func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + return out, req.Send() +} + +// CreateUserWithContext is the same as CreateUser with the addition of +// the ability to pass a context and additional request options. +// +// See CreateUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) CreateUserWithContext(ctx aws.Context, input *CreateUserInput, opts ...request.Option) (*CreateUserOutput, error) { + req, out := c.CreateUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateVirtualMFADevice = "CreateVirtualMFADevice" + +// CreateVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the CreateVirtualMFADevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See CreateVirtualMFADevice for more information on using the CreateVirtualMFADevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the CreateVirtualMFADeviceRequest method. 
+// req, resp := client.CreateVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateVirtualMFADevice +func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) (req *request.Request, output *CreateVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opCreateVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVirtualMFADeviceInput{} + } + + output = &CreateVirtualMFADeviceOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateVirtualMFADevice API operation for AWS Identity and Access Management. +// +// Creates a new virtual MFA device for the AWS account. After creating the +// virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// For information about limits on the number of MFA devices you can create, +// see Limitations on Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// The seed information contained in the QR code and the Base32 string should +// be treated like any other secret access information. In other words, protect +// the seed information as you would your AWS access keys or your passwords. +// After you provision your virtual device, you should ensure that the information +// is destroyed following secure procedures. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation CreateVirtualMFADevice for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateVirtualMFADevice +func (c *IAM) CreateVirtualMFADevice(input *CreateVirtualMFADeviceInput) (*CreateVirtualMFADeviceOutput, error) { + req, out := c.CreateVirtualMFADeviceRequest(input) + return out, req.Send() +} + +// CreateVirtualMFADeviceWithContext is the same as CreateVirtualMFADevice with the addition of +// the ability to pass a context and additional request options. +// +// See CreateVirtualMFADevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
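A sketch of CreateVirtualMFADevice that treats the returned seed as the secret material the comments above say it is; the device name is a placeholder and `svc` is the client from the first sketch:

```go
out, err := svc.CreateVirtualMFADevice(&iam.CreateVirtualMFADeviceInput{
	VirtualMFADeviceName: aws.String("example-device"), // placeholder
})
if err != nil {
	log.Fatal(err)
}
// SerialNumber is the device ARN used later with EnableMFADevice.
fmt.Println(aws.StringValue(out.VirtualMFADevice.SerialNumber))
// Base32StringSeed and QRCodePNG are secrets: deliver them to the user
// over a secure channel, then destroy them, per the notes above.
_ = out.VirtualMFADevice.Base32StringSeed
```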
+func (c *IAM) CreateVirtualMFADeviceWithContext(ctx aws.Context, input *CreateVirtualMFADeviceInput, opts ...request.Option) (*CreateVirtualMFADeviceOutput, error) { + req, out := c.CreateVirtualMFADeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeactivateMFADevice = "DeactivateMFADevice" + +// DeactivateMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeactivateMFADevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeactivateMFADevice for more information on using the DeactivateMFADevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeactivateMFADeviceRequest method. +// req, resp := client.DeactivateMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeactivateMFADevice +func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req *request.Request, output *DeactivateMFADeviceOutput) { + op := &request.Operation{ + Name: opDeactivateMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivateMFADeviceInput{} + } + + output = &DeactivateMFADeviceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeactivateMFADevice API operation for AWS Identity and Access Management. +// +// Deactivates the specified MFA device and removes it from association with +// the user name for which it was originally enabled. +// +// For more information about creating and working with virtual MFA devices, +// go to Enabling a Virtual Multi-factor Authentication (MFA) Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeactivateMFADevice for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" +// The request was rejected because it referenced an entity that is temporarily +// unmodifiable, such as a user name that was deleted and then recreated. The +// error indicates that the request is likely to succeed if you try again after +// waiting several minutes. The error message describes the entity. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeactivateMFADevice +func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateMFADeviceOutput, error) { + req, out := c.DeactivateMFADeviceRequest(input) + return out, req.Send() +} + +// DeactivateMFADeviceWithContext is the same as DeactivateMFADevice with the addition of +// the ability to pass a context and additional request options. +// +// See DeactivateMFADevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeactivateMFADeviceWithContext(ctx aws.Context, input *DeactivateMFADeviceInput, opts ...request.Option) (*DeactivateMFADeviceOutput, error) { + req, out := c.DeactivateMFADeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAccessKey = "DeleteAccessKey" + +// DeleteAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccessKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAccessKey for more information on using the DeleteAccessKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAccessKeyRequest method. +// req, resp := client.DeleteAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccessKey +func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request.Request, output *DeleteAccessKeyOutput) { + op := &request.Operation{ + Name: opDeleteAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccessKeyInput{} + } + + output = &DeleteAccessKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAccessKey API operation for AWS Identity and Access Management. +// +// Deletes the access key pair associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
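As the comment above notes, DeleteAccessKey infers the user from the signing credentials when UserName is omitted. A hedged sketch, reusing an *iam.IAM client like `svc` from the earlier example; the key ID below is AWS's documented example value, not a real credential:

```go
input := &iam.DeleteAccessKeyInput{
	AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	// UserName is optional; leave it unset to delete a key belonging to
	// the identity that signs the request (including the root user).
	UserName: aws.String("example-user"),
}
if _, err := svc.DeleteAccessKey(input); err != nil {
	fmt.Println("DeleteAccessKey failed:", err)
}
```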
+// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteAccessKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccessKey +func (c *IAM) DeleteAccessKey(input *DeleteAccessKeyInput) (*DeleteAccessKeyOutput, error) { + req, out := c.DeleteAccessKeyRequest(input) + return out, req.Send() +} + +// DeleteAccessKeyWithContext is the same as DeleteAccessKey with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAccessKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteAccessKeyWithContext(ctx aws.Context, input *DeleteAccessKeyInput, opts ...request.Option) (*DeleteAccessKeyOutput, error) { + req, out := c.DeleteAccessKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteAccountAlias = "DeleteAccountAlias" + +// DeleteAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountAlias operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteAccountAlias for more information on using the DeleteAccountAlias +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteAccountAliasRequest method. +// req, resp := client.DeleteAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountAlias +func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *request.Request, output *DeleteAccountAliasOutput) { + op := &request.Operation{ + Name: opDeleteAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountAliasInput{} + } + + output = &DeleteAccountAliasOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAccountAlias API operation for AWS Identity and Access Management. +// +// Deletes the specified AWS account alias. 
+// For information about using an AWS
+// account alias, see Using an Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation DeleteAccountAlias for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchEntityException "NoSuchEntity"
+//   The request was rejected because it referenced a resource entity that does
+//   not exist. The error message describes the resource.
+//
+//   * ErrCodeLimitExceededException "LimitExceeded"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error message describes the limit exceeded.
+//
+//   * ErrCodeServiceFailureException "ServiceFailure"
+//   The request processing has failed because of an unknown error, exception
+//   or failure.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountAlias
+func (c *IAM) DeleteAccountAlias(input *DeleteAccountAliasInput) (*DeleteAccountAliasOutput, error) {
+	req, out := c.DeleteAccountAliasRequest(input)
+	return out, req.Send()
+}
+
+// DeleteAccountAliasWithContext is the same as DeleteAccountAlias with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteAccountAlias for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) DeleteAccountAliasWithContext(ctx aws.Context, input *DeleteAccountAliasInput, opts ...request.Option) (*DeleteAccountAliasOutput, error) {
+	req, out := c.DeleteAccountAliasRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy"
+
+// DeleteAccountPasswordPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteAccountPasswordPolicy operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteAccountPasswordPolicy for more information on using the DeleteAccountPasswordPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteAccountPasswordPolicyRequest method.
+// req, resp := client.DeleteAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountPasswordPolicy +func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) (req *request.Request, output *DeleteAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opDeleteAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPasswordPolicyInput{} + } + + output = &DeleteAccountPasswordPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteAccountPasswordPolicy API operation for AWS Identity and Access Management. +// +// Deletes the password policy for the AWS account. There are no parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteAccountPasswordPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountPasswordPolicy +func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInput) (*DeleteAccountPasswordPolicyOutput, error) { + req, out := c.DeleteAccountPasswordPolicyRequest(input) + return out, req.Send() +} + +// DeleteAccountPasswordPolicyWithContext is the same as DeleteAccountPasswordPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteAccountPasswordPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteAccountPasswordPolicyWithContext(ctx aws.Context, input *DeleteAccountPasswordPolicyInput, opts ...request.Option) (*DeleteAccountPasswordPolicyOutput, error) { + req, out := c.DeleteAccountPasswordPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteGroup = "DeleteGroup" + +// DeleteGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
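For completeness, the two account-scoped deletions documented above take minimal inputs; DeleteAccountPasswordPolicy has no parameters at all. A sketch with an assumed alias name and the same `svc` client as before:

```go
if _, err := svc.DeleteAccountAlias(&iam.DeleteAccountAliasInput{
	AccountAlias: aws.String("my-company-alias"), // assumed alias
}); err != nil {
	fmt.Println("DeleteAccountAlias failed:", err)
}
if _, err := svc.DeleteAccountPasswordPolicy(&iam.DeleteAccountPasswordPolicyInput{}); err != nil {
	fmt.Println("DeleteAccountPasswordPolicy failed:", err)
}
```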
+// +// See DeleteGroup for more information on using the DeleteGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteGroupRequest method. +// req, resp := client.DeleteGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroup +func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { + op := &request.Operation{ + Name: opDeleteGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupInput{} + } + + output = &DeleteGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteGroup API operation for AWS Identity and Access Management. +// +// Deletes the specified IAM group. The group must not contain any users or +// have any attached policies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroup +func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) + return out, req.Send() +} + +// DeleteGroupWithContext is the same as DeleteGroup with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteGroupWithContext(ctx aws.Context, input *DeleteGroupInput, opts ...request.Option) (*DeleteGroupOutput, error) { + req, out := c.DeleteGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteGroupPolicy = "DeleteGroupPolicy" + +// DeleteGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroupPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteGroupPolicy for more information on using the DeleteGroupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteGroupPolicyRequest method. +// req, resp := client.DeleteGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupPolicy +func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *request.Request, output *DeleteGroupPolicyOutput) { + op := &request.Operation{ + Name: opDeleteGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupPolicyInput{} + } + + output = &DeleteGroupPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteGroupPolicy API operation for AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// group. +// +// A group can also have managed policies attached to it. To detach a managed +// policy from a group, use DetachGroupPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteGroupPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupPolicy +func (c *IAM) DeleteGroupPolicy(input *DeleteGroupPolicyInput) (*DeleteGroupPolicyOutput, error) { + req, out := c.DeleteGroupPolicyRequest(input) + return out, req.Send() +} + +// DeleteGroupPolicyWithContext is the same as DeleteGroupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteGroupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
+// See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) DeleteGroupPolicyWithContext(ctx aws.Context, input *DeleteGroupPolicyInput, opts ...request.Option) (*DeleteGroupPolicyOutput, error) {
+	req, out := c.DeleteGroupPolicyRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteInstanceProfile = "DeleteInstanceProfile"
+
+// DeleteInstanceProfileRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteInstanceProfile operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteInstanceProfile for more information on using the DeleteInstanceProfile
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteInstanceProfileRequest method.
+//    req, resp := client.DeleteInstanceProfileRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteInstanceProfile
+func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (req *request.Request, output *DeleteInstanceProfileOutput) {
+	op := &request.Operation{
+		Name:       opDeleteInstanceProfile,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteInstanceProfileInput{}
+	}
+
+	output = &DeleteInstanceProfileOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteInstanceProfile API operation for AWS Identity and Access Management.
+//
+// Deletes the specified instance profile. The instance profile must not have
+// an associated role.
+//
+// Make sure that you do not have any Amazon EC2 instances running with the
+// instance profile you are about to delete. Deleting a role or instance profile
+// that is associated with a running instance will break any applications running
+// on the instance.
+//
+// For more information about instance profiles, go to About Instance Profiles
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation DeleteInstanceProfile for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchEntityException "NoSuchEntity"
+//   The request was rejected because it referenced a resource entity that does
+//   not exist. The error message describes the resource.
+//
+//   * ErrCodeDeleteConflictException "DeleteConflict"
+//   The request was rejected because it attempted to delete a resource that has
+//   attached subordinate entities. The error message describes these entities.
+//
+//   * ErrCodeLimitExceededException "LimitExceeded"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits.
+//   The error message describes the limit exceeded.
+//
+//   * ErrCodeServiceFailureException "ServiceFailure"
+//   The request processing has failed because of an unknown error, exception
+//   or failure.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteInstanceProfile
+func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteInstanceProfileOutput, error) {
+	req, out := c.DeleteInstanceProfileRequest(input)
+	return out, req.Send()
+}
+
+// DeleteInstanceProfileWithContext is the same as DeleteInstanceProfile with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteInstanceProfile for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) DeleteInstanceProfileWithContext(ctx aws.Context, input *DeleteInstanceProfileInput, opts ...request.Option) (*DeleteInstanceProfileOutput, error) {
+	req, out := c.DeleteInstanceProfileRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteLoginProfile = "DeleteLoginProfile"
+
+// DeleteLoginProfileRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteLoginProfile operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteLoginProfile for more information on using the DeleteLoginProfile
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteLoginProfileRequest method.
+//    req, resp := client.DeleteLoginProfileRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteLoginProfile
+func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *request.Request, output *DeleteLoginProfileOutput) {
+	op := &request.Operation{
+		Name:       opDeleteLoginProfile,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteLoginProfileInput{}
+	}
+
+	output = &DeleteLoginProfileOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteLoginProfile API operation for AWS Identity and Access Management.
+//
+// Deletes the password for the specified IAM user, which terminates the user's
+// ability to access AWS services through the AWS Management Console.
+//
+// Deleting a user's password does not prevent a user from accessing AWS through
+// the command line interface or the API. To prevent all user access, you must
+// also either make any access keys inactive or delete them. For more information
+// about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey.
+//
+// Returns awserr.Error for service API and SDK errors.
+// Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation DeleteLoginProfile for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable"
+//   The request was rejected because it referenced an entity that is temporarily
+//   unmodifiable, such as a user name that was deleted and then recreated. The
+//   error indicates that the request is likely to succeed if you try again after
+//   waiting several minutes. The error message describes the entity.
+//
+//   * ErrCodeNoSuchEntityException "NoSuchEntity"
+//   The request was rejected because it referenced a resource entity that does
+//   not exist. The error message describes the resource.
+//
+//   * ErrCodeLimitExceededException "LimitExceeded"
+//   The request was rejected because it attempted to create resources beyond
+//   the current AWS account limits. The error message describes the limit exceeded.
+//
+//   * ErrCodeServiceFailureException "ServiceFailure"
+//   The request processing has failed because of an unknown error, exception
+//   or failure.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteLoginProfile
+func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginProfileOutput, error) {
+	req, out := c.DeleteLoginProfileRequest(input)
+	return out, req.Send()
+}
+
+// DeleteLoginProfileWithContext is the same as DeleteLoginProfile with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteLoginProfile for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) DeleteLoginProfileWithContext(ctx aws.Context, input *DeleteLoginProfileInput, opts ...request.Option) (*DeleteLoginProfileOutput, error) {
+	req, out := c.DeleteLoginProfileRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider"
+
+// DeleteOpenIDConnectProviderRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteOpenIDConnectProvider operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteOpenIDConnectProvider for more information on using the DeleteOpenIDConnectProvider
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteOpenIDConnectProviderRequest method.
+// req, resp := client.DeleteOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteOpenIDConnectProvider +func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) (req *request.Request, output *DeleteOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opDeleteOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOpenIDConnectProviderInput{} + } + + output = &DeleteOpenIDConnectProviderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteOpenIDConnectProvider API operation for AWS Identity and Access Management. +// +// Deletes an OpenID Connect identity provider (IdP) resource object in IAM. +// +// Deleting an IAM OIDC provider resource does not update any roles that reference +// the provider as a principal in their trust policies. Any attempt to assume +// a role that references a deleted provider fails. +// +// This operation is idempotent; it does not fail or return an error if you +// call the operation for a provider that does not exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteOpenIDConnectProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteOpenIDConnectProvider +func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInput) (*DeleteOpenIDConnectProviderOutput, error) { + req, out := c.DeleteOpenIDConnectProviderRequest(input) + return out, req.Send() +} + +// DeleteOpenIDConnectProviderWithContext is the same as DeleteOpenIDConnectProvider with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteOpenIDConnectProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteOpenIDConnectProviderWithContext(ctx aws.Context, input *DeleteOpenIDConnectProviderInput, opts ...request.Option) (*DeleteOpenIDConnectProviderOutput, error) { + req, out := c.DeleteOpenIDConnectProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePolicy for more information on using the DeletePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicy +func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { + op := &request.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + output = &DeletePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePolicy API operation for AWS Identity and Access Management. +// +// Deletes the specified managed policy. +// +// Before you can delete a managed policy, you must first detach the policy +// from all users, groups, and roles that it is attached to. In addition, you +// must delete all the policy's versions. The following steps describe the process +// for deleting a managed policy: +// +// * Detach the policy from all users, groups, and roles that the policy +// is attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy +// API operations. To list all the users, groups, and roles that a policy +// is attached to, use ListEntitiesForPolicy. +// +// * Delete all versions of the policy using DeletePolicyVersion. To list +// the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion +// to delete the version that is marked as the default version. You delete +// the policy's default version in the next step of the process. +// +// * Delete the policy (this automatically deletes the policy's default version) +// using this API. +// +// For information about managed policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeletePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. 
+// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicy +func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + return out, req.Send() +} + +// DeletePolicyWithContext is the same as DeletePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeletePolicyWithContext(ctx aws.Context, input *DeletePolicyInput, opts ...request.Option) (*DeletePolicyOutput, error) { + req, out := c.DeletePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicyVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePolicyVersion for more information on using the DeletePolicyVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePolicyVersionRequest method. +// req, resp := client.DeletePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyVersion +func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { + op := &request.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + output = &DeletePolicyVersionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePolicyVersion API operation for AWS Identity and Access Management. +// +// Deletes the specified version from the specified managed policy. +// +// You cannot delete the default version from a policy using this API. To delete +// the default version from a policy, use DeletePolicy. To find out which version +// of a policy is marked as the default version, use ListPolicyVersions. 
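The multi-step deletion process documented for DeletePolicy above lends itself to a short worked sketch. The following assumes the policy has already been detached from all users, groups, and roles, and that the caller supplies a placeholder ARN; it is an illustration of the documented order, not part of the vendored SDK:

```go
// deleteManagedPolicy removes all non-default versions of a managed policy,
// then deletes the policy itself (which removes the default version).
func deleteManagedPolicy(svc *iam.IAM, arn string) error {
	policyArn := aws.String(arn)

	// Step 2 of the documented process: remove every non-default version.
	vers, err := svc.ListPolicyVersions(&iam.ListPolicyVersionsInput{PolicyArn: policyArn})
	if err != nil {
		return err
	}
	for _, v := range vers.Versions {
		if aws.BoolValue(v.IsDefaultVersion) {
			continue // DeletePolicy removes the default version itself
		}
		if _, err := svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{
			PolicyArn: policyArn,
			VersionId: v.VersionId,
		}); err != nil {
			return err
		}
	}

	// Step 3: delete the policy, which also deletes its default version.
	_, err = svc.DeletePolicy(&iam.DeletePolicyInput{PolicyArn: policyArn})
	return err
}
```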
+// +// For information about versions for managed policies, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeletePolicyVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyVersion +func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + return out, req.Send() +} + +// DeletePolicyVersionWithContext is the same as DeletePolicyVersion with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePolicyVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeletePolicyVersionWithContext(ctx aws.Context, input *DeletePolicyVersionInput, opts ...request.Option) (*DeletePolicyVersionOutput, error) { + req, out := c.DeletePolicyVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRole = "DeleteRole" + +// DeleteRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRole for more information on using the DeleteRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRoleRequest method. 
+// req, resp := client.DeleteRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRole +func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, output *DeleteRoleOutput) { + op := &request.Operation{ + Name: opDeleteRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRoleInput{} + } + + output = &DeleteRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRole API operation for AWS Identity and Access Management. +// +// Deletes the specified role. The role must not have any policies attached. +// For more information about roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Make sure that you do not have any Amazon EC2 instances running with the +// role you are about to delete. Deleting a role or instance profile that is +// associated with a running instance will break any applications running on +// the instance. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRole +func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { + req, out := c.DeleteRoleRequest(input) + return out, req.Send() +} + +// DeleteRoleWithContext is the same as DeleteRole with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
+// In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) DeleteRoleWithContext(ctx aws.Context, input *DeleteRoleInput, opts ...request.Option) (*DeleteRoleOutput, error) {
+	req, out := c.DeleteRoleRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+const opDeleteRolePermissionsBoundary = "DeleteRolePermissionsBoundary"
+
+// DeleteRolePermissionsBoundaryRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteRolePermissionsBoundary operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See DeleteRolePermissionsBoundary for more information on using the DeleteRolePermissionsBoundary
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+//    // Example sending a request using the DeleteRolePermissionsBoundaryRequest method.
+//    req, resp := client.DeleteRolePermissionsBoundaryRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePermissionsBoundary
+func (c *IAM) DeleteRolePermissionsBoundaryRequest(input *DeleteRolePermissionsBoundaryInput) (req *request.Request, output *DeleteRolePermissionsBoundaryOutput) {
+	op := &request.Operation{
+		Name:       opDeleteRolePermissionsBoundary,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteRolePermissionsBoundaryInput{}
+	}
+
+	output = &DeleteRolePermissionsBoundaryOutput{}
+	req = c.newRequest(op, input, output)
+	req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	return
+}
+
+// DeleteRolePermissionsBoundary API operation for AWS Identity and Access Management.
+//
+// Deletes the permissions boundary for the specified IAM role.
+//
+// Deleting the permissions boundary for a role might increase its permissions.
+// For example, it might allow anyone who assumes the role to perform all the
+// actions granted in its permissions policies.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation DeleteRolePermissionsBoundary for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchEntityException "NoSuchEntity"
+//   The request was rejected because it referenced a resource entity that does
+//   not exist. The error message describes the resource.
+//
+//   * ErrCodeUnmodifiableEntityException "UnmodifiableEntity"
+//   The request was rejected because only the service that depends on the service-linked
+//   role can modify or delete the role on your behalf. The error message includes
+//   the name of the service that depends on this service-linked role. You must
+//   request the change through that service.
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePermissionsBoundary +func (c *IAM) DeleteRolePermissionsBoundary(input *DeleteRolePermissionsBoundaryInput) (*DeleteRolePermissionsBoundaryOutput, error) { + req, out := c.DeleteRolePermissionsBoundaryRequest(input) + return out, req.Send() +} + +// DeleteRolePermissionsBoundaryWithContext is the same as DeleteRolePermissionsBoundary with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRolePermissionsBoundary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteRolePermissionsBoundaryWithContext(ctx aws.Context, input *DeleteRolePermissionsBoundaryInput, opts ...request.Option) (*DeleteRolePermissionsBoundaryOutput, error) { + req, out := c.DeleteRolePermissionsBoundaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteRolePolicy = "DeleteRolePolicy" + +// DeleteRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRolePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteRolePolicy for more information on using the DeleteRolePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteRolePolicyRequest method. +// req, resp := client.DeleteRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePolicy +func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *request.Request, output *DeleteRolePolicyOutput) { + op := &request.Operation{ + Name: opDeleteRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePolicyInput{} + } + + output = &DeleteRolePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteRolePolicy API operation for AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// role. +// +// A role can also have managed policies attached to it. To detach a managed +// policy from a role, use DetachRolePolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePolicy +func (c *IAM) DeleteRolePolicy(input *DeleteRolePolicyInput) (*DeleteRolePolicyOutput, error) { + req, out := c.DeleteRolePolicyRequest(input) + return out, req.Send() +} + +// DeleteRolePolicyWithContext is the same as DeleteRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteRolePolicyWithContext(ctx aws.Context, input *DeleteRolePolicyInput, opts ...request.Option) (*DeleteRolePolicyOutput, error) { + req, out := c.DeleteRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSAMLProvider = "DeleteSAMLProvider" + +// DeleteSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSAMLProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSAMLProvider for more information on using the DeleteSAMLProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSAMLProviderRequest method. 
+// req, resp := client.DeleteSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProvider +func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *request.Request, output *DeleteSAMLProviderOutput) { + op := &request.Operation{ + Name: opDeleteSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSAMLProviderInput{} + } + + output = &DeleteSAMLProviderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSAMLProvider API operation for AWS Identity and Access Management. +// +// Deletes a SAML provider resource in IAM. +// +// Deleting the provider resource from IAM does not update any roles that reference +// the SAML provider resource's ARN as a principal in their trust policies. +// Any attempt to assume a role that references a non-existent provider resource +// ARN fails. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteSAMLProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProvider +func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLProviderOutput, error) { + req, out := c.DeleteSAMLProviderRequest(input) + return out, req.Send() +} + +// DeleteSAMLProviderWithContext is the same as DeleteSAMLProvider with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSAMLProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteSAMLProviderWithContext(ctx aws.Context, input *DeleteSAMLProviderInput, opts ...request.Option) (*DeleteSAMLProviderOutput, error) { + req, out := c.DeleteSAMLProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
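+	// Descriptive note (added commentary, not generated code): at this point
+	// the request carries the caller's context and any per-call options;
+	// Send performs the HTTP round trip and unmarshals the query response.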
+ return out, req.Send() +} + +const opDeleteSSHPublicKey = "DeleteSSHPublicKey" + +// DeleteSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSSHPublicKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSSHPublicKey for more information on using the DeleteSSHPublicKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSSHPublicKeyRequest method. +// req, resp := client.DeleteSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSSHPublicKey +func (c *IAM) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) (req *request.Request, output *DeleteSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opDeleteSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSSHPublicKeyInput{} + } + + output = &DeleteSSHPublicKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSSHPublicKey API operation for AWS Identity and Access Management. +// +// Deletes the specified SSH public key. +// +// The SSH public key deleted by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteSSHPublicKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSSHPublicKey +func (c *IAM) DeleteSSHPublicKey(input *DeleteSSHPublicKeyInput) (*DeleteSSHPublicKeyOutput, error) { + req, out := c.DeleteSSHPublicKeyRequest(input) + return out, req.Send() +} + +// DeleteSSHPublicKeyWithContext is the same as DeleteSSHPublicKey with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSSHPublicKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
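+//
+// A minimal usage sketch, not part of the generated reference (the svc
+// client, user name, and key ID are hypothetical; imports elided):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteSSHPublicKeyWithContext(ctx, &iam.DeleteSSHPublicKeyInput{
+//        UserName:       aws.String("example-user"),
+//        SSHPublicKeyId: aws.String("APKAEXAMPLE"),
+//    })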
+func (c *IAM) DeleteSSHPublicKeyWithContext(ctx aws.Context, input *DeleteSSHPublicKeyInput, opts ...request.Option) (*DeleteSSHPublicKeyOutput, error) { + req, out := c.DeleteSSHPublicKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteServerCertificate = "DeleteServerCertificate" + +// DeleteServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServerCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteServerCertificate for more information on using the DeleteServerCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServerCertificateRequest method. +// req, resp := client.DeleteServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServerCertificate +func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) (req *request.Request, output *DeleteServerCertificateOutput) { + op := &request.Operation{ + Name: opDeleteServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServerCertificateInput{} + } + + output = &DeleteServerCertificateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteServerCertificate API operation for AWS Identity and Access Management. +// +// Deletes the specified server certificate. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic also includes a list of AWS services that +// can use the server certificates that you manage with IAM. +// +// If you are using a server certificate with Elastic Load Balancing, deleting +// the certificate could have implications for your application. If Elastic +// Load Balancing doesn't detect the deletion of bound certificates, it may +// continue to use the certificates. This could cause Elastic Load Balancing +// to stop accepting traffic. We recommend that you remove the reference to +// the certificate from Elastic Load Balancing before using this command to +// delete the certificate. For more information, go to DeleteLoadBalancerListeners +// (https://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DeleteLoadBalancerListeners.html) +// in the Elastic Load Balancing API Reference. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteServerCertificate for usage and error information. 
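+//
+// As an illustrative sketch only (the certificate name is hypothetical),
+// once the certificate is no longer referenced by Elastic Load Balancing
+// the deletion itself is a single call:
+//
+//    _, err := svc.DeleteServerCertificate(&iam.DeleteServerCertificateInput{
+//        ServerCertificateName: aws.String("ExampleCertificate"),
+//    })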
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServerCertificate +func (c *IAM) DeleteServerCertificate(input *DeleteServerCertificateInput) (*DeleteServerCertificateOutput, error) { + req, out := c.DeleteServerCertificateRequest(input) + return out, req.Send() +} + +// DeleteServerCertificateWithContext is the same as DeleteServerCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteServerCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteServerCertificateWithContext(ctx aws.Context, input *DeleteServerCertificateInput, opts ...request.Option) (*DeleteServerCertificateOutput, error) { + req, out := c.DeleteServerCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteServiceLinkedRole = "DeleteServiceLinkedRole" + +// DeleteServiceLinkedRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServiceLinkedRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteServiceLinkedRole for more information on using the DeleteServiceLinkedRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServiceLinkedRoleRequest method. 
+// req, resp := client.DeleteServiceLinkedRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRole +func (c *IAM) DeleteServiceLinkedRoleRequest(input *DeleteServiceLinkedRoleInput) (req *request.Request, output *DeleteServiceLinkedRoleOutput) { + op := &request.Operation{ + Name: opDeleteServiceLinkedRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceLinkedRoleInput{} + } + + output = &DeleteServiceLinkedRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteServiceLinkedRole API operation for AWS Identity and Access Management. +// +// Submits a service-linked role deletion request and returns a DeletionTaskId, +// which you can use to check the status of the deletion. Before you call this +// operation, confirm that the role has no active sessions and that any resources +// used by the role in the linked service are deleted. If you call this operation +// more than once for the same service-linked role and an earlier deletion task +// is not complete, then the DeletionTaskId of the earlier request is returned. +// +// If you submit a deletion request for a service-linked role whose linked service +// is still accessing a resource, then the deletion task fails. If it fails, +// the GetServiceLinkedRoleDeletionStatus API operation returns the reason for +// the failure, usually including the resources that must be deleted. To delete +// the service-linked role, you must first remove those resources from the linked +// service and then submit the deletion request again. Resources are specific +// to the service that is linked to the role. For more information about removing +// resources from a service, see the AWS documentation (http://docs.aws.amazon.com/) +// for your service. +// +// For more information about service-linked roles, see Roles Terms and Concepts: +// AWS Service-Linked Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteServiceLinkedRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
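+//
+// A hedged sketch of the submit-then-poll flow described above (the role
+// name is hypothetical; error handling elided):
+//
+//    out, _ := svc.DeleteServiceLinkedRole(&iam.DeleteServiceLinkedRoleInput{
+//        RoleName: aws.String("AWSServiceRoleForExample"),
+//    })
+//    status, _ := svc.GetServiceLinkedRoleDeletionStatus(&iam.GetServiceLinkedRoleDeletionStatusInput{
+//        DeletionTaskId: out.DeletionTaskId,
+//    })
+//    fmt.Println(aws.StringValue(status.Status))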
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRole +func (c *IAM) DeleteServiceLinkedRole(input *DeleteServiceLinkedRoleInput) (*DeleteServiceLinkedRoleOutput, error) { + req, out := c.DeleteServiceLinkedRoleRequest(input) + return out, req.Send() +} + +// DeleteServiceLinkedRoleWithContext is the same as DeleteServiceLinkedRole with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteServiceLinkedRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteServiceLinkedRoleWithContext(ctx aws.Context, input *DeleteServiceLinkedRoleInput, opts ...request.Option) (*DeleteServiceLinkedRoleOutput, error) { + req, out := c.DeleteServiceLinkedRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteServiceSpecificCredential = "DeleteServiceSpecificCredential" + +// DeleteServiceSpecificCredentialRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServiceSpecificCredential operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteServiceSpecificCredential for more information on using the DeleteServiceSpecificCredential +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteServiceSpecificCredentialRequest method. +// req, resp := client.DeleteServiceSpecificCredentialRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredential +func (c *IAM) DeleteServiceSpecificCredentialRequest(input *DeleteServiceSpecificCredentialInput) (req *request.Request, output *DeleteServiceSpecificCredentialOutput) { + op := &request.Operation{ + Name: opDeleteServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceSpecificCredentialInput{} + } + + output = &DeleteServiceSpecificCredentialOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteServiceSpecificCredential API operation for AWS Identity and Access Management. +// +// Deletes the specified service-specific credential. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteServiceSpecificCredential for usage and error information. 
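+//
+// For illustration (identifiers hypothetical), deleting a user's Git
+// credential for AWS CodeCommit might look like:
+//
+//    _, err := svc.DeleteServiceSpecificCredential(&iam.DeleteServiceSpecificCredentialInput{
+//        UserName:                    aws.String("example-user"),
+//        ServiceSpecificCredentialId: aws.String("ACCAEXAMPLE"),
+//    })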
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredential +func (c *IAM) DeleteServiceSpecificCredential(input *DeleteServiceSpecificCredentialInput) (*DeleteServiceSpecificCredentialOutput, error) { + req, out := c.DeleteServiceSpecificCredentialRequest(input) + return out, req.Send() +} + +// DeleteServiceSpecificCredentialWithContext is the same as DeleteServiceSpecificCredential with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteServiceSpecificCredential for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteServiceSpecificCredentialWithContext(ctx aws.Context, input *DeleteServiceSpecificCredentialInput, opts ...request.Option) (*DeleteServiceSpecificCredentialOutput, error) { + req, out := c.DeleteServiceSpecificCredentialRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteSigningCertificate = "DeleteSigningCertificate" + +// DeleteSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSigningCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteSigningCertificate for more information on using the DeleteSigningCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteSigningCertificateRequest method. +// req, resp := client.DeleteSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSigningCertificate +func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) (req *request.Request, output *DeleteSigningCertificateOutput) { + op := &request.Operation{ + Name: opDeleteSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSigningCertificateInput{} + } + + output = &DeleteSigningCertificateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteSigningCertificate API operation for AWS Identity and Access Management. +// +// Deletes a signing certificate associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. 
Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated IAM users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteSigningCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSigningCertificate +func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*DeleteSigningCertificateOutput, error) { + req, out := c.DeleteSigningCertificateRequest(input) + return out, req.Send() +} + +// DeleteSigningCertificateWithContext is the same as DeleteSigningCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteSigningCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteSigningCertificateWithContext(ctx aws.Context, input *DeleteSigningCertificateInput, opts ...request.Option) (*DeleteSigningCertificateOutput, error) { + req, out := c.DeleteSigningCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUser for more information on using the DeleteUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUserRequest method. 
+// req, resp := client.DeleteUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUser +func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { + op := &request.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + output = &DeleteUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUser API operation for AWS Identity and Access Management. +// +// Deletes the specified IAM user. Unlike the AWS Management Console, when you +// delete a user programmatically, you must delete the items attached to the +// user manually, or the deletion fails. For more information, see Deleting +// an IAM User (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_deleting_cli). +// Before attempting to delete a user, remove the following items: +// +// * Password (DeleteLoginProfile) +// +// * Access keys (DeleteAccessKey) +// +// * Signing certificate (DeleteSigningCertificate) +// +// * SSH public key (DeleteSSHPublicKey) +// +// * Git credentials (DeleteServiceSpecificCredential) +// +// * Multi-factor authentication (MFA) device (DeactivateMFADevice, DeleteVirtualMFADevice) +// +// * Inline policies (DeleteUserPolicy) +// +// * Attached managed policies (DetachUserPolicy) +// +// * Group memberships (RemoveUserFromGroup) +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUser +func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + return out, req.Send() +} + +// DeleteUserWithContext is the same as DeleteUser with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteUserWithContext(ctx aws.Context, input *DeleteUserInput, opts ...request.Option) (*DeleteUserOutput, error) { + req, out := c.DeleteUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteUserPermissionsBoundary = "DeleteUserPermissionsBoundary" + +// DeleteUserPermissionsBoundaryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPermissionsBoundary operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUserPermissionsBoundary for more information on using the DeleteUserPermissionsBoundary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUserPermissionsBoundaryRequest method. +// req, resp := client.DeleteUserPermissionsBoundaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPermissionsBoundary +func (c *IAM) DeleteUserPermissionsBoundaryRequest(input *DeleteUserPermissionsBoundaryInput) (req *request.Request, output *DeleteUserPermissionsBoundaryOutput) { + op := &request.Operation{ + Name: opDeleteUserPermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPermissionsBoundaryInput{} + } + + output = &DeleteUserPermissionsBoundaryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUserPermissionsBoundary API operation for AWS Identity and Access Management. +// +// Deletes the permissions boundary for the specified IAM user. +// +// Deleting the permissions boundary for a user might increase its permissions +// by allowing the user to perform all the actions granted in its permissions +// policies. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteUserPermissionsBoundary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
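+//
+// A sketch of the error-code handling pattern noted above (user name
+// hypothetical):
+//
+//    _, err := svc.DeleteUserPermissionsBoundary(&iam.DeleteUserPermissionsBoundaryInput{
+//        UserName: aws.String("example-user"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeNoSuchEntityException {
+//        // The user does not exist; nothing to delete.
+//    }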
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPermissionsBoundary +func (c *IAM) DeleteUserPermissionsBoundary(input *DeleteUserPermissionsBoundaryInput) (*DeleteUserPermissionsBoundaryOutput, error) { + req, out := c.DeleteUserPermissionsBoundaryRequest(input) + return out, req.Send() +} + +// DeleteUserPermissionsBoundaryWithContext is the same as DeleteUserPermissionsBoundary with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUserPermissionsBoundary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteUserPermissionsBoundaryWithContext(ctx aws.Context, input *DeleteUserPermissionsBoundaryInput, opts ...request.Option) (*DeleteUserPermissionsBoundaryOutput, error) { + req, out := c.DeleteUserPermissionsBoundaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteUserPolicy = "DeleteUserPolicy" + +// DeleteUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteUserPolicy for more information on using the DeleteUserPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteUserPolicyRequest method. +// req, resp := client.DeleteUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPolicy +func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *request.Request, output *DeleteUserPolicyOutput) { + op := &request.Operation{ + Name: opDeleteUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPolicyInput{} + } + + output = &DeleteUserPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteUserPolicy API operation for AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// user. +// +// A user can also have managed policies attached to it. To detach a managed +// policy from a user, use DetachUserPolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
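+//
+// Illustrative only (names hypothetical): removing an inline policy is a
+// single call, in contrast to DetachUserPolicy for managed policies.
+//
+//    _, err := svc.DeleteUserPolicy(&iam.DeleteUserPolicyInput{
+//        UserName:   aws.String("example-user"),
+//        PolicyName: aws.String("ExampleInlinePolicy"),
+//    })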
+// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteUserPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPolicy +func (c *IAM) DeleteUserPolicy(input *DeleteUserPolicyInput) (*DeleteUserPolicyOutput, error) { + req, out := c.DeleteUserPolicyRequest(input) + return out, req.Send() +} + +// DeleteUserPolicyWithContext is the same as DeleteUserPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteUserPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteUserPolicyWithContext(ctx aws.Context, input *DeleteUserPolicyInput, opts ...request.Option) (*DeleteUserPolicyOutput, error) { + req, out := c.DeleteUserPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" + +// DeleteVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVirtualMFADevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteVirtualMFADevice for more information on using the DeleteVirtualMFADevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteVirtualMFADeviceRequest method. +// req, resp := client.DeleteVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteVirtualMFADevice +func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) (req *request.Request, output *DeleteVirtualMFADeviceOutput) { + op := &request.Operation{ + Name: opDeleteVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVirtualMFADeviceInput{} + } + + output = &DeleteVirtualMFADeviceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteVirtualMFADevice API operation for AWS Identity and Access Management. +// +// Deletes a virtual MFA device. 
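+//
+// As the next paragraph explains, the device must be deactivated first; a
+// hedged two-step sketch (serial number and user name hypothetical, error
+// handling elided):
+//
+//    svc.DeactivateMFADevice(&iam.DeactivateMFADeviceInput{
+//        UserName:     aws.String("example-user"),
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example"),
+//    })
+//    svc.DeleteVirtualMFADevice(&iam.DeleteVirtualMFADeviceInput{
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example"),
+//    })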
+// +// You must deactivate a user's virtual MFA device before you can delete it. +// For information about deactivating MFA devices, see DeactivateMFADevice. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DeleteVirtualMFADevice for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeDeleteConflictException "DeleteConflict" +// The request was rejected because it attempted to delete a resource that has +// attached subordinate entities. The error message describes these entities. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteVirtualMFADevice +func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*DeleteVirtualMFADeviceOutput, error) { + req, out := c.DeleteVirtualMFADeviceRequest(input) + return out, req.Send() +} + +// DeleteVirtualMFADeviceWithContext is the same as DeleteVirtualMFADevice with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteVirtualMFADevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DeleteVirtualMFADeviceWithContext(ctx aws.Context, input *DeleteVirtualMFADeviceInput, opts ...request.Option) (*DeleteVirtualMFADeviceOutput, error) { + req, out := c.DeleteVirtualMFADeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachGroupPolicy = "DetachGroupPolicy" + +// DetachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachGroupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachGroupPolicy for more information on using the DetachGroupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachGroupPolicyRequest method. 
+// req, resp := client.DetachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicy +func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *request.Request, output *DetachGroupPolicyOutput) { + op := &request.Operation{ + Name: opDetachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachGroupPolicyInput{} + } + + output = &DetachGroupPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachGroupPolicy API operation for AWS Identity and Access Management. +// +// Removes the specified managed policy from the specified IAM group. +// +// A group can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteGroupPolicy API. For information about policies, see +// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DetachGroupPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicy +func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPolicyOutput, error) { + req, out := c.DetachGroupPolicyRequest(input) + return out, req.Send() +} + +// DetachGroupPolicyWithContext is the same as DetachGroupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DetachGroupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DetachGroupPolicyWithContext(ctx aws.Context, input *DetachGroupPolicyInput, opts ...request.Option) (*DetachGroupPolicyOutput, error) { + req, out := c.DetachGroupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachRolePolicy = "DetachRolePolicy" + +// DetachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachRolePolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachRolePolicy for more information on using the DetachRolePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachRolePolicyRequest method. +// req, resp := client.DetachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachRolePolicy +func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *request.Request, output *DetachRolePolicyOutput) { + op := &request.Operation{ + Name: opDetachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachRolePolicyInput{} + } + + output = &DetachRolePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachRolePolicy API operation for AWS Identity and Access Management. +// +// Removes the specified managed policy from the specified role. +// +// A role can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteRolePolicy API. For information about policies, see +// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DetachRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachRolePolicy +func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyOutput, error) { + req, out := c.DetachRolePolicyRequest(input) + return out, req.Send() +} + +// DetachRolePolicyWithContext is the same as DetachRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DetachRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DetachRolePolicyWithContext(ctx aws.Context, input *DetachRolePolicyInput, opts ...request.Option) (*DetachRolePolicyOutput, error) { + req, out := c.DetachRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDetachUserPolicy = "DetachUserPolicy" + +// DetachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachUserPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DetachUserPolicy for more information on using the DetachUserPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DetachUserPolicyRequest method. +// req, resp := client.DetachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachUserPolicy +func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *request.Request, output *DetachUserPolicyOutput) { + op := &request.Operation{ + Name: opDetachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachUserPolicyInput{} + } + + output = &DetachUserPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DetachUserPolicy API operation for AWS Identity and Access Management. +// +// Removes the specified managed policy from the specified user. +// +// A user can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteUserPolicy API. For information about policies, see +// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation DetachUserPolicy for usage and error information. 
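+//
+// A minimal sketch (user name and policy ARN hypothetical):
+//
+//    _, err := svc.DetachUserPolicy(&iam.DetachUserPolicyInput{
+//        UserName:  aws.String("example-user"),
+//        PolicyArn: aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+//    })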
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachUserPolicy +func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyOutput, error) { + req, out := c.DetachUserPolicyRequest(input) + return out, req.Send() +} + +// DetachUserPolicyWithContext is the same as DetachUserPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DetachUserPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) DetachUserPolicyWithContext(ctx aws.Context, input *DetachUserPolicyInput, opts ...request.Option) (*DetachUserPolicyOutput, error) { + req, out := c.DetachUserPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opEnableMFADevice = "EnableMFADevice" + +// EnableMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the EnableMFADevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See EnableMFADevice for more information on using the EnableMFADevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the EnableMFADeviceRequest method. +// req, resp := client.EnableMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EnableMFADevice +func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request.Request, output *EnableMFADeviceOutput) { + op := &request.Operation{ + Name: opEnableMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMFADeviceInput{} + } + + output = &EnableMFADeviceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// EnableMFADevice API operation for AWS Identity and Access Management. +// +// Enables the specified MFA device and associates it with the specified IAM +// user. 
When enabled, the MFA device is required for every subsequent login +// by the IAM user associated with the device. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation EnableMFADevice for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" +// The request was rejected because it referenced an entity that is temporarily +// unmodifiable, such as a user name that was deleted and then recreated. The +// error indicates that the request is likely to succeed if you try again after +// waiting several minutes. The error message describes the entity. +// +// * ErrCodeInvalidAuthenticationCodeException "InvalidAuthenticationCode" +// The request was rejected because the authentication code was not recognized. +// The error message describes the specific error. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EnableMFADevice +func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutput, error) { + req, out := c.EnableMFADeviceRequest(input) + return out, req.Send() +} + +// EnableMFADeviceWithContext is the same as EnableMFADevice with the addition of +// the ability to pass a context and additional request options. +// +// See EnableMFADevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) EnableMFADeviceWithContext(ctx aws.Context, input *EnableMFADeviceInput, opts ...request.Option) (*EnableMFADeviceOutput, error) { + req, out := c.EnableMFADeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGenerateCredentialReport = "GenerateCredentialReport" + +// GenerateCredentialReportRequest generates a "aws/request.Request" representing the +// client's request for the GenerateCredentialReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GenerateCredentialReport for more information on using the GenerateCredentialReport +// API call, and error handling. 
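+//
+//    // A minimal polling sketch, assuming an existing *iam.IAM client named
+//    // svc; the two-second interval is arbitrary.
+//    for {
+//        out, err := svc.GenerateCredentialReport(&iam.GenerateCredentialReportInput{})
+//        if err != nil || aws.StringValue(out.State) == iam.ReportStateTypeComplete {
+//            break
+//        }
+//        time.Sleep(2 * time.Second)
+//    }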
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GenerateCredentialReportRequest method. +// req, resp := client.GenerateCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateCredentialReport +func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) (req *request.Request, output *GenerateCredentialReportOutput) { + op := &request.Operation{ + Name: opGenerateCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateCredentialReportInput{} + } + + output = &GenerateCredentialReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GenerateCredentialReport API operation for AWS Identity and Access Management. +// +// Generates a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GenerateCredentialReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateCredentialReport +func (c *IAM) GenerateCredentialReport(input *GenerateCredentialReportInput) (*GenerateCredentialReportOutput, error) { + req, out := c.GenerateCredentialReportRequest(input) + return out, req.Send() +} + +// GenerateCredentialReportWithContext is the same as GenerateCredentialReport with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateCredentialReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GenerateCredentialReportWithContext(ctx aws.Context, input *GenerateCredentialReportInput, opts ...request.Option) (*GenerateCredentialReportOutput, error) { + req, out := c.GenerateCredentialReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGenerateOrganizationsAccessReport = "GenerateOrganizationsAccessReport" + +// GenerateOrganizationsAccessReportRequest generates a "aws/request.Request" representing the +// client's request for the GenerateOrganizationsAccessReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GenerateOrganizationsAccessReport for more information on using the GenerateOrganizationsAccessReport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GenerateOrganizationsAccessReportRequest method. +// req, resp := client.GenerateOrganizationsAccessReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateOrganizationsAccessReport +func (c *IAM) GenerateOrganizationsAccessReportRequest(input *GenerateOrganizationsAccessReportInput) (req *request.Request, output *GenerateOrganizationsAccessReportOutput) { + op := &request.Operation{ + Name: opGenerateOrganizationsAccessReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateOrganizationsAccessReportInput{} + } + + output = &GenerateOrganizationsAccessReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GenerateOrganizationsAccessReport API operation for AWS Identity and Access Management. +// +// Generates a report for service last accessed data for AWS Organizations. +// You can generate a report for any entities (organization root, organizational +// unit, or account) or policies in your organization. +// +// To call this operation, you must be signed in using your AWS Organizations +// master account credentials. You can use your long-term IAM user or root user +// credentials, or temporary credentials from assuming an IAM role. SCPs must +// be enabled for your organization root. You must have the required IAM and +// AWS Organizations permissions. For more information, see Refining Permissions +// Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// You can generate a service last accessed data report for entities by specifying +// only the entity's path. This data includes a list of services that are allowed +// by any service control policies (SCPs) that apply to the entity. +// +// You can generate a service last accessed data report for a policy by specifying +// an entity's path and an optional AWS Organizations policy ID. This data includes +// a list of services that are allowed by the specified SCP. +// +// For each service in both report types, the data includes the most recent +// account activity that the policy allows to account principals in the entity +// or the entity's children. For important information about the data, reporting +// period, permissions required, troubleshooting, and supported Regions see +// Reducing Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// The data includes all attempts to access AWS, not just the successful ones. +// This includes all attempts that were made using the AWS Management Console, +// the AWS API through any of the SDKs, or any of the command line tools. 
An +// unexpected entry in the service last accessed data does not mean that an +// account has been compromised, because the request might have been denied. +// Refer to your CloudTrail logs as the authoritative source for information +// about all API calls and whether they were successful or denied access. For +// more information, see Logging IAM Events with CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// This operation returns a JobId. Use this parameter in the GetOrganizationsAccessReport +// operation to check the status of the report generation. To check the status +// of this request, use the JobId parameter in the GetOrganizationsAccessReport +// operation and test the JobStatus response parameter. When the job is complete, +// you can retrieve the report. +// +// To generate a service last accessed data report for entities, specify an +// entity path without specifying the optional AWS Organizations policy ID. +// The type of entity that you specify determines the data returned in the report. +// +// * Root – When you specify the organizations root as the entity, the +// resulting report lists all of the services allowed by SCPs that are attached +// to your root. For each service, the report includes data for all accounts +// in your organization except the master account, because the master account +// is not limited by SCPs. +// +// * OU – When you specify an organizational unit (OU) as the entity, the +// resulting report lists all of the services allowed by SCPs that are attached +// to the OU and its parents. For each service, the report includes data +// for all accounts in the OU or its children. This data excludes the master +// account, because the master account is not limited by SCPs. +// +// * Master account – When you specify the master account, the resulting +// report lists all AWS services, because the master account is not limited +// by SCPs. For each service, the report includes data for only the master +// account. +// +// * Account – When you specify another account as the entity, the resulting +// report lists all of the services allowed by SCPs that are attached to +// the account and its parents. For each service, the report includes data +// for only the specified account. +// +// To generate a service last accessed data report for policies, specify an +// entity path and the optional AWS Organizations policy ID. The type of entity +// that you specify determines the data returned for each service. +// +// * Root – When you specify the root entity and a policy ID, the resulting +// report lists all of the services that are allowed by the specified SCP. +// For each service, the report includes data for all accounts in your organization +// to which the SCP applies. This data excludes the master account, because +// the master account is not limited by SCPs. If the SCP is not attached +// to any entities in the organization, then the report will return a list +// of services with no data. +// +// * OU – When you specify an OU entity and a policy ID, the resulting +// report lists all of the services that are allowed by the specified SCP. +// For each service, the report includes data for all accounts in the OU +// or its children to which the SCP applies. This means that other accounts +// outside the OU that are affected by the SCP might not be included in the +// data. This data excludes the master account, because the master account +// is not limited by SCPs. 
If the SCP is not attached to the OU or one of +// its children, the report will return a list of services with no data. +// +// * Master account – When you specify the master account, the resulting +// report lists all AWS services, because the master account is not limited +// by SCPs. If you specify a policy ID in the CLI or API, the policy is ignored. +// For each service, the report includes data for only the master account. +// +// * Account – When you specify another account entity and a policy ID, +// the resulting report lists all of the services that are allowed by the +// specified SCP. For each service, the report includes data for only the +// specified account. This means that other accounts in the organization +// that are affected by the SCP might not be included in the data. If the +// SCP is not attached to the account, the report will return a list of services +// with no data. +// +// Service last accessed data does not use other policy types when determining +// whether a principal could access a service. These other policy types include +// identity-based policies, resource-based policies, access control lists, IAM +// permissions boundaries, and STS assume role policies. It only applies SCP +// logic. For more about the evaluation of policy types, see Evaluating Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For more information about service last accessed data, see Reducing Policy +// Scope by Viewing User Activity (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GenerateOrganizationsAccessReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeReportGenerationLimitExceededException "ReportGenerationLimitExceeded" +// The request failed because the maximum number of concurrent requests for +// this account are already running. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateOrganizationsAccessReport +func (c *IAM) GenerateOrganizationsAccessReport(input *GenerateOrganizationsAccessReportInput) (*GenerateOrganizationsAccessReportOutput, error) { + req, out := c.GenerateOrganizationsAccessReportRequest(input) + return out, req.Send() +} + +// GenerateOrganizationsAccessReportWithContext is the same as GenerateOrganizationsAccessReport with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateOrganizationsAccessReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GenerateOrganizationsAccessReportWithContext(ctx aws.Context, input *GenerateOrganizationsAccessReportInput, opts ...request.Option) (*GenerateOrganizationsAccessReportOutput, error) { + req, out := c.GenerateOrganizationsAccessReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGenerateServiceLastAccessedDetails = "GenerateServiceLastAccessedDetails" + +// GenerateServiceLastAccessedDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GenerateServiceLastAccessedDetails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GenerateServiceLastAccessedDetails for more information on using the GenerateServiceLastAccessedDetails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GenerateServiceLastAccessedDetailsRequest method. +// req, resp := client.GenerateServiceLastAccessedDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateServiceLastAccessedDetails +func (c *IAM) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLastAccessedDetailsInput) (req *request.Request, output *GenerateServiceLastAccessedDetailsOutput) { + op := &request.Operation{ + Name: opGenerateServiceLastAccessedDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateServiceLastAccessedDetailsInput{} + } + + output = &GenerateServiceLastAccessedDetailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GenerateServiceLastAccessedDetails API operation for AWS Identity and Access Management. +// +// Generates a report that includes details about when an IAM resource (user, +// group, role, or policy) was last used in an attempt to access AWS services. +// Recent activity usually appears within four hours. IAM reports activity for +// the last 365 days, or less if your Region began supporting this feature within +// the last year. For more information, see Regions Where Data Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period). +// +// The service last accessed data includes all attempts to access an AWS API, +// not just the successful ones. This includes all attempts that were made using +// the AWS Management Console, the AWS API through any of the SDKs, or any of +// the command line tools. An unexpected entry in the service last accessed +// data does not mean that your account has been compromised, because the request +// might have been denied. Refer to your CloudTrail logs as the authoritative +// source for information about all API calls and whether they were successful +// or denied access. For more information, see Logging IAM Events with CloudTrail +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// The GenerateServiceLastAccessedDetails operation returns a JobId. Use this +// parameter in the following operations to retrieve the following details from +// your report: +// +// * GetServiceLastAccessedDetails – Use this operation for users, groups, +// roles, or policies to list every AWS service that the resource could access +// using permissions policies. 
For each service, the response includes information +// about the most recent access attempt. +// +// * GetServiceLastAccessedDetailsWithEntities – Use this operation for +// groups and policies to list information about the associated entities +// (users or roles) that attempted to access a specific AWS service. +// +// To check the status of the GenerateServiceLastAccessedDetails request, use +// the JobId parameter in the same operations and test the JobStatus response +// parameter. +// +// For additional information about the permissions policies that allow an identity +// (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess +// operation. +// +// Service last accessed data does not use other policy types when determining +// whether a resource could access a service. These other policy types include +// resource-based policies, access control lists, AWS Organizations policies, +// IAM permissions boundaries, and AWS STS assume role policies. It only applies +// permissions policy logic. For more about the evaluation of policy types, +// see Evaluating Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For more information about service last accessed data, see Reducing Policy +// Scope by Viewing User Activity (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GenerateServiceLastAccessedDetails for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateServiceLastAccessedDetails +func (c *IAM) GenerateServiceLastAccessedDetails(input *GenerateServiceLastAccessedDetailsInput) (*GenerateServiceLastAccessedDetailsOutput, error) { + req, out := c.GenerateServiceLastAccessedDetailsRequest(input) + return out, req.Send() +} + +// GenerateServiceLastAccessedDetailsWithContext is the same as GenerateServiceLastAccessedDetails with the addition of +// the ability to pass a context and additional request options. +// +// See GenerateServiceLastAccessedDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GenerateServiceLastAccessedDetailsWithContext(ctx aws.Context, input *GenerateServiceLastAccessedDetailsInput, opts ...request.Option) (*GenerateServiceLastAccessedDetailsOutput, error) { + req, out := c.GenerateServiceLastAccessedDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" + +// GetAccessKeyLastUsedRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyLastUsed operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccessKeyLastUsed for more information on using the GetAccessKeyLastUsed +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccessKeyLastUsedRequest method. +// req, resp := client.GetAccessKeyLastUsedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsed +func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req *request.Request, output *GetAccessKeyLastUsedOutput) { + op := &request.Operation{ + Name: opGetAccessKeyLastUsed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyLastUsedInput{} + } + + output = &GetAccessKeyLastUsedOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccessKeyLastUsed API operation for AWS Identity and Access Management. +// +// Retrieves information about when the specified access key was last used. +// The information includes the date and time of last use, along with the AWS +// service and Region that were specified in the last request made with that +// key. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetAccessKeyLastUsed for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsed +func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccessKeyLastUsedOutput, error) { + req, out := c.GetAccessKeyLastUsedRequest(input) + return out, req.Send() +} + +// GetAccessKeyLastUsedWithContext is the same as GetAccessKeyLastUsed with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccessKeyLastUsed for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetAccessKeyLastUsedWithContext(ctx aws.Context, input *GetAccessKeyLastUsedInput, opts ...request.Option) (*GetAccessKeyLastUsedOutput, error) { + req, out := c.GetAccessKeyLastUsedRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" + +// GetAccountAuthorizationDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountAuthorizationDetails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccountAuthorizationDetails for more information on using the GetAccountAuthorizationDetails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccountAuthorizationDetailsRequest method. +// req, resp := client.GetAccountAuthorizationDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountAuthorizationDetails +func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) (req *request.Request, output *GetAccountAuthorizationDetailsOutput) { + op := &request.Operation{ + Name: opGetAccountAuthorizationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetAccountAuthorizationDetailsInput{} + } + + output = &GetAccountAuthorizationDetailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccountAuthorizationDetails API operation for AWS Identity and Access Management. +// +// Retrieves information about all IAM users, groups, roles, and policies in +// your AWS account, including their relationships to one another. Use this +// API to obtain a snapshot of the configuration of IAM permissions (users, +// groups, roles, and policies) in your account. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// You can optionally filter the results using the Filter parameter. You can +// paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetAccountAuthorizationDetails for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
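+//
+//    // A minimal decoding sketch for the URL-encoded policy documents noted
+//    // above; "detail" is an assumed variable standing in for one
+//    // PolicyVersion taken from the response.
+//    if doc, err := url.QueryUnescape(aws.StringValue(detail.Document)); err == nil {
+//        fmt.Println(doc) // the policy as plain JSON text
+//    }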
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountAuthorizationDetails +func (c *IAM) GetAccountAuthorizationDetails(input *GetAccountAuthorizationDetailsInput) (*GetAccountAuthorizationDetailsOutput, error) { + req, out := c.GetAccountAuthorizationDetailsRequest(input) + return out, req.Send() +} + +// GetAccountAuthorizationDetailsWithContext is the same as GetAccountAuthorizationDetails with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccountAuthorizationDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetAccountAuthorizationDetailsWithContext(ctx aws.Context, input *GetAccountAuthorizationDetailsInput, opts ...request.Option) (*GetAccountAuthorizationDetailsOutput, error) { + req, out := c.GetAccountAuthorizationDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetAccountAuthorizationDetailsPages iterates over the pages of a GetAccountAuthorizationDetails operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAccountAuthorizationDetails method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAccountAuthorizationDetails operation. +// pageNum := 0 +// err := client.GetAccountAuthorizationDetailsPages(params, +// func(page *iam.GetAccountAuthorizationDetailsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorizationDetailsInput, fn func(*GetAccountAuthorizationDetailsOutput, bool) bool) error { + return c.GetAccountAuthorizationDetailsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetAccountAuthorizationDetailsPagesWithContext same as GetAccountAuthorizationDetailsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetAccountAuthorizationDetailsPagesWithContext(ctx aws.Context, input *GetAccountAuthorizationDetailsInput, fn func(*GetAccountAuthorizationDetailsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetAccountAuthorizationDetailsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetAccountAuthorizationDetailsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*GetAccountAuthorizationDetailsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" + +// GetAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountPasswordPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccountPasswordPolicy for more information on using the GetAccountPasswordPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccountPasswordPolicyRequest method. +// req, resp := client.GetAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountPasswordPolicy +func (c *IAM) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) (req *request.Request, output *GetAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opGetAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountPasswordPolicyInput{} + } + + output = &GetAccountPasswordPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccountPasswordPolicy API operation for AWS Identity and Access Management. +// +// Retrieves the password policy for the AWS account. For more information about +// using a password policy, go to Managing an IAM Password Policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetAccountPasswordPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountPasswordPolicy +func (c *IAM) GetAccountPasswordPolicy(input *GetAccountPasswordPolicyInput) (*GetAccountPasswordPolicyOutput, error) { + req, out := c.GetAccountPasswordPolicyRequest(input) + return out, req.Send() +} + +// GetAccountPasswordPolicyWithContext is the same as GetAccountPasswordPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccountPasswordPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetAccountPasswordPolicyWithContext(ctx aws.Context, input *GetAccountPasswordPolicyInput, opts ...request.Option) (*GetAccountPasswordPolicyOutput, error) { + req, out := c.GetAccountPasswordPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetAccountSummary = "GetAccountSummary" + +// GetAccountSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountSummary operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetAccountSummary for more information on using the GetAccountSummary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetAccountSummaryRequest method. +// req, resp := client.GetAccountSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountSummary +func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *request.Request, output *GetAccountSummaryOutput) { + op := &request.Operation{ + Name: opGetAccountSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSummaryInput{} + } + + output = &GetAccountSummaryOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetAccountSummary API operation for AWS Identity and Access Management. +// +// Retrieves information about IAM entity usage and IAM quotas in the AWS account. +// +// For information about limitations on IAM entities, see Limitations on IAM +// Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetAccountSummary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountSummary +func (c *IAM) GetAccountSummary(input *GetAccountSummaryInput) (*GetAccountSummaryOutput, error) { + req, out := c.GetAccountSummaryRequest(input) + return out, req.Send() +} + +// GetAccountSummaryWithContext is the same as GetAccountSummary with the addition of +// the ability to pass a context and additional request options. +// +// See GetAccountSummary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetAccountSummaryWithContext(ctx aws.Context, input *GetAccountSummaryInput, opts ...request.Option) (*GetAccountSummaryOutput, error) { + req, out := c.GetAccountSummaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy" + +// GetContextKeysForCustomPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetContextKeysForCustomPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetContextKeysForCustomPolicy for more information on using the GetContextKeysForCustomPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetContextKeysForCustomPolicyRequest method. +// req, resp := client.GetContextKeysForCustomPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForCustomPolicy +func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { + op := &request.Operation{ + Name: opGetContextKeysForCustomPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetContextKeysForCustomPolicyInput{} + } + + output = &GetContextKeysForPolicyResponse{} + req = c.newRequest(op, input, output) + return +} + +// GetContextKeysForCustomPolicy API operation for AWS Identity and Access Management. +// +// Gets a list of all of the context keys referenced in the input policies. +// The policies are supplied as a list of one or more strings. To get the context +// keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. Context keys can be evaluated +// by testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy +// to understand what key names and values you must supply when you call SimulateCustomPolicy. +// Note that all parameters are shown in unencoded form here for clarity but +// must be URL encoded to be included as a part of a real HTML request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetContextKeysForCustomPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. 
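+//
+//    // A minimal sketch, assuming an existing *iam.IAM client named svc; the
+//    // policy JSON is illustrative only.
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*","Condition":{"IpAddress":{"aws:SourceIp":"203.0.113.0/24"}}}]}`
+//    resp, err := svc.GetContextKeysForCustomPolicy(&iam.GetContextKeysForCustomPolicyInput{
+//        PolicyInputList: []*string{aws.String(policy)},
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValueSlice(resp.ContextKeyNames)) // e.g. [aws:SourceIp]
+//    }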
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForCustomPolicy +func (c *IAM) GetContextKeysForCustomPolicy(input *GetContextKeysForCustomPolicyInput) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForCustomPolicyRequest(input) + return out, req.Send() +} + +// GetContextKeysForCustomPolicyWithContext is the same as GetContextKeysForCustomPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetContextKeysForCustomPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetContextKeysForCustomPolicyWithContext(ctx aws.Context, input *GetContextKeysForCustomPolicyInput, opts ...request.Option) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForCustomPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy" + +// GetContextKeysForPrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetContextKeysForPrincipalPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetContextKeysForPrincipalPolicy for more information on using the GetContextKeysForPrincipalPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetContextKeysForPrincipalPolicyRequest method. +// req, resp := client.GetContextKeysForPrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPrincipalPolicy +func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { + op := &request.Operation{ + Name: opGetContextKeysForPrincipalPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetContextKeysForPrincipalPolicyInput{} + } + + output = &GetContextKeysForPolicyResponse{} + req = c.newRequest(op, input, output) + return +} + +// GetContextKeysForPrincipalPolicy API operation for AWS Identity and Access Management. +// +// Gets a list of all of the context keys referenced in all the IAM policies +// that are attached to the specified IAM entity. The entity can be an IAM user, +// group, or role. If you specify a user, then the request also includes all +// of the policies attached to groups that the user is a member of. +// +// You can optionally include a list of one or more additional policies, specified +// as strings. If you want to include only a list of policies by string, use +// GetContextKeysForCustomPolicy instead. 
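+//
+//    // A minimal sketch, assuming an existing *iam.IAM client named svc; the
+//    // ARN is a placeholder.
+//    resp, err := svc.GetContextKeysForPrincipalPolicy(&iam.GetContextKeysForPrincipalPolicyInput{
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/example-user"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValueSlice(resp.ContextKeyNames))
+//    }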
+// +// Note: This API discloses information about the permissions granted to other +// users. If you do not want users to see other user's permissions, then consider +// allowing them to use GetContextKeysForCustomPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. Context keys can be evaluated +// by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy +// to understand what key names and values you must supply when you call SimulatePrincipalPolicy. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetContextKeysForPrincipalPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPrincipalPolicy +func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipalPolicyInput) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForPrincipalPolicyRequest(input) + return out, req.Send() +} + +// GetContextKeysForPrincipalPolicyWithContext is the same as GetContextKeysForPrincipalPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetContextKeysForPrincipalPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetContextKeysForPrincipalPolicyWithContext(ctx aws.Context, input *GetContextKeysForPrincipalPolicyInput, opts ...request.Option) (*GetContextKeysForPolicyResponse, error) { + req, out := c.GetContextKeysForPrincipalPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCredentialReport = "GetCredentialReport" + +// GetCredentialReportRequest generates a "aws/request.Request" representing the +// client's request for the GetCredentialReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetCredentialReport for more information on using the GetCredentialReport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetCredentialReportRequest method. 
+// req, resp := client.GetCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetCredentialReport +func (c *IAM) GetCredentialReportRequest(input *GetCredentialReportInput) (req *request.Request, output *GetCredentialReportOutput) { + op := &request.Operation{ + Name: opGetCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialReportInput{} + } + + output = &GetCredentialReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCredentialReport API operation for AWS Identity and Access Management. +// +// Retrieves a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetCredentialReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeCredentialReportNotPresentException "ReportNotPresent" +// The request was rejected because the credential report does not exist. To +// generate a credential report, use GenerateCredentialReport. +// +// * ErrCodeCredentialReportExpiredException "ReportExpired" +// The request was rejected because the most recent credential report has expired. +// To generate a new credential report, use GenerateCredentialReport. For more +// information about credential report expiration, see Getting Credential Reports +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +// +// * ErrCodeCredentialReportNotReadyException "ReportInProgress" +// The request was rejected because the credential report is still being generated. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetCredentialReport +func (c *IAM) GetCredentialReport(input *GetCredentialReportInput) (*GetCredentialReportOutput, error) { + req, out := c.GetCredentialReportRequest(input) + return out, req.Send() +} + +// GetCredentialReportWithContext is the same as GetCredentialReport with the addition of +// the ability to pass a context and additional request options. +// +// See GetCredentialReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetCredentialReportWithContext(ctx aws.Context, input *GetCredentialReportInput, opts ...request.Option) (*GetCredentialReportOutput, error) { + req, out := c.GetCredentialReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetGroup = "GetGroup" + +// GetGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetGroup operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroup for more information on using the GetGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupRequest method. +// req, resp := client.GetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroup +func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) { + op := &request.Operation{ + Name: opGetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetGroupInput{} + } + + output = &GetGroupOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroup API operation for AWS Identity and Access Management. +// +// Returns a list of IAM users that are in the specified IAM group. You can +// paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroup +func (c *IAM) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { + req, out := c.GetGroupRequest(input) + return out, req.Send() +} + +// GetGroupWithContext is the same as GetGroup with the addition of +// the ability to pass a context and additional request options. +// +// See GetGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetGroupWithContext(ctx aws.Context, input *GetGroupInput, opts ...request.Option) (*GetGroupOutput, error) { + req, out := c.GetGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// GetGroupPages iterates over the pages of a GetGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a GetGroup operation. +// pageNum := 0 +// err := client.GetGroupPages(params, +// func(page *iam.GetGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(*GetGroupOutput, bool) bool) error { + return c.GetGroupPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// GetGroupPagesWithContext same as GetGroupPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetGroupPagesWithContext(ctx aws.Context, input *GetGroupInput, fn func(*GetGroupOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *GetGroupInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetGroupRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*GetGroupOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opGetGroupPolicy = "GetGroupPolicy" + +// GetGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetGroupPolicy for more information on using the GetGroupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetGroupPolicyRequest method. +// req, resp := client.GetGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupPolicy +func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Request, output *GetGroupPolicyOutput) { + op := &request.Operation{ + Name: opGetGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGroupPolicyInput{} + } + + output = &GetGroupPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetGroupPolicy API operation for AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded in the specified +// IAM group. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM group can also have managed policies attached to it. 
To retrieve a +// managed policy document that is attached to a group, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetGroupPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupPolicy +func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, error) { + req, out := c.GetGroupPolicyRequest(input) + return out, req.Send() +} + +// GetGroupPolicyWithContext is the same as GetGroupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetGroupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetGroupPolicyWithContext(ctx aws.Context, input *GetGroupPolicyInput, opts ...request.Option) (*GetGroupPolicyOutput, error) { + req, out := c.GetGroupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetInstanceProfile = "GetInstanceProfile" + +// GetInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetInstanceProfile for more information on using the GetInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetInstanceProfileRequest method. 
+// req, resp := client.GetInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetInstanceProfile +func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *request.Request, output *GetInstanceProfileOutput) { + op := &request.Operation{ + Name: opGetInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceProfileInput{} + } + + output = &GetInstanceProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetInstanceProfile API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified instance profile, including the +// instance profile's path, GUID, ARN, and role. For more information about +// instance profiles, see About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetInstanceProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetInstanceProfile +func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstanceProfileOutput, error) { + req, out := c.GetInstanceProfileRequest(input) + return out, req.Send() +} + +// GetInstanceProfileWithContext is the same as GetInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See GetInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetInstanceProfileWithContext(ctx aws.Context, input *GetInstanceProfileInput, opts ...request.Option) (*GetInstanceProfileOutput, error) { + req, out := c.GetInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetLoginProfile = "GetLoginProfile" + +// GetLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetLoginProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetLoginProfile for more information on using the GetLoginProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
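+//
+// Because GetLoginProfile reports a missing password as a NoSuchEntity error
+// (see the operation notes below), the error code can be used to tell users
+// with and without console passwords apart. A minimal sketch, assuming the
+// awserr package (github.com/aws/aws-sdk-go/aws/awserr) and a user "jdoe":
+//
+//    _, err := client.GetLoginProfile(&iam.GetLoginProfileInput{
+//        UserName: aws.String("jdoe"),
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeNoSuchEntityException {
+//        fmt.Println("user has no console password")
+//    }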
+// +// +// // Example sending a request using the GetLoginProfileRequest method. +// req, resp := client.GetLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetLoginProfile +func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request.Request, output *GetLoginProfileOutput) { + op := &request.Operation{ + Name: opGetLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLoginProfileInput{} + } + + output = &GetLoginProfileOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetLoginProfile API operation for AWS Identity and Access Management. +// +// Retrieves the user name and password-creation date for the specified IAM +// user. If the user has not been assigned a password, the operation returns +// a 404 (NoSuchEntity) error. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetLoginProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetLoginProfile +func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutput, error) { + req, out := c.GetLoginProfileRequest(input) + return out, req.Send() +} + +// GetLoginProfileWithContext is the same as GetLoginProfile with the addition of +// the ability to pass a context and additional request options. +// +// See GetLoginProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetLoginProfileWithContext(ctx aws.Context, input *GetLoginProfileInput, opts ...request.Option) (*GetLoginProfileOutput, error) { + req, out := c.GetLoginProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" + +// GetOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIDConnectProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetOpenIDConnectProvider for more information on using the GetOpenIDConnectProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
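+//
+// A minimal sketch of fetching a provider: the resource is addressed by its
+// ARN, and the output carries the registered client IDs and thumbprints (the
+// ARN below is a placeholder):
+//
+//    out, err := client.GetOpenIDConnectProvider(&iam.GetOpenIDConnectProviderInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Url), out.ClientIDList, out.ThumbprintList)
+//    }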
+// +// +// // Example sending a request using the GetOpenIDConnectProviderRequest method. +// req, resp := client.GetOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOpenIDConnectProvider +func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) (req *request.Request, output *GetOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opGetOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIDConnectProviderInput{} + } + + output = &GetOpenIDConnectProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOpenIDConnectProvider API operation for AWS Identity and Access Management. +// +// Returns information about the specified OpenID Connect (OIDC) provider resource +// object in IAM. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetOpenIDConnectProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOpenIDConnectProvider +func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*GetOpenIDConnectProviderOutput, error) { + req, out := c.GetOpenIDConnectProviderRequest(input) + return out, req.Send() +} + +// GetOpenIDConnectProviderWithContext is the same as GetOpenIDConnectProvider with the addition of +// the ability to pass a context and additional request options. +// +// See GetOpenIDConnectProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetOpenIDConnectProviderWithContext(ctx aws.Context, input *GetOpenIDConnectProviderInput, opts ...request.Option) (*GetOpenIDConnectProviderOutput, error) { + req, out := c.GetOpenIDConnectProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetOrganizationsAccessReport = "GetOrganizationsAccessReport" + +// GetOrganizationsAccessReportRequest generates a "aws/request.Request" representing the +// client's request for the GetOrganizationsAccessReport operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetOrganizationsAccessReport for more information on using the GetOrganizationsAccessReport +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetOrganizationsAccessReportRequest method. +// req, resp := client.GetOrganizationsAccessReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOrganizationsAccessReport +func (c *IAM) GetOrganizationsAccessReportRequest(input *GetOrganizationsAccessReportInput) (req *request.Request, output *GetOrganizationsAccessReportOutput) { + op := &request.Operation{ + Name: opGetOrganizationsAccessReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOrganizationsAccessReportInput{} + } + + output = &GetOrganizationsAccessReportOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetOrganizationsAccessReport API operation for AWS Identity and Access Management. +// +// Retrieves the service last accessed data report for AWS Organizations that +// was previously generated using the GenerateOrganizationsAccessReport operation. +// This operation retrieves the status of your report job and the report contents. +// +// Depending on the parameters that you passed when you generated the report, +// the data returned could include different information. For details, see GenerateOrganizationsAccessReport. +// +// To call this operation, you must be signed in to the master account in your +// organization. SCPs must be enabled for your organization root. You must have +// permissions to perform this operation. For more information, see Refining +// Permissions Using Service Last Accessed Data (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// For each service that principals in an account (root users, IAM users, or +// IAM roles) could access using SCPs, the operation returns details about the +// most recent access attempt. If there was no attempt, the service is listed +// without details about the most recent attempt to access the service. If the +// operation fails, it returns the reason that it failed. +// +// By default, the list is sorted by service namespace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetOrganizationsAccessReport for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOrganizationsAccessReport +func (c *IAM) GetOrganizationsAccessReport(input *GetOrganizationsAccessReportInput) (*GetOrganizationsAccessReportOutput, error) { + req, out := c.GetOrganizationsAccessReportRequest(input) + return out, req.Send() +} + +// GetOrganizationsAccessReportWithContext is the same as GetOrganizationsAccessReport with the addition of +// the ability to pass a context and additional request options. 
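+//
+// A hypothetical generate-then-fetch sketch (the entity path below is a
+// placeholder; JobId and JobStatus follow the report-job fields defined
+// later in this file):
+//
+//    gen, err := client.GenerateOrganizationsAccessReport(&iam.GenerateOrganizationsAccessReportInput{
+//        EntityPath: aws.String("o-a1b2c3d4e5/r-f6g7"),
+//    })
+//    if err == nil {
+//        out, err := client.GetOrganizationsAccessReport(&iam.GetOrganizationsAccessReportInput{
+//            JobId: gen.JobId,
+//        })
+//        if err == nil && aws.StringValue(out.JobStatus) == "COMPLETED" {
+//            fmt.Println(out.AccessDetails)
+//        }
+//    }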
+// +// See GetOrganizationsAccessReport for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetOrganizationsAccessReportWithContext(ctx aws.Context, input *GetOrganizationsAccessReportInput, opts ...request.Option) (*GetOrganizationsAccessReportOutput, error) { + req, out := c.GetOrganizationsAccessReportRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPolicy for more information on using the GetPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicy +func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { + op := &request.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + output = &GetPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPolicy API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified managed policy, including the policy's +// default version and the total number of IAM users, groups, and roles to which +// the policy is attached. To retrieve the list of the specific users, groups, +// and roles that the policy is attached to, use the ListEntitiesForPolicy API. +// This API returns metadata about the policy. To retrieve the actual policy +// document for a specific version of the policy, use GetPolicyVersion. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded with an IAM user, group, or role, +// use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. 
The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicy +func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + return out, req.Send() +} + +// GetPolicyWithContext is the same as GetPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetPolicyWithContext(ctx aws.Context, input *GetPolicyInput, opts ...request.Option) (*GetPolicyOutput, error) { + req, out := c.GetPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicyVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetPolicyVersion for more information on using the GetPolicyVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPolicyVersionRequest method. +// req, resp := client.GetPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyVersion +func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { + op := &request.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + output = &GetPolicyVersionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPolicyVersion API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified version of the specified managed +// policy, including the policy document. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// To list the available versions for a policy, use ListPolicyVersions. +// +// This API retrieves information about managed policies. 
To retrieve information +// about an inline policy that is embedded in a user, group, or role, use the +// GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about the types of policies, see Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetPolicyVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyVersion +func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + return out, req.Send() +} + +// GetPolicyVersionWithContext is the same as GetPolicyVersion with the addition of +// the ability to pass a context and additional request options. +// +// See GetPolicyVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetPolicyVersionWithContext(ctx aws.Context, input *GetPolicyVersionInput, opts ...request.Option) (*GetPolicyVersionOutput, error) { + req, out := c.GetPolicyVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRole = "GetRole" + +// GetRoleRequest generates a "aws/request.Request" representing the +// client's request for the GetRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRole for more information on using the GetRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRoleRequest method. 
+// req, resp := client.GetRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRole +func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output *GetRoleOutput) { + op := &request.Operation{ + Name: opGetRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRoleInput{} + } + + output = &GetRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRole API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified role, including the role's path, +// GUID, ARN, and the role's trust policy that grants permission to assume the +// role. For more information about roles, see Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRole +func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { + req, out := c.GetRoleRequest(input) + return out, req.Send() +} + +// GetRoleWithContext is the same as GetRole with the addition of +// the ability to pass a context and additional request options. +// +// See GetRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetRoleWithContext(ctx aws.Context, input *GetRoleInput, opts ...request.Option) (*GetRoleOutput, error) { + req, out := c.GetRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetRolePolicy = "GetRolePolicy" + +// GetRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRolePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetRolePolicy for more information on using the GetRolePolicy +// API call, and error handling. 
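+//
+// Because the returned policy document is URL-encoded (see the operation
+// notes below), decoding it in Go can be sketched with the standard
+// net/url package; the role and policy names here are placeholders:
+//
+//    out, err := client.GetRolePolicy(&iam.GetRolePolicyInput{
+//        RoleName:   aws.String("my-role"),
+//        PolicyName: aws.String("my-inline-policy"),
+//    })
+//    if err == nil {
+//        doc, _ := url.QueryUnescape(aws.StringValue(out.PolicyDocument))
+//        fmt.Println(doc) // plain JSON policy text
+//    }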
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetRolePolicyRequest method. +// req, resp := client.GetRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRolePolicy +func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Request, output *GetRolePolicyOutput) { + op := &request.Operation{ + Name: opGetRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRolePolicyInput{} + } + + output = &GetRolePolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetRolePolicy API operation for AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded with the +// specified IAM role. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM role can also have managed policies attached to it. To retrieve a +// managed policy document that is attached to a role, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about roles, see Using Roles to Delegate Permissions +// and Federate Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRolePolicy +func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, error) { + req, out := c.GetRolePolicyRequest(input) + return out, req.Send() +} + +// GetRolePolicyWithContext is the same as GetRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
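+//
+// A brief, non-normative sketch of the context pattern: a standard
+// context.Context satisfies aws.Context, so a deadline can be attached with
+// the context and time packages (names below are placeholders):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := client.GetRolePolicyWithContext(ctx, &iam.GetRolePolicyInput{
+//        RoleName:   aws.String("my-role"),
+//        PolicyName: aws.String("my-inline-policy"),
+//    })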
+func (c *IAM) GetRolePolicyWithContext(ctx aws.Context, input *GetRolePolicyInput, opts ...request.Option) (*GetRolePolicyOutput, error) { + req, out := c.GetRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSAMLProvider = "GetSAMLProvider" + +// GetSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetSAMLProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSAMLProvider for more information on using the GetSAMLProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSAMLProviderRequest method. +// req, resp := client.GetSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSAMLProvider +func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request.Request, output *GetSAMLProviderOutput) { + op := &request.Operation{ + Name: opGetSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSAMLProviderInput{} + } + + output = &GetSAMLProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSAMLProvider API operation for AWS Identity and Access Management. +// +// Returns the SAML provider metadocument that was uploaded when the IAM SAML +// provider resource object was created or updated. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetSAMLProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSAMLProvider +func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutput, error) { + req, out := c.GetSAMLProviderRequest(input) + return out, req.Send() +} + +// GetSAMLProviderWithContext is the same as GetSAMLProvider with the addition of +// the ability to pass a context and additional request options. +// +// See GetSAMLProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetSAMLProviderWithContext(ctx aws.Context, input *GetSAMLProviderInput, opts ...request.Option) (*GetSAMLProviderOutput, error) { + req, out := c.GetSAMLProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSSHPublicKey = "GetSSHPublicKey" + +// GetSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetSSHPublicKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetSSHPublicKey for more information on using the GetSSHPublicKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetSSHPublicKeyRequest method. +// req, resp := client.GetSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSSHPublicKey +func (c *IAM) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) (req *request.Request, output *GetSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opGetSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSSHPublicKeyInput{} + } + + output = &GetSSHPublicKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSSHPublicKey API operation for AWS Identity and Access Management. +// +// Retrieves the specified SSH public key, including metadata about the key. +// +// The SSH public key retrieved by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetSSHPublicKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeUnrecognizedPublicKeyEncodingException "UnrecognizedPublicKeyEncoding" +// The request was rejected because the public key encoding format is unsupported +// or unrecognized. 
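+//
+// A short illustrative sketch; the key ID is a placeholder, and "SSH" (or
+// "PEM") selects the encoding of the returned key material:
+//
+//    out, err := client.GetSSHPublicKey(&iam.GetSSHPublicKeyInput{
+//        UserName:       aws.String("jdoe"),
+//        SSHPublicKeyId: aws.String("APKAEIBAERJR2EXAMPLE"),
+//        Encoding:       aws.String("SSH"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.SSHPublicKey.SSHPublicKeyBody))
+//    }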
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSSHPublicKey +func (c *IAM) GetSSHPublicKey(input *GetSSHPublicKeyInput) (*GetSSHPublicKeyOutput, error) { + req, out := c.GetSSHPublicKeyRequest(input) + return out, req.Send() +} + +// GetSSHPublicKeyWithContext is the same as GetSSHPublicKey with the addition of +// the ability to pass a context and additional request options. +// +// See GetSSHPublicKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetSSHPublicKeyWithContext(ctx aws.Context, input *GetSSHPublicKeyInput, opts ...request.Option) (*GetSSHPublicKeyOutput, error) { + req, out := c.GetSSHPublicKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServerCertificate = "GetServerCertificate" + +// GetServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetServerCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServerCertificate for more information on using the GetServerCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServerCertificateRequest method. +// req, resp := client.GetServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServerCertificate +func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req *request.Request, output *GetServerCertificateOutput) { + op := &request.Operation{ + Name: opGetServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServerCertificateInput{} + } + + output = &GetServerCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServerCertificate API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified server certificate stored in IAM. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic includes a list of AWS services that can +// use the server certificates that you manage with IAM. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetServerCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. 
The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServerCertificate +func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServerCertificateOutput, error) { + req, out := c.GetServerCertificateRequest(input) + return out, req.Send() +} + +// GetServerCertificateWithContext is the same as GetServerCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See GetServerCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetServerCertificateWithContext(ctx aws.Context, input *GetServerCertificateInput, opts ...request.Option) (*GetServerCertificateOutput, error) { + req, out := c.GetServerCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceLastAccessedDetails = "GetServiceLastAccessedDetails" + +// GetServiceLastAccessedDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceLastAccessedDetails operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceLastAccessedDetails for more information on using the GetServiceLastAccessedDetails +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceLastAccessedDetailsRequest method. +// req, resp := client.GetServiceLastAccessedDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetails +func (c *IAM) GetServiceLastAccessedDetailsRequest(input *GetServiceLastAccessedDetailsInput) (req *request.Request, output *GetServiceLastAccessedDetailsOutput) { + op := &request.Operation{ + Name: opGetServiceLastAccessedDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLastAccessedDetailsInput{} + } + + output = &GetServiceLastAccessedDetailsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceLastAccessedDetails API operation for AWS Identity and Access Management. +// +// Retrieves a service last accessed report that was created using the GenerateServiceLastAccessedDetails +// operation. You can use the JobId parameter in GetServiceLastAccessedDetails +// to retrieve the status of your report job. When the report is complete, you +// can retrieve the generated report. The report includes a list of AWS services +// that the resource (user, group, role, or managed policy) can access. 
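+//
+// A hypothetical end-to-end sketch (the ARN is a placeholder; JobId and
+// JobStatus follow the job fields defined later in this file):
+//
+//    gen, err := client.GenerateServiceLastAccessedDetails(&iam.GenerateServiceLastAccessedDetailsInput{
+//        Arn: aws.String("arn:aws:iam::123456789012:user/jdoe"),
+//    })
+//    if err == nil {
+//        out, err := client.GetServiceLastAccessedDetails(&iam.GetServiceLastAccessedDetailsInput{
+//            JobId: gen.JobId,
+//        })
+//        if err == nil && aws.StringValue(out.JobStatus) == "COMPLETED" {
+//            fmt.Println(out.ServicesLastAccessed)
+//        }
+//    }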
+// +// Service last accessed data does not use other policy types when determining +// whether a resource could access a service. These other policy types include +// resource-based policies, access control lists, AWS Organizations policies, +// IAM permissions boundaries, and AWS STS assume role policies. It only applies +// permissions policy logic. For more about the evaluation of policy types, +// see Evaluating Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For each service that the resource could access using permissions policies, +// the operation returns details about the most recent access attempt. If there +// was no attempt, the service is listed without details about the most recent +// attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails +// operation returns the reason that it failed. +// +// The GetServiceLastAccessedDetails operation returns a list of services. This +// list includes the number of entities that have attempted to access the service +// and the date and time of the last attempt. It also returns the ARN of the +// following entity, depending on the resource ARN that you used to generate +// the report: +// +// * User – Returns the user ARN that you used to generate the report +// +// * Group – Returns the ARN of the group member (user) that last attempted +// to access the service +// +// * Role – Returns the role ARN that you used to generate the report +// +// * Policy – Returns the ARN of the user or role that last used the policy +// to attempt to access the service +// +// By default, the list is sorted by service namespace. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetServiceLastAccessedDetails for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetails +func (c *IAM) GetServiceLastAccessedDetails(input *GetServiceLastAccessedDetailsInput) (*GetServiceLastAccessedDetailsOutput, error) { + req, out := c.GetServiceLastAccessedDetailsRequest(input) + return out, req.Send() +} + +// GetServiceLastAccessedDetailsWithContext is the same as GetServiceLastAccessedDetails with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceLastAccessedDetails for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
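+//
+// A non-normative sketch of polling the report job under a context deadline;
+// input is an already-populated *GetServiceLastAccessedDetailsInput and the
+// five-second interval is arbitrary:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+//    defer cancel()
+//    for {
+//        out, err := client.GetServiceLastAccessedDetailsWithContext(ctx, input)
+//        if err != nil || aws.StringValue(out.JobStatus) != "IN_PROGRESS" {
+//            break
+//        }
+//        time.Sleep(5 * time.Second)
+//    }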
+func (c *IAM) GetServiceLastAccessedDetailsWithContext(ctx aws.Context, input *GetServiceLastAccessedDetailsInput, opts ...request.Option) (*GetServiceLastAccessedDetailsOutput, error) { + req, out := c.GetServiceLastAccessedDetailsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceLastAccessedDetailsWithEntities = "GetServiceLastAccessedDetailsWithEntities" + +// GetServiceLastAccessedDetailsWithEntitiesRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceLastAccessedDetailsWithEntities operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceLastAccessedDetailsWithEntities for more information on using the GetServiceLastAccessedDetailsWithEntities +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceLastAccessedDetailsWithEntitiesRequest method. +// req, resp := client.GetServiceLastAccessedDetailsWithEntitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsWithEntities +func (c *IAM) GetServiceLastAccessedDetailsWithEntitiesRequest(input *GetServiceLastAccessedDetailsWithEntitiesInput) (req *request.Request, output *GetServiceLastAccessedDetailsWithEntitiesOutput) { + op := &request.Operation{ + Name: opGetServiceLastAccessedDetailsWithEntities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLastAccessedDetailsWithEntitiesInput{} + } + + output = &GetServiceLastAccessedDetailsWithEntitiesOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceLastAccessedDetailsWithEntities API operation for AWS Identity and Access Management. +// +// After you generate a group or policy report using the GenerateServiceLastAccessedDetails +// operation, you can use the JobId parameter in GetServiceLastAccessedDetailsWithEntities. +// This operation retrieves the status of your report job and a list of entities +// that could have used group or policy permissions to access the specified +// service. +// +// * Group – For a group report, this operation returns a list of users +// in the group that could have used the group’s policies in an attempt +// to access the service. +// +// * Policy – For a policy report, this operation returns a list of entities +// (users or roles) that could have used the policy in an attempt to access +// the service. +// +// You can also use this operation for user or role reports to retrieve details +// about those entities. +// +// If the operation fails, the GetServiceLastAccessedDetailsWithEntities operation +// returns the reason that it failed. +// +// By default, the list of associated entities is sorted by date, with the most +// recent access listed first. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
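+//
+// For illustration, the runtime type assertion mentioned above typically
+// looks like the following sketch:
+//
+//    out, err := client.GetServiceLastAccessedDetailsWithEntities(input)
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//        return
+//    }
+//    fmt.Println(out.EntityDetailsList)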
+// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetServiceLastAccessedDetailsWithEntities for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsWithEntities +func (c *IAM) GetServiceLastAccessedDetailsWithEntities(input *GetServiceLastAccessedDetailsWithEntitiesInput) (*GetServiceLastAccessedDetailsWithEntitiesOutput, error) { + req, out := c.GetServiceLastAccessedDetailsWithEntitiesRequest(input) + return out, req.Send() +} + +// GetServiceLastAccessedDetailsWithEntitiesWithContext is the same as GetServiceLastAccessedDetailsWithEntities with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceLastAccessedDetailsWithEntities for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetServiceLastAccessedDetailsWithEntitiesWithContext(ctx aws.Context, input *GetServiceLastAccessedDetailsWithEntitiesInput, opts ...request.Option) (*GetServiceLastAccessedDetailsWithEntitiesOutput, error) { + req, out := c.GetServiceLastAccessedDetailsWithEntitiesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetServiceLinkedRoleDeletionStatus = "GetServiceLinkedRoleDeletionStatus" + +// GetServiceLinkedRoleDeletionStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetServiceLinkedRoleDeletionStatus operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetServiceLinkedRoleDeletionStatus for more information on using the GetServiceLinkedRoleDeletionStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetServiceLinkedRoleDeletionStatusRequest method. 
+// req, resp := client.GetServiceLinkedRoleDeletionStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatus +func (c *IAM) GetServiceLinkedRoleDeletionStatusRequest(input *GetServiceLinkedRoleDeletionStatusInput) (req *request.Request, output *GetServiceLinkedRoleDeletionStatusOutput) { + op := &request.Operation{ + Name: opGetServiceLinkedRoleDeletionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLinkedRoleDeletionStatusInput{} + } + + output = &GetServiceLinkedRoleDeletionStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetServiceLinkedRoleDeletionStatus API operation for AWS Identity and Access Management. +// +// Retrieves the status of your service-linked role deletion. After you use +// the DeleteServiceLinkedRole API operation to submit a service-linked role +// for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus +// to check the status of the deletion. If the deletion fails, this operation +// returns the reason that it failed, if that information is returned by the +// service. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetServiceLinkedRoleDeletionStatus for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatus +func (c *IAM) GetServiceLinkedRoleDeletionStatus(input *GetServiceLinkedRoleDeletionStatusInput) (*GetServiceLinkedRoleDeletionStatusOutput, error) { + req, out := c.GetServiceLinkedRoleDeletionStatusRequest(input) + return out, req.Send() +} + +// GetServiceLinkedRoleDeletionStatusWithContext is the same as GetServiceLinkedRoleDeletionStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetServiceLinkedRoleDeletionStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetServiceLinkedRoleDeletionStatusWithContext(ctx aws.Context, input *GetServiceLinkedRoleDeletionStatusInput, opts ...request.Option) (*GetServiceLinkedRoleDeletionStatusOutput, error) { + req, out := c.GetServiceLinkedRoleDeletionStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
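+	// req now carries the caller's context and any per-request options;
+	// the Send call below executes the request and populates "out" on success.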
+ return out, req.Send() +} + +const opGetUser = "GetUser" + +// GetUserRequest generates a "aws/request.Request" representing the +// client's request for the GetUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetUser for more information on using the GetUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetUserRequest method. +// req, resp := client.GetUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUser +func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { + op := &request.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + output = &GetUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetUser API operation for AWS Identity and Access Management. +// +// Retrieves information about the specified IAM user, including the user's +// creation date, path, unique ID, and ARN. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request to this API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUser +func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + return out, req.Send() +} + +// GetUserWithContext is the same as GetUser with the addition of +// the ability to pass a context and additional request options. +// +// See GetUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetUserWithContext(ctx aws.Context, input *GetUserInput, opts ...request.Option) (*GetUserOutput, error) { + req, out := c.GetUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetUserPolicy = "GetUserPolicy" + +// GetUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetUserPolicy operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetUserPolicy for more information on using the GetUserPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetUserPolicyRequest method. +// req, resp := client.GetUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserPolicy +func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Request, output *GetUserPolicyOutput) { + op := &request.Operation{ + Name: opGetUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserPolicyInput{} + } + + output = &GetUserPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetUserPolicy API operation for AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded in the specified +// IAM user. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM user can also have managed policies attached to it. To retrieve a +// managed policy document that is attached to a user, use GetPolicy to determine +// the policy's default version. Then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation GetUserPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserPolicy +func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, error) { + req, out := c.GetUserPolicyRequest(input) + return out, req.Send() +} + +// GetUserPolicyWithContext is the same as GetUserPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetUserPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) GetUserPolicyWithContext(ctx aws.Context, input *GetUserPolicyInput, opts ...request.Option) (*GetUserPolicyOutput, error) { + req, out := c.GetUserPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListAccessKeys = "ListAccessKeys" + +// ListAccessKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListAccessKeys operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAccessKeys for more information on using the ListAccessKeys +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccessKeysRequest method. +// req, resp := client.ListAccessKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccessKeys +func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Request, output *ListAccessKeysOutput) { + op := &request.Operation{ + Name: opListAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccessKeysInput{} + } + + output = &ListAccessKeysOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAccessKeys API operation for AWS Identity and Access Management. +// +// Returns information about the access key IDs associated with the specified +// IAM user. If there is none, the operation returns an empty list. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListAccessKeys for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccessKeys +func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, error) { + req, out := c.ListAccessKeysRequest(input) + return out, req.Send() +} + +// ListAccessKeysWithContext is the same as ListAccessKeys with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccessKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAccessKeysWithContext(ctx aws.Context, input *ListAccessKeysInput, opts ...request.Option) (*ListAccessKeysOutput, error) { + req, out := c.ListAccessKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccessKeysPages iterates over the pages of a ListAccessKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccessKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccessKeys operation. +// pageNum := 0 +// err := client.ListAccessKeysPages(params, +// func(page *iam.ListAccessKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(*ListAccessKeysOutput, bool) bool) error { + return c.ListAccessKeysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccessKeysPagesWithContext same as ListAccessKeysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAccessKeysPagesWithContext(ctx aws.Context, input *ListAccessKeysInput, fn func(*ListAccessKeysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccessKeysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccessKeysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAccessKeysOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListAccountAliases = "ListAccountAliases" + +// ListAccountAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountAliases operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
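+//
+// A small illustrative sketch (an assumption, not generated documentation)
+// of the Request/Send pattern combined with awserr-based error inspection,
+// assuming an *iam.IAM client "svc" and the aws, awserr, iam, and fmt imports.
+//
+//    req, resp := svc.ListAccountAliasesRequest(&iam.ListAccountAliasesInput{})
+//    if err := req.Send(); err != nil {
+//        if aerr, ok := err.(awserr.Error); ok {
+//            fmt.Println(aerr.Code(), aerr.Message())
+//        }
+//    } else {
+//        fmt.Println(aws.StringValueSlice(resp.AccountAliases)) // at most one alias per account
+//    }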
+// +// See ListAccountAliases for more information on using the ListAccountAliases +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAccountAliasesRequest method. +// req, resp := client.ListAccountAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccountAliases +func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *request.Request, output *ListAccountAliasesOutput) { + op := &request.Operation{ + Name: opListAccountAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccountAliasesInput{} + } + + output = &ListAccountAliasesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAccountAliases API operation for AWS Identity and Access Management. +// +// Lists the account alias associated with the AWS account (Note: you can have +// only one). For information about using an AWS account alias, see Using an +// Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListAccountAliases for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccountAliases +func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAliasesOutput, error) { + req, out := c.ListAccountAliasesRequest(input) + return out, req.Send() +} + +// ListAccountAliasesWithContext is the same as ListAccountAliases with the addition of +// the ability to pass a context and additional request options. +// +// See ListAccountAliases for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAccountAliasesWithContext(ctx aws.Context, input *ListAccountAliasesInput, opts ...request.Option) (*ListAccountAliasesOutput, error) { + req, out := c.ListAccountAliasesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAccountAliasesPages iterates over the pages of a ListAccountAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListAccountAliases operation. +// pageNum := 0 +// err := client.ListAccountAliasesPages(params, +// func(page *iam.ListAccountAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(*ListAccountAliasesOutput, bool) bool) error { + return c.ListAccountAliasesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAccountAliasesPagesWithContext same as ListAccountAliasesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAccountAliasesPagesWithContext(ctx aws.Context, input *ListAccountAliasesInput, fn func(*ListAccountAliasesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAccountAliasesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAccountAliasesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAccountAliasesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" + +// ListAttachedGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedGroupPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAttachedGroupPolicies for more information on using the ListAttachedGroupPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAttachedGroupPoliciesRequest method. +// req, resp := client.ListAttachedGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedGroupPolicies +func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) (req *request.Request, output *ListAttachedGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedGroupPoliciesInput{} + } + + output = &ListAttachedGroupPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAttachedGroupPolicies API operation for AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM group. +// +// An IAM group can also have inline policies embedded with it. 
To list the +// inline policies for a group, use the ListGroupPolicies API. For information +// about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListAttachedGroupPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedGroupPolicies +func (c *IAM) ListAttachedGroupPolicies(input *ListAttachedGroupPoliciesInput) (*ListAttachedGroupPoliciesOutput, error) { + req, out := c.ListAttachedGroupPoliciesRequest(input) + return out, req.Send() +} + +// ListAttachedGroupPoliciesWithContext is the same as ListAttachedGroupPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListAttachedGroupPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedGroupPoliciesWithContext(ctx aws.Context, input *ListAttachedGroupPoliciesInput, opts ...request.Option) (*ListAttachedGroupPoliciesOutput, error) { + req, out := c.ListAttachedGroupPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAttachedGroupPoliciesPages iterates over the pages of a ListAttachedGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedGroupPolicies operation. 
+// pageNum := 0 +// err := client.ListAttachedGroupPoliciesPages(params, +// func(page *iam.ListAttachedGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInput, fn func(*ListAttachedGroupPoliciesOutput, bool) bool) error { + return c.ListAttachedGroupPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAttachedGroupPoliciesPagesWithContext same as ListAttachedGroupPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedGroupPoliciesPagesWithContext(ctx aws.Context, input *ListAttachedGroupPoliciesInput, fn func(*ListAttachedGroupPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAttachedGroupPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAttachedGroupPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAttachedGroupPoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListAttachedRolePolicies = "ListAttachedRolePolicies" + +// ListAttachedRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedRolePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAttachedRolePolicies for more information on using the ListAttachedRolePolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAttachedRolePoliciesRequest method. +// req, resp := client.ListAttachedRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedRolePolicies +func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) (req *request.Request, output *ListAttachedRolePoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedRolePoliciesInput{} + } + + output = &ListAttachedRolePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAttachedRolePolicies API operation for AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM role. +// +// An IAM role can also have inline policies embedded with it. 
To list the inline +// policies for a role, use the ListRolePolicies API. For information about +// policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified role (or none that match the specified path prefix), the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListAttachedRolePolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedRolePolicies +func (c *IAM) ListAttachedRolePolicies(input *ListAttachedRolePoliciesInput) (*ListAttachedRolePoliciesOutput, error) { + req, out := c.ListAttachedRolePoliciesRequest(input) + return out, req.Send() +} + +// ListAttachedRolePoliciesWithContext is the same as ListAttachedRolePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListAttachedRolePolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedRolePoliciesWithContext(ctx aws.Context, input *ListAttachedRolePoliciesInput, opts ...request.Option) (*ListAttachedRolePoliciesOutput, error) { + req, out := c.ListAttachedRolePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAttachedRolePoliciesPages iterates over the pages of a ListAttachedRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedRolePolicies operation. 
+// pageNum := 0 +// err := client.ListAttachedRolePoliciesPages(params, +// func(page *iam.ListAttachedRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput, fn func(*ListAttachedRolePoliciesOutput, bool) bool) error { + return c.ListAttachedRolePoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAttachedRolePoliciesPagesWithContext same as ListAttachedRolePoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedRolePoliciesPagesWithContext(ctx aws.Context, input *ListAttachedRolePoliciesInput, fn func(*ListAttachedRolePoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAttachedRolePoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAttachedRolePoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAttachedRolePoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListAttachedUserPolicies = "ListAttachedUserPolicies" + +// ListAttachedUserPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedUserPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListAttachedUserPolicies for more information on using the ListAttachedUserPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListAttachedUserPoliciesRequest method. +// req, resp := client.ListAttachedUserPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedUserPolicies +func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) (req *request.Request, output *ListAttachedUserPoliciesOutput) { + op := &request.Operation{ + Name: opListAttachedUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedUserPoliciesInput{} + } + + output = &ListAttachedUserPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListAttachedUserPolicies API operation for AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM user. +// +// An IAM user can also have inline policies embedded with it. 
To list the inline +// policies for a user, use the ListUserPolicies API. For information about +// policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListAttachedUserPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedUserPolicies +func (c *IAM) ListAttachedUserPolicies(input *ListAttachedUserPoliciesInput) (*ListAttachedUserPoliciesOutput, error) { + req, out := c.ListAttachedUserPoliciesRequest(input) + return out, req.Send() +} + +// ListAttachedUserPoliciesWithContext is the same as ListAttachedUserPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListAttachedUserPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedUserPoliciesWithContext(ctx aws.Context, input *ListAttachedUserPoliciesInput, opts ...request.Option) (*ListAttachedUserPoliciesOutput, error) { + req, out := c.ListAttachedUserPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListAttachedUserPoliciesPages iterates over the pages of a ListAttachedUserPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedUserPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedUserPolicies operation. 
+// pageNum := 0 +// err := client.ListAttachedUserPoliciesPages(params, +// func(page *iam.ListAttachedUserPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput, fn func(*ListAttachedUserPoliciesOutput, bool) bool) error { + return c.ListAttachedUserPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListAttachedUserPoliciesPagesWithContext same as ListAttachedUserPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListAttachedUserPoliciesPagesWithContext(ctx aws.Context, input *ListAttachedUserPoliciesInput, fn func(*ListAttachedUserPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListAttachedUserPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListAttachedUserPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListAttachedUserPoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListEntitiesForPolicy = "ListEntitiesForPolicy" + +// ListEntitiesForPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListEntitiesForPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListEntitiesForPolicy for more information on using the ListEntitiesForPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListEntitiesForPolicyRequest method. +// req, resp := client.ListEntitiesForPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListEntitiesForPolicy +func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (req *request.Request, output *ListEntitiesForPolicyOutput) { + op := &request.Operation{ + Name: opListEntitiesForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListEntitiesForPolicyInput{} + } + + output = &ListEntitiesForPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListEntitiesForPolicy API operation for AWS Identity and Access Management. +// +// Lists all IAM users, groups, and roles that the specified managed policy +// is attached to. +// +// You can use the optional EntityFilter parameter to limit the results to a +// particular type of entity (users, groups, or roles). 
For example, to list +// only the roles that are attached to the specified policy, set EntityFilter +// to Role. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListEntitiesForPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListEntitiesForPolicy +func (c *IAM) ListEntitiesForPolicy(input *ListEntitiesForPolicyInput) (*ListEntitiesForPolicyOutput, error) { + req, out := c.ListEntitiesForPolicyRequest(input) + return out, req.Send() +} + +// ListEntitiesForPolicyWithContext is the same as ListEntitiesForPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See ListEntitiesForPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListEntitiesForPolicyWithContext(ctx aws.Context, input *ListEntitiesForPolicyInput, opts ...request.Option) (*ListEntitiesForPolicyOutput, error) { + req, out := c.ListEntitiesForPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListEntitiesForPolicyPages iterates over the pages of a ListEntitiesForPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEntitiesForPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEntitiesForPolicy operation. +// pageNum := 0 +// err := client.ListEntitiesForPolicyPages(params, +// func(page *iam.ListEntitiesForPolicyOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn func(*ListEntitiesForPolicyOutput, bool) bool) error { + return c.ListEntitiesForPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListEntitiesForPolicyPagesWithContext same as ListEntitiesForPolicyPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
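+//
+// A minimal sketch (editorial illustration only) of paginating the roles a
+// managed policy is attached to, with a context; "svc", "ctx", and the policy
+// ARN are assumptions.
+//
+//    err := svc.ListEntitiesForPolicyPagesWithContext(ctx, &iam.ListEntitiesForPolicyInput{
+//        PolicyArn:    aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+//        EntityFilter: aws.String(iam.EntityTypeRole),
+//    }, func(page *iam.ListEntitiesForPolicyOutput, lastPage bool) bool {
+//        for _, r := range page.PolicyRoles {
+//            fmt.Println(aws.StringValue(r.RoleName))
+//        }
+//        return true // keep paging until IsTruncated is false
+//    })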
+func (c *IAM) ListEntitiesForPolicyPagesWithContext(ctx aws.Context, input *ListEntitiesForPolicyInput, fn func(*ListEntitiesForPolicyOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListEntitiesForPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListEntitiesForPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListEntitiesForPolicyOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListGroupPolicies = "ListGroupPolicies" + +// ListGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGroupPolicies for more information on using the ListGroupPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGroupPoliciesRequest method. +// req, resp := client.ListGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupPolicies +func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *request.Request, output *ListGroupPoliciesOutput) { + op := &request.Operation{ + Name: opListGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupPoliciesInput{} + } + + output = &ListGroupPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListGroupPolicies API operation for AWS Identity and Access Management. +// +// Lists the names of the inline policies that are embedded in the specified +// IAM group. +// +// An IAM group can also have managed policies attached to it. To list the managed +// policies that are attached to a group, use ListAttachedGroupPolicies. For +// more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified group, the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListGroupPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupPolicies +func (c *IAM) ListGroupPolicies(input *ListGroupPoliciesInput) (*ListGroupPoliciesOutput, error) { + req, out := c.ListGroupPoliciesRequest(input) + return out, req.Send() +} + +// ListGroupPoliciesWithContext is the same as ListGroupPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListGroupPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupPoliciesWithContext(ctx aws.Context, input *ListGroupPoliciesInput, opts ...request.Option) (*ListGroupPoliciesOutput, error) { + req, out := c.ListGroupPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListGroupPoliciesPages iterates over the pages of a ListGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupPolicies operation. +// pageNum := 0 +// err := client.ListGroupPoliciesPages(params, +// func(page *iam.ListGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(*ListGroupPoliciesOutput, bool) bool) error { + return c.ListGroupPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGroupPoliciesPagesWithContext same as ListGroupPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupPoliciesPagesWithContext(ctx aws.Context, input *ListGroupPoliciesInput, fn func(*ListGroupPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGroupPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGroupPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListGroupPoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroups operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
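+//
+// A hedged sketch (editorial, not generated documentation) of walking all
+// groups beneath a path prefix with the paginated helper; the
+// "/division_abc/" prefix and the "svc" client are assumptions.
+//
+//    err := svc.ListGroupsPages(&iam.ListGroupsInput{
+//        PathPrefix: aws.String("/division_abc/"),
+//    }, func(page *iam.ListGroupsOutput, lastPage bool) bool {
+//        for _, g := range page.Groups {
+//            fmt.Println(aws.StringValue(g.GroupName))
+//        }
+//        return true // continue through every page
+//    })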
+// +// See ListGroups for more information on using the ListGroups +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGroupsRequest method. +// req, resp := client.ListGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroups +func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { + op := &request.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + output = &ListGroupsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListGroups API operation for AWS Identity and Access Management. +// +// Lists the IAM groups that have the specified path prefix. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListGroups for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroups +func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) + return out, req.Send() +} + +// ListGroupsWithContext is the same as ListGroups with the addition of +// the ability to pass a context and additional request options. +// +// See ListGroups for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupsWithContext(ctx aws.Context, input *ListGroupsInput, opts ...request.Option) (*ListGroupsOutput, error) { + req, out := c.ListGroupsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListGroupsPages iterates over the pages of a ListGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroups operation. 
+// pageNum := 0 +// err := client.ListGroupsPages(params, +// func(page *iam.ListGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool) error { + return c.ListGroupsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGroupsPagesWithContext same as ListGroupsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupsPagesWithContext(ctx aws.Context, input *ListGroupsInput, fn func(*ListGroupsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGroupsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGroupsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListGroupsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListGroupsForUser = "ListGroupsForUser" + +// ListGroupsForUserRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupsForUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGroupsForUser for more information on using the ListGroupsForUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListGroupsForUserRequest method. +// req, resp := client.ListGroupsForUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsForUser +func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *request.Request, output *ListGroupsForUserOutput) { + op := &request.Operation{ + Name: opListGroupsForUser, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsForUserInput{} + } + + output = &ListGroupsForUserOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListGroupsForUser API operation for AWS Identity and Access Management. +// +// Lists the IAM groups that the specified IAM user belongs to. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListGroupsForUser for usage and error information. 
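+//
+// Editor's sketch (illustrative only; "alice" is an assumed user name):
+// a single ListGroupsForUser call covers small accounts, and
+// ListGroupsForUserPages handles truncated results the same way as the
+// other Pages helpers in this file.
+//
+//    out, err := svc.ListGroupsForUser(&iam.ListGroupsForUserInput{
+//        UserName: aws.String("alice"),
+//    })
+//    if err == nil {
+//        for _, g := range out.Groups {
+//            fmt.Println(aws.StringValue(g.GroupName))
+//        }
+//    }
+//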
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsForUser +func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUserOutput, error) { + req, out := c.ListGroupsForUserRequest(input) + return out, req.Send() +} + +// ListGroupsForUserWithContext is the same as ListGroupsForUser with the addition of +// the ability to pass a context and additional request options. +// +// See ListGroupsForUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupsForUserWithContext(ctx aws.Context, input *ListGroupsForUserInput, opts ...request.Option) (*ListGroupsForUserOutput, error) { + req, out := c.ListGroupsForUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListGroupsForUserPages iterates over the pages of a ListGroupsForUser operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupsForUser method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupsForUser operation. +// pageNum := 0 +// err := client.ListGroupsForUserPages(params, +// func(page *iam.ListGroupsForUserOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(*ListGroupsForUserOutput, bool) bool) error { + return c.ListGroupsForUserPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListGroupsForUserPagesWithContext same as ListGroupsForUserPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListGroupsForUserPagesWithContext(ctx aws.Context, input *ListGroupsForUserInput, fn func(*ListGroupsForUserOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListGroupsForUserInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListGroupsForUserRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListGroupsForUserOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListInstanceProfiles = "ListInstanceProfiles" + +// ListInstanceProfilesRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfiles operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInstanceProfiles for more information on using the ListInstanceProfiles +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInstanceProfilesRequest method. +// req, resp := client.ListInstanceProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfiles +func (c *IAM) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) (req *request.Request, output *ListInstanceProfilesOutput) { + op := &request.Operation{ + Name: opListInstanceProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesInput{} + } + + output = &ListInstanceProfilesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInstanceProfiles API operation for AWS Identity and Access Management. +// +// Lists the instance profiles that have the specified path prefix. If there +// are none, the operation returns an empty list. For more information about +// instance profiles, go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListInstanceProfiles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfiles +func (c *IAM) ListInstanceProfiles(input *ListInstanceProfilesInput) (*ListInstanceProfilesOutput, error) { + req, out := c.ListInstanceProfilesRequest(input) + return out, req.Send() +} + +// ListInstanceProfilesWithContext is the same as ListInstanceProfiles with the addition of +// the ability to pass a context and additional request options. +// +// See ListInstanceProfiles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListInstanceProfilesWithContext(ctx aws.Context, input *ListInstanceProfilesInput, opts ...request.Option) (*ListInstanceProfilesOutput, error) { + req, out := c.ListInstanceProfilesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +// ListInstanceProfilesPages iterates over the pages of a ListInstanceProfiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceProfiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfiles operation. +// pageNum := 0 +// err := client.ListInstanceProfilesPages(params, +// func(page *iam.ListInstanceProfilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn func(*ListInstanceProfilesOutput, bool) bool) error { + return c.ListInstanceProfilesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInstanceProfilesPagesWithContext same as ListInstanceProfilesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListInstanceProfilesPagesWithContext(ctx aws.Context, input *ListInstanceProfilesInput, fn func(*ListInstanceProfilesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInstanceProfilesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInstanceProfilesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListInstanceProfilesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" + +// ListInstanceProfilesForRoleRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfilesForRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListInstanceProfilesForRole for more information on using the ListInstanceProfilesForRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListInstanceProfilesForRoleRequest method. 
+// req, resp := client.ListInstanceProfilesForRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesForRole +func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) (req *request.Request, output *ListInstanceProfilesForRoleOutput) { + op := &request.Operation{ + Name: opListInstanceProfilesForRole, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesForRoleInput{} + } + + output = &ListInstanceProfilesForRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListInstanceProfilesForRole API operation for AWS Identity and Access Management. +// +// Lists the instance profiles that have the specified associated IAM role. +// If there are none, the operation returns an empty list. For more information +// about instance profiles, go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListInstanceProfilesForRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesForRole +func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInput) (*ListInstanceProfilesForRoleOutput, error) { + req, out := c.ListInstanceProfilesForRoleRequest(input) + return out, req.Send() +} + +// ListInstanceProfilesForRoleWithContext is the same as ListInstanceProfilesForRole with the addition of +// the ability to pass a context and additional request options. +// +// See ListInstanceProfilesForRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListInstanceProfilesForRoleWithContext(ctx aws.Context, input *ListInstanceProfilesForRoleInput, opts ...request.Option) (*ListInstanceProfilesForRoleOutput, error) { + req, out := c.ListInstanceProfilesForRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListInstanceProfilesForRolePages iterates over the pages of a ListInstanceProfilesForRole operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
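+//
+// Editor's sketch (the role name is an assumption, not generated SDK
+// text): collect the ARN of every instance profile attached to one role.
+//
+//    var arns []string
+//    err := svc.ListInstanceProfilesForRolePages(
+//        &iam.ListInstanceProfilesForRoleInput{RoleName: aws.String("ec2-role")},
+//        func(page *iam.ListInstanceProfilesForRoleOutput, lastPage bool) bool {
+//            for _, p := range page.InstanceProfiles {
+//                arns = append(arns, aws.StringValue(p.Arn))
+//            }
+//            return true
+//        })
+//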
+// +// See ListInstanceProfilesForRole method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfilesForRole operation. +// pageNum := 0 +// err := client.ListInstanceProfilesForRolePages(params, +// func(page *iam.ListInstanceProfilesForRoleOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRoleInput, fn func(*ListInstanceProfilesForRoleOutput, bool) bool) error { + return c.ListInstanceProfilesForRolePagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListInstanceProfilesForRolePagesWithContext same as ListInstanceProfilesForRolePages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListInstanceProfilesForRolePagesWithContext(ctx aws.Context, input *ListInstanceProfilesForRoleInput, fn func(*ListInstanceProfilesForRoleOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListInstanceProfilesForRoleInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListInstanceProfilesForRoleRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListInstanceProfilesForRoleOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListMFADevices = "ListMFADevices" + +// ListMFADevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListMFADevices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListMFADevices for more information on using the ListMFADevices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListMFADevicesRequest method. +// req, resp := client.ListMFADevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListMFADevices +func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Request, output *ListMFADevicesOutput) { + op := &request.Operation{ + Name: opListMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMFADevicesInput{} + } + + output = &ListMFADevicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMFADevices API operation for AWS Identity and Access Management. +// +// Lists the MFA devices for an IAM user. 
+// If the request includes an IAM user
+// name, then this operation lists all the MFA devices associated with the specified
+// user. If you do not specify a user name, IAM determines the user name implicitly
+// based on the AWS access key ID signing the request for this API.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation ListMFADevices for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchEntityException "NoSuchEntity"
+//   The request was rejected because it referenced a resource entity that does
+//   not exist. The error message describes the resource.
+//
+//   * ErrCodeServiceFailureException "ServiceFailure"
+//   The request processing has failed because of an unknown error, exception
+//   or failure.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListMFADevices
+func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, error) {
+	req, out := c.ListMFADevicesRequest(input)
+	return out, req.Send()
+}
+
+// ListMFADevicesWithContext is the same as ListMFADevices with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMFADevices for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) ListMFADevicesWithContext(ctx aws.Context, input *ListMFADevicesInput, opts ...request.Option) (*ListMFADevicesOutput, error) {
+	req, out := c.ListMFADevicesRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// ListMFADevicesPages iterates over the pages of a ListMFADevices operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMFADevices method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListMFADevices operation.
+//    pageNum := 0
+//    err := client.ListMFADevicesPages(params,
+//        func(page *iam.ListMFADevicesOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(*ListMFADevicesOutput, bool) bool) error {
+	return c.ListMFADevicesPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMFADevicesPagesWithContext same as ListMFADevicesPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) ListMFADevicesPagesWithContext(ctx aws.Context, input *ListMFADevicesInput, fn func(*ListMFADevicesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMFADevicesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMFADevicesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMFADevicesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" + +// ListOpenIDConnectProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListOpenIDConnectProviders operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListOpenIDConnectProviders for more information on using the ListOpenIDConnectProviders +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListOpenIDConnectProvidersRequest method. +// req, resp := client.ListOpenIDConnectProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListOpenIDConnectProviders +func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) (req *request.Request, output *ListOpenIDConnectProvidersOutput) { + op := &request.Operation{ + Name: opListOpenIDConnectProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOpenIDConnectProvidersInput{} + } + + output = &ListOpenIDConnectProvidersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListOpenIDConnectProviders API operation for AWS Identity and Access Management. +// +// Lists information about the IAM OpenID Connect (OIDC) provider resource objects +// defined in the AWS account. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListOpenIDConnectProviders for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListOpenIDConnectProviders +func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) (*ListOpenIDConnectProvidersOutput, error) { + req, out := c.ListOpenIDConnectProvidersRequest(input) + return out, req.Send() +} + +// ListOpenIDConnectProvidersWithContext is the same as ListOpenIDConnectProviders with the addition of +// the ability to pass a context and additional request options. +// +// See ListOpenIDConnectProviders for details on how to use this API operation. 
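+//
+// Editor's sketch (assumes the standard context and time imports):
+// ListOpenIDConnectProviders carries no Paginator in its
+// request.Operation above, so a single call returns the full list. A
+// deadline-bearing context still applies through the WithContext variant.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    out, err := svc.ListOpenIDConnectProvidersWithContext(ctx,
+//        &iam.ListOpenIDConnectProvidersInput{})
+//    if err == nil {
+//        fmt.Println(out) // each entry exposes the provider ARN
+//    }
+//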
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListOpenIDConnectProvidersWithContext(ctx aws.Context, input *ListOpenIDConnectProvidersInput, opts ...request.Option) (*ListOpenIDConnectProvidersOutput, error) { + req, out := c.ListOpenIDConnectProvidersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListPolicies = "ListPolicies" + +// ListPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPolicies for more information on using the ListPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPoliciesRequest method. +// req, resp := client.ListPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicies +func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { + op := &request.Operation{ + Name: opListPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPoliciesInput{} + } + + output = &ListPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPolicies API operation for AWS Identity and Access Management. +// +// Lists all the managed policies that are available in your AWS account, including +// your own customer-defined managed policies and all AWS managed policies. +// +// You can filter the list of policies that is returned using the optional OnlyAttached, +// Scope, and PathPrefix parameters. For example, to list only the customer +// managed policies in your AWS account, set Scope to Local. To list only AWS +// managed policies, set Scope to AWS. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
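+//
+// Editor's sketch (the filter value comes from the description above):
+// list only customer managed policies by setting Scope to "Local".
+//
+//    err := svc.ListPoliciesPages(
+//        &iam.ListPoliciesInput{Scope: aws.String("Local")},
+//        func(page *iam.ListPoliciesOutput, lastPage bool) bool {
+//            for _, p := range page.Policies {
+//                fmt.Println(aws.StringValue(p.PolicyName))
+//            }
+//            return true
+//        })
+//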
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicies +func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + return out, req.Send() +} + +// ListPoliciesWithContext is the same as ListPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListPoliciesWithContext(ctx aws.Context, input *ListPoliciesInput, opts ...request.Option) (*ListPoliciesOutput, error) { + req, out := c.ListPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPoliciesPages iterates over the pages of a ListPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicies operation. +// pageNum := 0 +// err := client.ListPoliciesPages(params, +// func(page *iam.ListPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool) error { + return c.ListPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPoliciesPagesWithContext same as ListPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListPoliciesPagesWithContext(ctx aws.Context, input *ListPoliciesInput, fn func(*ListPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListPoliciesGrantingServiceAccess = "ListPoliciesGrantingServiceAccess" + +// ListPoliciesGrantingServiceAccessRequest generates a "aws/request.Request" representing the +// client's request for the ListPoliciesGrantingServiceAccess operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPoliciesGrantingServiceAccess for more information on using the ListPoliciesGrantingServiceAccess +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPoliciesGrantingServiceAccessRequest method. +// req, resp := client.ListPoliciesGrantingServiceAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccess +func (c *IAM) ListPoliciesGrantingServiceAccessRequest(input *ListPoliciesGrantingServiceAccessInput) (req *request.Request, output *ListPoliciesGrantingServiceAccessOutput) { + op := &request.Operation{ + Name: opListPoliciesGrantingServiceAccess, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPoliciesGrantingServiceAccessInput{} + } + + output = &ListPoliciesGrantingServiceAccessOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPoliciesGrantingServiceAccess API operation for AWS Identity and Access Management. +// +// Retrieves a list of policies that the IAM identity (user, group, or role) +// can use to access each specified service. +// +// This operation does not use other policy types when determining whether a +// resource could access a service. These other policy types include resource-based +// policies, access control lists, AWS Organizations policies, IAM permissions +// boundaries, and AWS STS assume role policies. It only applies permissions +// policy logic. For more about the evaluation of policy types, see Evaluating +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// The list of policies returned by the operation depends on the ARN of the +// identity that you provide. +// +// * User – The list of policies includes the managed and inline policies +// that are attached to the user directly. The list also includes any additional +// managed and inline policies that are attached to the group to which the +// user belongs. +// +// * Group – The list of policies includes only the managed and inline +// policies that are attached to the group directly. Policies that are attached +// to the group’s user are not included. +// +// * Role – The list of policies includes only the managed and inline policies +// that are attached to the role. +// +// For each managed policy, this operation returns the ARN and policy name. +// For each inline policy, it returns the policy name and the entity to which +// it is attached. Inline policies do not have an ARN. For more information +// about these policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) +// in the IAM User Guide. +// +// Policies that are attached to users and roles as permissions boundaries are +// not returned. To view which managed policy is currently used to set the permissions +// boundary for a user or role, use the GetUser or GetRole operations. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListPoliciesGrantingServiceAccess for usage and error information. 
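+//
+// Editor's sketch (the ARN and namespace are placeholders): ask which
+// policies let a user reach Amazon S3. Service namespaces such as "s3"
+// are the short prefixes used in policy actions.
+//
+//    out, err := svc.ListPoliciesGrantingServiceAccess(
+//        &iam.ListPoliciesGrantingServiceAccessInput{
+//            Arn:               aws.String("arn:aws:iam::123456789012:user/alice"),
+//            ServiceNamespaces: []*string{aws.String("s3")},
+//        })
+//    if err == nil {
+//        fmt.Println(out)
+//    }
+//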
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccess +func (c *IAM) ListPoliciesGrantingServiceAccess(input *ListPoliciesGrantingServiceAccessInput) (*ListPoliciesGrantingServiceAccessOutput, error) { + req, out := c.ListPoliciesGrantingServiceAccessRequest(input) + return out, req.Send() +} + +// ListPoliciesGrantingServiceAccessWithContext is the same as ListPoliciesGrantingServiceAccess with the addition of +// the ability to pass a context and additional request options. +// +// See ListPoliciesGrantingServiceAccess for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListPoliciesGrantingServiceAccessWithContext(ctx aws.Context, input *ListPoliciesGrantingServiceAccessInput, opts ...request.Option) (*ListPoliciesGrantingServiceAccessOutput, error) { + req, out := c.ListPoliciesGrantingServiceAccessRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListPolicyVersions = "ListPolicyVersions" + +// ListPolicyVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicyVersions operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListPolicyVersions for more information on using the ListPolicyVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListPolicyVersionsRequest method. +// req, resp := client.ListPolicyVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicyVersions +func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { + op := &request.Operation{ + Name: opListPolicyVersions, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPolicyVersionsInput{} + } + + output = &ListPolicyVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListPolicyVersions API operation for AWS Identity and Access Management. +// +// Lists information about the versions of the specified managed policy, including +// the version that is currently set as the policy's default version. 
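+//
+// Editor's sketch (the policy ARN is assumed): find the default version
+// of a managed policy by paging its versions.
+//
+//    err := svc.ListPolicyVersionsPages(
+//        &iam.ListPolicyVersionsInput{
+//            PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//        },
+//        func(page *iam.ListPolicyVersionsOutput, lastPage bool) bool {
+//            for _, v := range page.Versions {
+//                if aws.BoolValue(v.IsDefaultVersion) {
+//                    fmt.Println(aws.StringValue(v.VersionId))
+//                }
+//            }
+//            return true
+//        })
+//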
+// +// For more information about managed policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListPolicyVersions for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicyVersions +func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + return out, req.Send() +} + +// ListPolicyVersionsWithContext is the same as ListPolicyVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListPolicyVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListPolicyVersionsWithContext(ctx aws.Context, input *ListPolicyVersionsInput, opts ...request.Option) (*ListPolicyVersionsOutput, error) { + req, out := c.ListPolicyVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPolicyVersionsPages iterates over the pages of a ListPolicyVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicyVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicyVersions operation. +// pageNum := 0 +// err := client.ListPolicyVersionsPages(params, +// func(page *iam.ListPolicyVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListPolicyVersionsPages(input *ListPolicyVersionsInput, fn func(*ListPolicyVersionsOutput, bool) bool) error { + return c.ListPolicyVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPolicyVersionsPagesWithContext same as ListPolicyVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IAM) ListPolicyVersionsPagesWithContext(ctx aws.Context, input *ListPolicyVersionsInput, fn func(*ListPolicyVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPolicyVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPolicyVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPolicyVersionsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListRolePolicies = "ListRolePolicies" + +// ListRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListRolePolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRolePolicies for more information on using the ListRolePolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRolePoliciesRequest method. +// req, resp := client.ListRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolePolicies +func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *request.Request, output *ListRolePoliciesOutput) { + op := &request.Operation{ + Name: opListRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolePoliciesInput{} + } + + output = &ListRolePoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRolePolicies API operation for AWS Identity and Access Management. +// +// Lists the names of the inline policies that are embedded in the specified +// IAM role. +// +// An IAM role can also have managed policies attached to it. To list the managed +// policies that are attached to a role, use ListAttachedRolePolicies. For more +// information about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified role, the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListRolePolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolePolicies +func (c *IAM) ListRolePolicies(input *ListRolePoliciesInput) (*ListRolePoliciesOutput, error) { + req, out := c.ListRolePoliciesRequest(input) + return out, req.Send() +} + +// ListRolePoliciesWithContext is the same as ListRolePolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListRolePolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListRolePoliciesWithContext(ctx aws.Context, input *ListRolePoliciesInput, opts ...request.Option) (*ListRolePoliciesOutput, error) { + req, out := c.ListRolePoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRolePoliciesPages iterates over the pages of a ListRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRolePolicies operation. +// pageNum := 0 +// err := client.ListRolePoliciesPages(params, +// func(page *iam.ListRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(*ListRolePoliciesOutput, bool) bool) error { + return c.ListRolePoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRolePoliciesPagesWithContext same as ListRolePoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListRolePoliciesPagesWithContext(ctx aws.Context, input *ListRolePoliciesInput, fn func(*ListRolePoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRolePoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRolePoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListRolePoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListRoleTags = "ListRoleTags" + +// ListRoleTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListRoleTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
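+//
+// Editor's sketch (the role name is assumed): the ListRolePolicies
+// helpers above enumerate the inline policy names embedded in a role,
+// complementing ListAttachedRolePolicies for managed policies.
+//
+//    var inline []string
+//    err := svc.ListRolePoliciesPages(
+//        &iam.ListRolePoliciesInput{RoleName: aws.String("deploy-role")},
+//        func(page *iam.ListRolePoliciesOutput, lastPage bool) bool {
+//            inline = append(inline, aws.StringValueSlice(page.PolicyNames)...)
+//            return true
+//        })
+//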
+// +// See ListRoleTags for more information on using the ListRoleTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRoleTagsRequest method. +// req, resp := client.ListRoleTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoleTags +func (c *IAM) ListRoleTagsRequest(input *ListRoleTagsInput) (req *request.Request, output *ListRoleTagsOutput) { + op := &request.Operation{ + Name: opListRoleTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListRoleTagsInput{} + } + + output = &ListRoleTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRoleTags API operation for AWS Identity and Access Management. +// +// Lists the tags that are attached to the specified role. The returned list +// of tags is sorted by tag key. For more information about tagging, see Tagging +// IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListRoleTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoleTags +func (c *IAM) ListRoleTags(input *ListRoleTagsInput) (*ListRoleTagsOutput, error) { + req, out := c.ListRoleTagsRequest(input) + return out, req.Send() +} + +// ListRoleTagsWithContext is the same as ListRoleTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListRoleTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListRoleTagsWithContext(ctx aws.Context, input *ListRoleTagsInput, opts ...request.Option) (*ListRoleTagsOutput, error) { + req, out := c.ListRoleTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListRoles = "ListRoles" + +// ListRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListRoles operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListRoles for more information on using the ListRoles +// API call, and error handling. 
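+//
+// Editor's sketch (role name assumed): print the tags on a role via the
+// ListRoleTags operation above; the returned list is sorted by tag key.
+//
+//    out, err := svc.ListRoleTags(&iam.ListRoleTagsInput{
+//        RoleName: aws.String("deploy-role"),
+//    })
+//    if err == nil {
+//        for _, t := range out.Tags {
+//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//        }
+//    }
+//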
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListRolesRequest method. +// req, resp := client.ListRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoles +func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, output *ListRolesOutput) { + op := &request.Operation{ + Name: opListRoles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolesInput{} + } + + output = &ListRolesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListRoles API operation for AWS Identity and Access Management. +// +// Lists the IAM roles that have the specified path prefix. If there are none, +// the operation returns an empty list. For more information about roles, go +// to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListRoles for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoles +func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { + req, out := c.ListRolesRequest(input) + return out, req.Send() +} + +// ListRolesWithContext is the same as ListRoles with the addition of +// the ability to pass a context and additional request options. +// +// See ListRoles for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListRolesWithContext(ctx aws.Context, input *ListRolesInput, opts ...request.Option) (*ListRolesOutput, error) { + req, out := c.ListRolesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListRolesPages iterates over the pages of a ListRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRoles operation. 
+// pageNum := 0 +// err := client.ListRolesPages(params, +// func(page *iam.ListRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(*ListRolesOutput, bool) bool) error { + return c.ListRolesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListRolesPagesWithContext same as ListRolesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListRolesPagesWithContext(ctx aws.Context, input *ListRolesInput, fn func(*ListRolesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListRolesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListRolesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListRolesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListSAMLProviders = "ListSAMLProviders" + +// ListSAMLProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListSAMLProviders operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSAMLProviders for more information on using the ListSAMLProviders +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSAMLProvidersRequest method. +// req, resp := client.ListSAMLProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSAMLProviders +func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *request.Request, output *ListSAMLProvidersOutput) { + op := &request.Operation{ + Name: opListSAMLProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSAMLProvidersInput{} + } + + output = &ListSAMLProvidersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSAMLProviders API operation for AWS Identity and Access Management. +// +// Lists the SAML provider resource objects defined in IAM in the account. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListSAMLProviders for usage and error information. 
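+// +// Because ListSAMLProviders takes no filters, an empty input lists every SAML +// provider in the account. A minimal sketch (assuming an *IAM client "svc", a +// placeholder): +// +// out, err := svc.ListSAMLProviders(&iam.ListSAMLProvidersInput{}) +// if err == nil { +// for _, p := range out.SAMLProviderList { +// fmt.Println(aws.StringValue(p.Arn)) +// } +// } +//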
+// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSAMLProviders +func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvidersOutput, error) { + req, out := c.ListSAMLProvidersRequest(input) + return out, req.Send() +} + +// ListSAMLProvidersWithContext is the same as ListSAMLProviders with the addition of +// the ability to pass a context and additional request options. +// +// See ListSAMLProviders for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListSAMLProvidersWithContext(ctx aws.Context, input *ListSAMLProvidersInput, opts ...request.Option) (*ListSAMLProvidersOutput, error) { + req, out := c.ListSAMLProvidersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListSSHPublicKeys = "ListSSHPublicKeys" + +// ListSSHPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListSSHPublicKeys operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSSHPublicKeys for more information on using the ListSSHPublicKeys +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSSHPublicKeysRequest method. +// req, resp := client.ListSSHPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSSHPublicKeys +func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *request.Request, output *ListSSHPublicKeysOutput) { + op := &request.Operation{ + Name: opListSSHPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSSHPublicKeysInput{} + } + + output = &ListSSHPublicKeysOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSSHPublicKeys API operation for AWS Identity and Access Management. +// +// Returns information about the SSH public keys associated with the specified +// IAM user. If none exists, the operation returns an empty list. +// +// The SSH public keys returned by this operation are used only for authenticating +// the IAM user to an AWS CodeCommit repository. For more information about +// using SSH keys to authenticate to an AWS CodeCommit repository, see Set up +// AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. 
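+// +// A minimal sketch (assuming an *IAM client "svc" and a placeholder user name) +// that prints the SSH public key IDs registered for one user: +// +// out, err := svc.ListSSHPublicKeys(&iam.ListSSHPublicKeysInput{ +// UserName: aws.String("example-user"), +// }) +// if err == nil { +// for _, k := range out.SSHPublicKeys { +// fmt.Println(aws.StringValue(k.SSHPublicKeyId)) +// } +// } +//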
+// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListSSHPublicKeys for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSSHPublicKeys +func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKeysOutput, error) { + req, out := c.ListSSHPublicKeysRequest(input) + return out, req.Send() +} + +// ListSSHPublicKeysWithContext is the same as ListSSHPublicKeys with the addition of +// the ability to pass a context and additional request options. +// +// See ListSSHPublicKeys for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListSSHPublicKeysWithContext(ctx aws.Context, input *ListSSHPublicKeysInput, opts ...request.Option) (*ListSSHPublicKeysOutput, error) { + req, out := c.ListSSHPublicKeysRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSSHPublicKeysPages iterates over the pages of a ListSSHPublicKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSSHPublicKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSSHPublicKeys operation. +// pageNum := 0 +// err := client.ListSSHPublicKeysPages(params, +// func(page *iam.ListSSHPublicKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListSSHPublicKeysPages(input *ListSSHPublicKeysInput, fn func(*ListSSHPublicKeysOutput, bool) bool) error { + return c.ListSSHPublicKeysPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSSHPublicKeysPagesWithContext same as ListSSHPublicKeysPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListSSHPublicKeysPagesWithContext(ctx aws.Context, input *ListSSHPublicKeysInput, fn func(*ListSSHPublicKeysOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSSHPublicKeysInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSSHPublicKeysRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListSSHPublicKeysOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListServerCertificates = "ListServerCertificates" + +// ListServerCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListServerCertificates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListServerCertificates for more information on using the ListServerCertificates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListServerCertificatesRequest method. +// req, resp := client.ListServerCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServerCertificates +func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) (req *request.Request, output *ListServerCertificatesOutput) { + op := &request.Operation{ + Name: opListServerCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListServerCertificatesInput{} + } + + output = &ListServerCertificatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListServerCertificates API operation for AWS Identity and Access Management. +// +// Lists the server certificates stored in IAM that have the specified path +// prefix. If none exist, the operation returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic also includes a list of AWS services that +// can use the server certificates that you manage with IAM. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListServerCertificates for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServerCertificates +func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListServerCertificatesOutput, error) { + req, out := c.ListServerCertificatesRequest(input) + return out, req.Send() +} + +// ListServerCertificatesWithContext is the same as ListServerCertificates with the addition of +// the ability to pass a context and additional request options. 
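+// +// A minimal sketch (assuming an *IAM client "svc"; the path prefix is a +// placeholder) that lists the server certificates stored under one path with +// ListServerCertificates above: +// +// out, err := svc.ListServerCertificates(&iam.ListServerCertificatesInput{ +// PathPrefix: aws.String("/company/servercerts/"), +// }) +// if err == nil { +// for _, m := range out.ServerCertificateMetadataList { +// fmt.Println(aws.StringValue(m.ServerCertificateName)) +// } +// } +//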
+// +// See ListServerCertificates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListServerCertificatesWithContext(ctx aws.Context, input *ListServerCertificatesInput, opts ...request.Option) (*ListServerCertificatesOutput, error) { + req, out := c.ListServerCertificatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListServerCertificatesPages iterates over the pages of a ListServerCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServerCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServerCertificates operation. +// pageNum := 0 +// err := client.ListServerCertificatesPages(params, +// func(page *iam.ListServerCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn func(*ListServerCertificatesOutput, bool) bool) error { + return c.ListServerCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListServerCertificatesPagesWithContext same as ListServerCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListServerCertificatesPagesWithContext(ctx aws.Context, input *ListServerCertificatesInput, fn func(*ListServerCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListServerCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListServerCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListServerCertificatesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListServiceSpecificCredentials = "ListServiceSpecificCredentials" + +// ListServiceSpecificCredentialsRequest generates a "aws/request.Request" representing the +// client's request for the ListServiceSpecificCredentials operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListServiceSpecificCredentials for more information on using the ListServiceSpecificCredentials +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
+// +// +// // Example sending a request using the ListServiceSpecificCredentialsRequest method. +// req, resp := client.ListServiceSpecificCredentialsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServiceSpecificCredentials +func (c *IAM) ListServiceSpecificCredentialsRequest(input *ListServiceSpecificCredentialsInput) (req *request.Request, output *ListServiceSpecificCredentialsOutput) { + op := &request.Operation{ + Name: opListServiceSpecificCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListServiceSpecificCredentialsInput{} + } + + output = &ListServiceSpecificCredentialsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListServiceSpecificCredentials API operation for AWS Identity and Access Management. +// +// Returns information about the service-specific credentials associated with +// the specified IAM user. If none exists, the operation returns an empty list. +// The service-specific credentials returned by this operation are used only +// for authenticating the IAM user to a specific service. For more information +// about using service-specific credentials to authenticate to an AWS service, +// see Set Up service-specific credentials (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListServiceSpecificCredentials for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceNotSupportedException "NotSupportedService" +// The specified service does not support service-specific credentials. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServiceSpecificCredentials +func (c *IAM) ListServiceSpecificCredentials(input *ListServiceSpecificCredentialsInput) (*ListServiceSpecificCredentialsOutput, error) { + req, out := c.ListServiceSpecificCredentialsRequest(input) + return out, req.Send() +} + +// ListServiceSpecificCredentialsWithContext is the same as ListServiceSpecificCredentials with the addition of +// the ability to pass a context and additional request options. +// +// See ListServiceSpecificCredentials for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListServiceSpecificCredentialsWithContext(ctx aws.Context, input *ListServiceSpecificCredentialsInput, opts ...request.Option) (*ListServiceSpecificCredentialsOutput, error) { + req, out := c.ListServiceSpecificCredentialsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opListSigningCertificates = "ListSigningCertificates" + +// ListSigningCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListSigningCertificates operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListSigningCertificates for more information on using the ListSigningCertificates +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListSigningCertificatesRequest method. +// req, resp := client.ListSigningCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSigningCertificates +func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) (req *request.Request, output *ListSigningCertificatesOutput) { + op := &request.Operation{ + Name: opListSigningCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSigningCertificatesInput{} + } + + output = &ListSigningCertificatesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListSigningCertificates API operation for AWS Identity and Access Management. +// +// Returns information about the signing certificates associated with the specified +// IAM user. If none exists, the operation returns an empty list. +// +// Although each user is limited to a small number of signing certificates, +// you can still paginate the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request for this API. This +// operation works for access keys under the AWS account. Consequently, you +// can use this operation to manage AWS account root user credentials even if +// the AWS account has no associated users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListSigningCertificates for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. 
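+// +// A minimal sketch (assuming an *IAM client "svc" and a placeholder user name) +// of telling the error codes above apart with a runtime type assertion: +// +// _, err := svc.ListSigningCertificates(&iam.ListSigningCertificatesInput{ +// UserName: aws.String("example-user"), +// }) +// if aerr, ok := err.(awserr.Error); ok { +// switch aerr.Code() { +// case iam.ErrCodeNoSuchEntityException: +// // the user does not exist +// case iam.ErrCodeServiceFailureException: +// // transient service-side failure +// } +// } +//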
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSigningCertificates +func (c *IAM) ListSigningCertificates(input *ListSigningCertificatesInput) (*ListSigningCertificatesOutput, error) { + req, out := c.ListSigningCertificatesRequest(input) + return out, req.Send() +} + +// ListSigningCertificatesWithContext is the same as ListSigningCertificates with the addition of +// the ability to pass a context and additional request options. +// +// See ListSigningCertificates for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListSigningCertificatesWithContext(ctx aws.Context, input *ListSigningCertificatesInput, opts ...request.Option) (*ListSigningCertificatesOutput, error) { + req, out := c.ListSigningCertificatesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListSigningCertificatesPages iterates over the pages of a ListSigningCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSigningCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSigningCertificates operation. +// pageNum := 0 +// err := client.ListSigningCertificatesPages(params, +// func(page *iam.ListSigningCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, fn func(*ListSigningCertificatesOutput, bool) bool) error { + return c.ListSigningCertificatesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListSigningCertificatesPagesWithContext same as ListSigningCertificatesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListSigningCertificatesPagesWithContext(ctx aws.Context, input *ListSigningCertificatesInput, fn func(*ListSigningCertificatesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListSigningCertificatesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListSigningCertificatesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListSigningCertificatesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListUserPolicies = "ListUserPolicies" + +// ListUserPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListUserPolicies operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See ListUserPolicies for more information on using the ListUserPolicies +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListUserPoliciesRequest method. +// req, resp := client.ListUserPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserPolicies +func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *request.Request, output *ListUserPoliciesOutput) { + op := &request.Operation{ + Name: opListUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUserPoliciesInput{} + } + + output = &ListUserPoliciesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListUserPolicies API operation for AWS Identity and Access Management. +// +// Lists the names of the inline policies embedded in the specified IAM user. +// +// An IAM user can also have managed policies attached to it. To list the managed +// policies that are attached to a user, use ListAttachedUserPolicies. For more +// information about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified user, the operation +// returns an empty list. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListUserPolicies for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserPolicies +func (c *IAM) ListUserPolicies(input *ListUserPoliciesInput) (*ListUserPoliciesOutput, error) { + req, out := c.ListUserPoliciesRequest(input) + return out, req.Send() +} + +// ListUserPoliciesWithContext is the same as ListUserPolicies with the addition of +// the ability to pass a context and additional request options. +// +// See ListUserPolicies for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *IAM) ListUserPoliciesWithContext(ctx aws.Context, input *ListUserPoliciesInput, opts ...request.Option) (*ListUserPoliciesOutput, error) { + req, out := c.ListUserPoliciesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListUserPoliciesPages iterates over the pages of a ListUserPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUserPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUserPolicies operation. +// pageNum := 0 +// err := client.ListUserPoliciesPages(params, +// func(page *iam.ListUserPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(*ListUserPoliciesOutput, bool) bool) error { + return c.ListUserPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListUserPoliciesPagesWithContext same as ListUserPoliciesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListUserPoliciesPagesWithContext(ctx aws.Context, input *ListUserPoliciesInput, fn func(*ListUserPoliciesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListUserPoliciesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListUserPoliciesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListUserPoliciesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListUserTags = "ListUserTags" + +// ListUserTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListUserTags operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListUserTags for more information on using the ListUserTags +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListUserTagsRequest method. 
+// req, resp := client.ListUserTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserTags +func (c *IAM) ListUserTagsRequest(input *ListUserTagsInput) (req *request.Request, output *ListUserTagsOutput) { + op := &request.Operation{ + Name: opListUserTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListUserTagsInput{} + } + + output = &ListUserTagsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListUserTags API operation for AWS Identity and Access Management. +// +// Lists the tags that are attached to the specified user. The returned list +// of tags is sorted by tag key. For more information about tagging, see Tagging +// IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListUserTags for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserTags +func (c *IAM) ListUserTags(input *ListUserTagsInput) (*ListUserTagsOutput, error) { + req, out := c.ListUserTagsRequest(input) + return out, req.Send() +} + +// ListUserTagsWithContext is the same as ListUserTags with the addition of +// the ability to pass a context and additional request options. +// +// See ListUserTags for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListUserTagsWithContext(ctx aws.Context, input *ListUserTagsInput, opts ...request.Option) (*ListUserTagsOutput, error) { + req, out := c.ListUserTagsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListUsers = "ListUsers" + +// ListUsersRequest generates a "aws/request.Request" representing the +// client's request for the ListUsers operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListUsers for more information on using the ListUsers +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListUsersRequest method. 
+// req, resp := client.ListUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUsers +func (c *IAM) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { + op := &request.Operation{ + Name: opListUsers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUsersInput{} + } + + output = &ListUsersOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListUsers API operation for AWS Identity and Access Management. +// +// Lists the IAM users that have the specified path prefix. If no path prefix +// is specified, the operation returns all users in the AWS account. If there +// are none, the operation returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListUsers for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUsers +func (c *IAM) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + return out, req.Send() +} + +// ListUsersWithContext is the same as ListUsers with the addition of +// the ability to pass a context and additional request options. +// +// See ListUsers for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListUsersWithContext(ctx aws.Context, input *ListUsersInput, opts ...request.Option) (*ListUsersOutput, error) { + req, out := c.ListUsersRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListUsersPages iterates over the pages of a ListUsers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUsers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUsers operation. +// pageNum := 0 +// err := client.ListUsersPages(params, +// func(page *iam.ListUsersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(*ListUsersOutput, bool) bool) error { + return c.ListUsersPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListUsersPagesWithContext same as ListUsersPages except +// it takes a Context and allows setting request options on the pages. 
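+// +// A minimal sketch (assuming an *IAM client "svc", a placeholder) that drains +// every page of ListUsers into a slice of user names: +// +// var users []string +// err := svc.ListUsersPages(&iam.ListUsersInput{}, +// func(page *iam.ListUsersOutput, lastPage bool) bool { +// for _, u := range page.Users { +// users = append(users, aws.StringValue(u.UserName)) +// } +// return true +// }) +//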
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListUsersPagesWithContext(ctx aws.Context, input *ListUsersInput, fn func(*ListUsersOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListUsersInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListUsersRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListUsersOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListVirtualMFADevices = "ListVirtualMFADevices" + +// ListVirtualMFADevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualMFADevices operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListVirtualMFADevices for more information on using the ListVirtualMFADevices +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListVirtualMFADevicesRequest method. +// req, resp := client.ListVirtualMFADevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListVirtualMFADevices +func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (req *request.Request, output *ListVirtualMFADevicesOutput) { + op := &request.Operation{ + Name: opListVirtualMFADevices, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListVirtualMFADevicesInput{} + } + + output = &ListVirtualMFADevicesOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListVirtualMFADevices API operation for AWS Identity and Access Management. +// +// Lists the virtual MFA devices defined in the AWS account by assignment status. +// If you do not specify an assignment status, the operation returns a list +// of all virtual MFA devices. Assignment status can be Assigned, Unassigned, +// or Any. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ListVirtualMFADevices for usage and error information. 
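+// +// A minimal sketch (assuming an *IAM client "svc", a placeholder) that uses the +// AssignmentStatus filter described above to list only unassigned devices: +// +// out, err := svc.ListVirtualMFADevices(&iam.ListVirtualMFADevicesInput{ +// AssignmentStatus: aws.String("Unassigned"), +// }) +// if err == nil { +// for _, d := range out.VirtualMFADevices { +// fmt.Println(aws.StringValue(d.SerialNumber)) +// } +// } +//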
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListVirtualMFADevices +func (c *IAM) ListVirtualMFADevices(input *ListVirtualMFADevicesInput) (*ListVirtualMFADevicesOutput, error) { + req, out := c.ListVirtualMFADevicesRequest(input) + return out, req.Send() +} + +// ListVirtualMFADevicesWithContext is the same as ListVirtualMFADevices with the addition of +// the ability to pass a context and additional request options. +// +// See ListVirtualMFADevices for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListVirtualMFADevicesWithContext(ctx aws.Context, input *ListVirtualMFADevicesInput, opts ...request.Option) (*ListVirtualMFADevicesOutput, error) { + req, out := c.ListVirtualMFADevicesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListVirtualMFADevicesPages iterates over the pages of a ListVirtualMFADevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListVirtualMFADevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVirtualMFADevices operation. +// pageNum := 0 +// err := client.ListVirtualMFADevicesPages(params, +// func(page *iam.ListVirtualMFADevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn func(*ListVirtualMFADevicesOutput, bool) bool) error { + return c.ListVirtualMFADevicesPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListVirtualMFADevicesPagesWithContext same as ListVirtualMFADevicesPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ListVirtualMFADevicesPagesWithContext(ctx aws.Context, input *ListVirtualMFADevicesInput, fn func(*ListVirtualMFADevicesOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListVirtualMFADevicesInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListVirtualMFADevicesRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListVirtualMFADevicesOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutGroupPolicy = "PutGroupPolicy" + +// PutGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutGroupPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See PutGroupPolicy for more information on using the PutGroupPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle, such as custom headers or retry logic. +// +// +// // Example sending a request using the PutGroupPolicyRequest method. +// req, resp := client.PutGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutGroupPolicy +func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Request, output *PutGroupPolicyOutput) { + op := &request.Operation{ + Name: opPutGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutGroupPolicyInput{} + } + + output = &PutGroupPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutGroupPolicy API operation for AWS Identity and Access Management. +// +// Adds or updates an inline policy document that is embedded in the specified +// IAM group. +// +// A group can also have managed policies attached to it. To attach a managed +// policy to a group, use AttachGroupPolicy. To create a new managed policy, +// use CreatePolicy. For information about policies, see Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a group, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutGroupPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation PutGroupPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure.
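+// +// A minimal sketch (assuming an *IAM client "svc"; the group name, policy name, +// and policy document are placeholders) that embeds a small inline policy: +// +// doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}` +// _, err := svc.PutGroupPolicy(&iam.PutGroupPolicyInput{ +// GroupName: aws.String("example-group"), +// PolicyName: aws.String("AllowS3List"), +// PolicyDocument: aws.String(doc), +// }) +//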
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutGroupPolicy +func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, error) { + req, out := c.PutGroupPolicyRequest(input) + return out, req.Send() +} + +// PutGroupPolicyWithContext is the same as PutGroupPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutGroupPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) PutGroupPolicyWithContext(ctx aws.Context, input *PutGroupPolicyInput, opts ...request.Option) (*PutGroupPolicyOutput, error) { + req, out := c.PutGroupPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutRolePermissionsBoundary = "PutRolePermissionsBoundary" + +// PutRolePermissionsBoundaryRequest generates a "aws/request.Request" representing the +// client's request for the PutRolePermissionsBoundary operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutRolePermissionsBoundary for more information on using the PutRolePermissionsBoundary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutRolePermissionsBoundaryRequest method. +// req, resp := client.PutRolePermissionsBoundaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePermissionsBoundary +func (c *IAM) PutRolePermissionsBoundaryRequest(input *PutRolePermissionsBoundaryInput) (req *request.Request, output *PutRolePermissionsBoundaryOutput) { + op := &request.Operation{ + Name: opPutRolePermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRolePermissionsBoundaryInput{} + } + + output = &PutRolePermissionsBoundaryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutRolePermissionsBoundary API operation for AWS Identity and Access Management. +// +// Adds or updates the policy that is specified as the IAM role's permissions +// boundary. You can use an AWS managed policy or a customer managed policy +// to set the boundary for a role. Use the boundary to control the maximum permissions +// that the role can have. Setting a permissions boundary is an advanced feature +// that can affect the permissions for the role. +// +// You cannot set the boundary for a service-linked role. +// +// Policies used as permissions boundaries do not provide permissions. You must +// also attach a permissions policy to the role. 
To learn how the effective +// permissions for a role are evaluated, see IAM JSON Policy Evaluation Logic +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation PutRolePermissionsBoundary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePermissionsBoundary +func (c *IAM) PutRolePermissionsBoundary(input *PutRolePermissionsBoundaryInput) (*PutRolePermissionsBoundaryOutput, error) { + req, out := c.PutRolePermissionsBoundaryRequest(input) + return out, req.Send() +} + +// PutRolePermissionsBoundaryWithContext is the same as PutRolePermissionsBoundary with the addition of +// the ability to pass a context and additional request options. +// +// See PutRolePermissionsBoundary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) PutRolePermissionsBoundaryWithContext(ctx aws.Context, input *PutRolePermissionsBoundaryInput, opts ...request.Option) (*PutRolePermissionsBoundaryOutput, error) { + req, out := c.PutRolePermissionsBoundaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutRolePolicy = "PutRolePolicy" + +// PutRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRolePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutRolePolicy for more information on using the PutRolePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutRolePolicyRequest method. +// req, resp := client.PutRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePolicy +func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Request, output *PutRolePolicyOutput) { + op := &request.Operation{ + Name: opPutRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRolePolicyInput{} + } + + output = &PutRolePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutRolePolicy API operation for AWS Identity and Access Management. +// +// Adds or updates an inline policy document that is embedded in the specified +// IAM role. +// +// When you embed an inline policy in a role, the inline policy is used as part +// of the role's access (permissions) policy. The role's trust policy is created +// at the same time as the role, using CreateRole. You can update a role's trust +// policy using UpdateAssumeRolePolicy. For more information about IAM roles, +// go to Using Roles to Delegate Permissions and Federate Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// A role can also have a managed policy attached to it. To attach a managed +// policy to a role, use AttachRolePolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed with a role, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutRolePolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation PutRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. 
The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePolicy +func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, error) { + req, out := c.PutRolePolicyRequest(input) + return out, req.Send() +} + +// PutRolePolicyWithContext is the same as PutRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) PutRolePolicyWithContext(ctx aws.Context, input *PutRolePolicyInput, opts ...request.Option) (*PutRolePolicyOutput, error) { + req, out := c.PutRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutUserPermissionsBoundary = "PutUserPermissionsBoundary" + +// PutUserPermissionsBoundaryRequest generates a "aws/request.Request" representing the +// client's request for the PutUserPermissionsBoundary operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutUserPermissionsBoundary for more information on using the PutUserPermissionsBoundary +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutUserPermissionsBoundaryRequest method. +// req, resp := client.PutUserPermissionsBoundaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPermissionsBoundary +func (c *IAM) PutUserPermissionsBoundaryRequest(input *PutUserPermissionsBoundaryInput) (req *request.Request, output *PutUserPermissionsBoundaryOutput) { + op := &request.Operation{ + Name: opPutUserPermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPermissionsBoundaryInput{} + } + + output = &PutUserPermissionsBoundaryOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutUserPermissionsBoundary API operation for AWS Identity and Access Management. +// +// Adds or updates the policy that is specified as the IAM user's permissions +// boundary. You can use an AWS managed policy or a customer managed policy +// to set the boundary for a user. Use the boundary to control the maximum permissions +// that the user can have. Setting a permissions boundary is an advanced feature +// that can affect the permissions for the user. 
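+//
+// For illustration only (an editor's sketch, not part of the generated
+// reference), setting an assumed managed policy as a user's boundary; the
+// user name and policy ARN are placeholders and "svc" is an *iam.IAM client:
+//
+//    _, err := svc.PutUserPermissionsBoundary(&iam.PutUserPermissionsBoundaryInput{
+//        UserName:            aws.String("alice"),
+//        PermissionsBoundary: aws.String("arn:aws:iam::123456789012:policy/DeveloperBoundary"),
+//    })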
+// +// Policies that are used as permissions boundaries do not provide permissions. +// You must also attach a permissions policy to the user. To learn how the effective +// permissions for a user are evaluated, see IAM JSON Policy Evaluation Logic +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation PutUserPermissionsBoundary for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodePolicyNotAttachableException "PolicyNotAttachable" +// The request failed because AWS service role policies can only be attached +// to the service-linked role for that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPermissionsBoundary +func (c *IAM) PutUserPermissionsBoundary(input *PutUserPermissionsBoundaryInput) (*PutUserPermissionsBoundaryOutput, error) { + req, out := c.PutUserPermissionsBoundaryRequest(input) + return out, req.Send() +} + +// PutUserPermissionsBoundaryWithContext is the same as PutUserPermissionsBoundary with the addition of +// the ability to pass a context and additional request options. +// +// See PutUserPermissionsBoundary for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) PutUserPermissionsBoundaryWithContext(ctx aws.Context, input *PutUserPermissionsBoundaryInput, opts ...request.Option) (*PutUserPermissionsBoundaryOutput, error) { + req, out := c.PutUserPermissionsBoundaryRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutUserPolicy = "PutUserPolicy" + +// PutUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutUserPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutUserPolicy for more information on using the PutUserPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutUserPolicyRequest method. 
+// req, resp := client.PutUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPolicy +func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Request, output *PutUserPolicyOutput) { + op := &request.Operation{ + Name: opPutUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPolicyInput{} + } + + output = &PutUserPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutUserPolicy API operation for AWS Identity and Access Management. +// +// Adds or updates an inline policy document that is embedded in the specified +// IAM user. +// +// An IAM user can also have a managed policy attached to it. To attach a managed +// policy to a user, use AttachUserPolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a user, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutUserPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation PutUserPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPolicy +func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, error) { + req, out := c.PutUserPolicyRequest(input) + return out, req.Send() +} + +// PutUserPolicyWithContext is the same as PutUserPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutUserPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) PutUserPolicyWithContext(ctx aws.Context, input *PutUserPolicyInput, opts ...request.Option) (*PutUserPolicyOutput, error) { + req, out := c.PutUserPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider" + +// RemoveClientIDFromOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the RemoveClientIDFromOpenIDConnectProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveClientIDFromOpenIDConnectProvider for more information on using the RemoveClientIDFromOpenIDConnectProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveClientIDFromOpenIDConnectProviderRequest method. +// req, resp := client.RemoveClientIDFromOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveClientIDFromOpenIDConnectProvider +func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) (req *request.Request, output *RemoveClientIDFromOpenIDConnectProviderOutput) { + op := &request.Operation{ + Name: opRemoveClientIDFromOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveClientIDFromOpenIDConnectProviderInput{} + } + + output = &RemoveClientIDFromOpenIDConnectProviderOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveClientIDFromOpenIDConnectProvider API operation for AWS Identity and Access Management. +// +// Removes the specified client ID (also known as audience) from the list of +// client IDs registered for the specified IAM OpenID Connect (OIDC) provider +// resource object. +// +// This operation is idempotent; it does not fail or return an error if you +// try to remove a client ID that does not exist. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation RemoveClientIDFromOpenIDConnectProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveClientIDFromOpenIDConnectProvider +func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromOpenIDConnectProviderInput) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) { + req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input) + return out, req.Send() +} + +// RemoveClientIDFromOpenIDConnectProviderWithContext is the same as RemoveClientIDFromOpenIDConnectProvider with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveClientIDFromOpenIDConnectProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) RemoveClientIDFromOpenIDConnectProviderWithContext(ctx aws.Context, input *RemoveClientIDFromOpenIDConnectProviderInput, opts ...request.Option) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) { + req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" + +// RemoveRoleFromInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the RemoveRoleFromInstanceProfile operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveRoleFromInstanceProfile for more information on using the RemoveRoleFromInstanceProfile +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveRoleFromInstanceProfileRequest method. +// req, resp := client.RemoveRoleFromInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveRoleFromInstanceProfile +func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) (req *request.Request, output *RemoveRoleFromInstanceProfileOutput) { + op := &request.Operation{ + Name: opRemoveRoleFromInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveRoleFromInstanceProfileInput{} + } + + output = &RemoveRoleFromInstanceProfileOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveRoleFromInstanceProfile API operation for AWS Identity and Access Management. +// +// Removes the specified IAM role from the specified EC2 instance profile. +// +// Make sure that you do not have any Amazon EC2 instances running with the +// role you are about to remove from the instance profile. 
Removing a role from +// an instance profile that is associated with a running instance might break +// any applications running on the instance. +// +// For more information about IAM roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation RemoveRoleFromInstanceProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveRoleFromInstanceProfile +func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfileInput) (*RemoveRoleFromInstanceProfileOutput, error) { + req, out := c.RemoveRoleFromInstanceProfileRequest(input) + return out, req.Send() +} + +// RemoveRoleFromInstanceProfileWithContext is the same as RemoveRoleFromInstanceProfile with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveRoleFromInstanceProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) RemoveRoleFromInstanceProfileWithContext(ctx aws.Context, input *RemoveRoleFromInstanceProfileInput, opts ...request.Option) (*RemoveRoleFromInstanceProfileOutput, error) { + req, out := c.RemoveRoleFromInstanceProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRemoveUserFromGroup = "RemoveUserFromGroup" + +// RemoveUserFromGroupRequest generates a "aws/request.Request" representing the +// client's request for the RemoveUserFromGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RemoveUserFromGroup for more information on using the RemoveUserFromGroup +// API call, and error handling. 
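+//
+// For illustration only (an editor's sketch, not part of the generated
+// reference), the simpler one-shot wrapper shown further below is usually
+// sufficient; the group and user names here are placeholders:
+//
+//    _, err := svc.RemoveUserFromGroup(&iam.RemoveUserFromGroupInput{
+//        GroupName: aws.String("Developers"),
+//        UserName:  aws.String("alice"),
+//    })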
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RemoveUserFromGroupRequest method. +// req, resp := client.RemoveUserFromGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveUserFromGroup +func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req *request.Request, output *RemoveUserFromGroupOutput) { + op := &request.Operation{ + Name: opRemoveUserFromGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveUserFromGroupInput{} + } + + output = &RemoveUserFromGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// RemoveUserFromGroup API operation for AWS Identity and Access Management. +// +// Removes the specified user from the specified group. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation RemoveUserFromGroup for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveUserFromGroup +func (c *IAM) RemoveUserFromGroup(input *RemoveUserFromGroupInput) (*RemoveUserFromGroupOutput, error) { + req, out := c.RemoveUserFromGroupRequest(input) + return out, req.Send() +} + +// RemoveUserFromGroupWithContext is the same as RemoveUserFromGroup with the addition of +// the ability to pass a context and additional request options. +// +// See RemoveUserFromGroup for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) RemoveUserFromGroupWithContext(ctx aws.Context, input *RemoveUserFromGroupInput, opts ...request.Option) (*RemoveUserFromGroupOutput, error) { + req, out := c.RemoveUserFromGroupRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opResetServiceSpecificCredential = "ResetServiceSpecificCredential" + +// ResetServiceSpecificCredentialRequest generates a "aws/request.Request" representing the +// client's request for the ResetServiceSpecificCredential operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResetServiceSpecificCredential for more information on using the ResetServiceSpecificCredential +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResetServiceSpecificCredentialRequest method. +// req, resp := client.ResetServiceSpecificCredentialRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResetServiceSpecificCredential +func (c *IAM) ResetServiceSpecificCredentialRequest(input *ResetServiceSpecificCredentialInput) (req *request.Request, output *ResetServiceSpecificCredentialOutput) { + op := &request.Operation{ + Name: opResetServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetServiceSpecificCredentialInput{} + } + + output = &ResetServiceSpecificCredentialOutput{} + req = c.newRequest(op, input, output) + return +} + +// ResetServiceSpecificCredential API operation for AWS Identity and Access Management. +// +// Resets the password for a service-specific credential. The new password is +// AWS generated and cryptographically strong. It cannot be configured by the +// user. Resetting the password immediately invalidates the previous password +// associated with this user. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ResetServiceSpecificCredential for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResetServiceSpecificCredential +func (c *IAM) ResetServiceSpecificCredential(input *ResetServiceSpecificCredentialInput) (*ResetServiceSpecificCredentialOutput, error) { + req, out := c.ResetServiceSpecificCredentialRequest(input) + return out, req.Send() +} + +// ResetServiceSpecificCredentialWithContext is the same as ResetServiceSpecificCredential with the addition of +// the ability to pass a context and additional request options. +// +// See ResetServiceSpecificCredential for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ResetServiceSpecificCredentialWithContext(ctx aws.Context, input *ResetServiceSpecificCredentialInput, opts ...request.Option) (*ResetServiceSpecificCredentialOutput, error) { + req, out := c.ResetServiceSpecificCredentialRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opResyncMFADevice = "ResyncMFADevice" + +// ResyncMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the ResyncMFADevice operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ResyncMFADevice for more information on using the ResyncMFADevice +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ResyncMFADeviceRequest method. +// req, resp := client.ResyncMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResyncMFADevice +func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request.Request, output *ResyncMFADeviceOutput) { + op := &request.Operation{ + Name: opResyncMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResyncMFADeviceInput{} + } + + output = &ResyncMFADeviceOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// ResyncMFADevice API operation for AWS Identity and Access Management. +// +// Synchronizes the specified MFA device with its IAM resource object on the +// AWS servers. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation ResyncMFADevice for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthenticationCodeException "InvalidAuthenticationCode" +// The request was rejected because the authentication code was not recognized. +// The error message describes the specific error. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResyncMFADevice +func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutput, error) { + req, out := c.ResyncMFADeviceRequest(input) + return out, req.Send() +} + +// ResyncMFADeviceWithContext is the same as ResyncMFADevice with the addition of +// the ability to pass a context and additional request options. 
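+//
+// For illustration only (an editor's sketch, not part of the generated
+// reference), bounding the resync call with a timeout; the user name, device
+// serial number, and authentication codes are placeholders:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    _, err := svc.ResyncMFADeviceWithContext(ctx, &iam.ResyncMFADeviceInput{
+//        UserName:            aws.String("alice"),
+//        SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/alice"),
+//        AuthenticationCode1: aws.String("123456"),
+//        AuthenticationCode2: aws.String("654321"),
+//    })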
+// +// See ResyncMFADevice for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) ResyncMFADeviceWithContext(ctx aws.Context, input *ResyncMFADeviceInput, opts ...request.Option) (*ResyncMFADeviceOutput, error) { + req, out := c.ResyncMFADeviceRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the SetDefaultPolicyVersion operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetDefaultPolicyVersion for more information on using the SetDefaultPolicyVersion +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetDefaultPolicyVersionRequest method. +// req, resp := client.SetDefaultPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetDefaultPolicyVersion +func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) { + op := &request.Operation{ + Name: opSetDefaultPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDefaultPolicyVersionInput{} + } + + output = &SetDefaultPolicyVersionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetDefaultPolicyVersion API operation for AWS Identity and Access Management. +// +// Sets the specified version of the specified policy as the policy's default +// (operative) version. +// +// This operation affects all users, groups, and roles that the policy is attached +// to. To list the users, groups, and roles that the policy is attached to, +// use the ListEntitiesForPolicy API. +// +// For information about managed policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation SetDefaultPolicyVersion for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. 
+// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetDefaultPolicyVersion +func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) { + req, out := c.SetDefaultPolicyVersionRequest(input) + return out, req.Send() +} + +// SetDefaultPolicyVersionWithContext is the same as SetDefaultPolicyVersion with the addition of +// the ability to pass a context and additional request options. +// +// See SetDefaultPolicyVersion for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) SetDefaultPolicyVersionWithContext(ctx aws.Context, input *SetDefaultPolicyVersionInput, opts ...request.Option) (*SetDefaultPolicyVersionOutput, error) { + req, out := c.SetDefaultPolicyVersionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSetSecurityTokenServicePreferences = "SetSecurityTokenServicePreferences" + +// SetSecurityTokenServicePreferencesRequest generates a "aws/request.Request" representing the +// client's request for the SetSecurityTokenServicePreferences operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SetSecurityTokenServicePreferences for more information on using the SetSecurityTokenServicePreferences +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SetSecurityTokenServicePreferencesRequest method. 
+// req, resp := client.SetSecurityTokenServicePreferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetSecurityTokenServicePreferences +func (c *IAM) SetSecurityTokenServicePreferencesRequest(input *SetSecurityTokenServicePreferencesInput) (req *request.Request, output *SetSecurityTokenServicePreferencesOutput) { + op := &request.Operation{ + Name: opSetSecurityTokenServicePreferences, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSecurityTokenServicePreferencesInput{} + } + + output = &SetSecurityTokenServicePreferencesOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// SetSecurityTokenServicePreferences API operation for AWS Identity and Access Management. +// +// Sets the specified version of the global endpoint token as the token version +// used for the AWS account. +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all STS requests go to a single endpoint at https://sts.amazonaws.com. +// AWS recommends using Regional STS endpoints to reduce latency, build in redundancy, +// and increase session token availability. For information about Regional endpoints +// for STS, see AWS Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// If you make an STS call to the global endpoint, the resulting session tokens +// might be valid in some Regions but not others. It depends on the version +// that is set in this operation. Version 1 tokens are valid only in AWS Regions +// that are available by default. These tokens do not work in manually enabled +// Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in +// all Regions. However, version 2 tokens are longer and might affect systems +// where you temporarily store tokens. For information, see Activating and Deactivating +// STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// To view the current session token version, see the GlobalEndpointTokenVersion +// entry in the response of the GetAccountSummary operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation SetSecurityTokenServicePreferences for usage and error information. +// +// Returned Error Codes: +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetSecurityTokenServicePreferences +func (c *IAM) SetSecurityTokenServicePreferences(input *SetSecurityTokenServicePreferencesInput) (*SetSecurityTokenServicePreferencesOutput, error) { + req, out := c.SetSecurityTokenServicePreferencesRequest(input) + return out, req.Send() +} + +// SetSecurityTokenServicePreferencesWithContext is the same as SetSecurityTokenServicePreferences with the addition of +// the ability to pass a context and additional request options. 
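+//
+// For illustration only (an editor's sketch, not part of the generated
+// reference), opting the account into version 2 tokens under an assumed
+// non-nil ctx:
+//
+//    _, err := svc.SetSecurityTokenServicePreferencesWithContext(ctx,
+//        &iam.SetSecurityTokenServicePreferencesInput{
+//            GlobalEndpointTokenVersion: aws.String("v2Token"),
+//        })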
+// +// See SetSecurityTokenServicePreferences for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) SetSecurityTokenServicePreferencesWithContext(ctx aws.Context, input *SetSecurityTokenServicePreferencesInput, opts ...request.Option) (*SetSecurityTokenServicePreferencesOutput, error) { + req, out := c.SetSecurityTokenServicePreferencesRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSimulateCustomPolicy = "SimulateCustomPolicy" + +// SimulateCustomPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SimulateCustomPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SimulateCustomPolicy for more information on using the SimulateCustomPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SimulateCustomPolicyRequest method. +// req, resp := client.SimulateCustomPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulateCustomPolicy +func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { + op := &request.Operation{ + Name: opSimulateCustomPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &SimulateCustomPolicyInput{} + } + + output = &SimulatePolicyResponse{} + req = c.newRequest(op, input, output) + return +} + +// SimulateCustomPolicy API operation for AWS Identity and Access Management. +// +// Simulate how a set of IAM policies and optionally a resource-based policy +// works with a list of API operations and AWS resources to determine the policies' +// effective permissions. The policies are provided as strings. +// +// The simulation does not perform the API operations; it only checks the authorization +// to determine if the simulated policies allow or deny the operations. +// +// If you want to simulate existing policies attached to an IAM user, group, +// or role, use SimulatePrincipalPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. You can use the Condition +// element of an IAM policy to evaluate context keys. To get the list of context +// keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy. +// +// If the output is long, you can use MaxItems and Marker parameters to paginate +// the results. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation SimulateCustomPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodePolicyEvaluationException "PolicyEvaluation" +// The request failed because a provided policy could not be successfully evaluated. +// An additional detailed message indicates the source of the failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulateCustomPolicy +func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulatePolicyResponse, error) { + req, out := c.SimulateCustomPolicyRequest(input) + return out, req.Send() +} + +// SimulateCustomPolicyWithContext is the same as SimulateCustomPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See SimulateCustomPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) SimulateCustomPolicyWithContext(ctx aws.Context, input *SimulateCustomPolicyInput, opts ...request.Option) (*SimulatePolicyResponse, error) { + req, out := c.SimulateCustomPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SimulateCustomPolicyPages iterates over the pages of a SimulateCustomPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SimulateCustomPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SimulateCustomPolicy operation. +// pageNum := 0 +// err := client.SimulateCustomPolicyPages(params, +// func(page *iam.SimulatePolicyResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) SimulateCustomPolicyPages(input *SimulateCustomPolicyInput, fn func(*SimulatePolicyResponse, bool) bool) error { + return c.SimulateCustomPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SimulateCustomPolicyPagesWithContext same as SimulateCustomPolicyPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) SimulateCustomPolicyPagesWithContext(ctx aws.Context, input *SimulateCustomPolicyInput, fn func(*SimulatePolicyResponse, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SimulateCustomPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SimulateCustomPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
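+			// Hand the prepared request back to the paginator, which sends it
+			// and surfaces each page to the caller's callback.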
+ return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) + } + return p.Err() +} + +const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy" + +// SimulatePrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SimulatePrincipalPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SimulatePrincipalPolicy for more information on using the SimulatePrincipalPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SimulatePrincipalPolicyRequest method. +// req, resp := client.SimulatePrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePrincipalPolicy +func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { + op := &request.Operation{ + Name: opSimulatePrincipalPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &SimulatePrincipalPolicyInput{} + } + + output = &SimulatePolicyResponse{} + req = c.newRequest(op, input, output) + return +} + +// SimulatePrincipalPolicy API operation for AWS Identity and Access Management. +// +// Simulate how a set of IAM policies attached to an IAM entity works with a +// list of API operations and AWS resources to determine the policies' effective +// permissions. The entity can be an IAM user, group, or role. If you specify +// a user, then the simulation also includes all of the policies that are attached +// to groups that the user belongs to. +// +// You can optionally include a list of one or more additional policies specified +// as strings to include in the simulation. If you want to simulate only policies +// specified as strings, use SimulateCustomPolicy instead. +// +// You can also optionally include one resource-based policy to be evaluated +// with each of the resources included in the simulation. +// +// The simulation does not perform the API operations; it only checks the authorization +// to determine if the simulated policies allow or deny the operations. +// +// Note: This API discloses information about the permissions granted to other +// users. If you do not want users to see other user's permissions, then consider +// allowing them to use SimulateCustomPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. You can use the Condition +// element of an IAM policy to evaluate context keys. To get the list of context +// keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy. +// +// If the output is long, you can use the MaxItems and Marker parameters to +// paginate the results. 
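+//
+// For illustration only (an editor's sketch, not part of the generated
+// reference), checking whether a user may call s3:ListBucket; the source ARN
+// is a placeholder:
+//
+//    resp, err := svc.SimulatePrincipalPolicy(&iam.SimulatePrincipalPolicyInput{
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/alice"),
+//        ActionNames:     []*string{aws.String("s3:ListBucket")},
+//    })
+//    if err == nil {
+//        fmt.Println(resp.EvaluationResults)
+//    }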
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation SimulatePrincipalPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodePolicyEvaluationException "PolicyEvaluation" +// The request failed because a provided policy could not be successfully evaluated. +// An additional detailed message indicates the source of the failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePrincipalPolicy +func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*SimulatePolicyResponse, error) { + req, out := c.SimulatePrincipalPolicyRequest(input) + return out, req.Send() +} + +// SimulatePrincipalPolicyWithContext is the same as SimulatePrincipalPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See SimulatePrincipalPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) SimulatePrincipalPolicyWithContext(ctx aws.Context, input *SimulatePrincipalPolicyInput, opts ...request.Option) (*SimulatePolicyResponse, error) { + req, out := c.SimulatePrincipalPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// SimulatePrincipalPolicyPages iterates over the pages of a SimulatePrincipalPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SimulatePrincipalPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SimulatePrincipalPolicy operation. +// pageNum := 0 +// err := client.SimulatePrincipalPolicyPages(params, +// func(page *iam.SimulatePolicyResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) SimulatePrincipalPolicyPages(input *SimulatePrincipalPolicyInput, fn func(*SimulatePolicyResponse, bool) bool) error { + return c.SimulatePrincipalPolicyPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// SimulatePrincipalPolicyPagesWithContext same as SimulatePrincipalPolicyPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
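+//
+// A minimal sketch of bounding the whole pagination with a deadline (the
+// 30-second timeout is illustrative, and input and fn stand for the same
+// values shown in the sketch for the non-context variant above; requires the
+// standard context and time packages):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := svc.SimulatePrincipalPolicyPagesWithContext(ctx, input, fn)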
+func (c *IAM) SimulatePrincipalPolicyPagesWithContext(ctx aws.Context, input *SimulatePrincipalPolicyInput, fn func(*SimulatePolicyResponse, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *SimulatePrincipalPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.SimulatePrincipalPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*SimulatePolicyResponse), !p.HasNextPage()) + } + return p.Err() +} + +const opTagRole = "TagRole" + +// TagRoleRequest generates a "aws/request.Request" representing the +// client's request for the TagRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagRole for more information on using the TagRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagRoleRequest method. +// req, resp := client.TagRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagRole +func (c *IAM) TagRoleRequest(input *TagRoleInput) (req *request.Request, output *TagRoleOutput) { + op := &request.Operation{ + Name: opTagRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagRoleInput{} + } + + output = &TagRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// TagRole API operation for AWS Identity and Access Management. +// +// Adds one or more tags to an IAM role. The role can be a regular role or a +// service-linked role. If a tag with the same key name already exists, then +// that tag is overwritten with the new value. +// +// A tag consists of a key name and an associated value. By assigning tags to +// your resources, you can do the following: +// +// * Administrative grouping and discovery - Attach tags to resources to +// aid in organization and search. For example, you could search for all +// resources with the key name Project and the value MyImportantProject. +// Or search for all resources with the key name Cost Center and the value +// 41200. +// +// * Access control - Reference tags in IAM user-based and resource-based +// policies. You can use tags to restrict access to only an IAM user or role +// that has a specified tag attached. You can also restrict access to only +// those resources that have a certain tag attached. For examples of policies +// that show how to use tags to control access, see Control Access Using +// IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html) +// in the IAM User Guide. +// +// * Cost allocation - Use tags to help track which individuals and teams +// are using which AWS resources. +// +// * Make sure that you have no invalid tags and that you do not exceed the +// allowed number of tags per role. 
In either case, the entire request fails +// and no tags are added to the role. +// +// * AWS always interprets the tag Value as a single string. If you need +// to store an array, you can store comma-separated values in the string. +// However, you must interpret the value in your code. +// +// For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation TagRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagRole +func (c *IAM) TagRole(input *TagRoleInput) (*TagRoleOutput, error) { + req, out := c.TagRoleRequest(input) + return out, req.Send() +} + +// TagRoleWithContext is the same as TagRole with the addition of +// the ability to pass a context and additional request options. +// +// See TagRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) TagRoleWithContext(ctx aws.Context, input *TagRoleInput, opts ...request.Option) (*TagRoleOutput, error) { + req, out := c.TagRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opTagUser = "TagUser" + +// TagUserRequest generates a "aws/request.Request" representing the +// client's request for the TagUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See TagUser for more information on using the TagUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the TagUserRequest method. 
+// req, resp := client.TagUserRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagUser
+func (c *IAM) TagUserRequest(input *TagUserInput) (req *request.Request, output *TagUserOutput) {
+ op := &request.Operation{
+ Name: opTagUser,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &TagUserInput{}
+ }
+
+ output = &TagUserOutput{}
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+ return
+}
+
+// TagUser API operation for AWS Identity and Access Management.
+//
+// Adds one or more tags to an IAM user. If a tag with the same key name already
+// exists, then that tag is overwritten with the new value.
+//
+// A tag consists of a key name and an associated value. By assigning tags to
+// your resources, you can do the following:
+//
+// * Administrative grouping and discovery - Attach tags to resources to
+// aid in organization and search. For example, you could search for all
+// resources with the key name Project and the value MyImportantProject.
+// Or search for all resources with the key name Cost Center and the value
+// 41200.
+//
+// * Access control - Reference tags in IAM user-based and resource-based
+// policies. You can use tags to restrict access to only an IAM requesting
+// user or to a role that has a specified tag attached. You can also restrict
+// access to only those resources that have a certain tag attached. For examples
+// of policies that show how to use tags to control access, see Control Access
+// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html)
+// in the IAM User Guide.
+//
+// * Cost allocation - Use tags to help track which individuals and teams
+// are using which AWS resources.
+//
+// * Make sure that you have no invalid tags and that you do not exceed the
+// allowed number of tags per user. In either case, the entire request fails
+// and no tags are added to the user.
+//
+// * AWS always interprets the tag Value as a single string. If you need
+// to store an array, you can store comma-separated values in the string.
+// However, you must interpret the value in your code.
+//
+// For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation TagUser for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchEntityException "NoSuchEntity"
+// The request was rejected because it referenced a resource entity that does
+// not exist. The error message describes the resource.
+//
+// * ErrCodeLimitExceededException "LimitExceeded"
+// The request was rejected because it attempted to create resources beyond
+// the current AWS account limits. The error message describes the limit exceeded.
+//
+// * ErrCodeInvalidInputException "InvalidInput"
+// The request was rejected because an invalid or out-of-range value was supplied
+// for an input parameter.
+// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagUser +func (c *IAM) TagUser(input *TagUserInput) (*TagUserOutput, error) { + req, out := c.TagUserRequest(input) + return out, req.Send() +} + +// TagUserWithContext is the same as TagUser with the addition of +// the ability to pass a context and additional request options. +// +// See TagUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) TagUserWithContext(ctx aws.Context, input *TagUserInput, opts ...request.Option) (*TagUserOutput, error) { + req, out := c.TagUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagRole = "UntagRole" + +// UntagRoleRequest generates a "aws/request.Request" representing the +// client's request for the UntagRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagRole for more information on using the UntagRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagRoleRequest method. +// req, resp := client.UntagRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagRole +func (c *IAM) UntagRoleRequest(input *UntagRoleInput) (req *request.Request, output *UntagRoleOutput) { + op := &request.Operation{ + Name: opUntagRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagRoleInput{} + } + + output = &UntagRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagRole API operation for AWS Identity and Access Management. +// +// Removes the specified tags from the role. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UntagRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. 
The error message describes the resource. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagRole +func (c *IAM) UntagRole(input *UntagRoleInput) (*UntagRoleOutput, error) { + req, out := c.UntagRoleRequest(input) + return out, req.Send() +} + +// UntagRoleWithContext is the same as UntagRole with the addition of +// the ability to pass a context and additional request options. +// +// See UntagRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UntagRoleWithContext(ctx aws.Context, input *UntagRoleInput, opts ...request.Option) (*UntagRoleOutput, error) { + req, out := c.UntagRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUntagUser = "UntagUser" + +// UntagUserRequest generates a "aws/request.Request" representing the +// client's request for the UntagUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UntagUser for more information on using the UntagUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UntagUserRequest method. +// req, resp := client.UntagUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagUser +func (c *IAM) UntagUserRequest(input *UntagUserInput) (req *request.Request, output *UntagUserOutput) { + op := &request.Operation{ + Name: opUntagUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagUserInput{} + } + + output = &UntagUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UntagUser API operation for AWS Identity and Access Management. +// +// Removes the specified tags from the user. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UntagUser for usage and error information. 
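+//
+// A hypothetical call removing two tag keys from a user (the user name and
+// tag keys below are placeholders, and svc is assumed to be an *iam.IAM client):
+//
+//    _, err := svc.UntagUser(&iam.UntagUserInput{
+//        UserName: aws.String("alice"),
+//        TagKeys:  []*string{aws.String("Project"), aws.String("CostCenter")},
+//    })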
+// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagUser +func (c *IAM) UntagUser(input *UntagUserInput) (*UntagUserOutput, error) { + req, out := c.UntagUserRequest(input) + return out, req.Send() +} + +// UntagUserWithContext is the same as UntagUser with the addition of +// the ability to pass a context and additional request options. +// +// See UntagUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UntagUserWithContext(ctx aws.Context, input *UntagUserInput, opts ...request.Option) (*UntagUserOutput, error) { + req, out := c.UntagUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAccessKey = "UpdateAccessKey" + +// UpdateAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccessKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAccessKey for more information on using the UpdateAccessKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAccessKeyRequest method. +// req, resp := client.UpdateAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccessKey +func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request.Request, output *UpdateAccessKeyOutput) { + op := &request.Operation{ + Name: opUpdateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccessKeyInput{} + } + + output = &UpdateAccessKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAccessKey API operation for AWS Identity and Access Management. +// +// Changes the status of the specified access key from Active to Inactive, or +// vice versa. This operation can be used to disable a user's key as part of +// a key rotation workflow. +// +// If the UserName is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. 
This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// For information about rotating keys, see Managing Keys and Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateAccessKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccessKey +func (c *IAM) UpdateAccessKey(input *UpdateAccessKeyInput) (*UpdateAccessKeyOutput, error) { + req, out := c.UpdateAccessKeyRequest(input) + return out, req.Send() +} + +// UpdateAccessKeyWithContext is the same as UpdateAccessKey with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAccessKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateAccessKeyWithContext(ctx aws.Context, input *UpdateAccessKeyInput, opts ...request.Option) (*UpdateAccessKeyOutput, error) { + req, out := c.UpdateAccessKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" + +// UpdateAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccountPasswordPolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAccountPasswordPolicy for more information on using the UpdateAccountPasswordPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAccountPasswordPolicyRequest method. 
+// req, resp := client.UpdateAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccountPasswordPolicy +func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) (req *request.Request, output *UpdateAccountPasswordPolicyOutput) { + op := &request.Operation{ + Name: opUpdateAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountPasswordPolicyInput{} + } + + output = &UpdateAccountPasswordPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAccountPasswordPolicy API operation for AWS Identity and Access Management. +// +// Updates the password policy settings for the AWS account. +// +// * This operation does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts +// to its default value. See the Request Parameters section for each parameter's +// default value. Also note that some parameters do not allow the default +// parameter to be explicitly set. Instead, to invoke the default value, +// do not include that parameter when you invoke the operation. +// +// For more information about using a password policy, see Managing an IAM Password +// Policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateAccountPasswordPolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccountPasswordPolicy +func (c *IAM) UpdateAccountPasswordPolicy(input *UpdateAccountPasswordPolicyInput) (*UpdateAccountPasswordPolicyOutput, error) { + req, out := c.UpdateAccountPasswordPolicyRequest(input) + return out, req.Send() +} + +// UpdateAccountPasswordPolicyWithContext is the same as UpdateAccountPasswordPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAccountPasswordPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateAccountPasswordPolicyWithContext(ctx aws.Context, input *UpdateAccountPasswordPolicyInput, opts ...request.Option) (*UpdateAccountPasswordPolicyOutput, error) { + req, out := c.UpdateAccountPasswordPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" + +// UpdateAssumeRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssumeRolePolicy operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateAssumeRolePolicy for more information on using the UpdateAssumeRolePolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateAssumeRolePolicyRequest method. +// req, resp := client.UpdateAssumeRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAssumeRolePolicy +func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) (req *request.Request, output *UpdateAssumeRolePolicyOutput) { + op := &request.Operation{ + Name: opUpdateAssumeRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssumeRolePolicyInput{} + } + + output = &UpdateAssumeRolePolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateAssumeRolePolicy API operation for AWS Identity and Access Management. +// +// Updates the policy that grants an IAM entity permission to assume a role. +// This is typically referred to as the "role trust policy". For more information +// about roles, go to Using Roles to Delegate Permissions and Federate Identities +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateAssumeRolePolicy for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAssumeRolePolicy +func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*UpdateAssumeRolePolicyOutput, error) { + req, out := c.UpdateAssumeRolePolicyRequest(input) + return out, req.Send() +} + +// UpdateAssumeRolePolicyWithContext is the same as UpdateAssumeRolePolicy with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateAssumeRolePolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateAssumeRolePolicyWithContext(ctx aws.Context, input *UpdateAssumeRolePolicyInput, opts ...request.Option) (*UpdateAssumeRolePolicyOutput, error) { + req, out := c.UpdateAssumeRolePolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateGroup = "UpdateGroup" + +// UpdateGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGroup operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateGroup for more information on using the UpdateGroup +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateGroupRequest method. +// req, resp := client.UpdateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateGroup +func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { + op := &request.Operation{ + Name: opUpdateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGroupInput{} + } + + output = &UpdateGroupOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateGroup API operation for AWS Identity and Access Management. +// +// Updates the name and/or the path of the specified IAM group. +// +// You should understand the implications of changing a group's path or name. +// For more information, see Renaming Users and Groups (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html) +// in the IAM User Guide. 
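+//
+// For instance, a sketch with made-up names that renames the group Managers
+// to MGRs while leaving its path unchanged (svc is assumed to be an *iam.IAM
+// client):
+//
+//    _, err := svc.UpdateGroup(&iam.UpdateGroupInput{
+//        GroupName:    aws.String("Managers"),
+//        NewGroupName: aws.String("MGRs"),
+//    })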
+//
+// The person making the request (the principal) must have permission to change
+// the group with the old name and the new name. For example, to change
+// the group named Managers to MGRs, the principal must have a policy that allows
+// them to update both groups. If the principal has permission to update the
+// Managers group, but not the MGRs group, then the update fails. For more information
+// about permissions, see Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation UpdateGroup for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchEntityException "NoSuchEntity"
+// The request was rejected because it referenced a resource entity that does
+// not exist. The error message describes the resource.
+//
+// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists"
+// The request was rejected because it attempted to create a resource that already
+// exists.
+//
+// * ErrCodeLimitExceededException "LimitExceeded"
+// The request was rejected because it attempted to create resources beyond
+// the current AWS account limits. The error message describes the limit exceeded.
+//
+// * ErrCodeServiceFailureException "ServiceFailure"
+// The request processing has failed because of an unknown error, exception
+// or failure.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateGroup
+func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) {
+ req, out := c.UpdateGroupRequest(input)
+ return out, req.Send()
+}
+
+// UpdateGroupWithContext is the same as UpdateGroup with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateGroup for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) UpdateGroupWithContext(ctx aws.Context, input *UpdateGroupInput, opts ...request.Option) (*UpdateGroupOutput, error) {
+ req, out := c.UpdateGroupRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
+}
+
+const opUpdateLoginProfile = "UpdateLoginProfile"
+
+// UpdateLoginProfileRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateLoginProfile operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See UpdateLoginProfile for more information on using the UpdateLoginProfile
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
+//
+//
+// // Example sending a request using the UpdateLoginProfileRequest method.
+// req, resp := client.UpdateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateLoginProfile +func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *request.Request, output *UpdateLoginProfileOutput) { + op := &request.Operation{ + Name: opUpdateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLoginProfileInput{} + } + + output = &UpdateLoginProfileOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateLoginProfile API operation for AWS Identity and Access Management. +// +// Changes the password for the specified IAM user. +// +// IAM users can change their own passwords by calling ChangePassword. For more +// information about modifying passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateLoginProfile for usage and error information. +// +// Returned Error Codes: +// * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" +// The request was rejected because it referenced an entity that is temporarily +// unmodifiable, such as a user name that was deleted and then recreated. The +// error indicates that the request is likely to succeed if you try again after +// waiting several minutes. The error message describes the entity. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodePasswordPolicyViolationException "PasswordPolicyViolation" +// The request was rejected because the provided password did not meet the requirements +// imposed by the account password policy. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateLoginProfile +func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginProfileOutput, error) { + req, out := c.UpdateLoginProfileRequest(input) + return out, req.Send() +} + +// UpdateLoginProfileWithContext is the same as UpdateLoginProfile with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateLoginProfile for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
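+//
+// A minimal sketch, assuming svc is an *iam.IAM client and the user name and
+// password are placeholders, that sets a new console password under a deadline
+// and forces a reset at next sign-in (requires the standard context and time
+// packages):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
+//    defer cancel()
+//    _, err := svc.UpdateLoginProfileWithContext(ctx, &iam.UpdateLoginProfileInput{
+//        UserName:              aws.String("alice"),
+//        Password:              aws.String("ExamplePassw0rd!"),
+//        PasswordResetRequired: aws.Bool(true),
+//    })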
+func (c *IAM) UpdateLoginProfileWithContext(ctx aws.Context, input *UpdateLoginProfileInput, opts ...request.Option) (*UpdateLoginProfileOutput, error) { + req, out := c.UpdateLoginProfileRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint" + +// UpdateOpenIDConnectProviderThumbprintRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOpenIDConnectProviderThumbprint operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateOpenIDConnectProviderThumbprint for more information on using the UpdateOpenIDConnectProviderThumbprint +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateOpenIDConnectProviderThumbprintRequest method. +// req, resp := client.UpdateOpenIDConnectProviderThumbprintRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateOpenIDConnectProviderThumbprint +func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) (req *request.Request, output *UpdateOpenIDConnectProviderThumbprintOutput) { + op := &request.Operation{ + Name: opUpdateOpenIDConnectProviderThumbprint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateOpenIDConnectProviderThumbprintInput{} + } + + output = &UpdateOpenIDConnectProviderThumbprintOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateOpenIDConnectProviderThumbprint API operation for AWS Identity and Access Management. +// +// Replaces the existing list of server certificate thumbprints associated with +// an OpenID Connect (OIDC) provider resource object with a new list of thumbprints. +// +// The list that you pass with this operation completely replaces the existing +// list of thumbprints. (The lists are not merged.) +// +// Typically, you need to update a thumbprint only when the identity provider's +// certificate changes, which occurs rarely. However, if the provider's certificate +// does change, any attempt to assume an IAM role that specifies the OIDC provider +// as a principal fails until the certificate thumbprint is updated. +// +// Trust for the OIDC provider is derived from the provider's certificate and +// is validated by the thumbprint. Therefore, it is best to limit access to +// the UpdateOpenIDConnectProviderThumbprint operation to highly privileged +// users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateOpenIDConnectProviderThumbprint for usage and error information. 
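+//
+// Because the new list completely replaces the old one, pass every thumbprint
+// that should remain valid. A sketch with placeholder ARN and thumbprint values
+// (svc is assumed to be an *iam.IAM client):
+//
+//    _, err := svc.UpdateOpenIDConnectProviderThumbprint(&iam.UpdateOpenIDConnectProviderThumbprintInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/oidc.example.com"),
+//        ThumbprintList:           []*string{aws.String("0123456789abcdef0123456789abcdef01234567")},
+//    })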
+// +// Returned Error Codes: +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateOpenIDConnectProviderThumbprint +func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectProviderThumbprintInput) (*UpdateOpenIDConnectProviderThumbprintOutput, error) { + req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input) + return out, req.Send() +} + +// UpdateOpenIDConnectProviderThumbprintWithContext is the same as UpdateOpenIDConnectProviderThumbprint with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateOpenIDConnectProviderThumbprint for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateOpenIDConnectProviderThumbprintWithContext(ctx aws.Context, input *UpdateOpenIDConnectProviderThumbprintInput, opts ...request.Option) (*UpdateOpenIDConnectProviderThumbprintOutput, error) { + req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRole = "UpdateRole" + +// UpdateRoleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRole operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRole for more information on using the UpdateRole +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRoleRequest method. +// req, resp := client.UpdateRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRole +func (c *IAM) UpdateRoleRequest(input *UpdateRoleInput) (req *request.Request, output *UpdateRoleOutput) { + op := &request.Operation{ + Name: opUpdateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRoleInput{} + } + + output = &UpdateRoleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateRole API operation for AWS Identity and Access Management. +// +// Updates the description or maximum session duration setting of a role. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRole +func (c *IAM) UpdateRole(input *UpdateRoleInput) (*UpdateRoleOutput, error) { + req, out := c.UpdateRoleRequest(input) + return out, req.Send() +} + +// UpdateRoleWithContext is the same as UpdateRole with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateRoleWithContext(ctx aws.Context, input *UpdateRoleInput, opts ...request.Option) (*UpdateRoleOutput, error) { + req, out := c.UpdateRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateRoleDescription = "UpdateRoleDescription" + +// UpdateRoleDescriptionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRoleDescription operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateRoleDescription for more information on using the UpdateRoleDescription +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateRoleDescriptionRequest method. 
+// req, resp := client.UpdateRoleDescriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleDescription +func (c *IAM) UpdateRoleDescriptionRequest(input *UpdateRoleDescriptionInput) (req *request.Request, output *UpdateRoleDescriptionOutput) { + op := &request.Operation{ + Name: opUpdateRoleDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRoleDescriptionInput{} + } + + output = &UpdateRoleDescriptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateRoleDescription API operation for AWS Identity and Access Management. +// +// Use UpdateRole instead. +// +// Modifies only the description of a role. This operation performs the same +// function as the Description parameter in the UpdateRole operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateRoleDescription for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeUnmodifiableEntityException "UnmodifiableEntity" +// The request was rejected because only the service that depends on the service-linked +// role can modify or delete the role on your behalf. The error message includes +// the name of the service that depends on this service-linked role. You must +// request the change through that service. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleDescription +func (c *IAM) UpdateRoleDescription(input *UpdateRoleDescriptionInput) (*UpdateRoleDescriptionOutput, error) { + req, out := c.UpdateRoleDescriptionRequest(input) + return out, req.Send() +} + +// UpdateRoleDescriptionWithContext is the same as UpdateRoleDescription with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateRoleDescription for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateRoleDescriptionWithContext(ctx aws.Context, input *UpdateRoleDescriptionInput, opts ...request.Option) (*UpdateRoleDescriptionOutput, error) { + req, out := c.UpdateRoleDescriptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateSAMLProvider = "UpdateSAMLProvider" + +// UpdateSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSAMLProvider operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSAMLProvider for more information on using the UpdateSAMLProvider +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSAMLProviderRequest method. +// req, resp := client.UpdateSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSAMLProvider +func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *request.Request, output *UpdateSAMLProviderOutput) { + op := &request.Operation{ + Name: opUpdateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSAMLProviderInput{} + } + + output = &UpdateSAMLProviderOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateSAMLProvider API operation for AWS Identity and Access Management. +// +// Updates the metadata document for an existing SAML provider resource object. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateSAMLProvider for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidInputException "InvalidInput" +// The request was rejected because an invalid or out-of-range value was supplied +// for an input parameter. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSAMLProvider +func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLProviderOutput, error) { + req, out := c.UpdateSAMLProviderRequest(input) + return out, req.Send() +} + +// UpdateSAMLProviderWithContext is the same as UpdateSAMLProvider with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSAMLProvider for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateSAMLProviderWithContext(ctx aws.Context, input *UpdateSAMLProviderInput, opts ...request.Option) (*UpdateSAMLProviderOutput, error) { + req, out := c.UpdateSAMLProviderRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
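+    // The request now carries the caller's context and any per-call options;
+    // the Send call below performs the HTTP round trip and fills in out.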
+ return out, req.Send() +} + +const opUpdateSSHPublicKey = "UpdateSSHPublicKey" + +// UpdateSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSSHPublicKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateSSHPublicKey for more information on using the UpdateSSHPublicKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSSHPublicKeyRequest method. +// req, resp := client.UpdateSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSSHPublicKey +func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *request.Request, output *UpdateSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUpdateSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSSHPublicKeyInput{} + } + + output = &UpdateSSHPublicKeyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateSSHPublicKey API operation for AWS Identity and Access Management. +// +// Sets the status of an IAM user's SSH public key to active or inactive. SSH +// public keys that are inactive cannot be used for authentication. This operation +// can be used to disable a user's SSH public key as part of a key rotation +// work flow. +// +// The SSH public key affected by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateSSHPublicKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSSHPublicKey +func (c *IAM) UpdateSSHPublicKey(input *UpdateSSHPublicKeyInput) (*UpdateSSHPublicKeyOutput, error) { + req, out := c.UpdateSSHPublicKeyRequest(input) + return out, req.Send() +} + +// UpdateSSHPublicKeyWithContext is the same as UpdateSSHPublicKey with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSSHPublicKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. 
In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) UpdateSSHPublicKeyWithContext(ctx aws.Context, input *UpdateSSHPublicKeyInput, opts ...request.Option) (*UpdateSSHPublicKeyOutput, error) {
+    req, out := c.UpdateSSHPublicKeyRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opUpdateServerCertificate = "UpdateServerCertificate"
+
+// UpdateServerCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateServerCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
+//
+// See UpdateServerCertificate for more information on using the UpdateServerCertificate
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+// // Example sending a request using the UpdateServerCertificateRequest method.
+// req, resp := client.UpdateServerCertificateRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServerCertificate
+func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) (req *request.Request, output *UpdateServerCertificateOutput) {
+    op := &request.Operation{
+        Name:       opUpdateServerCertificate,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &UpdateServerCertificateInput{}
+    }
+
+    output = &UpdateServerCertificateOutput{}
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+    return
+}
+
+// UpdateServerCertificate API operation for AWS Identity and Access Management.
+//
+// Updates the name and/or the path of the specified server certificate stored
+// in IAM.
+//
+// For more information about working with server certificates, see Working
+// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
+// in the IAM User Guide. This topic also includes a list of AWS services that
+// can use the server certificates that you manage with IAM.
+//
+// You should understand the implications of changing a server certificate's
+// path or name. For more information, see Renaming a Server Certificate (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts)
+// in the IAM User Guide.
+//
+// The person making the request (the principal) must have permission to change
+// the server certificate with the old name and the new name. For example, to
+// change the certificate named ProductionCert to ProdCert, the principal must
+// have a policy that allows them to update both certificates. If the principal
+// has permission to update the ProductionCert certificate, but not the ProdCert
+// certificate, then the update fails. For more information about permissions,
+// see Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html)
+// in the IAM User Guide.
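+//
+// // Illustrative sketch only (svc is assumed to be an *IAM client; the
+// // certificate names are the hypothetical ones from the paragraph above):
+// // renaming requires permission on both the old and the new name.
+// //    _, err := svc.UpdateServerCertificate(&iam.UpdateServerCertificateInput{
+// //        ServerCertificateName:    aws.String("ProductionCert"),
+// //        NewServerCertificateName: aws.String("ProdCert"),
+// //    })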
+// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateServerCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServerCertificate +func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) { + req, out := c.UpdateServerCertificateRequest(input) + return out, req.Send() +} + +// UpdateServerCertificateWithContext is the same as UpdateServerCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateServerCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateServerCertificateWithContext(ctx aws.Context, input *UpdateServerCertificateInput, opts ...request.Option) (*UpdateServerCertificateOutput, error) { + req, out := c.UpdateServerCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateServiceSpecificCredential = "UpdateServiceSpecificCredential" + +// UpdateServiceSpecificCredentialRequest generates a "aws/request.Request" representing the +// client's request for the UpdateServiceSpecificCredential operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateServiceSpecificCredential for more information on using the UpdateServiceSpecificCredential +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateServiceSpecificCredentialRequest method. 
+// req, resp := client.UpdateServiceSpecificCredentialRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+//     fmt.Println(resp)
+// }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServiceSpecificCredential
+func (c *IAM) UpdateServiceSpecificCredentialRequest(input *UpdateServiceSpecificCredentialInput) (req *request.Request, output *UpdateServiceSpecificCredentialOutput) {
+    op := &request.Operation{
+        Name:       opUpdateServiceSpecificCredential,
+        HTTPMethod: "POST",
+        HTTPPath:   "/",
+    }
+
+    if input == nil {
+        input = &UpdateServiceSpecificCredentialInput{}
+    }
+
+    output = &UpdateServiceSpecificCredentialOutput{}
+    req = c.newRequest(op, input, output)
+    req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+    return
+}
+
+// UpdateServiceSpecificCredential API operation for AWS Identity and Access Management.
+//
+// Sets the status of a service-specific credential to Active or Inactive. Service-specific
+// credentials that are inactive cannot be used for authentication to the service.
+// This operation can be used to disable a user's service-specific credential
+// as part of a credential rotation workflow.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Identity and Access Management's
+// API operation UpdateServiceSpecificCredential for usage and error information.
+//
+// Returned Error Codes:
+// * ErrCodeNoSuchEntityException "NoSuchEntity"
+// The request was rejected because it referenced a resource entity that does
+// not exist. The error message describes the resource.
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServiceSpecificCredential
+func (c *IAM) UpdateServiceSpecificCredential(input *UpdateServiceSpecificCredentialInput) (*UpdateServiceSpecificCredentialOutput, error) {
+    req, out := c.UpdateServiceSpecificCredentialRequest(input)
+    return out, req.Send()
+}
+
+// UpdateServiceSpecificCredentialWithContext is the same as UpdateServiceSpecificCredential with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UpdateServiceSpecificCredential for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *IAM) UpdateServiceSpecificCredentialWithContext(ctx aws.Context, input *UpdateServiceSpecificCredentialInput, opts ...request.Option) (*UpdateServiceSpecificCredentialOutput, error) {
+    req, out := c.UpdateServiceSpecificCredentialRequest(input)
+    req.SetContext(ctx)
+    req.ApplyOptions(opts...)
+    return out, req.Send()
+}
+
+const opUpdateSigningCertificate = "UpdateSigningCertificate"
+
+// UpdateSigningCertificateRequest generates a "aws/request.Request" representing the
+// client's request for the UpdateSigningCertificate operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
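+//
+// // A hedged sketch of the error-inspection pattern this file recommends
+// // (awserr is github.com/aws/aws-sdk-go/aws/awserr):
+// //    if err := req.Send(); err != nil {
+// //        if aerr, ok := err.(awserr.Error); ok {
+// //            fmt.Println(aerr.Code(), aerr.Message())
+// //        }
+// //    }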
+// +// See UpdateSigningCertificate for more information on using the UpdateSigningCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateSigningCertificateRequest method. +// req, resp := client.UpdateSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSigningCertificate +func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) (req *request.Request, output *UpdateSigningCertificateOutput) { + op := &request.Operation{ + Name: opUpdateSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSigningCertificateInput{} + } + + output = &UpdateSigningCertificateOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateSigningCertificate API operation for AWS Identity and Access Management. +// +// Changes the status of the specified user signing certificate from active +// to disabled, or vice versa. This operation can be used to disable an IAM +// user's signing certificate as part of a certificate rotation work flow. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateSigningCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSigningCertificate +func (c *IAM) UpdateSigningCertificate(input *UpdateSigningCertificateInput) (*UpdateSigningCertificateOutput, error) { + req, out := c.UpdateSigningCertificateRequest(input) + return out, req.Send() +} + +// UpdateSigningCertificateWithContext is the same as UpdateSigningCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateSigningCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateSigningCertificateWithContext(ctx aws.Context, input *UpdateSigningCertificateInput, opts ...request.Option) (*UpdateSigningCertificateOutput, error) { + req, out := c.UpdateSigningCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateUser = "UpdateUser" + +// UpdateUserRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUser operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateUser for more information on using the UpdateUser +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateUserRequest method. +// req, resp := client.UpdateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateUser +func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) { + op := &request.Operation{ + Name: opUpdateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserInput{} + } + + output = &UpdateUserOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(query.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// UpdateUser API operation for AWS Identity and Access Management. +// +// Updates the name and/or the path of the specified IAM user. +// +// You should understand the implications of changing an IAM user's path or +// name. For more information, see Renaming an IAM User (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming) +// and Renaming an IAM Group (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups_manage_rename.html) +// in the IAM User Guide. +// +// To change a user name, the requester must have appropriate permissions on +// both the source object and the target object. For example, to change Bob +// to Robert, the entity making the request must have permission on Bob and +// Robert, or must have permission on all (*). For more information about permissions, +// see Permissions and Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UpdateUser for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeEntityTemporarilyUnmodifiableException "EntityTemporarilyUnmodifiable" +// The request was rejected because it referenced an entity that is temporarily +// unmodifiable, such as a user name that was deleted and then recreated. The +// error indicates that the request is likely to succeed if you try again after +// waiting several minutes. The error message describes the entity. +// +// * ErrCodeConcurrentModificationException "ConcurrentModification" +// The request was rejected because multiple requests to change this object +// were submitted simultaneously. Wait a few minutes and submit your request +// again. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateUser +func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + return out, req.Send() +} + +// UpdateUserWithContext is the same as UpdateUser with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateUser for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UpdateUserWithContext(ctx aws.Context, input *UpdateUserInput, opts ...request.Option) (*UpdateUserOutput, error) { + req, out := c.UpdateUserRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadSSHPublicKey = "UploadSSHPublicKey" + +// UploadSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UploadSSHPublicKey operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadSSHPublicKey for more information on using the UploadSSHPublicKey +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadSSHPublicKeyRequest method. +// req, resp := client.UploadSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSSHPublicKey +func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *request.Request, output *UploadSSHPublicKeyOutput) { + op := &request.Operation{ + Name: opUploadSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSSHPublicKeyInput{} + } + + output = &UploadSSHPublicKeyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadSSHPublicKey API operation for AWS Identity and Access Management. 
+// +// Uploads an SSH public key and associates it with the specified IAM user. +// +// The SSH public key uploaded by this operation can be used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UploadSSHPublicKey for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeInvalidPublicKeyException "InvalidPublicKey" +// The request was rejected because the public key is malformed or otherwise +// invalid. +// +// * ErrCodeDuplicateSSHPublicKeyException "DuplicateSSHPublicKey" +// The request was rejected because the SSH public key is already associated +// with the specified IAM user. +// +// * ErrCodeUnrecognizedPublicKeyEncodingException "UnrecognizedPublicKeyEncoding" +// The request was rejected because the public key encoding format is unsupported +// or unrecognized. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSSHPublicKey +func (c *IAM) UploadSSHPublicKey(input *UploadSSHPublicKeyInput) (*UploadSSHPublicKeyOutput, error) { + req, out := c.UploadSSHPublicKeyRequest(input) + return out, req.Send() +} + +// UploadSSHPublicKeyWithContext is the same as UploadSSHPublicKey with the addition of +// the ability to pass a context and additional request options. +// +// See UploadSSHPublicKey for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UploadSSHPublicKeyWithContext(ctx aws.Context, input *UploadSSHPublicKeyInput, opts ...request.Option) (*UploadSSHPublicKeyOutput, error) { + req, out := c.UploadSSHPublicKeyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadServerCertificate = "UploadServerCertificate" + +// UploadServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadServerCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadServerCertificate for more information on using the UploadServerCertificate +// API call, and error handling. 
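+//
+// // For illustration only (svc is assumed to be an *IAM client and the header
+// // name is hypothetical), custom configuration can be attached to the
+// // returned request before Send:
+// //    req, out := svc.UploadServerCertificateRequest(input)
+// //    req.HTTPRequest.Header.Set("X-Example-Trace", "debug")
+// //    if err := req.Send(); err == nil {
+// //        fmt.Println(out)
+// //    }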
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadServerCertificateRequest method. +// req, resp := client.UploadServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadServerCertificate +func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput) (req *request.Request, output *UploadServerCertificateOutput) { + op := &request.Operation{ + Name: opUploadServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadServerCertificateInput{} + } + + output = &UploadServerCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadServerCertificate API operation for AWS Identity and Access Management. +// +// Uploads a server certificate entity for the AWS account. The server certificate +// entity includes a public key certificate, a private key, and an optional +// certificate chain, which should all be PEM-encoded. +// +// We recommend that you use AWS Certificate Manager (https://docs.aws.amazon.com/acm/) +// to provision, manage, and deploy your server certificates. With ACM you can +// request a certificate, deploy it to AWS resources, and let ACM handle certificate +// renewals for you. Certificates provided by ACM are free. For more information +// about using ACM, see the AWS Certificate Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic includes a list of AWS services that can +// use the server certificates that you manage with IAM. +// +// For information about the number of server certificates you can upload, see +// Limitations on IAM Entities and Objects (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) +// in the IAM User Guide. +// +// Because the body of the public key certificate, private key, and the certificate +// chain can be large, you should use POST rather than GET when calling UploadServerCertificate. +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Calling the API by Making HTTP Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UploadServerCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. 
+// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeMalformedCertificateException "MalformedCertificate" +// The request was rejected because the certificate was malformed or expired. +// The error message describes the specific error. +// +// * ErrCodeKeyPairMismatchException "KeyPairMismatch" +// The request was rejected because the public key certificate and the private +// key do not match. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadServerCertificate +func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*UploadServerCertificateOutput, error) { + req, out := c.UploadServerCertificateRequest(input) + return out, req.Send() +} + +// UploadServerCertificateWithContext is the same as UploadServerCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See UploadServerCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UploadServerCertificateWithContext(ctx aws.Context, input *UploadServerCertificateInput, opts ...request.Option) (*UploadServerCertificateOutput, error) { + req, out := c.UploadServerCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadSigningCertificate = "UploadSigningCertificate" + +// UploadSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadSigningCertificate operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadSigningCertificate for more information on using the UploadSigningCertificate +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadSigningCertificateRequest method. +// req, resp := client.UploadSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSigningCertificate +func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) (req *request.Request, output *UploadSigningCertificateOutput) { + op := &request.Operation{ + Name: opUploadSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSigningCertificateInput{} + } + + output = &UploadSigningCertificateOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadSigningCertificate API operation for AWS Identity and Access Management. 
+// +// Uploads an X.509 signing certificate and associates it with the specified +// IAM user. Some AWS services use X.509 signing certificates to validate requests +// that are signed with a corresponding private key. When you upload the certificate, +// its default status is Active. +// +// If the UserName is not specified, the IAM user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// Because the body of an X.509 certificate can be large, you should use POST +// rather than GET when calling UploadSigningCertificate. For information about +// setting up signatures and authorization through the API, go to Signing AWS +// API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Identity and Access Management's +// API operation UploadSigningCertificate for usage and error information. +// +// Returned Error Codes: +// * ErrCodeLimitExceededException "LimitExceeded" +// The request was rejected because it attempted to create resources beyond +// the current AWS account limits. The error message describes the limit exceeded. +// +// * ErrCodeEntityAlreadyExistsException "EntityAlreadyExists" +// The request was rejected because it attempted to create a resource that already +// exists. +// +// * ErrCodeMalformedCertificateException "MalformedCertificate" +// The request was rejected because the certificate was malformed or expired. +// The error message describes the specific error. +// +// * ErrCodeInvalidCertificateException "InvalidCertificate" +// The request was rejected because the certificate is invalid. +// +// * ErrCodeDuplicateCertificateException "DuplicateCertificate" +// The request was rejected because the same certificate is associated with +// an IAM user in the account. +// +// * ErrCodeNoSuchEntityException "NoSuchEntity" +// The request was rejected because it referenced a resource entity that does +// not exist. The error message describes the resource. +// +// * ErrCodeServiceFailureException "ServiceFailure" +// The request processing has failed because of an unknown error, exception +// or failure. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSigningCertificate +func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*UploadSigningCertificateOutput, error) { + req, out := c.UploadSigningCertificateRequest(input) + return out, req.Send() +} + +// UploadSigningCertificateWithContext is the same as UploadSigningCertificate with the addition of +// the ability to pass a context and additional request options. +// +// See UploadSigningCertificate for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) UploadSigningCertificateWithContext(ctx aws.Context, input *UploadSigningCertificateInput, opts ...request.Option) (*UploadSigningCertificateOutput, error) { + req, out := c.UploadSigningCertificateRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// An object that contains details about when a principal in the reported AWS +// Organizations entity last attempted to access an AWS service. A principal +// can be an IAM user, an IAM role, or the AWS account root user within the +// reported Organizations entity. +// +// This data type is a response element in the GetOrganizationsAccessReport +// operation. +type AccessDetail struct { + _ struct{} `type:"structure"` + + // The path of the Organizations entity (root, organizational unit, or account) + // from which an authenticated principal last attempted to access the service. + // AWS does not report unauthenticated requests. + // + // This field is null if no principals (IAM users, IAM roles, or root users) + // in the reported Organizations entity attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + EntityPath *string `min:"19" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when an authenticated principal most recently attempted to access the service. + // AWS does not report unauthenticated requests. + // + // This field is null if no principals in the reported Organizations entity + // attempted to access the service within the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticatedTime *time.Time `type:"timestamp"` + + // The Region where the last service access attempt occurred. + // + // This field is null if no principals in the reported Organizations entity + // attempted to access the service within the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + Region *string `type:"string"` + + // The name of the service in which access was attempted. + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` + + // The namespace of the service in which access was attempted. + // + // To learn the service namespace of a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + // + // ServiceNamespace is a required field + ServiceNamespace *string `min:"1" type:"string" required:"true"` + + // The number of accounts with authenticated principals (root users, IAM users, + // and IAM roles) that attempted to access the service in the reporting period. 
+ TotalAuthenticatedEntities *int64 `type:"integer"` +} + +// String returns the string representation +func (s AccessDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessDetail) GoString() string { + return s.String() +} + +// SetEntityPath sets the EntityPath field's value. +func (s *AccessDetail) SetEntityPath(v string) *AccessDetail { + s.EntityPath = &v + return s +} + +// SetLastAuthenticatedTime sets the LastAuthenticatedTime field's value. +func (s *AccessDetail) SetLastAuthenticatedTime(v time.Time) *AccessDetail { + s.LastAuthenticatedTime = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *AccessDetail) SetRegion(v string) *AccessDetail { + s.Region = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *AccessDetail) SetServiceName(v string) *AccessDetail { + s.ServiceName = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *AccessDetail) SetServiceNamespace(v string) *AccessDetail { + s.ServiceNamespace = &v + return s +} + +// SetTotalAuthenticatedEntities sets the TotalAuthenticatedEntities field's value. +func (s *AccessDetail) SetTotalAuthenticatedEntities(v int64) *AccessDetail { + s.TotalAuthenticatedEntities = &v + return s +} + +// Contains information about an AWS access key. +// +// This data type is used as a response element in the CreateAccessKey and ListAccessKeys +// operations. +// +// The SecretAccessKey value is returned only in response to CreateAccessKey. +// You can get a secret access key only when you first create an access key; +// you cannot recover the secret access key later. If you lose a secret access +// key, you must create a new access key. +type AccessKey struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp"` + + // The secret key used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true" sensitive:"true"` + + // The status of the access key. Active means that the key is valid for API + // calls, while Inactive means it is not. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user that the access key is associated with. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKey) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *AccessKey) SetAccessKeyId(v string) *AccessKey { + s.AccessKeyId = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AccessKey) SetCreateDate(v time.Time) *AccessKey { + s.CreateDate = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *AccessKey) SetSecretAccessKey(v string) *AccessKey { + s.SecretAccessKey = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AccessKey) SetStatus(v string) *AccessKey { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. 
+func (s *AccessKey) SetUserName(v string) *AccessKey { + s.UserName = &v + return s +} + +// Contains information about the last time an AWS access key was used since +// IAM began tracking this information on April 22, 2015. +// +// This data type is used as a response element in the GetAccessKeyLastUsed +// operation. +type AccessKeyLastUsed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the access key was most recently used. This field is null in the following + // situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM began tracking + // this information. + // + // * There is no sign-in data associated with the user. + // + // LastUsedDate is a required field + LastUsedDate *time.Time `type:"timestamp" required:"true"` + + // The AWS Region where this access key was most recently used. The value for + // this field is "N/A" in the following situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM began tracking + // this information. + // + // * There is no sign-in data associated with the user. + // + // For more information about AWS Regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html) + // in the Amazon Web Services General Reference. + // + // Region is a required field + Region *string `type:"string" required:"true"` + + // The name of the AWS service with which this access key was most recently + // used. The value of this field is "N/A" in the following situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM started tracking + // this information. + // + // * There is no sign-in data associated with the user. + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKeyLastUsed) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKeyLastUsed) GoString() string { + return s.String() +} + +// SetLastUsedDate sets the LastUsedDate field's value. +func (s *AccessKeyLastUsed) SetLastUsedDate(v time.Time) *AccessKeyLastUsed { + s.LastUsedDate = &v + return s +} + +// SetRegion sets the Region field's value. +func (s *AccessKeyLastUsed) SetRegion(v string) *AccessKeyLastUsed { + s.Region = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *AccessKeyLastUsed) SetServiceName(v string) *AccessKeyLastUsed { + s.ServiceName = &v + return s +} + +// Contains information about an AWS access key, without its secret key. +// +// This data type is used as a response element in the ListAccessKeys operation. +type AccessKeyMetadata struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + AccessKeyId *string `min:"16" type:"string"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp"` + + // The status of the access key. Active means that the key is valid for API + // calls; Inactive means it is not. + Status *string `type:"string" enum:"statusType"` + + // The name of the IAM user that the key is associated with. 
+ UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AccessKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessKeyMetadata) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *AccessKeyMetadata) SetAccessKeyId(v string) *AccessKeyMetadata { + s.AccessKeyId = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *AccessKeyMetadata) SetCreateDate(v time.Time) *AccessKeyMetadata { + s.CreateDate = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *AccessKeyMetadata) SetStatus(v string) *AccessKeyMetadata { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *AccessKeyMetadata) SetUserName(v string) *AccessKeyMetadata { + s.UserName = &v + return s +} + +type AddClientIDToOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to add to the IAM OpenID Connect provider + // resource. + // + // ClientID is a required field + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // resource to add the client ID to. You can get a list of OIDC provider ARNs + // by using the ListOpenIDConnectProviders operation. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddClientIDToOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddClientIDToOpenIDConnectProviderInput"} + if s.ClientID == nil { + invalidParams.Add(request.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientID", 1)) + } + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientID sets the ClientID field's value. +func (s *AddClientIDToOpenIDConnectProviderInput) SetClientID(v string) *AddClientIDToOpenIDConnectProviderInput { + s.ClientID = &v + return s +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. 
+func (s *AddClientIDToOpenIDConnectProviderInput) SetOpenIDConnectProviderArn(v string) *AddClientIDToOpenIDConnectProviderInput { + s.OpenIDConnectProviderArn = &v + return s +} + +type AddClientIDToOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type AddRoleToInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to add. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddRoleToInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddRoleToInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *AddRoleToInstanceProfileInput) SetInstanceProfileName(v string) *AddRoleToInstanceProfileInput { + s.InstanceProfileName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *AddRoleToInstanceProfileInput) SetRoleName(v string) *AddRoleToInstanceProfileInput { + s.RoleName = &v + return s +} + +type AddRoleToInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddRoleToInstanceProfileOutput) GoString() string { + return s.String() +} + +type AddUserToGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to add. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUserToGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddUserToGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddUserToGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *AddUserToGroupInput) SetGroupName(v string) *AddUserToGroupInput { + s.GroupName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *AddUserToGroupInput) SetUserName(v string) *AddUserToGroupInput { + s.UserName = &v + return s +} + +type AddUserToGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddUserToGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddUserToGroupOutput) GoString() string { + return s.String() +} + +type AttachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
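+	//
+	// For illustration only: the AWS managed ReadOnlyAccess policy, for example,
+	// has the ARN arn:aws:iam::aws:policy/ReadOnlyAccess.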
+ // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *AttachGroupPolicyInput) SetGroupName(v string) *AttachGroupPolicyInput { + s.GroupName = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *AttachGroupPolicyInput) SetPolicyArn(v string) *AttachGroupPolicyInput { + s.PolicyArn = &v + return s +} + +type AttachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachGroupPolicyOutput) GoString() string { + return s.String() +} + +type AttachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
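+//
+// A hypothetical caller-side sketch (the SDK runs this check automatically
+// before sending a request, so calling it directly is optional):
+//
+//	input := &AttachRolePolicyInput{RoleName: aws.String("my-role")}
+//	if err := input.Validate(); err != nil {
+//		fmt.Println(err) // reports the missing required PolicyArn parameter
+//	}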
+func (s *AttachRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachRolePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *AttachRolePolicyInput) SetPolicyArn(v string) *AttachRolePolicyInput { + s.PolicyArn = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *AttachRolePolicyInput) SetRoleName(v string) *AttachRolePolicyInput { + s.RoleName = &v + return s +} + +type AttachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachRolePolicyOutput) GoString() string { + return s.String() +} + +type AttachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachUserPolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *AttachUserPolicyInput) SetPolicyArn(v string) *AttachUserPolicyInput { + s.PolicyArn = &v + return s +} + +// SetUserName sets the UserName field's value. 
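+//
+// For example, using the chainable setters with illustrative values (the
+// policy shown is the AWS managed AmazonS3ReadOnlyAccess policy):
+//
+//	input := (&AttachUserPolicyInput{}).
+//		SetPolicyArn("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess").
+//		SetUserName("alice")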
+func (s *AttachUserPolicyInput) SetUserName(v string) *AttachUserPolicyInput { + s.UserName = &v + return s +} + +type AttachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachUserPolicyOutput) GoString() string { + return s.String() +} + +// Contains information about an attached permissions boundary. +// +// An attached permissions boundary is a managed policy that has been attached +// to a user or role to set the permissions boundary. +// +// For more information about permissions boundaries, see Permissions Boundaries +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// in the IAM User Guide. +type AttachedPermissionsBoundary struct { + _ struct{} `type:"structure"` + + // The ARN of the policy used to set the permissions boundary for the user or + // role. + PermissionsBoundaryArn *string `min:"20" type:"string"` + + // The permissions boundary usage type that indicates what type of IAM resource + // is used as the permissions boundary for an entity. This data type can only + // have a value of Policy. + PermissionsBoundaryType *string `type:"string" enum:"PermissionsBoundaryAttachmentType"` +} + +// String returns the string representation +func (s AttachedPermissionsBoundary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedPermissionsBoundary) GoString() string { + return s.String() +} + +// SetPermissionsBoundaryArn sets the PermissionsBoundaryArn field's value. +func (s *AttachedPermissionsBoundary) SetPermissionsBoundaryArn(v string) *AttachedPermissionsBoundary { + s.PermissionsBoundaryArn = &v + return s +} + +// SetPermissionsBoundaryType sets the PermissionsBoundaryType field's value. +func (s *AttachedPermissionsBoundary) SetPermissionsBoundaryType(v string) *AttachedPermissionsBoundary { + s.PermissionsBoundaryType = &v + return s +} + +// Contains information about an attached policy. +// +// An attached policy is a managed policy that has been attached to a user, +// group, or role. This data type is used as a response element in the ListAttachedGroupPolicies, +// ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails +// operations. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type AttachedPolicy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The friendly name of the attached policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AttachedPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachedPolicy) GoString() string { + return s.String() +} + +// SetPolicyArn sets the PolicyArn field's value. 
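+//
+// AttachedPolicy values appear in responses, so a typical consumer only reads
+// them. A hypothetical sketch, where out stands for a ListAttachedRolePoliciesOutput
+// obtained elsewhere:
+//
+//	for _, p := range out.AttachedPolicies {
+//		fmt.Println(aws.StringValue(p.PolicyName), aws.StringValue(p.PolicyArn))
+//	}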
+func (s *AttachedPolicy) SetPolicyArn(v string) *AttachedPolicy {
+	s.PolicyArn = &v
+	return s
+}
+
+// SetPolicyName sets the PolicyName field's value.
+func (s *AttachedPolicy) SetPolicyName(v string) *AttachedPolicy {
+	s.PolicyName = &v
+	return s
+}
+
+type ChangePasswordInput struct {
+	_ struct{} `type:"structure"`
+
+	// The new password. The new password must conform to the AWS account's password
+	// policy, if one exists.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate
+	// this parameter is a string of characters. That string can include almost
+	// any printable ASCII character from the space (\u0020) through the end of
+	// the ASCII character range (\u00FF). You can also include the tab (\u0009),
+	// line feed (\u000A), and carriage return (\u000D) characters. Any of these
+	// characters are valid in a password. However, many tools, such as the AWS
+	// Management Console, might restrict the ability to type certain characters
+	// because they have special meaning within that tool.
+	//
+	// NewPassword is a required field
+	NewPassword *string `min:"1" type:"string" required:"true" sensitive:"true"`
+
+	// The IAM user's current password.
+	//
+	// OldPassword is a required field
+	OldPassword *string `min:"1" type:"string" required:"true" sensitive:"true"`
+}
+
+// String returns the string representation
+func (s ChangePasswordInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangePasswordInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ChangePasswordInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ChangePasswordInput"}
+	if s.NewPassword == nil {
+		invalidParams.Add(request.NewErrParamRequired("NewPassword"))
+	}
+	if s.NewPassword != nil && len(*s.NewPassword) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("NewPassword", 1))
+	}
+	if s.OldPassword == nil {
+		invalidParams.Add(request.NewErrParamRequired("OldPassword"))
+	}
+	if s.OldPassword != nil && len(*s.OldPassword) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("OldPassword", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetNewPassword sets the NewPassword field's value.
+func (s *ChangePasswordInput) SetNewPassword(v string) *ChangePasswordInput {
+	s.NewPassword = &v
+	return s
+}
+
+// SetOldPassword sets the OldPassword field's value.
+func (s *ChangePasswordInput) SetOldPassword(v string) *ChangePasswordInput {
+	s.OldPassword = &v
+	return s
+}
+
+type ChangePasswordOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ChangePasswordOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ChangePasswordOutput) GoString() string {
+	return s.String()
+}
+
+// Contains information about a condition context key. It includes the name
+// of the key and specifies the value (or values, if the context key supports
+// multiple values) to use in the simulation. This information is used when
+// evaluating the Condition elements of the input policies.
+//
+// This data type is used as an input parameter to SimulateCustomPolicy and
+// SimulatePrincipalPolicy.
+type ContextEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The full name of a condition context key, including the service prefix.
For + // example, aws:SourceIp or s3:VersionId. + ContextKeyName *string `min:"5" type:"string"` + + // The data type of the value (or values) specified in the ContextKeyValues + // parameter. + ContextKeyType *string `type:"string" enum:"ContextKeyTypeEnum"` + + // The value (or values, if the condition context key supports multiple values) + // to provide to the simulation when the key is referenced by a Condition element + // in an input policy. + ContextKeyValues []*string `type:"list"` +} + +// String returns the string representation +func (s ContextEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ContextEntry) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ContextEntry) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ContextEntry"} + if s.ContextKeyName != nil && len(*s.ContextKeyName) < 5 { + invalidParams.Add(request.NewErrParamMinLen("ContextKeyName", 5)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetContextKeyName sets the ContextKeyName field's value. +func (s *ContextEntry) SetContextKeyName(v string) *ContextEntry { + s.ContextKeyName = &v + return s +} + +// SetContextKeyType sets the ContextKeyType field's value. +func (s *ContextEntry) SetContextKeyType(v string) *ContextEntry { + s.ContextKeyType = &v + return s +} + +// SetContextKeyValues sets the ContextKeyValues field's value. +func (s *ContextEntry) SetContextKeyValues(v []*string) *ContextEntry { + s.ContextKeyValues = v + return s +} + +type CreateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM user that the new key will belong to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAccessKeyInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *CreateAccessKeyInput) SetUserName(v string) *CreateAccessKeyInput { + s.UserName = &v + return s +} + +// Contains the response to a successful CreateAccessKey request. +type CreateAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the access key. + // + // AccessKey is a required field + AccessKey *AccessKey `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccessKeyOutput) GoString() string { + return s.String() +} + +// SetAccessKey sets the AccessKey field's value. 
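+//
+// A hypothetical end-to-end sketch (svc stands for an IAM client created
+// elsewhere; the secret key is returned only in this response and cannot be
+// retrieved later):
+//
+//	out, err := svc.CreateAccessKey(&CreateAccessKeyInput{
+//		UserName: aws.String("alice"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.AccessKey.AccessKeyId))
+//	}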
+func (s *CreateAccessKeyOutput) SetAccessKey(v *AccessKey) *CreateAccessKeyOutput { + s.AccessKey = v + return s +} + +type CreateAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The account alias to create. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. + // + // AccountAlias is a required field + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAccountAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateAccountAliasInput"} + if s.AccountAlias == nil { + invalidParams.Add(request.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(request.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountAlias sets the AccountAlias field's value. +func (s *CreateAccountAliasInput) SetAccountAlias(v string) *CreateAccountAliasInput { + s.AccountAlias = &v + return s +} + +type CreateAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateAccountAliasOutput) GoString() string { + return s.String() +} + +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to create. Do not include the path in this value. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
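+//
+// For illustration: an empty name fails the min-length check below.
+//
+//	input := &CreateGroupInput{GroupName: aws.String("")}
+//	err := input.Validate() // reports that GroupName is shorter than 1 character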
+func (s *CreateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *CreateGroupInput) SetGroupName(v string) *CreateGroupInput { + s.GroupName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CreateGroupInput) SetPath(v string) *CreateGroupInput { + s.Path = &v + return s +} + +// Contains the response to a successful CreateGroup request. +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new group. + // + // Group is a required field + Group *Group `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateGroupOutput) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *CreateGroupOutput) SetGroup(v *Group) *CreateGroupOutput { + s.Group = v + return s +} + +type CreateInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to create. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
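+//
+// A hypothetical sketch of the usual create-then-populate flow (svc stands
+// for an IAM client created elsewhere; names are placeholders):
+//
+//	_, err := svc.CreateInstanceProfile(&CreateInstanceProfileInput{
+//		InstanceProfileName: aws.String("web-profile"),
+//	})
+//	// A role is then attached with AddRoleToInstanceProfile.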
+func (s *CreateInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *CreateInstanceProfileInput) SetInstanceProfileName(v string) *CreateInstanceProfileInput { + s.InstanceProfileName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CreateInstanceProfileInput) SetPath(v string) *CreateInstanceProfileInput { + s.Path = &v + return s +} + +// Contains the response to a successful CreateInstanceProfile request. +type CreateInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new instance profile. + // + // InstanceProfile is a required field + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetInstanceProfile sets the InstanceProfile field's value. +func (s *CreateInstanceProfileOutput) SetInstanceProfile(v *InstanceProfile) *CreateInstanceProfileOutput { + s.InstanceProfile = v + return s +} + +type CreateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate + // this parameter is a string of characters. That string can include almost + // any printable ASCII character from the space (\u0020) through the end of + // the ASCII character range (\u00FF). You can also include the tab (\u0009), + // line feed (\u000A), and carriage return (\u000D) characters. Any of these + // characters are valid in a password. However, many tools, such as the AWS + // Management Console, might restrict the ability to type certain characters + // because they have special meaning within that tool. + // + // Password is a required field + Password *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the IAM user to create a password for. The user must already + // exist. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
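+//
+// For example, with illustrative values (the password must also satisfy the
+// account's password policy, which Validate does not check):
+//
+//	input := &CreateLoginProfileInput{
+//		UserName:              aws.String("alice"),
+//		Password:              aws.String("an-initial-password"),
+//		PasswordResetRequired: aws.Bool(true),
+//	}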
+func (s *CreateLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateLoginProfileInput"} + if s.Password == nil { + invalidParams.Add(request.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPassword sets the Password field's value. +func (s *CreateLoginProfileInput) SetPassword(v string) *CreateLoginProfileInput { + s.Password = &v + return s +} + +// SetPasswordResetRequired sets the PasswordResetRequired field's value. +func (s *CreateLoginProfileInput) SetPasswordResetRequired(v bool) *CreateLoginProfileInput { + s.PasswordResetRequired = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *CreateLoginProfileInput) SetUserName(v string) *CreateLoginProfileInput { + s.UserName = &v + return s +} + +// Contains the response to a successful CreateLoginProfile request. +type CreateLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date. + // + // LoginProfile is a required field + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateLoginProfileOutput) GoString() string { + return s.String() +} + +// SetLoginProfile sets the LoginProfile field's value. +func (s *CreateLoginProfileOutput) SetLoginProfile(v *LoginProfile) *CreateLoginProfileOutput { + s.LoginProfile = v + return s +} + +type CreateOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences). When a mobile or web app + // registers with an OpenID Connect provider, they establish a value that identifies + // the application. (This is the value that's sent as the client_id parameter + // on OAuth requests.) + // + // You can register multiple client IDs with the same provider. For example, + // you might have multiple applications that use the same OIDC provider. You + // cannot register more than 100 client IDs with a single IAM OIDC provider. + // + // There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest + // operation accepts client IDs up to 255 characters long. + ClientIDList []*string `type:"list"` + + // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity + // provider's server certificates. Typically this list includes only one entry. + // However, IAM lets you have up to five thumbprints for an OIDC provider. This + // lets you maintain multiple thumbprints if the identity provider is rotating + // certificates. + // + // The server certificate thumbprint is the hex-encoded SHA-1 hash value of + // the X.509 certificate used by the domain where the OpenID Connect provider + // makes its keys available. It is always a 40-character string. + // + // You must provide at least one thumbprint when creating an IAM OIDC provider. 
+ // For example, assume that the OIDC provider is server.example.com and the + // provider stores its keys at https://keys.server.example.com/openid-connect. + // In that case, the thumbprint string would be the hex-encoded SHA-1 hash value + // of the certificate used by https://keys.server.example.com. + // + // For more information about obtaining the OIDC provider's thumbprint, see + // Obtaining the Thumbprint for an OpenID Connect Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) + // in the IAM User Guide. + // + // ThumbprintList is a required field + ThumbprintList []*string `type:"list" required:"true"` + + // The URL of the identity provider. The URL must begin with https:// and should + // correspond to the iss claim in the provider's OpenID Connect ID tokens. Per + // the OIDC standard, path components are allowed but query parameters are not. + // Typically the URL consists of only a hostname, like https://server.example.org + // or https://example.com. + // + // You cannot register the same provider multiple times in a single AWS account. + // If you try to submit a URL that has already been used for an OpenID Connect + // provider in the AWS account, you will get an error. + // + // Url is a required field + Url *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateOpenIDConnectProviderInput"} + if s.ThumbprintList == nil { + invalidParams.Add(request.NewErrParamRequired("ThumbprintList")) + } + if s.Url == nil { + invalidParams.Add(request.NewErrParamRequired("Url")) + } + if s.Url != nil && len(*s.Url) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Url", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientIDList sets the ClientIDList field's value. +func (s *CreateOpenIDConnectProviderInput) SetClientIDList(v []*string) *CreateOpenIDConnectProviderInput { + s.ClientIDList = v + return s +} + +// SetThumbprintList sets the ThumbprintList field's value. +func (s *CreateOpenIDConnectProviderInput) SetThumbprintList(v []*string) *CreateOpenIDConnectProviderInput { + s.ThumbprintList = v + return s +} + +// SetUrl sets the Url field's value. +func (s *CreateOpenIDConnectProviderInput) SetUrl(v string) *CreateOpenIDConnectProviderInput { + s.Url = &v + return s +} + +// Contains the response to a successful CreateOpenIDConnectProvider request. +type CreateOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that + // is created. For more information, see OpenIDConnectProviderListEntry. + OpenIDConnectProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. 
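+//
+// A hypothetical sketch tying the pieces together (svc stands for an IAM
+// client created elsewhere; all values are placeholders):
+//
+//	out, err := svc.CreateOpenIDConnectProvider(&CreateOpenIDConnectProviderInput{
+//		Url:            aws.String("https://server.example.com"),
+//		ClientIDList:   []*string{aws.String("my-application-ID")},
+//		ThumbprintList: []*string{aws.String("3768084dfb3d2b68b7897bf5f565da8efEXAMPLE")},
+//	})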
+func (s *CreateOpenIDConnectProviderOutput) SetOpenIDConnectProviderArn(v string) *CreateOpenIDConnectProviderOutput { + s.OpenIDConnectProviderArn = &v + return s +} + +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // A friendly description of the policy. + // + // Typically used to store information about the permissions defined in the + // policy. For example, "Grants access to production DynamoDB tables." + // + // The policy description is immutable. After a value is assigned, it cannot + // be changed. + Description *string `type:"string"` + + // The path for the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The JSON policy document that you want to use as the content for the new + // policy. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The friendly name of the policy. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
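+//
+// A hypothetical sketch with a minimal policy document (placeholder names;
+// the document grants s3:ListBucket on one bucket):
+//
+//	doc := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"arn:aws:s3:::example-bucket"}]}`
+//	input := &CreatePolicyInput{
+//		PolicyName:     aws.String("example-list-bucket"),
+//		PolicyDocument: aws.String(doc),
+//	}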
+func (s *CreatePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePolicyInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *CreatePolicyInput) SetDescription(v string) *CreatePolicyInput { + s.Description = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CreatePolicyInput) SetPath(v string) *CreatePolicyInput { + s.Path = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *CreatePolicyInput) SetPolicyDocument(v string) *CreatePolicyInput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *CreatePolicyInput) SetPolicyName(v string) *CreatePolicyInput { + s.PolicyName = &v + return s +} + +// Contains the response to a successful CreatePolicy request. +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *CreatePolicyOutput) SetPolicy(v *Policy) *CreatePolicyOutput { + s.Policy = v + return s +} + +type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy to which you want to add + // a new version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The JSON policy document that you want to use as the content for this new + // version of the policy. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // Specifies whether to set this version as the policy's default version. + // + // When this parameter is true, the new policy version becomes the operative + // version. That is, it becomes the version that is in effect for the IAM users, + // groups, and roles that the policy is attached to. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + SetAsDefault *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreatePolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreatePolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *CreatePolicyVersionInput) SetPolicyArn(v string) *CreatePolicyVersionInput { + s.PolicyArn = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *CreatePolicyVersionInput) SetPolicyDocument(v string) *CreatePolicyVersionInput { + s.PolicyDocument = &v + return s +} + +// SetSetAsDefault sets the SetAsDefault field's value. +func (s *CreatePolicyVersionInput) SetSetAsDefault(v bool) *CreatePolicyVersionInput { + s.SetAsDefault = &v + return s +} + +// Contains the response to a successful CreatePolicyVersion request. +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreatePolicyVersionOutput) GoString() string { + return s.String() +} + +// SetPolicyVersion sets the PolicyVersion field's value. 
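+//
+// For example, with placeholder values (doc stands for a JSON policy string
+// such as the one shown for CreatePolicyInput; SetAsDefault makes the new
+// version operative immediately):
+//
+//	input := (&CreatePolicyVersionInput{}).
+//		SetPolicyArn("arn:aws:iam::123456789012:policy/example-list-bucket").
+//		SetPolicyDocument(doc).
+//		SetSetAsDefault(true)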
+func (s *CreatePolicyVersionOutput) SetPolicyVersion(v *PolicyVersion) *CreatePolicyVersionOutput { + s.PolicyVersion = v + return s +} + +type CreateRoleInput struct { + _ struct{} `type:"structure"` + + // The trust relationship policy document that grants an entity permission to + // assume the role. + // + // In IAM, you must provide a JSON policy that has been converted to a string. + // However, for AWS CloudFormation templates formatted in YAML, you can provide + // the policy in JSON or YAML format. AWS CloudFormation always converts a YAML + // policy to JSON format before submitting it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // Upon success, the response includes the same trust policy as a URL-encoded + // JSON string. + // + // AssumeRolePolicyDocument is a required field + AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` + + // A description of the role. + Description *string `type:"string"` + + // The maximum session duration (in seconds) that you want to set for the specified + // role. If you do not specify a value for this setting, the default maximum + // of one hour is applied. This setting can have a value from 1 hour to 12 hours. + // + // Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds + // API parameter or the duration-seconds CLI parameter to request a longer session. + // The MaxSessionDuration setting determines the maximum duration that can be + // requested using the DurationSeconds parameter. If users don't specify a value + // for the DurationSeconds parameter, their security credentials are valid for + // one hour by default. This applies when you use the AssumeRole* API operations + // or the assume-role* CLI operations but does not apply when you use those + // operations to create a console URL. For more information, see Using IAM Roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the + // IAM User Guide. + MaxSessionDuration *int64 `min:"3600" type:"integer"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The ARN of the policy that is used to set the permissions boundary for the + // role. + PermissionsBoundary *string `min:"20" type:"string"` + + // The name of the role to create. + // + // IAM user, group, role, and policy names must be unique within the account. 
+ // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` + + // A list of tags that you want to attach to the newly created role. Each tag + // consists of a key name and an associated value. For more information about + // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags per role, then the entire request fails and the role is not created. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s CreateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateRoleInput"} + if s.AssumeRolePolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("AssumeRolePolicyDocument")) + } + if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) + } + if s.MaxSessionDuration != nil && *s.MaxSessionDuration < 3600 { + invalidParams.Add(request.NewErrParamMinValue("MaxSessionDuration", 3600)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PermissionsBoundary", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value. +func (s *CreateRoleInput) SetAssumeRolePolicyDocument(v string) *CreateRoleInput { + s.AssumeRolePolicyDocument = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *CreateRoleInput) SetDescription(v string) *CreateRoleInput { + s.Description = &v + return s +} + +// SetMaxSessionDuration sets the MaxSessionDuration field's value. +func (s *CreateRoleInput) SetMaxSessionDuration(v int64) *CreateRoleInput { + s.MaxSessionDuration = &v + return s +} + +// SetPath sets the Path field's value. +func (s *CreateRoleInput) SetPath(v string) *CreateRoleInput { + s.Path = &v + return s +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *CreateRoleInput) SetPermissionsBoundary(v string) *CreateRoleInput { + s.PermissionsBoundary = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *CreateRoleInput) SetRoleName(v string) *CreateRoleInput { + s.RoleName = &v + return s +} + +// SetTags sets the Tags field's value. 
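+//
+// A hypothetical sketch (placeholder names; the trust policy lets Amazon EC2
+// assume the role, a common pattern for instance profiles):
+//
+//	trust := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"Service":"ec2.amazonaws.com"},"Action":"sts:AssumeRole"}]}`
+//	input := &CreateRoleInput{
+//		RoleName:                 aws.String("web-role"),
+//		AssumeRolePolicyDocument: aws.String(trust),
+//	}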
+func (s *CreateRoleInput) SetTags(v []*Tag) *CreateRoleInput { + s.Tags = v + return s +} + +// Contains the response to a successful CreateRole request. +type CreateRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new role. + // + // Role is a required field + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateRoleOutput) GoString() string { + return s.String() +} + +// SetRole sets the Role field's value. +func (s *CreateRoleOutput) SetRole(v *Role) *CreateRoleOutput { + s.Role = v + return s +} + +type CreateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The name of the provider to create. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + // + // For more information, see About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) + // in the IAM User Guide + // + // SAMLMetadataDocument is a required field + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateSAMLProviderInput"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Name", 1)) + } + if s.SAMLMetadataDocument == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(request.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetName sets the Name field's value. +func (s *CreateSAMLProviderInput) SetName(v string) *CreateSAMLProviderInput { + s.Name = &v + return s +} + +// SetSAMLMetadataDocument sets the SAMLMetadataDocument field's value. +func (s *CreateSAMLProviderInput) SetSAMLMetadataDocument(v string) *CreateSAMLProviderInput { + s.SAMLMetadataDocument = &v + return s +} + +// Contains the response to a successful CreateSAMLProvider request. +type CreateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new SAML provider resource in IAM. 
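+	// For example: arn:aws:iam::123456789012:saml-provider/MyProvider (the
+	// account ID and provider name are placeholders).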
+ SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateSAMLProviderOutput) GoString() string { + return s.String() +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *CreateSAMLProviderOutput) SetSAMLProviderArn(v string) *CreateSAMLProviderOutput { + s.SAMLProviderArn = &v + return s +} + +type CreateServiceLinkedRoleInput struct { + _ struct{} `type:"structure"` + + // The service principal for the AWS service to which this role is attached. + // You use a string similar to a URL but without the http:// in front. For example: + // elasticbeanstalk.amazonaws.com. + // + // Service principals are unique and case-sensitive. To find the exact service + // principal for your service-linked role, see AWS Services That Work with IAM + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) + // in the IAM User Guide. Look for the services that have Yes in the Service-Linked + // Role column. Choose the Yes link to view the service-linked role documentation + // for that service. + // + // AWSServiceName is a required field + AWSServiceName *string `min:"1" type:"string" required:"true"` + + // A string that you provide, which is combined with the service-provided prefix + // to form the complete role name. If you make multiple requests for the same + // service, then you must supply a different CustomSuffix for each request. + // Otherwise the request fails with a duplicate role name error. For example, + // you could add -1 or -debug to the suffix. + // + // Some services do not support the CustomSuffix parameter. If you provide an + // optional suffix and the operation fails, try the operation again without + // the suffix. + CustomSuffix *string `min:"1" type:"string"` + + // The description of the role. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CreateServiceLinkedRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceLinkedRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceLinkedRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateServiceLinkedRoleInput"} + if s.AWSServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("AWSServiceName")) + } + if s.AWSServiceName != nil && len(*s.AWSServiceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AWSServiceName", 1)) + } + if s.CustomSuffix != nil && len(*s.CustomSuffix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CustomSuffix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAWSServiceName sets the AWSServiceName field's value. +func (s *CreateServiceLinkedRoleInput) SetAWSServiceName(v string) *CreateServiceLinkedRoleInput { + s.AWSServiceName = &v + return s +} + +// SetCustomSuffix sets the CustomSuffix field's value. +func (s *CreateServiceLinkedRoleInput) SetCustomSuffix(v string) *CreateServiceLinkedRoleInput { + s.CustomSuffix = &v + return s +} + +// SetDescription sets the Description field's value. 
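+//
+// For example, using the service principal mentioned above (the suffix and
+// description are illustrative):
+//
+//	input := (&CreateServiceLinkedRoleInput{}).
+//		SetAWSServiceName("elasticbeanstalk.amazonaws.com").
+//		SetDescription("Created for the example application")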
+func (s *CreateServiceLinkedRoleInput) SetDescription(v string) *CreateServiceLinkedRoleInput { + s.Description = &v + return s +} + +type CreateServiceLinkedRoleOutput struct { + _ struct{} `type:"structure"` + + // A Role object that contains details about the newly created role. + Role *Role `type:"structure"` +} + +// String returns the string representation +func (s CreateServiceLinkedRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceLinkedRoleOutput) GoString() string { + return s.String() +} + +// SetRole sets the Role field's value. +func (s *CreateServiceLinkedRoleOutput) SetRole(v *Role) *CreateServiceLinkedRoleOutput { + s.Role = v + return s +} + +type CreateServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS service that is to be associated with the credentials. + // The service you specify here is the only service that can be accessed using + // these credentials. + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` + + // The name of the IAM user that is to be associated with the credentials. The + // new service-specific credentials have the same permissions as the associated + // user except that they can be used only to access the specified service. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceSpecificCredentialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceSpecificCredentialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateServiceSpecificCredentialInput"} + if s.ServiceName == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceName")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServiceName sets the ServiceName field's value. +func (s *CreateServiceSpecificCredentialInput) SetServiceName(v string) *CreateServiceSpecificCredentialInput { + s.ServiceName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *CreateServiceSpecificCredentialInput) SetUserName(v string) *CreateServiceSpecificCredentialInput { + s.UserName = &v + return s +} + +type CreateServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about the newly created service-specific + // credential. + // + // This is the only time that the password for this credential set is available. + // It cannot be recovered later. Instead, you must reset the password with ResetServiceSpecificCredential. 
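+ //
+ // A sketch of capturing the one-time password (illustrative only; assumes an
+ // *iam.IAM client named svc, a CodeCommit use case, and elided error handling):
+ //
+ //   out, _ := svc.CreateServiceSpecificCredential(&iam.CreateServiceSpecificCredentialInput{
+ //       ServiceName: aws.String("codecommit.amazonaws.com"),
+ //       UserName:    aws.String("alice"),
+ //   })
+ //   password := aws.StringValue(out.ServiceSpecificCredential.ServicePassword)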
+ ServiceSpecificCredential *ServiceSpecificCredential `type:"structure"` +} + +// String returns the string representation +func (s CreateServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateServiceSpecificCredentialOutput) GoString() string { + return s.String() +} + +// SetServiceSpecificCredential sets the ServiceSpecificCredential field's value. +func (s *CreateServiceSpecificCredentialOutput) SetServiceSpecificCredential(v *ServiceSpecificCredential) *CreateServiceSpecificCredentialOutput { + s.ServiceSpecificCredential = v + return s +} + +type CreateUserInput struct { + _ struct{} `type:"structure"` + + // The path for the user name. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The ARN of the policy that is used to set the permissions boundary for the + // user. + PermissionsBoundary *string `min:"20" type:"string"` + + // A list of tags that you want to attach to the newly created user. Each tag + // consists of a key name and an associated value. For more information about + // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags per user, then the entire request fails and the user is not created. + Tags []*Tag `type:"list"` + + // The name of the user to create. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
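+//
+// Validate can also be called directly before sending; the SDK runs the same
+// checks automatically when the request is sent. An illustrative sketch:
+//
+//   input := &iam.CreateUserInput{UserName: aws.String("alice")}
+//   if err := input.Validate(); err != nil {
+//       // handle the client-side validation failure
+//   }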
+func (s *CreateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateUserInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PermissionsBoundary", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPath sets the Path field's value. +func (s *CreateUserInput) SetPath(v string) *CreateUserInput { + s.Path = &v + return s +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *CreateUserInput) SetPermissionsBoundary(v string) *CreateUserInput { + s.PermissionsBoundary = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateUserInput) SetTags(v []*Tag) *CreateUserInput { + s.Tags = v + return s +} + +// SetUserName sets the UserName field's value. +func (s *CreateUserInput) SetUserName(v string) *CreateUserInput { + s.UserName = &v + return s +} + +// Contains the response to a successful CreateUser request. +type CreateUserOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the new IAM user. + User *User `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateUserOutput) GoString() string { + return s.String() +} + +// SetUser sets the User field's value. +func (s *CreateUserOutput) SetUser(v *User) *CreateUserOutput { + s.User = v + return s +} + +type CreateVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The path for the virtual MFA device. For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The name of the virtual MFA device. Use with path to uniquely identify a + // virtual MFA device. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // VirtualMFADeviceName is a required field + VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVirtualMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateVirtualMFADeviceInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.VirtualMFADeviceName == nil { + invalidParams.Add(request.NewErrParamRequired("VirtualMFADeviceName")) + } + if s.VirtualMFADeviceName != nil && len(*s.VirtualMFADeviceName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("VirtualMFADeviceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPath sets the Path field's value. +func (s *CreateVirtualMFADeviceInput) SetPath(v string) *CreateVirtualMFADeviceInput { + s.Path = &v + return s +} + +// SetVirtualMFADeviceName sets the VirtualMFADeviceName field's value. +func (s *CreateVirtualMFADeviceInput) SetVirtualMFADeviceName(v string) *CreateVirtualMFADeviceInput { + s.VirtualMFADeviceName = &v + return s +} + +// Contains the response to a successful CreateVirtualMFADevice request. +type CreateVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new virtual MFA device. + // + // VirtualMFADevice is a required field + VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +// SetVirtualMFADevice sets the VirtualMFADevice field's value. +func (s *CreateVirtualMFADeviceOutput) SetVirtualMFADevice(v *VirtualMFADevice) *CreateVirtualMFADeviceOutput { + s.VirtualMFADevice = v + return s +} + +type DeactivateMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@:/- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to deactivate. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeactivateMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeactivateMFADeviceInput"} + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *DeactivateMFADeviceInput) SetSerialNumber(v string) *DeactivateMFADeviceInput { + s.SerialNumber = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeactivateMFADeviceInput) SetUserName(v string) *DeactivateMFADeviceInput { + s.UserName = &v + return s +} + +type DeactivateMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeactivateMFADeviceOutput) GoString() string { + return s.String() +} + +type DeleteAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID for the access key ID and secret access key you want to + // delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The name of the user whose access key pair you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
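+//
+// A minimal call sketch (illustrative; the access key ID below is AWS's
+// documentation placeholder, and svc is an *iam.IAM client created elsewhere):
+//
+//   _, err := svc.DeleteAccessKey(&iam.DeleteAccessKeyInput{
+//       AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//       UserName:    aws.String("alice"),
+//   })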
+func (s *DeleteAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccessKeyInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *DeleteAccessKeyInput) SetAccessKeyId(v string) *DeleteAccessKeyInput { + s.AccessKeyId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteAccessKeyInput) SetUserName(v string) *DeleteAccessKeyInput { + s.UserName = &v + return s +} + +type DeleteAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccessKeyOutput) GoString() string { + return s.String() +} + +type DeleteAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the account alias to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. + // + // AccountAlias is a required field + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAccountAliasInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAccountAliasInput"} + if s.AccountAlias == nil { + invalidParams.Add(request.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(request.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountAlias sets the AccountAlias field's value. 
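+//
+// A hedged sketch; the alias is illustrative and must match the documented
+// pattern (lowercase letters, digits, and single interior dashes):
+//
+//   input := (&iam.DeleteAccountAliasInput{}).SetAccountAlias("example-corp")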
+func (s *DeleteAccountAliasInput) SetAccountAlias(v string) *DeleteAccountAliasInput { + s.AccountAlias = &v + return s +} + +type DeleteAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountAliasOutput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +type DeleteAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type DeleteGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM group to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *DeleteGroupInput) SetGroupName(v string) *DeleteGroupInput { + s.GroupName = &v + return s +} + +type DeleteGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupOutput) GoString() string { + return s.String() +} + +type DeleteGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name identifying the policy document to delete. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *DeleteGroupPolicyInput) SetGroupName(v string) *DeleteGroupPolicyInput { + s.GroupName = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteGroupPolicyInput) SetPolicyName(v string) *DeleteGroupPolicyInput { + s.PolicyName = &v + return s +} + +type DeleteGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteGroupPolicyOutput) GoString() string { + return s.String() +} + +type DeleteInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. 
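+//
+// A minimal deletion sketch (illustrative; svc is an *iam.IAM client and the
+// profile name is made up):
+//
+//   _, err := svc.DeleteInstanceProfile(&iam.DeleteInstanceProfileInput{
+//       InstanceProfileName: aws.String("ExampleWebServerProfile"),
+//   })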
+func (s *DeleteInstanceProfileInput) SetInstanceProfileName(v string) *DeleteInstanceProfileInput { + s.InstanceProfileName = &v + return s +} + +type DeleteInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteInstanceProfileOutput) GoString() string { + return s.String() +} + +type DeleteLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose password you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteLoginProfileInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *DeleteLoginProfileInput) SetUserName(v string) *DeleteLoginProfileInput { + s.UserName = &v + return s +} + +type DeleteLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteLoginProfileOutput) GoString() string { + return s.String() +} + +type DeleteOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource + // object to delete. You can get a list of OpenID Connect provider resource + // ARNs by using the ListOpenIDConnectProviders operation. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
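+//
+// Client-side validation only checks that the ARN is present and at least 20
+// characters long; an illustrative input that passes it:
+//
+//   input := &iam.DeleteOpenIDConnectProviderInput{
+//       OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/example.com"),
+//   }
+//   err := input.Validate() // nil for the value above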
+func (s *DeleteOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteOpenIDConnectProviderInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. +func (s *DeleteOpenIDConnectProviderInput) SetOpenIDConnectProviderArn(v string) *DeleteOpenIDConnectProviderInput { + s.OpenIDConnectProviderArn = &v + return s +} + +type DeleteOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to delete. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *DeletePolicyInput) SetPolicyArn(v string) *DeletePolicyInput { + s.PolicyArn = &v + return s +} + +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyOutput) GoString() string { + return s.String() +} + +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy from which you want to delete + // a version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy version to delete. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *DeletePolicyVersionInput) SetPolicyArn(v string) *DeletePolicyVersionInput { + s.PolicyArn = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletePolicyVersionInput) SetVersionId(v string) *DeletePolicyVersionInput { + s.VersionId = &v + return s +} + +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePolicyVersionOutput) GoString() string { + return s.String() +} + +type DeleteRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. 
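+//
+// A minimal sketch (illustrative; svc is an *iam.IAM client). Note that the
+// service rejects the call while policies are still attached to the role:
+//
+//   _, err := svc.DeleteRole(&iam.DeleteRoleInput{
+//       RoleName: aws.String("ExampleAppRole"),
+//   })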
+func (s *DeleteRoleInput) SetRoleName(v string) *DeleteRoleInput { + s.RoleName = &v + return s +} + +type DeleteRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRoleOutput) GoString() string { + return s.String() +} + +type DeleteRolePermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM role from which you want to + // remove the permissions boundary. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePermissionsBoundaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRolePermissionsBoundaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRolePermissionsBoundaryInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *DeleteRolePermissionsBoundaryInput) SetRoleName(v string) *DeleteRolePermissionsBoundaryInput { + s.RoleName = &v + return s +} + +type DeleteRolePermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePermissionsBoundaryOutput) GoString() string { + return s.String() +} + +type DeleteRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the inline policy to delete from the specified IAM role. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the role that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
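+//
+// Both fields are required here; an illustrative input that passes client-side
+// validation:
+//
+//   input := &iam.DeleteRolePolicyInput{
+//       PolicyName: aws.String("ExampleInlinePolicy"),
+//       RoleName:   aws.String("ExampleAppRole"),
+//   }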
+func (s *DeleteRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteRolePolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. +func (s *DeleteRolePolicyInput) SetPolicyName(v string) *DeleteRolePolicyInput { + s.PolicyName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *DeleteRolePolicyInput) SetRoleName(v string) *DeleteRolePolicyInput { + s.RoleName = &v + return s +} + +type DeleteRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteRolePolicyOutput) GoString() string { + return s.String() +} + +type DeleteSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to delete. + // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSAMLProviderInput"} + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *DeleteSAMLProviderInput) SetSAMLProviderArn(v string) *DeleteSAMLProviderInput { + s.SAMLProviderArn = &v + return s +} + +type DeleteSAMLProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSAMLProviderOutput) GoString() string { + return s.String() +} + +type DeleteSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSSHPublicKeyInput"} + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSHPublicKeyId sets the SSHPublicKeyId field's value. +func (s *DeleteSSHPublicKeyInput) SetSSHPublicKeyId(v string) *DeleteSSHPublicKeyInput { + s.SSHPublicKeyId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteSSHPublicKeyInput) SetUserName(v string) *DeleteSSHPublicKeyInput { + s.UserName = &v + return s +} + +type DeleteSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type DeleteServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServerCertificateInput"} + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServerCertificateName sets the ServerCertificateName field's value. 
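+//
+// A minimal sketch (illustrative; svc is an *iam.IAM client and the
+// certificate name is made up):
+//
+//   _, err := svc.DeleteServerCertificate(&iam.DeleteServerCertificateInput{
+//       ServerCertificateName: aws.String("ExampleCert"),
+//   })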
+func (s *DeleteServerCertificateInput) SetServerCertificateName(v string) *DeleteServerCertificateInput {
+ s.ServerCertificateName = &v
+ return s
+}
+
+type DeleteServerCertificateOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteServerCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteServerCertificateOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteServiceLinkedRoleInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the service-linked role to be deleted.
+ //
+ // RoleName is a required field
+ RoleName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteServiceLinkedRoleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteServiceLinkedRoleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteServiceLinkedRoleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteServiceLinkedRoleInput"}
+ if s.RoleName == nil {
+ invalidParams.Add(request.NewErrParamRequired("RoleName"))
+ }
+ if s.RoleName != nil && len(*s.RoleName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("RoleName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *DeleteServiceLinkedRoleInput) SetRoleName(v string) *DeleteServiceLinkedRoleInput {
+ s.RoleName = &v
+ return s
+}
+
+type DeleteServiceLinkedRoleOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The deletion task identifier that you can use to check the status of the
+ // deletion. This identifier is returned in the format
+ // task/aws-service-role/<service-principal-name>/<role-name>/<task-uuid>.
+ //
+ // DeletionTaskId is a required field
+ DeletionTaskId *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteServiceLinkedRoleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteServiceLinkedRoleOutput) GoString() string {
+ return s.String()
+}
+
+// SetDeletionTaskId sets the DeletionTaskId field's value.
+func (s *DeleteServiceLinkedRoleOutput) SetDeletionTaskId(v string) *DeleteServiceLinkedRoleOutput {
+ s.DeletionTaskId = &v
+ return s
+}
+
+type DeleteServiceSpecificCredentialInput struct {
+ _ struct{} `type:"structure"`
+
+ // The unique identifier of the service-specific credential. You can get this
+ // value by calling ListServiceSpecificCredentials.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters that can consist of any upper or lowercased letter
+ // or digit.
+ //
+ // ServiceSpecificCredentialId is a required field
+ ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"`
+
+ // The name of the IAM user associated with the service-specific credential.
+ // If this value is not specified, then the operation assumes the user whose
+ // credentials are used to call the operation.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. 
You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceSpecificCredentialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceSpecificCredentialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteServiceSpecificCredentialInput"} + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServiceSpecificCredentialId sets the ServiceSpecificCredentialId field's value. +func (s *DeleteServiceSpecificCredentialInput) SetServiceSpecificCredentialId(v string) *DeleteServiceSpecificCredentialInput { + s.ServiceSpecificCredentialId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteServiceSpecificCredentialInput) SetUserName(v string) *DeleteServiceSpecificCredentialInput { + s.UserName = &v + return s +} + +type DeleteServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteServiceSpecificCredentialOutput) GoString() string { + return s.String() +} + +type DeleteSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate to delete. + // + // The format of this parameter, as described by its regex (http://wikipedia.org/wiki/regex) + // pattern, is a string of characters that can be upper- or lower-cased letters + // or digits. + // + // CertificateId is a required field + CertificateId *string `min:"24" type:"string" required:"true"` + + // The name of the user the signing certificate belongs to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
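+//
+// CertificateId must be at least 24 characters; the value below is a
+// placeholder of the right shape, not a real certificate ID:
+//
+//   input := &iam.DeleteSigningCertificateInput{
+//       CertificateId: aws.String("TA123456789012345678EXAMPLE"),
+//   }
+//   err := input.Validate() // nil: only CertificateId is required here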
+func (s *DeleteSigningCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteSigningCertificateInput"} + if s.CertificateId == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 24 { + invalidParams.Add(request.NewErrParamMinLen("CertificateId", 24)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateId sets the CertificateId field's value. +func (s *DeleteSigningCertificateInput) SetCertificateId(v string) *DeleteSigningCertificateInput { + s.CertificateId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteSigningCertificateInput) SetUserName(v string) *DeleteSigningCertificateInput { + s.UserName = &v + return s +} + +type DeleteSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteSigningCertificateOutput) GoString() string { + return s.String() +} + +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *DeleteUserInput) SetUserName(v string) *DeleteUserInput { + s.UserName = &v + return s +} + +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserOutput) GoString() string { + return s.String() +} + +type DeleteUserPermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM user from which you want to + // remove the permissions boundary. 
+ // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPermissionsBoundaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserPermissionsBoundaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserPermissionsBoundaryInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *DeleteUserPermissionsBoundaryInput) SetUserName(v string) *DeleteUserPermissionsBoundaryInput { + s.UserName = &v + return s +} + +type DeleteUserPermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPermissionsBoundaryOutput) GoString() string { + return s.String() +} + +type DeleteUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the user that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteUserPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. 
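+//
+// A minimal sketch of removing an inline user policy (illustrative; svc is an
+// *iam.IAM client):
+//
+//   _, err := svc.DeleteUserPolicy(&iam.DeleteUserPolicyInput{
+//       PolicyName: aws.String("ExampleInlinePolicy"),
+//       UserName:   aws.String("alice"),
+//   })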
+func (s *DeleteUserPolicyInput) SetPolicyName(v string) *DeleteUserPolicyInput { + s.PolicyName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *DeleteUserPolicyInput) SetUserName(v string) *DeleteUserPolicyInput { + s.UserName = &v + return s +} + +type DeleteUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteUserPolicyOutput) GoString() string { + return s.String() +} + +type DeleteVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the same as the ARN. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@:/- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVirtualMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVirtualMFADeviceInput"} + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *DeleteVirtualMFADeviceInput) SetSerialNumber(v string) *DeleteVirtualMFADeviceInput { + s.SerialNumber = &v + return s +} + +type DeleteVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVirtualMFADeviceOutput) GoString() string { + return s.String() +} + +// The reason that the service-linked role deletion failed. +// +// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus +// operation. +type DeletionTaskFailureReasonType struct { + _ struct{} `type:"structure"` + + // A short description of the reason that the service-linked role deletion failed. + Reason *string `type:"string"` + + // A list of objects that contains details about the service-linked role deletion + // failure, if that information is returned by the service. If the service-linked + // role has active sessions or if any resources that were used by the role have + // not been deleted from the linked service, the role can't be deleted. This + // parameter includes a list of the resources that are associated with the role + // and the Region in which the resources are being used. 
+ RoleUsageList []*RoleUsageType `type:"list"` +} + +// String returns the string representation +func (s DeletionTaskFailureReasonType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletionTaskFailureReasonType) GoString() string { + return s.String() +} + +// SetReason sets the Reason field's value. +func (s *DeletionTaskFailureReasonType) SetReason(v string) *DeletionTaskFailureReasonType { + s.Reason = &v + return s +} + +// SetRoleUsageList sets the RoleUsageList field's value. +func (s *DeletionTaskFailureReasonType) SetRoleUsageList(v []*RoleUsageType) *DeletionTaskFailureReasonType { + s.RoleUsageList = v + return s +} + +type DetachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM group to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *DetachGroupPolicyInput) SetGroupName(v string) *DetachGroupPolicyInput { + s.GroupName = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *DetachGroupPolicyInput) SetPolicyArn(v string) *DetachGroupPolicyInput { + s.PolicyArn = &v + return s +} + +type DetachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachGroupPolicyOutput) GoString() string { + return s.String() +} + +type DetachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. 
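+ //
+ // A minimal sketch of detaching a managed policy from a role (assumes an
+ // initialized IAM client named svc; the role name is a placeholder and the
+ // ARN shown is the AWS-managed ReadOnlyAccess policy):
+ //
+ //    _, err := svc.DetachRolePolicy(&iam.DetachRolePolicyInput{
+ //        PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+ //        RoleName:  aws.String("example-role"),
+ //    })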
+ // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM role to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachRolePolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *DetachRolePolicyInput) SetPolicyArn(v string) *DetachRolePolicyInput { + s.PolicyArn = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *DetachRolePolicyInput) SetRoleName(v string) *DetachRolePolicyInput { + s.RoleName = &v + return s +} + +type DetachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachRolePolicyOutput) GoString() string { + return s.String() +} + +type DetachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
+ // You can also include any of the following characters: _+=,.@-
+ //
+ // UserName is a required field
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachUserPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachUserPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachUserPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DetachUserPolicyInput"}
+ if s.PolicyArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicyArn"))
+ }
+ if s.PolicyArn != nil && len(*s.PolicyArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20))
+ }
+ if s.UserName == nil {
+ invalidParams.Add(request.NewErrParamRequired("UserName"))
+ }
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPolicyArn sets the PolicyArn field's value.
+func (s *DetachUserPolicyInput) SetPolicyArn(v string) *DetachUserPolicyInput {
+ s.PolicyArn = &v
+ return s
+}
+
+// SetUserName sets the UserName field's value.
+func (s *DetachUserPolicyInput) SetUserName(v string) *DetachUserPolicyInput {
+ s.UserName = &v
+ return s
+}
+
+type DetachUserPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DetachUserPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DetachUserPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type EnableMFADeviceInput struct {
+ _ struct{} `type:"structure"`
+
+ // An authentication code emitted by the device.
+ //
+ // The format for this parameter is a string of six digits.
+ //
+ // Submit your request immediately after generating the authentication codes.
+ // If you generate the codes and then wait too long to submit the request, the
+ // MFA device successfully associates with the user but the MFA device becomes
+ // out of sync. This happens because time-based one-time passwords (TOTP) expire
+ // after a short period of time. If this happens, you can resync the device
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_sync.html).
+ //
+ // AuthenticationCode1 is a required field
+ AuthenticationCode1 *string `min:"6" type:"string" required:"true"`
+
+ // A subsequent authentication code emitted by the device.
+ //
+ // The format for this parameter is a string of six digits.
+ //
+ // Submit your request immediately after generating the authentication codes.
+ // If you generate the codes and then wait too long to submit the request, the
+ // MFA device successfully associates with the user but the MFA device becomes
+ // out of sync. This happens because time-based one-time passwords (TOTP) expire
+ // after a short period of time. If this happens, you can resync the device
+ // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_sync.html).
+ //
+ // AuthenticationCode2 is a required field
+ AuthenticationCode2 *string `min:"6" type:"string" required:"true"`
+
+ // The serial number that uniquely identifies the MFA device. For virtual MFA
+ // devices, the serial number is the device ARN.
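+ // For a virtual device this looks like arn:aws:iam::123456789012:mfa/example-user
+ // (account ID and user name are placeholders).
+ //
+ // A hedged end-to-end sketch (svc is an assumed, initialized IAM client; the
+ // two codes are consecutive TOTP values read from the device):
+ //
+ //    _, err := svc.EnableMFADevice(&iam.EnableMFADeviceInput{
+ //        UserName:            aws.String("example-user"),
+ //        SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/example-user"),
+ //        AuthenticationCode1: aws.String("123456"),
+ //        AuthenticationCode2: aws.String("789012"),
+ //    })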
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@:/- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the IAM user for whom you want to enable the MFA device. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s EnableMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableMFADeviceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableMFADeviceInput"} + if s.AuthenticationCode1 == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationCode1")) + } + if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 { + invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode1", 6)) + } + if s.AuthenticationCode2 == nil { + invalidParams.Add(request.NewErrParamRequired("AuthenticationCode2")) + } + if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 { + invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode2", 6)) + } + if s.SerialNumber == nil { + invalidParams.Add(request.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAuthenticationCode1 sets the AuthenticationCode1 field's value. +func (s *EnableMFADeviceInput) SetAuthenticationCode1(v string) *EnableMFADeviceInput { + s.AuthenticationCode1 = &v + return s +} + +// SetAuthenticationCode2 sets the AuthenticationCode2 field's value. +func (s *EnableMFADeviceInput) SetAuthenticationCode2(v string) *EnableMFADeviceInput { + s.AuthenticationCode2 = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *EnableMFADeviceInput) SetSerialNumber(v string) *EnableMFADeviceInput { + s.SerialNumber = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *EnableMFADeviceInput) SetUserName(v string) *EnableMFADeviceInput { + s.UserName = &v + return s +} + +type EnableMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableMFADeviceOutput) GoString() string { + return s.String() +} + +// An object that contains details about when the IAM entities (users or roles) +// were last used in an attempt to access the specified AWS service. 
+// +// This data type is a response element in the GetServiceLastAccessedDetailsWithEntities +// operation. +type EntityDetails struct { + _ struct{} `type:"structure"` + + // The EntityInfo object that contains details about the entity (user or role). + // + // EntityInfo is a required field + EntityInfo *EntityInfo `type:"structure" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the authenticated entity last attempted to access AWS. AWS does not + // report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticated *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s EntityDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EntityDetails) GoString() string { + return s.String() +} + +// SetEntityInfo sets the EntityInfo field's value. +func (s *EntityDetails) SetEntityInfo(v *EntityInfo) *EntityDetails { + s.EntityInfo = v + return s +} + +// SetLastAuthenticated sets the LastAuthenticated field's value. +func (s *EntityDetails) SetLastAuthenticated(v time.Time) *EntityDetails { + s.LastAuthenticated = &v + return s +} + +// Contains details about the specified entity (user or role). +// +// This data type is an element of the EntityDetails object. +type EntityInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The identifier of the entity (user or role). + // + // Id is a required field + Id *string `min:"16" type:"string" required:"true"` + + // The name of the entity (user or role). + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The path to the entity (user or role). For more information about paths, + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The type of entity (user or role). + // + // Type is a required field + Type *string `type:"string" required:"true" enum:"policyOwnerEntityType"` +} + +// String returns the string representation +func (s EntityInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EntityInfo) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *EntityInfo) SetArn(v string) *EntityInfo { + s.Arn = &v + return s +} + +// SetId sets the Id field's value. +func (s *EntityInfo) SetId(v string) *EntityInfo { + s.Id = &v + return s +} + +// SetName sets the Name field's value. +func (s *EntityInfo) SetName(v string) *EntityInfo { + s.Name = &v + return s +} + +// SetPath sets the Path field's value. +func (s *EntityInfo) SetPath(v string) *EntityInfo { + s.Path = &v + return s +} + +// SetType sets the Type field's value. 
+func (s *EntityInfo) SetType(v string) *EntityInfo {
+ s.Type = &v
+ return s
+}
+
+// Contains information about the reason that the operation failed.
+//
+// This data type is used as a response element in the GetOrganizationsAccessReport,
+// GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities
+// operations.
+type ErrorDetails struct {
+ _ struct{} `type:"structure"`
+
+ // The error code associated with the operation failure.
+ //
+ // Code is a required field
+ Code *string `type:"string" required:"true"`
+
+ // Detailed information about the reason that the operation failed.
+ //
+ // Message is a required field
+ Message *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDetails) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDetails) GoString() string {
+ return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *ErrorDetails) SetCode(v string) *ErrorDetails {
+ s.Code = &v
+ return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *ErrorDetails) SetMessage(v string) *ErrorDetails {
+ s.Message = &v
+ return s
+}
+
+// Contains the results of a simulation.
+//
+// This data type is used by the return parameter of SimulateCustomPolicy and
+// SimulatePrincipalPolicy.
+type EvaluationResult struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the API operation tested on the indicated resource.
+ //
+ // EvalActionName is a required field
+ EvalActionName *string `min:"3" type:"string" required:"true"`
+
+ // The result of the simulation.
+ //
+ // EvalDecision is a required field
+ EvalDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"`
+
+ // Additional details about the results of the evaluation decision. When there
+ // are both IAM policies and resource policies, this parameter explains how
+ // each set of policies contributes to the final evaluation decision. When simulating
+ // cross-account access to a resource, both the resource-based policy and the
+ // caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html).
+ EvalDecisionDetails map[string]*string `type:"map"`
+
+ // The ARN of the resource that the indicated API operation was tested on.
+ EvalResourceName *string `min:"1" type:"string"`
+
+ // A list of the statements in the input policies that determine the result
+ // for this scenario. Remember that even if multiple statements allow the operation
+ // on the resource, if only one statement denies that operation, then the explicit
+ // deny overrides any allow. In addition, the deny statement is the only entry
+ // included in the result.
+ MatchedStatements []*Statement `type:"list"`
+
+ // A list of context keys that are required by the included input policies but
+ // that were not provided by one of the input parameters. This list is used
+ // when the resource in a simulation is "*", either explicitly, or when the
+ // ResourceArns parameter is blank. If you include a list of resources, then any
+ // missing context values are instead included under the ResourceSpecificResults
+ // section. To discover the context keys used by a set of policies, you can
+ // call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.
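+ //
+ // A hedged sketch of inspecting simulation results (assumptions: svc is an
+ // initialized IAM client and policyJSON holds a complete policy document):
+ //
+ //    out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
+ //        ActionNames:     []*string{aws.String("s3:ListBucket")},
+ //        PolicyInputList: []*string{aws.String(policyJSON)},
+ //    })
+ //    if err == nil {
+ //        for _, r := range out.EvaluationResults {
+ //            fmt.Println(*r.EvalActionName, *r.EvalDecision,
+ //                aws.StringValueSlice(r.MissingContextValues))
+ //        }
+ //    }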
+ MissingContextValues []*string `type:"list"` + + // A structure that details how Organizations and its service control policies + // affect the results of the simulation. Only applies if the simulated user's + // account is part of an organization. + OrganizationsDecisionDetail *OrganizationsDecisionDetail `type:"structure"` + + // The individual results of the simulation of the API operation specified in + // EvalActionName on each resource. + ResourceSpecificResults []*ResourceSpecificResult `type:"list"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EvaluationResult) GoString() string { + return s.String() +} + +// SetEvalActionName sets the EvalActionName field's value. +func (s *EvaluationResult) SetEvalActionName(v string) *EvaluationResult { + s.EvalActionName = &v + return s +} + +// SetEvalDecision sets the EvalDecision field's value. +func (s *EvaluationResult) SetEvalDecision(v string) *EvaluationResult { + s.EvalDecision = &v + return s +} + +// SetEvalDecisionDetails sets the EvalDecisionDetails field's value. +func (s *EvaluationResult) SetEvalDecisionDetails(v map[string]*string) *EvaluationResult { + s.EvalDecisionDetails = v + return s +} + +// SetEvalResourceName sets the EvalResourceName field's value. +func (s *EvaluationResult) SetEvalResourceName(v string) *EvaluationResult { + s.EvalResourceName = &v + return s +} + +// SetMatchedStatements sets the MatchedStatements field's value. +func (s *EvaluationResult) SetMatchedStatements(v []*Statement) *EvaluationResult { + s.MatchedStatements = v + return s +} + +// SetMissingContextValues sets the MissingContextValues field's value. +func (s *EvaluationResult) SetMissingContextValues(v []*string) *EvaluationResult { + s.MissingContextValues = v + return s +} + +// SetOrganizationsDecisionDetail sets the OrganizationsDecisionDetail field's value. +func (s *EvaluationResult) SetOrganizationsDecisionDetail(v *OrganizationsDecisionDetail) *EvaluationResult { + s.OrganizationsDecisionDetail = v + return s +} + +// SetResourceSpecificResults sets the ResourceSpecificResults field's value. +func (s *EvaluationResult) SetResourceSpecificResults(v []*ResourceSpecificResult) *EvaluationResult { + s.ResourceSpecificResults = v + return s +} + +type GenerateCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GenerateCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GenerateCredentialReport request. +type GenerateCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Information about the credential report. + Description *string `type:"string"` + + // Information about the state of the credential report. + State *string `type:"string" enum:"ReportStateType"` +} + +// String returns the string representation +func (s GenerateCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateCredentialReportOutput) GoString() string { + return s.String() +} + +// SetDescription sets the Description field's value. 
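+// The State field reports progress, so GenerateCredentialReport is typically
+// polled until the report is ready. A hedged sketch (svc is an assumed,
+// initialized IAM client; the generated ReportStateTypeComplete constant is
+// assumed to carry the COMPLETE state value):
+//
+//    for {
+//        out, err := svc.GenerateCredentialReport(&iam.GenerateCredentialReportInput{})
+//        if err != nil || aws.StringValue(out.State) == iam.ReportStateTypeComplete {
+//            break
+//        }
+//        time.Sleep(2 * time.Second)
+//    }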
+func (s *GenerateCredentialReportOutput) SetDescription(v string) *GenerateCredentialReportOutput { + s.Description = &v + return s +} + +// SetState sets the State field's value. +func (s *GenerateCredentialReportOutput) SetState(v string) *GenerateCredentialReportOutput { + s.State = &v + return s +} + +type GenerateOrganizationsAccessReportInput struct { + _ struct{} `type:"structure"` + + // The path of the AWS Organizations entity (root, OU, or account). You can + // build an entity path using the known structure of your organization. For + // example, assume that your account ID is 123456789012 and its parent OU ID + // is ou-rge0-awsabcde. The organization root ID is r-f6g7h8i9j0example and + // your organization ID is o-a1b2c3d4e5. Your entity path is o-a1b2c3d4e5/r-f6g7h8i9j0example/ou-rge0-awsabcde/123456789012. + // + // EntityPath is a required field + EntityPath *string `min:"19" type:"string" required:"true"` + + // The identifier of the AWS Organizations service control policy (SCP). This + // parameter is optional. + // + // This ID is used to generate information about when an account principal that + // is limited by the SCP attempted to access an AWS service. + OrganizationsPolicyId *string `type:"string"` +} + +// String returns the string representation +func (s GenerateOrganizationsAccessReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateOrganizationsAccessReportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateOrganizationsAccessReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateOrganizationsAccessReportInput"} + if s.EntityPath == nil { + invalidParams.Add(request.NewErrParamRequired("EntityPath")) + } + if s.EntityPath != nil && len(*s.EntityPath) < 19 { + invalidParams.Add(request.NewErrParamMinLen("EntityPath", 19)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntityPath sets the EntityPath field's value. +func (s *GenerateOrganizationsAccessReportInput) SetEntityPath(v string) *GenerateOrganizationsAccessReportInput { + s.EntityPath = &v + return s +} + +// SetOrganizationsPolicyId sets the OrganizationsPolicyId field's value. +func (s *GenerateOrganizationsAccessReportInput) SetOrganizationsPolicyId(v string) *GenerateOrganizationsAccessReportInput { + s.OrganizationsPolicyId = &v + return s +} + +type GenerateOrganizationsAccessReportOutput struct { + _ struct{} `type:"structure"` + + // The job identifier that you can use in the GetOrganizationsAccessReport operation. + JobId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GenerateOrganizationsAccessReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateOrganizationsAccessReportOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *GenerateOrganizationsAccessReportOutput) SetJobId(v string) *GenerateOrganizationsAccessReportOutput { + s.JobId = &v + return s +} + +type GenerateServiceLastAccessedDetailsInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM resource (user, group, role, or managed policy) used to + // generate information about when the resource was last used in an attempt + // to access an AWS service. 
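+ //
+ // A hedged sketch of the generate-then-fetch flow (svc is an assumed,
+ // initialized IAM client; the user ARN is a placeholder):
+ //
+ //    gen, err := svc.GenerateServiceLastAccessedDetails(&iam.GenerateServiceLastAccessedDetailsInput{
+ //        Arn: aws.String("arn:aws:iam::123456789012:user/example-user"),
+ //    })
+ //    // Later, pass gen.JobId to GetServiceLastAccessedDetails to read the report.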
+ // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GenerateServiceLastAccessedDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateServiceLastAccessedDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateServiceLastAccessedDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GenerateServiceLastAccessedDetailsInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *GenerateServiceLastAccessedDetailsInput) SetArn(v string) *GenerateServiceLastAccessedDetailsInput { + s.Arn = &v + return s +} + +type GenerateServiceLastAccessedDetailsOutput struct { + _ struct{} `type:"structure"` + + // The job ID that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities + // operations. + JobId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GenerateServiceLastAccessedDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GenerateServiceLastAccessedDetailsOutput) GoString() string { + return s.String() +} + +// SetJobId sets the JobId field's value. +func (s *GenerateServiceLastAccessedDetailsOutput) SetJobId(v string) *GenerateServiceLastAccessedDetailsOutput { + s.JobId = &v + return s +} + +type GetAccessKeyLastUsedInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyLastUsedInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyLastUsedInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *GetAccessKeyLastUsedInput) SetAccessKeyId(v string) *GetAccessKeyLastUsedInput { + s.AccessKeyId = &v + return s +} + +// Contains the response to a successful GetAccessKeyLastUsed request. It is +// also returned as a member of the AccessKeyMetaData structure returned by +// the ListAccessKeys action. +type GetAccessKeyLastUsedOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the last time the access key was used. 
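+ //
+ // A hedged sketch (svc is an assumed, initialized IAM client; the key ID is
+ // AWS's documented example value):
+ //
+ //    out, err := svc.GetAccessKeyLastUsed(&iam.GetAccessKeyLastUsedInput{
+ //        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+ //    })
+ //    if err == nil && out.AccessKeyLastUsed != nil {
+ //        fmt.Println(aws.TimeValue(out.AccessKeyLastUsed.LastUsedDate))
+ //    }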
+ AccessKeyLastUsed *AccessKeyLastUsed `type:"structure"` + + // The name of the AWS IAM user that owns this access key. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccessKeyLastUsedOutput) GoString() string { + return s.String() +} + +// SetAccessKeyLastUsed sets the AccessKeyLastUsed field's value. +func (s *GetAccessKeyLastUsedOutput) SetAccessKeyLastUsed(v *AccessKeyLastUsed) *GetAccessKeyLastUsedOutput { + s.AccessKeyLastUsed = v + return s +} + +// SetUserName sets the UserName field's value. +func (s *GetAccessKeyLastUsedOutput) SetUserName(v string) *GetAccessKeyLastUsedOutput { + s.UserName = &v + return s +} + +type GetAccountAuthorizationDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of entity types used to filter the results. Only the entities that + // match the types you specify are included in the output. Use the value LocalManagedPolicy + // to include customer managed policies. + // + // The format for this parameter is a comma-separated (if more than one) list + // of strings. Each string value in the list must be one of the valid values + // listed below. + Filter []*string `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccountAuthorizationDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAccountAuthorizationDetailsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *GetAccountAuthorizationDetailsInput) SetFilter(v []*string) *GetAccountAuthorizationDetailsInput { + s.Filter = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetAccountAuthorizationDetailsInput) SetMarker(v string) *GetAccountAuthorizationDetailsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. 
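+// Marker, MaxItems, and IsTruncated together implement manual pagination. A
+// hedged sketch of draining all pages (svc is an assumed, initialized IAM client):
+//
+//    input := &iam.GetAccountAuthorizationDetailsInput{}
+//    for {
+//        out, err := svc.GetAccountAuthorizationDetails(input)
+//        if err != nil {
+//            break
+//        }
+//        // Consume out.UserDetailList, out.RoleDetailList, and so on here.
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        input.Marker = out.Marker
+//    }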
+func (s *GetAccountAuthorizationDetailsInput) SetMaxItems(v int64) *GetAccountAuthorizationDetailsInput { + s.MaxItems = &v + return s +} + +// Contains the response to a successful GetAccountAuthorizationDetails request. +type GetAccountAuthorizationDetailsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information about IAM groups. + GroupDetailList []*GroupDetail `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list containing information about managed policies. + Policies []*ManagedPolicyDetail `type:"list"` + + // A list containing information about IAM roles. + RoleDetailList []*RoleDetail `type:"list"` + + // A list containing information about IAM users. + UserDetailList []*UserDetail `type:"list"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountAuthorizationDetailsOutput) GoString() string { + return s.String() +} + +// SetGroupDetailList sets the GroupDetailList field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetGroupDetailList(v []*GroupDetail) *GetAccountAuthorizationDetailsOutput { + s.GroupDetailList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetIsTruncated(v bool) *GetAccountAuthorizationDetailsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetMarker(v string) *GetAccountAuthorizationDetailsOutput { + s.Marker = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetPolicies(v []*ManagedPolicyDetail) *GetAccountAuthorizationDetailsOutput { + s.Policies = v + return s +} + +// SetRoleDetailList sets the RoleDetailList field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetRoleDetailList(v []*RoleDetail) *GetAccountAuthorizationDetailsOutput { + s.RoleDetailList = v + return s +} + +// SetUserDetailList sets the UserDetailList field's value. +func (s *GetAccountAuthorizationDetailsOutput) SetUserDetailList(v []*UserDetail) *GetAccountAuthorizationDetailsOutput { + s.UserDetailList = v + return s +} + +type GetAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountPasswordPolicy request. +type GetAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the account's password policy. 
+ // + // PasswordPolicy is a required field + PasswordPolicy *PasswordPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +// SetPasswordPolicy sets the PasswordPolicy field's value. +func (s *GetAccountPasswordPolicyOutput) SetPasswordPolicy(v *PasswordPolicy) *GetAccountPasswordPolicyOutput { + s.PasswordPolicy = v + return s +} + +type GetAccountSummaryInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetAccountSummary request. +type GetAccountSummaryOutput struct { + _ struct{} `type:"structure"` + + // A set of key–value pairs containing information about IAM entity usage + // and IAM quotas. + SummaryMap map[string]*int64 `type:"map"` +} + +// String returns the string representation +func (s GetAccountSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAccountSummaryOutput) GoString() string { + return s.String() +} + +// SetSummaryMap sets the SummaryMap field's value. +func (s *GetAccountSummaryOutput) SetSummaryMap(v map[string]*int64) *GetAccountSummaryOutput { + s.SummaryMap = v + return s +} + +type GetContextKeysForCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of policies for which you want the list of context keys referenced + // in those policies. Each document is specified as a string containing the + // complete, valid JSON text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyInputList is a required field + PolicyInputList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetContextKeysForCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetContextKeysForCustomPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetContextKeysForCustomPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetContextKeysForCustomPolicyInput"} + if s.PolicyInputList == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyInputList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyInputList sets the PolicyInputList field's value. 
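+// A hedged sketch of extracting context keys from a candidate policy (assumptions:
+// svc is an initialized IAM client and policyJSON holds a complete policy document):
+//
+//    out, err := svc.GetContextKeysForCustomPolicy(&iam.GetContextKeysForCustomPolicyInput{
+//        PolicyInputList: []*string{aws.String(policyJSON)},
+//    })
+//    // out.ContextKeyNames lists keys, such as aws:SourceIp, that the policy references.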
+func (s *GetContextKeysForCustomPolicyInput) SetPolicyInputList(v []*string) *GetContextKeysForCustomPolicyInput {
+ s.PolicyInputList = v
+ return s
+}
+
+// Contains the response to a successful GetContextKeysForPrincipalPolicy or
+// GetContextKeysForCustomPolicy request.
+type GetContextKeysForPolicyResponse struct {
+ _ struct{} `type:"structure"`
+
+ // The list of context keys that are referenced in the input policies.
+ ContextKeyNames []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPolicyResponse) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForPolicyResponse) GoString() string {
+ return s.String()
+}
+
+// SetContextKeyNames sets the ContextKeyNames field's value.
+func (s *GetContextKeysForPolicyResponse) SetContextKeyNames(v []*string) *GetContextKeysForPolicyResponse {
+ s.ContextKeyNames = v
+ return s
+}
+
+type GetContextKeysForPrincipalPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // An optional list of additional policies for which you want the list of context
+ // keys that are referenced.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this
+ // parameter is a string of characters consisting of the following:
+ //
+ // * Any printable ASCII character ranging from the space character (\u0020)
+ // through the end of the ASCII character range
+ //
+ // * The printable characters in the Basic Latin and Latin-1 Supplement character
+ // set (through \u00FF)
+ //
+ // * The special characters tab (\u0009), line feed (\u000A), and carriage
+ // return (\u000D)
+ PolicyInputList []*string `type:"list"`
+
+ // The ARN of a user, group, or role whose policies contain the context keys
+ // that you want listed. If you specify a user, the list includes context keys
+ // that are found in all policies that are attached to the user. The list also
+ // includes all groups that the user is a member of. If you pick a group or
+ // a role, then it includes only those context keys that are found in policies
+ // attached to that entity. Note that all parameters are shown in unencoded
+ // form here for clarity, but must be URL encoded to be included as a part of
+ // a real HTTP request.
+ //
+ // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
+ // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // in the AWS General Reference.
+ //
+ // PolicySourceArn is a required field
+ PolicySourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPrincipalPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetContextKeysForPrincipalPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContextKeysForPrincipalPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetContextKeysForPrincipalPolicyInput"}
+ if s.PolicySourceArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("PolicySourceArn"))
+ }
+ if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 {
+ invalidParams.Add(request.NewErrParamMinLen("PolicySourceArn", 20))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// SetPolicyInputList sets the PolicyInputList field's value.
+func (s *GetContextKeysForPrincipalPolicyInput) SetPolicyInputList(v []*string) *GetContextKeysForPrincipalPolicyInput { + s.PolicyInputList = v + return s +} + +// SetPolicySourceArn sets the PolicySourceArn field's value. +func (s *GetContextKeysForPrincipalPolicyInput) SetPolicySourceArn(v string) *GetContextKeysForPrincipalPolicyInput { + s.PolicySourceArn = &v + return s +} + +type GetCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCredentialReport request. +type GetCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Contains the credential report. The report is Base64-encoded. + // + // Content is automatically base64 encoded/decoded by the SDK. + Content []byte `type:"blob"` + + // The date and time when the credential report was created, in ISO 8601 date-time + // format (http://www.iso.org/iso/iso8601). + GeneratedTime *time.Time `type:"timestamp"` + + // The format (MIME type) of the credential report. + ReportFormat *string `type:"string" enum:"ReportFormatType"` +} + +// String returns the string representation +func (s GetCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCredentialReportOutput) GoString() string { + return s.String() +} + +// SetContent sets the Content field's value. +func (s *GetCredentialReportOutput) SetContent(v []byte) *GetCredentialReportOutput { + s.Content = v + return s +} + +// SetGeneratedTime sets the GeneratedTime field's value. +func (s *GetCredentialReportOutput) SetGeneratedTime(v time.Time) *GetCredentialReportOutput { + s.GeneratedTime = &v + return s +} + +// SetReportFormat sets the ReportFormat field's value. +func (s *GetCredentialReportOutput) SetReportFormat(v string) *GetCredentialReportOutput { + s.ReportFormat = &v + return s +} + +type GetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
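+ //
+ // Rather than managing Marker by hand, the SDK's generated GetGroupPages
+ // method can drive the same pagination with a callback. A hedged sketch
+ // (svc and the group name are placeholders):
+ //
+ //    err := svc.GetGroupPages(&iam.GetGroupInput{GroupName: aws.String("example-group")},
+ //        func(page *iam.GetGroupOutput, lastPage bool) bool {
+ //            for _, u := range page.Users {
+ //                fmt.Println(aws.StringValue(u.UserName))
+ //            }
+ //            return true // keep paging
+ //        })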
+ MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *GetGroupInput) SetGroupName(v string) *GetGroupInput { + s.GroupName = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetGroupInput) SetMarker(v string) *GetGroupInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *GetGroupInput) SetMaxItems(v int64) *GetGroupInput { + s.MaxItems = &v + return s +} + +// Contains the response to a successful GetGroup request. +type GetGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the group. + // + // Group is a required field + Group *Group `type:"structure" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of users in the group. + // + // Users is a required field + Users []*User `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupOutput) GoString() string { + return s.String() +} + +// SetGroup sets the Group field's value. +func (s *GetGroupOutput) SetGroup(v *Group) *GetGroupOutput { + s.Group = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetGroupOutput) SetIsTruncated(v bool) *GetGroupOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetGroupOutput) SetMarker(v string) *GetGroupOutput { + s.Marker = &v + return s +} + +// SetUsers sets the Users field's value. +func (s *GetGroupOutput) SetUsers(v []*User) *GetGroupOutput { + s.Users = v + return s +} + +type GetGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group the policy is associated with. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the policy document to get. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *GetGroupPolicyInput) SetGroupName(v string) *GetGroupPolicyInput { + s.GroupName = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *GetGroupPolicyInput) SetPolicyName(v string) *GetGroupPolicyInput { + s.PolicyName = &v + return s +} + +// Contains the response to a successful GetGroupPolicy request. +type GetGroupPolicyOutput struct { + _ struct{} `type:"structure"` + + // The group the policy is associated with. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetGroupPolicyOutput) GoString() string { + return s.String() +} + +// SetGroupName sets the GroupName field's value. +func (s *GetGroupPolicyOutput) SetGroupName(v string) *GetGroupPolicyOutput { + s.GroupName = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *GetGroupPolicyOutput) SetPolicyDocument(v string) *GetGroupPolicyOutput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. 
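+// The IAM service returns inline policy documents URL-encoded per RFC 3986 (an
+// assumption drawn from the IAM API documentation, not stated above), so the
+// document usually needs decoding before being parsed as JSON. A hedged sketch
+// (svc and both names are placeholders):
+//
+//    out, err := svc.GetGroupPolicy(&iam.GetGroupPolicyInput{
+//        GroupName:  aws.String("example-group"),
+//        PolicyName: aws.String("example-inline-policy"),
+//    })
+//    if err == nil {
+//        doc, _ := url.QueryUnescape(aws.StringValue(out.PolicyDocument))
+//        fmt.Println(doc)
+//    }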
+func (s *GetGroupPolicyOutput) SetPolicyName(v string) *GetGroupPolicyOutput { + s.PolicyName = &v + return s +} + +type GetInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to get information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *GetInstanceProfileInput) SetInstanceProfileName(v string) *GetInstanceProfileInput { + s.InstanceProfileName = &v + return s +} + +// Contains the response to a successful GetInstanceProfile request. +type GetInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the instance profile. + // + // InstanceProfile is a required field + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetInstanceProfileOutput) GoString() string { + return s.String() +} + +// SetInstanceProfile sets the InstanceProfile field's value. +func (s *GetInstanceProfileOutput) SetInstanceProfile(v *InstanceProfile) *GetInstanceProfileOutput { + s.InstanceProfile = v + return s +} + +type GetLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose login profile you want to retrieve. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetLoginProfileInput"} + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *GetLoginProfileInput) SetUserName(v string) *GetLoginProfileInput { + s.UserName = &v + return s +} + +// Contains the response to a successful GetLoginProfile request. +type GetLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date for the user. + // + // LoginProfile is a required field + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetLoginProfileOutput) GoString() string { + return s.String() +} + +// SetLoginProfile sets the LoginProfile field's value. +func (s *GetLoginProfileOutput) SetLoginProfile(v *LoginProfile) *GetLoginProfileOutput { + s.LoginProfile = v + return s +} + +type GetOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM + // to get information for. You can get a list of OIDC provider resource ARNs + // by using the ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOpenIDConnectProviderInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. +func (s *GetOpenIDConnectProviderInput) SetOpenIDConnectProviderArn(v string) *GetOpenIDConnectProviderInput { + s.OpenIDConnectProviderArn = &v + return s +} + +// Contains the response to a successful GetOpenIDConnectProvider request. +type GetOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences) that are associated with the + // specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. 
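+ // A hypothetical entry in this list is my-application-id, that is, the
+ // identifier under which a client application was registered with the OIDC
+ // identity provider (editorial example).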
+ ClientIDList []*string `type:"list"` + + // The date and time when the IAM OIDC provider resource object was created + // in the AWS account. + CreateDate *time.Time `type:"timestamp"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []*string `type:"list"` + + // The URL that the IAM OIDC provider resource object is associated with. For + // more information, see CreateOpenIDConnectProvider. + Url *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +// SetClientIDList sets the ClientIDList field's value. +func (s *GetOpenIDConnectProviderOutput) SetClientIDList(v []*string) *GetOpenIDConnectProviderOutput { + s.ClientIDList = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *GetOpenIDConnectProviderOutput) SetCreateDate(v time.Time) *GetOpenIDConnectProviderOutput { + s.CreateDate = &v + return s +} + +// SetThumbprintList sets the ThumbprintList field's value. +func (s *GetOpenIDConnectProviderOutput) SetThumbprintList(v []*string) *GetOpenIDConnectProviderOutput { + s.ThumbprintList = v + return s +} + +// SetUrl sets the Url field's value. +func (s *GetOpenIDConnectProviderOutput) SetUrl(v string) *GetOpenIDConnectProviderOutput { + s.Url = &v + return s +} + +type GetOrganizationsAccessReportInput struct { + _ struct{} `type:"structure"` + + // The identifier of the request generated by the GenerateOrganizationsAccessReport + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The key that is used to sort the results. If you choose the namespace key, + // the results are returned in alphabetical order. If you choose the time key, + // the results are sorted numerically by the date and time. + SortKey *string `type:"string" enum:"sortKeyType"` +} + +// String returns the string representation +func (s GetOrganizationsAccessReportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOrganizationsAccessReportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetOrganizationsAccessReportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetOrganizationsAccessReportInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *GetOrganizationsAccessReportInput) SetJobId(v string) *GetOrganizationsAccessReportInput { + s.JobId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetOrganizationsAccessReportInput) SetMarker(v string) *GetOrganizationsAccessReportInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *GetOrganizationsAccessReportInput) SetMaxItems(v int64) *GetOrganizationsAccessReportInput { + s.MaxItems = &v + return s +} + +// SetSortKey sets the SortKey field's value. +func (s *GetOrganizationsAccessReportInput) SetSortKey(v string) *GetOrganizationsAccessReportInput { + s.SortKey = &v + return s +} + +type GetOrganizationsAccessReportOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the most recent attempt to access the + // service. + AccessDetails []*AccessDetail `type:"list"` + + // Contains information about the reason that the operation failed. + // + // This data type is used as a response element in the GetOrganizationsAccessReport, + // GetServiceLastAccessedDetails, and GetServiceLastAccessedDetailsWithEntities + // operations. + ErrorDetails *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. + JobCompletionDate *time.Time `type:"timestamp"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"jobStatusType"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `min:"1" type:"string"` + + // The number of services that the applicable SCPs allow account principals + // to access. + NumberOfServicesAccessible *int64 `type:"integer"` + + // The number of services that account principals are allowed but did not attempt + // to access. 
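+ // Because this counts a subset of the accessible services, the value is
+ // never larger than NumberOfServicesAccessible (editorial note).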
+ NumberOfServicesNotAccessed *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetOrganizationsAccessReportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetOrganizationsAccessReportOutput) GoString() string { + return s.String() +} + +// SetAccessDetails sets the AccessDetails field's value. +func (s *GetOrganizationsAccessReportOutput) SetAccessDetails(v []*AccessDetail) *GetOrganizationsAccessReportOutput { + s.AccessDetails = v + return s +} + +// SetErrorDetails sets the ErrorDetails field's value. +func (s *GetOrganizationsAccessReportOutput) SetErrorDetails(v *ErrorDetails) *GetOrganizationsAccessReportOutput { + s.ErrorDetails = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetOrganizationsAccessReportOutput) SetIsTruncated(v bool) *GetOrganizationsAccessReportOutput { + s.IsTruncated = &v + return s +} + +// SetJobCompletionDate sets the JobCompletionDate field's value. +func (s *GetOrganizationsAccessReportOutput) SetJobCompletionDate(v time.Time) *GetOrganizationsAccessReportOutput { + s.JobCompletionDate = &v + return s +} + +// SetJobCreationDate sets the JobCreationDate field's value. +func (s *GetOrganizationsAccessReportOutput) SetJobCreationDate(v time.Time) *GetOrganizationsAccessReportOutput { + s.JobCreationDate = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *GetOrganizationsAccessReportOutput) SetJobStatus(v string) *GetOrganizationsAccessReportOutput { + s.JobStatus = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetOrganizationsAccessReportOutput) SetMarker(v string) *GetOrganizationsAccessReportOutput { + s.Marker = &v + return s +} + +// SetNumberOfServicesAccessible sets the NumberOfServicesAccessible field's value. +func (s *GetOrganizationsAccessReportOutput) SetNumberOfServicesAccessible(v int64) *GetOrganizationsAccessReportOutput { + s.NumberOfServicesAccessible = &v + return s +} + +// SetNumberOfServicesNotAccessed sets the NumberOfServicesNotAccessed field's value. +func (s *GetOrganizationsAccessReportOutput) SetNumberOfServicesNotAccessed(v int64) *GetOrganizationsAccessReportOutput { + s.NumberOfServicesNotAccessed = &v + return s +} + +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *GetPolicyInput) SetPolicyArn(v string) *GetPolicyInput { + s.PolicyArn = &v + return s +} + +// Contains the response to a successful GetPolicy request. +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetPolicyOutput) SetPolicy(v *Policy) *GetPolicyOutput { + s.Policy = v + return s +} + +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // Identifies the policy version to retrieve. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *GetPolicyVersionInput) SetPolicyArn(v string) *GetPolicyVersionInput { + s.PolicyArn = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetPolicyVersionInput) SetVersionId(v string) *GetPolicyVersionInput { + s.VersionId = &v + return s +} + +// Contains the response to a successful GetPolicyVersion request. +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy version. 
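+ //
+ // Editorial note: IAM returns the policy document inside a policy version
+ // URL-encoded (RFC 3986). A minimal decoding sketch, where out is a
+ // hypothetical *GetPolicyVersionOutput:
+ //
+ //    doc, err := url.QueryUnescape(aws.StringValue(out.PolicyVersion.Document))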
+ PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPolicyVersionOutput) GoString() string { + return s.String() +} + +// SetPolicyVersion sets the PolicyVersion field's value. +func (s *GetPolicyVersionOutput) SetPolicyVersion(v *PolicyVersion) *GetPolicyVersionOutput { + s.PolicyVersion = v + return s +} + +type GetRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM role to get information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRoleInput) SetRoleName(v string) *GetRoleInput { + s.RoleName = &v + return s +} + +// Contains the response to a successful GetRole request. +type GetRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM role. + // + // Role is a required field + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRoleOutput) GoString() string { + return s.String() +} + +// SetRole sets the Role field's value. +func (s *GetRoleOutput) SetRole(v *Role) *GetRoleOutput { + s.Role = v + return s +} + +type GetRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role associated with the policy. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetRolePolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. +func (s *GetRolePolicyInput) SetPolicyName(v string) *GetRolePolicyInput { + s.PolicyName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRolePolicyInput) SetRoleName(v string) *GetRolePolicyInput { + s.RoleName = &v + return s +} + +// Contains the response to a successful GetRolePolicy request. +type GetRolePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The role the policy is associated with. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetRolePolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *GetRolePolicyOutput) SetPolicyDocument(v string) *GetRolePolicyOutput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *GetRolePolicyOutput) SetPolicyName(v string) *GetRolePolicyOutput { + s.PolicyName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *GetRolePolicyOutput) SetRoleName(v string) *GetRolePolicyOutput { + s.RoleName = &v + return s +} + +type GetSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider resource object in IAM + // to get information about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
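+ //
+ // A hypothetical example of such an ARN:
+ //
+ //    arn:aws:iam::123456789012:saml-provider/MyUniversity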
+ // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSAMLProviderInput"} + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *GetSAMLProviderInput) SetSAMLProviderArn(v string) *GetSAMLProviderInput { + s.SAMLProviderArn = &v + return s +} + +// Contains the response to a successful GetSAMLProvider request. +type GetSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp"` + + // The XML metadata document that includes information about an identity provider. + SAMLMetadataDocument *string `min:"1000" type:"string"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s GetSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSAMLProviderOutput) GoString() string { + return s.String() +} + +// SetCreateDate sets the CreateDate field's value. +func (s *GetSAMLProviderOutput) SetCreateDate(v time.Time) *GetSAMLProviderOutput { + s.CreateDate = &v + return s +} + +// SetSAMLMetadataDocument sets the SAMLMetadataDocument field's value. +func (s *GetSAMLProviderOutput) SetSAMLMetadataDocument(v string) *GetSAMLProviderOutput { + s.SAMLMetadataDocument = &v + return s +} + +// SetValidUntil sets the ValidUntil field's value. +func (s *GetSAMLProviderOutput) SetValidUntil(v time.Time) *GetSAMLProviderOutput { + s.ValidUntil = &v + return s +} + +type GetSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // Specifies the public key encoding format to use in the response. To retrieve + // the public key in ssh-rsa format, use SSH. To retrieve the public key in + // PEM format, use PEM. + // + // Encoding is a required field + Encoding *string `type:"string" required:"true" enum:"encodingType"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetSSHPublicKeyInput"} + if s.Encoding == nil { + invalidParams.Add(request.NewErrParamRequired("Encoding")) + } + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncoding sets the Encoding field's value. +func (s *GetSSHPublicKeyInput) SetEncoding(v string) *GetSSHPublicKeyInput { + s.Encoding = &v + return s +} + +// SetSSHPublicKeyId sets the SSHPublicKeyId field's value. +func (s *GetSSHPublicKeyInput) SetSSHPublicKeyId(v string) *GetSSHPublicKeyInput { + s.SSHPublicKeyId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *GetSSHPublicKeyInput) SetUserName(v string) *GetSSHPublicKeyInput { + s.UserName = &v + return s +} + +// Contains the response to a successful GetSSHPublicKey request. +type GetSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s GetSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSSHPublicKeyOutput) GoString() string { + return s.String() +} + +// SetSSHPublicKey sets the SSHPublicKey field's value. +func (s *GetSSHPublicKeyOutput) SetSSHPublicKey(v *SSHPublicKey) *GetSSHPublicKeyOutput { + s.SSHPublicKey = v + return s +} + +type GetServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to retrieve information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServerCertificateInput"} + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServerCertificateName sets the ServerCertificateName field's value. +func (s *GetServerCertificateInput) SetServerCertificateName(v string) *GetServerCertificateInput { + s.ServerCertificateName = &v + return s +} + +// Contains the response to a successful GetServerCertificate request. +type GetServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the server certificate. + // + // ServerCertificate is a required field + ServerCertificate *ServerCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServerCertificateOutput) GoString() string { + return s.String() +} + +// SetServerCertificate sets the ServerCertificate field's value. +func (s *GetServerCertificateOutput) SetServerCertificate(v *ServerCertificate) *GetServerCertificateOutput { + s.ServerCertificate = v + return s +} + +type GetServiceLastAccessedDetailsInput struct { + _ struct{} `type:"structure"` + + // The ID of the request generated by the GenerateServiceLastAccessedDetails + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLastAccessedDetailsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetServiceLastAccessedDetailsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceLastAccessedDetailsInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *GetServiceLastAccessedDetailsInput) SetJobId(v string) *GetServiceLastAccessedDetailsInput { + s.JobId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetServiceLastAccessedDetailsInput) SetMarker(v string) *GetServiceLastAccessedDetailsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *GetServiceLastAccessedDetailsInput) SetMaxItems(v int64) *GetServiceLastAccessedDetailsInput { + s.MaxItems = &v + return s +} + +type GetServiceLastAccessedDetailsOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the reason the operation failed. + Error *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. + // + // JobCompletionDate is a required field + JobCompletionDate *time.Time `type:"timestamp" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"jobStatusType"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A ServiceLastAccessed object that contains details about the most recent + // attempt to access the service. + // + // ServicesLastAccessed is a required field + ServicesLastAccessed []*ServiceLastAccessed `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLastAccessedDetailsOutput) GoString() string { + return s.String() +} + +// SetError sets the Error field's value. 
+func (s *GetServiceLastAccessedDetailsOutput) SetError(v *ErrorDetails) *GetServiceLastAccessedDetailsOutput { + s.Error = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetIsTruncated(v bool) *GetServiceLastAccessedDetailsOutput { + s.IsTruncated = &v + return s +} + +// SetJobCompletionDate sets the JobCompletionDate field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetJobCompletionDate(v time.Time) *GetServiceLastAccessedDetailsOutput { + s.JobCompletionDate = &v + return s +} + +// SetJobCreationDate sets the JobCreationDate field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetJobCreationDate(v time.Time) *GetServiceLastAccessedDetailsOutput { + s.JobCreationDate = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetJobStatus(v string) *GetServiceLastAccessedDetailsOutput { + s.JobStatus = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetMarker(v string) *GetServiceLastAccessedDetailsOutput { + s.Marker = &v + return s +} + +// SetServicesLastAccessed sets the ServicesLastAccessed field's value. +func (s *GetServiceLastAccessedDetailsOutput) SetServicesLastAccessed(v []*ServiceLastAccessed) *GetServiceLastAccessedDetailsOutput { + s.ServicesLastAccessed = v + return s +} + +type GetServiceLastAccessedDetailsWithEntitiesInput struct { + _ struct{} `type:"structure"` + + // The ID of the request generated by the GenerateServiceLastAccessedDetails + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The service namespace for an AWS service. Provide the service namespace to + // learn when the IAM entity last attempted to access the specified service. + // + // To learn the service namespace for a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. 
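+ //
+ // For example, the service prefix for Amazon S3 is s3, so a request scoped
+ // to S3 would set this field to "s3" (editorial example; the linked
+ // reference is the authoritative list).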
+ // + // ServiceNamespace is a required field + ServiceNamespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceLastAccessedDetailsWithEntitiesInput"} + if s.JobId == nil { + invalidParams.Add(request.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(request.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + if s.ServiceNamespace != nil && len(*s.ServiceNamespace) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceNamespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetJobId sets the JobId field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) SetJobId(v string) *GetServiceLastAccessedDetailsWithEntitiesInput { + s.JobId = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) SetMarker(v string) *GetServiceLastAccessedDetailsWithEntitiesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) SetMaxItems(v int64) *GetServiceLastAccessedDetailsWithEntitiesInput { + s.MaxItems = &v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) SetServiceNamespace(v string) *GetServiceLastAccessedDetailsWithEntitiesInput { + s.ServiceNamespace = &v + return s +} + +type GetServiceLastAccessedDetailsWithEntitiesOutput struct { + _ struct{} `type:"structure"` + + // An EntityDetailsList object that contains details about when an IAM entity + // (user or role) used group or policy permissions in an attempt to access the + // specified AWS service. + // + // EntityDetailsList is a required field + EntityDetailsList []*EntityDetails `type:"list" required:"true"` + + // An object that contains details about the reason the operation failed. + Error *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // This field is null if the job is still in progress, as indicated by a job + // status value of IN_PROGRESS. 
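+ //
+ // A timestamp in this format looks like 2019-06-24T19:04:01Z (editorial
+ // example).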
+ // + // JobCompletionDate is a required field + JobCompletionDate *time.Time `type:"timestamp" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus *string `type:"string" required:"true" enum:"jobStatusType"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesOutput) GoString() string { + return s.String() +} + +// SetEntityDetailsList sets the EntityDetailsList field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetEntityDetailsList(v []*EntityDetails) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.EntityDetailsList = v + return s +} + +// SetError sets the Error field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetError(v *ErrorDetails) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.Error = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetIsTruncated(v bool) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.IsTruncated = &v + return s +} + +// SetJobCompletionDate sets the JobCompletionDate field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetJobCompletionDate(v time.Time) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.JobCompletionDate = &v + return s +} + +// SetJobCreationDate sets the JobCreationDate field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetJobCreationDate(v time.Time) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.JobCreationDate = &v + return s +} + +// SetJobStatus sets the JobStatus field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetJobStatus(v string) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.JobStatus = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *GetServiceLastAccessedDetailsWithEntitiesOutput) SetMarker(v string) *GetServiceLastAccessedDetailsWithEntitiesOutput { + s.Marker = &v + return s +} + +type GetServiceLinkedRoleDeletionStatusInput struct { + _ struct{} `type:"structure"` + + // The deletion task identifier. This identifier is returned by the DeleteServiceLinkedRole + // operation in the format task/aws-service-role/<service-principal-name>/<role-name>/<task-uuid>. + // + // DeletionTaskId is a required field + DeletionTaskId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLinkedRoleDeletionStatusInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetServiceLinkedRoleDeletionStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetServiceLinkedRoleDeletionStatusInput"} + if s.DeletionTaskId == nil { + invalidParams.Add(request.NewErrParamRequired("DeletionTaskId")) + } + if s.DeletionTaskId != nil && len(*s.DeletionTaskId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("DeletionTaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeletionTaskId sets the DeletionTaskId field's value. +func (s *GetServiceLinkedRoleDeletionStatusInput) SetDeletionTaskId(v string) *GetServiceLinkedRoleDeletionStatusInput { + s.DeletionTaskId = &v + return s +} + +type GetServiceLinkedRoleDeletionStatusOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the reason the deletion failed. + Reason *DeletionTaskFailureReasonType `type:"structure"` + + // The status of the deletion. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"DeletionTaskStatusType"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetServiceLinkedRoleDeletionStatusOutput) GoString() string { + return s.String() +} + +// SetReason sets the Reason field's value. +func (s *GetServiceLinkedRoleDeletionStatusOutput) SetReason(v *DeletionTaskFailureReasonType) *GetServiceLinkedRoleDeletionStatusOutput { + s.Reason = v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetServiceLinkedRoleDeletionStatusOutput) SetStatus(v string) *GetServiceLinkedRoleDeletionStatusOutput { + s.Status = &v + return s +} + +type GetUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to get information about. + // + // This parameter is optional. If it is not included, it defaults to the user + // making the request. This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetUserName sets the UserName field's value. +func (s *GetUserInput) SetUserName(v string) *GetUserInput { + s.UserName = &v + return s +} + +// Contains the response to a successful GetUser request. +type GetUserOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM user. + // + // Due to a service issue, password last used data does not include password + // use from May 3, 2018 22:50 PDT to May 23, 2018 14:08 PDT. 
This affects last + // sign-in (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html) + // dates shown in the IAM console and password last used dates in the IAM credential + // report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html), + // and returned by this GetUser API. If users signed in during the affected + // time, the password last used date that is returned is the date the user last + // signed in before May 3, 2018. For users that signed in after May 23, 2018 + // 14:08 PDT, the returned password last used date is accurate. + // + // You can use password last used information to identify unused credentials + // for deletion. For example, you might delete users who did not sign in to + // AWS in the last 90 days. In cases like this, we recommend that you adjust + // your evaluation window to include dates after May 23, 2018. Alternatively, + // if your users use access keys to access AWS programmatically you can refer + // to access key last used information because it is accurate for all dates. + // + // User is a required field + User *User `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserOutput) GoString() string { + return s.String() +} + +// SetUser sets the User field's value. +func (s *GetUserOutput) SetUser(v *User) *GetUserOutput { + s.User = v + return s +} + +type GetUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user who the policy is associated with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetUserPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyName sets the PolicyName field's value. 
+func (s *GetUserPolicyInput) SetPolicyName(v string) *GetUserPolicyInput { + s.PolicyName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *GetUserPolicyInput) SetUserName(v string) *GetUserPolicyInput { + s.UserName = &v + return s +} + +// Contains the response to a successful GetUserPolicy request. +type GetUserPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The user the policy is associated with. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetUserPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *GetUserPolicyOutput) SetPolicyDocument(v string) *GetUserPolicyOutput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *GetUserPolicyOutput) SetPolicyName(v string) *GetUserPolicyOutput { + s.PolicyName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *GetUserPolicyOutput) SetUserName(v string) *GetUserPolicyOutput { + s.UserName = &v + return s +} + +// Contains information about an IAM group entity. +// +// This data type is used as a response element in the following operations: +// +// * CreateGroup +// +// * GetGroup +// +// * ListGroups +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the group. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" required:"true"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // GroupId is a required field + GroupId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the group. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. 
+ // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Group) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Group) SetArn(v string) *Group { + s.Arn = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *Group) SetCreateDate(v time.Time) *Group { + s.CreateDate = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *Group) SetGroupId(v string) *Group { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *Group) SetGroupName(v string) *Group { + s.GroupName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *Group) SetPath(v string) *Group { + s.Path = &v + return s +} + +// Contains information about an IAM group, including all of the group's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +type GroupDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the group. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + GroupId *string `min:"16" type:"string"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the group. + GroupPolicyList []*PolicyDetail `type:"list"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GroupDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *GroupDetail) SetArn(v string) *GroupDetail { + s.Arn = &v + return s +} + +// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value. +func (s *GroupDetail) SetAttachedManagedPolicies(v []*AttachedPolicy) *GroupDetail { + s.AttachedManagedPolicies = v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *GroupDetail) SetCreateDate(v time.Time) *GroupDetail { + s.CreateDate = &v + return s +} + +// SetGroupId sets the GroupId field's value. +func (s *GroupDetail) SetGroupId(v string) *GroupDetail { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. 
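+//
+// A reading sketch (illustrative only; assumes a configured *iam.IAM client
+// named svc plus the aws and fmt packages): GroupDetail carries both the
+// inline policies (GroupPolicyList) and the attached managed policies
+// (AttachedManagedPolicies) for each group.
+//
+//	out, err := svc.GetAccountAuthorizationDetails(&iam.GetAccountAuthorizationDetailsInput{})
+//	if err == nil {
+//		for _, g := range out.GroupDetailList {
+//			fmt.Printf("%s: %d inline, %d managed\n",
+//				aws.StringValue(g.GroupName),
+//				len(g.GroupPolicyList), len(g.AttachedManagedPolicies))
+//		}
+//	}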
+func (s *GroupDetail) SetGroupName(v string) *GroupDetail { + s.GroupName = &v + return s +} + +// SetGroupPolicyList sets the GroupPolicyList field's value. +func (s *GroupDetail) SetGroupPolicyList(v []*PolicyDetail) *GroupDetail { + s.GroupPolicyList = v + return s +} + +// SetPath sets the Path field's value. +func (s *GroupDetail) SetPath(v string) *GroupDetail { + s.Path = &v + return s +} + +// Contains information about an instance profile. +// +// This data type is used as a response element in the following operations: +// +// * CreateInstanceProfile +// +// * GetInstanceProfile +// +// * ListInstanceProfiles +// +// * ListInstanceProfilesForRole +type InstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the instance profile. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date when the instance profile was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" required:"true"` + + // The stable and unique string identifying the instance profile. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // InstanceProfileId is a required field + InstanceProfileId *string `min:"16" type:"string" required:"true"` + + // The name identifying the instance profile. + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // The role associated with the instance profile. + // + // Roles is a required field + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceProfile) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *InstanceProfile) SetArn(v string) *InstanceProfile { + s.Arn = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *InstanceProfile) SetCreateDate(v time.Time) *InstanceProfile { + s.CreateDate = &v + return s +} + +// SetInstanceProfileId sets the InstanceProfileId field's value. +func (s *InstanceProfile) SetInstanceProfileId(v string) *InstanceProfile { + s.InstanceProfileId = &v + return s +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *InstanceProfile) SetInstanceProfileName(v string) *InstanceProfile { + s.InstanceProfileName = &v + return s +} + +// SetPath sets the Path field's value. +func (s *InstanceProfile) SetPath(v string) *InstanceProfile { + s.Path = &v + return s +} + +// SetRoles sets the Roles field's value. 
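+//
+// Although Roles is modeled as a list, an instance profile can contain at
+// most one role; a lookup sketch (illustrative only; assumes a configured
+// *iam.IAM client named svc and a hypothetical profile name):
+//
+//	out, err := svc.GetInstanceProfile(&iam.GetInstanceProfileInput{
+//		InstanceProfileName: aws.String("web-profile"),
+//	})
+//	if err == nil && len(out.InstanceProfile.Roles) > 0 {
+//		fmt.Println(aws.StringValue(out.InstanceProfile.Roles[0].RoleName))
+//	}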
+func (s *InstanceProfile) SetRoles(v []*Role) *InstanceProfile { + s.Roles = v + return s +} + +type ListAccessKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccessKeysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccessKeysInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListAccessKeysInput) SetMarker(v string) *ListAccessKeysInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListAccessKeysInput) SetMaxItems(v int64) *ListAccessKeysInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListAccessKeysInput) SetUserName(v string) *ListAccessKeysInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListAccessKeys request. +type ListAccessKeysOutput struct { + _ struct{} `type:"structure"` + + // A list of objects containing metadata about the access keys. + // + // AccessKeyMetadata is a required field + AccessKeyMetadata []*AccessKeyMetadata `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccessKeysOutput) GoString() string { + return s.String() +} + +// SetAccessKeyMetadata sets the AccessKeyMetadata field's value. +func (s *ListAccessKeysOutput) SetAccessKeyMetadata(v []*AccessKeyMetadata) *ListAccessKeysOutput { + s.AccessKeyMetadata = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListAccessKeysOutput) SetIsTruncated(v bool) *ListAccessKeysOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAccessKeysOutput) SetMarker(v string) *ListAccessKeysOutput { + s.Marker = &v + return s +} + +type ListAccountAliasesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAccountAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountAliasesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAccountAliasesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListAccountAliasesInput) SetMarker(v string) *ListAccountAliasesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListAccountAliasesInput) SetMaxItems(v int64) *ListAccountAliasesInput { + s.MaxItems = &v + return s +} + +// Contains the response to a successful ListAccountAliases request. +type ListAccountAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of aliases associated with the account. AWS supports only one alias + // per account. + // + // AccountAliases is a required field + AccountAliases []*string `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAccountAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAccountAliasesOutput) GoString() string { + return s.String() +} + +// SetAccountAliases sets the AccountAliases field's value. +func (s *ListAccountAliasesOutput) SetAccountAliases(v []*string) *ListAccountAliasesOutput { + s.AccountAliases = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListAccountAliasesOutput) SetIsTruncated(v bool) *ListAccountAliasesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAccountAliasesOutput) SetMarker(v string) *ListAccountAliasesOutput { + s.Marker = &v + return s +} + +type ListAttachedGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to list attached policies + // for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. 
+ PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttachedGroupPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedGroupPoliciesInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *ListAttachedGroupPoliciesInput) SetGroupName(v string) *ListAttachedGroupPoliciesInput { + s.GroupName = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedGroupPoliciesInput) SetMarker(v string) *ListAttachedGroupPoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListAttachedGroupPoliciesInput) SetMaxItems(v int64) *ListAttachedGroupPoliciesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListAttachedGroupPoliciesInput) SetPathPrefix(v string) *ListAttachedGroupPoliciesInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListAttachedGroupPolicies request. +type ListAttachedGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedGroupPoliciesOutput) GoString() string { + return s.String() +} + +// SetAttachedPolicies sets the AttachedPolicies field's value. +func (s *ListAttachedGroupPoliciesOutput) SetAttachedPolicies(v []*AttachedPolicy) *ListAttachedGroupPoliciesOutput { + s.AttachedPolicies = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. 
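+//
+// Marker and IsTruncated together implement the manual pagination contract
+// described above; a minimal loop sketch (illustrative only; assumes a
+// configured *iam.IAM client named svc and a hypothetical group name):
+//
+//	input := &iam.ListAttachedGroupPoliciesInput{GroupName: aws.String("admins")}
+//	for {
+//		page, err := svc.ListAttachedGroupPolicies(input)
+//		if err != nil {
+//			break
+//		}
+//		for _, p := range page.AttachedPolicies {
+//			fmt.Println(aws.StringValue(p.PolicyName))
+//		}
+//		if !aws.BoolValue(page.IsTruncated) {
+//			break
+//		}
+//		input.Marker = page.Marker
+//	}
+//
+// The generated ListAttachedGroupPoliciesPages helper wraps this same loop.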
+func (s *ListAttachedGroupPoliciesOutput) SetIsTruncated(v bool) *ListAttachedGroupPoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedGroupPoliciesOutput) SetMarker(v string) *ListAttachedGroupPoliciesOutput { + s.Marker = &v + return s +} + +type ListAttachedRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` + + // The name (friendly name, not ARN) of the role to list attached policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAttachedRolePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedRolePoliciesInput) SetMarker(v string) *ListAttachedRolePoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListAttachedRolePoliciesInput) SetMaxItems(v int64) *ListAttachedRolePoliciesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListAttachedRolePoliciesInput) SetPathPrefix(v string) *ListAttachedRolePoliciesInput { + s.PathPrefix = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ListAttachedRolePoliciesInput) SetRoleName(v string) *ListAttachedRolePoliciesInput { + s.RoleName = &v + return s +} + +// Contains the response to a successful ListAttachedRolePolicies request. +type ListAttachedRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedRolePoliciesOutput) GoString() string { + return s.String() +} + +// SetAttachedPolicies sets the AttachedPolicies field's value. +func (s *ListAttachedRolePoliciesOutput) SetAttachedPolicies(v []*AttachedPolicy) *ListAttachedRolePoliciesOutput { + s.AttachedPolicies = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListAttachedRolePoliciesOutput) SetIsTruncated(v bool) *ListAttachedRolePoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedRolePoliciesOutput) SetMarker(v string) *ListAttachedRolePoliciesOutput { + s.Marker = &v + return s +} + +type ListAttachedUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` + + // The name (friendly name, not ARN) of the user to list attached policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttachedUserPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListAttachedUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedUserPoliciesInput) SetMarker(v string) *ListAttachedUserPoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListAttachedUserPoliciesInput) SetMaxItems(v int64) *ListAttachedUserPoliciesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. 
+func (s *ListAttachedUserPoliciesInput) SetPathPrefix(v string) *ListAttachedUserPoliciesInput { + s.PathPrefix = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListAttachedUserPoliciesInput) SetUserName(v string) *ListAttachedUserPoliciesInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListAttachedUserPolicies request. +type ListAttachedUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []*AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListAttachedUserPoliciesOutput) GoString() string { + return s.String() +} + +// SetAttachedPolicies sets the AttachedPolicies field's value. +func (s *ListAttachedUserPoliciesOutput) SetAttachedPolicies(v []*AttachedPolicy) *ListAttachedUserPoliciesOutput { + s.AttachedPolicies = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListAttachedUserPoliciesOutput) SetIsTruncated(v bool) *ListAttachedUserPoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListAttachedUserPoliciesOutput) SetMarker(v string) *ListAttachedUserPoliciesOutput { + s.Marker = &v + return s +} + +type ListEntitiesForPolicyInput struct { + _ struct{} `type:"structure"` + + // The entity type to use for filtering the results. + // + // For example, when EntityFilter is Role, only the roles that are attached + // to the specified policy are returned. This parameter is optional. If it is + // not included, all attached entities (users, groups, and roles) are returned. + // The argument for this parameter must be one of the valid values listed below. + EntityFilter *string `type:"string" enum:"EntityType"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. 
+ MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all entities. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy usage method to use for filtering the results. + // + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. + // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. + // + // This parameter is optional. If it is not included, all policies are returned. + PolicyUsageFilter *string `type:"string" enum:"PolicyUsageType"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEntitiesForPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListEntitiesForPolicyInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEntityFilter sets the EntityFilter field's value. +func (s *ListEntitiesForPolicyInput) SetEntityFilter(v string) *ListEntitiesForPolicyInput { + s.EntityFilter = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListEntitiesForPolicyInput) SetMarker(v string) *ListEntitiesForPolicyInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListEntitiesForPolicyInput) SetMaxItems(v int64) *ListEntitiesForPolicyInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListEntitiesForPolicyInput) SetPathPrefix(v string) *ListEntitiesForPolicyInput { + s.PathPrefix = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. 
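+//
+// A filtering sketch (illustrative only; assumes a configured *iam.IAM
+// client named svc): restrict the listing to the roles a managed policy is
+// attached to, using the generated EntityTypeRole enum constant.
+//
+//	out, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{
+//		PolicyArn:    aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//		EntityFilter: aws.String(iam.EntityTypeRole),
+//	})
+//	if err == nil {
+//		for _, r := range out.PolicyRoles {
+//			fmt.Println(aws.StringValue(r.RoleName))
+//		}
+//	}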
+func (s *ListEntitiesForPolicyInput) SetPolicyArn(v string) *ListEntitiesForPolicyInput { + s.PolicyArn = &v + return s +} + +// SetPolicyUsageFilter sets the PolicyUsageFilter field's value. +func (s *ListEntitiesForPolicyInput) SetPolicyUsageFilter(v string) *ListEntitiesForPolicyInput { + s.PolicyUsageFilter = &v + return s +} + +// Contains the response to a successful ListEntitiesForPolicy request. +type ListEntitiesForPolicyOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of IAM groups that the policy is attached to. + PolicyGroups []*PolicyGroup `type:"list"` + + // A list of IAM roles that the policy is attached to. + PolicyRoles []*PolicyRole `type:"list"` + + // A list of IAM users that the policy is attached to. + PolicyUsers []*PolicyUser `type:"list"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListEntitiesForPolicyOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListEntitiesForPolicyOutput) SetIsTruncated(v bool) *ListEntitiesForPolicyOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListEntitiesForPolicyOutput) SetMarker(v string) *ListEntitiesForPolicyOutput { + s.Marker = &v + return s +} + +// SetPolicyGroups sets the PolicyGroups field's value. +func (s *ListEntitiesForPolicyOutput) SetPolicyGroups(v []*PolicyGroup) *ListEntitiesForPolicyOutput { + s.PolicyGroups = v + return s +} + +// SetPolicyRoles sets the PolicyRoles field's value. +func (s *ListEntitiesForPolicyOutput) SetPolicyRoles(v []*PolicyRole) *ListEntitiesForPolicyOutput { + s.PolicyRoles = v + return s +} + +// SetPolicyUsers sets the PolicyUsers field's value. +func (s *ListEntitiesForPolicyOutput) SetPolicyUsers(v []*PolicyUser) *ListEntitiesForPolicyOutput { + s.PolicyUsers = v + return s +} + +type ListGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group to list policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. 
+ Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupPoliciesInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *ListGroupPoliciesInput) SetGroupName(v string) *ListGroupPoliciesInput { + s.GroupName = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupPoliciesInput) SetMarker(v string) *ListGroupPoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListGroupPoliciesInput) SetMaxItems(v int64) *ListGroupPoliciesInput { + s.MaxItems = &v + return s +} + +// Contains the response to a successful ListGroupPolicies request. +type ListGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // PolicyNames is a required field + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupPoliciesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListGroupPoliciesOutput) SetIsTruncated(v bool) *ListGroupPoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupPoliciesOutput) SetMarker(v string) *ListGroupPoliciesOutput { + s.Marker = &v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *ListGroupPoliciesOutput) SetPolicyNames(v []*string) *ListGroupPoliciesOutput { + s.PolicyNames = v + return s +} + +type ListGroupsForUserInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list groups for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListGroupsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupsForUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupsForUserInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupsForUserInput) SetMarker(v string) *ListGroupsForUserInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. 
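+//
+// Instead of driving Marker by hand, the generated Pages helper walks every
+// page; a sketch (illustrative only; assumes a configured *iam.IAM client
+// named svc and a hypothetical user name):
+//
+//	err := svc.ListGroupsForUserPages(
+//		&iam.ListGroupsForUserInput{UserName: aws.String("alice")},
+//		func(page *iam.ListGroupsForUserOutput, lastPage bool) bool {
+//			for _, g := range page.Groups {
+//				fmt.Println(aws.StringValue(g.GroupName))
+//			}
+//			return true // keep fetching until the last page
+//		})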
+func (s *ListGroupsForUserInput) SetMaxItems(v int64) *ListGroupsForUserInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListGroupsForUserInput) SetUserName(v string) *ListGroupsForUserInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListGroupsForUser request. +type ListGroupsForUserOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + // + // Groups is a required field + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListGroupsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsForUserOutput) GoString() string { + return s.String() +} + +// SetGroups sets the Groups field's value. +func (s *ListGroupsForUserOutput) SetGroups(v []*Group) *ListGroupsForUserOutput { + s.Groups = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListGroupsForUserOutput) SetIsTruncated(v bool) *ListGroupsForUserOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupsForUserOutput) SetMarker(v string) *ListGroupsForUserOutput { + s.Marker = &v + return s +} + +type ListGroupsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ + // gets all groups whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. This parameter allows (through its regex pattern + // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either + // a forward slash (/) by itself or a string that must begin and end with forward + // slashes. In addition, it can contain any ASCII character from the ! 
(\u0021) + // through the DEL character (\u007F), including most punctuation characters, + // digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListGroupsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupsInput) SetMarker(v string) *ListGroupsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListGroupsInput) SetMaxItems(v int64) *ListGroupsInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListGroupsInput) SetPathPrefix(v string) *ListGroupsInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListGroups request. +type ListGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + // + // Groups is a required field + Groups []*Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListGroupsOutput) GoString() string { + return s.String() +} + +// SetGroups sets the Groups field's value. +func (s *ListGroupsOutput) SetGroups(v []*Group) *ListGroupsOutput { + s.Groups = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListGroupsOutput) SetIsTruncated(v bool) *ListGroupsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListGroupsOutput) SetMarker(v string) *ListGroupsOutput { + s.Marker = &v + return s +} + +type ListInstanceProfilesForRoleInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. 
+ Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list instance profiles for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInstanceProfilesForRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInstanceProfilesForRoleInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListInstanceProfilesForRoleInput) SetMarker(v string) *ListInstanceProfilesForRoleInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListInstanceProfilesForRoleInput) SetMaxItems(v int64) *ListInstanceProfilesForRoleInput { + s.MaxItems = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ListInstanceProfilesForRoleInput) SetRoleName(v string) *ListInstanceProfilesForRoleInput { + s.RoleName = &v + return s +} + +// Contains the response to a successful ListInstanceProfilesForRole request. +type ListInstanceProfilesForRoleOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + // + // InstanceProfiles is a required field + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesForRoleOutput) GoString() string { + return s.String() +} + +// SetInstanceProfiles sets the InstanceProfiles field's value. +func (s *ListInstanceProfilesForRoleOutput) SetInstanceProfiles(v []*InstanceProfile) *ListInstanceProfilesForRoleOutput { + s.InstanceProfiles = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListInstanceProfilesForRoleOutput) SetIsTruncated(v bool) *ListInstanceProfilesForRoleOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListInstanceProfilesForRoleOutput) SetMarker(v string) *ListInstanceProfilesForRoleOutput { + s.Marker = &v + return s +} + +type ListInstanceProfilesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all instance profiles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all instance profiles. This parameter allows (through its regex + // pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting + // of either a forward slash (/) by itself or a string that must begin and end + // with forward slashes. In addition, it can contain any ASCII character from + // the ! (\u0021) through the DEL character (\u007F), including most punctuation + // characters, digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
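+
+// Example listing instance profiles under a path prefix — a minimal sketch, not
+// generated code. It assumes the usual imports (fmt, log, and this SDK's aws,
+// session, and iam packages); the prefix reuses the documentation's own example
+// value. The SDK's ListInstanceProfilesPages helper (generated elsewhere in this
+// file) drives the Marker/IsTruncated contract described above for you:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    err := svc.ListInstanceProfilesPages(
+//        &iam.ListInstanceProfilesInput{PathPrefix: aws.String("/application_abc/component_xyz/")},
+//        func(page *iam.ListInstanceProfilesOutput, lastPage bool) bool {
+//            for _, p := range page.InstanceProfiles {
+//                fmt.Println(aws.StringValue(p.InstanceProfileName))
+//            }
+//            return true // keep requesting pages until lastPage
+//        })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+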
+func (s *ListInstanceProfilesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInstanceProfilesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListInstanceProfilesInput) SetMarker(v string) *ListInstanceProfilesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListInstanceProfilesInput) SetMaxItems(v int64) *ListInstanceProfilesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListInstanceProfilesInput) SetPathPrefix(v string) *ListInstanceProfilesInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListInstanceProfiles request. +type ListInstanceProfilesOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + // + // InstanceProfiles is a required field + InstanceProfiles []*InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListInstanceProfilesOutput) GoString() string { + return s.String() +} + +// SetInstanceProfiles sets the InstanceProfiles field's value. +func (s *ListInstanceProfilesOutput) SetInstanceProfiles(v []*InstanceProfile) *ListInstanceProfilesOutput { + s.InstanceProfiles = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListInstanceProfilesOutput) SetIsTruncated(v bool) *ListInstanceProfilesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListInstanceProfilesOutput) SetMarker(v string) *ListInstanceProfilesOutput { + s.Marker = &v + return s +} + +type ListMFADevicesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user whose MFA devices you want to list. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMFADevicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMFADevicesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListMFADevicesInput) SetMarker(v string) *ListMFADevicesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListMFADevicesInput) SetMaxItems(v int64) *ListMFADevicesInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListMFADevicesInput) SetUserName(v string) *ListMFADevicesInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListMFADevices request. +type ListMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // A list of MFA devices. + // + // MFADevices is a required field + MFADevices []*MFADevice `type:"list" required:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMFADevicesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMFADevicesOutput) SetIsTruncated(v bool) *ListMFADevicesOutput { + s.IsTruncated = &v + return s +} + +// SetMFADevices sets the MFADevices field's value. 
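+
+// Example driving the Marker/IsTruncated pagination loop by hand with
+// ListMFADevices — a minimal sketch, not generated code. The user name "alice"
+// and the default session setup are illustrative assumptions; imports are fmt,
+// log, and this SDK's aws, session, and iam packages:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    in := &iam.ListMFADevicesInput{UserName: aws.String("alice")}
+//    for {
+//        out, err := svc.ListMFADevices(in)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, d := range out.MFADevices {
+//            fmt.Println(aws.StringValue(d.SerialNumber))
+//        }
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.Marker = out.Marker // resume where the previous page stopped
+//    }
+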
+func (s *ListMFADevicesOutput) SetMFADevices(v []*MFADevice) *ListMFADevicesOutput { + s.MFADevices = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListMFADevicesOutput) SetMarker(v string) *ListMFADevicesOutput { + s.Marker = &v + return s +} + +type ListOpenIDConnectProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListOpenIDConnectProviders request. +type ListOpenIDConnectProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of IAM OIDC provider resource objects defined in the AWS account. + OpenIDConnectProviderList []*OpenIDConnectProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListOpenIDConnectProvidersOutput) GoString() string { + return s.String() +} + +// SetOpenIDConnectProviderList sets the OpenIDConnectProviderList field's value. +func (s *ListOpenIDConnectProvidersOutput) SetOpenIDConnectProviderList(v []*OpenIDConnectProviderListEntry) *ListOpenIDConnectProvidersOutput { + s.OpenIDConnectProviderList = v + return s +} + +// Contains details about the permissions policies that are attached to the +// specified identity (user, group, or role). +// +// This data type is used as a response element in the ListPoliciesGrantingServiceAccess +// operation. +type ListPoliciesGrantingServiceAccessEntry struct { + _ struct{} `type:"structure"` + + // The PoliciesGrantingServiceAccess object that contains details about the + // policy. + Policies []*PolicyGrantingServiceAccess `type:"list"` + + // The namespace of the service that was accessed. + // + // To learn the service namespace of a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + ServiceNamespace *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesGrantingServiceAccessEntry) GoString() string { + return s.String() +} + +// SetPolicies sets the Policies field's value. +func (s *ListPoliciesGrantingServiceAccessEntry) SetPolicies(v []*PolicyGrantingServiceAccess) *ListPoliciesGrantingServiceAccessEntry { + s.Policies = v + return s +} + +// SetServiceNamespace sets the ServiceNamespace field's value. 
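+
+// Example calling ListOpenIDConnectProviders — a minimal sketch, not generated
+// code. The operation takes no parameters and is not paginated, so a single call
+// returns the whole list; the session setup is an assumed default:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    out, err := svc.ListOpenIDConnectProviders(&iam.ListOpenIDConnectProvidersInput{})
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, p := range out.OpenIDConnectProviderList {
+//        fmt.Println(aws.StringValue(p.Arn))
+//    }
+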
+func (s *ListPoliciesGrantingServiceAccessEntry) SetServiceNamespace(v string) *ListPoliciesGrantingServiceAccessEntry { + s.ServiceNamespace = &v + return s +} + +type ListPoliciesGrantingServiceAccessInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM identity (user, group, or role) whose policies you want + // to list. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // The service namespace for the AWS services whose policies you want to list. + // + // To learn the service namespace for a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + // + // ServiceNamespaces is a required field + ServiceNamespaces []*string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesGrantingServiceAccessInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesGrantingServiceAccessInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPoliciesGrantingServiceAccessInput"} + if s.Arn == nil { + invalidParams.Add(request.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.ServiceNamespaces == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespaces")) + } + if s.ServiceNamespaces != nil && len(s.ServiceNamespaces) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServiceNamespaces", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *ListPoliciesGrantingServiceAccessInput) SetArn(v string) *ListPoliciesGrantingServiceAccessInput { + s.Arn = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListPoliciesGrantingServiceAccessInput) SetMarker(v string) *ListPoliciesGrantingServiceAccessInput { + s.Marker = &v + return s +} + +// SetServiceNamespaces sets the ServiceNamespaces field's value. +func (s *ListPoliciesGrantingServiceAccessInput) SetServiceNamespaces(v []*string) *ListPoliciesGrantingServiceAccessInput { + s.ServiceNamespaces = v + return s +} + +type ListPoliciesGrantingServiceAccessOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. We recommend that you check IsTruncated + // after every call to ensure that you receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A ListPoliciesGrantingServiceAccess object that contains details about the + // permissions policies attached to the specified identity (user, group, or + // role). + // + // PoliciesGrantingServiceAccess is a required field + PoliciesGrantingServiceAccess []*ListPoliciesGrantingServiceAccessEntry `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesGrantingServiceAccessOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPoliciesGrantingServiceAccessOutput) SetIsTruncated(v bool) *ListPoliciesGrantingServiceAccessOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListPoliciesGrantingServiceAccessOutput) SetMarker(v string) *ListPoliciesGrantingServiceAccessOutput { + s.Marker = &v + return s +} + +// SetPoliciesGrantingServiceAccess sets the PoliciesGrantingServiceAccess field's value. +func (s *ListPoliciesGrantingServiceAccessOutput) SetPoliciesGrantingServiceAccess(v []*ListPoliciesGrantingServiceAccessEntry) *ListPoliciesGrantingServiceAccessOutput { + s.PoliciesGrantingServiceAccess = v + return s +} + +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A flag to filter the results to only the attached policies. + // + // When OnlyAttached is true, the returned list contains only the policies that + // are attached to an IAM user, group, or role. When OnlyAttached is false, + // or when the parameter is not included, all policies are returned. + OnlyAttached *bool `type:"boolean"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. This + // parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. 
In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` + + // The policy usage method to use for filtering the results. + // + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. + // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. + // + // This parameter is optional. If it is not included, all policies are returned. + PolicyUsageFilter *string `type:"string" enum:"PolicyUsageType"` + + // The scope to use for filtering the results. + // + // To list only AWS managed policies, set Scope to AWS. To list only the customer + // managed policies in your AWS account, set Scope to Local. + // + // This parameter is optional. If it is not included, or if it is set to All, + // all policies are returned. + Scope *string `type:"string" enum:"policyScopeType"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListPoliciesInput) SetMarker(v string) *ListPoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListPoliciesInput) SetMaxItems(v int64) *ListPoliciesInput { + s.MaxItems = &v + return s +} + +// SetOnlyAttached sets the OnlyAttached field's value. +func (s *ListPoliciesInput) SetOnlyAttached(v bool) *ListPoliciesInput { + s.OnlyAttached = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListPoliciesInput) SetPathPrefix(v string) *ListPoliciesInput { + s.PathPrefix = &v + return s +} + +// SetPolicyUsageFilter sets the PolicyUsageFilter field's value. +func (s *ListPoliciesInput) SetPolicyUsageFilter(v string) *ListPoliciesInput { + s.PolicyUsageFilter = &v + return s +} + +// SetScope sets the Scope field's value. +func (s *ListPoliciesInput) SetScope(v string) *ListPoliciesInput { + s.Scope = &v + return s +} + +// Contains the response to a successful ListPolicies request. +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policies. + Policies []*Policy `type:"list"` +} + +// String returns the string representation +func (s ListPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPoliciesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPoliciesOutput) SetIsTruncated(v bool) *ListPoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListPoliciesOutput) SetMarker(v string) *ListPoliciesOutput { + s.Marker = &v + return s +} + +// SetPolicies sets the Policies field's value. +func (s *ListPoliciesOutput) SetPolicies(v []*Policy) *ListPoliciesOutput { + s.Policies = v + return s +} + +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPolicyVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPolicyVersionsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. 
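+
+// Example combining the ListPoliciesInput filters documented above (Scope,
+// OnlyAttached) with the SDK's ListPoliciesPages helper — a minimal sketch, not
+// generated code; the session setup is an assumed default:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    in := &iam.ListPoliciesInput{
+//        Scope:        aws.String(iam.PolicyScopeTypeLocal), // customer managed policies only
+//        OnlyAttached: aws.Bool(true),                       // skip policies attached to nothing
+//    }
+//    err := svc.ListPoliciesPages(in, func(page *iam.ListPoliciesOutput, lastPage bool) bool {
+//        for _, p := range page.Policies {
+//            fmt.Println(aws.StringValue(p.Arn))
+//        }
+//        return true
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+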
+func (s *ListPolicyVersionsInput) SetMarker(v string) *ListPolicyVersionsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListPolicyVersionsInput) SetMaxItems(v int64) *ListPolicyVersionsInput { + s.MaxItems = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *ListPolicyVersionsInput) SetPolicyArn(v string) *ListPolicyVersionsInput { + s.PolicyArn = &v + return s +} + +// Contains the response to a successful ListPolicyVersions request. +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy versions. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + Versions []*PolicyVersion `type:"list"` +} + +// String returns the string representation +func (s ListPolicyVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPolicyVersionsOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPolicyVersionsOutput) SetIsTruncated(v bool) *ListPolicyVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListPolicyVersionsOutput) SetMarker(v string) *ListPolicyVersionsOutput { + s.Marker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListPolicyVersionsOutput) SetVersions(v []*PolicyVersion) *ListPolicyVersionsOutput { + s.Versions = v + return s +} + +type ListRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list policies for. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRolePoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListRolePoliciesInput) SetMarker(v string) *ListRolePoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListRolePoliciesInput) SetMaxItems(v int64) *ListRolePoliciesInput { + s.MaxItems = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ListRolePoliciesInput) SetRoleName(v string) *ListRolePoliciesInput { + s.RoleName = &v + return s +} + +// Contains the response to a successful ListRolePolicies request. +type ListRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // PolicyNames is a required field + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolePoliciesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListRolePoliciesOutput) SetIsTruncated(v bool) *ListRolePoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListRolePoliciesOutput) SetMarker(v string) *ListRolePoliciesOutput { + s.Marker = &v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. 
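+
+// Example building a ListRolePoliciesInput fluently with the chainable Set*
+// methods above and paging through the inline policy names — a minimal sketch,
+// not generated code; the role name "my-role" is an illustrative assumption:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    in := (&iam.ListRolePoliciesInput{}).SetRoleName("my-role").SetMaxItems(50)
+//    err := svc.ListRolePoliciesPages(in, func(page *iam.ListRolePoliciesOutput, lastPage bool) bool {
+//        for _, name := range page.PolicyNames {
+//            fmt.Println(aws.StringValue(name))
+//        }
+//        return true
+//    })
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+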
+func (s *ListRolePoliciesOutput) SetPolicyNames(v []*string) *ListRolePoliciesOutput { + s.PolicyNames = v + return s +} + +type ListRoleTagsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // (Optional) Use this only when paginating results to indicate the maximum + // number of items that you want in the response. If additional items exist + // beyond the maximum that you specify, the IsTruncated response element is + // true. + // + // If you do not include this parameter, it defaults to 100. Note that IAM might + // return fewer results, even when more results are available. In that case, + // the IsTruncated response element returns true, and Marker contains a value + // to include in the subsequent call that tells the service where to continue + // from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM role for which you want to see the list of tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRoleTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRoleTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRoleTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRoleTagsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListRoleTagsInput) SetMarker(v string) *ListRoleTagsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListRoleTagsInput) SetMaxItems(v int64) *ListRoleTagsInput { + s.MaxItems = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *ListRoleTagsInput) SetRoleName(v string) *ListRoleTagsInput { + s.RoleName = &v + return s +} + +type ListRoleTagsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can use the Marker request parameter to make a subsequent + // pagination request that retrieves more items. Note that IAM might return + // fewer than the MaxItems number of results even when more results are available. + // Check IsTruncated after every call to ensure that you receive all of your + // results. 
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+
+ // The list of tags that are currently attached to the role. Each tag consists
+ // of a key name and an associated value. If no tags are attached to the specified
+ // role, the response contains an empty list.
+ //
+ // Tags is a required field
+ Tags []*Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListRoleTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRoleTagsOutput) GoString() string {
+ return s.String()
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListRoleTagsOutput) SetIsTruncated(v bool) *ListRoleTagsOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListRoleTagsOutput) SetMarker(v string) *ListRoleTagsOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *ListRoleTagsOutput) SetTags(v []*Tag) *ListRoleTagsOutput {
+ s.Tags = v
+ return s
+}
+
+type ListRolesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response. If additional items exist beyond the maximum you
+ // specify, the IsTruncated response element is true.
+ //
+ // If you do not include this parameter, the number of items defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results
+ // available. In that case, the IsTruncated response element returns true, and
+ // Marker contains a value to include in the subsequent call that tells the
+ // service where to continue from.
+ MaxItems *int64 `min:"1" type:"integer"`
+
+ // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/
+ // gets all roles whose path starts with /application_abc/component_xyz/.
+ //
+ // This parameter is optional. If it is not included, it defaults to a slash
+ // (/), listing all roles. This parameter allows (through its regex pattern
+ // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either
+ // a forward slash (/) by itself or a string that must begin and end with forward
+ // slashes. In addition, it can contain any ASCII character from the ! (\u0021)
+ // through the DEL character (\u007F), including most punctuation characters,
+ // digits, and upper and lowercased letters.
+ PathPrefix *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListRolesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListRolesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListRolesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListRolesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListRolesInput) SetMarker(v string) *ListRolesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListRolesInput) SetMaxItems(v int64) *ListRolesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListRolesInput) SetPathPrefix(v string) *ListRolesInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListRoles request. +type ListRolesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of roles. + // + // Roles is a required field + Roles []*Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListRolesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListRolesOutput) SetIsTruncated(v bool) *ListRolesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListRolesOutput) SetMarker(v string) *ListRolesOutput { + s.Marker = &v + return s +} + +// SetRoles sets the Roles field's value. +func (s *ListRolesOutput) SetRoles(v []*Role) *ListRolesOutput { + s.Roles = v + return s +} + +type ListSAMLProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListSAMLProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersInput) GoString() string { + return s.String() +} + +// Contains the response to a successful ListSAMLProviders request. +type ListSAMLProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of SAML provider resource objects defined in IAM for this AWS account. + SAMLProviderList []*SAMLProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSAMLProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSAMLProvidersOutput) GoString() string { + return s.String() +} + +// SetSAMLProviderList sets the SAMLProviderList field's value. 
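+
+// Example reading a role's tags with ListRoleTags (defined above), driving the
+// Marker/IsTruncated contract by hand — a minimal sketch, not generated code;
+// "my-role" and the session setup are illustrative assumptions:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    in := &iam.ListRoleTagsInput{RoleName: aws.String("my-role")}
+//    for {
+//        out, err := svc.ListRoleTags(in)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        for _, t := range out.Tags {
+//            fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
+//        }
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.Marker = out.Marker
+//    }
+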
+func (s *ListSAMLProvidersOutput) SetSAMLProviderList(v []*SAMLProviderListEntry) *ListSAMLProvidersOutput { + s.SAMLProviderList = v + return s +} + +type ListSSHPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user to list SSH public keys for. If none is specified, + // the UserName field is determined implicitly based on the AWS access key used + // to sign the request. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSSHPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSSHPublicKeysInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSSHPublicKeysInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSSHPublicKeysInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListSSHPublicKeysInput) SetMarker(v string) *ListSSHPublicKeysInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListSSHPublicKeysInput) SetMaxItems(v int64) *ListSSHPublicKeysInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListSSHPublicKeysInput) SetUserName(v string) *ListSSHPublicKeysInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListSSHPublicKeys request. +type ListSSHPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. 
+ // We recommend that you check IsTruncated after every call to ensure that you
+ // receive all your results.
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+
+ // A list of the SSH public keys assigned to the IAM user.
+ SSHPublicKeys []*SSHPublicKeyMetadata `type:"list"`
+}
+
+// String returns the string representation
+func (s ListSSHPublicKeysOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListSSHPublicKeysOutput) GoString() string {
+ return s.String()
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListSSHPublicKeysOutput) SetIsTruncated(v bool) *ListSSHPublicKeysOutput {
+ s.IsTruncated = &v
+ return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListSSHPublicKeysOutput) SetMarker(v string) *ListSSHPublicKeysOutput {
+ s.Marker = &v
+ return s
+}
+
+// SetSSHPublicKeys sets the SSHPublicKeys field's value.
+func (s *ListSSHPublicKeysOutput) SetSSHPublicKeys(v []*SSHPublicKeyMetadata) *ListSSHPublicKeysOutput {
+ s.SSHPublicKeys = v
+ return s
+}
+
+type ListServerCertificatesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response. If additional items exist beyond the maximum you
+ // specify, the IsTruncated response element is true.
+ //
+ // If you do not include this parameter, the number of items defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results
+ // available. In that case, the IsTruncated response element returns true, and
+ // Marker contains a value to include in the subsequent call that tells the
+ // service where to continue from.
+ MaxItems *int64 `min:"1" type:"integer"`
+
+ // The path prefix for filtering the results. For example: /company/servercerts
+ // would get all server certificates for which the path starts with /company/servercerts.
+ //
+ // This parameter is optional. If it is not included, it defaults to a slash
+ // (/), listing all server certificates. This parameter allows (through its
+ // regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting
+ // of either a forward slash (/) by itself or a string that must begin and end
+ // with forward slashes. In addition, it can contain any ASCII character from
+ // the ! (\u0021) through the DEL character (\u007F), including most punctuation
+ // characters, digits, and upper and lowercased letters.
+ PathPrefix *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s ListServerCertificatesInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListServerCertificatesInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
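+
+// Example calling ListSSHPublicKeys with UserName left unset, so IAM infers the
+// user from the access key that signed the request, as documented above — a
+// minimal sketch, not generated code; the session setup is an assumed default:
+//
+//    svc := iam.New(session.Must(session.NewSession()))
+//    out, err := svc.ListSSHPublicKeys(&iam.ListSSHPublicKeysInput{})
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, k := range out.SSHPublicKeys {
+//        fmt.Println(aws.StringValue(k.SSHPublicKeyId), aws.StringValue(k.Status))
+//    }
+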
+func (s *ListServerCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListServerCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListServerCertificatesInput) SetMarker(v string) *ListServerCertificatesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListServerCertificatesInput) SetMaxItems(v int64) *ListServerCertificatesInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListServerCertificatesInput) SetPathPrefix(v string) *ListServerCertificatesInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListServerCertificates request. +type ListServerCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of server certificates. + // + // ServerCertificateMetadataList is a required field + ServerCertificateMetadataList []*ServerCertificateMetadata `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListServerCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServerCertificatesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListServerCertificatesOutput) SetIsTruncated(v bool) *ListServerCertificatesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListServerCertificatesOutput) SetMarker(v string) *ListServerCertificatesOutput { + s.Marker = &v + return s +} + +// SetServerCertificateMetadataList sets the ServerCertificateMetadataList field's value. +func (s *ListServerCertificatesOutput) SetServerCertificateMetadataList(v []*ServerCertificateMetadata) *ListServerCertificatesOutput { + s.ServerCertificateMetadataList = v + return s +} + +type ListServiceSpecificCredentialsInput struct { + _ struct{} `type:"structure"` + + // Filters the returned results to only those for the specified AWS service. + // If not specified, then AWS returns service-specific credentials for all services. + ServiceName *string `type:"string"` + + // The name of the user whose service-specific credentials you want information + // about. If this value is not specified, then the operation assumes the user + // whose credentials are used to call the operation. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServiceSpecificCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServiceSpecificCredentialsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListServiceSpecificCredentialsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListServiceSpecificCredentialsInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServiceName sets the ServiceName field's value. +func (s *ListServiceSpecificCredentialsInput) SetServiceName(v string) *ListServiceSpecificCredentialsInput { + s.ServiceName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListServiceSpecificCredentialsInput) SetUserName(v string) *ListServiceSpecificCredentialsInput { + s.UserName = &v + return s +} + +type ListServiceSpecificCredentialsOutput struct { + _ struct{} `type:"structure"` + + // A list of structures that each contain details about a service-specific credential. + ServiceSpecificCredentials []*ServiceSpecificCredentialMetadata `type:"list"` +} + +// String returns the string representation +func (s ListServiceSpecificCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListServiceSpecificCredentialsOutput) GoString() string { + return s.String() +} + +// SetServiceSpecificCredentials sets the ServiceSpecificCredentials field's value. +func (s *ListServiceSpecificCredentialsOutput) SetServiceSpecificCredentials(v []*ServiceSpecificCredentialMetadata) *ListServiceSpecificCredentialsOutput { + s.ServiceSpecificCredentials = v + return s +} + +type ListSigningCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user whose signing certificates you want to examine. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListSigningCertificatesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListSigningCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListSigningCertificatesInput) SetMarker(v string) *ListSigningCertificatesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListSigningCertificatesInput) SetMaxItems(v int64) *ListSigningCertificatesInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListSigningCertificatesInput) SetUserName(v string) *ListSigningCertificatesInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListSigningCertificates request. +type ListSigningCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the user's signing certificate information. + // + // Certificates is a required field + Certificates []*SigningCertificate `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListSigningCertificatesOutput) GoString() string { + return s.String() +} + +// SetCertificates sets the Certificates field's value. +func (s *ListSigningCertificatesOutput) SetCertificates(v []*SigningCertificate) *ListSigningCertificatesOutput { + s.Certificates = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListSigningCertificatesOutput) SetIsTruncated(v bool) *ListSigningCertificatesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. 
+func (s *ListSigningCertificatesOutput) SetMarker(v string) *ListSigningCertificatesOutput { + s.Marker = &v + return s +} + +type ListUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListUserPoliciesInput) SetMarker(v string) *ListUserPoliciesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListUserPoliciesInput) SetMaxItems(v int64) *ListUserPoliciesInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListUserPoliciesInput) SetUserName(v string) *ListUserPoliciesInput { + s.UserName = &v + return s +} + +// Contains the response to a successful ListUserPolicies request. +type ListUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. 
+ // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // PolicyNames is a required field + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserPoliciesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListUserPoliciesOutput) SetIsTruncated(v bool) *ListUserPoliciesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListUserPoliciesOutput) SetMarker(v string) *ListUserPoliciesOutput { + s.Marker = &v + return s +} + +// SetPolicyNames sets the PolicyNames field's value. +func (s *ListUserPoliciesOutput) SetPolicyNames(v []*string) *ListUserPoliciesOutput { + s.PolicyNames = v + return s +} + +type ListUserTagsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // (Optional) Use this only when paginating results to indicate the maximum + // number of items that you want in the response. If additional items exist + // beyond the maximum that you specify, the IsTruncated response element is + // true. + // + // If you do not include this parameter, it defaults to 100. Note that IAM might + // return fewer results, even when more results are available. In that case, + // the IsTruncated response element returns true, and Marker contains a value + // to include in the subsequent call that tells the service where to continue + // from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user whose tags you want to see. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
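+
+// exampleListAllUserPolicies is an illustrative sketch, not part of the
+// generated API. It shows the Marker/IsTruncated pagination contract the
+// comments above describe: call the operation until IsTruncated is false,
+// feeding each response's Marker into the next request. It assumes svc is
+// the *IAM client generated elsewhere in this file.
+func exampleListAllUserPolicies(svc *IAM, userName string) ([]*string, error) {
+	var names []*string
+	input := &ListUserPoliciesInput{UserName: &userName}
+	for {
+		out, err := svc.ListUserPolicies(input)
+		if err != nil {
+			return nil, err
+		}
+		names = append(names, out.PolicyNames...)
+		// IAM may return fewer than MaxItems results while more remain,
+		// so IsTruncated is the only reliable stop condition.
+		if out.IsTruncated == nil || !*out.IsTruncated {
+			return names, nil
+		}
+		input.Marker = out.Marker
+	}
+}
+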
+func (s *ListUserTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUserTagsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListUserTagsInput) SetMarker(v string) *ListUserTagsInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListUserTagsInput) SetMaxItems(v int64) *ListUserTagsInput { + s.MaxItems = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ListUserTagsInput) SetUserName(v string) *ListUserTagsInput { + s.UserName = &v + return s +} + +type ListUserTagsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can use the Marker request parameter to make a subsequent + // pagination request that retrieves more items. Note that IAM might return + // fewer than the MaxItems number of results even when more results are available. + // Check IsTruncated after every call to ensure that you receive all of your + // results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // The list of tags that are currently attached to the user. Each tag consists + // of a key name and an associated value. If no tags are attached to the specified + // user, the response contains an empty list. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUserTagsOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListUserTagsOutput) SetIsTruncated(v bool) *ListUserTagsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListUserTagsOutput) SetMarker(v string) *ListUserTagsOutput { + s.Marker = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ListUserTagsOutput) SetTags(v []*Tag) *ListUserTagsOutput { + s.Tags = v + return s +} + +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, + // which would get all user names whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all user names. This parameter allows (through its regex pattern + // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either + // a forward slash (/) by itself or a string that must begin and end with forward + // slashes. In addition, it can contain any ASCII character from the ! (\u0021) + // through the DEL character (\u007F), including most punctuation characters, + // digits, and uppercase and lowercase letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListUsersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListUsersInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetMarker sets the Marker field's value. +func (s *ListUsersInput) SetMarker(v string) *ListUsersInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListUsersInput) SetMaxItems(v int64) *ListUsersInput { + s.MaxItems = &v + return s +} + +// SetPathPrefix sets the PathPrefix field's value. +func (s *ListUsersInput) SetPathPrefix(v string) *ListUsersInput { + s.PathPrefix = &v + return s +} + +// Contains the response to a successful ListUsers request. +type ListUsersOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of users.
+ // + // Users is a required field + Users []*User `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUsersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListUsersOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListUsersOutput) SetIsTruncated(v bool) *ListUsersOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListUsersOutput) SetMarker(v string) *ListUsersOutput { + s.Marker = &v + return s +} + +// SetUsers sets the Users field's value. +func (s *ListUsersOutput) SetUsers(v []*User) *ListUsersOutput { + s.Users = v + return s +} + +type ListVirtualMFADevicesInput struct { + _ struct{} `type:"structure"` + + // The status (Unassigned or Assigned) of the devices to list. If you do not + // specify an AssignmentStatus, the operation defaults to Any, which lists both + // assigned and unassigned virtual MFA devices. + AssignmentStatus *string `type:"string" enum:"assignmentStatusType"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVirtualMFADevicesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListVirtualMFADevicesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAssignmentStatus sets the AssignmentStatus field's value. +func (s *ListVirtualMFADevicesInput) SetAssignmentStatus(v string) *ListVirtualMFADevicesInput { + s.AssignmentStatus = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListVirtualMFADevicesInput) SetMarker(v string) *ListVirtualMFADevicesInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *ListVirtualMFADevicesInput) SetMaxItems(v int64) *ListVirtualMFADevicesInput { + s.MaxItems = &v + return s +} + +// Contains the response to a successful ListVirtualMFADevices request.
+type ListVirtualMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // The list of virtual MFA devices in the current account that match the AssignmentStatus + // value that was passed in the request. + // + // VirtualMFADevices is a required field + VirtualMFADevices []*VirtualMFADevice `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVirtualMFADevicesOutput) GoString() string { + return s.String() +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListVirtualMFADevicesOutput) SetIsTruncated(v bool) *ListVirtualMFADevicesOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListVirtualMFADevicesOutput) SetMarker(v string) *ListVirtualMFADevicesOutput { + s.Marker = &v + return s +} + +// SetVirtualMFADevices sets the VirtualMFADevices field's value. +func (s *ListVirtualMFADevicesOutput) SetVirtualMFADevices(v []*VirtualMFADevice) *ListVirtualMFADevicesOutput { + s.VirtualMFADevices = v + return s +} + +// Contains the user name and password create date for a user. +// +// This data type is used as a response element in the CreateLoginProfile and +// GetLoginProfile operations. +type LoginProfile struct { + _ struct{} `type:"structure"` + + // The date when the password for the user was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user, which can be used for signing in to the AWS Management + // Console. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoginProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoginProfile) GoString() string { + return s.String() +} + +// SetCreateDate sets the CreateDate field's value. +func (s *LoginProfile) SetCreateDate(v time.Time) *LoginProfile { + s.CreateDate = &v + return s +} + +// SetPasswordResetRequired sets the PasswordResetRequired field's value. +func (s *LoginProfile) SetPasswordResetRequired(v bool) *LoginProfile { + s.PasswordResetRequired = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *LoginProfile) SetUserName(v string) *LoginProfile { + s.UserName = &v + return s +} + +// Contains information about an MFA device. +// +// This data type is used as a response element in the ListMFADevices operation. 
+type MFADevice struct { + _ struct{} `type:"structure"` + + // The date when the MFA device was enabled for the user. + // + // EnableDate is a required field + EnableDate *time.Time `type:"timestamp" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The user with whom the MFA device is associated. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MFADevice) GoString() string { + return s.String() +} + +// SetEnableDate sets the EnableDate field's value. +func (s *MFADevice) SetEnableDate(v time.Time) *MFADevice { + s.EnableDate = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *MFADevice) SetSerialNumber(v string) *MFADevice { + s.SerialNumber = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *MFADevice) SetUserName(v string) *MFADevice { + s.UserName = &v + return s +} + +// Contains information about a managed policy, including the policy's ARN, +// versions, and the number of principal entities (users, groups, and roles) +// that the policy is attached to. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type ManagedPolicyDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of principal entities (users, groups, and roles) that the policy + // is attached to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp"` + + // The identifier for the version of the policy that is set as the default (operative) + // version. + // + // For more information about policy versions, see Versioning for Managed Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the Using IAM guide. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The number of entities (users and roles) for which the policy is used as + // the permissions boundary. 
+ // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundaryUsageCount *int64 `type:"integer"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // A list containing information about the versions of the policy. + PolicyVersionList []*PolicyVersion `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ManagedPolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ManagedPolicyDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ManagedPolicyDetail) SetArn(v string) *ManagedPolicyDetail { + s.Arn = &v + return s +} + +// SetAttachmentCount sets the AttachmentCount field's value. +func (s *ManagedPolicyDetail) SetAttachmentCount(v int64) *ManagedPolicyDetail { + s.AttachmentCount = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *ManagedPolicyDetail) SetCreateDate(v time.Time) *ManagedPolicyDetail { + s.CreateDate = &v + return s +} + +// SetDefaultVersionId sets the DefaultVersionId field's value. +func (s *ManagedPolicyDetail) SetDefaultVersionId(v string) *ManagedPolicyDetail { + s.DefaultVersionId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *ManagedPolicyDetail) SetDescription(v string) *ManagedPolicyDetail { + s.Description = &v + return s +} + +// SetIsAttachable sets the IsAttachable field's value. +func (s *ManagedPolicyDetail) SetIsAttachable(v bool) *ManagedPolicyDetail { + s.IsAttachable = &v + return s +} + +// SetPath sets the Path field's value. +func (s *ManagedPolicyDetail) SetPath(v string) *ManagedPolicyDetail { + s.Path = &v + return s +} + +// SetPermissionsBoundaryUsageCount sets the PermissionsBoundaryUsageCount field's value. +func (s *ManagedPolicyDetail) SetPermissionsBoundaryUsageCount(v int64) *ManagedPolicyDetail { + s.PermissionsBoundaryUsageCount = &v + return s +} + +// SetPolicyId sets the PolicyId field's value. +func (s *ManagedPolicyDetail) SetPolicyId(v string) *ManagedPolicyDetail { + s.PolicyId = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *ManagedPolicyDetail) SetPolicyName(v string) *ManagedPolicyDetail { + s.PolicyName = &v + return s +} + +// SetPolicyVersionList sets the PolicyVersionList field's value. +func (s *ManagedPolicyDetail) SetPolicyVersionList(v []*PolicyVersion) *ManagedPolicyDetail { + s.PolicyVersionList = v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. 
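+
+// exampleDefaultPolicyVersion is an illustrative sketch, not part of the
+// generated API: given a ManagedPolicyDetail, it returns the entry in
+// PolicyVersionList that is flagged as the default (operative) version
+// described above, or nil if none is flagged.
+func exampleDefaultPolicyVersion(d *ManagedPolicyDetail) *PolicyVersion {
+	for _, v := range d.PolicyVersionList {
+		if v.IsDefaultVersion != nil && *v.IsDefaultVersion {
+			return v
+		}
+	}
+	return nil
+}
+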
+func (s *ManagedPolicyDetail) SetUpdateDate(v time.Time) *ManagedPolicyDetail { + s.UpdateDate = &v + return s +} + +// Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider. +type OpenIDConnectProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s OpenIDConnectProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OpenIDConnectProviderListEntry) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *OpenIDConnectProviderListEntry) SetArn(v string) *OpenIDConnectProviderListEntry { + s.Arn = &v + return s +} + +// Contains information about the effect that Organizations has on a policy +// simulation. +type OrganizationsDecisionDetail struct { + _ struct{} `type:"structure"` + + // Specifies whether the simulated operation is allowed by the Organizations + // service control policies that impact the simulated user's account. + AllowedByOrganizations *bool `type:"boolean"` +} + +// String returns the string representation +func (s OrganizationsDecisionDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OrganizationsDecisionDetail) GoString() string { + return s.String() +} + +// SetAllowedByOrganizations sets the AllowedByOrganizations field's value. +func (s *OrganizationsDecisionDetail) SetAllowedByOrganizations(v bool) *OrganizationsDecisionDetail { + s.AllowedByOrganizations = &v + return s +} + +// Contains information about the account password policy. +// +// This data type is used as a response element in the GetAccountPasswordPolicy +// operation. +type PasswordPolicy struct { + _ struct{} `type:"structure"` + + // Specifies whether IAM users are allowed to change their own password. + AllowUsersToChangePassword *bool `type:"boolean"` + + // Indicates whether passwords in the account expire. Returns true if MaxPasswordAge + // contains a value greater than 0. Returns false if MaxPasswordAge is 0 or + // not present. + ExpirePasswords *bool `type:"boolean"` + + // Specifies whether IAM users are prevented from setting a new password after + // their password has expired. + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // Minimum length to require for IAM user passwords. + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether to require lowercase characters for IAM user passwords. + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether to require numbers for IAM user passwords. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether to require symbols for IAM user passwords. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether to require uppercase characters for IAM user passwords. 
+ RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s PasswordPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PasswordPolicy) GoString() string { + return s.String() +} + +// SetAllowUsersToChangePassword sets the AllowUsersToChangePassword field's value. +func (s *PasswordPolicy) SetAllowUsersToChangePassword(v bool) *PasswordPolicy { + s.AllowUsersToChangePassword = &v + return s +} + +// SetExpirePasswords sets the ExpirePasswords field's value. +func (s *PasswordPolicy) SetExpirePasswords(v bool) *PasswordPolicy { + s.ExpirePasswords = &v + return s +} + +// SetHardExpiry sets the HardExpiry field's value. +func (s *PasswordPolicy) SetHardExpiry(v bool) *PasswordPolicy { + s.HardExpiry = &v + return s +} + +// SetMaxPasswordAge sets the MaxPasswordAge field's value. +func (s *PasswordPolicy) SetMaxPasswordAge(v int64) *PasswordPolicy { + s.MaxPasswordAge = &v + return s +} + +// SetMinimumPasswordLength sets the MinimumPasswordLength field's value. +func (s *PasswordPolicy) SetMinimumPasswordLength(v int64) *PasswordPolicy { + s.MinimumPasswordLength = &v + return s +} + +// SetPasswordReusePrevention sets the PasswordReusePrevention field's value. +func (s *PasswordPolicy) SetPasswordReusePrevention(v int64) *PasswordPolicy { + s.PasswordReusePrevention = &v + return s +} + +// SetRequireLowercaseCharacters sets the RequireLowercaseCharacters field's value. +func (s *PasswordPolicy) SetRequireLowercaseCharacters(v bool) *PasswordPolicy { + s.RequireLowercaseCharacters = &v + return s +} + +// SetRequireNumbers sets the RequireNumbers field's value. +func (s *PasswordPolicy) SetRequireNumbers(v bool) *PasswordPolicy { + s.RequireNumbers = &v + return s +} + +// SetRequireSymbols sets the RequireSymbols field's value. +func (s *PasswordPolicy) SetRequireSymbols(v bool) *PasswordPolicy { + s.RequireSymbols = &v + return s +} + +// SetRequireUppercaseCharacters sets the RequireUppercaseCharacters field's value. +func (s *PasswordPolicy) SetRequireUppercaseCharacters(v bool) *PasswordPolicy { + s.RequireUppercaseCharacters = &v + return s +} + +// Contains information about a managed policy. +// +// This data type is used as a response element in the CreatePolicy, GetPolicy, +// and ListPolicies operations. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type Policy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of entities (users, groups, and roles) that the policy is attached + // to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp"` + + // The identifier for the version of the policy that is set as the default version. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + // + // This element is included in the response to the GetPolicy operation. 
It is + // not included in the response to the ListPolicies operation. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The number of entities (users and roles) for which the policy is used to + // set the permissions boundary. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundaryUsageCount *int64 `type:"integer"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Policy) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *Policy) SetArn(v string) *Policy { + s.Arn = &v + return s +} + +// SetAttachmentCount sets the AttachmentCount field's value. +func (s *Policy) SetAttachmentCount(v int64) *Policy { + s.AttachmentCount = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *Policy) SetCreateDate(v time.Time) *Policy { + s.CreateDate = &v + return s +} + +// SetDefaultVersionId sets the DefaultVersionId field's value. +func (s *Policy) SetDefaultVersionId(v string) *Policy { + s.DefaultVersionId = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *Policy) SetDescription(v string) *Policy { + s.Description = &v + return s +} + +// SetIsAttachable sets the IsAttachable field's value. +func (s *Policy) SetIsAttachable(v bool) *Policy { + s.IsAttachable = &v + return s +} + +// SetPath sets the Path field's value. +func (s *Policy) SetPath(v string) *Policy { + s.Path = &v + return s +} + +// SetPermissionsBoundaryUsageCount sets the PermissionsBoundaryUsageCount field's value. +func (s *Policy) SetPermissionsBoundaryUsageCount(v int64) *Policy { + s.PermissionsBoundaryUsageCount = &v + return s +} + +// SetPolicyId sets the PolicyId field's value. +func (s *Policy) SetPolicyId(v string) *Policy { + s.PolicyId = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *Policy) SetPolicyName(v string) *Policy { + s.PolicyName = &v + return s +} + +// SetUpdateDate sets the UpdateDate field's value. 
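+
+// examplePolicyDescription is an illustrative sketch, not part of the
+// generated API. As noted above, Description is populated in GetPolicy
+// responses but not in ListPolicies responses, so a Policy taken from a
+// list must be re-fetched by ARN to read its description. It assumes svc
+// is the *IAM client and GetPolicyInput the input type generated elsewhere
+// in this file.
+func examplePolicyDescription(svc *IAM, p *Policy) (string, error) {
+	out, err := svc.GetPolicy(&GetPolicyInput{PolicyArn: p.Arn})
+	if err != nil {
+		return "", err
+	}
+	if out.Policy != nil && out.Policy.Description != nil {
+		return *out.Policy.Description, nil
+	}
+	return "", nil
+}
+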
+func (s *Policy) SetUpdateDate(v time.Time) *Policy { + s.UpdateDate = &v + return s +} + +// Contains information about an IAM policy, including the policy document. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +type PolicyDetail struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDetail) GoString() string { + return s.String() +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *PolicyDetail) SetPolicyDocument(v string) *PolicyDetail { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PolicyDetail) SetPolicyName(v string) *PolicyDetail { + s.PolicyName = &v + return s +} + +// Contains details about the permissions policies that are attached to the +// specified identity (user, group, or role). +// +// This data type is an element of the ListPoliciesGrantingServiceAccessEntry +// object. +type PolicyGrantingServiceAccess struct { + _ struct{} `type:"structure"` + + // The name of the entity (user or role) to which the inline policy is attached. + // + // This field is null for managed policies. For more information about these + // policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + EntityName *string `min:"1" type:"string"` + + // The type of entity (user or role) that used the policy to access the service + // to which the inline policy is attached. + // + // This field is null for managed policies. For more information about these + // policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + EntityType *string `type:"string" enum:"policyOwnerEntityType"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The policy name. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. For more information about these policy types, see Managed + // Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + // + // PolicyType is a required field + PolicyType *string `type:"string" required:"true" enum:"policyType"` +} + +// String returns the string representation +func (s PolicyGrantingServiceAccess) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyGrantingServiceAccess) GoString() string { + return s.String() +} + +// SetEntityName sets the EntityName field's value. +func (s *PolicyGrantingServiceAccess) SetEntityName(v string) *PolicyGrantingServiceAccess { + s.EntityName = &v + return s +} + +// SetEntityType sets the EntityType field's value. 
+func (s *PolicyGrantingServiceAccess) SetEntityType(v string) *PolicyGrantingServiceAccess { + s.EntityType = &v + return s +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *PolicyGrantingServiceAccess) SetPolicyArn(v string) *PolicyGrantingServiceAccess { + s.PolicyArn = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PolicyGrantingServiceAccess) SetPolicyName(v string) *PolicyGrantingServiceAccess { + s.PolicyName = &v + return s +} + +// SetPolicyType sets the PolicyType field's value. +func (s *PolicyGrantingServiceAccess) SetPolicyType(v string) *PolicyGrantingServiceAccess { + s.PolicyType = &v + return s +} + +// Contains information about a group that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyGroup struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + GroupId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the group. + GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyGroup) GoString() string { + return s.String() +} + +// SetGroupId sets the GroupId field's value. +func (s *PolicyGroup) SetGroupId(v string) *PolicyGroup { + s.GroupId = &v + return s +} + +// SetGroupName sets the GroupName field's value. +func (s *PolicyGroup) SetGroupName(v string) *PolicyGroup { + s.GroupName = &v + return s +} + +// Contains information about a role that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyRole struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + RoleId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the role. + RoleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyRole) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyRole) GoString() string { + return s.String() +} + +// SetRoleId sets the RoleId field's value. +func (s *PolicyRole) SetRoleId(v string) *PolicyRole { + s.RoleId = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *PolicyRole) SetRoleName(v string) *PolicyRole { + s.RoleName = &v + return s +} + +// Contains information about a user that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. 
+// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyUser struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + UserId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyUser) GoString() string { + return s.String() +} + +// SetUserId sets the UserId field's value. +func (s *PolicyUser) SetUserId(v string) *PolicyUser { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *PolicyUser) SetUserName(v string) *PolicyUser { + s.UserName = &v + return s +} + +// Contains information about a version of a managed policy. +// +// This data type is used as a response element in the CreatePolicyVersion, +// GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails +// operations. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy version was created. + CreateDate *time.Time `type:"timestamp"` + + // The policy document. + // + // The policy document is returned in the response to the GetPolicyVersion and + // GetAccountAuthorizationDetails operations. It is not returned in the response + // to the CreatePolicyVersion or ListPolicyVersions operations. + // + // The policy document returned in this structure is URL-encoded compliant with + // RFC 3986 (https://tools.ietf.org/html/rfc3986). You can use a URL decoding + // method to convert the policy back to plain JSON text. For example, if you + // use Java, you can use the decode method of the java.net.URLDecoder utility + // class in the Java SDK. Other languages and SDKs provide similar functionality. + Document *string `min:"1" type:"string"` + + // Specifies whether the policy version is set as the policy's default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier for the policy version. + // + // Policy version identifiers always begin with v (always lowercase). When a + // policy is created, the first policy version is v1. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyVersion) GoString() string { + return s.String() +} + +// SetCreateDate sets the CreateDate field's value. +func (s *PolicyVersion) SetCreateDate(v time.Time) *PolicyVersion { + s.CreateDate = &v + return s +} + +// SetDocument sets the Document field's value. +func (s *PolicyVersion) SetDocument(v string) *PolicyVersion { + s.Document = &v + return s +} + +// SetIsDefaultVersion sets the IsDefaultVersion field's value. 
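+
+// exampleDecodePolicyDocument is an illustrative sketch, not part of the
+// generated API. As described above, Document is returned URL-encoded per
+// RFC 3986, so it must be decoded back to plain JSON text before use. The
+// sketch assumes net/url is added to this file's imports.
+func exampleDecodePolicyDocument(v *PolicyVersion) (string, error) {
+	if v.Document == nil {
+		return "", nil
+	}
+	// QueryUnescape reverses the percent-encoding applied to the stored
+	// policy document.
+	return url.QueryUnescape(*v.Document)
+}
+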
+func (s *PolicyVersion) SetIsDefaultVersion(v bool) *PolicyVersion { + s.IsDefaultVersion = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PolicyVersion) SetVersionId(v string) *PolicyVersion { + s.VersionId = &v + return s +} + +// Contains the row and column of a location of a Statement element in a policy +// document. +// +// This data type is used as a member of the Statement type. +type Position struct { + _ struct{} `type:"structure"` + + // The column in the line containing the specified position in the document. + Column *int64 `type:"integer"` + + // The line containing the specified position in the document. + Line *int64 `type:"integer"` +} + +// String returns the string representation +func (s Position) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Position) GoString() string { + return s.String() +} + +// SetColumn sets the Column field's value. +func (s *Position) SetColumn(v int64) *Position { + s.Column = &v + return s +} + +// SetLine sets the Line field's value. +func (s *Position) SetLine(v int64) *Position { + s.Line = &v + return s +} + +type PutGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the group to associate the policy with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@-. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
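+
+// examplePutGroupPolicyInput is an illustrative sketch, not part of the
+// generated API: it builds a PutGroupPolicyInput with an inline JSON policy
+// document of the shape the comments above require (IAM itself accepts only
+// JSON; YAML is converted only by AWS CloudFormation). The group name, policy
+// name, and statement are made-up example values.
+func examplePutGroupPolicyInput() *PutGroupPolicyInput {
+	doc := `{
+	  "Version": "2012-10-17",
+	  "Statement": [{"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}]
+	}`
+	return (&PutGroupPolicyInput{}).
+		SetGroupName("Developers").
+		SetPolicyName("DevS3List").
+		SetPolicyDocument(doc)
+}
+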
+func (s *PutGroupPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutGroupPolicyInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *PutGroupPolicyInput) SetGroupName(v string) *PutGroupPolicyInput { + s.GroupName = &v + return s +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *PutGroupPolicyInput) SetPolicyDocument(v string) *PutGroupPolicyInput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PutGroupPolicyInput) SetPolicyName(v string) *PutGroupPolicyInput { + s.PolicyName = &v + return s +} + +type PutGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutGroupPolicyOutput) GoString() string { + return s.String() +} + +type PutRolePermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The ARN of the policy that is used to set the permissions boundary for the + // role. + // + // PermissionsBoundary is a required field + PermissionsBoundary *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM role for which you want to set + // the permissions boundary. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePermissionsBoundaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRolePermissionsBoundaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRolePermissionsBoundaryInput"} + if s.PermissionsBoundary == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionsBoundary")) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PermissionsBoundary", 20)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *PutRolePermissionsBoundaryInput) SetPermissionsBoundary(v string) *PutRolePermissionsBoundaryInput { + s.PermissionsBoundary = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
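+
+// Example (illustrative sketch, not generated code): setting a role's
+// permissions boundary. Calling Validate first mirrors the client-side checks
+// above, so a malformed input fails before any request is sent. svc, the ARN,
+// and the role name are assumptions:
+//
+//	input := &iam.PutRolePermissionsBoundaryInput{
+//		PermissionsBoundary: aws.String("arn:aws:iam::aws:policy/PowerUserAccess"),
+//		RoleName:            aws.String("ExampleRole"),
+//	}
+//	if err := input.Validate(); err != nil {
+//		log.Fatal(err)
+//	}
+//	_, err := svc.PutRolePermissionsBoundary(input)
+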
+func (s *PutRolePermissionsBoundaryInput) SetRoleName(v string) *PutRolePermissionsBoundaryInput { + s.RoleName = &v + return s +} + +type PutRolePermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePermissionsBoundaryOutput) GoString() string { + return s.String() +} + +type PutRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role to associate the policy with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutRolePolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyDocument sets the PolicyDocument field's value. 
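+
+// Example (illustrative sketch, not generated code): the generated setters above
+// return the receiver, so an input can also be built fluently. svc and the
+// policyJSON variable are assumptions:
+//
+//	_, err := svc.PutRolePolicy((&iam.PutRolePolicyInput{}).
+//		SetRoleName("ExampleRole").
+//		SetPolicyName("AllowDynamoRead").
+//		SetPolicyDocument(policyJSON))
+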
+func (s *PutRolePolicyInput) SetPolicyDocument(v string) *PutRolePolicyInput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PutRolePolicyInput) SetPolicyName(v string) *PutRolePolicyInput { + s.PolicyName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *PutRolePolicyInput) SetRoleName(v string) *PutRolePolicyInput { + s.RoleName = &v + return s +} + +type PutRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutRolePolicyOutput) GoString() string { + return s.String() +} + +type PutUserPermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The ARN of the policy that is used to set the permissions boundary for the + // user. + // + // PermissionsBoundary is a required field + PermissionsBoundary *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user for which you want to set + // the permissions boundary. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPermissionsBoundaryInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutUserPermissionsBoundaryInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutUserPermissionsBoundaryInput"} + if s.PermissionsBoundary == nil { + invalidParams.Add(request.NewErrParamRequired("PermissionsBoundary")) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PermissionsBoundary", 20)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *PutUserPermissionsBoundaryInput) SetPermissionsBoundary(v string) *PutUserPermissionsBoundaryInput { + s.PermissionsBoundary = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *PutUserPermissionsBoundaryInput) SetUserName(v string) *PutUserPermissionsBoundaryInput { + s.UserName = &v + return s +} + +type PutUserPermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPermissionsBoundaryOutput) GoString() string { + return s.String() +} + +type PutUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user to associate the policy with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutUserPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutUserPolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *PutUserPolicyInput) SetPolicyDocument(v string) *PutUserPolicyInput { + s.PolicyDocument = &v + return s +} + +// SetPolicyName sets the PolicyName field's value. +func (s *PutUserPolicyInput) SetPolicyName(v string) *PutUserPolicyInput { + s.PolicyName = &v + return s +} + +// SetUserName sets the UserName field's value. 
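+
+// Example (illustrative sketch, not generated code): embedding an inline policy
+// in a user, under the same assumptions as the sketches above (svc client;
+// placeholder user name, policy name, and JSON document):
+//
+//	_, err := svc.PutUserPolicy(&iam.PutUserPolicyInput{
+//		UserName:       aws.String("jane"),
+//		PolicyName:     aws.String("AllowSQSSend"),
+//		PolicyDocument: aws.String(policyJSON),
+//	})
+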
+func (s *PutUserPolicyInput) SetUserName(v string) *PutUserPolicyInput { + s.UserName = &v + return s +} + +type PutUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutUserPolicyOutput) GoString() string { + return s.String() +} + +type RemoveClientIDFromOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to remove from the IAM OIDC provider + // resource. For more information about client IDs, see CreateOpenIDConnectProvider. + // + // ClientID is a required field + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove + // the client ID from. You can get a list of OIDC provider ARNs by using the + // ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveClientIDFromOpenIDConnectProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveClientIDFromOpenIDConnectProviderInput"} + if s.ClientID == nil { + invalidParams.Add(request.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientID", 1)) + } + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetClientID sets the ClientID field's value. +func (s *RemoveClientIDFromOpenIDConnectProviderInput) SetClientID(v string) *RemoveClientIDFromOpenIDConnectProviderInput { + s.ClientID = &v + return s +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. +func (s *RemoveClientIDFromOpenIDConnectProviderInput) SetOpenIDConnectProviderArn(v string) *RemoveClientIDFromOpenIDConnectProviderInput { + s.OpenIDConnectProviderArn = &v + return s +} + +type RemoveClientIDFromOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) GoString() string { + return s.String() +} + +type RemoveRoleFromInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to remove. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveRoleFromInstanceProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveRoleFromInstanceProfileInput"} + if s.InstanceProfileName == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetInstanceProfileName sets the InstanceProfileName field's value. +func (s *RemoveRoleFromInstanceProfileInput) SetInstanceProfileName(v string) *RemoveRoleFromInstanceProfileInput { + s.InstanceProfileName = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *RemoveRoleFromInstanceProfileInput) SetRoleName(v string) *RemoveRoleFromInstanceProfileInput { + s.RoleName = &v + return s +} + +type RemoveRoleFromInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) GoString() string { + return s.String() +} + +type RemoveUserFromGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to remove. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveUserFromGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveUserFromGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveUserFromGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *RemoveUserFromGroupInput) SetGroupName(v string) *RemoveUserFromGroupInput { + s.GroupName = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *RemoveUserFromGroupInput) SetUserName(v string) *RemoveUserFromGroupInput { + s.UserName = &v + return s +} + +type RemoveUserFromGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveUserFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveUserFromGroupOutput) GoString() string { + return s.String() +} + +type ResetServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the service-specific credential. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // ServiceSpecificCredentialId is a required field + ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the service-specific credential. + // If this value is not specified, then the operation assumes the user whose + // credentials are used to call the operation. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResetServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetServiceSpecificCredentialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
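+
+// Example (illustrative sketch, not generated code): resetting a service-specific
+// credential. The credential ID is a placeholder and svc is an assumed client.
+// As the output type below notes, the returned password is visible only in this
+// response, so it should be captured immediately:
+//
+//	out, err := svc.ResetServiceSpecificCredential(&iam.ResetServiceSpecificCredentialInput{
+//		ServiceSpecificCredentialId: aws.String("ACCAEXAMPLE123EXAMPLE"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	newPassword := aws.StringValue(out.ServiceSpecificCredential.ServicePassword)
+//	_ = newPassword // store securely; it cannot be retrieved again
+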
+func (s *ResetServiceSpecificCredentialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ResetServiceSpecificCredentialInput"} + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServiceSpecificCredentialId sets the ServiceSpecificCredentialId field's value. +func (s *ResetServiceSpecificCredentialInput) SetServiceSpecificCredentialId(v string) *ResetServiceSpecificCredentialInput { + s.ServiceSpecificCredentialId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ResetServiceSpecificCredentialInput) SetUserName(v string) *ResetServiceSpecificCredentialInput { + s.UserName = &v + return s +} + +type ResetServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the updated service-specific credential, including + // the new password. + // + // This is the only time that you can access the password. You cannot recover + // the password later, but you can reset it again. + ServiceSpecificCredential *ServiceSpecificCredential `type:"structure"` +} + +// String returns the string representation +func (s ResetServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResetServiceSpecificCredentialOutput) GoString() string { + return s.String() +} + +// SetServiceSpecificCredential sets the ServiceSpecificCredential field's value. +func (s *ResetServiceSpecificCredentialOutput) SetServiceSpecificCredential(v *ServiceSpecificCredential) *ResetServiceSpecificCredentialOutput { + s.ServiceSpecificCredential = v + return s +} + +// Contains the result of the simulation of a single API operation call on a +// single resource. +// +// This data type is used by a member of the EvaluationResult data type. +type ResourceSpecificResult struct { + _ struct{} `type:"structure"` + + // Additional details about the results of the evaluation decision. When there + // are both IAM policies and resource policies, this parameter explains how + // each set of policies contributes to the final evaluation decision. When simulating + // cross-account access to a resource, both the resource-based policy and the + // caller's IAM policy must grant access. + EvalDecisionDetails map[string]*string `type:"map"` + + // The result of the simulation of the simulated API operation on the resource + // specified in EvalResourceName. + // + // EvalResourceDecision is a required field + EvalResourceDecision *string `type:"string" required:"true" enum:"PolicyEvaluationDecisionType"` + + // The name of the simulated resource, in Amazon Resource Name (ARN) format. + // + // EvalResourceName is a required field + EvalResourceName *string `min:"1" type:"string" required:"true"` + + // A list of the statements in the input policies that determine the result + // for this part of the simulation. Remember that even if multiple statements + // allow the operation on the resource, if any statement denies that operation, + // then the explicit deny overrides any allow. 
In addition, the deny statement + // is the only entry included in the result. + MatchedStatements []*Statement `type:"list"` + + // A list of context keys that are required by the included input policies but + // that were not provided by one of the input parameters. This list is used + // when a list of ARNs is included in the ResourceArns parameter instead of + // "*". If you do not specify individual resources, by setting ResourceArns + // to "*" or by not including the ResourceArns parameter, then any missing context + // values are instead included under the EvaluationResults section. To discover + // the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy + // or GetContextKeysForPrincipalPolicy. + MissingContextValues []*string `type:"list"` +} + +// String returns the string representation +func (s ResourceSpecificResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ResourceSpecificResult) GoString() string { + return s.String() +} + +// SetEvalDecisionDetails sets the EvalDecisionDetails field's value. +func (s *ResourceSpecificResult) SetEvalDecisionDetails(v map[string]*string) *ResourceSpecificResult { + s.EvalDecisionDetails = v + return s +} + +// SetEvalResourceDecision sets the EvalResourceDecision field's value. +func (s *ResourceSpecificResult) SetEvalResourceDecision(v string) *ResourceSpecificResult { + s.EvalResourceDecision = &v + return s +} + +// SetEvalResourceName sets the EvalResourceName field's value. +func (s *ResourceSpecificResult) SetEvalResourceName(v string) *ResourceSpecificResult { + s.EvalResourceName = &v + return s +} + +// SetMatchedStatements sets the MatchedStatements field's value. +func (s *ResourceSpecificResult) SetMatchedStatements(v []*Statement) *ResourceSpecificResult { + s.MatchedStatements = v + return s +} + +// SetMissingContextValues sets the MissingContextValues field's value. +func (s *ResourceSpecificResult) SetMissingContextValues(v []*string) *ResourceSpecificResult { + s.MissingContextValues = v + return s +} + +type ResyncMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + // + // AuthenticationCode1 is a required field + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + // + // AuthenticationCode2 is a required field + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // Serial number that uniquely identifies the MFA device. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to resynchronize. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@-
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ResyncMFADeviceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResyncMFADeviceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ResyncMFADeviceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "ResyncMFADeviceInput"}
+	if s.AuthenticationCode1 == nil {
+		invalidParams.Add(request.NewErrParamRequired("AuthenticationCode1"))
+	}
+	if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode1", 6))
+	}
+	if s.AuthenticationCode2 == nil {
+		invalidParams.Add(request.NewErrParamRequired("AuthenticationCode2"))
+	}
+	if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("AuthenticationCode2", 6))
+	}
+	if s.SerialNumber == nil {
+		invalidParams.Add(request.NewErrParamRequired("SerialNumber"))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+	}
+	if s.UserName == nil {
+		invalidParams.Add(request.NewErrParamRequired("UserName"))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetAuthenticationCode1 sets the AuthenticationCode1 field's value.
+func (s *ResyncMFADeviceInput) SetAuthenticationCode1(v string) *ResyncMFADeviceInput {
+	s.AuthenticationCode1 = &v
+	return s
+}
+
+// SetAuthenticationCode2 sets the AuthenticationCode2 field's value.
+func (s *ResyncMFADeviceInput) SetAuthenticationCode2(v string) *ResyncMFADeviceInput {
+	s.AuthenticationCode2 = &v
+	return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *ResyncMFADeviceInput) SetSerialNumber(v string) *ResyncMFADeviceInput {
+	s.SerialNumber = &v
+	return s
+}
+
+// SetUserName sets the UserName field's value.
+func (s *ResyncMFADeviceInput) SetUserName(v string) *ResyncMFADeviceInput {
+	s.UserName = &v
+	return s
+}
+
+type ResyncMFADeviceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ResyncMFADeviceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ResyncMFADeviceOutput) GoString() string {
+	return s.String()
+}
+
+// Contains information about an IAM role. This structure is returned as a response
+// element in several API operations that interact with roles.
+type Role struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) specifying the role. For more information
+	// about ARNs and how to use them in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the IAM User Guide.
+	//
+	// Arn is a required field
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The policy that grants an entity permission to assume the role.
+	AssumeRolePolicyDocument *string `min:"1" type:"string"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the role was created.
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" required:"true"`
+
+	// A description of the role that you provide.
+	Description *string `type:"string"`
+
+	// The maximum session duration (in seconds) for the specified role. Anyone
+	// who uses the AWS CLI or API to assume the role can specify the duration
+	// using the optional DurationSeconds API parameter or duration-seconds CLI
+	// parameter.
+	MaxSessionDuration *int64 `min:"3600" type:"integer"`
+
+	// The path to the role. For more information about paths, see IAM Identifiers
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// Path is a required field
+	Path *string `min:"1" type:"string" required:"true"`
+
+	// The ARN of the policy used to set the permissions boundary for the role.
+	//
+	// For more information about permissions boundaries, see Permissions Boundaries
+	// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)
+	// in the IAM User Guide.
+	PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"`
+
+	// The stable and unique string identifying the role. For more information about
+	// IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// RoleId is a required field
+	RoleId *string `min:"16" type:"string" required:"true"`
+
+	// The friendly name that identifies the role.
+	//
+	// RoleName is a required field
+	RoleName *string `min:"1" type:"string" required:"true"`
+
+	// A list of tags that are attached to the specified role. For more information
+	// about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+	// in the IAM User Guide.
+	Tags []*Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s Role) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Role) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *Role) SetArn(v string) *Role {
+	s.Arn = &v
+	return s
+}
+
+// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value.
+func (s *Role) SetAssumeRolePolicyDocument(v string) *Role {
+	s.AssumeRolePolicyDocument = &v
+	return s
+}
+
+// SetCreateDate sets the CreateDate field's value.
+func (s *Role) SetCreateDate(v time.Time) *Role {
+	s.CreateDate = &v
+	return s
+}
+
+// SetDescription sets the Description field's value.
+func (s *Role) SetDescription(v string) *Role {
+	s.Description = &v
+	return s
+}
+
+// SetMaxSessionDuration sets the MaxSessionDuration field's value.
+func (s *Role) SetMaxSessionDuration(v int64) *Role {
+	s.MaxSessionDuration = &v
+	return s
+}
+
+// SetPath sets the Path field's value.
+func (s *Role) SetPath(v string) *Role {
+	s.Path = &v
+	return s
+}
+
+// SetPermissionsBoundary sets the PermissionsBoundary field's value.
+func (s *Role) SetPermissionsBoundary(v *AttachedPermissionsBoundary) *Role {
+	s.PermissionsBoundary = v
+	return s
+}
+
+// SetRoleId sets the RoleId field's value.
+func (s *Role) SetRoleId(v string) *Role {
+	s.RoleId = &v
+	return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *Role) SetRoleName(v string) *Role {
+	s.RoleName = &v
+	return s
+}
+
+// SetTags sets the Tags field's value.
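+
+// Example (illustrative sketch, not generated code): Role is a response element,
+// typically obtained from operations such as GetRole. svc and the role name are
+// assumptions:
+//
+//	out, err := svc.GetRole(&iam.GetRoleInput{RoleName: aws.String("ExampleRole")})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(aws.StringValue(out.Role.Arn))
+//	fmt.Println(aws.Int64Value(out.Role.MaxSessionDuration)) // seconds
+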
+func (s *Role) SetTags(v []*Tag) *Role { + s.Tags = v + return s +} + +// Contains information about an IAM role, including all of the role's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +type RoleDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The trust policy that grants permission to assume the role. + AssumeRolePolicyDocument *string `min:"1" type:"string"` + + // A list of managed policies attached to the role. These policies are the role's + // access (permissions) policies. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the role was created. + CreateDate *time.Time `type:"timestamp"` + + // A list of instance profiles that contain this role. + InstanceProfileList []*InstanceProfile `type:"list"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The ARN of the policy used to set the permissions boundary for the role. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string"` + + // A list of inline policies embedded in the role. These policies are the role's + // access (permissions) policies. + RolePolicyList []*PolicyDetail `type:"list"` + + // A list of tags that are attached to the specified role. For more information + // about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s RoleDetail) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoleDetail) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *RoleDetail) SetArn(v string) *RoleDetail { + s.Arn = &v + return s +} + +// SetAssumeRolePolicyDocument sets the AssumeRolePolicyDocument field's value. +func (s *RoleDetail) SetAssumeRolePolicyDocument(v string) *RoleDetail { + s.AssumeRolePolicyDocument = &v + return s +} + +// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value. +func (s *RoleDetail) SetAttachedManagedPolicies(v []*AttachedPolicy) *RoleDetail { + s.AttachedManagedPolicies = v + return s +} + +// SetCreateDate sets the CreateDate field's value. 
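+
+// Example (illustrative sketch, not generated code): RoleDetail values arrive in
+// the GetAccountAuthorizationDetails response; iam.EntityTypeRole is the
+// generated constant for the Filter parameter. svc is an assumed client:
+//
+//	out, err := svc.GetAccountAuthorizationDetails(&iam.GetAccountAuthorizationDetailsInput{
+//		Filter: []*string{aws.String(iam.EntityTypeRole)},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, rd := range out.RoleDetailList {
+//		fmt.Println(aws.StringValue(rd.RoleName), len(rd.AttachedManagedPolicies))
+//	}
+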
+func (s *RoleDetail) SetCreateDate(v time.Time) *RoleDetail {
+	s.CreateDate = &v
+	return s
+}
+
+// SetInstanceProfileList sets the InstanceProfileList field's value.
+func (s *RoleDetail) SetInstanceProfileList(v []*InstanceProfile) *RoleDetail {
+	s.InstanceProfileList = v
+	return s
+}
+
+// SetPath sets the Path field's value.
+func (s *RoleDetail) SetPath(v string) *RoleDetail {
+	s.Path = &v
+	return s
+}
+
+// SetPermissionsBoundary sets the PermissionsBoundary field's value.
+func (s *RoleDetail) SetPermissionsBoundary(v *AttachedPermissionsBoundary) *RoleDetail {
+	s.PermissionsBoundary = v
+	return s
+}
+
+// SetRoleId sets the RoleId field's value.
+func (s *RoleDetail) SetRoleId(v string) *RoleDetail {
+	s.RoleId = &v
+	return s
+}
+
+// SetRoleName sets the RoleName field's value.
+func (s *RoleDetail) SetRoleName(v string) *RoleDetail {
+	s.RoleName = &v
+	return s
+}
+
+// SetRolePolicyList sets the RolePolicyList field's value.
+func (s *RoleDetail) SetRolePolicyList(v []*PolicyDetail) *RoleDetail {
+	s.RolePolicyList = v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *RoleDetail) SetTags(v []*Tag) *RoleDetail {
+	s.Tags = v
+	return s
+}
+
+// An object that contains details about how a service-linked role is used,
+// if that information is returned by the service.
+//
+// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus
+// operation.
+type RoleUsageType struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the Region where the service-linked role is being used.
+	Region *string `min:"1" type:"string"`
+
+	// The names of the resources that are using the service-linked role.
+	Resources []*string `type:"list"`
+}
+
+// String returns the string representation
+func (s RoleUsageType) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoleUsageType) GoString() string {
+	return s.String()
+}
+
+// SetRegion sets the Region field's value.
+func (s *RoleUsageType) SetRegion(v string) *RoleUsageType {
+	s.Region = &v
+	return s
+}
+
+// SetResources sets the Resources field's value.
+func (s *RoleUsageType) SetResources(v []*string) *RoleUsageType {
+	s.Resources = v
+	return s
+}
+
+// Contains the list of SAML providers for this account.
+type SAMLProviderListEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider.
+	Arn *string `min:"20" type:"string"`
+
+	// The date and time when the SAML provider was created.
+	CreateDate *time.Time `type:"timestamp"`
+
+	// The expiration date and time for the SAML provider.
+	ValidUntil *time.Time `type:"timestamp"`
+}
+
+// String returns the string representation
+func (s SAMLProviderListEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SAMLProviderListEntry) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *SAMLProviderListEntry) SetArn(v string) *SAMLProviderListEntry {
+	s.Arn = &v
+	return s
+}
+
+// SetCreateDate sets the CreateDate field's value.
+func (s *SAMLProviderListEntry) SetCreateDate(v time.Time) *SAMLProviderListEntry {
+	s.CreateDate = &v
+	return s
+}
+
+// SetValidUntil sets the ValidUntil field's value.
+func (s *SAMLProviderListEntry) SetValidUntil(v time.Time) *SAMLProviderListEntry {
+	s.ValidUntil = &v
+	return s
+}
+
+// Contains information about an SSH public key.
+// +// This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey +// operations. +type SSHPublicKey struct { + _ struct{} `type:"structure"` + + // The MD5 message digest of the SSH public key. + // + // Fingerprint is a required field + Fingerprint *string `min:"48" type:"string" required:"true"` + + // The SSH public key. + // + // SSHPublicKeyBody is a required field + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the SSH public key. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means that the key can be used for + // authentication with an AWS CodeCommit repository. Inactive means that the + // key cannot be used. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp"` + + // The name of the IAM user associated with the SSH public key. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKey) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKey) GoString() string { + return s.String() +} + +// SetFingerprint sets the Fingerprint field's value. +func (s *SSHPublicKey) SetFingerprint(v string) *SSHPublicKey { + s.Fingerprint = &v + return s +} + +// SetSSHPublicKeyBody sets the SSHPublicKeyBody field's value. +func (s *SSHPublicKey) SetSSHPublicKeyBody(v string) *SSHPublicKey { + s.SSHPublicKeyBody = &v + return s +} + +// SetSSHPublicKeyId sets the SSHPublicKeyId field's value. +func (s *SSHPublicKey) SetSSHPublicKeyId(v string) *SSHPublicKey { + s.SSHPublicKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SSHPublicKey) SetStatus(v string) *SSHPublicKey { + s.Status = &v + return s +} + +// SetUploadDate sets the UploadDate field's value. +func (s *SSHPublicKey) SetUploadDate(v time.Time) *SSHPublicKey { + s.UploadDate = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SSHPublicKey) SetUserName(v string) *SSHPublicKey { + s.UserName = &v + return s +} + +// Contains information about an SSH public key, without the key's body or fingerprint. +// +// This data type is used as a response element in the ListSSHPublicKeys operation. +type SSHPublicKeyMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means that the key can be used for + // authentication with an AWS CodeCommit repository. Inactive means that the + // key cannot be used. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + // + // UploadDate is a required field + UploadDate *time.Time `type:"timestamp" required:"true"` + + // The name of the IAM user associated with the SSH public key. 
+ // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSHPublicKeyMetadata) GoString() string { + return s.String() +} + +// SetSSHPublicKeyId sets the SSHPublicKeyId field's value. +func (s *SSHPublicKeyMetadata) SetSSHPublicKeyId(v string) *SSHPublicKeyMetadata { + s.SSHPublicKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SSHPublicKeyMetadata) SetStatus(v string) *SSHPublicKeyMetadata { + s.Status = &v + return s +} + +// SetUploadDate sets the UploadDate field's value. +func (s *SSHPublicKeyMetadata) SetUploadDate(v time.Time) *SSHPublicKeyMetadata { + s.UploadDate = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SSHPublicKeyMetadata) SetUserName(v string) *SSHPublicKeyMetadata { + s.UserName = &v + return s +} + +// Contains information about a server certificate. +// +// This data type is used as a response element in the GetServerCertificate +// operation. +type ServerCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate. + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the public key certificate chain. + CertificateChain *string `min:"1" type:"string"` + + // The meta information of the server certificate, such as its name, path, ID, + // and ARN. + // + // ServerCertificateMetadata is a required field + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ServerCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificate) GoString() string { + return s.String() +} + +// SetCertificateBody sets the CertificateBody field's value. +func (s *ServerCertificate) SetCertificateBody(v string) *ServerCertificate { + s.CertificateBody = &v + return s +} + +// SetCertificateChain sets the CertificateChain field's value. +func (s *ServerCertificate) SetCertificateChain(v string) *ServerCertificate { + s.CertificateChain = &v + return s +} + +// SetServerCertificateMetadata sets the ServerCertificateMetadata field's value. +func (s *ServerCertificate) SetServerCertificateMetadata(v *ServerCertificateMetadata) *ServerCertificate { + s.ServerCertificateMetadata = v + return s +} + +// Contains information about a server certificate without its certificate body, +// certificate chain, and private key. +// +// This data type is used as a response element in the UploadServerCertificate +// and ListServerCertificates operations. +type ServerCertificateMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the server certificate. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date on which the certificate is set to expire. + Expiration *time.Time `type:"timestamp"` + + // The path to the server certificate. 
For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the server certificate. For more + // information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // ServerCertificateId is a required field + ServerCertificateId *string `min:"16" type:"string" required:"true"` + + // The name that identifies the server certificate. + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` + + // The date when the server certificate was uploaded. + UploadDate *time.Time `type:"timestamp"` +} + +// String returns the string representation +func (s ServerCertificateMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServerCertificateMetadata) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *ServerCertificateMetadata) SetArn(v string) *ServerCertificateMetadata { + s.Arn = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *ServerCertificateMetadata) SetExpiration(v time.Time) *ServerCertificateMetadata { + s.Expiration = &v + return s +} + +// SetPath sets the Path field's value. +func (s *ServerCertificateMetadata) SetPath(v string) *ServerCertificateMetadata { + s.Path = &v + return s +} + +// SetServerCertificateId sets the ServerCertificateId field's value. +func (s *ServerCertificateMetadata) SetServerCertificateId(v string) *ServerCertificateMetadata { + s.ServerCertificateId = &v + return s +} + +// SetServerCertificateName sets the ServerCertificateName field's value. +func (s *ServerCertificateMetadata) SetServerCertificateName(v string) *ServerCertificateMetadata { + s.ServerCertificateName = &v + return s +} + +// SetUploadDate sets the UploadDate field's value. +func (s *ServerCertificateMetadata) SetUploadDate(v time.Time) *ServerCertificateMetadata { + s.UploadDate = &v + return s +} + +// Contains details about the most recent attempt to access the service. +// +// This data type is used as a response element in the GetServiceLastAccessedDetails +// operation. +type ServiceLastAccessed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when an authenticated entity most recently attempted to access the service. + // AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticated *time.Time `type:"timestamp"` + + // The ARN of the authenticated entity (user or role) that last attempted to + // access the service. AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticatedEntity *string `min:"20" type:"string"` + + // The name of the service in which access was attempted. 
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The namespace of the service in which access was attempted.
+	//
+	// To learn the service namespace of a service, go to Actions, Resources, and
+	// Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html)
+	// in the IAM User Guide. Choose the name of the service to view details for
+	// that service. In the first paragraph, find the service prefix. For example,
+	// (service prefix: a4b). For more information about service namespaces, see
+	// AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
+	// in the AWS General Reference.
+	//
+	// ServiceNamespace is a required field
+	ServiceNamespace *string `min:"1" type:"string" required:"true"`
+
+	// The total number of authenticated principals (root user, IAM users, or IAM
+	// roles) that have attempted to access the service.
+	//
+	// This field is null if no principals attempted to access the service within
+	// the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period).
+	TotalAuthenticatedEntities *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ServiceLastAccessed) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServiceLastAccessed) GoString() string {
+	return s.String()
+}
+
+// SetLastAuthenticated sets the LastAuthenticated field's value.
+func (s *ServiceLastAccessed) SetLastAuthenticated(v time.Time) *ServiceLastAccessed {
+	s.LastAuthenticated = &v
+	return s
+}
+
+// SetLastAuthenticatedEntity sets the LastAuthenticatedEntity field's value.
+func (s *ServiceLastAccessed) SetLastAuthenticatedEntity(v string) *ServiceLastAccessed {
+	s.LastAuthenticatedEntity = &v
+	return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *ServiceLastAccessed) SetServiceName(v string) *ServiceLastAccessed {
+	s.ServiceName = &v
+	return s
+}
+
+// SetServiceNamespace sets the ServiceNamespace field's value.
+func (s *ServiceLastAccessed) SetServiceNamespace(v string) *ServiceLastAccessed {
+	s.ServiceNamespace = &v
+	return s
+}
+
+// SetTotalAuthenticatedEntities sets the TotalAuthenticatedEntities field's value.
+func (s *ServiceLastAccessed) SetTotalAuthenticatedEntities(v int64) *ServiceLastAccessed {
+	s.TotalAuthenticatedEntities = &v
+	return s
+}
+
+// Contains the details of a service-specific credential.
+type ServiceSpecificCredential struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the service-specific credential was created.
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" required:"true"`
+
+	// The name of the service associated with the service-specific credential.
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The generated password for the service-specific credential.
+	//
+	// ServicePassword is a required field
+	ServicePassword *string `type:"string" required:"true" sensitive:"true"`
+
+	// The unique identifier for the service-specific credential.
+ //
+ // ServiceSpecificCredentialId is a required field
+ ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"`
+
+ // The generated user name for the service-specific credential. This value is
+ // generated by combining the IAM user's name with the ID number of the AWS
+ // account, as in jane-at-123456789012, for example. This value cannot be
+ // configured by the user.
+ //
+ // ServiceUserName is a required field
+ ServiceUserName *string `min:"17" type:"string" required:"true"`
+
+ // The status of the service-specific credential. Active means that the key
+ // is valid for API calls, while Inactive means it is not.
+ //
+ // Status is a required field
+ Status *string `type:"string" required:"true" enum:"statusType"`
+
+ // The name of the IAM user associated with the service-specific credential.
+ //
+ // UserName is a required field
+ UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ServiceSpecificCredential) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ServiceSpecificCredential) GoString() string {
+ return s.String()
+}
+
+// SetCreateDate sets the CreateDate field's value.
+func (s *ServiceSpecificCredential) SetCreateDate(v time.Time) *ServiceSpecificCredential {
+ s.CreateDate = &v
+ return s
+}
+
+// SetServiceName sets the ServiceName field's value.
+func (s *ServiceSpecificCredential) SetServiceName(v string) *ServiceSpecificCredential {
+ s.ServiceName = &v
+ return s
+}
+
+// SetServiceSpecificCredentialId sets the ServiceSpecificCredentialId field's value.
+func (s *ServiceSpecificCredential) SetServiceSpecificCredentialId(v string) *ServiceSpecificCredential {
+ s.ServiceSpecificCredentialId = &v
+ return s
+}
+
+// SetServiceUserName sets the ServiceUserName field's value.
+func (s *ServiceSpecificCredential) SetServiceUserName(v string) *ServiceSpecificCredential {
+ s.ServiceUserName = &v
+ return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ServiceSpecificCredential) SetStatus(v string) *ServiceSpecificCredential {
+ s.Status = &v
+ return s
+}
+
+// SetUserName sets the UserName field's value.
+func (s *ServiceSpecificCredential) SetUserName(v string) *ServiceSpecificCredential {
+ s.UserName = &v
+ return s
+}
+
+// Contains additional details about a service-specific credential.
+type ServiceSpecificCredentialMetadata struct {
+ _ struct{} `type:"structure"`
+
+ // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+ // when the service-specific credential was created.
+ //
+ // CreateDate is a required field
+ CreateDate *time.Time `type:"timestamp" required:"true"`
+
+ // The name of the service associated with the service-specific credential.
+ //
+ // ServiceName is a required field
+ ServiceName *string `type:"string" required:"true"`
+
+ // The unique identifier for the service-specific credential.
+ //
+ // ServiceSpecificCredentialId is a required field
+ ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"`
+
+ // The generated user name for the service-specific credential.
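+ //
+ // Illustrative sketch (not generated code): these metadata records are what
+ // ListServiceSpecificCredentials returns, assuming an initialized *iam.IAM
+ // client named svc and the aws helpers from the SDK:
+ //
+ //    out, err := svc.ListServiceSpecificCredentials(&iam.ListServiceSpecificCredentialsInput{
+ //        UserName: aws.String("jane"),
+ //    })
+ //    if err != nil {
+ //        return err
+ //    }
+ //    for _, c := range out.ServiceSpecificCredentials {
+ //        fmt.Println(aws.StringValue(c.ServiceUserName), aws.StringValue(c.Status))
+ //    }
+ //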
+ // + // ServiceUserName is a required field + ServiceUserName *string `min:"17" type:"string" required:"true"` + + // The status of the service-specific credential. Active means that the key + // is valid for API calls, while Inactive means it is not. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the service-specific credential. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ServiceSpecificCredentialMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ServiceSpecificCredentialMetadata) GoString() string { + return s.String() +} + +// SetCreateDate sets the CreateDate field's value. +func (s *ServiceSpecificCredentialMetadata) SetCreateDate(v time.Time) *ServiceSpecificCredentialMetadata { + s.CreateDate = &v + return s +} + +// SetServiceName sets the ServiceName field's value. +func (s *ServiceSpecificCredentialMetadata) SetServiceName(v string) *ServiceSpecificCredentialMetadata { + s.ServiceName = &v + return s +} + +// SetServiceSpecificCredentialId sets the ServiceSpecificCredentialId field's value. +func (s *ServiceSpecificCredentialMetadata) SetServiceSpecificCredentialId(v string) *ServiceSpecificCredentialMetadata { + s.ServiceSpecificCredentialId = &v + return s +} + +// SetServiceUserName sets the ServiceUserName field's value. +func (s *ServiceSpecificCredentialMetadata) SetServiceUserName(v string) *ServiceSpecificCredentialMetadata { + s.ServiceUserName = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ServiceSpecificCredentialMetadata) SetStatus(v string) *ServiceSpecificCredentialMetadata { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *ServiceSpecificCredentialMetadata) SetUserName(v string) *ServiceSpecificCredentialMetadata { + s.UserName = &v + return s +} + +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy whose default version you + // want to set. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The version of the policy to set as the default (operative) version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
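+//
+// Illustrative sketch (not generated code): promoting an existing managed
+// policy version with this input type, assuming an initialized *iam.IAM
+// client named svc:
+//
+//    _, err := svc.SetDefaultPolicyVersion(&iam.SetDefaultPolicyVersionInput{
+//        PolicyArn: aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+//        VersionId: aws.String("v2"),
+//    })
+//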
+func (s *SetDefaultPolicyVersionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetDefaultPolicyVersionInput"} + if s.PolicyArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicyArn", 20)) + } + if s.VersionId == nil { + invalidParams.Add(request.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyArn sets the PolicyArn field's value. +func (s *SetDefaultPolicyVersionInput) SetPolicyArn(v string) *SetDefaultPolicyVersionInput { + s.PolicyArn = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *SetDefaultPolicyVersionInput) SetVersionId(v string) *SetDefaultPolicyVersionInput { + s.VersionId = &v + return s +} + +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetDefaultPolicyVersionOutput) GoString() string { + return s.String() +} + +type SetSecurityTokenServicePreferencesInput struct { + _ struct{} `type:"structure"` + + // The version of the global endpoint token. Version 1 tokens are valid only + // in AWS Regions that are available by default. These tokens do not work in + // manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens + // are valid in all Regions. However, version 2 tokens are longer and might + // affect systems where you temporarily store tokens. + // + // For information, see Activating and Deactivating STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + // + // GlobalEndpointTokenVersion is a required field + GlobalEndpointTokenVersion *string `type:"string" required:"true" enum:"globalEndpointTokenVersion"` +} + +// String returns the string representation +func (s SetSecurityTokenServicePreferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSecurityTokenServicePreferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSecurityTokenServicePreferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetSecurityTokenServicePreferencesInput"} + if s.GlobalEndpointTokenVersion == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalEndpointTokenVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGlobalEndpointTokenVersion sets the GlobalEndpointTokenVersion field's value. +func (s *SetSecurityTokenServicePreferencesInput) SetGlobalEndpointTokenVersion(v string) *SetSecurityTokenServicePreferencesInput { + s.GlobalEndpointTokenVersion = &v + return s +} + +type SetSecurityTokenServicePreferencesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSecurityTokenServicePreferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSecurityTokenServicePreferencesOutput) GoString() string { + return s.String() +} + +// Contains information about an X.509 signing certificate. 
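+//
+// Illustrative sketch (not generated code): records of this type are returned
+// by ListSigningCertificates, assuming an initialized *iam.IAM client named
+// svc:
+//
+//    out, err := svc.ListSigningCertificates(&iam.ListSigningCertificatesInput{
+//        UserName: aws.String("jane"),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    for _, c := range out.Certificates {
+//        fmt.Println(aws.StringValue(c.CertificateId), aws.StringValue(c.Status))
+//    }
+//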
+// +// This data type is used as a response element in the UploadSigningCertificate +// and ListSigningCertificates operations. +type SigningCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The ID for the signing certificate. + // + // CertificateId is a required field + CertificateId *string `min:"24" type:"string" required:"true"` + + // The status of the signing certificate. Active means that the key is valid + // for API calls, while Inactive means it is not. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The date when the signing certificate was uploaded. + UploadDate *time.Time `type:"timestamp"` + + // The name of the user the signing certificate is associated with. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SigningCertificate) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SigningCertificate) GoString() string { + return s.String() +} + +// SetCertificateBody sets the CertificateBody field's value. +func (s *SigningCertificate) SetCertificateBody(v string) *SigningCertificate { + s.CertificateBody = &v + return s +} + +// SetCertificateId sets the CertificateId field's value. +func (s *SigningCertificate) SetCertificateId(v string) *SigningCertificate { + s.CertificateId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *SigningCertificate) SetStatus(v string) *SigningCertificate { + s.Status = &v + return s +} + +// SetUploadDate sets the UploadDate field's value. +func (s *SigningCertificate) SetUploadDate(v time.Time) *SigningCertificate { + s.UploadDate = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *SigningCertificate) SetUserName(v string) *SigningCertificate { + s.UserName = &v + return s +} + +type SimulateCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API operations to evaluate in the simulation. Each operation + // is evaluated against each resource. Each operation must include the service + // identifier, such as iam:CreateUser. This operation does not support using + // wildcards (*) in an action name. + // + // ActionNames is a required field + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the IAM user that you want to use as the simulated caller of the + // API operations. CallerArn is required if you include a ResourcePolicy so + // that the policy's Principal element has a value to use in evaluating the + // policy. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN of + // an assumed role, federated user, or a service principal. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permissions + // policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. 
+ Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A list of policy documents to include in the simulation. Each document is + // specified as a string containing the complete, valid JSON text of an IAM + // policy. Do not include any resource-based policies in this parameter. Any + // resource-based policy must be submitted with the ResourcePolicy parameter. + // The policies cannot be "scope-down" policies, such as you could include in + // a call to GetFederationToken (https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) + // or one of the AssumeRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) + // API operations. In other words, do not use policies designed to restrict + // what a user can do while using the temporary credentials. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyInputList is a required field + PolicyInputList []*string `type:"list" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided, then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // If you include a ResourcePolicy, then it must be applicable to all of the + // resources included in the simulation or you receive an invalid input error. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different API operations that support + // resource-based policies require different combinations of resources. By specifying + // the type of simulation to run, you enable the policy simulator to enforce + // the presence of the required resources to ensure reliable simulation results. + // If your simulation does not match one of the following scenarios, then you + // can omit this parameter. 
The following list shows each of the supported scenario
+ // values and the resources that you must define to run the simulation.
+ //
+ // Each of the EC2 scenarios requires that you specify instance, image, and
+ // security-group resources. If your scenario includes an EBS volume, then you
+ // must specify that volume as a resource. If the EC2 scenario includes VPC,
+ // then you must supply the network-interface resource. If it includes an IP
+ // subnet, then you must specify the subnet resource. For more information on
+ // the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html)
+ // in the Amazon EC2 User Guide.
+ //
+ // * EC2-Classic-InstanceStore instance, image, security-group
+ //
+ // * EC2-Classic-EBS instance, image, security-group, volume
+ //
+ // * EC2-VPC-InstanceStore instance, image, security-group, network-interface
+ //
+ // * EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface,
+ // subnet
+ //
+ // * EC2-VPC-EBS instance, image, security-group, network-interface, volume
+ //
+ // * EC2-VPC-EBS-Subnet instance, image, security-group, network-interface,
+ // subnet, volume
+ ResourceHandlingOption *string `min:"1" type:"string"`
+
+ // An ARN representing the AWS account ID that specifies the owner of any simulated
+ // resource that does not identify its owner in the resource ARN. Examples of
+ // resource ARNs include an S3 bucket or object. If ResourceOwner is specified,
+ // it is also used as the account owner of any ResourcePolicy included in the
+ // simulation. If the ResourceOwner parameter is not specified, then the owner
+ // of the resources and the resource policy defaults to the account of the identity
+ // provided in CallerArn. This parameter is required only if you specify a resource-based
+ // policy and the account that owns the resource is different from the account
+ // that owns the simulated calling user CallerArn.
+ //
+ // The ARN for an account uses the following syntax: arn:aws:iam::AWS-account-ID:root.
+ // For example, to represent the account with the 112233445566 ID, use the following
+ // ARN: arn:aws:iam::112233445566:root.
+ ResourceOwner *string `min:"1" type:"string"`
+
+ // A resource-based policy to include in the simulation provided as a string.
+ // Each resource in the simulation is treated as if it had this policy attached.
+ // You can include only one resource-based policy in a simulation.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this
+ // parameter is a string of characters consisting of the following:
+ //
+ // * Any printable ASCII character ranging from the space character (\u0020)
+ // through the end of the ASCII character range
+ //
+ // * The printable characters in the Basic Latin and Latin-1 Supplement character
+ // set (through \u00FF)
+ //
+ // * The special characters tab (\u0009), line feed (\u000A), and carriage
+ // return (\u000D)
+ ResourcePolicy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SimulateCustomPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SimulateCustomPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
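+//
+// Illustrative sketch (not generated code): a minimal SimulateCustomPolicy
+// call with this input type, assuming an initialized *iam.IAM client named
+// svc and a policy document held in the string variable policyJSON:
+//
+//    out, err := svc.SimulateCustomPolicy(&iam.SimulateCustomPolicyInput{
+//        ActionNames:     []*string{aws.String("s3:ListBucket")},
+//        PolicyInputList: []*string{aws.String(policyJSON)},
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    for _, r := range out.EvaluationResults {
+//        fmt.Println(aws.StringValue(r.EvalActionName), aws.StringValue(r.EvalDecision))
+//    }
+//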
+func (s *SimulateCustomPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SimulateCustomPolicyInput"} + if s.ActionNames == nil { + invalidParams.Add(request.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicyInputList == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyInputList")) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionNames sets the ActionNames field's value. +func (s *SimulateCustomPolicyInput) SetActionNames(v []*string) *SimulateCustomPolicyInput { + s.ActionNames = v + return s +} + +// SetCallerArn sets the CallerArn field's value. +func (s *SimulateCustomPolicyInput) SetCallerArn(v string) *SimulateCustomPolicyInput { + s.CallerArn = &v + return s +} + +// SetContextEntries sets the ContextEntries field's value. +func (s *SimulateCustomPolicyInput) SetContextEntries(v []*ContextEntry) *SimulateCustomPolicyInput { + s.ContextEntries = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *SimulateCustomPolicyInput) SetMarker(v string) *SimulateCustomPolicyInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *SimulateCustomPolicyInput) SetMaxItems(v int64) *SimulateCustomPolicyInput { + s.MaxItems = &v + return s +} + +// SetPolicyInputList sets the PolicyInputList field's value. +func (s *SimulateCustomPolicyInput) SetPolicyInputList(v []*string) *SimulateCustomPolicyInput { + s.PolicyInputList = v + return s +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *SimulateCustomPolicyInput) SetResourceArns(v []*string) *SimulateCustomPolicyInput { + s.ResourceArns = v + return s +} + +// SetResourceHandlingOption sets the ResourceHandlingOption field's value. +func (s *SimulateCustomPolicyInput) SetResourceHandlingOption(v string) *SimulateCustomPolicyInput { + s.ResourceHandlingOption = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. +func (s *SimulateCustomPolicyInput) SetResourceOwner(v string) *SimulateCustomPolicyInput { + s.ResourceOwner = &v + return s +} + +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *SimulateCustomPolicyInput) SetResourcePolicy(v string) *SimulateCustomPolicyInput { + s.ResourcePolicy = &v + return s +} + +// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy +// request. +type SimulatePolicyResponse struct { + _ struct{} `type:"structure"` + + // The results of the simulation. 
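+ //
+ // When results are truncated, one illustrative way (not generated code) to
+ // drain all pages is the paginated helper, assuming an initialized *iam.IAM
+ // client named svc, a prepared *iam.SimulateCustomPolicyInput named input,
+ // and a results slice named results:
+ //
+ //    err := svc.SimulateCustomPolicyPages(input,
+ //        func(page *iam.SimulatePolicyResponse, lastPage bool) bool {
+ //            results = append(results, page.EvaluationResults...)
+ //            return true // request the next page until IsTruncated is false
+ //        })
+ //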
+ EvaluationResults []*EvaluationResult `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s SimulatePolicyResponse) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SimulatePolicyResponse) GoString() string { + return s.String() +} + +// SetEvaluationResults sets the EvaluationResults field's value. +func (s *SimulatePolicyResponse) SetEvaluationResults(v []*EvaluationResult) *SimulatePolicyResponse { + s.EvaluationResults = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *SimulatePolicyResponse) SetIsTruncated(v bool) *SimulatePolicyResponse { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *SimulatePolicyResponse) SetMarker(v string) *SimulatePolicyResponse { + s.Marker = &v + return s +} + +type SimulatePrincipalPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API operations to evaluate in the simulation. Each operation + // is evaluated for each resource. Each operation must include the service identifier, + // such as iam:CreateUser. + // + // ActionNames is a required field + ActionNames []*string `type:"list" required:"true"` + + // The ARN of the IAM user that you want to specify as the simulated caller + // of the API operations. If you do not specify a CallerArn, it defaults to + // the ARN of the user that you specify in PolicySourceArn, if you specified + // a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) + // and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result + // is that you simulate calling the API operations as Bob, as if Bob had David's + // policies. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN of + // an assumed role, federated user, or a service principal. + // + // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn + // is not the ARN for an IAM user. This is required so that the resource-based + // policy's Principal element has a value to use in evaluating the policy. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permissions + // policies, the corresponding value is supplied. + ContextEntries []*ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // An optional list of additional policy documents to include in the simulation. + // Each document is specified as a string containing the complete, valid JSON + // text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + PolicyInputList []*string `type:"list"` + + // The Amazon Resource Name (ARN) of a user, group, or role whose policies you + // want to include in the simulation. If you specify a user, group, or role, + // the simulation includes all policies that are associated with that entity. + // If you specify a user, the simulation also includes all policies that are + // attached to any groups the user belongs to. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicySourceArn is a required field + PolicySourceArn *string `min:"20" type:"string" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided, then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceArns []*string `type:"list"` + + // Specifies the type of simulation to run. Different API operations that support + // resource-based policies require different combinations of resources. By specifying + // the type of simulation to run, you enable the policy simulator to enforce + // the presence of the required resources to ensure reliable simulation results. 
+ // If your simulation does not match one of the following scenarios, then you
+ // can omit this parameter. The following list shows each of the supported scenario
+ // values and the resources that you must define to run the simulation.
+ //
+ // Each of the EC2 scenarios requires that you specify instance, image, and
+ // security group resources. If your scenario includes an EBS volume, then you
+ // must specify that volume as a resource. If the EC2 scenario includes VPC,
+ // then you must supply the network interface resource. If it includes an IP
+ // subnet, then you must specify the subnet resource. For more information on
+ // the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html)
+ // in the Amazon EC2 User Guide.
+ //
+ // * EC2-Classic-InstanceStore instance, image, security group
+ //
+ // * EC2-Classic-EBS instance, image, security group, volume
+ //
+ // * EC2-VPC-InstanceStore instance, image, security group, network interface
+ //
+ // * EC2-VPC-InstanceStore-Subnet instance, image, security group, network
+ // interface, subnet
+ //
+ // * EC2-VPC-EBS instance, image, security group, network interface, volume
+ //
+ // * EC2-VPC-EBS-Subnet instance, image, security group, network interface,
+ // subnet, volume
+ ResourceHandlingOption *string `min:"1" type:"string"`
+
+ // An AWS account ID that specifies the owner of any simulated resource that
+ // does not identify its owner in the resource ARN. Examples of resource ARNs
+ // include an S3 bucket or object. If ResourceOwner is specified, it is also
+ // used as the account owner of any ResourcePolicy included in the simulation.
+ // If the ResourceOwner parameter is not specified, then the owner of the resources
+ // and the resource policy defaults to the account of the identity provided
+ // in CallerArn. This parameter is required only if you specify a resource-based
+ // policy and the account that owns the resource is different from the account
+ // that owns the simulated calling user CallerArn.
+ ResourceOwner *string `min:"1" type:"string"`
+
+ // A resource-based policy to include in the simulation provided as a string.
+ // Each resource in the simulation is treated as if it had this policy attached.
+ // You can include only one resource-based policy in a simulation.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this
+ // parameter is a string of characters consisting of the following:
+ //
+ // * Any printable ASCII character ranging from the space character (\u0020)
+ // through the end of the ASCII character range
+ //
+ // * The printable characters in the Basic Latin and Latin-1 Supplement character
+ // set (through \u00FF)
+ //
+ // * The special characters tab (\u0009), line feed (\u000A), and carriage
+ // return (\u000D)
+ ResourcePolicy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s SimulatePrincipalPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s SimulatePrincipalPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
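+//
+// Illustrative sketch (not generated code): simulating the policies already
+// attached to a principal, assuming an initialized *iam.IAM client named svc:
+//
+//    out, err := svc.SimulatePrincipalPolicy(&iam.SimulatePrincipalPolicyInput{
+//        ActionNames:     []*string{aws.String("iam:CreateUser")},
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/David"),
+//    })
+//
+// The response is the same SimulatePolicyResponse type used by
+// SimulateCustomPolicy, so pagination is handled identically.
+//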
+func (s *SimulatePrincipalPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SimulatePrincipalPolicyInput"} + if s.ActionNames == nil { + invalidParams.Add(request.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxItems", 1)) + } + if s.PolicySourceArn == nil { + invalidParams.Add(request.NewErrParamRequired("PolicySourceArn")) + } + if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("PolicySourceArn", 20)) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetActionNames sets the ActionNames field's value. +func (s *SimulatePrincipalPolicyInput) SetActionNames(v []*string) *SimulatePrincipalPolicyInput { + s.ActionNames = v + return s +} + +// SetCallerArn sets the CallerArn field's value. +func (s *SimulatePrincipalPolicyInput) SetCallerArn(v string) *SimulatePrincipalPolicyInput { + s.CallerArn = &v + return s +} + +// SetContextEntries sets the ContextEntries field's value. +func (s *SimulatePrincipalPolicyInput) SetContextEntries(v []*ContextEntry) *SimulatePrincipalPolicyInput { + s.ContextEntries = v + return s +} + +// SetMarker sets the Marker field's value. +func (s *SimulatePrincipalPolicyInput) SetMarker(v string) *SimulatePrincipalPolicyInput { + s.Marker = &v + return s +} + +// SetMaxItems sets the MaxItems field's value. +func (s *SimulatePrincipalPolicyInput) SetMaxItems(v int64) *SimulatePrincipalPolicyInput { + s.MaxItems = &v + return s +} + +// SetPolicyInputList sets the PolicyInputList field's value. +func (s *SimulatePrincipalPolicyInput) SetPolicyInputList(v []*string) *SimulatePrincipalPolicyInput { + s.PolicyInputList = v + return s +} + +// SetPolicySourceArn sets the PolicySourceArn field's value. +func (s *SimulatePrincipalPolicyInput) SetPolicySourceArn(v string) *SimulatePrincipalPolicyInput { + s.PolicySourceArn = &v + return s +} + +// SetResourceArns sets the ResourceArns field's value. +func (s *SimulatePrincipalPolicyInput) SetResourceArns(v []*string) *SimulatePrincipalPolicyInput { + s.ResourceArns = v + return s +} + +// SetResourceHandlingOption sets the ResourceHandlingOption field's value. +func (s *SimulatePrincipalPolicyInput) SetResourceHandlingOption(v string) *SimulatePrincipalPolicyInput { + s.ResourceHandlingOption = &v + return s +} + +// SetResourceOwner sets the ResourceOwner field's value. 
+func (s *SimulatePrincipalPolicyInput) SetResourceOwner(v string) *SimulatePrincipalPolicyInput { + s.ResourceOwner = &v + return s +} + +// SetResourcePolicy sets the ResourcePolicy field's value. +func (s *SimulatePrincipalPolicyInput) SetResourcePolicy(v string) *SimulatePrincipalPolicyInput { + s.ResourcePolicy = &v + return s +} + +// Contains a reference to a Statement element in a policy document that determines +// the result of the simulation. +// +// This data type is used by the MatchedStatements member of the EvaluationResult +// type. +type Statement struct { + _ struct{} `type:"structure"` + + // The row and column of the end of a Statement in an IAM policy. + EndPosition *Position `type:"structure"` + + // The identifier of the policy that was provided as an input. + SourcePolicyId *string `type:"string"` + + // The type of the policy. + SourcePolicyType *string `type:"string" enum:"PolicySourceType"` + + // The row and column of the beginning of the Statement in an IAM policy. + StartPosition *Position `type:"structure"` +} + +// String returns the string representation +func (s Statement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Statement) GoString() string { + return s.String() +} + +// SetEndPosition sets the EndPosition field's value. +func (s *Statement) SetEndPosition(v *Position) *Statement { + s.EndPosition = v + return s +} + +// SetSourcePolicyId sets the SourcePolicyId field's value. +func (s *Statement) SetSourcePolicyId(v string) *Statement { + s.SourcePolicyId = &v + return s +} + +// SetSourcePolicyType sets the SourcePolicyType field's value. +func (s *Statement) SetSourcePolicyType(v string) *Statement { + s.SourcePolicyType = &v + return s +} + +// SetStartPosition sets the StartPosition field's value. +func (s *Statement) SetStartPosition(v *Position) *Statement { + s.StartPosition = v + return s +} + +// A structure that represents user-provided metadata that can be associated +// with a resource such as an IAM user or role. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +type Tag struct { + _ struct{} `type:"structure"` + + // The key name that can be used to look up or retrieve the associated value. + // For example, Department or Cost Center are common choices. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value associated with this tag. For example, tags with a key name of + // Department could have values such as Human Resources, Accounting, and Support. + // Tags with a key name of Cost Center might have values that consist of the + // number associated with the different cost centers in your company. Typically, + // many resources have tags with the same key name but with different values. + // + // AWS always interprets the tag Value as a single string. If you need to store + // an array, you can store comma-separated values in the string. However, you + // must interpret the value in your code. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
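+//
+// Illustrative sketch (not generated code): constructing a tag and running
+// the client-side Validate check before it is sent in a request:
+//
+//    t := &iam.Tag{
+//        Key:   aws.String("Department"),
+//        Value: aws.String("Accounting"),
+//    }
+//    if err := t.Validate(); err != nil {
+//        return err // Key and Value are both required
+//    }
+//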
+func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +type TagRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role that you want to add tags to. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` + + // The list of tags that you want to attach to the role. Each tag consists of + // a key name and an associated value. You can specify this with a JSON string. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s TagRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *TagRoleInput) SetRoleName(v string) *TagRoleInput { + s.RoleName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *TagRoleInput) SetTags(v []*Tag) *TagRoleInput { + s.Tags = v + return s +} + +type TagRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagRoleOutput) GoString() string { + return s.String() +} + +type TagUserInput struct { + _ struct{} `type:"structure"` + + // The list of tags that you want to attach to the user. Each tag consists of + // a key name and an associated value. + // + // Tags is a required field + Tags []*Tag `type:"list" required:"true"` + + // The name of the user that you want to add tags to. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TagUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagUserInput"} + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTags sets the Tags field's value. +func (s *TagUserInput) SetTags(v []*Tag) *TagUserInput { + s.Tags = v + return s +} + +// SetUserName sets the UserName field's value. +func (s *TagUserInput) SetUserName(v string) *TagUserInput { + s.UserName = &v + return s +} + +type TagUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagUserOutput) GoString() string { + return s.String() +} + +type UntagRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM role from which you want to remove tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` + + // A list of key names as a simple array of strings. The tags with matching + // keys are removed from the specified role. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagRoleInput"} + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRoleName sets the RoleName field's value. +func (s *UntagRoleInput) SetRoleName(v string) *UntagRoleInput { + s.RoleName = &v + return s +} + +// SetTagKeys sets the TagKeys field's value. 
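+//
+// Illustrative sketch (not generated code): attaching a tag to a role and
+// later removing it by key, assuming an initialized *iam.IAM client named svc:
+//
+//    _, err := svc.TagRole(&iam.TagRoleInput{
+//        RoleName: aws.String("example-role"),
+//        Tags: []*iam.Tag{
+//            {Key: aws.String("Department"), Value: aws.String("Accounting")},
+//        },
+//    })
+//    // ... later, remove the tag by its key name:
+//    _, err = svc.UntagRole(&iam.UntagRoleInput{
+//        RoleName: aws.String("example-role"),
+//        TagKeys:  []*string{aws.String("Department")},
+//    })
+//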
+func (s *UntagRoleInput) SetTagKeys(v []*string) *UntagRoleInput { + s.TagKeys = v + return s +} + +type UntagRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagRoleOutput) GoString() string { + return s.String() +} + +type UntagUserInput struct { + _ struct{} `type:"structure"` + + // A list of key names as a simple array of strings. The tags with matching + // keys are removed from the specified user. + // + // TagKeys is a required field + TagKeys []*string `type:"list" required:"true"` + + // The name of the IAM user from which you want to remove tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UntagUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagUserInput"} + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagKeys sets the TagKeys field's value. +func (s *UntagUserInput) SetTagKeys(v []*string) *UntagUserInput { + s.TagKeys = v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UntagUserInput) SetUserName(v string) *UntagUserInput { + s.UserName = &v + return s +} + +type UntagUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagUserOutput) GoString() string { + return s.String() +} + +type UpdateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID of the secret access key you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The status you want to assign to the secret access key. Active means that + // the key can be used for API calls to AWS, while Inactive means that the key + // cannot be used. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the user whose key you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccessKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccessKeyInput"} + if s.AccessKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *UpdateAccessKeyInput) SetAccessKeyId(v string) *UpdateAccessKeyInput { + s.AccessKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateAccessKeyInput) SetStatus(v string) *UpdateAccessKeyInput { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UpdateAccessKeyInput) SetUserName(v string) *UpdateAccessKeyInput { + s.UserName = &v + return s +} + +type UpdateAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccessKeyOutput) GoString() string { + return s.String() +} + +type UpdateAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` + + // Allows all IAM users in your account to use the AWS Management Console to + // change their own passwords. For more information, see Letting IAM Users Change + // Their Own Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/HowToPwdIAMUser.html) + // in the IAM User Guide. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that IAM users in the account do + // not automatically have permissions to change their own password. + AllowUsersToChangePassword *bool `type:"boolean"` + + // Prevents IAM users from setting a new password after their password has expired. + // The IAM user cannot be accessed until an administrator resets the password. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that IAM users can change their + // passwords after they expire and continue to sign in as the user. + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 0. The result is that IAM user passwords never expire. + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // The minimum number of characters allowed in an IAM user password. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 6. 
+ MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 0. The result is that IAM users are not prevented from + // reusing previous passwords. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether IAM user passwords must contain at least one lowercase + // character from the ISO basic Latin alphabet (a to z). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one lowercase character. + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one numeric character + // (0 to 9). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one numeric character. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one of the following + // non-alphanumeric characters: + // + // ! @ # $ % ^ & * ( ) _ + - = [ ] { } | ' + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one symbol character. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one uppercase + // character from the ISO basic Latin alphabet (A to Z). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one uppercase character. + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccountPasswordPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAccountPasswordPolicyInput"} + if s.MaxPasswordAge != nil && *s.MaxPasswordAge < 1 { + invalidParams.Add(request.NewErrParamMinValue("MaxPasswordAge", 1)) + } + if s.MinimumPasswordLength != nil && *s.MinimumPasswordLength < 6 { + invalidParams.Add(request.NewErrParamMinValue("MinimumPasswordLength", 6)) + } + if s.PasswordReusePrevention != nil && *s.PasswordReusePrevention < 1 { + invalidParams.Add(request.NewErrParamMinValue("PasswordReusePrevention", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowUsersToChangePassword sets the AllowUsersToChangePassword field's value. +func (s *UpdateAccountPasswordPolicyInput) SetAllowUsersToChangePassword(v bool) *UpdateAccountPasswordPolicyInput { + s.AllowUsersToChangePassword = &v + return s +} + +// SetHardExpiry sets the HardExpiry field's value. +func (s *UpdateAccountPasswordPolicyInput) SetHardExpiry(v bool) *UpdateAccountPasswordPolicyInput { + s.HardExpiry = &v + return s +} + +// SetMaxPasswordAge sets the MaxPasswordAge field's value. 
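+//
+// Illustrative sketch (not generated code): tightening the account password
+// policy, assuming an initialized *iam.IAM client named svc. Because every
+// omitted field reverts to its documented default, pass all the settings you
+// want to keep, not just the ones you are changing:
+//
+//    _, err := svc.UpdateAccountPasswordPolicy(&iam.UpdateAccountPasswordPolicyInput{
+//        MinimumPasswordLength:      aws.Int64(14),
+//        RequireNumbers:             aws.Bool(true),
+//        RequireSymbols:             aws.Bool(true),
+//        AllowUsersToChangePassword: aws.Bool(true),
+//    })
+//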
+func (s *UpdateAccountPasswordPolicyInput) SetMaxPasswordAge(v int64) *UpdateAccountPasswordPolicyInput { + s.MaxPasswordAge = &v + return s +} + +// SetMinimumPasswordLength sets the MinimumPasswordLength field's value. +func (s *UpdateAccountPasswordPolicyInput) SetMinimumPasswordLength(v int64) *UpdateAccountPasswordPolicyInput { + s.MinimumPasswordLength = &v + return s +} + +// SetPasswordReusePrevention sets the PasswordReusePrevention field's value. +func (s *UpdateAccountPasswordPolicyInput) SetPasswordReusePrevention(v int64) *UpdateAccountPasswordPolicyInput { + s.PasswordReusePrevention = &v + return s +} + +// SetRequireLowercaseCharacters sets the RequireLowercaseCharacters field's value. +func (s *UpdateAccountPasswordPolicyInput) SetRequireLowercaseCharacters(v bool) *UpdateAccountPasswordPolicyInput { + s.RequireLowercaseCharacters = &v + return s +} + +// SetRequireNumbers sets the RequireNumbers field's value. +func (s *UpdateAccountPasswordPolicyInput) SetRequireNumbers(v bool) *UpdateAccountPasswordPolicyInput { + s.RequireNumbers = &v + return s +} + +// SetRequireSymbols sets the RequireSymbols field's value. +func (s *UpdateAccountPasswordPolicyInput) SetRequireSymbols(v bool) *UpdateAccountPasswordPolicyInput { + s.RequireSymbols = &v + return s +} + +// SetRequireUppercaseCharacters sets the RequireUppercaseCharacters field's value. +func (s *UpdateAccountPasswordPolicyInput) SetRequireUppercaseCharacters(v bool) *UpdateAccountPasswordPolicyInput { + s.RequireUppercaseCharacters = &v + return s +} + +type UpdateAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAccountPasswordPolicyOutput) GoString() string { + return s.String() +} + +type UpdateAssumeRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy that grants an entity permission to assume the role. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the role to update with the new policy. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAssumeRolePolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateAssumeRolePolicyInput"} + if s.PolicyDocument == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyDocument", 1)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPolicyDocument sets the PolicyDocument field's value. +func (s *UpdateAssumeRolePolicyInput) SetPolicyDocument(v string) *UpdateAssumeRolePolicyInput { + s.PolicyDocument = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *UpdateAssumeRolePolicyInput) SetRoleName(v string) *UpdateAssumeRolePolicyInput { + s.RoleName = &v + return s +} + +type UpdateAssumeRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateAssumeRolePolicyOutput) GoString() string { + return s.String() +} + +type UpdateGroupInput struct { + _ struct{} `type:"structure"` + + // Name of the IAM group to update. If you're changing the name of the group, + // this is the original name. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // New name for the IAM group. Only include this if changing the group's name. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + NewGroupName *string `min:"1" type:"string"` + + // New path for the IAM group. Only include this if changing the group's path. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. 
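+	//
+	// As a sketch (the group and path names are placeholders), a rename plus a
+	// move can be expressed with the setters defined below:
+	//
+	//	input := (&iam.UpdateGroupInput{}).
+	//		SetGroupName("Admins").
+	//		SetNewGroupName("Administrators").
+	//		SetNewPath("/division/")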
+ NewPath *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateGroupInput"} + if s.GroupName == nil { + invalidParams.Add(request.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("GroupName", 1)) + } + if s.NewGroupName != nil && len(*s.NewGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewGroupName", 1)) + } + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGroupName sets the GroupName field's value. +func (s *UpdateGroupInput) SetGroupName(v string) *UpdateGroupInput { + s.GroupName = &v + return s +} + +// SetNewGroupName sets the NewGroupName field's value. +func (s *UpdateGroupInput) SetNewGroupName(v string) *UpdateGroupInput { + s.NewGroupName = &v + return s +} + +// SetNewPath sets the NewPath field's value. +func (s *UpdateGroupInput) SetNewPath(v string) *UpdateGroupInput { + s.NewPath = &v + return s +} + +type UpdateGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateGroupOutput) GoString() string { + return s.String() +} + +type UpdateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the specified IAM user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // However, the format can be further restricted by the account administrator + // by setting a password policy on the AWS account. For more information, see + // UpdateAccountPasswordPolicy. + Password *string `min:"1" type:"string" sensitive:"true"` + + // Allows this new password to be used only once by requiring the specified + // IAM user to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user whose password you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLoginProfileInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateLoginProfileInput"} + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Password", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPassword sets the Password field's value. +func (s *UpdateLoginProfileInput) SetPassword(v string) *UpdateLoginProfileInput { + s.Password = &v + return s +} + +// SetPasswordResetRequired sets the PasswordResetRequired field's value. +func (s *UpdateLoginProfileInput) SetPasswordResetRequired(v bool) *UpdateLoginProfileInput { + s.PasswordResetRequired = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UpdateLoginProfileInput) SetUserName(v string) *UpdateLoginProfileInput { + s.UserName = &v + return s +} + +type UpdateLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateLoginProfileOutput) GoString() string { + return s.String() +} + +type UpdateOpenIDConnectProviderThumbprintInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for + // which you want to update the thumbprint. You can get a list of OIDC provider + // ARNs by using the ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + // + // ThumbprintList is a required field + ThumbprintList []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
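+//
+// As a usage sketch (the ARN and thumbprint are placeholders; the aws and
+// session helper packages are assumed), rotating a provider's thumbprint
+// list looks like:
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	_, err := svc.UpdateOpenIDConnectProviderThumbprint(&iam.UpdateOpenIDConnectProviderThumbprintInput{
+//		OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"),
+//		ThumbprintList:           []*string{aws.String("c3768084dfb3d2b68b7897bf5f565da8efEXAMPLE")},
+//	})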
+func (s *UpdateOpenIDConnectProviderThumbprintInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateOpenIDConnectProviderThumbprintInput"} + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + if s.ThumbprintList == nil { + invalidParams.Add(request.NewErrParamRequired("ThumbprintList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOpenIDConnectProviderArn sets the OpenIDConnectProviderArn field's value. +func (s *UpdateOpenIDConnectProviderThumbprintInput) SetOpenIDConnectProviderArn(v string) *UpdateOpenIDConnectProviderThumbprintInput { + s.OpenIDConnectProviderArn = &v + return s +} + +// SetThumbprintList sets the ThumbprintList field's value. +func (s *UpdateOpenIDConnectProviderThumbprintInput) SetThumbprintList(v []*string) *UpdateOpenIDConnectProviderThumbprintInput { + s.ThumbprintList = v + return s +} + +type UpdateOpenIDConnectProviderThumbprintOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) GoString() string { + return s.String() +} + +type UpdateRoleDescriptionInput struct { + _ struct{} `type:"structure"` + + // The new description that you want to apply to the specified role. + // + // Description is a required field + Description *string `type:"string" required:"true"` + + // The name of the role that you want to modify. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoleDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRoleDescriptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoleDescriptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRoleDescriptionInput"} + if s.Description == nil { + invalidParams.Add(request.NewErrParamRequired("Description")) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateRoleDescriptionInput) SetDescription(v string) *UpdateRoleDescriptionInput { + s.Description = &v + return s +} + +// SetRoleName sets the RoleName field's value. +func (s *UpdateRoleDescriptionInput) SetRoleName(v string) *UpdateRoleDescriptionInput { + s.RoleName = &v + return s +} + +type UpdateRoleDescriptionOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the modified role. 
+ Role *Role `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoleDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRoleDescriptionOutput) GoString() string { + return s.String() +} + +// SetRole sets the Role field's value. +func (s *UpdateRoleDescriptionOutput) SetRole(v *Role) *UpdateRoleDescriptionOutput { + s.Role = v + return s +} + +type UpdateRoleInput struct { + _ struct{} `type:"structure"` + + // The new description that you want to apply to the specified role. + Description *string `type:"string"` + + // The maximum session duration (in seconds) that you want to set for the specified + // role. If you do not specify a value for this setting, the default maximum + // of one hour is applied. This setting can have a value from 1 hour to 12 hours. + // + // Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds + // API parameter or the duration-seconds CLI parameter to request a longer session. + // The MaxSessionDuration setting determines the maximum duration that can be + // requested using the DurationSeconds parameter. If users don't specify a value + // for the DurationSeconds parameter, their security credentials are valid for + // one hour by default. This applies when you use the AssumeRole* API operations + // or the assume-role* CLI operations but does not apply when you use those + // operations to create a console URL. For more information, see Using IAM Roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the + // IAM User Guide. + MaxSessionDuration *int64 `min:"3600" type:"integer"` + + // The name of the role that you want to modify. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateRoleInput"} + if s.MaxSessionDuration != nil && *s.MaxSessionDuration < 3600 { + invalidParams.Add(request.NewErrParamMinValue("MaxSessionDuration", 3600)) + } + if s.RoleName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDescription sets the Description field's value. +func (s *UpdateRoleInput) SetDescription(v string) *UpdateRoleInput { + s.Description = &v + return s +} + +// SetMaxSessionDuration sets the MaxSessionDuration field's value. +func (s *UpdateRoleInput) SetMaxSessionDuration(v int64) *UpdateRoleInput { + s.MaxSessionDuration = &v + return s +} + +// SetRoleName sets the RoleName field's value. 
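+//
+// A sketch (the role name is a placeholder; the aws and session helpers are
+// assumed) of raising a role's maximum session to 12 hours, i.e. 43200 seconds:
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	_, err := svc.UpdateRole(&iam.UpdateRoleInput{
+//		RoleName:           aws.String("deploy-role"),
+//		MaxSessionDuration: aws.Int64(43200),
+//	})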
+func (s *UpdateRoleInput) SetRoleName(v string) *UpdateRoleInput { + s.RoleName = &v + return s +} + +type UpdateRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateRoleOutput) GoString() string { + return s.String() +} + +type UpdateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + // + // SAMLMetadataDocument is a required field + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the SAML provider to update. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSAMLProviderInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSAMLProviderInput"} + if s.SAMLMetadataDocument == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(request.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + if s.SAMLProviderArn == nil { + invalidParams.Add(request.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSAMLMetadataDocument sets the SAMLMetadataDocument field's value. +func (s *UpdateSAMLProviderInput) SetSAMLMetadataDocument(v string) *UpdateSAMLProviderInput { + s.SAMLMetadataDocument = &v + return s +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. +func (s *UpdateSAMLProviderInput) SetSAMLProviderArn(v string) *UpdateSAMLProviderInput { + s.SAMLProviderArn = &v + return s +} + +// Contains the response to a successful UpdateSAMLProvider request. +type UpdateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider that was updated. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSAMLProviderOutput) GoString() string { + return s.String() +} + +// SetSAMLProviderArn sets the SAMLProviderArn field's value. 
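+//
+// A sketch of rotating IdP metadata (the ARN is a placeholder, metadataXML is
+// assumed to hold the IdP-generated document, and the aws and session helpers
+// are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	out, err := svc.UpdateSAMLProvider(&iam.UpdateSAMLProviderInput{
+//		SAMLMetadataDocument: aws.String(metadataXML),
+//		SAMLProviderArn:      aws.String("arn:aws:iam::123456789012:saml-provider/MyIdP"),
+//	})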
+func (s *UpdateSAMLProviderOutput) SetSAMLProviderArn(v string) *UpdateSAMLProviderOutput { + s.SAMLProviderArn = &v + return s +} + +type UpdateSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status to assign to the SSH public key. Active means that the key can + // be used for authentication with an AWS CodeCommit repository. Inactive means + // that the key cannot be used. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateSSHPublicKeyInput"} + if s.SSHPublicKeyId == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSHPublicKeyId sets the SSHPublicKeyId field's value. +func (s *UpdateSSHPublicKeyInput) SetSSHPublicKeyId(v string) *UpdateSSHPublicKeyInput { + s.SSHPublicKeyId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateSSHPublicKeyInput) SetStatus(v string) *UpdateSSHPublicKeyInput { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UpdateSSHPublicKeyInput) SetUserName(v string) *UpdateSSHPublicKeyInput { + s.UserName = &v + return s +} + +type UpdateSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSSHPublicKeyOutput) GoString() string { + return s.String() +} + +type UpdateServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The new path for the server certificate. Include this only if you are updating + // the server certificate's path. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + NewPath *string `min:"1" type:"string"` + + // The new name for the server certificate. Include this only if you are updating + // the server certificate's name. The name of the certificate cannot contain + // any spaces. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + NewServerCertificateName *string `min:"1" type:"string"` + + // The name of the server certificate that you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServerCertificateInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + if s.NewServerCertificateName != nil && len(*s.NewServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewServerCertificateName", 1)) + } + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNewPath sets the NewPath field's value. +func (s *UpdateServerCertificateInput) SetNewPath(v string) *UpdateServerCertificateInput { + s.NewPath = &v + return s +} + +// SetNewServerCertificateName sets the NewServerCertificateName field's value. +func (s *UpdateServerCertificateInput) SetNewServerCertificateName(v string) *UpdateServerCertificateInput { + s.NewServerCertificateName = &v + return s +} + +// SetServerCertificateName sets the ServerCertificateName field's value. 
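+//
+// A sketch of renaming a certificate in place (both names are placeholders;
+// the aws and session helpers are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	_, err := svc.UpdateServerCertificate(&iam.UpdateServerCertificateInput{
+//		ServerCertificateName:    aws.String("ProdCert"),
+//		NewServerCertificateName: aws.String("ProdCert-renewed"),
+//	})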
+func (s *UpdateServerCertificateInput) SetServerCertificateName(v string) *UpdateServerCertificateInput { + s.ServerCertificateName = &v + return s +} + +type UpdateServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServerCertificateOutput) GoString() string { + return s.String() +} + +type UpdateServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the service-specific credential. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // ServiceSpecificCredentialId is a required field + ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"` + + // The status to be assigned to the service-specific credential. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"statusType"` + + // The name of the IAM user associated with the service-specific credential. + // If you do not specify this value, then the operation assumes the user whose + // credentials are used to call the operation. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateServiceSpecificCredentialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateServiceSpecificCredentialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateServiceSpecificCredentialInput"} + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(request.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetServiceSpecificCredentialId sets the ServiceSpecificCredentialId field's value. +func (s *UpdateServiceSpecificCredentialInput) SetServiceSpecificCredentialId(v string) *UpdateServiceSpecificCredentialInput { + s.ServiceSpecificCredentialId = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *UpdateServiceSpecificCredentialInput) SetStatus(v string) *UpdateServiceSpecificCredentialInput { + s.Status = &v + return s +} + +// SetUserName sets the UserName field's value. 
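+//
+// A sketch of deactivating a service-specific credential (the ID is a
+// placeholder; StatusTypeInactive is the "Inactive" enum value from this
+// package, and the aws and session helpers are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	_, err := svc.UpdateServiceSpecificCredential(&iam.UpdateServiceSpecificCredentialInput{
+//		ServiceSpecificCredentialId: aws.String("ACCA12345678901EXAMPLE"),
+//		Status:                      aws.String(iam.StatusTypeInactive),
+//	})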
+func (s *UpdateServiceSpecificCredentialInput) SetUserName(v string) *UpdateServiceSpecificCredentialInput {
+	s.UserName = &v
+	return s
+}
+
+type UpdateServiceSpecificCredentialOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateServiceSpecificCredentialOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateServiceSpecificCredentialOutput) GoString() string {
+	return s.String()
+}
+
+type UpdateSigningCertificateInput struct {
+	_ struct{} `type:"structure"`
+
+	// The ID of the signing certificate you want to update.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters that can consist of any upper or lowercased letter
+	// or digit.
+	//
+	// CertificateId is a required field
+	CertificateId *string `min:"24" type:"string" required:"true"`
+
+	// The status you want to assign to the certificate. Active means that the certificate
+	// can be used for API calls to AWS. Inactive means that the certificate cannot
+	// be used.
+	//
+	// Status is a required field
+	Status *string `type:"string" required:"true" enum:"statusType"`
+
+	// The name of the IAM user the signing certificate belongs to.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: _+=,.@-
+	UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateSigningCertificateInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UpdateSigningCertificateInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateSigningCertificateInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "UpdateSigningCertificateInput"}
+	if s.CertificateId == nil {
+		invalidParams.Add(request.NewErrParamRequired("CertificateId"))
+	}
+	if s.CertificateId != nil && len(*s.CertificateId) < 24 {
+		invalidParams.Add(request.NewErrParamMinLen("CertificateId", 24))
+	}
+	if s.Status == nil {
+		invalidParams.Add(request.NewErrParamRequired("Status"))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("UserName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCertificateId sets the CertificateId field's value.
+func (s *UpdateSigningCertificateInput) SetCertificateId(v string) *UpdateSigningCertificateInput {
+	s.CertificateId = &v
+	return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *UpdateSigningCertificateInput) SetStatus(v string) *UpdateSigningCertificateInput {
+	s.Status = &v
+	return s
+}
+
+// SetUserName sets the UserName field's value.
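+//
+// A sketch of disabling a signing certificate (the ID is a placeholder; the
+// aws and session helpers are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	_, err := svc.UpdateSigningCertificate(&iam.UpdateSigningCertificateInput{
+//		CertificateId: aws.String("TA123456789012345EXAMPLE"),
+//		Status:        aws.String(iam.StatusTypeInactive),
+//	})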
+func (s *UpdateSigningCertificateInput) SetUserName(v string) *UpdateSigningCertificateInput { + s.UserName = &v + return s +} + +type UpdateSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateSigningCertificateOutput) GoString() string { + return s.String() +} + +type UpdateUserInput struct { + _ struct{} `type:"structure"` + + // New path for the IAM user. Include this parameter only if you're changing + // the user's path. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + NewPath *string `min:"1" type:"string"` + + // New name for the user. Include this parameter only if you're changing the + // user's name. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + NewUserName *string `min:"1" type:"string"` + + // Name of the user to update. If you're changing the name of the user, this + // is the original user name. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateUserInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateUserInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewPath", 1)) + } + if s.NewUserName != nil && len(*s.NewUserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NewUserName", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetNewPath sets the NewPath field's value. +func (s *UpdateUserInput) SetNewPath(v string) *UpdateUserInput { + s.NewPath = &v + return s +} + +// SetNewUserName sets the NewUserName field's value. +func (s *UpdateUserInput) SetNewUserName(v string) *UpdateUserInput { + s.NewUserName = &v + return s +} + +// SetUserName sets the UserName field's value. 
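+//
+// A sketch of renaming a user with the setters and client-side Validate
+// defined above (names are placeholders):
+//
+//	input := (&iam.UpdateUserInput{}).
+//		SetUserName("bob").
+//		SetNewUserName("robert")
+//	if err := input.Validate(); err != nil {
+//		// handle invalid parameters before calling UpdateUser
+//	}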
+func (s *UpdateUserInput) SetUserName(v string) *UpdateUserInput { + s.UserName = &v + return s +} + +type UpdateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateUserOutput) GoString() string { + return s.String() +} + +type UploadSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The SSH public key. The public key must be encoded in ssh-rsa format or PEM + // format. The minimum bit-length of the public key is 2048 bits. For example, + // you can generate a 2048-bit key, and the resulting PEM file is 1679 bytes + // long. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // SSHPublicKeyBody is a required field + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The name of the IAM user to associate the SSH public key with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSSHPublicKeyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadSSHPublicKeyInput"} + if s.SSHPublicKeyBody == nil { + invalidParams.Add(request.NewErrParamRequired("SSHPublicKeyBody")) + } + if s.SSHPublicKeyBody != nil && len(*s.SSHPublicKeyBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SSHPublicKeyBody", 1)) + } + if s.UserName == nil { + invalidParams.Add(request.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSHPublicKeyBody sets the SSHPublicKeyBody field's value. +func (s *UploadSSHPublicKeyInput) SetSSHPublicKeyBody(v string) *UploadSSHPublicKeyInput { + s.SSHPublicKeyBody = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UploadSSHPublicKeyInput) SetUserName(v string) *UploadSSHPublicKeyInput { + s.UserName = &v + return s +} + +// Contains the response to a successful UploadSSHPublicKey request. +type UploadSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the SSH public key. 
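+	//
+	// As a usage sketch for the corresponding request (the key path and user
+	// name are placeholders; ioutil and the aws and session helpers are assumed):
+	//
+	//	svc := iam.New(session.Must(session.NewSession()))
+	//	body, _ := ioutil.ReadFile("/home/bob/.ssh/id_rsa.pub")
+	//	out, err := svc.UploadSSHPublicKey(&iam.UploadSSHPublicKeyInput{
+	//		SSHPublicKeyBody: aws.String(string(body)),
+	//		UserName:         aws.String("bob"),
+	//	})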
+ SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSSHPublicKeyOutput) GoString() string { + return s.String() +} + +// SetSSHPublicKey sets the SSHPublicKey field's value. +func (s *UploadSSHPublicKeyOutput) SetSSHPublicKey(v *SSHPublicKey) *UploadSSHPublicKeyOutput { + s.SSHPublicKey = v + return s +} + +type UploadServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the certificate chain. This is typically a concatenation + // of the PEM-encoded public key certificates of the chain. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + CertificateChain *string `min:"1" type:"string"` + + // The path for the server certificate. For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + // + // If you are uploading a server certificate specifically for use with Amazon + // CloudFront distributions, you must specify a path using the path parameter. + // The path must begin with /cloudfront and must include a trailing slash (for + // example, /cloudfront/test/). + Path *string `min:"1" type:"string"` + + // The contents of the private key in PEM-encoded format. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PrivateKey is a required field + PrivateKey *string `min:"1" type:"string" required:"true" sensitive:"true"` + + // The name for the server certificate. Do not include the path in this value. + // The name of the certificate cannot contain any spaces. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadServerCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadServerCertificateInput"} + if s.CertificateBody == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateBody", 1)) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateChain", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Path", 1)) + } + if s.PrivateKey == nil { + invalidParams.Add(request.NewErrParamRequired("PrivateKey")) + } + if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PrivateKey", 1)) + } + if s.ServerCertificateName == nil { + invalidParams.Add(request.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateBody sets the CertificateBody field's value. +func (s *UploadServerCertificateInput) SetCertificateBody(v string) *UploadServerCertificateInput { + s.CertificateBody = &v + return s +} + +// SetCertificateChain sets the CertificateChain field's value. +func (s *UploadServerCertificateInput) SetCertificateChain(v string) *UploadServerCertificateInput { + s.CertificateChain = &v + return s +} + +// SetPath sets the Path field's value. +func (s *UploadServerCertificateInput) SetPath(v string) *UploadServerCertificateInput { + s.Path = &v + return s +} + +// SetPrivateKey sets the PrivateKey field's value. +func (s *UploadServerCertificateInput) SetPrivateKey(v string) *UploadServerCertificateInput { + s.PrivateKey = &v + return s +} + +// SetServerCertificateName sets the ServerCertificateName field's value. 
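+//
+// A sketch of uploading a certificate for CloudFront, where the path must
+// begin with /cloudfront and end with a slash (certPEM and keyPEM are assumed
+// to hold the PEM-encoded body and private key; the aws and session helpers
+// are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	out, err := svc.UploadServerCertificate(&iam.UploadServerCertificateInput{
+//		ServerCertificateName: aws.String("CloudFrontCert"),
+//		Path:                  aws.String("/cloudfront/prod/"),
+//		CertificateBody:       aws.String(certPEM),
+//		PrivateKey:            aws.String(keyPEM),
+//	})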
+func (s *UploadServerCertificateInput) SetServerCertificateName(v string) *UploadServerCertificateInput { + s.ServerCertificateName = &v + return s +} + +// Contains the response to a successful UploadServerCertificate request. +type UploadServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // The meta information of the uploaded server certificate without its certificate + // body, certificate chain, and private key. + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure"` +} + +// String returns the string representation +func (s UploadServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadServerCertificateOutput) GoString() string { + return s.String() +} + +// SetServerCertificateMetadata sets the ServerCertificateMetadata field's value. +func (s *UploadServerCertificateOutput) SetServerCertificateMetadata(v *ServerCertificateMetadata) *UploadServerCertificateOutput { + s.ServerCertificateMetadata = v + return s +} + +type UploadSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The name of the user the signing certificate is for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UploadSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSigningCertificateInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadSigningCertificateInput"} + if s.CertificateBody == nil { + invalidParams.Add(request.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(request.NewErrParamMinLen("CertificateBody", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCertificateBody sets the CertificateBody field's value. +func (s *UploadSigningCertificateInput) SetCertificateBody(v string) *UploadSigningCertificateInput { + s.CertificateBody = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *UploadSigningCertificateInput) SetUserName(v string) *UploadSigningCertificateInput { + s.UserName = &v + return s +} + +// Contains the response to a successful UploadSigningCertificate request. 
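+//
+// As a usage sketch for the corresponding request (certPEM is assumed to hold
+// the PEM-encoded certificate; the aws and session helpers are assumed):
+//
+//	svc := iam.New(session.Must(session.NewSession()))
+//	out, err := svc.UploadSigningCertificate(&iam.UploadSigningCertificateInput{
+//		CertificateBody: aws.String(certPEM),
+//		UserName:        aws.String("bob"),
+//	})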
+type UploadSigningCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the certificate. + // + // Certificate is a required field + Certificate *SigningCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UploadSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadSigningCertificateOutput) GoString() string { + return s.String() +} + +// SetCertificate sets the Certificate field's value. +func (s *UploadSigningCertificateOutput) SetCertificate(v *SigningCertificate) *UploadSigningCertificateOutput { + s.Certificate = v + return s +} + +// Contains information about an IAM user entity. +// +// This data type is used as a response element in the following operations: +// +// * CreateUser +// +// * GetUser +// +// * ListUsers +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the user. For more information + // about ARNs and how to use ARNs in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user's password was last used to sign in to an AWS website. For + // a list of AWS websites that capture a user's last sign-in time, see the Credential + // Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) + // topic in the Using IAM guide. If a password is used more than once in a five-minute + // span, only the first use is returned in this field. If the field is null + // (no value), then it indicates that they never signed in with a password. + // This can be because: + // + // * The user never had a password. + // + // * A password exists but has not been used since IAM started tracking this + // information on October 20, 2014. + // + // A null value does not mean that the user never had a password. Also, if the + // user does not currently have a password, but had one in the past, then this + // field contains the date and time the most recent password was used. + // + // This value is returned only in the GetUser and ListUsers operations. + PasswordLastUsed *time.Time `type:"timestamp"` + + // The path to the user. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // The ARN of the policy used to set the permissions boundary for the user. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` + + // A list of tags that are associated with the specified user. For more information + // about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. 
+ Tags []*Tag `type:"list"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // UserId is a required field + UserId *string `min:"16" type:"string" required:"true"` + + // The friendly name identifying the user. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s User) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s User) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *User) SetArn(v string) *User { + s.Arn = &v + return s +} + +// SetCreateDate sets the CreateDate field's value. +func (s *User) SetCreateDate(v time.Time) *User { + s.CreateDate = &v + return s +} + +// SetPasswordLastUsed sets the PasswordLastUsed field's value. +func (s *User) SetPasswordLastUsed(v time.Time) *User { + s.PasswordLastUsed = &v + return s +} + +// SetPath sets the Path field's value. +func (s *User) SetPath(v string) *User { + s.Path = &v + return s +} + +// SetPermissionsBoundary sets the PermissionsBoundary field's value. +func (s *User) SetPermissionsBoundary(v *AttachedPermissionsBoundary) *User { + s.PermissionsBoundary = v + return s +} + +// SetTags sets the Tags field's value. +func (s *User) SetTags(v []*Tag) *User { + s.Tags = v + return s +} + +// SetUserId sets the UserId field's value. +func (s *User) SetUserId(v string) *User { + s.UserId = &v + return s +} + +// SetUserName sets the UserName field's value. +func (s *User) SetUserName(v string) *User { + s.UserName = &v + return s +} + +// Contains information about an IAM user, including all the user's policies +// and all the IAM groups the user is in. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +type UserDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the user. + AttachedManagedPolicies []*AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. + CreateDate *time.Time `type:"timestamp"` + + // A list of IAM groups that the user is in. + GroupList []*string `type:"list"` + + // The path to the user. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The ARN of the policy used to set the permissions boundary for the user. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` + + // A list of tags that are associated with the specified user. 
For more information
+	// about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+	// in the IAM User Guide.
+	Tags []*Tag `type:"list"`
+
+	// The stable and unique string identifying the user. For more information about
+	// IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	UserId *string `min:"16" type:"string"`
+
+	// The friendly name identifying the user.
+	UserName *string `min:"1" type:"string"`
+
+	// A list of the inline policies embedded in the user.
+	UserPolicyList []*PolicyDetail `type:"list"`
+}
+
+// String returns the string representation
+func (s UserDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UserDetail) GoString() string {
+	return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *UserDetail) SetArn(v string) *UserDetail {
+	s.Arn = &v
+	return s
+}
+
+// SetAttachedManagedPolicies sets the AttachedManagedPolicies field's value.
+func (s *UserDetail) SetAttachedManagedPolicies(v []*AttachedPolicy) *UserDetail {
+	s.AttachedManagedPolicies = v
+	return s
+}
+
+// SetCreateDate sets the CreateDate field's value.
+func (s *UserDetail) SetCreateDate(v time.Time) *UserDetail {
+	s.CreateDate = &v
+	return s
+}
+
+// SetGroupList sets the GroupList field's value.
+func (s *UserDetail) SetGroupList(v []*string) *UserDetail {
+	s.GroupList = v
+	return s
+}
+
+// SetPath sets the Path field's value.
+func (s *UserDetail) SetPath(v string) *UserDetail {
+	s.Path = &v
+	return s
+}
+
+// SetPermissionsBoundary sets the PermissionsBoundary field's value.
+func (s *UserDetail) SetPermissionsBoundary(v *AttachedPermissionsBoundary) *UserDetail {
+	s.PermissionsBoundary = v
+	return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *UserDetail) SetTags(v []*Tag) *UserDetail {
+	s.Tags = v
+	return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *UserDetail) SetUserId(v string) *UserDetail {
+	s.UserId = &v
+	return s
+}
+
+// SetUserName sets the UserName field's value.
+func (s *UserDetail) SetUserName(v string) *UserDetail {
+	s.UserName = &v
+	return s
+}
+
+// SetUserPolicyList sets the UserPolicyList field's value.
+func (s *UserDetail) SetUserPolicyList(v []*PolicyDetail) *UserDetail {
+	s.UserPolicyList = v
+	return s
+}
+
+// Contains information about a virtual MFA device.
+type VirtualMFADevice struct {
+	_ struct{} `type:"structure"`
+
+	// The base32 seed defined as specified in RFC3548 (https://tools.ietf.org/html/rfc3548.txt).
+	// The Base32StringSeed is base64-encoded.
+	//
+	// Base32StringSeed is automatically base64 encoded/decoded by the SDK.
+	Base32StringSeed []byte `type:"blob" sensitive:"true"`
+
+	// The date and time on which the virtual MFA device was enabled.
+	EnableDate *time.Time `type:"timestamp"`
+
+	// A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String
+	// where $virtualMFADeviceName is one of the create call arguments. AccountName
+	// is the user name if set (otherwise, the account ID), and Base32String
+	// is the seed in base32 format. The Base32String value is base64-encoded.
+	//
+	// QRCodePNG is automatically base64 encoded/decoded by the SDK.
+	QRCodePNG []byte `type:"blob" sensitive:"true"`
+
+	// The serial number associated with VirtualMFADevice.
+ // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The IAM user associated with this virtual MFA device. + User *User `type:"structure"` +} + +// String returns the string representation +func (s VirtualMFADevice) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VirtualMFADevice) GoString() string { + return s.String() +} + +// SetBase32StringSeed sets the Base32StringSeed field's value. +func (s *VirtualMFADevice) SetBase32StringSeed(v []byte) *VirtualMFADevice { + s.Base32StringSeed = v + return s +} + +// SetEnableDate sets the EnableDate field's value. +func (s *VirtualMFADevice) SetEnableDate(v time.Time) *VirtualMFADevice { + s.EnableDate = &v + return s +} + +// SetQRCodePNG sets the QRCodePNG field's value. +func (s *VirtualMFADevice) SetQRCodePNG(v []byte) *VirtualMFADevice { + s.QRCodePNG = v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *VirtualMFADevice) SetSerialNumber(v string) *VirtualMFADevice { + s.SerialNumber = &v + return s +} + +// SetUser sets the User field's value. +func (s *VirtualMFADevice) SetUser(v *User) *VirtualMFADevice { + s.User = v + return s +} + +const ( + // ContextKeyTypeEnumString is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumString = "string" + + // ContextKeyTypeEnumStringList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumStringList = "stringList" + + // ContextKeyTypeEnumNumeric is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumNumeric = "numeric" + + // ContextKeyTypeEnumNumericList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumNumericList = "numericList" + + // ContextKeyTypeEnumBoolean is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumBoolean = "boolean" + + // ContextKeyTypeEnumBooleanList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumBooleanList = "booleanList" + + // ContextKeyTypeEnumIp is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumIp = "ip" + + // ContextKeyTypeEnumIpList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumIpList = "ipList" + + // ContextKeyTypeEnumBinary is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumBinary = "binary" + + // ContextKeyTypeEnumBinaryList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumBinaryList = "binaryList" + + // ContextKeyTypeEnumDate is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumDate = "date" + + // ContextKeyTypeEnumDateList is a ContextKeyTypeEnum enum value + ContextKeyTypeEnumDateList = "dateList" +) + +const ( + // DeletionTaskStatusTypeSucceeded is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeSucceeded = "SUCCEEDED" + + // DeletionTaskStatusTypeInProgress is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeInProgress = "IN_PROGRESS" + + // DeletionTaskStatusTypeFailed is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeFailed = "FAILED" + + // DeletionTaskStatusTypeNotStarted is a DeletionTaskStatusType enum value + DeletionTaskStatusTypeNotStarted = "NOT_STARTED" +) + +const ( + // EntityTypeUser is a EntityType enum value + EntityTypeUser = "User" + + // EntityTypeRole is a EntityType enum value + EntityTypeRole = "Role" + + // EntityTypeGroup is a EntityType enum value + EntityTypeGroup = "Group" + + // EntityTypeLocalManagedPolicy is a EntityType enum value + EntityTypeLocalManagedPolicy = "LocalManagedPolicy" + + // EntityTypeAwsmanagedPolicy is a EntityType enum value + EntityTypeAwsmanagedPolicy = "AWSManagedPolicy" 
+) + +const ( + // PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy is a PermissionsBoundaryAttachmentType enum value + PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy = "PermissionsBoundaryPolicy" +) + +const ( + // PolicyEvaluationDecisionTypeAllowed is a PolicyEvaluationDecisionType enum value + PolicyEvaluationDecisionTypeAllowed = "allowed" + + // PolicyEvaluationDecisionTypeExplicitDeny is a PolicyEvaluationDecisionType enum value + PolicyEvaluationDecisionTypeExplicitDeny = "explicitDeny" + + // PolicyEvaluationDecisionTypeImplicitDeny is a PolicyEvaluationDecisionType enum value + PolicyEvaluationDecisionTypeImplicitDeny = "implicitDeny" +) + +const ( + // PolicySourceTypeUser is a PolicySourceType enum value + PolicySourceTypeUser = "user" + + // PolicySourceTypeGroup is a PolicySourceType enum value + PolicySourceTypeGroup = "group" + + // PolicySourceTypeRole is a PolicySourceType enum value + PolicySourceTypeRole = "role" + + // PolicySourceTypeAwsManaged is a PolicySourceType enum value + PolicySourceTypeAwsManaged = "aws-managed" + + // PolicySourceTypeUserManaged is a PolicySourceType enum value + PolicySourceTypeUserManaged = "user-managed" + + // PolicySourceTypeResource is a PolicySourceType enum value + PolicySourceTypeResource = "resource" + + // PolicySourceTypeNone is a PolicySourceType enum value + PolicySourceTypeNone = "none" +) + +// The policy usage type that indicates whether the policy is used as a permissions +// policy or as the permissions boundary for an entity. +// +// For more information about permissions boundaries, see Permissions Boundaries +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// in the IAM User Guide. +const ( + // PolicyUsageTypePermissionsPolicy is a PolicyUsageType enum value + PolicyUsageTypePermissionsPolicy = "PermissionsPolicy" + + // PolicyUsageTypePermissionsBoundary is a PolicyUsageType enum value + PolicyUsageTypePermissionsBoundary = "PermissionsBoundary" +) + +const ( + // ReportFormatTypeTextCsv is a ReportFormatType enum value + ReportFormatTypeTextCsv = "text/csv" +) + +const ( + // ReportStateTypeStarted is a ReportStateType enum value + ReportStateTypeStarted = "STARTED" + + // ReportStateTypeInprogress is a ReportStateType enum value + ReportStateTypeInprogress = "INPROGRESS" + + // ReportStateTypeComplete is a ReportStateType enum value + ReportStateTypeComplete = "COMPLETE" +) + +const ( + // AssignmentStatusTypeAssigned is a assignmentStatusType enum value + AssignmentStatusTypeAssigned = "Assigned" + + // AssignmentStatusTypeUnassigned is a assignmentStatusType enum value + AssignmentStatusTypeUnassigned = "Unassigned" + + // AssignmentStatusTypeAny is a assignmentStatusType enum value + AssignmentStatusTypeAny = "Any" +) + +const ( + // EncodingTypeSsh is a encodingType enum value + EncodingTypeSsh = "SSH" + + // EncodingTypePem is a encodingType enum value + EncodingTypePem = "PEM" +) + +const ( + // GlobalEndpointTokenVersionV1token is a globalEndpointTokenVersion enum value + GlobalEndpointTokenVersionV1token = "v1Token" + + // GlobalEndpointTokenVersionV2token is a globalEndpointTokenVersion enum value + GlobalEndpointTokenVersionV2token = "v2Token" +) + +const ( + // JobStatusTypeInProgress is a jobStatusType enum value + JobStatusTypeInProgress = "IN_PROGRESS" + + // JobStatusTypeCompleted is a jobStatusType enum value + JobStatusTypeCompleted = "COMPLETED" + + // JobStatusTypeFailed is a jobStatusType enum value + 
JobStatusTypeFailed = "FAILED" +) + +const ( + // PolicyOwnerEntityTypeUser is a policyOwnerEntityType enum value + PolicyOwnerEntityTypeUser = "USER" + + // PolicyOwnerEntityTypeRole is a policyOwnerEntityType enum value + PolicyOwnerEntityTypeRole = "ROLE" + + // PolicyOwnerEntityTypeGroup is a policyOwnerEntityType enum value + PolicyOwnerEntityTypeGroup = "GROUP" +) + +const ( + // PolicyScopeTypeAll is a policyScopeType enum value + PolicyScopeTypeAll = "All" + + // PolicyScopeTypeAws is a policyScopeType enum value + PolicyScopeTypeAws = "AWS" + + // PolicyScopeTypeLocal is a policyScopeType enum value + PolicyScopeTypeLocal = "Local" +) + +const ( + // PolicyTypeInline is a policyType enum value + PolicyTypeInline = "INLINE" + + // PolicyTypeManaged is a policyType enum value + PolicyTypeManaged = "MANAGED" +) + +const ( + // SortKeyTypeServiceNamespaceAscending is a sortKeyType enum value + SortKeyTypeServiceNamespaceAscending = "SERVICE_NAMESPACE_ASCENDING" + + // SortKeyTypeServiceNamespaceDescending is a sortKeyType enum value + SortKeyTypeServiceNamespaceDescending = "SERVICE_NAMESPACE_DESCENDING" + + // SortKeyTypeLastAuthenticatedTimeAscending is a sortKeyType enum value + SortKeyTypeLastAuthenticatedTimeAscending = "LAST_AUTHENTICATED_TIME_ASCENDING" + + // SortKeyTypeLastAuthenticatedTimeDescending is a sortKeyType enum value + SortKeyTypeLastAuthenticatedTimeDescending = "LAST_AUTHENTICATED_TIME_DESCENDING" +) + +const ( + // StatusTypeActive is a statusType enum value + StatusTypeActive = "Active" + + // StatusTypeInactive is a statusType enum value + StatusTypeInactive = "Inactive" +) + +const ( + // SummaryKeyTypeUsers is a summaryKeyType enum value + SummaryKeyTypeUsers = "Users" + + // SummaryKeyTypeUsersQuota is a summaryKeyType enum value + SummaryKeyTypeUsersQuota = "UsersQuota" + + // SummaryKeyTypeGroups is a summaryKeyType enum value + SummaryKeyTypeGroups = "Groups" + + // SummaryKeyTypeGroupsQuota is a summaryKeyType enum value + SummaryKeyTypeGroupsQuota = "GroupsQuota" + + // SummaryKeyTypeServerCertificates is a summaryKeyType enum value + SummaryKeyTypeServerCertificates = "ServerCertificates" + + // SummaryKeyTypeServerCertificatesQuota is a summaryKeyType enum value + SummaryKeyTypeServerCertificatesQuota = "ServerCertificatesQuota" + + // SummaryKeyTypeUserPolicySizeQuota is a summaryKeyType enum value + SummaryKeyTypeUserPolicySizeQuota = "UserPolicySizeQuota" + + // SummaryKeyTypeGroupPolicySizeQuota is a summaryKeyType enum value + SummaryKeyTypeGroupPolicySizeQuota = "GroupPolicySizeQuota" + + // SummaryKeyTypeGroupsPerUserQuota is a summaryKeyType enum value + SummaryKeyTypeGroupsPerUserQuota = "GroupsPerUserQuota" + + // SummaryKeyTypeSigningCertificatesPerUserQuota is a summaryKeyType enum value + SummaryKeyTypeSigningCertificatesPerUserQuota = "SigningCertificatesPerUserQuota" + + // SummaryKeyTypeAccessKeysPerUserQuota is a summaryKeyType enum value + SummaryKeyTypeAccessKeysPerUserQuota = "AccessKeysPerUserQuota" + + // SummaryKeyTypeMfadevices is a summaryKeyType enum value + SummaryKeyTypeMfadevices = "MFADevices" + + // SummaryKeyTypeMfadevicesInUse is a summaryKeyType enum value + SummaryKeyTypeMfadevicesInUse = "MFADevicesInUse" + + // SummaryKeyTypeAccountMfaenabled is a summaryKeyType enum value + SummaryKeyTypeAccountMfaenabled = "AccountMFAEnabled" + + // SummaryKeyTypeAccountAccessKeysPresent is a summaryKeyType enum value + SummaryKeyTypeAccountAccessKeysPresent = "AccountAccessKeysPresent" + + // 
SummaryKeyTypeAccountSigningCertificatesPresent is a summaryKeyType enum value + SummaryKeyTypeAccountSigningCertificatesPresent = "AccountSigningCertificatesPresent" + + // SummaryKeyTypeAttachedPoliciesPerGroupQuota is a summaryKeyType enum value + SummaryKeyTypeAttachedPoliciesPerGroupQuota = "AttachedPoliciesPerGroupQuota" + + // SummaryKeyTypeAttachedPoliciesPerRoleQuota is a summaryKeyType enum value + SummaryKeyTypeAttachedPoliciesPerRoleQuota = "AttachedPoliciesPerRoleQuota" + + // SummaryKeyTypeAttachedPoliciesPerUserQuota is a summaryKeyType enum value + SummaryKeyTypeAttachedPoliciesPerUserQuota = "AttachedPoliciesPerUserQuota" + + // SummaryKeyTypePolicies is a summaryKeyType enum value + SummaryKeyTypePolicies = "Policies" + + // SummaryKeyTypePoliciesQuota is a summaryKeyType enum value + SummaryKeyTypePoliciesQuota = "PoliciesQuota" + + // SummaryKeyTypePolicySizeQuota is a summaryKeyType enum value + SummaryKeyTypePolicySizeQuota = "PolicySizeQuota" + + // SummaryKeyTypePolicyVersionsInUse is a summaryKeyType enum value + SummaryKeyTypePolicyVersionsInUse = "PolicyVersionsInUse" + + // SummaryKeyTypePolicyVersionsInUseQuota is a summaryKeyType enum value + SummaryKeyTypePolicyVersionsInUseQuota = "PolicyVersionsInUseQuota" + + // SummaryKeyTypeVersionsPerPolicyQuota is a summaryKeyType enum value + SummaryKeyTypeVersionsPerPolicyQuota = "VersionsPerPolicyQuota" + + // SummaryKeyTypeGlobalEndpointTokenVersion is a summaryKeyType enum value + SummaryKeyTypeGlobalEndpointTokenVersion = "GlobalEndpointTokenVersion" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go b/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go new file mode 100644 index 00000000..0d709cd2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/doc.go @@ -0,0 +1,80 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package iam provides the client and types for making API +// requests to AWS Identity and Access Management. +// +// AWS Identity and Access Management (IAM) is a web service that you can use +// to manage users and user permissions under your AWS account. This guide provides +// descriptions of IAM actions that you can call programmatically. For general +// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/). +// For the user guide for IAM, see Using IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/). +// +// AWS provides SDKs that consist of libraries and sample code for various programming +// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs +// provide a convenient way to create programmatic access to IAM and AWS. For +// example, the SDKs take care of tasks such as cryptographically signing requests +// (see below), managing errors, and retrying requests automatically. For information +// about the AWS SDKs, including how to download and install them, see the Tools +// for Amazon Web Services (http://aws.amazon.com/tools/) page. +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// IAM. However, you can also use the IAM Query API to make direct calls to +// the IAM web service. To learn more about the IAM Query API, see Making Query +// Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the Using IAM guide. IAM supports GET and POST requests for all actions. +// That is, the API does not require you to use GET for some actions and POST +// for others. 
However, GET requests are subject to the size limitation of a
+// URL. Therefore, for operations that require larger sizes, use a POST request.
+//
+// Signing Requests
+//
+// Requests must be signed using an access key ID and a secret access key. We
+// strongly recommend that you do not use your AWS account access key ID and
+// secret access key for everyday work with IAM. You can use the access key
+// ID and secret access key for an IAM user or you can use the AWS Security
+// Token Service to generate temporary security credentials and use those to
+// sign requests.
+//
+// To sign requests, we recommend that you use Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+// If you have an existing application that uses Signature Version 2, you do
+// not have to update it to use Signature Version 4. However, some operations
+// now require Signature Version 4. The documentation for operations that require
+// version 4 indicates this requirement.
+//
+// Additional Resources
+//
+// For more information, see the following:
+//
+//    * AWS Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
+//    This topic provides general information about the types of credentials
+//    used for accessing AWS.
+//
+//    * IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
+//    This topic presents a list of suggestions for using the IAM service to
+//    help secure your AWS resources.
+//
+//    * Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
+//    This set of topics walks you through the process of signing a request using
+//    an access key ID and secret access key.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08 for more information on this service.
+//
+// See iam package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/
+//
+// Using the Client
+//
+// To contact AWS Identity and Access Management with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Identity and Access Management client IAM for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#New
+package iam
diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go
new file mode 100644
index 00000000..30a85b3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/iam/errors.go
@@ -0,0 +1,200 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+const (
+
+	// ErrCodeConcurrentModificationException for service response error code
+	// "ConcurrentModification".
+	//
+	// The request was rejected because multiple requests to change this object
+	// were submitted simultaneously. Wait a few minutes and submit your request
+	// again.
+	ErrCodeConcurrentModificationException = "ConcurrentModification"
+
+	// ErrCodeCredentialReportExpiredException for service response error code
+	// "ReportExpired".
+ // + // The request was rejected because the most recent credential report has expired. + // To generate a new credential report, use GenerateCredentialReport. For more + // information about credential report expiration, see Getting Credential Reports + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) + // in the IAM User Guide. + ErrCodeCredentialReportExpiredException = "ReportExpired" + + // ErrCodeCredentialReportNotPresentException for service response error code + // "ReportNotPresent". + // + // The request was rejected because the credential report does not exist. To + // generate a credential report, use GenerateCredentialReport. + ErrCodeCredentialReportNotPresentException = "ReportNotPresent" + + // ErrCodeCredentialReportNotReadyException for service response error code + // "ReportInProgress". + // + // The request was rejected because the credential report is still being generated. + ErrCodeCredentialReportNotReadyException = "ReportInProgress" + + // ErrCodeDeleteConflictException for service response error code + // "DeleteConflict". + // + // The request was rejected because it attempted to delete a resource that has + // attached subordinate entities. The error message describes these entities. + ErrCodeDeleteConflictException = "DeleteConflict" + + // ErrCodeDuplicateCertificateException for service response error code + // "DuplicateCertificate". + // + // The request was rejected because the same certificate is associated with + // an IAM user in the account. + ErrCodeDuplicateCertificateException = "DuplicateCertificate" + + // ErrCodeDuplicateSSHPublicKeyException for service response error code + // "DuplicateSSHPublicKey". + // + // The request was rejected because the SSH public key is already associated + // with the specified IAM user. + ErrCodeDuplicateSSHPublicKeyException = "DuplicateSSHPublicKey" + + // ErrCodeEntityAlreadyExistsException for service response error code + // "EntityAlreadyExists". + // + // The request was rejected because it attempted to create a resource that already + // exists. + ErrCodeEntityAlreadyExistsException = "EntityAlreadyExists" + + // ErrCodeEntityTemporarilyUnmodifiableException for service response error code + // "EntityTemporarilyUnmodifiable". + // + // The request was rejected because it referenced an entity that is temporarily + // unmodifiable, such as a user name that was deleted and then recreated. The + // error indicates that the request is likely to succeed if you try again after + // waiting several minutes. The error message describes the entity. + ErrCodeEntityTemporarilyUnmodifiableException = "EntityTemporarilyUnmodifiable" + + // ErrCodeInvalidAuthenticationCodeException for service response error code + // "InvalidAuthenticationCode". + // + // The request was rejected because the authentication code was not recognized. + // The error message describes the specific error. + ErrCodeInvalidAuthenticationCodeException = "InvalidAuthenticationCode" + + // ErrCodeInvalidCertificateException for service response error code + // "InvalidCertificate". + // + // The request was rejected because the certificate is invalid. + ErrCodeInvalidCertificateException = "InvalidCertificate" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInput". + // + // The request was rejected because an invalid or out-of-range value was supplied + // for an input parameter. 
+ ErrCodeInvalidInputException = "InvalidInput" + + // ErrCodeInvalidPublicKeyException for service response error code + // "InvalidPublicKey". + // + // The request was rejected because the public key is malformed or otherwise + // invalid. + ErrCodeInvalidPublicKeyException = "InvalidPublicKey" + + // ErrCodeInvalidUserTypeException for service response error code + // "InvalidUserType". + // + // The request was rejected because the type of user for the transaction was + // incorrect. + ErrCodeInvalidUserTypeException = "InvalidUserType" + + // ErrCodeKeyPairMismatchException for service response error code + // "KeyPairMismatch". + // + // The request was rejected because the public key certificate and the private + // key do not match. + ErrCodeKeyPairMismatchException = "KeyPairMismatch" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceeded". + // + // The request was rejected because it attempted to create resources beyond + // the current AWS account limits. The error message describes the limit exceeded. + ErrCodeLimitExceededException = "LimitExceeded" + + // ErrCodeMalformedCertificateException for service response error code + // "MalformedCertificate". + // + // The request was rejected because the certificate was malformed or expired. + // The error message describes the specific error. + ErrCodeMalformedCertificateException = "MalformedCertificate" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodeNoSuchEntityException for service response error code + // "NoSuchEntity". + // + // The request was rejected because it referenced a resource entity that does + // not exist. The error message describes the resource. + ErrCodeNoSuchEntityException = "NoSuchEntity" + + // ErrCodePasswordPolicyViolationException for service response error code + // "PasswordPolicyViolation". + // + // The request was rejected because the provided password did not meet the requirements + // imposed by the account password policy. + ErrCodePasswordPolicyViolationException = "PasswordPolicyViolation" + + // ErrCodePolicyEvaluationException for service response error code + // "PolicyEvaluation". + // + // The request failed because a provided policy could not be successfully evaluated. + // An additional detailed message indicates the source of the failure. + ErrCodePolicyEvaluationException = "PolicyEvaluation" + + // ErrCodePolicyNotAttachableException for service response error code + // "PolicyNotAttachable". + // + // The request failed because AWS service role policies can only be attached + // to the service-linked role for that service. + ErrCodePolicyNotAttachableException = "PolicyNotAttachable" + + // ErrCodeReportGenerationLimitExceededException for service response error code + // "ReportGenerationLimitExceeded". + // + // The request failed because the maximum number of concurrent requests for + // this account are already running. + ErrCodeReportGenerationLimitExceededException = "ReportGenerationLimitExceeded" + + // ErrCodeServiceFailureException for service response error code + // "ServiceFailure". + // + // The request processing has failed because of an unknown error, exception + // or failure. 
+	ErrCodeServiceFailureException = "ServiceFailure"
+
+	// ErrCodeServiceNotSupportedException for service response error code
+	// "NotSupportedService".
+	//
+	// The specified service does not support service-specific credentials.
+	ErrCodeServiceNotSupportedException = "NotSupportedService"
+
+	// ErrCodeUnmodifiableEntityException for service response error code
+	// "UnmodifiableEntity".
+	//
+	// The request was rejected because only the service that depends on the service-linked
+	// role can modify or delete the role on your behalf. The error message includes
+	// the name of the service that depends on this service-linked role. You must
+	// request the change through that service.
+	ErrCodeUnmodifiableEntityException = "UnmodifiableEntity"
+
+	// ErrCodeUnrecognizedPublicKeyEncodingException for service response error code
+	// "UnrecognizedPublicKeyEncoding".
+	//
+	// The request was rejected because the public key encoding format is unsupported
+	// or unrecognized.
+	ErrCodeUnrecognizedPublicKeyEncodingException = "UnrecognizedPublicKeyEncoding"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go
new file mode 100644
index 00000000..940b4ce3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go
@@ -0,0 +1,95 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// IAM provides the API operation methods for making requests to
+// AWS Identity and Access Management. See this package's package overview docs
+// for details on the service.
+//
+// IAM methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type IAM struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "iam"       // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID   = "IAM"       // ServiceID is a unique identifier of a specific service.
+)
+
+// New creates a new instance of the IAM client with a session.
+// If additional configuration is needed for the client instance, use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create an IAM client from just a session.
+//     svc := iam.New(mySession)
+//
+//     // Create an IAM client with additional configuration
+//     svc := iam.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *IAM {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
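The generated `New` constructor above is the public entry point for this client; the unexported `newClient` that follows wires up V4 signing and the Query protocol handlers. As a rough sketch of the calling side, not part of the vendored file, combining `New` with the `User` type and the `NoSuchEntity` error code defined earlier in this patch (the session setup and the user name are illustrative assumptions):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Assumes credentials and region come from the environment or shared config.
	sess := session.Must(session.NewSession())
	svc := iam.New(sess)

	// "example-user" is a hypothetical name used only for illustration.
	out, err := svc.GetUser(&iam.GetUserInput{UserName: aws.String("example-user")})
	if err != nil {
		// The ErrCode* constants from errors.go are compared against awserr.Error codes.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeNoSuchEntityException {
			log.Fatalf("user not found: %s", aerr.Message())
		}
		log.Fatal(err)
	}

	fmt.Println(aws.StringValue(out.User.Arn))
	// PasswordLastUsed may be nil, as the User docs above note.
	if out.User.PasswordLastUsed != nil {
		fmt.Println("password last used:", out.User.PasswordLastUsed.Format("2006-01-02"))
	}
}
```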
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *IAM { + svc := &IAM{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + ServiceID: ServiceID, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-05-08", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a IAM operation and runs any +// custom request initialization. +func (c *IAM) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go new file mode 100644 index 00000000..4331ba37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/waiters.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilInstanceProfileExists uses the IAM API operation +// GetInstanceProfile to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error { + return c.WaitUntilInstanceProfileExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilInstanceProfileExistsWithContext is an extended version of WaitUntilInstanceProfileExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) WaitUntilInstanceProfileExistsWithContext(ctx aws.Context, input *GetInstanceProfileInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilInstanceProfileExists", + MaxAttempts: 40, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetInstanceProfileInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetInstanceProfileRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} + +// WaitUntilPolicyExists uses the IAM API operation +// GetPolicy to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *IAM) WaitUntilPolicyExists(input *GetPolicyInput) error { + return c.WaitUntilPolicyExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilPolicyExistsWithContext is an extended version of WaitUntilPolicyExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) WaitUntilPolicyExistsWithContext(ctx aws.Context, input *GetPolicyInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilPolicyExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilRoleExists uses the IAM API operation +// GetRole to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *IAM) WaitUntilRoleExists(input *GetRoleInput) error { + return c.WaitUntilRoleExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilRoleExistsWithContext is an extended version of WaitUntilRoleExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) WaitUntilRoleExistsWithContext(ctx aws.Context, input *GetRoleInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilRoleExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetRoleInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetRoleRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) 
+ + return w.WaitWithContext(ctx) +} + +// WaitUntilUserExists uses the IAM API operation +// GetUser to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +func (c *IAM) WaitUntilUserExists(input *GetUserInput) error { + return c.WaitUntilUserExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilUserExistsWithContext is an extended version of WaitUntilUserExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *IAM) WaitUntilUserExistsWithContext(ctx aws.Context, input *GetUserInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilUserExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(1 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *GetUserInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.GetUserRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 9b205f3f..139c27d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1,16 +1,24 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package s3 provides a client for Amazon Simple Storage Service. package s3 import ( + "bytes" "fmt" "io" + "sync" + "sync/atomic" "time" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/eventstream" + "github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi" + "github.com/aws/aws-sdk-go/private/protocol/rest" "github.com/aws/aws-sdk-go/private/protocol/restxml" ) @@ -18,19 +26,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // AbortMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the AbortMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See AbortMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AbortMultipartUpload method directly -// instead. +// See AbortMultipartUpload for more information on using the AbortMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AbortMultipartUploadRequest method. // req, resp := client.AbortMultipartUploadRequest(params) @@ -40,7 +47,7 @@ const opAbortMultipartUpload = "AbortMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -76,30 +83,44 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req // * ErrCodeNoSuchUpload "NoSuchUpload" // The specified multipart upload does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { req, out := c.AbortMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCompleteMultipartUpload = "CompleteMultipartUpload" // CompleteMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CompleteMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CompleteMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CompleteMultipartUpload method directly -// instead. +// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CompleteMultipartUploadRequest method. // req, resp := client.CompleteMultipartUploadRequest(params) @@ -109,7 +130,7 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -136,30 +157,44 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CompleteMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { req, out := c.CompleteMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCopyObject = "CopyObject" // CopyObjectRequest generates a "aws/request.Request" representing the // client's request for the CopyObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CopyObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CopyObject method directly -// instead. +// See CopyObject for more information on using the CopyObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CopyObjectRequest method. // req, resp := client.CopyObjectRequest(params) @@ -169,7 +204,7 @@ const opCopyObject = "CopyObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -202,30 +237,44 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // The source object of the COPY operation is not in the active tier and is // only stored in Amazon Glacier. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { req, out := c.CopyObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateBucket = "CreateBucket" // CreateBucketRequest generates a "aws/request.Request" representing the // client's request for the CreateBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CreateBucket for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateBucket method directly -// instead. 
+// See CreateBucket for more information on using the CreateBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateBucketRequest method. // req, resp := client.CreateBucketRequest(params) @@ -235,7 +284,7 @@ const opCreateBucket = "CreateBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -270,30 +319,44 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request // // * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { req, out := c.CreateBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opCreateMultipartUpload = "CreateMultipartUpload" // CreateMultipartUploadRequest generates a "aws/request.Request" representing the // client's request for the CreateMultipartUpload operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See CreateMultipartUpload for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the CreateMultipartUpload method directly -// instead. +// See CreateMultipartUpload for more information on using the CreateMultipartUpload +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the CreateMultipartUploadRequest method. // req, resp := client.CreateMultipartUploadRequest(params) @@ -303,7 +366,7 @@ const opCreateMultipartUpload = "CreateMultipartUpload" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -336,30 +399,44 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation CreateMultipartUpload for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { req, out := c.CreateMultipartUploadRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucket = "DeleteBucket" // DeleteBucketRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucket for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucket method directly -// instead. +// See DeleteBucket for more information on using the DeleteBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
// // // Example sending a request using the DeleteBucketRequest method. // req, resp := client.DeleteBucketRequest(params) @@ -369,7 +446,7 @@ const opDeleteBucket = "DeleteBucket" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -383,8 +460,7 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request output = &DeleteBucketOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -399,30 +475,44 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucket for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { req, out := c.DeleteBucketRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" // DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DeleteBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly -// instead. +// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration +// API call, and error handling. 
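The new `*WithContext` methods added throughout this hunk accept a non-nil `aws.Context`, which a standard `context.Context` satisfies. A minimal sketch using a timeout, with a placeholder bucket:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// The context must be non-nil; passing a nil context panics.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	_, err := svc.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("DeleteBucket failed or timed out:", err)
		return
	}
	fmt.Println("bucket deleted")
}
```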
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. // req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) @@ -432,7 +522,7 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketAnalyticsConfiguration, @@ -446,8 +536,7 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt output = &DeleteBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -456,36 +545,54 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt // Deletes an analytics configuration for the bucket (specified by the analytics // configuration ID). // +// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration +// action. The bucket owner has this permission by default. The bucket owner +// can grant this permission to others. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketCors = "DeleteBucketCors" // DeleteBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketCors for usage and error information. +// See DeleteBucketCors for more information on using the DeleteBucketCors +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketCors method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketCorsRequest method. // req, resp := client.DeleteBucketCorsRequest(params) @@ -495,7 +602,7 @@ const opDeleteBucketCors = "DeleteBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -509,14 +616,13 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request output = &DeleteBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketCors API operation for Amazon Simple Storage Service. // -// Deletes the cors configuration information set for the bucket. +// Deletes the CORS configuration information set for the bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -524,30 +630,119 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketCors for usage and error information. 
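Each `*WithContext` method also takes variadic `request.Option` values, applied via `req.ApplyOptions(opts...)` as the generated bodies show. An `Option` is just `func(*request.Request)`, so per-call customization can be passed inline; a sketch with an illustrative option that tags the outgoing HTTP request:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// A request.Option is func(*request.Request); this one sets an
	// illustrative header on just this call.
	withTrace := func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
	}

	_, err := svc.DeleteBucketCorsWithContext(context.Background(),
		&s3.DeleteBucketCorsInput{
			Bucket: aws.String("example-bucket"), // placeholder
		},
		withTrace,
	)
	if err != nil {
		fmt.Println("DeleteBucketCors failed:", err)
		return
	}
	fmt.Println("CORS configuration deleted")
}
```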
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { req, out := c.DeleteBucketCorsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketEncryption = "DeleteBucketEncryption" + +// DeleteBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeleteBucketEncryption for more information on using the DeleteBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeleteBucketEncryptionRequest method. +// req, resp := client.DeleteBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryptionRequest(input *DeleteBucketEncryptionInput) (req *request.Request, output *DeleteBucketEncryptionOutput) { + op := &request.Operation{ + Name: opDeleteBucketEncryption, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &DeleteBucketEncryptionInput{} + } + + output = &DeleteBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketEncryption API operation for Amazon Simple Storage Service. +// +// Deletes the server-side encryption configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketEncryption for usage and error information. 
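`DeleteBucketEncryption` is one of the operations newly introduced by this SDK revision. A minimal call, assuming the default credential chain and a placeholder bucket:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Removes the bucket's server-side encryption configuration.
	_, err := svc.DeleteBucketEncryption(&s3.DeleteBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		fmt.Println("DeleteBucketEncryption failed:", err)
		return
	}
	fmt.Println("encryption configuration removed")
}
```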
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption +func (c *S3) DeleteBucketEncryption(input *DeleteBucketEncryptionInput) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + return out, req.Send() +} + +// DeleteBucketEncryptionWithContext is the same as DeleteBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketEncryptionWithContext(ctx aws.Context, input *DeleteBucketEncryptionInput, opts ...request.Option) (*DeleteBucketEncryptionOutput, error) { + req, out := c.DeleteBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" // DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketInventoryConfiguration for usage and error information. +// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketInventoryConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) @@ -557,7 +752,7 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketInventoryConfiguration, @@ -571,8 +766,7 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent output = &DeleteBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -587,30 +781,44 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { req, out := c.DeleteBucketInventoryConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketLifecycle for usage and error information. +// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle +// API call, and error handling. 
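Unlike the bucket-level deletes, the inventory configuration delete is keyed by an ID as well as the bucket. A sketch, with placeholder values for both:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	// Both the bucket and the inventory configuration ID are required;
	// the values here are placeholders.
	_, err := svc.DeleteBucketInventoryConfiguration(&s3.DeleteBucketInventoryConfigurationInput{
		Bucket: aws.String("example-bucket"),
		Id:     aws.String("example-inventory-id"),
	})
	if err != nil {
		fmt.Println("DeleteBucketInventoryConfiguration failed:", err)
		return
	}
	fmt.Println("inventory configuration deleted")
}
```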
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketLifecycle method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketLifecycleRequest method. // req, resp := client.DeleteBucketLifecycleRequest(params) @@ -620,7 +828,7 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -634,8 +842,7 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re output = &DeleteBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -649,30 +856,44 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { req, out := c.DeleteBucketLifecycleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketMetricsConfiguration for usage and error information. +// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketMetricsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. // req, resp := client.DeleteBucketMetricsConfigurationRequest(params) @@ -682,7 +903,7 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opDeleteBucketMetricsConfiguration, @@ -696,8 +917,7 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC output = &DeleteBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -712,30 +932,44 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { req, out := c.DeleteBucketMetricsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketPolicy = "DeleteBucketPolicy" // DeleteBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketPolicy for usage and error information. +// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketPolicyRequest method. // req, resp := client.DeleteBucketPolicyRequest(params) @@ -745,7 +979,7 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -759,8 +993,7 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req output = &DeleteBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -774,30 +1007,44 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketPolicy for usage and error information. 
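As the generated bodies above show, each `*WithContext` wrapper is equivalent to building the request yourself, attaching the context with `SetContext`, and sending. A sketch doing that by hand for `DeleteBucketPolicy`, with a placeholder bucket:

```go
package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	svc := s3.New(sess)

	req, _ := svc.DeleteBucketPolicyRequest(&s3.DeleteBucketPolicyInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})

	// This mirrors what DeleteBucketPolicyWithContext does internally:
	// attach the context, then send.
	req.SetContext(context.Background())
	if err := req.Send(); err != nil {
		fmt.Println("DeleteBucketPolicy failed:", err)
		return
	}
	fmt.Println("bucket policy deleted")
}
```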
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { req, out := c.DeleteBucketPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketReplication = "DeleteBucketReplication" // DeleteBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketReplication for usage and error information. +// See DeleteBucketReplication for more information on using the DeleteBucketReplication +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketReplicationRequest method. 
// req, resp := client.DeleteBucketReplicationRequest(params) @@ -807,7 +1054,7 @@ const opDeleteBucketReplication = "DeleteBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -821,14 +1068,15 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) output = &DeleteBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // DeleteBucketReplication API operation for Amazon Simple Storage Service. // -// Deletes the replication configuration from the bucket. +// Deletes the replication configuration from the bucket. For information about +// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -836,30 +1084,44 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { req, out := c.DeleteBucketReplicationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteBucketTagging = "DeleteBucketTagging" // DeleteBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketTagging for usage and error information. +// See DeleteBucketTagging for more information on using the DeleteBucketTagging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketTaggingRequest method. // req, resp := client.DeleteBucketTaggingRequest(params) @@ -869,7 +1131,7 @@ const opDeleteBucketTagging = "DeleteBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -883,8 +1145,7 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r output = &DeleteBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -898,30 +1159,44 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { req, out := c.DeleteBucketTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opDeleteBucketWebsite = "DeleteBucketWebsite" // DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the DeleteBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteBucketWebsite for usage and error information. +// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteBucketWebsite method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteBucketWebsiteRequest method. // req, resp := client.DeleteBucketWebsiteRequest(params) @@ -931,7 +1206,7 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -945,8 +1220,7 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r output = &DeleteBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -960,30 +1234,44 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { req, out := c.DeleteBucketWebsiteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteObject = "DeleteObject" // DeleteObjectRequest generates a "aws/request.Request" representing the // client's request for the DeleteObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteObject for usage and error information. +// See DeleteObject for more information on using the DeleteObject +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObject method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectRequest method. // req, resp := client.DeleteObjectRequest(params) @@ -993,7 +1281,7 @@ const opDeleteObject = "DeleteObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -1022,30 +1310,44 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { req, out := c.DeleteObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteObjectTagging = "DeleteObjectTagging" // DeleteObjectTaggingRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteObjectTagging for usage and error information. +// See DeleteObjectTagging for more information on using the DeleteObjectTagging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjectTagging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectTaggingRequest method. // req, resp := client.DeleteObjectTaggingRequest(params) @@ -1055,7 +1357,7 @@ const opDeleteObjectTagging = "DeleteObjectTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { op := &request.Operation{ Name: opDeleteObjectTagging, @@ -1082,30 +1384,44 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { req, out := c.DeleteObjectTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDeleteObjects = "DeleteObjects" // DeleteObjectsRequest generates a "aws/request.Request" representing the // client's request for the DeleteObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See DeleteObjects for usage and error information. +// See DeleteObjects for more information on using the DeleteObjects +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DeleteObjects method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DeleteObjectsRequest method. // req, resp := client.DeleteObjectsRequest(params) @@ -1115,7 +1431,7 @@ const opDeleteObjects = "DeleteObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -1143,30 +1459,119 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation DeleteObjects for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { req, out := c.DeleteObjectsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + +const opDeletePublicAccessBlock = "DeletePublicAccessBlock" + +// DeletePublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the DeletePublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DeletePublicAccessBlock for more information on using the DeletePublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DeletePublicAccessBlockRequest method. +// req, resp := client.DeletePublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlockRequest(input *DeletePublicAccessBlockInput) (req *request.Request, output *DeletePublicAccessBlockOutput) { + op := &request.Operation{ + Name: opDeletePublicAccessBlock, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &DeletePublicAccessBlockInput{} + } + + output = &DeletePublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeletePublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Removes the PublicAccessBlock configuration from an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeletePublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock +func (c *S3) DeletePublicAccessBlock(input *DeletePublicAccessBlockInput) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + return out, req.Send() +} + +// DeletePublicAccessBlockWithContext is the same as DeletePublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See DeletePublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeletePublicAccessBlockWithContext(ctx aws.Context, input *DeletePublicAccessBlockInput, opts ...request.Option) (*DeletePublicAccessBlockOutput, error) { + req, out := c.DeletePublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketAccelerateConfiguration for usage and error information. +// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAccelerateConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAccelerateConfigurationRequest method. // req, resp := client.GetBucketAccelerateConfigurationRequest(params) @@ -1176,7 +1581,7 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAccelerateConfiguration, @@ -1203,30 +1608,44 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { req, out := c.GetBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketAcl = "GetBucketAcl" // GetBucketAclRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketAcl for usage and error information. +// See GetBucketAcl for more information on using the GetBucketAcl +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAcl method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAclRequest method. // req, resp := client.GetBucketAclRequest(params) @@ -1236,7 +1655,7 @@ const opGetBucketAcl = "GetBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -1263,30 +1682,44 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { req, out := c.GetBucketAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketAnalyticsConfiguration for usage and error information. +// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketAnalyticsConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. // req, resp := client.GetBucketAnalyticsConfigurationRequest(params) @@ -1296,7 +1729,7 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAnalyticsConfiguration, @@ -1324,30 +1757,44 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { req, out := c.GetBucketAnalyticsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
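Every operation's doc block notes that failures come back as `awserr.Error` and recommends a runtime type assertion to recover the code and message. A small sketch of that pattern against `GetBucketAcl`, with a placeholder bucket name:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.GetBucketAcl(&s3.GetBucketAclInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	})
	if err != nil {
		// Runtime type assertion recovers the service error details,
		// exactly as the operation docs above recommend.
		if aerr, ok := err.(awserr.Error); ok {
			fmt.Println("code:", aerr.Code(), "message:", aerr.Message())
			return
		}
		fmt.Println("non-API error:", err)
	}
}
```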
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketCors = "GetBucketCors" // GetBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the GetBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketCors for usage and error information. +// See GetBucketCors for more information on using the GetBucketCors +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketCors method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketCorsRequest method. // req, resp := client.GetBucketCorsRequest(params) @@ -1357,7 +1804,7 @@ const opGetBucketCors = "GetBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -1376,7 +1823,7 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // GetBucketCors API operation for Amazon Simple Storage Service. // -// Returns the cors configuration for the bucket. +// Returns the CORS configuration for the bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1384,30 +1831,118 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketCors for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { req, out := c.GetBucketCorsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. 
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketEncryption = "GetBucketEncryption" + +// GetBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketEncryption for more information on using the GetBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketEncryptionRequest method. +// req, resp := client.GetBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryptionRequest(input *GetBucketEncryptionInput) (req *request.Request, output *GetBucketEncryptionOutput) { + op := &request.Operation{ + Name: opGetBucketEncryption, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &GetBucketEncryptionInput{} + } + + output = &GetBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketEncryption API operation for Amazon Simple Storage Service. +// +// Returns the server-side encryption configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketEncryption for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption +func (c *S3) GetBucketEncryption(input *GetBucketEncryptionInput) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + return out, req.Send() +} + +// GetBucketEncryptionWithContext is the same as GetBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
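The `*WithContext` variants added throughout this diff take an `aws.Context`, which the standard library's `context.Context` satisfies on Go 1.7+. A sketch of a deadline-bounded call using `GetBucketCorsWithContext`; the bucket name and timeout are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// A standard context provides cancellation; passing nil would panic,
	// per the doc comments above.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetBucketCorsWithContext(ctx, &s3.GetBucketCorsInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("CORS rules:", len(out.CORSRules))
}
```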
+func (c *S3) GetBucketEncryptionWithContext(ctx aws.Context, input *GetBucketEncryptionInput, opts ...request.Option) (*GetBucketEncryptionOutput, error) { + req, out := c.GetBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketInventoryConfiguration for usage and error information. +// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketInventoryConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketInventoryConfigurationRequest method. // req, resp := client.GetBucketInventoryConfigurationRequest(params) @@ -1417,7 +1952,7 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opGetBucketInventoryConfiguration, @@ -1445,30 +1980,44 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { req, out := c.GetBucketInventoryConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. 
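`GetBucketEncryption` is one of the operations newly added in this revision. A sketch of reading a bucket's default server-side encryption settings; note the assumption (worth verifying against current S3 behavior) that a bucket with no configuration returns an error rather than an empty result. Bucket name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	})
	if err != nil {
		// Assumed: a missing configuration surfaces as a service error.
		fmt.Println(err)
		return
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if d := rule.ApplyServerSideEncryptionByDefault; d != nil {
			fmt.Println("default SSE algorithm:", aws.StringValue(d.SSEAlgorithm))
		}
	}
}
```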
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLifecycle = "GetBucketLifecycle" // GetBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketLifecycle for usage and error information. +// See GetBucketLifecycle for more information on using the GetBucketLifecycle +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycle method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleRequest method. // req, resp := client.GetBucketLifecycleRequest(params) @@ -1478,7 +2027,9 @@ const opGetBucketLifecycle = "GetBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -1500,7 +2051,7 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // GetBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketLifecycleConfiguration operation. +// No longer used, see the GetBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1508,30 +2059,48 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycle for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +// +// Deprecated: GetBucketLifecycle has been deprecated func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { req, out := c.GetBucketLifecycleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketLifecycleWithContext has been deprecated +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLifecycleConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLifecycleConfiguration method directly -// instead. +// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
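Since `GetBucketLifecycle` now carries an explicit `Deprecated:` tag, new callers should use the `GetBucketLifecycleConfiguration` operation the comments point to. A minimal sketch with a placeholder bucket:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The deprecated GetBucketLifecycle still compiles, but new code should
	// call the Configuration variant instead.
	out, err := svc.GetBucketLifecycleConfiguration(&s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, rule := range out.Rules {
		fmt.Println("lifecycle rule:", aws.StringValue(rule.ID), aws.StringValue(rule.Status))
	}
}
```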
// req, resp := client.GetBucketLifecycleConfigurationRequest(params) @@ -1541,7 +2110,7 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -1568,30 +2137,44 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLifecycleConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { req, out := c.GetBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLocation = "GetBucketLocation" // GetBucketLocationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLocation operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLocation for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLocation method directly -// instead. +// See GetBucketLocation for more information on using the GetBucketLocation +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketLocationRequest method. // req, resp := client.GetBucketLocationRequest(params) @@ -1601,7 +2184,7 @@ const opGetBucketLocation = "GetBucketLocation" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -1628,30 +2211,44 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLocation for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { req, out := c.GetBucketLocationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketLogging = "GetBucketLogging" // GetBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketLogging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketLogging method directly -// instead. +// See GetBucketLogging for more information on using the GetBucketLogging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
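A sketch of `GetBucketLocation`, which carries a long-standing S3 quirk: an empty `LocationConstraint` in the response denotes us-east-1. Bucket name is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
		Bucket: aws.String("my-bucket"), // placeholder bucket
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	// An empty LocationConstraint historically means us-east-1.
	loc := aws.StringValue(out.LocationConstraint)
	if loc == "" {
		loc = "us-east-1"
	}
	fmt.Println("bucket region:", loc)
}
```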
// // // Example sending a request using the GetBucketLoggingRequest method. // req, resp := client.GetBucketLoggingRequest(params) @@ -1661,7 +2258,7 @@ const opGetBucketLogging = "GetBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -1689,30 +2286,44 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { req, out := c.GetBucketLoggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketMetricsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketMetricsConfiguration method directly -// instead. +// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
// req, resp := client.GetBucketMetricsConfigurationRequest(params) @@ -1722,7 +2333,7 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opGetBucketMetricsConfiguration, @@ -1750,30 +2361,44 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { req, out := c.GetBucketMetricsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketNotification = "GetBucketNotification" // GetBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketNotification for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotification method directly -// instead. +// See GetBucketNotification for more information on using the GetBucketNotification +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
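Unlike most of the Get* operations, `GetBucketMetricsConfiguration` addresses a single configuration by ID as well as by bucket. A sketch where both the bucket name and the `EntireBucket` ID are placeholders:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketMetricsConfiguration(&s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String("my-bucket"),    // placeholder bucket
		Id:     aws.String("EntireBucket"), // placeholder configuration ID
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("metrics config ID:", aws.StringValue(out.MetricsConfiguration.Id))
}
```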
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationRequest method. // req, resp := client.GetBucketNotificationRequest(params) @@ -1783,7 +2408,9 @@ const opGetBucketNotification = "GetBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -1805,7 +2432,7 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // GetBucketNotification API operation for Amazon Simple Storage Service. // -// Deprecated, see the GetBucketNotificationConfiguration operation. +// No longer used, see the GetBucketNotificationConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -1813,30 +2440,48 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +// +// Deprecated: GetBucketNotification has been deprecated func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { req, out := c.GetBucketNotificationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: GetBucketNotificationWithContext has been deprecated +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" // GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. 
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketNotificationConfiguration for usage and error information. +// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketNotificationConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketNotificationConfigurationRequest method. // req, resp := client.GetBucketNotificationConfigurationRequest(params) @@ -1846,7 +2491,7 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -1873,30 +2518,44 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketNotificationConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { req, out := c.GetBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } const opGetBucketPolicy = "GetBucketPolicy" // GetBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the GetBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetBucketPolicy for usage and error information. +// See GetBucketPolicy for more information on using the GetBucketPolicy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketPolicy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketPolicyRequest method. // req, resp := client.GetBucketPolicyRequest(params) @@ -1906,7 +2565,7 @@ const opGetBucketPolicy = "GetBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -1933,30 +2592,119 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { req, out := c.GetBucketPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicyStatus = "GetBucketPolicyStatus" + +// GetBucketPolicyStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicyStatus operation. 
The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See GetBucketPolicyStatus for more information on using the GetBucketPolicyStatus +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetBucketPolicyStatusRequest method. +// req, resp := client.GetBucketPolicyStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatusRequest(input *GetBucketPolicyStatusInput) (req *request.Request, output *GetBucketPolicyStatusOutput) { + op := &request.Operation{ + Name: opGetBucketPolicyStatus, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policyStatus", + } + + if input == nil { + input = &GetBucketPolicyStatusInput{} + } + + output = &GetBucketPolicyStatusOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicyStatus API operation for Amazon Simple Storage Service. +// +// Retrieves the policy status for an Amazon S3 bucket, indicating whether the +// bucket is public. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicyStatus for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus +func (c *S3) GetBucketPolicyStatus(input *GetBucketPolicyStatusInput) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + return out, req.Send() +} + +// GetBucketPolicyStatusWithContext is the same as GetBucketPolicyStatus with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicyStatus for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyStatusWithContext(ctx aws.Context, input *GetBucketPolicyStatusInput, opts ...request.Option) (*GetBucketPolicyStatusOutput, error) { + req, out := c.GetBucketPolicyStatusRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketReplication = "GetBucketReplication" // GetBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the GetBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// See GetBucketReplication for usage and error information. +// See GetBucketReplication for more information on using the GetBucketReplication +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketReplication method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketReplicationRequest method. // req, resp := client.GetBucketReplicationRequest(params) @@ -1966,7 +2714,7 @@ const opGetBucketReplication = "GetBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -1987,36 +2735,54 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req // // Returns the replication configuration of a bucket. // +// It can take a while to propagate the put or delete a replication configuration +// to all Amazon S3 systems. Therefore, a get request soon after put or delete +// can return a wrong result. +// // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketRequestPayment = "GetBucketRequestPayment" // GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the GetBucketRequestPayment operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketRequestPayment for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketRequestPayment method directly -// instead. +// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketRequestPaymentRequest method. // req, resp := client.GetBucketRequestPaymentRequest(params) @@ -2026,7 +2792,7 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -2053,30 +2819,44 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketRequestPayment for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { req, out := c.GetBucketRequestPaymentRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketTagging = "GetBucketTagging" // GetBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the GetBucketTagging operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketTagging method directly -// instead. +// See GetBucketTagging for more information on using the GetBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketTaggingRequest method. // req, resp := client.GetBucketTaggingRequest(params) @@ -2086,7 +2866,7 @@ const opGetBucketTagging = "GetBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -2113,30 +2893,44 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { req, out := c.GetBucketTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketVersioning = "GetBucketVersioning" // GetBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the GetBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request completes +// successfully. // -// See GetBucketVersioning for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketVersioning method directly -// instead. +// See GetBucketVersioning for more information on using the GetBucketVersioning +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketVersioningRequest method. // req, resp := client.GetBucketVersioningRequest(params) @@ -2146,7 +2940,7 @@ const opGetBucketVersioning = "GetBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -2173,30 +2967,44 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { req, out := c.GetBucketVersioningRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetBucketWebsite = "GetBucketWebsite" // GetBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the GetBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. 
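As a usage illustration for the versioning getter above, the sketch below checks a bucket's versioning state; Status is nil when versioning has never been configured. The bucket name is hypothetical and credentials are assumed from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}

	// Status is unset (nil) when versioning has never been configured.
	switch aws.StringValue(out.Status) {
	case s3.BucketVersioningStatusEnabled:
		fmt.Println("versioning is enabled")
	case s3.BucketVersioningStatusSuspended:
		fmt.Println("versioning is suspended")
	default:
		fmt.Println("versioning has never been enabled")
	}
}
```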
// -// See GetBucketWebsite for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetBucketWebsite method directly -// instead. +// See GetBucketWebsite for more information on using the GetBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetBucketWebsiteRequest method. // req, resp := client.GetBucketWebsiteRequest(params) @@ -2206,7 +3014,7 @@ const opGetBucketWebsite = "GetBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -2233,30 +3041,44 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation GetBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { req, out := c.GetBucketWebsiteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObject = "GetObject" // GetObjectRequest generates a "aws/request.Request" representing the // client's request for the GetObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObject method directly -// instead. +// See GetObject for more information on using the GetObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectRequest method. // req, resp := client.GetObjectRequest(params) @@ -2266,7 +3088,7 @@ const opGetObject = "GetObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -2298,30 +3120,44 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { req, out := c.GetObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetObjectAcl = "GetObjectAcl" // GetObjectAclRequest generates a "aws/request.Request" representing the // client's request for the GetObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectAcl method directly -// instead. +// See GetObjectAcl for more information on using the GetObjectAcl +// API call, and error handling. 
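The NoSuchKey error code documented for GetObject can be matched with the runtime type assertion on awserr.Error that the comments recommend. A minimal sketch, assuming environment credentials and hypothetical bucket and key names:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		// Match the documented error code via awserr.Error.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
			log.Fatal("object does not exist")
		}
		log.Fatal(err)
	}
	defer out.Body.Close()

	// Stream the object body to stdout.
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}
```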
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetObjectAclRequest method. // req, resp := client.GetObjectAclRequest(params) @@ -2331,7 +3167,7 @@ const opGetObjectAcl = "GetObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -2363,345 +3199,724 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { req, out := c.GetObjectAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() } -const opGetObjectTagging = "GetObjectTagging" +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectLegalHold = "GetObjectLegalHold" + +// GetObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTagging method directly -// instead. +// See GetObjectLegalHold for more information on using the GetObjectLegalHold +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. 
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetObjectTaggingRequest method. -// req, resp := client.GetObjectTaggingRequest(params) +// +// // Example sending a request using the GetObjectLegalHoldRequest method. +// req, resp := client.GetObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHoldRequest(input *GetObjectLegalHoldInput) (req *request.Request, output *GetObjectLegalHoldOutput) { op := &request.Operation{ - Name: opGetObjectTagging, + Name: opGetObjectLegalHold, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &GetObjectTaggingInput{} + input = &GetObjectLegalHoldInput{} } - output = &GetObjectTaggingOutput{} + output = &GetObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// GetObjectTagging API operation for Amazon Simple Storage Service. +// GetObjectLegalHold API operation for Amazon Simple Storage Service. // -// Returns the tag-set of an object. +// Gets an object's current Legal Hold status. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging -func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { - req, out := c.GetObjectTaggingRequest(input) - err := req.Send() - return out, err +// API operation GetObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold +func (c *S3) GetObjectLegalHold(input *GetObjectLegalHoldInput) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + return out, req.Send() } -const opGetObjectTorrent = "GetObjectTorrent" +// GetObjectLegalHoldWithContext is the same as GetObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLegalHoldWithContext(ctx aws.Context, input *GetObjectLegalHoldInput, opts ...request.Option) (*GetObjectLegalHoldOutput, error) { + req, out := c.GetObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// GetObjectTorrentRequest generates a "aws/request.Request" representing the -// client's request for the GetObjectTorrent operation. 
The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectLockConfiguration = "GetObjectLockConfiguration" + +// GetObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See GetObjectTorrent for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetObjectTorrent method directly -// instead. +// See GetObjectLockConfiguration for more information on using the GetObjectLockConfiguration +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the GetObjectTorrentRequest method. -// req, resp := client.GetObjectTorrentRequest(params) +// +// // Example sending a request using the GetObjectLockConfigurationRequest method. +// req, resp := client.GetObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfigurationInput) (req *request.Request, output *GetObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opGetObjectTorrent, + Name: opGetObjectLockConfiguration, HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &GetObjectTorrentInput{} + input = &GetObjectLockConfigurationInput{} } - output = &GetObjectTorrentOutput{} + output = &GetObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) return } -// GetObjectTorrent API operation for Amazon Simple Storage Service. +// GetObjectLockConfiguration API operation for Amazon Simple Storage Service. // -// Return torrent files from a bucket. +// Gets the object lock configuration for a bucket. The rule specified in the +// object lock configuration will be applied by default to every new object +// placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation GetObjectTorrent for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - err := req.Send() - return out, err +// API operation GetObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration +func (c *S3) GetObjectLockConfiguration(input *GetObjectLockConfigurationInput) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + return out, req.Send() } -const opHeadBucket = "HeadBucket" +// GetObjectLockConfigurationWithContext is the same as GetObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectLockConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectLockConfigurationWithContext(ctx aws.Context, input *GetObjectLockConfigurationInput, opts ...request.Option) (*GetObjectLockConfigurationOutput, error) { + req, out := c.GetObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// HeadBucketRequest generates a "aws/request.Request" representing the -// client's request for the HeadBucket operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opGetObjectRetention = "GetObjectRetention" + +// GetObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadBucket for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadBucket method directly -// instead. +// See GetObjectRetention for more information on using the GetObjectRetention +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the HeadBucketRequest method. -// req, resp := client.HeadBucketRequest(params) +// +// // Example sending a request using the GetObjectRetentionRequest method. 
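To illustrate the new object lock getter, the sketch below reads a bucket's object lock configuration and its default retention rule, which (per the doc comment above) applies to every new object placed in the bucket. Names are hypothetical and credentials are assumed from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg := out.ObjectLockConfiguration
	if cfg == nil || aws.StringValue(cfg.ObjectLockEnabled) != s3.ObjectLockEnabledEnabled {
		fmt.Println("object lock is not enabled")
		return
	}
	// The default rule, if present, applies to every new object in the bucket.
	if cfg.Rule != nil && cfg.Rule.DefaultRetention != nil {
		fmt.Printf("default retention: %s for %d days\n",
			aws.StringValue(cfg.Rule.DefaultRetention.Mode),
			aws.Int64Value(cfg.Rule.DefaultRetention.Days))
	}
}
```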
+// req, resp := client.GetObjectRetentionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetentionRequest(input *GetObjectRetentionInput) (req *request.Request, output *GetObjectRetentionOutput) { op := &request.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", + Name: opGetObjectRetention, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?retention", } if input == nil { - input = &HeadBucketInput{} + input = &GetObjectRetentionInput{} } - output = &HeadBucketOutput{} + output = &GetObjectRetentionOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) return } -// HeadBucket API operation for Amazon Simple Storage Service. +// GetObjectRetention API operation for Amazon Simple Storage Service. // -// This operation is useful to determine if a bucket exists and you have permission -// to access it. +// Retrieves an object's retention settings. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadBucket for usage and error information. +// API operation GetObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention +func (c *S3) GetObjectRetention(input *GetObjectRetentionInput) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + return out, req.Send() +} + +// GetObjectRetentionWithContext is the same as GetObjectRetention with the addition of +// the ability to pass a context and additional request options. // -// Returned Error Codes: -// * ErrCodeNoSuchBucket "NoSuchBucket" -// The specified bucket does not exist. +// See GetObjectRetention for details on how to use this API operation. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - err := req.Send() - return out, err +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectRetentionWithContext(ctx aws.Context, input *GetObjectRetentionInput, opts ...request.Option) (*GetObjectRetentionOutput, error) { + req, out := c.GetObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opHeadObject = "HeadObject" +const opGetObjectTagging = "GetObjectTagging" -// HeadObjectRequest generates a "aws/request.Request" representing the -// client's request for the HeadObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See HeadObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the HeadObject method directly -// instead. +// See GetObjectTagging for more information on using the GetObjectTagging +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the HeadObjectRequest method. -// req, resp := client.HeadObjectRequest(params) +// +// // Example sending a request using the GetObjectTaggingRequest method. +// req, resp := client.GetObjectTaggingRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { op := &request.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", } if input == nil { - input = &HeadObjectInput{} + input = &GetObjectTaggingInput{} } - output = &HeadObjectOutput{} + output = &GetObjectTaggingOutput{} req = c.newRequest(op, input, output) return } -// HeadObject API operation for Amazon Simple Storage Service. +// GetObjectTagging API operation for Amazon Simple Storage Service. // -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. +// Returns the tag-set of an object. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation HeadObject for usage and error information. +// API operation GetObjectTagging for usage and error information. 
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. // -// Returned Error Codes: -// * ErrCodeNoSuchKey "NoSuchKey" -// The specified key does not exist. +// See GetObjectTagging for details on how to use this API operation. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - err := req.Send() - return out, err +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" +const opGetObjectTorrent = "GetObjectTorrent" -// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the -// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketAnalyticsConfigurations for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketAnalyticsConfigurations method directly -// instead. +// See GetObjectTorrent for more information on using the GetObjectTorrent +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. -// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// // Example sending a request using the GetObjectTorrentRequest method. 
+// req, resp := client.GetObjectTorrentRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ - Name: opListBucketAnalyticsConfigurations, + Name: opGetObjectTorrent, HTTPMethod: "GET", - HTTPPath: "/{Bucket}?analytics", + HTTPPath: "/{Bucket}/{Key+}?torrent", } if input == nil { - input = &ListBucketAnalyticsConfigurationsInput{} + input = &GetObjectTorrentInput{} } - output = &ListBucketAnalyticsConfigurationsOutput{} + output = &GetObjectTorrentOutput{} req = c.newRequest(op, input, output) return } -// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// GetObjectTorrent API operation for Amazon Simple Storage Service. // -// Lists the analytics configurations for the bucket. +// Return torrent files from a bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation ListBucketAnalyticsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations -func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { +// API operation GetObjectTorrent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetPublicAccessBlock = "GetPublicAccessBlock" + +// GetPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the GetPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
+// +// See GetPublicAccessBlock for more information on using the GetPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the GetPublicAccessBlockRequest method. +// req, resp := client.GetPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlockRequest(input *GetPublicAccessBlockInput) (req *request.Request, output *GetPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opGetPublicAccessBlock, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &GetPublicAccessBlockInput{} + } + + output = &GetPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock +func (c *S3) GetPublicAccessBlock(input *GetPublicAccessBlockInput) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + return out, req.Send() +} + +// GetPublicAccessBlockWithContext is the same as GetPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See GetPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetPublicAccessBlockWithContext(ctx aws.Context, input *GetPublicAccessBlockInput, opts ...request.Option) (*GetPublicAccessBlockOutput, error) { + req, out := c.GetPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadBucket for more information on using the HeadBucket +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadBucketRequest method. 
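A short usage sketch for the new GetPublicAccessBlock operation, printing the four public-access flags; the bucket name is hypothetical and credentials are assumed from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		log.Fatal(err)
	}

	cfg := out.PublicAccessBlockConfiguration
	fmt.Println("BlockPublicAcls:      ", aws.BoolValue(cfg.BlockPublicAcls))
	fmt.Println("IgnorePublicAcls:     ", aws.BoolValue(cfg.IgnorePublicAcls))
	fmt.Println("BlockPublicPolicy:    ", aws.BoolValue(cfg.BlockPublicPolicy))
	fmt.Println("RestrictPublicBuckets:", aws.BoolValue(cfg.RestrictPublicBuckets))
}
```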
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See HeadObject for more information on using the HeadObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the HeadObjectRequest method. 
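HeadBucket's doc comment positions it as an existence-and-permission probe, with NoSuchBucket as the documented error code. A minimal sketch of that check, assuming environment credentials and a hypothetical bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.HeadBucket(&s3.HeadBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
			fmt.Println("bucket does not exist")
			return
		}
		// Other failures (e.g. access denied) surface here as well.
		log.Fatal(err)
	}
	fmt.Println("bucket exists and is accessible")
}
```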
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
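As the HeadObject comment notes, HEAD returns only metadata, which makes it the cheap way to inspect an object's size and timestamps. A minimal sketch with hypothetical names, credentials assumed from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// HEAD returns the object's metadata without downloading the body.
	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("size:         ", aws.Int64Value(out.ContentLength))
	fmt.Println("content type: ", aws.StringValue(out.ContentType))
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
}
```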
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { req, out := c.ListBucketAnalyticsConfigurationsRequest(input) - err := req.Send() - return out, err + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketInventoryConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketInventoryConfigurations for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
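ListBucketAnalyticsConfigurations is paginated via a continuation token, so a complete listing loops until IsTruncated is false. A sketch under the same assumptions as above (hypothetical bucket name, environment credentials):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	input := &s3.ListBucketAnalyticsConfigurationsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	}
	// The operation is paginated with a continuation token, so loop
	// until the service reports no further pages.
	for {
		out, err := svc.ListBucketAnalyticsConfigurations(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, cfg := range out.AnalyticsConfigurationList {
			fmt.Println(aws.StringValue(cfg.Id))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		input.ContinuationToken = out.NextContinuationToken
	}
}
```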
If -// you just want the service response, call the ListBucketInventoryConfigurations method directly -// instead. +// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketInventoryConfigurationsRequest method. // req, resp := client.ListBucketInventoryConfigurationsRequest(params) @@ -2711,7 +3926,7 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { op := &request.Operation{ Name: opListBucketInventoryConfigurations, @@ -2738,30 +3953,44 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketInventoryConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { req, out := c.ListBucketInventoryConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the // client's request for the ListBucketMetricsConfigurations operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListBucketMetricsConfigurations for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBucketMetricsConfigurations method directly -// instead. +// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketMetricsConfigurationsRequest method. // req, resp := client.ListBucketMetricsConfigurationsRequest(params) @@ -2771,7 +4000,7 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { op := &request.Operation{ Name: opListBucketMetricsConfigurations, @@ -2798,30 +4027,44 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBucketMetricsConfigurations for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { req, out := c.ListBucketMetricsConfigurationsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListBuckets = "ListBuckets" // ListBucketsRequest generates a "aws/request.Request" representing the // client's request for the ListBuckets operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request completes +// successfully. // -// See ListBuckets for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListBuckets method directly -// instead. +// See ListBuckets for more information on using the ListBuckets +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListBucketsRequest method. // req, resp := client.ListBucketsRequest(params) @@ -2831,7 +4074,7 @@ const opListBuckets = "ListBuckets" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -2858,30 +4101,44 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListBuckets for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { req, out := c.ListBucketsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opListMultipartUploads = "ListMultipartUploads" // ListMultipartUploadsRequest generates a "aws/request.Request" representing the // client's request for the ListMultipartUploads operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListMultipartUploads for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
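The new *WithContext variants added throughout this hunk accept any context.Context (via the aws.Context alias), so standard cancellation idioms apply. A sketch with a timeout, assuming the svc client from the earlier example plus the context, fmt, and time imports:

```go
// listBucketsWithTimeout is a sketch only; svc is an *s3.S3 client as built
// in the earlier example.
func listBucketsWithTimeout(svc *s3.S3) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The call is cancelled if S3 has not responded within the timeout.
	out, err := svc.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
	if err != nil {
		return err
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
	return nil
}
```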
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListMultipartUploads method directly -// instead. +// See ListMultipartUploads for more information on using the ListMultipartUploads +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListMultipartUploadsRequest method. // req, resp := client.ListMultipartUploadsRequest(params) @@ -2891,7 +4148,7 @@ const opListMultipartUploads = "ListMultipartUploads" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -2924,11 +4181,26 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListMultipartUploads for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { req, out := c.ListMultipartUploadsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, @@ -2942,37 +4214,61 @@ func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultip // // Example iterating over at most 3 pages of a ListMultipartUploads operation. 
// pageNum := 0 // err := client.ListMultipartUploadsPages(params, -// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListMultipartUploadsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListMultipartUploadsOutput), lastPage) - }) +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() } const opListObjectVersions = "ListObjectVersions" // ListObjectVersionsRequest generates a "aws/request.Request" representing the // client's request for the ListObjectVersions operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjectVersions for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectVersions method directly -// instead. +// See ListObjectVersions for more information on using the ListObjectVersions +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectVersionsRequest method. 
// req, resp := client.ListObjectVersionsRequest(params) @@ -2982,7 +4278,7 @@ const opListObjectVersions = "ListObjectVersions" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -3015,11 +4311,26 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListObjectVersions for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { req, out := c.ListObjectVersionsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, @@ -3033,37 +4344,61 @@ func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVers // // Example iterating over at most 3 pages of a ListObjectVersions operation. // pageNum := 0 // err := client.ListObjectVersionsPages(params, -// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// func(page *s3.ListObjectVersionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectVersionsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectVersionsOutput), lastPage) - }) +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() } const opListObjects = "ListObjects" // ListObjectsRequest generates a "aws/request.Request" representing the // client's request for the ListObjects operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjects for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjects method directly -// instead. +// See ListObjects for more information on using the ListObjects +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsRequest method. // req, resp := client.ListObjectsRequest(params) @@ -3073,7 +4408,7 @@ const opListObjects = "ListObjects" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -3113,14 +4448,29 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { req, out := c.ListObjectsRequest(input) - err := req.Send() - return out, err + return out, req.Send() } -// ListObjectsPages iterates over the pages of a ListObjects operation, +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // @@ -3131,37 +4481,61 @@ func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { // // Example iterating over at most 3 pages of a ListObjects operation. // pageNum := 0 // err := client.ListObjectsPages(params, -// func(page *ListObjectsOutput, lastPage bool) bool { +// func(page *s3.ListObjectsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsOutput), lastPage) - }) +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + } + return p.Err() } const opListObjectsV2 = "ListObjectsV2" // ListObjectsV2Request generates a "aws/request.Request" representing the // client's request for the ListObjectsV2 operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListObjectsV2 for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListObjectsV2 method directly -// instead. +// See ListObjectsV2 for more information on using the ListObjectsV2 +// API call, and error handling. 
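The Pages helpers are rewritten above to delegate to PagesWithContext, which drives request.Pagination internally; the caller-visible contract is unchanged — a callback invoked per page that returns false to stop iterating. A sketch, with the bucket name again a placeholder:

```go
// printSomePages is a sketch of the pagination callback contract; it visits
// at most three pages, mirroring the example in the generated comments.
func printSomePages(ctx aws.Context, svc *s3.S3) error {
	pageNum := 0
	return svc.ListObjectsPagesWithContext(ctx, &s3.ListObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
	}, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		pageNum++
		for _, obj := range page.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		return pageNum < 3 // returning false stops the iteration
	})
}
```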
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListObjectsV2Request method. // req, resp := client.ListObjectsV2Request(params) @@ -3171,7 +4545,7 @@ const opListObjectsV2 = "ListObjectsV2" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { op := &request.Operation{ Name: opListObjectsV2, @@ -3212,11 +4586,26 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque // * ErrCodeNoSuchBucket "NoSuchBucket" // The specified bucket does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { req, out := c.ListObjectsV2Request(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, @@ -3230,37 +4619,61 @@ func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, err // // Example iterating over at most 3 pages of a ListObjectsV2 operation. // pageNum := 0 // err := client.ListObjectsV2Pages(params, -// func(page *ListObjectsV2Output, lastPage bool) bool { +// func(page *s3.ListObjectsV2Output, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsV2Request(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsV2Output), lastPage) - }) +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + } + return p.Err() } const opListParts = "ListParts" // ListPartsRequest generates a "aws/request.Request" representing the // client's request for the ListParts operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See ListParts for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the ListParts method directly -// instead. +// See ListParts for more information on using the ListParts +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the ListPartsRequest method. // req, resp := client.ListPartsRequest(params) @@ -3270,7 +4683,7 @@ const opListParts = "ListParts" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -3303,11 +4716,26 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation ListParts for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { req, out := c.ListPartsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
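PagesWithContext also forwards its variadic request.Option arguments to every page's request — the req.ApplyOptions call inside the NewRequest closure above — so per-request options are applied page by page. A sketch using debug logging as the option (the option choice is arbitrary; requires the aws/request import):

```go
// countObjectsVerbose is a sketch; request.WithLogLevel is applied to each
// page's request via ApplyOptions, as the implementation above shows.
func countObjectsVerbose(ctx aws.Context, svc *s3.S3) error {
	total := 0
	err := svc.ListObjectsV2PagesWithContext(ctx, &s3.ListObjectsV2Input{
		Bucket: aws.String("example-bucket"), // placeholder
	}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		total += len(page.Contents)
		return true // visit every page
	}, request.WithLogLevel(aws.LogDebugWithHTTPBody))
	fmt.Println("objects:", total)
	return err
}
```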
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } // ListPartsPages iterates over the pages of a ListParts operation, @@ -3321,37 +4749,61 @@ func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { // // Example iterating over at most 3 pages of a ListParts operation. // pageNum := 0 // err := client.ListPartsPages(params, -// func(page *ListPartsOutput, lastPage bool) bool { +// func(page *s3.ListPartsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListPartsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListPartsOutput), lastPage) - }) +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() } const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAccelerateConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAccelerateConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAccelerateConfiguration method directly -// instead. +// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAccelerateConfigurationRequest method. // req, resp := client.PutBucketAccelerateConfigurationRequest(params) @@ -3361,7 +4813,7 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -3375,8 +4827,7 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC output = &PutBucketAccelerateConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3390,30 +4841,44 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAccelerateConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { req, out := c.PutBucketAccelerateConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketAcl = "PutBucketAcl" // PutBucketAclRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. 
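The Request constructors above also switch from a Remove/PushBackNamed pair to HandlerList.Swap, which replaces the named unmarshal handler in place instead of appending the replacement at the end of the list, preserving handler order. A minimal sketch of the difference with throwaway handler names:

```go
// swapDemo sketches why Swap preserves position; the handlers are throwaway.
func swapDemo() {
	var hl request.HandlerList
	hl.PushBackNamed(request.NamedHandler{Name: "first", Fn: func(r *request.Request) {}})
	hl.PushBackNamed(request.NamedHandler{Name: "second", Fn: func(r *request.Request) {}})

	// Swap leaves "replacement" where "first" was; the old Remove +
	// PushBackNamed sequence would have appended it after "second".
	hl.Swap("first", request.NamedHandler{Name: "replacement", Fn: func(r *request.Request) {}})
}
```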
+// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAcl method directly -// instead. +// See PutBucketAcl for more information on using the PutBucketAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAclRequest method. // req, resp := client.PutBucketAclRequest(params) @@ -3423,7 +4888,7 @@ const opPutBucketAcl = "PutBucketAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -3437,8 +4902,7 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request output = &PutBucketAclOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3452,30 +4916,44 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAcl for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { req, out := c.PutBucketAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketAnalyticsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. 
+// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketAnalyticsConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketAnalyticsConfiguration method directly -// instead. +// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. // req, resp := client.PutBucketAnalyticsConfigurationRequest(params) @@ -3485,7 +4963,7 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAnalyticsConfiguration, @@ -3499,8 +4977,7 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon output = &PutBucketAnalyticsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3515,30 +4992,44 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketAnalyticsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { req, out := c.PutBucketAnalyticsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketCors = "PutBucketCors" // PutBucketCorsRequest generates a "aws/request.Request" representing the // client's request for the PutBucketCors operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketCors for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketCors method directly -// instead. +// See PutBucketCors for more information on using the PutBucketCors +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketCorsRequest method. // req, resp := client.PutBucketCorsRequest(params) @@ -3548,7 +5039,7 @@ const opPutBucketCors = "PutBucketCors" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -3562,14 +5053,13 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque output = &PutBucketCorsOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketCors API operation for Amazon Simple Storage Service. // -// Sets the cors configuration for a bucket. +// Sets the CORS configuration for a bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3577,30 +5067,120 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketCors for usage and error information. 
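Alongside the cors-to-CORS comment fix above, a hedged sketch of calling PutBucketCors; the bucket, origin, and max-age values are placeholders:

```go
// allowReadsFromOrigin is a sketch; all values are placeholders.
func allowReadsFromOrigin(ctx aws.Context, svc *s3.S3) error {
	_, err := svc.PutBucketCorsWithContext(ctx, &s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedMethods: aws.StringSlice([]string{"GET", "HEAD"}),
				AllowedOrigins: aws.StringSlice([]string{"https://example.com"}),
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	return err
}
```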
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { req, out := c.PutBucketCorsRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketEncryption = "PutBucketEncryption" + +// PutBucketEncryptionRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketEncryption operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutBucketEncryption for more information on using the PutBucketEncryption +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutBucketEncryptionRequest method. +// req, resp := client.PutBucketEncryptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *request.Request, output *PutBucketEncryptionOutput) { + op := &request.Operation{ + Name: opPutBucketEncryption, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?encryption", + } + + if input == nil { + input = &PutBucketEncryptionInput{} + } + + output = &PutBucketEncryptionOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketEncryption API operation for Amazon Simple Storage Service. +// +// Creates a new server-side encryption configuration (or replaces an existing +// one, if present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketEncryption for usage and error information. 
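PutBucketEncryption is entirely new in this hunk. A hedged sketch enabling SSE-S3 default encryption with the operation defined above (for SSE-KMS, swap in aws:kms and a KMSMasterKeyID); the bucket name is a placeholder:

```go
// enableDefaultEncryption is a sketch using the operation added above.
func enableDefaultEncryption(svc *s3.S3) error {
	_, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
			Rules: []*s3.ServerSideEncryptionRule{{
				ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
					SSEAlgorithm: aws.String(s3.ServerSideEncryptionAes256),
				},
			}},
		},
	})
	return err
}
```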
+// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption +func (c *S3) PutBucketEncryption(input *PutBucketEncryptionInput) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + return out, req.Send() +} + +// PutBucketEncryptionWithContext is the same as PutBucketEncryption with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketEncryption for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketEncryptionWithContext(ctx aws.Context, input *PutBucketEncryptionInput, opts ...request.Option) (*PutBucketEncryptionOutput, error) { + req, out := c.PutBucketEncryptionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketInventoryConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketInventoryConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketInventoryConfiguration method directly -// instead. +// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketInventoryConfigurationRequest method. 
// req, resp := client.PutBucketInventoryConfigurationRequest(params) @@ -3610,7 +5190,7 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { op := &request.Operation{ Name: opPutBucketInventoryConfiguration, @@ -3624,8 +5204,7 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon output = &PutBucketInventoryConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3640,30 +5219,44 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketInventoryConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { req, out := c.PutBucketInventoryConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketLifecycle = "PutBucketLifecycle" // PutBucketLifecycleRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycle operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketLifecycle for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
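A sketch of PutBucketInventoryConfiguration as defined above; every value here (configuration ID, schedule, destination bucket ARN) is a placeholder, and note the destination bucket is addressed by ARN rather than name:

```go
// enableDailyInventory is a sketch; all identifiers are placeholders.
func enableDailyInventory(ctx aws.Context, svc *s3.S3) error {
	_, err := svc.PutBucketInventoryConfigurationWithContext(ctx,
		&s3.PutBucketInventoryConfigurationInput{
			Bucket: aws.String("example-bucket"),    // placeholder source bucket
			Id:     aws.String("example-inventory"), // placeholder configuration ID
			InventoryConfiguration: &s3.InventoryConfiguration{
				Id:                     aws.String("example-inventory"),
				IsEnabled:              aws.Bool(true),
				IncludedObjectVersions: aws.String(s3.InventoryIncludedObjectVersionsCurrent),
				Schedule:               &s3.InventorySchedule{Frequency: aws.String(s3.InventoryFrequencyDaily)},
				Destination: &s3.InventoryDestination{
					S3BucketDestination: &s3.InventoryS3BucketDestination{
						Bucket: aws.String("arn:aws:s3:::example-dest-bucket"), // placeholder ARN
						Format: aws.String(s3.InventoryFormatCsv),
					},
				},
			},
		})
	return err
}
```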
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycle method directly -// instead. +// See PutBucketLifecycle for more information on using the PutBucketLifecycle +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLifecycleRequest method. // req, resp := client.PutBucketLifecycleRequest(params) @@ -3673,7 +5266,9 @@ const opPutBucketLifecycle = "PutBucketLifecycle" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -3690,14 +5285,13 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req output = &PutBucketLifecycleOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketLifecycle API operation for Amazon Simple Storage Service. // -// Deprecated, see the PutBucketLifecycleConfiguration operation. +// No longer used, see the PutBucketLifecycleConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3705,30 +5299,48 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycle for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +// +// Deprecated: PutBucketLifecycle has been deprecated func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { req, out := c.PutBucketLifecycleRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+// +// Deprecated: PutBucketLifecycleWithContext has been deprecated +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLifecycleConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketLifecycleConfiguration for usage and error information. +// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLifecycleConfiguration method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLifecycleConfigurationRequest method. // req, resp := client.PutBucketLifecycleConfigurationRequest(params) @@ -3738,7 +5350,7 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -3752,8 +5364,7 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon output = &PutBucketLifecycleConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3768,30 +5379,44 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLifecycleConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { req, out := c.PutBucketLifecycleConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketLogging = "PutBucketLogging" // PutBucketLoggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketLogging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketLogging for usage and error information. +// See PutBucketLogging for more information on using the PutBucketLogging +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketLogging method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketLoggingRequest method. 
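// [Editor's sketch — not vendored SDK source.] PutBucketLifecycle is marked
// deprecated in this diff; PutBucketLifecycleConfiguration is its
// replacement. A minimal, hypothetical rule that expires objects under a
// prefix after 30 days (svc as in the first sketch):
//
//    _, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//        Bucket: aws.String("my-bucket"),
//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
//            Rules: []*s3.LifecycleRule{{
//                ID:         aws.String("expire-old-logs"),
//                Status:     aws.String(s3.ExpirationStatusEnabled),
//                Filter:     &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")},
//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
//            }},
//        },
//    })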
// req, resp := client.PutBucketLoggingRequest(params) @@ -3801,7 +5426,7 @@ const opPutBucketLogging = "PutBucketLogging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -3815,8 +5440,7 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request output = &PutBucketLoggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3832,30 +5456,44 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketLogging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { req, out := c.PutBucketLoggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketMetricsConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See PutBucketMetricsConfiguration for usage and error information. +// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketMetricsConfiguration method directly -// instead. 
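// [Editor's sketch — not vendored SDK source.] A hypothetical
// PutBucketLogging call that writes server access logs to a second bucket;
// both bucket names are illustrative, and the target bucket must grant the
// S3 log delivery group write permission for logs to actually arrive:
//
//    _, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
//        Bucket: aws.String("my-bucket"),
//        BucketLoggingStatus: &s3.BucketLoggingStatus{
//            LoggingEnabled: &s3.LoggingEnabled{
//                TargetBucket: aws.String("my-log-bucket"),
//                TargetPrefix: aws.String("access-logs/"),
//            },
//        },
//    })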
+// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketMetricsConfigurationRequest method. // req, resp := client.PutBucketMetricsConfigurationRequest(params) @@ -3865,7 +5503,7 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { op := &request.Operation{ Name: opPutBucketMetricsConfiguration, @@ -3879,8 +5517,7 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu output = &PutBucketMetricsConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -3895,30 +5532,44 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketMetricsConfiguration for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { req, out := c.PutBucketMetricsConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketNotification = "PutBucketNotification" // PutBucketNotificationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotification operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. 
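// [Editor's sketch — not vendored SDK source.] PutBucketMetricsConfiguration
// takes the configuration ID twice: once on the input and once inside the
// configuration itself. A hypothetical bucket-wide metrics configuration:
//
//    _, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
//        Bucket: aws.String("my-bucket"),
//        Id:     aws.String("EntireBucket"),
//        MetricsConfiguration: &s3.MetricsConfiguration{
//            Id: aws.String("EntireBucket"), // omit Filter to cover the whole bucket
//        },
//    })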
+// the "output" return value is not valid until after Send returns without error. // -// See PutBucketNotification for usage and error information. +// See PutBucketNotification for more information on using the PutBucketNotification +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotification method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationRequest method. // req, resp := client.PutBucketNotificationRequest(params) @@ -3928,7 +5579,9 @@ const opPutBucketNotification = "PutBucketNotification" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -3945,14 +5598,13 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re output = &PutBucketNotificationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketNotification API operation for Amazon Simple Storage Service. // -// Deprecated, see the PutBucketNotificationConfiguraiton operation. +// No longer used, see the PutBucketNotificationConfiguration operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -3960,30 +5612,48 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotification for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +// +// Deprecated: PutBucketNotification has been deprecated func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { req, out := c.PutBucketNotificationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +// +// Deprecated: PutBucketNotificationWithContext has been deprecated +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" // PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketNotificationConfiguration operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketNotificationConfiguration for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketNotificationConfiguration method directly -// instead. +// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketNotificationConfigurationRequest method. // req, resp := client.PutBucketNotificationConfigurationRequest(params) @@ -3993,7 +5663,7 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -4007,8 +5677,7 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat output = &PutBucketNotificationConfigurationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4022,30 +5691,44 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketNotificationConfiguration for usage and error information. 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { req, out := c.PutBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketPolicy = "PutBucketPolicy" // PutBucketPolicyRequest generates a "aws/request.Request" representing the // client's request for the PutBucketPolicy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketPolicy for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketPolicy method directly -// instead. +// See PutBucketPolicy for more information on using the PutBucketPolicy +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketPolicyRequest method. 
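// [Editor's sketch — not vendored SDK source.] PutBucketNotification is
// deprecated in this diff in favor of PutBucketNotificationConfiguration. A
// hypothetical configuration that publishes object-created events to an SNS
// topic (the topic ARN is made up):
//
//    _, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
//        Bucket: aws.String("my-bucket"),
//        NotificationConfiguration: &s3.NotificationConfiguration{
//            TopicConfigurations: []*s3.TopicConfiguration{{
//                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"),
//                Events:   []*string{aws.String(s3.EventS3ObjectCreated)},
//            }},
//        },
//    })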
// req, resp := client.PutBucketPolicyRequest(params) @@ -4055,7 +5738,7 @@ const opPutBucketPolicy = "PutBucketPolicy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -4069,15 +5752,13 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R output = &PutBucketPolicyOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketPolicy API operation for Amazon Simple Storage Service. // -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. +// Applies an Amazon S3 bucket policy to an Amazon S3 bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4085,30 +5766,44 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketPolicy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { req, out := c.PutBucketPolicyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketReplication = "PutBucketReplication" // PutBucketReplicationRequest generates a "aws/request.Request" representing the // client's request for the PutBucketReplication operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketReplication for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
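// [Editor's sketch — not vendored SDK source.] PutBucketPolicy takes the
// policy document as a JSON string. A hypothetical public-read policy for an
// illustrative bucket:
//
//    policy := `{
//        "Version": "2012-10-17",
//        "Statement": [{
//            "Sid":       "AllowPublicRead",
//            "Effect":    "Allow",
//            "Principal": "*",
//            "Action":    "s3:GetObject",
//            "Resource":  "arn:aws:s3:::my-bucket/*"
//        }]
//    }`
//
//    _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
//        Bucket: aws.String("my-bucket"),
//        Policy: aws.String(policy),
//    })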
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketReplication method directly -// instead. +// See PutBucketReplication for more information on using the PutBucketReplication +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketReplicationRequest method. // req, resp := client.PutBucketReplicationRequest(params) @@ -4118,7 +5813,7 @@ const opPutBucketReplication = "PutBucketReplication" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -4132,15 +5827,15 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req output = &PutBucketReplicationOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } // PutBucketReplication API operation for Amazon Simple Storage Service. // -// Creates a new replication configuration (or replaces an existing one, if -// present). +// Creates a replication configuration or replaces an existing one. For more +// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) +// in the Amazon S3 Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -4148,30 +5843,44 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketReplication for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { req, out := c.PutBucketReplicationRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketRequestPayment = "PutBucketRequestPayment" // PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the // client's request for the PutBucketRequestPayment operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketRequestPayment for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketRequestPayment method directly -// instead. +// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketRequestPaymentRequest method. // req, resp := client.PutBucketRequestPaymentRequest(params) @@ -4181,7 +5890,7 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -4195,8 +5904,7 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) output = &PutBucketRequestPaymentOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4214,30 +5922,44 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketRequestPayment for usage and error information. 
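// [Editor's sketch — not vendored SDK source.] For the PutBucketReplication
// operation above: a hypothetical cross-region replication configuration.
// The role and bucket ARNs are made up, and versioning must already be
// enabled on both buckets for CRR to work:
//
//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
//        Bucket: aws.String("source-bucket"),
//        ReplicationConfiguration: &s3.ReplicationConfiguration{
//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
//            Rules: []*s3.ReplicationRule{{
//                Prefix: aws.String(""), // empty prefix: replicate everything
//                Status: aws.String(s3.ReplicationRuleStatusEnabled),
//                Destination: &s3.Destination{
//                    Bucket: aws.String("arn:aws:s3:::destination-bucket"),
//                },
//            }},
//        },
//    })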
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { req, out := c.PutBucketRequestPaymentRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketTagging = "PutBucketTagging" // PutBucketTaggingRequest generates a "aws/request.Request" representing the // client's request for the PutBucketTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketTagging method directly -// instead. +// See PutBucketTagging for more information on using the PutBucketTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketTaggingRequest method. 
// req, resp := client.PutBucketTaggingRequest(params) @@ -4247,7 +5969,7 @@ const opPutBucketTagging = "PutBucketTagging" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -4261,8 +5983,7 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request output = &PutBucketTaggingOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4276,30 +5997,44 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { req, out := c.PutBucketTaggingRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketVersioning = "PutBucketVersioning" // PutBucketVersioningRequest generates a "aws/request.Request" representing the // client's request for the PutBucketVersioning operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketVersioning for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketVersioning method directly -// instead. +// See PutBucketVersioning for more information on using the PutBucketVersioning +// API call, and error handling. 
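// [Editor's sketch — not vendored SDK source.] PutBucketTagging replaces the
// bucket's entire tag set with the supplied one. A hypothetical example:
//
//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
//        Bucket: aws.String("my-bucket"),
//        Tagging: &s3.Tagging{
//            TagSet: []*s3.Tag{
//                {Key: aws.String("env"), Value: aws.String("dev")},
//                {Key: aws.String("team"), Value: aws.String("infra")},
//            },
//        },
//    })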
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketVersioningRequest method. // req, resp := client.PutBucketVersioningRequest(params) @@ -4309,7 +6044,7 @@ const opPutBucketVersioning = "PutBucketVersioning" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -4323,8 +6058,7 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r output = &PutBucketVersioningOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4339,30 +6073,44 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketVersioning for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { req, out := c.PutBucketVersioningRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutBucketWebsite = "PutBucketWebsite" // PutBucketWebsiteRequest generates a "aws/request.Request" representing the // client's request for the PutBucketWebsite operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutBucketWebsite for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
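// [Editor's sketch — not vendored SDK source.] Enabling versioning on a
// hypothetical bucket; the same call with s3.BucketVersioningStatusSuspended
// suspends it instead:
//
//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
//        Bucket: aws.String("my-bucket"),
//        VersioningConfiguration: &s3.VersioningConfiguration{
//            Status: aws.String(s3.BucketVersioningStatusEnabled),
//        },
//    })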
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutBucketWebsite method directly -// instead. +// See PutBucketWebsite for more information on using the PutBucketWebsite +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutBucketWebsiteRequest method. // req, resp := client.PutBucketWebsiteRequest(params) @@ -4372,7 +6120,7 @@ const opPutBucketWebsite = "PutBucketWebsite" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -4386,8 +6134,7 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request output = &PutBucketWebsiteOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) return } @@ -4401,30 +6148,44 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutBucketWebsite for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { req, out := c.PutBucketWebsiteRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutObject = "PutObject" // PutObjectRequest generates a "aws/request.Request" representing the // client's request for the PutObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. 
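// [Editor's sketch — not vendored SDK source.] A hypothetical static-website
// configuration with index and error documents:
//
//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
//        Bucket: aws.String("my-bucket"),
//        WebsiteConfiguration: &s3.WebsiteConfiguration{
//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
//        },
//    })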
// -// See PutObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObject method directly -// instead. +// See PutObject for more information on using the PutObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectRequest method. // req, resp := client.PutObjectRequest(params) @@ -4434,7 +6195,7 @@ const opPutObject = "PutObject" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -4461,30 +6222,44 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation PutObject for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { req, out := c.PutObjectRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opPutObjectAcl = "PutObjectAcl" // PutObjectAclRequest generates a "aws/request.Request" representing the // client's request for the PutObjectAcl operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See PutObjectAcl for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
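// [Editor's sketch — not vendored SDK source.] Uploading a small object with
// the context-aware variant; bytes.NewReader satisfies the io.ReadSeeker
// that PutObjectInput.Body expects. The bucket, key, and ctx (from the first
// sketch) are illustrative:
//
//    _, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
//        Bucket:      aws.String("my-bucket"),
//        Key:         aws.String("greeting.txt"),
//        Body:        bytes.NewReader([]byte("hello, world")),
//        ContentType: aws.String("text/plain"),
//    })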
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectAcl method directly -// instead. +// See PutObjectAcl for more information on using the PutObjectAcl +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the PutObjectAclRequest method. // req, resp := client.PutObjectAclRequest(params) @@ -4494,7 +6269,7 @@ const opPutObjectAcl = "PutObjectAcl" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -4527,187 +6302,611 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request // * ErrCodeNoSuchKey "NoSuchKey" // The specified key does not exist. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { req, out := c.PutObjectAclRequest(input) - err := req.Send() - return out, err + return out, req.Send() } -const opPutObjectTagging = "PutObjectTagging" +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// PutObjectTaggingRequest generates a "aws/request.Request" representing the -// client's request for the PutObjectTagging operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opPutObjectLegalHold = "PutObjectLegalHold" + +// PutObjectLegalHoldRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLegalHold operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See PutObjectTagging for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
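// [Editor's sketch — not vendored SDK source.] Setting a canned ACL on an
// existing object, then checking for the NoSuchKey error code documented
// above (assumes import "github.com/aws/aws-sdk-go/aws/awserr"):
//
//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
//        Bucket: aws.String("my-bucket"),
//        Key:    aws.String("greeting.txt"),
//        ACL:    aws.String(s3.ObjectCannedACLPublicRead),
//    })
//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
//        log.Println("object does not exist")
//    }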
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the PutObjectTagging method directly -// instead. +// See PutObjectLegalHold for more information on using the PutObjectLegalHold +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the PutObjectTaggingRequest method. -// req, resp := client.PutObjectTaggingRequest(params) +// +// // Example sending a request using the PutObjectLegalHoldRequest method. +// req, resp := client.PutObjectLegalHoldRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHoldRequest(input *PutObjectLegalHoldInput) (req *request.Request, output *PutObjectLegalHoldOutput) { op := &request.Operation{ - Name: opPutObjectTagging, + Name: opPutObjectLegalHold, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?tagging", + HTTPPath: "/{Bucket}/{Key+}?legal-hold", } if input == nil { - input = &PutObjectTaggingInput{} + input = &PutObjectLegalHoldInput{} } - output = &PutObjectTaggingOutput{} + output = &PutObjectLegalHoldOutput{} req = c.newRequest(op, input, output) return } -// PutObjectTagging API operation for Amazon Simple Storage Service. +// PutObjectLegalHold API operation for Amazon Simple Storage Service. // -// Sets the supplied tag-set to an object that already exists in a bucket +// Applies a Legal Hold configuration to the specified object. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation PutObjectTagging for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging -func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { - req, out := c.PutObjectTaggingRequest(input) - err := req.Send() - return out, err +// API operation PutObjectLegalHold for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold +func (c *S3) PutObjectLegalHold(input *PutObjectLegalHoldInput) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + return out, req.Send() } -const opRestoreObject = "RestoreObject" +// PutObjectLegalHoldWithContext is the same as PutObjectLegalHold with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectLegalHold for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLegalHoldWithContext(ctx aws.Context, input *PutObjectLegalHoldInput, opts ...request.Option) (*PutObjectLegalHoldOutput, error) { + req, out := c.PutObjectLegalHoldRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} -// RestoreObjectRequest generates a "aws/request.Request" representing the -// client's request for the RestoreObject operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +const opPutObjectLockConfiguration = "PutObjectLockConfiguration" + +// PutObjectLockConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectLockConfiguration operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See RestoreObject for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the RestoreObject method directly -// instead. +// See PutObjectLockConfiguration for more information on using the PutObjectLockConfiguration +// API call, and error handling. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the RestoreObjectRequest method. -// req, resp := client.RestoreObjectRequest(params) +// +// // Example sending a request using the PutObjectLockConfigurationRequest method. +// req, resp := client.PutObjectLockConfigurationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfigurationInput) (req *request.Request, output *PutObjectLockConfigurationOutput) { op := &request.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", + Name: opPutObjectLockConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?object-lock", } if input == nil { - input = &RestoreObjectInput{} + input = &PutObjectLockConfigurationInput{} } - output = &RestoreObjectOutput{} + output = &PutObjectLockConfigurationOutput{} req = c.newRequest(op, input, output) return } -// RestoreObject API operation for Amazon Simple Storage Service. +// PutObjectLockConfiguration API operation for Amazon Simple Storage Service. 
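//
//    // Editor's sketch of the WithContext variant defined above; assumes an
//    // existing *s3.S3 client "svc" and imports "context" and "time". Bucket,
//    // key, and the timeout are placeholder values; ObjectLockLegalHold is
//    // added elsewhere in this changeset.
//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//    defer cancel()
//    _, err := svc.PutObjectLegalHoldWithContext(ctx, &s3.PutObjectLegalHoldInput{
//        Bucket:    aws.String("examplebucket"),
//        Key:       aws.String("examplekey"),
//        LegalHold: &s3.ObjectLockLegalHold{Status: aws.String("ON")},
//    })
//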
// -// Restores an archived copy of an object back into Amazon S3 +// Places an object lock configuration on the specified bucket. The rule specified +// in the object lock configuration will be applied by default to every new +// object placed in the specified bucket. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Simple Storage Service's -// API operation RestoreObject for usage and error information. +// API operation PutObjectLockConfiguration for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration +func (c *S3) PutObjectLockConfiguration(input *PutObjectLockConfigurationInput) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + return out, req.Send() +} + +// PutObjectLockConfigurationWithContext is the same as PutObjectLockConfiguration with the addition of +// the ability to pass a context and additional request options. // -// Returned Error Codes: -// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" -// This operation is not allowed against this storage tier +// See PutObjectLockConfiguration for details on how to use this API operation. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - err := req.Send() - return out, err +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectLockConfigurationWithContext(ctx aws.Context, input *PutObjectLockConfigurationInput, opts ...request.Option) (*PutObjectLockConfigurationOutput, error) { + req, out := c.PutObjectLockConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -const opUploadPart = "UploadPart" +const opPutObjectRetention = "PutObjectRetention" -// UploadPartRequest generates a "aws/request.Request" representing the -// client's request for the UploadPart operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// PutObjectRetentionRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectRetention operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // -// See UploadPart for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the UploadPart method directly -// instead. +// See PutObjectRetention for more information on using the PutObjectRetention +// API call, and error handling. 
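//
//    // Editor's sketch, with placeholder values: setting a bucket-level default
//    // retention rule via PutObjectLockConfiguration, as described above. The
//    // ObjectLockConfiguration, ObjectLockRule, and DefaultRetention types are
//    // added elsewhere in this changeset; "svc" is an assumed *s3.S3 client.
//    _, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
//        Bucket: aws.String("examplebucket"),
//        ObjectLockConfiguration: &s3.ObjectLockConfiguration{
//            ObjectLockEnabled: aws.String("Enabled"),
//            Rule: &s3.ObjectLockRule{
//                DefaultRetention: &s3.DefaultRetention{
//                    Mode: aws.String("COMPLIANCE"),
//                    Days: aws.Int64(30),
//                },
//            },
//        },
//    })
//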
// -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// // Example sending a request using the UploadPartRequest method. -// req, resp := client.UploadPartRequest(params) +// +// // Example sending a request using the PutObjectRetentionRequest method. +// req, resp := client.PutObjectRetentionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetentionRequest(input *PutObjectRetentionInput) (req *request.Request, output *PutObjectRetentionOutput) { op := &request.Operation{ - Name: opUploadPart, + Name: opPutObjectRetention, HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", + HTTPPath: "/{Bucket}/{Key+}?retention", } if input == nil { - input = &UploadPartInput{} + input = &PutObjectRetentionInput{} } - output = &UploadPartOutput{} + output = &PutObjectRetentionOutput{} req = c.newRequest(op, input, output) return } -// UploadPart API operation for Amazon Simple Storage Service. +// PutObjectRetention API operation for Amazon Simple Storage Service. // -// Uploads a part in a multipart upload. +// Places an Object Retention configuration on an object. // -// Note: After you initiate multipart upload and upload one or more parts, you -// must either complete or abort multipart upload in order to stop getting charged +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectRetention for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention +func (c *S3) PutObjectRetention(input *PutObjectRetentionInput) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + return out, req.Send() +} + +// PutObjectRetentionWithContext is the same as PutObjectRetention with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectRetention for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectRetentionWithContext(ctx aws.Context, input *PutObjectRetentionInput, opts ...request.Option) (*PutObjectRetentionOutput, error) { + req, out := c.PutObjectRetentionRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. 
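//
//    // Editor's sketch (placeholder bucket, key, and date): applying a retention
//    // period to a single object with PutObjectRetention, as documented above.
//    // The ObjectLockRetention type is added elsewhere in this changeset.
//    _, err := svc.PutObjectRetention(&s3.PutObjectRetentionInput{
//        Bucket: aws.String("examplebucket"),
//        Key:    aws.String("examplekey"),
//        Retention: &s3.ObjectLockRetention{
//            Mode:            aws.String("GOVERNANCE"),
//            RetainUntilDate: aws.Time(time.Now().AddDate(0, 0, 7)),
//        },
//    })
//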
+// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutObjectTagging for more information on using the PutObjectTagging +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutPublicAccessBlock = "PutPublicAccessBlock" + +// PutPublicAccessBlockRequest generates a "aws/request.Request" representing the +// client's request for the PutPublicAccessBlock operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See PutPublicAccessBlock for more information on using the PutPublicAccessBlock +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the PutPublicAccessBlockRequest method. 
+// req, resp := client.PutPublicAccessBlockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlockRequest(input *PutPublicAccessBlockInput) (req *request.Request, output *PutPublicAccessBlockOutput) { + op := &request.Operation{ + Name: opPutPublicAccessBlock, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?publicAccessBlock", + } + + if input == nil { + input = &PutPublicAccessBlockInput{} + } + + output = &PutPublicAccessBlockOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutPublicAccessBlock API operation for Amazon Simple Storage Service. +// +// Creates or modifies the PublicAccessBlock configuration for an Amazon S3 +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutPublicAccessBlock for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock +func (c *S3) PutPublicAccessBlock(input *PutPublicAccessBlockInput) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + return out, req.Send() +} + +// PutPublicAccessBlockWithContext is the same as PutPublicAccessBlock with the addition of +// the ability to pass a context and additional request options. +// +// See PutPublicAccessBlock for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutPublicAccessBlockWithContext(ctx aws.Context, input *PutPublicAccessBlockInput, opts ...request.Option) (*PutPublicAccessBlockOutput, error) { + req, out := c.PutPublicAccessBlockRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See RestoreObject for more information on using the RestoreObject +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the RestoreObjectRequest method. 
+// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opSelectObjectContent = "SelectObjectContent" + +// SelectObjectContentRequest generates a "aws/request.Request" representing the +// client's request for the SelectObjectContent operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See SelectObjectContent for more information on using the SelectObjectContent +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the SelectObjectContentRequest method. 
+// req, resp := client.SelectObjectContentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContentRequest(input *SelectObjectContentInput) (req *request.Request, output *SelectObjectContentOutput) { + op := &request.Operation{ + Name: opSelectObjectContent, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?select&select-type=2", + } + + if input == nil { + input = &SelectObjectContentInput{} + } + + output = &SelectObjectContentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Send.Swap(client.LogHTTPResponseHandler.Name, client.LogHTTPResponseHeaderHandler) + req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, rest.UnmarshalHandler) + req.Handlers.Unmarshal.PushBack(output.runEventStreamLoop) + return +} + +// SelectObjectContent API operation for Amazon Simple Storage Service. +// +// This operation filters the contents of an Amazon S3 object based on a simple +// Structured Query Language (SQL) statement. In the request, along with the +// SQL expression, you must also specify a data serialization format (JSON or +// CSV) of the object. Amazon S3 uses this to parse object data into records, +// and returns only records that match the specified SQL expression. You must +// also specify the data serialization format for the response. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation SelectObjectContent for usage and error information. +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent +func (c *S3) SelectObjectContent(input *SelectObjectContentInput) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + return out, req.Send() +} + +// SelectObjectContentWithContext is the same as SelectObjectContent with the addition of +// the ability to pass a context and additional request options. +// +// See SelectObjectContent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) SelectObjectContentWithContext(ctx aws.Context, input *SelectObjectContentInput, opts ...request.Option) (*SelectObjectContentOutput, error) { + req, out := c.SelectObjectContentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UploadPart for more information on using the UploadPart +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. 
Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged // for storage of the uploaded parts. Only after you either complete or abort // multipart upload, Amazon S3 frees up the parts storage and stops charging // you for the parts storage. @@ -4718,30 +6917,44 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPart for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { req, out := c.UploadPartRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opUploadPartCopy = "UploadPartCopy" // UploadPartCopyRequest generates a "aws/request.Request" representing the // client's request for the UploadPartCopy operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See UploadPartCopy for usage and error information. +// See UploadPartCopy for more information on using the UploadPartCopy +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. 
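//
//    // Editor's sketch (assumed client "svc", an upload ID from a prior
//    // CreateMultipartUpload call, and an in-memory buffer "buf"): uploading a
//    // single part. Remember to complete or abort the upload, per the note above.
//    part, err := svc.UploadPart(&s3.UploadPartInput{
//        Bucket:     aws.String("examplebucket"),
//        Key:        aws.String("largeobject"),
//        UploadId:   aws.String(uploadID),
//        PartNumber: aws.Int64(1),
//        Body:       bytes.NewReader(buf),
//    })
//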
If -// you just want the service response, call the UploadPartCopy method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the UploadPartCopyRequest method. // req, resp := client.UploadPartCopyRequest(params) @@ -4751,7 +6964,7 @@ const opUploadPartCopy = "UploadPartCopy" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { op := &request.Operation{ Name: opUploadPartCopy, @@ -4778,21 +6991,38 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req // // See the AWS API reference guide for Amazon Simple Storage Service's // API operation UploadPartCopy for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +// See also, https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { req, out := c.UploadPartCopyRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } -// Specifies the days since the initiation of an Incomplete Multipart Upload -// that Lifecycle will wait before permanently removing all parts of the upload. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload +// Specifies the days since the initiation of an incomplete multipart upload +// that Amazon S3 will wait before permanently removing all parts of the upload. +// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket +// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) +// in the Amazon Simple Storage Service Developer Guide. type AbortIncompleteMultipartUpload struct { _ struct{} `type:"structure"` - // Indicates the number of days that must pass since initiation for Lifecycle - // to abort an Incomplete Multipart Upload. + // Specifies the number of days after which Amazon S3 aborts an incomplete multipart + // upload. 
DaysAfterInitiation *int64 `type:"integer"` } @@ -4812,13 +7042,16 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest type AbortMultipartUploadInput struct { _ struct{} `type:"structure"` + // Name of the bucket to which the multipart upload was initiated. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Key of the object for which the multipart upload was initiated. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -4828,6 +7061,8 @@ type AbortMultipartUploadInput struct { // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + // Upload ID that identifies the multipart upload. + // // UploadId is a required field UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` } @@ -4848,6 +7083,9 @@ func (s *AbortMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -4870,6 +7108,13 @@ func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInp return s } +func (s *AbortMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { s.Key = &v @@ -4888,7 +7133,6 @@ func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput type AbortMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -4913,11 +7157,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration +// Configures the transfer acceleration state for an Amazon S3 bucket. For more +// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html) +// in the Amazon Simple Storage Service Developer Guide. type AccelerateConfiguration struct { _ struct{} `type:"structure"` - // The accelerate configuration of the bucket. + // Specifies the transfer acceleration status of the bucket. Status *string `type:"string" enum:"BucketAccelerateStatus"` } @@ -4937,13 +7183,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy +// Contains the elements that set the ACL permissions for an object per grantee. type AccessControlPolicy struct { _ struct{} `type:"structure"` // A list of grants. Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + // Container for the bucket owner's display name and ID. 
Owner *Owner `type:"structure"` } @@ -4989,11 +7236,55 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator -type AnalyticsAndOperator struct { +// A container for information about access control for replicas. +type AccessControlTranslation struct { _ struct{} `type:"structure"` - // The prefix to use when evaluating an AND predicate. + // Specifies the replica ownership. For default and valid values, see PUT bucket + // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // in the Amazon Simple Storage Service API Reference. + // + // Owner is a required field + Owner *string `type:"string" required:"true" enum:"OwnerOverride"` +} + +// String returns the string representation +func (s AccessControlTranslation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlTranslation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlTranslation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlTranslation"} + if s.Owner == nil { + invalidParams.Add(request.NewErrParamRequired("Owner")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation { + s.Owner = &v + return s +} + +// A conjunction (logical AND) of predicates, which is used in evaluating a +// metrics filter. The operator must have at least two predicates in any combination, +// and an object must match all of the predicates for the filter to apply. +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate: The prefix that an object + // must have to be included in the metrics results. Prefix *string `type:"string"` // The list of tags to use when evaluating an AND predicate. @@ -5042,7 +7333,11 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration +// Specifies the configuration and any analyses for the analytics filter of +// an Amazon S3 bucket. +// +// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html) +// in the Amazon Simple Storage Service API Reference. type AnalyticsConfiguration struct { _ struct{} `type:"structure"` @@ -5051,13 +7346,13 @@ type AnalyticsConfiguration struct { // If no filter is provided, all objects will be considered in any analysis. Filter *AnalyticsFilter `type:"structure"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `type:"string" required:"true"` - // If present, it indicates that data related to access patterns will be collected - // and made available to analyze the tradeoffs between different storage classes. + // Contains data related to access patterns to be collected and made available + // to analyze the tradeoffs between different storage classes. 
// // StorageClassAnalysis is a required field StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` @@ -5117,7 +7412,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination +// Where to publish the analytics results. type AnalyticsExportDestination struct { _ struct{} `type:"structure"` @@ -5161,7 +7456,6 @@ func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3Bucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter type AnalyticsFilter struct { _ struct{} `type:"structure"` @@ -5224,11 +7518,10 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination type AnalyticsS3BucketDestination struct { _ struct{} `type:"structure"` - // The Amazon resource name (ARN) of the bucket to which data is exported. + // The Amazon Resource Name (ARN) of the bucket to which data is exported. // // Bucket is a required field Bucket *string `type:"string" required:"true"` @@ -5237,13 +7530,12 @@ type AnalyticsS3BucketDestination struct { // the owner will not be validated prior to exporting data. BucketAccountId *string `type:"string"` - // The file format used when exporting data to Amazon S3. + // Specifies the file format used when exporting data to Amazon S3. // // Format is a required field Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` - // The prefix to use when exporting data. The exported data begins with this - // prefix. + // The prefix to use when exporting data. The prefix is prepended to all results. Prefix *string `type:"string"` } @@ -5279,6 +7571,13 @@ func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDes return s } +func (s *AnalyticsS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketAccountId sets the BucketAccountId field's value. func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { s.BucketAccountId = &v @@ -5297,12 +7596,11 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket type Bucket struct { _ struct{} `type:"structure"` // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + CreationDate *time.Time `type:"timestamp"` // The name of the bucket. Name *string `type:"string"` @@ -5330,10 +7628,14 @@ func (s *Bucket) SetName(v string) *Bucket { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration +// Specifies the lifecycle configuration for objects in an Amazon S3 bucket. +// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) +// in the Amazon Simple Storage Service Developer Guide. type BucketLifecycleConfiguration struct { _ struct{} `type:"structure"` + // A lifecycle rule for individual objects in an Amazon S3 bucket. 
+ // // Rules is a required field Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } @@ -5377,10 +7679,13 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus type BucketLoggingStatus struct { _ struct{} `type:"structure"` + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -5415,10 +7720,15 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration +// Describes the cross-origin access configuration for objects in an Amazon +// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing +// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon +// Simple Storage Service Developer Guide. type CORSConfiguration struct { _ struct{} `type:"structure"` + // A set of allowed origins and methods. + // // CORSRules is a required field CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` } @@ -5462,15 +7772,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule +// Specifies a cross-origin access rule for an Amazon S3 bucket. type CORSRule struct { _ struct{} `type:"structure"` - // Specifies which headers are allowed in a pre-flight OPTIONS request. + // Headers that are specified in the Access-Control-Request-Headers header. + // These headers are allowed in a preflight OPTIONS request. In response to + // any preflight OPTIONS request, Amazon S3 returns any requested headers that + // are allowed. AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. + // An HTTP method that you allow the origin to execute. Valid values are GET, + // PUT, HEAD, POST, and DELETE. // // AllowedMethods is a required field AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` @@ -5546,18 +7859,162 @@ func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +// Describes how a CSV-formatted input object is formatted. +type CSVInput struct { + _ struct{} `type:"structure"` + + // Specifies that CSV field values may contain quoted record delimiters and + // such records should be allowed. Default value is FALSE. Setting this value + // to TRUE may lower performance. + AllowQuotedRecordDelimiter *bool `type:"boolean"` + + // The single character used to indicate a row should be ignored when present + // at the start of a row. + Comments *string `type:"string"` + + // The value used to separate individual fields in a record. + FieldDelimiter *string `type:"string"` + + // Describes the first line of input. Valid values: None, Ignore, Use. 
+	FileHeaderInfo *string `type:"string" enum:"FileHeaderInfo"`
+
+	// The value used for escaping where the field delimiter is part of the value.
+	QuoteCharacter *string `type:"string"`
+
+	// The single character used for escaping the quote character inside an already
+	// escaped value.
+	QuoteEscapeCharacter *string `type:"string"`
+
+	// The value used to separate individual records.
+	RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CSVInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CSVInput) GoString() string {
+	return s.String()
+}
+
+// SetAllowQuotedRecordDelimiter sets the AllowQuotedRecordDelimiter field's value.
+func (s *CSVInput) SetAllowQuotedRecordDelimiter(v bool) *CSVInput {
+	s.AllowQuotedRecordDelimiter = &v
+	return s
+}
+
+// SetComments sets the Comments field's value.
+func (s *CSVInput) SetComments(v string) *CSVInput {
+	s.Comments = &v
+	return s
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVInput) SetFieldDelimiter(v string) *CSVInput {
+	s.FieldDelimiter = &v
+	return s
+}
+
+// SetFileHeaderInfo sets the FileHeaderInfo field's value.
+func (s *CSVInput) SetFileHeaderInfo(v string) *CSVInput {
+	s.FileHeaderInfo = &v
+	return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVInput) SetQuoteCharacter(v string) *CSVInput {
+	s.QuoteCharacter = &v
+	return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVInput) SetQuoteEscapeCharacter(v string) *CSVInput {
+	s.QuoteEscapeCharacter = &v
+	return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *CSVInput) SetRecordDelimiter(v string) *CSVInput {
+	s.RecordDelimiter = &v
+	return s
+}
+
+// Describes how CSV-formatted results are formatted.
+type CSVOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The value used to separate individual fields in a record.
+	FieldDelimiter *string `type:"string"`
+
+	// The value used for escaping where the field delimiter is part of the value.
+	QuoteCharacter *string `type:"string"`
+
+	// The single character used for escaping the quote character inside an already
+	// escaped value.
+	QuoteEscapeCharacter *string `type:"string"`
+
+	// Indicates whether or not all output fields should be quoted.
+	QuoteFields *string `type:"string" enum:"QuoteFields"`
+
+	// The value used to separate individual records.
+	RecordDelimiter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CSVOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CSVOutput) GoString() string {
+	return s.String()
+}
+
+// SetFieldDelimiter sets the FieldDelimiter field's value.
+func (s *CSVOutput) SetFieldDelimiter(v string) *CSVOutput {
+	s.FieldDelimiter = &v
+	return s
+}
+
+// SetQuoteCharacter sets the QuoteCharacter field's value.
+func (s *CSVOutput) SetQuoteCharacter(v string) *CSVOutput {
+	s.QuoteCharacter = &v
+	return s
+}
+
+// SetQuoteEscapeCharacter sets the QuoteEscapeCharacter field's value.
+func (s *CSVOutput) SetQuoteEscapeCharacter(v string) *CSVOutput {
+	s.QuoteEscapeCharacter = &v
+	return s
+}
+
+// SetQuoteFields sets the QuoteFields field's value.
+func (s *CSVOutput) SetQuoteFields(v string) *CSVOutput {
+	s.QuoteFields = &v
+	return s
+}
+
+// SetRecordDelimiter sets the RecordDelimiter field's value.
+func (s *CSVOutput) SetRecordDelimiter(v string) *CSVOutput { + s.RecordDelimiter = &v + return s +} + type CloudFunctionConfiguration struct { _ struct{} `type:"structure"` CloudFunction *string `type:"string"` - // Bucket event for which to send notifications. + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` @@ -5604,7 +8061,6 @@ func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix type CommonPrefix struct { _ struct{} `type:"structure"` @@ -5627,7 +8083,6 @@ func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest type CompleteMultipartUploadInput struct { _ struct{} `type:"structure" payload:"MultipartUpload"` @@ -5637,7 +8092,7 @@ type CompleteMultipartUploadInput struct { // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. @@ -5665,6 +8120,9 @@ func (s *CompleteMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -5687,6 +8145,13 @@ func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUpl return s } +func (s *CompleteMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { s.Key = &v @@ -5711,7 +8176,6 @@ func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartU return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput type CompleteMultipartUploadOutput struct { _ struct{} `type:"structure"` @@ -5734,7 +8198,7 @@ type CompleteMultipartUploadOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). 
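//
//    // Editor's sketch tying together SelectObjectContent and the CSVInput and
//    // CSVOutput types above. "svc" is an assumed *s3.S3 client; the bucket,
//    // key, and SQL expression are placeholders.
//    resp, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
//        Bucket:              aws.String("examplebucket"),
//        Key:                 aws.String("data.csv"),
//        Expression:          aws.String("SELECT * FROM S3Object s"),
//        ExpressionType:      aws.String("SQL"),
//        InputSerialization:  &s3.InputSerialization{CSV: &s3.CSVInput{FileHeaderInfo: aws.String("USE")}},
//        OutputSerialization: &s3.OutputSerialization{CSV: &s3.CSVOutput{}},
//    })
//    if err == nil {
//        defer resp.EventStream.Close()
//        for ev := range resp.EventStream.Events() {
//            if records, ok := ev.(*s3.RecordsEvent); ok {
//                fmt.Print(string(records.Payload))
//            }
//        }
//    }
//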
@@ -5760,6 +8224,13 @@ func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUp
 	return s
 }
 
+func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
 // SetETag sets the ETag field's value.
 func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
 	s.ETag = &v
@@ -5808,7 +8279,6 @@ func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipar
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
 type CompletedMultipartUpload struct {
 	_ struct{} `type:"structure"`
 
@@ -5831,7 +8301,6 @@ func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultip
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
 type CompletedPart struct {
 	_ struct{} `type:"structure"`
 
@@ -5865,7 +8334,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
+// Specifies a condition that must be met for a redirect to apply.
 type Condition struct {
 	_ struct{} `type:"structure"`
 
@@ -5908,7 +8377,32 @@ func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
+type ContinuationEvent struct {
+	_ struct{} `locationName:"ContinuationEvent" type:"structure"`
+}
+
+// String returns the string representation
+func (s ContinuationEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ContinuationEvent) GoString() string {
+	return s.String()
+}
+
+// The ContinuationEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ContinuationEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ContinuationEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ContinuationEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	return nil
+}
+
 type CopyObjectInput struct {
 	_ struct{} `type:"structure"`
 
@@ -5945,14 +8439,14 @@ type CopyObjectInput struct {
 	CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
 
 	// Copies the object if it has been modified since the specified time.
-	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+	CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"`
 
 	// Copies the object if its entity tag (ETag) is different than the specified
 	// ETag.
 	CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
 
 	// Copies the object if it hasn't been modified since the specified time.
-	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+	CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"`
 
 	// Specifies the algorithm to use when decrypting the source object (e.g., AES256).
CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` @@ -5960,7 +8454,7 @@ type CopyObjectInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -5968,7 +8462,7 @@ type CopyObjectInput struct { CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -5992,6 +8486,15 @@ type CopyObjectInput struct { // with metadata provided in the request. MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` + // Specifies whether you want to apply a Legal Hold to the copied object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode that you want to apply to the copied object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when you want the copied object's object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -6006,18 +8509,23 @@ type CopyObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. 
The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -6057,6 +8565,9 @@ func (s *CopyObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -6085,6 +8596,13 @@ func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { s.CacheControl = &v @@ -6157,6 +8675,13 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput return s } +func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { s.CopySourceSSECustomerKeyMD5 = &v @@ -6211,6 +8736,24 @@ func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CopyObjectInput) SetObjectLockLegalHoldStatus(v string) *CopyObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CopyObjectInput) SetObjectLockMode(v string) *CopyObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CopyObjectInput) SetObjectLockRetainUntilDate(v time.Time) *CopyObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { s.RequestPayer = &v @@ -6229,12 +8772,25 @@ func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { return s } +func (s *CopyObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { s.SSECustomerKeyMD5 = &v return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. 
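Reviewer note: the hunks above add object-lock headers and an SSE-KMS encryption context to `CopyObjectInput`. A minimal sketch of how the new fields compose, assuming an initialized session; the bucket names, key, and context payload are hypothetical:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Per the field comment, the encryption context header carries
	// base64-encoded JSON.
	encCtx := base64.StdEncoding.EncodeToString([]byte(`{"team":"analytics"}`))

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket:     aws.String("dest-bucket"),                 // hypothetical
		Key:        aws.String("reports/2019.csv"),            // hypothetical
		CopySource: aws.String("src-bucket/reports/2019.csv"), // hypothetical

		// New object-lock headers applied to the copied object.
		ObjectLockMode:            aws.String("GOVERNANCE"),
		ObjectLockRetainUntilDate: aws.Time(time.Now().AddDate(0, 1, 0)),

		// SSE-KMS with an explicit encryption context.
		ServerSideEncryption:    aws.String("aws:kms"),
		SSEKMSEncryptionContext: aws.String(encCtx),
	})
	if err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println("copied, new version:", aws.StringValue(out.VersionId))
}
```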
+func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { s.SSEKMSKeyId = &v @@ -6271,7 +8827,6 @@ func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` @@ -6296,9 +8851,14 @@ type CopyObjectOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -6354,6 +8914,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { s.SSEKMSKeyId = &v @@ -6372,13 +8938,12 @@ func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult type CopyObjectResult struct { _ struct{} `type:"structure"` ETag *string `type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -6403,7 +8968,6 @@ func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult type CopyPartResult struct { _ struct{} `type:"structure"` @@ -6411,7 +8975,7 @@ type CopyPartResult struct { ETag *string `type:"string"` // Date and time at which the object was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` } // String returns the string representation @@ -6436,12 +9000,11 @@ func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration type CreateBucketConfiguration struct { _ struct{} `type:"structure"` // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. 
+ // a region, the bucket is created in US East (N. Virginia) Region (us-east-1). LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` } @@ -6461,7 +9024,6 @@ func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucke return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest type CreateBucketInput struct { _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` @@ -6471,7 +9033,7 @@ type CreateBucketInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Allows grantee the read, write, read ACP, and write ACP permissions on the // bucket. @@ -6488,6 +9050,10 @@ type CreateBucketInput struct { // Allows grantee to write the ACL for the applicable bucket. GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Specifies whether you want Amazon S3 object lock to be enabled for the new + // bucket. + ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"` } // String returns the string representation @@ -6506,6 +9072,9 @@ func (s *CreateBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -6525,6 +9094,13 @@ func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { return s } +func (s *CreateBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { s.CreateBucketConfiguration = v @@ -6561,7 +9137,12 @@ func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput +// SetObjectLockEnabledForBucket sets the ObjectLockEnabledForBucket field's value. +func (s *CreateBucketInput) SetObjectLockEnabledForBucket(v bool) *CreateBucketInput { + s.ObjectLockEnabledForBucket = &v + return s +} + type CreateBucketOutput struct { _ struct{} `type:"structure"` @@ -6584,7 +9165,6 @@ func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest type CreateMultipartUploadInput struct { _ struct{} `type:"structure"` @@ -6612,7 +9192,7 @@ type CreateMultipartUploadInput struct { ContentType *string `location:"header" locationName:"Content-Type" type:"string"` // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"` // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. 
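Reviewer note: `CreateBucketInput` gains the `x-amz-bucket-object-lock-enabled` header, and the `LocationConstraint` comment now names us-east-1 as the default region. A sketch under the same imports as the earlier example; the bucket name and region are hypothetical:

```go
func createLockedBucket(svc *s3.S3) error {
	_, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("audit-logs-example"), // hypothetical
		// Omitting the configuration places the bucket in us-east-1.
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
		// Object lock is requested at bucket creation via the new header.
		ObjectLockEnabledForBucket: aws.Bool(true),
	})
	return err
}
```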
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` @@ -6632,6 +9212,15 @@ type CreateMultipartUploadInput struct { // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + // Specifies whether you want to apply a Legal Hold to the uploaded object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // Specifies the object lock mode that you want to apply to the uploaded object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // Specifies the date and time when you want the object lock to expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // Confirms that the requester knows that she or he will be charged for the // request. Bucket owners need not specify this parameter in their requests. // Documentation on downloading objects from requester pays buckets can be found @@ -6646,18 +9235,23 @@ type CreateMultipartUploadInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption // key was transmitted without error. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -6666,6 +9260,9 @@ type CreateMultipartUploadInput struct { // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + // The tag-set for the object. 
The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + // If the bucket is configured as a website, redirects requests for this object // to another object in the same bucket or to an external URL. Amazon S3 stores // the value of this header in the object metadata. @@ -6688,6 +9285,9 @@ func (s *CreateMultipartUploadInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -6713,6 +9313,13 @@ func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadI return s } +func (s *CreateMultipartUploadInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { s.CacheControl = &v @@ -6785,6 +9392,24 @@ func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMu return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *CreateMultipartUploadInput) SetObjectLockLegalHoldStatus(v string) *CreateMultipartUploadInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *CreateMultipartUploadInput) SetObjectLockMode(v string) *CreateMultipartUploadInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *CreateMultipartUploadInput) SetObjectLockRetainUntilDate(v time.Time) *CreateMultipartUploadInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { s.RequestPayer = &v @@ -6803,12 +9428,25 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipar return s } +func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { s.SSECustomerKeyMD5 = &v return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { s.SSEKMSKeyId = &v @@ -6827,18 +9465,23 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU return s } +// SetTagging sets the Tagging field's value. +func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput { + s.Tagging = &v + return s +} + // SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
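Reviewer note: multipart uploads can now carry a tag-set, which the field comment says must be URL-query encoded. A sketch assuming the same imports plus `net/url`; names and tags are hypothetical:

```go
func startTaggedUpload(svc *s3.S3) (*s3.CreateMultipartUploadOutput, error) {
	// Encode the tag-set as URL query parameters, e.g. "project=phoenix&tier=raw".
	tags := url.Values{}
	tags.Set("project", "phoenix")
	tags.Set("tier", "raw")

	return svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
		Bucket:  aws.String("media-example"),   // hypothetical
		Key:     aws.String("video/take1.mov"), // hypothetical
		Tagging: aws.String(tags.Encode()),
	})
}
```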
func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { s.WebsiteRedirectLocation = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput type CreateMultipartUploadOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. @@ -6864,9 +9507,14 @@ type CreateMultipartUploadOutput struct { // verification of the customer-provided encryption key. SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -6904,6 +9552,13 @@ func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUpload return s } +func (s *CreateMultipartUploadOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { s.Key = &v @@ -6928,6 +9583,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { s.SSEKMSKeyId = &v @@ -6946,7 +9607,50 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete +// The container element for specifying the default object lock retention settings +// for new objects placed in the specified bucket. +type DefaultRetention struct { + _ struct{} `type:"structure"` + + // The number of days that you want to specify for the default retention period. + Days *int64 `type:"integer"` + + // The default object lock retention mode you want to apply to new objects placed + // in the specified bucket. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The number of years that you want to specify for the default retention period. 
+ Years *int64 `type:"integer"` +} + +// String returns the string representation +func (s DefaultRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DefaultRetention) GoString() string { + return s.String() +} + +// SetDays sets the Days field's value. +func (s *DefaultRetention) SetDays(v int64) *DefaultRetention { + s.Days = &v + return s +} + +// SetMode sets the Mode field's value. +func (s *DefaultRetention) SetMode(v string) *DefaultRetention { + s.Mode = &v + return s +} + +// SetYears sets the Years field's value. +func (s *DefaultRetention) SetYears(v int64) *DefaultRetention { + s.Years = &v + return s +} + type Delete struct { _ struct{} `type:"structure"` @@ -7003,7 +9707,6 @@ func (s *Delete) SetQuiet(v bool) *Delete { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest type DeleteBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -7012,7 +9715,7 @@ type DeleteBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -7034,6 +9737,9 @@ func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -7050,13 +9756,19 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
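Reviewer note: `DefaultRetention` above feeds a bucket's object-lock configuration. A sketch of building one with the generated fluent setters; the GOVERNANCE mode and 30-day period are arbitrary choices, and per the S3 documentation Days and Years are mutually exclusive:

```go
func defaultRetention() *s3.DefaultRetention {
	// Set either Days or Years for the default period, not both.
	return (&s3.DefaultRetention{}).
		SetMode("GOVERNANCE").
		SetDays(30)
}
```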
func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput type DeleteBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -7071,7 +9783,6 @@ func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest type DeleteBucketCorsInput struct { _ struct{} `type:"structure"` @@ -7095,6 +9806,9 @@ func (s *DeleteBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7108,7 +9822,13 @@ func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +func (s *DeleteBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -7123,30 +9843,35 @@ func (s DeleteBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest -type DeleteBucketInput struct { +type DeleteBucketEncryptionInput struct { _ struct{} `type:"structure"` + // The name of the bucket containing the server-side encryption configuration + // to delete. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s DeleteBucketInput) String() string { +func (s DeleteBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBucketInput) GoString() string { +func (s DeleteBucketEncryptionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBucketInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} +func (s *DeleteBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketEncryptionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7155,13 +9880,79 @@ func (s *DeleteBucketInput) Validate() error { } // SetBucket sets the Bucket field's value. 
-func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { +func (s *DeleteBucketEncryptionInput) SetBucket(v string) *DeleteBucketEncryptionInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest -type DeleteBucketInventoryConfigurationInput struct { +func (s *DeleteBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketEncryptionOutput) GoString() string { + return s.String() +} + +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +func (s *DeleteBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeleteBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` // The name of the bucket containing the inventory configuration to delete. @@ -7191,6 +9982,9 @@ func (s *DeleteBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -7207,13 +10001,19 @@ func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBuc return s } +func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
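Reviewer note: `DeleteBucketInput` is unchanged apart from the new minimum-length check; the new `DeleteBucketEncryptionInput`/`Output` types back the `DeleteBucketEncryption` operation added elsewhere in this diff. A usage sketch, assuming that operation and the imports from the first example:

```go
func clearDefaultEncryption(svc *s3.S3, bucket string) error {
	// Removes the bucket's default server-side encryption configuration.
	_, err := svc.DeleteBucketEncryption(&s3.DeleteBucketEncryptionInput{
		Bucket: aws.String(bucket),
	})
	return err
}
```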
func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput type DeleteBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -7228,7 +10028,6 @@ func (s DeleteBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest type DeleteBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -7252,6 +10051,9 @@ func (s *DeleteBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7265,7 +10067,13 @@ func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleI return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +func (s *DeleteBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -7280,7 +10088,6 @@ func (s DeleteBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest type DeleteBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -7311,6 +10118,9 @@ func (s *DeleteBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -7327,13 +10137,19 @@ func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucke return s } +func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput type DeleteBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -7348,7 +10164,6 @@ func (s DeleteBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput type DeleteBucketOutput struct { _ struct{} `type:"structure"` } @@ -7363,7 +10178,6 @@ func (s DeleteBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest type DeleteBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -7387,6 +10201,9 @@ func (s *DeleteBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7400,7 +10217,13 @@ func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +func (s *DeleteBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -7415,10 +10238,14 @@ func (s DeleteBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest type DeleteBucketReplicationInput struct { _ struct{} `type:"structure"` + // The bucket name. + // + // It can take a while to propagate the deletion of a replication configuration + // to all Amazon S3 systems. 
+ // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } @@ -7439,6 +10266,9 @@ func (s *DeleteBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7452,7 +10282,13 @@ func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicat return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +func (s *DeleteBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -7467,7 +10303,6 @@ func (s DeleteBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest type DeleteBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -7491,6 +10326,9 @@ func (s *DeleteBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7504,7 +10342,13 @@ func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +func (s *DeleteBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -7519,7 +10363,6 @@ func (s DeleteBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest type DeleteBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -7543,6 +10386,9 @@ func (s *DeleteBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -7556,7 +10402,13 @@ func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +func (s *DeleteBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type DeleteBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -7571,7 +10423,6 @@ func (s DeleteBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry type DeleteMarkerEntry struct { _ struct{} `type:"structure"` @@ -7583,7 +10434,7 @@ type DeleteMarkerEntry struct { Key *string `min:"1" type:"string"` // Date and time the object was last modified. 
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -7631,13 +10482,43 @@ func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest +// Specifies whether Amazon S3 should replicate delete markers. +type DeleteMarkerReplication struct { + _ struct{} `type:"structure"` + + // The status of the delete marker replication. + // + // In the current implementation, Amazon S3 doesn't replicate the delete markers. + // The status must be Disabled. + Status *string `type:"string" enum:"DeleteMarkerReplicationStatus"` +} + +// String returns the string representation +func (s DeleteMarkerReplication) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerReplication) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *DeleteMarkerReplication) SetStatus(v string) *DeleteMarkerReplication { + s.Status = &v + return s +} + type DeleteObjectInput struct { _ struct{} `type:"structure"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions + // to process this operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -7671,6 +10552,9 @@ func (s *DeleteObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -7690,6 +10574,19 @@ func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { return s } +func (s *DeleteObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectInput) SetBypassGovernanceRetention(v bool) *DeleteObjectInput { + s.BypassGovernanceRetention = &v + return s +} + // SetKey sets the Key field's value.
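Reviewer note: `DeleteObjectInput` gains `BypassGovernanceRetention`. A sketch of deleting one specific version held under governance retention; the caller is assumed to hold the s3:BypassGovernanceRetention permission, and all names are hypothetical:

```go
func deleteGovernedVersion(svc *s3.S3, bucket, key, versionID string) error {
	_, err := svc.DeleteObject(&s3.DeleteObjectInput{
		Bucket:    aws.String(bucket),
		Key:       aws.String(key),
		VersionId: aws.String(versionID),
		// Override GOVERNANCE-mode retention for this delete.
		BypassGovernanceRetention: aws.Bool(true),
	})
	return err
}
```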
func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { s.Key = &v @@ -7714,7 +10611,6 @@ func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput type DeleteObjectOutput struct { _ struct{} `type:"structure"` @@ -7759,7 +10655,6 @@ func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest type DeleteObjectTaggingInput struct { _ struct{} `type:"structure"` @@ -7789,6 +10684,9 @@ func (s *DeleteObjectTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -7808,6 +10706,13 @@ func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput return s } +func (s *DeleteObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { s.Key = &v @@ -7820,7 +10725,6 @@ func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingIn return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput type DeleteObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -7844,15 +10748,19 @@ func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest type DeleteObjectsInput struct { _ struct{} `type:"structure" payload:"Delete"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether you want to delete this object even if it has a Governance-type + // object lock in place. You must have sufficient permissions to perform this + // operation. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + // Delete is a required field - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The concatenation of the authentication device's serial number, a space, // and the value that is displayed on your authentication device. @@ -7881,6 +10789,9 @@ func (s *DeleteObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Delete == nil { invalidParams.Add(request.NewErrParamRequired("Delete")) } @@ -7902,6 +10813,19 @@ func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { return s } +func (s *DeleteObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *DeleteObjectsInput) SetBypassGovernanceRetention(v bool) *DeleteObjectsInput { + s.BypassGovernanceRetention = &v + return s +} + // SetDelete sets the Delete field's value. 
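Reviewer note: the batch variant gets the same governance bypass, and its `Delete` payload now carries the S3 XML namespace via the `xmlURI` tag. A sketch of a quiet batch delete under the same assumptions as the single-object example:

```go
func batchDelete(svc *s3.S3, bucket string, keys []string) error {
	objects := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		objects = append(objects, &s3.ObjectIdentifier{Key: aws.String(k)})
	}
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket:                    aws.String(bucket),
		BypassGovernanceRetention: aws.Bool(true),
		Delete: &s3.Delete{
			Objects: objects,
			Quiet:   aws.Bool(true), // suppress per-key success entries
		},
	})
	return err
}
```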
func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { s.Delete = v @@ -7920,7 +10844,6 @@ func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput type DeleteObjectsOutput struct { _ struct{} `type:"structure"` @@ -7961,7 +10884,68 @@ func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject +type DeletePublicAccessBlockInput struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket whose PublicAccessBlock configuration you want to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletePublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeletePublicAccessBlockInput) SetBucket(v string) *DeletePublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *DeletePublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type DeletePublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletePublicAccessBlockOutput) GoString() string { + return s.String() +} + type DeletedObject struct { _ struct{} `type:"structure"` @@ -8008,17 +10992,48 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +// Specifies information about where to publish analysis or configuration results +// for an Amazon S3 bucket. type Destination struct { _ struct{} `type:"structure"` - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. + // Specify this only in a cross-account scenario (where source and destination + // bucket owners are not the same), and you want to change replica ownership + // to the AWS account that owns the destination bucket. If this is not specified + // in the replication configuration, the replicas are owned by same AWS account + // that owns the source object. + AccessControlTranslation *AccessControlTranslation `type:"structure"` + + // Destination bucket owner account ID. In a cross-account scenario, if you + // direct Amazon S3 to change replica ownership to the AWS account that owns + // the destination bucket by specifying the AccessControlTranslation property, + // this is the account ID of the destination bucket owner. 
For more information, + // see Cross-Region Replication Additional Configuration: Change Replica Owner + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in + // the Amazon Simple Storage Service Developer Guide. + Account *string `type:"string"` + + // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to + // store replicas of the object identified by the rule. + // + // A replication configuration can replicate objects to only one destination + // bucket. If there are multiple rules in your replication configuration, all + // rules must specify the same destination bucket. // // Bucket is a required field Bucket *string `type:"string" required:"true"` - // The class of storage used to store the object. + // A container that provides information about encryption. If SourceSelectionCriteria + // is specified, you must specify this element. + EncryptionConfiguration *EncryptionConfiguration `type:"structure"` + + // The storage class to use when replicating objects, such as standard or reduced + // redundancy. By default, Amazon S3 uses the storage class of the source object + // to create the object replica. + // + // For valid values, see the StorageClass element of the PUT Bucket replication + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) + // action in the Amazon Simple Storage Service API Reference. StorageClass *string `type:"string" enum:"StorageClass"` } @@ -8038,6 +11053,11 @@ func (s *Destination) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.AccessControlTranslation != nil { + if err := s.AccessControlTranslation.Validate(); err != nil { + invalidParams.AddNested("AccessControlTranslation", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -8045,118 +11065,256 @@ func (s *Destination) Validate() error { return nil } +// SetAccessControlTranslation sets the AccessControlTranslation field's value. +func (s *Destination) SetAccessControlTranslation(v *AccessControlTranslation) *Destination { + s.AccessControlTranslation = v + return s +} + +// SetAccount sets the Account field's value. +func (s *Destination) SetAccount(v string) *Destination { + s.Account = &v + return s +} + // SetBucket sets the Bucket field's value. func (s *Destination) SetBucket(v string) *Destination { s.Bucket = &v return s } +func (s *Destination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryptionConfiguration sets the EncryptionConfiguration field's value. +func (s *Destination) SetEncryptionConfiguration(v *EncryptionConfiguration) *Destination { + s.EncryptionConfiguration = v + return s +} + // SetStorageClass sets the StorageClass field's value. func (s *Destination) SetStorageClass(v string) *Destination { s.StorageClass = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error -type Error struct { +// Describes the server-side encryption that will be applied to the restore +// results. +type Encryption struct { _ struct{} `type:"structure"` - Code *string `type:"string"` - - Key *string `min:"1" type:"string"` + // The server-side encryption algorithm used when storing job results in Amazon + // S3 (e.g., AES256, aws:kms). 
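Reviewer note: `Destination` grows cross-account ownership and replica-encryption settings. A sketch of a replication destination that hands replica ownership to the destination account and re-encrypts replicas; the ARNs and account ID are hypothetical, and `AccessControlTranslation` is a type defined elsewhere in this diff:

```go
func replicationDestination() *s3.Destination {
	return &s3.Destination{
		Bucket:  aws.String("arn:aws:s3:::replica-bucket"), // hypothetical
		Account: aws.String("111122223333"),                // destination owner
		// Change replica ownership to the destination account.
		AccessControlTranslation: &s3.AccessControlTranslation{
			Owner: aws.String("Destination"),
		},
		// Encrypt replicas with a KMS key in the destination account.
		EncryptionConfiguration: &s3.EncryptionConfiguration{
			ReplicaKmsKeyID: aws.String("arn:aws:kms:us-west-2:111122223333:key/EXAMPLE"),
		},
		StorageClass: aws.String("STANDARD_IA"),
	}
}
```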
+ // + // EncryptionType is a required field + EncryptionType *string `type:"string" required:"true" enum:"ServerSideEncryption"` - Message *string `type:"string"` + // If the encryption type is aws:kms, this optional value can be used to specify + // the encryption context for the restore results. + KMSContext *string `type:"string"` - VersionId *string `type:"string"` + // If the encryption type is aws:kms, this optional value specifies the AWS + // KMS key ID to use for encryption of job results. + KMSKeyId *string `type:"string" sensitive:"true"` } // String returns the string representation -func (s Error) String() string { +func (s Encryption) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Error) GoString() string { +func (s Encryption) GoString() string { return s.String() } -// SetCode sets the Code field's value. -func (s *Error) SetCode(v string) *Error { - s.Code = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *Encryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Encryption"} + if s.EncryptionType == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptionType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetKey sets the Key field's value. -func (s *Error) SetKey(v string) *Error { - s.Key = &v +// SetEncryptionType sets the EncryptionType field's value. +func (s *Encryption) SetEncryptionType(v string) *Encryption { + s.EncryptionType = &v return s } -// SetMessage sets the Message field's value. -func (s *Error) SetMessage(v string) *Error { - s.Message = &v +// SetKMSContext sets the KMSContext field's value. +func (s *Encryption) SetKMSContext(v string) *Encryption { + s.KMSContext = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *Error) SetVersionId(v string) *Error { - s.VersionId = &v +// SetKMSKeyId sets the KMSKeyId field's value. +func (s *Encryption) SetKMSKeyId(v string) *Encryption { + s.KMSKeyId = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument -type ErrorDocument struct { +// Specifies encryption-related information for an Amazon S3 bucket that is +// a destination for replicated objects. +type EncryptionConfiguration struct { _ struct{} `type:"structure"` - // The object key name to use when a 4XX class error occurs. - // - // Key is a required field - Key *string `min:"1" type:"string" required:"true"` + // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket. + // Amazon S3 uses this key to encrypt replica objects. + ReplicaKmsKeyID *string `type:"string"` } // String returns the string representation -func (s ErrorDocument) String() string { +func (s EncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ErrorDocument) GoString() string { +func (s EncryptionConfiguration) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ErrorDocument) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } +// SetReplicaKmsKeyID sets the ReplicaKmsKeyID field's value. 
+func (s *EncryptionConfiguration) SetReplicaKmsKeyID(v string) *EncryptionConfiguration { + s.ReplicaKmsKeyID = &v + return s +} + +type EndEvent struct { + _ struct{} `locationName:"EndEvent" type:"structure"` +} + +// String returns the string representation +func (s EndEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EndEvent) GoString() string { + return s.String() +} + +// The EndEvent is an event in the SelectObjectContentEventStream group of events. +func (s *EndEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the EndEvent value. +// This method is only used internally within the SDK's EventStream handling. +func (s *EndEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + return nil +} + +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v return s } -// Container for key value pair that defines the criteria for the filter rule. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule +// Specifies the Amazon S3 object key name to filter on and whether to filter +// on the suffix or prefix of the key name. type FilterRule struct { _ struct{} `type:"structure"` - // Object key name prefix or suffix identifying one or more objects to which - // the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
- // Overlapping prefixes and suffixes are not supported. For more information, - // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // The object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. The maximum length is 1,024 characters. Overlapping + // prefixes and suffixes are not supported. For more information, see Configuring + // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Name *string `type:"string" enum:"FilterRuleName"` + // The value that the filter searches for in object key names. Value *string `type:"string"` } @@ -8182,7 +11340,6 @@ func (s *FilterRule) SetValue(v string) *FilterRule { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest type GetBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure"` @@ -8208,6 +11365,9 @@ func (s *GetBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8221,7 +11381,13 @@ func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` @@ -8245,7 +11411,6 @@ func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketA return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest type GetBucketAclInput struct { _ struct{} `type:"structure"` @@ -8269,6 +11434,9 @@ func (s *GetBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8282,7 +11450,13 @@ func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +func (s *GetBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketAclOutput struct { _ struct{} `type:"structure"` @@ -8314,7 +11488,6 @@ func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest type GetBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8323,7 +11496,7 @@ type GetBucketAnalyticsConfigurationInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. 
// // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -8345,6 +11518,9 @@ func (s *GetBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8361,13 +11537,19 @@ func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAna return s } +func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput type GetBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` @@ -8391,7 +11573,6 @@ func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *Ana return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest type GetBucketCorsInput struct { _ struct{} `type:"structure"` @@ -8415,6 +11596,9 @@ func (s *GetBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8428,7 +11612,13 @@ func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +func (s *GetBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketCorsOutput struct { _ struct{} `type:"structure"` @@ -8451,7 +11641,78 @@ func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +type GetBucketEncryptionInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which the server-side encryption configuration + // is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketEncryptionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketEncryptionInput) SetBucket(v string) *GetBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *GetBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketEncryptionOutput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` + + // Specifies the default server-side-encryption configuration. + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketEncryptionOutput) GoString() string { + return s.String() +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *GetBucketEncryptionOutput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *GetBucketEncryptionOutput { + s.ServerSideEncryptionConfiguration = v + return s +} + type GetBucketInventoryConfigurationInput struct { _ struct{} `type:"structure"` @@ -8482,6 +11743,9 @@ func (s *GetBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8498,13 +11762,19 @@ func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInv return s } +func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
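The GetBucketEncryption pair added above has the same shape as every other bucket-level getter in this file: a single required Bucket field, a Validate method that now also rejects empty bucket names, and a payload struct on the output. A minimal sketch of calling it through the generated client; the session setup, region, and bucket name are placeholders, not anything taken from this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials come from the usual SDK chain; region and bucket are placeholders.
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Validate() (shown in the diff) rejects a nil or empty Bucket before
	// the request is ever signed and sent.
	out, err := svc.GetBucketEncryption(&s3.GetBucketEncryptionInput{
		Bucket: aws.String("example-bucket"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Walk the default-encryption rules carried in the payload struct.
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println(aws.StringValue(def.SSEAlgorithm), aws.StringValue(def.KMSMasterKeyID))
		}
	}
}
```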
func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput type GetBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure" payload:"InventoryConfiguration"` @@ -8528,7 +11798,6 @@ func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *Inv return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest type GetBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure"` @@ -8552,6 +11821,9 @@ func (s *GetBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8565,7 +11837,13 @@ func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLif return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` @@ -8588,7 +11866,6 @@ func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *Ge return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest type GetBucketLifecycleInput struct { _ struct{} `type:"structure"` @@ -8612,6 +11889,9 @@ func (s *GetBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8625,7 +11905,13 @@ func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +func (s *GetBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLifecycleOutput struct { _ struct{} `type:"structure"` @@ -8648,7 +11934,6 @@ func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest type GetBucketLocationInput struct { _ struct{} `type:"structure"` @@ -8672,6 +11957,9 @@ func (s *GetBucketLocationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8685,7 +11973,13 @@ func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput +func (s *GetBucketLocationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLocationOutput struct { _ struct{} `type:"structure"` @@ -8708,7 +12002,6 @@ func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLoca return s } -// 
Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest type GetBucketLoggingInput struct { _ struct{} `type:"structure"` @@ -8732,6 +12025,9 @@ func (s *GetBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8745,10 +12041,20 @@ func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +func (s *GetBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketLoggingOutput struct { _ struct{} `type:"structure"` + // Describes where logs are stored and the prefix that Amazon S3 assigns to + // all log object keys for a bucket. For more information, see PUT Bucket logging + // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) + // in the Amazon Simple Storage Service API Reference. LoggingEnabled *LoggingEnabled `type:"structure"` } @@ -8768,7 +12074,6 @@ func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucket return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest type GetBucketMetricsConfigurationInput struct { _ struct{} `type:"structure"` @@ -8799,6 +12104,9 @@ func (s *GetBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -8815,13 +12123,19 @@ func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetri return s } +func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput type GetBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -8845,7 +12159,6 @@ func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *Metrics return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest type GetBucketNotificationConfigurationRequest struct { _ struct{} `type:"structure"` @@ -8871,6 +12184,9 @@ func (s *GetBucketNotificationConfigurationRequest) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8884,7 +12200,13 @@ func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBuck return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketPolicyInput struct { _ struct{} `type:"structure"` @@ -8908,6 +12230,9 @@ func (s *GetBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8921,7 +12246,13 @@ func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +func (s *GetBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketPolicyOutput struct { _ struct{} `type:"structure" payload:"Policy"` @@ -8945,30 +12276,34 @@ func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest -type GetBucketReplicationInput struct { +type GetBucketPolicyStatusInput struct { _ struct{} `type:"structure"` + // The name of the Amazon S3 bucket whose policy status you want to retrieve. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` } // String returns the string representation -func (s GetBucketReplicationInput) String() string { +func (s GetBucketPolicyStatusInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketReplicationInput) GoString() string { +func (s GetBucketPolicyStatusInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *GetBucketReplicationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} +func (s *GetBucketPolicyStatusInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyStatusInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -8977,38 +12312,42 @@ func (s *GetBucketReplicationInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { +func (s *GetBucketPolicyStatusInput) SetBucket(v string) *GetBucketPolicyStatusInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput -type GetBucketReplicationOutput struct { - _ struct{} `type:"structure" payload:"ReplicationConfiguration"` +func (s *GetBucketPolicyStatusInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +type GetBucketPolicyStatusOutput struct { + _ struct{} `type:"structure" payload:"PolicyStatus"` + + // The policy status for the specified bucket. + PolicyStatus *PolicyStatus `type:"structure"` } // String returns the string representation -func (s GetBucketReplicationOutput) String() string { +func (s GetBucketPolicyStatusOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketReplicationOutput) GoString() string { +func (s GetBucketPolicyStatusOutput) GoString() string { return s.String() } -// SetReplicationConfiguration sets the ReplicationConfiguration field's value. -func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { - s.ReplicationConfiguration = v +// SetPolicyStatus sets the PolicyStatus field's value. +func (s *GetBucketPolicyStatusOutput) SetPolicyStatus(v *PolicyStatus) *GetBucketPolicyStatusOutput { + s.PolicyStatus = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest -type GetBucketRequestPaymentInput struct { +type GetBucketReplicationInput struct { _ struct{} `type:"structure"` // Bucket is a required field @@ -9016,21 +12355,24 @@ type GetBucketRequestPaymentInput struct { } // String returns the string representation -func (s GetBucketRequestPaymentInput) String() string { +func (s GetBucketReplicationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketRequestPaymentInput) GoString() string { +func (s GetBucketReplicationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
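GetBucketPolicyStatus is one of the operations introduced in this update; the diff interleaves it with the relocated GetBucketReplication hunks, but the call pattern is unchanged. A sketch, assuming the PolicyStatus payload exposes an IsPublic flag as it does upstream, with a placeholder bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketPolicyStatus(&s3.GetBucketPolicyStatusInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}

	// PolicyStatus is the payload struct named in the output's payload tag;
	// IsPublic reports whether the bucket policy grants public access.
	fmt.Println("public:", aws.BoolValue(out.PolicyStatus.IsPublic))
}
```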
-func (s *GetBucketRequestPaymentInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9039,36 +12381,111 @@ func (s *GetBucketRequestPaymentInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { s.Bucket = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput -type GetBucketRequestPaymentOutput struct { - _ struct{} `type:"structure"` +func (s *GetBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} - // Specifies who pays for the download and request fees. - Payer *string `type:"string" enum:"Payer"` +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` } // String returns the string representation -func (s GetBucketRequestPaymentOutput) String() string { +func (s GetBucketReplicationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetBucketRequestPaymentOutput) GoString() string { +func (s GetBucketReplicationOutput) GoString() string { return s.String() } -// SetPayer sets the Payer field's value. -func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +func (s *GetBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { s.Payer = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest type GetBucketTaggingInput struct { _ struct{} `type:"structure"` @@ -9092,6 +12509,9 @@ func (s *GetBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9105,7 +12525,13 @@ func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +func (s *GetBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketTaggingOutput struct { _ struct{} `type:"structure"` @@ -9129,7 +12555,6 @@ func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest type GetBucketVersioningInput struct { _ struct{} `type:"structure"` @@ -9153,6 +12578,9 @@ func (s *GetBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9166,7 +12594,13 @@ func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput +func (s *GetBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketVersioningOutput struct { _ struct{} `type:"structure"` @@ -9201,7 +12635,6 @@ func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest type GetBucketWebsiteInput struct { _ struct{} `type:"structure"` @@ -9225,6 +12658,9 @@ func (s *GetBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -9238,7 +12674,13 @@ func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput 
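Every input type in this file now pairs its exported Set* methods with an unexported getBucket() accessor; the getters are consumed internally by the SDK (for example when resolving host-style addressing), while the setters give callers a fluent alternative to struct literals, since each one returns its receiver. A short sketch of the fluent style against GetBucketVersioning, with a placeholder bucket name:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// The generated Set* methods return the receiver, so inputs can be
	// chained instead of written as struct literals.
	input := (&s3.GetBucketVersioningInput{}).SetBucket("example-bucket")

	out, err := svc.GetBucketVersioning(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("versioning:", aws.StringValue(out.Status),
		"mfa delete:", aws.StringValue(out.MFADelete))
}
```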
+func (s *GetBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type GetBucketWebsiteOutput struct { _ struct{} `type:"structure"` @@ -9246,6 +12688,8 @@ type GetBucketWebsiteOutput struct { IndexDocument *IndexDocument `type:"structure"` + // Specifies the redirect behavior of all requests to a website endpoint of + // an Amazon S3 bucket. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` @@ -9285,7 +12729,6 @@ func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWeb return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest type GetObjectAclInput struct { _ struct{} `type:"structure"` @@ -9321,6 +12764,9 @@ func (s *GetObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9340,6 +12786,13 @@ func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { return s } +func (s *GetObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { s.Key = &v @@ -9358,7 +12811,6 @@ func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput type GetObjectAclOutput struct { _ struct{} `type:"structure"` @@ -9400,7 +12852,6 @@ func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest type GetObjectInput struct { _ struct{} `type:"structure"` @@ -9413,7 +12864,7 @@ type GetObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -9421,7 +12872,7 @@ type GetObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -9457,7 +12908,7 @@ type GetObjectInput struct { ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` // Sets the Expires header of the response. 
-	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+	ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"`

	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
@@ -9467,7 +12918,7 @@ type GetObjectInput struct {
	// does not store the encryption key. The key must be appropriate for use with
	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
	// Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -9494,6 +12945,9 @@ func (s *GetObjectInput) Validate() error {
	if s.Bucket == nil {
		invalidParams.Add(request.NewErrParamRequired("Bucket"))
	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
	if s.Key == nil {
		invalidParams.Add(request.NewErrParamRequired("Key"))
	}
@@ -9513,6 +12967,13 @@ func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
	return s
}

+func (s *GetObjectInput) getBucket() (v string) {
+	if s.Bucket == nil {
+		return v
+	}
+	return *s.Bucket
+}
+
// SetIfMatch sets the IfMatch field's value.
func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
	s.IfMatch = &v
@@ -9609,6 +13070,13 @@ func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
	return s
}

+func (s *GetObjectInput) getSSECustomerKey() (v string) {
+	if s.SSECustomerKey == nil {
+		return v
+	}
+	return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
	s.SSECustomerKeyMD5 = &v
@@ -9621,7 +13089,186 @@ func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
	return s
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
+type GetObjectLegalHoldInput struct {
+	_ struct{} `type:"structure"`
+
+	// The bucket containing the object whose Legal Hold status you want to retrieve.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// The key name for the object whose Legal Hold status you want to retrieve.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

+	// The version ID of the object whose Legal Hold status you want to retrieve.
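The GetObjectInput hunks above make two quiet changes: the per-field timestampFormat tags are dropped in favor of the protocol-level defaults, and SSECustomerKey gains a sensitive tag so its value is redacted from String() logging output. Calling GetObject itself is unchanged; a minimal sketch with placeholder bucket and key names:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // placeholders
		Key:    aws.String("example/key.txt"),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	// LastModified is one of the timestamp fields whose wire format is now
	// chosen by the protocol layer rather than a per-field tag.
	fmt.Println("last modified:", aws.TimeValue(out.LastModified))
	if _, err := io.Copy(os.Stdout, out.Body); err != nil {
		log.Fatal(err)
	}
}
```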
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectLegalHoldInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectLegalHoldInput) SetBucket(v string) *GetObjectLegalHoldInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *GetObjectLegalHoldInput) SetKey(v string) *GetObjectLegalHoldInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectLegalHoldInput) SetRequestPayer(v string) *GetObjectLegalHoldInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectLegalHoldInput) SetVersionId(v string) *GetObjectLegalHoldInput { + s.VersionId = &v + return s +} + +type GetObjectLegalHoldOutput struct { + _ struct{} `type:"structure" payload:"LegalHold"` + + // The current Legal Hold status for the specified object. + LegalHold *ObjectLockLegalHold `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetLegalHold sets the LegalHold field's value. +func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObjectLegalHoldOutput { + s.LegalHold = v + return s +} + +type GetObjectLockConfigurationInput struct { + _ struct{} `type:"structure"` + + // The bucket whose object lock configuration you want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectLockConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetObjectLockConfigurationInput) SetBucket(v string) *GetObjectLockConfigurationInput { + s.Bucket = &v + return s +} + +func (s *GetObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetObjectLockConfigurationOutput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + + // The specified bucket's object lock configuration. + ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetObjectLockConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectLockConfigurationOutput) GoString() string { + return s.String() +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *GetObjectLockConfigurationOutput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *GetObjectLockConfigurationOutput { + s.ObjectLockConfiguration = v + return s +} + type GetObjectOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -9671,7 +13318,7 @@ type GetObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -9682,6 +13329,16 @@ type GetObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // Indicates whether this object has an active legal hold. This field is only + // returned if you have permission to view an object's legal hold status. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock will expire. + ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -9707,7 +13364,7 @@ type GetObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -9833,6 +13490,24 @@ func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. 
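GetObjectLegalHold and GetObjectLockConfiguration belong to the S3 Object Lock support added in this drop, together with the ObjectLock* response headers that follow. A sketch of reading both settings; it assumes the ObjectLockConfiguration and ObjectLockLegalHold payloads expose an ObjectLockEnabled string and a Status string respectively, as they do upstream, and all names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Bucket-level: is Object Lock enabled, and what defaults apply?
	lock, err := svc.GetObjectLockConfiguration(&s3.GetObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	if cfg := lock.ObjectLockConfiguration; cfg != nil {
		fmt.Println("object lock:", aws.StringValue(cfg.ObjectLockEnabled))
	}

	// Object-level: the current legal hold status (ON or OFF).
	hold, err := svc.GetObjectLegalHold(&s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example/key.txt"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("legal hold:", aws.StringValue(hold.LegalHold.Status))
}
```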
+func (s *GetObjectOutput) SetObjectLockLegalHoldStatus(v string) *GetObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *GetObjectOutput) SetObjectLockMode(v string) *GetObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *GetObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *GetObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetPartsCount sets the PartsCount field's value. func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { s.PartsCount = &v @@ -9905,35 +13580,48 @@ func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest -type GetObjectTaggingInput struct { +type GetObjectRetentionInput struct { _ struct{} `type:"structure"` + // The bucket containing the object whose retention settings you want to retrieve. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // The key name for the object whose retention settings you want to retrieve. + // // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The version ID for the object whose retention settings you want to retrieve. VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s GetObjectTaggingInput) String() string { +func (s GetObjectRetentionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s GetObjectTaggingInput) GoString() string { +func (s GetObjectRetentionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *GetObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} +func (s *GetObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectRetentionInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9948,13 +13636,118 @@ func (s *GetObjectTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { +func (s *GetObjectRetentionInput) SetBucket(v string) *GetObjectRetentionInput { s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { +func (s *GetObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
+func (s *GetObjectRetentionInput) SetKey(v string) *GetObjectRetentionInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectRetentionInput) SetRequestPayer(v string) *GetObjectRetentionInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectRetentionInput) SetVersionId(v string) *GetObjectRetentionInput { + s.VersionId = &v + return s +} + +type GetObjectRetentionOutput struct { + _ struct{} `type:"structure" payload:"Retention"` + + // The container element for an object's retention settings. + Retention *ObjectLockRetention `type:"structure"` +} + +// String returns the string representation +func (s GetObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRetention sets the Retention field's value. +func (s *GetObjectRetentionOutput) SetRetention(v *ObjectLockRetention) *GetObjectRetentionOutput { + s.Retention = v + return s +} + +type GetObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *GetObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. 
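GetObjectRetention rounds out the Object Lock getters: its Retention payload carries the retention mode and the retain-until date. A sketch under the same assumptions and placeholders as above (Mode and RetainUntilDate are upstream ObjectLockRetention fields that sit outside the hunks shown here):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetObjectRetention(&s3.GetObjectRetentionInput{
		Bucket: aws.String("example-bucket"), // placeholders
		Key:    aws.String("example/key.txt"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Retention carries the mode (GOVERNANCE or COMPLIANCE) and the
	// timestamp at which the lock lapses.
	fmt.Println(aws.StringValue(out.Retention.Mode),
		aws.TimeValue(out.Retention.RetainUntilDate))
}
```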
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { s.Key = &v return s } @@ -9965,7 +13758,6 @@ func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput type GetObjectTaggingOutput struct { _ struct{} `type:"structure"` @@ -9997,7 +13789,6 @@ func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest type GetObjectTorrentInput struct { _ struct{} `type:"structure"` @@ -10030,6 +13821,9 @@ func (s *GetObjectTorrentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -10049,6 +13843,13 @@ func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { return s } +func (s *GetObjectTorrentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { s.Key = &v @@ -10061,7 +13862,6 @@ func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput type GetObjectTorrentOutput struct { _ struct{} `type:"structure" payload:"Body"` @@ -10094,7 +13894,79 @@ func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters +type GetPublicAccessBlockInput struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetPublicAccessBlockInput) SetBucket(v string) *GetPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *GetPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +type GetPublicAccessBlockOutput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The PublicAccessBlock configuration currently in effect for this Amazon S3 + // bucket. 
+ PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *GetPublicAccessBlockOutput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *GetPublicAccessBlockOutput { + s.PublicAccessBlockConfiguration = v + return s +} + type GlacierJobParameters struct { _ struct{} `type:"structure"` @@ -10133,11 +14005,10 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant type Grant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Specifies the permission given to the grantee. Permission *string `type:"string" enum:"Permission"` @@ -10180,7 +14051,6 @@ func (s *Grant) SetPermission(v string) *Grant { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee type Grantee struct { _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` @@ -10255,7 +14125,6 @@ func (s *Grantee) SetURI(v string) *Grantee { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest type HeadBucketInput struct { _ struct{} `type:"structure"` @@ -10279,6 +14148,9 @@ func (s *HeadBucketInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -10292,7 +14164,13 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput +func (s *HeadBucketInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type HeadBucketOutput struct { _ struct{} `type:"structure"` } @@ -10307,7 +14185,6 @@ func (s HeadBucketOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest type HeadObjectInput struct { _ struct{} `type:"structure"` @@ -10320,7 +14197,7 @@ type HeadObjectInput struct { // Return the object only if it has been modified since the specified time, // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp"` // Return the object only if its entity tag (ETag) is different from the one // specified, otherwise return a 304 (not modified). @@ -10328,7 +14205,7 @@ type HeadObjectInput struct { // Return the object only if it has not been modified since the specified time, // otherwise return a 412 (precondition failed). 
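GetPublicAccessBlock, defined above, is the read side of the Block Public Access feature this update brings in, and HeadBucket remains the cheapest existence probe since its output carries no payload. A combined sketch; the NotFound error-code check is an assumption about how the SDK surfaces a 404 here, and the bucket name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))
	bucket := aws.String("example-bucket") // placeholder

	// HeadBucketOutput is empty; existence and access are signalled
	// entirely through the error.
	if _, err := svc.HeadBucket(&s3.HeadBucketInput{Bucket: bucket}); err != nil {
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
			log.Fatalf("bucket %s does not exist", aws.StringValue(bucket))
		}
		log.Fatal(err)
	}

	out, err := svc.GetPublicAccessBlock(&s3.GetPublicAccessBlockInput{Bucket: bucket})
	if err != nil {
		log.Fatal(err)
	}
	cfg := out.PublicAccessBlockConfiguration
	fmt.Println("block public ACLs:", aws.BoolValue(cfg.BlockPublicAcls),
		"block public policy:", aws.BoolValue(cfg.BlockPublicPolicy))
}
```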
- IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp"` // Key is a required field Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -10357,7 +14234,7 @@ type HeadObjectInput struct { // does not store the encryption key. The key must be appropriate for use with // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -10384,6 +14261,9 @@ func (s *HeadObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -10403,6 +14283,13 @@ func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetIfMatch sets the IfMatch field's value. func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { s.IfMatch = &v @@ -10463,6 +14350,13 @@ func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { return s } +func (s *HeadObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { s.SSECustomerKeyMD5 = &v @@ -10475,7 +14369,6 @@ func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput type HeadObjectOutput struct { _ struct{} `type:"structure"` @@ -10519,7 +14412,7 @@ type HeadObjectOutput struct { Expires *string `location:"header" locationName:"Expires" type:"string"` // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp"` // A map of metadata to store with the object in S3. Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` @@ -10530,6 +14423,15 @@ type HeadObjectOutput struct { // you can create metadata whose values are not legal HTTP headers. MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + // The Legal Hold status for the specified object. + ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"` + + // The object lock mode currently in place for this object. + ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"` + + // The date and time when this object's object lock expires. 
+ ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"` + // The count of parts this object has. PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` @@ -10555,7 +14457,7 @@ type HeadObjectOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -10666,6 +14568,24 @@ func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *HeadObjectOutput) SetObjectLockLegalHoldStatus(v string) *HeadObjectOutput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *HeadObjectOutput) SetObjectLockMode(v string) *HeadObjectOutput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. +func (s *HeadObjectOutput) SetObjectLockRetainUntilDate(v time.Time) *HeadObjectOutput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetPartsCount sets the PartsCount field's value. func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { s.PartsCount = &v @@ -10732,7 +14652,6 @@ func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutpu return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument type IndexDocument struct { _ struct{} `type:"structure"` @@ -10774,7 +14693,6 @@ func (s *IndexDocument) SetSuffix(v string) *IndexDocument { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator type Initiator struct { _ struct{} `type:"structure"` @@ -10808,7 +14726,61 @@ func (s *Initiator) SetID(v string) *Initiator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +// Describes the serialization format of the object. +type InputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of a CSV-encoded object. + CSV *CSVInput `type:"structure"` + + // Specifies object's compression format. Valid values: NONE, GZIP, BZIP2. Default + // Value: NONE. + CompressionType *string `type:"string" enum:"CompressionType"` + + // Specifies JSON as object's input serialization format. + JSON *JSONInput `type:"structure"` + + // Specifies Parquet as object's input serialization format. + Parquet *ParquetInput `type:"structure"` +} + +// String returns the string representation +func (s InputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *InputSerialization) SetCSV(v *CSVInput) *InputSerialization { + s.CSV = v + return s +} + +// SetCompressionType sets the CompressionType field's value. 
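InputSerialization, defined above with its CSV, JSON, and Parquet variants, feeds SelectObjectContent, the same API family the EndEvent at the top of this section supports. The sketch below assumes the upstream event-stream surface (SelectObjectContentInput, OutputSerialization, RecordsEvent) that falls outside the hunks shown here, and uses the JSONInput type defined further down; bucket, key, and query are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.SelectObjectContent(&s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"), // placeholders
		Key:            aws.String("example/records.json"),
		Expression:     aws.String("SELECT * FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		// Line-delimited JSON in, JSON back out.
		InputSerialization: &s3.InputSerialization{
			JSON: &s3.JSONInput{Type: aws.String(s3.JSONTypeLines)},
		},
		OutputSerialization: &s3.OutputSerialization{
			JSON: &s3.JSONOutput{},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.EventStream.Close()

	// RecordsEvent frames carry result bytes; the EndEvent defined earlier
	// in this file marks a successful end of the stream.
	for ev := range out.EventStream.Events() {
		switch e := ev.(type) {
		case *s3.RecordsEvent:
			fmt.Print(string(e.Payload))
		case *s3.EndEvent:
			fmt.Println("-- end of stream --")
		}
	}
	if err := out.EventStream.Err(); err != nil {
		log.Fatal(err)
	}
}
```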
+func (s *InputSerialization) SetCompressionType(v string) *InputSerialization { + s.CompressionType = &v + return s +} + +// SetJSON sets the JSON field's value. +func (s *InputSerialization) SetJSON(v *JSONInput) *InputSerialization { + s.JSON = v + return s +} + +// SetParquet sets the Parquet field's value. +func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization { + s.Parquet = v + return s +} + +// Specifies the inventory configuration for an Amazon S3 bucket. For more information, +// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) +// in the Amazon Simple Storage Service API Reference. type InventoryConfiguration struct { _ struct{} `type:"structure"` @@ -10826,12 +14798,16 @@ type InventoryConfiguration struct { // Id is a required field Id *string `type:"string" required:"true"` - // Specifies which object version(s) to included in the inventory results. + // Object versions to include in the inventory list. If set to All, the list + // includes all the object versions, which adds the version-related fields VersionId, + // IsLatest, and DeleteMarker to the list. If set to Current, the list does + // not contain these version-related fields. // // IncludedObjectVersions is a required field IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` - // Specifies whether the inventory is enabled or disabled. + // Specifies whether the inventory is enabled or disabled. If set to True, an + // inventory list is generated. If set to False, no inventory list is generated. // // IsEnabled is a required field IsEnabled *bool `type:"boolean" required:"true"` @@ -10937,7 +14913,6 @@ func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryCon return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination type InventoryDestination struct { _ struct{} `type:"structure"` @@ -10982,7 +14957,55 @@ func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +// Contains the type of server-side encryption used to encrypt the inventory +// results. +type InventoryEncryption struct { + _ struct{} `type:"structure"` + + // Specifies the use of SSE-KMS to encrypt delivered Inventory reports. + SSEKMS *SSEKMS `locationName:"SSE-KMS" type:"structure"` + + // Specifies the use of SSE-S3 to encrypt delivered Inventory reports. + SSES3 *SSES3 `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s InventoryEncryption) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryEncryption) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryEncryption) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryEncryption"} + if s.SSEKMS != nil { + if err := s.SSEKMS.Validate(); err != nil { + invalidParams.AddNested("SSEKMS", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSSEKMS sets the SSEKMS field's value. +func (s *InventoryEncryption) SetSSEKMS(v *SSEKMS) *InventoryEncryption { + s.SSEKMS = v + return s +} + +// SetSSES3 sets the SSES3 field's value. 
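//
// A minimal sketch (not generated SDK code) of the new InventoryEncryption
// element above: delivered inventory reports can be encrypted with either
// SSE-KMS or SSE-S3. Assumes the usual aws and s3 imports; the KMS key ARN
// is a placeholder.
//
//	enc := &s3.InventoryEncryption{
//		SSEKMS: &s3.SSEKMS{
//			KeyId: aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"), // placeholder
//		},
//	}
//	// Or, for S3-managed keys, set SSES3 instead:
//	// enc := &s3.InventoryEncryption{SSES3: &s3.SSES3{}}
//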
+func (s *InventoryEncryption) SetSSES3(v *SSES3) *InventoryEncryption { + s.SSES3 = v + return s +} + type InventoryFilter struct { _ struct{} `type:"structure"` @@ -11021,7 +15044,6 @@ func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination type InventoryS3BucketDestination struct { _ struct{} `type:"structure"` @@ -11034,6 +15056,10 @@ type InventoryS3BucketDestination struct { // Bucket is a required field Bucket *string `type:"string" required:"true"` + // Contains the type of server-side encryption used to encrypt the inventory + // results. + Encryption *InventoryEncryption `type:"structure"` + // Specifies the output format of the inventory results. // // Format is a required field @@ -11062,6 +15088,11 @@ func (s *InventoryS3BucketDestination) Validate() error { if s.Format == nil { invalidParams.Add(request.NewErrParamRequired("Format")) } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } if invalidParams.Len() > 0 { return invalidParams @@ -11081,6 +15112,19 @@ func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDes return s } +func (s *InventoryS3BucketDestination) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetEncryption sets the Encryption field's value. +func (s *InventoryS3BucketDestination) SetEncryption(v *InventoryEncryption) *InventoryS3BucketDestination { + s.Encryption = v + return s +} + // SetFormat sets the Format field's value. func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { s.Format = &v @@ -11093,7 +15137,6 @@ func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDes return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule type InventorySchedule struct { _ struct{} `type:"structure"` @@ -11132,57 +15175,106 @@ func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { return s } -// Container for object key name prefix and suffix filtering rules. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter -type KeyFilter struct { +type JSONInput struct { _ struct{} `type:"structure"` - // A list of containers for key value pair that defines the criteria for the - // filter rule. - FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` + // The type of JSON. Valid values: Document, Lines. + Type *string `type:"string" enum:"JSONType"` } // String returns the string representation -func (s KeyFilter) String() string { +func (s JSONInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s KeyFilter) GoString() string { +func (s JSONInput) GoString() string { return s.String() } -// SetFilterRules sets the FilterRules field's value. -func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { - s.FilterRules = v +// SetType sets the Type field's value. +func (s *JSONInput) SetType(v string) *JSONInput { + s.Type = &v return s } -// Container for specifying the AWS Lambda notification configuration. 
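//
// Continuing the sketch above (not generated SDK code), the encryption choice
// hangs off the inventory destination. The bucket ARN and prefix are
// placeholders.
//
//	dest := &s3.InventoryDestination{
//		S3BucketDestination: &s3.InventoryS3BucketDestination{
//			Bucket:     aws.String("arn:aws:s3:::example-inventory-bucket"), // placeholder
//			Format:     aws.String(s3.InventoryFormatCsv),
//			Prefix:     aws.String("inventory/"), // placeholder
//			Encryption: enc,                      // from the previous sketch
//		},
//	}
//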
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration -type LambdaFunctionConfiguration struct { +type JSONOutput struct { _ struct{} `type:"structure"` - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Filter *NotificationConfigurationFilter `type:"structure"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - // - // LambdaFunctionArn is a required field - LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` + // The value used to separate individual records in the output. + RecordDelimiter *string `type:"string"` } // String returns the string representation -func (s LambdaFunctionConfiguration) String() string { +func (s JSONOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s JSONOutput) GoString() string { + return s.String() +} + +// SetRecordDelimiter sets the RecordDelimiter field's value. +func (s *JSONOutput) SetRecordDelimiter(v string) *JSONOutput { + s.RecordDelimiter = &v + return s +} + +// A container for object key name prefix and suffix filtering rules. +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for the key value pair that defines the criteria for + // the filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// A container for specifying the configuration for AWS Lambda notifications. +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For + // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3 + // invokes when the specified event type occurs. 
+	//
+	// LambdaFunctionArn is a required field
+	LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
 	return awsutil.Prettify(s)
 }
 
@@ -11231,7 +15323,6 @@ func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunc
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
 type LifecycleConfiguration struct {
 	_ struct{} `type:"structure"`
 
@@ -11278,7 +15369,6 @@ func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
type LifecycleExpiration struct {
 	_ struct{} `type:"structure"`
 
@@ -11325,12 +15415,14 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
 type LifecycleRule struct {
 	_ struct{} `type:"structure"`
 
-	// Specifies the days since the initiation of an Incomplete Multipart Upload
-	// that Lifecycle will wait before permanently removing all parts of the upload.
+	// Specifies the days since the initiation of an incomplete multipart upload
+	// that Amazon S3 will wait before permanently removing all parts of the upload.
+	// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+	// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+	// in the Amazon Simple Storage Service Developer Guide.
 	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
 	Expiration *LifecycleExpiration `type:"structure"`
@@ -11352,7 +15444,9 @@ type LifecycleRule struct {
 	NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
 
 	// Prefix identifying one or more objects to which the rule applies. This is
-	// deprecated; use Filter instead.
+	// no longer used; use Filter instead.
+	//
+	// Deprecated: Prefix has been deprecated
 	Prefix *string `deprecated:"true" type:"string"`
 
 	// If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
@@ -11449,7 +15543,6 @@ func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
 // This is used in a Lifecycle Rule Filter to apply a logical AND to two or
 // more predicates. The Lifecycle Rule will apply to any object matching all
 // of the predicates configured inside the And operator.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
 type LifecycleRuleAndOperator struct {
 	_ struct{} `type:"structure"`
 
@@ -11504,7 +15597,6 @@ func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
 // The Filter is used to identify objects that a Lifecycle Rule applies to.
 // A Filter must have exactly one of Prefix, Tag, or And specified.
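//
// A minimal sketch (not generated SDK code) of a LifecycleRule using the
// Filter form together with the abort-incomplete-multipart-upload setting
// documented above. Assumes the usual aws and s3 imports; the rule ID and
// prefix are placeholders.
//
//	rule := &s3.LifecycleRule{
//		ID:     aws.String("abort-stale-uploads"), // placeholder
//		Status: aws.String("Enabled"),
//		Filter: &s3.LifecycleRuleFilter{Prefix: aws.String("logs/")}, // placeholder prefix
//		AbortIncompleteMultipartUpload: &s3.AbortIncompleteMultipartUpload{
//			DaysAfterInitiation: aws.Int64(7),
//		},
//	}
//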
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter type LifecycleRuleFilter struct { _ struct{} `type:"structure"` @@ -11568,7 +15660,6 @@ func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest type ListBucketAnalyticsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -11598,6 +15689,9 @@ func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11611,13 +15705,19 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput type ListBucketAnalyticsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -11672,7 +15772,6 @@ func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest type ListBucketInventoryConfigurationsInput struct { _ struct{} `type:"structure"` @@ -11704,6 +15803,9 @@ func (s *ListBucketInventoryConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11717,13 +15819,19 @@ func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucket return s } +func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. 
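//
// The Validate methods above now also reject empty (zero-length) bucket
// names, not just nil ones. A quick sketch of the failure mode, assuming the
// usual aws and s3 imports:
//
//	in := &s3.ListBucketInventoryConfigurationsInput{Bucket: aws.String("")}
//	if err := in.Validate(); err != nil {
//		// err reports the new "Bucket" minimum-length violation
//	}
//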
func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput type ListBucketInventoryConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -11778,7 +15886,6 @@ func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v str return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest type ListBucketMetricsConfigurationsInput struct { _ struct{} `type:"structure"` @@ -11810,6 +15917,9 @@ func (s *ListBucketMetricsConfigurationsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11823,13 +15933,19 @@ func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMe return s } +func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { s.ContinuationToken = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput type ListBucketMetricsConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -11886,7 +16002,6 @@ func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v strin return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput type ListBucketsInput struct { _ struct{} `type:"structure"` } @@ -11901,7 +16016,6 @@ func (s ListBucketsInput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput type ListBucketsOutput struct { _ struct{} `type:"structure"` @@ -11932,7 +16046,6 @@ func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest type ListMultipartUploadsInput struct { _ struct{} `type:"structure"` @@ -11985,6 +16098,9 @@ func (s *ListMultipartUploadsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -11998,6 +16114,13 @@ func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInp return s } +func (s *ListMultipartUploadsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. 
func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { s.Delimiter = &v @@ -12034,7 +16157,6 @@ func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUp return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput type ListMultipartUploadsOutput struct { _ struct{} `type:"structure"` @@ -12095,6 +16217,13 @@ func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOu return s } +func (s *ListMultipartUploadsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCommonPrefixes sets the CommonPrefixes field's value. func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { s.CommonPrefixes = v @@ -12161,7 +16290,6 @@ func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMulti return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest type ListObjectVersionsInput struct { _ struct{} `type:"structure"` @@ -12209,6 +16337,9 @@ func (s *ListObjectVersionsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -12222,6 +16353,13 @@ func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { return s } +func (s *ListObjectVersionsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { s.Delimiter = &v @@ -12258,7 +16396,6 @@ func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersio return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput type ListObjectVersionsOutput struct { _ struct{} `type:"structure"` @@ -12386,7 +16523,6 @@ func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVe return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest type ListObjectsInput struct { _ struct{} `type:"structure"` @@ -12436,6 +16572,9 @@ func (s *ListObjectsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -12449,6 +16588,13 @@ func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { return s } +func (s *ListObjectsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetDelimiter sets the Delimiter field's value. 
func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { s.Delimiter = &v @@ -12485,7 +16631,6 @@ func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput type ListObjectsOutput struct { _ struct{} `type:"structure"` @@ -12590,7 +16735,6 @@ func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request type ListObjectsV2Input struct { _ struct{} `type:"structure"` @@ -12648,6 +16792,9 @@ func (s *ListObjectsV2Input) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -12661,6 +16808,13 @@ func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { return s } +func (s *ListObjectsV2Input) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetContinuationToken sets the ContinuationToken field's value. func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { s.ContinuationToken = &v @@ -12709,7 +16863,6 @@ func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output type ListObjectsV2Output struct { _ struct{} `type:"structure"` @@ -12843,7 +16996,6 @@ func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest type ListPartsInput struct { _ struct{} `type:"structure"` @@ -12888,6 +17040,9 @@ func (s *ListPartsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -12910,6 +17065,13 @@ func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { return s } +func (s *ListPartsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetKey sets the Key field's value. func (s *ListPartsInput) SetKey(v string) *ListPartsInput { s.Key = &v @@ -12940,12 +17102,11 @@ func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput type ListPartsOutput struct { _ struct{} `type:"structure"` // Date when multipart upload will become eligible for abort operation by lifecycle. - AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp"` // Id of the lifecycle rule that makes a multipart upload eligible for abort // operation. @@ -13017,6 +17178,13 @@ func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { return s } +func (s *ListPartsOutput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetInitiator sets the Initiator field's value. 
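//
// A minimal pagination sketch (not generated SDK code) over ListObjectsV2
// using the continuation-token fields shown above. Assumes the usual aws,
// s3, and fmt imports; svc and the bucket name are placeholders, and error
// handling is elided.
//
//	var token *string
//	for {
//		page, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{
//			Bucket:            aws.String("example-bucket"), // placeholder
//			ContinuationToken: token,
//		})
//		if err != nil {
//			break
//		}
//		for _, obj := range page.Contents {
//			fmt.Println(*obj.Key)
//		}
//		if page.NextContinuationToken == nil {
//			break
//		}
//		token = page.NextContinuationToken
//	}
//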
func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { s.Initiator = v @@ -13083,7 +17251,138 @@ func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +// Describes an S3 location that will receive the results of the restore request. +type Location struct { + _ struct{} `type:"structure"` + + // A list of grants that control access to the staged results. + AccessControlList []*Grant `locationNameList:"Grant" type:"list"` + + // The name of the bucket where the restore results will be placed. + // + // BucketName is a required field + BucketName *string `type:"string" required:"true"` + + // The canned ACL to apply to the restore results. + CannedACL *string `type:"string" enum:"ObjectCannedACL"` + + // Describes the server-side encryption that will be applied to the restore + // results. + Encryption *Encryption `type:"structure"` + + // The prefix that is prepended to the restore results for this request. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The class of storage used to store the restore results. + StorageClass *string `type:"string" enum:"StorageClass"` + + // The tag-set that is applied to the restore results. + Tagging *Tagging `type:"structure"` + + // A list of metadata to store with the restore results in S3. + UserMetadata []*MetadataEntry `locationNameList:"MetadataEntry" type:"list"` +} + +// String returns the string representation +func (s Location) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Location) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Location) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Location"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.AccessControlList != nil { + for i, v := range s.AccessControlList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AccessControlList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Encryption != nil { + if err := s.Encryption.Validate(); err != nil { + invalidParams.AddNested("Encryption", err.(request.ErrInvalidParams)) + } + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccessControlList sets the AccessControlList field's value. +func (s *Location) SetAccessControlList(v []*Grant) *Location { + s.AccessControlList = v + return s +} + +// SetBucketName sets the BucketName field's value. +func (s *Location) SetBucketName(v string) *Location { + s.BucketName = &v + return s +} + +// SetCannedACL sets the CannedACL field's value. +func (s *Location) SetCannedACL(v string) *Location { + s.CannedACL = &v + return s +} + +// SetEncryption sets the Encryption field's value. +func (s *Location) SetEncryption(v *Encryption) *Location { + s.Encryption = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Location) SetPrefix(v string) *Location { + s.Prefix = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
+func (s *Location) SetStorageClass(v string) *Location { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *Location) SetTagging(v *Tagging) *Location { + s.Tagging = v + return s +} + +// SetUserMetadata sets the UserMetadata field's value. +func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location { + s.UserMetadata = v + return s +} + +// Describes where logs are stored and the prefix that Amazon S3 assigns to +// all log object keys for a bucket. For more information, see PUT Bucket logging +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html) +// in the Amazon Simple Storage Service API Reference. type LoggingEnabled struct { _ struct{} `type:"structure"` @@ -13093,13 +17392,18 @@ type LoggingEnabled struct { // to deliver their logs to the same target bucket. In this case you should // choose a different TargetPrefix for each source bucket so that the delivered // log files can be distinguished by key. - TargetBucket *string `type:"string"` + // + // TargetBucket is a required field + TargetBucket *string `type:"string" required:"true"` TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` + // A prefix for all log object keys. If you store log files from multiple Amazon + // S3 buckets in a single bucket, you can use a prefix to distinguish which + // log files came from which bucket. + // + // TargetPrefix is a required field + TargetPrefix *string `type:"string" required:"true"` } // String returns the string representation @@ -13115,6 +17419,12 @@ func (s LoggingEnabled) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *LoggingEnabled) Validate() error { invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetBucket == nil { + invalidParams.Add(request.NewErrParamRequired("TargetBucket")) + } + if s.TargetPrefix == nil { + invalidParams.Add(request.NewErrParamRequired("TargetPrefix")) + } if s.TargetGrants != nil { for i, v := range s.TargetGrants { if v == nil { @@ -13150,7 +17460,37 @@ func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator +// A metadata key-value pair to store with an object. +type MetadataEntry struct { + _ struct{} `type:"structure"` + + Name *string `type:"string"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s MetadataEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetadataEntry) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *MetadataEntry) SetName(v string) *MetadataEntry { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. +func (s *MetadataEntry) SetValue(v string) *MetadataEntry { + s.Value = &v + return s +} + type MetricsAndOperator struct { _ struct{} `type:"structure"` @@ -13203,7 +17543,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration +// Specifies a metrics configuration for the CloudWatch request metrics (specified +// by the metrics configuration ID) from an Amazon S3 bucket. 
If you're updating +// an existing metrics configuration, note that this is a full replacement of +// the existing metrics configuration. If you don't include the elements you +// want to keep, they are erased. For more information, see PUT Bucket metrics +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html) +// in the Amazon Simple Storage Service API Reference. type MetricsConfiguration struct { _ struct{} `type:"structure"` @@ -13258,7 +17604,6 @@ func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter type MetricsFilter struct { _ struct{} `type:"structure"` @@ -13322,12 +17667,11 @@ func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload type MultipartUpload struct { _ struct{} `type:"structure"` // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + Initiated *time.Time `type:"timestamp"` // Identifies who initiated the multipart upload. Initiator *Initiator `type:"structure"` @@ -13395,14 +17739,14 @@ func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { // configuration action on a bucket that has versioning enabled (or suspended) // to request that Amazon S3 delete noncurrent object versions at a specific // period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration type NoncurrentVersionExpiration struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations) + // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -13423,18 +17767,20 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers } // Container for the transition rule that describes when noncurrent objects -// transition to the STANDARD_IA or GLACIER storage class. If your bucket is -// versioning-enabled (or versioning is suspended), you can set this action -// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA -// or GLACIER storage class at a specific period in the object's lifetime. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, +// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning +// is suspended), you can set this action to request that Amazon S3 transition +// noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, +// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's +// lifetime. type NoncurrentVersionTransition struct { _ struct{} `type:"structure"` // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. 
For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + // in the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -13463,16 +17809,21 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi return s } -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration +// A container for specifying the notification configuration of the bucket. +// If this element is empty, notifications are turned off for the bucket. type NotificationConfiguration struct { _ struct{} `type:"structure"` + // Describes the AWS Lambda functions to invoke and the events for which to + // invoke them. LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + // The Amazon Simple Queue Service queues to publish messages to and the events + // for which to publish messages. QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + // The topic to which notifications are sent and the events for which notifications + // are generated. TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` } @@ -13544,7 +17895,6 @@ func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfigurati return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated type NotificationConfigurationDeprecated struct { _ struct{} `type:"structure"` @@ -13583,13 +17933,13 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf return s } -// Container for object key name filtering rules. For information about key -// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +// Specifies object key name filtering rules. For information about key name +// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// in the Amazon Simple Storage Service Developer Guide. type NotificationConfigurationFilter struct { _ struct{} `type:"structure"` - // Container for object key name prefix and suffix filtering rules. + // A container for object key name prefix and suffix filtering rules. 
Key *KeyFilter `locationName:"S3Key" type:"structure"` } @@ -13609,7 +17959,6 @@ func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConf return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object type Object struct { _ struct{} `type:"structure"` @@ -13617,7 +17966,7 @@ type Object struct { Key *string `min:"1" type:"string"` - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` Owner *Owner `type:"structure"` @@ -13673,7 +18022,6 @@ func (s *Object) SetStorageClass(v string) *Object { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier type ObjectIdentifier struct { _ struct{} `type:"structure"` @@ -13724,60 +18072,174 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion -type ObjectVersion struct { +// The container element for object lock configuration parameters. +type ObjectLockConfiguration struct { _ struct{} `type:"structure"` - ETag *string `type:"string"` + // Indicates whether this bucket has an object lock configuration enabled. + ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"` - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` + // The object lock rule in place for the specified object. + Rule *ObjectLockRule `type:"structure"` +} - // The object key. - Key *string `min:"1" type:"string"` +// String returns the string representation +func (s ObjectLockConfiguration) String() string { + return awsutil.Prettify(s) +} - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +// GoString returns the string representation +func (s ObjectLockConfiguration) GoString() string { + return s.String() +} - Owner *Owner `type:"structure"` +// SetObjectLockEnabled sets the ObjectLockEnabled field's value. +func (s *ObjectLockConfiguration) SetObjectLockEnabled(v string) *ObjectLockConfiguration { + s.ObjectLockEnabled = &v + return s +} - // Size in bytes of the object. - Size *int64 `type:"integer"` +// SetRule sets the Rule field's value. +func (s *ObjectLockConfiguration) SetRule(v *ObjectLockRule) *ObjectLockConfiguration { + s.Rule = v + return s +} - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` +// A Legal Hold configuration for an object. +type ObjectLockLegalHold struct { + _ struct{} `type:"structure"` - // Version ID of an object. - VersionId *string `type:"string"` + // Indicates whether the specified object has a Legal Hold in place. + Status *string `type:"string" enum:"ObjectLockLegalHoldStatus"` } // String returns the string representation -func (s ObjectVersion) String() string { +func (s ObjectLockLegalHold) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ObjectVersion) GoString() string { +func (s ObjectLockLegalHold) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *ObjectVersion) SetETag(v string) *ObjectVersion { - s.ETag = &v +// SetStatus sets the Status field's value. +func (s *ObjectLockLegalHold) SetStatus(v string) *ObjectLockLegalHold { + s.Status = &v return s } -// SetIsLatest sets the IsLatest field's value. 
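//
// A minimal sketch (not generated SDK code) of the new object-lock types
// above; DefaultRetention and the object-lock enum constants are declared
// elsewhere in this file. The 30-day retention period is a placeholder.
//
//	cfg := &s3.ObjectLockConfiguration{
//		ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
//		Rule: &s3.ObjectLockRule{
//			DefaultRetention: &s3.DefaultRetention{
//				Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
//				Days: aws.Int64(30), // placeholder retention period
//			},
//		},
//	}
//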
-func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { - s.IsLatest = &v - return s +// A Retention configuration for an object. +type ObjectLockRetention struct { + _ struct{} `type:"structure"` + + // Indicates the Retention mode for the specified object. + Mode *string `type:"string" enum:"ObjectLockRetentionMode"` + + // The date on which this object lock retention expires. + RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` } -// SetKey sets the Key field's value. -func (s *ObjectVersion) SetKey(v string) *ObjectVersion { - s.Key = &v - return s +// String returns the string representation +func (s ObjectLockRetention) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRetention) GoString() string { + return s.String() +} + +// SetMode sets the Mode field's value. +func (s *ObjectLockRetention) SetMode(v string) *ObjectLockRetention { + s.Mode = &v + return s +} + +// SetRetainUntilDate sets the RetainUntilDate field's value. +func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetention { + s.RetainUntilDate = &v + return s +} + +// The container element for an object lock rule. +type ObjectLockRule struct { + _ struct{} `type:"structure"` + + // The default retention period that you want to apply to new objects placed + // in the specified bucket. + DefaultRetention *DefaultRetention `type:"structure"` +} + +// String returns the string representation +func (s ObjectLockRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectLockRule) GoString() string { + return s.String() +} + +// SetDefaultRetention sets the DefaultRetention field's value. +func (s *ObjectLockRule) SetDefaultRetention(v *DefaultRetention) *ObjectLockRule { + s.DefaultRetention = v + return s +} + +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s } // SetLastModified sets the LastModified field's value. @@ -13810,7 +18272,78 @@ func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +// Describes the location where the restore job's output is stored. 
+type OutputLocation struct { + _ struct{} `type:"structure"` + + // Describes an S3 location that will receive the results of the restore request. + S3 *Location `type:"structure"` +} + +// String returns the string representation +func (s OutputLocation) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputLocation) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OutputLocation) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OutputLocation"} + if s.S3 != nil { + if err := s.S3.Validate(); err != nil { + invalidParams.AddNested("S3", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3 sets the S3 field's value. +func (s *OutputLocation) SetS3(v *Location) *OutputLocation { + s.S3 = v + return s +} + +// Describes how results of the Select job are serialized. +type OutputSerialization struct { + _ struct{} `type:"structure"` + + // Describes the serialization of CSV-encoded Select results. + CSV *CSVOutput `type:"structure"` + + // Specifies JSON as request's output serialization format. + JSON *JSONOutput `type:"structure"` +} + +// String returns the string representation +func (s OutputSerialization) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OutputSerialization) GoString() string { + return s.String() +} + +// SetCSV sets the CSV field's value. +func (s *OutputSerialization) SetCSV(v *CSVOutput) *OutputSerialization { + s.CSV = v + return s +} + +// SetJSON sets the JSON field's value. +func (s *OutputSerialization) SetJSON(v *JSONOutput) *OutputSerialization { + s.JSON = v + return s +} + type Owner struct { _ struct{} `type:"structure"` @@ -13841,7 +18374,20 @@ func (s *Owner) SetID(v string) *Owner { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part +type ParquetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ParquetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParquetInput) GoString() string { + return s.String() +} + type Part struct { _ struct{} `type:"structure"` @@ -13849,13 +18395,13 @@ type Part struct { ETag *string `type:"string"` // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + LastModified *time.Time `type:"timestamp"` // Part number identifying the part. This is a positive integer between 1 and // 10,000. PartNumber *int64 `type:"integer"` - // Size of the uploaded part data. + // Size in bytes of the uploaded part data. Size *int64 `type:"integer"` } @@ -13893,14 +18439,195 @@ func (s *Part) SetSize(v int64) *Part { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +// The container element for a bucket's policy status. +type PolicyStatus struct { + _ struct{} `type:"structure"` + + // The policy status for this bucket. TRUE indicates that this bucket is public. + // FALSE indicates that the bucket is not public. 
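//
// A minimal sketch (not generated SDK code) combining the restore-output
// types above. Location requires both BucketName and Prefix, per its
// Validate method; the names below are placeholders.
//
//	loc := &s3.OutputLocation{
//		S3: &s3.Location{
//			BucketName: aws.String("example-restore-bucket"), // placeholder
//			Prefix:     aws.String("restored/"),              // placeholder
//		},
//	}
//	out := &s3.OutputSerialization{
//		JSON: &s3.JSONOutput{RecordDelimiter: aws.String("\n")},
//	}
//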
+	IsPublic *bool `locationName:"IsPublic" type:"boolean"`
+}
+
+// String returns the string representation
+func (s PolicyStatus) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyStatus) GoString() string {
+	return s.String()
+}
+
+// SetIsPublic sets the IsPublic field's value.
+func (s *PolicyStatus) SetIsPublic(v bool) *PolicyStatus {
+	s.IsPublic = &v
+	return s
+}
+
+type Progress struct {
+	_ struct{} `type:"structure"`
+
+	// The current number of uncompressed object bytes processed.
+	BytesProcessed *int64 `type:"long"`
+
+	// The current number of bytes of records payload data returned.
+	BytesReturned *int64 `type:"long"`
+
+	// The current number of object bytes scanned.
+	BytesScanned *int64 `type:"long"`
+}
+
+// String returns the string representation
+func (s Progress) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Progress) GoString() string {
+	return s.String()
+}
+
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Progress) SetBytesProcessed(v int64) *Progress {
+	s.BytesProcessed = &v
+	return s
+}
+
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Progress) SetBytesReturned(v int64) *Progress {
+	s.BytesReturned = &v
+	return s
+}
+
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Progress) SetBytesScanned(v int64) *Progress {
+	s.BytesScanned = &v
+	return s
+}
+
+type ProgressEvent struct {
+	_ struct{} `locationName:"ProgressEvent" type:"structure" payload:"Details"`
+
+	// The Progress event details.
+	Details *Progress `locationName:"Details" type:"structure"`
+}
+
+// String returns the string representation
+func (s ProgressEvent) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ProgressEvent) GoString() string {
+	return s.String()
+}
+
+// SetDetails sets the Details field's value.
+func (s *ProgressEvent) SetDetails(v *Progress) *ProgressEvent {
+	s.Details = v
+	return s
+}
+
+// The ProgressEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *ProgressEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the ProgressEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *ProgressEvent) UnmarshalEvent(
+	payloadUnmarshaler protocol.PayloadUnmarshaler,
+	msg eventstream.Message,
+) error {
+	if err := payloadUnmarshaler.UnmarshalPayload(
+		bytes.NewReader(msg.Payload), s,
+	); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Specifies the Block Public Access configuration for an Amazon S3 bucket.
+type PublicAccessBlockConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies whether Amazon S3 should block public access control lists (ACLs)
+	// for this bucket and objects in this bucket. Setting this element to TRUE
+	// causes the following behavior:
+	//
+	//    * PUT Bucket acl and PUT Object acl calls fail if the specified ACL is
+	//    public.
+	//
+	//    * PUT Object calls fail if the request includes a public ACL.
+	//
+	// Enabling this setting doesn't affect existing policies or ACLs.
+	BlockPublicAcls *bool `locationName:"BlockPublicAcls" type:"boolean"`
+
+	// Specifies whether Amazon S3 should block public bucket policies for this
+	// bucket.
Setting this element to TRUE causes Amazon S3 to reject calls to + // PUT Bucket policy if the specified bucket policy allows public access. + // + // Enabling this setting doesn't affect existing bucket policies. + BlockPublicPolicy *bool `locationName:"BlockPublicPolicy" type:"boolean"` + + // Specifies whether Amazon S3 should ignore public ACLs for this bucket and + // objects in this bucket. Setting this element to TRUE causes Amazon S3 to + // ignore all public ACLs on this bucket and objects in this bucket. + // + // Enabling this setting doesn't affect the persistence of any existing ACLs + // and doesn't prevent new public ACLs from being set. + IgnorePublicAcls *bool `locationName:"IgnorePublicAcls" type:"boolean"` + + // Specifies whether Amazon S3 should restrict public bucket policies for this + // bucket. Setting this element to TRUE restricts access to this bucket to only + // AWS services and authorized users within this account if the bucket has a + // public policy. + // + // Enabling this setting doesn't affect previously stored bucket policies, except + // that public and cross-account access within any public bucket policy, including + // non-public delegation to specific accounts, is blocked. + RestrictPublicBuckets *bool `locationName:"RestrictPublicBuckets" type:"boolean"` +} + +// String returns the string representation +func (s PublicAccessBlockConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PublicAccessBlockConfiguration) GoString() string { + return s.String() +} + +// SetBlockPublicAcls sets the BlockPublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicAcls(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicAcls = &v + return s +} + +// SetBlockPublicPolicy sets the BlockPublicPolicy field's value. +func (s *PublicAccessBlockConfiguration) SetBlockPublicPolicy(v bool) *PublicAccessBlockConfiguration { + s.BlockPublicPolicy = &v + return s +} + +// SetIgnorePublicAcls sets the IgnorePublicAcls field's value. +func (s *PublicAccessBlockConfiguration) SetIgnorePublicAcls(v bool) *PublicAccessBlockConfiguration { + s.IgnorePublicAcls = &v + return s +} + +// SetRestrictPublicBuckets sets the RestrictPublicBuckets field's value. +func (s *PublicAccessBlockConfiguration) SetRestrictPublicBuckets(v bool) *PublicAccessBlockConfiguration { + s.RestrictPublicBuckets = &v + return s +} + type PutBucketAccelerateConfigurationInput struct { _ struct{} `type:"structure" payload:"AccelerateConfiguration"` // Specifies the Accelerate Configuration you want to set for the bucket. // // AccelerateConfiguration is a required field - AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Name of the bucket for which the accelerate configuration is set. 
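//
// A minimal sketch (not generated SDK code) of the PublicAccessBlockConfiguration
// defined above, with all four protections switched on. Assumes the usual aws
// and s3 imports.
//
//	pab := &s3.PublicAccessBlockConfiguration{
//		BlockPublicAcls:       aws.Bool(true),
//		BlockPublicPolicy:     aws.Bool(true),
//		IgnorePublicAcls:      aws.Bool(true),
//		RestrictPublicBuckets: aws.Bool(true),
//	}
//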
// @@ -13927,6 +18654,9 @@ func (s *PutBucketAccelerateConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if invalidParams.Len() > 0 { return invalidParams @@ -13946,7 +18676,13 @@ func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAc return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + type PutBucketAccelerateConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -13961,14 +18697,14 @@ func (s PutBucketAccelerateConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest type PutBucketAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the bucket. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -14006,6 +18742,9 @@ func (s *PutBucketAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.AccessControlPolicy != nil { if err := s.AccessControlPolicy.Validate(); err != nil { invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) @@ -14036,6 +18775,13 @@ func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { return s } +func (s *PutBucketAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value. func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { s.GrantFullControl = &v @@ -14066,7 +18812,6 @@ func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput type PutBucketAclOutput struct { _ struct{} `type:"structure"` } @@ -14081,21 +18826,20 @@ func (s PutBucketAclOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest type PutBucketAnalyticsConfigurationInput struct { _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` // The configuration and any analyses for the analytics filter. // // AnalyticsConfiguration is a required field - AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // The name of the bucket to which an analytics configuration is stored. 
// // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The identifier used to represent an analytics configuration. + // The ID that identifies the analytics configuration. // // Id is a required field Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` @@ -14120,6 +18864,9 @@ func (s *PutBucketAnalyticsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -14147,13 +18894,19 @@ func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAna return s } +func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { s.Id = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput type PutBucketAnalyticsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -14168,15 +18921,19 @@ func (s PutBucketAnalyticsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest type PutBucketCorsInput struct { _ struct{} `type:"structure" payload:"CORSConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Describes the cross-origin access configuration for objects in an Amazon + // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing + // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon + // Simple Storage Service Developer Guide. + // // CORSConfiguration is a required field - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14195,6 +18952,9 @@ func (s *PutBucketCorsInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CORSConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) } @@ -14216,13 +18976,19 @@ func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { return s } +func (s *PutBucketCorsInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCORSConfiguration sets the CORSConfiguration field's value. 
func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { s.CORSConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput type PutBucketCorsOutput struct { _ struct{} `type:"structure"` } @@ -14237,33 +19003,117 @@ func (s PutBucketCorsOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest -type PutBucketInventoryConfigurationInput struct { - _ struct{} `type:"structure" payload:"InventoryConfiguration"` +type PutBucketEncryptionInput struct { + _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"` - // The name of the bucket where the inventory configuration will be stored. + // Specifies default encryption for a bucket using server-side encryption with + // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information + // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket + // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) + // in the Amazon Simple Storage Service Developer Guide. // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // The ID used to identify the inventory configuration. - // - // Id is a required field - Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` - - // Specifies the inventory configuration. + // Specifies the default server-side-encryption configuration. // - // InventoryConfiguration is a required field - InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` + // ServerSideEncryptionConfiguration is a required field + ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation -func (s PutBucketInventoryConfigurationInput) String() string { +func (s PutBucketEncryptionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutBucketInventoryConfigurationInput) GoString() string { +func (s PutBucketEncryptionInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketEncryptionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketEncryptionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.ServerSideEncryptionConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ServerSideEncryptionConfiguration")) + } + if s.ServerSideEncryptionConfiguration != nil { + if err := s.ServerSideEncryptionConfiguration.Validate(); err != nil { + invalidParams.AddNested("ServerSideEncryptionConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *PutBucketEncryptionInput) SetBucket(v string) *PutBucketEncryptionInput { + s.Bucket = &v + return s +} + +func (s *PutBucketEncryptionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetServerSideEncryptionConfiguration sets the ServerSideEncryptionConfiguration field's value. +func (s *PutBucketEncryptionInput) SetServerSideEncryptionConfiguration(v *ServerSideEncryptionConfiguration) *PutBucketEncryptionInput { + s.ServerSideEncryptionConfiguration = v + return s +} + +type PutBucketEncryptionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketEncryptionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketEncryptionOutput) GoString() string { + return s.String() +} + +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { return s.String() } @@ -14273,6 +19123,9 @@ func (s *PutBucketInventoryConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -14297,6 +19150,13 @@ func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInv return s } +func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { s.Id = &v @@ -14309,7 +19169,6 @@ func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *Inve return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput type PutBucketInventoryConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -14324,14 +19183,16 @@ func (s PutBucketInventoryConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest type PutBucketLifecycleConfigurationInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + // Specifies the lifecycle configuration for objects in an Amazon S3 bucket. + // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) + // in the Amazon Simple Storage Service Developer Guide. + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14350,6 +19211,9 @@ func (s *PutBucketLifecycleConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -14368,13 +19232,19 @@ func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLif return s } +func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { s.LifecycleConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput type PutBucketLifecycleConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -14389,14 +19259,13 @@ func (s PutBucketLifecycleConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest type PutBucketLifecycleInput struct { _ struct{} `type:"structure" payload:"LifecycleConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14415,6 +19284,9 @@ func (s *PutBucketLifecycleInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.LifecycleConfiguration != nil { if err := s.LifecycleConfiguration.Validate(); err != nil { invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) @@ -14433,13 +19305,19 @@ func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { return s } +func (s *PutBucketLifecycleInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { s.LifecycleConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput type PutBucketLifecycleOutput struct { _ struct{} `type:"structure"` } @@ -14454,7 +19332,6 @@ func (s PutBucketLifecycleOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest type PutBucketLoggingInput struct { _ struct{} `type:"structure" payload:"BucketLoggingStatus"` @@ -14462,7 +19339,7 @@ type PutBucketLoggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // BucketLoggingStatus is a required field - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14481,6 +19358,9 @@ func (s *PutBucketLoggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.BucketLoggingStatus == nil { invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) } @@ -14502,13 +19382,19 @@ func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { return s } +func (s *PutBucketLoggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetBucketLoggingStatus sets the BucketLoggingStatus field's value. func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { s.BucketLoggingStatus = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput type PutBucketLoggingOutput struct { _ struct{} `type:"structure"` } @@ -14523,7 +19409,6 @@ func (s PutBucketLoggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest type PutBucketMetricsConfigurationInput struct { _ struct{} `type:"structure" payload:"MetricsConfiguration"` @@ -14540,7 +19425,7 @@ type PutBucketMetricsConfigurationInput struct { // Specifies the metrics configuration. // // MetricsConfiguration is a required field - MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14559,6 +19444,9 @@ func (s *PutBucketMetricsConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } @@ -14583,6 +19471,13 @@ func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetri return s } +func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetId sets the Id field's value. 
func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { s.Id = &v @@ -14595,7 +19490,6 @@ func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsC return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput type PutBucketMetricsConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -14610,18 +19504,17 @@ func (s PutBucketMetricsConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest type PutBucketNotificationConfigurationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. + // A container for specifying the notification configuration of the bucket. + // If this element is empty, notifications are turned off for the bucket. // // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14640,6 +19533,9 @@ func (s *PutBucketNotificationConfigurationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -14661,13 +19557,19 @@ func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucket return s } +func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. 
func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput type PutBucketNotificationConfigurationOutput struct { _ struct{} `type:"structure"` } @@ -14682,7 +19584,6 @@ func (s PutBucketNotificationConfigurationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest type PutBucketNotificationInput struct { _ struct{} `type:"structure" payload:"NotificationConfiguration"` @@ -14690,7 +19591,7 @@ type PutBucketNotificationInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // NotificationConfiguration is a required field - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14709,6 +19610,9 @@ func (s *PutBucketNotificationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.NotificationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) } @@ -14725,13 +19629,19 @@ func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationI return s } +func (s *PutBucketNotificationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetNotificationConfiguration sets the NotificationConfiguration field's value. func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { s.NotificationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput type PutBucketNotificationOutput struct { _ struct{} `type:"structure"` } @@ -14746,13 +19656,16 @@ func (s PutBucketNotificationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest type PutBucketPolicyInput struct { _ struct{} `type:"structure" payload:"Policy"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Set this parameter to true to confirm that you want to remove your permissions + // to change this bucket policy in the future. + ConfirmRemoveSelfBucketAccess *bool `location:"header" locationName:"x-amz-confirm-remove-self-bucket-access" type:"boolean"` + // The bucket policy as a JSON document. 
// // Policy is a required field @@ -14775,6 +19688,9 @@ func (s *PutBucketPolicyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Policy == nil { invalidParams.Add(request.NewErrParamRequired("Policy")) } @@ -14791,13 +19707,25 @@ func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { return s } +func (s *PutBucketPolicyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetConfirmRemoveSelfBucketAccess sets the ConfirmRemoveSelfBucketAccess field's value. +func (s *PutBucketPolicyInput) SetConfirmRemoveSelfBucketAccess(v bool) *PutBucketPolicyInput { + s.ConfirmRemoveSelfBucketAccess = &v + return s +} + // SetPolicy sets the Policy field's value. func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { s.Policy = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput type PutBucketPolicyOutput struct { _ struct{} `type:"structure"` } @@ -14812,18 +19740,20 @@ func (s PutBucketPolicyOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest type PutBucketReplicationInput struct { _ struct{} `type:"structure" payload:"ReplicationConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. + // A container for replication rules. You can add up to 1,000 rules. The maximum + // size of a replication configuration is 2 MB. // // ReplicationConfiguration is a required field - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // A token that allows Amazon S3 object lock to be enabled for an existing bucket. + Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation @@ -14842,6 +19772,9 @@ func (s *PutBucketReplicationInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.ReplicationConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) } @@ -14863,13 +19796,25 @@ func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInp return s } +func (s *PutBucketReplicationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetReplicationConfiguration sets the ReplicationConfiguration field's value. func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { s.ReplicationConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput +// SetToken sets the Token field's value. 
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput { + s.Token = &v + return s +} + type PutBucketReplicationOutput struct { _ struct{} `type:"structure"` } @@ -14884,7 +19829,6 @@ func (s PutBucketReplicationOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest type PutBucketRequestPaymentInput struct { _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` @@ -14892,7 +19836,7 @@ type PutBucketRequestPaymentInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // RequestPaymentConfiguration is a required field - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14911,6 +19855,9 @@ func (s *PutBucketRequestPaymentInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.RequestPaymentConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) } @@ -14932,13 +19879,19 @@ func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaym return s } +func (s *PutBucketRequestPaymentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { s.RequestPaymentConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput type PutBucketRequestPaymentOutput struct { _ struct{} `type:"structure"` } @@ -14953,7 +19906,6 @@ func (s PutBucketRequestPaymentOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest type PutBucketTaggingInput struct { _ struct{} `type:"structure" payload:"Tagging"` @@ -14961,7 +19913,7 @@ type PutBucketTaggingInput struct { Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -14980,6 +19932,9 @@ func (s *PutBucketTaggingInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Tagging == nil { invalidParams.Add(request.NewErrParamRequired("Tagging")) } @@ -15001,13 +19956,19 @@ func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { return s } +func (s *PutBucketTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetTagging sets the Tagging field's value. 
func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { s.Tagging = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput type PutBucketTaggingOutput struct { _ struct{} `type:"structure"` } @@ -15022,7 +19983,6 @@ func (s PutBucketTaggingOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest type PutBucketVersioningInput struct { _ struct{} `type:"structure" payload:"VersioningConfiguration"` @@ -15033,8 +19993,12 @@ type PutBucketVersioningInput struct { // and the value that is displayed on your authentication device. MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + // Describes the versioning state of an Amazon S3 bucket. For more information, + // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) + // in the Amazon Simple Storage Service API Reference. + // // VersioningConfiguration is a required field - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15053,6 +20017,9 @@ func (s *PutBucketVersioningInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.VersioningConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) } @@ -15069,6 +20036,13 @@ func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput return s } +func (s *PutBucketVersioningInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetMFA sets the MFA field's value. func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { s.MFA = &v @@ -15081,7 +20055,6 @@ func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfi return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput type PutBucketVersioningOutput struct { _ struct{} `type:"structure"` } @@ -15096,15 +20069,16 @@ func (s PutBucketVersioningOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest type PutBucketWebsiteInput struct { _ struct{} `type:"structure" payload:"WebsiteConfiguration"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies website configuration parameters for an Amazon S3 bucket. 
+ // // WebsiteConfiguration is a required field - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` } // String returns the string representation @@ -15123,6 +20097,9 @@ func (s *PutBucketWebsiteInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.WebsiteConfiguration == nil { invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) } @@ -15144,13 +20121,19 @@ func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { return s } +func (s *PutBucketWebsiteInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetWebsiteConfiguration sets the WebsiteConfiguration field's value. func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { s.WebsiteConfiguration = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput type PutBucketWebsiteOutput struct { _ struct{} `type:"structure"` } @@ -15165,14 +20148,14 @@ func (s PutBucketWebsiteOutput) GoString() string { return s.String() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest type PutObjectAclInput struct { _ struct{} `type:"structure" payload:"AccessControlPolicy"` // The canned ACL to apply to the object. ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + // Contains the elements that set the ACL permissions for an object per grantee. + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` @@ -15222,6 +20205,9 @@ func (s *PutObjectAclInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -15258,6 +20244,13 @@ func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { return s } +func (s *PutObjectAclInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetGrantFullControl sets the GrantFullControl field's value. 
func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
 	s.GrantFullControl = &v
@@ -15306,7 +20299,6 @@ func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
 type PutObjectAclOutput struct {
 	_ struct{} `type:"structure"`
 
@@ -15331,7 +20323,6 @@ func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
 	return s
 }
 
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
 type PutObjectInput struct {
 	_ struct{} `type:"structure" payload:"Body"`
 
@@ -15364,11 +20355,16 @@ type PutObjectInput struct {
 	// body cannot be determined automatically.
 	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
 
+	// The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+	// auto-populated when using the command from the CLI. This parameter is required
+	// if object lock parameters are specified.
+	ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
 	// A standard MIME type describing the format of the object data.
 	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
 
 	// The date and time at which the object is no longer cacheable.
-	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
 
 	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
 	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
@@ -15390,6 +20386,15 @@ type PutObjectInput struct {
 	// A map of metadata to store with the object in S3.
 	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
 
+	// The Legal Hold status that you want to apply to the specified object.
+	ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
+
+	// The object lock mode that you want to apply to this object.
+	ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
+
+	// The date and time when you want this object's object lock to expire.
+	ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
+
 	// Confirms that the requester knows that she or he will be charged for the
 	// request. Bucket owners need not specify this parameter in their requests.
 	// Documentation on downloading objects from requester pays buckets can be found
@@ -15404,18 +20409,23 @@ type PutObjectInput struct {
 	// does not store the encryption key. The key must be appropriate for use with
 	// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
 	// header.
-	SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+	SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
 	// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 	// Amazon S3 uses this header for a message integrity check to ensure the encryption
 	// key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // Specifies the AWS KMS Encryption Context to use for object encryption. The + // value of this header is a base64-encoded UTF-8 string holding JSON with the + // encryption context key-value pairs. + SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT // requests for an object protected by AWS KMS will fail if not made via SSL // or using SigV4. Documentation on configuring any of the officially supported // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -15424,7 +20434,8 @@ type PutObjectInput struct { // The type of storage to use for the object. Defaults to 'STANDARD'. StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - // The tag-set for the object. The tag-set must be encoded as URL Query parameters + // The tag-set for the object. The tag-set must be encoded as URL Query parameters. + // (For example, "Key1=Value1") Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` // If the bucket is configured as a website, redirects requests for this object @@ -15449,6 +20460,9 @@ func (s *PutObjectInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -15480,6 +20494,13 @@ func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCacheControl sets the CacheControl field's value. func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { s.CacheControl = &v @@ -15510,6 +20531,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput { + s.ContentMD5 = &v + return s +} + // SetContentType sets the ContentType field's value. func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { s.ContentType = &v @@ -15558,6 +20585,24 @@ func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { return s } +// SetObjectLockLegalHoldStatus sets the ObjectLockLegalHoldStatus field's value. +func (s *PutObjectInput) SetObjectLockLegalHoldStatus(v string) *PutObjectInput { + s.ObjectLockLegalHoldStatus = &v + return s +} + +// SetObjectLockMode sets the ObjectLockMode field's value. +func (s *PutObjectInput) SetObjectLockMode(v string) *PutObjectInput { + s.ObjectLockMode = &v + return s +} + +// SetObjectLockRetainUntilDate sets the ObjectLockRetainUntilDate field's value. 
+func (s *PutObjectInput) SetObjectLockRetainUntilDate(v time.Time) *PutObjectInput { + s.ObjectLockRetainUntilDate = &v + return s +} + // SetRequestPayer sets the RequestPayer field's value. func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { s.RequestPayer = &v @@ -15576,12 +20621,25 @@ func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { return s } +func (s *PutObjectInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { s.SSECustomerKeyMD5 = &v return s } +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput { + s.SSEKMSEncryptionContext = &v + return s +} + // SetSSEKMSKeyId sets the SSEKMSKeyId field's value. func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { s.SSEKMSKeyId = &v @@ -15612,146 +20670,165 @@ func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput -type PutObjectOutput struct { - _ struct{} `type:"structure"` - - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +type PutObjectLegalHoldInput struct { + _ struct{} `type:"structure" payload:"LegalHold"` - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + // The bucket containing the object that you want to place a Legal Hold on. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + // The key name for the object that you want to place a Legal Hold on. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + // Container element for the Legal Hold configuration you want to apply to the + // specified object. 
+ LegalHold *ObjectLockLegalHold `locationName:"LegalHold" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - // Version of the object. - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // The version ID of the object that you want to place a Legal Hold on. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s PutObjectOutput) String() string { +func (s PutObjectLegalHoldInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectOutput) GoString() string { +func (s PutObjectLegalHoldInput) GoString() string { return s.String() } -// SetETag sets the ETag field's value. -func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { - s.ETag = &v - return s +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectLegalHoldInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLegalHoldInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetExpiration sets the Expiration field's value. -func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { - s.Expiration = &v +// SetBucket sets the Bucket field's value. +func (s *PutObjectLegalHoldInput) SetBucket(v string) *PutObjectLegalHoldInput { + s.Bucket = &v return s } -// SetRequestCharged sets the RequestCharged field's value. -func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { - s.RequestCharged = &v - return s +func (s *PutObjectLegalHoldInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. -func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { - s.SSECustomerAlgorithm = &v +// SetKey sets the Key field's value. +func (s *PutObjectLegalHoldInput) SetKey(v string) *PutObjectLegalHoldInput { + s.Key = &v return s } -// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. -func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { - s.SSECustomerKeyMD5 = &v +// SetLegalHold sets the LegalHold field's value. +func (s *PutObjectLegalHoldInput) SetLegalHold(v *ObjectLockLegalHold) *PutObjectLegalHoldInput { + s.LegalHold = v return s } -// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
-func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { - s.SSEKMSKeyId = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLegalHoldInput) SetRequestPayer(v string) *PutObjectLegalHoldInput { + s.RequestPayer = &v return s } -// SetServerSideEncryption sets the ServerSideEncryption field's value. -func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { - s.ServerSideEncryption = &v +// SetVersionId sets the VersionId field's value. +func (s *PutObjectLegalHoldInput) SetVersionId(v string) *PutObjectLegalHoldInput { + s.VersionId = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { - s.VersionId = &v +type PutObjectLegalHoldOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectLegalHoldOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectLegalHoldOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHoldOutput { + s.RequestCharged = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest -type PutObjectTaggingInput struct { - _ struct{} `type:"structure" payload:"Tagging"` +type PutObjectLockConfigurationInput struct { + _ struct{} `type:"structure" payload:"ObjectLockConfiguration"` + // The bucket whose object lock configuration you want to create or replace. + // // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + // The object lock configuration that you want to apply to the specified bucket. + ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` - // Tagging is a required field - Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // A token to allow Amazon S3 object lock to be enabled for an existing bucket. 
+ Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"` } // String returns the string representation -func (s PutObjectTaggingInput) String() string { +func (s PutObjectLockConfigurationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingInput) GoString() string { +func (s PutObjectLockConfigurationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *PutObjectTaggingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} +func (s *PutObjectLockConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectLockConfigurationInput"} if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) - } - if s.Tagging == nil { - invalidParams.Add(request.NewErrParamRequired("Tagging")) - } - if s.Tagging != nil { - if err := s.Tagging.Validate(); err != nil { - invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) - } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) } if invalidParams.Len() > 0 { @@ -15761,410 +20838,2117 @@ func (s *PutObjectTaggingInput) Validate() error { } // SetBucket sets the Bucket field's value. -func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { +func (s *PutObjectLockConfigurationInput) SetBucket(v string) *PutObjectLockConfigurationInput { s.Bucket = &v return s } -// SetKey sets the Key field's value. -func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { - s.Key = &v +func (s *PutObjectLockConfigurationInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetObjectLockConfiguration sets the ObjectLockConfiguration field's value. +func (s *PutObjectLockConfigurationInput) SetObjectLockConfiguration(v *ObjectLockConfiguration) *PutObjectLockConfigurationInput { + s.ObjectLockConfiguration = v return s } -// SetTagging sets the Tagging field's value. -func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { - s.Tagging = v +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectLockConfigurationInput) SetRequestPayer(v string) *PutObjectLockConfigurationInput { + s.RequestPayer = &v return s } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { - s.VersionId = &v +// SetToken sets the Token field's value. +func (s *PutObjectLockConfigurationInput) SetToken(v string) *PutObjectLockConfigurationInput { + s.Token = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput -type PutObjectTaggingOutput struct { +type PutObjectLockConfigurationOutput struct { _ struct{} `type:"structure"` - VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` } // String returns the string representation -func (s PutObjectTaggingOutput) String() string { +func (s PutObjectLockConfigurationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s PutObjectTaggingOutput) GoString() string { +func (s PutObjectLockConfigurationOutput) GoString() string { return s.String() } -// SetVersionId sets the VersionId field's value. -func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { - s.VersionId = &v +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjectLockConfigurationOutput { + s.RequestCharged = &v return s } -// Container for specifying an configuration when you want Amazon S3 to publish -// events to an Amazon Simple Queue Service (Amazon SQS) queue. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration -type QueueConfiguration struct { +type PutObjectOutput struct { _ struct{} `type:"structure"` - // Events is a required field - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) - Filter *NotificationConfigurationFilter `type:"structure"` + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects - // events of specified type. - // - // QueueArn is a required field - QueueArn *string `locationName:"Queue" type:"string" required:"true"` + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the AWS KMS Encryption Context to use for object encryption. + // The value of this header is a base64-encoded UTF-8 string holding JSON with + // the encryption context key-value pairs. 
+ SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` } // String returns the string representation -func (s QueueConfiguration) String() string { +func (s PutObjectOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueConfiguration) GoString() string { +func (s PutObjectOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *QueueConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} - if s.Events == nil { - invalidParams.Add(request.NewErrParamRequired("Events")) - } - if s.QueueArn == nil { - invalidParams.Add(request.NewErrParamRequired("QueueArn")) - } +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s } -// SetEvents sets the Events field's value. -func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { - s.Events = v +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v return s } -// SetFilter sets the Filter field's value. -func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { - s.Filter = v +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v return s } -// SetId sets the Id field's value. -func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { - s.Id = &v +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v return s } -// SetQueueArn sets the QueueArn field's value. -func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { - s.QueueArn = &v +// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value. +func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput { + s.SSEKMSEncryptionContext = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated -type QueueConfigurationDeprecated struct { - _ struct{} `type:"structure"` +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} - // Bucket event for which to send notifications. 
- Event *string `deprecated:"true" type:"string" enum:"Event"` +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput { + s.ServerSideEncryption = &v + return s +} - Events []*string `locationName:"Event" type:"list" flattened:"true"` +// SetVersionId sets the VersionId field's value. +func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput { + s.VersionId = &v + return s +} - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - Id *string `type:"string"` +type PutObjectRetentionInput struct { + _ struct{} `type:"structure" payload:"Retention"` - Queue *string `type:"string"` + // The bucket that contains the object you want to apply this Object Retention + // configuration to. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Indicates whether this operation should bypass Governance-mode restrictions. + BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"` + + // The key name for the object that you want to apply this Object Retention + // configuration to. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // The container element for the Object Retention configuration. + Retention *ObjectLockRetention `locationName:"Retention" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The version ID for the object that you want to apply this Object Retention + // configuration to. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` } // String returns the string representation -func (s QueueConfigurationDeprecated) String() string { +func (s PutObjectRetentionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s QueueConfigurationDeprecated) GoString() string { +func (s PutObjectRetentionInput) GoString() string { return s.String() } -// SetEvent sets the Event field's value. -func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { - s.Event = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectRetentionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectRetentionInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value.
+func (s *PutObjectRetentionInput) SetBucket(v string) *PutObjectRetentionInput { + s.Bucket = &v return s } -// SetEvents sets the Events field's value. -func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { - s.Events = v +func (s *PutObjectRetentionInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetBypassGovernanceRetention sets the BypassGovernanceRetention field's value. +func (s *PutObjectRetentionInput) SetBypassGovernanceRetention(v bool) *PutObjectRetentionInput { + s.BypassGovernanceRetention = &v return s } -// SetId sets the Id field's value. -func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { - s.Id = &v +// SetKey sets the Key field's value. +func (s *PutObjectRetentionInput) SetKey(v string) *PutObjectRetentionInput { + s.Key = &v return s } -// SetQueue sets the Queue field's value. -func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { - s.Queue = &v +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectRetentionInput) SetRequestPayer(v string) *PutObjectRetentionInput { + s.RequestPayer = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect -type Redirect struct { - _ struct{} `type:"structure"` +// SetRetention sets the Retention field's value. +func (s *PutObjectRetentionInput) SetRetention(v *ObjectLockRetention) *PutObjectRetentionInput { + s.Retention = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectRetentionInput) SetVersionId(v string) *PutObjectRetentionInput { + s.VersionId = &v + return s +} + +type PutObjectRetentionOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectRetentionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectRetentionOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectRetentionOutput) SetRequestCharged(v string) *PutObjectRetentionOutput { + s.RequestCharged = &v + return s +} + +type PutObjectTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput { + s.Bucket = &v + return s +} + +func (s *PutObjectTaggingInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput { + s.Key = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput { + s.Tagging = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput { + s.VersionId = &v + return s +} + +type PutObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput { + s.VersionId = &v + return s +} + +type PutPublicAccessBlockInput struct { + _ struct{} `type:"structure" payload:"PublicAccessBlockConfiguration"` + + // The name of the Amazon S3 bucket whose PublicAccessBlock configuration you + // want to set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The PublicAccessBlock configuration that you want to apply to this Amazon + // S3 bucket. You can enable the configuration options in any combination. For + // more information about when Amazon S3 considers a bucket or object public, + // see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) + // in the Amazon Simple Storage Service Developer Guide. 
+ // + // PublicAccessBlockConfiguration is a required field + PublicAccessBlockConfiguration *PublicAccessBlockConfiguration `locationName:"PublicAccessBlockConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` +} + +// String returns the string representation +func (s PutPublicAccessBlockInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutPublicAccessBlockInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutPublicAccessBlockInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.PublicAccessBlockConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("PublicAccessBlockConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutPublicAccessBlockInput) SetBucket(v string) *PutPublicAccessBlockInput { + s.Bucket = &v + return s +} + +func (s *PutPublicAccessBlockInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetPublicAccessBlockConfiguration sets the PublicAccessBlockConfiguration field's value. +func (s *PutPublicAccessBlockInput) SetPublicAccessBlockConfiguration(v *PublicAccessBlockConfiguration) *PutPublicAccessBlockInput { + s.PublicAccessBlockConfiguration = v + return s +} + +type PutPublicAccessBlockOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutPublicAccessBlockOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutPublicAccessBlockOutput) GoString() string { + return s.String() +} + +// Specifies the configuration for publishing messages to an Amazon Simple Queue +// Service (Amazon SQS) queue when Amazon S3 detects specified events. +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + Filter *NotificationConfigurationFilter `type:"structure"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3 + // publishes a message when it detects events of the specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // The bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // An optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated { + s.Id = &v + return s +} + +// SetQueue sets the Queue field's value. +func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated { + s.Queue = &v + return s +} + +type RecordsEvent struct { + _ struct{} `locationName:"RecordsEvent" type:"structure" payload:"Payload"` + + // A byte array containing one or more partial result records. + // + // Payload is automatically base64 encoded/decoded by the SDK. + Payload []byte `type:"blob"` +} + +// String returns the string representation +func (s RecordsEvent) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecordsEvent) GoString() string { + return s.String() +} + +// SetPayload sets the Payload field's value. +func (s *RecordsEvent) SetPayload(v []byte) *RecordsEvent { + s.Payload = v + return s +} + +// The RecordsEvent is an event in the SelectObjectContentEventStream group of events. +func (s *RecordsEvent) eventSelectObjectContentEventStream() {} + +// UnmarshalEvent unmarshals the EventStream Message into the RecordsEvent value. +// This method is only used internally within the SDK's EventStream handling.
+func (s *RecordsEvent) UnmarshalEvent( + payloadUnmarshaler protocol.PayloadUnmarshaler, + msg eventstream.Message, +) error { + s.Payload = make([]byte, len(msg.Payload)) + copy(s.Payload, msg.Payload) + return nil +} + +// Specifies how requests are redirected. In the event of an error, you can +// specify a different error code to return. +type Redirect struct { + _ struct{} `type:"structure"` + + // The host name to use in the redirect request. + HostName *string `type:"string"` + + // The HTTP redirect code to use on the response. Not required if one of the + // siblings is present. + HttpRedirectCode *string `type:"string"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` + + // The object key prefix to use in the redirect request. For example, to redirect + // requests for all pages with prefix docs/ (objects in the docs/ folder) to + // documents/, you can set a condition block with KeyPrefixEquals set to docs/ + // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required + // if one of the siblings is present. Can be present only if ReplaceKeyWith + // is not provided. + ReplaceKeyPrefixWith *string `type:"string"` + + // The specific object key to use in the redirect request. For example, redirect + // request to error.html. Not required if one of the siblings is present. Can + // be present only if ReplaceKeyPrefixWith is not provided. + ReplaceKeyWith *string `type:"string"` +} + +// String returns the string representation +func (s Redirect) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Redirect) GoString() string { + return s.String() +} + +// SetHostName sets the HostName field's value. +func (s *Redirect) SetHostName(v string) *Redirect { + s.HostName = &v + return s +} + +// SetHttpRedirectCode sets the HttpRedirectCode field's value. +func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { + s.HttpRedirectCode = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *Redirect) SetProtocol(v string) *Redirect { + s.Protocol = &v + return s +} + +// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. +func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { + s.ReplaceKeyPrefixWith = &v + return s +} + +// SetReplaceKeyWith sets the ReplaceKeyWith field's value. +func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { + s.ReplaceKeyWith = &v + return s +} + +// Specifies the redirect behavior of all requests to a website endpoint of +// an Amazon S3 bucket. +type RedirectAllRequestsTo struct { + _ struct{} `type:"structure"` + + // Name of the host where requests are redirected. + // + // HostName is a required field + HostName *string `type:"string" required:"true"` + + // Protocol to use when redirecting requests. The default is the protocol that + // is used in the original request. + Protocol *string `type:"string" enum:"Protocol"` +} + +// String returns the string representation +func (s RedirectAllRequestsTo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedirectAllRequestsTo) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// A container for replication rules. You can add up to 1,000 rules. The maximum +// size of a replication configuration is 2 MB. +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that Amazon S3 assumes when replicating objects. For more information, + // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html) + // in the Amazon Simple Storage Service Developer Guide. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // A container for one or more replication rules. A replication configuration + // must have at least one rule and can contain a maximum of 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Specifies which Amazon S3 objects to replicate and where to store the replicas. +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 should replicate delete markers. + DeleteMarkerReplication *DeleteMarkerReplication `type:"structure"` + + // A container for information about the replication destination. + // + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // A filter that identifies the subset of objects to which the replication rule + // applies. A Filter must specify exactly one Prefix, Tag, or an And child element. + Filter *ReplicationRuleFilter `type:"structure"` + + // A unique identifier for the rule.
The maximum value is 255 characters. + ID *string `type:"string"` + + // An object keyname prefix that identifies the object or objects to which the + // rule applies. The maximum prefix length is 1,024 characters. To include all + // objects in a bucket, specify an empty string. + // + // Deprecated: Prefix has been deprecated + Prefix *string `deprecated:"true" type:"string"` + + // The priority associated with the rule. If you specify multiple rules in a + // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts + // when filtering. If two or more rules identify the same object based on a + // specified filter, the rule with higher priority takes precedence. For example: + // + // * The same object qualifies under prefix-based filter criteria specified + // in multiple rules whose prefixes overlap + // + // * The same object qualifies under tag-based filter criteria specified + // in multiple rules + // + // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html) + // in the Amazon S3 Developer Guide. + Priority *int64 `type:"integer"` + + // A container that describes additional filters for identifying the source + // objects that you want to replicate. You can choose to enable or disable the + // replication of these objects. Currently, Amazon S3 supports only the filter + // that you can specify for objects created with server-side encryption using + // an AWS KMS-Managed Key (SSE-KMS). + SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"` + + // Specifies whether the rule is enabled. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.SourceSelectionCriteria != nil { + if err := s.SourceSelectionCriteria.Validate(); err != nil { + invalidParams.AddNested("SourceSelectionCriteria", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDeleteMarkerReplication sets the DeleteMarkerReplication field's value. +func (s *ReplicationRule) SetDeleteMarkerReplication(v *DeleteMarkerReplication) *ReplicationRule { + s.DeleteMarkerReplication = v + return s +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *ReplicationRule) SetFilter(v *ReplicationRuleFilter) *ReplicationRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetPriority sets the Priority field's value. +func (s *ReplicationRule) SetPriority(v int64) *ReplicationRule { + s.Priority = &v + return s +} + +// SetSourceSelectionCriteria sets the SourceSelectionCriteria field's value. +func (s *ReplicationRule) SetSourceSelectionCriteria(v *SourceSelectionCriteria) *ReplicationRule { + s.SourceSelectionCriteria = v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +type ReplicationRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ReplicationRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleAndOperator) SetPrefix(v string) *ReplicationRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *ReplicationRuleAndOperator) SetTags(v []*Tag) *ReplicationRuleAndOperator { + s.Tags = v + return s +} + +// A filter that identifies the subset of objects to which the replication rule +// applies. A Filter must specify exactly one Prefix, Tag, or an And child element. +type ReplicationRuleFilter struct { + _ struct{} `type:"structure"` + + // A container for specifying rule filters. The filters determine the subset + // of objects to which the rule applies. This element is required only if you + // specify more than one filter. For example: + // + // * If you specify both a Prefix and a Tag filter, wrap these filters in + // an And tag. + // + // * If you specify a filter based on multiple tags, wrap the Tag elements + // in an And tag. + And *ReplicationRuleAndOperator `type:"structure"` + + // An object keyname prefix that identifies the subset of objects to which the + // rule applies. + Prefix *string `type:"string"` + + // A container for specifying a tag key and value. + // + // The rule applies only to objects that have the tag in their tag set. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s ReplicationRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReplicationRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *ReplicationRuleFilter) SetAnd(v *ReplicationRuleAndOperator) *ReplicationRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRuleFilter) SetPrefix(v string) *ReplicationRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *ReplicationRuleFilter) SetTag(v *Tag) *ReplicationRuleFilter { + s.Tag = v + return s +} + +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +type RequestProgress struct { + _ struct{} `type:"structure"` + + // Specifies whether periodic QueryProgress frames should be sent. Valid values: + // TRUE, FALSE. Default value: FALSE. + Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s RequestProgress) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestProgress) GoString() string { + return s.String() +} + +// SetEnabled sets the Enabled field's value. +func (s *RequestProgress) SetEnabled(v bool) *RequestProgress { + s.Enabled = &v + return s +} + +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Container for restore job parameters. 
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.RestoreRequest != nil { + if err := s.RestoreRequest.Validate(); err != nil { + invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { + s.Bucket = &v + return s +} + +func (s *RestoreObjectInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + +// SetKey sets the Key field's value. +func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { + s.RequestPayer = &v + return s +} + +// SetRestoreRequest sets the RestoreRequest field's value. +func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { + s.RestoreRequest = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { + s.VersionId = &v + return s +} + +type RestoreObjectOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Indicates the path in the provided S3 output location where Select results + // will be restored to. + RestoreOutputPath *string `location:"header" locationName:"x-amz-restore-output-path" type:"string"` +} + +// String returns the string representation +func (s RestoreObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreObjectOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestoreOutputPath sets the RestoreOutputPath field's value. +func (s *RestoreObjectOutput) SetRestoreOutputPath(v string) *RestoreObjectOutput { + s.RestoreOutputPath = &v + return s +} + +// Container for restore job parameters. +type RestoreRequest struct { + _ struct{} `type:"structure"` + + // Lifetime of the active copy in days. Do not use with restores that specify + // OutputLocation. 
+ Days *int64 `type:"integer"` + + // The optional description for the job. + Description *string `type:"string"` + + // Glacier related parameters pertaining to this job. Do not use with restores + // that specify OutputLocation. + GlacierJobParameters *GlacierJobParameters `type:"structure"` + + // Describes the location where the restore job's output is stored. + OutputLocation *OutputLocation `type:"structure"` + + // Describes the parameters for Select job types. + SelectParameters *SelectParameters `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. + Tier *string `type:"string" enum:"Tier"` + + // Type of restore request. + Type *string `type:"string" enum:"RestoreRequestType"` +} + +// String returns the string representation +func (s RestoreRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RestoreRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} + if s.GlacierJobParameters != nil { + if err := s.GlacierJobParameters.Validate(); err != nil { + invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) + } + } + if s.OutputLocation != nil { + if err := s.OutputLocation.Validate(); err != nil { + invalidParams.AddNested("OutputLocation", err.(request.ErrInvalidParams)) + } + } + if s.SelectParameters != nil { + if err := s.SelectParameters.Validate(); err != nil { + invalidParams.AddNested("SelectParameters", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDays sets the Days field's value. +func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { + s.Days = &v + return s +} + +// SetDescription sets the Description field's value. +func (s *RestoreRequest) SetDescription(v string) *RestoreRequest { + s.Description = &v + return s +} + +// SetGlacierJobParameters sets the GlacierJobParameters field's value. +func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { + s.GlacierJobParameters = v + return s +} + +// SetOutputLocation sets the OutputLocation field's value. +func (s *RestoreRequest) SetOutputLocation(v *OutputLocation) *RestoreRequest { + s.OutputLocation = v + return s +} + +// SetSelectParameters sets the SelectParameters field's value. +func (s *RestoreRequest) SetSelectParameters(v *SelectParameters) *RestoreRequest { + s.SelectParameters = v + return s +} + +// SetTier sets the Tier field's value. +func (s *RestoreRequest) SetTier(v string) *RestoreRequest { + s.Tier = &v + return s +} + +// SetType sets the Type field's value. +func (s *RestoreRequest) SetType(v string) *RestoreRequest { + s.Type = &v + return s +} + +// Specifies the redirect behavior and when a redirect is applied. +type RoutingRule struct { + _ struct{} `type:"structure"` + + // A container for describing a condition that must be met for the specified + // redirect to apply. For example, 1. If request is for pages in the /docs folder, + // redirect to the /documents folder. 2. If request results in HTTP error 4xx, + // redirect request to another host where you might process the error. + Condition *Condition `type:"structure"` + + // Container for redirect information. You can redirect requests to another + // host, to another page, or with another protocol. 
In the event of an error, + // you can specify a different error code to return. + // + // Redirect is a required field + Redirect *Redirect `type:"structure" required:"true"` +} + +// String returns the string representation +func (s RoutingRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RoutingRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RoutingRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} + if s.Redirect == nil { + invalidParams.Add(request.NewErrParamRequired("Redirect")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCondition sets the Condition field's value. +func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { + s.Condition = v + return s +} + +// SetRedirect sets the Redirect field's value. +func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { + s.Redirect = v + return s +} + +// Specifies lifecycle rules for an Amazon S3 bucket. For more information, +// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html) +// in the Amazon Simple Storage Service API Reference. +type Rule struct { + _ struct{} `type:"structure"` - // The host name to use in the redirect request. - HostName *string `type:"string"` + // Specifies the days since the initiation of an incomplete multipart upload + // that Amazon S3 will wait before permanently removing all parts of the upload. + // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket + // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config) + // in the Amazon Simple Storage Service Developer Guide. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HttpRedirectCode *string `type:"string"` + Expiration *LifecycleExpiration `type:"structure"` - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` + // Unique identifier for the rule. The value can't be longer than 255 characters. + ID *string `type:"string"` - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - ReplaceKeyPrefixWith *string `type:"string"` + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. 
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, + // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning + // is suspended), you can set this action to request that Amazon S3 transition + // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, + // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's + // lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Object key prefix that identifies one or more objects to which this rule + // applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If Enabled, the rule is currently being applied. If Disabled, the rule is + // not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + // Specifies when an object transitions to a specified storage class. + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Specifies the use of SSE-KMS to encrypt delivered Inventory reports. +type SSEKMS struct { + _ struct{} `locationName:"SSE-KMS" type:"structure"` + + // Specifies the ID of the AWS Key Management Service (KMS) master encryption + // key to use for encrypting Inventory reports. 
+ // + // KeyId is a required field + KeyId *string `type:"string" required:"true" sensitive:"true"` +} + +// String returns the string representation +func (s SSEKMS) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSEKMS) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SSEKMS) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SSEKMS"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKeyId sets the KeyId field's value. +func (s *SSEKMS) SetKeyId(v string) *SSEKMS { + s.KeyId = &v + return s +} + +// Specifies the use of SSE-S3 to encrypt delivered Inventory reports. +type SSES3 struct { + _ struct{} `locationName:"SSE-S3" type:"structure"` +} + +// String returns the string representation +func (s SSES3) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SSES3) GoString() string { + return s.String() +} + +// SelectObjectContentEventStream provides handling of EventStreams for +// the SelectObjectContent API. +// +// Use this type to receive SelectObjectContentEventStream events. The events +// can be read from the Events channel member. +// +// The events that can be received are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStream struct { + // Reader is the EventStream reader for the SelectObjectContentEventStream + // events. This value is automatically set by the SDK when the API call is made. + // Use this member when unit testing your code with the SDK to mock out the + // EventStream Reader. + // + // Must not be nil. + Reader SelectObjectContentEventStreamReader + + // StreamCloser is the io.Closer for the EventStream connection. For HTTP + // EventStream this is the response Body. The stream will be closed when + // the Close method of the EventStream is called. + StreamCloser io.Closer +} + +// Close closes the EventStream. This will also cause the Events channel to be +// closed. You can use the closing of the Events channel to terminate your +// application's read from the API's EventStream. +// +// Will close the underlying EventStream reader. For EventStream over HTTP +// connection this will also close the HTTP connection. +// +// Close must be called when done using the EventStream API. Not calling Close +// may result in resource leaks. +func (es *SelectObjectContentEventStream) Close() (err error) { + es.Reader.Close() + return es.Err() +} + +// Err returns any error that occurred while reading EventStream Events from +// the service API's response. Returns nil if there were no errors. +func (es *SelectObjectContentEventStream) Err() error { + if err := es.Reader.Err(); err != nil { + return err + } + es.StreamCloser.Close() + + return nil +} + +// Events returns a channel to read EventStream Events from the +// SelectObjectContent API. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +func (es *SelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return es.Reader.Events() +} + +// SelectObjectContentEventStreamEvent groups together all EventStream +// events read from the SelectObjectContent API.
+// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamEvent interface { + eventSelectObjectContentEventStream() +} + +// SelectObjectContentEventStreamReader provides the interface for reading EventStream +// Events from the SelectObjectContent API. The +// default implementation for this interface will be SelectObjectContentEventStream. +// +// The reader's Close method must allow multiple concurrent calls. +// +// These events are: +// +// * ContinuationEvent +// * EndEvent +// * ProgressEvent +// * RecordsEvent +// * StatsEvent +type SelectObjectContentEventStreamReader interface { + // Returns a channel of events as they are read from the event stream. + Events() <-chan SelectObjectContentEventStreamEvent + + // Close will close the underlying event stream reader. For event stream over + // HTTP this will also close the HTTP connection. + Close() error + + // Returns any error that has occurred while reading from the event stream. + Err() error +} + +type readSelectObjectContentEventStream struct { + eventReader *eventstreamapi.EventReader + stream chan SelectObjectContentEventStreamEvent + errVal atomic.Value + + done chan struct{} + closeOnce sync.Once +} + +func newReadSelectObjectContentEventStream( + reader io.ReadCloser, + unmarshalers request.HandlerList, + logger aws.Logger, + logLevel aws.LogLevelType, +) *readSelectObjectContentEventStream { + r := &readSelectObjectContentEventStream{ + stream: make(chan SelectObjectContentEventStreamEvent), + done: make(chan struct{}), + } + + r.eventReader = eventstreamapi.NewEventReader( + reader, + protocol.HandlerPayloadUnmarshal{ + Unmarshalers: unmarshalers, + }, + r.unmarshalerForEventType, + ) + r.eventReader.UseLogger(logger, logLevel) + + return r +} + +// Close will close the underlying event stream reader. For EventStream over +// HTTP this will also close the HTTP connection. 
+func (r *readSelectObjectContentEventStream) Close() error { + r.closeOnce.Do(r.safeClose) + + return r.Err() +} + +func (r *readSelectObjectContentEventStream) safeClose() { + close(r.done) + err := r.eventReader.Close() + if err != nil { + r.errVal.Store(err) + } +} + +func (r *readSelectObjectContentEventStream) Err() error { + if v := r.errVal.Load(); v != nil { + return v.(error) + } + + return nil +} + +func (r *readSelectObjectContentEventStream) Events() <-chan SelectObjectContentEventStreamEvent { + return r.stream +} + +func (r *readSelectObjectContentEventStream) readEventStream() { + defer close(r.stream) + + for { + event, err := r.eventReader.ReadEvent() + if err != nil { + if err == io.EOF { + return + } + select { + case <-r.done: + // If closed already ignore the error + return + default: + } + r.errVal.Store(err) + return + } + + select { + case r.stream <- event.(SelectObjectContentEventStreamEvent): + case <-r.done: + return + } + } +} + +func (r *readSelectObjectContentEventStream) unmarshalerForEventType( + eventType string, +) (eventstreamapi.Unmarshaler, error) { + switch eventType { + case "Cont": + return &ContinuationEvent{}, nil + + case "End": + return &EndEvent{}, nil + + case "Progress": + return &ProgressEvent{}, nil + + case "Records": + return &RecordsEvent{}, nil + + case "Stats": + return &StatsEvent{}, nil + default: + return nil, awserr.New( + request.ErrCodeSerialization, + fmt.Sprintf("unknown event type name, %s, for SelectObjectContentEventStream", eventType), + nil, + ) + } +} + +// Request to filter the contents of an Amazon S3 object based on a simple Structured +// Query Language (SQL) statement. In the request, along with the SQL expression, +// you must specify a data serialization format (JSON or CSV) of the object. +// Amazon S3 uses this to parse object data into records. It returns only records +// that match the specified SQL expression. You must also specify the data serialization +// format for the response. For more information, see S3Select API Documentation +// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html). +type SelectObjectContentInput struct { + _ struct{} `locationName:"SelectObjectContentRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"` + + // The S3 bucket. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The expression that is used to query the object. + // + // Expression is a required field + Expression *string `type:"string" required:"true"` + + // The type of the provided expression (for example, SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` + + // Describes the format of the data in the object that is being queried. + // + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` + + // The object key. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Describes the format of the data that you want Amazon S3 to return in response. + // + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` + + // Specifies if periodic request progress information should be enabled. + RequestProgress *RequestProgress `type:"structure"` + + // The SSE Algorithm used to encrypt the object.
For more information, see Server-Side + // Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // The SSE Customer Key. For more information, see Server-Side Encryption (Using + // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` + + // The SSE Customer Key MD5. For more information, see Server-Side Encryption + // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html). + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` +} + +// String returns the string representation +func (s SelectObjectContentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectObjectContentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectObjectContentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectObjectContentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) + } + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) + } + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) + } - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. - ReplaceKeyWith *string `type:"string"` + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// String returns the string representation -func (s Redirect) String() string { - return awsutil.Prettify(s) +// SetBucket sets the Bucket field's value. +func (s *SelectObjectContentInput) SetBucket(v string) *SelectObjectContentInput { + s.Bucket = &v + return s } -// GoString returns the string representation -func (s Redirect) GoString() string { - return s.String() +func (s *SelectObjectContentInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket } -// SetHostName sets the HostName field's value. -func (s *Redirect) SetHostName(v string) *Redirect { - s.HostName = &v +// SetExpression sets the Expression field's value. +func (s *SelectObjectContentInput) SetExpression(v string) *SelectObjectContentInput { + s.Expression = &v return s } -// SetHttpRedirectCode sets the HttpRedirectCode field's value. 
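With the required fields and the `Validate` method in view, assembling a request might look like the sketch below. The bucket and key are hypothetical, and the `CSVInput`/`CSVOutput` serialization shapes are assumed from their definitions elsewhere in this file.

```go
package sample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// buildSelectInput assembles a CSV-over-SQL request; Validate catches any
// missing required field before a network call is attempted.
func buildSelectInput() (*s3.SelectObjectContentInput, error) {
	input := &s3.SelectObjectContentInput{
		Bucket:         aws.String("example-bucket"),     // hypothetical
		Key:            aws.String("data/users.csv"),     // hypothetical
		Expression:     aws.String("SELECT s.name FROM S3Object s"),
		ExpressionType: aws.String(s3.ExpressionTypeSql),
		InputSerialization: &s3.InputSerialization{
			CSV: &s3.CSVInput{FileHeaderInfo: aws.String(s3.FileHeaderInfoUse)},
		},
		OutputSerialization: &s3.OutputSerialization{
			CSV: &s3.CSVOutput{},
		},
	}
	if err := input.Validate(); err != nil {
		return nil, err
	}
	return input, nil
}
```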
-func (s *Redirect) SetHttpRedirectCode(v string) *Redirect { - s.HttpRedirectCode = &v +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectObjectContentInput) SetExpressionType(v string) *SelectObjectContentInput { + s.ExpressionType = &v return s } -// SetProtocol sets the Protocol field's value. -func (s *Redirect) SetProtocol(v string) *Redirect { - s.Protocol = &v +// SetInputSerialization sets the InputSerialization field's value. +func (s *SelectObjectContentInput) SetInputSerialization(v *InputSerialization) *SelectObjectContentInput { + s.InputSerialization = v return s } -// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value. -func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect { - s.ReplaceKeyPrefixWith = &v +// SetKey sets the Key field's value. +func (s *SelectObjectContentInput) SetKey(v string) *SelectObjectContentInput { + s.Key = &v return s } -// SetReplaceKeyWith sets the ReplaceKeyWith field's value. -func (s *Redirect) SetReplaceKeyWith(v string) *Redirect { - s.ReplaceKeyWith = &v +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectObjectContentInput) SetOutputSerialization(v *OutputSerialization) *SelectObjectContentInput { + s.OutputSerialization = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo -type RedirectAllRequestsTo struct { - _ struct{} `type:"structure"` - - // Name of the host where requests will be redirected. - // - // HostName is a required field - HostName *string `type:"string" required:"true"` - - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` +// SetRequestProgress sets the RequestProgress field's value. +func (s *SelectObjectContentInput) SetRequestProgress(v *RequestProgress) *SelectObjectContentInput { + s.RequestProgress = v + return s } -// String returns the string representation -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *SelectObjectContentInput) SetSSECustomerAlgorithm(v string) *SelectObjectContentInput { + s.SSECustomerAlgorithm = &v + return s } -// GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { - return s.String() +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *SelectObjectContentInput) SetSSECustomerKey(v string) *SelectObjectContentInput { + s.SSECustomerKey = &v + return s } -// Validate inspects the fields of the type to determine if they are valid. -func (s *RedirectAllRequestsTo) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} - if s.HostName == nil { - invalidParams.Add(request.NewErrParamRequired("HostName")) - } - - if invalidParams.Len() > 0 { - return invalidParams +func (s *SelectObjectContentInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v } - return nil -} - -// SetHostName sets the HostName field's value. -func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { - s.HostName = &v - return s + return *s.SSECustomerKey } -// SetProtocol sets the Protocol field's value. -func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { - s.Protocol = &v +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
+func (s *SelectObjectContentInput) SetSSECustomerKeyMD5(v string) *SelectObjectContentInput { + s.SSECustomerKeyMD5 = &v return s } -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration -type ReplicationConfiguration struct { - _ struct{} `type:"structure"` - - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. - // - // Role is a required field - Role *string `type:"string" required:"true"` +type SelectObjectContentOutput struct { + _ struct{} `type:"structure" payload:"Payload"` - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. - // - // Rules is a required field - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` + // Use EventStream to use the API's stream. + EventStream *SelectObjectContentEventStream `type:"structure"` } // String returns the string representation -func (s ReplicationConfiguration) String() string { +func (s SelectObjectContentOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { +func (s SelectObjectContentOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} - if s.Role == nil { - invalidParams.Add(request.NewErrParamRequired("Role")) - } - if s.Rules == nil { - invalidParams.Add(request.NewErrParamRequired("Rules")) - } - if s.Rules != nil { - for i, v := range s.Rules { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetRole sets the Role field's value. -func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { - s.Role = &v +// SetEventStream sets the EventStream field's value. +func (s *SelectObjectContentOutput) SetEventStream(v *SelectObjectContentEventStream) *SelectObjectContentOutput { + s.EventStream = v return s } -// SetRules sets the Rules field's value. -func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { - s.Rules = v - return s +func (s *SelectObjectContentOutput) runEventStreamLoop(r *request.Request) { + if r.Error != nil { + return + } + reader := newReadSelectObjectContentEventStream( + r.HTTPResponse.Body, + r.Handlers.UnmarshalStream, + r.Config.Logger, + r.Config.LogLevel.Value(), + ) + go reader.readEventStream() + + eventStream := &SelectObjectContentEventStream{ + StreamCloser: r.HTTPResponse.Body, + Reader: reader, + } + s.EventStream = eventStream } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule -type ReplicationRule struct { +// Describes the parameters for Select job types. +type SelectParameters struct { _ struct{} `type:"structure"` - // Destination is a required field - Destination *Destination `type:"structure" required:"true"` + // The expression that is used to query the object. 
+ // + // Expression is a required field + Expression *string `type:"string" required:"true"` - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` + // The type of the provided expression (e.g., SQL). + // + // ExpressionType is a required field + ExpressionType *string `type:"string" required:"true" enum:"ExpressionType"` - // Object keyname prefix identifying one or more objects to which the rule applies. - // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes - // are not supported. + // Describes the serialization format of the object. // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` + // InputSerialization is a required field + InputSerialization *InputSerialization `type:"structure" required:"true"` - // The rule is ignored if status is not Enabled. + // Describes how the results of the Select job are serialized. // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` + // OutputSerialization is a required field + OutputSerialization *OutputSerialization `type:"structure" required:"true"` } // String returns the string representation -func (s ReplicationRule) String() string { +func (s SelectParameters) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s ReplicationRule) GoString() string { +func (s SelectParameters) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *ReplicationRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) +func (s *SelectParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectParameters"} + if s.Expression == nil { + invalidParams.Add(request.NewErrParamRequired("Expression")) } - if s.Prefix == nil { - invalidParams.Add(request.NewErrParamRequired("Prefix")) + if s.ExpressionType == nil { + invalidParams.Add(request.NewErrParamRequired("ExpressionType")) } - if s.Status == nil { - invalidParams.Add(request.NewErrParamRequired("Status")) + if s.InputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("InputSerialization")) } - if s.Destination != nil { - if err := s.Destination.Validate(); err != nil { - invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) - } + if s.OutputSerialization == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSerialization")) } if invalidParams.Len() > 0 { @@ -16173,55 +22957,63 @@ func (s *ReplicationRule) Validate() error { return nil } -// SetDestination sets the Destination field's value. -func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { - s.Destination = v +// SetExpression sets the Expression field's value. +func (s *SelectParameters) SetExpression(v string) *SelectParameters { + s.Expression = &v return s } -// SetID sets the ID field's value. -func (s *ReplicationRule) SetID(v string) *ReplicationRule { - s.ID = &v +// SetExpressionType sets the ExpressionType field's value. +func (s *SelectParameters) SetExpressionType(v string) *SelectParameters { + s.ExpressionType = &v return s } -// SetPrefix sets the Prefix field's value. -func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { - s.Prefix = &v +// SetInputSerialization sets the InputSerialization field's value. 
+func (s *SelectParameters) SetInputSerialization(v *InputSerialization) *SelectParameters { + s.InputSerialization = v return s } -// SetStatus sets the Status field's value. -func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { - s.Status = &v +// SetOutputSerialization sets the OutputSerialization field's value. +func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *SelectParameters { + s.OutputSerialization = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration -type RequestPaymentConfiguration struct { +// Describes the default server-side encryption to apply to new objects in the +// bucket. If a PUT Object request doesn't specify any server-side encryption, +// this default encryption will be applied. For more information, see PUT Bucket +// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html) +// in the Amazon Simple Storage Service API Reference. +type ServerSideEncryptionByDefault struct { _ struct{} `type:"structure"` - // Specifies who pays for the download and request fees. + // KMS master key ID to use for the default encryption. This parameter is allowed + // if and only if SSEAlgorithm is set to aws:kms. + KMSMasterKeyID *string `type:"string" sensitive:"true"` + + // Server-side encryption algorithm to use for the default encryption. // - // Payer is a required field - Payer *string `type:"string" required:"true" enum:"Payer"` + // SSEAlgorithm is a required field + SSEAlgorithm *string `type:"string" required:"true" enum:"ServerSideEncryption"` } // String returns the string representation -func (s RequestPaymentConfiguration) String() string { +func (s ServerSideEncryptionByDefault) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { +func (s ServerSideEncryptionByDefault) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RequestPaymentConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} - if s.Payer == nil { - invalidParams.Add(request.NewErrParamRequired("Payer")) +func (s *ServerSideEncryptionByDefault) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionByDefault"} + if s.SSEAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("SSEAlgorithm")) } if invalidParams.Len() > 0 { @@ -16230,58 +23022,53 @@ func (s *RequestPaymentConfiguration) Validate() error { return nil } -// SetPayer sets the Payer field's value. -func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { - s.Payer = &v +// SetKMSMasterKeyID sets the KMSMasterKeyID field's value. +func (s *ServerSideEncryptionByDefault) SetKMSMasterKeyID(v string) *ServerSideEncryptionByDefault { + s.KMSMasterKeyID = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest -type RestoreObjectInput struct { - _ struct{} `type:"structure" payload:"RestoreRequest"` - - // Bucket is a required field - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Key is a required field - Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. 
Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +// SetSSEAlgorithm sets the SSEAlgorithm field's value. +func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEncryptionByDefault { + s.SSEAlgorithm = &v + return s +} - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` +// Specifies the default server-side-encryption configuration. +type ServerSideEncryptionConfiguration struct { + _ struct{} `type:"structure"` - VersionId *string `location:"querystring" locationName:"versionId" type:"string"` + // Container for information about a particular server-side encryption configuration + // rule. + // + // Rules is a required field + Rules []*ServerSideEncryptionRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` } // String returns the string representation -func (s RestoreObjectInput) String() string { +func (s ServerSideEncryptionConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectInput) GoString() string { +func (s ServerSideEncryptionConfiguration) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreObjectInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"} - if s.Bucket == nil { - invalidParams.Add(request.NewErrParamRequired("Bucket")) - } - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } - if s.Key != nil && len(*s.Key) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Key", 1)) +func (s *ServerSideEncryptionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) } - if s.RestoreRequest != nil { - if err := s.RestoreRequest.Validate(); err != nil { - invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams)) + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } } } @@ -16290,94 +23077,84 @@ func (s *RestoreObjectInput) Validate() error { } return nil } - -// SetBucket sets the Bucket field's value. -func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput { - s.Bucket = &v - return s -} - -// SetKey sets the Key field's value. -func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput { - s.Key = &v - return s -} - -// SetRequestPayer sets the RequestPayer field's value. -func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput { - s.RequestPayer = &v - return s -} - -// SetRestoreRequest sets the RestoreRequest field's value. -func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput { - s.RestoreRequest = v - return s -} - -// SetVersionId sets the VersionId field's value. -func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput { - s.VersionId = &v + +// SetRules sets the Rules field's value. 
+func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRule) *ServerSideEncryptionConfiguration { + s.Rules = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput -type RestoreObjectOutput struct { +// Specifies the default server-side encryption configuration. +type ServerSideEncryptionRule struct { _ struct{} `type:"structure"` - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + // Specifies the default server-side encryption to apply to new objects in the + // bucket. If a PUT Object request doesn't specify any server-side encryption, + // this default encryption will be applied. + ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` } // String returns the string representation -func (s RestoreObjectOutput) String() string { +func (s ServerSideEncryptionRule) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { +func (s ServerSideEncryptionRule) GoString() string { return s.String() } -// SetRequestCharged sets the RequestCharged field's value. -func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput { - s.RequestCharged = &v +// Validate inspects the fields of the type to determine if they are valid. +func (s *ServerSideEncryptionRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ServerSideEncryptionRule"} + if s.ApplyServerSideEncryptionByDefault != nil { + if err := s.ApplyServerSideEncryptionByDefault.Validate(); err != nil { + invalidParams.AddNested("ApplyServerSideEncryptionByDefault", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetApplyServerSideEncryptionByDefault sets the ApplyServerSideEncryptionByDefault field's value. +func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *ServerSideEncryptionByDefault) *ServerSideEncryptionRule { + s.ApplyServerSideEncryptionByDefault = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest -type RestoreRequest struct { +// A container that describes additional filters for identifying the source +// objects that you want to replicate. You can choose to enable or disable the +// replication of these objects. Currently, Amazon S3 supports only the filter +// that you can specify for objects created with server-side encryption using +// an AWS KMS-Managed Key (SSE-KMS). +type SourceSelectionCriteria struct { _ struct{} `type:"structure"` - // Lifetime of the active copy in days - // - // Days is a required field - Days *int64 `type:"integer" required:"true"` - - // Glacier related prameters pertaining to this job. - GlacierJobParameters *GlacierJobParameters `type:"structure"` + // A container for filter information for the selection of Amazon S3 objects + // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication + // configuration, this element is required. 
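Taken together, the three types above nest as configuration, rule, and default. A sketch of a bucket default of SSE-KMS follows; the key ARN is hypothetical, and omitting `KMSMasterKeyID` falls back to the account's default aws/s3 key.

```go
package sample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// defaultEncryptionConfig builds a configuration whose single rule applies
// SSE-KMS to any PUT that does not name its own encryption.
func defaultEncryptionConfig() (*s3.ServerSideEncryptionConfiguration, error) {
	cfg := &s3.ServerSideEncryptionConfiguration{
		Rules: []*s3.ServerSideEncryptionRule{{
			ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
				SSEAlgorithm: aws.String(s3.ServerSideEncryptionAwsKms),
				// Hypothetical key ARN; allowed only with aws:kms.
				KMSMasterKeyID: aws.String("arn:aws:kms:us-east-1:111122223333:key/example"),
			},
		}},
	}
	// Validate recurses into each nested rule, as the code above shows.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}
```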
+ SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"` } // String returns the string representation -func (s RestoreRequest) String() string { +func (s SourceSelectionCriteria) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RestoreRequest) GoString() string { +func (s SourceSelectionCriteria) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *RestoreRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"} - if s.Days == nil { - invalidParams.Add(request.NewErrParamRequired("Days")) - } - if s.GlacierJobParameters != nil { - if err := s.GlacierJobParameters.Validate(); err != nil { - invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams)) +func (s *SourceSelectionCriteria) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.SseKmsEncryptedObjects != nil { + if err := s.SseKmsEncryptedObjects.Validate(); err != nil { + invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) } } @@ -16387,51 +23164,39 @@ func (s *RestoreRequest) Validate() error { return nil } -// SetDays sets the Days field's value. -func (s *RestoreRequest) SetDays(v int64) *RestoreRequest { - s.Days = &v - return s -} - -// SetGlacierJobParameters sets the GlacierJobParameters field's value. -func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest { - s.GlacierJobParameters = v +// SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. +func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { + s.SseKmsEncryptedObjects = v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule -type RoutingRule struct { +// A container for filter information for the selection of S3 objects encrypted +// with AWS KMS. +type SseKmsEncryptedObjects struct { _ struct{} `type:"structure"` - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can can specify a different error code to return. + // Specifies whether Amazon S3 replicates objects created with server-side encryption + // using an AWS KMS-managed key. // - // Redirect is a required field - Redirect *Redirect `type:"structure" required:"true"` + // Status is a required field + Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"` } // String returns the string representation -func (s RoutingRule) String() string { +func (s SseKmsEncryptedObjects) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s RoutingRule) GoString() string { +func (s SseKmsEncryptedObjects) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *RoutingRule) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RoutingRule"} - if s.Redirect == nil { - invalidParams.Add(request.NewErrParamRequired("Redirect")) +func (s *SseKmsEncryptedObjects) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SseKmsEncryptedObjects"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) } if invalidParams.Len() > 0 { @@ -16440,139 +23205,101 @@ func (s *RoutingRule) Validate() error { return nil } -// SetCondition sets the Condition field's value. -func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule { - s.Condition = v - return s -} - -// SetRedirect sets the Redirect field's value. -func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule { - s.Redirect = v +// SetStatus sets the Status field's value. +func (s *SseKmsEncryptedObjects) SetStatus(v string) *SseKmsEncryptedObjects { + s.Status = &v return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule -type Rule struct { +type Stats struct { _ struct{} `type:"structure"` - // Specifies the days since the initiation of an Incomplete Multipart Upload - // that Lifecycle will wait before permanently removing all parts of the upload. - AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` - - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Container for the transition rule that describes when noncurrent objects - // transition to the STANDARD_IA or GLACIER storage class. If your bucket is - // versioning-enabled (or versioning is suspended), you can set this action - // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA - // or GLACIER storage class at a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - // - // Prefix is a required field - Prefix *string `type:"string" required:"true"` + // The total number of uncompressed object bytes processed. + BytesProcessed *int64 `type:"long"` - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - // - // Status is a required field - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + // The total number of bytes of records payload data returned. + BytesReturned *int64 `type:"long"` - Transition *Transition `type:"structure"` + // The total number of object bytes scanned. + BytesScanned *int64 `type:"long"` } // String returns the string representation -func (s Rule) String() string { +func (s Stats) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Rule) GoString() string { +func (s Stats) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
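A sketch of the criteria in use, assuming it is later attached to a replication rule via the replication shapes defined elsewhere in this file:

```go
package sample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// kmsOnlyCriteria restricts replication to objects that were written with
// SSE-KMS, the only filter the service currently supports here.
func kmsOnlyCriteria() (*s3.SourceSelectionCriteria, error) {
	criteria := &s3.SourceSelectionCriteria{
		SseKmsEncryptedObjects: &s3.SseKmsEncryptedObjects{
			Status: aws.String(s3.SseKmsEncryptedObjectsStatusEnabled),
		},
	}
	// Status is the one required field; Validate enforces it as shown above.
	if err := criteria.Validate(); err != nil {
		return nil, err
	}
	return criteria, nil
}
```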
-func (s *Rule) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Rule"}
- if s.Prefix == nil {
- invalidParams.Add(request.NewErrParamRequired("Prefix"))
- }
- if s.Status == nil {
- invalidParams.Add(request.NewErrParamRequired("Status"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
-func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
- s.AbortIncompleteMultipartUpload = v
+// SetBytesProcessed sets the BytesProcessed field's value.
+func (s *Stats) SetBytesProcessed(v int64) *Stats {
+ s.BytesProcessed = &v
 return s
}

-// SetExpiration sets the Expiration field's value.
-func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
- s.Expiration = v
+// SetBytesReturned sets the BytesReturned field's value.
+func (s *Stats) SetBytesReturned(v int64) *Stats {
+ s.BytesReturned = &v
 return s
}

-// SetID sets the ID field's value.
-func (s *Rule) SetID(v string) *Rule {
- s.ID = &v
+// SetBytesScanned sets the BytesScanned field's value.
+func (s *Stats) SetBytesScanned(v int64) *Stats {
+ s.BytesScanned = &v
 return s
}

-// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
-func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
- s.NoncurrentVersionExpiration = v
- return s
+type StatsEvent struct {
+ _ struct{} `locationName:"StatsEvent" type:"structure" payload:"Details"`
+
+ // The Stats event details.
+ Details *Stats `locationName:"Details" type:"structure"`
}

-// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
-func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
- s.NoncurrentVersionTransition = v
- return s
+// String returns the string representation
+func (s StatsEvent) String() string {
+ return awsutil.Prettify(s)
}

-// SetPrefix sets the Prefix field's value.
-func (s *Rule) SetPrefix(v string) *Rule {
- s.Prefix = &v
- return s
+// GoString returns the string representation
+func (s StatsEvent) GoString() string {
+ return s.String()
}

-// SetStatus sets the Status field's value.
-func (s *Rule) SetStatus(v string) *Rule {
- s.Status = &v
+// SetDetails sets the Details field's value.
+func (s *StatsEvent) SetDetails(v *Stats) *StatsEvent {
+ s.Details = v
 return s
}

-// SetTransition sets the Transition field's value.
-func (s *Rule) SetTransition(v *Transition) *Rule {
- s.Transition = v
- return s
+// The StatsEvent is an event in the SelectObjectContentEventStream group of events.
+func (s *StatsEvent) eventSelectObjectContentEventStream() {}
+
+// UnmarshalEvent unmarshals the EventStream Message into the StatsEvent value.
+// This method is only used internally within the SDK's EventStream handling.
+func (s *StatsEvent) UnmarshalEvent(
+ payloadUnmarshaler protocol.PayloadUnmarshaler,
+ msg eventstream.Message,
+) error {
+ if err := payloadUnmarshaler.UnmarshalPayload(
+ bytes.NewReader(msg.Payload), s,
+ ); err != nil {
+ return err
+ }
+ return nil
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.
type StorageClassAnalysis struct { _ struct{} `type:"structure"` - // A container used to describe how data related to the storage class analysis - // should be exported. + // Specifies how data related to the storage class analysis for an Amazon S3 + // bucket should be exported. DataExport *StorageClassAnalysisDataExport `type:"structure"` } @@ -16607,7 +23334,6 @@ func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport type StorageClassAnalysisDataExport struct { _ struct{} `type:"structure"` @@ -16665,7 +23391,6 @@ func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *Stora return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag type Tag struct { _ struct{} `type:"structure"` @@ -16721,7 +23446,6 @@ func (s *Tag) SetValue(v string) *Tag { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging type Tagging struct { _ struct{} `type:"structure"` @@ -16768,11 +23492,10 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant type TargetGrant struct { _ struct{} `type:"structure"` - Grantee *Grantee `type:"structure"` + Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` // Logging permissions assigned to the Grantee for the bucket. Permission *string `type:"string" enum:"BucketLogsPermission"` @@ -16815,25 +23538,30 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant { return s } -// Container for specifying the configuration when you want Amazon S3 to publish -// events to an Amazon Simple Notification Service (Amazon SNS) topic. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration +// A container for specifying the configuration for publication of messages +// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3 +// detects specified events. type TopicConfiguration struct { _ struct{} `type:"structure"` + // The Amazon S3 bucket event about which to send notifications. For more information, + // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. + // // Events is a required field Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - // Container for object key name filtering rules. For information about key - // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // Specifies object key name filtering rules. For information about key name + // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + // in the Amazon Simple Storage Service Developer Guide. Filter *NotificationConfigurationFilter `type:"structure"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` - // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects - // events of specified type. 
+ // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3 + // publishes a message when it detects events of the specified type. // // TopicArn is a required field TopicArn *string `locationName:"Topic" type:"string" required:"true"` @@ -16889,16 +23617,17 @@ func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated type TopicConfigurationDeprecated struct { _ struct{} `type:"structure"` // Bucket event for which to send notifications. + // + // Deprecated: Event has been deprecated Event *string `deprecated:"true" type:"string" enum:"Event"` Events []*string `locationName:"Event" type:"list" flattened:"true"` - // Optional unique identifier for configurations in a notification configuration. + // An optional unique identifier for configurations in a notification configuration. // If you don't provide one, Amazon S3 will assign an ID. Id *string `type:"string"` @@ -16941,19 +23670,19 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition +// Specifies when an object transitions to a specified storage class. type Transition struct { _ struct{} `type:"structure"` - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. + // Indicates when objects are transitioned to the specified storage class. The + // date value must be in ISO 8601 format. The time is always midnight UTC. Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. + // Indicates the number of days after creation when objects are transitioned + // to the specified storage class. The value must be a positive integer. Days *int64 `type:"integer"` - // The class of storage used to store the object. + // The storage class to which you want the object to transition. StorageClass *string `type:"string" enum:"TransitionStorageClass"` } @@ -16985,7 +23714,6 @@ func (s *Transition) SetStorageClass(v string) *Transition { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest type UploadPartCopyInput struct { _ struct{} `type:"structure"` @@ -17002,20 +23730,20 @@ type UploadPartCopyInput struct { CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp"` // Copies the object if its entity tag (ETag) is different than the specified // ETag. CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` // Copies the object if it hasn't been modified since the specified time. 
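The reworded `Transition` documentation above pins down the `Date`/`Days` semantics. A sketch of a transition that moves objects to GLACIER 90 days after creation follows; set `Days` or `Date`, not both, and note that `TransitionStorageClassGlacier` is assumed from the `TransitionStorageClass` enum values defined with the other constants in this file.

```go
package sample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// glacierAfter90Days describes a lifecycle transition; it would be attached
// to a lifecycle rule defined elsewhere in this file.
func glacierAfter90Days() *s3.Transition {
	return &s3.Transition{
		// Days counts from object creation and must be a positive integer.
		Days:         aws.Int64(90),
		StorageClass: aws.String(s3.TransitionStorageClassGlacier),
	}
}
```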
- CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp"` // The range of bytes to copy from the source object. The range value must use // the form bytes=first-last, where the first and last are the zero-based byte // offsets to copy. For example, bytes=0-9 indicates that you want to copy the // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 GB. + // is greater than 5 MB. CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` // Specifies the algorithm to use when decrypting the source object (e.g., AES256). @@ -17024,7 +23752,7 @@ type UploadPartCopyInput struct { // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt // the source object. The encryption key provided in this header must be one // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -17055,7 +23783,7 @@ type UploadPartCopyInput struct { // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm // header. This must be the same encryption key specified in the initiate multipart // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"` // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. // Amazon S3 uses this header for a message integrity check to ensure the encryption @@ -17084,6 +23812,9 @@ func (s *UploadPartCopyInput) Validate() error { if s.Bucket == nil { invalidParams.Add(request.NewErrParamRequired("Bucket")) } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } if s.CopySource == nil { invalidParams.Add(request.NewErrParamRequired("CopySource")) } @@ -17112,6 +23843,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { return s } +func (s *UploadPartCopyInput) getBucket() (v string) { + if s.Bucket == nil { + return v + } + return *s.Bucket +} + // SetCopySource sets the CopySource field's value. func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { s.CopySource = &v @@ -17160,6 +23898,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC return s } +func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) { + if s.CopySourceSSECustomerKey == nil { + return v + } + return *s.CopySourceSSECustomerKey +} + // SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. 
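The `CopySourceRange` format documented above is the key detail when sizing parts. A sketch that stages the second 5 MiB of a large source object as part 2 of a multipart copy (all names and the upload ID are hypothetical):

```go
package sample

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

const partSize = 5 * 1024 * 1024 // 5 MiB

// copySecondPart builds an UploadPartCopy request using zero-based,
// inclusive byte offsets, per the bytes=first-last format described above.
func copySecondPart() *s3.UploadPartCopyInput {
	return &s3.UploadPartCopyInput{
		Bucket:          aws.String("dest-bucket"),
		Key:             aws.String("dest/object"),
		UploadId:        aws.String("example-upload-id"),
		PartNumber:      aws.Int64(2),
		CopySource:      aws.String("source-bucket/source/object"),
		CopySourceRange: aws.String(fmt.Sprintf("bytes=%d-%d", partSize, 2*partSize-1)),
	}
}
```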
func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
 s.CopySourceSSECustomerKeyMD5 = &v
@@ -17196,6 +23941,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
 return s
}

+func (s *UploadPartCopyInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
 s.SSECustomerKeyMD5 = &v
@@ -17208,7 +23960,6 @@ func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
 return s
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
type UploadPartCopyOutput struct {
 _ struct{} `type:"structure" payload:"CopyPartResult"`
@@ -17234,7 +23985,7 @@ type UploadPartCopyOutput struct {
 // If present, specifies the ID of the AWS Key Management Service (KMS) master
 // encryption key that was used for the object.
- SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`

 // The Server-side encryption algorithm used when storing this object in S3
 // (e.g., AES256, aws:kms).
@@ -17293,7 +24044,6 @@ func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopy
 return s
}

-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
type UploadPartInput struct {
 _ struct{} `type:"structure" payload:"Body"`
@@ -17309,6 +24059,11 @@ type UploadPartInput struct {
 // body cannot be determined automatically.
 ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`

+ // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+ // auto-populated when using the command from the CLI. This parameter is required
+ // if object lock parameters are specified.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
 // Object key for which the multipart upload was initiated.
 //
 // Key is a required field
@@ -17335,7 +24090,7 @@ type UploadPartInput struct {
 // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
 // header. This must be the same encryption key specified in the initiate multipart
 // upload request.
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+ SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`

 // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
 // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -17364,6 +24119,9 @@ func (s *UploadPartInput) Validate() error {
 if s.Bucket == nil {
 invalidParams.Add(request.NewErrParamRequired("Bucket"))
 }
+ if s.Bucket != nil && len(*s.Bucket) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+ }
 if s.Key == nil {
 invalidParams.Add(request.NewErrParamRequired("Key"))
 }
@@ -17395,12 +24153,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
 return s
}

+func (s *UploadPartInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContentLength sets the ContentLength field's value.
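Given the new `ContentMD5` field documented above (required when object lock parameters are supplied), a caller can precompute the digest for an in-memory part; for seekable bodies the SDK's own body-hash handlers in `body_hash.go`, introduced at the end of this diff, can fill the header instead. Names in this sketch are hypothetical.

```go
package sample

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// partWithMD5 stages bytes as part 1, setting Content-MD5 explicitly.
func partWithMD5(part []byte) *s3.UploadPartInput {
	sum := md5.Sum(part)
	return &s3.UploadPartInput{
		Bucket:     aws.String("example-bucket"),   // hypothetical
		Key:        aws.String("big/object"),       // hypothetical
		UploadId:   aws.String("example-upload-id"), // hypothetical
		PartNumber: aws.Int64(1),
		Body:       bytes.NewReader(part),
		// Base64 of the raw 128-bit digest, matching the header format.
		ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
	}
}
```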
func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { s.ContentLength = &v return s } +// SetContentMD5 sets the ContentMD5 field's value. +func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput { + s.ContentMD5 = &v + return s +} + // SetKey sets the Key field's value. func (s *UploadPartInput) SetKey(v string) *UploadPartInput { s.Key = &v @@ -17431,6 +24202,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { return s } +func (s *UploadPartInput) getSSECustomerKey() (v string) { + if s.SSECustomerKey == nil { + return v + } + return *s.SSECustomerKey +} + // SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { s.SSECustomerKeyMD5 = &v @@ -17443,7 +24221,6 @@ func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput type UploadPartOutput struct { _ struct{} `type:"structure"` @@ -17466,7 +24243,7 @@ type UploadPartOutput struct { // If present, specifies the ID of the AWS Key Management Service (KMS) master // encryption key that was used for the object. - SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"` // The Server-side encryption algorithm used when storing this object in S3 // (e.g., AES256, aws:kms). @@ -17519,7 +24296,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration +// Describes the versioning state of an Amazon S3 bucket. For more information, +// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html) +// in the Amazon Simple Storage Service API Reference. type VersioningConfiguration struct { _ struct{} `type:"structure"` @@ -17554,16 +24333,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration +// Specifies website configuration parameters for an Amazon S3 bucket. type WebsiteConfiguration struct { _ struct{} `type:"structure"` + // The name of the error document for the website. ErrorDocument *ErrorDocument `type:"structure"` + // The name of the index document for the website. IndexDocument *IndexDocument `type:"structure"` + // The redirect behavior for every request to this bucket's website endpoint. + // + // If you specify this property, you can't specify any other property. RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + // Rules that define when a redirect is applied and the redirect behavior. 
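Before the final `RoutingRules` field below, a sketch of the common index-plus-error-page shape; `IndexDocument.Suffix` and `ErrorDocument.Key` are assumed from their definitions elsewhere in this file.

```go
package sample

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// staticSiteConfig serves index.html at the root and error.html on errors.
// Setting RedirectAllRequestsTo instead would forbid every other field,
// per the documentation above.
func staticSiteConfig() *s3.WebsiteConfiguration {
	return &s3.WebsiteConfiguration{
		IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
		ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
	}
}
```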
RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` } @@ -17717,6 +24502,25 @@ const ( BucketVersioningStatusSuspended = "Suspended" ) +const ( + // CompressionTypeNone is a CompressionType enum value + CompressionTypeNone = "NONE" + + // CompressionTypeGzip is a CompressionType enum value + CompressionTypeGzip = "GZIP" + + // CompressionTypeBzip2 is a CompressionType enum value + CompressionTypeBzip2 = "BZIP2" +) + +const ( + // DeleteMarkerReplicationStatusEnabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusEnabled = "Enabled" + + // DeleteMarkerReplicationStatusDisabled is a DeleteMarkerReplicationStatus enum value + DeleteMarkerReplicationStatusDisabled = "Disabled" +) + // Requests Amazon S3 to encode the object keys in the response and specifies // the encoding method to use. An object key may contain any Unicode character; // however, XML 1.0 parser cannot parse some characters, such as characters @@ -17728,7 +24532,7 @@ const ( EncodingTypeUrl = "url" ) -// Bucket event for which to send notifications. +// The bucket event for which to send notifications. const ( // EventS3ReducedRedundancyLostObject is a Event enum value EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" @@ -17756,6 +24560,12 @@ const ( // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + + // EventS3ObjectRestorePost is a Event enum value + EventS3ObjectRestorePost = "s3:ObjectRestore:Post" + + // EventS3ObjectRestoreCompleted is a Event enum value + EventS3ObjectRestoreCompleted = "s3:ObjectRestore:Completed" ) const ( @@ -17766,6 +24576,22 @@ const ( ExpirationStatusDisabled = "Disabled" ) +const ( + // ExpressionTypeSql is a ExpressionType enum value + ExpressionTypeSql = "SQL" +) + +const ( + // FileHeaderInfoUse is a FileHeaderInfo enum value + FileHeaderInfoUse = "USE" + + // FileHeaderInfoIgnore is a FileHeaderInfo enum value + FileHeaderInfoIgnore = "IGNORE" + + // FileHeaderInfoNone is a FileHeaderInfo enum value + FileHeaderInfoNone = "NONE" +) + const ( // FilterRuleNamePrefix is a FilterRuleName enum value FilterRuleNamePrefix = "prefix" @@ -17777,6 +24603,12 @@ const ( const ( // InventoryFormatCsv is a InventoryFormat enum value InventoryFormatCsv = "CSV" + + // InventoryFormatOrc is a InventoryFormat enum value + InventoryFormatOrc = "ORC" + + // InventoryFormatParquet is a InventoryFormat enum value + InventoryFormatParquet = "Parquet" ) const ( @@ -17813,6 +24645,26 @@ const ( // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value InventoryOptionalFieldReplicationStatus = "ReplicationStatus" + + // InventoryOptionalFieldEncryptionStatus is a InventoryOptionalField enum value + InventoryOptionalFieldEncryptionStatus = "EncryptionStatus" + + // InventoryOptionalFieldObjectLockRetainUntilDate is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockRetainUntilDate = "ObjectLockRetainUntilDate" + + // InventoryOptionalFieldObjectLockMode is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockMode = "ObjectLockMode" + + // InventoryOptionalFieldObjectLockLegalHoldStatus is a InventoryOptionalField enum value + InventoryOptionalFieldObjectLockLegalHoldStatus = "ObjectLockLegalHoldStatus" +) + +const ( + // JSONTypeDocument is a JSONType enum value + JSONTypeDocument = "DOCUMENT" + + // JSONTypeLines is a JSONType enum value + JSONTypeLines = "LINES" ) const ( @@ -17862,6 
+24714,35 @@ const ( ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" ) +const ( + // ObjectLockEnabledEnabled is a ObjectLockEnabled enum value + ObjectLockEnabledEnabled = "Enabled" +) + +const ( + // ObjectLockLegalHoldStatusOn is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOn = "ON" + + // ObjectLockLegalHoldStatusOff is a ObjectLockLegalHoldStatus enum value + ObjectLockLegalHoldStatusOff = "OFF" +) + +const ( + // ObjectLockModeGovernance is a ObjectLockMode enum value + ObjectLockModeGovernance = "GOVERNANCE" + + // ObjectLockModeCompliance is a ObjectLockMode enum value + ObjectLockModeCompliance = "COMPLIANCE" +) + +const ( + // ObjectLockRetentionModeGovernance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeGovernance = "GOVERNANCE" + + // ObjectLockRetentionModeCompliance is a ObjectLockRetentionMode enum value + ObjectLockRetentionModeCompliance = "COMPLIANCE" +) + const ( // ObjectStorageClassStandard is a ObjectStorageClass enum value ObjectStorageClassStandard = "STANDARD" @@ -17871,6 +24752,18 @@ const ( // ObjectStorageClassGlacier is a ObjectStorageClass enum value ObjectStorageClassGlacier = "GLACIER" + + // ObjectStorageClassStandardIa is a ObjectStorageClass enum value + ObjectStorageClassStandardIa = "STANDARD_IA" + + // ObjectStorageClassOnezoneIa is a ObjectStorageClass enum value + ObjectStorageClassOnezoneIa = "ONEZONE_IA" + + // ObjectStorageClassIntelligentTiering is a ObjectStorageClass enum value + ObjectStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // ObjectStorageClassDeepArchive is a ObjectStorageClass enum value + ObjectStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -17878,6 +24771,11 @@ const ( ObjectVersionStorageClassStandard = "STANDARD" ) +const ( + // OwnerOverrideDestination is a OwnerOverride enum value + OwnerOverrideDestination = "Destination" +) + const ( // PayerRequester is a Payer enum value PayerRequester = "Requester" @@ -17911,6 +24809,14 @@ const ( ProtocolHttps = "https" ) +const ( + // QuoteFieldsAlways is a QuoteFields enum value + QuoteFieldsAlways = "ALWAYS" + + // QuoteFieldsAsneeded is a QuoteFields enum value + QuoteFieldsAsneeded = "ASNEEDED" +) + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" @@ -17949,6 +24855,11 @@ const ( RequestPayerRequester = "requester" ) +const ( + // RestoreRequestTypeSelect is a RestoreRequestType enum value + RestoreRequestTypeSelect = "SELECT" +) + const ( // ServerSideEncryptionAes256 is a ServerSideEncryption enum value ServerSideEncryptionAes256 = "AES256" @@ -17957,6 +24868,14 @@ const ( ServerSideEncryptionAwsKms = "aws:kms" ) +const ( + // SseKmsEncryptedObjectsStatusEnabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusEnabled = "Enabled" + + // SseKmsEncryptedObjectsStatusDisabled is a SseKmsEncryptedObjectsStatus enum value + SseKmsEncryptedObjectsStatusDisabled = "Disabled" +) + const ( // StorageClassStandard is a StorageClass enum value StorageClassStandard = "STANDARD" @@ -17966,6 +24885,18 @@ const ( // StorageClassStandardIa is a StorageClass enum value StorageClassStandardIa = "STANDARD_IA" + + // StorageClassOnezoneIa is a StorageClass enum value + StorageClassOnezoneIa = "ONEZONE_IA" + + // StorageClassIntelligentTiering is a StorageClass enum value + StorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // StorageClassGlacier is a StorageClass enum value + StorageClassGlacier = "GLACIER" + + // 
StorageClassDeepArchive is a StorageClass enum value + StorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( @@ -17998,6 +24929,15 @@ const ( // TransitionStorageClassStandardIa is a TransitionStorageClass enum value TransitionStorageClassStandardIa = "STANDARD_IA" + + // TransitionStorageClassOnezoneIa is a TransitionStorageClass enum value + TransitionStorageClassOnezoneIa = "ONEZONE_IA" + + // TransitionStorageClassIntelligentTiering is a TransitionStorageClass enum value + TransitionStorageClassIntelligentTiering = "INTELLIGENT_TIERING" + + // TransitionStorageClassDeepArchive is a TransitionStorageClass enum value + TransitionStorageClassDeepArchive = "DEEP_ARCHIVE" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go new file mode 100644 index 00000000..5c8ce5cc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/body_hash.go @@ -0,0 +1,249 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" +) + +const ( + contentMD5Header = "Content-Md5" + contentSha256Header = "X-Amz-Content-Sha256" + amzTeHeader = "X-Amz-Te" + amzTxEncodingHeader = "X-Amz-Transfer-Encoding" + + appendMD5TxEncoding = "append-md5" +) + +// contentMD5 computes and sets the HTTP Content-MD5 header for requests that +// require it. +func contentMD5(r *request.Request) { + h := md5.New() + + if !aws.IsReaderSeekable(r.Body) { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "Unable to compute Content-MD5 for unseekable body, S3.%s", + r.Operation.Name)) + } + return + } + + if _, err := copySeekableBody(h, r.Body); err != nil { + r.Error = awserr.New("ContentMD5", "failed to compute body MD5", err) + return + } + + // Encode the MD5 checksum in base64 and set the request header. + v := base64.StdEncoding.EncodeToString(h.Sum(nil)) + r.HTTPRequest.Header.Set(contentMD5Header, v) +} + +// computeBodyHashes will add Content MD5 and Content Sha256 hashes to the +// request. If the body is not seekable, or S3DisableContentMD5Validation is +// set, this handler will be ignored. +func computeBodyHashes(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + if r.Error != nil || !aws.IsReaderSeekable(r.Body) { + return + } + + var md5Hash, sha256Hash hash.Hash + hashers := make([]io.Writer, 0, 2) + + // Determine upfront which hashes can be set without overriding + // user-provided header data. + if v := r.HTTPRequest.Header.Get(contentMD5Header); len(v) == 0 { + md5Hash = md5.New() + hashers = append(hashers, md5Hash) + } + + if v := r.HTTPRequest.Header.Get(contentSha256Header); len(v) == 0 { + sha256Hash = sha256.New() + hashers = append(hashers, sha256Hash) + } + + // Create the destination writer based on the hashes that are not already + // provided by the user. + var dst io.Writer + switch len(hashers) { + case 0: + return + case 1: + dst = hashers[0] + default: + dst = io.MultiWriter(hashers...)
+ } + + if _, err := copySeekableBody(dst, r.Body); err != nil { + r.Error = awserr.New("BodyHashError", "failed to compute body hashes", err) + return + } + + // For the hashes created, set the associated headers that the user did not + // already provide. + if md5Hash != nil { + sum := make([]byte, md5.Size) + encoded := make([]byte, md5Base64EncLen) + + base64.StdEncoding.Encode(encoded, md5Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentMD5Header] = []string{string(encoded)} + } + + if sha256Hash != nil { + encoded := make([]byte, sha256HexEncLen) + sum := make([]byte, sha256.Size) + + hex.Encode(encoded, sha256Hash.Sum(sum[0:0])) + r.HTTPRequest.Header[contentSha256Header] = []string{string(encoded)} + } +} + +const ( + md5Base64EncLen = (md5.Size + 2) / 3 * 4 // base64.StdEncoding.EncodedLen + sha256HexEncLen = sha256.Size * 2 // hex.EncodedLen +) + +func copySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) { + curPos, err := src.Seek(0, sdkio.SeekCurrent) + if err != nil { + return 0, err + } + + // Hash the body. Seek back to the first position after reading to reset + // the body for transmission. Copy errors may be assumed to be from the + // body. + n, err := io.Copy(dst, src) + if err != nil { + return n, err + } + + _, err = src.Seek(curPos, sdkio.SeekStart) + if err != nil { + return n, err + } + + return n, nil +} + +// Adds the x-amz-te: append-md5 header to the request. This requests that the +// service respond with a trailing MD5 checksum. +// +// Will not ask for append MD5 if validation is disabled, if the request is +// presigned, or if the API operation does not support content MD5 validation. +func askForTxEncodingAppendMD5(r *request.Request) { + if aws.BoolValue(r.Config.S3DisableContentMD5Validation) { + return + } + if r.IsPresigned() { + return + } + r.HTTPRequest.Header.Set(amzTeHeader, appendMD5TxEncoding) +} + +func useMD5ValidationReader(r *request.Request) { + if r.Error != nil { + return + } + + if v := r.HTTPResponse.Header.Get(amzTxEncodingHeader); v != appendMD5TxEncoding { + return + } + + var bodyReader *io.ReadCloser + var contentLen int64 + switch tv := r.Data.(type) { + case *GetObjectOutput: + bodyReader = &tv.Body + contentLen = aws.Int64Value(tv.ContentLength) + // Update ContentLength, hiding the trailing MD5 checksum. + tv.ContentLength = aws.Int64(contentLen - md5.Size) + tv.ContentRange = aws.String(r.HTTPResponse.Header.Get("X-Amz-Content-Range")) + default: + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("%s: %s header received on unsupported API, %s", + amzTxEncodingHeader, appendMD5TxEncoding, r.Operation.Name, + ), nil) + return + } + + if contentLen < md5.Size { + r.Error = awserr.New("ChecksumValidationError", + fmt.Sprintf("invalid Content-Length %d for %s %s", + contentLen, appendMD5TxEncoding, amzTxEncodingHeader, + ), nil) + return + } + + // Wrap and swap the response body reader with the validation reader.
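copySeekableBody is the core trick in body_hash.go: record the current offset, stream the body through the hash, then seek back so the body can still be transmitted. A minimal standalone sketch of that pattern, using only the Go standard library; hashSeekableBody and the sample payload are illustrative names, not SDK API:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

// hashSeekableBody hashes a seekable body and restores its read position,
// mirroring copySeekableBody: remember the offset, copy through the hash,
// then seek back so the body remains transmittable.
func hashSeekableBody(body io.ReadSeeker) (string, error) {
	cur, err := body.Seek(0, io.SeekCurrent)
	if err != nil {
		return "", err
	}
	h := md5.New()
	if _, err := io.Copy(h, body); err != nil {
		return "", err
	}
	if _, err := body.Seek(cur, io.SeekStart); err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}

func main() {
	body := strings.NewReader("example payload")
	sum, err := hashSeekableBody(body)
	if err != nil {
		panic(err)
	}
	// The reader is back at offset 0 and can still be sent.
	fmt.Println("Content-MD5:", sum)
}
```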
+ *bodyReader = newMD5ValidationReader(*bodyReader, contentLen-md5.Size) +} + +type md5ValidationReader struct { + rawReader io.ReadCloser + payload io.Reader + hash hash.Hash + + payloadLen int64 + read int64 +} + +func newMD5ValidationReader(reader io.ReadCloser, payloadLen int64) *md5ValidationReader { + h := md5.New() + return &md5ValidationReader{ + rawReader: reader, + payload: io.TeeReader(&io.LimitedReader{R: reader, N: payloadLen}, h), + hash: h, + payloadLen: payloadLen, + } +} + +func (v *md5ValidationReader) Read(p []byte) (n int, err error) { + n, err = v.payload.Read(p) + if err != nil && err != io.EOF { + return n, err + } + + v.read += int64(n) + + if err == io.EOF { + if v.read != v.payloadLen { + return n, io.ErrUnexpectedEOF + } + expectSum := make([]byte, md5.Size) + actualSum := make([]byte, md5.Size) + if _, sumReadErr := io.ReadFull(v.rawReader, expectSum); sumReadErr != nil { + return n, sumReadErr + } + actualSum = v.hash.Sum(actualSum[0:0]) + if !bytes.Equal(expectSum, actualSum) { + return n, awserr.New("InvalidChecksum", + fmt.Sprintf("expected MD5 checksum %s, got %s", + hex.EncodeToString(expectSum), + hex.EncodeToString(actualSum), + ), + nil) + } + } + + return n, err +} + +func (v *md5ValidationReader) Close() error { + return v.rawReader.Close() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go index c3a2702d..9ba8a788 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -12,19 +12,83 @@ import ( var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) +// NormalizeBucketLocation is a utility function which will update the +// passed-in value to always be a region ID. Generally this would be used +// with the GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + func buildGetBucketLocation(r *request.Request) { if r.DataFilled() { out := r.Data.(*GetBucketLocationOutput) b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) + r.Error = awserr.New(request.ErrCodeSerialization, + "failed reading response body", err) return } match := reBucketLocation.FindSubmatch(b) if len(match) > 1 { loc := string(match[1]) - out.LocationConstraint = &loc + out.LocationConstraint = aws.String(loc) } } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go index 84633472..23d386b1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -3,6 +3,7 @@ package s3 import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/s3err" ) func init() { @@ -16,11 +17,13 @@ func defaultInitClientFn(c *client.Client) { // Require SSL when using SSE keys c.Handlers.Validate.PushBack(validateSSERequiresSSL) - c.Handlers.Build.PushBack(computeSSEKeys) + c.Handlers.Build.PushBack(computeSSEKeyMD5) + c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5) // S3 uses custom error unmarshaling logic c.Handlers.UnmarshalError.Clear() c.Handlers.UnmarshalError.PushBack(unmarshalError) + c.Handlers.UnmarshalError.PushBackNamed(s3err.RequestFailureWrapperHandler()) } func defaultInitRequestFn(r *request.Request) { @@ -31,6 +34,7 @@ func defaultInitRequestFn(r *request.Request) { switch r.Operation.Name { case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutObjectLegalHold, opPutObjectRetention, opPutObjectLockConfiguration, opPutBucketReplication: // These S3 operations require Content-MD5 to be set r.Handlers.Build.PushBack(contentMD5) @@ -42,5 +46,30 @@ func defaultInitRequestFn(r *request.Request) { r.Handlers.Validate.PushFront(populateLocationConstraint) case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + r.Handlers.Unmarshal.PushBackNamed(s3err.RequestFailureWrapperHandler()) + case opPutObject, opUploadPart: + r.Handlers.Build.PushBack(computeBodyHashes) + // Disabled until #1837 root issue is resolved. + // case opGetObject: + // r.Handlers.Build.PushBack(askForTxEncodingAppendMD5) + // r.Handlers.Unmarshal.PushBack(useMD5ValidationReader) } } + +// bucketGetter is an accessor interface to grab the "Bucket" field from +// an S3 type. +type bucketGetter interface { + getBucket() string +} + +// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey" +// field from an S3 type. +type sseCustomerKeyGetter interface { + getSSECustomerKey() string +} + +// copySourceSSECustomerKeyGetter is an accessor interface to grab the +// "CopySourceSSECustomerKey" field from an S3 type. 
+type copySourceSSECustomerKeyGetter interface { + getCopySourceSSECustomerKey() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 00000000..0def0225 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,26 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To contact Amazon Simple Storage Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 00000000..39b912c2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,109 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports both +// io.Reader for streaming uploads, and will take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrent downloading of Objects +// from S3. The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Download concurrently from +// multiple goroutines safely.
+// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to download file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in. +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// sess := session.Must(session.NewSession()) +// +// // Create the decryption client. +// svc := s3crypto.NewDecryptionClient(sess) +// +// // The object will be downloaded from S3 and decrypted locally. The metadata +// // about the object's encryption will instruct the decryption client how to +// // decrypt the content of the object. By default KMS is used for keys. +// result, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String(myBucket), +// Key: aws.String(myKey), +// }) +// +// See the s3crypto package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go index 13ebbdad..931cb17b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index ec3ffe44..a7fbc2de 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -8,7 +8,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) @@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) { // Attempts to retrieve the bucket name from the request input parameters. // If no bucket is found, or the field is empty "", false will be returned. func bucketNameFromReqParams(params interface{}) (string, bool) { - b, _ := awsutil.ValuesAtPath(params, "Bucket") - if len(b) == 0 { - return "", false - } - - if bucket, ok := b[0].(*string); ok { - if bucketStr := aws.StringValue(bucket); bucketStr != "" { - return bucketStr, true - } + if iface, ok := params.(bucketGetter); ok { + b := iface.getBucket() + return b, len(b) > 0 } return "", false diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index 5e6f2299..d17dcc9d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package s3 @@ -11,10 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -// S3 is a client for Amazon S3. -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// mutate any of the struct's properties though. type S3 struct { *client.Client } @@ -27,8 +29,9 @@ var initRequest func(*request.Request) // Service information constants const ( - ServiceName = "s3" // Service endpoint prefix API calls made to. - EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. + ServiceName = "s3" // Name of service. + EndpointsID = ServiceName // ID to lookup a service endpoint with. + ServiceID = "S3" // ServiceID is a unique identifier of a specific service. ) // New creates a new instance of the S3 client with a session.
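The bucketNameFromReqParams rewrite above swaps reflection (awsutil.ValuesAtPath) for a compile-time type assertion against the bucketGetter interface added in customizations.go. A hedged sketch of that accessor pattern; fakeInput is a hypothetical stand-in for a generated input type such as s3.HeadBucketInput, which implements getBucket the same way:

```go
package main

import "fmt"

// bucketGetter mirrors the accessor interface used in place of reflection
// to read the Bucket field from request parameters.
type bucketGetter interface {
	getBucket() string
}

// fakeInput stands in for a generated input type (illustrative only).
type fakeInput struct {
	Bucket *string
}

func (i *fakeInput) getBucket() string {
	if i.Bucket == nil {
		return ""
	}
	return *i.Bucket
}

// bucketNameFromParams follows bucketNameFromReqParams: a type assertion
// instead of a reflective path lookup.
func bucketNameFromParams(params interface{}) (string, bool) {
	if g, ok := params.(bucketGetter); ok {
		b := g.getBucket()
		return b, len(b) > 0
	}
	return "", false
}

func main() {
	name := "my-bucket"
	if b, ok := bucketNameFromParams(&fakeInput{Bucket: &name}); ok {
		fmt.Println("bucket:", b)
	}
}
```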
@@ -53,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio cfg, metadata.ClientInfo{ ServiceName: ServiceName, + ServiceID: ServiceID, SigningName: signingName, SigningRegion: signingRegion, Endpoint: endpoint, @@ -63,12 +67,16 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Sign.PushBackNamed(v4.BuildNamedHandler(v4.SignRequestHandler.Name, func(s *v4.Signer) { + s.DisableURIPathEscaping = true + })) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + svc.Handlers.UnmarshalStream.PushBackNamed(restxml.UnmarshalHandler) + // Run custom client initialization if present if initClient != nil { initClient(svc.Client) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 268ea2fb..b71c835d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -3,42 +3,82 @@ package s3 import ( "crypto/md5" "encoding/base64" + "net/http" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) func validateSSERequiresSSL(r *request.Request) { - if r.HTTPRequest.URL.Scheme != "https" { - p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") - if len(p) > 0 { + if r.HTTPRequest.URL.Scheme == "https" { + return + } + + if iface, ok := r.Params.(sseCustomerKeyGetter); ok { + if len(iface.getSSECustomerKey()) > 0 { + r.Error = errSSERequiresSSL + return + } + } + + if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + if len(iface.getCopySourceSSECustomerKey()) > 0 { r.Error = errSSERequiresSSL + return } } } -func computeSSEKeys(r *request.Request) { - headers := []string{ - "x-amz-server-side-encryption-customer-key", - "x-amz-copy-source-server-side-encryption-customer-key", +const ( + sseKeyHeader = "x-amz-server-side-encryption-customer-key" + sseKeyMD5Header = sseKeyHeader + "-md5" +) + +func computeSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(sseCustomerKeyGetter); ok { + key = g.getSSECustomerKey() } - for _, h := range headers { - md5h := h + "-md5" - if key := r.HTTPRequest.Header.Get(h); key != "" { - // Base64-encode the value - b64v := base64.StdEncoding.EncodeToString([]byte(key)) - r.HTTPRequest.Header.Set(h, b64v) - - // Add MD5 if it wasn't computed - if r.HTTPRequest.Header.Get(md5h) == "" { - sum := md5.Sum([]byte(key)) - b64sum := base64.StdEncoding.EncodeToString(sum[:]) - r.HTTPRequest.Header.Set(md5h, b64sum) - } + computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest) +} + +const ( + copySrcSSEKeyHeader = "x-amz-copy-source-server-side-encryption-customer-key" + copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5" +) + +func computeCopySourceSSEKeyMD5(r *request.Request) { + var key string + if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok { + key = g.getCopySourceSSECustomerKey() + } + + computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest) +} + +func 
computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) { + if len(key) == 0 { + // Backwards compatibility where the user just set the header value instead + // of using the API parameter, or set the header value for an + // operation without the parameters modeled. + key = r.Header.Get(keyHeader) + if len(key) == 0 { + return } + + // In the backwards compatible case, the header's value is not base64 + // encoded, and needs to be encoded and updated by the SDK's customizations. + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + r.Header.Set(keyHeader, b64Key) + } + + // Only update the key's MD5 if not already set. + if len(r.Header.Get(keyMD5Header)) == 0 { + sum := md5.Sum([]byte(key)) + keyMD5 := base64.StdEncoding.EncodeToString(sum[:]) + r.Header.Set(keyMD5Header, keyMD5) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go index 5a78fd33..f6a69aed 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -7,17 +7,22 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/internal/sdkio" ) func copyMultipartStatusOKUnmarhsalError(r *request.Request) { b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { - r.Error = awserr.New("SerializationError", "unable to read response body", err) + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, "unable to read response body", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) return } body := bytes.NewReader(b) r.HTTPResponse.Body = ioutil.NopCloser(body) - defer body.Seek(0, 0) + defer body.Seek(0, sdkio.SeekStart) if body.Len() == 0 { // If there is no body don't attempt to parse the body. @@ -26,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) { unmarshalError(r) if err, ok := r.Error.(awserr.Error); ok && err != nil { - if err.Code() == "SerializationError" { + if err.Code() == request.ErrCodeSerialization { r.Error = nil return } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index ed91c587..5b63fac7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" ) type xmlErrorResponse struct { @@ -26,40 +27,62 @@ func unmarshalError(r *request.Request) { // Bucket exists in a different region, and request needs // to be made to the correct region.
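computeKeyMD5 above base64-encodes the raw SSE-C key and, when the caller has not already set it, derives the -md5 companion header from the MD5 of the raw key bytes, not of the base64 form. A small sketch of just that derivation; sseHeaders and the 32-byte key are made up for illustration:

```go
package main

import (
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// sseHeaders derives the two SSE-C header values the way computeKeyMD5
// does: base64 of the raw key, and base64 of the MD5 sum of the raw key.
func sseHeaders(rawKey []byte) (b64Key, b64KeyMD5 string) {
	sum := md5.Sum(rawKey)
	return base64.StdEncoding.EncodeToString(rawKey),
		base64.StdEncoding.EncodeToString(sum[:])
}

func main() {
	// Made-up 32-byte (AES-256) customer key, for illustration only.
	key := []byte("0123456789abcdef0123456789abcdef")
	k, kmd5 := sseHeaders(key)
	fmt.Println("x-amz-server-side-encryption-customer-key:", k)
	fmt.Println("x-amz-server-side-encryption-customer-key-md5:", kmd5)
}
```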
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + msg := fmt.Sprintf( + "incorrect region, the bucket is not in '%s' region at endpoint '%s'", + aws.StringValue(r.Config.Region), + aws.StringValue(r.Config.Endpoint), + ) + if v := r.HTTPResponse.Header.Get("x-amz-bucket-region"); len(v) != 0 { + msg += fmt.Sprintf(", bucket is in '%s' region", v) + } r.Error = awserr.NewRequestFailure( - awserr.New("BucketRegionError", - fmt.Sprintf("incorrect region, the bucket is not in '%s' region", - aws.StringValue(r.Config.Region)), - nil), + awserr.New("BucketRegionError", msg, nil), r.HTTPResponse.StatusCode, r.RequestID, ) return } - var errCode, errMsg string - // Attempt to parse error from body if it is known - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - errCode = "SerializationError" - errMsg = "failed to decode S3 XML error response" - } else { - errCode = resp.Code - errMsg = resp.Message + var errResp xmlErrorResponse + err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body) + if err == io.EOF { + // Only capture the error if an unmarshal error occurs that is not EOF, + // because S3 might send an error without an error message, which causes + // the XML unmarshal to fail with EOF. + err = nil + } + if err != nil { + r.Error = awserr.NewRequestFailure( + awserr.New(request.ErrCodeSerialization, + "failed to unmarshal error message", err), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return } // Fallback to status code converted to message if still no error code - if len(errCode) == 0 { + if len(errResp.Code) == 0 { statusText := http.StatusText(r.HTTPResponse.StatusCode) - errCode = strings.Replace(statusText, " ", "", -1) - errMsg = statusText + errResp.Code = strings.Replace(statusText, " ", "", -1) + errResp.Message = statusText } r.Error = awserr.NewRequestFailure( - awserr.New(errCode, errMsg, nil), + awserr.New(errResp.Code, errResp.Message, err), r.HTTPResponse.StatusCode, r.RequestID, ) } + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formatted error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. +type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debugging and contacting support + HostID() string +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go index 5e16be4b..2596c694 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -1,139 +1,214 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package s3 import ( - "github.com/aws/aws-sdk-go/private/waiter" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" ) // WaitUntilBucketExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 301, }, { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 403, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilBucketNotExists uses the Amazon S3 API operation // HeadBucket to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadBucket", - Delay: 5, + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilObjectExists uses the Amazon S3 API operation // HeadObject to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 200, }, { - State: "retry", - Matcher: "status", - Argument: "", + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } // WaitUntilObjectNotExists uses the Amazon S3 API operation // HeadObject to wait for a condition to be met before returning. -// If the condition is not meet within the max attempt window an error will +// If the condition is not met within the max attempt window, an error will // be returned. 
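The regenerated waiters all share this shape: a request.Waiter with status-matcher acceptors, a constant delay, and a NewRequest factory, and the WithContext variants accept request.WaiterOption values for tuning. A hedged usage sketch, assuming a configured session; request.WithWaiterDelay and request.WithWaiterMaxAttempts are the request package's stock tuning options, and the bucket name is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// Poll HeadBucket every 10s, up to 12 attempts, honoring ctx cancellation.
	err := svc.WaitUntilBucketExistsWithContext(ctx,
		&s3.HeadBucketInput{Bucket: aws.String("my-bucket")}, // placeholder bucket
		request.WithWaiterDelay(request.ConstantWaiterDelay(10*time.Second)),
		request.WithWaiterMaxAttempts(12),
	)
	if err != nil {
		fmt.Println("bucket never became visible:", err)
		return
	}
	fmt.Println("bucket exists")
}
```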
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { - waiterCfg := waiter.Config{ - Operation: "HeadObject", - Delay: 5, + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", MaxAttempts: 20, - Acceptors: []waiter.WaitAcceptor{ + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ { - State: "success", - Matcher: "status", - Argument: "", + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, Expected: 404, }, }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, } + w.ApplyOptions(opts...) - w := waiter.Waiter{ - Client: c, - Input: input, - Config: waiterCfg, - } - return w.Wait() + return w.WaitWithContext(ctx) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index ad42b4c9..9e610591 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -1,12 +1,14 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. -// Package sts provides a client for AWS Security Token Service. package sts import ( + "fmt" "time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/request" ) @@ -14,19 +16,18 @@ const opAssumeRole = "AssumeRole" // AssumeRoleRequest generates a "aws/request.Request" representing the // client's request for the AssumeRole operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See AssumeRole for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRole method directly -// instead. +// See AssumeRole for more information on using the AssumeRole +// API call, and error handling. 
+// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleRequest method. // req, resp := client.AssumeRoleRequest(params) @@ -36,7 +37,7 @@ const opAssumeRole = "AssumeRole" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -55,95 +56,101 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // AssumeRole API operation for AWS Security Token Service. // -// Returns a set of temporary security credentials (consisting of an access -// key ID, a secret access key, and a security token) that you can use to access -// AWS resources that you might not normally have access to. Typically, you -// use AssumeRole for cross-account access or federation. For a comparison of -// AssumeRole with the other APIs that produce temporary credentials, see Requesting -// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// Returns a set of temporary security credentials that you can use to access +// AWS resources that you might not normally have access to. These temporary +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use AssumeRole within your account or for cross-account +// access. For a comparison of AssumeRole with other API operations that produce +// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use credentials for an IAM user or an IAM role -// to call AssumeRole. +// You cannot use AWS account root user credentials to call AssumeRole. You +// must use credentials for an IAM user or an IAM role to call AssumeRole. // // For cross-account access, imagine that you own multiple accounts and need // to access resources in each account. You could create long-term credentials // in each account to access those resources. However, managing all those credentials // and remembering which one can access which account can be time consuming. -// Instead, you can create one set of long-term credentials in one account and -// then use temporary security credentials to access all the other accounts +// Instead, you can create one set of long-term credentials in one account. +// Then use temporary security credentials to access all the other accounts // by assuming roles in those accounts. 
For more information about roles, see -// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html) // in the IAM User Guide. // -// For federation, you can, for example, grant single sign-on access to the -// AWS Management Console. If you already have an identity and authentication -// system in your corporate network, you don't have to recreate user identities -// in AWS in order to grant those user identities access to AWS. Instead, after -// a user has been authenticated, you call AssumeRole (and specify the role -// with the appropriate permissions) to get temporary security credentials for -// that user. With those temporary security credentials, you construct a sign-in -// URL that users can use to access the console. For more information, see Common -// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// By default, the temporary security credentials created by AssumeRole last +// for one hour. However, you can use the optional DurationSeconds parameter +// to specify the duration of your session. You can provide a value from 900 +// seconds (15 minutes) up to the maximum session duration setting for the role. +// This setting can have a value from 1 hour to 12 hours. To learn how to view +// the maximum value for your role, see View the Maximum Session Duration Setting +// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) +// in the IAM User Guide. The maximum session duration limit applies when you +// use the AssumeRole* API operations or the assume-role* CLI commands. However +// the limit does not apply when you use those operations to create a console +// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) // in the IAM User Guide. // -// The temporary security credentials are valid for the duration that you specified -// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a -// maximum of 3600 seconds (1 hour). The default is 1 hour. -// // The temporary security credentials created by AssumeRole can be used to make -// API calls to any AWS service with the following exception: you cannot call -// the STS service's GetFederationToken or GetSessionToken APIs. -// -// Optionally, you can pass an IAM access policy to this operation. If you choose -// not to pass a policy, the temporary security credentials that are returned -// by the operation have the permissions that are defined in the access policy -// of the role that is being assumed. If you pass a policy to this operation, -// the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to further -// restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. 
-// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// API calls to any AWS service with the following exception: You cannot call +// the AWS STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // -// To assume a role, your AWS account must be trusted by the role. The trust -// relationship is defined in the role's trust policy when the role is created. -// That trust policy states which accounts are allowed to delegate access to -// this account's role. +// To assume a role from a different account, your AWS account must be trusted +// by the role. The trust relationship is defined in the role's trust policy +// when the role is created. That trust policy states which accounts are allowed +// to delegate that access to users in the account. +// +// A user who wants to access a role in a different account must also have permissions +// that are delegated from the user account administrator. The administrator +// must attach a policy that allows the user to call AssumeRole for the ARN +// of the role in the other account. If the user is in the same account as the +// role, then you can do either of the following: // -// The user who wants to access the role must also have permissions delegated -// from the role's administrator. If the user is in a different account than -// the role, then the user's administrator must attach a policy that allows -// the user to call AssumeRole on the ARN of the role in the other account. -// If the user is in the same account as the role, then you can either attach -// a policy to the user (identical to the previous different account user), -// or you can add the user as a principal directly in the role's trust policy +// * Attach a policy to the user (identical to the previous user in a different +// account). +// +// * Add the user as a principal directly in the role's trust policy. +// +// In this case, the trust policy acts as an IAM resource-based policy. Users +// in the same account as the role do not need explicit permission to assume +// the role. For more information about trust policies and resource-based policies, +// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) +// in the IAM User Guide. 
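A sketch tying together the AssumeRole parameters discussed in the paragraphs above: DurationSeconds bounded by the role's maximum session duration, plus an inline session policy whose permissions intersect with the role's identity-based policy. The role ARN, session name, and policy document are placeholders, not values from this codebase:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Inline session policy: the resulting credentials are limited to the
	// intersection of the role's identity-based policy and this document.
	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600), // up to the role's max session duration
		Policy:          aws.String(policy),
	})
	if err != nil {
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```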
// // Using MFA with AssumeRole // -// You can optionally include multi-factor authentication (MFA) information -// when you call AssumeRole. This is useful for cross-account scenarios in which -// you want to make sure that the user who is assuming the role has been authenticated -// using an AWS MFA device. In that scenario, the trust policy of the role being -// assumed includes a condition that tests for MFA authentication; if the caller -// does not include valid MFA information, the request to assume the role is -// denied. The condition in a trust policy that tests for MFA authentication -// might look like the following example. +// (Optional) You can include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios to ensure +// that the user that assumes the role has been authenticated with an AWS MFA +// device. In that scenario, the trust policy of the role being assumed includes +// a condition that tests for MFA authentication. If the caller does not include +// valid MFA information, the request to assume the role is denied. The condition +// in a trust policy that tests for MFA authentication might look like the following +// example. // // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} // -// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) // in the IAM User Guide. // // To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode // parameters. The SerialNumber value identifies the user's hardware or virtual // MFA device. The TokenCode is the time-based one-time password (TOTP) that -// the MFA devices produces. +// the MFA device produces. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -166,33
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithSAML operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssumeRoleWithSAML for usage and error information. +// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithSAML method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithSAMLRequest method. // req, resp := client.AssumeRoleWithSAMLRequest(params) @@ -202,7 +223,7 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { op := &request.Operation{ Name: opAssumeRoleWithSAML, @@ -216,6 +237,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re output = &AssumeRoleWithSAMLOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } @@ -225,45 +247,53 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // via a SAML authentication response. This operation provides a mechanism for // tying an enterprise identity store or directory to role-based AWS access // without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// with the other API operations that produce temporary credentials, see Requesting +// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. 
//
// The temporary security credentials returned by this operation consist of
// an access key ID, a secret access key, and a security token. Applications
// can use these temporary security credentials to sign calls to AWS services.
//
-// The temporary security credentials are valid for the duration that you specified
-// when calling AssumeRole, or until the time specified in the SAML authentication
-// response's SessionNotOnOrAfter value, whichever is shorter. The duration
-// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
-// The default is 1 hour.
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
+// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
+// duration setting for the role. This setting can have a value from 1 hour
+// to 12 hours. To learn how to view the maximum value for your role, see View
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
//
// The temporary security credentials created by AssumeRoleWithSAML can be used
// to make API calls to any AWS service with the following exception: you cannot
-// call the STS service's GetFederationToken or GetSessionToken APIs.
-//
-// Optionally, you can pass an IAM access policy to this operation. If you choose
-// not to pass a policy, the temporary security credentials that are returned
-// by the operation have the permissions that are defined in the access policy
-// of the role that is being assumed. If you pass a policy to this operation,
-// the temporary security credentials that are returned by the operation have
-// the permissions that are allowed by the intersection of both the access policy
-// of the role that is being assumed, and the policy that you pass. This means
-// that both policies must grant the permission for the action to be allowed.
-// This gives you a way to further restrict the permissions for the resulting
-// temporary security credentials. You cannot use the passed policy to grant
-// permissions that are in excess of those allowed by the access policy of the
-// role that is being assumed. For more information, see Permissions for AssumeRole,
-// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy.
You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithSAML, you must configure your // SAML identity provider (IdP) to issue the claims required by AWS. Additionally, // you must use AWS Identity and Access Management (IAM) to create a SAML provider -// entity in your AWS account that represents your identity provider, and create -// an IAM role that specifies this SAML provider in its trust policy. +// entity in your AWS account that represents your identity provider. You must +// also create an IAM role that specifies this SAML provider in its trust policy. // // Calling AssumeRoleWithSAML does not require the use of AWS security credentials. // The identity of the caller is validated by using keys in the metadata document @@ -277,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // // For more information, see the following resources: // -// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. // -// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) // in the IAM User Guide. // -// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) // in the IAM User Guide. // -// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -325,33 +355,47 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. 
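//
// A sketch of the runtime type assertion mentioned above, assuming err came
// back from one of these calls; sts.ErrCodeRegionDisabledException corresponds
// to the region error just described:
//
//    if aerr, ok := err.(awserr.Error); ok {
//        switch aerr.Code() {
//        case sts.ErrCodeRegionDisabledException:
//            // STS is not activated in this region; an administrator must enable it
//        default:
//            fmt.Println(aerr.Code(), aerr.Message())
//        }
//    }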
For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { req, out := c.AssumeRoleWithSAMLRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // client's request for the AssumeRoleWithWebIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See AssumeRoleWithWebIdentity for usage and error information. +// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the AssumeRoleWithWebIdentity method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
// req, resp := client.AssumeRoleWithWebIdentityRequest(params) @@ -361,7 +405,7 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -375,62 +419,72 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI output = &AssumeRoleWithWebIdentityOutput{} req = c.newRequest(op, input, output) + req.Config.Credentials = credentials.AnonymousCredentials return } // AssumeRoleWithWebIdentity API operation for AWS Security Token Service. // // Returns a set of temporary security credentials for users who have been authenticated -// in a mobile or web application with a web identity provider, such as Amazon -// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible -// identity provider. +// in a mobile or web application with a web identity provider. Example providers +// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID +// Connect-compatible identity provider. // // For mobile applications, we recommend that you use Amazon Cognito. You can -// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) -// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely -// identify a user and supply the user with a consistent identity throughout -// the lifetime of an application. -// -// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) -// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview -// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/) +// to uniquely identify a user. You can also supply the user with a consistent +// identity throughout the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) // in the AWS SDK for iOS Developer Guide. // // Calling AssumeRoleWithWebIdentity does not require the use of AWS security // credentials. Therefore, you can distribute an application (for example, on // mobile devices) that requests temporary security credentials without including -// long-term AWS credentials in the application, and without deploying server-based -// proxy services that use long-term AWS credentials. Instead, the identity -// of the caller is validated by using a token from the web identity provider. 
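//
// As an illustrative sketch, assuming idToken holds the OpenID Connect token
// returned by the provider and the role ARN is a placeholder, a call (sent
// unsigned, with anonymous credentials) might look like:
//
//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
//        RoleArn:          aws.String("arn:aws:iam::111122223333:role/web-identity-role"), // hypothetical
//        RoleSessionName:  aws.String("app-user-123"),
//        WebIdentityToken: aws.String(idToken),
//    })
//    _, _ = out, err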
-// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
-// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// long-term AWS credentials in the application. You also don't need to deploy
+// server-based proxy services that use long-term AWS credentials. Instead,
+// the identity of the caller is validated by using a token from the web identity
+// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// The temporary security credentials returned by this API consist of an access
// key ID, a secret access key, and a security token. Applications can use these
-// temporary security credentials to sign calls to AWS service APIs.
-//
-// The credentials are valid for the duration that you specified when calling
-// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
-// a maximum of 3600 seconds (1 hour). The default is 1 hour.
+// temporary security credentials to sign calls to AWS service API operations.
+//
+// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. You can provide a value from 900
+// seconds (15 minutes) up to the maximum session duration setting for the role.
+// This setting can have a value from 1 hour to 12 hours. To learn how to view
+// the maximum value for your role, see View the Maximum Session Duration Setting
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// in the IAM User Guide.
//
// The temporary security credentials created by AssumeRoleWithWebIdentity can
// be used to make API calls to any AWS service with the following exception:
-// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
-//
-// Optionally, you can pass an IAM access policy to this operation. If you choose
-// not to pass a policy, the temporary security credentials that are returned
-// by the operation have the permissions that are defined in the access policy
-// of the role that is being assumed. If you pass a policy to this operation,
-// the temporary security credentials that are returned by the operation have
-// the permissions that are allowed by both the access policy of the role that
-// is being assumed, and the policy that you pass. This gives you a way to further
-// restrict the permissions for the resulting temporary security credentials.
-// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// you cannot call the STS GetFederationToken or GetSessionToken API operations. +// +// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. Passing policies +// to this operation returns new temporary credentials. The resulting session's +// permissions are the intersection of the role's identity-based policy and +// the session policies. You can use the role's temporary credentials in subsequent +// AWS API calls to access resources in the account that owns the role. You +// cannot use session policies to grant more permissions than those allowed +// by the identity-based policy of the role that is being assumed. For more +// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // // Before your application can call AssumeRoleWithWebIdentity, you must have @@ -449,23 +503,21 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity // API, see the following resources: // -// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) -// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// +// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). // -// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). -// This interactive website lets you walk through the process of authenticating -// via Login with Amazon, Facebook, or Google, getting temporary security -// credentials, and then using those credentials to make a request to AWS. +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// Walk through the process of authenticating through Login with Amazon, +// Facebook, or Google, getting temporary security credentials, and then +// using those credentials to make a request to AWS. // +// * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and +// AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/). +// These toolkits contain sample apps that show how to invoke the identity +// providers, and then how to use the information from these providers to +// get and use temporary security credentials. 
// -// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android -// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample -// apps that show how to invoke the identity providers, and then how to use -// the information from these providers to get and use temporary security -// credentials. -// -// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications). // This article discusses web identity federation and shows an example of // how to use web identity federation to get access to content in Amazon // S3. @@ -513,33 +565,47 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { req, out := c.AssumeRoleWithWebIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // client's request for the DecodeAuthorizationMessage operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See DecodeAuthorizationMessage for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. 
// -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the DecodeAuthorizationMessage method directly -// instead. +// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the DecodeAuthorizationMessageRequest method. // req, resp := client.DecodeAuthorizationMessageRequest(params) @@ -549,7 +615,7 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { op := &request.Operation{ Name: opDecodeAuthorizationMessage, @@ -571,17 +637,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // Decodes additional information about the authorization status of a request // from an encoded message returned in response to an AWS request. // -// For example, if a user is not authorized to perform an action that he or -// she has requested, the request returns a Client.UnauthorizedOperation response -// (an HTTP 403 response). Some AWS actions additionally return an encoded message -// that can provide details about this authorization failure. +// For example, if a user is not authorized to perform an operation that he +// or she has requested, the request returns a Client.UnauthorizedOperation +// response (an HTTP 403 response). Some AWS operations additionally return +// an encoded message that can provide details about this authorization failure. // -// Only certain AWS actions return an encoded authorization message. The documentation -// for an individual action indicates whether that action returns an encoded -// message in addition to returning an HTTP code. +// Only certain AWS operations return an encoded authorization message. The +// documentation for an individual operation indicates whether that operation +// returns an encoded message in addition to returning an HTTP code. // // The message is encoded because the details of the authorization status can -// constitute privileged information that the user who requested the action +// constitute privileged information that the user who requested the operation // should not see. To decode an authorization status message, a user must be // granted permissions via an IAM policy to request the DecodeAuthorizationMessage // (sts:DecodeAuthorizationMessage) action. @@ -590,7 +656,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // // * Whether the request was denied due to an explicit deny or due to the // absence of an explicit allow. 
For more information, see Determining Whether -// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) // in the IAM User Guide. // // * The principal who made the request. @@ -614,30 +680,44 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag // invalid. This can happen if the token contains invalid characters, such as // linebreaks. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { req, out := c.DecodeAuthorizationMessageRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetCallerIdentity = "GetCallerIdentity" // GetCallerIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCallerIdentity operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetCallerIdentity for usage and error information. +// See GetCallerIdentity for more information on using the GetCallerIdentity +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetCallerIdentity method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetCallerIdentityRequest method. 
// req, resp := client.GetCallerIdentityRequest(params) @@ -647,7 +727,7 @@ const opGetCallerIdentity = "GetCallerIdentity" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { op := &request.Operation{ Name: opGetCallerIdentity, @@ -675,30 +755,44 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ // // See the AWS API reference guide for AWS Security Token Service's // API operation GetCallerIdentity for usage and error information. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { req, out := c.GetCallerIdentityRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetFederationToken = "GetFederationToken" // GetFederationTokenRequest generates a "aws/request.Request" representing the // client's request for the GetFederationToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// See GetFederationToken for usage and error information. +// See GetFederationToken for more information on using the GetFederationToken +// API call, and error handling. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetFederationToken method directly -// instead. +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetFederationTokenRequest method. 
// req, resp := client.GetFederationTokenRequest(params) @@ -708,7 +802,7 @@ const opGetFederationToken = "GetFederationToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { op := &request.Operation{ Name: opGetFederationToken, @@ -730,81 +824,65 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // Returns a set of temporary security credentials (consisting of an access // key ID, a secret access key, and a security token) for a federated user. // A typical use is in a proxy application that gets temporary security credentials -// on behalf of distributed applications inside a corporate network. Because -// you must call the GetFederationToken action using the long-term security -// credentials of an IAM user, this call is appropriate in contexts where those +// on behalf of distributed applications inside a corporate network. You must +// call the GetFederationToken operation using the long-term security credentials +// of an IAM user. As a result, this call is appropriate in contexts where those // credentials can be safely stored, usually in a server-based application. -// For a comparison of GetFederationToken with the other APIs that produce temporary -// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// For a comparison of GetFederationToken with the other API operations that +// produce temporary credentials, see Requesting Temporary Security Credentials +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// If you are creating a mobile-based or browser-based app that can authenticate +// You can create a mobile-based or browser-based app that can authenticate // users using a web identity provider like Login with Amazon, Facebook, Google, -// or an OpenID Connect-compatible identity provider, we recommend that you -// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. +// or an OpenID Connect-compatible identity provider. In this case, we recommend +// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity. // For more information, see Federation Through a Web-based Identity Provider -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). -// -// The GetFederationToken action must be called by using the long-term AWS security -// credentials of an IAM user. You can also call GetFederationToken using the -// security credentials of an AWS root account, but we do not recommended it. -// Instead, we recommend that you create an IAM user for the purpose of the -// proxy application and then attach a policy to the IAM user that limits federated -// users to only the actions and resources that they need access to. 
For more -// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// You can also call GetFederationToken using the security credentials of an +// AWS account root user, but we do not recommend it. Instead, we recommend +// that you create an IAM user for the purpose of the proxy application. Then +// attach a policy to the IAM user that limits federated users to only the actions +// and resources that they need to access. For more information, see IAM Best +// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) // in the IAM User Guide. // -// The temporary security credentials that are obtained by using the long-term -// credentials of an IAM user are valid for the specified duration, from 900 -// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default -// is 43200 seconds (12 hours). Temporary credentials that are obtained by using -// AWS root account credentials have a maximum duration of 3600 seconds (1 hour). +// The temporary credentials are valid for the specified duration, from 900 +// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default +// is 43,200 seconds (12 hours). Temporary credentials that are obtained by +// using AWS account root user credentials have a maximum duration of 3,600 +// seconds (1 hour). // // The temporary security credentials created by GetFederationToken can be used // to make API calls to any AWS service with the following exceptions: // -// * You cannot use these credentials to call any IAM APIs. +// * You cannot use these credentials to call any IAM API operations. // -// * You cannot call any STS APIs except GetCallerIdentity. +// * You cannot call any STS API operations except GetCallerIdentity. // // Permissions // -// The permissions for the temporary security credentials returned by GetFederationToken -// are determined by a combination of the following: -// -// * The policy or policies that are attached to the IAM user whose credentials -// are used to call GetFederationToken. -// -// * The policy that is passed as a parameter in the call. -// -// The passed policy is attached to the temporary security credentials that -// result from the GetFederationToken API call--that is, to the federated user. -// When the federated user makes an AWS request, AWS evaluates the policy attached -// to the federated user in combination with the policy or policies attached -// to the IAM user whose credentials were used to call GetFederationToken. AWS -// allows the federated user's request only when both the federated user and -// the IAM user are explicitly allowed to perform the requested action. The -// passed policy cannot grant more permissions than those that are defined in -// the IAM user policy. -// -// A typical use case is that the permissions of the IAM user whose credentials -// are used to call GetFederationToken are designed to allow access to all the -// actions and resources that any federated user will need. Then, for individual -// users, you pass a policy to the operation that scopes down the permissions -// to a level that's appropriate to that individual user, using a policy that -// allows only a subset of permissions that are granted to the IAM user. -// -// If you do not pass a policy, the resulting temporary security credentials -// have no effective permissions. 
The only exception is when the temporary security -// credentials are used to access a resource that has a resource-based policy -// that specifically allows the federated user to access the resource. -// -// For more information about how permissions work, see Permissions for GetFederationToken -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). -// For information about using GetFederationToken to create temporary security -// credentials, see GetFederationToken—Federation Through a Custom Identity -// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// to this operation. You can pass a single JSON policy document to use as an +// inline session policy. You can also specify up to 10 managed policies to +// use as managed session policies. The plain text that you use for both inline +// and managed session policies shouldn't exceed 2048 characters. +// +// Though the session policy parameters are optional, if you do not pass a policy, +// then the resulting federated user session has no permissions. The only exception +// is when the credentials are used to access a resource that has a resource-based +// policy that specifically references the federated user session in the Principal +// element of the policy. When you pass session policies, the session permissions +// are the intersection of the IAM user policies and the session policies that +// you pass. This gives you a way to further restrict the permissions for a +// federated user. You cannot use session policies to grant more permissions +// than those that are defined in the permissions policy of the IAM user. For +// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) +// in the IAM User Guide. For information about using GetFederationToken to +// create temporary security credentials, see GetFederationToken—Federation +// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about @@ -827,33 +905,47 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
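//
// As an illustrative sketch (the user name and policy JSON are placeholders),
// a proxy application holding an IAM user's long-term credentials might
// federate a user like this:
//
//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",` +
//        `"Action":"s3:GetObject","Resource":"arn:aws:s3:::example-bucket/*"}]}`
//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
//        Name:            aws.String("example-user"),
//        DurationSeconds: aws.Int64(3600),
//        Policy:          aws.String(policy), // without a policy the session has no permissions
//    })
//    _, _ = out, err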
// -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { req, out := c.GetFederationTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() } const opGetSessionToken = "GetSessionToken" // GetSessionTokenRequest generates a "aws/request.Request" representing the // client's request for the GetSessionToken operation. The "output" return -// value can be used to capture response data after the request's "Send" method -// is called. +// value will be populated with the request's response once the request completes +// successfully. // -// See GetSessionToken for usage and error information. +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. // -// Creating a request object using this method should be used when you want to inject -// custom logic into the request's lifecycle using a custom handler, or if you want to -// access properties on the request object before or after sending the request. If -// you just want the service response, call the GetSessionToken method directly -// instead. +// See GetSessionToken for more information on using the GetSessionToken +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. // -// Note: You must call the "Send" method on the returned request object in order -// to execute the request. // // // Example sending a request using the GetSessionTokenRequest method. // req, resp := client.GetSessionTokenRequest(params) @@ -863,7 +955,7 @@ const opGetSessionToken = "GetSessionToken" // fmt.Println(resp) // } // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { op := &request.Operation{ Name: opGetSessionToken, @@ -885,48 +977,47 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // Returns a set of temporary credentials for an AWS account or IAM user. The // credentials consist of an access key ID, a secret access key, and a security // token. Typically, you use GetSessionToken if you want to use MFA to protect -// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. 
MFA-enabled -// IAM users would need to call GetSessionToken and submit an MFA code that -// is associated with their MFA device. Using the temporary security credentials -// that are returned from the call, IAM users can then make programmatic calls -// to APIs that require MFA authentication. If you do not supply a correct MFA -// code, then the API returns an access denied error. For a comparison of GetSessionToken -// with the other APIs that produce temporary credentials, see Requesting Temporary -// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) -// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances. +// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA +// code that is associated with their MFA device. Using the temporary security +// credentials that are returned from the call, IAM users can then make programmatic +// calls to API operations that require MFA authentication. If you do not supply +// a correct MFA code, then the API returns an access denied error. For a comparison +// of GetSessionToken with the other API operations that produce temporary credentials, +// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) // in the IAM User Guide. // -// The GetSessionToken action must be called by using the long-term AWS security -// credentials of the AWS account or an IAM user. Credentials that are created -// by IAM users are valid for the duration that you specify, from 900 seconds -// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default -// of 43200 seconds (12 hours); credentials that are created by using account -// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600 -// seconds (1 hour), with a default of 1 hour. +// The GetSessionToken operation must be called by using the long-term AWS security +// credentials of the AWS account root user or an IAM user. Credentials that +// are created by IAM users are valid for the duration that you specify. This +// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600 +// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials +// based on account credentials can range from 900 seconds (15 minutes) up to +// 3,600 seconds (1 hour), with a default of 1 hour. // // The temporary security credentials created by GetSessionToken can be used // to make API calls to any AWS service with the following exceptions: // -// * You cannot call any IAM APIs unless MFA authentication information is -// included in the request. +// * You cannot call any IAM API operations unless MFA authentication information +// is included in the request. // -// * You cannot call any STS API exceptAssumeRole or GetCallerIdentity. +// * You cannot call any STS API except AssumeRole or GetCallerIdentity. // -// We recommend that you do not call GetSessionToken with root account credentials. -// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) +// We recommend that you do not call GetSessionToken with AWS account root user +// credentials. 
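//
// A minimal sketch, assuming svc is backed by an IAM user's long-term
// credentials and the MFA device serial number below is a placeholder:
//
//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
//        DurationSeconds: aws.Int64(43200),
//        SerialNumber:    aws.String("arn:aws:iam::111122223333:mfa/example-user"), // hypothetical
//        TokenCode:       aws.String("123456"), // current TOTP from the device
//    })
//    _, _ = out, err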
Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users) // by creating one or more IAM users, giving them the necessary permissions, // and using IAM users for everyday interaction with AWS. // -// The permissions associated with the temporary security credentials returned -// by GetSessionToken are based on the permissions associated with account or -// IAM user whose credentials are used to call the action. If GetSessionToken -// is called using root account credentials, the temporary credentials have -// root account permissions. Similarly, if GetSessionToken is called using the -// credentials of an IAM user, the temporary credentials have the same permissions -// as the IAM user. +// The credentials that are returned by GetSessionToken are based on permissions +// associated with the user whose credentials were used to call the operation. +// If GetSessionToken is called using AWS account root user credentials, the +// temporary credentials have root user permissions. Similarly, if GetSessionToken +// is called using the credentials of an IAM user, the temporary credentials +// have the same permissions as the IAM user. // // For more information about using GetSessionToken to create temporary credentials, -// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) +// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken) // in the IAM User Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions @@ -941,74 +1032,122 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request. // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating -// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. // -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) { req, out := c.GetSessionTokenRequest(input) - err := req.Send() - return out, err + return out, req.Send() +} + +// GetSessionTokenWithContext is the same as GetSessionToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetSessionToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) { + req, out := c.GetSessionTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest type AssumeRoleInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // A unique identifier that is used by third parties when assuming roles in - // their customers' accounts. For each role that the third party can assume, - // they should instruct their customers to ensure the role's trust policy checks - // for the external ID that the third party generated. Each time the third party - // assumes the role, they should pass the customer's external ID. The external - // ID is useful in order to help third parties bind a role to the customer who - // created it. For more information about the external ID, see How to Use an - // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) + // A unique identifier that might be required when you assume a role in another + // account. If the administrator of the account to which the role belongs provided + // you with an external ID, then provide that value in the ExternalId parameter. + // This value can be any string, such as a passphrase or account number. A cross-account + // role is usually set up to trust everyone in an account. Therefore, the administrator + // of the trusting account might send an external ID to the administrator of + // the trusted account. 
That way, only someone with the ID can assume the role, + // rather than everyone in the account. For more information about the external + // ID, see How to Use an External ID When Granting Access to Your AWS Resources + // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html) // in the IAM User Guide. // - // The regex used to validated this parameter is a string of characters consisting + // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@:\/- + // also include underscores or any of the following characters: =,.@:/- ExternalId *string `min:"2" type:"string"` - // An IAM policy in JSON format. - // - // This parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both (the intersection of) the access policy of the role that - // is being assumed, and the policy that you pass. This gives you a way to further - // restrict the permissions for the resulting temporary security credentials. - // You cannot use the passed policy to grant permissions that are in excess - // of those allowed by the access policy of the role that is being assumed. - // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, - // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. 
However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the role to assume. // // RoleArn is a required field @@ -1021,8 +1160,8 @@ type AssumeRoleInput struct { // scenarios, the role session name is visible to, and can be logged by the // account that owns the role. The role session name is also used in the ARN // of the assumed role principal. This means that subsequent cross-account API - // requests using the temporary security credentials will expose the role session - // name to the external account in their CloudTrail logs. + // requests that use the temporary security credentials will expose the role + // session name to the external account in their AWS CloudTrail logs. // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can @@ -1092,6 +1231,16 @@ func (s *AssumeRoleInput) Validate() error { if s.TokenCode != nil && len(*s.TokenCode) < 6 { invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1117,6 +1266,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { return s } +// SetPolicyArns sets the PolicyArns field's value. 
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput { + s.PolicyArns = v + return s +} + // SetRoleArn sets the RoleArn field's value. func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { s.RoleArn = &v @@ -1143,7 +1298,6 @@ func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { // Contains the response to a successful AssumeRole request, including temporary // AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse type AssumeRoleOutput struct { _ struct{} `type:"structure"` @@ -1157,10 +1311,8 @@ type AssumeRoleOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1197,51 +1349,83 @@ func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest type AssumeRoleWithSAMLInput struct { _ struct{} `type:"structure"` - // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. An expiration can also be specified in the SAML authentication - // response's SessionNotOnOrAfter value. The actual expiration time is whichever - // value is shorter. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Enabling SAML 2.0 Federated - // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // The duration, in seconds, of the role session. Your role session lasts for + // the duration that you specify for the DurationSeconds parameter, or until + // the time specified in the SAML authentication response's SessionNotOnOrAfter + // value, whichever is shorter. You can provide a DurationSeconds value from + // 900 seconds (15 minutes) up to the maximum session duration setting for the + // role. This setting can have a value from 1 hour to 12 hours. If you specify + // a value higher than this setting, the operation fails. For example, if you + // specify a session duration of 12 hours, but your administrator set the maximum + // session duration to 6 hours, your operation fails. To learn how to view the + // maximum value for your role, see View the Maximum Session Duration Setting + // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. 
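For orientation, the AssumeRole inputs documented above (DurationSeconds, ExternalId, and the new PolicyArns list of PolicyDescriptorType) typically come together as in the following sketch. This is illustrative only, not part of the vendored code: the role ARN, external ID, session name, and managed-policy ARN are placeholders, and the imports assume the canonical github.com/aws/aws-sdk-go module path rather than the mirrored vendor path used in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Credentials and region are resolved from the default chain
	// (environment, shared config, instance profile, ...).
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(3600),                   // 900s up to the role's maximum session duration
		ExternalId:      aws.String("example-external-id"), // only if the role's trust policy requires one
		// The effective permissions are the intersection of the role's
		// identity-based policy and these session policies.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess")}, // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}
```

Note that Validate runs as part of building the request, so the PolicyArns validation added in this diff surfaces malformed entries (for example, an ARN shorter than 20 characters) as an ErrInvalidParams error before any network call is made.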
+ // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. - // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // An IAM policy in JSON format that you want to use as an inline session policy. + // + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. 
The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. + // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes // the IdP. // @@ -1255,8 +1439,8 @@ type AssumeRoleWithSAMLInput struct { // The base-64 encoded SAML authentication response provided by the IdP. // - // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) - // in the Using IAM guide. + // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html) + // in the IAM User Guide. // // SAMLAssertion is a required field SAMLAssertion *string `min:"4" type:"string" required:"true"` @@ -1299,6 +1483,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error { if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 { invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1318,6 +1512,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput { + s.PolicyArns = v + return s +} + // SetPrincipalArn sets the PrincipalArn field's value. 
func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { s.PrincipalArn = &v @@ -1338,7 +1538,6 @@ func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAML // Contains the response to a successful AssumeRoleWithSAML request, including // temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse type AssumeRoleWithSAMLOutput struct { _ struct{} `type:"structure"` @@ -1353,10 +1552,8 @@ type AssumeRoleWithSAMLOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // The value of the Issuer element of the SAML assertion. @@ -1450,48 +1647,80 @@ func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLO return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest type AssumeRoleWithWebIdentityInput struct { _ struct{} `type:"structure"` // The duration, in seconds, of the role session. The value can range from 900 - // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set - // to 3600 seconds. - // - // This is separate from the duration of a console session that you might request - // using the returned credentials. The request to the federation endpoint for - // a console sign-in token takes a SessionDuration parameter that specifies - // the maximum length of the console session, separately from the DurationSeconds - // parameter on this API. For more information, see Creating a URL that Enables - // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // seconds (15 minutes) up to the maximum session duration setting for the role. + // This setting can have a value from 1 hour to 12 hours. If you specify a value + // higher than this setting, the operation fails. For example, if you specify + // a session duration of 12 hours, but your administrator set the maximum session + // duration to 6 hours, your operation fails. To learn how to view the maximum + // value for your role, see View the Maximum Session Duration Setting for a + // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session) + // in the IAM User Guide. + // + // By default, the value is set to 3600 seconds. + // + // The DurationSeconds parameter is separate from the duration of a console + // session that you might request using the returned credentials. The request + // to the federation endpoint for a console sign-in token takes a SessionDuration + // parameter that specifies the maximum length of the console session. 
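A corresponding sketch for the AssumeRoleWithSAML inputs covered above, again hypothetical: the role ARN, SAML provider ARN, and assertion are placeholders, and in practice the assertion is the base64-encoded SAMLResponse obtained from your identity provider.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// AssumeRoleWithSAML authenticates with the SAML assertion itself
	// rather than long-term AWS credentials, so an anonymous client works.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"), // placeholder
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := sts.New(sess)

	samlAssertion := "PHNhbWxwOlJlc3BvbnNlPi4uLg==" // placeholder base64 SAMLResponse from the IdP

	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),       // placeholder
		PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/my-idp"), // placeholder
		SAMLAssertion: aws.String(samlAssertion),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("assumed:", aws.StringValue(out.AssumedRoleUser.Arn))
}
```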
For more + // information, see Creating a URL that Enables Federated Users to Access the + // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` - // An IAM policy in JSON format. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy parameter is optional. If you pass a policy, the temporary security - // credentials that are returned by the operation have the permissions that - // are allowed by both the access policy of the role that is being assumed, - // and the policy that you pass. This gives you a way to further restrict the - // permissions for the resulting temporary security credentials. You cannot - // use the passed policy to grant permissions that are in excess of those allowed - // by the access policy of the role that is being assumed. For more information, - // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // This parameter is optional. Passing policies to this operation returns new + // temporary credentials. The resulting session's permissions are the intersection + // of the role's identity-based policy and the session policies. You can use + // the role's temporary credentials in subsequent AWS API calls to access resources + // in the account that owns the role. You cannot use session policies to grant + // more permissions than those allowed by the identity-based policy of the role + // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) // in the IAM User Guide. // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), // and carriage return (\u000D) characters. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as managed session policies. The policies must exist in the same account + // as the role. + // + // This parameter is optional. You can provide up to 10 managed policy ARNs. 
+ // However, the plain text that you use for both inline and managed session + // policies shouldn't exceed 2048 characters. For more information about ARNs, + // see Amazon Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + // + // Passing policies to this operation returns new temporary credentials. The + // resulting session's permissions are the intersection of the role's identity-based + // policy and the session policies. You can use the role's temporary credentials + // in subsequent AWS API calls to access resources in the account that owns + // the role. You cannot use session policies to grant more permissions than + // those allowed by the identity-based policy of the role that is being assumed. + // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + PolicyArns []*PolicyDescriptorType `type:"list"` + // The fully qualified host component of the domain name of the identity provider. // // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com @@ -1568,6 +1797,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error { if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -1587,6 +1826,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput { + s.PolicyArns = v + return s +} + // SetProviderId sets the ProviderId field's value. func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { s.ProviderId = &v @@ -1613,7 +1858,6 @@ func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRo // Contains the response to a successful AssumeRoleWithWebIdentity request, // including temporary AWS credentials that can be used to make AWS requests. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse type AssumeRoleWithWebIdentityOutput struct { _ struct{} `type:"structure"` @@ -1632,10 +1876,8 @@ type AssumeRoleWithWebIdentityOutput struct { // The temporary security credentials, which include an access key ID, a secret // access key, and a security token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. 
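For the web identity flow documented above, a minimal hypothetical sketch follows. The role ARN and OIDC token are placeholders; the token would come from the external provider (for example, an OpenID Connect ID token).

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Like AssumeRoleWithSAML, this operation authenticates with the
	// web identity token itself, so no AWS credentials are needed.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"), // placeholder
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := sts.New(sess)

	idToken := "eyJhbGciOi..." // placeholder OIDC ID token from the provider

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"), // placeholder
		RoleSessionName:  aws.String("app-user-42"),
		WebIdentityToken: aws.String(idToken),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subject:", aws.StringValue(out.SubjectFromWebIdentityToken))
}
```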
+ // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // A percentage value that indicates the size of the policy in packed form. @@ -1644,7 +1886,7 @@ type AssumeRoleWithWebIdentityOutput struct { PackedPolicySize *int64 `type:"integer"` // The issuing authority of the web identity token presented. For OpenID Connect - // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // ID tokens, this contains the value of the iss field. For OAuth 2.0 access // tokens, this contains the value of the ProviderId parameter that was passed // in the AssumeRoleWithWebIdentity request. Provider *string `type:"string"` @@ -1706,13 +1948,12 @@ func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v strin // The identifiers for the temporary security credentials that the operation // returns. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser type AssumedRoleUser struct { _ struct{} `type:"structure"` // The ARN of the temporary security credentials that are returned from the // AssumeRole action. For more information about ARNs and how to use them in - // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. // // Arn is a required field @@ -1749,7 +1990,6 @@ func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { } // AWS credentials for API authentication. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials type Credentials struct { _ struct{} `type:"structure"` @@ -1761,7 +2001,7 @@ type Credentials struct { // The date on which the current credentials expire. // // Expiration is a required field - Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + Expiration *time.Time `type:"timestamp" required:"true"` // The secret access key that can be used to sign requests. // @@ -1808,7 +2048,6 @@ func (s *Credentials) SetSessionToken(v string) *Credentials { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest type DecodeAuthorizationMessageInput struct { _ struct{} `type:"structure"` @@ -1853,7 +2092,6 @@ func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAut // A document that contains additional information about the authorization status // of a request from an encoded message that is returned in response to an AWS // request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse type DecodeAuthorizationMessageOutput struct { _ struct{} `type:"structure"` @@ -1878,13 +2116,12 @@ func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAu } // Identifiers for the federated user that is associated with the credentials. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser type FederatedUser struct { _ struct{} `type:"structure"` // The ARN that specifies the federated user that is associated with the credentials. 
// For more information about ARNs and how to use them in policies, see IAM - // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) // in Using IAM. // // Arn is a required field @@ -1919,7 +2156,6 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest type GetCallerIdentityInput struct { _ struct{} `type:"structure"` } @@ -1936,7 +2172,6 @@ func (s GetCallerIdentityInput) GoString() string { // Contains the response to a successful GetCallerIdentity request, including // information about the entity making the request. -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse type GetCallerIdentityOutput struct { _ struct{} `type:"structure"` @@ -1948,8 +2183,8 @@ type GetCallerIdentityOutput struct { Arn *string `min:"20" type:"string"` // The unique identifier of the calling entity. The exact value depends on the - // type of entity making the call. The values returned are those listed in the - // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // type of entity that is making the call. The values returned are those listed + // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) // found on the Policy Variables reference page in the IAM User Guide. UserId *string `type:"string"` } @@ -1982,17 +2217,15 @@ func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest type GetFederationTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the session should last. Acceptable durations - // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds - // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained - // using AWS account (root) credentials are restricted to a maximum of 3600 + // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds + // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained + // using AWS account root user credentials are restricted to a maximum of 3,600 // seconds (one hour). If the specified duration is longer than one hour, the - // session obtained by using AWS account (root) credentials defaults to one - // hour. + // session obtained by using root user credentials defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The name of the federated user. The name is used as an identifier for the @@ -2007,36 +2240,73 @@ type GetFederationTokenInput struct { // Name is a required field Name *string `min:"2" type:"string" required:"true"` - // An IAM policy in JSON format that is passed with the GetFederationToken call - // and evaluated along with the policy or policies that are attached to the - // IAM user whose credentials are used to call GetFederationToken. The passed - // policy is used to scope down the permissions that are available to the IAM - // user, by allowing only a subset of the permissions that are granted to the - // IAM user. 
The passed policy cannot grant more permissions than those granted - // to the IAM user. The final permissions for the federated user are the most - // restrictive set based on the intersection of the passed policy and the IAM - // user policy. - // - // If you do not pass a policy, the resulting temporary security credentials - // have no effective permissions. The only exception is when the temporary security - // credentials are used to access a resource that has a resource-based policy - // that specifically allows the federated user to access the resource. - // - // The format for this parameter, as described by its regex pattern, is a string - // of characters up to 2048 characters in length. The characters can be any - // ASCII character from the space character to the end of the valid character - // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), - // and carriage return (\u000D) characters. + // An IAM policy in JSON format that you want to use as an inline session policy. // - // The policy plain text must be 2048 bytes or shorter. However, an internal - // conversion compresses it into a packed binary format with a separate limit. - // The PackedPolicySize response element indicates by percentage how close to - // the upper size limit the policy is, with 100% equaling the maximum allowed - // size. + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. // - // For more information about how permissions work, see Permissions for GetFederationToken - // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The plain text that you use for both inline and managed session policies + // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII + // character from the space character to the end of the valid character list + // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. 
The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. Policy *string `min:"1" type:"string"` + + // The Amazon Resource Names (ARNs) of the IAM managed policies that you want + // to use as a managed session policy. The policies must exist in the same account + // as the IAM user that is requesting federated access. + // + // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // to this operation. You can pass a single JSON policy document to use as an + // inline session policy. You can also specify up to 10 managed policies to + // use as managed session policies. The plain text that you use for both inline + // and managed session policies shouldn't exceed 2048 characters. You can provide + // up to 10 managed policy ARNs. For more information about ARNs, see Amazon + // Resource Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // This parameter is optional. However, if you do not pass any session policies, + // then the resulting federated user session has no permissions. The only exception + // is when the credentials are used to access a resource that has a resource-based + // policy that specifically references the federated user session in the Principal + // element of the policy. + // + // When you pass session policies, the session permissions are the intersection + // of the IAM user policies and the session policies that you pass. This gives + // you a way to further restrict the permissions for a federated user. You cannot + // use session policies to grant more permissions than those that are defined + // in the permissions policy of the IAM user. For more information, see Session + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session) + // in the IAM User Guide. + // + // The characters in this parameter count towards the 2048 character session + // policy guideline. However, an AWS conversion compresses the session policies + // into a packed binary format that has a separate limit. This is the enforced + // limit. The PackedPolicySize response element indicates by percentage how + // close the policy is to the upper size limit. + PolicyArns []*PolicyDescriptorType `type:"list"` } // String returns the string representation @@ -2064,6 +2334,16 @@ func (s *GetFederationTokenInput) Validate() error { if s.Policy != nil && len(*s.Policy) < 1 { invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) } + if s.PolicyArns != nil { + for i, v := range s.PolicyArns { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams)) + } + } + } if invalidParams.Len() > 0 { return invalidParams @@ -2089,19 +2369,22 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { return s } +// SetPolicyArns sets the PolicyArns field's value. +func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput { + s.PolicyArns = v + return s +} + // Contains the response to a successful GetFederationToken request, including // temporary AWS credentials that can be used to make AWS requests. 
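The GetFederationToken documentation above stresses that a federated session without session policies has no permissions, so a caller normally scopes the session explicitly. A minimal sketch under the same assumptions as the earlier examples (placeholder names and ARNs, canonical import path):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"),
		DurationSeconds: aws.Int64(43200), // the 12-hour default for federation sessions
		// Without at least one inline or managed session policy the federated
		// session has no permissions, so scope it explicitly.
		PolicyArns: []*sts.PolicyDescriptorType{
			{Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")}, // placeholder
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("federated user:", aws.StringValue(out.FederatedUser.Arn))
}
```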
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse type GetFederationTokenOutput struct { _ struct{} `type:"structure"` // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` // Identifiers for the federated user associated with the credentials (such @@ -2144,16 +2427,15 @@ func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTo return s } -// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest type GetSessionTokenInput struct { _ struct{} `type:"structure"` // The duration, in seconds, that the credentials should remain valid. Acceptable - // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 - // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions - // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). - // If the duration is longer than one hour, the session for AWS account owners - // defaults to one hour. + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600 + // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3,600 seconds (one + // hour). If the duration is longer than one hour, the session for AWS account + // owners defaults to one hour. DurationSeconds *int64 `min:"900" type:"integer"` // The identification number of the MFA device that is associated with the IAM @@ -2166,14 +2448,14 @@ type GetSessionTokenInput struct { // // The regex used to validate this parameter is a string of characters consisting // of upper- and lower-case alphanumeric characters with no spaces. You can - // also include underscores or any of the following characters: =,.@- + // also include underscores or any of the following characters: =,.@:/- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires // the IAM user to submit an MFA code, specify this value. If MFA authentication - // is required, and the user does not provide a code when requesting a set of - // temporary security credentials, the user will receive an "access denied" - // response when requesting resources that require MFA authentication. + // is required, the user must provide a code when requesting a set of temporary + // security credentials. A user who fails to provide the code receives an "access + // denied" response when requesting resources that require MFA authentication. // // The format for this parameter, as described by its regex pattern, is a sequence // of six numeric digits. @@ -2229,17 +2511,14 @@ func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput { // Contains the response to a successful GetSessionToken request, including // temporary AWS credentials that can be used to make AWS requests. 
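The GetSessionTokenInput fields above (DurationSeconds, SerialNumber, TokenCode) pair naturally with the GetSessionTokenWithContext variant added earlier in this change set. A hypothetical sketch; the MFA device ARN and code are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	// Bound the call with a deadline via the WithContext variant.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	out, err := svc.GetSessionTokenWithContext(ctx, &sts.GetSessionTokenInput{
		DurationSeconds: aws.Int64(3600),
		// SerialNumber and TokenCode are needed only when a policy
		// requires MFA for the calling IAM user.
		SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
		TokenCode:    aws.String("123456"),                                     // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expires:", aws.TimeValue(out.Credentials.Expiration))
}
```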
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse type GetSessionTokenOutput struct { _ struct{} `type:"structure"` // The temporary security credentials, which include an access key ID, a secret // access key, and a security (or session) token. // - // Note: The size of the security token that STS APIs return is not fixed. We - // strongly recommend that you make no assumptions about the maximum size. As - // of this writing, the typical size is less than 4096 bytes, but that can vary. - // Also, future updates to AWS might require larger sizes. + // The size of the security token that STS API operations return is not fixed. + // We strongly recommend that you make no assumptions about the maximum size. Credentials *Credentials `type:"structure"` } @@ -2258,3 +2537,44 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO s.Credentials = v return s } + +// A reference to the IAM managed policy that is passed as a session policy +// for a role session or a federated user session. +type PolicyDescriptorType struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session + // policy for the role. For more information about ARNs, see Amazon Resource + // Names (ARNs) and AWS Service Namespaces (general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `locationName:"arn" min:"20" type:"string"` +} + +// String returns the string representation +func (s PolicyDescriptorType) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PolicyDescriptorType) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PolicyDescriptorType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"} + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetArn sets the Arn field's value. +func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType { + s.Arn = &v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 00000000..fcb720dc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,108 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. 
For general information about the Query API, +// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com. +// Global requests map to the US East (N. Virginia) region. AWS recommends using +// Regional AWS STS endpoints instead of the global endpoint to reduce latency, +// build in redundancy, and increase session token validity. For more information, +// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Most AWS Regions are enabled for operations in all AWS services by default. +// Those Regions are automatically activated for use with AWS STS. Some Regions, +// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more +// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html) +// in the AWS General Reference. When you enable these AWS Regions, they are +// automatically activated for use with AWS STS. You cannot activate the STS +// endpoint for a Region that is disabled. Tokens that are valid in all AWS +// Regions are longer than tokens that are valid in Regions that are enabled +// by default. Changing this setting might affect existing systems where you +// temporarily store tokens. For more information, see Managing Global Endpoint +// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens) +// in the IAM User Guide. +// +// After you activate a Region for use with AWS STS, you can direct AWS STS +// API calls to that Region. AWS STS recommends that you provide both the Region +// and endpoint when you make calls to a Regional endpoint. You can provide +// the Region alone for manually enabled Regions, such as Asia Pacific (Hong +// Kong). In this case, the calls are directed to the STS Regional endpoint. +// However, if you provide the Region alone for Regions enabled by default, +// the calls are directed to the global endpoint of https://sts.amazonaws.com. +// +// To view the list of AWS STS endpoints and whether they are active by default, +// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code) +// in the IAM User Guide. +// +// Recording API requests +// +// STS supports AWS CloudTrail, which is a service that records AWS calls for +// your AWS account and delivers log files to an Amazon S3 bucket. By using +// information collected by CloudTrail, you can determine what requests were +// successfully made to STS, who made the request, when it was made, and so +// on. 
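The endpoint guidance above translates into client configuration. Following the recommendation to provide both the Region and the Regional endpoint, a sketch (the Region is an example; any enabled Region works):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	// Supply both the Region and its Regional STS endpoint so calls are
	// served from, and logged by CloudTrail in, that Region.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:   aws.String("us-east-2"),                           // example Region
		Endpoint: aws.String("https://sts.us-east-2.amazonaws.com"), // its Regional endpoint
	}))
	svc := sts.New(sess)

	// GetCallerIdentity is a cheap way to verify which principal and
	// endpoint the client is actually using.
	out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("account:", aws.StringValue(out.Account), "arn:", aws.StringValue(out.Arn))
}
```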
+// +// If you activate AWS STS endpoints in Regions other than the default global +// endpoint, then you must also turn on CloudTrail logging in those Regions. +// This is necessary to record any AWS STS API calls that are made in those +// Regions. For more information, see Turning On CloudTrail in Additional Regions +// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html) +// in the AWS CloudTrail User Guide. +// +// AWS Security Token Service (STS) is a global service with a single endpoint +// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls +// to a global service. However, because this endpoint is physically located +// in the US East (N. Virginia) Region, your logs list us-east-1 as the event +// Region. CloudTrail does not write these logs to the US East (Ohio) Region +// unless you choose to include global service logs in that Region. CloudTrail +// writes calls to all Regional endpoints to their respective Regions. For example, +// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio) +// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU +// (Frankfurt) Region. +// +// To learn more about CloudTrail, including how to turn it on and find your +// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html). +// +// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service. +// +// See sts package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/ +// +// Using the Client +// +// To contact AWS Security Token Service with the SDK use the New function to create +// a new service client. With that client you can make API requests to the service. +// These clients are safe to use concurrently. +// +// See the SDK's documentation for more information on how to use the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws.Config documentation for more information on configuring SDK clients. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the AWS Security Token Service client STS for more +// information on creating client for this service. +// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go index dbcd6675..41ea09c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package sts @@ -67,7 +67,7 @@ const ( // STS is not activated in the requested region for the account that is being // asked to generate credentials. The account administrator must use the IAM // console to activate STS in that region. For more information, see Activating - // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) // in the IAM User Guide. 
ErrCodeRegionDisabledException = "RegionDisabledException" ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index 9c4bfb83..185c914d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -1,4 +1,4 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package sts @@ -11,54 +11,12 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// The AWS Security Token Service (STS) is a web service that enables you to -// request temporary, limited-privilege credentials for AWS Identity and Access -// Management (IAM) users or for users that you authenticate (federated users). -// This guide provides descriptions of the STS API. For more detailed information -// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// STS provides the API operation methods for making requests to +// AWS Security Token Service. See this package's package overview docs +// for details on the service. // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to STS. For example, the SDKs take care -// of cryptographically signing requests, managing errors, and retrying requests -// automatically. For information about the AWS SDKs, including how to download -// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). -// -// For information about setting up signatures and authorization through the -// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) -// in the AWS General Reference. For general information about the Query API, -// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in Using IAM. For information about using security tokens with other AWS -// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) -// in the IAM User Guide. -// -// If you're new to AWS and need additional technical information about a specific -// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ -// (http://aws.amazon.com/documentation/). -// -// Endpoints -// -// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com -// that maps to the US East (N. Virginia) region. Additional regions are available -// and are activated by default. For more information, see Activating and Deactivating -// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) -// in the IAM User Guide. -// -// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) -// in the AWS General Reference. -// -// Recording API requests -// -// STS supports AWS CloudTrail, which is a service that records AWS calls for -// your AWS account and delivers log files to an Amazon S3 bucket. 
By using
-// information collected by CloudTrail, you can determine what requests were
-// successfully made to STS, who made the request, when it was made, and so
-// on. To learn more about CloudTrail, including how to turn it on and find
-// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
type STS struct {
	*client.Client
}
@@ -71,8 +29,9 @@ var initRequest func(*request.Request)

// Service information constants
const (
-	ServiceName = "sts"       // Service endpoint prefix API calls made to.
-	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+	ServiceName = "sts" // Name of service.
+	EndpointsID = ServiceName // ID to lookup a service endpoint with.
+	ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
)

// New creates a new instance of the STS client with a session.
@@ -97,6 +56,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
		cfg,
		metadata.ClientInfo{
			ServiceName: ServiceName,
+			ServiceID: ServiceID,
			SigningName: signingName,
			SigningRegion: signingRegion,
			Endpoint: endpoint,
diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE b/vendor/github.com/bgentry/speakeasy/LICENSE
new file mode 100644
index 00000000..37d60fc3
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/LICENSE
@@ -0,0 +1,24 @@
+MIT License
+
+Copyright (c) 2017 Blake Gentry
+
+This license applies to the non-Windows portions of this library. The Windows
+portion maintains its own Apache 2.0 license.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
new file mode 100644
index 00000000..ff177f61
--- /dev/null
+++ b/vendor/github.com/bgentry/speakeasy/LICENSE_WINDOWS
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [2013] [the CloudFoundry Authors] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
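The speakeasy package vendored in the hunks above and below exposes `Ask` and `FAsk` (see `speakeasy.go` in the following hunks) for prompting a user and reading input with terminal echo disabled. A minimal usage sketch, assuming the import path used by this vendor tree; the prompt text is illustrative:

```go
package main

import (
	"fmt"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Ask writes the prompt to stdout and reads a line with echo
	// turned off (see the per-OS getPassword implementations below).
	password, err := speakeasy.Ask("OneView password: ")
	if err != nil {
		fmt.Println("password entry failed:", err)
		return
	}
	fmt.Printf("read %d characters\n", len(password))
}
```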
diff --git a/vendor/github.com/bgentry/speakeasy/Readme.md b/vendor/github.com/bgentry/speakeasy/Readme.md new file mode 100644 index 00000000..fceda751 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/Readme.md @@ -0,0 +1,30 @@ +# Speakeasy + +This package provides cross-platform Go (#golang) helpers for taking user input +from the terminal while not echoing the input back (similar to `getpasswd`). The +package uses syscalls to avoid any dependence on cgo, and is therefore +compatible with cross-compiling. + +[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc] + +## Unicode + +Multi-byte unicode characters work successfully on Mac OS X. On Windows, +however, this may be problematic (as is UTF in general on Windows). Other +platforms have not been tested. + +## License + +The code herein was not written by me, but was compiled from two separate open +source packages. Unix portions were imported from [gopass][gopass], while +Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s +[Windows terminal helpers][cf-ui-windows]. + +The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly +from the source (though I attempted to fill in the correct owner in the +boilerplate copyright notice). + +[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI" +[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers" +[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org" +[gopass]: https://code.google.com/p/gopass "gopass" diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy.go b/vendor/github.com/bgentry/speakeasy/speakeasy.go new file mode 100644 index 00000000..71c1dd1b --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy.go @@ -0,0 +1,49 @@ +package speakeasy + +import ( + "fmt" + "io" + "os" + "strings" +) + +// Ask the user to enter a password with input hidden. prompt is a string to +// display before the user's input. Returns the provided password, or an error +// if the command failed. +func Ask(prompt string) (password string, err error) { + return FAsk(os.Stdout, prompt) +} + +// FAsk is the same as Ask, except it is possible to specify the file to write +// the prompt to. If 'nil' is passed as the writer, no prompt will be written. +func FAsk(wr io.Writer, prompt string) (password string, err error) { + if wr != nil && prompt != "" { + fmt.Fprint(wr, prompt) // Display the prompt. + } + password, err = getPassword() + + // Carriage return after the user input. 
+ if wr != nil { + fmt.Fprintln(wr, "") + } + return +} + +func readline() (value string, err error) { + var valb []byte + var n int + b := make([]byte, 1) + for { + // read one byte at a time so we don't accidentally read extra bytes + n, err = os.Stdin.Read(b) + if err != nil && err != io.EOF { + return "", err + } + if n == 0 || b[0] == '\n' { + break + } + valb = append(valb, b[0]) + } + + return strings.TrimSuffix(string(valb), "\r"), nil +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go new file mode 100644 index 00000000..d99fda19 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_unix.go @@ -0,0 +1,93 @@ +// based on https://code.google.com/p/gopass +// Author: johnsiilver@gmail.com (John Doak) +// +// Original code is based on code by RogerV in the golang-nuts thread: +// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247 + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package speakeasy + +import ( + "fmt" + "os" + "os/signal" + "strings" + "syscall" +) + +const sttyArg0 = "/bin/stty" + +var ( + sttyArgvEOff = []string{"stty", "-echo"} + sttyArgvEOn = []string{"stty", "echo"} +) + +// getPassword gets input hidden from the terminal from a user. This is +// accomplished by turning off terminal echo, reading input from the user and +// finally turning on terminal echo. +func getPassword() (password string, err error) { + sig := make(chan os.Signal, 10) + brk := make(chan bool) + + // File descriptors for stdin, stdout, and stderr. + fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()} + + // Setup notifications of termination signals to channel sig, create a process to + // watch for these signals so we can turn back on echo if need be. + signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT, + syscall.SIGTERM) + go catchSignal(fd, sig, brk) + + // Turn off the terminal echo. + pid, err := echoOff(fd) + if err != nil { + return "", err + } + + // Turn on the terminal echo and stop listening for signals. + defer signal.Stop(sig) + defer close(brk) + defer echoOn(fd) + + syscall.Wait4(pid, nil, 0, nil) + + line, err := readline() + if err == nil { + password = strings.TrimSpace(line) + } else { + err = fmt.Errorf("failed during password entry: %s", err) + } + + return password, err +} + +// echoOff turns off the terminal echo. +func echoOff(fd []uintptr) (int, error) { + pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd}) + if err != nil { + return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err) + } + return pid, nil +} + +// echoOn turns back on the terminal echo. +func echoOn(fd []uintptr) { + // Turn on the terminal echo. + pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd}) + if e == nil { + syscall.Wait4(pid, nil, 0, nil) + } +} + +// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn +// terminal echo back on before the program ends. Otherwise the user is left +// with echo off on their terminal. 
+func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) { + select { + case <-sig: + echoOn(fd) + os.Exit(-1) + case <-brk: + } +} diff --git a/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go new file mode 100644 index 00000000..c2093a80 --- /dev/null +++ b/vendor/github.com/bgentry/speakeasy/speakeasy_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package speakeasy + +import ( + "syscall" +) + +// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT: +// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx +const ENABLE_ECHO_INPUT = 0x0004 + +func getPassword() (password string, err error) { + var oldMode uint32 + + err = syscall.GetConsoleMode(syscall.Stdin, &oldMode) + if err != nil { + return + } + + var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT) + + err = setConsoleMode(syscall.Stdin, newMode) + defer setConsoleMode(syscall.Stdin, oldMode) + if err != nil { + return + } + + return readline() +} + +func setConsoleMode(console syscall.Handle, mode uint32) (err error) { + dll := syscall.MustLoadDLL("kernel32") + proc := dll.MustFindProc("SetConsoleMode") + r, _, err := proc.Call(uintptr(console), uintptr(mode)) + + if r == 0 { + return err + } + return nil +} diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE new file mode 100644 index 00000000..5ba5c86f --- /dev/null +++ b/vendor/github.com/blang/semver/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md new file mode 100644 index 00000000..e05f9865 --- /dev/null +++ b/vendor/github.com/blang/semver/README.md @@ -0,0 +1,194 @@ +semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.svg)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/blang/semver)](https://goreportcard.com/report/github.com/blang/semver) +====== + +semver is a [Semantic Versioning](http://semver.org/) library written in golang. 
It fully covers spec version `2.0.0`.
+
+Usage
+-----
+```bash
+$ go get github.com/blang/semver
+```
+Note: Always vendor your dependencies or fix on a specific version tag.
+
+```go
+import "github.com/blang/semver"
+v1, err := semver.Make("1.0.0-beta")
+v2, err := semver.Make("2.0.0-beta")
+v1.Compare(v2)
+```
+
+Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
+
+Why should I use this lib?
+-----
+
+- Fully spec compatible
+- No reflection
+- No regex
+- Fully tested (Coverage >99%)
+- Readable parsing/validation errors
+- Fast (See [Benchmarks](#benchmarks))
+- Only Stdlib
+- Uses values instead of pointers
+- Many features, see below
+
+
+Features
+-----
+
+- Parsing and validation at all levels
+- Comparator-like comparisons
+- Compare Helper Methods
+- InPlace manipulation
+- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1`
+- Wildcards `>=1.x`, `<=2.5.x`
+- Sortable (implements sort.Interface)
+- database/sql compatible (sql.Scanner/Valuer)
+- encoding/json compatible (json.Marshaler/Unmarshaler)
+
+Ranges
+------
+
+A `Range` is a set of conditions which specify which versions satisfy the range.
+
+A condition is composed of an operator and a version. The supported operators are:
+
+- `<1.0.0` Less than `1.0.0`
+- `<=1.0.0` Less than or equal to `1.0.0`
+- `>1.0.0` Greater than `1.0.0`
+- `>=1.0.0` Greater than or equal to `1.0.0`
+- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0`
+- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`.
+
+Note that spaces between the operator and the version will be gracefully tolerated.
+
+A `Range` can link multiple `Ranges` separated by spaces:
+
+Ranges can be linked by logical AND:
+
+  - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0`
+  - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2`
+
+Ranges can also be linked by logical OR:
+
+  - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x`
+
+AND has a higher precedence than OR. It's not possible to use brackets.
+
+Ranges can be combined by both AND and OR
+
+  - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1`
+
+Range usage:
+
+```go
+v, err := semver.Parse("1.2.3")
+expectedRange, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
+if expectedRange(v) {
+    //valid
+}
+```
+
+Example
+-----
+
+Have a look at full examples in [examples/main.go](examples/main.go)
+
+```go
+import "github.com/blang/semver"
+
+v, err := semver.Make("0.0.1-alpha.preview+123.github")
+fmt.Printf("Major: %d\n", v.Major)
+fmt.Printf("Minor: %d\n", v.Minor)
+fmt.Printf("Patch: %d\n", v.Patch)
+fmt.Printf("Pre: %s\n", v.Pre)
+fmt.Printf("Build: %s\n", v.Build)
+
+// Prerelease versions array
+if len(v.Pre) > 0 {
+	fmt.Println("Prerelease versions:")
+	for i, pre := range v.Pre {
+		fmt.Printf("%d: %q\n", i, pre)
+	}
+}
+
+// Build meta data array
+if len(v.Build) > 0 {
+	fmt.Println("Build meta data:")
+	for i, build := range v.Build {
+		fmt.Printf("%d: %q\n", i, build)
+	}
+}
+
+v001, err := semver.Make("0.0.1")
+// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
+v001.GT(v) == true
+v.LT(v001) == true
+v.GTE(v) == true
+v.LTE(v) == true
+
+// Or use v.Compare(v2) for comparisons (-1, 0, 1):
+v001.Compare(v) == 1
+v.Compare(v001) == -1
+v.Compare(v) == 0
+
+// Manipulate Version in place:
+v.Pre[0], err = semver.NewPRVersion("beta")
+if err != nil {
+	fmt.Printf("Error parsing pre release version: %q", err)
+}
+
+fmt.Println("\nValidate versions:")
+v.Build[0] = "?"
+
+err = v.Validate()
+if err != nil {
+	fmt.Printf("Validation failed: %s\n", err)
+}
+```
+
+
+Benchmarks
+-----
+
+    BenchmarkParseSimple-4         5000000    390 ns/op    48 B/op   1 allocs/op
+    BenchmarkParseComplex-4        1000000   1813 ns/op   256 B/op   7 allocs/op
+    BenchmarkParseAverage-4        1000000   1171 ns/op   163 B/op   4 allocs/op
+    BenchmarkStringSimple-4       20000000    119 ns/op    16 B/op   1 allocs/op
+    BenchmarkStringLarger-4       10000000    206 ns/op    32 B/op   2 allocs/op
+    BenchmarkStringComplex-4       5000000    324 ns/op    80 B/op   3 allocs/op
+    BenchmarkStringAverage-4       5000000    273 ns/op    53 B/op   2 allocs/op
+    BenchmarkValidateSimple-4    200000000   9.33 ns/op     0 B/op   0 allocs/op
+    BenchmarkValidateComplex-4     3000000    469 ns/op     0 B/op   0 allocs/op
+    BenchmarkValidateAverage-4     5000000    256 ns/op     0 B/op   0 allocs/op
+    BenchmarkCompareSimple-4     100000000   11.8 ns/op     0 B/op   0 allocs/op
+    BenchmarkCompareComplex-4     50000000   30.8 ns/op     0 B/op   0 allocs/op
+    BenchmarkCompareAverage-4     30000000   41.5 ns/op     0 B/op   0 allocs/op
+    BenchmarkSort-4                3000000    419 ns/op   256 B/op   2 allocs/op
+    BenchmarkRangeParseSimple-4    2000000    850 ns/op   192 B/op   5 allocs/op
+    BenchmarkRangeParseAverage-4   1000000   1677 ns/op   400 B/op  10 allocs/op
+    BenchmarkRangeParseComplex-4    300000   5214 ns/op  1440 B/op  30 allocs/op
+    BenchmarkRangeMatchSimple-4   50000000   25.6 ns/op     0 B/op   0 allocs/op
+    BenchmarkRangeMatchAverage-4  30000000   56.4 ns/op     0 B/op   0 allocs/op
+    BenchmarkRangeMatchComplex-4  10000000    153 ns/op     0 B/op   0 allocs/op
+
+See benchmark cases at [semver_test.go](semver_test.go)
+
+
+Motivation
+-----
+
+I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
+
+
+Contribution
+-----
+
+Feel free to make a pull request. For bigger changes create an issue first to discuss it.
+
+
+License
+-----
+
+See [LICENSE](LICENSE) file.
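The range semantics described in the README above (AND binding tighter than OR, no brackets) can be exercised against the `range.go` implementation added in the hunks below. A small sketch; the version strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// AND binds tighter than OR, so this reads as
	// (>=1.0.0 AND <2.0.0) OR (>=3.0.0); brackets are not supported.
	r, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
	if err != nil {
		panic(err)
	}
	for _, s := range []string{"1.5.0", "2.5.0", "3.0.1"} {
		// A Range is a func(Version) bool, so it is applied directly.
		fmt.Printf("%s satisfies range: %v\n", s, r(semver.MustParse(s)))
	}
	// Prints: true, false, true
}
```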
diff --git a/vendor/github.com/blang/semver/go.mod b/vendor/github.com/blang/semver/go.mod new file mode 100644 index 00000000..d0083058 --- /dev/null +++ b/vendor/github.com/blang/semver/go.mod @@ -0,0 +1 @@ +module github.com/blang/semver diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go new file mode 100644 index 00000000..a74bf7c4 --- /dev/null +++ b/vendor/github.com/blang/semver/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json new file mode 100644 index 00000000..78e27c5c --- /dev/null +++ b/vendor/github.com/blang/semver/package.json @@ -0,0 +1,17 @@ +{ + "author": "blang", + "bugs": { + "URL": "https://github.com/blang/semver/issues", + "url": "https://github.com/blang/semver/issues" + }, + "gx": { + "dvcsimport": "github.com/blang/semver" + }, + "gxVersion": "0.10.0", + "language": "go", + "license": "MIT", + "name": "semver", + "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", + "version": "3.6.1" +} + diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go new file mode 100644 index 00000000..95f7139b --- /dev/null +++ b/vendor/github.com/blang/semver/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1") // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. 
+func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed an error is returned. +// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets. +// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. 
+func buildVersionRange(opStr, vStr string) (*versionRange, error) {
+	c := parseComparator(opStr)
+	if c == nil {
+		return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, ""))
+	}
+	v, err := Parse(vStr)
+	if err != nil {
+		return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err)
+	}
+
+	return &versionRange{
+		v: v,
+		c: c,
+	}, nil
+
+}
+
+// inArray checks if a byte is contained in an array of bytes
+func inArray(s byte, list []byte) bool {
+	for _, el := range list {
+		if el == s {
+			return true
+		}
+	}
+	return false
+}
+
+// splitAndTrim splits a range string by spaces and cleans whitespaces
+func splitAndTrim(s string) (result []string) {
+	last := 0
+	var lastChar byte
+	excludeFromSplit := []byte{'>', '<', '='}
+	for i := 0; i < len(s); i++ {
+		if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) {
+			if last < i-1 {
+				result = append(result, s[last:i])
+			}
+			last = i + 1
+		} else if s[i] != ' ' {
+			lastChar = s[i]
+		}
+	}
+	if last < len(s)-1 {
+		result = append(result, s[last:])
+	}
+
+	for i, v := range result {
+		result[i] = strings.Replace(v, " ", "", -1)
+	}
+
+	// parts := strings.Split(s, " ")
+	// for _, x := range parts {
+	// 	if s := strings.TrimSpace(x); len(s) != 0 {
+	// 		result = append(result, s)
+	// 	}
+	// }
+	return
+}
+
+// splitComparatorVersion splits the comparator from the version.
+// Input must be free of leading or trailing spaces.
+func splitComparatorVersion(s string) (string, string, error) {
+	i := strings.IndexFunc(s, unicode.IsDigit)
+	if i == -1 {
+		return "", "", fmt.Errorf("Could not get version from string: %q", s)
+	}
+	return strings.TrimSpace(s[0:i]), s[i:], nil
+}
+
+// getWildcardType will return the type of wildcard that the
+// passed version contains
+func getWildcardType(vStr string) wildcardType {
+	parts := strings.Split(vStr, ".")
+	nparts := len(parts)
+	wildcard := parts[nparts-1]
+
+	possibleWildcardType := wildcardTypefromInt(nparts)
+	if wildcard == "x" {
+		return possibleWildcardType
+	}
+
+	return noneWildcard
+}
+
+// createVersionFromWildcard will convert a wildcard version
+// into a regular version, replacing 'x's with '0's, handling
+// special cases like '1.x.x' and '1.x'
+func createVersionFromWildcard(vStr string) string {
+	// handle 1.x.x
+	vStr2 := strings.Replace(vStr, ".x.x", ".x", 1)
+	vStr2 = strings.Replace(vStr2, ".x", ".0", 1)
+	parts := strings.Split(vStr2, ".")
+
+	// handle 1.x
+	if len(parts) == 2 {
+		return vStr2 + ".0"
+	}
+
+	return vStr2
+}
+
+// incrementMajorVersion will increment the major version
+// of the passed version
+func incrementMajorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[0])
+	if err != nil {
+		return "", err
+	}
+	parts[0] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// incrementMinorVersion will increment the minor version
+// of the passed version
+func incrementMinorVersion(vStr string) (string, error) {
+	parts := strings.Split(vStr, ".")
+	i, err := strconv.Atoi(parts[1])
+	if err != nil {
+		return "", err
+	}
+	parts[1] = strconv.Itoa(i + 1)
+
+	return strings.Join(parts, "."), nil
+}
+
+// expandWildcardVersion will expand wildcards inside versions
+// following these rules:
+//
+// * when dealing with patch wildcards:
+// >= 1.2.x    will become    >= 1.2.0
+// <= 1.2.x    will become    < 1.3.0
+// > 1.2.x     will become    >= 1.3.0
+// < 1.2.x     will become    < 1.2.0
+// != 1.2.x    will become    < 1.2.0 >=
1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Contains(ap, "x") { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go new file mode 100644 index 00000000..4165bc79 --- /dev/null +++ b/vendor/github.com/blang/semver/semver.go @@ -0,0 +1,455 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) 
+		}
+	}
+
+	if len(v.Build) > 0 {
+		b = append(b, '+')
+		b = append(b, v.Build[0]...)
+
+		for _, build := range v.Build[1:] {
+			b = append(b, '.')
+			b = append(b, build...)
+		}
+	}
+
+	return string(b)
+}
+
+// Equals checks if v is equal to o.
+func (v Version) Equals(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// EQ checks if v is equal to o.
+func (v Version) EQ(o Version) bool {
+	return (v.Compare(o) == 0)
+}
+
+// NE checks if v is not equal to o.
+func (v Version) NE(o Version) bool {
+	return (v.Compare(o) != 0)
+}
+
+// GT checks if v is greater than o.
+func (v Version) GT(o Version) bool {
+	return (v.Compare(o) == 1)
+}
+
+// GTE checks if v is greater than or equal to o.
+func (v Version) GTE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// GE checks if v is greater than or equal to o.
+func (v Version) GE(o Version) bool {
+	return (v.Compare(o) >= 0)
+}
+
+// LT checks if v is less than o.
+func (v Version) LT(o Version) bool {
+	return (v.Compare(o) == -1)
+}
+
+// LTE checks if v is less than or equal to o.
+func (v Version) LTE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// LE checks if v is less than or equal to o.
+func (v Version) LE(o Version) bool {
+	return (v.Compare(o) <= 0)
+}
+
+// Compare compares Versions v to o:
+// -1 == v is less than o
+// 0 == v is equal to o
+// 1 == v is greater than o
+func (v Version) Compare(o Version) int {
+	if v.Major != o.Major {
+		if v.Major > o.Major {
+			return 1
+		}
+		return -1
+	}
+	if v.Minor != o.Minor {
+		if v.Minor > o.Minor {
+			return 1
+		}
+		return -1
+	}
+	if v.Patch != o.Patch {
+		if v.Patch > o.Patch {
+			return 1
+		}
+		return -1
+	}
+
+	// Quick comparison if a version has no prerelease versions
+	if len(v.Pre) == 0 && len(o.Pre) == 0 {
+		return 0
+	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
+		return 1
+	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
+		return -1
+	}
+
+	i := 0
+	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
+		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
+			continue
+		} else if comp == 1 {
+			return 1
+		} else {
+			return -1
+		}
+	}
+
+	// If all prerelease versions are equal but one has further prerelease
+	// versions, that one is greater
+	if i == len(v.Pre) && i == len(o.Pre) {
+		return 0
+	} else if i == len(v.Pre) && i < len(o.Pre) {
+		return -1
+	} else {
+		return 1
+	}
+
+}
+
+// IncrementPatch increments the patch version
+func (v *Version) IncrementPatch() error {
+	if v.Major == 0 {
+		return fmt.Errorf("Patch version can not be incremented for %q", v.String())
+	}
+	v.Patch += 1
+	return nil
+}
+
+// IncrementMinor increments the minor version
+func (v *Version) IncrementMinor() error {
+	if v.Major == 0 {
+		return fmt.Errorf("Minor version can not be incremented for %q", v.String())
+	}
+	v.Minor += 1
+	v.Patch = 0
+	return nil
+}
+
+// IncrementMajor increments the major version
+func (v *Version) IncrementMajor() error {
+	if v.Major == 0 {
+		return fmt.Errorf("Major version can not be incremented for %q", v.String())
+	}
+	v.Major += 1
+	v.Minor = 0
+	v.Patch = 0
+	return nil
+}
+
+// Validate validates v and returns an error in case it is invalid
+func (v Version) Validate() error {
+	// Major, Minor, Patch already validated using uint64
+
+	for _, pre := range v.Pre {
+		if !pre.IsNum { //Numeric prerelease versions already uint64
+			if len(pre.VersionStr) == 0 {
+				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
+			}
+			if !containsOnly(pre.VersionStr, alphanum) {
+				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
+			}
+		}
+	}
+
+	for _, build :=
range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (vp *Version, err error) { + v, err := Parse(s) + vp = &v + return +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions +// with only major and minor components specified, and removes leading 0s. +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + // Remove leading zeros. + for i, p := range parts { + if len(p) > 1 { + parts[i] = strings.TrimPrefix(p, "0") + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + 
v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. +func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go new file mode 100644 index 00000000..e18f8808 --- /dev/null +++ b/vendor/github.com/blang/semver/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go new file mode 100644 index 00000000..db958134 --- /dev/null +++ b/vendor/github.com/blang/semver/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 00000000..a6f0febe --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 + LibraryInfo_WEB_JS LibraryInfo_Language = 10 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", + 10: "WEB_JS", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, + "WEB_JS": 10, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node that produces the span or tracing data. +// Note, this is not the metadata about the Node or service that is described by associated spans. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. + Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. + LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. 
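+	// Stored as free-form string keys and values.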
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. 
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. 
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" +} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) +} + 
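A minimal sketch of how a consumer might populate the generated types above, assuming the hypothetical `commonpb` import alias and placeholder host, pid, and service values; the field names, the `LibraryInfo_GO_LANG` constant, and the nil-safe getters are taken from the generated code itself:

```go
package main

import (
	"fmt"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
)

func main() {
	// Describe the current process with the generated structs.
	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName: "example-host", // hypothetical
			Pid:      1234,           // hypothetical
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language: commonpb.LibraryInfo_GO_LANG,
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "example-service"},
	}

	// Generated getters are nil-safe: GetServiceInfo().GetName() returns
	// "" instead of panicking when ServiceInfo is unset.
	fmt.Println(node.GetIdentifier().GetHostName(), node.GetServiceInfo().GetName())
}
```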
+var fileDescriptor_126c72ed8a252c84 = []byte{ + // 618 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x6e, 0xda, 0x4e, + 0x14, 0xc7, 0x7f, 0xc6, 0x24, 0x81, 0xe7, 0x5f, 0x13, 0x77, 0x94, 0x05, 0x4a, 0x17, 0xa5, 0x74, + 0x93, 0x2e, 0xb0, 0x9b, 0x44, 0xaa, 0xaa, 0x4a, 0x5d, 0x18, 0xe2, 0x26, 0x44, 0x11, 0x58, 0x26, + 0xa1, 0x4a, 0x37, 0x96, 0x21, 0x83, 0x33, 0x2a, 0x9e, 0x41, 0xe3, 0x31, 0x2a, 0x27, 0xe8, 0x09, + 0xda, 0x03, 0xf4, 0x50, 0x3d, 0x44, 0x4f, 0x51, 0xcd, 0x8c, 0x01, 0xab, 0x59, 0x90, 0xdd, 0xfb, + 0xf3, 0xfd, 0x7e, 0x9e, 0xf5, 0xe6, 0xc9, 0xd0, 0x66, 0x73, 0x4c, 0x27, 0x98, 0x66, 0x79, 0xe6, + 0xce, 0x39, 0x13, 0xcc, 0x8d, 0x13, 0x4c, 0x85, 0x3b, 0x61, 0x69, 0xca, 0xa8, 0xbb, 0x38, 0x29, + 0x22, 0x47, 0x35, 0x51, 0x73, 0x23, 0xd7, 0x15, 0x47, 0xc9, 0x9d, 0x42, 0xb4, 0x38, 0x39, 0x7a, + 0x99, 0x30, 0x96, 0xcc, 0xb0, 0x86, 0x8d, 0xf3, 0xa9, 0x2b, 0x48, 0x8a, 0x33, 0x11, 0xa7, 0x73, + 0x6d, 0x68, 0xfd, 0x34, 0xa1, 0xda, 0x67, 0xf7, 0x18, 0x0d, 0x01, 0xc8, 0x3d, 0xa6, 0x82, 0x4c, + 0x09, 0xe6, 0x0d, 0xa3, 0x69, 0x1c, 0x5b, 0xa7, 0x67, 0xce, 0xb6, 0x01, 0x4e, 0xc0, 0xd9, 0x04, + 0x67, 0x59, 0x6f, 0x6d, 0x0d, 0x4b, 0x18, 0x14, 0xc0, 0xff, 0x33, 0x32, 0xe6, 0x31, 0x5f, 0x46, + 0x84, 0x4e, 0x59, 0xa3, 0xa2, 0xb0, 0xed, 0xed, 0xd8, 0x6b, 0xed, 0xea, 0xd1, 0x29, 0x0b, 0xad, + 0xd9, 0x26, 0x91, 0xc4, 0x0c, 0xf3, 0x05, 0x99, 0x60, 0x4d, 0x34, 0x9f, 0x4a, 0x1c, 0x6a, 0x97, + 0x26, 0x66, 0x9b, 0x04, 0x8d, 0x00, 0x62, 0x21, 0x38, 0x19, 0xe7, 0x02, 0x67, 0x8d, 0x6a, 0xd3, + 0x3c, 0xb6, 0x4e, 0xdf, 0x6d, 0xe7, 0xc9, 0xa5, 0x39, 0xde, 0xda, 0xe8, 0x53, 0xc1, 0x97, 0x61, + 0x89, 0x74, 0xf4, 0x11, 0x0e, 0xfe, 0x69, 0x23, 0x1b, 0xcc, 0xaf, 0x78, 0xa9, 0x96, 0x5b, 0x0f, + 0x65, 0x88, 0x0e, 0x61, 0x67, 0x11, 0xcf, 0x72, 0xac, 0x36, 0x53, 0x0f, 0x75, 0xf2, 0xa1, 0xf2, + 0xde, 0x68, 0x7d, 0x37, 0xe0, 0xf9, 0xa3, 0xe5, 0xa2, 0x17, 0x50, 0x7f, 0x60, 0x99, 0x88, 0x68, + 0x9c, 0xe2, 0x82, 0x53, 0x93, 0x85, 0x7e, 0x9c, 0x62, 0x89, 0x9f, 0x93, 0x7b, 0x85, 0x7a, 0x16, + 0xca, 0x10, 0x75, 0xe1, 0x20, 0x13, 0x31, 0x17, 0xd1, 0xfa, 0xd9, 0x8b, 0x85, 0x1d, 0x39, 0xfa, + 0x30, 0x9c, 0xd5, 0x61, 0x38, 0x37, 0x2b, 0x45, 0xb8, 0xaf, 0x2c, 0xeb, 0xbc, 0xf5, 0xbb, 0x02, + 0x56, 0xe9, 0x3d, 0x50, 0x08, 0xb5, 0x59, 0x4c, 0x93, 0x3c, 0x4e, 0xf4, 0x27, 0xec, 0x3f, 0x65, + 0x5d, 0x25, 0x80, 0x73, 0x5d, 0xb8, 0xc3, 0x35, 0x07, 0xbd, 0x01, 0x1b, 0x7f, 0x9b, 0x33, 0x2e, + 0x30, 0x8f, 0x16, 0x98, 0x67, 0x84, 0xd1, 0x62, 0x25, 0x07, 0xab, 0xfa, 0x48, 0x97, 0xd1, 0x5b, + 0x38, 0x9c, 0x30, 0x8e, 0xa3, 0xd5, 0x61, 0xad, 0xe4, 0xa6, 0x92, 0x23, 0xd9, 0x2b, 0x86, 0x15, + 0x8e, 0xd6, 0x0f, 0x03, 0x6a, 0xab, 0x99, 0xa8, 0x01, 0x87, 0xd7, 0x5e, 0xff, 0xe2, 0xd6, 0xbb, + 0xf0, 0xa3, 0xdb, 0xfe, 0x30, 0xf0, 0xbb, 0xbd, 0x4f, 0x3d, 0xff, 0xdc, 0xfe, 0x0f, 0xed, 0x81, + 0xd9, 0x0d, 0x02, 0xdb, 0x40, 0x16, 0xec, 0x75, 0xa3, 0xe1, 0xa5, 0x17, 0x06, 0x76, 0x05, 0x01, + 0xec, 0xfa, 0xa1, 0x74, 0xd8, 0xa6, 0x6c, 0x5c, 0x0c, 0x22, 0x95, 0x54, 0x51, 0x0d, 0xaa, 0x57, + 0xde, 0xc8, 0xb3, 0x77, 0x64, 0xb9, 0x3f, 0x38, 0xf7, 0xa3, 0xab, 0xa1, 0xbd, 0x2b, 0x29, 0xc1, + 0x65, 0x60, 0xef, 0x49, 0x63, 0x70, 0x77, 0x73, 0x39, 0xe8, 0xdb, 0x35, 0xa9, 0x0d, 0x6f, 0x3b, + 0x77, 0x76, 0x5d, 0x56, 0x3f, 0xfb, 0x1d, 0x29, 0x85, 0xd6, 0x2b, 0xb0, 0x4a, 0x57, 0x89, 0x10, + 0x54, 0x4b, 0xcf, 0xaa, 0xe2, 0xce, 0x2f, 0x03, 0x5e, 0x13, 0xb6, 0x75, 0xbd, 0x1d, 0xab, 0xab, + 0xc2, 0x40, 0x36, 0x03, 0xe3, 0x4b, 0x2f, 0x21, 0xe2, 0x21, 0x1f, 0x4b, 0x81, 0xab, 0x7d, 0x6d, + 0x42, 0x33, 0xc1, 0xf3, 
0x14, 0x53, 0x11, 0x0b, 0xc2, 0xa8, 0xbb, 0x41, 0xb6, 0xf5, 0x9f, 0x26, + 0xc1, 0xb4, 0x9d, 0x3c, 0xfa, 0xe1, 0xfc, 0xa9, 0x34, 0x07, 0x73, 0x4c, 0xbb, 0x7a, 0xb8, 0xe2, + 0x3b, 0x9e, 0x1a, 0xae, 0x27, 0x3a, 0xa3, 0x93, 0xf1, 0xae, 0x02, 0x9c, 0xfd, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x53, 0x74, 0x5e, 0xbe, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go new file mode 100644 index 00000000..5f222b47 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportMetricsServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Metrics from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of metrics that belong to the last received Node. + Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // The resource for the metrics in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known + // at all or when all sent metrics have an explicit resource set. 
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{0} +} + +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{1} +} + +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) +} + +var fileDescriptor_47e253a956287d04 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x4a, 0xc3, 0x40, + 0x14, 0x86, 0x9d, 0x56, 0xaa, 0x4c, 0xc1, 0x45, 0xdc, 0x94, 0x2a, 0xd2, 0x56, 0x91, 0x8a, 0x64, + 0x62, 0xea, 0x42, 0x10, 0x54, 0xac, 0xb8, 0x11, 0xd4, 0x12, 0xc1, 0x85, 0x1b, 0x69, 0xd3, 0x47, + 0xcc, 0x22, 0x33, 0x71, 0x66, 0x12, 0xbc, 0x85, 0x77, 0x70, 0xef, 0x8d, 0x3c, 0x81, 0xa7, 0x90, + 0xe4, 0x4d, 0x5a, 0x4a, 0x8c, 0x05, 0x77, 0x8f, 0xe4, 0xff, 0xfe, 0xf7, 0xff, 0x33, 0x43, 0x4f, + 0x44, 0x0c, 0xdc, 0x07, 0xae, 0x12, 0xe5, 0xc4, 0x52, 0x68, 0xe1, 0x8c, 0x03, 0xe0, 0xda, 0x89, + 0x40, 0xcb, 0xd0, 0x57, 0x4e, 0xea, 0x16, 0xe3, 0xb3, 0x02, 0x99, 0x86, 0x3e, 0xb0, 0x5c, 0x66, + 0x75, 0xe7, 0x20, 0x7e, 0x61, 0x39, 0xc8, 0x8c, 0x9a, 0xa5, 0x6e, 0xdb, 0xae, 0xf0, 0xf6, 0x45, + 0x14, 0x09, 0x9e, 0x59, 0xe3, 0x84, 0x7c, 0xfb, 0xa0, 0x24, 0x2f, 0x87, 0x30, 0xd2, 0xc3, 0x92, + 0x54, 0x82, 0x12, 0x89, 0xf4, 0x21, 0xd3, 0x16, 0x33, 0x8a, 0x7b, 0x5f, 0x84, 0x6e, 0x5d, 0xbf, + 0xc5, 0x42, 0xea, 0x5b, 0x34, 0x79, 0xc0, 0x22, 0x1e, 0xbc, 0x26, 0xa0, 0xb4, 0x75, 0x4a, 0x57, + 0xb9, 0x98, 0x42, 0x8b, 0x74, 0x48, 0xbf, 0x39, 0xd8, 0x67, 0x15, 0xc5, 0x4c, 0xd6, 0xd4, 0x65, + 0x77, 0x62, 0x0a, 0x5e, 0xce, 0x58, 0x67, 0x74, 0xcd, 0x24, 0x6b, 0xd5, 0x3a, 0xf5, 0x7e, 0x73, + 0xb0, 0x5b, 0xc6, 0xe7, 0x27, 0xc2, 0x30, 0x80, 0x57, 0x30, 0xd6, 0x90, 0xae, 0x17, 0x61, 0x5b, + 0xf5, 0xaa, 0xf5, 0xb3, 0x3a, 0xa9, 0xcb, 0x3c, 0x33, 0x7b, 0x33, 0xae, 0xb7, 0x43, 0xb7, 0x7f, + 0x6f, 0xa7, 0x62, 0xc1, 0x15, 0x0c, 0x3e, 0x08, 0xdd, 0x58, 0xfc, 0x65, 0xbd, 0x13, 0xda, 0x40, + 0xc6, 0x3a, 0x67, 0x4b, 0xef, 0x91, 0xfd, 0x71, 0x78, 0xed, 0x8b, 0x7f, 0xf3, 0x18, 0xaf, 0xb7, + 0xd2, 0x27, 0x47, 0x64, 0xf8, 0x49, 0xe8, 0x5e, 0x28, 0x96, 0x7b, 0x0d, 0x37, 0x17, 0x6d, 0x46, + 0x99, 0x6a, 0x44, 0x9e, 0x6e, 0x82, 0x50, 0xbf, 0x24, 0x93, 0xec, 0x92, 0x1c, 0x34, 0xb0, 0x43, + 0xae, 0xb4, 0x4c, 0x22, 0xe0, 0x7a, 0xac, 0x43, 0xc1, 0x9d, 0xb9, 0xb7, 0x8d, 0x4f, 0x26, 0x00, + 0x6e, 0x07, 0xe5, 0xf7, 0xfe, 0x5d, 0xeb, 0xde, 0xc7, 0xc0, 0xaf, 0x30, 0x46, 0xbe, 0x80, 0x5d, + 0xe6, 0x31, 0xcc, 0x6a, 0xf6, 0xe8, 0x4e, 0x1a, 0xb9, 0xc5, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x19, 0x28, 0xa4, 0x50, 0x3f, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &metricsServiceExportClient{stream} + return x, nil +} + +type MetricsService_ExportClient interface { + Send(*ExportMetricsServiceRequest) error + Recv() (*ExportMetricsServiceResponse, error) + grpc.ClientStream +} + +type metricsServiceExportClient struct { + grpc.ClientStream +} + +func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { + m := new(ExportMetricsServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(MetricsService_ExportServer) error +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(srv MetricsService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) +} + +type MetricsService_ExportServer interface { + Send(*ExportMetricsServiceResponse) error + Recv() (*ExportMetricsServiceRequest, error) + grpc.ServerStream +} + +type metricsServiceExportServer struct { + grpc.ServerStream +} + +func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { + m := new(ExportMetricsServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Export", + Handler: _MetricsService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go new file mode 100644 index 00000000..a0a3504d --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -0,0 +1,457 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: opencensus/proto/agent/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CurrentLibraryConfig struct { + // This is required only in the first message on the stream or if the + // previous sent CurrentLibraryConfig message has a different Node (e.g. + // when the same RPC is used to configure multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Current configuration. + Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } +func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*CurrentLibraryConfig) ProtoMessage() {} +func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{0} +} + +func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) +} +func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) +} +func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) +} +func (m *CurrentLibraryConfig) XXX_Size() int { + return xxx_messageInfo_CurrentLibraryConfig.Size(m) +} +func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo + +func (m *CurrentLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type UpdatedLibraryConfig struct { + // This field is ignored when the RPC is used to configure only one Application. + // This is required only in the first message on the stream or if the + // previous sent UpdatedLibraryConfig message has a different Node. + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Requested updated configuration. 
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } +func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*UpdatedLibraryConfig) ProtoMessage() {} +func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{1} +} + +func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) +} +func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) +} +func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) +} +func (m *UpdatedLibraryConfig) XXX_Size() int { + return xxx_messageInfo_UpdatedLibraryConfig.Size(m) +} +func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo + +func (m *UpdatedLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type ExportTraceServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportTraceServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Spans from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of Spans that belong to the last received Node. + Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The resource for the spans in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known. 
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{2} +} + +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{3} +} + +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") + proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", 
fileDescriptor_7027f99caf7ac6a5) +} + +var fileDescriptor_7027f99caf7ac6a5 = []byte{ + // 442 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xcf, 0xaa, 0xd4, 0x30, + 0x14, 0xc6, 0x4d, 0xaf, 0x16, 0xc9, 0x75, 0x63, 0x71, 0x51, 0x8b, 0x30, 0x97, 0x82, 0x32, 0xa0, + 0x4d, 0xed, 0x5c, 0xee, 0xe6, 0x0a, 0x82, 0x33, 0x08, 0x2e, 0x44, 0x2f, 0x1d, 0x75, 0xe1, 0x66, + 0xe8, 0xb4, 0xc7, 0xda, 0xc5, 0x24, 0x31, 0x49, 0x8b, 0x82, 0x7b, 0xf7, 0x2e, 0x7c, 0x03, 0x5f, + 0xc8, 0xc7, 0xf0, 0x29, 0xa4, 0x39, 0x9d, 0x3f, 0x3a, 0x53, 0x0b, 0xba, 0xb9, 0xbb, 0x43, 0xf3, + 0xfd, 0xbe, 0xf3, 0x25, 0x39, 0x29, 0x3d, 0x15, 0x12, 0x78, 0x0e, 0x5c, 0xd7, 0x3a, 0x96, 0x4a, + 0x18, 0x11, 0x67, 0x25, 0x70, 0x13, 0x1b, 0x95, 0xe5, 0x10, 0x37, 0x09, 0x16, 0x0b, 0x0d, 0xaa, + 0xa9, 0x72, 0x60, 0x56, 0xe2, 0x8d, 0xb6, 0x10, 0x7e, 0x61, 0x16, 0x62, 0x56, 0xcb, 0x9a, 0x24, + 0x88, 0x7a, 0x5c, 0x73, 0xb1, 0x5a, 0x09, 0xde, 0xda, 0x62, 0x85, 0x74, 0x70, 0x7f, 0x4f, 0xae, + 0x40, 0x8b, 0x5a, 0x61, 0x82, 0x75, 0xdd, 0x89, 0xef, 0xee, 0x89, 0x7f, 0xcf, 0xda, 0xc9, 0x1e, + 0x0c, 0xc8, 0x16, 0xb9, 0xe0, 0xef, 0xaa, 0x12, 0xd5, 0xe1, 0x57, 0x42, 0x6f, 0xcd, 0x6a, 0xa5, + 0x80, 0x9b, 0xe7, 0xd5, 0x52, 0x65, 0xea, 0xd3, 0xcc, 0x2e, 0x7b, 0xe7, 0xf4, 0x2a, 0x17, 0x05, + 0xf8, 0xe4, 0x84, 0x8c, 0x8f, 0x27, 0xf7, 0x58, 0xcf, 0xce, 0xbb, 0xed, 0x34, 0x09, 0x7b, 0x21, + 0x0a, 0x48, 0x2d, 0xe3, 0x3d, 0xa6, 0x2e, 0x36, 0xf1, 0x9d, 0x3e, 0x7a, 0x7d, 0x62, 0xec, 0x55, + 0x5b, 0x60, 0xcf, 0xb4, 0xa3, 0x6c, 0xa8, 0xd7, 0xb2, 0xc8, 0x0c, 0x14, 0x97, 0x27, 0xd4, 0x0f, + 0x42, 0x6f, 0x3f, 0xfd, 0x28, 0x85, 0x32, 0x76, 0x75, 0x8e, 0x83, 0x91, 0xc2, 0x87, 0x1a, 0xb4, + 0xf9, 0xaf, 0x64, 0x67, 0xf4, 0x9a, 0x96, 0x19, 0xd7, 0xbe, 0x73, 0x72, 0x34, 0x3e, 0x9e, 0x8c, + 0xfe, 0x12, 0x6c, 0x2e, 0x33, 0x9e, 0xa2, 0xda, 0x9b, 0xd2, 0xeb, 0xeb, 0x09, 0xf1, 0x8f, 0xfa, + 0xda, 0x6e, 0x66, 0xa8, 0x49, 0x58, 0xda, 0xd5, 0xe9, 0x86, 0x0b, 0xef, 0xd0, 0xe0, 0xd0, 0x9e, + 0xb4, 0x14, 0x5c, 0xc3, 0xe4, 0x9b, 0x43, 0x6f, 0xec, 0x2e, 0x78, 0x9f, 0xa9, 0xdb, 0xdd, 0xc4, + 0x19, 0x1b, 0x78, 0x0a, 0xec, 0xd0, 0x54, 0x05, 0xc3, 0xd8, 0xa1, 0x7b, 0x0f, 0xaf, 0x8c, 0xc9, + 0x43, 0xe2, 0x7d, 0x21, 0xd4, 0xc5, 0xb4, 0xde, 0xf9, 0xa0, 0x4f, 0xef, 0x55, 0x05, 0x8f, 0xfe, + 0x89, 0xc5, 0x23, 0xc1, 0x24, 0xd3, 0xef, 0x84, 0x86, 0x95, 0x18, 0xf2, 0x99, 0xde, 0xdc, 0xb5, + 0xb8, 0x68, 0x15, 0x17, 0xe4, 0xed, 0xb3, 0xb2, 0x32, 0xef, 0xeb, 0x65, 0x3b, 0x0a, 0x31, 0xc2, + 0x51, 0xc5, 0xb5, 0x51, 0xf5, 0x0a, 0xb8, 0xc9, 0x4c, 0x25, 0x78, 0xbc, 0xf5, 0x8d, 0xf0, 0x05, + 0x97, 0xc0, 0xa3, 0xf2, 0xcf, 0x3f, 0xd4, 0x4f, 0x67, 0xf4, 0x52, 0x02, 0x9f, 0x61, 0x00, 0x6b, + 0xcf, 0x9e, 0xd8, 0x00, 0xb6, 0x2d, 0x7b, 0x93, 0x2c, 0x5d, 0x8b, 0x9f, 0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0x65, 0x76, 0xd7, 0xb9, 0xed, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. 
The agent pushes configs down to applications via a + // stream. + Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) + if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(TraceService_ExportServer) error +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Config(srv TraceService_ConfigServer) error { + return status.Errorf(codes.Unimplemented, "method Config not implemented") +} +func (*UnimplementedTraceServiceServer) Export(srv TraceService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go new file mode 100644 index 00000000..334331b0 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
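+
+The single route registered below maps POST /v1/trace onto the bidirectional
+Export stream of TraceService.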
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq ExportTraceServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseStream +) diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go new file mode 100644 index 00000000..466b2342 --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -0,0 +1,1127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The kind of metric. It describes how the data is reported. +// +// A gauge is an instantaneous measurement of a value. +// +// A cumulative measurement is a value accumulated over a time interval. In +// a time series, cumulative measurements should have the same start time, +// increasing values and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. +type MetricDescriptor_Type int32 + +const ( + // Do not use this default value. + MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 + // Integer gauge. The value can go both up and down. + MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 + // Floating point gauge. The value can go both up and down. + MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 + // Distribution gauge measurement. The count and sum can go both up and + // down. Recorded values are always >= 0. + // Used in scenarios like a snapshot of time the current items in a queue + // have spent there. 
+ MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 + // Integer cumulative measurement. The value cannot decrease, if resets + // then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 + // Floating point cumulative measurement. The value cannot decrease, if + // resets then the start_time should also be reset. Recorded values are + // always >= 0. + MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 + // Distribution cumulative measurement. The count and sum cannot decrease, + // if resets then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 + // Some frameworks implemented Histograms as a summary of observations + // (usually things like request durations and response sizes). While it + // also provides a total count of observations and a sum of all observed + // values, it calculates configurable percentiles over a sliding time + // window. This is not recommended, since it cannot be aggregated. + MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 +) + +var MetricDescriptor_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "GAUGE_INT64", + 2: "GAUGE_DOUBLE", + 3: "GAUGE_DISTRIBUTION", + 4: "CUMULATIVE_INT64", + 5: "CUMULATIVE_DOUBLE", + 6: "CUMULATIVE_DISTRIBUTION", + 7: "SUMMARY", +} + +var MetricDescriptor_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "GAUGE_INT64": 1, + "GAUGE_DOUBLE": 2, + "GAUGE_DISTRIBUTION": 3, + "CUMULATIVE_INT64": 4, + "CUMULATIVE_DOUBLE": 5, + "CUMULATIVE_DISTRIBUTION": 6, + "SUMMARY": 7, +} + +func (x MetricDescriptor_Type) String() string { + return proto.EnumName(MetricDescriptor_Type_name, int32(x)) +} + +func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1, 0} +} + +// Defines a Metric which has one or more timeseries. +type Metric struct { + // The descriptor of the Metric. + // TODO(issue #152): consider only sending the name of descriptor for + // optimization. + MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + // One or more timeseries for a single metric, where each timeseries has + // one or more points. + Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + // The resource for the metric. If unset, it may be set to a default value + // provided for a sequence of messages in an RPC stream. 
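+ // For illustration only: an exporter streaming several Metric messages
+ // for the same process could populate this resource on the first message
+ // of the stream and leave it unset on the rest, letting the receiver
+ // apply it as the default for the sequence.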
+ Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetMetricDescriptor() *MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +func (m *Metric) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *Metric) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +// Defines a metric type and its schema. +type MetricDescriptor struct { + // The metric type, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` + // The label keys associated with the metric descriptor. 
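+ // The order of these keys fixes the order of the label values in every
+ // TimeSeries of this metric. A minimal sketch, with a hypothetical
+ // "queue" key that is not part of this file:
+ //
+ //	desc := &MetricDescriptor{
+ //		Name:      "queue_length",
+ //		Unit:      "1",
+ //		Type:      MetricDescriptor_GAUGE_INT64,
+ //		LabelKeys: []*LabelKey{{Key: "queue"}},
+ //	}
+ //
+ // Each TimeSeries for desc must then carry exactly one LabelValue, in
+ // the same position as the "queue" key.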
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetType() MetricDescriptor_Type { + if m != nil { + return m.Type + } + return MetricDescriptor_UNSPECIFIED +} + +func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { + if m != nil { + return m.LabelKeys + } + return nil +} + +// Defines a label key associated with a metric descriptor. +type LabelKey struct { + // The key for the label. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A human-readable description of what this label key represents. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelKey) Reset() { *m = LabelKey{} } +func (m *LabelKey) String() string { return proto.CompactTextString(m) } +func (*LabelKey) ProtoMessage() {} +func (*LabelKey) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{2} +} + +func (m *LabelKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelKey.Unmarshal(m, b) +} +func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) +} +func (m *LabelKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelKey.Merge(m, src) +} +func (m *LabelKey) XXX_Size() int { + return xxx_messageInfo_LabelKey.Size(m) +} +func (m *LabelKey) XXX_DiscardUnknown() { + xxx_messageInfo_LabelKey.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelKey proto.InternalMessageInfo + +func (m *LabelKey) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A collection of data points that describes the time-varying values +// of a metric. +type TimeSeries struct { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. 
The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{3} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +func (m *TimeSeries) GetLabelValues() []*LabelValue { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +type LabelValue struct { + // The value for the label. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. 
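+ // For example, &LabelValue{Value: "", HasValue: true} is a label that is
+ // present with an empty-string value, while &LabelValue{HasValue: false}
+ // is a label that is missing entirely.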
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{4} +} + +func (m *LabelValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelValue.Unmarshal(m, b) +} +func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) +} +func (m *LabelValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValue.Merge(m, src) +} +func (m *LabelValue) XXX_Size() int { + return xxx_messageInfo_LabelValue.Size(m) +} +func (m *LabelValue) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValue.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValue proto.InternalMessageInfo + +func (m *LabelValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LabelValue) GetHasValue() bool { + if m != nil { + return m.HasValue + } + return false +} + +// A timestamped measurement. +type Point struct { + // The moment when this point was recorded. Inclusive. + // If not specified, the timestamp will be decided by the backend. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The actual point value. + // + // Types that are valid to be assigned to Value: + // *Point_Int64Value + // *Point_DoubleValue + // *Point_DistributionValue + // *Point_SummaryValue + Value isPoint_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{5} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type isPoint_Value interface { + isPoint_Value() +} + +type Point_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Point_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Point_DistributionValue struct { + DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +type Point_SummaryValue struct { + SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` +} + +func (*Point_Int64Value) isPoint_Value() {} + +func (*Point_DoubleValue) 
isPoint_Value() {} + +func (*Point_DistributionValue) isPoint_Value() {} + +func (*Point_SummaryValue) isPoint_Value() {} + +func (m *Point) GetValue() isPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Point) GetInt64Value() int64 { + if x, ok := m.GetValue().(*Point_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *Point) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*Point_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Point) GetDistributionValue() *DistributionValue { + if x, ok := m.GetValue().(*Point_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +func (m *Point) GetSummaryValue() *SummaryValue { + if x, ok := m.GetValue().(*Point_SummaryValue); ok { + return x.SummaryValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Point) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Point_Int64Value)(nil), + (*Point_DoubleValue)(nil), + (*Point_DistributionValue)(nil), + (*Point_SummaryValue)(nil), + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type DistributionValue struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. + // TODO(issue #152): consider not required to send bucket options for + // optimization. + BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. 
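+ // For example, a distribution whose count field is 10 could carry bucket
+ // counts of 1, 4 and 5, since the bucket counts must sum to 10.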
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue) Reset() { *m = DistributionValue{} } +func (m *DistributionValue) String() string { return proto.CompactTextString(m) } +func (*DistributionValue) ProtoMessage() {} +func (*DistributionValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6} +} + +func (m *DistributionValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue.Unmarshal(m, b) +} +func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) +} +func (m *DistributionValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue.Merge(m, src) +} +func (m *DistributionValue) XXX_Size() int { + return xxx_messageInfo_DistributionValue.Size(m) +} +func (m *DistributionValue) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue proto.InternalMessageInfo + +func (m *DistributionValue) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described by +// BucketOptions. +// +// If bucket_options has no type, then there is no histogram associated with +// the Distribution. 
+type DistributionValue_BucketOptions struct { + // Types that are valid to be assigned to Type: + // *DistributionValue_BucketOptions_Explicit_ + Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } +func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions) ProtoMessage() {} +func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0} +} + +func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) +} +func (m *DistributionValue_BucketOptions) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) +} +func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo + +type isDistributionValue_BucketOptions_Type interface { + isDistributionValue_BucketOptions_Type() +} + +type DistributionValue_BucketOptions_Explicit_ struct { + Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` +} + +func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} + +func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { + if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { + return x.Explicit + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DistributionValue_BucketOptions_Explicit_)(nil), + } +} + +// Specifies a set of buckets with arbitrary upper-bounds. +// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket +// index i are: +// +// [0, bucket_bounds[i]) for i == 0 +// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 +// [bucket_bounds[i-1], +infinity) for i == N-1 +type DistributionValue_BucketOptions_Explicit struct { + // The values must be strictly increasing and > 0.
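+ // A minimal sketch, using the hypothetical bounds {1, 5, 10}:
+ //
+ //	opts := &DistributionValue_BucketOptions{
+ //		Type: &DistributionValue_BucketOptions_Explicit_{
+ //			Explicit: &DistributionValue_BucketOptions_Explicit{
+ //				Bounds: []float64{1, 5, 10},
+ //			},
+ //		},
+ //	}
+ //
+ // opts defines the four buckets [0, 1), [1, 5), [5, 10) and
+ // [10, +infinity).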
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions_Explicit) Reset() { + *m = DistributionValue_BucketOptions_Explicit{} +} +func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} +func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} +} + +func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +type DistributionValue_Bucket struct { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // If the distribution does not have a histogram, then omit this field. 
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } +func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Bucket) ProtoMessage() {} +func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 1} +} + +func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) +} +func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) +} +func (m *DistributionValue_Bucket) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Bucket.Size(m) +} +func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo + +func (m *DistributionValue_Bucket) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// Distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket. +type DistributionValue_Exemplar struct { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. 
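+ // For example, an exemplar for a latency bucket could attach a
+ // hypothetical "trace_id" entry pointing at the trace in which the
+ // observed value was recorded.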
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } +func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Exemplar) ProtoMessage() {} +func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 2} +} + +func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) +} +func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) +} +func (m *DistributionValue_Exemplar) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Exemplar.Size(m) +} +func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo + +func (m *DistributionValue_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { + if m != nil { + return m.Attachments + } + return nil +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +type SummaryValue struct { + // The total number of recorded values since start_time. Optional since + // some systems don't expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // Values calculated over an arbitrary time window. 
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue) Reset() { *m = SummaryValue{} } +func (m *SummaryValue) String() string { return proto.CompactTextString(m) } +func (*SummaryValue) ProtoMessage() {} +func (*SummaryValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7} +} + +func (m *SummaryValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue.Unmarshal(m, b) +} +func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) +} +func (m *SummaryValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue.Merge(m, src) +} +func (m *SummaryValue) XXX_Size() int { + return xxx_messageInfo_SummaryValue.Size(m) +} +func (m *SummaryValue) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue proto.InternalMessageInfo + +func (m *SummaryValue) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The values in this message can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type SummaryValue_Snapshot struct { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. 
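+ // For example, a snapshot could carry the three entries
+ //
+ //	&SummaryValue_Snapshot_ValueAtPercentile{Percentile: 50.0, Value: 0.012}
+ //	&SummaryValue_Snapshot_ValueAtPercentile{Percentile: 95.0, Value: 0.030}
+ //	&SummaryValue_Snapshot_ValueAtPercentile{Percentile: 99.0, Value: 0.042}
+ //
+ // where the hypothetical values are in the unit declared by the metric
+ // descriptor and the percentiles increase strictly, as required.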
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } +func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot) ProtoMessage() {} +func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0} +} + +func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) +} +func (m *SummaryValue_Snapshot) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot.Size(m) +} +func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { + if m != nil { + return m.PercentileValues + } + return nil +} + +// Represents the value at a given percentile of a distribution. +type SummaryValue_Snapshot_ValueAtPercentile struct { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` + // The value at the given percentile of a distribution. 
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { + *m = SummaryValue_Snapshot_ValueAtPercentile{} +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} +func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { + if m != nil { + return m.Percentile + } + return 0 +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) + proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") + proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") + proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") + proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") + proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") + proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") + proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") + proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") + proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") + proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") + proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") + proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") + proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") + proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") +} + +func init() { + 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) +} + +var fileDescriptor_0ee3deb72053811a = []byte{ + // 1118 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xd2, 0xf5, 0xa8, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61, + 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x31, 0x24, 0xb1, + 0x35, 0xb6, 0x23, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x0d, 0xf8, + 0x05, 0x78, 0x02, 0xc4, 0x35, 0xb7, 0x88, 0xe7, 0xe0, 0x8a, 0x27, 0xe0, 0x15, 0xb8, 0x41, 0xbc, + 0x01, 0xda, 0x99, 0xd9, 0x8f, 0xc4, 0x60, 0xea, 0x22, 0x71, 0x77, 0xe6, 0xcc, 0x39, 0xbf, 0xfd, + 0x9d, 0xcf, 0x1d, 0x78, 0xe4, 0xf9, 0xc4, 0xb5, 0x88, 0x4b, 0x43, 0x5a, 0xf7, 0x03, 0x8f, 0x79, + 0x75, 0x87, 0xb0, 0xc0, 0xb6, 0x68, 0xfd, 0x66, 0x3f, 0x16, 0x0d, 0x7e, 0x81, 0xb6, 0x52, 0x53, + 0xa1, 0x31, 0xe2, 0xfb, 0x9b, 0xfd, 0xda, 0x3b, 0x97, 0x9e, 0x77, 0x39, 0x25, 0x02, 0x63, 0x1c, + 0x5e, 0xd4, 0x99, 0xed, 0x10, 0xca, 0x4c, 0xc7, 0x17, 0xb6, 0xb5, 0xed, 0xbb, 0x06, 0x5f, 0x07, + 0xa6, 0xef, 0x93, 0x40, 0x62, 0xd5, 0x3e, 0x9a, 0x23, 0x12, 0x10, 0xea, 0x85, 0x81, 0x45, 0x22, + 0x26, 0xb1, 0x2c, 0x8c, 0xf5, 0x3f, 0x14, 0x28, 0x9e, 0xf2, 0x8f, 0xa3, 0x57, 0x50, 0x11, 0x34, + 0x46, 0x13, 0x42, 0xad, 0xc0, 0xf6, 0x99, 0x17, 0x54, 0x95, 0x1d, 0x65, 0x57, 0x6d, 0xec, 0x19, + 0x0b, 0x18, 0x1b, 0xc2, 0xbf, 0x95, 0x38, 0x61, 0xcd, 0xb9, 0xa3, 0x41, 0x47, 0x00, 0x3c, 0x0c, + 0x12, 0xd8, 0x84, 0x56, 0x73, 0x3b, 0xf9, 0x5d, 0xb5, 0xf1, 0xe1, 0x42, 0xd0, 0x81, 0xed, 0x90, + 0x3e, 0x37, 0xc7, 0x19, 0x57, 0xd4, 0x84, 0x52, 0x1c, 0x41, 0x35, 0xcf, 0xb9, 0x7d, 0x30, 0x0f, + 0x93, 0xc4, 0x78, 0xb3, 0x6f, 0x60, 0x29, 0xe3, 0xc4, 0x4f, 0xff, 0x3e, 0x0f, 0xda, 0x5d, 0xce, + 0x08, 0x41, 0xc1, 0x35, 0x1d, 0xc2, 0x03, 0x5e, 0xc7, 0x5c, 0x46, 0x3b, 0xa0, 0xc6, 0xa9, 0xb0, + 0x3d, 0xb7, 0x9a, 0xe3, 0x57, 0x59, 0x55, 0xe4, 0x15, 0xba, 0x36, 0xe3, 0x54, 0xd6, 0x31, 0x97, + 0xd1, 0x4b, 0x28, 0xb0, 0x99, 0x4f, 0xaa, 0x85, 0x1d, 0x65, 0x77, 0xb3, 0xd1, 0x58, 0x2a, 0x75, + 0xc6, 0x60, 0xe6, 0x13, 0xcc, 0xfd, 0x51, 0x0b, 0x60, 0x6a, 0x8e, 0xc9, 0x74, 0x74, 0x4d, 0x66, + 0xb4, 0xba, 0xca, 0x73, 0xf6, 0xfe, 0x42, 0xb4, 0x93, 0xc8, 0xfc, 0x0b, 0x32, 0xc3, 0xeb, 0x53, + 0x29, 0x51, 0xfd, 0x47, 0x05, 0x0a, 0x11, 0x28, 0xba, 0x07, 0xea, 0xf0, 0xac, 0xdf, 0x6b, 0x1f, + 0x76, 0x5e, 0x76, 0xda, 0x2d, 0x6d, 0x25, 0x52, 0x1c, 0x1d, 0x0c, 0x8f, 0xda, 0xa3, 0xce, 0xd9, + 0xe0, 0xc9, 0x63, 0x4d, 0x41, 0x1a, 0x94, 0x85, 0xa2, 0xd5, 0x1d, 0x36, 0x4f, 0xda, 0x5a, 0x0e, + 0x3d, 0x04, 0x24, 0x35, 0x9d, 0xfe, 0x00, 0x77, 0x9a, 0xc3, 0x41, 0xa7, 0x7b, 0xa6, 0xe5, 0xd1, + 0x7d, 0xd0, 0x0e, 0x87, 0xa7, 0xc3, 0x93, 0x83, 0x41, 0xe7, 0x3c, 0xf6, 0x2f, 0xa0, 0x07, 0x50, + 0xc9, 0x68, 0x25, 0xc8, 0x2a, 0xda, 0x82, 0xff, 0x65, 0xd5, 0x59, 0xa4, 0x22, 0x52, 0x61, 0xad, + 0x3f, 0x3c, 0x3d, 0x3d, 0xc0, 0x5f, 0x6a, 0x6b, 0xfa, 0x0b, 0x28, 0xc5, 0x21, 0x20, 0x0d, 0xf2, + 0xd7, 0x64, 0x26, 0xcb, 0x11, 0x89, 0xff, 0x5c, 0x0d, 0xfd, 0x57, 0x05, 0x20, 0xed, 0x1b, 0x74, + 0x08, 0xf7, 0x28, 0x33, 0x03, 0x36, 0x4a, 0x26, 0x48, 0xb6, 0x73, 0xcd, 0x10, 0x23, 0x64, 0xc4, + 0x23, 0xc4, 0xbb, 0x8d, 0x5b, 0xe0, 0x4d, 0xee, 0x92, 0x9c, 0xd1, 0xe7, 0x50, 0x16, 0x55, 0xb8, + 0x31, 0xa7, 0xe1, 0x1b, 0xf6, 0x2e, 0x0f, 0xe2, 0x3c, 0xb2, 0xc7, 0xea, 0x34, 0x91, 0x29, 0x7a, + 0x0e, 0x45, 0xdf, 0xb3, 0x5d, 0x46, 0xab, 0x79, 0x8e, 0xa2, 0x2f, 0x44, 0xe9, 0x45, 0xa6, 0x58, + 0x7a, 0xe8, 0x9f, 
0x01, 0xa4, 0xb0, 0xe8, 0x3e, 0xac, 0x72, 0x3e, 0x32, 0x3f, 0xe2, 0x80, 0xb6, + 0x60, 0xfd, 0xca, 0xa4, 0x82, 0x29, 0xcf, 0x4f, 0x09, 0x97, 0xae, 0x4c, 0xca, 0x5d, 0xf4, 0x9f, + 0x73, 0xb0, 0xca, 0x21, 0xd1, 0x33, 0x58, 0x5f, 0x26, 0x23, 0xa9, 0x31, 0x7a, 0x17, 0x54, 0xdb, + 0x65, 0x4f, 0x1e, 0x67, 0x3e, 0x91, 0x3f, 0x5e, 0xc1, 0xc0, 0x95, 0x82, 0xd9, 0x7b, 0x50, 0x9e, + 0x78, 0xe1, 0x78, 0x4a, 0xa4, 0x4d, 0x34, 0x19, 0xca, 0xf1, 0x0a, 0x56, 0x85, 0x56, 0x18, 0x8d, + 0x00, 0x4d, 0x6c, 0xca, 0x02, 0x7b, 0x1c, 0x46, 0x85, 0x93, 0xa6, 0x05, 0x4e, 0xc5, 0x58, 0x98, + 0x94, 0x56, 0xc6, 0x8d, 0x63, 0x1d, 0xaf, 0xe0, 0xca, 0xe4, 0xae, 0x12, 0xf5, 0x60, 0x83, 0x86, + 0x8e, 0x63, 0x06, 0x33, 0x89, 0xbd, 0xca, 0xb1, 0x1f, 0x2d, 0xc4, 0xee, 0x0b, 0x8f, 0x18, 0xb6, + 0x4c, 0x33, 0xe7, 0xe6, 0x9a, 0xcc, 0xb8, 0xfe, 0x4b, 0x11, 0x2a, 0x73, 0x2c, 0xa2, 0x82, 0x58, + 0x5e, 0xe8, 0x32, 0x9e, 0xcf, 0x3c, 0x16, 0x87, 0xa8, 0x89, 0x69, 0xe8, 0xf0, 0x3c, 0x29, 0x38, + 0x12, 0xd1, 0x53, 0xa8, 0xd2, 0xd0, 0x19, 0x79, 0x17, 0x23, 0xfa, 0x3a, 0x34, 0x03, 0x32, 0x19, + 0x4d, 0xc8, 0x8d, 0x6d, 0xf2, 0x8e, 0xe6, 0xa9, 0xc2, 0x0f, 0x68, 0xe8, 0x74, 0x2f, 0xfa, 0xe2, + 0xb6, 0x15, 0x5f, 0x22, 0x0b, 0x36, 0xc7, 0xa1, 0x75, 0x4d, 0xd8, 0xc8, 0xe3, 0xcd, 0x4e, 0x65, + 0xba, 0x3e, 0x5d, 0x2e, 0x5d, 0x46, 0x93, 0x83, 0x74, 0x05, 0x06, 0xde, 0x18, 0x67, 0x8f, 0xa8, + 0x0b, 0x6b, 0x42, 0x11, 0xef, 0x9b, 0x4f, 0xde, 0x0a, 0x1d, 0xc7, 0x28, 0xb5, 0x1f, 0x14, 0xd8, + 0xb8, 0xf5, 0x45, 0x64, 0x41, 0x89, 0x7c, 0xe3, 0x4f, 0x6d, 0xcb, 0x66, 0xb2, 0xf7, 0xda, 0xff, + 0x26, 0x02, 0xa3, 0x2d, 0xc1, 0x8e, 0x57, 0x70, 0x02, 0x5c, 0xd3, 0xa1, 0x14, 0xeb, 0xd1, 0x43, + 0x28, 0x8e, 0xbd, 0xd0, 0x9d, 0xd0, 0xaa, 0xb2, 0x93, 0xdf, 0x55, 0xb0, 0x3c, 0x35, 0x8b, 0x62, + 0x4d, 0xd7, 0x28, 0x14, 0x05, 0xe2, 0xdf, 0xd4, 0xb0, 0x1f, 0x11, 0x26, 0x8e, 0x3f, 0x35, 0x03, + 0x5e, 0x48, 0xb5, 0xf1, 0x74, 0x49, 0xc2, 0x6d, 0xe9, 0x8e, 0x13, 0xa0, 0xda, 0xb7, 0xb9, 0x88, + 0xa1, 0x38, 0xdc, 0x1e, 0x66, 0x25, 0x1e, 0xe6, 0x5b, 0x53, 0x9a, 0x5b, 0x66, 0x4a, 0xbf, 0x02, + 0xd5, 0x64, 0xcc, 0xb4, 0xae, 0x1c, 0x92, 0xee, 0x9a, 0xe3, 0xb7, 0x24, 0x6d, 0x1c, 0xa4, 0x50, + 0x6d, 0x97, 0x05, 0x33, 0x9c, 0x05, 0xaf, 0xbd, 0x00, 0xed, 0xae, 0xc1, 0x5f, 0xac, 0xee, 0x24, + 0xc2, 0x5c, 0x66, 0x5d, 0x3d, 0xcf, 0x3d, 0x53, 0xf4, 0xdf, 0xf3, 0x50, 0xce, 0xce, 0x1d, 0xda, + 0xcf, 0x16, 0x41, 0x6d, 0x6c, 0xcd, 0x85, 0xdc, 0x49, 0x76, 0x4d, 0x5c, 0x21, 0x23, 0x9d, 0x32, + 0xb5, 0xf1, 0xff, 0x39, 0x87, 0x56, 0xba, 0x78, 0xc4, 0x0c, 0x9e, 0x41, 0x89, 0xba, 0xa6, 0x4f, + 0xaf, 0x3c, 0x26, 0xdf, 0x10, 0x8d, 0x37, 0xde, 0x0b, 0x46, 0x5f, 0x7a, 0xe2, 0x04, 0xa3, 0xf6, + 0x53, 0x0e, 0x4a, 0xb1, 0xfa, 0xbf, 0xe0, 0xff, 0x1a, 0x2a, 0x3e, 0x09, 0x2c, 0xe2, 0x32, 0x3b, + 0x5e, 0xb3, 0x71, 0x95, 0x5b, 0xcb, 0x07, 0x62, 0xf0, 0xe3, 0x01, 0xeb, 0x25, 0x90, 0x58, 0x4b, + 0xe1, 0xc5, 0x9f, 0xab, 0xd6, 0x81, 0xca, 0x9c, 0x19, 0xda, 0x06, 0x48, 0x0d, 0x65, 0xf3, 0x66, + 0x34, 0xb7, 0xab, 0x1e, 0xf7, 0x75, 0xf3, 0x3b, 0x05, 0xb6, 0x6d, 0x6f, 0x11, 0xcf, 0x66, 0x59, + 0x3c, 0x8b, 0x68, 0x2f, 0xba, 0xe8, 0x29, 0xaf, 0x5a, 0x97, 0x36, 0xbb, 0x0a, 0xc7, 0x86, 0xe5, + 0x39, 0x75, 0xe1, 0xb3, 0x67, 0xbb, 0x94, 0x05, 0x61, 0xd4, 0x74, 0x7c, 0x3d, 0xd6, 0x53, 0xb8, + 0x3d, 0xf1, 0xe6, 0xbd, 0x24, 0xee, 0xde, 0x65, 0xf6, 0x0d, 0xfe, 0x5b, 0x6e, 0xab, 0xeb, 0x13, + 0xf7, 0x50, 0x7c, 0x93, 0x43, 0xcb, 0xe7, 0x17, 0x35, 0xce, 0xf7, 0xc7, 0x45, 0xee, 0xf6, 0xf1, + 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd0, 0xb4, 0x8d, 0xc7, 0x0b, 0x00, 0x00, +} diff --git 
a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 00000000..5dba6a2a --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 
0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0x69, 0x06, 0x23, 0x97, 0x7c, 0x66, 0x3e, + 0x5e, 0xbb, 0x9d, 0x78, 0x61, 0x96, 0x07, 0x80, 0xa4, 0x02, 0x18, 0xa3, 0x5c, 0xd3, 0x33, 0x4b, + 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x21, 0xba, 0x74, 0x33, 0xf3, 0x8a, 0x4b, 0x8a, + 0x4a, 0x73, 0x53, 0xf3, 0x4a, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0xf4, 0x11, 0x06, 0xea, 0x42, 0x42, + 0x32, 0x3d, 0x35, 0x4f, 0x37, 0x1d, 0x25, 0x40, 0x5f, 0x31, 0xc9, 0xf8, 0x17, 0xa4, 0xe6, 0x39, + 0x43, 0xac, 0x05, 0x9b, 0x8d, 0xf0, 0x66, 0x98, 0x61, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xcf, 0x32, 0xff, 0x46, 0x96, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go new file mode 100644 index 00000000..2f4ab19b --- /dev/null +++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -0,0 +1,1553 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SERVER Span_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + Span_CLIENT Span_SpanKind = 2 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SERVER", + 2: "CLIENT", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SERVER": 1, + "CLIENT": 2, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. + Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. 
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown, or known but other + // than parent-child. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace. And form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next id is 17. +// TODO(bdrutu): Add an example. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The Tracestate on the span. + Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. 
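+ // For example, a root span leaves this empty, while each of its direct
+ // children sets it to the root's 8-byte span_id.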
+ ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // When null or empty string received - receiver may use string "name" + // as a replacement. There might be smarted algorithms implemented by + // receiver to fix the empty span name. + // + // This field is required. + Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // The start time of the span. On the client side, this is the time kept by + // the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to the value of end_time field if it was + // set. Or to the current time if neither was set. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"` + // The end time of the span. On the client side, this is the time kept by + // the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // + // This field is semantically required. When not set on receive - + // receiver should set it to start_time value. It is important to + // keep end_time > start_time for consistency. + // + // This field is required. + EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"` + // A set of attributes on the span. + Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"` + // A stack trace captured at the start of the span. + StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"` + // The included time events. + TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"` + // The included links. + Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"` + // An optional final status for this span. Semantically when Status + // wasn't set it is means span ended without errors and assume + // Status.Ok (code = 0). + Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"` + // An optional resource that is associated with this span. If not set, this span + // should be part of a batch that does include the resource information, unless resource + // information is unknown. 
+ Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` + // A highly recommended but not required flag that identifies when a + // trace crosses a process boundary. True when the parent_span belongs + // to the same process as the current span. This flag is most commonly + // used to indicate the need to adjust time as clocks in different + // processes may not be synchronized. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. + ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() *TruncatableString { + if m != nil { + return m.Name + } + return nil +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// This field 
conveys information about the request's position in multiple distributed tracing graphs.
+// It is a list of Tracestate.Entry, with a maximum of 32 entries.
+//
+// See https://github.com/w3c/distributed-tracing for more details about this field.
+type Span_Tracestate struct {
+	// A list of entries that represent the Tracestate.
+	Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Span_Tracestate) Reset()         { *m = Span_Tracestate{} }
+func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) }
+func (*Span_Tracestate) ProtoMessage()    {}
+func (*Span_Tracestate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
+}
+
+func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b)
+}
+func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic)
+}
+func (m *Span_Tracestate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Span_Tracestate.Merge(m, src)
+}
+func (m *Span_Tracestate) XXX_Size() int {
+	return xxx_messageInfo_Span_Tracestate.Size(m)
+}
+func (m *Span_Tracestate) XXX_DiscardUnknown() {
+	xxx_messageInfo_Span_Tracestate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo
+
+func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
+	if m != nil {
+		return m.Entries
+	}
+	return nil
+}
+
+type Span_Tracestate_Entry struct {
+	// The key must begin with a lowercase letter, and can only contain
+	// lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
+	// '-', asterisks '*', and forward slashes '/'.
+	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// The value is an opaque string of up to 256 printable ASCII (RFC 0020)
+	// characters (i.e., the range 0x20 to 0x7E), excluding ',' and '='.
+	// Note that this also excludes tabs, newlines, carriage returns, etc.
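+	// For example (illustrative values, in the spirit of the W3C
+	// trace-context draft): key "congo" with value "t61rcWkgMzE" would be a
+	// valid entry, while key "Congo" would be invalid because it begins
+	// with an uppercase letter.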
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } +func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate_Entry) ProtoMessage() {} +func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} +} + +func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) +} +func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) +} +func (m *Span_Tracestate_Entry) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate_Entry.Size(m) +} +func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo + +func (m *Span_Tracestate_Entry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Span_Tracestate_Entry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A set of attributes, each with a key and a value. +type Span_Attributes struct { + // The set of attributes. The value can be a string, an integer, a double + // or the Boolean values `true` or `false`. Note, global attributes like + // server name can be set as tags using resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0, then no attributes were dropped. 
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 1} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
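+	//
+	// A consumer might dispatch on the oneof along these lines (a sketch
+	// only; handleAnnotation and handleMessageEvent are hypothetical
+	// helpers):
+	//
+	//	switch v := event.Value.(type) {
+	//	case *Span_TimeEvent_Annotation_:
+	//		handleAnnotation(v.Annotation)
+	//	case *Span_TimeEvent_MessageEvent_:
+	//		handleMessageEvent(v.MessageEvent)
+	//	}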
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +// A text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // The type of MessageEvent. Indicates whether the message was sent or + // received. + Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. For example, this field could + // represent a sequence ID for a streaming RPC. It is recommended to be + // unique within a Span. + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. 
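+	// For example (illustrative values): a MessageEvent with Type SENT,
+	// Id 1, UncompressedSize 512, and CompressedSize 0 describes the first
+	// 512-byte message on a stream, transmitted without compression.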
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { + if m != nil { + return m.UncompressedSize + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { + if m != nil { + return m.CompressedSize + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key-value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 3} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. + Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + // The Tracestate associated with the link. 
+ Tracestate *Span_Tracestate `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 5} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. 
This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +type Status struct { + // The status code. This is optional field. It is safe to assume 0 (OK) + // when not set. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{1} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// The value of an Attribute. +type AttributeValue struct { + // The type of the value. + // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + // *AttributeValue_DoubleValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{2} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + 
+type AttributeValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AttributeValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + (*AttributeValue_DoubleValue)(nil), + } +} + +// The call stack which originated this span. +type StackTrace struct { + // Stack frames in this stack trace. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. + // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. 
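+	//
+	// A sketch of the intended reuse (illustrative only; frames and h are
+	// hypothetical variables): the first span observing a stack trace sends
+	//
+	//	&StackTrace{StackFrames: frames, StackTraceHashId: h}
+	//
+	// and subsequent spans in the same request send just
+	//
+	//	&StackTrace{StackTraceHashId: h}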
+ StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// A single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears. + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code. 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// A description of a binary module. +type Module struct { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents. + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{4} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// A string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. 
Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{5} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") + proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") + proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") + proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") + proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") + proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") + proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") + 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") + proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) +} + +var fileDescriptor_8ea38bbb821bf584 = []byte{ + // 1581 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdb, 0x6e, 0x1b, 0x41, + 0x19, 0xce, 0xfa, 0xec, 0xdf, 0x8e, 0xeb, 0x4c, 0xd3, 0x74, 0x63, 0x0a, 0x0d, 0x6e, 0x0b, 0x29, + 0x25, 0x9b, 0x26, 0x2d, 0x55, 0x8f, 0x2a, 0x71, 0xe2, 0x60, 0x37, 0xa9, 0xeb, 0x8e, 0xdd, 0x88, + 0x83, 0xd0, 0x6a, 0xed, 0x9d, 0x38, 0x4b, 0xec, 0xd9, 0x65, 0x77, 0x36, 0x28, 0x7d, 0x01, 0x84, + 0xe0, 0x86, 0x0b, 0xc4, 0x0b, 0x70, 0xc1, 0xeb, 0x20, 0xee, 0x79, 0x00, 0x24, 0x9e, 0x80, 0x1b, + 0x34, 0x33, 0x7b, 0x72, 0xd2, 0x26, 0xc6, 0xbd, 0xb1, 0xe6, 0xf0, 0x7f, 0xdf, 0x3f, 0xff, 0xcc, + 0x7f, 0x5a, 0xc3, 0x03, 0xdb, 0x21, 0x74, 0x48, 0xa8, 0xe7, 0x7b, 0x9b, 0x8e, 0x6b, 0x33, 0x7b, + 0x93, 0xb9, 0xc6, 0x90, 0x6c, 0x9e, 0x6d, 0xc9, 0x81, 0x26, 0x16, 0xd1, 0x6a, 0x2c, 0x26, 0x57, + 0x34, 0xb9, 0x7b, 0xb6, 0x55, 0x7b, 0x74, 0x89, 0xc1, 0x25, 0x9e, 0xed, 0xbb, 0x92, 0x24, 0x1c, + 0x4b, 0x54, 0xed, 0xee, 0xc8, 0xb6, 0x47, 0x63, 0x22, 0x05, 0x07, 0xfe, 0xf1, 0x26, 0xb3, 0x26, + 0xc4, 0x63, 0xc6, 0xc4, 0x09, 0x04, 0xbe, 0x77, 0x51, 0xe0, 0x77, 0xae, 0xe1, 0x38, 0xc4, 0x0d, + 0xd4, 0xd6, 0xff, 0xbc, 0x02, 0x99, 0x9e, 0x63, 0x50, 0xb4, 0x0a, 0x05, 0x71, 0x04, 0xdd, 0x32, + 0x55, 0x65, 0x4d, 0x59, 0x2f, 0xe3, 0xbc, 0x98, 0xb7, 0x4d, 0x74, 0x1b, 0xf2, 0x9e, 0x63, 0x50, + 0xbe, 0x93, 0x12, 0x3b, 0x39, 0x3e, 0x6d, 0x9b, 0xe8, 0x1d, 0x80, 0x90, 0xf1, 0x98, 0xc1, 0x88, + 0x7a, 0x63, 0x4d, 0x59, 0x2f, 0x6d, 0xff, 0x48, 0xfb, 0xaa, 0x69, 0x1a, 0x57, 0xa4, 0xf5, 0x23, + 0x04, 0x4e, 0xa0, 0xd1, 0x7d, 0xa8, 0x38, 0x86, 0x4b, 0x28, 0xd3, 0x43, 0x5d, 0x69, 0xa1, 0xab, + 0x2c, 0x57, 0x7b, 0x52, 0xe3, 0x4f, 0x21, 0x43, 0x8d, 0x09, 0x51, 0x33, 0x42, 0xd7, 0x8f, 0xaf, + 0xd0, 0xd5, 0x77, 0x7d, 0x3a, 0x34, 0x98, 0x31, 0x18, 0x93, 0x1e, 0x73, 0x2d, 0x3a, 0xc2, 0x02, + 0x89, 0x5e, 0x43, 0xe6, 0xd4, 0xa2, 0xa6, 0x5a, 0x59, 0x53, 0xd6, 0x2b, 0xdb, 0xeb, 0xd7, 0x9d, + 0x96, 0xff, 0x1c, 0x58, 0xd4, 0xc4, 0x02, 0x85, 0x5e, 0x00, 0x78, 0xcc, 0x70, 0x99, 0xce, 0xef, + 0x59, 0xcd, 0x8a, 0x53, 0xd4, 0x34, 0x79, 0xc7, 0x5a, 0x78, 0xc7, 0x5a, 0x3f, 0x7c, 0x04, 0x5c, + 0x14, 0xd2, 0x7c, 0x8e, 0x7e, 0x02, 0x05, 0x42, 0x4d, 0x09, 0xcc, 0x5d, 0x0b, 0xcc, 0x13, 0x6a, + 0x0a, 0xd8, 0x3b, 0x00, 0x83, 0x31, 0xd7, 0x1a, 0xf8, 0x8c, 0x78, 0x6a, 0x7e, 0xb6, 0x3b, 0xde, + 0x89, 0x10, 0x38, 0x81, 0x46, 0xfb, 0x50, 0xf2, 0x98, 0x31, 0x3c, 0xd5, 0x85, 0xb4, 0x5a, 0x10, + 0x64, 0x0f, 0xae, 0x22, 0xe3, 0xd2, 0xe2, 0xc1, 0x30, 0x78, 0xd1, 0x18, 0x1d, 0x40, 0x89, 0x9b, + 0xa1, 0x93, 0x33, 0x42, 0x99, 0xa7, 0x16, 0x67, 0x7c, 0x78, 0x6b, 0x42, 0x9a, 0x02, 0x81, 0x81, + 0x45, 0x63, 0xf4, 0x0a, 0xb2, 0x63, 0x8b, 0x9e, 0x7a, 0x2a, 0x5c, 0x7f, 0x1c, 0x4e, 0x73, 0xc8, + 0x85, 0xb1, 0xc4, 0xa0, 0x17, 0x90, 0xe3, 0xee, 0xe3, 0x7b, 0x6a, 0x49, 0xa0, 0xbf, 0x7f, 0xb5, + 0x31, 0xcc, 0xf7, 0x70, 0x00, 0x40, 0x0d, 0x28, 0x84, 0xc1, 0xa4, 0x56, 0x05, 0xf8, 0x07, 0x97, + 0xc1, 0x51, 0xb8, 0x9d, 0x6d, 0x69, 
0x38, 0x18, 0xe3, 0x08, 0x87, 0x7e, 0x0e, 0xdf, 0xf1, 0x8c, + 0x09, 0xd1, 0x1d, 0xd7, 0x1e, 0x12, 0xcf, 0xd3, 0x0d, 0x4f, 0x4f, 0x38, 0xb1, 0x5a, 0xfe, 0xca, + 0x33, 0x37, 0x6c, 0x7b, 0x7c, 0x64, 0x8c, 0x7d, 0x82, 0x6f, 0x73, 0x78, 0x57, 0xa2, 0x77, 0xbc, + 0x6e, 0xe4, 0xea, 0x68, 0x1f, 0xaa, 0xc3, 0x13, 0x6b, 0x6c, 0xca, 0x68, 0x18, 0xda, 0x3e, 0x65, + 0xea, 0xa2, 0xa0, 0xbb, 0x73, 0x89, 0xee, 0x53, 0x9b, 0xb2, 0x27, 0xdb, 0x92, 0xb0, 0x22, 0x50, + 0x9c, 0x62, 0x97, 0x63, 0x6a, 0x7f, 0x50, 0x00, 0xe2, 0x88, 0x43, 0xef, 0x20, 0x4f, 0x28, 0x73, + 0x2d, 0xe2, 0xa9, 0xca, 0x5a, 0x7a, 0xbd, 0xb4, 0xfd, 0x78, 0xf6, 0x70, 0xd5, 0x9a, 0x94, 0xb9, + 0xe7, 0x38, 0x24, 0xa8, 0x6d, 0x42, 0x56, 0xac, 0xa0, 0x2a, 0xa4, 0x4f, 0xc9, 0xb9, 0xc8, 0x1a, + 0x45, 0xcc, 0x87, 0x68, 0x19, 0xb2, 0x67, 0xfc, 0x38, 0x22, 0x5f, 0x14, 0xb1, 0x9c, 0xd4, 0xfe, + 0x92, 0x02, 0x88, 0x3d, 0x13, 0x19, 0xb0, 0x18, 0xf9, 0xa6, 0x3e, 0x31, 0x9c, 0xe0, 0x44, 0xaf, + 0x67, 0x77, 0xee, 0x78, 0xf8, 0xde, 0x70, 0xe4, 0xe9, 0xca, 0x46, 0x62, 0x09, 0x3d, 0x07, 0xd5, + 0x74, 0x6d, 0xc7, 0x21, 0xa6, 0x1e, 0x87, 0x41, 0x70, 0x9b, 0xfc, 0x68, 0x59, 0xbc, 0x12, 0xec, + 0xc7, 0xa4, 0xf2, 0xde, 0x7e, 0x03, 0x4b, 0x97, 0xc8, 0xbf, 0x60, 0xe8, 0xdb, 0xa4, 0xa1, 0xa5, + 0xed, 0x87, 0x57, 0x9c, 0x3d, 0xa2, 0x93, 0x0f, 0x25, 0x71, 0x2f, 0x53, 0xcf, 0x95, 0xda, 0xdf, + 0xb2, 0x50, 0x8c, 0x82, 0x03, 0x69, 0x90, 0x11, 0x39, 0x42, 0xb9, 0x36, 0x47, 0x08, 0x39, 0x74, + 0x04, 0x60, 0x50, 0x6a, 0x33, 0x83, 0x59, 0x36, 0x0d, 0xce, 0xf1, 0x74, 0xe6, 0x58, 0xd4, 0x76, + 0x22, 0x6c, 0x6b, 0x01, 0x27, 0x98, 0xd0, 0xaf, 0x61, 0x71, 0x42, 0x3c, 0xcf, 0x18, 0x05, 0x71, + 0x2e, 0xf2, 0x71, 0x69, 0xfb, 0xd9, 0xec, 0xd4, 0xef, 0x25, 0x5c, 0x4c, 0x5a, 0x0b, 0xb8, 0x3c, + 0x49, 0xcc, 0x6b, 0x7f, 0x57, 0x00, 0x62, 0xdd, 0xa8, 0x03, 0x25, 0x93, 0x78, 0x43, 0xd7, 0x72, + 0x84, 0x19, 0xca, 0x1c, 0xf9, 0x3d, 0x49, 0x70, 0x21, 0x6d, 0xa6, 0xbe, 0x25, 0x6d, 0xd6, 0xfe, + 0xab, 0x40, 0x39, 0x69, 0x0b, 0xfa, 0x00, 0x19, 0x76, 0xee, 0xc8, 0x27, 0xaa, 0x6c, 0xbf, 0x9a, + 0xef, 0x46, 0xb4, 0xfe, 0xb9, 0x43, 0xb0, 0x20, 0x42, 0x15, 0x48, 0x05, 0xc5, 0x35, 0x83, 0x53, + 0x96, 0x89, 0x1e, 0xc1, 0x92, 0x4f, 0x87, 0xf6, 0xc4, 0x71, 0x89, 0xe7, 0x11, 0x53, 0xf7, 0xac, + 0xcf, 0x44, 0xdc, 0x7f, 0x06, 0x57, 0x93, 0x1b, 0x3d, 0xeb, 0x33, 0x41, 0x3f, 0x84, 0x1b, 0x17, + 0x45, 0x33, 0x42, 0xb4, 0x32, 0x2d, 0x58, 0x7f, 0x0a, 0x19, 0xae, 0x13, 0x2d, 0x43, 0xb5, 0xff, + 0x8b, 0x6e, 0x53, 0xff, 0xd4, 0xe9, 0x75, 0x9b, 0xbb, 0xed, 0xfd, 0x76, 0x73, 0xaf, 0xba, 0x80, + 0x0a, 0x90, 0xe9, 0x35, 0x3b, 0xfd, 0xaa, 0x82, 0xca, 0x50, 0xc0, 0xcd, 0xdd, 0x66, 0xfb, 0xa8, + 0xb9, 0x57, 0x4d, 0x35, 0xf2, 0x81, 0x8b, 0xd7, 0xfe, 0xc9, 0x53, 0x49, 0x9c, 0xb7, 0x5b, 0x00, + 0x71, 0x11, 0x08, 0x62, 0xf7, 0xe1, 0xcc, 0x57, 0x81, 0x8b, 0x51, 0x09, 0x40, 0x2f, 0x61, 0x35, + 0x8a, 0xd2, 0xc8, 0x23, 0xa6, 0xc3, 0xf4, 0x76, 0x18, 0xa6, 0xf1, 0xbe, 0x88, 0x53, 0xf4, 0x16, + 0xee, 0x84, 0xd8, 0x29, 0x6f, 0x0d, 0xe1, 0x69, 0x01, 0x0f, 0xf9, 0x93, 0xf7, 0x1f, 0x04, 0xfa, + 0xbf, 0x52, 0x90, 0xe1, 0x25, 0x65, 0xae, 0x06, 0xe8, 0x4d, 0xe0, 0x08, 0x69, 0xe1, 0x08, 0x0f, + 0x67, 0x29, 0x5d, 0xc9, 0x67, 0x9f, 0x76, 0xd2, 0xcc, 0x37, 0xd5, 0xf6, 0xe9, 0x5e, 0x2c, 0xfb, + 0x2d, 0xbd, 0x58, 0xfd, 0xe0, 0x4a, 0x47, 0xb9, 0x05, 0x4b, 0xbb, 0xad, 0xf6, 0xe1, 0x9e, 0x7e, + 0xd8, 0xee, 0x1c, 0x34, 0xf7, 0xf4, 0x5e, 0x77, 0xa7, 0x53, 0x55, 0xd0, 0x0a, 0xa0, 0xee, 0x0e, + 0x6e, 0x76, 0xfa, 0x53, 0xeb, 0xa9, 0xda, 0x6f, 0x21, 0x2b, 0x4a, 0x36, 0x7a, 0x0e, 0x19, 0x5e, + 0xb4, 0x03, 0x57, 0xb9, 0x3f, 0xcb, 0x65, 0x61, 0x81, 0x40, 
0x1a, 0xdc, 0x0c, 0x1f, 0x59, 0x94, + 0xfd, 0x29, 0xd7, 0x58, 0x0a, 0xb6, 0x84, 0x12, 0xf1, 0xa6, 0xf5, 0x37, 0x50, 0x08, 0xfb, 0x36, + 0xb4, 0x0a, 0xb7, 0xf8, 0x41, 0xf4, 0x83, 0x76, 0x67, 0xef, 0x82, 0x21, 0x00, 0xb9, 0x5e, 0x13, + 0x1f, 0x35, 0x71, 0x55, 0xe1, 0xe3, 0xdd, 0xc3, 0x36, 0xf7, 0xff, 0x54, 0xfd, 0x19, 0xe4, 0x64, + 0xaf, 0x80, 0x10, 0x64, 0x86, 0xb6, 0x29, 0x03, 0x3d, 0x8b, 0xc5, 0x18, 0xa9, 0x90, 0x0f, 0x3c, + 0x2d, 0xa8, 0x6e, 0xe1, 0xb4, 0xfe, 0x0f, 0x05, 0x2a, 0xd3, 0x59, 0x1e, 0x7d, 0x84, 0xb2, 0x27, + 0xb2, 0x93, 0x2e, 0xcb, 0xc4, 0x1c, 0x79, 0xad, 0xb5, 0x80, 0x4b, 0x92, 0x43, 0x52, 0x7e, 0x17, + 0x8a, 0x16, 0x65, 0x7a, 0x5c, 0x76, 0xd2, 0xad, 0x05, 0x5c, 0xb0, 0x28, 0x93, 0xdb, 0x77, 0x01, + 0x06, 0xb6, 0x3d, 0x0e, 0xf6, 0xb9, 0x63, 0x16, 0x5a, 0x0b, 0xb8, 0x38, 0x08, 0x5b, 0x0e, 0x74, + 0x0f, 0xca, 0xa6, 0xed, 0x0f, 0xc6, 0x24, 0x10, 0xe1, 0x6e, 0xa7, 0x70, 0x25, 0x72, 0x55, 0x08, + 0x45, 0x41, 0x5f, 0xff, 0x63, 0x0e, 0x20, 0xee, 0x02, 0x51, 0x9f, 0xdb, 0xc3, 0x3b, 0xc8, 0x63, + 0xd7, 0x98, 0x88, 0x26, 0x82, 0xdb, 0xb3, 0x35, 0x53, 0x0b, 0x29, 0x87, 0xfb, 0x02, 0x88, 0x65, + 0x23, 0x2a, 0x27, 0x68, 0x03, 0x6e, 0x26, 0xfa, 0x52, 0xfd, 0xc4, 0xf0, 0x4e, 0xf4, 0x28, 0x1f, + 0x56, 0xe3, 0xc6, 0xb3, 0x65, 0x78, 0x27, 0x6d, 0xb3, 0xf6, 0x9f, 0x74, 0x70, 0x26, 0x01, 0x47, + 0x1f, 0x61, 0xf1, 0xd8, 0xa7, 0x43, 0x9e, 0x14, 0x74, 0xf1, 0x71, 0x30, 0x4f, 0xf1, 0x28, 0x87, + 0x14, 0x1d, 0x4e, 0x39, 0x80, 0x15, 0xdb, 0xb5, 0x46, 0x16, 0x35, 0xc6, 0xfa, 0x34, 0x77, 0x6a, + 0x0e, 0xee, 0xe5, 0x90, 0x6b, 0x3f, 0xa9, 0xa3, 0x0d, 0xc5, 0x63, 0x6b, 0x4c, 0x24, 0x6d, 0x7a, + 0x0e, 0xda, 0x02, 0x87, 0x0b, 0xaa, 0xbb, 0x50, 0x1a, 0x5b, 0x94, 0xe8, 0xd4, 0x9f, 0x0c, 0x88, + 0x2b, 0x5e, 0x34, 0x8d, 0x81, 0x2f, 0x75, 0xc4, 0x0a, 0xba, 0x07, 0x8b, 0x43, 0x7b, 0xec, 0x4f, + 0x68, 0x28, 0x92, 0x15, 0x22, 0x65, 0xb9, 0x18, 0x08, 0x35, 0xa0, 0x34, 0xb6, 0x0d, 0x53, 0x9f, + 0xd8, 0xa6, 0x3f, 0x0e, 0xbf, 0x51, 0xae, 0x6a, 0xa8, 0xdf, 0x0b, 0x41, 0x0c, 0x1c, 0x25, 0xc7, + 0xa8, 0x07, 0x15, 0xd9, 0x1a, 0xeb, 0x67, 0xc4, 0xf5, 0x78, 0x25, 0xcf, 0xcf, 0x61, 0xd9, 0xa2, + 0xe4, 0x38, 0x92, 0x14, 0xb5, 0xdf, 0x2b, 0x50, 0x4a, 0xf8, 0x0e, 0xda, 0x87, 0xac, 0x70, 0xbf, + 0x59, 0x5a, 0xd8, 0x2f, 0x79, 0x1f, 0x96, 0x70, 0xf4, 0x18, 0x96, 0xc3, 0xb4, 0x22, 0xdd, 0x79, + 0x2a, 0xaf, 0xa0, 0x60, 0x4f, 0x2a, 0x95, 0x89, 0xe5, 0xaf, 0x0a, 0xe4, 0x02, 0x4b, 0xf7, 0x20, + 0x17, 0x5c, 0xd4, 0x3c, 0xee, 0x16, 0x60, 0xd1, 0xcf, 0xa0, 0x30, 0xf0, 0x79, 0x9b, 0x1f, 0xb8, + 0xfb, 0xff, 0xcb, 0x93, 0x17, 0xe8, 0xb6, 0x59, 0xff, 0x15, 0x2c, 0x5d, 0xda, 0x8d, 0xdb, 0x70, + 0x25, 0xd1, 0x86, 0x73, 0xb3, 0x99, 0x14, 0x25, 0xa6, 0x3e, 0x38, 0x67, 0x64, 0xda, 0xec, 0x68, + 0xaf, 0x71, 0xce, 0x88, 0x30, 0xbb, 0xf1, 0x27, 0x05, 0xee, 0x58, 0xf6, 0xd7, 0x0f, 0xd6, 0x90, + 0x9f, 0x18, 0x5d, 0xbe, 0xd8, 0x55, 0x7e, 0xd9, 0x18, 0x59, 0xec, 0xc4, 0x1f, 0x68, 0x43, 0x7b, + 0xb2, 0x29, 0xe5, 0x37, 0x2c, 0xea, 0x31, 0xd7, 0x9f, 0x10, 0x2a, 0x8b, 0xf7, 0x66, 0x4c, 0xb5, + 0x21, 0xff, 0xe3, 0x18, 0x11, 0xba, 0x31, 0x8a, 0xff, 0x2c, 0xf9, 0x77, 0x6a, 0xf5, 0x83, 0x43, + 0xe8, 0xae, 0xd4, 0x26, 0x88, 0x65, 0xb1, 0xd2, 0x8e, 0xb6, 0x06, 0x39, 0x01, 0x79, 0xf2, 0xbf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x56, 0xb6, 0xfd, 0x6c, 0x11, 0x00, 0x00, +} diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go new file mode 100644 index 00000000..02538778 --- /dev/null +++ 
b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -0,0 +1,359 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ProbabilitySampler + // *TraceConfig_ConstantSampler + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` + // The global default max number of message events per span. + MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` + // The global default max number of link entries per span. 
+ MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{0} +} + +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ProbabilitySampler struct { + ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { + if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { + return x.ProbabilitySampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { + if m != nil { + return m.MaxNumberOfAnnotations + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { + if m != nil { + return m.MaxNumberOfMessageEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ProbabilitySampler)(nil), + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that tries to uniformly sample traces with a given probability. 
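+// For illustration only (a sketch, not from the source), such a sampler
+// could be selected in a TraceConfig as
+//
+//	cfg := &TraceConfig{
+//		Sampler: &TraceConfig_ProbabilitySampler{
+//			ProbabilitySampler: &ProbabilitySampler{SamplingProbability: 0.1},
+//		},
+//	}
+//
+// which would sample roughly 10% of traces.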
+// The probability of sampling a trace is equal to that of the specified probability. +type ProbabilitySampler struct { + // The desired probability of sampling. Must be within [0.0, 1.0]. + SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } +func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } +func (*ProbabilitySampler) ProtoMessage() {} +func (*ProbabilitySampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{1} +} + +func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) +} +func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) +} +func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbabilitySampler.Merge(m, src) +} +func (m *ProbabilitySampler) XXX_Size() int { + return xxx_messageInfo_ProbabilitySampler.Size(m) +} +func (m *ProbabilitySampler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo + +func (m *ProbabilitySampler) GetSamplingProbability() float64 { + if m != nil { + return m.SamplingProbability + } + return 0 +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2} +} + +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{3} +} + +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") + proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") + proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") + proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) +} + +var fileDescriptor_5359209b41ff50c5 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0x97, 0x76, 0x6c, 0xec, 0x9b, 0xb6, 0x05, 0x57, 0x43, 0xa9, 0xb4, 0xc3, 0x94, 0x0b, + 0x13, 0x22, 0x09, 0x1d, 0x07, 0x84, 0x90, 0x90, 0xda, 0x6e, 0x15, 0x87, 0xd2, 0x56, 0xd9, 0x44, + 0x05, 0x97, 0xe0, 0x64, 0x6e, 0xb0, 0x68, 0xec, 0x60, 0x3b, 0xd5, 0x78, 0x0d, 0xce, 0x3c, 0x04, + 0xcf, 0xc5, 0x53, 0xa0, 0x3a, 0x21, 0x49, 0xdb, 0x6d, 0xe2, 0x96, 0xef, 0xfb, 0x7f, 0xbf, 0x9f, + 0xad, 0xd8, 0x86, 0x17, 0x3c, 0x25, 0x2c, 0x22, 0x4c, 0x66, 0xd2, 0x4b, 0x05, 0x57, 0xdc, 0x53, + 0x02, 0x47, 0xc4, 0x5b, 0x74, 0xf2, 0x8f, 0x20, 0xe2, 0x6c, 0x46, 0x63, 0x57, 0x67, 0xa8, 0x5d, + 0x4d, 0xe7, 0x1d, 0x57, 0x0f, 0xb9, 0x8b, 0x8e, 0xfd, 0x6b, 0x1b, 0xf6, 0xaf, 0x97, 0x45, 0x5f, + 0x03, 0xe8, 0x0b, 0xb4, 0x52, 0xc1, 0x43, 0x1c, 0xd2, 0x39, 0x55, 0x3f, 0x02, 0x89, 0x93, 0x74, + 0x4e, 0x84, 0x65, 0x9c, 0x1a, 0x67, 0xfb, 0xe7, 0x8e, 0x7b, 0xaf, 0xc8, 0x9d, 0x54, 0xd4, 0x55, + 0x0e, 0xbd, 0xdf, 0xf2, 0x51, 0xba, 0xd1, 0x45, 0x53, 0x30, 0x23, 0xce, 0xa4, 0xc2, 0x4c, 0x95, + 0xfa, 0x86, 0xd6, 0x3f, 0x7f, 0x40, 0xdf, 0x2f, 0x90, 0xca, 0x7d, 0x14, 0xad, 0xb6, 0xd0, 0x0d, + 0x1c, 0x0b, 0xac, 0x48, 0x30, 0xa7, 0x09, 0x55, 0x94, 0xc5, 0xa5, 0xbd, 0xa9, 0xed, 0xee, 0x03, + 0x76, 0x1f, 0x2b, 0x32, 0x2c, 0xb0, 0x6a, 0x85, 0x96, 0xd8, 0x6c, 0xa3, 0xd7, 0x60, 0x25, 0xf8, + 0x36, 0x60, 0x59, 0x12, 0x12, 0x11, 0xf0, 0x59, 0x80, 0x95, 0x12, 0x34, 0xcc, 0x14, 0x91, 0xd6, + 
0xf6, 0xa9, 0x71, 0xd6, 0xf4, 0x8f, 0x13, 0x7c, 0x3b, 0xd2, 0xf1, 0x78, 0xd6, 0x2d, 0x43, 0xf4, + 0x06, 0xda, 0x6b, 0x20, 0x63, 0x5c, 0x61, 0x45, 0x39, 0x93, 0xd6, 0x23, 0x4d, 0x3e, 0xad, 0x93, + 0x55, 0x8a, 0xde, 0xc1, 0xc9, 0x2a, 0x9a, 0x10, 0x29, 0x71, 0x4c, 0x02, 0xb2, 0x20, 0x4c, 0x49, + 0x6b, 0x47, 0xd3, 0x56, 0x8d, 0xfe, 0x90, 0x0f, 0x5c, 0xea, 0x1c, 0x39, 0xd0, 0x5a, 0xe5, 0xe7, + 0x94, 0x7d, 0x93, 0xd6, 0xae, 0xc6, 0xcc, 0x1a, 0x36, 0x5c, 0xf6, 0x7b, 0x7b, 0xb0, 0x5b, 0xfc, + 0x3a, 0x7b, 0x00, 0x68, 0xf3, 0x60, 0xd1, 0x4b, 0x68, 0xe9, 0x01, 0xca, 0xe2, 0x5a, 0xaa, 0x2f, + 0x89, 0xe1, 0xdf, 0x15, 0xd9, 0xbf, 0x0d, 0x38, 0x5a, 0x3b, 0x42, 0x34, 0x85, 0xc7, 0x37, 0x24, + 0xa2, 0x92, 0x72, 0xa6, 0xd1, 0xc3, 0xf3, 0xb7, 0xff, 0x7f, 0x01, 0xca, 0xfa, 0xa2, 0x50, 0xf8, + 0xa5, 0xcc, 0xbe, 0x00, 0x73, 0x3d, 0x45, 0x87, 0x00, 0xdd, 0xe1, 0xb4, 0xfb, 0xe9, 0x2a, 0x18, + 0x0f, 0x06, 0xe6, 0x16, 0x3a, 0x80, 0xbd, 0x7f, 0xf5, 0xc8, 0x34, 0xd0, 0x13, 0x38, 0x28, 0xca, + 0x49, 0xd7, 0xbf, 0x1c, 0x5d, 0x9b, 0x0d, 0xfb, 0x19, 0xb4, 0xee, 0xb8, 0x16, 0xc8, 0x84, 0xe6, + 0xf7, 0x54, 0xea, 0x0d, 0x37, 0xfd, 0xe5, 0x67, 0xef, 0xa7, 0x01, 0x27, 0x94, 0xdf, 0xbf, 0xf5, + 0x9e, 0x59, 0x7b, 0x60, 0x93, 0x65, 0x34, 0x31, 0x3e, 0xf7, 0x62, 0xaa, 0xbe, 0x66, 0xa1, 0x1b, + 0xf1, 0xc4, 0xcb, 0x29, 0x87, 0x32, 0xa9, 0x44, 0x96, 0x10, 0x96, 0x1f, 0xbb, 0x57, 0x09, 0x9d, + 0xfc, 0x89, 0xc7, 0x84, 0x39, 0x71, 0xf5, 0xd2, 0xff, 0x34, 0xda, 0xe3, 0x94, 0xb0, 0x7e, 0xbe, + 0xa6, 0x16, 0xbb, 0x7a, 0x25, 0xf7, 0x63, 0x27, 0xdc, 0xd1, 0xc8, 0xab, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x0c, 0xfe, 0x32, 0x29, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/chzyer/readline/CHANGELOG.md b/vendor/github.com/chzyer/readline/CHANGELOG.md new file mode 100644 index 00000000..14ff5be1 --- /dev/null +++ b/vendor/github.com/chzyer/readline/CHANGELOG.md @@ -0,0 +1,58 @@ +# ChangeLog + +### 1.4 - 2016-07-25 + +* [#60][60] Support dynamic autocompletion +* Fix ANSI parser on Windows +* Fix wrong column width in complete mode on Windows +* Remove dependent package "golang.org/x/crypto/ssh/terminal" + +### 1.3 - 2016-05-09 + +* [#38][38] add SetChildren for prefix completer interface +* [#42][42] improve multiple lines compatibility +* [#43][43] remove sub-package(runes) for gopkg compatibility +* [#46][46] Auto complete with space prefixed line +* [#48][48] support suspend process (ctrl+Z) +* [#49][49] fix bug that check equals with previous command +* [#53][53] Fix bug which causes integer divide by zero panicking when input buffer is empty + +### 1.2 - 2016-03-05 + +* Add a demo for checking password strength [example/readline-pass-strength](https://github.com/chzyer/readline/blob/master/example/readline-pass-strength/readline-pass-strength.go), , written by [@sahib](https://github.com/sahib) +* [#23][23], support stdin remapping +* [#27][27], add a `UniqueEditLine` to `Config`, which will erase the editing line after user submited it, usually use in IM. +* Add a demo for multiline [example/readline-multiline](https://github.com/chzyer/readline/blob/master/example/readline-multiline/readline-multiline.go) which can submit one SQL by multiple lines. +* Supports performs even stdin/stdout is not a tty. +* Add a new simple apis for single instance, check by [here](https://github.com/chzyer/readline/blob/master/std.go). It need to save history manually if using this api. +* [#28][28], fixes the history is not working as expected. 
+* [#33][33], vim mode now supports `c`, `d`, `x (delete character)`, `r (replace character)`
+
+### 1.1 - 2015-11-20
+
+* [#12][12] Add support for key ``/``/``
+* Only enter raw mode as needed (when `Readline()` is called); the program still receives signals (e.g. Ctrl+C) while not interacting with `readline`.
+* Bugs fixed for `PrefixCompleter`
+* Pressing `Ctrl+D` on an empty line returns `io.EOF` as the error; pressing `Ctrl+C` at any time returns `ErrInterrupt` instead of `io.EOF`. This provides a shell-like user experience.
+* Customizable Interrupt/EOF prompt in `Config`
+* [#17][17] Change the atomic package to use 32-bit functions so it can run on 32-bit ARM devices
+* Provides a new password user experience (`readline.ReadPasswordEx()`).
+
+### 1.0 - 2015-10-14
+
+* Initial public release.
+
+[12]: https://github.com/chzyer/readline/pull/12
+[17]: https://github.com/chzyer/readline/pull/17
+[23]: https://github.com/chzyer/readline/pull/23
+[27]: https://github.com/chzyer/readline/pull/27
+[28]: https://github.com/chzyer/readline/pull/28
+[33]: https://github.com/chzyer/readline/pull/33
+[38]: https://github.com/chzyer/readline/pull/38
+[42]: https://github.com/chzyer/readline/pull/42
+[43]: https://github.com/chzyer/readline/pull/43
+[46]: https://github.com/chzyer/readline/pull/46
+[48]: https://github.com/chzyer/readline/pull/48
+[49]: https://github.com/chzyer/readline/pull/49
+[53]: https://github.com/chzyer/readline/pull/53
+[60]: https://github.com/chzyer/readline/pull/60
diff --git a/vendor/github.com/chzyer/readline/LICENSE b/vendor/github.com/chzyer/readline/LICENSE
new file mode 100644
index 00000000..c9afab3d
--- /dev/null
+++ b/vendor/github.com/chzyer/readline/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Chzyer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
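The changelog above highlights dynamic autocompletion ([#60]). As a quick orientation before the vendored sources that follow, here is a minimal, illustrative sketch of how that API fits together. It uses only identifiers defined later in this diff (`NewPrefixCompleter`, `PcItem`, `PcItemDynamic`, `Config.AutoComplete`, `NewEx`, `Readline`); the command names and host list are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/chzyer/readline"
)

func main() {
	completer := readline.NewPrefixCompleter(
		// Static candidates: "mode vi" and "mode emacs".
		readline.PcItem("mode",
			readline.PcItem("vi"),
			readline.PcItem("emacs"),
		),
		// Dynamic candidates are recomputed each time completion runs (#60).
		readline.PcItem("connect",
			readline.PcItemDynamic(func(line string) []string {
				// Hypothetical lookup; a real callback might list hosts or files.
				return []string{"alpha.example.com", "beta.example.com"}
			}),
		),
	)

	rl, err := readline.NewEx(&readline.Config{
		Prompt:       "> ",
		AutoComplete: completer,
	})
	if err != nil {
		panic(err)
	}
	defer rl.Close()

	for {
		line, err := rl.Readline()
		if err == readline.ErrInterrupt { // Ctrl+C: redraw an empty prompt
			continue
		} else if err != nil { // io.EOF on Ctrl+D
			break
		}
		fmt.Println("read:", line)
	}
}
```

Pressing Tab after `connect ` would then offer the two hypothetical hosts, while `Ctrl+D` on an empty line ends the loop with `io.EOF`.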
+ diff --git a/vendor/github.com/chzyer/readline/README.md b/vendor/github.com/chzyer/readline/README.md new file mode 100644 index 00000000..fab974b7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/README.md @@ -0,0 +1,114 @@ +[![Build Status](https://travis-ci.org/chzyer/readline.svg?branch=master)](https://travis-ci.org/chzyer/readline) +[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.md) +[![Version](https://img.shields.io/github/tag/chzyer/readline.svg)](https://github.com/chzyer/readline/releases) +[![GoDoc](https://godoc.org/github.com/chzyer/readline?status.svg)](https://godoc.org/github.com/chzyer/readline) +[![OpenCollective](https://opencollective.com/readline/badge/backers.svg)](#backers) +[![OpenCollective](https://opencollective.com/readline/badge/sponsors.svg)](#sponsors) + +
+
+A powerful readline library for `Linux` `macOS` `Windows` `Solaris`
+
+## Guide
+
+* [Demo](example/readline-demo/readline-demo.go)
+* [Shortcut](doc/shortcut.md)
+
+## Repos using readline
+
+[![cockroachdb](https://img.shields.io/github/stars/cockroachdb/cockroach.svg?label=cockroachdb/cockroach)](https://github.com/cockroachdb/cockroach)
+[![robertkrimen/otto](https://img.shields.io/github/stars/robertkrimen/otto.svg?label=robertkrimen/otto)](https://github.com/robertkrimen/otto)
+[![empire](https://img.shields.io/github/stars/remind101/empire.svg?label=remind101/empire)](https://github.com/remind101/empire)
+[![mehrdadrad/mylg](https://img.shields.io/github/stars/mehrdadrad/mylg.svg?label=mehrdadrad/mylg)](https://github.com/mehrdadrad/mylg)
+[![knq/usql](https://img.shields.io/github/stars/knq/usql.svg?label=knq/usql)](https://github.com/knq/usql)
+[![youtube/doorman](https://img.shields.io/github/stars/youtube/doorman.svg?label=youtube/doorman)](https://github.com/youtube/doorman)
+[![bom-d-van/harp](https://img.shields.io/github/stars/bom-d-van/harp.svg?label=bom-d-van/harp)](https://github.com/bom-d-van/harp)
+[![abiosoft/ishell](https://img.shields.io/github/stars/abiosoft/ishell.svg?label=abiosoft/ishell)](https://github.com/abiosoft/ishell)
+[![Netflix/hal-9001](https://img.shields.io/github/stars/Netflix/hal-9001.svg?label=Netflix/hal-9001)](https://github.com/Netflix/hal-9001)
+[![docker/go-p9p](https://img.shields.io/github/stars/docker/go-p9p.svg?label=docker/go-p9p)](https://github.com/docker/go-p9p)
+
+
+## Feedback
+
+If you have any questions, please submit a GitHub issue; pull requests are welcome :)
+
+* [https://twitter.com/chzyer](https://twitter.com/chzyer)
+* [http://weibo.com/2145262190](http://weibo.com/2145262190)
+
+
+## Backers
+
+Love Readline? Help me keep it alive by donating funds to cover project expenses!
+[[Become a backer](https://opencollective.com/readline#backer)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +## Sponsors + +Become a sponsor and get your logo here on our Github page. [[Become a sponsor](https://opencollective.com/readline#sponsor)] + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/chzyer/readline/ansi_windows.go b/vendor/github.com/chzyer/readline/ansi_windows.go new file mode 100644 index 00000000..63b908c1 --- /dev/null +++ b/vendor/github.com/chzyer/readline/ansi_windows.go @@ -0,0 +1,249 @@ +// +build windows + +package readline + +import ( + "bufio" + "io" + "strconv" + "strings" + "sync" + "unicode/utf8" + "unsafe" +) + +const ( + _ = uint16(0) + COLOR_FBLUE = 0x0001 + COLOR_FGREEN = 0x0002 + COLOR_FRED = 0x0004 + COLOR_FINTENSITY = 0x0008 + + COLOR_BBLUE = 0x0010 + COLOR_BGREEN = 0x0020 + COLOR_BRED = 0x0040 + COLOR_BINTENSITY = 0x0080 + + COMMON_LVB_UNDERSCORE = 0x8000 + COMMON_LVB_BOLD = 0x0007 +) + +var ColorTableFg = []word{ + 0, // 30: Black + COLOR_FRED, // 31: Red + COLOR_FGREEN, // 32: Green + COLOR_FRED | COLOR_FGREEN, // 33: Yellow + COLOR_FBLUE, // 34: Blue + COLOR_FRED | COLOR_FBLUE, // 35: Magenta + COLOR_FGREEN | COLOR_FBLUE, // 36: Cyan + COLOR_FRED | COLOR_FBLUE | COLOR_FGREEN, // 37: White +} + +var ColorTableBg = []word{ + 0, // 40: Black + COLOR_BRED, // 41: Red + COLOR_BGREEN, // 42: Green + COLOR_BRED | COLOR_BGREEN, // 43: Yellow + COLOR_BBLUE, // 44: Blue + COLOR_BRED | COLOR_BBLUE, // 45: Magenta + COLOR_BGREEN | COLOR_BBLUE, // 46: Cyan + COLOR_BRED | COLOR_BBLUE | COLOR_BGREEN, // 47: White +} + +type ANSIWriter struct { + target io.Writer + wg sync.WaitGroup + ctx *ANSIWriterCtx + sync.Mutex +} + +func NewANSIWriter(w io.Writer) *ANSIWriter { + a := &ANSIWriter{ + target: w, + ctx: NewANSIWriterCtx(w), + } + return a +} + +func (a *ANSIWriter) Close() error { + a.wg.Wait() + return nil +} + +type ANSIWriterCtx struct { + isEsc bool + isEscSeq bool + arg []string + target *bufio.Writer + wantFlush bool +} + +func NewANSIWriterCtx(target io.Writer) *ANSIWriterCtx { + return &ANSIWriterCtx{ + target: bufio.NewWriter(target), + } +} + +func (a *ANSIWriterCtx) Flush() { + a.target.Flush() +} + +func (a *ANSIWriterCtx) process(r rune) bool { + if a.wantFlush { + if r == 0 || r == CharEsc { + a.wantFlush = false + a.target.Flush() + } + } + if a.isEscSeq { + a.isEscSeq = a.ioloopEscSeq(a.target, r, &a.arg) + return true + } + + switch r { + case CharEsc: + a.isEsc = true + case '[': + if a.isEsc { + a.arg = nil + a.isEscSeq = true + a.isEsc = false + break + } + fallthrough + default: + a.target.WriteRune(r) + a.wantFlush = true + } + return true +} + +func (a *ANSIWriterCtx) ioloopEscSeq(w *bufio.Writer, r rune, argptr *[]string) bool { + arg := *argptr + var err error + + if r >= 'A' && r <= 'D' { + count := short(GetInt(arg, 1)) + info, err := GetConsoleScreenBufferInfo() + if err != nil { + return false + } + switch r { + case 'A': // up + info.dwCursorPosition.y -= count + case 'B': // down + info.dwCursorPosition.y += count + case 'C': // right + info.dwCursorPosition.x += count + case 'D': // left + info.dwCursorPosition.x -= count + } + SetConsoleCursorPosition(&info.dwCursorPosition) + return false + } + + switch r { + case 'J': + killLines() + case 'K': + eraseLine() + case 'm': + color := word(0) + for _, item := range arg { + var c int + c, err = strconv.Atoi(item) + if err != nil { + w.WriteString("[" + strings.Join(arg, ";") + "m") + break + 
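+ // a non-numeric SGR argument aborts parsing here; the raw escape sequence was written through verbatim above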
} + if c >= 30 && c < 40 { + color ^= COLOR_FINTENSITY + color |= ColorTableFg[c-30] + } else if c >= 40 && c < 50 { + color ^= COLOR_BINTENSITY + color |= ColorTableBg[c-40] + } else if c == 4 { + color |= COMMON_LVB_UNDERSCORE | ColorTableFg[7] + } else if c == 1 { + color |= COMMON_LVB_BOLD | COLOR_FINTENSITY + } else { // unknown code treat as reset + color = ColorTableFg[7] + } + } + if err != nil { + break + } + kernel.SetConsoleTextAttribute(stdout, uintptr(color)) + case '\007': // set title + case ';': + if len(arg) == 0 || arg[len(arg)-1] != "" { + arg = append(arg, "") + *argptr = arg + } + return true + default: + if len(arg) == 0 { + arg = append(arg, "") + } + arg[len(arg)-1] += string(r) + *argptr = arg + return true + } + *argptr = nil + return false +} + +func (a *ANSIWriter) Write(b []byte) (int, error) { + a.Lock() + defer a.Unlock() + + off := 0 + for len(b) > off { + r, size := utf8.DecodeRune(b[off:]) + if size == 0 { + return off, io.ErrShortWrite + } + off += size + a.ctx.process(r) + } + a.ctx.Flush() + return off, nil +} + +func killLines() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := (sbi.dwCursorPosition.y - sbi.dwSize.y) * sbi.dwSize.x + size += sbi.dwCursorPosition.x + + var written int + kernel.FillConsoleOutputAttribute(stdout, uintptr(ColorTableFg[7]), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} + +func eraseLine() error { + sbi, err := GetConsoleScreenBufferInfo() + if err != nil { + return err + } + + size := sbi.dwSize.x + sbi.dwCursorPosition.x = 0 + var written int + return kernel.FillConsoleOutputCharacterW(stdout, uintptr(' '), + uintptr(size), + sbi.dwCursorPosition.ptr(), + uintptr(unsafe.Pointer(&written)), + ) +} diff --git a/vendor/github.com/chzyer/readline/complete.go b/vendor/github.com/chzyer/readline/complete.go new file mode 100644 index 00000000..c08c9941 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete.go @@ -0,0 +1,285 @@ +package readline + +import ( + "bufio" + "bytes" + "fmt" + "io" +) + +type AutoCompleter interface { + // Readline will pass the whole line and current offset to it + // Completer need to pass all the candidates, and how long they shared the same characters in line + // Example: + // [go, git, git-shell, grep] + // Do("g", 1) => ["o", "it", "it-shell", "rep"], 1 + // Do("gi", 2) => ["t", "t-shell"], 2 + // Do("git", 3) => ["", "-shell"], 3 + Do(line []rune, pos int) (newLine [][]rune, length int) +} + +type TabCompleter struct{} + +func (t *TabCompleter) Do([]rune, int) ([][]rune, int) { + return [][]rune{[]rune("\t")}, 0 +} + +type opCompleter struct { + w io.Writer + op *Operation + width int + + inCompleteMode bool + inSelectMode bool + candidate [][]rune + candidateSource []rune + candidateOff int + candidateChoise int + candidateColNum int +} + +func newOpCompleter(w io.Writer, op *Operation, width int) *opCompleter { + return &opCompleter{ + w: w, + op: op, + width: width, + } +} + +func (o *opCompleter) doSelect() { + if len(o.candidate) == 1 { + o.op.buf.WriteRunes(o.candidate[0]) + o.ExitCompleteMode(false) + return + } + o.nextCandidate(1) + o.CompleteRefresh() +} + +func (o *opCompleter) nextCandidate(i int) { + o.candidateChoise += i + o.candidateChoise = o.candidateChoise % len(o.candidate) + if o.candidateChoise < 0 { 
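+ // Go's % operator keeps the sign of the dividend, so stepping backwards can leave a negative index; wrap around to the end of the candidate list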
+ o.candidateChoise = len(o.candidate) + o.candidateChoise + } +} + +func (o *opCompleter) OnComplete() bool { + if o.width == 0 { + return false + } + if o.IsInCompleteSelectMode() { + o.doSelect() + return true + } + + buf := o.op.buf + rs := buf.Runes() + + if o.IsInCompleteMode() && o.candidateSource != nil && runes.Equal(rs, o.candidateSource) { + o.EnterCompleteSelectMode() + o.doSelect() + return true + } + + o.ExitCompleteSelectMode() + o.candidateSource = rs + newLines, offset := o.op.cfg.AutoComplete.Do(rs, buf.idx) + if len(newLines) == 0 { + o.ExitCompleteMode(false) + return true + } + + // only Aggregate candidates in non-complete mode + if !o.IsInCompleteMode() { + if len(newLines) == 1 { + buf.WriteRunes(newLines[0]) + o.ExitCompleteMode(false) + return true + } + + same, size := runes.Aggregate(newLines) + if size > 0 { + buf.WriteRunes(same) + o.ExitCompleteMode(false) + return true + } + } + + o.EnterCompleteMode(offset, newLines) + return true +} + +func (o *opCompleter) IsInCompleteSelectMode() bool { + return o.inSelectMode +} + +func (o *opCompleter) IsInCompleteMode() bool { + return o.inCompleteMode +} + +func (o *opCompleter) HandleCompleteSelect(r rune) bool { + next := true + switch r { + case CharEnter, CharCtrlJ: + next = false + o.op.buf.WriteRunes(o.op.candidate[o.op.candidateChoise]) + o.ExitCompleteMode(false) + case CharLineStart: + num := o.candidateChoise % o.candidateColNum + o.nextCandidate(-num) + case CharLineEnd: + num := o.candidateColNum - o.candidateChoise%o.candidateColNum - 1 + o.candidateChoise += num + if o.candidateChoise >= len(o.candidate) { + o.candidateChoise = len(o.candidate) - 1 + } + case CharBackspace: + o.ExitCompleteSelectMode() + next = false + case CharTab, CharForward: + o.doSelect() + case CharBell, CharInterrupt: + o.ExitCompleteMode(true) + next = false + case CharNext: + tmpChoise := o.candidateChoise + o.candidateColNum + if tmpChoise >= o.getMatrixSize() { + tmpChoise -= o.getMatrixSize() + } else if tmpChoise >= len(o.candidate) { + tmpChoise += o.candidateColNum + tmpChoise -= o.getMatrixSize() + } + o.candidateChoise = tmpChoise + case CharBackward: + o.nextCandidate(-1) + case CharPrev: + tmpChoise := o.candidateChoise - o.candidateColNum + if tmpChoise < 0 { + tmpChoise += o.getMatrixSize() + if tmpChoise >= len(o.candidate) { + tmpChoise -= o.candidateColNum + } + } + o.candidateChoise = tmpChoise + default: + next = false + o.ExitCompleteSelectMode() + } + if next { + o.CompleteRefresh() + return true + } + return false +} + +func (o *opCompleter) getMatrixSize() int { + line := len(o.candidate) / o.candidateColNum + if len(o.candidate)%o.candidateColNum != 0 { + line++ + } + return line * o.candidateColNum +} + +func (o *opCompleter) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opCompleter) CompleteRefresh() { + if !o.inCompleteMode { + return + } + lineCnt := o.op.buf.CursorLineCount() + colWidth := 0 + for _, c := range o.candidate { + w := runes.WidthAll(c) + if w > colWidth { + colWidth = w + } + } + colWidth += o.candidateOff + 1 + same := o.op.buf.RuneSlice(-o.candidateOff) + + // -1 to avoid reach the end of line + width := o.width - 1 + colNum := width / colWidth + if colNum != 0 { + colWidth += (width - (colWidth * colNum)) / colNum + } + + o.candidateColNum = colNum + buf := bufio.NewWriter(o.w) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + + colIdx := 0 + lines := 1 + buf.WriteString("\033[J") + for idx, c := range o.candidate { + inSelect := idx == o.candidateChoise && 
o.IsInCompleteSelectMode() + if inSelect { + buf.WriteString("\033[30;47m") + } + buf.WriteString(string(same)) + buf.WriteString(string(c)) + buf.Write(bytes.Repeat([]byte(" "), colWidth-runes.WidthAll(c)-runes.WidthAll(same))) + + if inSelect { + buf.WriteString("\033[0m") + } + + colIdx++ + if colIdx == colNum { + buf.WriteString("\n") + lines++ + colIdx = 0 + } + } + + // move back + fmt.Fprintf(buf, "\033[%dA\r", lineCnt-1+lines) + fmt.Fprintf(buf, "\033[%dC", o.op.buf.idx+o.op.buf.PromptLen()) + buf.Flush() +} + +func (o *opCompleter) aggCandidate(candidate [][]rune) int { + offset := 0 + for i := 0; i < len(candidate[0]); i++ { + for j := 0; j < len(candidate)-1; j++ { + if i > len(candidate[j]) { + goto aggregate + } + if candidate[j][i] != candidate[j+1][i] { + goto aggregate + } + } + offset = i + } +aggregate: + return offset +} + +func (o *opCompleter) EnterCompleteSelectMode() { + o.inSelectMode = true + o.candidateChoise = -1 + o.CompleteRefresh() +} + +func (o *opCompleter) EnterCompleteMode(offset int, candidate [][]rune) { + o.inCompleteMode = true + o.candidate = candidate + o.candidateOff = offset + o.CompleteRefresh() +} + +func (o *opCompleter) ExitCompleteSelectMode() { + o.inSelectMode = false + o.candidate = nil + o.candidateChoise = -1 + o.candidateOff = -1 + o.candidateSource = nil +} + +func (o *opCompleter) ExitCompleteMode(revent bool) { + o.inCompleteMode = false + o.ExitCompleteSelectMode() +} diff --git a/vendor/github.com/chzyer/readline/complete_helper.go b/vendor/github.com/chzyer/readline/complete_helper.go new file mode 100644 index 00000000..58d72487 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_helper.go @@ -0,0 +1,165 @@ +package readline + +import ( + "bytes" + "strings" +) + +// Caller type for dynamic completion +type DynamicCompleteFunc func(string) []string + +type PrefixCompleterInterface interface { + Print(prefix string, level int, buf *bytes.Buffer) + Do(line []rune, pos int) (newLine [][]rune, length int) + GetName() []rune + GetChildren() []PrefixCompleterInterface + SetChildren(children []PrefixCompleterInterface) +} + +type DynamicPrefixCompleterInterface interface { + PrefixCompleterInterface + IsDynamic() bool + GetDynamicNames(line []rune) [][]rune +} + +type PrefixCompleter struct { + Name []rune + Dynamic bool + Callback DynamicCompleteFunc + Children []PrefixCompleterInterface +} + +func (p *PrefixCompleter) Tree(prefix string) string { + buf := bytes.NewBuffer(nil) + p.Print(prefix, 0, buf) + return buf.String() +} + +func Print(p PrefixCompleterInterface, prefix string, level int, buf *bytes.Buffer) { + if strings.TrimSpace(string(p.GetName())) != "" { + buf.WriteString(prefix) + if level > 0 { + buf.WriteString("├") + buf.WriteString(strings.Repeat("─", (level*4)-2)) + buf.WriteString(" ") + } + buf.WriteString(string(p.GetName()) + "\n") + level++ + } + for _, ch := range p.GetChildren() { + ch.Print(prefix, level, buf) + } +} + +func (p *PrefixCompleter) Print(prefix string, level int, buf *bytes.Buffer) { + Print(p, prefix, level, buf) +} + +func (p *PrefixCompleter) IsDynamic() bool { + return p.Dynamic +} + +func (p *PrefixCompleter) GetName() []rune { + return p.Name +} + +func (p *PrefixCompleter) GetDynamicNames(line []rune) [][]rune { + var names = [][]rune{} + for _, name := range p.Callback(string(line)) { + names = append(names, []rune(name+" ")) + } + return names +} + +func (p *PrefixCompleter) GetChildren() []PrefixCompleterInterface { + return p.Children +} + +func (p 
*PrefixCompleter) SetChildren(children []PrefixCompleterInterface) { + p.Children = children +} + +func NewPrefixCompleter(pc ...PrefixCompleterInterface) *PrefixCompleter { + return PcItem("", pc...) +} + +func PcItem(name string, pc ...PrefixCompleterInterface) *PrefixCompleter { + name += " " + return &PrefixCompleter{ + Name: []rune(name), + Dynamic: false, + Children: pc, + } +} + +func PcItemDynamic(callback DynamicCompleteFunc, pc ...PrefixCompleterInterface) *PrefixCompleter { + return &PrefixCompleter{ + Callback: callback, + Dynamic: true, + Children: pc, + } +} + +func (p *PrefixCompleter) Do(line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func Do(p PrefixCompleterInterface, line []rune, pos int) (newLine [][]rune, offset int) { + return doInternal(p, line, pos, line) +} + +func doInternal(p PrefixCompleterInterface, line []rune, pos int, origLine []rune) (newLine [][]rune, offset int) { + line = runes.TrimSpaceLeft(line[:pos]) + goNext := false + var lineCompleter PrefixCompleterInterface + for _, child := range p.GetChildren() { + childNames := make([][]rune, 1) + + childDynamic, ok := child.(DynamicPrefixCompleterInterface) + if ok && childDynamic.IsDynamic() { + childNames = childDynamic.GetDynamicNames(origLine) + } else { + childNames[0] = child.GetName() + } + + for _, childName := range childNames { + if len(line) >= len(childName) { + if runes.HasPrefix(line, childName) { + if len(line) == len(childName) { + newLine = append(newLine, []rune{' '}) + } else { + newLine = append(newLine, childName) + } + offset = len(childName) + lineCompleter = child + goNext = true + } + } else { + if runes.HasPrefix(childName, line) { + newLine = append(newLine, childName[len(line):]) + offset = len(line) + lineCompleter = child + } + } + } + } + + if len(newLine) != 1 { + return + } + + tmpLine := make([]rune, 0, len(line)) + for i := offset; i < len(line); i++ { + if line[i] == ' ' { + continue + } + + tmpLine = append(tmpLine, line[i:]...) 
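+ // input continues past the matched name: recurse into the matched child completer with the remainder of the line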
+ return doInternal(lineCompleter, tmpLine, len(tmpLine), origLine) + } + + if goNext { + return doInternal(lineCompleter, nil, 0, origLine) + } + return +} diff --git a/vendor/github.com/chzyer/readline/complete_segment.go b/vendor/github.com/chzyer/readline/complete_segment.go new file mode 100644 index 00000000..5ceadd80 --- /dev/null +++ b/vendor/github.com/chzyer/readline/complete_segment.go @@ -0,0 +1,82 @@ +package readline + +type SegmentCompleter interface { + // a + // |- a1 + // |--- a11 + // |- a2 + // b + // input: + // DoTree([], 0) [a, b] + // DoTree([a], 1) [a] + // DoTree([a, ], 0) [a1, a2] + // DoTree([a, a], 1) [a1, a2] + // DoTree([a, a1], 2) [a1] + // DoTree([a, a1, ], 0) [a11] + // DoTree([a, a1, a], 1) [a11] + DoSegment([][]rune, int) [][]rune +} + +type dumpSegmentCompleter struct { + f func([][]rune, int) [][]rune +} + +func (d *dumpSegmentCompleter) DoSegment(segment [][]rune, n int) [][]rune { + return d.f(segment, n) +} + +func SegmentFunc(f func([][]rune, int) [][]rune) AutoCompleter { + return &SegmentComplete{&dumpSegmentCompleter{f}} +} + +func SegmentAutoComplete(completer SegmentCompleter) *SegmentComplete { + return &SegmentComplete{ + SegmentCompleter: completer, + } +} + +type SegmentComplete struct { + SegmentCompleter +} + +func RetSegment(segments [][]rune, cands [][]rune, idx int) ([][]rune, int) { + ret := make([][]rune, 0, len(cands)) + lastSegment := segments[len(segments)-1] + for _, cand := range cands { + if !runes.HasPrefix(cand, lastSegment) { + continue + } + ret = append(ret, cand[len(lastSegment):]) + } + return ret, idx +} + +func SplitSegment(line []rune, pos int) ([][]rune, int) { + segs := [][]rune{} + lastIdx := -1 + line = line[:pos] + pos = 0 + for idx, l := range line { + if l == ' ' { + pos = 0 + segs = append(segs, line[lastIdx+1:idx]) + lastIdx = idx + } else { + pos++ + } + } + segs = append(segs, line[lastIdx+1:]) + return segs, pos +} + +func (c *SegmentComplete) Do(line []rune, pos int) (newLine [][]rune, offset int) { + + segment, idx := SplitSegment(line, pos) + + cands := c.DoSegment(segment, idx) + newLine, offset = RetSegment(segment, cands, idx) + for idx := range newLine { + newLine[idx] = append(newLine[idx], ' ') + } + return newLine, offset +} diff --git a/vendor/github.com/chzyer/readline/history.go b/vendor/github.com/chzyer/readline/history.go new file mode 100644 index 00000000..6b17c464 --- /dev/null +++ b/vendor/github.com/chzyer/readline/history.go @@ -0,0 +1,330 @@ +package readline + +import ( + "bufio" + "container/list" + "fmt" + "os" + "strings" + "sync" +) + +type hisItem struct { + Source []rune + Version int64 + Tmp []rune +} + +func (h *hisItem) Clean() { + h.Source = nil + h.Tmp = nil +} + +type opHistory struct { + cfg *Config + history *list.List + historyVer int64 + current *list.Element + fd *os.File + fdLock sync.Mutex + enable bool +} + +func newOpHistory(cfg *Config) (o *opHistory) { + o = &opHistory{ + cfg: cfg, + history: list.New(), + enable: true, + } + return o +} + +func (o *opHistory) Reset() { + o.history = list.New() + o.current = nil +} + +func (o *opHistory) IsHistoryClosed() bool { + o.fdLock.Lock() + defer o.fdLock.Unlock() + return o.fd.Fd() == ^(uintptr(0)) +} + +func (o *opHistory) Init() { + if o.IsHistoryClosed() { + o.initHistory() + } +} + +func (o *opHistory) initHistory() { + if o.cfg.HistoryFile != "" { + o.historyUpdatePath(o.cfg.HistoryFile) + } +} + +// only called by newOpHistory +func (o *opHistory) 
historyUpdatePath(path string) { + o.fdLock.Lock() + defer o.fdLock.Unlock() + f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) + if err != nil { + return + } + o.fd = f + r := bufio.NewReader(o.fd) + total := 0 + for ; ; total++ { + line, err := r.ReadString('\n') + if err != nil { + break + } + // ignore the empty line + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + o.Push([]rune(line)) + o.Compact() + } + if total > o.cfg.HistoryLimit { + o.rewriteLocked() + } + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Compact() { + for o.history.Len() > o.cfg.HistoryLimit && o.history.Len() > 0 { + o.history.Remove(o.history.Front()) + } +} + +func (o *opHistory) Rewrite() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + o.rewriteLocked() +} + +func (o *opHistory) rewriteLocked() { + if o.cfg.HistoryFile == "" { + return + } + + tmpFile := o.cfg.HistoryFile + ".tmp" + fd, err := os.OpenFile(tmpFile, os.O_CREATE|os.O_WRONLY|os.O_TRUNC|os.O_APPEND, 0666) + if err != nil { + return + } + + buf := bufio.NewWriter(fd) + for elem := o.history.Front(); elem != nil; elem = elem.Next() { + buf.WriteString(string(elem.Value.(*hisItem).Source) + "\n") + } + buf.Flush() + + // replace history file + if err = os.Rename(tmpFile, o.cfg.HistoryFile); err != nil { + fd.Close() + return + } + + if o.fd != nil { + o.fd.Close() + } + // fd is write only, just satisfy what we need. + o.fd = fd +} + +func (o *opHistory) Close() { + o.fdLock.Lock() + defer o.fdLock.Unlock() + if o.fd != nil { + o.fd.Close() + } +} + +func (o *opHistory) FindBck(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Prev() { + item := o.showItem(elem.Value) + if isNewSearch { + start += len(rs) + } + if elem == o.current { + if len(item) >= start { + item = item[:start] + } + } + idx := runes.IndexAllBckEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) FindFwd(isNewSearch bool, rs []rune, start int) (int, *list.Element) { + for elem := o.current; elem != nil; elem = elem.Next() { + item := o.showItem(elem.Value) + if isNewSearch { + start -= len(rs) + if start < 0 { + start = 0 + } + } + if elem == o.current { + if len(item)-1 >= start { + item = item[start:] + } else { + continue + } + } + idx := runes.IndexAllEx(item, rs, o.cfg.HistorySearchFold) + if idx < 0 { + continue + } + if elem == o.current { + idx += start + } + return idx, elem + } + return -1, nil +} + +func (o *opHistory) showItem(obj interface{}) []rune { + item := obj.(*hisItem) + if item.Version == o.historyVer { + return item.Tmp + } + return item.Source +} + +func (o *opHistory) Prev() []rune { + if o.current == nil { + return nil + } + current := o.current.Prev() + if current == nil { + return nil + } + o.current = current + return runes.Copy(o.showItem(current.Value)) +} + +func (o *opHistory) Next() ([]rune, bool) { + if o.current == nil { + return nil, false + } + current := o.current.Next() + if current == nil { + return nil, false + } + + o.current = current + return runes.Copy(o.showItem(current.Value)), true +} + +// Disable the current history +func (o *opHistory) Disable() { + o.enable = false +} + +// Enable the current history +func (o *opHistory) Enable() { + o.enable = true +} + +func (o *opHistory) debug() { + Debug("-------") + for item := o.history.Front(); item != nil; item = item.Next() { + Debug(fmt.Sprintf("%+v", item.Value)) + } +} + +// save 
history +func (o *opHistory) New(current []rune) (err error) { + + // history deactivated + if !o.enable { + return nil + } + + current = runes.Copy(current) + + // if just use last command without modify + // just clean lastest history + if back := o.history.Back(); back != nil { + prev := back.Prev() + if prev != nil { + if runes.Equal(current, prev.Value.(*hisItem).Source) { + o.current = o.history.Back() + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + } + + if len(current) == 0 { + o.current = o.history.Back() + if o.current != nil { + o.current.Value.(*hisItem).Clean() + o.historyVer++ + return nil + } + } + + if o.current != o.history.Back() { + // move history item to current command + currentItem := o.current.Value.(*hisItem) + // set current to last item + o.current = o.history.Back() + + current = runes.Copy(currentItem.Tmp) + } + + // err only can be a IO error, just report + err = o.Update(current, true) + + // push a new one to commit current command + o.historyVer++ + o.Push(nil) + return +} + +func (o *opHistory) Revert() { + o.historyVer++ + o.current = o.history.Back() +} + +func (o *opHistory) Update(s []rune, commit bool) (err error) { + o.fdLock.Lock() + defer o.fdLock.Unlock() + s = runes.Copy(s) + if o.current == nil { + o.Push(s) + o.Compact() + return + } + r := o.current.Value.(*hisItem) + r.Version = o.historyVer + if commit { + r.Source = s + if o.fd != nil { + // just report the error + _, err = o.fd.Write([]byte(string(r.Source) + "\n")) + } + } else { + r.Tmp = append(r.Tmp[:0], s...) + } + o.current.Value = r + o.Compact() + return +} + +func (o *opHistory) Push(s []rune) { + s = runes.Copy(s) + elem := o.history.PushBack(&hisItem{Source: s}) + o.current = elem +} diff --git a/vendor/github.com/chzyer/readline/operation.go b/vendor/github.com/chzyer/readline/operation.go new file mode 100644 index 00000000..4c31624f --- /dev/null +++ b/vendor/github.com/chzyer/readline/operation.go @@ -0,0 +1,531 @@ +package readline + +import ( + "errors" + "io" + "sync" +) + +var ( + ErrInterrupt = errors.New("Interrupt") +) + +type InterruptError struct { + Line []rune +} + +func (*InterruptError) Error() string { + return "Interrupted" +} + +type Operation struct { + m sync.Mutex + cfg *Config + t *Terminal + buf *RuneBuffer + outchan chan []rune + errchan chan error + w io.Writer + + history *opHistory + *opSearch + *opCompleter + *opPassword + *opVim +} + +func (o *Operation) SetBuffer(what string) { + o.buf.Set([]rune(what)) +} + +type wrapWriter struct { + r *Operation + t *Terminal + target io.Writer +} + +func (w *wrapWriter) Write(b []byte) (int, error) { + if !w.t.IsReading() { + return w.target.Write(b) + } + + var ( + n int + err error + ) + w.r.buf.Refresh(func() { + n, err = w.target.Write(b) + }) + + if w.r.IsSearchMode() { + w.r.SearchRefresh(-1) + } + if w.r.IsInCompleteMode() { + w.r.CompleteRefresh() + } + return n, err +} + +func NewOperation(t *Terminal, cfg *Config) *Operation { + width := cfg.FuncGetWidth() + op := &Operation{ + t: t, + buf: NewRuneBuffer(t, cfg.Prompt, cfg, width), + outchan: make(chan []rune), + errchan: make(chan error, 1), + } + op.w = op.buf.w + op.SetConfig(cfg) + op.opVim = newVimMode(op) + op.opCompleter = newOpCompleter(op.buf.w, op, width) + op.opPassword = newOpPassword(op) + op.cfg.FuncOnWidthChanged(func() { + newWidth := cfg.FuncGetWidth() + op.opCompleter.OnWidthChange(newWidth) + op.opSearch.OnWidthChange(newWidth) + op.buf.OnWidthChange(newWidth) + }) + go 
op.ioloop() + return op +} + +func (o *Operation) SetPrompt(s string) { + o.buf.SetPrompt(s) +} + +func (o *Operation) SetMaskRune(r rune) { + o.buf.SetMask(r) +} + +func (o *Operation) GetConfig() *Config { + o.m.Lock() + cfg := *o.cfg + o.m.Unlock() + return &cfg +} + +func (o *Operation) ioloop() { + for { + keepInSearchMode := false + keepInCompleteMode := false + r := o.t.ReadRune() + if o.GetConfig().FuncFilterInputRune != nil { + var process bool + r, process = o.GetConfig().FuncFilterInputRune(r) + if !process { + o.buf.Refresh(nil) // to refresh the line + continue // ignore this rune + } + } + + if r == 0 { // io.EOF + if o.buf.Len() == 0 { + o.buf.Clean() + select { + case o.errchan <- io.EOF: + } + break + } else { + // if stdin got io.EOF and there is something left in buffer, + // let's flush them by sending CharEnter. + // And we will got io.EOF int next loop. + r = CharEnter + } + } + isUpdateHistory := true + + if o.IsInCompleteSelectMode() { + keepInCompleteMode = o.HandleCompleteSelect(r) + if keepInCompleteMode { + continue + } + + o.buf.Refresh(nil) + switch r { + case CharEnter, CharCtrlJ: + o.history.Update(o.buf.Runes(), false) + fallthrough + case CharInterrupt: + o.t.KickRead() + fallthrough + case CharBell: + continue + } + } + + if o.IsEnableVimMode() { + r = o.HandleVim(r, o.t.ReadRune) + if r == 0 { + continue + } + } + + switch r { + case CharBell: + if o.IsSearchMode() { + o.ExitSearchMode(true) + o.buf.Refresh(nil) + } + if o.IsInCompleteMode() { + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + } + case CharTab: + if o.GetConfig().AutoComplete == nil { + o.t.Bell() + break + } + if o.OnComplete() { + keepInCompleteMode = true + } else { + o.t.Bell() + break + } + + case CharBckSearch: + if !o.SearchMode(S_DIR_BCK) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharCtrlU: + o.buf.KillFront() + case CharFwdSearch: + if !o.SearchMode(S_DIR_FWD) { + o.t.Bell() + break + } + keepInSearchMode = true + case CharKill: + o.buf.Kill() + keepInCompleteMode = true + case MetaForward: + o.buf.MoveToNextWord() + case CharTranspose: + o.buf.Transpose() + case MetaBackward: + o.buf.MoveToPrevWord() + case MetaDelete: + o.buf.DeleteWord() + case CharLineStart: + o.buf.MoveToLineStart() + case CharLineEnd: + o.buf.MoveToLineEnd() + case CharBackspace, CharCtrlH: + if o.IsSearchMode() { + o.SearchBackspace() + keepInSearchMode = true + break + } + + if o.buf.Len() == 0 { + o.t.Bell() + break + } + o.buf.Backspace() + if o.IsInCompleteMode() { + o.OnComplete() + } + case CharCtrlZ: + o.buf.Clean() + o.t.SleepToResume() + o.Refresh() + case CharCtrlL: + ClearScreen(o.w) + o.Refresh() + case MetaBackspace, CharCtrlW: + o.buf.BackEscapeWord() + case CharCtrlY: + o.buf.Yank() + case CharEnter, CharCtrlJ: + if o.IsSearchMode() { + o.ExitSearchMode(false) + } + o.buf.MoveToLineEnd() + var data []rune + if !o.GetConfig().UniqueEditLine { + o.buf.WriteRune('\n') + data = o.buf.Reset() + data = data[:len(data)-1] // trim \n + } else { + o.buf.Clean() + data = o.buf.Reset() + } + o.outchan <- data + if !o.GetConfig().DisableAutoSaveHistory { + // ignore IO error + _ = o.history.New(data) + } else { + isUpdateHistory = false + } + case CharBackward: + o.buf.MoveBackward() + case CharForward: + o.buf.MoveForward() + case CharPrev: + buf := o.history.Prev() + if buf != nil { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharNext: + buf, ok := o.history.Next() + if ok { + o.buf.Set(buf) + } else { + o.t.Bell() + } + case CharDelete: + if o.buf.Len() > 0 || 
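+ // the key deletes a character while input remains or a search/complete session is active;
+ // on an empty line in normal mode it falls through to the EOF handling below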
!o.IsNormalMode() { + o.t.KickRead() + if !o.buf.Delete() { + o.t.Bell() + } + break + } + + // treat as EOF + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(o.GetConfig().EOFPrompt + "\n") + } + o.buf.Reset() + isUpdateHistory = false + o.history.Revert() + o.errchan <- io.EOF + if o.GetConfig().UniqueEditLine { + o.buf.Clean() + } + case CharInterrupt: + if o.IsSearchMode() { + o.t.KickRead() + o.ExitSearchMode(true) + break + } + if o.IsInCompleteMode() { + o.t.KickRead() + o.ExitCompleteMode(true) + o.buf.Refresh(nil) + break + } + o.buf.MoveToLineEnd() + o.buf.Refresh(nil) + hint := o.GetConfig().InterruptPrompt + "\n" + if !o.GetConfig().UniqueEditLine { + o.buf.WriteString(hint) + } + remain := o.buf.Reset() + if !o.GetConfig().UniqueEditLine { + remain = remain[:len(remain)-len([]rune(hint))] + } + isUpdateHistory = false + o.history.Revert() + o.errchan <- &InterruptError{remain} + default: + if o.IsSearchMode() { + o.SearchChar(r) + keepInSearchMode = true + break + } + o.buf.WriteRune(r) + if o.IsInCompleteMode() { + o.OnComplete() + keepInCompleteMode = true + } + } + + listener := o.GetConfig().Listener + if listener != nil { + newLine, newPos, ok := listener.OnChange(o.buf.Runes(), o.buf.Pos(), r) + if ok { + o.buf.SetWithIdx(newPos, newLine) + } + } + + o.m.Lock() + if !keepInSearchMode && o.IsSearchMode() { + o.ExitSearchMode(false) + o.buf.Refresh(nil) + } else if o.IsInCompleteMode() { + if !keepInCompleteMode { + o.ExitCompleteMode(false) + o.Refresh() + } else { + o.buf.Refresh(nil) + o.CompleteRefresh() + } + } + if isUpdateHistory && !o.IsSearchMode() { + // it will cause null history + o.history.Update(o.buf.Runes(), false) + } + o.m.Unlock() + } +} + +func (o *Operation) Stderr() io.Writer { + return &wrapWriter{target: o.GetConfig().Stderr, r: o, t: o.t} +} + +func (o *Operation) Stdout() io.Writer { + return &wrapWriter{target: o.GetConfig().Stdout, r: o, t: o.t} +} + +func (o *Operation) String() (string, error) { + r, err := o.Runes() + return string(r), err +} + +func (o *Operation) Runes() ([]rune, error) { + o.t.EnterRawMode() + defer o.t.ExitRawMode() + + listener := o.GetConfig().Listener + if listener != nil { + listener.OnChange(nil, 0, 0) + } + + o.buf.Refresh(nil) // print prompt + o.t.KickRead() + select { + case r := <-o.outchan: + return r, nil + case err := <-o.errchan: + if e, ok := err.(*InterruptError); ok { + return e.Line, ErrInterrupt + } + return nil, err + } +} + +func (o *Operation) PasswordEx(prompt string, l Listener) ([]byte, error) { + cfg := o.GenPasswordConfig() + cfg.Prompt = prompt + cfg.Listener = l + return o.PasswordWithConfig(cfg) +} + +func (o *Operation) GenPasswordConfig() *Config { + return o.opPassword.PasswordConfig() +} + +func (o *Operation) PasswordWithConfig(cfg *Config) ([]byte, error) { + if err := o.opPassword.EnterPasswordMode(cfg); err != nil { + return nil, err + } + defer o.opPassword.ExitPasswordMode() + return o.Slice() +} + +func (o *Operation) Password(prompt string) ([]byte, error) { + return o.PasswordEx(prompt, nil) +} + +func (o *Operation) SetTitle(t string) { + o.w.Write([]byte("\033[2;" + t + "\007")) +} + +func (o *Operation) Slice() ([]byte, error) { + r, err := o.Runes() + if err != nil { + return nil, err + } + return []byte(string(r)), nil +} + +func (o *Operation) Close() { + o.history.Close() +} + +func (o *Operation) SetHistoryPath(path string) { + if o.history != nil { + o.history.Close() + } + o.cfg.HistoryFile = path + o.history = newOpHistory(o.cfg) +} + +func (o *Operation) 
IsNormalMode() bool { + return !o.IsInCompleteMode() && !o.IsSearchMode() +} + +func (op *Operation) SetConfig(cfg *Config) (*Config, error) { + op.m.Lock() + defer op.m.Unlock() + if op.cfg == cfg { + return op.cfg, nil + } + if err := cfg.Init(); err != nil { + return op.cfg, err + } + old := op.cfg + op.cfg = cfg + op.SetPrompt(cfg.Prompt) + op.SetMaskRune(cfg.MaskRune) + op.buf.SetConfig(cfg) + width := op.cfg.FuncGetWidth() + + if cfg.opHistory == nil { + op.SetHistoryPath(cfg.HistoryFile) + cfg.opHistory = op.history + cfg.opSearch = newOpSearch(op.buf.w, op.buf, op.history, cfg, width) + } + op.history = cfg.opHistory + + // SetHistoryPath will close opHistory which already exists + // so if we use it next time, we need to reopen it by `InitHistory()` + op.history.Init() + + if op.cfg.AutoComplete != nil { + op.opCompleter = newOpCompleter(op.buf.w, op, width) + } + + op.opSearch = cfg.opSearch + return old, nil +} + +func (o *Operation) ResetHistory() { + o.history.Reset() +} + +// if err is not nil, it just mean it fail to write to file +// other things goes fine. +func (o *Operation) SaveHistory(content string) error { + return o.history.New([]rune(content)) +} + +func (o *Operation) Refresh() { + if o.t.IsReading() { + o.buf.Refresh(nil) + } +} + +func (o *Operation) Clean() { + o.buf.Clean() +} + +func FuncListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) Listener { + return &DumpListener{f: f} +} + +type DumpListener struct { + f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +func (d *DumpListener) OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) { + return d.f(line, pos, key) +} + +type Listener interface { + OnChange(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool) +} + +type Painter interface { + Paint(line []rune, pos int) []rune +} + +type defaultPainter struct{} + +func (p *defaultPainter) Paint(line []rune, _ int) []rune { + return line +} diff --git a/vendor/github.com/chzyer/readline/password.go b/vendor/github.com/chzyer/readline/password.go new file mode 100644 index 00000000..414288c2 --- /dev/null +++ b/vendor/github.com/chzyer/readline/password.go @@ -0,0 +1,33 @@ +package readline + +type opPassword struct { + o *Operation + backupCfg *Config +} + +func newOpPassword(o *Operation) *opPassword { + return &opPassword{o: o} +} + +func (o *opPassword) ExitPasswordMode() { + o.o.SetConfig(o.backupCfg) + o.backupCfg = nil +} + +func (o *opPassword) EnterPasswordMode(cfg *Config) (err error) { + o.backupCfg, err = o.o.SetConfig(cfg) + return +} + +func (o *opPassword) PasswordConfig() *Config { + return &Config{ + EnableMask: true, + InterruptPrompt: "\n", + EOFPrompt: "\n", + HistoryLimit: -1, + Painter: &defaultPainter{}, + + Stdout: o.o.cfg.Stdout, + Stderr: o.o.cfg.Stderr, + } +} diff --git a/vendor/github.com/chzyer/readline/rawreader_windows.go b/vendor/github.com/chzyer/readline/rawreader_windows.go new file mode 100644 index 00000000..073ef150 --- /dev/null +++ b/vendor/github.com/chzyer/readline/rawreader_windows.go @@ -0,0 +1,125 @@ +// +build windows + +package readline + +import "unsafe" + +const ( + VK_CANCEL = 0x03 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_ESCAPE = 0x1B + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_DELETE = 0x2E + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + 
VK_RCONTROL = 0xA3 +) + +// RawReader translate input record to ANSI escape sequence. +// To provides same behavior as unix terminal. +type RawReader struct { + ctrlKey bool + altKey bool +} + +func NewRawReader() *RawReader { + r := new(RawReader) + return r +} + +// only process one action in one read +func (r *RawReader) Read(buf []byte) (int, error) { + ir := new(_INPUT_RECORD) + var read int + var err error +next: + err = kernel.ReadConsoleInputW(stdin, + uintptr(unsafe.Pointer(ir)), + 1, + uintptr(unsafe.Pointer(&read)), + ) + if err != nil { + return 0, err + } + if ir.EventType != EVENT_KEY { + goto next + } + ker := (*_KEY_EVENT_RECORD)(unsafe.Pointer(&ir.Event[0])) + if ker.bKeyDown == 0 { // keyup + if r.ctrlKey || r.altKey { + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = false + case VK_MENU: //alt + r.altKey = false + } + } + goto next + } + + if ker.unicodeChar == 0 { + var target rune + switch ker.wVirtualKeyCode { + case VK_RCONTROL, VK_LCONTROL: + r.ctrlKey = true + case VK_MENU: //alt + r.altKey = true + case VK_LEFT: + target = CharBackward + case VK_RIGHT: + target = CharForward + case VK_UP: + target = CharPrev + case VK_DOWN: + target = CharNext + } + if target != 0 { + return r.write(buf, target) + } + goto next + } + char := rune(ker.unicodeChar) + if r.ctrlKey { + switch char { + case 'A': + char = CharLineStart + case 'E': + char = CharLineEnd + case 'R': + char = CharBckSearch + case 'S': + char = CharFwdSearch + } + } else if r.altKey { + switch char { + case VK_BACK: + char = CharBackspace + } + return r.writeEsc(buf, char) + } + return r.write(buf, char) +} + +func (r *RawReader) writeEsc(b []byte, char rune) (int, error) { + b[0] = '\033' + n := copy(b[1:], []byte(string(char))) + return n + 1, nil +} + +func (r *RawReader) write(b []byte, char rune) (int, error) { + n := copy(b, []byte(string(char))) + return n, nil +} + +func (r *RawReader) Close() error { + return nil +} diff --git a/vendor/github.com/chzyer/readline/readline.go b/vendor/github.com/chzyer/readline/readline.go new file mode 100644 index 00000000..0e7aca06 --- /dev/null +++ b/vendor/github.com/chzyer/readline/readline.go @@ -0,0 +1,326 @@ +// Readline is a pure go implementation for GNU-Readline kind library. 
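+// It layers line editing, persistent history, tab completion, vim mode and password input on top of raw terminal mode.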
+//
+// example:
+//     rl, err := readline.New("> ")
+//     if err != nil {
+//         panic(err)
+//     }
+//     defer rl.Close()
+//
+//     for {
+//         line, err := rl.Readline()
+//         if err != nil { // io.EOF
+//             break
+//         }
+//         println(line)
+//     }
+//
+package readline
+
+import "io"
+
+type Instance struct {
+    Config    *Config
+    Terminal  *Terminal
+    Operation *Operation
+}
+
+type Config struct {
+    // Prompt supports ANSI escape sequences, so some characters can be colored even on Windows
+    Prompt string
+
+    // readline will persist history to the file specified by HistoryFile
+    HistoryFile string
+    // specifies the max length of the history; 500 by default, set it to -1 to disable history
+    HistoryLimit           int
+    DisableAutoSaveHistory bool
+    // enable case-insensitive history searching
+    HistorySearchFold bool
+
+    // AutoComplete will be called once the user presses TAB
+    AutoComplete AutoCompleter
+
+    // Any key press will be passed to Listener
+    // NOTE: Listener will be triggered by (nil, 0, 0) immediately
+    Listener Listener
+
+    Painter Painter
+
+    // If VimMode is true, readline will be in vim insert mode by default
+    VimMode bool
+
+    InterruptPrompt string
+    EOFPrompt       string
+
+    FuncGetWidth func() int
+
+    Stdin       io.ReadCloser
+    StdinWriter io.Writer
+    Stdout      io.Writer
+    Stderr      io.Writer
+
+    EnableMask bool
+    MaskRune   rune
+
+    // erase the editing line after the user submits it;
+    // usually used in IM-style applications.
+    UniqueEditLine bool
+
+    // filter input runes (may be used to disable CtrlZ or for translating some keys to different actions)
+    // -> output = new (translated) rune and true/false whether to continue processing this one
+    FuncFilterInputRune func(rune) (rune, bool)
+
+    // force interactive mode even if stdout is not a tty
+    FuncIsTerminal      func() bool
+    FuncMakeRaw         func() error
+    FuncExitRaw         func() error
+    FuncOnWidthChanged  func(func())
+    ForceUseInteractive bool
+
+    // private fields
+    inited    bool
+    opHistory *opHistory
+    opSearch  *opSearch
+}
+
+func (c *Config) useInteractive() bool {
+    if c.ForceUseInteractive {
+        return true
+    }
+    return c.FuncIsTerminal()
+}
+
+func (c *Config) Init() error {
+    if c.inited {
+        return nil
+    }
+    c.inited = true
+    if c.Stdin == nil {
+        c.Stdin = NewCancelableStdin(Stdin)
+    }
+
+    c.Stdin, c.StdinWriter = NewFillableStdin(c.Stdin)
+
+    if c.Stdout == nil {
+        c.Stdout = Stdout
+    }
+    if c.Stderr == nil {
+        c.Stderr = Stderr
+    }
+    if c.HistoryLimit == 0 {
+        c.HistoryLimit = 500
+    }
+
+    if c.InterruptPrompt == "" {
+        c.InterruptPrompt = "^C"
+    } else if c.InterruptPrompt == "\n" {
+        c.InterruptPrompt = ""
+    }
+    if c.EOFPrompt == "" {
+        c.EOFPrompt = "^D"
+    } else if c.EOFPrompt == "\n" {
+        c.EOFPrompt = ""
+    }
+
+    if c.AutoComplete == nil {
+        c.AutoComplete = &TabCompleter{}
+    }
+    if c.FuncGetWidth == nil {
+        c.FuncGetWidth = GetScreenWidth
+    }
+    if c.FuncIsTerminal == nil {
+        c.FuncIsTerminal = DefaultIsTerminal
+    }
+    rm := new(RawMode)
+    if c.FuncMakeRaw == nil {
+        c.FuncMakeRaw = rm.Enter
+    }
+    if c.FuncExitRaw == nil {
+        c.FuncExitRaw = rm.Exit
+    }
+    if c.FuncOnWidthChanged == nil {
+        c.FuncOnWidthChanged = DefaultOnWidthChanged
+    }
+
+    return nil
+}
+
+func (c Config) Clone() *Config {
+    c.opHistory = nil
+    c.opSearch = nil
+    return &c
+}
+
+func (c *Config) SetListener(f func(line []rune, pos int, key rune) (newLine []rune, newPos int, ok bool)) {
+    c.Listener = FuncListener(f)
+}
+
+func (c *Config) SetPainter(p Painter) {
+    c.Painter = p
+}
+
+func NewEx(cfg *Config) (*Instance, error) {
+    t, err := NewTerminal(cfg)
+    if err != nil {
+        return nil, err
+    }
+    rl := t.Readline()
+    if cfg.Painter == nil {
+        cfg.Painter = &defaultPainter{}
+    }
+    return &Instance{
+        Config:    cfg,
+        Terminal:  t,
+        Operation: rl,
+    }, nil
+}
+
+func New(prompt string) (*Instance, error) {
+    return NewEx(&Config{Prompt: prompt})
+}
+
+func (i *Instance) ResetHistory() {
+    i.Operation.ResetHistory()
+}
+
+func (i *Instance) SetPrompt(s string) {
+    i.Operation.SetPrompt(s)
+}
+
+func (i *Instance) SetMaskRune(r rune) {
+    i.Operation.SetMaskRune(r)
+}
+
+// change history persistence at runtime
+func (i *Instance) SetHistoryPath(p string) {
+    i.Operation.SetHistoryPath(p)
+}
+
+// readline will refresh automatically when writing through Stdout()
+func (i *Instance) Stdout() io.Writer {
+    return i.Operation.Stdout()
+}
+
+// readline will refresh automatically when writing through Stderr()
+func (i *Instance) Stderr() io.Writer {
+    return i.Operation.Stderr()
+}
+
+// switch VimMode at runtime
+func (i *Instance) SetVimMode(on bool) {
+    i.Operation.SetVimMode(on)
+}
+
+func (i *Instance) IsVimMode() bool {
+    return i.Operation.IsEnableVimMode()
+}
+
+func (i *Instance) GenPasswordConfig() *Config {
+    return i.Operation.GenPasswordConfig()
+}
+
+// we can generate a config with `i.GenPasswordConfig()`
+func (i *Instance) ReadPasswordWithConfig(cfg *Config) ([]byte, error) {
+    return i.Operation.PasswordWithConfig(cfg)
+}
+
+func (i *Instance) ReadPasswordEx(prompt string, l Listener) ([]byte, error) {
+    return i.Operation.PasswordEx(prompt, l)
+}
+
+func (i *Instance) ReadPassword(prompt string) ([]byte, error) {
+    return i.Operation.Password(prompt)
+}
+
+type Result struct {
+    Line  string
+    Error error
+}
+
+func (l *Result) CanContinue() bool {
+    return len(l.Line) != 0 && l.Error == ErrInterrupt
+}
+
+func (l *Result) CanBreak() bool {
+    return !l.CanContinue() && l.Error != nil
+}
+
+func (i *Instance) Line() *Result {
+    ret, err := i.Readline()
+    return &Result{ret, err}
+}
+
+// err is one of (nil, io.EOF, readline.ErrInterrupt)
+func (i *Instance) Readline() (string, error) {
+    return i.Operation.String()
+}
+
+func (i *Instance) ReadlineWithDefault(what string) (string, error) {
+    i.Operation.SetBuffer(what)
+    return i.Operation.String()
+}
+
+func (i *Instance) SaveHistory(content string) error {
+    return i.Operation.SaveHistory(content)
+}
+
+// same as Readline, but returns the line as a []byte
+func (i *Instance) ReadSlice() ([]byte, error) {
+    return i.Operation.Slice()
+}
+
+// we must make sure to call Close() before the process exits.
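+// A common pattern is to defer it right after construction, as in the
+// package example above:
+//
+//     rl, err := readline.New("> ")
+//     if err != nil {
+//         panic(err)
+//     }
+//     defer rl.Close()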
+func (i *Instance) Close() error { + if err := i.Terminal.Close(); err != nil { + return err + } + i.Config.Stdin.Close() + i.Operation.Close() + return nil +} +func (i *Instance) Clean() { + i.Operation.Clean() +} + +func (i *Instance) Write(b []byte) (int, error) { + return i.Stdout().Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +// ie : +// i := readline.New() +// i.WriteStdin([]byte("test")) +// _, _= i.Readline() +// +// gives +// +// > test[cursor] +func (i *Instance) WriteStdin(val []byte) (int, error) { + return i.Terminal.WriteStdin(val) +} + +func (i *Instance) SetConfig(cfg *Config) *Config { + if i.Config == cfg { + return cfg + } + old := i.Config + i.Config = cfg + i.Operation.SetConfig(cfg) + i.Terminal.SetConfig(cfg) + return old +} + +func (i *Instance) Refresh() { + i.Operation.Refresh() +} + +// HistoryDisable the save of the commands into the history +func (i *Instance) HistoryDisable() { + i.Operation.history.Disable() +} + +// HistoryEnable the save of the commands into the history (default on) +func (i *Instance) HistoryEnable() { + i.Operation.history.Enable() +} diff --git a/vendor/github.com/chzyer/readline/remote.go b/vendor/github.com/chzyer/readline/remote.go new file mode 100644 index 00000000..74dbf569 --- /dev/null +++ b/vendor/github.com/chzyer/readline/remote.go @@ -0,0 +1,475 @@ +package readline + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "os" + "sync" + "sync/atomic" +) + +type MsgType int16 + +const ( + T_DATA = MsgType(iota) + T_WIDTH + T_WIDTH_REPORT + T_ISTTY_REPORT + T_RAW + T_ERAW // exit raw + T_EOF +) + +type RemoteSvr struct { + eof int32 + closed int32 + width int32 + reciveChan chan struct{} + writeChan chan *writeCtx + conn net.Conn + isTerminal bool + funcWidthChan func() + stopChan chan struct{} + + dataBufM sync.Mutex + dataBuf bytes.Buffer +} + +type writeReply struct { + n int + err error +} + +type writeCtx struct { + msg *Message + reply chan *writeReply +} + +func newWriteCtx(msg *Message) *writeCtx { + return &writeCtx{ + msg: msg, + reply: make(chan *writeReply), + } +} + +func NewRemoteSvr(conn net.Conn) (*RemoteSvr, error) { + rs := &RemoteSvr{ + width: -1, + conn: conn, + writeChan: make(chan *writeCtx), + reciveChan: make(chan struct{}), + stopChan: make(chan struct{}), + } + buf := bufio.NewReader(rs.conn) + + if err := rs.init(buf); err != nil { + return nil, err + } + + go rs.readLoop(buf) + go rs.writeLoop() + return rs, nil +} + +func (r *RemoteSvr) init(buf *bufio.Reader) error { + m, err := ReadMessage(buf) + if err != nil { + return err + } + // receive isTerminal + if m.Type != T_ISTTY_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotIsTerminal(m.Data) + + // receive width + m, err = ReadMessage(buf) + if err != nil { + return err + } + if m.Type != T_WIDTH_REPORT { + return fmt.Errorf("unexpected init message") + } + r.GotReportWidth(m.Data) + + return nil +} + +func (r *RemoteSvr) HandleConfig(cfg *Config) { + cfg.Stderr = r + cfg.Stdout = r + cfg.Stdin = r + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncIsTerminal = r.IsTerminal + cfg.FuncMakeRaw = r.EnterRawMode + cfg.FuncExitRaw = r.ExitRawMode + cfg.FuncGetWidth = r.GetWidth + cfg.FuncOnWidthChanged = func(f func()) { + r.funcWidthChan = f + } +} + +func (r *RemoteSvr) IsTerminal() bool { + return r.isTerminal +} + +func (r *RemoteSvr) checkEOF() error { + if atomic.LoadInt32(&r.eof) == 1 { + return 
io.EOF + } + return nil +} + +func (r *RemoteSvr) Read(b []byte) (int, error) { + r.dataBufM.Lock() + n, err := r.dataBuf.Read(b) + r.dataBufM.Unlock() + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + if n == 0 && err == io.EOF { + <-r.reciveChan + r.dataBufM.Lock() + n, err = r.dataBuf.Read(b) + r.dataBufM.Unlock() + } + if n == 0 { + if err := r.checkEOF(); err != nil { + return 0, err + } + } + + return n, err +} + +func (r *RemoteSvr) writeMsg(m *Message) error { + ctx := newWriteCtx(m) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.err +} + +func (r *RemoteSvr) Write(b []byte) (int, error) { + ctx := newWriteCtx(NewMessage(T_DATA, b)) + r.writeChan <- ctx + reply := <-ctx.reply + return reply.n, reply.err +} + +func (r *RemoteSvr) EnterRawMode() error { + return r.writeMsg(NewMessage(T_RAW, nil)) +} + +func (r *RemoteSvr) ExitRawMode() error { + return r.writeMsg(NewMessage(T_ERAW, nil)) +} + +func (r *RemoteSvr) writeLoop() { + defer r.Close() + +loop: + for { + select { + case ctx, ok := <-r.writeChan: + if !ok { + break + } + n, err := ctx.msg.WriteTo(r.conn) + ctx.reply <- &writeReply{n, err} + case <-r.stopChan: + break loop + } + } +} + +func (r *RemoteSvr) Close() error { + if atomic.CompareAndSwapInt32(&r.closed, 0, 1) { + close(r.stopChan) + r.conn.Close() + } + return nil +} + +func (r *RemoteSvr) readLoop(buf *bufio.Reader) { + defer r.Close() + for { + m, err := ReadMessage(buf) + if err != nil { + break + } + switch m.Type { + case T_EOF: + atomic.StoreInt32(&r.eof, 1) + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_DATA: + r.dataBufM.Lock() + r.dataBuf.Write(m.Data) + r.dataBufM.Unlock() + select { + case r.reciveChan <- struct{}{}: + default: + } + case T_WIDTH_REPORT: + r.GotReportWidth(m.Data) + case T_ISTTY_REPORT: + r.GotIsTerminal(m.Data) + } + } +} + +func (r *RemoteSvr) GotIsTerminal(data []byte) { + if binary.BigEndian.Uint16(data) == 0 { + r.isTerminal = false + } else { + r.isTerminal = true + } +} + +func (r *RemoteSvr) GotReportWidth(data []byte) { + atomic.StoreInt32(&r.width, int32(binary.BigEndian.Uint16(data))) + if r.funcWidthChan != nil { + r.funcWidthChan() + } +} + +func (r *RemoteSvr) GetWidth() int { + return int(atomic.LoadInt32(&r.width)) +} + +// ----------------------------------------------------------------------------- + +type Message struct { + Type MsgType + Data []byte +} + +func ReadMessage(r io.Reader) (*Message, error) { + m := new(Message) + var length int32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + if err := binary.Read(r, binary.BigEndian, &m.Type); err != nil { + return nil, err + } + m.Data = make([]byte, int(length)-2) + if _, err := io.ReadFull(r, m.Data); err != nil { + return nil, err + } + return m, nil +} + +func NewMessage(t MsgType, data []byte) *Message { + return &Message{t, data} +} + +func (m *Message) WriteTo(w io.Writer) (int, error) { + buf := bytes.NewBuffer(make([]byte, 0, len(m.Data)+2+4)) + binary.Write(buf, binary.BigEndian, int32(len(m.Data)+2)) + binary.Write(buf, binary.BigEndian, m.Type) + buf.Write(m.Data) + n, err := buf.WriteTo(w) + return int(n), err +} + +// ----------------------------------------------------------------------------- + +type RemoteCli struct { + conn net.Conn + raw RawMode + receiveChan chan struct{} + inited int32 + isTerminal *bool + + data bytes.Buffer + dataM sync.Mutex +} + +func NewRemoteCli(conn net.Conn) (*RemoteCli, error) { + r := &RemoteCli{ + conn: conn, + 
receiveChan: make(chan struct{}), + } + return r, nil +} + +func (r *RemoteCli) MarkIsTerminal(is bool) { + r.isTerminal = &is +} + +func (r *RemoteCli) init() error { + if !atomic.CompareAndSwapInt32(&r.inited, 0, 1) { + return nil + } + + if err := r.reportIsTerminal(); err != nil { + return err + } + + if err := r.reportWidth(); err != nil { + return err + } + + // register sig for width changed + DefaultOnWidthChanged(func() { + r.reportWidth() + }) + return nil +} + +func (r *RemoteCli) writeMsg(m *Message) error { + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return err +} + +func (r *RemoteCli) Write(b []byte) (int, error) { + m := NewMessage(T_DATA, b) + r.dataM.Lock() + _, err := m.WriteTo(r.conn) + r.dataM.Unlock() + return len(b), err +} + +func (r *RemoteCli) reportWidth() error { + screenWidth := GetScreenWidth() + data := make([]byte, 2) + binary.BigEndian.PutUint16(data, uint16(screenWidth)) + msg := NewMessage(T_WIDTH_REPORT, data) + + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) reportIsTerminal() error { + var isTerminal bool + if r.isTerminal != nil { + isTerminal = *r.isTerminal + } else { + isTerminal = DefaultIsTerminal() + } + data := make([]byte, 2) + if isTerminal { + binary.BigEndian.PutUint16(data, 1) + } else { + binary.BigEndian.PutUint16(data, 0) + } + msg := NewMessage(T_ISTTY_REPORT, data) + if err := r.writeMsg(msg); err != nil { + return err + } + return nil +} + +func (r *RemoteCli) readLoop() { + buf := bufio.NewReader(r.conn) + for { + msg, err := ReadMessage(buf) + if err != nil { + break + } + switch msg.Type { + case T_ERAW: + r.raw.Exit() + case T_RAW: + r.raw.Enter() + case T_DATA: + os.Stdout.Write(msg.Data) + } + } +} + +func (r *RemoteCli) ServeBy(source io.Reader) error { + if err := r.init(); err != nil { + return err + } + + go func() { + defer r.Close() + for { + n, _ := io.Copy(r, source) + if n == 0 { + break + } + } + }() + defer r.raw.Exit() + r.readLoop() + return nil +} + +func (r *RemoteCli) Close() { + r.writeMsg(NewMessage(T_EOF, nil)) +} + +func (r *RemoteCli) Serve() error { + return r.ServeBy(os.Stdin) +} + +func ListenRemote(n, addr string, cfg *Config, h func(*Instance), onListen ...func(net.Listener) error) error { + ln, err := net.Listen(n, addr) + if err != nil { + return err + } + if len(onListen) > 0 { + if err := onListen[0](ln); err != nil { + return err + } + } + for { + conn, err := ln.Accept() + if err != nil { + break + } + go func() { + defer conn.Close() + rl, err := HandleConn(*cfg, conn) + if err != nil { + return + } + h(rl) + }() + } + return nil +} + +func HandleConn(cfg Config, conn net.Conn) (*Instance, error) { + r, err := NewRemoteSvr(conn) + if err != nil { + return nil, err + } + r.HandleConfig(&cfg) + + rl, err := NewEx(&cfg) + if err != nil { + return nil, err + } + return rl, nil +} + +func DialRemote(n, addr string) error { + conn, err := net.Dial(n, addr) + if err != nil { + return err + } + defer conn.Close() + + cli, err := NewRemoteCli(conn) + if err != nil { + return err + } + return cli.Serve() +} diff --git a/vendor/github.com/chzyer/readline/runebuf.go b/vendor/github.com/chzyer/readline/runebuf.go new file mode 100644 index 00000000..81d2da50 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runebuf.go @@ -0,0 +1,629 @@ +package readline + +import ( + "bufio" + "bytes" + "io" + "strconv" + "strings" + "sync" +) + +type runeBufferBck struct { + buf []rune + idx int +} + +type RuneBuffer struct 
{ + buf []rune + idx int + prompt []rune + w io.Writer + + hadClean bool + interactive bool + cfg *Config + + width int + + bck *runeBufferBck + + offset string + + lastKill []rune + + sync.Mutex +} + +func (r* RuneBuffer) pushKill(text []rune) { + r.lastKill = append([]rune{}, text...) +} + +func (r *RuneBuffer) OnWidthChange(newWidth int) { + r.Lock() + r.width = newWidth + r.Unlock() +} + +func (r *RuneBuffer) Backup() { + r.Lock() + r.bck = &runeBufferBck{r.buf, r.idx} + r.Unlock() +} + +func (r *RuneBuffer) Restore() { + r.Refresh(func() { + if r.bck == nil { + return + } + r.buf = r.bck.buf + r.idx = r.bck.idx + }) +} + +func NewRuneBuffer(w io.Writer, prompt string, cfg *Config, width int) *RuneBuffer { + rb := &RuneBuffer{ + w: w, + interactive: cfg.useInteractive(), + cfg: cfg, + width: width, + } + rb.SetPrompt(prompt) + return rb +} + +func (r *RuneBuffer) SetConfig(cfg *Config) { + r.Lock() + r.cfg = cfg + r.interactive = cfg.useInteractive() + r.Unlock() +} + +func (r *RuneBuffer) SetMask(m rune) { + r.Lock() + r.cfg.MaskRune = m + r.Unlock() +} + +func (r *RuneBuffer) CurrentWidth(x int) int { + r.Lock() + defer r.Unlock() + return runes.WidthAll(r.buf[:x]) +} + +func (r *RuneBuffer) PromptLen() int { + r.Lock() + width := r.promptLen() + r.Unlock() + return width +} + +func (r *RuneBuffer) promptLen() int { + return runes.WidthAll(runes.ColorFilter(r.prompt)) +} + +func (r *RuneBuffer) RuneSlice(i int) []rune { + r.Lock() + defer r.Unlock() + + if i > 0 { + rs := make([]rune, i) + copy(rs, r.buf[r.idx:r.idx+i]) + return rs + } + rs := make([]rune, -i) + copy(rs, r.buf[r.idx+i:r.idx]) + return rs +} + +func (r *RuneBuffer) Runes() []rune { + r.Lock() + newr := make([]rune, len(r.buf)) + copy(newr, r.buf) + r.Unlock() + return newr +} + +func (r *RuneBuffer) Pos() int { + r.Lock() + defer r.Unlock() + return r.idx +} + +func (r *RuneBuffer) Len() int { + r.Lock() + defer r.Unlock() + return len(r.buf) +} + +func (r *RuneBuffer) MoveToLineStart() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx = 0 + }) +} + +func (r *RuneBuffer) MoveBackward() { + r.Refresh(func() { + if r.idx == 0 { + return + } + r.idx-- + }) +} + +func (r *RuneBuffer) WriteString(s string) { + r.WriteRunes([]rune(s)) +} + +func (r *RuneBuffer) WriteRune(s rune) { + r.WriteRunes([]rune{s}) +} + +func (r *RuneBuffer) WriteRunes(s []rune) { + r.Refresh(func() { + tail := append(s, r.buf[r.idx:]...) + r.buf = append(r.buf[:r.idx], tail...) + r.idx += len(s) + }) +} + +func (r *RuneBuffer) MoveForward() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.idx++ + }) +} + +func (r *RuneBuffer) IsCursorInEnd() bool { + r.Lock() + defer r.Unlock() + return r.idx == len(r.buf) +} + +func (r *RuneBuffer) Replace(ch rune) { + r.Refresh(func() { + r.buf[r.idx] = ch + }) +} + +func (r *RuneBuffer) Erase() { + r.Refresh(func() { + r.idx = 0 + r.pushKill(r.buf[:]) + r.buf = r.buf[:0] + }) +} + +func (r *RuneBuffer) Delete() (success bool) { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + r.pushKill(r.buf[r.idx : r.idx+1]) + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) + success = true + }) + return +} + +func (r *RuneBuffer) DeleteWord() { + if r.idx == len(r.buf) { + return + } + init := r.idx + for init < len(r.buf) && IsWordBreak(r.buf[init]) { + init++ + } + for i := init + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[r.idx:i-1]) + r.Refresh(func() { + r.buf = append(r.buf[:r.idx], r.buf[i-1:]...) 
+ }) + return + } + } + r.Kill() +} + +func (r *RuneBuffer) MoveToPrevWord() (success bool) { + r.Refresh(func() { + if r.idx == 0 { + return + } + + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + success = true + return + } + } + r.idx = 0 + success = true + }) + return +} + +func (r *RuneBuffer) KillFront() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + length := len(r.buf) - r.idx + r.pushKill(r.buf[:r.idx]) + copy(r.buf[:length], r.buf[r.idx:]) + r.idx = 0 + r.buf = r.buf[:length] + }) +} + +func (r *RuneBuffer) Kill() { + r.Refresh(func() { + r.pushKill(r.buf[r.idx:]) + r.buf = r.buf[:r.idx] + }) +} + +func (r *RuneBuffer) Transpose() { + r.Refresh(func() { + if len(r.buf) == 1 { + r.idx++ + } + + if len(r.buf) < 2 { + return + } + + if r.idx == 0 { + r.idx = 1 + } else if r.idx >= len(r.buf) { + r.idx = len(r.buf) - 1 + } + r.buf[r.idx], r.buf[r.idx-1] = r.buf[r.idx-1], r.buf[r.idx] + r.idx++ + }) +} + +func (r *RuneBuffer) MoveToNextWord() { + r.Refresh(func() { + for i := r.idx + 1; i < len(r.buf); i++ { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.idx = i + return + } + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) MoveToEndWord() { + r.Refresh(func() { + // already at the end, so do nothing + if r.idx == len(r.buf) { + return + } + // if we are at the end of a word already, go to next + if !IsWordBreak(r.buf[r.idx]) && IsWordBreak(r.buf[r.idx+1]) { + r.idx++ + } + + // keep going until at the end of a word + for i := r.idx + 1; i < len(r.buf); i++ { + if IsWordBreak(r.buf[i]) && !IsWordBreak(r.buf[i-1]) { + r.idx = i - 1 + return + } + } + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) BackEscapeWord() { + r.Refresh(func() { + if r.idx == 0 { + return + } + for i := r.idx - 1; i > 0; i-- { + if !IsWordBreak(r.buf[i]) && IsWordBreak(r.buf[i-1]) { + r.pushKill(r.buf[i:r.idx]) + r.buf = append(r.buf[:i], r.buf[r.idx:]...) + r.idx = i + return + } + } + + r.buf = r.buf[:0] + r.idx = 0 + }) +} + +func (r *RuneBuffer) Yank() { + if len(r.lastKill) == 0 { + return + } + r.Refresh(func() { + buf := make([]rune, 0, len(r.buf) + len(r.lastKill)) + buf = append(buf, r.buf[:r.idx]...) + buf = append(buf, r.lastKill...) + buf = append(buf, r.buf[r.idx:]...) + r.buf = buf + r.idx += len(r.lastKill) + }) +} + +func (r *RuneBuffer) Backspace() { + r.Refresh(func() { + if r.idx == 0 { + return + } + + r.idx-- + r.buf = append(r.buf[:r.idx], r.buf[r.idx+1:]...) 
+ }) +} + +func (r *RuneBuffer) MoveToLineEnd() { + r.Refresh(func() { + if r.idx == len(r.buf) { + return + } + + r.idx = len(r.buf) + }) +} + +func (r *RuneBuffer) LineCount(width int) int { + if width == -1 { + width = r.width + } + return LineCount(width, + runes.WidthAll(r.buf)+r.PromptLen()) +} + +func (r *RuneBuffer) MoveTo(ch rune, prevChar, reverse bool) (success bool) { + r.Refresh(func() { + if reverse { + for i := r.idx - 1; i >= 0; i-- { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx++ + } + success = true + return + } + } + return + } + for i := r.idx + 1; i < len(r.buf); i++ { + if r.buf[i] == ch { + r.idx = i + if prevChar { + r.idx-- + } + success = true + return + } + } + }) + return +} + +func (r *RuneBuffer) isInLineEdge() bool { + if isWindows { + return false + } + sp := r.getSplitByLine(r.buf) + return len(sp[len(sp)-1]) == 0 +} + +func (r *RuneBuffer) getSplitByLine(rs []rune) []string { + return SplitByLine(r.promptLen(), r.width, rs) +} + +func (r *RuneBuffer) IdxLine(width int) int { + r.Lock() + defer r.Unlock() + return r.idxLine(width) +} + +func (r *RuneBuffer) idxLine(width int) int { + if width == 0 { + return 0 + } + sp := r.getSplitByLine(r.buf[:r.idx]) + return len(sp) - 1 +} + +func (r *RuneBuffer) CursorLineCount() int { + return r.LineCount(r.width) - r.IdxLine(r.width) +} + +func (r *RuneBuffer) Refresh(f func()) { + r.Lock() + defer r.Unlock() + + if !r.interactive { + if f != nil { + f() + } + return + } + + r.clean() + if f != nil { + f() + } + r.print() +} + +func (r *RuneBuffer) SetOffset(offset string) { + r.Lock() + r.offset = offset + r.Unlock() +} + +func (r *RuneBuffer) print() { + r.w.Write(r.output()) + r.hadClean = false +} + +func (r *RuneBuffer) output() []byte { + buf := bytes.NewBuffer(nil) + buf.WriteString(string(r.prompt)) + if r.cfg.EnableMask && len(r.buf) > 0 { + buf.Write([]byte(strings.Repeat(string(r.cfg.MaskRune), len(r.buf)-1))) + if r.buf[len(r.buf)-1] == '\n' { + buf.Write([]byte{'\n'}) + } else { + buf.Write([]byte(string(r.cfg.MaskRune))) + } + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + + } else { + for _, e := range r.cfg.Painter.Paint(r.buf, r.idx) { + if e == '\t' { + buf.WriteString(strings.Repeat(" ", TabWidth)) + } else { + buf.WriteRune(e) + } + } + if r.isInLineEdge() { + buf.Write([]byte(" \b")) + } + } + // cursor position + if len(r.buf) > r.idx { + buf.Write(r.getBackspaceSequence()) + } + return buf.Bytes() +} + +func (r *RuneBuffer) getBackspaceSequence() []byte { + var sep = map[int]bool{} + + var i int + for { + if i >= runes.WidthAll(r.buf) { + break + } + + if i == 0 { + i -= r.promptLen() + } + i += r.width + + sep[i] = true + } + var buf []byte + for i := len(r.buf); i > r.idx; i-- { + // move input to the left of one + buf = append(buf, '\b') + if sep[i] { + // up one line, go to the start of the line and move cursor right to the end (r.width) + buf = append(buf, "\033[A\r"+"\033["+strconv.Itoa(r.width)+"C"...) 
+ } + } + + return buf + +} + +func (r *RuneBuffer) Reset() []rune { + ret := runes.Copy(r.buf) + r.buf = r.buf[:0] + r.idx = 0 + return ret +} + +func (r *RuneBuffer) calWidth(m int) int { + if m > 0 { + return runes.WidthAll(r.buf[r.idx : r.idx+m]) + } + return runes.WidthAll(r.buf[r.idx+m : r.idx]) +} + +func (r *RuneBuffer) SetStyle(start, end int, style string) { + if end < start { + panic("end < start") + } + + // goto start + move := start - r.idx + if move > 0 { + r.w.Write([]byte(string(r.buf[r.idx : r.idx+move]))) + } else { + r.w.Write(bytes.Repeat([]byte("\b"), r.calWidth(move))) + } + r.w.Write([]byte("\033[" + style + "m")) + r.w.Write([]byte(string(r.buf[start:end]))) + r.w.Write([]byte("\033[0m")) + // TODO: move back +} + +func (r *RuneBuffer) SetWithIdx(idx int, buf []rune) { + r.Refresh(func() { + r.buf = buf + r.idx = idx + }) +} + +func (r *RuneBuffer) Set(buf []rune) { + r.SetWithIdx(len(buf), buf) +} + +func (r *RuneBuffer) SetPrompt(prompt string) { + r.Lock() + r.prompt = []rune(prompt) + r.Unlock() +} + +func (r *RuneBuffer) cleanOutput(w io.Writer, idxLine int) { + buf := bufio.NewWriter(w) + + if r.width == 0 { + buf.WriteString(strings.Repeat("\r\b", len(r.buf)+r.promptLen())) + buf.Write([]byte("\033[J")) + } else { + buf.Write([]byte("\033[J")) // just like ^k :) + if idxLine == 0 { + buf.WriteString("\033[2K") + buf.WriteString("\r") + } else { + for i := 0; i < idxLine; i++ { + io.WriteString(buf, "\033[2K\r\033[A") + } + io.WriteString(buf, "\033[2K\r") + } + } + buf.Flush() + return +} + +func (r *RuneBuffer) Clean() { + r.Lock() + r.clean() + r.Unlock() +} + +func (r *RuneBuffer) clean() { + r.cleanWithIdxLine(r.idxLine(r.width)) +} + +func (r *RuneBuffer) cleanWithIdxLine(idxLine int) { + if r.hadClean || !r.interactive { + return + } + r.hadClean = true + r.cleanOutput(r.w, idxLine) +} diff --git a/vendor/github.com/chzyer/readline/runes.go b/vendor/github.com/chzyer/readline/runes.go new file mode 100644 index 00000000..a669bc48 --- /dev/null +++ b/vendor/github.com/chzyer/readline/runes.go @@ -0,0 +1,223 @@ +package readline + +import ( + "bytes" + "unicode" + "unicode/utf8" +) + +var runes = Runes{} +var TabWidth = 4 + +type Runes struct{} + +func (Runes) EqualRune(a, b rune, fold bool) bool { + if a == b { + return true + } + if !fold { + return false + } + if a > b { + a, b = b, a + } + if b < utf8.RuneSelf && 'A' <= a && a <= 'Z' { + if b == a+'a'-'A' { + return true + } + } + return false +} + +func (r Runes) EqualRuneFold(a, b rune) bool { + return r.EqualRune(a, b, true) +} + +func (r Runes) EqualFold(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if r.EqualRuneFold(a[i], b[i]) { + continue + } + return false + } + + return true +} + +func (Runes) Equal(a, b []rune) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true +} + +func (rs Runes) IndexAllBckEx(r, sub []rune, fold bool) int { + for i := len(r) - len(sub); i >= 0; i-- { + found := true + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +// Search in runes from end to front +func (rs Runes) IndexAllBck(r, sub []rune) int { + return rs.IndexAllBckEx(r, sub, false) +} + +// Search in runes from front to end +func (rs Runes) IndexAll(r, sub []rune) int { + return rs.IndexAllEx(r, sub, false) +} + +func (rs Runes) IndexAllEx(r, 
sub []rune, fold bool) int { + for i := 0; i < len(r); i++ { + found := true + if len(r[i:]) < len(sub) { + return -1 + } + for j := 0; j < len(sub); j++ { + if !rs.EqualRune(r[i+j], sub[j], fold) { + found = false + break + } + } + if found { + return i + } + } + return -1 +} + +func (Runes) Index(r rune, rs []rune) int { + for i := 0; i < len(rs); i++ { + if rs[i] == r { + return i + } + } + return -1 +} + +func (Runes) ColorFilter(r []rune) []rune { + newr := make([]rune, 0, len(r)) + for pos := 0; pos < len(r); pos++ { + if r[pos] == '\033' && r[pos+1] == '[' { + idx := runes.Index('m', r[pos+2:]) + if idx == -1 { + continue + } + pos += idx + 2 + continue + } + newr = append(newr, r[pos]) + } + return newr +} + +var zeroWidth = []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cc, + unicode.Cf, +} + +var doubleWidth = []*unicode.RangeTable{ + unicode.Han, + unicode.Hangul, + unicode.Hiragana, + unicode.Katakana, +} + +func (Runes) Width(r rune) int { + if r == '\t' { + return TabWidth + } + if unicode.IsOneOf(zeroWidth, r) { + return 0 + } + if unicode.IsOneOf(doubleWidth, r) { + return 2 + } + return 1 +} + +func (Runes) WidthAll(r []rune) (length int) { + for i := 0; i < len(r); i++ { + length += runes.Width(r[i]) + } + return +} + +func (Runes) Backspace(r []rune) []byte { + return bytes.Repeat([]byte{'\b'}, runes.WidthAll(r)) +} + +func (Runes) Copy(r []rune) []rune { + n := make([]rune, len(r)) + copy(n, r) + return n +} + +func (Runes) HasPrefixFold(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.EqualFold(r[:len(prefix)], prefix) +} + +func (Runes) HasPrefix(r, prefix []rune) bool { + if len(r) < len(prefix) { + return false + } + return runes.Equal(r[:len(prefix)], prefix) +} + +func (Runes) Aggregate(candicate [][]rune) (same []rune, size int) { + for i := 0; i < len(candicate[0]); i++ { + for j := 0; j < len(candicate)-1; j++ { + if i >= len(candicate[j]) || i >= len(candicate[j+1]) { + goto aggregate + } + if candicate[j][i] != candicate[j+1][i] { + goto aggregate + } + } + size = i + 1 + } +aggregate: + if size > 0 { + same = runes.Copy(candicate[0][:size]) + for i := 0; i < len(candicate); i++ { + n := runes.Copy(candicate[i]) + copy(n, n[size:]) + candicate[i] = n[:len(n)-size] + } + } + return +} + +func (Runes) TrimSpaceLeft(in []rune) []rune { + firstIndex := len(in) + for i, r := range in { + if unicode.IsSpace(r) == false { + firstIndex = i + break + } + } + return in[firstIndex:] +} diff --git a/vendor/github.com/chzyer/readline/search.go b/vendor/github.com/chzyer/readline/search.go new file mode 100644 index 00000000..52e8ff09 --- /dev/null +++ b/vendor/github.com/chzyer/readline/search.go @@ -0,0 +1,164 @@ +package readline + +import ( + "bytes" + "container/list" + "fmt" + "io" +) + +const ( + S_STATE_FOUND = iota + S_STATE_FAILING +) + +const ( + S_DIR_BCK = iota + S_DIR_FWD +) + +type opSearch struct { + inMode bool + state int + dir int + source *list.Element + w io.Writer + buf *RuneBuffer + data []rune + history *opHistory + cfg *Config + markStart int + markEnd int + width int +} + +func newOpSearch(w io.Writer, buf *RuneBuffer, history *opHistory, cfg *Config, width int) *opSearch { + return &opSearch{ + w: w, + buf: buf, + cfg: cfg, + history: history, + width: width, + } +} + +func (o *opSearch) OnWidthChange(newWidth int) { + o.width = newWidth +} + +func (o *opSearch) IsSearchMode() bool { + return o.inMode +} + +func (o *opSearch) SearchBackspace() { + if len(o.data) > 0 { + 
o.data = o.data[:len(o.data)-1] + o.search(true) + } +} + +func (o *opSearch) findHistoryBy(isNewSearch bool) (int, *list.Element) { + if o.dir == S_DIR_BCK { + return o.history.FindBck(isNewSearch, o.data, o.buf.idx) + } + return o.history.FindFwd(isNewSearch, o.data, o.buf.idx) +} + +func (o *opSearch) search(isChange bool) bool { + if len(o.data) == 0 { + o.state = S_STATE_FOUND + o.SearchRefresh(-1) + return true + } + idx, elem := o.findHistoryBy(isChange) + if elem == nil { + o.SearchRefresh(-2) + return false + } + o.history.current = elem + + item := o.history.showItem(o.history.current.Value) + start, end := 0, 0 + if o.dir == S_DIR_BCK { + start, end = idx, idx+len(o.data) + } else { + start, end = idx, idx+len(o.data) + idx += len(o.data) + } + o.buf.SetWithIdx(idx, item) + o.markStart, o.markEnd = start, end + o.SearchRefresh(idx) + return true +} + +func (o *opSearch) SearchChar(r rune) { + o.data = append(o.data, r) + o.search(true) +} + +func (o *opSearch) SearchMode(dir int) bool { + if o.width == 0 { + return false + } + alreadyInMode := o.inMode + o.inMode = true + o.dir = dir + o.source = o.history.current + if alreadyInMode { + o.search(false) + } else { + o.SearchRefresh(-1) + } + return true +} + +func (o *opSearch) ExitSearchMode(revert bool) { + if revert { + o.history.current = o.source + o.buf.Set(o.history.showItem(o.history.current.Value)) + } + o.markStart, o.markEnd = 0, 0 + o.state = S_STATE_FOUND + o.inMode = false + o.source = nil + o.data = nil +} + +func (o *opSearch) SearchRefresh(x int) { + if x == -2 { + o.state = S_STATE_FAILING + } else if x >= 0 { + o.state = S_STATE_FOUND + } + if x < 0 { + x = o.buf.idx + } + x = o.buf.CurrentWidth(x) + x += o.buf.PromptLen() + x = x % o.width + + if o.markStart > 0 { + o.buf.SetStyle(o.markStart, o.markEnd, "4") + } + + lineCnt := o.buf.CursorLineCount() + buf := bytes.NewBuffer(nil) + buf.Write(bytes.Repeat([]byte("\n"), lineCnt)) + buf.WriteString("\033[J") + if o.state == S_STATE_FAILING { + buf.WriteString("failing ") + } + if o.dir == S_DIR_BCK { + buf.WriteString("bck") + } else if o.dir == S_DIR_FWD { + buf.WriteString("fwd") + } + buf.WriteString("-i-search: ") + buf.WriteString(string(o.data)) // keyword + buf.WriteString("\033[4m \033[0m") // _ + fmt.Fprintf(buf, "\r\033[%dA", lineCnt) // move prev + if x > 0 { + fmt.Fprintf(buf, "\033[%dC", x) // move forward + } + o.w.Write(buf.Bytes()) +} diff --git a/vendor/github.com/chzyer/readline/std.go b/vendor/github.com/chzyer/readline/std.go new file mode 100644 index 00000000..61d44b75 --- /dev/null +++ b/vendor/github.com/chzyer/readline/std.go @@ -0,0 +1,197 @@ +package readline + +import ( + "io" + "os" + "sync" + "sync/atomic" +) + +var ( + Stdin io.ReadCloser = os.Stdin + Stdout io.WriteCloser = os.Stdout + Stderr io.WriteCloser = os.Stderr +) + +var ( + std *Instance + stdOnce sync.Once +) + +// global instance will not submit history automatic +func getInstance() *Instance { + stdOnce.Do(func() { + std, _ = NewEx(&Config{ + DisableAutoSaveHistory: true, + }) + }) + return std +} + +// let readline load history from filepath +// and try to persist history into disk +// set fp to "" to prevent readline persisting history to disk +// so the `AddHistory` will return nil error forever. 
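+//
+// A minimal sketch of these package-level helpers working together (the
+// history path below is only an illustrative placeholder):
+//
+//     readline.SetHistoryPath("/tmp/readline.history")
+//     line, err := readline.Line("> ")
+//     if err == nil {
+//         // manual save: the global instance sets DisableAutoSaveHistory
+//         readline.AddHistory(line)
+//     }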
+func SetHistoryPath(fp string) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.HistoryFile = fp + ins.SetConfig(cfg) +} + +// set auto completer to global instance +func SetAutoComplete(completer AutoCompleter) { + ins := getInstance() + cfg := ins.Config.Clone() + cfg.AutoComplete = completer + ins.SetConfig(cfg) +} + +// add history to global instance manually +// raise error only if `SetHistoryPath` is set with a non-empty path +func AddHistory(content string) error { + ins := getInstance() + return ins.SaveHistory(content) +} + +func Password(prompt string) ([]byte, error) { + ins := getInstance() + return ins.ReadPassword(prompt) +} + +// readline with global configs +func Line(prompt string) (string, error) { + ins := getInstance() + ins.SetPrompt(prompt) + return ins.Readline() +} + +type CancelableStdin struct { + r io.Reader + mutex sync.Mutex + stop chan struct{} + closed int32 + notify chan struct{} + data []byte + read int + err error +} + +func NewCancelableStdin(r io.Reader) *CancelableStdin { + c := &CancelableStdin{ + r: r, + notify: make(chan struct{}), + stop: make(chan struct{}), + } + go c.ioloop() + return c +} + +func (c *CancelableStdin) ioloop() { +loop: + for { + select { + case <-c.notify: + c.read, c.err = c.r.Read(c.data) + select { + case c.notify <- struct{}{}: + case <-c.stop: + break loop + } + case <-c.stop: + break loop + } + } +} + +func (c *CancelableStdin) Read(b []byte) (n int, err error) { + c.mutex.Lock() + defer c.mutex.Unlock() + if atomic.LoadInt32(&c.closed) == 1 { + return 0, io.EOF + } + + c.data = b + select { + case c.notify <- struct{}{}: + case <-c.stop: + return 0, io.EOF + } + select { + case <-c.notify: + return c.read, c.err + case <-c.stop: + return 0, io.EOF + } +} + +func (c *CancelableStdin) Close() error { + if atomic.CompareAndSwapInt32(&c.closed, 0, 1) { + close(c.stop) + } + return nil +} + +// FillableStdin is a stdin reader which can prepend some data before +// reading into the real stdin +type FillableStdin struct { + sync.Mutex + stdin io.Reader + stdinBuffer io.ReadCloser + buf []byte + bufErr error +} + +// NewFillableStdin gives you FillableStdin +func NewFillableStdin(stdin io.Reader) (io.ReadCloser, io.Writer) { + r, w := io.Pipe() + s := &FillableStdin{ + stdinBuffer: r, + stdin: stdin, + } + s.ioloop() + return s, w +} + +func (s *FillableStdin) ioloop() { + go func() { + for { + bufR := make([]byte, 100) + var n int + n, s.bufErr = s.stdinBuffer.Read(bufR) + if s.bufErr != nil { + if s.bufErr == io.ErrClosedPipe { + break + } + } + s.Lock() + s.buf = append(s.buf, bufR[:n]...) 
+ s.Unlock() + } + }() +} + +// Read will read from the local buffer and if no data, read from stdin +func (s *FillableStdin) Read(p []byte) (n int, err error) { + s.Lock() + i := len(s.buf) + if len(p) < i { + i = len(p) + } + if i > 0 { + n := copy(p, s.buf) + s.buf = s.buf[:0] + cerr := s.bufErr + s.bufErr = nil + s.Unlock() + return n, cerr + } + s.Unlock() + n, err = s.stdin.Read(p) + return n, err +} + +func (s *FillableStdin) Close() error { + s.stdinBuffer.Close() + return nil +} diff --git a/vendor/github.com/chzyer/readline/std_windows.go b/vendor/github.com/chzyer/readline/std_windows.go new file mode 100644 index 00000000..b10f91bc --- /dev/null +++ b/vendor/github.com/chzyer/readline/std_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package readline + +func init() { + Stdin = NewRawReader() + Stdout = NewANSIWriter(Stdout) + Stderr = NewANSIWriter(Stderr) +} diff --git a/vendor/github.com/chzyer/readline/term.go b/vendor/github.com/chzyer/readline/term.go new file mode 100644 index 00000000..133993ca --- /dev/null +++ b/vendor/github.com/chzyer/readline/term.go @@ -0,0 +1,123 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios Termios +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := getTermios(fd) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var oldState State + + if termios, err := getTermios(fd); err != nil { + return nil, err + } else { + oldState.termios = *termios + } + + newState := oldState.termios + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON + // newState.Oflag &^= syscall.OPOST + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN + newState.Cflag &^= syscall.CSIZE | syscall.PARENB + newState.Cflag |= syscall.CS8 + + newState.Cc[syscall.VMIN] = 1 + newState.Cc[syscall.VTIME] = 0 + + return &oldState, setTermios(fd, &newState) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := getTermios(fd) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + return setTermios(fd, &state.termios) +} + +// ReadPassword reads a line of input from a terminal without local echo. 
This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + oldState, err := getTermios(fd) + if err != nil { + return nil, err + } + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + if err := setTermios(fd, newState); err != nil { + return nil, err + } + + defer func() { + setTermios(fd, oldState) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/term_bsd.go b/vendor/github.com/chzyer/readline/term_bsd.go new file mode 100644 index 00000000..68b56ea6 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCGETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), syscall.TIOCSETA, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_linux.go b/vendor/github.com/chzyer/readline/term_linux.go new file mode 100644 index 00000000..e3392b4a --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_linux.go @@ -0,0 +1,33 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package readline + +import ( + "syscall" + "unsafe" +) + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS + +func getTermios(fd int) (*Termios, error) { + termios := new(Termios) + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return nil, err + } + return termios, nil +} + +func setTermios(fd int, termios *Termios) error { + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(termios)), 0, 0, 0) + if err != 0 { + return err + } + return nil +} diff --git a/vendor/github.com/chzyer/readline/term_solaris.go b/vendor/github.com/chzyer/readline/term_solaris.go new file mode 100644 index 00000000..4c27273c --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_solaris.go @@ -0,0 +1,32 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package readline + +import "golang.org/x/sys/unix" + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} + +type Termios unix.Termios + +func getTermios(fd int) (*Termios, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + return (*Termios)(termios), nil +} + +func setTermios(fd int, termios *Termios) error { + return unix.IoctlSetTermios(fd, unix.TCSETSF, (*unix.Termios)(termios)) +} diff --git a/vendor/github.com/chzyer/readline/term_unix.go b/vendor/github.com/chzyer/readline/term_unix.go new file mode 100644 index 00000000..d3ea2424 --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_unix.go @@ -0,0 +1,24 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package readline + +import ( + "syscall" + "unsafe" +) + +type Termios syscall.Termios + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (int, int, error) { + var dimensions [4]uint16 + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0) + if err != 0 { + return 0, 0, err + } + return int(dimensions[1]), int(dimensions[0]), nil +} diff --git a/vendor/github.com/chzyer/readline/term_windows.go b/vendor/github.com/chzyer/readline/term_windows.go new file mode 100644 index 00000000..1290e00b --- /dev/null +++ b/vendor/github.com/chzyer/readline/term_windows.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package readline + +import ( + "io" + "syscall" + "unsafe" +) + +const ( + enableLineInput = 2 + enableEchoInput = 4 + enableProcessedInput = 1 + enableWindowInput = 8 + enableMouseInput = 16 + enableInsertMode = 32 + enableQuickEditMode = 64 + enableExtendedFlags = 128 + enableAutoPosition = 256 + enableProcessedOutput = 1 + enableWrapAtEolOutput = 2 +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") +) + +type ( + coord struct { + x short + y short + } + smallRect struct { + left short + top short + right short + bottom short + } + consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord + } +) + +type State struct { + mode uint32 +} + +// IsTerminal returns true if the given file descriptor is a terminal. 
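+// (On Windows the check below simply asks whether GetConsoleMode succeeds
+// for the given handle.)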
+func IsTerminal(fd int) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func restoreTerm(fd int, state *State) error { + _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) + return err +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + var info consoleScreenBufferInfo + _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) + if e != 0 { + return 0, 0, error(e) + } + return int(info.size.x), int(info.size.y), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + if e != 0 { + return nil, error(e) + } + old := st + + st &^= (enableEchoInput) + st |= (enableProcessedInput | enableLineInput | enableProcessedOutput) + _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) + if e != 0 { + return nil, error(e) + } + + defer func() { + syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0) + }() + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(syscall.Handle(fd), buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + ret = append(ret, buf[:n]...) 
+ if n < len(buf) { + break + } + } + + return ret, nil +} diff --git a/vendor/github.com/chzyer/readline/terminal.go b/vendor/github.com/chzyer/readline/terminal.go new file mode 100644 index 00000000..1078631c --- /dev/null +++ b/vendor/github.com/chzyer/readline/terminal.go @@ -0,0 +1,238 @@ +package readline + +import ( + "bufio" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +type Terminal struct { + m sync.Mutex + cfg *Config + outchan chan rune + closed int32 + stopChan chan struct{} + kickChan chan struct{} + wg sync.WaitGroup + isReading int32 + sleeping int32 + + sizeChan chan string +} + +func NewTerminal(cfg *Config) (*Terminal, error) { + if err := cfg.Init(); err != nil { + return nil, err + } + t := &Terminal{ + cfg: cfg, + kickChan: make(chan struct{}, 1), + outchan: make(chan rune), + stopChan: make(chan struct{}, 1), + sizeChan: make(chan string, 1), + } + + go t.ioloop() + return t, nil +} + +// SleepToResume will sleep myself, and return only if I'm resumed. +func (t *Terminal) SleepToResume() { + if !atomic.CompareAndSwapInt32(&t.sleeping, 0, 1) { + return + } + defer atomic.StoreInt32(&t.sleeping, 0) + + t.ExitRawMode() + ch := WaitForResume() + SuspendMe() + <-ch + t.EnterRawMode() +} + +func (t *Terminal) EnterRawMode() (err error) { + return t.cfg.FuncMakeRaw() +} + +func (t *Terminal) ExitRawMode() (err error) { + return t.cfg.FuncExitRaw() +} + +func (t *Terminal) Write(b []byte) (int, error) { + return t.cfg.Stdout.Write(b) +} + +// WriteStdin prefill the next Stdin fetch +// Next time you call ReadLine() this value will be writen before the user input +func (t *Terminal) WriteStdin(b []byte) (int, error) { + return t.cfg.StdinWriter.Write(b) +} + +type termSize struct { + left int + top int +} + +func (t *Terminal) GetOffset(f func(offset string)) { + go func() { + f(<-t.sizeChan) + }() + t.Write([]byte("\033[6n")) +} + +func (t *Terminal) Print(s string) { + fmt.Fprintf(t.cfg.Stdout, "%s", s) +} + +func (t *Terminal) PrintRune(r rune) { + fmt.Fprintf(t.cfg.Stdout, "%c", r) +} + +func (t *Terminal) Readline() *Operation { + return NewOperation(t, t.cfg) +} + +// return rune(0) if meet EOF +func (t *Terminal) ReadRune() rune { + ch, ok := <-t.outchan + if !ok { + return rune(0) + } + return ch +} + +func (t *Terminal) IsReading() bool { + return atomic.LoadInt32(&t.isReading) == 1 +} + +func (t *Terminal) KickRead() { + select { + case t.kickChan <- struct{}{}: + default: + } +} + +func (t *Terminal) ioloop() { + t.wg.Add(1) + defer func() { + t.wg.Done() + close(t.outchan) + }() + + var ( + isEscape bool + isEscapeEx bool + expectNextChar bool + ) + + buf := bufio.NewReader(t.getStdin()) + for { + if !expectNextChar { + atomic.StoreInt32(&t.isReading, 0) + select { + case <-t.kickChan: + atomic.StoreInt32(&t.isReading, 1) + case <-t.stopChan: + return + } + } + expectNextChar = false + r, _, err := buf.ReadRune() + if err != nil { + if strings.Contains(err.Error(), "interrupted system call") { + expectNextChar = true + continue + } + break + } + + if isEscape { + isEscape = false + if r == CharEscapeEx { + expectNextChar = true + isEscapeEx = true + continue + } + r = escapeKey(r, buf) + } else if isEscapeEx { + isEscapeEx = false + if key := readEscKey(r, buf); key != nil { + r = escapeExKey(key) + // offset + if key.typ == 'R' { + if _, _, ok := key.Get2(); ok { + select { + case t.sizeChan <- key.attr: + default: + } + } + expectNextChar = true + continue + } + } + if r == 0 { + expectNextChar = true + continue + } + } + + 
expectNextChar = true + switch r { + case CharEsc: + if t.cfg.VimMode { + t.outchan <- r + break + } + isEscape = true + case CharInterrupt, CharEnter, CharCtrlJ, CharDelete: + expectNextChar = false + fallthrough + default: + t.outchan <- r + } + } + +} + +func (t *Terminal) Bell() { + fmt.Fprintf(t, "%c", CharBell) +} + +func (t *Terminal) Close() error { + if atomic.SwapInt32(&t.closed, 1) != 0 { + return nil + } + if closer, ok := t.cfg.Stdin.(io.Closer); ok { + closer.Close() + } + close(t.stopChan) + t.wg.Wait() + return t.ExitRawMode() +} + +func (t *Terminal) GetConfig() *Config { + t.m.Lock() + cfg := *t.cfg + t.m.Unlock() + return &cfg +} + +func (t *Terminal) getStdin() io.Reader { + t.m.Lock() + r := t.cfg.Stdin + t.m.Unlock() + return r +} + +func (t *Terminal) SetConfig(c *Config) error { + if err := c.Init(); err != nil { + return err + } + t.m.Lock() + t.cfg = c + t.m.Unlock() + return nil +} diff --git a/vendor/github.com/chzyer/readline/utils.go b/vendor/github.com/chzyer/readline/utils.go new file mode 100644 index 00000000..af4e0052 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils.go @@ -0,0 +1,277 @@ +package readline + +import ( + "bufio" + "bytes" + "container/list" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + "unicode" +) + +var ( + isWindows = false +) + +const ( + CharLineStart = 1 + CharBackward = 2 + CharInterrupt = 3 + CharDelete = 4 + CharLineEnd = 5 + CharForward = 6 + CharBell = 7 + CharCtrlH = 8 + CharTab = 9 + CharCtrlJ = 10 + CharKill = 11 + CharCtrlL = 12 + CharEnter = 13 + CharNext = 14 + CharPrev = 16 + CharBckSearch = 18 + CharFwdSearch = 19 + CharTranspose = 20 + CharCtrlU = 21 + CharCtrlW = 23 + CharCtrlY = 25 + CharCtrlZ = 26 + CharEsc = 27 + CharEscapeEx = 91 + CharBackspace = 127 +) + +const ( + MetaBackward rune = -iota - 1 + MetaForward + MetaDelete + MetaBackspace + MetaTranspose +) + +// WaitForResume need to call before current process got suspend. +// It will run a ticker until a long duration is occurs, +// which means this process is resumed. 
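+//
+// A typical suspend/resume sequence, as used by Terminal.SleepToResume:
+//
+//     ch := WaitForResume()
+//     SuspendMe()
+//     <-ch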
+func WaitForResume() chan struct{} { + ch := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + ticker := time.NewTicker(10 * time.Millisecond) + t := time.Now() + wg.Done() + for { + now := <-ticker.C + if now.Sub(t) > 100*time.Millisecond { + break + } + t = now + } + ticker.Stop() + ch <- struct{}{} + }() + wg.Wait() + return ch +} + +func Restore(fd int, state *State) error { + err := restoreTerm(fd, state) + if err != nil { + // errno 0 means everything is ok :) + if err.Error() == "errno 0" { + return nil + } else { + return err + } + } + return nil +} + +func IsPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// translate Esc[X +func escapeExKey(key *escapeKeyPair) rune { + var r rune + switch key.typ { + case 'D': + r = CharBackward + case 'C': + r = CharForward + case 'A': + r = CharPrev + case 'B': + r = CharNext + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + case '~': + if key.attr == "3" { + r = CharDelete + } + default: + } + return r +} + +type escapeKeyPair struct { + attr string + typ rune +} + +func (e *escapeKeyPair) Get2() (int, int, bool) { + sp := strings.Split(e.attr, ";") + if len(sp) < 2 { + return -1, -1, false + } + s1, err := strconv.Atoi(sp[0]) + if err != nil { + return -1, -1, false + } + s2, err := strconv.Atoi(sp[1]) + if err != nil { + return -1, -1, false + } + return s1, s2, true +} + +func readEscKey(r rune, reader *bufio.Reader) *escapeKeyPair { + p := escapeKeyPair{} + buf := bytes.NewBuffer(nil) + for { + if r == ';' { + } else if unicode.IsNumber(r) { + } else { + p.typ = r + break + } + buf.WriteRune(r) + r, _, _ = reader.ReadRune() + } + p.attr = buf.String() + return &p +} + +// translate EscX to Meta+X +func escapeKey(r rune, reader *bufio.Reader) rune { + switch r { + case 'b': + r = MetaBackward + case 'f': + r = MetaForward + case 'd': + r = MetaDelete + case CharTranspose: + r = MetaTranspose + case CharBackspace: + r = MetaBackspace + case 'O': + d, _, _ := reader.ReadRune() + switch d { + case 'H': + r = CharLineStart + case 'F': + r = CharLineEnd + default: + reader.UnreadRune() + } + case CharEsc: + + } + return r +} + +func SplitByLine(start, screenWidth int, rs []rune) []string { + var ret []string + buf := bytes.NewBuffer(nil) + currentWidth := start + for _, r := range rs { + w := runes.Width(r) + currentWidth += w + buf.WriteRune(r) + if currentWidth >= screenWidth { + ret = append(ret, buf.String()) + buf.Reset() + currentWidth = 0 + } + } + ret = append(ret, buf.String()) + return ret +} + +// calculate how many lines for N character +func LineCount(screenWidth, w int) int { + r := w / screenWidth + if w%screenWidth != 0 { + r++ + } + return r +} + +func IsWordBreak(i rune) bool { + switch { + case i >= 'a' && i <= 'z': + case i >= 'A' && i <= 'Z': + case i >= '0' && i <= '9': + default: + return true + } + return false +} + +func GetInt(s []string, def int) int { + if len(s) == 0 { + return def + } + c, err := strconv.Atoi(s[0]) + if err != nil { + return def + } + return c +} + +type RawMode struct { + state *State +} + +func (r *RawMode) Enter() (err error) { + r.state, err = MakeRaw(GetStdin()) + return err +} + +func (r *RawMode) Exit() error { + if r.state == nil { + return nil + } + return Restore(GetStdin(), r.state) +} + +// ----------------------------------------------------------------------------- + +func sleep(n int) { + Debug(n) + time.Sleep(2000 * time.Millisecond) +} + +// print a linked list to 
Debug() +func debugList(l *list.List) { + idx := 0 + for e := l.Front(); e != nil; e = e.Next() { + Debug(idx, fmt.Sprintf("%+v", e.Value)) + idx++ + } +} + +// append log info to another file +func Debug(o ...interface{}) { + f, _ := os.OpenFile("debug.tmp", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + fmt.Fprintln(f, o...) + f.Close() +} diff --git a/vendor/github.com/chzyer/readline/utils_unix.go b/vendor/github.com/chzyer/readline/utils_unix.go new file mode 100644 index 00000000..f88dac97 --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_unix.go @@ -0,0 +1,83 @@ +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd solaris + +package readline + +import ( + "io" + "os" + "os/signal" + "sync" + "syscall" +) + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +// SuspendMe use to send suspend signal to myself, when we in the raw mode. +// For OSX it need to send to parent's pid +// For Linux it need to send to myself +func SuspendMe() { + p, _ := os.FindProcess(os.Getppid()) + p.Signal(syscall.SIGTSTP) + p, _ = os.FindProcess(os.Getpid()) + p.Signal(syscall.SIGTSTP) +} + +// get width of the terminal +func getWidth(stdoutFd int) int { + cols, _, err := GetSize(stdoutFd) + if err != nil { + return -1 + } + return cols +} + +func GetScreenWidth() int { + w := getWidth(syscall.Stdout) + if w < 0 { + w = getWidth(syscall.Stderr) + } + return w +} + +// ClearScreen clears the console screen +func ClearScreen(w io.Writer) (int, error) { + return w.Write([]byte("\033[H")) +} + +func DefaultIsTerminal() bool { + return IsTerminal(syscall.Stdin) && (IsTerminal(syscall.Stdout) || IsTerminal(syscall.Stderr)) +} + +func GetStdin() int { + return syscall.Stdin +} + +// ----------------------------------------------------------------------------- + +var ( + widthChange sync.Once + widthChangeCallback func() +) + +func DefaultOnWidthChanged(f func()) { + widthChangeCallback = f + widthChange.Do(func() { + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGWINCH) + + go func() { + for { + _, ok := <-ch + if !ok { + break + } + widthChangeCallback() + } + }() + }) +} diff --git a/vendor/github.com/chzyer/readline/utils_windows.go b/vendor/github.com/chzyer/readline/utils_windows.go new file mode 100644 index 00000000..5bfa55dc --- /dev/null +++ b/vendor/github.com/chzyer/readline/utils_windows.go @@ -0,0 +1,41 @@ +// +build windows + +package readline + +import ( + "io" + "syscall" +) + +func SuspendMe() { +} + +func GetStdin() int { + return int(syscall.Stdin) +} + +func init() { + isWindows = true +} + +// get width of the terminal +func GetScreenWidth() int { + info, _ := GetConsoleScreenBufferInfo() + if info == nil { + return -1 + } + return int(info.dwSize.x) +} + +// ClearScreen clears the console screen +func ClearScreen(_ io.Writer) error { + return SetConsoleCursorPosition(&_COORD{0, 0}) +} + +func DefaultIsTerminal() bool { + return true +} + +func DefaultOnWidthChanged(func()) { + +} diff --git a/vendor/github.com/chzyer/readline/vim.go b/vendor/github.com/chzyer/readline/vim.go new file mode 100644 index 00000000..bedf2c1a --- /dev/null +++ b/vendor/github.com/chzyer/readline/vim.go @@ -0,0 +1,176 @@ +package readline + +const ( + VIM_NORMAL = iota + VIM_INSERT + VIM_VISUAL +) + +type opVim struct { + cfg *Config + op *Operation + vimMode int +} + +func newVimMode(op *Operation) *opVim { + ov := &opVim{ + cfg: op.cfg, + op: op, + } + 
ov.SetVimMode(ov.cfg.VimMode) + return ov +} + +func (o *opVim) SetVimMode(on bool) { + if o.cfg.VimMode && !on { // turn off + o.ExitVimMode() + } + o.cfg.VimMode = on + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) IsEnableVimMode() bool { + return o.cfg.VimMode +} + +func (o *opVim) handleVimNormalMovement(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'h': + t = CharBackward + case 'j': + t = CharNext + case 'k': + t = CharPrev + case 'l': + t = CharForward + case '0', '^': + rb.MoveToLineStart() + case '$': + rb.MoveToLineEnd() + case 'x': + rb.Delete() + if rb.IsCursorInEnd() { + rb.MoveBackward() + } + case 'r': + rb.Replace(readNext()) + case 'd': + next := readNext() + switch next { + case 'd': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + case 'p': + rb.Yank() + case 'b', 'B': + rb.MoveToPrevWord() + case 'w', 'W': + rb.MoveToNextWord() + case 'e', 'E': + rb.MoveToEndWord() + case 'f', 'F', 't', 'T': + next := readNext() + prevChar := r == 't' || r == 'T' + reverse := r == 'F' || r == 'T' + switch next { + case CharEsc: + default: + rb.MoveTo(next, prevChar, reverse) + } + default: + return r, false + } + return t, true +} + +func (o *opVim) handleVimNormalEnterInsert(r rune, readNext func() rune) (t rune, handled bool) { + rb := o.op.buf + handled = true + switch r { + case 'i': + case 'I': + rb.MoveToLineStart() + case 'a': + rb.MoveForward() + case 'A': + rb.MoveToLineEnd() + case 's': + rb.Delete() + case 'S': + rb.Erase() + case 'c': + next := readNext() + switch next { + case 'c': + rb.Erase() + case 'w': + rb.DeleteWord() + case 'h': + rb.Backspace() + case 'l': + rb.Delete() + } + default: + return r, false + } + + o.EnterVimInsertMode() + return +} + +func (o *opVim) HandleVimNormal(r rune, readNext func() rune) (t rune) { + switch r { + case CharEnter, CharInterrupt: + o.ExitVimMode() + return r + } + + if r, handled := o.handleVimNormalMovement(r, readNext); handled { + return r + } + + if r, handled := o.handleVimNormalEnterInsert(r, readNext); handled { + return r + } + + // invalid operation + o.op.t.Bell() + return 0 +} + +func (o *opVim) EnterVimInsertMode() { + o.vimMode = VIM_INSERT +} + +func (o *opVim) ExitVimInsertMode() { + o.vimMode = VIM_NORMAL +} + +func (o *opVim) HandleVim(r rune, readNext func() rune) rune { + if o.vimMode == VIM_NORMAL { + return o.HandleVimNormal(r, readNext) + } + if r == CharEsc { + o.ExitVimInsertMode() + return 0 + } + + switch o.vimMode { + case VIM_INSERT: + return r + case VIM_VISUAL: + } + return r +} diff --git a/vendor/github.com/chzyer/readline/windows_api.go b/vendor/github.com/chzyer/readline/windows_api.go new file mode 100644 index 00000000..63f4f7b7 --- /dev/null +++ b/vendor/github.com/chzyer/readline/windows_api.go @@ -0,0 +1,152 @@ +// +build windows + +package readline + +import ( + "reflect" + "syscall" + "unsafe" +) + +var ( + kernel = NewKernel() + stdout = uintptr(syscall.Stdout) + stdin = uintptr(syscall.Stdin) +) + +type Kernel struct { + SetConsoleCursorPosition, + SetConsoleTextAttribute, + FillConsoleOutputCharacterW, + FillConsoleOutputAttribute, + ReadConsoleInputW, + GetConsoleScreenBufferInfo, + GetConsoleCursorInfo, + GetStdHandle CallFunc +} + +type short int16 +type word uint16 +type dword uint32 +type wchar uint16 + +type _COORD struct { + x short + y short +} + +func (c *_COORD) ptr() uintptr { 
+ return uintptr(*(*int32)(unsafe.Pointer(c))) +} + +const ( + EVENT_KEY = 0x0001 + EVENT_MOUSE = 0x0002 + EVENT_WINDOW_BUFFER_SIZE = 0x0004 + EVENT_MENU = 0x0008 + EVENT_FOCUS = 0x0010 +) + +type _KEY_EVENT_RECORD struct { + bKeyDown int32 + wRepeatCount word + wVirtualKeyCode word + wVirtualScanCode word + unicodeChar wchar + dwControlKeyState dword +} + +// KEY_EVENT_RECORD KeyEvent; +// MOUSE_EVENT_RECORD MouseEvent; +// WINDOW_BUFFER_SIZE_RECORD WindowBufferSizeEvent; +// MENU_EVENT_RECORD MenuEvent; +// FOCUS_EVENT_RECORD FocusEvent; +type _INPUT_RECORD struct { + EventType word + Padding uint16 + Event [16]byte +} + +type _CONSOLE_SCREEN_BUFFER_INFO struct { + dwSize _COORD + dwCursorPosition _COORD + wAttributes word + srWindow _SMALL_RECT + dwMaximumWindowSize _COORD +} + +type _SMALL_RECT struct { + left short + top short + right short + bottom short +} + +type _CONSOLE_CURSOR_INFO struct { + dwSize dword + bVisible bool +} + +type CallFunc func(u ...uintptr) error + +func NewKernel() *Kernel { + k := &Kernel{} + kernel32 := syscall.NewLazyDLL("kernel32.dll") + v := reflect.ValueOf(k).Elem() + t := v.Type() + for i := 0; i < t.NumField(); i++ { + name := t.Field(i).Name + f := kernel32.NewProc(name) + v.Field(i).Set(reflect.ValueOf(k.Wrap(f))) + } + return k +} + +func (k *Kernel) Wrap(p *syscall.LazyProc) CallFunc { + return func(args ...uintptr) error { + var r0 uintptr + var e1 syscall.Errno + size := uintptr(len(args)) + if len(args) <= 3 { + buf := make([]uintptr, 3) + copy(buf, args) + r0, _, e1 = syscall.Syscall(p.Addr(), size, + buf[0], buf[1], buf[2]) + } else { + buf := make([]uintptr, 6) + copy(buf, args) + r0, _, e1 = syscall.Syscall6(p.Addr(), size, + buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], + ) + } + + if int(r0) == 0 { + if e1 != 0 { + return error(e1) + } else { + return syscall.EINVAL + } + } + return nil + } + +} + +func GetConsoleScreenBufferInfo() (*_CONSOLE_SCREEN_BUFFER_INFO, error) { + t := new(_CONSOLE_SCREEN_BUFFER_INFO) + err := kernel.GetConsoleScreenBufferInfo( + stdout, + uintptr(unsafe.Pointer(t)), + ) + return t, err +} + +func GetConsoleCursorInfo() (*_CONSOLE_CURSOR_INFO, error) { + t := new(_CONSOLE_CURSOR_INFO) + err := kernel.GetConsoleCursorInfo(stdout, uintptr(unsafe.Pointer(t))) + return t, err +} + +func SetConsoleCursorPosition(c *_COORD) error { + return kernel.SetConsoleCursorPosition(stdout, c.ptr()) +} diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). 
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
new file mode 100644
index 00000000..521d6c01
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/client/README.md
@@ -0,0 +1,112 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[![GoDoc](https://godoc.org/go.etcd.io/etcd/client?status.png)](https://godoc.org/go.etcd.io/etcd/client)
+
+For full compatibility, it is recommended to vendor builds using etcd's vendored packages, with tools like `golang/dep`, as described in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+	"log"
+	"time"
+	"context"
+
+	"go.etcd.io/etcd/client"
+)
+
+func main() {
+	cfg := client.Config{
+		Endpoints: []string{"http://127.0.0.1:2379"},
+		Transport: client.DefaultTransport,
+		// set timeout per request to fail fast when the target endpoint is unavailable
+		HeaderTimeoutPerRequest: time.Second,
+	}
+	c, err := client.New(cfg)
+	if err != nil {
+		log.Fatal(err)
+	}
+	kapi := client.NewKeysAPI(c)
+	// set "/foo" key with "bar" value
+	log.Print("Setting '/foo' key with 'bar' value")
+	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Set is done. Metadata is %q\n", resp)
+	}
+	// get "/foo" key's value
+	log.Print("Getting '/foo' key value")
+	resp, err = kapi.Get(context.Background(), "/foo", nil)
+	if err != nil {
+		log.Fatal(err)
+	} else {
+		// print common key info
+		log.Printf("Get is done. Metadata is %q\n", resp)
+		// print value
+		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+	}
+}
+```
+
+## Error Handling
+
+The etcd client might return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send its request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error will be added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is example code for handling client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+	log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+	if err == context.Canceled {
+		// ctx is canceled by another routine
+	} else if err == context.DeadlineExceeded {
+		// ctx has an attached deadline that was exceeded
+	} else if cerr, ok := err.(*client.ClusterError); ok {
+		// process (cerr.Errors)
+	} else {
+		// bad cluster endpoints, which are not etcd servers
+	}
+}
+```
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well.
This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process. + +2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened. + +3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention. + +4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information. diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go new file mode 100644 index 00000000..b6ba7e15 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_role.go @@ -0,0 +1,236 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. +func NewAuthRoleAPI(c Client) AuthRoleAPI { + return &httpAuthRoleAPI{ + client: c, + } +} + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store. 
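+	//
+	// A hypothetical usage sketch (not part of the original docs), assuming
+	// an already-constructed Client c and a context ctx:
+	//
+	//	roles := NewAuthRoleAPI(c)
+	//	_ = roles.AddRole(ctx, "fleet") // "fleet" is an invented role name
+	//	// grant read/write on a key prefix to the new role
+	//	_, err := roles.GrantRoleKV(ctx, "fleet", []string{"/fleet/*"}, ReadWritePermission)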
+ GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. + ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := 
buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 00000000..8e7e2efe --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,319 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "path" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. 
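+	//
+	// An illustrative sketch (not part of the original docs) of toggling v2
+	// auth, assuming an already-constructed Client c and a context ctx:
+	//
+	//	auth := NewAuthAPI(c)
+	//	if err := auth.Enable(ctx); err != nil { /* handle error */ }
+	//	// ...later, to turn authentication back off:
+	//	if err := auth.Disable(ctx); err != nil { /* handle error */ }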
+ Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
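+	//
+	// An illustrative sketch (not part of the original docs) of basic user
+	// management, assuming an already-constructed Client c and a context ctx:
+	//
+	//	users := NewAuthUserAPI(c)
+	//	_ = users.AddUser(ctx, "root", "s3cret") // hypothetical credentials
+	//	_, _ = users.GrantUser(ctx, "root", []string{"root"})
+	//	names, _ := users.ListUsers(ctx) // e.g. ["root"]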
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 00000000..76d1f040 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 00000000..de9ab798 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,710 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "go.etcd.io/etcd/version" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. 
+ EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back it to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header + Username string + + // Password is the password for the specified user to add as an authorization header + // to the request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for response + // header in a single request made by the Client. The timeout includes + // connection time, any redirects, and header wait time. + // + // For non-watch GET request, server returns the response body immediately. + // For PUT/POST/DELETE request, server will attempt to commit request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For watch request, server returns the header immediately to notify Client + // watch start. But if server is behind some kind of proxy, the response + // header may be cached at proxy, and Client cannot rely on this behavior. + // + // Especially, wait request will ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use context of the API to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent. 
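+	//
+	// A configuration sketch (not part of the original docs): as noted above,
+	// leader prioritization only stays accurate when paired with AutoSync,
+	// so a caller might wire it up roughly like this:
+	//
+	//	cfg := Config{
+	//		Endpoints:     []string{"http://node1:2379", "http://node2:2379"}, // hypothetical endpoints
+	//		SelectionMode: EndpointSelectionPrioritizeLeader,
+	//	}
+	//	c, _ := New(cfg)
+	//	go c.AutoSync(context.Background(), 10*time.Second)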
+ SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() every given interval. + // The recommended sync interval is 10 seconds to 1 minute, which does + // not bring too much overhead to server and makes client catch up the + // cluster change in time. + // + // The example to use it: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list 
without using the current + // client, we'll copy it + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // due to not having a context meaning we can't call getLeaderEndpoint + // However, if you're using PrioritizeLeader, you've already been told + // to regularly call sync, where we do have a ctx, and can figure the + // leader. PrioritizeLeader is also quite a loose guarantee, so deal + // with it + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + } else if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue + } + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms { + 
eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var 
body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 00000000..34618cdb --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
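The cluster_error.go file being added here defines `ClusterError`, which the cluster client's `Do` loop returns after it has tried and failed every configured endpoint, carrying one error per attempt. A minimal, hedged sketch of unpacking it (import path per the package docs; the helper name and key are illustrative):

```go
package example

import (
	"context"
	"log"

	"go.etcd.io/etcd/client"
)

// getWithDetail logs per-endpoint failures when the cluster client has
// exhausted every configured endpoint for a single request.
func getWithDetail(kAPI client.KeysAPI, key string) (*client.Response, error) {
	resp, err := kAPI.Get(context.Background(), key, nil)
	if cerr, ok := err.(*client.ClusterError); ok {
		// Detail lists one line per endpoint error, in the order attempted.
		log.Printf("all endpoints failed:\n%s", cerr.Detail())
	}
	return resp, err
}
```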
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 00000000..c8bc9fba --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 00000000..580c2562 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "go.etcd.io/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string, serviceName string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
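+// The lookup targets the domain's "_etcd-client" SRV records; see
+// go.etcd.io/etcd/pkg/srv for the exact record names that are queried.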
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain, serviceName) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 00000000..abe5199c --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + "context" + + "go.etcd.io/etcd/client" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/json.go b/vendor/github.com/coreos/etcd/client/json.go new file mode 100644 index 00000000..97cdbcd7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/json.go @@ -0,0 +1,72 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
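As a usage sketch for the Discoverer added above (hedged: the domain below is a placeholder), DNS SRV discovery can seed a client's endpoint list before calling `client.New`:

```go
package main

import (
	"fmt"
	"log"

	"go.etcd.io/etcd/client"
)

func main() {
	// Resolve etcd client endpoints from SRV records for the domain.
	d := client.NewSRVDiscover()
	eps, err := d.Discover("example.com", "") // "" selects the default service name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("discovered endpoints:", eps)
}
```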
+ +package client + +import ( + "github.com/json-iterator/go" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type customNumberExtension struct { + jsoniter.DummyExtension +} + +func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { + if typ.String() == "interface {}" { + return customNumberDecoder{} + } + return nil +} + +type customNumberDecoder struct { +} + +func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number jsoniter.Number + iter.ReadVal(&number) + i64, err := strconv.ParseInt(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = i64 + return + } + f64, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*interface{})(ptr) = f64 + return + } + iter.ReportError("DecodeNumber", err.Error()) + default: + *(*interface{})(ptr) = iter.Read() + } +} + +// caseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive when unmarshalling, and otherwise compatible with +// the encoding/json standard library. +func caseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 00000000..ec53830c --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,679 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "go.etcd.io/etcd/pkg/pathutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. 
The endpoint is probably not valid etcd cluster endpoint") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in an DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. + Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. 
This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc) will + // also result in a non-nil error. 
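+	//
+	// Next advances the Watcher's internal wait index after each event, so
+	// a given Watcher should be driven from one goroutine at a time.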
+	Next(context.Context) (*Response, error)
+}
+
+type Response struct {
+	// Action is the name of the operation that occurred. Possible values
+	// include get, set, delete, update, create, compareAndSwap,
+	// compareAndDelete and expire.
+	Action string `json:"action"`
+
+	// Node represents the state of the relevant etcd Node.
+	Node *Node `json:"node"`
+
+	// PrevNode represents the previous state of the Node. PrevNode is non-nil
+	// only if the Node existed before the action occurred and the action
+	// caused a change to the Node.
+	PrevNode *Node `json:"prevNode"`
+
+	// Index holds the cluster-level index at the time the Response was generated.
+	// This index is not tied to the Node(s) contained in this Response.
+	Index uint64 `json:"-"`
+
+	// ClusterID holds the cluster-level ID reported by the server. This
+	// should be different for different etcd clusters.
+	ClusterID string `json:"-"`
+}
+
+type Node struct {
+	// Key represents the unique location of this Node (e.g. "/foo/bar").
+	Key string `json:"key"`
+
+	// Dir reports whether node describes a directory.
+	Dir bool `json:"dir,omitempty"`
+
+	// Value is the current data stored on this Node. If this Node
+	// is a directory, Value will be empty.
+	Value string `json:"value"`
+
+	// Nodes holds the children of this Node, only if this Node is a directory.
+	// This slice will be arbitrarily deep (children, grandchildren, great-
+	// grandchildren, etc.) if a recursive Get or Watch request were made.
+	Nodes Nodes `json:"nodes"`
+
+	// CreatedIndex is the etcd index at which this Node was created.
+	CreatedIndex uint64 `json:"createdIndex"`
+
+	// ModifiedIndex is the etcd index at which this Node was last modified.
+	ModifiedIndex uint64 `json:"modifiedIndex"`
+
+	// Expiration is the server-side expiration time of the key.
+	Expiration *time.Time `json:"expiration,omitempty"`
+
+	// TTL is the time to live of the key in seconds.
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + return res, err +} + +var jsonIterator = caseSensitiveJsonIterator() + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := jsonIterator.Unmarshal(body, &res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 00000000..657131ab --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,303 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "go.etcd.io/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves its client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL add the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 00000000..15a8babf --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means role not found of v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md new file mode 100644 index 00000000..6c6fe7c6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -0,0 +1,85 @@ +# etcd/clientv3 + +[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/clientv3) + +`etcd/clientv3` is the official Go etcd client for v3. + +## Install + +```bash +go get go.etcd.io/etcd/clientv3 +``` + +## Get started + +Create client using `clientv3.New`: + +```go +cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, + DialTimeout: 5 * time.Second, +}) +if err != nil { + // handle error! +} +defer cli.Close() +``` + +etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls. And `clientv3` uses +[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it. +If the client is not closed, the connection will have leaky goroutines. To specify client request timeout, +pass `context.WithTimeout` to APIs: + +```go +ctx, cancel := context.WithTimeout(context.Background(), timeout) +resp, err := cli.Put(ctx, "sample_key", "sample_value") +cancel() +if err != nil { + // handle error! +} +// use the response +``` + +For full compatibility, it is recommended to vendor builds using etcd's vendored packages, using tools like `golang/dep`, as in [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories). + +## Error Handling + +etcd client returns 2 types of errors: + +1. context error: canceled or deadline exceeded. +2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes). 
+ +Here is the example code to handle client errors: + +```go +resp, err := cli.Put(ctx, "", "") +if err != nil { + switch err { + case context.Canceled: + log.Fatalf("ctx is canceled by another routine: %v", err) + case context.DeadlineExceeded: + log.Fatalf("ctx is attached with a deadline is exceeded: %v", err) + case rpctypes.ErrEmptyKey: + log.Fatalf("client-side error: %v", err) + default: + log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err) + } +} +``` + +## Metrics + +The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/master/clientv3/example_metrics_test.go). + +## Namespacing + +The [namespace](https://godoc.org/go.etcd.io/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + +## Request size limit + +Client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize` in bytes. If none given, client request send limit defaults to 2 MiB including gRPC overhead bytes. And receive limit defaults to `math.MaxInt32`. + +## Examples + +More code examples can be found at [GoDoc](https://godoc.org/go.etcd.io/etcd/clientv3). diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go new file mode 100644 index 00000000..c954f1bf --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -0,0 +1,242 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "strings" + + "go.etcd.io/etcd/auth/authpb" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ( + AuthEnableResponse pb.AuthEnableResponse + AuthDisableResponse pb.AuthDisableResponse + AuthenticateResponse pb.AuthenticateResponse + AuthUserAddResponse pb.AuthUserAddResponse + AuthUserDeleteResponse pb.AuthUserDeleteResponse + AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse + AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse + AuthUserGetResponse pb.AuthUserGetResponse + AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse + AuthRoleAddResponse pb.AuthRoleAddResponse + AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse + AuthRoleGetResponse pb.AuthRoleGetResponse + AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse + AuthRoleDeleteResponse pb.AuthRoleDeleteResponse + AuthUserListResponse pb.AuthUserListResponse + AuthRoleListResponse pb.AuthRoleListResponse + + PermissionType authpb.Permission_Type + Permission authpb.Permission +) + +const ( + PermRead = authpb.READ + PermWrite = authpb.WRITE + PermReadWrite = authpb.READWRITE +) + +type UserAddOptions authpb.UserAddOptions + +type Auth interface { + // AuthEnable enables auth of an etcd cluster. 
+ AuthEnable(ctx context.Context) (*AuthEnableResponse, error) + + // AuthDisable disables auth of an etcd cluster. + AuthDisable(ctx context.Context) (*AuthDisableResponse, error) + + // UserAdd adds a new user to an etcd cluster. + UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) + + // UserAddWithOptions adds a new user to an etcd cluster with some options. + UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error) + + // UserDelete deletes a user from an etcd cluster. + UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) + + // UserChangePassword changes a password of a user. + UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) + + // UserGrantRole grants a role to a user. + UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) + + // UserGet gets a detailed information of a user. + UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) + + // UserList gets a list of all users. + UserList(ctx context.Context) (*AuthUserListResponse, error) + + // UserRevokeRole revokes a role of a user. + UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) + + // RoleAdd adds a new role to an etcd cluster. + RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) + + // RoleGrantPermission grants a permission to a role. + RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) + + // RoleGet gets a detailed information of a role. + RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) + + // RoleList gets a list of all roles. + RoleList(ctx context.Context) (*AuthRoleListResponse, error) + + // RoleRevokePermission revokes a permission from a role. + RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) + + // RoleDelete deletes a role. + RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) +} + +type authClient struct { + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func NewAuth(c *Client) Auth { + api := &authClient{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) + return (*AuthEnableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) + return (*AuthDisableResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...) 
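+	// The protobuf response converts directly to the exported alias type;
+	// toErr maps gRPC and context errors onto the package's error values.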
+ return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) { + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...) + return (*AuthUserAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) + return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) + return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) + return (*AuthUserGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) { + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) + return (*AuthUserListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) + return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) + return (*AuthRoleAddResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) { + perm := &authpb.Permission{ + Key: []byte(key), + RangeEnd: []byte(rangeEnd), + PermType: authpb.Permission_Type(permType), + } + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) + return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) + return (*AuthRoleGetResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) 
+ return (*AuthRoleListResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...) + return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) +} + +func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) + return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) +} + +func StrToPermissionType(s string) (PermissionType, error) { + val, ok := authpb.Permission_Type_value[strings.ToUpper(s)] + if ok { + return PermissionType(val), nil + } + return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s) +} + +type authenticator struct { + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption +} + +func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) + return (*AuthenticateResponse)(resp), toErr(ctx, err) +} + +func (auth *authenticator) close() { + auth.conn.Close() +} + +func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) { + conn, err := grpc.DialContext(ctx, target, opts...) + if err != nil { + return nil, err + } + + api := &authenticator{ + conn: conn, + remote: pb.NewAuthClient(conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil +} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go new file mode 100644 index 00000000..b91cbf95 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -0,0 +1,669 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
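Before the client implementation below, a short usage sketch of the Auth surface defined above. It mirrors etcd's documented auth bootstrap sequence (add the root role and user, grant the role, then enable auth); the endpoint and credentials are placeholders:

```go
package main

import (
	"context"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// etcd requires a root user holding the root role before auth can be enabled.
	if _, err := cli.RoleAdd(ctx, "root"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.UserAdd(ctx, "root", "rootpw"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}
```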
+ +package clientv3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/google/uuid" + "go.etcd.io/etcd/clientv3/balancer" + "go.etcd.io/etcd/clientv3/balancer/picker" + "go.etcd.io/etcd/clientv3/balancer/resolver/endpoint" + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.etcd.io/etcd/pkg/logutil" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var ( + ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") + + roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String()) +) + +func init() { + lg := zap.NewNop() + if os.Getenv("ETCD_CLIENT_DEBUG") != "" { + var err error + lg, err = zap.NewProductionConfig().Build() // info level logging + if err != nil { + panic(err) + } + } + balancer.RegisterBuilder(balancer.Config{ + Policy: picker.RoundrobinBalanced, + Name: roundRobinBalancerName, + Logger: lg, + }) +} + +// Client provides and manages an etcd v3 client session. +type Client struct { + Cluster + KV + Lease + Watcher + Auth + Maintenance + + conn *grpc.ClientConn + + cfg Config + creds *credentials.TransportCredentials + resolverGroup *endpoint.ResolverGroup + mu *sync.RWMutex + + ctx context.Context + cancel context.CancelFunc + + // Username is a user name for authentication. + Username string + // Password is a password for authentication. + Password string + // tokenCred is an instance of WithPerRPCCredentials()'s argument + tokenCred *authTokenCredential + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// New creates a new etcdv3 client from a given configuration. +func New(cfg Config) (*Client, error) { + if len(cfg.Endpoints) == 0 { + return nil, ErrNoAvailableEndpoints + } + + return newClient(&cfg) +} + +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. +func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + +// NewFromURL creates a new etcdv3 client from a URL. +func NewFromURL(url string) (*Client, error) { + return New(Config{Endpoints: []string{url}}) +} + +// NewFromURLs creates a new etcdv3 client from URLs. +func NewFromURLs(urls []string) (*Client, error) { + return New(Config{Endpoints: urls}) +} + +// Close shuts down the client's etcd connections. +func (c *Client) Close() error { + c.cancel() + c.Watcher.Close() + c.Lease.Close() + if c.resolverGroup != nil { + c.resolverGroup.Close() + } + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() +} + +// Ctx is a context for "out of band" messages (e.g., for sending +// "clean up" message when another context is canceled). It is +// canceled on client Close(). +func (c *Client) Ctx() context.Context { return c.ctx } + +// Endpoints lists the registered endpoints for the client. +func (c *Client) Endpoints() []string { + // copy the slice; protect original endpoints from being changed + c.mu.RLock() + defer c.mu.RUnlock() + eps := make([]string, len(c.cfg.Endpoints)) + copy(eps, c.cfg.Endpoints) + return eps +} + +// SetEndpoints updates client's endpoints. 
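+// The resolver group is updated as well, so subsequent RPCs are balanced
+// across the new list.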
+func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() + defer c.mu.Unlock() + c.cfg.Endpoints = eps + c.resolverGroup.SetEndpoints(eps) +} + +// Sync synchronizes client's endpoints with the known endpoints from the etcd membership. +func (c *Client) Sync(ctx context.Context) error { + mresp, err := c.MemberList(ctx) + if err != nil { + return err + } + var eps []string + for _, m := range mresp.Members { + eps = append(eps, m.ClientURLs...) + } + c.SetEndpoints(eps...) + return nil +} + +func (c *Client) autoSync() { + if c.cfg.AutoSyncInterval == time.Duration(0) { + return + } + + for { + select { + case <-c.ctx.Done(): + return + case <-time.After(c.cfg.AutoSyncInterval): + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { + lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err) + } + } + } +} + +type authTokenCredential struct { + token string + tokenMu *sync.RWMutex +} + +func (cred authTokenCredential) RequireTransportSecurity() bool { + return false +} + +func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) { + cred.tokenMu.RLock() + defer cred.tokenMu.RUnlock() + return map[string]string{ + rpctypes.TokenFieldNameGRPC: cred.token, + }, nil +} + +func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { + creds = c.creds + switch scheme { + case "unix": + case "http": + creds = nil + case "https", "unixs": + if creds != nil { + break + } + tlsconfig := &tls.Config{} + emptyCreds := credentials.NewTLS(tlsconfig) + creds = &emptyCreds + default: + creds = nil + } + return creds +} + +// dialSetupOpts gives the dial opts prior to any authentication. +func (c *Client) dialSetupOpts(creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) { + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + PermitWithoutStream: c.cfg.PermitWithoutStream, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } + opts = append(opts, dopts...) + + // Provide a net dialer that supports cancelation and timeout. + f := func(dialEp string, t time.Duration) (net.Conn, error) { + proto, host, _ := endpoint.ParseEndpoint(dialEp) + select { + case <-c.ctx.Done(): + return nil, c.ctx.Err() + default: + } + dialer := &net.Dialer{Timeout: t} + return dialer.DialContext(c.ctx, proto, host) + } + opts = append(opts, grpc.WithDialer(f)) + + if creds != nil { + opts = append(opts, grpc.WithTransportCredentials(*creds)) + } else { + opts = append(opts, grpc.WithInsecure()) + } + + // Interceptor retry and backoff. + // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with + // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy + // once it is available. + rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction)) + opts = append(opts, + // Disable stream retry by default since go-grpc-middleware/retry does not support client streams. + // Streams that are safe to retry are enabled individually. + grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)), + grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)), + ) + + return opts, nil +} + +// Dial connects to a single endpoint using the client's config. 
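+// For example, to reach one member directly (a sketch; the endpoint address
+// is an assumption):
+//
+//	conn, err := cli.Dial("10.0.0.1:2379")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()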
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+	creds := c.directDialCreds(ep)
+	// Use the grpc passthrough resolver to directly dial a single endpoint.
+	// This resolver passes through the 'unix' and 'unixs' endpoints schemes used
+	// by etcd without modification, allowing us to directly dial endpoints and
+	// to use the same dial functions that we use for load balancer dialing.
+	return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return last error in case of failure
+	var auth *authenticator
+
+	for i := 0; i < len(c.cfg.Endpoints); i++ {
+		ep := c.cfg.Endpoints[i]
+		// use dial options without dopts to avoid reusing the client balancer
+		var dOpts []grpc.DialOption
+		_, host, _ := endpoint.ParseEndpoint(ep)
+		target := c.resolverGroup.Target(host)
+		creds := c.dialWithBalancerCreds(ep)
+		dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
+		if err != nil {
+			err = fmt.Errorf("failed to configure auth dialer: %v", err)
+			continue
+		}
+		dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
+		auth, err = newAuthenticator(ctx, target, dOpts, c)
+		if err != nil {
+			continue
+		}
+		defer auth.close()
+
+		var resp *AuthenticateResponse
+		resp, err = auth.authenticate(ctx, c.Username, c.Password)
+		if err != nil {
+			// return err without retrying other endpoints
+			if err == rpctypes.ErrAuthNotEnabled {
+				return err
+			}
+			continue
+		}
+
+		c.tokenCred.tokenMu.Lock()
+		c.tokenCred.token = resp.Token
+		c.tokenCred.tokenMu.Unlock()
+
+		return nil
+	}
+
+	return err
+}
+
+// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
+// of the provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	_, host, _ := endpoint.ParseEndpoint(ep)
+	target := c.resolverGroup.Target(host)
+	creds := c.dialWithBalancerCreds(ep)
+	return c.dial(target, creds, dopts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(target string, creds *credentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	opts, err := c.dialSetupOpts(creds, dopts...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to configure dialer: %v", err)
+	}
+
+	if c.Username != "" && c.Password != "" {
+		c.tokenCred = &authTokenCredential{
+			tokenMu: &sync.RWMutex{},
+		}
+
+		ctx, cancel := c.ctx, func() {}
+		if c.cfg.DialTimeout > 0 {
+			ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+		}
+
+		err = c.getToken(ctx)
+		if err != nil {
+			if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
+				if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
+					err = context.DeadlineExceeded
+				}
+				cancel()
+				return nil, err
+			}
+		} else {
+			opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
+		}
+		cancel()
+	}
+
+	opts = append(opts, c.cfg.DialOptions...)
+
+	dctx := c.ctx
+	if c.cfg.DialTimeout > 0 {
+		var cancel context.CancelFunc
+		dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+		defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+	}
+
+	conn, err := grpc.DialContext(dctx, target, opts...)
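+	// Note that unless grpc.WithBlock() is among the dial options, DialContext
+	// returns immediately and connects in the background, so a nil error here
+	// does not yet guarantee the endpoint is reachable.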
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func (c *Client) directDialCreds(ep string) *credentials.TransportCredentials {
+	_, hostPort, scheme := endpoint.ParseEndpoint(ep)
+	creds := c.creds
+	if len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+		if creds != nil {
+			c := *creds
+			clone := c.Clone()
+			// The server name must be set to the endpoint hostname without port, since grpc
+			// otherwise attempts to check if x509 cert is valid for the full endpoint
+			// including the scheme and port, which fails.
+			host, _ := endpoint.ParseHostPort(hostPort)
+			clone.OverrideServerName(host)
+			creds = &clone
+		}
+	}
+	return creds
+}
+
+func (c *Client) dialWithBalancerCreds(ep string) *credentials.TransportCredentials {
+	_, _, scheme := endpoint.ParseEndpoint(ep)
+	creds := c.creds
+	if len(scheme) != 0 {
+		creds = c.processCreds(scheme)
+	}
+	return creds
+}
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
+func WithRequireLeader(ctx context.Context) context.Context {
+	md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
+func newClient(cfg *Config) (*Client, error) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	var creds *credentials.TransportCredentials
+	if cfg.TLS != nil {
+		c := credentials.NewTLS(cfg.TLS)
+		creds = &c
+	}
+
+	// use a temporary skeleton client to bootstrap first connection
+	baseCtx := context.TODO()
+	if cfg.Context != nil {
+		baseCtx = cfg.Context
+	}
+
+	ctx, cancel := context.WithCancel(baseCtx)
+	client := &Client{
+		conn:     nil,
+		cfg:      *cfg,
+		creds:    creds,
+		ctx:      ctx,
+		cancel:   cancel,
+		mu:       new(sync.RWMutex),
+		callOpts: defaultCallOpts,
+	}
+
+	lcfg := logutil.DefaultZapLoggerConfig
+	if cfg.LogConfig != nil {
+		lcfg = *cfg.LogConfig
+	}
+	var err error
+	client.lg, err = lcfg.Build()
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.Username != "" && cfg.Password != "" {
+		client.Username = cfg.Username
+		client.Password = cfg.Password
+	}
+	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+		}
+		callOpts := []grpc.CallOption{
+			defaultFailFast,
+			defaultMaxCallSendMsgSize,
+			defaultMaxCallRecvMsgSize,
+		}
+		if cfg.MaxCallSendMsgSize > 0 {
+			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+		}
+		if cfg.MaxCallRecvMsgSize > 0 {
+			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+		}
+		client.callOpts = callOpts
+	}
+
+	// Prepare an 'endpoint:///' resolver for the client and create an endpoint target to pass
+	// to dial so the client knows to use this resolver.
+	client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
+	if err != nil {
+		client.cancel()
+		return nil, err
+	}
+	client.resolverGroup.SetEndpoints(cfg.Endpoints)
+
+	if len(cfg.Endpoints) < 1 {
+		return nil, fmt.Errorf("at least one Endpoint is required in client config")
+	}
+	dialEndpoint := cfg.Endpoints[0]
+
+	// Use a provided endpoint target so that, for https:// endpoints without any TLS config given,
+	// grpc will assume the certificate server name is the endpoint host.
+	conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
+	if err != nil {
+		client.cancel()
+		client.resolverGroup.Close()
+		return nil, err
+	}
+	// TODO: With the old grpc balancer interface, we waited until the dial timeout
+	// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
+	client.conn = conn
+
+	client.Cluster = NewCluster(client)
+	client.KV = NewKV(client)
+	client.Lease = NewLease(client)
+	client.Watcher = NewWatcher(client)
+	client.Auth = NewAuth(client)
+	client.Maintenance = NewMaintenance(client)
+
+	if cfg.RejectOldCluster {
+		if err := client.checkVersion(); err != nil {
+			client.Close()
+			return nil, err
+		}
+	}
+
+	go client.autoSync()
+	return client, nil
+}
+
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		// after each full round robin across the quorum, back off for the waitBetween duration
+		n := uint(len(c.Endpoints()))
+		quorum := (n/2 + 1)
+		if attempt%quorum == 0 {
+			c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+			return jitterUp(waitBetween, jitterFraction)
+		}
+		c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+		return 0
+	}
+}
+
+func (c *Client) checkVersion() (err error) {
+	var wg sync.WaitGroup
+	errc := make(chan error, len(c.cfg.Endpoints))
+	ctx, cancel := context.WithCancel(c.ctx)
+	if c.cfg.DialTimeout > 0 {
+		ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
+	}
+	wg.Add(len(c.cfg.Endpoints))
+	for _, ep := range c.cfg.Endpoints {
+		// if cluster is current, any endpoint gives a recent version
+		go func(e string) {
+			defer wg.Done()
+			resp, rerr := c.Status(ctx, e)
+			if rerr != nil {
+				errc <- rerr
+				return
+			}
+			vs := strings.Split(resp.Version, ".")
+			maj, min := 0, 0
+			if len(vs) >= 2 {
+				maj, _ = strconv.Atoi(vs[0])
+				min, rerr = strconv.Atoi(vs[1])
+			}
+			if maj < 3 || (maj == 3 && min < 2) {
+				rerr = ErrOldCluster
+			}
+			errc <- rerr
+		}(ep)
+	}
+	// wait for success
+	for i := 0; i < len(c.cfg.Endpoints); i++ {
+		if err = <-errc; err == nil {
+			break
+		}
+	}
+	cancel()
+	wg.Wait()
+	return err
+}
+
+// ActiveConnection returns the current in-use connection
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate no forward
+// progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return true
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	// Treat Internal codes as if something failed, leaving the
+	// system in an inconsistent state, but retrying could make progress.
+	// (e.g., failed in middle of send, corrupted frame)
+	// TODO: are permanent Internal errors possible from grpc?
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal +} + +// isUnavailableErr returns true if the given error is an unavailable error +func isUnavailableErr(ctx context.Context, err error) bool { + if ctx != nil && ctx.Err() != nil { + return false + } + if err == nil { + return false + } + ev, _ := status.FromError(err) + // Unavailable codes mean the system will be right back. + // (e.g., can't connect, lost leader) + return ev.Code() == codes.Unavailable +} + +func toErr(ctx context.Context, err error) error { + if err == nil { + return nil + } + err = rpctypes.Error(err) + if _, ok := err.(rpctypes.EtcdError); ok { + return err + } + if ev, ok := status.FromError(err); ok { + code := ev.Code() + switch code { + case codes.DeadlineExceeded: + fallthrough + case codes.Canceled: + if ctx.Err() != nil { + err = ctx.Err() + } + case codes.Unavailable: + case codes.FailedPrecondition: + err = grpc.ErrClientConnClosing + } + } + return err +} + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} + +// IsConnCanceled returns true, if error is from a closed gRPC connection. +// ref. https://github.com/grpc/grpc-go/pull/1854 +func IsConnCanceled(err error) bool { + if err == nil { + return false + } + // >= gRPC v1.10.x + s, ok := status.FromError(err) + if ok { + // connection is canceled or server has already closed the connection + return s.Code() == codes.Canceled || s.Message() == "transport is closing" + } + // >= gRPC v1.10.x + if err == context.Canceled { + return true + } + // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")' + return strings.Contains(err.Error(), "grpc: the client connection is closing") +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go new file mode 100644 index 00000000..ce97e5c8 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -0,0 +1,141 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + "go.etcd.io/etcd/pkg/types" + + "google.golang.org/grpc" +) + +type ( + Member pb.Member + MemberListResponse pb.MemberListResponse + MemberAddResponse pb.MemberAddResponse + MemberRemoveResponse pb.MemberRemoveResponse + MemberUpdateResponse pb.MemberUpdateResponse + MemberPromoteResponse pb.MemberPromoteResponse +) + +type Cluster interface { + // MemberList lists the current cluster membership. + MemberList(ctx context.Context) (*MemberListResponse, error) + + // MemberAdd adds a new member into the cluster. + MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberAddAsLearner adds a new learner member into the cluster. 
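+	// A typical flow, sketched here with an assumed peer URL: add the learner,
+	// wait for it to catch up, then promote it with MemberPromote.
+	//
+	//	addResp, _ := cli.MemberAddAsLearner(ctx, []string{"http://10.0.0.4:2380"})
+	//	// ... once the learner is in sync with the leader:
+	//	cli.MemberPromote(ctx, addResp.Member.ID)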
+ MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) + + // MemberRemove removes an existing member from the cluster. + MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) + + // MemberUpdate updates the peer addresses of the member. + MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) + + // MemberPromote promotes a member from raft learner (non-voting) to raft voting member. + MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) +} + +type cluster struct { + remote pb.ClusterClient + callOpts []grpc.CallOption +} + +func NewCluster(c *Client) Cluster { + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, false) +} + +func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { + return c.memberAdd(ctx, peerAddrs, true) +} + +func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + r := &pb.MemberAddRequest{ + PeerURLs: peerAddrs, + IsLearner: isLearner, + } + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberAddResponse)(resp), nil +} + +func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { + r := &pb.MemberRemoveRequest{ID: id} + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*MemberRemoveResponse)(resp), nil +} + +func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { + // fail-fast before panic in rafthttp + if _, err := types.NewURLs(peerAddrs); err != nil { + return nil, err + } + + // it is safe to retry on update. + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { + // it is safe to retry on list. + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) { + r := &pb.MemberPromoteRequest{ID: id} + resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...) 
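+	// A learner that is not yet in sync with the leader cannot be promoted;
+	// the server returns an error in that case, so callers may need to retry.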
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+	return (*MemberPromoteResponse)(resp), nil
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
new file mode 100644
index 00000000..5779713d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+	revision int64
+	physical bool
+}
+
+// CompactOption configures the compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpCompact wraps a slice of CompactOptions to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+	ret := CompactOp{revision: rev}
+	ret.applyCompactOpts(opts)
+	return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+	return func(op *CompactOp) { op.physical = true }
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go
new file mode 100644
index 00000000..01ed68e9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/compare.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
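+
+// A guarded write using the comparison helpers below, sketched with assumed
+// key and value names (cli is a *clientv3.Client created elsewhere):
+//
+//	_, err := cli.Txn(ctx).
+//		If(clientv3.Compare(clientv3.Version("k"), "=", 0)).
+//		Then(clientv3.OpPut("k", "v")).
+//		Commit()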
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+)
+
+type CompareTarget int
+type CompareResult int
+
+const (
+	CompareVersion CompareTarget = iota
+	CompareCreated
+	CompareModified
+	CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v interface{}) Cmp {
+	var r pb.Compare_CompareResult
+
+	switch result {
+	case "=":
+		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
+	case ">":
+		r = pb.Compare_GREATER
+	case "<":
+		r = pb.Compare_LESS
+	default:
+		panic("Unknown result op")
+	}
+
+	cmp.Result = r
+	switch cmp.Target {
+	case pb.Compare_VALUE:
+		val, ok := v.(string)
+		if !ok {
+			panic("bad compare value")
+		}
+		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+	case pb.Compare_VERSION:
+		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+	case pb.Compare_CREATE:
+		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+	case pb.Compare_MOD:
+		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+	default:
+		panic("Unknown compare type")
+	}
+	return cmp
+}
+
+func Value(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+		return tu.Value
+	}
+	return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val interface{}) int64 {
+	if v, ok := val.(int64); ok {
+		return v
+	}
+	if v, ok := val.(int); ok {
+		return int64(v)
+	}
+	panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
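+// It is what allows lease comparisons to take a LeaseID directly, e.g. (a
+// usage sketch; leaseID is an assumed clientv3.LeaseID obtained from a Grant):
+//
+//	cmp := clientv3.Compare(clientv3.LeaseValue("k"), "=", leaseID)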
+func mustInt64orLeaseID(val interface{}) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
new file mode 100644
index 00000000..dcdbf511
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package concurrency implements concurrency operations on top of
+// etcd such as distributed locks, barriers, and elections.
+package concurrency
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
new file mode 100644
index 00000000..2521db6a
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	v3 "go.etcd.io/etcd/clientv3"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+var (
+	ErrElectionNotLeader = errors.New("election: not leader")
+	ErrElectionNoLeader  = errors.New("election: no leader")
+)
+
+type Election struct {
+	session *Session
+
+	keyPrefix string
+
+	leaderKey     string
+	leaderRev     int64
+	leaderSession *Session
+	hdr           *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+	return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+	return &Election{
+		keyPrefix:     pfx,
+		session:       s,
+		leaderKey:     leaderKey,
+		leaderRev:     leaderRev,
+		leaderSession: s,
+	}
+}
+
+// Campaign puts a value as eligible for the election on the prefix
+// key.
+// Multiple sessions can participate in the election for the
+// same prefix, but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', Campaign
+// will keep blocking while it waits for other keys to be deleted, unless the
+// server returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, until the context is canceled or times out, Campaign blocks
+// until it becomes the leader.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+	s := e.session
+	client := e.session.Client()
+
+	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+	txn = txn.Else(v3.OpGet(k))
+	resp, err := txn.Commit()
+	if err != nil {
+		return err
+	}
+	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+	if !resp.Succeeded {
+		kv := resp.Responses[0].GetResponseRange().Kvs[0]
+		e.leaderRev = kv.CreateRevision
+		if string(kv.Value) != val {
+			if err = e.Proclaim(ctx, val); err != nil {
+				e.Resign(ctx)
+				return err
+			}
+		}
+	}
+
+	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+	if err != nil {
+		// clean up in case of context cancel
+		select {
+		case <-ctx.Done():
+			e.Resign(client.Ctx())
+		default:
+			e.leaderSession = nil
+		}
+		return err
+	}
+	e.hdr = resp.Header
+
+	return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+	if e.leaderSession == nil {
+		return ErrElectionNotLeader
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	txn := client.Txn(ctx).If(cmp)
+	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+	tresp, terr := txn.Commit()
+	if terr != nil {
+		return terr
+	}
+	if !tresp.Succeeded {
+		e.leaderKey = ""
+		return ErrElectionNotLeader
+	}
+
+	e.hdr = tresp.Header
+	return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+	if e.leaderSession == nil {
+		return nil
+	}
+	client := e.session.Client()
+	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+	if err == nil {
+		e.hdr = resp.Header
+	}
+	e.leaderKey = ""
+	e.leaderSession = nil
+	return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+	client := e.session.Client()
+	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+	if err != nil {
+		return nil, err
+	} else if len(resp.Kvs) == 0 {
+		// no leader currently elected
+		return nil, ErrElectionNoLeader
+	}
+	return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+	retc := make(chan v3.GetResponse)
+	go e.observe(ctx, retc)
+	return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+	client := e.session.Client()
+
+	defer close(ch)
+	for {
+		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
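+		// WithFirstCreate sorts by create revision, ascending, and limits the
+		// result to one key, so this Get returns the current leader key, if any.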
+		if err != nil {
+			return
+		}
+
+		var kv *mvccpb.KeyValue
+		var hdr *pb.ResponseHeader
+
+		if len(resp.Kvs) == 0 {
+			cctx, cancel := context.WithCancel(ctx)
+			// wait for first key put on prefix
+			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+			wch := client.Watch(cctx, e.keyPrefix, opts...)
+			for kv == nil {
+				wr, ok := <-wch
+				if !ok || wr.Err() != nil {
+					cancel()
+					return
+				}
+				// only accept puts; a delete will make observe() spin
+				for _, ev := range wr.Events {
+					if ev.Type == mvccpb.PUT {
+						hdr, kv = &wr.Header, ev.Kv
+						// may have multiple revs; hdr.rev = the last rev
+						// set to kv's rev in case batch has multiple Puts
+						hdr.Revision = kv.ModRevision
+						break
+					}
+				}
+			}
+			cancel()
+		} else {
+			hdr, kv = resp.Header, resp.Kvs[0]
+		}
+
+		select {
+		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+		case <-ctx.Done():
+			return
+		}
+
+		cctx, cancel := context.WithCancel(ctx)
+		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+		keyDeleted := false
+		for !keyDeleted {
+			wr, ok := <-wch
+			if !ok {
+				cancel()
+				return
+			}
+			for _, ev := range wr.Events {
+				if ev.Type == mvccpb.DELETE {
+					keyDeleted = true
+					break
+				}
+				resp.Header = &wr.Header
+				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+				select {
+				case ch <- *resp:
+				case <-cctx.Done():
+					cancel()
+					return
+				}
+			}
+		}
+		cancel()
+	}
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
new file mode 100644
index 00000000..e4cf7751
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
@@ -0,0 +1,65 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"fmt"
+
+	v3 "go.etcd.io/etcd/clientv3"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	"go.etcd.io/etcd/mvcc/mvccpb"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var wr v3.WatchResponse
+	wch := client.Watch(cctx, key, v3.WithRev(rev))
+	for wr = range wch {
+		for _, ev := range wr.Events {
+			if ev.Type == mvccpb.DELETE {
+				return nil
+			}
+		}
+	}
+	if err := wr.Err(); err != nil {
+		return err
+	}
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+	return fmt.Errorf("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix and with a
+// create revision no greater than maxCreateRev have been deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { + getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) + for { + resp, err := client.Get(ctx, pfx, getOpts...) + if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := string(resp.Kvs[0].Key) + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go new file mode 100644 index 00000000..01353419 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -0,0 +1,117 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "context" + "fmt" + "sync" + + v3 "go.etcd.io/etcd/clientv3" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" +) + +// Mutex implements the sync Locker interface with etcd +type Mutex struct { + s *Session + + pfx string + myKey string + myRev int64 + hdr *pb.ResponseHeader +} + +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// Lock locks the mutex with a cancelable context. If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. +func (m *Mutex) Lock(ctx context.Context) error { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) 
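+	// The transaction below either claims m.myKey (when it does not exist yet)
+	// or re-reads it, and in both branches also fetches the current lock owner.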
+	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+	if err != nil {
+		return err
+	}
+	m.myRev = resp.Header.Revision
+	if !resp.Succeeded {
+		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+	}
+	// if there is no other key under the prefix, or our key has the minimum
+	// create revision, we already hold the lock
+	ownerKey := resp.Responses[1].GetResponseRange().Kvs
+	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+		m.hdr = resp.Header
+		return nil
+	}
+
+	// wait for deletion revisions prior to myKey
+	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+	// release lock key if wait failed
+	if werr != nil {
+		m.Unlock(client.Ctx())
+	} else {
+		m.hdr = hdr
+	}
+	return werr
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+	client := m.s.Client()
+	if _, err := client.Delete(ctx, m.myKey); err != nil {
+		return err
+	}
+	m.myKey = "\x00"
+	m.myRev = -1
+	return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+func (lm *lockerMutex) Unlock() {
+	client := lm.s.Client()
+	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+		panic(err)
+	}
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+	return &lockerMutex{NewMutex(s, pfx)}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
new file mode 100644
index 00000000..97eb7631
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"time"
+
+	v3 "go.etcd.io/etcd/clientv3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+	client *v3.Client
+	opts   *sessionOptions
+	id     v3.LeaseID
+
+	cancel context.CancelFunc
+	donec  <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
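+// A common pattern, sketched (the TTL and lock prefix are assumptions):
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
+//	if err != nil {
+//		// handle error
+//	}
+//	defer s.Close()
+//	mu := concurrency.NewMutex(s, "/my-lock")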
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = resp.ID + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. +type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. +func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go new file mode 100644 index 00000000..ee115107 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -0,0 +1,387 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+	"context"
+	"math"
+
+	v3 "go.etcd.io/etcd/clientv3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+	// Get returns the value for a key and inserts the key in the txn's read set.
+	// If Get fails, it aborts the transaction with an error, never returning.
+	Get(key ...string) string
+	// Put adds a value for a key to the write set.
+	Put(key, val string, opts ...v3.OpOption)
+	// Rev returns the revision of a key in the read set.
+	Rev(key string) int64
+	// Del deletes a key.
+	Del(key string)
+
+	// commit attempts to apply the txn's changes to the server.
+	commit() *v3.TxnResponse
+	reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+	// SerializableSnapshot provides serializable isolation and also checks
+	// for write conflicts.
+	SerializableSnapshot Isolation = iota
+	// Serializable reads within the same transaction attempt return data
+	// at the revision of the first read.
+	Serializable
+	// RepeatableReads reads within the same transaction attempt always
+	// return the same data.
+	RepeatableReads
+	// ReadCommitted reads keys from any committed revision.
+	ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+	iso      Isolation
+	ctx      context.Context
+	prefetch []string
+}
+
+type stmOption func(*stmOptions)

+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+	return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+	return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+	opts := &stmOptions{ctx: c.Ctx()}
+	for _, f := range so {
+		f(opts)
+	}
+	if len(opts.prefetch) != 0 {
+		f := apply
+		apply = func(s STM) error {
+			s.Get(opts.prefetch...)
+			return f(s)
+		}
+	}
+	return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+	switch opts.iso {
+	case SerializableSnapshot:
+		s := &stmSerializable{
+			stm:      stm{client: c, ctx: opts.ctx},
+			prefetch: make(map[string]*v3.GetResponse),
+		}
+		s.conflicts = func() []v3.Cmp {
+			return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+ } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := 
s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) + } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) + // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. 
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go new file mode 100644 index 00000000..bd037688 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -0,0 +1,84 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "crypto/tls" + "time" + + "go.uber.org/zap" + "google.golang.org/grpc" +) + +type Config struct { + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` + + // AutoSyncInterval is the interval to update endpoints with its latest members. + // 0 disables auto-sync. By default auto-sync is disabled. + AutoSyncInterval time.Duration `json:"auto-sync-interval"` + + // DialTimeout is the timeout for failing to establish a connection. + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int + + // TLS holds the client secure credentials, if any. + TLS *tls.Config + + // Username is a user name for authentication. + Username string `json:"username"` + + // Password is a password for authentication. + Password string `json:"password"` + + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption + + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context + + // LogConfig configures client-side logger. + // If nil, use the default logger. 
+ // TODO: configure gRPC logger + LogConfig *zap.Config + + // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs). + PermitWithoutStream bool `json:"permit-without-stream"` +} diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go new file mode 100644 index 00000000..01a3f596 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -0,0 +1,106 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package clientv3 implements the official Go etcd client for v3. +// +// Create client using `clientv3.New`: +// +// // expect dial time-out on ipv4 blackhole +// _, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"http://254.0.0.1:12345"}, +// DialTimeout: 2 * time.Second, +// }) +// +// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3 +// if err == context.DeadlineExceeded { +// // handle errors +// } +// +// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1 +// if err == grpc.ErrClientConnTimeout { +// // handle errors +// } +// +// cli, err := clientv3.New(clientv3.Config{ +// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"}, +// DialTimeout: 5 * time.Second, +// }) +// if err != nil { +// // handle error! +// } +// defer cli.Close() +// +// Make sure to close the client after using it. If the client is not closed, the +// connection will have leaky goroutines. +// +// To specify a client request timeout, wrap the context with context.WithTimeout: +// +// ctx, cancel := context.WithTimeout(context.Background(), timeout) +// resp, err := kvc.Put(ctx, "sample_key", "sample_value") +// cancel() +// if err != nil { +// // handle error! +// } +// // use the response +// +// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed. +// Clients are safe for concurrent use by multiple goroutines. +// +// etcd client returns 3 types of errors: +// +// 1. context error: canceled or deadline exceeded. +// 2. gRPC status error: e.g. when clock drifts in server-side before client's context deadline exceeded. +// 3. 
gRPC error: see https://github.com/etcd-io/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go +// +// Here is the example code to handle client errors: +// +// resp, err := kvc.Put(ctx, "", "") +// if err != nil { +// if err == context.Canceled { +// // ctx is canceled by another routine +// } else if err == context.DeadlineExceeded { +// // ctx is attached with a deadline and it exceeded +// } else if err == rpctypes.ErrEmptyKey { +// // client-side error: key is not provided +// } else if ev, ok := status.FromError(err); ok { +// code := ev.Code() +// if code == codes.DeadlineExceeded { +// // server-side context might have timed-out first (due to clock skew) +// // while original client-side context is not timed-out yet +// } +// } else { +// // bad cluster endpoints, which are not etcd servers +// } +// } +// +// go func() { cli.Close() }() +// _, err := kvc.Get(ctx, "a") +// if err != nil { +// // with etcd clientv3 <= v3.3 +// if err == context.Canceled { +// // grpc balancer calls 'Get' with an inflight client.Close +// } else if err == grpc.ErrClientConnClosing { +// // grpc balancer calls 'Get' after client.Close. +// } +// // with etcd clientv3 >= v3.4 +// if clientv3.IsConnCanceled(err) { +// // gRPC client connection is closed +// } +// } +// +// The grpc load balancer is registered statically and is shared across etcd clients. +// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment +// variable. E.g. "ETCD_CLIENT_DEBUG=1". +// +package clientv3 diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go new file mode 100644 index 00000000..2b7864ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -0,0 +1,177 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + CompactResponse pb.CompactionResponse + PutResponse pb.PutResponse + GetResponse pb.RangeResponse + DeleteResponse pb.DeleteRangeResponse + TxnResponse pb.TxnResponse +) + +type KV interface { + // Put puts a key-value pair into etcd. + // Note that key,value can be plain bytes array and string is + // an immutable representation of that bytes array. + // To get a string of bytes, do string([]byte{0x10, 0x20}). + Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) + + // Get retrieves keys. + // By default, Get will return the value for "key", if any. + // When passed WithRange(end), Get will return the keys in the range [key, end). + // When passed WithFromKey(), Get returns keys greater than or equal to key. + // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision; + // if the required revision is compacted, the request will fail with ErrCompacted . + // When passed WithLimit(limit), the number of returned keys is bounded by limit. 
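+	//
+	// Combining these options (an illustrative sketch, not upstream
+	// documentation): at most ten keys under a prefix, read at a historical
+	// revision, via
+	//
+	//	resp, err := kvc.Get(ctx, "foo", clientv3.WithPrefix(), clientv3.WithRev(7), clientv3.WithLimit(10))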
+ // When passed WithSort(), the keys will be sorted. + Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) + + // Delete deletes a key, or optionally using WithRange(end), [key, end). + Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) + + // Compact compacts etcd KV history before the given rev. + Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) + + // Do applies a single Op on KV without a transaction. + // Do is useful when creating arbitrary operations to be issued at a + // later time; the user can range over the operations, calling Do to + // execute them. Get/Put/Delete, on the other hand, are best suited + // for when the operation should be issued at the time of declaration. + Do(ctx context.Context, op Op) (OpResponse, error) + + // Txn creates a transaction. + Txn(ctx context.Context) Txn +} + +type OpResponse struct { + put *PutResponse + get *GetResponse + del *DeleteResponse + txn *TxnResponse +} + +func (op OpResponse) Put() *PutResponse { return op.put } +func (op OpResponse) Get() *GetResponse { return op.get } +func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} + +type kv struct { + remote pb.KVClient + callOpts []grpc.CallOption +} + +func NewKV(c *Client) KV { + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { + r, err := kv.Do(ctx, OpPut(key, val, opts...)) + return r.put, toErr(ctx, err) +} + +func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) { + r, err := kv.Do(ctx, OpGet(key, opts...)) + return r.get, toErr(ctx, err) +} + +func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) { + r, err := kv.Do(ctx, OpDelete(key, opts...)) + return r.del, toErr(ctx, err) +} + +func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*CompactResponse)(resp), err +} + +func (kv *kv) Txn(ctx context.Context) Txn { + return &txn{ + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, + } +} + +func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { + var err error + switch op.t { + case tRange: + var resp *pb.RangeResponse + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) + if err == nil { + return OpResponse{get: (*GetResponse)(resp)}, nil + } + case tPut: + var resp *pb.PutResponse + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) 
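+		// on success, wrap the raw protobuf response in the client-side typed
+		// alias, mirroring the tRange case above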
+ if err == nil { + return OpResponse{put: (*PutResponse)(resp)}, nil + } + case tDeleteRange: + var resp *pb.DeleteRangeResponse + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) + if err == nil { + return OpResponse{del: (*DeleteResponse)(resp)}, nil + } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } + default: + panic("Unknown op") + } + return OpResponse{}, toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go new file mode 100644 index 00000000..c2796fc9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -0,0 +1,596 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type ( + LeaseRevokeResponse pb.LeaseRevokeResponse + LeaseID int64 +) + +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. +type LeaseGrantResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 + Error string +} + +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. +type LeaseKeepAliveResponse struct { + *pb.ResponseHeader + ID LeaseID + TTL int64 +} + +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. +type LeaseTimeToLiveResponse struct { + *pb.ResponseHeader + ID LeaseID `json:"id"` + + // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1. + TTL int64 `json:"ttl"` + + // GrantedTTL is the initial granted time in seconds upon lease creation/renewal. + GrantedTTL int64 `json:"granted-ttl"` + + // Keys is the list of keys attached to this lease. + Keys [][]byte `json:"keys"` +} + +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + +// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse. +type LeaseLeasesResponse struct { + *pb.ResponseHeader + Leases []LeaseStatus `json:"leases"` +} + +const ( + // defaultTTL is the assumed lease TTL used for the first keepalive + // deadline before the actual TTL is known to the client. + defaultTTL = 5 * time.Second + // NoLease is a lease ID for the absence of a lease. + NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond +) + +// LeaseResponseChSize is the size of buffer to store unsent lease responses. +// WARNING: DO NOT UPDATE. +// Only for testing purposes. 
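+// Responses that do not fit are dropped (see Lease.KeepAlive below), so an
+// illustrative consumer (a sketch, with lease ID "id" assumed) should drain
+// its channel promptly:
+//
+//	ch, _ := cli.KeepAlive(context.TODO(), id)
+//	for ka := range ch {
+//		_ = ka.TTL // observe the remaining TTL
+//	}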
+var LeaseResponseChSize = 16 + +// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. +// +// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected. +type ErrKeepAliveHalted struct { + Reason error +} + +func (e ErrKeepAliveHalted) Error() string { + s := "etcdclient: leases keep alive halted" + if e.Reason != nil { + s += ": " + e.Reason.Error() + } + return s +} + +type Lease interface { + // Grant creates a new lease. + Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) + + // Revoke revokes the given lease. + Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) + + // TimeToLive retrieves the lease information of the given lease ID. + TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) + + // Leases retrieves all leases. + Leases(ctx context.Context) (*LeaseLeasesResponse, error) + + // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted + // to the channel are not consumed promptly the channel may become full. When full, the lease + // client will continue sending keep alive requests to the etcd server, but will drop responses + // until there is capacity on the channel to send more responses. + // + // If client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or + // canceled by the caller (e.g. context.Canceled), KeepAlive returns a ErrKeepAliveHalted error + // containing the error reason. + // + // The returned "LeaseKeepAliveResponse" channel closes if underlying keep + // alive stream is interrupted in some way the client cannot handle itself; + // given context "ctx" is canceled or timed out. + // + // TODO(v4.0): post errors to last keep alive message before closing + // (see https://github.com/etcd-io/etcd/pull/7866) + KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) + + // KeepAliveOnce renews the lease once. The response corresponds to the + // first message from calling KeepAlive. If the response has a recoverable + // error, KeepAliveOnce will retry the RPC with a new keep alive message. + // + // In most of the cases, Keepalive should be used instead of KeepAliveOnce. + KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) + + // Close releases all resources Lease keeps for efficient communication + // with the etcd server. + Close() error +} + +type lessor struct { + mu sync.Mutex // guards all fields + + // donec is closed and loopErr is set when recvKeepAliveLoop stops + donec chan struct{} + loopErr error + + remote pb.LeaseClient + + stream pb.Lease_LeaseKeepAliveClient + streamCancel context.CancelFunc + + stopCtx context.Context + stopCancel context.CancelFunc + + keepAlives map[LeaseID]*keepAlive + + // firstKeepAliveTimeout is the timeout for the first keepalive request + // before the actual TTL is known to the lease client + firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
+ firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption + + lg *zap.Logger +} + +// keepAlive multiplexes a keepalive for a lease over multiple channels +type keepAlive struct { + chs []chan<- *LeaseKeepAliveResponse + ctxs []context.Context + // deadline is the time the keep alive channels close if no response + deadline time.Time + // nextKeepAlive is when to send the next keep alive message + nextKeepAlive time.Time + // donec is closed on lease revoke, expiration, or cancel. + donec chan struct{} +} + +func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { + l := &lessor{ + donec: make(chan struct{}), + keepAlives: make(map[LeaseID]*keepAlive), + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, + lg: c.lg, + } + if l.firstKeepAliveTimeout == time.Second { + l.firstKeepAliveTimeout = defaultTTL + } + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) + return l +} + +func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, + } + return gresp, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) { + resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...) 
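+	// on success, project each protobuf lease record into the client-side
+	// LeaseStatus view before returning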
+ if err == nil { + leases := make([]LeaseStatus, len(resp.Leases)) + for i := range resp.Leases { + leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)} + } + return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil + } + return nil, toErr(ctx, err) +} + +func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { + ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize) + + l.mu.Lock() + // ensure that recvKeepAliveLoop is still running + select { + case <-l.donec: + err := l.loopErr + l.mu.Unlock() + close(ch) + return ch, ErrKeepAliveHalted{Reason: err} + default: + } + ka, ok := l.keepAlives[id] + if !ok { + // create fresh keep alive + ka = &keepAlive{ + chs: []chan<- *LeaseKeepAliveResponse{ch}, + ctxs: []context.Context{ctx}, + deadline: time.Now().Add(l.firstKeepAliveTimeout), + nextKeepAlive: time.Now(), + donec: make(chan struct{}), + } + l.keepAlives[id] = ka + } else { + // add channel and context to existing keep alive + ka.ctxs = append(ka.ctxs, ctx) + ka.chs = append(ka.chs, ch) + } + l.mu.Unlock() + + go l.keepAliveCtxCloser(ctx, id, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) + + return ch, nil +} + +func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { + for { + resp, err := l.keepAliveOnce(ctx, id) + if err == nil { + if resp.TTL <= 0 { + err = rpctypes.ErrLeaseNotFound + } + return resp, err + } + if isHaltErr(ctx, err) { + return nil, toErr(ctx, err) + } + } +} + +func (l *lessor) Close() error { + l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) + <-l.donec + return nil +} + +func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) { + select { + case <-donec: + return + case <-l.donec: + return + case <-ctx.Done(): + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[id] + if !ok { + return + } + + // close channel and remove context if still associated with keep alive + for i, c := range ka.ctxs { + if c == ctx { + close(ka.chs[i]) + ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...) + ka.chs = append(ka.chs[:i], ka.chs[i+1:]...) + break + } + } + // remove if no one more listeners + if len(ka.chs) == 0 { + delete(l.keepAlives, id) + } +} + +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. 
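+// Such channels belong to callers that attached the require-leader metadata,
+// e.g. (an illustrative sketch) ctx := WithRequireLeader(context.Background())
+// before calling KeepAlive(ctx, id); closing them lets those callers observe
+// the lost leader.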
+func (l *lessor) closeRequireLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, ka := range l.keepAlives {
+		reqIdxs := 0
+		// find all required leader channels, close, mark as nil
+		for i, ctx := range ka.ctxs {
+			md, ok := metadata.FromOutgoingContext(ctx)
+			if !ok {
+				continue
+			}
+			ks := md[rpctypes.MetadataRequireLeaderKey]
+			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+				continue
+			}
+			close(ka.chs[i])
+			ka.chs[i] = nil
+			reqIdxs++
+		}
+		if reqIdxs == 0 {
+			continue
+		}
+		// remove all channels that required a leader from keepalive
+		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+		newCtxs := make([]context.Context, len(newChs))
+		newIdx := 0
+		for i := range ka.chs {
+			if ka.chs[i] == nil {
+				continue
+			}
+			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+			newIdx++
+		}
+		ka.chs, ka.ctxs = newChs, newCtxs
+	}
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+	if err != nil {
+		return nil, toErr(ctx, err)
+	}
+
+	resp, rerr := stream.Recv()
+	if rerr != nil {
+		return nil, toErr(ctx, rerr)
+	}
+
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+	return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+	defer func() {
+		l.mu.Lock()
+		close(l.donec)
+		l.loopErr = gerr
+		for _, ka := range l.keepAlives {
+			ka.close()
+		}
+		l.keepAlives = make(map[LeaseID]*keepAlive)
+		l.mu.Unlock()
+	}()
+
+	for {
+		stream, err := l.resetRecv()
+		if err != nil {
+			if canceledByCaller(l.stopCtx, err) {
+				return err
+			}
+		} else {
+			for {
+				resp, err := stream.Recv()
+				if err != nil {
+					if canceledByCaller(l.stopCtx, err) {
+						return err
+					}
+
+					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
+						l.closeRequireLeader()
+					}
+					break
+				}
+
+				l.recvKeepAlive(resp)
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-l.stopCtx.Done():
+			return l.stopCtx.Err()
+		}
+	}
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
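+	// a failed open falls through to the error path below; the caller,
+	// recvKeepAliveLoop, waits retryConnWait before trying again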
+ if err != nil { + cancel() + return nil, err + } + + l.mu.Lock() + defer l.mu.Unlock() + if l.stream != nil && l.streamCancel != nil { + l.streamCancel() + } + + l.streamCancel = cancel + l.stream = stream + + go l.sendKeepAliveLoop(stream) + return stream, nil +} + +// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse +func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { + karesp := &LeaseKeepAliveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + } + + l.mu.Lock() + defer l.mu.Unlock() + + ka, ok := l.keepAlives[karesp.ID] + if !ok { + return + } + + if karesp.TTL <= 0 { + // lease expired; close all keep alive channels + delete(l.keepAlives, karesp.ID) + ka.close() + return + } + + // send update to all channels + nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0) + ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second) + for _, ch := range ka.chs { + select { + case ch <- karesp: + default: + if l.lg != nil { + l.lg.Warn("lease keepalive response queue is full; dropping response send", + zap.Int("queue-size", len(ch)), + zap.Int("queue-capacity", cap(ch)), + ) + } + } + // still advance in order to rate-limit keep-alive sends + ka.nextKeepAlive = nextKeepAlive + } +} + +// deadlineLoop reaps any keep alive channels that have not received a response +// within the lease TTL +func (l *lessor) deadlineLoop() { + for { + select { + case <-time.After(time.Second): + case <-l.donec: + return + } + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.deadline.Before(now) { + // waited too long for response; lease may be expired + ka.close() + delete(l.keepAlives, id) + } + } + l.mu.Unlock() + } +} + +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. +func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { + for { + var tosend []LeaseID + + now := time.Now() + l.mu.Lock() + for id, ka := range l.keepAlives { + if ka.nextKeepAlive.Before(now) { + tosend = append(tosend, id) + } + } + l.mu.Unlock() + + for _, id := range tosend { + r := &pb.LeaseKeepAliveRequest{ID: int64(id)} + if err := stream.Send(r); err != nil { + // TODO do something with this error? + return + } + } + + select { + case <-time.After(retryConnWait): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } + } +} + +func (ka *keepAlive) close() { + close(ka.donec) + for _, ch := range ka.chs { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go new file mode 100644 index 00000000..f5ae0109 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -0,0 +1,101 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
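+
+// Illustrative note (a sketch, not upstream text): client-side logs are
+// discarded by default (see init below) and can be re-enabled with, e.g.:
+//
+//	SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))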
+ +package clientv3 + +import ( + "io/ioutil" + "sync" + + "go.etcd.io/etcd/pkg/logutil" + + "google.golang.org/grpc/grpclog" +) + +var ( + lgMu sync.RWMutex + lg logutil.Logger +) + +type settableLogger struct { + l grpclog.LoggerV2 + mu sync.RWMutex +} + +func init() { + // disable client side logs by default + lg = &settableLogger{} + SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard)) +} + +// SetLogger sets client-side Logger. +func SetLogger(l grpclog.LoggerV2) { + lgMu.Lock() + lg = logutil.NewLogger(l) + // override grpclog so that any changes happen with locking + grpclog.SetLoggerV2(lg) + lgMu.Unlock() +} + +// GetLogger returns the current logutil.Logger. +func GetLogger() logutil.Logger { + lgMu.RLock() + l := lg + lgMu.RUnlock() + return l +} + +// NewLogger returns a new Logger with logutil.Logger. +func NewLogger(gl grpclog.LoggerV2) logutil.Logger { + return &settableLogger{l: gl} +} + +func (s *settableLogger) get() grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + return l +} + +// implement the grpclog.LoggerV2 interface + +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } +func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } +func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } +func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) V(l int) bool { return s.get().V(l) } +func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 { + s.mu.RLock() + l := s.l + s.mu.RUnlock() + if l.V(lvl) { + return s + } + return logutil.NewDiscardLogger() +} diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go new file mode 100644 index 00000000..744455a3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -0,0 +1,230 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "context" + "fmt" + "io" + + pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + + "google.golang.org/grpc" +) + +type ( + DefragmentResponse pb.DefragmentResponse + AlarmResponse pb.AlarmResponse + AlarmMember pb.AlarmMember + StatusResponse pb.StatusResponse + HashKVResponse pb.HashKVResponse + MoveLeaderResponse pb.MoveLeaderResponse +) + +type Maintenance interface { + // AlarmList gets all active alarms. + AlarmList(ctx context.Context) (*AlarmResponse, error) + + // AlarmDisarm disarms a given alarm. + AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) + + // Defragment releases wasted space from internal fragmentation on a given etcd member. + // Defragment is only needed when deleting a large number of keys and want to reclaim + // the resources. + // Defragment is an expensive operation. User should avoid defragmenting multiple members + // at the same time. + // To defragment multiple members in the cluster, user need to call defragment multiple + // times with different endpoints. + Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) + + // Status gets the status of the endpoint. + Status(ctx context.Context, endpoint string) (*StatusResponse, error) + + // HashKV returns a hash of the KV state at the time of the RPC. + // If revision is zero, the hash is computed on all keys. If the revision + // is non-zero, the hash is computed on all keys at or below the given revision. + HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) + + // Snapshot provides a reader for a point-in-time snapshot of etcd. + // If the context "ctx" is canceled or timed out, reading from returned + // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded). + Snapshot(ctx context.Context) (io.ReadCloser, error) + + // MoveLeader requests current leader to transfer its leadership to the transferee. + // Request must be made to the leader. + MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) +} + +type maintenance struct { + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption +} + +func NewMaintenance(c *Client) Maintenance { + api := &maintenance{ + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.Dial(endpoint) + if err != nil { + return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err) + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_GET, + MemberID: 0, // all + Alarm: pb.AlarmType_NONE, // all + } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
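+	// note: MemberID 0 together with AlarmType_NONE in the request above
+	// means "list alarms across all members"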
+ if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { + req := &pb.AlarmRequest{ + Action: pb.AlarmRequest_DEACTIVATE, + MemberID: am.MemberID, + Alarm: am.Alarm, + } + + if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE { + ar, err := m.AlarmList(ctx) + if err != nil { + return nil, toErr(ctx, err) + } + ret := AlarmResponse{} + for _, am := range ar.Alarms { + dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am)) + if derr != nil { + return nil, toErr(ctx, derr) + } + ret.Alarms = append(ret.Alarms, dresp.Alarms...) + } + return &ret, nil + } + + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil + } + return nil, toErr(ctx, err) +} + +func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*DefragmentResponse)(resp), nil +} + +func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*StatusResponse)(resp), nil +} + +func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) { + remote, cancel, err := m.dial(endpoint) + if err != nil { + + return nil, toErr(ctx, err) + } + defer cancel() + resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...) + if err != nil { + return nil, toErr(ctx, err) + } + return (*HashKVResponse)(resp), nil +} + +func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...) + if err != nil { + return nil, toErr(ctx, err) + } + + pr, pw := io.Pipe() + go func() { + for { + resp, err := ss.Recv() + if err != nil { + pw.CloseWithError(err) + return + } + if resp == nil && err == nil { + break + } + if _, werr := pw.Write(resp.Blob); werr != nil { + pw.CloseWithError(werr) + return + } + } + pw.Close() + }() + return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil +} + +type snapshotReadCloser struct { + ctx context.Context + io.ReadCloser +} + +func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) { + n, err = rc.ReadCloser.Read(p) + return n, toErr(rc.ctx, err) +} + +func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) { + resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...) + return (*MoveLeaderResponse)(resp), toErr(ctx, err) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go new file mode 100644 index 00000000..81ae31fd --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -0,0 +1,560 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import pb "go.etcd.io/etcd/etcdserver/etcdserverpb" + +type opType int + +const ( + // A default Op has opType 0, which is invalid. + tRange opType = iota + 1 + tPut + tDeleteRange + tTxn +) + +var noPrefixEnd = []byte{0} + +// Op represents an Operation that kv can execute. +type Op struct { + t opType + key []byte + end []byte + + // for range + limit int64 + sort *SortOption + serializable bool + keysOnly bool + countOnly bool + minModRev int64 + maxModRev int64 + minCreateRev int64 + maxCreateRev int64 + + // for range, watch + rev int64 + + // for watch, put, delete + prevKV bool + + // for watch + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // for put + ignoreValue bool + ignoreLease bool + + // progressNotify is for progress updates. + progressNotify bool + // createdNotify is for created event + createdNotify bool + // filters for watchers + filterPut bool + filterDelete bool + + // for put + val []byte + leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op +} + +// accessors / mutators + +// IsTxn returns true if the "Op" type is transaction. +func (op Op) IsTxn() bool { + return op.t == tTxn +} + +// Txn returns the comparison(if) operations, "then" operations, and "else" operations. +func (op Op) Txn() ([]Cmp, []Op, []Op) { + return op.cmps, op.thenOps, op.elseOps +} + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding with the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable } + +// IsKeysOnly returns whether keysOnly is set. +func (op Op) IsKeysOnly() bool { return op.keysOnly } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly } + +// MinModRev returns the operation's minimum modify revision. +func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. 
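+// As a combined illustration of the option/accessor pairing (a sketch, not
+// upstream documentation, with hypothetical revisions):
+//
+//	op := OpGet("foo", WithPrefix(), WithMinCreateRev(5), WithMaxCreateRev(100))
+//	_ = op.MinCreateRev() // 5
+//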
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + +func (op Op) toRangeRequest() *pb.RangeRequest { + if op.t != tRange { + panic("op.t != tRange") + } + r := &pb.RangeRequest{ + Key: op.key, + RangeEnd: op.end, + Limit: op.limit, + Revision: op.rev, + Serializable: op.serializable, + KeysOnly: op.keysOnly, + CountOnly: op.countOnly, + MinModRevision: op.minModRev, + MaxModRevision: op.maxModRev, + MinCreateRevision: op.minCreateRev, + MaxCreateRevision: op.maxCreateRev, + } + if op.sort != nil { + r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order) + r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target) + } + return r +} + +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + +func (op Op) toRequestOp() *pb.RequestOp { + switch op.t { + case tRange: + return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} + case tPut: + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} + case tDeleteRange: + r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} + return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}} + case tTxn: + return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}} + default: + panic("Unknown Op") + } +} + +func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } + return op.t != tRange +} + +// OpGet returns "get" operation based on given key and operation options. +func OpGet(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + return ret +} + +// OpDelete returns "delete" operation based on given key and operation options. 
+func OpDelete(key string, opts ...OpOption) Op { + // WithPrefix and WithFromKey are not supported together + if isWithPrefix(opts) && isWithFromKey(opts) { + panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one") + } + ret := Op{t: tDeleteRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in delete") + case ret.limit != 0: + panic("unexpected limit in delete") + case ret.rev != 0: + panic("unexpected revision in delete") + case ret.sort != nil: + panic("unexpected sort in delete") + case ret.serializable: + panic("unexpected serializable in delete") + case ret.countOnly: + panic("unexpected countOnly in delete") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in delete") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in delete") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in delete") + case ret.createdNotify: + panic("unexpected createdNotify in delete") + } + return ret +} + +// OpPut returns "put" operation based on given key-value and operation options. +func OpPut(key, val string, opts ...OpOption) Op { + ret := Op{t: tPut, key: []byte(key), val: []byte(val)} + ret.applyOpts(opts) + switch { + case ret.end != nil: + panic("unexpected range in put") + case ret.limit != 0: + panic("unexpected limit in put") + case ret.rev != 0: + panic("unexpected revision in put") + case ret.sort != nil: + panic("unexpected sort in put") + case ret.serializable: + panic("unexpected serializable in put") + case ret.countOnly: + panic("unexpected countOnly in put") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in put") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in put") + case ret.filterDelete, ret.filterPut: + panic("unexpected filter in put") + case ret.createdNotify: + panic("unexpected createdNotify in put") + } + return ret +} + +// OpTxn returns "txn" operation based on given transaction conditions. +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + +func opWatch(key string, opts ...OpOption) Op { + ret := Op{t: tRange, key: []byte(key)} + ret.applyOpts(opts) + switch { + case ret.leaseID != 0: + panic("unexpected lease in watch") + case ret.limit != 0: + panic("unexpected limit in watch") + case ret.sort != nil: + panic("unexpected sort in watch") + case ret.serializable: + panic("unexpected serializable in watch") + case ret.countOnly: + panic("unexpected countOnly in watch") + case ret.minModRev != 0, ret.maxModRev != 0: + panic("unexpected mod revision filter in watch") + case ret.minCreateRev != 0, ret.maxCreateRev != 0: + panic("unexpected create revision filter in watch") + } + return ret +} + +func (op *Op) applyOpts(opts []OpOption) { + for _, opt := range opts { + opt(op) + } +} + +// OpOption configures Operations like Get, Put, Delete. +type OpOption func(*Op) + +// WithLease attaches a lease ID to a key in 'Put' request. +func WithLease(leaseID LeaseID) OpOption { + return func(op *Op) { op.leaseID = leaseID } +} + +// WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. +func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } + +// WithRev specifies the store revision for 'Get' request. 
+// Or the start revision of 'Watch' request. +func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } } + +// WithSort specifies the ordering in 'Get' request. It requires +// 'WithRange' and/or 'WithPrefix' to be specified too. +// 'target' specifies the target to sort by: key, version, revisions, value. +// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'. +func WithSort(target SortTarget, order SortOrder) OpOption { + return func(op *Op) { + if target == SortByKey && order == SortAscend { + // If order != SortNone, server fetches the entire key-space, + // and then applies the sort and limit, if provided. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. + order = SortNone + } + op.sort = &SortOption{target, order} + } +} + +// GetPrefixRangeEnd gets the range end of the prefix. +// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo))'. +func GetPrefixRangeEnd(prefix string) string { + return string(getPrefix([]byte(prefix))) +} + +func getPrefix(key []byte) []byte { + end := make([]byte, len(key)) + copy(end, key) + for i := len(end) - 1; i >= 0; i-- { + if end[i] < 0xff { + end[i] = end[i] + 1 + end = end[:i+1] + return end + } + } + // next prefix does not exist (e.g., 0xffff); + // default to WithFromKey policy + return noPrefixEnd +} + +// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate +// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())' +// can return 'foo1', 'foo2', and so on. +func WithPrefix() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } + op.end = getPrefix(op.key) + } +} + +// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests. +// For example, 'Get' requests with 'WithRange(end)' returns +// the keys in the range [key, end). +// endKey must be lexicographically greater than start key. +func WithRange(endKey string) OpOption { + return func(op *Op) { op.end = []byte(endKey) } +} + +// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests +// to be equal or greater than the key in the argument. +func WithFromKey() OpOption { + return func(op *Op) { + if len(op.key) == 0 { + op.key = []byte{0} + } + op.end = []byte("\x00") + } +} + +// WithSerializable makes 'Get' request serializable. By default, +// it's linearizable. Serializable requests are better for lower latency +// requirement. +func WithSerializable() OpOption { + return func(op *Op) { op.serializable = true } +} + +// WithKeysOnly makes the 'Get' request return only the keys and the corresponding +// values will be omitted. +func WithKeysOnly() OpOption { + return func(op *Op) { op.keysOnly = true } +} + +// WithCountOnly makes the 'Get' request return only the count of keys. +func WithCountOnly() OpOption { + return func(op *Op) { op.countOnly = true } +} + +// WithMinModRev filters out keys for Get with modification revisions less than the given revision. +func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } } + +// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision. +func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } } + +// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision. 
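+// Together with the helpers defined below, a latest-modified-key lookup under
+// a prefix can be written as (an illustrative sketch):
+//
+//	resp, err := kvc.Get(ctx, "foo", clientv3.WithLastRev()...)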
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } } + +// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision. +func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } } + +// WithFirstCreate gets the key with the oldest creation revision in the request range. +func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) } + +// WithLastCreate gets the key with the latest creation revision in the request range. +func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) } + +// WithFirstKey gets the lexically first key in the request range. +func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) } + +// WithLastKey gets the lexically last key in the request range. +func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) } + +// WithFirstRev gets the key with the oldest modification revision in the request range. +func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) } + +// WithLastRev gets the key with the latest modification revision in the request range. +func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) } + +// withTop gets the first key over the get's prefix given a sort order +func withTop(target SortTarget, order SortOrder) []OpOption { + return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)} +} + +// WithProgressNotify makes watch server send periodic progress updates +// every 10 minutes when there is no incoming events. +// Progress updates have zero events in WatchResponse. +func WithProgressNotify() OpOption { + return func(op *Op) { + op.progressNotify = true + } +} + +// WithCreatedNotify makes watch server sends the created event. +func WithCreatedNotify() OpOption { + return func(op *Op) { + op.createdNotify = true + } +} + +// WithFilterPut discards PUT events from the watcher. +func WithFilterPut() OpOption { + return func(op *Op) { op.filterPut = true } +} + +// WithFilterDelete discards DELETE events from the watcher. +func WithFilterDelete() OpOption { + return func(op *Op) { op.filterDelete = true } +} + +// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted, +// nothing will be returned. +func WithPrevKV() OpOption { + return func(op *Op) { + op.prevKV = true + } +} + +// WithFragment to receive raw watch response with fragmentation. +// Fragmentation is disabled by default. If fragmentation is enabled, +// etcd watch server will split watch response before sending to clients +// when the total size of watch events exceed server-side request limit. +// The default server-side request limit is 1.5 MiB, which can be configured +// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes. +// See "etcdserver/api/v3rpc/watch.go" for more details. +func WithFragment() OpOption { + return func(op *Op) { op.fragment = true } +} + +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. 
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
+
+// isWithPrefix returns true if WithPrefix is being called in the op
+func isWithPrefix(opts []OpOption) bool { return isOpFuncCalled("WithPrefix", opts) }
+
+// isWithFromKey returns true if WithFromKey is being called in the op
+func isWithFromKey(opts []OpOption) bool { return isOpFuncCalled("WithFromKey", opts) }
diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go
new file mode 100644
index 00000000..700714c0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/options.go
@@ -0,0 +1,65 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	// client-side handling of retrying request failures where data was not written to the wire or
+	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
+	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
+	// transient failures.
+	defaultFailFast = grpc.FailFast(false)
+
+	// client-side request send limit, gRPC default is math.MaxInt32
+	// Make sure that "client-side send limit < server-side default send/recv limit"
+	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+	// client-side response receive limit, gRPC default is 4MB
+	// Make sure that "client-side receive limit >= server-side default send/recv limit"
+	// because range responses can easily exceed request send limits
+	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
+	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
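+
+	// For illustration (a sketch, not upstream text, with "endpoints" assumed),
+	// both ceilings are adjustable per client through the exposed Config fields:
+	//
+	//	cfg := clientv3.Config{
+	//		Endpoints:          endpoints,
+	//		MaxCallSendMsgSize: 10 * 1024 * 1024, // raise the request limit to 10 MiB
+	//	}
+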
+	defaultUnaryMaxRetries uint = 100
+
+	// client-side streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultStreamMaxRetries = ^uint(0) // max uint
+
+	// client-side retry backoff wait between requests.
+	defaultBackoffWaitBetween = 25 * time.Millisecond
+
+	// client-side retry backoff default jitter fraction.
+	defaultBackoffJitterFraction = 0.10
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
+
+// MaxLeaseTTL is the maximum lease TTL value.
+const MaxLeaseTTL = 9000000000
diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go
new file mode 100644
index 00000000..7e855de0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/retry.go
@@ -0,0 +1,298 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type retryPolicy uint8
+
+const (
+	repeatable retryPolicy = iota
+	nonRepeatable
+)
+
+func (rp retryPolicy) String() string {
+	switch rp {
+	case repeatable:
+		return "repeatable"
+	case nonRepeatable:
+		return "nonRepeatable"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
+// immutable requests (e.g. Get) should be retried unless it's
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// Returning "false" means retry should stop, since the client cannot
+// handle the error itself even with retries.
+func isSafeRetryImmutableRPC(err error) bool {
+	eErr := rpctypes.Error(err)
+	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
+		// interrupted by a non-transient server-side or gRPC-side error;
+		// the client cannot handle it itself (e.g. rpctypes.ErrCompacted)
+		return false
+	}
+	// only retry if unavailable
+	ev, ok := status.FromError(err)
+	if !ok {
+		// all errors from RPCs are typed "grpc/status.(*statusError)"
+		// (ref. https://github.com/grpc/grpc-go/pull/1782)
+		//
+		// if the error type is not "grpc/status.(*statusError)",
+		// it could be from "Dial"
+		// TODO: do not retry for now
+		// ref. https://github.com/grpc/grpc-go/issues/1581
+		return false
+	}
+	return ev.Code() == codes.Unavailable
+}
+
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
+// mutable requests (e.g. Put, Delete, Txn) should be retried only
+// when the status code is codes.Unavailable and the initial connection
+// has not yet been established (no endpoint is up).
+//
+// Returning "false" means retry should stop, because otherwise it violates
+// write-at-most-once semantics.
+func isSafeRetryMutableRPC(err error) bool {
+	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+		// not safe for mutable RPCs
+		// e.g. interrupted by a non-transient error that the client cannot handle itself,
+		// or a transient error while the connection has already been established
+		return false
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc == "there is no address available" || desc == "there is no connection available"
+}
+
+type retryKVClient struct {
+	kc pb.KVClient
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+	return &retryKVClient{
+		kc: pb.NewKVClient(c.conn),
+	}
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	return rkv.kc.Put(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	return rkv.kc.DeleteRange(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	return rkv.kc.Txn(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	return rkv.kc.Compact(ctx, in, opts...)
+}
+
+type retryLeaseClient struct {
+	lc pb.LeaseClient
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+	return &retryLeaseClient{
+		lc: pb.NewLeaseClient(c.conn),
+	}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
+}
+
+type retryClusterClient struct {
+	cc pb.ClusterClient
+}
+
+// RetryClusterClient implements a ClusterClient.
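+// (Editorial note derived from the method bodies below: read-only calls such
+// as MemberList are tagged withRetryPolicy(repeatable), while mutating calls
+// such as MemberAdd/MemberRemove/MemberUpdate are forwarded without the
+// repeatable policy to preserve write-at-most-once semantics.)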
+func RetryClusterClient(c *Client) pb.ClusterClient {
+	return &retryClusterClient{
+		cc: pb.NewClusterClient(c.conn),
+	}
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+	return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	return rcc.cc.MemberAdd(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+	return rcc.cc.MemberRemove(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	return rcc.cc.MemberUpdate(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+	return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
+type retryMaintenanceClient struct {
+	mc pb.MaintenanceClient
+}
+
+// RetryMaintenanceClient implements a MaintenanceClient.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	return &retryMaintenanceClient{
+		mc: pb.NewMaintenanceClient(conn),
+	}
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	return rmc.mc.Defragment(ctx, in, opts...)
+}
+
+type retryAuthClient struct {
+	ac pb.AuthClient
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient { + return &retryAuthClient{ + ac: pb.NewAuthClient(c.conn), + } +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...) +} + +func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + return rac.ac.AuthEnable(ctx, in, opts...) +} + +func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + return rac.ac.AuthDisable(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + return rac.ac.UserAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + return rac.ac.UserDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + return rac.ac.UserChangePassword(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + return rac.ac.UserGrantRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + return rac.ac.UserRevokeRole(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + return rac.ac.RoleAdd(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + return rac.ac.RoleDelete(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + return rac.ac.RoleGrantPermission(ctx, in, opts...) +} + +func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + return rac.ac.RoleRevokePermission(ctx, in, opts...) 
+} + +func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + return rac.ac.Authenticate(ctx, in, opts...) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go b/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go new file mode 100644 index 00000000..080490ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go @@ -0,0 +1,389 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more +// fine grained error checking required by write-at-most-once retry semantics of etcd. + +package clientv3 + +import ( + "context" + "io" + "sync" + "time" + + "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes" + "go.uber.org/zap" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// unaryClientInterceptor returns a new retrying unary client interceptor. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return invoker(ctx, method, req, reply, cc, grpcOpts...) + } + var lastErr error + for attempt := uint(0); attempt < callOpts.max; attempt++ { + if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil { + return err + } + logger.Debug( + "retrying of unary invoker", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + ) + lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...) + if lastErr == nil { + return nil + } + logger.Warn( + "retrying of unary invoker failed", + zap.String("target", cc.Target()), + zap.Uint("attempt", attempt), + zap.Error(lastErr), + ) + if isContextError(lastErr) { + if ctx.Err() != nil { + // its the context deadline or cancellation. + return lastErr + } + // its the callCtx deadline or cancellation, in which case try again. 
+ continue + } + if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken { + gterr := c.getToken(ctx) + if gterr != nil { + logger.Warn( + "retrying of unary invoker failed to fetch new auth token", + zap.String("target", cc.Target()), + zap.Error(gterr), + ) + return gterr // lastErr must be invalid auth token + } + continue + } + if !isSafeRetry(c.lg, lastErr, callOpts) { + return lastErr + } + } + return lastErr + } +} + +// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls. +// +// The default configuration of the interceptor is to not retry *at all*. This behaviour can be +// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions). +// +// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs +// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams, +// BidiStreams), the retry interceptor will fail the call. +func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor { + intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs) + return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + grpcOpts, retryOpts := filterCallOptions(opts) + callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts) + // short circuit for simplicity, and avoiding allocations. + if callOpts.max == 0 { + return streamer(ctx, desc, cc, method, grpcOpts...) + } + if desc.ClientStreams { + return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()") + } + newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...) + logger.Warn("retry stream intercept", zap.Error(err)) + if err != nil { + // TODO(mwitkow): Maybe dial and transport errors should be retriable? + return nil, err + } + retryingStreamer := &serverStreamingRetryingStream{ + client: c, + ClientStream: newStreamer, + callOpts: callOpts, + ctx: ctx, + streamerCall: func(ctx context.Context) (grpc.ClientStream, error) { + return streamer(ctx, desc, cc, method, grpcOpts...) + }, + } + return retryingStreamer, nil + } +} + +// type serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a +// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish +// a new ClientStream according to the retry policy. 
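+// (Editorial note: the fields below show the mechanism; sent messages are
+// buffered in bufferedSends so that reestablishStreamAndResendBuffer can
+// replay them on a fresh stream before receives resume.)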
+type serverStreamingRetryingStream struct {
+	grpc.ClientStream
+	client        *Client
+	bufferedSends []interface{} // single message that the client can send
+	receivedGood  bool          // indicates whether any prior receives were successful
+	wasClosedSend bool          // indicates that CloseSend was called
+	ctx           context.Context
+	callOpts      *options
+	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
+	mu            sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+	s.mu.Lock()
+	s.ClientStream = clientStream
+	s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
+	s.mu.Lock()
+	s.bufferedSends = append(s.bufferedSends, m)
+	s.mu.Unlock()
+	return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+	s.mu.Lock()
+	s.wasClosedSend = true
+	s.mu.Unlock()
+	return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+	return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+	return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
+	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+	if !attemptRetry {
+		return lastErr // success or hard failure
+	}
+	// We start off from attempt 1, because the zeroth attempt was already made by the normal SendMsg().
+	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+			return err
+		}
+		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+		if err != nil {
+			// TODO(mwitkow): Maybe dial and transport errors should be retriable?
+			return err
+		}
+		s.setStream(newStream)
+		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+		if !attemptRetry {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
+	s.mu.RLock()
+	wasGood := s.receivedGood
+	s.mu.RUnlock()
+	err := s.getStream().RecvMsg(m)
+	if err == nil || err == io.EOF {
+		s.mu.Lock()
+		s.receivedGood = true
+		s.mu.Unlock()
+		return false, err
+	} else if wasGood {
+		// previous RecvMsg in the stream succeeded, no retry logic should interfere
+		return false, err
+	}
+	if isContextError(err) {
+		if s.ctx.Err() != nil {
+			return false, err
+		}
+		// it's the callCtx deadline or cancellation, in which case try again.
+		return true, err
+	}
+	if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
+		gterr := s.client.getToken(s.ctx)
+		if gterr != nil {
+			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
+			return false, err // return the original error for simplicity
+		}
+		return true, err
+	}
+	return isSafeRetry(s.client.lg, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+	s.mu.RLock()
+	bufferedSends := s.bufferedSends
+	s.mu.RUnlock()
+	newStream, err := s.streamerCall(callCtx)
+	if err != nil {
+		return nil, err
+	}
+	for _, msg := range bufferedSends {
+		if err := newStream.SendMsg(msg); err != nil {
+			return nil, err
+		}
+	}
+	if err := newStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+	waitTime := time.Duration(0)
+	if attempt > 0 {
+		waitTime = callOpts.backoffFunc(attempt)
+	}
+	if waitTime > 0 {
+		timer := time.NewTimer(waitTime)
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return contextErrToGrpcErr(ctx.Err())
+		case <-timer.C:
+		}
+	}
+	return nil
+}
+
+// isSafeRetry returns "true" if the request is safe to retry with the given error.
+func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
+	if isContextError(err) {
+		return false
+	}
+	switch callOpts.retryPolicy {
+	case repeatable:
+		return isSafeRetryImmutableRPC(err)
+	case nonRepeatable:
+		return isSafeRetryMutableRPC(err)
+	default:
+		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+		return false
+	}
+}
+
+func isContextError(err error) bool {
+	return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
+}
+
+func contextErrToGrpcErr(err error) error {
+	switch err {
+	case context.DeadlineExceeded:
+		return status.Errorf(codes.DeadlineExceeded, err.Error())
+	case context.Canceled:
+		return status.Errorf(codes.Canceled, err.Error())
+	default:
+		return status.Errorf(codes.Unknown, err.Error())
+	}
+}
+
+var (
+	defaultOptions = &options{
+		retryPolicy: nonRepeatable,
+		max:         0, // disable
+		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+		retryAuth:   true,
+	}
+)
+
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return a time the system client should
+// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRetryPolicy sets the retry policy of this call.
+func withRetryPolicy(rp retryPolicy) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.retryPolicy = rp
+	}}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.max = maxRetries
+	}}
+}
+
+// withBackoff sets the backoffFunc used to control time between retries.
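+//
+// An illustrative composition of these package-internal options (an editorial
+// sketch, not code from the upstream source); retryOption satisfies
+// grpc.CallOption via the embedded grpc.EmptyCallOption:
+//
+//	perCallOpts := []grpc.CallOption{
+//		withRetryPolicy(repeatable), // safe to repeat (read-only RPC)
+//		withMax(5),                  // at most 5 retry attempts
+//		withBackoff(backoffLinearWithJitter(50*time.Millisecond, 0.10)),
+//	}
+//	// filterCallOptions later separates these retryOptions from plain gRPC options.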
+func withBackoff(bf backoffFunc) retryOption { + return retryOption{applyFunc: func(o *options) { + o.backoffFunc = bf + }} +} + +type options struct { + retryPolicy retryPolicy + max uint + backoffFunc backoffFunc + retryAuth bool +} + +// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor. +type retryOption struct { + grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic. + applyFunc func(opt *options) +} + +func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options { + if len(retryOptions) == 0 { + return opt + } + optCopy := &options{} + *optCopy = *opt + for _, f := range retryOptions { + f.applyFunc(optCopy) + } + return optCopy +} + +func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) { + for _, opt := range callOptions { + if co, ok := opt.(retryOption); ok { + retryOptions = append(retryOptions, co) + } else { + grpcOptions = append(grpcOptions, opt) + } + } + return grpcOptions, retryOptions +} + +// BackoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment). +// +// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms. +func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc { + return func(attempt uint) time.Duration { + return jitterUp(waitBetween, jitterFraction) + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go new file mode 100644 index 00000000..2bb9d9a1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/sort.go @@ -0,0 +1,37 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +type SortTarget int +type SortOrder int + +const ( + SortNone SortOrder = iota + SortAscend + SortDescend +) + +const ( + SortByKey SortTarget = iota + SortByVersion + SortByCreateRevision + SortByModRevision + SortByValue +) + +type SortOption struct { + Target SortTarget + Order SortOrder +} diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go new file mode 100644 index 00000000..c19715da --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -0,0 +1,151 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+
+	"google.golang.org/grpc"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	Txn(context.TODO()).If(
+//	  Compare(Value(k1), ">", v1),
+//	  Compare(Version(k1), "=", 2)
+//	).Then(
+//	  OpPut(k2,v2), OpPut(k3,v3)
+//	).Else(
+//	  OpPut(k4,v4), OpPut(k5,v5)
+//	).Commit()
+//
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed; otherwise the
+	// operations passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+
+	callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	resp, err := txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+	if err != nil {
+		return nil, toErr(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/utils.go b/vendor/github.com/coreos/etcd/clientv3/utils.go
new file mode 100644
index 00000000..b998c41b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/utils.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math/rand"
+	"reflect"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
+
+// isOpFuncCalled checks whether the provided function is called in the op options.
+func isOpFuncCalled(op string, opts []OpOption) bool {
+	for _, opt := range opts {
+		v := reflect.ValueOf(opt)
+		if v.Kind() == reflect.Func {
+			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
+				if strings.Contains(opFunc.Name(), op) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go
new file mode 100644
index 00000000..87d222d1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/watch.go
@@ -0,0 +1,987 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	v3rpc "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
+	mvccpb "go.etcd.io/etcd/mvcc/mvccpb"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	EventTypeDelete = mvccpb.DELETE
+	EventTypePut    = mvccpb.PUT
+
+	closeSendErrTimeout = 250 * time.Millisecond
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+	// Watch watches on a key or prefix. The watched events will be returned
+	// through the returned channel. If revisions waiting to be sent over the
+	// watch are compacted, then the watch will be canceled by the server, the
+	// client will post a compacted error watch response, and the channel will close.
+	// If the context "ctx" is canceled or timed out, the returned "WatchChan" is closed,
+	// and "WatchResponse" from this closed channel has zero events and a nil "Err()".
+	// The context "ctx" MUST be canceled as soon as the watcher is no longer used,
+	// to release the associated resources.
+	//
+	// If the context is "context.Background/TODO", the returned "WatchChan" will
+	// not be closed and will block until an event is triggered, except when the server
+	// returns a non-recoverable error (e.g. ErrCompacted).
+	// For example, when a context passed with "WithRequireLeader" and the
+	// connected server has no leader (e.g. due to network partition),
+	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
+	// and then "WatchChan" is closed with non-nil "Err()".
+ // In order to prevent a watch stream being stuck in a partitioned node, + // make sure to wrap context with "WithRequireLeader". + // + // Otherwise, as long as the context has not been canceled or timed out, + // watch will retry on other recoverable errors forever until reconnected. + // + // TODO: explicitly set context error in the last "WatchResponse" message and close channel? + // Currently, client contexts are overwritten with "valCtx" that never closes. + // TODO(v3.4): configure watch retry policy, limit maximum retry number + // (see https://github.com/etcd-io/etcd/issues/8980) + Watch(ctx context.Context, key string, opts ...OpOption) WatchChan + + // RequestProgress requests a progress notify response be sent in all watch channels. + RequestProgress(ctx context.Context) error + + // Close closes the watcher and cancels all watch requests. + Close() error +} + +type WatchResponse struct { + Header pb.ResponseHeader + Events []*Event + + // CompactRevision is the minimum revision the watcher may receive. + CompactRevision int64 + + // Canceled is used to indicate watch failure. + // If the watch failed and the stream was about to close, before the channel is closed, + // the channel sends a final response that has Canceled set to true with a non-nil Err(). + Canceled bool + + // Created is used to indicate the creation of the watcher. + Created bool + + closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string +} + +// IsCreate returns true if the event tells that the key is newly created. +func (e *Event) IsCreate() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision +} + +// IsModify returns true if the event tells that a new value is put on existing key. +func (e *Event) IsModify() bool { + return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision +} + +// Err is the error value if this WatchResponse holds an error. +func (wr *WatchResponse) Err() error { + switch { + case wr.closeErr != nil: + return v3rpc.Error(wr.closeErr) + case wr.CompactRevision != 0: + return v3rpc.ErrCompacted + case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } + return v3rpc.ErrFutureRev + } + return nil +} + +// IsProgressNotify returns true if the WatchResponse is progress notification. +func (wr *WatchResponse) IsProgressNotify() bool { + return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0 +} + +// watcher implements the Watcher interface +type watcher struct { + remote pb.WatchClient + callOpts []grpc.CallOption + + // mu protects the grpc streams map + mu sync.RWMutex + + // streams holds all the active grpc streams keyed by ctx value. + streams map[string]*watchGrpcStream +} + +// watchGrpcStream tracks all watch resources attached to a single grpc stream. 
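+//
+// For orientation, a hedged sketch of the public API this stream ultimately
+// serves (illustrative only; "cli" is an assumed *clientv3.Client and "ctx"
+// an assumed context.Context):
+//
+//	wch := cli.Watch(clientv3.WithRequireLeader(ctx), "prefix/", clientv3.WithPrefix())
+//	for wresp := range wch {
+//		if err := wresp.Err(); err != nil {
+//			break // e.g. compaction or cancellation
+//		}
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}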
+type watchGrpcStream struct { + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption + + // ctx controls internal remote.Watch requests + ctx context.Context + // ctxKey is the key used when looking up this stream's context + ctxKey string + cancel context.CancelFunc + + // substreams holds all active watchers on this grpc stream + substreams map[int64]*watcherStream + // resuming holds all resuming watchers on this grpc stream + resuming []*watcherStream + + // reqc sends a watch request from Watch() to the main goroutine + reqc chan watchStreamRequest + // respc receives data from the watch client + respc chan *pb.WatchResponse + // donec closes to broadcast shutdown + donec chan struct{} + // errc transmits errors from grpc Recv to the watch stream reconnect logic + errc chan error + // closingc gets the watcherStream of closing watchers + closingc chan *watcherStream + // wg is Done when all substream goroutines have exited + wg sync.WaitGroup + + // resumec closes to signal that all substreams should begin resuming + resumec chan struct{} + // closeErr is the error that closed the watch stream + closeErr error +} + +// watchStreamRequest is a union of the supported watch request operation types +type watchStreamRequest interface { + toPB() *pb.WatchRequest +} + +// watchRequest is issued by the subscriber to start a new watcher +type watchRequest struct { + ctx context.Context + key string + end string + rev int64 + + // send created notification event if this field is true + createdNotify bool + // progressNotify is for progress updates + progressNotify bool + // fragmentation should be disabled by default + // if true, split watch events when total exceeds + // "--max-request-bytes" flag value + 512-byte + fragment bool + + // filters is the list of events to filter out + filters []pb.WatchCreateRequest_FilterType + // get the previous key-value pair before the event happens + prevKV bool + // retc receives a chan WatchResponse once the watcher is established + retc chan chan WatchResponse +} + +// progressRequest is issued by the subscriber to request watch progress +type progressRequest struct { +} + +// watcherStream represents a registered watcher +type watcherStream struct { + // initReq is the request that initiated this request + initReq watchRequest + + // outc publishes watch responses to subscriber + outc chan WatchResponse + // recvc buffers watch responses before publishing + recvc chan *WatchResponse + // donec closes when the watcherStream goroutine stops. + donec chan struct{} + // closing is set to true when stream should be scheduled to shutdown. 
+ closing bool + // id is the registered watch id on the grpc stream + id int64 + + // buf holds all events received from etcd but not yet consumed by the client + buf []*WatchResponse +} + +func NewWatcher(c *Client) Watcher { + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) +} + +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ + remote: wc, + streams: make(map[string]*watchGrpcStream), + } + if c != nil { + w.callOpts = c.callOpts + } + return w +} + +// never closes +var valCtxCh = make(chan struct{}) +var zeroTime = time.Unix(0, 0) + +// ctx with only the values; never Done +type valCtx struct{ context.Context } + +func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false } +func (vc *valCtx) Done() <-chan struct{} { return valCtxCh } +func (vc *valCtx) Err() error { return nil } + +func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { + ctx, cancel := context.WithCancel(&valCtx{inctx}) + wgs := &watchGrpcStream{ + owner: w, + remote: w.remote, + callOpts: w.callOpts, + ctx: ctx, + ctxKey: streamKeyFromCtx(inctx), + cancel: cancel, + substreams: make(map[int64]*watcherStream), + respc: make(chan *pb.WatchResponse), + reqc: make(chan watchStreamRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), + } + go wgs.run() + return wgs +} + +// Watch posts a watch request to run() and waits for a new watcher channel +func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan { + ow := opWatch(key, opts...) + + var filters []pb.WatchCreateRequest_FilterType + if ow.filterPut { + filters = append(filters, pb.WatchCreateRequest_NOPUT) + } + if ow.filterDelete { + filters = append(filters, pb.WatchCreateRequest_NODELETE) + } + + wr := &watchRequest{ + ctx: ctx, + createdNotify: ow.createdNotify, + key: string(ow.key), + end: string(ow.end), + rev: ow.rev, + progressNotify: ow.progressNotify, + fragment: ow.fragment, + filters: filters, + prevKV: ow.prevKV, + retc: make(chan chan WatchResponse, 1), + } + + ok := false + ctxKey := streamKeyFromCtx(ctx) + + // find or allocate appropriate grpc watch stream + w.mu.Lock() + if w.streams == nil { + // closed + w.mu.Unlock() + ch := make(chan WatchResponse) + close(ch) + return ch + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + // couldn't create channel; return closed channel + closeCh := make(chan WatchResponse, 1) + + // submit request + select { + case reqc <- wr: + ok = true + case <-wr.ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) + } + + // receive channel + if ok { + select { + case ret := <-wr.retc: + return ret + case <-ctx.Done(): + case <-donec: + if wgs.closeErr != nil { + closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr} + break + } + // retry; may have dropped stream from no ctxs + return w.Watch(ctx, key, opts...) 
+ } + } + + close(closeCh) + return closeCh +} + +func (w *watcher) Close() (err error) { + w.mu.Lock() + streams := w.streams + w.streams = nil + w.mu.Unlock() + for _, wgs := range streams { + if werr := wgs.close(); werr != nil { + err = werr + } + } + // Consider context.Canceled as a successful close + if err == context.Canceled { + err = nil + } + return err +} + +// RequestProgress requests a progress notify response be sent in all watch channels. +func (w *watcher) RequestProgress(ctx context.Context) (err error) { + ctxKey := streamKeyFromCtx(ctx) + + w.mu.Lock() + if w.streams == nil { + w.mu.Unlock() + return fmt.Errorf("no stream found for context") + } + wgs := w.streams[ctxKey] + if wgs == nil { + wgs = w.newWatcherGrpcStream(ctx) + w.streams[ctxKey] = wgs + } + donec := wgs.donec + reqc := wgs.reqc + w.mu.Unlock() + + pr := &progressRequest{} + + select { + case reqc <- pr: + return nil + case <-ctx.Done(): + if err == nil { + return ctx.Err() + } + return err + case <-donec: + if wgs.closeErr != nil { + return wgs.closeErr + } + // retry; may have dropped stream from no ctxs + return w.RequestProgress(ctx) + } +} + +func (w *watchGrpcStream) close() (err error) { + w.cancel() + <-w.donec + select { + case err = <-w.errc: + default: + } + return toErr(w.ctx, err) +} + +func (w *watcher) closeStream(wgs *watchGrpcStream) { + w.mu.Lock() + close(wgs.donec) + wgs.cancel() + if w.streams != nil { + delete(w.streams, wgs.ctxKey) + } + w.mu.Unlock() +} + +func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) { + // check watch ID for backward compatibility (<= v3.3) + if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") { + w.closeErr = v3rpc.Error(errors.New(resp.CancelReason)) + // failed; no channel + close(ws.recvc) + return + } + ws.id = resp.WatchId + w.substreams[ws.id] = ws +} + +func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) { + select { + case ws.outc <- *resp: + case <-ws.initReq.ctx.Done(): + case <-time.After(closeSendErrTimeout): + } + close(ws.outc) +} + +func (w *watchGrpcStream) closeSubstream(ws *watcherStream) { + // send channel response in case stream was never established + select { + case ws.initReq.retc <- ws.outc: + default: + } + // close subscriber's channel + if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil { + go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr}) + } else if ws.outc != nil { + close(ws.outc) + } + if ws.id != -1 { + delete(w.substreams, ws.id) + return + } + for i := range w.resuming { + if w.resuming[i] == ws { + w.resuming[i] = nil + return + } + } +} + +// run is the root of the goroutines for managing a watcher client +func (w *watchGrpcStream) run() { + var wc pb.Watch_WatchClient + var closeErr error + + // substreams marked to close but goroutine still running; needed for + // avoiding double-closing recvc on grpc stream teardown + closing := make(map[*watcherStream]struct{}) + + defer func() { + w.closeErr = closeErr + // shutdown substreams and resuming substreams + for _, ws := range w.substreams { + if _, ok := closing[ws]; !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + for _, ws := range w.resuming { + if _, ok := closing[ws]; ws != nil && !ok { + close(ws.recvc) + closing[ws] = struct{}{} + } + } + w.joinSubstreams() + for range closing { + w.closeSubstream(<-w.closingc) + } + w.wg.Wait() + w.owner.closeStream(w) + }() + + // start a stream with the etcd grpc server + if wc, 
closeErr = w.newWatchClient(); closeErr != nil { + return + } + + cancelSet := make(map[int64]struct{}) + + var cur *pb.WatchResponse + for { + select { + // Watch() requested + case req := <-w.reqc: + switch wreq := req.(type) { + case *watchRequest: + outc := make(chan WatchResponse, 1) + // TODO: pass custom watch ID? + ws := &watcherStream{ + initReq: *wreq, + id: -1, + outc: outc, + // unbuffered so resumes won't cause repeat events + recvc: make(chan *WatchResponse), + } + + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + + // queue up for watcher creation/resume + w.resuming = append(w.resuming, ws) + if len(w.resuming) == 1 { + // head of resume queue, can register a new watcher + wc.Send(ws.initReq.toPB()) + } + case *progressRequest: + wc.Send(wreq.toPB()) + } + + // new events from the watch client + case pbresp := <-w.respc: + if cur == nil || pbresp.Created || pbresp.Canceled { + cur = pbresp + } else if cur != nil && cur.WatchId == pbresp.WatchId { + // merge new events + cur.Events = append(cur.Events, pbresp.Events...) + // update "Fragment" field; last response with "Fragment" == false + cur.Fragment = pbresp.Fragment + } + + switch { + case pbresp.Created: + // response to head of queue creation + if ws := w.resuming[0]; ws != nil { + w.addSubstream(pbresp, ws) + w.dispatchEvent(pbresp) + w.resuming[0] = nil + } + + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + + // reset for next iteration + cur = nil + + case pbresp.Canceled && pbresp.CompactRevision == 0: + delete(cancelSet, pbresp.WatchId) + if ws, ok := w.substreams[pbresp.WatchId]; ok { + // signal to stream goroutine to update closingc + close(ws.recvc) + closing[ws] = struct{}{} + } + + // reset for next iteration + cur = nil + + case cur.Fragment: + // watch response events are still fragmented + // continue to fetch next fragmented event arrival + continue + + default: + // dispatch to appropriate watch stream + ok := w.dispatchEvent(cur) + + // reset for next iteration + cur = nil + + if ok { + break + } + + // watch response on unexpected watch id; cancel id + if _, ok := cancelSet[pbresp.WatchId]; ok { + break + } + + cancelSet[pbresp.WatchId] = struct{}{} + cr := &pb.WatchRequest_CancelRequest{ + CancelRequest: &pb.WatchCancelRequest{ + WatchId: pbresp.WatchId, + }, + } + req := &pb.WatchRequest{RequestUnion: cr} + wc.Send(req) + } + + // watch client failed on Recv; spawn another if possible + case err := <-w.errc: + if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { + closeErr = err + return + } + if wc, closeErr = w.newWatchClient(); closeErr != nil { + return + } + if ws := w.nextResume(); ws != nil { + wc.Send(ws.initReq.toPB()) + } + cancelSet = make(map[int64]struct{}) + + case <-w.ctx.Done(): + return + + case ws := <-w.closingc: + w.closeSubstream(ws) + delete(closing, ws) + // no more watchers on this stream, shutdown + if len(w.substreams)+len(w.resuming) == 0 { + return + } + } + } +} + +// nextResume chooses the next resuming to register with the grpc stream. Abandoned +// streams are marked as nil in the queue since the head must wait for its inflight registration. 
+func (w *watchGrpcStream) nextResume() *watcherStream { + for len(w.resuming) != 0 { + if w.resuming[0] != nil { + return w.resuming[0] + } + w.resuming = w.resuming[1:len(w.resuming)] + } + return nil +} + +// dispatchEvent sends a WatchResponse to the appropriate watcher stream +func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { + events := make([]*Event, len(pbresp.Events)) + for i, ev := range pbresp.Events { + events[i] = (*Event)(ev) + } + // TODO: return watch ID? + wr := &WatchResponse{ + Header: *pbresp.Header, + Events: events, + CompactRevision: pbresp.CompactRevision, + Created: pbresp.Created, + Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + + // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to + // indicate they should be broadcast. + if wr.IsProgressNotify() && pbresp.WatchId == -1 { + return w.broadcastResponse(wr) + } + + return w.unicastResponse(wr, pbresp.WatchId) + +} + +// broadcastResponse send a watch response to all watch substreams. +func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool { + for _, ws := range w.substreams { + select { + case ws.recvc <- wr: + case <-ws.donec: + } + } + return true +} + +// unicastResponse sends a watch response to a specific watch substream. +func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool { + ws, ok := w.substreams[watchId] + if !ok { + return false + } + select { + case ws.recvc <- wr: + case <-ws.donec: + return false + } + return true +} + +// serveWatchClient forwards messages from the grpc stream to run() +func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) { + for { + resp, err := wc.Recv() + if err != nil { + select { + case w.errc <- err: + case <-w.donec: + } + return + } + select { + case w.respc <- resp: + case <-w.donec: + return + } + } +} + +// serveSubstream forwards watch responses from run() to the subscriber +func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) { + if ws.closing { + panic("created substream goroutine but substream is closing") + } + + // nextRev is the minimum expected next revision + nextRev := ws.initReq.rev + resuming := false + defer func() { + if !resuming { + ws.closing = true + } + close(ws.donec) + if !resuming { + w.closingc <- ws + } + w.wg.Done() + }() + + emptyWr := &WatchResponse{} + for { + curWr := emptyWr + outc := ws.outc + + if len(ws.buf) > 0 { + curWr = ws.buf[0] + } else { + outc = nil + } + select { + case outc <- *curWr: + if ws.buf[0].Err() != nil { + return + } + ws.buf[0] = nil + ws.buf = ws.buf[1:] + case wr, ok := <-ws.recvc: + if !ok { + // shutdown from closeSubstream + return + } + + if wr.Created { + if ws.initReq.retc != nil { + ws.initReq.retc <- ws.outc + // to prevent next write from taking the slot in buffered channel + // and posting duplicate create events + ws.initReq.retc = nil + + // send first creation event only if requested + if ws.initReq.createdNotify { + ws.outc <- *wr + } + // once the watch channel is returned, a current revision + // watch must resume at the store revision. This is necessary + // for the following case to work as expected: + // wch := m1.Watch("a") + // m2.Put("a", "b") + // <-wch + // If the revision is only bound on the first observed event, + // if wch is disconnected before the Put is issued, then reconnects + // after it is committed, it'll miss the Put. 
+ if ws.initReq.rev == 0 { + nextRev = wr.Header.Revision + } + } + } else { + // current progress of watch; <= store revision + nextRev = wr.Header.Revision + } + + if len(wr.Events) > 0 { + nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1 + } + ws.initReq.rev = nextRev + + // created event is already sent above, + // watcher should not post duplicate events + if wr.Created { + continue + } + + // TODO pause channel if buffer gets too large + ws.buf = append(ws.buf, wr) + case <-w.ctx.Done(): + return + case <-ws.initReq.ctx.Done(): + return + case <-resumec: + resuming = true + return + } + } + // lazily send cancel message if events on missing id +} + +func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) { + // mark all substreams as resuming + close(w.resumec) + w.resumec = make(chan struct{}) + w.joinSubstreams() + for _, ws := range w.substreams { + ws.id = -1 + w.resuming = append(w.resuming, ws) + } + // strip out nils, if any + var resuming []*watcherStream + for _, ws := range w.resuming { + if ws != nil { + resuming = append(resuming, ws) + } + } + w.resuming = resuming + w.substreams = make(map[int64]*watcherStream) + + // connect to grpc stream while accepting watcher cancelation + stopc := make(chan struct{}) + donec := w.waitCancelSubstreams(stopc) + wc, err := w.openWatchClient() + close(stopc) + <-donec + + // serve all non-closing streams, even if there's a client error + // so that the teardown path can shutdown the streams as expected. + for _, ws := range w.resuming { + if ws.closing { + continue + } + ws.donec = make(chan struct{}) + w.wg.Add(1) + go w.serveSubstream(ws, w.resumec) + } + + if err != nil { + return nil, v3rpc.Error(err) + } + + // receive data from new grpc stream + go w.serveWatchClient(wc) + return wc, nil +} + +func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} { + var wg sync.WaitGroup + wg.Add(len(w.resuming)) + donec := make(chan struct{}) + for i := range w.resuming { + go func(ws *watcherStream) { + defer wg.Done() + if ws.closing { + if ws.initReq.ctx.Err() != nil && ws.outc != nil { + close(ws.outc) + ws.outc = nil + } + return + } + select { + case <-ws.initReq.ctx.Done(): + // closed ws will be removed from resuming + ws.closing = true + close(ws.outc) + ws.outc = nil + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() + case <-stopc: + } + }(w.resuming[i]) + } + go func() { + defer close(donec) + wg.Wait() + }() + return donec +} + +// joinSubstreams waits for all substream goroutines to complete. +func (w *watchGrpcStream) joinSubstreams() { + for _, ws := range w.substreams { + <-ws.donec + } + for _, ws := range w.resuming { + if ws != nil { + <-ws.donec + } + } +} + +var maxBackoff = 100 * time.Millisecond + +// openWatchClient retries opening a watch client until success or halt. 
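+// With the 25% growth factor below, the wait between attempts grows roughly
+// as 1ms, 1.25ms, 1.56ms, ... and is capped at maxBackoff (100ms).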
+// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false +func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { + backoff := time.Millisecond + for { + select { + case <-w.ctx.Done(): + if err == nil { + return nil, w.ctx.Err() + } + return nil, err + default: + } + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { + break + } + if isHaltErr(w.ctx, err) { + return nil, v3rpc.Error(err) + } + if isUnavailableErr(w.ctx, err) { + // retry, but backoff + if backoff < maxBackoff { + // 25% backoff factor + backoff = backoff + backoff/4 + if backoff > maxBackoff { + backoff = maxBackoff + } + } + time.Sleep(backoff) + } + } + return ws, nil +} + +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. +func (wr *watchRequest) toPB() *pb.WatchRequest { + req := &pb.WatchCreateRequest{ + StartRevision: wr.rev, + Key: []byte(wr.key), + RangeEnd: []byte(wr.end), + ProgressNotify: wr.progressNotify, + Filters: wr.filters, + PrevKv: wr.prevKV, + Fragment: wr.fragment, + } + cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +// toPB converts an internal progress request structure to its protobuf WatchRequest structure. +func (pr *progressRequest) toPB() *pb.WatchRequest { + req := &pb.WatchProgressRequest{} + cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req} + return &pb.WatchRequest{RequestUnion: cr} +} + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/github.com/coreos/etcd/pkg/transport/doc.go new file mode 100644 index 00000000..37658ce5 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport implements various HTTP transport utilities based on Go +// net package. +package transport diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go new file mode 100644 index 00000000..4ff8e7f0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go @@ -0,0 +1,94 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"crypto/tls"
+	"fmt"
+	"net"
+	"time"
+)
+
+type keepAliveConn interface {
+	SetKeepAlive(bool) error
+	SetKeepAlivePeriod(d time.Duration) error
+}
+
+// NewKeepAliveListener returns a listener that listens on the given address.
+// Be careful when wrapping a KeepAliveListener with another Listener if TLSInfo is not nil.
+// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+	if scheme == "https" {
+		if tlscfg == nil {
+			return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
+		}
+		return newTLSKeepaliveListener(l, tlscfg), nil
+	}
+
+	return &keepaliveListener{
+		Listener: l,
+	}, nil
+}
+
+type keepaliveListener struct{ net.Listener }
+
+func (kln *keepaliveListener) Accept() (net.Conn, error) {
+	c, err := kln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	kac := c.(keepAliveConn)
+	// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
+	// default on linux: 30 + 8 * 30
+	// default on osx:   30 + 8 * 75
+	kac.SetKeepAlive(true)
+	kac.SetKeepAlivePeriod(30 * time.Second)
+	return c, nil
+}
+
+// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
+type tlsKeepaliveListener struct {
+	net.Listener
+	config *tls.Config
+}
+
+// Accept waits for and returns the next incoming TLS connection.
+// The returned connection c is a *tls.Conn.
+func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
+	c, err = l.Listener.Accept()
+	if err != nil {
+		return
+	}
+	kac := c.(keepAliveConn)
+	// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
+	// default on linux: 30 + 8 * 30
+	// default on osx:   30 + 8 * 75
+	kac.SetKeepAlive(true)
+	kac.SetKeepAlivePeriod(30 * time.Second)
+	c = tls.Server(c, l.config)
+	return c, nil
+}
+
+// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
+// Listener and wraps each connection with Server.
+// The configuration config must be non-nil and must have
+// at least one certificate.
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
+	l := &tlsKeepaliveListener{}
+	l.Listener = inner
+	l.config = config
+	return l
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
new file mode 100644
index 00000000..930c5420
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
@@ -0,0 +1,80 @@
+// Copyright 2013 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides network utility functions, complementing the more
+// common ones in the net package.
+package transport
+
+import (
+	"errors"
+	"net"
+	"sync"
+	"time"
+)
+
+var (
+	ErrNotTCP = errors.New("only tcp connections have keepalive")
+)
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+	return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+	net.Listener
+	sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+	l.acquire()
+	c, err := l.Listener.Accept()
+	if err != nil {
+		l.release()
+		return nil, err
+	}
+	return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+	net.Conn
+	releaseOnce sync.Once
+	release     func()
+}
+
+func (l *limitListenerConn) Close() error {
+	err := l.Conn.Close()
+	l.releaseOnce.Do(l.release)
+	return err
+}
+
+func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlive(doKeepAlive)
+}
+
+func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlivePeriod(d)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go
new file mode 100644
index 00000000..0c593e8e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go
@@ -0,0 +1,417 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"go.etcd.io/etcd/pkg/tlsutil"
+
+	"go.uber.org/zap"
+)
+
+// NewListener creates a new listener.
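+// A minimal usage sketch (hypothetical address and TLS asset paths):
+//
+//	tlsinfo := &TLSInfo{CertFile: "server.crt", KeyFile: "server.key"}
+//	l, err := NewListener("127.0.0.1:2379", "https", tlsinfo)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer l.Close()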
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { + if l, err = newListener(addr, scheme); err != nil { + return nil, err + } + return wrapTLS(scheme, tlsinfo, l) +} + +func newListener(addr string, scheme string) (net.Listener, error) { + if scheme == "unix" || scheme == "unixs" { + // unix sockets via unix://laddr + return NewUnixListener(addr) + } + return net.Listen("tcp", addr) +} + +func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { + if scheme != "https" && scheme != "unixs" { + return l, nil + } + return newTLSListener(l, tlsinfo, checkSAN) +} + +type TLSInfo struct { + CertFile string + KeyFile string + TrustedCAFile string + ClientCertAuth bool + CRLFile string + InsecureSkipVerify bool + + // ServerName ensures the cert matches the given host in case of discovery / virtual hosting + ServerName string + + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + + // CipherSuites is a list of supported cipher suites. + // If empty, Go auto-populates it by default. + // Note that cipher suites are prioritized in the given order. + CipherSuites []uint16 + + selfCert bool + + // parseFunc exists to simplify testing. Typically, parseFunc + // should be left nil. In that case, tls.X509KeyPair will be used. + parseFunc func([]byte, []byte) (tls.Certificate, error) + + // AllowedCN is a CN which must be provided by a client. + AllowedCN string + + // Logger logs TLS errors. + // If nil, all logs are discarded. + Logger *zap.Logger + + // EmptyCN indicates that the cert must have empty CN. + // If true, ClientConfig() will return an error for a cert with non empty CN. 
+	EmptyCN bool
+}
+
+func (info TLSInfo) String() string {
+	return fmt.Sprintf("cert = %s, key = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
+}
+
+func (info TLSInfo) Empty() bool {
+	return info.CertFile == "" && info.KeyFile == ""
+}
+
+func SelfCert(lg *zap.Logger, dirpath string, hosts []string) (info TLSInfo, err error) {
+	if err = os.MkdirAll(dirpath, 0700); err != nil {
+		return
+	}
+	info.Logger = lg
+
+	certPath := filepath.Join(dirpath, "cert.pem")
+	keyPath := filepath.Join(dirpath, "key.pem")
+	_, errcert := os.Stat(certPath)
+	_, errkey := os.Stat(keyPath)
+	if errcert == nil && errkey == nil {
+		info.CertFile = certPath
+		info.KeyFile = keyPath
+		info.selfCert = true
+		return
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		if info.Logger != nil {
+			info.Logger.Warn(
+				"cannot generate random number",
+				zap.Error(err),
+			)
+		}
+		return
+	}
+
+	tmpl := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject:      pkix.Name{Organization: []string{"etcd"}},
+		NotBefore:    time.Now(),
+		NotAfter:     time.Now().Add(365 * (24 * time.Hour)),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	for _, host := range hosts {
+		h, _, _ := net.SplitHostPort(host)
+		if ip := net.ParseIP(h); ip != nil {
+			tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
+		} else {
+			tmpl.DNSNames = append(tmpl.DNSNames, h)
+		}
+	}
+
+	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	if err != nil {
+		if info.Logger != nil {
+			info.Logger.Warn(
+				"cannot generate ECDSA key",
+				zap.Error(err),
+			)
+		}
+		return
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
+	if err != nil {
+		if info.Logger != nil {
+			info.Logger.Warn(
+				"cannot generate x509 certificate",
+				zap.Error(err),
+			)
+		}
+		return
+	}
+
+	certOut, err := os.Create(certPath)
+	if err != nil {
+		if info.Logger != nil {
+			info.Logger.Warn(
+				"cannot create cert file",
+				zap.String("path", certPath),
+				zap.Error(err),
+			)
+		}
+		return
+	}
+	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	certOut.Close()
+	if info.Logger != nil {
+		info.Logger.Info("created cert file", zap.String("path", certPath))
+	}
+
+	b, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return
+	}
+	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		if info.Logger != nil {
+			info.Logger.Warn(
+				"cannot create key file",
+				zap.String("path", keyPath),
+				zap.Error(err),
+			)
+		}
+		return
+	}
+	pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
+	keyOut.Close()
+	if info.Logger != nil {
+		info.Logger.Info("created key file", zap.String("path", keyPath))
+	}
+	return SelfCert(lg, dirpath, hosts)
+}
+
+// baseConfig is called on initial TLS handshake start.
+//
+// Previously,
+// 1. Server has non-empty (*tls.Config).Certificates on client hello
+// 2. Server calls (*tls.Config).GetCertificate iff:
+//    - Server's (*tls.Config).Certificates is not empty, or
+//    - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+//
+// When (*tls.Config).Certificates is always populated on initial handshake,
+// client is expected to provide a valid matching SNI to pass the TLS
+// verification, thus triggering server (*tls.Config).GetCertificate to reload
+// TLS assets. However, a cert whose SAN field does not include domain names
+// but only IP addresses has an empty (*tls.ClientHelloInfo).ServerName, thus
+// it was never able to trigger TLS reload on initial handshake; the first
+// certificate object was being used, never being updated.
+//
+// Now, (*tls.Config).Certificates is created empty on initial TLS client
+// handshake, in order to trigger (*tls.Config).GetCertificate and populate
+// the rest of the certificates on every new TLS connection, even when client
+// SNI is empty (e.g. cert only includes IPs).
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+	if info.KeyFile == "" || info.CertFile == "" {
+		return nil, fmt.Errorf("KeyFile and CertFile must both be present [key: %v, cert: %v]", info.KeyFile, info.CertFile)
+	}
+	if info.Logger == nil {
+		info.Logger = zap.NewNop()
+	}
+
+	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := &tls.Config{
+		MinVersion: tls.VersionTLS12,
+		ServerName: info.ServerName,
+	}
+
+	if len(info.CipherSuites) > 0 {
+		cfg.CipherSuites = info.CipherSuites
+	}
+
+	if info.AllowedCN != "" {
+		cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+			for _, chains := range verifiedChains {
+				if len(chains) != 0 {
+					if info.AllowedCN == chains[0].Subject.CommonName {
+						return nil
+					}
+				}
+			}
+			return errors.New("CommonName authentication failed")
+		}
+	}
+
+	// this only reloads certs when there's a client request
+	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
+	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
+		cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+		if os.IsNotExist(err) {
+			if info.Logger != nil {
+				info.Logger.Warn(
+					"failed to find peer cert files",
+					zap.String("cert-file", info.CertFile),
+					zap.String("key-file", info.KeyFile),
+					zap.Error(err),
+				)
+			}
+		} else if err != nil {
+			if info.Logger != nil {
+				info.Logger.Warn(
+					"failed to create peer certificate",
+					zap.String("cert-file", info.CertFile),
+					zap.String("key-file", info.KeyFile),
+					zap.Error(err),
+				)
+			}
+		}
+		return cert, err
+	}
+	cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
+		cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+		if os.IsNotExist(err) {
+			if info.Logger != nil {
+				info.Logger.Warn(
+					"failed to find client cert files",
+					zap.String("cert-file", info.CertFile),
+					zap.String("key-file", info.KeyFile),
+					zap.Error(err),
+				)
+			}
+		} else if err != nil {
+			if info.Logger != nil {
+				info.Logger.Warn(
+					"failed to create client certificate",
+					zap.String("cert-file", info.CertFile),
+					zap.String("key-file", info.KeyFile),
+					zap.Error(err),
+				)
+			}
+		}
+		return cert, err
+	}
+	return cfg, nil
+}
+
+// cafiles returns a list of CA file paths.
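+// Only TrustedCAFile is consulted; an empty result means no custom CA pool
+// is built and the defaults apply.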
+func (info TLSInfo) cafiles() []string { + cs := make([]string, 0) + if info.TrustedCAFile != "" { + cs = append(cs, info.TrustedCAFile) + } + return cs +} + +// ServerConfig generates a tls.Config object for use by an HTTP server. +func (info TLSInfo) ServerConfig() (*tls.Config, error) { + cfg, err := info.baseConfig() + if err != nil { + return nil, err + } + + cfg.ClientAuth = tls.NoClientCert + if info.TrustedCAFile != "" || info.ClientCertAuth { + cfg.ClientAuth = tls.RequireAndVerifyClientCert + } + + cs := info.cafiles() + if len(cs) > 0 { + cp, err := tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + cfg.ClientCAs = cp + } + + // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server + cfg.NextProtos = []string{"h2"} + + return cfg, nil +} + +// ClientConfig generates a tls.Config object for use by an HTTP client. +func (info TLSInfo) ClientConfig() (*tls.Config, error) { + var cfg *tls.Config + var err error + + if !info.Empty() { + cfg, err = info.baseConfig() + if err != nil { + return nil, err + } + } else { + cfg = &tls.Config{ServerName: info.ServerName} + } + cfg.InsecureSkipVerify = info.InsecureSkipVerify + + cs := info.cafiles() + if len(cs) > 0 { + cfg.RootCAs, err = tlsutil.NewCertPool(cs) + if err != nil { + return nil, err + } + } + + if info.selfCert { + cfg.InsecureSkipVerify = true + } + + if info.EmptyCN { + hasNonEmptyCN := false + cn := "" + tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) { + var block *pem.Block + block, _ = pem.Decode(certPEMBlock) + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return tls.Certificate{}, err + } + if len(cert.Subject.CommonName) != 0 { + hasNonEmptyCN = true + cn = cert.Subject.CommonName + } + return tls.X509KeyPair(certPEMBlock, keyPEMBlock) + }) + if hasNonEmptyCN { + return nil, fmt.Errorf("cert has non empty Common Name (%s)", cn) + } + } + + return cfg, nil +} + +// IsClosedConnError returns true if the error is from closing listener, cmux. +// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 00000000..6f160094 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,272 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials or CRL revoked +// certificates. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) + check tlsCheckFunc +} + +type tlsCheckFunc func(context.Context, *tls.Conn) error + +// NewTLSListener handshakes TLS connections and performs optional CRL checking. +func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + check := func(context.Context, *tls.Conn) error { return nil } + return newTLSListener(l, tlsinfo, check) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + + if len(tlsinfo.CRLFile) > 0 { + prevCheck := check + check = func(ctx context.Context, tlsConn *tls.Conn) error { + if err := prevCheck(ctx, tlsConn); err != nil { + return err + } + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + return checkCRL(tlsinfo.CRLFile, certs) + } + return nil + } + } + + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + check: check, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +func checkSAN(ctx context.Context, tlsConn *tls.Conn) error { + st := tlsConn.ConnectionState() + if certs := st.PeerCertificates; len(certs) > 0 { + addr := tlsConn.RemoteAddr().String() + return checkCertSAN(ctx, certs[0], addr) + } + return nil +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. 
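+// Connections whose handshake is still in flight when the listener closes are
+// tracked in a pending set and force-closed during teardown, so no handshake
+// goroutine is leaked.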
+func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + if err := l.check(ctx, tlsConn); err != nil { + l.handshakeFailure(tlsConn, err) + return + } + + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCRL(crlPath string, cert []*x509.Certificate) error { + // TODO: cache + crlBytes, err := ioutil.ReadFile(crlPath) + if err != nil { + return err + } + certList, err := x509.ParseCRL(crlBytes) + if err != nil { + return err + } + revokedSerials := make(map[string]struct{}) + for _, rc := range certList.TBSCertList.RevokedCertificates { + revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{} + } + for _, c := range cert { + serial := string(c.SerialNumber.Bytes()) + if _, ok := revokedSerials[serial]; ok { + return fmt.Errorf("transport: certificate serial %x revoked", serial) + } + } + return nil +} + +func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + h, _, herr := net.SplitHostPort(remoteAddr) + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + +func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' 
{ + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go new file mode 100644 index 00000000..7e8c0203 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go @@ -0,0 +1,44 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "time" +) + +type timeoutConn struct { + net.Conn + wtimeoutd time.Duration + rdtimeoutd time.Duration +} + +func (c timeoutConn) Write(b []byte) (n int, err error) { + if c.wtimeoutd > 0 { + if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Write(b) +} + +func (c timeoutConn) Read(b []byte) (n int, err error) { + if c.rdtimeoutd > 0 { + if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil { + return 0, err + } + } + return c.Conn.Read(b) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go new file mode 100644 index 00000000..6ae39ecf --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go @@ -0,0 +1,36 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type rwTimeoutDialer struct {
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+	net.Dialer
+}
+
+func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
+	conn, err := d.Dialer.Dial(network, address)
+	tconn := &timeoutConn{
+		rdtimeoutd: d.rdtimeoutd,
+		wtimeoutd:  d.wtimeoutd,
+		Conn:       conn,
+	}
+	return tconn, err
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
new file mode 100644
index 00000000..273e99fe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+// NewTimeoutListener returns a listener that listens on the given address.
+// If read/write on the accepted connection blocks longer than its time limit,
+// it will return a timeout error.
+func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
+	ln, err := newListener(addr, scheme)
+	if err != nil {
+		return nil, err
+	}
+	ln = &rwTimeoutListener{
+		Listener:   ln,
+		rdtimeoutd: rdtimeoutd,
+		wtimeoutd:  wtimeoutd,
+	}
+	if ln, err = wrapTLS(scheme, tlsinfo, ln); err != nil {
+		return nil, err
+	}
+	return ln, nil
+}
+
+type rwTimeoutListener struct {
+	net.Listener
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+}
+
+func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
+	c, err := rwln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	return timeoutConn{
+		Conn:       c,
+		wtimeoutd:  rwln.wtimeoutd,
+		rdtimeoutd: rwln.rdtimeoutd,
+	}, nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go
new file mode 100644
index 00000000..ea16b4c0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// NewTimeoutTransport returns a transport created using the given TLS info.
+// If read/write on the created connection blocks longer than its time limit,
+// it will return a timeout error.
+// If read/write timeout is set, transport will not be able to reuse connection. +func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) { + tr, err := NewTransport(info, dialtimeoutd) + if err != nil { + return nil, err + } + + if rdtimeoutd != 0 || wtimeoutd != 0 { + // the timed out connection will timeout soon after it is idle. + // it should not be put back to http transport as an idle connection for future usage. + tr.MaxIdleConnsPerHost = -1 + } else { + // allow more idle connections between peers to avoid unnecessary port allocation. + tr.MaxIdleConnsPerHost = 1024 + } + + tr.Dial = (&rwTimeoutDialer{ + Dialer: net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }, + rdtimeoutd: rdtimeoutd, + wtimeoutd: wtimeoutd, + }).Dial + return tr, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go new file mode 100644 index 00000000..62fe0d38 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/tls.go @@ -0,0 +1,49 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "strings" + "time" +) + +// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those +// endpoints that could be validated as secure. +func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) { + t, err := NewTransport(tlsInfo, 5*time.Second) + if err != nil { + return nil, err + } + var errs []string + var endpoints []string + for _, ep := range eps { + if !strings.HasPrefix(ep, "https://") { + errs = append(errs, fmt.Sprintf("%q is insecure", ep)) + continue + } + conn, cerr := t.Dial("tcp", ep[len("https://"):]) + if cerr != nil { + errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr)) + continue + } + conn.Close() + endpoints = append(endpoints, ep) + } + if len(errs) != 0 { + err = fmt.Errorf("%s", strings.Join(errs, ",")) + } + return endpoints, err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go new file mode 100644 index 00000000..4a7fe69d --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/transport.go @@ -0,0 +1,71 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package transport + +import ( + "net" + "net/http" + "strings" + "time" +) + +type unixTransport struct{ *http.Transport } + +func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) { + cfg, err := info.ClientConfig() + if err != nil { + return nil, err + } + + t := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: dialtimeoutd, + // value taken from http.DefaultTransport + KeepAlive: 30 * time.Second, + }).Dial, + // value taken from http.DefaultTransport + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + + dialer := (&net.Dialer{ + Timeout: dialtimeoutd, + KeepAlive: 30 * time.Second, + }) + dial := func(net, addr string) (net.Conn, error) { + return dialer.Dial("unix", addr) + } + + tu := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: cfg, + } + ut := &unixTransport{tu} + + t.RegisterProtocol("unix", ut) + t.RegisterProtocol("unixs", ut) + + return t, nil +} + +func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) { + url := *req.URL + req.URL = &url + req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1) + return urt.Transport.RoundTrip(req) +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go new file mode 100644 index 00000000..123e2036 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -0,0 +1,40 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net" + "os" +) + +type unixListener struct{ net.Listener } + +func NewUnixListener(addr string) (net.Listener, error) { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return nil, err + } + l, err := net.Listen("unix", addr) + if err != nil { + return nil, err + } + return &unixListener{l}, nil +} + +func (ul *unixListener) Close() error { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { + return err + } + return ul.Listener.Close() +} diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 00000000..23a0ada2 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 00000000..76cf4852 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
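+	// Per the semver spec, this ordering gives, e.g.:
+	//   1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < 1.0.0-beta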
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 00000000..e256b41a --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE new file mode 100644 index 00000000..23a0ada2 --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go new file mode 100644 index 00000000..a0f4837a --- /dev/null +++ b/vendor/github.com/coreos/go-systemd/journal/journal.go @@ -0,0 +1,225 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package journal provides write bindings to the local systemd journal. +// It is implemented in pure Go and connects to the journal directly over its +// unix socket. +// +// To read from the journal, see the "sdjournal" package, which wraps the +// sd-journal C API. +// +// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html +package journal + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// Priority of a journal message +type Priority int + +const ( + PriEmerg Priority = iota + PriAlert + PriCrit + PriErr + PriWarning + PriNotice + PriInfo + PriDebug +) + +var ( + // This can be overridden at build-time: + // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable + journalSocket = "/run/systemd/journal/socket" + + // unixConnPtr atomically holds the local unconnected Unix-domain socket.
+ // Concrete safe pointer type: *net.UnixConn + unixConnPtr unsafe.Pointer + // onceConn ensures that unixConnPtr is initialized exactly once. + onceConn sync.Once +) + +func init() { + onceConn.Do(initConn) +} + +// Enabled checks whether the local systemd journal is available for logging. +func Enabled() bool { + onceConn.Do(initConn) + + if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil { + return false + } + + if _, err := net.Dial("unixgram", journalSocket); err != nil { + return false + } + + return true +} + +// Send a message to the local systemd journal. vars is a map of journald +// fields to values. Fields must be composed of uppercase letters, numbers, +// and underscores, but must not start with an underscore. Within these +// restrictions, any arbitrary field name may be used. Some names have special +// significance: see the journalctl documentation +// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) +// for more details. vars may be nil. +func Send(message string, priority Priority, vars map[string]string) error { + conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) + if conn == nil { + return errors.New("could not initialize socket to journald") + } + + socketAddr := &net.UnixAddr{ + Name: journalSocket, + Net: "unixgram", + } + + data := new(bytes.Buffer) + appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) + appendVariable(data, "MESSAGE", message) + for k, v := range vars { + appendVariable(data, k, v) + } + + _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr) + if err == nil { + return nil + } + if !isSocketSpaceError(err) { + return err + } + + // Large log entry, send it via tempfile and ancillary-fd. + file, err := tempFd() + if err != nil { + return err + } + defer file.Close() + _, err = io.Copy(file, data) + if err != nil { + return err + } + rights := syscall.UnixRights(int(file.Fd())) + _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr) + if err != nil { + return err + } + + return nil +} + +// Print prints a message to the local systemd journal using Send(). +func Print(priority Priority, format string, a ...interface{}) error { + return Send(fmt.Sprintf(format, a...), priority, nil) +} + +func appendVariable(w io.Writer, name, value string) { + if err := validVarName(name); err != nil { + fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name) + } + if strings.ContainsRune(value, '\n') { + /* When the value contains a newline, we write: + * - the variable name, followed by a newline + * - the size (in 64bit little endian format) + * - the data, followed by a newline + */ + fmt.Fprintln(w, name) + binary.Write(w, binary.LittleEndian, uint64(len(value))) + fmt.Fprintln(w, value) + } else { + /* just write the variable and value all on one line */ + fmt.Fprintf(w, "%s=%s\n", name, value) + } +} + +// validVarName validates a variable name to make sure journald will accept it. 
+// The variable name must be in uppercase and consist only of characters, +// numbers and underscores, and may not begin with an underscore: +// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html +func validVarName(name string) error { + if name == "" { + return errors.New("Empty variable name") + } else if name[0] == '_' { + return errors.New("Variable name begins with an underscore") + } + + for _, c := range name { + if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') { + return errors.New("Variable name contains invalid characters") + } + } + return nil +} + +// isSocketSpaceError checks whether the error is signaling +// an "overlarge message" condition. +func isSocketSpaceError(err error) bool { + opErr, ok := err.(*net.OpError) + if !ok || opErr == nil { + return false + } + + sysErr, ok := opErr.Err.(*os.SyscallError) + if !ok || sysErr == nil { + return false + } + + return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS +} + +// tempFd creates a temporary, unlinked file under `/dev/shm`. +func tempFd() (*os.File, error) { + file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") + if err != nil { + return nil, err + } + err = syscall.Unlink(file.Name()) + if err != nil { + return nil, err + } + return file, nil +} + +// initConn initializes the global `unixConnPtr` socket. +// It is meant to be called exactly once, at program startup. +func initConn() { + autobind, err := net.ResolveUnixAddr("unixgram", "") + if err != nil { + return + } + + sock, err := net.ListenUnixgram("unixgram", autobind) + if err != nil { + return + } + + atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock)) +} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/coreos/pkg/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/coreos/pkg/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md new file mode 100644 index 00000000..f79dbfca --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/README.md @@ -0,0 +1,39 @@ +# capnslog, the CoreOS logging package + +There are far too many logging packages out there, with varying degrees of licenses, far too many features (colorization, all sorts of log frameworks) or are just a pain to use (lack of `Fatalln()`?). +capnslog provides a simple but consistent logging interface suitable for all kinds of projects. 
+ +### Design Principles + +##### `package main` is the place where logging gets turned on and routed + +A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak. + +##### All log options are runtime-configurable. + +Still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly. + +##### There is one log object per package. It is registered under its repository and package name. + +`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs. + +##### There is *one* output stream, and it is an `io.Writer` composed with a formatter. + +Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer. + +Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application. + +##### Log objects are an interface + +An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed. + +##### Log levels have specific meanings: + + * Critical: Unrecoverable. Must fail. + * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost + * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning. + * Notice: Normal, but important (uncommon) log information. + * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations. + * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices. + * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query. + diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go new file mode 100644 index 00000000..b305a845 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go @@ -0,0 +1,157 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
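Aside: a minimal sketch of the wiring the README above prescribes: each library package registers one silent logger, and `main` chooses the formatter and the levels at startup. The repository/package names and the level-config string are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// In a library package: one logger, registered under its repo and package name.
var plog = capnslog.NewPackageLogger("github.com/example/myrepo", "worker")

func main() {
	// main owns the single output stream and the levels.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	// Raise verbosity per package, e.g. from a "-log-level worker=DEBUG" flag.
	repoLog := capnslog.MustRepoLogger("github.com/example/myrepo")
	if cfg, err := repoLog.ParseLogLevelConfig("worker=DEBUG"); err == nil {
		repoLog.SetLogLevel(cfg)
	}

	plog.Infof("starting %d workers", 4)
	plog.Debug("debug output is now visible for this package")
}
```

The "*" key in the map returned by `ParseLogLevelConfig` applies to every package in the repo and is processed first, so a string like `"*=NOTICE,worker=DEBUG"` quiets everything except the package being debugged.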
+ +package capnslog + +import ( + "bufio" + "fmt" + "io" + "log" + "runtime" + "strings" + "time" +) + +type Formatter interface { + Format(pkg string, level LogLevel, depth int, entries ...interface{}) + Flush() +} + +func NewStringFormatter(w io.Writer) Formatter { + return &StringFormatter{ + w: bufio.NewWriter(w), + } +} + +type StringFormatter struct { + w *bufio.Writer +} + +func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { + now := time.Now().UTC() + s.w.WriteString(now.Format(time.RFC3339)) + s.w.WriteByte(' ') + writeEntries(s.w, pkg, l, i, entries...) + s.Flush() +} + +func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { + if pkg != "" { + w.WriteString(pkg + ": ") + } + str := fmt.Sprint(entries...) + endsInNL := strings.HasSuffix(str, "\n") + w.WriteString(str) + if !endsInNL { + w.WriteString("\n") + } +} + +func (s *StringFormatter) Flush() { + s.w.Flush() +} + +func NewPrettyFormatter(w io.Writer, debug bool) Formatter { + return &PrettyFormatter{ + w: bufio.NewWriter(w), + debug: debug, + } +} + +type PrettyFormatter struct { + w *bufio.Writer + debug bool +} + +func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { + now := time.Now() + ts := now.Format("2006-01-02 15:04:05") + c.w.WriteString(ts) + ms := now.Nanosecond() / 1000 + c.w.WriteString(fmt.Sprintf(".%06d", ms)) + if c.debug { + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) + } + c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) + writeEntries(c.w, pkg, l, depth, entries...) + c.Flush() +} + +func (c *PrettyFormatter) Flush() { + c.w.Flush() +} + +// LogFormatter emulates the form of the traditional built-in logger. +type LogFormatter struct { + logger *log.Logger + prefix string +} + +// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the +// golang log package to actually do the logging work so that logs look similar. +func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter { + return &LogFormatter{ + logger: log.New(w, "", flag), // don't use prefix here + prefix: prefix, // save it instead + } +} + +// Format builds a log message for the LogFormatter. The LogLevel is ignored. +func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) { + str := fmt.Sprint(entries...) + prefix := lf.prefix + if pkg != "" { + prefix = fmt.Sprintf("%s%s: ", prefix, pkg) + } + lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5 +} + +// Flush is included so that the interface is complete, but is a no-op. +func (lf *LogFormatter) Flush() { + // noop +} + +// NilFormatter is a no-op log formatter that does nothing. +type NilFormatter struct { +} + +// NewNilFormatter is a helper to produce a new LogFormatter struct. It logs no +// messages so that you can cause part of your logging to be silent. +func NewNilFormatter() Formatter { + return &NilFormatter{} +} + +// Format does nothing. +func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) { + // noop +} + +// Flush is included so that the interface is complete, but is a no-op. 
+func (_ *NilFormatter) Flush() { + // noop +} diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go new file mode 100644 index 00000000..426603ef --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go @@ -0,0 +1,96 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "bufio" + "bytes" + "io" + "os" + "runtime" + "strconv" + "strings" + "time" +) + +var pid = os.Getpid() + +type GlogFormatter struct { + StringFormatter +} + +func NewGlogFormatter(w io.Writer) *GlogFormatter { + g := &GlogFormatter{} + g.w = bufio.NewWriter(w) + return g +} + +func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { + g.w.Write(GlogHeader(level, depth+1)) + g.StringFormatter.Format(pkg, level, depth+1, entries...) +} + +func GlogHeader(level LogLevel, depth int) []byte { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := time.Now().UTC() + _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number + } + buf := &bytes.Buffer{} + buf.Grow(30) + _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.WriteString(level.Char()) + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000)) + buf.WriteByte('Z') + buf.WriteByte(' ') + buf.WriteString(strconv.Itoa(pid)) + buf.WriteByte(' ') + buf.WriteString(file) + buf.WriteByte(':') + buf.WriteString(strconv.Itoa(line)) + buf.WriteByte(']') + buf.WriteByte(' ') + return buf.Bytes() +} + +const digits = "0123456789" + +func twoDigits(b *bytes.Buffer, d int) { + c2 := digits[d%10] + d /= 10 + c1 := digits[d%10] + b.WriteByte(c1) + b.WriteByte(c2) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go new file mode 100644 index 00000000..38ce6d26 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init.go @@ -0,0 +1,49 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
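Aside: a short sketch of routing output through the glog-style formatter above instead of the default. This is illustrative only; the header layout follows the `Lmmdd hh:mm:ss.uuuuuu threadid file:line]` comment in `GlogHeader`, and note that this implementation writes the microsecond field without zero-padding:

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

var plog = capnslog.NewPackageLogger("github.com/example/myrepo", "main")

func main() {
	// Emit glog-style headers: level char, date, UTC time, pid, file:line.
	capnslog.SetFormatter(capnslog.NewGlogFormatter(os.Stderr))
	plog.Warning("low disk space")
	// Each line begins with the glog header (level char, mmdd, UTC
	// hh:mm:ss with unpadded microseconds, pid, file:line]) followed by
	// the embedded StringFormatter output: an RFC3339 timestamp, then
	// "pkg: message".
}
```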
+// +// +build !windows + +package capnslog + +import ( + "io" + "os" + "syscall" +) + +// Here's where the opinionation comes in. We need some sensible defaults, +// especially after taking over the log package. Your project (whatever it may +// be) may see things differently. That's okay; there should be no defaults in +// the main package that cannot be controlled or overridden programmatically, +// otherwise it's a bug. The way to do so is to create your own init_log.go +// file, much like this one. + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewDefaultFormatter(os.Stderr)) + SetGlobalLogLevel(INFO) +} + +func NewDefaultFormatter(out io.Writer) Formatter { + if syscall.Getppid() == 1 { + // We're running under init, which may be systemd. + f, err := NewJournaldFormatter() + if err == nil { + return f + } + } + return NewPrettyFormatter(out, false) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go new file mode 100644 index 00000000..45530506 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go @@ -0,0 +1,25 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import "os" + +func init() { + initHijack() + + // Go `log` package uses os.Stderr. + SetFormatter(NewPrettyFormatter(os.Stderr, false)) + SetGlobalLogLevel(INFO) +} diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go new file mode 100644 index 00000000..72e05207 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go @@ -0,0 +1,68 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
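Aside: the journald formatter that follows maps capnslog levels onto priorities from the vendored `journal` package shown earlier. For reference, a minimal sketch of calling that package directly; the extra field name is an illustrative assumption (journald field names must consist of uppercase letters, digits, and underscores, and must not begin with an underscore, as `validVarName` enforces):

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/journal"
)

func main() {
	if !journal.Enabled() {
		fmt.Println("journald socket unavailable; log elsewhere")
		return
	}

	// Send writes a structured entry; PRIORITY and MESSAGE are filled in,
	// and extra fields ride along as journald metadata. Oversized entries
	// are passed via an unlinked temp file under /dev/shm whose fd is
	// sent over the socket.
	err := journal.Send("cache rebuilt", journal.PriInfo, map[string]string{
		"CACHE_ENTRIES": "1024", // illustrative field
	})
	if err != nil {
		fmt.Println("journal send failed:", err)
	}

	// Print is a Printf-style convenience wrapper around Send.
	journal.Print(journal.PriWarning, "disk usage at %d%%", 91)
}
```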
+// +// +build !windows + +package capnslog + +import ( + "errors" + "fmt" + "os" + "path/filepath" + + "github.com/coreos/go-systemd/journal" +) + +func NewJournaldFormatter() (Formatter, error) { + if !journal.Enabled() { + return nil, errors.New("No systemd detected") + } + return &journaldFormatter{}, nil +} + +type journaldFormatter struct{} + +func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + var pri journal.Priority + switch l { + case CRITICAL: + pri = journal.PriCrit + case ERROR: + pri = journal.PriErr + case WARNING: + pri = journal.PriWarning + case NOTICE: + pri = journal.PriNotice + case INFO: + pri = journal.PriInfo + case DEBUG: + pri = journal.PriDebug + case TRACE: + pri = journal.PriDebug + default: + panic("Unhandled loglevel") + } + msg := fmt.Sprint(entries...) + tags := map[string]string{ + "PACKAGE": pkg, + "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), + } + err := journal.Send(msg, pri, tags) + if err != nil { + fmt.Fprintln(os.Stderr, err) + } +} + +func (j *journaldFormatter) Flush() {} diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go new file mode 100644 index 00000000..970086b9 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go @@ -0,0 +1,39 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "log" +) + +func initHijack() { + pkg := NewPackageLogger("log", "") + w := packageWriter{pkg} + log.SetFlags(0) + log.SetPrefix("") + log.SetOutput(w) +} + +type packageWriter struct { + pl *PackageLogger +} + +func (p packageWriter) Write(b []byte) (int, error) { + if p.pl.level < INFO { + return 0, nil + } + p.pl.internalLog(calldepth+2, INFO, string(b)) + return len(b), nil +} diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go new file mode 100644 index 00000000..226b60c2 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go @@ -0,0 +1,245 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "errors" + "strings" + "sync" +) + +// LogLevel is the set of all log levels. +type LogLevel int8 + +const ( + // CRITICAL is the lowest log level; only errors which will end the program will be propagated. 
+ CRITICAL LogLevel = iota - 1 + // ERROR is for errors that are not fatal but lead to troubling behavior. + ERROR + // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. + WARNING + // NOTICE is for normal but significant conditions. + NOTICE + // INFO is a log level for common, everyday log updates. + INFO + // DEBUG is the default hidden level for more verbose updates about internal processes. + DEBUG + // TRACE is for (potentially) call by call tracing of programs. + TRACE +) + +// Char returns a single-character representation of the log level. +func (l LogLevel) Char() string { + switch l { + case CRITICAL: + return "C" + case ERROR: + return "E" + case WARNING: + return "W" + case NOTICE: + return "N" + case INFO: + return "I" + case DEBUG: + return "D" + case TRACE: + return "T" + default: + panic("Unhandled loglevel") + } +} + +// String returns a multi-character representation of the log level. +func (l LogLevel) String() string { + switch l { + case CRITICAL: + return "CRITICAL" + case ERROR: + return "ERROR" + case WARNING: + return "WARNING" + case NOTICE: + return "NOTICE" + case INFO: + return "INFO" + case DEBUG: + return "DEBUG" + case TRACE: + return "TRACE" + default: + panic("Unhandled loglevel") + } +} + +// Update using the given string value. Fulfills the flag.Value interface. +func (l *LogLevel) Set(s string) error { + value, err := ParseLevel(s) + if err != nil { + return err + } + + *l = value + return nil +} + +// Returns an empty string, only here to fulfill the pflag.Value interface. +func (l *LogLevel) Type() string { + return "" +} + +// ParseLevel translates some potential loglevel strings into their corresponding levels. +func ParseLevel(s string) (LogLevel, error) { + switch s { + case "CRITICAL", "C": + return CRITICAL, nil + case "ERROR", "0", "E": + return ERROR, nil + case "WARNING", "1", "W": + return WARNING, nil + case "NOTICE", "2", "N": + return NOTICE, nil + case "INFO", "3", "I": + return INFO, nil + case "DEBUG", "4", "D": + return DEBUG, nil + case "TRACE", "5", "T": + return TRACE, nil + } + return CRITICAL, errors.New("couldn't parse log level " + s) +} + +type RepoLogger map[string]*PackageLogger + +type loggerStruct struct { + sync.Mutex + repoMap map[string]RepoLogger + formatter Formatter +} + +// logger is the global logger +var logger = new(loggerStruct) + +// SetGlobalLogLevel sets the log level for all packages in all repositories +// registered with capnslog. +func SetGlobalLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + for _, r := range logger.repoMap { + r.setRepoLogLevelInternal(l) + } +} + +// GetRepoLogger may return the handle to the repository's set of packages' loggers. +func GetRepoLogger(repo string) (RepoLogger, error) { + logger.Lock() + defer logger.Unlock() + r, ok := logger.repoMap[repo] + if !ok { + return nil, errors.New("no packages registered for repo " + repo) + } + return r, nil +} + +// MustRepoLogger returns the handle to the repository's packages' loggers. +func MustRepoLogger(repo string) RepoLogger { + r, err := GetRepoLogger(repo) + if err != nil { + panic(err) + } + return r +} + +// SetRepoLogLevel sets the log level for all packages in the repository. 
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + r.setRepoLogLevelInternal(l) +} + +func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { + for _, v := range r { + v.level = l + } +} + +// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in +// order, and returns a map of the results, for use in SetLogLevel. +func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { + setlist := strings.Split(conf, ",") + out := make(map[string]LogLevel) + for _, setstring := range setlist { + setting := strings.Split(setstring, "=") + if len(setting) != 2 { + return nil, errors.New("oddly structured `pkg=level` option: " + setstring) + } + l, err := ParseLevel(setting[1]) + if err != nil { + return nil, err + } + out[setting[0]] = l + } + return out, nil +} + +// SetLogLevel takes a map of package names within a repository to their desired +// loglevel, and sets the levels appropriately. Unknown packages are ignored. +// "*" is a special package name that corresponds to all packages, and will be +// processed first. +func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { + logger.Lock() + defer logger.Unlock() + if l, ok := m["*"]; ok { + r.setRepoLogLevelInternal(l) + } + for k, v := range m { + l, ok := r[k] + if !ok { + continue + } + l.level = v + } +} + +// SetFormatter sets the formatting function for all logs. +func SetFormatter(f Formatter) { + logger.Lock() + defer logger.Unlock() + logger.formatter = f +} + +// NewPackageLogger creates a package logger object. +// This should be defined as a global var in your package, referencing your repo. +func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { + logger.Lock() + defer logger.Unlock() + if logger.repoMap == nil { + logger.repoMap = make(map[string]RepoLogger) + } + r, rok := logger.repoMap[repo] + if !rok { + logger.repoMap[repo] = make(RepoLogger) + r = logger.repoMap[repo] + } + p, pok := r[pkg] + if !pok { + r[pkg] = &PackageLogger{ + pkg: pkg, + level: INFO, + } + p = r[pkg] + } + return +} diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go new file mode 100644 index 00000000..00ff3714 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go @@ -0,0 +1,191 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package capnslog + +import ( + "fmt" + "os" +) + +type PackageLogger struct { + pkg string + level LogLevel +} + +const calldepth = 2 + +func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { + logger.Lock() + defer logger.Unlock() + if inLevel != CRITICAL && p.level < inLevel { + return + } + if logger.formatter != nil { + logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) + } +} + +// SetLevel allows users to change the current logging level. 
+func (p *PackageLogger) SetLevel(l LogLevel) { + logger.Lock() + defer logger.Unlock() + p.level = l +} + +// LevelAt checks if the given log level will be outputted under current setting. +func (p *PackageLogger) LevelAt(l LogLevel) bool { + logger.Lock() + defer logger.Unlock() + return p.level >= l +} + +// Log a formatted string at any level between ERROR and TRACE +func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) +} + +// Log a message at any level between ERROR and TRACE +func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { + p.internalLog(calldepth, l, fmt.Sprint(args...)) +} + +// log stdlib compatibility + +func (p *PackageLogger) Println(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) +} + +func (p *PackageLogger) Printf(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Print(args ...interface{}) { + p.internalLog(calldepth, INFO, fmt.Sprint(args...)) +} + +// Panic and fatal + +func (p *PackageLogger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Panicln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + panic(s) +} + +func (p *PackageLogger) Fatalf(format string, args ...interface{}) { + p.Logf(CRITICAL, format, args...) + os.Exit(1) +} + +func (p *PackageLogger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +func (p *PackageLogger) Fatalln(args ...interface{}) { + s := fmt.Sprintln(args...) + p.internalLog(calldepth, CRITICAL, s) + os.Exit(1) +} + +// Error Functions + +func (p *PackageLogger) Errorf(format string, args ...interface{}) { + p.Logf(ERROR, format, args...) +} + +func (p *PackageLogger) Error(entries ...interface{}) { + p.internalLog(calldepth, ERROR, entries...) +} + +// Warning Functions + +func (p *PackageLogger) Warningf(format string, args ...interface{}) { + p.Logf(WARNING, format, args...) +} + +func (p *PackageLogger) Warning(entries ...interface{}) { + p.internalLog(calldepth, WARNING, entries...) +} + +// Notice Functions + +func (p *PackageLogger) Noticef(format string, args ...interface{}) { + p.Logf(NOTICE, format, args...) +} + +func (p *PackageLogger) Notice(entries ...interface{}) { + p.internalLog(calldepth, NOTICE, entries...) +} + +// Info Functions + +func (p *PackageLogger) Infof(format string, args ...interface{}) { + p.Logf(INFO, format, args...) +} + +func (p *PackageLogger) Info(entries ...interface{}) { + p.internalLog(calldepth, INFO, entries...) +} + +// Debug Functions + +func (p *PackageLogger) Debugf(format string, args ...interface{}) { + if p.level < DEBUG { + return + } + p.Logf(DEBUG, format, args...) +} + +func (p *PackageLogger) Debug(entries ...interface{}) { + if p.level < DEBUG { + return + } + p.internalLog(calldepth, DEBUG, entries...) +} + +// Trace Functions + +func (p *PackageLogger) Tracef(format string, args ...interface{}) { + if p.level < TRACE { + return + } + p.Logf(TRACE, format, args...) +} + +func (p *PackageLogger) Trace(entries ...interface{}) { + if p.level < TRACE { + return + } + p.internalLog(calldepth, TRACE, entries...) 
+} + +func (p *PackageLogger) Flush() { + logger.Lock() + defer logger.Unlock() + logger.formatter.Flush() +} diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go new file mode 100644 index 00000000..4be5a1f2 --- /dev/null +++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go @@ -0,0 +1,65 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// +build !windows + +package capnslog + +import ( + "fmt" + "log/syslog" +) + +func NewSyslogFormatter(w *syslog.Writer) Formatter { + return &syslogFormatter{w} +} + +func NewDefaultSyslogFormatter(tag string) (Formatter, error) { + w, err := syslog.New(syslog.LOG_DEBUG, tag) + if err != nil { + return nil, err + } + return NewSyslogFormatter(w), nil +} + +type syslogFormatter struct { + w *syslog.Writer +} + +func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { + for _, entry := range entries { + str := fmt.Sprint(entry) + switch l { + case CRITICAL: + s.w.Crit(str) + case ERROR: + s.w.Err(str) + case WARNING: + s.w.Warning(str) + case NOTICE: + s.w.Notice(str) + case INFO: + s.w.Info(str) + case DEBUG: + s.w.Debug(str) + case TRACE: + s.w.Debug(str) + default: + panic("Unhandled loglevel") + } + } +} + +func (s *syslogFormatter) Flush() { +} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index c8364161..bc52e96f 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -2,7 +2,7 @@ ISC License Copyright (c) 2012-2016 Dave Collins -Permission to use, copy, modify, and distribute this software for any +Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index 8a4a6589..79299478 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -16,7 +16,9 @@ // when the code is not running on Google App Engine, compiled by GopherJS, and // "-tags safe" is not added to the go build command line. The "disableunsafe" // tag is deprecated and thus should not be used. -// +build !js,!appengine,!safe,!disableunsafe +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 package spew @@ -34,80 +36,49 @@ const ( ptrSize = unsafe.Sizeof((*byte)(nil)) ) +type flag uintptr + var ( - // offsetPtr, offsetScalar, and offsetFlag are the offsets for the - // internal reflect.Value fields. 
These values are valid before golang - commit ecccf07e7f9d which changed the format. The are also valid - after commit 82f48826c6c7 which changed the format again to mirror - the original format. Code in the init function updates these offsets - as necessary. - offsetPtr = uintptr(ptrSize) - offsetScalar = uintptr(0) - offsetFlag = uintptr(ptrSize * 2) - - // flagKindWidth and flagKindShift indicate various bits that the - // reflect package uses internally to track kind information. - // - // flagRO indicates whether or not the value field of a reflect.Value is - // read-only. - // - // flagIndir indicates whether the value field of a reflect.Value is - // the actual data or a pointer to the data. - // - // These values are valid before golang commit 90a7c3c86944 which - // changed their positions. Code in the init function updates these - // flags as necessary. - flagKindWidth = uintptr(5) - flagKindShift = uintptr(flagKindWidth - 1) - flagRO = uintptr(1 << 0) - flagIndir = uintptr(1 << 1) + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag ) -func init() { - // Older versions of reflect.Value stored small integers directly in the - // ptr field (which is named val in the older versions). Versions - // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named - // scalar for this purpose which unfortunately came before the flag - // field, so the offset of the flag field is different for those - // versions. - // - // This code constructs a new reflect.Value from a known small integer - // and checks if the size of the reflect.Value struct indicates it has - // the scalar field. When it does, the offsets are updated accordingly. - vv := reflect.ValueOf(0xf00) - if unsafe.Sizeof(vv) == (ptrSize * 4) { - offsetScalar = ptrSize * 2 - offsetFlag = ptrSize * 3 - } +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) - // Commit 90a7c3c86944 changed the flag positions such that the low - // order bits are the kind. This code extracts the kind from the flags - // field and ensures it's the correct type. When it's not, the flag - // order has been changed to the newer format, so the flags are updated - // accordingly. - upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) - upfv := *(*uintptr)(upf) - flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) - if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { - flagKindShift = 0 - flagRO = 1 << 5 - flagIndir = 1 << 6 - - // Commit adf9b30e5594 modified the flags to separate the - // flagRO flag into two bits which specifies whether or not the - // field is embedded. This causes flagIndir to move over a bit - // and means that flagRO is the combination of either of the - // original flagRO bit and the new bit. - // - // This code detects the change by extracting what used to be - // the indirect bit to ensure it's set. When it's not, the flag - // order has been changed to the newer format, so the flags are - // updated accordingly. - if upfv&flagIndir == 0 { - flagRO = 3 << 5 - flagIndir = 1 << 7 - } +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip.
+ ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) } // unsafeReflectValue converts the passed reflect.Value into a one that bypasses @@ -119,34 +90,56 @@ func init() { // This allows us to check for implementations of the Stringer and error // interfaces to be used for pretty printing ordinarily unaddressable and // inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { - indirects := 1 - vt := v.Type() - upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) - rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) - if rvf&flagIndir != 0 { - vt = reflect.PtrTo(v.Type()) - indirects++ - } else if offsetScalar != 0 { - // The value is in the scalar field when it's not one of the - // reference types. - switch vt.Kind() { - case reflect.Uintptr: - case reflect.Chan: - case reflect.Func: - case reflect.Map: - case reflect.Ptr: - case reflect.UnsafePointer: - default: - upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + - offsetScalar) - } +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} - pv := reflect.NewAt(vt, upv) - rv = pv - for i := 0; i < indirects; i++ { - rv = rv.Elem() +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. 
+ for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } } - return rv + panic("reflect.Value read-only flag has changed semantics") } diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000..205c28d6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 7c519ff4..1be8ce94 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) { w.Write(closeParenBytes) } -// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' // prefix to Writer w. func printHexPtr(w io.Writer, p uintptr) { // Null pointer. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index df1d582a..f78d89fc 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -35,16 +35,16 @@ var ( // cCharRE is a regular expression that matches a cgo char. // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) // cUnsignedCharRE is a regular expression that matches a cgo unsigned // char. It is used to detect unsigned character arrays to hexdump // them. 
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) // cUint8tCharRE is a regular expression that matches a cgo uint8_t. // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) ) // dumpState contains information about the state of a dump operation. @@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) { // Display dereferenced value. d.w.Write(openParenBytes) switch { - case nilFound == true: + case nilFound: d.w.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: d.w.Write(circularBytes) default: diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index c49875ba..b04edb7d 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) { // Display dereferenced value. switch { - case nilFound == true: + case nilFound: f.fs.Write(nilAngleBytes) - case cycleFound == true: + case cycleFound: f.fs.Write(circularShortBytes) default: diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 00000000..df83a9c2 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 00000000..7fc1f793 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
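As a quick counterpart to the parsing examples below, creating and signing a token with the new claims types is a claims literal plus two calls. This is a sketch assuming the v3 `NewWithClaims`/`SignedString` API (`jwt` = github.com/dgrijalva/jwt-go, plus the standard `time` package); `mySigningKey` and the claim values are placeholders:

```go
	mySigningKey := []byte("secret") // placeholder key for illustration

	claims := jwt.MapClaims{
		"user": "alice",                          // placeholder claim
		"exp":  time.Now().Add(time.Hour).Unix(), // expiry as a unix timestamp
	}

	// NewWithClaims pairs a signing method with the claims; SignedString
	// produces the compact three-part token string.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	signed, err := token.SignedString(mySigningKey)
```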
+ +`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this: + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument. +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header. + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys.
If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. + +```go + func keyLookupFunc(token *jwt.Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 00000000..d7749077 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,104 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs.
Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret.
This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
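Pulling the README's advice together (check the `alg` before trusting the token, and hand the HMAC methods a `[]byte` key), a minimal end-to-end verification sketch could look like the following; the token string and `"secret"` key are stand-ins for illustration, not part of the library:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	tokenString := "eyJ..." // stand-in: a compact JWT taken from a request

	token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
		// Per the security notice above, verify the signing method
		// before returning any key material.
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return []byte("secret"), nil // HMAC methods expect a []byte key
	})
	if err != nil {
		fmt.Println("parse failed:", err) // descriptive errors, per Troubleshooting
		return
	}

	if claims, ok := token.Claims.(jwt.MapClaims); ok && token.Valid {
		fmt.Println("subject:", claims["sub"])
	}
}
```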
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000..63702983 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. 
Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 00000000..f0228f02 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 00000000..f9773812 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an *ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + 
+ if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an *ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...)
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000..d19624b7 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000..1c93024a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000..addbe5d4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method.
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 00000000..291213c4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000..f04d189d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000..d6901d9a --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+	return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+	token, parts, err := p.ParseUnverified(tokenString, claims)
+	if err != nil {
+		return token, err
+	}
+
+	// Verify signing method is in the required set
+	if p.ValidMethods != nil {
+		var signingMethodValid = false
+		var alg = token.Method.Alg()
+		for _, m := range p.ValidMethods {
+			if m == alg {
+				signingMethodValid = true
+				break
+			}
+		}
+		if !signingMethodValid {
+			// signing method is not in the listed set
+			return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
+		}
+	}
+
+	// Lookup key
+	var key interface{}
+	if keyFunc == nil {
+		// keyFunc was not provided; short-circuit validation
+		return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
+	}
+	if key, err = keyFunc(token); err != nil {
+		// keyFunc returned an error
+		if ve, ok := err.(*ValidationError); ok {
+			return token, ve
+		}
+		return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
+	}
+
+	vErr := &ValidationError{}
+
+	// Validate Claims
+	if !p.SkipClaimsValidation {
+		if err := token.Claims.Valid(); err != nil {
+
+			// If Claims.Valid returned an error, check whether it is a *ValidationError;
+			// if it is any other error type, wrap it in a ValidationError with a generic ClaimsInvalid flag set
+			if e, ok := err.(*ValidationError); !ok {
+				vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
+			} else {
+				vErr = e
+			}
+		}
+	}
+
+	// Perform signature validation
+	token.Signature = parts[2]
+	if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
+		vErr.Inner = err
+		vErr.Errors |= ValidationErrorSignatureInvalid
+	}
+
+	if vErr.valid() {
+		token.Valid = true
+		return token, nil
+	}
+
+	return token, vErr
+}
+
+// WARNING: Don't use this method unless you know what you're doing
+//
+// This method parses the token but doesn't validate the signature. It's only
+// ever useful in cases where you know the signature is valid (because it has
+// been checked previously in the stack) and you want to extract values from
+// it.
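+//
+// A minimal sketch of the intended use (not part of the upstream file;
+// assumes tokenString was already verified earlier in the stack):
+//
+//	token, _, err := new(Parser).ParseUnverified(tokenString, MapClaims{})
+//	if err == nil {
+//		kid, _ := token.Header["kid"].(string)
+//		_ = kid // e.g. select a verification key, then re-parse with Parse
+//	}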
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+	parts = strings.Split(tokenString, ".")
+	if len(parts) != 3 {
+		return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
+	}
+
+	token = &Token{Raw: tokenString}
+
+	// parse Header
+	var headerBytes []byte
+	if headerBytes, err = DecodeSegment(parts[0]); err != nil {
+		if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
+			return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
+		}
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// parse Claims
+	var claimBytes []byte
+	token.Claims = claims
+
+	if claimBytes, err = DecodeSegment(parts[1]); err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+	if p.UseJSONNumber {
+		dec.UseNumber()
+	}
+	// JSON Decode. Special case for map type to avoid weird pointer behavior
+	if c, ok := token.Claims.(MapClaims); ok {
+		err = dec.Decode(&c)
+	} else {
+		err = dec.Decode(&claims)
+	}
+	// Handle decode error
+	if err != nil {
+		return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
+	}
+
+	// Lookup signature method
+	if method, ok := token.Header["alg"].(string); ok {
+		if token.Method = GetSigningMethod(method); token.Method == nil {
+			return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
+		}
+	} else {
+		return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
+	}
+
+	return token, parts, nil
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
new file mode 100644
index 00000000..e4caf1ca
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSA family of signing methods
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+	Name string
+	Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+	SigningMethodRS256 *SigningMethodRSA
+	SigningMethodRS384 *SigningMethodRSA
+	SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+	// RS256
+	SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+	RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+		return SigningMethodRS256
+	})
+
+	// RS384
+	SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+	RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+		return SigningMethodRS384
+	})
+
+	// RS512
+	SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+	RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+		return SigningMethodRS512
+	})
+}
+
+func (m *SigningMethodRSA) Alg() string {
+	return m.Name
+}
+
+// Implements the Verify method from SigningMethod
+// For this signing method, the key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	var ok bool
+
+	if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+		return ErrInvalidKeyType
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Verify the signature
+	return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, the key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+	var ok bool
+
+	// Validate type of key
+	if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+		return "", ErrInvalidKey
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
new file mode 100644
index 00000000..10ee9db8
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
@@ -0,0 +1,126 @@
+// +build go1.4
+
+package jwt
+
+import (
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+)
+
+// Implements the RSAPSS family of signing methods
+type SigningMethodRSAPSS struct {
+	*SigningMethodRSA
+	Options *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company
+var (
+	SigningMethodPS256 *SigningMethodRSAPSS
+	SigningMethodPS384 *SigningMethodRSAPSS
+	SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+	// PS256
+	SigningMethodPS256 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS256",
+			Hash: crypto.SHA256,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA256,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+		return SigningMethodPS256
+	})
+
+	// PS384
+	SigningMethodPS384 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS384",
+			Hash: crypto.SHA384,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA384,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+		return SigningMethodPS384
+	})
+
+	// PS512
+	SigningMethodPS512 = &SigningMethodRSAPSS{
+		&SigningMethodRSA{
+			Name: "PS512",
+			Hash: crypto.SHA512,
+		},
+		&rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthAuto,
+			Hash:       crypto.SHA512,
+		},
+	}
+	RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+		return SigningMethodPS512
+	})
+}
+
+// Implements the Verify method from SigningMethod
+// For this verify method, the key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
+	var err error
+
+	// Decode the signature
+	var sig []byte
+	if sig, err = DecodeSegment(signature); err != nil {
+		return err
+	}
+
+	var rsaKey *rsa.PublicKey
+	switch k := key.(type) {
+	case *rsa.PublicKey:
+		rsaKey = k
+	default:
+		return ErrInvalidKey
+	}
+
+	// Create hasher
+	if !m.Hash.Available() {
+		return ErrHashUnavailable
+	}
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
+}
+
+// Implements the Sign method from SigningMethod
+// For this signing method, the key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
+	var rsaKey *rsa.PrivateKey
+
+	switch k := key.(type) {
+	case *rsa.PrivateKey:
+		rsaKey = k
+	default:
+		return "", ErrInvalidKeyType
+	}
+
+	// Create the hasher
+	if !m.Hash.Available() {
+		return "", ErrHashUnavailable
+	}
+
+	hasher := m.Hash.New()
+	hasher.Write([]byte(signingString))
+
+	// Sign the string and return the encoded bytes
+	if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+		return EncodeSegment(sigBytes), nil
+	} else {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
new file mode 100644
index 00000000..a5ababf9
--- /dev/null
+++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
@@ -0,0 +1,101 @@
+package jwt
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+)
+
+var (
+	ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
+	ErrNotRSAPrivateKey    = errors.New("Key is not a valid RSA private key")
+	ErrNotRSAPublicKey     = errors.New("Key is not a valid RSA public key")
+)
+
+// Parse PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	var parsedKey interface{}
+
+	var blockDecrypted []byte
+	if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+		return nil, err
+	}
+
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PrivateKey
+	var ok bool
+	if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+		return nil, ErrNotRSAPrivateKey
+	}
+
+	return pkey, nil
+}
+
+// Parse a PEM encoded PKIX public key, or the public key of a PEM encoded X.509 certificate
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+	var err error
+
+	// Parse PEM block
+	var block *pem.Block
+	if block, _ = pem.Decode(key); block == nil {
+		return nil, ErrKeyMustBePEMEncoded
+	}
+
+	// Parse the key
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+		if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+			parsedKey = cert.PublicKey
+		} else {
+			return nil, err
+		}
+	}
+
+	var pkey *rsa.PublicKey
+	var ok bool
+	if pkey, ok =
parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000..ed1f212b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000..d637e086 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. 
Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md
new file mode 100644
index 00000000..8ece2800
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/README.md
@@ -0,0 +1,66 @@
+# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master)
+
+The package utfbom implements the detection of a BOM (Unicode Byte Order Mark) and its removal as necessary. It can also return the encoding detected by the BOM.
+
+## Installation
+
+    go get -u github.com/dimchansky/utfbom
+
+## Example
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/dimchansky/utfbom"
+)
+
+func main() {
+	trySkip([]byte("\xEF\xBB\xBFhello"))
+	trySkip([]byte("hello"))
+}
+
+func trySkip(byteData []byte) {
+	fmt.Println("Input:", byteData)
+
+	// just skip BOM
+	output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData)))
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println("ReadAll with BOM skipping", output)
+
+	// skip BOM and detect encoding
+	sr, enc := utfbom.Skip(bytes.NewReader(byteData))
+	fmt.Printf("Detected encoding: %s\n", enc)
+	output, err = ioutil.ReadAll(sr)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println("ReadAll with BOM detection and skipping", output)
+	fmt.Println()
+}
+```
+
+Output:
+
+```
+$ go run main.go
+Input: [239 187 191 104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: UTF8
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+
+Input: [104 101 108 108 111]
+ReadAll with BOM skipping [104 101 108 108 111]
+Detected encoding: Unknown
+ReadAll with BOM detection and skipping [104 101 108 108 111]
+```
+
+
diff --git a/vendor/github.com/dimchansky/utfbom/go.mod b/vendor/github.com/dimchansky/utfbom/go.mod
new file mode 100644
index 00000000..4b9ecc6f
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/go.mod
@@ -0,0 +1 @@
+module github.com/dimchansky/utfbom
\ No newline at end of file
diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go
new file mode 100644
index 00000000..77a303e5
--- /dev/null
+++ b/vendor/github.com/dimchansky/utfbom/utfbom.go
@@ -0,0 +1,192 @@
+// Package utfbom implements the detection of a BOM (Unicode Byte Order Mark) and its removal as necessary.
+// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader
+// interface but provides automatic BOM checking and removal as necessary.
+package utfbom
+
+import (
+	"errors"
+	"io"
+)
+
+// Encoding is the type for a detected UTF encoding.
+type Encoding int
+
+// Constants to identify detected UTF encodings.
+const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
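+//
+// A minimal sketch (not part of the upstream file): the Reader can be used
+// anywhere an io.Reader is expected, e.g. drained with io.Copy:
+//
+//	sr, enc := Skip(input) // input is any io.Reader
+//	fmt.Println("detected encoding:", enc)
+//	if _, err := io.Copy(ioutil.Discard, sr); err != nil {
+//		// handle read error
+//	}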
+func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/github.com/dylanmei/iso8601/LICENSE b/vendor/github.com/dylanmei/iso8601/LICENSE new file mode 100644 index 00000000..dcabcdc7 --- /dev/null +++ b/vendor/github.com/dylanmei/iso8601/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dylan Meissner + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dylanmei/iso8601/README.md b/vendor/github.com/dylanmei/iso8601/README.md
new file mode 100644
index 00000000..c93b3cf7
--- /dev/null
+++ b/vendor/github.com/dylanmei/iso8601/README.md
@@ -0,0 +1,9 @@
+
+iso 8601 parser and formatter
+=============================
+
+An [ISO8601](https://en.wikipedia.org/wiki/ISO_8601) Go utility.
+
+- *Time* is not yet implemented
+- *Duration* is mostly implemented
+
diff --git a/vendor/github.com/dylanmei/iso8601/duration.go b/vendor/github.com/dylanmei/iso8601/duration.go
new file mode 100644
index 00000000..d5cab17d
--- /dev/null
+++ b/vendor/github.com/dylanmei/iso8601/duration.go
@@ -0,0 +1,96 @@
+package iso8601
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"time"
+)
+
+var (
+	// ErrBadFormat is returned when parsing fails
+	ErrBadFormat = errors.New("bad format string")
+
+	// ErrNoMonth is returned when the format string contains a month component, which is not supported
+	ErrNoMonth = errors.New("no months allowed")
+
+	full = regexp.MustCompile(`P((?P<year>\d+)Y)?((?P<month>\d+)M)?((?P<day>\d+)D)?(T((?P<hour>\d+)H)?((?P<minute>\d+)M)?((?P<second>\d+)S)?)?`)
+	week = regexp.MustCompile(`P((?P<week>\d+)W)`)
+)
+
+// adapted from https://github.com/BrianHicks/finch/duration
+func ParseDuration(value string) (time.Duration, error) {
+	var match []string
+	var regex *regexp.Regexp
+
+	if week.MatchString(value) {
+		match = week.FindStringSubmatch(value)
+		regex = week
+	} else if full.MatchString(value) {
+		match = full.FindStringSubmatch(value)
+		regex = full
+	} else {
+		return time.Duration(0), ErrBadFormat
+	}
+
+	d := time.Duration(0)
+	day := time.Hour * 24
+	week := day * 7
+	year := day * 365
+
+	for i, name := range regex.SubexpNames() {
+		part := match[i]
+		if i == 0 || name == "" || part == "" {
+			continue
+		}
+
+		value, err := strconv.Atoi(part)
+		if err != nil {
+			return time.Duration(0), err
+		}
+		switch name {
+		case "year":
+			d += year * time.Duration(value)
+		case "month":
+			return time.Duration(0), ErrNoMonth
+		case "week":
+			d += week * time.Duration(value)
+		case "day":
+			d += day * time.Duration(value)
+		case "hour":
+			d += time.Hour * time.Duration(value)
+		case "minute":
+			d += time.Minute * time.Duration(value)
+		case "second":
+			d += time.Second * time.Duration(value)
+		}
+	}
+
+	return d, nil
+}
+
+func FormatDuration(duration time.Duration) string {
+	// we're not doing negative durations
+	if duration.Seconds() <= 0 {
+		return "PT0S"
+	}
+
+	hours := int(duration.Hours())
+	minutes := int(duration.Minutes()) - (hours * 60)
+	seconds := int(duration.Seconds()) - (hours*3600 + minutes*60)
+
+	// we're not doing Y,M,W
+	s := "PT"
+	if hours > 0 {
+		s = fmt.Sprintf("%s%dH", s, hours)
+	}
+	if minutes > 0 {
+		s = fmt.Sprintf("%s%dM", s, minutes)
+	}
+	if seconds > 0 {
+		s = fmt.Sprintf("%s%dS", s, seconds)
+	}
+
+	return s
+}
diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock
new file mode 100644
index 00000000..7d879e9c
--- /dev/null
+++ b/vendor/github.com/fatih/color/Gopkg.lock
@@ -0,0 +1,27 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 00000000..ff1617f7 --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 00000000..25fdaf63 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 00000000..affe322c --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,185 @@ +# Archived project. No maintenance. + +This project is not maintained anymore and is archived. Feel free to fork and +make your own changes if needed. 
For more detail, read my blog post: [Taking an indefinite sabbatical from my projects](https://arslan.io/2018/10/09/taking-an-indefinite-sabbatical-from-my-projects/)
+
+Thanks to everyone for their valuable feedback and contributions.
+
+
+# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color)
+
+Color lets you use colorized outputs in terms of [ANSI Escape
+Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
+has support for Windows too! The API can be used in several ways, pick one that
+suits you.
+
+
+![Color](https://i.imgur.com/c1JI0lA.png)
+
+
+## Install
+
+```bash
+go get github.com/fatih/color
+```
+
+Note that the `vendor` folder is here for stability. Remove the folder if you
+already have the dependencies in your GOPATH.
+
+## Examples
+
+### Standard colors
+
+```go
+// Print with default helper functions
+color.Cyan("Prints text in cyan.")
+
+// A newline will be appended automatically
+color.Blue("Prints %s in blue.", "text")
+
+// These are using the default foreground colors
+color.Red("We have red")
+color.Magenta("And many others ..")
+
+```
+
+### Mix and reuse colors
+
+```go
+// Create a new color object
+c := color.New(color.FgCyan).Add(color.Underline)
+c.Println("Prints cyan text with an underline.")
+
+// Or just add them to New()
+d := color.New(color.FgCyan, color.Bold)
+d.Printf("This prints bold cyan %s\n", "too!.")
+
+// Mix up foreground and background colors, create new mixes!
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Use your own output (io.Writer)
+
+```go
+// Use your own io.Writer output
+color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+blue := color.New(color.FgBlue)
+blue.Fprint(writer, "This will print text in blue.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Custom fprint functions (FprintFunc)
+
+```go
+blue := color.New(color.FgBlue).FprintfFunc()
+blue(myWriter, "important notice: %s", stars)
+
+// Mix up with multiple attributes
+success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+success(myWriter, "Don't forget this...")
+```
+
+### Insert into noncolor strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should not be neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too!
Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable/Enable color
+
+There might be a case where you want to explicitly disable/enable color output. The
+`go-isatty` package will automatically disable color output for non-tty output streams
+(for example if the output were piped directly to `less`).
+
+`Color` has support to disable/enable colors both globally and for single color
+definitions. For example, suppose you have a CLI app and a `--no-color` bool flag. You
+can easily disable the color output with:
+
+```go
+
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+	color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+```go
+c := color.New(color.FgCyan)
+c.Println("Prints cyan text")
+
+c.DisableColor()
+c.Println("This is printed without any color")
+
+c.EnableColor()
+c.Println("This prints again cyan...")
+```
+
+## Todo
+
+* Save/Return previous values
+* Evaluate fmt.Formatter interface
+
+
+## Credits
+
+ * [Fatih Arslan](https://github.com/fatih)
+ * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
+
+## License
+
+The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
+
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
new file mode 100644
index 00000000..91c8e9f0
--- /dev/null
+++ b/vendor/github.com/fatih/color/color.go
@@ -0,0 +1,603 @@
+package color
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+var (
+	// NoColor defines if the output is colorized or not. It's dynamically set to
+	// false or true based on the stdout's file descriptor referring to a terminal
+	// or not. This is a global option and affects all colors. For more control
+	// over individual color definitions, use their DisableColor() methods.
+	NoColor = os.Getenv("TERM") == "dumb" ||
+		(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
+
+	// Output defines the standard output of the print functions. By default
+	// os.Stdout is used.
+	Output = colorable.NewColorableStdout()
+
+	// Error defines a color supporting writer for os.Stderr.
+	Error = colorable.NewColorableStderr()
+
+	// colorsCache is used to reduce the count of created Color objects and
+	// allows reusing already created objects with the required Attribute.
+	colorsCache   = make(map[Attribute]*Color)
+	colorsCacheMu sync.Mutex // protects colorsCache
+)
+
+// Color defines a custom color object which is defined by SGR parameters.
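+//
+// A minimal sketch (not part of the upstream file): attributes passed to New
+// are joined into a single SGR sequence when printed:
+//
+//	warn := color.New(color.FgYellow, color.Bold) // sequence "33;1"
+//	warn.Println("low disk space")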
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m".
+// An example output might be: "1;36" -> bold cyan.
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the s string with the color's attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful for keeping existing code
+// unchanged while still producing (uncolored) output. Can be used for flags
+// like "--no-color". To enable it back, use the EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// check first whether the user has set an explicit per-color preference
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not, return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 00000000..cf1e9650
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+	color.Cyan("Prints text in cyan.")
+
+	// a newline will be appended automatically
+	color.Blue("Prints %s in blue.", "text")
+
+	// More default foreground colors..
+	color.Red("We have red")
+	color.Yellow("Yellow color too!")
+	color.Magenta("And many others ..")
+
+	// Hi-intensity colors
+	color.HiGreen("Bright green color.")
+	color.HiBlack("Bright black means gray..")
+	color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+	// Create a new color object
+	c := color.New(color.FgCyan).Add(color.Underline)
+	c.Println("Prints cyan text with an underline.")
+
+	// Or just add them to New()
+	d := color.New(color.FgCyan, color.Bold)
+	d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+	// Mix up foreground and background colors, create new mixes!
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use the FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := New(FgYellow).SprintFunc()
+	red := New(FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions alone, the user should use
+fmt.FprintXXX and set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using it with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will be now in Yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+	if *flagNoColor {
+		color.NoColor = true // disables colorized output
+	}
+
+It also has support for single color definitions (local).
You can
+disable/enable color output on the fly:
+
+	c := color.New(color.FgCyan)
+	c.Println("Prints cyan text")
+
+	c.DisableColor()
+	c.Println("This is printed without any color")
+
+	c.EnableColor()
+	c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE
new file mode 100644
index 00000000..926d5498
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md
new file mode 100644
index 00000000..efc3204f
--- /dev/null
+++ b/vendor/github.com/gofrs/uuid/README.md
@@ -0,0 +1,109 @@
+# UUID
+
+[![License](https://img.shields.io/github/license/gofrs/uuid.svg)](https://github.com/gofrs/uuid/blob/master/LICENSE)
+[![Build Status](https://travis-ci.org/gofrs/uuid.svg?branch=master)](https://travis-ci.org/gofrs/uuid)
+[![GoDoc](http://godoc.org/github.com/gofrs/uuid?status.svg)](http://godoc.org/github.com/gofrs/uuid)
+[![Coverage Status](https://codecov.io/gh/gofrs/uuid/branch/master/graphs/badge.svg?branch=master)](https://codecov.io/gh/gofrs/uuid/)
+[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/uuid)](https://goreportcard.com/report/github.com/gofrs/uuid)
+
+Package uuid provides a pure Go implementation of Universally Unique Identifiers
+(UUIDs), as defined in RFC-4122. This package supports both the creation
+and parsing of UUIDs in different formats.
+
+This package supports the following UUID versions:
+* Version 1, based on timestamp and MAC address (RFC-4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing of a named value (RFC-4122)
+* Version 4, based on random numbers (RFC-4122)
+* Version 5, based on SHA-1 hashing of a named value (RFC-4122)
+
+## Project History
+
+This project was originally forked from the
+[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after
+it appeared to be no longer maintained, while exhibiting [critical
+flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take
+over this project to ensure it receives regular maintenance for the benefit of
+the larger Go community.
+
+We'd like to thank Maxim Bublis for his hard work on the original iteration of
+the package.
+
+## License
+
+The source code of this package is released under the MIT License. Please see
+the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full
+content of the license.
+
+## Recommended Package Version
+
+We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were
+created before our fork of the original package and have some known
+deficiencies.
+
+## Installation
+
+It is recommended to use a package manager like `dep` that understands tagged
+releases of a package, as well as semantic versioning.
+
+If you are unable to make use of a dependency manager with your project, you can
+use the `go get` command to download it directly:
+
+```Shell
+$ go get github.com/gofrs/uuid
+```
+
+## Requirements
+
+Due to subtests not being supported in older versions of Go, this package is
+only regularly tested against Go 1.7+. This package may work perfectly fine with
+Go 1.2+, but support for these older versions is not actively maintained.
+
+## Go 1.11 Modules
+
+As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path.
+
+An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0.
+
+Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details.
+
+## Usage
+
+Here is a quick overview of how to use this package. For more detailed
+documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid).
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/gofrs/uuid"
+)
+
+// Create a Version 4 UUID, panicking on error.
+// Use this form to initialize package-level variables.
+var u1 = uuid.Must(uuid.NewV4())
+
+func main() {
+	// Create a Version 4 UUID.
+	u2, err := uuid.NewV4()
+	if err != nil {
+		log.Fatalf("failed to generate UUID: %v", err)
+	}
+	log.Printf("generated Version 4 UUID %v", u2)
+
+	// Parse a UUID from a string.
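+	// FromString (used below) reports malformed input via an error, while
+	// the FromStringOrNil variant (defined in this package's codec.go)
+	// returns uuid.Nil instead; shown here as an illustrative aside:
+	if uuid.FromStringOrNil("not-a-uuid") == uuid.Nil {
+		log.Print("FromStringOrNil returned uuid.Nil for malformed input")
+	}
+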
+ s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + u3, err := uuid.FromString(s) + if err != nil { + log.Fatalf("failed to parse UUID %q: %v", s, err) + } + log.Printf("successfully parsed UUID %v", u3) +} +``` + +## References + +* [RFC-4122](https://tools.ietf.org/html/rfc4122) +* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01) diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go new file mode 100644 index 00000000..e3014c68 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/codec.go @@ -0,0 +1,212 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns a UUID generated from the raw byte slice input. +// It will return an error if the slice isn't 16 bytes long. +func FromBytes(input []byte) (UUID, error) { + u := UUID{} + err := u.UnmarshalBinary(input) + return u, err +} + +// FromBytesOrNil returns a UUID generated from the raw byte slice input. +// Same behavior as FromBytes(), but returns uuid.Nil instead of an error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns a UUID parsed from the input string. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (UUID, error) { + u := UUID{} + err := u.UnmarshalText([]byte(input)) + return u, err +} + +// FromStringOrNil returns a UUID parsed from the input string. +// Same behavior as FromString(), but returns uuid.Nil instead of an error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by the String() method. +func (u UUID) MarshalText() ([]byte, error) { + return []byte(u.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Following formats are supported: +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// +// ABNF for supported UUID text representation follows: +// +// URN := 'urn' +// UUID-NID := 'uuid' +// +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct +// +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn +// +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain +// +func (u *UUID) UnmarshalText(text []byte) error { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 34, 38: + return u.decodeBraced(text) + case 36: + return u.decodeCanonical(text) + case 41, 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(text), text) + } +} + +// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3): +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) error { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + src := t + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return err + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return nil +} + +// decodeHashLike decodes UUID strings that are using the following format: +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) error { + src := t[:] + dst := u[:] + + _, err := hex.Decode(dst, src) + return err +} + +// decodeBraced decodes UUID strings that are using the following formats: +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". +func (u *UUID) decodeBraced(t []byte) error { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID strings that are using the following formats: +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) error { + total := len(t) + + urnUUIDPrefix := t[:9] + + if !bytes.Equal(urnUUIDPrefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format in string %q", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID strings that are using the following formats: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) error { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(t), t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. 
+func (u UUID) MarshalBinary() ([]byte, error) { + return u.Bytes(), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return an error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) error { + if len(data) != Size { + return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + } + copy(u[:], data) + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go new file mode 100644 index 00000000..afaefbc8 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/fuzz.go @@ -0,0 +1,47 @@ +// Copyright (c) 2018 Andrei Tudor Călin +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// +build gofuzz + +package uuid + +// Fuzz implements a simple fuzz test for FromString / UnmarshalText. +// +// To run: +// +// $ go get github.com/dvyukov/go-fuzz/... +// $ cd $GOPATH/src/github.com/gofrs/uuid +// $ go-fuzz-build github.com/gofrs/uuid +// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata +// +// If you make significant changes to FromString / UnmarshalText and add +// new cases to fromStringTests (in codec_test.go), please run +// +// $ go test -seed_fuzz_corpus +// +// to seed the corpus with the new interesting inputs, then run the fuzzer. +func Fuzz(data []byte) int { + _, err := FromString(string(data)) + if err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go new file mode 100644 index 00000000..4257761f --- /dev/null +++ b/vendor/github.com/gofrs/uuid/generator.go @@ -0,0 +1,299 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "fmt" + "hash" + "io" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +type epochFunc func() time.Time + +// HWAddrFunc is the function type used to provide hardware (MAC) addresses. +type HWAddrFunc func() (net.HardwareAddr, error) + +// DefaultGenerator is the default UUID Generator used by this package. +var DefaultGenerator Generator = NewGen() + +var ( + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func NewV1() (UUID, error) { + return DefaultGenerator.NewV1() +} + +// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. +func NewV2(domain byte) (UUID, error) { + return DefaultGenerator.NewV2(domain) +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return DefaultGenerator.NewV3(ns, name) +} + +// NewV4 returns a randomly generated UUID. +func NewV4() (UUID, error) { + return DefaultGenerator.NewV4() +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return DefaultGenerator.NewV5(ns, name) +} + +// Generator provides an interface for generating UUIDs. +type Generator interface { + NewV1() (UUID, error) + NewV2(domain byte) (UUID, error) + NewV3(ns UUID, name string) UUID + NewV4() (UUID, error) + NewV5(ns UUID, name string) UUID +} + +// Gen is a reference UUID generator based on the specifications laid out in +// RFC-4122 and DCE 1.1: Authentication and Security Services. This type +// satisfies the Generator interface as defined in this package. +// +// For consumers who are generating V1 UUIDs, but don't want to expose the MAC +// address of the node generating the UUIDs, the NewGenWithHWAF() function has been +// provided as a convenience. See the function's documentation for more info. +// +// The authors of this package do not feel that the majority of users will need +// to obfuscate their MAC address, and so we recommend using NewGen() to create +// a new generator. +type Gen struct { + clockSequenceOnce sync.Once + hardwareAddrOnce sync.Once + storageMutex sync.Mutex + + rand io.Reader + + epochFunc epochFunc + hwAddrFunc HWAddrFunc + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +// interface check -- build will fail if *Gen doesn't satisfy Generator +var _ Generator = (*Gen)(nil) + +// NewGen returns a new instance of Gen with some default values set. Most +// people should use this. +func NewGen() *Gen { + return NewGenWithHWAF(defaultHWAddrFunc) +} + +// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most +// consumers should use NewGen() instead. 
+// +// This is used so that consumers can generate their own MAC addresses, for use +// in the generated UUIDs, if there is some concern about exposing the physical +// address of the machine generating the UUID. +// +// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC +// address for all the future UUIDs generated by it. If you'd like to switch the +// MAC address being used, you'll need to create a new generator using this +// function. +func NewGenWithHWAF(hwaf HWAddrFunc) *Gen { + return &Gen{ + epochFunc: time.Now, + hwAddrFunc: hwaf, + rand: rand.Reader, + } +} + +// NewV1 returns a UUID based on the current timestamp and MAC address. +func (g *Gen) NewV1() (UUID, error) { + u := UUID{} + + timeNow, clockSeq, err := g.getClockSequence() + if err != nil { + return Nil, err + } + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + hardwareAddr, err := g.getHardwareAddr() + if err != nil { + return Nil, err + } + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV2 returns a DCE Security UUID based on the POSIX UID/GID. +func (g *Gen) NewV2(domain byte) (UUID, error) { + u, err := g.NewV1() + if err != nil { + return Nil, err + } + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[:], posixGID) + } + + u[9] = domain + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name. +func (g *Gen) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns a randomly generated UUID. +func (g *Gen) NewV4() (UUID, error) { + u := UUID{} + if _, err := io.ReadFull(g.rand, u[:]); err != nil { + return Nil, err + } + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u, nil +} + +// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name. +func (g *Gen) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +// Returns the epoch and clock sequence. +func (g *Gen) getClockSequence() (uint64, uint16, error) { + var err error + g.clockSequenceOnce.Do(func() { + buf := make([]byte, 2) + if _, err = io.ReadFull(g.rand, buf); err != nil { + return + } + g.clockSequence = binary.BigEndian.Uint16(buf) + }) + if err != nil { + return 0, 0, err + } + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := g.getEpoch() + // Clock didn't change since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, nil +} + +// Returns the hardware address. +func (g *Gen) getHardwareAddr() ([]byte, error) { + var err error + g.hardwareAddrOnce.Do(func() { + var hwAddr net.HardwareAddr + if hwAddr, err = g.hwAddrFunc(); err == nil { + copy(g.hardwareAddr[:], hwAddr) + return + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence. 
+ if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil { + return + } + // Set multicast bit as recommended by RFC-4122 + g.hardwareAddr[0] |= 0x01 + }) + if err != nil { + return []byte{}, err + } + return g.hardwareAddr[:], nil +} + +// Returns the difference between UUID epoch (October 15, 1582) +// and current time in 100-nanosecond intervals. +func (g *Gen) getEpoch() uint64 { + return epochStart + uint64(g.epochFunc().UnixNano()/100) +} + +// Returns the UUID based on the hashing of the namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} + +// Returns the hardware address. +func defaultHWAddrFunc() (net.HardwareAddr, error) { + ifaces, err := net.Interfaces() + if err != nil { + return []byte{}, err + } + for _, iface := range ifaces { + if len(iface.HardwareAddr) >= 6 { + return iface.HardwareAddr, nil + } + } + return []byte{}, fmt.Errorf("uuid: no HW address found") +} diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go new file mode 100644 index 00000000..6f254a4f --- /dev/null +++ b/vendor/github.com/gofrs/uuid/sql.go @@ -0,0 +1,109 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice will be handled by UnmarshalBinary, while +// a longer byte slice or a string will be handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case UUID: // support gorm convert from UUID to NullUUID + *u = src + return nil + + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database. +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. 
+func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} + +// MarshalJSON marshals the NullUUID as null or the nested UUID +func (u NullUUID) MarshalJSON() ([]byte, error) { + if !u.Valid { + return json.Marshal(nil) + } + + return json.Marshal(u.UUID) +} + +// UnmarshalJSON unmarshals a NullUUID +func (u *NullUUID) UnmarshalJSON(b []byte) error { + if bytes.Equal(b, []byte("null")) { + u.UUID, u.Valid = Nil, false + return nil + } + + if err := json.Unmarshal(b, &u.UUID); err != nil { + return err + } + + u.Valid = true + + return nil +} diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go new file mode 100644 index 00000000..9c4547f1 --- /dev/null +++ b/vendor/github.com/gofrs/uuid/uuid.go @@ -0,0 +1,250 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1. +// +// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. +// +// DCE 1.1[2] provides the specification for version 2. +// +// [1] https://tools.ietf.org/html/rfc4122 +// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01 +package uuid + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "strings" + "time" +) + +// Size of a UUID in bytes. +const Size = 16 + +// UUID is an array type to represent the value of a UUID, as defined in RFC-4122. +type UUID [Size]byte + +// UUID versions. +const ( + _ byte = iota + V1 // Version 1 (date-time and MAC address) + V2 // Version 2 (date-time and MAC address, DCE security version) + V3 // Version 3 (namespace name-based) + V4 // Version 4 (random) + V5 // Version 5 (namespace name-based) +) + +// UUID layout variants. +const ( + VariantNCS byte = iota + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// UUID DCE domains. +const ( + DomainPerson = iota + DomainGroup + DomainOrg +) + +// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00, +// 15 October 1582 within a V1 UUID. 
This type has no meaning for V2-V5 +// UUIDs since they don't have an embedded timestamp. +type Timestamp uint64 + +const _100nsPerSecond = 10000000 + +// Time returns the UTC time.Time representation of a Timestamp +func (t Timestamp) Time() (time.Time, error) { + secs := uint64(t) / _100nsPerSecond + nsecs := 100 * (uint64(t) % _100nsPerSecond) + return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil +} + +// TimestampFromV1 returns the Timestamp embedded within a V1 UUID. +// Returns an error if the UUID is any version other than 1. +func TimestampFromV1(u UUID) (Timestamp, error) { + if u.Version() != 1 { + err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version()) + return 0, err + } + low := binary.BigEndian.Uint32(u[0:4]) + mid := binary.BigEndian.Uint16(u[4:6]) + hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff + return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil +} + +// String parse helpers. +var ( + urnPrefix = []byte("urn:uuid:") + byteGroups = []int{8, 4, 4, 4, 12} +) + +// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to +// zero. +var Nil = UUID{} + +// Predefined namespace UUIDs. +var ( + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) +) + +// Version returns the algorithm version used to generate the UUID. +func (u UUID) Version() byte { + return u[6] >> 4 +} + +// Variant returns the UUID layout variant. +func (u UUID) Variant() byte { + switch { + case (u[8] >> 7) == 0x00: + return VariantNCS + case (u[8] >> 6) == 0x02: + return VariantRFC4122 + case (u[8] >> 5) == 0x06: + return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture + } +} + +// Bytes returns a byte slice representation of the UUID. +func (u UUID) Bytes() []byte { + return u[:] +} + +// String returns a canonical RFC-4122 string representation of the UUID: +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx. +func (u UUID) String() string { + buf := make([]byte, 36) + + hex.Encode(buf[0:8], u[0:4]) + buf[8] = '-' + hex.Encode(buf[9:13], u[4:6]) + buf[13] = '-' + hex.Encode(buf[14:18], u[6:8]) + buf[18] = '-' + hex.Encode(buf[19:23], u[8:10]) + buf[23] = '-' + hex.Encode(buf[24:], u[10:]) + + return string(buf) +} + +// Format implements fmt.Formatter for UUID values. +// +// The behavior is as follows: +// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'. +// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation. +// The 'S' verb returns the RFC-4122 format, but with capital hex digits. +// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer. +// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return +// "%!verb(uuid.UUID=value)" as recommended by the fmt package. 
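+//
+// A brief illustration (derived from the verb rules above, using helpers from
+// this package):
+//
+//	u := Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+//	fmt.Sprintf("%s", u) // "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+//	fmt.Sprintf("%X", u) // "6BA7B8109DAD11D180B400C04FD430C8"
+//	fmt.Sprintf("%q", u) // `"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`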
+func (u UUID) Format(f fmt.State, c rune) { + switch c { + case 'x', 'X': + s := hex.EncodeToString(u.Bytes()) + if c == 'X' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'v': + var s string + if f.Flag('#') { + s = fmt.Sprintf("%#v", [Size]byte(u)) + } else { + s = u.String() + } + _, _ = io.WriteString(f, s) + case 's', 'S': + s := u.String() + if c == 'S' { + s = strings.Map(toCapitalHexDigits, s) + } + _, _ = io.WriteString(f, s) + case 'q': + _, _ = io.WriteString(f, `"`+u.String()+`"`) + default: + // invalid/unsupported format verb + fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String()) + } +} + +func toCapitalHexDigits(ch rune) rune { + // convert a-f hex digits to A-F + switch ch { + case 'a': + return 'A' + case 'b': + return 'B' + case 'c': + return 'C' + case 'd': + return 'D' + case 'e': + return 'E' + case 'f': + return 'F' + default: + return ch + } +} + +// SetVersion sets the version bits. +func (u *UUID) SetVersion(v byte) { + u[6] = (u[6] & 0x0f) | (v << 4) +} + +// SetVariant sets the variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + } +} + +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +func Must(u UUID, err error) UUID { + if err != nil { + panic(err) + } + return u +} diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 00000000..f57de90d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000..0b4659b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000..081c86fa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. 
+
+The following message:
+
+	package test;
+
+	import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	message A {
+		optional string Description = 1 [(gogoproto.nullable) = false];
+		optional int64 Number = 2 [(gogoproto.nullable) = false];
+		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+	}
+
+will generate a Go struct which looks a lot like this:
+
+	type A struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+	message B {
+		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+	}
+
+See below that A is embedded in B.
+
+	type B struct {
+		A
+		G []github_com_gogo_protobuf_test_custom.Uint128
+	}
+
+Also see the repeated custom type.
+
+	type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+	message C {
+		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+	}
+
+See below that the field's name is MySize and not Size.
+
+	type C struct {
+		MySize *int64
+	}
+
+This is useful when having a protocol buffer message with a field name which conflicts with a generated method.
+As an example, having a field name size and using the sizer plugin to generate a Size method will cause a Go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the Go language were available.
+
+Gogoprotobuf also has some more subtle changes; these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+    but are actually the imports specified in the .proto file.
+
+Gogoprotobuf has also lost some features which should be brought back with time:
+
+  - Marshalling and unmarshalling with reflect and without the unsafe package,
+    this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states that you should be able to tell whether a
+field is set or unset. With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set. It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix.
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or write your own string method.
+  - goproto_getters, if false, the message is generated without get methods; this is useful when you would rather use face.
+  - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or write your own string method.
+  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension.
+  - goproto_unrecognized (beta), if false, the XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
+
+Less Typing and Peace of Mind are explained in the godoc of their specific plugin folders:
+
+  - github.com/gogo/protobuf/plugin/
+
+If you do not use any of these extensions, the generated code
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+	github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 00000000..e352808b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,874 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: gogo.proto
+
+package gogoproto
+
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + 
ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = 
&proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: "gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: 
"gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + 
ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + 
proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 
0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 
0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000..f6502e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. 
+var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000..390d4e4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return 
proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { 
+ v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message 
*google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func 
IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 00000000..00d65f32 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 00000000..a26b046d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. 
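Clone and Merge are the core of clone.go: Clone allocates a fresh message of the same type and delegates to Merge, and Merge appends repeated fields rather than replacing them. A minimal sketch of that behavior, assuming a hand-written stand-in for a generated message (the Example type and its fields are illustrative, not part of this patch; the reflection-based Clone/Merge only need the protobuf struct tags plus the three Message methods):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Example stands in for a generated message type; it is hypothetical and
// exists only to exercise the reflection path in Clone and Merge.
type Example struct {
	Name string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Tags []string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	src := &Example{Name: "a", Tags: []string{"x"}}

	dst := proto.Clone(src).(*Example) // deep copy; mutating dst leaves src intact
	dst.Tags[0] = "z"
	fmt.Println(src.Tags) // [x]

	// Merge overwrites set scalar fields and appends repeated fields.
	proto.Merge(dst, &Example{Name: "b", Tags: []string{"y"}})
	fmt.Println(dst.Name, dst.Tags) // b [z y]
}
```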
+type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generated Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. 
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 00000000..24552483 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 00000000..63b0f08b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. 
+// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. 
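Unmarshal and UnmarshalMerge differ only in whether the target message is reset before decoding, which is visible with repeated fields. A minimal sketch, assuming an illustrative hand-written Item type that satisfies the Message interface (hypothetical, not part of this patch):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Item is an illustrative hand-written message; the reflection-based codec
// only needs the protobuf struct tags and the Message methods.
type Item struct {
	Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
}

func (m *Item) Reset()         { *m = Item{} }
func (m *Item) String() string { return proto.CompactTextString(m) }
func (*Item) ProtoMessage()    {}

func main() {
	a, _ := proto.Marshal(&Item{Tags: []string{"x"}})
	b, _ := proto.Marshal(&Item{Tags: []string{"y"}})

	var m Item
	_ = proto.Unmarshal(a, &m) // resets m before decoding
	_ = proto.Unmarshal(b, &m)
	fmt.Println(m.Tags) // [y]

	m.Reset()
	_ = proto.UnmarshalMerge(a, &m) // preserves existing data
	_ = proto.UnmarshalMerge(b, &m)
	fmt.Println(m.Tags) // [x y]
}
```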
+func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 00000000..35b882c0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 00000000..fe1bd7d9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// durationFromProto converts a Duration to a time.Duration. durationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// durationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 00000000..e748e173 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 00000000..3abfed2c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. 
+// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 00000000..0f5fb173 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
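The encoders in encode.go mirror the decode routines earlier in the patch, and a small round trip makes the two wire formats concrete: varints spend one byte per 7 bits of payload, and zigzag folds signed values of small magnitude into small unsigned values (0, -1, 1, -2, ... map to 0, 1, 2, 3, ...). A minimal sketch using only the exported Buffer methods:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// 300 needs two varint bytes: low 7 bits first, continuation bit set.
	fmt.Println(proto.EncodeVarint(300)) // [172 2]

	b := proto.NewBuffer(nil)
	_ = b.EncodeVarint(300)
	_ = b.EncodeZigzag64(uint64(int64(-3))) // zigzag: -3 becomes 5 on the wire

	// The Buffer reads back what was appended, in order.
	x, _ := b.DecodeVarint()
	z, _ := b.DecodeZigzag64()
	fmt.Println(x, int64(z)) // 300 -3
}
```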
+ +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 00000000..d4db5a1c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extension sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things is not equal. + +The return value is undefined if a and b are not protocol buffers. 
+*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 00000000..341c6f57 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. 
+ return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. 
+func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
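+//
+// Illustrative use (E_MyExtension stands in for a generated descriptor and is
+// not defined in this file):
+//
+//	v, err := GetExtension(msg, E_MyExtension)
+//	if err == ErrMissingExtension {
+//		// extension not set and no default declared
+//	}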
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. 
+ value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. 
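+// A generated .pb.go file typically does so from an init function
+// (E_MyExtension is an illustrative name):
+//
+//	func init() {
+//		RegisterExtension(E_MyExtension)
+//	}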
+ +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 00000000..53ebd8cc --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,368 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
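+
+// This file carries the gogo-specific extension support: messages that store
+// their extensions in a raw []byte field (the extensionsBytes interface
+// below), plus helpers for sorting, sizing, encoding and decoding extension
+// maps.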
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, 
error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			// recurse with the inner field's wire type (fwire), not the
+			// enclosing group's, so non-group fields inside a group are
+			// sized correctly
+			s, err := size(buf[offset:], fwire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	i := 0
+	for i < len(buf) {
+		tag, n := DecodeVarint(buf[i:])
+		if n <= 0 {
+			return nil, fmt.Errorf("unable to decode varint")
+		}
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		l, err := size(buf[i+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		end := i + int(l) + n
+		m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+		i = end
+	}
+	return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+	ee := Extension{enc: make([]byte, len(e))}
+	copy(ee.enc, e)
+	return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
+		ext := ee.GetExtensions()
+		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		m[int32(tag)] = ext
+	}
+}
+
+func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
+	u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
+	ei := u.getExtElemInfo(extension)
+	v := value
+	p := toAddrPointer(&v, ei.isptr)
+	siz := ei.sizer(p, SizeVarint(ei.wiretag))
+	buf := make([]byte, 0, siz)
+	return ei.marshaler(buf, p, ei.wiretag, false)
+}
+
+func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
+	o := 0
+	for o < len(buf) {
+		tag, n := DecodeVarint((buf)[o:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		if o+n > len(buf) {
+			return nil, fmt.Errorf("unable to decode extension")
+		}
+		l, err := size((buf)[o+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		if int32(fieldNum) == extension.Field {
+			if o+n+l > len(buf) {
+				return nil, fmt.Errorf("unable to decode extension")
+			}
+			v, err := decodeExtension((buf)[o:o+n+l], extension)
+			if err != nil {
+				return nil, err
+			}
+			return v, nil
+		}
+		o += n + l
+	}
+	return defaultExtensionValue(extension)
+}
+
+func (this *Extension) Encode() error {
+	if this.enc == nil {
+		var err error
+		this.enc, err = encodeExtension(this.desc, this.value)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (this Extension) GoString() string {
+	if err := this.Encode(); err != nil {
+		return fmt.Sprintf("error encoding extension: %v", err)
+	}
+	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return SetExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+
return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 00000000..d17f8020 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+  - There are getters that return a field's value if set,
+    and return the field's default value if unset.
+    The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+    All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+    That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+    msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+    have them. They have the form Default_StructName_FieldName.
+    Because the getter methods handle defaulted values,
+    direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+    Enum values are prefixed by the enclosing message's name, or by the
+    enum's type name if it is a top-level enum. Enum types have a String
+    method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+    the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+    followed by an underscore-delimited list of the nested messages
+    that contain it (if any) followed by the CamelCased name of the
+    extension field itself. HasExtension, ClearExtension, GetExtension
+    and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+    with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: 
&pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
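+//
+// A typical reuse pattern (illustrative; msgs and send are placeholders):
+//
+//	buf := NewBuffer(nil)
+//	for _, m := range msgs {
+//		buf.Reset()
+//		if err := buf.Marshal(m); err != nil {
+//			log.Fatal(err)
+//		}
+//		send(buf.Bytes())
+//	}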
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+//   different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
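+// For example, with the generated FOO enum shown in the package comment,
+// EnumName(FOO_name, 17) returns "X", while EnumName(FOO_name, 99) falls back
+// to the decimal string "99".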
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. 
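+// If zeros is true, fields without an explicit proto-declared default are
+// also initialized, to their zero values; otherwise only fields that declare
+// a default are set.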
+func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. 
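+// buildDefaultMessage records, for each field of t, either its proto-declared
+// default scalar value or the fact that it is a nested message that
+// setDefaults must recurse into.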
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. 
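+// Bool keys sort as false < true, integer keys numerically and string keys
+// lexicographically, so deterministic marshaling emits the map entries of a
+// given message in a stable order.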
+func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 00000000..b3aa3919 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 00000000..f48a7567 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) 
+ + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..b6cad908 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
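The reflect-based `pointer` declared above wraps a `reflect.Value` of pointer kind; `toPointer` (defined next) and `offset` then navigate a message using ordinary reflection calls. A small stand-alone sketch of that navigation, where `Msg` is an illustrative stand-in:

```go
package main

import (
	"fmt"
	"reflect"
)

// Msg stands in for a generated message struct.
type Msg struct {
	A int32
}

func main() {
	m := &Msg{A: 5}
	p := reflect.ValueOf(m)                     // cf. toPointer: wrap the *Msg
	f := p.Elem().FieldByIndex([]int{0}).Addr() // cf. offset: walk to a field, re-address it
	fmt.Println(*f.Interface().(*int32))        // 5
}
```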
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..7ffd3c29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
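On purego/appengine/js builds the package cannot atomically swap `unsafe` pointers, so the `atomic*Info` helpers above serialize every load and store behind a single mutex. The same shape in miniature, where `info`, `load`, and `store` are stand-ins:

```go
package main

import (
	"fmt"
	"sync"
)

// info stands in for the cached marshal/unmarshal metadata.
type info struct{ name string }

var mu sync.Mutex

// load and store serialize access with one mutex, the shape of
// atomicLoadUnmarshalInfo / atomicStoreUnmarshalInfo above.
func load(p **info) *info {
	mu.Lock()
	defer mu.Unlock()
	return *p
}

func store(p **info, v *info) {
	mu.Lock()
	defer mu.Unlock()
	*p = v
}

func main() {
	var cached *info
	store(&cached, &info{name: "unmarshalInfo"})
	fmt.Println(load(&cached).name) // unmarshalInfo
}
```

Keeping one `load`/`store` pair per info type, as the file above does, leaves the call sites identical to the `sync/atomic` versions in pointer_unsafe.go.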
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..d55a335d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
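The unsafe implementation that the `pointer` type (defined next) belongs to identifies a field by nothing but its byte offset, and `offset` adds that offset to the struct's base address. A minimal demonstration of the same arithmetic with `unsafe.Offsetof`, where `T` is illustrative only:

```go
package main

import (
	"fmt"
	"unsafe"
)

// T stands in for a generated message struct.
type T struct {
	A int32
	B string
}

func main() {
	t := T{A: 7, B: "x"}
	// field identifier: byte offset of B within T (cf. toField)
	off := unsafe.Offsetof(t.B)
	// base + offset, kept in a single expression as Go's unsafe
	// rules require (cf. pointer.offset below)
	bp := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&t)) + off))
	fmt.Println(*bp) // x
}
```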
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
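`asPointerTo`, described above and defined next, turns a raw `unsafe.Pointer` back into a typed `reflect.Value` via `reflect.NewAt`. That conversion in isolation:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	x := int64(42)
	p := unsafe.Pointer(&x)
	// reflect.NewAt interprets the memory at p as a *int64 — the
	// same call asPointerTo uses to rejoin reflection code paths.
	v := reflect.NewAt(reflect.TypeOf(int64(0)), p)
	fmt.Println(v.Elem().Int()) // 42
}
```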
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 00000000..aca8eed0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 00000000..c9e5fa02 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,599 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
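`tagMap`, declared above with its fast-path limit constant following, avoids a map lookup for the common case of small tag numbers: tags below the limit index a dense slice in which -1 means absent, and anything larger falls back to a plain map. A stand-alone copy of the same get/put scheme (`hybrid` is an illustrative name):

```go
package main

import "fmt"

// hybrid mirrors tagMap: small tags index a dense slice where -1
// means absent; large tags fall back to a map.
type hybrid struct {
	fast []int
	slow map[int]int
}

const fastLimit = 1024 // cf. tagMapFastLimit

func (h *hybrid) put(tag, fi int) {
	if tag > 0 && tag < fastLimit {
		for len(h.fast) < tag+1 {
			h.fast = append(h.fast, -1)
		}
		h.fast[tag] = fi
		return
	}
	if h.slow == nil {
		h.slow = make(map[int]int)
	}
	h.slow[tag] = fi
}

func (h *hybrid) get(tag int) (int, bool) {
	if tag > 0 && tag < fastLimit {
		if tag >= len(h.fast) {
			return 0, false
		}
		fi := h.fast[tag]
		return fi, fi >= 0
	}
	fi, ok := h.slow[tag]
	return fi, ok
}

func main() {
	var h hybrid
	h.put(3, 0)
	h.put(5000, 1)
	fmt.Println(h.get(3))    // 0 true
	fmt.Println(h.get(5000)) // 1 true
}
```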
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
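`Parse` above is the inverse of `String`: it splits the `protobuf` struct tag on commas into wire type, tag number, and flags. Reading such a tag directly with the standard library, where the `example` type is illustrative:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// example carries the kind of tag Parse consumes.
type example struct {
	Name *string `protobuf:"bytes,1,opt,name=name"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	fields := strings.Split(f.Tag.Get("protobuf"), ",")
	// fields[0] = wire type, fields[1] = tag number, rest are flags
	fmt.Println(fields[0], fields[1], fields[2], fields[3]) // bytes 1 opt name=name
}
```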
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
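`GetProperties` above shows the classic read-mostly cache: an `RLock` fast path, then a full lock with a re-check inside `getPropertiesLocked` (and the early `propertiesMap[t] = prop` insertion also breaks the cycle for recursive messages). The pattern reduced to its essentials, with `propsFor` as a stand-in for the real computation; the registries declared next can get away with plain unguarded maps because generated code populates them from `init` functions:

```go
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	cacheMu sync.RWMutex
	cache   = map[reflect.Type]string{}
)

// propsFor takes a read lock for the common cache hit and upgrades
// to a write lock on miss, re-checking after the upgrade because
// another goroutine may have filled the entry in between.
func propsFor(t reflect.Type) string {
	cacheMu.RLock()
	s, ok := cache[t]
	cacheMu.RUnlock()
	if ok {
		return s
	}
	cacheMu.Lock()
	defer cacheMu.Unlock()
	if s, ok := cache[t]; ok {
		return s
	}
	s = "properties of " + t.String()
	cache[t] = s
	return s
}

func main() {
	fmt.Println(propsFor(reflect.TypeOf(struct{ A int32 }{})))
}
```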
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 00000000..40ea3dd9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 00000000..5a5fd93f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..9b1538d0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,3006 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
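The Skip function in skip_gogo.go above consumes exactly one key/value pair from the front of a buffer, recursing for groups, and returns how many bytes it covered. A small usage sketch, assuming the canonical import path for this vendored package:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// Field number 1, wire type 0 (varint), value 150:
	// the key byte 0x08, then the varint payload 0x96 0x01.
	data := []byte{0x08, 0x96, 0x01}
	n, err := proto.Skip(data)
	fmt.Println(n, err) // 3 <nil>: the whole field was consumed
}
```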
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field // offset of XXX_unrecognized
+	extensions   field // offset of XXX_InternalExtensions
+	v1extensions field // offset of XXX_extensions
+	sizecache    field // offset of XXX_sizecache
+	initialized  int32 // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool  // uses message set wire format
+	hasmarshaler bool  // has custom marshaler
+	sync.RWMutex       // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool   // field is required
+	name       string // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from the type of the message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
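+	// tags[0] is the wire encoding and tags[1] the field number; tags[2] carries the
+	// cardinality (opt/req/rep), and later entries options such as name=, def=, packed and proto3.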
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
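+// The returned sizer and marshaler are stored in the field's info and called via function pointers.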
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return 
makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, 
appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
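The zigzag sizers above rely on the mapping (v<<1)^(v>>31) (with >>63 as the 64-bit analogue), which folds small negative numbers onto small unsigned ones before varint sizing. A standalone check with local helpers that mirror those formulas (sizeVarint here is a hypothetical reimplementation of SizeVarint, not the vendored one):

```go
package main

import "fmt"

// sizeVarint counts the bytes a value occupies in varint encoding.
func sizeVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// zigzag32 mirrors the expression used by the zigzag32 sizers above.
func zigzag32(v int32) uint64 {
	return uint64((uint32(v) << 1) ^ uint32(v>>31))
}

func main() {
	for _, v := range []int32{0, -1, 1, -2, 150} {
		z := zigzag32(v)
		fmt.Printf("v=%4d zigzag=%3d varint bytes=%d\n", v, z, sizeVarint(z))
	}
	// -1 maps to 1 and sizes to a single byte, instead of the ten bytes
	// its two's-complement varint encoding would take.
}
```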
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
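+	// Each case emits v seven bits at a time, least-significant group first,
+	// with the continuation bit (0x80) set on every byte but the last.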
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
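Note the contract of the `appendUTF8String*` variants above: the offending bytes are still written, and `errInvalidUTF8` is only reported afterwards, so one bad string field never truncates the output. A small sketch of that behavior with a stand-in error value (names here are illustrative, not the package's):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// appendChecked mirrors appendUTF8StringValue: the bytes are appended either
// way; an invalid string only changes the returned error.
func appendChecked(b []byte, s string) ([]byte, error) {
	var err error
	if !utf8.ValidString(s) {
		err = fmt.Errorf("proto: invalid UTF-8 string") // stand-in for errInvalidUTF8
	}
	b = append(b, s...)
	return b, err
}

func main() {
	b, err := appendChecked(nil, string([]byte{0xff, 0xfe}))
	fmt.Printf("wrote %d bytes, err=%v\n", len(b), err)
}
```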
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
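makeMessageMarshaler above shows the encoder's two-pass design: the sizer computes and caches each submessage size, and the marshaler replays the cached value (`u.cachedsize`) so the length prefix can be written before the body without re-walking the message. A hand-rolled sketch of the resulting tag/length/body framing (standalone, not the package's helpers):

```go
package main

import "fmt"

// appendVarint is the usual base-128 varint append, mirroring the helper
// used throughout this file.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// A submessage on the wire is: tag (field<<3|WireBytes), varint length, body.
// The size pass determines the body length first; the marshal pass then
// emits the prefix followed by the already-sized body.
func main() {
	body := []byte{0x08, 0x96, 0x01} // e.g. an inner varint field 1 = 150
	const fieldNum = 3
	const wireBytes = 2

	var b []byte
	b = appendVarint(b, uint64(fieldNum<<3|wireBytes)) // tag
	b = appendVarint(b, uint64(len(body)))             // length prefix from the size pass
	b = append(b, body...)
	fmt.Printf("% x\n", b) // 1a 03 08 96 01
}
```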
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). + // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. 
+ p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
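In makeMapMarshaler above, every map entry is framed as a tiny nested message with the key under tag 1 and the value under tag 2, and `deterministic` forces a sort over the keys so equal maps always marshal to identical bytes. A simplified sketch of that ordering step (illustrative only):

```go
package main

import (
	"fmt"
	"sort"
)

// With deterministic marshaling, map keys are sorted before encoding so the
// same map always produces byte-identical output. Each entry then becomes a
// small nested message: key under tag 1, value under tag 2.
func main() {
	m := map[int32]string{3: "c", 1: "a", 2: "b"}

	keys := make([]int, 0, len(m))
	for k := range m {
		keys = append(keys, int(k))
	}
	sort.Ints(keys) // mirrors sort.Sort(mapKeys(keys)) in the real marshaler

	for _, k := range keys {
		// tag 1 (varint key), tag 2 (length-delimited value)
		fmt.Printf("entry{1: %d, 2: %q}\n", k, m[int32(k)])
	}
}
```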
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
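The message-set routines follow the schema in the comment above: each extension is wrapped in a group numbered 1, carrying `type_id` at tag 2 and the encoded message at tag 3. A self-contained sketch of one such item (the constants mirror the standard wire types; nothing here is the package's own helper set):

```go
package main

import "fmt"

const (
	wireVarint     = 0
	wireBytes      = 2
	wireStartGroup = 3
	wireEndGroup   = 4
)

func appendVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// One message-set item, following the schema in the comment above:
// group 1 { type_id = 2; message = 3 }.
func main() {
	typeID := uint64(12345)
	payload := []byte{0x08, 0x01} // some already-encoded extension message

	var b []byte
	b = append(b, 1<<3|wireStartGroup) // open Item group
	b = append(b, 2<<3|wireVarint)     // type_id field
	b = appendVarint(b, typeID)
	b = append(b, 3<<3|wireBytes) // message field, length-delimited
	b = appendVarint(b, uint64(len(payload)))
	b = append(b, payload...)
	b = append(b, 1<<3|wireEndGroup) // close Item group
	fmt.Printf("% x\n", b)
}
```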
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. 
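Size and Marshal below both use the same dispatch: try the interface the generated code implements (`newMarshaler`), fall back to the reflection-driven table info otherwise. A schematic of that pattern with made-up stand-in types (nothing here is the proto package's API):

```go
package main

import "fmt"

// The dispatch pattern used by Size and Marshal below: prefer a method the
// generated code provides, fall back to reflection otherwise. Types and
// method names here are illustrative stand-ins.
type sizer interface{ EncodedSize() int }

type generated struct{ payload []byte }

func (g generated) EncodedSize() int { return len(g.payload) }

func sizeOf(v interface{}) int {
	if s, ok := v.(sizer); ok {
		return s.EncodedSize() // fast path: generated code knows its own size
	}
	return -1 // slow path would consult reflection-based table info
}

func main() {
	fmt.Println(sizeOf(generated{payload: make([]byte, 5)})) // 5
	fmt.Println(sizeOf("not generated"))                     // -1
}
```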
+type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 00000000..997f57c1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
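grow above implements the usual amortized-doubling policy: at least double the current length, but never allocate less than the pending write needs. A quick standalone demonstration of how the capacity evolves:

```go
package main

import "fmt"

// grow mirrors the policy in Buffer.grow above: at least double the current
// length, but never allocate less than the pending write needs.
func grow(buf []byte, n int) []byte {
	need := len(buf) + n
	if need <= cap(buf) {
		return buf
	}
	newCap := len(buf) * 2
	if newCap < need {
		newCap = need
	}
	return append(make([]byte, 0, newCap), buf...)
}

func main() {
	var b []byte
	for i := 0; i < 4; i++ {
		b = grow(b, 100)
		b = append(b, make([]byte, 100)...)
		fmt.Printf("after write %d: len=%d cap=%d\n", i+1, len(b), cap(b))
	}
	// caps grow 100 -> 200 -> 400 -> 400: repeated marshals into one
	// Buffer amortize to O(1) allocations per byte written.
}
```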
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. 
+ if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
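makeCustomPtrMarshaler and makeCustomMarshaler above only require the field type to provide its own Size and Marshal; the table then frames the result as an ordinary length-delimited field. A sketch of the shape such a type takes — `MyFixed4` is invented purely for illustration:

```go
package main

import "fmt"

// MyFixed4 is a made-up example of gogo's `custom` hook: a field type that
// provides its own Size/Marshal and gets wrapped as a length-delimited field.
type MyFixed4 [4]byte

func (m MyFixed4) Size() int { return len(m) }

func (m MyFixed4) Marshal() ([]byte, error) { return m[:], nil }

func main() {
	v := MyFixed4{1, 2, 3, 4}
	buf, _ := v.Marshal()
	// the table marshaler would prepend tag + varint(Size()) here
	fmt.Printf("size=%d body=% x\n", v.Size(), buf)
}
```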
+ return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
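The stdtime/stdduration marshalers above convert `time.Time` and `time.Duration` into the well-known Timestamp/Duration messages on the fly and length-prefix the result. The core of that conversion is a seconds/nanos split, roughly as sketched here (assumed to match what `durationProto` computes):

```go
package main

import (
	"fmt"
	"time"
)

// The well-known Duration message carries int64 seconds plus int32 nanos.
// durationProto above performs essentially this split before marshaling.
func toDurationFields(d time.Duration) (secs int64, nanos int32) {
	nanosTotal := d.Nanoseconds()
	secs = nanosTotal / 1e9
	nanos = int32(nanosTotal - secs*1e9)
	return secs, nanos
}

func main() {
	d := 90*time.Second + 500*time.Millisecond
	s, n := toDurationFields(d)
	fmt.Printf("%v -> seconds=%d nanos=%d\n", d, s, n) // 1m30.5s -> 90, 500000000
}
```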
+ return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 00000000..f520106e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? 
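The `isPointer`/`basicWidth` bookkeeping above exists so the merge loop can cheaply skip a source field that is all zero bits, which is safe because proto3 merge semantics never let a zero scalar overwrite the destination. The semantics in miniature (a standalone sketch, not the package's code):

```go
package main

import "fmt"

// Proto3 merge semantics for scalars: a zero-valued source field never
// overwrites the destination, which is why the merge loop above may skip
// a source word that is all zero bits without calling the field's merger.
func mergeInt32(dst *int32, src int32) {
	if src != 0 {
		*dst = src
	}
}

func main() {
	d := int32(7)
	mergeInt32(&d, 0) // zero source: no effect
	fmt.Println(d)    // 7
	mergeInt32(&d, 42)
	fmt.Println(d) // 42
}
```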
+ out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..bb2622f2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2245 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized int32
+	lock        sync.Mutex                    // prevents double initialization
+	dense       []unmarshalFieldInfo          // fields indexed by tag #
+	sparse      map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields   []string                      // names of required fields
+	reqMask     uint64                        // 1<<len(reqFields)-1 when reqFields is all set
+
+	unrecognized    field            // offset of XXX_unrecognized
+	extensions      field            // offset of extensions field (of type proto.XXX_InternalExtensions)
+	oldExtensions   field            // offset of old-form extensions field (of type map[int]Extension)
+	bytesExtensions field            // offset of XXX_extensions with type []byte
+	extensionRanges []ExtensionRange // if non-nil, the extension ranges for the message
+	isMessageSet    bool             // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
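The loop above inlines the decoding of one- and two-byte varints — which covers every field tag below 2^14 — and only falls back to the general decoder for longer values. A standalone sketch of that fast path (names are illustrative, not the package's internals):

```go
package main

import "fmt"

// decodeVarintSlow is the general base-128 decoder the fast path falls back
// to; it returns the value and the number of bytes consumed (0 on failure).
func decodeVarintSlow(b []byte) (uint64, int) {
	var v uint64
	for i := 0; i < len(b) && i < 10; i++ {
		v |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 128 {
			return v, i + 1
		}
	}
	return 0, 0 // truncated or overlong
}

// decodeVarint mirrors the inlined fast path in the unmarshal loop: most
// tags fit in one or two bytes, so those cases avoid the general loop.
func decodeVarint(b []byte) (uint64, int) {
	if len(b) >= 1 && b[0] < 128 {
		return uint64(b[0]), 1
	}
	if len(b) >= 2 && b[1] < 128 {
		return uint64(b[0]&0x7f) + uint64(b[1])<<7, 2
	}
	return decodeVarintSlow(b)
}

func main() {
	fmt.Println(decodeVarint([]byte{0x08}))       // 8 1
	fmt.Println(decodeVarint([]byte{0x96, 0x01})) // 150 2
}
```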
+ z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+			f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if fn.IsValid() && len(oneofFields) > 0 {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
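+		// Illustrative note (added in this edit, not from the upstream
+		// gogo/protobuf source): small tag numbers are stored in the dense
+		// slice for O(1) dispatch -- tag 3 lands at u.dense[3] -- while a
+		// very large tag such as 100000 falls through to the sparse map.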
+ for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return 
makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + + // We'll never have both pointer and slice for basic types. 
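+	// Illustrative note (added in this edit, not from the upstream
+	// gogo/protobuf source): a proto2 optional int32 maps to *int32
+	// (pointer only) and a repeated int32 to []int32 (slice only);
+	// []*int32 never occurs for basic types, hence the panic below.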
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
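+//
+// Editor's sketch (added in this edit, not part of the upstream file): each
+// unmarshaler below follows the same contract -- verify the wire type, decode
+// the raw value, store it through the field pointer, and return the unconsumed
+// bytes. The zigzag variants additionally undo the sint32/sint64 mapping of
+// 0,1,2,3,... back to 0,-1,1,-2,..., e.g.:
+//
+//	x := uint64(3)                      // value read off the wire
+//	v := int64(x>>1) ^ int64(x)<<63>>63 // decodes to -2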
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
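+// Illustrative note (added in this edit, not from the upstream source): the
+// wire type alone determines how much to skip -- varints self-terminate,
+// fixed32/fixed64 are 4/8 bytes, and WireBytes carries a varint length
+// prefix, so the bytes field payload [0x02 'h' 'i'] is skipped by consuming
+// the length byte plus two payload bytes.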
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
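+// Editor's example (added in this edit, not from the upstream source): the
+// two bytes [0xAC 0x02] decode as (0xAC & 0x7f) | 0x02<<7 = 44 + 256 = 300,
+// so the function returns (300, 2); a lone continuation byte like [0x80]
+// returns (0, 0).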
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 00000000..00d6c7ad --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. 
+		// The semantics of multiple submessages are weird. Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil,
errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice :=
f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 00000000..0407ba85 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
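+//
+// Editor's example (added in this edit, not from the upstream source): for a
+// message with a string field "name" and an int32 field "id", the writers in
+// this file produce text of the form:
+//
+//	name: "alice"
+//	id: 42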
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). 
+// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
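+			// For illustration (hypothetical field name), a map<string, int32>
+			// field "counts" holding {"a": 1} renders as:
+			//
+			//	counts: <
+			//	  key: "a"
+			//	  value: 1
+			//	>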
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. 
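+// In outline: scalars are printed via fmt, strings and []byte are quoted by
+// writeString, and structs recurse through writeStruct wrapped in '<'/'>'
+// (or '{'/'}' for groups).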
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
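+	// (Only nested messages are wrapped in angle brackets by writeAny; the
+	// top-level message prints as bare `field: value` lines.)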
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 00000000..1d6c6aa0 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 00000000..1ce0be2f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,1018 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
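+//
+// As a rough sketch of the accepted input: the parser consumes `name: value`
+// pairs, nested messages delimited by '<...>' or '{...}', repeated fields
+// given either one element at a time or in list form `f: [1, 2, 3]`, and
+// '#' comments running to end of line.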
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
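+		// (C-style string concatenation: for example, the input `"foo" 'bar'`
+		// yields a single token whose unquoted value is "foobar".)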
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
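+			// Everything after the last '/' is taken as the message name; for
+			// example, a hypothetical [type.googleapis.com/my.pkg.Msg] resolves
+			// to the registered message "my.pkg.Msg".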
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
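+// For example, `foo: 1, bar: 2` and `foo: 1; bar: 2` both parse the same as
+// `foo: 1 bar: 2`.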
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + + // Read one. + p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := ×tamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. 
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
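+//
+// A minimal usage sketch (MyMessage stands in for a generated message type
+// with a string field "name"):
+//
+//	m := new(MyMessage)
+//	if err := UnmarshalText(`name: "alice"`, m); err != nil {
+//		// handle the parse error
+//	}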
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000..9324f654 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
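+// (For example, a time.Time in the year 20000 has no valid Timestamp
+// counterpart, since its Seconds value would exceed maxValidSeconds.)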
+func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := ×tamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 00000000..38439fa9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 00000000..b175d1b6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
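+ // Same framing as the float64 variant above; the payload is the encoded float32Value, whose value field uses fixed32 encoding.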
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
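+ // The raw []byte is first wrapped in a bytesValue message, so the frame carries the wrapper's encoding rather than the bare bytes.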
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 00000000..c1cf7bf8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
+ RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000..3496dc99 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
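+// Enum and bool fields count as scalar here; string, bytes, group, and message fields do not.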
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000..cacfa392 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP-based RPC implementations may choose the GET verb for safe +// methods, and the PUT verb for idempotent methods, instead of the default POST.
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
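+// Illustration, not part of the generated API: every getter in this file
+// follows the same nil-safe pattern, checking the receiver and the field
+// pointer before dereferencing, so callers can walk a descriptor without
+// guarding against nil at each step. For example:
+//
+//	var d *DescriptorProto           // a nil message is safe to query
+//	_ = d.GetName()                  // "" instead of a panic
+//	for _, f := range d.GetField() { // a nil slice yields no iterations
+//		_ = f.GetNumber()        // 0 while the field is unset
+//	}
+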
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicitly setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript
+	// number. Specifying JS_NUMBER for the jstype causes the generated JavaScript
+	// code to use the JavaScript "number" type. The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily? Lazy applies only to message-type
+	// fields. It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form. The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint. Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option. However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same. Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message. That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing. An implementation which chooses not to check required fields
+	// must be consistent about it. That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
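+	// For example, a quoted string in an option setting is carried in
+	// string_value, while an unquoted token such as an enum value name is
+	// carried in identifier_value.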
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
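+// A consumer can rebuild the dotted form by joining the parts in order,
+// wrapping each part whose is_extension is true in parentheses, as in the
+// "foo.(bar.baz).qux" example above.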
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For
+	// example, the "extendee" of an extension declaration appears at the
+	// beginning of the "extend" block and is shared by all extensions within
+	// the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant. For example, a "group" defines
+	//   both a type and a field in a single declaration. Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{19}
+}
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index. They form a path from
+	// the root FileDescriptorProto to the place where the definition occurs.
+	// For example, this path:
+	//     [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//     file.message_type(3)  // 4, 3
+	//         .field(7)         // 2, 7
+	//         .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//     repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//     repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//     optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name. If we removed
+	// the last element:
+	//     [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency. Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
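+	// For example, a three-element span of [ 3, 4, 12 ] describes a region
+	// that starts on line 3 at column 4 and ends on the same line at
+	// column 12.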
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage() {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{20}
+}
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified object. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
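+	// In Go slice notation, for instance, the annotated text of a generated
+	// file held as a []byte would be file[begin:end].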
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000..165b2110 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if 
this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, 
"Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + 
s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000..e0846a35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
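The `GoString` methods generated above back `fmt`'s `%#v` verb, so a descriptor value prints as a compilable Go literal, which is convenient when producing test fixtures. A minimal sketch of that usage, written against the import paths as they appear in this vendor tree (upstream lives at github.com/gogo/protobuf):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	// %#v dispatches to the generated GoString method and prints a Go
	// literal that reconstructs fd; pointer fields are wrapped in small
	// func(v string) *string helpers, as valueToGoStringDescriptor shows.
	fmt.Printf("%#v\n", fd)
}
```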
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go 
b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 00000000..ceadde6a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE new 
file mode 100644 index 00000000..0f646931 --- /dev/null +++ b/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 00000000..e9cc2025 --- /dev/null +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1284 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + + // Whether to render fields with zero values. + EmitDefaults bool + + // A string to indent each level by. The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string + + // Whether to use the original (.proto) name for fields. + OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +// +// The JSON marshaling must follow the proto to JSON specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. 
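+// For example, a message can take over its own JSON form (a sketch; MyMsg is a
+// hypothetical generated type, not part of this file):
+//
+//	func (m *MyMsg) UnmarshalJSONPB(u *Unmarshaler, b []byte) error {
+//		// parse b in whatever custom format MarshalJSONPB produced
+//		return nil
+//	}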
+// +// The JSON unmarshaling must follow the JSON to proto specification: +// https://developers.google.com/protocol-buffers/docs/proto3#json +type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + v := reflect.ValueOf(pb) + if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) { + return errors.New("Marshal called with nil") + } + // Check for unset required fields first. + if err := checkRequiredFields(pb); err != nil { + return err + } + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "", "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type wkt interface { + XXX_WellKnownType() string +} + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". 
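+ // For example, after the trimming below: (seconds=1, nanos=0) marshals as
+ // "1s", (1, 500000000) as "1.500s", and (1, 1) as "1.000000001s".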
+ f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. 
+ ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." + v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(wkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.JSONName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +// marshalValue writes the value to the Writer. +func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + var err error + v = reflect.Indirect(v) + + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + + // Handle repeated elements. 
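+ // []byte is deliberately excluded here: bytes fields fall through to the
+ // encoding/json default at the bottom, which base64-encodes them.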
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil { + return err + } + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle well-known types. + // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + // Handle enumerations. + if !m.EnumsAsInts && prop.Enum != "" { + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + enumStr := v.Interface().(fmt.Stringer).String() + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. 
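+//
+// A typical round trip, as a sketch (pb.Example stands in for any generated
+// message type):
+//
+//	m := &Marshaler{EmitDefaults: true}
+//	s, err := m.MarshalToString(msg) // msg is a *pb.Example
+//	if err != nil { /* handle */ }
+//	var out pb.Example
+//	err = UnmarshalString(s, &out)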
+type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. 
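The Struct, ListValue and Value branches above accept arbitrary JSON. A short sketch, assuming the vendored structpb package:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Arbitrary JSON decodes into the dynamic Struct well-known type;
	// numbers, strings, bools, nulls, lists and nested objects map onto
	// the Value oneof cases handled above.
	st := &structpb.Struct{}
	if err := jsonpb.UnmarshalString(`{"a": 1, "b": ["x", true], "c": null}`, st); err != nil {
		panic(err)
	}
	fmt.Println(st.Fields["a"].GetNumberValue())            // 1
	fmt.Println(len(st.Fields["b"].GetListValue().Values))  // 2
}
```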
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
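A sketch of the unknown-field and enum-by-name behavior implemented above, assuming the generated descriptor package from the same vendored module (any registered proto2 message with an enum field would do):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	in := `{"name": "id", "type": "TYPE_STRING", "bogus": 1}`

	// By default an unrecognized key is an error.
	f := &descriptor.FieldDescriptorProto{}
	fmt.Println(jsonpb.UnmarshalString(in, f)) // unknown field "bogus" ...

	// With AllowUnknownFields set, "bogus" is silently dropped, and the
	// enum value is resolved by name via proto.EnumValueMap.
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(in), f); err != nil {
		panic(err)
	}
	fmt.Println(f.GetName(), f.GetType()) // id TYPE_STRING
}
```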
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + if origName || prop.JSONName == "" { + prop.JSONName = prop.OrigName + } + return &prop +} + +type fieldNames struct { + orig, camel string +} + +func acceptedJSONFieldNames(prop *proto.Properties) fieldNames { + opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName} + if prop.JSONName != "" { + opts.camel = prop.JSONName + } + return opts +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +// +// Numeric keys are sorted in numeric order per +// https://developers.google.com/protocol-buffers/docs/proto#maps. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + if k := s[i].Kind(); k == s[j].Kind() { + switch k { + case reflect.String: + return s[i].String() < s[j].String() + case reflect.Int32, reflect.Int64: + return s[i].Int() < s[j].Int() + case reflect.Uint32, reflect.Uint64: + return s[i].Uint() < s[j].Uint() + } + } + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} + +// checkRequiredFields returns an error if any required field in the given proto message is not set. +// This function is used by both Marshal and Unmarshal. While required fields only exist in a +// proto2 message, a proto3 message can contain proto2 message(s). +func checkRequiredFields(pb proto.Message) error { + // Most well-known type messages do not contain required fields. The "Any" type may contain + // a message that has required fields. + // + // When an Any message is being marshaled, the code will invoked proto.Unmarshal on Any.Value + // field in order to transform that into JSON, and that should have returned an error if a + // required field is not set in the embedded message. + // + // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the + // embedded message to store the serialized message in Any.Value field, and that should have + // returned an error if a required field is not set. + if _, ok := pb.(wkt); ok { + return nil + } + + v := reflect.ValueOf(pb) + // Skip message if it is not a struct pointer. + if v.Kind() != reflect.Ptr { + return nil + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return nil + } + + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + sfield := v.Type().Field(i) + + if sfield.PkgPath != "" { + // blank PkgPath means the field is exported; skip if not exported + continue + } + + if strings.HasPrefix(sfield.Name, "XXX_") { + continue + } + + // Oneof field is an interface implemented by wrapper structs containing the actual oneof + // field, i.e. an interface containing &T{real_value}. 
+ if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 00000000..3cd3249f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
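A minimal sketch of the Clone and Merge contracts defined above, using the well-known Duration message as a stand-in for any generated proto3 type:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	dst := &durpb.Duration{Seconds: 1}
	src := &durpb.Duration{Nanos: 500}

	// Merge copies set fields from src into dst. Duration is a proto3
	// message, so src.Seconds == 0 is the zero value and is not copied;
	// dst keeps Seconds: 1 and gains Nanos: 500.
	proto.Merge(dst, src)
	fmt.Println(dst)

	// Clone returns an independent deep copy.
	cp := proto.Clone(dst).(*durpb.Duration)
	cp.Nanos = 0
	fmt.Println(dst.Nanos) // still 500
}
```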
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 00000000..63b0f08b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
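A short sketch of the varint round trip, pairing the decoders above with the EncodeVarint helper from encode.go later in this diff:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 encodes as two varint bytes: 0xAC 0x02.
	b := proto.EncodeVarint(300)
	fmt.Printf("% x\n", b) // ac 02

	// The standalone helper returns the value and the bytes consumed...
	x, n := proto.DecodeVarint(b)
	fmt.Println(x, n) // 300 2

	// ...while the Buffer method advances an internal read index.
	buf := proto.NewBuffer(b)
	x, err := buf.DecodeVarint()
	if err != nil {
		panic(err)
	}
	fmt.Println(x) // 300
}
```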
+func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. 
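A minimal round-trip sketch of the reset-then-decode contract documented above, again borrowing the well-known Duration message:

```go
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := ptypes.DurationProto(90 * time.Second)
	b, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	// Unmarshal resets dst first, so the stale Nanos cannot leak through;
	// UnmarshalMerge would have kept it and merged on top.
	dst := &durpb.Duration{Nanos: 42}
	if err := proto.Unmarshal(b, dst); err != nil {
		panic(err)
	}
	fmt.Println(proto.Equal(src, dst)) // true
}
```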
+func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 00000000..35b882c0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 00000000..dea2617c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
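A sketch of the preserve-then-discard behavior described above; it decodes a StringValue payload into Empty so that field 1 lands in XXX_unrecognized (both messages come from the vendored ptypes packages):

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/empty"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Empty has no fields, so the StringValue's field 1 is unknown here
	// and is preserved rather than dropped.
	b, _ := proto.Marshal(&wrappers.StringValue{Value: "x"})
	e := &empty.Empty{}
	if err := proto.Unmarshal(b, e); err != nil {
		panic(err)
	}
	fmt.Println(proto.Size(e)) // 3: the unknown field would round-trip

	// DiscardUnknown drops the preserved bytes.
	proto.DiscardUnknown(e)
	fmt.Println(proto.Size(e)) // 0
}
```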
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 00000000..3abfed2c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. 
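A short sketch combining the helpers above with the zigzag and length-delimited encoders that follow, writing through a single Buffer:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	var b proto.Buffer

	_ = b.EncodeVarint(300)         // ac 02
	v := int64(-3)
	_ = b.EncodeZigzag64(uint64(v)) // zigzag(-3) = 5 -> 05
	_ = b.EncodeStringBytes("hi")   // length-prefixed: 02 68 69

	fmt.Printf("% x\n", b.Bytes())     // ac 02 05 02 68 69
	fmt.Println(proto.SizeVarint(300)) // 2
}
```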
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 00000000..f9b6e41b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,301 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
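+//
+// (An illustrative sketch of Equal in use, not part of the upstream
+// file; it reuses the generated Test message shown in this package's
+// documentation in lib.go:
+//
+//	a := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
+//	b := &pb.Test{Label: proto.String("x"), Reps: []int64{1, 2}}
+//	proto.Equal(a, b)   // true: same type, fields pairwise equal
+//	proto.Equal(a, nil) // false: only nil equals nil
+//
+// Per the rules above, NaN-valued float fields never compare equal.)
+//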
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. 
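+//
+// (Illustrative aside, not part of the upstream file: note the proto3
+// []byte edge case in equalAny above. For a proto3 message, a nil bytes
+// field and a zero-length one compare equal:
+//
+//	a := &pb3.Msg{Data: nil}
+//	b := &pb3.Msg{Data: []byte{}}
+//	proto.Equal(a, b) // true: nil == {} for proto3 bytes
+//
+// Here pb3.Msg stands for any proto3-generated message with a bytes
+// field named Data.)
+//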
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000..fa88add3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,607 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
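+//
+// (Illustrative aside, not part of the upstream file: both generated
+// forms expose the exported ExtensionRangeArray method required above,
+// so client code can inspect which field numbers a message reserves for
+// extensions:
+//
+//	for _, r := range msg.ExtensionRangeArray() {
+//		fmt.Printf("extensions %d to %d\n", r.Start, r.End) // both inclusive
+//	}
+//
+// Here msg stands for any message generated with extension ranges.)
+//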
+// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. + value interface{} + + // enc is the raw bytes for the extension field. + enc []byte +} + +// SetRawExtension is for testing only. 
+func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return extensionAsLegacyType(e.value), nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = extensionAsStorageType(v) + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return extensionAsLegacyType(e.value), nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. 
+// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
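+//
+// (A minimal sketch of the extension API above, not part of the
+// upstream file. pb.E_Ext stands for some generated *ExtensionDesc
+// whose ExtensionType is *string:
+//
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("v")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		v, _ := proto.GetExtension(msg, pb.E_Ext)
+//		s := *v.(*string) // "v"
+//		_ = s
+//	}
+//	proto.ClearExtension(msg, pb.E_Ext)
+//
+// SetExtension rejects a value whose type differs from ExtensionType.)
+//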
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 00000000..fdd328bb --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,965 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: 
&pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. +func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
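+//
+// (An illustrative sketch of Buffer reuse, not part of the upstream
+// file; msg1 and msg2 stand for any generated messages:
+//
+//	var b proto.Buffer
+//	if err := b.Marshal(msg1); err != nil {
+//		log.Fatal(err)
+//	}
+//	// consume b.Bytes() here; it aliases the internal buffer
+//	b.Reset() // keep the backing array for the next message
+//	if err := b.Marshal(msg2); err != nil {
+//		log.Fatal(err)
+//	}
+//
+// Reusing one Buffer avoids the temporary allocated by the
+// package-level Marshal on every call.)
+//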
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
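+//
+// (Illustrative aside, not part of the upstream file: since proto2
+// scalar fields are pointers, the helpers above replace a two-line
+// temporary with a single call. Reusing the generated Test message from
+// this package's documentation:
+//
+//	msg := &pb.Test{
+//		Label: proto.String("hello"), // *string
+//		Type:  proto.Int32(17),       // *int32
+//	}
+//	_ = msg
+//
+// Each helper just allocates a new value and returns its address.)
+//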
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. 
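+//
+// (A sketch of SetDefaults, not part of the upstream file, again using
+// the generated Test message, whose type field declares [default=77]:
+//
+//	msg := &pb.Test{Label: proto.String("x")} // Type left unset
+//	proto.SetDefaults(msg)
+//	// *msg.Type == 77 (Default_Test_Type); fields without a declared
+//	// default are left nil.
+//
+// Only fields that are both unset and have a declared default are
+// modified, and non-nil sub-messages are visited recursively.)
+//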
+func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. 
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. 
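+//
+// (Illustrative aside, not part of the upstream file: mapKeys below is
+// what makes Buffer.SetDeterministic(true) reproducible. Map entries
+// are emitted in the key order defined here, so for a map[bool]string
+// the false entry always precedes the true one:
+//
+//	var b proto.Buffer
+//	b.SetDeterministic(true)
+//	_ = b.Marshal(msg) // equal messages now serialize to identical bytes
+//
+// As the SetDeterministic comment warns, this ordering is an
+// implementation detail, not a canonical form.)
+//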
+func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 00000000..f48a7567 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. 
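+//
+// (Illustrative aside, not part of the upstream file: on the wire each
+// message-set entry is group number 1 carrying a varint type_id in
+// field 2 and the embedded message bytes in field 3. For type_id 5 and
+// a 3-byte payload the entry reads:
+//
+//	0x0B      // tag: field 1, wire type 3 (start group)
+//	0x10 0x05 // tag: field 2, varint; type_id = 5
+//	0x1A 0x03 // tag: field 3, bytes; 3-byte message follows
+//	...       // the embedded message
+//	0x0C      // tag: field 1, wire type 4 (end group)
+//
+// unmarshalMessageSet below re-packages each such entry as an ordinary
+// length-delimited extension field keyed by its type_id.)
+//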
+ +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..94fa9194 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,360 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
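The file that follows implements generic field access with package reflect (its unsafe counterpart, pointer_unsafe.go, appears further below and does the same with raw byte offsets). As a rough standalone sketch of the underlying idea, not code from this package: a field identified by an index path can be read and written via `reflect.Value.FieldByIndex`, which is exactly what the `field []int` representation here feeds:

```go
package main

import (
	"fmt"
	"reflect"
)

type inner struct{ N int32 }
type outer struct {
	A string
	B inner
}

func main() {
	v := outer{A: "x"}
	p := reflect.ValueOf(&v).Elem()

	// A "field" here is the index path reflect uses to reach a struct
	// member, mirroring the `type field []int` representation below.
	path := []int{1, 0} // outer.B.N

	f := p.FieldByIndex(path)
	f.SetInt(42)
	fmt.Println(v.B.N) // 42
}
```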
+ +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + if deref { + u = u.Elem() + } + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. 
+ // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. 
+// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..dbfffe07 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,313 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + } + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. 
+ /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. 
+// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000..a4b8c0cd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
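The `tagMap` fast-slice/slow-map split above can be exercised in isolation; the following is a reduced standalone copy of the same logic with a small driver (the type and constant mirror those in this file):

```go
package main

import "fmt"

type tagMap struct {
	fastTags []int
	slowTags map[int]int
}

const tagMapFastLimit = 1024

func (p *tagMap) put(t, fi int) {
	if t > 0 && t < tagMapFastLimit {
		for len(p.fastTags) < t+1 {
			p.fastTags = append(p.fastTags, -1) // -1 marks "absent"
		}
		p.fastTags[t] = fi
		return
	}
	if p.slowTags == nil {
		p.slowTags = make(map[int]int)
	}
	p.slowTags[t] = fi
}

func (p *tagMap) get(t int) (int, bool) {
	if t > 0 && t < tagMapFastLimit {
		if t >= len(p.fastTags) {
			return 0, false
		}
		fi := p.fastTags[t]
		return fi, fi >= 0
	}
	fi, ok := p.slowTags[t]
	return fi, ok
}

func main() {
	var m tagMap
	m.put(3, 0)    // small tag: stored in the slice
	m.put(5000, 1) // large tag: falls back to the map
	fmt.Println(m.get(3))    // 0 true
	fmt.Println(m.get(5000)) // 1 true
}
```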
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
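As a quick check on the tag grammar that `Parse` above consumes, the `"bytes,49,opt,name=foo,def=hello!"` example from its comments splits as follows (a standalone sketch covering only a subset of what `Parse` handles):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A representative generated tag; def is always last and its commas
	// are not escaped, which is why Parse rejoins any trailing fields.
	tag := "bytes,49,opt,name=foo,def=hello!"

	fields := strings.Split(tag, ",")
	fmt.Println(fields[0]) // "bytes" -> wire encoding
	fmt.Println(fields[1]) // "49"    -> field number
	fmt.Println(fields[2]) // "opt"   -> optional
	for _, f := range fields[3:] {
		if strings.HasPrefix(f, "name=") {
			fmt.Println("name:", f[5:])
		}
		if strings.HasPrefix(f, "def=") {
			fmt.Println("default:", f[4:])
		}
	}
}
```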
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. 
+ sort.Sort(prop) + + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. 
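The registration pattern above, a package-level map filled once by generated code with duplicates treated as a programming error, can be shown standalone; the enum name and values below are hypothetical:

```go
package main

import "fmt"

var enumValueMaps = map[string]map[string]int32{}

// registerEnum mirrors RegisterEnum above: generated code installs a
// name->value map once per enum type, and a duplicate registration panics.
func registerEnum(typeName string, valueMap map[string]int32) {
	if _, ok := enumValueMaps[typeName]; ok {
		panic("duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

func main() {
	registerEnum("example.Color", map[string]int32{"RED": 0, "BLUE": 1})
	fmt.Println(enumValueMaps["example.Color"]["BLUE"]) // 1
}
```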
+func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..5cb11fa9 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2776 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
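Before the table-driven marshaler that follows, note how the registries above support lookups in both directions: name to Go type (`MessageType`) and Go type to name (`MessageName`). A reduced standalone sketch with a hypothetical message type:

```go
package main

import (
	"fmt"
	"reflect"
)

type myMessage struct{} // stand-in for a generated message type

var (
	typedNils     = map[string]interface{}{}
	revProtoTypes = map[reflect.Type]string{}
)

// register stores a typed nil pointer under the proto name and records
// the reverse mapping, as RegisterType above does.
func register(x interface{}, name string) {
	typedNils[name] = x
	revProtoTypes[reflect.TypeOf(x)] = name
}

func main() {
	register((*myMessage)(nil), "example.MyMessage")

	// name -> type, as in MessageType
	fmt.Println(reflect.TypeOf(typedNils["example.MyMessage"])) // *main.myMessage

	// type -> name, as in MessageName
	fmt.Println(revProtoTypes[reflect.TypeOf((*myMessage)(nil))]) // example.MyMessage
}
```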
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
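The typed-nil guard in `Size` above (and in `Marshal`, just after this note) exists because a nil `*T` stored in an interface is not a nil interface; a standalone demonstration of the distinction:

```go
package main

import "fmt"

type msg struct{}

func main() {
	var p *msg            // typed nil pointer
	var i interface{} = p // interface holding (*msg)(nil)

	fmt.Println(p == nil) // true
	fmt.Println(i == nil) // false: the interface carries a type, so it is non-nil
}
```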
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. 
+ if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } + sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + deref: deref, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
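For the wiretag arithmetic used by `setTag` and `wiretype` above, a standalone check with a hypothetical varint field numbered 3:

```go
package main

import "fmt"

const wireVarint = 0 // wire-type code for varint fields

func main() {
	tag := 3
	// The "wiretag" cached in marshalFieldInfo: field number shifted left
	// three bits, ORed with the wire type (see setTag above).
	key := uint64(tag)<<3 | wireVarint
	fmt.Println(key) // 24: encodes as a single varint byte, so tagsize is 1
}
```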
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return 
sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
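+// exampleWireTag is an illustrative sketch added in editing, not part of the
+// upstream package: the tagsize parameter threaded through the sizers below is
+// just SizeVarint of the field's wire tag, which packs the field number and
+// wire type into a single varint (as setTag does above). Field numbers 1-15
+// fit in a one-byte tag; field 16 is the first to need two bytes.
+func exampleWireTag() {
+	tag1 := uint64(1)<<3 | WireVarint   // 0x08
+	tag16 := uint64(16)<<3 | WireVarint // 0x80, no longer fits in one varint byte
+	fmt.Println(SizeVarint(tag1), SizeVarint(tag16)) // Output: 1 2
+}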
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
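+// exampleAppendVarint is an illustrative sketch added in editing, not part of
+// the upstream package: appendVarint emits seven payload bits per byte, least
+// significant group first, with the continuation bit (0x80) set on every byte
+// except the last. 300 = 0b1_0010_1100 splits into 0101100 (0x2c) and 10
+// (0x02), giving 0xac 0x02; fixed-width values are simply little-endian.
+func exampleAppendVarint() {
+	fmt.Printf("% x\n", appendVarint(nil, 300)) // Output: ac 02
+	fmt.Printf("% x\n", appendFixed32(nil, 1))  // Output: 01 00 00 00
+}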
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
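+	// Editor's note: the string has already been appended at this point, so a
+	// non-UTF-8 value still reaches the wire; errInvalidUTF8 is only reported
+	// afterwards and is treated as non-fatal by the callers that merge errors.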
+ if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
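+// Editor's note: groups (a legacy proto2 construct) are delimited by matching
+// start-group and end-group tags instead of a length prefix, which is why the
+// sizer below charges 2*tagsize per element and no SizeVarint(len) term appears.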
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. 
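+// Editor's note, derived from the MessageSet definition above: for a small id,
+// each item is framed as 0x0b (1<<3|WireStartGroup), 0x10 (2<<3|WireVarint),
+// the type_id varint, 0x1a (3<<3|WireBytes), the length-prefixed message, and
+// finally 0x0c (1<<3|WireEndGroup); these are exactly the bytes appended below.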
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) 
+ continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) 
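+	// Editor's note: growing to at least double the current length keeps
+	// repeated grow calls amortized O(1) per appended byte; e.g. with
+	// len(p.buf) == 100 and n == 50, need is 150 but newCap becomes 200.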
+} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 00000000..5525def6 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. 
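+	// merge performs the per-field merge. Editor's summary of the cases built
+	// in computeMergeInfo below: scalars copy a non-zero (or non-nil pointer)
+	// src value over dst, slices append src to dst, and message pointers are
+	// merged recursively.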
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..acee2fc5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2053 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
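+	// unmarshal consumes b one (tag, wire type) pair at a time, dispatching
+	// on the field number; unknown fields are skipped or preserved in
+	// XXX_unrecognized.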
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int32]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, the extension ranges in this message
+	isMessageSet    bool                          // if true, implies extensions field is []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// efficiently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
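+	// Each of these defaults is overwritten below if the generated struct
+	// actually carries the corresponding XXX_ field.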
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. 
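+		// (tptr is the generated wrapper type, e.g. *Msg_X; matching it
+		// against each oneof interface tells us which field stores this case.)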
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
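The sint32/sint64 unmarshalers that follow reverse protobuf's zigzag encoding with expressions like `int64(x>>1) ^ int64(x)<<63>>63`, which keeps small negative values small on the wire. A minimal, self-contained sketch of the round trip (the function names here are illustrative, not part of this package):

```go
package main

import "fmt"

// zigzag64 maps signed values to unsigned so small magnitudes stay small:
// 0, -1, 1, -2, 2, ... become 0, 1, 2, 3, 4, ...
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63) // arithmetic shift smears the sign bit
}

// unzigzag64 is the inverse, matching the expression used by unmarshalSint64Value.
func unzigzag64(x uint64) int64 {
	return int64(x>>1) ^ int64(x)<<63>>63
}

func main() {
	for _, v := range []int64{0, -1, 1, -64, 63, -1 << 40} {
		x := zigzag64(v)
		fmt.Printf("v=%d zigzag=%d roundtrip=%d\n", v, x, unzigzag64(x))
	}
}
```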
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := 
decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. 
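+// For example, a WireBytes field is skipped by reading its length varint and
+// then dropping that many payload bytes; a group is skipped by scanning ahead
+// to its matching end-group tag.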
+func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
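+// Worked example: 300 is encoded as [0xAC 0x02]. The first byte contributes
+// its low seven bits (0x2C = 44) and the second contributes 2<<7 = 256,
+// so the decoded value is 44 + 256 = 300.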
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 00000000..1aaee725 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,843 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
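+//
+// As a sketch of the output shape (the field names below are hypothetical),
+// a message with a string field, an int32 field, and a nested message
+// renders in expanded form as:
+//
+//	name: "hello"
+//	id: 42
+//	inner: <
+//	  flag: true
+//	>
+//
+// In compact form (TextMarshaler.Compact) the same content is emitted on a
+// single line, with spaces in place of newlines.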
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
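+//
+// For a registered message type, the expanded form looks like this (the
+// type URL and field below are illustrative only):
+//
+//	[type.googleapis.com/example.Foo]: <
+//	  bar: 1
+//	>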
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
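+			// For example (hypothetical field): a map<string, int32> entry
+			// ("a", 1) in a field named "counts" renders as:
+			//	counts: <
+			//	  key: "a"
+			//	  value: 1
+			//	>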
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. 
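+	// For example, math.Inf(1) renders as "inf", math.Inf(-1) as "-inf", and
+	// math.NaN() as "nan", matching the posInf/negInf/nan byte slices defined
+	// above; finite values fall through to the fmt.Fprint default case below.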
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
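+		// For example (illustrative input): writeString on the Go string
+		// "a\"b\n\x01" emits the characters  a\"b\n\001  between double
+		// quotes: the quote and newline use the two-character escapes
+		// below, and the unprintable 0x01 byte takes the three-digit
+		// octal escape.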
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
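+		// e.g. (hypothetical extension): a repeated int32 extension holding
+		// {1, 2} is written as two separate lines:
+		//	[example.my_ext]: 1
+		//	[example.my_ext]: 2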
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 00000000..bb55a3af --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func 
isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) 
+ s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. 
+ if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. 
+ switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. +func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go new file mode 100644 index 00000000..1ded05bb --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -0,0 +1,2887 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. 
+ // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = 
map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. 
+type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{0} +} + +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". 
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{1} +} + +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
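Before the message descriptor defined next, a note on the accessor pattern just shown: because proto2 optional fields are pointers, every generated getter checks both the receiver and the field for nil and returns the zero value instead of panicking. A short sketch, assuming the vendored github.com/golang/protobuf import paths this file ships under:

```go
// Demonstrates the nil-safe getters on FileDescriptorProto.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	fd := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		// Syntax is deliberately left unset.
	}
	fmt.Println(fd.GetName())   // "example.proto"
	fmt.Println(fd.GetSyntax()) // "" — unset optional field yields the zero value

	var nilFd *descriptor.FileDescriptorProto
	fmt.Println(nilFd.GetName()) // "" — safe even on a nil receiver
}
```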
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2} +} + +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if 
m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
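The ExtensionRange accessors above expose raw start/end values; in descriptor.proto the start is inclusive and the end exclusive. A sketch of a hypothetical coversField helper built only on the getters shown here (the ReservedRange type is defined next):

```go
// Checks whether a field number falls inside an extension range.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// coversField reports whether field lies in [start, end), the
// descriptor.proto semantics for extension ranges.
func coversField(r *descriptor.DescriptorProto_ExtensionRange, field int32) bool {
	return field >= r.GetStart() && field < r.GetEnd()
}

func main() {
	r := &descriptor.DescriptorProto_ExtensionRange{
		Start: proto.Int32(100),
		End:   proto.Int32(200),
	}
	fmt.Println(coversField(r, 100)) // true  (start is inclusive)
	fmt.Println(coversField(r, 200)) // false (end is exclusive)
}
```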
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{4} +} + +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
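With FieldDescriptorProto complete, and before the oneof descriptor defined next, a sketch of how a tool might build one field and read it back; note that the enum getters fall back to LABEL_OPTIONAL and TYPE_DOUBLE when the pointer is nil, exactly as the functions above return them. Import paths again assume the vendored github.com/golang/protobuf tree:

```go
// Builds a minimal "repeated string tags = 3;" field descriptor.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	f := &descriptor.FieldDescriptorProto{
		Name:   proto.String("tags"),
		Number: proto.Int32(3),
		Label:  descriptor.FieldDescriptorProto_LABEL_REPEATED.Enum(),
		Type:   descriptor.FieldDescriptorProto_TYPE_STRING.Enum(),
	}
	fmt.Println(f.GetName(), f.GetNumber(), f.GetLabel(), f.GetType())

	empty := &descriptor.FieldDescriptorProto{}
	fmt.Println(empty.GetLabel()) // LABEL_OPTIONAL — the documented default
	fmt.Println(empty.GetType())  // TYPE_DOUBLE   — the documented default
}
```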
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{5} +} + +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6} +} + +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
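As the comment above stresses, EnumReservedRange is inclusive on both ends, while DescriptorProto.ReservedRange treats its end as exclusive. Before the enum value descriptor that follows, a sketch with two hypothetical helpers making that difference explicit:

```go
// Contrasts inclusive enum reserved ranges with exclusive message ones.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// enumValueReserved: EnumReservedRange covers [start, end], both inclusive.
func enumValueReserved(r *descriptor.EnumDescriptorProto_EnumReservedRange, n int32) bool {
	return n >= r.GetStart() && n <= r.GetEnd()
}

// fieldNumberReserved: DescriptorProto.ReservedRange covers [start, end).
func fieldNumberReserved(r *descriptor.DescriptorProto_ReservedRange, n int32) bool {
	return n >= r.GetStart() && n < r.GetEnd()
}

func main() {
	er := &descriptor.EnumDescriptorProto_EnumReservedRange{Start: proto.Int32(5), End: proto.Int32(10)}
	mr := &descriptor.DescriptorProto_ReservedRange{Start: proto.Int32(5), End: proto.Int32(10)}
	fmt.Println(enumValueReserved(er, 10))   // true: 10 lies inside the inclusive range
	fmt.Println(fieldNumberReserved(mr, 10)) // false: the message range excludes its end
}
```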
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{7} +} + +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
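Putting the two enum descriptor types together before moving on to services: a sketch that assembles the equivalent of `enum Status { OK = 0; ERROR = 1; }` from EnumValueDescriptorProto entries and walks it back through the getters:

```go
// Assembles and inspects a complete enum descriptor.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	status := &descriptor.EnumDescriptorProto{
		Name: proto.String("Status"),
		Value: []*descriptor.EnumValueDescriptorProto{
			{Name: proto.String("OK"), Number: proto.Int32(0)},
			{Name: proto.String("ERROR"), Number: proto.Int32(1)},
		},
	}
	for _, v := range status.GetValue() {
		fmt.Printf("%s.%s = %d\n", status.GetName(), v.GetName(), v.GetNumber())
	}
}
```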
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{8} +} + +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{9} +} + +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicitly setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field.
The option is permitted only for 64-bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as a JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript number. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. 
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18} +} + +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
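The NamePart encoding described in the comment above is easiest to see in a small sketch (not part of the vendored file); `formatOptionName` is a hypothetical helper, not an API of this package:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// formatOptionName joins NamePart segments with dots, wrapping extension
// segments in parentheses, as the comment above describes.
func formatOptionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	out := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			out = append(out, "("+p.GetNamePart()+")")
		} else {
			out = append(out, p.GetNamePart())
		}
	}
	return strings.Join(out, ".")
}

func main() {
	parts := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(formatOptionName(parts)) // foo.(bar.baz).qux
}
```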
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19} +} + +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition appears. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. 
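As a concrete reading of the path encoding above, the following sketch resolves the worked example [ 4, 3, 2, 7, 1 ] by hand. It is illustrative only, not part of the vendored file, and assumes the indexed message and field actually exist:

```go
package sourceinfo

import (
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// fieldNameAt follows the example path step by step:
//   4, 3 -> file.message_type(3)  (message_type is field number 4)
//   2, 7 -> .field(7)             (field is field number 2)
//   1    -> .name()               (name is field number 1)
func fieldNameAt(fd *descriptor.FileDescriptorProto) string {
	return fd.GetMessageType()[3].GetField()[7].GetName()
}
```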
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20} +} + +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
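Because the begin/end offsets described above are plain byte offsets, with end one past the last relevant byte, recovering the annotated slice of a generated file is a single slice expression. A sketch under the assumption that `gen` holds the generated file's bytes (not part of the vendored code):

```go
package genslice

import (
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// annotatedText returns the generated text an Annotation covers;
// len(result) == end - begin, per the half-open convention above.
func annotatedText(gen []byte, a *descriptor.GeneratedCodeInfo_Annotation) string {
	return string(gen[int(a.GetBegin()):int(a.GetEnd())])
}
```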
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_e5baabe45344a177, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) } + +var fileDescriptor_e5baabe45344a177 = []byte{ + // 2589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca, + 0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee, + 0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca, + 0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80, + 0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c, + 0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04, + 0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a, + 0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0, + 0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52, + 0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90, + 0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88, + 0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd, + 0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c, + 0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6, + 0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf, + 0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79, + 0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 
0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11, + 0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53, + 0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84, + 0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4, + 0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e, + 0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9, + 0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2, + 0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02, + 0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6, + 0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d, + 0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33, + 0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79, + 0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a, + 0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a, + 0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c, + 0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56, + 0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06, + 0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1, + 0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23, + 0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01, + 0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f, + 0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d, + 0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58, + 0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32, + 0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e, + 0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb, + 0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11, + 0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02, + 0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f, + 0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31, + 0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac, + 0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f, + 0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d, + 0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac, + 0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e, + 0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72, + 0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 
0xd6, 0x9d, 0x5b, + 0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2, + 0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e, + 0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94, + 0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a, + 0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61, + 0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0, + 0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5, + 0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a, + 0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a, + 0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8, + 0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64, + 0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c, + 0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8, + 0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb, + 0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2, + 0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33, + 0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4, + 0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15, + 0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39, + 0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9, + 0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41, + 0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40, + 0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35, + 0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0, + 0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4, + 0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53, + 0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e, + 0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d, + 0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e, + 0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f, + 0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36, + 0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60, + 0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1, + 0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d, + 0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45, + 0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58, + 0xbe, 
0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3, + 0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c, + 0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87, + 0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49, + 0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26, + 0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39, + 0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c, + 0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77, + 0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83, + 0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87, + 0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20, + 0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6, + 0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e, + 0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c, + 0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0, + 0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8, + 0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0, + 0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3, + 0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1, + 0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53, + 0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42, + 0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b, + 0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15, + 0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae, + 0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2, + 0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35, + 0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0, + 0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82, + 0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a, + 0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c, + 0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5, + 0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29, + 0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca, + 0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd, + 0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18, + 0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb, + 0x39, 0x37, 0xa5, 0x1f, 0xd2, 
0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae, + 0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6, + 0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a, + 0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47, + 0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8, + 0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e, + 0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0, + 0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70, + 0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41, + 0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e, + 0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c, + 0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47, + 0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2, + 0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66, + 0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2, + 0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0, + 0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9, + 0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40, + 0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c, + 0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe, + 0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d, + 0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99, + 0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69, + 0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2, + 0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda, + 0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86, + 0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24, + 0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8, + 0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25, + 0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e, + 0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee, + 0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39, + 0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f, + 0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto 
b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto new file mode 100644 index 00000000..ed08fcbc --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -0,0 +1,883 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// Based on original Protocol Buffers design by +// Sanjay Ghemawat, Jeff Dean, and others. +// +// The messages in this file describe the definitions found in .proto files. +// A valid .proto file can be translated directly to a FileDescriptorProto +// without any other information (e.g. without reading its imports). + + +syntax = "proto2"; + +package google.protobuf; +option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "DescriptorProtos"; +option csharp_namespace = "Google.Protobuf.Reflection"; +option objc_class_prefix = "GPB"; +option cc_enable_arenas = true; + +// descriptor.proto must be optimized for speed because reflection-based +// algorithms don't work during bootstrapping. +option optimize_for = SPEED; + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +message FileDescriptorSet { + repeated FileDescriptorProto file = 1; +} + +// Describes a complete .proto file. +message FileDescriptorProto { + optional string name = 1; // file name, relative to root of source tree + optional string package = 2; // e.g. "foo", "foo.bar", etc. + + // Names of files imported by this file. + repeated string dependency = 3; + // Indexes of the public imported files in the dependency list above. + repeated int32 public_dependency = 10; + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. 
+ repeated int32 weak_dependency = 11; + + // All top-level definitions in this file. + repeated DescriptorProto message_type = 4; + repeated EnumDescriptorProto enum_type = 5; + repeated ServiceDescriptorProto service = 6; + repeated FieldDescriptorProto extension = 7; + + optional FileOptions options = 8; + + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + optional SourceCodeInfo source_code_info = 9; + + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + optional string syntax = 12; +} + +// Describes a message type. +message DescriptorProto { + optional string name = 1; + + repeated FieldDescriptorProto field = 2; + repeated FieldDescriptorProto extension = 6; + + repeated DescriptorProto nested_type = 3; + repeated EnumDescriptorProto enum_type = 4; + + message ExtensionRange { + optional int32 start = 1; + optional int32 end = 2; + + optional ExtensionRangeOptions options = 3; + } + repeated ExtensionRange extension_range = 5; + + repeated OneofDescriptorProto oneof_decl = 8; + + optional MessageOptions options = 7; + + // Range of reserved tag numbers. Reserved tag numbers may not be used by + // fields or extension ranges in the same message. Reserved ranges may + // not overlap. + message ReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Exclusive. + } + repeated ReservedRange reserved_range = 9; + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + repeated string reserved_name = 10; +} + +message ExtensionRangeOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +// Describes a field within a message. +message FieldDescriptorProto { + enum Type { + // 0 is reserved for errors. + // Order is weird for historical reasons. + TYPE_DOUBLE = 1; + TYPE_FLOAT = 2; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + TYPE_INT64 = 3; + TYPE_UINT64 = 4; + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + TYPE_INT32 = 5; + TYPE_FIXED64 = 6; + TYPE_FIXED32 = 7; + TYPE_BOOL = 8; + TYPE_STRING = 9; + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; + TYPE_MESSAGE = 11; // Length-delimited aggregate. + + // New in version 2. + TYPE_BYTES = 12; + TYPE_UINT32 = 13; + TYPE_ENUM = 14; + TYPE_SFIXED32 = 15; + TYPE_SFIXED64 = 16; + TYPE_SINT32 = 17; // Uses ZigZag encoding. + TYPE_SINT64 = 18; // Uses ZigZag encoding. + }; + + enum Label { + // 0 is reserved for errors + LABEL_OPTIONAL = 1; + LABEL_REQUIRED = 2; + LABEL_REPEATED = 3; + }; + + optional string name = 1; + optional int32 number = 3; + optional Label label = 4; + + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + optional Type type = 5; + + // For message and enum types, this is the name of the type. 
If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + optional string type_name = 6; + + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + optional string extendee = 2; + + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + optional string default_value = 7; + + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + optional int32 oneof_index = 9; + + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + optional string json_name = 10; + + optional FieldOptions options = 8; +} + +// Describes a oneof. +message OneofDescriptorProto { + optional string name = 1; + optional OneofOptions options = 2; +} + +// Describes an enum type. +message EnumDescriptorProto { + optional string name = 1; + + repeated EnumValueDescriptorProto value = 2; + + optional EnumOptions options = 3; + + // Range of reserved numeric values. Reserved values may not be used by + // entries in the same enum. Reserved ranges may not overlap. + // + // Note that this is distinct from DescriptorProto.ReservedRange in that it + // is inclusive such that it can appropriately represent the entire int32 + // domain. + message EnumReservedRange { + optional int32 start = 1; // Inclusive. + optional int32 end = 2; // Inclusive. + } + + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + repeated EnumReservedRange reserved_range = 4; + + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + repeated string reserved_name = 5; +} + +// Describes a value within an enum. +message EnumValueDescriptorProto { + optional string name = 1; + optional int32 number = 2; + + optional EnumValueOptions options = 3; +} + +// Describes a service. +message ServiceDescriptorProto { + optional string name = 1; + repeated MethodDescriptorProto method = 2; + + optional ServiceOptions options = 3; +} + +// Describes a method of a service. +message MethodDescriptorProto { + optional string name = 1; + + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. + optional string input_type = 2; + optional string output_type = 3; + + optional MethodOptions options = 4; + + // Identifies if client streams multiple client messages + optional bool client_streaming = 5 [default=false]; + // Identifies if server streams multiple server messages + optional bool server_streaming = 6 [default=false]; +} + + +// =================================================================== +// Options + +// Each of the definitions above may have "options" attached. 
These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them. Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999. It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed. By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name. This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file. Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname. However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
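+  // For example, a file that should favor small generated code can set:
+  //
+  //   option optimize_for = CODE_SIZE;
+  //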
+ enum OptimizeMode { + SPEED = 1; // Generate complete code for parsing, serialization, + // etc. + CODE_SIZE = 2; // Use ReflectionOps to implement these methods. + LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. + } + optional OptimizeMode optimize_for = 9 [default=SPEED]; + + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + optional string go_package = 11; + + + + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + optional bool cc_generic_services = 16 [default=false]; + optional bool java_generic_services = 17 [default=false]; + optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 42 [default=false]; + + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + optional bool deprecated = 23 [default=false]; + + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + optional bool cc_enable_arenas = 31 [default=false]; + + + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + optional string objc_class_prefix = 36; + + // Namespace for generated classes; defaults to the package. + optional string csharp_namespace = 37; + + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + optional string php_namespace = 41; + + + // Use this option to change the namespace of php generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + optional string php_metadata_namespace = 44; + + // Use this option to change the package of ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the ruby package. 
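+  // For example (with a hypothetical package name):
+  //
+  //   option ruby_package = "Foo::Bar";
+  //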
+ optional string ruby_package = 45; + + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. + // See the documentation for the "Options" section above. + extensions 1000 to max; + + reserved 38; +} + +message MessageOptions { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + optional bool message_set_wire_format = 1 [default=false]; + + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + optional bool no_standard_descriptor_accessor = 2 [default=false]; + + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. + optional bool deprecated = 3 [default=false]; + + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + optional bool map_entry = 7; + + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message FieldOptions { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + optional CType ctype = 1 [default = STRING]; + enum CType { + // Default mode. 
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field. The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+  // is represented as JavaScript string, which avoids loss of precision that
+  // can happen when a large value is converted to a floating point JavaScript
+  // number. Specifying JS_NUMBER for the jstype causes the generated
+  // JavaScript code to use the JavaScript "number" type. The behavior of the
+  // default option JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily? Lazy applies only to message-type
+  // fields. It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form. The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint. Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option. However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same. Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message. That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing. An implementation which chooses not to check required fields
+  // must be consistent about it. That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+ optional bool weak = 10 [default=false]; + + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; + + reserved 4; // removed jtype +} + +message OneofOptions { + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumOptions { + + // Set this option to true to allow mapping different tag names to the same + // value. + optional bool allow_alias = 2; + + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + optional bool deprecated = 3 [default=false]; + + reserved 5; // javanano_as_lite + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message EnumValueOptions { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + optional bool deprecated = 1 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message ServiceOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + optional bool deprecated = 33 [default=false]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + +message MethodOptions { + + // Note: Field numbers 1 through 32 are reserved for Google's internal RPC + // framework. We apologize for hoarding these numbers to ourselves, but + // we were already using them long before we decided to release Protocol + // Buffers. + + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + optional bool deprecated = 33 [default=false]; + + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. 
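+  // For example, a read-only method can be marked in the .proto file as:
+  //
+  //   rpc GetFoo(GetFooRequest) returns (GetFooResponse) {
+  //     option idempotency_level = NO_SIDE_EFFECTS;
+  //   }
+  //
+  // (GetFoo and its request/response types are placeholders.)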
+ enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + + // The parser stores options it doesn't recognize here. See above. + repeated UninterpretedOption uninterpreted_option = 999; + + // Clients can define custom options in extensions of this message. See above. + extensions 1000 to max; +} + + +// A message representing a option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +message UninterpretedOption { + // The name of the uninterpreted option. Each string represents a segment in + // a dot-separated name. is_extension is true iff a segment represents an + // extension (denoted with parentheses in options specs in .proto files). + // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents + // "foo.(bar.baz).qux". + message NamePart { + required string name_part = 1; + required bool is_extension = 2; + } + repeated NamePart name = 2; + + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + optional string identifier_value = 3; + optional uint64 positive_int_value = 4; + optional int64 negative_int_value = 5; + optional double double_value = 6; + optional bytes string_value = 7; + optional string aggregate_value = 8; +} + +// =================================================================== +// Optional source code info + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +message SourceCodeInfo { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. 
+  // - A location's span is not always a subset of its parent's span. For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendent. For example, a "group" defines
+  //   both a type and a field in a single declaration. Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index. They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.
+    // For example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name. If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency. Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out. For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not leading or trailing comments
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+ // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + optional string leading_comments = 3; + optional string trailing_comments = 4; + repeated string leading_detached_comments = 6; + } +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +message GeneratedCodeInfo { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + repeated Annotation annotation = 1; + message Annotation { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + repeated int32 path = 1 [packed=true]; + + // Identifies the filesystem path to the original source .proto. + optional string source_file = 2; + + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + optional int32 begin = 3; + + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). + optional int32 end = 4; + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 00000000..6f4a902b --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2806 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* + The code generator for the plugin for the Google protocol buffer compiler. + It generates Go code from the protocol buffer description files read by the + main routine. +*/ +package generator + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/printer" + "go/token" + "log" + "os" + "path" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap" + + "github.com/golang/protobuf/protoc-gen-go/descriptor" + plugin "github.com/golang/protobuf/protoc-gen-go/plugin" +) + +// generatedCodeVersion indicates a version of the generated code. +// It is incremented whenever an incompatibility between the generated code and +// proto package is introduced; the generated code references +// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion). +const generatedCodeVersion = 3 + +// A Plugin provides functionality to add to the output during Go code generation, +// such as to produce RPC stubs. +type Plugin interface { + // Name identifies the plugin. + Name() string + // Init is called once after data structures are built but before + // code generation begins. + Init(g *Generator) + // Generate produces the code generated by the plugin for this file, + // except for the imports, by calling the generator's methods P, In, and Out. + Generate(file *FileDescriptor) + // GenerateImports produces the import declarations for this file. + // It is called after Generate. + GenerateImports(file *FileDescriptor) +} + +var plugins []Plugin + +// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated. +// It is typically called during initialization. +func RegisterPlugin(p Plugin) { + plugins = append(plugins, p) +} + +// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf". +type GoImportPath string + +func (p GoImportPath) String() string { return strconv.Quote(string(p)) } + +// A GoPackageName is the name of a Go package. e.g., "protobuf". +type GoPackageName string + +// Each type we import as a protocol buffer (other than FileDescriptorProto) needs +// a pointer to the FileDescriptorProto that represents it. These types achieve that +// wrapping by placing each Proto inside a struct with the pointer to its File. The +// structs have the same names as their contents, with "Proto" removed. +// FileDescriptor is used to store the things that it points to. + +// The file and package name method are common to messages and enums. +type common struct { + file *FileDescriptor // File this object comes from. +} + +// GoImportPath is the import path of the Go package containing the type. 
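+// Every message, enum and extension declared in one .proto file reports the
+// same import path, e.g. "github.com/golang/protobuf/ptypes/any".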
+func (c *common) GoImportPath() GoImportPath { + return c.file.importPath +} + +func (c *common) File() *FileDescriptor { return c.file } + +func fileIsProto3(file *descriptor.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) } + +// Descriptor represents a protocol buffer message. +type Descriptor struct { + common + *descriptor.DescriptorProto + parent *Descriptor // The containing message, if any. + nested []*Descriptor // Inner messages, if any. + enums []*EnumDescriptor // Inner enums, if any. + ext []*ExtensionDescriptor // Extensions, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or another message. + path string // The SourceCodeInfo path as comma-separated integers. + group bool +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (d *Descriptor) TypeName() []string { + if d.typename != nil { + return d.typename + } + n := 0 + for parent := d; parent != nil; parent = parent.parent { + n++ + } + s := make([]string, n) + for parent := d; parent != nil; parent = parent.parent { + n-- + s[n] = parent.GetName() + } + d.typename = s + return s +} + +// EnumDescriptor describes an enum. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type EnumDescriptor struct { + common + *descriptor.EnumDescriptorProto + parent *Descriptor // The containing message, if any. + typename []string // Cached typename vector. + index int // The index into the container, whether the file or a message. + path string // The SourceCodeInfo path as comma-separated integers. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. +func (e *EnumDescriptor) TypeName() (s []string) { + if e.typename != nil { + return e.typename + } + name := e.GetName() + if e.parent == nil { + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + e.typename = s + return s +} + +// Everything but the last element of the full type name, CamelCased. +// The values of type Foo.Bar are call Foo_value1... not Foo_Bar_value1... . +func (e *EnumDescriptor) prefix() string { + if e.parent == nil { + // If the enum is not part of a message, the prefix is just the type name. + return CamelCase(*e.Name) + "_" + } + typeName := e.TypeName() + return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_" +} + +// The integer value of the named constant in this enumerated type. +func (e *EnumDescriptor) integerValueAsString(name string) string { + for _, c := range e.Value { + if c.GetName() == name { + return fmt.Sprint(c.GetNumber()) + } + } + log.Fatal("cannot find value for enum constant") + return "" +} + +// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil. +// Otherwise it will be the descriptor of the message in which it is defined. +type ExtensionDescriptor struct { + common + *descriptor.FieldDescriptorProto + parent *Descriptor // The containing message, if any. +} + +// TypeName returns the elements of the dotted type name. +// The package name is not part of this name. 
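+// For example, a hypothetical extension "ext" declared inside message Foo
+// yields ["Foo", "ext"], while a top-level extension yields just ["ext"].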
+func (e *ExtensionDescriptor) TypeName() (s []string) { + name := e.GetName() + if e.parent == nil { + // top-level extension + s = make([]string, 1) + } else { + pname := e.parent.TypeName() + s = make([]string, len(pname)+1) + copy(s, pname) + } + s[len(s)-1] = name + return s +} + +// DescName returns the variable name used for the generated descriptor. +func (e *ExtensionDescriptor) DescName() string { + // The full type name. + typeName := e.TypeName() + // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix. + for i, s := range typeName { + typeName[i] = CamelCase(s) + } + return "E_" + strings.Join(typeName, "_") +} + +// ImportedDescriptor describes a type that has been publicly imported from another file. +type ImportedDescriptor struct { + common + o Object +} + +func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() } + +// FileDescriptor describes an protocol buffer descriptor file (.proto). +// It includes slices of all the messages and enums defined within it. +// Those slices are constructed by WrapTypes. +type FileDescriptor struct { + *descriptor.FileDescriptorProto + desc []*Descriptor // All the messages defined in this file. + enum []*EnumDescriptor // All the enums defined in this file. + ext []*ExtensionDescriptor // All the top-level extensions defined in this file. + imp []*ImportedDescriptor // All types defined in files publicly imported by this file. + + // Comments, stored as a map of path (comma-separated integers) to the comment. + comments map[string]*descriptor.SourceCodeInfo_Location + + // The full list of symbols that are exported, + // as a map from the exported object to its symbols. + // This is used for supporting public imports. + exported map[Object][]symbol + + importPath GoImportPath // Import path of this file's package. + packageName GoPackageName // Name of this file's Go package. + + proto3 bool // whether to generate proto3 code for this file +} + +// VarName is the variable name we'll use in the generated code to refer +// to the compressed bytes of this descriptor. It is not exported, so +// it is only valid inside the generated package. +func (d *FileDescriptor) VarName() string { + h := sha256.Sum256([]byte(d.GetName())) + return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8])) +} + +// goPackageOption interprets the file's go_package option. +// If there is no go_package, it returns ("", "", false). +// If there's a simple name, it returns ("", pkg, true). +// If the option implies an import path, it returns (impPath, pkg, true). +func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) { + opt := d.GetOptions().GetGoPackage() + if opt == "" { + return "", "", false + } + // A semicolon-delimited suffix delimits the import path and package name. + sc := strings.Index(opt, ";") + if sc >= 0 { + return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true + } + // The presence of a slash implies there's an import path. + slash := strings.LastIndex(opt, "/") + if slash >= 0 { + return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true + } + return "", cleanPackageName(opt), true +} + +// goFileName returns the output name for the generated Go file. 
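+// For example, "dir/test.proto" normally becomes "dir/test.pb.go". With
+// paths=source_relative the directory is always kept as-is, while a go_package
+// option carrying an import path such as "example.com/foo" (a placeholder)
+// relocates the output to "example.com/foo/test.pb.go".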
+func (d *FileDescriptor) goFileName(pathType pathType) string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + if pathType == pathTypeSourceRelative { + return name + } + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(string(impPath), name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, filename string, pkg GoPackageName) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + oneofTypes []string +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + g.P("// ", ms.sym, " from public import ", filename) + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) + } +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + s := es.name + g.P("// ", s, " from public import ", filename) + g.P("type ", s, " = ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + GoImportPath() GoImportPath + TypeName() []string + File() *FileDescriptor +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. 
+ file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + addedImports map[GoImportPath]bool // Additional imports to emit. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + pathType pathType // How to generate output filenames. + writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } + case "plugins": + pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { + return "" + } + return string(g.GoPackageName(importPath)) + "." +} + +// GoPackageName returns the name used for a package. 
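+// If the cleaned base name of the import path collides with a name already in
+// use in this file (or with a predeclared Go identifier), a numeric suffix is
+// appended: a second package whose base name is "descriptor" becomes
+// "descriptor1".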
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} + +// AddImport adds a package to the generated file's import section. +// It returns the name used for the package. +func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { + g.addedImports[importPath] = true + return g.GoPackageName(importPath) +} + +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} + +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + globalPackageNames[name] = true + return string(name) +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +var isGoPredeclaredIdentifier = map[string]bool{ + "append": true, + "bool": true, + "byte": true, + "cap": true, + "close": true, + "complex": true, + "complex128": true, + "complex64": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "float32": true, + "float64": true, + "imag": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "rune": true, + "string": true, + "true": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, +} + +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword or predeclared identifier: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. +func (g *Generator) defaultGoPackage() GoPackageName { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + return cleanPackageName(p) +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. 
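+// For example, a source file containing
+//
+//	option go_package = "example.com/foo/bar;baz";
+//
+// (paths here are placeholders) is generated into a Go package named "baz",
+// the part after the semicolon.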
+func (g *Generator) SetPackageNames() { + g.outputImportPath = g.genFiles[0].importPath + + defaultPackageNames := make(map[GoImportPath]GoPackageName) + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p + } + } + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) + } + } + + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) + } + } + + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. + g.Pkg = map[string]string{ + "fmt": "fmt", + "math": "math", + "proto": "proto", + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } + for _, f := range g.Request.ProtoFile { + fd := &FileDescriptor{ + FileDescriptorProto: f, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. 
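+			// For example, "foo/bar/baz.proto" is assigned the
+			// import path "foo/bar".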
+ fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *FileDescriptor) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. 
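+// For example, message Bar declared in package foo is keyed as ".foo.Bar";
+// with no package statement the key would be just ".Bar".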
+// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). +func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } + g.WriteString(g.indent) + for _, v := range str { + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } + default: + g.printAtom(v) + } + } + g.WriteByte('\n') +} + +// addInitf stores the given statement to be printed inside the file's init function. +// The statement is given as a format specifier and arguments. 
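+// For instance, later in this file the message generator registers types with
+// a statement along the lines of:
+//
+//	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName)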
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+ g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+ if len(g.indent) > 0 {
+ g.indent = g.indent[1:]
+ }
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+ // Initialize the plugins
+ for _, p := range plugins {
+ p.Init(g)
+ }
+ // Generate the output. The generator runs for every file, even the files
+ // that we don't generate output for, so that we can collate the full list
+ // of exported symbols to support public imports.
+ genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+ for _, file := range g.genFiles {
+ genFileMap[file] = true
+ }
+ for _, file := range g.allFiles {
+ g.Reset()
+ g.annotations = nil
+ g.writeOutput = genFileMap[file]
+ g.generate(file)
+ if !g.writeOutput {
+ continue
+ }
+ fname := file.goFileName(g.pathType)
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(fname),
+ Content: proto.String(g.String()),
+ })
+ if g.annotateCode {
+ // Store the generated code annotations in text, as the protoc plugin protocol requires that
+ // strings contain valid UTF-8.
+ g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+ Name: proto.String(file.goFileName(g.pathType) + ".meta"),
+ Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+ })
+ }
+ }
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+ for _, p := range plugins {
+ p.Generate(file)
+ }
+}
+
+// Fill the response protocol buffer with the generated output for all the files we're
+// supposed to generate.
+func (g *Generator) generate(file *FileDescriptor) {
+ g.file = file
+ g.usedPackages = make(map[GoImportPath]bool)
+ g.packageNames = make(map[GoImportPath]GoPackageName)
+ g.usedPackageNames = make(map[GoPackageName]bool)
+ g.addedImports = make(map[GoImportPath]bool)
+ for name := range globalPackageNames {
+ g.usedPackageNames[name] = true
+ }
+
+ g.P("// This is a compile-time assertion to ensure that this generated file")
+ g.P("// is compatible with the proto package it is being compiled against.")
+ g.P("// A compilation error at this line likely means your copy of the")
+ g.P("// proto package needs to be updated.")
+ g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+ g.P()
+
+ for _, td := range g.file.imp {
+ g.generateImported(td)
+ }
+ for _, enum := range g.file.enum {
+ g.generateEnum(enum)
+ }
+ for _, desc := range g.file.desc {
+ // Don't generate virtual messages for maps.
+ if desc.GetOptions().GetMapEntry() {
+ continue
+ }
+ g.generateMessage(desc)
+ }
+ for _, ext := range g.file.ext {
+ g.generateExtension(ext)
+ }
+ g.generateInitFunction()
+ g.generateFileDescriptor(file)
+
+ // Run the plugins before the imports so we know which imports are necessary.
+ g.runPlugins(file)
+
+ // Generate header and imports last, though they appear first in the output.
+ rem := g.Buffer
+ remAnno := g.annotations
+ g.Buffer = new(bytes.Buffer)
+ g.annotations = nil
+ g.generateHeader()
+ g.generateImports()
+ if !g.writeOutput {
+ return
+ }
+ // Adjust the offsets for annotations displaced by the header and imports.
+ for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } + g.Write(rem.Bytes()) + + // Reformat generated code and patch annotation locations. + fset := token.NewFileSet() + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. + // This should never happen in practice, but it can while changing generated code, + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + ast.SortImports(fset, fileAST) + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + if g.file.GetOptions().GetDeprecated() { + g.P("// ", g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", g.file.Name) + } + g.P() + g.PrintComments(strconv.Itoa(packagePath)) + g.P() + g.P("package ", g.file.packageName) + g.P() +} + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } + if c, ok := g.makeComments(path); ok { + g.P(c) + return true + } + return false +} + +// makeComments generates the comment string for the field, no "\n" at the end +func (g *Generator) makeComments(path string) (string, bool) { + loc, ok := g.file.comments[path] + if !ok { + return "", false + } + w := new(bytes.Buffer) + nl := "" + for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { + fmt.Fprintf(w, "%s//%s", nl, line) + nl = "\n" + } + return w.String(), true +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. 
+func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + imports := make(map[GoImportPath]GoPackageName) + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + importPath := fd.importPath + // Do not import our own package. + if importPath == g.file.importPath { + continue + } + // Do not import weak imports. + if g.weak(int32(i)) { + continue + } + // Do not import a package twice. + if _, ok := imports[importPath]; ok { + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + packageName := g.GoPackageName(importPath) + if _, ok := g.usedPackages[importPath]; !ok { + packageName = "_" + } + imports[importPath] = packageName + } + for importPath := range g.addedImports { + imports[importPath] = g.GoPackageName(importPath) + } + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + g.P("import (") + g.P(g.Pkg["fmt"] + ` "fmt"`) + g.P(g.Pkg["math"] + ` "math"`) + g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") + for importPath, packageName := range imports { + g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) + } + g.P(")") + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + df := id.o.File() + filename := *df.Name + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) + } + g.usedPackages[df.importPath] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } + g.PrintComments(enum.path) + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + for i, e := range enum.Value { + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } + + name := ccPrefix + *e.Name + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.P(")") + g.P() + g.P("var ", ccTypeName, "_name = map[int32]string{") + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.P("}") + g.P() + g.P("var ", ccTypeName, "_value = map[string]int32{") + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.P("}") + g.P() + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + g.P() + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + g.P() + } + + g.generateEnumRegistration(enum) +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. 
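+//
+// As an illustration (using the same hypothetical field as the fieldCommon
+// comments below), an optional int32 field region_id with tag number 8 would
+// produce a tag string like:
+//
+//	varint,8,opt,name=region_id,json=regionId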
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { + defaultValue = fmt.Sprint(float32(f)) + } + } + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { + defaultValue = fmt.Sprint(f) + } + } + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.GoPackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. 
+ name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + name += ",proto3" + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. +func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if _, ok := g.typeNameToObject[t]; !ok { + return + } + importPath := g.ObjectNamed(t).GoImportPath() + if importPath == g.outputImportPath { + // Don't record use of objects in our 
package.
+ return
+ }
+ g.AddImport(importPath)
+ g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated. Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+ "Reset",
+ "String",
+ "ProtoMessage",
+ "Marshal",
+ "Unmarshal",
+ "ExtensionRangeArray",
+ "ExtensionMap",
+ "Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+ "Any": true,
+ "Duration": true,
+ "Empty": true,
+ "Struct": true,
+ "Timestamp": true,
+
+ "Value": true,
+ "ListValue": true,
+ "DoubleValue": true,
+ "FloatValue": true,
+ "Int64Value": true,
+ "UInt64Value": true,
+ "Int32Value": true,
+ "UInt32Value": true,
+ "BoolValue": true,
+ "StringValue": true,
+ "BytesValue": true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or explicitly set in the source.
+// Returns e.g. "nil", `""`, "Default_MessageType_FieldName".
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+ if isRepeated(field) {
+ return "nil"
+ }
+ if def := field.GetDefaultValue(); def != "" {
+ defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+ if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+ return defaultConstant
+ }
+ return "append([]byte(nil), " + defaultConstant + "...)"
+ }
+ switch *field.Type {
+ case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ return "false"
+ case descriptor.FieldDescriptorProto_TYPE_STRING:
+ return `""`
+ case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+ return "nil"
+ case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ obj := g.ObjectNamed(field.GetTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate getter for %s", field.GetName())
+ return "nil"
+ }
+ if len(enum.Value) == 0 {
+ return "0 // empty enum"
+ }
+ first := enum.Value[0].GetName()
+ return g.DefaultPackageName(obj) + enum.prefix() + first
+ default:
+ return "0"
+ }
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+ return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them.
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - field containing list of subfields:
+// - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+ goName string // Go struct name of the message, e.g. MessageName
+ message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+ goName string // Go name of field, e.g. "FieldName" or "Descriptor_"
+ protoName string // Name of field in proto language, e.g. "field_name" or "descriptor"
+ getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+ goType string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+ tags string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+ fullPath string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+ return f.protoName
+}
+
+// getGoType returns the go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+ return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+ fieldCommon
+ protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+ protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+ deprecated string // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+ getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+ protoDef string // Default value as defined in the proto file, e.g. "yoshi" or "5"
+ comment string // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+ g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+ star := ""
+ tname := f.goType
+ if needsStar(f.protoType) && tname[0] == '*' {
+ tname = tname[1:]
+ star = "*"
+ }
+ if f.deprecated != "" {
+ g.P(f.deprecated)
+ }
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+ if f.getterDef == "nil" { // Simpler getter
+ g.P("if m != nil {")
+ g.P("return m." + f.goName)
+ g.P("}")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ return
+ }
+ if mc.message.proto3() {
+ g.P("if m != nil {")
+ } else {
+ g.P("if m != nil && m." + f.goName + " != nil {")
+ }
+ g.P("return " + star + "m." + f.goName)
+ g.P("}")
+ g.P("return ", f.getterDef)
+ g.P("}")
+ g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+ // No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+ return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+ return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+ return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+ fieldCommon
+ protoTypeName string // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+ protoType descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+ oneofTypeName string // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+ fieldNumber int // Actual field number, as defined in proto, e.g. 12
+ getterDef string // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+ protoDef string // Default value as defined in the proto file, e.g. "yoshi" or "5"
+ deprecated string // Deprecation comment, if any.
+}
+
+// typedNil prints a nil cast to the pointer to this field.
+// - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+ g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+ return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+ return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+ return f.protoType
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+ fieldCommon
+ subFields []*oneofSubField // All the possible oneof fields
+ comment string // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+ comment := f.comment
+ for _, sf := range f.subFields {
+ comment += "//\t*" + sf.oneofTypeName + "\n"
+ }
+ g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field prints the discriminator interface for the oneof;
+// it also prints all the getters for the sub fields.
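+//
+// A sketch of the emitted shape, for a hypothetical message Msg with a oneof
+// my_oneof holding an int32 field field_a:
+//
+//	type isMsg_MyOneof interface{ isMsg_MyOneof() }
+//
+//	type Msg_FieldA struct {
+//		FieldA int32 `protobuf:"..."`
+//	}
+//
+//	func (*Msg_FieldA) isMsg_MyOneof() {}
+//
+//	func (m *Msg) GetMyOneof() isMsg_MyOneof { ... }
+//	func (m *Msg) GetFieldA() int32 { ... }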
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+ // The discriminator type
+ g.P("type ", f.goType, " interface {")
+ g.P(f.goType, "()")
+ g.P("}")
+ g.P()
+ // The subField types, fulfilling the discriminator type contract
+ for _, sf := range f.subFields {
+ g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+ g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+ g.P("}")
+ g.P()
+ }
+ for _, sf := range f.subFields {
+ g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+ g.P()
+ }
+ // Getter for the oneof field
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+ g.P("if m != nil { return m.", f.goName, " }")
+ g.P("return nil")
+ g.P("}")
+ g.P()
+ // Getters for each oneof
+ for _, sf := range f.subFields {
+ if sf.deprecated != "" {
+ g.P(sf.deprecated)
+ }
+ g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+ g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+ g.P("return x.", sf.goName)
+ g.P("}")
+ g.P("return ", sf.getterDef)
+ g.P("}")
+ g.P()
+ }
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+ // No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+ decl(g *Generator, mc *msgCtx) // print declaration within the struct
+ getter(g *Generator, mc *msgCtx) // print getter
+ setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+ getProtoDef() string // default value explicitly stated in the proto file, e.g. "yoshi" or "5"
+ getProtoName() string // proto name of a field, e.g. "field_name" or "descriptor"
+ getGoType() string // go type of the field as a string, e.g. "*int32"
+ getProtoTypeName() string // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+ getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is only
+// if the default value is explicitly stated in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+ // Collect fields that can have defaults
+ dFields := []defField{}
+ for _, pf := range topLevelFields {
+ if f, ok := pf.(*oneofField); ok {
+ for _, osf := range f.subFields {
+ dFields = append(dFields, osf)
+ }
+ continue
+ }
+ dFields = append(dFields, pf.(defField))
+ }
+ for _, df := range dFields {
+ def := df.getProtoDef()
+ if def == "" {
+ continue
+ }
+ fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+ typename := df.getGoType()
+ if typename[0] == '*' {
+ typename = typename[1:]
+ }
+ kind := "const "
+ switch {
+ case typename == "bool":
+ case typename == "string":
+ def = strconv.Quote(def)
+ case typename == "[]byte":
+ def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+ kind = "var "
+ case def == "inf", def == "-inf", def == "nan":
+ // These names are known to, and defined by, the protocol language.
+ switch def {
+ case "inf":
+ def = "math.Inf(1)"
+ case "-inf":
+ def = "math.Inf(-1)"
+ case "nan":
+ def = "math.NaN()"
+ }
+ if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+ def = "float32(" + def + ")"
+ }
+ kind = "var "
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ if f, err := strconv.ParseFloat(def, 32); err == nil {
+ def = fmt.Sprint(float32(f))
+ }
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ if f, err := strconv.ParseFloat(def, 64); err == nil {
+ def = fmt.Sprint(f)
+ }
+ case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+ // Must be an enum. Need to construct the prefixed name.
+ obj := g.ObjectNamed(df.getProtoTypeName())
+ var enum *EnumDescriptor
+ if id, ok := obj.(*ImportedDescriptor); ok {
+ // The enum type has been publicly imported.
+ enum, _ = id.o.(*EnumDescriptor)
+ } else {
+ enum, _ = obj.(*EnumDescriptor)
+ }
+ if enum == nil {
+ log.Printf("don't know how to generate constant for %s", fieldname)
+ continue
+ }
+ def = g.DefaultPackageName(obj) + enum.prefix() + def
+ }
+ g.P(kind, fieldname, " ", typename, " = ", def)
+ g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+ }
+ g.P()
+}
+
+// generateInternalStructFields just adds the XXX_ fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+ g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+ if len(mc.message.ExtensionRange) > 0 {
+ messageset := ""
+ if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+ messageset = "protobuf_messageset:\"1\" "
+ }
+ g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+ }
+ g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+ g.P("XXX_sizecache\tint32 `json:\"-\"`")
+
+}
+
+// generateOneofFuncs adds all the utility functions for oneofs, including marshaling, unmarshaling and sizing.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+ ofields := []*oneofField{}
+ for _, f := range topLevelFields {
+ if o, ok := f.(*oneofField); ok {
+ ofields = append(ofields, o)
+ }
+ }
+ if len(ofields) == 0 {
+ return
+ }
+
+ // OneofFuncs
+ g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+ g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+ g.P("return []interface{}{")
+ for _, of := range ofields {
+ for _, sf := range of.subFields {
+ sf.typedNil(g)
+ }
+ }
+ g.P("}")
+ g.P("}")
+ g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+ comments := g.PrintComments(mc.message.path)
+
+ // Guarantee deprecation comments appear after user-provided comments.
+ if mc.message.GetOptions().GetDeprecated() {
+ if comments {
+ // Convention: Separate deprecation comments from original
+ // comments with an empty line.
+ g.P("//")
+ }
+ g.P(deprecationComment)
+ }
+
+ g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+ for _, pf := range topLevelFields {
+ pf.decl(g, mc)
+ }
+ g.generateInternalStructFields(mc, topLevelFields)
+ g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
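+//
+// For a hypothetical proto2 optional string field name, simpleField.getter
+// above emits roughly:
+//
+//	func (m *Msg) GetName() string {
+//		if m != nil && m.Name != nil {
+//			return *m.Name
+//		}
+//		return ""
+//	}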
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) { + for _, pf := range topLevelFields { + pf.getter(g, mc) + } +} + +// generateSetters add setters for all fields, including oneofs and weak fields when applicable. +func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) { + for _, pf := range topLevelFields { + pf.setter(g, mc) + } +} + +// generateCommonMethods adds methods to the message that are not on a per field basis. +func (g *Generator) generateCommonMethods(mc *msgCtx) { + // Reset, String and ProtoMessage methods. + g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }") + g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }") + g.P("func (*", mc.goName, ") ProtoMessage() {}") + var indexes []string + for m := mc.message; m != nil; m = m.parent { + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + g.P() + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] { + g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`) + g.P() + } + + // Extension support methods + if len(mc.message.ExtensionRange) > 0 { + g.P() + g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{") + for _, r := range mc.message.ExtensionRange { + end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends + g.P("{Start: ", r.Start, ", End: ", end, "},") + } + g.P("}") + g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {") + g.P("return extRange_", mc.goName) + g.P("}") + g.P() + } + + // TODO: It does not scale to keep adding another method for every + // operation on protos that we want to switch over to using the + // table-driven approach. Instead, we should only add a single method + // that allows getting access to the *InternalMessageInfo struct and then + // calling Unmarshal, Marshal, Merge, Size, and Discard directly on that. + + // Wrapper for table-driven marshaling and unmarshaling. + g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {") + g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {") + g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {") + g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message + g.P("return xxx_messageInfo_", mc.goName, ".Size(m)") + g.P("}") + + g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {") + g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)") + g.P("}") + + g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo") + g.P() +} + +// Generate the type, methods and default constant definitions for this Descriptor. 
+func (g *Generator) generateMessage(message *Descriptor) { + topLevelFields := []topLevelField{} + oFields := make(map[int32]*oneofField) + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + goTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. + allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later + + // Build a structure more suitable for generating the text in one pass + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + oneof := field.OneofIndex != nil + if oneof && oFields[*field.OneofIndex] == nil { + odp := message.OneofDecl[int(*field.OneofIndex)] + base := CamelCase(odp.GetName()) + fname := allocNames(base)[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) + c, ok := g.makeComments(oneofFullPath) + if ok { + c += "\n//\n" + } + c += "// Types that are valid to be assigned to " + fname + ":\n" + // Generate the rest of this comment later, + // when we've computed any disambiguation. + + dname := "is" + goTypeName + "_" + fname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + of := oneofField{ + fieldCommon: fieldCommon{ + goName: fname, + getterName: "Get"+fname, + goType: dname, + tags: tag, + protoName: odp.GetName(), + fullPath: oneofFullPath, + }, + comment: c, + } + topLevelFields = append(topLevelFields, &of) + oFields[*field.OneofIndex] = &of + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. + keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. 
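+ // As an illustration (hypothetical names): a proto field
+ //	map<string, OtherMessage> items = 3;
+ // becomes the Go type map[string]*OtherMessage, with the key and value
+ // wire details carried in protobuf_key and protobuf_val tags alongside
+ // the regular protobuf tag.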
+ keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldDeprecated := "" + if field.GetOptions().GetDeprecated() { + fieldDeprecated = deprecationComment + } + + dvalue := g.getterDefault(field, goTypeName) + if oneof { + tname := goTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofField := oFields[*field.OneofIndex] + tag := "protobuf:" + g.goTag(message, field, wiretype) + sf := oneofSubField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), + }, + protoTypeName: field.GetTypeName(), + fieldNumber: int(*field.Number), + protoType: *field.Type, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + oneofTypeName: tname, + deprecated: fieldDeprecated, + } + oneofField.subFields = append(oneofField.subFields, &sf) + g.RecordTypeUse(field.GetTypeName()) + continue + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + c, ok := g.makeComments(fieldFullPath) + if ok { + c += "\n" + } + rf := simpleField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fieldFullPath, + }, + protoTypeName: field.GetTypeName(), + protoType: *field.Type, + deprecated: fieldDeprecated, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + comment: c, + } + var pf topLevelField = &rf + + topLevelFields = append(topLevelFields, pf) + g.RecordTypeUse(field.GetTypeName()) + } + + mc := &msgCtx{ + goName: goTypeName, + message: message, + } + + g.generateMessageStruct(mc, topLevelFields) + g.P() + g.generateCommonMethods(mc) + g.P() + g.generateDefaultConstants(mc, topLevelFields) + g.P() + g.generateGetters(mc, topLevelFields) + g.P() + g.generateSetters(mc, topLevelFields) + g.P() + g.generateOneofFuncs(mc, topLevelFields) + g.P() + + var oneofTypes []string + for _, f := range topLevelFields { + if of, ok := f.(*oneofField); ok { + for _, osf := range of.subFields { + oneofTypes = append(oneofTypes, osf.oneofTypeName) + } + } + } + + opts := message.Options + ms := &messageSymbol{ + sym: goTypeName, + hasExtensions: len(message.ExtensionRange) > 0, + isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), + oneofTypes: oneofTypes, + } + g.file.addExport(message, ms) + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." 
+ fullName
+ }
+
+ g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+ // Register types for native map types.
+ for _, k := range mapFieldKeys(mapFieldTypes) {
+ fullName := strings.TrimPrefix(*k.TypeName, ".")
+ g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+ }
+
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int { return len(a) }
+func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+ keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Sort(byTypeName(keys))
+ return keys
+}
+
+var escapeChars = [256]byte{
+ 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape
+// sequences are conveyed, unmodified, into the decoded result.
+func unescape(s string) string {
+ // NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+ // single and double quotes, but strconv.Unquote only allows one or the
+ // other (based on actual surrounding quotes of its input argument).
+
+ var out []byte
+ for len(s) > 0 {
+ // regular character, or too short to be valid escape
+ if s[0] != '\\' || len(s) < 2 {
+ out = append(out, s[0])
+ s = s[1:]
+ } else if c := escapeChars[s[1]]; c != 0 {
+ // escape sequence
+ out = append(out, c)
+ s = s[2:]
+ } else if s[1] == 'x' || s[1] == 'X' {
+ // hex escape, e.g. "\x80"
+ if len(s) < 4 {
+ // too short to be valid
+ out = append(out, s[:2]...)
+ s = s[2:]
+ continue
+ }
+ v, err := strconv.ParseUint(s[2:4], 16, 8)
+ if err != nil {
+ out = append(out, s[:4]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[4:]
+ } else if '0' <= s[1] && s[1] <= '7' {
+ // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164"
+ // so consume up to 2 more bytes or up to end-of-string
+ n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+ if n > 3 {
+ n = 3
+ }
+ v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+ if err != nil {
+ out = append(out, s[:1+n]...)
+ } else {
+ out = append(out, byte(v))
+ }
+ s = s[1+n:]
+ } else {
+ // bad escape, just propagate the slash as-is
+ out = append(out, s[0])
+ s = s[1:]
+ }
+ }
+
+ return string(out)
+}
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+ ccTypeName := ext.DescName()
+
+ extObj := g.ObjectNamed(*ext.Extendee)
+ var extDesc *Descriptor
+ if id, ok := extObj.(*ImportedDescriptor); ok {
+ // This is extending a publicly imported message.
+ // We need the underlying type for goTag.
+ extDesc = id.o.(*Descriptor)
+ } else {
+ extDesc = extObj.(*Descriptor)
+ }
+ extendedType := "*" + g.TypeName(extObj) // always use the original
+ field := ext.FieldDescriptorProto
+ fieldType, wireType := g.GoType(ext.parent, field)
+ tag := g.goTag(extDesc, field, wireType)
+ g.RecordTypeUse(*ext.Extendee)
+ if n := ext.FieldDescriptorProto.TypeName; n != nil {
+ // foreign extension type
+ g.RecordTypeUse(*n)
+ }
+
+ typeName := ext.TypeName()
+
+ // Special case for proto2 message sets: If this extension is extending
+ // proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+ // then drop that last component.
+ //
+ // TODO: This should be implemented in the text formatter rather than the generator.
+ // In addition, the situation for when to apply this special case is implemented
+ // differently in other languages:
+ // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+ if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+ typeName = typeName[:len(typeName)-1]
+ }
+
+ // For text formatting, the package must be exactly what the .proto file declares,
+ // ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+ extName := strings.Join(typeName, ".")
+ if g.file.Package != nil {
+ extName = *g.file.Package + "." + extName
+ }
+
+ g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+ g.P("ExtendedType: (", extendedType, ")(nil),")
+ g.P("ExtensionType: (", fieldType, ")(nil),")
+ g.P("Field: ", field.Number, ",")
+ g.P(`Name: "`, extName, `",`)
+ g.P("Tag: ", tag, ",")
+ g.P(`Filename: "`, g.file.GetName(), `",`)
+
+ g.P("}")
+ g.P()
+
+ g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+
+ g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+ if len(g.init) == 0 {
+ return
+ }
+ g.P("func init() {")
+ for _, l := range g.init {
+ g.P(l)
+ }
+ g.P("}")
+ g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+ // Make a copy and trim source_code_info data.
+ // TODO: Trim this more when we know exactly what we need.
+ pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+ pb.SourceCodeInfo = nil
+
+ b, err := proto.Marshal(pb)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+
+ var buf bytes.Buffer
+ w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+ w.Write(b)
+ w.Close()
+ b = buf.Bytes()
+
+ v := file.VarName()
+ g.P()
+ g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+ g.P("var ", v, " = []byte{")
+ g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+ for len(b) > 0 {
+ n := 16
+ if n > len(b) {
+ n = len(b)
+ }
+
+ s := ""
+ for _, c := range b[:n] {
+ s += fmt.Sprintf("0x%02x,", c)
+ }
+ g.P(s)
+
+ b = b[n:]
+ }
+ g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+ // We always print the full (proto-world) package name here.
+ pkg := enum.File().GetPackage()
+ if pkg != "" {
+ pkg += "."
+ }
+ // The full type name
+ typeName := enum.TypeName()
+ // The full type name, CamelCased.
+ ccTypeName := CamelCaseSlice(typeName)
+ g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? 
+func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 00000000..a9b61036 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package remap handles tracking the locations of Go tokens in a source text +across a rewrite by the Go formatter. +*/ +package remap + +import ( + "fmt" + "go/scanner" + "go/token" +) + +// A Location represents a span of byte offsets in the source text. +type Location struct { + Pos, End int // End is exclusive +} + +// A Map represents a mapping between token locations in an input source text +// and locations in the corresponding output text. +type Map map[Location]Location + +// Find reports whether the specified span is recorded by m, and if so returns +// the new location it was mapped to. If the input span was not found, the +// returned location is the same as the input. +func (m Map) Find(pos, end int) (Location, bool) { + key := Location{ + Pos: pos, + End: end, + } + if loc, ok := m[key]; ok { + return loc, true + } + return key, false +} + +func (m Map) add(opos, oend, npos, nend int) { + m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} +} + +// Compute constructs a location mapping from input to output. An error is +// reported if any of the tokens of output cannot be mapped. +func Compute(input, output []byte) (Map, error) { + itok := tokenize(input) + otok := tokenize(output) + if len(itok) != len(otok) { + return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) + } + m := make(Map) + for i, ti := range itok { + to := otok[i] + if ti.Token != to.Token { + return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) + } + m.add(ti.pos, ti.end, to.pos, to.end) + } + return m, nil +} + +// tokinfo records the span and type of a source token.
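+// The pos and end fields are zero-based byte offsets into the source text; +// tokenize below derives them by subtracting 1 from the scanner's token.Pos +// values, whose FileSet base is 1.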
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 00000000..61bfc10e --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
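+// For illustration only: protoc 3.6.1 would report Major=3, Minor=6, Patch=1 +// with an empty Suffix, while a release candidate would carry a value such as +// "rc2".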
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. 
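+// +// A hypothetical illustration of the contract: a response whose Error is set +// to "foo.proto: unknown type Bar" reports a user-facing .proto problem and +// still exits 0, while a failure inside the generator itself should instead +// be written to stderr with a non-zero exit.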
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. 
+ // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden new file mode 100644 index 00000000..8953d0ff --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. 
+var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil && this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 00000000..5b557452 --- /dev/null +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. 
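+// (For example, a field whose type is message Bar in package foo carries the +// type name ".foo.Bar", the leading dot marking full qualification.)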
+ repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. 
+ // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 00000000..70276e8f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,141 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. 
+func MarshalAny(pb proto.Message) (*any.Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. +func Empty(any *any.Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *any.Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = Empty(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *any.Any, pb proto.Message) bool { + // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), + // but it avoids scanning TypeUrl for the slash. + if any == nil { + return false + } + name := proto.MessageName(pb) + prefix := len(any.TypeUrl) - len(name) + return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go new file mode 100644 index 00000000..78ee5233 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -0,0 +1,200 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto + +package any + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. 
+// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics.
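+ // + // As a concrete example drawn from the JSON sample above, the type URL for + // google.protobuf.Duration is "type.googleapis.com/google.protobuf.Duration".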
+ // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} + +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto new file mode 100644 index 00000000..49329425 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -0,0 +1,154 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option go_package = "github.com/golang/protobuf/ptypes/any"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "AnyProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. 
Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +message Any { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + string type_url = 1; + + // Must be a valid serialized protocol buffer of the above specified type. + bytes value = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 00000000..c0d595da --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package ptypes contains code for interacting with well-known types. +*/ +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 00000000..26d1ca2f --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,102 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package ptypes + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" +) + +const ( + // Range of a durpb.Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the durpb.Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid durpb.Duration +// may still be too large to fit into a time.Duration (the range of durpb.Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *durpb.Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) + } + return nil +} + +// Duration converts a durpb.Duration to a time.Duration. Duration +// returns an error if the durpb.Duration is invalid or is too large to be +// represented in a time.Duration. +func Duration(p *durpb.Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) * time.Nanosecond + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durpb.Duration. +func DurationProto(d time.Duration) *durpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durpb.Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 00000000..0d681ee2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package duration + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
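+	// For example (editor's note, not upstream text): -1.5 seconds is
+	// represented as {Seconds: -1, Nanos: -500000000}, never as
+	// {Seconds: -2, Nanos: 500000000}.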
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} + +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto new file mode 100644 index 00000000..975fce41 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -0,0 +1,117 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds.
For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +message Duration { + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 00000000..33daa73d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
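+	// A construction sketch (editor's note; the keys and values here are
+	// made-up examples, not upstream code):
+	//
+	//	s := &Struct{Fields: map[string]*Value{
+	//		"name":    {Kind: &Value_StringValue{StringValue: "demo"}},
+	//		"enabled": {Kind: &Value_BoolValue{BoolValue: true}},
+	//	}}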
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} + +func (*Value) XXX_WellKnownType() string { return "Value" } + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct 
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. 
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} + +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 
0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 00000000..7d7808e7 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. 
The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of that
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 00000000..8da0df01
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
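+//
+// A round-trip sketch (editor's note, not part of the upstream file; error
+// handling elided):
+//
+//	ts, _ := TimestampProto(time.Now()) // time.Time -> Timestamp proto
+//	t, _ := Timestamp(ts)               // Timestamp proto -> time.Time (UTC)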
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
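+//
+// For example (editor's sketch; the printed value is illustrative):
+//
+//	fmt.Println(TimestampString(TimestampNow())) // e.g. "2019-05-21T18:30:00.123456789Z"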
+func TimestampString(ts *tspb.Timestamp) string { + t, err := Timestamp(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go new file mode 100644 index 00000000..31cd846d --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -0,0 +1,179 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamp + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. 
That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
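+	// For example (editor's note): half a second before the Unix epoch is
+	// {Seconds: -1, Nanos: 500000000}; unlike Duration, Nanos is never
+	// negative.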
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} + +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto new file mode 100644 index 00000000..eafb3fa0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -0,0 +1,135 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/timestamp"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "TimestampProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. 
+// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +message Timestamp { + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go new file mode 100644 index 00000000..add19a1a --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go @@ -0,0 +1,461 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package wrappers + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} + +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} + +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
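+	// Editor's note: the 64-bit wrappers map to JSON strings because such
+	// values can exceed the largest integer an IEEE-754 double represents
+	// exactly (2^53).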
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} + +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} + +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} + +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} + +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
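+	// Editor's note: wrapper messages let a caller distinguish "unset" from
+	// the zero value; a nil *BoolValue means unset, while
+	// &BoolValue{Value: false} is an explicit false.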
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} + +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} + +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
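+	// Editor's note: in JSON the bytes are base64-encoded, which is why the
+	// representation noted above is a JSON string.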
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} + +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 
0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 00000000..01947639 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. 
+// +// The JSON representation for `Int32Value` is JSON number. +message Int32Value { + // The int32 value. + int32 value = 1; +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +message UInt32Value { + // The uint32 value. + uint32 value = 1; +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +message BoolValue { + // The bool value. + bool value = 1; +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +message StringValue { + // The string value. + string value = 1; +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +message BytesValue { + // The bytes value. + bytes value = 1; +} diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization <email address> +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski <dgryski@gmail.com> +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho <rhcarvalho@gmail.com> +Sebastien Binet <seb.binet@gmail.com> diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name <email address> + +# Please keep the list sorted. + +Damian Gryski <dgryski@gmail.com> +Jan Mercl <0xjnml@gmail.com> +Kai Backman <kaib@golang.org> +Marc-Antoine Ruel <maruel@chromium.org> +Nigel Tao <nigeltao@golang.org> +Rob Pike <r@golang.org> +Rodolfo Carvalho <rhcarvalho@gmail.com> +Russ Cox <rsc@golang.org> +Sebastien Binet <seb.binet@gmail.com> diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." + +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. 
-tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. 
It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. 
+ n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000..e6179f65 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. 
+ + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte <d-offset> and <d> patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from <d-offset> to <d> will repeat the pattern + // once, after which we can move <d> two bytes without moving <d-offset>: + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0.
In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000..8c9f2049 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. 
+const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. 
+func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. 
The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). 
+// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. + MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. 
See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. 
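+	// At most seven byte-at-a-time compares can happen here, since the
+	// 8-byte loop above has already advanced past &src[len(src)-8].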
+ CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. + + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. 
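+	// Together with the three pushes below, the words at 0(SP) through
+	// 40(SP) form the two slice headers (base, len, cap) for dst[d:] and
+	// src[nextEmit:]; the callee reads only dst's base and lit's base and len.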
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 00000000..dbcae905
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
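+		// For example, offset 3 with length 12 encodes as {0x2e, 0x03, 0x00}:
+		// the tag byte is (12-1)<<2 | tagCopy2, followed by the little-endian offset.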
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. +// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. 
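+		// For example, skip starts at 32, so the first 32 misses advance s one
+		// byte at a time (32>>5 == 1); by then skip has reached 64 and the
+		// stride becomes two bytes, then three, and so on.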
+ skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod new file mode 100644 index 00000000..f6406bb2 --- /dev/null +++ b/vendor/github.com/golang/snappy/go.mod @@ -0,0 +1 @@ +module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000..ece692ea --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. 
+// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
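+	// (MaxEncodedLen(n) is 32 + n + n/6, so for n = 65536 this is
+	// 32 + 65536 + 10922 = 76490.)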
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE new file mode 100644 index 00000000..32017f8f --- /dev/null +++ b/vendor/github.com/google/go-cmp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2017 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go new file mode 100644 index 00000000..2133562b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -0,0 +1,616 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package cmp determines equality of values. +// +// This package is intended to be a more powerful and safer alternative to +// reflect.DeepEqual for comparing whether two values are semantically equal. +// +// The primary features of cmp are: +// +// • When the default behavior of equality does not suit the needs of the test, +// custom equality functions can override the equality operation. +// For example, an equality function may report floats as equal so long as they +// are within some tolerance of each other. +// +// • Types that have an Equal method may use that method to determine equality. +// This allows package authors to determine the equality operation for the types +// that they define. 
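+// For example, time.Time provides an Equal method that compares two instants
+// in time, so Time values in different locations compare equal when they
+// represent the same moment.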
+//
+// • If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on both
+// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
+// fields are not compared by default; they result in panics unless suppressed
+// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly compared
+// using the AllowUnexported option.
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+	"github.com/google/go-cmp/cmp/internal/flags"
+	"github.com/google/go-cmp/cmp/internal/function"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// • Let S be the set of all Ignore, Transformer, and Comparer options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one Ignore exists in S, then the comparison is ignored.
+// If the number of Transformer and Comparer options in S is greater than one,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single Transformer, then use that to transform the current
+// values and recursively call Equal on the output values.
+// If S contains a single Comparer, then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// • If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// • Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings, and
+// channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an Ignore option
+// (e.g., cmpopts.IgnoreUnexported) ignores those fields or the AllowUnexported
+// option explicitly permits comparing them.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using cmpopts.EquateEmpty.
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using cmpopts.EquateEmpty.
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+func Equal(x, y interface{}, opts ...Option) bool {
+	vx := reflect.ValueOf(x)
+	vy := reflect.ValueOf(y)
+
+	// If the inputs are different types, auto-wrap them in an empty interface
+	// so that they have the same parent type.
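+	// For example, Equal(int32(1), int64(1)) compares the two values as
+	// interface{} types; their concrete types differ, so it reports false.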
+	var t reflect.Type
+	if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+		t = reflect.TypeOf((*interface{})(nil)).Elem()
+		if vx.IsValid() {
+			vvx := reflect.New(t).Elem()
+			vvx.Set(vx)
+			vx = vvx
+		}
+		if vy.IsValid() {
+			vvy := reflect.New(t).Elem()
+			vvy.Set(vy)
+			vy = vvy
+		}
+	} else {
+		t = vx.Type()
+	}
+
+	s := newState(opts)
+	s.compareAny(&pathStep{t, vx, vy})
+	return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values.
+// It returns an empty string if and only if Equal returns true for the same
+// input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix indicates an element added to y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses the fmt.Stringer.String or error.Error methods to produce more
+// human-readable output. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+	r := new(defaultReporter)
+	eq := Equal(x, y, Options(opts), Reporter(r))
+	d := r.String()
+	if (d == "") != eq {
+		panic("inconsistent difference and equality results")
+	}
+	return d
+}
+
+type state struct {
+	// These fields represent the "comparison state".
+	// Calling statelessCompare must not result in observable changes to these.
+	result    diff.Result // The current result of comparison
+	curPath   Path        // The current path in the value tree
+	reporters []reporter  // Optional reporters
+
+	// recChecker checks for infinite cycles applying the same set of
+	// transformers upon the output of itself.
+	recChecker recChecker
+
+	// dynChecker triggers pseudo-random checks for option correctness.
+	// It is safe for statelessCompare to mutate this value.
+	dynChecker dynChecker
+
+	// These fields, once set by processOption, will not change.
+	exporters map[reflect.Type]bool // Set of structs with unexported field visibility
+	opts      Options               // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+	// Always ensure a validator option exists to validate the inputs.
+	s := &state{opts: Options{validator{}}}
+	s.processOption(Options(opts))
+	return s
+}
+
+func (s *state) processOption(opt Option) {
+	switch opt := opt.(type) {
+	case nil:
+	case Options:
+		for _, o := range opt {
+			s.processOption(o)
+		}
+	case coreOption:
+		type filtered interface {
+			isFiltered() bool
+		}
+		if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+			panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+		}
+		s.opts = append(s.opts, opt)
+	case visibleStructs:
+		if s.exporters == nil {
+			s.exporters = make(map[reflect.Type]bool)
+		}
+		for t := range opt {
+			s.exporters[t] = true
+		}
+	case reporter:
+		s.reporters = append(s.reporters, opt)
+	default:
+		panic(fmt.Sprintf("unknown option %T", opt))
+	}
+}
+
+// statelessCompare compares two values and returns the result.
+// It is stateless in that it does not alter the current result or
+// output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result { + // We do not save and restore the curPath because all of the compareX + // methods should properly push and pop from the path. + // It is an implementation bug if the contents of curPath differs from + // when calling this function to when returning from it. + + oldResult, oldReporters := s.result, s.reporters + s.result = diff.Result{} // Reset result + s.reporters = nil // Remove reporters to avoid spurious printouts + s.compareAny(step) + res := s.result + s.result, s.reporters = oldResult, oldReporters + return res +} + +func (s *state) compareAny(step PathStep) { + // Update the path stack. + s.curPath.push(step) + defer s.curPath.pop() + for _, r := range s.reporters { + r.PushStep(step) + defer r.PopStep() + } + s.recChecker.Check(s.curPath) + + // Obtain the current type and values. + t := step.Type() + vx, vy := step.Values() + + // Rule 1: Check whether an option applies on this node in the value tree. + if s.tryOptions(t, vx, vy) { + return + } + + // Rule 2: Check whether the type has a valid Equal method. + if s.tryMethod(t, vx, vy) { + return + } + + // Rule 3: Compare based on the underlying kind. + switch t.Kind() { + case reflect.Bool: + s.report(vx.Bool() == vy.Bool(), 0) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s.report(vx.Int() == vy.Int(), 0) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s.report(vx.Uint() == vy.Uint(), 0) + case reflect.Float32, reflect.Float64: + s.report(vx.Float() == vy.Float(), 0) + case reflect.Complex64, reflect.Complex128: + s.report(vx.Complex() == vy.Complex(), 0) + case reflect.String: + s.report(vx.String() == vy.String(), 0) + case reflect.Chan, reflect.UnsafePointer: + s.report(vx.Pointer() == vy.Pointer(), 0) + case reflect.Func: + s.report(vx.IsNil() && vy.IsNil(), 0) + case reflect.Struct: + s.compareStruct(t, vx, vy) + case reflect.Slice, reflect.Array: + s.compareSlice(t, vx, vy) + case reflect.Map: + s.compareMap(t, vx, vy) + case reflect.Ptr: + s.comparePtr(t, vx, vy) + case reflect.Interface: + s.compareInterface(t, vx, vy) + default: + panic(fmt.Sprintf("%v kind not handled", t.Kind())) + } +} + +func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool { + // Evaluate all filters and apply the remaining options. + if opt := s.opts.filter(s, t, vx, vy); opt != nil { + opt.apply(s, vx, vy) + return true + } + return false +} + +func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool { + // Check if this type even has an Equal method. + m, ok := t.MethodByName("Equal") + if !ok || !function.IsType(m.Type, function.EqualAssignable) { + return false + } + + eq := s.callTTBFunc(m.Func, vx, vy) + s.report(eq, reportByMethod) + return true +} + +func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value { + v = sanitizeValue(v, f.Type().In(0)) + if !s.dynChecker.Next() { + return f.Call([]reflect.Value{v})[0] + } + + // Run the function twice and ensure that we get the same results back. + // We run in goroutines so that the race detector (if enabled) can detect + // unsafe mutations to the input. + c := make(chan reflect.Value) + go detectRaces(c, f, v) + got := <-c + want := f.Call([]reflect.Value{v})[0] + if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() { + // To avoid false-positives with non-reflexive equality operations, + // we sanity check whether a value is equal to itself. 
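+	// (Floating-point NaNs are the classic non-reflexive case: a transformed
+	// value containing a NaN compares unequal to itself.)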
+		if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+			return want
+		}
+		panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+	x = sanitizeValue(x, f.Type().In(0))
+	y = sanitizeValue(y, f.Type().In(1))
+	if !s.dynChecker.Next() {
+		return f.Call([]reflect.Value{x, y})[0].Bool()
+	}
+
+	// Swapping the input arguments is sufficient to check that
+	// f is symmetric and deterministic.
+	// We run in goroutines so that the race detector (if enabled) can detect
+	// unsafe mutations to the input.
+	c := make(chan reflect.Value)
+	go detectRaces(c, f, y, x)
+	got := <-c
+	want := f.Call([]reflect.Value{x, y})[0].Bool()
+	if !got.IsValid() || got.Bool() != want {
+		panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+	}
+	return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+	var ret reflect.Value
+	defer func() {
+		recover() // Ignore panics, let the other call to f panic instead
+		c <- ret
+	}()
+	ret = f.Call(vs)[0]
+}
+
+// sanitizeValue converts nil interfaces of type T to those of type R,
+// assuming that T is assignable to R.
+// Otherwise, it returns the input value as is.
+func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
+	// TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
+	if !flags.AtLeastGo110 {
+		if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+			return reflect.New(t).Elem()
+		}
+	}
+	return v
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+	var vax, vay reflect.Value // Addressable versions of vx and vy
+
+	step := StructField{&structField{}}
+	for i := 0; i < t.NumField(); i++ {
+		step.typ = t.Field(i).Type
+		step.vx = vx.Field(i)
+		step.vy = vy.Field(i)
+		step.name = t.Field(i).Name
+		step.idx = i
+		step.unexported = !isExported(step.name)
+		if step.unexported {
+			if step.name == "_" {
+				continue
+			}
+			// Defer checking of unexported fields until later to give an
+			// Ignore a chance to ignore the field.
+			if !vax.IsValid() || !vay.IsValid() {
+				// For retrieveUnexportedField to work, the parent struct must
+				// be addressable. Create a new copy of the values if
+				// necessary to make them addressable.
+				vax = makeAddressable(vx)
+				vay = makeAddressable(vy)
+			}
+			step.mayForce = s.exporters[t]
+			step.pvx = vax
+			step.pvy = vay
+			step.field = t.Field(i)
+		}
+		s.compareAny(step)
+	}
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+	isSlice := t.Kind() == reflect.Slice
+	if isSlice && (vx.IsNil() || vy.IsNil()) {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// TODO: Support cyclic data structures.
+
+	step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}}
+	withIndexes := func(ix, iy int) SliceIndex {
+		if ix >= 0 {
+			step.vx, step.xkey = vx.Index(ix), ix
+		} else {
+			step.vx, step.xkey = reflect.Value{}, -1
+		}
+		if iy >= 0 {
+			step.vy, step.ykey = vy.Index(iy), iy
+		} else {
+			step.vy, step.ykey = reflect.Value{}, -1
+		}
+		return step
+	}
+
+	// Ignore options are able to ignore missing elements in a slice.
+	// However, detecting these reliably requires an optimal differencing
+	// algorithm, which diff.Difference is not.
+	//
+	// Instead, we first iterate through both slices to detect which elements
+	// would be ignored if standing alone.
+	// The indexes of non-discarded elements are stored in separate slices,
+	// on which diffing is then performed.
+	var indexesX, indexesY []int
+	var ignoredX, ignoredY []bool
+	for ix := 0; ix < vx.Len(); ix++ {
+		ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+		if !ignored {
+			indexesX = append(indexesX, ix)
+		}
+		ignoredX = append(ignoredX, ignored)
+	}
+	for iy := 0; iy < vy.Len(); iy++ {
+		ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+		if !ignored {
+			indexesY = append(indexesY, iy)
+		}
+		ignoredY = append(ignoredY, ignored)
+	}
+
+	// Compute an edit-script for slices vx and vy (excluding ignored elements).
+	edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+		return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+	})
+
+	// Replay the ignore-scripts and the edit-script.
+	var ix, iy int
+	for ix < vx.Len() || iy < vy.Len() {
+		var e diff.EditType
+		switch {
+		case ix < len(ignoredX) && ignoredX[ix]:
+			e = diff.UniqueX
+		case iy < len(ignoredY) && ignoredY[iy]:
+			e = diff.UniqueY
+		default:
+			e, edits = edits[0], edits[1:]
+		}
+		switch e {
+		case diff.UniqueX:
+			s.compareAny(withIndexes(ix, -1))
+			ix++
+		case diff.UniqueY:
+			s.compareAny(withIndexes(-1, iy))
+			iy++
+		default:
+			s.compareAny(withIndexes(ix, iy))
+			ix++
+			iy++
+		}
+	}
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// TODO: Support cyclic data structures.
+
+	// We combine and sort the keys of both maps so that we can perform the
+	// comparisons in a deterministic order.
+	step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+	for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+		step.vx = vx.MapIndex(k)
+		step.vy = vy.MapIndex(k)
+		step.key = k
+		if !step.vx.IsValid() && !step.vy.IsValid() {
+			// It is possible for both vx and vy to be invalid if the
+			// key contained a NaN value in it.
+			//
+			// Even with the ability to retrieve NaN keys in Go 1.12,
+			// there still isn't a sensible way to compare the values since
+			// a NaN key may map to multiple unordered values.
+			// The most reasonable way to compare NaNs would be to compare the
+			// set of values. However, this is impossible to do efficiently
+			// since set equality is provably an O(n^2) operation given only
+			// an Equal function. If we had a Less function or Hash function,
+			// this could be done in O(n*log(n)) or O(n), respectively.
+			//
+			// Rather than adding complex logic to deal with NaNs, make it
+			// the user's responsibility to compare such obscure maps.
+			const help = "consider providing a Comparer to compare the map"
+			panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+		}
+		s.compareAny(step)
+	}
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+
+	// TODO: Support cyclic data structures.
+
+	vx, vy = vx.Elem(), vy.Elem()
+	s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+	if vx.IsNil() || vy.IsNil() {
+		s.report(vx.IsNil() && vy.IsNil(), 0)
+		return
+	}
+	vx, vy = vx.Elem(), vy.Elem()
+	if vx.Type() != vy.Type() {
+		s.report(false, 0)
+		return
+	}
+	s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+	if rf&reportByIgnore == 0 {
+		if eq {
+			s.result.NumSame++
+			rf |= reportEqual
+		} else {
+			s.result.NumDiff++
+			rf |= reportUnequal
+		}
+	}
+	for _, r := range s.reporters {
+		r.Report(Result{flags: rf})
+	}
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+	const minLen = 1 << 16
+	if rc.next == 0 {
+		rc.next = minLen
+	}
+	if len(p) < rc.next {
+		return
+	}
+	rc.next <<= 1
+
+	// Check whether the same transformer has appeared at least twice.
+	var ss []string
+	m := map[Option]int{}
+	for _, ps := range p {
+		if t, ok := ps.(Transform); ok {
+			t := t.Option()
+			if m[t] == 1 { // Transformer was used exactly once before
+				tf := t.(*transformer).fnc.Type()
+				ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+			}
+			m[t]++
+		}
+	}
+	if len(ss) > 0 {
+		const warning = "recursive set of Transformers detected"
+		const help = "consider using cmpopts.AcyclicTransformer"
+		set := strings.Join(ss, "\n\t")
+		panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+	}
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+	ok := dc.curr == dc.next
+	if ok {
+		dc.curr = 0
+		dc.next++
+	}
+	dc.curr++
+	return ok
+}
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+	if v.CanAddr() {
+		return v
+	}
+	vc := reflect.New(v.Type()).Elem()
+	vc.Set(v)
+	return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go
new file mode 100644
index 00000000..abc3a1c3
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go
@@ -0,0 +1,15 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+ +// +build purego + +package cmp + +import "reflect" + +const supportAllowUnexported = false + +func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value { + panic("retrieveUnexportedField is not implemented") +} diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go new file mode 100644 index 00000000..59d4ee91 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -0,0 +1,23 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package cmp + +import ( + "reflect" + "unsafe" +) + +const supportAllowUnexported = true + +// retrieveUnexportedField uses unsafe to forcibly retrieve any field from +// a struct such that the value has read-write permissions. +// +// The parent struct, v, must be addressable, while f must be a StructField +// describing the field to retrieve. +func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value { + return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem() +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go new file mode 100644 index 00000000..fe98dcc6 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -0,0 +1,17 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !cmp_debug + +package diff + +var debug debugger + +type debugger struct{} + +func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc { + return f +} +func (debugger) Update() {} +func (debugger) Finish() {} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go new file mode 100644 index 00000000..597b6ae5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -0,0 +1,122 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
+ +// +build cmp_debug + +package diff + +import ( + "fmt" + "strings" + "sync" + "time" +) + +// The algorithm can be seen running in real-time by enabling debugging: +// go test -tags=cmp_debug -v +// +// Example output: +// === RUN TestDifference/#34 +// ┌───────────────────────────────┐ +// │ \ · · · · · · · · · · · · · · │ +// │ · # · · · · · · · · · · · · · │ +// │ · \ · · · · · · · · · · · · · │ +// │ · · \ · · · · · · · · · · · · │ +// │ · · · X # · · · · · · · · · · │ +// │ · · · # \ · · · · · · · · · · │ +// │ · · · · · # # · · · · · · · · │ +// │ · · · · · # \ · · · · · · · · │ +// │ · · · · · · · \ · · · · · · · │ +// │ · · · · · · · · \ · · · · · · │ +// │ · · · · · · · · · \ · · · · · │ +// │ · · · · · · · · · · \ · · # · │ +// │ · · · · · · · · · · · \ # # · │ +// │ · · · · · · · · · · · # # # · │ +// │ · · · · · · · · · · # # # # · │ +// │ · · · · · · · · · # # # # # · │ +// │ · · · · · · · · · · · · · · \ │ +// └───────────────────────────────┘ +// [.Y..M.XY......YXYXY.|] +// +// The grid represents the edit-graph where the horizontal axis represents +// list X and the vertical axis represents list Y. The start of the two lists +// is the top-left, while the ends are the bottom-right. The '·' represents +// an unexplored node in the graph. The '\' indicates that the two symbols +// from list X and Y are equal. The 'X' indicates that two symbols are similar +// (but not exactly equal) to each other. The '#' indicates that the two symbols +// are different (and not similar). The algorithm traverses this graph trying to +// make the paths starting in the top-left and the bottom-right connect. +// +// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents +// the currently established path from the forward and reverse searches, +// separated by a '|' character. + +const ( + updateDelay = 100 * time.Millisecond + finishDelay = 500 * time.Millisecond + ansiTerminal = true // ANSI escape codes used to move terminal cursor +) + +var debug debugger + +type debugger struct { + sync.Mutex + p1, p2 EditScript + fwdPath, revPath *EditScript + grid []byte + lines int +} + +func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc { + dbg.Lock() + dbg.fwdPath, dbg.revPath = p1, p2 + top := "┌─" + strings.Repeat("──", nx) + "┐\n" + row := "│ " + strings.Repeat("· ", nx) + "│\n" + btm := "└─" + strings.Repeat("──", nx) + "┘\n" + dbg.grid = []byte(top + strings.Repeat(row, ny) + btm) + dbg.lines = strings.Count(dbg.String(), "\n") + fmt.Print(dbg) + + // Wrap the EqualFunc so that we can intercept each result. 
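+	// Each result is painted into the grid cell at (ix, iy): '\' when the
+	// two symbols are equal, 'X' when similar, '#' when different.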
+ return func(ix, iy int) (r Result) { + cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")] + for i := range cell { + cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot + } + switch r = f(ix, iy); { + case r.Equal(): + cell[0] = '\\' + case r.Similar(): + cell[0] = 'X' + default: + cell[0] = '#' + } + return + } +} + +func (dbg *debugger) Update() { + dbg.print(updateDelay) +} + +func (dbg *debugger) Finish() { + dbg.print(finishDelay) + dbg.Unlock() +} + +func (dbg *debugger) String() string { + dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0] + for i := len(*dbg.revPath) - 1; i >= 0; i-- { + dbg.p2 = append(dbg.p2, (*dbg.revPath)[i]) + } + return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2) +} + +func (dbg *debugger) print(d time.Duration) { + if ansiTerminal { + fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor + } + fmt.Print(dbg) + time.Sleep(d) +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go new file mode 100644 index 00000000..3d2e4266 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -0,0 +1,372 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package diff implements an algorithm for producing edit-scripts. +// The edit-script is a sequence of operations needed to transform one list +// of symbols into another (or vice-versa). The edits allowed are insertions, +// deletions, and modifications. The summation of all edits is called the +// Levenshtein distance as this problem is well-known in computer science. +// +// This package prioritizes performance over accuracy. That is, the run time +// is more important than obtaining a minimal Levenshtein distance. +package diff + +// EditType represents a single operation within an edit-script. +type EditType uint8 + +const ( + // Identity indicates that a symbol pair is identical in both list X and Y. + Identity EditType = iota + // UniqueX indicates that a symbol only exists in X and not Y. + UniqueX + // UniqueY indicates that a symbol only exists in Y and not X. + UniqueY + // Modified indicates that a symbol pair is a modification of each other. + Modified +) + +// EditScript represents the series of differences between two lists. +type EditScript []EditType + +// String returns a human-readable string representing the edit-script where +// Identity, UniqueX, UniqueY, and Modified are represented by the +// '.', 'X', 'Y', and 'M' characters, respectively. +func (es EditScript) String() string { + b := make([]byte, len(es)) + for i, e := range es { + switch e { + case Identity: + b[i] = '.' + case UniqueX: + b[i] = 'X' + case UniqueY: + b[i] = 'Y' + case Modified: + b[i] = 'M' + default: + panic("invalid edit-type") + } + } + return string(b) +} + +// stats returns a histogram of the number of each type of edit operation. +func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) { + for _, e := range es { + switch e { + case Identity: + s.NI++ + case UniqueX: + s.NX++ + case UniqueY: + s.NY++ + case Modified: + s.NM++ + default: + panic("invalid edit-type") + } + } + return +} + +// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if +// lists X and Y are equal. +func (es EditScript) Dist() int { return len(es) - es.stats().NI } + +// LenX is the length of the X list. 
+func (es EditScript) LenX() int { return len(es) - es.stats().NY } + +// LenY is the length of the Y list. +func (es EditScript) LenY() int { return len(es) - es.stats().NX } + +// EqualFunc reports whether the symbols at indexes ix and iy are equal. +// When called by Difference, the index is guaranteed to be within nx and ny. +type EqualFunc func(ix int, iy int) Result + +// Result is the result of comparison. +// NumSame is the number of sub-elements that are equal. +// NumDiff is the number of sub-elements that are not equal. +type Result struct{ NumSame, NumDiff int } + +// BoolResult returns a Result that is either Equal or not Equal. +func BoolResult(b bool) Result { + if b { + return Result{NumSame: 1} // Equal, Similar + } else { + return Result{NumDiff: 2} // Not Equal, not Similar + } +} + +// Equal indicates whether the symbols are equal. Two symbols are equal +// if and only if NumDiff == 0. If Equal, then they are also Similar. +func (r Result) Equal() bool { return r.NumDiff == 0 } + +// Similar indicates whether two symbols are similar and may be represented +// by using the Modified type. As a special case, we consider binary comparisons +// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar. +// +// The exact ratio of NumSame to NumDiff to determine similarity may change. +func (r Result) Similar() bool { + // Use NumSame+1 to offset NumSame so that binary comparisons are similar. + return r.NumSame+1 >= r.NumDiff +} + +// Difference reports whether two lists of lengths nx and ny are equal +// given the definition of equality provided as f. +// +// This function returns an edit-script, which is a sequence of operations +// needed to convert one list into the other. The following invariants for +// the edit-script are maintained: +// • eq == (es.Dist()==0) +// • nx == es.LenX() +// • ny == es.LenY() +// +// This algorithm is not guaranteed to be an optimal solution (i.e., one that +// produces an edit-script with a minimal Levenshtein distance). This algorithm +// favors performance over optimality. The exact output is not guaranteed to +// be stable and may change over time. +func Difference(nx, ny int, f EqualFunc) (es EditScript) { + // This algorithm is based on traversing what is known as an "edit-graph". + // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations" + // by Eugene W. Myers. Since D can be as large as N itself, this is + // effectively O(N^2). Unlike the algorithm from that paper, we are not + // interested in the optimal path, but at least some "decent" path. + // + // For example, let X and Y be lists of symbols: + // X = [A B C A B B A] + // Y = [C B A B A C] + // + // The edit-graph can be drawn as the following: + // A B C A B B A + // ┌─────────────┐ + // C │_|_|\|_|_|_|_│ 0 + // B │_|\|_|_|\|\|_│ 1 + // A │\|_|_|\|_|_|\│ 2 + // B │_|\|_|_|\|\|_│ 3 + // A │\|_|_|\|_|_|\│ 4 + // C │ | |\| | | | │ 5 + // └─────────────┘ 6 + // 0 1 2 3 4 5 6 7 + // + // List X is written along the horizontal axis, while list Y is written + // along the vertical axis. At any point on this grid, if the symbol in + // list X matches the corresponding symbol in list Y, then a '\' is drawn. + // The goal of any minimal edit-script algorithm is to find a path from the + // top-left corner to the bottom-right corner, while traveling through the + // fewest horizontal or vertical edges. + // A horizontal edge is equivalent to inserting a symbol from list X. + // A vertical edge is equivalent to inserting a symbol from list Y. 
+	// A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+	// Invariants:
+	//	• 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+	//	• 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+	//
+	// In general:
+	//	• fwdFrontier.X < revFrontier.X
+	//	• fwdFrontier.Y < revFrontier.Y
+	// unless it is time for the algorithm to terminate.
+	fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+	revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+	fwdFrontier := fwdPath.point // Forward search frontier
+	revFrontier := revPath.point // Reverse search frontier
+
+	// Search budget bounds the cost of searching for better paths.
+	// The longest sequence of non-matching symbols that can be tolerated is
+	// approximately the square-root of the search budget.
+	searchBudget := 4 * (nx + ny) // O(n)
+
+	// The algorithm below is a greedy, meet-in-the-middle algorithm for
+	// computing sub-optimal edit-scripts between two lists.
+	//
+	// The algorithm is approximately as follows:
+	//	• Searching for differences switches back-and-forth between
+	//	a search that starts at the beginning (the top-left corner), and
+	//	a search that starts at the end (the bottom-right corner). The goal of
+	//	the search is to connect with the search from the opposite corner.
+	//	• As we search, we build a path in a greedy manner, where the first
+	//	match seen is added to the path (this is sub-optimal, but provides a
+	//	decent result in practice). When matches are found, we try the next pair
+	//	of symbols in the lists and follow all matches as far as possible.
+	//	• When searching for matches, we search along a diagonal going through
+	//	the "frontier" point. If no matches are found, we advance the
+	//	frontier towards the opposite corner.
+	//	• This algorithm terminates when either the X coordinates or the
+	//	Y coordinates of the forward and reverse frontier points ever intersect.
+	//
+	// This algorithm is correct even if searching only in the forward direction
+	// or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front or end
+	// of one of the lists.
+	//
+	// Running the tests with the "cmp_debug" build tag prints a visualization
+	// of the algorithm running in real-time. This is educational for
+	// understanding how the algorithm works. See debug_enable.go.
+	f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+	for {
+		// Forward search from the beginning.
+		if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+			break
+		}
+		for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+			// Search in a diagonal pattern for a match.
+			z := zigzag(i)
+			p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+			switch {
+			case p.X >= revPath.X || p.Y < fwdPath.Y:
+				stop1 = true // Hit top-right corner
+			case p.Y >= revPath.Y || p.X < fwdPath.X:
+				stop2 = true // Hit bottom-left corner
+			case f(p.X, p.Y).Equal():
+				// Match found, so connect the path to this point.
+				fwdPath.connect(p, f)
+				fwdPath.append(Identity)
+				// Follow sequence of matches as far as possible.
+				for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+					if !f(fwdPath.X, fwdPath.Y).Equal() {
+						break
+					}
+					fwdPath.append(Identity)
+				}
+				fwdFrontier = fwdPath.point
+				stop1, stop2 = true, true
+			default:
+				searchBudget-- // Match not found
+			}
+			debug.Update()
+		}
+		// Advance the frontier towards reverse point.
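+		// Move along whichever axis has the greater remaining distance,
+		// which keeps the frontier near the edit-graph's main diagonal.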
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y { + fwdFrontier.X++ + } else { + fwdFrontier.Y++ + } + + // Reverse search from the end. + if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { + break + } + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { + // Search in a diagonal pattern for a match. + z := zigzag(i) + p := point{revFrontier.X - z, revFrontier.Y + z} + switch { + case fwdPath.X >= p.X || revPath.Y < p.Y: + stop1 = true // Hit bottom-left corner + case fwdPath.Y >= p.Y || revPath.X < p.X: + stop2 = true // Hit top-right corner + case f(p.X-1, p.Y-1).Equal(): + // Match found, so connect the path to this point. + revPath.connect(p, f) + revPath.append(Identity) + // Follow sequence of matches as far as possible. + for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y { + if !f(revPath.X-1, revPath.Y-1).Equal() { + break + } + revPath.append(Identity) + } + revFrontier = revPath.point + stop1, stop2 = true, true + default: + searchBudget-- // Match not found + } + debug.Update() + } + // Advance the frontier towards forward point. + if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y { + revFrontier.X-- + } else { + revFrontier.Y-- + } + } + + // Join the forward and reverse paths and then append the reverse path. + fwdPath.connect(revPath.point, f) + for i := len(revPath.es) - 1; i >= 0; i-- { + t := revPath.es[i] + revPath.es = revPath.es[:i] + fwdPath.append(t) + } + debug.Finish() + return fwdPath.es +} + +type path struct { + dir int // +1 if forward, -1 if reverse + point // Leading point of the EditScript path + es EditScript +} + +// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types +// to the edit-script to connect p.point to dst. +func (p *path) connect(dst point, f EqualFunc) { + if p.dir > 0 { + // Connect in forward direction. + for dst.X > p.X && dst.Y > p.Y { + switch r := f(p.X, p.Y); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case dst.X-p.X >= dst.Y-p.Y: + p.append(UniqueX) + default: + p.append(UniqueY) + } + } + for dst.X > p.X { + p.append(UniqueX) + } + for dst.Y > p.Y { + p.append(UniqueY) + } + } else { + // Connect in reverse direction. + for p.X > dst.X && p.Y > dst.Y { + switch r := f(p.X-1, p.Y-1); { + case r.Equal(): + p.append(Identity) + case r.Similar(): + p.append(Modified) + case p.Y-dst.Y >= p.X-dst.X: + p.append(UniqueY) + default: + p.append(UniqueX) + } + } + for p.X > dst.X { + p.append(UniqueX) + } + for p.Y > dst.Y { + p.append(UniqueY) + } + } +} + +func (p *path) append(t EditType) { + p.es = append(p.es, t) + switch t { + case Identity, Modified: + p.add(p.dir, p.dir) + case UniqueX: + p.add(p.dir, 0) + case UniqueY: + p.add(0, p.dir) + } + debug.Update() +} + +type point struct{ X, Y int } + +func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy } + +// zigzag maps a consecutive sequence of integers to a zig-zag sequence. +// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...] +func zigzag(x int) int { + if x&1 != 0 { + x = ^x + } + return x >> 1 +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go new file mode 100644 index 00000000..a9e7fc0b --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -0,0 +1,9 @@ +// Copyright 2019, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package flags + +// Deterministic controls whether the output of Diff should be deterministic. +// This is only used for testing. +var Deterministic bool diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go new file mode 100644 index 00000000..01aed0a1 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = false diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go new file mode 100644 index 00000000..c0b667f5 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -0,0 +1,10 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build go1.10 + +package flags + +// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10. +const AtLeastGo110 = true diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go new file mode 100644 index 00000000..ace1dbe8 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -0,0 +1,99 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// Package function provides functionality for identifying function types. +package function + +import ( + "reflect" + "regexp" + "runtime" + "strings" +) + +type funcType int + +const ( + _ funcType = iota + + tbFunc // func(T) bool + ttbFunc // func(T, T) bool + trbFunc // func(T, R) bool + tibFunc // func(T, I) bool + trFunc // func(T) R + + Equal = ttbFunc // func(T, T) bool + EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool + Transformer = trFunc // func(T) R + ValueFilter = ttbFunc // func(T, T) bool + Less = ttbFunc // func(T, T) bool + ValuePredicate = tbFunc // func(T) bool + KeyValuePredicate = trbFunc // func(T, R) bool +) + +var boolType = reflect.TypeOf(true) + +// IsType reports whether the reflect.Type is of the specified function type. 
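For illustration, here is a minimal sketch of the build-tag pattern used by the two `toolchain_*.go` files above: two files carry mutually exclusive `// +build` constraints, so exactly one definition of the constant is compiled in for any given toolchain. The `featureflag` package name is invented for this sketch, not part of the diff.

```go
// featureflag/legacy.go (hypothetical file for illustration)

// +build !go1.10

package featureflag

// AtLeastGo110 is false when compiled by a toolchain older than Go 1.10.
const AtLeastGo110 = false
```

```go
// featureflag/recent.go (hypothetical file for illustration)

// +build go1.10

package featureflag

// AtLeastGo110 is true when compiled by Go 1.10 or newer.
const AtLeastGo110 = true
```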
+func IsType(t reflect.Type, ft funcType) bool { + if t == nil || t.Kind() != reflect.Func || t.IsVariadic() { + return false + } + ni, no := t.NumIn(), t.NumOut() + switch ft { + case tbFunc: // func(T) bool + if ni == 1 && no == 1 && t.Out(0) == boolType { + return true + } + case ttbFunc: // func(T, T) bool + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { + return true + } + case trbFunc: // func(T, R) bool + if ni == 2 && no == 1 && t.Out(0) == boolType { + return true + } + case tibFunc: // func(T, I) bool + if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType { + return true + } + case trFunc: // func(T) R + if ni == 1 && no == 1 { + return true + } + } + return false +} + +var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`) + +// NameOf returns the name of the function value. +func NameOf(v reflect.Value) string { + fnc := runtime.FuncForPC(v.Pointer()) + if fnc == nil { + return "" + } + fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm" + + // Method closures have a "-fm" suffix. + fullName = strings.TrimSuffix(fullName, "-fm") + + var name string + for len(fullName) > 0 { + inParen := strings.HasSuffix(fullName, ")") + fullName = strings.TrimSuffix(fullName, ")") + + s := lastIdentRx.FindString(fullName) + if s == "" { + break + } + name = s + "." + name + fullName = strings.TrimSuffix(fullName, s) + + if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 { + fullName = fullName[:i] + } + fullName = strings.TrimSuffix(fullName, ".") + } + return strings.TrimSuffix(name, ".") +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go new file mode 100644 index 00000000..0a01c479 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -0,0 +1,23 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build purego + +package value + +import "reflect" + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p uintptr + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. +func PointerOf(v reflect.Value) Pointer { + // NOTE: Storing a pointer as an uintptr is technically incorrect as it + // assumes that the GC implementation does not use a moving collector. + return Pointer{v.Pointer(), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go new file mode 100644 index 00000000..da134ae2 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -0,0 +1,26 @@ +// Copyright 2018, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +// +build !purego + +package value + +import ( + "reflect" + "unsafe" +) + +// Pointer is an opaque typed pointer and is guaranteed to be comparable. +type Pointer struct { + p unsafe.Pointer + t reflect.Type +} + +// PointerOf returns a Pointer from v, which must be a +// reflect.Ptr, reflect.Slice, or reflect.Map. 
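The `NameOf` helper above trims a fully qualified function name down to its trailing identifiers. As a rough, runnable illustration of the raw input it works from, using only the standard library (the `MyType`/`MyMethod` names are invented for the demo):

```go
package main

import (
	"fmt"
	"reflect"
	"runtime"
)

// MyType is a hypothetical type for illustration only.
type MyType struct{}

func (MyType) MyMethod() {}

func main() {
	v := reflect.ValueOf(MyType.MyMethod)
	// FuncForPC yields the fully qualified name that NameOf then trims,
	// e.g. "main.MyType.MyMethod".
	fmt.Println(runtime.FuncForPC(v.Pointer()).Name())
}
```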
+func PointerOf(v reflect.Value) Pointer { + // The proper representation of a pointer is unsafe.Pointer, + // which is necessary if the GC ever uses a moving collector. + return Pointer{unsafe.Pointer(v.Pointer()), v.Type()} +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go new file mode 100644 index 00000000..938f646f --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -0,0 +1,104 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// SortKeys sorts a list of map keys, deduplicating keys if necessary. +// The type of each value must be comparable. +func SortKeys(vs []reflect.Value) []reflect.Value { + if len(vs) == 0 { + return vs + } + + // Sort the map keys. + sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) }) + + // Deduplicate keys (fails for NaNs). + vs2 := vs[:1] + for _, v := range vs[1:] { + if isLess(vs2[len(vs2)-1], v) { + vs2 = append(vs2, v) + } + } + return vs2 +} + +// isLess is a generic function for sorting arbitrary map keys. +// The inputs must be of the same type and must be comparable. +func isLess(x, y reflect.Value) bool { + switch x.Type().Kind() { + case reflect.Bool: + return !x.Bool() && y.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return x.Int() < y.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return x.Uint() < y.Uint() + case reflect.Float32, reflect.Float64: + fx, fy := x.Float(), y.Float() + return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy) + case reflect.Complex64, reflect.Complex128: + cx, cy := x.Complex(), y.Complex() + rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy) + if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) { + return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy) + } + return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry) + case reflect.Ptr, reflect.UnsafePointer, reflect.Chan: + return x.Pointer() < y.Pointer() + case reflect.String: + return x.String() < y.String() + case reflect.Array: + for i := 0; i < x.Len(); i++ { + if isLess(x.Index(i), y.Index(i)) { + return true + } + if isLess(y.Index(i), x.Index(i)) { + return false + } + } + return false + case reflect.Struct: + for i := 0; i < x.NumField(); i++ { + if isLess(x.Field(i), y.Field(i)) { + return true + } + if isLess(y.Field(i), x.Field(i)) { + return false + } + } + return false + case reflect.Interface: + vx, vy := x.Elem(), y.Elem() + if !vx.IsValid() || !vy.IsValid() { + return !vx.IsValid() && vy.IsValid() + } + tx, ty := vx.Type(), vy.Type() + if tx == ty { + return isLess(x.Elem(), y.Elem()) + } + if tx.Kind() != ty.Kind() { + return vx.Kind() < vy.Kind() + } + if tx.String() != ty.String() { + return tx.String() < ty.String() + } + if tx.PkgPath() != ty.PkgPath() { + return tx.PkgPath() < ty.PkgPath() + } + // This can happen in rare situations, so we fallback to just comparing + // the unique pointer for a reflect.Type. This guarantees deterministic + // ordering within a program, but it is obviously not stable. + return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer() + default: + // Must be Func, Map, or Slice; which are not comparable. 
+ panic(fmt.Sprintf("%T is not comparable", x.Type())) + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go new file mode 100644 index 00000000..d13a12cc --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -0,0 +1,45 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package value + +import "reflect" + +// IsZero reports whether v is the zero value. +// This does not rely on Interface and so can be used on unexported fields. +func IsZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() == 0 + case reflect.String: + return v.String() == "" + case reflect.UnsafePointer: + return v.Pointer() == 0 + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if !IsZero(v.Index(i)) { + return false + } + } + return true + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if !IsZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go new file mode 100644 index 00000000..79344816 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -0,0 +1,524 @@ +// Copyright 2017, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/google/go-cmp/cmp/internal/function" +) + +// Option configures for specific behavior of Equal and Diff. In particular, +// the fundamental Option functions (Ignore, Transformer, and Comparer), +// configure how equality is determined. +// +// The fundamental options may be composed with filters (FilterPath and +// FilterValues) to control the scope over which they are applied. +// +// The cmp/cmpopts package provides helper functions for creating options that +// may be used with Equal and Diff. +type Option interface { + // filter applies all filters and returns the option that remains. + // Each option may only read s.curPath and call s.callTTBFunc. + // + // An Options is returned only if multiple comparers or transformers + // can apply simultaneously and will only contain values of those types + // or sub-Options containing values of those types. + filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption +} + +// applicableOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Grouping: Options +type applicableOption interface { + Option + + // apply executes the option, which may mutate s or panic. 
+ apply(s *state, vx, vy reflect.Value) +} + +// coreOption represents the following types: +// Fundamental: ignore | validator | *comparer | *transformer +// Filters: *pathFilter | *valuesFilter +type coreOption interface { + Option + isCore() +} + +type core struct{} + +func (core) isCore() {} + +// Options is a list of Option values that also satisfies the Option interface. +// Helper comparison packages may return an Options value when packing multiple +// Option values into a single Option. When this package processes an Options, +// it will be implicitly expanded into a flat list. +// +// Applying a filter on an Options is equivalent to applying that same filter +// on all individual options held within. +type Options []Option + +func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) { + for _, opt := range opts { + switch opt := opt.filter(s, t, vx, vy); opt.(type) { + case ignore: + return ignore{} // Only ignore can short-circuit evaluation + case validator: + out = validator{} // Takes precedence over comparer or transformer + case *comparer, *transformer, Options: + switch out.(type) { + case nil: + out = opt + case validator: + // Keep validator + case *comparer, *transformer, Options: + out = Options{out, opt} // Conflicting comparers or transformers + } + } + } + return out +} + +func (opts Options) apply(s *state, _, _ reflect.Value) { + const warning = "ambiguous set of applicable options" + const help = "consider using filters to ensure at most one Comparer or Transformer may apply" + var ss []string + for _, opt := range flattenOptions(nil, opts) { + ss = append(ss, fmt.Sprint(opt)) + } + set := strings.Join(ss, "\n\t") + panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help)) +} + +func (opts Options) String() string { + var ss []string + for _, opt := range opts { + ss = append(ss, fmt.Sprint(opt)) + } + return fmt.Sprintf("Options{%s}", strings.Join(ss, ", ")) +} + +// FilterPath returns a new Option where opt is only evaluated if filter f +// returns true for the current Path in the value tree. +// +// This filter is called even if a slice element or map entry is missing and +// provides an opportunity to ignore such cases. The filter function must be +// symmetric such that the filter result is identical regardless of whether the +// missing value is from x or y. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterPath(f func(Path) bool, opt Option) Option { + if f == nil { + panic("invalid path filter function") + } + if opt := normalizeOption(opt); opt != nil { + return &pathFilter{fnc: f, opt: opt} + } + return nil +} + +type pathFilter struct { + core + fnc func(Path) bool + opt Option +} + +func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if f.fnc(s.curPath) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f pathFilter) String() string { + return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt) +} + +// FilterValues returns a new Option where opt is only evaluated if filter f, +// which is a function of the form "func(T, T) bool", returns true for the +// current pair of values being compared. If either value is invalid or +// the type of the values is not assignable to T, then this filter implicitly +// returns false. 
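As a usage sketch of how `FilterPath` above composes with `Ignore` via the public API (the `Config` type and its `Updated` field are invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// Config is a hypothetical type for illustration only.
type Config struct {
	Name    string
	Updated int64 // e.g., a timestamp we do not want compared
}

func main() {
	// Ignore any struct field named "Updated", wherever it appears in the tree.
	ignoreUpdated := cmp.FilterPath(func(p cmp.Path) bool {
		if sf, ok := p.Last().(cmp.StructField); ok {
			return sf.Name() == "Updated"
		}
		return false
	}, cmp.Ignore())

	x := Config{Name: "a", Updated: 1}
	y := Config{Name: "a", Updated: 2}
	fmt.Println(cmp.Equal(x, y, ignoreUpdated)) // true
}
```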
+// +// The filter function must be +// symmetric (i.e., agnostic to the order of the inputs) and +// deterministic (i.e., produces the same result when given the same inputs). +// If T is an interface, it is possible that f is called with two values with +// different concrete types that both implement T. +// +// The option passed in may be an Ignore, Transformer, Comparer, Options, or +// a previously filtered Option. +func FilterValues(f interface{}, opt Option) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() { + panic(fmt.Sprintf("invalid values filter function: %T", f)) + } + if opt := normalizeOption(opt); opt != nil { + vf := &valuesFilter{fnc: v, opt: opt} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + vf.typ = ti + } + return vf + } + return nil +} + +type valuesFilter struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool + opt Option +} + +func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() { + return nil + } + if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) { + return f.opt.filter(s, t, vx, vy) + } + return nil +} + +func (f valuesFilter) String() string { + return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt) +} + +// Ignore is an Option that causes all comparisons to be ignored. +// This value is intended to be combined with FilterPath or FilterValues. +// It is an error to pass an unfiltered Ignore option to Equal. +func Ignore() Option { return ignore{} } + +type ignore struct{ core } + +func (ignore) isFiltered() bool { return false } +func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} } +func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) } +func (ignore) String() string { return "Ignore()" } + +// validator is a sentinel Option type to indicate that some options could not +// be evaluated due to unexported fields, missing slice elements, or +// missing map entries. Both values are validator only for unexported fields. +type validator struct{ core } + +func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption { + if !vx.IsValid() || !vy.IsValid() { + return validator{} + } + if !vx.CanInterface() || !vy.CanInterface() { + return validator{} + } + return nil +} +func (validator) apply(s *state, vx, vy reflect.Value) { + // Implies missing slice element or map entry. + if !vx.IsValid() || !vy.IsValid() { + s.report(vx.IsValid() == vy.IsValid(), 0) + return + } + + // Unable to Interface implies unexported field without visibility access. + if !vx.CanInterface() || !vy.CanInterface() { + const help = "consider using a custom Comparer; if you control the implementation of type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported" + panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help)) + } + + panic("not reachable") +} + +// identRx represents a valid identifier according to the Go specification. +const identRx = `[_\p{L}][_\p{L}\p{N}]*` + +var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`) + +// Transformer returns an Option that applies a transformation function that +// converts values of a certain type into that of another. 
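A runnable sketch of `FilterValues` restricting a permissive `Comparer` to the one case where it should apply, here equating NaNs; this mirrors how an equate-NaNs helper is commonly built and is not part of this diff:

```go
package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// The filter limits the comparer to pairs where both values are NaN;
	// within that case, the comparer simply reports equality.
	equateNaNs := cmp.FilterValues(
		func(x, y float64) bool { return math.IsNaN(x) && math.IsNaN(y) },
		cmp.Comparer(func(x, y float64) bool { return true }),
	)

	x := []float64{1.0, math.NaN(), 3.0}
	y := []float64{1.0, math.NaN(), 3.0}
	fmt.Println(cmp.Equal(x, y))             // false: NaN != NaN by default
	fmt.Println(cmp.Equal(x, y, equateNaNs)) // true
}
```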
+// +// The transformer f must be a function "func(T) R" that converts values of +// type T to those of type R and is implicitly filtered to input values +// assignable to T. The transformer must not mutate T in any way. +// +// To help prevent some cases of infinite recursive cycles applying the +// same transform to the output of itself (e.g., in the case where the +// input and output types are the same), an implicit filter is added such that +// a transformer is applicable only if that exact transformer is not already +// in the tail of the Path since the last non-Transform step. +// For situations where the implicit filter is still insufficient, +// consider using cmpopts.AcyclicTransformer, which adds a filter +// to prevent the transformer from being recursively applied upon itself. +// +// The name is a user provided label that is used as the Transform.Name in the +// transformation PathStep (and eventually shown in the Diff output). +// The name must be a valid identifier or qualified identifier in Go syntax. +// If empty, an arbitrary name is used. +func Transformer(name string, f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Transformer) || v.IsNil() { + panic(fmt.Sprintf("invalid transformer function: %T", f)) + } + if name == "" { + name = function.NameOf(v) + if !identsRx.MatchString(name) { + name = "λ" // Lambda-symbol as placeholder name + } + } else if !identsRx.MatchString(name) { + panic(fmt.Sprintf("invalid name: %q", name)) + } + tr := &transformer{name: name, fnc: reflect.ValueOf(f)} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + tr.typ = ti + } + return tr +} + +type transformer struct { + core + name string + typ reflect.Type // T + fnc reflect.Value // func(T) R +} + +func (tr *transformer) isFiltered() bool { return tr.typ != nil } + +func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption { + for i := len(s.curPath) - 1; i >= 0; i-- { + if t, ok := s.curPath[i].(Transform); !ok { + break // Hit most recent non-Transform step + } else if tr == t.trans { + return nil // Cannot directly use same Transform + } + } + if tr.typ == nil || t.AssignableTo(tr.typ) { + return tr + } + return nil +} + +func (tr *transformer) apply(s *state, vx, vy reflect.Value) { + step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}} + vvx := s.callTRFunc(tr.fnc, vx, step) + vvy := s.callTRFunc(tr.fnc, vy, step) + step.vx, step.vy = vvx, vvy + s.compareAny(step) +} + +func (tr transformer) String() string { + return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc)) +} + +// Comparer returns an Option that determines whether two values are equal +// to each other. +// +// The comparer f must be a function "func(T, T) bool" and is implicitly +// filtered to input values assignable to T. If T is an interface, it is +// possible that f is called with two values of different concrete types that +// both implement T. 
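A runnable sketch of `Transformer` in action, comparing integer slices as multisets by sorting a copy; note the function copies its input rather than mutating it, as the documentation above requires:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Transform each []int into a sorted copy before comparison.
	sortInts := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...) // copy; transformers must not mutate
		sort.Ints(out)
		return out
	})

	x := []int{3, 1, 2}
	y := []int{2, 3, 1}
	fmt.Println(cmp.Equal(x, y, sortInts)) // true
}
```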
+// +// The equality function must be: +// • Symmetric: equal(x, y) == equal(y, x) +// • Deterministic: equal(x, y) == equal(x, y) +// • Pure: equal(x, y) does not modify x or y +func Comparer(f interface{}) Option { + v := reflect.ValueOf(f) + if !function.IsType(v.Type(), function.Equal) || v.IsNil() { + panic(fmt.Sprintf("invalid comparer function: %T", f)) + } + cm := &comparer{fnc: v} + if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 { + cm.typ = ti + } + return cm +} + +type comparer struct { + core + typ reflect.Type // T + fnc reflect.Value // func(T, T) bool +} + +func (cm *comparer) isFiltered() bool { return cm.typ != nil } + +func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption { + if cm.typ == nil || t.AssignableTo(cm.typ) { + return cm + } + return nil +} + +func (cm *comparer) apply(s *state, vx, vy reflect.Value) { + eq := s.callTTBFunc(cm.fnc, vx, vy) + s.report(eq, reportByFunc) +} + +func (cm comparer) String() string { + return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc)) +} + +// AllowUnexported returns an Option that forcibly allows operations on +// unexported fields in certain structs, which are specified by passing in a +// value of each struct type. +// +// Users of this option must understand that comparing on unexported fields +// from external packages is not safe since changes in the internal +// implementation of some external package may cause the result of Equal +// to unexpectedly change. However, it may be valid to use this option on types +// defined in an internal package where the semantic meaning of an unexported +// field is in the control of the user. +// +// In many cases, a custom Comparer should be used instead that defines +// equality as a function of the public API of a type rather than the underlying +// unexported implementation. +// +// For example, the reflect.Type documentation defines equality to be determined +// by the == operator on the interface (essentially performing a shallow pointer +// comparison) and most attempts to compare *regexp.Regexp types are interested +// in only checking that the regular expression strings are equal. +// Both of these are accomplished using Comparers: +// +// Comparer(func(x, y reflect.Type) bool { return x == y }) +// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() }) +// +// In other cases, the cmpopts.IgnoreUnexported option can be used to ignore +// all unexported fields on specified struct types. +func AllowUnexported(types ...interface{}) Option { + if !supportAllowUnexported { + panic("AllowUnexported is not supported on purego builds, Google App Engine Standard, or GopherJS") + } + m := make(map[reflect.Type]bool) + for _, typ := range types { + t := reflect.TypeOf(typ) + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("invalid struct type: %T", typ)) + } + m[t] = true + } + return visibleStructs(m) +} + +type visibleStructs map[reflect.Type]bool + +func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { + panic("not implemented") +} + +// Result represents the comparison result for a single node and +// is provided by cmp when calling Result (see Reporter). +type Result struct { + _ [0]func() // Make Result incomparable + flags resultFlags +} + +// Equal reports whether the node was determined to be equal or not. +// As a special case, ignored nodes are considered equal. 
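The `*regexp.Regexp` comparer suggested in the `AllowUnexported` documentation above, expanded into a runnable sketch (the nil checks are an addition for safety, not from the original comment):

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Compare compiled regexps by their source text instead of reaching
	// into their unexported internal state.
	opt := cmp.Comparer(func(x, y *regexp.Regexp) bool {
		if x == nil || y == nil {
			return x == nil && y == nil
		}
		return x.String() == y.String()
	})

	x := regexp.MustCompile(`a+b`)
	y := regexp.MustCompile(`a+b`)
	fmt.Println(cmp.Equal(x, y, opt)) // true
}
```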
+func (r Result) Equal() bool {
+	return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if Equal reports false.
+func (r Result) ByIgnore() bool {
+	return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+	return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a Comparer function determined equality.
+func (r Result) ByFunc() bool {
+	return r.flags&reportByFunc != 0
+}
+
+type resultFlags uint
+
+const (
+	_ resultFlags = (1 << iota) / 2
+
+	reportEqual
+	reportUnequal
+	reportByIgnore
+	reportByMethod
+	reportByFunc
+)
+
+// Reporter is an Option that can be passed to Equal. When Equal traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+	// PushStep is called when a tree-traversal operation is performed.
+	// The PathStep itself is only valid until the step is popped.
+	// The PathStep.Values are valid for the duration of the entire traversal
+	// and must not be mutated.
+	//
+	// Equal always calls PushStep at the start to provide an operation-less
+	// PathStep used to report the root values.
+	//
+	// Within a slice, the exact set of inserted, removed, or modified elements
+	// is unspecified and may change in future implementations.
+	// The entries of a map are iterated through in an unspecified order.
+	PushStep(PathStep)
+
+	// Report is called exactly once on leaf nodes to report whether the
+	// comparison identified the node as equal, unequal, or ignored.
+	// A leaf node is one that is immediately preceded by and followed by
+	// a pair of PushStep and PopStep calls.
+	Report(Result)
+
+	// PopStep ascends back up the value tree.
+	// There is always a matching pop call for every push call.
+	PopStep()
+}) Option {
+	return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+	PushStep(PathStep)
+	Report(Result)
+	PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+	panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+	switch opts := flattenOptions(nil, Options{src}); len(opts) {
+	case 0:
+		return nil
+	case 1:
+		return opts[0]
+	default:
+		return opts
+	}
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+	for _, opt := range src {
+		switch opt := opt.(type) {
+		case nil:
+			continue
+		case Options:
+			dst = flattenOptions(dst, opt)
+		case coreOption:
+			dst = append(dst, opt)
+		default:
+			panic(fmt.Sprintf("invalid option type: %T", opt))
+		}
+	}
+	return dst
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 00000000..96fffd29
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,308 @@
+// Copyright 2017, The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Path is a list of PathSteps describing the sequence of operations to get +// from some root type to the current position in the value tree. +// The first Path element is always an operation-less PathStep that exists +// simply to identify the initial type. +// +// When traversing structs with embedded structs, the embedded struct will +// always be accessed as a field before traversing the fields of the +// embedded struct themselves. That is, an exported field from the +// embedded struct will never be accessed directly from the parent struct. +type Path []PathStep + +// PathStep is a union-type for specific operations to traverse +// a value's tree structure. Users of this package never need to implement +// these types as values of this type will be returned by this package. +// +// Implementations of this interface are +// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform. +type PathStep interface { + String() string + + // Type is the resulting type after performing the path step. + Type() reflect.Type + + // Values is the resulting values after performing the path step. + // The type of each valid value is guaranteed to be identical to Type. + // + // In some cases, one or both may be invalid or have restrictions: + // • For StructField, both are not interface-able if the current field + // is unexported and the struct type is not explicitly permitted by + // AllowUnexported to traverse unexported fields. + // • For SliceIndex, one may be invalid if an element is missing from + // either the x or y slice. + // • For MapIndex, one may be invalid if an entry is missing from + // either the x or y map. + // + // The provided values must not be mutated. + Values() (vx, vy reflect.Value) +} + +var ( + _ PathStep = StructField{} + _ PathStep = SliceIndex{} + _ PathStep = MapIndex{} + _ PathStep = Indirect{} + _ PathStep = TypeAssertion{} + _ PathStep = Transform{} +) + +func (pa *Path) push(s PathStep) { + *pa = append(*pa, s) +} + +func (pa *Path) pop() { + *pa = (*pa)[:len(*pa)-1] +} + +// Last returns the last PathStep in the Path. +// If the path is empty, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Last() PathStep { + return pa.Index(-1) +} + +// Index returns the ith step in the Path and supports negative indexing. +// A negative index starts counting from the tail of the Path such that -1 +// refers to the last step, -2 refers to the second-to-last step, and so on. +// If index is invalid, this returns a non-nil PathStep that reports a nil Type. +func (pa Path) Index(i int) PathStep { + if i < 0 { + i = len(pa) + i + } + if i < 0 || i >= len(pa) { + return pathStep{} + } + return pa[i] +} + +// String returns the simplified path to a node. +// The simplified path only contains struct field accesses. +// +// For example: +// MyMap.MySlices.MyField +func (pa Path) String() string { + var ss []string + for _, s := range pa { + if _, ok := s.(StructField); ok { + ss = append(ss, s.String()) + } + } + return strings.TrimPrefix(strings.Join(ss, ""), ".") +} + +// GoString returns the path to a specific node using Go syntax. 
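Pulling the `Reporter` option and the `Path` type together, here is a minimal custom reporter that records the path and values of each unequal leaf. The `diffReporter` name is invented; this is a sketch following the Reporter contract above, not part of the diff:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the paths of unequal leaf nodes. PushStep and
// PopStep bracket every node; Report fires exactly once per leaf.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v: %+v != %+v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func main() {
	var r diffReporter
	x := map[string]int{"a": 1, "b": 2}
	y := map[string]int{"a": 1, "b": 3}
	cmp.Equal(x, y, cmp.Reporter(&r))
	fmt.Println(strings.Join(r.diffs, "\n"))
}
```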
+// +// For example: +// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField +func (pa Path) GoString() string { + var ssPre, ssPost []string + var numIndirect int + for i, s := range pa { + var nextStep PathStep + if i+1 < len(pa) { + nextStep = pa[i+1] + } + switch s := s.(type) { + case Indirect: + numIndirect++ + pPre, pPost := "(", ")" + switch nextStep.(type) { + case Indirect: + continue // Next step is indirection, so let them batch up + case StructField: + numIndirect-- // Automatic indirection on struct fields + case nil: + pPre, pPost = "", "" // Last step; no need for parenthesis + } + if numIndirect > 0 { + ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect)) + ssPost = append(ssPost, pPost) + } + numIndirect = 0 + continue + case Transform: + ssPre = append(ssPre, s.trans.name+"(") + ssPost = append(ssPost, ")") + continue + } + ssPost = append(ssPost, s.String()) + } + for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 { + ssPre[i], ssPre[j] = ssPre[j], ssPre[i] + } + return strings.Join(ssPre, "") + strings.Join(ssPost, "") +} + +type pathStep struct { + typ reflect.Type + vx, vy reflect.Value +} + +func (ps pathStep) Type() reflect.Type { return ps.typ } +func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy } +func (ps pathStep) String() string { + if ps.typ == nil { + return "" + } + s := ps.typ.String() + if s == "" || strings.ContainsAny(s, "{}\n") { + return "root" // Type too simple or complex to print + } + return fmt.Sprintf("{%s}", s) +} + +// StructField represents a struct field access on a field called Name. +type StructField struct{ *structField } +type structField struct { + pathStep + name string + idx int + + // These fields are used for forcibly accessing an unexported field. + // pvx, pvy, and field are only valid if unexported is true. + unexported bool + mayForce bool // Forcibly allow visibility + pvx, pvy reflect.Value // Parent values + field reflect.StructField // Field information +} + +func (sf StructField) Type() reflect.Type { return sf.typ } +func (sf StructField) Values() (vx, vy reflect.Value) { + if !sf.unexported { + return sf.vx, sf.vy // CanInterface reports true + } + + // Forcibly obtain read-write access to an unexported struct field. + if sf.mayForce { + vx = retrieveUnexportedField(sf.pvx, sf.field) + vy = retrieveUnexportedField(sf.pvy, sf.field) + return vx, vy // CanInterface reports true + } + return sf.vx, sf.vy // CanInterface reports false +} +func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) } + +// Name is the field name. +func (sf StructField) Name() string { return sf.name } + +// Index is the index of the field in the parent struct type. +// See reflect.Type.Field. +func (sf StructField) Index() int { return sf.idx } + +// SliceIndex is an index operation on a slice or array at some index Key. +type SliceIndex struct{ *sliceIndex } +type sliceIndex struct { + pathStep + xkey, ykey int +} + +func (si SliceIndex) Type() reflect.Type { return si.typ } +func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy } +func (si SliceIndex) String() string { + switch { + case si.xkey == si.ykey: + return fmt.Sprintf("[%d]", si.xkey) + case si.ykey == -1: + // [5->?] 
means "I don't know where X[5] went" + return fmt.Sprintf("[%d->?]", si.xkey) + case si.xkey == -1: + // [?->3] means "I don't know where Y[3] came from" + return fmt.Sprintf("[?->%d]", si.ykey) + default: + // [5->3] means "X[5] moved to Y[3]" + return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey) + } +} + +// Key is the index key; it may return -1 if in a split state +func (si SliceIndex) Key() int { + if si.xkey != si.ykey { + return -1 + } + return si.xkey +} + +// SplitKeys are the indexes for indexing into slices in the +// x and y values, respectively. These indexes may differ due to the +// insertion or removal of an element in one of the slices, causing +// all of the indexes to be shifted. If an index is -1, then that +// indicates that the element does not exist in the associated slice. +// +// Key is guaranteed to return -1 if and only if the indexes returned +// by SplitKeys are not the same. SplitKeys will never return -1 for +// both indexes. +func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey } + +// MapIndex is an index operation on a map at some index Key. +type MapIndex struct{ *mapIndex } +type mapIndex struct { + pathStep + key reflect.Value +} + +func (mi MapIndex) Type() reflect.Type { return mi.typ } +func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy } +func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) } + +// Key is the value of the map key. +func (mi MapIndex) Key() reflect.Value { return mi.key } + +// Indirect represents pointer indirection on the parent type. +type Indirect struct{ *indirect } +type indirect struct { + pathStep +} + +func (in Indirect) Type() reflect.Type { return in.typ } +func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy } +func (in Indirect) String() string { return "*" } + +// TypeAssertion represents a type assertion on an interface. +type TypeAssertion struct{ *typeAssertion } +type typeAssertion struct { + pathStep +} + +func (ta TypeAssertion) Type() reflect.Type { return ta.typ } +func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy } +func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) } + +// Transform is a transformation from the parent type to the current type. +type Transform struct{ *transform } +type transform struct { + pathStep + trans *transformer +} + +func (tf Transform) Type() reflect.Type { return tf.typ } +func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy } +func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) } + +// Name is the name of the Transformer. +func (tf Transform) Name() string { return tf.trans.name } + +// Func is the function pointer to the transformer function. +func (tf Transform) Func() reflect.Value { return tf.trans.fnc } + +// Option returns the originally constructed Transformer option. +// The == operator can be used to detect the exact option used. +func (tf Transform) Option() Option { return tf.trans } + +// isExported reports whether the identifier is exported. +func isExported(id string) bool { + r, _ := utf8.DecodeRuneInString(id) + return unicode.IsUpper(r) +} diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go new file mode 100644 index 00000000..6ddf2999 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -0,0 +1,51 @@ +// Copyright 2017, The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +// defaultReporter implements the reporter interface. +// +// As Equal serially calls the PushStep, Report, and PopStep methods, the +// defaultReporter constructs a tree-based representation of the compared value +// and the result of each comparison (see valueNode). +// +// When the String method is called, the FormatDiff method transforms the +// valueNode tree into a textNode tree, which is a tree-based representation +// of the textual output (see textNode). +// +// Lastly, the textNode.String method produces the final report as a string. +type defaultReporter struct { + root *valueNode + curr *valueNode +} + +func (r *defaultReporter) PushStep(ps PathStep) { + r.curr = r.curr.PushStep(ps) + if r.root == nil { + r.root = r.curr + } +} +func (r *defaultReporter) Report(rs Result) { + r.curr.Report(rs) +} +func (r *defaultReporter) PopStep() { + r.curr = r.curr.PopStep() +} + +// String provides a full report of the differences detected as a structured +// literal in pseudo-Go syntax. String may only be called after the entire tree +// has been traversed. +func (r *defaultReporter) String() string { + assert(r.root != nil && r.curr == nil) + if r.root.NumDiff == 0 { + return "" + } + return formatOptions{}.FormatDiff(r.root).String() +} + +func assert(ok bool) { + if !ok { + panic("assertion failure") + } +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go new file mode 100644 index 00000000..05efb992 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -0,0 +1,296 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. + +package cmp + +import ( + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp/internal/value" +) + +// TODO: Enforce limits? +// * Enforce maximum number of records to print per node? +// * Enforce maximum size in bytes allowed? +// * As a heuristic, use less verbosity for equal nodes than unequal nodes. +// TODO: Enforce unique outputs? +// * Avoid Stringer methods if it results in same output? +// * Print pointer address if outputs still equal? + +// numContextRecords is the number of surrounding equal records to print. +const numContextRecords = 2 + +type diffMode byte + +const ( + diffUnknown diffMode = 0 + diffIdentical diffMode = ' ' + diffRemoved diffMode = '-' + diffInserted diffMode = '+' +) + +type typeMode int + +const ( + // emitType always prints the type. + emitType typeMode = iota + // elideType never prints the type. + elideType + // autoType prints the type only for composite kinds + // (i.e., structs, slices, arrays, and maps). + autoType +) + +type formatOptions struct { + // DiffMode controls the output mode of FormatDiff. + // + // If diffUnknown, then produce a diff of the x and y values. + // If diffIdentical, then emit values as if they were equal. + // If diffRemoved, then only emit x values (ignoring y values). + // If diffInserted, then only emit y values (ignoring x values). + DiffMode diffMode + + // TypeMode controls whether to print the type for the current node. + // + // As a general rule of thumb, we always print the type of the next node + // after an interface, and always elide the type of the next node after + // a slice or map node. 
+	TypeMode typeMode
+
+	// formatValueOptions are options specific to printing reflect.Values.
+	formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+	opts.DiffMode = d
+	return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+	opts.TypeMode = t
+	return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode) textNode {
+	// Check whether we have specialized formatting for this node.
+	// This is not necessary, but helpful for producing more readable outputs.
+	if opts.CanFormatDiffSlice(v) {
+		return opts.FormatDiffSlice(v)
+	}
+
+	// For leaf nodes, format the value based on the reflect.Values alone.
+	if v.MaxDepth == 0 {
+		switch opts.DiffMode {
+		case diffUnknown, diffIdentical:
+			// Format Equal.
+			if v.NumDiff == 0 {
+				outx := opts.FormatValue(v.ValueX, visitedPointers{})
+				outy := opts.FormatValue(v.ValueY, visitedPointers{})
+				if v.NumIgnored > 0 && v.NumSame == 0 {
+					return textEllipsis
+				} else if outx.Len() < outy.Len() {
+					return outx
+				} else {
+					return outy
+				}
+			}
+
+			// Format unequal.
+			assert(opts.DiffMode == diffUnknown)
+			var list textList
+			outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{})
+			outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{})
+			if outx != nil {
+				list = append(list, textRecord{Diff: '-', Value: outx})
+			}
+			if outy != nil {
+				list = append(list, textRecord{Diff: '+', Value: outy})
+			}
+			return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+		case diffRemoved:
+			return opts.FormatValue(v.ValueX, visitedPointers{})
+		case diffInserted:
+			return opts.FormatValue(v.ValueY, visitedPointers{})
+		default:
+			panic("invalid diff mode")
+		}
+	}
+
+	// Descend into the child value node.
+	if v.TransformerName != "" {
+		out := opts.WithTypeMode(emitType).FormatDiff(v.Value)
+		out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"}
+		return opts.FormatType(v.Type, out)
+	} else {
+		switch k := v.Type.Kind(); k {
+		case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
+			return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k))
+		case reflect.Ptr:
+			return textWrap{"&", opts.FormatDiff(v.Value), ""}
+		case reflect.Interface:
+			return opts.WithTypeMode(emitType).FormatDiff(v.Value)
+		default:
+			panic(fmt.Sprintf("%v cannot have children", k))
+		}
+	}
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode {
+	// Derive record name based on the data structure kind.
+	var name string
+	var formatKey func(reflect.Value) string
+	switch k {
+	case reflect.Struct:
+		name = "field"
+		opts = opts.WithTypeMode(autoType)
+		formatKey = func(v reflect.Value) string { return v.String() }
+	case reflect.Slice, reflect.Array:
+		name = "element"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = func(reflect.Value) string { return "" }
+	case reflect.Map:
+		name = "entry"
+		opts = opts.WithTypeMode(elideType)
+		formatKey = formatMapKey
+	}
+
+	// Handle unification.
+	switch opts.DiffMode {
+	case diffIdentical, diffRemoved, diffInserted:
+		var list textList
+		var deferredEllipsis bool // Add final "..." to indicate records were dropped
+		for _, r := range recs {
+			// Elide struct fields that are zero value.
+			if k == reflect.Struct {
+				var isZero bool
+				switch opts.DiffMode {
+				case diffIdentical:
+					isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
+				case diffRemoved:
+					isZero = value.IsZero(r.Value.ValueX)
+				case diffInserted:
+					isZero = value.IsZero(r.Value.ValueY)
+				}
+				if isZero {
+					continue
+				}
+			}
+			// Elide ignored nodes.
+			if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+				deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+				if !deferredEllipsis {
+					list.AppendEllipsis(diffStats{})
+				}
+				continue
+			}
+			if out := opts.FormatDiff(r.Value); out != nil {
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+		}
+		if deferredEllipsis {
+			list.AppendEllipsis(diffStats{})
+		}
+		return textWrap{"{", list, "}"}
+	case diffUnknown:
+	default:
+		panic("invalid diff mode")
+	}
+
+	// Handle differencing.
+	var list textList
+	groups := coalesceAdjacentRecords(name, recs)
+	for i, ds := range groups {
+		// Handle equal records.
+		if ds.NumDiff() == 0 {
+			// Compute the number of leading and trailing records to print.
+			var numLo, numHi int
+			numEqual := ds.NumIgnored + ds.NumIdentical
+			for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+				if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+					break
+				}
+				numLo++
+			}
+			for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+				if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+					break
+				}
+				numHi++
+			}
+			if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+				numHi++ // Avoid pointless coalescing of a single equal record
+			}
+
+			// Format the equal values.
+			for _, r := range recs[:numLo] {
+				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+			if numEqual > numLo+numHi {
+				ds.NumIdentical -= numLo + numHi
+				list.AppendEllipsis(ds)
+			}
+			for _, r := range recs[numEqual-numHi : numEqual] {
+				out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+			recs = recs[numEqual:]
+			continue
+		}
+
+		// Handle unequal records.
+		for _, r := range recs[:ds.NumDiff()] {
+			switch {
+			case opts.CanFormatDiffSlice(r.Value):
+				out := opts.FormatDiffSlice(r.Value)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			case r.Value.NumChildren == r.Value.MaxDepth:
+				outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value)
+				outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value)
+				if outx != nil {
+					list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+				}
+				if outy != nil {
+					list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+				}
+			default:
+				out := opts.FormatDiff(r.Value)
+				list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+			}
+		}
+		recs = recs[ds.NumDiff():]
+	}
+	assert(len(recs) == 0)
+	return textWrap{"{", list, "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+	var prevCase int // Arbitrary index into which case last occurred
+	lastStats := func(i int) *diffStats {
+		if prevCase != i {
+			groups = append(groups, diffStats{Name: name})
+			prevCase = i
+		}
+		return &groups[len(groups)-1]
+	}
+	for _, r := range recs {
+		switch rv := r.Value; {
+		case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+			lastStats(1).NumIgnored++
+		case rv.NumDiff == 0:
+			lastStats(1).NumIdentical++
+		case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+			lastStats(2).NumRemoved++
+		case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+			lastStats(2).NumInserted++
+		default:
+			lastStats(2).NumModified++
+		}
+	}
+	return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 00000000..2761b628
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,278 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/google/go-cmp/cmp/internal/flags"
+	"github.com/google/go-cmp/cmp/internal/value"
+)
+
+type formatValueOptions struct {
+	// AvoidStringer controls whether to avoid calling custom stringer
+	// methods like error.Error or fmt.Stringer.String.
+	AvoidStringer bool
+
+	// ShallowPointers controls whether to avoid descending into pointers.
+	// Useful when printing map keys, where pointer comparison is performed
+	// on the pointer address rather than the pointed-at value.
+	ShallowPointers bool
+
+	// PrintAddresses controls whether to print the address of all pointers,
+	// slice elements, and maps.
+	PrintAddresses bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+	// Check whether to emit the type or not.
+	switch opts.TypeMode {
+	case autoType:
+		switch t.Kind() {
+		case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+			if s.Equal(textNil) {
+				return s
+			}
+		default:
+			return s
+		}
+	case elideType:
+		return s
+	}
+
+	// Determine the type label, applying special handling for unnamed types.
+	typeName := t.String()
+	if t.Name() == "" {
+		// According to Go grammar, certain type literals contain symbols that
+		// do not strongly bind to the next lexicographical token (e.g., *T).
+		switch t.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Ptr:
+			typeName = "(" + typeName + ")"
+		}
+		typeName = strings.Replace(typeName, "struct {", "struct{", -1)
+		typeName = strings.Replace(typeName, "interface {", "interface{", -1)
+	}
+
+	// Avoid wrapping the value in parentheses if unnecessary.
+	if s, ok := s.(textWrap); ok {
+		hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")")
+		hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
+		if hasParens || hasBraces {
+			return textWrap{typeName, s, ""}
+		}
+	}
+	return textWrap{typeName + "(", s, ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in m. As pointers are visited, m is also updated.
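`FormatValue` below leans on a visited-pointers set to terminate on cyclic values. A stripped-down, runnable sketch of that cycle-guard idea, keyed by address alone (go-cmp's `value.Pointer` additionally keys by type); the `node` type is invented for the demo:

```go
package main

import (
	"fmt"
	"reflect"
)

// walk descends through pointers and struct fields, using a visited set
// keyed by pointer address to stop on cycles.
func walk(v reflect.Value, seen map[uintptr]bool) {
	switch v.Kind() {
	case reflect.Ptr:
		if v.IsNil() || seen[v.Pointer()] {
			fmt.Println("cycle or nil pointer: stopping")
			return
		}
		seen[v.Pointer()] = true // mark before descending
		walk(v.Elem(), seen)
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			walk(v.Field(i), seen)
		}
	default:
		fmt.Println(v)
	}
}

type node struct{ next *node }

func main() {
	a := &node{}
	a.next = a // self-cycle
	walk(reflect.ValueOf(a), map[uintptr]bool{})
}
```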
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) { + if !v.IsValid() { + return nil + } + t := v.Type() + + // Check whether there is an Error or String method to call. + if !opts.AvoidStringer && v.CanInterface() { + // Avoid calling Error or String methods on nil receivers since many + // implementations crash when doing so. + if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() { + switch v := v.Interface().(type) { + case error: + return textLine("e" + formatString(v.Error())) + case fmt.Stringer: + return textLine("s" + formatString(v.String())) + } + } + } + + // Check whether to explicitly wrap the result with the type. + var skipType bool + defer func() { + if !skipType { + out = opts.FormatType(t, out) + } + }() + + var ptr string + switch t.Kind() { + case reflect.Bool: + return textLine(fmt.Sprint(v.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return textLine(fmt.Sprint(v.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + // Unnamed uints are usually bytes or words, so use hexadecimal. + if t.PkgPath() == "" || t.Kind() == reflect.Uintptr { + return textLine(formatHex(v.Uint())) + } + return textLine(fmt.Sprint(v.Uint())) + case reflect.Float32, reflect.Float64: + return textLine(fmt.Sprint(v.Float())) + case reflect.Complex64, reflect.Complex128: + return textLine(fmt.Sprint(v.Complex())) + case reflect.String: + return textLine(formatString(v.String())) + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + return textLine(formatPointer(v)) + case reflect.Struct: + var list textList + for i := 0; i < v.NumField(); i++ { + vv := v.Field(i) + if value.IsZero(vv) { + continue // Elide fields with zero values + } + s := opts.WithTypeMode(autoType).FormatValue(vv, m) + list = append(list, textRecord{Key: t.Field(i).Name, Value: s}) + } + return textWrap{"{", list, "}"} + case reflect.Slice: + if v.IsNil() { + return textNil + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + fallthrough + case reflect.Array: + var list textList + for i := 0; i < v.Len(); i++ { + vi := v.Index(i) + if vi.CanAddr() { // Check for cyclic elements + p := vi.Addr() + if m.Visit(p) { + var out textNode + out = textLine(formatPointer(p)) + out = opts.WithTypeMode(emitType).FormatType(p.Type(), out) + out = textWrap{"*", out, ""} + list = append(list, textRecord{Value: out}) + continue + } + } + s := opts.WithTypeMode(elideType).FormatValue(vi, m) + list = append(list, textRecord{Value: s}) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Map: + if v.IsNil() { + return textNil + } + if m.Visit(v) { + return textLine(formatPointer(v)) + } + + var list textList + for _, k := range value.SortKeys(v.MapKeys()) { + sk := formatMapKey(k) + sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m) + list = append(list, textRecord{Key: sk, Value: sv}) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + return textWrap{ptr + "{", list, "}"} + case reflect.Ptr: + if v.IsNil() { + return textNil + } + if m.Visit(v) || opts.ShallowPointers { + return textLine(formatPointer(v)) + } + if opts.PrintAddresses { + ptr = formatPointer(v) + } + skipType = true // Let the underlying value print the type instead + return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""} + case reflect.Interface: + if v.IsNil() { + return textNil + } + // Interfaces accept different concrete types, + // so configure the underlying 
value to explicitly print the type.
+		skipType = true // Print the concrete type instead
+		return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m)
+	default:
+		panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+	}
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value) string {
+	var opts formatOptions
+	opts.TypeMode = elideType
+	opts.ShallowPointers = true
+	s := opts.FormatValue(v, visitedPointers{}).String()
+	return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+	// Use a quoted string if it is the same length as a raw string literal.
+	// Otherwise, attempt to use the raw string form.
+	qs := strconv.Quote(s)
+	if len(qs) == 1+len(s)+1 {
+		return qs
+	}
+
+	// Disallow newlines to ensure output is a single line.
+	// Only allow printable runes for readability purposes.
+	rawInvalid := func(r rune) bool {
+		return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+	}
+	if strings.IndexFunc(s, rawInvalid) < 0 {
+		return "`" + s + "`"
+	}
+	return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+	var f string
+	switch {
+	case u <= 0xff:
+		f = "0x%02x"
+	case u <= 0xffff:
+		f = "0x%04x"
+	case u <= 0xffffff:
+		f = "0x%06x"
+	case u <= 0xffffffff:
+		f = "0x%08x"
+	case u <= 0xffffffffff:
+		f = "0x%010x"
+	case u <= 0xffffffffffff:
+		f = "0x%012x"
+	case u <= 0xffffffffffffff:
+		f = "0x%014x"
+	case u <= 0xffffffffffffffff:
+		f = "0x%016x"
+	}
+	return fmt.Sprintf(f, u)
+}
+
+// formatPointer prints the address of the pointer.
+func formatPointer(v reflect.Value) string {
+	p := v.Pointer()
+	if flags.Deterministic {
+		p = 0xdeadf00f // Only used for stable testing purposes
+	}
+	return fmt.Sprintf("⟪0x%x⟫", p)
+}
+
+type visitedPointers map[value.Pointer]struct{}
+
+// Visit inserts pointer v into the visited map and reports whether it had
+// already been visited before.
+func (m visitedPointers) Visit(v reflect.Value) bool {
+	p := value.PointerOf(v)
+	_, visited := m[p]
+	m[p] = struct{}{}
+	return visited
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 00000000..8cb3265e
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,333 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+	switch {
+	case opts.DiffMode != diffUnknown:
+		return false // Must be formatting in diff mode
+	case v.NumDiff == 0:
+		return false // No differences detected
+	case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
+		// TODO: Handle the case where someone uses bytes.Equal on a large slice.
+		return false // Some custom option was used to determine equality
+	case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+		return false // Both values must be valid
+	}
+
+	switch t := v.Type; t.Kind() {
+	case reflect.String:
+	case reflect.Array, reflect.Slice:
+		// Only slices of primitive types have specialized handling.
+		switch t.Elem().Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+			reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+			reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+		default:
+			return false
+		}
+
+		// If a sufficient number of elements already differ,
+		// use specialized formatting even if length requirement is not met.
+		if v.NumDiff > v.NumSame {
+			return true
+		}
+	default:
+		return false
+	}
+
+	// Use specialized string diffing for longer slices or strings.
+	const minLength = 64
+	return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+	assert(opts.DiffMode == diffUnknown)
+	t, vx, vy := v.Type, v.ValueX, v.ValueY
+
+	// Auto-detect the type of the data.
+	var isLinedText, isText, isBinary bool
+	var sx, sy string
+	switch {
+	case t.Kind() == reflect.String:
+		sx, sy = vx.String(), vy.String()
+		isText = true // Initial estimate, verify later
+	case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+		sx, sy = string(vx.Bytes()), string(vy.Bytes())
+		isBinary = true // Initial estimate, verify later
+	case t.Kind() == reflect.Array:
+		// Arrays need to be addressable for slice operations to work.
+		vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+		vx2.Set(vx)
+		vy2.Set(vy)
+		vx, vy = vx2, vy2
+	}
+	if isText || isBinary {
+		var numLines, lastLineIdx, maxLineLen int
+		isBinary = false
+		for i, r := range sx + sy {
+			if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
+				isBinary = true
+				break
+			}
+			if r == '\n' {
+				if maxLineLen < i-lastLineIdx {
+					maxLineLen = i - lastLineIdx
+				}
+				lastLineIdx = i + 1
+				numLines++
+			}
+		}
+		isText = !isBinary
+		isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+	}
+
+	// Format the string into printable records.
+	var list textList
+	var delim string
+	switch {
+	// If the text appears to be multi-lined text,
+	// then perform differencing across individual lines.
+	case isLinedText:
+		ssx := strings.Split(sx, "\n")
+		ssy := strings.Split(sy, "\n")
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.Index(0).String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = "\n"
+	// If the text appears to be single-lined text,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is printed as quoted strings.
+	case isText:
+		list = opts.formatDiffSlice(
+			reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+			func(v reflect.Value, d diffMode) textRecord {
+				s := formatString(v.String())
+				return textRecord{Diff: d, Value: textLine(s)}
+			},
+		)
+		delim = ""
+	// If the text appears to be binary data,
+	// then perform differencing in approximately fixed-sized chunks.
+	// The output is inspired by hexdump.
+ case isBinary: + list = opts.formatDiffSlice( + reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte", + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + ss = append(ss, formatHex(v.Index(i).Uint())) + } + s := strings.Join(ss, ", ") + comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String()))) + return textRecord{Diff: d, Value: textLine(s), Comment: comment} + }, + ) + // For all other slices of primitive types, + // then perform differencing in approximately fixed-sized chunks. + // The size of each chunk depends on the width of the element kind. + default: + var chunkSize int + if t.Elem().Kind() == reflect.Bool { + chunkSize = 16 + } else { + switch t.Elem().Bits() { + case 8: + chunkSize = 16 + case 16: + chunkSize = 12 + case 32: + chunkSize = 8 + default: + chunkSize = 8 + } + } + list = opts.formatDiffSlice( + vx, vy, chunkSize, t.Elem().Kind().String(), + func(v reflect.Value, d diffMode) textRecord { + var ss []string + for i := 0; i < v.Len(); i++ { + switch t.Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + ss = append(ss, fmt.Sprint(v.Index(i).Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + ss = append(ss, formatHex(v.Index(i).Uint())) + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128: + ss = append(ss, fmt.Sprint(v.Index(i).Interface())) + } + } + s := strings.Join(ss, ", ") + return textRecord{Diff: d, Value: textLine(s)} + }, + ) + } + + // Wrap the output with appropriate type information. + var out textNode = textWrap{"{", list, "}"} + if !isText { + // The "{...}" byte-sequence literal is not valid Go syntax for strings. + // Emit the type for extra clarity (e.g. "string{...}"). + if t.Kind() == reflect.String { + opts = opts.WithTypeMode(emitType) + } + return opts.FormatType(t, out) + } + switch t.Kind() { + case reflect.String: + out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf(string("")) { + out = opts.FormatType(t, out) + } + case reflect.Slice: + out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)} + if t != reflect.TypeOf([]byte(nil)) { + out = opts.FormatType(t, out) + } + } + return out +} + +// formatASCII formats s as an ASCII string. +// This is useful for printing binary strings in a semi-legible way. +func formatASCII(s string) string { + b := bytes.Repeat([]byte{'.'}, len(s)) + for i := 0; i < len(s); i++ { + if ' ' <= s[i] && s[i] <= '~' { + b[i] = s[i] + } + } + return string(b) +} + +func (opts formatOptions) formatDiffSlice( + vx, vy reflect.Value, chunkSize int, name string, + makeRec func(reflect.Value, diffMode) textRecord, +) (list textList) { + es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result { + return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface()) + }) + + appendChunks := func(v reflect.Value, d diffMode) int { + n0 := v.Len() + for v.Len() > 0 { + n := chunkSize + if n > v.Len() { + n = v.Len() + } + list = append(list, makeRec(v.Slice(0, n), d)) + v = v.Slice(n, v.Len()) + } + return n0 - v.Len() + } + + groups := coalesceAdjacentEdits(name, es) + groups = coalesceInterveningIdentical(groups, chunkSize/4) + for i, ds := range groups { + // Print equal. + if ds.NumDiff() == 0 { + // Compute the number of leading and trailing equal bytes to print. 
+ var numLo, numHi int + numEqual := ds.NumIgnored + ds.NumIdentical + for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 { + numLo++ + } + for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 { + numHi++ + } + if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 { + numHi = numEqual - numLo // Avoid pointless coalescing of single equal row + } + + // Print the equal bytes. + appendChunks(vx.Slice(0, numLo), diffIdentical) + if numEqual > numLo+numHi { + ds.NumIdentical -= numLo + numHi + list.AppendEllipsis(ds) + } + appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical) + vx = vx.Slice(numEqual, vx.Len()) + vy = vy.Slice(numEqual, vy.Len()) + continue + } + + // Print unequal. + nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved) + vx = vx.Slice(nx, vx.Len()) + ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted) + vy = vy.Slice(ny, vy.Len()) + } + assert(vx.Len() == 0 && vy.Len() == 0) + return list +} + +// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent +// equal or unequal counts. +func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) { + var prevCase int // Arbitrary index into which case last occurred + lastStats := func(i int) *diffStats { + if prevCase != i { + groups = append(groups, diffStats{Name: name}) + prevCase = i + } + return &groups[len(groups)-1] + } + for _, e := range es { + switch e { + case diff.Identity: + lastStats(1).NumIdentical++ + case diff.UniqueX: + lastStats(2).NumRemoved++ + case diff.UniqueY: + lastStats(2).NumInserted++ + case diff.Modified: + lastStats(2).NumModified++ + } + } + return groups +} + +// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize) +// equal groups into adjacent unequal groups that currently result in a +// dual inserted/removed printout. This acts as a high-pass filter to smooth +// out high-frequency changes within the windowSize. +func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats { + groups, groupsOrig := groups[:0], groups + for i, ds := range groupsOrig { + if len(groups) >= 2 && ds.NumDiff() > 0 { + prev := &groups[len(groups)-2] // Unequal group + curr := &groups[len(groups)-1] // Equal group + next := &groupsOrig[i] // Unequal group + hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0 + hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0 + if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize { + *prev = (*prev).Append(*curr).Append(*next) + groups = groups[:len(groups)-1] // Truncate off equal group + continue + } + } + groups = append(groups, ds) + } + return groups +} diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go new file mode 100644 index 00000000..0353b507 --- /dev/null +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -0,0 +1,387 @@ +// Copyright 2019, The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE.md file. 
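The two coalescing helpers above cooperate: coalesceAdjacentEdits buckets the edit script into runs of identical or differing elements, and coalesceInterveningIdentical then folds any short identical run back into its unequal neighbors so a single noisy region prints as one removed/inserted pair. A simplified sketch of that second pass (the `group` type is invented for illustration, and the removed/inserted pairing check from the vendored code is omitted):

```go
package main

import "fmt"

// group is a simplified stand-in for diffStats: a run of identical or
// differing elements.
type group struct {
	identical, removed, inserted int
}

// mergeShortIdentical folds an identical run of at most window elements
// into its unequal neighbors, echoing coalesceInterveningIdentical above.
func mergeShortIdentical(gs []group, window int) []group {
	var out []group
	for _, g := range gs {
		n := len(out)
		if n >= 2 && g.removed+g.inserted > 0 &&
			out[n-1].removed+out[n-1].inserted == 0 && out[n-1].identical <= window {
			// Merge previous unequal run, short identical run, and this run.
			out[n-2].identical += out[n-1].identical
			out[n-2].removed += g.removed
			out[n-2].inserted += g.inserted
			out = out[:n-1]
			continue
		}
		out = append(out, g)
	}
	return out
}

func main() {
	gs := []group{{removed: 3}, {identical: 2}, {inserted: 4}}
	fmt.Println(mergeShortIdentical(gs, 4)) // [{2 3 4}]
}
```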
+ +package cmp + +import ( + "bytes" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/google/go-cmp/cmp/internal/flags" +) + +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 + +type indentMode int + +func (n indentMode) appendIndent(b []byte, d diffMode) []byte { + // The output of Diff is documented as being unstable to provide future + // flexibility in changing the output for more humanly readable reports. + // This logic intentionally introduces instability to the exact output + // so that users can detect accidental reliance on stability early on, + // rather than much later when an actual change to the format occurs. + if flags.Deterministic || randBool { + // Use regular spaces (U+0020). + switch d { + case diffUnknown, diffIdentical: + b = append(b, " "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } else { + // Use non-breaking spaces (U+00a0). + switch d { + case diffUnknown, diffIdentical: + b = append(b, "  "...) + case diffRemoved: + b = append(b, "- "...) + case diffInserted: + b = append(b, "+ "...) + } + } + return repeatCount(n).appendChar(b, '\t') +} + +type repeatCount int + +func (n repeatCount) appendChar(b []byte, c byte) []byte { + for ; n > 0; n-- { + b = append(b, c) + } + return b +} + +// textNode is a simplified tree-based representation of structured text. +// Possible node types are textWrap, textList, or textLine. +type textNode interface { + // Len reports the length in bytes of a single-line version of the tree. + // Nested textRecord.Diff and textRecord.Comment fields are ignored. + Len() int + // Equal reports whether the two trees are structurally identical. + // Nested textRecord.Diff and textRecord.Comment fields are compared. + Equal(textNode) bool + // String returns the string representation of the text tree. + // It is not guaranteed that len(x.String()) == x.Len(), + // nor that x.String() == y.String() implies that x.Equal(y). + String() string + + // formatCompactTo formats the contents of the tree as a single-line string + // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment + // fields are ignored. + // + // However, not all nodes in the tree should be collapsed as a single-line. + // If a node can be collapsed as a single-line, it is replaced by a textLine + // node. Since the top-level node cannot replace itself, this also returns + // the current node itself. + // + // This does not mutate the receiver. + formatCompactTo([]byte, diffMode) ([]byte, textNode) + // formatExpandedTo formats the contents of the tree as a multi-line string + // to the provided buffer. In order for column alignment to operate well, + // formatCompactTo must be called before calling formatExpandedTo. + formatExpandedTo([]byte, diffMode, indentMode) []byte +} + +// textWrap is a wrapper that concatenates a prefix and/or a suffix +// to the underlying node. 
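The indentation logic above deliberately randomizes between regular and non-breaking spaces so callers notice early if they depend on the exact report format. A test that must still inspect cmp.Diff output can normalize the indentation first; a small sketch of that common workaround (the helper is an assumption, not part of the library):

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeReport folds the non-breaking spaces (U+00A0) that the reporter
// may randomly emit back into regular spaces before a test inspects it.
func normalizeReport(s string) string {
	return strings.Replace(s, "\u00a0", " ", -1)
}

func main() {
	fmt.Println(normalizeReport("+\u00a0Name: \"x\","))
	// Prints: + Name: "x",
}
```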
+type textWrap struct { + Prefix string // e.g., "bytes.Buffer{" + Value textNode // textWrap | textList | textLine + Suffix string // e.g., "}" +} + +func (s textWrap) Len() int { + return len(s.Prefix) + s.Value.Len() + len(s.Suffix) +} +func (s1 textWrap) Equal(s2 textNode) bool { + if s2, ok := s2.(textWrap); ok { + return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix + } + return false +} +func (s textWrap) String() string { + var d diffMode + var n indentMode + _, s2 := s.formatCompactTo(nil, d) + b := n.appendIndent(nil, d) // Leading indent + b = s2.formatExpandedTo(b, d, n) // Main body + b = append(b, '\n') // Trailing newline + return string(b) +} +func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + n0 := len(b) // Original buffer length + b = append(b, s.Prefix...) + b, s.Value = s.Value.formatCompactTo(b, d) + b = append(b, s.Suffix...) + if _, ok := s.Value.(textLine); ok { + return b, textLine(b[n0:]) + } + return b, s +} +func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + b = append(b, s.Prefix...) + b = s.Value.formatExpandedTo(b, d, n) + b = append(b, s.Suffix...) + return b +} + +// textList is a comma-separated list of textWrap or textLine nodes. +// The list may be formatted as multi-lines or single-line at the discretion +// of the textList.formatCompactTo method. +type textList []textRecord +type textRecord struct { + Diff diffMode // e.g., 0 or '-' or '+' + Key string // e.g., "MyField" + Value textNode // textWrap | textLine + Comment fmt.Stringer // e.g., "6 identical fields" +} + +// AppendEllipsis appends a new ellipsis node to the list if none already +// exists at the end. If cs is non-zero it coalesces the statistics with the +// previous diffStats. +func (s *textList) AppendEllipsis(ds diffStats) { + hasStats := ds != diffStats{} + if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) { + if hasStats { + *s = append(*s, textRecord{Value: textEllipsis, Comment: ds}) + } else { + *s = append(*s, textRecord{Value: textEllipsis}) + } + return + } + if hasStats { + (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds) + } +} + +func (s textList) Len() (n int) { + for i, r := range s { + n += len(r.Key) + if r.Key != "" { + n += len(": ") + } + n += r.Value.Len() + if i < len(s)-1 { + n += len(", ") + } + } + return n +} + +func (s1 textList) Equal(s2 textNode) bool { + if s2, ok := s2.(textList); ok { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + r1, r2 := s1[i], s2[i] + if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) { + return false + } + } + return true + } + return false +} + +func (s textList) String() string { + return textWrap{"{", s, "}"}.String() +} + +func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + s = append(textList(nil), s...) // Avoid mutating original + + // Determine whether we can collapse this list as a single line. + n0 := len(b) // Original buffer length + var multiLine bool + for i, r := range s { + if r.Diff == diffInserted || r.Diff == diffRemoved { + multiLine = true + } + b = append(b, r.Key...) + if r.Key != "" { + b = append(b, ": "...) + } + b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff) + if _, ok := s[i].Value.(textLine); !ok { + multiLine = true + } + if r.Comment != nil { + multiLine = true + } + if i < len(s)-1 { + b = append(b, ", "...) 
+ } + } + // Force multi-lined output when printing a removed/inserted node that + // is sufficiently long. + if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 { + multiLine = true + } + if !multiLine { + return b, textLine(b[n0:]) + } + return b, s +} + +func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte { + alignKeyLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return r.Key == "" || !isLine + }, + func(r textRecord) int { return len(r.Key) }, + ) + alignValueLens := s.alignLens( + func(r textRecord) bool { + _, isLine := r.Value.(textLine) + return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil + }, + func(r textRecord) int { return len(r.Value.(textLine)) }, + ) + + // Format the list as a multi-lined output. + n++ + for i, r := range s { + b = n.appendIndent(append(b, '\n'), d|r.Diff) + if r.Key != "" { + b = append(b, r.Key+": "...) + } + b = alignKeyLens[i].appendChar(b, ' ') + + b = r.Value.formatExpandedTo(b, d|r.Diff, n) + if !r.Value.Equal(textEllipsis) { + b = append(b, ',') + } + b = alignValueLens[i].appendChar(b, ' ') + + if r.Comment != nil { + b = append(b, " // "+r.Comment.String()...) + } + } + n-- + + return n.appendIndent(append(b, '\n'), d) +} + +func (s textList) alignLens( + skipFunc func(textRecord) bool, + lenFunc func(textRecord) int, +) []repeatCount { + var startIdx, endIdx, maxLen int + lens := make([]repeatCount, len(s)) + for i, r := range s { + if skipFunc(r) { + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + startIdx, endIdx, maxLen = i+1, i+1, 0 + } else { + if maxLen < lenFunc(r) { + maxLen = lenFunc(r) + } + endIdx = i + 1 + } + } + for j := startIdx; j < endIdx && j < len(s); j++ { + lens[j] = repeatCount(maxLen - lenFunc(s[j])) + } + return lens +} + +// textLine is a single-line segment of text and is always a leaf node +// in the textNode tree. +type textLine []byte + +var ( + textNil = textLine("nil") + textEllipsis = textLine("...") +) + +func (s textLine) Len() int { + return len(s) +} +func (s1 textLine) Equal(s2 textNode) bool { + if s2, ok := s2.(textLine); ok { + return bytes.Equal([]byte(s1), []byte(s2)) + } + return false +} +func (s textLine) String() string { + return string(s) +} +func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) { + return append(b, s...), s +} +func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte { + return append(b, s...) +} + +type diffStats struct { + Name string + NumIgnored int + NumIdentical int + NumRemoved int + NumInserted int + NumModified int +} + +func (s diffStats) NumDiff() int { + return s.NumRemoved + s.NumInserted + s.NumModified +} + +func (s diffStats) Append(ds diffStats) diffStats { + assert(s.Name == ds.Name) + s.NumIgnored += ds.NumIgnored + s.NumIdentical += ds.NumIdentical + s.NumRemoved += ds.NumRemoved + s.NumInserted += ds.NumInserted + s.NumModified += ds.NumModified + return s +} + +// String prints a humanly-readable summary of coalesced records. 
+//
+// Example:
+//	diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+	var ss []string
+	var sum int
+	labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+	counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+	for i, n := range counts {
+		if n > 0 {
+			ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+		}
+		sum += n
+	}
+
+	// Pluralize the name (adjusting for some obscure English grammar rules).
+	name := s.Name
+	if sum > 1 {
+		name = name + "s"
+		if strings.HasSuffix(name, "ys") {
+			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+		}
+	}
+
+	// Format the list according to English grammar (with Oxford comma).
+	switch n := len(ss); n {
+	case 0:
+		return ""
+	case 1, 2:
+		return strings.Join(ss, " and ") + " " + name
+	default:
+		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+	}
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 00000000..83031a7f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+	parent *valueNode
+
+	Type   reflect.Type
+	ValueX reflect.Value
+	ValueY reflect.Value
+
+	// NumSame is the number of leaf nodes that are equal.
+	// All descendants are equal only if NumDiff is 0.
+	NumSame int
+	// NumDiff is the number of leaf nodes that are not equal.
+	NumDiff int
+	// NumIgnored is the number of leaf nodes that are ignored.
+	NumIgnored int
+	// NumCompared is the number of leaf nodes that were compared
+	// using an Equal method or Comparer function.
+	NumCompared int
+	// NumTransformed is the number of non-leaf nodes that were transformed.
+	NumTransformed int
+	// NumChildren is the number of transitive descendants of this node.
+	// This counts from zero; thus, leaf nodes have no descendants.
+	NumChildren int
+	// MaxDepth is the maximum depth of the tree. This counts from zero;
+	// thus, leaf nodes have a depth of zero.
+	MaxDepth int
+
+	// Records is a list of struct fields, slice elements, or map entries.
+	Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirect, or
+	// type assertion.
+	Value *valueNode // If populated, implies Records is not populated
+
+	// TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated +} +type reportRecord struct { + Key reflect.Value // Invalid for slice element + Value *valueNode +} + +func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) { + vx, vy := ps.Values() + child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy} + switch s := ps.(type) { + case StructField: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child}) + case SliceIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Value: child}) + case MapIndex: + assert(parent.Value == nil) + parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child}) + case Indirect: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case TypeAssertion: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + case Transform: + assert(parent.Value == nil && parent.Records == nil) + parent.Value = child + parent.TransformerName = s.Name() + parent.NumTransformed++ + default: + assert(parent == nil) // Must be the root step + } + return child +} + +func (r *valueNode) Report(rs Result) { + assert(r.MaxDepth == 0) // May only be called on leaf nodes + + if rs.ByIgnore() { + r.NumIgnored++ + } else { + if rs.Equal() { + r.NumSame++ + } else { + r.NumDiff++ + } + } + assert(r.NumSame+r.NumDiff+r.NumIgnored == 1) + + if rs.ByMethod() { + r.NumCompared++ + } + if rs.ByFunc() { + r.NumCompared++ + } + assert(r.NumCompared <= 1) +} + +func (child *valueNode) PopStep() (parent *valueNode) { + if child.parent == nil { + return nil + } + parent = child.parent + parent.NumSame += child.NumSame + parent.NumDiff += child.NumDiff + parent.NumIgnored += child.NumIgnored + parent.NumCompared += child.NumCompared + parent.NumTransformed += child.NumTransformed + parent.NumChildren += child.NumChildren + 1 + if parent.MaxDepth < child.MaxDepth+1 { + parent.MaxDepth = child.MaxDepth + 1 + } + return parent +} diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE new file mode 100644 index 00000000..ae121a1e --- /dev/null +++ b/vendor/github.com/google/go-querystring/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go
new file mode 100644
index 00000000..37080b19
--- /dev/null
+++ b/vendor/github.com/google/go-querystring/query/encode.go
@@ -0,0 +1,320 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package query implements encoding of structs into URL query parameters.
+//
+// As a simple example:
+//
+//	type Options struct {
+//		Query   string `url:"q"`
+//		ShowAll bool   `url:"all"`
+//		Page    int    `url:"page"`
+//	}
+//
+//	opt := Options{ "foo", true, 2 }
+//	v, _ := query.Values(opt)
+//	fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
+//
+// The exact mapping between Go values and url.Values is described in the
+// documentation for the Values() function.
+package query
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+
+var encoderType = reflect.TypeOf(new(Encoder)).Elem()
+
+// Encoder is an interface implemented by any type that wishes to encode
+// itself into URL values in a non-standard way.
+type Encoder interface {
+	EncodeValues(key string, v *url.Values) error
+}
+
+// Values returns the url.Values encoding of v.
+//
+// Values expects to be passed a struct, and traverses it recursively using the
+// following encoding rules.
+//
+// Each exported struct field is encoded as a URL parameter unless
+//
+//	- the field's tag is "-", or
+//	- the field is empty and its tag specifies the "omitempty" option
+//
+// The empty values are false, 0, any nil pointer or interface value, any array,
+// slice, map, or string of length zero, and any time.Time that returns true
+// for IsZero().
+//
+// The URL parameter name defaults to the struct field name but can be
+// specified in the struct field's tag value. The "url" key in the struct
+// field's tag value is the key name, followed by an optional comma and
+// options. For example:
+//
+//	// Field is ignored by this package.
+//	Field int `url:"-"`
+//
+//	// Field appears as URL parameter "myName".
+//	Field int `url:"myName"`
+//
+//	// Field appears as URL parameter "myName" and the field is omitted if
+//	// its value is empty
+//	Field int `url:"myName,omitempty"`
+//
+//	// Field appears as URL parameter "Field" (the default), but the field
+//	// is skipped if empty. Note the leading comma.
+//	Field int `url:",omitempty"`
+//
+// For encoding individual field values, the following type-dependent rules
+// apply:
+//
+// Boolean values default to encoding as the strings "true" or "false".
+// Including the "int" option signals that the field should be encoded as the
+// strings "1" or "0".
+//
+// time.Time values default to encoding as RFC3339 timestamps.
Including the +// "unix" option signals that the field should be encoded as a Unix time (see +// time.Unix()) +// +// Slice and Array values default to encoding as multiple URL values of the +// same name. Including the "comma" option signals that the field should be +// encoded as a single comma-delimited value. Including the "space" option +// similarly encodes the value as a single space-delimited string. Including +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. +// +// Anonymous struct fields are usually encoded as if their inner exported +// fields were fields in the outer struct, subject to the standard Go +// visibility rules. An anonymous struct field with a name given in its URL +// tag is treated as having that name, rather than being anonymous. +// +// Non-nil pointer values are encoded as the value pointed to. +// +// Nested structs are encoded including parent fields in value names for +// scoping. e.g: +// +// "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO" +// +// All other values are encoded using their default string representation. +// +// Multiple fields that encode to the same URL parameter name will be included +// as multiple URL values of the same name. +func Values(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. 
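The scoping and delimiter rules documented above compose; a short usage sketch (the User and Address types are invented for illustration, and the import uses the upstream path rather than the vendored one):

```go
package main

import (
	"fmt"

	"github.com/google/go-querystring/query"
)

type Address struct {
	City string `url:"city"`
}

type User struct {
	Name string   `url:"name"`
	Tags []string `url:"tag,comma"`  // Encoded as one comma-delimited value.
	Page int      `url:",omitempty"` // Skipped when zero.
	Addr Address  `url:"addr"`       // Nested struct scopes as addr[city].
}

func main() {
	v, err := query.Values(User{
		Name: "acme",
		Tags: []string{"a", "b"},
		Addr: Address{City: "SFO"},
	})
	if err != nil {
		panic(err)
	}
	// Encode sorts by key and percent-escapes brackets and commas:
	// addr%5Bcity%5D=SFO&name=acme&tag=a%2Cb
	fmt.Println(v.Encode())
}
```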
+func reflectValue(values url.Values, val reflect.Value, scope string) error { + var embedded []reflect.Value + + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if name == "" { + if sf.Anonymous && sv.Kind() == reflect.Struct { + // save embedded struct for later processing + embedded = append(embedded, sv) + continue + } + + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(encoderType) { + if !reflect.Indirect(sv).IsValid() { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(Encoder) + if err := m.EncodeValues(name, &values); err != nil { + return err + } + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + var del byte + if opts.Contains("comma") { + del = ',' + } else if opts.Contains("space") { + del = ' ' + } else if opts.Contains("semicolon") { + del = ';' + } else if opts.Contains("brackets") { + name = name + "[]" + } + + if del != 0 { + s := new(bytes.Buffer) + first := true + for i := 0; i < sv.Len(); i++ { + if first { + first = false + } else { + s.WriteByte(del) + } + s.WriteString(valueString(sv.Index(i), opts)) + } + values.Add(name, s.String()) + } else { + for i := 0; i < sv.Len(); i++ { + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts)) + } + } + continue + } + + for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts)) + continue + } + + if sv.Kind() == reflect.Struct { + reflectValue(values, sv, name) + continue + } + + values.Add(name, valueString(sv, opts)) + } + + for _, f := range embedded { + if err := reflectValue(values, f, scope); err != nil { + return err + } + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Kind() == reflect.Bool && opts.Contains("int") { + if v.Bool() { + return "1" + } + return "0" + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if opts.Contains("unix") { + return strconv.FormatInt(t.Unix(), 10) + } + return t.Format(time.RFC3339) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + + if v.Type() == timeType { + return v.Interface().(time.Time).IsZero() + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. 
+type tagOptions []string
+
+// parseTag splits a struct field's url tag into its name and comma-separated
+// options.
+func parseTag(tag string) (string, tagOptions) {
+	s := strings.Split(tag, ",")
+	return s[0], s[1:]
+}
+
+// Contains checks whether the tagOptions contains the specified option.
+func (o tagOptions) Contains(option string) bool {
+	for _, s := range o {
+		if s == option {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 00000000..51cf5cd1
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an [individual
+    CLA][].
+
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+  1. It's generally best to start by opening a new issue describing the bug or
+     feature you're intending to fix. Even if you think it's relatively minor,
+     it's helpful to know what people are working on. Mention in the initial
+     issue that you are planning to work on that bug or feature so that it can
+     be assigned to you.
+
+  1. Follow the normal process of [forking][] the project, and set up a new
+     branch to work in. It's important that each group of changes be done in
+     separate branches in order to ensure that a pull request only includes the
+     commits related to that bug or feature.
+
+  1. Go makes it very simple to ensure properly formatted code, so always run
+     `go fmt` on your code before committing it. You should also run
+     [golint][] over your code. As noted in the [golint readme][], it's not
+     strictly necessary that your code be completely "lint-free", but this will
+     help you find common style issues.
+
+  1. Any significant changes should almost always be accompanied by tests. The
+     project already has good test coverage, so look at some of the existing
+     tests if you're unsure how to go about it. [gocov][] and [gocov-html][]
+     are invaluable tools for seeing which parts of your code aren't being
+     exercised by your tests.
+
+  1. Do your best to have [well-formed commit messages][] for each change.
+     This provides consistency throughout the project, and ensures that commit
+     messages are able to be formatted properly by various git tools.
+
+  1. Finally, push the commits to your fork and submit a [pull request][].
+ +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/google/gofuzz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md new file mode 100644 index 00000000..64869af3 --- /dev/null +++ b/vendor/github.com/google/gofuzz/README.md @@ -0,0 +1,71 @@ +gofuzz +====== + +gofuzz is a library for populating go objects with random values. + +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz) +[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) + +This is useful for testing: + +* Do your project's objects really serialize/unserialize correctly in all cases? +* Is there an incorrectly formatted object that will cause your project to panic? + +Import with ```import "github.com/google/gofuzz"``` + +You can use it on single variables: +```go +f := fuzz.New() +var myInt int +f.Fuzz(&myInt) // myInt gets a random value. +``` + +You can use it on maps: +```go +f := fuzz.New().NilChance(0).NumElements(1, 1) +var myMap map[ComplexKeyType]string +f.Fuzz(&myMap) // myMap will have exactly one element. +``` + +Customize the chance of getting a nil pointer: +```go +f := fuzz.New().NilChance(.5) +var fancyStruct struct { + A, B, C, D *string +} +f.Fuzz(&fancyStruct) // About half the pointers should be set. +``` + +You can even customize the randomization completely if needed: +```go +type MyEnum string +const ( + A MyEnum = "A" + B MyEnum = "B" +) +type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string +} + +f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, +) + +var myObject MyInfo +f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +``` + +See more examples in ```example_test.go```. + +Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go new file mode 100644 index 00000000..9f9956d4 --- /dev/null +++ b/vendor/github.com/google/gofuzz/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fuzz is a library for populating go objects with random values. +package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go new file mode 100644 index 00000000..1dfa80a6 --- /dev/null +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -0,0 +1,487 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz + +import ( + "fmt" + "math/rand" + "reflect" + "time" +) + +// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. +type fuzzFuncMap map[reflect.Type]reflect.Value + +// Fuzzer knows how to fill any object with random fields. +type Fuzzer struct { + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int +} + +// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. +func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { + f := &Fuzzer{ + defaultFuzzFuncs: fuzzFuncMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), + }, + + fuzzFuncs: fuzzFuncMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + } + return f +} + +// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. +// +// Each entry in fuzzFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// the function will fill with random data. The second parameter must be a +// fuzz.Continue, which will provide a source of randomness and a way +// to automatically continue fuzzing smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// fuzzing, the function `func(s *string, c fuzz.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChance option. For slices, it +// doesn't make much sense to pre-create them--Fuzzer doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions.
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. +// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() > f.nilChance +} + +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. 
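A minimal usage sketch of how the options above (NewWithSeed, NilChance, NumElements) compose, assuming the canonical `github.com/google/gofuzz` import path; the `config` struct and seed value are illustrative only:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// config is an illustrative struct to fill with random data.
type config struct {
	Name    string
	Retries int
	Tags    []string
}

func main() {
	// A fixed seed makes every run reproduce the same pseudo-random
	// values, which helps when replaying a failing test case.
	f := fuzz.NewWithSeed(42).
		NilChance(0).     // never produce nil maps/pointers/slices
		NumElements(1, 3) // non-nil slices and maps get 1 to 3 elements

	var c config
	f.Fuzz(&c) // fills Name, Retries, and Tags with random data
	fmt.Printf("%+v\n", c)
}
```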
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. + flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.fuzzer.r) + return + } + switch v.Kind() { + case reflect.Map: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.fuzzer.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.fuzzer.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + fc.doFuzz(v.Field(i), 0) + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) + return true + } + } + // Finally: see if there is a default fuzz function. + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.fuzzer.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. 
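To make that delegation concrete, here is a small sketch of a type satisfying the Interface contract declared just below; the `Port` type is invented for illustration:

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

// Port fuzzes itself so the generated value is always a valid TCP port,
// rather than an arbitrary random uint16 (which could be 0).
type Port uint16

// Fuzz implements fuzz.Interface; the fuzzer calls it instead of filling
// the value with raw random bits.
func (p *Port) Fuzz(c fuzz.Continue) {
	*p = Port(1 + c.Intn(65535)) // Continue embeds *rand.Rand
}

func main() {
	var p Port
	fuzz.New().Fuzz(&p)
	fmt.Println(p) // always in [1, 65535]
}
```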
+type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + fc *fuzzerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer. +func (c Continue) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + c.Fuzz(&nsec) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fuzzInt, + reflect.Int8: fuzzInt, + reflect.Int16: fuzzInt, + reflect.Int32: fuzzInt, + reflect.Int64: fuzzInt, + reflect.Uint: fuzzUint, + reflect.Uint8: fuzzUint, + reflect.Uint16: fuzzUint, + reflect.Uint32: fuzzUint, + reflect.Uint64: fuzzUint, + reflect.Uintptr: fuzzUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + if r.Int()&1 == 1 { + return true + } + return false +} + +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. 
+func (r *charRange) choose(rand *rand.Rand) rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func randString(r *rand.Rand) string { + n := r.Intn(20) + runes := make([]rune, n) + for i := range runes { + runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) + } + return string(runes) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 00000000..8ec4fe9e --- /dev/null +++ b/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md new file mode 100644 index 00000000..04fdf09f --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -0,0 +1,10 @@ +# How to contribute + +We definitely welcome patches and contribution to this project! + +### Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://cla.developers.google.com/clas). + +You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS new file mode 100644 index 00000000..b4bb97f6 --- /dev/null +++ b/vendor/github.com/google/uuid/CONTRIBUTORS @@ -0,0 +1,9 @@ +Paul Borman +bmatsuo +shawnps +theory +jboverfelt +dsymonds +cd1 +wallclockbuilder +dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE new file mode 100644 index 00000000..5dc68268 --- /dev/null +++ b/vendor/github.com/google/uuid/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md new file mode 100644 index 00000000..9d92c11f --- /dev/null +++ b/vendor/github.com/google/uuid/README.md @@ -0,0 +1,19 @@ +# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +The uuid package generates and inspects UUIDs based on +[RFC 4122](http://tools.ietf.org/html/rfc4122) +and DCE 1.1: Authentication and Security Services. + +This package is based on the github.com/pborman/uuid package (previously named +code.google.com/p/go-uuid). It differs from these earlier packages in that +a UUID is a 16 byte array rather than a byte slice. One loss due to this +change is the ability to represent an invalid UUID (vs a NIL UUID). + +###### Install +`go get github.com/google/uuid` + +###### Documentation +[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) + +Full `go doc` style documentation for the package can be viewed online without +installing this package by using the GoDoc site here: +http://godoc.org/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go new file mode 100644 index 00000000..fa820b9d --- /dev/null +++ b/vendor/github.com/google/uuid/dce.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the user's UID for the Person +// domain and the user's GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) (UUID, error) { + uuid, err := NewUUID() + if err == nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid, err +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCESecurity(Person, uint32(os.Getuid())) +func NewDCEPerson() (UUID, error) { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid.
+// +// NewDCESecurity(Group, uint32(os.Getgid())) +func NewDCEGroup() (UUID, error) { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID. Domains are only defined +// for Version 2 UUIDs. +func (uuid UUID) Domain() Domain { + return Domain(uuid[9]) +} + +// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 +// UUIDs. +func (uuid UUID) ID() uint32 { + return binary.BigEndian.Uint32(uuid[0:4]) +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go new file mode 100644 index 00000000..5b8a4b9a --- /dev/null +++ b/vendor/github.com/google/uuid/doc.go @@ -0,0 +1,12 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package uuid generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security +// Services. +// +// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to +// maps or compared directly. +package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod new file mode 100644 index 00000000..fc84cd79 --- /dev/null +++ b/vendor/github.com/google/uuid/go.mod @@ -0,0 +1 @@ +module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go new file mode 100644 index 00000000..b1746163 --- /dev/null +++ b/vendor/github.com/google/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known namespace IDs and UUIDs +var ( + NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) + Nil UUID // empty UUID, all zeros +) + +// NewHash returns a new UUID derived from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space[:]) + h.Write(data) + s := h.Sum(nil) + var uuid UUID + copy(uuid[:], s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. It is the same as calling: +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. 
It is the same as calling: +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go new file mode 100644 index 00000000..7f9e0c6c --- /dev/null +++ b/vendor/github.com/google/uuid/marshal.go @@ -0,0 +1,37 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "fmt" + +// MarshalText implements encoding.TextMarshaler. +func (uuid UUID) MarshalText() ([]byte, error) { + var js [36]byte + encodeHex(js[:], uuid) + return js[:], nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. +func (uuid *UUID) UnmarshalText(data []byte) error { + id, err := ParseBytes(data) + if err == nil { + *uuid = id + } + return err +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (uuid UUID) MarshalBinary() ([]byte, error) { + return uuid[:], nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (uuid *UUID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) + } + copy(uuid[:], data) + return nil +} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go new file mode 100644 index 00000000..d651a2b0 --- /dev/null +++ b/vendor/github.com/google/uuid/node.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "sync" +) + +var ( + nodeMu sync.Mutex + ifname string // name of interface being used + nodeID [6]byte // hardware for version 1 UUIDs + zeroID [6]byte // nodeID with only 0's +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + defer nodeMu.Unlock() + nodeMu.Lock() + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + defer nodeMu.Unlock() + nodeMu.Lock() + return setNodeInterface(name) +} + +func setNodeInterface(name string) bool { + iname, addr := getHardwareInterface(name) // null implementation for js + if iname != "" && addr != nil { + ifname = iname + copy(nodeID[:], addr) + return true + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + ifname = "random" + randomBits(nodeID[:]) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + defer nodeMu.Unlock() + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nid := nodeID + return nid[:] +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. 
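As a usage sketch for the name-based constructors in hash.go above, assuming the canonical `github.com/google/uuid` import path:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Version 5 (SHA-1, name-based) UUIDs are deterministic: the same
	// namespace and name always produce the same UUID.
	a := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	b := uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com"))
	fmt.Println(a)           // stable across runs and machines
	fmt.Println(a == b)      // true: UUIDs are comparable 16-byte arrays
	fmt.Println(a.Version()) // VERSION_5
}
```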
+func SetNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + defer nodeMu.Unlock() + nodeMu.Lock() + copy(nodeID[:], id) + ifname = "user" + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + var node [6]byte + copy(node[:], uuid[10:]) + return node[:] +} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go new file mode 100644 index 00000000..24b78edc --- /dev/null +++ b/vendor/github.com/google/uuid/node_js.go @@ -0,0 +1,12 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js + +package uuid + +// getHardwareInterface returns nil values for the JS version of the code. +// This removes the "net" dependency, because it is not used in the browser. +// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. +func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go new file mode 100644 index 00000000..0cbbcddb --- /dev/null +++ b/vendor/github.com/google/uuid/node_net.go @@ -0,0 +1,33 @@ +// Copyright 2017 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !js + +package uuid + +import "net" + +var interfaces []net.Interface // cached list of interfaces + +// getHardwareInterface returns the name and hardware address of interface name. +// If name is "" then the name and hardware address of one of the system's +// interfaces is returned. If no interfaces are found (name does not exist or +// there are no interfaces) then "", nil is returned. +// +// Only addresses of at least 6 bytes are returned. +func getHardwareInterface(name string) (string, []byte) { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil { + return "", nil + } + } + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + return ifs.Name, ifs.HardwareAddr + } + } + return "", nil +} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go new file mode 100644 index 00000000..f326b54d --- /dev/null +++ b/vendor/github.com/google/uuid/sql.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements sql.Scanner so UUIDs can be read from databases transparently. +// Currently, database types that map to string and []byte are supported. Please +// consult database-specific driver documentation for matching types.
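A short sketch of the round trip through Scan and its counterpart Value (defined just below); it exercises the methods directly rather than going through a real database driver:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id := uuid.New()

	// Value is what a database driver receives when a UUID is written;
	// it holds the canonical 36-character string form.
	v, err := id.Value()
	if err != nil {
		panic(err)
	}

	// Scan accepts the string (or []byte) coming back from a query.
	var out uuid.UUID
	if err := out.Scan(v); err != nil {
		panic(err)
	}
	fmt.Println(out == id) // true
}
```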
+func (uuid *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case nil: + return nil + + case string: + // if an empty UUID comes from a table, we return a null UUID + if src == "" { + return nil + } + + // see Parse for required string format + u, err := Parse(src) + if err != nil { + return fmt.Errorf("Scan: %v", err) + } + + *uuid = u + + case []byte: + // if an empty UUID comes from a table, we return a null UUID + if len(src) == 0 { + return nil + } + + // assumes a simple slice of bytes if 16 bytes + // otherwise attempts to parse + if len(src) != 16 { + return uuid.Scan(string(src)) + } + copy((*uuid)[:], src) + + default: + return fmt.Errorf("Scan: unable to scan type %T into UUID", src) + } + + return nil +} + +// Value implements sql.Valuer so that UUIDs can be written to databases +// transparently. Currently, UUIDs map to strings. Please consult +// database-specific driver documentation for matching types. +func (uuid UUID) Value() (driver.Value, error) { + return uuid.String(), nil +} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go new file mode 100644 index 00000000..e6ef06cd --- /dev/null +++ b/vendor/github.com/google/uuid/time.go @@ -0,0 +1,123 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs +) + +var ( + timeMu sync.Mutex + lasttime uint64 // last time we returned + clockSeq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t to the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// clock sequence as well as adjusting the clock sequence as needed. An error +// is returned if the current time cannot be determined. +func GetTime() (Time, uint16, error) { + defer timeMu.Unlock() + timeMu.Lock() + return getTime() +} + +func getTime() (Time, uint16, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clockSeq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), clockSeq, nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. Unless SetClockSequence is used, a new +// random clock sequence is generated the first time a clock sequence is +// requested by ClockSequence, GetTime, or NewUUID.
(section 4.2.1.1) +func ClockSequence() int { + defer timeMu.Unlock() + timeMu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clockSeq == 0 { + setClockSequence(-1) + } + return int(clockSeq & 0x3fff) +} + +// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer timeMu.Unlock() + timeMu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + oldSeq := clockSeq + clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if oldSeq != clockSeq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. The time is only defined for version 1 and 2 UUIDs. +func (uuid UUID) Time() Time { + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time) +} + +// ClockSequence returns the clock sequence encoded in uuid. +// The clock sequence is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) ClockSequence() int { + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff +} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go new file mode 100644 index 00000000..5ea6c737 --- /dev/null +++ b/vendor/github.com/google/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = [256]byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts hex characters x1 and x2 into a byte. 
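Tying the time machinery above together, a brief sketch that recovers the wall-clock timestamp embedded in a Version 1 UUID (canonical import path assumed):

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // Version 1: derived from time and node ID
	if err != nil {
		panic(err)
	}

	// Time() yields 100ns ticks since 15 Oct 1582; UnixTime converts
	// that to the familiar Unix epoch.
	sec, nsec := id.Time().UnixTime()
	fmt.Println(time.Unix(sec, nsec)) // approximately time.Now()
}
```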
+func xtob(x1, x2 byte) (byte, bool) { + b1 := xvalues[x1] + b2 := xvalues[x2] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go new file mode 100644 index 00000000..524404cc --- /dev/null +++ b/vendor/github.com/google/uuid/uuid.go @@ -0,0 +1,245 @@ +// Copyright 2018 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID [16]byte + +// A Version represents a UUID's version. +type Version byte + +// A Variant represents a UUID's variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// Parse decodes s into a UUID or returns an error. Both the standard UUID +// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the +// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex +// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. +func Parse(s string) (UUID, error) { + var uuid UUID + switch len(s) { + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36: + + // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: + if strings.ToLower(s[:9]) != "urn:uuid:" { + return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) + } + s = s[9:] + + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + case 36 + 2: + s = s[1:] + + // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + case 32: + var ok bool + for i := range uuid { + uuid[i], ok = xtob(s[i*2], s[i*2+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(s[x], s[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// ParseBytes is like Parse, except it parses a byte slice instead of a string. 
+func ParseBytes(b []byte) (UUID, error) { + var uuid UUID + switch len(b) { + case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) + } + b = b[9:] + case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + b = b[1:] + case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + var ok bool + for i := 0; i < 32; i += 2 { + uuid[i/2], ok = xtob(b[i], b[i+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + } + return uuid, nil + default: + return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) + } + // s is now at least 36 bytes long + // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + v, ok := xtob(b[x], b[x+1]) + if !ok { + return uuid, errors.New("invalid UUID format") + } + uuid[i] = v + } + return uuid, nil +} + +// MustParse is like Parse but panics if the string cannot be parsed. +// It simplifies safe initialization of global variables holding compiled UUIDs. +func MustParse(s string) UUID { + uuid, err := Parse(s) + if err != nil { + panic(`uuid: Parse(` + s + `): ` + err.Error()) + } + return uuid +} + +// FromBytes creates a new UUID from a byte slice. Returns an error if the slice +// does not have a length of 16. The bytes are copied from the slice. +func FromBytes(b []byte) (uuid UUID, err error) { + err = uuid.UnmarshalBinary(b) + return uuid, err +} + +// Must returns uuid if err is nil and panics otherwise. +func Must(uuid UUID, err error) UUID { + if err != nil { + panic(err) + } + return uuid +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + var buf [36]byte + encodeHex(buf[:], uuid) + return string(buf[:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + var buf [36 + 9]byte + copy(buf[:], "urn:uuid:") + encodeHex(buf[9:], uuid) + return string(buf[:]) +} + +func encodeHex(dst []byte, uuid UUID) { + hex.Encode(dst, uuid[:4]) + dst[8] = '-' + hex.Encode(dst[9:13], uuid[4:6]) + dst[13] = '-' + hex.Encode(dst[14:18], uuid[6:8]) + dst[18] = '-' + hex.Encode(dst[19:23], uuid[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], uuid[10:]) +} + +// Variant returns the variant encoded in uuid. +func (uuid UUID) Variant() Variant { + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } +} + +// Version returns the version of uuid. +func (uuid UUID) Version() Version { + return Version(uuid[6] >> 4) +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implements io.Reader. 
+// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go new file mode 100644 index 00000000..199a1ac6 --- /dev/null +++ b/vendor/github.com/google/uuid/version1.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current time, NewUUID returns nil and an error. +// +// In most cases, New should be used. +func NewUUID() (UUID, error) { + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } + nodeMu.Unlock() + + var uuid UUID + now, seq, err := GetTime() + if err != nil { + return uuid, err + } + + timeLow := uint32(now & 0xffffffff) + timeMid := uint16((now >> 32) & 0xffff) + timeHi := uint16((now >> 48) & 0x0fff) + timeHi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], timeLow) + binary.BigEndian.PutUint16(uuid[4:], timeMid) + binary.BigEndian.PutUint16(uuid[6:], timeHi) + binary.BigEndian.PutUint16(uuid[8:], seq) + copy(uuid[10:], nodeID[:]) + + return uuid, nil +} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go new file mode 100644 index 00000000..9ad1abad --- /dev/null +++ b/vendor/github.com/google/uuid/version4.go @@ -0,0 +1,43 @@ +// Copyright 2016 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "io" + +// New creates a new random UUID or panics. New is equivalent to +// the expression +// +// uuid.Must(uuid.NewRandom()) +func New() UUID { + return Must(NewRandom()) +} + +// NewRandom returns a Random (Version 4) UUID. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate.
+func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +func NewRandomFromReader(r io.Reader) (UUID, error) { + var uuid UUID + _, err := io.ReadFull(r, uuid[:]) + if err != nil { + return Nil, err + } + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid, nil +} + diff --git a/vendor/github.com/googleapis/gax-go/LICENSE b/vendor/github.com/googleapis/gax-go/LICENSE new file mode 100644 index 00000000..6d16b657 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go new file mode 100644 index 00000000..b1d53dd1 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/call_option.go @@ -0,0 +1,161 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "math/rand" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// CallOption is an option used by Invoke to control behaviors of RPC calls. +// CallOption works by modifying relevant fields of CallSettings. +type CallOption interface { + // Resolve applies the option by modifying cs. + Resolve(cs *CallSettings) +} + +// Retryer is used by Invoke to determine retry behavior. +type Retryer interface { + // Retry reports whether a request should be retried and how long to pause before retrying + // if the previous attempt returned with err. Invoke never calls Retry with nil error. + Retry(err error) (pause time.Duration, shouldRetry bool) +} + +type retryerOption func() Retryer + +func (o retryerOption) Resolve(s *CallSettings) { + s.Retry = o +} + +// WithRetry sets CallSettings.Retry to fn. +func WithRetry(fn func() Retryer) CallOption { + return retryerOption(fn) +} + +// OnCodes returns a Retryer that retries if and only if +// the previous attempt returns a GRPC error whose error code is stored in cc. +// Pause times between retries are specified by bo. +// +// bo is only used for its parameters; each Retryer has its own copy. +func OnCodes(cc []codes.Code, bo Backoff) Retryer { + return &boRetryer{ + backoff: bo, + codes: append([]codes.Code(nil), cc...), + } +} + +type boRetryer struct { + backoff Backoff + codes []codes.Code +} + +func (r *boRetryer) Retry(err error) (time.Duration, bool) { + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() + for _, rc := range r.codes { + if c == rc { + return r.backoff.Pause(), true + } + } + return 0, false +} + +// Backoff implements exponential backoff. +// The wait time between retries is a random value between 0 and the "retry envelope". +// The envelope starts at Initial and increases by the factor of Multiplier every retry, +// but is capped at Max. +type Backoff struct { + // Initial is the initial value of the retry envelope, defaults to 1 second. + Initial time.Duration + + // Max is the maximum value of the retry envelope, defaults to 30 seconds. + Max time.Duration + + // Multiplier is the factor by which the retry envelope increases. + // It should be greater than 1 and defaults to 2. + Multiplier float64 + + // cur is the current retry envelope + cur time.Duration +} + +// Pause returns the next time.Duration that the caller should use to backoff. +func (bo *Backoff) Pause() time.Duration { + if bo.Initial == 0 { + bo.Initial = time.Second + } + if bo.cur == 0 { + bo.cur = bo.Initial + } + if bo.Max == 0 { + bo.Max = 30 * time.Second + } + if bo.Multiplier < 1 { + bo.Multiplier = 2 + } + // Select a duration between 1ns and the current max. It might seem + // counterintuitive to have so much jitter, but + // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that + // that is the best strategy.
+ d := time.Duration(1 + rand.Int63n(int64(bo.cur))) + bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) + if bo.cur > bo.Max { + bo.cur = bo.Max + } + return d +} + +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +// WithGRPCOptions allows passing gRPC call options during client creation. +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + +// CallSettings allow fine-grained control over how calls are made. +type CallSettings struct { + // Retry returns a Retryer to be used to control retry logic of a method call. + // If Retry is nil or the returned Retryer is nil, the call will not be retried. + Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption +} diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go new file mode 100644 index 00000000..3fd1b0b8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/gax.go @@ -0,0 +1,39 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +package gax + +// Version specifies the gax-go version being used. 
+const Version = "2.0.4" diff --git a/vendor/github.com/googleapis/gax-go/v2/go.mod b/vendor/github.com/googleapis/gax-go/v2/go.mod new file mode 100644 index 00000000..9cdfaf44 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/go.mod @@ -0,0 +1,3 @@ +module github.com/googleapis/gax-go/v2 + +require google.golang.org/grpc v1.19.0 diff --git a/vendor/github.com/googleapis/gax-go/v2/go.sum b/vendor/github.com/googleapis/gax-go/v2/go.sum new file mode 100644 index 00000000..7fa23ecf --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/go.sum @@ -0,0 +1,25 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go new file mode 100644 index 00000000..139371a0 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/header.go @@ -0,0 +1,53 @@ +// Copyright 2018, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. +func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go new file mode 100644 index 00000000..fe31dd00 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/invoke.go @@ -0,0 +1,99 @@ +// Copyright 2016, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gax
+
+import (
+	"context"
+	"strings"
+	"time"
+)
+
+// APICall is a user-defined call stub.
+type APICall func(context.Context, CallSettings) error
+
+// Invoke calls the given APICall,
+// performing retries as specified by opts, if any.
+func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
+	var settings CallSettings
+	for _, opt := range opts {
+		opt.Resolve(&settings)
+	}
+	return invoke(ctx, call, settings, Sleep)
+}
+
+// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
+// If interrupted, Sleep returns ctx.Err().
+func Sleep(ctx context.Context, d time.Duration) error {
+	t := time.NewTimer(d)
+	select {
+	case <-ctx.Done():
+		t.Stop()
+		return ctx.Err()
+	case <-t.C:
+		return nil
+	}
+}
+
+type sleeper func(ctx context.Context, d time.Duration) error
+
+// invoke implements Invoke, taking an additional sleeper argument for testing.
+func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
+	var retryer Retryer
+	for {
+		err := call(ctx, settings)
+		if err == nil {
+			return nil
+		}
+		if settings.Retry == nil {
+			return err
+		}
+		// Never retry permanent certificate errors (e.g. if ca-certificates
+		// are not installed). We should make only very few, targeted
+		// exceptions: most other status=Unavailable errors should be retried,
+		// such as when there's a network hiccup or the internet goes out for a
+		// minute. This is also why we do string parsing here instead of
+		// simply making Unavailable a non-retried code elsewhere.
+		if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
+			return err
+		}
+		if retryer == nil {
+			if r := settings.Retry(); r != nil {
+				retryer = r
+			} else {
+				return err
+			}
+		}
+		if d, ok := retryer.Retry(err); !ok {
+			return err
+		} else if err = sp(ctx, d); err != nil {
+			return err
+		}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
new file mode 100644
index 00000000..dca51e22
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
@@ -0,0 +1,49 @@
+## 0.3.0 (Unreleased)
+
+IMPROVEMENTS
+
+* Added `openstack/baremetal/apiversions.List` [GH-1577]
+* Added `openstack/baremetal/apiversions.Get` [GH-1577]
+
+BUG FIXES
+
+* Changed `baremetal/v1/nodes.CleanStep.Args` from `map[string]string` to `map[string]interface{}` [GH-1638](https://github.com/gophercloud/gophercloud/pull/1638)
+
+## 0.2.0 (June 17, 2019)
+
+IMPROVEMENTS
+
+* Added `networking/v2/extensions/qos/rules.ListBandwidthLimitRules` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584)
+* Added `networking/v2/extensions/qos/rules.GetBandwidthLimitRule` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584)
+* Added `networking/v2/extensions/qos/rules.CreateBandwidthLimitRule` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584)
+* Added `networking/v2/extensions/qos/rules.UpdateBandwidthLimitRule` [GH-1589](https://github.com/gophercloud/gophercloud/pull/1589)
+* Added `networking/v2/extensions/qos/rules.DeleteBandwidthLimitRule` [GH-1590](https://github.com/gophercloud/gophercloud/pull/1590)
+* Added `networking/v2/extensions/qos/policies.List` [GH-1591](https://github.com/gophercloud/gophercloud/pull/1591)
+* Added `networking/v2/extensions/qos/policies.Get` [GH-1593](https://github.com/gophercloud/gophercloud/pull/1593)
+* Added `networking/v2/extensions/qos/rules.ListDSCPMarkingRules` [GH-1594](https://github.com/gophercloud/gophercloud/pull/1594)
+* Added `networking/v2/extensions/qos/policies.Create` [GH-1595](https://github.com/gophercloud/gophercloud/pull/1595)
+* Added `compute/v2/extensions/diagnostics.Get` [GH-1592](https://github.com/gophercloud/gophercloud/pull/1592)
+* Added `networking/v2/extensions/qos/policies.Update` [GH-1603](https://github.com/gophercloud/gophercloud/pull/1603)
+* Added `networking/v2/extensions/qos/policies.Delete` [GH-1603](https://github.com/gophercloud/gophercloud/pull/1603)
+* Added `networking/v2/extensions/qos/rules.CreateDSCPMarkingRule` [GH-1605](https://github.com/gophercloud/gophercloud/pull/1605)
+* Added `networking/v2/extensions/qos/rules.UpdateDSCPMarkingRule` [GH-1605](https://github.com/gophercloud/gophercloud/pull/1605)
+* Added `networking/v2/extensions/qos/rules.GetDSCPMarkingRule` [GH-1609](https://github.com/gophercloud/gophercloud/pull/1609)
+* Added `networking/v2/extensions/qos/rules.DeleteDSCPMarkingRule` [GH-1609](https://github.com/gophercloud/gophercloud/pull/1609)
+* Added `networking/v2/extensions/qos/rules.ListMinimumBandwidthRules` [GH-1615](https://github.com/gophercloud/gophercloud/pull/1615)
+* Added `networking/v2/extensions/qos/rules.GetMinimumBandwidthRule` [GH-1615](https://github.com/gophercloud/gophercloud/pull/1615)
+* Added `networking/v2/extensions/qos/rules.CreateMinimumBandwidthRule` 
[GH-1615](https://github.com/gophercloud/gophercloud/pull/1615) +* Added `Hostname` to `baremetalintrospection/v1/introspection.Data` [GH-1627](https://github.com/gophercloud/gophercloud/pull/1627) +* Added `networking/v2/extensions/qos/rules.UpdateMinimumBandwidthRule` [GH-1624](https://github.com/gophercloud/gophercloud/pull/1624) +* Added `networking/v2/extensions/qos/rules.DeleteMinimumBandwidthRule` [GH-1624](https://github.com/gophercloud/gophercloud/pull/1624) +* Added `networking/v2/extensions/qos/ruletypes.GetRuleType` [GH-1625](https://github.com/gophercloud/gophercloud/pull/1625) +* Added `Extra` to `baremetalintrospection/v1/introspection.Data` [GH-1611](https://github.com/gophercloud/gophercloud/pull/1611) +* Added `blockstorage/extensions/volumeactions.SetImageMetadata` [GH-1621](https://github.com/gophercloud/gophercloud/pull/1621) + +BUG FIXES + +* Updated `networking/v2/extensions/qos/rules.UpdateBandwidthLimitRule` to use return code 200 [GH-1606](https://github.com/gophercloud/gophercloud/pull/1606) +* Fixed bug in `compute/v2/extensions/schedulerhints.SchedulerHints.Query` where contents will now be marshalled to a string [GH-1620](https://github.com/gophercloud/gophercloud/pull/1620) + +## 0.1.0 (May 27, 2019) + +Initial tagged release. diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE new file mode 100644 index 00000000..fbbbc9e4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/LICENSE @@ -0,0 +1,191 @@ +Copyright 2012-2013 Rackspace, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use +this file except in compliance with the License. You may obtain a copy of the +License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. + +------ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md new file mode 100644 index 00000000..ad29041d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -0,0 +1,159 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) +[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) + +Gophercloud is an OpenStack Go SDK. + +## Useful links + +* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/gophercloud/gophercloud + +# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" + +godep save ./... +``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. 
The first method is recommended because it decouples credential
+information from source code, allowing you to push the latter to your version
+control system without any security risk.
+
+You will need to retrieve the following:
+
+* username
+* password
+* a valid Keystone identity URL
+
+For users who have the OpenStack dashboard installed, there's a shortcut. If
+you visit the `project/access_and_security` path in Horizon and click on the
+"Download OpenStack RC File" button at the top right hand corner, you will
+download a bash file that exports all of your access details to environment
+variables. To execute the file, run `source admin-openrc.sh` and you will be
+prompted for your password.
+
+### Authentication
+
+Once you have access to your credentials, you can begin plugging them into
+Gophercloud. The next step is authentication, and this is handled by a base
+"Provider" struct. To get one, you can either pass in your credentials
+explicitly, or tell Gophercloud to use environment variables:
+
+```go
+import (
+  "github.com/gophercloud/gophercloud"
+  "github.com/gophercloud/gophercloud/openstack"
+  "github.com/gophercloud/gophercloud/openstack/utils"
+)
+
+// Option 1: Pass in the values yourself
+opts := gophercloud.AuthOptions{
+  IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+  Username: "{username}",
+  Password: "{password}",
+}
+
+// Option 2: Use a utility function to retrieve all your environment variables
+opts, err := openstack.AuthOptionsFromEnv()
+```
+
+Once you have the `opts` variable, you can pass it in and get back a
+`ProviderClient` struct:
+
+```go
+provider, err := openstack.AuthenticatedClient(opts)
+```
+
+The `ProviderClient` is the top-level client that all of your OpenStack services
+derive from. The provider contains all of the authentication details that allow
+your Go code to access the API - such as the base URL and token ID.
+
+### Provision a server
+
+Once we have a base Provider, we inject it as a dependency into each OpenStack
+service. In order to work with the Compute API, we need a Compute service
+client, which can be created like so:
+
+```go
+client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+  Region: os.Getenv("OS_REGION_NAME"),
+})
+```
+
+We then use this `client` for any Compute API operation we want. In our case,
+we want to provision a new server - so we invoke the `Create` method and pass
+in the flavor ID (hardware specification) and image ID (operating system) we're
+interested in:
+
+```go
+import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+
+server, err := servers.Create(client, servers.CreateOpts{
+  Name: "My new server!",
+  FlavorRef: "flavor_id",
+  ImageRef: "image_id",
+}).Extract()
+```
+
+The above code sample creates a new server with the given parameters and stores
+the new resource in the `server` variable (a
+[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct).
+
+## Advanced Usage
+
+Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works.
+
+## Backwards-Compatibility Guarantees
+
+None. Vendor it and write tests covering the parts you use.
+
+## Contributing
+
+See the [contributing guide](./.github/CONTRIBUTING.md).
+
+## Help and feedback
+
+If you're struggling with something or have spotted a potential bug, feel free
+to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues).
+
+## Thank You
+
+We'd like to extend special thanks and appreciation to the following:
+
+### OpenLab
+
+
+
+OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases.
+
+### VEXXHOST
+
+
+
+VEXXHOST is providing its services to assist with the development and testing of Gophercloud.
diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go
new file mode 100644
index 00000000..5ffa8d1e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go
@@ -0,0 +1,437 @@
+package gophercloud
+
+/*
+AuthOptions stores information needed to authenticate to an OpenStack Cloud.
+You can populate one manually, or use a provider's AuthOptionsFromEnv() function
+to read relevant information from the standard environment variables. Pass one
+to a provider's AuthenticatedClient function to authenticate and obtain a
+ProviderClient representing an active session on that provider.
+
+Its fields are the union of those recognized by each identity implementation and
+provider.
+
+An example of manually providing authentication information:
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		Username: "{username}",
+		Password: "{password}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
+
+An example of using AuthOptionsFromEnv(), where the environment variables can
+be read from a file, such as a standard openrc file:
+
+	opts, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(opts)
+*/
+type AuthOptions struct {
+	// IdentityEndpoint specifies the HTTP endpoint that is required to work with
+	// the Identity API of the appropriate version. While it's ultimately needed by
+	// all of the identity services, it will often be populated by a provider-level
+	// function.
+	//
+	// The IdentityEndpoint is typically referred to as the "auth_url" or
+	// "OS_AUTH_URL" in the information provided by the cloud operator.
+	IdentityEndpoint string `json:"-"`
+
+	// Username is required if using the Identity V2 API. Consult with your provider's
+	// control panel to discover your account's username. In Identity V3, either
+	// UserID or a combination of Username and DomainID or DomainName are needed.
+	Username string `json:"username,omitempty"`
+	UserID   string `json:"-"`
+
+	Password string `json:"password,omitempty"`
+
+	// At most one of DomainID and DomainName must be provided if using Username
+	// with Identity V3. Otherwise, either is optional.
+	DomainID   string `json:"-"`
+	DomainName string `json:"name,omitempty"`
+
+	// The TenantID and TenantName fields are optional for the Identity V2 API.
+	// The same fields are known as project_id and project_name in the Identity
+	// V3 API, but are collected as TenantID and TenantName here in both cases.
+	// Some providers allow you to specify a TenantName instead of the TenantID.
+	// Some require both. Your provider's authentication policies will determine
+	// how these fields influence authentication.
+	// If DomainID or DomainName are provided, they will also apply to TenantName.
+	// It is not currently possible to authenticate with Username and a Domain
+	// and scope to a Project in a different Domain by using TenantName. To
+	// accomplish that, the ProjectID will need to be provided as the TenantID
+	// option.
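+	// For example, a hypothetical set of values (illustrative only, not tied
+	// to any particular cloud):
+	//   AuthOptions{Username: "jdoe", DomainName: "users", TenantID: "<project-uuid>"}
+	// scopes the token to the project with that ID even when the project lives
+	// in a different domain than the user.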
+ TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud to + // cache your credentials in memory, and to allow Gophercloud to attempt to + // re-authenticate automatically if/when your token expires. If you set it to + // false, it will not cache these settings, but re-authentication will not be + // possible. This setting defaults to false. + // + // NOTE: The reauth function will try to re-authenticate endlessly if left + // unchecked. The way to limit the number of attempts is to provide a custom + // HTTP client to the provider client and provide a transport that implements + // the RoundTripper interface and stores the number of failed retries. For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + // Scope determines the scoping of the authentication request. + Scope *AuthScope `json:"-"` + + // Authentication through Application Credentials requires supplying name, project and secret + // For project we can use TenantID + ApplicationCredentialID string `json:"-"` + ApplicationCredentialName string `json:"-"` + ApplicationCredentialSecret string `json:"-"` +} + +// AuthScope allows a created token to be limited to a specific domain or project. +type AuthScope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder +// interface in the v2 tokens package +func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + // Populate the request map. 
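+	// For a username/password request, for example, the map built below
+	// marshals to a body of this shape (inferred from the code that follows):
+	//   {"auth": {"passwordCredentials": {"username": "...", "password": "..."},
+	//             "tenantId": "...", "tenantName": "..."}}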
+ authMap := make(map[string]interface{}) + + if opts.Username != "" { + if opts.Password != "" { + authMap["passwordCredentials"] = map[string]interface{}{ + "username": opts.Username, + "password": opts.Password, + } + } else { + return nil, ErrMissingInput{Argument: "Password"} + } + } else if opts.TokenID != "" { + authMap["token"] = map[string]interface{}{ + "id": opts.TokenID, + } + } else { + return nil, ErrMissingInput{Argument: "Username"} + } + + if opts.TenantID != "" { + authMap["tenantId"] = opts.TenantID + } + if opts.TenantName != "" { + authMap["tenantName"] = opts.TenantName + } + + return map[string]interface{}{"auth": authMap}, nil +} + +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password,omitempty"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type applicationCredentialReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + User *userReq `json:"user,omitempty"` + Secret *string `json:"secret,omitempty"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + if opts.Password == "" { + if opts.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. + if opts.Username != "" { + return nil, ErrUsernameWithToken{} + } + if opts.UserID != "" { + return nil, ErrUserIDWithToken{} + } + if opts.DomainID != "" { + return nil, ErrDomainIDWithToken{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithToken{} + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: opts.TokenID, + } + + } else if opts.ApplicationCredentialID != "" { + // Configure the request for ApplicationCredentialID authentication. + // https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67 + // There are three kinds of possible application_credential requests + // 1. application_credential id + secret + // 2. application_credential name + secret + user_id + // 3. 
application_credential name + secret + username + domain_id / domain_name
+			if opts.ApplicationCredentialSecret == "" {
+				return nil, ErrAppCredMissingSecret{}
+			}
+			req.Auth.Identity.Methods = []string{"application_credential"}
+			req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
+				ID:     &opts.ApplicationCredentialID,
+				Secret: &opts.ApplicationCredentialSecret,
+			}
+		} else if opts.ApplicationCredentialName != "" {
+			if opts.ApplicationCredentialSecret == "" {
+				return nil, ErrAppCredMissingSecret{}
+			}
+
+			var userRequest *userReq
+
+			if opts.UserID != "" {
+				// UserID could be used without the domain information
+				userRequest = &userReq{
+					ID: &opts.UserID,
+				}
+			}
+
+			if userRequest == nil && opts.Username == "" {
+				// Make sure that either Username or UserID is provided
+				return nil, ErrUsernameOrUserID{}
+			}
+
+			if userRequest == nil && opts.DomainID != "" {
+				userRequest = &userReq{
+					Name:   &opts.Username,
+					Domain: &domainReq{ID: &opts.DomainID},
+				}
+			}
+
+			if userRequest == nil && opts.DomainName != "" {
+				userRequest = &userReq{
+					Name:   &opts.Username,
+					Domain: &domainReq{Name: &opts.DomainName},
+				}
+			}
+
+			// Make sure that DomainID or DomainName is provided along with Username
+			if userRequest == nil {
+				return nil, ErrDomainIDOrDomainName{}
+			}
+
+			req.Auth.Identity.Methods = []string{"application_credential"}
+			req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
+				Name:   &opts.ApplicationCredentialName,
+				User:   userRequest,
+				Secret: &opts.ApplicationCredentialSecret,
+			}
+		} else {
+			// If no Password, TokenID, or ApplicationCredential is available, authentication can't continue.
+			return nil, ErrMissingPassword{}
+		}
+	} else {
+		// Password authentication.
+		req.Auth.Identity.Methods = []string{"password"}
+
+		// Exactly one of Username and UserID must be specified.
+		if opts.Username == "" && opts.UserID == "" {
+			return nil, ErrUsernameOrUserID{}
+		}
+
+		if opts.Username != "" {
+			// If Username is provided, UserID may not be provided.
+			if opts.UserID != "" {
+				return nil, ErrUsernameOrUserID{}
+			}
+
+			// Either DomainID or DomainName must also be specified.
+			if opts.DomainID == "" && opts.DomainName == "" {
+				return nil, ErrDomainIDOrDomainName{}
+			}
+
+			if opts.DomainID != "" {
+				if opts.DomainName != "" {
+					return nil, ErrDomainIDOrDomainName{}
+				}
+
+				// Configure the request for Username and Password authentication with a DomainID.
+				req.Auth.Identity.Password = &passwordReq{
+					User: userReq{
+						Name:     &opts.Username,
+						Password: opts.Password,
+						Domain:   &domainReq{ID: &opts.DomainID},
+					},
+				}
+			}
+
+			if opts.DomainName != "" {
+				// Configure the request for Username and Password authentication with a DomainName.
+				req.Auth.Identity.Password = &passwordReq{
+					User: userReq{
+						Name:     &opts.Username,
+						Password: opts.Password,
+						Domain:   &domainReq{Name: &opts.DomainName},
+					},
+				}
+			}
+		}
+
+		if opts.UserID != "" {
+			// If UserID is specified, neither DomainID nor DomainName may be.
+			if opts.DomainID != "" {
+				return nil, ErrDomainIDWithUserID{}
+			}
+			if opts.DomainName != "" {
+				return nil, ErrDomainNameWithUserID{}
+			}
+
+			// Configure the request for UserID and Password authentication.
+ req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &opts.UserID, Password: opts.Password}, + } + } + } + + b, err := BuildRequestBody(req, "") + if err != nil { + return nil, err + } + + if len(scope) != 0 { + b["auth"].(map[string]interface{})["scope"] = scope + } + + return b, nil +} + +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } + } + } + + if opts.Scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + if opts.Scope.ProjectID != "" { + return nil, ErrScopeProjectIDOrProjectName{} + } + + if opts.Scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, + }, + }, nil + } + + if opts.Scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, + }, + }, nil + } + } else if opts.Scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if opts.Scope.DomainID != "" { + return nil, ErrScopeProjectIDAlone{} + } + if opts.Scope.DomainName != "" { + return nil, ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &opts.Scope.ProjectID, + }, + }, nil + } else if opts.Scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. + if opts.Scope.DomainName != "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + + // DomainID + return map[string]interface{}{ + "domain": map[string]interface{}{ + "id": &opts.Scope.DomainID, + }, + }, nil + } else if opts.Scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &opts.Scope.DomainName, + }, + }, nil + } + + return nil, nil +} + +func (opts AuthOptions) CanReauth() bool { + return opts.AllowReauth +} diff --git a/vendor/github.com/gophercloud/gophercloud/auth_result.go b/vendor/github.com/gophercloud/gophercloud/auth_result.go new file mode 100644 index 00000000..2e4699b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_result.go @@ -0,0 +1,52 @@ +package gophercloud + +/* +AuthResult is the result from the request that was used to obtain a provider +client's Keystone token. It is returned from ProviderClient.GetAuthResult(). 
+
+The following types satisfy this interface:
+
+	github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult
+	github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult
+
+Usage example:
+
+	import (
+		"errors"
+		"fmt"
+
+		"github.com/gophercloud/gophercloud"
+		tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens"
+		tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
+	)
+
+	func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) {
+		r := providerClient.GetAuthResult()
+		if r == nil {
+			// ProviderClient did not use openstack.Authenticate(), e.g. because the
+			// token was set manually with ProviderClient.SetToken()
+			return "", errors.New("no AuthResult available")
+		}
+		switch r := r.(type) {
+		case tokens2.CreateResult:
+			u, err := r.ExtractUser()
+			if err != nil {
+				return "", err
+			}
+			return u.ID, nil
+		case tokens3.CreateResult:
+			u, err := r.ExtractUser()
+			if err != nil {
+				return "", err
+			}
+			return u.ID, nil
+		default:
+			panic(fmt.Sprintf("got unexpected AuthResult type %T", r))
+		}
+	}
+
+Both implementing types share a lot of methods by name, like ExtractUser() in
+this example. But those methods cannot be part of the AuthResult interface
+because the return types are different (in this case, type tokens2.User vs.
+type tokens3.User).
+*/
+type AuthResult interface {
+	ExtractTokenID() (string, error)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go
new file mode 100644
index 00000000..953ca822
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/doc.go
@@ -0,0 +1,110 @@
+/*
+Package gophercloud provides a multi-vendor interface to OpenStack-compatible
+clouds. The library has a three-level hierarchy: providers, services, and
+resources.
+
+Authenticating with Providers
+
+Provider structs represent the cloud providers that offer and manage a
+collection of services. You will generally want to create one Provider
+client per OpenStack cloud.
+
+	It is now recommended to use the `clientconfig` package found at
+	https://github.com/gophercloud/utils/tree/master/openstack/clientconfig
+	for all authentication purposes.
+
+	The documentation below is still relevant. clientconfig simply implements
+	it and presents it in an easier and more flexible way.
+
+Use your OpenStack credentials to create a Provider client. The
+IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in
+information provided by the cloud operator. Additionally, the cloud may refer to
+TenantID or TenantName as project_id and project_name. Credentials are
+specified like so:
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		Username: "{username}",
+		Password: "{password}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
+
+You can authenticate with a token by doing:
+
+	opts := gophercloud.AuthOptions{
+		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
+		TokenID: "{token_id}",
+		TenantID: "{tenant_id}",
+	}
+
+	provider, err := openstack.AuthenticatedClient(opts)
+
+You may also use the openstack.AuthOptionsFromEnv() helper function. This
+function reads in standard environment variables frequently found in an
+OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant"
+instead of "project".
+ + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) + +Service Clients + +Service structs are specific to a provider and handle all of the logic and +operations for a particular OpenStack service. Examples of services include: +Compute, Object Storage, Block Storage. In order to define one, you need to +pass in the parent provider, like so: + + opts := gophercloud.EndpointOpts{Region: "RegionOne"} + + client, err := openstack.NewComputeV2(provider, opts) + +Resources + +Resource structs are the domain models that services make use of in order +to work with and represent the state of API resources: + + server, err := servers.Get(client, "{serverId}").Extract() + +Intermediate Result structs are returned for API operations, which allow +generic access to the HTTP headers, response body, and any errors associated +with the network transaction. To turn a result into a usable resource struct, +you must call the Extract method which is chained to the response, or an +Extract function from an applicable extension: + + result := servers.Get(client, "{serverId}") + + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) + +All requests that enumerate a collection return a Pager struct that is used to +iterate through the results one page at a time. Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. Of particular note for end users +are the AuthOptions and EndpointOpts structs. +*/ +package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go new file mode 100644 index 00000000..2fbc3c97 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -0,0 +1,76 @@ +package gophercloud + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. 
+ AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "openstack.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. + // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. + // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. + Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. +func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go new file mode 100644 index 00000000..0bcb3af7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -0,0 +1,471 @@ +package gophercloud + +import ( + "fmt" + "strings" +) + +// BaseError is an error type that all other error types embed. +type BaseError struct { + DefaultErrString string + Info string +} + +func (e BaseError) Error() string { + e.DefaultErrString = "An error occurred while executing a Gophercloud request." 
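+	// Info, when populated by a more specific error type, takes precedence
+	// over DefaultErrString (see choseErrString below).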
+	return e.choseErrString()
+}
+
+func (e BaseError) choseErrString() string {
+	if e.Info != "" {
+		return e.Info
+	}
+	return e.DefaultErrString
+}
+
+// ErrMissingInput is the error when input is required in a particular
+// situation but not provided by the user.
+type ErrMissingInput struct {
+	BaseError
+	Argument string
+}
+
+func (e ErrMissingInput) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument)
+	return e.choseErrString()
+}
+
+// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors.
+type ErrInvalidInput struct {
+	ErrMissingInput
+	Value interface{}
+}
+
+func (e ErrInvalidInput) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value)
+	return e.choseErrString()
+}
+
+// ErrMissingEnvironmentVariable is the error when an environment variable is
+// required in a particular situation but not provided by the user.
+type ErrMissingEnvironmentVariable struct {
+	BaseError
+	EnvironmentVariable string
+}
+
+func (e ErrMissingEnvironmentVariable) Error() string {
+	e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable)
+	return e.choseErrString()
+}
+
+// ErrMissingAnyoneOfEnvironmentVariables is the error when at least one of a
+// set of environment variables is required in a particular situation but none
+// is provided by the user.
+type ErrMissingAnyoneOfEnvironmentVariables struct {
+	BaseError
+	EnvironmentVariables []string
+}
+
+func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Missing one of the following environment variables [%s]",
+		strings.Join(e.EnvironmentVariables, ", "),
+	)
+	return e.choseErrString()
+}
+
+// ErrUnexpectedResponseCode is returned by the Request method when a response code other than
+// those listed in OkCodes is encountered.
+type ErrUnexpectedResponseCode struct {
+	BaseError
+	URL      string
+	Method   string
+	Expected []int
+	Actual   int
+	Body     []byte
+}
+
+func (e ErrUnexpectedResponseCode) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s",
+		e.Expected, e.Method, e.URL, e.Actual, e.Body,
+	)
+	return e.choseErrString()
+}
+
+// ErrDefault400 is the default error type returned on a 400 HTTP response code.
+type ErrDefault400 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault401 is the default error type returned on a 401 HTTP response code.
+type ErrDefault401 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault403 is the default error type returned on a 403 HTTP response code.
+type ErrDefault403 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault404 is the default error type returned on a 404 HTTP response code.
+type ErrDefault404 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault405 is the default error type returned on a 405 HTTP response code.
+type ErrDefault405 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault408 is the default error type returned on a 408 HTTP response code.
+type ErrDefault408 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault409 is the default error type returned on a 409 HTTP response code.
+type ErrDefault409 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault429 is the default error type returned on a 429 HTTP response code.
+type ErrDefault429 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault500 is the default error type returned on a 500 HTTP response code.
+type ErrDefault500 struct {
+	ErrUnexpectedResponseCode
+}
+
+// ErrDefault503 is the default error type returned on a 503 HTTP response code.
+type ErrDefault503 struct {
+	ErrUnexpectedResponseCode
+}
+
+func (e ErrDefault400) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Bad request with: [%s %s], error message: %s",
+		e.Method, e.URL, e.Body,
+	)
+	return e.choseErrString()
+}
+func (e ErrDefault401) Error() string {
+	return "Authentication failed"
+}
+func (e ErrDefault403) Error() string {
+	e.DefaultErrString = fmt.Sprintf(
+		"Request forbidden: [%s %s], error message: %s",
+		e.Method, e.URL, e.Body,
+	)
+	return e.choseErrString()
+}
+func (e ErrDefault404) Error() string {
+	return "Resource not found"
+}
+func (e ErrDefault405) Error() string {
+	return "Method not allowed"
+}
+func (e ErrDefault408) Error() string {
+	return "The server timed out waiting for the request"
+}
+func (e ErrDefault429) Error() string {
+	return "Too many requests have been sent in a given amount of time. Pause" +
+		" requests, wait up to one minute, and try again."
+}
+func (e ErrDefault500) Error() string {
+	return "Internal Server Error"
+}
+func (e ErrDefault503) Error() string {
+	return "The service is currently unable to handle the request due to a temporary" +
+		" overloading or maintenance. This is a temporary condition. Try again later."
+}
+
+// Err400er is the interface resource error types implement to override the error message
+// from a 400 error.
+type Err400er interface {
+	Error400(ErrUnexpectedResponseCode) error
+}
+
+// Err401er is the interface resource error types implement to override the error message
+// from a 401 error.
+type Err401er interface {
+	Error401(ErrUnexpectedResponseCode) error
+}
+
+// Err403er is the interface resource error types implement to override the error message
+// from a 403 error.
+type Err403er interface {
+	Error403(ErrUnexpectedResponseCode) error
+}
+
+// Err404er is the interface resource error types implement to override the error message
+// from a 404 error.
+type Err404er interface {
+	Error404(ErrUnexpectedResponseCode) error
+}
+
+// Err405er is the interface resource error types implement to override the error message
+// from a 405 error.
+type Err405er interface {
+	Error405(ErrUnexpectedResponseCode) error
+}
+
+// Err408er is the interface resource error types implement to override the error message
+// from a 408 error.
+type Err408er interface {
+	Error408(ErrUnexpectedResponseCode) error
+}
+
+// Err409er is the interface resource error types implement to override the error message
+// from a 409 error.
+type Err409er interface {
+	Error409(ErrUnexpectedResponseCode) error
+}
+
+// Err429er is the interface resource error types implement to override the error message
+// from a 429 error.
+type Err429er interface {
+	Error429(ErrUnexpectedResponseCode) error
+}
+
+// Err500er is the interface resource error types implement to override the error message
+// from a 500 error.
+type Err500er interface {
+	Error500(ErrUnexpectedResponseCode) error
+}
+
+// Err503er is the interface resource error types implement to override the error message
+// from a 503 error.
+type Err503er interface {
+	Error503(ErrUnexpectedResponseCode) error
+}
+
+// ErrTimeOut is the error type returned when an operation times out.
+type ErrTimeOut struct { + BaseError +} + +func (e ErrTimeOut) Error() string { + e.DefaultErrString = "A time out occurred" + return e.choseErrString() +} + +// ErrUnableToReauthenticate is the error type returned when reauthentication fails. +type ErrUnableToReauthenticate struct { + BaseError + ErrOriginal error +} + +func (e ErrUnableToReauthenticate) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrErrorAfterReauthentication is the error type returned when reauthentication +// succeeds, but an error occurs afterward (usually an HTTP error). +type ErrErrorAfterReauthentication struct { + BaseError + ErrOriginal error +} + +func (e ErrErrorAfterReauthentication) Error() string { + e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrServiceNotFound is returned when no service in a service catalog matches +// the provided EndpointOpts. This is generally returned by provider service +// factory methods like "NewComputeV2()" and can mean that a service is not +// enabled for your account. +type ErrServiceNotFound struct { + BaseError +} + +func (e ErrServiceNotFound) Error() string { + e.DefaultErrString = "No suitable service could be found in the service catalog." + return e.choseErrString() +} + +// ErrEndpointNotFound is returned when no available endpoints match the +// provided EndpointOpts. This is also generally returned by provider service +// factory methods, and usually indicates that a region was specified +// incorrectly. +type ErrEndpointNotFound struct { + BaseError +} + +func (e ErrEndpointNotFound) Error() string { + e.DefaultErrString = "No suitable endpoint could be found in the service catalog." + return e.choseErrString() +} + +// ErrResourceNotFound is the error when trying to retrieve a resource's +// ID by name and the resource doesn't exist. +type ErrResourceNotFound struct { + BaseError + Name string + ResourceType string +} + +func (e ErrResourceNotFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrMultipleResourcesFound is the error when trying to retrieve a resource's +// ID by name and multiple resources have the user-provided name. +type ErrMultipleResourcesFound struct { + BaseError + Name string + Count int + ResourceType string +} + +func (e ErrMultipleResourcesFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrUnexpectedType is the error when an unexpected type is encountered +type ErrUnexpectedType struct { + BaseError + Expected string + Actual string +} + +func (e ErrUnexpectedType) Error() string { + e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) + return e.choseErrString() +} + +func unacceptedAttributeErr(attribute string) string { + return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) +} + +func redundantWithTokenErr(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) +} + +func redundantWithUserID(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) +} + +// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
+type ErrAPIKeyProvided struct{ BaseError } + +func (e ErrAPIKeyProvided) Error() string { + return unacceptedAttributeErr("APIKey") +} + +// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. +type ErrTenantIDProvided struct{ BaseError } + +func (e ErrTenantIDProvided) Error() string { + return unacceptedAttributeErr("TenantID") +} + +// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. +type ErrTenantNameProvided struct{ BaseError } + +func (e ErrTenantNameProvided) Error() string { + return unacceptedAttributeErr("TenantName") +} + +// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. +type ErrUsernameWithToken struct{ BaseError } + +func (e ErrUsernameWithToken) Error() string { + return redundantWithTokenErr("Username") +} + +// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. +type ErrUserIDWithToken struct{ BaseError } + +func (e ErrUserIDWithToken) Error() string { + return redundantWithTokenErr("UserID") +} + +// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. +type ErrDomainIDWithToken struct{ BaseError } + +func (e ErrDomainIDWithToken) Error() string { + return redundantWithTokenErr("DomainID") +} + +// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead. +type ErrDomainNameWithToken struct{ BaseError } + +func (e ErrDomainNameWithToken) Error() string { + return redundantWithTokenErr("DomainName") +} + +// ErrUsernameOrUserID indicates that neither username nor userID is specified, or both are at once. +type ErrUsernameOrUserID struct{ BaseError } + +func (e ErrUsernameOrUserID) Error() string { + return "Exactly one of Username and UserID must be provided for password authentication" +} + +// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. +type ErrDomainIDWithUserID struct{ BaseError } + +func (e ErrDomainIDWithUserID) Error() string { + return redundantWithUserID("DomainID") +} + +// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. +type ErrDomainNameWithUserID struct{ BaseError } + +func (e ErrDomainNameWithUserID) Error() string { + return redundantWithUserID("DomainName") +} + +// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. +// It may also indicate that both a DomainID and a DomainName were provided at once. +type ErrDomainIDOrDomainName struct{ BaseError } + +func (e ErrDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName to authenticate by Username" +} + +// ErrMissingPassword indicates that no password was provided and no token is available. +type ErrMissingPassword struct{ BaseError } + +func (e ErrMissingPassword) Error() string { + return "You must provide a password to authenticate" +} + +// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. +type ErrScopeDomainIDOrDomainName struct{ BaseError } + +func (e ErrScopeDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" +} + +// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope.
+type ErrScopeProjectIDOrProjectName struct{ BaseError } + +func (e ErrScopeProjectIDOrProjectName) Error() string { + return "You must provide at most one of ProjectID or ProjectName in a Scope" +} + +// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. +type ErrScopeProjectIDAlone struct{ BaseError } + +func (e ErrScopeProjectIDAlone) Error() string { + return "ProjectID must be supplied alone in a Scope" +} + +// ErrScopeEmpty indicates that no credentials were provided in a Scope. +type ErrScopeEmpty struct{ BaseError } + +func (e ErrScopeEmpty) Error() string { + return "You must provide either a Project or Domain in a Scope" +} + +// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name +type ErrAppCredMissingSecret struct{ BaseError } + +func (e ErrAppCredMissingSecret) Error() string { + return "You must provide an Application Credential Secret" +} diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod new file mode 100644 index 00000000..d1ee3b47 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/go.mod @@ -0,0 +1,7 @@ +module github.com/gophercloud/gophercloud + +require ( + golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 + golang.org/x/sys v0.0.0-20190209173611-3b5209105503 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum new file mode 100644 index 00000000..33cb0be8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/go.sum @@ -0,0 +1,8 @@ +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gophercloud/gophercloud/internal/pkg.go b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go new file mode 100644 index 00000000..5bf0569c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go @@ -0,0 +1 @@ +package internal diff --git a/vendor/github.com/gophercloud/gophercloud/internal/util.go b/vendor/github.com/gophercloud/gophercloud/internal/util.go new file mode 100644 index 00000000..8efb283e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/util.go @@ -0,0 +1,34 @@ +package internal + +import ( + "reflect" + "strings" +) + +// RemainingKeys will inspect a struct and compare it to a map. Any struct +// field that does not have a JSON tag that matches a key in the map or +// a matching lower-case field in the map will be returned as an extra. 
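+// For example, given a struct with a single field tagged `json:"name"` and a
+// map containing the keys "name" and "color", only "color" is returned.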
+// +// This is useful for determining the extra fields returned in response bodies +// for resources that can contain an arbitrary or dynamic number of fields. +func RemainingKeys(s interface{}, m map[string]interface{}) (extras map[string]interface{}) { + extras = make(map[string]interface{}) + for k, v := range m { + extras[k] = v + } + + valueOf := reflect.ValueOf(s) + typeOf := reflect.TypeOf(s) + for i := 0; i < valueOf.NumField(); i++ { + field := typeOf.Field(i) + + lowerField := strings.ToLower(field.Name) + delete(extras, lowerField) + + if tagValue := field.Tag.Get("json"); tagValue != "" && tagValue != "-" { + delete(extras, tagValue) + } + } + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go new file mode 100644 index 00000000..0e8d90ff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -0,0 +1,125 @@ +package openstack + +import ( + "os" + + "github.com/gophercloud/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +/* +AuthOptionsFromEnv fills out an identity.AuthOptions structure with the +settings found on the various OpenStack OS_* environment variables. + +The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +OS_PASSWORD and OS_PROJECT_ID. + +Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, +or an error will result. OS_PROJECT_ID is optional. + +OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and +OS_PROJECT_NAME and the latter are expected against a v3 auth api. + +If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will still be referred to +as "tenant" in Gophercloud. + +If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to +handle projects not on the default domain. + +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID") + applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME") + applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET") + + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value.
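+ // (For example, with OS_TENANT_NAME=demo and OS_PROJECT_NAME=prod, the resulting TenantName is "prod".)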
+ if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + + if authURL == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } + return nilOptions, err + } + + if userID == "" && username == "" { + // Empty username and userID can be ignored when applicationCredentialID and applicationCredentialSecret are set + if applicationCredentialID == "" && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + return nilOptions, err + } + } + + if password == "" && applicationCredentialID == "" && applicationCredentialName == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } + return nilOptions, err + } + + if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", + } + return nilOptions, err + } + + if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PROJECT_ID", + } + return nilOptions, err + } + + if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { + if userID == "" && username == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, + } + } + if username != "" && domainID == "" && domainName == "" { + return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, + } + } + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + ApplicationCredentialID: applicationCredentialID, + ApplicationCredentialName: applicationCredentialName, + ApplicationCredentialSecret: applicationCredentialSecret, + } + + return ao, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go new file mode 100644 index 00000000..a78d3d04 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go @@ -0,0 +1,86 @@ +/* +Package volumeactions provides information and interaction with volumes in the +OpenStack Block Storage service. A volume is a detachable block storage +device, akin to a USB hard drive.
+ +Example of Attaching a Volume to an Instance + + attachOpts := volumeactions.AttachOpts{ + MountPoint: "/mnt", + Mode: "rw", + InstanceUUID: server.ID, + } + + err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr() + if err != nil { + panic(err) + } + + detachOpts := volumeactions.DetachOpts{ + AttachmentID: volume.Attachments[0].AttachmentID, + } + + err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr() + if err != nil { + panic(err) + } + + +Example of Creating an Image from a Volume + + uploadImageOpts := volumeactions.UploadImageOpts{ + ImageName: "my_vol", + Force: true, + } + + volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", volumeImage) + +Example of Extending a Volume's Size + + extendOpts := volumeactions.ExtendSizeOpts{ + NewSize: 100, + } + + err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example of Initializing a Volume Connection + + connectOpts := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", connectionInfo["data"]) + + terminateOpts := &volumeactions.TerminateConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeactions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go new file mode 100644 index 00000000..fdf322d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go @@ -0,0 +1,300 @@ +package volumeactions + +import ( + "github.com/gophercloud/gophercloud" +) + +// AttachOptsBuilder allows extensions to add additional parameters to the +// Attach request. +type AttachOptsBuilder interface { + ToVolumeAttachMap() (map[string]interface{}, error) +} + +// AttachMode describes the attachment mode for volumes. +type AttachMode string + +// These constants determine how a volume is attached. +const ( + ReadOnly AttachMode = "ro" + ReadWrite AttachMode = "rw" +) + +// AttachOpts contains options for attaching a Volume. +type AttachOpts struct { + // The mountpoint of this volume. + MountPoint string `json:"mountpoint,omitempty"` + + // The nova instance ID, can't set simultaneously with HostName. + InstanceUUID string `json:"instance_uuid,omitempty"` + + // The hostname of baremetal host, can't set simultaneously with InstanceUUID. + HostName string `json:"host_name,omitempty"` + + // Mount mode of this volume. + Mode AttachMode `json:"mode,omitempty"` +} + +// ToVolumeAttachMap assembles a request body based on the contents of an +// AttachOpts.
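+// The fields are nested under an "os-attach" key; an illustrative body (not
+// captured from a live request):
+//
+//	{"os-attach": {"instance_uuid": "...", "mountpoint": "/mnt", "mode": "rw"}}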
+func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-attach") +} + +// Attach will attach a volume based on the values in AttachOpts. +func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) { + b, err := opts.ToVolumeAttachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// BeginDetaching will mark the volume as detaching. +func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { + b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// DetachOptsBuilder allows extensions to add additional parameters to the +// Detach request. +type DetachOptsBuilder interface { + ToVolumeDetachMap() (map[string]interface{}, error) +} + +// DetachOpts contains options for detaching a Volume. +type DetachOpts struct { + // AttachmentID is the ID of the attachment between a volume and instance. + AttachmentID string `json:"attachment_id,omitempty"` +} + +// ToVolumeDetachMap assembles a request body based on the contents of a +// DetachOpts. +func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-detach") +} + +// Detach will detach a volume based on volume ID. +func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) { + b, err := opts.ToVolumeDetachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Reserve will reserve a volume based on volume ID. +func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { + b := map[string]interface{}{"os-reserve": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// Unreserve will unreserve a volume based on volume ID. +func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { + b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the +// InitializeConnection request. +type InitializeConnectionOptsBuilder interface { + ToVolumeInitializeConnectionMap() (map[string]interface{}, error) +} + +// InitializeConnectionOpts hosts options for InitializeConnection. +// The fields are specific to the storage driver in use and the destination +// attachment. +type InitializeConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeInitializeConnectionMap assembles a request body based on the contents of an +// InitializeConnectionOpts.
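+// The connector fields are wrapped twice, along the lines of (illustrative):
+//
+//	{"os-initialize_connection": {"connector": {"ip": "127.0.0.1", "host": "stack"}}}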
+func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-initialize_connection": b}, err +} + +// InitializeConnection initializes an iSCSI connection by volume ID. +func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { + b, err := opts.ToVolumeInitializeConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the +// TerminateConnection request. +type TerminateConnectionOptsBuilder interface { + ToVolumeTerminateConnectionMap() (map[string]interface{}, error) +} + +// TerminateConnectionOpts hosts options for TerminateConnection. +type TerminateConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a +// TerminateConnectionOpts. +func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-terminate_connection": b}, err +} + +// TerminateConnection terminates an iSCSI connection by volume ID. +func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { + b, err := opts.ToVolumeTerminateConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ExtendSizeOptsBuilder allows extensions to add additional parameters to the +// ExtendSize request. +type ExtendSizeOptsBuilder interface { + ToVolumeExtendSizeMap() (map[string]interface{}, error) +} + +// ExtendSizeOpts contains options for extending the size of an existing Volume. +// This object is passed to the volumes.ExtendSize function. +type ExtendSizeOpts struct { + // NewSize is the new size of the volume, in GB. + NewSize int `json:"new_size" required:"true"` +} + +// ToVolumeExtendSizeMap assembles a request body based on the contents of an +// ExtendSizeOpts. +func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-extend") +} + +// ExtendSize will extend the size of the volume based on the provided information. +// This operation does not return a response body. +func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { + b, err := opts.ToVolumeExtendSizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// UploadImageOptsBuilder allows extensions to add additional parameters to the +// UploadImage request. 
+type UploadImageOptsBuilder interface { + ToVolumeUploadImageMap() (map[string]interface{}, error) +} + +// UploadImageOpts contains options for uploading a Volume to image storage. +type UploadImageOpts struct { + // Container format, may be bare, ovf, ova, etc. + ContainerFormat string `json:"container_format,omitempty"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format,omitempty"` + + // The name of image that will be stored in glance. + ImageName string `json:"image_name,omitempty"` + + // Force image creation, usable if volume attached to instance. + Force bool `json:"force,omitempty"` +} + +// ToVolumeUploadImageMap assembles a request body based on the contents of an +// UploadImageOpts. +func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-volume_upload_image") +} + +// UploadImage will upload an image based on the values in UploadImageOptsBuilder. +func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) { + b, err := opts.ToVolumeUploadImageMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ForceDelete will delete the volume regardless of state. +func ForceDelete(client *gophercloud.ServiceClient, id string) (r ForceDeleteResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-force_delete": ""}, nil, nil) + return +} + +// ImageMetadataOptsBuilder allows extensions to add additional parameters to the +// ImageMetadataRequest request. +type ImageMetadataOptsBuilder interface { + ToImageMetadataMap() (map[string]interface{}, error) +} + +// ImageMetadataOpts contains options for setting image metadata on a volume. +type ImageMetadataOpts struct { + // The image metadata to add to the volume as a set of metadata key and value pairs. + Metadata map[string]string `json:"metadata"` +} + +// ToImageMetadataMap assembles a request body based on the contents of an +// ImageMetadataOpts. +func (opts ImageMetadataOpts) ToImageMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-set_image_metadata") +} + +// SetImageMetadata will set image metadata on a volume based on the values in ImageMetadataOptsBuilder. +func SetImageMetadata(client *gophercloud.ServiceClient, id string, opts ImageMetadataOptsBuilder) (r SetImageMetadataResult) { + b, err := opts.ToImageMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go new file mode 100644 index 00000000..aacee4c5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go @@ -0,0 +1,197 @@ +package volumeactions + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" +) + +// AttachResult contains the response body and error from an Attach request. +type AttachResult struct { + gophercloud.ErrResult +} + +// BeginDetachingResult contains the response body and error from a BeginDetaching +// request.
+type BeginDetachingResult struct { + gophercloud.ErrResult +} + +// DetachResult contains the response body and error from a Detach request. +type DetachResult struct { + gophercloud.ErrResult +} + +// UploadImageResult contains the response body and error from an UploadImage +// request. +type UploadImageResult struct { + gophercloud.Result +} + +// SetImageMetadataResult contains the response body and error from a SetImageMetadata +// request. +type SetImageMetadataResult struct { + gophercloud.ErrResult +} + +// ReserveResult contains the response body and error from a Reserve request. +type ReserveResult struct { + gophercloud.ErrResult +} + +// UnreserveResult contains the response body and error from an Unreserve +// request. +type UnreserveResult struct { + gophercloud.ErrResult +} + +// TerminateConnectionResult contains the response body and error from a +// TerminateConnection request. +type TerminateConnectionResult struct { + gophercloud.ErrResult +} + +// InitializeConnectionResult contains the response body and error from an +// InitializeConnection request. +type InitializeConnectionResult struct { + gophercloud.Result +} + +// ExtendSizeResult contains the response body and error from an ExtendSize request. +type ExtendSizeResult struct { + gophercloud.ErrResult +} + +// Extract will get the connection information out of the +// InitializeConnectionResult object. +// +// This will be a generic map[string]interface{} and the results will be +// dependent on the type of connection made. +func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) { + var s struct { + ConnectionInfo map[string]interface{} `json:"connection_info"` + } + err := r.ExtractInto(&s) + return s.ConnectionInfo, err +} + +// ImageVolumeType contains volume type information obtained from an UploadImage +// action. +type ImageVolumeType struct { + // The ID of a volume type. + ID string `json:"id"` + + // Human-readable display name for the volume type. + Name string `json:"name"` + + // Human-readable description for the volume type. + Description string `json:"display_description"` + + // Flag for public access. + IsPublic bool `json:"is_public"` + + // Extra specifications for volume type. + ExtraSpecs map[string]interface{} `json:"extra_specs"` + + // ID of quality of service specs. + QosSpecsID string `json:"qos_specs_id"` + + // Flag for deletion status of volume type. + Deleted bool `json:"deleted"` + + // The date when volume type was deleted. + DeletedAt time.Time `json:"-"` + + // The date when volume type was created. + CreatedAt time.Time `json:"-"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` +} + +func (r *ImageVolumeType) UnmarshalJSON(b []byte) error { + type tmp ImageVolumeType + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ImageVolumeType(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.DeletedAt = time.Time(s.DeletedAt) + + return err +} + +// VolumeImage contains information about volume uploaded to an image service. +type VolumeImage struct { + // The ID of a volume an image is created from. + VolumeID string `json:"id"` + + // Container format, may be bare, ovf, ova, etc.
+ ContainerFormat string `json:"container_format"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format"` + + // Human-readable description for the volume. + Description string `json:"display_description"` + + // The ID of the created image. + ImageID string `json:"image_id"` + + // Human-readable display name for the image. + ImageName string `json:"image_name"` + + // Size of the volume in GB. + Size int `json:"size"` + + // Current status of the volume. + Status string `json:"status"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` + + // Volume type object of used volume. + VolumeType ImageVolumeType `json:"volume_type"` +} + +func (r *VolumeImage) UnmarshalJSON(b []byte) error { + type tmp VolumeImage + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = VolumeImage(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// Extract will get an object with info about the uploaded image out of the +// UploadImageResult object. +func (r UploadImageResult) Extract() (VolumeImage, error) { + var s struct { + VolumeImage VolumeImage `json:"os-volume_upload_image"` + } + err := r.ExtractInto(&s) + return s.VolumeImage, err +} + +// ForceDeleteResult contains the response body and error from a ForceDelete request. +type ForceDeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go new file mode 100644 index 00000000..20486ed7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go @@ -0,0 +1,7 @@ +package volumeactions + +import "github.com/gophercloud/gophercloud" + +func actionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go new file mode 100644 index 00000000..307b8b12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go new file mode 100644 index 00000000..1da94238 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go @@ -0,0 +1,172 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. 
This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + Size int `json:"size" required:"true"` + AvailabilityZone string `json:"availability_zone,omitempty"` + Description string `json:"display_description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + Name string `json:"display_name,omitempty"` + SnapshotID string `json:"snapshot_id,omitempty"` + SourceVolID string `json:"source_volid,omitempty"` + ImageID string `json:"imageRef,omitempty"` + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant volumes. + AllTenants bool `q:"all_tenants"` + // List only volumes that contain Metadata. + Metadata map[string]string `q:"metadata"` + // List only volumes that have Name as the display name. + Name string `q:"display_name"` + // List only volumes that have a status of Status. + Status string `q:"status"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumePage{pagination.SinglePageBase(r)} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object.
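+// Fields left nil are omitted from the request body; a minimal sketch (the
+// variable names are illustrative):
+//
+//	newName := "vol-renamed"
+//	vol, err := volumes.Update(client, volumeID, volumes.UpdateOpts{Name: &newName}).Extract()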
+type UpdateOpts struct { + Name *string `json:"display_name,omitempty"` + Description *string `json:"display_description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVolumeUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convenience function that returns a volume's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go new file mode 100644 index 00000000..7f68d148 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go @@ -0,0 +1,109 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Current status of the volume. + Status string `json:"status"` + // Human-readable display name for the volume. + Name string `json:"display_name"` + // Instances onto which the volume is attached. + Attachments []map[string]interface{} `json:"attachments"` + // This parameter is no longer used. + AvailabilityZone string `json:"availability_zone"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // Human-readable description for the volume. + Description string `json:"display_description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // Unique identifier for the volume. + ID string `json:"id"` + // Size of the volume in GB.
+ Size int `json:"size"` +} + +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + + return err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// VolumePage is a pagination.Pager that is returned from a call to the List function. +type VolumePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a VolumePage contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s struct { + Volumes []Volume `json:"volumes"` + } + err := (r.(VolumePage)).ExtractInto(&s) + return s.Volumes, err +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s struct { + Volume *Volume `json:"volume"` + } + err := r.ExtractInto(&s) + return s.Volume, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go new file mode 100644 index 00000000..8a00f97e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go new file mode 100644 index 00000000..e86c1b4b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the number of seconds defined.
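+// For example, to wait up to 60 seconds for a volume to become available
+// (names are illustrative):
+//
+//	err := volumes.WaitForStatus(client, volumeID, "available", 60)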
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go new file mode 100644 index 00000000..198f8307 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. +package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go new file mode 100644 index 00000000..939e5020 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go @@ -0,0 +1,175 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + VolumeID string `json:"volume_id" required:"true"` + Force bool `json:"force,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "snapshot") +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSnapshotCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. 
+type ListOptsBuilder interface { + ToSnapshotListQuery() (string, error) +} + +// ListOpts holds options for listing Snapshots. It is passed to the +// snapshots.List function. +type ListOpts struct { + // AllTenants will retrieve snapshots of all tenants/projects. + AllTenants bool `q:"all_tenants"` + + // Name will filter by the specified snapshot name. + Name string `q:"name"` + + // Status will filter by the specified status. + Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required to use this. + TenantID string `q:"project_id"` + + // VolumeID will filter by a specified volume ID. + VolumeID string `q:"volume_id"` +} + +// ToSnapshotListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Snapshots optionally limited by the conditions provided in +// ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SnapshotPage{pagination.SinglePageBase(r)} + }) +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateMetadataOptsBuilder interface { + ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) +} + +// UpdateMetadataOpts contains options for updating an existing Snapshot. This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. +func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// UpdateMetadata will update the Snapshot with provided information. To +// extract the updated Snapshot from the response, call the ExtractMetadata +// method on the UpdateMetadataResult. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToSnapshotUpdateMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convenience function that returns a snapshot's ID given its name.
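+// For example (the name is illustrative):
+//
+//	id, err := snapshots.IDFromName(client, "nightly-backup")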
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSnapshots(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go new file mode 100644 index 00000000..0b444d08 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go @@ -0,0 +1,120 @@ +package snapshots + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Snapshot contains all the information associated with a Cinder Snapshot. +type Snapshot struct { + // Unique identifier. + ID string `json:"id"` + + // Date created. + CreatedAt time.Time `json:"-"` + + // Date updated. + UpdatedAt time.Time `json:"-"` + + // Display name. + Name string `json:"name"` + + // Display description. + Description string `json:"description"` + + // ID of the Volume from which this Snapshot was created. + VolumeID string `json:"volume_id"` + + // Current status of the Snapshot. + Status string `json:"status"` + + // Size of the Snapshot, in GB. + Size int `json:"size"` + + // User-defined key-value pairs. + Metadata map[string]string `json:"metadata"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SnapshotPage is a pagination.Pager that is returned from a call to the List function. +type SnapshotPage struct { + pagination.SinglePageBase +} + +func (r *Snapshot) UnmarshalJSON(b []byte) error { + type tmp Snapshot + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Snapshot(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// IsEmpty returns true if a SnapshotPage contains no Snapshots. +func (r SnapshotPage) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + return len(volumes) == 0, err +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { + var s struct { + Snapshots []Snapshot `json:"snapshots"` + } + err := (r.(SnapshotPage)).ExtractInto(&s) + return s.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request.
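+// Call ExtractMetadata on it to obtain the resulting map; a sketch (names are
+// illustrative):
+//
+//	md, err := snapshots.UpdateMetadata(client, snapshotID, opts).ExtractMetadata()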
+type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. +func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go new file mode 100644 index 00000000..77804374 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go new file mode 100644 index 00000000..40fbb827 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go new file mode 100644 index 00000000..307b8b12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. 
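+//
+// An illustrative sketch (assuming "client" is an authenticated block storage
+// *gophercloud.ServiceClient): create a volume, then wait for it to become
+// available before attaching or snapshotting it:
+//
+//	vol, err := volumes.Create(client, volumes.CreateOpts{Size: 10}).Extract()
+//	if err == nil {
+//		err = volumes.WaitForStatus(client, vol.ID, "available", 60)
+//	}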
+package volumes
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go
new file mode 100644
index 00000000..c27ddbf6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go
@@ -0,0 +1,235 @@
+package volumes
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToVolumeCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains options for creating a Volume. This object is passed to
+// the volumes.Create function. For more information about these parameters,
+// see the Volume object.
+type CreateOpts struct {
+	// The size of the volume, in GB
+	Size int `json:"size" required:"true"`
+	// The availability zone
+	AvailabilityZone string `json:"availability_zone,omitempty"`
+	// ConsistencyGroupID is the ID of a consistency group
+	ConsistencyGroupID string `json:"consistencygroup_id,omitempty"`
+	// The volume description
+	Description string `json:"description,omitempty"`
+	// One or more metadata key and value pairs to associate with the volume
+	Metadata map[string]string `json:"metadata,omitempty"`
+	// The volume name
+	Name string `json:"name,omitempty"`
+	// the ID of the existing volume snapshot
+	SnapshotID string `json:"snapshot_id,omitempty"`
+	// SourceReplica is a UUID of an existing volume to replicate with
+	SourceReplica string `json:"source_replica,omitempty"`
+	// the ID of the existing volume
+	SourceVolID string `json:"source_volid,omitempty"`
+	// The ID of the image from which you want to create the volume.
+	// Required to create a bootable volume.
+	ImageID string `json:"imageRef,omitempty"`
+	// The associated volume type
+	VolumeType string `json:"volume_type,omitempty"`
+}
+
+// ToVolumeCreateMap assembles a request body based on the contents of a
+// CreateOpts.
+func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Create will create a new Volume based on the values in CreateOpts. To extract
+// the Volume object from the response, call the Extract method on the
+// CreateResult.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToVolumeCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// DeleteOptsBuilder allows extensions to add additional parameters to the
+// Delete request.
+type DeleteOptsBuilder interface {
+	ToVolumeDeleteQuery() (string, error)
+}
+
+// DeleteOpts contains options for deleting a Volume. This object is passed to
+// the volumes.Delete function.
+type DeleteOpts struct {
+	// Delete all snapshots of this volume as well.
+	Cascade bool `q:"cascade"`
+}
+
+// ToVolumeDeleteQuery formats a DeleteOpts into a query string.
+func (opts DeleteOpts) ToVolumeDeleteQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// Delete will delete the existing Volume with the provided ID.
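+//
+// An illustrative sketch of a cascading delete, which also removes the
+// volume's snapshots (assuming the cloud supports the cascade parameter):
+//
+//	err := volumes.Delete(client, volumeID, volumes.DeleteOpts{Cascade: true}).ExtractErr()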
+func Delete(client *gophercloud.ServiceClient, id string, opts DeleteOptsBuilder) (r DeleteResult) {
+	url := deleteURL(client, id)
+	if opts != nil {
+		query, err := opts.ToVolumeDeleteQuery()
+		if err != nil {
+			r.Err = err
+			return
+		}
+		url += query
+	}
+	_, r.Err = client.Delete(url, nil)
+	return
+}
+
+// Get retrieves the Volume with the provided ID. To extract the Volume object
+// from the response, call the Extract method on the GetResult.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToVolumeListQuery() (string, error)
+}
+
+// ListOpts holds options for listing Volumes. It is passed to the volumes.List
+// function.
+type ListOpts struct {
+	// AllTenants will retrieve volumes of all tenants/projects.
+	AllTenants bool `q:"all_tenants"`
+
+	// Metadata will filter results based on specified metadata.
+	Metadata map[string]string `q:"metadata"`
+
+	// Name will filter by the specified volume name.
+	Name string `q:"name"`
+
+	// Status will filter by the specified status.
+	Status string `q:"status"`
+
+	// TenantID will filter by a specific tenant/project ID.
+	// Setting AllTenants is required for this.
+	TenantID string `q:"project_id"`
+
+	// Comma-separated list of sort keys and optional sort directions in the
+	// form of <key>[:<direction>].
+	Sort string `q:"sort"`
+
+	// Requests a page size of items.
+	Limit int `q:"limit"`
+
+	// Used in conjunction with limit to return a slice of items.
+	Offset int `q:"offset"`
+
+	// The ID of the last-seen item.
+	Marker string `q:"marker"`
+}
+
+// ToVolumeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToVolumeListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns Volumes optionally limited by the conditions provided in ListOpts.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToVolumeListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return VolumePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVolumeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains options for updating an existing Volume. This object is passed
+// to the volumes.Update function. For more information about the parameters, see
+// the Volume object.
+type UpdateOpts struct {
+	Name        *string           `json:"name,omitempty"`
+	Description *string           `json:"description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToVolumeUpdateMap assembles a request body based on the contents of an
+// UpdateOpts.
+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Update will update the Volume with provided information. To extract the updated
+// Volume from the response, call the Extract method on the UpdateResult.
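+//
+// Name and Description are pointers in UpdateOpts so that fields left unset
+// can be distinguished from fields explicitly set to an empty string. An
+// illustrative sketch:
+//
+//	name := "renamed-volume"
+//	vol, err := volumes.Update(client, volumeID, volumes.UpdateOpts{Name: &name}).Extract()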
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVolumeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a volume's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+
+	listOpts := ListOpts{
+		Name: name,
+	}
+
+	pages, err := List(client, listOpts).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractVolumes(pages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, s := range all {
+		if s.Name == name {
+			count++
+			id = s.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go
new file mode 100644
index 00000000..96572b01
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go
@@ -0,0 +1,167 @@
+package volumes
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Attachment represents a Volume Attachment record.
+type Attachment struct {
+	AttachedAt   time.Time `json:"-"`
+	AttachmentID string    `json:"attachment_id"`
+	Device       string    `json:"device"`
+	HostName     string    `json:"host_name"`
+	ID           string    `json:"id"`
+	ServerID     string    `json:"server_id"`
+	VolumeID     string    `json:"volume_id"`
+}
+
+// UnmarshalJSON converts the JSON API response into an Attachment, handling
+// the non-standard timestamp format.
+func (r *Attachment) UnmarshalJSON(b []byte) error {
+	type tmp Attachment
+	var s struct {
+		tmp
+		AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Attachment(s.tmp)
+
+	r.AttachedAt = time.Time(s.AttachedAt)
+
+	return err
+}
+
+// Volume contains all the information associated with an OpenStack Volume.
+type Volume struct {
+	// Unique identifier for the volume.
+	ID string `json:"id"`
+	// Current status of the volume.
+	Status string `json:"status"`
+	// Size of the volume in GB.
+	Size int `json:"size"`
+	// AvailabilityZone is which availability zone the volume is in.
+	AvailabilityZone string `json:"availability_zone"`
+	// The date when this volume was created.
+	CreatedAt time.Time `json:"-"`
+	// The date when this volume was last updated
+	UpdatedAt time.Time `json:"-"`
+	// Instances onto which the volume is attached.
+	Attachments []Attachment `json:"attachments"`
+	// Human-readable display name for the volume.
+	Name string `json:"name"`
+	// Human-readable description for the volume.
+	Description string `json:"description"`
+	// The type of volume to create, either SATA or SSD.
+	VolumeType string `json:"volume_type"`
+	// The ID of the snapshot from which the volume was created
+	SnapshotID string `json:"snapshot_id"`
+	// The ID of another block storage volume from which the current volume was created
+	SourceVolID string `json:"source_volid"`
+	// Arbitrary key-value pairs defined by the user.
+	Metadata map[string]string `json:"metadata"`
+	// UserID is the ID of the user who created the volume.
+ UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. + ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. + ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. + Multiattach bool `json:"multiattach"` +} + +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r VolumePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volumes_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s []Volume + err := ExtractVolumesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s Volume + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume") +} + +func ExtractVolumesInto(r pagination.Page, v interface{}) error { + return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go new file mode 100644 index 00000000..17072490 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go new file mode 100644 index 00000000..e86c1b4b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go new file mode 100644 index 00000000..198f8307 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. +package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go new file mode 100644 index 00000000..22531c9f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go @@ -0,0 +1,186 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. 
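+//
+// An illustrative sketch (field values are examples only):
+//
+//	snap, err := snapshots.Create(client, snapshots.CreateOpts{
+//		VolumeID: "volume-uuid",
+//		Name:     "pre-upgrade",
+//	}).Extract()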
+type CreateOpts struct {
+	VolumeID    string            `json:"volume_id" required:"true"`
+	Force       bool              `json:"force,omitempty"`
+	Name        string            `json:"name,omitempty"`
+	Description string            `json:"description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToSnapshotCreateMap assembles a request body based on the contents of a
+// CreateOpts.
+func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "snapshot")
+}
+
+// Create will create a new Snapshot based on the values in CreateOpts. To
+// extract the Snapshot object from the response, call the Extract method on the
+// CreateResult.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSnapshotCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// Delete will delete the existing Snapshot with the provided ID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get retrieves the Snapshot with the provided ID. To extract the Snapshot
+// object from the response, call the Extract method on the GetResult.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToSnapshotListQuery() (string, error)
+}
+
+// ListOpts holds options for listing Snapshots. It is passed to the
+// snapshots.List function.
+type ListOpts struct {
+	// AllTenants will retrieve snapshots of all tenants/projects.
+	AllTenants bool `q:"all_tenants"`
+
+	// Name will filter by the specified snapshot name.
+	Name string `q:"name"`
+
+	// Status will filter by the specified status.
+	Status string `q:"status"`
+
+	// TenantID will filter by a specific tenant/project ID.
+	// Setting AllTenants is required to use this.
+	TenantID string `q:"project_id"`
+
+	// VolumeID will filter by a specified volume ID.
+	VolumeID string `q:"volume_id"`
+
+	// Comma-separated list of sort keys and optional sort directions in the
+	// form of <key>[:<direction>].
+	Sort string `q:"sort"`
+
+	// Requests a page size of items.
+	Limit int `q:"limit"`
+
+	// Used in conjunction with limit to return a slice of items.
+	Offset int `q:"offset"`
+
+	// The ID of the last-seen item.
+	Marker string `q:"marker"`
+}
+
+// ToSnapshotListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToSnapshotListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns Snapshots optionally limited by the conditions provided in
+// ListOpts.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToSnapshotListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return SnapshotPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateMetadataOptsBuilder interface {
+	ToSnapshotUpdateMetadataMap() (map[string]interface{}, error)
+}
+
+// UpdateMetadataOpts contains options for updating an existing Snapshot.
This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. +func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// UpdateMetadata will update the Snapshot with provided information. To +// extract the updated Snapshot from the response, call the ExtractMetadata +// method on the UpdateMetadataResult. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToSnapshotUpdateMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a snapshot's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSnapshots(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go new file mode 100644 index 00000000..1ffae8f6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go @@ -0,0 +1,132 @@ +package snapshots + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Snapshot contains all the information associated with a Cinder Snapshot. +type Snapshot struct { + // Unique identifier. + ID string `json:"id"` + + // Date created. + CreatedAt time.Time `json:"-"` + + // Date updated. + UpdatedAt time.Time `json:"-"` + + // Display name. + Name string `json:"name"` + + // Display description. + Description string `json:"description"` + + // ID of the Volume from which this Snapshot was created. + VolumeID string `json:"volume_id"` + + // Currect status of the Snapshot. + Status string `json:"status"` + + // Size of the Snapshot, in GB. + Size int `json:"size"` + + // User-defined key-value pairs. + Metadata map[string]string `json:"metadata"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SnapshotPage is a pagination.Pager that is returned from a call to the List function. 
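+// Unlike its v2 counterpart, which is backed by a single page, this page type
+// embeds a LinkedPageBase, since the v3 API can return paginated results via
+// snapshots_links.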
+type SnapshotPage struct { + pagination.LinkedPageBase +} + +// UnmarshalJSON converts our JSON API response into our snapshot struct +func (r *Snapshot) UnmarshalJSON(b []byte) error { + type tmp Snapshot + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Snapshot(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// IsEmpty returns true if a SnapshotPage contains no Snapshots. +func (r SnapshotPage) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + return len(volumes) == 0, err +} + +func (page SnapshotPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"snapshots_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { + var s struct { + Snapshots []Snapshot `json:"snapshots"` + } + err := (r.(SnapshotPage)).ExtractInto(&s) + return s.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. +type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. 
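+//
+// An illustrative sketch:
+//
+//	snap, err := snapshots.Get(client, snapshotID).Extract()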
+func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go new file mode 100644 index 00000000..77804374 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go new file mode 100644 index 00000000..40fbb827 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go new file mode 100644 index 00000000..307b8b12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go new file mode 100644 index 00000000..25f70b27 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go @@ -0,0 +1,237 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
+type CreateOptsBuilder interface {
+	ToVolumeCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains options for creating a Volume. This object is passed to
+// the volumes.Create function. For more information about these parameters,
+// see the Volume object.
+type CreateOpts struct {
+	// The size of the volume, in GB
+	Size int `json:"size" required:"true"`
+	// The availability zone
+	AvailabilityZone string `json:"availability_zone,omitempty"`
+	// ConsistencyGroupID is the ID of a consistency group
+	ConsistencyGroupID string `json:"consistencygroup_id,omitempty"`
+	// The volume description
+	Description string `json:"description,omitempty"`
+	// One or more metadata key and value pairs to associate with the volume
+	Metadata map[string]string `json:"metadata,omitempty"`
+	// The volume name
+	Name string `json:"name,omitempty"`
+	// the ID of the existing volume snapshot
+	SnapshotID string `json:"snapshot_id,omitempty"`
+	// SourceReplica is a UUID of an existing volume to replicate with
+	SourceReplica string `json:"source_replica,omitempty"`
+	// the ID of the existing volume
+	SourceVolID string `json:"source_volid,omitempty"`
+	// The ID of the image from which you want to create the volume.
+	// Required to create a bootable volume.
+	ImageID string `json:"imageRef,omitempty"`
+	// The associated volume type
+	VolumeType string `json:"volume_type,omitempty"`
+	// Multiattach denotes if the volume is multi-attach capable.
+	Multiattach bool `json:"multiattach,omitempty"`
+}
+
+// ToVolumeCreateMap assembles a request body based on the contents of a
+// CreateOpts.
+func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Create will create a new Volume based on the values in CreateOpts. To extract
+// the Volume object from the response, call the Extract method on the
+// CreateResult.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToVolumeCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
+
+// DeleteOptsBuilder allows extensions to add additional parameters to the
+// Delete request.
+type DeleteOptsBuilder interface {
+	ToVolumeDeleteQuery() (string, error)
+}
+
+// DeleteOpts contains options for deleting a Volume. This object is passed to
+// the volumes.Delete function.
+type DeleteOpts struct {
+	// Delete all snapshots of this volume as well.
+	Cascade bool `q:"cascade"`
+}
+
+// ToVolumeDeleteQuery formats a DeleteOpts into a query string.
+func (opts DeleteOpts) ToVolumeDeleteQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// Delete will delete the existing Volume with the provided ID.
+func Delete(client *gophercloud.ServiceClient, id string, opts DeleteOptsBuilder) (r DeleteResult) {
+	url := deleteURL(client, id)
+	if opts != nil {
+		query, err := opts.ToVolumeDeleteQuery()
+		if err != nil {
+			r.Err = err
+			return
+		}
+		url += query
+	}
+	_, r.Err = client.Delete(url, nil)
+	return
+}
+
+// Get retrieves the Volume with the provided ID. To extract the Volume object
+// from the response, call the Extract method on the GetResult.
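+//
+// An illustrative sketch:
+//
+//	vol, err := volumes.Get(client, volumeID).Extract()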
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToVolumeListQuery() (string, error)
+}
+
+// ListOpts holds options for listing Volumes. It is passed to the volumes.List
+// function.
+type ListOpts struct {
+	// AllTenants will retrieve volumes of all tenants/projects.
+	AllTenants bool `q:"all_tenants"`
+
+	// Metadata will filter results based on specified metadata.
+	Metadata map[string]string `q:"metadata"`
+
+	// Name will filter by the specified volume name.
+	Name string `q:"name"`
+
+	// Status will filter by the specified status.
+	Status string `q:"status"`
+
+	// TenantID will filter by a specific tenant/project ID.
+	// Setting AllTenants is required for this.
+	TenantID string `q:"project_id"`
+
+	// Comma-separated list of sort keys and optional sort directions in the
+	// form of <key>[:<direction>].
+	Sort string `q:"sort"`
+
+	// Requests a page size of items.
+	Limit int `q:"limit"`
+
+	// Used in conjunction with limit to return a slice of items.
+	Offset int `q:"offset"`
+
+	// The ID of the last-seen item.
+	Marker string `q:"marker"`
+}
+
+// ToVolumeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToVolumeListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns Volumes optionally limited by the conditions provided in ListOpts.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToVolumeListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return VolumePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVolumeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains options for updating an existing Volume. This object is passed
+// to the volumes.Update function. For more information about the parameters, see
+// the Volume object.
+type UpdateOpts struct {
+	Name        *string           `json:"name,omitempty"`
+	Description *string           `json:"description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToVolumeUpdateMap assembles a request body based on the contents of an
+// UpdateOpts.
+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Update will update the Volume with provided information. To extract the updated
+// Volume from the response, call the Extract method on the UpdateResult.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVolumeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a volume's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go new file mode 100644 index 00000000..3a33b586 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go @@ -0,0 +1,172 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Attachment represents a Volume Attachment record +type Attachment struct { + AttachedAt time.Time `json:"-"` + AttachmentID string `json:"attachment_id"` + Device string `json:"device"` + HostName string `json:"host_name"` + ID string `json:"id"` + ServerID string `json:"server_id"` + VolumeID string `json:"volume_id"` +} + +// UnmarshalJSON is our unmarshalling helper +func (r *Attachment) UnmarshalJSON(b []byte) error { + type tmp Attachment + var s struct { + tmp + AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Attachment(s.tmp) + + r.AttachedAt = time.Time(s.AttachedAt) + + return err +} + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Unique identifier for the volume. + ID string `json:"id"` + // Current status of the volume. + Status string `json:"status"` + // Size of the volume in GB. + Size int `json:"size"` + // AvailabilityZone is which availability zone the volume is in. + AvailabilityZone string `json:"availability_zone"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // The date when this volume was last updated + UpdatedAt time.Time `json:"-"` + // Instances onto which the volume is attached. + Attachments []Attachment `json:"attachments"` + // Human-readable display name for the volume. + Name string `json:"name"` + // Human-readable description for the volume. + Description string `json:"description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // UserID is the id of the user who created the volume. + UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. 
+ ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. + ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. + Multiattach bool `json:"multiattach"` + // Image metadata entries, only included for volumes that were created from an image, or from a snapshot of a volume originally created from an image. + VolumeImageMetadata map[string]string `json:"volume_image_metadata"` +} + +// UnmarshalJSON another unmarshalling function +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +func (page VolumePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volumes_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s []Volume + err := ExtractVolumesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s Volume + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractInto converts our response data into a volume struct +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume") +} + +// ExtractVolumesInto similar to ExtractInto but operates on a `list` of volumes +func ExtractVolumesInto(r pagination.Page, v interface{}) error { + return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go new file mode 100644 index 00000000..17072490 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go new file mode 100644 index 00000000..e86c1b4b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go new file mode 100644 index 00000000..50f23971 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -0,0 +1,438 @@ +package openstack + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +const ( + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. + // The version can be anything from v3 to v3.x. + v3 = "v3" +) + +/* +NewClient prepares an unauthenticated ProviderClient instance. +Most users will probably prefer using the AuthenticatedClient function +instead. + +This is useful if you wish to explicitly control the version of the identity +service that's used for authentication explicitly, for example. 
+
+A basic example of using this would be:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.NewClient(ao.IdentityEndpoint)
+	client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
+*/
+func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
+	base, err := utils.BaseEndpoint(endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	endpoint = gophercloud.NormalizeURL(endpoint)
+	base = gophercloud.NormalizeURL(base)
+
+	p := new(gophercloud.ProviderClient)
+	p.IdentityBase = base
+	p.IdentityEndpoint = endpoint
+	p.UseTokenLock()
+
+	return p, nil
+}
+
+/*
+AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
+specified by the options, acquires a token, and returns a Provider Client
+instance that's ready to operate.
+
+If the full path to a versioned identity endpoint was specified (example:
+http://example.com:5000/v3), that path will be used as the endpoint to query.
+
+If a versionless endpoint was specified (example: http://example.com:5000/),
+the endpoint will be queried to determine which versions of the identity service
+are available, and the most recent or most supported version will be chosen.
+
+Example:
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+*/
+func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
+	client, err := NewClient(options.IdentityEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	err = Authenticate(client, options)
+	if err != nil {
+		return nil, err
+	}
+	return client, nil
+}
+
+// Authenticate or re-authenticate against the most recent identity service
+// supported at the provided endpoint.
+func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
+	versions := []*utils.Version{
+		{ID: v2, Priority: 20, Suffix: "/v2.0/"},
+		{ID: v3, Priority: 30, Suffix: "/v3/"},
+	}
+
+	chosen, endpoint, err := utils.ChooseVersion(client, versions)
+	if err != nil {
+		return err
+	}
+
+	switch chosen.ID {
+	case v2:
+		return v2auth(client, endpoint, options, gophercloud.EndpointOpts{})
+	case v3:
+		return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{})
+	default:
+		// The switch statement must be out of date from the versions list.
+		return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
+	}
+}
+
+// AuthenticateV2 explicitly authenticates against the identity v2 endpoint.
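+// This is mainly useful when the target cloud is known to expose only the v2
+// identity API, or when version discovery should be skipped; most callers can
+// let Authenticate above select a version instead.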
+func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + return v2auth(client, "", options, eo) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + v2Client, err := NewIdentityV2(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + v2Opts := tokens2.AuthOptions{ + IdentityEndpoint: options.IdentityEndpoint, + Username: options.Username, + Password: options.Password, + TenantID: options.TenantID, + TenantName: options.TenantName, + AllowReauth: options.AllowReauth, + TokenID: options.TokenID, + } + + result := tokens2.Create(v2Client, v2Opts) + + err = client.SetTokenAndAuthResult(result) + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + tao := options + tao.AllowReauth = false + client.ReauthFunc = func() error { + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + return v3auth(client, "", options, eo) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + // Override the generated service endpoint with the one returned by the version endpoint. + v3Client, err := NewIdentityV3(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + result := tokens3.Create(v3Client, opts) + + err = client.SetTokenAndAuthResult(result) + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.SetThrowaway(true) + tac.ReauthFunc = nil + tac.SetTokenAndAuthResult(nil) + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } + client.ReauthFunc = func() error { + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.CopyTokenFrom(&tac) + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. 
+func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v2.0/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. +func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v3/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + url, err := client.EndpointLocator(eo) + if err != nil { + return sc, err + } + sc.ProviderClient = client + sc.Endpoint = url + sc.Type = clientType + return sc, nil +} + +// NewBareMetalV1 creates a ServiceClient that may be used with the v1 +// bare metal package. +func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal") +} + +// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 +// bare metal introspection package. +func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "baremetal-inspector") +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "object-store") +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "compute") +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "network") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. 
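+// The v1, v2, and v3 block storage constructors below differ only in the
+// service type they request from the catalog ("volume", "volumev2", and
+// "volumev3"). An illustrative sketch pairing a client with the volumes
+// packages above (the region name is an assumption):
+//
+//	bs, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{Region: "RegionOne"})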
+func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volume") +} + +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. +func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev2") +} + +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. +func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. +func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "sharev2") +} + +// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "cdn") +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 +// orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "orchestration") +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. +func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "database") +} + +// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS +// service. +func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "dns") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 +// image service. +func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "image") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. +func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "load-balancer") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. +func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. 
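+// The supplied clientID is sent as the Client-ID header on every request made
+// through the returned client, which the messaging service expects.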
+func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} + +// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key +// manager service. +func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "key-manager") + sc.ResourceBase = sc.Endpoint + "v1/" + return sc, err +} + +// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management +// package. +func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container-infra") +} + +// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. +func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "workflowv2") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go new file mode 100644 index 00000000..3653122b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go @@ -0,0 +1,52 @@ +/* +Package attachinterfaces provides the ability to retrieve and manage network +interfaces through Nova. 
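+
+The examples below assume a compute service client. A minimal sketch of
+obtaining one (the region name is an assumption; credentials are read from
+the environment):
+
+	authOpts, err := openstack.AuthOptionsFromEnv()
+	if err != nil {
+		panic(err)
+	}
+
+	provider, err := openstack.AuthenticatedClient(authOpts)
+	if err != nil {
+		panic(err)
+	}
+
+	computeClient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: "RegionOne", // assumed region name
+	})
+	if err != nil {
+		panic(err)
+	}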
+
+Example of Listing a Server's Interfaces
+
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	allPages, err := attachinterfaces.List(computeClient, serverID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allInterfaces, err := attachinterfaces.ExtractInterfaces(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, iface := range allInterfaces {
+		fmt.Printf("%+v\n", iface)
+	}
+
+Example to Get a Server's Interface
+
+	portID := "0dde1598-b374-474e-986f-5b8dd1df1d4e"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	iface, err := attachinterfaces.Get(computeClient, serverID, portID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a new Interface attachment on the Server
+
+	networkID := "8a5fe506-7e9f-4091-899b-96336909d93c"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	attachOpts := attachinterfaces.CreateOpts{
+		NetworkID: networkID,
+	}
+	iface, err := attachinterfaces.Create(computeClient, serverID, attachOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Interface attachment from the Server
+
+	portID := "0dde1598-b374-474e-986f-5b8dd1df1d4e"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	err := attachinterfaces.Delete(computeClient, serverID, portID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package attachinterfaces
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go
new file mode 100644
index 00000000..874f7a61
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go
@@ -0,0 +1,72 @@
+package attachinterfaces
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List makes a request against the nova API to list the server's interfaces.
+func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager {
+	return pagination.NewPager(client, listInterfaceURL(client, serverID), func(r pagination.PageResult) pagination.Page {
+		return InterfacePage{pagination.SinglePageBase(r)}
+	})
+}
+
+// Get requests details on a single interface attachment by the server and port IDs.
+func Get(client *gophercloud.ServiceClient, serverID, portID string) (r GetResult) {
+	_, r.Err = client.Get(getInterfaceURL(client, serverID, portID), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToAttachInterfacesCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies parameters of a new interface attachment.
+type CreateOpts struct {
+	// PortID is the ID of the port for which you want to create an interface.
+	// The NetworkID and PortID parameters are mutually exclusive.
+	// If you do not specify the PortID parameter, the OpenStack Networking API
+	// v2.0 allocates a port and creates an interface for it on the network.
+	PortID string `json:"port_id,omitempty"`
+
+	// NetworkID is the ID of the network for which you want to create an interface.
+	// The NetworkID and PortID parameters are mutually exclusive.
+	// If you do not specify the NetworkID parameter, the OpenStack Networking
+	// API v2.0 uses the network information cache that is associated with the instance.
+	NetworkID string `json:"net_id,omitempty"`
+
+	// Slice of FixedIPs. If you request a specific FixedIP address without a
+	// NetworkID, the request returns a Bad Request (400) response code.
+	// Note: this uses the FixedIP struct, but only the IPAddress field can be used.
+	FixedIPs []FixedIP `json:"fixed_ips,omitempty"`
+}
+
+// ToAttachInterfacesCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToAttachInterfacesCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "interfaceAttachment")
+}
+
+// Create requests the creation of a new interface attachment on the server.
+func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToAttachInterfacesCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createInterfaceURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete makes a request against the nova API to detach a single interface from the server.
+// It needs server and port IDs to make such a request.
+func Delete(client *gophercloud.ServiceClient, serverID, portID string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteInterfaceURL(client, serverID, portID), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go
new file mode 100644
index 00000000..7d15e1ec
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go
@@ -0,0 +1,80 @@
+package attachinterfaces
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type attachInterfaceResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any attachInterfaceResult as an Interface, if possible.
+func (r attachInterfaceResult) Extract() (*Interface, error) {
+	var s struct {
+		Interface *Interface `json:"interfaceAttachment"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Interface, err
+}
+
+// GetResult is the response from a Get operation. Call its Extract
+// method to interpret it as an Interface.
+type GetResult struct {
+	attachInterfaceResult
+}
+
+// CreateResult is the response from a Create operation. Call its Extract
+// method to interpret it as an Interface.
+type CreateResult struct {
+	attachInterfaceResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// FixedIP represents a Fixed IP Address.
+// This struct is also used when creating an attachment,
+// but it is not possible to specify a SubnetID.
+type FixedIP struct {
+	SubnetID  string `json:"subnet_id,omitempty"`
+	IPAddress string `json:"ip_address"`
+}
+
+// Interface represents a network interface on a server.
+type Interface struct {
+	PortState string    `json:"port_state"`
+	FixedIPs  []FixedIP `json:"fixed_ips"`
+	PortID    string    `json:"port_id"`
+	NetID     string    `json:"net_id"`
+	MACAddr   string    `json:"mac_addr"`
+}
+
+// InterfacePage abstracts the raw results of making a List() request against
+// the API.
+//
+// As OpenStack extensions may freely alter the response bodies of structures
+// returned to the client, you may only safely access the data provided through
+// the ExtractInterfaces call.
+type InterfacePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if an InterfacePage contains no interfaces.
+func (r InterfacePage) IsEmpty() (bool, error) {
+	interfaces, err := ExtractInterfaces(r)
+	return len(interfaces) == 0, err
+}
+
+// ExtractInterfaces interprets the results of a single page from a List() call,
+// producing a slice of Interface structs.
+func ExtractInterfaces(r pagination.Page) ([]Interface, error) {
+	var s struct {
+		Interfaces []Interface `json:"interfaceAttachments"`
+	}
+	err := (r.(InterfacePage)).ExtractInto(&s)
+	return s.Interfaces, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go
new file mode 100644
index 00000000..50292e8b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go
@@ -0,0 +1,19 @@
+package attachinterfaces
+
+import "github.com/gophercloud/gophercloud"
+
+func listInterfaceURL(client *gophercloud.ServiceClient, serverID string) string {
+	return client.ServiceURL("servers", serverID, "os-interface")
+}
+
+func getInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string {
+	return client.ServiceURL("servers", serverID, "os-interface", portID)
+}
+
+func createInterfaceURL(client *gophercloud.ServiceClient, serverID string) string {
+	return client.ServiceURL("servers", serverID, "os-interface")
+}
+
+func deleteInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string {
+	return client.ServiceURL("servers", serverID, "os-interface", portID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go
new file mode 100644
index 00000000..29b554d2
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go
@@ -0,0 +1,61 @@
+/*
+Package availabilityzones provides the ability to get lists and detailed
+availability zone information and to extend a server result with
+availability zone information.
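+
+The AvailabilityZone type below carries per-zone state. As a small sketch
+(the zone name "nova" is an assumption), a single zone's availability can be
+checked after listing:
+
+	allPages, err := availabilityzones.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, zoneInfo := range availabilityZoneInfo {
+		if zoneInfo.ZoneName == "nova" {
+			fmt.Println("available:", zoneInfo.ZoneState.Available)
+		}
+	}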
+
+Example of Extending a Server Result with Availability Zone Information
+
+	type ServerWithAZ struct {
+		servers.Server
+		availabilityzones.ServerAvailabilityZoneExt
+	}
+
+	var allServers []ServerWithAZ
+
+	allPages, err := servers.List(client, nil).AllPages()
+	if err != nil {
+		panic(fmt.Sprintf("Unable to retrieve servers: %s", err))
+	}
+
+	err = servers.ExtractServersInto(allPages, &allServers)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to extract servers: %s", err))
+	}
+
+	for _, server := range allServers {
+		fmt.Println(server.AvailabilityZone)
+	}
+
+Example of Get Availability Zone Information
+
+	allPages, err := availabilityzones.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, zoneInfo := range availabilityZoneInfo {
+		fmt.Printf("%+v\n", zoneInfo)
+	}
+
+Example of Get Detailed Availability Zone Information
+
+	allPages, err := availabilityzones.ListDetail(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, zoneInfo := range availabilityZoneInfo {
+		fmt.Printf("%+v\n", zoneInfo)
+	}
+*/
+package availabilityzones
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go
new file mode 100644
index 00000000..f9a2e86e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go
@@ -0,0 +1,20 @@
+package availabilityzones
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List will return the existing availability zones.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return AvailabilityZonePage{pagination.SinglePageBase(r)}
+	})
+}
+
+// ListDetail will return the existing availability zones with detailed information.
+func ListDetail(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listDetailURL(client), func(r pagination.PageResult) pagination.Page {
+		return AvailabilityZonePage{pagination.SinglePageBase(r)}
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go
new file mode 100644
index 00000000..d48a0ea8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go
@@ -0,0 +1,77 @@
+package availabilityzones
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ServerAvailabilityZoneExt is an extension to the base Server object.
+type ServerAvailabilityZoneExt struct {
+	// AvailabilityZone is the availability zone the server is in.
+	AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"`
+}
+
+// ServiceState represents the state of a service in an AvailabilityZone.
+type ServiceState struct {
+	Active    bool      `json:"active"`
+	Available bool      `json:"available"`
+	UpdatedAt time.Time `json:"-"`
+}
+
+// UnmarshalJSON overrides the default unmarshaling to parse the custom updated_at timestamp.
+func (r *ServiceState) UnmarshalJSON(b []byte) error {
+	type tmp ServiceState
+	var s struct {
+		tmp
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = ServiceState(s.tmp)
+
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+
+	return nil
+}
+
+// Services is a map of services contained in an AvailabilityZone.
+type Services map[string]ServiceState
+
+// Hosts is a map of hosts/nodes contained in an AvailabilityZone.
+// Each host can have multiple services.
+type Hosts map[string]Services
+
+// ZoneState represents the current state of the availability zone.
+type ZoneState struct {
+	// Available is true if the availability zone is available.
+	Available bool `json:"available"`
+}
+
+// AvailabilityZone contains all the information associated with an OpenStack
+// AvailabilityZone.
+type AvailabilityZone struct {
+	Hosts Hosts `json:"hosts"`
+	// The availability zone name
+	ZoneName  string    `json:"zoneName"`
+	ZoneState ZoneState `json:"zoneState"`
+}
+
+// AvailabilityZonePage stores a single page of AvailabilityZone results from a List call.
+type AvailabilityZonePage struct {
+	pagination.SinglePageBase
+}
+
+// ExtractAvailabilityZones returns a slice of AvailabilityZones contained in a
+// single page of results.
+func ExtractAvailabilityZones(r pagination.Page) ([]AvailabilityZone, error) {
+	var s struct {
+		AvailabilityZoneInfo []AvailabilityZone `json:"availabilityZoneInfo"`
+	}
+	err := (r.(AvailabilityZonePage)).ExtractInto(&s)
+	return s.AvailabilityZoneInfo, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go
new file mode 100644
index 00000000..9d99ec74
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go
@@ -0,0 +1,11 @@
+package availabilityzones
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("os-availability-zone")
+}
+
+func listDetailURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("os-availability-zone", "detail")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go
new file mode 100644
index 00000000..d291325e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go
@@ -0,0 +1,152 @@
+/*
+Package bootfromvolume extends a server create request with the ability to
+specify block device options. This can be used to boot a server from a block
+storage volume as well as specify multiple ephemeral disks upon creation.
+
+It is recommended to refer to the Block Device Mapping documentation to see
+all possible ways to configure a server's block devices at creation time:
+
+https://docs.openstack.org/nova/latest/user/block-device-mapping.html
+
+Note that this package implements `block_device_mapping_v2`.
+
+Example of Creating a Server From an Image
+
+This example will boot a server from an image and use a standard ephemeral
+disk as the server's root disk. This is virtually no different than creating
+a server without using block device mappings.
+
+	blockDevices := []bootfromvolume.BlockDevice{
+		bootfromvolume.BlockDevice{
+			BootIndex:           0,
+			DeleteOnTermination: true,
+			DestinationType:     bootfromvolume.DestinationLocal,
+			SourceType:          bootfromvolume.SourceImage,
+			UUID:                "image-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		FlavorRef: "flavor-uuid",
+		ImageRef:  "image-uuid",
+	}
+
+	createOpts := bootfromvolume.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		BlockDevice:       blockDevices,
+	}
+
+	server, err := bootfromvolume.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example of Creating a Server From a New Volume
+
+This example will create a block storage volume based on the given Image. The
+server will use this volume as its root disk.
+
+	blockDevices := []bootfromvolume.BlockDevice{
+		bootfromvolume.BlockDevice{
+			DeleteOnTermination: true,
+			DestinationType:     bootfromvolume.DestinationVolume,
+			SourceType:          bootfromvolume.SourceImage,
+			UUID:                "image-uuid",
+			VolumeSize:          2,
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := bootfromvolume.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		BlockDevice:       blockDevices,
+	}
+
+	server, err := bootfromvolume.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example of Creating a Server From an Existing Volume
+
+This example will create a server with an existing volume as its root disk.
+
+	blockDevices := []bootfromvolume.BlockDevice{
+		bootfromvolume.BlockDevice{
+			DeleteOnTermination: true,
+			DestinationType:     bootfromvolume.DestinationVolume,
+			SourceType:          bootfromvolume.SourceVolume,
+			UUID:                "volume-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := bootfromvolume.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		BlockDevice:       blockDevices,
+	}
+
+	server, err := bootfromvolume.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example of Creating a Server with Multiple Ephemeral Disks
+
+This example will create a server with multiple ephemeral disks. The first
+block device will be based on an existing Image. Each additional
+ephemeral disk must have a boot index of -1.
+ + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + VolumeSize: 5, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + ImageRef: "image-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package bootfromvolume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go new file mode 100644 index 00000000..30c61701 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go @@ -0,0 +1,128 @@ +package bootfromvolume + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +type ( + // DestinationType represents the type of medium being used as the + // destination of the bootable device. + DestinationType string + + // SourceType represents the type of medium being used as the source of the + // bootable device. + SourceType string +) + +const ( + // DestinationLocal DestinationType is for using an ephemeral disk as the + // destination. + DestinationLocal DestinationType = "local" + + // DestinationVolume DestinationType is for using a volume as the destination. + DestinationVolume DestinationType = "volume" + + // SourceBlank SourceType is for a "blank" or empty source. + SourceBlank SourceType = "blank" + + // SourceImage SourceType is for using images as the source of a block device. + SourceImage SourceType = "image" + + // SourceSnapshot SourceType is for using a volume snapshot as the source of + // a block device. + SourceSnapshot SourceType = "snapshot" + + // SourceVolume SourceType is for using a volume as the source of block + // device. + SourceVolume SourceType = "volume" +) + +// BlockDevice is a structure with options for creating block devices in a +// server. The block device may be created from an image, snapshot, new volume, +// or existing volume. The destination may be a new volume, existing volume +// which will be attached to the instance, ephemeral disk, or boot device. +type BlockDevice struct { + // SourceType must be one of: "volume", "snapshot", "image", or "blank". + SourceType SourceType `json:"source_type" required:"true"` + + // UUID is the unique identifier for the existing volume, snapshot, or + // image (see above). + UUID string `json:"uuid,omitempty"` + + // BootIndex is the boot index. It defaults to 0. + BootIndex int `json:"boot_index"` + + // DeleteOnTermination specifies whether or not to delete the attached volume + // when the server is deleted. Defaults to `false`. 
+	DeleteOnTermination bool `json:"delete_on_termination"`
+
+	// DestinationType is the type that gets created. Possible values are "volume"
+	// and "local".
+	DestinationType DestinationType `json:"destination_type,omitempty"`
+
+	// GuestFormat specifies the format of the block device.
+	GuestFormat string `json:"guest_format,omitempty"`
+
+	// VolumeSize is the size of the volume to create (in gigabytes). This can be
+	// omitted for existing volumes.
+	VolumeSize int `json:"volume_size,omitempty"`
+
+	// DeviceType specifies the device type of the block devices.
+	// Examples of this are disk, cdrom, floppy, lun, etc.
+	DeviceType string `json:"device_type,omitempty"`
+
+	// DiskBus is the bus type of the block devices.
+	// Examples of this are ide, usb, virtio, scsi, etc.
+	DiskBus string `json:"disk_bus,omitempty"`
+}
+
+// CreateOptsExt is a structure that extends the server `CreateOpts` structure
+// by allowing for a block device mapping.
+type CreateOptsExt struct {
+	servers.CreateOptsBuilder
+	BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"`
+}
+
+// ToServerCreateMap adds the block device mapping option to the base server
+// creation options.
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(opts.BlockDevice) == 0 {
+		err := gophercloud.ErrMissingInput{}
+		err.Argument = "bootfromvolume.CreateOptsExt.BlockDevice"
+		return nil, err
+	}
+
+	serverMap := base["server"].(map[string]interface{})
+
+	blockDevice := make([]map[string]interface{}, len(opts.BlockDevice))
+
+	for i, bd := range opts.BlockDevice {
+		b, err := gophercloud.BuildRequestBody(bd, "")
+		if err != nil {
+			return nil, err
+		}
+		blockDevice[i] = b
+	}
+	serverMap["block_device_mapping_v2"] = blockDevice
+
+	return base, nil
+}
+
+// Create requests the creation of a server from the given block device mapping.
+func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) (r servers.CreateResult) {
+	b, err := opts.ToServerCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
new file mode 100644
index 00000000..ba1eafab
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go
@@ -0,0 +1,12 @@
+package bootfromvolume
+
+import (
+	os "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+)
+
+// CreateResult temporarily contains the response from a Create call.
+// It embeds the standard servers.CreateResult type and so can be used the
+// same way as a standard server request result.
+type CreateResult struct {
+	os.CreateResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
new file mode 100644
index 00000000..dc007ead
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go
@@ -0,0 +1,7 @@
+package bootfromvolume
+
+import "github.com/gophercloud/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("os-volumes_boot")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
new file mode 100644
index 00000000..f5dbdbf8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go
@@ -0,0 +1,68 @@
+/*
+Package floatingips provides the ability to manage floating IPs through the
+Nova API.
+
+This API has been deprecated and will be removed from a future release of the
+Nova API service.
+
+For environments that support this extension, this package can be used
+regardless of whether Neutron or nova-network is used as the cloud's network
+service.
+
+Example to List Floating IPs
+
+	allPages, err := floatingips.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, fip := range allFloatingIPs {
+		fmt.Printf("%+v\n", fip)
+	}
+
+Example to Create a Floating IP
+
+	createOpts := floatingips.CreateOpts{
+		Pool: "nova",
+	}
+
+	fip, err := floatingips.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Floating IP
+
+	err := floatingips.Delete(computeClient, "floatingip-id").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Associate a Floating IP With a Server
+
+	associateOpts := floatingips.AssociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Disassociate a Floating IP From a Server
+
+	disassociateOpts := floatingips.DisassociateOpts{
+		FloatingIP: "10.10.10.2",
+	}
+
+	err := floatingips.DisassociateInstance(computeClient, "server-id", disassociateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package floatingips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
new file mode 100644
index 00000000..a922639d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go
@@ -0,0 +1,114 @@
+package floatingips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of FloatingIPs.
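+// The Floating IPs are returned as a single page; call AllPages and then
+// ExtractFloatingIPs to obtain a []FloatingIP.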
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return FloatingIPPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToFloatingIPCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies a Floating IP allocation request.
+type CreateOpts struct {
+	// Pool is the pool of Floating IPs to allocate one from.
+	Pool string `json:"pool" required:"true"`
+}
+
+// ToFloatingIPCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// Create requests the creation of a new Floating IP.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFloatingIPCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Get returns data about a previously created Floating IP.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// Delete requests the deletion of a previously allocated Floating IP.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// AssociateOptsBuilder allows extensions to add additional parameters to the
+// Associate request.
+type AssociateOptsBuilder interface {
+	ToFloatingIPAssociateMap() (map[string]interface{}, error)
+}
+
+// AssociateOpts specifies the required information to associate a Floating IP with an instance.
+type AssociateOpts struct {
+	// FloatingIP is the Floating IP to associate with an instance.
+	FloatingIP string `json:"address" required:"true"`
+
+	// FixedIP is an optional fixed IP address of the server.
+	FixedIP string `json:"fixed_address,omitempty"`
+}
+
+// ToFloatingIPAssociateMap constructs a request body from AssociateOpts.
+func (opts AssociateOpts) ToFloatingIPAssociateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "addFloatingIp")
+}
+
+// AssociateInstance pairs an allocated Floating IP with a server.
+func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts AssociateOptsBuilder) (r AssociateResult) {
+	b, err := opts.ToFloatingIPAssociateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(associateURL(client, serverID), b, nil, nil)
+	return
+}
+
+// DisassociateOptsBuilder allows extensions to add additional parameters to
+// the Disassociate request.
+type DisassociateOptsBuilder interface {
+	ToFloatingIPDisassociateMap() (map[string]interface{}, error)
+}
+
+// DisassociateOpts specifies the required information to disassociate a
+// Floating IP from a server.
+type DisassociateOpts struct {
+	FloatingIP string `json:"address" required:"true"`
+}
+
+// ToFloatingIPDisassociateMap constructs a request body from DisassociateOpts.
+func (opts DisassociateOpts) ToFloatingIPDisassociateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeFloatingIp") +} + +// DisassociateInstance decouples an allocated Floating IP from an instance +func DisassociateInstance(client *gophercloud.ServiceClient, serverID string, opts DisassociateOptsBuilder) (r DisassociateResult) { + b, err := opts.ToFloatingIPDisassociateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(disassociateURL(client, serverID), b, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go new file mode 100644 index 00000000..da4e9da0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go @@ -0,0 +1,115 @@ +package floatingips + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A FloatingIP is an IP that can be associated with a server. +type FloatingIP struct { + // ID is a unique ID of the Floating IP + ID string `json:"-"` + + // FixedIP is a specific IP on the server to pair the Floating IP with. + FixedIP string `json:"fixed_ip,omitempty"` + + // InstanceID is the ID of the server that is using the Floating IP. + InstanceID string `json:"instance_id"` + + // IP is the actual Floating IP. + IP string `json:"ip"` + + // Pool is the pool of Floating IPs that this Floating IP belongs to. + Pool string `json:"pool"` +} + +func (r *FloatingIP) UnmarshalJSON(b []byte) error { + type tmp FloatingIP + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = FloatingIP(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// FloatingIPPage stores a single page of FloatingIPs from a List call. +type FloatingIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a FloatingIPsPage is empty. +func (page FloatingIPPage) IsEmpty() (bool, error) { + va, err := ExtractFloatingIPs(page) + return len(va) == 0, err +} + +// ExtractFloatingIPs interprets a page of results as a slice of FloatingIPs. +func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { + var s struct { + FloatingIPs []FloatingIP `json:"floating_ips"` + } + err := (r.(FloatingIPPage)).ExtractInto(&s) + return s.FloatingIPs, err +} + +// FloatingIPResult is the raw result from a FloatingIP request. +type FloatingIPResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any FloatingIP resource +// response as a FloatingIP struct. +func (r FloatingIPResult) Extract() (*FloatingIP, error) { + var s struct { + FloatingIP *FloatingIP `json:"floating_ip"` + } + err := r.ExtractInto(&s) + return s.FloatingIP, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a FloatingIP. +type CreateResult struct { + FloatingIPResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a FloatingIP. +type GetResult struct { + FloatingIPResult +} + +// DeleteResult is the response from a Delete operation. 
Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AssociateResult is the response from an Associate operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type AssociateResult struct {
+	gophercloud.ErrResult
+}
+
+// DisassociateResult is the response from a Disassociate operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type DisassociateResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go
new file mode 100644
index 00000000..4768e5a8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go
@@ -0,0 +1,37 @@
+package floatingips
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "os-floating-ips"
+
+func resourceURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
+
+func serverURL(c *gophercloud.ServiceClient, serverID string) string {
+	return c.ServiceURL("servers/" + serverID + "/action")
+}
+
+func associateURL(c *gophercloud.ServiceClient, serverID string) string {
+	return serverURL(c, serverID)
+}
+
+func disassociateURL(c *gophercloud.ServiceClient, serverID string) string {
+	return serverURL(c, serverID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
new file mode 100644
index 00000000..24c46077
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go
@@ -0,0 +1,71 @@
+/*
+Package keypairs provides the ability to manage key pairs as well as create
+servers with a specified key pair.
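+
+When a key pair is generated by the service (no public key supplied), the
+Create response is the only time the private key is returned, so it is worth
+persisting immediately. A minimal sketch (the file path is an assumption):
+
+	keypair, err := keypairs.Create(computeClient, keypairs.CreateOpts{
+		Name: "keypair-name",
+	}).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	// Persist the one-time private key; the path is illustrative.
+	err = ioutil.WriteFile("/tmp/keypair-name.pem", []byte(keypair.PrivateKey), 0600)
+	if err != nil {
+		panic(err)
+	}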
+ +Example to List Key Pairs + + allPages, err := keypairs.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allKeyPairs, err := keypairs.ExtractKeyPairs(allPages) + if err != nil { + panic(err) + } + + for _, kp := range allKeyPairs { + fmt.Printf("%+v\n", kp) + } + +Example to Create a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", keypair) + +Example to Import a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + PublicKey: "public-key", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Key Pair + + err := keypairs.Delete(computeClient, "keypair-name").ExtractErr() + if err != nil { + panic(err) + } + +Example to Create a Server With a Key Pair + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := keypairs.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + KeyName: "keypair-name", + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package keypairs diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go new file mode 100644 index 00000000..b7280777 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go @@ -0,0 +1,86 @@ +package keypairs + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsExt adds a KeyPair option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // KeyName is the name of the key pair. + KeyName string `json:"key_name,omitempty"` +} + +// ToServerCreateMap adds the key_name to the base server creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if opts.KeyName == "" { + return base, nil + } + + serverMap := base["server"].(map[string]interface{}) + serverMap["key_name"] = opts.KeyName + + return base, nil +} + +// List returns a Pager that allows you to iterate over a collection of KeyPairs. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return KeyPairPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToKeyPairCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies KeyPair creation or import parameters. +type CreateOpts struct { + // Name is a friendly name to refer to this KeyPair in other services. + Name string `json:"name" required:"true"` + + // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. + // If provided, this key will be imported and no new key will be created. 
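+	// If omitted, a new key pair is generated and its private key is returned
+	// once in the Create response.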
+	PublicKey string `json:"public_key,omitempty"`
+}
+
+// ToKeyPairCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "keypair")
+}
+
+// Create requests the creation of a new KeyPair on the server, or the import
+// of a pre-existing keypair.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToKeyPairCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Get returns public data about a previously uploaded KeyPair.
+func Get(client *gophercloud.ServiceClient, name string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, name), &r.Body, nil)
+	return
+}
+
+// Delete requests the deletion of a previously stored KeyPair from the server.
+func Delete(client *gophercloud.ServiceClient, name string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, name), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go
new file mode 100644
index 00000000..2d71034b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go
@@ -0,0 +1,91 @@
+package keypairs
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// KeyPair is an SSH key known to the OpenStack Cloud that is available to be
+// injected into servers.
+type KeyPair struct {
+	// Name is used to refer to this keypair from other services within this
+	// region.
+	Name string `json:"name"`
+
+	// Fingerprint is a short sequence of bytes that can be used to authenticate
+	// or validate a longer public key.
+	Fingerprint string `json:"fingerprint"`
+
+	// PublicKey is the public key from this pair, in OpenSSH format.
+	// "ssh-rsa AAAAB3Nz..."
+	PublicKey string `json:"public_key"`
+
+	// PrivateKey is the private key from this pair, in PEM format.
+	// "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..."
+	// It is only present if this KeyPair was just returned from a Create call.
+	PrivateKey string `json:"private_key"`
+
+	// UserID is the user who owns this KeyPair.
+	UserID string `json:"user_id"`
+}
+
+// KeyPairPage stores a single page of all KeyPair results from a List call.
+// Use the ExtractKeyPairs function to convert the results to a slice of
+// KeyPairs.
+type KeyPairPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a KeyPairPage is empty.
+func (page KeyPairPage) IsEmpty() (bool, error) {
+	ks, err := ExtractKeyPairs(page)
+	return len(ks) == 0, err
+}
+
+// ExtractKeyPairs interprets a page of results as a slice of KeyPairs.
+func ExtractKeyPairs(r pagination.Page) ([]KeyPair, error) {
+	type pair struct {
+		KeyPair KeyPair `json:"keypair"`
+	}
+	var s struct {
+		KeyPairs []pair `json:"keypairs"`
+	}
+	err := (r.(KeyPairPage)).ExtractInto(&s)
+	results := make([]KeyPair, len(s.KeyPairs))
+	for i, pair := range s.KeyPairs {
+		results[i] = pair.KeyPair
+	}
+	return results, err
+}
+
+type keyPairResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any KeyPair resource response
+// as a KeyPair struct.
+func (r keyPairResult) Extract() (*KeyPair, error) { + var s struct { + KeyPair *KeyPair `json:"keypair"` + } + err := r.ExtractInto(&s) + return s.KeyPair, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a KeyPair. +type CreateResult struct { + keyPairResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a KeyPair. +type GetResult struct { + keyPairResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go new file mode 100644 index 00000000..fec38f36 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go @@ -0,0 +1,25 @@ +package keypairs + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-keypairs" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL(resourcePath, name) +} + +func deleteURL(c *gophercloud.ServiceClient, name string) string { + return getURL(c, name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go new file mode 100644 index 00000000..2d9d3acd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go @@ -0,0 +1,76 @@ +/* +Package schedulerhints extends the server create request with the ability to +specify additional parameters which determine where the server will be +created in the OpenStack cloud. 
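+
+Beyond the group and host placement examples below, free-form hints are also
+possible. As a small sketch, a Query hint (the threshold value is an
+assumption) restricts scheduling to compute nodes that satisfy a conditional
+statement:
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		Query: []interface{}{">=", "$free_ram_mb", 1024},
+	}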
+
+Example to Add a Server to a Server Group
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		Group: "servergroup-uuid",
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Place Server B on a Different Host than Server A
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		DifferentHost: []string{
+			"server-a-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_b",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Place Server B on the Same Host as Server A
+
+	schedulerHints := schedulerhints.SchedulerHints{
+		SameHost: []string{
+			"server-a-uuid",
+		},
+	}
+
+	serverCreateOpts := servers.CreateOpts{
+		Name:      "server_b",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	createOpts := schedulerhints.CreateOptsExt{
+		CreateOptsBuilder: serverCreateOpts,
+		SchedulerHints:    schedulerHints,
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package schedulerhints
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go
new file mode 100644
index 00000000..96393450
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go
@@ -0,0 +1,176 @@
+package schedulerhints
+
+import (
+	"encoding/json"
+	"net"
+	"regexp"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
+)
+
+// SchedulerHints represents a set of scheduling hints that are passed to the
+// OpenStack scheduler.
+type SchedulerHints struct {
+	// Group specifies a Server Group to place the instance in.
+	Group string
+
+	// DifferentHost will place the instance on a compute node that does not
+	// host the given instances.
+	DifferentHost []string
+
+	// SameHost will place the instance on a compute node that hosts the given
+	// instances.
+	SameHost []string
+
+	// Query is a conditional statement that results in compute nodes able to
+	// host the instance.
+	Query []interface{}
+
+	// TargetCell specifies a cell name where the instance will be placed.
+	TargetCell string `json:"target_cell,omitempty"`
+
+	// BuildNearHostIP specifies a subnet of compute nodes to host the instance.
+	BuildNearHostIP string
+
+	// AdditionalProperties are arbitrary key/values that are not validated by nova.
+	AdditionalProperties map[string]interface{}
+}
+
+// CreateOptsBuilder builds the scheduler hints into a serializable format.
+type CreateOptsBuilder interface {
+	ToServerSchedulerHintsCreateMap() (map[string]interface{}, error)
+}
+
+// ToServerSchedulerHintsCreateMap builds the scheduler hints into a serializable format.
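+// Hints that fail validation, such as a non-UUID Group or a malformed
+// BuildNearHostIP subnet, are rejected with an ErrInvalidInput.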
+func (opts SchedulerHints) ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) { + sh := make(map[string]interface{}) + + uuidRegex, _ := regexp.Compile("^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$") + + if opts.Group != "" { + if !uuidRegex.MatchString(opts.Group) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Group" + err.Value = opts.Group + err.Info = "Group must be a UUID" + return nil, err + } + sh["group"] = opts.Group + } + + if len(opts.DifferentHost) > 0 { + for _, diffHost := range opts.DifferentHost { + if !uuidRegex.MatchString(diffHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.DifferentHost" + err.Value = opts.DifferentHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["different_host"] = opts.DifferentHost + } + + if len(opts.SameHost) > 0 { + for _, sameHost := range opts.SameHost { + if !uuidRegex.MatchString(sameHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.SameHost" + err.Value = opts.SameHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["same_host"] = opts.SameHost + } + + /* + Query can be something simple like: + [">=", "$free_ram_mb", 1024] + + Or more complex like: + ['and', + ['>=', '$free_ram_mb', 1024], + ['>=', '$free_disk_mb', 200 * 1024] + ] + + Because of the possible complexity, just make sure the length is a minimum of 3. + */ + if len(opts.Query) > 0 { + if len(opts.Query) < 3 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Query" + err.Value = opts.Query + err.Info = "Must be a conditional statement in the format of [op,variable,value]" + return nil, err + } + + // The query needs to be sent as a marshalled string. + b, err := json.Marshal(opts.Query) + if err != nil { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Query" + err.Value = opts.Query + err.Info = "Must be a conditional statement in the format of [op,variable,value]" + return nil, err + } + + sh["query"] = string(b) + } + + if opts.TargetCell != "" { + sh["target_cell"] = opts.TargetCell + } + + if opts.BuildNearHostIP != "" { + if _, _, err := net.ParseCIDR(opts.BuildNearHostIP); err != nil { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.BuildNearHostIP" + err.Value = opts.BuildNearHostIP + err.Info = "Must be a valid subnet in the form 192.168.1.1/24" + return nil, err + } + ipParts := strings.Split(opts.BuildNearHostIP, "/") + sh["build_near_host_ip"] = ipParts[0] + sh["cidr"] = "/" + ipParts[1] + } + + if opts.AdditionalProperties != nil { + for k, v := range opts.AdditionalProperties { + sh[k] = v + } + } + + return sh, nil +} + +// CreateOptsExt adds a SchedulerHints option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // SchedulerHints provides a set of hints to the scheduler. + SchedulerHints CreateOptsBuilder +} + +// ToServerCreateMap adds the SchedulerHints option to the base server creation options. 
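+// The hints are placed under the top-level "os:scheduler_hints" key of the
+// request body, alongside the "server" section.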
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToServerCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	schedulerHints, err := opts.SchedulerHints.ToServerSchedulerHintsCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if len(schedulerHints) == 0 {
+		return base, nil
+	}
+
+	base["os:scheduler_hints"] = schedulerHints
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go
new file mode 100644
index 00000000..8d3ebf2e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go
@@ -0,0 +1,112 @@
+/*
+Package secgroups provides the ability to manage security groups through the
+Nova API.
+
+This API has been deprecated and will be removed from a future release of the
+Nova API service.
+
+For environments that support this extension, this package can be used
+regardless of whether Neutron or nova-network is used as the cloud's network
+service.
+
+Example to List Security Groups
+
+	allPages, err := secgroups.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, sg := range allSecurityGroups {
+		fmt.Printf("%+v\n", sg)
+	}
+
+Example to List Security Groups by Server
+
+	serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+
+	allPages, err := secgroups.ListByServer(computeClient, serverID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, sg := range allSecurityGroups {
+		fmt.Printf("%+v\n", sg)
+	}
+
+Example to Create a Security Group
+
+	createOpts := secgroups.CreateOpts{
+		Name:        "group_name",
+		Description: "A Security Group",
+	}
+
+	sg, err := secgroups.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Security Group Rule
+
+	sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+	createOpts := secgroups.CreateRuleOpts{
+		ParentGroupID: sgID,
+		FromPort:      22,
+		ToPort:        22,
+		IPProtocol:    "tcp",
+		CIDR:          "0.0.0.0/0",
+	}
+
+	rule, err := secgroups.CreateRule(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Add a Security Group to a Server
+
+	serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+	sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+	err := secgroups.AddServer(computeClient, serverID, sgID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove a Security Group from a Server
+
+	serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+	sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+	err := secgroups.RemoveServer(computeClient, serverID, sgID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Security Group
+
+	sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+	err := secgroups.Delete(computeClient, sgID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Security Group Rule
+
+	ruleID := "6221fe3e-383d-46c9-a3a6-845e66c1e8b4"
+	err := secgroups.DeleteRule(computeClient, ruleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package secgroups
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go new file mode 100644 index 00000000..92a0331e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go @@ -0,0 +1,183 @@ +package secgroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +func commonList(client *gophercloud.ServiceClient, url string) pagination.Pager { + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SecurityGroupPage{pagination.SinglePageBase(r)} + }) +} + +// List will return a collection of all the security groups for a particular +// tenant. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return commonList(client, rootURL(client)) +} + +// ListByServer will return a collection of all the security groups which are +// associated with a particular server. +func ListByServer(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return commonList(client, listByServerURL(client, serverID)) +} + +// CreateOpts is the struct responsible for creating a security group. +type CreateOpts struct { + // the name of your security group. + Name string `json:"name" required:"true"` + // the description of your security group. + Description string `json:"description,omitempty"` +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create will create a new security group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateOpts is the struct responsible for updating an existing security group. +type UpdateOpts struct { + // the name of your security group. + Name string `json:"name,omitempty"` + // the description of your security group. + Description *string `json:"description,omitempty"` +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update will modify the mutable properties of a security group, notably its +// name and description. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(resourceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get will return details for a particular security group. 
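+//
+// For example (an illustrative sketch; the ID is a placeholder):
+//
+//	sg, err := secgroups.Get(computeClient, "37d94f8a-d136-465c-ae46-144f0d8ef141").Extract()
+//	if err != nil {
+//		panic(err)
+//	}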
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(resourceURL(client, id), &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a security group from the project.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(resourceURL(client, id), nil)
+	return
+}
+
+// CreateRuleOpts represents the configuration for adding a new rule to an
+// existing security group.
+type CreateRuleOpts struct {
+	// ParentGroupID is the ID of the group that this rule will be added to.
+	ParentGroupID string `json:"parent_group_id" required:"true"`
+
+	// FromPort is the lower bound of the port range that will be opened.
+	// Use -1 to allow all ICMP traffic.
+	FromPort int `json:"from_port"`
+
+	// ToPort is the upper bound of the port range that will be opened.
+	// Use -1 to allow all ICMP traffic.
+	ToPort int `json:"to_port"`
+
+	// IPProtocol is the protocol type that will be allowed, e.g. TCP.
+	IPProtocol string `json:"ip_protocol" required:"true"`
+
+	// CIDR is the network CIDR to allow traffic from.
+	// This is ONLY required if FromGroupID is blank. This represents the IP
+	// range that will be the source of network traffic to your security group.
+	// Use 0.0.0.0/0 to allow all IP addresses.
+	CIDR string `json:"cidr,omitempty" or:"FromGroupID"`
+
+	// FromGroupID represents another security group to allow access.
+	// This is ONLY required if CIDR is blank. This value represents the ID of a
+	// group that forwards traffic to the parent group. So, instead of accepting
+	// network traffic from an entire IP range, you can instead refine the
+	// inbound source by an existing security group.
+	FromGroupID string `json:"group_id,omitempty" or:"CIDR"`
+}
+
+// CreateRuleOptsBuilder allows extensions to add additional parameters to the
+// CreateRule request.
+type CreateRuleOptsBuilder interface {
+	ToRuleCreateMap() (map[string]interface{}, error)
+}
+
+// ToRuleCreateMap builds a request body from CreateRuleOpts.
+func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "security_group_rule")
+}
+
+// CreateRule will add a new rule to an existing security group (whose ID is
+// specified in CreateRuleOpts). You have the option of controlling inbound
+// traffic from either an IP range (CIDR) or from another security group.
+func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) (r CreateRuleResult) {
+	b, err := opts.ToRuleCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(rootRuleURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// DeleteRule will permanently delete a rule from a security group.
+func DeleteRule(client *gophercloud.ServiceClient, id string) (r DeleteRuleResult) {
+	_, r.Err = client.Delete(resourceRuleURL(client, id), nil)
+	return
+}
+
+func actionMap(prefix, groupName string) map[string]map[string]string {
+	return map[string]map[string]string{
+		prefix + "SecurityGroup": map[string]string{"name": groupName},
+	}
+}
+
+// AddServer will associate a server and a security group, enforcing the
+// rules of the group on the server.
+func AddServer(client *gophercloud.ServiceClient, serverID, groupName string) (r AddServerResult) {
+	_, r.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), nil, nil)
+	return
+}
+
+// RemoveServer will disassociate a server from a security group.
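+//
+// Note that, like AddServer, the group is referenced here by its name (posted
+// under the "name" key of the action body built by actionMap above), not by
+// its ID.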
+func RemoveServer(client *gophercloud.ServiceClient, serverID, groupName string) (r RemoveServerResult) { + _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go new file mode 100644 index 00000000..04688922 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go @@ -0,0 +1,214 @@ +package secgroups + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecurityGroup represents a security group. +type SecurityGroup struct { + // The unique ID of the group. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The human-readable name of the group, which needs to be unique. + Name string `json:"name"` + + // The human-readable description of the group. + Description string `json:"description"` + + // The rules which determine how this security group operates. + Rules []Rule `json:"rules"` + + // The ID of the tenant to which this security group belongs. + TenantID string `json:"tenant_id"` +} + +func (r *SecurityGroup) UnmarshalJSON(b []byte) error { + type tmp SecurityGroup + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = SecurityGroup(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// Rule represents a security group rule, a policy which determines how a +// security group operates and what inbound traffic it allows in. +type Rule struct { + // The unique ID. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The lower bound of the port range which this security group should open up. + FromPort int `json:"from_port"` + + // The upper bound of the port range which this security group should open up. + ToPort int `json:"to_port"` + + // The IP protocol (e.g. TCP) which the security group accepts. + IPProtocol string `json:"ip_protocol"` + + // The CIDR IP range whose traffic can be received. + IPRange IPRange `json:"ip_range"` + + // The security group ID to which this rule belongs. + ParentGroupID string `json:"-"` + + // Not documented. + Group Group +} + +func (r *Rule) UnmarshalJSON(b []byte) error { + type tmp Rule + var s struct { + tmp + ID interface{} `json:"id"` + ParentGroupID interface{} `json:"parent_group_id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Rule(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + switch t := s.ParentGroupID.(type) { + case float64: + r.ParentGroupID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ParentGroupID = t + } + + return err +} + +// IPRange represents the IP range whose traffic will be accepted by the +// security group. 
+type IPRange struct { + CIDR string +} + +// Group represents a group. +type Group struct { + TenantID string `json:"tenant_id"` + Name string +} + +// SecurityGroupPage is a single page of a SecurityGroup collection. +type SecurityGroupPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Security Groups contains any +// results. +func (page SecurityGroupPage) IsEmpty() (bool, error) { + users, err := ExtractSecurityGroups(page) + return len(users) == 0, err +} + +// ExtractSecurityGroups returns a slice of SecurityGroups contained in a +// single page of results. +func ExtractSecurityGroups(r pagination.Page) ([]SecurityGroup, error) { + var s struct { + SecurityGroups []SecurityGroup `json:"security_groups"` + } + err := (r.(SecurityGroupPage)).ExtractInto(&s) + return s.SecurityGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type UpdateResult struct { + commonResult +} + +// Extract will extract a SecurityGroup struct from most responses. +func (r commonResult) Extract() (*SecurityGroup, error) { + var s struct { + SecurityGroup *SecurityGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecurityGroup, err +} + +// CreateRuleResult represents the result when adding rules to a security group. +// Call its Extract method to interpret the result as a Rule. +type CreateRuleResult struct { + gophercloud.Result +} + +// Extract will extract a Rule struct from a CreateRuleResult. +func (r CreateRuleResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"security_group_rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// DeleteResult is the response from delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DeleteRuleResult is the response from a DeleteRule operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteRuleResult struct { + gophercloud.ErrResult +} + +// AddServerResult is the response from an AddServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type AddServerResult struct { + gophercloud.ErrResult +} + +// RemoveServerResult is the response from a RemoveServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type RemoveServerResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go
new file mode 100644
index 00000000..d99746ca
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go
@@ -0,0 +1,32 @@
+package secgroups
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	secgrouppath = "os-security-groups"
+	rulepath     = "os-security-group-rules"
+)
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(secgrouppath, id)
+}
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(secgrouppath)
+}
+
+func listByServerURL(c *gophercloud.ServiceClient, serverID string) string {
+	return c.ServiceURL("servers", serverID, secgrouppath)
+}
+
+func rootRuleURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rulepath)
+}
+
+func resourceRuleURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rulepath, id)
+}
+
+func serverActionURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("servers", id, "action")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go
new file mode 100644
index 00000000..7811d32e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go
@@ -0,0 +1,86 @@
+/*
+Package servergroups provides the ability to manage server groups.
+
+Example to List Server Groups
+
+	allPages, err := servergroups.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServerGroups, err := servergroups.ExtractServerGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, sg := range allServerGroups {
+		fmt.Printf("%#v\n", sg)
+	}
+
+Example to Create a Server Group
+
+	createOpts := servergroups.CreateOpts{
+		Name:     "my_sg",
+		Policies: []string{"anti-affinity"},
+	}
+
+	sg, err := servergroups.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Server Group with additional microversion 2.64 fields
+
+	createOpts := servergroups.CreateOpts{
+		Name:   "my_sg",
+		Policy: "anti-affinity",
+		Rules: &servergroups.Rules{
+			MaxServerPerHost: 3,
+		},
+	}
+
+	computeClient.Microversion = "2.64"
+	result := servergroups.Create(computeClient, createOpts)
+
+	serverGroup, err := result.Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	policy, err := servergroups.ExtractPolicy(result.Result)
+	if err != nil {
+		panic(err)
+	}
+
+	rules, err := servergroups.ExtractRules(result.Result)
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Server Group
+
+	sgID := "7a6f29ad-e34d-4368-951a-58a08f11cfb7"
+	err := servergroups.Delete(computeClient, sgID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get additional fields with microversion 2.64 or later
+
+	computeClient.Microversion = "2.64"
+	result := servergroups.Get(computeClient, "616fb98f-46ca-475e-917e-2563e5a8cd19")
+
+	policy, err := servergroups.ExtractPolicy(result.Result)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("Policy: %s\n", policy)
+
+	rules, err := servergroups.ExtractRules(result.Result)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("Max server per host: %d\n", rules.MaxServerPerHost)
+*/
+package servergroups
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/microversions.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/microversions.go
new file mode 100644
index 00000000..4899d9c5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/microversions.go
@@ -0,0 +1,32 @@
+package servergroups
+
+import "github.com/gophercloud/gophercloud"
+
+// ExtractPolicy will extract the policy attribute.
+// This requires the client to be set to microversion 2.64 or later.
+func ExtractPolicy(r gophercloud.Result) (string, error) {
+	var s struct {
+		Policy string `json:"policy"`
+	}
+	err := r.ExtractIntoStructPtr(&s, "server_group")
+
+	return s.Policy, err
+}
+
+// ExtractRules will extract the rules attribute.
+// This requires the client to be set to microversion 2.64 or later.
+func ExtractRules(r gophercloud.Result) (Rules, error) {
+	var s struct {
+		Rules Rules `json:"rules"`
+	}
+	err := r.ExtractIntoStructPtr(&s, "server_group")
+
+	return s.Rules, err
+}
+
+// Rules represents set of rules for a policy.
+type Rules struct {
+	// MaxServerPerHost specifies how many servers can reside on a single compute host.
+	// It can be used only with the "anti-affinity" policy.
+	MaxServerPerHost int `json:"max_server_per_host,omitempty"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go
new file mode 100644
index 00000000..5740afaf
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go
@@ -0,0 +1,67 @@
+package servergroups
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List returns a Pager that allows you to iterate over a collection of
+// ServerGroups.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page {
+		return ServerGroupPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToServerGroupCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies Server Group creation parameters.
+type CreateOpts struct {
+	// Name is the name of the server group.
+	Name string `json:"name" required:"true"`
+
+	// Policies are the server group policies.
+	Policies []string `json:"policies,omitempty"`
+
+	// Policy specifies the name of a policy.
+	// Requires microversion 2.64 or later.
+	Policy string `json:"policy,omitempty"`
+
+	// Rules specifies the set of rules.
+	// Requires microversion 2.64 or later.
+	Rules *Rules `json:"rules,omitempty"`
+}
+
+// ToServerGroupCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "server_group")
+}
+
+// Create requests the creation of a new Server Group.
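+//
+// As a sketch, ToServerGroupCreateMap nests the options under a
+// "server_group" key, so the posted body looks like:
+//
+//	{"server_group": {"name": "my_sg", "policies": ["anti-affinity"]}}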
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToServerGroupCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Get returns data about a previously created ServerGroup.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// Delete requests the deletion of a previously allocated ServerGroup.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go
new file mode 100644
index 00000000..fd6f2c6c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go
@@ -0,0 +1,89 @@
+package servergroups
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// A ServerGroup creates a policy for instance placement in the cloud.
+// You should use extract methods from microversions.go to retrieve additional
+// fields.
+type ServerGroup struct {
+	// ID is the unique ID of the Server Group.
+	ID string `json:"id"`
+
+	// Name is the common name of the server group.
+	Name string `json:"name"`
+
+	// Policies are the group policies.
+	//
+	// Normally a single policy is applied:
+	//
+	// "affinity" will place all servers within the server group on the
+	// same compute node.
+	//
+	// "anti-affinity" will place servers within the server group on different
+	// compute nodes.
+	Policies []string `json:"policies"`
+
+	// Members are the members of the server group.
+	Members []string `json:"members"`
+
+	// Metadata includes a list of all user-specified key-value pairs attached
+	// to the Server Group.
+	Metadata map[string]interface{}
+}
+
+// ServerGroupPage stores a single page of all ServerGroups results from a
+// List call.
+type ServerGroupPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a ServerGroupPage is empty.
+func (page ServerGroupPage) IsEmpty() (bool, error) {
+	va, err := ExtractServerGroups(page)
+	return len(va) == 0, err
+}
+
+// ExtractServerGroups interprets a page of results as a slice of
+// ServerGroups.
+func ExtractServerGroups(r pagination.Page) ([]ServerGroup, error) {
+	var s struct {
+		ServerGroups []ServerGroup `json:"server_groups"`
+	}
+	err := (r.(ServerGroupPage)).ExtractInto(&s)
+	return s.ServerGroups, err
+}
+
+// ServerGroupResult is the raw result of a Server Group operation.
+type ServerGroupResult struct {
+	gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any Server Group resource
+// response as a ServerGroup struct.
+func (r ServerGroupResult) Extract() (*ServerGroup, error) {
+	var s struct {
+		ServerGroup *ServerGroup `json:"server_group"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ServerGroup, err
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method
+// to interpret it as a ServerGroup.
+type CreateResult struct {
+	ServerGroupResult
+}
+
+// GetResult is the response from a Get operation. Call its Extract method to
+// interpret it as a ServerGroup.
+type GetResult struct {
+	ServerGroupResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go
new file mode 100644
index 00000000..9a1f99b1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go
@@ -0,0 +1,25 @@
+package servergroups
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "os-server-groups"
+
+func resourceURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return resourceURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return getURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go
new file mode 100644
index 00000000..ab97edb7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go
@@ -0,0 +1,19 @@
+/*
+Package startstop provides functionality to start and stop servers that have
+been provisioned by the OpenStack Compute service.
+
+Example to Stop and Start a Server
+
+	serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e"
+
+	err := startstop.Stop(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+	err = startstop.Start(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package startstop
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go
new file mode 100644
index 00000000..5b4f3f39
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go
@@ -0,0 +1,19 @@
+package startstop
+
+import "github.com/gophercloud/gophercloud"
+
+func actionURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("servers", id, "action")
+}
+
+// Start is the operation responsible for starting a Compute server.
+func Start(client *gophercloud.ServiceClient, id string) (r StartResult) {
+	_, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-start": nil}, nil, nil)
+	return
+}
+
+// Stop is the operation responsible for stopping a Compute server.
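+//
+// Both Start and Stop post a single-key action body ({"os-start": null} or
+// {"os-stop": null}) to the server's /action endpoint and return only an
+// error, e.g. (illustrative):
+//
+//	if err := startstop.Stop(client, serverID).ExtractErr(); err != nil {
+//		// handle the error
+//	}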
+func Stop(client *gophercloud.ServiceClient, id string) (r StopResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-stop": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go new file mode 100644 index 00000000..83496893 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go @@ -0,0 +1,15 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +// StartResult is the response from a Start operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StartResult struct { + gophercloud.ErrResult +} + +// StopResult is the response from Stop operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StopResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go new file mode 100644 index 00000000..a32e8ffd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go @@ -0,0 +1,26 @@ +/* +Package tenantnetworks provides the ability for tenants to see information +about the networks they have access to. + +This is a deprecated API and will be removed from the Nova API service in a +future version. + +This API works in both Neutron and nova-network based OpenStack clouds. + +Example to List Networks Available to a Tenant + + allPages, err := tenantnetworks.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v\n", network) + } +*/ +package tenantnetworks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go new file mode 100644 index 00000000..00899056 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go @@ -0,0 +1,19 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Networks. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + }) +} + +// Get returns data about a previously created Network. 
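+//
+// For example (a sketch; the network ID is a placeholder):
+//
+//	network, err := tenantnetworks.Get(computeClient, "network-uuid").Extract()
+//	if err != nil {
+//		panic(err)
+//	}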
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go new file mode 100644 index 00000000..bda77d5f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go @@ -0,0 +1,58 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A Network represents a network that a server communicates on. +type Network struct { + // CIDR is the IPv4 subnet. + CIDR string `json:"cidr"` + + // ID is the UUID of the network. + ID string `json:"id"` + + // Name is the common name that the network has. + Name string `json:"label"` +} + +// NetworkPage stores a single page of all Networks results from a List call. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkPage is empty. +func (page NetworkPage) IsEmpty() (bool, error) { + va, err := ExtractNetworks(page) + return len(va) == 0, err +} + +// ExtractNetworks interprets a page of results as a slice of Network. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s struct { + Networks []Network `json:"networks"` + } + err := (r.(NetworkPage)).ExtractInto(&s) + return s.Networks, err +} + +type NetworkResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Network resource response +// as a Network struct. +func (r NetworkResult) Extract() (*Network, error) { + var s struct { + Network *Network `json:"network"` + } + err := r.ExtractInto(&s) + return s.Network, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a Network. +type GetResult struct { + NetworkResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go new file mode 100644 index 00000000..683041de --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go @@ -0,0 +1,17 @@ +package tenantnetworks + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-tenant-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go new file mode 100644 index 00000000..484eb200 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go @@ -0,0 +1,30 @@ +/* +Package volumeattach provides the ability to attach and detach volumes +from servers. 
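+
+Example to List Volume Attachments (a sketch based on this package's List and
+ExtractVolumeAttachments functions; the server ID is a placeholder)
+
+	serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d"
+
+	allPages, err := volumeattach.List(computeClient, serverID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allAttachments, err := volumeattach.ExtractVolumeAttachments(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, attachment := range allAttachments {
+		fmt.Printf("%+v\n", attachment)
+	}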
+ +Example to Attach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + volumeID := "87463836-f0e2-4029-abf6-20c8892a3103" + + createOpts := volumeattach.CreateOpts{ + Device: "/dev/vdc", + VolumeID: volumeID, + } + + result, err := volumeattach.Create(computeClient, serverID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Detach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + attachmentID := "ed081613-1c9b-4231-aa5e-ebfd4d87f983" + + err := volumeattach.Delete(computeClient, serverID, attachmentID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeattach diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go new file mode 100644 index 00000000..6a262c21 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go @@ -0,0 +1,60 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of +// VolumeAttachments. +func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return pagination.NewPager(client, listURL(client, serverID), func(r pagination.PageResult) pagination.Page { + return VolumeAttachmentPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add parameters to the Create request. +type CreateOptsBuilder interface { + ToVolumeAttachmentCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies volume attachment creation or import parameters. +type CreateOpts struct { + // Device is the device that the volume will attach to the instance as. + // Omit for "auto". + Device string `json:"device,omitempty"` + + // VolumeID is the ID of the volume to attach to the instance. + VolumeID string `json:"volumeId" required:"true"` +} + +// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volumeAttachment") +} + +// Create requests the creation of a new volume attachment on the server. +func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeAttachmentCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns public data about a previously created VolumeAttachment. +func Get(client *gophercloud.ServiceClient, serverID, attachmentID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, serverID, attachmentID), &r.Body, nil) + return +} + +// Delete requests the deletion of a previous stored VolumeAttachment from +// the server. 
+func Delete(client *gophercloud.ServiceClient, serverID, attachmentID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, serverID, attachmentID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go new file mode 100644 index 00000000..56d50347 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go @@ -0,0 +1,77 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// VolumeAttachment contains attachment information between a volume +// and server. +type VolumeAttachment struct { + // ID is a unique id of the attachment. + ID string `json:"id"` + + // Device is what device the volume is attached as. + Device string `json:"device"` + + // VolumeID is the ID of the attached volume. + VolumeID string `json:"volumeId"` + + // ServerID is the ID of the instance that has the volume attached. + ServerID string `json:"serverId"` +} + +// VolumeAttachmentPage stores a single page all of VolumeAttachment +// results from a List call. +type VolumeAttachmentPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a VolumeAttachmentPage is empty. +func (page VolumeAttachmentPage) IsEmpty() (bool, error) { + va, err := ExtractVolumeAttachments(page) + return len(va) == 0, err +} + +// ExtractVolumeAttachments interprets a page of results as a slice of +// VolumeAttachment. +func ExtractVolumeAttachments(r pagination.Page) ([]VolumeAttachment, error) { + var s struct { + VolumeAttachments []VolumeAttachment `json:"volumeAttachments"` + } + err := (r.(VolumeAttachmentPage)).ExtractInto(&s) + return s.VolumeAttachments, err +} + +// VolumeAttachmentResult is the result from a volume attachment operation. +type VolumeAttachmentResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any VolumeAttachment resource +// response as a VolumeAttachment struct. +func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { + var s struct { + VolumeAttachment *VolumeAttachment `json:"volumeAttachment"` + } + err := r.ExtractInto(&s) + return s.VolumeAttachment, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a VolumeAttachment. +type CreateResult struct { + VolumeAttachmentResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a VolumeAttachment. +type GetResult struct { + VolumeAttachmentResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go new file mode 100644 index 00000000..083f8dc4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go @@ -0,0 +1,25 @@ +package volumeattach + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-volume_attachments" + +func resourceURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, resourcePath) +} + +func listURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func createURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func getURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return c.ServiceURL("servers", serverID, resourcePath, aID) +} + +func deleteURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return getURL(c, serverID, aID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go new file mode 100644 index 00000000..34d8764f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -0,0 +1,137 @@ +/* +Package flavors provides information and interaction with the flavor API +in the OpenStack Compute service. + +A flavor is an available hardware configuration for a server. Each flavor +has a unique combination of disk space, memory capacity and priority for CPU +time. 
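+
+Example to Get a Flavor by ID (a sketch using this package's Get function;
+the flavor ID is a placeholder)
+
+	flavor, err := flavors.Get(computeClient, "1").Extract()
+	if err != nil {
+		panic(err)
+	}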
+
+Example to List Flavors
+
+	listOpts := flavors.ListOpts{
+		AccessType: flavors.PublicAccess,
+	}
+
+	allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allFlavors, err := flavors.ExtractFlavors(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, flavor := range allFlavors {
+		fmt.Printf("%+v\n", flavor)
+	}
+
+Example to Create a Flavor
+
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       gophercloud.IntToPointer(1),
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	flavor, err := flavors.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Flavor Access
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	allPages, err := flavors.ListAccesses(computeClient, flavorID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allAccesses, err := flavors.ExtractAccesses(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, access := range allAccesses {
+		fmt.Printf("%+v", access)
+	}
+
+Example to Grant Access to a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	accessOpts := flavors.AddAccessOpts{
+		Tenant: "15153a0979884b59b0592248ef947921",
+	}
+
+	accessList, err := flavors.AddAccess(computeClient, flavorID, accessOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove/Revoke Access to a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	accessOpts := flavors.RemoveAccessOpts{
+		Tenant: "15153a0979884b59b0592248ef947921",
+	}
+
+	accessList, err := flavors.RemoveAccess(computeClient, flavorID, accessOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	createOpts := flavors.ExtraSpecsOpts{
+		"hw:cpu_policy":        "CPU-POLICY",
+		"hw:cpu_thread_policy": "CPU-THREAD-POLICY",
+	}
+	createdExtraSpecs, err := flavors.CreateExtraSpecs(computeClient, flavorID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", createdExtraSpecs)
+
+Example to Get Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	extraSpecs, err := flavors.ListExtraSpecs(computeClient, flavorID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", extraSpecs)
+
+Example to Update Extra Specs for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+
+	updateOpts := flavors.ExtraSpecsOpts{
+		"hw:cpu_thread_policy": "CPU-THREAD-POLICY-UPDATED",
+	}
+	updatedExtraSpec, err := flavors.UpdateExtraSpec(computeClient, flavorID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v", updatedExtraSpec)
+
+Example to Delete an Extra Spec for a Flavor
+
+	flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b"
+	err := flavors.DeleteExtraSpec(computeClient, flavorID, "hw:cpu_thread_policy").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package flavors
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go
new file mode 100644
index 00000000..753024a1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go
@@ -0,0 +1,357 @@
+package flavors
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToFlavorListQuery() (string, error)
+}
+
+/*
+	AccessType maps to OpenStack's Flavor.is_public field. Although the is_public
+	field is boolean, the request options are ternary, which is why AccessType is
+	a string. The allowed values are listed in the constants below.
+
+	The AccessType argument is optional, and if it is not supplied, OpenStack
+	returns the PublicAccess flavors.
+*/
+type AccessType string
+
+const (
+	// PublicAccess returns public flavors and private flavors associated with
+	// that project.
+	PublicAccess AccessType = "true"
+
+	// PrivateAccess (admin only) returns private flavors, across all projects.
+	PrivateAccess AccessType = "false"
+
+	// AllAccess (admin only) returns public and private flavors across all
+	// projects.
+	AllAccess AccessType = "None"
+)
+
+/*
+	ListOpts filters the results returned by the List() function.
+	For example, a flavor with a minDisk field of 10 will not be returned if you
+	specify MinDisk set to 20.
+
+	Typically, software will use the last ID of the previous call to List to set
+	the Marker for the current call.
+*/
+type ListOpts struct {
+	// ChangesSince, if provided, instructs List to return only those things which
+	// have changed since the timestamp provided.
+	ChangesSince string `q:"changes-since"`
+
+	// MinDisk and MinRAM, if provided, elide flavors which do not meet your
+	// criteria.
+	MinDisk int `q:"minDisk"`
+	MinRAM  int `q:"minRam"`
+
+	// SortDir allows you to select the sort direction.
+	// It can be "asc" or "desc" (default).
+	SortDir string `q:"sort_dir"`
+
+	// SortKey allows you to sort by one of the flavor attributes.
+	// Default is flavorid.
+	SortKey string `q:"sort_key"`
+
+	// Marker and Limit control paging.
+	// Marker instructs List where to start listing from.
+	Marker string `q:"marker"`
+
+	// Limit instructs List to refrain from sending excessively large lists of
+	// flavors.
+	Limit int `q:"limit"`
+
+	// AccessType, if provided, instructs List which set of flavors to return.
+	// If not provided, flavors for the current project are returned.
+	AccessType AccessType `q:"is_public"`
+}
+
+// ToFlavorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToFlavorListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListDetail instructs OpenStack to provide a list of flavors.
+// You may provide criteria by which List curtails its results for easier
+// processing.
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToFlavorListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return FlavorPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToFlavorCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies parameters used for creating a flavor.
+type CreateOpts struct {
+	// Name is the name of the flavor.
+	Name string `json:"name" required:"true"`
+
+	// RAM is the memory of the flavor, measured in MB.
+	RAM int `json:"ram" required:"true"`
+
+	// VCPUs is the number of vcpus for the flavor.
+	VCPUs int `json:"vcpus" required:"true"`
+
+	// Disk is the amount of root disk space, measured in GB.
+	Disk *int `json:"disk" required:"true"`
+
+	// ID is a unique ID for the flavor.
+ ID string `json:"id,omitempty"` + + // Swap is the amount of swap space for the flavor, measured in MB. + Swap *int `json:"swap,omitempty"` + + // RxTxFactor alters the network bandwidth of a flavor. + RxTxFactor float64 `json:"rxtx_factor,omitempty"` + + // IsPublic flags a flavor as being available to all projects or not. + IsPublic *bool `json:"os-flavor-access:is_public,omitempty"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. + Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"` +} + +// ToFlavorCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "flavor") +} + +// Create requests the creation of a new flavor. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFlavorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get retrieves details of a single flavor. Use Extract to convert its +// result into a Flavor. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified flavor ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListAccesses retrieves the tenants which have access to a flavor. +func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager { + url := accessURL(client, id) + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AccessPage{pagination.SinglePageBase(r)} + }) +} + +// AddAccessOptsBuilder allows extensions to add additional parameters to the +// AddAccess requests. +type AddAccessOptsBuilder interface { + ToFlavorAddAccessMap() (map[string]interface{}, error) +} + +// AddAccessOpts represents options for adding access to a flavor. +type AddAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToFlavorAddAccessMap constructs a request body from AddAccessOpts. +func (opts AddAccessOpts) ToFlavorAddAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addTenantAccess") +} + +// AddAccess grants a tenant/project access to a flavor. +func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) { + b, err := opts.ToFlavorAddAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveAccessOptsBuilder allows extensions to add additional parameters to the +// RemoveAccess requests. +type RemoveAccessOptsBuilder interface { + ToFlavorRemoveAccessMap() (map[string]interface{}, error) +} + +// RemoveAccessOpts represents options for removing access to a flavor. +type RemoveAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToFlavorRemoveAccessMap constructs a request body from RemoveAccessOpts. 
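+//
+// As a sketch, the options are nested under a "removeTenantAccess" key:
+//
+//	{"removeTenantAccess": {"tenant": "15153a0979884b59b0592248ef947921"}}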
+func (opts RemoveAccessOpts) ToFlavorRemoveAccessMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "removeTenantAccess")
+}
+
+// RemoveAccess removes/revokes a tenant/project access to a flavor.
+func RemoveAccess(client *gophercloud.ServiceClient, id string, opts RemoveAccessOptsBuilder) (r RemoveAccessResult) {
+	b, err := opts.ToFlavorRemoveAccessMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ListExtraSpecs requests all the extra-specs for the given flavor ID.
+func ListExtraSpecs(client *gophercloud.ServiceClient, flavorID string) (r ListExtraSpecsResult) {
+	_, r.Err = client.Get(extraSpecsListURL(client, flavorID), &r.Body, nil)
+	return
+}
+
+// GetExtraSpec requests the extra-spec with the given key for the given
+// flavor ID.
+func GetExtraSpec(client *gophercloud.ServiceClient, flavorID string, key string) (r GetExtraSpecResult) {
+	_, r.Err = client.Get(extraSpecsGetURL(client, flavorID, key), &r.Body, nil)
+	return
+}
+
+// CreateExtraSpecsOptsBuilder allows extensions to add additional parameters to the
+// CreateExtraSpecs requests.
+type CreateExtraSpecsOptsBuilder interface {
+	ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error)
+}
+
+// ExtraSpecsOpts is a map that contains key-value pairs.
+type ExtraSpecsOpts map[string]string
+
+// ToFlavorExtraSpecsCreateMap assembles a body for a Create request based on
+// the contents of ExtraSpecsOpts.
+func (opts ExtraSpecsOpts) ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"extra_specs": opts}, nil
+}
+
+// CreateExtraSpecs will create or update the extra-specs key-value pairs for
+// the specified Flavor.
+func CreateExtraSpecs(client *gophercloud.ServiceClient, flavorID string, opts CreateExtraSpecsOptsBuilder) (r CreateExtraSpecsResult) {
+	b, err := opts.ToFlavorExtraSpecsCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(extraSpecsCreateURL(client, flavorID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// UpdateExtraSpecOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateExtraSpecOptsBuilder interface {
+	ToFlavorExtraSpecUpdateMap() (map[string]string, string, error)
+}
+
+// ToFlavorExtraSpecUpdateMap assembles a body for an Update request based on
+// the contents of a ExtraSpecOpts.
+func (opts ExtraSpecsOpts) ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) {
+	if len(opts) != 1 {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "flavors.ExtraSpecOpts"
+		err.Info = "Must have one and only one key-value pair"
+		return nil, "", err
+	}
+
+	var key string
+	for k := range opts {
+		key = k
+	}
+
+	return opts, key, nil
+}
+
+// UpdateExtraSpec will update the value of the specified flavor's extra spec
+// for the key in opts.
+func UpdateExtraSpec(client *gophercloud.ServiceClient, flavorID string, opts UpdateExtraSpecOptsBuilder) (r UpdateExtraSpecResult) {
+	b, key, err := opts.ToFlavorExtraSpecUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(extraSpecUpdateURL(client, flavorID, key), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// DeleteExtraSpec will delete the key-value pair with the given key for the given
+// flavor ID.
+// DeleteExtraSpec will delete the key-value pair with the given key for the given
+// flavor ID.
+func DeleteExtraSpec(client *gophercloud.ServiceClient, flavorID, key string) (r DeleteExtraSpecResult) {
+	_, r.Err = client.Delete(extraSpecDeleteURL(client, flavorID, key), &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a flavor's ID given its
+// name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	allPages, err := ListDetail(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractFlavors(allPages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range all {
+		if f.Name == name {
+			count++
+			id = f.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		err := &gophercloud.ErrResourceNotFound{}
+		err.ResourceType = "flavor"
+		err.Name = name
+		return "", err
+	case 1:
+		return id, nil
+	default:
+		err := &gophercloud.ErrMultipleResourcesFound{}
+		err.ResourceType = "flavor"
+		err.Name = name
+		err.Count = count
+		return "", err
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go
new file mode 100644
index 00000000..92fe1b18
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go
@@ -0,0 +1,252 @@
+package flavors
+
+import (
+	"encoding/json"
+	"strconv"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// CreateResult is the response of a Create operation. Call its Extract method to
+// interpret it as a Flavor.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the response of a Get operation. Call its Extract method to
+// interpret it as a Flavor.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract provides access to the individual Flavor returned by the Get and
+// Create functions.
+func (r commonResult) Extract() (*Flavor, error) {
+	var s struct {
+		Flavor *Flavor `json:"flavor"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Flavor, err
+}
+
+// Flavor represents (virtual) hardware configurations for server resources
+// in a region.
+type Flavor struct {
+	// ID is the flavor's unique ID.
+	ID string `json:"id"`
+
+	// Disk is the amount of root disk, measured in GB.
+	Disk int `json:"disk"`
+
+	// RAM is the amount of memory, measured in MB.
+	RAM int `json:"ram"`
+
+	// Name is the name of the flavor.
+	Name string `json:"name"`
+
+	// RxTxFactor describes bandwidth alterations of the flavor.
+	RxTxFactor float64 `json:"rxtx_factor"`
+
+	// Swap is the amount of swap space, measured in MB.
+	Swap int `json:"-"`
+
+	// VCPUs indicates how many (virtual) CPUs are available for this flavor.
+	VCPUs int `json:"vcpus"`
+
+	// IsPublic indicates whether the flavor is public.
+	IsPublic bool `json:"os-flavor-access:is_public"`
+
+	// Ephemeral is the amount of ephemeral disk space, measured in GB.
+	Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral"`
+}
+
+func (r *Flavor) UnmarshalJSON(b []byte) error {
+	type tmp Flavor
+	var s struct {
+		tmp
+		Swap interface{} `json:"swap"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = Flavor(s.tmp)
+
+	switch t := s.Swap.(type) {
+	case float64:
+		r.Swap = int(t)
+	case string:
+		switch t {
+		case "":
+			r.Swap = 0
+		default:
+			swap, err := strconv.ParseFloat(t, 64)
+			if err != nil {
+				return err
+			}
+			r.Swap = int(swap)
+		}
+	}
+
+	return nil
+}
+
+// FlavorPage contains a single page of all flavors from a ListDetail call.
+type FlavorPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines if a FlavorPage contains any results.
+func (page FlavorPage) IsEmpty() (bool, error) {
+	flavors, err := ExtractFlavors(page)
+	return len(flavors) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
+func (page FlavorPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"flavors_links"`
+	}
+	err := page.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractFlavors provides access to the list of flavors in a page acquired
+// from the ListDetail operation.
+func ExtractFlavors(r pagination.Page) ([]Flavor, error) {
+	var s struct {
+		Flavors []Flavor `json:"flavors"`
+	}
+	err := (r.(FlavorPage)).ExtractInto(&s)
+	return s.Flavors, err
+}
+
+// AccessPage contains a single page of all FlavorAccess entries for a flavor.
+type AccessPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty indicates whether an AccessPage is empty.
+func (page AccessPage) IsEmpty() (bool, error) {
+	v, err := ExtractAccesses(page)
+	return len(v) == 0, err
+}
+
+// ExtractAccesses interprets a page of results as a slice of FlavorAccess.
+func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) {
+	var s struct {
+		FlavorAccesses []FlavorAccess `json:"flavor_access"`
+	}
+	err := (r.(AccessPage)).ExtractInto(&s)
+	return s.FlavorAccesses, err
+}
+
+type accessResult struct {
+	gophercloud.Result
+}
+
+// AddAccessResult is the response of an AddAccess operation. Call its
+// Extract method to interpret it as a slice of FlavorAccess.
+type AddAccessResult struct {
+	accessResult
+}
+
+// RemoveAccessResult is the response of a RemoveAccess operation. Call its
+// Extract method to interpret it as a slice of FlavorAccess.
+type RemoveAccessResult struct {
+	accessResult
+}
+
+// Extract provides access to the result of an access create or delete.
+// The result will be all accesses that the flavor has.
+func (r accessResult) Extract() ([]FlavorAccess, error) {
+	var s struct {
+		FlavorAccesses []FlavorAccess `json:"flavor_access"`
+	}
+	err := r.ExtractInto(&s)
+	return s.FlavorAccesses, err
+}
+
+// FlavorAccess represents an ACL of tenant access to a specific Flavor.
+type FlavorAccess struct {
+	// FlavorID is the unique ID of the flavor.
+	FlavorID string `json:"flavor_id"`
+
+	// TenantID is the unique ID of the tenant.
+	TenantID string `json:"tenant_id"`
+}
+
+// Extract interprets any extraSpecsResult as ExtraSpecs, if possible.
+func (r extraSpecsResult) Extract() (map[string]string, error) {
+	var s struct {
+		ExtraSpecs map[string]string `json:"extra_specs"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ExtraSpecs, err
+}
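The custom UnmarshalJSON above exists because Nova may encode swap either as a number or as an empty string; a small sketch of the two shapes it normalizes (assumes `encoding/json` and the `flavors` package are imported):

```go
var f flavors.Flavor

// A numeric swap value decodes directly into Swap.
_ = json.Unmarshal([]byte(`{"id": "1", "swap": 512}`), &f)
fmt.Println(f.Swap) // 512

// An empty-string swap value is normalized to zero.
_ = json.Unmarshal([]byte(`{"id": "1", "swap": ""}`), &f)
fmt.Println(f.Swap) // 0
```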
+// extraSpecsResult contains the result of a call for (potentially) multiple
+// key-value pairs. Call its Extract method to interpret it as a
+// map[string]string.
+type extraSpecsResult struct {
+	gophercloud.Result
+}
+
+// ListExtraSpecsResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]string.
+type ListExtraSpecsResult struct {
+	extraSpecsResult
+}
+
+// CreateExtraSpecsResult contains the result of a Create operation. Call its
+// Extract method to interpret it as a map[string]string.
+type CreateExtraSpecsResult struct {
+	extraSpecsResult
+}
+
+// extraSpecResult contains the result of a call for a single
+// key-value pair.
+type extraSpecResult struct {
+	gophercloud.Result
+}
+
+// GetExtraSpecResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]string.
+type GetExtraSpecResult struct {
+	extraSpecResult
+}
+
+// UpdateExtraSpecResult contains the result of an Update operation. Call its
+// Extract method to interpret it as a map[string]string.
+type UpdateExtraSpecResult struct {
+	extraSpecResult
+}
+
+// DeleteExtraSpecResult contains the result of a Delete operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type DeleteExtraSpecResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets any extraSpecResult as an ExtraSpec, if possible.
+func (r extraSpecResult) Extract() (map[string]string, error) {
+	var s map[string]string
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go
new file mode 100644
index 00000000..8620dd78
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go
@@ -0,0 +1,49 @@
+package flavors
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+func getURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id)
+}
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("flavors", "detail")
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("flavors")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id)
+}
+
+func accessURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-flavor-access")
+}
+
+func accessActionURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "action")
+}
+
+func extraSpecsListURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs")
+}
+
+func extraSpecsGetURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
+
+func extraSpecsCreateURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs")
+}
+
+func extraSpecUpdateURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
+
+func extraSpecDeleteURL(client *gophercloud.ServiceClient, id, key string) string {
+	return client.ServiceURL("flavors", id, "os-extra_specs", key)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go
new file mode 100644
index 00000000..22410a79
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go
@@ -0,0 +1,32 @@
+/*
+Package images provides information and interaction with the images through
+the OpenStack Compute service.
+
+This API is deprecated and will be removed from a future version of the Nova
+API service.
+
+An image is a collection of files used to create or rebuild a server.
+Operators provide a number of pre-built OS images by default. You may also
+create custom images from cloud servers you have launched.
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := images.ListDetail(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+*/
+package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go
new file mode 100644
index 00000000..558b481b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go
@@ -0,0 +1,109 @@
+package images
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// ListDetail request.
+type ListOptsBuilder interface {
+	ToImageListQuery() (string, error)
+}
+
+// ListOpts contains options for filtering Images returned from a call to
+// ListDetail.
+type ListOpts struct {
+	// ChangesSince filters Images based on the last changed status (in date-time
+	// format).
+	ChangesSince string `q:"changes-since"`
+
+	// Limit limits the number of Images to return.
+	Limit int `q:"limit"`
+
+	// Marker is an Image UUID at which to set a marker.
+	Marker string `q:"marker"`
+
+	// Name is the name of the Image.
+	Name string `q:"name"`
+
+	// Server is the name of the Server (in URL format).
+	Server string `q:"server"`
+
+	// Status is the current status of the Image.
+	Status string `q:"status"`
+
+	// Type is the type of image (e.g. BASE, SERVER, ALL).
+	Type string `q:"type"`
+}
+
+// ToImageListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToImageListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListDetail enumerates the available images.
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listDetailURL(client)
+	if opts != nil {
+		query, err := opts.ToImageListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return ImagePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get returns data about a specific image by its ID.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
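Besides the AllPages flow shown in doc.go above, the Pager returned by ListDetail can also be consumed page by page; a hedged sketch, assuming a compute `client` and gophercloud's standard `Pager.EachPage` iterator (the filter values are placeholders):

```go
listOpts := images.ListOpts{
	Status: "ACTIVE",
	Limit:  50,
}

err := images.ListDetail(client, listOpts).EachPage(func(page pagination.Page) (bool, error) {
	imgs, err := images.ExtractImages(page)
	if err != nil {
		return false, err
	}
	for _, img := range imgs {
		fmt.Printf("%s (min disk %d GB, min RAM %d MB)\n", img.Name, img.MinDisk, img.MinRAM)
	}
	return true, nil // keep iterating until the last page
})
if err != nil {
	panic(err)
}
```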
+// Delete deletes the specified image ID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// IDFromName is a convenience function that returns an image's ID given its
+// name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+	allPages, err := ListDetail(client, nil).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractImages(allPages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range all {
+		if f.Name == name {
+			count++
+			id = f.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		err := &gophercloud.ErrResourceNotFound{}
+		err.ResourceType = "image"
+		err.Name = name
+		return "", err
+	case 1:
+		return id, nil
+	default:
+		err := &gophercloud.ErrMultipleResourcesFound{}
+		err.ResourceType = "image"
+		err.Name = name
+		err.Count = count
+		return "", err
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
new file mode 100644
index 00000000..70d1018c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go
@@ -0,0 +1,95 @@
+package images
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// GetResult is the response from a Get operation. Call its Extract method to
+// interpret it as an Image.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets a GetResult as an Image.
+func (r GetResult) Extract() (*Image, error) {
+	var s struct {
+		Image *Image `json:"image"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Image, err
+}
+
+// Image represents an Image returned by the Compute API.
+type Image struct {
+	// ID is the unique ID of an image.
+	ID string
+
+	// Created is the date when the image was created.
+	Created string
+
+	// MinDisk is the minimum amount of disk a flavor must have to be able
+	// to create a server based on the image, measured in GB.
+	MinDisk int
+
+	// MinRAM is the minimum amount of RAM a flavor must have to be able
+	// to create a server based on the image, measured in MB.
+	MinRAM int
+
+	// Name provides a human-readable moniker for the OS image.
+	Name string
+
+	// The Progress and Status fields indicate image-creation status.
+	Progress int
+
+	// Status is the current status of the image.
+	Status string
+
+	// Updated is the date when the image was updated.
+	Updated string
+
+	// Metadata provides free-form key/value pairs that further describe the
+	// image.
+	Metadata map[string]interface{}
+}
+
+// ImagePage contains a single page of all Images returned from a ListDetail
+// operation. Use ExtractImages to convert it into a slice of usable structs.
+type ImagePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if an ImagePage contains no Image results.
+func (page ImagePage) IsEmpty() (bool, error) {
+	images, err := ExtractImages(page)
+	return len(images) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to the
+// next page of results.
+func (page ImagePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"images_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractImages converts a page of List results into a slice of usable Image +// structs. +func ExtractImages(r pagination.Page) ([]Image, error) { + var s struct { + Images []Image `json:"images"` + } + err := (r.(ImagePage)).ExtractInto(&s) + return s.Images, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go new file mode 100644 index 00000000..57787fb7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go @@ -0,0 +1,15 @@ +package images + +import "github.com/gophercloud/gophercloud" + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("images", "detail") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go new file mode 100644 index 00000000..3b0ab783 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go @@ -0,0 +1,115 @@ +/* +Package servers provides information and interaction with the server API +resource in the OpenStack Compute service. + +A server is a virtual machine instance in the compute system. In order for +one to be provisioned, a valid flavor and image are required. 
+
+Example to List Servers
+
+	listOpts := servers.ListOpts{
+		AllTenants: true,
+	}
+
+	allPages, err := servers.List(computeClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServers, err := servers.ExtractServers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, server := range allServers {
+		fmt.Printf("%+v\n", server)
+	}
+
+Example to Create a Server
+
+	createOpts := servers.CreateOpts{
+		Name:      "server_name",
+		ImageRef:  "image-uuid",
+		FlavorRef: "flavor-uuid",
+	}
+
+	server, err := servers.Create(computeClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.Delete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Force Delete a Server
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+	err := servers.ForceDelete(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Reboot a Server
+
+	rebootOpts := servers.RebootOpts{
+		Type: servers.SoftReboot,
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Rebuild a Server
+
+	rebuildOpts := servers.RebuildOpts{
+		Name:    "new_name",
+		ImageID: "image-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	server, err := servers.Rebuild(computeClient, serverID, rebuildOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Resize a Server
+
+	resizeOpts := servers.ResizeOpts{
+		FlavorRef: "flavor-uuid",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+	err = servers.ConfirmResize(computeClient, serverID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Snapshot a Server
+
+	snapshotOpts := servers.CreateImageOpts{
+		Name: "snapshot_name",
+	}
+
+	serverID := "d9072956-1560-487c-97f2-18bdf65ec749"
+
+	image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID()
+	if err != nil {
+		panic(err)
+	}
+*/
+package servers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go
new file mode 100644
index 00000000..c9f0e3c2
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go
@@ -0,0 +1,71 @@
+package servers
+
+import (
+	"fmt"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// ErrNeitherImageIDNorImageNameProvided is the error when neither the image
+// ID nor the image name is provided for a server operation.
+type ErrNeitherImageIDNorImageNameProvided struct{ gophercloud.ErrMissingInput }
+
+func (e ErrNeitherImageIDNorImageNameProvided) Error() string {
+	return "One and only one of the image ID and the image name must be provided."
+}
+
+// ErrNeitherFlavorIDNorFlavorNameProvided is the error when neither the flavor
+// ID nor the flavor name is provided for a server operation.
+type ErrNeitherFlavorIDNorFlavorNameProvided struct{ gophercloud.ErrMissingInput }
+
+func (e ErrNeitherFlavorIDNorFlavorNameProvided) Error() string {
+	return "One and only one of the flavor ID and the flavor name must be provided."
+} + +type ErrNoClientProvidedForIDByName struct{ gophercloud.ErrMissingInput } + +func (e ErrNoClientProvidedForIDByName) Error() string { + return "A service client must be provided to find a resource ID by name." +} + +// ErrInvalidHowParameterProvided is the error when an unknown value is given +// for the `how` argument +type ErrInvalidHowParameterProvided struct{ gophercloud.ErrInvalidInput } + +// ErrNoAdminPassProvided is the error when an administrative password isn't +// provided for a server operation +type ErrNoAdminPassProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoImageIDProvided is the error when an image ID isn't provided for a server +// operation +type ErrNoImageIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoIDProvided is the error when a server ID isn't provided for a server +// operation +type ErrNoIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrServer is a generic error type for servers HTTP operations. +type ErrServer struct { + gophercloud.ErrUnexpectedResponseCode + ID string +} + +func (se ErrServer) Error() string { + return fmt.Sprintf("Error while executing HTTP request for server [%s]", se.ID) +} + +// Error404 overrides the generic 404 error message. +func (se ErrServer) Error404(e gophercloud.ErrUnexpectedResponseCode) error { + se.ErrUnexpectedResponseCode = e + return &ErrServerNotFound{se} +} + +// ErrServerNotFound is the error when a 404 is received during server HTTP +// operations. +type ErrServerNotFound struct { + ErrServer +} + +func (e ErrServerNotFound) Error() string { + return fmt.Sprintf("I couldn't find server [%s]", e.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go new file mode 100644 index 00000000..84ec9f31 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/microversions.go @@ -0,0 +1,11 @@ +package servers + +// ExtractTags will extract the tags of a server. +// This requires the client to be set to microversion 2.26 or later. +func (r serverResult) ExtractTags() ([]string, error) { + var s struct { + Tags []string `json:"tags"` + } + err := r.ExtractInto(&s) + return s.Tags, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go new file mode 100644 index 00000000..ee8e93b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -0,0 +1,812 @@ +package servers + +import ( + "encoding/base64" + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToServerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + // ChangesSince is a time/date stamp for when the server last changed status. 
+ ChangesSince string `q:"changes-since"` + + // Image is the name of the image in URL format. + Image string `q:"image"` + + // Flavor is the name of the flavor in URL format. + Flavor string `q:"flavor"` + + // Name of the server as a string; can be queried with regular expressions. + // Realize that ?name=bob returns both bob and bobb. If you need to match bob + // only, you can use a regular expression matching the syntax of the + // underlying database server implemented for Compute. + Name string `q:"name"` + + // Status is the value of the status of the server so that you can filter on + // "ACTIVE" for example. + Status string `q:"status"` + + // Host is the name of the host as a string. + Host string `q:"host"` + + // Marker is a UUID of the server at which you want to set a marker. + Marker string `q:"marker"` + + // Limit is an integer value for the limit of values to return. + Limit int `q:"limit"` + + // AllTenants is a bool to show all tenants. + AllTenants bool `q:"all_tenants"` + + // TenantID lists servers for a particular tenant. + // Setting "AllTenants = true" is required. + TenantID string `q:"tenant_id"` +} + +// ToServerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list servers accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToServerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToServerCreateMap() (map[string]interface{}, error) +} + +// Network is used within CreateOpts to control a new server's network +// attachments. +type Network struct { + // UUID of a network to attach to the newly provisioned server. + // Required unless Port is provided. + UUID string + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string + + // FixedIP specifies a fixed IPv4 address to be used on this network. + FixedIP string +} + +// Personality is an array of files that are injected into the server at launch. +type Personality []*File + +// File is used within CreateOpts and RebuildOpts to inject a file into the +// server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild +// operation is requested, json.Marshal will call File's MarshalJSON method. +type File struct { + // Path of the file. + Path string + + // Contents of the file. Maximum content size is 255 bytes. + Contents []byte +} + +// MarshalJSON marshals the escaped file, base64 encoding the contents. +func (f *File) MarshalJSON() ([]byte, error) { + file := struct { + Path string `json:"path"` + Contents string `json:"contents"` + }{ + Path: f.Path, + Contents: base64.StdEncoding.EncodeToString(f.Contents), + } + return json.Marshal(file) +} + +// CreateOpts specifies server creation parameters. +type CreateOpts struct { + // Name is the name to assign to the newly launched server. 
+ Name string `json:"name" required:"true"` + + // ImageRef [optional; required if ImageName is not provided] is the ID or + // full URL to the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageRef string `json:"imageRef"` + + // ImageName [optional; required if ImageRef is not provided] is the name of + // the image that contains the server's OS and initial state. + // Also optional if using the boot-from-volume extension. + ImageName string `json:"-"` + + // FlavorRef [optional; required if FlavorName is not provided] is the ID or + // full URL to the flavor that describes the server's specs. + FlavorRef string `json:"flavorRef"` + + // FlavorName [optional; required if FlavorRef is not provided] is the name of + // the flavor that describes the server's specs. + FlavorName string `json:"-"` + + // SecurityGroups lists the names of the security groups to which this server + // should belong. + SecurityGroups []string `json:"-"` + + // UserData contains configuration information or scripts to use upon launch. + // Create will base64-encode it for you, if it isn't already. + UserData []byte `json:"-"` + + // AvailabilityZone in which to launch the server. + AvailabilityZone string `json:"availability_zone,omitempty"` + + // Networks dictates how this server will be attached to available networks. + // By default, the server will be attached to all isolated networks for the + // tenant. + Networks []Network `json:"-"` + + // Metadata contains key-value pairs (up to 255 bytes each) to attach to the + // server. + Metadata map[string]string `json:"metadata,omitempty"` + + // Personality includes files to inject into the server at launch. + // Create will base64-encode file contents for you. + Personality Personality `json:"personality,omitempty"` + + // ConfigDrive enables metadata injection through a configuration drive. + ConfigDrive *bool `json:"config_drive,omitempty"` + + // AdminPass sets the root user password. If not set, a randomly-generated + // password will be created and returned in the response. + AdminPass string `json:"adminPass,omitempty"` + + // AccessIPv4 specifies an IPv4 address for the instance. + AccessIPv4 string `json:"accessIPv4,omitempty"` + + // AccessIPv6 specifies an IPv6 address for the instance. + AccessIPv6 string `json:"accessIPv6,omitempty"` + + // Min specifies Minimum number of servers to launch. + Min int `json:"min_count,omitempty"` + + // Max specifies Maximum number of servers to launch. + Max int `json:"max_count,omitempty"` + + // ServiceClient will allow calls to be made to retrieve an image or + // flavor ID by name. + ServiceClient *gophercloud.ServiceClient `json:"-"` + + // Tags allows a server to be tagged with single-word metadata. + // Requires microversion 2.52 or later. + Tags []string `json:"tags,omitempty"` +} + +// ToServerCreateMap assembles a request body based on the contents of a +// CreateOpts. 
+func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { + sc := opts.ServiceClient + opts.ServiceClient = nil + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.UserData != nil { + var userData string + if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil { + userData = base64.StdEncoding.EncodeToString(opts.UserData) + } else { + userData = string(opts.UserData) + } + b["user_data"] = &userData + } + + if len(opts.SecurityGroups) > 0 { + securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups)) + for i, groupName := range opts.SecurityGroups { + securityGroups[i] = map[string]interface{}{"name": groupName} + } + b["security_groups"] = securityGroups + } + + if len(opts.Networks) > 0 { + networks := make([]map[string]interface{}, len(opts.Networks)) + for i, net := range opts.Networks { + networks[i] = make(map[string]interface{}) + if net.UUID != "" { + networks[i]["uuid"] = net.UUID + } + if net.Port != "" { + networks[i]["port"] = net.Port + } + if net.FixedIP != "" { + networks[i]["fixed_ip"] = net.FixedIP + } + } + b["networks"] = networks + } + + // If ImageRef isn't provided, check if ImageName was provided to ascertain + // the image ID. + if opts.ImageRef == "" { + if opts.ImageName != "" { + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + imageID, err := images.IDFromName(sc, opts.ImageName) + if err != nil { + return nil, err + } + b["imageRef"] = imageID + } + } + + // If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID. + if opts.FlavorRef == "" { + if opts.FlavorName == "" { + err := ErrNeitherFlavorIDNorFlavorNameProvided{} + err.Argument = "FlavorRef/FlavorName" + return nil, err + } + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + flavorID, err := flavors.IDFromName(sc, opts.FlavorName) + if err != nil { + return nil, err + } + b["flavorRef"] = flavorID + } + + if opts.Min != 0 { + b["min_count"] = opts.Min + } + + if opts.Max != 0 { + b["max_count"] = opts.Max + } + + return map[string]interface{}{"server": b}, nil +} + +// Create requests a server to be provisioned to the user in the current tenant. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + reqBody, err := opts.ToServerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(listURL(client), reqBody, &r.Body, nil) + return +} + +// Delete requests that a server previously provisioned be removed from your +// account. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ForceDelete forces the deletion of a server. +func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil) + return +} + +// Get requests details on a single server, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. 
+type UpdateOptsBuilder interface {
+	ToServerUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// server.
+type UpdateOpts struct {
+	// Name changes the displayed name of the server.
+	// The server host name will *not* change.
+	// Server names are not constrained to be unique, even within the same tenant.
+	Name string `json:"name,omitempty"`
+
+	// AccessIPv4 provides a new IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 provides a new IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+}
+
+// ToServerUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToServerUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "server")
+}
+
+// Update requests that various attributes of the indicated server be changed.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToServerUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ChangeAdminPassword alters the administrator or root password for a specified
+// server.
+func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) {
+	b := map[string]interface{}{
+		"changePassword": map[string]string{
+			"adminPass": newPassword,
+		},
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// RebootMethod describes the mechanisms by which a server reboot can be requested.
+type RebootMethod string
+
+// These constants determine how a server should be rebooted.
+// See the Reboot() function for further details.
+const (
+	SoftReboot RebootMethod = "SOFT"
+	HardReboot RebootMethod = "HARD"
+	OSReboot                = SoftReboot
+	PowerCycle              = HardReboot
+)
+
+// RebootOptsBuilder allows extensions to add additional parameters to the
+// reboot request.
+type RebootOptsBuilder interface {
+	ToServerRebootMap() (map[string]interface{}, error)
+}
+
+// RebootOpts provides options to the reboot request.
+type RebootOpts struct {
+	// Type is the type of reboot to perform on the server.
+	Type RebootMethod `json:"type" required:"true"`
+}
+
+// ToServerRebootMap builds a body for the reboot request.
+func (opts RebootOpts) ToServerRebootMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "reboot")
+}
+
+/*
+	Reboot requests that a given server reboot.
+
+	Two methods exist for rebooting a server:
+
+	HardReboot (aka PowerCycle) starts the server instance by physically cutting
+	power to the machine, or if a VM, terminating it at the hypervisor level.
+	It's done. Caput. Full stop.
+	Then, after a brief while, power is restored or the VM instance restarted.
+
+	SoftReboot (aka OSReboot) simply tells the OS to restart under its own
+	procedure.
+	E.g., in Linux, asking it to enter runlevel 6, or executing
+	"sudo shutdown -r now", or by asking Windows to restart the machine.
+*/
+func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) {
+	b, err := opts.ToServerRebootMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
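Pulling together the create-time behaviors described earlier in this file (UserData is base64-encoded on marshal, and image/flavor names are resolved to IDs through the embedded ServiceClient), here is a hedged sketch; every literal below is a placeholder and `client` is assumed to be an authenticated compute client:

```go
createOpts := servers.CreateOpts{
	Name:       "example-server",
	FlavorName: "m1.small", // resolved to an ID via flavors.IDFromName
	ImageName:  "cirros",   // resolved to an ID via images.IDFromName
	UserData:   []byte("#cloud-config\nhostname: example\n"), // base64-encoded for you
	Networks: []servers.Network{
		{UUID: "network-uuid"},
	},
	ServiceClient: client, // required for the name lookups above
}

server, err := servers.Create(client, createOpts).Extract()
if err != nil {
	panic(err)
}
fmt.Println(server.ID)
```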
+// RebuildOptsBuilder allows extensions to provide additional parameters to the
+// rebuild request.
+type RebuildOptsBuilder interface {
+	ToServerRebuildMap() (map[string]interface{}, error)
+}
+
+// RebuildOpts represents the configuration options used in a server rebuild
+// operation.
+type RebuildOpts struct {
+	// AdminPass is the server's admin password.
+	AdminPass string `json:"adminPass,omitempty"`
+
+	// ImageID is the ID of the image you want your server to be provisioned on.
+	ImageID string `json:"imageRef"`
+
+	// ImageName is the readable name of an image.
+	ImageName string `json:"-"`
+
+	// Name to set the server to.
+	Name string `json:"name,omitempty"`
+
+	// AccessIPv4 [optional] provides a new IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 [optional] provides a new IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// Metadata [optional] contains key-value pairs (up to 255 bytes each)
+	// to attach to the server.
+	Metadata map[string]string `json:"metadata,omitempty"`
+
+	// Personality [optional] includes files to inject into the server at launch.
+	// Rebuild will base64-encode file contents for you.
+	Personality Personality `json:"personality,omitempty"`
+
+	// ServiceClient will allow calls to be made to retrieve an image or
+	// flavor ID by name.
+	ServiceClient *gophercloud.ServiceClient `json:"-"`
+}
+
+// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON.
+func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	// If ImageID isn't provided, check if ImageName was provided to ascertain
+	// the image ID.
+	if opts.ImageID == "" {
+		if opts.ImageName != "" {
+			if opts.ServiceClient == nil {
+				err := ErrNoClientProvidedForIDByName{}
+				err.Argument = "ServiceClient"
+				return nil, err
+			}
+			imageID, err := images.IDFromName(opts.ServiceClient, opts.ImageName)
+			if err != nil {
+				return nil, err
+			}
+			b["imageRef"] = imageID
+		}
+	}
+
+	return map[string]interface{}{"rebuild": b}, nil
+}
+
+// Rebuild will reprovision the server according to the configuration options
+// provided in the RebuildOpts struct.
+func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) (r RebuildResult) {
+	b, err := opts.ToServerRebuildMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, &r.Body, nil)
+	return
+}
+
+// ResizeOptsBuilder allows extensions to add additional parameters to the
+// resize request.
+type ResizeOptsBuilder interface {
+	ToServerResizeMap() (map[string]interface{}, error)
+}
+
+// ResizeOpts represents the configuration options used to control a Resize
+// operation.
+type ResizeOpts struct {
+	// FlavorRef is the ID of the flavor you wish your server to become.
+	FlavorRef string `json:"flavorRef" required:"true"`
+}
+
+// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON
+// request body for the Resize request.
+func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "resize")
+}
+
+// Resize instructs the provider to change the flavor of the server.
+//
+// Note that this implies rebuilding it.
+//
+// Unfortunately, one cannot pass rebuild parameters to the resize function.
+// When the resize completes, the server will be in VERIFY_RESIZE state.
+// While in this state, you can explore the use of the new server's
+// configuration. If you like it, call ConfirmResize() to commit the resize
+// permanently. Otherwise, call RevertResize() to restore the old configuration.
+func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) {
+	b, err := opts.ToServerResizeMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// ConfirmResize confirms a previous resize operation on a server.
+// See Resize() for more details.
+func ConfirmResize(client *gophercloud.ServiceClient, id string) (r ActionResult) {
+	_, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"confirmResize": nil}, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{201, 202, 204},
+	})
+	return
+}
+
+// RevertResize cancels a previous resize operation on a server.
+// See Resize() for more details.
+func RevertResize(client *gophercloud.ServiceClient, id string) (r ActionResult) {
+	_, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"revertResize": nil}, nil, nil)
+	return
+}
+
+// ResetMetadataOptsBuilder allows extensions to add additional parameters to
+// the Reset request.
+type ResetMetadataOptsBuilder interface {
+	ToMetadataResetMap() (map[string]interface{}, error)
+}
+
+// MetadataOpts is a map that contains key-value pairs.
+type MetadataOpts map[string]string
+
+// ToMetadataResetMap assembles a body for a Reset request based on the contents
+// of a MetadataOpts.
+func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// ToMetadataUpdateMap assembles a body for an Update request based on the
+// contents of a MetadataOpts.
+func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// ResetMetadata will create multiple new key-value pairs for the given server
+// ID.
+// Note: Using this operation will erase any already-existing metadata and
+// create the new metadata provided. To keep any already-existing metadata,
+// use the UpdateMetadata or CreateMetadatum functions.
+func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) {
+	b, err := opts.ToMetadataResetMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Metadata requests all the metadata for the given server ID.
+func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult) {
+	_, r.Err = client.Get(metadataURL(client, id), &r.Body, nil)
+	return
+}
+
+// UpdateMetadataOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type UpdateMetadataOptsBuilder interface {
+	ToMetadataUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMetadata updates (or creates) all the metadata specified by opts for
+// the given server ID. This operation does not affect already-existing metadata
+// that is not specified by opts.
+func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) {
+	b, err := opts.ToMetadataUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// MetadatumOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type MetadatumOptsBuilder interface {
+	ToMetadatumCreateMap() (map[string]interface{}, string, error)
+}
+
+// MetadatumOpts is a map of length one that contains a key-value pair.
+type MetadatumOpts map[string]string
+
+// ToMetadatumCreateMap assembles a body for a Create request based on the
+// contents of a MetadatumOpts.
+func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) {
+	if len(opts) != 1 {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "servers.MetadatumOpts"
+		err.Info = "Must have 1 and only 1 key-value pair"
+		return nil, "", err
+	}
+	metadatum := map[string]interface{}{"meta": opts}
+	var key string
+	for k := range metadatum["meta"].(MetadatumOpts) {
+		key = k
+	}
+	return metadatum, key, nil
+}
+
+// CreateMetadatum will create or update the key-value pair with the given key
+// for the given server ID.
+func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) {
+	b, key, err := opts.ToMetadatumCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadatumURL(client, id, key), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Metadatum requests the key-value pair with the given key for the given
+// server ID.
+func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) {
+	_, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil)
+	return
+}
+
+// DeleteMetadatum will delete the key-value pair with the given key for the
+// given server ID.
+func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) {
+	_, r.Err = client.Delete(metadatumURL(client, id, key), nil)
+	return
+}
+
+// ListAddresses makes a request against the API to list the server's IP
+// addresses.
+func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager {
+	return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page {
+		return AddressPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// ListAddressesByNetwork makes a request against the API to list the server's IP
+// addresses for the given network.
+func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager {
+	return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page {
+		return NetworkAddressPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// CreateImageOptsBuilder allows extensions to add additional parameters to the
+// CreateImage request.
+type CreateImageOptsBuilder interface {
+	ToServerCreateImageMap() (map[string]interface{}, error)
+}
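The three metadata writers above differ in scope: ResetMetadata replaces the whole set, UpdateMetadata merges into it, and CreateMetadatum touches a single pair. A minimal sketch with a placeholder server ID, assuming a compute `client`:

```go
// Replace any existing metadata with exactly this set.
md, err := servers.ResetMetadata(client, "server-uuid", servers.MetadataOpts{
	"role": "web",
}).Extract()
if err != nil {
	panic(err)
}

// Merge in another key; "role" is left untouched.
md, err = servers.UpdateMetadata(client, "server-uuid", servers.MetadataOpts{
	"tier": "frontend",
}).Extract()
if err != nil {
	panic(err)
}

// Set one key-value pair; MetadatumOpts must hold exactly one entry.
md, err = servers.CreateMetadatum(client, "server-uuid", servers.MetadatumOpts{
	"owner": "ops",
}).Extract()
if err != nil {
	panic(err)
}
fmt.Printf("%+v\n", md)
```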
+// CreateImageOpts provides options to pass to the CreateImage request.
+type CreateImageOpts struct {
+	// Name of the image/snapshot.
+	Name string `json:"name" required:"true"`
+
+	// Metadata contains key-value pairs (up to 255 bytes each) to attach to
+	// the created image.
+	Metadata map[string]string `json:"metadata,omitempty"`
+}
+
+// ToServerCreateImageMap formats a CreateImageOpts structure into a request
+// body.
+func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "createImage")
+}
+
+// CreateImage makes a request against the nova API to schedule an image to be
+// created of the server.
+func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) {
+	b, err := opts.ToServerCreateImageMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	resp, err := client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	r.Err = err
+	r.Header = resp.Header
+	return
+}
+
+// IDFromName is a convenience function that returns a server's ID given its
+// name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+
+	listOpts := ListOpts{
+		Name: name,
+	}
+
+	allPages, err := List(client, listOpts).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractServers(allPages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, f := range all {
+		if f.Name == name {
+			count++
+			id = f.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "server"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "server"}
+	}
+}
+
+// GetPassword makes a request against the nova API to get the encrypted
+// administrative password.
+func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) {
+	_, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil)
+	return
+}
+
+// ShowConsoleOutputOptsBuilder is the interface types must satisfy in order to be
+// used as ShowConsoleOutput options.
+type ShowConsoleOutputOptsBuilder interface {
+	ToServerShowConsoleOutputMap() (map[string]interface{}, error)
+}
+
+// ShowConsoleOutputOpts satisfies the ShowConsoleOutputOptsBuilder interface.
+type ShowConsoleOutputOpts struct {
+	// The number of lines to fetch from the end of console log.
+	// All lines will be returned if this is not specified.
+	Length int `json:"length,omitempty"`
+}
+
+// ToServerShowConsoleOutputMap formats a ShowConsoleOutputOpts structure into a request body.
+func (opts ShowConsoleOutputOpts) ToServerShowConsoleOutputMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "os-getConsoleOutput")
+}
+
+// ShowConsoleOutput makes a request against the nova API to get the console log
+// from the server.
+func ShowConsoleOutput(client *gophercloud.ServiceClient, id string, opts ShowConsoleOutputOptsBuilder) (r ShowConsoleOutputResult) {
+	b, err := opts.ToServerShowConsoleOutputMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
new file mode 100644
index 00000000..f973d1ea
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go
@@ -0,0 +1,414 @@
+package servers
+
+import (
+	"crypto/rsa"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"path"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type serverResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any serverResult as a Server, if possible.
+func (r serverResult) Extract() (*Server, error) {
+	var s Server
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+func (r serverResult) ExtractInto(v interface{}) error {
+	return r.Result.ExtractIntoStructPtr(v, "server")
+}
+
+func ExtractServersInto(r pagination.Page, v interface{}) error {
+	return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers")
+}
+
+// CreateResult is the response from a Create operation. Call its Extract
+// method to interpret it as a Server.
+type CreateResult struct {
+	serverResult
+}
+
+// GetResult is the response from a Get operation. Call its Extract
+// method to interpret it as a Server.
+type GetResult struct {
+	serverResult
+}
+
+// UpdateResult is the response from an Update operation. Call its Extract
+// method to interpret it as a Server.
+type UpdateResult struct {
+	serverResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// RebuildResult is the response from a Rebuild operation. Call its Extract
+// method to interpret it as a Server.
+type RebuildResult struct {
+	serverResult
+}
+
+// ActionResult represents the result of server action operations, like reboot.
+// Call its ExtractErr method to determine if the action succeeded or failed.
+type ActionResult struct {
+	gophercloud.ErrResult
+}
+
+// CreateImageResult is the response from a CreateImage operation. Call its
+// ExtractImageID method to retrieve the ID of the newly created image.
+type CreateImageResult struct {
+	gophercloud.Result
+}
+
+// ShowConsoleOutputResult represents the result of console output from a server.
+type ShowConsoleOutputResult struct {
+	gophercloud.Result
+}
+
+// Extract will return the console output from a ShowConsoleOutput request.
+func (r ShowConsoleOutputResult) Extract() (string, error) {
+	var s struct {
+		Output string `json:"output"`
+	}
+
+	err := r.ExtractInto(&s)
+	return s.Output, err
+}
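How the two diagnostic helpers above are consumed; the server ID is a placeholder, and passing nil to ExtractPassword (defined just below) returns the password still encrypted:

```go
// Fetch the last 50 lines of the console log.
output, err := servers.ShowConsoleOutput(client, "server-uuid", servers.ShowConsoleOutputOpts{
	Length: 50,
}).Extract()
if err != nil {
	panic(err)
}
fmt.Println(output)

// Fetch the admin password; with a nil private key it stays encrypted.
encrypted, err := servers.GetPassword(client, "server-uuid").ExtractPassword(nil)
if err != nil {
	panic(err)
}
fmt.Println(encrypted)
```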
+// GetPasswordResult represents the result of a get os-server-password operation.
+// Call its ExtractPassword method to retrieve the password.
+type GetPasswordResult struct {
+	gophercloud.Result
+}
+
+// ExtractPassword gets the encrypted password.
+// If privateKey != nil the password is decrypted with the private key.
+// If privateKey == nil the encrypted password is returned and can be decrypted
+// with:
+//   echo '<pw>' | base64 -D | openssl rsautl -decrypt -inkey <private_key>
+func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) {
+	var s struct {
+		Password string `json:"password"`
+	}
+	err := r.ExtractInto(&s)
+	if err == nil && privateKey != nil && s.Password != "" {
+		return decryptPassword(s.Password, privateKey)
+	}
+	return s.Password, err
+}
+
+func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) {
+	b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword)))
+
+	n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword))
+	if err != nil {
+		return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err)
+	}
+	password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n])
+	if err != nil {
+		return "", fmt.Errorf("Failed to decrypt password: %s", err)
+	}
+
+	return string(password), nil
+}
+
+// ExtractImageID gets the ID of the newly created server image from the header.
+func (r CreateImageResult) ExtractImageID() (string, error) {
+	if r.Err != nil {
+		return "", r.Err
+	}
+	// Get the image id from the header
+	u, err := url.ParseRequestURI(r.Header.Get("Location"))
+	if err != nil {
+		return "", err
+	}
+	imageID := path.Base(u.Path)
+	if imageID == "." || imageID == "/" {
+		return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u)
+	}
+	return imageID, nil
+}
+
+// Server represents a server/instance in the OpenStack cloud.
+type Server struct {
+	// ID uniquely identifies this server amongst all other servers,
+	// including those not accessible to the current tenant.
+	ID string `json:"id"`
+
+	// TenantID identifies the tenant owning this server resource.
+	TenantID string `json:"tenant_id"`
+
+	// UserID uniquely identifies the user account owning the tenant.
+	UserID string `json:"user_id"`
+
+	// Name contains the human-readable name for the server.
+	Name string `json:"name"`
+
+	// Updated and Created contain ISO-8601 timestamps of when the state of the
+	// server last changed, and when it was created.
+	Updated time.Time `json:"updated"`
+	Created time.Time `json:"created"`
+
+	// HostID is the host where the server is located in the cloud.
+	HostID string `json:"hostid"`
+
+	// Status contains the current operational status of the server,
+	// such as IN_PROGRESS or ACTIVE.
+	Status string `json:"status"`
+
+	// Progress ranges from 0..100.
+	// A request made against the server completes only once Progress reaches 100.
+	Progress int `json:"progress"`
+
+	// AccessIPv4 and AccessIPv6 contain the IP addresses of the server,
+	// suitable for remote access for administration.
+	AccessIPv4 string `json:"accessIPv4"`
+	AccessIPv6 string `json:"accessIPv6"`
+
+	// Image refers to a JSON object, which itself indicates the OS image used to
+	// deploy the server.
+	Image map[string]interface{} `json:"-"`
+
+	// Flavor refers to a JSON object, which itself indicates the hardware
+	// configuration of the deployed server.
+	Flavor map[string]interface{} `json:"flavor"`
+
+	// Addresses includes a list of all IP addresses assigned to the server,
+	// keyed by pool.
+ Addresses map[string]interface{} `json:"addresses"` + + // Metadata includes a list of all user-specified key-value pairs attached + // to the server. + Metadata map[string]string `json:"metadata"` + + // Links includes HTTP references to the itself, useful for passing along to + // other APIs that might want a server reference. + Links []interface{} `json:"links"` + + // KeyName indicates which public key was injected into the server on launch. + KeyName string `json:"key_name"` + + // AdminPass will generally be empty (""). However, it will contain the + // administrative password chosen when provisioning a new server without a + // set AdminPass setting in the first place. + // Note that this is the ONLY time this field will be valid. + AdminPass string `json:"adminPass"` + + // SecurityGroups includes the security groups that this instance has applied + // to it. + SecurityGroups []map[string]interface{} `json:"security_groups"` + + // Fault contains failure information about a server. + Fault Fault `json:"fault"` +} + +type Fault struct { + Code int `json:"code"` + Created time.Time `json:"created"` + Details string `json:"details"` + Message string `json:"message"` +} + +func (r *Server) UnmarshalJSON(b []byte) error { + type tmp Server + var s struct { + tmp + Image interface{} `json:"image"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Server(s.tmp) + + switch t := s.Image.(type) { + case map[string]interface{}: + r.Image = t + case string: + switch t { + case "": + r.Image = nil + } + } + + return err +} + +// ServerPage abstracts the raw results of making a List() request against +// the API. As OpenStack extensions may freely alter the response bodies of +// structures returned to the client, you may only safely access the data +// provided through the ExtractServers call. +type ServerPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a page contains no Server results. +func (r ServerPage) IsEmpty() (bool, error) { + s, err := ExtractServers(r) + return len(s) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r ServerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"servers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractServers interprets the results of a single page from a List() call, +// producing a slice of Server entities. +func ExtractServers(r pagination.Page) ([]Server, error) { + var s []Server + err := ExtractServersInto(r, &s) + return s, err +} + +// MetadataResult contains the result of a call for (potentially) multiple +// key-value pairs. Call its Extract method to interpret it as a +// map[string]interface. +type MetadataResult struct { + gophercloud.Result +} + +// GetMetadataResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetMetadataResult struct { + MetadataResult +} + +// ResetMetadataResult contains the result of a Reset operation. Call its +// Extract method to interpret it as a map[string]interface. +type ResetMetadataResult struct { + MetadataResult +} + +// UpdateMetadataResult contains the result of an Update operation. Call its +// Extract method to interpret it as a map[string]interface. 
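+//
+// For example, a sketch assuming the package's UpdateMetadata request helper
+// and MetadataOpts map type (neither is shown in this hunk), with "client" a
+// configured compute ServiceClient:
+//
+//	meta, err := servers.UpdateMetadata(client, serverID, servers.MetadataOpts{
+//		"environment": "staging",
+//	}).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", meta)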
+type UpdateMetadataResult struct {
+	MetadataResult
+}
+
+// MetadatumResult contains the result of a call for an individual key-value
+// pair.
+type MetadatumResult struct {
+	gophercloud.Result
+}
+
+// GetMetadatumResult contains the result of a Get operation. Call its Extract
+// method to interpret it as a map[string]interface.
+type GetMetadatumResult struct {
+	MetadatumResult
+}
+
+// CreateMetadatumResult contains the result of a Create operation. Call its
+// Extract method to interpret it as a map[string]interface.
+type CreateMetadatumResult struct {
+	MetadatumResult
+}
+
+// DeleteMetadatumResult contains the result of a Delete operation. Call its
+// ExtractErr method to determine if the call succeeded or failed.
+type DeleteMetadatumResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract interprets any MetadataResult as a Metadata, if possible.
+func (r MetadataResult) Extract() (map[string]string, error) {
+	var s struct {
+		Metadata map[string]string `json:"metadata"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Metadata, err
+}
+
+// Extract interprets any MetadatumResult as a Metadatum, if possible.
+func (r MetadatumResult) Extract() (map[string]string, error) {
+	var s struct {
+		Metadatum map[string]string `json:"meta"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Metadatum, err
+}
+
+// Address represents an IP address.
+type Address struct {
+	Version int    `json:"version"`
+	Address string `json:"addr"`
+}
+
+// AddressPage abstracts the raw results of making a ListAddresses() request
+// against the API. As OpenStack extensions may freely alter the response bodies
+// of structures returned to the client, you may only safely access the data
+// provided through the ExtractAddresses call.
+type AddressPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if an AddressPage contains no addresses.
+func (r AddressPage) IsEmpty() (bool, error) {
+	addresses, err := ExtractAddresses(r)
+	return len(addresses) == 0, err
+}
+
+// ExtractAddresses interprets the results of a single page from a
+// ListAddresses() call, producing a map of addresses.
+func ExtractAddresses(r pagination.Page) (map[string][]Address, error) {
+	var s struct {
+		Addresses map[string][]Address `json:"addresses"`
+	}
+	err := (r.(AddressPage)).ExtractInto(&s)
+	return s.Addresses, err
+}
+
+// NetworkAddressPage abstracts the raw results of making a
+// ListAddressesByNetwork() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures
+// returned to the client, you may only safely access the data provided through
+// the ExtractAddresses call.
+type NetworkAddressPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty returns true if a NetworkAddressPage contains no addresses.
+func (r NetworkAddressPage) IsEmpty() (bool, error) {
+	addresses, err := ExtractNetworkAddresses(r)
+	return len(addresses) == 0, err
+}
+
+// ExtractNetworkAddresses interprets the results of a single page from a
+// ListAddressesByNetwork() call, producing a slice of addresses.
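+//
+// For example, a sketch assuming the package's ListAddressesByNetwork pager
+// (its URL helper appears in urls.go below) and a pool named "public":
+//
+//	pager := servers.ListAddressesByNetwork(client, serverID, "public")
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		addrs, err := servers.ExtractNetworkAddresses(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, a := range addrs {
+//			fmt.Printf("IPv%d: %s\n", a.Version, a.Address)
+//		}
+//		return true, nil
+//	})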
+func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { + var s map[string][]Address + err := (r.(NetworkAddressPage)).ExtractInto(&s) + if err != nil { + return nil, err + } + + var key string + for k := range s { + key = k + } + + return s[key], err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go new file mode 100644 index 00000000..e892e8d9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go @@ -0,0 +1,51 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers") +} + +func listURL(client *gophercloud.ServiceClient) string { + return createURL(client) +} + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers", "detail") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("servers", id, "metadata", key) +} + +func metadataURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "metadata") +} + +func listAddressesURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "ips") +} + +func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { + return client.ServiceURL("servers", id, "ips", network) +} + +func passwordURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "os-server-password") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go new file mode 100644 index 00000000..cadef054 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go @@ -0,0 +1,21 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +// WaitForStatus will continually poll a server until it successfully +// transitions to a specified status. It will do this for at most the number +// of seconds specified. 
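+//
+// For example (assuming a configured compute ServiceClient in "client" and a
+// server ID in "serverID"), waiting up to five minutes for a build to finish:
+//
+//	err := servers.WaitForStatus(client, serverID, "ACTIVE", 300)
+//	if err != nil {
+//		panic(err)
+//	}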
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
+	return gophercloud.WaitFor(secs, func() (bool, error) {
+		current, err := Get(c, id).Extract()
+		if err != nil {
+			return false, err
+		}
+
+		if current.Status == status {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go
new file mode 100644
index 00000000..3fd28d10
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go
@@ -0,0 +1,101 @@
+/*
+Package clusters contains functionality for working with Magnum Cluster resources.
+
+Example to Create a Cluster
+
+	masterCount := 1
+	nodeCount := 1
+	createTimeout := 30
+	createOpts := clusters.CreateOpts{
+		ClusterTemplateID: "0562d357-8641-4759-8fed-8173f02c9633",
+		CreateTimeout:     &createTimeout,
+		DiscoveryURL:      "",
+		FlavorID:          "m1.small",
+		Keypair:           "my_keypair",
+		Labels:            map[string]string{},
+		MasterCount:       &masterCount,
+		MasterFlavorID:    "m1.small",
+		Name:              "k8s",
+		NodeCount:         &nodeCount,
+	}
+
+	cluster, err := clusters.Create(serviceClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get a Cluster
+
+	clusterName := "cluster123"
+	cluster, err := clusters.Get(serviceClient, clusterName).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", cluster)
+
+Example to List Clusters
+
+	listOpts := clusters.ListOpts{
+		Limit: 20,
+	}
+
+	allPages, err := clusters.List(serviceClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allClusters, err := clusters.ExtractClusters(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, cluster := range allClusters {
+		fmt.Printf("%+v\n", cluster)
+	}
+
+Example to List Clusters with detailed information
+
+	allPagesDetail, err := clusters.ListDetail(serviceClient, clusters.ListOpts{}).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allClustersDetail, err := clusters.ExtractClusters(allPagesDetail)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, clusterDetail := range allClustersDetail {
+		fmt.Printf("%+v\n", clusterDetail)
+	}
+
+Example to Update a Cluster
+
+	updateOpts := []clusters.UpdateOptsBuilder{
+		clusters.UpdateOpts{
+			Op:    clusters.ReplaceOp,
+			Path:  "/master_lb_enabled",
+			Value: "True",
+		},
+		clusters.UpdateOpts{
+			Op:    clusters.ReplaceOp,
+			Path:  "/registry_enabled",
+			Value: "True",
+		},
+	}
+	clusterUUID, err := clusters.Update(serviceClient, clusterUUID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s\n", clusterUUID)
+
+Example to Delete a Cluster
+
+	clusterUUID := "dc6d336e3fc4c0a951b5698cd1236ee"
+	err := clusters.Delete(serviceClient, clusterUUID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package clusters
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go
new file mode 100644
index 00000000..5d519801
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go
@@ -0,0 +1,214 @@
+package clusters
+
+import (
+	"net/http"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
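+//
+// Because Create accepts the interface rather than the concrete struct, a
+// caller can wrap CreateOpts to inject extra fields. A hypothetical sketch
+// ("LabeledCreateOpts" is illustrative only, not part of the package):
+//
+//	type LabeledCreateOpts struct {
+//		clusters.CreateOpts
+//	}
+//
+//	func (opts LabeledCreateOpts) ToClusterCreateMap() (map[string]interface{}, error) {
+//		b, err := opts.CreateOpts.ToClusterCreateMap()
+//		if err != nil {
+//			return nil, err
+//		}
+//		b["labels"] = map[string]string{"managed-by": "terraform"}
+//		return b, nil
+//	}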
+type CreateOptsBuilder interface { + ToClusterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts params +type CreateOpts struct { + ClusterTemplateID string `json:"cluster_template_id" required:"true"` + CreateTimeout *int `json:"create_timeout"` + DiscoveryURL string `json:"discovery_url,omitempty"` + DockerVolumeSize *int `json:"docker_volume_size,omitempty"` + FlavorID string `json:"flavor_id,omitempty"` + Keypair string `json:"keypair,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + MasterCount *int `json:"master_count,omitempty"` + MasterFlavorID string `json:"master_flavor_id,omitempty"` + Name string `json:"name"` + NodeCount *int `json:"node_count,omitempty"` +} + +// ToClusterCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new cluster. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToClusterCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Get retrieves a specific clusters based on its unique ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// Delete deletes the specified cluster ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(deleteURL(client, id), nil) + r.Header = result.Header + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToClustersListQuery() (string, error) +} + +// ListOpts allows the sorting of paginated collections through +// the API. SortKey allows you to sort by a particular cluster attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToClustersListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToClustersListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// clusters. It accepts a ListOptsBuilder, which allows you to sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToClustersListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ClusterPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// ListDetail returns a Pager which allows you to iterate over a collection of +// clusters with detailed information. +// It accepts a ListOptsBuilder, which allows you to sort the returned +// collection for greater efficiency. 
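+//
+// For example, streaming pages with EachPage instead of collecting AllPages
+// (a sketch; "serviceClient" is assumed to be configured for container-infra):
+//
+//	err := clusters.ListDetail(serviceClient, clusters.ListOpts{Limit: 20}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			cs, err := clusters.ExtractClusters(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			for _, c := range cs {
+//				fmt.Printf("%s: %s\n", c.UUID, c.Status)
+//			}
+//			return true, nil
+//		})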
+func ListDetail(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(c) + if opts != nil { + query, err := opts.ToClustersListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ClusterPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type UpdateOp string + +const ( + AddOp UpdateOp = "add" + RemoveOp UpdateOp = "remove" + ReplaceOp UpdateOp = "replace" +) + +type UpdateOpts struct { + Op UpdateOp `json:"op" required:"true"` + Path string `json:"path" required:"true"` + Value interface{} `json:"value,omitempty"` +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToClustersUpdateMap() (map[string]interface{}, error) +} + +// ToClusterUpdateMap assembles a request body based on the contents of +// UpdateOpts. +func (opts UpdateOpts) ToClustersUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Update implements cluster updated request. +func Update(client *gophercloud.ServiceClient, id string, opts []UpdateOptsBuilder) (r UpdateResult) { + var o []map[string]interface{} + for _, opt := range opts { + b, err := opt.ToClustersUpdateMap() + if err != nil { + r.Err = err + return r + } + o = append(o, b) + } + + var result *http.Response + result, r.Err = client.Patch(updateURL(client, id), o, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} + +// ResizeOptsBuilder allows extensions to add additional parameters to the +// Resize request. +type ResizeOptsBuilder interface { + ToClusterResizeMap() (map[string]interface{}, error) +} + +// ResizeOpts params +type ResizeOpts struct { + NodeCount *int `json:"node_count" required:"true"` + NodesToRemove []string `json:"nodes_to_remove,omitempty"` + NodeGroup string `json:"nodegroup,omitempty"` +} + +// ToClusterResizeMap constructs a request body from ResizeOpts. +func (opts ResizeOpts) ToClusterResizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Resize an existing cluster node count. +func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ResizeResult) { + b, err := opts.ToClusterResizeMap() + if err != nil { + r.Err = err + return + } + + var result *http.Response + result, r.Err = client.Post(resizeURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go new file mode 100644 index 00000000..b4a8734c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go @@ -0,0 +1,119 @@ +package clusters + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. 
Call its Extract or ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// Extract is a function that accepts a result and extracts a cluster resource. +func (r commonResult) Extract() (*Cluster, error) { + var s *Cluster + err := r.ExtractInto(&s) + return s, err +} + +// UpdateResult is the response of a Update operations. +type UpdateResult struct { + commonResult +} + +// ResizeResult is the response of a Resize operations. +type ResizeResult struct { + commonResult +} + +func (r CreateResult) Extract() (string, error) { + var s struct { + UUID string + } + err := r.ExtractInto(&s) + return s.UUID, err +} + +func (r UpdateResult) Extract() (string, error) { + var s struct { + UUID string + } + err := r.ExtractInto(&s) + return s.UUID, err +} + +type Cluster struct { + APIAddress string `json:"api_address"` + COEVersion string `json:"coe_version"` + ClusterTemplateID string `json:"cluster_template_id"` + ContainerVersion string `json:"container_version"` + CreateTimeout int `json:"create_timeout"` + CreatedAt time.Time `json:"created_at"` + DiscoveryURL string `json:"discovery_url"` + DockerVolumeSize int `json:"docker_volume_size"` + Faults map[string]string `json:"faults"` + FlavorID string `json:"flavor_id"` + KeyPair string `json:"keypair"` + Labels map[string]string `json:"labels"` + Links []gophercloud.Link `json:"links"` + MasterFlavorID string `json:"master_flavor_id"` + MasterAddresses []string `json:"master_addresses"` + MasterCount int `json:"master_count"` + Name string `json:"name"` + NodeAddresses []string `json:"node_addresses"` + NodeCount int `json:"node_count"` + ProjectID string `json:"project_id"` + StackID string `json:"stack_id"` + Status string `json:"status"` + StatusReason string `json:"status_reason"` + UUID string `json:"uuid"` + UpdatedAt time.Time `json:"updated_at"` + UserID string `json:"user_id"` +} + +type ClusterPage struct { + pagination.LinkedPageBase +} + +func (r ClusterPage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Next, nil +} + +// IsEmpty checks whether a ClusterPage struct is empty. 
+func (r ClusterPage) IsEmpty() (bool, error) { + is, err := ExtractClusters(r) + return len(is) == 0, err +} + +func ExtractClusters(r pagination.Page) ([]Cluster, error) { + var s struct { + Clusters []Cluster `json:"clusters"` + } + err := (r.(ClusterPage)).ExtractInto(&s) + return s.Clusters, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go new file mode 100644 index 00000000..f0fcf1ae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go @@ -0,0 +1,43 @@ +package clusters + +import ( + "github.com/gophercloud/gophercloud" +) + +var apiName = "clusters" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiName) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiName, id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("clusters", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("clusters") +} + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("clusters", "detail") +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func resizeURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("clusters", id, "actions/resize") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go new file mode 100644 index 00000000..12289c2a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go @@ -0,0 +1,90 @@ +// Package clustertemplates contains functionality for working with Magnum Cluster Templates +// resources. +/* +Package clustertemplates provides information and interaction with the cluster-templates through +the OpenStack Container Infra service. 
+
+Example to Create Cluster Template
+
+	boolFalse := false
+	boolTrue := true
+	createOpts := clustertemplates.CreateOpts{
+		Name:                "test-cluster-template",
+		Labels:              map[string]string{},
+		FixedSubnet:         "",
+		MasterFlavorID:      "",
+		NoProxy:             "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost",
+		HTTPSProxy:          "http://10.164.177.169:8080",
+		TLSDisabled:         &boolFalse,
+		KeyPairID:           "kp",
+		Public:              &boolFalse,
+		HTTPProxy:           "http://10.164.177.169:8080",
+		ServerType:          "vm",
+		ExternalNetworkID:   "public",
+		ImageID:             "fedora-atomic-latest",
+		VolumeDriver:        "cinder",
+		RegistryEnabled:     &boolFalse,
+		DockerStorageDriver: "devicemapper",
+		NetworkDriver:       "flannel",
+		FixedNetwork:        "",
+		COE:                 "kubernetes",
+		FlavorID:            "m1.small",
+		MasterLBEnabled:     &boolTrue,
+		DNSNameServer:       "8.8.8.8",
+	}
+
+	clustertemplate, err := clustertemplates.Create(serviceClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete Cluster Template
+
+	clusterTemplateID := "dc6d336e3fc4c0a951b5698cd1236ee"
+	err := clustertemplates.Delete(serviceClient, clusterTemplateID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Cluster Templates
+
+	listOpts := clustertemplates.ListOpts{
+		Limit: 20,
+	}
+
+	allPages, err := clustertemplates.List(serviceClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allClusterTemplates, err := clustertemplates.ExtractClusterTemplates(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, clusterTemplate := range allClusterTemplates {
+		fmt.Printf("%+v\n", clusterTemplate)
+	}
+
+Example to Update Cluster Template
+
+	updateOpts := []clustertemplates.UpdateOptsBuilder{
+		clustertemplates.UpdateOpts{
+			Op:    clustertemplates.ReplaceOp,
+			Path:  "/master_lb_enabled",
+			Value: "True",
+		},
+		clustertemplates.UpdateOpts{
+			Op:    clustertemplates.ReplaceOp,
+			Path:  "/registry_enabled",
+			Value: "True",
+		},
+	}
+
+	clustertemplate, err := clustertemplates.Update(serviceClient, clusterTemplateID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package clustertemplates
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go
new file mode 100644
index 00000000..3b2aac58
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go
@@ -0,0 +1,178 @@
+package clustertemplates
+
+import (
+	"net/http"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface { + ToClusterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts params +type CreateOpts struct { + APIServerPort *int `json:"apiserver_port,omitempty"` + COE string `json:"coe" required:"true"` + DNSNameServer string `json:"dns_nameserver,omitempty"` + DockerStorageDriver string `json:"docker_storage_driver,omitempty"` + DockerVolumeSize *int `json:"docker_volume_size,omitempty"` + ExternalNetworkID string `json:"external_network_id,omitempty"` + FixedNetwork string `json:"fixed_network,omitempty"` + FixedSubnet string `json:"fixed_subnet,omitempty"` + FlavorID string `json:"flavor_id,omitempty"` + FloatingIPEnabled *bool `json:"floating_ip_enabled,omitempty"` + HTTPProxy string `json:"http_proxy,omitempty"` + HTTPSProxy string `json:"https_proxy,omitempty"` + ImageID string `json:"image_id" required:"true"` + InsecureRegistry string `json:"insecure_registry,omitempty"` + KeyPairID string `json:"keypair_id,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + MasterFlavorID string `json:"master_flavor_id,omitempty"` + MasterLBEnabled *bool `json:"master_lb_enabled,omitempty"` + Name string `json:"name,omitempty"` + NetworkDriver string `json:"network_driver,omitempty"` + NoProxy string `json:"no_proxy,omitempty"` + Public *bool `json:"public,omitempty"` + RegistryEnabled *bool `json:"registry_enabled,omitempty"` + ServerType string `json:"server_type,omitempty"` + TLSDisabled *bool `json:"tls_disabled,omitempty"` + VolumeDriver string `json:"volume_driver,omitempty"` +} + +// ToClusterCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new cluster. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToClusterCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Delete deletes the specified cluster ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(deleteURL(client, id), nil) + r.Header = result.Header + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToClusterTemplateListQuery() (string, error) +} + +// ListOpts allows the sorting of paginated collections through +// the API. SortKey allows you to sort by a particular cluster templates attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToClusterTemplateListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToClusterTemplateListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// cluster-templates. It accepts a ListOptsBuilder, which allows you to sort +// the returned collection for greater efficiency. 
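+//
+// For example, a sketch of listing templates sorted by name (pagination
+// helpers as defined in this package's results.go):
+//
+//	allPages, err := clustertemplates.List(serviceClient, clustertemplates.ListOpts{
+//		Limit:   20,
+//		SortKey: "name",
+//	}).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	templates, err := clustertemplates.ExtractClusterTemplates(allPages)
+//	if err != nil {
+//		panic(err)
+//	}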
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToClusterTemplateListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ClusterTemplatePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific cluster-template based on its unique ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + if r.Err == nil { + r.Header = result.Header + } + return +} + +type UpdateOp string + +const ( + AddOp UpdateOp = "add" + RemoveOp UpdateOp = "remove" + ReplaceOp UpdateOp = "replace" +) + +type UpdateOpts struct { + Op UpdateOp `json:"op" required:"true"` + Path string `json:"path" required:"true"` + Value interface{} `json:"value,omitempty"` +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToClusterTemplateUpdateMap() (map[string]interface{}, error) +} + +// ToClusterUpdateMap assembles a request body based on the contents of +// UpdateOpts. +func (opts UpdateOpts) ToClusterTemplateUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return b, nil +} + +// Update implements cluster updated request. +func Update(client *gophercloud.ServiceClient, id string, opts []UpdateOptsBuilder) (r UpdateResult) { + var o []map[string]interface{} + for _, opt := range opts { + b, err := opt.ToClusterTemplateUpdateMap() + if err != nil { + r.Err = err + return r + } + o = append(o, b) + } + var result *http.Response + result, r.Err = client.Patch(updateURL(client, id), o, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go new file mode 100644 index 00000000..8b416f7e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go @@ -0,0 +1,114 @@ +package clustertemplates + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult is the response of a Get operations. +type GetResult struct { + commonResult +} + +// UpdateResult is the response of a Update operations. +type UpdateResult struct { + commonResult +} + +// Extract is a function that accepts a result and extracts a cluster-template resource. 
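+//
+// For example, fetching a single template by ID (a sketch; "templateID" is a
+// placeholder):
+//
+//	template, err := clustertemplates.Get(serviceClient, templateID).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(template.Name, template.COE)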
+func (r commonResult) Extract() (*ClusterTemplate, error) { + var s *ClusterTemplate + err := r.ExtractInto(&s) + return s, err +} + +// Represents a template for a Cluster Template +type ClusterTemplate struct { + APIServerPort int `json:"apiserver_port"` + COE string `json:"coe"` + ClusterDistro string `json:"cluster_distro"` + CreatedAt time.Time `json:"created_at"` + DNSNameServer string `json:"dns_nameserver"` + DockerStorageDriver string `json:"docker_storage_driver"` + DockerVolumeSize int `json:"docker_volume_size"` + ExternalNetworkID string `json:"external_network_id"` + FixedNetwork string `json:"fixed_network"` + FixedSubnet string `json:"fixed_subnet"` + FlavorID string `json:"flavor_id"` + FloatingIPEnabled bool `json:"floating_ip_enabled"` + HTTPProxy string `json:"http_proxy"` + HTTPSProxy string `json:"https_proxy"` + ImageID string `json:"image_id"` + InsecureRegistry string `json:"insecure_registry"` + KeyPairID string `json:"keypair_id"` + Labels map[string]string `json:"labels"` + Links []gophercloud.Link `json:"links"` + MasterFlavorID string `json:"master_flavor_id"` + MasterLBEnabled bool `json:"master_lb_enabled"` + Name string `json:"name"` + NetworkDriver string `json:"network_driver"` + NoProxy string `json:"no_proxy"` + ProjectID string `json:"project_id"` + Public bool `json:"public"` + RegistryEnabled bool `json:"registry_enabled"` + ServerType string `json:"server_type"` + TLSDisabled bool `json:"tls_disabled"` + UUID string `json:"uuid"` + UpdatedAt time.Time `json:"updated_at"` + UserID string `json:"user_id"` + VolumeDriver string `json:"volume_driver"` +} + +// ClusterTemplatePage is the page returned by a pager when traversing over a +// collection of cluster-templates. +type ClusterTemplatePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of cluster template has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r ClusterTemplatePage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Next, nil +} + +// IsEmpty checks whether a ClusterTemplatePage struct is empty. +func (r ClusterTemplatePage) IsEmpty() (bool, error) { + is, err := ExtractClusterTemplates(r) + return len(is) == 0, err +} + +// ExtractClusterTemplates accepts a Page struct, specifically a ClusterTemplatePage struct, +// and extracts the elements into a slice of cluster templates structs. In other words, +// a generic collection is mapped into a relevant slice. 
+func ExtractClusterTemplates(r pagination.Page) ([]ClusterTemplate, error) { + var s struct { + ClusterTemplates []ClusterTemplate `json:"clustertemplates"` + } + err := (r.(ClusterTemplatePage)).ExtractInto(&s) + return s.ClusterTemplates, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go new file mode 100644 index 00000000..f90fb851 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go @@ -0,0 +1,35 @@ +package clustertemplates + +import ( + "github.com/gophercloud/gophercloud" +) + +var apiName = "clustertemplates" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiName) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiName, id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go new file mode 100644 index 00000000..45b9cfb4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go @@ -0,0 +1,11 @@ +// Package configurations provides information and interaction with the +// configuration API resource in the Rackspace Database service. +// +// A configuration group is a collection of key/value pairs which define how a +// particular database operates. These key/value pairs are specific to each +// datastore type and serve like settings. Some directives are capable of being +// applied dynamically, while other directives require a server restart to take +// effect. The configuration group can be applied to an instance at creation or +// applied to an existing instance to modify the behavior of the running +// datastore on the instance. +package configurations diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go new file mode 100644 index 00000000..32bfb1dd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go @@ -0,0 +1,167 @@ +package configurations + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will list all of the available configurations. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return ConfigPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder is a top-level interface which renders a JSON map. 
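+//
+// For example, a sketch of creating a configuration group (the option values
+// are placeholders, and "client" is a configured database ServiceClient):
+//
+//	createOpts := configurations.CreateOpts{
+//		Name:        "mysql-tuning",
+//		Description: "example tuning group",
+//		Datastore:   &configurations.DatastoreOpts{Type: "MySQL", Version: "5.6"},
+//		Values: map[string]interface{}{
+//			"connect_timeout": 120,
+//		},
+//	}
+//	config, err := configurations.Create(client, createOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}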
+type CreateOptsBuilder interface { + ToConfigCreateMap() (map[string]interface{}, error) +} + +// DatastoreOpts is the primary options struct for creating and modifying +// how configuration resources are associated with datastores. +type DatastoreOpts struct { + // The type of datastore. Defaults to "MySQL". + Type string `json:"type,omitempty"` + // The specific version of a datastore. Defaults to "5.6". + Version string `json:"version,omitempty"` +} + +// CreateOpts is the struct responsible for configuring new configurations. +type CreateOpts struct { + // The configuration group name + Name string `json:"name" required:"true"` + // A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} `json:"values" required:"true"` + // Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts `json:"datastore,omitempty"` + // A human-readable explanation for the group. + Description string `json:"description,omitempty"` +} + +// ToConfigCreateMap casts a CreateOpts struct into a JSON map. +func (opts CreateOpts) ToConfigCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "configuration") +} + +// Create will create a new configuration group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToConfigCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// Get will retrieve the details for a specified configuration group. +func Get(client *gophercloud.ServiceClient, configID string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, configID), &r.Body, nil) + return +} + +// UpdateOptsBuilder is the top-level interface for casting update options into +// JSON maps. +type UpdateOptsBuilder interface { + ToConfigUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the struct responsible for modifying existing configurations. +type UpdateOpts struct { + // The configuration group name + Name string `json:"name,omitempty"` + // A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} `json:"values,omitempty"` + // Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts `json:"datastore,omitempty"` + // A human-readable explanation for the group. + Description *string `json:"description,omitempty"` +} + +// ToConfigUpdateMap will cast an UpdateOpts struct into a JSON map. +func (opts UpdateOpts) ToConfigUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "configuration") +} + +// Update will modify an existing configuration group by performing a merge +// between new and existing values. If the key already exists, the new value +// will overwrite. All other keys will remain unaffected. +func Update(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToConfigUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(resourceURL(client, configID), &b, nil, nil) + return +} + +// Replace will modify an existing configuration group by overwriting the +// entire parameter group with the new values provided. 
Any existing keys not +// included in UpdateOptsBuilder will be deleted. +func Replace(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) (r ReplaceResult) { + b, err := opts.ToConfigUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(resourceURL(client, configID), &b, nil, nil) + return +} + +// Delete will permanently delete a configuration group. Please note that +// config groups cannot be deleted whilst still attached to running instances - +// you must detach and then delete them. +func Delete(client *gophercloud.ServiceClient, configID string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, configID), nil) + return +} + +// ListInstances will list all the instances associated with a particular +// configuration group. +func ListInstances(client *gophercloud.ServiceClient, configID string) pagination.Pager { + return pagination.NewPager(client, instancesURL(client, configID), func(r pagination.PageResult) pagination.Page { + return instances.InstancePage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} + }) +} + +// ListDatastoreParams will list all the available and supported parameters +// that can be used for a particular datastore ID and a particular version. +// For example, if you are wondering how you can configure a MySQL 5.6 instance, +// you can use this operation (you will need to retrieve the MySQL datastore ID +// by using the datastores API). +func ListDatastoreParams(client *gophercloud.ServiceClient, datastoreID, versionID string) pagination.Pager { + return pagination.NewPager(client, listDSParamsURL(client, datastoreID, versionID), func(r pagination.PageResult) pagination.Page { + return ParamPage{pagination.SinglePageBase(r)} + }) +} + +// GetDatastoreParam will retrieve information about a specific configuration +// parameter. For example, you can use this operation to understand more about +// "innodb_file_per_table" configuration param for MySQL datastores. You will +// need the param's ID first, which can be attained by using the ListDatastoreParams +// operation. +func GetDatastoreParam(client *gophercloud.ServiceClient, datastoreID, versionID, paramID string) (r ParamResult) { + _, r.Err = client.Get(getDSParamURL(client, datastoreID, versionID, paramID), &r.Body, nil) + return +} + +// ListGlobalParams is similar to ListDatastoreParams but does not require a +// DatastoreID. +func ListGlobalParams(client *gophercloud.ServiceClient, versionID string) pagination.Pager { + return pagination.NewPager(client, listGlobalParamsURL(client, versionID), func(r pagination.PageResult) pagination.Page { + return ParamPage{pagination.SinglePageBase(r)} + }) +} + +// GetGlobalParam is similar to GetDatastoreParam but does not require a +// DatastoreID. 
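+//
+// For example, a sketch of discovering the tunables for a datastore version
+// ("datastoreID" and "versionID" are placeholders):
+//
+//	pager := configurations.ListDatastoreParams(client, datastoreID, versionID)
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		params, err := configurations.ExtractParams(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, p := range params {
+//			fmt.Printf("%s (restart required: %t)\n", p.Name, p.RestartRequired)
+//		}
+//		return true, nil
+//	})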
+func GetGlobalParam(client *gophercloud.ServiceClient, versionID, paramID string) (r ParamResult) { + _, r.Err = client.Get(getGlobalParamURL(client, versionID, paramID), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go new file mode 100644 index 00000000..13bcbffe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go @@ -0,0 +1,141 @@ +package configurations + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Config represents a configuration group API resource. +type Config struct { + Created time.Time `json:"-"` + Updated time.Time `json:"-"` + DatastoreName string `json:"datastore_name"` + DatastoreVersionID string `json:"datastore_version_id"` + DatastoreVersionName string `json:"datastore_version_name"` + Description string + ID string + Name string + Values map[string]interface{} +} + +func (r *Config) UnmarshalJSON(b []byte) error { + type tmp Config + var s struct { + tmp + Created gophercloud.JSONRFC3339NoZ `json:"created"` + Updated gophercloud.JSONRFC3339NoZ `json:"updated"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Config(s.tmp) + + r.Created = time.Time(s.Created) + r.Updated = time.Time(s.Updated) + + return nil +} + +// ConfigPage contains a page of Config resources in a paginated collection. +type ConfigPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ConfigPage is empty. +func (r ConfigPage) IsEmpty() (bool, error) { + is, err := ExtractConfigs(r) + return len(is) == 0, err +} + +// ExtractConfigs will retrieve a slice of Config structs from a page. +func ExtractConfigs(r pagination.Page) ([]Config, error) { + var s struct { + Configs []Config `json:"configurations"` + } + err := (r.(ConfigPage)).ExtractInto(&s) + return s.Configs, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will retrieve a Config resource from an operation result. +func (r commonResult) Extract() (*Config, error) { + var s struct { + Config *Config `json:"configuration"` + } + err := r.ExtractInto(&s) + return s.Config, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + commonResult +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// ReplaceResult represents the result of a Replace operation. +type ReplaceResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Param represents a configuration parameter API resource. +type Param struct { + Max float64 + Min float64 + Name string + RestartRequired bool `json:"restart_required"` + Type string +} + +// ParamPage contains a page of Param resources in a paginated collection. +type ParamPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ParamPage is empty. +func (r ParamPage) IsEmpty() (bool, error) { + is, err := ExtractParams(r) + return len(is) == 0, err +} + +// ExtractParams will retrieve a slice of Param structs from a page. 
+func ExtractParams(r pagination.Page) ([]Param, error) {
+	var s struct {
+		Params []Param `json:"configuration-parameters"`
+	}
+	err := (r.(ParamPage)).ExtractInto(&s)
+	return s.Params, err
+}
+
+// ParamResult represents the result of an operation which retrieves details
+// about a particular configuration param.
+type ParamResult struct {
+	gophercloud.Result
+}
+
+// Extract will retrieve a param from an operation result.
+func (r ParamResult) Extract() (*Param, error) {
+	var s *Param
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go
new file mode 100644
index 00000000..0a69253a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go
@@ -0,0 +1,31 @@
+package configurations
+
+import "github.com/gophercloud/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("configurations")
+}
+
+func resourceURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID)
+}
+
+func instancesURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID, "instances")
+}
+
+func listDSParamsURL(c *gophercloud.ServiceClient, datastoreID, versionID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters")
+}
+
+func getDSParamURL(c *gophercloud.ServiceClient, datastoreID, versionID, paramID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters", paramID)
+}
+
+func listGlobalParamsURL(c *gophercloud.ServiceClient, versionID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters")
+}
+
+func getGlobalParamURL(c *gophercloud.ServiceClient, versionID, paramID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters", paramID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go
new file mode 100644
index 00000000..15275fe5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go
@@ -0,0 +1,6 @@
+// Package databases provides information and interaction with the database API
+// resource in the OpenStack Database service.
+//
+// A database, when referred to here, refers to the database engine running on
+// an instance.
+package databases
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go
new file mode 100644
index 00000000..ef5394f9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go
@@ -0,0 +1,89 @@
+package databases
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder builds create options.
+type CreateOptsBuilder interface {
+	ToDBCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the struct responsible for configuring a database; often in
+// the context of an instance.
+type CreateOpts struct {
+	// Specifies the name of the database.
Valid names can be composed + // of the following characters: letters (either case); numbers; these + // characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is + // permitted anywhere. Prohibited characters that are forbidden include: + // single quotes, double quotes, back quotes, semicolons, commas, backslashes, + // and forward slashes. + Name string `json:"name" required:"true"` + // Set of symbols and encodings. The default character set is + // "utf8". See http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for + // supported character sets. + CharSet string `json:"character_set,omitempty"` + // Set of rules for comparing characters in a character set. The + // default value for collate is "utf8_general_ci". See + // http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for supported + // collations. + Collate string `json:"collate,omitempty"` +} + +// ToMap is a helper function to convert individual DB create opt structures +// into sub-maps. +func (opts CreateOpts) ToMap() (map[string]interface{}, error) { + if len(opts.Name) > 64 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "databases.CreateOpts.Name" + err.Value = opts.Name + err.Info = "Must be less than 64 chars long" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "") +} + +// BatchCreateOpts allows for multiple databases to created and modified. +type BatchCreateOpts []CreateOpts + +// ToDBCreateMap renders a JSON map for creating DBs. +func (opts BatchCreateOpts) ToDBCreateMap() (map[string]interface{}, error) { + dbs := make([]map[string]interface{}, len(opts)) + for i, db := range opts { + dbMap, err := db.ToMap() + if err != nil { + return nil, err + } + dbs[i] = dbMap + } + return map[string]interface{}{"databases": dbs}, nil +} + +// Create will create a new database within the specified instance. If the +// specified instance does not exist, a 404 error will be returned. +func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToDBCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client, instanceID), &b, nil, nil) + return +} + +// List will list all of the databases for a specified instance. Note: this +// operation will only return user-defined databases; it will exclude system +// databases like "mysql", "information_schema", "lost+found" etc. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + return pagination.NewPager(client, baseURL(client, instanceID), func(r pagination.PageResult) pagination.Page { + return DBPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete the database within a specified instance. +// All contained data inside the database will also be permanently deleted. 
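+//
+// For example, a sketch that creates and then removes a database on an
+// existing instance ("instanceID" is a placeholder):
+//
+//	batch := databases.BatchCreateOpts{
+//		{Name: "app_db", CharSet: "utf8", Collate: "utf8_general_ci"},
+//	}
+//	if err := databases.Create(client, instanceID, batch).ExtractErr(); err != nil {
+//		panic(err)
+//	}
+//	if err := databases.Delete(client, instanceID, "app_db").ExtractErr(); err != nil {
+//		panic(err)
+//	}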
+func Delete(client *gophercloud.ServiceClient, instanceID, dbName string) (r DeleteResult) { + _, r.Err = client.Delete(dbURL(client, instanceID, dbName), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go new file mode 100644 index 00000000..0479d0e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go @@ -0,0 +1,63 @@ +package databases + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Database represents a Database API resource. +type Database struct { + // Specifies the name of the MySQL database. + Name string + + // Set of symbols and encodings. The default character set is utf8. + CharSet string + + // Set of rules for comparing characters in a character set. The default + // value for collate is utf8_general_ci. + Collate string +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DBPage represents a single page of a paginated DB collection. +type DBPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page DBPage) IsEmpty() (bool, error) { + dbs, err := ExtractDBs(page) + return len(dbs) == 0, err +} + +// NextPageURL will retrieve the next page URL. +func (page DBPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"databases_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractDBs will convert a generic pagination struct into a more +// relevant slice of DB structs. +func ExtractDBs(page pagination.Page) ([]Database, error) { + r := page.(DBPage) + var s struct { + Databases []Database `json:"databases"` + } + err := r.ExtractInto(&s) + return s.Databases, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go new file mode 100644 index 00000000..aba42c9c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go @@ -0,0 +1,11 @@ +package databases + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "databases") +} + +func dbURL(c *gophercloud.ServiceClient, instanceID, dbName string) string { + return c.ServiceURL("instances", instanceID, "databases", dbName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go new file mode 100644 index 00000000..ae14026b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go @@ -0,0 +1,3 @@ +// Package datastores provides information and interaction with the datastore +// API resource in the Rackspace Database service. 
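+//
+// For example, a sketch of listing datastores and their default versions
+// (helpers as defined in this package's requests.go and results.go):
+//
+//	allPages, err := datastores.List(client).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	ds, err := datastores.ExtractDatastores(allPages)
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, d := range ds {
+//		fmt.Println(d.Name, d.DefaultVersion)
+//	}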
+package datastores diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go new file mode 100644 index 00000000..134e3090 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go @@ -0,0 +1,33 @@ +package datastores + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will list all available datastore types that instances can use. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return DatastorePage{pagination.SinglePageBase(r)} + }) +} + +// Get will retrieve the details of a specified datastore type. +func Get(client *gophercloud.ServiceClient, datastoreID string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, datastoreID), &r.Body, nil) + return +} + +// ListVersions will list all of the available versions for a specified +// datastore type. +func ListVersions(client *gophercloud.ServiceClient, datastoreID string) pagination.Pager { + return pagination.NewPager(client, versionsURL(client, datastoreID), func(r pagination.PageResult) pagination.Page { + return VersionPage{pagination.SinglePageBase(r)} + }) +} + +// GetVersion will retrieve the details of a specified datastore version. +func GetVersion(client *gophercloud.ServiceClient, datastoreID, versionID string) (r GetVersionResult) { + _, r.Err = client.Get(versionURL(client, datastoreID, versionID), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go new file mode 100644 index 00000000..a6e27d27 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go @@ -0,0 +1,100 @@ +package datastores + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Version represents a version API resource. Multiple versions belong to a Datastore. +type Version struct { + ID string + Links []gophercloud.Link + Name string +} + +// Datastore represents a Datastore API resource. +type Datastore struct { + DefaultVersion string `json:"default_version"` + ID string + Links []gophercloud.Link + Name string + Versions []Version +} + +// DatastorePartial is a meta structure which is used in various API responses. +// It is a lightweight and truncated version of a full Datastore resource, +// offering details of the Version, Type and VersionID only. +type DatastorePartial struct { + Version string + Type string + VersionID string `json:"version_id"` +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// GetVersionResult represents the result of getting a version. +type GetVersionResult struct { + gophercloud.Result +} + +// DatastorePage represents a page of datastore resources. +type DatastorePage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a Datastore collection is empty. +func (r DatastorePage) IsEmpty() (bool, error) { + is, err := ExtractDatastores(r) + return len(is) == 0, err +} + +// ExtractDatastores retrieves a slice of datastore structs from a paginated +// collection. 
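+//
+// A typical (illustrative) use is inside an EachPage callback, assuming an
+// authenticated *gophercloud.ServiceClient named "client":
+//
+//	err := List(client).EachPage(func(page pagination.Page) (bool, error) {
+//		dss, err := ExtractDatastores(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, ds := range dss {
+//			fmt.Printf("%+v\n", ds)
+//		}
+//		return true, nil
+//	})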
+func ExtractDatastores(r pagination.Page) ([]Datastore, error) { + var s struct { + Datastores []Datastore `json:"datastores"` + } + err := (r.(DatastorePage)).ExtractInto(&s) + return s.Datastores, err +} + +// Extract retrieves a single Datastore struct from an operation result. +func (r GetResult) Extract() (*Datastore, error) { + var s struct { + Datastore *Datastore `json:"datastore"` + } + err := r.ExtractInto(&s) + return s.Datastore, err +} + +// VersionPage represents a page of version resources. +type VersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a collection of version resources is empty. +func (r VersionPage) IsEmpty() (bool, error) { + is, err := ExtractVersions(r) + return len(is) == 0, err +} + +// ExtractVersions retrieves a slice of versions from a paginated collection. +func ExtractVersions(r pagination.Page) ([]Version, error) { + var s struct { + Versions []Version `json:"versions"` + } + err := (r.(VersionPage)).ExtractInto(&s) + return s.Versions, err +} + +// Extract retrieves a single Version struct from an operation result. +func (r GetVersionResult) Extract() (*Version, error) { + var s struct { + Version *Version `json:"version"` + } + err := r.ExtractInto(&s) + return s.Version, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go new file mode 100644 index 00000000..06d1b3da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go @@ -0,0 +1,19 @@ +package datastores + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("datastores") +} + +func resourceURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID) +} + +func versionsURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID, "versions") +} + +func versionURL(c *gophercloud.ServiceClient, dsID, versionID string) string { + return c.ServiceURL("datastores", dsID, "versions", versionID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go new file mode 100644 index 00000000..dc5c90f9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go @@ -0,0 +1,7 @@ +// Package instances provides information and interaction with the instance API +// resource in the OpenStack Database service. +// +// A database instance is an isolated database environment with compute and +// storage resources in a single tenant environment on a shared physical host +// machine. 
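+//
+// Example (a sketch; the flavor reference is a placeholder and an
+// authenticated *gophercloud.ServiceClient named "client" is assumed) to
+// create an instance with a 10 GB volume:
+//
+//	createOpts := instances.CreateOpts{
+//		FlavorRef: "{flavorRef}",
+//		Size:      10,
+//		Name:      "my_instance",
+//	}
+//
+//	instance, err := instances.Create(client, createOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}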
+package instances
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go
new file mode 100644
index 00000000..bbd90931
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go
@@ -0,0 +1,219 @@
+package instances
+
+import (
+	"github.com/gophercloud/gophercloud"
+	db "github.com/gophercloud/gophercloud/openstack/db/v1/databases"
+	"github.com/gophercloud/gophercloud/openstack/db/v1/users"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder is the top-level interface for create options.
+type CreateOptsBuilder interface {
+	ToInstanceCreateMap() (map[string]interface{}, error)
+}
+
+// DatastoreOpts represents the configuration for how an instance stores data.
+type DatastoreOpts struct {
+	Version string `json:"version"`
+	Type    string `json:"type"`
+}
+
+// ToMap converts a DatastoreOpts to a map[string]interface{} (for a request body).
+func (opts DatastoreOpts) ToMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// NetworkOpts is used within CreateOpts to control a new server's network attachments.
+type NetworkOpts struct {
+	// UUID of a nova-network to attach to the newly provisioned server.
+	// Required unless Port is provided.
+	UUID string `json:"net-id,omitempty"`
+
+	// Port of a neutron network to attach to the newly provisioned server.
+	// Required unless UUID is provided.
+	Port string `json:"port-id,omitempty"`
+
+	// V4FixedIP [optional] specifies a fixed IPv4 address to be used on this network.
+	V4FixedIP string `json:"v4-fixed-ip,omitempty"`
+
+	// V6FixedIP [optional] specifies a fixed IPv6 address to be used on this network.
+	V6FixedIP string `json:"v6-fixed-ip,omitempty"`
+}
+
+// ToMap converts a NetworkOpts to a map[string]interface{} (for a request body).
+func (opts NetworkOpts) ToMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// CreateOpts is the struct responsible for configuring a new database instance.
+type CreateOpts struct {
+	// Either the integer UUID (in string form) of the flavor, or its URI
+	// reference as specified in the response from the List() call. Required.
+	FlavorRef string
+	// Specifies the volume size in gigabytes (GB). The value must be between 1
+	// and 300. Required.
+	Size int
+	// Name of the instance to create. The length of the name is limited to
+	// 255 characters and any characters are permitted. Optional.
+	Name string
+	// A slice of database information options.
+	Databases db.CreateOptsBuilder
+	// A slice of user information options.
+	Users users.CreateOptsBuilder
+	// Options to configure the type of datastore the instance will use. This is
+	// optional, and if excluded will default to MySQL.
+	Datastore *DatastoreOpts
+	// Networks dictates how this server will be attached to available networks.
+	Networks []NetworkOpts
+}
+
+// ToInstanceCreateMap will render a JSON map.
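+//
+// For illustration (a sketch), CreateOpts{FlavorRef: "1", Size: 10, Name: "db"}
+// renders a body of the form:
+//
+//	{"instance": {"flavorRef": "1", "volume": {"size": 10}, "name": "db"}}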
+func (opts CreateOpts) ToInstanceCreateMap() (map[string]interface{}, error) { + if opts.Size > 300 || opts.Size < 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "instances.CreateOpts.Size" + err.Value = opts.Size + err.Info = "Size (GB) must be between 1-300" + return nil, err + } + + if opts.FlavorRef == "" { + return nil, gophercloud.ErrMissingInput{Argument: "instances.CreateOpts.FlavorRef"} + } + + instance := map[string]interface{}{ + "volume": map[string]int{"size": opts.Size}, + "flavorRef": opts.FlavorRef, + } + + if opts.Name != "" { + instance["name"] = opts.Name + } + if opts.Databases != nil { + dbs, err := opts.Databases.ToDBCreateMap() + if err != nil { + return nil, err + } + instance["databases"] = dbs["databases"] + } + if opts.Users != nil { + users, err := opts.Users.ToUserCreateMap() + if err != nil { + return nil, err + } + instance["users"] = users["users"] + } + if opts.Datastore != nil { + datastore, err := opts.Datastore.ToMap() + if err != nil { + return nil, err + } + instance["datastore"] = datastore + } + + if len(opts.Networks) > 0 { + networks := make([]map[string]interface{}, len(opts.Networks)) + for i, net := range opts.Networks { + var err error + networks[i], err = net.ToMap() + if err != nil { + return nil, err + } + } + instance["nics"] = networks + } + + return map[string]interface{}{"instance": instance}, nil +} + +// Create asynchronously provisions a new database instance. It requires the +// user to specify a flavor and a volume size. The API service then provisions +// the instance with the requested flavor and sets up a volume of the specified +// size, which is the storage for the database instance. +// +// Although this call only allows the creation of 1 instance per request, you +// can create an instance with multiple databases and users. The default +// binding for a MySQL instance is port 3306. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToInstanceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// List retrieves the status and information for all database instances. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return InstancePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves the status and information for a specified database instance. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) + return +} + +// Delete permanently destroys the database instance. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, id), nil) + return +} + +// EnableRootUser enables the login from any host for the root user and +// provides the user with a generated root password. +func EnableRootUser(client *gophercloud.ServiceClient, id string) (r EnableRootUserResult) { + _, r.Err = client.Post(userRootURL(client, id), nil, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// IsRootEnabled checks an instance to see if root access is enabled. It returns +// True if root user is enabled for the specified database instance or False +// otherwise. 
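+//
+// A minimal sketch (hypothetical instance ID; an authenticated
+// *gophercloud.ServiceClient named "client" is assumed):
+//
+//	enabled, err := instances.IsRootEnabled(client, "{instanceID}").Extract()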
+func IsRootEnabled(client *gophercloud.ServiceClient, id string) (r IsRootEnabledResult) {
+	_, r.Err = client.Get(userRootURL(client, id), &r.Body, nil)
+	return
+}
+
+// Restart will restart only the MySQL instance. Restarting MySQL will
+// erase any dynamic configuration settings that you have made within MySQL.
+// The MySQL service will be unavailable until the instance restarts.
+func Restart(client *gophercloud.ServiceClient, id string) (r ActionResult) {
+	b := map[string]interface{}{"restart": struct{}{}}
+	_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
+	return
+}
+
+// Resize changes the memory size of the instance, assuming a valid
+// flavorRef is provided. It will also restart the MySQL service.
+func Resize(client *gophercloud.ServiceClient, id, flavorRef string) (r ActionResult) {
+	b := map[string]interface{}{"resize": map[string]string{"flavorRef": flavorRef}}
+	_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
+	return
+}
+
+// ResizeVolume will resize the attached volume for an instance. It supports
+// only increasing the volume size and does not support decreasing the size.
+// The volume size is in gigabytes (GB) and must be an integer.
+func ResizeVolume(client *gophercloud.ServiceClient, id string, size int) (r ActionResult) {
+	b := map[string]interface{}{"resize": map[string]interface{}{"volume": map[string]int{"size": size}}}
+	_, r.Err = client.Post(actionURL(client, id), &b, nil, nil)
+	return
+}
+
+// AttachConfigurationGroup will attach a configuration group to the instance.
+func AttachConfigurationGroup(client *gophercloud.ServiceClient, instanceID string, configID string) (r ConfigurationResult) {
+	b := map[string]interface{}{"instance": map[string]interface{}{"configuration": configID}}
+	_, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &gophercloud.RequestOpts{OkCodes: []int{202}})
+	return
+}
+
+// DetachConfigurationGroup will detach a configuration group from the instance.
+func DetachConfigurationGroup(client *gophercloud.ServiceClient, instanceID string) (r ConfigurationResult) {
+	b := map[string]interface{}{"instance": map[string]interface{}{}}
+	_, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &gophercloud.RequestOpts{OkCodes: []int{202}})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go
new file mode 100644
index 00000000..e47a740c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go
@@ -0,0 +1,219 @@
+package instances
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/db/v1/datastores"
+	"github.com/gophercloud/gophercloud/openstack/db/v1/users"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Volume represents information about an attached volume for a database instance.
+type Volume struct {
+	// The size in GB of the volume.
+	Size int
+
+	Used float64
+}
+
+// Flavor represents (virtual) hardware configurations for server resources in a region.
+type Flavor struct {
+	// The flavor's unique identifier.
+	ID string
+	// Links to access the flavor.
+	Links []gophercloud.Link
+}
+
+// Fault describes the fault reason in more detail when a database instance has errored.
+type Fault struct {
+	// Indicates the time when the fault occurred.
+	Created time.Time `json:"-"`
+
+	// A message describing the fault reason.
+	Message string
+
+	// More details about the fault, for example a stack trace. Only filled
+	// in for admin users.
+	Details string
+}
+
+func (r *Fault) UnmarshalJSON(b []byte) error {
+	type tmp Fault
+	var s struct {
+		tmp
+		Created gophercloud.JSONRFC3339NoZ `json:"created"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Fault(s.tmp)
+
+	r.Created = time.Time(s.Created)
+
+	return nil
+}
+
+// Instance represents a remote MySQL instance.
+type Instance struct {
+	// Indicates the datetime that the instance was created.
+	Created time.Time `json:"-"`
+
+	// Indicates the most recent datetime that the instance was updated.
+	Updated time.Time `json:"-"`
+
+	// Indicates the hardware flavor the instance uses.
+	Flavor Flavor
+
+	// A DNS-resolvable hostname associated with the database instance (rather
+	// than an IPv4 address). Since the hostname always resolves to the correct
+	// IP address of the database instance, this relieves the user from the task
+	// of maintaining the mapping. Note that although the IP address may likely
+	// change on resizing, migrating, and so forth, the hostname always resolves
+	// to the correct database instance.
+	Hostname string
+
+	// The IP addresses associated with the database instance.
+	// Empty if the instance has a hostname.
+	IP []string
+
+	// Indicates the unique identifier for the instance resource.
+	ID string
+
+	// Exposes various links that reference the instance resource.
+	Links []gophercloud.Link
+
+	// The human-readable name of the instance.
+	Name string
+
+	// The build status of the instance.
+	Status string
+
+	// Fault information (only available when the instance has errored).
+	Fault *Fault
+
+	// Information about the attached volume of the instance.
+	Volume Volume
+
+	// Indicates how the instance stores data.
+	Datastore datastores.DatastorePartial
+}
+
+func (r *Instance) UnmarshalJSON(b []byte) error {
+	type tmp Instance
+	var s struct {
+		tmp
+		Created gophercloud.JSONRFC3339NoZ `json:"created"`
+		Updated gophercloud.JSONRFC3339NoZ `json:"updated"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Instance(s.tmp)
+
+	r.Created = time.Time(s.Created)
+	r.Updated = time.Time(s.Updated)
+
+	return nil
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// CreateResult represents the result of a Create operation.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a Get operation.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a Delete operation.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// ConfigurationResult represents the result of an AttachConfigurationGroup or
+// DetachConfigurationGroup operation.
+type ConfigurationResult struct {
+	gophercloud.ErrResult
+}
+
+// Extract will extract an Instance from various result structs.
+func (r commonResult) Extract() (*Instance, error) {
+	var s struct {
+		Instance *Instance `json:"instance"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Instance, err
+}
+
+// InstancePage represents a single page of a paginated instance collection.
+type InstancePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty checks to see whether the collection is empty.
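+//
+// IsEmpty is normally invoked by the pagination machinery rather than
+// directly; an illustrative listing loop (an authenticated
+// *gophercloud.ServiceClient named "client" is assumed) looks like:
+//
+//	err := List(client).EachPage(func(page pagination.Page) (bool, error) {
+//		is, err := ExtractInstances(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, instance := range is {
+//			fmt.Printf("%+v\n", instance)
+//		}
+//		return true, nil
+//	})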
+func (page InstancePage) IsEmpty() (bool, error) {
+	instances, err := ExtractInstances(page)
+	return len(instances) == 0, err
+}
+
+// NextPageURL will retrieve the next page URL.
+func (page InstancePage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"instances_links"`
+	}
+	err := page.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractInstances will convert a generic pagination struct into a more
+// relevant slice of Instance structs.
+func ExtractInstances(r pagination.Page) ([]Instance, error) {
+	var s struct {
+		Instances []Instance `json:"instances"`
+	}
+	err := (r.(InstancePage)).ExtractInto(&s)
+	return s.Instances, err
+}
+
+// EnableRootUserResult represents the result of an operation to enable the root user.
+type EnableRootUserResult struct {
+	gophercloud.Result
+}
+
+// Extract will extract root user information from an EnableRootUserResult.
+func (r EnableRootUserResult) Extract() (*users.User, error) {
+	var s struct {
+		User *users.User `json:"user"`
+	}
+	err := r.ExtractInto(&s)
+	return s.User, err
+}
+
+// ActionResult represents the result of action requests, such as restarting
+// an instance service, resizing its memory allocation, and resizing its
+// attached volume size.
+type ActionResult struct {
+	gophercloud.ErrResult
+}
+
+// IsRootEnabledResult is the result of a call to IsRootEnabled. To see if
+// root is enabled, call the type's Extract method.
+type IsRootEnabledResult struct {
+	gophercloud.Result
+}
+
+// Extract is used to extract the data from an IsRootEnabledResult.
+func (r IsRootEnabledResult) Extract() (bool, error) {
+	return r.Body.(map[string]interface{})["rootEnabled"] == true, r.Err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go
new file mode 100644
index 00000000..76d1ca56
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go
@@ -0,0 +1,19 @@
+package instances
+
+import "github.com/gophercloud/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("instances")
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("instances", id)
+}
+
+func userRootURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("instances", id, "root")
+}
+
+func actionURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("instances", id, "action")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go
new file mode 100644
index 00000000..cf07832f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go
@@ -0,0 +1,3 @@
+// Package users provides information and interaction with the user API
+// resource in the OpenStack Database service.
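+//
+// Example (a sketch; the instance ID and password are placeholders, and an
+// authenticated *gophercloud.ServiceClient named "client" is assumed) to
+// create a user with access to one database:
+//
+//	createOpts := users.BatchCreateOpts{
+//		users.CreateOpts{
+//			Name:     "app_user",
+//			Password: "{password}",
+//			Databases: db.BatchCreateOpts{
+//				db.CreateOpts{Name: "app_db"},
+//			},
+//		},
+//	}
+//
+//	err := users.Create(client, "{instanceID}", createOpts).ExtractErr()
+//	if err != nil {
+//		panic(err)
+//	}
+//
+// (Here db refers to the databases package, imported as
+// db "github.com/gophercloud/gophercloud/openstack/db/v1/databases".)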
+package users
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go
new file mode 100644
index 00000000..d342de34
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go
@@ -0,0 +1,91 @@
+package users
+
+import (
+	"github.com/gophercloud/gophercloud"
+	db "github.com/gophercloud/gophercloud/openstack/db/v1/databases"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder is the top-level interface for creating JSON maps.
+type CreateOptsBuilder interface {
+	ToUserCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the struct responsible for configuring a new user; often in the
+// context of an instance.
+type CreateOpts struct {
+	// Specifies a name for the user. Valid names can be composed
+	// of the following characters: letters (either case); numbers; these
+	// characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is
+	// permitted anywhere. The following characters are prohibited:
+	// single quotes, double quotes, back quotes, semicolons, commas, backslashes,
+	// and forward slashes. Spaces at the front or end of a user name are also
+	// not permitted.
+	Name string `json:"name" required:"true"`
+	// Specifies a password for the user.
+	Password string `json:"password" required:"true"`
+	// An array of databases that this user will connect to. The
+	// "name" field is the only requirement for each option.
+	Databases db.BatchCreateOpts `json:"databases,omitempty"`
+	// Specifies the host from which a user is allowed to connect to
+	// the database. Possible values are a string containing an IPv4 address or
+	// "%" to allow connecting from any host. Optional; the default is "%".
+	Host string `json:"host,omitempty"`
+}
+
+// ToMap is a convenience function for creating sub-maps for individual users.
+func (opts CreateOpts) ToMap() (map[string]interface{}, error) {
+	if opts.Name == "root" {
+		err := gophercloud.ErrInvalidInput{}
+		err.Argument = "users.CreateOpts.Name"
+		err.Value = "root"
+		err.Info = "root is a reserved user name and cannot be used"
+		return nil, err
+	}
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// BatchCreateOpts allows multiple users to be created at once.
+type BatchCreateOpts []CreateOpts
+
+// ToUserCreateMap will generate a JSON map.
+func (opts BatchCreateOpts) ToUserCreateMap() (map[string]interface{}, error) {
+	users := make([]map[string]interface{}, len(opts))
+	for i, opt := range opts {
+		user, err := opt.ToMap()
+		if err != nil {
+			return nil, err
+		}
+		users[i] = user
+	}
+	return map[string]interface{}{"users": users}, nil
+}
+
+// Create asynchronously provisions a new user for the specified database
+// instance based on the configuration defined in CreateOpts. If databases are
+// assigned for a particular user, the user will be granted all privileges
+// for those specified databases. "root" is a reserved name and cannot be used.
+func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToUserCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(baseURL(client, instanceID), &b, nil, nil)
+	return
+}
+
+// List will list all the users associated with a specified database instance,
+// along with their associated databases.
This operation will not return any +// system users or administrators for a database. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + return pagination.NewPager(client, baseURL(client, instanceID), func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete a user from a specified database instance. +func Delete(client *gophercloud.ServiceClient, instanceID, userName string) (r DeleteResult) { + _, r.Err = client.Delete(userURL(client, instanceID, userName), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go new file mode 100644 index 00000000..d12a681b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go @@ -0,0 +1,62 @@ +package users + +import ( + "github.com/gophercloud/gophercloud" + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/pagination" +) + +// User represents a database user +type User struct { + // The user name + Name string + + // The user password + Password string + + // The databases associated with this user + Databases []db.Database +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UserPage represents a single page of a paginated user collection. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(page) + return len(users) == 0, err +} + +// NextPageURL will retrieve the next page URL. +func (page UserPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"users_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractUsers will convert a generic pagination struct into a more +// relevant slice of User structs. 
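+//
+// A short illustrative sketch (placeholder instance ID; an authenticated
+// *gophercloud.ServiceClient named "client" is assumed):
+//
+//	allPages, err := List(client, "{instanceID}").AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	allUsers, err := ExtractUsers(allPages)
+//	if err != nil {
+//		panic(err)
+//	}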
+func ExtractUsers(r pagination.Page) ([]User, error) {
+	var s struct {
+		Users []User `json:"users"`
+	}
+	err := (r.(UserPage)).ExtractInto(&s)
+	return s.Users, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go
new file mode 100644
index 00000000..8c36a39b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go
@@ -0,0 +1,11 @@
+package users
+
+import "github.com/gophercloud/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient, instanceID string) string {
+	return c.ServiceURL("instances", instanceID, "users")
+}
+
+func userURL(c *gophercloud.ServiceClient, instanceID, userName string) string {
+	return c.ServiceURL("instances", instanceID, "users", userName)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go
new file mode 100644
index 00000000..617fafa6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go
@@ -0,0 +1,54 @@
+/*
+Package recordsets provides information and interaction with the zone API
+resource for the OpenStack DNS service.
+
+Example to List RecordSets by Zone
+
+	listOpts := recordsets.ListOpts{
+		Type: "A",
+	}
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+
+	allPages, err := recordsets.ListByZone(dnsClient, zoneID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRRs, err := recordsets.ExtractRecordSets(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, rr := range allRRs {
+		fmt.Printf("%+v\n", rr)
+	}
+
+Example to Create a RecordSet
+
+	createOpts := recordsets.CreateOpts{
+		Name:        "example.com.",
+		Type:        "A",
+		TTL:         3600,
+		Description: "This is a recordset.",
+		Records:     []string{"10.1.0.2"},
+	}
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+
+	rr, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a RecordSet
+
+	zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd"
+	recordsetID := "d96ed01a-b439-4eb8-9b90-7a9f71017f7b"
+
+	err := recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package recordsets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go
new file mode 100644
index 00000000..1d9a77bc
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go
@@ -0,0 +1,173 @@
+package recordsets
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToRecordSetListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the server attributes you want to see returned. Marker and Limit are used
+// for pagination.
+// https://developer.openstack.org/api-ref/dns/
+type ListOpts struct {
+	// Integer value for the limit of values to return.
+	Limit int `q:"limit"`
+
+	// UUID of the recordset at which you want to set a marker.
+	Marker string `q:"marker"`
+
+	Data        string `q:"data"`
+	Description string `q:"description"`
+	Name        string `q:"name"`
+	SortDir     string `q:"sort_dir"`
+	SortKey     string `q:"sort_key"`
+	Status      string `q:"status"`
+	TTL         int    `q:"ttl"`
+	Type        string `q:"type"`
+	ZoneID      string `q:"zone_id"`
+}
+
+// ToRecordSetListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToRecordSetListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListByZone implements the recordset list request.
+func ListByZone(client *gophercloud.ServiceClient, zoneID string, opts ListOptsBuilder) pagination.Pager {
+	url := baseURL(client, zoneID)
+	if opts != nil {
+		query, err := opts.ToRecordSetListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return RecordSetPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get implements the recordset Get request.
+func Get(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r GetResult) {
+	_, r.Err = client.Get(rrsetURL(client, zoneID, rrsetID), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional attributes to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToRecordSetCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies the base attributes that may be used to create a
+// RecordSet.
+type CreateOpts struct {
+	// Name is the name of the RecordSet.
+	Name string `json:"name" required:"true"`
+
+	// Description is a description of the RecordSet.
+	Description string `json:"description,omitempty"`
+
+	// Records are the DNS records of the RecordSet.
+	Records []string `json:"records,omitempty"`
+
+	// TTL is the time to live of the RecordSet.
+	TTL int `json:"ttl,omitempty"`
+
+	// Type is the RRTYPE of the RecordSet.
+	Type string `json:"type,omitempty"`
+}
+
+// ToRecordSetCreateMap formats a CreateOpts structure into a request body.
+func (opts CreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// Create creates a recordset in a given zone.
+func Create(client *gophercloud.ServiceClient, zoneID string, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToRecordSetCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(baseURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201, 202},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToRecordSetUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// RecordSet.
+type UpdateOpts struct {
+	// Description is a description of the RecordSet.
+	Description *string `json:"description,omitempty"`
+
+	// TTL is the time to live of the RecordSet.
+	TTL *int `json:"ttl,omitempty"`
+
+	// Records are the DNS records of the RecordSet.
+	Records []string `json:"records,omitempty"`
+}
+
+// ToRecordSetUpdateMap formats an UpdateOpts structure into a request body.
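+//
+// For illustration (a sketch): a nil TTL omits the ttl field, a positive
+// value sends it as-is, and a pointer to 0 sends "ttl": null, which clears
+// the TTL on the recordset:
+//
+//	ttl := 0
+//	opts := recordsets.UpdateOpts{TTL: &ttl} // body will contain "ttl": null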
+func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	// If opts.TTL was set, send a positive value as-is; send zero (or a
+	// negative value) as JSON null, which clears the TTL.
+	//
+	// If opts.TTL was not set, omit the ttl field entirely.
+	if opts.TTL != nil {
+		ttl := *(opts.TTL)
+		if ttl > 0 {
+			b["ttl"] = ttl
+		} else {
+			b["ttl"] = nil
+		}
+	}
+
+	return b, nil
+}
+
+// Update updates a recordset in a given zone.
+func Update(client *gophercloud.ServiceClient, zoneID string, rrsetID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToRecordSetUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(rrsetURL(client, zoneID, rrsetID), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete removes an existing RecordSet.
+func Delete(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r DeleteResult) {
+	_, r.Err = client.Delete(rrsetURL(client, zoneID, rrsetID), &gophercloud.RequestOpts{
+		OkCodes: []int{202},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go
new file mode 100644
index 00000000..0fdc1fe5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go
@@ -0,0 +1,147 @@
+package recordsets
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a GetResult, CreateResult or UpdateResult as a RecordSet.
+// An error is returned if the original call or the extraction failed.
+func (r commonResult) Extract() (*RecordSet, error) {
+	var s *RecordSet
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// CreateResult is the result of a Create operation. Call its Extract method to
+// interpret the result as a RecordSet.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the result of a Get operation. Call its Extract method to
+// interpret the result as a RecordSet.
+type GetResult struct {
+	commonResult
+}
+
+// RecordSetPage is a single page of RecordSet results.
+type RecordSetPage struct {
+	pagination.LinkedPageBase
+}
+
+// UpdateResult is the result of an Update operation. Call its Extract method to
+// interpret the result as a RecordSet.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult is the result of a Delete operation. Call its ExtractErr method to
+// determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// IsEmpty returns true if the page contains no results.
+func (r RecordSetPage) IsEmpty() (bool, error) {
+	s, err := ExtractRecordSets(r)
+	return len(s) == 0, err
+}
+
+// ExtractRecordSets extracts a slice of RecordSets from a List result.
+func ExtractRecordSets(r pagination.Page) ([]RecordSet, error) {
+	var s struct {
+		RecordSets []RecordSet `json:"recordsets"`
+	}
+	err := (r.(RecordSetPage)).ExtractInto(&s)
+	return s.RecordSets, err
+}
+
+// RecordSet represents a DNS Record Set.
+type RecordSet struct {
+	// ID is the unique ID of the recordset.
+	ID string `json:"id"`
+
+	// ZoneID is the ID of the zone the recordset belongs to.
+	ZoneID string `json:"zone_id"`
+
+	// ProjectID is the ID of the project that owns the recordset.
+	ProjectID string `json:"project_id"`
+
+	// Name is the name of the recordset.
+	Name string `json:"name"`
+
+	// ZoneName is the name of the zone the recordset belongs to.
+	ZoneName string `json:"zone_name"`
+
+	// Type is the RRTYPE of the recordset.
+	Type string `json:"type"`
+
+	// Records are the DNS records of the recordset.
+	Records []string `json:"records"`
+
+	// TTL is the time to live of the recordset.
+	TTL int `json:"ttl"`
+
+	// Status is the status of the recordset.
+	Status string `json:"status"`
+
+	// Action is the current action in progress of the recordset.
+	Action string `json:"action"`
+
+	// Description is the description of the recordset.
+	Description string `json:"description"`
+
+	// Version is the revision of the recordset.
+	Version int `json:"version"`
+
+	// CreatedAt is the date when the recordset was created.
+	CreatedAt time.Time `json:"-"`
+
+	// UpdatedAt is the date when the recordset was updated.
+	UpdatedAt time.Time `json:"-"`
+
+	// Links includes HTTP references to the recordset itself,
+	// useful for passing along to other APIs that might want a recordset
+	// reference.
+	Links []gophercloud.Link `json:"-"`
+}
+
+func (r *RecordSet) UnmarshalJSON(b []byte) error {
+	type tmp RecordSet
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+		Links     map[string]interface{}          `json:"links"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = RecordSet(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+
+	if s.Links != nil {
+		for rel, href := range s.Links {
+			if v, ok := href.(string); ok {
+				link := gophercloud.Link{
+					Rel:  rel,
+					Href: v,
+				}
+				r.Links = append(r.Links, link)
+			}
+		}
+	}
+
+	return err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go
new file mode 100644
index 00000000..5ec18d1b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go
@@ -0,0 +1,11 @@
+package recordsets
+
+import "github.com/gophercloud/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient, zoneID string) string {
+	return c.ServiceURL("zones", zoneID, "recordsets")
+}
+
+func rrsetURL(c *gophercloud.ServiceClient, zoneID string, rrsetID string) string {
+	return c.ServiceURL("zones", zoneID, "recordsets", rrsetID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go
new file mode 100644
index 00000000..7733155b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go
@@ -0,0 +1,48 @@
+/*
+Package zones provides information and interaction with the zone API
+resource for the OpenStack DNS service.
+ +Example to List Zones + + listOpts := zones.ListOpts{ + Email: "jdoe@example.com", + } + + allPages, err := zones.List(dnsClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allZones, err := zones.ExtractZones(allPages) + if err != nil { + panic(err) + } + + for _, zone := range allZones { + fmt.Printf("%+v\n", zone) + } + +Example to Create a Zone + + createOpts := zones.CreateOpts{ + Name: "example.com.", + Email: "jdoe@example.com", + Type: "PRIMARY", + TTL: 7200, + Description: "This is a zone.", + } + + zone, err := zones.Create(dnsClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Zone + + zoneID := "99d10f68-5623-4491-91a0-6daafa32b60e" + err := zones.Delete(dnsClient, zoneID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package zones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go new file mode 100644 index 00000000..78b08ae4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go @@ -0,0 +1,174 @@ +package zones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add parameters to the List request. +type ListOptsBuilder interface { + ToZoneListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +// https://developer.openstack.org/api-ref/dns/ +type ListOpts struct { + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // UUID of the zone at which you want to set a marker. + Marker string `q:"marker"` + + Description string `q:"description"` + Email string `q:"email"` + Name string `q:"name"` + SortDir string `q:"sort_dir"` + SortKey string `q:"sort_key"` + Status string `q:"status"` + TTL int `q:"ttl"` + Type string `q:"type"` +} + +// ToZoneListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToZoneListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List implements a zone List request. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := baseURL(client) + if opts != nil { + query, err := opts.ToZoneListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ZonePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns information about a zone, given its ID. +func Get(client *gophercloud.ServiceClient, zoneID string) (r GetResult) { + _, r.Err = client.Get(zoneURL(client, zoneID), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional attributes to the +// Create request. +type CreateOptsBuilder interface { + ToZoneCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies the attributes used to create a zone. +type CreateOpts struct { + // Attributes are settings that supply hints and filters for the zone. + Attributes map[string]string `json:"attributes,omitempty"` + + // Email contact of the zone. 
+	Email string `json:"email,omitempty"`
+
+	// Description of the zone.
+	Description string `json:"description,omitempty"`
+
+	// Name of the zone.
+	Name string `json:"name" required:"true"`
+
+	// Masters specifies zone masters if this is a secondary zone.
+	Masters []string `json:"masters,omitempty"`
+
+	// TTL is the time to live of the zone.
+	TTL int `json:"-"`
+
+	// Type specifies if this is a primary or secondary zone.
+	Type string `json:"type,omitempty"`
+}
+
+// ToZoneCreateMap formats a CreateOpts structure into a request body.
+func (opts CreateOpts) ToZoneCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.TTL > 0 {
+		b["ttl"] = opts.TTL
+	}
+
+	return b, nil
+}
+
+// Create implements a zone create request.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToZoneCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201, 202},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToZoneUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the attributes to update a zone.
+type UpdateOpts struct {
+	// Email contact of the zone.
+	Email string `json:"email,omitempty"`
+
+	// TTL is the time to live of the zone.
+	TTL int `json:"-"`
+
+	// Masters specifies zone masters if this is a secondary zone.
+	Masters []string `json:"masters,omitempty"`
+
+	// Description of the zone.
+	Description *string `json:"description,omitempty"`
+}
+
+// ToZoneUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToZoneUpdateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.TTL > 0 {
+		b["ttl"] = opts.TTL
+	}
+
+	return b, nil
+}
+
+// Update implements a zone update request.
+func Update(client *gophercloud.ServiceClient, zoneID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToZoneUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Patch(zoneURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete implements a zone delete request.
+func Delete(client *gophercloud.ServiceClient, zoneID string) (r DeleteResult) {
+	_, r.Err = client.Delete(zoneURL(client, zoneID), &gophercloud.RequestOpts{
+		OkCodes:      []int{202},
+		JSONResponse: &r.Body,
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go
new file mode 100644
index 00000000..a36eca7e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go
@@ -0,0 +1,166 @@
+package zones
+
+import (
+	"encoding/json"
+	"strconv"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a GetResult, CreateResult or UpdateResult as a Zone.
+// An error is returned if the original call or the extraction failed.
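+//
+// A minimal sketch (hypothetical zone ID; an authenticated
+// *gophercloud.ServiceClient named "client" is assumed):
+//
+//	zone, err := zones.Get(client, "{zoneID}").Extract()
+//	if err != nil {
+//		panic(err)
+//	}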
+func (r commonResult) Extract() (*Zone, error) {
+	var s *Zone
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// CreateResult is the result of a Create request. Call its Extract method
+// to interpret the result as a Zone.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the result of a Get request. Call its Extract method
+// to interpret the result as a Zone.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult is the result of an Update request. Call its Extract method
+// to interpret the result as a Zone.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult is the result of a Delete request. Call its ExtractErr method
+// to determine if the request succeeded or failed.
+type DeleteResult struct {
+	commonResult
+}
+
+// ZonePage is a single page of Zone results.
+type ZonePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if the page contains no results.
+func (r ZonePage) IsEmpty() (bool, error) {
+	s, err := ExtractZones(r)
+	return len(s) == 0, err
+}
+
+// ExtractZones extracts a slice of Zones from a List result.
+func ExtractZones(r pagination.Page) ([]Zone, error) {
+	var s struct {
+		Zones []Zone `json:"zones"`
+	}
+	err := (r.(ZonePage)).ExtractInto(&s)
+	return s.Zones, err
+}
+
+// Zone represents a DNS zone.
+type Zone struct {
+	// ID uniquely identifies this zone amongst all other zones, including those
+	// not accessible to the current tenant.
+	ID string `json:"id"`
+
+	// PoolID is the ID for the pool hosting this zone.
+	PoolID string `json:"pool_id"`
+
+	// ProjectID identifies the project/tenant owning this resource.
+	ProjectID string `json:"project_id"`
+
+	// Name is the DNS Name for the zone.
+	Name string `json:"name"`
+
+	// Email for the zone. Used in SOA records for the zone.
+	Email string `json:"email"`
+
+	// Description for this zone.
+	Description string `json:"description"`
+
+	// TTL is the Time to Live for the zone.
+	TTL int `json:"ttl"`
+
+	// Serial is the current serial number for the zone.
+	Serial int `json:"-"`
+
+	// Status is the status of the resource.
+	Status string `json:"status"`
+
+	// Action is the current action in progress on the resource.
+	Action string `json:"action"`
+
+	// Version of the resource.
+	Version int `json:"version"`
+
+	// Attributes for the zone.
+	Attributes map[string]string `json:"attributes"`
+
+	// Type of zone. Primary is controlled by Designate.
+	// Secondary zones are slaved from another DNS Server.
+	// Defaults to Primary.
+	Type string `json:"type"`
+
+	// Masters lists the servers from which slave servers get DNS information.
+	Masters []string `json:"masters"`
+
+	// CreatedAt is the date when the zone was created.
+	CreatedAt time.Time `json:"-"`
+
+	// UpdatedAt is the date when the last change was made to the zone.
+	UpdatedAt time.Time `json:"-"`
+
+	// TransferredAt is the last time an update was retrieved from the
+	// master servers.
+	TransferredAt time.Time `json:"-"`
+
+	// Links includes HTTP references to the zone itself, useful for passing along
+	// to other APIs that might want a zone reference.
+ Links map[string]interface{} `json:"links"` +} + +func (r *Zone) UnmarshalJSON(b []byte) error { + type tmp Zone + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + TransferredAt gophercloud.JSONRFC3339MilliNoZ `json:"transferred_at"` + Serial interface{} `json:"serial"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Zone(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.TransferredAt = time.Time(s.TransferredAt) + + switch t := s.Serial.(type) { + case float64: + r.Serial = int(t) + case string: + switch t { + case "": + r.Serial = 0 + default: + serial, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Serial = int(serial) + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go new file mode 100644 index 00000000..9bef7058 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go @@ -0,0 +1,11 @@ +package zones + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("zones") +} + +func zoneURL(c *gophercloud.ServiceClient, zoneID string) string { + return c.ServiceURL("zones", zoneID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go new file mode 100644 index 00000000..cedf1f4d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go @@ -0,0 +1,14 @@ +/* +Package openstack contains resources for the individual OpenStack projects +supported in Gophercloud. It also includes functions to authenticate to an +OpenStack cloud and for provisioning various service-level clients. + +Example of Creating a Service Client + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go new file mode 100644 index 00000000..12c8aebc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go @@ -0,0 +1,107 @@ +package openstack + +import ( + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +/* +V2EndpointURL discovers the endpoint URL for a specific service from a +ServiceCatalog acquired during the v2 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. 
+*/ +func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + var endpoints = make([]tokens2.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region == "" || endpoint.Region == opts.Region { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + err := &ErrMultipleMatchingEndpointsV2{} + err.Endpoints = endpoints + return "", err + } + + // Extract the appropriate URL from the matching Endpoint. + for _, endpoint := range endpoints { + switch opts.Availability { + case gophercloud.AvailabilityPublic: + return gophercloud.NormalizeURL(endpoint.PublicURL), nil + case gophercloud.AvailabilityInternal: + return gophercloud.NormalizeURL(endpoint.InternalURL), nil + case gophercloud.AvailabilityAdmin: + return gophercloud.NormalizeURL(endpoint.AdminURL), nil + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3EndpointURL discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. 
+ err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go new file mode 100644 index 00000000..df410b1c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go @@ -0,0 +1,71 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +// ErrEndpointNotFound is the error when no suitable endpoint can be found +// in the user's catalog +type ErrEndpointNotFound struct{ gophercloud.BaseError } + +func (e ErrEndpointNotFound) Error() string { + return "No suitable endpoint could be found in the service catalog." +} + +// ErrInvalidAvailabilityProvided is the error when an invalid endpoint +// availability is provided +type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } + +func (e ErrInvalidAvailabilityProvided) Error() string { + return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) +} + +// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint +// for the given options is found in the v2 catalog +type ErrMultipleMatchingEndpointsV2 struct { + gophercloud.BaseError + Endpoints []tokens2.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV2) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint +// for the given options is found in the v3 catalog +type ErrMultipleMatchingEndpointsV3 struct { + gophercloud.BaseError + Endpoints []tokens3.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV3) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not +// found +type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoAuthURL) Error() string { + return "Environment variable OS_AUTH_URL needs to be set." +} + +// ErrNoUsername is the error when the OS_USERNAME environment variable is not +// found +type ErrNoUsername struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoUsername) Error() string { + return "Environment variable OS_USERNAME needs to be set." +} + +// ErrNoPassword is the error when the OS_PASSWORD environment variable is not +// found +type ErrNoPassword struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoPassword) Error() string { + return "Environment variable OS_PASSWORD needs to be set." +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go new file mode 100644 index 00000000..45623369 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go @@ -0,0 +1,65 @@ +/* +Package tenants provides information and interaction with the +tenants API resource for the OpenStack Identity service. + +See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants +for more information. 
+
+Example to List Tenants
+
+	listOpts := tenants.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := tenants.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allTenants, err := tenants.ExtractTenants(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, tenant := range allTenants {
+		fmt.Printf("%+v\n", tenant)
+	}
+
+Example to Create a Tenant
+
+	createOpts := tenants.CreateOpts{
+		Name: "tenant_name",
+		Description: "this is a tenant",
+		Enabled: gophercloud.Enabled,
+	}
+
+	tenant, err := tenants.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	updateOpts := tenants.UpdateOpts{
+		Description: "this is a new description",
+		Enabled: gophercloud.Disabled,
+	}
+
+	tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Tenant
+
+	tenantID := "e6db6ed6277c461a853458589063b295"
+
+	err := tenants.Delete(identityClient, tenantID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
new file mode 100644
index 00000000..f21a58f1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
@@ -0,0 +1,116 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts filters the Tenants that are returned by the List call.
+type ListOpts struct {
+	// Marker is the ID of the last Tenant on the previous page.
+	Marker string `q:"marker"`
+
+	// Limit specifies the page size.
+	Limit int `q:"limit"`
+}
+
+// List enumerates the Tenants to which the current token has access.
+func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		q, err := gophercloud.BuildQueryString(opts)
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += q.String()
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOpts represents the options needed when creating a new tenant.
+type CreateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name" required:"true"`
+
+	// Description is the description of the tenant.
+	Description string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// CreateOptsBuilder enables extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToTenantCreateMap() (map[string]interface{}, error)
+}
+
+// ToTenantCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Create is the operation responsible for creating a new tenant.
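+// A short usage sketch (identityClient is assumed to be an Identity v2
+// service client):
+//
+//	tenant, err := tenants.Create(identityClient, tenants.CreateOpts{
+//		Name: "demo",
+//	}).Extract()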
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToTenantCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Get requests details on a single tenant by ID.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToTenantUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts specifies the base attributes that may be updated on an existing
+// tenant.
+type UpdateOpts struct {
+	// Name is the name of the tenant.
+	Name string `json:"name,omitempty"`
+
+	// Description is the description of the tenant.
+	Description *string `json:"description,omitempty"`
+
+	// Enabled sets the tenant status to enabled or disabled.
+	Enabled *bool `json:"enabled,omitempty"`
+}
+
+// ToTenantUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "tenant")
+}
+
+// Update is the operation responsible for updating existing tenants by their TenantID.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToTenantUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete is the operation responsible for permanently deleting a tenant.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
new file mode 100644
index 00000000..bb6c2c6b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
@@ -0,0 +1,91 @@
+package tenants
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Tenant is a grouping of users in the identity service.
+type Tenant struct {
+	// ID is a unique identifier for this tenant.
+	ID string `json:"id"`
+
+	// Name is a friendlier user-facing name for this tenant.
+	Name string `json:"name"`
+
+	// Description is a human-readable explanation of this Tenant's purpose.
+	Description string `json:"description"`
+
+	// Enabled indicates whether or not a tenant is active.
+	Enabled bool `json:"enabled"`
+}
+
+// TenantPage is a single page of Tenant results.
+type TenantPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of Tenants contains any results.
+func (r TenantPage) IsEmpty() (bool, error) {
+	tenants, err := ExtractTenants(r)
+	return len(tenants) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the tenants_links section of the result.
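+// It is invoked by the pagination machinery (for example, by AllPages or
+// EachPage); callers rarely need to call it directly.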
+func (r TenantPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"tenants_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// ExtractTenants returns a slice of Tenants contained in a single page of
+// results.
+func ExtractTenants(r pagination.Page) ([]Tenant, error) {
+	var s struct {
+		Tenants []Tenant `json:"tenants"`
+	}
+	err := (r.(TenantPage)).ExtractInto(&s)
+	return s.Tenants, err
+}
+
+type tenantResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any tenantResult as a Tenant.
+func (r tenantResult) Extract() (*Tenant, error) {
+	var s struct {
+		Tenant *Tenant `json:"tenant"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Tenant, err
+}
+
+// GetResult is the response from a Get request. Call its Extract method to
+// interpret it as a Tenant.
+type GetResult struct {
+	tenantResult
+}
+
+// CreateResult is the response from a Create request. Call its Extract method
+// to interpret it as a Tenant.
+type CreateResult struct {
+	tenantResult
+}
+
+// DeleteResult is the response from a Delete request. Call its ExtractErr method
+// to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult is the response from an Update request. Call its Extract method
+// to interpret it as a Tenant.
+type UpdateResult struct {
+	tenantResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
new file mode 100644
index 00000000..0f026690
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
@@ -0,0 +1,23 @@
+package tenants
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func getURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tenants")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
+
+func updateURL(client *gophercloud.ServiceClient, tenantID string) string {
+	return client.ServiceURL("tenants", tenantID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
new file mode 100644
index 00000000..5375eea8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
@@ -0,0 +1,46 @@
+/*
+Package tokens provides information and interaction with the token API
+resource for the OpenStack Identity service.
+
+For more information, see:
+http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
+
+Example to Create an Unscoped Token from a Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "pass",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant ID and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "password",
+		TenantID: "fc394f2ab2df4114bde39905f800dc57",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Token from a Tenant Name and Password
+
+	authOpts := gophercloud.AuthOptions{
+		Username: "user",
+		Password: "password",
+		TenantName: "tenantname",
+	}
+
+	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
+	if err != nil {
+		panic(err)
+	}
+*/
+package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
new file mode 100644
index 00000000..ab32368c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
@@ -0,0 +1,103 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// PasswordCredentialsV2 represents the required options to authenticate
+// with a username and password.
+type PasswordCredentialsV2 struct {
+	Username string `json:"username" required:"true"`
+	Password string `json:"password" required:"true"`
+}
+
+// TokenCredentialsV2 represents the required options to authenticate
+// with a token.
+type TokenCredentialsV2 struct {
+	ID string `json:"id,omitempty" required:"true"`
+}
+
+// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the
+// AuthOptionsBuilder interface.
+type AuthOptionsV2 struct {
+	PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"`
+
+	// The TenantID and TenantName fields are optional for the Identity V2 API.
+	// Some providers allow you to specify a TenantName instead of the TenantId.
+	// Some require both. Your provider's authentication policies will determine
+	// how these fields influence authentication.
+	TenantID string `json:"tenantId,omitempty"`
+	TenantName string `json:"tenantName,omitempty"`
+
+	// TokenCredentials allows users to authenticate (possibly as another user)
+	// with an authentication token ID.
+	TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"`
+}
+
+// AuthOptionsBuilder allows extensions to add additional parameters to the
+// token create request.
+type AuthOptionsBuilder interface {
+	// ToTokenCreateMap assembles the Create request body, returning an error
+	// if parameters are missing or inconsistent.
+	ToTokenV2CreateMap() (map[string]interface{}, error)
+}
+
+// AuthOptions are the valid options for Openstack Identity v2 authentication.
+// For field descriptions, see gophercloud.AuthOptions.
+type AuthOptions struct {
+	IdentityEndpoint string `json:"-"`
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	TenantID string `json:"tenantId,omitempty"`
+	TenantName string `json:"tenantName,omitempty"`
+	AllowReauth bool `json:"-"`
+	TokenID string
+}
+
+// ToTokenV2CreateMap builds a token request body from the given AuthOptions.
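+// For example, a username/password AuthOptions produces a request body
+// shaped like:
+//
+//	{"auth": {"passwordCredentials": {"username": "user", "password": "pass"}}}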
+func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) {
+	v2Opts := AuthOptionsV2{
+		TenantID: opts.TenantID,
+		TenantName: opts.TenantName,
+	}
+
+	if opts.Password != "" {
+		v2Opts.PasswordCredentials = &PasswordCredentialsV2{
+			Username: opts.Username,
+			Password: opts.Password,
+		}
+	} else {
+		v2Opts.TokenCredentials = &TokenCredentialsV2{
+			ID: opts.TokenID,
+		}
+	}
+
+	b, err := gophercloud.BuildRequestBody(v2Opts, "auth")
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// Create authenticates to the identity service and attempts to acquire a Token.
+// Generally, rather than interact with this call directly, end users should
+// call openstack.AuthenticatedClient(), which abstracts all of the gory details
+// about navigating service catalogs and such.
+func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) {
+	b, err := auth.ToTokenV2CreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 203},
+		MoreHeaders: map[string]string{"X-Auth-Token": ""},
+	})
+	return
+}
+
+// Get validates and retrieves information for a user's token.
+func Get(client *gophercloud.ServiceClient, token string) (r GetResult) {
+	_, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 203},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go
new file mode 100644
index 00000000..ee5da37f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go
@@ -0,0 +1,174 @@
+package tokens
+
+import (
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/identity/v2/tenants"
+)
+
+// Token provides only the most basic information related to an authentication
+// token.
+type Token struct {
+	// ID provides the primary means of identifying a user to the OpenStack API.
+	// OpenStack defines this field as an opaque value, so do not depend on its
+	// content. It is safe, however, to compare for equality.
+	ID string
+
+	// ExpiresAt provides a timestamp in ISO 8601 format, indicating when the
+	// authentication token becomes invalid. After this point in time, future
+	// API requests made using this authentication token will respond with
+	// errors. Either the caller will need to reauthenticate manually, or more
+	// preferably, the caller should exploit automatic re-authentication.
+	// See the AuthOptions structure for more details.
+	ExpiresAt time.Time
+
+	// Tenant provides information about the tenant to which this token grants
+	// access.
+	Tenant tenants.Tenant
+}
+
+// Role is a role for a user.
+type Role struct {
+	Name string `json:"name"`
+}
+
+// User is an OpenStack user.
+type User struct {
+	ID string `json:"id"`
+	Name string `json:"name"`
+	UserName string `json:"username"`
+	Roles []Role `json:"roles"`
+}
+
+// Endpoint represents a single API endpoint offered by a service.
+// It provides the public and internal URLs, if supported, along with a region
+// specifier, again if provided.
+//
+// The significance of the Region field will depend upon your provider.
+
+//
+// In addition, the interface offered by the service will have version
+// information associated with it through the VersionId, VersionInfo, and
+// VersionList fields, if provided or supported.
+//
+// In all cases, fields which aren't supported by the provider and service
+// combined will assume a zero-value ("").
+type Endpoint struct {
+	TenantID string `json:"tenantId"`
+	PublicURL string `json:"publicURL"`
+	InternalURL string `json:"internalURL"`
+	AdminURL string `json:"adminURL"`
+	Region string `json:"region"`
+	VersionID string `json:"versionId"`
+	VersionInfo string `json:"versionInfo"`
+	VersionList string `json:"versionList"`
+}
+
+// CatalogEntry provides a type-safe interface to an Identity API V2 service
+// catalog listing.
+//
+// Each class of service, such as cloud DNS or block storage services, will have
+// a single CatalogEntry representing it.
+//
+// Note: when looking for the desired service, try, whenever possible, to key
+// off the type field. Otherwise, you'll tie the representation of the service
+// to a specific provider.
+type CatalogEntry struct {
+	// Name will contain the provider-specified name for the service.
+	Name string `json:"name"`
+
+	// Type will contain a type string if OpenStack defines a type for the
+	// service. Otherwise, for provider-specific services, the provider may assign
+	// their own type strings.
+	Type string `json:"type"`
+
+	// Endpoints will let the caller iterate over all the different endpoints that
+	// may exist for the service.
+	Endpoints []Endpoint `json:"endpoints"`
+}
+
+// ServiceCatalog provides a view into the service catalog from a previous,
+// successful authentication.
+type ServiceCatalog struct {
+	Entries []CatalogEntry
+}
+
+// CreateResult is the response from a Create request. Use ExtractToken() to
+// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a
+// service catalog.
+type CreateResult struct {
+	gophercloud.Result
+}
+
+// GetResult is the deferred response from a Get call, which returns the same
+// body as a Create call. Use ExtractUser() to interpret it as a User.
+type GetResult struct {
+	CreateResult
+}
+
+// ExtractToken returns the just-created Token from a CreateResult.
+func (r CreateResult) ExtractToken() (*Token, error) {
+	var s struct {
+		Access struct {
+			Token struct {
+				Expires string `json:"expires"`
+				ID string `json:"id"`
+				Tenant tenants.Tenant `json:"tenant"`
+			} `json:"token"`
+		} `json:"access"`
+	}
+
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return nil, err
+	}
+
+	expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Token{
+		ID: s.Access.Token.ID,
+		ExpiresAt: expiresTs,
+		Tenant: s.Access.Token.Tenant,
+	}, nil
+}
+
+// ExtractTokenID implements the gophercloud.AuthResult interface. The returned
+// string is the same as the ID field of the Token struct returned from
+// ExtractToken().
+func (r CreateResult) ExtractTokenID() (string, error) {
+	var s struct {
+		Access struct {
+			Token struct {
+				ID string `json:"id"`
+			} `json:"token"`
+		} `json:"access"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Access.Token.ID, err
+}
+
+// ExtractServiceCatalog returns the ServiceCatalog that was generated along
+// with the user's Token.
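+// A brief sketch (result is assumed to come from a tokens.Create call):
+//
+//	catalog, err := result.ExtractServiceCatalog()
+//	for _, entry := range catalog.Entries {
+//		fmt.Println(entry.Type, entry.Name)
+//	}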
+func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
+	var s struct {
+		Access struct {
+			Entries []CatalogEntry `json:"serviceCatalog"`
+		} `json:"access"`
+	}
+	err := r.ExtractInto(&s)
+	return &ServiceCatalog{Entries: s.Access.Entries}, err
+}
+
+// ExtractUser returns the User from a GetResult.
+func (r GetResult) ExtractUser() (*User, error) {
+	var s struct {
+		Access struct {
+			User User `json:"user"`
+		} `json:"access"`
+	}
+	err := r.ExtractInto(&s)
+	return &s.Access.User, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
new file mode 100644
index 00000000..ee0a28f2
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
@@ -0,0 +1,13 @@
+package tokens
+
+import "github.com/gophercloud/gophercloud"
+
+// CreateURL generates the URL used to create new Tokens.
+func CreateURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("tokens")
+}
+
+// GetURL generates the URL used to Validate Tokens.
+func GetURL(client *gophercloud.ServiceClient, token string) string {
+	return client.ServiceURL("tokens", token)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/requests.go
new file mode 100644
index 00000000..61111d2e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/requests.go
@@ -0,0 +1,95 @@
+package applicationcredentials
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request
+type ListOptsBuilder interface {
+	ToApplicationCredentialListQuery() (string, error)
+}
+
+// ListOpts provides options to filter the List results.
+type ListOpts struct {
+	// Name filters the response by an application credential name
+	Name string `q:"name"`
+}
+
+// ToApplicationCredentialListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToApplicationCredentialListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List enumerates the ApplicationCredentials to which the current token has access.
+func List(client *gophercloud.ServiceClient, userID string, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client, userID)
+	if opts != nil {
+		query, err := opts.ToApplicationCredentialListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return ApplicationCredentialPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves details on a single application credential, by ID.
+func Get(client *gophercloud.ServiceClient, userID string, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, userID, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateOptsBuilder interface {
+	ToApplicationCredentialCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts provides options used to create an application credential.
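+// A hypothetical example (role names vary by deployment):
+//
+//	createOpts := applicationcredentials.CreateOpts{
+//		Name:  "monitoring-agent",
+//		Roles: []applicationcredentials.Role{{Name: "reader"}},
+//	}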
+type CreateOpts struct { + // The name of the application credential. + Name string `json:"name,omitempty" required:"true"` + // A description of the application credential’s purpose. + Description string `json:"description,omitempty"` + // A flag indicating whether the application credential may be used for creation or destruction of other application credentials or trusts. + // Defaults to false + Unrestricted bool `json:"unrestricted"` + // The secret for the application credential, either generated by the server or provided by the user. + // This is only ever shown once in the response to a create request. It is not stored nor ever shown again. + // If the secret is lost, a new application credential must be created. + Secret string `json:"secret,omitempty"` + // A list of one or more roles that this application credential has associated with its project. + // A token using this application credential will have these same roles. + Roles []Role `json:"roles,omitempty"` + // The expiration time of the application credential, if one was specified. + ExpiresAt string `json:"expires_at,omitempty"` +} + +// ToApplicationCredentialCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToApplicationCredentialCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "application_credential") +} + +// Create creates a new ApplicationCredential. +func Create(client *gophercloud.ServiceClient, userID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToApplicationCredentialCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client, userID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Delete deletes an application credential. +func Delete(client *gophercloud.ServiceClient, userID string, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, userID, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/results.go new file mode 100644 index 00000000..f6879976 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/results.go @@ -0,0 +1,127 @@ +package applicationcredentials + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type Role struct { + // DomainID is the domain ID the role belongs to. + DomainID string `json:"domain_id,omitempty"` + // ID is the unique ID of the role. + ID string `json:"id,omitempty"` + // Name is the role name + Name string `json:"name,omitempty"` +} + +// ApplicationCredential represents the application credential object +type ApplicationCredential struct { + // The ID of the application credential. + ID string `json:"id"` + // The name of the application credential. + Name string `json:"name"` + // A description of the application credential’s purpose. + Description string `json:"description"` + // A flag indicating whether the application credential may be used for creation or destruction of other application credentials or trusts. + // Defaults to false + Unrestricted bool `json:"unrestricted"` + // The secret for the application credential, either generated by the server or provided by the user. 
+	// This is only ever shown once in the response to a create request. It is not stored nor ever shown again.
+	// If the secret is lost, a new application credential must be created.
+	Secret string `json:"secret"`
+	// The ID of the project the application credential was created for and that authentication requests using this application credential will be scoped to.
+	ProjectID string `json:"project_id"`
+	// A list of one or more roles that this application credential has associated with its project.
+	// A token using this application credential will have these same roles.
+	Roles []Role `json:"roles"`
+	// The expiration time of the application credential, if one was specified.
+	ExpiresAt time.Time `json:"-"`
+	// Links contains referencing links to the application credential.
+	Links map[string]interface{} `json:"links"`
+}
+
+func (r *ApplicationCredential) UnmarshalJSON(b []byte) error {
+	type tmp ApplicationCredential
+	var s struct {
+		tmp
+		ExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"expires_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = ApplicationCredential(s.tmp)
+
+	r.ExpiresAt = time.Time(s.ExpiresAt)
+
+	return nil
+}
+
+type applicationCredentialResult struct {
+	gophercloud.Result
+}
+
+// GetResult is the response from a Get operation. Call its Extract method
+// to interpret it as an ApplicationCredential.
+type GetResult struct {
+	applicationCredentialResult
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method
+// to interpret it as an ApplicationCredential.
+type CreateResult struct {
+	applicationCredentialResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr to
+// determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// ApplicationCredentialPage is a single page of ApplicationCredential results.
+type ApplicationCredentialPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not an ApplicationCredentialPage contains any results.
+func (r ApplicationCredentialPage) IsEmpty() (bool, error) {
+	applicationCredentials, err := ExtractApplicationCredentials(r)
+	return len(applicationCredentials) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r ApplicationCredentialPage) NextPageURL() (string, error) {
+	var s struct {
+		Links struct {
+			Next string `json:"next"`
+			Previous string `json:"previous"`
+		} `json:"links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return s.Links.Next, err
+}
+
+// ExtractApplicationCredentials returns a slice of ApplicationCredentials contained in a single page of results.
+func ExtractApplicationCredentials(r pagination.Page) ([]ApplicationCredential, error) {
+	var s struct {
+		ApplicationCredentials []ApplicationCredential `json:"application_credentials"`
+	}
+	err := (r.(ApplicationCredentialPage)).ExtractInto(&s)
+	return s.ApplicationCredentials, err
+}
+
+// Extract interprets any application_credential results as an ApplicationCredential.
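+// For example: ac, err := applicationcredentials.Get(client, userID, id).Extract()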
+func (r applicationCredentialResult) Extract() (*ApplicationCredential, error) { + var s struct { + ApplicationCredential *ApplicationCredential `json:"application_credential"` + } + err := r.ExtractInto(&s) + return s.ApplicationCredential, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/urls.go new file mode 100644 index 00000000..14762069 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/applicationcredentials/urls.go @@ -0,0 +1,19 @@ +package applicationcredentials + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "application_credentials") +} + +func getURL(client *gophercloud.ServiceClient, userID string, id string) string { + return client.ServiceURL("users", userID, "application_credentials", id) +} + +func createURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "application_credentials") +} + +func deleteURL(client *gophercloud.ServiceClient, userID string, id string) string { + return client.ServiceURL("users", userID, "application_credentials", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go new file mode 100644 index 00000000..5822017c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go @@ -0,0 +1,69 @@ +/* +Package endpoints provides information and interaction with the service +endpoints API resource in the OpenStack Identity service. 
+ +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#endpoints-v3 + +Example to List Endpoints + + serviceID := "e629d6e599d9489fb3ae5d9cc12eaea3" + + listOpts := endpoints.ListOpts{ + ServiceID: serviceID, + } + + allPages, err := endpoints.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allEndpoints, err := endpoints.ExtractEndpoints(allPages) + if err != nil { + panic(err) + } + + for _, endpoint := range allEndpoints { + fmt.Printf("%+v\n", endpoint) + } + +Example to Create an Endpoint + + serviceID := "e629d6e599d9489fb3ae5d9cc12eaea3" + + createOpts := endpoints.CreateOpts{ + Availability: gophercloud.AvailabilityPublic, + Name: "neutron", + Region: "RegionOne", + URL: "https://localhost:9696", + ServiceID: serviceID, + } + + endpoint, err := endpoints.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + + +Example to Update an Endpoint + + endpointID := "ad59deeec5154d1fa0dcff518596f499" + + updateOpts := endpoints.UpdateOpts{ + Region: "RegionTwo", + } + + endpoint, err := endpoints.Update(identityClient, endpointID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete an Endpoint + + endpointID := "ad59deeec5154d1fa0dcff518596f499" + err := endpoints.Delete(identityClient, endpointID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package endpoints diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go new file mode 100644 index 00000000..0764a118 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go @@ -0,0 +1,136 @@ +package endpoints + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type CreateOptsBuilder interface { + ToEndpointCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains the subset of Endpoint attributes that should be used +// to create an Endpoint. +type CreateOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface" required:"true"` + + // Name is the name of the Endpoint. + Name string `json:"name" required:"true"` + + // Region is the region the Endpoint is located in. + // This field can be omitted or left as a blank string. + Region string `json:"region,omitempty"` + + // URL is the url of the Endpoint. + URL string `json:"url" required:"true"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id" required:"true"` +} + +// ToEndpointCreateMap builds a request body from the Endpoint Create options. +func (opts CreateOpts) ToEndpointCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint") +} + +// Create inserts a new Endpoint into the service catalog. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToEndpointCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(listURL(client), &b, &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add parameters to the List request. 
+type ListOptsBuilder interface { + ToEndpointListParams() (string, error) +} + +// ListOpts allows finer control over the endpoints returned by a List call. +// All fields are optional. +type ListOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `q:"interface"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `q:"service_id"` + + // RegionID is the ID of the region the Endpoint refers to. + RegionID int `q:"region_id"` +} + +// ToEndpointListParams builds a list request from the List options. +func (opts ListOpts) ToEndpointListParams() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates endpoints in a paginated collection, optionally filtered +// by ListOpts criteria. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + u := listURL(client) + if opts != nil { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + u += q.String() + } + return pagination.NewPager(client, u, func(r pagination.PageResult) pagination.Page { + return EndpointPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add parameters to the Update request. +type UpdateOptsBuilder interface { + ToEndpointUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the subset of Endpoint attributes that should be used to +// update an Endpoint. +type UpdateOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface,omitempty"` + + // Name is the name of the Endpoint. + Name string `json:"name,omitempty"` + + // Region is the region the Endpoint is located in. + // This field can be omitted or left as a blank string. + Region string `json:"region,omitempty"` + + // URL is the url of the Endpoint. + URL string `json:"url,omitempty"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id,omitempty"` +} + +// ToEndpointUpdateMap builds an update request body from the Update options. +func (opts UpdateOpts) ToEndpointUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint") +} + +// Update changes an existing endpoint with new data. +func Update(client *gophercloud.ServiceClient, endpointID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToEndpointUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(endpointURL(client, endpointID), &b, &r.Body, nil) + return +} + +// Delete removes an endpoint from the service catalog. 
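+// A successful call returns no body, so check the outcome with ExtractErr,
+// e.g. err := endpoints.Delete(client, endpointID).ExtractErr()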
+func Delete(client *gophercloud.ServiceClient, endpointID string) (r DeleteResult) { + _, r.Err = client.Delete(endpointURL(client, endpointID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go new file mode 100644 index 00000000..6e54f6b4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go @@ -0,0 +1,80 @@ +package endpoints + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete +// Endpoint. An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*Endpoint, error) { + var s struct { + Endpoint *Endpoint `json:"endpoint"` + } + err := r.ExtractInto(&s) + return s.Endpoint, err +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as an Endpoint. +type CreateResult struct { + commonResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as an Endpoint. +type UpdateResult struct { + commonResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Endpoint describes the entry point for another service's API. +type Endpoint struct { + // ID is the unique ID of the endpoint. + ID string `json:"id"` + + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface"` + + // Name is the name of the Endpoint. + Name string `json:"name"` + + // Region is the region the Endpoint is located in. + Region string `json:"region"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id"` + + // URL is the url of the Endpoint. + URL string `json:"url"` +} + +// EndpointPage is a single page of Endpoint results. +type EndpointPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if no Endpoints were returned. +func (r EndpointPage) IsEmpty() (bool, error) { + es, err := ExtractEndpoints(r) + return len(es) == 0, err +} + +// ExtractEndpoints extracts an Endpoint slice from a Page. 
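+// It is typically paired with List and AllPages, e.g.:
+//
+//	allPages, err := endpoints.List(client, nil).AllPages()
+//	allEndpoints, err := endpoints.ExtractEndpoints(allPages)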
+func ExtractEndpoints(r pagination.Page) ([]Endpoint, error) {
+	var s struct {
+		Endpoints []Endpoint `json:"endpoints"`
+	}
+	err := (r.(EndpointPage)).ExtractInto(&s)
+	return s.Endpoints, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go
new file mode 100644
index 00000000..80cf57eb
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go
@@ -0,0 +1,11 @@
+package endpoints
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("endpoints")
+}
+
+func endpointURL(client *gophercloud.ServiceClient, endpointID string) string {
+	return client.ServiceURL("endpoints", endpointID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go
new file mode 100644
index 00000000..696e2a5d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go
@@ -0,0 +1,60 @@
+/*
+Package groups manages and retrieves Groups in the OpenStack Identity Service.
+
+Example to List Groups
+
+	listOpts := groups.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := groups.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allGroups, err := groups.ExtractGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, group := range allGroups {
+		fmt.Printf("%+v\n", group)
+	}
+
+Example to Create a Group
+
+	createOpts := groups.CreateOpts{
+		Name: "groupname",
+		DomainID: "default",
+		Extra: map[string]interface{}{
+			"email": "groupname@example.com",
+		},
+	}
+
+	group, err := groups.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Group
+
+	groupID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	updateOpts := groups.UpdateOpts{
+		Description: "Updated Description for group",
+	}
+
+	group, err := groups.Update(identityClient, groupID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Group
+
+	groupID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := groups.Delete(identityClient, groupID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package groups
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go
new file mode 100644
index 00000000..98e6fe4b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go
@@ -0,0 +1,17 @@
+package groups
+
+import "fmt"
+
+// InvalidListFilter is returned by the ToGroupListQuery method when validation of
+// a filter does not pass
+type InvalidListFilter struct {
+	FilterName string
+}
+
+func (e InvalidListFilter) Error() string {
+	s := fmt.Sprintf(
+		"Invalid filter name [%s]: it must be in format of NAME__COMPARATOR",
+		e.FilterName,
+	)
+	return s
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go
new file mode 100644
index 00000000..032b5448
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go
@@ -0,0 +1,180 @@
+package groups + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToGroupListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Name filters the response by group name. + Name string `q:"name"` + + // Filters filters the response by custom filters such as + // 'name__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the Groups to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return GroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single group, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a group. +type CreateOpts struct { + // Name is the name of the new group. + Name string `json:"name" required:"true"` + + // Description is a description of the group. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToGroupCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a group. 
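+// For example, to rename a group while replacing its description (Description
+// is a pointer, so it must reference an addressable string):
+//
+//	desc := "updated description"
+//	updateOpts := groups.UpdateOpts{Name: "devs", Description: &desc}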
+type UpdateOpts struct { + // Name is the name of the new group. + Name string `json:"name,omitempty"` + + // Description is a description of the group. + Description *string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToGroupUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Group. +func Update(client *gophercloud.ServiceClient, groupID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, groupID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a group. +func Delete(client *gophercloud.ServiceClient, groupID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, groupID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go new file mode 100644 index 00000000..ba7d018d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go @@ -0,0 +1,132 @@ +package groups + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Group helps manage related users. +type Group struct { + // Description describes the group purpose. + Description string `json:"description"` + + // DomainID is the domain ID the group belongs to. + DomainID string `json:"domain_id"` + + // ID is the unique ID of the group. + ID string `json:"id"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // Links contains referencing links to the group. + Links map[string]interface{} `json:"links"` + + // Name is the name of the group. + Name string `json:"name"` +} + +func (r *Group) UnmarshalJSON(b []byte) error { + type tmp Group + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Group(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Group{}, resultMap) + } + } + + return err +} + +type groupResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Group. +type GetResult struct { + groupResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Group. 
+type CreateResult struct { + groupResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Group. +type UpdateResult struct { + groupResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GroupPage is a single page of Group results. +type GroupPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Groups contains any results. +func (r GroupPage) IsEmpty() (bool, error) { + groups, err := ExtractGroups(r) + return len(groups) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r GroupPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractGroups returns a slice of Groups contained in a single page of results. +func ExtractGroups(r pagination.Page) ([]Group, error) { + var s struct { + Groups []Group `json:"groups"` + } + err := (r.(GroupPage)).ExtractInto(&s) + return s.Groups, err +} + +// Extract interprets any group results as a Group. +func (r groupResult) Extract() (*Group, error) { + var s struct { + Group *Group `json:"group"` + } + err := r.ExtractInto(&s) + return s.Group, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go new file mode 100644 index 00000000..e7d1e53b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go @@ -0,0 +1,23 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func getURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func updateURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func deleteURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go new file mode 100644 index 00000000..4f5b45ab --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go @@ -0,0 +1,58 @@ +/* +Package projects manages and retrieves Projects in the OpenStack Identity +Service. 
+
+Example to List Projects
+
+	listOpts := projects.ListOpts{
+		Enabled: gophercloud.Enabled,
+	}
+
+	allPages, err := projects.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allProjects, err := projects.ExtractProjects(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, project := range allProjects {
+		fmt.Printf("%+v\n", project)
+	}
+
+Example to Create a Project
+
+	createOpts := projects.CreateOpts{
+		Name:        "project_name",
+		Description: "Project Description",
+	}
+
+	project, err := projects.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Project
+
+	projectID := "966b3c7d36a24facaf20b7e458bf2192"
+
+	updateOpts := projects.UpdateOpts{
+		Enabled: gophercloud.Disabled,
+	}
+
+	project, err := projects.Update(identityClient, projectID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Project
+
+	projectID := "966b3c7d36a24facaf20b7e458bf2192"
+	err := projects.Delete(identityClient, projectID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package projects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go
new file mode 100644
index 00000000..7be97d85
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go
@@ -0,0 +1,17 @@
+package projects
+
+import "fmt"
+
+// InvalidListFilter is returned by the ToProjectListQuery method when
+// validation of a filter does not pass
+type InvalidListFilter struct {
+	FilterName string
+}
+
+func (e InvalidListFilter) Error() string {
+	s := fmt.Sprintf(
+		"Invalid filter name [%s]: it must be in format of NAME__COMPARATOR",
+		e.FilterName,
+	)
+	return s
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go
new file mode 100644
index 00000000..0e461695
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go
@@ -0,0 +1,174 @@
+package projects
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request
+type ListOptsBuilder interface {
+	ToProjectListQuery() (string, error)
+}
+
+// ListOpts enables filtering of a list request.
+type ListOpts struct {
+	// DomainID filters the response by a domain ID.
+	DomainID string `q:"domain_id"`
+
+	// Enabled filters the response by enabled projects.
+	Enabled *bool `q:"enabled"`
+
+	// IsDomain filters the response by projects that are domains.
+	// Setting this to true is effectively listing domains.
+	IsDomain *bool `q:"is_domain"`
+
+	// Name filters the response by project name.
+	Name string `q:"name"`
+
+	// ParentID filters the response by projects of a given parent project.
+	ParentID string `q:"parent_id"`
+
+	// Filters filters the response by custom filters such as
+	// 'name__contains=foo'
+	Filters map[string]string `q:"-"`
+}
+
+// ToProjectListQuery formats a ListOpts into a query string.
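+//
+// A hedged sketch of the encoding (the values are hypothetical): each key in
+// Filters must contain "__" between a name and a comparator, otherwise an
+// InvalidListFilter error is returned.
+//
+//	opts := projects.ListOpts{
+//		Name:    "demo",
+//		Filters: map[string]string{"name__contains": "dev"},
+//	}
+//	query, err := opts.ToProjectListQuery()
+//	// query is now something like "?name=demo&name__contains=dev"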
+func (opts ListOpts) ToProjectListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the Projects to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToProjectListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ProjectPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single project, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToProjectCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents parameters used to create a project. +type CreateOpts struct { + // DomainID is the ID this project will belong under. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the project status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // IsDomain indicates if this project is a domain. + IsDomain *bool `json:"is_domain,omitempty"` + + // Name is the name of the project. + Name string `json:"name" required:"true"` + + // ParentID specifies the parent project of this new project. + ParentID string `json:"parent_id,omitempty"` + + // Description is the description of the project. + Description string `json:"description,omitempty"` +} + +// ToProjectCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToProjectCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "project") +} + +// Create creates a new Project. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToProjectCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, nil) + return +} + +// Delete deletes a project. +func Delete(client *gophercloud.ServiceClient, projectID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, projectID), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToProjectUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents parameters to update a project. +type UpdateOpts struct { + // DomainID is the ID this project will belong under. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the project status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // IsDomain indicates if this project is a domain. + IsDomain *bool `json:"is_domain,omitempty"` + + // Name is the name of the project. + Name string `json:"name,omitempty"` + + // ParentID specifies the parent project of this new project. + ParentID string `json:"parent_id,omitempty"` + + // Description is the description of the project. 
+	Description *string `json:"description,omitempty"`
+}
+
+// ToProjectUpdateMap formats a UpdateOpts into an update request.
+func (opts UpdateOpts) ToProjectUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "project")
+}
+
+// Update modifies the attributes of a project.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToProjectUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go
new file mode 100644
index 00000000..a13fa7f2
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go
@@ -0,0 +1,103 @@
+package projects
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type projectResult struct {
+	gophercloud.Result
+}
+
+// GetResult is the result of a Get request. Call its Extract method to
+// interpret it as a Project.
+type GetResult struct {
+	projectResult
+}
+
+// CreateResult is the result of a Create request. Call its Extract method to
+// interpret it as a Project.
+type CreateResult struct {
+	projectResult
+}
+
+// DeleteResult is the result of a Delete request. Call its ExtractErr method to
+// determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult is the result of an Update request. Call its Extract method to
+// interpret it as a Project.
+type UpdateResult struct {
+	projectResult
+}
+
+// Project represents an OpenStack Identity Project.
+type Project struct {
+	// IsDomain indicates whether the project is a domain.
+	IsDomain bool `json:"is_domain"`
+
+	// Description is the description of the project.
+	Description string `json:"description"`
+
+	// DomainID is the domain ID the project belongs to.
+	DomainID string `json:"domain_id"`
+
+	// Enabled is whether or not the project is enabled.
+	Enabled bool `json:"enabled"`
+
+	// ID is the unique ID of the project.
+	ID string `json:"id"`
+
+	// Name is the name of the project.
+	Name string `json:"name"`
+
+	// ParentID is the parent_id of the project.
+	ParentID string `json:"parent_id"`
+}
+
+// ProjectPage is a single page of Project results.
+type ProjectPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of Projects contains any results.
+func (r ProjectPage) IsEmpty() (bool, error) {
+	projects, err := ExtractProjects(r)
+	return len(projects) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r ProjectPage) NextPageURL() (string, error) {
+	var s struct {
+		Links struct {
+			Next     string `json:"next"`
+			Previous string `json:"previous"`
+		} `json:"links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return s.Links.Next, err
+}
+
+// ExtractProjects returns a slice of Projects contained in a single page of
+// results.
+func ExtractProjects(r pagination.Page) ([]Project, error) {
+	var s struct {
+		Projects []Project `json:"projects"`
+	}
+	err := (r.(ProjectPage)).ExtractInto(&s)
+	return s.Projects, err
+}
+
+// Extract interprets any projectResults as a Project.
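+//
+// For example (a sketch; "identityClient" is an assumed Identity v3 client,
+// and the ID reuses the placeholder from this package's doc.go):
+//
+//	project, err := projects.Get(identityClient, "966b3c7d36a24facaf20b7e458bf2192").Extract()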
+func (r projectResult) Extract() (*Project, error) {
+	var s struct {
+		Project *Project `json:"project"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Project, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go
new file mode 100644
index 00000000..e26cf368
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go
@@ -0,0 +1,23 @@
+package projects
+
+import "github.com/gophercloud/gophercloud"
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("projects")
+}
+
+func getURL(client *gophercloud.ServiceClient, projectID string) string {
+	return client.ServiceURL("projects", projectID)
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("projects")
+}
+
+func deleteURL(client *gophercloud.ServiceClient, projectID string) string {
+	return client.ServiceURL("projects", projectID)
+}
+
+func updateURL(client *gophercloud.ServiceClient, projectID string) string {
+	return client.ServiceURL("projects", projectID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go
new file mode 100644
index 00000000..f0e4d045
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go
@@ -0,0 +1,135 @@
+/*
+Package roles provides information and interaction with the roles API
+resource for the OpenStack Identity service.
+
+Example to List Roles
+
+	listOpts := roles.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := roles.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRoles, err := roles.ExtractRoles(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, role := range allRoles {
+		fmt.Printf("%+v\n", role)
+	}
+
+Example to Create a Role
+
+	createOpts := roles.CreateOpts{
+		Name:     "read-only-admin",
+		DomainID: "default",
+		Extra: map[string]interface{}{
+			"description": "this role grants read-only privilege across tenants",
+		},
+	}
+
+	role, err := roles.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Role
+
+	roleID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	updateOpts := roles.UpdateOpts{
+		Name: "read only admin",
+	}
+
+	role, err := roles.Update(identityClient, roleID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Role
+
+	roleID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := roles.Delete(identityClient, roleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Role Assignments
+
+	listOpts := roles.ListAssignmentsOpts{
+		UserID:         "97061de2ed0647b28a393c36ab584f39",
+		ScopeProjectID: "9df1a02f5eb2416a9781e8b0c022d3ae",
+	}
+
+	allPages, err := roles.ListAssignments(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRoles, err := roles.ExtractRoleAssignments(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, role := range allRoles {
+		fmt.Printf("%+v\n", role)
+	}
+
+Example to List Role Assignments for a User on a Project
+
+	projectID := "a99e9b4e620e4db09a2dfb6e42a01e66"
+	userID := "9df1a02f5eb2416a9781e8b0c022d3ae"
+	listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{
+		UserID:    userID,
+		ProjectID: projectID,
+	}
+
+	allPages, err := roles.ListAssignmentsOnResource(identityClient, listAssignmentsOnResourceOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRoles, err := roles.ExtractRoles(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, role := range allRoles {
+		fmt.Printf("%+v\n", role)
+	}
+
+Example to Assign a Role to a User in a Project
+
+	projectID := "a99e9b4e620e4db09a2dfb6e42a01e66"
+	userID := "9df1a02f5eb2416a9781e8b0c022d3ae"
+	roleID := "9fe2ff9ee4384b1894a90878d3e92bab"
+
+	err := roles.Assign(identityClient, roleID, roles.AssignOpts{
+		UserID:    userID,
+		ProjectID: projectID,
+	}).ExtractErr()
+
+	if err != nil {
+		panic(err)
+	}
+
+Example to Unassign a Role From a User in a Project
+
+	projectID := "a99e9b4e620e4db09a2dfb6e42a01e66"
+	userID := "9df1a02f5eb2416a9781e8b0c022d3ae"
+	roleID := "9fe2ff9ee4384b1894a90878d3e92bab"
+
+	err := roles.Unassign(identityClient, roleID, roles.UnassignOpts{
+		UserID:    userID,
+		ProjectID: projectID,
+	}).ExtractErr()
+
+	if err != nil {
+		panic(err)
+	}
+*/
+package roles
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go
new file mode 100644
index 00000000..b60d7d18
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go
@@ -0,0 +1,17 @@
+package roles
+
+import "fmt"
+
+// InvalidListFilter is returned by the ToRoleListQuery method when
+// validation of a filter does not pass
+type InvalidListFilter struct {
+	FilterName string
+}
+
+func (e InvalidListFilter) Error() string {
+	s := fmt.Sprintf(
+		"Invalid filter name [%s]: it must be in format of NAME__COMPARATOR",
+		e.FilterName,
+	)
+	return s
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go
new file mode 100644
index 00000000..573db9ea
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go
@@ -0,0 +1,392 @@
+package roles
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request
+type ListOptsBuilder interface {
+	ToRoleListQuery() (string, error)
+}
+
+// ListOpts provides options to filter the List results.
+type ListOpts struct {
+	// DomainID filters the response by a domain ID.
+	DomainID string `q:"domain_id"`
+
+	// Name filters the response by role name.
+	Name string `q:"name"`
+
+	// Filters filters the response by custom filters such as
+	// 'name__contains=foo'
+	Filters map[string]string `q:"-"`
+}
+
+// ToRoleListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToRoleListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+
+	params := q.Query()
+	for k, v := range opts.Filters {
+		i := strings.Index(k, "__")
+		if i > 0 && i < len(k)-2 {
+			params.Add(k, v)
+		} else {
+			return "", InvalidListFilter{FilterName: k}
+		}
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+	return q.String(), err
+}
+
+// List enumerates the roles to which the current token has access.
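+//
+// Besides AllPages (shown in this package's doc.go), pages can also be
+// consumed one at a time; a sketch, with "identityClient" assumed:
+//
+//	err := roles.List(identityClient, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		rs, err := roles.ExtractRoles(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Printf("%+v\n", rs)
+//		return true, nil
+//	})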
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToRoleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single role, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToRoleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a role. +type CreateOpts struct { + // Name is the name of the new role. + Name string `json:"name" required:"true"` + + // DomainID is the ID of the domain the role belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the role. + Extra map[string]interface{} `json:"-"` +} + +// ToRoleCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToRoleCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "role") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["role"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Role. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRoleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToRoleUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a role. +type UpdateOpts struct { + // Name is the name of the new role. + Name string `json:"name,omitempty"` + + // Extra is free-form extra key/value pairs to describe the role. + Extra map[string]interface{} `json:"-"` +} + +// ToRoleUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToRoleUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "role") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["role"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Role. +func Update(client *gophercloud.ServiceClient, roleID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRoleUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, roleID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a role. +func Delete(client *gophercloud.ServiceClient, roleID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, roleID), nil) + return +} + +// ListAssignmentsOptsBuilder allows extensions to add additional parameters to +// the ListAssignments request. 
+type ListAssignmentsOptsBuilder interface {
+	ToRolesListAssignmentsQuery() (string, error)
+}
+
+// ListAssignmentsOpts allows you to query the ListAssignments method.
+// Specify one of or a combination of GroupId, RoleId, ScopeDomainId,
+// ScopeProjectId, and/or UserId to search for roles assigned to corresponding
+// entities.
+type ListAssignmentsOpts struct {
+	// GroupID is the group ID to query.
+	GroupID string `q:"group.id"`
+
+	// RoleID is the specific role to query assignments to.
+	RoleID string `q:"role.id"`
+
+	// ScopeDomainID filters the results by the given domain ID.
+	ScopeDomainID string `q:"scope.domain.id"`
+
+	// ScopeProjectID filters the results by the given Project ID.
+	ScopeProjectID string `q:"scope.project.id"`
+
+	// UserID filters the results by the given User ID.
+	UserID string `q:"user.id"`
+
+	// Effective lists effective assignments at the user, project, and domain
+	// level, allowing for the effects of group membership.
+	Effective *bool `q:"effective"`
+}
+
+// ToRolesListAssignmentsQuery formats a ListAssignmentsOpts into a query string.
+func (opts ListAssignmentsOpts) ToRolesListAssignmentsQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListAssignments enumerates the roles assigned to a specified resource.
+func ListAssignments(client *gophercloud.ServiceClient, opts ListAssignmentsOptsBuilder) pagination.Pager {
+	url := listAssignmentsURL(client)
+	if opts != nil {
+		query, err := opts.ToRolesListAssignmentsQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return RoleAssignmentPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// ListAssignmentsOnResourceOpts provides options to list role assignments
+// for a user/group on a project/domain
+type ListAssignmentsOnResourceOpts struct {
+	// UserID is the ID of a user to assign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	UserID string `xor:"GroupID"`
+
+	// GroupID is the ID of a group to assign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	GroupID string `xor:"UserID"`
+
+	// ProjectID is the ID of a project to assign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	ProjectID string `xor:"DomainID"`
+
+	// DomainID is the ID of a domain to assign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	DomainID string `xor:"ProjectID"`
+}
+
+// AssignOpts provides options to assign a role
+type AssignOpts struct {
+	// UserID is the ID of a user to assign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	UserID string `xor:"GroupID"`
+
+	// GroupID is the ID of a group to assign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	GroupID string `xor:"UserID"`
+
+	// ProjectID is the ID of a project to assign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	ProjectID string `xor:"DomainID"`
+
+	// DomainID is the ID of a domain to assign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	DomainID string `xor:"ProjectID"`
+}
+
+// UnassignOpts provides options to unassign a role
+type UnassignOpts struct {
+	// UserID is the ID of a user to unassign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	UserID string `xor:"GroupID"`
+
+	// GroupID is the ID of a group to unassign a role
+	// Note: exactly one of UserID or GroupID must be provided
+	GroupID string `xor:"UserID"`
+
+	// ProjectID is the ID of a project to unassign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	ProjectID string `xor:"DomainID"`
+
+	// DomainID is the ID of a domain to unassign a role on
+	// Note: exactly one of ProjectID or DomainID must be provided
+	DomainID string `xor:"ProjectID"`
+}
+
+// ListAssignmentsOnResource is the operation responsible for listing role
+// assignments for a user/group on a project/domain.
+func ListAssignmentsOnResource(client *gophercloud.ServiceClient, opts ListAssignmentsOnResourceOpts) pagination.Pager {
+	// Check xor conditions
+	_, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return pagination.Pager{Err: err}
+	}
+
+	// Get corresponding URL
+	var targetID string
+	var targetType string
+	if opts.ProjectID != "" {
+		targetID = opts.ProjectID
+		targetType = "projects"
+	} else {
+		targetID = opts.DomainID
+		targetType = "domains"
+	}
+
+	var actorID string
+	var actorType string
+	if opts.UserID != "" {
+		actorID = opts.UserID
+		actorType = "users"
+	} else {
+		actorID = opts.GroupID
+		actorType = "groups"
+	}
+
+	url := listAssignmentsOnResourceURL(client, targetType, targetID, actorType, actorID)
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return RolePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Assign is the operation responsible for assigning a role
+// to a user/group on a project/domain.
+func Assign(client *gophercloud.ServiceClient, roleID string, opts AssignOpts) (r AssignmentResult) {
+	// Check xor conditions
+	_, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	// Get corresponding URL
+	var targetID string
+	var targetType string
+	if opts.ProjectID != "" {
+		targetID = opts.ProjectID
+		targetType = "projects"
+	} else {
+		targetID = opts.DomainID
+		targetType = "domains"
+	}
+
+	var actorID string
+	var actorType string
+	if opts.UserID != "" {
+		actorID = opts.UserID
+		actorType = "users"
+	} else {
+		actorID = opts.GroupID
+		actorType = "groups"
+	}
+
+	_, r.Err = client.Put(assignURL(client, targetType, targetID, actorType, actorID, roleID), nil, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
+
+// Unassign is the operation responsible for unassigning a role
+// from a user/group on a project/domain.
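+//
+// For example (a sketch mirroring Assign; the IDs are placeholders):
+//
+//	err := roles.Unassign(identityClient, roleID, roles.UnassignOpts{
+//		UserID:    userID,
+//		ProjectID: projectID,
+//	}).ExtractErr()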
+func Unassign(client *gophercloud.ServiceClient, roleID string, opts UnassignOpts) (r UnassignmentResult) { + // Check xor conditions + _, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + r.Err = err + return + } + + // Get corresponding URL + var targetID string + var targetType string + if opts.ProjectID != "" { + targetID = opts.ProjectID + targetType = "projects" + } else { + targetID = opts.DomainID + targetType = "domains" + } + + var actorID string + var actorType string + if opts.UserID != "" { + actorID = opts.UserID + actorType = "users" + } else { + actorID = opts.GroupID + actorType = "groups" + } + + _, r.Err = client.Delete(assignURL(client, targetType, targetID, actorType, actorID, roleID), &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go new file mode 100644 index 00000000..af8fd9e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go @@ -0,0 +1,214 @@ +package roles + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Role grants permissions to a user. +type Role struct { + // DomainID is the domain ID the role belongs to. + DomainID string `json:"domain_id"` + + // ID is the unique ID of the role. + ID string `json:"id"` + + // Links contains referencing links to the role. + Links map[string]interface{} `json:"links"` + + // Name is the role name + Name string `json:"name"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` +} + +func (r *Role) UnmarshalJSON(b []byte) error { + type tmp Role + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Role(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Role{}, resultMap) + } + } + + return err +} + +type roleResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Role. +type GetResult struct { + roleResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Role +type CreateResult struct { + roleResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Role. +type UpdateResult struct { + roleResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// RolePage is a single page of Role results. +type RolePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Roles contains any results. +func (r RolePage) IsEmpty() (bool, error) { + roles, err := ExtractRoles(r) + return len(roles) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. 
+func (r RolePage) NextPageURL() (string, error) {
+	var s struct {
+		Links struct {
+			Next     string `json:"next"`
+			Previous string `json:"previous"`
+		} `json:"links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return s.Links.Next, err
+}
+
+// ExtractRoles returns a slice of Roles contained in a single page of
+// results.
+func ExtractRoles(r pagination.Page) ([]Role, error) {
+	var s struct {
+		Roles []Role `json:"roles"`
+	}
+	err := (r.(RolePage)).ExtractInto(&s)
+	return s.Roles, err
+}
+
+// Extract interprets any roleResults as a Role.
+func (r roleResult) Extract() (*Role, error) {
+	var s struct {
+		Role *Role `json:"role"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Role, err
+}
+
+// RoleAssignment is the result of a role assignments query.
+type RoleAssignment struct {
+	Role  AssignedRole `json:"role,omitempty"`
+	Scope Scope        `json:"scope,omitempty"`
+	User  User         `json:"user,omitempty"`
+	Group Group        `json:"group,omitempty"`
+}
+
+// AssignedRole represents a Role in an assignment.
+type AssignedRole struct {
+	ID string `json:"id,omitempty"`
+}
+
+// Scope represents a scope in a Role assignment.
+type Scope struct {
+	Domain  Domain  `json:"domain,omitempty"`
+	Project Project `json:"project,omitempty"`
+}
+
+// Domain represents a domain in a role assignment scope.
+type Domain struct {
+	ID string `json:"id,omitempty"`
+}
+
+// Project represents a project in a role assignment scope.
+type Project struct {
+	ID string `json:"id,omitempty"`
+}
+
+// User represents a user in a role assignment scope.
+type User struct {
+	ID string `json:"id,omitempty"`
+}
+
+// Group represents a group in a role assignment scope.
+type Group struct {
+	ID string `json:"id,omitempty"`
+}
+
+// RoleAssignmentPage is a single page of RoleAssignments results.
+type RoleAssignmentPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if the RoleAssignmentPage contains no results.
+func (r RoleAssignmentPage) IsEmpty() (bool, error) {
+	roleAssignments, err := ExtractRoleAssignments(r)
+	return len(roleAssignments) == 0, err
+}
+
+// NextPageURL uses the response's embedded link reference to navigate to
+// the next page of results.
+func (r RoleAssignmentPage) NextPageURL() (string, error) {
+	var s struct {
+		Links struct {
+			Next string `json:"next"`
+		} `json:"links"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Links.Next, err
+}
+
+// ExtractRoleAssignments extracts a slice of RoleAssignments from a Collection
+// acquired from List.
+func ExtractRoleAssignments(r pagination.Page) ([]RoleAssignment, error) {
+	var s struct {
+		RoleAssignments []RoleAssignment `json:"role_assignments"`
+	}
+	err := (r.(RoleAssignmentPage)).ExtractInto(&s)
+	return s.RoleAssignments, err
+}
+
+// AssignmentResult represents the result of an assign operation.
+// Call ExtractErr method to determine if the request succeeded or failed.
+type AssignmentResult struct {
+	gophercloud.ErrResult
+}
+
+// UnassignmentResult represents the result of an unassign operation.
+// Call ExtractErr method to determine if the request succeeded or failed.
+type UnassignmentResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go new file mode 100644 index 00000000..2b820114 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go @@ -0,0 +1,39 @@ +package roles + +import "github.com/gophercloud/gophercloud" + +const ( + rolePath = "roles" +) + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(rolePath) +} + +func getURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(rolePath) +} + +func updateURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func deleteURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func listAssignmentsURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("role_assignments") +} + +func listAssignmentsOnResourceURL(client *gophercloud.ServiceClient, targetType, targetID, actorType, actorID string) string { + return client.ServiceURL(targetType, targetID, actorType, actorID, rolePath) +} + +func assignURL(client *gophercloud.ServiceClient, targetType, targetID, actorType, actorID, roleID string) string { + return client.ServiceURL(targetType, targetID, actorType, actorID, rolePath, roleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go new file mode 100644 index 00000000..81702359 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go @@ -0,0 +1,66 @@ +/* +Package services provides information and interaction with the services API +resource for the OpenStack Identity service. 
+
+Example to List Services
+
+	listOpts := services.ListOpts{
+		ServiceType: "compute",
+	}
+
+	allPages, err := services.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServices, err := services.ExtractServices(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, service := range allServices {
+		fmt.Printf("%+v\n", service)
+	}
+
+Example to Create a Service
+
+	createOpts := services.CreateOpts{
+		Type: "compute",
+		Extra: map[string]interface{}{
+			"name":        "compute-service",
+			"description": "Compute Service",
+		},
+	}
+
+	service, err := services.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Service
+
+	serviceID := "3c7bbe9a6ecb453ca1789586291380ed"
+
+	var iFalse bool = false
+	updateOpts := services.UpdateOpts{
+		Enabled: &iFalse,
+		Extra: map[string]interface{}{
+			"description": "Disabled Compute Service",
+		},
+	}
+
+	service, err := services.Update(identityClient, serviceID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Service
+
+	serviceID := "3c7bbe9a6ecb453ca1789586291380ed"
+	err := services.Delete(identityClient, serviceID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package services
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go
new file mode 100644
index 00000000..1a8b3523
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go
@@ -0,0 +1,154 @@
+package services
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateOptsBuilder interface {
+	ToServiceCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts provides options used to create a service.
+type CreateOpts struct {
+	// Type is the type of the service.
+	Type string `json:"type"`
+
+	// Enabled is whether or not the service is enabled.
+	Enabled *bool `json:"enabled,omitempty"`
+
+	// Extra is free-form extra key/value pairs to describe the service.
+	Extra map[string]interface{} `json:"-"`
+}
+
+// ToServiceCreateMap formats a CreateOpts into a create request.
+func (opts CreateOpts) ToServiceCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "service")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Extra != nil {
+		if v, ok := b["service"].(map[string]interface{}); ok {
+			for key, value := range opts.Extra {
+				v[key] = value
+			}
+		}
+	}
+
+	return b, nil
+}
+
+// Create adds a new service of the requested type to the catalog.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToServiceCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// ListOptsBuilder enables extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToServiceListMap() (string, error)
+}
+
+// ListOpts provides options for filtering the List results.
+type ListOpts struct {
+	// ServiceType filters the response by a type of service.
+	ServiceType string `q:"type"`
+
+	// Name filters the response by a service name.
+ Name string `q:"name"` +} + +// ToServiceListMap builds a list query from the list options. +func (opts ListOpts) ToServiceListMap() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the services available to a specific user. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToServiceListMap() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServicePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns additional information about a service, given its ID. +func Get(client *gophercloud.ServiceClient, serviceID string) (r GetResult) { + _, r.Err = client.Get(serviceURL(client, serviceID), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToServiceUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a service. +type UpdateOpts struct { + // Type is the type of the service. + Type string `json:"type"` + + // Enabled is whether or not the service is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the service. + Extra map[string]interface{} `json:"-"` +} + +// ToServiceUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToServiceUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "service") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["service"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Service. +func Update(client *gophercloud.ServiceClient, serviceID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToServiceUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, serviceID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete removes an existing service. +// It either deletes all associated endpoints, or fails until all endpoints +// are deleted. +func Delete(client *gophercloud.ServiceClient, serviceID string) (r DeleteResult) { + _, r.Err = client.Delete(serviceURL(client, serviceID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go new file mode 100644 index 00000000..f3484d7a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go @@ -0,0 +1,131 @@ +package services + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +type serviceResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete +// Service. An error is returned if the original call or the extraction failed. 
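+//
+// For example (a sketch; "identityClient" and "serviceID" are assumed values):
+//
+//	service, err := services.Get(identityClient, serviceID).Extract()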
+func (r serviceResult) Extract() (*Service, error) {
+	var s struct {
+		Service *Service `json:"service"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Service, err
+}
+
+// CreateResult is the response from a Create request. Call its Extract method
+// to interpret it as a Service.
+type CreateResult struct {
+	serviceResult
+}
+
+// GetResult is the response from a Get request. Call its Extract method
+// to interpret it as a Service.
+type GetResult struct {
+	serviceResult
+}
+
+// UpdateResult is the response from an Update request. Call its Extract method
+// to interpret it as a Service.
+type UpdateResult struct {
+	serviceResult
+}
+
+// DeleteResult is the response from a Delete request. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Service represents an OpenStack Service.
+type Service struct {
+	// ID is the unique ID of the service.
+	ID string `json:"id"`
+
+	// Type is the type of the service.
+	Type string `json:"type"`
+
+	// Enabled is whether or not the service is enabled.
+	Enabled bool `json:"enabled"`
+
+	// Links contains referencing links to the service.
+	Links map[string]interface{} `json:"links"`
+
+	// Extra is a collection of miscellaneous key/values.
+	Extra map[string]interface{} `json:"-"`
+}
+
+func (r *Service) UnmarshalJSON(b []byte) error {
+	type tmp Service
+	var s struct {
+		tmp
+		Extra map[string]interface{} `json:"extra"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Service(s.tmp)
+
+	// Collect other fields and bundle them into Extra
+	// but only if a field titled "extra" wasn't sent.
+	if s.Extra != nil {
+		r.Extra = s.Extra
+	} else {
+		var result interface{}
+		err := json.Unmarshal(b, &result)
+		if err != nil {
+			return err
+		}
+		if resultMap, ok := result.(map[string]interface{}); ok {
+			r.Extra = internal.RemainingKeys(Service{}, resultMap)
+		}
+	}
+
+	return err
+}
+
+// ServicePage is a single page of Service results.
+type ServicePage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty returns true if the ServicePage contains no results.
+func (p ServicePage) IsEmpty() (bool, error) {
+	services, err := ExtractServices(p)
+	return len(services) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r ServicePage) NextPageURL() (string, error) {
+	var s struct {
+		Links struct {
+			Next     string `json:"next"`
+			Previous string `json:"previous"`
+		} `json:"links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return s.Links.Next, err
+}
+
+// ExtractServices extracts a slice of Services from a Collection acquired
+// from List.
+func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Services []Service `json:"services"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Services, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go new file mode 100644 index 00000000..caa625a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go @@ -0,0 +1,19 @@ +package services + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("services") +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("services") +} + +func serviceURL(client *gophercloud.ServiceClient, serviceID string) string { + return client.ServiceURL("services", serviceID) +} + +func updateURL(client *gophercloud.ServiceClient, serviceID string) string { + return client.ServiceURL("services", serviceID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 00000000..966e128f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,108 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 + +Example to Create a Token From a Username and Password + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Username, Password, and Domain + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainID: "default", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + + authOptions = tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainName: "default", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Token + + authOptions := tokens.AuthOptions{ + TokenID: "token_id", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project ID Scope + + scope := tokens.Scope{ + ProjectID: "0fe36e73809d46aeae6705c39077b1b3", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Domain ID Scope + + scope := tokens.Scope{ + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project Name Scope + + scope := tokens.Scope{ + ProjectName: "project_name", + DomainID: 
"default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +*/ +package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go new file mode 100644 index 00000000..e4d766b2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -0,0 +1,162 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// Scope allows a created token to be limited to a specific domain or project. +type Scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// AuthOptionsBuilder provides the ability for extensions to add additional +// parameters to AuthOptions. Extensions must satisfy all required methods. +type AuthOptionsBuilder interface { + // ToTokenV3CreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. + ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) + ToTokenV3ScopeMap() (map[string]interface{}, error) + CanReauth() bool +} + +// AuthOptions represents options for authenticating a user. +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed + // by all of the identity services, it will often be populated by a + // provider-level function. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username string `json:"username,omitempty"` + UserID string `json:"id,omitempty"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud + // to cache your credentials in memory, and to allow Gophercloud to attempt + // to re-authenticate automatically if/when your token expires. If you set + // it to false, it will not cache these settings, but re-authentication will + // not be possible. This setting defaults to false. + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + // Authentication through Application Credentials requires supplying name, project and secret + // For project we can use TenantID + ApplicationCredentialID string `json:"-"` + ApplicationCredentialName string `json:"-"` + ApplicationCredentialSecret string `json:"-"` + + Scope Scope `json:"-"` +} + +// ToTokenV3CreateMap builds a request body from AuthOptions. 
+func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) {
+	gophercloudAuthOpts := gophercloud.AuthOptions{
+		Username:                    opts.Username,
+		UserID:                      opts.UserID,
+		Password:                    opts.Password,
+		DomainID:                    opts.DomainID,
+		DomainName:                  opts.DomainName,
+		AllowReauth:                 opts.AllowReauth,
+		TokenID:                     opts.TokenID,
+		ApplicationCredentialID:     opts.ApplicationCredentialID,
+		ApplicationCredentialName:   opts.ApplicationCredentialName,
+		ApplicationCredentialSecret: opts.ApplicationCredentialSecret,
+	}
+
+	return gophercloudAuthOpts.ToTokenV3CreateMap(scope)
+}
+
+// ToTokenV3ScopeMap builds a scope request body from AuthOptions.
+func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
+	scope := gophercloud.AuthScope(opts.Scope)
+
+	gophercloudAuthOpts := gophercloud.AuthOptions{
+		Scope:      &scope,
+		DomainID:   opts.DomainID,
+		DomainName: opts.DomainName,
+	}
+
+	return gophercloudAuthOpts.ToTokenV3ScopeMap()
+}
+
+func (opts *AuthOptions) CanReauth() bool {
+	return opts.AllowReauth
+}
+
+func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string {
+	return map[string]string{
+		"X-Subject-Token": subjectToken,
+	}
+}
+
+// Create authenticates and either generates a new token, or changes the Scope
+// of an existing token.
+func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) {
+	scope, err := opts.ToTokenV3ScopeMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	b, err := opts.ToTokenV3CreateMap(scope)
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{
+		MoreHeaders: map[string]string{"X-Auth-Token": ""},
+	})
+	r.Err = err
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	return
+}
+
+// Get validates and retrieves information about another token.
+func Get(c *gophercloud.ServiceClient, token string) (r GetResult) {
+	resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+		OkCodes:     []int{200, 203},
+	})
+	if resp != nil {
+		r.Header = resp.Header
+	}
+	r.Err = err
+	return
+}
+
+// Validate determines if a specified token is valid or not.
+func Validate(c *gophercloud.ServiceClient, token string) (bool, error) {
+	resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+		OkCodes:     []int{200, 204, 404},
+	})
+	if err != nil {
+		return false, err
+	}
+
+	return resp.StatusCode == 200 || resp.StatusCode == 204, nil
+}
+
+// Revoke immediately makes specified token invalid.
+func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) {
+	_, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{
+		MoreHeaders: subjectTokenHeaders(c, token),
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
new file mode 100644
index 00000000..6f26c96b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
@@ -0,0 +1,178 @@
+package tokens
+
+import (
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// Endpoint represents a single API endpoint offered by a service.
+// It matches either a public, internal or admin URL.
+// If supported, it contains a region specifier, again if provided.
+// The significance of the Region field will depend upon your provider.
+type Endpoint struct {
+	ID        string `json:"id"`
+	Region    string `json:"region"`
+	RegionID  string `json:"region_id"`
+	Interface string `json:"interface"`
+	URL       string `json:"url"`
+}
+
+// CatalogEntry provides a type-safe interface to an Identity API V3 service
+// catalog listing. Each class of service, such as cloud DNS or block storage
+// services, could have multiple CatalogEntry representing it (one by interface
+// type, e.g. public, admin or internal).
+//
+// Note: when looking for the desired service, try, whenever possible, to key
+// off the type field. Otherwise, you'll tie the representation of the service
+// to a specific provider.
+type CatalogEntry struct {
+	// Service ID
+	ID string `json:"id"`
+
+	// Name will contain the provider-specified name for the service.
+	Name string `json:"name"`
+
+	// Type will contain a type string if OpenStack defines a type for the
+	// service. Otherwise, for provider-specific services, the provider may
+	// assign their own type strings.
+	Type string `json:"type"`
+
+	// Endpoints will let the caller iterate over all the different endpoints that
+	// may exist for the service.
+	Endpoints []Endpoint `json:"endpoints"`
+}
+
+// ServiceCatalog provides a view into the service catalog from a previous,
+// successful authentication.
+type ServiceCatalog struct {
+	Entries []CatalogEntry `json:"catalog"`
+}
+
+// Domain provides information about the domain to which this token grants
+// access.
+type Domain struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+}
+
+// User represents a user resource that exists in the Identity Service.
+type User struct {
+	Domain Domain `json:"domain"`
+	ID     string `json:"id"`
+	Name   string `json:"name"`
+}
+
+// Role provides information about roles to which User is authorized.
+type Role struct {
+	ID   string `json:"id"`
+	Name string `json:"name"`
+}
+
+// Project provides information about project to which User is authorized.
+type Project struct {
+	Domain Domain `json:"domain"`
+	ID     string `json:"id"`
+	Name   string `json:"name"`
+}
+
+// commonResult is the response from a request. A commonResult has various
+// methods which can be used to extract different details about the result.
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a shortcut for ExtractToken.
+// This function is deprecated and still present for backward compatibility.
+func (r commonResult) Extract() (*Token, error) {
+	return r.ExtractToken()
+}
+
+// ExtractToken interprets a commonResult as a Token.
+func (r commonResult) ExtractToken() (*Token, error) {
+	var s Token
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the token itself from the stored headers.
+	s.ID = r.Header.Get("X-Subject-Token")
+
+	return &s, err
+}
+
+// ExtractTokenID implements the gophercloud.AuthResult interface. The returned
+// string is the same as the ID field of the Token struct returned from
+// ExtractToken().
+func (r CreateResult) ExtractTokenID() (string, error) {
+	return r.Header.Get("X-Subject-Token"), r.Err
+}
+
+// ExtractServiceCatalog returns the ServiceCatalog that was generated along
+// with the user's Token.
+func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) {
+	var s ServiceCatalog
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+// ExtractUser returns the User that is the owner of the Token.
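+//
+// For example (a sketch; "identityClient" and "tokenID" are assumed values):
+//
+//	user, err := tokens.Get(identityClient, tokenID).ExtractUser()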
+func (r commonResult) ExtractUser() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ExtractRoles returns Roles to which User is authorized. +func (r commonResult) ExtractRoles() ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := r.ExtractInto(&s) + return s.Roles, err +} + +// ExtractProject returns Project to which User is authorized. +func (r commonResult) ExtractProject() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} + +// CreateResult is the response from a Create request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type CreateResult struct { + commonResult +} + +// GetResult is the response from a Get request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type GetResult struct { + commonResult +} + +// RevokeResult is response from a Revoke request. +type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services +// in an OpenStack provider. Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string `json:"id"` + + // ExpiresAt is the timestamp at which this token will no longer be accepted. + ExpiresAt time.Time `json:"expires_at"` +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.ExtractIntoStructPtr(v, "token") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go new file mode 100644 index 00000000..2f864a31 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go @@ -0,0 +1,7 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +func tokenURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("auth", "tokens") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go new file mode 100644 index 00000000..994ce71b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go @@ -0,0 +1,172 @@ +/* +Package users manages and retrieves Users in the OpenStack Identity Service. 
+
+Example to List Users
+
+	listOpts := users.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := users.List(identityClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allUsers, err := users.ExtractUsers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, user := range allUsers {
+		fmt.Printf("%+v\n", user)
+	}
+
+Example to Create a User
+
+	projectID := "a99e9b4e620e4db09a2dfb6e42a01e66"
+
+	createOpts := users.CreateOpts{
+		Name:             "username",
+		DomainID:         "default",
+		DefaultProjectID: projectID,
+		Enabled:          gophercloud.Enabled,
+		Password:         "supersecret",
+		Extra: map[string]interface{}{
+			"email": "username@example.com",
+		},
+	}
+
+	user, err := users.Create(identityClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a User
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	updateOpts := users.UpdateOpts{
+		Enabled: gophercloud.Disabled,
+	}
+
+	user, err := users.Update(identityClient, userID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Change Password of a User
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	originalPassword := "secretsecret"
+	password := "new_secretsecret"
+
+	changePasswordOpts := users.ChangePasswordOpts{
+		OriginalPassword: originalPassword,
+		Password:         password,
+	}
+
+	err := users.ChangePassword(identityClient, userID, changePasswordOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a User
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := users.Delete(identityClient, userID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Groups a User Belongs To
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	allPages, err := users.ListGroups(identityClient, userID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allGroups, err := groups.ExtractGroups(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, group := range allGroups {
+		fmt.Printf("%+v\n", group)
+	}
+
+Example to Add a User to a Group
+
+	groupID := "bede500ee1124ae9b0006ff859758b3a"
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := users.AddToGroup(identityClient, groupID, userID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Check Whether a User Belongs to a Group
+
+	groupID := "bede500ee1124ae9b0006ff859758b3a"
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	ok, err := users.IsMemberOfGroup(identityClient, groupID, userID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	if ok {
+		fmt.Printf("user %s is a member of group %s\n", userID, groupID)
+	}
+
+Example to Remove a User from a Group
+
+	groupID := "bede500ee1124ae9b0006ff859758b3a"
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+	err := users.RemoveFromGroup(identityClient, groupID, userID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Projects a User Belongs To
+
+	userID := "0fe36e73809d46aeae6705c39077b1b3"
+
+	allPages, err := users.ListProjects(identityClient, userID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allProjects, err := projects.ExtractProjects(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, project := range allProjects {
+		fmt.Printf("%+v\n", project)
+	}
+
+Example to List Users in a Group
+
+	groupID := "bede500ee1124ae9b0006ff859758b3a"
+	listOpts := users.ListOpts{
+		DomainID: "default",
+	}
+
+	allPages, err := users.ListInGroup(identityClient, groupID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allUsers, err := users.ExtractUsers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, user := range allUsers {
+		fmt.Printf("%+v\n", user)
+	}
+
+*/
+package users
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go
new file mode 100644
index 00000000..0f0b7987
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go
@@ -0,0 +1,17 @@
+package users
+
+import "fmt"
+
+// InvalidListFilter is returned by the ToUserListQuery method when validation
+// of a filter does not pass.
+type InvalidListFilter struct {
+	FilterName string
+}
+
+func (e InvalidListFilter) Error() string {
+	s := fmt.Sprintf(
+		"Invalid filter name [%s]: it must be in the format NAME__COMPARATOR",
+		e.FilterName,
+	)
+	return s
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go
new file mode 100644
index 00000000..7a40ec76
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go
@@ -0,0 +1,338 @@
+package users
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/identity/v3/groups"
+	"github.com/gophercloud/gophercloud/openstack/identity/v3/projects"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Option is a specific option defined at the API to enable features
+// on a user account.
+type Option string
+
+const (
+	IgnoreChangePasswordUponFirstUse Option = "ignore_change_password_upon_first_use"
+	IgnorePasswordExpiry             Option = "ignore_password_expiry"
+	IgnoreLockoutFailureAttempts     Option = "ignore_lockout_failure_attempts"
+	MultiFactorAuthRules             Option = "multi_factor_auth_rules"
+	MultiFactorAuthEnabled           Option = "multi_factor_auth_enabled"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request.
+type ListOptsBuilder interface {
+	ToUserListQuery() (string, error)
+}
+
+// ListOpts provides options to filter the List results.
+type ListOpts struct {
+	// DomainID filters the response by a domain ID.
+	DomainID string `q:"domain_id"`
+
+	// Enabled filters the response by enabled users.
+	Enabled *bool `q:"enabled"`
+
+	// IdPID filters the response by an Identity Provider ID.
+	IdPID string `q:"idp_id"`
+
+	// Name filters the response by username.
+	Name string `q:"name"`
+
+	// PasswordExpiresAt filters the response based on expiring passwords.
+	PasswordExpiresAt string `q:"password_expires_at"`
+
+	// ProtocolID filters the response by protocol ID.
+	ProtocolID string `q:"protocol_id"`
+
+	// UniqueID filters the response by unique ID.
+	UniqueID string `q:"unique_id"`
+
+	// Filters filters the response by custom filters such as
+	// 'name__contains=foo'.
+	Filters map[string]string `q:"-"`
+}
+
+// ToUserListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToUserListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+
+	params := q.Query()
+	for k, v := range opts.Filters {
+		i := strings.Index(k, "__")
+		if i > 0 && i < len(k)-2 {
+			params.Add(k, v)
+		} else {
+			return "", InvalidListFilter{FilterName: k}
+		}
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+	return q.String(), err
+}
+
+// List enumerates the Users to which the current token has access.
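+//
+// Custom comparator filters must follow the NAME__COMPARATOR form enforced by
+// ToUserListQuery; anything else yields an InvalidListFilter error. A short
+// sketch (identityClient is assumed):
+//
+//	listOpts := users.ListOpts{
+//		Filters: map[string]string{"name__contains": "foo"},
+//	}
+//	allPages, err := users.List(identityClient, listOpts).AllPages()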
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToUserListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single user, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a user. +type CreateOpts struct { + // Name is the name of the new user. + Name string `json:"name" required:"true"` + + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id,omitempty"` + + // Description is a description of the user. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the user belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the user status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the user. + Extra map[string]interface{} `json:"-"` + + // Options are defined options in the API to enable certain features. + Options map[Option]interface{} `json:"options,omitempty"` + + // Password is the password of the new user. + Password string `json:"password,omitempty"` +} + +// ToUserCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["user"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new User. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToUserCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToUserUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a user account. +type UpdateOpts struct { + // Name is the name of the new user. + Name string `json:"name,omitempty"` + + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id,omitempty"` + + // Description is a description of the user. + Description *string `json:"description,omitempty"` + + // DomainID is the ID of the domain the user belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the user status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the user. + Extra map[string]interface{} `json:"-"` + + // Options are defined options in the API to enable certain features. 
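+	// For example (a sketch; the set of options a deployment accepts may
+	// vary):
+	//
+	//	Options: map[Option]interface{}{
+	//		IgnorePasswordExpiry: true,
+	//	},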
+ Options map[Option]interface{} `json:"options,omitempty"` + + // Password is the password of the new user. + Password string `json:"password,omitempty"` +} + +// ToUserUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["user"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing User. +func Update(client *gophercloud.ServiceClient, userID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToUserUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, userID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ChangePasswordOptsBuilder allows extensions to add additional parameters to +// the ChangePassword request. +type ChangePasswordOptsBuilder interface { + ToUserChangePasswordMap() (map[string]interface{}, error) +} + +// ChangePasswordOpts provides options for changing password for a user. +type ChangePasswordOpts struct { + // OriginalPassword is the original password of the user. + OriginalPassword string `json:"original_password"` + + // Password is the new password of the user. + Password string `json:"password"` +} + +// ToUserChangePasswordMap formats a ChangePasswordOpts into a ChangePassword request. +func (opts ChangePasswordOpts) ToUserChangePasswordMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + return b, nil +} + +// ChangePassword changes password for a user. +func ChangePassword(client *gophercloud.ServiceClient, userID string, opts ChangePasswordOptsBuilder) (r ChangePasswordResult) { + b, err := opts.ToUserChangePasswordMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(changePasswordURL(client, userID), &b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// Delete deletes a user. +func Delete(client *gophercloud.ServiceClient, userID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, userID), nil) + return +} + +// ListGroups enumerates groups user belongs to. +func ListGroups(client *gophercloud.ServiceClient, userID string) pagination.Pager { + url := listGroupsURL(client, userID) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return groups.GroupPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} + }) +} + +// AddToGroup adds a user to a group. +func AddToGroup(client *gophercloud.ServiceClient, groupID, userID string) (r AddToGroupResult) { + url := addToGroupURL(client, groupID, userID) + _, r.Err = client.Put(url, nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// IsMemberOfGroup checks whether a user belongs to a group. +func IsMemberOfGroup(client *gophercloud.ServiceClient, groupID, userID string) (r IsMemberOfGroupResult) { + url := isMemberOfGroupURL(client, groupID, userID) + var response *http.Response + response, r.Err = client.Head(url, &gophercloud.RequestOpts{ + OkCodes: []int{204, 404}, + }) + if r.Err == nil && response != nil { + if (*response).StatusCode == 204 { + r.isMember = true + } + } + + return +} + +// RemoveFromGroup removes a user from a group. 
+func RemoveFromGroup(client *gophercloud.ServiceClient, groupID, userID string) (r RemoveFromGroupResult) {
+	url := removeFromGroupURL(client, groupID, userID)
+	_, r.Err = client.Delete(url, &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
+
+// ListProjects enumerates the projects a user belongs to.
+func ListProjects(client *gophercloud.ServiceClient, userID string) pagination.Pager {
+	url := listProjectsURL(client, userID)
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return projects.ProjectPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// ListInGroup enumerates users that belong to a group.
+func ListInGroup(client *gophercloud.ServiceClient, groupID string, opts ListOptsBuilder) pagination.Pager {
+	url := listInGroupURL(client, groupID)
+	if opts != nil {
+		query, err := opts.ToUserListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return UserPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go
new file mode 100644
index 00000000..e158ac72
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go
@@ -0,0 +1,179 @@
+package users
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/internal"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// User represents a User in the OpenStack Identity Service.
+type User struct {
+	// DefaultProjectID is the ID of the default project of the user.
+	DefaultProjectID string `json:"default_project_id"`
+
+	// Description is the description of the user.
+	Description string `json:"description"`
+
+	// DomainID is the domain ID the user belongs to.
+	DomainID string `json:"domain_id"`
+
+	// Enabled is whether or not the user is enabled.
+	Enabled bool `json:"enabled"`
+
+	// Extra is a collection of miscellaneous key/values.
+	Extra map[string]interface{} `json:"-"`
+
+	// ID is the unique ID of the user.
+	ID string `json:"id"`
+
+	// Links contains referencing links to the user.
+	Links map[string]interface{} `json:"links"`
+
+	// Name is the name of the user.
+	Name string `json:"name"`
+
+	// Options are a set of defined options of the user.
+	Options map[string]interface{} `json:"options"`
+
+	// PasswordExpiresAt is the timestamp when the user's password expires.
+	PasswordExpiresAt time.Time `json:"-"`
+}
+
+func (r *User) UnmarshalJSON(b []byte) error {
+	type tmp User
+	var s struct {
+		tmp
+		Extra             map[string]interface{}          `json:"extra"`
+		PasswordExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"password_expires_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = User(s.tmp)
+
+	r.PasswordExpiresAt = time.Time(s.PasswordExpiresAt)
+
+	// Collect other fields and bundle them into Extra
+	// but only if a field titled "extra" wasn't sent.
+ if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + delete(resultMap, "password_expires_at") + r.Extra = internal.RemainingKeys(User{}, resultMap) + } + } + + return err +} + +type userResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a User. +type GetResult struct { + userResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a User. +type CreateResult struct { + userResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a User. +type UpdateResult struct { + userResult +} + +// ChangePasswordResult is the response from a ChangePassword operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type ChangePasswordResult struct { + gophercloud.ErrResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AddToGroupResult is the response from a AddToGroup operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type AddToGroupResult struct { + gophercloud.ErrResult +} + +// IsMemberOfGroupResult is the response from a IsMemberOfGroup operation. Call its +// Extract method to determine if the request succeeded or failed. +type IsMemberOfGroupResult struct { + isMember bool + gophercloud.Result +} + +// RemoveFromGroupResult is the response from a RemoveFromGroup operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type RemoveFromGroupResult struct { + gophercloud.ErrResult +} + +// UserPage is a single page of User results. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a UserPage contains any results. +func (r UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(r) + return len(users) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r UserPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractUsers returns a slice of Users contained in a single page of results. +func ExtractUsers(r pagination.Page) ([]User, error) { + var s struct { + Users []User `json:"users"` + } + err := (r.(UserPage)).ExtractInto(&s) + return s.Users, err +} + +// Extract interprets any user results as a User. 
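+//
+// For example (a sketch; identityClient and userID are assumed):
+//
+//	user, err := users.Get(identityClient, userID).Extract()
+//	if err == nil {
+//		fmt.Printf("%s expires at %s\n", user.Name, user.PasswordExpiresAt)
+//	}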
+func (r userResult) Extract() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// Extract extracts IsMemberOfGroupResult as bool and error values +func (r IsMemberOfGroupResult) Extract() (bool, error) { + return r.isMember, r.Err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go new file mode 100644 index 00000000..3caa8bbb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go @@ -0,0 +1,51 @@ +package users + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func getURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func updateURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func changePasswordURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "password") +} + +func deleteURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func listGroupsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "groups") +} + +func addToGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return client.ServiceURL("groups", groupID, "users", userID) +} + +func isMemberOfGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return client.ServiceURL("groups", groupID, "users", userID) +} + +func removeFromGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return client.ServiceURL("groups", groupID, "users", userID) +} + +func listProjectsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "projects") +} + +func listInGroupURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID, "users") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go new file mode 100644 index 00000000..0c12bf2e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go @@ -0,0 +1,48 @@ +/* +Package imagedata enables management of image data. 
+ +Example to Upload Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + imageData, err := os.Open("/path/to/image/file") + if err != nil { + panic(err) + } + defer imageData.Close() + + err = imagedata.Upload(imageClient, imageID, imageData).ExtractErr() + if err != nil { + panic(err) + } + +Example to Stage Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + imageData, err := os.Open("/path/to/image/file") + if err != nil { + panic(err) + } + defer imageData.Close() + + err = imagedata.Stage(imageClient, imageID, imageData).ExtractErr() + if err != nil { + panic(err) + } + +Example to Download Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + image, err := imagedata.Download(imageClient, imageID).Extract() + if err != nil { + panic(err) + } + + imageData, err := ioutil.ReadAll(image) + if err != nil { + panic(err) + } +*/ +package imagedata diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go new file mode 100644 index 00000000..545c561f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go @@ -0,0 +1,39 @@ +package imagedata + +import ( + "io" + "net/http" + + "github.com/gophercloud/gophercloud" +) + +// Upload uploads an image file. +func Upload(client *gophercloud.ServiceClient, id string, data io.Reader) (r UploadResult) { + _, r.Err = client.Put(uploadURL(client, id), data, nil, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"}, + OkCodes: []int{204}, + }) + return +} + +// Stage performs PUT call on the existing image object in the Imageservice with +// the provided file. +// Existing image object must be in the "queued" status. +func Stage(client *gophercloud.ServiceClient, id string, data io.Reader) (r StageResult) { + _, r.Err = client.Put(stageURL(client, id), data, nil, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"}, + OkCodes: []int{204}, + }) + return +} + +// Download retrieves an image. +func Download(client *gophercloud.ServiceClient, id string) (r DownloadResult) { + var resp *http.Response + resp, r.Err = client.Get(downloadURL(client, id), nil, nil) + if resp != nil { + r.Body = resp.Body + r.Header = resp.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go new file mode 100644 index 00000000..17a3f0e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go @@ -0,0 +1,34 @@ +package imagedata + +import ( + "fmt" + "io" + + "github.com/gophercloud/gophercloud" +) + +// UploadResult is the result of an upload image operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type UploadResult struct { + gophercloud.ErrResult +} + +// StageResult is the result of a stage image operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StageResult struct { + gophercloud.ErrResult +} + +// DownloadResult is the result of a download image operation. Call its Extract +// method to gain access to the image data. 
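+//
+// A minimal sketch (imageClient and imageID are assumed; the package
+// documentation shows a fuller example):
+//
+//	rdr, err := imagedata.Download(imageClient, imageID).Extract()
+//	if err == nil {
+//		data, _ := ioutil.ReadAll(rdr)
+//		fmt.Printf("read %d bytes of image data\n", len(data))
+//	}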
+type DownloadResult struct {
+	gophercloud.Result
+}
+
+// Extract returns the image data as an io.Reader.
+func (r DownloadResult) Extract() (io.Reader, error) {
+	if r, ok := r.Body.(io.Reader); ok {
+		return r, nil
+	}
+	return nil, fmt.Errorf("Expected io.Reader but got: %T(%#v)", r.Body, r.Body)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go
new file mode 100644
index 00000000..d9615ba7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go
@@ -0,0 +1,23 @@
+package imagedata
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath   = "images"
+	uploadPath = "file"
+	stagePath  = "stage"
+)
+
+// `uploadURL(c,i)` is the URL for the binary image data for the
+// image identified by ID `i` in the service `c`.
+func uploadURL(c *gophercloud.ServiceClient, imageID string) string {
+	return c.ServiceURL(rootPath, imageID, uploadPath)
+}
+
+func stageURL(c *gophercloud.ServiceClient, imageID string) string {
+	return c.ServiceURL(rootPath, imageID, stagePath)
+}
+
+func downloadURL(c *gophercloud.ServiceClient, imageID string) string {
+	return uploadURL(c, imageID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
new file mode 100644
index 00000000..14da9ac9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
@@ -0,0 +1,60 @@
+/*
+Package images enables management and retrieval of images from the OpenStack
+Image Service.
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Owner: "a7509e1ae65945fda83f3e52c6296017",
+	}
+
+	allPages, err := images.List(imagesClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+
+Example to Create an Image
+
+	createOpts := images.CreateOpts{
+		Name:       "image_name",
+		Visibility: images.ImageVisibilityPrivate,
+	}
+
+	image, err := images.Create(imageClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+
+	updateOpts := images.UpdateOpts{
+		images.ReplaceImageName{
+			NewName: "new_name",
+		},
+	}
+
+	image, err := images.Update(imageClient, imageID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+	err := images.Delete(imageClient, imageID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
new file mode 100644
index 00000000..4e487ea9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
@@ -0,0 +1,366 @@
+package images
+
+import (
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToImageListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the server attributes you want to see returned. Marker and Limit are used
+// for pagination.
+//
+// http://developer.openstack.org/api-ref-image-v2.html
+type ListOpts struct {
+	// ID is the ID of the image.
+	// Multiple IDs can be specified by constructing a string
+	// such as "in:uuid1,uuid2,uuid3".
+	ID string `q:"id"`
+
+	// Limit is the integer value for the limit of values to return.
+	Limit int `q:"limit"`
+
+	// Marker is the UUID of the image at which you want to set a marker.
+	Marker string `q:"marker"`
+
+	// Name filters on the name of the image.
+	// Multiple names can be specified by constructing a string
+	// such as "in:name1,name2,name3".
+	Name string `q:"name"`
+
+	// Visibility filters on the visibility of the image.
+	Visibility ImageVisibility `q:"visibility"`
+
+	// MemberStatus filters on the member status of the image.
+	MemberStatus ImageMemberStatus `q:"member_status"`
+
+	// Owner filters on the project ID of the image.
+	Owner string `q:"owner"`
+
+	// Status filters on the status of the image.
+	// Multiple statuses can be specified by constructing a string
+	// such as "in:saving,queued".
+	Status ImageStatus `q:"status"`
+
+	// SizeMin filters on the size_min image property.
+	SizeMin int64 `q:"size_min"`
+
+	// SizeMax filters on the size_max image property.
+	SizeMax int64 `q:"size_max"`
+
+	// Sort sorts the results using the new style of sorting. See the OpenStack
+	// Image API reference for the exact syntax.
+	//
+	// Sort cannot be used with the classic sort options (sort_key and sort_dir).
+	Sort string `q:"sort"`
+
+	// SortKey will sort the results based on a specified image property.
+	SortKey string `q:"sort_key"`
+
+	// SortDir will sort the list results either ascending or descending.
+	SortDir string `q:"sort_dir"`
+
+	// Tags filters on specific image tags.
+	Tags []string `q:"tag"`
+
+	// CreatedAtQuery filters images based on their creation date.
+	CreatedAtQuery *ImageDateQuery
+
+	// UpdatedAtQuery filters images based on their updated date.
+	UpdatedAtQuery *ImageDateQuery
+
+	// ContainerFormat filters images based on the container_format.
+	// Multiple container formats can be specified by constructing a
+	// string such as "in:bare,ami".
+	ContainerFormat string `q:"container_format"`
+
+	// DiskFormat filters images based on the disk_format.
+	// Multiple disk formats can be specified by constructing a string
+	// such as "in:qcow2,iso".
+	DiskFormat string `q:"disk_format"`
+}
+
+// ToImageListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToImageListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	params := q.Query()
+
+	if opts.CreatedAtQuery != nil {
+		createdAt := opts.CreatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.CreatedAtQuery.Filter; v != "" {
+			createdAt = fmt.Sprintf("%s:%s", v, createdAt)
+		}
+
+		params.Add("created_at", createdAt)
+	}
+
+	if opts.UpdatedAtQuery != nil {
+		updatedAt := opts.UpdatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.UpdatedAtQuery.Filter; v != "" {
+			updatedAt = fmt.Sprintf("%s:%s", v, updatedAt)
+		}
+
+		params.Add("updated_at", updatedAt)
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+
+	return q.String(), err
+}
+
+// List implements the image list request.
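+//
+// Date filters are not handled by the struct tags above, so they are set via
+// CreatedAtQuery/UpdatedAtQuery. A sketch (imageClient is assumed):
+//
+//	listOpts := images.ListOpts{
+//		CreatedAtQuery: &images.ImageDateQuery{
+//			Date:   time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC),
+//			Filter: images.FilterGTE,
+//		},
+//	}
+//	allPages, err := images.List(imageClient, listOpts).AllPages()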
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToImageListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		imagePage := ImagePage{
+			serviceURL:     c.ServiceURL(),
+			LinkedPageBase: pagination.LinkedPageBase{PageResult: r},
+		}
+
+		return imagePage
+	})
+}
+
+// CreateOptsBuilder allows extensions to add parameters to the Create request.
+type CreateOptsBuilder interface {
+	// ToImageCreateMap returns a value that can be passed to json.Marshal.
+	ToImageCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options used to create an image.
+type CreateOpts struct {
+	// Name is the name of the new image.
+	Name string `json:"name" required:"true"`
+
+	// ID is the image ID.
+	ID string `json:"id,omitempty"`
+
+	// Visibility defines who can see/use the image.
+	Visibility *ImageVisibility `json:"visibility,omitempty"`
+
+	// Tags is a set of image tags.
+	Tags []string `json:"tags,omitempty"`
+
+	// ContainerFormat is the format of the
+	// container. Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format,omitempty"`
+
+	// DiskFormat is the format of the disk. If set,
+	// valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format,omitempty"`
+
+	// MinDisk is the amount of disk space in
+	// GB that is required to boot the image.
+	MinDisk int `json:"min_disk,omitempty"`
+
+	// MinRAM is the amount of RAM in MB that
+	// is required to boot the image.
+	MinRAM int `json:"min_ram,omitempty"`
+
+	// Protected is whether the image is not deletable.
+	Protected *bool `json:"protected,omitempty"`
+
+	// Properties is a set of properties, if any, that
+	// are associated with the image.
+	Properties map[string]string `json:"-"`
+}
+
+// ToImageCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToImageCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Properties != nil {
+		for k, v := range opts.Properties {
+			b[k] = v
+		}
+	}
+	return b, nil
+}
+
+// Create implements the create image request.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToImageCreateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{201}})
+	return
+}
+
+// Delete implements the image delete request.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get implements the image get request.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// Update implements the image update request.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToImageUpdateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes:     []int{200},
+		MoreHeaders: map[string]string{"Content-Type": "application/openstack-images-v2.1-json-patch"},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	// ToImageUpdateMap returns a value implementing json.Marshaler which,
+	// when marshaled, matches the patch schema:
+	// http://specs.openstack.org/openstack/glance-specs/specs/api/v2/http-patch-image-api-v2.html
+	ToImageUpdateMap() ([]interface{}, error)
+}
+
+// UpdateOpts is a list of Patch operations to apply to an image.
+type UpdateOpts []Patch
+
+// ToImageUpdateMap assembles a request body based on the contents of
+// UpdateOpts.
+func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) {
+	m := make([]interface{}, len(opts))
+	for i, patch := range opts {
+		patchJSON := patch.ToImagePatchMap()
+		m[i] = patchJSON
+	}
+	return m, nil
+}
+
+// Patch represents a single update to an existing image. Multiple updates
+// to an image can be submitted at the same time.
+type Patch interface {
+	ToImagePatchMap() map[string]interface{}
+}
+
+// UpdateVisibility represents an updated visibility property request.
+type UpdateVisibility struct {
+	Visibility ImageVisibility
+}
+
+// ToImagePatchMap assembles a request body based on UpdateVisibility.
+func (r UpdateVisibility) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/visibility",
+		"value": r.Visibility,
+	}
+}
+
+// ReplaceImageName represents an updated image_name property request.
+type ReplaceImageName struct {
+	NewName string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageName.
+func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/name",
+		"value": r.NewName,
+	}
+}
+
+// ReplaceImageChecksum represents an updated checksum property request.
+type ReplaceImageChecksum struct {
+	Checksum string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageChecksum.
+func (r ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/checksum",
+		"value": r.Checksum,
+	}
+}
+
+// ReplaceImageTags represents an updated tags property request.
+type ReplaceImageTags struct {
+	NewTags []string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageTags.
+func (r ReplaceImageTags) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/tags",
+		"value": r.NewTags,
+	}
+}
+
+// ReplaceImageMinDisk represents an updated min_disk property request.
+type ReplaceImageMinDisk struct {
+	NewMinDisk int
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageMinDisk.
+func (r ReplaceImageMinDisk) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/min_disk",
+		"value": r.NewMinDisk,
+	}
+}
+
+// UpdateOp represents a valid update operation.
+type UpdateOp string
+
+const (
+	AddOp     UpdateOp = "add"
+	ReplaceOp UpdateOp = "replace"
+	RemoveOp  UpdateOp = "remove"
+)
+
+// UpdateImageProperty represents an update property request.
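+//
+// For example, to add a custom property (a sketch; imageClient and imageID
+// are assumed):
+//
+//	updateOpts := images.UpdateOpts{
+//		images.UpdateImageProperty{
+//			Op:    images.AddOp,
+//			Name:  "os_version",
+//			Value: "12.04",
+//		},
+//	}
+//	image, err := images.Update(imageClient, imageID, updateOpts).Extract()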
+type UpdateImageProperty struct {
+	Op    UpdateOp
+	Name  string
+	Value string
+}
+
+// ToImagePatchMap assembles a request body based on UpdateImageProperty.
+func (r UpdateImageProperty) ToImagePatchMap() map[string]interface{} {
+	updateMap := map[string]interface{}{
+		"op":   r.Op,
+		"path": fmt.Sprintf("/%s", r.Name),
+	}
+
+	if r.Value != "" {
+		updateMap["value"] = r.Value
+	}
+
+	return updateMap
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
new file mode 100644
index 00000000..676181e1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
@@ -0,0 +1,202 @@
+package images
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/internal"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Image represents an image found in the OpenStack Image service.
+type Image struct {
+	// ID is the image UUID.
+	ID string `json:"id"`
+
+	// Name is the human-readable display name for the image.
+	Name string `json:"name"`
+
+	// Status is the image status. It can be "queued" or "active".
+	// See imageservice/v2/images/types.go
+	Status ImageStatus `json:"status"`
+
+	// Tags is a list of image tags. Tags are arbitrarily defined strings
+	// attached to an image.
+	Tags []string `json:"tags"`
+
+	// ContainerFormat is the format of the container.
+	// Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format"`
+
+	// DiskFormat is the format of the disk.
+	// If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format"`
+
+	// MinDiskGigabytes is the amount of disk space in GB that is required to
+	// boot the image.
+	MinDiskGigabytes int `json:"min_disk"`
+
+	// MinRAMMegabytes [optional] is the amount of RAM in MB that is required to
+	// boot the image.
+	MinRAMMegabytes int `json:"min_ram"`
+
+	// Owner is the tenant ID the image belongs to.
+	Owner string `json:"owner"`
+
+	// Protected is whether the image is protected from deletion.
+	Protected bool `json:"protected"`
+
+	// Visibility defines who can see/use the image.
+	Visibility ImageVisibility `json:"visibility"`
+
+	// Checksum is the checksum of the data that's associated with the image.
+	Checksum string `json:"checksum"`
+
+	// SizeBytes is the size of the data that's associated with the image.
+	SizeBytes int64 `json:"-"`
+
+	// Metadata is a set of metadata associated with the image.
+	// Image metadata allows you to meaningfully define the image properties
+	// and tags.
+	// See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
+	Metadata map[string]string `json:"metadata"`
+
+	// Properties is a set of key-value pairs, if any, that are associated with
+	// the image.
+	Properties map[string]interface{}
+
+	// CreatedAt is the date when the image has been created.
+	CreatedAt time.Time `json:"created_at"`
+
+	// UpdatedAt is the date when the last change has been made to the image or
+	// its properties.
+	UpdatedAt time.Time `json:"updated_at"`
+
+	// File is the trailing path after the glance endpoint that represents the
+	// location of the image or the path to retrieve it.
+	File string `json:"file"`
+
+	// Schema is the path to the JSON-schema that represents the image or image
+	// entity.
+ Schema string `json:"schema"` + + // VirtualSize is the virtual size of the image + VirtualSize int64 `json:"virtual_size"` +} + +func (r *Image) UnmarshalJSON(b []byte) error { + type tmp Image + var s struct { + tmp + SizeBytes interface{} `json:"size"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Image(s.tmp) + + switch t := s.SizeBytes.(type) { + case nil: + r.SizeBytes = 0 + case float32: + r.SizeBytes = int64(t) + case float64: + r.SizeBytes = int64(t) + default: + return fmt.Errorf("Unknown type for SizeBytes: %v (value: %v)", reflect.TypeOf(t), t) + } + + // Bundle all other fields into Properties + var result interface{} + err = json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + delete(resultMap, "self") + delete(resultMap, "size") + r.Properties = internal.RemainingKeys(Image{}, resultMap) + } + + return err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult as an Image. +func (r commonResult) Extract() (*Image, error) { + var s *Image + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as an Image. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as an Image. +type UpdateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as an Image. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to interpret it as an Image. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ImagePage represents the results of a List request. +type ImagePage struct { + serviceURL string + pagination.LinkedPageBase +} + +// IsEmpty returns true if an ImagePage contains no Images results. +func (r ImagePage) IsEmpty() (bool, error) { + images, err := ExtractImages(r) + return len(images) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to +// the next page of results. +func (r ImagePage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + if s.Next == "" { + return "", nil + } + + return nextPageURL(r.serviceURL, s.Next) +} + +// ExtractImages interprets the results of a single page from a List() call, +// producing a slice of Image entities. +func ExtractImages(r pagination.Page) ([]Image, error) { + var s struct { + Images []Image `json:"images"` + } + err := (r.(ImagePage)).ExtractInto(&s) + return s.Images, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go new file mode 100644 index 00000000..d2f9cbd3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go @@ -0,0 +1,104 @@ +package images + +import ( + "time" +) + +// ImageStatus image statuses +// http://docs.openstack.org/developer/glance/statuses.html +type ImageStatus string + +const ( + // ImageStatusQueued is a status for an image which identifier has + // been reserved for an image in the image registry. 
+	ImageStatusQueued ImageStatus = "queued"
+
+	// ImageStatusSaving denotes that an image’s raw data is currently being
+	// uploaded to Glance.
+	ImageStatusSaving ImageStatus = "saving"
+
+	// ImageStatusActive denotes an image that is fully available in Glance.
+	ImageStatusActive ImageStatus = "active"
+
+	// ImageStatusKilled denotes that an error occurred during the uploading
+	// of an image’s data, and that the image is not readable.
+	ImageStatusKilled ImageStatus = "killed"
+
+	// ImageStatusDeleted is used for an image that is no longer available to use.
+	// The image information is retained in the image registry.
+	ImageStatusDeleted ImageStatus = "deleted"
+
+	// ImageStatusPendingDelete is similar to Deleted, but the image is not yet
+	// deleted.
+	ImageStatusPendingDelete ImageStatus = "pending_delete"
+
+	// ImageStatusDeactivated denotes that access to image data is not allowed to
+	// any non-admin user.
+	ImageStatusDeactivated ImageStatus = "deactivated"
+)
+
+// ImageVisibility denotes who can see or use an image.
+// According to the design at
+// https://wiki.openstack.org/wiki/Glance-v2-community-image-visibility-design
+type ImageVisibility string
+
+const (
+	// ImageVisibilityPublic means all users can see the image.
+	ImageVisibilityPublic ImageVisibility = "public"
+
+	// ImageVisibilityPrivate means only users with tenantId == tenantId(owner)
+	// can see the image.
+	ImageVisibilityPrivate ImageVisibility = "private"
+
+	// ImageVisibilityShared images are visible to:
+	// - users with tenantId == tenantId(owner)
+	// - users with tenantId in the member-list of the image
+	// - users with tenantId in the member-list with member_status == 'accepted'
+	ImageVisibilityShared ImageVisibility = "shared"
+
+	// ImageVisibilityCommunity images:
+	// - all users can see and boot it
+	// - users with tenantId in the member-list of the image with
+	//   member_status == 'accepted' have this image in their default image-list.
+	ImageVisibilityCommunity ImageVisibility = "community"
+)
+
+// ImageMemberStatus is a status for adding a new member (tenant) to an image
+// member list.
+type ImageMemberStatus string
+
+const (
+	// ImageMemberStatusAccepted is the status for an accepted image member.
+	ImageMemberStatusAccepted ImageMemberStatus = "accepted"
+
+	// ImageMemberStatusPending shows that the member addition is pending.
+	ImageMemberStatusPending ImageMemberStatus = "pending"
+
+	// ImageMemberStatusRejected is the status for a rejected image member.
+	ImageMemberStatusRejected ImageMemberStatus = "rejected"
+
+	// ImageMemberStatusAll matches image members in any status.
+	ImageMemberStatusAll ImageMemberStatus = "all"
+)
+
+// ImageDateFilter represents a valid filter to use for filtering
+// images by their date during a List.
+type ImageDateFilter string
+
+const (
+	FilterGT  ImageDateFilter = "gt"
+	FilterGTE ImageDateFilter = "gte"
+	FilterLT  ImageDateFilter = "lt"
+	FilterLTE ImageDateFilter = "lte"
+	FilterNEQ ImageDateFilter = "neq"
+	FilterEQ  ImageDateFilter = "eq"
+)
+
+// ImageDateQuery represents a date field to be used for listing images.
+// If no filter is specified, the query will act as though FilterEQ was
+// set.
+type ImageDateQuery struct { + Date time.Time + Filter ImageDateFilter +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go new file mode 100644 index 00000000..1780c3c6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go @@ -0,0 +1,65 @@ +package images + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +// `listURL` is a pure function. `listURL(c)` is a URL for which a GET +// request will respond with a list of images in the service `c`. +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("images") +} + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("images") +} + +// `imageURL(c,i)` is the URL for the image identified by ID `i` in +// the service `c`. +func imageURL(c *gophercloud.ServiceClient, imageID string) string { + return c.ServiceURL("images", imageID) +} + +// `getURL(c,i)` is a URL for which a GET request will respond with +// information about the image identified by ID `i` in the service +// `c`. +func getURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func updateURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func deleteURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +// builds next page full url based on current url +func nextPageURL(serviceURL, requestedNext string) (string, error) { + base, err := utils.BaseEndpoint(serviceURL) + if err != nil { + return "", err + } + + requestedNextURL, err := url.Parse(requestedNext) + if err != nil { + return "", err + } + + base = gophercloud.NormalizeURL(base) + nextPath := base + strings.TrimPrefix(requestedNextURL.Path, "/") + + nextURL, err := url.Parse(nextPath) + if err != nil { + return "", err + } + + nextURL.RawQuery = requestedNextURL.RawQuery + + return nextURL.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/doc.go new file mode 100644 index 00000000..3257dd1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/doc.go @@ -0,0 +1,37 @@ +/* +Package attributestags manages Tags on Resources created by the OpenStack Neutron Service. + +This enables tagging via a standard interface for resources types which support it. + +See https://developer.openstack.org/api-ref/network/v2/#standard-attributes-tag-extension for more information on the underlying API. 
+ +Example to ReplaceAll Resource Tags + + network, err := networks.Create(conn, createOpts).Extract() + + tagReplaceAllOpts := attributestags.ReplaceAllOpts{ + Tags: []string{"abc", "123"}, + } + attributestags.ReplaceAll(conn, "networks", network.ID, tagReplaceAllOpts) + +Example to List all Resource Tags + + tags, err = attributestags.List(conn, "networks", network.ID).Extract() + +Example to Delete all Resource Tags + + err = attributestags.DeleteAll(conn, "networks", network.ID).ExtractErr() + +Example to Add a tag to a Resource + + err = attributestags.Add(client, "networks", network.ID, "atag").ExtractErr() + +Example to Delete a tag from a Resource + + err = attributestags.Delete(client, "networks", network.ID, "atag").ExtractErr() + +Example to confirm if a tag exists on a resource + + exists, _ := attributestags.Confirm(client, "networks", network.ID, "atag").Extract() +*/ +package attributestags diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/requests.go new file mode 100644 index 00000000..a08bccbb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/requests.go @@ -0,0 +1,81 @@ +package attributestags + +import ( + "github.com/gophercloud/gophercloud" +) + +// ReplaceAllOptsBuilder allows extensions to add additional parameters to +// the ReplaceAll request. +type ReplaceAllOptsBuilder interface { + ToAttributeTagsReplaceAllMap() (map[string]interface{}, error) +} + +// ReplaceAllOpts provides options used to create Tags on a Resource +type ReplaceAllOpts struct { + Tags []string `json:"tags" required:"true"` +} + +// ToAttributeTagsReplaceAllMap formats a ReplaceAllOpts into the body of the +// replace request +func (opts ReplaceAllOpts) ToAttributeTagsReplaceAllMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// ReplaceAll updates all tags on a resource, replacing any existing tags +func ReplaceAll(client *gophercloud.ServiceClient, resourceType string, resourceID string, opts ReplaceAllOptsBuilder) (r ReplaceAllResult) { + b, err := opts.ToAttributeTagsReplaceAllMap() + url := replaceURL(client, resourceType, resourceID) + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(url, &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// List all tags on a resource +func List(client *gophercloud.ServiceClient, resourceType string, resourceID string) (r ListResult) { + url := listURL(client, resourceType, resourceID) + _, r.Err = client.Get(url, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteAll deletes all tags on a resource +func DeleteAll(client *gophercloud.ServiceClient, resourceType string, resourceID string) (r DeleteResult) { + url := deleteAllURL(client, resourceType, resourceID) + _, r.Err = client.Delete(url, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// Add a tag on a resource +func Add(client *gophercloud.ServiceClient, resourceType string, resourceID string, tag string) (r AddResult) { + url := addURL(client, resourceType, resourceID, tag) + _, r.Err = client.Put(url, nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Delete a tag on a resource +func Delete(client *gophercloud.ServiceClient, resourceType string, resourceID string, 
tag string) (r DeleteResult) {
+	url := deleteURL(client, resourceType, resourceID, tag)
+	_, r.Err = client.Delete(url, &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
+
+// Confirm if a tag exists on a resource
+func Confirm(client *gophercloud.ServiceClient, resourceType string, resourceID string, tag string) (r ConfirmResult) {
+	url := confirmURL(client, resourceType, resourceID, tag)
+	_, r.Err = client.Get(url, nil, &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/results.go
new file mode 100644
index 00000000..cea8045b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/results.go
@@ -0,0 +1,57 @@
+package attributestags
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+type tagResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets tagResult to return the list of tags
+func (r tagResult) Extract() ([]string, error) {
+	var s struct {
+		Tags []string `json:"tags"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Tags, err
+}
+
+// ReplaceAllResult represents the result of a replace operation.
+// Call its Extract method to interpret it as a slice of strings.
+type ReplaceAllResult struct {
+	tagResult
+}
+
+// ListResult represents the result of a List operation. Call its Extract
+// method to interpret it as a slice of strings.
+type ListResult struct {
+	tagResult
+}
+
+// DeleteResult is the result from a Delete/DeleteAll operation.
+// Call its ExtractErr method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// AddResult is the result from an Add operation.
+// Call its ExtractErr method to determine if the call succeeded or failed.
+type AddResult struct {
+	gophercloud.ErrResult
+}
+
+// ConfirmResult is the result from a Confirm operation. Call its Extract
+// method to determine whether the tag exists on the resource.
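+//
+// A minimal usage sketch (client and networkID are assumed names):
+//
+//	exists, err := attributestags.Confirm(client, "networks", networkID, "atag").Extract()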
+type ConfirmResult struct { + gophercloud.Result +} + +func (r ConfirmResult) Extract() (bool, error) { + exists := r.Err == nil + + if r.Err != nil { + if _, ok := r.Err.(gophercloud.ErrDefault404); ok { + r.Err = nil + } + } + + return exists, r.Err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/urls.go new file mode 100644 index 00000000..973e00c4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/attributestags/urls.go @@ -0,0 +1,31 @@ +package attributestags + +import "github.com/gophercloud/gophercloud" + +const ( + tagsPath = "tags" +) + +func replaceURL(c *gophercloud.ServiceClient, r_type string, id string) string { + return c.ServiceURL(r_type, id, tagsPath) +} + +func listURL(c *gophercloud.ServiceClient, r_type string, id string) string { + return c.ServiceURL(r_type, id, tagsPath) +} + +func deleteAllURL(c *gophercloud.ServiceClient, r_type string, id string) string { + return c.ServiceURL(r_type, id, tagsPath) +} + +func addURL(c *gophercloud.ServiceClient, r_type string, id string, tag string) string { + return c.ServiceURL(r_type, id, tagsPath, tag) +} + +func deleteURL(c *gophercloud.ServiceClient, r_type string, id string, tag string) string { + return c.ServiceURL(r_type, id, tagsPath, tag) +} + +func confirmURL(c *gophercloud.ServiceClient, r_type string, id string, tag string) string { + return c.ServiceURL(r_type, id, tagsPath, tag) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/requests.go new file mode 100644 index 00000000..b7bd62d2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/requests.go @@ -0,0 +1,171 @@ +package dns + +import ( + "net/url" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// PortListOptsExt adds the DNS options to the base port ListOpts. +type PortListOptsExt struct { + ports.ListOptsBuilder + + DNSName string `q:"dns_name"` +} + +// ToPortListQuery adds the DNS options to the base port list options. +func (opts PortListOptsExt) ToPortListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts.ListOptsBuilder) + if err != nil { + return "", err + } + + params := q.Query() + + if opts.DNSName != "" { + params.Add("dns_name", opts.DNSName) + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// PortCreateOptsExt adds port DNS options to the base ports.CreateOpts. +type PortCreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + ports.CreateOptsBuilder + + // Set DNS name to the port + DNSName string `json:"dns_name,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. 
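+//
+// A minimal sketch of wrapping the base options, with portCreateOpts and
+// networkClient assumed names:
+//
+//	createOpts := dns.PortCreateOptsExt{
+//		CreateOptsBuilder: portCreateOpts,
+//		DNSName:           "test-port",
+//	}
+//	port, err := ports.Create(networkClient, createOpts).Extract()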
+func (opts PortCreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.DNSName != "" { + port["dns_name"] = opts.DNSName + } + + return base, nil +} + +// PortUpdateOptsExt adds DNS options to the base ports.UpdateOpts +type PortUpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + ports.UpdateOptsBuilder + + // Set DNS name to the port + DNSName *string `json:"dns_name,omitempty"` +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts PortUpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.DNSName != nil { + port["dns_name"] = *opts.DNSName + } + + return base, nil +} + +// FloatingIPCreateOptsExt adds floating IP DNS options to the base floatingips.CreateOpts. +type FloatingIPCreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + floatingips.CreateOptsBuilder + + // Set DNS name to the floating IPs + DNSName string `json:"dns_name,omitempty"` + + // Set DNS domain to the floating IPs + DNSDomain string `json:"dns_domain,omitempty"` +} + +// ToFloatingIPCreateMap casts a CreateOpts struct to a map. +func (opts FloatingIPCreateOptsExt) ToFloatingIPCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToFloatingIPCreateMap() + if err != nil { + return nil, err + } + + floatingip := base["floatingip"].(map[string]interface{}) + + if opts.DNSName != "" { + floatingip["dns_name"] = opts.DNSName + } + + if opts.DNSDomain != "" { + floatingip["dns_domain"] = opts.DNSDomain + } + + return base, nil +} + +// NetworkCreateOptsExt adds network DNS options to the base networks.CreateOpts. +type NetworkCreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + networks.CreateOptsBuilder + + // Set DNS domain to the network + DNSDomain string `json:"dns_domain,omitempty"` +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +func (opts NetworkCreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.DNSDomain != "" { + network["dns_domain"] = opts.DNSDomain + } + + return base, nil +} + +// NetworkUpdateOptsExt adds network DNS options to the base networks.UpdateOpts +type NetworkUpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + networks.UpdateOptsBuilder + + // Set DNS domain to the network + DNSDomain *string `json:"dns_domain,omitempty"` +} + +// ToNetworkUpdateMap casts an UpdateOpts struct to a map. 
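+//
+// A minimal sketch, with networkUpdateOpts assumed to be a networks.UpdateOpts:
+//
+//	dnsDomain := "local."
+//	updateOpts := dns.NetworkUpdateOptsExt{
+//		UpdateOptsBuilder: networkUpdateOpts,
+//		DNSDomain:         &dnsDomain,
+//	}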
+func (opts NetworkUpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.DNSDomain != nil { + network["dns_domain"] = *opts.DNSDomain + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/results.go new file mode 100644 index 00000000..ce5752fc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/dns/results.go @@ -0,0 +1,30 @@ +package dns + +// PortDNSExt represents a decorated form of a Port with the additional +// Port DNS information. +type PortDNSExt struct { + // The DNS name of the port. + DNSName string `json:"dns_name"` + + // The DNS assignment of the port. + DNSAssignment []map[string]string `json:"dns_assignment"` +} + +// FloatingIPDNSExt represents a decorated form of a Floating IP with the +// additional Floating IP DNS information. +type FloatingIPDNSExt struct { + // The DNS name of the floating IP, assigned to the external DNS + // service. + DNSName string `json:"dns_name"` + + // The DNS domain of the floating IP, assigned to the external DNS + // service. + DNSDomain string `json:"dns_domain"` +} + +// NetworkDNSExt represents a decorated form of a Network with the additional +// Network DNS information. +type NetworkDNSExt struct { + // The DNS domain of the network. + DNSDomain string `json:"dns_domain"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go new file mode 100644 index 00000000..eda010cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go @@ -0,0 +1,53 @@ +/* +Package external provides information and interaction with the external +extension for the OpenStack Networking service. 
+
+Example to List Networks with External Information
+
+	iTrue := true
+	networkListOpts := networks.ListOpts{}
+	listOpts := external.ListOptsExt{
+		ListOptsBuilder: networkListOpts,
+		External:        &iTrue,
+	}
+
+	type NetworkWithExternalExt struct {
+		networks.Network
+		external.NetworkExternalExt
+	}
+
+	var allNetworks []NetworkWithExternalExt
+
+	allPages, err := networks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	err = networks.ExtractNetworksInto(allPages, &allNetworks)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example to Create a Network with External Information
+
+	iTrue := true
+	networkCreateOpts := networks.CreateOpts{
+		Name:         "private",
+		AdminStateUp: &iTrue,
+	}
+
+	createOpts := external.CreateOptsExt{
+		CreateOptsBuilder: networkCreateOpts,
+		External:          &iTrue,
+	}
+
+	network, err := networks.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package external
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go
new file mode 100644
index 00000000..ced5efed
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go
@@ -0,0 +1,84 @@
+package external
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+)
+
+// ListOptsExt adds the external network options to the base ListOpts.
+type ListOptsExt struct {
+	networks.ListOptsBuilder
+	External *bool `q:"router:external"`
+}
+
+// ToNetworkListQuery adds the router:external option to the base network
+// list options.
+func (opts ListOptsExt) ToNetworkListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts.ListOptsBuilder)
+	if err != nil {
+		return "", err
+	}
+
+	params := q.Query()
+	if opts.External != nil {
+		v := strconv.FormatBool(*opts.External)
+		params.Add("router:external", v)
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+	return q.String(), err
+}
+
+// CreateOptsExt is the structure used when creating new external network
+// resources. It embeds networks.CreateOpts and so inherits all of its required
+// and optional fields, with the addition of the External field.
+type CreateOptsExt struct {
+	networks.CreateOptsBuilder
+	External *bool `json:"router:external,omitempty"`
+}
+
+// ToNetworkCreateMap adds the router:external options to the base network
+// creation options.
+func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.External == nil {
+		return base, nil
+	}
+
+	networkMap := base["network"].(map[string]interface{})
+	networkMap["router:external"] = opts.External
+
+	return base, nil
+}
+
+// UpdateOptsExt is the structure used when updating existing external network
+// resources. It embeds networks.UpdateOpts and so inherits all of its required
+// and optional fields, with the addition of the External field.
+type UpdateOptsExt struct {
+	networks.UpdateOptsBuilder
+	External *bool `json:"router:external,omitempty"`
+}
+
+// ToNetworkUpdateMap casts an UpdateOpts struct to a map.
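+//
+// A minimal sketch, with networkUpdateOpts assumed to be a networks.UpdateOpts:
+//
+//	iTrue := true
+//	updateOpts := external.UpdateOptsExt{
+//		UpdateOptsBuilder: networkUpdateOpts,
+//		External:          &iTrue,
+//	}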
+func (opts UpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) {
+	base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.External == nil {
+		return base, nil
+	}
+
+	networkMap := base["network"].(map[string]interface{})
+	networkMap["router:external"] = opts.External
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go
new file mode 100644
index 00000000..7cbbffdc
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go
@@ -0,0 +1,8 @@
+package external
+
+// NetworkExternalExt represents a decorated form of a Network based on the
+// "external-net" extension.
+type NetworkExternalExt struct {
+	// Specifies whether the network is an external network or not.
+	External bool `json:"router:external"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go
new file mode 100644
index 00000000..ec5d6181
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go
@@ -0,0 +1,80 @@
+/*
+Package extradhcpopts allows working with the extra DHCP options functionality of Neutron ports.
+
+Example to Get a Port with Extra DHCP Options
+
+	portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+	var s struct {
+		ports.Port
+		extradhcpopts.ExtraDHCPOptsExt
+	}
+
+	err := ports.Get(networkClient, portID).ExtractInto(&s)
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Port with Extra DHCP Options
+
+	var s struct {
+		ports.Port
+		extradhcpopts.ExtraDHCPOptsExt
+	}
+
+	adminStateUp := true
+	portCreateOpts := ports.CreateOpts{
+		Name:         "dhcp-conf-port",
+		AdminStateUp: &adminStateUp,
+		NetworkID:    "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+		FixedIPs: []ports.IP{
+			{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"},
+		},
+	}
+
+	createOpts := extradhcpopts.CreateOptsExt{
+		CreateOptsBuilder: portCreateOpts,
+		ExtraDHCPOpts: []extradhcpopts.CreateExtraDHCPOpt{
+			{
+				OptName:  "optionA",
+				OptValue: "valueA",
+			},
+		},
+	}
+
+	err := ports.Create(networkClient, createOpts).ExtractInto(&s)
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Port with Extra DHCP Options
+
+	var s struct {
+		ports.Port
+		extradhcpopts.ExtraDHCPOptsExt
+	}
+
+	portUpdateOpts := ports.UpdateOpts{
+		Name: "updated-dhcp-conf-port",
+		FixedIPs: []ports.IP{
+			{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"},
+		},
+	}
+
+	value := "valueB"
+	updateOpts := extradhcpopts.UpdateOptsExt{
+		UpdateOptsBuilder: portUpdateOpts,
+		ExtraDHCPOpts: []extradhcpopts.UpdateExtraDHCPOpt{
+			{
+				OptName:  "optionB",
+				OptValue: &value,
+			},
+		},
+	}
+
+	portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+	err := ports.Update(networkClient, portID, updateOpts).ExtractInto(&s)
+	if err != nil {
+		panic(err)
+	}
+*/
+package extradhcpopts
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go
new file mode 100644
index 00000000..f3eb9bc4
--- /dev/null
+++
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go @@ -0,0 +1,102 @@ +package extradhcpopts + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateOptsExt adds extra DHCP options to the base ports.CreateOpts. +type CreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + ports.CreateOptsBuilder + + // ExtraDHCPOpts field is a set of DHCP options for a single port. + ExtraDHCPOpts []CreateExtraDHCPOpt `json:"extra_dhcp_opts,omitempty"` +} + +// CreateExtraDHCPOpt represents the options required to create an extra DHCP +// option on a port. +type CreateExtraDHCPOpt struct { + // OptName is the name of a DHCP option. + OptName string `json:"opt_name" required:"true"` + + // OptValue is the value of the DHCP option. + OptValue string `json:"opt_value" required:"true"` + + // IPVersion is the IP protocol version of a DHCP option. + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` +} + +// ToPortCreateMap casts a CreateOptsExt struct to a map. +func (opts CreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + // Convert opts.ExtraDHCPOpts to a slice of maps. + if opts.ExtraDHCPOpts != nil { + extraDHCPOpts := make([]map[string]interface{}, len(opts.ExtraDHCPOpts)) + for i, opt := range opts.ExtraDHCPOpts { + b, err := gophercloud.BuildRequestBody(opt, "") + if err != nil { + return nil, err + } + extraDHCPOpts[i] = b + } + port["extra_dhcp_opts"] = extraDHCPOpts + } + + return base, nil +} + +// UpdateOptsExt adds extra DHCP options to the base ports.UpdateOpts. +type UpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + ports.UpdateOptsBuilder + + // ExtraDHCPOpts field is a set of DHCP options for a single port. + ExtraDHCPOpts []UpdateExtraDHCPOpt `json:"extra_dhcp_opts,omitempty"` +} + +// UpdateExtraDHCPOpt represents the options required to update an extra DHCP +// option on a port. +type UpdateExtraDHCPOpt struct { + // OptName is the name of a DHCP option. + OptName string `json:"opt_name" required:"true"` + + // OptValue is the value of the DHCP option. + OptValue *string `json:"opt_value"` + + // IPVersion is the IP protocol version of a DHCP option. + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + // Convert opts.ExtraDHCPOpts to a slice of maps. 
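+	// Each option below is marshaled individually via
+	// gophercloud.BuildRequestBody with an empty parent key, yielding the
+	// bare {"opt_name": ..., "opt_value": ...} maps the API expects.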
+ if opts.ExtraDHCPOpts != nil { + extraDHCPOpts := make([]map[string]interface{}, len(opts.ExtraDHCPOpts)) + for i, opt := range opts.ExtraDHCPOpts { + b, err := gophercloud.BuildRequestBody(opt, "") + if err != nil { + return nil, err + } + extraDHCPOpts[i] = b + } + port["extra_dhcp_opts"] = extraDHCPOpts + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go new file mode 100644 index 00000000..8e3132ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go @@ -0,0 +1,20 @@ +package extradhcpopts + +// ExtraDHCPOptsExt is a struct that contains different DHCP options for a +// single port. +type ExtraDHCPOptsExt struct { + ExtraDHCPOpts []ExtraDHCPOpt `json:"extra_dhcp_opts"` +} + +// ExtraDHCPOpt represents a single set of extra DHCP options for a single port. +type ExtraDHCPOpt struct { + // OptName is the name of a single DHCP option. + OptName string `json:"opt_name"` + + // OptValue is the value of a single DHCP option. + OptValue string `json:"opt_value"` + + // IPVersion is the IP protocol version of a single DHCP option. + // Valid value is 4 or 6. Default is 4. + IPVersion int `json:"ip_version"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go new file mode 100644 index 00000000..c01070ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go @@ -0,0 +1,60 @@ +/* +Package firewalls allows management and retrieval of firewalls from the +OpenStack Networking Service. 
+ +Example to List Firewalls + + listOpts := firewalls.ListOpts{ + TenantID: "tenant-id", + } + + allPages, err := firewalls.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFirewalls, err := firewalls.ExtractFirewalls(allPages) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall + + createOpts := firewalls.CreateOpts{ + Name: "firewall_1", + Description: "A firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + AdminStateUp: gophercloud.Enabled, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + updateOpts := firewalls.UpdateOpts{ + AdminStateUp: gophercloud.Disabled, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + err := firewalls.Delete(networkClient, firewallID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package firewalls diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go new file mode 100644 index 00000000..dd92bb20 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go @@ -0,0 +1,11 @@ +package firewalls + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errPolicyRequired = err("A policy ID is required") +) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go new file mode 100644 index 00000000..c710f94a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go @@ -0,0 +1,139 @@ +package firewalls + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFirewallListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall attributes you want to see returned. SortKey allows you to sort +// by a particular firewall attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + AdminStateUp bool `q:"admin_state_up"` + Shared bool `q:"shared"` + PolicyID string `q:"firewall_policy_id"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToFirewallListQuery formats a ListOpts into a query string. 
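+//
+// For example (field values assumed), ListOpts{Name: "fw1", Limit: 10}
+// should yield the query string "?limit=10&name=fw1".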
+func (opts ListOpts) ToFirewallListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// firewalls. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those firewalls that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToFirewallListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return FirewallPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToFirewallCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new firewall.
+type CreateOpts struct {
+	PolicyID string `json:"firewall_policy_id" required:"true"`
+	// TenantID specifies a tenant to own the firewall. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID     string `json:"tenant_id,omitempty"`
+	ProjectID    string `json:"project_id,omitempty"`
+	Name         string `json:"name,omitempty"`
+	Description  string `json:"description,omitempty"`
+	AdminStateUp *bool  `json:"admin_state_up,omitempty"`
+	Shared       *bool  `json:"shared,omitempty"`
+}
+
+// ToFirewallCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new firewall.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFirewallCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular firewall based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToFirewallUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall.
+type UpdateOpts struct {
+	PolicyID     string  `json:"firewall_policy_id" required:"true"`
+	Name         *string `json:"name,omitempty"`
+	Description  *string `json:"description,omitempty"`
+	AdminStateUp *bool   `json:"admin_state_up,omitempty"`
+	Shared       *bool   `json:"shared,omitempty"`
+}
+
+// ToFirewallUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall")
+}
+
+// Update allows firewalls to be updated.
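+//
+// A minimal sketch (client, id, and policyID are assumed names; PolicyID
+// is required on update):
+//
+//	name := "fw_renamed"
+//	updateOpts := firewalls.UpdateOpts{
+//		PolicyID: policyID,
+//		Name:     &name,
+//	}
+//	firewall, err := firewalls.Update(client, id, updateOpts).Extract()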
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToFirewallUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular firewall based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go new file mode 100644 index 00000000..9543f0fa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go @@ -0,0 +1,96 @@ +package firewalls + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Firewall is an OpenStack firewall. +type Firewall struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AdminStateUp bool `json:"admin_state_up"` + Status string `json:"status"` + PolicyID string `json:"firewall_policy_id"` + TenantID string `json:"tenant_id"` + ProjectID string `json:"project_id"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a firewall. +func (r commonResult) Extract() (*Firewall, error) { + var s Firewall + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "firewall") +} + +func ExtractFirewallsInto(r pagination.Page, v interface{}) error { + return r.(FirewallPage).Result.ExtractIntoSlicePtr(v, "firewalls") +} + +// FirewallPage is the page returned by a pager when traversing over a +// collection of firewalls. +type FirewallPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of firewalls has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r FirewallPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"firewalls_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a FirewallPage struct is empty. +func (r FirewallPage) IsEmpty() (bool, error) { + is, err := ExtractFirewalls(r) + return len(is) == 0, err +} + +// ExtractFirewalls accepts a Page struct, specifically a FirewallPage struct, +// and extracts the elements into a slice of Firewall structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractFirewalls(r pagination.Page) ([]Firewall, error) { + var s []Firewall + err := ExtractFirewallsInto(r, &s) + return s, err +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Firewall. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as a Firewall. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
Call its
+// ExtractErr method to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret it as a Firewall.
+type CreateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go
new file mode 100644
index 00000000..807ea1ab
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go
@@ -0,0 +1,16 @@
+package firewalls
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "fw"
+	resourcePath = "firewalls"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go
new file mode 100644
index 00000000..ae824491
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go
@@ -0,0 +1,84 @@
+/*
+Package policies allows management and retrieval of Firewall Policies in the
+OpenStack Networking Service.
+
+Example to List Policies
+
+	listOpts := policies.ListOpts{
+		TenantID: "966b3c7d36a24facaf20b7e458bf2192",
+	}
+
+	allPages, err := policies.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := policies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, policy := range allPolicies {
+		fmt.Printf("%+v\n", policy)
+	}
+
+Example to Create a Policy
+
+	createOpts := policies.CreateOpts{
+		Name:        "policy_1",
+		Description: "A policy",
+		Rules: []string{
+			"98a58c87-76be-ae7c-a74e-b77fffb88d95",
+			"7c4f087a-ed46-4ea8-8040-11ca460a61c0",
+		},
+	}
+
+	policy, err := policies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+
+	newDescription := "New Description"
+	updateOpts := policies.UpdateOpts{
+		Description: &newDescription,
+	}
+
+	policy, err := policies.Update(networkClient, policyID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	err := policies.Delete(networkClient, policyID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Add a Rule to a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	ruleOpts := policies.InsertRuleOpts{
+		ID: "98a58c87-76be-ae7c-a74e-b77fffb88d95",
+	}
+
+	policy, err := policies.AddRule(networkClient, policyID, ruleOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Rule from a Policy
+
+	policyID := "38aee955-6283-4279-b091-8b9c828000ec"
+	ruleID := "98a58c87-76be-ae7c-a74e-b77fffb88d95"
+
+	policy, err := policies.RemoveRule(networkClient, policyID, ruleID).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package policies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go new file mode 100644 index 00000000..fcecf6a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go @@ -0,0 +1,179 @@ +package policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall policy attributes you want to see returned. SortKey allows you +// to sort by a particular firewall policy attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + Shared *bool `q:"shared"` + Audited *bool `q:"audited"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall policies. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall policies that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFirewallPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall policy. +type CreateOpts struct { + // TenantID specifies a tenant to own the firewall. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Shared *bool `json:"shared,omitempty"` + Audited *bool `json:"audited,omitempty"` + Rules []string `json:"firewall_rules,omitempty"` +} + +// ToFirewallPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "firewall_policy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// firewall policy. 
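+//
+// A minimal sketch (client and the rule IDs are assumed names):
+//
+//	createOpts := policies.CreateOpts{
+//		Name:  "policy_1",
+//		Rules: []string{ruleID1, ruleID2},
+//	}
+//	policy, err := policies.Create(client, createOpts).Extract()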
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFirewallPolicyCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular firewall policy based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToFirewallPolicyUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall policy.
+type UpdateOpts struct {
+	Name        *string  `json:"name,omitempty"`
+	Description *string  `json:"description,omitempty"`
+	Shared      *bool    `json:"shared,omitempty"`
+	Audited     *bool    `json:"audited,omitempty"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+// ToFirewallPolicyUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToFirewallPolicyUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_policy")
+}
+
+// Update allows firewall policies to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFirewallPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall policy based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// InsertRuleOptsBuilder allows extensions to add additional parameters to the
+// InsertRule request.
+type InsertRuleOptsBuilder interface {
+	ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error)
+}
+
+// InsertRuleOpts contains the values used when updating a policy's rules.
+type InsertRuleOpts struct {
+	ID           string `json:"firewall_rule_id" required:"true"`
+	BeforeRuleID string `json:"insert_before,omitempty"`
+	AfterRuleID  string `json:"insert_after,omitempty"`
+}
+
+// ToFirewallPolicyInsertRuleMap casts an InsertRuleOpts struct to a map.
+func (opts InsertRuleOpts) ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// AddRule will add a rule to a policy.
+func AddRule(c *gophercloud.ServiceClient, id string, opts InsertRuleOptsBuilder) (r InsertRuleResult) {
+	b, err := opts.ToFirewallPolicyInsertRuleMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(insertURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// RemoveRule will remove a rule from a policy.
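+//
+// A minimal sketch (client and the IDs are assumed names):
+//
+//	policy, err := policies.RemoveRule(client, policyID, ruleID).Extract()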
+func RemoveRule(c *gophercloud.ServiceClient, id, ruleID string) (r RemoveRuleResult) {
+	b := map[string]interface{}{"firewall_rule_id": ruleID}
+	_, r.Err = c.Put(removeURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go
new file mode 100644
index 00000000..495cef2c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go
@@ -0,0 +1,104 @@
+package policies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Policy is a firewall policy.
+type Policy struct {
+	ID          string   `json:"id"`
+	Name        string   `json:"name"`
+	Description string   `json:"description"`
+	TenantID    string   `json:"tenant_id"`
+	ProjectID   string   `json:"project_id"`
+	Audited     bool     `json:"audited"`
+	Shared      bool     `json:"shared"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall policy.
+func (r commonResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"firewall_policy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Policy, err
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of firewall policies.
+type PolicyPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r PolicyPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"firewall_policies_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PolicyPage struct is empty.
+func (r PolicyPage) IsEmpty() (bool, error) {
+	is, err := ExtractPolicies(r)
+	return len(is) == 0, err
+}
+
+// ExtractPolicies accepts a Page struct, specifically a PolicyPage struct,
+// and extracts the elements into a slice of Policy structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPolicies(r pagination.Page) ([]Policy, error) {
+	var s struct {
+		Policies []Policy `json:"firewall_policies"`
+	}
+	err := (r.(PolicyPage)).ExtractInto(&s)
+	return s.Policies, err
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Policy.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its
+// Extract method to interpret it as a Policy.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Policy.
+type CreateResult struct {
+	commonResult
+}
+
+// InsertRuleResult represents the result of an InsertRule operation. Call its
+// Extract method to interpret it as a Policy.
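+//
+// A minimal sketch (client and the IDs are assumed names):
+//
+//	res := policies.AddRule(client, policyID, policies.InsertRuleOpts{ID: ruleID})
+//	policy, err := res.Extract()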
+type InsertRuleResult struct { + commonResult +} + +// RemoveRuleResult represents the result of a RemoveRule operation. Call its +// Extract method to interpret it as a Policy. +type RemoveRuleResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go new file mode 100644 index 00000000..c252b79d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go @@ -0,0 +1,26 @@ +package policies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewall_policies" + insertPath = "insert_rule" + removePath = "remove_rule" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func insertURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, insertPath) +} + +func removeURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, removePath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go new file mode 100644 index 00000000..4f0a779e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go @@ -0,0 +1,68 @@ +/* +Package routerinsertion implements the fwaasrouterinsertion Firewall extension. +It is used to manage the router information of a firewall. 
+ +Example to List Firewalls with Router Information + + type FirewallsWithRouters struct { + firewalls.Firewall + routerinsertion.FirewallExt + } + + var allFirewalls []FirewallsWithRouters + + allPages, err := firewalls.List(networkClient, nil).AllPages() + if err != nil { + panic(err) + } + + err = firewalls.ExtractFirewallsInto(allPages, &allFirewalls) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall with a Router + + firewallCreateOpts := firewalls.CreateOpts{ + Name: "firewall_1", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall with a Router + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + firewallUpdateOpts := firewalls.UpdateOpts{ + Description: "updated firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + updateOpts := routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: firewallUpdateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } +*/ +package routerinsertion diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go new file mode 100644 index 00000000..b1f6d76e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go @@ -0,0 +1,43 @@ +package routerinsertion + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" +) + +// CreateOptsExt adds the RouterIDs option to the base CreateOpts. +type CreateOptsExt struct { + firewalls.CreateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallCreateMap adds router_ids to the base firewall creation options. +func (opts CreateOptsExt) ToFirewallCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToFirewallCreateMap() + if err != nil { + return nil, err + } + + firewallMap := base["firewall"].(map[string]interface{}) + firewallMap["router_ids"] = opts.RouterIDs + + return base, nil +} + +// UpdateOptsExt adds the RouterIDs option to the base UpdateOpts. +type UpdateOptsExt struct { + firewalls.UpdateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallUpdateMap adds router_ids to the base firewall update options. 
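+//
+// A minimal sketch, with firewallUpdateOpts assumed to be a firewalls.UpdateOpts:
+//
+//	updateOpts := routerinsertion.UpdateOptsExt{
+//		UpdateOptsBuilder: firewallUpdateOpts,
+//		RouterIDs:         []string{routerID},
+//	}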
+func (opts UpdateOptsExt) ToFirewallUpdateMap() (map[string]interface{}, error) {
+	base, err := opts.UpdateOptsBuilder.ToFirewallUpdateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	firewallMap := base["firewall"].(map[string]interface{})
+	firewallMap["router_ids"] = opts.RouterIDs
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go
new file mode 100644
index 00000000..85c288e5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go
@@ -0,0 +1,7 @@
+package routerinsertion
+
+// FirewallExt is an extension to the base Firewall object
+type FirewallExt struct {
+	// RouterIDs are the routers that the firewall is attached to.
+	RouterIDs []string `json:"router_ids"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go
new file mode 100644
index 00000000..3351a3e5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go
@@ -0,0 +1,64 @@
+/*
+Package rules enables management and retrieval of Firewall Rules in the
+OpenStack Networking Service.
+
+Example to List Rules
+
+	listOpts := rules.ListOpts{
+		Protocol: string(rules.ProtocolAny),
+	}
+
+	allPages, err := rules.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRules, err := rules.ExtractRules(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, rule := range allRules {
+		fmt.Printf("%+v\n", rule)
+	}
+
+Example to Create a Rule
+
+	createOpts := rules.CreateOpts{
+		Action:               "allow",
+		Protocol:             rules.ProtocolTCP,
+		Description:          "ssh",
+		DestinationPort:      "22",
+		DestinationIPAddress: "192.168.1.0/24",
+	}
+
+	rule, err := rules.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	newPort := "80"
+	newDescription := "http"
+
+	updateOpts := rules.UpdateOpts{
+		Description:     &newDescription,
+		DestinationPort: &newPort,
+	}
+
+	rule, err := rules.Update(networkClient, ruleID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	err := rules.Delete(networkClient, ruleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package rules
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
new file mode 100644
index 00000000..0b29d39f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
@@ -0,0 +1,12 @@
+package rules
+
+import "fmt"
+
+func err(str string) error {
+	return fmt.Errorf("%s", str)
+}
+
+var (
+	errProtocolRequired = err("A protocol is required (tcp, udp, icmp or any)")
+	errActionRequired   = err("An action is required (allow or deny)")
+)
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go new file mode 100644 index 00000000..17979b63 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go @@ -0,0 +1,190 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type ( + // Protocol represents a valid rule protocol. + Protocol string +) + +const ( + // ProtocolAny is to allow any protocol. + ProtocolAny Protocol = "any" + + // ProtocolICMP is to allow the ICMP protocol. + ProtocolICMP Protocol = "icmp" + + // ProtocolTCP is to allow the TCP protocol. + ProtocolTCP Protocol = "tcp" + + // ProtocolUDP is to allow the UDP protocol. + ProtocolUDP Protocol = "udp" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToRuleListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Firewall rule attributes you want to see returned. SortKey allows you to +// sort by a particular firewall rule attribute. SortDir sets the direction, and +// is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + Protocol string `q:"protocol"` + Action string `q:"action"` + IPVersion int `q:"ip_version"` + SourceIPAddress string `q:"source_ip_address"` + DestinationIPAddress string `q:"destination_ip_address"` + SourcePort string `q:"source_port"` + DestinationPort string `q:"destination_port"` + Enabled bool `q:"enabled"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRuleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRuleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall rules that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToRuleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall rule. 
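+// Note that when Protocol is ProtocolAny, ToRuleCreateMap below sends a
+// null protocol to the API, which is how Neutron represents "any".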
+type CreateOpts struct {
+    Protocol             Protocol              `json:"protocol" required:"true"`
+    Action               string                `json:"action" required:"true"`
+    TenantID             string                `json:"tenant_id,omitempty"`
+    ProjectID            string                `json:"project_id,omitempty"`
+    Name                 string                `json:"name,omitempty"`
+    Description          string                `json:"description,omitempty"`
+    IPVersion            gophercloud.IPVersion `json:"ip_version,omitempty"`
+    SourceIPAddress      string                `json:"source_ip_address,omitempty"`
+    DestinationIPAddress string                `json:"destination_ip_address,omitempty"`
+    SourcePort           string                `json:"source_port,omitempty"`
+    DestinationPort      string                `json:"destination_port,omitempty"`
+    Shared               *bool                 `json:"shared,omitempty"`
+    Enabled              *bool                 `json:"enabled,omitempty"`
+}
+
+// ToRuleCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) {
+    b, err := gophercloud.BuildRequestBody(opts, "firewall_rule")
+    if err != nil {
+        return nil, err
+    }
+
+    if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" {
+        m["protocol"] = nil
+    }
+
+    return b, nil
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// firewall rule.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToRuleCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+    return
+}
+
+// Get retrieves a particular firewall rule based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall rule.
+// These fields are all pointers so that unset fields will not cause the
+// existing Rule attribute to be removed.
+type UpdateOpts struct {
+    Protocol             *string                `json:"protocol,omitempty"`
+    Action               *string                `json:"action,omitempty"`
+    Name                 *string                `json:"name,omitempty"`
+    Description          *string                `json:"description,omitempty"`
+    IPVersion            *gophercloud.IPVersion `json:"ip_version,omitempty"`
+    SourceIPAddress      *string                `json:"source_ip_address,omitempty"`
+    DestinationIPAddress *string                `json:"destination_ip_address,omitempty"`
+    SourcePort           *string                `json:"source_port,omitempty"`
+    DestinationPort      *string                `json:"destination_port,omitempty"`
+    Shared               *bool                  `json:"shared,omitempty"`
+    Enabled              *bool                  `json:"enabled,omitempty"`
+}
+
+// ToRuleUpdateMap casts a UpdateOpts struct to a map.
+func (opts UpdateOpts) ToRuleUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "firewall_rule")
+}
+
+// Update allows firewall rules to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+    b, err := opts.ToRuleUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// Delete will permanently delete a particular firewall rule based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(resourceURL(c, id), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
new file mode 100644
index 00000000..82bf4a36
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
@@ -0,0 +1,100 @@
+package rules
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// Rule represents a firewall rule.
+type Rule struct {
+    ID                   string `json:"id"`
+    Name                 string `json:"name,omitempty"`
+    Description          string `json:"description,omitempty"`
+    Protocol             string `json:"protocol"`
+    Action               string `json:"action"`
+    IPVersion            int    `json:"ip_version,omitempty"`
+    SourceIPAddress      string `json:"source_ip_address,omitempty"`
+    DestinationIPAddress string `json:"destination_ip_address,omitempty"`
+    SourcePort           string `json:"source_port,omitempty"`
+    DestinationPort      string `json:"destination_port,omitempty"`
+    Shared               bool   `json:"shared,omitempty"`
+    Enabled              bool   `json:"enabled,omitempty"`
+    PolicyID             string `json:"firewall_policy_id"`
+    Position             int    `json:"position"`
+    TenantID             string `json:"tenant_id"`
+    ProjectID            string `json:"project_id"`
+}
+
+// RulePage is the page returned by a pager when traversing over a
+// collection of firewall rules.
+type RulePage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall rules has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r RulePage) NextPageURL() (string, error) {
+    var s struct {
+        Links []gophercloud.Link `json:"firewall_rules_links"`
+    }
+    err := r.ExtractInto(&s)
+    if err != nil {
+        return "", err
+    }
+    return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a RulePage struct is empty.
+func (r RulePage) IsEmpty() (bool, error) {
+    is, err := ExtractRules(r)
+    return len(is) == 0, err
+}
+
+// ExtractRules accepts a Page struct, specifically a RulePage struct,
+// and extracts the elements into a slice of Rule structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRules(r pagination.Page) ([]Rule, error) {
+    var s struct {
+        Rules []Rule `json:"firewall_rules"`
+    }
+    err := (r.(RulePage)).ExtractInto(&s)
+    return s.Rules, err
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a firewall rule.
+func (r commonResult) Extract() (*Rule, error) {
+    var s struct {
+        Rule *Rule `json:"firewall_rule"`
+    }
+    err := r.ExtractInto(&s)
+    return s.Rule, err
+}
+
+// GetResult represents the result of a get operation. Call its Extract method
+// to interpret it as a Rule.
+type GetResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Rule.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Rule.
+type CreateResult struct {
+    commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go
new file mode 100644
index 00000000..79654be7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go
@@ -0,0 +1,16 @@
+package rules
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+    rootPath     = "fw"
+    resourcePath = "firewall_rules"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/doc.go
new file mode 100644
index 00000000..d68d2b76
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/doc.go
@@ -0,0 +1,64 @@
+/*
+Package addressscopes provides the ability to retrieve and manage Address scopes through the Neutron API.
+
+Example of Listing Address scopes
+
+    listOpts := addressscopes.ListOpts{
+        IPVersion: 6,
+    }
+
+    allPages, err := addressscopes.List(networkClient, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allAddressScopes, err := addressscopes.ExtractAddressScopes(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, addressScope := range allAddressScopes {
+        fmt.Printf("%+v\n", addressScope)
+    }
+
+Example to Get an Address scope
+
+    addressScopeID := "9cc35860-522a-4d35-974d-51d4b011801e"
+    addressScope, err := addressscopes.Get(networkClient, addressScopeID).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Create a new Address scope
+
+    addressScopeOpts := addressscopes.CreateOpts{
+        Name:      "my_address_scope",
+        IPVersion: 6,
+    }
+    addressScope, err := addressscopes.Create(networkClient, addressScopeOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update an Address scope
+
+    addressScopeID := "9cc35860-522a-4d35-974d-51d4b011801e"
+    newName := "awesome_name"
+    updateOpts := addressscopes.UpdateOpts{
+        Name: &newName,
+    }
+
+    addressScope, err := addressscopes.Update(networkClient, addressScopeID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete an Address scope
+
+    addressScopeID := "9cc35860-522a-4d35-974d-51d4b011801e"
+    err := addressscopes.Delete(networkClient, addressScopeID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package addressscopes
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/requests.go
new file mode 100644
index 00000000..defa8e1b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/requests.go
@@ -0,0 +1,147 @@
+package addressscopes
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+    ToAddressScopeListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the Neutron API. Filtering is achieved by passing in struct field values
+// that map to the address-scope attributes you want to see returned.
+// SortKey allows you to sort by a particular address-scope attribute.
+// SortDir sets the direction, and is either `asc' or `desc'.
+// Marker and Limit are used for the pagination.
+type ListOpts struct {
+    ID          string `q:"id"`
+    Name        string `q:"name"`
+    TenantID    string `q:"tenant_id"`
+    ProjectID   string `q:"project_id"`
+    IPVersion   int    `q:"ip_version"`
+    Shared      *bool  `q:"shared"`
+    Description string `q:"description"`
+    Limit       int    `q:"limit"`
+    Marker      string `q:"marker"`
+    SortKey     string `q:"sort_key"`
+    SortDir     string `q:"sort_dir"`
+}
+
+// ToAddressScopeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToAddressScopeListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// address-scopes. It accepts a ListOpts struct, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only the address-scopes owned by the project
+// of the user submitting the request, unless the user has the administrative
+// role.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+    url := listURL(c)
+    if opts != nil {
+        query, err := opts.ToAddressScopeListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return AddressScopePage{pagination.LinkedPageBase{PageResult: r}}
+    })
+}
+
+// Get retrieves a specific address-scope based on its ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+    return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+    ToAddressScopeCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies parameters of a new address-scope.
+type CreateOpts struct {
+    // Name is the human-readable name of the address-scope.
+    Name string `json:"name"`
+
+    // TenantID is the id of the Identity project.
+    TenantID string `json:"tenant_id,omitempty"`
+
+    // ProjectID is the id of the Identity project.
+    ProjectID string `json:"project_id,omitempty"`
+
+    // IPVersion is the IP protocol version.
+    IPVersion int `json:"ip_version"`
+
+    // Shared indicates whether this address-scope is shared across all projects.
+    Shared bool `json:"shared,omitempty"`
+}
+
+// ToAddressScopeCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToAddressScopeCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "address_scope")
+}
+
+// Create requests the creation of a new address-scope on the server.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToAddressScopeCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{201},
+    })
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToAddressScopeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update an address-scope.
+type UpdateOpts struct {
+    // Name is the human-readable name of the address-scope.
+    Name *string `json:"name,omitempty"`
+
+    // Shared indicates whether this address-scope is shared across all projects.
+    Shared *bool `json:"shared,omitempty"`
+}
+
+// ToAddressScopeUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToAddressScopeUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "address_scope")
+}
+
+// Update accepts an UpdateOpts struct and updates an existing address-scope
+// using the values provided.
+func Update(c *gophercloud.ServiceClient, addressScopeID string, opts UpdateOptsBuilder) (r UpdateResult) {
+    b, err := opts.ToAddressScopeUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(updateURL(c, addressScopeID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// Delete accepts a unique ID and deletes the address-scope associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(deleteURL(c, id), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/results.go
new file mode 100644
index 00000000..5f78dfee
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/results.go
@@ -0,0 +1,99 @@
+package addressscopes
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an address-scope resource.
+func (r commonResult) Extract() (*AddressScope, error) {
+    var s struct {
+        AddressScope *AddressScope `json:"address_scope"`
+    }
+    err := r.ExtractInto(&s)
+    return s.AddressScope, err
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as an AddressScope.
+type GetResult struct {
+    commonResult
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as an AddressScope.
+type CreateResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as an AddressScope.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// AddressScope represents a Neutron address-scope.
+type AddressScope struct {
+    // ID is the id of the address-scope.
+    ID string `json:"id"`
+
+    // Name is the human-readable name of the address-scope.
+    Name string `json:"name"`
+
+    // TenantID is the id of the Identity project.
+    TenantID string `json:"tenant_id"`
+
+    // ProjectID is the id of the Identity project.
+    ProjectID string `json:"project_id"`
+
+    // IPVersion is the IP protocol version.
+    IPVersion int `json:"ip_version"`
+
+    // Shared indicates whether this address-scope is shared across all projects.
+    Shared bool `json:"shared"`
+}
+
+// AddressScopePage stores a single page of AddressScopes from a List() API call.
+type AddressScopePage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of address-scope has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r AddressScopePage) NextPageURL() (string, error) {
+    var s struct {
+        Links []gophercloud.Link `json:"address_scopes_links"`
+    }
+    err := r.ExtractInto(&s)
+    if err != nil {
+        return "", err
+    }
+    return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty determines whether or not an AddressScopePage is empty.
+func (r AddressScopePage) IsEmpty() (bool, error) {
+    addressScopes, err := ExtractAddressScopes(r)
+    return len(addressScopes) == 0, err
+}
+
+// ExtractAddressScopes interprets the results of a single page from a List()
+// API call, producing a slice of AddressScopes structs.
+func ExtractAddressScopes(r pagination.Page) ([]AddressScope, error) {
+    var s struct {
+        AddressScopes []AddressScope `json:"address_scopes"`
+    }
+    err := (r.(AddressScopePage)).ExtractInto(&s)
+    return s.AddressScopes, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/urls.go
new file mode 100644
index 00000000..9fe7e01a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/addressscopes/urls.go
@@ -0,0 +1,33 @@
+package addressscopes
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "address-scopes"
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id)
+}
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(resourcePath)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+    return rootURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+    return rootURL(c)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go
new file mode 100644
index 00000000..a71a3ec8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go
@@ -0,0 +1,71 @@
+/*
+Package floatingips enables management and retrieval of Floating IPs from the
+OpenStack Networking service.
+
+Example to List Floating IPs
+
+    listOpts := floatingips.ListOpts{
+        FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee",
+    }
+
+    allPages, err := floatingips.List(networkClient, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allFIPs, err := floatingips.ExtractFloatingIPs(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, fip := range allFIPs {
+        fmt.Printf("%+v\n", fip)
+    }
+
+Example to Create a Floating IP
+
+    createOpts := floatingips.CreateOpts{
+        FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee",
+    }
+
+    fip, err := floatingips.Create(networkClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update a Floating IP
+
+    fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+    portID := "76d0a61b-b8e5-490c-9892-4cf674f2bec8"
+
+    updateOpts := floatingips.UpdateOpts{
+        PortID: &portID,
+    }
+
+    fip, err := floatingips.Update(networkClient, fipID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Disassociate a Floating IP from a Port
+
+    fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+
+    updateOpts := floatingips.UpdateOpts{
+        PortID: new(string),
+    }
+
+    fip, err := floatingips.Update(networkClient, fipID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Floating IP
+
+    fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7"
+    err := floatingips.Delete(networkClient, fipID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package floatingips
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
new file mode 100644
index 00000000..0c0db64d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go
@@ -0,0 +1,182 @@
+package floatingips
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+    ToFloatingIPListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the floating IP attributes you want to see returned. SortKey allows you to
+// sort by a particular network attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+    ID                string `q:"id"`
+    Description       string `q:"description"`
+    FloatingNetworkID string `q:"floating_network_id"`
+    PortID            string `q:"port_id"`
+    FixedIP           string `q:"fixed_ip_address"`
+    FloatingIP        string `q:"floating_ip_address"`
+    TenantID          string `q:"tenant_id"`
+    ProjectID         string `q:"project_id"`
+    Limit             int    `q:"limit"`
+    Marker            string `q:"marker"`
+    SortKey           string `q:"sort_key"`
+    SortDir           string `q:"sort_dir"`
+    RouterID          string `q:"router_id"`
+    Status            string `q:"status"`
+    Tags              string `q:"tags"`
+    TagsAny           string `q:"tags-any"`
+    NotTags           string `q:"not-tags"`
+    NotTagsAny        string `q:"not-tags-any"`
+}
+
+// ToFloatingIPListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToFloatingIPListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// floating IP resources. It accepts a ListOpts struct, which allows you to
+// filter and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+    url := rootURL(c)
+    if opts != nil {
+        query, err := opts.ToFloatingIPListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}}
+    })
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+    ToFloatingIPCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new floating IP
+// resource. The only required field is FloatingNetworkID, which refers to the
+// external network; the internal port to associate, PortID, is optional.
+type CreateOpts struct {
+    Description       string `json:"description,omitempty"`
+    FloatingNetworkID string `json:"floating_network_id" required:"true"`
+    FloatingIP        string `json:"floating_ip_address,omitempty"`
+    PortID            string `json:"port_id,omitempty"`
+    FixedIP           string `json:"fixed_ip_address,omitempty"`
+    SubnetID          string `json:"subnet_id,omitempty"`
+    TenantID          string `json:"tenant_id,omitempty"`
+    ProjectID         string `json:"project_id,omitempty"`
+}
+
+// ToFloatingIPCreateMap allows CreateOpts to satisfy the CreateOptsBuilder
+// interface
+func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "floatingip")
+}
+
+// Create accepts a CreateOpts struct and uses the values provided to create a
+// new floating IP resource. You can create floating IPs on external networks
+// only. If you provide a FloatingNetworkID which refers to a network that is
+// not external (i.e. its `router:external' attribute is False), the operation
+// will fail and return a 400 error.
+//
+// If you do not specify a FloatingIP address value, the operation will
+// automatically allocate an available address for the new resource. If you do
+// choose to specify one, it must fall within the subnet range for the external
+// network - otherwise the operation returns a 400 error. If the FloatingIP
+// address is already in use, the operation returns a 409 error code.
+//
+// You can associate the new resource with an internal port by using the PortID
+// field. If you specify a PortID that is not valid, the operation will fail and
+// return a 404 error code.
+//
+// You must also configure an IP address for the port associated with the PortID
+// you have provided - this is what the FixedIP refers to: an IP fixed to a
+// port. Because a port might be associated with multiple IP addresses, you can
+// use the FixedIP field to associate a particular IP address rather than have
+// the API assume one for you. If you specify an IP address that is not valid,
+// the operation will fail and return a 400 error code. If the PortID and
+// FixedIP are already associated with another resource, the operation will
+// fail and return a 409 error code.
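+//
+// For example, a sketch of pinning the association to one fixed IP on a
+// multi-IP port (the IDs and address are illustrative placeholders in the
+// style of the package examples above):
+//
+//    createOpts := floatingips.CreateOpts{
+//        FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee",
+//        PortID:            "76d0a61b-b8e5-490c-9892-4cf674f2bec8",
+//        FixedIP:           "192.168.2.14",
+//    }
+//    fip, err := floatingips.Create(networkClient, createOpts).Extract()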
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToFloatingIPCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+    return
+}
+
+// Get retrieves a particular floating IP resource based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToFloatingIPUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a floating IP resource.
+// To associate the floating IP with a new internal port, provide its PortID.
+// To disassociate the floating IP from all ports, provide an empty string.
+type UpdateOpts struct {
+    Description *string `json:"description,omitempty"`
+    PortID      *string `json:"port_id,omitempty"`
+    FixedIP     string  `json:"fixed_ip_address,omitempty"`
+}
+
+// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder
+// interface
+func (opts UpdateOpts) ToFloatingIPUpdateMap() (map[string]interface{}, error) {
+    b, err := gophercloud.BuildRequestBody(opts, "floatingip")
+    if err != nil {
+        return nil, err
+    }
+
+    if m := b["floatingip"].(map[string]interface{}); m["port_id"] == "" {
+        m["port_id"] = nil
+    }
+
+    return b, nil
+}
+
+// Update allows floating IP resources to be updated. Currently, the only way
+// to "update" a floating IP is to associate it with a new internal port, or
+// disassociate it from all ports. See UpdateOpts for instructions on how to
+// do this.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+    b, err := opts.ToFloatingIPUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// Delete will permanently delete a particular floating IP resource. Please
+// ensure this is what you want - you can also disassociate the IP from existing
+// internal ports.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(resourceURL(c, id), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
new file mode 100644
index 00000000..a9709cce
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
@@ -0,0 +1,131 @@
+package floatingips
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// FloatingIP represents a floating IP resource. A floating IP is an external
+// IP address that is mapped to an internal port and, optionally, a specific
+// IP address on a private network. In other words, it enables access to an
+// instance on a private network from an external network. For this reason,
+// floating IPs can only be defined on networks where the `router:external'
+// attribute (provided by the external network extension) is set to True.
+type FloatingIP struct {
+    // ID is the unique identifier for the floating IP instance.
+    ID string `json:"id"`
+
+    // Description for the floating IP instance.
+    Description string `json:"description"`
+
+    // FloatingNetworkID is the UUID of the external network where the floating
+    // IP is to be created.
+    FloatingNetworkID string `json:"floating_network_id"`
+
+    // FloatingIP is the address of the floating IP on the external network.
+    FloatingIP string `json:"floating_ip_address"`
+
+    // PortID is the UUID of the port on an internal network that is associated
+    // with the floating IP.
+    PortID string `json:"port_id"`
+
+    // FixedIP is the specific IP address of the internal port which should be
+    // associated with the floating IP.
+    FixedIP string `json:"fixed_ip_address"`
+
+    // TenantID is the project owner of the floating IP. Only admin users can
+    // specify a project identifier other than its own.
+    TenantID string `json:"tenant_id"`
+
+    // ProjectID is the project owner of the floating IP.
+    ProjectID string `json:"project_id"`
+
+    // Status is the condition of the API resource.
+    Status string `json:"status"`
+
+    // RouterID is the ID of the router used for this floating IP.
+    RouterID string `json:"router_id"`
+
+    // Tags optionally set via extensions/attributestags
+    Tags []string `json:"tags"`
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract will extract a FloatingIP resource from a result.
+func (r commonResult) Extract() (*FloatingIP, error) {
+    var s FloatingIP
+    err := r.ExtractInto(&s)
+    return &s, err
+}
+
+func (r commonResult) ExtractInto(v interface{}) error {
+    return r.Result.ExtractIntoStructPtr(v, "floatingip")
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type CreateResult struct {
+    commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type GetResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a FloatingIP.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// FloatingIPPage is the page returned by a pager when traversing over a
+// collection of floating IPs.
+type FloatingIPPage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of floating IPs has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r FloatingIPPage) NextPageURL() (string, error) {
+    var s struct {
+        Links []gophercloud.Link `json:"floatingips_links"`
+    }
+    err := r.ExtractInto(&s)
+    if err != nil {
+        return "", err
+    }
+    return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a FloatingIPPage struct is empty.
+func (r FloatingIPPage) IsEmpty() (bool, error) {
+    is, err := ExtractFloatingIPs(r)
+    return len(is) == 0, err
+}
+
+// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage
+// struct, and extracts the elements into a slice of FloatingIP structs. In
+// other words, a generic collection is mapped into a relevant slice.
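+//
+// A short usage sketch (networkClient is assumed to be an initialized
+// *gophercloud.ServiceClient, as in the package examples):
+//
+//    allPages, err := floatingips.List(networkClient, floatingips.ListOpts{}).AllPages()
+//    if err != nil {
+//        panic(err)
+//    }
+//    allFIPs, err := floatingips.ExtractFloatingIPs(allPages)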
+func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) {
+    var s struct {
+        FloatingIPs []FloatingIP `json:"floatingips"`
+    }
+    err := (r.(FloatingIPPage)).ExtractInto(&s)
+    return s.FloatingIPs, err
+}
+
+func ExtractFloatingIPsInto(r pagination.Page, v interface{}) error {
+    return r.(FloatingIPPage).Result.ExtractIntoSlicePtr(v, "floatingips")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
new file mode 100644
index 00000000..1318a184
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go
@@ -0,0 +1,13 @@
+package floatingips
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "floatingips"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go
new file mode 100644
index 00000000..6ede7f5e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go
@@ -0,0 +1,108 @@
+/*
+Package routers enables management and retrieval of Routers from the OpenStack
+Networking service.
+
+Example to List Routers
+
+    listOpts := routers.ListOpts{}
+    allPages, err := routers.List(networkClient, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allRouters, err := routers.ExtractRouters(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, router := range allRouters {
+        fmt.Printf("%+v\n", router)
+    }
+
+Example to Create a Router
+
+    iTrue := true
+    gwi := routers.GatewayInfo{
+        NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b",
+    }
+
+    createOpts := routers.CreateOpts{
+        Name:         "router_1",
+        AdminStateUp: &iTrue,
+        GatewayInfo:  &gwi,
+    }
+
+    router, err := routers.Create(networkClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update a Router
+
+    routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+    routes := []routers.Route{{
+        DestinationCIDR: "40.0.1.0/24",
+        NextHop:         "10.1.0.10",
+    }}
+
+    updateOpts := routers.UpdateOpts{
+        Name:   "new_name",
+        Routes: routes,
+    }
+
+    router, err := routers.Update(networkClient, routerID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Remove all Routes from a Router
+
+    routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+    routes := []routers.Route{}
+
+    updateOpts := routers.UpdateOpts{
+        Routes: routes,
+    }
+
+    router, err := routers.Update(networkClient, routerID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Router
+
+    routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+    err := routers.Delete(networkClient, routerID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Add an Interface to a Router
+
+    routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+    intOpts := routers.AddInterfaceOpts{
+        SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1",
+    }
+
+    intf, err := routers.AddInterface(networkClient, routerID, intOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Remove an Interface from a Router
+
+    routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+
+    intOpts := routers.RemoveInterfaceOpts{
+        SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1",
+    }
+
+    intf, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+*/
+package routers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
new file mode 100644
index 00000000..cf499f98
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go
@@ -0,0 +1,233 @@
+package routers
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the router attributes you want to see returned. SortKey allows you to
+// sort by a particular router attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+    ID           string `q:"id"`
+    Name         string `q:"name"`
+    Description  string `q:"description"`
+    AdminStateUp *bool  `q:"admin_state_up"`
+    Distributed  *bool  `q:"distributed"`
+    Status       string `q:"status"`
+    TenantID     string `q:"tenant_id"`
+    ProjectID    string `q:"project_id"`
+    Limit        int    `q:"limit"`
+    Marker       string `q:"marker"`
+    SortKey      string `q:"sort_key"`
+    SortDir      string `q:"sort_dir"`
+    Tags         string `q:"tags"`
+    TagsAny      string `q:"tags-any"`
+    NotTags      string `q:"not-tags"`
+    NotTagsAny   string `q:"not-tags-any"`
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// routers. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those routers that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager {
+    q, err := gophercloud.BuildQueryString(&opts)
+    if err != nil {
+        return pagination.Pager{Err: err}
+    }
+    u := rootURL(c) + q.String()
+    return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page {
+        return RouterPage{pagination.LinkedPageBase{PageResult: r}}
+    })
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+    ToRouterCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new router. There are
+// no required values.
+type CreateOpts struct {
+    Name                  string       `json:"name,omitempty"`
+    Description           string       `json:"description,omitempty"`
+    AdminStateUp          *bool        `json:"admin_state_up,omitempty"`
+    Distributed           *bool        `json:"distributed,omitempty"`
+    TenantID              string       `json:"tenant_id,omitempty"`
+    ProjectID             string       `json:"project_id,omitempty"`
+    GatewayInfo           *GatewayInfo `json:"external_gateway_info,omitempty"`
+    AvailabilityZoneHints []string     `json:"availability_zone_hints,omitempty"`
+}
+
+// ToRouterCreateMap builds a create request body from CreateOpts.
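+// As a rough sketch of the resulting wire format, CreateOpts{Name: "router_1"}
+// is rendered as:
+//
+//    {"router": {"name": "router_1"}}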
+func (opts CreateOpts) ToRouterCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "router")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// logical router. When it is created, the router does not have an internal
+// interface - it is not associated to any subnet.
+//
+// You can optionally specify an external gateway for a router using the
+// GatewayInfo struct. The external gateway for the router must be plugged into
+// an external network (it is external if its `router:external' field is set to
+// true).
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToRouterCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+    return
+}
+
+// Get retrieves a particular router based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToRouterUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a router.
+type UpdateOpts struct {
+    Name         string       `json:"name,omitempty"`
+    Description  *string      `json:"description,omitempty"`
+    AdminStateUp *bool        `json:"admin_state_up,omitempty"`
+    Distributed  *bool        `json:"distributed,omitempty"`
+    GatewayInfo  *GatewayInfo `json:"external_gateway_info,omitempty"`
+    Routes       []Route      `json:"routes"`
+}
+
+// ToRouterUpdateMap builds an update body based on UpdateOpts.
+func (opts UpdateOpts) ToRouterUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "router")
+}
+
+// Update allows routers to be updated. You can update the name, administrative
+// state, and the external gateway. For more information about how to set the
+// external gateway for a router, see Create. This operation does not enable
+// the update of router interfaces. To do this, use the AddInterface and
+// RemoveInterface functions.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+    b, err := opts.ToRouterUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// Delete will permanently delete a particular router based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(resourceURL(c, id), nil)
+    return
+}
+
+// AddInterfaceOptsBuilder allows extensions to add additional parameters to
+// the AddInterface request.
+type AddInterfaceOptsBuilder interface {
+    ToRouterAddInterfaceMap() (map[string]interface{}, error)
+}
+
+// AddInterfaceOpts represents the options for adding an interface to a router.
+type AddInterfaceOpts struct {
+    SubnetID string `json:"subnet_id,omitempty" xor:"PortID"`
+    PortID   string `json:"port_id,omitempty" xor:"SubnetID"`
+}
+
+// ToRouterAddInterfaceMap builds a request body from AddInterfaceOpts.
+func (opts AddInterfaceOpts) ToRouterAddInterfaceMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "")
+}
+
+// AddInterface attaches a subnet to an internal router interface. You must
+// specify either a SubnetID or PortID in the request body. If you specify both,
+// the operation will fail and an error will be returned.
+//
+// If you specify a SubnetID, the gateway IP address for that particular subnet
+// is used to create the router interface. Alternatively, if you specify a
+// PortID, the IP address associated with the port is used to create the router
+// interface.
+//
+// If you reference a port that is associated with multiple IP addresses, or
+// if the port is associated with zero IP addresses, the operation will fail and
+// a 400 Bad Request error will be returned.
+//
+// If you reference a port already in use, the operation will fail and a 409
+// Conflict error will be returned.
+//
+// The PortID that is returned after using Extract() on the result of this
+// operation can either be the same PortID passed in or, on the other hand, the
+// identifier of a new port created by this operation. After the operation
+// completes, the device ID of the port is set to the router ID, and the
+// device owner attribute is set to `network:router_interface'.
+func AddInterface(c *gophercloud.ServiceClient, id string, opts AddInterfaceOptsBuilder) (r InterfaceResult) {
+    b, err := opts.ToRouterAddInterfaceMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(addInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// RemoveInterfaceOptsBuilder allows extensions to add additional parameters to
+// the RemoveInterface request.
+type RemoveInterfaceOptsBuilder interface {
+    ToRouterRemoveInterfaceMap() (map[string]interface{}, error)
+}
+
+// RemoveInterfaceOpts represents options for removing an interface from
+// a router.
+type RemoveInterfaceOpts struct {
+    SubnetID string `json:"subnet_id,omitempty" or:"PortID"`
+    PortID   string `json:"port_id,omitempty" or:"SubnetID"`
+}
+
+// ToRouterRemoveInterfaceMap builds a request body based on
+// RemoveInterfaceOpts.
+func (opts RemoveInterfaceOpts) ToRouterRemoveInterfaceMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "")
+}
+
+// RemoveInterface removes an internal router interface, which detaches a
+// subnet from the router. You must specify either a SubnetID or PortID, since
+// these values are used to identify the router interface to remove.
+//
+// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you
+// choose to specify both, the subnet ID must correspond to the subnet ID of
+// the first IP address on the port specified by the port ID. Otherwise, the
+// operation will fail and return a 409 Conflict error.
+//
+// If the router, subnet or port which are referenced do not exist or are not
+// visible to you, the operation will fail and a 404 Not Found error will be
+// returned. After this operation completes, the port connecting the router
+// with the subnet is removed from the subnet for the network.
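+//
+// A sketch of removing an interface by port instead of by subnet (the IDs
+// are illustrative placeholders):
+//
+//    intOpts := routers.RemoveInterfaceOpts{
+//        PortID: "76d0a61b-b8e5-490c-9892-4cf674f2bec8",
+//    }
+//    info, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract()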
+func RemoveInterface(c *gophercloud.ServiceClient, id string, opts RemoveInterfaceOptsBuilder) (r InterfaceResult) {
+    b, err := opts.ToRouterRemoveInterfaceMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(removeInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
new file mode 100644
index 00000000..857e1947
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go
@@ -0,0 +1,181 @@
+package routers
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// GatewayInfo represents the information of an external gateway for any
+// particular network router.
+type GatewayInfo struct {
+    NetworkID        string            `json:"network_id,omitempty"`
+    EnableSNAT       *bool             `json:"enable_snat,omitempty"`
+    ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"`
+}
+
+// ExternalFixedIP is the IP address and subnet ID of the external gateway of a
+// router.
+type ExternalFixedIP struct {
+    IPAddress string `json:"ip_address,omitempty"`
+    SubnetID  string `json:"subnet_id"`
+}
+
+// Route is a possible route in a router.
+type Route struct {
+    NextHop         string `json:"nexthop"`
+    DestinationCIDR string `json:"destination"`
+}
+
+// Router represents a Neutron router. A router is a logical entity that
+// forwards packets across internal subnets and NATs (network address
+// translation) them on external networks through an appropriate gateway.
+//
+// A router has an interface for each subnet with which it is associated. By
+// default, the IP address of such interface is the subnet's gateway IP. Also,
+// whenever a router is associated with a subnet, a port for that router
+// interface is added to the subnet's network.
+type Router struct {
+    // Status indicates whether or not a router is currently operational.
+    Status string `json:"status"`
+
+    // GatewayInfo provides information on the external gateway for the router.
+    GatewayInfo GatewayInfo `json:"external_gateway_info"`
+
+    // AdminStateUp is the administrative state of the router.
+    AdminStateUp bool `json:"admin_state_up"`
+
+    // Distributed indicates whether the router is distributed or not.
+    Distributed bool `json:"distributed"`
+
+    // Name is the human readable name for the router. It does not have to be
+    // unique.
+    Name string `json:"name"`
+
+    // Description for the router.
+    Description string `json:"description"`
+
+    // ID is the unique identifier for the router.
+    ID string `json:"id"`
+
+    // TenantID is the project owner of the router. Only admin users can
+    // specify a project identifier other than its own.
+    TenantID string `json:"tenant_id"`
+
+    // ProjectID is the project owner of the router.
+    ProjectID string `json:"project_id"`
+
+    // Routes are a collection of static routes that the router will host.
+    Routes []Route `json:"routes"`
+
+    // Availability zone hints groups network nodes that run services like DHCP, L3, FW, and others.
+    // Used to make network resources highly available.
+    AvailabilityZoneHints []string `json:"availability_zone_hints"`
+
+    // Tags optionally set via extensions/attributestags
+    Tags []string `json:"tags"`
+}
+
+// RouterPage is the page returned by a pager when traversing over a
+// collection of routers.
+type RouterPage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of routers has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r RouterPage) NextPageURL() (string, error) {
+    var s struct {
+        Links []gophercloud.Link `json:"routers_links"`
+    }
+    err := r.ExtractInto(&s)
+    if err != nil {
+        return "", err
+    }
+    return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a RouterPage struct is empty.
+func (r RouterPage) IsEmpty() (bool, error) {
+    is, err := ExtractRouters(r)
+    return len(is) == 0, err
+}
+
+// ExtractRouters accepts a Page struct, specifically a RouterPage struct,
+// and extracts the elements into a slice of Router structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractRouters(r pagination.Page) ([]Router, error) {
+    var s struct {
+        Routers []Router `json:"routers"`
+    }
+    err := (r.(RouterPage)).ExtractInto(&s)
+    return s.Routers, err
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a router.
+func (r commonResult) Extract() (*Router, error) {
+    var s struct {
+        Router *Router `json:"router"`
+    }
+    err := r.ExtractInto(&s)
+    return s.Router, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Router.
+type CreateResult struct {
+    commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Router.
+type GetResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Router.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// InterfaceInfo represents information about a particular router interface. As
+// mentioned above, in order for a router to forward to a subnet, it needs an
+// interface.
+type InterfaceInfo struct {
+    // SubnetID is the ID of the subnet which this interface is associated with.
+    SubnetID string `json:"subnet_id"`
+
+    // PortID is the ID of the port that is a part of the subnet.
+    PortID string `json:"port_id"`
+
+    // ID is the UUID of the interface.
+    ID string `json:"id"`
+
+    // TenantID is the owner of the interface.
+    TenantID string `json:"tenant_id"`
+}
+
+// InterfaceResult represents the result of interface operations, such as
+// AddInterface() and RemoveInterface(). Call its Extract method to interpret
+// the result as an InterfaceInfo.
+type InterfaceResult struct {
+    gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an information struct.
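+//
+// For example (a sketch; routerID and intOpts are assumed to be set up as in
+// the AddInterface example in doc.go):
+//
+//    info, err := routers.AddInterface(networkClient, routerID, intOpts).Extract()
+//    if err == nil {
+//        fmt.Println(info.PortID) // the port backing the new router interface
+//    }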
+func (r InterfaceResult) Extract() (*InterfaceInfo, error) {
+    var s InterfaceInfo
+    err := r.ExtractInto(&s)
+    return &s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
new file mode 100644
index 00000000..f9e9da32
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go
@@ -0,0 +1,21 @@
+package routers
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "routers"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id)
+}
+
+func addInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id, "add_router_interface")
+}
+
+func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id, "remove_router_interface")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go
new file mode 100644
index 00000000..bad3324b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go
@@ -0,0 +1,59 @@
+/*
+Package members provides information and interaction with Members of the
+Load Balancer as a Service extension for the OpenStack Networking service.
+
+Example to List Members
+
+    listOpts := members.ListOpts{
+        ProtocolPort: 80,
+    }
+
+    allPages, err := members.List(networkClient, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allMembers, err := members.ExtractMembers(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, member := range allMembers {
+        fmt.Printf("%+v\n", member)
+    }
+
+Example to Create a Member
+
+    createOpts := members.CreateOpts{
+        Address:      "192.168.2.14",
+        ProtocolPort: 80,
+        PoolID:       "0b266a12-0fdf-4434-bd11-649d84e54bd5",
+    }
+
+    member, err := members.Create(networkClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update a Member
+
+    memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf"
+
+    updateOpts := members.UpdateOpts{
+        AdminStateUp: gophercloud.Disabled,
+    }
+
+    member, err := members.Update(networkClient, memberID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Member
+
+    memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf"
+    err := members.Delete(networkClient, memberID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package members
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
new file mode 100644
index 00000000..1a312884
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go
@@ -0,0 +1,124 @@
+package members
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOpts allows the filtering and sorting of paginated collections through
Filtering is achieved by passing in struct field values that map to +// the member attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Weight int `q:"weight"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + PoolID string `q:"pool_id"` + Address string `q:"address"` + ProtocolPort int `q:"protocol_port"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// members. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those members that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return MemberPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBMemberCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new pool member. +type CreateOpts struct { + // Address is the IP address of the member. + Address string `json:"address" required:"true"` + + // ProtocolPort is the port on which the application is hosted. + ProtocolPort int `json:"protocol_port" required:"true"` + + // PoolID is the pool to which this member will belong. + PoolID string `json:"pool_id" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` +} + +// ToLBMemberCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToLBMemberCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool member. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBMemberCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool member based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLBMemberUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a pool member. +type UpdateOpts struct { + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMemberUpdateMap builds a request body from UpdateOpts.
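+// For example (a sketch), an UpdateOpts with AdminStateUp set to gophercloud.Disabled +// serializes to {"member": {"admin_state_up": false}}.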
+func (opts UpdateOpts) ToLBMemberUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// Update allows members to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBMemberUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// Delete will permanently delete a particular member based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go new file mode 100644 index 00000000..804dbe84 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go @@ -0,0 +1,109 @@ +package members + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Member represents the application running on a backend server. +type Member struct { + // Status is the status of the member. Indicates whether the member + // is operational. + Status string + + // Weight is the weight of member. + Weight int + + // AdminStateUp is the administrative state of the member, which is up + // (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // TenantID is the owner of the member. + TenantID string `json:"tenant_id"` + + // PoolID is the pool to which the member belongs. + PoolID string `json:"pool_id"` + + // Address is the IP address of the member. + Address string + + // ProtocolPort is the port on which the application is hosted. + ProtocolPort int `json:"protocol_port"` + + // ID is the unique ID for the member. + ID string +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of pool members. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MemberPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"members_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. +func (r MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(r) + return len(is) == 0, err +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Member structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := (r.(MemberPage)).ExtractInto(&s) + return s.Members, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a member. 
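+// A usage sketch (assuming a configured networkClient and a hypothetical memberID): +// +//	member, err := members.Get(networkClient, memberID).Extract()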
+func (r commonResult) Extract() (*Member, error) { + var s struct { + Member *Member `json:"member"` + } + err := r.ExtractInto(&s) + return s.Member, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Member. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Member. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Member. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the result succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go new file mode 100644 index 00000000..e2248f81 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go @@ -0,0 +1,16 @@ +package members + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "members" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go new file mode 100644 index 00000000..b5c0f29f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go @@ -0,0 +1,63 @@ +/* +Package monitors provides information and interaction with the Monitors +of the Load Balancer as a Service extension for the OpenStack Networking +Service. 
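+ +Four monitor types are supported: PING, TCP, HTTP, and HTTPS; see the +MonitorType constants in this package and the Create example below.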
+ +Example to List Monitors + + listOpts := monitors.ListOpts{ + Type: "HTTP", + } + + allPages, err := monitors.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + panic(err) + } + + for _, monitor := range allMonitors { + fmt.Printf("%+v\n", monitor) + } + +Example to Create a Monitor + + createOpts := monitors.CreateOpts{ + Type: "HTTP", + Delay: 20, + Timeout: 20, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + } + + monitor, err := monitors.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Monitor + + monitorID := "681aed03-aadb-43ae-aead-b9016375650a" + + updateOpts := monitors.UpdateOpts{ + Timeout: 30, + } + + monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Monitor + + monitorID := "681aed03-aadb-43ae-aead-b9016375650a" + err := monitors.Delete(networkClient, monitorID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go new file mode 100644 index 00000000..9ed0c769 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go @@ -0,0 +1,227 @@ +package monitors + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the monitor attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + TenantID string `q:"tenant_id"` + Type string `q:"type"` + Delay int `q:"delay"` + Timeout int `q:"timeout"` + MaxRetries int `q:"max_retries"` + HTTPMethod string `q:"http_method"` + URLPath string `q:"url_path"` + ExpectedCodes string `q:"expected_codes"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// monitors. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those monitors that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return MonitorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// MonitorType represents the type of an LB monitor. +type MonitorType string + +// Constants that represent approved monitoring types.
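+// Pass one of these as CreateOpts.Type; for example, TypeHTTP selects an HTTP probe.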
+const ( + TypePING MonitorType = "PING" + TypeTCP MonitorType = "TCP" + TypeHTTP MonitorType = "HTTP" + TypeHTTPS MonitorType = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBMonitorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new health monitor. +type CreateOpts struct { + // MonitorType is the type of probe, which is PING, TCP, HTTP, or HTTPS, + // that is sent by the load balancer to verify the member state. + Type MonitorType `json:"type" required:"true"` + + // Delay is the time, in seconds, between sending probes to members. + Delay int `json:"delay" required:"true"` + + // Timeout is the maximum number of seconds for a monitor to wait for a ping + // reply before it times out. The value must be less than the delay value. + Timeout int `json:"timeout" required:"true"` + + // MaxRetries is the number of permissible ping failures before changing the + // member's status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries" required:"true"` + + // URLPath is the URI path that will be accessed if monitor type + // is HTTP or HTTPS. Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // HTTPMethod is the HTTP method used for requests by the monitor. If this + // attribute is not specified, it defaults to "GET". Required for HTTP(S) + // types. + HTTPMethod string `json:"http_method,omitempty"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor + // You can either specify a single status like "200", or a range like + // "200-202". Required for HTTP(S) types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // AdminStateUp denotes whether the monitor is administratively up or down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMonitorCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToLBMonitorCreateMap() (map[string]interface{}, error) { + if opts.Type == TypeHTTP || opts.Type == TypeHTTPS { + if opts.URLPath == "" { + err := gophercloud.ErrMissingInput{} + err.Argument = "monitors.CreateOpts.URLPath" + return nil, err + } + if opts.ExpectedCodes == "" { + err := gophercloud.ErrMissingInput{} + err.Argument = "monitors.CreateOpts.ExpectedCodes" + return nil, err + } + } + if opts.Delay < opts.Timeout { + err := gophercloud.ErrInvalidInput{} + err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" + err.Info = "Delay must be greater than or equal to timeout" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "health_monitor") +} + +// Create is an operation which provisions a new health monitor. There are +// different types of monitor you can provision: PING, TCP or HTTP(S). Below +// are examples of how to create each one. 
+// +// Here is an example config struct to use when creating a PING or TCP monitor: +// +// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} +// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} +// +// Here is an example config struct to use when creating an HTTP(S) monitor: +// +// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, +// HTTPMethod: "HEAD", ExpectedCodes: "200"} +// +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBMonitorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular health monitor based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLBMonitorUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing monitor. +// Attributes not listed here but that appear in CreateOpts are immutable and +// cannot be updated. +type UpdateOpts struct { + // Delay is the time, in seconds, between sending probes to members. + Delay int `json:"delay,omitempty"` + + // Timeout is the maximum number of seconds for a monitor to wait for a ping + // reply before it times out. The value must be less than the delay value. + Timeout int `json:"timeout,omitempty"` + + // MaxRetries is the number of permissible ping failures before changing the + // member's status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries,omitempty"` + + // URLPath is the URI path that will be accessed if monitor type + // is HTTP or HTTPS. + URLPath string `json:"url_path,omitempty"` + + // HTTPMethod is the HTTP method used for requests by the monitor. If this + // attribute is not specified, it defaults to "GET". + HTTPMethod string `json:"http_method,omitempty"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor. + // You can either specify a single status like "200", or a range like + // "200-202". + ExpectedCodes string `json:"expected_codes,omitempty"` + + // AdminStateUp denotes whether the monitor is administratively up or down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMonitorUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToLBMonitorUpdateMap() (map[string]interface{}, error) { + if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { + err := gophercloud.ErrInvalidInput{} + err.Argument = "monitors.UpdateOpts.Delay/monitors.UpdateOpts.Timeout" + err.Value = fmt.Sprintf("%d/%d", opts.Delay, opts.Timeout) + err.Info = "Delay must be greater than or equal to timeout" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "health_monitor") +} + +// Update is an operation which modifies the attributes of the specified +// monitor. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular monitor based on its unique ID.
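+// A usage sketch (assuming a configured networkClient and a hypothetical monitorID): +// +//	err := monitors.Delete(networkClient, monitorID).ExtractErr()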
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go new file mode 100644 index 00000000..cc99f7cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go @@ -0,0 +1,141 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // ID is the unique ID for the Monitor. + ID string + + // Name is the monitor name. Does not have to be unique. + Name string + + // TenantID is the owner of the Monitor. + TenantID string `json:"tenant_id"` + + // Type is the type of probe sent by the load balancer to verify the member + // state, which is PING, TCP, HTTP, or HTTPS. + Type string + + // Delay is the time, in seconds, between sending probes to members. + Delay int + + // Timeout is the maximum number of seconds for a monitor to wait for a + // connection to be established before it times out. This value must be less + // than the delay value. + Timeout int + + // MaxRetries is the number of allowed connection failures before changing the + // status of the member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // HTTPMethod is the HTTP method that the monitor uses for requests. + HTTPMethod string `json:"http_method"` + + // URLPath is the HTTP path of the request sent by the monitor to test the + // health of a member. Must be a string beginning with a forward slash (/). + URLPath string `json:"url_path"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor. + ExpectedCodes string `json:"expected_codes"` + + // AdminStateUp is the administrative state of the health monitor, which is up + // (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Status is the status of the health monitor. Indicates whether the health + // monitor is operational. + Status string +} + +// MonitorPage is the page returned by a pager when traversing over a +// collection of health monitors. +type MonitorPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of monitors has reached +// the end of a page and the pager seeks to traverse over a new one. 
In order +// to do this, it needs to construct the next page's URL. +func (r MonitorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"health_monitors_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MonitorPage struct is empty. +func (r MonitorPage) IsEmpty() (bool, error) { + is, err := ExtractMonitors(r) + return len(is) == 0, err +} + +// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, +// and extracts the elements into a slice of Monitor structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMonitors(r pagination.Page) ([]Monitor, error) { + var s struct { + Monitors []Monitor `json:"health_monitors"` + } + err := (r.(MonitorPage)).ExtractInto(&s) + return s.Monitors, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a monitor. +func (r commonResult) Extract() (*Monitor, error) { + var s struct { + Monitor *Monitor `json:"health_monitor"` + } + err := r.ExtractInto(&s) + return s.Monitor, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Monitor. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Monitor. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Monitor. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go new file mode 100644 index 00000000..e9d90fcc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "health_monitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go new file mode 100644 index 00000000..25c4204d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go @@ -0,0 +1,81 @@ +/* +Package pools provides information and interaction with the Pools of the +Load Balancing as a Service extension for the OpenStack Networking service.
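+ +A typical workflow is to create a pool, add members to it (see the members +package), and then associate a health monitor, as sketched in the examples below.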
+ +Example to List Pools + + listOpts := pools.ListOpts{ + SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", + } + + allPages, err := pools.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPools, err := pools.ExtractPools(allPages) + if err != nil { + panic(err) + } + + for _, pool := range allPools { + fmt.Printf("%+v\n", pool) + } + +Example to Create a Pool + + createOpts := pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + Provider: "haproxy", + } + + pool, err := pools.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + + updateOpts := pools.UpdateOpts{ + LBMethod: pools.LBMethodLeastConnections, + } + + pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + err := pools.Delete(networkClient, poolID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Associate a Monitor to a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" + + pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract() + if err != nil { + panic(err) + } + +Example to Disassociate a Monitor from a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" + + pool, err := pools.DisassociateMonitor(networkClient, poolID, monitorID).Extract() + if err != nil { + panic(err) + } +*/ +package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go new file mode 100644 index 00000000..f5f4e9a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go @@ -0,0 +1,175 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the pool attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + LBMethod string `q:"lb_method"` + Protocol string `q:"protocol"` + SubnetID string `q:"subnet_id"` + TenantID string `q:"tenant_id"` + AdminStateUp *bool `q:"admin_state_up"` + Name string `q:"name"` + ID string `q:"id"` + VIPID string `q:"vip_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// pools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those pools that are owned by the +// tenant who submits the request, unless an admin user submits the request.
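+// +// A usage sketch (assuming a configured networkClient and a hypothetical subnetID): +// +//	pager := pools.List(networkClient, pools.ListOpts{SubnetID: subnetID})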
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// LBMethod is a type used for possible load balancing methods. +type LBMethod string + +// LBProtocol is a type used for possible load balancing protocols. +type LBProtocol string + +// Supported attributes for create/update operations. +const ( + LBMethodRoundRobin LBMethod = "ROUND_ROBIN" + LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" + + ProtocolTCP LBProtocol = "TCP" + ProtocolHTTP LBProtocol = "HTTP" + ProtocolHTTPS LBProtocol = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new pool. +type CreateOpts struct { + // Name of the pool. + Name string `json:"name" required:"true"` + + // Protocol used by the pool members; you can use either + // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. + Protocol LBProtocol `json:"protocol" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // SubnetID is the network on which the members of the pool will be located. + // Only members that are on this network can be added to the pool. + SubnetID string `json:"subnet_id,omitempty"` + + // LBMethod is the algorithm used to distribute load between the members of + // the pool. The current specification supports LBMethodRoundRobin and + // LBMethodLeastConnections as valid values for this attribute. + LBMethod LBMethod `json:"lb_method" required:"true"` + + // Provider of the pool. + Provider string `json:"provider,omitempty"` +} + +// ToLBPoolCreateMap builds a request body based on CreateOpts. +func (opts CreateOpts) ToLBPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Create accepts a CreateOptsBuilder and uses the values to create a new +// load balancer pool. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLBPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a pool. +type UpdateOpts struct { + // Name of the pool. + Name *string `json:"name,omitempty"` + + // LBMethod is the algorithm used to distribute load between the members of + // the pool. The current specification supports LBMethodRoundRobin and + // LBMethodLeastConnections as valid values for this attribute. + LBMethod LBMethod `json:"lb_method,omitempty"` +} + +// ToLBPoolUpdateMap builds a request body based on UpdateOpts.
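+// For example (a sketch), an UpdateOpts with LBMethod set to LBMethodLeastConnections +// serializes to {"pool": {"lb_method": "LEAST_CONNECTIONS"}}.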
+func (opts UpdateOpts) ToLBPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Update allows pools to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular pool based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// AssociateMonitor will associate a health monitor with a particular pool. +// Once associated, the health monitor will start monitoring the members of the +// pool and will deactivate these members if they are deemed unhealthy. A +// member can be deactivated (status set to INACTIVE) if any of the health +// monitors finds it unhealthy. +func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { + b := map[string]interface{}{"health_monitor": map[string]string{"id": monitorID}} + _, r.Err = c.Post(associateURL(c, poolID), b, &r.Body, nil) + return +} + +// DisassociateMonitor will disassociate a health monitor from a particular +// pool. When disassociation is successful, the health monitor will no longer +// check for the health of the members of the pool. +func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { + _, r.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go new file mode 100644 index 00000000..c2bae82d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go @@ -0,0 +1,137 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and process traffic. The load balancing function +// chooses a member of the pool according to the configured load balancing +// method to handle the new requests or connections received on the VIP address. +// There is only one pool per virtual IP. +type Pool struct { + // Status of the pool. Indicates whether the pool is operational. + Status string + + // LBMethod is the load-balancer algorithm, which is round-robin, + // least-connections, and so on. This value, which must be supported, is + // dependent on the provider. + LBMethod string `json:"lb_method"` + + // Protocol of the pool, which is TCP, HTTP, or HTTPS. + Protocol string + + // Description for the pool. + Description string + + // MonitorIDs are the IDs of associated monitors which check the health of + // the pool members. + MonitorIDs []string `json:"health_monitors"` + + // SubnetID is the network on which the members of the pool will be located. + // Only members that are on this network can be added to the pool. + SubnetID string `json:"subnet_id"` + + // TenantID is the owner of the pool. + TenantID string `json:"tenant_id"` + + // AdminStateUp is the administrative state of the pool, which is up + // (true) or down (false).
+ AdminStateUp bool `json:"admin_state_up"` + + // Name of the pool. + Name string + + // MemberIDs is the list of member IDs that belong to the pool. + MemberIDs []string `json:"members"` + + // ID is the unique ID for the pool. + ID string + + // VIPID is the ID of the virtual IP associated with this pool. + VIPID string `json:"vip_id"` + + // The provider. + Provider string +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of pools. +type PoolPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of pools has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"pools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r PoolPage) IsEmpty() (bool, error) { + is, err := ExtractPools(r) + return len(is) == 0, err +} + +// ExtractPools accepts a Page struct, specifically a PoolPage struct, +// and extracts the elements into a slice of Pool structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPools(r pagination.Page) ([]Pool, error) { + var s struct { + Pools []Pool `json:"pools"` + } + err := (r.(PoolPage)).ExtractInto(&s) + return s.Pools, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a pool. +func (r commonResult) Extract() (*Pool, error) { + var s struct { + Pool *Pool `json:"pool"` + } + err := r.ExtractInto(&s) + return s.Pool, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Pool. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Pool. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Pool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AssociateResult represents the result of an association operation. Call its Extract +// method to interpret it as a Pool.
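+// +// A usage sketch (assuming a configured networkClient and hypothetical poolID and monitorID values): +// +//	pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract()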
+type AssociateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go new file mode 100644 index 00000000..fe3601bb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go @@ -0,0 +1,25 @@ +package pools + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "pools" + monitorPath = "health_monitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func associateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, monitorPath) +} + +func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string { + return c.ServiceURL(rootPath, resourcePath, poolID, monitorPath, monitorID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go new file mode 100644 index 00000000..7fd86104 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go @@ -0,0 +1,65 @@ +/* +Package vips provides information and interaction with the Virtual IPs of the +Load Balancing as a Service extension for the OpenStack Networking service. + +Example to List Virtual IPs + + listOpts := vips.ListOpts{ + SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", + } + + allPages, err := vips.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allVIPs, err := vips.ExtractVIPs(allPages) + if err != nil { + panic(err) + } + + for _, vip := range allVIPs { + fmt.Printf("%+v\n", vip) + } + +Example to Create a Virtual IP + + createOpts := vips.CreateOpts{ + Protocol: "HTTP", + Name: "NewVip", + AdminStateUp: gophercloud.Enabled, + SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", + PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + ProtocolPort: 80, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + + vip, err := vips.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Virtual IP + + vipID := "93f1bad4-0423-40a8-afac-3fc541839912" + + i1000 := 1000 + updateOpts := vips.UpdateOpts{ + ConnLimit: &i1000, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + + vip, err := vips.Update(networkClient, vipID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Virtual IP + + vipID := "93f1bad4-0423-40a8-afac-3fc541839912" + err := vips.Delete(networkClient, vipID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package vips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go new file mode 100644 index 00000000..53b81bfd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go @@ -0,0 +1,180 @@ +package vips + +import ( + "github.com/gophercloud/gophercloud" + 
"github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + TenantID string `q:"tenant_id"` + SubnetID string `q:"subnet_id"` + Address string `q:"address"` + PortID string `q:"port_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// Virtual IPs. It accepts a ListOpts struct, which allows you to filter and +// sort the returned collection for greater efficiency. +// +// Default policy settings return only those virtual IPs that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return VIPPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create Request. +type CreateOptsBuilder interface { + ToVIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new virtual IP. +type CreateOpts struct { + // Name is the human-readable name for the VIP. Does not have to be unique. + Name string `json:"name" required:"true"` + + // SubnetID is the network on which to allocate the VIP's address. A tenant + // can only create VIPs on networks authorized by policy (e.g. networks that + // belong to them or networks that are shared). + SubnetID string `json:"subnet_id" required:"true"` + + // Protocol - can either be TCP, HTTP or HTTPS. + Protocol string `json:"protocol" required:"true"` + + // ProtocolPort is the port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // PoolID is the ID of the pool with which the VIP is associated. + PoolID string `json:"pool_id" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // Address is the IP address of the VIP. + Address string `json:"address,omitempty"` + + // Description is the human-readable description for the VIP. + Description string `json:"description,omitempty"` + + // Persistence is the the of session persistence to use. + // Omit this field to prevent session persistence. + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // ConnLimit is the maximum number of connections allowed for the VIP. + ConnLimit *int `json:"connection_limit,omitempty"` + + // AdminStateUp is the administrative state of the VIP. A valid value is + // true (UP) or false (DOWN). 
+ AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToVIPCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "vip") +} + +// Create is an operation which provisions a new virtual IP based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +// +// Please note that the PoolID should refer to a pool that is not already +// associated with another vip. If the pool is already used by another vip, +// then the operation will fail and a 409 Conflict error will be returned. +// +// Users with an admin role can create VIPs on behalf of other tenants by +// specifying a TenantID attribute different than their own. +func Create(c *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) { + b, err := opts.ToVIPCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular virtual IP based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVIPUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing virtual IP. +// Attributes not listed here but that appear in CreateOpts are immutable and +// cannot be updated. +type UpdateOpts struct { + // Name is the human-readable name for the VIP. Does not have to be unique. + Name *string `json:"name,omitempty"` + + // PoolID is the ID of the pool with which the VIP is associated. + PoolID *string `json:"pool_id,omitempty"` + + // Description is the human-readable description for the VIP. + Description *string `json:"description,omitempty"` + + // Persistence is the type of session persistence to use. + // Omit this field to prevent session persistence. + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // ConnLimit is the maximum number of connections allowed for the VIP. + ConnLimit *int `json:"connection_limit,omitempty"` + + // AdminStateUp is the administrative state of the VIP. A valid value is + // true (UP) or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToVIPUpdateMap builds a request body based on UpdateOpts. +func (opts UpdateOpts) ToVIPUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "vip") +} + +// Update is an operation which modifies the attributes of the specified VIP. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVIPUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular virtual IP based on its unique ID.
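+// A usage sketch (assuming a configured networkClient and a hypothetical vipID): +// +//	err := vips.Delete(networkClient, vipID).ExtractErr()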
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go new file mode 100644 index 00000000..cb0994a7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go @@ -0,0 +1,156 @@ +package vips + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SessionPersistence represents the session persistence feature of the load +// balancing service. It attempts to force connections or requests in the same +// session to be processed by the same member as long as it is active. Three +// types of persistence are supported: +// +// SOURCE_IP: With this mode, all connections originating from the same source +// IP address will be handled by the same member of the pool. +// HTTP_COOKIE: With this persistence mode, the load balancing function will +// create a cookie on the first request from a client. Subsequent +// requests containing the same cookie value will be handled by +// the same member of the pool. +// APP_COOKIE: With this persistence mode, the load balancing function will +// rely on a cookie established by the backend application. All +// requests carrying the same cookie value will be handled by the +// same member of the pool. +type SessionPersistence struct { + // Type is the type of persistence mode. + Type string `json:"type"` + + // CookieName is the name of the cookie if the persistence mode is set appropriately. + CookieName string `json:"cookie_name,omitempty"` +} + +// VirtualIP is the primary load balancing configuration object that specifies +// the virtual IP address and port on which client traffic is received, as well +// as other details such as the load balancing method to be used, protocol, etc. +// This entity is sometimes known in LB products under the name of a "virtual +// server", a "vserver" or a "listener". +type VirtualIP struct { + // ID is the unique ID for the VIP. + ID string `json:"id"` + + // TenantID is the owner of the VIP. + TenantID string `json:"tenant_id"` + + // Name is the human-readable name for the VIP. Does not have to be unique. + Name string `json:"name"` + + // Description is the human-readable description for the VIP. + Description string `json:"description"` + + // SubnetID is the ID of the subnet on which to allocate the VIP address. + SubnetID string `json:"subnet_id"` + + // Address is the IP address of the VIP. + Address string `json:"address"` + + // Protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // ProtocolPort is the port on which to listen to client traffic that is + // associated with the VIP address. A valid value is from 0 to 65535. + ProtocolPort int `json:"protocol_port"` + + // PoolID is the ID of the pool with which the VIP is associated. + PoolID string `json:"pool_id"` + + // PortID is the ID of the port which belongs to the load balancer. + PortID string `json:"port_id"` + + // Persistence indicates whether connections in the same session will be + // processed by the same pool member or not. + Persistence SessionPersistence `json:"session_persistence"` + + // ConnLimit is the maximum number of connections allowed for the VIP.
+ // Default is -1, meaning no limit. + ConnLimit int `json:"connection_limit"` + + // AdminStateUp is the administrative state of the VIP. A valid value is + // true (UP) or false (DOWN). + AdminStateUp bool `json:"admin_state_up"` + + // Status is the status of the VIP. Indicates whether the VIP is operational. + Status string `json:"status"` +} + +// VIPPage is the page returned by a pager when traversing over a +// collection of virtual IPs. +type VIPPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of virtual IPs has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r VIPPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"vips_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a VIPPage struct is empty. +func (r VIPPage) IsEmpty() (bool, error) { + is, err := ExtractVIPs(r) + return len(is) == 0, err +} + +// ExtractVIPs accepts a Page struct, specifically a VIPPage struct, +// and extracts the elements into a slice of VirtualIP structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractVIPs(r pagination.Page) ([]VirtualIP, error) { + var s struct { + VIPs []VirtualIP `json:"vips"` + } + err := (r.(VIPPage)).ExtractInto(&s) + return s.VIPs, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a VirtualIP. +func (r commonResult) Extract() (*VirtualIP, error) { + var s struct { + VirtualIP *VirtualIP `json:"vip"` + } + err := r.ExtractInto(&s) + return s.VirtualIP, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a VirtualIP. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a VirtualIP. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a VirtualIP. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go new file mode 100644 index 00000000..584a1cf6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go @@ -0,0 +1,16 @@ +package vips + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "vips" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/doc.go new file mode 100644 index 00000000..81357990 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/doc.go @@ -0,0 +1,123 @@ +/* +Package l7policies provides information and interaction with L7Policies and +Rules of the LBaaS v2 extension for the OpenStack Networking service. + +Example to Create an L7Policy + + createOpts := l7policies.CreateOpts{ + Name: "redirect-example.com", + ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", + Action: l7policies.ActionRedirectToURL, + RedirectURL: "http://www.example.com", + } + l7policy, err := l7policies.Create(lbClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to List L7Policies + + listOpts := l7policies.ListOpts{ + ListenerID: "c79a4468-d788-410c-bf79-9a8ef6354852", + } + allPages, err := l7policies.List(lbClient, listOpts).AllPages() + if err != nil { + panic(err) + } + allL7Policies, err := l7policies.ExtractL7Policies(allPages) + if err != nil { + panic(err) + } + for _, l7policy := range allL7Policies { + fmt.Printf("%+v\n", l7policy) + } + +Example to Get an L7Policy + + l7policy, err := l7policies.Get(lbClient, "023f2e34-7806-443b-bfae-16c324569a3d").Extract() + if err != nil { + panic(err) + } + +Example to Delete an L7Policy + + l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := l7policies.Delete(lbClient, l7policyID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Update an L7Policy + + l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64" + name := "new-name" + updateOpts := l7policies.UpdateOpts{ + Name: &name, + } + l7policy, err := l7policies.Update(lbClient, l7policyID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create a Rule + + l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64" + createOpts := l7policies.CreateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareTypeRegex, + Value: "/images*", + } + rule, err := l7policies.CreateRule(lbClient, l7policyID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to List L7 Rules + + l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64" + listOpts := l7policies.ListRulesOpts{ + RuleType: l7policies.TypePath, + } + allPages, err := l7policies.ListRules(lbClient, l7policyID, listOpts).AllPages() + if err != nil { + panic(err) + } + allRules, err := l7policies.ExtractRules(allPages) + if err != nil { + panic(err) + } + for _, rule := range allRules { + fmt.Printf("%+v\n",
+    }
+
+Example to Get an L7 Rule
+
+    l7rule, err := l7policies.GetRule(lbClient, "023f2e34-7806-443b-bfae-16c324569a3d", "53ad8ab8-40fa-11e8-a508-00224d6b7bc1").Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete an L7 Rule
+
+    l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+    ruleID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+    err := l7policies.DeleteRule(lbClient, l7policyID, ruleID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update a Rule
+
+    l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+    ruleID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+    updateOpts := l7policies.UpdateRuleOpts{
+        RuleType: l7policies.TypePath,
+        CompareType: l7policies.CompareTypeRegex,
+        Value: "/images/special*",
+    }
+    rule, err := l7policies.UpdateRule(lbClient, l7policyID, ruleID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+*/
+package l7policies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/requests.go
new file mode 100644
index 00000000..9d2b3a0d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/requests.go
@@ -0,0 +1,376 @@
+package l7policies
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+    ToL7PolicyCreateMap() (map[string]interface{}, error)
+}
+
+type Action string
+type RuleType string
+type CompareType string
+
+const (
+    ActionRedirectToPool Action = "REDIRECT_TO_POOL"
+    ActionRedirectToURL Action = "REDIRECT_TO_URL"
+    ActionReject Action = "REJECT"
+
+    TypeCookie RuleType = "COOKIE"
+    TypeFileType RuleType = "FILE_TYPE"
+    TypeHeader RuleType = "HEADER"
+    TypeHostName RuleType = "HOST_NAME"
+    TypePath RuleType = "PATH"
+
+    CompareTypeContains CompareType = "CONTAINS"
+    CompareTypeEndWith CompareType = "ENDS_WITH"
+    CompareTypeEqual CompareType = "EQUAL_TO"
+    CompareTypeRegex CompareType = "REGEX"
+    CompareTypeStartWith CompareType = "STARTS_WITH"
+)
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+    // Name of the L7 policy.
+    Name string `json:"name,omitempty"`
+
+    // The ID of the listener.
+    ListenerID string `json:"listener_id" required:"true"`
+
+    // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT.
+    Action Action `json:"action" required:"true"`
+
+    // The position of this policy on the listener.
+    Position int32 `json:"position,omitempty"`
+
+    // A human-readable description for the resource.
+    Description string `json:"description,omitempty"`
+
+    // TenantID is the UUID of the tenant who owns the L7 policy in octavia.
+    // Only administrative users can specify a project UUID other than their own.
+    TenantID string `json:"tenant_id,omitempty"`
+
+    // Requests matching this policy will be redirected to the pool with this ID.
+    // Only valid if action is REDIRECT_TO_POOL.
+    RedirectPoolID string `json:"redirect_pool_id,omitempty"`
+
+    // Requests matching this policy will be redirected to this URL.
+    // Only valid if action is REDIRECT_TO_URL.
+    RedirectURL string `json:"redirect_url,omitempty"`
+
+    // The administrative state of the Loadbalancer.
A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToL7PolicyCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToL7PolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "l7policy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new l7policy. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToL7PolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToL7PolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. +type ListOpts struct { + Name string `q:"name"` + Description string `q:"description"` + ListenerID string `q:"listener_id"` + Action string `q:"action"` + TenantID string `q:"tenant_id"` + RedirectPoolID string `q:"redirect_pool_id"` + RedirectURL string `q:"redirect_url"` + Position int32 `q:"position"` + AdminStateUp bool `q:"admin_state_up"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToL7PolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToL7PolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// l7policies. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those l7policies that are owned by the +// project who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToL7PolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return L7PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a particular l7policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular l7policy based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToL7PolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Name of the L7 policy, empty string is allowed. + Name *string `json:"name,omitempty"` + + // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT. + Action Action `json:"action,omitempty"` + + // The position of this policy on the listener. + Position int32 `json:"position,omitempty"` + + // A human-readable description for the resource, empty string is allowed. 
+ Description *string `json:"description,omitempty"` + + // Requests matching this policy will be redirected to the pool with this ID. + // Only valid if action is REDIRECT_TO_POOL. + RedirectPoolID *string `json:"redirect_pool_id,omitempty"` + + // Requests matching this policy will be redirected to this URL. + // Only valid if action is REDIRECT_TO_URL. + RedirectURL *string `json:"redirect_url,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToL7PolicyUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToL7PolicyUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "l7policy") + if err != nil { + return nil, err + } + + m := b["l7policy"].(map[string]interface{}) + + if m["redirect_pool_id"] == "" { + m["redirect_pool_id"] = nil + } + + if m["redirect_url"] == "" { + m["redirect_url"] = nil + } + + return b, nil +} + +// Update allows l7policy to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToL7PolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// CreateRuleOpts is the common options struct used in this package's CreateRule +// operation. +type CreateRuleOpts struct { + // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH. + RuleType RuleType `json:"type" required:"true"` + + // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH. + CompareType CompareType `json:"compare_type" required:"true"` + + // The value to use for the comparison. For example, the file type to compare. + Value string `json:"value" required:"true"` + + // TenantID is the UUID of the tenant who owns the rule in octavia. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // The key to use for the comparison. For example, the name of the cookie to evaluate. + Key string `json:"key,omitempty"` + + // When true the logic of the rule is inverted. For example, with invert true, + // equal to would become not equal to. Default is false. + Invert bool `json:"invert,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToRuleCreateMap builds a request body from CreateRuleOpts. +func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rule") +} + +// CreateRule will create and associate a Rule with a particular L7Policy. +func CreateRule(c *gophercloud.ServiceClient, policyID string, opts CreateRuleOpts) (r CreateRuleResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(ruleRootURL(c, policyID), b, &r.Body, nil) + return +} + +// ListRulesOptsBuilder allows extensions to add additional parameters to the +// ListRules request. +type ListRulesOptsBuilder interface { + ToRulesListQuery() (string, error) +} + +// ListRulesOpts allows the filtering and sorting of paginated collections +// through the API. 
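+//
+// For example, a sketch that lists only inverted PATH rules:
+//
+//    listOpts := ListRulesOpts{RuleType: TypePath, Invert: true}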
+type ListRulesOpts struct {
+    RuleType RuleType `q:"type"`
+    TenantID string `q:"tenant_id"`
+    CompareType CompareType `q:"compare_type"`
+    Value string `q:"value"`
+    Key string `q:"key"`
+    Invert bool `q:"invert"`
+    AdminStateUp bool `q:"admin_state_up"`
+    ID string `q:"id"`
+    Limit int `q:"limit"`
+    Marker string `q:"marker"`
+    SortKey string `q:"sort_key"`
+    SortDir string `q:"sort_dir"`
+}
+
+// ToRulesListQuery formats a ListOpts into a query string.
+func (opts ListRulesOpts) ToRulesListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// ListRules returns a Pager which allows you to iterate over a collection of
+// rules. It accepts a ListRulesOptsBuilder, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those rules that are owned by the
+// project who submits the request, unless an admin user submits the request.
+func ListRules(c *gophercloud.ServiceClient, policyID string, opts ListRulesOptsBuilder) pagination.Pager {
+    url := ruleRootURL(c, policyID)
+    if opts != nil {
+        query, err := opts.ToRulesListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return RulePage{pagination.LinkedPageBase{PageResult: r}}
+    })
+}
+
+// GetRule retrieves a particular L7Policy Rule based on its unique ID.
+func GetRule(c *gophercloud.ServiceClient, policyID string, ruleID string) (r GetRuleResult) {
+    _, r.Err = c.Get(ruleResourceURL(c, policyID, ruleID), &r.Body, nil)
+    return
+}
+
+// DeleteRule will remove a Rule from a particular L7Policy.
+func DeleteRule(c *gophercloud.ServiceClient, policyID string, ruleID string) (r DeleteRuleResult) {
+    _, r.Err = c.Delete(ruleResourceURL(c, policyID, ruleID), nil)
+    return
+}
+
+// UpdateRuleOptsBuilder allows extensions to add additional parameters to the
+// PUT request.
+type UpdateRuleOptsBuilder interface {
+    ToRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateRuleOpts is the common options struct used in this package's Update
+// operation.
+type UpdateRuleOpts struct {
+    // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH.
+    RuleType RuleType `json:"type,omitempty"`
+
+    // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH.
+    CompareType CompareType `json:"compare_type,omitempty"`
+
+    // The value to use for the comparison. For example, the file type to compare.
+    Value string `json:"value,omitempty"`
+
+    // The key to use for the comparison. For example, the name of the cookie to evaluate.
+    Key *string `json:"key,omitempty"`
+
+    // When true the logic of the rule is inverted. For example, with invert true,
+    // equal to would become not equal to. Default is false.
+    Invert *bool `json:"invert,omitempty"`
+
+    // The administrative state of the Loadbalancer. A valid value is true (UP)
+    // or false (DOWN).
+    AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToRuleUpdateMap builds a request body from UpdateRuleOpts.
+func (opts UpdateRuleOpts) ToRuleUpdateMap() (map[string]interface{}, error) {
+    b, err := gophercloud.BuildRequestBody(opts, "rule")
+    if err != nil {
+        return nil, err
+    }
+
+    if m := b["rule"].(map[string]interface{}); m["key"] == "" {
+        m["key"] = nil
+    }
+
+    return b, nil
+}
+
+// UpdateRule allows a Rule to be updated.
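+//
+// A minimal sketch (client and IDs assumed; nil pointer fields are omitted
+// from the request body):
+//
+//    invert := true
+//    rule, err := UpdateRule(client, policyID, ruleID, UpdateRuleOpts{Invert: &invert}).Extract()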
+func UpdateRule(c *gophercloud.ServiceClient, policyID string, ruleID string, opts UpdateRuleOptsBuilder) (r UpdateRuleResult) { + b, err := opts.ToRuleUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(ruleResourceURL(c, policyID, ruleID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/results.go new file mode 100644 index 00000000..5153b1b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/results.go @@ -0,0 +1,245 @@ +package l7policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// L7Policy is a collection of L7 rules associated with a Listener, and which +// may also have an association to a back-end pool. +type L7Policy struct { + // The unique ID for the L7 policy. + ID string `json:"id"` + + // Name of the L7 policy. + Name string `json:"name"` + + // The ID of the listener. + ListenerID string `json:"listener_id"` + + // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT. + Action string `json:"action"` + + // The position of this policy on the listener. + Position int32 `json:"position"` + + // A human-readable description for the resource. + Description string `json:"description"` + + // TenantID is the UUID of the tenant who owns the L7 policy in octavia. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id"` + + // Requests matching this policy will be redirected to the pool with this ID. + // Only valid if action is REDIRECT_TO_POOL. + RedirectPoolID string `json:"redirect_pool_id"` + + // Requests matching this policy will be redirected to this URL. + // Only valid if action is REDIRECT_TO_URL. + RedirectURL string `json:"redirect_url"` + + // The administrative state of the L7 policy, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // The provisioning status of the L7 policy. + // This value is ACTIVE, PENDING_* or ERROR. + // This field seems to only be returned during a call to a load balancer's /status + // see: https://github.com/gophercloud/gophercloud/issues/1362 + ProvisioningStatus string `json:"provisioning_status"` + + // The operating status of the L7 policy. + // This field seems to only be returned during a call to a load balancer's /status + // see: https://github.com/gophercloud/gophercloud/issues/1362 + OperatingStatus string `json:"operating_status"` + + // Rules are List of associated L7 rule IDs. + Rules []Rule `json:"rules"` +} + +// Rule represents layer 7 load balancing rule. +type Rule struct { + // The unique ID for the L7 rule. + ID string `json:"id"` + + // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH. + RuleType string `json:"type"` + + // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH. + CompareType string `json:"compare_type"` + + // The value to use for the comparison. For example, the file type to compare. + Value string `json:"value"` + + // TenantID is the UUID of the tenant who owns the rule in octavia. + // Only administrative users can specify a project UUID other than their own. 
+ TenantID string `json:"tenant_id"` + + // The key to use for the comparison. For example, the name of the cookie to evaluate. + Key string `json:"key"` + + // When true the logic of the rule is inverted. For example, with invert true, + // equal to would become not equal to. Default is false. + Invert bool `json:"invert"` + + // The administrative state of the L7 rule, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // The provisioning status of the L7 rule. + // This value is ACTIVE, PENDING_* or ERROR. + // This field seems to only be returned during a call to a load balancer's /status + // see: https://github.com/gophercloud/gophercloud/issues/1362 + ProvisioningStatus string `json:"provisioning_status"` + + // The operating status of the L7 policy. + // This field seems to only be returned during a call to a load balancer's /status + // see: https://github.com/gophercloud/gophercloud/issues/1362 + OperatingStatus string `json:"operating_status"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a l7policy. +func (r commonResult) Extract() (*L7Policy, error) { + var s struct { + L7Policy *L7Policy `json:"l7policy"` + } + err := r.ExtractInto(&s) + return s.L7Policy, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a L7Policy. +type CreateResult struct { + commonResult +} + +// L7PolicyPage is the page returned by a pager when traversing over a +// collection of l7policies. +type L7PolicyPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of l7policies has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r L7PolicyPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"l7policies_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a L7PolicyPage struct is empty. +func (r L7PolicyPage) IsEmpty() (bool, error) { + is, err := ExtractL7Policies(r) + return len(is) == 0, err +} + +// ExtractL7Policies accepts a Page struct, specifically a L7PolicyPage struct, +// and extracts the elements into a slice of L7Policy structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractL7Policies(r pagination.Page) ([]L7Policy, error) { + var s struct { + L7Policies []L7Policy `json:"l7policies"` + } + err := (r.(L7PolicyPage)).ExtractInto(&s) + return s.L7Policies, err +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret the result as a L7Policy. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a L7Policy. +type UpdateResult struct { + commonResult +} + +type commonRuleResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a rule. 
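+//
+// For example (a sketch; the client and IDs are assumed to exist):
+//
+//    rule, err := GetRule(client, policyID, ruleID).Extract()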
+func (r commonRuleResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// CreateRuleResult represents the result of a CreateRule operation. +// Call its Extract method to interpret it as a Rule. +type CreateRuleResult struct { + commonRuleResult +} + +// RulePage is the page returned by a pager when traversing over a +// collection of Rules in a L7Policy. +type RulePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of rules has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r RulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a RulePage struct is empty. +func (r RulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a RulePage struct, +// and extracts the elements into a slice of Rules structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]Rule, error) { + var s struct { + Rules []Rule `json:"rules"` + } + err := (r.(RulePage)).ExtractInto(&s) + return s.Rules, err +} + +// GetRuleResult represents the result of a GetRule operation. +// Call its Extract method to interpret it as a Rule. +type GetRuleResult struct { + commonRuleResult +} + +// DeleteRuleResult represents the result of a DeleteRule operation. +// Call its ExtractErr method to determine if the request succeeded or failed. +type DeleteRuleResult struct { + gophercloud.ErrResult +} + +// UpdateRuleResult represents the result of an UpdateRule operation. +// Call its Extract method to interpret it as a Rule. 
+type UpdateRuleResult struct { + commonRuleResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/urls.go new file mode 100644 index 00000000..ecb607a8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies/urls.go @@ -0,0 +1,25 @@ +package l7policies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "l7policies" + rulePath = "rules" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func ruleRootURL(c *gophercloud.ServiceClient, policyID string) string { + return c.ServiceURL(rootPath, resourcePath, policyID, rulePath) +} + +func ruleResourceURL(c *gophercloud.ServiceClient, policyID string, ruleID string) string { + return c.ServiceURL(rootPath, resourcePath, policyID, rulePath, ruleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go new file mode 100644 index 00000000..108cdb03 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go @@ -0,0 +1,63 @@ +/* +Package listeners provides information and interaction with Listeners of the +LBaaS v2 extension for the OpenStack Networking service. + +Example to List Listeners + + listOpts := listeners.ListOpts{ + LoadbalancerID : "ca430f80-1737-4712-8dc6-3f640d55594b", + } + + allPages, err := listeners.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allListeners, err := listeners.ExtractListeners(allPages) + if err != nil { + panic(err) + } + + for _, listener := range allListeners { + fmt.Printf("%+v\n", listener) + } + +Example to Create a Listener + + createOpts := listeners.CreateOpts{ + Protocol: "TCP", + Name: "db", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + AdminStateUp: gophercloud.Enabled, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ProtocolPort: 3306, + } + + listener, err := listeners.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + + i1001 := 1001 + updateOpts := listeners.UpdateOpts{ + ConnLimit: &i1001, + } + + listener, err := listeners.Update(networkClient, listenerID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := listeners.Delete(networkClient, listenerID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package listeners diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go new file mode 100644 index 00000000..f2966b6c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go @@ -0,0 +1,212 @@ +package listeners + +import ( 
+ "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Type Protocol represents a listener protocol. +type Protocol string + +// Supported attributes for create/update operations. +const ( + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" + ProtocolTerminatedHTTPS Protocol = "TERMINATED_HTTPS" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToListenerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular listener attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + LoadbalancerID string `q:"loadbalancer_id"` + DefaultPoolID string `q:"default_pool_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToListenerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToListenerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// listeners. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those listeners that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToListenerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ListenerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToListenerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options for creating a listener. +type CreateOpts struct { + // The load balancer on which to provision this listener. + LoadbalancerID string `json:"loadbalancer_id" required:"true"` + + // The protocol - can either be TCP, HTTP or HTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another project. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is only required if the caller has an admin role and wants + // to create a pool for another project. + ProjectID string `json:"project_id,omitempty"` + + // Human-readable name for the Listener. Does not have to be unique. 
+    Name string `json:"name,omitempty"`
+
+    // The ID of the default pool with which the Listener is associated.
+    DefaultPoolID string `json:"default_pool_id,omitempty"`
+
+    // Human-readable description for the Listener.
+    Description string `json:"description,omitempty"`
+
+    // The maximum number of connections allowed for the Listener.
+    ConnLimit *int `json:"connection_limit,omitempty"`
+
+    // A reference to a Barbican container of TLS secrets.
+    DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
+
+    // A list of references to TLS secrets.
+    SniContainerRefs []string `json:"sni_container_refs,omitempty"`
+
+    // The administrative state of the Listener. A valid value is true (UP)
+    // or false (DOWN).
+    AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToListenerCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "listener")
+}
+
+// Create is an operation which provisions a new Listener based on the
+// configuration defined in the CreateOpts struct. Once the request is
+// validated and progress has started on the provisioning process, a
+// CreateResult will be returned.
+//
+// Users with an admin role can create Listeners on behalf of other tenants by
+// specifying a TenantID attribute different than their own.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToListenerCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+    return
+}
+
+// Get retrieves a particular Listener based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToListenerUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options for updating a Listener.
+type UpdateOpts struct {
+    // Human-readable name for the Listener. Does not have to be unique.
+    Name *string `json:"name,omitempty"`
+
+    // The ID of the default pool with which the Listener is associated.
+    DefaultPoolID *string `json:"default_pool_id,omitempty"`
+
+    // Human-readable description for the Listener.
+    Description *string `json:"description,omitempty"`
+
+    // The maximum number of connections allowed for the Listener.
+    ConnLimit *int `json:"connection_limit,omitempty"`
+
+    // A reference to a Barbican container of TLS secrets.
+    DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"`
+
+    // A list of references to TLS secrets.
+    SniContainerRefs []string `json:"sni_container_refs,omitempty"`
+
+    // The administrative state of the Listener. A valid value is true (UP)
+    // or false (DOWN).
+    AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToListenerUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) {
+    b, err := gophercloud.BuildRequestBody(opts, "listener")
+    if err != nil {
+        return nil, err
+    }
+
+    if m := b["listener"].(map[string]interface{}); m["default_pool_id"] == "" {
+        m["default_pool_id"] = nil
+    }
+
+    return b, nil
+}
+
+// Update is an operation which modifies the attributes of the specified
+// Listener.
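+//
+// Because UpdateOpts uses pointer fields, zero values can be sent explicitly.
+// A sketch that clears a listener's description (identifiers assumed):
+//
+//    desc := ""
+//    listener, err := Update(client, listenerID, UpdateOpts{Description: &desc}).Extract()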
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) {
+    b, err := opts.ToListenerUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200, 202},
+    })
+    return
+}
+
+// Delete will permanently delete a particular Listener based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(resourceURL(c, id), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go
new file mode 100644
index 00000000..ae105793
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go
@@ -0,0 +1,141 @@
+package listeners
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/l7policies"
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+type LoadBalancerID struct {
+    ID string `json:"id"`
+}
+
+// Listener is the primary load balancing configuration object that specifies
+// the loadbalancer and port on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+type Listener struct {
+    // The unique ID for the Listener.
+    ID string `json:"id"`
+
+    // Owner of the Listener.
+    TenantID string `json:"tenant_id"`
+
+    // Human-readable name for the Listener. Does not have to be unique.
+    Name string `json:"name"`
+
+    // Human-readable description for the Listener.
+    Description string `json:"description"`
+
+    // The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS.
+    Protocol string `json:"protocol"`
+
+    // The port on which to listen to client traffic that is associated with the
+    // Loadbalancer. A valid value is from 0 to 65535.
+    ProtocolPort int `json:"protocol_port"`
+
+    // The UUID of default pool. Must have compatible protocol with listener.
+    DefaultPoolID string `json:"default_pool_id"`
+
+    // A list of load balancer IDs.
+    Loadbalancers []LoadBalancerID `json:"loadbalancers"`
+
+    // The maximum number of connections allowed for the Loadbalancer.
+    // Default is -1, meaning no limit.
+    ConnLimit int `json:"connection_limit"`
+
+    // The list of references to TLS secrets.
+    SniContainerRefs []string `json:"sni_container_refs"`
+
+    // A reference to a Barbican container of TLS secrets.
+    DefaultTlsContainerRef string `json:"default_tls_container_ref"`
+
+    // The administrative state of the Listener. A valid value is true (UP) or false (DOWN).
+    AdminStateUp bool `json:"admin_state_up"`
+
+    // Pools are the pools which are part of this listener.
+    Pools []pools.Pool `json:"pools"`
+
+    // L7policies are the L7 policies which are part of this listener.
+    // This field seems to only be returned during a call to a load balancer's /status
+    // see: https://github.com/gophercloud/gophercloud/issues/1352
+    L7Policies []l7policies.L7Policy `json:"l7policies"`
+
+    // The provisioning status of the listener.
+    // This value is ACTIVE, PENDING_* or ERROR.
+ ProvisioningStatus string `json:"provisioning_status"` +} + +// ListenerPage is the page returned by a pager when traversing over a +// collection of listeners. +type ListenerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of listeners has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r ListenerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"listeners_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ListenerPage struct is empty. +func (r ListenerPage) IsEmpty() (bool, error) { + is, err := ExtractListeners(r) + return len(is) == 0, err +} + +// ExtractListeners accepts a Page struct, specifically a ListenerPage struct, +// and extracts the elements into a slice of Listener structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractListeners(r pagination.Page) ([]Listener, error) { + var s struct { + Listeners []Listener `json:"listeners"` + } + err := (r.(ListenerPage)).ExtractInto(&s) + return s.Listeners, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a listener. +func (r commonResult) Extract() (*Listener, error) { + var s struct { + Listener *Listener `json:"listener"` + } + err := r.ExtractInto(&s) + return s.Listener, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Listener. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Listener. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Listener. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go
new file mode 100644
index 00000000..02fb1eb3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go
@@ -0,0 +1,16 @@
+package listeners
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+    rootPath     = "lbaas"
+    resourcePath = "listeners"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
new file mode 100644
index 00000000..c6d53a7b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
@@ -0,0 +1,79 @@
+/*
+Package loadbalancers provides information and interaction with Load Balancers
+of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Load Balancers
+
+    listOpts := loadbalancers.ListOpts{
+        Provider: "haproxy",
+    }
+
+    allPages, err := loadbalancers.List(networkClient, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, lb := range allLoadbalancers {
+        fmt.Printf("%+v\n", lb)
+    }
+
+Example to Create a Load Balancer
+
+    createOpts := loadbalancers.CreateOpts{
+        Name: "db_lb",
+        AdminStateUp: gophercloud.Enabled,
+        VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+        VipAddress: "10.30.176.48",
+        Flavor: "medium",
+        Provider: "haproxy",
+    }
+
+    lb, err := loadbalancers.Create(networkClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Update a Load Balancer
+
+    lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+    name := "new-name"
+    updateOpts := loadbalancers.UpdateOpts{
+        Name: &name,
+    }
+
+    lb, err := loadbalancers.Update(networkClient, lbID, updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Load Balancer
+
+    lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+    err := loadbalancers.Delete(networkClient, lbID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Get the Status of a Load Balancer
+
+    lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+    status, err := loadbalancers.GetStatuses(networkClient, lbID).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Get the Statistics of a Load Balancer
+
+    lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+    stats, err := loadbalancers.GetStats(networkClient, lbID).Extract()
+    if err != nil {
+        panic(err)
+    }
+*/
+package loadbalancers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go
new file mode 100644
index 00000000..f5b14134
--- /dev/null
+++
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go @@ -0,0 +1,204 @@ +package loadbalancers + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToLoadBalancerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Loadbalancer attributes you want to see returned. SortKey allows you to +// sort by a particular attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Description string `q:"description"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + ProvisioningStatus string `q:"provisioning_status"` + VipAddress string `q:"vip_address"` + VipPortID string `q:"vip_port_id"` + VipSubnetID string `q:"vip_subnet_id"` + ID string `q:"id"` + OperatingStatus string `q:"operating_status"` + Name string `q:"name"` + Flavor string `q:"flavor"` + Provider string `q:"provider"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToLoadBalancerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToLoadBalancerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// load balancers. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those load balancers that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToLoadBalancerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLoadBalancerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description string `json:"description,omitempty"` + + // The network on which to allocate the Loadbalancer's address. A tenant can + // only create Loadbalancers on networks authorized by policy (e.g. networks + // that belong to them or networks that are shared). + VipSubnetID string `json:"vip_subnet_id" required:"true"` + + // TenantID is the UUID of the project who owns the Loadbalancer. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Loadbalancer. 
+ // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The IP address of the Loadbalancer. + VipAddress string `json:"vip_address,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + // The UUID of a flavor. + Flavor string `json:"flavor,omitempty"` + + // The name of the provider. + Provider string `json:"provider,omitempty"` +} + +// ToLoadBalancerCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Create is an operation which provisions a new loadbalancer based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLoadBalancerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Loadbalancer based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLoadBalancerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name *string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description *string `json:"description,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLoadBalancerUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Update is an operation which modifies the attributes of the specified +// LoadBalancer. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { + b, err := opts.ToLoadBalancerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular LoadBalancer based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// CascadingDelete is like `Delete`, but will also delete any of the load balancer's +// children (listener, monitor, etc). +// NOTE: This function will only work with Octavia load balancers; Neutron does not +// support this. 
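+//
+// A usage sketch (assumes "lbClient" was created with the "load-balancer"
+// service type, e.g. via openstack.NewLoadBalancerV2):
+//
+//    err := CascadingDelete(lbClient, lbID).ExtractErr()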
+func CascadingDelete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    if c.Type != "load-balancer" {
+        r.Err = fmt.Errorf("error prior to running cascade delete: only Octavia LBs supported")
+        return
+    }
+    u := fmt.Sprintf("%s?cascade=true", resourceURL(c, id))
+    _, r.Err = c.Delete(u, nil)
+    return
+}
+
+// GetStatuses will return the status of a particular LoadBalancer.
+func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) {
+    _, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil)
+    return
+}
+
+// GetStats will return the current statistics of a particular LoadBalancer.
+func GetStats(c *gophercloud.ServiceClient, id string) (r StatsResult) {
+    _, r.Err = c.Get(statisticsRootURL(c, id), &r.Body, nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go
new file mode 100644
index 00000000..7f423c93
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go
@@ -0,0 +1,186 @@
+package loadbalancers
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// LoadBalancer is the primary load balancing configuration object that
+// specifies the virtual IP address on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+type LoadBalancer struct {
+    // Human-readable description for the Loadbalancer.
+    Description string `json:"description"`
+
+    // The administrative state of the Loadbalancer.
+    // A valid value is true (UP) or false (DOWN).
+    AdminStateUp bool `json:"admin_state_up"`
+
+    // Owner of the LoadBalancer.
+    TenantID string `json:"tenant_id"`
+
+    // The provisioning status of the LoadBalancer.
+    // This value is ACTIVE, PENDING_CREATE or ERROR.
+    ProvisioningStatus string `json:"provisioning_status"`
+
+    // The IP address of the Loadbalancer.
+    VipAddress string `json:"vip_address"`
+
+    // The UUID of the port associated with the IP address.
+    VipPortID string `json:"vip_port_id"`
+
+    // The UUID of the subnet on which to allocate the virtual IP for the
+    // Loadbalancer address.
+    VipSubnetID string `json:"vip_subnet_id"`
+
+    // The unique ID for the LoadBalancer.
+    ID string `json:"id"`
+
+    // The operating status of the LoadBalancer. This value is ONLINE or OFFLINE.
+    OperatingStatus string `json:"operating_status"`
+
+    // Human-readable name for the LoadBalancer. Does not have to be unique.
+    Name string `json:"name"`
+
+    // The UUID of a flavor if set.
+    Flavor string `json:"flavor"`
+
+    // The name of the provider.
+    Provider string `json:"provider"`
+
+    // Listeners are the listeners related to this Loadbalancer.
+    Listeners []listeners.Listener `json:"listeners"`
+
+    // Pools are the pools related to this Loadbalancer.
+    Pools []pools.Pool `json:"pools"`
+}
+
+// StatusTree represents the status of a loadbalancer.
+type StatusTree struct {
+    Loadbalancer *LoadBalancer `json:"loadbalancer"`
+}
+
+type Stats struct {
+    // The currently active connections.
+ ActiveConnections int `json:"active_connections"` + + // The total bytes received. + BytesIn int `json:"bytes_in"` + + // The total bytes sent. + BytesOut int `json:"bytes_out"` + + // The total requests that were unable to be fulfilled. + RequestErrors int `json:"request_errors"` + + // The total connections handled. + TotalConnections int `json:"total_connections"` +} + +// LoadBalancerPage is the page returned by a pager when traversing over a +// collection of load balancers. +type LoadBalancerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of load balancers has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r LoadBalancerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"loadbalancers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a LoadBalancerPage struct is empty. +func (r LoadBalancerPage) IsEmpty() (bool, error) { + is, err := ExtractLoadBalancers(r) + return len(is) == 0, err +} + +// ExtractLoadBalancers accepts a Page struct, specifically a LoadbalancerPage +// struct, and extracts the elements into a slice of LoadBalancer structs. In +// other words, a generic collection is mapped into a relevant slice. +func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) { + var s struct { + LoadBalancers []LoadBalancer `json:"loadbalancers"` + } + err := (r.(LoadBalancerPage)).ExtractInto(&s) + return s.LoadBalancers, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a loadbalancer. +func (r commonResult) Extract() (*LoadBalancer, error) { + var s struct { + LoadBalancer *LoadBalancer `json:"loadbalancer"` + } + err := r.ExtractInto(&s) + return s.LoadBalancer, err +} + +// GetStatusesResult represents the result of a GetStatuses operation. +// Call its Extract method to interpret it as a StatusTree. +type GetStatusesResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Loadbalancer. +func (r GetStatusesResult) Extract() (*StatusTree, error) { + var s struct { + Statuses *StatusTree `json:"statuses"` + } + err := r.ExtractInto(&s) + return s.Statuses, err +} + +// StatsResult represents the result of a GetStats operation. +// Call its Extract method to interpret it as a Stats. +type StatsResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Loadbalancer. +func (r StatsResult) Extract() (*Stats, error) { + var s struct { + Stats *Stats `json:"stats"` + } + err := r.ExtractInto(&s) + return s.Stats, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a LoadBalancer. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a LoadBalancer. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a LoadBalancer. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go new file mode 100644 index 00000000..2d2a99b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go @@ -0,0 +1,26 @@ +package loadbalancers + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "loadbalancers" + statusPath = "statuses" + statisticsPath = "stats" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func statusRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statusPath) +} + +func statisticsRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statisticsPath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go new file mode 100644 index 00000000..6ed8c8fb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go @@ -0,0 +1,69 @@ +/* +Package monitors provides information and interaction with Monitors +of the LBaaS v2 extension for the OpenStack Networking service. + +Example to List Monitors + + listOpts := monitors.ListOpts{ + PoolID: "c79a4468-d788-410c-bf79-9a8ef6354852", + } + + allPages, err := monitors.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + panic(err) + } + + for _, monitor := range allMonitors { + fmt.Printf("%+v\n", monitor) + } + +Example to Create a Monitor + + createOpts := monitors.CreateOpts{ + Type: "HTTP", + Name: "db", + PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + } + + monitor, err := monitors.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + + updateOpts := monitors.UpdateOpts{ + Name: "NewHealthmonitorName", + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + } + + monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := monitors.Delete(networkClient, monitorID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go new file mode 100644 index 00000000..f728f5a8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go @@ -0,0 +1,257 
+package monitors
+
+import (
+	"fmt"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToMonitorListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Monitor attributes you want to see returned. SortKey allows you to
+// sort by a particular Monitor attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	ID            string `q:"id"`
+	Name          string `q:"name"`
+	TenantID      string `q:"tenant_id"`
+	ProjectID     string `q:"project_id"`
+	PoolID        string `q:"pool_id"`
+	Type          string `q:"type"`
+	Delay         int    `q:"delay"`
+	Timeout       int    `q:"timeout"`
+	MaxRetries    int    `q:"max_retries"`
+	HTTPMethod    string `q:"http_method"`
+	URLPath       string `q:"url_path"`
+	ExpectedCodes string `q:"expected_codes"`
+	AdminStateUp  *bool  `q:"admin_state_up"`
+	Status        string `q:"status"`
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	SortKey       string `q:"sort_key"`
+	SortDir       string `q:"sort_dir"`
+}
+
+// ToMonitorListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToMonitorListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// health monitors. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those health monitors that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToMonitorListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return MonitorPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Constants that represent approved monitoring types.
+const (
+	TypePING  = "PING"
+	TypeTCP   = "TCP"
+	TypeHTTP  = "HTTP"
+	TypeHTTPS = "HTTPS"
+)
+
+var (
+	errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout")
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToMonitorCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// The Pool to Monitor.
+	PoolID string `json:"pool_id" required:"true"`
+
+	// The type of probe, which is PING, TCP, HTTP, or HTTPS, that is
+	// sent by the load balancer to verify the member state.
+	Type string `json:"type" required:"true"`
+
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay" required:"true"`
+
+	// Maximum number of seconds for a Monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout" required:"true"`
+
+	// Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries" required:"true"`
+
+	// URI path that will be accessed if Monitor type is HTTP or HTTPS.
+	// Required for HTTP(S) types.
+	URLPath string `json:"url_path,omitempty"`
+
+	// The HTTP method used for requests by the Monitor. If this attribute
+	// is not specified, it defaults to "GET". Required for HTTP(S) types.
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify
+	// a single status like "200", or a range like "200-202". Required for HTTP(S)
+	// types.
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// TenantID is the UUID of the project who owns the Monitor.
+	// Only administrative users can specify a project UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// ProjectID is the UUID of the project who owns the Monitor.
+	// Only administrative users can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// The Name of the Monitor.
+	Name string `json:"name,omitempty"`
+
+	// The administrative state of the Monitor. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMonitorCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
+	if err != nil {
+		return nil, err
+	}
+
+	switch opts.Type {
+	case TypeHTTP, TypeHTTPS:
+		switch opts.URLPath {
+		case "":
+			return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS")
+		}
+		switch opts.ExpectedCodes {
+		case "":
+			return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS")
+		}
+	}
+
+	return b, nil
+}
+
+/*
+ Create is an operation which provisions a new Health Monitor. There are
+ different types of Monitor you can provision: PING, TCP or HTTP(S). Below
+ are examples of how to create each one.
+
+ Here is an example config struct to use when creating a PING or TCP Monitor:
+
+ CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
+ CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}
+
+ Here is an example config struct to use when creating an HTTP(S) Monitor:
+
+ CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+ HTTPMethod: "HEAD", ExpectedCodes: "200", PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
+*/
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToMonitorCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular Health Monitor based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToMonitorUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay,omitempty"`
+
+	// Maximum number of seconds for a Monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+ Timeout int `json:"timeout,omitempty"` + + // Number of permissible ping failures before changing the member's + // status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries,omitempty"` + + // URI path that will be accessed if Monitor type is HTTP or HTTPS. + // Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // The HTTP method used for requests by the Monitor. If this attribute + // is not specified, it defaults to "GET". Required for HTTP(S) types. + HTTPMethod string `json:"http_method,omitempty"` + + // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify + // a single status like "200", or a range like "200-202". Required for HTTP(S) + // types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // The Name of the Monitor. + Name *string `json:"name,omitempty"` + + // The administrative state of the Monitor. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMonitorUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "healthmonitor") +} + +// Update is an operation which modifies the attributes of the specified +// Monitor. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular Monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go new file mode 100644 index 00000000..a78f7aeb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go @@ -0,0 +1,153 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type PoolID struct { + ID string `json:"id"` +} + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // The unique ID for the Monitor. + ID string `json:"id"` + + // The Name of the Monitor. 
+	Name string `json:"name"`
+
+	// TenantID is the owner of the Monitor.
+	TenantID string `json:"tenant_id"`
+
+	// The type of probe sent by the load balancer to verify the member state,
+	// which is PING, TCP, HTTP, or HTTPS.
+	Type string `json:"type"`
+
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay"`
+
+	// The maximum number of seconds for a monitor to wait for a connection to be
+	// established before it times out. This value must be less than the delay
+	// value.
+	Timeout int `json:"timeout"`
+
+	// Number of allowed connection failures before changing the status of the
+	// member to INACTIVE. A valid value is from 1 to 10.
+	MaxRetries int `json:"max_retries"`
+
+	// The HTTP method that the monitor uses for requests.
+	HTTPMethod string `json:"http_method"`
+
+	// The HTTP path of the request sent by the monitor to test the health of a
+	// member. Must be a string beginning with a forward slash (/).
+	URLPath string `json:"url_path"`
+
+	// Expected HTTP codes for a passing HTTP(S) monitor.
+	ExpectedCodes string `json:"expected_codes"`
+
+	// The administrative state of the health monitor, which is up (true) or
+	// down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// The status of the health monitor. Indicates whether the health monitor is
+	// operational.
+	Status string `json:"status"`
+
+	// List of pools that are associated with the health monitor.
+	Pools []PoolID `json:"pools"`
+
+	// The provisioning status of the monitor.
+	// This value is ACTIVE, PENDING_* or ERROR.
+	ProvisioningStatus string `json:"provisioning_status"`
+}
+
+// MonitorPage is the page returned by a pager when traversing over a
+// collection of health monitors.
+type MonitorPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of monitors has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r MonitorPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"healthmonitors_links"`
+	}
+
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a MonitorPage struct is empty.
+func (r MonitorPage) IsEmpty() (bool, error) {
+	is, err := ExtractMonitors(r)
+	return len(is) == 0, err
+}
+
+// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct,
+// and extracts the elements into a slice of Monitor structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractMonitors(r pagination.Page) ([]Monitor, error) {
+	var s struct {
+		Monitors []Monitor `json:"healthmonitors"`
+	}
+	err := (r.(MonitorPage)).ExtractInto(&s)
+	return s.Monitors, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a monitor.
+func (r commonResult) Extract() (*Monitor, error) {
+	var s struct {
+		Monitor *Monitor `json:"healthmonitor"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Monitor, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Monitor.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Monitor.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Monitor.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go
new file mode 100644
index 00000000..a222e52a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go
@@ -0,0 +1,16 @@
+package monitors
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "healthmonitors"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go
new file mode 100644
index 00000000..06971486
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go
@@ -0,0 +1,126 @@
+/*
+Package pools provides information and interaction with Pools and
+Members of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Pools
+
+	listOpts := pools.ListOpts{
+		LoadbalancerID: "c79a4468-d788-410c-bf79-9a8ef6354852",
+	}
+
+	allPages, err := pools.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPools, err := pools.ExtractPools(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pool := range allPools {
+		fmt.Printf("%+v\n", pool)
+	}
+
+Example to Create a Pool
+
+	createOpts := pools.CreateOpts{
+		LBMethod:       pools.LBMethodRoundRobin,
+		Protocol:       "HTTP",
+		Name:           "Example pool",
+		LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab",
+	}
+
+	pool, err := pools.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	name := "new-name"
+
+	updateOpts := pools.UpdateOpts{
+		Name: &name,
+	}
+
+	pool, err := pools.Update(networkClient, poolID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := pools.Delete(networkClient, poolID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Pool Members
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	listOpts := pools.ListMembersOpts{
+		ProtocolPort: 80,
+	}
+
+	allPages, err := pools.ListMembers(networkClient, poolID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMembers, err := pools.ExtractMembers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, member := range allMembers {
+		fmt.Printf("%+v\n", member)
+	}
+
+Example to Create a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	weight := 10
+	createOpts := pools.CreateMemberOpts{
+		Name:         "db",
+		SubnetID:     "1981f108-3c48-48d2-b908-30f7d28532c9",
+		Address:      "10.0.2.11",
+		ProtocolPort: 80,
+		Weight:       &weight,
+	}
+
+	member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+	name := "new-name"
+
+	weight := 4
+	updateOpts := pools.UpdateMemberOpts{
+		Name:   &name,
+		Weight: &weight,
+	}
+
+	member, err := pools.UpdateMember(networkClient, poolID, memberID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+
+	err := pools.DeleteMember(networkClient, poolID, memberID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package pools
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
new file mode 100644
index 00000000..f427ae7b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
@@ -0,0 +1,356 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPoolListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Pool attributes you want to see returned. SortKey allows you to
+// sort by a particular Pool attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	LBMethod       string `q:"lb_algorithm"`
+	Protocol       string `q:"protocol"`
+	TenantID       string `q:"tenant_id"`
+	ProjectID      string `q:"project_id"`
+	AdminStateUp   *bool  `q:"admin_state_up"`
+	Name           string `q:"name"`
+	ID             string `q:"id"`
+	LoadbalancerID string `q:"loadbalancer_id"`
+	ListenerID     string `q:"listener_id"`
+	Limit          int    `q:"limit"`
+	Marker         string `q:"marker"`
+	SortKey        string `q:"sort_key"`
+	SortDir        string `q:"sort_dir"`
+}
+
+// ToPoolListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToPoolListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// pools. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those pools that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToPoolListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return PoolPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+type LBMethod string
+type Protocol string
+
+// Supported attributes for create/update operations.
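+//
+// For example, a round-robin HTTP pool attached to an existing load balancer
+// could be described as follows (a sketch; the load balancer ID is
+// illustrative):
+//
+//	opts := pools.CreateOpts{
+//		LBMethod:       pools.LBMethodRoundRobin,
+//		Protocol:       pools.ProtocolHTTP,
+//		LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab",
+//	}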
+const (
+	LBMethodRoundRobin       LBMethod = "ROUND_ROBIN"
+	LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS"
+	LBMethodSourceIp         LBMethod = "SOURCE_IP"
+
+	ProtocolTCP   Protocol = "TCP"
+	ProtocolHTTP  Protocol = "HTTP"
+	ProtocolHTTPS Protocol = "HTTPS"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPoolCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts is the common options struct used in this package's Create
+// operation.
+type CreateOpts struct {
+	// The algorithm used to distribute load between the members of the pool. The
+	// current specification supports LBMethodRoundRobin, LBMethodLeastConnections
+	// and LBMethodSourceIp as valid values for this attribute.
+	LBMethod LBMethod `json:"lb_algorithm" required:"true"`
+
+	// The protocol used by the pool members. You can use either
+	// ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS.
+	Protocol Protocol `json:"protocol" required:"true"`
+
+	// The load balancer with which the members of the pool will be associated.
+	// Note: one of LoadbalancerID or ListenerID must be provided.
+	LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"`
+
+	// The listener with which the members of the pool will be associated.
+	// Note: one of LoadbalancerID or ListenerID must be provided.
+	ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"`
+
+	// TenantID is the UUID of the project who owns the Pool.
+	// Only administrative users can specify a project UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// ProjectID is the UUID of the project who owns the Pool.
+	// Only administrative users can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// Name of the pool.
+	Name string `json:"name,omitempty"`
+
+	// Human-readable description for the pool.
+	Description string `json:"description,omitempty"`
+
+	// Persistence is the session persistence of the pool.
+	// Omit this field to prevent session persistence.
+	Persistence *SessionPersistence `json:"session_persistence,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToPoolCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "pool")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// load balancer pool.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToPoolCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular pool based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToPoolUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// Name of the pool.
+	Name *string `json:"name,omitempty"`
+
+	// Human-readable description for the pool.
+	Description *string `json:"description,omitempty"`
+
+	// The algorithm used to distribute load between the members of the pool. The
+	// current specification supports LBMethodRoundRobin, LBMethodLeastConnections
+	// and LBMethodSourceIp as valid values for this attribute.
+	LBMethod LBMethod `json:"lb_algorithm,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToPoolUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "pool")
+}
+
+// Update allows pools to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToPoolUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular pool based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// ListMembersOptsBuilder allows extensions to add additional parameters to the
+// ListMembers request.
+type ListMembersOptsBuilder interface {
+	ToMembersListQuery() (string, error)
+}
+
+// ListMembersOpts allows the filtering and sorting of paginated collections
+// through the API. Filtering is achieved by passing in struct field values
+// that map to the Member attributes you want to see returned. SortKey allows
+// you to sort by a particular Member attribute. SortDir sets the direction,
+// and is either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListMembersOpts struct {
+	Name         string `q:"name"`
+	Weight       int    `q:"weight"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	TenantID     string `q:"tenant_id"`
+	Address      string `q:"address"`
+	ProtocolPort int    `q:"protocol_port"`
+	ID           string `q:"id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+}
+
+// ToMembersListQuery formats a ListMembersOpts into a query string.
+func (opts ListMembersOpts) ToMembersListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	if err != nil {
+		return "", err
+	}
+	return q.String(), nil
+}
+
+// ListMembers returns a Pager which allows you to iterate over a collection of
+// members. It accepts a ListMembersOptsBuilder, which allows you to filter and
+// sort the returned collection for greater efficiency.
+//
+// Default policy settings return only those members that are owned by the
+// tenant who submits the request, unless an admin user submits the request.
+func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager {
+	url := memberRootURL(c, poolID)
+	if opts != nil {
+		query, err := opts.ToMembersListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return MemberPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// CreateMemberOptsBuilder allows extensions to add additional parameters to the
+// CreateMember request.
+type CreateMemberOptsBuilder interface {
+	ToMemberCreateMap() (map[string]interface{}, error)
+}
+
+// CreateMemberOpts is the common options struct used in this package's CreateMember
+// operation.
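+//
+// Weight is declared as a pointer below, presumably so that an explicit zero
+// value survives the omitempty JSON encoding. A sketch of setting it (values
+// illustrative):
+//
+//	weight := 10
+//	opts := pools.CreateMemberOpts{
+//		Address:      "10.0.2.11",
+//		ProtocolPort: 80,
+//		Weight:       &weight,
+//	}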
+type CreateMemberOpts struct {
+	// The IP address of the member to receive traffic from the load balancer.
+	Address string `json:"address" required:"true"`
+
+	// The port on which to listen for client traffic.
+	ProtocolPort int `json:"protocol_port" required:"true"`
+
+	// Name of the Member.
+	Name string `json:"name,omitempty"`
+
+	// TenantID is the UUID of the project who owns the Member.
+	// Only administrative users can specify a project UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// ProjectID is the UUID of the project who owns the Member.
+	// Only administrative users can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// A positive integer value that indicates the relative portion of traffic
+	// that this member should receive from the pool. For example, a member with
+	// a weight of 10 receives five times as much traffic as a member with a
+	// weight of 2.
+	Weight *int `json:"weight,omitempty"`
+
+	// If you omit this parameter, LBaaS uses the vip_subnet_id parameter value
+	// for the subnet UUID.
+	SubnetID string `json:"subnet_id,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMemberCreateMap builds a request body from CreateMemberOpts.
+func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// CreateMember will create and associate a Member with a particular Pool.
+func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) {
+	b, err := opts.ToMemberCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil)
+	return
+}
+
+// GetMember retrieves a particular Pool Member based on its unique ID.
+func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) {
+	_, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil)
+	return
+}
+
+// UpdateMemberOptsBuilder allows extensions to add additional parameters to the
+// UpdateMember request.
+type UpdateMemberOptsBuilder interface {
+	ToMemberUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMemberOpts is the common options struct used in this package's
+// UpdateMember operation.
+type UpdateMemberOpts struct {
+	// Name of the Member.
+	Name *string `json:"name,omitempty"`
+
+	// A positive integer value that indicates the relative portion of traffic
+	// that this member should receive from the pool. For example, a member with
+	// a weight of 10 receives five times as much traffic as a member with a
+	// weight of 2.
+	Weight *int `json:"weight,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMemberUpdateMap builds a request body from UpdateMemberOpts.
+func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// UpdateMember allows a Member to be updated.
+func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) {
+	b, err := opts.ToMemberUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// DeleteMember will remove and disassociate a Member from a particular Pool.
+func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) {
+	_, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
new file mode 100644
index 00000000..fba0d3a8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
@@ -0,0 +1,291 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+//            IP address will be handled by the same Member of the Pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will
+//              create a cookie on the first request from a client. Subsequent
+//              requests containing the same cookie value will be handled by
+//              the same Member of the Pool.
+// APP_COOKIE: With this persistence mode, the load balancing function will
+//             rely on a cookie established by the backend application. All
+//             requests carrying the same cookie value will be handled by the
+//             same Member of the Pool.
+type SessionPersistence struct {
+	// The type of persistence mode.
+	Type string `json:"type"`
+
+	// Name of cookie if persistence mode is set appropriately.
+	CookieName string `json:"cookie_name,omitempty"`
+}
+
+// LoadBalancerID represents a load balancer.
+type LoadBalancerID struct {
+	ID string `json:"id"`
+}
+
+// ListenerID represents a listener.
+type ListenerID struct {
+	ID string `json:"id"`
+}
+
+// Pool represents a logical set of devices, such as web servers, that you
+// group together to receive and process traffic. The load balancing function
+// chooses a Member of the Pool according to the configured load balancing
+// method to handle the new requests or connections received on the VIP address.
+type Pool struct {
+	// The load-balancer algorithm, which is round-robin, least-connections, and
+	// so on. This value, which must be supported, is dependent on the provider.
+	// Round-robin must be supported.
+	LBMethod string `json:"lb_algorithm"`
+
+	// The protocol of the Pool, which is TCP, HTTP, or HTTPS.
+	Protocol string `json:"protocol"`
+
+	// Description for the Pool.
+	Description string `json:"description"`
+
+	// A list of listener object IDs.
+	Listeners []ListenerID `json:"listeners"` //[]map[string]interface{}
+
+	// A list of member object IDs.
+	Members []Member `json:"members"`
+
+	// The ID of the associated health monitor.
+	MonitorID string `json:"healthmonitor_id"`
+
+	// The network on which the members of the Pool will be located. Only members
+	// that are on this network can be added to the Pool.
+	SubnetID string `json:"subnet_id"`
+
+	// Owner of the Pool.
+	TenantID string `json:"tenant_id"`
+
+	// The administrative state of the Pool, which is up (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Pool name. Does not have to be unique.
+	Name string `json:"name"`
+
+	// The unique ID for the Pool.
+	ID string `json:"id"`
+
+	// A list of load balancer object IDs.
+	Loadbalancers []LoadBalancerID `json:"loadbalancers"`
+
+	// Indicates whether connections in the same session will be processed by the
+	// same Pool member or not.
+	Persistence SessionPersistence `json:"session_persistence"`
+
+	// The load balancer provider.
+	Provider string `json:"provider"`
+
+	// The Monitor associated with this Pool.
+	Monitor monitors.Monitor `json:"healthmonitor"`
+
+	// The provisioning status of the pool.
+	// This value is ACTIVE, PENDING_* or ERROR.
+	ProvisioningStatus string `json:"provisioning_status"`
+
+	// The operating status of the pool.
+	// This field seems to only be returned during a call to a load balancer's /status
+	// see: https://github.com/gophercloud/gophercloud/issues/1362
+	OperatingStatus string `json:"operating_status"`
+}
+
+// PoolPage is the page returned by a pager when traversing over a
+// collection of pools.
+type PoolPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of pools has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r PoolPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"pools_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PoolPage struct is empty.
+func (r PoolPage) IsEmpty() (bool, error) {
+	is, err := ExtractPools(r)
+	return len(is) == 0, err
+}
+
+// ExtractPools accepts a Page struct, specifically a PoolPage struct,
+// and extracts the elements into a slice of Pool structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPools(r pagination.Page) ([]Pool, error) {
+	var s struct {
+		Pools []Pool `json:"pools"`
+	}
+	err := (r.(PoolPage)).ExtractInto(&s)
+	return s.Pools, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a pool.
+func (r commonResult) Extract() (*Pool, error) {
+	var s struct {
+		Pool *Pool `json:"pool"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Pool, err
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret the result as a Pool.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a Get operation. Call its Extract
+// method to interpret the result as a Pool.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an Update operation. Call its Extract
+// method to interpret the result as a Pool.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
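+//
+// A minimal sketch (assumed: networkClient is an initialized
+// *gophercloud.ServiceClient and poolID names an existing pool):
+//
+//	err := pools.Delete(networkClient, poolID).ExtractErr()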
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Member represents the application running on a backend server.
+type Member struct {
+	// Name of the Member.
+	Name string `json:"name"`
+
+	// Weight of Member.
+	Weight int `json:"weight"`
+
+	// The administrative state of the member, which is up (true) or down (false).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Owner of the Member.
+	TenantID string `json:"tenant_id"`
+
+	// Parameter value for the subnet UUID.
+	SubnetID string `json:"subnet_id"`
+
+	// The Pool to which the Member belongs.
+	PoolID string `json:"pool_id"`
+
+	// The IP address of the Member.
+	Address string `json:"address"`
+
+	// The port on which the application is hosted.
+	ProtocolPort int `json:"protocol_port"`
+
+	// The unique ID for the Member.
+	ID string `json:"id"`
+
+	// The provisioning status of the member.
+	// This value is ACTIVE, PENDING_* or ERROR.
+	ProvisioningStatus string `json:"provisioning_status"`
+
+	// The operating status of the member.
+	// This field seems to only be returned during a call to a load balancer's /status
+	// see: https://github.com/gophercloud/gophercloud/issues/1362
+	OperatingStatus string `json:"operating_status"`
+}
+
+// MemberPage is the page returned by a pager when traversing over a
+// collection of Members in a Pool.
+type MemberPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of members has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r MemberPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"members_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a MemberPage struct is empty.
+func (r MemberPage) IsEmpty() (bool, error) {
+	is, err := ExtractMembers(r)
+	return len(is) == 0, err
+}
+
+// ExtractMembers accepts a Page struct, specifically a MemberPage struct,
+// and extracts the elements into a slice of Member structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractMembers(r pagination.Page) ([]Member, error) {
+	var s struct {
+		Members []Member `json:"members"`
+	}
+	err := (r.(MemberPage)).ExtractInto(&s)
+	return s.Members, err
+}
+
+type commonMemberResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a member.
+func (r commonMemberResult) Extract() (*Member, error) {
+	var s struct {
+		Member *Member `json:"member"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Member, err
+}
+
+// CreateMemberResult represents the result of a CreateMember operation.
+// Call its Extract method to interpret it as a Member.
+type CreateMemberResult struct {
+	commonMemberResult
+}
+
+// GetMemberResult represents the result of a GetMember operation.
+// Call its Extract method to interpret it as a Member.
+type GetMemberResult struct {
+	commonMemberResult
+}
+
+// UpdateMemberResult represents the result of an UpdateMember operation.
+// Call its Extract method to interpret it as a Member.
+type UpdateMemberResult struct {
+	commonMemberResult
+}
+
+// DeleteMemberResult represents the result of a DeleteMember operation.
+// Call its ExtractErr method to determine if the request succeeded or failed.
+type DeleteMemberResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go
new file mode 100644
index 00000000..e7443c4f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go
@@ -0,0 +1,25 @@
+package pools
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "pools"
+	memberPath   = "members"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
+
+func memberRootURL(c *gophercloud.ServiceClient, poolId string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolId, memberPath)
+}
+
+func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memberID string) string {
+	return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memberID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/requests.go
new file mode 100644
index 00000000..bffb7f5e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/requests.go
@@ -0,0 +1,87 @@
+package mtu
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+)
+
+// ListOptsExt adds an MTU option to the base ListOpts.
+type ListOptsExt struct {
+	networks.ListOptsBuilder
+
+	// The maximum transmission unit (MTU) value to address fragmentation.
+	// Minimum value is 68 for IPv4, and 1280 for IPv6.
+	MTU int `q:"mtu"`
+}
+
+// ToNetworkListQuery adds the mtu option to the base network
+// list options.
+func (opts ListOptsExt) ToNetworkListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts.ListOptsBuilder)
+	if err != nil {
+		return "", err
+	}
+
+	params := q.Query()
+	if opts.MTU > 0 {
+		params.Add("mtu", fmt.Sprintf("%d", opts.MTU))
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+	return q.String(), err
+}
+
+// CreateOptsExt adds an MTU option to the base Network CreateOpts.
+type CreateOptsExt struct {
+	networks.CreateOptsBuilder
+
+	// The maximum transmission unit (MTU) value to address fragmentation.
+	// Minimum value is 68 for IPv4, and 1280 for IPv6.
+	MTU int `json:"mtu,omitempty"`
+}
+
+// ToNetworkCreateMap adds an MTU to the base network creation options.
+func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.MTU == 0 {
+		return base, nil
+	}
+
+	networkMap := base["network"].(map[string]interface{})
+	networkMap["mtu"] = opts.MTU
+
+	return base, nil
+}
+
+// UpdateOptsExt adds an MTU option to the base Network UpdateOpts.
+type UpdateOptsExt struct {
+	networks.UpdateOptsBuilder
+
+	// The maximum transmission unit (MTU) value to address fragmentation.
+	// Minimum value is 68 for IPv4, and 1280 for IPv6.
+	MTU int `json:"mtu,omitempty"`
+}
+
+// ToNetworkUpdateMap adds an MTU to the base network update options.
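+//
+// A usage sketch (assumed: networkClient is an initialized
+// *gophercloud.ServiceClient and networkID names an existing network):
+//
+//	updateOpts := mtu.UpdateOptsExt{
+//		UpdateOptsBuilder: networks.UpdateOpts{},
+//		MTU:               1450,
+//	}
+//	network, err := networks.Update(networkClient, networkID, updateOpts).Extract()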
+func (opts UpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + if opts.MTU == 0 { + return base, nil + } + + networkMap := base["network"].(map[string]interface{}) + networkMap["mtu"] = opts.MTU + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/results.go new file mode 100644 index 00000000..497c9c37 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/mtu/results.go @@ -0,0 +1,8 @@ +package mtu + +// NetworkMTUExt represents an extended form of a Network with additional MTU field. +type NetworkMTUExt struct { + // The maximum transmission unit (MTU) value to address fragmentation. + // Minimum value is 68 for IPv4, and 1280 for IPv6. + MTU int `json:"mtu"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go new file mode 100644 index 00000000..0d2ed589 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go @@ -0,0 +1,3 @@ +// Package portsbinding provides information and interaction with the port +// binding extension for the OpenStack Networking service. +package portsbinding diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go new file mode 100644 index 00000000..e3800090 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go @@ -0,0 +1,96 @@ +package portsbinding + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateOptsExt adds port binding options to the base ports.CreateOpts. +type CreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + ports.CreateOptsBuilder + + // The ID of the host where the port is allocated + HostID string `json:"binding:host_id,omitempty"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type,omitempty"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]interface{} `json:"binding:profile,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. 
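+//
+// A usage sketch (assumed: networkClient is an initialized
+// *gophercloud.ServiceClient, networkID names an existing network, and the
+// host name is illustrative):
+//
+//	createOpts := portsbinding.CreateOptsExt{
+//		CreateOptsBuilder: ports.CreateOpts{NetworkID: networkID},
+//		HostID:            "compute-1",
+//	}
+//	port, err := ports.Create(networkClient, createOpts).Extract()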
+func (opts CreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.HostID != "" { + port["binding:host_id"] = opts.HostID + } + + if opts.VNICType != "" { + port["binding:vnic_type"] = opts.VNICType + } + + if opts.Profile != nil { + port["binding:profile"] = opts.Profile + } + + return base, nil +} + +// UpdateOptsExt adds port binding options to the base ports.UpdateOpts +type UpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + ports.UpdateOptsBuilder + + // The ID of the host where the port is allocated. + HostID *string `json:"binding:host_id,omitempty"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type,omitempty"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]interface{} `json:"binding:profile,omitempty"` +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.HostID != nil { + port["binding:host_id"] = *opts.HostID + } + + if opts.VNICType != "" { + port["binding:vnic_type"] = opts.VNICType + } + + if opts.Profile != nil { + if len(opts.Profile) == 0 { + // send null instead of the empty json object ("{}") + port["binding:profile"] = nil + } else { + port["binding:profile"] = opts.Profile + } + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go new file mode 100644 index 00000000..4d8f856a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go @@ -0,0 +1,24 @@ +package portsbinding + +// PortsBindingExt represents a decorated form of a Port with the additional +// port binding information. +type PortsBindingExt struct { + // The ID of the host where the port is allocated. + HostID string `json:"binding:host_id"` + + // A dictionary that enables the application to pass information about + // functions that the Networking API provides. + VIFDetails map[string]interface{} `json:"binding:vif_details"` + + // The VIF type for the port. + VIFType string `json:"binding:vif_type"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. 
+	Profile map[string]interface{} `json:"binding:profile"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go
new file mode 100644
index 00000000..2b9a3916
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go
@@ -0,0 +1,145 @@
+/*
+Package portsecurity provides information and interaction with the port
+security extension for the OpenStack Networking service.
+
+Example to List Networks with Port Security Information
+
+	type NetworkWithPortSecurityExt struct {
+		networks.Network
+		portsecurity.PortSecurityExt
+	}
+
+	var allNetworks []NetworkWithPortSecurityExt
+
+	listOpts := networks.ListOpts{
+		Name: "network_1",
+	}
+
+	allPages, err := networks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	err = networks.ExtractNetworksInto(allPages, &allNetworks)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example to Create a Network without Port Security
+
+	var networkWithPortSecurityExt struct {
+		networks.Network
+		portsecurity.PortSecurityExt
+	}
+
+	networkCreateOpts := networks.CreateOpts{
+		Name: "private",
+	}
+
+	iFalse := false
+	createOpts := portsecurity.NetworkCreateOptsExt{
+		CreateOptsBuilder:   networkCreateOpts,
+		PortSecurityEnabled: &iFalse,
+	}
+
+	err := networks.Create(networkClient, createOpts).ExtractInto(&networkWithPortSecurityExt)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", networkWithPortSecurityExt)
+
+Example to Disable Port Security on an Existing Network
+
+	var networkWithPortSecurityExt struct {
+		networks.Network
+		portsecurity.PortSecurityExt
+	}
+
+	iFalse := false
+	networkID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+	networkUpdateOpts := networks.UpdateOpts{}
+	updateOpts := portsecurity.NetworkUpdateOptsExt{
+		UpdateOptsBuilder:   networkUpdateOpts,
+		PortSecurityEnabled: &iFalse,
+	}
+
+	err := networks.Update(networkClient, networkID, updateOpts).ExtractInto(&networkWithPortSecurityExt)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", networkWithPortSecurityExt)
+
+Example to Get a Port with Port Security Information
+
+	var portWithPortSecurityExtensions struct {
+		ports.Port
+		portsecurity.PortSecurityExt
+	}
+
+	portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+
+	err := ports.Get(networkingClient, portID).ExtractInto(&portWithPortSecurityExtensions)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", portWithPortSecurityExtensions)
+
+Example to Create a Port Without Port Security
+
+	var portWithPortSecurityExtensions struct {
+		ports.Port
+		portsecurity.PortSecurityExt
+	}
+
+	iFalse := false
+	networkID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c"
+	subnetID := "a87cc70a-3e15-4acf-8205-9b711a3531b7"
+
+	portCreateOpts := ports.CreateOpts{
+		NetworkID: networkID,
+		FixedIPs:  []ports.IP{ports.IP{SubnetID: subnetID}},
+	}
+
+	createOpts := portsecurity.PortCreateOptsExt{
+		CreateOptsBuilder:   portCreateOpts,
+		PortSecurityEnabled: &iFalse,
+	}
+
+	err := ports.Create(networkingClient, createOpts).ExtractInto(&portWithPortSecurityExtensions)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", portWithPortSecurityExtensions)
+
+Example to Disable Port Security on an Existing Port
+
+	var portWithPortSecurityExtensions struct {
+		ports.Port
+		portsecurity.PortSecurityExt
+	}
+
+	iFalse := false
+	portID := "65c0ee9f-d634-4522-8954-51021b570b0d"
+
+	portUpdateOpts := ports.UpdateOpts{}
+	updateOpts := portsecurity.PortUpdateOptsExt{
+		UpdateOptsBuilder:   portUpdateOpts,
+		PortSecurityEnabled: &iFalse,
+	}
+
+	err := ports.Update(networkingClient, portID, updateOpts).ExtractInto(&portWithPortSecurityExtensions)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", portWithPortSecurityExtensions)
+*/
+package portsecurity
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go
new file mode 100644
index 00000000..c80f47cf
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go
@@ -0,0 +1,104 @@
+package portsecurity
+
+import (
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/ports"
+)
+
+// PortCreateOptsExt adds port security options to the base ports.CreateOpts.
+type PortCreateOptsExt struct {
+	ports.CreateOptsBuilder
+
+	// PortSecurityEnabled toggles port security on a port.
+	PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"`
+}
+
+// ToPortCreateMap casts a CreateOpts struct to a map.
+func (opts PortCreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToPortCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	port := base["port"].(map[string]interface{})
+
+	if opts.PortSecurityEnabled != nil {
+		port["port_security_enabled"] = opts.PortSecurityEnabled
+	}
+
+	return base, nil
+}
+
+// PortUpdateOptsExt adds port security options to the base ports.UpdateOpts.
+type PortUpdateOptsExt struct {
+	ports.UpdateOptsBuilder
+
+	// PortSecurityEnabled toggles port security on a port.
+	PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"`
+}
+
+// ToPortUpdateMap casts a UpdateOpts struct to a map.
+func (opts PortUpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) {
+	base, err := opts.UpdateOptsBuilder.ToPortUpdateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	port := base["port"].(map[string]interface{})
+
+	if opts.PortSecurityEnabled != nil {
+		port["port_security_enabled"] = opts.PortSecurityEnabled
+	}
+
+	return base, nil
+}
+
+// NetworkCreateOptsExt adds port security options to the base
+// networks.CreateOpts.
+type NetworkCreateOptsExt struct {
+	networks.CreateOptsBuilder
+
+	// PortSecurityEnabled toggles port security on a port.
+	PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"`
+}
+
+// ToNetworkCreateMap casts a CreateOpts struct to a map.
+func (opts NetworkCreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	network := base["network"].(map[string]interface{})
+
+	if opts.PortSecurityEnabled != nil {
+		network["port_security_enabled"] = opts.PortSecurityEnabled
+	}
+
+	return base, nil
+}
+
+// NetworkUpdateOptsExt adds port security options to the base
+// networks.UpdateOpts.
+type NetworkUpdateOptsExt struct {
+	networks.UpdateOptsBuilder
+
+	// PortSecurityEnabled toggles port security on a port.
+	PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"`
+}
+
+// ToNetworkUpdateMap casts a UpdateOpts struct to a map.
+func (opts NetworkUpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) {
+    base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap()
+    if err != nil {
+        return nil, err
+    }
+
+    network := base["network"].(map[string]interface{})
+
+    if opts.PortSecurityEnabled != nil {
+        network["port_security_enabled"] = &opts.PortSecurityEnabled
+    }
+
+    return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go
new file mode 100644
index 00000000..7b3482a4
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go
@@ -0,0 +1,7 @@
+package portsecurity
+
+type PortSecurityExt struct {
+    // PortSecurityEnabled specifies whether port security is enabled or
+    // disabled.
+    PortSecurityEnabled bool `json:"port_security_enabled"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go
new file mode 100644
index 00000000..ddc44175
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go
@@ -0,0 +1,73 @@
+/*
+Package provider gives access to the provider Neutron plugin, allowing
+network extended attributes. The provider extended attributes for networks
+enable administrative users to specify how network objects map to the
+underlying networking infrastructure. These extended attributes also appear
+when administrative users query networks.
+
+For more information about extended attributes, see the NetworkExtAttrs
+struct. The actual semantics of these attributes depend on the technology
+back end of the particular plug-in. See the plug-in documentation and the
+OpenStack Cloud Administrator Guide to understand which values should be
+specific for each of these attributes when OpenStack Networking is deployed
+with a particular plug-in. The examples shown in this chapter refer to the
+Open vSwitch plug-in.
+
+The default policy settings enable only users with administrative rights to
+specify these parameters in requests and to see their values in responses. By
+default, the provider network extension attributes are completely hidden from
+regular tenants. As a rule of thumb, if these attributes are not visible in a
+GET /networks/<network-id> operation, this implies the user submitting the
+request is not authorized to view or manipulate provider network attributes.
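+
+Example to Get a Network with Provider Information
+
+A minimal sketch (the network ID below is illustrative, and networkClient is
+assumed to be an authenticated Networking v2 client) that decodes the
+extended attributes of a single network:
+
+    var network struct {
+        networks.Network
+        provider.NetworkProviderExt
+    }
+
+    networkID := "d32019d3-bc6e-4319-9c1d-6722fc136a22"
+
+    err := networks.Get(networkClient, networkID).ExtractInto(&network)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("%+v\n", network)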
+
+Example to List Networks with Provider Information
+
+    type NetworkWithProvider struct {
+        networks.Network
+        provider.NetworkProviderExt
+    }
+
+    var allNetworks []NetworkWithProvider
+
+    allPages, err := networks.List(networkClient, nil).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    err = networks.ExtractNetworksInto(allPages, &allNetworks)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, network := range allNetworks {
+        fmt.Printf("%+v\n", network)
+    }
+
+Example to Create a Provider Network
+
+    segments := []provider.Segment{
+        provider.Segment{
+            NetworkType:     "vxlan",
+            PhysicalNetwork: "br-ex",
+            SegmentationID:  615,
+        },
+    }
+
+    iTrue := true
+    networkCreateOpts := networks.CreateOpts{
+        Name:         "provider-network",
+        AdminStateUp: &iTrue,
+        Shared:       &iTrue,
+    }
+
+    createOpts := provider.CreateOptsExt{
+        CreateOptsBuilder: networkCreateOpts,
+        Segments:          segments,
+    }
+
+    network, err := networks.Create(networkClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+*/
+package provider
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go
new file mode 100644
index 00000000..32c27970
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go
@@ -0,0 +1,28 @@
+package provider
+
+import (
+    "github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+)
+
+// CreateOptsExt adds a Segments option to the base Network CreateOpts.
+type CreateOptsExt struct {
+    networks.CreateOptsBuilder
+    Segments []Segment `json:"segments,omitempty"`
+}
+
+// ToNetworkCreateMap adds segments to the base network creation options.
+func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+    base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+    if err != nil {
+        return nil, err
+    }
+
+    if opts.Segments == nil {
+        return base, nil
+    }
+
+    providerMap := base["network"].(map[string]interface{})
+    providerMap["segments"] = opts.Segments
+
+    return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go
new file mode 100644
index 00000000..9babd2ab
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go
@@ -0,0 +1,62 @@
+package provider
+
+import (
+    "encoding/json"
+    "strconv"
+)
+
+// NetworkProviderExt represents an extended form of a Network with additional
+// fields.
+type NetworkProviderExt struct {
+    // Specifies the nature of the physical network mapped to this network
+    // resource. Examples are flat, vlan, or gre.
+    NetworkType string `json:"provider:network_type"`
+
+    // Identifies the physical network on top of which this network object is
+    // being implemented. The OpenStack Networking API does not expose any
+    // facility for retrieving the list of available physical networks. As an
+    // example, in the Open vSwitch plug-in this is a symbolic name which is
+    // then mapped to specific bridges on each compute host through the Open
+    // vSwitch plug-in configuration file.
+ PhysicalNetwork string `json:"provider:physical_network"` + + // Identifies an isolated segment on the physical network; the nature of the + // segment depends on the segmentation model defined by network_type. For + // instance, if network_type is vlan, then this is a vlan identifier; + // otherwise, if network_type is gre, then this will be a gre key. + SegmentationID string `json:"-"` + + // Segments is an array of Segment which defines multiple physical bindings + // to logical networks. + Segments []Segment `json:"segments"` +} + +// Segment defines a physical binding to a logical network. +type Segment struct { + PhysicalNetwork string `json:"provider:physical_network"` + NetworkType string `json:"provider:network_type"` + SegmentationID int `json:"provider:segmentation_id"` +} + +func (r *NetworkProviderExt) UnmarshalJSON(b []byte) error { + type tmp NetworkProviderExt + var networkProviderExt struct { + tmp + SegmentationID interface{} `json:"provider:segmentation_id"` + } + + if err := json.Unmarshal(b, &networkProviderExt); err != nil { + return err + } + + *r = NetworkProviderExt(networkProviderExt.tmp) + + switch t := networkProviderExt.SegmentationID.(type) { + case float64: + r.SegmentationID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.SegmentationID = string(t) + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/doc.go new file mode 100644 index 00000000..4cd9c2c3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/doc.go @@ -0,0 +1,257 @@ +/* +Package policies provides information and interaction with the QoS policy extension +for the OpenStack Networking service. 
+
+Example to Get a Port with a QoS policy
+
+    var portWithQoS struct {
+        ports.Port
+        policies.QoSPolicyExt
+    }
+
+    portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+
+    err := ports.Get(client, portID).ExtractInto(&portWithQoS)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    fmt.Printf("Port: %+v\n", portWithQoS)
+
+Example to Create a Port with a QoS policy
+
+    var portWithQoS struct {
+        ports.Port
+        policies.QoSPolicyExt
+    }
+
+    policyID := "d6ae28ce-fcb5-4180-aa62-d260a27e09ae"
+    networkID := "7069db8d-e817-4b39-a654-d2dd76e73d36"
+
+    portCreateOpts := ports.CreateOpts{
+        NetworkID: networkID,
+    }
+
+    createOpts := policies.PortCreateOptsExt{
+        CreateOptsBuilder: portCreateOpts,
+        QoSPolicyID:       policyID,
+    }
+
+    err := ports.Create(client, createOpts).ExtractInto(&portWithQoS)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Port: %+v\n", portWithQoS)
+
+Example to Add a QoS policy to an existing Port
+
+    var portWithQoS struct {
+        ports.Port
+        policies.QoSPolicyExt
+    }
+
+    portUpdateOpts := ports.UpdateOpts{}
+
+    policyID := "d6ae28ce-fcb5-4180-aa62-d260a27e09ae"
+
+    updateOpts := policies.PortUpdateOptsExt{
+        UpdateOptsBuilder: portUpdateOpts,
+        QoSPolicyID:       &policyID,
+    }
+
+    err := ports.Update(client, "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&portWithQoS)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Port: %+v\n", portWithQoS)
+
+Example to Delete a QoS policy from an existing Port
+
+    var portWithQoS struct {
+        ports.Port
+        policies.QoSPolicyExt
+    }
+
+    portUpdateOpts := ports.UpdateOpts{}
+
+    policyID := ""
+
+    updateOpts := policies.PortUpdateOptsExt{
+        UpdateOptsBuilder: portUpdateOpts,
+        QoSPolicyID:       &policyID,
+    }
+
+    err := ports.Update(client, "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&portWithQoS)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Port: %+v\n", portWithQoS)
+
+Example to Get a Network with a QoS policy
+
+    var networkWithQoS struct {
+        networks.Network
+        policies.QoSPolicyExt
+    }
+
+    networkID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2"
+
+    err := networks.Get(client, networkID).ExtractInto(&networkWithQoS)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    fmt.Printf("Network: %+v\n", networkWithQoS)
+
+Example to Create a Network with a QoS policy
+
+    var networkWithQoS struct {
+        networks.Network
+        policies.QoSPolicyExt
+    }
+
+    policyID := "d6ae28ce-fcb5-4180-aa62-d260a27e09ae"
+
+    networkCreateOpts := networks.CreateOpts{
+        Name: "network_1",
+    }
+
+    createOpts := policies.NetworkCreateOptsExt{
+        CreateOptsBuilder: networkCreateOpts,
+        QoSPolicyID:       policyID,
+    }
+
+    err := networks.Create(client, createOpts).ExtractInto(&networkWithQoS)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Network: %+v\n", networkWithQoS)
+
+Example to Add a QoS policy to an existing Network
+
+    var networkWithQoS struct {
+        networks.Network
+        policies.QoSPolicyExt
+    }
+
+    networkUpdateOpts := networks.UpdateOpts{}
+
+    policyID := "d6ae28ce-fcb5-4180-aa62-d260a27e09ae"
+
+    updateOpts := policies.NetworkUpdateOptsExt{
+        UpdateOptsBuilder: networkUpdateOpts,
+        QoSPolicyID:       &policyID,
+    }
+
+    err := networks.Update(client, "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&networkWithQoS)
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Network: %+v\n", networkWithQoS)
+
+Example to Delete a QoS policy from an existing Network
+
+    var networkWithQoS struct {
+        networks.Network
+        policies.QoSPolicyExt
+    }
+
+    networkUpdateOpts := networks.UpdateOpts{}
+
+    policyID := ""
+ + updateOpts := policies.NetworkUpdateOptsExt{ + UpdateOptsBuilder: networkUpdateOpts, + QoSPolicyID: &policyID, + } + + err := networks.Update(client, "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&networkWithQoS) + if err != nil { + panic(err) + } + + fmt.Printf("Network: %+v\n", networkWithQoS) + +Example to List QoS policies + + shared := true + listOpts := policies.ListOpts{ + Name: "shared-policy", + Shared: &shared, + } + + allPages, err := policies.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPolicies, err := policies.ExtractPolicies(allPages) + if err != nil { + panic(err) + } + + for _, policy := range allPolicies { + fmt.Printf("%+v\n", policy) + } + +Example to Get a specific QoS policy + + policyID := "30a57f4a-336b-4382-8275-d708babd2241" + + policy, err := policies.Get(networkClient, policyID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", policy) + +Example to Create a QoS policy + + opts := policies.CreateOpts{ + Name: "shared-default-policy", + Shared: true, + IsDefault: true, + } + + policy, err := policies.Create(networkClient, opts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", policy) + +Example to Update a QoS policy + + shared := true + isDefault := false + opts := policies.UpdateOpts{ + Name: "new-name", + Shared: &shared, + IsDefault: &isDefault, + } + + policyID := "30a57f4a-336b-4382-8275-d708babd2241" + + policy, err := policies.Update(networkClient, policyID, opts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", policy) + +Example to Delete a QoS policy + + policyID := "30a57f4a-336b-4382-8275-d708babd2241" + + err := policies.Delete(networkClient, policyID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package policies diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/requests.go new file mode 100644 index 00000000..8e8b905d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/requests.go @@ -0,0 +1,267 @@ +package policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/pagination" +) + +// PortCreateOptsExt adds QoS options to the base ports.CreateOpts. +type PortCreateOptsExt struct { + ports.CreateOptsBuilder + + // QoSPolicyID represents an associated QoS policy. + QoSPolicyID string `json:"qos_policy_id,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. +func (opts PortCreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.QoSPolicyID != "" { + port["qos_policy_id"] = opts.QoSPolicyID + } + + return base, nil +} + +// PortUpdateOptsExt adds QoS options to the base ports.UpdateOpts. +type PortUpdateOptsExt struct { + ports.UpdateOptsBuilder + + // QoSPolicyID represents an associated QoS policy. + // Setting it to a pointer of an empty string will remove associated QoS policy from port. 
+ QoSPolicyID *string `json:"qos_policy_id,omitempty"` +} + +// ToPortUpdateMap casts a UpdateOpts struct to a map. +func (opts PortUpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.QoSPolicyID != nil { + qosPolicyID := *opts.QoSPolicyID + if qosPolicyID != "" { + port["qos_policy_id"] = qosPolicyID + } else { + port["qos_policy_id"] = nil + } + } + + return base, nil +} + +// NetworkCreateOptsExt adds QoS options to the base networks.CreateOpts. +type NetworkCreateOptsExt struct { + networks.CreateOptsBuilder + + // QoSPolicyID represents an associated QoS policy. + QoSPolicyID string `json:"qos_policy_id,omitempty"` +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +func (opts NetworkCreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.QoSPolicyID != "" { + network["qos_policy_id"] = opts.QoSPolicyID + } + + return base, nil +} + +// NetworkUpdateOptsExt adds QoS options to the base networks.UpdateOpts. +type NetworkUpdateOptsExt struct { + networks.UpdateOptsBuilder + + // QoSPolicyID represents an associated QoS policy. + // Setting it to a pointer of an empty string will remove associated QoS policy from network. + QoSPolicyID *string `json:"qos_policy_id,omitempty"` +} + +// ToNetworkUpdateMap casts a UpdateOpts struct to a map. +func (opts NetworkUpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.QoSPolicyID != nil { + qosPolicyID := *opts.QoSPolicyID + if qosPolicyID != "" { + network["qos_policy_id"] = qosPolicyID + } else { + network["qos_policy_id"] = nil + } + } + + return base, nil +} + +// PolicyListOptsBuilder allows extensions to add additional parameters to the List request. +type PolicyListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the Neutron API. Filtering is achieved by passing in struct field values +// that map to the Policy attributes you want to see returned. +// SortKey allows you to sort by a particular Policy attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for the pagination. +type ListOpts struct { + ID string `q:"id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + RevisionNumber *int `q:"revision_number"` + IsDefault *bool `q:"is_default"` + Shared *bool `q:"shared"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + Tags string `q:"tags"` + TagsAny string `q:"tags-any"` + NotTags string `q:"not-tags"` + NotTagsAny string `q:"not-tags-any"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// Policy. 
It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts PolicyListOptsBuilder) pagination.Pager {
+    url := listURL(c)
+    if opts != nil {
+        query, err := opts.ToPolicyListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return PolicyPage{pagination.LinkedPageBase{PageResult: r}}
+
+    })
+}
+
+// Get retrieves a specific QoS policy based on its ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+    _, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+    return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+    ToPolicyCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies parameters of a new QoS policy.
+type CreateOpts struct {
+    // Name is the human-readable name of the QoS policy.
+    Name string `json:"name"`
+
+    // TenantID is the id of the Identity project.
+    TenantID string `json:"tenant_id,omitempty"`
+
+    // ProjectID is the id of the Identity project.
+    ProjectID string `json:"project_id,omitempty"`
+
+    // Shared indicates whether this QoS policy is shared across all projects.
+    Shared bool `json:"shared,omitempty"`
+
+    // Description is the human-readable description for the QoS policy.
+    Description string `json:"description,omitempty"`
+
+    // IsDefault indicates if this QoS policy is default policy or not.
+    IsDefault bool `json:"is_default,omitempty"`
+}
+
+// ToPolicyCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "policy")
+}
+
+// Create requests the creation of a new QoS policy on the server.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+    b, err := opts.ToPolicyCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{201},
+    })
+    return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+    ToPolicyUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update a QoS policy.
+type UpdateOpts struct {
+    // Name is the human-readable name of the QoS policy.
+    Name string `json:"name,omitempty"`
+
+    // Shared indicates whether this QoS policy is shared across all projects.
+    Shared *bool `json:"shared,omitempty"`
+
+    // Description is the human-readable description for the QoS policy.
+    Description *string `json:"description,omitempty"`
+
+    // IsDefault indicates if this QoS policy is default policy or not.
+    IsDefault *bool `json:"is_default,omitempty"`
+}
+
+// ToPolicyUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "policy")
+}
+
+// Update accepts an UpdateOpts struct and updates an existing policy using the
+// values provided.
+func Update(c *gophercloud.ServiceClient, policyID string, opts UpdateOptsBuilder) (r UpdateResult) {
+    b, err := opts.ToPolicyUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = c.Put(updateURL(c, policyID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// Delete accepts a unique ID and deletes the QoS policy associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+    _, r.Err = c.Delete(deleteURL(c, id), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/results.go
new file mode 100644
index 00000000..43781813
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/results.go
@@ -0,0 +1,127 @@
+package policies
+
+import (
+    "time"
+
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// QoSPolicyExt represents additional resource attributes available with the QoS extension.
+type QoSPolicyExt struct {
+    // QoSPolicyID represents an associated QoS policy.
+    QoSPolicyID string `json:"qos_policy_id"`
+}
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a QoS policy.
+type GetResult struct {
+    commonResult
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret it as a QoS policy.
+type CreateResult struct {
+    commonResult
+}
+
+// UpdateResult represents the result of an Update operation. Call its Extract
+// method to interpret it as a QoS policy.
+type UpdateResult struct {
+    commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+    gophercloud.ErrResult
+}
+
+// Extract is a function that accepts a result and extracts a QoS policy resource.
+func (r commonResult) Extract() (*Policy, error) {
+    var s struct {
+        Policy *Policy `json:"policy"`
+    }
+    err := r.ExtractInto(&s)
+    return s.Policy, err
+}
+
+// Policy represents a QoS policy.
+type Policy struct {
+    // ID is the id of the policy.
+    ID string `json:"id"`
+
+    // Name is the human-readable name of the policy.
+    Name string `json:"name"`
+
+    // TenantID is the id of the Identity project.
+    TenantID string `json:"tenant_id"`
+
+    // ProjectID is the id of the Identity project.
+    ProjectID string `json:"project_id"`
+
+    // CreatedAt is the time at which the policy has been created.
+    CreatedAt time.Time `json:"created_at"`
+
+    // UpdatedAt is the time at which the policy has been last updated.
+    UpdatedAt time.Time `json:"updated_at"`
+
+    // IsDefault indicates if the policy is default policy or not.
+    IsDefault bool `json:"is_default"`
+
+    // Description is the human-readable description for the resource.
+    Description string `json:"description"`
+
+    // Shared indicates whether this policy is shared across all projects.
+    Shared bool `json:"shared"`
+
+    // RevisionNumber represents revision number of the policy.
+    RevisionNumber int `json:"revision_number"`
+
+    // Rules represents QoS rules of the policy.
+    Rules []map[string]interface{} `json:"rules"`
+
+    // Tags optionally set via extensions/attributestags
+    Tags []string `json:"tags"`
+}
+
+// PolicyPage stores a single page of Policies from a List() API call.
+type PolicyPage struct {
+    pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of policies has reached
+// the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r PolicyPage) NextPageURL() (string, error) {
+    var s struct {
+        Links []gophercloud.Link `json:"policies_links"`
+    }
+    err := r.ExtractInto(&s)
+    if err != nil {
+        return "", err
+    }
+    return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PolicyPage is empty.
+func (r PolicyPage) IsEmpty() (bool, error) {
+    is, err := ExtractPolicies(r)
+    return len(is) == 0, err
+}
+
+// ExtractPolicies accepts a PolicyPage, and extracts the elements into a slice of Policies.
+func ExtractPolicies(r pagination.Page) ([]Policy, error) {
+    var s []Policy
+    err := ExtractPolicysInto(r, &s)
+    return s, err
+}
+
+// ExtractPolicysInto extracts the elements into a slice of Policy structs.
+func ExtractPolicysInto(r pagination.Page, v interface{}) error {
+    return r.(PolicyPage).Result.ExtractIntoSlicePtr(v, "policies")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/urls.go
new file mode 100644
index 00000000..31ceff9c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/policies/urls.go
@@ -0,0 +1,33 @@
+package policies
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "qos/policies"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+    return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+    return c.ServiceURL(resourcePath, id)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+    return rootURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+    return rootURL(c)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+    return resourceURL(c, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/doc.go
new file mode 100644
index 00000000..8940e2f1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/doc.go
@@ -0,0 +1,236 @@
+/*
+Package rules provides the ability to retrieve and manage QoS policy rules through the Neutron API.
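+
+Every rule is a child of a QoS policy, so each call in this package takes the
+parent policy's ID. A minimal end-to-end sketch (names and values are
+illustrative, and networkClient is assumed to be an authenticated Networking
+v2 client) that creates a policy and attaches a bandwidth limit rule to it:
+
+    policy, err := policies.Create(networkClient, policies.CreateOpts{
+        Name: "bw-limited",
+    }).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+    createOpts := rules.CreateBandwidthLimitRuleOpts{
+        MaxKBps: 3000,
+    }
+
+    rule, err := rules.CreateBandwidthLimitRule(networkClient, policy.ID, createOpts).ExtractBandwidthLimitRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule %s created under policy %s\n", rule.ID, policy.ID)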
+
+Example of Listing BandwidthLimitRules
+
+    listOpts := rules.BandwidthLimitRulesListOpts{
+        MaxKBps: 3000,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    allPages, err := rules.ListBandwidthLimitRules(networkClient, policyID, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allBandwidthLimitRules, err := rules.ExtractBandwidthLimitRules(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, bandwidthLimitRule := range allBandwidthLimitRules {
+        fmt.Printf("%+v\n", bandwidthLimitRule)
+    }
+
+Example of Getting a single BandwidthLimitRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.GetBandwidthLimitRule(networkClient, policyID, ruleID).ExtractBandwidthLimitRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Creating a single BandwidthLimitRule
+
+    opts := rules.CreateBandwidthLimitRuleOpts{
+        MaxKBps:      2000,
+        MaxBurstKBps: 200,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    rule, err := rules.CreateBandwidthLimitRule(networkClient, policyID, opts).ExtractBandwidthLimitRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Updating a single BandwidthLimitRule
+
+    maxKBps := 500
+    maxBurstKBps := 0
+
+    opts := rules.UpdateBandwidthLimitRuleOpts{
+        MaxKBps:      &maxKBps,
+        MaxBurstKBps: &maxBurstKBps,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.UpdateBandwidthLimitRule(networkClient, policyID, ruleID, opts).ExtractBandwidthLimitRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Deleting a single BandwidthLimitRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    err := rules.DeleteBandwidthLimitRule(networkClient, policyID, ruleID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example of Listing DSCP marking rules
+
+    listOpts := rules.DSCPMarkingRulesListOpts{}
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    allPages, err := rules.ListDSCPMarkingRules(networkClient, policyID, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allDSCPMarkingRules, err := rules.ExtractDSCPMarkingRules(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, dscpMarkingRule := range allDSCPMarkingRules {
+        fmt.Printf("%+v\n", dscpMarkingRule)
+    }
+
+Example of Getting a single DSCPMarkingRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.GetDSCPMarkingRule(networkClient, policyID, ruleID).ExtractDSCPMarkingRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Creating a single DSCPMarkingRule
+
+    opts := rules.CreateDSCPMarkingRuleOpts{
+        DSCPMark: 20,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    rule, err := rules.CreateDSCPMarkingRule(networkClient, policyID, opts).ExtractDSCPMarkingRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Updating a single DSCPMarkingRule
+
+    dscpMark := 26
+
+    opts := rules.UpdateDSCPMarkingRuleOpts{
+        DSCPMark: &dscpMark,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.UpdateDSCPMarkingRule(networkClient, policyID, ruleID, opts).ExtractDSCPMarkingRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Deleting a single DSCPMarkingRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    err := rules.DeleteDSCPMarkingRule(networkClient, policyID, ruleID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example of Listing MinimumBandwidthRules
+
+    listOpts := rules.MinimumBandwidthRulesListOpts{
+        MinKBps: 3000,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    allPages, err := rules.ListMinimumBandwidthRules(networkClient, policyID, listOpts).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allMinimumBandwidthRules, err := rules.ExtractMinimumBandwidthRules(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, minimumBandwidthRule := range allMinimumBandwidthRules {
+        fmt.Printf("%+v\n", minimumBandwidthRule)
+    }
+
+Example of Getting a single MinimumBandwidthRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.GetMinimumBandwidthRule(networkClient, policyID, ruleID).ExtractMinimumBandwidthRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Creating a single MinimumBandwidthRule
+
+    opts := rules.CreateMinimumBandwidthRuleOpts{
+        MinKBps: 2000,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+
+    rule, err := rules.CreateMinimumBandwidthRule(networkClient, policyID, opts).ExtractMinimumBandwidthRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Updating a single MinimumBandwidthRule
+
+    minKBps := 500
+
+    opts := rules.UpdateMinimumBandwidthRuleOpts{
+        MinKBps: &minKBps,
+    }
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    rule, err := rules.UpdateMinimumBandwidthRule(networkClient, policyID, ruleID, opts).ExtractMinimumBandwidthRule()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("Rule: %+v\n", rule)
+
+Example of Deleting a single MinimumBandwidthRule
+
+    policyID := "501005fa-3b56-4061-aaca-3f24995112e1"
+    ruleID := "30a57f4a-336b-4382-8275-d708babd2241"
+
+    err := rules.DeleteMinimumBandwidthRule(networkClient, policyID, ruleID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package rules
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/requests.go
new file mode 100644
index 00000000..a003efa8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/requests.go
@@ -0,0 +1,393 @@
+package rules
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// BandwidthLimitRulesListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type BandwidthLimitRulesListOptsBuilder interface {
+    ToBandwidthLimitRulesListQuery() (string, error)
+}
+
+// BandwidthLimitRulesListOpts allows the filtering and sorting of paginated collections through
+// the Neutron API. Filtering is achieved by passing in struct field values
+// that map to the BandwidthLimitRules attributes you want to see returned.
+// SortKey allows you to sort by a particular BandwidthLimitRule attribute.
+// SortDir sets the direction, and is either `asc' or `desc'.
+// Marker and Limit are used for the pagination.
+type BandwidthLimitRulesListOpts struct {
+    ID           string `q:"id"`
+    TenantID     string `q:"tenant_id"`
+    MaxKBps      int    `q:"max_kbps"`
+    MaxBurstKBps int    `q:"max_burst_kbps"`
+    Direction    string `q:"direction"`
+    Limit        int    `q:"limit"`
+    Marker       string `q:"marker"`
+    SortKey      string `q:"sort_key"`
+    SortDir      string `q:"sort_dir"`
+    Tags         string `q:"tags"`
+    TagsAny      string `q:"tags-any"`
+    NotTags      string `q:"not-tags"`
+    NotTagsAny   string `q:"not-tags-any"`
+}
+
+// ToBandwidthLimitRulesListQuery formats a ListOpts into a query string.
+func (opts BandwidthLimitRulesListOpts) ToBandwidthLimitRulesListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// ListBandwidthLimitRules returns a Pager which allows you to iterate over a collection of
+// BandwidthLimitRules. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+func ListBandwidthLimitRules(c *gophercloud.ServiceClient, policyID string, opts BandwidthLimitRulesListOptsBuilder) pagination.Pager {
+    url := listBandwidthLimitRulesURL(c, policyID)
+    if opts != nil {
+        query, err := opts.ToBandwidthLimitRulesListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return BandwidthLimitRulePage{pagination.LinkedPageBase{PageResult: r}}
+
+    })
+}
+
+// GetBandwidthLimitRule retrieves a specific BandwidthLimitRule based on its ID.
+func GetBandwidthLimitRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r GetBandwidthLimitRuleResult) {
+    _, r.Err = c.Get(getBandwidthLimitRuleURL(c, policyID, ruleID), &r.Body, nil)
+    return
+}
+
+// CreateBandwidthLimitRuleOptsBuilder allows extensions to add additional parameters to the
+// CreateBandwidthLimitRule request.
+type CreateBandwidthLimitRuleOptsBuilder interface {
+    ToBandwidthLimitRuleCreateMap() (map[string]interface{}, error)
+}
+
+// CreateBandwidthLimitRuleOpts specifies parameters of a new BandwidthLimitRule.
+type CreateBandwidthLimitRuleOpts struct {
+    // MaxKBps is the maximum kilobits per second. It's a required parameter.
+    MaxKBps int `json:"max_kbps"`
+
+    // MaxBurstKBps is the maximum burst size in kilobits.
+    MaxBurstKBps int `json:"max_burst_kbps,omitempty"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction,omitempty"`
+}
+
+// ToBandwidthLimitRuleCreateMap constructs a request body from CreateBandwidthLimitRuleOpts.
+func (opts CreateBandwidthLimitRuleOpts) ToBandwidthLimitRuleCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "bandwidth_limit_rule")
+}
+
+// CreateBandwidthLimitRule requests the creation of a new BandwidthLimitRule on the server.
+func CreateBandwidthLimitRule(client *gophercloud.ServiceClient, policyID string, opts CreateBandwidthLimitRuleOptsBuilder) (r CreateBandwidthLimitRuleResult) {
+    b, err := opts.ToBandwidthLimitRuleCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Post(createBandwidthLimitRuleURL(client, policyID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{201},
+    })
+    return
+}
+
+// UpdateBandwidthLimitRuleOptsBuilder allows extensions to add additional parameters to the
+// UpdateBandwidthLimitRule request.
+type UpdateBandwidthLimitRuleOptsBuilder interface {
+    ToBandwidthLimitRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateBandwidthLimitRuleOpts specifies parameters for the Update call.
+type UpdateBandwidthLimitRuleOpts struct {
+    // MaxKBps is the maximum kilobits per second.
+    MaxKBps *int `json:"max_kbps,omitempty"`
+
+    // MaxBurstKBps is the maximum burst size in kilobits.
+    MaxBurstKBps *int `json:"max_burst_kbps,omitempty"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction,omitempty"`
+}
+
+// ToBandwidthLimitRuleUpdateMap constructs a request body from UpdateBandwidthLimitRuleOpts.
+func (opts UpdateBandwidthLimitRuleOpts) ToBandwidthLimitRuleUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "bandwidth_limit_rule")
+}
+
+// UpdateBandwidthLimitRule requests the update of an existing BandwidthLimitRule on the server.
+func UpdateBandwidthLimitRule(client *gophercloud.ServiceClient, policyID, ruleID string, opts UpdateBandwidthLimitRuleOptsBuilder) (r UpdateBandwidthLimitRuleResult) {
+    b, err := opts.ToBandwidthLimitRuleUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Put(updateBandwidthLimitRuleURL(client, policyID, ruleID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// DeleteBandwidthLimitRule accepts policy and rule ID and deletes the BandwidthLimitRule associated with them.
+func DeleteBandwidthLimitRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r DeleteBandwidthLimitRuleResult) {
+    _, r.Err = c.Delete(deleteBandwidthLimitRuleURL(c, policyID, ruleID), nil)
+    return
+}
+
+// DSCPMarkingRulesListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type DSCPMarkingRulesListOptsBuilder interface {
+    ToDSCPMarkingRulesListQuery() (string, error)
+}
+
+// DSCPMarkingRulesListOpts allows the filtering and sorting of paginated collections through
+// the Neutron API. Filtering is achieved by passing in struct field values
+// that map to the DSCPMarking attributes you want to see returned.
+// SortKey allows you to sort by a particular DSCPMarkingRule attribute.
+// SortDir sets the direction, and is either `asc' or `desc'.
+// Marker and Limit are used for the pagination.
+type DSCPMarkingRulesListOpts struct {
+    ID         string `q:"id"`
+    TenantID   string `q:"tenant_id"`
+    DSCPMark   int    `q:"dscp_mark"`
+    Limit      int    `q:"limit"`
+    Marker     string `q:"marker"`
+    SortKey    string `q:"sort_key"`
+    SortDir    string `q:"sort_dir"`
+    Tags       string `q:"tags"`
+    TagsAny    string `q:"tags-any"`
+    NotTags    string `q:"not-tags"`
+    NotTagsAny string `q:"not-tags-any"`
+}
+
+// ToDSCPMarkingRulesListQuery formats a ListOpts into a query string.
+func (opts DSCPMarkingRulesListOpts) ToDSCPMarkingRulesListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// ListDSCPMarkingRules returns a Pager which allows you to iterate over a collection of
+// DSCPMarkingRules. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+func ListDSCPMarkingRules(c *gophercloud.ServiceClient, policyID string, opts DSCPMarkingRulesListOptsBuilder) pagination.Pager {
+    url := listDSCPMarkingRulesURL(c, policyID)
+    if opts != nil {
+        query, err := opts.ToDSCPMarkingRulesListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return DSCPMarkingRulePage{pagination.LinkedPageBase{PageResult: r}}
+
+    })
+}
+
+// GetDSCPMarkingRule retrieves a specific DSCPMarkingRule based on its ID.
+func GetDSCPMarkingRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r GetDSCPMarkingRuleResult) {
+    _, r.Err = c.Get(getDSCPMarkingRuleURL(c, policyID, ruleID), &r.Body, nil)
+    return
+}
+
+// CreateDSCPMarkingRuleOptsBuilder allows extensions to add additional parameters to the
+// CreateDSCPMarkingRule request.
+type CreateDSCPMarkingRuleOptsBuilder interface {
+    ToDSCPMarkingRuleCreateMap() (map[string]interface{}, error)
+}
+
+// CreateDSCPMarkingRuleOpts specifies parameters of a new DSCPMarkingRule.
+type CreateDSCPMarkingRuleOpts struct {
+    // DSCPMark contains DSCP mark value.
+    DSCPMark int `json:"dscp_mark"`
+}
+
+// ToDSCPMarkingRuleCreateMap constructs a request body from CreateDSCPMarkingRuleOpts.
+func (opts CreateDSCPMarkingRuleOpts) ToDSCPMarkingRuleCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "dscp_marking_rule")
+}
+
+// CreateDSCPMarkingRule requests the creation of a new DSCPMarkingRule on the server.
+func CreateDSCPMarkingRule(client *gophercloud.ServiceClient, policyID string, opts CreateDSCPMarkingRuleOptsBuilder) (r CreateDSCPMarkingRuleResult) {
+    b, err := opts.ToDSCPMarkingRuleCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Post(createDSCPMarkingRuleURL(client, policyID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{201},
+    })
+    return
+}
+
+// UpdateDSCPMarkingRuleOptsBuilder allows extensions to add additional parameters to the
+// UpdateDSCPMarkingRule request.
+type UpdateDSCPMarkingRuleOptsBuilder interface {
+    ToDSCPMarkingRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateDSCPMarkingRuleOpts specifies parameters for the Update call.
+type UpdateDSCPMarkingRuleOpts struct {
+    // DSCPMark contains DSCP mark value.
+    DSCPMark *int `json:"dscp_mark,omitempty"`
+}
+
+// ToDSCPMarkingRuleUpdateMap constructs a request body from UpdateDSCPMarkingRuleOpts.
+func (opts UpdateDSCPMarkingRuleOpts) ToDSCPMarkingRuleUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "dscp_marking_rule")
+}
+
+// UpdateDSCPMarkingRule requests the update of an existing DSCPMarkingRule on the server.
+func UpdateDSCPMarkingRule(client *gophercloud.ServiceClient, policyID, ruleID string, opts UpdateDSCPMarkingRuleOptsBuilder) (r UpdateDSCPMarkingRuleResult) {
+    b, err := opts.ToDSCPMarkingRuleUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Put(updateDSCPMarkingRuleURL(client, policyID, ruleID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// DeleteDSCPMarkingRule accepts policy and rule ID and deletes the DSCPMarkingRule associated with them.
+func DeleteDSCPMarkingRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r DeleteDSCPMarkingRuleResult) {
+    _, r.Err = c.Delete(deleteDSCPMarkingRuleURL(c, policyID, ruleID), nil)
+    return
+}
+
+// MinimumBandwidthRulesListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type MinimumBandwidthRulesListOptsBuilder interface {
+    ToMinimumBandwidthRulesListQuery() (string, error)
+}
+
+// MinimumBandwidthRulesListOpts allows the filtering and sorting of paginated collections through
+// the Neutron API. Filtering is achieved by passing in struct field values
+// that map to the MinimumBandwidthRules attributes you want to see returned.
+// SortKey allows you to sort by a particular MinimumBandwidthRule attribute.
+// SortDir sets the direction, and is either `asc' or `desc'.
+// Marker and Limit are used for the pagination.
+type MinimumBandwidthRulesListOpts struct {
+    ID         string `q:"id"`
+    TenantID   string `q:"tenant_id"`
+    MinKBps    int    `q:"min_kbps"`
+    Direction  string `q:"direction"`
+    Limit      int    `q:"limit"`
+    Marker     string `q:"marker"`
+    SortKey    string `q:"sort_key"`
+    SortDir    string `q:"sort_dir"`
+    Tags       string `q:"tags"`
+    TagsAny    string `q:"tags-any"`
+    NotTags    string `q:"not-tags"`
+    NotTagsAny string `q:"not-tags-any"`
+}
+
+// ToMinimumBandwidthRulesListQuery formats a ListOpts into a query string.
+func (opts MinimumBandwidthRulesListOpts) ToMinimumBandwidthRulesListQuery() (string, error) {
+    q, err := gophercloud.BuildQueryString(opts)
+    return q.String(), err
+}
+
+// ListMinimumBandwidthRules returns a Pager which allows you to iterate over a collection of
+// MinimumBandwidthRules. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+func ListMinimumBandwidthRules(c *gophercloud.ServiceClient, policyID string, opts MinimumBandwidthRulesListOptsBuilder) pagination.Pager {
+    url := listMinimumBandwidthRulesURL(c, policyID)
+    if opts != nil {
+        query, err := opts.ToMinimumBandwidthRulesListQuery()
+        if err != nil {
+            return pagination.Pager{Err: err}
+        }
+        url += query
+    }
+    return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+        return MinimumBandwidthRulePage{pagination.LinkedPageBase{PageResult: r}}
+
+    })
+}
+
+// GetMinimumBandwidthRule retrieves a specific MinimumBandwidthRule based on its ID.
+func GetMinimumBandwidthRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r GetMinimumBandwidthRuleResult) {
+    _, r.Err = c.Get(getMinimumBandwidthRuleURL(c, policyID, ruleID), &r.Body, nil)
+    return
+}
+
+// CreateMinimumBandwidthRuleOptsBuilder allows extensions to add additional parameters to the
+// CreateMinimumBandwidthRule request.
+type CreateMinimumBandwidthRuleOptsBuilder interface {
+    ToMinimumBandwidthRuleCreateMap() (map[string]interface{}, error)
+}
+
+// CreateMinimumBandwidthRuleOpts specifies parameters of a new MinimumBandwidthRule.
+type CreateMinimumBandwidthRuleOpts struct {
+    // MinKBps is the minimum kilobits per second. It's a required parameter.
+    MinKBps int `json:"min_kbps"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction,omitempty"`
+}
+
+// ToMinimumBandwidthRuleCreateMap constructs a request body from CreateMinimumBandwidthRuleOpts.
+func (opts CreateMinimumBandwidthRuleOpts) ToMinimumBandwidthRuleCreateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "minimum_bandwidth_rule")
+}
+
+// CreateMinimumBandwidthRule requests the creation of a new MinimumBandwidthRule on the server.
+func CreateMinimumBandwidthRule(client *gophercloud.ServiceClient, policyID string, opts CreateMinimumBandwidthRuleOptsBuilder) (r CreateMinimumBandwidthRuleResult) {
+    b, err := opts.ToMinimumBandwidthRuleCreateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Post(createMinimumBandwidthRuleURL(client, policyID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{201},
+    })
+    return
+}
+
+// UpdateMinimumBandwidthRuleOptsBuilder allows extensions to add additional parameters to the
+// UpdateMinimumBandwidthRule request.
+type UpdateMinimumBandwidthRuleOptsBuilder interface {
+    ToMinimumBandwidthRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMinimumBandwidthRuleOpts specifies parameters for the Update call.
+type UpdateMinimumBandwidthRuleOpts struct {
+    // MinKBps is the minimum kilobits per second.
+    MinKBps *int `json:"min_kbps,omitempty"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction,omitempty"`
+}
+
+// ToMinimumBandwidthRuleUpdateMap constructs a request body from UpdateMinimumBandwidthRuleOpts.
+func (opts UpdateMinimumBandwidthRuleOpts) ToMinimumBandwidthRuleUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "minimum_bandwidth_rule")
+}
+
+// UpdateMinimumBandwidthRule requests the update of an existing MinimumBandwidthRule on the server.
+func UpdateMinimumBandwidthRule(client *gophercloud.ServiceClient, policyID, ruleID string, opts UpdateMinimumBandwidthRuleOptsBuilder) (r UpdateMinimumBandwidthRuleResult) {
+    b, err := opts.ToMinimumBandwidthRuleUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+    _, r.Err = client.Put(updateMinimumBandwidthRuleURL(client, policyID, ruleID), b, &r.Body, &gophercloud.RequestOpts{
+        OkCodes: []int{200},
+    })
+    return
+}
+
+// DeleteMinimumBandwidthRule accepts policy and rule ID and deletes the MinimumBandwidthRule associated with them.
+func DeleteMinimumBandwidthRule(c *gophercloud.ServiceClient, policyID, ruleID string) (r DeleteMinimumBandwidthRuleResult) {
+    _, r.Err = c.Delete(deleteMinimumBandwidthRuleURL(c, policyID, ruleID), nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/results.go
new file mode 100644
index 00000000..ec193465
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/results.go
@@ -0,0 +1,235 @@
+package rules
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+    gophercloud.Result
+}
+
+// ExtractBandwidthLimitRule is a function that accepts a result and extracts a BandwidthLimitRule.
+func (r commonResult) ExtractBandwidthLimitRule() (*BandwidthLimitRule, error) {
+    var s struct {
+        BandwidthLimitRule *BandwidthLimitRule `json:"bandwidth_limit_rule"`
+    }
+    err := r.ExtractInto(&s)
+    return s.BandwidthLimitRule, err
+}
+
+// GetBandwidthLimitRuleResult represents the result of a Get operation. Call its Extract
+// method to interpret it as a BandwidthLimitRule.
+type GetBandwidthLimitRuleResult struct {
+    commonResult
+}
+
+// CreateBandwidthLimitRuleResult represents the result of a Create operation. Call its Extract
+// method to interpret it as a BandwidthLimitRule.
+type CreateBandwidthLimitRuleResult struct {
+    commonResult
+}
+
+// UpdateBandwidthLimitRuleResult represents the result of an Update operation. Call its Extract
+// method to interpret it as a BandwidthLimitRule.
+type UpdateBandwidthLimitRuleResult struct {
+    commonResult
+}
+
+// DeleteBandwidthLimitRuleResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteBandwidthLimitRuleResult struct {
+    gophercloud.ErrResult
+}
+
+// BandwidthLimitRule represents a QoS policy rule to set bandwidth limits.
+type BandwidthLimitRule struct {
+    // ID is a unique ID of the rule.
+    ID string `json:"id"`
+
+    // TenantID is the ID of the Identity project.
+    TenantID string `json:"tenant_id"`
+
+    // MaxKBps is the maximum kilobits per second.
+    MaxKBps int `json:"max_kbps"`
+
+    // MaxBurstKBps is the maximum burst size in kilobits.
+    MaxBurstKBps int `json:"max_burst_kbps"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction"`
+
+    // Tags optionally set via extensions/attributestags.
+    Tags []string `json:"tags"`
+}
+
+// BandwidthLimitRulePage stores a single page of BandwidthLimitRules from a List() API call.
+type BandwidthLimitRulePage struct {
+    pagination.LinkedPageBase
+}
+
+// IsEmpty checks whether a BandwidthLimitRulePage is empty.
+func (r BandwidthLimitRulePage) IsEmpty() (bool, error) {
+    is, err := ExtractBandwidthLimitRules(r)
+    return len(is) == 0, err
+}
+
+// ExtractBandwidthLimitRules accepts a BandwidthLimitRulePage, and extracts the elements into a slice of
+// BandwidthLimitRules.
+func ExtractBandwidthLimitRules(r pagination.Page) ([]BandwidthLimitRule, error) {
+    var s []BandwidthLimitRule
+    err := ExtractBandwidthLimitRulesInto(r, &s)
+    return s, err
+}
+
+// ExtractBandwidthLimitRulesInto extracts the elements into a slice of BandwidthLimitRule structs.
+func ExtractBandwidthLimitRulesInto(r pagination.Page, v interface{}) error {
+    return r.(BandwidthLimitRulePage).Result.ExtractIntoSlicePtr(v, "bandwidth_limit_rules")
+}
+
+// ExtractDSCPMarkingRule is a function that accepts a result and extracts a DSCPMarkingRule.
+func (r commonResult) ExtractDSCPMarkingRule() (*DSCPMarkingRule, error) {
+    var s struct {
+        DSCPMarkingRule *DSCPMarkingRule `json:"dscp_marking_rule"`
+    }
+    err := r.ExtractInto(&s)
+    return s.DSCPMarkingRule, err
+}
+
+// GetDSCPMarkingRuleResult represents the result of a Get operation. Call its Extract
+// method to interpret it as a DSCPMarkingRule.
+type GetDSCPMarkingRuleResult struct {
+    commonResult
+}
+
+// CreateDSCPMarkingRuleResult represents the result of a Create operation. Call its Extract
+// method to interpret it as a DSCPMarkingRule.
+type CreateDSCPMarkingRuleResult struct {
+    commonResult
+}
+
+// UpdateDSCPMarkingRuleResult represents the result of an Update operation. Call its Extract
+// method to interpret it as a DSCPMarkingRule.
+type UpdateDSCPMarkingRuleResult struct {
+    commonResult
+}
+
+// DeleteDSCPMarkingRuleResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteDSCPMarkingRuleResult struct {
+    gophercloud.ErrResult
+}
+
+// DSCPMarkingRule represents a QoS policy rule to set DSCP marking.
+type DSCPMarkingRule struct {
+    // ID is a unique ID of the rule.
+    ID string `json:"id"`
+
+    // TenantID is the ID of the Identity project.
+    TenantID string `json:"tenant_id"`
+
+    // DSCPMark contains DSCP mark value.
+    DSCPMark int `json:"dscp_mark"`
+
+    // Tags optionally set via extensions/attributestags.
+    Tags []string `json:"tags"`
+}
+
+// DSCPMarkingRulePage stores a single page of DSCPMarkingRules from a List() API call.
+type DSCPMarkingRulePage struct {
+    pagination.LinkedPageBase
+}
+
+// IsEmpty checks whether a DSCPMarkingRulePage is empty.
+func (r DSCPMarkingRulePage) IsEmpty() (bool, error) {
+    is, err := ExtractDSCPMarkingRules(r)
+    return len(is) == 0, err
+}
+
+// ExtractDSCPMarkingRules accepts a DSCPMarkingRulePage, and extracts the elements into a slice of
+// DSCPMarkingRules.
+func ExtractDSCPMarkingRules(r pagination.Page) ([]DSCPMarkingRule, error) {
+    var s []DSCPMarkingRule
+    err := ExtractDSCPMarkingRulesInto(r, &s)
+    return s, err
+}
+
+// ExtractDSCPMarkingRulesInto extracts the elements into a slice of DSCPMarkingRule structs.
+func ExtractDSCPMarkingRulesInto(r pagination.Page, v interface{}) error {
+    return r.(DSCPMarkingRulePage).Result.ExtractIntoSlicePtr(v, "dscp_marking_rules")
+}
+
+// ExtractMinimumBandwidthRule is a function that accepts a result and extracts a MinimumBandwidthRule.
+func (r commonResult) ExtractMinimumBandwidthRule() (*MinimumBandwidthRule, error) {
+    var s struct {
+        MinimumBandwidthRule *MinimumBandwidthRule `json:"minimum_bandwidth_rule"`
+    }
+    err := r.ExtractInto(&s)
+    return s.MinimumBandwidthRule, err
+}
+
+// GetMinimumBandwidthRuleResult represents the result of a Get operation. Call its Extract
+// method to interpret it as a MinimumBandwidthRule.
+type GetMinimumBandwidthRuleResult struct {
+    commonResult
+}
+
+// CreateMinimumBandwidthRuleResult represents the result of a Create operation. Call its Extract
+// method to interpret it as a MinimumBandwidthRule.
+type CreateMinimumBandwidthRuleResult struct {
+    commonResult
+}
+
+// UpdateMinimumBandwidthRuleResult represents the result of an Update operation. Call its Extract
+// method to interpret it as a MinimumBandwidthRule.
+type UpdateMinimumBandwidthRuleResult struct {
+    commonResult
+}
+
+// DeleteMinimumBandwidthRuleResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteMinimumBandwidthRuleResult struct {
+    gophercloud.ErrResult
+}
+
+// MinimumBandwidthRule represents a QoS policy rule to set minimum bandwidth.
+type MinimumBandwidthRule struct {
+    // ID is a unique ID of the rule.
+    ID string `json:"id"`
+
+    // TenantID is the ID of the Identity project.
+    TenantID string `json:"tenant_id"`
+
+    // MinKBps is the minimum kilobits per second.
+    MinKBps int `json:"min_kbps"`
+
+    // Direction represents the direction of traffic.
+    Direction string `json:"direction"`
+
+    // Tags optionally set via extensions/attributestags.
+    Tags []string `json:"tags"`
+}
+
+// MinimumBandwidthRulePage stores a single page of MinimumBandwidthRules from a List() API call.
+type MinimumBandwidthRulePage struct {
+    pagination.LinkedPageBase
+}
+
+// IsEmpty checks whether a MinimumBandwidthRulePage is empty.
+func (r MinimumBandwidthRulePage) IsEmpty() (bool, error) {
+    is, err := ExtractMinimumBandwidthRules(r)
+    return len(is) == 0, err
+}
+
+// ExtractMinimumBandwidthRules accepts a MinimumBandwidthRulePage, and extracts the elements into a slice of
+// MinimumBandwidthRules.
+func ExtractMinimumBandwidthRules(r pagination.Page) ([]MinimumBandwidthRule, error) {
+	var s []MinimumBandwidthRule
+	err := ExtractMinimumBandwidthRulesInto(r, &s)
+	return s, err
+}
+
+// ExtractMinimumBandwidthRulesInto extracts the elements into a slice of MinimumBandwidthRule structs.
+func ExtractMinimumBandwidthRulesInto(r pagination.Page, v interface{}) error {
+	return r.(MinimumBandwidthRulePage).Result.ExtractIntoSlicePtr(v, "minimum_bandwidth_rules")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/urls.go
new file mode 100644
index 00000000..6db2bce8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/rules/urls.go
@@ -0,0 +1,95 @@
+package rules
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath = "qos/policies"
+
+	bandwidthLimitRulesResourcePath   = "bandwidth_limit_rules"
+	dscpMarkingRulesResourcePath      = "dscp_marking_rules"
+	minimumBandwidthRulesResourcePath = "minimum_bandwidth_rules"
+)
+
+func bandwidthLimitRulesRootURL(c *gophercloud.ServiceClient, policyID string) string {
+	return c.ServiceURL(rootPath, policyID, bandwidthLimitRulesResourcePath)
+}
+
+func bandwidthLimitRulesResourceURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return c.ServiceURL(rootPath, policyID, bandwidthLimitRulesResourcePath, ruleID)
+}
+
+func listBandwidthLimitRulesURL(c *gophercloud.ServiceClient, policyID string) string {
+	return bandwidthLimitRulesRootURL(c, policyID)
+}
+
+func getBandwidthLimitRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return bandwidthLimitRulesResourceURL(c, policyID, ruleID)
+}
+
+func createBandwidthLimitRuleURL(c *gophercloud.ServiceClient, policyID string) string {
+	return bandwidthLimitRulesRootURL(c, policyID)
+}
+
+func updateBandwidthLimitRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return bandwidthLimitRulesResourceURL(c, policyID, ruleID)
+}
+
+func deleteBandwidthLimitRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return bandwidthLimitRulesResourceURL(c, policyID, ruleID)
+}
+
+func dscpMarkingRulesRootURL(c *gophercloud.ServiceClient, policyID string) string {
+	return c.ServiceURL(rootPath, policyID, dscpMarkingRulesResourcePath)
+}
+
+func dscpMarkingRulesResourceURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return c.ServiceURL(rootPath, policyID, dscpMarkingRulesResourcePath, ruleID)
+}
+
+func listDSCPMarkingRulesURL(c *gophercloud.ServiceClient, policyID string) string {
+	return dscpMarkingRulesRootURL(c, policyID)
+}
+
+func getDSCPMarkingRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return dscpMarkingRulesResourceURL(c, policyID, ruleID)
+}
+
+func createDSCPMarkingRuleURL(c *gophercloud.ServiceClient, policyID string) string {
+	return dscpMarkingRulesRootURL(c, policyID)
+}
+
+func updateDSCPMarkingRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return dscpMarkingRulesResourceURL(c, policyID, ruleID)
+}
+
+func deleteDSCPMarkingRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string {
+	return dscpMarkingRulesResourceURL(c, policyID, ruleID)
+}
+
+func minimumBandwidthRulesRootURL(c *gophercloud.ServiceClient, policyID string) string {
+	return c.ServiceURL(rootPath, policyID,
minimumBandwidthRulesResourcePath) +} + +func minimumBandwidthRulesResourceURL(c *gophercloud.ServiceClient, policyID, ruleID string) string { + return c.ServiceURL(rootPath, policyID, minimumBandwidthRulesResourcePath, ruleID) +} + +func listMinimumBandwidthRulesURL(c *gophercloud.ServiceClient, policyID string) string { + return minimumBandwidthRulesRootURL(c, policyID) +} + +func getMinimumBandwidthRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string { + return minimumBandwidthRulesResourceURL(c, policyID, ruleID) +} + +func createMinimumBandwidthRuleURL(c *gophercloud.ServiceClient, policyID string) string { + return minimumBandwidthRulesRootURL(c, policyID) +} + +func updateMinimumBandwidthRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string { + return minimumBandwidthRulesResourceURL(c, policyID, ruleID) +} + +func deleteMinimumBandwidthRuleURL(c *gophercloud.ServiceClient, policyID, ruleID string) string { + return minimumBandwidthRulesResourceURL(c, policyID, ruleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go new file mode 100644 index 00000000..7d8bbcaa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go @@ -0,0 +1,58 @@ +/* +Package groups provides information and interaction with Security Groups +for the OpenStack Networking service. + +Example to List Security Groups + + listOpts := groups.ListOpts{ + TenantID: "966b3c7d36a24facaf20b7e458bf2192", + } + + allPages, err := groups.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + panic(err) + } + + for _, group := range allGroups { + fmt.Printf("%+v\n", group) + } + +Example to Create a Security Group + + createOpts := groups.CreateOpts{ + Name: "group_name", + Description: "A Security Group", + } + + group, err := groups.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + updateOpts := groups.UpdateOpts{ + Name: "new_name", + } + + group, err := groups.Update(networkClient, groupID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := groups.Delete(networkClient, groupID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go new file mode 100644 index 00000000..a22cd306 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go @@ -0,0 +1,166 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the group attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. 
SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + Description string `q:"description"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + Tags string `q:"tags"` + TagsAny string `q:"tags-any"` + NotTags string `q:"not-tags"` + NotTagsAny string `q:"not-tags-any"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security groups. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group. +type CreateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name" required:"true"` + + // TenantID is the UUID of the project who owns the Group. + // Only administrative users can specify a tenant UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Group. + // Only administrative users can specify a tenant UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // Describes the security group. + Description string `json:"description,omitempty"` +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing security +// group. +type UpdateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Describes the security group. + Description *string `json:"description,omitempty"` +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update is an operation which updates an existing security group. 
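+//
+// Because UpdateOpts.Description is a *string, an empty description can be
+// set explicitly. A minimal sketch, assuming networkClient and groupID are
+// already in scope:
+//
+//	desc := ""
+//	opts := groups.UpdateOpts{Name: "new_name", Description: &desc}
+//	group, err := groups.Update(networkClient, groupID, opts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}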
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get retrieves a particular security group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular security group based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a security group's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractGroups(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "security group"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "security group"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go new file mode 100644 index 00000000..468952b3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go @@ -0,0 +1,108 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecGroup represents a container for security group rules. +type SecGroup struct { + // The UUID for the security group. + ID string + + // Human-readable name for the security group. Might not be unique. + // Cannot be named "default" as that is automatically created for a tenant. + Name string + + // The security group description. + Description string + + // A slice of security group rules that dictate the permitted behaviour for + // traffic entering and leaving the group. + Rules []rules.SecGroupRule `json:"security_group_rules"` + + // TenantID is the project owner of the security group. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the security group. + ProjectID string `json:"project_id"` + + // Tags optionally set via extensions/attributestags + Tags []string `json:"tags"` +} + +// SecGroupPage is the page returned by a pager when traversing over a +// collection of security groups. +type SecGroupPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security groups has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. 
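+//
+// Callers rarely invoke this directly; pages are usually walked with the
+// pager, e.g. (a sketch, with networkClient as a placeholder):
+//
+//	err := groups.List(networkClient, groups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+//		secGroups, err := groups.ExtractGroups(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Printf("%+v\n", secGroups)
+//		return true, nil
+//	})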
+func (r SecGroupPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"security_groups_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SecGroupPage struct is empty. +func (r SecGroupPage) IsEmpty() (bool, error) { + is, err := ExtractGroups(r) + return len(is) == 0, err +} + +// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct, +// and extracts the elements into a slice of SecGroup structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractGroups(r pagination.Page) ([]SecGroup, error) { + var s struct { + SecGroups []SecGroup `json:"security_groups"` + } + err := (r.(SecGroupPage)).ExtractInto(&s) + return s.SecGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security group. +func (r commonResult) Extract() (*SecGroup, error) { + var s struct { + SecGroup *SecGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecGroup, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SecGroup. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a SecGroup. +type UpdateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SecGroup. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go new file mode 100644 index 00000000..104cbcc5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go @@ -0,0 +1,13 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +const rootPath = "security-groups" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go new file mode 100644 index 00000000..bf66dc8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go @@ -0,0 +1,50 @@ +/* +Package rules provides information and interaction with Security Group Rules +for the OpenStack Networking service. 
+ +Example to List Security Groups Rules + + listOpts := rules.ListOpts{ + Protocol: "tcp", + } + + allPages, err := rules.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRules, err := rules.ExtractRules(allPages) + if err != nil { + panic(err) + } + + for _, rule := range allRules { + fmt.Printf("%+v\n", rule) + } + +Example to Create a Security Group Rule + + createOpts := rules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: rules.EtherType4, + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + + rule, err := rules.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group Rule + + ruleID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := rules.Delete(networkClient, ruleID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package rules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go new file mode 100644 index 00000000..c7741ffc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go @@ -0,0 +1,159 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the security group rule attributes you want to see returned. SortKey allows +// you to sort by a particular network attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Direction string `q:"direction"` + EtherType string `q:"ethertype"` + ID string `q:"id"` + Description string `q:"description"` + PortRangeMax int `q:"port_range_max"` + PortRangeMin int `q:"port_range_min"` + Protocol string `q:"protocol"` + RemoteGroupID string `q:"remote_group_id"` + RemoteIPPrefix string `q:"remote_ip_prefix"` + SecGroupID string `q:"security_group_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security group rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. 
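+//
+// For example, to page through only the rules of a single security group
+// (a sketch; the group ID is a placeholder):
+//
+//	listOpts := rules.ListOpts{
+//		SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5",
+//	}
+//	allPages, err := rules.List(networkClient, listOpts).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	allRules, err := rules.ExtractRules(allPages)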
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type RuleDirection string +type RuleProtocol string +type RuleEtherType string + +// Constants useful for CreateOpts +const ( + DirIngress RuleDirection = "ingress" + DirEgress RuleDirection = "egress" + EtherType4 RuleEtherType = "IPv4" + EtherType6 RuleEtherType = "IPv6" + ProtocolAH RuleProtocol = "ah" + ProtocolDCCP RuleProtocol = "dccp" + ProtocolEGP RuleProtocol = "egp" + ProtocolESP RuleProtocol = "esp" + ProtocolGRE RuleProtocol = "gre" + ProtocolICMP RuleProtocol = "icmp" + ProtocolIGMP RuleProtocol = "igmp" + ProtocolIPv6Encap RuleProtocol = "ipv6-encap" + ProtocolIPv6Frag RuleProtocol = "ipv6-frag" + ProtocolIPv6ICMP RuleProtocol = "ipv6-icmp" + ProtocolIPv6NoNxt RuleProtocol = "ipv6-nonxt" + ProtocolIPv6Opts RuleProtocol = "ipv6-opts" + ProtocolIPv6Route RuleProtocol = "ipv6-route" + ProtocolOSPF RuleProtocol = "ospf" + ProtocolPGM RuleProtocol = "pgm" + ProtocolRSVP RuleProtocol = "rsvp" + ProtocolSCTP RuleProtocol = "sctp" + ProtocolTCP RuleProtocol = "tcp" + ProtocolUDP RuleProtocol = "udp" + ProtocolUDPLite RuleProtocol = "udplite" + ProtocolVRRP RuleProtocol = "vrrp" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group +// rule. +type CreateOpts struct { + // Must be either "ingress" or "egress": the direction in which the security + // group rule is applied. + Direction RuleDirection `json:"direction" required:"true"` + + // String description of each rule, optional + Description string `json:"description,omitempty"` + + // Must be "IPv4" or "IPv6", and addresses represented in CIDR must match the + // ingress or egress rules. + EtherType RuleEtherType `json:"ethertype" required:"true"` + + // The security group ID to associate with this security group rule. + SecGroupID string `json:"security_group_id" required:"true"` + + // The maximum port number in the range that is matched by the security group + // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If + // the protocol is ICMP, this value must be an ICMP type. + PortRangeMax int `json:"port_range_max,omitempty"` + + // The minimum port number in the range that is matched by the security group + // rule. If the protocol is TCP or UDP, this value must be less than or equal + // to the value of the PortRangeMax attribute. If the protocol is ICMP, this + // value must be an ICMP type. + PortRangeMin int `json:"port_range_min,omitempty"` + + // The protocol that is matched by the security group rule. Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol RuleProtocol `json:"protocol,omitempty"` + + // The remote group ID to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id,omitempty"` + + // The remote IP prefix to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. 
This attribute matches the
+	// specified IP prefix as the source IP address of the IP packet.
+	RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"`
+
+	// ProjectID is the UUID of the project who owns the Rule.
+	// Only administrative users can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+}
+
+// ToSecGroupRuleCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToSecGroupRuleCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "security_group_rule")
+}
+
+// Create is an operation which adds a new security group rule and associates it
+// with an existing security group (whose ID is specified in CreateOpts).
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSecGroupRuleCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular security group rule based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a particular security group rule based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
new file mode 100644
index 00000000..3bf5501d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go
@@ -0,0 +1,127 @@
+package rules
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SecGroupRule represents a rule to dictate the behaviour of incoming or
+// outgoing traffic for a particular security group.
+type SecGroupRule struct {
+	// The UUID for this security group rule.
+	ID string
+
+	// The direction in which the security group rule is applied. The only values
+	// allowed are "ingress" or "egress". For a compute instance, an ingress
+	// security group rule is applied to incoming (ingress) traffic for that
+	// instance. An egress rule is applied to traffic leaving the instance.
+	Direction string
+
+	// Description of the rule.
+	Description string `json:"description"`
+
+	// Must be IPv4 or IPv6, and addresses represented in CIDR must match the
+	// ingress or egress rules.
+	EtherType string `json:"ethertype"`
+
+	// The security group ID to associate with this security group rule.
+	SecGroupID string `json:"security_group_id"`
+
+	// The minimum port number in the range that is matched by the security group
+	// rule. If the protocol is TCP or UDP, this value must be less than or equal
+	// to the value of the PortRangeMax attribute. If the protocol is ICMP, this
+	// value must be an ICMP type.
+	PortRangeMin int `json:"port_range_min"`
+
+	// The maximum port number in the range that is matched by the security group
+	// rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If
+	// the protocol is ICMP, this value must be an ICMP type.
+	PortRangeMax int `json:"port_range_max"`
+
+	// The protocol that is matched by the security group rule.
Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol string + + // The remote group ID to be associated with this security group rule. You + // can specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id"` + + // The remote IP prefix to be associated with this security group rule. You + // can specify either RemoteGroupID or RemoteIPPrefix . This attribute + // matches the specified IP prefix as the source IP address of the IP packet. + RemoteIPPrefix string `json:"remote_ip_prefix"` + + // TenantID is the project owner of this security group rule. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of this security group rule. + ProjectID string `json:"project_id"` +} + +// SecGroupRulePage is the page returned by a pager when traversing over a +// collection of security group rules. +type SecGroupRulePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security group rules has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. +func (r SecGroupRulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"security_group_rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SecGroupRulePage struct is empty. +func (r SecGroupRulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct, +// and extracts the elements into a slice of SecGroupRule structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]SecGroupRule, error) { + var s struct { + SecGroupRules []SecGroupRule `json:"security_group_rules"` + } + err := (r.(SecGroupRulePage)).ExtractInto(&s) + return s.SecGroupRules, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security rule. +func (r commonResult) Extract() (*SecGroupRule, error) { + var s struct { + SecGroupRule *SecGroupRule `json:"security_group_rule"` + } + err := r.ExtractInto(&s) + return s.SecGroupRule, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SecGroupRule. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SecGroupRule. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
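+//
+// For example (a sketch; ruleID is a placeholder):
+//
+//	if err := rules.Delete(networkClient, ruleID).ExtractErr(); err != nil {
+//		panic(err)
+//	}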
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
new file mode 100644
index 00000000..a5ede0e8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go
@@ -0,0 +1,13 @@
+package rules
+
+import "github.com/gophercloud/gophercloud"
+
+const rootPath = "security-group-rules"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go
new file mode 100644
index 00000000..2a9fe63d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go
@@ -0,0 +1,72 @@
+/*
+Package subnetpools provides the ability to retrieve and manage subnetpools through the Neutron API.
+
+Example to List Subnetpools
+
+	listOpts := subnetpools.ListOpts{
+		IPVersion: 6,
+	}
+
+	allPages, err := subnetpools.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSubnetpools, err := subnetpools.ExtractSubnetPools(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, subnetpool := range allSubnetpools {
+		fmt.Printf("%+v\n", subnetpool)
+	}
+
+Example to Get a Subnetpool
+
+	subnetPoolID := "23d5d3f7-9dfa-4f73-b72b-8b0b0063ec55"
+	subnetPool, err := subnetpools.Get(networkClient, subnetPoolID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a new Subnetpool
+
+	subnetPoolName := "private_pool"
+	subnetPoolPrefixes := []string{
+		"10.0.0.0/8",
+		"172.16.0.0/12",
+		"192.168.0.0/16",
+	}
+	subnetPoolOpts := subnetpools.CreateOpts{
+		Name:     subnetPoolName,
+		Prefixes: subnetPoolPrefixes,
+	}
+	subnetPool, err := subnetpools.Create(networkClient, subnetPoolOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Subnetpool
+
+	subnetPoolID := "099546ca-788d-41e5-a76d-17d8cd282d3e"
+	updateOpts := subnetpools.UpdateOpts{
+		Prefixes: []string{
+			"fdf7:b13d:dead:beef::/64",
+		},
+		MaxPrefixLen: 72,
+	}
+
+	subnetPool, err := subnetpools.Update(networkClient, subnetPoolID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Subnetpool
+
+	subnetPoolID := "23d5d3f7-9dfa-4f73-b72b-8b0b0063ec55"
+	err := subnetpools.Delete(networkClient, subnetPoolID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package subnetpools
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go
new file mode 100644
index 00000000..c54813b8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go
@@ -0,0 +1,225 @@
+package subnetpools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
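+//
+// ListOpts below satisfies this interface. Passing nil to List is also valid
+// when no filtering or sorting is needed, e.g. (a sketch; networkClient is a
+// placeholder):
+//
+//	allPages, err := subnetpools.List(networkClient, nil).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	allPools, err := subnetpools.ExtractSubnetPools(allPages)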
+type ListOptsBuilder interface { + ToSubnetPoolListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the Neutron API. Filtering is achieved by passing in struct field values +// that map to the subnetpool attributes you want to see returned. +// SortKey allows you to sort by a particular subnetpool attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for the pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + DefaultQuota int `q:"default_quota"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + DefaultPrefixLen int `q:"default_prefixlen"` + MinPrefixLen int `q:"min_prefixlen"` + MaxPrefixLen int `q:"max_prefixlen"` + AddressScopeID string `q:"address_scope_id"` + IPVersion int `q:"ip_version"` + Shared *bool `q:"shared"` + Description string `q:"description"` + IsDefault *bool `q:"is_default"` + RevisionNumber int `q:"revision_number"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + Tags string `q:"tags"` + TagsAny string `q:"tags-any"` + NotTags string `q:"not-tags"` + NotTagsAny string `q:"not-tags-any"` +} + +// ToSubnetPoolListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSubnetPoolListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// subnetpools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only the subnetpools owned by the project +// of the user submitting the request, unless the user has the administrative role. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToSubnetPoolListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return SubnetPoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific subnetpool based on its ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSubnetPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters of a new subnetpool. +type CreateOpts struct { + // Name is the human-readable name of the subnetpool. + Name string `json:"name"` + + // DefaultQuota is the per-project quota on the prefix space + // that can be allocated from the subnetpool for project subnets. + DefaultQuota int `json:"default_quota,omitempty"` + + // TenantID is the id of the Identity project. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the id of the Identity project. + ProjectID string `json:"project_id,omitempty"` + + // Prefixes is the list of subnet prefixes to assign to the subnetpool. + // Neutron API merges adjacent prefixes and treats them as a single prefix. + // Each subnet prefix must be unique among all subnet prefixes in all subnetpools + // that are associated with the address scope. 
+	Prefixes []string `json:"prefixes"`
+
+	// DefaultPrefixLen is the size of the prefix to allocate when the cidr
+	// or prefixlen attributes are omitted when you create the subnet.
+	// Defaults to the MinPrefixLen.
+	DefaultPrefixLen int `json:"default_prefixlen,omitempty"`
+
+	// MinPrefixLen is the smallest prefix that can be allocated from a subnetpool.
+	// For IPv4 subnetpools, default is 8.
+	// For IPv6 subnetpools, default is 64.
+	MinPrefixLen int `json:"min_prefixlen,omitempty"`
+
+	// MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool.
+	// For IPv4 subnetpools, default is 32.
+	// For IPv6 subnetpools, default is 128.
+	MaxPrefixLen int `json:"max_prefixlen,omitempty"`
+
+	// AddressScopeID is the Neutron address scope to assign to the subnetpool.
+	AddressScopeID string `json:"address_scope_id,omitempty"`
+
+	// Shared indicates whether this network is shared across all projects.
+	Shared bool `json:"shared,omitempty"`
+
+	// Description is the human-readable description for the resource.
+	Description string `json:"description,omitempty"`
+
+	// IsDefault indicates if the subnetpool is the default pool or not.
+	IsDefault bool `json:"is_default,omitempty"`
+}
+
+// ToSubnetPoolCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToSubnetPoolCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "subnetpool")
+}
+
+// Create requests the creation of a new subnetpool on the server.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSubnetPoolCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToSubnetPoolUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update a subnetpool.
+type UpdateOpts struct {
+	// Name is the human-readable name of the subnetpool.
+	Name string `json:"name,omitempty"`
+
+	// DefaultQuota is the per-project quota on the prefix space
+	// that can be allocated from the subnetpool for project subnets.
+	DefaultQuota *int `json:"default_quota,omitempty"`
+
+	// TenantID is the id of the Identity project.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// ProjectID is the id of the Identity project.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// Prefixes is the list of subnet prefixes to assign to the subnetpool.
+	// Neutron API merges adjacent prefixes and treats them as a single prefix.
+	// Each subnet prefix must be unique among all subnet prefixes in all subnetpools
+	// that are associated with the address scope.
+	Prefixes []string `json:"prefixes,omitempty"`
+
+	// DefaultPrefixLen is the size of the prefix to allocate when the cidr
+	// or prefixlen attributes are omitted when you create the subnet.
+	// Defaults to the MinPrefixLen.
+	DefaultPrefixLen int `json:"default_prefixlen,omitempty"`
+
+	// MinPrefixLen is the smallest prefix that can be allocated from a subnetpool.
+	// For IPv4 subnetpools, default is 8.
+	// For IPv6 subnetpools, default is 64.
+	MinPrefixLen int `json:"min_prefixlen,omitempty"`
+
+	// MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool.
+	// For IPv4 subnetpools, default is 32.
+	// For IPv6 subnetpools, default is 128.
+	MaxPrefixLen int `json:"max_prefixlen,omitempty"`
+
+	// AddressScopeID is the Neutron address scope to assign to the subnetpool.
+	AddressScopeID *string `json:"address_scope_id,omitempty"`
+
+	// Description is the human-readable description for the resource.
+	Description *string `json:"description,omitempty"`
+
+	// IsDefault indicates if the subnetpool is the default pool or not.
+	IsDefault *bool `json:"is_default,omitempty"`
+}
+
+// ToSubnetPoolUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToSubnetPoolUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "subnetpool")
+}
+
+// Update accepts an UpdateOpts struct and updates an existing subnetpool using the
+// values provided.
+func Update(c *gophercloud.ServiceClient, subnetPoolID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToSubnetPoolUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(updateURL(c, subnetPoolID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete accepts a unique ID and deletes the subnetpool associated with it.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(deleteURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go
new file mode 100644
index 00000000..e97e1e60
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go
@@ -0,0 +1,203 @@
+package subnetpools
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a subnetpool resource.
+func (r commonResult) Extract() (*SubnetPool, error) {
+	var s struct {
+		SubnetPool *SubnetPool `json:"subnetpool"`
+	}
+	err := r.ExtractInto(&s)
+	return s.SubnetPool, err
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a SubnetPool.
+type GetResult struct {
+	commonResult
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a SubnetPool.
+type CreateResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a SubnetPool.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// SubnetPool represents a Neutron subnetpool.
+// A subnetpool is a pool of addresses from which subnets can be allocated.
+type SubnetPool struct {
+	// ID is the id of the subnetpool.
+	ID string `json:"id"`
+
+	// Name is the human-readable name of the subnetpool.
+	Name string `json:"name"`
+
+	// DefaultQuota is the per-project quota on the prefix space
+	// that can be allocated from the subnetpool for project subnets.
+	DefaultQuota int `json:"default_quota"`
+
+	// TenantID is the id of the Identity project.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the id of the Identity project.
+	ProjectID string `json:"project_id"`
+
+	// CreatedAt is the time at which the subnetpool has been created.
+	CreatedAt time.Time `json:"created_at"`
+
+	// UpdatedAt is the time at which the subnetpool has been updated.
+	UpdatedAt time.Time `json:"updated_at"`
+
+	// Prefixes is the list of subnet prefixes to assign to the subnetpool.
+	// Neutron API merges adjacent prefixes and treats them as a single prefix.
+	// Each subnet prefix must be unique among all subnet prefixes in all subnetpools
+	// that are associated with the address scope.
+	Prefixes []string `json:"prefixes"`
+
+	// DefaultPrefixLen is the size of the prefix to allocate when the cidr
+	// or prefixlen attributes are omitted when you create the subnet.
+	// Defaults to the MinPrefixLen.
+	DefaultPrefixLen int `json:"-"`
+
+	// MinPrefixLen is the smallest prefix that can be allocated from a subnetpool.
+	// For IPv4 subnetpools, default is 8.
+	// For IPv6 subnetpools, default is 64.
+	MinPrefixLen int `json:"-"`
+
+	// MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool.
+	// For IPv4 subnetpools, default is 32.
+	// For IPv6 subnetpools, default is 128.
+	MaxPrefixLen int `json:"-"`
+
+	// AddressScopeID is the Neutron address scope to assign to the subnetpool.
+	AddressScopeID string `json:"address_scope_id"`
+
+	// IPversion is the IP protocol version.
+	// Valid value is 4 or 6. Default is 4.
+	IPversion int `json:"ip_version"`
+
+	// Shared indicates whether this network is shared across all projects.
+	Shared bool `json:"shared"`
+
+	// Description is the human-readable description for the resource.
+	Description string `json:"description"`
+
+	// IsDefault indicates if the subnetpool is the default pool or not.
+	IsDefault bool `json:"is_default"`
+
+	// RevisionNumber is the revision number of the subnetpool.
+	RevisionNumber int `json:"revision_number"`
+
+	// Tags optionally set via extensions/attributestags
+	Tags []string `json:"tags"`
+}
+
+// UnmarshalJSON handles the prefix length fields, which the Neutron API may
+// return either as JSON strings or as JSON numbers.
+func (r *SubnetPool) UnmarshalJSON(b []byte) error {
+	type tmp SubnetPool
+	var s struct {
+		tmp
+		DefaultPrefixLen interface{} `json:"default_prefixlen"`
+		MinPrefixLen     interface{} `json:"min_prefixlen"`
+		MaxPrefixLen     interface{} `json:"max_prefixlen"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = SubnetPool(s.tmp)
+
+	switch t := s.DefaultPrefixLen.(type) {
+	case string:
+		if r.DefaultPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.DefaultPrefixLen = int(t)
+	default:
+		return fmt.Errorf("DefaultPrefixLen has unexpected type: %T", t)
+	}
+
+	switch t := s.MinPrefixLen.(type) {
+	case string:
+		if r.MinPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.MinPrefixLen = int(t)
+	default:
+		return fmt.Errorf("MinPrefixLen has unexpected type: %T", t)
+	}
+
+	switch t := s.MaxPrefixLen.(type) {
+	case string:
+		if r.MaxPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.MaxPrefixLen = int(t)
+	default:
+		return fmt.Errorf("MaxPrefixLen has unexpected type: %T", t)
+	}
+
+	return nil
+}
+
+// SubnetPoolPage stores a single page of SubnetPools from a List() API call.
+type SubnetPoolPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of subnetpools has reached
+// the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
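+//
+// As with other paged collections, callers usually rely on AllPages or
+// EachPage instead of invoking this directly, e.g. (a sketch; networkClient
+// is a placeholder):
+//
+//	pager := subnetpools.List(networkClient, subnetpools.ListOpts{IPVersion: 4})
+//	allPages, err := pager.AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	pools, err := subnetpools.ExtractSubnetPools(allPages)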
+func (r SubnetPoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"subnetpools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty determines whether or not a SubnetPoolPage is empty. +func (r SubnetPoolPage) IsEmpty() (bool, error) { + subnetpools, err := ExtractSubnetPools(r) + return len(subnetpools) == 0, err +} + +// ExtractSubnetPools interprets the results of a single page from a List() API call, +// producing a slice of SubnetPools structs. +func ExtractSubnetPools(r pagination.Page) ([]SubnetPool, error) { + var s struct { + SubnetPools []SubnetPool `json:"subnetpools"` + } + err := (r.(SubnetPoolPage)).ExtractInto(&s) + return s.SubnetPools, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go new file mode 100644 index 00000000..a05062c9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go @@ -0,0 +1,33 @@ +package subnetpools + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "subnetpools" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/doc.go new file mode 100644 index 00000000..82496ffd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/doc.go @@ -0,0 +1,143 @@ +/* +Package trunks provides the ability to retrieve and manage trunks through the Neutron API. +Trunks allow you to multiplex multiple ports traffic on a single port. For example, you could +have a compute instance port be the parent port of a trunk and inside the VM run workloads +using other ports, without the need of plugging those ports. 
+
+Example of a new empty Trunk creation
+
+	iTrue := true
+	createOpts := trunks.CreateOpts{
+		Name:         "gophertrunk",
+		Description:  "Trunk created by gophercloud",
+		AdminStateUp: &iTrue,
+		PortID:       "a6f0560c-b7a8-401f-bf6e-d0a5c851ae10",
+	}
+
+	trunk, err := trunks.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+
+Example of a new Trunk creation with 2 subports
+
+	iTrue := true
+	createOpts := trunks.CreateOpts{
+		Name:         "gophertrunk",
+		Description:  "Trunk created by gophercloud",
+		AdminStateUp: &iTrue,
+		PortID:       "a6f0560c-b7a8-401f-bf6e-d0a5c851ae10",
+		Subports: []trunks.Subport{
+			{
+				SegmentationID:   1,
+				SegmentationType: "vlan",
+				PortID:           "bf4efcc0-b1c7-4674-81f0-31f58a33420a",
+			},
+			{
+				SegmentationID:   10,
+				SegmentationType: "vlan",
+				PortID:           "2cf671b9-02b3-4121-9e85-e0af3548d112",
+			},
+		},
+	}
+
+	trunk, err := trunks.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+
+Example of deleting a Trunk
+
+	trunkID := "c36e7f2e-0c53-4742-8696-aee77c9df159"
+	err := trunks.Delete(networkClient, trunkID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example of listing Trunks
+
+	listOpts := trunks.ListOpts{}
+	allPages, err := trunks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+	allTrunks, err := trunks.ExtractTrunks(allPages)
+	if err != nil {
+		panic(err)
+	}
+	for _, trunk := range allTrunks {
+		fmt.Printf("%+v\n", trunk)
+	}
+
+Example of getting a Trunk
+
+	trunkID := "52d8d124-3dc9-4563-9fef-bad3187ecf2d"
+	trunk, err := trunks.Get(networkClient, trunkID).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+
+Example of updating a Trunk
+
+	trunkID := "c36e7f2e-0c53-4742-8696-aee77c9df159"
+	iFalse := false
+	updateOpts := trunks.UpdateOpts{
+		AdminStateUp: &iFalse,
+		Name:         "updated_gophertrunk",
+		Description:  "trunk updated by gophercloud",
+	}
+	trunk, err := trunks.Update(networkClient, trunkID, updateOpts).Extract()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+
+Example of showing subports of a Trunk
+
+	trunkID := "c36e7f2e-0c53-4742-8696-aee77c9df159"
+	subports, err := trunks.GetSubports(client, trunkID).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", subports)
+
+Example of adding two subports to a Trunk
+
+	trunkID := "c36e7f2e-0c53-4742-8696-aee77c9df159"
+	addSubportsOpts := trunks.AddSubportsOpts{
+		Subports: []trunks.Subport{
+			{
+				SegmentationID:   1,
+				SegmentationType: "vlan",
+				PortID:           "bf4efcc0-b1c7-4674-81f0-31f58a33420a",
+			},
+			{
+				SegmentationID:   10,
+				SegmentationType: "vlan",
+				PortID:           "2cf671b9-02b3-4121-9e85-e0af3548d112",
+			},
+		},
+	}
+	trunk, err := trunks.AddSubports(client, trunkID, addSubportsOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+
+Example of deleting two subports from a Trunk
+
+	trunkID := "c36e7f2e-0c53-4742-8696-aee77c9df159"
+	removeSubportsOpts := trunks.RemoveSubportsOpts{
+		Subports: []trunks.RemoveSubport{
+			{PortID: "bf4efcc0-b1c7-4674-81f0-31f58a33420a"},
+			{PortID: "2cf671b9-02b3-4121-9e85-e0af3548d112"},
+		},
+	}
+	trunk, err := trunks.RemoveSubports(networkClient, trunkID, removeSubportsOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", trunk)
+*/
+package trunks
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/requests.go
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/requests.go new file mode 100644 index 00000000..447a0d41 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/requests.go @@ -0,0 +1,195 @@ +package trunks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToTrunkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents the attributes used when creating a new trunk. +type CreateOpts struct { + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + PortID string `json:"port_id" required:"true"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Subports []Subport `json:"sub_ports"` +} + +// ToTrunkCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToTrunkCreateMap() (map[string]interface{}, error) { + if opts.Subports == nil { + opts.Subports = []Subport{} + } + return gophercloud.BuildRequestBody(opts, "trunk") +} + +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + body, err := opts.ToTrunkCreateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Post(createURL(c), body, &r.Body, nil) + return +} + +// Delete accepts a unique ID and deletes the trunk associated with it. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToTrunkListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the trunk attributes you want to see returned. SortKey allows you to sort +// by a particular trunk attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + AdminStateUp *bool `q:"admin_state_up"` + Description string `q:"description"` + ID string `q:"id"` + Name string `q:"name"` + PortID string `q:"port_id"` + RevisionNumber string `q:"revision_number"` + Status string `q:"status"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + SortDir string `q:"sort_dir"` + SortKey string `q:"sort_key"` + Tags string `q:"tags"` + TagsAny string `q:"tags-any"` + NotTags string `q:"not-tags"` + NotTagsAny string `q:"not-tags-any"` +} + +// ToTrunkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToTrunkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// trunks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those trunks that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. 
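+//
+// For example, to find the trunks whose parent is a given port (a sketch;
+// the port ID is a placeholder):
+//
+//	listOpts := trunks.ListOpts{
+//		PortID: "a6f0560c-b7a8-401f-bf6e-d0a5c851ae10",
+//	}
+//	allPages, err := trunks.List(networkClient, listOpts).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	allTrunks, err := trunks.ExtractTrunks(allPages)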
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToTrunkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return TrunkPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific trunk based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +type UpdateOptsBuilder interface { + ToTrunkUpdateMap() (map[string]interface{}, error) +} + +type UpdateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` +} + +func (opts UpdateOpts) ToTrunkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "trunk") +} + +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + body, err := opts.ToTrunkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, id), body, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +func GetSubports(c *gophercloud.ServiceClient, id string) (r GetSubportsResult) { + _, r.Err = c.Get(getSubportsURL(c, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type AddSubportsOpts struct { + Subports []Subport `json:"sub_ports" required:"true"` +} + +type AddSubportsOptsBuilder interface { + ToTrunkAddSubportsMap() (map[string]interface{}, error) +} + +func (opts AddSubportsOpts) ToTrunkAddSubportsMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +func AddSubports(c *gophercloud.ServiceClient, id string, opts AddSubportsOptsBuilder) (r UpdateSubportsResult) { + body, err := opts.ToTrunkAddSubportsMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(addSubportsURL(c, id), body, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type RemoveSubport struct { + PortID string `json:"port_id" required:"true"` +} + +type RemoveSubportsOpts struct { + Subports []RemoveSubport `json:"sub_ports"` +} + +type RemoveSubportsOptsBuilder interface { + ToTrunkRemoveSubportsMap() (map[string]interface{}, error) +} + +func (opts RemoveSubportsOpts) ToTrunkRemoveSubportsMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +func RemoveSubports(c *gophercloud.ServiceClient, id string, opts RemoveSubportsOptsBuilder) (r UpdateSubportsResult) { + body, err := opts.ToTrunkRemoveSubportsMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(removeSubportsURL(c, id), body, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/results.go new file mode 100644 index 00000000..6d979ef7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/results.go @@ -0,0 +1,137 @@ +package trunks + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type Subport struct { + SegmentationID int `json:"segmentation_id" 
required:"true"` + SegmentationType string `json:"segmentation_type" required:"true"` + PortID string `json:"port_id" required:"true"` +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Trunk. +type CreateResult struct { + commonResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Trunk. +type GetResult struct { + commonResult +} + +// UpdateResult is the result of an Update request. Call its Extract method to +// interpret it as a Trunk. +type UpdateResult struct { + commonResult +} + +// GetSubportsResult is the result of a Get request on the trunks subports +// resource. Call its Extract method to interpret it as a slice of Subport. +type GetSubportsResult struct { + commonResult +} + +// UpdateSubportsResult is the result of either an AddSubports or a RemoveSubports +// request. Call its Extract method to interpret it as a Trunk. +type UpdateSubportsResult struct { + commonResult +} + +type Trunk struct { + // Indicates whether the trunk is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', 'DEGRADED' or `ERROR'. + Status string `json:"status"` + + // A list of ports associated with the trunk + Subports []Subport `json:"sub_ports"` + + // Human-readable name for the trunk. Might not be unique. + Name string `json:"name,omitempty"` + + // The administrative state of the trunk. If false (down), the trunk does not + // forward packets. + AdminStateUp bool `json:"admin_state_up,omitempty"` + + // ProjectID is the project owner of the trunk. + ProjectID string `json:"project_id"` + + // TenantID is the project owner of the trunk. + TenantID string `json:"tenant_id"` + + // The date and time when the resource was created. + CreatedAt time.Time `json:"created_at"` + + // The date and time when the resource was updated, + // if the resource has not been updated, this field will show as null. + UpdatedAt time.Time `json:"updated_at"` + + RevisionNumber int `json:"revision_number"` + + // UUID of the trunk's parent port + PortID string `json:"port_id"` + + // UUID for the trunk resource + ID string `json:"id"` + + // Display description. + Description string `json:"description"` + + // A list of tags associated with the trunk + Tags []string `json:"tags,omitempty"` +} + +func (r commonResult) Extract() (*Trunk, error) { + var s struct { + Trunk *Trunk `json:"trunk"` + } + err := r.ExtractInto(&s) + return s.Trunk, err +} + +// TrunkPage is the page returned by a pager when traversing a collection of +// trunk resources. 
+type TrunkPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty checks whether a TrunkPage struct is empty.
+func (page TrunkPage) IsEmpty() (bool, error) {
+	trunks, err := ExtractTrunks(page)
+	return len(trunks) == 0, err
+}
+
+// ExtractTrunks accepts a Page struct, specifically a TrunkPage struct, and
+// extracts the elements into a slice of Trunk structs.
+func ExtractTrunks(page pagination.Page) ([]Trunk, error) {
+	var a struct {
+		Trunks []Trunk `json:"trunks"`
+	}
+	err := (page.(TrunkPage)).ExtractInto(&a)
+	return a.Trunks, err
+}
+
+// Extract interprets a GetSubportsResult as a slice of Subport.
+func (r GetSubportsResult) Extract() ([]Subport, error) {
+	var s struct {
+		Subports []Subport `json:"sub_ports"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Subports, err
+}
+
+// Extract interprets an UpdateSubportsResult as a Trunk.
+func (r UpdateSubportsResult) Extract() (t *Trunk, err error) {
+	err = r.ExtractInto(&t)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/urls.go
new file mode 100644
index 00000000..ac7dff09
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/trunks/urls.go
@@ -0,0 +1,45 @@
+package trunks
+
+import "github.com/gophercloud/gophercloud"
+
+const resourcePath = "trunks"
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id)
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func listURL(c *gophercloud.ServiceClient) string {
+	return rootURL(c)
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return resourceURL(c, id)
+}
+
+func getSubportsURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "get_subports")
+}
+
+func addSubportsURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "add_subports")
+}
+
+func removeSubportsURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(resourcePath, id, "remove_subports")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/doc.go
new file mode 100644
index 00000000..a309a966
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/doc.go
@@ -0,0 +1,97 @@
+/*
+Package vlantransparent provides the ability to retrieve and manage networks
+with the vlan-transparent extension through the Neutron API.
+
+Example of Listing Networks with the vlan-transparent extension
+
+	iTrue := true
+	networkListOpts := networks.ListOpts{}
+	listOpts := vlantransparent.ListOptsExt{
+		ListOptsBuilder: networkListOpts,
+		VLANTransparent: &iTrue,
+	}
+
+	type NetworkWithVLANTransparentExt struct {
+		networks.Network
+		vlantransparent.TransparentExt
+	}
+
+	var allNetworks []NetworkWithVLANTransparentExt
+
+	allPages, err := networks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	err = networks.ExtractNetworksInto(allPages, &allNetworks)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example of Getting a Network with the vlan-transparent extension
+
+	var network struct {
+		networks.Network
+		vlantransparent.TransparentExt
+	}
+
+	err := networks.Get(networkClient, "db193ab3-96e3-4cb3-8fc5-05f4296d0324").ExtractInto(&network)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", network)
+
+Example of Creating a Network with the vlan-transparent extension
+
+	iTrue := true
+	networkCreateOpts := networks.CreateOpts{
+		Name: "private",
+	}
+
+	createOpts := vlantransparent.CreateOptsExt{
+		CreateOptsBuilder: &networkCreateOpts,
+		VLANTransparent:   &iTrue,
+	}
+
+	var network struct {
+		networks.Network
+		vlantransparent.TransparentExt
+	}
+
+	err := networks.Create(networkClient, createOpts).ExtractInto(&network)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", network)
+
+Example of Updating a Network with the vlan-transparent extension
+
+	iFalse := false
+	networkUpdateOpts := networks.UpdateOpts{
+		Name: "new_network_name",
+	}
+
+	updateOpts := vlantransparent.UpdateOptsExt{
+		UpdateOptsBuilder: &networkUpdateOpts,
+		VLANTransparent:   &iFalse,
+	}
+
+	var network struct {
+		networks.Network
+		vlantransparent.TransparentExt
+	}
+
+	err := networks.Update(networkClient, "db193ab3-96e3-4cb3-8fc5-05f4296d0324", updateOpts).ExtractInto(&network)
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", network)
+*/
+package vlantransparent
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/requests.go
new file mode 100644
index 00000000..65504cf3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/requests.go
@@ -0,0 +1,84 @@
+package vlantransparent
+
+import (
+	"net/url"
+	"strconv"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
+)
+
+// ListOptsExt adds the vlan-transparent network options to the base ListOpts.
+type ListOptsExt struct {
+	networks.ListOptsBuilder
+	VLANTransparent *bool `q:"vlan_transparent"`
+}
+
+// ToNetworkListQuery adds the vlan_transparent option to the base network
+// list options.
+func (opts ListOptsExt) ToNetworkListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts.ListOptsBuilder)
+	if err != nil {
+		return "", err
+	}
+
+	params := q.Query()
+	if opts.VLANTransparent != nil {
+		v := strconv.FormatBool(*opts.VLANTransparent)
+		params.Add("vlan_transparent", v)
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+	return q.String(), err
+}
+
+// CreateOptsExt is the structure used when creating new vlan-transparent
+// network resources.
It embeds networks.CreateOpts and so inherits all of its
+// required and optional fields, with the addition of the VLANTransparent field.
+type CreateOptsExt struct {
+	networks.CreateOptsBuilder
+	VLANTransparent *bool `json:"vlan_transparent,omitempty"`
+}
+
+// ToNetworkCreateMap adds the vlan_transparent option to the base network
+// creation options.
+func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) {
+	base, err := opts.CreateOptsBuilder.ToNetworkCreateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.VLANTransparent == nil {
+		return base, nil
+	}
+
+	networkMap := base["network"].(map[string]interface{})
+	networkMap["vlan_transparent"] = opts.VLANTransparent
+
+	return base, nil
+}
+
+// UpdateOptsExt is the structure used when updating existing vlan-transparent
+// network resources. It embeds networks.UpdateOpts and so inherits all of its
+// required and optional fields, with the addition of the VLANTransparent field.
+type UpdateOptsExt struct {
+	networks.UpdateOptsBuilder
+	VLANTransparent *bool `json:"vlan_transparent,omitempty"`
+}
+
+// ToNetworkUpdateMap adds the vlan_transparent option to the base network
+// update options.
+func (opts UpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) {
+	base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap()
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.VLANTransparent == nil {
+		return base, nil
+	}
+
+	networkMap := base["network"].(map[string]interface{})
+	networkMap["vlan_transparent"] = opts.VLANTransparent
+
+	return base, nil
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/results.go
new file mode 100644
index 00000000..62eae209
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vlantransparent/results.go
@@ -0,0 +1,8 @@
+package vlantransparent
+
+// TransparentExt represents a decorated form of a network with
+// "vlan-transparent" extension attributes.
+type TransparentExt struct {
+	// VLANTransparent indicates whether the network is a VLAN transparent network or not.
+ VLANTransparent bool `json:"vlan_transparent"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go new file mode 100644 index 00000000..5f49bd1d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go @@ -0,0 +1,58 @@ +/* +Package endpointgroups allows management of endpoint groups in the Openstack Network Service + +Example to create an Endpoint Group + + createOpts := endpointgroups.CreateOpts{ + Name: groupName, + Type: endpointgroups.TypeCIDR, + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + } + group, err := endpointgroups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + +Example to retrieve an Endpoint Group + + group, err := endpointgroups.Get(client, "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a").Extract() + if err != nil { + panic(err) + } + +Example to Delete an Endpoint Group + + err := endpointgroups.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr() + if err != nil { + panic(err) + } + +Example to List Endpoint groups + + allPages, err := endpointgroups.List(client, nil).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := endpointgroups.ExtractEndpointGroups(allPages) + if err != nil { + panic(err) + } + +Example to Update an endpoint group + + name := "updatedname" + description := "updated description" + updateOpts := endpointgroups.UpdateOpts{ + Name: &name, + Description: &description, + } + updatedPolicy, err := endpointgroups.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract() + if err != nil { + panic(err) + } +*/ +package endpointgroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go new file mode 100644 index 00000000..c12d0a80 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go @@ -0,0 +1,144 @@ +package endpointgroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type EndpointType string + +const ( + TypeSubnet EndpointType = "subnet" + TypeCIDR EndpointType = "cidr" + TypeVLAN EndpointType = "vlan" + TypeNetwork EndpointType = "network" + TypeRouter EndpointType = "router" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToEndpointGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new endpoint group +type CreateOpts struct { + // TenantID specifies a tenant to own the endpoint group. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + + // Description is the human readable description of the endpoint group. + Description string `json:"description,omitempty"` + + // Name is the human readable name of the endpoint group. + Name string `json:"name,omitempty"` + + // The type of the endpoints in the group. 
+ // A valid value is subnet, cidr, network, router, or vlan. + Type EndpointType `json:"type,omitempty"` + + // List of endpoints of the same type, for the endpoint group. + // The values will depend on the type. + Endpoints []string `json:"endpoints"` +} + +// ToEndpointGroupCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToEndpointGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint_group") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// endpoint group. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToEndpointGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular endpoint group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToEndpointGroupListQuery() (string, error) +} + +// ListOpts allows the filtering of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Endpoint group attributes you want to see returned. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Description string `q:"description"` + Name string `q:"name"` + Type string `q:"type"` +} + +// ToEndpointGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToEndpointGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// Endpoint groups. It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToEndpointGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return EndpointGroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete a particular endpoint group based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToEndpointGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating an endpoint group. +type UpdateOpts struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ToEndpointGroupUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToEndpointGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint_group") +} + +// Update allows endpoint groups to be updated. 
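+//
+// Only the fields set in UpdateOpts are sent: because Name and Description are
+// pointers with omitempty tags, a nil field is omitted rather than cleared.
+// A hedged sketch (editor's assumption, reusing the example ID from the
+// package docs):
+//
+//	name := "renamed-group"
+//	updateOpts := endpointgroups.UpdateOpts{Name: &name}
+//	group, err := endpointgroups.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}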
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToEndpointGroupUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go
new file mode 100644
index 00000000..822b7000
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go
@@ -0,0 +1,104 @@
+package endpointgroups
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// EndpointGroup is an endpoint group.
+type EndpointGroup struct {
+	// TenantID specifies a tenant to own the endpoint group.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the ID of the project that owns the endpoint group.
+	ProjectID string `json:"project_id"`
+
+	// Description is the human readable description of the endpoint group.
+	Description string `json:"description"`
+
+	// Name is the human readable name of the endpoint group.
+	Name string `json:"name"`
+
+	// Type is the type of the endpoints in the group.
+	Type string `json:"type"`
+
+	// Endpoints is a list of endpoints.
+	Endpoints []string `json:"endpoints"`
+
+	// ID is the ID of the endpoint group.
+	ID string `json:"id"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an endpoint group.
+func (r commonResult) Extract() (*EndpointGroup, error) {
+	var s struct {
+		Service *EndpointGroup `json:"endpoint_group"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Service, err
+}
+
+// EndpointGroupPage is the page returned by a pager when traversing over a
+// collection of Policies.
+type EndpointGroupPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of Endpoint groups has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r EndpointGroupPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"endpoint_groups_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether an EndpointGroupPage struct is empty.
+func (r EndpointGroupPage) IsEmpty() (bool, error) {
+	is, err := ExtractEndpointGroups(r)
+	return len(is) == 0, err
+}
+
+// ExtractEndpointGroups accepts a Page struct, specifically an EndpointGroupPage struct,
+// and extracts the elements into a slice of Endpoint group structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractEndpointGroups(r pagination.Page) ([]EndpointGroup, error) {
+	var s struct {
+		EndpointGroups []EndpointGroup `json:"endpoint_groups"`
+	}
+	err := (r.(EndpointGroupPage)).ExtractInto(&s)
+	return s.EndpointGroups, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as an endpoint group.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as an EndpointGroup.
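+//
+// For example (editor's sketch, reusing the ID from the package docs):
+//
+//	group, err := endpointgroups.Get(client, "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a").Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%+v\n", group)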
+type GetResult struct { + commonResult +} + +// DeleteResult represents the results of a Delete operation. Call its ExtractErr method +// to determine whether the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. Call its Extract method +// to interpret it as an EndpointGroup. +type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go new file mode 100644 index 00000000..9e83563c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go @@ -0,0 +1,16 @@ +package endpointgroups + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "endpoint-groups" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go new file mode 100644 index 00000000..ee44279a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go @@ -0,0 +1,64 @@ +/* +Package ikepolicies allows management and retrieval of IKE policies in the +OpenStack Networking Service. 
+
+
+Example to Create an IKE policy
+
+	createOpts := ikepolicies.CreateOpts{
+		Name:                "ikepolicy1",
+		Description:         "Description of ikepolicy1",
+		EncryptionAlgorithm: ikepolicies.EncryptionAlgorithm3DES,
+		PFS:                 ikepolicies.PFSGroup5,
+	}
+
+	policy, err := ikepolicies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific IKE policy by ID
+
+	policy, err := ikepolicies.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to Delete a Policy
+
+	err := ikepolicies.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an IKE policy
+
+	name := "updatedname"
+	description := "updated policy"
+	updateOpts := ikepolicies.UpdateOpts{
+		Name:        &name,
+		Description: &description,
+		Lifetime: &ikepolicies.LifetimeUpdateOpts{
+			Value: 7000,
+		},
+	}
+	updatedPolicy, err := ikepolicies.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to List IKE policies
+
+	allPages, err := ikepolicies.List(client, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := ikepolicies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package ikepolicies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go
new file mode 100644
index 00000000..6b084ff6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go
@@ -0,0 +1,209 @@
+package ikepolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type AuthAlgorithm string
+type EncryptionAlgorithm string
+type PFS string
+type Unit string
+type IKEVersion string
+type Phase1NegotiationMode string
+
+const (
+	AuthAlgorithmSHA1         AuthAlgorithm         = "sha1"
+	AuthAlgorithmSHA256       AuthAlgorithm         = "sha256"
+	AuthAlgorithmSHA384       AuthAlgorithm         = "sha384"
+	AuthAlgorithmSHA512       AuthAlgorithm         = "sha512"
+	EncryptionAlgorithm3DES   EncryptionAlgorithm   = "3des"
+	EncryptionAlgorithmAES128 EncryptionAlgorithm   = "aes-128"
+	EncryptionAlgorithmAES256 EncryptionAlgorithm   = "aes-256"
+	EncryptionAlgorithmAES192 EncryptionAlgorithm   = "aes-192"
+	UnitSeconds               Unit                  = "seconds"
+	UnitKilobytes             Unit                  = "kilobytes"
+	PFSGroup2                 PFS                   = "group2"
+	PFSGroup5                 PFS                   = "group5"
+	PFSGroup14                PFS                   = "group14"
+	IKEVersionv1              IKEVersion            = "v1"
+	IKEVersionv2              IKEVersion            = "v2"
+	Phase1NegotiationModeMain Phase1NegotiationMode = "main"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPolicyCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new IKE policy.
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the IKE policy. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// Description is the human readable description of the policy.
+	Description string `json:"description,omitempty"`
+
+	// Name is the human readable name of the policy.
+	// Does not have to be unique.
+	Name string `json:"name,omitempty"`
+
+	// AuthAlgorithm is the authentication hash algorithm.
+	// Valid values are sha1, sha256, sha384, sha512.
+	// The default is sha1.
+	AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"`
+
+	// EncryptionAlgorithm is the encryption algorithm.
+	// A valid value is 3des, aes-128, aes-192, aes-256, and so on.
+	// Default is aes-128.
+	EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"`
+
+	// PFS is the Perfect forward secrecy mode.
+	// A valid value is Group2, Group5, Group14, and so on.
+	// Default is Group5.
+	PFS PFS `json:"pfs,omitempty"`
+
+	// The IKE mode.
+	// A valid value is main, which is the default.
+	Phase1NegotiationMode Phase1NegotiationMode `json:"phase1_negotiation_mode,omitempty"`
+
+	// The IKE version.
+	// A valid value is v1 or v2.
+	// Default is v1.
+	IKEVersion IKEVersion `json:"ike_version,omitempty"`
+
+	// Lifetime is the lifetime of the security association.
+	Lifetime *LifetimeCreateOpts `json:"lifetime,omitempty"`
+}
+
+// LifetimeCreateOpts represents the lifetime of the security association.
+// The lifetime consists of a unit and an integer value.
+// You can omit either the unit or value portion of the lifetime.
+type LifetimeCreateOpts struct {
+	// Units is the units for the lifetime of the security association.
+	// Default unit is seconds.
+	Units Unit `json:"units,omitempty"`
+
+	// The lifetime value.
+	// Must be a positive integer.
+	// Default value is 3600.
+	Value int `json:"value,omitempty"`
+}
+
+// ToPolicyCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "ikepolicy")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// IKE policy.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToPolicyCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular IKE policy based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a particular IKE policy based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPolicyListQuery() (string, error)
+}
+
+// ListOpts allows the filtering of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the IKE policy attributes you want to see returned.
+type ListOpts struct {
+	TenantID              string `q:"tenant_id"`
+	Name                  string `q:"name"`
+	Description           string `q:"description"`
+	ProjectID             string `q:"project_id"`
+	AuthAlgorithm         string `q:"auth_algorithm"`
+	EncapsulationMode     string `q:"encapsulation_mode"`
+	EncryptionAlgorithm   string `q:"encryption_algorithm"`
+	PFS                   string `q:"pfs"`
+	Phase1NegotiationMode string `q:"phase_1_negotiation_mode"`
+	IKEVersion            string `q:"ike_version"`
+}
+
+// ToPolicyListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToPolicyListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// IKE policies.
It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +type LifetimeUpdateOpts struct { + Units Unit `json:"units,omitempty"` + Value int `json:"value,omitempty"` +} + +// UpdateOpts contains the values used when updating an IKE policy +type UpdateOpts struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + PFS PFS `json:"pfs,omitempty"` + Lifetime *LifetimeUpdateOpts `json:"lifetime,omitempty"` + Phase1NegotiationMode Phase1NegotiationMode `json:"phase_1_negotiation_mode,omitempty"` + IKEVersion IKEVersion `json:"ike_version,omitempty"` +} + +// ToPolicyUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ikepolicy") +} + +// Update allows IKE policies to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go new file mode 100644 index 00000000..b825f575 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go @@ -0,0 +1,125 @@ +package ikepolicies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Policy is an IKE Policy +type Policy struct { + // TenantID is the ID of the project + TenantID string `json:"tenant_id"` + + // ProjectID is the ID of the project + ProjectID string `json:"project_id"` + + // Description is the human readable description of the policy + Description string `json:"description"` + + // Name is the human readable name of the policy + Name string `json:"name"` + + // AuthAlgorithm is the authentication hash algorithm + AuthAlgorithm string `json:"auth_algorithm"` + + // EncryptionAlgorithm is the encryption algorithm + EncryptionAlgorithm string `json:"encryption_algorithm"` + + // PFS is the Perfect forward secrecy (PFS) mode + PFS string `json:"pfs"` + + // Lifetime is the lifetime of the security association + Lifetime Lifetime `json:"lifetime"` + + // ID is the ID of the policy + ID string `json:"id"` + + // Phase1NegotiationMode is the IKE mode + Phase1NegotiationMode string `json:"phase1_negotiation_mode"` + + // IKEVersion is the IKE version. 
+	IKEVersion string `json:"ike_version"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Lifetime represents the lifetime of the security association.
+type Lifetime struct {
+	// Units is the unit for the lifetime.
+	// Default is seconds.
+	Units string `json:"units"`
+
+	// Value is the lifetime.
+	// Default is 3600.
+	Value int `json:"value"`
+}
+
+// Extract is a function that accepts a result and extracts an IKE Policy.
+func (r commonResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"ikepolicy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Policy, err
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of Policies.
+type PolicyPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of IKE policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r PolicyPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"ikepolicies_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PolicyPage struct is empty.
+func (r PolicyPage) IsEmpty() (bool, error) {
+	is, err := ExtractPolicies(r)
+	return len(is) == 0, err
+}
+
+// ExtractPolicies accepts a Page struct, specifically a PolicyPage struct,
+// and extracts the elements into a slice of Policy structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPolicies(r pagination.Page) ([]Policy, error) {
+	var s struct {
+		Policies []Policy `json:"ikepolicies"`
+	}
+	err := (r.(PolicyPage)).ExtractInto(&s)
+	return s.Policies, err
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract method to
+// interpret it as a Policy.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a Get operation. Call its Extract method to
+// interpret it as a Policy.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the results of a Delete operation. Call its ExtractErr method
+// to determine whether the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract method
+// to interpret it as a Policy.
+type UpdateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go
new file mode 100644
index 00000000..a364a881
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go
@@ -0,0 +1,16 @@
+package ikepolicies
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "vpn"
+	resourcePath = "ikepolicies"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go
new file mode 100644
index 00000000..91d5451a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go
@@ -0,0 +1,56 @@
+/*
+Package ipsecpolicies allows management and retrieval of IPSec Policies in the
+OpenStack Networking Service.
+
+Example to Create a Policy
+
+	createOpts := ipsecpolicies.CreateOpts{
+		Name: "IPSecPolicy_1",
+	}
+
+	policy, err := ipsecpolicies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Policy
+
+	err := ipsecpolicies.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific IPSec policy by ID
+
+	policy, err := ipsecpolicies.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an IPSec policy
+
+	name := "updatedname"
+	description := "updated policy"
+	updateOpts := ipsecpolicies.UpdateOpts{
+		Name:        &name,
+		Description: &description,
+	}
+	updatedPolicy, err := ipsecpolicies.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List IPSec policies
+
+	allPages, err := ipsecpolicies.List(client, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := ipsecpolicies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package ipsecpolicies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go
new file mode 100644
index 00000000..9496365c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go
@@ -0,0 +1,211 @@
+package ipsecpolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type TransformProtocol string
+type AuthAlgorithm string
+type EncapsulationMode string
+type EncryptionAlgorithm string
+type PFS string
+type Unit string
+
+const (
+	TransformProtocolESP       TransformProtocol   = "esp"
+	TransformProtocolAH        TransformProtocol   = "ah"
+	TransformProtocolAHESP     TransformProtocol   = "ah-esp"
+	AuthAlgorithmSHA1          AuthAlgorithm       = "sha1"
+	AuthAlgorithmSHA256        AuthAlgorithm
= "sha256" + AuthAlgorithmSHA384 AuthAlgorithm = "sha384" + AuthAlgorithmSHA512 AuthAlgorithm = "sha512" + EncryptionAlgorithm3DES EncryptionAlgorithm = "3des" + EncryptionAlgorithmAES128 EncryptionAlgorithm = "aes-128" + EncryptionAlgorithmAES256 EncryptionAlgorithm = "aes-256" + EncryptionAlgorithmAES192 EncryptionAlgorithm = "aes-192" + EncapsulationModeTunnel EncapsulationMode = "tunnel" + EncapsulationModeTransport EncapsulationMode = "transport" + UnitSeconds Unit = "seconds" + UnitKilobytes Unit = "kilobytes" + PFSGroup2 PFS = "group2" + PFSGroup5 PFS = "group5" + PFSGroup14 PFS = "group14" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new IPSec policy +type CreateOpts struct { + // TenantID specifies a tenant to own the IPSec policy. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + + // Description is the human readable description of the policy. + Description string `json:"description,omitempty"` + + // Name is the human readable name of the policy. + // Does not have to be unique. + Name string `json:"name,omitempty"` + + // AuthAlgorithm is the authentication hash algorithm. + // Valid values are sha1, sha256, sha384, sha512. + // The default is sha1. + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + + // EncapsulationMode is the encapsulation mode. + // A valid value is tunnel or transport. + // Default is tunnel. + EncapsulationMode EncapsulationMode `json:"encapsulation_mode,omitempty"` + + // EncryptionAlgorithm is the encryption algorithm. + // A valid value is 3des, aes-128, aes-192, aes-256, and so on. + // Default is aes-128. + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + + // PFS is the Perfect forward secrecy mode. + // A valid value is Group2, Group5, Group14, and so on. + // Default is Group5. + PFS PFS `json:"pfs,omitempty"` + + // TransformProtocol is the transform protocol. + // A valid value is ESP, AH, or AH- ESP. + // Default is ESP. + TransformProtocol TransformProtocol `json:"transform_protocol,omitempty"` + + //Lifetime is the lifetime of the security association + Lifetime *LifetimeCreateOpts `json:"lifetime,omitempty"` +} + +// The lifetime consists of a unit and integer value +// You can omit either the unit or value portion of the lifetime +type LifetimeCreateOpts struct { + // Units is the units for the lifetime of the security association + // Default unit is seconds + Units Unit `json:"units,omitempty"` + + // The lifetime value. + // Must be a positive integer. + // Default value is 3600. + Value int `json:"value,omitempty"` +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ipsecpolicy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// IPSec policy +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Delete will permanently delete a particular IPSec policy based on its +// unique ID. 
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// Get retrieves a particular IPSec policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the IPSec policy attributes you want to see returned. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + ProjectID string `q:"project_id"` + AuthAlgorithm string `q:"auth_algorithm"` + EncapsulationMode string `q:"encapsulation_mode"` + EncryptionAlgorithm string `q:"encryption_algorithm"` + PFS string `q:"pfs"` + TransformProtocol string `q:"transform_protocol"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// IPSec policies. It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +type LifetimeUpdateOpts struct { + Units Unit `json:"units,omitempty"` + Value int `json:"value,omitempty"` +} + +// UpdateOpts contains the values used when updating an IPSec policy +type UpdateOpts struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + EncapsulationMode EncapsulationMode `json:"encapsulation_mode,omitempty"` + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + PFS PFS `json:"pfs,omitempty"` + TransformProtocol TransformProtocol `json:"transform_protocol,omitempty"` + Lifetime *LifetimeUpdateOpts `json:"lifetime,omitempty"` +} + +// ToPolicyUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ipsecpolicy") +} + +// Update allows IPSec policies to be updated. 
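+//
+// A hedged sketch (editor's assumption, reusing the example ID from the
+// package docs) of updating the lifetime alongside other fields:
+//
+//	updateOpts := ipsecpolicies.UpdateOpts{
+//		Lifetime: &ipsecpolicies.LifetimeUpdateOpts{
+//			Units: ipsecpolicies.UnitSeconds,
+//			Value: 7200,
+//		},
+//	}
+//	policy, err := ipsecpolicies.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}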
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go
new file mode 100644
index 00000000..eda4a1bd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go
@@ -0,0 +1,126 @@
+package ipsecpolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Policy is an IPSec Policy.
+type Policy struct {
+	// TenantID is the ID of the project.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the ID of the project.
+	ProjectID string `json:"project_id"`
+
+	// Description is the human readable description of the policy.
+	Description string `json:"description"`
+
+	// Name is the human readable name of the policy.
+	Name string `json:"name"`
+
+	// AuthAlgorithm is the authentication hash algorithm.
+	AuthAlgorithm string `json:"auth_algorithm"`
+
+	// EncapsulationMode is the encapsulation mode.
+	EncapsulationMode string `json:"encapsulation_mode"`
+
+	// EncryptionAlgorithm is the encryption algorithm.
+	EncryptionAlgorithm string `json:"encryption_algorithm"`
+
+	// PFS is the Perfect forward secrecy (PFS) mode.
+	PFS string `json:"pfs"`
+
+	// TransformProtocol is the transform protocol.
+	TransformProtocol string `json:"transform_protocol"`
+
+	// Lifetime is the lifetime of the security association.
+	Lifetime Lifetime `json:"lifetime"`
+
+	// ID is the ID of the policy.
+	ID string `json:"id"`
+}
+
+// Lifetime represents the lifetime of the security association.
+type Lifetime struct {
+	// Units is the unit for the lifetime.
+	// Default is seconds.
+	Units string `json:"units"`
+
+	// Value is the lifetime.
+	// Default is 3600.
+	Value int `json:"value"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an IPSec Policy.
+func (r commonResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"ipsecpolicy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Policy, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Policy.
+type CreateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr method
+// to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Policy.
+type GetResult struct {
+	commonResult
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of Policies.
+type PolicyPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of IPSec policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
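+//
+// Editor's sketch (an assumption, not upstream documentation): instead of
+// collecting every page with AllPages, callers can stream pages:
+//
+//	err := ipsecpolicies.List(client, nil).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			policies, err := ipsecpolicies.ExtractPolicies(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			for _, p := range policies {
+//				fmt.Printf("%+v\n", p)
+//			}
+//			return true, nil
+//		})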
+func (r PolicyPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"ipsecpolicies_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PolicyPage struct is empty.
+func (r PolicyPage) IsEmpty() (bool, error) {
+	is, err := ExtractPolicies(r)
+	return len(is) == 0, err
+}
+
+// ExtractPolicies accepts a Page struct, specifically a PolicyPage struct,
+// and extracts the elements into a slice of Policy structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractPolicies(r pagination.Page) ([]Policy, error) {
+	var s struct {
+		Policies []Policy `json:"ipsecpolicies"`
+	}
+	err := (r.(PolicyPage)).ExtractInto(&s)
+	return s.Policies, err
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Policy.
+type UpdateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go
new file mode 100644
index 00000000..8781cc44
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go
@@ -0,0 +1,16 @@
+package ipsecpolicies
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "vpn"
+	resourcePath = "ipsecpolicies"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go
new file mode 100644
index 00000000..6bd3236c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go
@@ -0,0 +1,68 @@
+/*
+Package services allows management and retrieval of VPN services in the
+OpenStack Networking Service.
+
+Example to List Services
+
+	listOpts := services.ListOpts{
+		TenantID: "966b3c7d36a24facaf20b7e458bf2192",
+	}
+
+	allPages, err := services.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServices, err := services.ExtractServices(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, service := range allServices {
+		fmt.Printf("%+v\n", service)
+	}
+
+Example to Create a Service
+
+	createOpts := services.CreateOpts{
+		Name:         "vpnservice1",
+		Description:  "A service",
+		RouterID:     "2512e759-e8d7-4eea-a0af-4a85927a2e59",
+		AdminStateUp: gophercloud.Enabled,
+	}
+
+	service, err := services.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Service
+
+	serviceID := "38aee955-6283-4279-b091-8b9c828000ec"
+
+	description := "New Description"
+	updateOpts := services.UpdateOpts{
+		Description: &description,
+	}
+
+	service, err := services.Update(networkClient, serviceID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Service
+
+	serviceID := "38aee955-6283-4279-b091-8b9c828000ec"
+	err := services.Delete(networkClient, serviceID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific Service by ID
+
+	service, err := services.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package services
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go
new file mode 100644
index 00000000..8d642197
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go
@@ -0,0 +1,150 @@
+package services
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToServiceCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new VPN service.
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the VPN service. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// SubnetID is the ID of the subnet.
+	SubnetID string `json:"subnet_id,omitempty"`
+
+	// RouterID is the ID of the router.
+	RouterID string `json:"router_id" required:"true"`
+
+	// Description is the human readable description of the service.
+	Description string `json:"description,omitempty"`
+
+	// AdminStateUp is the administrative state of the resource, which is up (true) or down (false).
+	AdminStateUp *bool `json:"admin_state_up"`
+
+	// Name is the human readable name of the service.
+	Name string `json:"name,omitempty"`
+
+	// FlavorID is the ID of the flavor.
+	FlavorID string `json:"flavor_id,omitempty"`
+}
+
+// ToServiceCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToServiceCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vpnservice")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// VPN service.
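+//
+// A hedged sketch (editor's assumption): RouterID is the only required field,
+// and AdminStateUp can be set through the gophercloud.Enabled helper:
+//
+//	createOpts := services.CreateOpts{
+//		Name:         "vpnservice1",
+//		RouterID:     "2512e759-e8d7-4eea-a0af-4a85927a2e59",
+//		AdminStateUp: gophercloud.Enabled,
+//	}
+//	service, err := services.Create(networkClient, createOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}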
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToServiceCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a particular VPN service based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToServiceUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a VPN service.
+type UpdateOpts struct {
+	// Name is the human readable name of the service.
+	Name *string `json:"name,omitempty"`
+
+	// Description is the human readable description of the service.
+	Description *string `json:"description,omitempty"`
+
+	// AdminStateUp is the administrative state of the resource, which is up (true) or down (false).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToServiceUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToServiceUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vpnservice")
+}
+
+// Update allows VPN services to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToServiceUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToServiceListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the VPN service attributes you want to see returned.
+type ListOpts struct {
+	TenantID     string `q:"tenant_id"`
+	Name         string `q:"name"`
+	Description  string `q:"description"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Status       string `q:"status"`
+	SubnetID     string `q:"subnet_id"`
+	RouterID     string `q:"router_id"`
+	ProjectID    string `q:"project_id"`
+	ExternalV6IP string `q:"external_v6_ip"`
+	ExternalV4IP string `q:"external_v4_ip"`
+	FlavorID     string `q:"flavor_id"`
+}
+
+// ToServiceListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToServiceListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// VPN services. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToServiceListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return ServicePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a particular VPN service based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go new file mode 100644 index 00000000..5e555699 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go @@ -0,0 +1,121 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Service is a VPN Service +type Service struct { + // TenantID is the ID of the project. + TenantID string `json:"tenant_id"` + + // ProjectID is the ID of the project. + ProjectID string `json:"project_id"` + + // SubnetID is the ID of the subnet. + SubnetID string `json:"subnet_id"` + + // RouterID is the ID of the router. + RouterID string `json:"router_id"` + + // Description is a human-readable description for the resource. + // Default is an empty string + Description string `json:"description"` + + // AdminStateUp is the administrative state of the resource, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Name is the human readable name of the service. + Name string `json:"name"` + + // Status indicates whether IPsec VPN service is currently operational. + // Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE. + Status string `json:"status"` + + // ID is the unique ID of the VPN service. + ID string `json:"id"` + + // ExternalV6IP is the read-only external (public) IPv6 address that is used for the VPN service. + ExternalV6IP string `json:"external_v6_ip"` + + // ExternalV4IP is the read-only external (public) IPv4 address that is used for the VPN service. + ExternalV4IP string `json:"external_v4_ip"` + + // FlavorID is the ID of the flavor. + FlavorID string `json:"flavor_id"` +} + +type commonResult struct { + gophercloud.Result +} + +// ServicePage is the page returned by a pager when traversing over a +// collection of VPN services. +type ServicePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of VPN services has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r ServicePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"vpnservices_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ServicePage struct is empty. +func (r ServicePage) IsEmpty() (bool, error) { + is, err := ExtractServices(r) + return len(is) == 0, err +} + +// ExtractServices accepts a Page struct, specifically a Service struct, +// and extracts the elements into a slice of Service structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Services []Service `json:"vpnservices"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Services, err +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Service. 
+type GetResult struct {
+	commonResult
+}
+
+// Extract is a function that accepts a result and extracts a VPN service.
+func (r commonResult) Extract() (*Service, error) {
+	var s struct {
+		Service *Service `json:"vpnservice"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Service, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Service.
+type CreateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Service.
+type UpdateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go
new file mode 100644
index 00000000..fe8b343f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go
@@ -0,0 +1,16 @@
+package services
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "vpn"
+	resourcePath = "vpnservices"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go
new file mode 100644
index 00000000..66befd3b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go
@@ -0,0 +1,68 @@
+/*
+Package siteconnections allows management and retrieval of IPSec site connections in the
+OpenStack Networking Service.
+
+
+Example to Create an IPSec site connection
+
+	createOpts := siteconnections.CreateOpts{
+		Name:           "Connection1",
+		PSK:            "secret",
+		Initiator:      siteconnections.InitiatorBiDirectional,
+		AdminStateUp:   gophercloud.Enabled,
+		IPSecPolicyID:  "4ab0a72e-64ef-4809-be43-c3f7e0e5239b",
+		PeerEPGroupID:  "5f5801b1-b383-4cf0-bf61-9e85d4044b2d",
+		IKEPolicyID:    "47a880f9-1da9-468c-b289-219c9eca78f0",
+		VPNServiceID:   "692c1ec8-a7cd-44d9-972b-8ed3fe4cc476",
+		LocalEPGroupID: "498bb96a-1517-47ea-b1eb-c4a53db46a16",
+		PeerAddress:    "172.24.4.233",
+		PeerID:         "172.24.4.233",
+		MTU:            1500,
+	}
+	connection, err := siteconnections.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific IPSec site connection by ID
+
+	conn, err := siteconnections.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a site connection
+
+	connID := "38aee955-6283-4279-b091-8b9c828000ec"
+	err := siteconnections.Delete(networkClient, connID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List site connections
+
+	allPages, err := siteconnections.List(client, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allConnections, err := siteconnections.ExtractConnections(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an IPSec site connection
+
+	description := "updated connection"
+	name := "updatedname"
+	updateOpts := siteconnections.UpdateOpts{
+		Name:        &name,
+		Description: &description,
+	}
+	updatedConnection, err := siteconnections.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package siteconnections
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go
new file mode 100644
index 00000000..15ce54b5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go
@@ -0,0 +1,243 @@
+package siteconnections
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToConnectionCreateMap() (map[string]interface{}, error)
+}
+type Action string
+type Initiator string
+
+const (
+	ActionHold             Action    = "hold"
+	ActionClear            Action    = "clear"
+	ActionRestart          Action    = "restart"
+	ActionDisabled         Action    = "disabled"
+	ActionRestartByPeer    Action    = "restart-by-peer"
+	InitiatorBiDirectional Initiator = "bi-directional"
+	InitiatorResponseOnly  Initiator = "response-only"
+)
+
+// DPDCreateOpts contains all the values needed to create a valid configuration for dead peer detection (DPD) protocols
+type DPDCreateOpts struct {
+	// The dead peer detection (DPD) action.
+	// A valid value is clear, hold, restart, disabled, or restart-by-peer.
+	// Default value is hold.
+	Action Action `json:"action,omitempty"`
+
+	// The dead peer detection (DPD) timeout in seconds.
+	// A valid value is a positive integer that is greater than the DPD interval value.
+	// Default is 120.
+	Timeout int `json:"timeout,omitempty"`
+
+	// The dead peer detection (DPD) interval, in seconds.
+	// A valid value is a positive integer.
+	// Default is 30.
+	Interval int `json:"interval,omitempty"`
+}
+
+// CreateOpts contains all the values needed to create a new IPSec site connection
+type CreateOpts struct {
+	// The ID of the IKE policy
+	IKEPolicyID string `json:"ikepolicy_id"`
+
+	// The ID of the VPN Service
+	VPNServiceID string `json:"vpnservice_id"`
+
+	// The ID for the endpoint group that contains private subnets for the local side of the connection.
+	// You must specify this parameter with the peer_ep_group_id parameter unless
+	// in backward-compatible mode where peer_cidrs is provided with a subnet_id for the VPN service.
+	LocalEPGroupID string `json:"local_ep_group_id,omitempty"`
+
+	// The ID of the IPsec policy.
+	IPSecPolicyID string `json:"ipsecpolicy_id"`
+
+	// The peer router identity for authentication.
+	// A valid value is an IPv4 address, IPv6 address, e-mail address, key ID, or FQDN.
+	// Typically, this value matches the peer_address value.
+	PeerID string `json:"peer_id"`
+
+	// The ID of the project
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// The ID for the endpoint group that contains private CIDRs in the form < net_address > / < prefix >
+	// for the peer side of the connection.
+	// You must specify this parameter with the local_ep_group_id parameter unless in backward-compatible mode
+	// where peer_cidrs is provided with a subnet_id for the VPN service.
+	PeerEPGroupID string `json:"peer_ep_group_id,omitempty"`
+
+	// An ID to be used instead of the external IP address for a virtual router used in traffic between instances on different networks in east-west traffic.
+	// Most often, local ID would be domain name, email address, etc.
+	// If this is not configured then the external IP address will be used as the ID.
+	LocalID string `json:"local_id,omitempty"`
+
+	// The human readable name of the connection.
+	// Does not have to be unique.
+	// Default is an empty string
+	Name string `json:"name,omitempty"`
+
+	// The human readable description of the connection.
+	// Does not have to be unique.
+	// Default is an empty string
+	Description string `json:"description,omitempty"`
+
+	// The peer gateway public IPv4 or IPv6 address or FQDN.
+	PeerAddress string `json:"peer_address"`
+
+	// The pre-shared key.
+	// A valid value is any string.
+	PSK string `json:"psk"`
+
+	// Indicates whether this VPN can only respond to connections or both respond to and initiate connections.
+	// A valid value is response-only or bi-directional. Default is bi-directional.
+	Initiator Initiator `json:"initiator,omitempty"`
+
+	// Unique list of valid peer private CIDRs in the form < net_address > / < prefix > .
+	PeerCIDRs []string `json:"peer_cidrs,omitempty"`
+
+	// The administrative state of the resource, which is up (true) or down (false).
+	// Default is false
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+
+	// A dictionary with dead peer detection (DPD) protocol controls.
+	DPD *DPDCreateOpts `json:"dpd,omitempty"`
+
+	// The maximum transmission unit (MTU) value to address fragmentation.
+	// Minimum value is 68 for IPv4, and 1280 for IPv6.
+	MTU int `json:"mtu,omitempty"`
+}
+
+// ToConnectionCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToConnectionCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "ipsec_site_connection")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// IPSec site connection.
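+//
+// A minimal sketch of tuning dead peer detection through DPDCreateOpts
+// (illustrative only: it assumes an authenticated Neutron ServiceClient named
+// networkClient, and every ID shown is a placeholder):
+//
+//	dpd := siteconnections.DPDCreateOpts{
+//		Action:   siteconnections.ActionRestart,
+//		Timeout:  150, // must be greater than Interval
+//		Interval: 30,
+//	}
+//	createOpts := siteconnections.CreateOpts{
+//		IKEPolicyID:   "47a880f9-1da9-468c-b289-219c9eca78f0",
+//		IPSecPolicyID: "4ab0a72e-64ef-4809-be43-c3f7e0e5239b",
+//		VPNServiceID:  "692c1ec8-a7cd-44d9-972b-8ed3fe4cc476",
+//		PeerAddress:   "172.24.4.233",
+//		PeerID:        "172.24.4.233",
+//		PSK:           "secret",
+//		DPD:           &dpd,
+//	}
+//	conn, err := siteconnections.Create(networkClient, createOpts).Extract()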
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToConnectionCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + + return +} + +// Delete will permanently delete a particular IPSec site connection based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// Get retrieves a particular IPSec site connection based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToConnectionListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the IPSec site connection attributes you want to see returned. +type ListOpts struct { + IKEPolicyID string `q:"ikepolicy_id"` + VPNServiceID string `q:"vpnservice_id"` + LocalEPGroupID string `q:"local_ep_group_id"` + IPSecPolicyID string `q:"ipsecpolicy_id"` + PeerID string `q:"peer_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + PeerEPGroupID string `q:"peer_ep_group_id"` + LocalID string `q:"local_id"` + Name string `q:"name"` + Description string `q:"description"` + PeerAddress string `q:"peer_address"` + PSK string `q:"psk"` + Initiator Initiator `q:"initiator"` + AdminStateUp *bool `q:"admin_state_up"` + MTU int `q:"mtu"` +} + +// ToConnectionListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToConnectionListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// IPSec site connections. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToConnectionListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ConnectionPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
+type UpdateOptsBuilder interface {
+	ToConnectionUpdateMap() (map[string]interface{}, error)
+}
+
+// DPDUpdateOpts contains the values used when updating the DPD of an IPSec site connection
+type DPDUpdateOpts struct {
+	Action   Action `json:"action,omitempty"`
+	Timeout  int    `json:"timeout,omitempty"`
+	Interval int    `json:"interval,omitempty"`
+}
+
+// UpdateOpts contains the values used when updating an IPSec site connection
+type UpdateOpts struct {
+	Description    *string        `json:"description,omitempty"`
+	Name           *string        `json:"name,omitempty"`
+	LocalID        string         `json:"local_id,omitempty"`
+	PeerAddress    string         `json:"peer_address,omitempty"`
+	PeerID         string         `json:"peer_id,omitempty"`
+	PeerCIDRs      []string       `json:"peer_cidrs,omitempty"`
+	LocalEPGroupID string         `json:"local_ep_group_id,omitempty"`
+	PeerEPGroupID  string         `json:"peer_ep_group_id,omitempty"`
+	MTU            int            `json:"mtu,omitempty"`
+	Initiator      Initiator      `json:"initiator,omitempty"`
+	PSK            string         `json:"psk,omitempty"`
+	DPD            *DPDUpdateOpts `json:"dpd,omitempty"`
+	AdminStateUp   *bool          `json:"admin_state_up,omitempty"`
+}
+
+// ToConnectionUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToConnectionUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "ipsec_site_connection")
+}
+
+// Update allows IPSec site connections to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToConnectionUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go
new file mode 100644
index 00000000..3c09e4d0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go
@@ -0,0 +1,163 @@
+package siteconnections
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type DPD struct {
+	// Action is the dead peer detection (DPD) action.
+	Action string `json:"action"`
+
+	// Timeout is the dead peer detection (DPD) timeout in seconds.
+	Timeout int `json:"timeout"`
+
+	// Interval is the dead peer detection (DPD) interval in seconds.
+	Interval int `json:"interval"`
+}
+
+// Connection is an IPSec site connection
+type Connection struct {
+	// IKEPolicyID is the ID of the IKE policy.
+	IKEPolicyID string `json:"ikepolicy_id"`
+
+	// VPNServiceID is the ID of the VPN service.
+	VPNServiceID string `json:"vpnservice_id"`
+
+	// LocalEPGroupID is the ID for the endpoint group that contains private subnets for the local side of the connection.
+	LocalEPGroupID string `json:"local_ep_group_id"`
+
+	// IPSecPolicyID is the ID of the IPSec policy
+	IPSecPolicyID string `json:"ipsecpolicy_id"`
+
+	// PeerID is the peer router identity for authentication.
+	PeerID string `json:"peer_id"`
+
+	// TenantID is the ID of the project.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the ID of the project.
+	ProjectID string `json:"project_id"`
+
+	// PeerEPGroupID is the ID for the endpoint group that contains private CIDRs in the form < net_address > / < prefix >
+	// for the peer side of the connection.
+	PeerEPGroupID string `json:"peer_ep_group_id"`
+
+	// LocalID is an ID to be used instead of the external IP address for a virtual router used in traffic
+	// between instances on different networks in east-west traffic.
+	LocalID string `json:"local_id"`
+
+	// Name is the human readable name of the connection.
+	Name string `json:"name"`
+
+	// Description is the human readable description of the connection.
+	Description string `json:"description"`
+
+	// PeerAddress is the peer gateway public IPv4 or IPv6 address or FQDN.
+	PeerAddress string `json:"peer_address"`
+
+	// RouteMode is the route mode.
+	RouteMode string `json:"route_mode"`
+
+	// PSK is the pre-shared key.
+	PSK string `json:"psk"`
+
+	// Initiator indicates whether this VPN can only respond to connections or both respond to and initiate connections.
+	Initiator string `json:"initiator"`
+
+	// PeerCIDRs is a unique list of valid peer private CIDRs in the form < net_address > / < prefix > .
+	PeerCIDRs []string `json:"peer_cidrs"`
+
+	// AdminStateUp is the administrative state of the connection.
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// DPD is the dead peer detection (DPD) protocol controls.
+	DPD DPD `json:"dpd"`
+
+	// AuthMode is the authentication mode.
+	AuthMode string `json:"auth_mode"`
+
+	// MTU is the maximum transmission unit (MTU) value to address fragmentation.
+	MTU int `json:"mtu"`
+
+	// Status indicates whether the IPsec connection is currently operational.
+	// Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE.
+	Status string `json:"status"`
+
+	// ID is the ID of the connection.
+	ID string `json:"id"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// ConnectionPage is the page returned by a pager when traversing over a
+// collection of IPSec site connections.
+type ConnectionPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of IPSec site connections has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r ConnectionPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"ipsec_site_connections_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a ConnectionPage struct is empty.
+func (r ConnectionPage) IsEmpty() (bool, error) {
+	is, err := ExtractConnections(r)
+	return len(is) == 0, err
+}
+
+// ExtractConnections accepts a Page struct, specifically a Connection struct,
+// and extracts the elements into a slice of Connection structs. In other words,
+// a generic collection is mapped into a relevant slice.
+func ExtractConnections(r pagination.Page) ([]Connection, error) {
+	var s struct {
+		Connections []Connection `json:"ipsec_site_connections"`
+	}
+	err := (r.(ConnectionPage)).ExtractInto(&s)
+	return s.Connections, err
+}
+
+// Extract is a function that accepts a result and extracts an IPSec site connection.
+func (r commonResult) Extract() (*Connection, error) {
+	var s struct {
+		Connection *Connection `json:"ipsec_site_connection"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Connection, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Connection.
+type CreateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Connection.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Connection.
+type UpdateResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go
new file mode 100644
index 00000000..5c8ee9a3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go
@@ -0,0 +1,16 @@
+package siteconnections
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "vpn"
+	resourcePath = "ipsec-site-connections"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go
new file mode 100644
index 00000000..9d1dd5a7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go
@@ -0,0 +1,66 @@
+/*
+Package networks contains functionality for working with Neutron network
+resources. A network is an isolated virtual layer-2 broadcast domain that is
+typically reserved for the tenant who created it (unless you configure the
+network to be shared). Tenants can create multiple networks until the
+per-tenant quota threshold is reached.
+
+In the v2.0 Networking API, the network is the main entity. Ports and subnets
+are always associated with a network.
+
+Example to List Networks
+
+	listOpts := networks.ListOpts{
+		TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66",
+	}
+
+	allPages, err := networks.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allNetworks, err := networks.ExtractNetworks(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, network := range allNetworks {
+		fmt.Printf("%+v\n", network)
+	}
+
+Example to Create a Network
+
+	iTrue := true
+	createOpts := networks.CreateOpts{
+		Name:         "network_1",
+		AdminStateUp: &iTrue,
+	}
+
+	network, err := networks.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Network
+
+	networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78"
+
+	name := "new_name"
+	updateOpts := networks.UpdateOpts{
+		Name: &name,
+	}
+
+	network, err := networks.Update(networkClient, networkID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Network
+
+	networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78"
+	err := networks.Delete(networkClient, networkID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package networks
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go
new file mode 100644
index 00000000..8006c481
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go
@@ -0,0 +1,180 @@
+package networks
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToNetworkListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the network attributes you want to see returned. SortKey allows you to sort
+// by a particular network attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	Name         string `q:"name"`
+	Description  string `q:"description"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	TenantID     string `q:"tenant_id"`
+	ProjectID    string `q:"project_id"`
+	Shared       *bool  `q:"shared"`
+	ID           string `q:"id"`
+	Marker       string `q:"marker"`
+	Limit        int    `q:"limit"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+	Tags         string `q:"tags"`
+	TagsAny      string `q:"tags-any"`
+	NotTags      string `q:"not-tags"`
+	NotTagsAny   string `q:"not-tags-any"`
+}
+
+// ToNetworkListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToNetworkListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// networks. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
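+//
+// Rather than collecting every result with AllPages, callers can also walk the
+// collection one page at a time. A sketch (assuming an authenticated
+// networkClient):
+//
+//	err := networks.List(networkClient, networks.ListOpts{}).EachPage(
+//		func(page pagination.Page) (bool, error) {
+//			nets, err := networks.ExtractNetworks(page)
+//			if err != nil {
+//				return false, err
+//			}
+//			for _, n := range nets {
+//				fmt.Printf("%+v\n", n)
+//			}
+//			return true, nil // returning true requests the next page
+//		})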
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToNetworkListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return NetworkPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a specific network based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToNetworkCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options used to create a network.
+type CreateOpts struct {
+	AdminStateUp          *bool    `json:"admin_state_up,omitempty"`
+	Name                  string   `json:"name,omitempty"`
+	Description           string   `json:"description,omitempty"`
+	Shared                *bool    `json:"shared,omitempty"`
+	TenantID              string   `json:"tenant_id,omitempty"`
+	ProjectID             string   `json:"project_id,omitempty"`
+	AvailabilityZoneHints []string `json:"availability_zone_hints,omitempty"`
+}
+
+// ToNetworkCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "network")
+}
+
+// Create accepts a CreateOpts struct and creates a new network using the values
+// provided. This operation does not actually require a request body, i.e. the
+// CreateOpts struct argument can be empty.
+//
+// The tenant ID that is contained in the URI is the tenant that creates the
+// network. An admin user, however, has the option of specifying another tenant
+// ID in the CreateOpts struct.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToNetworkCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToNetworkUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update a network.
+type UpdateOpts struct {
+	AdminStateUp *bool   `json:"admin_state_up,omitempty"`
+	Name         *string `json:"name,omitempty"`
+	Description  *string `json:"description,omitempty"`
+	Shared       *bool   `json:"shared,omitempty"`
+}
+
+// ToNetworkUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "network")
+}
+
+// Update accepts an UpdateOpts struct and updates an existing network using the
+// values provided. For more information, see the Create function.
+func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToNetworkUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(updateURL(c, networkID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Delete accepts a unique ID and deletes the network associated with it.
+func Delete(c *gophercloud.ServiceClient, networkID string) (r DeleteResult) {
+	_, r.Err = c.Delete(deleteURL(c, networkID), nil)
+	return
+}
+
+// IDFromName is a convenience function that returns a network's ID, given
+// its name.
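+//
+// For example (a sketch that assumes an authenticated networkClient and a
+// uniquely named network):
+//
+//	networkID, err := networks.IDFromName(networkClient, "private")
+//	if err != nil {
+//		// A missing or ambiguous name surfaces as a typed error:
+//		// gophercloud.ErrResourceNotFound or gophercloud.ErrMultipleResourcesFound.
+//	}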
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+
+	listOpts := ListOpts{
+		Name: name,
+	}
+
+	pages, err := List(client, listOpts).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractNetworks(pages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, s := range all {
+		if s.Name == name {
+			count++
+			id = s.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "network"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "network"}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go
new file mode 100644
index 00000000..f0306741
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go
@@ -0,0 +1,124 @@
+package networks
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts a network resource.
+func (r commonResult) Extract() (*Network, error) {
+	var s Network
+	err := r.ExtractInto(&s)
+	return &s, err
+}
+
+func (r commonResult) ExtractInto(v interface{}) error {
+	return r.Result.ExtractIntoStructPtr(v, "network")
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Network.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Network.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an update operation. Call its Extract
+// method to interpret it as a Network.
+type UpdateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// Network represents, well, a network.
+type Network struct {
+	// UUID for the network
+	ID string `json:"id"`
+
+	// Human-readable name for the network. Might not be unique.
+	Name string `json:"name"`
+
+	// Description for the network
+	Description string `json:"description"`
+
+	// The administrative state of network. If false (down), the network does not
+	// forward packets.
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Indicates whether network is currently operational. Possible values include
+	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional
+	// values.
+	Status string `json:"status"`
+
+	// Subnets associated with this network.
+	Subnets []string `json:"subnets"`
+
+	// TenantID is the project owner of the network.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the project owner of the network.
+	ProjectID string `json:"project_id"`
+
+	// Specifies whether the network resource can be accessed by any tenant.
+	Shared bool `json:"shared"`
+
+	// Availability zone hints group network nodes that run services like DHCP, L3, FW, and others.
+	// Used to make network resources highly available.
+ AvailabilityZoneHints []string `json:"availability_zone_hints"` + + // Tags optionally set via extensions/attributestags + Tags []string `json:"tags"` +} + +// NetworkPage is the page returned by a pager when traversing over a +// collection of networks. +type NetworkPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of networks has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r NetworkPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"networks_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a NetworkPage struct is empty. +func (r NetworkPage) IsEmpty() (bool, error) { + is, err := ExtractNetworks(r) + return len(is) == 0, err +} + +// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, +// and extracts the elements into a slice of Network structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s []Network + err := ExtractNetworksInto(r, &s) + return s, err +} + +func ExtractNetworksInto(r pagination.Page, v interface{}) error { + return r.(NetworkPage).Result.ExtractIntoSlicePtr(v, "networks") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go new file mode 100644 index 00000000..4a8fb1dc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go @@ -0,0 +1,31 @@ +package networks + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("networks", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go new file mode 100644 index 00000000..cfb1774f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go @@ -0,0 +1,73 @@ +/* +Package ports contains functionality for working with Neutron port resources. + +A port represents a virtual switch port on a logical network switch. Virtual +instances attach their interfaces into ports. The logical port also defines +the MAC address and the IP address(es) to be assigned to the interfaces +plugged into them. When IP addresses are associated to a port, this also +implies the port is associated with a subnet, as the IP address was taken +from the allocation pool for a specific subnet. 
+
+Example to List Ports
+
+	listOpts := ports.ListOpts{
+		DeviceID: "b0b89efe-82f8-461d-958b-adbf80f50c7d",
+	}
+
+	allPages, err := ports.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPorts, err := ports.ExtractPorts(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, port := range allPorts {
+		fmt.Printf("%+v\n", port)
+	}
+
+Example to Create a Port
+
+	asu := true
+	createOpts := ports.CreateOpts{
+		Name:         "private-port",
+		AdminStateUp: &asu,
+		NetworkID:    "a87cc70a-3e15-4acf-8205-9b711a3531b7",
+		FixedIPs: []ports.IP{
+			{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"},
+		},
+		SecurityGroups: &[]string{"foo"},
+		AllowedAddressPairs: []ports.AddressPair{
+			{IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"},
+		},
+	}
+
+	port, err := ports.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Port
+
+	portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8"
+
+	name := "new_name"
+	updateOpts := ports.UpdateOpts{
+		Name:           &name,
+		SecurityGroups: &[]string{},
+	}
+
+	port, err := ports.Update(networkClient, portID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Port
+
+	portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8"
+	err := ports.Delete(networkClient, portID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package ports
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go
new file mode 100644
index 00000000..f5f7d761
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go
@@ -0,0 +1,191 @@
+package ports
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToPortListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the port attributes you want to see returned. SortKey allows you to sort
+// by a particular port attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Status       string `q:"status"`
+	Name         string `q:"name"`
+	Description  string `q:"description"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	NetworkID    string `q:"network_id"`
+	TenantID     string `q:"tenant_id"`
+	ProjectID    string `q:"project_id"`
+	DeviceOwner  string `q:"device_owner"`
+	MACAddress   string `q:"mac_address"`
+	ID           string `q:"id"`
+	DeviceID     string `q:"device_id"`
+	Limit        int    `q:"limit"`
+	Marker       string `q:"marker"`
+	SortKey      string `q:"sort_key"`
+	SortDir      string `q:"sort_dir"`
+	Tags         string `q:"tags"`
+	TagsAny      string `q:"tags-any"`
+	NotTags      string `q:"not-tags"`
+	NotTagsAny   string `q:"not-tags-any"`
+}
+
+// ToPortListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToPortListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// ports. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those ports that are owned by the tenant
+// who submits the request, unless the request is submitted by a user with
+// administrative rights.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToPortListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return PortPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a specific port based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPortCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents the attributes used when creating a new port.
+type CreateOpts struct {
+	NetworkID           string        `json:"network_id" required:"true"`
+	Name                string        `json:"name,omitempty"`
+	Description         string        `json:"description,omitempty"`
+	AdminStateUp        *bool         `json:"admin_state_up,omitempty"`
+	MACAddress          string        `json:"mac_address,omitempty"`
+	FixedIPs            interface{}   `json:"fixed_ips,omitempty"`
+	DeviceID            string        `json:"device_id,omitempty"`
+	DeviceOwner         string        `json:"device_owner,omitempty"`
+	TenantID            string        `json:"tenant_id,omitempty"`
+	ProjectID           string        `json:"project_id,omitempty"`
+	SecurityGroups      *[]string     `json:"security_groups,omitempty"`
+	AllowedAddressPairs []AddressPair `json:"allowed_address_pairs,omitempty"`
+}
+
+// ToPortCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "port")
+}
+
+// Create accepts a CreateOpts struct and creates a new port using the values
+// provided. You must remember to provide a NetworkID value.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToPortCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToPortUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents the attributes used when updating an existing port.
+type UpdateOpts struct {
+	Name                *string        `json:"name,omitempty"`
+	Description         *string        `json:"description,omitempty"`
+	AdminStateUp        *bool          `json:"admin_state_up,omitempty"`
+	FixedIPs            interface{}    `json:"fixed_ips,omitempty"`
+	DeviceID            *string        `json:"device_id,omitempty"`
+	DeviceOwner         *string        `json:"device_owner,omitempty"`
+	SecurityGroups      *[]string      `json:"security_groups,omitempty"`
+	AllowedAddressPairs *[]AddressPair `json:"allowed_address_pairs,omitempty"`
+}
+
+// ToPortUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "port")
+}
+
+// Update accepts an UpdateOpts struct and updates an existing port using the
+// values provided.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPortUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the port associated with it. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a port's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractPorts(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "port"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "port"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go new file mode 100644 index 00000000..3941b623 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go @@ -0,0 +1,149 @@ +package ports + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a port resource. +func (r commonResult) Extract() (*Port, error) { + var s Port + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "port") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Port. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Port. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Port. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IP is a sub-struct that represents an individual IP. +type IP struct { + SubnetID string `json:"subnet_id"` + IPAddress string `json:"ip_address,omitempty"` +} + +// AddressPair contains the IP Address and the MAC address. +type AddressPair struct { + IPAddress string `json:"ip_address,omitempty"` + MACAddress string `json:"mac_address,omitempty"` +} + +// Port represents a Neutron port. See package documentation for a top-level +// description of what this is. +type Port struct { + // UUID for the port. + ID string `json:"id"` + + // Network that this port is associated with. + NetworkID string `json:"network_id"` + + // Human-readable name for the port. Might not be unique. + Name string `json:"name"` + + // Describes the port. 
+	Description string `json:"description"`
+
+	// Administrative state of port. If false (down), port does not forward
+	// packets.
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Indicates whether network is currently operational. Possible values include
+	// `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional
+	// values.
+	Status string `json:"status"`
+
+	// MAC address to use on this port.
+	MACAddress string `json:"mac_address"`
+
+	// Specifies IP addresses for the port, thus associating the port itself with
+	// the subnets where the IP addresses are picked from.
+	FixedIPs []IP `json:"fixed_ips"`
+
+	// TenantID is the project owner of the port.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the project owner of the port.
+	ProjectID string `json:"project_id"`
+
+	// Identifies the entity (e.g., DHCP agent) using this port.
+	DeviceOwner string `json:"device_owner"`
+
+	// Specifies the IDs of any security groups associated with a port.
+	SecurityGroups []string `json:"security_groups"`
+
+	// Identifies the device (e.g., virtual server) using this port.
+	DeviceID string `json:"device_id"`
+
+	// Identifies the list of IP addresses the port will recognize/accept
+	AllowedAddressPairs []AddressPair `json:"allowed_address_pairs"`
+
+	// Tags optionally set via extensions/attributestags
+	Tags []string `json:"tags"`
+}
+
+// PortPage is the page returned by a pager when traversing over a collection
+// of network ports.
+type PortPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of ports has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r PortPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"ports_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a PortPage struct is empty.
+func (r PortPage) IsEmpty() (bool, error) {
+	is, err := ExtractPorts(r)
+	return len(is) == 0, err
+}
+
+// ExtractPorts accepts a Page struct, specifically a PortPage struct,
+// and extracts the elements into a slice of Port structs. In other words,
+// a generic collection is mapped into a relevant slice.
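+//
+// When extension-defined attributes are needed as well, ExtractPortsInto
+// (below) can decode into a caller-defined type that embeds Port. A sketch;
+// the extra binding field is only an illustration:
+//
+//	type portWithBinding struct {
+//		ports.Port
+//		HostID string `json:"binding:host_id"`
+//	}
+//
+//	var got []portWithBinding
+//	err := ports.ExtractPortsInto(allPages, &got)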
+func ExtractPorts(r pagination.Page) ([]Port, error) { + var s []Port + err := ExtractPortsInto(r, &s) + return s, err +} + +func ExtractPortsInto(r pagination.Page, v interface{}) error { + return r.(PortPage).Result.ExtractIntoSlicePtr(v, "ports") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go new file mode 100644 index 00000000..600d6f2f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go @@ -0,0 +1,31 @@ +package ports + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("ports", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ports") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go new file mode 100644 index 00000000..7d3a1b9b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go @@ -0,0 +1,135 @@ +/* +Package subnets contains functionality for working with Neutron subnet +resources. A subnet represents an IP address block that can be used to +assign IP addresses to virtual instances. Each subnet must have a CIDR and +must be associated with a network. IPs can either be selected from the whole +subnet CIDR or from allocation pools specified by the user. + +A subnet can also have a gateway, a list of DNS name servers, and host routes. +This information is pushed to instances whose interfaces are associated with +the subnet. 
+
+Example to List Subnets
+
+	listOpts := subnets.ListOpts{
+		IPVersion: 4,
+	}
+
+	allPages, err := subnets.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSubnets, err := subnets.ExtractSubnets(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, subnet := range allSubnets {
+		fmt.Printf("%+v\n", subnet)
+	}
+
+Example to Create a Subnet With Specified Gateway
+
+	var gatewayIP = "192.168.199.1"
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22",
+		IPVersion: 4,
+		CIDR:      "192.168.199.0/24",
+		GatewayIP: &gatewayIP,
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.199.2",
+				End:   "192.168.199.254",
+			},
+		},
+		DNSNameservers: []string{"foo"},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Subnet With No Gateway
+
+	var noGateway = ""
+
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23",
+		IPVersion: 4,
+		CIDR:      "192.168.1.0/24",
+		GatewayIP: &noGateway,
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.1.2",
+				End:   "192.168.1.254",
+			},
+		},
+		DNSNameservers: []string{},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Subnet With a Default Gateway
+
+	createOpts := subnets.CreateOpts{
+		NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23",
+		IPVersion: 4,
+		CIDR:      "192.168.1.0/24",
+		AllocationPools: []subnets.AllocationPool{
+			{
+				Start: "192.168.1.2",
+				End:   "192.168.1.254",
+			},
+		},
+		DNSNameservers: []string{},
+	}
+
+	subnet, err := subnets.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Subnet
+
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+	dnsNameservers := []string{"8.8.8.8"}
+	name := "new_name"
+
+	updateOpts := subnets.UpdateOpts{
+		Name:           &name,
+		DNSNameservers: &dnsNameservers,
+	}
+
+	subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Remove a Gateway From a Subnet
+
+	var noGateway = ""
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+
+	updateOpts := subnets.UpdateOpts{
+		GatewayIP: &noGateway,
+	}
+
+	subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Subnet
+
+	subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23"
+	err := subnets.Delete(networkClient, subnetID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package subnets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go
new file mode 100644
index 00000000..3e56bf38
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go
@@ -0,0 +1,269 @@
+package subnets
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToSubnetListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the subnet attributes you want to see returned. SortKey allows you to sort
+// by a particular subnet attribute. SortDir sets the direction, and is either
+// `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct {
+	Name            string `q:"name"`
+	Description     string `q:"description"`
+	EnableDHCP      *bool  `q:"enable_dhcp"`
+	NetworkID       string `q:"network_id"`
+	TenantID        string `q:"tenant_id"`
+	ProjectID       string `q:"project_id"`
+	IPVersion       int    `q:"ip_version"`
+	GatewayIP       string `q:"gateway_ip"`
+	CIDR            string `q:"cidr"`
+	IPv6AddressMode string `q:"ipv6_address_mode"`
+	IPv6RAMode      string `q:"ipv6_ra_mode"`
+	ID              string `q:"id"`
+	SubnetPoolID    string `q:"subnetpool_id"`
+	Limit           int    `q:"limit"`
+	Marker          string `q:"marker"`
+	SortKey         string `q:"sort_key"`
+	SortDir         string `q:"sort_dir"`
+	Tags            string `q:"tags"`
+	TagsAny         string `q:"tags-any"`
+	NotTags         string `q:"not-tags"`
+	NotTagsAny      string `q:"not-tags-any"`
+}
+
+// ToSubnetListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToSubnetListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// subnets. It accepts a ListOpts struct, which allows you to filter and sort
+// the returned collection for greater efficiency.
+//
+// Default policy settings return only those subnets that are owned by the tenant
+// who submits the request, unless the request is submitted by a user with
+// administrative rights.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToSubnetListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return SubnetPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a specific subnet based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToSubnetCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents the attributes used when creating a new subnet.
+type CreateOpts struct {
+	// NetworkID is the UUID of the network the subnet will be associated with.
+	NetworkID string `json:"network_id" required:"true"`
+
+	// CIDR is the address CIDR of the subnet.
+	CIDR string `json:"cidr,omitempty"`
+
+	// Name is a human-readable name of the subnet.
+	Name string `json:"name,omitempty"`
+
+	// Description of the subnet.
+	Description string `json:"description,omitempty"`
+
+	// The UUID of the project who owns the Subnet. Only administrative users
+	// can specify a project UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// The UUID of the project who owns the Subnet. Only administrative users
+	// can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// AllocationPools are IP Address pools that will be available for DHCP.
+	AllocationPools []AllocationPool `json:"allocation_pools,omitempty"`
+
+	// GatewayIP sets gateway information for the subnet. Setting to nil will
+	// cause a default gateway to automatically be created. Setting to an empty
+	// string will cause the subnet to be created with no gateway. Setting to
Setting to + // an explicit address will set that address as the gateway. + GatewayIP *string `json:"gateway_ip,omitempty"` + + // IPVersion is the IP version for the subnet. + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` + + // EnableDHCP will either enable to disable the DHCP service. + EnableDHCP *bool `json:"enable_dhcp,omitempty"` + + // DNSNameservers are the nameservers to be set via DHCP. + DNSNameservers []string `json:"dns_nameservers,omitempty"` + + // HostRoutes are any static host routes to be set via DHCP. + HostRoutes []HostRoute `json:"host_routes,omitempty"` + + // The IPv6 address modes specifies mechanisms for assigning IPv6 IP addresses. + IPv6AddressMode string `json:"ipv6_address_mode,omitempty"` + + // The IPv6 router advertisement specifies whether the networking service + // should transmit ICMPv6 packets. + IPv6RAMode string `json:"ipv6_ra_mode,omitempty"` + + // SubnetPoolID is the id of the subnet pool that subnet should be associated to. + SubnetPoolID string `json:"subnetpool_id,omitempty"` + + // Prefixlen is used when user creates a subnet from the subnetpool. It will + // overwrite the "default_prefixlen" value of the referenced subnetpool. + Prefixlen int `json:"prefixlen,omitempty"` +} + +// ToSubnetCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "subnet") + if err != nil { + return nil, err + } + + if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { + m["gateway_ip"] = nil + } + + return b, nil +} + +// Create accepts a CreateOpts struct and creates a new subnet using the values +// provided. You must remember to provide a valid NetworkID, CIDR and IP +// version. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSubnetCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSubnetUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents the attributes used when updating an existing subnet. +type UpdateOpts struct { + // Name is a human-readable name of the subnet. + Name *string `json:"name,omitempty"` + + // Description of the subnet. + Description *string `json:"description,omitempty"` + + // AllocationPools are IP Address pools that will be available for DHCP. + AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` + + // GatewayIP sets gateway information for the subnet. Setting to nil will + // cause a default gateway to automatically be created. Setting to an empty + // string will cause the subnet to be created with no gateway. Setting to + // an explicit address will set that address as the gateway. + GatewayIP *string `json:"gateway_ip,omitempty"` + + // DNSNameservers are the nameservers to be set via DHCP. + DNSNameservers *[]string `json:"dns_nameservers,omitempty"` + + // HostRoutes are any static host routes to be set via DHCP. + HostRoutes *[]HostRoute `json:"host_routes,omitempty"` + + // EnableDHCP will either enable to disable the DHCP service. + EnableDHCP *bool `json:"enable_dhcp,omitempty"` +} + +// ToSubnetUpdateMap builds a request body from UpdateOpts. 
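+//
+// A minimal sketch of the gateway handling (values hypothetical): an
+// empty-string GatewayIP is translated into an explicit JSON null, which
+// tells Neutron to remove the subnet's gateway:
+//
+//	noGateway := ""
+//	opts := UpdateOpts{GatewayIP: &noGateway}
+//	b, _ := opts.ToSubnetUpdateMap()
+//	// b == map[string]interface{}{"subnet": map[string]interface{}{"gateway_ip": nil}}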
+func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "subnet") + if err != nil { + return nil, err + } + + if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { + m["gateway_ip"] = nil + } + + return b, nil +} + +// Update accepts a UpdateOpts struct and updates an existing subnet using the +// values provided. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSubnetUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the subnet associated with it. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a subnet's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSubnets(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "subnet"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "subnet"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go new file mode 100644 index 00000000..cf039701 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go @@ -0,0 +1,152 @@ +package subnets + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a subnet resource. +func (r commonResult) Extract() (*Subnet, error) { + var s struct { + Subnet *Subnet `json:"subnet"` + } + err := r.ExtractInto(&s) + return s.Subnet, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Subnet. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Subnet. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Subnet. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AllocationPool represents a sub-range of cidr available for dynamic +// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"} +type AllocationPool struct { + Start string `json:"start"` + End string `json:"end"` +} + +// HostRoute represents a route that should be used by devices with IPs from +// a subnet (not including local subnet route). 
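+// e.g. {DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.1.1"} (values hypothetical).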
+type HostRoute struct { + DestinationCIDR string `json:"destination"` + NextHop string `json:"nexthop"` +} + +// Subnet represents a subnet. See package documentation for a top-level +// description of what this is. +type Subnet struct { + // UUID representing the subnet. + ID string `json:"id"` + + // UUID of the parent network. + NetworkID string `json:"network_id"` + + // Human-readable name for the subnet. Might not be unique. + Name string `json:"name"` + + // Description for the subnet. + Description string `json:"description"` + + // IP version, either `4' or `6'. + IPVersion int `json:"ip_version"` + + // CIDR representing IP range for this subnet, based on IP version. + CIDR string `json:"cidr"` + + // Default gateway used by devices in this subnet. + GatewayIP string `json:"gateway_ip"` + + // DNS name servers used by hosts in this subnet. + DNSNameservers []string `json:"dns_nameservers"` + + // Sub-ranges of CIDR available for dynamic allocation to ports. + // See AllocationPool. + AllocationPools []AllocationPool `json:"allocation_pools"` + + // Routes that should be used by devices with IPs from this subnet + // (not including local subnet route). + HostRoutes []HostRoute `json:"host_routes"` + + // Specifies whether DHCP is enabled for this subnet or not. + EnableDHCP bool `json:"enable_dhcp"` + + // TenantID is the project owner of the subnet. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the subnet. + ProjectID string `json:"project_id"` + + // The IPv6 address modes specifies mechanisms for assigning IPv6 IP addresses. + IPv6AddressMode string `json:"ipv6_address_mode"` + + // The IPv6 router advertisement specifies whether the networking service + // should transmit ICMPv6 packets. + IPv6RAMode string `json:"ipv6_ra_mode"` + + // SubnetPoolID is the id of the subnet pool associated with the subnet. + SubnetPoolID string `json:"subnetpool_id"` + + // Tags optionally set via extensions/attributestags + Tags []string `json:"tags"` +} + +// SubnetPage is the page returned by a pager when traversing over a collection +// of subnets. +type SubnetPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of subnets has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r SubnetPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"subnets_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SubnetPage struct is empty. +func (r SubnetPage) IsEmpty() (bool, error) { + is, err := ExtractSubnets(r) + return len(is) == 0, err +} + +// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct, +// and extracts the elements into a slice of Subnet structs. In other words, +// a generic collection is mapped into a relevant slice. 
+func ExtractSubnets(r pagination.Page) ([]Subnet, error) { + var s struct { + Subnets []Subnet `json:"subnets"` + } + err := (r.(SubnetPage)).ExtractInto(&s) + return s.Subnets, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go new file mode 100644 index 00000000..7a4f2f7d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go @@ -0,0 +1,31 @@ +package subnets + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("subnets", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("subnets") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go new file mode 100644 index 00000000..0fa1c083 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go @@ -0,0 +1,29 @@ +/* +Package accounts contains functionality for working with Object Storage +account resources. An account is the top-level resource the object storage +hierarchy: containers belong to accounts, objects belong to containers. + +Another way of thinking of an account is like a namespace for all your +resources. It is synonymous with a project or tenant in other OpenStack +services. + +Example to Get an Account + + account, err := accounts.Get(objectStorageClient, nil).Extract() + fmt.Printf("%+v\n", account) + +Example to Update an Account + + metadata := map[string]string{ + "some": "metadata", + } + + updateOpts := accounts.UpdateOpts{ + Metadata: metadata, + } + + updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() + fmt.Printf("%+v\n", updateResult) + +*/ +package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go new file mode 100644 index 00000000..452a331c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -0,0 +1,100 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +// GetOptsBuilder allows extensions to add additional headers to the Get +// request. +type GetOptsBuilder interface { + ToAccountGetMap() (map[string]string, error) +} + +// GetOpts is a structure that contains parameters for getting an account's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToAccountGetMap formats a GetOpts into a map[string]string of headers. +func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves an account's metadata. 
To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// Extract method on the GetResult. +func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountGetMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional headers to the Update +// request. +type UpdateOptsBuilder interface { + ToAccountUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update is a function that creates, updates, or deletes an account's metadata. +// To extract the headers returned, call the Extract method on the UpdateResult. +func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountUpdateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go new file mode 100644 index 00000000..10661e67 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go @@ -0,0 +1,180 @@ +package accounts + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" +) + +// UpdateResult is returned from a call to the Update function. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// UpdateHeader represents the headers returned in the response from an Update +// request. 
+type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + Date time.Time `json:"-"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// Extract will return a struct of headers returned from a call to Get. To +// obtain a map of headers, call the Extract method on the GetResult. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + BytesUsed int64 `json:"-"` + QuotaBytes *int64 `json:"-"` + ContainerCount int64 `json:"-"` + ContentLength int64 `json:"-"` + ObjectCount int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + TempURLKey string `json:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `json:"X-Account-Meta-Temp-URL-Key-2"` + Date time.Time `json:"-"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Account-Bytes-Used"` + QuotaBytes string `json:"X-Account-Meta-Quota-Bytes"` + ContentLength string `json:"Content-Length"` + ContainerCount string `json:"X-Account-Container-Count"` + ObjectCount string `json:"X-Account-Object-Count"` + Date string `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.QuotaBytes { + case "": + r.QuotaBytes = nil + default: + v, err := strconv.ParseInt(s.QuotaBytes, 10, 64) + if err != nil { + return err + } + r.QuotaBytes = &v + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + switch s.ContainerCount { + case "": + r.ContainerCount = 0 + default: + r.ContainerCount, err = strconv.ParseInt(s.ContainerCount, 10, 64) + if err != nil { + return err + } + } + + if s.Date != "" { + r.Date, err = time.Parse(time.RFC1123, s.Date) + } + + return err +} + +// GetResult is returned from a call to the Get function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metatdata associated with the account. 
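+//
+// For instance (a sketch; header values hypothetical), a response header
+// "X-Account-Meta-Book: MobyDick" would yield:
+//
+//	metadata, _ := accounts.Get(objectStorageClient, nil).ExtractMetadata()
+//	// metadata["Book"] == "MobyDick"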
+func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Account-Meta-") { + key := strings.TrimPrefix(k, "X-Account-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go new file mode 100644 index 00000000..71540b1d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go @@ -0,0 +1,11 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func updateURL(c *gophercloud.ServiceClient) string { + return getURL(c) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go new file mode 100644 index 00000000..ffc4f052 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go @@ -0,0 +1,95 @@ +/* +Package containers contains functionality for working with Object Storage +container resources. A container serves as a logical namespace for objects +that are placed inside it - an object with the same name in two different +containers represents two different objects. + +In addition to containing objects, you can also use the container to control +access to objects by using an access control list (ACL). + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "accounts" rather than "containers". This was an intentional +design in Gophercloud to make some container actions feel more natural. 
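+
+Example to Get a Container (a sketch; names are placeholders, and passing
+nil options is accepted by Get)
+
+	containerName := "my_container"
+
+	container, err := containers.Get(objectStorageClient, containerName, nil).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", container)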
+ +Example to List Containers + + listOpts := containers.ListOpts{ + Full: true, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractInfo(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to List Only Container Names + + listOpts := containers.ListOpts{ + Full: false, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractNames(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to Create a Container + + createOpts := containers.CreateOpts{ + ContentType: "application/json", + Metadata: map[string]string{ + "foo": "bar", + }, + } + + container, err := containers.Create(objectStorageClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Container + + containerName := "my_container" + + updateOpts := containers.UpdateOpts{ + Metadata: map[string]string{ + "bar": "baz", + }, + RemoveMetadata: []string{ + "foo", + }, + } + + container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Container + + containerName := "my_container" + + container, err := containers.Delete(objectStorageClient, containerName).Extract() + if err != nil { + panic(err) + } +*/ +package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go new file mode 100644 index 00000000..ca99bb2a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -0,0 +1,231 @@ +package containers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToContainerListParams() (bool, string, error) +} + +// ListOpts is a structure that holds options for listing containers. +type ListOpts struct { + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` +} + +// ToContainerListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each container. +func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. 
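+//
+// A sketch of iterating page by page with EachPage (assuming the pagination
+// package is imported; names hypothetical):
+//
+//	err := containers.List(objectStorageClient, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		names, err := containers.ExtractNames(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Printf("%v\n", names)
+//		return true, nil
+//	})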
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c) + if opts != nil { + full, query, err := opts.ToContainerListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToContainerCreateMap() (map[string]string, error) +} + +// CreateOpts is a structure that holds parameters for creating a container. +type CreateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + IfNoneMatch string `h:"If-None-Match"` + VersionsLocation string `h:"X-Versions-Location"` + HistoryLocation string `h:"X-History-Location"` +} + +// ToContainerCreateMap formats a CreateOpts into a map of headers. +func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Create is a function that creates a new container. +func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerCreateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + resp.Body.Close() + } + r.Err = err + return +} + +// Delete is a function that deletes a container. +func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, containerName), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToContainerUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting a container's metadata. +type UpdateOpts struct { + Metadata map[string]string + RemoveMetadata []string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` + VersionsLocation string `h:"X-Versions-Location"` + RemoveHistoryLocation string `h:"X-Remove-History-Location"` + HistoryLocation string `h:"X-History-Location"` +} + +// ToContainerUpdateMap formats a UpdateOpts into a map of headers. 
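+//
+// For instance (hypothetical values):
+//
+//	opts := UpdateOpts{
+//		Metadata:       map[string]string{"foo": "bar"},
+//		RemoveMetadata: []string{"baz"},
+//	}
+//	h, _ := opts.ToContainerUpdateMap()
+//	// h["X-Container-Meta-foo"] == "bar"
+//	// h["X-Remove-Container-Meta-baz"] == "remove"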
+func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + + for _, k := range opts.RemoveMetadata { + h["X-Remove-Container-Meta-"+k] = "remove" + } + + return h, nil +} + +// Update is a function that creates, updates, or deletes a container's +// metadata. +func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the Get +// request. +type GetOptsBuilder interface { + ToContainerGetMap() (map[string]string, error) +} + +// GetOpts is a structure that holds options for listing containers. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToContainerGetMap formats a GetOpts into a map of headers. +func (opts GetOpts) ToContainerGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves the metadata of a container. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName string, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerGetMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go new file mode 100644 index 00000000..cce2190f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go @@ -0,0 +1,344 @@ +package containers + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Container represents a container resource. +type Container struct { + // The total number of bytes stored in the container. + Bytes int64 `json:"bytes"` + + // The total number of objects stored in the container. + Count int64 `json:"count"` + + // The name of the container. + Name string `json:"name"` +} + +// ContainerPage is the page returned by a pager when traversing over a +// collection of containers. +type ContainerPage struct { + pagination.MarkerPageBase +} + +//IsEmpty returns true if a ListResult contains no container names. +func (r ContainerPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last container name in a ListResult. 
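+// The pager treats this value as the marker for the next page request (an
+// assumption based on how MarkerPageBase pagination works).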
+func (r ContainerPage) LastMarker() (string, error) { + names, err := ExtractNames(r) + if err != nil { + return "", err + } + if len(names) == 0 { + return "", nil + } + return names[len(names)-1], nil +} + +// ExtractInfo is a function that takes a ListResult and returns the +// containers' information. +func ExtractInfo(r pagination.Page) ([]Container, error) { + var s []Container + err := (r.(ContainerPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a ListResult and returns the +// containers' names. +func ExtractNames(page pagination.Page) ([]string, error) { + casted := page.(ContainerPage) + ct := casted.Header.Get("Content-Type") + + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(page) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, container := range parsed { + names = append(names, container.Name) + } + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(page.(ContainerPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + BytesUsed int64 `json:"-"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ObjectCount int64 `json:"-"` + Read []string `json:"-"` + TransID string `json:"X-Trans-Id"` + VersionsLocation string `json:"X-Versions-Location"` + HistoryLocation string `json:"X-History-Location"` + Write []string `json:"-"` + StoragePolicy string `json:"X-Storage-Policy"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Container-Bytes-Used"` + ContentLength string `json:"Content-Length"` + ObjectCount string `json:"X-Container-Object-Count"` + Write string `json:"X-Container-Write"` + Read string `json:"X-Container-Read"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + r.Read = strings.Split(s.Read, ",") + r.Write = strings.Split(s.Write, ",") + + r.Date = time.Time(s.Date) + + return err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the container. 
+func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Container-Meta-") { + key := strings.TrimPrefix(k, "X-Container-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a Create +// request. +type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// CreateResult represents the result of a create operation. To extract the +// the headers from the HTTP response, call its Extract method. +type CreateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +// To extract the headers from the HTTP response, call its Extract method. +func (r CreateResult) Extract() (*CreateHeader, error) { + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a Update +// request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// UpdateResult represents the result of an update operation. To extract the +// the headers from the HTTP response, call its Extract method. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a Delete +// request. 
+type DeleteHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// DeleteResult represents the result of a delete operation. To extract the +// the headers from the HTTP response, call its Extract method. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. +func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go new file mode 100644 index 00000000..9b380470 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go @@ -0,0 +1,23 @@ +package containers + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func createURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func getURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func deleteURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func updateURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go new file mode 100644 index 00000000..e9b4b8a9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go @@ -0,0 +1,106 @@ +/* +Package objects contains functionality for working with Object Storage +object resources. An object is a resource that represents and contains data +- such as documents, images, and so on. You can also store custom metadata +with an object. + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "containers" rather than "objects". This was an intentional +design in Gophercloud to make some object actions feel more natural. 
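+
+Example to Get an Object's Metadata (a sketch; names are placeholders, and
+passing nil options is accepted by Get)
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	metadata, err := objects.Get(objectStorageClient, containerName, objectName, nil).ExtractMetadata()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", metadata)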
+
+Example to List Objects
+
+	containerName := "my_container"
+
+	listOpts := objects.ListOpts{
+		Full: true,
+	}
+
+	allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allObjects, err := objects.ExtractInfo(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, object := range allObjects {
+		fmt.Printf("%+v\n", object)
+	}
+
+Example to List Object Names
+
+	containerName := "my_container"
+
+	listOpts := objects.ListOpts{
+		Full: false,
+	}
+
+	allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allObjects, err := objects.ExtractNames(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, object := range allObjects {
+		fmt.Printf("%+v\n", object)
+	}
+
+Example to Create an Object
+
+	content := "some object content"
+	objectName := "my_object"
+	containerName := "my_container"
+
+	createOpts := objects.CreateOpts{
+		ContentType: "text/plain",
+		Content:     strings.NewReader(content),
+	}
+
+	object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Copy an Object
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	copyOpts := objects.CopyOpts{
+		Destination: "/newContainer/newObject",
+	}
+
+	object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Object
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	object, err := objects.Delete(objectStorageClient, containerName, objectName).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Download an Object's Data
+
+	objectName := "my_object"
+	containerName := "my_container"
+
+	object := objects.Download(objectStorageClient, containerName, objectName, nil)
+	content, err := object.ExtractContent()
+	if err != nil {
+		panic(err)
+	}
+*/
+package objects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
new file mode 100644
index 00000000..5c4ae44d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
@@ -0,0 +1,13 @@
+package objects
+
+import "github.com/gophercloud/gophercloud"
+
+// ErrWrongChecksum is the error when the checksum generated for an object
+// doesn't match the ETag header.
+type ErrWrongChecksum struct {
+	gophercloud.BaseError
+}
+
+func (e ErrWrongChecksum) Error() string {
+	return "Local checksum does not match API ETag header"
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
new file mode 100644
index 00000000..7325cd7d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
@@ -0,0 +1,499 @@
+package objects
+
+import (
+	"bytes"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/sha1"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface { + ToObjectListParams() (bool, string, error) +} + +// ListOpts is a structure that holds parameters for listing objects. +type ListOpts struct { + // Full is a true/false value that represents the amount of object information + // returned. If Full is set to true, then the content-type, number of bytes, + // hash date last modified, and name are returned. If set to false or not set, + // then only the object names are returned. + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` + Path string `q:"path"` +} + +// ToObjectListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each object. +func (opts ListOpts) ToObjectListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves all objects in a container. It also returns +// the details for the container. To extract only the object information or names, +// pass the ListResult response to the ExtractInfo or ExtractNames function, +// respectively. +func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c, containerName) + if opts != nil { + full, query, err := opts.ToObjectListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// DownloadOptsBuilder allows extensions to add additional parameters to the +// Download request. +type DownloadOptsBuilder interface { + ToObjectDownloadParams() (map[string]string, string, error) +} + +// DownloadOpts is a structure that holds parameters for downloading an object. +type DownloadOpts struct { + IfMatch string `h:"If-Match"` + IfModifiedSince time.Time `h:"If-Modified-Since"` + IfNoneMatch string `h:"If-None-Match"` + IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` + Newest bool `h:"X-Newest"` + Range string `h:"Range"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectDownloadParams formats a DownloadOpts into a query string and map of +// headers. +func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Download is a function that retrieves the content and metadata for an object. +// To extract just the content, pass the DownloadResult response to the +// ExtractContent function. 
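+//
+// A partial download can be requested with a standard HTTP Range header
+// (a sketch; names hypothetical):
+//
+//	opts := objects.DownloadOpts{
+//		Range: "bytes=0-99", // first 100 bytes
+//	}
+//	content, err := objects.Download(objectStorageClient, "my_container", "my_object", opts).ExtractContent()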
+func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { + url := downloadURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectDownloadParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 206, 304}, + }) + if resp != nil { + r.Header = resp.Header + r.Body = resp.Body + } + r.Err = err + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToObjectCreateParams() (io.Reader, map[string]string, string, error) +} + +// CreateOpts is a structure that holds parameters for creating an object. +type CreateOpts struct { + Content io.Reader + Metadata map[string]string + NoETag bool + CacheControl string `h:"Cache-Control"` + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentLength int64 `h:"Content-Length"` + ContentType string `h:"Content-Type"` + CopyFrom string `h:"X-Copy-From"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType string `h:"X-Detect-Content-Type"` + ETag string `h:"ETag"` + IfNoneMatch string `h:"If-None-Match"` + ObjectManifest string `h:"X-Object-Manifest"` + TransferEncoding string `h:"Transfer-Encoding"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectCreateParams formats a CreateOpts into a query string and map of +// headers. +func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, nil, "", err + } + + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + + if opts.NoETag { + delete(h, "etag") + return opts.Content, h, q.String(), nil + } + + if h["ETag"] != "" { + return opts.Content, h, q.String(), nil + } + + // When we're dealing with big files an io.ReadSeeker allows us to efficiently calculate + // the md5 sum. An io.Reader is only readable once which means we have to copy the entire + // file content into memory first. + readSeeker, isReadSeeker := opts.Content.(io.ReadSeeker) + if !isReadSeeker { + data, err := ioutil.ReadAll(opts.Content) + if err != nil { + return nil, nil, "", err + } + readSeeker = bytes.NewReader(data) + } + + hash := md5.New() + // io.Copy into md5 is very efficient as it's done in small chunks. + if _, err := io.Copy(hash, readSeeker); err != nil { + return nil, nil, "", err + } + readSeeker.Seek(0, io.SeekStart) + + h["ETag"] = fmt.Sprintf("%x", hash.Sum(nil)) + + return readSeeker, h, q.String(), nil +} + +// Create is a function that creates a new object or replaces an existing +// object. If the returned response's ETag header fails to match the local +// checksum, the failed request will automatically be retried up to a maximum +// of 3 times. 
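+//
+// Because the ETag checksum is computed by reading the content, passing an
+// io.ReadSeeker (such as an *os.File or *bytes.Reader) avoids buffering the
+// whole payload in memory. A sketch (path hypothetical):
+//
+//	f, err := os.Open("/tmp/data.bin")
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer f.Close()
+//
+//	_, err = objects.Create(objectStorageClient, "my_container", "my_object", objects.CreateOpts{
+//		Content: f,
+//	}).Extract()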
+func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) { + url := createURL(c, containerName, objectName) + h := make(map[string]string) + var b io.Reader + if opts != nil { + tmpB, headers, query, err := opts.ToObjectCreateParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + b = tmpB + } + + resp, err := c.Put(url, nil, nil, &gophercloud.RequestOpts{ + RawBody: b, + MoreHeaders: h, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// CopyOptsBuilder allows extensions to add additional parameters to the +// Copy request. +type CopyOptsBuilder interface { + ToObjectCopyMap() (map[string]string, error) +} + +// CopyOpts is a structure that holds parameters for copying one object to +// another. +type CopyOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + Destination string `h:"Destination" required:"true"` +} + +// ToObjectCopyMap formats a CopyOpts into a map of headers. +func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Copy is a function that copies one object to another. +func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { + h := make(map[string]string) + headers, err := opts.ToObjectCopyMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + + url := copyURL(c, containerName, objectName) + resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToObjectDeleteQuery() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts struct { + MultipartManifest string `q:"multipart-manifest"` +} + +// ToObjectDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete is a function that deletes an object. +func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { + url := deleteURL(c, containerName, objectName) + if opts != nil { + query, err := opts.ToObjectDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + resp, err := c.Delete(url, nil) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToObjectGetParams() (map[string]string, string, error) +} + +// GetOpts is a structure that holds parameters for getting an object's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` + Expires string `q:"expires"` + Signature string `q:"signature"` +} + +// ToObjectGetParams formats a GetOpts into a query string and a map of headers. 
+func (opts GetOpts) ToObjectGetParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Get is a function that retrieves the metadata of an object. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { + url := getURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectGetParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Head(url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToObjectUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting an object's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType bool `h:"X-Detect-Content-Type"` +} + +// ToObjectUpdateMap formats a UpdateOpts into a map of headers. +func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes an object's metadata. +func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToObjectUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + url := updateURL(c, containerName, objectName) + resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// HTTPMethod represents an HTTP method string (e.g. "GET"). +type HTTPMethod string + +var ( + // GET represents an HTTP "GET" method. + GET HTTPMethod = "GET" + + // POST represents an HTTP "POST" method. + POST HTTPMethod = "POST" +) + +// CreateTempURLOpts are options for creating a temporary URL for an object. +type CreateTempURLOpts struct { + // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. + // Valid values are "GET" and "POST". + Method HTTPMethod + + // (REQUIRED) TTL is the number of seconds the temp URL should be active. + TTL int + + // (Optional) Split is the string on which to split the object URL. Since only + // the object path is used in the hash, the object URL needs to be parsed. If + // empty, the default OpenStack URL split point will be used ("/v1/"). + Split string +} + +// CreateTempURL is a function for creating a temporary URL for an object. 
It +// allows users to have "GET" or "POST" access to a particular tenant's object +// for a limited amount of time. +func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { + if opts.Split == "" { + opts.Split = "/v1/" + } + duration := time.Duration(opts.TTL) * time.Second + expiry := time.Now().Add(duration).Unix() + getHeader, err := accounts.Get(c, nil).Extract() + if err != nil { + return "", err + } + secretKey := []byte(getHeader.TempURLKey) + url := getURL(c, containerName, objectName) + splitPath := strings.Split(url, opts.Split) + baseURL, objectPath := splitPath[0], splitPath[1] + objectPath = opts.Split + objectPath + body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) + hash := hmac.New(sha1.New, secretKey) + hash.Write([]byte(body)) + hexsum := fmt.Sprintf("%x", hash.Sum(nil)) + return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go new file mode 100644 index 00000000..dd7c7044 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go @@ -0,0 +1,580 @@ +package objects + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Object is a structure that holds information related to a storage object. +type Object struct { + // Bytes is the total number of bytes that comprise the object. + Bytes int64 `json:"bytes"` + + // ContentType is the content type of the object. + ContentType string `json:"content_type"` + + // Hash represents the MD5 checksum value of the object's content. + Hash string `json:"hash"` + + // LastModified is the time the object was last modified. + LastModified time.Time `json:"-"` + + // Name is the unique name for the object. + Name string `json:"name"` + + // Subdir denotes if the result contains a subdir. + Subdir string `json:"subdir"` +} + +func (r *Object) UnmarshalJSON(b []byte) error { + type tmp Object + var s *struct { + tmp + LastModified string `json:"last_modified"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Object(s.tmp) + + if s.LastModified != "" { + t, err := time.Parse(gophercloud.RFC3339MilliNoZ, s.LastModified) + if err != nil { + t, err = time.Parse(gophercloud.RFC3339Milli, s.LastModified) + if err != nil { + return err + } + } + r.LastModified = t + } + + return nil +} + +// ObjectPage is a single page of objects that is returned from a call to the +// List function. +type ObjectPage struct { + pagination.MarkerPageBase +} + +// IsEmpty returns true if a ListResult contains no object names. +func (r ObjectPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last object name in a ListResult. +func (r ObjectPage) LastMarker() (string, error) { + return extractLastMarker(r) +} + +// ExtractInfo is a function that takes a page of objects and returns their +// full information. 
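+// A hedged sketch, assuming an authenticated Object Storage client and a pager +// produced by this package's List call: +// +// pager := objects.List(client, "container", nil) +// err := pager.EachPage(func(p pagination.Page) (bool, error) { +// objs, err := objects.ExtractInfo(p) +// if err != nil { +// return false, err +// } +// fmt.Println(objs) +// return true, nil +// })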
+func ExtractInfo(r pagination.Page) ([]Object, error) { + var s []Object + err := (r.(ObjectPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a page of objects and returns only +// their names. +func ExtractNames(r pagination.Page) ([]string, error) { + casted := r.(ObjectPage) + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, object := range parsed { + if object.Subdir != "" { + names = append(names, object.Subdir) + } else { + names = append(names, object.Name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/html"): + return []string{}, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// DownloadHeader represents the headers returned in the response from a +// Download request. +type DownloadHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DownloadHeader) UnmarshalJSON(b []byte) error { + type tmp DownloadHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DownloadHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// DownloadResult is the result returned from a call to the Download function. +// It wraps the response headers and exposes the object's content as Body. +type DownloadResult struct { + gophercloud.HeaderResult + Body io.ReadCloser +} + +// Extract will return a struct of headers returned from a call to Download. +func (r DownloadResult) Extract() (*DownloadHeader, error) { + var s *DownloadHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractContent is a function that takes a DownloadResult's io.Reader body +// and reads all available data into a slice of bytes. Please be aware that an +// io.Reader is forward-only: it can be read only once and cannot be rewound.
You can recreate a reader from the output of this +// function by using bytes.NewReader(downloadBytes). +func (r *DownloadResult) ExtractContent() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + return body, nil +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// GetResult is the result returned from a call to the Get function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult and returns the custom +// metadata associated with the object. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Object-Meta-") { + key := strings.TrimPrefix(k, "X-Object-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a +// Create request.
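+// The ETag value can be compared against a locally computed MD5 checksum of +// the uploaded content to verify a Create, as hinted by the commented-out +// check in CreateResult.Extract below.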
+type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + checksum string + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +func (r CreateResult) Extract() (*CreateHeader, error) { + //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { + // return nil, ErrWrongChecksum{} + //} + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a +// Update request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a +// Delete request. +type DeleteHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. 
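+// A hedged usage sketch: +// +// header, err := objects.Delete(client, "container", "object", nil).Extract()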
+func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} + +// CopyHeader represents the headers returned in the response from a +// Copy request. +type CopyHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + CopiedFrom string `json:"X-Copied-From"` + CopiedFromLastModified time.Time `json:"-"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CopyHeader) UnmarshalJSON(b []byte) error { + type tmp CopyHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CopyHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CopyResult represents the result of a copy operation. +type CopyResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Copy. +func (r CopyResult) Extract() (*CopyHeader, error) { + var s *CopyHeader + err := r.ExtractInto(&s) + return s, err +} + +// extractLastMarker is a function that takes a page of objects and returns the +// marker for the page. This can either be a subdir or the last object's name. +func extractLastMarker(r pagination.Page) (string, error) { + casted := r.(ObjectPage) + + // If a delimiter was requested, check if a subdir exists. 
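+ // (With a delimiter, pseudo-directories are reported in the Subdir field + // rather than Name, so the marker may need to fall back to Subdir below.)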
+ queryParams, err := url.ParseQuery(casted.URL.RawQuery) + if err != nil { + return "", err + } + + var delimiter bool + if v, ok := queryParams["delimiter"]; ok && len(v) > 0 { + delimiter = true + } + + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return "", err + } + + var lastObject Object + if len(parsed) > 0 { + lastObject = parsed[len(parsed)-1] + } + + if !delimiter { + return lastObject.Name, nil + } + + if lastObject.Name != "" { + return lastObject.Name, nil + } + + return lastObject.Subdir, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + if len(names) == 0 { + // An empty listing would otherwise panic on the index below. + return "", nil + } + + return names[len(names)-1], nil + case strings.HasPrefix(ct, "text/html"): + return "", nil + default: + return "", fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go new file mode 100644 index 00000000..b3ac304b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go @@ -0,0 +1,33 @@ +package objects + +import ( + "github.com/gophercloud/gophercloud" +) + +func listURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func copyURL(c *gophercloud.ServiceClient, container, object string) string { + return c.ServiceURL(container, object) +} + +func createURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func getURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func deleteURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func downloadURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func updateURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go new file mode 100644 index 00000000..989dc4ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go @@ -0,0 +1,16 @@ +/* +Package swauth implements Swift's built-in authentication.
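+ +Authentication works by sending the user and key in the X-Auth-User and +X-Auth-Key headers; a successful response returns the token and the storage +URL that subsequent Object Storage requests rely on.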
+ +Example to Authenticate with swauth + + authOpts := swauth.AuthOpts{ + User: "project:user", + Key: "password", + } + + swiftClient, err := swauth.NewObjectStorageV1(providerClient, authOpts) + if err != nil { + panic(err) + } +*/ +package swauth diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go new file mode 100644 index 00000000..29bdcbcf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go @@ -0,0 +1,70 @@ +package swauth + +import "github.com/gophercloud/gophercloud" + +// AuthOptsBuilder describes struct types that can be accepted by the Auth call. +type AuthOptsBuilder interface { + ToAuthOptsMap() (map[string]string, error) +} + +// AuthOpts specifies an authentication request. +type AuthOpts struct { + // User is a Swauth-based username in username:tenant format. + User string `h:"X-Auth-User" required:"true"` + + // Key is a secret/password to authenticate the User with. + Key string `h:"X-Auth-Key" required:"true"` +} + +// ToAuthOptsMap formats an AuthOpts structure into a request body. +func (opts AuthOpts) ToAuthOptsMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Auth performs an authentication request for a Swauth-based user. +func Auth(c *gophercloud.ProviderClient, opts AuthOptsBuilder) (r GetAuthResult) { + h := make(map[string]string) + + if opts != nil { + headers, err := opts.ToAuthOptsMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("GET", getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200}, + }) + + if resp != nil { + r.Header = resp.Header + } + + r.Err = err + + return r +} + +// NewObjectStorageV1 creates a Swauth-authenticated *gophercloud.ServiceClient +// client that can issue ObjectStorage-based API calls. +func NewObjectStorageV1(pc *gophercloud.ProviderClient, authOpts AuthOpts) (*gophercloud.ServiceClient, error) { + auth, err := Auth(pc, authOpts).Extract() + if err != nil { + return nil, err + } + + swiftClient := &gophercloud.ServiceClient{ + ProviderClient: pc, + Endpoint: gophercloud.NormalizeURL(auth.StorageURL), + } + + swiftClient.TokenID = auth.Token + + return swiftClient, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go new file mode 100644 index 00000000..f442f472 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go @@ -0,0 +1,27 @@ +package swauth + +import ( + "github.com/gophercloud/gophercloud" +) + +// GetAuthResult contains the response from the Auth request. Call its Extract +// method to interpret it as an AuthResult. +type GetAuthResult struct { + gophercloud.HeaderResult +} + +// AuthResult contains the authentication information from a Swauth +// authentication request. +type AuthResult struct { + Token string `json:"X-Auth-Token"` + StorageURL string `json:"X-Storage-Url"` + CDNURL string `json:"X-CDN-Management-Url"` +} + +// Extract is a method that attempts to interpret any Swauth authentication +// response as an AuthResult struct.
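+// A hedged usage sketch: +// +// auth, err := swauth.Auth(providerClient, authOpts).Extract() +// if err == nil { +// fmt.Println(auth.StorageURL) +// }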
+func (r GetAuthResult) Extract() (*AuthResult, error) { + var s *AuthResult + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go new file mode 100644 index 00000000..a30cabd6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go @@ -0,0 +1,7 @@ +package swauth + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ProviderClient) string { + return c.IdentityBase + "auth/v1.0" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go new file mode 100644 index 00000000..841a9c57 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go @@ -0,0 +1,3 @@ +// Package apiversions provides information and interaction with the different +// API versions for the Shared File System service, code-named Manila. +package apiversions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go new file mode 100644 index 00000000..8f0f7628 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go @@ -0,0 +1,23 @@ +package apiversions + +import ( + "fmt" +) + +// ErrVersionNotFound is the error when the requested API version +// could not be found. +type ErrVersionNotFound struct{} + +func (e ErrVersionNotFound) Error() string { + return fmt.Sprintf("Unable to find requested API version") +} + +// ErrMultipleVersionsFound is the error when a request for an API +// version returns multiple results. +type ErrMultipleVersionsFound struct { + Count int +} + +func (e ErrMultipleVersionsFound) Error() string { + return fmt.Sprintf("Found %d API versions", e.Count) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go new file mode 100644 index 00000000..2e1f6639 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go @@ -0,0 +1,19 @@ +package apiversions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List lists all the API versions available to end-users. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { + return APIVersionPage{pagination.SinglePageBase(r)} + }) +} + +// Get will get a specific API version, specified by major ID. 
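+// A hedged usage sketch (the version string is illustrative): +// +// version, err := apiversions.Get(client, "v2").Extract()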
+func Get(client *gophercloud.ServiceClient, v string) (r GetResult) { + _, r.Err = client.Get(getURL(client, v), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go new file mode 100644 index 00000000..60c1f1b3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go @@ -0,0 +1,73 @@ +package apiversions + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// APIVersion represents an API version for the Shared File System service. +type APIVersion struct { + // ID is the unique identifier of the API version. + ID string `json:"id"` + + // MinVersion is the minimum microversion supported. + MinVersion string `json:"min_version"` + + // Status is the API version's status. + Status string `json:"status"` + + // Updated is the date when the API was last updated. + Updated time.Time `json:"updated"` + + // Version is the maximum microversion supported. + Version string `json:"version"` +} + +// APIVersionPage is the page returned by a pager when traversing over a +// collection of API versions. +type APIVersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an APIVersionPage struct is empty. +func (r APIVersionPage) IsEmpty() (bool, error) { + is, err := ExtractAPIVersions(r) + return len(is) == 0, err +} + +// ExtractAPIVersions takes a collection page, extracts all of the elements, +// and returns them as a slice of APIVersion structs. It is effectively a cast. +func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) { + var s struct { + Versions []APIVersion `json:"versions"` + } + err := (r.(APIVersionPage)).ExtractInto(&s) + return s.Versions, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an API version resource.
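+// The body is expected to hold exactly one matching version: zero or multiple +// matches yield ErrVersionNotFound or ErrMultipleVersionsFound respectively, +// as implemented below.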
+func (r GetResult) Extract() (*APIVersion, error) { + var s struct { + Versions []APIVersion `json:"versions"` + } + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + switch len(s.Versions) { + case 0: + return nil, ErrVersionNotFound{} + case 1: + return &s.Versions[0], nil + default: + return nil, ErrMultipleVersionsFound{Count: len(s.Versions)} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go new file mode 100644 index 00000000..41aebdc5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go @@ -0,0 +1,20 @@ +package apiversions + +import ( + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +func getURL(c *gophercloud.ServiceClient, version string) string { + baseEndpoint, _ := utils.BaseEndpoint(c.Endpoint) + endpoint := strings.TrimRight(baseEndpoint, "/") + "/" + strings.TrimRight(version, "/") + "/" + return endpoint +} + +func listURL(c *gophercloud.ServiceClient) string { + baseEndpoint, _ := utils.BaseEndpoint(c.Endpoint) + endpoint := strings.TrimRight(baseEndpoint, "/") + "/" + return endpoint +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go new file mode 100644 index 00000000..df10b856 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go @@ -0,0 +1,13 @@ +package availabilityzones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will return the existing availability zones. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return AvailabilityZonePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go new file mode 100644 index 00000000..83a76c1a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go @@ -0,0 +1,59 @@ +package availabilityzones + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// AvailabilityZone contains all the information associated with an OpenStack +// AvailabilityZone. +type AvailabilityZone struct { + // The availability zone ID. + ID string `json:"id"` + // The name of the availability zone. + Name string `json:"name"` + // The date and time stamp when the availability zone was created. + CreatedAt time.Time `json:"-"` + // The date and time stamp when the availability zone was updated. + UpdatedAt time.Time `json:"-"` +} + +type commonResult struct { + gophercloud.Result +} + +// AvailabilityZonePage contains a single page of AvailabilityZone results +// returned from a List request.
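+// A hedged listing sketch: +// +// allPages, err := availabilityzones.List(client).AllPages() +// if err == nil { +// zones, _ := availabilityzones.ExtractAvailabilityZones(allPages) +// fmt.Println(zones) +// }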
+type AvailabilityZonePage struct { + pagination.SinglePageBase +} + +// ExtractAvailabilityZones will get the AvailabilityZone objects out of an AvailabilityZonePage. +func ExtractAvailabilityZones(r pagination.Page) ([]AvailabilityZone, error) { + var a struct { + AvailabilityZone []AvailabilityZone `json:"availability_zones"` + } + err := (r.(AvailabilityZonePage)).ExtractInto(&a) + return a.AvailabilityZone, err +} + +func (r *AvailabilityZone) UnmarshalJSON(b []byte) error { + type tmp AvailabilityZone + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = AvailabilityZone(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go new file mode 100644 index 00000000..fb4cdcf4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go @@ -0,0 +1,7 @@ +package availabilityzones + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-availability-zone") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/errors/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/errors/errors.go new file mode 100644 index 00000000..cd88a8ac --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/errors/errors.go @@ -0,0 +1,44 @@ +package errors + +import ( + "encoding/json" + "fmt" + + "github.com/gophercloud/gophercloud" +) + +type ManilaError struct { + Code int `json:"code"` + Message string `json:"message"` + Details string `json:"details"` +} + +type ErrorDetails map[string]ManilaError + +// error types from provider_client.go +func ExtractErrorInto(rawError error, errorDetails *ErrorDetails) (err error) { + switch e := rawError.(type) { + case gophercloud.ErrDefault400: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault401: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault403: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault404: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault405: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault408: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault429: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault500: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + case gophercloud.ErrDefault503: + err = json.Unmarshal(e.ErrUnexpectedResponseCode.Body, errorDetails) + default: + err = fmt.Errorf("Unable to extract detailed error message") + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/requests.go
b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/requests.go new file mode 100644 index 00000000..44e0d94b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/requests.go @@ -0,0 +1,72 @@ +package messages + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Delete will delete the existing Message with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToMessageListQuery() (string, error) +} + +// ListOpts holds options for listing Messages. It is passed to the +// messages.List function. +type ListOpts struct { + // The message ID + ID string `q:"id"` + // The ID of the action during which the message was created + ActionID string `q:"action_id"` + // The ID of the message detail + DetailID string `q:"detail_id"` + // The message level + MessageLevel string `q:"message_level"` + // The UUID of the request during which the message was created + RequestID string `q:"request_id"` + // The UUID of the resource for which the message was created + ResourceID string `q:"resource_id"` + // The type of the resource for which the message was created + ResourceType string `q:"resource_type"` + // The key to sort a list of messages + SortKey string `q:"sort_key"` + // The direction to sort a list of messages + SortDir string `q:"sort_dir"` + // The maximum number of messages to return + Limit int `q:"limit"` +} + +// ToMessageListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToMessageListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Messages optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToMessageListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return MessagePage{pagination.SinglePageBase(r)} + }) +} + +// Get retrieves the Message with the provided ID. To extract the Message +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/results.go new file mode 100644 index 00000000..9c48ea07 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/results.go @@ -0,0 +1,99 @@ +package messages + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Message contains all the information associated with an OpenStack +// Message.
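+// A hedged sketch of listing and extracting messages: +// +// allPages, err := messages.List(client, messages.ListOpts{Limit: 10}).AllPages() +// if err == nil { +// msgs, _ := messages.ExtractMessages(allPages) +// fmt.Println(msgs) +// }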
+type Message struct { + // The message ID + ID string `json:"id"` + // The UUID of the project where the message was created + ProjectID string `json:"project_id"` + // The ID of the action during which the message was created + ActionID string `json:"action_id"` + // The ID of the message detail + DetailID string `json:"detail_id"` + // The message level + MessageLevel string `json:"message_level"` + // The UUID of the request during which the message was created + RequestID string `json:"request_id"` + // The UUID of the resource for which the message was created + ResourceID string `json:"resource_id"` + // The type of the resource for which the message was created + ResourceType string `json:"resource_type"` + // The message text + UserMessage string `json:"user_message"` + // The date and time stamp when the message was created + CreatedAt time.Time `json:"-"` + // The date and time stamp when the message will expire + ExpiresAt time.Time `json:"-"` +} + +func (r *Message) UnmarshalJSON(b []byte) error { + type tmp Message + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + ExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"expires_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Message(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.ExpiresAt = time.Time(s.ExpiresAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// MessagePage is a pagination.pager that is returned from a call to the List function. +type MessagePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Messages. +func (r MessagePage) IsEmpty() (bool, error) { + messages, err := ExtractMessages(r) + return len(messages) == 0, err +} + +// ExtractMessages extracts and returns Messages. It is used while +// iterating over a messages.List call. +func ExtractMessages(r pagination.Page) ([]Message, error) { + var s struct { + Messages []Message `json:"messages"` + } + err := (r.(MessagePage)).ExtractInto(&s) + return s.Messages, err +} + +// Extract will get the Message object out of the commonResult object. +func (r commonResult) Extract() (*Message, error) { + var s struct { + Message *Message `json:"message"` + } + err := r.ExtractInto(&s) + return s.Message, err +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. 
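+// Call its Extract method to interpret the result as a Message.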
+type GetResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/urls.go new file mode 100644 index 00000000..7c2c54a8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/messages/urls.go @@ -0,0 +1,15 @@ +package messages + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("messages") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("messages", id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go new file mode 100644 index 00000000..8bfec43c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go @@ -0,0 +1,182 @@ +package securityservices + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type SecurityServiceType string + +// Valid security service types +const ( + LDAP SecurityServiceType = "ldap" + Kerberos SecurityServiceType = "kerberos" + ActiveDirectory SecurityServiceType = "active_directory" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecurityServiceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a SecurityService. This object is +// passed to the securityservices.Create function. For more information about +// these parameters, see the SecurityService object. +type CreateOpts struct { + // The security service type. A valid value is ldap, kerberos, or active_directory + Type SecurityServiceType `json:"type" required:"true"` + // The security service name + Name string `json:"name,omitempty"` + // The security service description + Description string `json:"description,omitempty"` + // The DNS IP address that is used inside the tenant network + DNSIP string `json:"dns_ip,omitempty"` + // The security service organizational unit (OU). Minimum supported microversion for OU is 2.44. + OU string `json:"ou,omitempty"` + // The security service user or group name that is used by the tenant + User string `json:"user,omitempty"` + // The user password, if you specify a user + Password string `json:"password,omitempty"` + // The security service domain + Domain string `json:"domain,omitempty"` + // The security service host name or IP address + Server string `json:"server,omitempty"` +} + +// ToSecurityServiceCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSecurityServiceCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_service") +} + +// Create will create a new SecurityService based on the values in CreateOpts. To +// extract the SecurityService object from the response, call the Extract method +// on the CreateResult.
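+// A hedged sketch (field values are illustrative): +// +// opts := securityservices.CreateOpts{Type: securityservices.LDAP, Name: "ldap-service"} +// ss, err := securityservices.Create(client, opts).Extract()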
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecurityServiceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will delete the existing SecurityService with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSecurityServiceListQuery() (string, error) +} + +// ListOpts holds options for listing SecurityServices. It is passed to the +// securityservices.List function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant security services. + AllTenants bool `q:"all_tenants"` + // The security service ID + ID string `q:"id"` + // The security service domain + Domain string `q:"domain"` + // The security service type. A valid value is ldap, kerberos, or active_directory + Type SecurityServiceType `q:"type"` + // The security service name + Name string `q:"name"` + // The DNS IP address that is used inside the tenant network + DNSIP string `q:"dns_ip"` + // The security service organizational unit (OU). Minimum supported microversion for OU is 2.44. + OU string `q:"ou"` + // The security service user or group name that is used by the tenant + User string `q:"user"` + // The security service host name or IP address + Server string `q:"server"` + // The ID of the share network using security services + ShareNetworkID string `q:"share_network_id"` +} + +// ToSecurityServiceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSecurityServiceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns SecurityServices optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSecurityServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SecurityServicePage{pagination.SinglePageBase(r)} + }) +} + +// Get retrieves the SecurityService with the provided ID. To extract the SecurityService +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecurityServiceUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing SecurityService. This object is passed +// to the securityservices.Update function. For more information about the parameters, see +// the SecurityService object. +type UpdateOpts struct { + // The security service name + Name *string `json:"name"` + // The security service description + Description *string `json:"description,omitempty"` + // The security service type. 
A valid value is ldap, kerberos, or active_directory + Type string `json:"type,omitempty"` + // The DNS IP address that is used inside the tenant network + DNSIP *string `json:"dns_ip,omitempty"` + // The security service organizational unit (OU). Minimum supported microversion for OU is 2.44. + OU *string `json:"ou,omitempty"` + // The security service user or group name that is used by the tenant + User *string `json:"user,omitempty"` + // The user password, if you specify a user + Password *string `json:"password,omitempty"` + // The security service domain + Domain *string `json:"domain,omitempty"` + // The security service host name or IP address + Server *string `json:"server,omitempty"` +} + +// ToSecurityServiceUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToSecurityServiceUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_service") +} + +// Update will update the SecurityService with provided information. To extract the updated +// SecurityService from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecurityServiceUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go new file mode 100644 index 00000000..355f7c76 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go @@ -0,0 +1,115 @@ +package securityservices + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecurityService contains all the information associated with an OpenStack +// SecurityService. +type SecurityService struct { + // The security service ID + ID string `json:"id"` + // The UUID of the project where the security service was created + ProjectID string `json:"project_id"` + // The security service domain + Domain string `json:"domain"` + // The security service status + Status string `json:"status"` + // The security service type. 
A valid value is ldap, kerberos, or active_directory + Type string `json:"type"` + // The security service name + Name string `json:"name"` + // The security service description + Description string `json:"description"` + // The DNS IP address that is used inside the tenant network + DNSIP string `json:"dns_ip"` + // The security service organizational unit (OU) + OU string `json:"ou"` + // The security service user or group name that is used by the tenant + User string `json:"user"` + // The user password, if you specify a user + Password string `json:"password"` + // The security service host name or IP address + Server string `json:"server"` + // The date and time stamp when the security service was created + CreatedAt time.Time `json:"-"` + // The date and time stamp when the security service was updated + UpdatedAt time.Time `json:"-"` +} + +func (r *SecurityService) UnmarshalJSON(b []byte) error { + type tmp SecurityService + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = SecurityService(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// SecurityServicePage is a pagination.pager that is returned from a call to the List function. +type SecurityServicePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no SecurityServices. +func (r SecurityServicePage) IsEmpty() (bool, error) { + securityServices, err := ExtractSecurityServices(r) + return len(securityServices) == 0, err +} + +// ExtractSecurityServices extracts and returns SecurityServices. It is used while +// iterating over a securityservices.List call. +func ExtractSecurityServices(r pagination.Page) ([]SecurityService, error) { + var s struct { + SecurityServices []SecurityService `json:"security_services"` + } + err := (r.(SecurityServicePage)).ExtractInto(&s) + return s.SecurityServices, err +} + +// Extract will get the SecurityService object out of the commonResult object. +func (r commonResult) Extract() (*SecurityService, error) { + var s struct { + SecurityService *SecurityService `json:"security_service"` + } + err := r.ExtractInto(&s) + return s.SecurityService, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. 
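+// Call its Extract method to obtain the updated SecurityService.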
+type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go new file mode 100644 index 00000000..c19b1f10 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go @@ -0,0 +1,23 @@ +package securityservices + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("security-services") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("security-services", id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("security-services", "detail") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go new file mode 100644 index 00000000..15e664ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go @@ -0,0 +1,233 @@ +package sharenetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToShareNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a ShareNetwork. This object is +// passed to the sharenetworks.Create function. For more information about +// these parameters, see the ShareNetwork object. +type CreateOpts struct { + // The UUID of the Neutron network to set up for share servers + NeutronNetID string `json:"neutron_net_id,omitempty"` + // The UUID of the Neutron subnet to set up for share servers + NeutronSubnetID string `json:"neutron_subnet_id,omitempty"` + // The UUID of the nova network to set up for share servers + NovaNetID string `json:"nova_net_id,omitempty"` + // The share network name + Name string `json:"name"` + // The share network description + Description string `json:"description"` +} + +// ToShareNetworkCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToShareNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share_network") +} + +// Create will create a new ShareNetwork based on the values in CreateOpts. To +// extract the ShareNetwork object from the response, call the Extract method +// on the CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToShareNetworkCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will delete the existing ShareNetwork with the provided ID. 
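+// A hedged usage sketch (shareNetworkID is illustrative): +// +// err := sharenetworks.Delete(client, shareNetworkID).ExtractErr()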
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToShareNetworkListQuery() (string, error) +} + +// ListOpts holds options for listing ShareNetworks. It is passed to the +// sharenetworks.List function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant share networks. + AllTenants bool `q:"all_tenants"` + // The UUID of the project where the share network was created + ProjectID string `q:"project_id"` + // The neutron network ID + NeutronNetID string `q:"neutron_net_id"` + // The neutron subnet ID + NeutronSubnetID string `q:"neutron_subnet_id"` + // The nova network ID + NovaNetID string `q:"nova_net_id"` + // The network type. A valid value is VLAN, VXLAN, GRE or flat + NetworkType string `q:"network_type"` + // The Share Network name + Name string `q:"name"` + // The Share Network description + Description string `q:"description"` + // The Share Network IP version + IPVersion gophercloud.IPVersion `q:"ip_version"` + // The Share Network segmentation ID + SegmentationID int `q:"segmentation_id"` + // List all share networks created after the given date + CreatedSince string `q:"created_since"` + // List all share networks created before the given date + CreatedBefore string `q:"created_before"` + // Limit specifies the page size. + Limit int `q:"limit"` + // Offset specifies the page number. + Offset int `q:"offset"` +} + +// ToShareNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToShareNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail returns ShareNetworks optionally limited by the conditions provided in ListOpts. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToShareNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := ShareNetworkPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// Get retrieves the ShareNetwork with the provided ID. To extract the ShareNetwork +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToShareNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing ShareNetwork. This object is passed +// to the sharenetworks.Update function. For more information about the parameters, see +// the ShareNetwork object.
+type UpdateOpts struct { + // The share network name + Name *string `json:"name,omitempty"` + // The share network description + Description *string `json:"description,omitempty"` + // The UUID of the Neutron network to set up for share servers + NeutronNetID string `json:"neutron_net_id,omitempty"` + // The UUID of the Neutron subnet to set up for share servers + NeutronSubnetID string `json:"neutron_subnet_id,omitempty"` + // The UUID of the nova network to set up for share servers + NovaNetID string `json:"nova_net_id,omitempty"` +} + +// ToShareNetworkUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToShareNetworkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share_network") +} + +// Update will update the ShareNetwork with provided information. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// AddSecurityServiceOptsBuilder allows extensions to add additional parameters to the +// AddSecurityService request. +type AddSecurityServiceOptsBuilder interface { + ToShareNetworkAddSecurityServiceMap() (map[string]interface{}, error) +} + +// AddSecurityServiceOpts contain options for adding a security service to an +// existing ShareNetwork. This object is passed to the sharenetworks.AddSecurityService +// function. For more information about the parameters, see the ShareNetwork object. +type AddSecurityServiceOpts struct { + SecurityServiceID string `json:"security_service_id"` +} + +// ToShareNetworkAddSecurityServiceMap assembles a request body based on the contents of an +// AddSecurityServiceOpts. +func (opts AddSecurityServiceOpts) ToShareNetworkAddSecurityServiceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "add_security_service") +} + +// AddSecurityService will add the security service to a ShareNetwork. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. +func AddSecurityService(client *gophercloud.ServiceClient, id string, opts AddSecurityServiceOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkAddSecurityServiceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(addSecurityServiceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveSecurityServiceOptsBuilder allows extensions to add additional parameters to the +// RemoveSecurityService request. +type RemoveSecurityServiceOptsBuilder interface { + ToShareNetworkRemoveSecurityServiceMap() (map[string]interface{}, error) +} + +// RemoveSecurityServiceOpts contain options for removing a security service from an +// existing ShareNetwork. This object is passed to the sharenetworks.RemoveSecurityService +// function. For more information about the parameters, see the ShareNetwork object. +type RemoveSecurityServiceOpts struct { + SecurityServiceID string `json:"security_service_id"` +} + +// ToShareNetworkRemoveSecurityServiceMap assembles a request body based on the contents of an +// RemoveSecurityServiceOpts. 
+func (opts RemoveSecurityServiceOpts) ToShareNetworkRemoveSecurityServiceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "remove_security_service") +} + +// RemoveSecurityService will remove the security service from a ShareNetwork. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. +func RemoveSecurityService(client *gophercloud.ServiceClient, id string, opts RemoveSecurityServiceOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkRemoveSecurityServiceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(removeSecurityServiceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go new file mode 100644 index 00000000..fdb72569 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go @@ -0,0 +1,182 @@ +package sharenetworks + +import ( + "encoding/json" + "net/url" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ShareNetwork contains all the information associated with an OpenStack +// ShareNetwork. +type ShareNetwork struct { + // The Share Network ID + ID string `json:"id"` + // The UUID of the project where the share network was created + ProjectID string `json:"project_id"` + // The neutron network ID + NeutronNetID string `json:"neutron_net_id"` + // The neutron subnet ID + NeutronSubnetID string `json:"neutron_subnet_id"` + // The nova network ID + NovaNetID string `json:"nova_net_id"` + // The network type. A valid value is VLAN, VXLAN, GRE or flat + NetworkType string `json:"network_type"` + // The segmentation ID + SegmentationID int `json:"segmentation_id"` + // The IP block from which to allocate the network, in CIDR notation + CIDR string `json:"cidr"` + // The IP version of the network. A valid value is 4 or 6 + IPVersion int `json:"ip_version"` + // The Share Network name + Name string `json:"name"` + // The Share Network description + Description string `json:"description"` + // The date and time stamp when the Share Network was created + CreatedAt time.Time `json:"-"` + // The date and time stamp when the Share Network was updated + UpdatedAt time.Time `json:"-"` +} + +func (r *ShareNetwork) UnmarshalJSON(b []byte) error { + type tmp ShareNetwork + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ShareNetwork(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// ShareNetworkPage is a pagination.pager that is returned from a call to the List function. +type ShareNetworkPage struct { + pagination.MarkerPageBase +} + +// NextPageURL generates the URL for the page of results after this one. 
+func (r ShareNetworkPage) NextPageURL() (string, error) {
+	currentURL := r.URL
+	mark, err := r.Owner.LastMarker()
+	if err != nil {
+		return "", err
+	}
+
+	q := currentURL.Query()
+	q.Set("offset", mark)
+	currentURL.RawQuery = q.Encode()
+	return currentURL.String(), nil
+}
+
+// LastMarker returns the last offset in a ListResult.
+func (r ShareNetworkPage) LastMarker() (string, error) {
+	maxInt := strconv.Itoa(int(^uint(0) >> 1))
+	shareNetworks, err := ExtractShareNetworks(r)
+	if err != nil {
+		return maxInt, err
+	}
+	if len(shareNetworks) == 0 {
+		return maxInt, nil
+	}
+
+	u, err := url.Parse(r.URL.String())
+	if err != nil {
+		return maxInt, err
+	}
+	queryParams := u.Query()
+	offset := queryParams.Get("offset")
+	limit := queryParams.Get("limit")
+
+	// Limit is not present, only one page required
+	if limit == "" {
+		return maxInt, nil
+	}
+
+	iOffset := 0
+	if offset != "" {
+		iOffset, err = strconv.Atoi(offset)
+		if err != nil {
+			return maxInt, err
+		}
+	}
+	iLimit, err := strconv.Atoi(limit)
+	if err != nil {
+		return maxInt, err
+	}
+	iOffset = iOffset + iLimit
+	offset = strconv.Itoa(iOffset)
+
+	return offset, nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (r ShareNetworkPage) IsEmpty() (bool, error) {
+	shareNetworks, err := ExtractShareNetworks(r)
+	return len(shareNetworks) == 0, err
+}
+
+// ExtractShareNetworks extracts and returns ShareNetworks. It is used while
+// iterating over a sharenetworks.List call.
+func ExtractShareNetworks(r pagination.Page) ([]ShareNetwork, error) {
+	var s struct {
+		ShareNetworks []ShareNetwork `json:"share_networks"`
+	}
+	err := (r.(ShareNetworkPage)).ExtractInto(&s)
+	return s.ShareNetworks, err
+}
+
+// Extract will get the ShareNetwork object out of the commonResult object.
+func (r commonResult) Extract() (*ShareNetwork, error) {
+	var s struct {
+		ShareNetwork *ShareNetwork `json:"share_network"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ShareNetwork, err
+}
+
+// CreateResult contains the response body and error from a Create request.
+type CreateResult struct {
+	commonResult
+}
+
+// DeleteResult contains the response body and error from a Delete request.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// GetResult contains the response body and error from a Get request.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult contains the response body and error from an Update request.
+type UpdateResult struct {
+	commonResult
+}
+
+// AddSecurityServiceResult contains the response body and error from a security
+// service addition request.
+type AddSecurityServiceResult struct {
+	commonResult
+}
+
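With the result types in place, a rough end-to-end sketch of creating a share network and attaching a security service might look like the following (all IDs are placeholders, and error handling is abbreviated):

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks"
)

// CreateNetworkWithSecurityService creates a share network and then associates
// an existing security service with it, extracting the typed results shown above.
func CreateNetworkWithSecurityService(client *gophercloud.ServiceClient) (*sharenetworks.ShareNetwork, error) {
	created, err := sharenetworks.Create(client, sharenetworks.CreateOpts{
		Name:            "my-share-network",
		NeutronNetID:    "<neutron-net-uuid>",    // placeholder
		NeutronSubnetID: "<neutron-subnet-uuid>", // placeholder
	}).Extract()
	if err != nil {
		return nil, err
	}

	return sharenetworks.AddSecurityService(client, created.ID, sharenetworks.AddSecurityServiceOpts{
		SecurityServiceID: "<security-service-uuid>", // placeholder
	}).Extract()
}
```

+// RemoveSecurityServiceResult contains the response body and error from a security
+// service removal request.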
+type RemoveSecurityServiceResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go
new file mode 100644
index 00000000..667bd2a5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go
@@ -0,0 +1,31 @@
+package sharenetworks
+
+import "github.com/gophercloud/gophercloud"
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("share-networks")
+}
+
+func deleteURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("share-networks", id)
+}
+
+func listDetailURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("share-networks", "detail")
+}
+
+func getURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
+
+func updateURL(c *gophercloud.ServiceClient, id string) string {
+	return deleteURL(c, id)
+}
+
+func addSecurityServiceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("share-networks", id, "action")
+}
+
+func removeSecurityServiceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL("share-networks", id, "action")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go
new file mode 100644
index 00000000..2028b483
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go
@@ -0,0 +1,381 @@
+package shares
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToShareCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains the options for creating a Share. This object is
+// passed to shares.Create(). For more information about these parameters,
+// please refer to the Share object, or the shared file systems API v2
+// documentation
+type CreateOpts struct {
+	// Defines the share protocol to use
+	ShareProto string `json:"share_proto" required:"true"`
+	// Size in GB
+	Size int `json:"size" required:"true"`
+	// Defines the share name
+	Name string `json:"name,omitempty"`
+	// Share description
+	Description string `json:"description,omitempty"`
+	// DisplayName is equivalent to Name. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayName string `json:"display_name,omitempty"`
+	// DisplayDescription is equivalent to Description. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayDescription string `json:"display_description,omitempty"`
+	// ShareType defines the sharetype. If omitted, a default share type is used
+	ShareType string `json:"share_type,omitempty"`
+	// VolumeType is deprecated but supported. 
Either ShareType or VolumeType can be used + VolumeType string `json:"volume_type,omitempty"` + // The UUID from which to create a share + SnapshotID string `json:"snapshot_id,omitempty"` + // Determines whether or not the share is public + IsPublic *bool `json:"is_public,omitempty"` + // Key value pairs of user defined metadata + Metadata map[string]string `json:"metadata,omitempty"` + // The UUID of the share network to which the share belongs to + ShareNetworkID string `json:"share_network_id,omitempty"` + // The UUID of the consistency group to which the share belongs to + ConsistencyGroupID string `json:"consistency_group_id,omitempty"` + // The availability zone of the share + AvailabilityZone string `json:"availability_zone,omitempty"` +} + +// ToShareCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToShareCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share") +} + +// Create will create a new Share based on the values in CreateOpts. To extract +// the Share object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToShareCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// ListOpts holds options for listing Shares. It is passed to the +// shares.List function. +type ListOpts struct { + // (Admin only). Defines whether to list the requested resources for all projects. + AllTenants bool `q:"all_tenants"` + // The share name. + Name string `q:"name"` + // Filters by a share status. + Status string `q:"status"` + // The UUID of the share server. + ShareServerID string `q:"share_server_id"` + // One or more metadata key and value pairs as a dictionary of strings. + Metadata map[string]string `q:"metadata"` + // The extra specifications for the share type. + ExtraSpecs map[string]string `q:"extra_specs"` + // The UUID of the share type. + ShareTypeID string `q:"share_type_id"` + // The maximum number of shares to return. + Limit int `q:"limit"` + // The offset to define start point of share or share group listing. + Offset int `q:"offset"` + // The key to sort a list of shares. + SortKey string `q:"sort_key"` + // The direction to sort a list of shares. + SortDir string `q:"sort_dir"` + // The UUID of the share’s base snapshot to filter the request based on. + SnapshotID string `q:"snapshot_id"` + // The share host name. + Host string `q:"host"` + // The share network ID. + ShareNetworkID string `q:"share_network_id"` + // The UUID of the project in which the share was created. Useful with all_tenants parameter. + ProjectID string `q:"project_id"` + // The level of visibility for the share. + IsPublic *bool `q:"is_public"` + // The UUID of a share group to filter resource. + ShareGroupID string `q:"share_group_id"` + // The export location UUID that can be used to filter shares or share instances. + ExportLocationID string `q:"export_location_id"` + // The export location path that can be used to filter shares or share instances. + ExportLocationPath string `q:"export_location_path"` + // The name pattern that can be used to filter shares, share snapshots, share networks or share groups. + NamePattern string `q:"name~"` + // The description pattern that can be used to filter shares, share snapshots, share networks or share groups. 
+	DescriptionPattern string `q:"description~"`
+	// Whether to show count in API response or not, default is False.
+	WithCount bool `q:"with_count"`
+	// DisplayName is equivalent to Name. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayName string `q:"display_name"`
+	// Equivalent to NamePattern.
+	DisplayNamePattern string `q:"display_name~"`
+	// VolumeTypeID is deprecated but supported. Either ShareTypeID or VolumeTypeID can be used
+	VolumeTypeID string `q:"volume_type_id"`
+	// The UUID of the share group snapshot.
+	ShareGroupSnapshotID string `q:"share_group_snapshot_id"`
+	// DisplayDescription is equivalent to Description. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayDescription string `q:"display_description"`
+	// Equivalent to DescriptionPattern
+	DisplayDescriptionPattern string `q:"display_description~"`
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToShareListQuery() (string, error)
+}
+
+// ToShareListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToShareListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// ListDetail returns []Share optionally limited by the conditions provided in ListOpts.
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listDetailURL(client)
+	if opts != nil {
+		query, err := opts.ToShareListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		p := SharePage{pagination.MarkerPageBase{PageResult: r}}
+		p.MarkerPageBase.Owner = p
+		return p
+	})
+}
+
+// Delete will delete an existing Share with the given UUID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get will get a single share with given UUID
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// GetExportLocations will get shareID's export locations.
+// Client must have Microversion set; minimum supported microversion for GetExportLocations is 2.14.
+func GetExportLocations(client *gophercloud.ServiceClient, id string) (r GetExportLocationsResult) {
+	_, r.Err = client.Get(getExportLocationsURL(client, id), &r.Body, nil)
+	return
+}
+
+// GrantAccessOptsBuilder allows extensions to add additional parameters to the
+// GrantAccess request.
+type GrantAccessOptsBuilder interface {
+	ToGrantAccessMap() (map[string]interface{}, error)
+}
+
+// GrantAccessOpts contains the options for creation of a GrantAccess request.
+// For more information about these parameters, please, refer to the shared file systems API v2,
+// Share Actions, Grant Access documentation
+type GrantAccessOpts struct {
+	// The access rule type that can be "ip", "cert" or "user".
+	AccessType string `json:"access_type"`
+	// The value that defines the access that can be a valid format of IP, cert or user.
+	AccessTo string `json:"access_to"`
+	// The access level to the share is either "rw" or "ro".
+	AccessLevel string `json:"access_level"`
+}
+
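For illustration, granting read-only access to a placeholder IP address with these options could look like the following sketch; per the doc comments in this file, the client needs a microversion of at least 2.7:

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares"
)

// GrantReadOnlyAccess allows a single (placeholder) IP address to mount the
// share read-only, returning the created access rule.
func GrantReadOnlyAccess(client *gophercloud.ServiceClient, shareID string) (*shares.AccessRight, error) {
	client.Microversion = "2.7" // GrantAccess requires at least microversion 2.7

	return shares.GrantAccess(client, shareID, shares.GrantAccessOpts{
		AccessType:  "ip",
		AccessTo:    "203.0.113.10", // placeholder address
		AccessLevel: "ro",
	}).Extract()
}
```

+// ToGrantAccessMap assembles a request body based on the contents of a
+// GrantAccessOpts.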
+func (opts GrantAccessOpts) ToGrantAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "allow_access") +} + +// GrantAccess will grant access to a Share based on the values in GrantAccessOpts. To extract +// the GrantAccess object from the response, call the Extract method on the GrantAccessResult. +// Client must have Microversion set; minimum supported microversion for GrantAccess is 2.7. +func GrantAccess(client *gophercloud.ServiceClient, id string, opts GrantAccessOptsBuilder) (r GrantAccessResult) { + b, err := opts.ToGrantAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(grantAccessURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RevokeAccessOptsBuilder allows extensions to add additional parameters to the +// RevokeAccess request. +type RevokeAccessOptsBuilder interface { + ToRevokeAccessMap() (map[string]interface{}, error) +} + +// RevokeAccessOpts contains the options for creation of a RevokeAccess request. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Revoke Access documentation +type RevokeAccessOpts struct { + AccessID string `json:"access_id"` +} + +// ToRevokeAccessMap assembles a request body based on the contents of a +// RevokeAccessOpts. +func (opts RevokeAccessOpts) ToRevokeAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "deny_access") +} + +// RevokeAccess will revoke an existing access to a Share based on the values in RevokeAccessOpts. +// RevokeAccessResult contains only the error. To extract it, call the ExtractErr method on +// the RevokeAccessResult. Client must have Microversion set; minimum supported microversion +// for RevokeAccess is 2.7. +func RevokeAccess(client *gophercloud.ServiceClient, id string, opts RevokeAccessOptsBuilder) (r RevokeAccessResult) { + b, err := opts.ToRevokeAccessMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(revokeAccessURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + return +} + +// ListAccessRights lists all access rules assigned to a Share based on its id. To extract +// the AccessRight slice from the response, call the Extract method on the ListAccessRightsResult. +// Client must have Microversion set; minimum supported microversion for ListAccessRights is 2.7. +func ListAccessRights(client *gophercloud.ServiceClient, id string) (r ListAccessRightsResult) { + requestBody := map[string]interface{}{"access_list": nil} + _, r.Err = client.Post(listAccessRightsURL(client, id), requestBody, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ExtendOptsBuilder allows extensions to add additional parameters to the +// Extend request. +type ExtendOptsBuilder interface { + ToShareExtendMap() (map[string]interface{}, error) +} + +// ExtendOpts contains options for extending a Share. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Extend share documentation +type ExtendOpts struct { + // New size in GBs. + NewSize int `json:"new_size"` +} + +// ToShareExtendMap assembles a request body based on the contents of a +// ExtendOpts. +func (opts ExtendOpts) ToShareExtendMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "extend") +} + +// Extend will extend the capacity of an existing share. 
ExtendResult contains only the error. +// To extract it, call the ExtractErr method on the ExtendResult. +// Client must have Microversion set; minimum supported microversion for Extend is 2.7. +func Extend(client *gophercloud.ServiceClient, id string, opts ExtendOptsBuilder) (r ExtendResult) { + b, err := opts.ToShareExtendMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(extendURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return +} + +// ShrinkOptsBuilder allows extensions to add additional parameters to the +// Shrink request. +type ShrinkOptsBuilder interface { + ToShareShrinkMap() (map[string]interface{}, error) +} + +// ShrinkOpts contains options for shrinking a Share. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Shrink share documentation +type ShrinkOpts struct { + // New size in GBs. + NewSize int `json:"new_size"` +} + +// ToShareShrinkMap assembles a request body based on the contents of a +// ShrinkOpts. +func (opts ShrinkOpts) ToShareShrinkMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "shrink") +} + +// Shrink will shrink the capacity of an existing share. ShrinkResult contains only the error. +// To extract it, call the ExtractErr method on the ShrinkResult. +// Client must have Microversion set; minimum supported microversion for Shrink is 2.7. +func Shrink(client *gophercloud.ServiceClient, id string, opts ShrinkOptsBuilder) (r ShrinkResult) { + b, err := opts.ToShareShrinkMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(shrinkURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToShareUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Share. This object is passed +// to the share.Update function. For more information about the parameters, see +// the Share object. +type UpdateOpts struct { + // Share name. Manila share update logic doesn't have a "name" alias. + DisplayName *string `json:"display_name,omitempty"` + // Share description. Manila share update logic doesn't have a "description" alias. + DisplayDescription *string `json:"display_description,omitempty"` + // Determines whether or not the share is public + IsPublic *bool `json:"is_public,omitempty"` +} + +// ToShareUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToShareUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share") +} + +// Update will update the Share with provided information. To extract the updated +// Share from the response, call the Extract method on the UpdateResult. 
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go new file mode 100644 index 00000000..0deae373 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go @@ -0,0 +1,326 @@ +package shares + +import ( + "encoding/json" + "net/url" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +const ( + invalidMarker = "-1" +) + +// Share contains all information associated with an OpenStack Share +type Share struct { + // The availability zone of the share + AvailabilityZone string `json:"availability_zone"` + // A description of the share + Description string `json:"description,omitempty"` + // DisplayDescription is inherited from BlockStorage API. + // Both Description and DisplayDescription can be used + DisplayDescription string `json:"display_description,omitempty"` + // DisplayName is inherited from BlockStorage API + // Both DisplayName and Name can be used + DisplayName string `json:"display_name,omitempty"` + // Indicates whether a share has replicas or not. + HasReplicas bool `json:"has_replicas"` + // The host name of the share + Host string `json:"host"` + // The UUID of the share + ID string `json:"id"` + // Indicates the visibility of the share + IsPublic bool `json:"is_public,omitempty"` + // Share links for pagination + Links []map[string]string `json:"links"` + // Key, value -pairs of custom metadata + Metadata map[string]string `json:"metadata,omitempty"` + // The name of the share + Name string `json:"name,omitempty"` + // The UUID of the project to which this share belongs to + ProjectID string `json:"project_id"` + // The share replication type + ReplicationType string `json:"replication_type,omitempty"` + // The UUID of the share network + ShareNetworkID string `json:"share_network_id"` + // The shared file system protocol + ShareProto string `json:"share_proto"` + // The UUID of the share server + ShareServerID string `json:"share_server_id"` + // The UUID of the share type. + ShareType string `json:"share_type"` + // The name of the share type. 
+	ShareTypeName string `json:"share_type_name"`
+	// Size of the share in GB
+	Size int `json:"size"`
+	// UUID of the snapshot from which to create the share
+	SnapshotID string `json:"snapshot_id"`
+	// The share status
+	Status string `json:"status"`
+	// The task state, used for share migration
+	TaskState string `json:"task_state"`
+	// The type of the volume
+	VolumeType string `json:"volume_type,omitempty"`
+	// The UUID of the consistency group this share belongs to
+	ConsistencyGroupID string `json:"consistency_group_id"`
+	// Used for filtering backends which either support or do not support share snapshots
+	SnapshotSupport bool `json:"snapshot_support"`
+	SourceCgsnapshotMemberID string `json:"source_cgsnapshot_member_id"`
+	// Timestamp when the share was created
+	CreatedAt time.Time `json:"-"`
+	// Timestamp when the share was updated
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (r *Share) UnmarshalJSON(b []byte) error {
+	type tmp Share
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+		UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Share(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+	r.UpdatedAt = time.Time(s.UpdatedAt)
+
+	return nil
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract will get the Share object from the commonResult
+func (r commonResult) Extract() (*Share, error) {
+	var s struct {
+		Share *Share `json:"share"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Share, err
+}
+
+// CreateResult contains the response body and error from a Create request.
+type CreateResult struct {
+	commonResult
+}
+
+// SharePage is a pagination.Page that is returned from a call to the ListDetail function.
+type SharePage struct {
+	pagination.MarkerPageBase
+}
+
+// NextPageURL generates the URL for the page of results after this one.
+func (r SharePage) NextPageURL() (string, error) {
+	currentURL := r.URL
+	mark, err := r.Owner.LastMarker()
+	if err != nil {
+		return "", err
+	}
+	if mark == invalidMarker {
+		return "", nil
+	}
+
+	q := currentURL.Query()
+	q.Set("offset", mark)
+	currentURL.RawQuery = q.Encode()
+	return currentURL.String(), nil
+}
+
+// LastMarker returns the last offset in a ListResult.
+func (r SharePage) LastMarker() (string, error) {
+	shares, err := ExtractShares(r)
+	if err != nil {
+		return invalidMarker, err
+	}
+	if len(shares) == 0 {
+		return invalidMarker, nil
+	}
+
+	u, err := url.Parse(r.URL.String())
+	if err != nil {
+		return invalidMarker, err
+	}
+	queryParams := u.Query()
+	offset := queryParams.Get("offset")
+	limit := queryParams.Get("limit")
+
+	// Limit is not present, only one page required
+	if limit == "" {
+		return invalidMarker, nil
+	}
+
+	iOffset := 0
+	if offset != "" {
+		iOffset, err = strconv.Atoi(offset)
+		if err != nil {
+			return invalidMarker, err
+		}
+	}
+	iLimit, err := strconv.Atoi(limit)
+	if err != nil {
+		return invalidMarker, err
+	}
+	iOffset = iOffset + iLimit
+	offset = strconv.Itoa(iOffset)
+
+	return offset, nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (r SharePage) IsEmpty() (bool, error) {
+	shares, err := ExtractShares(r)
+	return len(shares) == 0, err
+}
+
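As a usage sketch, the page machinery above is normally consumed through `AllPages` or `EachPage` rather than directly; for example (the status filter is illustrative):

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares"
)

// ListAvailableShares collects every page of results into a single slice.
// AllPages drives the offset-based pagination implemented above until the
// service returns an empty page.
func ListAvailableShares(client *gophercloud.ServiceClient) ([]shares.Share, error) {
	allPages, err := shares.ListDetail(client, shares.ListOpts{Status: "available"}).AllPages()
	if err != nil {
		return nil, err
	}
	return shares.ExtractShares(allPages)
}
```

+// ExtractShares extracts and returns a Share slice. It is used while
+// iterating over a shares.ListDetail call.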
+func ExtractShares(r pagination.Page) ([]Share, error) { + var s struct { + Shares []Share `json:"shares"` + } + + err := (r.(SharePage)).ExtractInto(&s) + + return s.Shares, err +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// IDFromName is a convenience function that returns a share's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + r, err := ListDetail(client, &ListOpts{Name: name}).AllPages() + if err != nil { + return "", err + } + + ss, err := ExtractShares(r) + if err != nil { + return "", err + } + + switch len(ss) { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "share"} + case 1: + return ss[0].ID, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: len(ss), ResourceType: "share"} + } +} + +// GetExportLocationsResult contains the result body and error from an +// GetExportLocations request. +type GetExportLocationsResult struct { + gophercloud.Result +} + +// ExportLocation contains all information associated with a share export location +type ExportLocation struct { + // The export location path that should be used for mount operation. + Path string `json:"path"` + // The UUID of the share instance that this export location belongs to. + ShareInstanceID string `json:"share_instance_id"` + // Defines purpose of an export location. + // If set to true, then it is expected to be used for service needs + // and by administrators only. + // If it is set to false, then this export location can be used by end users. + IsAdminOnly bool `json:"is_admin_only"` + // The share export location UUID. + ID string `json:"id"` + // Drivers may use this field to identify which export locations are + // most efficient and should be used preferentially by clients. + // By default it is set to false value. New in version 2.14 + Preferred bool `json:"preferred"` +} + +// Extract will get the Export Locations from the commonResult +func (r GetExportLocationsResult) Extract() ([]ExportLocation, error) { + var s struct { + ExportLocations []ExportLocation `json:"export_locations"` + } + err := r.ExtractInto(&s) + return s.ExportLocations, err +} + +// AccessRight contains all information associated with an OpenStack share +// Grant Access Response +type AccessRight struct { + // The UUID of the share to which you are granted or denied access. + ShareID string `json:"share_id"` + // The access rule type that can be "ip", "cert" or "user". + AccessType string `json:"access_type,omitempty"` + // The value that defines the access that can be a valid format of IP, cert or user. + AccessTo string `json:"access_to,omitempty"` + // The access credential of the entity granted share access. + AccessKey string `json:"access_key,omitempty"` + // The access level to the share is either "rw" or "ro". + AccessLevel string `json:"access_level,omitempty"` + // The state of the access rule + State string `json:"state,omitempty"` + // The access rule ID. 
+ ID string `json:"id"` +} + +// Extract will get the GrantAccess object from the commonResult +func (r GrantAccessResult) Extract() (*AccessRight, error) { + var s struct { + AccessRight *AccessRight `json:"access"` + } + err := r.ExtractInto(&s) + return s.AccessRight, err +} + +// GrantAccessResult contains the result body and error from an GrantAccess request. +type GrantAccessResult struct { + gophercloud.Result +} + +// RevokeAccessResult contains the response body and error from a Revoke access request. +type RevokeAccessResult struct { + gophercloud.ErrResult +} + +// Extract will get a slice of AccessRight objects from the commonResult +func (r ListAccessRightsResult) Extract() ([]AccessRight, error) { + var s struct { + AccessRights []AccessRight `json:"access_list"` + } + err := r.ExtractInto(&s) + return s.AccessRights, err +} + +// ListAccessRightsResult contains the result body and error from a ListAccessRights request. +type ListAccessRightsResult struct { + gophercloud.Result +} + +// ExtendResult contains the response body and error from an Extend request. +type ExtendResult struct { + gophercloud.ErrResult +} + +// ShrinkResult contains the response body and error from a Shrink request. +type ShrinkResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go new file mode 100644 index 00000000..02fba24d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go @@ -0,0 +1,47 @@ +package shares + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("shares") +} + +func listDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("shares", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id) +} + +func getExportLocationsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "export_locations") +} + +func grantAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func revokeAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func listAccessRightsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func extendURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func shrinkURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go new file mode 100644 index 00000000..1f2f7d7c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/requests.go @@ -0,0 +1,161 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// 
CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToSnapshotCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains the options for creating a Snapshot. This object is
+// passed to snapshots.Create(). For more information about these parameters,
+// please refer to the Snapshot object, or the shared file systems API v2
+// documentation
+type CreateOpts struct {
+	// The UUID of the share from which to create a snapshot
+	ShareID string `json:"share_id" required:"true"`
+	// Defines the snapshot name
+	Name string `json:"name,omitempty"`
+	// Defines the snapshot description
+	Description string `json:"description,omitempty"`
+	// DisplayName is equivalent to Name. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayName string `json:"display_name,omitempty"`
+	// DisplayDescription is equivalent to Description. The API supports using both
+	// This is an inherited attribute from the block storage API
+	DisplayDescription string `json:"display_description,omitempty"`
+}
+
+// ToSnapshotCreateMap assembles a request body based on the contents of a
+// CreateOpts.
+func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "snapshot")
+}
+
+// Create will create a new Snapshot based on the values in CreateOpts. To extract
+// the Snapshot object from the response, call the Extract method on the
+// CreateResult.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSnapshotCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// ListOpts holds options for listing Snapshots. It is passed to the
+// snapshots.ListDetail function.
+type ListOpts struct {
+	// (Admin only). Defines whether to list the requested resources for all projects.
+	AllTenants bool `q:"all_tenants"`
+	// The snapshot name.
+	Name string `q:"name"`
+	// Filter by a snapshot description.
+	Description string `q:"description"`
+	// Filters by a share from which the snapshot was created.
+	ShareID string `q:"share_id"`
+	// Filters by a snapshot size in GB.
+	Size int `q:"size"`
+	// Filters by a snapshot status.
+	Status string `q:"status"`
+	// The maximum number of snapshots to return.
+	Limit int `q:"limit"`
+	// The offset to define start point of snapshot or snapshot group listing.
+	Offset int `q:"offset"`
+	// The key to sort a list of snapshots.
+	SortKey string `q:"sort_key"`
+	// The direction to sort a list of snapshots.
+	SortDir string `q:"sort_dir"`
+	// The UUID of the project in which the snapshot was created. Useful with all_tenants parameter.
+	ProjectID string `q:"project_id"`
+	// The name pattern that can be used to filter snapshots.
+	NamePattern string `q:"name~"`
+	// The description pattern that can be used to filter snapshots.
+	DescriptionPattern string `q:"description~"`
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToSnapshotListQuery() (string, error)
+}
+
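The `q` struct tags above are what `gophercloud.BuildQueryString` consumes. A small sketch of the resulting query string (the share UUID is a placeholder, and the exact encoding shown in the comment is indicative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
)

func main() {
	// Only populated fields carrying a `q` tag end up in the query string.
	opts := snapshots.ListOpts{ShareID: "<share-uuid>", Limit: 10}
	query, err := opts.ToSnapshotListQuery()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(query) // something like "?limit=10&share_id=%3Cshare-uuid%3E"
}
```

+// ToSnapshotListQuery formats a ListOpts into a query string.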
+func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail returns []Snapshot optionally limited by the conditions provided in ListOpts. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := SnapshotPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// Delete will delete an existing Snapshot with the given UUID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get will get a single snapshot with given UUID +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSnapshotUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Snapshot. This object is passed +// to the snapshot.Update function. For more information about the parameters, see +// the Snapshot object. +type UpdateOpts struct { + // Snapshot name. Manila snapshot update logic doesn't have a "name" alias. + DisplayName *string `json:"display_name,omitempty"` + // Snapshot description. Manila snapshot update logic doesn't have a "description" alias. + DisplayDescription *string `json:"display_description,omitempty"` +} + +// ToSnapshotUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToSnapshotUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "snapshot") +} + +// Update will update the Snapshot with provided information. To extract the updated +// Snapshot from the response, call the Extract method on the UpdateResult. 
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSnapshotUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go new file mode 100644 index 00000000..4952dbd3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/results.go @@ -0,0 +1,193 @@ +package snapshots + +import ( + "encoding/json" + "net/url" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +const ( + invalidMarker = "-1" +) + +// Snapshot contains all information associated with an OpenStack Snapshot +type Snapshot struct { + // The UUID of the snapshot + ID string `json:"id"` + // The name of the snapshot + Name string `json:"name,omitempty"` + // A description of the snapshot + Description string `json:"description,omitempty"` + // UUID of the share from which the snapshot was created + ShareID string `json:"share_id"` + // The shared file system protocol + ShareProto string `json:"share_proto"` + // Size of the snapshot share in GB + ShareSize int `json:"share_size"` + // Size of the snapshot in GB + Size int `json:"size"` + // The snapshot status + Status string `json:"status"` + // The UUID of the project in which the snapshot was created + ProjectID string `json:"project_id"` + // Timestamp when the snapshot was created + CreatedAt time.Time `json:"-"` + // Snapshot links for pagination + Links []map[string]string `json:"links"` +} + +func (r *Snapshot) UnmarshalJSON(b []byte) error { + type tmp Snapshot + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Snapshot(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object from the commonResult +func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// SnapshotPage is a pagination.pager that is returned from a call to the List function. +type SnapshotPage struct { + pagination.MarkerPageBase +} + +// NextPageURL generates the URL for the page of results after this one. +func (r SnapshotPage) NextPageURL() (string, error) { + currentURL := r.URL + mark, err := r.Owner.LastMarker() + if err != nil { + return "", err + } + if mark == invalidMarker { + return "", nil + } + + q := currentURL.Query() + q.Set("offset", mark) + currentURL.RawQuery = q.Encode() + return currentURL.String(), nil +} + +// LastMarker returns the last offset in a ListResult. 
+func (r SnapshotPage) LastMarker() (string, error) {
+	snapshots, err := ExtractSnapshots(r)
+	if err != nil {
+		return invalidMarker, err
+	}
+	if len(snapshots) == 0 {
+		return invalidMarker, nil
+	}
+
+	u, err := url.Parse(r.URL.String())
+	if err != nil {
+		return invalidMarker, err
+	}
+	queryParams := u.Query()
+	offset := queryParams.Get("offset")
+	limit := queryParams.Get("limit")
+
+	// Limit is not present, only one page required
+	if limit == "" {
+		return invalidMarker, nil
+	}
+
+	iOffset := 0
+	if offset != "" {
+		iOffset, err = strconv.Atoi(offset)
+		if err != nil {
+			return invalidMarker, err
+		}
+	}
+	iLimit, err := strconv.Atoi(limit)
+	if err != nil {
+		return invalidMarker, err
+	}
+	iOffset = iOffset + iLimit
+	offset = strconv.Itoa(iOffset)
+
+	return offset, nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface
+func (r SnapshotPage) IsEmpty() (bool, error) {
+	snapshots, err := ExtractSnapshots(r)
+	return len(snapshots) == 0, err
+}
+
+// ExtractSnapshots extracts and returns a Snapshot slice. It is used while
+// iterating over a snapshots.ListDetail call.
+func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) {
+	var s struct {
+		Snapshots []Snapshot `json:"snapshots"`
+	}
+
+	err := (r.(SnapshotPage)).ExtractInto(&s)
+
+	return s.Snapshots, err
+}
+
+// DeleteResult contains the response body and error from a Delete request.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// GetResult contains the response body and error from a Get request.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult contains the response body and error from an Update request.
+type UpdateResult struct {
+	commonResult
+}
+
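A brief sketch tying the snapshot requests and results together (names and IDs are placeholders):

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots"
)

// SnapshotShare snapshots a share and later resolves the snapshot's ID back
// from its name using the IDFromName helper defined below.
func SnapshotShare(client *gophercloud.ServiceClient, shareID string) (string, error) {
	snap, err := snapshots.Create(client, snapshots.CreateOpts{
		ShareID: shareID,
		Name:    "nightly-backup", // placeholder name
	}).Extract()
	if err != nil {
		return "", err
	}

	// If only the name is known later, the ID can be recovered:
	id, err := snapshots.IDFromName(client, "nightly-backup")
	if err != nil {
		return snap.ID, err
	}
	return id, nil
}
```

+// IDFromName is a convenience function that returns a snapshot's ID given its name.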
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + r, err := ListDetail(client, &ListOpts{Name: name}).AllPages() + if err != nil { + return "", err + } + + ss, err := ExtractSnapshots(r) + if err != nil { + return "", err + } + + switch len(ss) { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} + case 1: + return ss[0].ID, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: len(ss), ResourceType: "snapshot"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go new file mode 100644 index 00000000..a07e3ec8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/snapshots/urls.go @@ -0,0 +1,23 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func listDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 00000000..40080f7a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,28 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", err + } + + u.RawQuery, u.Fragment = "", "" + + path := u.Path + versionRe := regexp.MustCompile("v[0-9.]+/?") + + if version := versionRe.FindString(path); version != "" { + versionIndex := strings.Index(path, version) + u.Path = path[:versionIndex] + } + + return u.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 00000000..27da19f9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. +type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. +// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. 
+func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. + for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. + if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go new file mode 100644 index 00000000..757295c4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. 
+func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204, 300}, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go new file mode 100644 index 00000000..3656fb7f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go @@ -0,0 +1,92 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. +type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. +func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return "", err + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + } else { + if value == nil { + // Actual null element. 
+ return "", nil + } + + url, ok := value.(string) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "string" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + + return url, nil + } + } +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current LinkedPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current LinkedPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go new file mode 100644 index 00000000..52e53bae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go @@ -0,0 +1,58 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. +// For convenience, embed the MarkedPageBase struct. +type MarkerPage interface { + Page + + // LastMarker returns the last "marker" value on this page. + LastMarker() (string, error) +} + +// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. +type MarkerPageBase struct { + PageResult + + // Owner is a reference to the embedding struct. + Owner MarkerPage +} + +// NextPageURL generates the URL for the page of results after this one. +func (current MarkerPageBase) NextPageURL() (string, error) { + currentURL := current.URL + + mark, err := current.Owner.LastMarker() + if err != nil { + return "", err + } + + q := currentURL.Query() + q.Set("marker", mark) + currentURL.RawQuery = q.Encode() + + return currentURL.String(), nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current MarkerPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current MarkerPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go new file mode 100644 index 00000000..42c0b2db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -0,0 +1,251 @@ +package pagination + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "strings" + + "github.com/gophercloud/gophercloud" +) + +var ( + // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. + ErrPageNotAvailable = errors.New("The requested page does not exist.") +) + +// Page must be satisfied by the result type of any resource collection. +// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. 
+// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs instead.
+// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type
+// will need to implement.
+type Page interface {
+	// NextPageURL generates the URL for the page of data that follows this collection.
+	// Return "" if no such page exists.
+	NextPageURL() (string, error)
+
+	// IsEmpty returns true if this Page has no items in it.
+	IsEmpty() (bool, error)
+
+	// GetBody returns the Page Body. This is used in the `AllPages` method.
+	GetBody() interface{}
+}
+
+// Pager knows how to advance through a specific resource collection, one page at a time.
+type Pager struct {
+	client *gophercloud.ServiceClient
+
+	initialURL string
+
+	createPage func(r PageResult) Page
+
+	firstPage Page
+
+	Err error
+
+	// Headers supplies additional HTTP headers to populate on each paged request.
+	Headers map[string]string
+}
+
+// NewPager constructs a manually-configured pager.
+// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page.
+func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     client,
+		initialURL: initialURL,
+		createPage: createPage,
+	}
+}
+
+// WithPageCreator returns a new Pager that substitutes a different page creation function. This is
+// useful for overriding List functions in delegation.
+func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager {
+	return Pager{
+		client:     p.client,
+		initialURL: p.initialURL,
+		createPage: createPage,
+	}
+}
+
+func (p Pager) fetchNextPage(url string) (Page, error) {
+	resp, err := Request(p.client, p.Headers, url)
+	if err != nil {
+		return nil, err
+	}
+
+	remembered, err := PageResultFrom(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return p.createPage(remembered), nil
+}
+
+// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function.
+// Return "false" from the handler to prematurely stop iterating.
+func (p Pager) EachPage(handler func(Page) (bool, error)) error {
+	if p.Err != nil {
+		return p.Err
+	}
+	currentURL := p.initialURL
+	for {
+		var currentPage Page
+
+		// if first page has already been fetched, no need to fetch it again
+		if p.firstPage != nil {
+			currentPage = p.firstPage
+			p.firstPage = nil
+		} else {
+			var err error
+			currentPage, err = p.fetchNextPage(currentURL)
+			if err != nil {
+				return err
+			}
+		}
+
+		empty, err := currentPage.IsEmpty()
+		if err != nil {
+			return err
+		}
+		if empty {
+			return nil
+		}
+
+		ok, err := handler(currentPage)
+		if err != nil {
+			return err
+		}
+		if !ok {
+			return nil
+		}
+
+		currentURL, err = currentPage.NextPageURL()
+		if err != nil {
+			return err
+		}
+		if currentURL == "" {
+			return nil
+		}
+	}
+}
+
+// AllPages returns all the pages from a `List` operation in a single page,
+// allowing the user to retrieve all the pages at once.
+func (p Pager) AllPages() (Page, error) {
+	// pagesSlice holds all the pages until they get converted into a single Page Body.
+	var pagesSlice []interface{}
+	// body will contain the final concatenated Page body.
+	var body reflect.Value
+
+	// Grab a first page to ascertain the page body type.
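+	// (For example, a page embedding SinglePageBase is returned as-is below,
+	// while a map body such as {"servers": [...]} is concatenated under its
+	// non-"links" key; the "servers" name is illustrative.)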
+	firstPage, err := p.fetchNextPage(p.initialURL)
+	if err != nil {
+		return nil, err
+	}
+	// Store the page type so we can use reflection to create a new mega-page of
+	// that type.
+	pageType := reflect.TypeOf(firstPage)
+
+	// if it's a single page, just return the firstPage (first page)
+	if _, found := pageType.FieldByName("SinglePageBase"); found {
+		return firstPage, nil
+	}
+
+	// store the first page to avoid getting it twice
+	p.firstPage = firstPage
+
+	// Switch on the page body type. Recognized types are `map[string]interface{}`,
+	// `[]byte`, and `[]interface{}`.
+	switch pb := firstPage.GetBody().(type) {
+	case map[string]interface{}:
+		// key is the map key for the page body if the body type is `map[string]interface{}`.
+		var key string
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().(map[string]interface{})
+			for k, v := range b {
+				// If it's a linked page, we don't want the `links`, we want the other one.
+				if !strings.HasSuffix(k, "links") {
+					// check the field's type. we only want []interface{} (which is really []map[string]interface{})
+					switch vt := v.(type) {
+					case []interface{}:
+						key = k
+						pagesSlice = append(pagesSlice, vt...)
+					}
+				}
+			}
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `map[string]interface{}`
+		body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice)))
+		body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice))
+	case []byte:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]byte)
+			pagesSlice = append(pagesSlice, b)
+			// separate pages with a newline (byte 10)
+			pagesSlice = append(pagesSlice, []byte{10})
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		if len(pagesSlice) > 0 {
+			// Remove the trailing newline.
+			pagesSlice = pagesSlice[:len(pagesSlice)-1]
+		}
+		var b []byte
+		// Combine the slice of slices into a single slice.
+		for _, slice := range pagesSlice {
+			b = append(b, slice.([]byte)...)
+		}
+		// Set body to value of type `bytes`.
+		body = reflect.New(reflect.TypeOf(b)).Elem()
+		body.SetBytes(b)
+	case []interface{}:
+		// Iterate over the pages to concatenate the bodies.
+		err = p.EachPage(func(page Page) (bool, error) {
+			b := page.GetBody().([]interface{})
+			pagesSlice = append(pagesSlice, b...)
+			return true, nil
+		})
+		if err != nil {
+			return nil, err
+		}
+		// Set body to value of type `[]interface{}`
+		body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice))
+		for i, s := range pagesSlice {
+			body.Index(i).Set(reflect.ValueOf(s))
+		}
+	default:
+		err := gophercloud.ErrUnexpectedType{}
+		err.Expected = "map[string]interface{}/[]byte/[]interface{}"
+		err.Actual = fmt.Sprintf("%T", pb)
+		return nil, err
+	}
+
+	// Each `Extract*` function is expecting a specific type of page coming back,
+	// otherwise the type assertion in those functions will fail. pageType is needed
+	// to create a type in this method that has the same type that the `Extract*`
+	// function is expecting and set the Body of that object to the concatenated
+	// pages.
+	page := reflect.New(pageType)
+	// Set the page body to be the concatenated pages.
+	page.Elem().FieldByName("Body").Set(body)
+	// Set any additional headers that were passed along. The `objectstorage` package,
+	// for example, passes a Content-Type header.
+	h := make(http.Header)
+	for k, v := range p.Headers {
+		h.Add(k, v)
+	}
+	page.Elem().FieldByName("Header").Set(reflect.ValueOf(h))
+	// Type assert the page to a Page interface so that the type assertion in the
+	// `Extract*` methods will work.
+	return page.Elem().Interface().(Page), err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
new file mode 100644
index 00000000..912daea3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
@@ -0,0 +1,4 @@
+/*
+Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
+*/
+package pagination
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
new file mode 100644
index 00000000..4251d649
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
@@ -0,0 +1,33 @@
+package pagination
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
+type SinglePageBase PageResult
+
+// NextPageURL always returns "" to indicate that there are no more pages to return.
+func (current SinglePageBase) NextPageURL() (string, error) {
+	return "", nil
+}
+
+// IsEmpty satisfies the IsEmpty method of the Page interface.
+func (current SinglePageBase) IsEmpty() (bool, error) {
+	if b, ok := current.Body.([]interface{}); ok {
+		return len(b) == 0, nil
+	}
+	err := gophercloud.ErrUnexpectedType{}
+	err.Expected = "[]interface{}"
+	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
+	return true, err
+}
+
+// GetBody returns the single page's body. This method is needed to satisfy the
+// Page interface.
+func (current SinglePageBase) GetBody() interface{} {
+	return current.Body
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go
new file mode 100644
index 00000000..b9986660
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/params.go
@@ -0,0 +1,491 @@
+package gophercloud
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/url"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+/*
+BuildRequestBody builds a map[string]interface{} from the given `struct`. If
+parent is not an empty string, the final map[string]interface{} returned will
+encapsulate the built one. For example:
+
+	disk := 1
+	createOpts := flavors.CreateOpts{
+		ID:         "1",
+		Name:       "m1.tiny",
+		Disk:       &disk,
+		RAM:        512,
+		VCPUs:      1,
+		RxTxFactor: 1.0,
+	}
+
+	body, err := gophercloud.BuildRequestBody(createOpts, "flavor")
+
+The above example can be run as-is, however it is recommended to look at how
+BuildRequestBody is used within Gophercloud to more fully understand how it
+fits within the request process as a whole rather than use it directly as shown
+above.
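+
+As an illustrative sketch (not upstream documentation), assuming the usual
+lowercase JSON tags on CreateOpts, the call above would produce roughly:
+
+	map[string]interface{}{
+		"flavor": map[string]interface{}{
+			"id": "1", "name": "m1.tiny", "disk": 1, ...
+		},
+	}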
+*/ +func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]interface{}) + if optsValue.Kind() == reflect.Struct { + //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + + if f.Name != strings.Title(f.Name) { + //fmt.Printf("Skipping field: %s...\n", f.Name) + continue + } + + //fmt.Printf("Starting on field: %s...\n", f.Name) + + zero := isZero(v) + //fmt.Printf("v is zero?: %v\n", zero) + + // if the field has a required tag that's set to "true" + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) + // if the field's value is zero, return a missing-argument error + if zero { + // if the field has a 'required' tag, it can't have a zero-value + err := ErrMissingInput{} + err.Argument = f.Name + return nil, err + } + } + + if xorTag := f.Tag.Get("xor"); xorTag != "" { + //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) + xorField := optsValue.FieldByName(xorTag) + var xorFieldIsZero bool + if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { + xorFieldIsZero = true + } else { + if xorField.Kind() == reflect.Ptr { + xorField = xorField.Elem() + } + xorFieldIsZero = isZero(xorField) + } + if !(zero != xorFieldIsZero) { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) + err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) + return nil, err + } + } + + if orTag := f.Tag.Get("or"); orTag != "" { + //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) + //fmt.Printf("field is zero?: %v\n", zero) + if zero { + orField := optsValue.FieldByName(orTag) + var orFieldIsZero bool + if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { + orFieldIsZero = true + } else { + if orField.Kind() == reflect.Ptr { + orField = orField.Elem() + } + orFieldIsZero = isZero(orField) + } + if orFieldIsZero { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) + err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) + return nil, err + } + } + } + + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + + if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { + sliceValue := v + if sliceValue.Kind() == reflect.Ptr { + sliceValue = sliceValue.Elem() + } + + for i := 0; i < sliceValue.Len(); i++ { + element := sliceValue.Index(i) + if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { + _, err := BuildRequestBody(element.Interface(), "") + if err != nil { + return nil, err + } + } + } + } + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { + if zero { + //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) + if jsonTag != "" { + jsonTagPieces := strings.Split(jsonTag, ",") + if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { + if v.CanSet() { + if !v.IsNil() { + if v.Kind() == 
reflect.Ptr { + v.Set(reflect.Zero(v.Type())) + } + } + //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) + } + } + } + continue + } + + //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) + _, err := BuildRequestBody(v.Interface(), f.Name) + if err != nil { + return nil, err + } + } + } + + //fmt.Printf("opts: %+v \n", opts) + + b, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + //fmt.Printf("string(b): %s\n", string(b)) + + err = json.Unmarshal(b, &optsMap) + if err != nil { + return nil, err + } + + //fmt.Printf("optsMap: %+v\n", optsMap) + + if parent != "" { + optsMap = map[string]interface{}{parent: optsMap} + } + //fmt.Printf("optsMap after parent added: %+v\n", optsMap) + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IPVersion is a type for the possible IP address versions. Valid instances +// are IPv4 and IPv6 +type IPVersion int + +const ( + // IPv4 is used for IP version 4 addresses + IPv4 IPVersion = 4 + // IPv6 is used for IP version 6 addresses + IPv6 IPVersion = 6 +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. 
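+
+For example (sketch):
+
+	MaybeInt(0)  // returns nil, so the field is omitted
+	MaybeInt(80) // returns a pointer to 80, so the field is serialized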
+*/
+func MaybeInt(original int) *int {
+	if original != 0 {
+		return &original
+	}
+	return nil
+}
+
+/*
+func isUnderlyingStructZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Ptr:
+		return isUnderlyingStructZero(v.Elem())
+	default:
+		return isZero(v)
+	}
+}
+*/
+
+var t time.Time
+
+func isZero(v reflect.Value) bool {
+	//fmt.Printf("\n\nchecking isZero for value: %+v\n", v)
+	switch v.Kind() {
+	case reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return false
+	case reflect.Func, reflect.Map, reflect.Slice:
+		return v.IsNil()
+	case reflect.Array:
+		z := true
+		for i := 0; i < v.Len(); i++ {
+			z = z && isZero(v.Index(i))
+		}
+		return z
+	case reflect.Struct:
+		if v.Type() == reflect.TypeOf(t) {
+			if v.Interface().(time.Time).IsZero() {
+				return true
+			}
+			return false
+		}
+		z := true
+		for i := 0; i < v.NumField(); i++ {
+			z = z && isZero(v.Field(i))
+		}
+		return z
+	}
+	// Compare other types directly:
+	z := reflect.Zero(v.Type())
+	//fmt.Printf("zero type for value: %+v\n\n\n", z)
+	return v.Interface() == z.Interface()
+}
+
+/*
+BuildQueryString is an internal function to be used by request methods in
+individual resource packages.
+
+It accepts a tagged structure and expands it into a URL struct. Field names are
+converted into query parameters based on a "q" tag. For example:
+
+	type Something struct {
+		Bar string `q:"x_bar"`
+		Baz int    `q:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 4,
+	}
+
+will be converted into "?x_bar=AAA&lorem_ipsum=4".
+
+The struct's fields may be strings, integers, or boolean values. Fields left at
+their type's zero value will be omitted from the query.
+*/
+func BuildQueryString(opts interface{}) (*url.URL, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	params := url.Values{}
+
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			qTag := f.Tag.Get("q")
+
+			// if the field has a 'q' tag, it goes in the query string
+			if qTag != "" {
+				tags := strings.Split(qTag, ",")
+
+				// if the field is set, add it to the slice of query pieces
+				if !isZero(v) {
+				loop:
+					switch v.Kind() {
+					case reflect.Ptr:
+						v = v.Elem()
+						goto loop
+					case reflect.String:
+						params.Add(tags[0], v.String())
+					case reflect.Int:
+						params.Add(tags[0], strconv.FormatInt(v.Int(), 10))
+					case reflect.Bool:
+						params.Add(tags[0], strconv.FormatBool(v.Bool()))
+					case reflect.Slice:
+						switch v.Type().Elem() {
+						case reflect.TypeOf(0):
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10))
+							}
+						default:
+							for i := 0; i < v.Len(); i++ {
+								params.Add(tags[0], v.Index(i).String())
+							}
+						}
+					case reflect.Map:
+						if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String {
+							var s []string
+							for _, k := range v.MapKeys() {
+								value := v.MapIndex(k).String()
+								s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value))
+							}
+							params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", ")))
+						}
+					}
+				} else {
+					// if the field has a 'required' tag, it can't have a zero-value
+					if requiredTag := f.Tag.Get("required"); requiredTag == "true" {
+						return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name)
+					}
+				}
+			}
+		}
+
+		return &url.URL{RawQuery: params.Encode()}, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return nil, fmt.Errorf("Options type is not a struct.")
+}
+
+/*
+BuildHeaders is an internal function to be used by request methods in
+individual resource packages.
+
+It accepts an arbitrary tagged structure and produces a string map that's
+suitable for use as the HTTP headers of an outgoing request. Field names are
+mapped to header names based on "h" tags.
+
+	type Something struct {
+		Bar string `h:"x_bar"`
+		Baz int    `h:"lorem_ipsum"`
+	}
+
+	instance := Something{
+		Bar: "AAA",
+		Baz: 4,
+	}
+
+will be converted into:
+
+	map[string]string{
+		"x_bar":       "AAA",
+		"lorem_ipsum": "4",
+	}
+
+Untagged fields and fields left at their zero values are skipped. Integers,
+booleans and string values are supported.
+*/
+func BuildHeaders(opts interface{}) (map[string]string, error) {
+	optsValue := reflect.ValueOf(opts)
+	if optsValue.Kind() == reflect.Ptr {
+		optsValue = optsValue.Elem()
+	}
+
+	optsType := reflect.TypeOf(opts)
+	if optsType.Kind() == reflect.Ptr {
+		optsType = optsType.Elem()
+	}
+
+	optsMap := make(map[string]string)
+	if optsValue.Kind() == reflect.Struct {
+		for i := 0; i < optsValue.NumField(); i++ {
+			v := optsValue.Field(i)
+			f := optsType.Field(i)
+			hTag := f.Tag.Get("h")
+
+			// if the field has an 'h' tag, it goes in the header
+			if hTag != "" {
+				tags := strings.Split(hTag, ",")
+
+				// if the field is set, add it to the headers map
+				if !isZero(v) {
+					switch v.Kind() {
+					case reflect.String:
+						optsMap[tags[0]] = v.String()
+					case reflect.Int:
+						optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10)
+					case reflect.Bool:
+						optsMap[tags[0]] = strconv.FormatBool(v.Bool())
+					}
+				} else {
+					// if the field has a 'required' tag, it can't have a zero-value
+					if requiredTag := f.Tag.Get("required"); requiredTag == "true" {
+						return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name)
+					}
+				}
+			}
+
+		}
+		return optsMap, nil
+	}
+	// Return an error if the underlying type of 'opts' isn't a struct.
+	return optsMap, fmt.Errorf("Options type is not a struct.")
+}
+
+// IDSliceToQueryString takes a slice of elements and converts them into a query
+// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the
+// result would be "?foo=20&foo=40&foo=60".
+func IDSliceToQueryString(name string, ids []int) string {
+	str := ""
+	for k, v := range ids {
+		if k == 0 {
+			str += "?"
+		} else {
+			str += "&"
+		}
+		str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v))
+	}
+	return str
+}
+
+// IntWithinRange returns TRUE if an integer falls within a defined range
+// (exclusive at both bounds), and FALSE if not.
+func IntWithinRange(val, min, max int) bool {
+	return val > min && val < max
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go
new file mode 100644
index 00000000..fce00462
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go
@@ -0,0 +1,501 @@
+package gophercloud
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+// DefaultUserAgent is the default User-Agent string set in the request header.
+const DefaultUserAgent = "gophercloud/2.0.0"
+
+// UserAgent represents a User-Agent header.
+type UserAgent struct {
+	// prepend is the slice of User-Agent strings to prepend to DefaultUserAgent.
+	// All the strings to prepend are accumulated and prepended in the Join method.
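+	// For example, after ua.Prepend("terraform-provider-oneview"), Join returns
+	// "terraform-provider-oneview gophercloud/2.0.0" (names illustrative).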
+	prepend []string
+}
+
+// Prepend prepends a user-defined string to the default User-Agent string. Users
+// may pass in one or more strings to prepend.
+func (ua *UserAgent) Prepend(s ...string) {
+	ua.prepend = append(s, ua.prepend...)
+}
+
+// Join concatenates all the user-defined User-Agent strings with the default
+// Gophercloud User-Agent string.
+func (ua *UserAgent) Join() string {
+	uaSlice := append(ua.prepend, DefaultUserAgent)
+	return strings.Join(uaSlice, " ")
+}
+
+// ProviderClient stores details that are required to interact with any
+// services within a specific provider's API.
+//
+// Generally, you acquire a ProviderClient by calling the NewClient method in
+// the appropriate provider's child package, providing whatever authentication
+// credentials are required.
+type ProviderClient struct {
+	// IdentityBase is the base URL used for a particular provider's identity
+	// service - it will be used when issuing authentication requests. It
+	// should point to the root resource of the identity service, not a specific
+	// identity version.
+	IdentityBase string
+
+	// IdentityEndpoint is the identity endpoint. This may be a specific version
+	// of the identity service. If this is the case, this endpoint is used rather
+	// than querying versions first.
+	IdentityEndpoint string
+
+	// TokenID is the ID of the most recently issued valid token.
+	// NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application.
+	// To safely read or write this value, call `Token` or `SetToken`, respectively.
+	TokenID string
+
+	// EndpointLocator describes how this provider discovers the endpoints for
+	// its constituent services.
+	EndpointLocator EndpointLocator
+
+	// HTTPClient allows users to interject arbitrary http, https, or other transit behaviors.
+	HTTPClient http.Client
+
+	// UserAgent represents the User-Agent header in the HTTP request.
+	UserAgent UserAgent
+
+	// ReauthFunc is the function used to re-authenticate the user if the request
+	// fails with a 401 HTTP response code. This is needed because there may be multiple
+	// authentication functions for different Identity service versions.
+	ReauthFunc func() error
+
+	// Throwaway determines whether this client is a throw-away client. It's a copy of the user's provider client
+	// with the token and reauth func zeroed. Such a client can be used to perform reauthorization.
+	Throwaway bool
+
+	// Context is the context passed to the HTTP request.
+	Context context.Context
+
+	// mut is a mutex for the client. It protects read and write access to client attributes such as getting
+	// and setting the TokenID.
+	mut *sync.RWMutex
+
+	// reauthmut is a mutex for reauthentication; it attempts to ensure that only one reauthentication
+	// attempt happens at one time.
+	reauthmut *reauthlock
+
+	authResult AuthResult
+}
+
+// reauthlock represents a set of attributes used to help in the reauthentication process.
+type reauthlock struct {
+	sync.RWMutex
+	reauthing    bool
+	reauthingErr error
+	done         *sync.Cond
+}
+
+// AuthenticatedHeaders returns a map of HTTP headers that are common for all
+// authenticated service requests. Blocks if Reauthenticate is in progress.
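+// The returned map is typically {"X-Auth-Token": "<token>"}; it is empty when
+// no token is set or when the client is a throwaway.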
+func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) {
+	if client.IsThrowaway() {
+		return
+	}
+	if client.reauthmut != nil {
+		client.reauthmut.Lock()
+		for client.reauthmut.reauthing {
+			client.reauthmut.done.Wait()
+		}
+		client.reauthmut.Unlock()
+	}
+	t := client.Token()
+	if t == "" {
+		return
+	}
+	return map[string]string{"X-Auth-Token": t}
+}
+
+// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token.
+// If the application's ProviderClient is not used concurrently, this doesn't need to be called.
+func (client *ProviderClient) UseTokenLock() {
+	client.mut = new(sync.RWMutex)
+	client.reauthmut = new(reauthlock)
+}
+
+// GetAuthResult returns the result from the request that was used to obtain a
+// provider client's Keystone token.
+//
+// The result is nil when authentication has not yet taken place, when the token
+// was set manually with SetToken(), or when a ReauthFunc was used that does not
+// record the AuthResult.
+func (client *ProviderClient) GetAuthResult() AuthResult {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.authResult
+}
+
+// Token safely reads the value of the auth token from the ProviderClient. Applications should
+// call this method to access the token instead of the TokenID field.
+func (client *ProviderClient) Token() string {
+	if client.mut != nil {
+		client.mut.RLock()
+		defer client.mut.RUnlock()
+	}
+	return client.TokenID
+}
+
+// SetToken safely sets the value of the auth token in the ProviderClient. Applications may
+// use this method in a custom ReauthFunc.
+//
+// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead.
+func (client *ProviderClient) SetToken(t string) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	client.TokenID = t
+	client.authResult = nil
+}
+
+// SetTokenAndAuthResult safely sets the value of the auth token in the
+// ProviderClient and also records the AuthResult that was returned from the
+// token creation request. Applications may call this in a custom ReauthFunc.
+func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error {
+	tokenID := ""
+	var err error
+	if r != nil {
+		tokenID, err = r.ExtractTokenID()
+		if err != nil {
+			return err
+		}
+	}
+
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	client.TokenID = tokenID
+	client.authResult = r
+	return nil
+}
+
+// CopyTokenFrom safely copies the token from another ProviderClient into
+// this one.
+func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) {
+	if client.mut != nil {
+		client.mut.Lock()
+		defer client.mut.Unlock()
+	}
+	if other.mut != nil && other.mut != client.mut {
+		other.mut.RLock()
+		defer other.mut.RUnlock()
+	}
+	client.TokenID = other.TokenID
+	client.authResult = other.authResult
+}
+
+// IsThrowaway safely reads the value of the client Throwaway field.
+func (client *ProviderClient) IsThrowaway() bool {
+	if client.reauthmut != nil {
+		client.reauthmut.RLock()
+		defer client.reauthmut.RUnlock()
+	}
+	return client.Throwaway
+}
+
+// SetThrowaway safely sets the value of the client Throwaway field.
+func (client *ProviderClient) SetThrowaway(v bool) {
+	if client.reauthmut != nil {
+		client.reauthmut.Lock()
+		defer client.reauthmut.Unlock()
+	}
+	client.Throwaway = v
+}
+
+// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is
+// called because of a 401 response, the caller may pass the previous token.
In +// this case, the reauthentication can be skipped if another thread has already +// reauthenticated in the meantime. If no previous token is known, an empty +// string should be passed instead to force unconditional reauthentication. +func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { + if client.ReauthFunc == nil { + return nil + } + + if client.reauthmut == nil { + return client.ReauthFunc() + } + + client.reauthmut.Lock() + if client.reauthmut.reauthing { + for !client.reauthmut.reauthing { + client.reauthmut.done.Wait() + } + err = client.reauthmut.reauthingErr + client.reauthmut.Unlock() + return err + } + client.reauthmut.Unlock() + + client.reauthmut.Lock() + client.reauthmut.reauthing = true + client.reauthmut.done = sync.NewCond(client.reauthmut) + client.reauthmut.reauthingErr = nil + client.reauthmut.Unlock() + + if previousToken == "" || client.TokenID == previousToken { + err = client.ReauthFunc() + } + + client.reauthmut.Lock() + client.reauthmut.reauthing = false + client.reauthmut.reauthingErr = err + client.reauthmut.done.Broadcast() + client.reauthmut.Unlock() + return +} + +// RequestOpts customizes the behavior of the provider.Request() method. +type RequestOpts struct { + // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The + // content type of the request will default to "application/json" unless overridden by MoreHeaders. + // It's an error to specify both a JSONBody and a RawBody. + JSONBody interface{} + // RawBody contains an io.Reader that will be consumed by the request directly. No content-type + // will be set unless one is provided explicitly by MoreHeaders. + RawBody io.Reader + // JSONResponse, if provided, will be populated with the contents of the response body parsed as + // JSON. + JSONResponse interface{} + // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If + // the response has a different code, an error will be returned. + OkCodes []int + // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is + // provided with a blank value (""), that header will be *omitted* instead: use this to suppress + // the default Accept header or an inferred Content-Type, for example. + MoreHeaders map[string]string + // ErrorContext specifies the resource error type to return if an error is encountered. + // This lets resources override default error messages based on the response status code. + ErrorContext error +} + +var applicationJSON = "application/json" + +// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication +// header will automatically be provided. +func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + var body io.Reader + var contentType *string + + // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided + // io.ReadSeeker as-is. Default the content-type to application/json. + if options.JSONBody != nil { + if options.RawBody != nil { + return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") + } + + rendered, err := json.Marshal(options.JSONBody) + if err != nil { + return nil, err + } + + body = bytes.NewReader(rendered) + contentType = &applicationJSON + } + + if options.RawBody != nil { + body = options.RawBody + } + + // Construct the http.Request. 
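+	// (At this point body is either a bytes.Reader over the encoded JSONBody
+	// or the caller's RawBody reader, per the branches above.)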
+ req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + if client.Context != nil { + req = req.WithContext(client.Context) + } + + // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // modify or omit any header. + if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // get latest token from client + for k, v := range client.AuthenticatedHeaders() { + req.Header.Set(k, v) + } + + // Set connection parameter to close the connection immediately when we've got the response + req.Close = true + + prereqtok := req.Header.Get("X-Auth-Token") + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + // Allow default OkCodes if none explicitly set + okc := options.OkCodes + if okc == nil { + okc = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range okc { + if resp.StatusCode == code { + ok = true + break + } + } + + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + respErr := ErrUnexpectedResponseCode{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + + errType := options.ErrorContext + switch resp.StatusCode { + case http.StatusBadRequest: + err = ErrDefault400{respErr} + if error400er, ok := errType.(Err400er); ok { + err = error400er.Error400(respErr) + } + case http.StatusUnauthorized: + if client.ReauthFunc != nil { + err = client.Reauthenticate(prereqtok) + if err != nil { + e := &ErrUnableToReauthenticate{} + e.ErrOriginal = respErr + return nil, e + } + if options.RawBody != nil { + if seeker, ok := options.RawBody.(io.Seeker); ok { + seeker.Seek(0, 0) + } + } + resp, err = client.Request(method, url, options) + if err != nil { + switch err.(type) { + case *ErrUnexpectedResponseCode: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err.(*ErrUnexpectedResponseCode) + return nil, e + default: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err + return nil, e + } + } + return resp, nil + } + err = ErrDefault401{respErr} + if error401er, ok := errType.(Err401er); ok { + err = error401er.Error401(respErr) + } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } + case http.StatusNotFound: + err = ErrDefault404{respErr} + if error404er, ok := errType.(Err404er); ok { + err = error404er.Error404(respErr) + } + case http.StatusMethodNotAllowed: + err = ErrDefault405{respErr} + if error405er, ok := errType.(Err405er); ok { + err = error405er.Error405(respErr) + } + case http.StatusRequestTimeout: + err = ErrDefault408{respErr} + if error408er, ok := errType.(Err408er); ok { + err = error408er.Error408(respErr) + } + case http.StatusConflict: + err = ErrDefault409{respErr} + if error409er, ok := errType.(Err409er); ok { + err = error409er.Error409(respErr) + } + case 429: + err = ErrDefault429{respErr} + if error429er, ok := errType.(Err429er); ok { + err = error429er.Error429(respErr) + } + case http.StatusInternalServerError: + err = ErrDefault500{respErr} + if error500er, ok := errType.(Err500er); ok { + err = 
error500er.Error500(respErr) + } + case http.StatusServiceUnavailable: + err = ErrDefault503{respErr} + if error503er, ok := errType.(Err503er); ok { + err = error503er.Error503(respErr) + } + } + + if err == nil { + err = respErr + } + + return resp, err + } + + // Parse the response body as JSON, if requested to do so. + if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "PATCH": + return []int{200, 202, 204} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go new file mode 100644 index 00000000..94a16bff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -0,0 +1,448 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// ExtractInto allows users to provide an object into which `Extract` will extract +// the `Result.Body`. This would be useful for OpenStack providers that have +// different fields in the response object than OpenStack proper. 
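+//
+// A minimal sketch (names illustrative):
+//
+//	var s struct {
+//		Server map[string]interface{} `json:"server"`
+//	}
+//	err := r.ExtractInto(&s)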
+func (r Result) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + if reader, ok := r.Body.(io.Reader); ok { + if readCloser, ok := reader.(io.Closer); ok { + defer readCloser.Close() + } + return json.NewDecoder(reader).Decode(to) + } + + b, err := json.Marshal(r.Body) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +func (r Result) extractIntoPtr(to interface{}, label string) error { + if label == "" { + return r.ExtractInto(&to) + } + + var m map[string]interface{} + err := r.ExtractInto(&m) + if err != nil { + return err + } + + b, err := json.Marshal(m[label]) + if err != nil { + return err + } + + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + + if mSlice, ok := m[label].([]interface{}); ok { + for _, v := range mSlice { + // For each iteration of the slice, we create a new struct. + // This is to work around a bug where elements of a slice + // are reused and not overwritten when the same copy of the + // struct is used: + // + // https://github.com/golang/go/issues/21092 + // https://github.com/golang/go/issues/24155 + // https://play.golang.org/p/NHo3ywlPZli + newType := reflect.New(typeOfV).Elem() + + b, err := json.Marshal(v) + if err != nil { + return err + } + + // This is needed for structs with an UnmarshalJSON method. + // Technically this is just unmarshalling the response into + // a struct that is never used, but it's good enough to + // trigger the UnmarshalJSON method. + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + + // Unmarshal is used rather than NewDecoder to also work + // around the above-mentioned bug. + err = json.Unmarshal(b, s) + if err != nil { + return err + } + } + + newSlice = reflect.Append(newSlice, newType) + } + } + + // "to" should now be properly modeled to receive the + // JSON response body and unmarshal into all the correct + // fields of the struct or composed extension struct + // at the end of this method. + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + + err = json.Unmarshal(b, &to) + return err +} + +// ExtractIntoStructPtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying struct type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. 
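+//
+// For example, with a body of {"server": {...}} and label "server", only the
+// inner object is unmarshalled into `to` (illustrative).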
+func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Struct: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to struct, got: %v", t) + } +} + +// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying slice type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Slice: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to slice, got: %v", t) + } +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. If you include its output in an issue related to +// a buggy extraction function, we will all love you forever. +func (r Result) PrettyPrintJSON() string { + pretty, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + panic(err.Error()) + } + return string(pretty) +} + +// ErrResult is an internal type to be used by individual resource packages, but +// its methods will be available on a wide variety of user-facing embedding +// types. +// +// It represents results that only contain a potential error and +// nothing else. Usually, if the operation executed successfully, the Err field +// will be nil; otherwise it will be stocked with a relevant error. Use the +// ExtractErr method +// to cleanly pull it out. +type ErrResult struct { + Result +} + +// ExtractErr is a function that extracts error information, or nil, from a result. +func (r ErrResult) ExtractErr() error { + return r.Err +} + +/* +HeaderResult is an internal type to be used by individual resource packages, but +its methods will be available on a wide variety of user-facing embedding types. + +It represents a result that only contains an error (possibly nil) and an +http.Header. This is used, for example, by the objectstorage packages in +openstack, because most of the operations don't return response bodies, but do +have relevant information in headers. +*/ +type HeaderResult struct { + Result +} + +// ExtractInto allows users to provide an object into which `Extract` will +// extract the http.Header headers of the result. +func (r HeaderResult) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + tmpHeaderMap := map[string]string{} + for k, v := range r.Header { + if len(v) > 0 { + tmpHeaderMap[k] = v[0] + } + } + + b, err := json.Marshal(tmpHeaderMap) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +// RFC3339Milli describes a common time format used by some API responses. 
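+// For example, the illustrative timestamp "2019-03-12T14:30:05.123456Z"
+// parses with this layout.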
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z" + +type JSONRFC3339Milli time.Time + +func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { + b := bytes.NewBuffer(data) + dec := json.NewDecoder(b) + var s string + if err := dec.Decode(&s); err != nil { + return err + } + t, err := time.Parse(RFC3339Milli, s) + if err != nil { + return err + } + *jt = JSONRFC3339Milli(t) + return nil +} + +const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" + +type JSONRFC3339MilliNoZ time.Time + +func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339MilliNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339MilliNoZ(t) + return nil +} + +type JSONRFC1123 time.Time + +func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + return err + } + *jt = JSONRFC1123(t) + return nil +} + +type JSONUnix time.Time + +func (jt *JSONUnix) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + unix, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + t = time.Unix(unix, 0) + *jt = JSONUnix(t) + return nil +} + +// RFC3339NoZ is the time format used in Heat (Orchestration). +const RFC3339NoZ = "2006-01-02T15:04:05" + +type JSONRFC3339NoZ time.Time + +func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339NoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339NoZ(t) + return nil +} + +// RFC3339ZNoT is the time format used in Zun (Containers Service). +const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" + +type JSONRFC3339ZNoT time.Time + +func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoT, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoT(t) + return nil +} + +// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). +const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" + +type JSONRFC3339ZNoTNoZ time.Time + +func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoTNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoTNoZ(t) + return nil +} + +/* +Link is an internal type to be used in packages of collection resources that are +paginated in a certain way. + +It's a response substructure common to many paginated collection results that is +used to point to related pages. Usually, the one we care about is the one with +Rel field set to "next". +*/ +type Link struct { + Href string `json:"href"` + Rel string `json:"rel"` +} + +/* +ExtractNextURL is an internal function useful for packages of collection +resources that are paginated in a certain way. + +It attempts to extract the "next" URL from slice of Link structs, or +"" if no such URL is present. 
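+
+For example (illustrative), given []Link{{Href: "http://x/page2", Rel: "next"}},
+it returns "http://x/page2".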
+*/ +func ExtractNextURL(links []Link) (string, error) { + var url string + + for _, l := range links { + if l.Rel == "next" { + url = l.Href + } + } + + if url == "" { + return "", nil + } + + return url, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go new file mode 100644 index 00000000..f222f05a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -0,0 +1,154 @@ +package gophercloud + +import ( + "io" + "net/http" + "strings" +) + +// ServiceClient stores details required to interact with a specific service API implemented by a provider. +// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. +type ServiceClient struct { + // ProviderClient is a reference to the provider that implements this service. + *ProviderClient + + // Endpoint is the base URL of the service's API, acquired from a service catalog. + // It MUST end with a /. + Endpoint string + + // ResourceBase is the base URL shared by the resources within a service's API. It should include + // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used + // as-is, instead. + ResourceBase string + + // This is the service client type (e.g. compute, sharev2). + // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. + // It is only exported because it gets set in a different package. + Type string + + // The microversion of the service to use. Set this to use a particular microversion. + Microversion string + + // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, + // values set in this field will be set on all the HTTP requests the service client sends. + MoreHeaders map[string]string +} + +// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. +func (client *ServiceClient) ResourceBaseURL() string { + if client.ResourceBase != "" { + return client.ResourceBase + } + return client.Endpoint +} + +// ServiceURL constructs a URL for a resource belonging to this provider. +func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} + +func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + if opts.MoreHeaders == nil { + opts.MoreHeaders = make(map[string]string) + } + + if client.Microversion != "" { + client.setMicroversionHeader(opts) + } +} + +// Get calls `Request` with the "GET" HTTP verb. +func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, JSONResponse, opts) + return client.Request("GET", url, opts) +} + +// Post calls `Request` with the "POST" HTTP verb. +func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("POST", url, opts) +} + +// Put calls `Request` with the "PUT" HTTP verb. 
+func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PUT", url, opts) +} + +// Patch calls `Request` with the "PATCH" HTTP verb. +func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PATCH", url, opts) +} + +// Delete calls `Request` with the "DELETE" HTTP verb. +func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("DELETE", url, opts) +} + +// Head calls `Request` with the "HEAD" HTTP verb. +func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("HEAD", url, opts) +} + +func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + switch client.Type { + case "compute": + opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion + case "sharev2": + opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + case "baremetal": + opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion + case "baremetal-introspection": + opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion + } + + if client.Type != "" { + opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + } +} + +// Request carries out the HTTP operation for the service client +func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + if len(client.MoreHeaders) > 0 { + if options == nil { + options = new(RequestOpts) + } + for k, v := range client.MoreHeaders { + options.MoreHeaders[k] = v + } + } + return client.ProviderClient.Request(method, url, options) +} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go new file mode 100644 index 00000000..68f9a5d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/util.go @@ -0,0 +1,102 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" + "time" +) + +// WaitFor polls a predicate function, once per second, up to a timeout limit. +// This is useful to wait for a resource to transition to a certain state. +// To handle situations when the predicate might hang indefinitely, the +// predicate will be prematurely cancelled after the timeout. +// Resource packages will wrap this in a more convenient function that's +// specific to a certain resource, but it can also be useful on its own. +func WaitFor(timeout int, predicate func() (bool, error)) error { + type WaitForResult struct { + Success bool + Error error + } + + start := time.Now().Unix() + + for { + // If a timeout is set, and that's been exceeded, shut it down. 
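+		// (timeout is expressed in seconds; both this check and the time.After
+		// below treat it as a whole-second duration.)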
+ if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { + return fmt.Errorf("A timeout occurred") + } + + time.Sleep(1 * time.Second) + + var result WaitForResult + ch := make(chan bool, 1) + go func() { + defer close(ch) + satisfied, err := predicate() + result.Success = satisfied + result.Error = err + }() + + select { + case <-ch: + if result.Error != nil { + return result.Error + } + if result.Success { + return nil + } + // If the predicate has not finished by the timeout, cancel it. + case <-time.After(time.Duration(timeout) * time.Second): + return fmt.Errorf("A timeout occurred") + } + } +} + +// NormalizeURL is an internal function to be used by provider clients. +// +// It ensures that each endpoint URL has a closing `/`, as expected by +// ServiceClient's methods. +func NormalizeURL(url string) string { + if !strings.HasSuffix(url, "/") { + return url + "/" + } + return url +} + +// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as +// a reference in the filesystem, if necessary. basePath is assumed to contain +// either '.' when first used, or the file:// type fqdn of the parent resource. +// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/gophercloud/utils/LICENSE b/vendor/github.com/gophercloud/utils/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/gophercloud/utils/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go
new file mode 100644
index 00000000..1f3be212
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/doc.go
@@ -0,0 +1,49 @@
+/*
+Package clientconfig provides convenient functions for creating OpenStack
+clients. It is based on the Python os-client-config library.
+
+See https://docs.openstack.org/os-client-config/latest for details.
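+
+A minimal clouds.yaml entry backing the examples below (a sketch; every
+value is an illustrative assumption):
+
+	clouds:
+	  hawaii:
+	    auth:
+	      auth_url: https://hi.example.com:5000/v3
+	      username: jdoe
+	      password: password
+	      project_name: Some Project
+	      user_domain_name: default
+	      project_domain_name: default
+	    region_name: RegionOne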
+
+Example to Create a Provider Client From clouds.yaml
+
+	opts := &clientconfig.ClientOpts{
+		Cloud: "hawaii",
+	}
+
+	pClient, err := clientconfig.AuthenticatedClient(opts)
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to Manually Create a Provider Client
+
+	opts := &clientconfig.ClientOpts{
+		AuthInfo: &clientconfig.AuthInfo{
+			AuthURL:     "https://hi.example.com:5000/v3",
+			Username:    "jdoe",
+			Password:    "password",
+			ProjectName: "Some Project",
+			DomainName:  "default",
+		},
+	}
+
+	pClient, err := clientconfig.AuthenticatedClient(opts)
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to Create a Service Client from clouds.yaml
+
+	opts := &clientconfig.ClientOpts{
+		Cloud: "hawaii",
+	}
+
+	computeClient, err := clientconfig.NewServiceClient("compute", opts)
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package clientconfig
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go
new file mode 100644
index 00000000..ad1c33d9
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/requests.go
@@ -0,0 +1,804 @@
+package clientconfig
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+
+	yaml "gopkg.in/yaml.v2"
+)
+
+// AuthType represents a valid method of authentication.
+type AuthType string
+
+const (
+	// AuthPassword defines password authentication with an unspecified
+	// identity API version.
+	AuthPassword AuthType = "password"
+	// AuthToken defines token authentication with an unspecified
+	// identity API version.
+	AuthToken AuthType = "token"
+
+	// AuthV2Password defines password authentication with identity v2.
+	AuthV2Password AuthType = "v2password"
+	// AuthV2Token defines token authentication with identity v2.
+	AuthV2Token AuthType = "v2token"
+
+	// AuthV3Password defines password authentication with identity v3.
+	AuthV3Password AuthType = "v3password"
+	// AuthV3Token defines token authentication with identity v3.
+	AuthV3Token AuthType = "v3token"
+
+	// AuthV3ApplicationCredential defines application credential
+	// authentication with identity v3.
+	AuthV3ApplicationCredential AuthType = "v3applicationcredential"
+)
+
+// ClientOpts represents options to customize the way a client is
+// configured.
+type ClientOpts struct {
+	// Cloud is the cloud entry in clouds.yaml to use.
+	Cloud string
+
+	// EnvPrefix allows a custom environment variable prefix to be used.
+	EnvPrefix string
+
+	// AuthType specifies the type of authentication to use.
+	// By default, this is "password".
+	AuthType AuthType
+
+	// AuthInfo defines the authentication information needed to
+	// authenticate to a cloud when clouds.yaml isn't used.
+	AuthInfo *AuthInfo
+
+	// RegionName is the region to create a Service Client in.
+	// This will override a region in clouds.yaml or can be used
+	// when authenticating directly with AuthInfo.
+	RegionName string
+}
+
+// LoadCloudsYAML will load a clouds.yaml file and return the full config.
+func LoadCloudsYAML() (map[string]Cloud, error) {
+	content, err := findAndReadCloudsYAML()
+	if err != nil {
+		return nil, err
+	}
+
+	var clouds Clouds
+	err = yaml.Unmarshal(content, &clouds)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal yaml: %v", err)
+	}
+
+	return clouds.Clouds, nil
+}
+
+// LoadSecureCloudsYAML will load a secure.yaml file and return the full config.
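+// A missing secure.yaml is not treated as an error: the file is optional,
+// so an empty result is returned when it cannot be found.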
+func LoadSecureCloudsYAML() (map[string]Cloud, error) {
+	var secureClouds Clouds
+
+	content, err := findAndReadSecureCloudsYAML()
+	if err != nil {
+		if err.Error() == "no secure.yaml file found" {
+			// secure.yaml is optional so just ignore read error
+			return secureClouds.Clouds, nil
+		}
+		return nil, err
+	}
+
+	err = yaml.Unmarshal(content, &secureClouds)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal yaml: %v", err)
+	}
+
+	return secureClouds.Clouds, nil
+}
+
+// LoadPublicCloudsYAML will load a clouds-public.yaml file and return the full config.
+func LoadPublicCloudsYAML() (map[string]Cloud, error) {
+	var publicClouds PublicClouds
+
+	content, err := findAndReadPublicCloudsYAML()
+	if err != nil {
+		if err.Error() == "no clouds-public.yaml file found" {
+			// clouds-public.yaml is optional so just ignore read error
+			return publicClouds.Clouds, nil
+		}
+
+		return nil, err
+	}
+
+	err = yaml.Unmarshal(content, &publicClouds)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal yaml: %v", err)
+	}
+
+	return publicClouds.Clouds, nil
+}
+
+// GetCloudFromYAML will return a cloud entry from a clouds.yaml file.
+func GetCloudFromYAML(opts *ClientOpts) (*Cloud, error) {
+	clouds, err := LoadCloudsYAML()
+	if err != nil {
+		return nil, fmt.Errorf("unable to load clouds.yaml: %s", err)
+	}
+
+	// Determine which cloud to use.
+	// First see if a cloud name was explicitly set in opts.
+	var cloudName string
+	if opts != nil && opts.Cloud != "" {
+		cloudName = opts.Cloud
+	}
+
+	// Next see if a cloud name was specified as an environment variable.
+	// This is supposed to override an explicit opts setting.
+	envPrefix := "OS_"
+	if opts != nil && opts.EnvPrefix != "" {
+		envPrefix = opts.EnvPrefix
+	}
+
+	if v := os.Getenv(envPrefix + "CLOUD"); v != "" {
+		cloudName = v
+	}
+
+	var cloud *Cloud
+	if cloudName != "" {
+		v, ok := clouds[cloudName]
+		if !ok {
+			return nil, fmt.Errorf("cloud %s does not exist in clouds.yaml", cloudName)
+		}
+		cloud = &v
+	}
+
+	// If a cloud was not specified, and clouds only contains
+	// a single entry, use that entry.
+	if cloudName == "" && len(clouds) == 1 {
+		for _, v := range clouds {
+			cloud = &v
+		}
+	}
+
+	var cloudIsInCloudsYaml bool
+	if cloud == nil {
+		// not an immediate error as it might still be defined in secure.yaml
+		cloudIsInCloudsYaml = false
+	} else {
+		cloudIsInCloudsYaml = true
+	}
+
+	publicClouds, err := LoadPublicCloudsYAML()
+	if err != nil {
+		return nil, fmt.Errorf("unable to load clouds-public.yaml: %s", err)
+	}
+
+	// cloud may still be nil here (no explicit name and multiple entries),
+	// so guard the dereference; the entry may yet come from secure.yaml.
+	var profileName string
+	if cloud != nil {
+		profileName = defaultIfEmpty(cloud.Profile, cloud.Cloud)
+	}
+	if profileName != "" {
+		publicCloud, ok := publicClouds[profileName]
+		if !ok {
+			return nil, fmt.Errorf("cloud %s does not exist in clouds-public.yaml", profileName)
+		}
+		cloud, err = mergeClouds(cloud, publicCloud)
+		if err != nil {
+			return nil, fmt.Errorf("Could not merge information from clouds.yaml and clouds-public.yaml for cloud %s", profileName)
+		}
+	}
+
+	secureClouds, err := LoadSecureCloudsYAML()
+	if err != nil {
+		return nil, fmt.Errorf("unable to load secure.yaml: %s", err)
+	}
+
+	if secureClouds != nil {
+		// If no entry was found in clouds.yaml, no cloud name was specified,
+		// and only one secureCloud entry exists, use that as the cloud entry.
+		if !cloudIsInCloudsYaml && cloudName == "" && len(secureClouds) == 1 {
+			for _, v := range secureClouds {
+				cloud = &v
+			}
+		}
+
+		secureCloud, ok := secureClouds[cloudName]
+		if !ok && cloud == nil {
+			// cloud == nil serves two purposes here:
+			// if no entry in clouds.yaml was found and
+			// if a single-entry secureCloud wasn't used.
+			// At this point, no entry could be determined at all.
+			return nil, fmt.Errorf("Could not find cloud %s", cloudName)
+		}
+
+		// If secureCloud has content and it differs from the cloud entry,
+		// merge the two together.
+		if !reflect.DeepEqual((Cloud{}), secureCloud) && !reflect.DeepEqual(cloud, secureCloud) {
+			cloud, err = mergeClouds(secureCloud, cloud)
+			if err != nil {
+				return nil, fmt.Errorf("unable to merge information from clouds.yaml and secure.yaml")
+			}
+		}
+	}
+
+	// Default is to verify SSL API requests.
+	if cloud.Verify == nil {
+		iTrue := true
+		cloud.Verify = &iTrue
+	}
+
+	// TODO: this is where reading vendor files should be considered when the
+	// cloud is not found in clouds-public.yaml
+	// https://github.com/openstack/openstacksdk/tree/master/openstack/config/vendors
+
+	return cloud, nil
+}
+
+// AuthOptions creates a gophercloud.AuthOptions structure with the
+// settings found in a specific cloud entry of a clouds.yaml file or
+// based on authentication settings given in ClientOpts.
+//
+// This attempts to be a single point of entry for all OpenStack authentication.
+//
+// See http://docs.openstack.org/developer/os-client-config and
+// https://github.com/openstack/os-client-config/blob/master/os_client_config/config.py.
+func AuthOptions(opts *ClientOpts) (*gophercloud.AuthOptions, error) {
+	cloud := new(Cloud)
+
+	// If no opts were passed in, create an empty ClientOpts.
+	if opts == nil {
+		opts = new(ClientOpts)
+	}
+
+	// Determine if a clouds.yaml entry should be retrieved.
+	// Start by figuring out the cloud name.
+	// First check if one was explicitly specified in opts.
+	var cloudName string
+	if opts.Cloud != "" {
+		cloudName = opts.Cloud
+	}
+
+	// Next see if a cloud name was specified as an environment variable.
+	envPrefix := "OS_"
+	if opts.EnvPrefix != "" {
+		envPrefix = opts.EnvPrefix
+	}
+
+	if v := os.Getenv(envPrefix + "CLOUD"); v != "" {
+		cloudName = v
+	}
+
+	// If a cloud name was determined, try to look it up in clouds.yaml.
+	if cloudName != "" {
+		// Get the requested cloud.
+		var err error
+		cloud, err = GetCloudFromYAML(opts)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If cloud.AuthInfo is nil, then no cloud was specified.
+	if cloud.AuthInfo == nil {
+		// If opts.AuthInfo is not nil, then try using the auth settings from it.
+		if opts.AuthInfo != nil {
+			cloud.AuthInfo = opts.AuthInfo
+		}
+
+		// If cloud.AuthInfo is still nil, then set it to an empty Auth struct
+		// and rely on environment variables to do the authentication.
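+		// (Typically OS_AUTH_URL, OS_USERNAME, OS_PASSWORD and the other
+		// OS_-prefixed variables consulted by v2auth and v3auth below.)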
+ if cloud.AuthInfo == nil { + cloud.AuthInfo = new(AuthInfo) + } + } + + identityAPI := determineIdentityAPI(cloud, opts) + switch identityAPI { + case "2.0", "2": + return v2auth(cloud, opts) + case "3": + return v3auth(cloud, opts) + } + + return nil, fmt.Errorf("Unable to build AuthOptions") +} + +func determineIdentityAPI(cloud *Cloud, opts *ClientOpts) string { + var identityAPI string + if cloud.IdentityAPIVersion != "" { + identityAPI = cloud.IdentityAPIVersion + } + + envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "IDENTITY_API_VERSION"); v != "" { + identityAPI = v + } + + if identityAPI == "" { + if cloud.AuthInfo != nil { + if strings.Contains(cloud.AuthInfo.AuthURL, "v2.0") { + identityAPI = "2.0" + } + + if strings.Contains(cloud.AuthInfo.AuthURL, "v3") { + identityAPI = "3" + } + } + } + + if identityAPI == "" { + switch cloud.AuthType { + case AuthV2Password: + identityAPI = "2.0" + case AuthV2Token: + identityAPI = "2.0" + case AuthV3Password: + identityAPI = "3" + case AuthV3Token: + identityAPI = "3" + case AuthV3ApplicationCredential: + identityAPI = "3" + } + } + + // If an Identity API version could not be determined, + // default to v3. + if identityAPI == "" { + identityAPI = "3" + } + + return identityAPI +} + +// v2auth creates a v2-compatible gophercloud.AuthOptions struct. +func v2auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) { + // Environment variable overrides. + envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if cloud.AuthInfo.AuthURL == "" { + if v := os.Getenv(envPrefix + "AUTH_URL"); v != "" { + cloud.AuthInfo.AuthURL = v + } + } + + if cloud.AuthInfo.Token == "" { + if v := os.Getenv(envPrefix + "TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "AUTH_TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + } + + if cloud.AuthInfo.Username == "" { + if v := os.Getenv(envPrefix + "USERNAME"); v != "" { + cloud.AuthInfo.Username = v + } + } + + if cloud.AuthInfo.Password == "" { + if v := os.Getenv(envPrefix + "PASSWORD"); v != "" { + cloud.AuthInfo.Password = v + } + } + + if cloud.AuthInfo.ProjectID == "" { + if v := os.Getenv(envPrefix + "TENANT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "PROJECT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + } + + if cloud.AuthInfo.ProjectName == "" { + if v := os.Getenv(envPrefix + "TENANT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + if v := os.Getenv(envPrefix + "PROJECT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + } + + ao := &gophercloud.AuthOptions{ + IdentityEndpoint: cloud.AuthInfo.AuthURL, + TokenID: cloud.AuthInfo.Token, + Username: cloud.AuthInfo.Username, + Password: cloud.AuthInfo.Password, + TenantID: cloud.AuthInfo.ProjectID, + TenantName: cloud.AuthInfo.ProjectName, + } + + return ao, nil +} + +// v3auth creates a v3-compatible gophercloud.AuthOptions struct. +func v3auth(cloud *Cloud, opts *ClientOpts) (*gophercloud.AuthOptions, error) { + // Environment variable overrides. 
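+	// Note that each variable below is consulted only when the corresponding
+	// field is still empty, so values from clouds.yaml or ClientOpts take
+	// precedence over the environment.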
+ envPrefix := "OS_" + if opts != nil && opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if cloud.AuthInfo.AuthURL == "" { + if v := os.Getenv(envPrefix + "AUTH_URL"); v != "" { + cloud.AuthInfo.AuthURL = v + } + } + + if cloud.AuthInfo.Token == "" { + if v := os.Getenv(envPrefix + "TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + + if v := os.Getenv(envPrefix + "AUTH_TOKEN"); v != "" { + cloud.AuthInfo.Token = v + } + } + + if cloud.AuthInfo.Username == "" { + if v := os.Getenv(envPrefix + "USERNAME"); v != "" { + cloud.AuthInfo.Username = v + } + } + + if cloud.AuthInfo.UserID == "" { + if v := os.Getenv(envPrefix + "USER_ID"); v != "" { + cloud.AuthInfo.UserID = v + } + } + + if cloud.AuthInfo.Password == "" { + if v := os.Getenv(envPrefix + "PASSWORD"); v != "" { + cloud.AuthInfo.Password = v + } + } + + if cloud.AuthInfo.ProjectID == "" { + if v := os.Getenv(envPrefix + "TENANT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + + if v := os.Getenv(envPrefix + "PROJECT_ID"); v != "" { + cloud.AuthInfo.ProjectID = v + } + } + + if cloud.AuthInfo.ProjectName == "" { + if v := os.Getenv(envPrefix + "TENANT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + + if v := os.Getenv(envPrefix + "PROJECT_NAME"); v != "" { + cloud.AuthInfo.ProjectName = v + } + } + + if cloud.AuthInfo.DomainID == "" { + if v := os.Getenv(envPrefix + "DOMAIN_ID"); v != "" { + cloud.AuthInfo.DomainID = v + } + } + + if cloud.AuthInfo.DomainName == "" { + if v := os.Getenv(envPrefix + "DOMAIN_NAME"); v != "" { + cloud.AuthInfo.DomainName = v + } + } + + if cloud.AuthInfo.DefaultDomain == "" { + if v := os.Getenv(envPrefix + "DEFAULT_DOMAIN"); v != "" { + cloud.AuthInfo.DefaultDomain = v + } + } + + if cloud.AuthInfo.ProjectDomainID == "" { + if v := os.Getenv(envPrefix + "PROJECT_DOMAIN_ID"); v != "" { + cloud.AuthInfo.ProjectDomainID = v + } + } + + if cloud.AuthInfo.ProjectDomainName == "" { + if v := os.Getenv(envPrefix + "PROJECT_DOMAIN_NAME"); v != "" { + cloud.AuthInfo.ProjectDomainName = v + } + } + + if cloud.AuthInfo.UserDomainID == "" { + if v := os.Getenv(envPrefix + "USER_DOMAIN_ID"); v != "" { + cloud.AuthInfo.UserDomainID = v + } + } + + if cloud.AuthInfo.UserDomainName == "" { + if v := os.Getenv(envPrefix + "USER_DOMAIN_NAME"); v != "" { + cloud.AuthInfo.UserDomainName = v + } + } + + if cloud.AuthInfo.ApplicationCredentialID == "" { + if v := os.Getenv(envPrefix + "APPLICATION_CREDENTIAL_ID"); v != "" { + cloud.AuthInfo.ApplicationCredentialID = v + } + } + + if cloud.AuthInfo.ApplicationCredentialName == "" { + if v := os.Getenv(envPrefix + "APPLICATION_CREDENTIAL_NAME"); v != "" { + cloud.AuthInfo.ApplicationCredentialName = v + } + } + + if cloud.AuthInfo.ApplicationCredentialSecret == "" { + if v := os.Getenv(envPrefix + "APPLICATION_CREDENTIAL_SECRET"); v != "" { + cloud.AuthInfo.ApplicationCredentialSecret = v + } + } + + // Build a scope and try to do it correctly. + // https://github.com/openstack/os-client-config/blob/master/os_client_config/config.py#L595 + scope := new(gophercloud.AuthScope) + + // Application credentials don't support scope + if isApplicationCredential(cloud.AuthInfo) { + // If Domain* is set, but UserDomain* or ProjectDomain* aren't, + // then use Domain* as the default setting. 
+ cloud = setDomainIfNeeded(cloud) + } else { + if !isProjectScoped(cloud.AuthInfo) { + if cloud.AuthInfo.DomainID != "" { + scope.DomainID = cloud.AuthInfo.DomainID + } else if cloud.AuthInfo.DomainName != "" { + scope.DomainName = cloud.AuthInfo.DomainName + } + } else { + // If Domain* is set, but UserDomain* or ProjectDomain* aren't, + // then use Domain* as the default setting. + cloud = setDomainIfNeeded(cloud) + + if cloud.AuthInfo.ProjectID != "" { + scope.ProjectID = cloud.AuthInfo.ProjectID + } else { + scope.ProjectName = cloud.AuthInfo.ProjectName + scope.DomainID = cloud.AuthInfo.ProjectDomainID + scope.DomainName = cloud.AuthInfo.ProjectDomainName + } + } + } + + ao := &gophercloud.AuthOptions{ + Scope: scope, + IdentityEndpoint: cloud.AuthInfo.AuthURL, + TokenID: cloud.AuthInfo.Token, + Username: cloud.AuthInfo.Username, + UserID: cloud.AuthInfo.UserID, + Password: cloud.AuthInfo.Password, + TenantID: cloud.AuthInfo.ProjectID, + TenantName: cloud.AuthInfo.ProjectName, + DomainID: cloud.AuthInfo.UserDomainID, + DomainName: cloud.AuthInfo.UserDomainName, + ApplicationCredentialID: cloud.AuthInfo.ApplicationCredentialID, + ApplicationCredentialName: cloud.AuthInfo.ApplicationCredentialName, + ApplicationCredentialSecret: cloud.AuthInfo.ApplicationCredentialSecret, + } + + // If an auth_type of "token" was specified, then make sure + // Gophercloud properly authenticates with a token. This involves + // unsetting a few other auth options. The reason this is done + // here is to wait until all auth settings (both in clouds.yaml + // and via environment variables) are set and then unset them. + if strings.Contains(string(cloud.AuthType), "token") || ao.TokenID != "" { + ao.Username = "" + ao.Password = "" + ao.UserID = "" + ao.DomainID = "" + ao.DomainName = "" + } + + // Check for absolute minimum requirements. + if ao.IdentityEndpoint == "" { + err := gophercloud.ErrMissingInput{Argument: "auth_url"} + return nil, err + } + + return ao, nil +} + +// AuthenticatedClient is a convenience function to get a new provider client +// based on a clouds.yaml entry. +func AuthenticatedClient(opts *ClientOpts) (*gophercloud.ProviderClient, error) { + ao, err := AuthOptions(opts) + if err != nil { + return nil, err + } + + return openstack.AuthenticatedClient(*ao) +} + +// NewServiceClient is a convenience function to get a new service client. +func NewServiceClient(service string, opts *ClientOpts) (*gophercloud.ServiceClient, error) { + cloud := new(Cloud) + + // If no opts were passed in, create an empty ClientOpts. + if opts == nil { + opts = new(ClientOpts) + } + + // Determine if a clouds.yaml entry should be retrieved. + // Start by figuring out the cloud name. + // First check if one was explicitly specified in opts. + var cloudName string + if opts.Cloud != "" { + cloudName = opts.Cloud + } + + // Next see if a cloud name was specified as an environment variable. + envPrefix := "OS_" + if opts.EnvPrefix != "" { + envPrefix = opts.EnvPrefix + } + + if v := os.Getenv(envPrefix + "CLOUD"); v != "" { + cloudName = v + } + + // If a cloud name was determined, try to look it up in clouds.yaml. + if cloudName != "" { + // Get the requested cloud. + var err error + cloud, err = GetCloudFromYAML(opts) + if err != nil { + return nil, err + } + } + + // Get a Provider Client + pClient, err := AuthenticatedClient(opts) + if err != nil { + return nil, err + } + + // Determine the region to use. + // First, check if the REGION_NAME environment variable is set. 
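+	// Later assignments below overwrite earlier ones, so the effective
+	// precedence is: environment variable, then clouds.yaml entry, then an
+	// explicit ClientOpts.RegionName.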
+ var region string + if v := os.Getenv(envPrefix + "REGION_NAME"); v != "" { + region = v + } + + // Next, check if the cloud entry sets a region. + if v := cloud.RegionName; v != "" { + region = v + } + + // Finally, see if one was specified in the ClientOpts. + // If so, this takes precedence. + if v := opts.RegionName; v != "" { + region = v + } + + eo := gophercloud.EndpointOpts{ + Region: region, + } + + switch service { + case "clustering": + return openstack.NewClusteringV1(pClient, eo) + case "compute": + return openstack.NewComputeV2(pClient, eo) + case "container": + return openstack.NewContainerV1(pClient, eo) + case "database": + return openstack.NewDBV1(pClient, eo) + case "dns": + return openstack.NewDNSV2(pClient, eo) + case "identity": + identityVersion := "3" + if v := cloud.IdentityAPIVersion; v != "" { + identityVersion = v + } + + switch identityVersion { + case "v2", "2", "2.0": + return openstack.NewIdentityV2(pClient, eo) + case "v3", "3": + return openstack.NewIdentityV3(pClient, eo) + default: + return nil, fmt.Errorf("invalid identity API version") + } + case "image": + return openstack.NewImageServiceV2(pClient, eo) + case "load-balancer": + return openstack.NewLoadBalancerV2(pClient, eo) + case "network": + return openstack.NewNetworkV2(pClient, eo) + case "object-store": + return openstack.NewObjectStorageV1(pClient, eo) + case "orchestration": + return openstack.NewOrchestrationV1(pClient, eo) + case "sharev2": + return openstack.NewSharedFileSystemV2(pClient, eo) + case "volume": + volumeVersion := "2" + if v := cloud.VolumeAPIVersion; v != "" { + volumeVersion = v + } + + switch volumeVersion { + case "v1", "1": + return openstack.NewBlockStorageV1(pClient, eo) + case "v2", "2": + return openstack.NewBlockStorageV2(pClient, eo) + case "v3", "3": + return openstack.NewBlockStorageV3(pClient, eo) + default: + return nil, fmt.Errorf("invalid volume API version") + } + } + + return nil, fmt.Errorf("unable to create a service client for %s", service) +} + +// isProjectScoped determines if an auth struct is project scoped. +func isProjectScoped(authInfo *AuthInfo) bool { + if authInfo.ProjectID == "" && authInfo.ProjectName == "" { + return false + } + + return true +} + +// setDomainIfNeeded will set a DomainID and DomainName +// to ProjectDomain* and UserDomain* if not already set. +func setDomainIfNeeded(cloud *Cloud) *Cloud { + if cloud.AuthInfo.DomainID != "" { + if cloud.AuthInfo.UserDomainID == "" { + cloud.AuthInfo.UserDomainID = cloud.AuthInfo.DomainID + } + + if cloud.AuthInfo.ProjectDomainID == "" { + cloud.AuthInfo.ProjectDomainID = cloud.AuthInfo.DomainID + } + + cloud.AuthInfo.DomainID = "" + } + + if cloud.AuthInfo.DomainName != "" { + if cloud.AuthInfo.UserDomainName == "" { + cloud.AuthInfo.UserDomainName = cloud.AuthInfo.DomainName + } + + if cloud.AuthInfo.ProjectDomainName == "" { + cloud.AuthInfo.ProjectDomainName = cloud.AuthInfo.DomainName + } + + cloud.AuthInfo.DomainName = "" + } + + // If Domain fields are still not set, and if DefaultDomain has a value, + // set UserDomainID and ProjectDomainID to DefaultDomain. 
+ // https://github.com/openstack/osc-lib/blob/86129e6f88289ef14bfaa3f7c9cdfbea8d9fc944/osc_lib/cli/client_config.py#L117-L146 + if cloud.AuthInfo.DefaultDomain != "" { + if cloud.AuthInfo.UserDomainName == "" && cloud.AuthInfo.UserDomainID == "" { + cloud.AuthInfo.UserDomainID = cloud.AuthInfo.DefaultDomain + } + + if cloud.AuthInfo.ProjectDomainName == "" && cloud.AuthInfo.ProjectDomainID == "" { + cloud.AuthInfo.ProjectDomainID = cloud.AuthInfo.DefaultDomain + } + } + + return cloud +} + +// isApplicationCredential determines if an application credential is used to auth. +func isApplicationCredential(authInfo *AuthInfo) bool { + if authInfo.ApplicationCredentialID == "" && authInfo.ApplicationCredentialName == "" && authInfo.ApplicationCredentialSecret == "" { + return false + } + return true +} diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go new file mode 100644 index 00000000..292ff5dc --- /dev/null +++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/results.go @@ -0,0 +1,121 @@ +package clientconfig + +// PublicClouds represents a collection of PublicCloud entries in clouds-public.yaml file. +// The format of the clouds-public.yml is documented at +// https://docs.openstack.org/python-openstackclient/latest/configuration/ +type PublicClouds struct { + Clouds map[string]Cloud `yaml:"public-clouds" json:"public-clouds"` +} + +// Clouds represents a collection of Cloud entries in a clouds.yaml file. +// The format of clouds.yaml is documented at +// https://docs.openstack.org/os-client-config/latest/user/configuration.html. +type Clouds struct { + Clouds map[string]Cloud `yaml:"clouds" json:"clouds"` +} + +// Cloud represents an entry in a clouds.yaml/public-clouds.yaml/secure.yaml file. +type Cloud struct { + Cloud string `yaml:"cloud" json:"cloud"` + Profile string `yaml:"profile" json:"profile"` + AuthInfo *AuthInfo `yaml:"auth" json:"auth"` + AuthType AuthType `yaml:"auth_type" json:"auth_type"` + RegionName string `yaml:"region_name" json:"region_name"` + Regions []interface{} `yaml:"regions" json:"regions"` + + // API Version overrides. + IdentityAPIVersion string `yaml:"identity_api_version" json:"identity_api_version"` + VolumeAPIVersion string `yaml:"volume_api_version" json:"volume_api_version"` + + // Verify whether or not SSL API requests should be verified. + Verify *bool `yaml:"verify" json:"verify"` + + // CACertFile a path to a CA Cert bundle that can be used as part of + // verifying SSL API requests. + CACertFile string `yaml:"cacert" json:"cacert"` + + // ClientCertFile a path to a client certificate to use as part of the SSL + // transaction. + ClientCertFile string `yaml:"cert" json:"cert"` + + // ClientKeyFile a path to a client key to use as part of the SSL + // transaction. + ClientKeyFile string `yaml:"key" json:"key"` +} + +// AuthInfo represents the auth section of a cloud entry or +// auth options entered explicitly in ClientOpts. +type AuthInfo struct { + // AuthURL is the keystone/identity endpoint URL. + AuthURL string `yaml:"auth_url" json:"auth_url"` + + // Token is a pre-generated authentication token. + Token string `yaml:"token" json:"token"` + + // Username is the username of the user. + Username string `yaml:"username" json:"username"` + + // UserID is the unique ID of a user. + UserID string `yaml:"user_id" json:"user_id"` + + // Password is the password of the user. 
+	Password string `yaml:"password" json:"password"`
+
+	// Application Credential ID to login with.
+	ApplicationCredentialID string `yaml:"application_credential_id" json:"application_credential_id"`
+
+	// Application Credential name to login with.
+	ApplicationCredentialName string `yaml:"application_credential_name" json:"application_credential_name"`
+
+	// Application Credential secret to login with.
+	ApplicationCredentialSecret string `yaml:"application_credential_secret" json:"application_credential_secret"`
+
+	// ProjectName is the common/human-readable name of a project.
+	// Users can be scoped to a project.
+	// ProjectName on its own is not enough to ensure a unique scope. It must
+	// also be combined with either a ProjectDomainName or ProjectDomainID.
+	// ProjectName cannot be combined with ProjectID in a scope.
+	ProjectName string `yaml:"project_name" json:"project_name"`
+
+	// ProjectID is the unique ID of a project.
+	// It can be used to scope a user to a specific project.
+	ProjectID string `yaml:"project_id" json:"project_id"`
+
+	// UserDomainName is the name of the domain where a user resides.
+	// It is used to identify the source domain of a user.
+	UserDomainName string `yaml:"user_domain_name" json:"user_domain_name"`
+
+	// UserDomainID is the unique ID of the domain where a user resides.
+	// It is used to identify the source domain of a user.
+	UserDomainID string `yaml:"user_domain_id" json:"user_domain_id"`
+
+	// ProjectDomainName is the name of the domain where a project resides.
+	// It is used to identify the source domain of a project.
+	// ProjectDomainName can be used in addition to a ProjectName when scoping
+	// a user to a specific project.
+	ProjectDomainName string `yaml:"project_domain_name" json:"project_domain_name"`
+
+	// ProjectDomainID is the unique ID of the domain where a project resides.
+	// It is used to identify the source domain of a project.
+	// ProjectDomainID can be used in addition to a ProjectName when scoping
+	// a user to a specific project.
+	ProjectDomainID string `yaml:"project_domain_id" json:"project_domain_id"`
+
+	// DomainName is the name of a domain which can be used to identify the
+	// source domain of either a user or a project.
+	// If UserDomainName and ProjectDomainName are not specified, then DomainName
+	// is used as a default choice.
+	// It can also be used to specify a domain-only scope.
+	DomainName string `yaml:"domain_name" json:"domain_name"`
+
+	// DomainID is the unique ID of a domain which can be used to identify the
+	// source domain of either a user or a project.
+	// If UserDomainID and ProjectDomainID are not specified, then DomainID is
+	// used as a default choice.
+	// It can also be used to specify a domain-only scope.
+	DomainID string `yaml:"domain_id" json:"domain_id"`
+
+	// DefaultDomain is the domain ID to fall back on if no other domain has
+	// been specified and a domain is required for scope.
+	DefaultDomain string `yaml:"default_domain" json:"default_domain"`
+}
diff --git a/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go b/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go
new file mode 100644
index 00000000..884e5644
--- /dev/null
+++ b/vendor/github.com/gophercloud/utils/openstack/clientconfig/utils.go
@@ -0,0 +1,155 @@
+package clientconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/user"
+	"path/filepath"
+	"reflect"
+)
+
+// defaultIfEmpty is a helper function to make it cleaner to set default value
+// for strings.
+func defaultIfEmpty(value string, defaultValue string) string {
+	if value == "" {
+		return defaultValue
+	}
+	return value
+}
+
+// mergeClouds merges two Clouds recursively (the AuthInfo also gets merged).
+// In case both Clouds define a value, the value in the 'override' cloud takes precedence.
+func mergeClouds(override, cloud interface{}) (*Cloud, error) {
+	overrideJson, err := json.Marshal(override)
+	if err != nil {
+		return nil, err
+	}
+	cloudJson, err := json.Marshal(cloud)
+	if err != nil {
+		return nil, err
+	}
+	var overrideInterface interface{}
+	err = json.Unmarshal(overrideJson, &overrideInterface)
+	if err != nil {
+		return nil, err
+	}
+	var cloudInterface interface{}
+	err = json.Unmarshal(cloudJson, &cloudInterface)
+	if err != nil {
+		return nil, err
+	}
+	var mergedCloud Cloud
+	mergedInterface := mergeInterfaces(overrideInterface, cloudInterface)
+	mergedJson, err := json.Marshal(mergedInterface)
+	if err != nil {
+		return nil, err
+	}
+	err = json.Unmarshal(mergedJson, &mergedCloud)
+	if err != nil {
+		return nil, err
+	}
+	return &mergedCloud, nil
+}
+
+// mergeInterfaces merges two interfaces. In cases where a value is defined for both
+// 'overridingInterface' and 'inferiorInterface' the value in 'overridingInterface'
+// will take precedence.
+func mergeInterfaces(overridingInterface, inferiorInterface interface{}) interface{} {
+	switch overriding := overridingInterface.(type) {
+	case map[string]interface{}:
+		interfaceMap, ok := inferiorInterface.(map[string]interface{})
+		if !ok {
+			return overriding
+		}
+		for k, v := range interfaceMap {
+			if overridingValue, ok := overriding[k]; ok {
+				overriding[k] = mergeInterfaces(overridingValue, v)
+			} else {
+				overriding[k] = v
+			}
+		}
+	case []interface{}:
+		list, ok := inferiorInterface.([]interface{})
+		if !ok {
+			return overriding
+		}
+		for i := range list {
+			overriding = append(overriding, list[i])
+		}
+		return overriding
+	case nil:
+		// mergeClouds(nil, map[string]interface{...}) -> map[string]interface{...}
+		v, ok := inferiorInterface.(map[string]interface{})
+		if ok {
+			return v
+		}
+	}
+	// We don't want to override with empty values
+	if reflect.DeepEqual(overridingInterface, nil) || reflect.DeepEqual(reflect.Zero(reflect.TypeOf(overridingInterface)).Interface(), overridingInterface) {
+		return inferiorInterface
+	} else {
+		return overridingInterface
+	}
+}
+
+// findAndReadCloudsYAML attempts to locate a clouds.yaml file in the following
+// locations:
+//
+// 1. OS_CLIENT_CONFIG_FILE
+// 2. Current directory.
+// 3. unix-specific user_config_dir (~/.config/openstack/clouds.yaml)
+// 4. unix-specific site_config_dir (/etc/openstack/clouds.yaml)
+//
+// If found, the contents of the file are returned.
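+// If no file exists in any of these locations, an error is returned.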
+func findAndReadCloudsYAML() ([]byte, error) { + // OS_CLIENT_CONFIG_FILE + if v := os.Getenv("OS_CLIENT_CONFIG_FILE"); v != "" { + if ok := fileExists(v); ok { + return ioutil.ReadFile(v) + } + } + + return findAndReadYAML("clouds.yaml") +} + +func findAndReadPublicCloudsYAML() ([]byte, error) { + return findAndReadYAML("clouds-public.yaml") +} + +func findAndReadSecureCloudsYAML() ([]byte, error) { + return findAndReadYAML("secure.yaml") +} + +func findAndReadYAML(yamlFile string) ([]byte, error) { + // current directory + cwd, err := os.Getwd() + if err != nil { + return nil, fmt.Errorf("unable to determine working directory: %s", err) + } + + filename := filepath.Join(cwd, yamlFile) + if ok := fileExists(filename); ok { + return ioutil.ReadFile(filename) + } + + // unix user config directory: ~/.config/openstack. + if currentUser, err := user.Current(); err == nil { + homeDir := currentUser.HomeDir + if homeDir != "" { + filename := filepath.Join(homeDir, ".config/openstack/"+yamlFile) + if ok := fileExists(filename); ok { + return ioutil.ReadFile(filename) + } + } + } + + // unix-specific site config directory: /etc/openstack. + if ok := fileExists("/etc/openstack/" + yamlFile); ok { + return ioutil.ReadFile("/etc/openstack/" + yamlFile) + } + + return nil, fmt.Errorf("no " + yamlFile + " file found") +} + +// fileExists checks for the existence of a file at a given location. +func fileExists(filename string) bool { + if _, err := os.Stat(filename); err == nil { + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 00000000..36451625 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
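Before moving on to the grpc-gateway vendor files, the clientconfig package vendored above is easiest to see end to end. A minimal sketch of how a consumer could resolve a clouds.yaml entry into an authenticated service client (the "hawaii" entry name matches the package documentation; everything else is an illustrative assumption, not code from this change):

```go
package main

import (
	"fmt"

	"github.com/gophercloud/utils/openstack/clientconfig"
)

func main() {
	// Resolve the "hawaii" entry from clouds.yaml, authenticate, and build
	// a compute client in one step via NewServiceClient.
	client, err := clientconfig.NewServiceClient("compute", &clientconfig.ClientOpts{
		Cloud: "hawaii",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("compute endpoint:", client.Endpoint)
}
```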
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 00000000..76cafe6e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["stream_chunk.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go new file mode 100644 index 00000000..8858f069 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go @@ -0,0 +1,118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/stream_chunk.proto + +package internal + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} +} +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (dst *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(dst, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { + proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) +} + +var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, + 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, + 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, + 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, + 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, + 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, + 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, + 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, + 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, + 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, + 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, + 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, + 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, + 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto new file mode 100644 index 00000000..55f42ce6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 00000000..20862228 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,84 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//examples/proto/examplepb:go_default_library", + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + 
"@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 00000000..896057e1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,210 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + for _, val := range vals { + key = textproto.CanonicalMIMEHeaderKey(key) + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. 
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } else { + grpclog.Infof("invalid remote addr: %s", addr) + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return metadata.NewOutgoingContext(ctx, md), nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + } + return +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. 
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 00000000..a5b3bd6a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,312 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with the other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
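+// For example, Int64Slice("1,2,3", ",") returns []int64{1, 2, 3}.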
+func Int64Slice(val, sep string) ([]int64, error) { + s := strings.Split(val, sep) + values := make([]int64, len(s)) + for i, v := range s { + value, err := Int64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Int32 converts the given string representation of an integer into int32. +func Int32(val string) (int32, error) { + i, err := strconv.ParseInt(val, 0, 32) + if err != nil { + return 0, err + } + return int32(i), nil +} + +// Int32Slice converts 'val' where individual integers are separated by +// 'sep' into a int32 slice. +func Int32Slice(val, sep string) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Int32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint64 converts the given string representation of an integer into uint64. +func Uint64(val string) (uint64, error) { + return strconv.ParseUint(val, 0, 64) +} + +// Uint64Slice converts 'val' where individual integers are separated by +// 'sep' into a uint64 slice. +func Uint64Slice(val, sep string) ([]uint64, error) { + s := strings.Split(val, sep) + values := make([]uint64, len(s)) + for i, v := range s { + value, err := Uint64(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Uint32 converts the given string representation of an integer into uint32. +func Uint32(val string) (uint32, error) { + i, err := strconv.ParseUint(val, 0, 32) + if err != nil { + return 0, err + } + return uint32(i), nil +} + +// Uint32Slice converts 'val' where individual integers are separated by +// 'sep' into a uint32 slice. +func Uint32Slice(val, sep string) ([]uint32, error) { + s := strings.Split(val, sep) + values := make([]uint32, len(s)) + for i, v := range s { + value, err := Uint32(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Bytes converts the given string representation of a byte sequence into a slice of bytes +// A bytes sequence is encoded in URL-safe base64 without padding +func Bytes(val string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(val) + if err != nil { + b, err = base64.URLEncoding.DecodeString(val) + if err != nil { + return nil, err + } + } + return b, nil +} + +// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe +// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +func BytesSlice(val, sep string) ([][]byte, error) { + s := strings.Split(val, sep) + values := make([][]byte, len(s)) + for i, v := range s { + value, err := Bytes(v) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. +func Timestamp(val string) (*timestamp.Timestamp, error) { + var r *timestamp.Timestamp + err := jsonpb.UnmarshalString(val, r) + return r, err +} + +// Duration converts the given string into a timestamp.Duration. +func Duration(val string) (*duration.Duration, error) { + var r *duration.Duration + err := jsonpb.UnmarshalString(val, r) + return r, err +} + +// Enum converts the given string into an int32 that should be type casted into the +// correct enum proto type. 
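+// The value is resolved by name via enumValMap first; a bare integer is also
+// accepted if it matches one of the map's values.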
+func Enum(val string, enumValMap map[string]int32) (int32, error) { + e, ok := enumValMap[val] + if ok { + return e, nil + } + + i, err := Int32(val) + if err != nil { + return 0, fmt.Errorf("%s is not valid", val) + } + for _, v := range enumValMap { + if v == i { + return i, nil + } + } + return 0, fmt.Errorf("%s is not valid", val) +} + +// EnumSlice converts 'val' where individual enums are separated by 'sep' +// into a int32 slice. Each individual int32 should be type casted into the +// correct enum proto type. +func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { + s := strings.Split(val, sep) + values := make([]int32, len(s)) + for i, v := range s { + value, err := Enum(v, enumValMap) + if err != nil { + return values, err + } + values[i] = value + } + return values, nil +} + +/* + Support fot google.protobuf.wrappers on top of primitive types +*/ + +// StringValue well-known type support as wrapper around string type +func StringValue(val string) (*wrappers.StringValue, error) { + return &wrappers.StringValue{Value: val}, nil +} + +// FloatValue well-known type support as wrapper around float32 type +func FloatValue(val string) (*wrappers.FloatValue, error) { + parsedVal, err := Float32(val) + return &wrappers.FloatValue{Value: parsedVal}, err +} + +// DoubleValue well-known type support as wrapper around float64 type +func DoubleValue(val string) (*wrappers.DoubleValue, error) { + parsedVal, err := Float64(val) + return &wrappers.DoubleValue{Value: parsedVal}, err +} + +// BoolValue well-known type support as wrapper around bool type +func BoolValue(val string) (*wrappers.BoolValue, error) { + parsedVal, err := Bool(val) + return &wrappers.BoolValue{Value: parsedVal}, err +} + +// Int32Value well-known type support as wrapper around int32 type +func Int32Value(val string) (*wrappers.Int32Value, error) { + parsedVal, err := Int32(val) + return &wrappers.Int32Value{Value: parsedVal}, err +} + +// UInt32Value well-known type support as wrapper around uint32 type +func UInt32Value(val string) (*wrappers.UInt32Value, error) { + parsedVal, err := Uint32(val) + return &wrappers.UInt32Value{Value: parsedVal}, err +} + +// Int64Value well-known type support as wrapper around int64 type +func Int64Value(val string) (*wrappers.Int64Value, error) { + parsedVal, err := Int64(val) + return &wrappers.Int64Value{Value: parsedVal}, err +} + +// UInt64Value well-known type support as wrapper around uint64 type +func UInt64Value(val string) (*wrappers.UInt64Value, error) { + parsedVal, err := Uint64(val) + return &wrappers.UInt64Value{Value: parsedVal}, err +} + +// BytesValue well-known type support as wrapper around bytes[] type +func BytesValue(val string) (*wrappers.BytesValue, error) { + parsedVal, err := Bytes(val) + return &wrappers.BytesValue{Value: parsedVal}, err +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go new file mode 100644 index 00000000..b6e5ddf7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go @@ -0,0 +1,5 @@ +/* +Package runtime contains runtime helper functions used by +servers which protoc-gen-grpc-gateway generates. 
+*/ +package runtime diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go new file mode 100644 index 00000000..41d54ef9 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -0,0 +1,145 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + return http.StatusPreconditionFailed + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with the error. + // You can set a custom function to this variable to customize error format. + HTTPError = DefaultHTTPError + // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest + OtherErrorHandler = DefaultOtherErrorHandler +) + +type errorBody struct { + Error string `protobuf:"bytes,1,name=error" json:"error"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + Message string `protobuf:"bytes,1,name=message" json:"message"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +// Make this also conform to proto.Message for builtin JSONPb Marshaler +func (e *errorBody) Reset() { *e = errorBody{} } +func (e *errorBody) String() string { return proto.CompactTextString(e) } +func (*errorBody) ProtoMessage() {} + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a JSON object, +// which contains a member whose key is "error" and whose value is err.Error(). 
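+// For example, an error with codes.NotFound is rendered as an HTTP 404
+// response whose JSON body carries the status message and numeric code.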
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + const fallback = `{"error": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + pb := s.Proto() + contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + body := &errorBody{ + Error: s.Message(), + Message: s.Message(), + Code: int32(s.Code()), + Details: s.Proto().GetDetails(), + } + + buf, merr := marshaler.Marshal(body) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", body, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. +// It simply writes a string representation of the given error into "w". +func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { + http.Error(w, msg, code) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go new file mode 100644 index 00000000..e1cf7a91 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go @@ -0,0 +1,70 @@ +package runtime + +import ( + "encoding/json" + "io" + "strings" + + "github.com/golang/protobuf/protoc-gen-go/generator" + "google.golang.org/genproto/protobuf/field_mask" +) + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. 
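+// For example, the body {"name": "x", "address": {"city": "y"}} yields a mask
+// with paths "Name" and "Address.City" (keys are CamelCased to match the
+// generated Go field names); ordering follows map iteration and is not stable.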
+func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node + path []string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} +} + +// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic +// that's used for naming protobuf fields in Go. +func CamelCaseFieldMask(mask *field_mask.FieldMask) { + if mask == nil || mask.Paths == nil { + return + } + + var newPaths []string + for _, path := range mask.Paths { + lowerCasedParts := strings.Split(path, ".") + var camelCasedParts []string + for _, part := range lowerCasedParts { + camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) + } + newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) + } + + mask.Paths = newPaths +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go new file mode 100644 index 00000000..2af90065 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -0,0 +1,209 @@ +package runtime + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + + "context" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/grpclog" +) + +var errEmptyResponse = errors.New("empty response") + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
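+//
+// Each message received from recv is written as a JSON chunk of the form
+// {"result": <message>} (or {"error": <status>} when the stream fails),
+// followed by the marshaler's delimiter (a newline unless the marshaler
+// implements Delimited) and an explicit flush.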
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + + buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler)) + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
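+//
+// When resp implements the generated responseBody interface, only the field
+// selected by `response_body` in the google.api.HttpRule is marshaled into
+// the response body.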
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { + serr := streamError(ctx, mux.streamErrorHandler, err) + if !wroteHeader { + w.WriteHeader(int(serr.HttpCode)) + } + buf, merr := marshaler.Marshal(errorChunk(serr)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if _, werr := w.Write(buf); werr != nil { + grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +// streamChunk returns a chunk in a response stream for the given result. The +// given errHandler is used to render an error chunk if result is nil. +func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message { + if result == nil { + return errorChunk(streamError(ctx, errHandler, errEmptyResponse)) + } + return map[string]proto.Message{"result": result} +} + +// streamError returns the payload for the final message in a response stream +// that represents the given err. +func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { + serr := errHandler(ctx, err) + if serr != nil { + return serr + } + // TODO: log about misbehaving stream error handler? 
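+	// The handler returned nil; fall back to the default handler so the
+	// client always receives a non-nil error payload.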
+	return DefaultHTTPStreamErrorHandler(ctx, err)
+}
+
+func errorChunk(err *StreamError) map[string]proto.Message {
+	return map[string]proto.Message{"error": (*internal.StreamError)(err)}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
new file mode 100644
index 00000000..f55285b5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,43 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler
+func SetHTTPBodyMarshaler(serveMux *ServeMux) {
+	serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{OrigName: true},
+	}
+}
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType implementation to keep backwards compatibility with the Marshaler interface
+func (h *HTTPBodyMarshaler) ContentType() string {
+	return h.ContentTypeFromMessage(nil)
+}
+
+// ContentTypeFromMessage returns the specified content type in case v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType()
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.Data, nil
+	}
+	return h.Marshaler.Marshal(v)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 00000000..f9d3a585
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Go.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, ....
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 00000000..f0de351b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with "github.com/golang/protobuf/jsonpb".
+// Unlike JSONBuiltin, it supports the full functionality of protobuf.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
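+//
+// Typically this is called once during program initialization, before the
+// ServeMux begins serving requests; it flips a package-level flag that every
+// subsequent Unmarshal observes.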
+func DisallowUnknownFields() { + allowUnknownFields = false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go new file mode 100644 index 00000000..f65d1a26 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType() string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + _, err = writer.Write(buffer) + if err != nil { + return err + } + + return nil + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go new file mode 100644 index 00000000..98fe6e88 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go @@ -0,0 +1,48 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. + ContentType() string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. +type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. +type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. 
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. +func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record seperator for the stream. + Delimiter() []byte +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go new file mode 100644 index 00000000..5cc53ae4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "errors" + "net/http" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. +func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. +func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. 
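+//
+// For example, NewServeMux(WithMarshalerOption("application/jsonpb", &JSONPb{}))
+// serves requests that send Content-Type "application/jsonpb" with the JSONPb
+// marshaler instead of the wildcard default.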
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go new file mode 100644 index 00000000..1da3a588 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -0,0 +1,303 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when +// a request is received with a URI path that does not match any registered +// service method. +// +// Since gRPC servers return an "Unimplemented" code for requests with an +// unrecognized URI path, this error also has a gRPC "Unimplemented" code. +var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + streamErrorHandler StreamErrorHandlerFunc + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool + lastMatchWins bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. 
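+//
+// For example, "Authorization" (a permanent header) is forwarded as
+// "grpcgateway-Authorization", while "Grpc-Metadata-Foo" is forwarded as "Foo".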
+func DefaultHeaderMatcher(key string) (string, bool) {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// This matcher will be called with each header in http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform the header before passing it to the gRPC context, the matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// This matcher will be called with each header in the response header metadata. If the matcher returns true, that header will be
+// passed to the HTTP response returned from the gateway. To transform the header before passing it to the response,
+// the matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify the gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as a general proto message defined by gRPC.
+// The response including body and status is not backward compatible with the default error handler.
+// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.protoErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithLastMatchWins returns a ServeMuxOption that will enable "last
+// match wins" behavior, where if multiple path patterns match a
+// request path, the last one defined in the .proto file will be used.
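+// By default, the first pattern that matches a request wins.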
+func WithLastMatchWins() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.lastMatchWins = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + streamErrorHandler: DefaultHTTPStreamErrorHandler, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if s.lastMatchWins { + s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) + } else { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + } +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
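+//
+// The last path component may carry a ":verb" suffix, which is split off and
+// matched separately; when no handler matches for r.Method, the other methods
+// are consulted so the mux can distinguish 405 Method Not Allowed from 404.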
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
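`ServeHTTP` above honors `X-HTTP-Method-Override` for form-encoded POSTs unless `WithDisablePathLengthFallback` is set. A fragment exercising that path with `net/http/httptest` (it reuses `mux` and the "/v1/example" pattern from the previous sketch and assumes `fmt`, `strings`, and `net/http/httptest` are imported):

```go
req := httptest.NewRequest("POST", "/v1/example", strings.NewReader(""))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("X-HTTP-Method-Override", "GET")
rec := httptest.NewRecorder()
mux.ServeHTTP(rec, req) // re-dispatched as GET; the GET handler runs
fmt.Println(rec.Code)   // 200 when the pattern matches
```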
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 00000000..09053695 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. + assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
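Before `NewPattern` itself (below), a sketch of the opcode encoding it accepts. The template "/v1/{name=*}" pushes a literal, pushes one wildcard component, concatenates it, and captures the result; the opcode constants come from the utilities package vendored later in this diff, and the path is hypothetical:

```go
pat := runtime.MustPattern(runtime.NewPattern(1,
	[]int{
		int(utilities.OpLitPush), 0, // match literal pool[0] == "v1"
		int(utilities.OpPush), 0,    // match any single component
		int(utilities.OpConcatN), 1, // join the top 1 stack entry
		int(utilities.OpCapture), 1, // bind it to pool[1] == "name"
	},
	[]string{"v1", "name"}, // constant pool
	"",                     // no verb
))

fmt.Println(pat.String())                           // /v1/{name=*}
fmt.Println(pat.Match([]string{"v1", "books"}, "")) // map[name:books] <nil>
```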
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 00000000..a3151e2a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
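A quick sketch of the pointer-returning parsers above, which back proto2 optional fields (the underlying `Bool`, `Float64`, and `String` converters live elsewhere in this package):

```go
b, _ := runtime.BoolP("true")    // *b == true
f, _ := runtime.Float64P("1.25") // *f == 1.25
s, _ := runtime.StringP("hello") // *s == "hello"
fmt.Println(*b, *f, *s)
```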
+func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. +func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 00000000..ca76324e --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,106 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a +// a proto struct used to represent error at the end of a stream. +type StreamErrorHandlerFunc func(context.Context, error) *StreamError + +// StreamError is the payload for the final message in a server stream in the event that the server returns an +// error after a response message has already been sent. +type StreamError internal.StreamError + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
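Per the note above, the handler is installed through the option rather than by assigning to HTTPError directly. A minimal sketch:

```go
mux := runtime.NewServeMux(
	runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
)
_ = mux // errors are now marshaled as Status proto payloads
```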
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backward compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	buf, merr := marshaler.Marshal(s.Proto())
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
+// default logic.
+//
+// It extracts the gRPC status from err if possible. The fields of the status are
+// used to populate the returned StreamError, and the HTTP status code is derived
+// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
+// gRPC status, an "Unknown" gRPC code and an "Internal Server Error" HTTP code are used.
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
+	grpcCode := codes.Unknown
+	grpcMessage := err.Error()
+	var grpcDetails []*any.Any
+	if s, ok := status.FromError(err); ok {
+		grpcCode = s.Code()
+		grpcMessage = s.Message()
+		grpcDetails = s.Proto().GetDetails()
+	}
+	httpCode := HTTPStatusFromCode(grpcCode)
+	return &StreamError{
+		GrpcCode:   int32(grpcCode),
+		HttpCode:   int32(httpCode),
+		Message:    grpcMessage,
+		HttpStatus: http.StatusText(httpCode),
+		Details:    grpcDetails,
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 00000000..5fbba5e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,391 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
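A sketch of `PopulateQueryParameters` (below) against a readily available message type. `FieldMask` is chosen only because it has a repeated string field named "paths"; it is imported from google.golang.org/genproto/protobuf/field_mask in this sketch, and the empty `DoubleArray` filter skips nothing:

```go
fm := &field_mask.FieldMask{}
err := runtime.PopulateQueryParameters(fm,
	url.Values{"paths": []string{"a.b", "c"}},
	utilities.NewDoubleArray(nil), // empty filter: no keys are ignored
)
fmt.Println(fm.Paths, err) // [a.b c] <nil>
```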
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + re, err := regexp.Compile("^(.*)\\[(.*)\\]$") + if err != nil { + return err + } + match := re.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +// It instantiates missing protobuf fields as it goes. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
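Before the internal field-resolution helpers below, a one-field sketch of `PopulateFieldFromPath` (above), using a well-known wrapper message from github.com/golang/protobuf/ptypes/wrappers chosen only for availability; `fmt` and `log` imports assumed:

```go
v := &wrappers.StringValue{}
if err := runtime.PopulateFieldFromPath(v, "value", "hello"); err != nil {
	log.Fatal(err) // lookup or conversion errors surface here
}
fmt.Println(v.Value) // hello
```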
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { + props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + if op, ok := props.OneofTypes[name]; ok { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + + for _, p := range props.Prop { + if p.OrigName == name { + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil + } + } + return reflect.Value{}, nil, nil +} + +func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) + } + + key, value := values[0], values[1] + keyType := f.Type().Key() + valueType := f.Type().Elem() + if f.IsNil() { + f.Set(reflect.MakeMap(f.Type())) + } + + keyConv, ok := convFromType[keyType.Kind()] + if !ok { + return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) + } + valueConv, ok := convFromType[valueType.Kind()] + if !ok { + return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) + } + + keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) + if err := keyV[1].Interface(); err != nil { + return err.(error) + } + valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := valueV[1].Interface(); err != nil { + return err.(error) + } + + f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) + + return nil +} + +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { + elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + + conv, ok := convFromType[elemType.Kind()] + if !ok { + return fmt.Errorf("unsupported field type %s", elemType) + } + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + } + return nil +} + +func populateField(f reflect.Value, value string, props *proto.Properties) error { + i := f.Addr().Interface() + + // Handle protobuf well known types + var name string + switch m := i.(type) { + case interface{ XXX_WellKnownType() string }: + name = m.XXX_WellKnownType() + case proto.Message: + const wktPrefix = "google.protobuf." 
+		if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
+			name = fullName[len(wktPrefix):]
+		}
+	}
+	switch name {
+	case "Timestamp":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		f.FieldByName("Seconds").SetInt(int64(t.Unix()))
+		f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
+		return nil
+	case "Duration":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		ns := d.Nanoseconds()
+		s := ns / 1e9
+		ns %= 1e9
+		f.FieldByName("Seconds").SetInt(s)
+		f.FieldByName("Nanos").SetInt(ns)
+		return nil
+	case "DoubleValue":
+		fallthrough
+	case "FloatValue":
+		float64Val, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return fmt.Errorf("bad %s: %s", name, value)
+		}
+		f.FieldByName("Value").SetFloat(float64Val)
+		return nil
+	case "Int64Value":
+		fallthrough
+	case "Int32Value":
+		int64Val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad %s: %s", name, value)
+		}
+		f.FieldByName("Value").SetInt(int64Val)
+		return nil
+	case "UInt64Value":
+		fallthrough
+	case "UInt32Value":
+		uint64Val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad %s: %s", name, value)
+		}
+		f.FieldByName("Value").SetUint(uint64Val)
+		return nil
+	case "BoolValue":
+		if value == "true" {
+			f.FieldByName("Value").SetBool(true)
+		} else if value == "false" {
+			f.FieldByName("Value").SetBool(false)
+		} else {
+			return fmt.Errorf("bad BoolValue: %s", value)
+		}
+		return nil
+	case "StringValue":
+		f.FieldByName("Value").SetString(value)
+		return nil
+	case "BytesValue":
+		bytesVal, err := base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			return fmt.Errorf("bad BytesValue: %s", value)
+		}
+		f.FieldByName("Value").SetBytes(bytesVal)
+		return nil
+	case "FieldMask":
+		p := f.FieldByName("Paths")
+		for _, v := range strings.Split(value, ",") {
+			if v != "" {
+				p.Set(reflect.Append(p, reflect.ValueOf(v)))
+			}
+		}
+		return nil
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 00000000..7109d793 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 00000000..cf79a4d5 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 00000000..dfe7de48
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to stack
+	OpPush
+	// OpLitPush pushes a component to stack if it matches to the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes it to stack
+	OpPushM
+	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 00000000..6dd38546
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 00000000..c2b7b30d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a Double Array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
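A sketch of the trie in use once `NewDoubleArray` (below) has built it, matching how the gateway filters query parameters already bound from the path or body; the parameter names are hypothetical:

```go
da := utilities.NewDoubleArray([][]string{
	{"page"},
	{"page", "size"},
})
fmt.Println(da.HasCommonPrefix([]string{"page", "size", "x"})) // true: {"page"} is a registered prefix
fmt.Println(da.HasCommonPrefix([]string{"filter"}))            // false: nothing registered matches
```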
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + var encoded []int + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + if k < len(sj) { + return true + } + return false +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. +func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/hashicorp/atlas-go/LICENSE b/vendor/github.com/hashicorp/atlas-go/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+     If you create software not governed by this License, and you want to
+     create a new license for such software, you may create and use a modified
+     version of this License if you rename the license and remove any
+     references to the name of the license steward (except to note that such
+     modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+     If You choose to distribute Source Code Form that is Incompatible With
+     Secondary Licenses under the terms of this version of the License, the
+     notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+     This Source Code Form is subject to the
+     terms of the Mozilla Public License, v.
+     2.0. If a copy of the MPL was not
+     distributed with this file, You can
+     obtain one at
+     http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+     This Source Code Form is “Incompatible
+     With Secondary Licenses”, as defined by
+     the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/atlas-go/archive/archive.go b/vendor/github.com/hashicorp/atlas-go/archive/archive.go
new file mode 100644
index 00000000..91310ad5
--- /dev/null
+++ b/vendor/github.com/hashicorp/atlas-go/archive/archive.go
@@ -0,0 +1,517 @@
+// Package archive helps create archives in a format that
+// Atlas expects with its various upload endpoints.
+package archive
+
+import (
+	"archive/tar"
+	"bufio"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// Archive is the resulting archive. The archive data is generally streamed,
+// so the io.ReadCloser can be used to apply backpressure to the archive
+// progress and avoid memory pressure.
+type Archive struct {
+	io.ReadCloser
+
+	Size     int64
+	Metadata map[string]string
+}
+
+// ArchiveOpts are the options for defining how the archive will be built.
+type ArchiveOpts struct {
+	// Exclude and Include are filters of files to include/exclude in
+	// the archive when creating it from a directory. These filters should
+	// be relative to the packaging directory and should be basic glob
+	// patterns.
+	Exclude []string
+	Include []string
+
+	// Extra is a mapping of extra files to include within the archive. The
+	// key should be the path within the archive and the value should be
+	// an absolute path to the file to put into the archive. These extra
+	// files will override any other files in the archive.
+	Extra map[string]string
+
+	// VCS, if true, will detect and use a VCS system to determine what
+	// files to include in the archive.
+	VCS bool
+}
+
+// IsSet says whether any options were set.
+func (o *ArchiveOpts) IsSet() bool {
+	return len(o.Exclude) > 0 || len(o.Include) > 0 || o.VCS
+}
+
+// Constants related to setting special values for Extra in ArchiveOpts.
+const (
+	// ExtraEntryDir just creates the Extra key as a directory entry.
+	ExtraEntryDir = ""
+)
+
+// CreateArchive takes the given path and ArchiveOpts and archives it.
+//
+// The archive will be fully completed and put into a temporary file.
+// This must be done to retrieve the content length of the archive which +// is needed for almost all operations involving archives with Atlas. Because +// of this, sufficient disk space will be required to buffer the archive. +func CreateArchive(path string, opts *ArchiveOpts) (*Archive, error) { + log.Printf("[INFO] creating archive from %s", path) + + // Dereference any symlinks and determine the real path and info + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + if fi.Mode()&os.ModeSymlink != 0 { + path, fi, err = readLinkFull(path, fi) + if err != nil { + return nil, err + } + } + + // Windows + path = filepath.ToSlash(path) + + // Direct file paths cannot have archive options + if !fi.IsDir() && opts.IsSet() { + return nil, fmt.Errorf( + "options such as exclude, include, and VCS can't be set when " + + "the path is a file.") + } + + if fi.IsDir() { + return archiveDir(path, opts) + } else { + return archiveFile(path) + } +} + +func archiveFile(path string) (*Archive, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + + if _, err := gzip.NewReader(f); err == nil { + // Reset the read offset for future reading + if _, err := f.Seek(0, 0); err != nil { + f.Close() + return nil, err + } + + // Get the file info for the size + fi, err := f.Stat() + if err != nil { + f.Close() + return nil, err + } + + // This is a gzip file, let it through. + return &Archive{ReadCloser: f, Size: fi.Size()}, nil + } + + // Close the file, no use for it anymore + f.Close() + + // We have a single file that is not gzipped. Compress it. + path, err = filepath.Abs(path) + if err != nil { + return nil, err + } + + // Act like we're compressing a directory, but only include this one + // file. + return archiveDir(filepath.Dir(path), &ArchiveOpts{ + Include: []string{filepath.Base(path)}, + }) +} + +func archiveDir(root string, opts *ArchiveOpts) (*Archive, error) { + + var vcsInclude []string + var metadata map[string]string + if opts.VCS { + var err error + + if err = vcsPreflight(root); err != nil { + return nil, err + } + + vcsInclude, err = vcsFiles(root) + if err != nil { + return nil, err + } + + metadata, err = vcsMetadata(root) + if err != nil { + return nil, err + } + } + + // Make sure the root path is absolute + root, err := filepath.Abs(root) + if err != nil { + return nil, err + } + + // Create the temporary file that we'll send the archive data to. + archiveF, err := ioutil.TempFile("", "atlas-archive") + if err != nil { + return nil, err + } + + // Create the wrapper for the result which will automatically + // remove the temporary file on close. + archiveWrapper := &readCloseRemover{F: archiveF} + + // Buffer the writer so that we can push as much data to disk at + // a time as possible. 4M should be good. + bufW := bufio.NewWriterSize(archiveF, 4096*1024) + + // Gzip compress all the output data + gzipW := gzip.NewWriter(bufW) + + // Tar the file contents + tarW := tar.NewWriter(gzipW) + + // First, walk the path and do the normal files + werr := filepath.Walk(root, copyDirWalkFn( + tarW, root, "", opts, vcsInclude)) + if werr == nil { + // If that succeeded, handle the extra files + werr = copyExtras(tarW, opts.Extra) + } + + // Attempt to close all the things. If we get an error on the way + // and we haven't had an error yet, then record that as the critical + // error. But we still try to close everything. 
+ + // Close the tar writer + if err := tarW.Close(); err != nil && werr == nil { + werr = err + } + + // Close the gzip writer + if err := gzipW.Close(); err != nil && werr == nil { + werr = err + } + + // Flush the buffer + if err := bufW.Flush(); err != nil && werr == nil { + werr = err + } + + // If we had an error, then close the file (removing it) and + // return the error. + if werr != nil { + archiveWrapper.Close() + return nil, werr + } + + // Seek to the beginning + if _, err := archiveWrapper.F.Seek(0, 0); err != nil { + archiveWrapper.Close() + return nil, err + } + + // Get the file information so we can get the size + fi, err := archiveWrapper.F.Stat() + if err != nil { + archiveWrapper.Close() + return nil, err + } + + return &Archive{ + ReadCloser: archiveWrapper, + Size: fi.Size(), + Metadata: metadata, + }, nil +} + +func copyDirWalkFn( + tarW *tar.Writer, root string, prefix string, + opts *ArchiveOpts, vcsInclude []string) filepath.WalkFunc { + + errFunc := func(err error) filepath.WalkFunc { + return func(string, os.FileInfo, error) error { + return err + } + } + + // Windows + root = filepath.ToSlash(root) + + var includeMap map[string]struct{} + + // If we have an include/exclude pattern set, then setup the lookup + // table to determine what we want to include. + if opts != nil && len(opts.Include) > 0 { + includeMap = make(map[string]struct{}) + for _, pattern := range opts.Include { + matches, err := filepath.Glob(filepath.Join(root, pattern)) + if err != nil { + return errFunc(fmt.Errorf( + "error checking include glob '%s': %s", + pattern, err)) + } + + for _, path := range matches { + // Windows + path = filepath.ToSlash(path) + subpath, err := filepath.Rel(root, path) + subpath = filepath.ToSlash(subpath) + + if err != nil { + return errFunc(err) + } + + for { + includeMap[subpath] = struct{}{} + subpath = filepath.Dir(subpath) + if subpath == "." { + break + } + } + } + } + } + + return func(path string, info os.FileInfo, err error) error { + path = filepath.ToSlash(path) + + if err != nil { + return err + } + + // Get the relative path from the path since it contains the root + // plus the path. + subpath, err := filepath.Rel(root, path) + if err != nil { + return err + } + if subpath == "." { + return nil + } + if prefix != "" { + subpath = filepath.Join(prefix, subpath) + } + // Windows + subpath = filepath.ToSlash(subpath) + + // If we have a list of VCS files, check that first + skip := false + if len(vcsInclude) > 0 { + skip = true + for _, f := range vcsInclude { + if f == subpath { + skip = false + break + } + + if info.IsDir() && strings.HasPrefix(f, subpath+"/") { + skip = false + break + } + } + } + + // If include is present, we only include what is listed + if len(includeMap) > 0 { + if _, ok := includeMap[subpath]; !ok { + skip = true + } + } + + // If exclude, it is one last gate to excluding files + if opts != nil { + for _, exclude := range opts.Exclude { + match, err := filepath.Match(exclude, subpath) + if err != nil { + return err + } + if match { + skip = true + break + } + } + } + + // If we have to skip this file, then skip it, properly skipping + // children if we're a directory. + if skip { + if info.IsDir() { + return filepath.SkipDir + } + + return nil + } + + // If this is a symlink, then we need to get the symlink target + // rather than the symlink itself. + if info.Mode()&os.ModeSymlink != 0 { + target, info, err := readLinkFull(path, info) + if err != nil { + return err + } + + // Copy the concrete entry for this path. 
This will either + // be the file itself or just a directory entry. + if err := copyConcreteEntry(tarW, subpath, target, info); err != nil { + return err + } + + if info.IsDir() { + return filepath.Walk(target, copyDirWalkFn( + tarW, target, subpath, opts, vcsInclude)) + } + // return now so that we don't try to copy twice + return nil + } + + return copyConcreteEntry(tarW, subpath, path, info) + } +} + +func copyConcreteEntry( + tarW *tar.Writer, entry string, + path string, info os.FileInfo) error { + // Windows + path = filepath.ToSlash(path) + + // Build the file header for the tar entry + header, err := tar.FileInfoHeader(info, path) + if err != nil { + return fmt.Errorf( + "failed creating archive header: %s", path) + } + + // Modify the header to properly be the full entry name + header.Name = entry + if info.IsDir() { + header.Name += "/" + } + + // Write the header first to the archive. + if err := tarW.WriteHeader(header); err != nil { + return fmt.Errorf( + "failed writing archive header: %s", path) + } + + // If it is a directory, then we're done (no body to write) + if info.IsDir() { + return nil + } + + // Open the real file to write the data + f, err := os.Open(path) + if err != nil { + return fmt.Errorf( + "failed opening file '%s' to write compressed archive.", path) + } + defer f.Close() + + if _, err = io.Copy(tarW, f); err != nil { + return fmt.Errorf( + "failed copying file to archive: %s, %s", path, err) + } + + return nil +} + +func copyExtras(w *tar.Writer, extra map[string]string) error { + var tmpDir string + defer func() { + if tmpDir != "" { + os.RemoveAll(tmpDir) + } + }() + + for entry, path := range extra { + // If the path is empty, then we set it to a generic empty directory + if path == "" { + // If tmpDir is still empty, then we create an empty dir + if tmpDir == "" { + td, err := ioutil.TempDir("", "archive") + if err != nil { + return err + } + + tmpDir = td + } + + path = tmpDir + } + + info, err := os.Stat(path) + if err != nil { + return err + } + + // No matter what, write the entry. If this is a directory, + // it'll just write the directory header. + if err := copyConcreteEntry(w, entry, path, info); err != nil { + return err + } + + // If this is a directory, then we walk the internal contents + // and copy those as well. + if info.IsDir() { + err := filepath.Walk(path, copyDirWalkFn( + w, path, entry, nil, nil)) + if err != nil { + return err + } + } + } + + return nil +} + +func readLinkFull(path string, info os.FileInfo) (string, os.FileInfo, error) { + target, err := filepath.EvalSymlinks(path) + if err != nil { + return "", nil, err + } + + target, err = filepath.Abs(target) + if err != nil { + return "", nil, err + } + + fi, err := os.Lstat(target) + if err != nil { + return "", nil, err + } + + return target, fi, nil +} + +// readCloseRemover is an io.ReadCloser implementation that will remove +// the file on Close(). We use this to clean up our temporary file for +// the archive. +type readCloseRemover struct { + F *os.File +} + +func (r *readCloseRemover) Read(p []byte) (int, error) { + return r.F.Read(p) +} + +func (r *readCloseRemover) Close() error { + // First close the file + err := r.F.Close() + + // Next make sure to remove it, or at least try, regardless of error + // above. 
+ os.Remove(r.F.Name()) + + return err +} diff --git a/vendor/github.com/hashicorp/atlas-go/archive/vcs.go b/vendor/github.com/hashicorp/atlas-go/archive/vcs.go new file mode 100644 index 00000000..479a5bcd --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/archive/vcs.go @@ -0,0 +1,365 @@ +package archive + +import ( + "bufio" + "bytes" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + + version "github.com/hashicorp/go-version" +) + +// VCS is a struct that explains how to get the file list for a given +// VCS. +type VCS struct { + Name string + + // Detect is a list of files/folders that if they exist, signal that + // this VCS is the VCS in use. + Detect []string + + // Files returns the files that are under version control for the + // given path. + Files VCSFilesFunc + + // Metadata returns arbitrary metadata about the underlying VCS for the + // given path. + Metadata VCSMetadataFunc + + // Preflight is a function to run before looking for VCS files. + Preflight VCSPreflightFunc +} + +// VCSList is the list of VCS we recognize. +var VCSList = []*VCS{ + &VCS{ + Name: "git", + Detect: []string{".git/"}, + Preflight: gitPreflight, + Files: vcsFilesCmd("git", "ls-files"), + Metadata: gitMetadata, + }, + &VCS{ + Name: "hg", + Detect: []string{".hg/"}, + Files: vcsTrimCmd(vcsFilesCmd("hg", "locate", "-f", "--include", ".")), + }, + &VCS{ + Name: "svn", + Detect: []string{".svn/"}, + Files: vcsFilesCmd("svn", "ls"), + }, +} + +// VCSFilesFunc is the callback invoked to return the files in the VCS. +// +// The return value should be paths relative to the given path. +type VCSFilesFunc func(string) ([]string, error) + +// VCSMetadataFunc is the callback invoked to get arbitrary information about +// the current VCS. +// +// The return value should be a map of key-value pairs. +type VCSMetadataFunc func(string) (map[string]string, error) + +// VCSPreflightFunc is a function that runs before VCS detection to be +// configured by the user. It may be used to check if pre-requisites (like the +// actual VCS) are installed or that a program is at the correct version. If an +// error is returned, the VCS will not be processed and the error will be +// returned up the stack. +// +// The given argument is the path where the VCS is running. +type VCSPreflightFunc func(string) error + +// vcsDetect detects the VCS that is used for path. +func vcsDetect(path string) (*VCS, error) { + dir := path + for { + for _, v := range VCSList { + for _, f := range v.Detect { + check := filepath.Join(dir, f) + if _, err := os.Stat(check); err == nil { + return v, nil + } + } + } + lastDir := dir + dir = filepath.Dir(dir) + if dir == lastDir { + break + } + } + + return nil, fmt.Errorf("no VCS found for path: %s", path) +} + +// vcsPreflight returns the metadata for the VCS directory path. +func vcsPreflight(path string) error { + vcs, err := vcsDetect(path) + if err != nil { + return fmt.Errorf("error detecting VCS: %s", err) + } + + if vcs.Preflight != nil { + return vcs.Preflight(path) + } + + return nil +} + +// vcsFiles returns the files for the VCS directory path. +func vcsFiles(path string) ([]string, error) { + vcs, err := vcsDetect(path) + if err != nil { + return nil, fmt.Errorf("error detecting VCS: %s", err) + } + + if vcs.Files != nil { + return vcs.Files(path) + } + + return nil, nil +} + +// vcsFilesCmd creates a Files-compatible function that reads the files +// by executing the command in the repository path and returning each +// line in stdout. 
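Taken together, `CreateArchive`, `ArchiveOpts`, and the `readCloseRemover` above yield a streamable tar.gz backed by a self-deleting temp file. Below is a minimal usage sketch, not part of the vendored code: the `./config` path and exclude pattern are hypothetical, and VCS mode assumes a git/hg/svn checkout is actually present.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"

	"github.com/hashicorp/atlas-go/archive"
)

func main() {
	// "./config" is a hypothetical directory; Exclude takes basic glob
	// patterns relative to it, per ArchiveOpts above.
	a, err := archive.CreateArchive("./config", &archive.ArchiveOpts{
		Exclude: []string{".terraform/*"},
		VCS:     true, // only include files tracked by the detected VCS
	})
	if err != nil {
		log.Fatal(err)
	}
	// The Archive is an io.ReadCloser over a temp file; Close removes it.
	defer a.Close()

	// Stream the archive somewhere (here, discarded); Size is known up
	// front because the data was buffered to disk first.
	n, err := io.Copy(ioutil.Discard, a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("archived %d of %d bytes, metadata: %v\n", n, a.Size, a.Metadata)
}
```

Buffering to a temp file rather than streaming in memory is what makes `Archive.Size` available before upload, which the Atlas endpoints require for Content-Length.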
+func vcsFilesCmd(args ...string) VCSFilesFunc { + return func(path string) ([]string, error) { + var stderr, stdout bytes.Buffer + + cmd := exec.Command(args[0], args[1:]...) + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf( + "error executing %s: %s", + strings.Join(args, " "), + err) + } + + // Read each line of output as a path + result := make([]string, 0, 100) + scanner := bufio.NewScanner(&stdout) + for scanner.Scan() { + result = append(result, scanner.Text()) + } + + // Always use *nix-style paths (for Windows) + for idx, value := range result { + result[idx] = filepath.ToSlash(value) + } + + return result, nil + } +} + +// vcsTrimCmd trims the prefix from the paths returned by another VCSFilesFunc. +// This should be used to wrap another function if the return value is known +// to have full paths rather than relative paths +func vcsTrimCmd(f VCSFilesFunc) VCSFilesFunc { + return func(path string) ([]string, error) { + absPath, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf( + "error expanding VCS path: %s", err) + } + + // Now that we have the root path, get the inner files + fs, err := f(path) + if err != nil { + return nil, err + } + + // Trim the root path from the files + result := make([]string, 0, len(fs)) + for _, f := range fs { + if !strings.HasPrefix(f, absPath) { + continue + } + + f, err = filepath.Rel(absPath, f) + if err != nil { + return nil, fmt.Errorf( + "error determining path: %s", err) + } + + result = append(result, f) + } + + return result, nil + } +} + +// vcsMetadata returns the metadata for the VCS directory path. +func vcsMetadata(path string) (map[string]string, error) { + vcs, err := vcsDetect(path) + if err != nil { + return nil, fmt.Errorf("error detecting VCS: %s", err) + } + + if vcs.Metadata != nil { + return vcs.Metadata(path) + } + + return nil, nil +} + +const ignorableDetachedHeadError = "HEAD is not a symbolic ref" + +// gitBranch gets and returns the current git branch for the Git repository +// at the given path. It is assumed that the VCS is git. +func gitBranch(path string) (string, error) { + var stderr, stdout bytes.Buffer + + cmd := exec.Command("git", "symbolic-ref", "--short", "HEAD") + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + if strings.Contains(stderr.String(), ignorableDetachedHeadError) { + return "", nil + } else { + return "", + fmt.Errorf("error getting git branch: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } + } + + branch := strings.TrimSpace(stdout.String()) + + return branch, nil +} + +// gitCommit gets the SHA of the latest commit for the Git repository at the +// given path. It is assumed that the VCS is git. +func gitCommit(path string) (string, error) { + var stderr, stdout bytes.Buffer + + cmd := exec.Command("git", "log", "-n1", "--pretty=format:%H") + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return "", fmt.Errorf("error getting git commit: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } + + commit := strings.TrimSpace(stdout.String()) + + return commit, nil +} + +// gitRemotes gets and returns a map of all remotes for the Git repository. The +// map key is the name of the remote of the format "remote.NAME" and the value +// is the endpoint for the remote. It is assumed that the VCS is git. 
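Because `VCSList` is an exported package variable, callers can in principle register additional systems alongside the built-in git/hg/svn entries. The sketch below illustrates that under stated assumptions: Fossil and its `.fslckout` marker file are purely illustrative, and whitespace splitting is a simplification of the line-by-line scanning `vcsFilesCmd` does.

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"

	"github.com/hashicorp/atlas-go/archive"
)

func main() {
	// Hypothetical: teach the archiver about Fossil checkouts. Detection
	// mirrors the built-in entries: if ".fslckout" exists in the directory
	// (or a parent), this VCS supplies the file list for the archive.
	archive.VCSList = append(archive.VCSList, &archive.VCS{
		Name:   "fossil",
		Detect: []string{".fslckout"},
		Files: func(path string) ([]string, error) {
			cmd := exec.Command("fossil", "ls")
			cmd.Dir = path
			out, err := cmd.Output()
			if err != nil {
				return nil, err
			}
			// Simplified: splits on any whitespace rather than scanning
			// stdout line by line as vcsFilesCmd does above.
			return strings.Fields(string(out)), nil
		},
	})
	fmt.Println("registered VCS handlers:", len(archive.VCSList))
}
```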
+func gitRemotes(path string) (map[string]string, error) { + var stderr, stdout bytes.Buffer + + cmd := exec.Command("git", "remote", "-v") + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("error getting git remotes: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } + + // Read each line of output as a remote + result := make(map[string]string) + scanner := bufio.NewScanner(&stdout) + for scanner.Scan() { + line := scanner.Text() + split := strings.Split(line, "\t") + + if len(split) < 2 { + return nil, fmt.Errorf("invalid response from git remote: %s", stdout.String()) + } + + remote := fmt.Sprintf("remote.%s", strings.TrimSpace(split[0])) + if _, ok := result[remote]; !ok { + // https://github.com/foo/bar.git (fetch) #=> https://github.com/foo/bar.git + urlSplit := strings.Split(split[1], " ") + result[remote] = strings.TrimSpace(urlSplit[0]) + } + } + + return result, nil +} + +// gitPreflight is the pre-flight command that runs for Git-based VCSs +func gitPreflight(path string) error { + var stderr, stdout bytes.Buffer + + cmd := exec.Command("git", "--version") + cmd.Dir = path + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("error getting git version: %s\nstdout: %s\nstderr: %s", + err, stdout.String(), stderr.String()) + } + + // Check if the output is valid + output := strings.Split(strings.TrimSpace(stdout.String()), " ") + if len(output) < 1 { + log.Printf("[WARN] could not extract version output from Git") + return nil + } + + // Parse the version + gitv, err := version.NewVersion(output[len(output)-1]) + if err != nil { + log.Printf("[WARN] could not parse version output from Git") + return nil + } + + constraint, err := version.NewConstraint("> 1.8") + if err != nil { + log.Printf("[WARN] could not create version constraint to check") + return nil + } + if !constraint.Check(gitv) { + return fmt.Errorf("git version (%s) is too old, please upgrade", gitv.String()) + } + + return nil +} + +// gitMetadata is the function to parse and return Git metadata +func gitMetadata(path string) (map[string]string, error) { + // Future-self note: Git is NOT threadsafe, so we cannot run these + // operations in go routines or else you're going to have a really really + // bad day and Panda.State == "Sad" :( + + branch, err := gitBranch(path) + if err != nil { + return nil, err + } + + commit, err := gitCommit(path) + if err != nil { + return nil, err + } + + remotes, err := gitRemotes(path) + if err != nil { + return nil, err + } + + // Make the return result (we already know the size) + result := make(map[string]string, 2+len(remotes)) + + result["branch"] = branch + result["commit"] = commit + for remote, value := range remotes { + result[remote] = value + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/application.go b/vendor/github.com/hashicorp/atlas-go/v1/application.go new file mode 100644 index 00000000..42cf8820 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/application.go @@ -0,0 +1,164 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// appWrapper is the API wrapper since the server wraps the resulting object. +type appWrapper struct { + Application *App `json:"application"` +} + +// App represents a single instance of an application on the Atlas server. 
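`gitPreflight`'s tolerant version gate (warn and continue when the tooling output can't be parsed, fail only on a confirmed-too-old version) is a reusable pattern with `hashicorp/go-version`. A standalone sketch with a hard-coded version string standing in for parsed `git --version` output:

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// "2.39.1" is a placeholder for whatever the tool reported.
	gitv, err := version.NewVersion("2.39.1")
	if err != nil {
		log.Fatal(err)
	}

	// Same constraint shape as gitPreflight above.
	constraint, err := version.NewConstraint("> 1.8")
	if err != nil {
		log.Fatal(err)
	}

	if !constraint.Check(gitv) {
		log.Fatalf("git version (%s) is too old, please upgrade", gitv)
	}
	fmt.Println("version check passed")
}
```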
+type App struct { + // User is the namespace (username or organization) under which the + // Atlas application resides + User string `json:"username"` + + // Name is the name of the application + Name string `json:"name"` +} + +// Slug returns the slug format for this App (User/Name) +func (a *App) Slug() string { + return fmt.Sprintf("%s/%s", a.User, a.Name) +} + +// App gets the App by the given user space and name. In the event the App is +// not found (404), or for any other non-200 responses, an error is returned. +func (c *Client) App(user, name string) (*App, error) { + log.Printf("[INFO] getting application %s/%s", user, name) + + endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var app App + if err := decodeJSON(response, &app); err != nil { + return nil, err + } + + return &app, nil +} + +// CreateApp creates a new App under the given user with the given name. If the +// App is created successfully, it is returned. If the server returns any +// errors, an error is returned. +func (c *Client) CreateApp(user, name string) (*App, error) { + log.Printf("[INFO] creating application %s/%s", user, name) + + body, err := json.Marshal(&appWrapper{&App{ + User: user, + Name: name, + }}) + if err != nil { + return nil, err + } + + endpoint := "/api/v1/vagrant/applications" + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var app App + if err := decodeJSON(response, &app); err != nil { + return nil, err + } + + return &app, nil +} + +// appVersion represents a specific version of an App in Atlas. It is actually +// an upload container/wrapper. +type appVersion struct { + UploadPath string `json:"upload_path"` + Token string `json:"token"` + Version uint64 `json:"version"` +} + +// appMetadataWrapper is a wrapper around a map the prefixes the json key with +// "metadata" when marshalled to format requests to the API properly. +type appMetadataWrapper struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// UploadApp creates and uploads a new version for the App. If the server does not +// find the application, an error is returned. If the server does not accept the +// data, an error is returned. +// +// It is the responsibility of the caller to create a properly-formed data +// object; this method blindly passes along the contents of the io.Reader. +func (c *Client) UploadApp(app *App, metadata map[string]interface{}, + data io.Reader, size int64) (uint64, error) { + + log.Printf("[INFO] uploading application %s (%d bytes) with metadata %q", + app.Slug(), size, metadata) + + endpoint := fmt.Sprintf("/api/v1/vagrant/applications/%s/%s/versions", + app.User, app.Name) + + // If metadata was given, setup the RequestOptions to pass in the metadata + // with the request. + var ro *RequestOptions + if metadata != nil { + // wrap the struct into the correct JSON format + wrapper := struct { + Application *appMetadataWrapper `json:"application"` + }{ + &appMetadataWrapper{metadata}, + } + m, err := json.Marshal(wrapper) + if err != nil { + return 0, err + } + + // Create the request options. 
+ ro = &RequestOptions{ + Body: bytes.NewReader(m), + BodyLength: int64(len(m)), + } + } + + request, err := c.Request("POST", endpoint, ro) + if err != nil { + return 0, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return 0, err + } + + var av appVersion + if err := decodeJSON(response, &av); err != nil { + return 0, err + } + + if err := c.putFile(av.UploadPath, data, size); err != nil { + return 0, err + } + + return av.Version, nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/artifact.go b/vendor/github.com/hashicorp/atlas-go/v1/artifact.go new file mode 100644 index 00000000..0a8dc4b0 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/artifact.go @@ -0,0 +1,248 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" + "net/url" +) + +// Artifact represents a single instance of an artifact. +type Artifact struct { + // User and name are self-explanatory. Tag is the combination + // of both into "username/name" + User string `json:"username"` + Name string `json:"name"` + Tag string `json:",omitempty"` +} + +// ArtifactVersion represents a single version of an artifact. +type ArtifactVersion struct { + User string `json:"username"` + Name string `json:"name"` + Tag string `json:",omitempty"` + Type string `json:"artifact_type"` + ID string `json:"id"` + Version int `json:"version"` + Metadata map[string]string `json:"metadata"` + File bool `json:"file"` + Slug string `json:"slug"` + + UploadPath string `json:"upload_path"` + UploadToken string `json:"upload_token"` +} + +// ArtifactSearchOpts are the options used to search for an artifact. +type ArtifactSearchOpts struct { + User string + Name string + Type string + + Build string + Version string + Metadata map[string]string +} + +// UploadArtifactOpts are the options used to upload an artifact. +type UploadArtifactOpts struct { + User string + Name string + Type string + ID string + File io.Reader + FileSize int64 + Metadata map[string]string + BuildID int + CompileID int +} + +// MarshalJSON converts the UploadArtifactOpts into a JSON struct. +func (o *UploadArtifactOpts) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]interface{}{ + "artifact_version": map[string]interface{}{ + "id": o.ID, + "file": o.File != nil, + "metadata": o.Metadata, + "build_id": o.BuildID, + "compile_id": o.CompileID, + }, + }) +} + +// This is the value that should be used for metadata in ArtifactSearchOpts +// if you don't care what the value is. +const MetadataAnyValue = "943febbf-589f-401b-8f25-58f6d8786848" + +// Artifact finds the Atlas artifact by the given name and returns it. Any +// errors that occur are returned, including ErrAuth and ErrNotFound special +// exceptions which the user may want to handle separately. +func (c *Client) Artifact(user, name string) (*Artifact, error) { + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var aw artifactWrapper + if err := decodeJSON(response, &aw); err != nil { + return nil, err + } + + return aw.Artifact, nil +} + +// ArtifactSearch searches Atlas for the given ArtifactSearchOpts and returns +// a slice of ArtifactVersions. 
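The `App`, `CreateApp`, and `UploadApp` methods compose into a natural "get or create, then upload" flow. A hedged sketch follows; the `acme/webapp` slug and the in-memory payload are hypothetical, and `DefaultClient` will honor `ATLAS_ADDRESS`/`ATLAS_TOKEN` from the environment as documented in client.go later in this diff.

```go
package main

import (
	"log"
	"strings"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client := atlas.DefaultClient()

	// Look the application up, creating it on a 404. "acme"/"webapp"
	// is a hypothetical user/name pair.
	app, err := client.App("acme", "webapp")
	if err == atlas.ErrNotFound {
		app, err = client.CreateApp("acme", "webapp")
	}
	if err != nil {
		log.Fatal(err)
	}

	// UploadApp passes the reader through blindly, so any payload the
	// server accepts will do; here a fake in-memory one.
	data := strings.NewReader("fake archive bytes")
	v, err := client.UploadApp(app, map[string]interface{}{"env": "prod"}, data, int64(data.Len()))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %s version %d", app.Slug(), v)
}
```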
+func (c *Client) ArtifactSearch(opts *ArtifactSearchOpts) ([]*ArtifactVersion, error) { + log.Printf("[INFO] searching artifacts: %#v", opts) + + params := make(map[string]string) + if opts.Version != "" { + params["version"] = opts.Version + } + if opts.Build != "" { + params["build"] = opts.Build + } + + i := 1 + for k, v := range opts.Metadata { + prefix := fmt.Sprintf("metadata.%d.", i) + params[prefix+"key"] = k + if v != MetadataAnyValue { + params[prefix+"value"] = v + } + + i++ + } + + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/search", + opts.User, opts.Name, opts.Type) + request, err := c.Request("GET", endpoint, &RequestOptions{ + Params: params, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var w artifactSearchWrapper + if err := decodeJSON(response, &w); err != nil { + return nil, err + } + + return w.Versions, nil +} + +// CreateArtifact creates and returns a new Artifact in Atlas. Any errors that +// occur are returned. +func (c *Client) CreateArtifact(user, name string) (*Artifact, error) { + log.Printf("[INFO] creating artifact: %s/%s", user, name) + body, err := json.Marshal(&artifactWrapper{&Artifact{ + User: user, + Name: name, + }}) + if err != nil { + return nil, err + } + + endpoint := "/api/v1/artifacts" + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var aw artifactWrapper + if err := decodeJSON(response, &aw); err != nil { + return nil, err + } + + return aw.Artifact, nil +} + +// ArtifactFileURL is a helper method for getting the URL for an ArtifactVersion +// from the Client. +func (c *Client) ArtifactFileURL(av *ArtifactVersion) (*url.URL, error) { + if !av.File { + return nil, nil + } + + u := *c.URL + u.Path = fmt.Sprintf("/api/v1/artifacts/%s/%s/%s/%d/file", + av.User, av.Name, av.Type, av.Version) + return &u, nil +} + +// UploadArtifact streams the upload of a file on disk using the given +// UploadArtifactOpts. Any errors that occur are returned.
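A usage sketch of `ArtifactSearch` together with `MetadataAnyValue` and `ArtifactFileURL`; the user, artifact name, type, and metadata values are hypothetical. `MetadataAnyValue` makes the search match on key presence alone, since the key is sent without an accompanying value.

```go
package main

import (
	"fmt"
	"log"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client := atlas.DefaultClient()

	// Hypothetical search: any artifact version built from this commit,
	// regardless of which region it was built for.
	versions, err := client.ArtifactSearch(&atlas.ArtifactSearchOpts{
		User: "acme",
		Name: "webapp",
		Type: "amazon.image",
		Metadata: map[string]string{
			"commit": "abc123",
			"region": atlas.MetadataAnyValue, // match on key presence only
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, v := range versions {
		fmt.Printf("v%d %s (id=%s)\n", v.Version, v.Slug, v.ID)
		// ArtifactFileURL returns nil when no file is attached, so
		// guard on File before building the download URL.
		if v.File {
			if u, err := client.ArtifactFileURL(v); err == nil {
				fmt.Println("  download:", u)
			}
		}
	}
}
```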
+func (c *Client) UploadArtifact(opts *UploadArtifactOpts) (*ArtifactVersion, error) { + log.Printf("[INFO] uploading artifact: %s/%s (%s)", opts.User, opts.Name, opts.Type) + + endpoint := fmt.Sprintf("/api/v1/artifacts/%s/%s/%s", + opts.User, opts.Name, opts.Type) + + body, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var av ArtifactVersion + if err := decodeJSON(response, &av); err != nil { + return nil, err + } + + if opts.File != nil { + if err := c.putFile(av.UploadPath, opts.File, opts.FileSize); err != nil { + return nil, err + } + } + + return &av, nil +} + +type artifactWrapper struct { + Artifact *Artifact `json:"artifact"` +} + +type artifactSearchWrapper struct { + Versions []*ArtifactVersion +} + +type artifactVersionWrapper struct { + Version *ArtifactVersion +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/authentication.go b/vendor/github.com/hashicorp/atlas-go/v1/authentication.go new file mode 100644 index 00000000..613eaefc --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/authentication.go @@ -0,0 +1,88 @@ +package atlas + +import ( + "fmt" + "log" + "net/url" + "strings" +) + +// Login accepts a username and password as string arguments. Both username and +// password must be non-empty values. Atlas does not permit +// passwordless authentication. +// +// If authentication is unsuccessful, an error is returned with the body of the +// error containing the server's response. +// +// If authentication is successful, this method sets the Token value on the +// Client and returns the Token as a string. +func (c *Client) Login(username, password string) (string, error) { + log.Printf("[INFO] logging in user %s", username) + + if len(username) == 0 { + return "", fmt.Errorf("client: missing username") + } + + if len(password) == 0 { + return "", fmt.Errorf("client: missing password") + } + + // Make a request + request, err := c.Request("POST", "/api/v1/authenticate", &RequestOptions{ + Body: strings.NewReader(url.Values{ + "user[login]": []string{username}, + "user[password]": []string{password}, + "user[description]": []string{"Created by the Atlas Go Client"}, + }.Encode()), + Headers: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + }) + if err != nil { + return "", err + } + + // Make the request + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return "", err + } + + // Decode the body + var tResponse struct{ Token string } + if err := decodeJSON(response, &tResponse); err != nil { + return "", err + } + + // Set the token + log.Printf("[DEBUG] setting atlas token (%s)", maskString(tResponse.Token)) + c.Token = tResponse.Token + + // Return the token + return c.Token, nil +} + +// Verify verifies that authentication and communication with Atlas +// is properly functioning. +func (c *Client) Verify() error { + log.Printf("[INFO] verifying authentication") + + request, err := c.Request("GET", "/api/v1/authenticate", nil) + if err != nil { + return err + } + + _, err = checkResp(c.HTTPClient.Do(request)) + return err +} + +// maskString masks all but the first few characters of a string for display +// output.
This is useful for tokens so we can display them to the user without +// showing the full output. +func maskString(s string) string { + if len(s) <= 3 { + return "*** (masked)" + } + + return s[0:3] + "*** (masked)" +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/build_config.go b/vendor/github.com/hashicorp/atlas-go/v1/build_config.go new file mode 100644 index 00000000..fbcd9127 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/build_config.go @@ -0,0 +1,193 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// bcWrapper is the API wrapper since the server wraps the resulting object. +type bcWrapper struct { + BuildConfig *BuildConfig `json:"build_configuration"` +} + +// Atlas expects a list of key/value vars +type BuildVar struct { + Key string `json:"key"` + Value string `json:"value"` + Sensitive bool `json:"sensitive"` +} +type BuildVars []BuildVar + +// BuildConfig represents a Packer build configuration. +type BuildConfig struct { + // User is the namespace under which the build config lives + User string `json:"username"` + + // Name is the actual name of the build config, unique in the scope + // of the username. + Name string `json:"name"` +} + +// Slug returns the slug format for this BuildConfig (User/Name) +func (b *BuildConfig) Slug() string { + return fmt.Sprintf("%s/%s", b.User, b.Name) +} + +// BuildConfigVersion represents a single uploaded (or uploadable) version +// of a build configuration. +type BuildConfigVersion struct { + // The fields below are the username/name combo to uniquely identify + // a build config. + User string `json:"username"` + Name string `json:"name"` + + // Builds is the list of builds that this version supports. + Builds []BuildConfigBuild +} + +// Slug returns the slug format for this BuildConfigVersion (User/Name) +func (bv *BuildConfigVersion) Slug() string { + return fmt.Sprintf("%s/%s", bv.User, bv.Name) +} + +// BuildConfigBuild is a single build that is present in an uploaded +// build configuration. +type BuildConfigBuild struct { + // Name is a unique name for this build + Name string `json:"name"` + + // Type is the type of builder that this build needs to run on, + // such as "amazon-ebs" or "qemu". + Type string `json:"type"` + + // Artifact is true if this build results in one or more artifacts + // being sent to Atlas + Artifact bool `json:"artifact"` +} + +// BuildConfig gets a single build configuration by user and name. +func (c *Client) BuildConfig(user, name string) (*BuildConfig, error) { + log.Printf("[INFO] getting build configuration %s/%s", user, name) + + endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var bc BuildConfig + if err := decodeJSON(response, &bc); err != nil { + return nil, err + } + + return &bc, nil +} + +// CreateBuildConfig creates a new build configuration. 
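A sketch of the `Login`/`Verify` flow. The `ATLAS_USER` and `ATLAS_PASSWORD` environment variable names are this example's own convention, not something the library reads; only `ATLAS_TOKEN` and `ATLAS_ADDRESS` are consumed by the client itself.

```go
package main

import (
	"log"
	"os"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client := atlas.DefaultClient()

	// Exchange credentials for a token; Login also stores the token on
	// the client, so subsequent requests are authenticated automatically.
	_, err := client.Login(os.Getenv("ATLAS_USER"), os.Getenv("ATLAS_PASSWORD"))
	if err != nil {
		log.Fatal(err)
	}
	log.Println("logged in; token set on client")

	// Round-trip check that the stored token actually works.
	if err := client.Verify(); err != nil {
		log.Fatal(err)
	}
	log.Println("token verified")
}
```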
+func (c *Client) CreateBuildConfig(user, name string) (*BuildConfig, error) { + log.Printf("[INFO] creating build configuration %s/%s", user, name) + + endpoint := "/api/v1/packer/build-configurations" + body, err := json.Marshal(&bcWrapper{ + BuildConfig: &BuildConfig{ + User: user, + Name: name, + }, + }) + if err != nil { + return nil, err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return nil, err + } + + var bc BuildConfig + if err := decodeJSON(response, &bc); err != nil { + return nil, err + } + + return &bc, nil +} + +// UploadBuildConfigVersion creates a single build configuration version +// and uploads the template associated with it. +// +// Actual API: "Create Build Config Version" +func (c *Client) UploadBuildConfigVersion(v *BuildConfigVersion, metadata map[string]interface{}, + vars BuildVars, data io.Reader, size int64) error { + + log.Printf("[INFO] uploading build configuration version %s (%d bytes), with metadata %q", + v.Slug(), size, metadata) + + endpoint := fmt.Sprintf("/api/v1/packer/build-configurations/%s/%s/versions", + v.User, v.Name) + + var bodyData bcCreateWrapper + bodyData.Version.Builds = v.Builds + bodyData.Version.Metadata = metadata + bodyData.Version.Vars = vars + body, err := json.Marshal(bodyData) + if err != nil { + return err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return err + } + + var bv bcCreate + if err := decodeJSON(response, &bv); err != nil { + return err + } + + if err := c.putFile(bv.UploadPath, data, size); err != nil { + return err + } + + return nil +} + +// bcCreate is the struct returned when creating a build configuration. +type bcCreate struct { + UploadPath string `json:"upload_path"` +} + +// bcCreateWrapper is the wrapper for creating a build config. +type bcCreateWrapper struct { + Version struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` + Builds []BuildConfigBuild `json:"builds"` + Vars BuildVars `json:"packer_vars,omitempty"` + } `json:"version"` +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/client.go b/vendor/github.com/hashicorp/atlas-go/v1/client.go new file mode 100644 index 00000000..a38f97eb --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/client.go @@ -0,0 +1,339 @@ +package atlas + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path" + "runtime" + "strings" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // atlasDefaultEndpoint is the default base URL for connecting to Atlas. + atlasDefaultEndpoint = "https://atlas.hashicorp.com" + + // atlasEndpointEnvVar is the environment variable that overrides the + // default Atlas address.
+ atlasEndpointEnvVar = "ATLAS_ADDRESS" + + // atlasCAFileEnvVar is the environment variable that causes the client to + // load trusted certs from a file + atlasCAFileEnvVar = "ATLAS_CAFILE" + + // atlasCAPathEnvVar is the environment variable that causes the client to + // load trusted certs from a directory + atlasCAPathEnvVar = "ATLAS_CAPATH" + + // atlasTLSNoVerifyEnvVar disables TLS verification, similar to curl -k + // This defaults to false (verify) and will change to true (skip + // verification) with any non-empty value + atlasTLSNoVerifyEnvVar = "ATLAS_TLS_NOVERIFY" + + // atlasTokenHeader is the header key used for authenticating with Atlas + atlasTokenHeader = "X-Atlas-Token" +) + +var projectURL = "https://github.com/hashicorp/atlas-go" +var userAgent = fmt.Sprintf("AtlasGo/1.0 (+%s; %s)", + projectURL, runtime.Version()) + +// ErrAuth is the error returned if a 401 is returned by an API request. +var ErrAuth = fmt.Errorf("authentication failed") + +// ErrNotFound is the error returned if a 404 is returned by an API request. +var ErrNotFound = fmt.Errorf("resource not found") + +// RailsError represents an error that was returned from the Rails server. +type RailsError struct { + Errors []string `json:"errors"` +} + +// Error collects all of the errors in the RailsError and returns a comma- +// separated list of the errors that were returned from the server. +func (re *RailsError) Error() string { + return strings.Join(re.Errors, ", ") +} + +// Client represents a single connection to a Atlas API endpoint. +type Client struct { + // URL is the full endpoint address to the Atlas server including the + // protocol, port, and path. + URL *url.URL + + // Token is the Atlas authentication token + Token string + + // HTTPClient is the underlying http client with which to make requests. + HTTPClient *http.Client + + // DefaultHeaders is a set of headers that will be added to every request. + // This minimally includes the atlas user-agent string. + DefaultHeader http.Header +} + +// DefaultClient returns a client that connects to the Atlas API. +func DefaultClient() *Client { + atlasEndpoint := os.Getenv(atlasEndpointEnvVar) + if atlasEndpoint == "" { + atlasEndpoint = atlasDefaultEndpoint + } + + client, err := NewClient(atlasEndpoint) + if err != nil { + panic(err) + } + + return client +} + +// NewClient creates a new Atlas Client from the given URL (as a string). If +// the URL cannot be parsed, an error is returned. The HTTPClient is set to +// an empty http.Client, but this can be changed programmatically by setting +// client.HTTPClient. The user can also programmatically set the URL as a +// *url.URL. +func NewClient(urlString string) (*Client, error) { + if len(urlString) == 0 { + return nil, fmt.Errorf("client: missing url") + } + + parsedURL, err := url.Parse(urlString) + if err != nil { + return nil, err + } + + token := os.Getenv("ATLAS_TOKEN") + if token != "" { + log.Printf("[DEBUG] using ATLAS_TOKEN (%s)", maskString(token)) + } + + client := &Client{ + URL: parsedURL, + Token: token, + DefaultHeader: make(http.Header), + } + + client.DefaultHeader.Set("User-Agent", userAgent) + + if err := client.init(); err != nil { + return nil, err + } + + return client, nil +} + +// init() sets defaults on the client. 
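Constructing a client explicitly rather than via `DefaultClient` lets the caller target a private endpoint and set the token directly. A sketch under stated assumptions: the endpoint URL and `X-Request-Source` header are hypothetical, and `NewClient` will still pick up `ATLAS_TOKEN` from the environment if it is set.

```go
package main

import (
	"log"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	// Point the client at a private Atlas-compatible endpoint instead of
	// relying on ATLAS_ADDRESS; the URL here is hypothetical.
	client, err := atlas.NewClient("https://atlas.example.internal")
	if err != nil {
		log.Fatal(err)
	}

	// NewClient already read ATLAS_TOKEN if present; the token can also
	// be assigned directly.
	client.Token = "example-token"

	// DefaultHeader is sent with every request, so one-time extras like
	// tracing headers can be added here.
	client.DefaultHeader.Set("X-Request-Source", "ci")

	log.Println("client ready:", client.URL)
}
```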
+func (c *Client) init() error { + c.HTTPClient = cleanhttp.DefaultClient() + tlsConfig := &tls.Config{} + if os.Getenv(atlasTLSNoVerifyEnvVar) != "" { + tlsConfig.InsecureSkipVerify = true + } + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv(atlasCAFileEnvVar), + CAPath: os.Getenv(atlasCAPathEnvVar), + }) + if err != nil { + return err + } + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + c.HTTPClient.Transport = t + return nil +} + +// RequestOptions is the list of options to pass to the request. +type RequestOptions struct { + // Params is a map of key-value pairs that will be added to the Request. + Params map[string]string + + // Headers is a map of key-value pairs that will be added to the Request. + Headers map[string]string + + // Body is an io.Reader object that will be streamed or uploaded with the + // Request. BodyLength is the final size of the Body. + Body io.Reader + BodyLength int64 +} + +// Request creates a new HTTP request using the given verb and sub path. +func (c *Client) Request(verb, spath string, ro *RequestOptions) (*http.Request, error) { + log.Printf("[INFO] request: %s %s", verb, spath) + + // Ensure we have a RequestOptions struct (passing nil is acceptable) + if ro == nil { + ro = new(RequestOptions) + } + + // Create a new URL with the appended path + u := *c.URL + u.Path = path.Join(c.URL.Path, spath) + + // Add the token and other params + if c.Token != "" { + log.Printf("[DEBUG] request: appending token (%s)", maskString(c.Token)) + if ro.Headers == nil { + ro.Headers = make(map[string]string) + } + + ro.Headers[atlasTokenHeader] = c.Token + } + + return c.rawRequest(verb, &u, ro) +} + +func (c *Client) putFile(rawURL string, r io.Reader, size int64) error { + log.Printf("[INFO] putting file: %s", rawURL) + + url, err := url.Parse(rawURL) + if err != nil { + return err + } + + request, err := c.rawRequest("PUT", url, &RequestOptions{ + Body: r, + BodyLength: size, + }) + if err != nil { + return err + } + + if _, err := checkResp(c.HTTPClient.Do(request)); err != nil { + return err + } + + return nil +} + +// rawRequest accepts a verb, URL, and RequestOptions struct and returns the +// constructed http.Request and any errors that occurred +func (c *Client) rawRequest(verb string, u *url.URL, ro *RequestOptions) (*http.Request, error) { + if verb == "" { + return nil, fmt.Errorf("client: missing verb") + } + + if u == nil { + return nil, fmt.Errorf("client: missing URL.url") + } + + if ro == nil { + return nil, fmt.Errorf("client: missing RequestOptions") + } + + // Add the token and other params + var params = make(url.Values) + for k, v := range ro.Params { + params.Add(k, v) + } + u.RawQuery = params.Encode() + + // Create the request object + request, err := http.NewRequest(verb, u.String(), ro.Body) + if err != nil { + return nil, err + } + + // set our default headers first + for k, v := range c.DefaultHeader { + request.Header[k] = v + } + + // Add any request headers (auth will be here if set) + for k, v := range ro.Headers { + request.Header.Add(k, v) + } + + // Add content-length if we have it + if ro.BodyLength > 0 { + request.ContentLength = ro.BodyLength + } + + log.Printf("[DEBUG] raw request: %#v", request) + + return request, nil +} + +// checkResp wraps http.Client.Do() and verifies that the request was +// successful. A non-2xx response returns an error formatted to include any +// validation problems or other failure details.
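`Request` is also usable directly for endpoints the typed methods don't cover: it joins the sub-path onto the client URL, encodes `Params` into the query string, and injects the `X-Atlas-Token` header when a token is set. A sketch (the endpoint path and parameter are hypothetical):

```go
package main

import (
	"log"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client := atlas.DefaultClient()

	// Build a GET with query parameters against a hypothetical endpoint;
	// the returned value is a plain *http.Request, ready for
	// client.HTTPClient.Do.
	req, err := client.Request("GET", "/api/v1/some/endpoint", &atlas.RequestOptions{
		Params: map[string]string{"page": "2"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("would request:", req.Method, req.URL)
}
```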
+func checkResp(resp *http.Response, err error) (*http.Response, error) { + // If the err is already there, there was an error higher up the chain, so + // just return that + if err != nil { + return resp, err + } + + log.Printf("[INFO] response: %d (%s)", resp.StatusCode, resp.Status) + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + log.Printf("[ERR] response: error copying response body") + } else { + log.Printf("[DEBUG] response: %s", buf.String()) + + // We are going to reset the response body, so we need to close the old + // one or else it will leak. + resp.Body.Close() + resp.Body = &bytesReadCloser{&buf} + } + + switch resp.StatusCode { + case 200: + return resp, nil + case 201: + return resp, nil + case 202: + return resp, nil + case 204: + return resp, nil + case 400: + return nil, parseErr(resp) + case 401: + return nil, ErrAuth + case 404: + return nil, ErrNotFound + case 422: + return nil, parseErr(resp) + default: + return nil, fmt.Errorf("client: %s", resp.Status) + } +} + +// parseErr is used to take an error JSON response and return a single string +// for use in error messages. +func parseErr(r *http.Response) error { + re := &RailsError{} + + if err := decodeJSON(r, &re); err != nil { + return fmt.Errorf("error decoding JSON body: %s", err) + } + + return re +} + +// decodeJSON is used to JSON decode a body into an interface. +func decodeJSON(resp *http.Response, out interface{}) error { + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// bytesReadCloser is a simple wrapper around a bytes buffer that implements +// Close as a noop. +type bytesReadCloser struct { + *bytes.Buffer +} + +func (nrc *bytesReadCloser) Close() error { + // we don't actually have to do anything here, since the buffer is just some + // data in memory and the error is initialized to no-error + return nil +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go new file mode 100644 index 00000000..debd1d31 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go @@ -0,0 +1,106 @@ +package atlas + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "log" +) + +// TerraformConfigVersion represents a single uploaded version of a +// Terraform configuration. +type TerraformConfigVersion struct { + Version int + Remotes []string `json:"remotes"` + Metadata map[string]string `json:"metadata"` + Variables map[string]string `json:"variables,omitempty"` + TFVars []TFVar `json:"tf_vars"` +} + +// TFVar is used to serialize a single Terraform variable sent by the +// manager as a collection of Variables in a Job payload. +type TFVar struct { + Key string `json:"key"` + Value string `json:"value"` + IsHCL bool `json:"hcl"` +} + +// TerraformConfigLatest returns the latest Terraform configuration version. 
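The `TFVar` type's JSON tags (`key`, `value`, `hcl`) are how HCL-typed variables are flagged to the server. A small sketch that round-trips a couple of variables through `encoding/json`; the variable names and values are hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	// IsHCL marks the value as HCL source rather than a plain string.
	vars := []atlas.TFVar{
		{Key: "region", Value: "us-west-2", IsHCL: false},
		{Key: "tags", Value: `{ team = "ops" }`, IsHCL: true},
	}

	out, err := json.Marshal(vars)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
	// [{"key":"region","value":"us-west-2","hcl":false},
	//  {"key":"tags","value":"{ team = \"ops\" }","hcl":true}]
}
```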
+func (c *Client) TerraformConfigLatest(user, name string) (*TerraformConfigVersion, error) { + log.Printf("[INFO] getting terraform configuration %s/%s", user, name) + + endpoint := fmt.Sprintf("/api/v1/terraform/configurations/%s/%s/versions/latest", user, name) + request, err := c.Request("GET", endpoint, nil) + if err != nil { + return nil, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err == ErrNotFound { + return nil, nil + } + if err != nil { + return nil, err + } + + var wrapper tfConfigVersionWrapper + if err := decodeJSON(response, &wrapper); err != nil { + return nil, err + } + + return wrapper.Version, nil +} + +// CreateTerraformConfigVersion creates a new Terraform configuration +// version and uploads a slug with it. +func (c *Client) CreateTerraformConfigVersion( + user string, name string, + version *TerraformConfigVersion, + data io.Reader, size int64) (int, error) { + log.Printf("[INFO] creating terraform configuration %s/%s", user, name) + + endpoint := fmt.Sprintf( + "/api/v1/terraform/configurations/%s/%s/versions", user, name) + body, err := json.Marshal(&tfConfigVersionWrapper{ + Version: version, + }) + if err != nil { + return 0, err + } + + request, err := c.Request("POST", endpoint, &RequestOptions{ + Body: bytes.NewReader(body), + Headers: map[string]string{ + "Content-Type": "application/json", + }, + }) + if err != nil { + return 0, err + } + + response, err := checkResp(c.HTTPClient.Do(request)) + if err != nil { + return 0, err + } + + var result tfConfigVersionCreate + if err := decodeJSON(response, &result); err != nil { + return 0, err + } + + if err := c.putFile(result.UploadPath, data, size); err != nil { + return 0, err + } + + return result.Version, nil +} + +type tfConfigVersionCreate struct { + UploadPath string `json:"upload_path"` + Version int +} + +type tfConfigVersionWrapper struct { + Version *TerraformConfigVersion `json:"version"` +} diff --git a/vendor/github.com/hashicorp/atlas-go/v1/util.go b/vendor/github.com/hashicorp/atlas-go/v1/util.go new file mode 100644 index 00000000..9aa0d288 --- /dev/null +++ b/vendor/github.com/hashicorp/atlas-go/v1/util.go @@ -0,0 +1,22 @@ +package atlas + +import ( + "fmt" + "strings" +) + +// ParseSlug parses a slug of the format (x/y) into the x and y components. It +// accepts a string of the format "x/y" ("user/name" for example). If an empty +// string is given, an error is returned. If the given string is not a valid +// slug format, an error is returned.
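A sketch tying the two Terraform configuration calls together. Note that `TerraformConfigLatest` maps `ErrNotFound` to a nil version with a nil error, so a nil result means "no versions yet" rather than a failure. The `acme/infra` slug and in-memory payload are hypothetical.

```go
package main

import (
	"log"
	"strings"

	atlas "github.com/hashicorp/atlas-go/v1"
)

func main() {
	client := atlas.DefaultClient()

	// nil version with nil error means no versions exist yet.
	latest, err := client.TerraformConfigLatest("acme", "infra")
	if err != nil {
		log.Fatal(err)
	}

	next := &atlas.TerraformConfigVersion{
		Metadata: map[string]string{"source": "ci"},
		TFVars:   []atlas.TFVar{{Key: "region", Value: "us-west-2"}},
	}

	// Stand-in for a real configuration slug archive.
	slug := strings.NewReader("fake slug bytes")
	v, err := client.CreateTerraformConfigVersion("acme", "infra", next, slug, int64(slug.Len()))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded version %d (previous: %+v)", v, latest)
}
```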
+func ParseSlug(slug string) (string, string, error) { + if slug == "" { + return "", "", fmt.Errorf("missing slug") + } + + parts := strings.Split(slug, "/") + if len(parts) != 2 { + return "", "", fmt.Errorf("malformed slug %q", slug) + } + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/CHANGELOG.md b/vendor/github.com/hashicorp/aws-sdk-go-base/CHANGELOG.md new file mode 100644 index 00000000..f419a62f --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/CHANGELOG.md @@ -0,0 +1,9 @@ +# v0.2.0 (February 20, 2019) + +ENHANCEMENTS + +* validation: Add `ValidateAccountID` and `ValidateRegion` functions [GH-1] + +# v0.1.0 (February 18, 2019) + +* Initial release after split from github.com/terraform-providers/terraform-provider-aws diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/GNUmakefile b/vendor/github.com/hashicorp/aws-sdk-go-base/GNUmakefile new file mode 100644 index 00000000..ef101a11 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/GNUmakefile @@ -0,0 +1,13 @@ +default: test lint + +lint: + @echo "==> Checking source code against linters..." + @golangci-lint run ./... + +test: + go test -timeout=30s -parallel=4 ./... + +tools: + GO111MODULE=off go get -u github.com/golangci/golangci-lint/cmd/golangci-lint + +.PHONY: lint test tools diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/LICENSE b/vendor/github.com/hashicorp/aws-sdk-go-base/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. 
"Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. 
+ +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. 
Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/README.md b/vendor/github.com/hashicorp/aws-sdk-go-base/README.md new file mode 100644 index 00000000..dc410ebd --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/README.md @@ -0,0 +1,27 @@ +# aws-sdk-go-base + +An opinionated [AWS Go SDK](https://github.com/aws/aws-sdk-go) library for consistent authentication configuration between projects and additional helper functions. This library was originally started in [HashiCorp Terraform](https://github.com/hashicorp/terraform), migrated with the [Terraform AWS Provider](https://github.com/terraform-providers/terraform-provider-aws) during the Terraform 0.10 Core and Provider split, and now is offered as a separate library to allow easier dependency management in the Terraform ecosystem. + +**NOTE:** This library is not currently designed or intended for usage outside the [Terraform S3 Backend](https://www.terraform.io/docs/backends/types/s3.html) and the [Terraform AWS Provider](https://www.terraform.io/docs/providers/aws/index.html). + +## Requirements + +- [Go](https://golang.org/doc/install) 1.11.4+ + +## Development + +Testing this project can be done through Go standard library functionality or if [Make](https://www.gnu.org/software/make/) is available: + +```sh +$ go test -v ./... +# Optionally if Make is available; both run the same testing +$ make test +``` + +Code quality assurance uses [golangci-lint](https://github.com/golangci/golangci-lint): + +```sh +$ golangci-lint run ./... 
+# Optionally if Make is available; both run the same linting +$ make lint +``` diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/awsauth.go b/vendor/github.com/hashicorp/aws-sdk-go-base/awsauth.go new file mode 100644 index 00000000..17bc443d --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/awsauth.go @@ -0,0 +1,308 @@ +package awsbase + +import ( + "errors" + "fmt" + "log" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go/aws/awserr" + awsCredentials "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/iam" + "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-multierror" +) + +func GetAccountIDAndPartition(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) { + var accountID, partition string + var err, errors error + + if authProviderName == ec2rolecreds.ProviderName { + accountID, partition, err = GetAccountIDAndPartitionFromEC2Metadata() + } else { + accountID, partition, err = GetAccountIDAndPartitionFromIAMGetUser(iamconn) + } + if accountID != "" { + return accountID, partition, nil + } + errors = multierror.Append(errors, err) + + accountID, partition, err = GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsconn) + if accountID != "" { + return accountID, partition, nil + } + errors = multierror.Append(errors, err) + + accountID, partition, err = GetAccountIDAndPartitionFromIAMListRoles(iamconn) + if accountID != "" { + return accountID, partition, nil + } + errors = multierror.Append(errors, err) + + return accountID, partition, errors +} + +func GetAccountIDAndPartitionFromEC2Metadata() (string, string, error) { + log.Println("[DEBUG] Trying to get account information via EC2 Metadata") + + cfg := &aws.Config{} + setOptionalEndpoint(cfg) + sess, err := session.NewSession(cfg) + if err != nil { + return "", "", fmt.Errorf("error creating EC2 Metadata session: %s", err) + } + + metadataClient := ec2metadata.New(sess) + info, err := metadataClient.IAMInfo() + if err != nil { + // We can end up here if there's an issue with the instance metadata service + // or if we're getting credentials from AdRoll's Hologram (in which case IAMInfo will + // error out). 
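+		// Either way the caller (GetAccountIDAndPartition) treats this as
+		// non-fatal and falls back to the STS and IAM lookups it tries next.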
+		err = fmt.Errorf("failed getting account information via EC2 Metadata IAM information: %s", err)
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	return parseAccountIDAndPartitionFromARN(info.InstanceProfileArn)
+}
+
+func GetAccountIDAndPartitionFromIAMGetUser(iamconn *iam.IAM) (string, string, error) {
+	log.Println("[DEBUG] Trying to get account information via iam:GetUser")
+
+	output, err := iamconn.GetUser(&iam.GetUserInput{})
+	if err != nil {
+		// AccessDenied and ValidationError can be raised
+		// if credentials belong to a federated profile, so we ignore these
+		if awsErr, ok := err.(awserr.Error); ok {
+			switch awsErr.Code() {
+			case "AccessDenied", "InvalidClientTokenId", "ValidationError":
+				return "", "", nil
+			}
+		}
+		err = fmt.Errorf("failed getting account information via iam:GetUser: %s", err)
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	if output == nil || output.User == nil {
+		err = errors.New("empty iam:GetUser response")
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	return parseAccountIDAndPartitionFromARN(aws.StringValue(output.User.Arn))
+}
+
+func GetAccountIDAndPartitionFromIAMListRoles(iamconn *iam.IAM) (string, string, error) {
+	log.Println("[DEBUG] Trying to get account information via iam:ListRoles")
+
+	output, err := iamconn.ListRoles(&iam.ListRolesInput{
+		MaxItems: aws.Int64(int64(1)),
+	})
+	if err != nil {
+		err = fmt.Errorf("failed getting account information via iam:ListRoles: %s", err)
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	if output == nil || len(output.Roles) < 1 {
+		err = fmt.Errorf("empty iam:ListRoles response")
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	return parseAccountIDAndPartitionFromARN(aws.StringValue(output.Roles[0].Arn))
+}
+
+func GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsconn *sts.STS) (string, string, error) {
+	log.Println("[DEBUG] Trying to get account information via sts:GetCallerIdentity")
+
+	output, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+	if err != nil {
+		return "", "", fmt.Errorf("error calling sts:GetCallerIdentity: %s", err)
+	}
+
+	if output == nil || output.Arn == nil {
+		err = errors.New("empty sts:GetCallerIdentity response")
+		log.Printf("[DEBUG] %s", err)
+		return "", "", err
+	}
+
+	return parseAccountIDAndPartitionFromARN(aws.StringValue(output.Arn))
+}
+
+func parseAccountIDAndPartitionFromARN(inputARN string) (string, string, error) {
+	arn, err := arn.Parse(inputARN)
+	if err != nil {
+		return "", "", fmt.Errorf("error parsing ARN (%s): %s", inputARN, err)
+	}
+	return arn.AccountID, arn.Partition, nil
+}
+
+// GetCredentials is responsible for reading credentials from the
+// environment in the case that they're not explicitly specified
+// in the Terraform configuration.
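+//
+// Providers are tried in order: static credentials from the configuration,
+// then environment variables, then the shared credentials file, and finally
+// (when detected) ECS task-role or EC2 instance-profile credentials.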
+func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
+	// build a chain provider, lazy-evaluated by aws-sdk
+	providers := []awsCredentials.Provider{
+		&awsCredentials.StaticProvider{Value: awsCredentials.Value{
+			AccessKeyID:     c.AccessKey,
+			SecretAccessKey: c.SecretKey,
+			SessionToken:    c.Token,
+		}},
+		&awsCredentials.EnvProvider{},
+		&awsCredentials.SharedCredentialsProvider{
+			Filename: c.CredsFilename,
+			Profile:  c.Profile,
+		},
+	}
+
+	// Build an isolated HTTP client to avoid issues with globally-shared settings
+	client := cleanhttp.DefaultClient()
+
+	// Keep the default timeout (100ms) low as we don't want to wait in non-EC2 environments
+	client.Timeout = 100 * time.Millisecond
+
+	const userTimeoutEnvVar = "AWS_METADATA_TIMEOUT"
+	userTimeout := os.Getenv(userTimeoutEnvVar)
+	if userTimeout != "" {
+		newTimeout, err := time.ParseDuration(userTimeout)
+		if err == nil {
+			if newTimeout.Nanoseconds() > 0 {
+				client.Timeout = newTimeout
+			} else {
+				log.Printf("[WARN] Non-positive value of %s (%s) is meaningless, ignoring", userTimeoutEnvVar, newTimeout.String())
+			}
+		} else {
+			log.Printf("[WARN] Error converting %s to time.Duration: %s", userTimeoutEnvVar, err)
+		}
+	}
+
+	log.Printf("[INFO] Setting AWS metadata API timeout to %s", client.Timeout.String())
+	cfg := &aws.Config{
+		HTTPClient: client,
+	}
+	usedEndpoint := setOptionalEndpoint(cfg)
+
+	// Add the default AWS provider for ECS Task Roles if the relevant env variable is set
+	if uri := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); len(uri) > 0 {
+		providers = append(providers, defaults.RemoteCredProvider(*cfg, defaults.Handlers()))
+		log.Print("[INFO] ECS container credentials detected, RemoteCredProvider added to auth chain")
+	}
+
+	if !c.SkipMetadataApiCheck {
+		// Real AWS should reply to a simple metadata request.
+		// We check it actually does to ensure something else didn't just
+		// happen to be listening on the same IP:Port
+		ec2Session, err := session.NewSession(cfg)
+
+		if err != nil {
+			return nil, fmt.Errorf("error creating EC2 Metadata session: %s", err)
+		}
+
+		metadataClient := ec2metadata.New(ec2Session)
+		if metadataClient.Available() {
+			providers = append(providers, &ec2rolecreds.EC2RoleProvider{
+				Client: metadataClient,
+			})
+			log.Print("[INFO] AWS EC2 instance detected via default metadata" +
+				" API endpoint, EC2RoleProvider added to the auth chain")
+		} else {
+			if usedEndpoint == "" {
+				usedEndpoint = "default location"
+			}
+			log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
+				"as it doesn't return any instance-id", usedEndpoint)
+		}
+	}
+
+	// This is the "normal" flow (i.e. not assuming a role)
+	if c.AssumeRoleARN == "" {
+		return awsCredentials.NewChainCredentials(providers), nil
+	}
+
+	// Otherwise we need to construct an STS client with the main credentials, and verify
+	// that we can assume the defined role.
+	log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
+		c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
+
+	creds := awsCredentials.NewChainCredentials(providers)
+	cp, err := creds.Get()
+	if err != nil {
+		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
+			return nil, errors.New(`No valid credential sources found for AWS Provider.
+ Please see https://terraform.io/docs/providers/aws/index.html for more information on + providing credentials for the AWS Provider`) + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + + log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) + + awsConfig := &aws.Config{ + Credentials: creds, + Region: aws.String(c.Region), + MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: cleanhttp.DefaultClient(), + } + + assumeRoleSession, err := session.NewSession(awsConfig) + + if err != nil { + return nil, fmt.Errorf("error creating assume role session: %s", err) + } + + stsclient := sts.New(assumeRoleSession) + assumeRoleProvider := &stscreds.AssumeRoleProvider{ + Client: stsclient, + RoleARN: c.AssumeRoleARN, + } + if c.AssumeRoleSessionName != "" { + assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName + } + if c.AssumeRoleExternalID != "" { + assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID) + } + if c.AssumeRolePolicy != "" { + assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy) + } + + providers = []awsCredentials.Provider{assumeRoleProvider} + + assumeRoleCreds := awsCredentials.NewChainCredentials(providers) + _, err = assumeRoleCreds.Get() + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" { + return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+ + " There are a number of possible causes of this - the most common are:\n"+ + " * The credentials used in order to assume the role are invalid\n"+ + " * The credentials do not have appropriate permission to assume the role\n"+ + " * The role ARN is not valid", + c.AssumeRoleARN) + } + + return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err) + } + + return assumeRoleCreds, nil +} + +func setOptionalEndpoint(cfg *aws.Config) string { + endpoint := os.Getenv("AWS_METADATA_URL") + if endpoint != "" { + log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint) + cfg.Endpoint = aws.String(endpoint) + return endpoint + } + return "" +} diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/awserr.go b/vendor/github.com/hashicorp/aws-sdk-go-base/awserr.go new file mode 100644 index 00000000..28295483 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/awserr.go @@ -0,0 +1,37 @@ +package awsbase + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// IsAWSErr returns true if the error matches all these conditions: +// * err is of type awserr.Error +// * Error.Code() matches code +// * Error.Message() contains message +func IsAWSErr(err error, code string, message string) bool { + awsErr, ok := err.(awserr.Error) + + if !ok { + return false + } + + if awsErr.Code() != code { + return false + } + + return strings.Contains(awsErr.Message(), message) +} + +// IsAWSErrExtended returns true if the error matches all these conditions: +// * err is of type awserr.Error +// * Error.Code() matches code +// * Error.Message() contains message +// * Error.OrigErr() contains origErrMessage +func IsAWSErrExtended(err error, code string, message string, origErrMessage string) bool { + if !IsAWSErr(err, code, message) { + return false + } + return strings.Contains(err.(awserr.Error).OrigErr().Error(), origErrMessage) +} diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/config.go b/vendor/github.com/hashicorp/aws-sdk-go-base/config.go new file mode 100644 index 00000000..fb57e44e --- /dev/null +++ 
b/vendor/github.com/hashicorp/aws-sdk-go-base/config.go @@ -0,0 +1,29 @@ +package awsbase + +type Config struct { + AccessKey string + AssumeRoleARN string + AssumeRoleExternalID string + AssumeRolePolicy string + AssumeRoleSessionName string + CredsFilename string + DebugLogging bool + IamEndpoint string + Insecure bool + MaxRetries int + Profile string + Region string + SecretKey string + SkipCredsValidation bool + SkipMetadataApiCheck bool + SkipRequestingAccountId bool + StsEndpoint string + Token string + UserAgentProducts []*UserAgentProduct +} + +type UserAgentProduct struct { + Extra []string + Name string + Version string +} diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/go.mod b/vendor/github.com/hashicorp/aws-sdk-go-base/go.mod new file mode 100644 index 00000000..9df74cb3 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/go.mod @@ -0,0 +1,10 @@ +module github.com/hashicorp/aws-sdk-go-base + +require ( + github.com/aws/aws-sdk-go v1.16.36 + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-multierror v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20190213061140-3a22650c66bd // indirect + golang.org/x/text v0.3.0 // indirect +) diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/go.sum b/vendor/github.com/hashicorp/aws-sdk-go-base/go.sum new file mode 100644 index 00000000..fe20b5e5 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/go.sum @@ -0,0 +1,21 @@ +github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k= +github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0 
h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/logger.go b/vendor/github.com/hashicorp/aws-sdk-go-base/logger.go
new file mode 100644
index 00000000..467d7ace
--- /dev/null
+++ b/vendor/github.com/hashicorp/aws-sdk-go-base/logger.go
@@ -0,0 +1,18 @@
+package awsbase
+
+import (
+	"log"
+	"strings"
+)
+
+type DebugLogger struct{}
+
+func (l DebugLogger) Log(args ...interface{}) {
+	tokens := make([]string, 0, len(args))
+	for _, arg := range args {
+		if token, ok := arg.(string); ok {
+			tokens = append(tokens, token)
+		}
+	}
+	log.Printf("[DEBUG] [aws-sdk-go] %s", strings.Join(tokens, " "))
+}
diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/mock.go b/vendor/github.com/hashicorp/aws-sdk-go-base/mock.go
new file mode 100644
index 00000000..66d17256
--- /dev/null
+++ b/vendor/github.com/hashicorp/aws-sdk-go-base/mock.go
@@ -0,0 +1,76 @@
+package awsbase
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net/http"
+	"net/http/httptest"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+// GetMockedAwsApiSession establishes an httptest server to simulate the
+// behaviour of a real AWS API server
+func GetMockedAwsApiSession(svcName string, endpoints []*MockEndpoint) (func(), *session.Session, error) {
+	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		buf := new(bytes.Buffer)
+		if _, err := buf.ReadFrom(r.Body); err != nil {
+			w.WriteHeader(500)
+			fmt.Fprintf(w, "Error reading from HTTP Request Body: %s", err)
+			return
+		}
+		requestBody := buf.String()
+
+		log.Printf("[DEBUG] Received %s API %q request to %q: %s",
+			svcName, r.Method, r.RequestURI, requestBody)
+
+		for _, e := range endpoints {
+			if r.Method == e.Request.Method && r.RequestURI == e.Request.Uri && requestBody == e.Request.Body {
+				log.Printf("[DEBUG] Mocked %s API responding with %d: %s",
+					svcName, e.Response.StatusCode, e.Response.Body)
+
+				// Headers must be set before WriteHeader is called; net/http ignores them afterwards
+				w.Header().Set("Content-Type", e.Response.ContentType)
+				w.Header().Set("X-Amzn-Requestid", "1b206dd1-f9a8-11e5-becf-051c60f11c4a")
+				w.Header().Set("Date", time.Now().Format(time.RFC1123))
+				w.WriteHeader(e.Response.StatusCode)
+
+				fmt.Fprintln(w, e.Response.Body)
+				return
+			}
+		}
+
+		w.WriteHeader(400)
+	}))
+
+	sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "")
+
+	sess, err := session.NewSession(&aws.Config{
+		Credentials:                   sc,
+		Region:                        aws.String("us-east-1"),
+		Endpoint:                      aws.String(ts.URL),
+		CredentialsChainVerboseErrors: aws.Bool(true),
+	})
+
+	return ts.Close, sess, err
+}
+
+type MockEndpoint struct {
+	Request  *MockRequest
+	Response *MockResponse
+}
+
+type MockRequest struct {
+	Method string
+	Uri    string
+	Body   string
+}
+
+type MockResponse struct {
+	StatusCode  int
+	Body        string
+	ContentType string
+}
diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/session.go b/vendor/github.com/hashicorp/aws-sdk-go-base/session.go
new file mode 100644
index 00000000..35b991bb
--- /dev/null
+++ b/vendor/github.com/hashicorp/aws-sdk-go-base/session.go
@@ -0,0 +1,200 @@
+package awsbase
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+// GetSessionOptions attempts to return valid AWS Go SDK session authentication
+// options based on a pre-existing credential provider or configured profile,
+// falling back to an automatically determined session via the AWS Go SDK.
+func GetSessionOptions(c *Config) (*session.Options, error) {
+	options := &session.Options{
+		Config: aws.Config{
+			HTTPClient: cleanhttp.DefaultClient(),
+			MaxRetries: aws.Int(0),
+			Region:     aws.String(c.Region),
+		},
+	}
+
+	creds, err := GetCredentials(c)
+	if err != nil {
+		return nil, err
+	}
+
+	// Call Get to check for a credential provider. If nothing is found, we'll get an
+	// error, and we can present it nicely to the user
+	cp, err := creds.Get()
+	if err != nil {
+		if IsAWSErr(err, "NoCredentialProviders", "") {
+			// If a profile wasn't specified, the session may still be able to resolve credentials from shared config.
+			if c.Profile == "" {
+				sess, err := session.NewSession()
+				if err != nil {
+					return nil, errors.New(`No valid credential sources found for AWS Provider.
+  Please see https://terraform.io/docs/providers/aws/index.html for more information on
+  providing credentials for the AWS Provider`)
+				}
+				_, err = sess.Config.Credentials.Get()
+				if err != nil {
+					return nil, errors.New(`No valid credential sources found for AWS Provider.
+  Please see https://terraform.io/docs/providers/aws/index.html for more information on
+  providing credentials for the AWS Provider`)
+				}
+				log.Printf("[INFO] Using session-derived AWS Auth")
+				options.Config.Credentials = sess.Config.Credentials
+			} else {
+				log.Printf("[INFO] AWS Auth using Profile: %q", c.Profile)
+				options.Profile = c.Profile
+				options.SharedConfigState = session.SharedConfigEnable
+			}
+		} else {
+			return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
+		}
+	} else {
+		// add the validated credentials to the session options
+		log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
+		options.Config.Credentials = creds
+	}
+
+	if c.Insecure {
+		transport := options.Config.HTTPClient.Transport.(*http.Transport)
+		transport.TLSClientConfig = &tls.Config{
+			InsecureSkipVerify: true,
+		}
+	}
+
+	if c.DebugLogging {
+		options.Config.LogLevel = aws.LogLevel(aws.LogDebugWithHTTPBody | aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors)
+		options.Config.Logger = DebugLogger{}
+	}
+
+	return options, nil
+}
+
+// GetSession attempts to return a valid AWS Go SDK session
+func GetSession(c *Config) (*session.Session, error) {
+	options, err := GetSessionOptions(c)
+
+	if err != nil {
+		return nil, err
+	}
+
+	sess, err := session.NewSessionWithOptions(*options)
+	if err != nil {
+		if IsAWSErr(err, "NoCredentialProviders", "") {
+			return nil, errors.New(`No valid credential sources found for AWS Provider.
+  Please see https://terraform.io/docs/providers/aws/index.html for more information on
+  providing credentials for the AWS Provider`)
+		}
+		return nil, fmt.Errorf("Error creating AWS session: %s", err)
+	}
+
+	if c.MaxRetries > 0 {
+		sess = sess.Copy(&aws.Config{MaxRetries: aws.Int(c.MaxRetries)})
+	}
+
+	for _, product := range c.UserAgentProducts {
+		sess.Handlers.Build.PushBack(request.MakeAddToUserAgentHandler(product.Name, product.Version, product.Extra...))
+	}
+
+	// Generally, we want to configure a lower retry threshold for networking issues
+	// as the session retry threshold is very high by default and can mask permanent
+	// networking failures, such as a non-existent service endpoint.
+	// MaxRetries will override this logic if it has a lower retry threshold.
+	// NOTE: This logic can be fooled by other request errors raising the retry count
+	// before any networking error occurs
+	sess.Handlers.Retry.PushBack(func(r *request.Request) {
+		// We currently depend on the DefaultRetryer exponential backoff here.
+		// ~10 retries gives a fair backoff of a few seconds.
+		if r.RetryCount < 9 {
+			return
+		}
+		// RequestError: send request failed
+		// caused by: Post https://FQDN/: dial tcp: lookup FQDN: no such host
+		if IsAWSErrExtended(r.Error, "RequestError", "send request failed", "no such host") {
+			log.Printf("[WARN] Disabling retries after next request due to networking issue")
+			r.Retryable = aws.Bool(false)
+		}
+		// RequestError: send request failed
+		// caused by: Post https://FQDN/: dial tcp IPADDRESS:443: connect: connection refused
+		if IsAWSErrExtended(r.Error, "RequestError", "send request failed", "connection refused") {
+			log.Printf("[WARN] Disabling retries after next request due to networking issue")
+			r.Retryable = aws.Bool(false)
+		}
+	})
+
+	if !c.SkipCredsValidation {
+		stsClient := sts.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.StsEndpoint)}))
+		if _, _, err := GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsClient); err != nil {
+			return nil, fmt.Errorf("error validating provider credentials: %s", err)
+		}
+	}
+
+	return sess, nil
+}
+
+// GetSessionWithAccountIDAndPartition attempts to return a valid AWS Go SDK session
+// along with account ID and partition information if available
+func GetSessionWithAccountIDAndPartition(c *Config) (*session.Session, string, string, error) {
+	sess, err := GetSession(c)
+
+	if err != nil {
+		return nil, "", "", err
+	}
+
+	if c.AssumeRoleARN != "" {
+		accountID, partition, _ := parseAccountIDAndPartitionFromARN(c.AssumeRoleARN)
+		return sess, accountID, partition, nil
+	}
+
+	iamClient := iam.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}))
+	stsClient := sts.New(sess.Copy(&aws.Config{Endpoint: aws.String(c.StsEndpoint)}))
+
+	if !c.SkipCredsValidation {
+		accountID, partition, err := GetAccountIDAndPartitionFromSTSGetCallerIdentity(stsClient)
+
+		if err != nil {
+			return nil, "", "", fmt.Errorf("error validating provider credentials: %s", err)
+		}
+
+		return sess, accountID, partition, nil
+	}
+
+	if !c.SkipRequestingAccountId {
+		credentialsProviderName := ""
+
+		if credentialsValue, err := sess.Config.Credentials.Get(); err == nil {
+			credentialsProviderName = credentialsValue.ProviderName
+		}
+
+		accountID, partition, err := GetAccountIDAndPartition(iamClient, stsClient, credentialsProviderName)
+
+		if err == nil {
+			return sess, accountID, partition, nil
+		}
+
+		return nil, "", "", fmt.Errorf(
+			"AWS account ID not previously found and failed retrieving via all available methods. "+
"+ + "See https://www.terraform.io/docs/providers/aws/index.html#skip_requesting_account_id for workaround and implications. "+ + "Errors: %s", err) + } + + var partition string + if p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), c.Region); ok { + partition = p.ID() + } + + return sess, "", partition, nil +} diff --git a/vendor/github.com/hashicorp/aws-sdk-go-base/validation.go b/vendor/github.com/hashicorp/aws-sdk-go-base/validation.go new file mode 100644 index 00000000..bf320351 --- /dev/null +++ b/vendor/github.com/hashicorp/aws-sdk-go-base/validation.go @@ -0,0 +1,44 @@ +package awsbase + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// ValidateAccountID checks if the given AWS account ID is specifically allowed or forbidden. +// The allowedAccountIDs can be used as a whitelist and forbiddenAccountIDs can be used as a blacklist. +func ValidateAccountID(accountID string, allowedAccountIDs, forbiddenAccountIDs []string) error { + if len(forbiddenAccountIDs) > 0 { + for _, forbiddenAccountID := range forbiddenAccountIDs { + if accountID == forbiddenAccountID { + return fmt.Errorf("Forbidden AWS Account ID: %s", accountID) + } + } + } + + if len(allowedAccountIDs) > 0 { + for _, allowedAccountID := range allowedAccountIDs { + if accountID == allowedAccountID { + return nil + } + } + + return fmt.Errorf("AWS Account ID not allowed: %s)", accountID) + } + + return nil +} + +// ValidateRegion checks if the given region is a valid AWS region. +func ValidateRegion(region string) error { + for _, partition := range endpoints.DefaultPartitions() { + for _, partitionRegion := range partition.Regions() { + if region == partitionRegion.ID() { + return nil + } + } + } + + return fmt.Errorf("Invalid AWS Region: %s", region) +} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. 
“Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. 
You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/consul/NOTICE.md b/vendor/github.com/hashicorp/consul/NOTICE.md
new file mode 100644
index 00000000..fe34b5e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/NOTICE.md
@@ -0,0 +1,3 @@
+Copyright © 2014-2018 HashiCorp, Inc.
+
+This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this project, you can obtain one at http://mozilla.org/MPL/2.0/.
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 00000000..3255cbb2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,67 @@
+Consul API client
+=================
+
+The `api` package provides programmatic
+access to the full Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+package main
+
+import "github.com/hashicorp/consul/api"
+import "fmt"
+
+func main() {
+	// Get a new client
+	client, err := api.NewClient(api.DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+
+	// Get a handle to the KV API
+	kv := client.KV()
+
+	// PUT a new KV pair
+	p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
+	_, err = kv.Put(p, nil)
+	if err != nil {
+		panic(err)
+	}
+
+	// Lookup the pair
+	pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
+}
+```
+
+To run this example, start a Consul server:
+
+```bash
+consul agent -dev
+```
+
+Copy the code above into a file such as `main.go`.
+
+Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
+ +```bash +$ go get +$ go run main.go +KV: REDIS_MAXCLIENTS 1000 +``` + +After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 00000000..124409ff --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,1116 @@ +package api + +import ( + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/mitchellh/mapstructure" +) + +const ( + // ACLClientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +type ACLTokenPolicyLink struct { + ID string + Name string +} +type ACLTokenRoleLink struct { + ID string + Name string +} + +// ACLToken represents an ACL Token +type ACLToken struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + SecretID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTTL time.Duration `json:",omitempty"` + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time `json:",omitempty"` + Hash []byte `json:",omitempty"` + + // DEPRECATED (ACL-Legacy-Compat) + // Rules will only be present for legacy tokens returned via the new APIs + Rules string `json:",omitempty"` +} + +type ACLTokenListEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + AccessorID string + Description string + Policies []*ACLTokenPolicyLink `json:",omitempty"` + Roles []*ACLTokenRoleLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Local bool + ExpirationTime *time.Time `json:",omitempty"` + CreateTime time.Time + Hash []byte + Legacy bool +} + +// ACLEntry is used to represent a legacy ACL token +// The legacy tokens are deprecated. +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicationType string + ReplicatedIndex uint64 + ReplicatedRoleIndex uint64 + ReplicatedTokenIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// ACLServiceIdentity represents a high-level grant of all necessary privileges +// to assume the identity of the named Service in the Catalog and within +// Connect. +type ACLServiceIdentity struct { + ServiceName string + Datacenters []string `json:",omitempty"` +} + +// ACLPolicy represents an ACL Policy. +type ACLPolicy struct { + ID string + Name string + Description string + Rules string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLPolicyListEntry struct { + ID string + Name string + Description string + Datacenters []string + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLRolePolicyLink struct { + ID string + Name string +} + +// ACLRole represents an ACL Role. 
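+// A role bundles policies and service identities so they can be attached
+// to tokens as a single unit.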
+type ACLRole struct { + ID string + Name string + Description string + Policies []*ACLRolePolicyLink `json:",omitempty"` + ServiceIdentities []*ACLServiceIdentity `json:",omitempty"` + Hash []byte + CreateIndex uint64 + ModifyIndex uint64 +} + +// BindingRuleBindType is the type of binding rule mechanism used. +type BindingRuleBindType string + +const ( + // BindingRuleBindTypeService binds to a service identity with the given name. + BindingRuleBindTypeService BindingRuleBindType = "service" + + // BindingRuleBindTypeRole binds to pre-existing roles with the given name. + BindingRuleBindTypeRole BindingRuleBindType = "role" +) + +type ACLBindingRule struct { + ID string + Description string + AuthMethod string + Selector string + BindType BindingRuleBindType + BindName string + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethod struct { + Name string + Type string + Description string + + // Configuration is arbitrary configuration for the auth method. This + // should only contain primitive values and containers (such as lists and + // maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +type ACLAuthMethodListEntry struct { + Name string + Type string + Description string + CreateIndex uint64 + ModifyIndex uint64 +} + +// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed +// KubernetesAuthMethodConfig. +func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) { + var config KubernetesAuthMethodConfig + decodeConf := &mapstructure.DecoderConfig{ + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// KubernetesAuthMethodConfig is the config for the built-in Consul auth method +// for Kubernetes. +type KubernetesAuthMethodConfig struct { + Host string `json:",omitempty"` + CACert string `json:",omitempty"` + ServiceAccountJWT string `json:",omitempty"` +} + +// RenderToConfig converts this into a map[string]interface{} suitable for use +// in the ACLAuthMethod.Config field. +func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} { + return map[string]interface{}{ + "Host": c.Host, + "CACert": c.CACert, + "ServiceAccountJWT": c.ServiceAccountJWT, + } +} + +type ACLLoginParams struct { + AuthMethod string + BearerToken string + Meta map[string]string `json:",omitempty"` +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster +// to get the first management token. +func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Create is used to generate a new token with the given parameters +// +// Deprecated: Use TokenCreate instead. 
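The `Bootstrap` endpoint above is a one-time operation that mints the cluster's initial management token. A minimal usage sketch, assuming a freshly started agent with ACLs enabled that has not been bootstrapped yet (a second call returns an error):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Connect to the local agent (see api.DefaultConfig for the defaults).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Bootstrap succeeds exactly once per cluster and returns the
	// initial management token.
	token, _, err := client.ACL().Bootstrap()
	if err != nil {
		panic(err)
	}
	fmt.Println("accessor:", token.AccessorID)
	fmt.Println("secret:  ", token.SecretID)
}
```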
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +// +// Deprecated: Use TokenUpdate instead. +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +// +// Deprecated: Use TokenDelete instead. +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +// +// Deprecated: Use TokenClone instead. +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +// +// Deprecated: Use TokenRead instead. +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +// +// Deprecated: Use TokenList instead. 
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields +// of the ACLToken structure are empty they will be filled in by Consul. +func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/token") + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid +// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may +// be omitted and will be filled in by Consul with its existing value. +func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if token.AccessorID == "" { + return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating") + } + r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID) + r.setWriteOptions(q) + r.obj = token + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenClone will create a new token with the same policies and locality as the original +// token but will have its own auto-generated AccessorID and SecretID as well having the +// description passed to this function. The tokenID parameter must be a valid Accessor ID +// of an existing token. +func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + if tokenID == "" { + return nil, nil, fmt.Errorf("Must specify a tokenID for Token Cloning") + } + + r := a.c.newRequest("PUT", "/v1/acl/token/"+tokenID+"/clone") + r.setWriteOptions(q) + r.obj = struct{ Description string }{description} + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// TokenDelete removes a single ACL token. 
The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("DELETE", "/v1/acl/token/"+tokenID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// TokenRead retrieves the full token details. The tokenID parameter must be a valid
+// Accessor ID of an existing token.
+func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenReadSelf retrieves the full token details of the token currently
+// assigned to the API Client. In this manner it's possible to read a token
+// by its Secret ID.
+func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/token/self")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out ACLToken
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, qm, nil
+}
+
+// TokenList lists all tokens. The listing does not contain any SecretIDs as those
+// may only be retrieved by a call to TokenRead.
+func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/tokens")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLTokenListEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// PolicyCreate will create a new policy. It is not allowed for the policy parameter's
+// ID field to be set as this will be generated by Consul while processing the request.
+func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
+	if policy.ID != "" {
+		return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
+	}
+	r := a.c.newRequest("PUT", "/v1/acl/policy")
+	r.setWriteOptions(q)
+	r.obj = policy
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out ACLPolicy
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return &out, wm, nil
+}
+
+// PolicyUpdate updates a policy.
The ID field of the policy parameter must be set to an +// existing policy ID +func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) { + if policy.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Policy Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID) + r.setWriteOptions(q) + r.obj = policy + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// PolicyDelete deletes a policy given its ID. +func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// PolicyRead retrieves the policy details including the rule set. +func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ACLPolicy + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// PolicyList retrieves a listing of all policies. The listing does not include the +// rules for any policy as those should be retrieved by subsequent calls to PolicyRead. +func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/policies") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLPolicyListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// RulesTranslate translates the legacy rule syntax into the current syntax. +// +// Deprecated: Support for the legacy syntax translation will be removed +// when legacy ACL support is removed. +func (a *ACL) RulesTranslate(rules io.Reader) (string, error) { + r := a.c.newRequest("POST", "/v1/acl/rules/translate") + r.body = rules + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + ruleBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("Failed to read translated rule body: %v", err) + } + + return string(ruleBytes), nil +} + +// RulesTranslateToken translates the rules associated with the legacy syntax +// into the current syntax and returns the results. +// +// Deprecated: Support for the legacy syntax translation will be removed +// when legacy ACL support is removed. 
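The token and policy endpoints above compose naturally: create a policy, then mint a token that links to it by name. A minimal sketch, assuming the client carries a management token (for example via the `CONSUL_HTTP_TOKEN` environment variable); the policy name and rules are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	acl := client.ACL()

	// Create a policy; the ID must be empty and is assigned by Consul.
	policy, _, err := acl.PolicyCreate(&api.ACLPolicy{
		Name:  "node-read",
		Rules: `node_prefix "" { policy = "read" }`,
	}, nil)
	if err != nil {
		panic(err)
	}

	// Mint a token linked to the policy; Consul fills in the
	// AccessorID and SecretID.
	token, _, err := acl.TokenCreate(&api.ACLToken{
		Description: "read-only node token",
		Policies:    []*api.ACLTokenPolicyLink{{Name: policy.Name}},
	}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("accessor:", token.AccessorID)
}
```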
+func (a *ACL) RulesTranslateToken(tokenID string) (string, error) { + r := a.c.newRequest("GET", "/v1/acl/rules/translate/"+tokenID) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + ruleBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("Failed to read translated rule body: %v", err) + } + + return string(ruleBytes), nil +} + +// RoleCreate will create a new role. It is not allowed for the role parameters +// ID field to be set as this will be generated by Consul while processing the request. +func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/role") + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleUpdate updates a role. The ID field of the role parameter must be set to an +// existing role ID +func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) { + if role.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Role Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID) + r.setWriteOptions(q) + r.obj = role + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// RoleDelete deletes a role given its ID. +func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// RoleRead retrieves the role details (by ID). Returns nil if not found. +func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/"+roleID) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleReadByName retrieves the role details (by name). Returns nil if not found. 
+func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLRole + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// RoleList retrieves a listing of all roles. The listing does not include some +// metadata for the role as those should be retrieved by subsequent calls to +// RoleRead. +func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/roles") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLRole + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// AuthMethodCreate will create a new auth method. +func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method") + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodUpdate updates an auth method. +func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) { + if method.Name == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name)) + r.setWriteOptions(q) + r.obj = method + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// AuthMethodDelete deletes an auth method given its Name. +func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) { + if methodName == "" { + return nil, fmt.Errorf("Must specify a Name in Auth Method Delete") + } + + r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// AuthMethodRead retrieves the auth method. Returns nil if not found. 
+func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) { + if methodName == "" { + return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read") + } + + r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName)) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLAuthMethod + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// AuthMethodList retrieves a listing of all auth methods. The listing does not +// include some metadata for the auth method as those should be retrieved by +// subsequent calls to AuthMethodRead. +func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/auth-methods") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLAuthMethodListEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// BindingRuleCreate will create a new binding rule. It is not allowed for the +// binding rule parameter's ID field to be set as this will be generated by +// Consul while processing the request. +func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID != "" { + return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule") + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleUpdate updates a binding rule. The ID field of the role binding +// rule parameter must be set to an existing binding rule ID. +func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) { + if rule.ID == "" { + return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update") + } + + r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID) + r.setWriteOptions(q) + r.obj = rule + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, wm, nil +} + +// BindingRuleDelete deletes a binding rule given its ID. +func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// BindingRuleRead retrieves the binding rule details. Returns nil if not found. 
+func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID) + r.setQueryOptions(q) + found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if !found { + return nil, qm, nil + } + + var out ACLBindingRule + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + + return &out, qm, nil +} + +// BindingRuleList retrieves a listing of all binding rules. +func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/binding-rules") + if methodName != "" { + r.params.Set("authmethod", methodName) + } + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLBindingRule + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Login is used to exchange auth method credentials for a newly-minted Consul Token. +func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/login") + r.setWriteOptions(q) + r.obj = auth + + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out ACLToken + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, wm, nil +} + +// Logout is used to destroy a Consul Token created via Login(). +func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("POST", "/v1/acl/logout") + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 00000000..04043ba8 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,1035 @@ +package api + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/url" +) + +// ServiceKind is the kind of service being registered. +type ServiceKind string + +const ( + // ServiceKindTypical is a typical, classic Consul service. This is + // represented by the absence of a value. This was chosen for ease of + // backwards compatibility: existing services in the catalog would + // default to the typical service. + ServiceKindTypical ServiceKind = "" + + // ServiceKindConnectProxy is a proxy for the Connect feature. This + // service proxies another service within Consul and speaks the connect + // protocol. + ServiceKindConnectProxy ServiceKind = "connect-proxy" +) + +// ProxyExecMode is the execution mode for a managed Connect proxy. +type ProxyExecMode string + +const ( + // ProxyExecModeDaemon indicates that the proxy command should be long-running + // and should be started and supervised by the agent until it's target service + // is deregistered. 
+	ProxyExecModeDaemon ProxyExecMode = "daemon"
+
+	// ProxyExecModeScript indicates that the proxy command should be invoked to
+	// completion on each change to the configuration or lifecycle event. The
+	// script typically fetches the config and certificates from the agent API and
+	// then configures an externally managed daemon, perhaps starting and stopping
+	// it if necessary.
+	ProxyExecModeScript ProxyExecMode = "script"
+)
+
+// UpstreamDestType is the type of upstream discovery mechanism.
+type UpstreamDestType string
+
+const (
+	// UpstreamDestTypeService discovers instances via healthy service lookup.
+	UpstreamDestTypeService UpstreamDestType = "service"
+
+	// UpstreamDestTypePreparedQuery discovers instances via prepared query
+	// execution.
+	UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+	Definition  HealthCheckDefinition
+}
+
+// AgentWeights represent optional weights for a service
+type AgentWeights struct {
+	Passing int
+	Warning int
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	Kind              ServiceKind `json:",omitempty"`
+	ID                string
+	Service           string
+	Tags              []string
+	Meta              map[string]string
+	Port              int
+	Address           string
+	Weights           AgentWeights
+	EnableTagOverride bool
+	CreateIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ModifyIndex       uint64 `json:",omitempty" bexpr:"-"`
+	ContentHash       string `json:",omitempty" bexpr:"-"`
+	// DEPRECATED (ProxyDestination) - remove this field
+	ProxyDestination string                          `json:",omitempty" bexpr:"-"`
+	Proxy            *AgentServiceConnectProxyConfig `json:",omitempty"`
+	Connect          *AgentServiceConnect            `json:",omitempty"`
+}
+
+// AgentServiceChecksInfo returns information about a Service and its checks
+type AgentServiceChecksInfo struct {
+	AggregatedStatus string
+	Service          *AgentService
+	Checks           HealthChecks
+}
+
+// AgentServiceConnect represents the Connect configuration of a service.
+type AgentServiceConnect struct {
+	Native         bool                      `json:",omitempty"`
+	Proxy          *AgentServiceConnectProxy `json:",omitempty" bexpr:"-"`
+	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
+}
+
+// AgentServiceConnectProxy represents the Connect Proxy configuration of a
+// service.
+type AgentServiceConnectProxy struct {
+	ExecMode  ProxyExecMode          `json:",omitempty"`
+	Command   []string               `json:",omitempty"`
+	Config    map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams []Upstream             `json:",omitempty"`
+}
+
+// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
+// ServiceDefinition or response.
+type AgentServiceConnectProxyConfig struct {
+	DestinationServiceName string
+	DestinationServiceID   string                 `json:",omitempty"`
+	LocalServiceAddress    string                 `json:",omitempty"`
+	LocalServicePort       int                    `json:",omitempty"`
+	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
+	Upstreams              []Upstream
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AllSegments is used to select for all segments in MembersOpts.
+const AllSegments = "_all"
+
+// MembersOpts is used for querying member information.
+type MembersOpts struct { + // WAN is whether to show members from the WAN. + WAN bool + + // Segment is the LAN segment to show members for. Setting this to the + // AllSegments value above will show members in all segments. + Segment string +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + Kind ServiceKind `json:",omitempty"` + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Meta map[string]string `json:",omitempty"` + Weights *AgentWeights `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks + // DEPRECATED (ProxyDestination) - remove this field + ProxyDestination string `json:",omitempty"` + Proxy *AgentServiceConnectProxyConfig `json:",omitempty"` + Connect *AgentServiceConnect `json:",omitempty"` +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + CheckID string `json:",omitempty"` + Name string `json:",omitempty"` + Args []string `json:"ScriptArgs,omitempty"` + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. + Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + GRPC string `json:",omitempty"` + GRPCUseTLS bool `json:",omitempty"` + AliasNode string `json:",omitempty"` + AliasService string `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// AgentToken is used when updating ACL tokens for an agent. +type AgentToken struct { + Token string +} + +// Metrics info is used to store different types of metric values from the agent. +type MetricsInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +// GaugeValue stores one value that is updated as time goes on, such as +// the amount of memory allocated. +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +// PointValue holds a series of points for a metric. +type PointValue struct { + Name string + Points []float32 +} + +// SampledValue stores info about a metric that is incremented over time, +// such as the number of requests to an HTTP endpoint. 
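Putting the registration structures above together: a service with an HTTP check that is deregistered if it stays critical too long. A minimal sketch, assuming a service listening locally on port 8080; names and intervals are illustrative:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Register a service with an HTTP health check. The service (and its
	// checks) is deregistered automatically if the check stays critical
	// for more than 1m (see DeregisterCriticalServiceAfter above).
	reg := &api.AgentServiceRegistration{
		ID:   "web-1",
		Name: "web",
		Port: 8080,
		Tags: []string{"v1"},
		Check: &api.AgentServiceCheck{
			HTTP:                           "http://127.0.0.1:8080/health",
			Interval:                       "10s",
			Timeout:                        "2s",
			DeregisterCriticalServiceAfter: "1m",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		panic(err)
	}
}
```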
+type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Stddev float64 + Labels map[string]string +} + +// AgentAuthorizeParams are the request parameters for authorizing a request. +type AgentAuthorizeParams struct { + Target string + ClientCertURI string + ClientCertSerial string +} + +// AgentAuthorize is the response structure for Connect authorization. +type AgentAuthorize struct { + Authorized bool + Reason string +} + +// ConnectProxyConfig is the response structure for agent-local proxy +// configuration. +type ConnectProxyConfig struct { + ProxyServiceID string + TargetServiceID string + TargetServiceName string + ContentHash string + // DEPRECATED(managed-proxies) - this struct is re-used for sidecar configs + // but they don't need ExecMode or Command + ExecMode ProxyExecMode `json:",omitempty"` + Command []string `json:",omitempty"` + Config map[string]interface{} `bexpr:"-"` + Upstreams []Upstream +} + +// Upstream is the response structure for a proxy upstream configuration. +type Upstream struct { + DestinationType UpstreamDestType `json:",omitempty"` + DestinationNamespace string `json:",omitempty"` + DestinationName string + Datacenter string `json:",omitempty"` + LocalBindAddress string `json:",omitempty"` + LocalBindPort int `json:",omitempty"` + Config map[string]interface{} `json:",omitempty" bexpr:"-"` +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Host is used to retrieve information about the host the +// agent is running on such as CPU, memory, and disk. Requires +// a operator:read ACL token. +func (a *Agent) Host() (map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/host") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Metrics is used to query the agent we are speaking to for +// its current internal metric data +func (a *Agent) Metrics() (*MetricsInfo, error) { + r := a.c.newRequest("GET", "/v1/agent/metrics") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out *MetricsInfo + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. 
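`Self` and `Metrics` are the usual first calls for inspecting a running agent. A minimal sketch against a local agent (note `Self` returns the configuration as nested maps, so real code would type-assert the values it needs):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Self returns the agent's full configuration, keyed by section.
	self, err := agent.Self()
	if err != nil {
		panic(err)
	}
	fmt.Println("node name:", self["Config"]["NodeName"])

	// Metrics returns the agent's in-memory telemetry snapshot.
	metrics, err := agent.Metrics()
	if err != nil {
		panic(err)
	}
	for _, g := range metrics.Gauges {
		fmt.Println(g.Name, g.Value)
	}
}
```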
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + return a.ChecksWithFilter("") +} + +// ChecksWithFilter returns a subset of the locally registered checks that match +// the given filter expression +func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + r.filterQuery(filter) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + return a.ServicesWithFilter("") +} + +// ServicesWithFilter returns a subset of the locally registered services that match +// the given filter expression +func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + r.filterQuery(filter) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return out, nil +} + +// AgentHealthServiceByID returns for a given serviceID: the aggregated health status, the service definition or an error if any +// - If the service is not found, will return status (critical, nil, nil) +// - If the service is found, will return (critical|passing|warning), AgentServiceChecksInfo, nil) +// - In all other cases, will return an error +func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) { + path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID)) + r := a.c.newRequest("GET", path) + r.params.Add("format", "json") + r.header.Set("Accept", "application/json") + _, resp, err := a.c.doRequest(r) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + // Service not Found + if resp.StatusCode == http.StatusNotFound { + return HealthCritical, nil, nil + } + var out *AgentServiceChecksInfo + if err := decodeBody(resp, &out); err != nil { + return HealthCritical, out, err + } + switch resp.StatusCode { + case http.StatusOK: + return HealthPassing, out, nil + case http.StatusTooManyRequests: + return HealthWarning, out, nil + case http.StatusServiceUnavailable: + return HealthCritical, out, nil + } + return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path) +} + +// AgentHealthServiceByName returns for a given service name: the aggregated health status for all services +// having the specified name. 
+// - If no service is found, will return status (critical, [], nil)
+// - If the service is found, will return (critical|passing|warning), []api.AgentServiceChecksInfo, nil)
+// - In all other cases, will return an error
+func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
+	path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
+	r := a.c.newRequest("GET", path)
+	r.params.Add("format", "json")
+	r.header.Set("Accept", "application/json")
+	_, resp, err := a.c.doRequest(r)
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+	// Service not Found
+	if resp.StatusCode == http.StatusNotFound {
+		return HealthCritical, nil, nil
+	}
+	var out []AgentServiceChecksInfo
+	if err := decodeBody(resp, &out); err != nil {
+		return HealthCritical, out, err
+	}
+	switch resp.StatusCode {
+	case http.StatusOK:
+		return HealthPassing, out, nil
+	case http.StatusTooManyRequests:
+		return HealthWarning, out, nil
+	case http.StatusServiceUnavailable:
+		return HealthCritical, out, nil
+	}
+	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
+}
+
+// Service returns a locally registered service instance and allows for
+// hash-based blocking.
+//
+// Note that this uses an unconventional blocking mechanism since it's
+// agent-local state. That means there is no persistent raft index so we block
+// based on object hash instead.
+func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+
+	return out, qm, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MembersOpts returns the known gossip members and can be passed
+// additional options for WAN/segment filtering.
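Combining the lookups above: filter the locally registered instances, then ask for the aggregated health of everything sharing the name. A minimal sketch; the service name `web` is a placeholder, and the filter string uses Consul's filter-expression syntax:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Filter locally registered services by name ("web" is a placeholder
	// for a service registered on this agent).
	services, err := agent.ServicesWithFilter(`Service == "web"`)
	if err != nil {
		panic(err)
	}
	fmt.Println("matching instances:", len(services))

	// Aggregate health across every instance of the name: the first
	// return value is one of "passing", "warning" or "critical".
	status, info, err := agent.AgentHealthServiceByName("web")
	if err != nil {
		panic(err)
	}
	fmt.Println("aggregated status:", status, "instances:", len(info))
}
```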
+func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + r.params.Set("segment", opts.Segment) + if opts.WAN { + r.params.Set("wan", "1") + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). 
+ Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). +func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ConnectAuthorize is used to authorize an incoming connection +// to a natively integrated Connect service. +func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) { + r := a.c.newRequest("POST", "/v1/agent/connect/authorize") + r.obj = auth + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AgentAuthorize + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return &out, nil +} + +// ConnectCARoots returns the list of roots. 
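A typical TTL-check lifecycle with the endpoints above: register a check with a TTL, then heartbeat it with `UpdateTTL` before the TTL lapses. A minimal sketch; the check ID and intervals are illustrative:

```go
package main

import (
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Register a standalone TTL check; it turns critical if no update
	// arrives within 15s.
	err = agent.CheckRegister(&api.AgentCheckRegistration{
		ID:   "heartbeat",
		Name: "example heartbeat",
		AgentServiceCheck: api.AgentServiceCheck{
			TTL: "15s",
		},
	})
	if err != nil {
		panic(err)
	}

	// Keep it passing; UpdateTTL accepts "pass"/"warn"/"fail" and
	// translates them to the canonical health states.
	for {
		if err := agent.UpdateTTL("heartbeat", "all good", "pass"); err != nil {
			panic(err)
		}
		time.Sleep(10 * time.Second)
	}
}
```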
+func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// ConnectCALeaf gets the leaf certificate for the given service ID. +func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out LeafCert + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// ConnectProxyConfig gets the configuration for a local managed proxy instance. +// +// Note that this uses an unconventional blocking mechanism since it's +// agent-local state. That means there is no persistent raft index so we block +// based on object hash instead. +func (a *Agent) ConnectProxyConfig(proxyServiceID string, q *QueryOptions) (*ConnectProxyConfig, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/agent/connect/proxy/"+proxyServiceID) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out ConnectProxyConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
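Service maintenance mode is the usual way to drain an instance during a deploy: enabling it registers a critical maintenance check that keeps the instance out of service discovery until the mode is disabled again. A minimal sketch; the service ID `web-1` is a placeholder:

```go
package main

import "github.com/hashicorp/consul/api"

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	agent := client.Agent()

	// Take one service instance out of rotation; Consul registers a
	// critical maintenance check until the mode is disabled again.
	if err := agent.EnableServiceMaintenance("web-1", "rolling deploy"); err != nil {
		panic(err)
	}

	// ... perform the deploy ...

	if err := agent.DisableServiceMaintenance("web-1"); err != nil {
		panic(err)
	}
}
```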
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream. An empty string will be sent down the given channel when there's +// nothing left to stream, after which the caller should close the stopCh. +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above +func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_token", token, q) +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_token", token, q) +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above +func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_master_token", token, q) +} + +// UpdateACLReplicationToken updates the agent's "acl_replication_token". See +// updateToken for more details. +// +// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above +func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_replication_token", token, q) +} + +// UpdateDefaultACLToken updates the agent's "default" token. See updateToken +// for more details +func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("default", "acl_token", token, q) +} + +// UpdateAgentACLToken updates the agent's "agent" token. See updateToken +// for more details +func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("agent", "acl_agent_token", token, q) +} + +// UpdateAgentMasterACLToken updates the agent's "agent_master" token. 
See updateToken +// for more details. +func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q) +} + +// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken +// for more details. +func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateTokenFallback("replication", "acl_replication_token", token, q) +} + +// updateToken can be used to update one of an agent's ACL tokens after the agent has +// started. The tokens may not be persisted, so they will need to be updated again if +// the agent is restarted unless the agent is configured to persist them. +func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { + meta, _, err := a.updateTokenOnce(target, token, q) + return meta, err +} + +func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) { + meta, status, err := a.updateTokenOnce(target, token, q) + if err != nil && status == 404 { + meta, _, err = a.updateTokenOnce(fallback, token, q) + } + return meta, err +} + +func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) { + r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) + r.setWriteOptions(q) + r.obj = &AgentToken{Token: token} + + rtt, resp, err := a.c.doRequest(r) + if err != nil { + return nil, 0, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + + return wm, resp.StatusCode, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 00000000..11c3a2cd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,966 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPTokenFileEnvName defines an environment variable name which sets + // the HTTP token file. + HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS.
+ HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" + + // GRPCAddrEnvName defines an environment variable name which sets the gRPC + // address for consul connect envoy. Note this isn't actually used by the api + // client in this package but is defined here for consistency with all the + // other ENV names we use. + GRPCAddrEnvName = "CONSUL_GRPC_ADDR" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // UseCache requests that the agent cache results locally. See + // https://www.consul.io/api/features/caching.html for more details on the + // semantics. + UseCache bool + + // MaxAge limits how old a cached value will be returned if UseCache is true. + // If there is a cached response that is older than the MaxAge, it is treated + // as a cache miss and a new fetch invoked. If the fetch fails, the error is + // returned. Clients that wish to allow for stale results on error can set + // StaleIfError to a longer duration to change this behavior. It is ignored + // if the endpoint supports background refresh caching. See + // https://www.consul.io/api/features/caching.html for more details. + MaxAge time.Duration + + // StaleIfError specifies how stale the client will accept a cached response + // if the servers are unavailable to fetch a fresh one. Only makes sense when + // UseCache is true and MaxAge is set to a lower, non-zero value. It is + // ignored if the endpoint supports background refresh caching. See + // https://www.consul.io/api/features/caching.html for more details. + StaleIfError time.Duration + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitHash is used by some endpoints instead of WaitIndex to perform blocking + // on state based on a hash of the response rather than a monotonic index. + // This is required when the state being blocked on is not stored in Raft, for + // example agent-local proxy configuration. + WaitHash string + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. 
Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. Currently, only one key/value pair can + // be provided for filtering. + NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // Connect filters prepared query execution to only include Connect-capable + // services. This currently affects prepared query execution. + Connect bool + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context + + // Filter requests filtering data prior to it being returned. The string + // is a go-bexpr compatible expression. + Filter string +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // LastContentHash. This can be used as a WaitHash to perform a blocking query + // for endpoints that support hash-based blocking. Endpoints that do not + // support it will return an empty hash. + LastContentHash string + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool + + // CacheHit is true if the result was served from agent-local cache. + CacheHit bool + + // CacheAge is set if request was ?cached and indicates how stale the cached + // response is. 
+ CacheAge time.Duration +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate an HTTP client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. + HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // TokenFile is a file containing the current token to use for this client. + // If provided it is read once at startup and never again. + TokenFile string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any, + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object, which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application.
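+//
+// A minimal sketch (editor's addition): long-lived clients should prefer
+// DefaultConfig so idle connections are pooled and reused; the address below
+// is a hypothetical example.
+//
+//	cfg := api.DefaultConfig()
+//	cfg.Address = "consul.service.example:8500" // hypothetical endpoint
+//	client, err := api.NewClient(cfg)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = client // short-lived, one-shot tools could use DefaultNonPooledConfig instead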
+func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. +func defaultConfig(transportFn func() *http.Transport) *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), + } + + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { + config.Address = addr + } + + if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" { + config.TokenFile = tokenFile + } + + if token := os.Getenv(HTTPTokenEnvName); token != "" { + config.Token = token + } + + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) + } + + if enabled { + config.Scheme = "https" + } + } + + if v := os.Getenv(HTTPTLSServerName); v != "" { + config.TLSConfig.Address = v + } + if v := os.Getenv(HTTPCAFile); v != "" { + config.TLSConfig.CAFile = v + } + if v := os.Getenv(HTTPCAPath); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv(HTTPClientCert); v != "" { + config.TLSConfig.CertFile = v + } + if v := os.Getenv(HTTPClientKey); v != "" { + config.TLSConfig.KeyFile = v + } + if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { + doVerify, err := strconv.ParseBool(v) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) + } + if !doVerify { + config.TLSConfig.InsecureSkipVerify = true + } + } + + return config +} + +// SetupTLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS.
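+//
+// Sketch of standalone use (editor's addition; file paths are hypothetical):
+//
+//	tlsCfg, err := api.SetupTLSConfig(&api.TLSConfig{
+//		Address:  "consul.example:8501",       // becomes the ServerName
+//		CAFile:   "/etc/consul/ca.pem",        // hypothetical CA bundle
+//		CertFile: "/etc/consul/client.pem",    // hypothetical client cert
+//		KeyFile:  "/etc/consul/client-key.pem",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = tlsCfg // e.g. assign to an http.Transport's TLSClientConfig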
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + } + + return tlsClientConfig, nil +} + +func (c *Config) GenerateEnv() []string { + env := make([]string, 0, 10) + + env = append(env, + fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address), + fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token), + fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile), + fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"), + fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile), + fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath), + fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile), + fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile), + fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address), + fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify)) + + if c.HttpAuth != nil { + env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password)) + } else { + env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName)) + } + + return env +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile + } + + if config.TLSConfig.KeyFile == "" { + config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + config.Scheme = "http" + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = 
&http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + // If the TokenFile is set, always use that, even if a Token is configured. + // This is because when TokenFile is set it is read into the Token field. + // We want any derived clients to have to re-read the token file. + if config.TokenFile != "" { + data, err := ioutil.ReadFile(config.TokenFile) + if err != nil { + return nil, fmt.Errorf("Error loading token file: %s", err) + } + + if token := strings.TrimSpace(string(data)); token != "" { + config.Token = token + } + } + if config.Token == "" { + config.Token = defConfig.Token + } + + return &Client{config: *config}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + // TODO (slackpad) - Once we get some run time on the HTTP/2 support we + // should turn it on by default if TLS is enabled. We would basically + // just need to call http2.ConfigureTransport(transport) here. We also + // don't want to introduce another external dependency on + // golang.org/x/net/http2 at this time. For a complete recipe for how + // to enable HTTP/2 support on a transport suitable for the API client + // library see agent/http_test.go:TestHTTPServer_H2. + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.WaitHash != "" { + r.params.Set("hash", q.WaitHash) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if q.Filter != "" { + r.params.Set("filter", q.Filter) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + if q.Connect { + r.params.Set("connect", "true") + } + if q.UseCache && !q.RequireConsistent { + r.params.Set("cached", "") + + cc := []string{} + if q.MaxAge > 0 { + cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds())) + } + if q.StaleIfError > 0 { + cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds())) + } + if len(cc) > 0 { + r.header.Set("Cache-Control", strings.Join(cc, ", ")) + } + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. 
If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. +const serverError = "Unexpected response code: 500" + +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. +func IsRetryableError(err error) bool { + if err == nil { + return false + } + + if _, ok := err.(net.Error); ok { + return true + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + header: make(http.Header), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Since(start) + return diff, resp, err +} + +// query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions.
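+//
+// Callers reach these conventions through the typed endpoints plus
+// QueryOptions; e.g. a blocking read loop (editor's sketch, assuming an
+// existing client and the Catalog endpoint defined later in this change):
+//
+//	var lastIndex uint64
+//	for {
+//		nodes, meta, err := client.Catalog().Nodes(&api.QueryOptions{
+//			WaitIndex: lastIndex,       // 0 on the first pass
+//			WaitTime:  5 * time.Minute, // bound the long poll
+//		})
+//		if err != nil {
+//			break
+//		}
+//		lastIndex = meta.LastIndex // feed back for the next blocking call
+//		_ = nodes
+//	}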
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := c.doRequest(r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialize using the standard Consul conventions. +func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +// +// TODO(rb): bug? the error from this function is never handled +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index (if it's set - hash based blocking queries don't + // set this) + if indexStr := header.Get("X-Consul-Index"); indexStr != "" { + index, err := strconv.ParseUint(indexStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + } + q.LastContentHash = header.Get("X-Consul-ContentHash") + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + // Parse Cache info + if cacheStr := header.Get("X-Cache"); cacheStr != "" { + q.CacheHit = strings.EqualFold(cacheStr, "HIT") + } + if ageStr := header.Get("Age"); ageStr != "" { + age, err := strconv.ParseUint(ageStr, 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse Age Header: %v", err) + } + q.CacheAge = time.Duration(age) * time.Second + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + return d, nil, generateUnexpectedResponseCodeError(resp) + } + return d, resp, nil +} + +func (req *request) filterQuery(filter string) { + if filter == "" { + return + } + +
req.params.Set("filter", filter) +} + +// generateUnexpectedResponseCodeError consumes the rest of the body, closes +// the body stream and generates an error indicating the status code was +// unexpected. +func generateUnexpectedResponseCodeError(resp *http.Response) error { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) +} + +func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return false, d, nil, e + } + switch resp.StatusCode { + case 200: + return true, d, resp, nil + case 404: + return false, d, resp, nil + default: + return false, d, nil, generateUnexpectedResponseCodeError(resp) + } +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 00000000..c175c3ff --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,244 @@ +package api + +type Weights struct { + Passing int + Warning int +} + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServiceMeta map[string]string + ServicePort int + ServiceWeights Weights + ServiceEnableTagOverride bool + // DEPRECATED (ProxyDestination) - remove the next comment! + // We forgot to ever add ServiceProxyDestination here so no need to deprecate! + ServiceProxy *AgentServiceConnectProxyConfig + CreateIndex uint64 + Checks HealthChecks + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck + Checks HealthChecks + SkipNodeUpdate bool +} + +type CatalogDeregistration struct { + Node string + Address string // Obsolete. 
+ Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, false) +} + +// ServiceMultipleTags is like Service but supports multiple tags for filtering +func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, false) +} + +// Connect is used to query catalog entries for a given Connect-enabled service +func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + var tags []string + if tag != "" { + tags = []string{tag} + } + return c.service(service, tags, q, true) +} + +// ConnectMultipleTags is like Connect but supports multiple tags for filtering +func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + return c.service(service, tags, q, true) +} + +func (c *Catalog) service(service string, tags []string, q *QueryOptions,
connect bool) ([]*CatalogService, *QueryMeta, error) { + path := "/v1/catalog/service/" + service + if connect { + path = "/v1/catalog/connect/" + service + } + r := c.c.newRequest("GET", path) + r.setQueryOptions(q) + if len(tags) > 0 { + for _, tag := range tags { + r.params.Add("tag", tag) + } + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go new file mode 100644 index 00000000..0c18963f --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -0,0 +1,255 @@ +package api + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + + "github.com/mitchellh/mapstructure" +) + +const ( + ServiceDefaults string = "service-defaults" + ProxyDefaults string = "proxy-defaults" + ProxyConfigGlobal string = "global" +) + +type ConfigEntry interface { + GetKind() string + GetName() string + GetCreateIndex() uint64 + GetModifyIndex() uint64 +} + +type ServiceConfigEntry struct { + Kind string + Name string + Protocol string + CreateIndex uint64 + ModifyIndex uint64 +} + +func (s *ServiceConfigEntry) GetKind() string { + return s.Kind +} + +func (s *ServiceConfigEntry) GetName() string { + return s.Name +} + +func (s *ServiceConfigEntry) GetCreateIndex() uint64 { + return s.CreateIndex +} + +func (s *ServiceConfigEntry) GetModifyIndex() uint64 { + return s.ModifyIndex +} + +type ProxyConfigEntry struct { + Kind string + Name string + Config map[string]interface{} + CreateIndex uint64 + ModifyIndex uint64 +} + +func (p *ProxyConfigEntry) GetKind() string { + return p.Kind +} + +func (p *ProxyConfigEntry) GetName() string { + return p.Name +} + +func (p *ProxyConfigEntry) GetCreateIndex() uint64 { + return p.CreateIndex +} + +func (p *ProxyConfigEntry) GetModifyIndex() uint64 { + return p.ModifyIndex +} + +type rawEntryListResponse struct { + kind string + Entries []map[string]interface{} +} + +func makeConfigEntry(kind, name string) (ConfigEntry, error) { + switch kind { + case ServiceDefaults: + return &ServiceConfigEntry{Name: name}, nil + case ProxyDefaults: + return &ProxyConfigEntry{Name: name}, nil + default: + return nil, fmt.Errorf("invalid config entry kind: %s", kind) + } +} + +func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { + var entry ConfigEntry + + kindVal, ok := raw["Kind"] + if !ok { + kindVal, ok = raw["kind"] + } + if !ok { + return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level") + } + + if kindStr, ok := kindVal.(string); ok { + newEntry, err := makeConfigEntry(kindStr, "") + if err != nil { + return nil, err + } + 
entry = newEntry + } else { + return nil, fmt.Errorf("Kind value in payload is not a string") + } + + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &entry, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + return entry, decoder.Decode(raw) +} + +func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) { + var raw map[string]interface{} + if err := json.Unmarshal(data, &raw); err != nil { + return nil, err + } + + return DecodeConfigEntry(raw) +} + +// ConfigEntries can be used to query the Config endpoints +type ConfigEntries struct { + c *Client +} + +// ConfigEntries returns a handle to the Config endpoints +func (c *Client) ConfigEntries() *ConfigEntries { + return &ConfigEntries{c} +} + +func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) { + if kind == "" || name == "" { + return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty") + } + + entry, err := makeConfigEntry(kind, name) + if err != nil { + return nil, nil, err + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, entry); err != nil { + return nil, nil, err + } + + return entry, qm, nil +} + +func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) { + if kind == "" { + return nil, nil, fmt.Errorf("The kind parameter must not be empty") + } + + r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind)) + r.setQueryOptions(q) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var raw []map[string]interface{} + if err := decodeBody(resp, &raw); err != nil { + return nil, nil, err + } + + var entries []ConfigEntry + for _, rawEntry := range raw { + entry, err := DecodeConfigEntry(rawEntry) + if err != nil { + return nil, nil, err + } + entries = append(entries, entry) + } + + return entries, qm, nil +} + +func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, nil, w) +} + +func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) { + return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w) +} + +func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) { + r := conf.c.newRequest("PUT", "/v1/config") + r.setWriteOptions(w) + for param, value := range params { + r.params.Set(param, value) + } + r.obj = entry + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + + wm := &WriteMeta{RequestTime: rtt} + return res, wm, nil +} + +func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) { + if kind == "" || name == "" { + return nil, fmt.Errorf("Both kind
and name parameters must not be empty") + } + + r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name)) + r.setWriteOptions(w) + rtt, resp, err := requireOK(conf.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go new file mode 100644 index 00000000..a40d1e23 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect.go @@ -0,0 +1,12 @@ +package api + +// Connect can be used to work with endpoints related to Connect, the +// feature for securely connecting services within Consul. +type Connect struct { + c *Client +} + +// Connect returns a handle to the connect-related endpoints +func (c *Client) Connect() *Connect { + return &Connect{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go new file mode 100644 index 00000000..600a3e0d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect_ca.go @@ -0,0 +1,174 @@ +package api + +import ( + "fmt" + "time" + + "github.com/mitchellh/mapstructure" +) + +// CAConfig is the structure for the Connect CA configuration. +type CAConfig struct { + // Provider is the CA provider implementation to use. + Provider string + + // Configuration is arbitrary configuration for the provider. This + // should only contain primitive values and containers (such as lists + // and maps). + Config map[string]interface{} + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CommonCAProviderConfig is the common options available to all CA providers. +type CommonCAProviderConfig struct { + LeafCertTTL time.Duration + SkipValidate bool + CSRMaxPerSecond float32 + CSRMaxConcurrent int +} + +// ConsulCAProviderConfig is the config for the built-in Consul CA provider. +type ConsulCAProviderConfig struct { + CommonCAProviderConfig `mapstructure:",squash"` + + PrivateKey string + RootCert string + RotationPeriod time.Duration +} + +// ParseConsulCAConfig takes a raw config map and returns a parsed +// ConsulCAProviderConfig. +func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) { + var config ConsulCAProviderConfig + decodeConf := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Result: &config, + WeaklyTypedInput: true, + } + + decoder, err := mapstructure.NewDecoder(decodeConf) + if err != nil { + return nil, err + } + + if err := decoder.Decode(raw); err != nil { + return nil, fmt.Errorf("error decoding config: %s", err) + } + + return &config, nil +} + +// CARootList is the structure for the results of listing roots. +type CARootList struct { + ActiveRootID string + TrustDomain string + Roots []*CARoot +} + +// CARoot represents a root CA certificate that is trusted. +type CARoot struct { + // ID is a globally unique ID (UUID) representing this CA root. + ID string + + // Name is a human-friendly name for this CA root. This value is + // opaque to Consul and is not used for anything internally. + Name string + + // RootCertPEM is the PEM-encoded public certificate. + RootCertPEM string `json:"RootCert"` + + // Active is true if this is the current active CA. This must only + // be true for exactly one CA. For any method that modifies roots in the + // state store, tests should be written to verify that multiple roots + // cannot be active. 
+ Active bool + + CreateIndex uint64 + ModifyIndex uint64 +} + +// LeafCert is a certificate that has been issued by a Connect CA. +type LeafCert struct { + // SerialNumber is the unique serial number for this certificate. + // This is encoded in standard hex separated by :. + SerialNumber string + + // CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private + // key for that cert, respectively. This should not be stored in the + // state store, but is present in the sign API response. + CertPEM string `json:",omitempty"` + PrivateKeyPEM string `json:",omitempty"` + + // Service is the name of the service for which the cert was issued. + // ServiceURI is the cert URI value. + Service string + ServiceURI string + + // ValidAfter and ValidBefore are the validity periods for the + // certificate. + ValidAfter time.Time + ValidBefore time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// CARoots queries the list of available roots. +func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/roots") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CARootList + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CAGetConfig returns the current CA configuration. +func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/ca/configuration") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out CAConfig + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// CASetConfig sets the current CA configuration. +func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("PUT", "/v1/connect/ca/configuration") + r.setWriteOptions(q) + r.obj = conf + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go new file mode 100644 index 00000000..a996c03e --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -0,0 +1,302 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "time" +) + +// Intention defines an intention for the Connect Service Graph. This defines +// the allowed or denied behavior of a connection between two services using +// Connect. +type Intention struct { + // ID is the UUID-based ID for the intention, always generated by Consul. + ID string + + // Description is a human-friendly description of this intention. + // It is opaque to Consul and is only stored and transferred in API + // requests. + Description string + + // SourceNS, SourceName are the namespace and name, respectively, of + // the source service. Either of these may be the wildcard "*", but only + // the full value can be a wildcard. Partial wildcards are not allowed. + // The source may also be a non-Consul service, as specified by SourceType. 
+ // +// DestinationNS, DestinationName is the same, but for the destination +// service. The same rules apply. The destination is always a Consul +// service. + SourceNS, SourceName string + DestinationNS, DestinationName string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType + + // Action is whether this is a whitelist or blacklist intention. + Action IntentionAction + + // DefaultAddr, DefaultPort of the local listening proxy (if any) to + // make this connection. + DefaultAddr string + DefaultPort int + + // Meta is arbitrary metadata associated with the intention. This is + // opaque to Consul but is served in API responses. + Meta map[string]string + + // Precedence is the order that the intention will be applied, with + // larger numbers being applied first. This is a read-only field; on + // any intention update it is updated. + Precedence int + + // CreatedAt and UpdatedAt keep track of when this record was created + // or modified. + CreatedAt, UpdatedAt time.Time + + CreateIndex uint64 + ModifyIndex uint64 +} + +// String returns human-friendly output describing this intention. +func (i *Intention) String() string { + return fmt.Sprintf("%s => %s (%s)", + i.SourceString(), + i.DestinationString(), + i.Action) +} + +// SourceString returns the namespace/name format for the source, or +// just "name" if the namespace is the default namespace. +func (i *Intention) SourceString() string { + return i.partString(i.SourceNS, i.SourceName) +} + +// DestinationString returns the namespace/name format for the destination, or +// just "name" if the namespace is the default namespace. +func (i *Intention) DestinationString() string { + return i.partString(i.DestinationNS, i.DestinationName) +} + +func (i *Intention) partString(ns, n string) string { + // For now we omit the default namespace from the output. In the future + // we might want to look at this and show this in a multi-namespace world. + if ns != "" && ns != IntentionDefaultNamespace { + n = ns + "/" + n + } + + return n +} + +// IntentionDefaultNamespace is the default namespace value. +const IntentionDefaultNamespace = "default" + +// IntentionAction is the action that the intention represents. This +// can be "allow" or "deny" to whitelist or blacklist intentions. +type IntentionAction string + +const ( + IntentionActionAllow IntentionAction = "allow" + IntentionActionDeny IntentionAction = "deny" +) + +// IntentionSourceType is the type of the source within an intention. +type IntentionSourceType string + +const ( + // IntentionSourceConsul is a service within the Consul catalog. + IntentionSourceConsul IntentionSourceType = "consul" +) + +// IntentionMatch are the arguments for the intention match API. +type IntentionMatch struct { + By IntentionMatchType + Names []string +} + +// IntentionMatchType is the target for a match request. For example, +// matching by source will look for all intentions that match the given +// source value. +type IntentionMatchType string + +const ( + IntentionMatchSource IntentionMatchType = "source" + IntentionMatchDestination IntentionMatchType = "destination" +) + +// IntentionCheck are the arguments for the intention check API. For +// more documentation see the IntentionCheck function. +type IntentionCheck struct { + // Source and Destination are the source and destination values to + // check. The destination is always a Consul service, but the source + // may be other values as defined by the SourceType.
+ Source, Destination string + + // SourceType is the type of the value for the source. + SourceType IntentionSourceType +} + +// Intentions returns the list of intentions. +func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions") + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// IntentionGet retrieves a single intention. +func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) + r.setQueryOptions(q) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return nil, nil, fmt.Errorf( + "Unexpected response %d: %s", resp.StatusCode, buf.String()) + } + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + +// IntentionDelete deletes a single intention. +func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + +// IntentionMatch returns the list of intentions that match a given source +// or destination. The returned intentions are ordered by precedence where +// result[0] is the highest precedence (if that matches, then that rule overrides +// all other rules). +// +// Matching can be done for multiple names at the same time. The resulting +// map is keyed by the given names. Casing is preserved. +func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/match") + r.setQueryOptions(q) + r.params.Set("by", string(args.By)) + for _, name := range args.Names { + r.params.Add("name", name) + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]*Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// IntentionCheck returns whether a given source/destination would be allowed +// or not given the current set of intentions and the configuration of Consul. 
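+//
+// Editor's sketch of a call (service names are hypothetical):
+//
+//	allowed, _, err := client.Connect().IntentionCheck(&api.IntentionCheck{
+//		Source:      "web",
+//		Destination: "db",
+//	}, nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("web -> db allowed:", allowed)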
+func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/check") + r.setQueryOptions(q) + r.params.Set("source", args.Source) + r.params.Set("destination", args.Destination) + if args.SourceType != "" { + r.params.Set("source-type", string(args.SourceType)) + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out struct{ Allowed bool } + if err := decodeBody(resp, &out); err != nil { + return false, nil, err + } + return out.Allowed, qm, nil +} + +// IntentionCreate will create a new intention. The ID in the given +// structure must be empty and a generated ID will be returned on +// success. +func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/connect/intentions") + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// IntentionUpdate will update an existing intention. The ID in the given +// structure must be non-empty. +func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID) + r.setWriteOptions(q) + r.obj = ixn + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 00000000..53318f11 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,106 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Segment string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool.
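+//
+// Editor's sketch: printing each node with its raw network coordinate.
+//
+//	entries, _, err := client.Coordinate().Nodes(nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, e := range entries {
+//		fmt.Println(e.Node, e.Coord)
+//	}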
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Update inserts or updates the LAN coordinate of a node.
+func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/coordinate/update")
+	r.setWriteOptions(q)
+	r.obj = coord
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Node is used to return the coordinates of a single node in the LAN pool.
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
new file mode 100644
index 00000000..23804685
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/debug.go
@@ -0,0 +1,106 @@
+package api
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+)
+
+// Debug can be used to query the /debug/pprof endpoints to gather
+// profiling information about the target agent.
+//
+// The agent must have enable_debug set to true for profiling to be enabled
+// and for these endpoints to function.
+type Debug struct {
+	c *Client
+}
+
+// Debug returns a handle that exposes the internal debug endpoints.
+func (c *Client) Debug() *Debug { + return &Debug{c} +} + +// Heap returns a pprof heap dump +func (d *Debug) Heap() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/heap") + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Profile returns a pprof CPU profile for the specified number of seconds +func (d *Debug) Profile(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/profile") + + // Capture a profile for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Trace returns an execution trace +func (d *Debug) Trace(seconds int) ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/trace") + + // Capture a trace for the specified number of seconds + r.params.Set("seconds", strconv.Itoa(seconds)) + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} + +// Goroutine returns a pprof goroutine profile +func (d *Debug) Goroutine() ([]byte, error) { + r := d.c.newRequest("GET", "/debug/pprof/goroutine") + + _, resp, err := d.c.doRequest(r) + if err != nil { + return nil, fmt.Errorf("error making request: %s", err) + } + defer resp.Body.Close() + + // We return a raw response because we're just passing through a response + // from the pprof handlers + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("error decoding body: %s", err) + } + + return body, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 00000000..85b5b069 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. 
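+//
+// For example (illustrative only, not part of the upstream file; the event
+// name and payload are placeholders):
+//
+//	id, _, err := client.Event().Fire(&api.UserEvent{
+//		Name:    "deploy",
+//		Payload: []byte("v1.2.3"),
+//	}, nil)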
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
new file mode 100644
index 00000000..e1982189
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.mod
@@ -0,0 +1,16 @@
+module github.com/hashicorp/consul/api
+
+go 1.12
+
+replace github.com/hashicorp/consul/sdk => ../sdk
+
+require (
+	github.com/hashicorp/consul/sdk v0.1.1
+	github.com/hashicorp/go-cleanhttp v0.5.1
+	github.com/hashicorp/go-rootcerts v1.0.0
+	github.com/hashicorp/go-uuid v1.0.1
+	github.com/hashicorp/serf v0.8.2
+	github.com/mitchellh/mapstructure v1.1.2
+	github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
+	github.com/stretchr/testify v1.3.0
+)
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
new file mode 100644
index 00000000..372ebc14
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/go.sum
@@ -0,0 +1,76 @@
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix
v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/mattn/go-colorable 
v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 00000000..9faf6b66 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,330 @@ +package api + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + ServiceTags []string + + Definition HealthCheckDefinition + + CreateIndex uint64 + ModifyIndex uint64 +} + +// HealthCheckDefinition is used to store the details about +// a health check's execution. +type HealthCheckDefinition struct { + HTTP string + Header map[string][]string + Method string + TLSSkipVerify bool + TCP string + IntervalDuration time.Duration `json:"-"` + TimeoutDuration time.Duration `json:"-"` + DeregisterCriticalServiceAfterDuration time.Duration `json:"-"` + + // DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead. + Interval ReadableDuration + Timeout ReadableDuration + DeregisterCriticalServiceAfter ReadableDuration +} + +func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) { + type Alias HealthCheckDefinition + out := &struct { + Interval string + Timeout string + DeregisterCriticalServiceAfter string + *Alias + }{ + Interval: d.Interval.String(), + Timeout: d.Timeout.String(), + DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(), + Alias: (*Alias)(d), + } + + if d.IntervalDuration != 0 { + out.Interval = d.IntervalDuration.String() + } else if d.Interval != 0 { + out.Interval = d.Interval.String() + } + if d.TimeoutDuration != 0 { + out.Timeout = d.TimeoutDuration.String() + } else if d.Timeout != 0 { + out.Timeout = d.Timeout.String() + } + if d.DeregisterCriticalServiceAfterDuration != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String() + } else if d.DeregisterCriticalServiceAfter != 0 { + out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String() + } + + return json.Marshal(out) +} + +func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error { + type Alias HealthCheckDefinition + aux := &struct { + Interval string + Timeout string + DeregisterCriticalServiceAfter string + *Alias + }{ + Alias: (*Alias)(d), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Parse the values into both the time.Duration and old ReadableDuration fields. 
+	var err error
+	if aux.Interval != "" {
+		if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil {
+			return err
+		}
+		d.Interval = ReadableDuration(d.IntervalDuration)
+	}
+	if aux.Timeout != "" {
+		if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil {
+			return err
+		}
+		d.Timeout = ReadableDuration(d.TimeoutDuration)
+	}
+	if aux.DeregisterCriticalServiceAfter != "" {
+		if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil {
+			return err
+		}
+		d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration)
+	}
+	return nil
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+// maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
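+// Illustrative sketch (not part of the upstream file): roll a node's checks
+// up into one status with AggregatedStatus. The node name is a placeholder.
+//
+//	checks, _, err := client.Health().Node("node-1", nil)
+//	if err == nil {
+//		fmt.Println(checks.AggregatedStatus())
+//	}
+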
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, false)
+}
+
+// Connect is equivalent to Service except that it will only return services
+// which are Connect-enabled and will return the connection address for Connect
+// clients to use, which may be a proxy in front of the named service. If
+// passingOnly is true, only instances where both the service and any proxy are
+// healthy will be returned.
+func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	var tags []string
+	if tag != "" {
+		tags = []string{tag}
+	}
+	return h.service(service, tags, passingOnly, q, true)
+}
+
+func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	return h.service(service, tags, passingOnly, q, true)
+}
+
+func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
+	path := "/v1/health/service/" + service
+	if connect {
+		path = "/v1/health/connect/" + service
+	}
+	r := h.c.newRequest("GET", path)
+	r.setQueryOptions(q)
+	if len(tags) > 0 {
+		for _, tag := range tags {
+			r.params.Add("tag", tag)
+		}
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
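+// Illustrative sketch (not part of the upstream file): list only the
+// instances of a service that pass all of their health checks. The service
+// name is a placeholder.
+//
+//	entries, _, err := client.Health().Service("web", "", true, nil)
+//	if err == nil {
+//		for _, entry := range entries {
+//			fmt.Println(entry.Node.Node, entry.Service.Port)
+//		}
+//	}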
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 00000000..bd45a067
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,286 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding to the creation of this KVPair.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV is used to return a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to look up a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to look up all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
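+// Illustrative sketch (not part of the upstream file): write a key and read
+// it back. Key and value are placeholders.
+//
+//	kv := client.KV()
+//	_, err := kv.Put(&api.KVPair{Key: "app/config", Value: []byte("on")}, nil)
+//	if err == nil {
+//		pair, _, _ := kv.Get("app/config", nil)
+//		fmt.Println(string(pair.Value))
+//	}
+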
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failure.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failure.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["release"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+	if len(key) > 0 && key[0] == '/' {
+		return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
+	}
+
+	r := k.c.newRequest("PUT", "/v1/kv/"+key)
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	r.body = bytes.NewReader(body)
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(key, nil, w)
+	return qm, err
+}
+
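+// Illustrative check-and-set sketch (not part of the upstream file): re-read
+// and retry when another writer updated the key first.
+//
+//	for {
+//		pair, _, err := kv.Get("app/config", nil)
+//		if err != nil || pair == nil {
+//			break
+//		}
+//		pair.Value = []byte("off")
+//		if ok, _, _ := kv.CAS(pair, nil); ok {
+//			break
+//		}
+//	}
+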
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failure.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := map[string]string{
+		"cas": strconv.FormatUint(p.ModifyIndex, 10),
+	}
+	return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+	return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+	r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// The Txn function has been deprecated from the KV object; please see the Txn
+// object for more information about Transactions.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	var ops TxnOps
+	for _, op := range txn {
+		ops = append(ops, &TxnOp{KV: op})
+	}
+
+	respOk, txnResp, qm, err := k.c.txn(ops, q)
+	if err != nil {
+		return false, nil, nil, err
+	}
+
+	// Convert from the internal format.
+	kvResp := KVTxnResponse{
+		Errors: txnResp.Errors,
+	}
+	for _, result := range txnResp.Results {
+		kvResp.Results = append(kvResp.Results, result.KV)
+	}
+	return respOk, &kvResp, qm, nil
+}
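+// Illustrative transaction sketch (not part of the upstream file): KVTxnOp
+// and the KVSet verb are defined in this package's txn.go; keys are
+// placeholders.
+//
+//	ops := api.KVTxnOps{
+//		&api.KVTxnOp{Verb: api.KVSet, Key: "a", Value: []byte("1")},
+//		&api.KVTxnOp{Verb: api.KVSet, Key: "b", Value: []byte("2")},
+//	}
+//	ok, _, _, err := client.KV().Txn(ops, nil)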
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 00000000..82339cb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,386 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+	Key              string        // Must be set and have write permissions
+	Value            []byte        // Optional, value to associate with the lock
+	Session          string        // Optional, created if not specified
+	SessionOpts      *SessionEntry // Optional, options to use when creating a session
+	SessionName      string        // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
+	SessionTTL       string        // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
+	MonitorRetries   int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	LockWaitTime     time.Duration // Optional, defaults to DefaultLockWaitTime
+	LockTryOnce      bool          // Optional, defaults to false which means try forever
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+	opts := &LockOptions{
+		Key: key,
+	}
+	return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+	if opts.Key == "" {
+		return nil, fmt.Errorf("missing key")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultLockSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultLockSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.LockWaitTime == 0 {
+		opts.LockWaitTime = DefaultLockWaitTime
+	}
+	l := &Lock{
+		c:    c,
+		opts: opts,
+	}
+	return l, nil
+}
+
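+// Illustrative leader-election sketch (not part of the upstream file) using
+// Lock below; the key is a placeholder.
+//
+//	lock, _ := client.LockKey("service/my-app/leader")
+//	lostCh, err := lock.Lock(nil)
+//	if err == nil && lostCh != nil {
+//		defer lock.Unlock()
+//		// ... perform leader-only work, watching lostCh for lost leadership ...
+//	}
+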
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// Returns a channel that is closed if our lock is lost or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return nil, ErrLockHeld
+	}
+
+	// Check if we need to create a session first
+	l.lockSession = l.opts.Session
+	if l.lockSession == "" {
+		s, err := l.createSession()
+		if err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		}
+
+		l.sessionRenew = make(chan struct{})
+		l.lockSession = s
+		session := l.c.Session()
+		go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
+
+		// If we fail to acquire the lock, cleanup the session
+		defer func() {
+			if !l.isHeld {
+				close(l.sessionRenew)
+				l.sessionRenew = nil
+			}
+		}()
+	}
+
+	// Setup the query options
+	kv := l.c.KV()
+	qOpts := &QueryOptions{
+		WaitTime: l.opts.LockWaitTime,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > l.opts.LockWaitTime {
+			return nil, nil
+		}
+
+		// Query wait time should not exceed the lock wait time
+		qOpts.WaitTime = l.opts.LockWaitTime - elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if pair != nil && pair.Session != "" {
+			// If the session is not null, this means that a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Ensure the lock is actually held + if !l.isHeld { + return ErrLockNotHeld + } + + // Set that we no longer own the lock + l.isHeld = false + + // Stop the session renew + if l.sessionRenew != nil { + defer func() { + close(l.sessionRenew) + l.sessionRenew = nil + }() + } + + // Get the lock entry, and clear the lock session + lockEnt := l.lockEntry(l.lockSession) + l.lockSession = "" + + // Release the lock explicitly + kv := l.c.KV() + _, _, err := kv.Release(lockEnt, nil) + if err != nil { + return fmt.Errorf("failed to release lock: %v", err) + } + return nil +} + +// Destroy is used to cleanup the lock entry. It is not necessary +// to invoke. It will fail if the lock is in use. +func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 00000000..079e2248 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. 
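+//
+// A hypothetical read of the Raft configuration through this handle (not part
+// of the upstream file; RaftGetConfiguration is defined in operator_raft.go
+// below):
+//
+//	cfg, err := client.Operator().RaftGetConfiguration(nil)
+//	if err == nil {
+//		for _, s := range cfg.Servers {
+//			fmt.Println(s.Node, s.Leader)
+//		}
+//	}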
+type Operator struct {
+	c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+	return &Operator{c}
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
new file mode 100644
index 00000000..5cf7e497
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_area.go
@@ -0,0 +1,194 @@
+package api
+
+// The /v1/operator/area endpoints are available only in Consul Enterprise and
+// interact with its network area subsystem. Network areas are used to link
+// together Consul servers in different Consul datacenters. With network areas,
+// Consul datacenters can be linked together in ways other than a fully-connected
+// mesh, as is required for Consul's WAN.
+
+import (
+	"net"
+	"time"
+)
+
+// Area defines a network area.
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the address of Consul servers to join to, such as
+	// IPs or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("POST", "/v1/operator/area") + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaUpdate will update the configuration of the network area with the given ID. +func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + r.obj = area + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// AreaGet returns a single network area. +func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 00000000..b179406d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. 
+// Autopilot helps manage operator tasks related to Consul servers like removing
+// failed servers from the Raft quorum.
+type AutopilotConfiguration struct {
+	// CleanupDeadServers controls whether to remove dead servers from the Raft
+	// peer list when a new server joins
+	CleanupDeadServers bool
+
+	// LastContactThreshold is the limit on the amount of time a server can go
+	// without leader contact before being considered unhealthy.
+	LastContactThreshold *ReadableDuration
+
+	// MaxTrailingLogs is the number of entries in the Raft Log that a server can
+	// be behind before being considered unhealthy.
+	MaxTrailingLogs uint64
+
+	// ServerStabilizationTime is the minimum amount of time a server must be
+	// in a stable, healthy state before it can be added to the cluster. Only
+	// applicable with Raft protocol version 3 or higher.
+	ServerStabilizationTime *ReadableDuration
+
+	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
+	// servers into zones for redundancy. If left blank, this feature will be disabled.
+	RedundancyZoneTag string
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Consul version will be used.
+	UpgradeVersionTag string
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration. Resubmitting a configuration with
+	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
+	// there hasn't been a subsequent update since the configuration was retrieved.
+	ModifyIndex uint64
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+	// ID is the raft ID of the server.
+	ID string
+
+	// Name is the node name of the server.
+	Name string
+
+	// Address is the address of the server.
+	Address string
+
+	// The status of the SerfHealth check for the server.
+	SerfStatus string
+
+	// Version is the Consul version of the server.
+	Version string
+
+	// Leader is whether this server is currently the leader.
+	Leader bool
+
+	// LastContact is the time since this node's last contact with the leader.
+	LastContact *ReadableDuration
+
+	// LastTerm is the highest leader term this server has a record of in its Raft log.
+	LastTerm uint64
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+
+	// Healthy is whether or not the server is healthy according to the current
+	// Autopilot config.
+	Healthy bool
+
+	// Voter is whether this is a voting server.
+	Voter bool
+
+	// StableSince is the last time this server's Healthy value changed.
+	StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+	// Healthy is true if all the servers in the cluster are healthy.
+	Healthy bool
+
+	// FailureTolerance is the number of healthy servers that could be lost without
+	// an outage occurring.
+	FailureTolerance int
+
+	// Servers holds the health of each server.
+	Servers []ServerHealth
+}
+
+// ReadableDuration is a duration type that is serialized to JSON in human readable format.
+type ReadableDuration time.Duration
+
+func NewReadableDuration(dur time.Duration) *ReadableDuration {
+	d := ReadableDuration(dur)
+	return &d
+}
+
+func (d *ReadableDuration) String() string {
+	return d.Duration().String()
+}
+
+func (d *ReadableDuration) Duration() time.Duration {
+	if d == nil {
+		return time.Duration(0)
+	}
+	return time.Duration(*d)
+}
+
+func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
+}
+
+func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
+	if d == nil {
+		return fmt.Errorf("cannot unmarshal to nil pointer")
+	}
+
+	str := string(raw)
+	if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
+		return fmt.Errorf("must be enclosed with quotes: %s", str)
+	}
+	dur, err := time.ParseDuration(str[1 : len(str)-1])
+	if err != nil {
+		return err
+	}
+	*d = ReadableDuration(dur)
+	return nil
+}
+
+// AutopilotGetConfiguration is used to query the current Autopilot configuration.
+func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out AutopilotConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+
+	return &out, nil
+}
+
+// AutopilotSetConfiguration is used to set the current Autopilot configuration.
+func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
+// Autopilot configuration. The ModifyIndex value will be respected. Returns
+// true on success or false on failure.
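+//
+// A hypothetical read-modify-write sketch (not part of the upstream file;
+// op is an *Operator obtained from client.Operator()):
+//
+//	conf, err := op.AutopilotGetConfiguration(nil)
+//	if err == nil {
+//		conf.CleanupDeadServers = false
+//		if ok, casErr := op.AutopilotCASConfiguration(conf, nil); casErr == nil && !ok {
+//			// ModifyIndex changed since the read; re-fetch and retry
+//		}
+//	}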
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+ r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+ r.setWriteOptions(q)
+ r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+ r.obj = conf
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return false, err
+ }
+ defer resp.Body.Close()
+
+ var buf bytes.Buffer
+ if _, err := io.Copy(&buf, resp.Body); err != nil {
+  return false, fmt.Errorf("Failed to read response: %v", err)
+ }
+ res := strings.Contains(buf.String(), "true")
+
+ return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the cluster.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+ r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out OperatorHealthReply
+ if err := decodeBody(resp, &out); err != nil {
+  return nil, err
+ }
+ return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 00000000..038d5d5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,89 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+ Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+ // Whether this response is for a WAN ring
+ WAN bool
+
+ // The datacenter name this request corresponds to
+ Datacenter string
+
+ // Segment has the network segment this request corresponds to.
+ Segment string
+
+ // Messages has information or errors from serf
+ Messages map[string]string `json:",omitempty"`
+
+ // A map of the encryption keys to the number of nodes they're installed on
+ Keys map[string]int
+
+ // The total number of nodes in this ring
+ NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+ r := op.c.newRequest("POST", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+  Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+ r := op.c.newRequest("GET", "/v1/operator/keyring")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out []*KeyringResponse
+ if err := decodeBody(resp, &out); err != nil {
+  return nil, err
+ }
+ return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+  Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+ resp.Body.Close()
+ return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+ r := op.c.newRequest("PUT", "/v1/operator/keyring")
+ r.setWriteOptions(q)
+ r.obj = keyringRequest{
+  Key: key,
+ }
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 00000000..a9844df2
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+ // ID is the unique ID for the server. These are currently the same
+ // as the address, but they will be changed to a real GUID in a future
+ // release of Consul.
+ ID string
+
+ // Node is the node name of the server, as known by Consul, or this
+ // will be set to "(unknown)" otherwise.
+ Node string
+
+ // Address is the IP:port of the server, used for Raft communications.
+ Address string
+
+ // Leader is true if this server is the current cluster leader.
+ Leader bool
+
+ // ProtocolVersion is the Raft protocol version used by the server.
+ ProtocolVersion string
+
+ // Voter is true if this server has a vote in the cluster. This might
+ // be false if the server is staging and still coming online, or if
+ // it's a non-voting server, which will be added in a future release of
+ // Consul.
+ Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+ // Servers has the list of servers in the Raft configuration.
+ Servers []*RaftServer
+
+ // Index has the Raft index of this configuration.
+ Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
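+//
+// A rough usage sketch (illustrative; op is assumed to be an *Operator
+// handle and error handling is elided):
+//
+//    cfg, _ := op.RaftGetConfiguration(nil)
+//    for _, s := range cfg.Servers {
+//        fmt.Printf("%s leader=%v voter=%v\n", s.Node, s.Leader, s.Voter)
+//    }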
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+ r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+ r.setQueryOptions(q)
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return nil, err
+ }
+ defer resp.Body.Close()
+
+ var out RaftConfiguration
+ if err := decodeBody(resp, &out); err != nil {
+  return nil, err
+ }
+ return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+ r.setWriteOptions(q)
+
+ r.params.Set("address", address)
+
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+
+ resp.Body.Close()
+ return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+ r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+ r.setWriteOptions(q)
+
+ r.params.Set("id", id)
+
+ _, resp, err := requireOK(op.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+
+ resp.Body.Close()
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 00000000..92b05d3c
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+ var out []string
+ qm, err := op.c.query("/v1/operator/segment", &out, q)
+ if err != nil {
+  return nil, nil, err
+ }
+ return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 00000000..02045811
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,217 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+ // NearestN is set to the number of remote datacenters to try, based on
+ // network coordinates.
+ NearestN int
+
+ // Datacenters is a fixed list of datacenters to try after NearestN. We
+ // never try a datacenter multiple times, so those are subtracted from
+ // this list before proceeding.
+ Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+ // TTL is the time to live for the served DNS results.
+ TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+ // Service is the service to query.
+ Service string
+
+ // Near allows baking in the name of a node to automatically distance-
+ // sort from. The magic "_agent" value is supported, which sorts near
+ // the agent which initiated the request by default.
+ Near string
+
+ // Failover controls what we do if there are no healthy nodes in the
+ // local datacenter.
+ Failover QueryDatacenterOptions
+
+ // IgnoreCheckIDs is an optional list of health check IDs to ignore when
+ // considering which nodes are healthy. It is useful as an emergency measure,
+ // for example to temporarily override a health check that is producing
+ // false negatives.
+ IgnoreCheckIDs []string
+
+ // If OnlyPassing is true then we will only include nodes with passing
+ // health checks (critical AND warning checks will cause a node to be
+ // discarded)
+ OnlyPassing bool
+
+ // Tags are a set of required and/or disallowed tags. If a tag is in
+ // this list it must be present. If the tag is preceded with "!" then
+ // it is disallowed.
+ Tags []string
+
+ // NodeMeta is a map of required node metadata fields. If a key/value
+ // pair is in this map it must be present on the node in order for the
+ // service entry to be returned.
+ NodeMeta map[string]string
+
+ // ServiceMeta is a map of required service metadata fields. If a key/value
+ // pair is in this map it must be present on the node in order for the
+ // service entry to be returned.
+ ServiceMeta map[string]string
+
+ // Connect if true will filter the prepared query results to only
+ // include Connect-capable services. These include both native services
+ // and proxies for matching services. Note that if a proxy matches,
+ // the constraints in the query above (Near, OnlyPassing, etc.) apply
+ // to the _proxy_ and not the service being proxied. In practice, proxies
+ // should be directly next to their services so this isn't an issue.
+ Connect bool
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+ // Type specifies the type of the query template. Currently only
+ // "name_prefix_match" is supported. This field is required.
+ Type string
+
+ // Regexp allows specifying a regex pattern to match against the name
+ // of the query being executed.
+ Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+ // ID is the UUID-based ID for the query, always generated by Consul.
+ ID string
+
+ // Name is an optional friendly name for the query supplied by the
+ // user. NOTE - if this feature is used then it will reduce the security
+ // of any read ACL associated with this query/service since this name
+ // can be used to locate nodes without supplying any ACL.
+ Name string
+
+ // Session is an optional session to tie this query's lifetime to. If
+ // this is omitted then the query will not expire.
+ Session string
+
+ // Token is the ACL token used when the query was created, and it is
+ // used when a query is subsequently executed. This token, or a token
+ // with management privileges, must be used to change the query later.
+ Token string
+
+ // Service defines a service query (leaving things open for other types
+ // later).
+ Service ServiceQuery
+
+ // DNS has options that control how the results of this query are
+ // served over DNS.
+ DNS QueryDNSOptions
+
+ // Template is used to pass through the arguments for creating a
+ // prepared query with an attached template. If a template is given,
+ // interpolations are possible in other struct fields.
+ Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+ // Service is the service that was queried.
+ Service string
+
+ // Nodes has the nodes that were output by the query.
+ Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. +func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) { + var out *PreparedQueryExecuteResponse + qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go new file mode 100644 index 00000000..745a208c --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/raw.go @@ -0,0 +1,24 @@ +package api + +// Raw can be used to do raw queries against custom endpoints +type Raw struct { + c *Client +} + +// Raw returns a handle to query endpoints +func (c *Client) Raw() *Raw { + return &Raw{c} +} + +// Query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. 
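+//
+// For example (illustrative; client is assumed to be a configured *Client
+// and error handling is elided):
+//
+//    var out map[string]interface{}
+//    qm, _ := client.Raw().Query("/v1/agent/self", &out, nil)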
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+ return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+ return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 00000000..bc4f885f
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,514 @@
+package api
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "sync"
+ "time"
+)
+
+const (
+ // DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+ DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+ // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+ // when creating a new Semaphore. This is used because we do not have any
+ // other check to depend upon.
+ DefaultSemaphoreSessionTTL = "15s"
+
+ // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+ // acquisition is possible. This affects the minimum time it takes to cancel
+ // a Semaphore acquisition.
+ DefaultSemaphoreWaitTime = 15 * time.Second
+
+ // DefaultSemaphoreKey is the key used within the prefix for
+ // coordination between all the contenders.
+ DefaultSemaphoreKey = ".lock"
+
+ // SemaphoreFlagValue is a magic flag we set to indicate a key
+ // is being used for a semaphore. It is used to detect a potential
+ // conflict with a lock.
+ SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+ // ErrSemaphoreHeld is returned if we attempt to double lock
+ ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+ // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+ // that we do not hold.
+ ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+ // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+ // that is in use.
+ ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+ // ErrSemaphoreConflict is returned if the flags on a key
+ // used for a semaphore do not match expectation
+ ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+ c *Client
+ opts *SemaphoreOptions
+
+ isHeld bool
+ sessionRenew chan struct{}
+ lockSession string
+ l sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+ Prefix string // Must be set and have write permissions
+ Limit int // Must be set, and be positive
+ Value []byte // Optional, value to associate with the contender entry
+ Session string // Optional, created if not specified
+ SessionName string // Optional, defaults to DefaultSemaphoreSessionName
+ SessionTTL string // Optional, defaults to DefaultSemaphoreSessionTTL
+ MonitorRetries int // Optional, defaults to 0 which means no retries
+ MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+ SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+ SemaphoreTryOnce bool // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
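+//
+// Serialized to JSON, the lock entry stored at <prefix>/.lock looks roughly
+// like this (illustrative values):
+//
+//    {"Limit": 3, "Holders": {"<session-id>": true}}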
+type semaphoreLock struct {
+ // Limit is the integer limit of holders. This is used to
+ // verify that all the holders agree on the value.
+ Limit int
+
+ // Holders is a list of all the semaphore holders.
+ // It maps the session ID to true. It is used as a set effectively.
+ Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and use the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+ opts := &SemaphoreOptions{
+  Prefix: prefix,
+  Limit:  limit,
+ }
+ return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+ if opts.Prefix == "" {
+  return nil, fmt.Errorf("missing prefix")
+ }
+ if opts.Limit <= 0 {
+  return nil, fmt.Errorf("semaphore limit must be positive")
+ }
+ if opts.SessionName == "" {
+  opts.SessionName = DefaultSemaphoreSessionName
+ }
+ if opts.SessionTTL == "" {
+  opts.SessionTTL = DefaultSemaphoreSessionTTL
+ } else {
+  if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+   return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+  }
+ }
+ if opts.MonitorRetryTime == 0 {
+  opts.MonitorRetryTime = DefaultMonitorRetryTime
+ }
+ if opts.SemaphoreWaitTime == 0 {
+  opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+ }
+ s := &Semaphore{
+  c:    c,
+  opts: opts,
+ }
+ return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// it succeeds, is interrupted via the stopCh, or encounters an error.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
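+//
+// A minimal usage sketch (illustrative; client is assumed to be a
+// configured *Client, the prefix is an example, and errors are elided):
+//
+//    sema, _ := client.SemaphorePrefix("service/example/sema", 3)
+//    slotCh, _ := sema.Acquire(nil)
+//    if slotCh != nil {
+//        defer sema.Release()
+//        // slot is held; do work, watching slotCh for loss of the slot
+//    }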
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > s.opts.SemaphoreWaitTime { + return nil, nil + } + + // Query wait time should not exceed the semaphore wait time + qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
+func (s *Semaphore) Release() error {
+ // Hold the lock as we try to release
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Ensure the lock is actually held
+ if !s.isHeld {
+  return ErrSemaphoreNotHeld
+ }
+
+ // Set that we no longer own the lock
+ s.isHeld = false
+
+ // Stop the session renew
+ if s.sessionRenew != nil {
+  defer func() {
+   close(s.sessionRenew)
+   s.sessionRenew = nil
+  }()
+ }
+
+ // Get and clear the lock session
+ lockSession := s.lockSession
+ s.lockSession = ""
+
+ // Remove ourselves as a lock holder
+ kv := s.c.KV()
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+ pair, _, err := kv.Get(key, nil)
+ if err != nil {
+  return err
+ }
+ if pair == nil {
+  pair = &KVPair{}
+ }
+ lock, err := s.decodeLock(pair)
+ if err != nil {
+  return err
+ }
+
+ // Create a new lock without us as a holder
+ if _, ok := lock.Holders[lockSession]; ok {
+  delete(lock.Holders, lockSession)
+  newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+  if err != nil {
+   return err
+  }
+
+  // Swap the locks
+  didSet, _, err := kv.CAS(newLock, nil)
+  if err != nil {
+   return fmt.Errorf("failed to update lock: %v", err)
+  }
+  if !didSet {
+   goto READ
+  }
+ }
+
+ // Destroy the contender entry
+ contenderKey := path.Join(s.opts.Prefix, lockSession)
+ if _, err := kv.Delete(contenderKey, nil); err != nil {
+  return err
+ }
+ return nil
+}
+
+// Destroy is used to clean up the semaphore entry. It is not necessary
+// to invoke it. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+ // Hold the lock as we try to destroy
+ s.l.Lock()
+ defer s.l.Unlock()
+
+ // Check if we already hold the semaphore
+ if s.isHeld {
+  return ErrSemaphoreHeld
+ }
+
+ // List for the semaphore
+ kv := s.c.KV()
+ pairs, _, err := kv.List(s.opts.Prefix, nil)
+ if err != nil {
+  return fmt.Errorf("failed to read prefix: %v", err)
+ }
+
+ // Find the lock pair, bail if it doesn't exist
+ lockPair := s.findLock(pairs)
+ if lockPair.ModifyIndex == 0 {
+  return nil
+ }
+ if lockPair.Flags != SemaphoreFlagValue {
+  return ErrSemaphoreConflict
+ }
+
+ // Decode the lock
+ lock, err := s.decodeLock(lockPair)
+ if err != nil {
+  return err
+ }
+
+ // Prune the dead holders
+ s.pruneDeadHolders(lock, pairs)
+
+ // Check if there are any holders
+ if len(lock.Holders) > 0 {
+  return ErrSemaphoreInUse
+ }
+
+ // Attempt the delete
+ didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+ if err != nil {
+  return fmt.Errorf("failed to remove semaphore: %v", err)
+ }
+ if !didRemove {
+  return ErrSemaphoreInUse
+ }
+ return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+ session := s.c.Session()
+ se := &SessionEntry{
+  Name:     s.opts.SessionName,
+  TTL:      s.opts.SessionTTL,
+  Behavior: SessionBehaviorDelete,
+ }
+ id, _, err := session.Create(se, nil)
+ if err != nil {
+  return "", err
+ }
+ return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+ return &KVPair{
+  Key:     path.Join(s.opts.Prefix, session),
+  Value:   s.opts.Value,
+  Session: session,
+  Flags:   SemaphoreFlagValue,
+ }
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+ key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+ for _, pair := range pairs {
+  if pair.Key == key {
+   return pair
+  }
+ }
+ return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long running routine to monitor a semaphore ownership +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 00000000..1613f11a --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. 
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
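+//
+// For example (illustrative; session is assumed to be the handle from
+// client.Session() and id an existing session ID):
+//
+//    doneCh := make(chan struct{})
+//    go session.RenewPeriodic("15s", id, nil, doneCh)
+//    // ... later, stop renewing (this also attempts to destroy the session):
+//    close(doneCh)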
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
+ ctx := q.Context()
+
+ ttl, err := time.ParseDuration(initialTTL)
+ if err != nil {
+  return err
+ }
+
+ waitDur := ttl / 2
+ lastRenewTime := time.Now()
+ var lastErr error
+ for {
+  if time.Since(lastRenewTime) > ttl {
+   return lastErr
+  }
+  select {
+  case <-time.After(waitDur):
+   entry, _, err := s.Renew(id, q)
+   if err != nil {
+    waitDur = time.Second
+    lastErr = err
+    continue
+   }
+   if entry == nil {
+    return ErrSessionExpired
+   }
+
+   // Handle the server updating the TTL
+   ttl, _ = time.ParseDuration(entry.TTL)
+   waitDur = ttl / 2
+   lastRenewTime = time.Now()
+
+  case <-doneCh:
+   // Attempt a session destroy
+   s.Destroy(id, q)
+   return nil
+
+  case <-ctx.Done():
+   // Bail immediately since attempting the destroy would
+   // use the canceled context in q, which would just bail.
+   return ctx.Err()
+  }
+ }
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+ if err != nil {
+  return nil, nil, err
+ }
+ if len(entries) > 0 {
+  return entries[0], qm, nil
+ }
+ return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+ if err != nil {
+  return nil, nil, err
+ }
+ return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+ var entries []*SessionEntry
+ qm, err := s.c.query("/v1/session/list", &entries, q)
+ if err != nil {
+  return nil, nil, err
+ }
+ return entries, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 00000000..e902377d
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+ "io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+ c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+ return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+ r := s.c.newRequest("GET", "/v1/snapshot")
+ r.setQueryOptions(q)
+
+ rtt, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+  return nil, nil, err
+ }
+
+ qm := &QueryMeta{}
+ parseQueryMeta(resp, qm)
+ qm.RequestTime = rtt
+ return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
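+//
+// A snapshot round trip might look like this (illustrative; client setup,
+// error handling, and the file name are assumptions):
+//
+//    rc, _, _ := client.Snapshot().Save(nil)
+//    defer rc.Close()
+//    f, _ := os.Create("backup.snap")
+//    io.Copy(f, rc)
+//    f.Close()
+//    // ... later:
+//    in, _ := os.Open("backup.snap")
+//    client.Snapshot().Restore(nil, in)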
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+ r := s.c.newRequest("PUT", "/v1/snapshot")
+ r.body = in
+ r.setWriteOptions(q)
+ _, _, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+  return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 00000000..74ef61a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+ c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+ return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+ r := s.c.newRequest("GET", "/v1/status/leader")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+  return "", err
+ }
+ defer resp.Body.Close()
+
+ var leader string
+ if err := decodeBody(resp, &leader); err != nil {
+  return "", err
+ }
+ return leader, nil
+}
+
+// Peers is used to query for the known Raft peers
+func (s *Status) Peers() ([]string, error) {
+ r := s.c.newRequest("GET", "/v1/status/peers")
+ _, resp, err := requireOK(s.c.doRequest(r))
+ if err != nil {
+  return nil, err
+ }
+ defer resp.Body.Close()
+
+ var peers []string
+ if err := decodeBody(resp, &peers); err != nil {
+  return nil, err
+ }
+ return peers, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go
new file mode 100644
index 00000000..65d7a16e
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/txn.go
@@ -0,0 +1,230 @@
+package api
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// Txn is used to manipulate the Txn API
+type Txn struct {
+ c *Client
+}
+
+// Txn is used to return a handle to the Txn APIs
+func (c *Client) Txn() *Txn {
+ return &Txn{c}
+}
+
+// TxnOp is the internal format we send to Consul. Currently K/V, node,
+// service, and check operations are supported.
+type TxnOp struct {
+ KV      *KVTxnOp
+ Node    *NodeTxnOp
+ Service *ServiceTxnOp
+ Check   *CheckTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct {
+ KV      *KVPair
+ Node    *Node
+ Service *CatalogService
+ Check   *HealthCheck
+}
+
+// TxnResults is a list of TxnResult objects.
+type TxnResults []*TxnResult
+
+// TxnError is used to return information about an operation in a transaction.
+type TxnError struct {
+ OpIndex int
+ What    string
+}
+
+// TxnErrors is a list of TxnError objects.
+type TxnErrors []*TxnError
+
+// TxnResponse is the internal format we receive from Consul.
+type TxnResponse struct {
+ Results TxnResults
+ Errors  TxnErrors
+}
+
+// KVOp constants give possible operations available in a transaction.
+type KVOp string
+
+const (
+ KVSet            KVOp = "set"
+ KVDelete         KVOp = "delete"
+ KVDeleteCAS      KVOp = "delete-cas"
+ KVDeleteTree     KVOp = "delete-tree"
+ KVCAS            KVOp = "cas"
+ KVLock           KVOp = "lock"
+ KVUnlock         KVOp = "unlock"
+ KVGet            KVOp = "get"
+ KVGetTree        KVOp = "get-tree"
+ KVCheckSession   KVOp = "check-session"
+ KVCheckIndex     KVOp = "check-index"
+ KVCheckNotExists KVOp = "check-not-exists"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
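+//
+// Which fields are consulted depends on the verb: Value applies to the write
+// verbs, Index to the "cas", "delete-cas", and "check-index" verbs, and
+// Session to the "lock", "unlock", and "check-session" verbs.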
+type KVTxnOp struct {
+ Verb    KVOp
+ Key     string
+ Value   []byte
+ Flags   uint64
+ Index   uint64
+ Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+ Results []*KVPair
+ Errors  TxnErrors
+}
+
+// NodeOp constants give possible operations available in a transaction.
+type NodeOp string
+
+const (
+ NodeGet       NodeOp = "get"
+ NodeSet       NodeOp = "set"
+ NodeCAS       NodeOp = "cas"
+ NodeDelete    NodeOp = "delete"
+ NodeDeleteCAS NodeOp = "delete-cas"
+)
+
+// NodeTxnOp defines a single operation inside a transaction.
+type NodeTxnOp struct {
+ Verb NodeOp
+ Node Node
+}
+
+// ServiceOp constants give possible operations available in a transaction.
+type ServiceOp string
+
+const (
+ ServiceGet       ServiceOp = "get"
+ ServiceSet       ServiceOp = "set"
+ ServiceCAS       ServiceOp = "cas"
+ ServiceDelete    ServiceOp = "delete"
+ ServiceDeleteCAS ServiceOp = "delete-cas"
+)
+
+// ServiceTxnOp defines a single operation inside a transaction.
+type ServiceTxnOp struct {
+ Verb    ServiceOp
+ Node    string
+ Service AgentService
+}
+
+// CheckOp constants give possible operations available in a transaction.
+type CheckOp string
+
+const (
+ CheckGet       CheckOp = "get"
+ CheckSet       CheckOp = "set"
+ CheckCAS       CheckOp = "cas"
+ CheckDelete    CheckOp = "delete"
+ CheckDeleteCAS CheckOp = "delete-cas"
+)
+
+// CheckTxnOp defines a single operation inside a transaction.
+type CheckTxnOp struct {
+ Verb  CheckOp
+ Check HealthCheck
+}
+
+// Txn is used to apply multiple Consul operations in a single, atomic transaction.
+//
+// Note that Go will perform the required base64 encoding on the values
+// automatically because the type is a byte slice. Transactions are defined as a
+// list of operations to perform, using the different fields in the TxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through Raft and any consistency settings
+// will be ignored.
+//
+// Here's an example, where txn is the handle returned by client.Txn():
+//
+//    ops := TxnOps{
+//        &TxnOp{
+//            KV: &KVTxnOp{
+//                Verb:    KVLock,
+//                Key:     "test/lock",
+//                Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//                Value:   []byte("hello"),
+//            },
+//        },
+//        &TxnOp{
+//            KV: &KVTxnOp{
+//                Verb: KVGet,
+//                Key:  "another/key",
+//            },
+//        },
+//        &TxnOp{
+//            Check: &CheckTxnOp{
+//                Verb: CheckSet,
+//                Check: HealthCheck{
+//                    Node:    "foo",
+//                    CheckID: "redis:a",
+//                    Name:    "Redis Health Check",
+//                    Status:  "passing",
+//                },
+//            },
+//        },
+//    }
+//    ok, response, _, err := txn.Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. For KV operations, Deleted keys will have a nil entry in the
+// results, and to save space, the Value of each key in the Results will be nil
+// unless the operation is a KVGet.
If the transaction was rolled back, the Errors +// member will have entries referencing the index of the operation that failed +// along with an error message. +func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { + return t.c.txn(txn, q) +} + +func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) { + r := c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + r.obj = txn + rtt, resp, err := c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + return resp.StatusCode == http.StatusOK, &txnResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md index 1c95f597..444df08f 100644 --- a/vendor/github.com/hashicorp/errwrap/README.md +++ b/vendor/github.com/hashicorp/errwrap/README.md @@ -48,7 +48,7 @@ func main() { // We can use the Contains helpers to check if an error contains // another error. It is safe to do this with a nil error, or with // an error that doesn't even use the errwrap package. - if errwrap.Contains(err, ErrNotExist) { + if errwrap.Contains(err, "does not exist") { // Do something } if errwrap.ContainsType(err, new(os.PathError)) { diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod new file mode 100644 index 00000000..c9b84022 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/errwrap diff --git a/vendor/github.com/hashicorp/go-azure-helpers/LICENSE b/vendor/github.com/hashicorp/go-azure-helpers/LICENSE new file mode 100644 index 00000000..be2cc4df --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method.go new file mode 100644 index 00000000..cbc73d56 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method.go @@ -0,0 +1,20 @@ +package authentication + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" +) + +type authMethod interface { + build(b Builder) (authMethod, error) + + isApplicable(b Builder) bool + + getAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) + + name() string + + populateConfig(c *Config) error + + validate() error +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_azure_cli_token.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_azure_cli_token.go new file mode 100644 index 00000000..6f854d5e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_azure_cli_token.go @@ -0,0 +1,146 @@ +package authentication + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/cli" + "github.com/hashicorp/go-multierror" +) + +type azureCliTokenAuth struct { + profile *azureCLIProfile +} + +func (a azureCliTokenAuth) build(b Builder) (authMethod, error) { + auth := azureCliTokenAuth{ + profile: &azureCLIProfile{ + clientId: b.ClientID, + environment: b.Environment, + subscriptionId: b.SubscriptionID, + tenantId: b.TenantID, + }, + } + profilePath, err := cli.ProfilePath() + if err != nil { + return nil, fmt.Errorf("Error loading the Profile Path from the Azure CLI: %+v", err) + } + + profile, err := cli.LoadProfile(profilePath) + if err != nil { + return nil, fmt.Errorf("Azure CLI Authorization Profile was not found. 
Please ensure the Azure CLI is installed and then log-in with `az login`.") + } + + auth.profile.profile = profile + + err = auth.profile.populateFields() + if err != nil { + return nil, fmt.Errorf("Error retrieving the Profile from the Azure CLI: %s Please re-authenticate using `az login`.", err) + } + + err = auth.profile.populateClientId() + if err != nil { + return nil, fmt.Errorf("Error populating Client ID from the Azure CLI: %+v", err) + } + + return auth, nil +} + +func (a azureCliTokenAuth) isApplicable(b Builder) bool { + return b.SupportsAzureCliToken +} + +func (a azureCliTokenAuth) getAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) { + // the Azure CLI appears to cache these, so to maintain compatibility with the interface this method is intentionally not on the pointer + token, err := obtainAuthorizationToken(endpoint, a.profile.subscriptionId) + if err != nil { + return nil, fmt.Errorf("Error obtaining Authorization Token from the Azure CLI: %s", err) + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, fmt.Errorf("Error converting Authorization Token to an ADAL Token: %s", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, a.profile.clientId, endpoint, adalToken) + if err != nil { + return nil, err + } + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil +} + +func (a azureCliTokenAuth) name() string { + return "Obtaining a token from the Azure CLI" +} + +func (a azureCliTokenAuth) populateConfig(c *Config) error { + c.ClientID = a.profile.clientId + c.Environment = a.profile.environment + c.SubscriptionID = a.profile.subscriptionId + c.TenantID = a.profile.tenantId + return nil +} + +func (a azureCliTokenAuth) validate() error { + var err *multierror.Error + + errorMessageFmt := "A %s was not found in your Azure CLI Credentials.\n\nPlease login to the Azure CLI again via `az login`" + + if a.profile == nil { + return fmt.Errorf("Azure CLI Profile is nil - this is an internal error and should be reported.") + } + + if a.profile.clientId == "" { + err = multierror.Append(err, fmt.Errorf(errorMessageFmt, "Client ID")) + } + + if a.profile.subscriptionId == "" { + err = multierror.Append(err, fmt.Errorf(errorMessageFmt, "Subscription ID")) + } + + if a.profile.tenantId == "" { + err = multierror.Append(err, fmt.Errorf(errorMessageFmt, "Tenant ID")) + } + + return err.ErrorOrNil() +} + +func obtainAuthorizationToken(endpoint string, subscriptionId string) (*cli.Token, error) { + var stderr bytes.Buffer + var stdout bytes.Buffer + + cmd := exec.Command("az", "account", "get-access-token", "--resource", endpoint, "--subscription", subscriptionId, "-o=json") + + cmd.Stderr = &stderr + cmd.Stdout = &stdout + + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("Error launching Azure CLI: %+v", err) + } + + if err := cmd.Wait(); err != nil { + return nil, fmt.Errorf("Error waiting for the Azure CLI: %+v", err) + } + + stdOutStr := stdout.String() + stdErrStr := stderr.String() + + if stdErrStr != "" { + return nil, fmt.Errorf("Error retrieving access token from Azure CLI: %s", strings.TrimSpace(stdErrStr)) + } + + var token *cli.Token + err := json.Unmarshal([]byte(stdOutStr), &token) + if err != nil { + return nil, fmt.Errorf("Error unmarshaling Access Token from the Azure CLI: %s", err) + } + + return token, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_cert.go 
b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_cert.go new file mode 100644 index 00000000..f6dd9b77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_cert.go @@ -0,0 +1,125 @@ +package authentication + +import ( + "crypto/rsa" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/hashicorp/go-multierror" + "golang.org/x/crypto/pkcs12" +) + +type servicePrincipalClientCertificateAuth struct { + clientId string + clientCertPath string + clientCertPassword string + subscriptionId string + tenantId string +} + +func (a servicePrincipalClientCertificateAuth) build(b Builder) (authMethod, error) { + method := servicePrincipalClientCertificateAuth{ + clientId: b.ClientID, + clientCertPath: b.ClientCertPath, + clientCertPassword: b.ClientCertPassword, + subscriptionId: b.SubscriptionID, + tenantId: b.TenantID, + } + return method, nil +} + +func (a servicePrincipalClientCertificateAuth) isApplicable(b Builder) bool { + return b.SupportsClientCertAuth && b.ClientCertPath != "" +} + +func (a servicePrincipalClientCertificateAuth) name() string { + return "Service Principal / Client Certificate" +} + +func (a servicePrincipalClientCertificateAuth) getAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) { + certificateData, err := ioutil.ReadFile(a.clientCertPath) + if err != nil { + return nil, fmt.Errorf("Error reading Client Certificate %q: %v", a.clientCertPath, err) + } + + // Get the certificate and private key from pfx file + certificate, rsaPrivateKey, err := decodePkcs12(certificateData, a.clientCertPassword) + if err != nil { + return nil, fmt.Errorf("Error decoding pkcs12 certificate: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, a.clientId, certificate, rsaPrivateKey, endpoint) + if err != nil { + return nil, err + } + + spt.SetSender(sender) + + err = spt.Refresh() + if err != nil { + return nil, err + } + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil +} + +func (a servicePrincipalClientCertificateAuth) populateConfig(c *Config) error { + c.AuthenticatedAsAServicePrincipal = true + return nil +} + +func (a servicePrincipalClientCertificateAuth) validate() error { + var err *multierror.Error + + fmtErrorMessage := "A %s must be configured when authenticating as a Service Principal using a Client Certificate." 
+ + if a.subscriptionId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Subscription ID")) + } + + if a.clientId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Client ID")) + } + + if a.clientCertPath == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Client Certificate Path")) + } else { + if strings.HasSuffix(strings.ToLower(a.clientCertPath), ".pfx") { + // ensure it exists on disk + _, fileErr := os.Stat(a.clientCertPath) + if os.IsNotExist(fileErr) { + err = multierror.Append(err, fmt.Errorf("Error locating Client Certificate specified at %q: %s", a.clientCertPath, fileErr)) + } + + // we're intentionally /not/ checking it's an actual PFX file at this point, as that happens in the getAuthorizationToken + } else { + err = multierror.Append(err, fmt.Errorf("The Client Certificate Path is not a *.pfx file: %q", a.clientCertPath)) + } + } + + if a.tenantId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Tenant ID")) + } + + return err.ErrorOrNil() +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_secret.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_secret.go new file mode 100644 index 00000000..f31bc20e --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_client_secret.go @@ -0,0 +1,71 @@ +package authentication + +import ( + "fmt" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/hashicorp/go-multierror" +) + +type servicePrincipalClientSecretAuth struct { + clientId string + clientSecret string + subscriptionId string + tenantId string +} + +func (a servicePrincipalClientSecretAuth) build(b Builder) (authMethod, error) { + method := servicePrincipalClientSecretAuth{ + clientId: b.ClientID, + clientSecret: b.ClientSecret, + subscriptionId: b.SubscriptionID, + tenantId: b.TenantID, + } + return method, nil +} + +func (a servicePrincipalClientSecretAuth) isApplicable(b Builder) bool { + return b.SupportsClientSecretAuth && b.ClientSecret != "" +} + +func (a servicePrincipalClientSecretAuth) name() string { + return "Service Principal / Client Secret" +} + +func (a servicePrincipalClientSecretAuth) getAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) { + spt, err := adal.NewServicePrincipalToken(*oauthConfig, a.clientId, a.clientSecret, endpoint) + if err != nil { + return nil, err + } + spt.SetSender(sender) + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil +} + +func (a servicePrincipalClientSecretAuth) populateConfig(c *Config) error { + c.AuthenticatedAsAServicePrincipal = true + return nil +} + +func (a servicePrincipalClientSecretAuth) validate() error { + var err *multierror.Error + + fmtErrorMessage := "A %s must be configured when authenticating as a Service Principal using a Client Secret." 
+ + if a.subscriptionId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Subscription ID")) + } + if a.clientId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Client ID")) + } + if a.clientSecret == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Client Secret")) + } + if a.tenantId == "" { + err = multierror.Append(err, fmt.Errorf(fmtErrorMessage, "Tenant ID")) + } + + return err.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_msi.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_msi.go new file mode 100644 index 00000000..883df260 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/auth_method_msi.go @@ -0,0 +1,67 @@ +package authentication + +import ( + "fmt" + "log" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/hashicorp/go-multierror" +) + +type managedServiceIdentityAuth struct { + endpoint string +} + +func (a managedServiceIdentityAuth) build(b Builder) (authMethod, error) { + endpoint := b.MsiEndpoint + if endpoint == "" { + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, fmt.Errorf("Error determining MSI Endpoint: ensure the VM has MSI enabled, or configure the MSI Endpoint. Error: %s", err) + } + endpoint = msiEndpoint + } + + log.Printf("[DEBUG] Using MSI endpoint %q", endpoint) + + auth := managedServiceIdentityAuth{ + endpoint: endpoint, + } + return auth, nil +} + +func (a managedServiceIdentityAuth) isApplicable(b Builder) bool { + return b.SupportsManagedServiceIdentity +} + +func (a managedServiceIdentityAuth) name() string { + return "Managed Service Identity" +} + +func (a managedServiceIdentityAuth) getAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) { + spt, err := adal.NewServicePrincipalTokenFromMSI(a.endpoint, endpoint) + if err != nil { + return nil, err + } + + spt.SetSender(sender) + + auth := autorest.NewBearerAuthorizer(spt) + return auth, nil +} + +func (a managedServiceIdentityAuth) populateConfig(c *Config) error { + // nothing to populate back + return nil +} + +func (a managedServiceIdentityAuth) validate() error { + var err *multierror.Error + + if a.endpoint == "" { + err = multierror.Append(err, fmt.Errorf("An MSI Endpoint must be configured")) + } + + return err.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_access_token.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_access_token.go new file mode 100644 index 00000000..822fb2d7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_access_token.go @@ -0,0 +1,42 @@ +package authentication + +import ( + "fmt" + "log" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure/cli" +) + +type azureCliAccessToken struct { + ClientID string + AccessToken *adal.Token +} + +func findValidAccessTokenForTenant(tokens []cli.Token, tenantId string) (*azureCliAccessToken, error) { + for _, accessToken := range tokens { + token, err := accessToken.ToADALToken() + if err != nil { + return nil, fmt.Errorf("[DEBUG] Error converting access token to token: %+v", err) + } + + if !strings.Contains(accessToken.Resource, "management") { + 
log.Printf("[DEBUG] Resource %q isn't a management domain", accessToken.Resource) + continue + } + + if !strings.HasSuffix(accessToken.Authority, tenantId) { + log.Printf("[DEBUG] Resource %q isn't for the correct Tenant", accessToken.Resource) + continue + } + + validAccessToken := azureCliAccessToken{ + ClientID: accessToken.ClientID, + AccessToken: &token, + } + return &validAccessToken, nil + } + + return nil, fmt.Errorf("No Access Token was found for the Tenant ID %q", tenantId) +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile.go new file mode 100644 index 00000000..39fb30dd --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile.go @@ -0,0 +1,35 @@ +package authentication + +import ( + "github.com/Azure/go-autorest/autorest/azure/cli" +) + +type azureCLIProfile struct { + profile cli.Profile + + clientId string + environment string + subscriptionId string + tenantId string +} + +func (a *azureCLIProfile) populateFields() error { + // ensure we know the Subscription ID - since it's needed for everything else + if a.subscriptionId == "" { + err := a.populateSubscriptionID() + if err != nil { + return err + } + } + + if a.tenantId == "" { + // now we know the subscription ID, find the associated Tenant ID + err := a.populateTenantID() + if err != nil { + return err + } + } + + // always pull the environment from the Azure CLI, since the Access Token's associated with it + return a.populateEnvironment() +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile_population.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile_population.go new file mode 100644 index 00000000..9c84fd1f --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/azure_cli_profile_population.go @@ -0,0 +1,81 @@ +package authentication + +import ( + "fmt" + "strings" + + "github.com/Azure/go-autorest/autorest/azure/cli" +) + +func (a *azureCLIProfile) populateSubscriptionID() error { + subscriptionId, err := a.findDefaultSubscriptionId() + if err != nil { + return err + } + + a.subscriptionId = subscriptionId + return nil +} + +func (a *azureCLIProfile) populateTenantID() error { + subscription, err := a.findSubscription(a.subscriptionId) + if err != nil { + return err + } + + a.tenantId = subscription.TenantID + return nil +} + +func (a *azureCLIProfile) populateClientId() error { + // we can now pull out the ClientID and the Access Token to use from the Access Token + tokensPath, err := cli.AccessTokensPath() + if err != nil { + return fmt.Errorf("Error loading the Tokens Path from the Azure CLI: %+v", err) + } + + tokens, err := cli.LoadTokens(tokensPath) + if err != nil { + return fmt.Errorf("No Authorization Tokens were found - please ensure the Azure CLI is installed and then log-in with `az login`.") + } + + validToken, err := findValidAccessTokenForTenant(tokens, a.tenantId) + if err != nil { + return fmt.Errorf("No Authorization Tokens were found - please re-authenticate using `az login`.") + } + + token := *validToken + a.clientId = token.ClientID + + return nil +} + +func (a *azureCLIProfile) populateEnvironment() error { + subscription, err := a.findSubscription(a.subscriptionId) + if err != nil { + return err + } + + a.environment = 
normalizeEnvironmentName(subscription.EnvironmentName) + return nil +} + +func (a azureCLIProfile) findDefaultSubscriptionId() (string, error) { + for _, subscription := range a.profile.Subscriptions { + if subscription.IsDefault { + return subscription.ID, nil + } + } + + return "", fmt.Errorf("No Subscription was Marked as Default in the Azure Profile.") +} + +func (a azureCLIProfile) findSubscription(subscriptionId string) (*cli.Subscription, error) { + for _, subscription := range a.profile.Subscriptions { + if strings.EqualFold(subscription.ID, subscriptionId) { + return &subscription, nil + } + } + + return nil, fmt.Errorf("Subscription %q was not found in your Azure CLI credentials. Please verify it exists in `az account list`.", subscriptionId) +} diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/builder.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/builder.go new file mode 100644 index 00000000..e37e8b13 --- /dev/null +++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/builder.go @@ -0,0 +1,81 @@ +package authentication + +import ( + "fmt" + "log" +) + +// Builder supports all of the possible Authentication values and feature toggles +// required to build a working Config for Authentication purposes. +type Builder struct { + // Core + ClientID string + SubscriptionID string + TenantID string + Environment string + + // The custom Resource Manager Endpoint which should be used + // only applicable for Azure Stack at this time. + CustomResourceManagerEndpoint string + + // Azure CLI Tokens Auth + SupportsAzureCliToken bool + + // Managed Service Identity Auth + SupportsManagedServiceIdentity bool + MsiEndpoint string + + // Service Principal (Client Cert) Auth + SupportsClientCertAuth bool + ClientCertPath string + ClientCertPassword string + + // Service Principal (Client Secret) Auth + SupportsClientSecretAuth bool + ClientSecret string +} + +// Build takes the configuration from the Builder and builds up a validated Config +// for authenticating with Azure +func (b Builder) Build() (*Config, error) { + config := Config{ + ClientID: b.ClientID, + SubscriptionID: b.SubscriptionID, + TenantID: b.TenantID, + Environment: b.Environment, + CustomResourceManagerEndpoint: b.CustomResourceManagerEndpoint, + } + + // NOTE: the ordering here is important + // since the Azure CLI Parsing should always be the last thing checked + supportedAuthenticationMethods := []authMethod{ + servicePrincipalClientCertificateAuth{}, + servicePrincipalClientSecretAuth{}, + managedServiceIdentityAuth{}, + azureCliTokenAuth{}, + } + + for _, method := range supportedAuthenticationMethods { + name := method.name() + log.Printf("Testing if %s is applicable for Authentication..", name) + if method.isApplicable(b) { + log.Printf("Using %s for Authentication", name) + auth, err := method.build(b) + if err != nil { + return nil, err + } + + // populate authentication specific fields on the Config + // (e.g. 
is service principal, fields parsed from the azure cli)
+ err = auth.populateConfig(&config)
+ if err != nil {
+ return nil, err
+ }
+
+ config.authMethod = auth
+ return config.validate()
+ }
+ }
+
+ return nil, fmt.Errorf("No supported authentication methods were found!")
+}
diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/config.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/config.go
new file mode 100644
index 00000000..90b78fce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/config.go
@@ -0,0 +1,36 @@
+package authentication
+
+import (
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+)
+
+// Config is the configuration structure used to instantiate a
+// new Azure management client.
+type Config struct {
+ ClientID string
+ SubscriptionID string
+ TenantID string
+ Environment string
+ AuthenticatedAsAServicePrincipal bool
+
+ // A Custom Resource Manager Endpoint
+ // at this time this should only be applicable for Azure Stack.
+ CustomResourceManagerEndpoint string
+
+ authMethod authMethod
+}
+
+// GetAuthorizationToken returns an authorization token for the authentication method defined in the Config
+func (c Config) GetAuthorizationToken(sender autorest.Sender, oauthConfig *adal.OAuthConfig, endpoint string) (*autorest.BearerAuthorizer, error) {
+ return c.authMethod.getAuthorizationToken(sender, oauthConfig, endpoint)
+}
+
+func (c Config) validate() (*Config, error) {
+ err := c.authMethod.validate()
+ if err != nil {
+ return nil, err
+ }
+
+ return &c, nil
+}
diff --git a/vendor/github.com/hashicorp/go-azure-helpers/authentication/environment.go b/vendor/github.com/hashicorp/go-azure-helpers/authentication/environment.go
new file mode 100644
index 00000000..7fc4e15b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-azure-helpers/authentication/environment.go
@@ -0,0 +1,55 @@
+package authentication
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest/azure"
+)
+
+// DetermineEnvironment determines what the Environment name is within
+// the Azure SDK for Go and then returns the associated environment, if it exists.
+func DetermineEnvironment(name string) (*azure.Environment, error) {
+ // detect cloud from environment
+ env, envErr := azure.EnvironmentFromName(name)
+
+ if envErr != nil {
+ // try again with wrapped value to support readable values like german instead of AZUREGERMANCLOUD
+ wrapped := fmt.Sprintf("AZURE%sCLOUD", name)
+ env, envErr = azure.EnvironmentFromName(wrapped)
+ if envErr != nil {
+ return nil, fmt.Errorf("An Azure Environment with name %q was not found: %+v", name, envErr)
+ }
+ }
+
+ return &env, nil
+}
+
+// LoadEnvironmentFromUrl attempts to load the specified environment from the endpoint.
+// if the endpoint is an empty string, or an environment can't be
+// found at the endpoint url then an error is returned
+func LoadEnvironmentFromUrl(endpoint string) (*azure.Environment, error) {
+ if endpoint == "" {
+ return nil, fmt.Errorf("Endpoint was not set!")
+ }
+
+ env, err := azure.EnvironmentFromURL(endpoint)
+ if err != nil {
+ return nil, fmt.Errorf("Error retrieving Environment from Endpoint %q: %+v", endpoint, err)
+ }
+
+ return &env, nil
+}
+
+func normalizeEnvironmentName(input string) string {
+ // Environment is stored as `Azure{Environment}Cloud`
+ output := strings.ToLower(input)
+ output = strings.TrimPrefix(output, "azure")
+ output = strings.TrimSuffix(output, "cloud")
+
+ // however Azure Public is `AzureCloud` in the CLI Profile and not `AzurePublicCloud`.
+ if output == "" {
+ return "public"
+ }
+ return output
+}
diff --git a/vendor/github.com/hashicorp/go-checkpoint/check.go b/vendor/github.com/hashicorp/go-checkpoint/check.go
new file mode 100644
index 00000000..109d0d35
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-checkpoint/check.go
@@ -0,0 +1,368 @@
+package checkpoint
+
+import (
+ crand "crypto/rand"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ mrand "math/rand"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
+
+// CheckParams are the parameters for configuring a check request.
+type CheckParams struct {
+ // Product and version are used to lookup the correct product and
+ // alerts for the proper version. The version is also used to perform
+ // a version check.
+ Product string
+ Version string
+
+ // Arch and OS are used to filter alerts potentially only to things
+ // affecting a specific os/arch combination. If these aren't specified,
+ // they'll be automatically filled in.
+ Arch string
+ OS string
+
+ // Signature is some random signature that should be stored and used
+ // as a cookie-like value. This ensures that alerts aren't repeated.
+ // If the signature is changed, repeat alerts may be sent down. The
+ // signature should NOT be anything identifiable to a user (such as
+ // a MAC address). It should be random.
+ //
+ // If SignatureFile is given, then the signature will be read from this
+ // file. If the file doesn't exist, then a random signature will
+ // automatically be generated and stored here. SignatureFile will be
+ // ignored if Signature is given.
+ Signature string
+ SignatureFile string
+
+ // CacheFile, if specified, will cache the result of a check. The
+ // duration of the cache is specified by CacheDuration, and defaults
+ // to 48 hours if not specified. If the CacheFile is newer than the
+ // CacheDuration, then the Check will short-circuit and use those
+ // results.
+ //
+ // If the CacheFile directory doesn't exist, it will be created with
+ // permissions 0755.
+ CacheFile string
+ CacheDuration time.Duration
+
+ // Force, if true, will force the check even if CHECKPOINT_DISABLE
+ // is set. Within HashiCorp products, this is ONLY USED when the user
+ // specifically requests it. This is never automatically done without
+ // the user's consent.
+ Force bool
+}
+
+// CheckResponse is the response for a check request.
+type CheckResponse struct { + Product string `json:"product"` + CurrentVersion string `json:"current_version"` + CurrentReleaseDate int `json:"current_release_date"` + CurrentDownloadURL string `json:"current_download_url"` + CurrentChangelogURL string `json:"current_changelog_url"` + ProjectWebsite string `json:"project_website"` + Outdated bool `json:"outdated"` + Alerts []*CheckAlert `json:"alerts"` +} + +// CheckAlert is a single alert message from a check request. +// +// These never have to be manually constructed, and are typically populated +// into a CheckResponse as a result of the Check request. +type CheckAlert struct { + ID int `json:"id"` + Date int `json:"date"` + Message string `json:"message"` + URL string `json:"url"` + Level string `json:"level"` +} + +// Check checks for alerts and new version information. +func Check(p *CheckParams) (*CheckResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &CheckResponse{}, nil + } + + // Set a default timeout of 3 sec for the check request (in milliseconds) + timeout := 3000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + // If we have a cached result, then use that + if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil { + return nil, err + } else if r != nil { + defer r.Close() + return checkResult(r) + } + + var u url.URL + + if p.Arch == "" { + p.Arch = runtime.GOARCH + } + if p.OS == "" { + p.OS = runtime.GOOS + } + + // If we're given a SignatureFile, then attempt to read that. + signature := p.Signature + if p.Signature == "" && p.SignatureFile != "" { + var err error + signature, err = checkSignature(p.SignatureFile) + if err != nil { + return nil, err + } + } + + v := u.Query() + v.Set("version", p.Version) + v.Set("arch", p.Arch) + v.Set("os", p.OS) + v.Set("signature", signature) + + u.Scheme = "https" + u.Host = "checkpoint-api.hashicorp.com" + u.Path = fmt.Sprintf("/v1/check/%s", p.Product) + u.RawQuery = v.Encode() + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + var r io.Reader = resp.Body + if p.CacheFile != "" { + // Make sure the directory holding our cache exists. + if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil { + return nil, err + } + + // We have to cache the result, so write the response to the + // file as we read it. + f, err := os.Create(p.CacheFile) + if err != nil { + return nil, err + } + + // Write the cache header + if err := writeCacheHeader(f, p.Version); err != nil { + f.Close() + os.Remove(p.CacheFile) + return nil, err + } + + defer f.Close() + r = io.TeeReader(r, f) + } + + return checkResult(r) +} + +// CheckInterval is used to check for a response on a given interval duration. +// The interval is not exact, and checks are randomized to prevent a thundering +// herd. 
However, it is expected that on average one check is performed per +// interval. The returned channel may be closed to stop background checks. +func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} { + doneCh := make(chan struct{}) + + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return doneCh + } + + go func() { + for { + select { + case <-time.After(randomStagger(interval)): + resp, err := Check(p) + cb(resp, err) + case <-doneCh: + return + } + } + }() + + return doneCh +} + +// randomStagger returns an interval that is between 3/4 and 5/4 of +// the given interval. The expected value is the interval. +func randomStagger(interval time.Duration) time.Duration { + stagger := time.Duration(mrand.Int63()) % (interval / 2) + return 3*(interval/4) + stagger +} + +func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) { + fi, err := os.Stat(path) + if err != nil { + if os.IsNotExist(err) { + // File doesn't exist, not a problem + return nil, nil + } + + return nil, err + } + + if d == 0 { + d = 48 * time.Hour + } + + if fi.ModTime().Add(d).Before(time.Now()) { + // Cache is busted, delete the old file and re-request. We ignore + // errors here because re-creating the file is fine too. + os.Remove(path) + return nil, nil + } + + // File looks good so far, open it up so we can inspect the contents. + f, err := os.Open(path) + if err != nil { + return nil, err + } + + // Check the signature of the file + var sig [4]byte + if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil { + f.Close() + return nil, err + } + if !reflect.DeepEqual(sig, magicBytes) { + // Signatures don't match. Reset. + f.Close() + return nil, nil + } + + // Check the version. If it changed, then rewrite + var length uint32 + if err := binary.Read(f, binary.LittleEndian, &length); err != nil { + f.Close() + return nil, err + } + data := make([]byte, length) + if _, err := io.ReadFull(f, data); err != nil { + f.Close() + return nil, err + } + if string(data) != current { + // Version changed, reset + f.Close() + return nil, nil + } + + return f, nil +} +func checkResult(r io.Reader) (*CheckResponse, error) { + var result CheckResponse + if err := json.NewDecoder(r).Decode(&result); err != nil { + return nil, err + } + return &result, nil +} + +func checkSignature(path string) (string, error) { + _, err := os.Stat(path) + if err == nil { + // The file exists, read it out + sigBytes, err := ioutil.ReadFile(path) + if err != nil { + return "", err + } + + // Split the file into lines + lines := strings.SplitN(string(sigBytes), "\n", 2) + if len(lines) > 0 { + return strings.TrimSpace(lines[0]), nil + } + } + + // If this isn't a non-exist error, then return that. + if !os.IsNotExist(err) { + return "", err + } + + // The file doesn't exist, so create a signature. + var b [16]byte + n := 0 + for n < 16 { + n2, err := crand.Read(b[n:]) + if err != nil { + return "", err + } + + n += n2 + } + signature := fmt.Sprintf( + "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + + // Make sure the directory holding our signature exists. 
+ if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return "", err + } + + // Write the signature + if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil { + return "", err + } + + return signature, nil +} + +func writeCacheHeader(f io.Writer, v string) error { + // Write our signature first + if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil { + return err + } + + // Write out our current version length + length := uint32(len(v)) + if err := binary.Write(f, binary.LittleEndian, length); err != nil { + return err + } + + _, err := f.Write([]byte(v)) + return err +} + +// userMessage is suffixed to the signature file to provide feedback. +var userMessage = ` +This signature is a randomly generated UUID used to de-duplicate +alerts and version information. This signature is random, it is +not based on any personally identifiable information. To create +a new signature, you can simply delete this file at any time. +See the documentation for the software using Checkpoint for more +information on how to disable it. +` diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.mod b/vendor/github.com/hashicorp/go-checkpoint/go.mod new file mode 100644 index 00000000..be0c793e --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/go-checkpoint + +require ( + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-uuid v1.0.0 +) diff --git a/vendor/github.com/hashicorp/go-checkpoint/go.sum b/vendor/github.com/hashicorp/go-checkpoint/go.sum new file mode 100644 index 00000000..2128a0c8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/go.sum @@ -0,0 +1,4 @@ +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= diff --git a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go new file mode 100644 index 00000000..b9ee6298 --- /dev/null +++ b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go @@ -0,0 +1,118 @@ +package checkpoint + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "runtime" + "time" + + "github.com/hashicorp/go-cleanhttp" + uuid "github.com/hashicorp/go-uuid" +) + +// ReportParams are the parameters for configuring a telemetry report. +type ReportParams struct { + // Signature is some random signature that should be stored and used + // as a cookie-like value. This ensures that alerts aren't repeated. + // If the signature is changed, repeat alerts may be sent down. The + // signature should NOT be anything identifiable to a user (such as + // a MAC address). It should be random. + // + // If SignatureFile is given, then the signature will be read from this + // file. If the file doesn't exist, then a random signature will + // automatically be generated and stored here. SignatureFile will be + // ignored if Signature is given. 
+ Signature string `json:"signature"`
+ SignatureFile string `json:"-"`
+
+ StartTime time.Time `json:"start_time"`
+ EndTime time.Time `json:"end_time"`
+ Arch string `json:"arch"`
+ OS string `json:"os"`
+ Payload interface{} `json:"payload,omitempty"`
+ Product string `json:"product"`
+ RunID string `json:"run_id"`
+ SchemaVersion string `json:"schema_version"`
+ Version string `json:"version"`
+}
+
+func (i *ReportParams) signature() string {
+ signature := i.Signature
+ if i.Signature == "" && i.SignatureFile != "" {
+ var err error
+ signature, err = checkSignature(i.SignatureFile)
+ if err != nil {
+ return ""
+ }
+ }
+ return signature
+}
+
+// Report sends telemetry information to checkpoint
+func Report(ctx context.Context, r *ReportParams) error {
+ if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
+ return nil
+ }
+
+ req, err := ReportRequest(r)
+ if err != nil {
+ return err
+ }
+
+ client := cleanhttp.DefaultClient()
+ resp, err := client.Do(req.WithContext(ctx))
+ if err != nil {
+ return err
+ }
+ if resp.StatusCode != 201 {
+ return fmt.Errorf("Unknown status: %d", resp.StatusCode)
+ }
+
+ return nil
+}
+
+// ReportRequest creates a request object for making a report
+func ReportRequest(r *ReportParams) (*http.Request, error) {
+ // Populate some fields automatically if we can
+ if r.RunID == "" {
+ uuid, err := uuid.GenerateUUID()
+ if err != nil {
+ return nil, err
+ }
+ r.RunID = uuid
+ }
+ if r.Arch == "" {
+ r.Arch = runtime.GOARCH
+ }
+ if r.OS == "" {
+ r.OS = runtime.GOOS
+ }
+ if r.Signature == "" {
+ r.Signature = r.signature()
+ }
+
+ b, err := json.Marshal(r)
+ if err != nil {
+ return nil, err
+ }
+
+ u := &url.URL{
+ Scheme: "https",
+ Host: "checkpoint-api.hashicorp.com",
+ Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
+ }
+
+ req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
+
+ return req, nil
+}
diff --git a/vendor/github.com/hashicorp/go-checkpoint/versions.go b/vendor/github.com/hashicorp/go-checkpoint/versions.go
new file mode 100644
index 00000000..a5b0d3b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-checkpoint/versions.go
@@ -0,0 +1,90 @@
+package checkpoint
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+ "time"
+
+ "github.com/hashicorp/go-cleanhttp"
+)
+
+// VersionsParams are the parameters for a versions request.
+type VersionsParams struct {
+ // Service is used to lookup the correct service.
+ Service string
+
+ // Product is used to filter the version constraints.
+ Product string
+
+ // Force, if true, will force the check even if CHECKPOINT_DISABLE
+ // is set. Within HashiCorp products, this is ONLY USED when the user
+ // specifically requests it. This is never automatically done without
+ // the user's consent.
+ Force bool
+}
+
+// VersionsResponse is the response for a versions request.
+type VersionsResponse struct {
+ Service string `json:"service"`
+ Product string `json:"product"`
+ Minimum string `json:"minimum"`
+ Maximum string `json:"maximum"`
+ Excluding []string `json:"excluding"`
+}
+
+// Versions returns the version constraints for a given service and product.
+func Versions(p *VersionsParams) (*VersionsResponse, error) { + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force { + return &VersionsResponse{}, nil + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")) + } + + v := url.Values{} + v.Set("product", p.Product) + + u := &url.URL{ + Scheme: "https", + Host: "checkpoint-api.hashicorp.com", + Path: fmt.Sprintf("/v1/versions/%s", p.Service), + RawQuery: v.Encode(), + } + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", "HashiCorp/go-checkpoint") + + client := cleanhttp.DefaultClient() + + // We use a short timeout since checking for new versions is not critical + // enough to block on if checkpoint is broken/slow. + client.Timeout = time.Duration(timeout) * time.Millisecond + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode) + } + + result := &VersionsResponse{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go index 7d8a57c2..8d306bf5 100644 --- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -26,6 +26,7 @@ func DefaultPooledTransport() *http.Transport { DialContext: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, + DualStack: true, }).DialContext, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod new file mode 100644 index 00000000..310f0756 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 00000000..3c845dc0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,48 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. 
+func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
+ // Nil-check on input to make it optional
+ if input == nil {
+ input = &HandlerInput{
+ ErrStatus: http.StatusBadRequest,
+ }
+ }
+
+ // Default to http.StatusBadRequest on error
+ if input.ErrStatus == 0 {
+ input.ErrStatus = http.StatusBadRequest
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r != nil {
+ // Check URL path for non-printable characters
+ idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
+ return !unicode.IsPrint(c)
+ })
+
+ if idx != -1 {
+ w.WriteHeader(input.ErrStatus)
+ return
+ }
+
+ if next != nil {
+ next.ServeHTTP(w, r)
+ }
+ }
+
+ return
+ })
+}
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
index 4a0b6a62..ba4df6f4 100644
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -21,8 +21,7 @@ URLs. For example: "github.com/hashicorp/go-getter" would turn into a Git URL. Or "./foo" would turn into a file URL. These are extensible.

This library is used by [Terraform](https://terraform.io) for
-downloading modules, [Otto](https://ottoproject.io) for dependencies and
-Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+downloading modules and [Nomad](https://nomadproject.io) for downloading
binaries.

## Installation and Usage
@@ -72,6 +71,7 @@ can be augmented at runtime by implementing the `Getter` interface.
  * Mercurial
  * HTTP
  * Amazon S3
+  * Google GCP

In addition to the above protocols, go-getter has what are called "detectors." These take a URL and attempt to automatically choose the best protocol for
@@ -98,7 +98,7 @@ would download the given HTTP URL using the Git protocol.

Forced protocols will also override any detectors.

-In the absense of a forced protocol, detectors may be run on the URL, transforming
+In the absence of a forced protocol, detectors may be run on the URL, transforming
the protocol anyways. The above example would've used the Git protocol either
way since the Git detector would've detected it was a GitHub URL.

@@ -119,26 +119,81 @@ The protocol-specific options are documented below the URL format
section. But because they are part of the URL, we point it out here so
you know they exist.

+### Subdirectories
+
+If you want to download only a specific subdirectory from a downloaded
+directory, you can specify a subdirectory after a double-slash `//`.
+go-getter will first download the URL specified _before_ the double-slash
+(as if you didn't specify a double-slash), but will then copy the
+path after the double slash into the target directory.
+
+For example, if you're downloading this GitHub repository, but you only
+want to download the `test-fixtures` directory, you can do the following:
+
+```
+https://github.com/hashicorp/go-getter.git//test-fixtures
+```
+
+If you downloaded this to the `/tmp` directory, then the file
+`/tmp/archive.gz` would exist. Notice that this file is in the `test-fixtures`
+directory in this repository, but because we specified a subdirectory,
+go-getter automatically copied only that directory's contents.
+
+Subdirectory paths may also use filesystem glob patterns.
+The path must match _exactly one_ entry or go-getter will return an error.
+This is useful if you're not sure of the exact directory name but it follows
+a predictable naming structure.
+
+For example, the following URL would also work:
+
+```
+https://github.com/hashicorp/go-getter.git//test-*
+```
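+
+A minimal sketch of driving the same glob download from Go (this assumes the
+package-level `getter.Get` helper and a writable `./fixtures` destination):
+
+```go
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// Fetch the repository, then copy only the single directory that
+	// matches the "test-*" glob into ./fixtures.
+	src := "git::https://github.com/hashicorp/go-getter.git//test-*"
+	if err := getter.Get("./fixtures", src); err != nil {
+		log.Fatal(err)
+	}
+}
+```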
+
### Checksumming

For file downloads of any protocol, go-getter can automatically verify
a checksum for you. Note that checksumming only works for downloading files,
not directories, but checksumming will work for any protocol.

-To checksum a file, append a `checksum` query parameter to the URL.
-The paramter value should be in the format of `type:value`, where
-type is "md5", "sha1", "sha256", or "sha512". The "value" should be
-the actual checksum value. go-getter will parse out this query parameter
-automatically and use it to verify the checksum. An example URL
-is shown below:
+To checksum a file, append a `checksum` query parameter to the URL. go-getter
+will parse out this query parameter automatically and use it to verify the
+checksum. The parameter value can be in the format of `type:value` or just
+`value`, where type is "md5", "sha1", "sha256", "sha512", or "file". The
+"value" should be the actual checksum value or download URL for "file". When
+the `type` part is omitted, it will be guessed based on the length of the
+checksum string. Examples:

```
./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
```

+```
+./foo.txt?checksum=b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+```
+./foo.txt?checksum=file:./foo.txt.sha256sum
+```
+
+When checksumming from a file - ex: with `checksum=file:url` - go-getter will
+get the file linked in the URL after `file:` using the same configuration. For
+example, in `file:http://releases.ubuntu.com/cosmic/MD5SUMS` go-getter will
+download a checksum file under the aforementioned url using the http protocol.
+All protocols supported by go-getter can be used. The checksum file will be
+downloaded to a temporary file and then parsed. The destination of the temporary
+file can be changed by setting system specific environment variables: `TMPDIR`
+for unix; `TMP`, `TEMP` or `USERPROFILE` on windows. Read godoc of
+[os.TempDir](https://golang.org/pkg/os/#TempDir) for more information on the
+temporary directory selection. Contents of files are expected to be BSD or GNU
+style. Once go-getter is done with the checksum file, it is deleted.
+
The checksum query parameter is never sent to the backend
protocol implementation. It is used at a higher level by go-getter itself.

+If the destination file exists and the checksums match, the download
+will be skipped.
+
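+As a quick illustration of the same behaviour from Go, a sketch that downloads
+and verifies the example file above (assuming the package-level
+`getter.GetFile` helper and the md5 value from the examples):
+
+```go
+package main
+
+import (
+	"log"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// Download ./foo.txt and verify it against the md5 checksum in the
+	// query string; GetFile returns an error on a mismatch.
+	src := "./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21"
+	if err := getter.GetFile("./foo-verified.txt", src); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+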
### Unarchiving

go-getter will automatically unarchive files into a file or directory
@@ -154,9 +209,11 @@ The following archive formats are supported:

  * `tar.gz` and `tgz`
  * `tar.bz2` and `tbz2`
+  * `tar.xz` and `txz`
  * `zip`
  * `gz`
  * `bz2`
+  * `xz`

For example, an example URL is shown below:

@@ -183,11 +240,12 @@ from the URL before going to the final protocol downloader.

## Protocol-Specific Options

-This section documents the protocol-specific options that can be specified
-for go-getter. These options should be appended to the input as normal query
-parameters. Depending on the usage of go-getter, applications may provide
-alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
-provides a nice options block for specifying options rather than in the URL.
+This section documents the protocol-specific options that can be specified for
+go-getter. These options should be appended to the input as normal query
+parameters ([HTTP headers](#headers) are an exception to this, however).
+Depending on the usage of go-getter, applications may provide alternate ways of
+inputting options. For example, [Nomad](https://www.nomadproject.io) provides a
+nice options block for specifying options rather than in the URL.

## General (All Protocols)

@@ -200,6 +258,9 @@ The options below are available to all protocols:

 * `checksum` - Checksum to verify the downloaded file or archive. See
   the entire section on checksumming above for format and more details.

+ * `filename` - When in file download mode, allows specifying the name of the
+   downloaded file on disk. Has no effect in directory mode.
+
### Local Files (`file`)

None

@@ -215,6 +276,19 @@ None
from a private key file on disk, you would run `base64 -w0 <path to private key>`.

**Note**: Git 2.3+ is required to use this feature.
+
+ * `depth` - The Git clone depth. The provided number specifies the last `n`
+   revisions to clone from the repository.
+
+
+The `git` getter accepts both URL-style SSH addresses like
+`git::ssh://git@example.com/foo/bar`, and "scp-style" addresses like
+`git::git@example.com/foo/bar`. In the latter case, omitting the `git::`
+force prefix is allowed if the username prefix is exactly `git@`.
+
+The "scp-style" addresses _cannot_ be used in conjunction with the `ssh://`
+scheme prefix, because in that case the colon is used to mark an optional
+port number to connect on, rather than to delimit the path from the host.

### Mercurial (`hg`)

@@ -222,13 +296,24 @@ None

### HTTP (`http`)

-None
+#### Basic Authentication
+
+To use HTTP basic authentication with go-getter, simply prepend `username:password@` to the
+hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
+characters, including the username and password, must be URL encoded.
+
+#### Headers
+
+Optional request headers can be added by supplying them in a custom
+[`HttpGetter`](https://godoc.org/github.com/hashicorp/go-getter#HttpGetter)
+(_not_ as query parameters like most other options). These headers will be sent
+out on every request the getter in question makes.
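+
+A minimal sketch of wiring such a custom `HttpGetter` into a `Client` (the
+header name and value below are placeholders, not part of the go-getter API):
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	getter "github.com/hashicorp/go-getter"
+)
+
+func main() {
+	// An HttpGetter whose extra header is sent on every request it makes;
+	// "X-Example-Token" and its value are hypothetical.
+	httpGetter := &getter.HttpGetter{
+		Header: http.Header{
+			"X-Example-Token": []string{"placeholder"},
+		},
+	}
+
+	client := &getter.Client{
+		Src: "https://example.com/archive.zip",
+		Dst: "./archive.zip",
+		Getters: map[string]getter.Getter{
+			"http":  httpGetter,
+			"https": httpGetter,
+		},
+	}
+	if err := client.Get(); err != nil {
+		log.Fatal(err)
+	}
+}
+```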
#### S3 Bucket Examples

S3 has several addressing schemes used to reference your bucket. These are
@@ -250,4 +343,16 @@ Some examples for these addressing schemes:
- s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
- bucket.s3.amazonaws.com/foo
- bucket.s3-eu-west-1.amazonaws.com/foo/bar
+- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
+
+### GCS (`gcs`)
+
+#### GCS Authentication
+
+In order to access GCS, authentication credentials should be provided. More
+information can be found [here](https://cloud.google.com/docs/authentication/getting-started).
+
+#### GCS Bucket Examples
+
+- gcs::https://www.googleapis.com/storage/v1/bucket
+- gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip
+- www.googleapis.com/storage/v1/bucket/foo
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
index 159dad4d..1e8718e1 100644
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -1,5 +1,5 @@
version: "build-{branch}-{build}"
-image: Visual Studio 2015
+image: Visual Studio 2017
clone_folder: c:\gopath\github.com\hashicorp\go-getter
environment:
  GOPATH: c:\gopath
@@ -13,4 +13,4 @@ install:
  go get -d -v -t ./...
build_script:
-- cmd: go test -v ./...
+- cmd: go test ./...
diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go
new file mode 100644
index 00000000..eeccfea9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/checksum.go
@@ -0,0 +1,314 @@
+package getter
+
+import (
+	"bufio"
+	"bytes"
+	"crypto/md5"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/sha512"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strings"
+
+	urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// FileChecksum helps verify the checksum for a file.
+type FileChecksum struct {
+	Type     string
+	Hash     hash.Hash
+	Value    []byte
+	Filename string
+}
+
+// A ChecksumError is returned when a checksum differs.
+type ChecksumError struct {
+	Hash     hash.Hash
+	Actual   []byte
+	Expected []byte
+	File     string
+}
+
+func (cerr *ChecksumError) Error() string {
+	if cerr == nil {
+		return ""
+	}
+	return fmt.Sprintf(
+		"Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T",
+		cerr.File,
+		hex.EncodeToString(cerr.Expected),
+		hex.EncodeToString(cerr.Actual),
+		cerr.Hash, // ex: *sha256.digest
+	)
+}
+
+// checksum is a simple method to compute the checksum of a source file
+// and compare it to the given expected value.
+func (c *FileChecksum) checksum(source string) error {
+	f, err := os.Open(source)
+	if err != nil {
+		return fmt.Errorf("Failed to open file for checksum: %s", err)
+	}
+	defer f.Close()
+
+	c.Hash.Reset()
+	if _, err := io.Copy(c.Hash, f); err != nil {
+		return fmt.Errorf("Failed to hash: %s", err)
+	}
+
+	if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) {
+		return &ChecksumError{
+			Hash:     c.Hash,
+			Actual:   actual,
+			Expected: c.Value,
+			File:     source,
+		}
+	}
+
+	return nil
+}
+
+// extractChecksum will return a FileChecksum based on the 'checksum'
+// parameter of u.
+// ex:
+//  http://hashicorp.com/terraform?checksum=
+//  http://hashicorp.com/terraform?checksum=:
+//  http://hashicorp.com/terraform?checksum=file:
+// when checksumming from a file, extractChecksum will go get checksum_url
+// in a temporary directory, parse the content of the file then delete it.
+// Content of files are expected to be BSD style or GNU style. +// +// BSD-style checksum: +// MD5 (file1) = +// MD5 (file2) = +// +// GNU-style: +// file1 +// *file2 +// +// see parseChecksumLine for more detail on checksum file parsing +func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) { + q := u.Query() + v := q.Get("checksum") + + if v == "" { + return nil, nil + } + + vs := strings.SplitN(v, ":", 2) + switch len(vs) { + case 2: + break // good + default: + // here, we try to guess the checksum from it's length + // if the type was not passed + return newChecksumFromValue(v, filepath.Base(u.EscapedPath())) + } + + checksumType, checksumValue := vs[0], vs[1] + + switch checksumType { + case "file": + return c.ChecksumFromFile(checksumValue, u) + default: + return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath())) + } +} + +func newChecksum(checksumValue, filename string) (*FileChecksum, error) { + c := &FileChecksum{ + Filename: filename, + } + var err error + c.Value, err = hex.DecodeString(checksumValue) + if err != nil { + return nil, fmt.Errorf("invalid checksum: %s", err) + } + return c, nil +} + +func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + c.Type = strings.ToLower(checksumType) + switch c.Type { + case "md5": + c.Hash = md5.New() + case "sha1": + c.Hash = sha1.New() + case "sha256": + c.Hash = sha256.New() + case "sha512": + c.Hash = sha512.New() + default: + return nil, fmt.Errorf( + "unsupported checksum type: %s", checksumType) + } + + return c, nil +} + +func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) { + c, err := newChecksum(checksumValue, filename) + if err != nil { + return nil, err + } + + switch len(c.Value) { + case md5.Size: + c.Hash = md5.New() + c.Type = "md5" + case sha1.Size: + c.Hash = sha1.New() + c.Type = "sha1" + case sha256.Size: + c.Hash = sha256.New() + c.Type = "sha256" + case sha512.Size: + c.Hash = sha512.New() + c.Type = "sha512" + default: + return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue) + } + + return c, nil +} + +// ChecksumFromFile will return all the FileChecksums found in file +// +// ChecksumFromFile will try to guess the hashing algorithm based on content +// of checksum file +// +// ChecksumFromFile will only return checksums for files that match file +// behind src +func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) { + checksumFileURL, err := urlhelper.Parse(checksumFile) + if err != nil { + return nil, err + } + + tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path)) + if err != nil { + return nil, err + } + defer os.Remove(tempfile) + + c2 := &Client{ + Ctx: c.Ctx, + Getters: c.Getters, + Decompressors: c.Decompressors, + Detectors: c.Detectors, + Pwd: c.Pwd, + Dir: false, + Src: checksumFile, + Dst: tempfile, + ProgressListener: c.ProgressListener, + } + if err = c2.Get(); err != nil { + return nil, fmt.Errorf( + "Error downloading checksum file: %s", err) + } + + filename := filepath.Base(src.Path) + absPath, err := filepath.Abs(src.Path) + if err != nil { + return nil, err + } + checksumFileDir := filepath.Dir(checksumFileURL.Path) + relpath, err := filepath.Rel(checksumFileDir, absPath) + switch { + case err == nil || + err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir: + // ex: on windows C:\gopath\...\content.txt 
cannot be relative to \ + // which is okay, may be another expected path will work. + break + default: + return nil, err + } + + // possible file identifiers: + options := []string{ + filename, // ubuntu-14.04.1-server-amd64.iso + "*" + filename, // *ubuntu-14.04.1-server-amd64.iso Standard checksum + "?" + filename, // ?ubuntu-14.04.1-server-amd64.iso shasum -p + relpath, // dir/ubuntu-14.04.1-server-amd64.iso + "./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso + absPath, // fullpath; set if local + } + + f, err := os.Open(tempfile) + if err != nil { + return nil, fmt.Errorf( + "Error opening downloaded file: %s", err) + } + defer f.Close() + rd := bufio.NewReader(f) + for { + line, err := rd.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, fmt.Errorf( + "Error reading checksum file: %s", err) + } + break + } + checksum, err := parseChecksumLine(line) + if err != nil || checksum == nil { + continue + } + if checksum.Filename == "" { + // filename not sure, let's try + return checksum, nil + } + // make sure the checksum is for the right file + for _, option := range options { + if option != "" && checksum.Filename == option { + // any checksum will work so we return the first one + return checksum, nil + } + } + } + return nil, fmt.Errorf("no checksum found in: %s", checksumFile) +} + +// parseChecksumLine takes a line from a checksum file and returns +// checksumType, checksumValue and filename parseChecksumLine guesses the style +// of the checksum BSD vs GNU by splitting the line and by counting the parts. +// of a line. +// for BSD type sums parseChecksumLine guesses the hashing algorithm +// by checking the length of the checksum. +func parseChecksumLine(line string) (*FileChecksum, error) { + parts := strings.Fields(line) + + switch len(parts) { + case 4: + // BSD-style checksum: + // MD5 (file1) = + // MD5 (file2) = + if len(parts[1]) <= 2 || + parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' { + return nil, fmt.Errorf( + "Unexpected BSD-style-checksum filename format: %s", line) + } + filename := parts[1][1 : len(parts[1])-1] + return newChecksumFromType(parts[0], parts[3], filename) + case 2: + // GNU-style: + // file1 + // *file2 + return newChecksumFromValue(parts[0], parts[1]) + case 0: + return nil, nil // empty line + default: + return newChecksumFromValue(parts[0], "") + } +} diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go index 876812a0..007a78ba 100644 --- a/vendor/github.com/hashicorp/go-getter/client.go +++ b/vendor/github.com/hashicorp/go-getter/client.go @@ -1,15 +1,8 @@ package getter import ( - "bytes" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" + "context" "fmt" - "hash" - "io" "io/ioutil" "os" "path/filepath" @@ -17,6 +10,7 @@ import ( "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" ) // Client is a client for downloading things. @@ -25,6 +19,9 @@ import ( // Using a client directly allows more fine-grained control over how downloading // is done, as well as customizing the protocols supported. type Client struct { + // Ctx for cancellation + Ctx context.Context + // Src is the source URL to get. // // Dst is the path to save the downloaded thing as. If Dir is set to @@ -61,10 +58,20 @@ type Client struct { // // WARNING: deprecated. If Mode is set, that will take precedence. 
Dir bool + + // ProgressListener allows to track file downloads. + // By default a no op progress listener is used. + ProgressListener ProgressTracker + + Options []ClientOption } // Get downloads the configured source to the destination. func (c *Client) Get() error { + if err := c.Configure(c.Options...); err != nil { + return err + } + // Store this locally since there are cases we swap this mode := c.Mode if mode == ClientModeInvalid { @@ -75,18 +82,7 @@ func (c *Client) Get() error { } } - // Default decompressor value - decompressors := c.Decompressors - if decompressors == nil { - decompressors = Decompressors - } - - // Detect the URL. This is safe if it is already detected. - detectors := c.Detectors - if detectors == nil { - detectors = Detectors - } - src, err := Detect(c.Src, c.Pwd, detectors) + src, err := Detect(c.Src, c.Pwd, c.Detectors) if err != nil { return err } @@ -100,17 +96,14 @@ func (c *Client) Get() error { dst := c.Dst src, subDir := SourceDirSubdir(src) if subDir != "" { - tmpDir, err := ioutil.TempDir("", "tf") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(tmpDir); err != nil { - return err - } - defer os.RemoveAll(tmpDir) + defer tdcloser.Close() realDst = dst - dst = tmpDir + dst = td } u, err := urlhelper.Parse(src) @@ -121,12 +114,7 @@ func (c *Client) Get() error { force = u.Scheme } - getters := c.Getters - if getters == nil { - getters = Getters - } - - g, ok := getters[force] + g, ok := c.Getters[force] if !ok { return fmt.Errorf( "download not supported for scheme '%s'", force) @@ -152,7 +140,7 @@ func (c *Client) Get() error { if archiveV == "" { // We don't appear to... but is it part of the filename? matchingLen := 0 - for k, _ := range decompressors { + for k := range c.Decompressors { if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { archiveV = k matchingLen = len(k) @@ -165,7 +153,7 @@ func (c *Client) Get() error { // real path. var decompressDst string var decompressDir bool - decompressor := decompressors[archiveV] + decompressor := c.Decompressors[archiveV] if decompressor != nil { // Create a temporary directory to store our archive. We delete // this at the end of everything. @@ -184,44 +172,16 @@ func (c *Client) Get() error { mode = ClientModeFile } - // Determine if we have a checksum - var checksumHash hash.Hash - var checksumValue []byte - if v := q.Get("checksum"); v != "" { - // Delete the query parameter if we have it. - q.Del("checksum") - u.RawQuery = q.Encode() - - // Determine the checksum hash type - checksumType := "" - idx := strings.Index(v, ":") - if idx > -1 { - checksumType = v[:idx] - } - switch checksumType { - case "md5": - checksumHash = md5.New() - case "sha1": - checksumHash = sha1.New() - case "sha256": - checksumHash = sha256.New() - case "sha512": - checksumHash = sha512.New() - default: - return fmt.Errorf( - "unsupported checksum type: %s", checksumType) - } - - // Get the remainder of the value and parse it into bytes - b, err := hex.DecodeString(v[idx+1:]) - if err != nil { - return fmt.Errorf("invalid checksum: %s", err) - } - - // Set our value - checksumValue = b + // Determine checksum if we have one + checksum, err := c.extractChecksum(u) + if err != nil { + return fmt.Errorf("invalid checksum: %s", err) } + // Delete the query parameter if we have it. 
+ q.Del("checksum") + u.RawQuery = q.Encode() + if mode == ClientModeAny { // Ask the getter which client mode to use mode, err = g.ClientMode(u) @@ -232,22 +192,42 @@ func (c *Client) Get() error { // Destination is the base name of the URL path in "any" mode when // a file source is detected. if mode == ClientModeFile { - dst = filepath.Join(dst, filepath.Base(u.Path)) + filename := filepath.Base(u.Path) + + // Determine if we have a custom file name + if v := q.Get("filename"); v != "" { + // Delete the query parameter if we have it. + q.Del("filename") + u.RawQuery = q.Encode() + + filename = v + } + + dst = filepath.Join(dst, filename) } } // If we're not downloading a directory, then just download the file // and return. if mode == ClientModeFile { - err := g.GetFile(dst, u) - if err != nil { - return err + getFile := true + if checksum != nil { + if err := checksum.checksum(dst); err == nil { + // don't get the file if the checksum of dst is correct + getFile = false + } } - - if checksumHash != nil { - if err := checksum(dst, checksumHash, checksumValue); err != nil { + if getFile { + err := g.GetFile(dst, u) + if err != nil { return err } + + if checksum != nil { + if err := checksum.checksum(dst); err != nil { + return err + } + } } if decompressor != nil { @@ -282,7 +262,7 @@ func (c *Client) Get() error { if decompressor == nil { // If we're getting a directory, then this is an error. You cannot // checksum a directory. TODO: test - if checksumHash != nil { + if checksum != nil { return fmt.Errorf( "checksum cannot be specified for directory download") } @@ -305,30 +285,13 @@ func (c *Client) Get() error { return err } - return copyDir(realDst, filepath.Join(dst, subDir), false) - } - - return nil -} - -// checksum is a simple method to compute the checksum of a source file -// and compare it to the given expected value. -func checksum(source string, h hash.Hash, v []byte) error { - f, err := os.Open(source) - if err != nil { - return fmt.Errorf("Failed to open file for checksum: %s", err) - } - defer f.Close() - - if _, err := io.Copy(h, f); err != nil { - return fmt.Errorf("Failed to hash: %s", err) - } + // Process any globs + subDir, err := SubdirGlob(dst, subDir) + if err != nil { + return err + } - if actual := h.Sum(nil); !bytes.Equal(actual, v) { - return fmt.Errorf( - "Checksums did not match.\nExpected: %s\nGot: %s", - hex.EncodeToString(v), - hex.EncodeToString(actual)) + return copyDir(c.Ctx, realDst, subDir, false) } return nil diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go new file mode 100644 index 00000000..c1ee413b --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/client_option.go @@ -0,0 +1,46 @@ +package getter + +import "context" + +// A ClientOption allows to configure a client +type ClientOption func(*Client) error + +// Configure configures a client with options. 
+func (c *Client) Configure(opts ...ClientOption) error {
+	if c.Ctx == nil {
+		c.Ctx = context.Background()
+	}
+	c.Options = opts
+	for _, opt := range opts {
+		err := opt(c)
+		if err != nil {
+			return err
+		}
+	}
+	// Default decompressor values
+	if c.Decompressors == nil {
+		c.Decompressors = Decompressors
+	}
+	// Default detector values
+	if c.Detectors == nil {
+		c.Detectors = Detectors
+	}
+	// Default getter values
+	if c.Getters == nil {
+		c.Getters = Getters
+	}
+
+	for _, getter := range c.Getters {
+		getter.SetClient(c)
+	}
+	return nil
+}
+
+// WithContext allows passing a context to the operation
+// in order to be able to cancel a download in progress.
+func WithContext(ctx context.Context) func(*Client) error {
+	return func(c *Client) error {
+		c.Ctx = ctx
+		return nil
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/client_option_progress.go
new file mode 100644
index 00000000..9b185f71
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_option_progress.go
@@ -0,0 +1,38 @@
+package getter
+
+import (
+	"io"
+)
+
+// WithProgress allows a user to track
+// the progress of a download, for
+// example by displaying a progress
+// bar for the current download.
+// Not all getters have progress support yet.
+func WithProgress(pl ProgressTracker) func(*Client) error {
+	return func(c *Client) error {
+		c.ProgressListener = pl
+		return nil
+	}
+}
+
+// ProgressTracker allows tracking the progress of downloads.
+type ProgressTracker interface {
+	// TrackProgress should be called when
+	// a new object is being downloaded.
+	// src is the location the file is
+	// downloaded from.
+	// currentSize is the current size of
+	// the file in case it is a partial
+	// download.
+	// totalSize is the total size in bytes;
+	// it can be zero if the file size
+	// is not known.
+	// stream is the file being downloaded; every
+	// written byte adds up to the processed size.
+	//
+	// TrackProgress returns a ReadCloser that wraps the
+	// download in progress (stream).
+	// When the download is finished, body shall be closed.
+	TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/common.go
new file mode 100644
index 00000000..d2afd8ad
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/common.go
@@ -0,0 +1,14 @@
+package getter
+
+import (
+	"io/ioutil"
+)
+
+func tmpFile(dir, pattern string) (string, error) {
+	f, err := ioutil.TempFile(dir, pattern)
+	if err != nil {
+		return "", err
+	}
+	f.Close()
+	return f.Name(), nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
index 2f58e8ae..641fe6d0 100644
--- a/vendor/github.com/hashicorp/go-getter/copy_dir.go
+++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go
@@ -1,7 +1,7 @@
package getter

import (
-	"io"
+	"context"
	"os"
	"path/filepath"
	"strings"
@@ -11,7 +11,7 @@
// should already exist.
//
// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
-func copyDir(dst string, src string, ignoreDot bool) error { +func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error { src, err := filepath.EvalSymlinks(src) if err != nil { return err @@ -66,7 +66,7 @@ func copyDir(dst string, src string, ignoreDot bool) error { } defer dstF.Close() - if _, err := io.Copy(dstF, srcF); err != nil { + if _, err := Copy(ctx, dstF, srcF); err != nil { return err } diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go index d18174cc..198bb0ed 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress.go +++ b/vendor/github.com/hashicorp/go-getter/decompress.go @@ -1,7 +1,15 @@ package getter +import ( + "strings" +) + // Decompressor defines the interface that must be implemented to add // support for decompressing a type. +// +// Important: if you're implementing a decompressor, please use the +// containsDotDot helper in this file to ensure that files can't be +// decompressed outside of the specified directory. type Decompressor interface { // Decompress should decompress src to dst. dir specifies whether dst // is a directory or single file. src is guaranteed to be a single file @@ -16,14 +24,35 @@ var Decompressors map[string]Decompressor func init() { tbzDecompressor := new(TarBzip2Decompressor) tgzDecompressor := new(TarGzipDecompressor) + txzDecompressor := new(TarXzDecompressor) Decompressors = map[string]Decompressor{ "bz2": new(Bzip2Decompressor), "gz": new(GzipDecompressor), + "xz": new(XzDecompressor), "tar.bz2": tbzDecompressor, "tar.gz": tgzDecompressor, + "tar.xz": txzDecompressor, "tbz2": tbzDecompressor, "tgz": tgzDecompressor, + "txz": txzDecompressor, "zip": new(ZipDecompressor), } } + +// containsDotDot checks if the filepath value v contains a ".." entry. +// This will check filepath components by splitting along / or \. This +// function is copied directly from the Go net/http implementation. +func containsDotDot(v string) bool { + if !strings.Contains(v, "..") { + return false + } + for _, ent := range strings.FieldsFunc(v, isSlashRune) { + if ent == ".." { + return true + } + } + return false +} + +func isSlashRune(r rune) bool { return r == '/' || r == '\\' } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go index 20010540..5ebf709b 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go @@ -9,7 +9,7 @@ import ( ) // GzipDecompressor is an implementation of Decompressor that can -// decompress bz2 files. +// decompress gzip files. type GzipDecompressor struct{} func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error { diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go new file mode 100644 index 00000000..b6986a25 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go @@ -0,0 +1,160 @@ +package getter + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" +) + +// untar is a shared helper for untarring an archive. The reader should provide +// an uncompressed view of the tar archive. 
+func untar(input io.Reader, dst, src string, dir bool) error { + tarR := tar.NewReader(input) + done := false + dirHdrs := []*tar.Header{} + now := time.Now() + for { + hdr, err := tarR.Next() + if err == io.EOF { + if !done { + // Empty archive + return fmt.Errorf("empty archive: %s", src) + } + + break + } + if err != nil { + return err + } + + if hdr.Typeflag == tar.TypeXGlobalHeader || hdr.Typeflag == tar.TypeXHeader { + // don't unpack extended headers as files + continue + } + + path := dst + if dir { + // Disallow parent traversal + if containsDotDot(hdr.Name) { + return fmt.Errorf("entry contains '..': %s", hdr.Name) + } + + path = filepath.Join(path, hdr.Name) + } + + if hdr.FileInfo().IsDir() { + if !dir { + return fmt.Errorf("expected a single file: %s", src) + } + + // A directory, just make the directory and continue unarchiving... + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + + // Record the directory information so that we may set its attributes + // after all files have been extracted + dirHdrs = append(dirHdrs, hdr) + + continue + } else { + // There is no ordering guarantee that a file in a directory is + // listed before the directory + dstPath := filepath.Dir(path) + + // Check that the directory exists, otherwise create it + if _, err := os.Stat(dstPath); os.IsNotExist(err) { + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + } + } + + // We have a file. If we already decoded, then it is an error + if !dir && done { + return fmt.Errorf("expected a single file, got multiple: %s", src) + } + + // Mark that we're done so future in single file mode errors + done = true + + // Open the file for writing + dstF, err := os.Create(path) + if err != nil { + return err + } + _, err = io.Copy(dstF, tarR) + dstF.Close() + if err != nil { + return err + } + + // Chmod the file + if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { + return err + } + + // Set the access and modification time if valid, otherwise default to current time + aTime := now + mTime := now + if hdr.AccessTime.Unix() > 0 { + aTime = hdr.AccessTime + } + if hdr.ModTime.Unix() > 0 { + mTime = hdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + // Perform a final pass over extracted directories to update metadata + for _, dirHdr := range dirHdrs { + path := filepath.Join(dst, dirHdr.Name) + // Chmod the directory since they might be created before we know the mode flags + if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil { + return err + } + // Set the mtime/atime attributes since they would have been changed during extraction + aTime := now + mTime := now + if dirHdr.AccessTime.Unix() > 0 { + aTime = dirHdr.AccessTime + } + if dirHdr.ModTime.Unix() > 0 { + mTime = dirHdr.ModTime + } + if err := os.Chtimes(path, aTime, mTime); err != nil { + return err + } + } + + return nil +} + +// tarDecompressor is an implementation of Decompressor that can +// unpack tar files. 
+type tarDecompressor struct{} + +func (d *tarDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + return untar(f, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go index c46ed445..5391b5c8 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go @@ -1,10 +1,7 @@ package getter import ( - "archive/tar" "compress/bzip2" - "fmt" - "io" "os" "path/filepath" ) @@ -32,64 +29,5 @@ func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error { // Bzip2 compression is second bzipR := bzip2.NewReader(f) - - // Once bzip decompressed we have a tar format - tarR := tar.NewReader(bzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(bzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go index 686d6c2b..b2f662a8 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go @@ -11,20 +11,25 @@ import ( "runtime" "sort" "strings" - "testing" + "time" + + "github.com/mitchellh/go-testing-interface" ) // TestDecompressCase is a single test case for testing decompressors type TestDecompressCase struct { - Input string // Input is the complete path to the input file - Dir bool // Dir is whether or not we're testing directory mode - Err bool // Err is whether we expect an error or not - DirList []string // DirList is the list of files for Dir mode - FileMD5 string // FileMD5 is the expected MD5 for a single file + Input string // Input is the complete path to the input file + Dir bool // Dir is whether or not we're testing directory mode + Err bool // Err is whether we expect an error or not + DirList []string // DirList is the list of files for Dir mode + FileMD5 string // FileMD5 is the expected MD5 for a single file + Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) } // TestDecompressor is a helper function for 
testing generic decompressors. -func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) { +func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { + t.Helper() + for _, tc := range cases { t.Logf("Testing: %s", tc.Input) @@ -67,6 +72,18 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) } } + if tc.Mtime != nil { + actual := fi.ModTime() + if tc.Mtime.Unix() > 0 { + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) + } + } else if actual.Unix() <= 0 { + t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) + } + } + return } @@ -83,11 +100,31 @@ func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) if !reflect.DeepEqual(actual, expected) { t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected) } + // Check for correct atime/mtime + for _, dir := range actual { + path := filepath.Join(dst, dir) + if tc.Mtime != nil { + fi, err := os.Stat(path) + if err != nil { + t.Fatalf("err: %s", err) + } + actual := fi.ModTime() + if tc.Mtime.Unix() > 0 { + expected := *tc.Mtime + if actual != expected { + t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) + } + } else if actual.Unix() < 0 { + t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String()) + } + + } + } }() } } -func testListDir(t *testing.T, path string) []string { +func testListDir(t testing.T, path string) []string { var result []string err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error { if err != nil { @@ -102,7 +139,7 @@ func testListDir(t *testing.T, path string) []string { // If it is a dir, add trailing sep if info.IsDir() { - sub += "/" + sub += string(os.PathSeparator) } result = append(result, sub) @@ -116,7 +153,7 @@ func testListDir(t *testing.T, path string) []string { return result } -func testMD5(t *testing.T, path string) string { +func testMD5(t testing.T, path string) string { f, err := os.Open(path) if err != nil { t.Fatalf("err: %s", err) diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go index e8b1c31c..65eb70dd 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go @@ -1,10 +1,8 @@ package getter import ( - "archive/tar" "compress/gzip" "fmt" - "io" "os" "path/filepath" ) @@ -37,63 +35,5 @@ func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error { } defer gzipR.Close() - // Once gzip decompressed we have a tar format - tarR := tar.NewReader(gzipR) - done := false - for { - hdr, err := tarR.Next() - if err == io.EOF { - if !done { - // Empty archive - return fmt.Errorf("empty archive: %s", src) - } - - return nil - } - if err != nil { - return err - } - - path := dst - if dir { - path = filepath.Join(path, hdr.Name) - } - - if hdr.FileInfo().IsDir() { - if !dir { - return fmt.Errorf("expected a single file: %s", src) - } - - // A directory, just make the directory and continue unarchiving... - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - - continue - } - - // We have a file. 
If we already decoded, then it is an error - if !dir && done { - return fmt.Errorf("expected a single file, got multiple: %s", src) - } - - // Mark that we're done so future in single file mode errors - done = true - - // Open the file for writing - dstF, err := os.Create(path) - if err != nil { - return err - } - _, err = io.Copy(dstF, tarR) - dstF.Close() - if err != nil { - return err - } - - // Chmod the file - if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil { - return err - } - } + return untar(gzipR, dst, src, dir) } diff --git a/vendor/github.com/hashicorp/go-getter/decompress_txz.go b/vendor/github.com/hashicorp/go-getter/decompress_txz.go new file mode 100644 index 00000000..5e151c12 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_txz.go @@ -0,0 +1,39 @@ +package getter + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// TarXzDecompressor is an implementation of Decompressor that can +// decompress tar.xz files. +type TarXzDecompressor struct{} + +func (d *TarXzDecompressor) Decompress(dst, src string, dir bool) error { + // If we're going into a directory we should make that first + mkdir := dst + if !dir { + mkdir = filepath.Dir(dst) + } + if err := os.MkdirAll(mkdir, 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + txzR, err := xz.NewReader(f) + if err != nil { + return fmt.Errorf("Error opening an xz reader for %s: %s", src, err) + } + + return untar(txzR, dst, src, dir) +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_xz.go b/vendor/github.com/hashicorp/go-getter/decompress_xz.go new file mode 100644 index 00000000..4e37abab --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/decompress_xz.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/ulikunitz/xz" +) + +// XzDecompressor is an implementation of Decompressor that can +// decompress xz files. +type XzDecompressor struct{} + +func (d *XzDecompressor) Decompress(dst, src string, dir bool) error { + // Directory isn't supported at all + if dir { + return fmt.Errorf("xz-compressed files can only unarchive to a single file") + } + + // If we're going into a directory we should make that first + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + // File first + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + // xz compression is second + xzR, err := xz.NewReader(f) + if err != nil { + return err + } + + // Copy it out + dstF, err := os.Create(dst) + if err != nil { + return err + } + defer dstF.Close() + + _, err = io.Copy(dstF, xzR) + return err +} diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go index a065c076..0830f791 100644 --- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go +++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go @@ -9,7 +9,7 @@ import ( ) // ZipDecompressor is an implementation of Decompressor that can -// decompress tar.gzip files. +// decompress zip files. 
type ZipDecompressor struct{} func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { @@ -42,6 +42,11 @@ func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { for _, f := range zipR.File { path := dst if dir { + // Disallow parent traversal + if containsDotDot(f.Name) { + return fmt.Errorf("entry contains '..': %s", f.Name) + } + path = filepath.Join(path, f.Name) } diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go index 481b737c..5bb750c9 100644 --- a/vendor/github.com/hashicorp/go-getter/detect.go +++ b/vendor/github.com/hashicorp/go-getter/detect.go @@ -23,8 +23,10 @@ var Detectors []Detector func init() { Detectors = []Detector{ new(GitHubDetector), + new(GitDetector), new(BitBucketDetector), new(S3Detector), + new(GCSDetector), new(FileDetector), } } @@ -72,12 +74,18 @@ func Detect(src string, pwd string, ds []Detector) (string, error) { subDir = detectSubdir } } + if subDir != "" { u, err := url.Parse(result) if err != nil { return "", fmt.Errorf("Error parsing URL: %s", err) } u.Path += "//" + subDir + + // a subdir may contain wildcards, but in order to support them we + // have to ensure the path isn't escaped. + u.RawPath = u.Path + result = u.String() } diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go index a183a17d..19047eb1 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go +++ b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go @@ -35,7 +35,7 @@ func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) { var info struct { SCM string `json:"scm"` } - infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path + infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path resp, err := http.Get(infoUrl) if err != nil { return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err) diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go index 756ea43f..4ef41ea7 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_file.go +++ b/vendor/github.com/hashicorp/go-getter/detect_file.go @@ -32,7 +32,7 @@ func (d *FileDetector) Detect(src, pwd string) (string, bool, error) { return "", true, err } if fi.Mode()&os.ModeSymlink != 0 { - pwd, err = os.Readlink(pwd) + pwd, err = filepath.EvalSymlinks(pwd) if err != nil { return "", true, err } diff --git a/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/vendor/github.com/hashicorp/go-getter/detect_gcs.go new file mode 100644 index 00000000..11363737 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_gcs.go @@ -0,0 +1,43 @@ +package getter + +import ( + "fmt" + "net/url" + "strings" +) + +// GCSDetector implements Detector to detect GCS URLs and turn +// them into URLs that the GCSGetter can understand. 
+type GCSDetector struct{} + +func (d *GCSDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + if strings.Contains(src, "googleapis.com/") { + return d.detectHTTP(src) + } + + return "", false, nil +} + +func (d *GCSDetector) detectHTTP(src string) (string, bool, error) { + + parts := strings.Split(src, "/") + if len(parts) < 5 { + return "", false, fmt.Errorf( + "URL is not a valid GCS URL") + } + version := parts[2] + bucket := parts[3] + object := strings.Join(parts[4:], "/") + + url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s", + version, bucket, object)) + if err != nil { + return "", false, fmt.Errorf("error parsing GCS URL: %s", err) + } + + return "gcs::" + url.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/detect_git.go new file mode 100644 index 00000000..eeb8a04c --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_git.go @@ -0,0 +1,26 @@ +package getter + +// GitDetector implements Detector to detect Git SSH URLs such as +// git@host.com:dir1/dir2 and converts them to proper URLs. +type GitDetector struct{} + +func (d *GitDetector) Detect(src, _ string) (string, bool, error) { + if len(src) == 0 { + return "", false, nil + } + + u, err := detectSSH(src) + if err != nil { + return "", true, err + } + if u == nil { + return "", false, nil + } + + // We require the username to be "git" to assume that this is a Git URL + if u.User.Username() != "git" { + return "", false, nil + } + + return "git::" + u.String(), true, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go index c084ad9a..4bf4daf2 100644 --- a/vendor/github.com/hashicorp/go-getter/detect_github.go +++ b/vendor/github.com/hashicorp/go-getter/detect_github.go @@ -17,8 +17,6 @@ func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) { if strings.HasPrefix(src, "github.com/") { return d.detectHTTP(src) - } else if strings.HasPrefix(src, "git@github.com:") { - return d.detectSSH(src) } return "", false, nil @@ -47,27 +45,3 @@ func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) { return "git::" + url.String(), true, nil } - -func (d *GitHubDetector) detectSSH(src string) (string, bool, error) { - idx := strings.Index(src, ":") - qidx := strings.Index(src, "?") - if qidx == -1 { - qidx = len(src) - } - - var u url.URL - u.Scheme = "ssh" - u.User = url.User("git") - u.Host = "github.com" - u.Path = src[idx+1 : qidx] - if qidx < len(src) { - q, err := url.ParseQuery(src[qidx+1:]) - if err != nil { - return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err) - } - - u.RawQuery = q.Encode() - } - - return "git::" + u.String(), true, nil -} diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/detect_ssh.go new file mode 100644 index 00000000..c0dbe9d4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/detect_ssh.go @@ -0,0 +1,49 @@ +package getter + +import ( + "fmt" + "net/url" + "regexp" + "strings" +) + +// Note that we do not have an SSH-getter currently so this file serves +// only to hold the detectSSH helper that is used by other detectors. 
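For illustration (outside this vendored code), a minimal sketch of running the default detector chain over an scp-style Git address; the host and repository are placeholders:

```go
package main

import (
	"fmt"
	"log"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	// An "scp-style" address with the conventional git@ username; the
	// GitDetector should rewrite it into a forced git:: ssh:// URL.
	src, err := getter.Detect("git@example.com:foo/bar.git", "/tmp", getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(src) // expected: git::ssh://git@example.com/foo/bar.git
}
```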
+ +// sshPattern matches SCP-like SSH patterns (user@host:path) +var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$") + +// detectSSH determines if the src string matches an SSH-like URL and +// converts it into a net.URL compatible string. This returns nil if the +// string doesn't match the SSH pattern. +// +// This function is tested indirectly via detect_git_test.go +func detectSSH(src string) (*url.URL, error) { + matched := sshPattern.FindStringSubmatch(src) + if matched == nil { + return nil, nil + } + + user := matched[1] + host := matched[2] + path := matched[3] + qidx := strings.Index(path, "?") + if qidx == -1 { + qidx = len(path) + } + + var u url.URL + u.Scheme = "ssh" + u.User = url.User(user) + u.Host = host + u.Path = path[0:qidx] + if qidx < len(path) { + q, err := url.ParseQuery(path[qidx+1:]) + if err != nil { + return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err) + } + u.RawQuery = q.Encode() + } + + return &u, nil +} diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go index c3236f55..c233763c 100644 --- a/vendor/github.com/hashicorp/go-getter/get.go +++ b/vendor/github.com/hashicorp/go-getter/get.go @@ -18,6 +18,8 @@ import ( "os/exec" "regexp" "syscall" + + cleanhttp "github.com/hashicorp/go-cleanhttp" ) // Getter defines the interface that schemes must implement to download @@ -39,6 +41,11 @@ type Getter interface { // ClientMode returns the mode based on the given URL. This is used to // allow clients to let the getters decide which mode to use. ClientMode(*url.URL) (ClientMode, error) + + // SetClient allows a getter to know it's client + // in order to access client's Get functions or + // progress tracking. + SetClient(*Client) } // Getters is the mapping of scheme to the Getter implementation that will @@ -49,12 +56,18 @@ var Getters map[string]Getter // syntax is schema::url, example: git::https://foo.com var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`) +// httpClient is the default client to be used by HttpGetters. +var httpClient = cleanhttp.DefaultClient() + func init() { - httpGetter := &HttpGetter{Netrc: true} + httpGetter := &HttpGetter{ + Netrc: true, + } Getters = map[string]Getter{ "file": new(FileGetter), "git": new(GitGetter), + "gcs": new(GCSGetter), "hg": new(HgGetter), "s3": new(S3Getter), "http": httpGetter, @@ -67,12 +80,12 @@ func init() { // // src is a URL, whereas dst is always just a file path to a folder. This // folder doesn't need to exist. It will be created if it doesn't exist. -func Get(dst, src string) error { +func Get(dst, src string, opts ...ClientOption) error { return (&Client{ Src: src, Dst: dst, Dir: true, - Getters: Getters, + Options: opts, }).Get() } @@ -82,23 +95,23 @@ func Get(dst, src string) error { // dst must be a directory. If src is a file, it will be downloaded // into dst with the basename of the URL. If src is a directory or // archive, it will be unpacked directly into dst. -func GetAny(dst, src string) error { +func GetAny(dst, src string, opts ...ClientOption) error { return (&Client{ Src: src, Dst: dst, Mode: ClientModeAny, - Getters: Getters, + Options: opts, }).Get() } // GetFile downloads the file specified by src into the path specified by // dst. 
-func GetFile(dst, src string) error {
+func GetFile(dst, src string, opts ...ClientOption) error {
	return (&Client{
		Src:     src,
		Dst:     dst,
		Dir:     false,
-		Getters: Getters,
+		Options: opts,
	}).Get()
}
diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go
new file mode 100644
index 00000000..09e9b631
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_base.go
@@ -0,0 +1,20 @@
+package getter
+
+import "context"
+
+// getter is our base getter; it groups the
+// fields all getters have in common.
+type getter struct {
+	client *Client
+}
+
+func (g *getter) SetClient(c *Client) { g.client = c }
+
+// Context tries to return the Context from the getter's
+// client; otherwise context.Background() is returned.
+func (g *getter) Context() context.Context {
+	if g == nil || g.client == nil {
+		return context.Background()
+	}
+	return g.client.Ctx
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
index e5d2d61d..78660839 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -8,7 +8,11 @@ import (
// FileGetter is a Getter implementation that will download a module from
// a file scheme.
type FileGetter struct {
-	// Copy, if set to true, will copy data instead of using a symlink
+	getter
+
+	// Copy, if set to true, will copy data instead of using a symlink. If
+	// false, attempts to symlink to speed up the operation and to lower the
+	// disk space usage. If the symlink fails, it may fall back to copying on Windows.
	Copy bool
}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
new file mode 100644
index 00000000..d70fb495
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
@@ -0,0 +1,29 @@
+package getter
+
+import (
+	"context"
+	"io"
+)
+
+// readerFunc is syntactic sugar for the Read interface.
+type readerFunc func(p []byte) (n int, err error)
+
+func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
+
+// Copy is an io.Copy that is cancellable via a context.
+func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
+	// Copy will call the Reader and Writer interfaces multiple times, in order
+	// to copy by chunk (avoiding loading the whole file in memory).
+ return io.Copy(dst, readerFunc(func(p []byte) (int, error) { + + select { + case <-ctx.Done(): + // context has been canceled + // stop process and propagate "context canceled" error + return 0, ctx.Err() + default: + // otherwise just run default io.Reader implementation + return src.Read(p) + } + })) +} diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go index c89a2d5a..c3b28ae5 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go @@ -4,7 +4,6 @@ package getter import ( "fmt" - "io" "net/url" "os" "path/filepath" @@ -50,6 +49,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { } func (g *FileGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() path := u.Path if u.RawPath != "" { path = u.RawPath @@ -98,6 +98,6 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } defer dstF.Close() - _, err = io.Copy(dstF, srcF) + _, err = Copy(ctx, dstF, srcF) return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go index f87ed0a0..24f1acb1 100644 --- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go +++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go @@ -4,15 +4,16 @@ package getter import ( "fmt" - "io" "net/url" "os" "os/exec" "path/filepath" "strings" + "syscall" ) func (g *FileGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() path := u.Path if u.RawPath != "" { path = u.RawPath @@ -51,7 +52,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { sourcePath := toBackslash(path) // Use mklink to create a junction point - output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() + output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() if err != nil { return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) } @@ -60,6 +61,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error { } func (g *FileGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() path := u.Path if u.RawPath != "" { path = u.RawPath @@ -92,7 +94,21 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { // If we're not copying, just symlink and we're done if !g.Copy { - return os.Symlink(path, dst) + if err = os.Symlink(path, dst); err == nil { + return err + } + lerr, ok := err.(*os.LinkError) + if !ok { + return err + } + switch lerr.Err { + case syscall.ERROR_PRIVILEGE_NOT_HELD: + // no symlink privilege, let's + // fallback to a copy to avoid an error. + break + default: + return err + } } // Copy @@ -108,7 +124,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error { } defer dstF.Close() - _, err = io.Copy(dstF, srcF) + _, err = Copy(ctx, dstF, srcF) return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/get_gcs.go new file mode 100644 index 00000000..6faa70f4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/get_gcs.go @@ -0,0 +1,172 @@ +package getter + +import ( + "context" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" +) + +// GCSGetter is a Getter implementation that will download a module from +// a GCS bucket. 
+type GCSGetter struct { + getter +} + +func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return 0, err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return 0, err + } + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return 0, err + } + + if err == iterator.Done { + break + } + if strings.HasSuffix(obj.Name, "/") { + // A directory matched the prefix search, so this must be a directory + return ClientModeDir, nil + } else if obj.Name != object { + // A file matched the prefix search and doesn't have the same name + // as the query, so this must be a directory + return ClientModeDir, nil + } + } + // There are no directories or subdirectories, and if a match was returned, + // it was exactly equal to the prefix search. So return File mode + return ClientModeFile, nil +} + +func (g *GCSGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + // Remove destination if it already exists + _, err = os.Stat(dst) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // Remove the destination + if err := os.RemoveAll(dst); err != nil { + return err + } + } + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + + // Iterate through all matching objects. + iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object}) + for { + obj, err := iter.Next() + if err != nil && err != iterator.Done { + return err + } + if err == iterator.Done { + break + } + + if !strings.HasSuffix(obj.Name, "/") { + // Get the object destination path + objDst, err := filepath.Rel(object, obj.Name) + if err != nil { + return err + } + objDst = filepath.Join(dst, objDst) + // Download the matching object. 
+ err = g.getObject(ctx, client, objDst, bucket, obj.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (g *GCSGetter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() + + // Parse URL + bucket, object, err := g.parseURL(u) + if err != nil { + return err + } + + client, err := storage.NewClient(ctx) + if err != nil { + return err + } + return g.getObject(ctx, client, dst, bucket, object) +} + +func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error { + rc, err := client.Bucket(bucket).Object(object).NewReader(ctx) + if err != nil { + return err + } + defer rc.Close() + + // Create all the parent directories + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.Create(dst) + if err != nil { + return err + } + defer f.Close() + + _, err = Copy(ctx, f, rc) + return err +} + +func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) { + if strings.Contains(u.Host, "googleapis.com") { + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + + pathParts := strings.SplitN(u.Path, "/", 5) + if len(pathParts) != 5 { + err = fmt.Errorf("URL is not a valid GCS URL") + return + } + bucket = pathParts[3] + path = pathParts[4] + } + return +} diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go index 07281398..67e8b2f4 100644 --- a/vendor/github.com/hashicorp/go-getter/get_git.go +++ b/vendor/github.com/hashicorp/go-getter/get_git.go @@ -1,6 +1,7 @@ package getter import ( + "context" "encoding/base64" "fmt" "io/ioutil" @@ -8,27 +9,43 @@ import ( "os" "os/exec" "path/filepath" + "runtime" + "strconv" "strings" urlhelper "github.com/hashicorp/go-getter/helper/url" - "github.com/hashicorp/go-version" + safetemp "github.com/hashicorp/go-safetemp" + version "github.com/hashicorp/go-version" ) // GitGetter is a Getter implementation that will download a module from // a git repository. -type GitGetter struct{} +type GitGetter struct { + getter +} func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } func (g *GitGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() if _, err := exec.LookPath("git"); err != nil { return fmt.Errorf("git must be available and on the PATH") } + // The port number must be parseable as an integer. If not, the user + // was probably trying to use a scp-style address, in which case the + // ssh:// prefix must be removed to indicate that. 
+ if portStr := u.Port(); portStr != "" { + if _, err := strconv.ParseUint(portStr, 10, 16); err != nil { + return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr) + } + } + // Extract some query parameters we use var ref, sshKey string + var depth int q := u.Query() if len(q) > 0 { ref = q.Get("ref") @@ -37,6 +54,11 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { sshKey = q.Get("sshkey") q.Del("sshkey") + if n, err := strconv.Atoi(q.Get("depth")); err == nil { + depth = n + } + q.Del("depth") + // Copy the URL var newU url.URL = *u u = &newU @@ -83,9 +105,9 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { return err } if err == nil { - err = g.update(dst, sshKeyFile, ref) + err = g.update(ctx, dst, sshKeyFile, ref, depth) } else { - err = g.clone(dst, sshKeyFile, u) + err = g.clone(ctx, dst, sshKeyFile, u, depth) } if err != nil { return err @@ -99,19 +121,17 @@ func (g *GitGetter) Get(dst string, u *url.URL) error { } // Lastly, download any/all submodules. - return g.fetchSubmodules(dst, sshKeyFile) + return g.fetchSubmodules(ctx, dst, sshKeyFile, depth) } // GetFile for Git doesn't support updating at this time. It will download // the file every time. func (g *GitGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-git") + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. @@ -139,16 +159,23 @@ func (g *GitGetter) checkout(dst string, ref string) error { return getRunCommand(cmd) } -func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { - cmd := exec.Command("git", "clone", u.String(), dst) +func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error { + args := []string{"clone"} + + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + + args = append(args, u.String(), dst) + cmd := exec.CommandContext(ctx, "git", args...) setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) } -func (g *GitGetter) update(dst, sshKeyFile, ref string) error { +func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error { // Determine if we're a branch. If we're NOT a branch, then we just // switch to master prior to checking out - cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) + cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref) cmd.Dir = dst if getRunCommand(cmd) != nil { @@ -163,15 +190,24 @@ func (g *GitGetter) update(dst, sshKeyFile, ref string) error { return err } - cmd = exec.Command("git", "pull", "--ff-only") + if depth > 0 { + cmd = exec.Command("git", "pull", "--depth", strconv.Itoa(depth), "--ff-only") + } else { + cmd = exec.Command("git", "pull", "--ff-only") + } + cmd.Dir = dst setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) } // fetchSubmodules downloads any configured submodules recursively. 
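The new `depth` query parameter threads through clone, pull, and submodule updates above. A small sketch of how the clone arguments are assembled when a shallow depth is requested (the URL and destination below are placeholders):

```go
package main

import (
	"fmt"
	"strconv"
)

// cloneArgs mirrors how the updated GitGetter assembles its clone command
// when a shallow depth was requested via the ?depth= query parameter.
func cloneArgs(u, dst string, depth int) []string {
	args := []string{"clone"}
	if depth > 0 {
		args = append(args, "--depth", strconv.Itoa(depth))
	}
	return append(args, u, dst)
}

func main() {
	// go-getter URL form: git::https://example.com/repo.git?depth=1
	fmt.Println(cloneArgs("https://example.com/repo.git", "/tmp/dst", 1))
	// [clone --depth 1 https://example.com/repo.git /tmp/dst]
}
```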
-func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { - cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") +func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error { + args := []string{"submodule", "update", "--init", "--recursive"} + if depth > 0 { + args = append(args, "--depth", strconv.Itoa(depth)) + } + cmd := exec.CommandContext(ctx, "git", args...) cmd.Dir = dst setupGitEnv(cmd, sshKeyFile) return getRunCommand(cmd) @@ -180,17 +216,37 @@ func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { // setupGitEnv sets up the environment for the given command. This is used to // pass configuration data to git and ssh and enables advanced cloning methods. func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) { - var sshOpts []string + const gitSSHCommand = "GIT_SSH_COMMAND=" + var sshCmd []string + + // If we have an existing GIT_SSH_COMMAND, we need to append our options. + // We will also remove our old entry to make sure the behavior is the same + // with versions of Go < 1.9. + env := os.Environ() + for i, v := range env { + if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) { + sshCmd = []string{v} + + env[i], env[len(env)-1] = env[len(env)-1], env[i] + env = env[:len(env)-1] + break + } + } + + if len(sshCmd) == 0 { + sshCmd = []string{gitSSHCommand + "ssh"} + } if sshKeyFile != "" { // We have an SSH key temp file configured, tell ssh about this. - sshOpts = append(sshOpts, "-i", sshKeyFile) + if runtime.GOOS == "windows" { + sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1) + } + sshCmd = append(sshCmd, "-i", sshKeyFile) } - cmd.Env = append(os.Environ(), - // Set the ssh command to use for clones. - "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "), - ) + env = append(env, strings.Join(sshCmd, " ")) + cmd.Env = env } // checkGitVersion is used to check the version of git installed on the system @@ -208,11 +264,20 @@ func checkGitVersion(min string) error { } fields := strings.Fields(string(out)) - if len(fields) != 3 { + if len(fields) < 3 { return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) } + v := fields[2] + if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") { + // on windows, git version will return for example: + // git version 2.20.1.windows.1 + // which does not follow the semantic versioning specs + // https://semver.org. We remove that part in order for + // go-version to not error. + v = v[:strings.Index(v, ".windows.")] + } - have, err := version.NewVersion(fields[2]) + have, err := version.NewVersion(v) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go index 820bdd48..290649c9 100644 --- a/vendor/github.com/hashicorp/go-getter/get_hg.go +++ b/vendor/github.com/hashicorp/go-getter/get_hg.go @@ -1,8 +1,8 @@ package getter import ( + "context" "fmt" - "io/ioutil" "net/url" "os" "os/exec" @@ -10,17 +10,21 @@ import ( "runtime" urlhelper "github.com/hashicorp/go-getter/helper/url" + safetemp "github.com/hashicorp/go-safetemp" ) // HgGetter is a Getter implementation that will download a module from // a Mercurial repository.
-type HgGetter struct{} +type HgGetter struct { + getter +} func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { return ClientModeDir, nil } func (g *HgGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() if _, err := exec.LookPath("hg"); err != nil { return fmt.Errorf("hg must be available and on the PATH") } @@ -58,19 +62,19 @@ func (g *HgGetter) Get(dst string, u *url.URL) error { return err } - return g.update(dst, newURL, rev) + return g.update(ctx, dst, newURL, rev) } // GetFile for Hg doesn't support updating at this time. It will download // the file every time. func (g *HgGetter) GetFile(dst string, u *url.URL) error { - td, err := ioutil.TempDir("", "getter-hg") + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - if err := os.RemoveAll(td); err != nil { - return err - } + defer tdcloser.Close() // Get the filename, and strip the filename from the URL so we can // just get the repository directly. @@ -93,7 +97,7 @@ func (g *HgGetter) GetFile(dst string, u *url.URL) error { return err } - fg := &FileGetter{Copy: true} + fg := &FileGetter{Copy: true, getter: g.getter} return fg.GetFile(dst, u) } @@ -108,13 +112,13 @@ func (g *HgGetter) pull(dst string, u *url.URL) error { return getRunCommand(cmd) } -func (g *HgGetter) update(dst string, u *url.URL, rev string) error { +func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error { args := []string{"update"} if rev != "" { args = append(args, rev) } - cmd := exec.Command("hg", args...) + cmd := exec.CommandContext(ctx, "hg", args...) cmd.Dir = dst return getRunCommand(cmd) } diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go index 3c020343..7c4541c6 100644 --- a/vendor/github.com/hashicorp/go-getter/get_http.go +++ b/vendor/github.com/hashicorp/go-getter/get_http.go @@ -1,15 +1,18 @@ package getter import ( + "context" "encoding/xml" "fmt" "io" - "io/ioutil" "net/http" "net/url" "os" "path/filepath" + "strconv" "strings" + + safetemp "github.com/hashicorp/go-safetemp" ) // HttpGetter is a Getter implementation that will download from an HTTP @@ -17,7 +20,7 @@ import ( // // For file downloads, HTTP is used directly. // -// The protocol for downloading a directory from an HTTP endpoing is as follows: +// The protocol for downloading a directory from an HTTP endpoint is as follows: // // An HTTP GET request is made to the URL with the additional GET parameter // "terraform-get=1". This lets you handle that scenario specially if you @@ -33,9 +36,21 @@ import ( // formed URL. The shorthand syntax of "github.com/foo/bar" or relative // paths are not allowed. type HttpGetter struct { + getter + // Netrc, if true, will lookup and use auth information found // in the user's netrc file if available. Netrc bool + + // Client is the http.Client to use for Get requests. + // This defaults to a cleanhttp.DefaultClient if left unset. + Client *http.Client + + // Header contains optional request header fields that should be included + // with every HTTP request. Note that the zero value of this field is nil, + // and as such it needs to be initialized before use, via something like + // make(http.Header). 
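Since `HttpGetter` now exposes `Client` and `Header` fields (per the struct above), callers can inject custom transport settings and request headers. A hypothetical setup sketch, assuming the canonical `go-getter` import path; the header value is made up:

```go
package main

import (
	"net/http"
	"time"

	getter "github.com/hashicorp/go-getter"
)

func main() {
	g := &getter.HttpGetter{
		// A client with a timeout, instead of the cleanhttp default.
		Client: &http.Client{Timeout: 30 * time.Second},
		// The zero value of Header is nil, so initialize it before use.
		Header: make(http.Header),
	}
	g.Header.Set("X-Example-Token", "not-a-real-token") // hypothetical header
	_ = g
}
```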
+ Header http.Header } func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { @@ -46,6 +61,7 @@ func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { } func (g *HttpGetter) Get(dst string, u *url.URL) error { + ctx := g.Context() // Copy the URL so we can modify it var newU url.URL = *u u = &newU @@ -57,16 +73,27 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { } } + if g.Client == nil { + g.Client = httpClient + } + // Add terraform-get to the parameter. q := u.Query() q.Add("terraform-get", "1") u.RawQuery = q.Encode() // Get the URL - resp, err := http.Get(u.String()) + req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return err } + + req.Header = g.Header + resp, err := g.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() if resp.StatusCode < 200 || resp.StatusCode >= 300 { return fmt.Errorf("bad response code: %d", resp.StatusCode) @@ -90,55 +117,131 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error { // into a temporary directory, then copy over the proper subdir. source, subDir := SourceDirSubdir(source) if subDir == "" { - return Get(dst, source) + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } + return Get(dst, source, opts...) } // We have a subdir, time to jump some hoops - return g.getSubdir(dst, source, subDir) + return g.getSubdir(ctx, dst, source, subDir) } -func (g *HttpGetter) GetFile(dst string, u *url.URL) error { - resp, err := http.Get(u.String()) +func (g *HttpGetter) GetFile(dst string, src *url.URL) error { + ctx := g.Context() + if g.Netrc { + // Add auth from netrc if we can + if err := addAuthFromNetrc(src); err != nil { + return err + } + } + + // Create all the parent directories if needed + if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + return err + } + + f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666)) if err != nil { return err } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return fmt.Errorf("bad response code: %d", resp.StatusCode) + defer f.Close() + + if g.Client == nil { + g.Client = httpClient } - // Create all the parent directories - if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { + var currentFileSize int64 + + // We first make a HEAD request so we can check + // if the server supports range queries. If the server/URL doesn't + // support HEAD requests, we just fall back to GET. + req, err := http.NewRequest("HEAD", src.String(), nil) + if err != nil { return err } + if g.Header != nil { + req.Header = g.Header + } + headResp, err := g.Client.Do(req) + if err == nil && headResp != nil { + headResp.Body.Close() + if headResp.StatusCode == 200 { + // If the HEAD request succeeded, then attempt to set the range + // query if we can. 
+ if headResp.Header.Get("Accept-Ranges") == "bytes" { + if fi, err := f.Stat(); err == nil { + if _, err = f.Seek(0, os.SEEK_END); err == nil { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size())) + currentFileSize = fi.Size() + totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64) + if currentFileSize >= totalFileSize { + // file already present + return nil + } + } + } + } + } + } + req.Method = "GET" - f, err := os.Create(dst) + resp, err := g.Client.Do(req) if err != nil { return err } - defer f.Close() + switch resp.StatusCode { + case http.StatusOK, http.StatusPartialContent: + // all good + default: + resp.Body.Close() + return fmt.Errorf("bad response code: %d", resp.StatusCode) + } + + body := resp.Body - _, err = io.Copy(f, resp.Body) + if g.client != nil && g.client.ProgressListener != nil { + // track download + fn := filepath.Base(src.EscapedPath()) + body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body) + } + defer body.Close() + + n, err := Copy(ctx, f, body) + if err == nil && n < resp.ContentLength { + err = io.ErrShortWrite + } return err } // getSubdir downloads the source into the destination, but with // the proper subdir. -func (g *HttpGetter) getSubdir(dst, source, subDir string) error { - // Create a temporary directory to store the full source - td, err := ioutil.TempDir("", "tf") +func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) error { + // Create a temporary directory to store the full source. This has to be + // a non-existent directory. + td, tdcloser, err := safetemp.Dir("", "getter") if err != nil { return err } - defer os.RemoveAll(td) + defer tdcloser.Close() + var opts []ClientOption + if g.client != nil { + opts = g.client.Options + } // Download that into the given directory - if err := Get(td, source); err != nil { + if err := Get(td, source, opts...); err != nil { + return err + } + + // Process any globbing + sourcePath, err := SubdirGlob(td, subDir) + if err != nil { return err } // Make sure the subdir path actually exists - sourcePath := filepath.Join(td, subDir) if _, err := os.Stat(sourcePath); err != nil { return fmt.Errorf( "Error downloading %s: %s", source, err) @@ -154,7 +257,7 @@ func (g *HttpGetter) getSubdir(dst, source, subDir string) error { return err } - return copyDir(dst, sourcePath, false) + return copyDir(ctx, dst, sourcePath, false) } // parseMeta looks for the first meta tag in the given reader that diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go index 882e694d..e2a98ea2 100644 --- a/vendor/github.com/hashicorp/go-getter/get_mock.go +++ b/vendor/github.com/hashicorp/go-getter/get_mock.go @@ -6,6 +6,8 @@ import ( // MockGetter is an implementation of Getter that can be used for tests. type MockGetter struct { + getter + // Proxy, if set, will be called after recording the calls below. // If it isn't set, then the *Err values will be returned. 
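The resumable-download logic added to `HttpGetter.GetFile` above boils down to: if a partial file exists and the HEAD response advertises `Accept-Ranges: bytes`, request only the remaining bytes (and skip the GET entirely when the file is already complete). A minimal sketch of that computation:

```go
package main

import "fmt"

// rangeHeader mirrors the resume logic: given a partial file of currentSize
// bytes and a server that advertises byte ranges, request only the rest.
func rangeHeader(currentSize, totalSize int64, acceptRanges string) (string, bool) {
	if acceptRanges != "bytes" || currentSize <= 0 || currentSize >= totalSize {
		return "", false
	}
	return fmt.Sprintf("bytes=%d-", currentSize), true
}

func main() {
	h, ok := rangeHeader(1024, 4096, "bytes")
	fmt.Println(h, ok) // bytes=1024- true
}
```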
Proxy Getter diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go index d3bffeb1..93eeb0b8 100644 --- a/vendor/github.com/hashicorp/go-getter/get_s3.go +++ b/vendor/github.com/hashicorp/go-getter/get_s3.go @@ -1,8 +1,8 @@ package getter import ( + "context" "fmt" - "io" "net/url" "os" "path/filepath" @@ -18,7 +18,9 @@ import ( // S3Getter is a Getter implementation that will download a module from // a S3 bucket. -type S3Getter struct{} +type S3Getter struct { + getter +} func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { // Parse URL @@ -28,7 +30,7 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } // Create client config - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -60,6 +62,8 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { } func (g *S3Getter) Get(dst string, u *url.URL) error { + ctx := g.Context() + // Parse URL region, bucket, path, _, creds, err := g.parseUrl(u) if err != nil { @@ -84,7 +88,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) @@ -124,7 +128,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { } objDst = filepath.Join(dst, objDst) - if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil { + if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil { return err } } @@ -134,18 +138,19 @@ func (g *S3Getter) Get(dst string, u *url.URL) error { } func (g *S3Getter) GetFile(dst string, u *url.URL) error { + ctx := g.Context() region, bucket, path, version, creds, err := g.parseUrl(u) if err != nil { return err } - config := g.getAWSConfig(region, creds) + config := g.getAWSConfig(region, u, creds) sess := session.New(config) client := s3.New(sess) - return g.getObject(client, dst, bucket, path, version) + return g.getObject(ctx, client, dst, bucket, path, version) } -func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error { +func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error { req := &s3.GetObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -170,11 +175,11 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er } defer f.Close() - _, err = io.Copy(f, resp.Body) + _, err = Copy(ctx, f, resp.Body) return err } -func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config { +func (g *S3Getter) getAWSConfig(region string, url *url.URL, creds *credentials.Credentials) *aws.Config { conf := &aws.Config{} if creds == nil { // Grab the metadata URL @@ -195,6 +200,14 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * }) } + if creds != nil { + conf.Endpoint = &url.Host + conf.S3ForcePathStyle = aws.Bool(true) + if url.Scheme == "http" { + conf.DisableSSL = aws.Bool(true) + } + } + conf.Credentials = creds if region != "" { conf.Region = aws.String(region) @@ -204,29 +217,48 @@ func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) * } func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) { - // Expected host style: s3.amazonaws.com. 
They always have 3 parts, - // although the first may differ if we're accessing a specific region. - hostParts := strings.Split(u.Host, ".") - if len(hostParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } + // This just checks whether we are dealing with S3 or + // any other S3 compliant service. S3 has a predictable + // URL while others do not + if strings.Contains(u.Host, "amazonaws.com") { + // Expected host style: s3.amazonaws.com. They always have 3 parts, + // although the first may differ if we're accessing a specific region. + hostParts := strings.Split(u.Host, ".") + if len(hostParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } - // Parse the region out of the first part of the host - region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") - if region == "" { - region = "us-east-1" - } + // Parse the region out of the first part of the host + region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3") + if region == "" { + region = "us-east-1" + } - pathParts := strings.SplitN(u.Path, "/", 3) - if len(pathParts) != 3 { - err = fmt.Errorf("URL is not a valid S3 URL") - return - } + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 URL") + return + } + + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") - bucket = pathParts[1] - path = pathParts[2] - version = u.Query().Get("version") + } else { + pathParts := strings.SplitN(u.Path, "/", 3) + if len(pathParts) != 3 { + err = fmt.Errorf("URL is not a valid S3 compliant URL") + return + } + bucket = pathParts[1] + path = pathParts[2] + version = u.Query().Get("version") + region = u.Query().Get("region") + if region == "" { + region = "us-east-1" + } + } _, hasAwsId := u.Query()["aws_access_key_id"] _, hasAwsSecret := u.Query()["aws_access_key_secret"] diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/go.mod new file mode 100644 index 00000000..807c0a23 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.mod @@ -0,0 +1,22 @@ +module github.com/hashicorp/go-getter + +require ( + cloud.google.com/go v0.36.0 + github.com/aws/aws-sdk-go v1.15.78 + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d + github.com/cheggaaa/pb v1.0.27 + github.com/fatih/color v1.7.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-safetemp v1.0.0 + github.com/hashicorp/go-version v1.1.0 + github.com/mattn/go-colorable v0.0.9 // indirect + github.com/mattn/go-isatty v0.0.4 // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mitchellh/go-homedir v1.0.0 + github.com/mitchellh/go-testing-interface v1.0.0 + github.com/ulikunitz/xz v0.5.5 + golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect + golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 // indirect + google.golang.org/api v0.1.0 + gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect +) diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/go.sum new file mode 100644 index 00000000..0fc50882 --- /dev/null +++ b/vendor/github.com/hashicorp/go-getter/go.sum @@ -0,0 +1,182 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8= +cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= 
+github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod 
h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go index 4655226f..4280ec59 100644 --- a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go +++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go @@ -11,19 +11,18 @@ func parse(rawURL string) (*url.URL, error) { // Make sure we're using "/" since URLs are "/"-based. rawURL = filepath.ToSlash(rawURL) + if len(rawURL) > 1 && rawURL[1] == ':' { + // Assume we're dealing with a drive letter. In which case we + // force the 'file' scheme to avoid "net/url" URL.String() prepending + // our url with "./". + rawURL = "file://" + rawURL + } + u, err := url.Parse(rawURL) if err != nil { return nil, err } - if len(rawURL) > 1 && rawURL[1] == ':' { - // Assume we're dealing with a drive letter file path where the drive - // letter has been parsed into the URL Scheme, and the rest of the path - // has been parsed into the URL Path without the leading ':' character. - u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path) - u.Scheme = "" - } - if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { // Assume we're dealing with a drive letter file path where the drive // letter has been parsed into the URL Host. 
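The `url_windows.go` change above moves the drive-letter fix before `url.Parse`, so `C:` is never misread as a URL scheme. A portable sketch of the idea (using `strings.Replace` where the real code relies on `filepath.ToSlash`, which only rewrites backslashes on Windows):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func toFileURL(raw string) string {
	// filepath.ToSlash performs this replacement on Windows.
	raw = strings.Replace(raw, `\`, "/", -1)
	if len(raw) > 1 && raw[1] == ':' {
		// Force the file scheme so url.Parse doesn't treat "C" as a scheme.
		raw = "file://" + raw
	}
	return raw
}

func main() {
	u, err := url.Parse(toFileURL(`C:\Users\me\src`))
	fmt.Println(u.Scheme, u.Host, u.Path, err) // file C: /Users/me/src <nil>
}
```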
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go index 4d5ee3cc..dab6d400 100644 --- a/vendor/github.com/hashicorp/go-getter/source.go +++ b/vendor/github.com/hashicorp/go-getter/source.go @@ -1,21 +1,36 @@ package getter import ( + "fmt" + "path/filepath" "strings" ) -// SourceDirSubdir takes a source and returns a tuple of the URL without -// the subdir and the URL with the subdir. +// SourceDirSubdir takes a source URL and returns a tuple of the URL without +// the subdir and the subdir. +// +// ex: +// dom.com/path/?q=p => dom.com/path/?q=p, "" +// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*" +// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2" +// func SourceDirSubdir(src string) (string, string) { - // Calcaulate an offset to avoid accidentally marking the scheme + + // URL might contain another URL in query parameters + stop := len(src) + if idx := strings.Index(src, "?"); idx > -1 { + stop = idx + } + + // Calculate an offset to avoid accidentally marking the scheme // as the dir. var offset int - if idx := strings.Index(src, "://"); idx > -1 { + if idx := strings.Index(src[:stop], "://"); idx > -1 { offset = idx + 3 } // First see if we even have an explicit subdir - idx := strings.Index(src[offset:], "//") + idx := strings.Index(src[offset:stop], "//") if idx == -1 { return src, "" } @@ -34,3 +49,27 @@ func SourceDirSubdir(src string) (string, string) { return src, subdir } + +// SubdirGlob returns the actual subdir with globbing processed. +// +// dst should be a destination directory that is already populated (the +// download is complete) and subDir should be the set subDir. If subDir +// is an empty string, this returns an empty string. +// +// The returned path is the full absolute path. +func SubdirGlob(dst, subDir string) (string, error) { + matches, err := filepath.Glob(filepath.Join(dst, subDir)) + if err != nil { + return "", err + } + + if len(matches) == 0 { + return "", fmt.Errorf("subdir %q not found", subDir) + } + + if len(matches) > 1 { + return "", fmt.Errorf("subdir %q matches multiple paths", subDir) + } + + return matches[0], nil +} diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE new file mode 100644 index 00000000..abaf1e45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md new file mode 100644 index 00000000..9b6845e9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/README.md @@ -0,0 +1,148 @@ +# go-hclog + +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[godocs]: https://godoc.org/github.com/hashicorp/go-hclog + +`go-hclog` is a package for Go that provides a simple key/value logging +interface for use in development and production environments. + +It provides logging levels that provide decreased output based upon the +desired amount of output, unlike the standard library `log` package. + +It provides `Printf` style logging of values via `hclog.Fmt()`. + +It provides a human readable output mode for use in development as well as +JSON output mode for production. + +## Stability Note + +While this library is fully open source and HashiCorp will be maintaining it +(since we are and will be making extensive use of it), the API and output +format is subject to minor changes as we fully bake and vet it in our projects. +This notice will be removed once it's fully integrated into our major projects +and no further changes are anticipated. + +## Installation and Docs + +Install using `go get github.com/hashicorp/go-hclog`. + +Full documentation is available at +http://godoc.org/github.com/hashicorp/go-hclog + +## Usage + +### Use the global logger + +```go +hclog.Default().Info("hello world") +``` + +```text +2017-07-05T16:15:55.167-0700 [INFO ] hello world +``` + +(Note timestamps are removed in future examples for brevity.) + +### Create a new logger + +```go +appLogger := hclog.New(&hclog.LoggerOptions{ + Name: "my-app", + Level: hclog.LevelFromString("DEBUG"), +}) +``` + +### Emit an Info level message with 2 key/value pairs + +```go +input := "5.5" +_, err := strconv.ParseInt(input, 10, 32) +if err != nil { + appLogger.Info("Invalid input for ParseInt", "input", input, "error", err) +} +``` + +```text +... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax" +``` + +### Create a new Logger for a major subsystem + +```go +subsystemLogger := appLogger.Named("transport") +subsystemLogger.Info("we are transporting something") +``` + +```text +... [INFO ] my-app.transport: we are transporting something +``` + +Notice that logs emitted by `subsystemLogger` contain `my-app.transport`, +reflecting both the application and subsystem names. + +### Create a new Logger with fixed key/value pairs + +Using `With()` will include a specific key-value pair in all messages emitted +by that logger. + +```go +requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363" +requestLogger := subsystemLogger.With("request", requestID) +requestLogger.Info("we are transporting a request") +``` + +```text +... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363 +``` + +This allows sub Loggers to be context specific without having to thread that +into all the callers. 
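Besides `With()`, this vendored copy of go-hclog also ships a `context.go` (shown further below in this diff) for carrying a logger through `context.Context`. A hypothetical usage sketch, assuming the canonical import path:

```go
package main

import (
	"context"

	hclog "github.com/hashicorp/go-hclog"
)

func handle(ctx context.Context) {
	// FromContext falls back to the default logger when none is stored.
	hclog.FromContext(ctx).Info("handling request")
}

func main() {
	logger := hclog.New(&hclog.LoggerOptions{Name: "my-app"})
	// Extra args are applied as if by logger.With().
	ctx := hclog.WithContext(context.Background(), logger, "request", "42")
	handle(ctx)
}
```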
+ +### Using `hclog.Fmt()` + +```go +var totalBandwidth int = 200 +appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth)) +``` + +```text +... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s" +``` + +### Use this with code that uses the standard library logger + +If you want to use the standard library's `log.Logger` interface you can wrap +`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use +it with the familiar `Println()`, `Printf()`, etc. For example: + +```go +stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, +}) +// Printf() is provided by stdlib log.Logger interface, not hclog.Logger +stdLogger.Printf("[DEBUG] %+v", stdLogger) +``` + +```text +... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]} +``` + +Alternatively, you may configure the system-wide logger: + +```go +// log the standard logger from 'import "log"' +log.SetOutput(appLogger.Writer(&hclog.StandardLoggerOptions{InferLevels: true})) +log.SetPrefix("") +log.SetFlags(0) + +log.Printf("[DEBUG] %d", 42) +``` + +```text +... [DEBUG] my-app: 42 +``` + +Notice that if `appLogger` is initialized with the `INFO` log level _and_ you +specify `InferLevels: true`, you will not see any output here. You must change +`appLogger` to `DEBUG` to see output. See the docs for more information. diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go new file mode 100644 index 00000000..7815f501 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/context.go @@ -0,0 +1,38 @@ +package hclog + +import ( + "context" +) + +// WithContext inserts a logger into the context and is retrievable +// with FromContext. The optional args can be set with the same syntax as +// Logger.With to set fields on the inserted logger. This will not modify +// the logger argument in-place. +func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context { + // While we could call logger.With even with zero args, we have this + // check to avoid unnecessary allocations around creating a copy of a + // logger. + if len(args) > 0 { + logger = logger.With(args...) + } + + return context.WithValue(ctx, contextKey, logger) +} + +// FromContext returns a logger from the context. This will return L() +// (the default logger) if no logger is found in the context. Therefore, +// this will never return a nil value. +func FromContext(ctx context.Context) Logger { + logger, _ := ctx.Value(contextKey).(Logger) + if logger == nil { + return L() + } + + return logger +} + +// Unexported new type so that our context key never collides with another. +type contextKeyType struct{} + +// contextKey is the key used for the context to store the logger. +var contextKey = contextKeyType{} diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go new file mode 100644 index 00000000..22ebc57d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/global.go @@ -0,0 +1,62 @@ +package hclog + +import ( + "sync" +) + +var ( + protect sync.Once + def Logger + + // DefaultOptions is used to create the Default logger. These are read + // only when the Default logger is created, so set them as soon as the + // process starts. + DefaultOptions = &LoggerOptions{ + Level: DefaultLevel, + Output: DefaultOutput, + } +) + +// Default returns a globally held logger.
This can be a good starting +// place, and then you can use .With() and .Name() to create sub-loggers +// to be used in more specific contexts. +// The value of the Default logger can be set via SetDefault() or by +// changing the options in DefaultOptions. +// +// This method is goroutine safe, returning a global from memory, but +// care should be used if SetDefault() is called at random times +// in the program as that may result in race conditions and an unexpected +// Logger being returned. +func Default() Logger { + protect.Do(func() { + // If SetDefault was used before Default() was called, we need to + // detect that here. + if def == nil { + def = New(DefaultOptions) + } + }) + + return def +} + +// L is a short alias for Default(). +func L() Logger { + return Default() +} + +// SetDefault changes the logger to be returned by Default() and L() +// to the one given. This allows packages to use the default logger +// and have higher level packages change it to match the execution +// environment. It returns any old default if there is one. +// +// NOTE: This is expected to be called early in the program to set up +// a default logger. As such, it does not attempt to make itself +// not racy with regard to the value of the default logger. Ergo +// if it is called in goroutines, you may experience race conditions +// with other goroutines retrieving the default logger. Basically, +// don't do that. +func SetDefault(log Logger) Logger { + old := def + def = log + return old +} diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod new file mode 100644 index 00000000..0d079a65 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.mod @@ -0,0 +1,7 @@ +module github.com/hashicorp/go-hclog + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.2 +) diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum new file mode 100644 index 00000000..e03ee77d --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/go.sum @@ -0,0 +1,6 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go new file mode 100644 index 00000000..219656c4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/intlogger.go @@ -0,0 +1,527 @@ +package hclog + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "log" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// TimeFormat to use for logging.
This is a version of RFC3339 that contains
+// millisecond precision
+const TimeFormat = "2006-01-02T15:04:05.000Z0700"
+
+// errJsonUnsupportedTypeMsg is included in log json entries if an arg cannot be serialized to json
+const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
+
+var (
+	_levelToBracket = map[Level]string{
+		Debug: "[DEBUG]",
+		Trace: "[TRACE]",
+		Info:  "[INFO] ",
+		Warn:  "[WARN] ",
+		Error: "[ERROR]",
+	}
+)
+
+// Make sure that intLogger is a Logger
+var _ Logger = &intLogger{}
+
+// intLogger is an internal logger implementation. Internal in that it is
+// defined entirely by this package.
+type intLogger struct {
+	json       bool
+	caller     bool
+	name       string
+	timeFormat string
+
+	// This is a pointer so that it's shared by any derived loggers, since
+	// those derived loggers share the bufio.Writer as well.
+	mutex  *sync.Mutex
+	writer *writer
+	level  *int32
+
+	implied []interface{}
+}
+
+// New returns a configured logger.
+func New(opts *LoggerOptions) Logger {
+	if opts == nil {
+		opts = &LoggerOptions{}
+	}
+
+	output := opts.Output
+	if output == nil {
+		output = DefaultOutput
+	}
+
+	level := opts.Level
+	if level == NoLevel {
+		level = DefaultLevel
+	}
+
+	mutex := opts.Mutex
+	if mutex == nil {
+		mutex = new(sync.Mutex)
+	}
+
+	l := &intLogger{
+		json:       opts.JSONFormat,
+		caller:     opts.IncludeLocation,
+		name:       opts.Name,
+		timeFormat: TimeFormat,
+		mutex:      mutex,
+		writer:     newWriter(output),
+		level:      new(int32),
+	}
+
+	if opts.TimeFormat != "" {
+		l.timeFormat = opts.TimeFormat
+	}
+
+	atomic.StoreInt32(l.level, int32(level))
+
+	return l
+}
+
+// Log a message and a set of key/value pairs if the given level is at
+// or more severe than the threshold configured in the Logger.
+func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
+	if level < Level(atomic.LoadInt32(l.level)) {
+		return
+	}
+
+	t := time.Now()
+
+	l.mutex.Lock()
+	defer l.mutex.Unlock()
+
+	if l.json {
+		l.logJSON(t, level, msg, args...)
+	} else {
+		l.log(t, level, msg, args...)
+	}
+
+	l.writer.Flush(level)
+}
+
+// Clean up a path by returning the last 2 segments of the path only.
+func trimCallerPath(path string) string {
+	// lovely borrowed from zap
+	// nb. To make sure we trim the path correctly on Windows too, we
+	// counter-intuitively need to use '/' and *not* os.PathSeparator here,
+	// because the path given originates from Go stdlib, specifically
+	// runtime.Caller() which (as of Mar/17) returns forward slashes even on
+	// Windows.
+	//
+	// See https://github.com/golang/go/issues/3335
+	// and https://github.com/golang/go/issues/18151
+	//
+	// for discussion on the issue on Go side.
+
+	// Find the last separator.
+	idx := strings.LastIndexByte(path, '/')
+	if idx == -1 {
+		return path
+	}
+
+	// Find the penultimate separator. 
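+	// e.g. "/go/src/app/server/handler.go" trims to "server/handler.go".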
+ idx = strings.LastIndexByte(path[:idx], '/') + if idx == -1 { + return path + } + + return path[idx+1:] +} + +// Non-JSON logging format function +func (l *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { + l.writer.WriteString(t.Format(l.timeFormat)) + l.writer.WriteByte(' ') + + s, ok := _levelToBracket[level] + if ok { + l.writer.WriteString(s) + } else { + l.writer.WriteString("[?????]") + } + + if l.caller { + if _, file, line, ok := runtime.Caller(3); ok { + l.writer.WriteByte(' ') + l.writer.WriteString(trimCallerPath(file)) + l.writer.WriteByte(':') + l.writer.WriteString(strconv.Itoa(line)) + l.writer.WriteByte(':') + } + } + + l.writer.WriteByte(' ') + + if l.name != "" { + l.writer.WriteString(l.name) + l.writer.WriteString(": ") + } + + l.writer.WriteString(msg) + + args = append(l.implied, args...) + + var stacktrace CapturedStacktrace + + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + stacktrace = cs + } else { + args = append(args, "") + } + } + + l.writer.WriteByte(':') + + FOR: + for i := 0; i < len(args); i = i + 2 { + var ( + val string + raw bool + ) + + switch st := args[i+1].(type) { + case string: + val = st + case int: + val = strconv.FormatInt(int64(st), 10) + case int64: + val = strconv.FormatInt(int64(st), 10) + case int32: + val = strconv.FormatInt(int64(st), 10) + case int16: + val = strconv.FormatInt(int64(st), 10) + case int8: + val = strconv.FormatInt(int64(st), 10) + case uint: + val = strconv.FormatUint(uint64(st), 10) + case uint64: + val = strconv.FormatUint(uint64(st), 10) + case uint32: + val = strconv.FormatUint(uint64(st), 10) + case uint16: + val = strconv.FormatUint(uint64(st), 10) + case uint8: + val = strconv.FormatUint(uint64(st), 10) + case CapturedStacktrace: + stacktrace = st + continue FOR + case Format: + val = fmt.Sprintf(st[0].(string), st[1:]...) + default: + v := reflect.ValueOf(st) + if v.Kind() == reflect.Slice { + val = l.renderSlice(v) + raw = true + } else { + val = fmt.Sprintf("%v", st) + } + } + + l.writer.WriteByte(' ') + l.writer.WriteString(args[i].(string)) + l.writer.WriteByte('=') + + if !raw && strings.ContainsAny(val, " \t\n\r") { + l.writer.WriteByte('"') + l.writer.WriteString(val) + l.writer.WriteByte('"') + } else { + l.writer.WriteString(val) + } + } + } + + l.writer.WriteString("\n") + + if stacktrace != "" { + l.writer.WriteString(string(stacktrace)) + } +} + +func (l *intLogger) renderSlice(v reflect.Value) string { + var buf bytes.Buffer + + buf.WriteRune('[') + + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + + sv := v.Index(i) + + var val string + + switch sv.Kind() { + case reflect.String: + val = sv.String() + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + val = strconv.FormatInt(sv.Int(), 10) + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + val = strconv.FormatUint(sv.Uint(), 10) + default: + val = fmt.Sprintf("%v", sv.Interface()) + } + + if strings.ContainsAny(val, " \t\n\r") { + buf.WriteByte('"') + buf.WriteString(val) + buf.WriteByte('"') + } else { + buf.WriteString(val) + } + } + + buf.WriteRune(']') + + return buf.String() +} + +// JSON logging function +func (l *intLogger) logJSON(t time.Time, level Level, msg string, args ...interface{}) { + vals := l.jsonMapEntry(t, level, msg) + args = append(l.implied, args...) 
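+	// args now holds the With()-bound pairs (l.implied) followed by the
+	// per-call pairs; they are folded into the JSON entry below.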
+ + if args != nil && len(args) > 0 { + if len(args)%2 != 0 { + cs, ok := args[len(args)-1].(CapturedStacktrace) + if ok { + args = args[:len(args)-1] + vals["stacktrace"] = cs + } else { + args = append(args, "") + } + } + + for i := 0; i < len(args); i = i + 2 { + if _, ok := args[i].(string); !ok { + // As this is the logging function not much we can do here + // without injecting into logs... + continue + } + val := args[i+1] + switch sv := val.(type) { + case error: + // Check if val is of type error. If error type doesn't + // implement json.Marshaler or encoding.TextMarshaler + // then set val to err.Error() so that it gets marshaled + switch sv.(type) { + case json.Marshaler, encoding.TextMarshaler: + default: + val = sv.Error() + } + case Format: + val = fmt.Sprintf(sv[0].(string), sv[1:]...) + } + + vals[args[i].(string)] = val + } + } + + err := json.NewEncoder(l.writer).Encode(vals) + if err != nil { + if _, ok := err.(*json.UnsupportedTypeError); ok { + plainVal := l.jsonMapEntry(t, level, msg) + plainVal["@warn"] = errJsonUnsupportedTypeMsg + + json.NewEncoder(l.writer).Encode(plainVal) + } + } +} + +func (l intLogger) jsonMapEntry(t time.Time, level Level, msg string) map[string]interface{} { + vals := map[string]interface{}{ + "@message": msg, + "@timestamp": t.Format("2006-01-02T15:04:05.000000Z07:00"), + } + + var levelStr string + switch level { + case Error: + levelStr = "error" + case Warn: + levelStr = "warn" + case Info: + levelStr = "info" + case Debug: + levelStr = "debug" + case Trace: + levelStr = "trace" + default: + levelStr = "all" + } + + vals["@level"] = levelStr + + if l.name != "" { + vals["@module"] = l.name + } + + if l.caller { + if _, file, line, ok := runtime.Caller(4); ok { + vals["@caller"] = fmt.Sprintf("%s:%d", file, line) + } + } + return vals +} + +// Emit the message and args at DEBUG level +func (l *intLogger) Debug(msg string, args ...interface{}) { + l.Log(Debug, msg, args...) +} + +// Emit the message and args at TRACE level +func (l *intLogger) Trace(msg string, args ...interface{}) { + l.Log(Trace, msg, args...) +} + +// Emit the message and args at INFO level +func (l *intLogger) Info(msg string, args ...interface{}) { + l.Log(Info, msg, args...) +} + +// Emit the message and args at WARN level +func (l *intLogger) Warn(msg string, args ...interface{}) { + l.Log(Warn, msg, args...) +} + +// Emit the message and args at ERROR level +func (l *intLogger) Error(msg string, args ...interface{}) { + l.Log(Error, msg, args...) +} + +// Indicate that the logger would emit TRACE level logs +func (l *intLogger) IsTrace() bool { + return Level(atomic.LoadInt32(l.level)) == Trace +} + +// Indicate that the logger would emit DEBUG level logs +func (l *intLogger) IsDebug() bool { + return Level(atomic.LoadInt32(l.level)) <= Debug +} + +// Indicate that the logger would emit INFO level logs +func (l *intLogger) IsInfo() bool { + return Level(atomic.LoadInt32(l.level)) <= Info +} + +// Indicate that the logger would emit WARN level logs +func (l *intLogger) IsWarn() bool { + return Level(atomic.LoadInt32(l.level)) <= Warn +} + +// Indicate that the logger would emit ERROR level logs +func (l *intLogger) IsError() bool { + return Level(atomic.LoadInt32(l.level)) <= Error +} + +// Return a sub-Logger for which every emitted log message will contain +// the given key/value pairs. This is used to create a context specific +// Logger. 
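+// For example:
+//
+//	sub := logger.With("request_id", reqID)
+//	sub.Info("request handled") // emits request_id=<reqID> on every message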
+func (l *intLogger) With(args ...interface{}) Logger {
+	if len(args)%2 != 0 {
+		panic("With() call requires paired arguments")
+	}
+
+	sl := *l
+
+	result := make(map[string]interface{}, len(l.implied)+len(args))
+	keys := make([]string, 0, len(l.implied)+len(args))
+
+	// Read existing args, store map and key for consistent sorting
+	for i := 0; i < len(l.implied); i += 2 {
+		key := l.implied[i].(string)
+		keys = append(keys, key)
+		result[key] = l.implied[i+1]
+	}
+	// Read new args, store map and key for consistent sorting
+	for i := 0; i < len(args); i += 2 {
+		key := args[i].(string)
+		_, exists := result[key]
+		if !exists {
+			keys = append(keys, key)
+		}
+		result[key] = args[i+1]
+	}
+
+	// Sort keys to be consistent
+	sort.Strings(keys)
+
+	sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
+	for _, k := range keys {
+		sl.implied = append(sl.implied, k)
+		sl.implied = append(sl.implied, result[k])
+	}
+
+	return &sl
+}
+
+// Create a new sub-Logger with a name descending from the current name.
+// This is used to create a subsystem specific Logger.
+func (l *intLogger) Named(name string) Logger {
+	sl := *l
+
+	if sl.name != "" {
+		sl.name = sl.name + "." + name
+	} else {
+		sl.name = name
+	}
+
+	return &sl
+}
+
+// Create a new sub-Logger with an explicit name. This ignores the current
+// name. This is used to create a standalone logger that doesn't fall
+// within the normal hierarchy.
+func (l *intLogger) ResetNamed(name string) Logger {
+	sl := *l
+
+	sl.name = name
+
+	return &sl
+}
+
+// Update the logging level on-the-fly. This will affect all subloggers as
+// well.
+func (l *intLogger) SetLevel(level Level) {
+	atomic.StoreInt32(l.level, int32(level))
+}
+
+// Create a *log.Logger that will send its data through this Logger. This
+// allows packages that expect to be using the standard library log to actually
+// use this logger.
+func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+	if opts == nil {
+		opts = &StandardLoggerOptions{}
+	}
+
+	return log.New(l.StandardWriter(opts), "", 0)
+}
+
+func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+	return &stdlogAdapter{
+		log:         l,
+		inferLevels: opts.InferLevels,
+		forceLevel:  opts.ForceLevel,
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
new file mode 100644
index 00000000..080ed799
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/logger.go
@@ -0,0 +1,176 @@
+package hclog
+
+import (
+	"io"
+	"log"
+	"os"
+	"strings"
+	"sync"
+)
+
+var (
+	// DefaultOutput is used as the default log output.
+	DefaultOutput io.Writer = os.Stderr
+
+	// DefaultLevel is used as the default log level.
+	DefaultLevel = Info
+)
+
+// Level represents a log level.
+type Level int32
+
+const (
+	// NoLevel is a special level used to indicate that no level has been
+	// set and allow for a default to be used.
+	NoLevel Level = 0
+
+	// Trace is the most verbose level. Intended to be used for the tracing
+	// of actions in code, such as function enters/exits, etc.
+	Trace Level = 1
+
+	// Debug information for programmer lowlevel analysis.
+	Debug Level = 2
+
+	// Info information about steady state operations.
+	Info Level = 3
+
+	// Warn information about rare but handled events.
+	Warn Level = 4
+
+	// Error information about unrecoverable events.
+	Error Level = 5
+)
+
+// Format is a simple convenience type for when formatting is required. 
When
+// processing a value of this type, the logger automatically treats the first
+// argument as a Printf formatting string and passes the rest as the values
+// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
+type Format []interface{}
+
+// Fmt returns a Format type. This is a convenience function for creating a Format
+// type.
+func Fmt(str string, args ...interface{}) Format {
+	return append(Format{str}, args...)
+}
+
+// LevelFromString returns a Level type for the named log level, or "NoLevel" if
+// the level string is invalid. This facilitates setting the log level via
+// config or environment variable by name in a predictable way.
+func LevelFromString(levelStr string) Level {
+	// We don't care about case. Accept both "INFO" and "info".
+	levelStr = strings.ToLower(strings.TrimSpace(levelStr))
+	switch levelStr {
+	case "trace":
+		return Trace
+	case "debug":
+		return Debug
+	case "info":
+		return Info
+	case "warn":
+		return Warn
+	case "error":
+		return Error
+	default:
+		return NoLevel
+	}
+}
+
+// Logger describes the interface that must be implemented by all loggers.
+type Logger interface {
+	// Args are alternating key, val pairs
+	// keys must be strings
+	// vals can be any type, but display is implementation specific
+	// Emit a message and key/value pairs at the TRACE level
+	Trace(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the DEBUG level
+	Debug(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the INFO level
+	Info(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the WARN level
+	Warn(msg string, args ...interface{})
+
+	// Emit a message and key/value pairs at the ERROR level
+	Error(msg string, args ...interface{})
+
+	// Indicate if TRACE logs would be emitted. This and the other Is* guards
+	// are used to elide expensive logging code based on the current level.
+	IsTrace() bool
+
+	// Indicate if DEBUG logs would be emitted.
+	IsDebug() bool
+
+	// Indicate if INFO logs would be emitted.
+	IsInfo() bool
+
+	// Indicate if WARN logs would be emitted.
+	IsWarn() bool
+
+	// Indicate if ERROR logs would be emitted.
+	IsError() bool
+
+	// Creates a sublogger that will always have the given key/value pairs
+	With(args ...interface{}) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// If the logger already has a name, the new value will be appended to the current
+	// name. That way, a major subsystem can use this to decorate all its own logs
+	// without losing context.
+	Named(name string) Logger
+
+	// Create a logger that will prepend the name string on the front of all messages.
+	// This sets the name of the logger to the value directly, unlike Named which honors
+	// the current name as well.
+	ResetNamed(name string) Logger
+
+	// Updates the level. This should affect all sub-loggers as well. If an
+	// implementation cannot update the level on the fly, it should no-op.
+	SetLevel(level Level)
+
+	// Return a value that conforms to the stdlib log.Logger interface
+	StandardLogger(opts *StandardLoggerOptions) *log.Logger
+
+	// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
+	StandardWriter(opts *StandardLoggerOptions) io.Writer
+}
+
+// StandardLoggerOptions can be used to configure a new standard logger. 
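+// The result is passed to Logger.StandardLogger or Logger.StandardWriter.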
+type StandardLoggerOptions struct {
+	// Indicate that some minimal parsing should be done on strings to try
+	// and detect their level and re-emit them.
+	// This supports strings like [ERROR], [ERR], [TRACE], [WARN], [INFO] and
+	// [DEBUG], and strips the level prefix off before re-emitting the message.
+	InferLevels bool
+
+	// ForceLevel is used to force all output from the standard logger to be at
+	// the specified level. Similar to InferLevels, this will strip any level
+	// prefix contained in the logged string before applying the forced level.
+	// If set, this overrides InferLevels.
+	ForceLevel Level
+}
+
+// LoggerOptions can be used to configure a new logger.
+type LoggerOptions struct {
+	// Name of the subsystem to prefix logs with
+	Name string
+
+	// The threshold for the logger. Anything less severe is suppressed
+	Level Level
+
+	// Where to write the logs to. Defaults to os.Stderr if nil
+	Output io.Writer
+
+	// An optional mutex pointer in case Output is shared
+	Mutex *sync.Mutex
+
+	// Control if the output should be in JSON.
+	JSONFormat bool
+
+	// Include file and line information in each log line
+	IncludeLocation bool
+
+	// The time format to use instead of the default
+	TimeFormat string
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
new file mode 100644
index 00000000..7ad6b351
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -0,0 +1,52 @@
+package hclog
+
+import (
+	"io"
+	"io/ioutil"
+	"log"
+)
+
+// NewNullLogger instantiates a Logger for which all calls
+// will succeed without doing anything.
+// Useful for testing purposes.
+func NewNullLogger() Logger {
+	return &nullLogger{}
+}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Trace(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Debug(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Info(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Warn(msg string, args ...interface{}) {}
+
+func (l *nullLogger) Error(msg string, args ...interface{}) {}
+
+func (l *nullLogger) IsTrace() bool { return false }
+
+func (l *nullLogger) IsDebug() bool { return false }
+
+func (l *nullLogger) IsInfo() bool { return false }
+
+func (l *nullLogger) IsWarn() bool { return false }
+
+func (l *nullLogger) IsError() bool { return false }
+
+func (l *nullLogger) With(args ...interface{}) Logger { return l }
+
+func (l *nullLogger) Named(name string) Logger { return l }
+
+func (l *nullLogger) ResetNamed(name string) Logger { return l }
+
+func (l *nullLogger) SetLevel(level Level) {}
+
+func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
+	return log.New(l.StandardWriter(opts), "", log.LstdFlags)
+}
+
+func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
+	return ioutil.Discard
+}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
new file mode 100644
index 00000000..9b27bd3d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2016 Uber Technologies, Inc. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package hclog + +import ( + "bytes" + "runtime" + "strconv" + "strings" + "sync" +) + +var ( + _stacktraceIgnorePrefixes = []string{ + "runtime.goexit", + "runtime.main", + } + _stacktracePool = sync.Pool{ + New: func() interface{} { + return newProgramCounters(64) + }, + } +) + +// CapturedStacktrace represents a stacktrace captured by a previous call +// to log.Stacktrace. If passed to a logging function, the stacktrace +// will be appended. +type CapturedStacktrace string + +// Stacktrace captures a stacktrace of the current goroutine and returns +// it to be passed to a logging function. +func Stacktrace() CapturedStacktrace { + return CapturedStacktrace(takeStacktrace()) +} + +func takeStacktrace() string { + programCounters := _stacktracePool.Get().(*programCounters) + defer _stacktracePool.Put(programCounters) + + var buffer bytes.Buffer + + for { + // Skip the call to runtime.Counters and takeStacktrace so that the + // program counters start at the caller of takeStacktrace. + n := runtime.Callers(2, programCounters.pcs) + if n < cap(programCounters.pcs) { + programCounters.pcs = programCounters.pcs[:n] + break + } + // Don't put the too-short counter slice back into the pool; this lets + // the pool adjust if we consistently take deep stacktraces. 
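+		// Instead, retry with a buffer twice as large until the whole
+		// stack fits.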
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2) + } + + i := 0 + frames := runtime.CallersFrames(programCounters.pcs) + for frame, more := frames.Next(); more; frame, more = frames.Next() { + if shouldIgnoreStacktraceFunction(frame.Function) { + continue + } + if i != 0 { + buffer.WriteByte('\n') + } + i++ + buffer.WriteString(frame.Function) + buffer.WriteByte('\n') + buffer.WriteByte('\t') + buffer.WriteString(frame.File) + buffer.WriteByte(':') + buffer.WriteString(strconv.Itoa(int(frame.Line))) + } + + return buffer.String() +} + +func shouldIgnoreStacktraceFunction(function string) bool { + for _, prefix := range _stacktraceIgnorePrefixes { + if strings.HasPrefix(function, prefix) { + return true + } + } + return false +} + +type programCounters struct { + pcs []uintptr +} + +func newProgramCounters(size int) *programCounters { + return &programCounters{make([]uintptr, size)} +} diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go new file mode 100644 index 00000000..044a4696 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/stdlog.go @@ -0,0 +1,83 @@ +package hclog + +import ( + "bytes" + "strings" +) + +// Provides a io.Writer to shim the data out of *log.Logger +// and back into our Logger. This is basically the only way to +// build upon *log.Logger. +type stdlogAdapter struct { + log Logger + inferLevels bool + forceLevel Level +} + +// Take the data, infer the levels if configured, and send it through +// a regular Logger. +func (s *stdlogAdapter) Write(data []byte) (int, error) { + str := string(bytes.TrimRight(data, " \t\n")) + + if s.forceLevel != NoLevel { + // Use pickLevel to strip log levels included in the line since we are + // forcing the level + _, str := s.pickLevel(str) + + // Log at the forced level + switch s.forceLevel { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else if s.inferLevels { + level, str := s.pickLevel(str) + switch level { + case Trace: + s.log.Trace(str) + case Debug: + s.log.Debug(str) + case Info: + s.log.Info(str) + case Warn: + s.log.Warn(str) + case Error: + s.log.Error(str) + default: + s.log.Info(str) + } + } else { + s.log.Info(str) + } + + return len(data), nil +} + +// Detect, based on conventions, what log level this is. 
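+// For example, "[WARN] disk space low" yields (Warn, "disk space low").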
+func (s *stdlogAdapter) pickLevel(str string) (Level, string) { + switch { + case strings.HasPrefix(str, "[DEBUG]"): + return Debug, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[TRACE]"): + return Trace, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[INFO]"): + return Info, strings.TrimSpace(str[6:]) + case strings.HasPrefix(str, "[WARN]"): + return Warn, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERROR]"): + return Error, strings.TrimSpace(str[7:]) + case strings.HasPrefix(str, "[ERR]"): + return Error, strings.TrimSpace(str[5:]) + default: + return Info, str + } +} diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go new file mode 100644 index 00000000..7e8ec729 --- /dev/null +++ b/vendor/github.com/hashicorp/go-hclog/writer.go @@ -0,0 +1,74 @@ +package hclog + +import ( + "bytes" + "io" +) + +type writer struct { + b bytes.Buffer + w io.Writer +} + +func newWriter(w io.Writer) *writer { + return &writer{w: w} +} + +func (w *writer) Flush(level Level) (err error) { + if lw, ok := w.w.(LevelWriter); ok { + _, err = lw.LevelWrite(level, w.b.Bytes()) + } else { + _, err = w.w.Write(w.b.Bytes()) + } + w.b.Reset() + return err +} + +func (w *writer) Write(p []byte) (int, error) { + return w.b.Write(p) +} + +func (w *writer) WriteByte(c byte) error { + return w.b.WriteByte(c) +} + +func (w *writer) WriteString(s string) (int, error) { + return w.b.WriteString(s) +} + +// LevelWriter is the interface that wraps the LevelWrite method. +type LevelWriter interface { + LevelWrite(level Level, p []byte) (n int, err error) +} + +// LeveledWriter writes all log messages to the standard writer, +// except for log levels that are defined in the overrides map. +type LeveledWriter struct { + standard io.Writer + overrides map[Level]io.Writer +} + +// NewLeveledWriter returns an initialized LeveledWriter. +// +// standard will be used as the default writer for all log levels, +// except for log levels that are defined in the overrides map. +func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter { + return &LeveledWriter{ + standard: standard, + overrides: overrides, + } +} + +// Write implements io.Writer. +func (lw *LeveledWriter) Write(p []byte) (int, error) { + return lw.standard.Write(p) +} + +// LevelWrite implements LevelWriter. +func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) { + w, ok := lw.overrides[level] + if !ok { + w = lw.standard + } + return w.Write(p) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 00000000..a967ae45 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,9 @@ +# UNRELEASED + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. 
"Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 00000000..aca15a64 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,66 @@ +go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). +The package only provides a single `Tree` implementation, optimized for sparse nodes. + +As a radix tree, it provides the following: + * O(k) operations. In many cases, this can be faster than a hash table since + the hash function is an O(k) operation, and hash tables have very poor cache locality. + * Minimum / Maximum value lookups + * Ordered iteration + +A tree supports using a transaction to batch multiple updates (insert, delete) +in a more efficient manner than performing each operation one at a time. + +For a mutable variant, see [go-radix](https://github.com/armon/go-radix). + +Documentation +============= + +The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix). + +Example +======= + +Below is a simple example of usage + +```go +// Create a tree +r := iradix.New() +r, _, _ = r.Insert([]byte("foo"), 1) +r, _, _ = r.Insert([]byte("bar"), 2) +r, _, _ = r.Insert([]byte("foobar"), 2) + +// Find the longest prefix match +m, _, _ := r.Root().LongestPrefix([]byte("foozip")) +if string(m) != "foo" { + panic("should be foo") +} +``` + +Here is an example of performing a range scan of the keys. 
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+		break
+	}
+	fmt.Println(string(key))
+}
+// Output:
+//  005
+//  010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 00000000..a6367477
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
new file mode 100644
index 00000000..27e7b7c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
@@ -0,0 +1,6 @@
+module github.com/hashicorp/go-immutable-radix
+
+require (
+	github.com/hashicorp/go-uuid v1.0.0
+	github.com/hashicorp/golang-lru v0.5.0
+)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
new file mode 100644
index 00000000..7de5dfc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 00000000..e5e6e57f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,662 @@
+package iradix
+
+import (
+	"bytes"
+	"strings"
+
+	"github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+	// defaultModifiedCache is the default size of the modified node
+	// cache used per transaction. This is used to cache the updates
+	// to the nodes near the root, while the leaves do not need to be
+	// cached. This is important for very large transactions to prevent
+	// the modified cache from growing to be enormous. This is also used
+	// to set the max size of the mutation notify maps since those should
+	// also be bounded in a similar way.
+	defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination. 
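+// Writers never modify nodes in place: every mutation goes through a Txn
+// that copies nodes, and Commit publishes a new root, so readers holding
+// an old root keep a consistent snapshot.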
+type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. 
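+// This is the copy-on-write step of the transaction: a node is copied at
+// most once per transaction and then cached in t.writable for reuse.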
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. + t.writable.Add(nc, nil) + return nc +} + +// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction +// Returns the size of the subtree visited +func (t *Txn) trackChannelsAndCount(n *Node) int { + // Count only leaf nodes + leaves := 0 + if n.leaf != nil { + leaves = 1 + } + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Recurse on the children + for _, e := range n.edges { + leaves += t.trackChannelsAndCount(e.node) + } + return leaves +} + +// mergeChild is called to collapse the given node with its child. This is only +// called when the given node is not a leaf and has a single edge. +func (t *Txn) mergeChild(n *Node) { + // Mark the child node as being mutated since we are about to abandon + // it. We don't need to mark the leaf since we are retaining it if it + // is there. + e := n.edges[0] + child := e.node + if t.trackMutate { + t.trackChannel(child.mutateCh) + } + + // Merge the nodes. 
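+	// e.g. a node with prefix "foo" and a single child with prefix "bar"
+	// collapses into one node with prefix "foobar".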
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. 
This is pretty subtle, + // so be careful if you change any of the logic here. + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, leaf +} + +// delete does a recursive deletion +func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) { + // Check for key exhaustion + if len(search) == 0 { + nc := t.writeNode(n, true) + if n.isLeaf() { + nc.leaf = nil + } + nc.edges = nil + return nc, t.trackChannelsAndCount(n) + } + + // Look for an edge + label := search[0] + idx, child := n.getEdge(label) + // We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix + // Need to do both so that we can delete prefixes that don't correspond to any node in the tree + if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) { + return nil, 0 + } + + // Consume the search prefix + if len(child.prefix) > len(search) { + search = []byte("") + } else { + search = search[len(child.prefix):] + } + newChild, numDeletions := t.deletePrefix(n, child, search) + if newChild == nil { + return nil, 0 + } + // Copy this node. WATCH OUT - it's safe to pass "false" here because we + // will only ADD a leaf via nc.mergeChild() if there isn't one due to + // the !nc.isLeaf() check in the logic just below. This is pretty subtle, + // so be careful if you change any of the logic here. + + nc := t.writeNode(n, false) + + // Delete the edge if the node has no edges + if newChild.leaf == nil && len(newChild.edges) == 0 { + nc.delEdge(label) + if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() { + t.mergeChild(nc) + } + } else { + nc.edges[idx].node = newChild + } + return nc, numDeletions +} + +// Insert is used to add or update a given key. The return provides +// the previous value and a bool indicating if any was set. +func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) { + newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v) + if newRoot != nil { + t.root = newRoot + } + if !didUpdate { + t.size++ + } + return oldVal, didUpdate +} + +// Delete is used to delete a given key. Returns the old value if any, +// and a bool indicating if the key was set. +func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. 
+func (t *Txn) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found
+func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+	return t.root.GetWatch(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree. If mutation
+// tracking is turned on then notifications will also be issued.
+func (t *Txn) Commit() *Tree {
+	nt := t.CommitOnly()
+	if t.trackMutate {
+		t.Notify()
+	}
+	return nt
+}
+
+// CommitOnly is used to finalize the transaction and return a new tree, but
+// does not issue any notifications until Notify is called.
+func (t *Txn) CommitOnly() *Tree {
+	nt := &Tree{t.root, t.size}
+	t.writable = nil
+	return nt
+}
+
+// slowNotify does a complete comparison of the before and after trees in order
+// to trigger notifications. This doesn't require any additional state but it
+// is very expensive to compute.
+func (t *Txn) slowNotify() {
+	snapIter := t.snap.rawIterator()
+	rootIter := t.root.rawIterator()
+	for snapIter.Front() != nil || rootIter.Front() != nil {
+		// If we've exhausted the nodes in the old snapshot, we know
+		// there's nothing remaining to notify.
+		if snapIter.Front() == nil {
+			return
+		}
+		snapElem := snapIter.Front()
+
+		// If we've exhausted the nodes in the new root, we know we need
+		// to invalidate everything that remains in the old snapshot. We
+		// know from the loop condition there's something in the old
+		// snapshot.
+		if rootIter.Front() == nil {
+			close(snapElem.mutateCh)
+			if snapElem.isLeaf() {
+				close(snapElem.leaf.mutateCh)
+			}
+			snapIter.Next()
+			continue
+		}
+
+		// Do one string compare so we can check the various conditions
+		// below without repeating the compare.
+		cmp := strings.Compare(snapIter.Path(), rootIter.Path())
+
+		// If the snapshot is behind the root, then we must have deleted
+		// this node during the transaction.
+		if cmp < 0 {
+			close(snapElem.mutateCh)
+			if snapElem.isLeaf() {
+				close(snapElem.leaf.mutateCh)
+			}
+			snapIter.Next()
+			continue
+		}
+
+		// If the snapshot is ahead of the root, then we must have added
+		// this node during the transaction.
+		if cmp > 0 {
+			rootIter.Next()
+			continue
+		}
+
+		// If we have the same path, then we need to see if we mutated a
+		// node and possibly the leaf.
+		rootElem := rootIter.Front()
+		if snapElem != rootElem {
+			close(snapElem.mutateCh)
+			if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
+				close(snapElem.leaf.mutateCh)
+			}
+		}
+		snapIter.Next()
+		rootIter.Next()
+	}
+}
+
+// Notify is used along with TrackMutate to trigger notifications. This must
+// only be done once a transaction is committed via CommitOnly, and it is called
+// automatically by Commit.
+func (t *Txn) Notify() {
+	if !t.trackMutate {
+		return
+	}
+
+	// If we've overflowed the tracking state we can't use it in any way and
+	// need to do a full tree compare.
+	if t.trackOverflow {
+		t.slowNotify()
+	} else {
+		for ch := range t.trackChannels {
+			close(ch)
+		}
+	}
+
+	// Clean up the tracking state so that a re-notify is safe (will trigger
+	// the else clause above which will be a no-op).
+	t.trackChannels = nil
+	t.trackOverflow = false
+}
+
+// Insert is used to add or update a given key. The return provides
+// the new tree, previous value and a bool indicating if any was set.
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Insert(k, v)
+	return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+	txn := t.Txn()
+	old, ok := txn.Delete(k)
+	return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+	txn := t.Txn()
+	ok := txn.DeletePrefix(k)
+	return txn.Commit(), ok
+}
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 00000000..1ecaf831
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,188 @@
+package iradix
+
+import (
+	"bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+	// Traverse to the minimum child
+	if n.leaf != nil {
+		return n
+	}
+	if len(n.edges) > 0 {
+		// Add all the other edges to the stack (the min node will be added as
+		// we recurse)
+		i.stack = append(i.stack, n.edges[1:])
+		return i.recurseMin(n.edges[0].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound.
+	i.stack = []edges{}
+	n := i.node
+	search := key
+
+	found := func(n *Node) {
+		i.node = n
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			n = i.recurseMin(n)
+			if n != nil {
+				found(n)
+			}
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf we're done.
+		if n.leaf != nil {
+			if bytes.Compare(n.leaf.key, key) < 0 {
+				i.node = nil
+				return
+			}
+			found(n)
+			return
+		}
+
+		// Consume the search prefix
+		if len(n.prefix) > len(search) {
+			search = []byte{}
+		} else {
+			search = search[len(n.prefix):]
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			i.node = nil
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
+		if idx+1 < len(n.edges) {
+			i.stack = append(i.stack, n.edges[idx+1:])
+		}
+
+		i.node = lbNode
+		// Recurse
+		n = lbNode
+	}
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if i.stack == nil && i.node != nil {
+		i.stack = []edges{
+			edges{
+				edge{node: i.node},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last[0].node
+
+		// Update the stack
+		if len(last) > 1 {
+			i.stack[n-1] = last[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			i.stack = append(i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 00000000..3ab904ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,304 @@
+package iradix
+
+import (
+	"bytes"
+	"sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+	mutateCh chan struct{}
+	key      []byte
+	val      interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+	label byte
+	node  *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+	// mutateCh is closed if this node is modified
+	mutateCh chan struct{}
+
+	// leaf is used to store possible leaf
+	leaf *leafNode
+
+	// prefix is the common prefix we ignore
+	prefix []byte
+
+	// Edges should be stored in-order for iteration.
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *Node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *Node) addEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	n.edges = append(n.edges, e)
+	if idx != num {
+		copy(n.edges[idx+1:], n.edges[idx:num])
+		n.edges[idx] = e
+	}
+}
+
+func (n *Node) replaceEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *Node) getEdge(label byte) (int, *Node) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	// we want lower bound behavior so return even if it's not an exact match
+	if idx < num {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
+func (n *Node) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+	search := k
+	watch := n.mutateCh
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.mutateCh, n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return watch, nil, false
+}
+
+func (n *Node) Get(k []byte) (interface{}, bool) {
+	_, val, ok := n.GetWatch(k)
+	return val, ok
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 00000000..04814c13
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+	// node is the starting node in the tree for the iterator.
+	node *Node
+
+	// stack keeps track of edges in the frontier.
+	stack []rawStackEntry
+
+	// pos is the current position of the iterator.
+	pos *Node
+
+	// path is the effective path of the current iterator position,
+	// regardless of whether the current node is a leaf.
+	path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+	path  string
+	edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+	return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+	return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+	// Initialize our stack if needed.
+	if i.stack == nil && i.node != nil {
+		i.stack = []rawStackEntry{
+			rawStackEntry{
+				edges: edges{
+					edge{node: i.node},
+				},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack.
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last.edges[0].node
+
+		// Update the stack.
+		if len(last.edges) > 1 {
+			i.stack[n-1].edges = last.edges[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier.
+		if len(elem.edges) > 0 {
+			path := last.path + string(elem.prefix)
+			i.stack = append(i.stack, rawStackEntry{path, elem.edges})
+		}
+
+		i.pos = elem
+		i.path = last.path + string(elem.prefix)
+		return
+	}
+
+	i.pos = nil
+	i.path = ""
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 00000000..b97cd6ed
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
+TEST?=./...
+
+default: test
+
+# test runs the test suite and vets the code.
+test: generate
+	@echo "==> Running tests..."
+	@go list $(TEST) \
+		| grep -v "/vendor/" \
+		| xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
+
+# testrace runs the race checker
+testrace: generate
+	@echo "==> Running tests (race)..."
+	@go list $(TEST) \
+		| grep -v "/vendor/" \
+		| xargs -n1 go test -timeout=60s -race ${TESTARGS}
+
+# updatedeps installs all the dependencies needed to run and build.
+updatedeps:
+	@sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
+
+# generate runs `go generate` to build the dynamically generated source files.
+generate:
+	@echo "==> Generating..."
+	@find . -type f -name '.DS_Store' -delete
+	@go list ./... \
+		| grep -v "/vendor/" \
+		| xargs -n1 go generate
+
+.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
index e81be50e..ead5830f 100644
--- a/vendor/github.com/hashicorp/go-multierror/README.md
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -1,5 +1,11 @@
 # go-multierror
 
+[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: https://travis-ci.org/hashicorp/go-multierror
+[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
+
 `go-multierror` is a package for Go that provides a mechanism for
 representing a list of `error` values as a single `error`.
 
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
index 00afa9b3..775b6e75 100644
--- a/vendor/github.com/hashicorp/go-multierror/append.go
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error {
 	for _, e := range errs {
 		switch e := e.(type) {
 		case *Error:
-			err.Errors = append(err.Errors, e.Errors...)
+			if e != nil {
+				err.Errors = append(err.Errors, e.Errors...)
+			}
 		default:
-			err.Errors = append(err.Errors, e)
+			if e != nil {
+				err.Errors = append(err.Errors, e)
+			}
 		}
 	}
 
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
index bb65a12e..47f13c49 100644
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
 
 // ListFormatFunc is a basic formatter that outputs the number of errors
 // that occurred along with a bullet point list of the errors.
 func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+	}
+
 	points := make([]string, len(es))
 	for i, err := range es {
 		points[i] = fmt.Sprintf("* %s", err)
 	}
 
 	return fmt.Sprintf(
-		"%d error(s) occurred:\n\n%s",
-		len(es), strings.Join(points, "\n"))
+		"%d errors occurred:\n\t%s\n\n",
+		len(es), strings.Join(points, "\n\t"))
 }
diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod
new file mode 100644
index 00000000..2534331d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.mod
@@ -0,0 +1,3 @@
+module github.com/hashicorp/go-multierror
+
+require github.com/hashicorp/errwrap v1.0.0
diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum
new file mode 100644
index 00000000..85b1f8ff
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
index 2ea08273..89b1422d 100644
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
 }
 
 // WrappedErrors returns the list of errors that this Error is wrapping.
-// It is an implementatin of the errwrap.Wrapper interface so that
+// It is an implementation of the errwrap.Wrapper interface so that
 // multierror.Error can be used with that library.
 //
 // This method is not safe to be called concurrently and is no different
-// than accessing the Errors field directly. It is implementd only to
+// than accessing the Errors field directly. It is implemented only to
 // satisfy the errwrap.Wrapper interface.
 func (e *Error) WrappedErrors() []error {
 	return e.Errors
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
new file mode 100644
index 00000000..fecb14e8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/sort.go
@@ -0,0 +1,16 @@
+package multierror
+
+// Len implements sort.Interface function for length
+func (err Error) Len() int {
+	return len(err.Errors)
+}
+
+// Swap implements sort.Interface function for swapping elements
+func (err Error) Swap(i, j int) {
+	err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
+}
+
+// Less implements sort.Interface function for determining order
+func (err Error) Less(i, j int) bool {
+	return err.Errors[i].Error() < err.Errors[j].Error()
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
index 2058cfb6..fe305ad5 100644
--- a/vendor/github.com/hashicorp/go-plugin/README.md
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -1,10 +1,9 @@
 # Go Plugin System over RPC
 
 `go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
-that has been in use by HashiCorp tooling for over 3 years. While initially
-created for [Packer](https://www.packer.io), it has since been used by
-[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io),
-with plans to also use it for [Nomad](https://www.nomadproject.io) and
+that has been in use by HashiCorp tooling for over 4 years. While initially
+created for [Packer](https://www.packer.io), it is additionally in use by
+[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io), and
 [Vault](https://www.vaultproject.io).
 
 While the plugin system is over RPC, it is currently only designed to work
@@ -24,6 +23,11 @@ interface as if it were going to run in the same process. For a plugin
 user: you just use and call functions on an interface as if it were
 in the same process. This plugin system handles the communication in
 between.
 
+**Cross-language support.** Plugins can be written (and consumed) by
+almost every major language. This library supports serving plugins via
+[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
+in any language.
+
 **Complex arguments and return values are supported.** This library
 provides APIs for handling complex arguments and return values such
 as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
@@ -37,7 +41,10 @@ and the plugin can call back into the host process.
 
 **Built-in Logging.** Any plugins that use the `log` standard library
 will have log data automatically sent to the host process. The host
 process will mirror this output prefixed with the path to the plugin
-binary. This makes debugging with plugins simple.
+binary. This makes debugging with plugins simple. If the host system
+uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
+will be structured. If the plugin also uses hclog, logs from the plugin
+will be sent to the host hclog and be structured.
 
 **Protocol Versioning.** A very basic "protocol version" is supported that
 can be incremented to invalidate any previous plugins. This is useful when
@@ -62,13 +69,18 @@ This requires the host/plugin to know this is possible and daemonize
 properly. `NewClient` takes a `ReattachConfig` to determine if and how to
 reattach.
 
+**Cryptographically Secure Plugins.** Plugins can be verified with an expected
+checksum and RPC communications can be configured to use TLS. The host process
+must be properly secured to protect this configuration.
+
 ## Architecture
 
 The HashiCorp plugin system works by launching subprocesses and communicating
-over RPC (using standard `net/rpc`). A single connection is made between
-any plugin and the host process, and we use a
-[connection multiplexing](https://github.com/hashicorp/yamux)
-library to multiplex any other connections on top.
+over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single
+connection is made between any plugin and the host process. For net/rpc-based
+plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top. For gRPC-based plugins,
+the HTTP2 protocol handles multiplexing.
 
 This architecture has a number of benefits:
 
@@ -76,8 +88,8 @@ This architecture has a number of benefits:
   panic the plugin user.
 
 * Plugins are very easy to write: just write a Go application and `go build`.
-  Theoretically you could also use another language as long as it can
-  communicate the Go `net/rpc` protocol but this hasn't yet been tried.
+  Or use any other language to write a gRPC server with a tiny amount of
+  boilerplate to support go-plugin.
 
 * Plugins are very easy to install: just put the binary in a location where
   the host will find it (depends on the host but this library also provides
@@ -85,8 +97,8 @@ This architecture has a number of benefits:
 
 * Plugins can be relatively secure: The plugin only has access to the
   interfaces and args given to it, not to the entire memory space of the
-  process. More security features are planned (see the coming soon section
-  below).
+  process. Additionally, go-plugin can communicate with the plugin over
+  TLS.
 
 ## Usage
 
@@ -97,10 +109,9 @@ high-level steps that must be done. Examples are available in the
 `examples/` directory.
 
 1. Choose the interface(s) you want to expose for plugins.
 
 2. For each interface, implement an implementation of that interface
-   that communicates over an `*rpc.Client` (from the standard `net/rpc`
-   package) for every function call. Likewise, implement the RPC server
-   struct this communicates to which is then communicating to a real,
-   concrete implementation.
+   that communicates over a `net/rpc` connection or over a
+   [gRPC](http://www.grpc.io) connection or both. You'll have to implement
+   both a client and server implementation.
 
 3. Create a `Plugin` implementation that knows how to create the RPC
    client/server for a given plugin type.
 
@@ -125,10 +136,6 @@ improvements we can make. At this point in time, the roadmap for the plugin
 system is:
 
-**Cryptographically Secure Plugins.** We'll implement signing plugins
-and loading signed plugins in order to allow Vault to make use of multi-process
-in a secure way.
-
 **Semantic Versioning.** Plugins will be able to implement a semantic version.
 This plugin system will give host processes a system for constraining
 versions. This is in addition to the protocol versioning already present
@@ -143,19 +150,19 @@ user experience.
 
 When we started using plugins (late 2012, early 2013), plugins over RPC
 were the only option since Go didn't support dynamic library loading. Today,
-Go still doesn't support dynamic library loading, but they do intend to.
-Since 2012, our plugin system has stabilized from millions of users using it,
-and has many benefits we've come to value greatly.
-
-For example, we intend to use this plugin system in
-[Vault](https://www.vaultproject.io), and dynamic library loading will
-simply never be acceptable in Vault for security reasons. That is an extreme
+Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with
+a number of limitations. Since 2012, our plugin system has stabilized
+from tens of millions of users using it, and has many benefits we've come to
+value greatly.
+
+For example, we use this plugin system in
+[Vault](https://www.vaultproject.io) where dynamic library loading is
+not acceptable for security reasons. That is an extreme
 example, but we believe our library system has more upsides than downsides
 over dynamic library loading and since we've had it built and tested for years,
-we'll likely continue to use it.
+we'll continue to use it.
 
 Shared libraries have one major advantage over our system which is much
 higher performance. In real world scenarios across our various tools, we've
 never required any more performance out of our plugin system and it has
 seen very high throughput, so this isn't a concern for us at the moment.
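The README's usage steps above stay fairly abstract, so here is a minimal, hypothetical sketch of the net/rpc flavor of that pattern from the host side. The `Greeter` interface, the `"greeter"` plugin name, the magic cookie values, and the `./bins/greeter-plugin` binary path are illustrative assumptions only; the `plugin.*` identifiers are the library's own API as vendored below.

```go
package main

import (
	"fmt"
	"log"
	"net/rpc"
	"os/exec"

	plugin "github.com/hashicorp/go-plugin"
)

// Greeter is the interface exposed across the plugin boundary (step 1).
type Greeter interface {
	Greet(name string) (string, error)
}

// GreeterRPC is the client half (step 2): it forwards calls over net/rpc.
type GreeterRPC struct{ client *rpc.Client }

func (g *GreeterRPC) Greet(name string) (string, error) {
	var resp string
	err := g.client.Call("Plugin.Greet", name, &resp)
	return resp, err
}

// GreeterRPCServer is the server half (step 2): it wraps the concrete
// implementation that lives inside the plugin process.
type GreeterRPCServer struct{ Impl Greeter }

func (s *GreeterRPCServer) Greet(name string, resp *string) error {
	var err error
	*resp, err = s.Impl.Greet(name)
	return err
}

// GreeterPlugin ties the two halves together (step 3).
type GreeterPlugin struct{ Impl Greeter }

func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
	return &GreeterRPCServer{Impl: p.Impl}, nil
}

func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
	return &GreeterRPC{client: c}, nil
}

// handshake must match between host and plugin; the values are arbitrary.
var handshake = plugin.HandshakeConfig{
	ProtocolVersion:  1,
	MagicCookieKey:   "GREETER_PLUGIN",
	MagicCookieValue: "hello",
}

func main() {
	// Host side (step 4): launch the plugin binary and dispense the Greeter.
	// The plugin binary's main() would call plugin.Serve with the same
	// handshake and a GreeterPlugin wrapping a real implementation.
	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins:         plugin.PluginSet{"greeter": &GreeterPlugin{}},
		Cmd:             exec.Command("./bins/greeter-plugin"),
	})
	defer client.Kill()

	rpcClient, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}
	raw, err := rpcClient.Dispense("greeter")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(raw.(Greeter).Greet("plugin"))
}
```

A host that wants the checksum verification described under "Cryptographically Secure Plugins" would additionally set `SecureConfig` on the `ClientConfig` (an expected checksum plus a `hash.Hash` such as `sha256.New()`); `Start` then runs the `SecureConfig.Check` method shown in `client.go` below before launching the binary.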
-
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
index 9f8a0f27..bc56559c 100644
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -2,11 +2,16 @@ package plugin
 
 import (
 	"bufio"
+	"context"
+	"crypto/subtle"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
 	"errors"
 	"fmt"
+	"hash"
 	"io"
 	"io/ioutil"
-	"log"
 	"net"
 	"os"
 	"os/exec"
@@ -16,7 +21,8 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
-	"unicode"
+
+	hclog "github.com/hashicorp/go-hclog"
 )
 
 // If this is 1, then we've called CleanupClients. This can be used
@@ -35,6 +41,22 @@ var (
 	// ErrProcessNotFound is returned when a client is instantiated to
 	// reattach to an existing process and it isn't found.
 	ErrProcessNotFound = errors.New("Reattachment process not found")
+
+	// ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match
+	// the one provided in the SecureConfig.
+	ErrChecksumsDoNotMatch = errors.New("checksums did not match")
+
+	// ErrSecureConfigNoChecksum is returned when an empty checksum is provided
+	// to the SecureConfig.
+	ErrSecureConfigNoChecksum = errors.New("no checksum provided")
+
+	// ErrSecureConfigNoHash is returned when a nil Hash object is provided to
+	// the SecureConfig.
+	ErrSecureConfigNoHash = errors.New("no hash implementation provided")
+
+	// ErrSecureConfigAndReattach is returned when both Reattach and
+	// SecureConfig are set.
+	ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
 )
 
 // Client handles the lifecycle of a plugin application. It launches
@@ -49,13 +71,35 @@ var (
 //
 // See NewClient and ClientConfig for using a Client.
 type Client struct {
-	config      *ClientConfig
-	exited      bool
-	doneLogging chan struct{}
-	l           sync.Mutex
-	address     net.Addr
-	process     *os.Process
-	client      *RPCClient
+	config            *ClientConfig
+	exited            bool
+	l                 sync.Mutex
+	address           net.Addr
+	process           *os.Process
+	client            ClientProtocol
+	protocol          Protocol
+	logger            hclog.Logger
+	doneCtx           context.Context
+	ctxCancel         context.CancelFunc
+	negotiatedVersion int
+
+	// clientWaitGroup is used to manage the lifecycle of the plugin management
+	// goroutines.
+	clientWaitGroup sync.WaitGroup
+
+	// stderrWaitGroup is used to prevent the command's Wait() function from
+	// being called before we've finished reading from the stderr pipe.
+	stderrWaitGroup sync.WaitGroup
+
+	// processKilled is used for testing only, to flag when the process was
+	// forcefully killed.
+	processKilled bool
+}
+
+// NegotiatedVersion returns the protocol version negotiated with the server.
+// This is only valid after Start() is called.
+func (c *Client) NegotiatedVersion() int {
+	return c.negotiatedVersion
 }
 
 // ClientConfig is the configuration used to initialize a new
@@ -66,7 +110,13 @@ type ClientConfig struct {
 	HandshakeConfig
 
 	// Plugins are the plugins that can be consumed.
-	Plugins map[string]Plugin
+	// The implied version of this PluginSet is the Handshake.ProtocolVersion.
+	Plugins PluginSet
+
+	// VersionedPlugins is a map of PluginSets for specific protocol versions.
+	// These can be used to negotiate a compatible version between client and
+	// server. If this is set, Handshake.ProtocolVersion is not required.
+	VersionedPlugins map[int]PluginSet
 
 	// One of the following must be set, but not both.
 	//
@@ -79,6 +129,13 @@ type ClientConfig struct {
 	Cmd      *exec.Cmd
 	Reattach *ReattachConfig
 
+	// SecureConfig is configuration for verifying the integrity of the
+	// executable. It can not be used with Reattach.
+	SecureConfig *SecureConfig
+
+	// TLSConfig is used to enable TLS on the RPC client.
+	TLSConfig *tls.Config
+
 	// Managed represents if the client should be managed by the
 	// plugin package or not. If true, then by calling CleanupClients,
 	// it will automatically be cleaned up. Otherwise, the client
@@ -109,14 +166,97 @@ type ClientConfig struct {
 	// sync any of these streams.
 	SyncStdout io.Writer
 	SyncStderr io.Writer
+
+	// AllowedProtocols is a list of allowed protocols. If this isn't set,
+	// then only netrpc is allowed. This is so that older go-plugin systems
+	// can show friendly errors if they see a plugin with an unknown
+	// protocol.
+	//
+	// By setting this, you can cause an error immediately on plugin start
+	// if an unsupported protocol is used with a good error message.
+	//
+	// If this isn't set at all (nil value), then only net/rpc is accepted.
+	// This is done for legacy reasons. You must explicitly opt-in to
+	// new protocols.
+	AllowedProtocols []Protocol
+
+	// Logger is the logger that the client will use. If none is provided,
+	// it will default to hclog's default logger.
+	Logger hclog.Logger
+
+	// AutoMTLS has the client and server automatically negotiate mTLS for
+	// transport authentication. This ensures that only the original client will
+	// be allowed to connect to the server, and all other connections will be
+	// rejected. The client will also refuse to connect to any server that isn't
+	// the original instance started by the client.
+	//
+	// In this mode of operation, the client generates a one-time use tls
+	// certificate, sends the public x.509 certificate to the new server, and
+	// the server generates a one-time use tls certificate, and sends the public
+	// x.509 certificate back to the client. These are used to authenticate all
+	// rpc connections between the client and server.
+	//
+	// Setting AutoMTLS to true implies that the server must support the
+	// protocol, and correctly negotiate the tls certificates, or a connection
+	// failure will result.
+	//
+	// The client should not set TLSConfig, nor should the server set a
+	// TLSProvider, because AutoMTLS implies that a new certificate and tls
+	// configuration will be generated at startup.
+	//
+	// You cannot Reattach to a server with this option enabled.
+	AutoMTLS bool
 }
 
 // ReattachConfig is used to configure a client to reattach to an
 // already-running plugin process. You can retrieve this information by
 // calling ReattachConfig on Client.
 type ReattachConfig struct {
-	Addr net.Addr
-	Pid  int
+	Protocol Protocol
+	Addr     net.Addr
+	Pid      int
+}
+
+// SecureConfig is used to configure a client to verify the integrity of an
+// executable before running. It does this by verifying the checksum is
+// expected. Hash is used to specify the hashing method to use when checksumming
+// the file. The configuration is verified by the client by calling the
+// SecureConfig.Check() function.
+//
+// The host process should ensure the checksum was provided by a trusted and
+// authoritative source. The binary should be installed in such a way that it
+// can not be modified by an unauthorized user between the time of this check
+// and the time of execution.
+type SecureConfig struct {
+	Checksum []byte
+	Hash     hash.Hash
+}
+
+// Check takes the filepath to an executable and returns true if the checksum of
+// the file matches the checksum provided in the SecureConfig.
+func (s *SecureConfig) Check(filePath string) (bool, error) {
+	if len(s.Checksum) == 0 {
+		return false, ErrSecureConfigNoChecksum
+	}
+
+	if s.Hash == nil {
+		return false, ErrSecureConfigNoHash
+	}
+
+	file, err := os.Open(filePath)
+	if err != nil {
+		return false, err
+	}
+	defer file.Close()
+
+	_, err = io.Copy(s.Hash, file)
+	if err != nil {
+		return false, err
+	}
+
+	sum := s.Hash.Sum(nil)
+
+	return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
 }
 
 // This makes sure all the managed subprocesses are killed and properly
@@ -142,7 +282,6 @@ func CleanupClients() {
 	}
 	managedClientsLock.Unlock()
 
-	log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
 	wg.Wait()
 }
 
@@ -174,7 +313,22 @@ func NewClient(config *ClientConfig) (c *Client) {
 		config.SyncStderr = ioutil.Discard
 	}
 
-	c = &Client{config: config}
+	if config.AllowedProtocols == nil {
+		config.AllowedProtocols = []Protocol{ProtocolNetRPC}
+	}
+
+	if config.Logger == nil {
+		config.Logger = hclog.New(&hclog.LoggerOptions{
+			Output: hclog.DefaultOutput,
+			Level:  hclog.Trace,
+			Name:   "plugin",
+		})
+	}
+
+	c = &Client{
+		config: config,
+		logger: config.Logger,
+	}
 	if config.Managed {
 		managedClientsLock.Lock()
 		managedClients = append(managedClients, c)
@@ -184,11 +338,11 @@ func NewClient(config *ClientConfig) (c *Client) {
 	return
 }
 
-// Client returns an RPC client for the plugin.
+// Client returns the protocol client for this connection.
 //
-// Subsequent calls to this will return the same RPC client.
-func (c *Client) Client() (*RPCClient, error) {
-	addr, err := c.Start()
+// Subsequent calls to this will return the same client.
+func (c *Client) Client() (ClientProtocol, error) {
+	_, err := c.Start()
 	if err != nil {
 		return nil, err
 	}
@@ -200,29 +354,18 @@ func (c *Client) Client() (*RPCClient, error) {
 		return c.client, nil
 	}
 
-	// Connect to the client
-	conn, err := net.Dial(addr.Network(), addr.String())
-	if err != nil {
-		return nil, err
-	}
-	if tcpConn, ok := conn.(*net.TCPConn); ok {
-		// Make sure to set keep alive so that the connection doesn't die
-		tcpConn.SetKeepAlive(true)
-	}
+	switch c.protocol {
+	case ProtocolNetRPC:
+		c.client, err = newRPCClient(c)
 
-	// Create the actual RPC client
-	c.client, err = NewRPCClient(conn, c.config.Plugins)
-	if err != nil {
-		conn.Close()
-		return nil, err
+	case ProtocolGRPC:
+		c.client, err = newGRPCClient(c.doneCtx, c)
+
+	default:
+		return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
 	}
 
-	// Begin the stream syncing so that stdin, out, err work properly
-	err = c.client.SyncStreams(
-		c.config.SyncStdout,
-		c.config.SyncStderr)
 	if err != nil {
-		c.client.Close()
 		c.client = nil
 		return nil, err
 	}
@@ -237,6 +380,14 @@ func (c *Client) Exited() bool {
 	return c.exited
 }
 
+// killed is used in tests to check if a process failed to exit gracefully, and
+// needed to be killed.
+func (c *Client) killed() bool {
+	c.l.Lock()
+	defer c.l.Unlock()
+	return c.processKilled
+}
+
 // End the executing subprocess (if it is running) and perform any cleanup
 // tasks necessary such as capturing any remaining logs and so on.
 //
@@ -248,14 +399,24 @@ func (c *Client) Kill() {
 	c.l.Lock()
 	process := c.process
 	addr := c.address
-	doneCh := c.doneLogging
 	c.l.Unlock()
 
-	// If there is no process, we never started anything. Nothing to kill.
+	// If there is no process, there is nothing to kill.
 	if process == nil {
 		return
 	}
 
+	defer func() {
+		// Wait for all the client goroutines to finish.
+		c.clientWaitGroup.Wait()
+
+		// Make sure there is no reference to the old process after it has been
+		// killed.
+		c.l.Lock()
+		c.process = nil
+		c.l.Unlock()
+	}()
+
 	// We need to check for address here. It is possible that the plugin
 	// started (process != nil) but has no address (addr == nil) if the
 	// plugin failed at startup. If we do have an address, we need to close
@@ -274,9 +435,10 @@ func (c *Client) Kill() {
 			if err != nil {
 				// If there was an error just log it. We're going to force
 				// kill in a moment anyways.
-				log.Printf(
-					"[WARN] plugin: error closing client during Kill: %s", err)
+				c.logger.Warn("error closing client during Kill", "err", err)
 			}
+		} else {
+			c.logger.Error("client", "error", err)
 		}
 	}
 
@@ -285,17 +447,20 @@ func (c *Client) Kill() {
 	// doneCh which would be closed if the process exits.
 	if graceful {
 		select {
-		case <-doneCh:
+		case <-c.doneCtx.Done():
+			c.logger.Debug("plugin exited")
 			return
-		case <-time.After(250 * time.Millisecond):
+		case <-time.After(2 * time.Second):
 		}
 	}
 
 	// If graceful exiting failed, just kill it
+	c.logger.Warn("plugin failed to exit gracefully")
 	process.Kill()
 
-	// Wait for the client to finish logging so we have a complete log
-	<-doneCh
+	c.l.Lock()
+	c.processKilled = true
+	c.l.Unlock()
 }
 
 // Starts the underlying subprocess, communicating with it to negotiate
@@ -314,77 +479,96 @@ func (c *Client) Start() (addr net.Addr, err error) {
 
 	// If one of cmd or reattach isn't set, then it is an error. We wrap
 	// this in a {} for scoping reasons, and hopeful that the escape
-	// analysis will pop the stock here.
+	// analysis will pop the stack here.
 	{
 		cmdSet := c.config.Cmd != nil
		attachSet := c.config.Reattach != nil
+		secureSet := c.config.SecureConfig != nil
 		if cmdSet == attachSet {
 			return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
 		}
-	}
 
-	// Create the logging channel for when we kill
-	c.doneLogging = make(chan struct{})
-
-	if c.config.Reattach != nil {
-		// Verify the process still exists. If not, then it is an error
-		p, err := os.FindProcess(c.config.Reattach.Pid)
-		if err != nil {
-			return nil, err
+		if secureSet && attachSet {
+			return nil, ErrSecureConfigAndReattach
 		}
+	}
 
-		// Attempt to connect to the addr since on Unix systems FindProcess
-		// doesn't actually return an error if it can't find the process.
-		conn, err := net.Dial(
-			c.config.Reattach.Addr.Network(),
-			c.config.Reattach.Addr.String())
-		if err != nil {
-			p.Kill()
-			return nil, ErrProcessNotFound
-		}
-		conn.Close()
-
-		// Goroutine to mark exit status
-		go func(pid int) {
-			// Wait for the process to die
-			pidWait(pid)
-
-			// Log so we can see it
-			log.Printf("[DEBUG] plugin: reattached plugin process exited\n")
+	if c.config.Reattach != nil {
+		return c.reattach()
+	}
 
-			// Mark it
-			c.l.Lock()
-			defer c.l.Unlock()
-			c.exited = true
+	if c.config.VersionedPlugins == nil {
+		c.config.VersionedPlugins = make(map[int]PluginSet)
+	}
 
-			// Close the logging channel since that doesn't work on reattach
-			close(c.doneLogging)
-		}(p.Pid)
+	// handle all plugins as versioned, using the handshake config as the default.
+	version := int(c.config.ProtocolVersion)
 
-		// Set the address and process
-		c.address = c.config.Reattach.Addr
-		c.process = p
+	// Make sure we're not overwriting a real version 0. If ProtocolVersion was
+	// non-zero, then we have to just assume the user made sure that
+	// VersionedPlugins doesn't conflict.
+	if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil {
+		c.config.VersionedPlugins[version] = c.config.Plugins
+	}
 
-		return c.address, nil
+	var versionStrings []string
+	for v := range c.config.VersionedPlugins {
+		versionStrings = append(versionStrings, strconv.Itoa(v))
 	}
 
 	env := []string{
 		fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
 		fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
 		fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+		fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")),
 	}
 
-	stdout_r, stdout_w := io.Pipe()
-	stderr_r, stderr_w := io.Pipe()
-
 	cmd := c.config.Cmd
 	cmd.Env = append(cmd.Env, os.Environ()...)
 	cmd.Env = append(cmd.Env, env...)
 	cmd.Stdin = os.Stdin
-	cmd.Stderr = stderr_w
-	cmd.Stdout = stdout_w
 
-	log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args)
+	cmdStdout, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	cmdStderr, err := cmd.StderrPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	if c.config.SecureConfig != nil {
+		if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
+			return nil, fmt.Errorf("error verifying checksum: %s", err)
+		} else if !ok {
+			return nil, ErrChecksumsDoNotMatch
+		}
+	}
+
+	// Setup a temporary certificate for client/server mtls, and send the public
+	// certificate to the plugin.
+	if c.config.AutoMTLS {
+		c.logger.Info("configuring client automatic mTLS")
+		certPEM, keyPEM, err := generateCert()
+		if err != nil {
+			c.logger.Error("failed to generate client certificate", "error", err)
+			return nil, err
+		}
+		cert, err := tls.X509KeyPair(certPEM, keyPEM)
+		if err != nil {
+			c.logger.Error("failed to parse client certificate", "error", err)
+			return nil, err
+		}
+
+		cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM))
+
+		c.config.TLSConfig = &tls.Config{
+			Certificates: []tls.Certificate{cert},
+			ServerName:   "localhost",
+		}
+	}
+
+	c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
 	err = cmd.Start()
 	if err != nil {
 		return
@@ -392,6 +576,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
 
 	// Set the process
 	c.process = cmd.Process
+	c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid)
 
 	// Make sure the command is properly cleaned up if there is an error
 	defer func() {
@@ -406,57 +591,76 @@ func (c *Client) Start() (addr net.Addr, err error) {
 		}
 	}()
 
-	// Start goroutine to wait for process to exit
-	exitCh := make(chan struct{})
+	// Create a context for when we kill
+	c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+	// Start goroutine that logs the stderr
+	c.clientWaitGroup.Add(1)
+	c.stderrWaitGroup.Add(1)
+	// logStderr calls Done()
+	go c.logStderr(cmdStderr)
+
+	c.clientWaitGroup.Add(1)
 	go func() {
-		// Make sure we close the write end of our stderr/stdout so
-		// that the readers send EOF properly.
-		defer stderr_w.Close()
-		defer stdout_w.Close()
+		// ensure the context is cancelled when we're done
		defer c.ctxCancel()
+
+		defer c.clientWaitGroup.Done()
+
+		// get the cmd info early, since the process information will be removed
+		// in Kill.
+		pid := c.process.Pid
+		path := cmd.Path
+
+		// wait to finish reading from stderr since the stderr pipe reader
+		// will be closed by the subsequent call to cmd.Wait().
+		c.stderrWaitGroup.Wait()
 
 		// Wait for the command to end.
-		cmd.Wait()
+		err := cmd.Wait()
+
+		debugMsgArgs := []interface{}{
+			"path", path,
+			"pid", pid,
+		}
+		if err != nil {
+			debugMsgArgs = append(debugMsgArgs,
+				[]interface{}{"error", err.Error()}...)
+		}
 
 		// Log and make sure to flush the logs right away
-		log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path)
+		c.logger.Debug("plugin process exited", debugMsgArgs...)
 		os.Stderr.Sync()
 
-		// Mark that we exited
-		close(exitCh)
-
 		// Set that we exited, which takes a lock
 		c.l.Lock()
 		defer c.l.Unlock()
 		c.exited = true
 	}()
 
-	// Start goroutine that logs the stderr
-	go c.logStderr(stderr_r)
-
 	// Start a goroutine that is going to be reading the lines
 	// out of stdout
-	linesCh := make(chan []byte)
+	linesCh := make(chan string)
+	c.clientWaitGroup.Add(1)
 	go func() {
+		defer c.clientWaitGroup.Done()
 		defer close(linesCh)
-		buf := bufio.NewReader(stdout_r)
-		for {
-			line, err := buf.ReadBytes('\n')
-			if line != nil {
-				linesCh <- line
-			}
-
-			if err == io.EOF {
-				return
-			}
+		scanner := bufio.NewScanner(cmdStdout)
+		for scanner.Scan() {
+			linesCh <- scanner.Text()
 		}
 	}()
 
 	// Make sure after we exit we read the lines from stdout forever
-	// so they don't block since it is an io.Pipe
+	// so they don't block since it is a pipe.
+	// The scanner goroutine above will close this, but track it with a wait
+	// group for completeness.
+	c.clientWaitGroup.Add(1)
 	defer func() {
 		go func() {
-			for _ = range linesCh {
+			defer c.clientWaitGroup.Done()
+			for range linesCh {
 			}
 		}()
 	}()
@@ -465,17 +669,17 @@ func (c *Client) Start() (addr net.Addr, err error) {
 	timeout := time.After(c.config.StartTimeout)
 
 	// Start looking for the address
-	log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path)
+	c.logger.Debug("waiting for RPC address", "path", cmd.Path)
 	select {
 	case <-timeout:
 		err = errors.New("timeout while waiting for plugin to start")
-	case <-exitCh:
+	case <-c.doneCtx.Done():
 		err = errors.New("plugin exited before we could connect")
-	case lineBytes := <-linesCh:
+	case line := <-linesCh:
 		// Trim the line and split by "|" in order to get the parts of
 		// the output.
-		line := strings.TrimSpace(string(lineBytes))
-		parts := strings.SplitN(line, "|", 4)
+		line = strings.TrimSpace(line)
+		parts := strings.SplitN(line, "|", 6)
 		if len(parts) < 4 {
 			err = fmt.Errorf(
 				"Unrecognized remote plugin message: %s\n\n"+
@@ -495,27 +699,25 @@ func (c *Client) Start() (addr net.Addr, err error) {
 
 		if int(coreProtocol) != CoreProtocolVersion {
 			err = fmt.Errorf("Incompatible core API version with plugin. "+
-				"Plugin version: %s, Ours: %d\n\n"+
+				"Plugin version: %s, Core version: %d\n\n"+
 				"To fix this, the plugin usually only needs to be recompiled.\n"+
 				"Please report this to the plugin author.", parts[0], CoreProtocolVersion)
 			return
 		}
 	}
 
-	// Parse the protocol version
-	var protocol int64
-	protocol, err = strconv.ParseInt(parts[1], 10, 0)
+	// Test the API version
+	version, pluginSet, err := c.checkProtoVersion(parts[1])
 	if err != nil {
-		err = fmt.Errorf("Error parsing protocol version: %s", err)
-		return
+		return addr, err
 	}
 
-	// Test the API version
-	if uint(protocol) != c.config.ProtocolVersion {
-		err = fmt.Errorf("Incompatible API version with plugin. "+
-			"Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
-		return
-	}
+	// set the Plugins value to the compatible set, so the version
+	// doesn't need to be passed through to the ClientProtocol
+	// implementation.
+	c.config.Plugins = pluginSet
+	c.negotiatedVersion = version
+	c.logger.Debug("using plugin", "version", version)
 
 	switch parts[2] {
 	case "tcp":
@@ -525,12 +727,143 @@ func (c *Client) Start() (addr net.Addr, err error) {
 	default:
 		err = fmt.Errorf("Unknown address type: %s", parts[3])
 	}
+
+	// If we have a server type, then record that. We default to net/rpc
+	// for backwards compatibility.
+	c.protocol = ProtocolNetRPC
+	if len(parts) >= 5 {
+		c.protocol = Protocol(parts[4])
+	}
+
+	found := false
+	for _, p := range c.config.AllowedProtocols {
+		if p == c.protocol {
+			found = true
+			break
+		}
+	}
+	if !found {
+		err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
+			c.protocol, c.config.AllowedProtocols)
+		return addr, err
+	}
+
+	// See if we have a TLS certificate from the server.
+	// Checking if the length is > 50 rules out catching the unused "extra"
+	// data returned from some older implementations.
+	if len(parts) >= 6 && len(parts[5]) > 50 {
+		err := c.loadServerCert(parts[5])
+		if err != nil {
+			return nil, fmt.Errorf("error parsing server cert: %s", err)
+		}
+	}
 	}
 
 	c.address = addr
 	return
 }
 
+// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the
+// server, and load it as the RootCA for the client TLSConfig.
+func (c *Client) loadServerCert(cert string) error {
+	certPool := x509.NewCertPool()
+
+	asn1, err := base64.RawStdEncoding.DecodeString(cert)
+	if err != nil {
+		return err
+	}
+
+	x509Cert, err := x509.ParseCertificate([]byte(asn1))
+	if err != nil {
+		return err
+	}
+
+	certPool.AddCert(x509Cert)
+
+	c.config.TLSConfig.RootCAs = certPool
+	return nil
+}
+
+func (c *Client) reattach() (net.Addr, error) {
+	// Verify the process still exists. If not, then it is an error
+	p, err := os.FindProcess(c.config.Reattach.Pid)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt to connect to the addr since on Unix systems FindProcess
+	// doesn't actually return an error if it can't find the process.
+	conn, err := net.Dial(
+		c.config.Reattach.Addr.Network(),
+		c.config.Reattach.Addr.String())
+	if err != nil {
+		p.Kill()
+		return nil, ErrProcessNotFound
+	}
+	conn.Close()
+
+	// Create a context for when we kill
+	c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
+
+	c.clientWaitGroup.Add(1)
+	// Goroutine to mark exit status
+	go func(pid int) {
+		defer c.clientWaitGroup.Done()
+
+		// ensure the context is cancelled when we're done
+		defer c.ctxCancel()
+
+		// Wait for the process to die
+		pidWait(pid)
+
+		// Log so we can see it
+		c.logger.Debug("reattached plugin process exited")
+
+		// Mark it
+		c.l.Lock()
+		defer c.l.Unlock()
+		c.exited = true
+	}(p.Pid)
+
+	// Set the address and process
+	c.address = c.config.Reattach.Addr
+	c.process = p
+	c.protocol = c.config.Reattach.Protocol
+	if c.protocol == "" {
+		// Default the protocol to net/rpc for backwards compatibility
+		c.protocol = ProtocolNetRPC
+	}
+
+	return c.address, nil
+}
+
+// checkProtoVersion returns the negotiated version and PluginSet.
+// This returns an error if the server returned an incompatible protocol
+// version, or an invalid handshake response.
+func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) {
+	serverVersion, err := strconv.Atoi(protoVersion)
+	if err != nil {
+		return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err)
+	}
+
+	// record these for the error message
+	var clientVersions []int
+
+	// all versions, including the legacy ProtocolVersion have been added to
+	// the versions set
+	for version, plugins := range c.config.VersionedPlugins {
+		clientVersions = append(clientVersions, version)
+
+		if serverVersion != version {
+			continue
+		}
+		return version, plugins, nil
+	}
+
+	return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+
+		"Plugin version: %d, Client versions: %d", serverVersion, clientVersions)
+}
+
 // ReattachConfig returns the information that must be provided to NewClient
 // to reattach to the plugin process that this client started. This is
 // useful for plugins that detach from their parent process.
@@ -555,27 +888,138 @@ func (c *Client) ReattachConfig() *ReattachConfig {
 	}
 
 	return &ReattachConfig{
-		Addr: c.address,
-		Pid:  c.config.Cmd.Process.Pid,
+		Protocol: c.protocol,
+		Addr:     c.address,
+		Pid:      c.config.Cmd.Process.Pid,
+	}
+}
+
+// Protocol returns the protocol of server on the remote end. This will
+// start the plugin process if it isn't already started. Errors from
+// starting the plugin are suppressed and ProtocolInvalid is returned. It
+// is recommended you call Start explicitly before calling Protocol to ensure
+// no errors occur.
+func (c *Client) Protocol() Protocol {
+	_, err := c.Start()
+	if err != nil {
+		return ProtocolInvalid
+	}
+
+	return c.protocol
+}
+
+func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
+	return func(_ string, _ time.Duration) (net.Conn, error) {
+		// Connect to the client
+		conn, err := net.Dial(addr.Network(), addr.String())
+		if err != nil {
+			return nil, err
+		}
+		if tcpConn, ok := conn.(*net.TCPConn); ok {
+			// Make sure to set keep alive so that the connection doesn't die
+			tcpConn.SetKeepAlive(true)
+		}
+
+		return conn, nil
 	}
 }
+
+// dialer is compatible with grpc.WithDialer and creates the connection
+// to the plugin.
+func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
+	conn, err := netAddrDialer(c.address)("", timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	// If we have a TLS config we wrap our connection. We only do this
+	// for net/rpc since gRPC uses its own mechanism for TLS.
+	if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
+		conn = tls.Client(conn, c.config.TLSConfig)
+	}
+
+	return conn, nil
+}
+
+var stdErrBufferSize = 64 * 1024
+
 func (c *Client) logStderr(r io.Reader) {
-	bufR := bufio.NewReader(r)
+	defer c.clientWaitGroup.Done()
+	defer c.stderrWaitGroup.Done()
+	l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
+
+	reader := bufio.NewReaderSize(r, stdErrBufferSize)
+	// continuation indicates the previous line was a prefix
+	continuation := false
+
 	for {
-		line, err := bufR.ReadString('\n')
-		if line != "" {
-			c.config.Stderr.Write([]byte(line))
+		line, isPrefix, err := reader.ReadLine()
+		switch {
+		case err == io.EOF:
+			return
+		case err != nil:
+			l.Error("reading plugin stderr", "error", err)
+			return
+		}
+
+		c.config.Stderr.Write(line)
+
+		// The line was longer than our max token size, so it's likely
+		// incomplete and won't unmarshal.
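+		// In that case the raw chunk is logged at Debug below, and the
+		// continuation flag records that the next read continues this
+		// same line.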
+ if isPrefix || continuation { + l.Debug(string(line)) + + // if we're finishing a continued line, add the newline back in + if !isPrefix { + c.config.Stderr.Write([]byte{'\n'}) + } - line = strings.TrimRightFunc(line, unicode.IsSpace) - log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line) + continuation = isPrefix + continue } - if err == io.EOF { - break + c.config.Stderr.Write([]byte{'\n'}) + + entry, err := parseJSON(line) + // If output is not JSON format, print directly to Debug + if err != nil { + // Attempt to infer the desired log level from the commonly used + // string prefixes + switch line := string(line); { + case strings.HasPrefix(line, "[TRACE]"): + l.Trace(line) + case strings.HasPrefix(line, "[DEBUG]"): + l.Debug(line) + case strings.HasPrefix(line, "[INFO]"): + l.Info(line) + case strings.HasPrefix(line, "[WARN]"): + l.Warn(line) + case strings.HasPrefix(line, "[ERROR]"): + l.Error(line) + default: + l.Debug(line) + } + } else { + out := flattenKVPairs(entry.KVPairs) + + out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat)) + switch hclog.LevelFromString(entry.Level) { + case hclog.Trace: + l.Trace(entry.Message, out...) + case hclog.Debug: + l.Debug(entry.Message, out...) + case hclog.Info: + l.Info(entry.Message, out...) + case hclog.Warn: + l.Warn(entry.Message, out...) + case hclog.Error: + l.Error(entry.Message, out...) + default: + // if there was no log level, it's likely this is unexpected + // json from something other than hclog, and we should output + // it verbatim. + l.Debug(string(line)) + } } } - - // Flag that we've completed logging for others - close(c.doneLogging) } diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod new file mode 100644 index 00000000..f3ddf44e --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.mod @@ -0,0 +1,17 @@ +module github.com/hashicorp/go-plugin + +require ( + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd + github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb + github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 + github.com/oklog/run v1.0.0 + github.com/stretchr/testify v1.3.0 // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect + golang.org/x/text v0.3.0 // indirect + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect + google.golang.org/grpc v1.14.0 +) diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum new file mode 100644 index 00000000..21b14e99 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/go.sum @@ -0,0 +1,31 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 
h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go new file mode 100644 index 00000000..daf142d1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -0,0 +1,457 @@ +package plugin + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "log" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + + "github.com/oklog/run" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +// streamer interface is used in the broker to send/receive connection +// information. 
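+// Both gRPCBrokerServer (the plugin side) and gRPCBrokerClientImpl (the
+// host side) implement it, so GRPCBroker below can drive either end of
+// the stream through the same interface.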
+type streamer interface {
+	Send(*plugin.ConnInfo) error
+	Recv() (*plugin.ConnInfo, error)
+	Close()
+}
+
+// sendErr is used to pass errors back during a send.
+type sendErr struct {
+	i  *plugin.ConnInfo
+	ch chan error
+}
+
+// gRPCBrokerServer is used by the plugin to start a stream and to send
+// connection information to/from the plugin. Implements GRPCBrokerServer and
+// streamer interfaces.
+type gRPCBrokerServer struct {
+	// send is used to send connection info to the gRPC stream.
+	send chan *sendErr
+
+	// recv is used to receive connection info from the gRPC stream.
+	recv chan *plugin.ConnInfo
+
+	// quit closes down the stream.
+	quit chan struct{}
+
+	// o is used to ensure we close the quit channel only once.
+	o sync.Once
+}
+
+func newGRPCBrokerServer() *gRPCBrokerServer {
+	return &gRPCBrokerServer{
+		send: make(chan *sendErr),
+		recv: make(chan *plugin.ConnInfo),
+		quit: make(chan struct{}),
+	}
+}
+
+// StartStream implements the GRPCBrokerServer interface and will block until
+// the quit channel is closed or the context reports Done. The stream will pass
+// connection information to/from the client.
+func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error {
+	doneCh := stream.Context().Done()
+	defer s.Close()
+
+	// Process send stream
+	go func() {
+		for {
+			select {
+			case <-doneCh:
+				return
+			case <-s.quit:
+				return
+			case se := <-s.send:
+				err := stream.Send(se.i)
+				se.ch <- err
+			}
+		}
+	}()
+
+	// Process receive stream
+	for {
+		i, err := stream.Recv()
+		if err != nil {
+			return err
+		}
+		select {
+		case <-doneCh:
+			return nil
+		case <-s.quit:
+			return nil
+		case s.recv <- i:
+		}
+	}
+
+	return nil
+}
+
+// Send is used by the GRPCBroker to pass connection information into the stream
+// to the client.
+func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error {
+	ch := make(chan error)
+	defer close(ch)
+
+	select {
+	case <-s.quit:
+		return errors.New("broker closed")
+	case s.send <- &sendErr{
+		i:  i,
+		ch: ch,
+	}:
+	}
+
+	return <-ch
+}
+
+// Recv is used by the GRPCBroker to pass connection information that has been
+// sent from the client from the stream to the broker.
+func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) {
+	select {
+	case <-s.quit:
+		return nil, errors.New("broker closed")
+	case i := <-s.recv:
+		return i, nil
+	}
+}
+
+// Close closes the quit channel, shutting down the stream.
+func (s *gRPCBrokerServer) Close() {
+	s.o.Do(func() {
+		close(s.quit)
+	})
+}
+
+// gRPCBrokerClientImpl is used by the client to start a stream and to send
+// connection information to/from the client. Implements GRPCBrokerClient and
+// streamer interfaces.
+type gRPCBrokerClientImpl struct {
+	// client is the underlying GRPC client used to make calls to the server.
+	client plugin.GRPCBrokerClient
+
+	// send is used to send connection info to the gRPC stream.
+	send chan *sendErr
+
+	// recv is used to receive connection info from the gRPC stream.
+	recv chan *plugin.ConnInfo
+
+	// quit closes down the stream.
+	quit chan struct{}
+
+	// o is used to ensure we close the quit channel only once.
+	o sync.Once
+}
+
+func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
+	return &gRPCBrokerClientImpl{
+		client: plugin.NewGRPCBrokerClient(conn),
+		send:   make(chan *sendErr),
+		recv:   make(chan *plugin.ConnInfo),
+		quit:   make(chan struct{}),
+	}
+}
+
+// StartStream implements the GRPCBrokerClient interface and will block until
+// the quit channel is closed or the context reports Done.
The stream will pass +// connection information to/from the plugin. +func (s *gRPCBrokerClientImpl) StartStream() error { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + defer s.Close() + + stream, err := s.client.StartStream(ctx) + if err != nil { + return err + } + doneCh := stream.Context().Done() + + go func() { + for { + select { + case <-doneCh: + return + case <-s.quit: + return + case se := <-s.send: + err := stream.Send(se.i) + se.ch <- err + } + } + }() + + for { + i, err := stream.Recv() + if err != nil { + return err + } + select { + case <-doneCh: + return nil + case <-s.quit: + return nil + case s.recv <- i: + } + } + + return nil +} + +// Send is used by the GRPCBroker to pass connection information into the stream +// to the plugin. +func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error { + ch := make(chan error) + defer close(ch) + + select { + case <-s.quit: + return errors.New("broker closed") + case s.send <- &sendErr{ + i: i, + ch: ch, + }: + } + + return <-ch +} + +// Recv is used by the GRPCBroker to pass connection information that has been +// sent from the plugin to the broker. +func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) { + select { + case <-s.quit: + return nil, errors.New("broker closed") + case i := <-s.recv: + return i, nil + } +} + +// Close closes the quit channel, shutting down the stream. +func (s *gRPCBrokerClientImpl) Close() { + s.o.Do(func() { + close(s.quit) + }) +} + +// GRPCBroker is responsible for brokering connections by unique ID. +// +// It is used by plugins to create multiple gRPC connections and data +// streams between the plugin process and the host process. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. +// +// The Plugin interface has access to these for both Server and Client. +// The broker can be used by either (optionally) to reserve and connect to +// new streams. This is useful for complex args and return values, +// or anything else you might need a data stream for. +type GRPCBroker struct { + nextId uint32 + streamer streamer + streams map[uint32]*gRPCBrokerPending + tls *tls.Config + doneCh chan struct{} + o sync.Once + + sync.Mutex +} + +type gRPCBrokerPending struct { + ch chan *plugin.ConnInfo + doneCh chan struct{} +} + +func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker { + return &GRPCBroker{ + streamer: s, + streams: make(map[uint32]*gRPCBrokerPending), + tls: tls, + doneCh: make(chan struct{}), + } +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + listener, err := serverListener() + if err != nil { + return nil, err + } + + err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Network: listener.Addr().Network(), + Address: listener.Addr().String(), + }) + if err != nil { + return nil, err + } + + return listener, nil +} + +// AcceptAndServe is used to accept a specific stream ID and immediately +// serve a gRPC server on that stream ID. This is used to easily serve +// complex arguments. Each AcceptAndServe call opens a new listener socket and +// sends the connection info down the stream to the dialer. Since a new +// connection is opened every call, these calls should be used sparingly. 
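+// (Note that each call also blocks until the broker or server shuts down,
+// so it is typically run in its own goroutine.)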
+// Multiple gRPC server implementations can be registered to a single +// AcceptAndServe call. +func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { + listener, err := b.Accept(id) + if err != nil { + log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) + return + } + defer listener.Close() + + var opts []grpc.ServerOption + if b.tls != nil { + opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} + } + + server := s(opts) + + // Here we use a run group to close this goroutine if the server is shutdown + // or the broker is shutdown. + var g run.Group + { + // Serve on the listener, if shutting down call GracefulStop. + g.Add(func() error { + return server.Serve(listener) + }, func(err error) { + server.GracefulStop() + }) + } + { + // block on the closeCh or the doneCh. If we are shutting down close the + // closeCh. + closeCh := make(chan struct{}) + g.Add(func() error { + select { + case <-b.doneCh: + case <-closeCh: + } + return nil + }, func(err error) { + close(closeCh) + }) + } + + // Block until we are done + g.Run() +} + +// Close closes the stream and all servers. +func (b *GRPCBroker) Close() error { + b.streamer.Close() + b.o.Do(func() { + close(b.doneCh) + }) + return nil +} + +// Dial opens a connection by ID. +func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + var c *plugin.ConnInfo + + // Open the stream + p := b.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + return nil, fmt.Errorf("timeout waiting for connection info") + } + + var addr net.Addr + switch c.Network { + case "tcp": + addr, err = net.ResolveTCPAddr("tcp", c.Address) + case "unix": + addr, err = net.ResolveUnixAddr("unix", c.Address) + default: + err = fmt.Errorf("Unknown address type: %s", c.Address) + } + if err != nil { + return nil, err + } + + return dialGRPCConn(b.tls, netAddrDialer(addr)) +} + +// NextId returns a unique ID to use next. +// +// It is possible for very long-running plugin hosts to wrap this value, +// though it would require a very large amount of calls. In practice +// we've never seen it happen. +func (m *GRPCBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +// +// Uses of GRPCBroker never need to call this. It is called internally by +// the plugin host/client. +func (m *GRPCBroker) Run() { + for { + stream, err := m.streamer.Recv() + if err != nil { + // Once we receive an error, just exit + break + } + + // Initialize the waiter + p := m.getStream(stream.ServiceId) + select { + case p.ch <- stream: + default: + } + + go m.timeoutWait(stream.ServiceId, p) + } +} + +func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. 
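+	// The five second window mirrors the timeout used by Dial above; once
+	// it elapses the pending stream is dropped from the map so a later
+	// request for the same ID starts fresh.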
+ select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) +} diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go new file mode 100644 index 00000000..d0d0d8e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -0,0 +1,117 @@ +package plugin + +import ( + "crypto/tls" + "fmt" + "math" + "net" + "time" + + "github.com/hashicorp/go-plugin/internal/plugin" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) { + // Build dialing options. + opts := make([]grpc.DialOption, 0, 5) + + // We use a custom dialer so that we can connect over unix domain sockets. + opts = append(opts, grpc.WithDialer(dialer)) + + // Fail right away + opts = append(opts, grpc.FailOnNonTempDialError(true)) + + // If we have no TLS configuration set, we need to explicitly tell grpc + // that we're connecting with an insecure connection. + if tls == nil { + opts = append(opts, grpc.WithInsecure()) + } else { + opts = append(opts, grpc.WithTransportCredentials( + credentials.NewTLS(tls))) + } + + opts = append(opts, + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32))) + + + // Connect. Note the first parameter is unused because we use a custom + // dialer that has the state to see the address. + conn, err := grpc.Dial("unused", opts...) + if err != nil { + return nil, err + } + + return conn, nil +} + +// newGRPCClient creates a new GRPCClient. The Client argument is expected +// to be successfully started already with a lock held. +func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { + conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer) + if err != nil { + return nil, err + } + + // Start the broker. + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig) + go broker.Run() + go brokerGRPCClient.StartStream() + + cl := &GRPCClient{ + Conn: conn, + Plugins: c.config.Plugins, + doneCtx: doneCtx, + broker: broker, + controller: plugin.NewGRPCControllerClient(conn), + } + + return cl, nil +} + +// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. +type GRPCClient struct { + Conn *grpc.ClientConn + Plugins map[string]Plugin + + doneCtx context.Context + broker *GRPCBroker + + controller plugin.GRPCControllerClient +} + +// ClientProtocol impl. +func (c *GRPCClient) Close() error { + c.broker.Close() + c.controller.Shutdown(c.doneCtx, &plugin.Empty{}) + return c.Conn.Close() +} + +// ClientProtocol impl. +func (c *GRPCClient) Dispense(name string) (interface{}, error) { + raw, ok := c.Plugins[name] + if !ok { + return nil, fmt.Errorf("unknown plugin type: %s", name) + } + + p, ok := raw.(GRPCPlugin) + if !ok { + return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) + } + + return p.GRPCClient(c.doneCtx, c.broker, c.Conn) +} + +// ClientProtocol impl. 
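+//
+// Ping queries the standard gRPC health-check service that GRPCServer.Init
+// registers under GRPCServiceName; any error here suggests the connection
+// (and likely the plugin process) is no longer healthy.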
+func (c *GRPCClient) Ping() error {
+	client := grpc_health_v1.NewHealthClient(c.Conn)
+	_, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
+		Service: GRPCServiceName,
+	})
+
+	return err
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
new file mode 100644
index 00000000..1a8a8e70
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
@@ -0,0 +1,23 @@
+package plugin
+
+import (
+	"context"
+
+	"github.com/hashicorp/go-plugin/internal/plugin"
+)
+
+// GRPCControllerServer handles shutdown calls to terminate the server when the
+// plugin client is closed.
+type grpcControllerServer struct {
+	server *GRPCServer
+}
+
+// Shutdown stops the grpc server. It first will attempt a graceful stop, then a
+// full stop on the server.
+func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) {
+	resp := &plugin.Empty{}
+
+	// TODO: figure out why GracefulStop doesn't work.
+	s.server.Stop()
+	return resp, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
new file mode 100644
index 00000000..d3dbf1ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
@@ -0,0 +1,142 @@
+package plugin
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+
+	hclog "github.com/hashicorp/go-hclog"
+	"github.com/hashicorp/go-plugin/internal/plugin"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/health"
+	"google.golang.org/grpc/health/grpc_health_v1"
+)
+
+// GRPCServiceName is the name of the service that the health check should
+// return as passing.
+const GRPCServiceName = "plugin"
+
+// DefaultGRPCServer can be used with the "GRPCServer" field for Server
+// as a default factory method to create a gRPC server with no extra options.
+func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
+	return grpc.NewServer(opts...)
+}
+
+// GRPCServer is a ServerType implementation that serves plugins over
+// gRPC. This allows plugins to easily be written for other languages.
+//
+// The GRPCServer outputs a custom configuration as a base64-encoded
+// JSON structure represented by the GRPCServerConfig config structure.
+type GRPCServer struct {
+	// Plugins are the list of plugins to serve.
+	Plugins map[string]Plugin
+
+	// Server is the actual server that will accept connections. This
+	// will be used for plugin registration as well.
+	Server func([]grpc.ServerOption) *grpc.Server
+
+	// TLS should be the TLS configuration if available. If this is nil,
+	// the connection will not have transport security.
+	TLS *tls.Config
+
+	// DoneCh is the channel that is closed when this server has exited.
+	DoneCh chan struct{}
+
+	// Stdout/StderrLis are the readers for stdout/stderr that will be copied
+	// to the stdout/stderr connection that is output.
+	Stdout io.Reader
+	Stderr io.Reader
+
+	config GRPCServerConfig
+	server *grpc.Server
+	broker *GRPCBroker
+
+	logger hclog.Logger
+}
+
+// ServerProtocol impl.
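+//
+// Init wires up, in order: the health service (answering the client's
+// Ping), the broker stream service, the shutdown controller, and finally
+// every entry in Plugins, each of which must implement GRPCPlugin.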
+func (s *GRPCServer) Init() error {
+	// Create our server
+	var opts []grpc.ServerOption
+	if s.TLS != nil {
+		opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
+	}
+	s.server = s.Server(opts)
+
+	// Register the health service
+	healthCheck := health.NewServer()
+	healthCheck.SetServingStatus(
+		GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
+	grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
+
+	// Register the broker service
+	brokerServer := newGRPCBrokerServer()
+	plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
+	s.broker = newGRPCBroker(brokerServer, s.TLS)
+	go s.broker.Run()
+
+	// Register the controller
+	controllerServer := &grpcControllerServer{
+		server: s,
+	}
+	plugin.RegisterGRPCControllerServer(s.server, controllerServer)
+
+	// Register all our plugins onto the gRPC server.
+	for k, raw := range s.Plugins {
+		p, ok := raw.(GRPCPlugin)
+		if !ok {
+			return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
+		}
+
+		if err := p.GRPCServer(s.broker, s.server); err != nil {
+			return fmt.Errorf("error registering %q: %s", k, err)
+		}
+	}
+
+	return nil
+}
+
+// Stop calls Stop on the underlying grpc.Server
+func (s *GRPCServer) Stop() {
+	s.server.Stop()
+}
+
+// GracefulStop calls GracefulStop on the underlying grpc.Server
+func (s *GRPCServer) GracefulStop() {
+	s.server.GracefulStop()
+}
+
+// Config is the GRPCServerConfig encoded as JSON then base64.
+func (s *GRPCServer) Config() string {
+	// Create a buffer that will contain our final contents
+	var buf bytes.Buffer
+
+	// Wrap the base64 encoding with JSON encoding.
+	if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
+		// We panic since this shouldn't happen under any scenario. We
+		// carefully control the structure being encoded here and it should
+		// always be successful.
+		panic(err)
+	}
+
+	return buf.String()
+}
+
+func (s *GRPCServer) Serve(lis net.Listener) {
+	defer close(s.DoneCh)
+	err := s.server.Serve(lis)
+	if err != nil {
+		s.logger.Error("grpc server", "error", err)
+	}
+}
+
+// GRPCServerConfig is the extra configuration passed along for consumers
+// to facilitate using GRPC plugins.
+type GRPCServerConfig struct {
+	StdoutAddr string `json:"stdout_addr"`
+	StderrAddr string `json:"stderr_addr"`
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
new file mode 100644
index 00000000..aa2fdc81
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
@@ -0,0 +1,3 @@
+//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:.
+
+package plugin
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
new file mode 100644
index 00000000..b6850aa5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
@@ -0,0 +1,203 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc_broker.proto
+
+package plugin
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type ConnInfo struct { + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnInfo) Reset() { *m = ConnInfo{} } +func (m *ConnInfo) String() string { return proto.CompactTextString(m) } +func (*ConnInfo) ProtoMessage() {} +func (*ConnInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_802e9beed3ec3b28, []int{0} +} + +func (m *ConnInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnInfo.Unmarshal(m, b) +} +func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) +} +func (m *ConnInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnInfo.Merge(m, src) +} +func (m *ConnInfo) XXX_Size() int { + return xxx_messageInfo_ConnInfo.Size(m) +} +func (m *ConnInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ConnInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ConnInfo proto.InternalMessageInfo + +func (m *ConnInfo) GetServiceId() uint32 { + if m != nil { + return m.ServiceId + } + return 0 +} + +func (m *ConnInfo) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *ConnInfo) GetAddress() string { + if m != nil { + return m.Address + } + return "" +} + +func init() { + proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +} + +func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) } + +var fileDescriptor_802e9beed3ec3b28 = []byte{ + // 175 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, + 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, + 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, + 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, + 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, + 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, + 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, + 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, + 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, + 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { + s.RegisterService(&_GRPCBroker_serviceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "grpc_broker.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto new file mode 100644 index 00000000..3fa79e8a --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message ConnInfo { + uint32 service_id = 1; + string network = 2; + string address = 3; +} + +service GRPCBroker { + rpc StartStream(stream ConnInfo) returns (stream ConnInfo); +} + + diff --git 
a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go new file mode 100644 index 00000000..38b42043 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_controller.proto + +package plugin + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_23c2c7e42feab570, []int{0} +} + +func (m *Empty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Empty.Unmarshal(m, b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return xxx_messageInfo_Empty.Size(m) +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Empty)(nil), "plugin.Empty") +} + +func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) } + +var fileDescriptor_23c2c7e42feab570 = []byte{ + // 108 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, + 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, + 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, + 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, + 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc *grpc.ClientConn +} + +func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { + s.RegisterService(&_GRPCController_serviceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/plugin.GRPCController/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +var _GRPCController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_controller.proto", +} diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto new file mode 100644 index 00000000..345d0a1c --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package plugin; +option go_package = "plugin"; + +message Empty { +} + +// The GRPCController is responsible for telling the plugin server to shutdown. +service GRPCController { + rpc Shutdown(Empty) returns (Empty); +} diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go new file mode 100644 index 00000000..fb2ef930 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go @@ -0,0 +1,73 @@ +package plugin + +import ( + "encoding/json" + "time" +) + +// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host +type logEntry struct { + Message string `json:"@message"` + Level string `json:"@level"` + Timestamp time.Time `json:"timestamp"` + KVPairs []*logEntryKV `json:"kv_pairs"` +} + +// logEntryKV is a key value pair within the Output payload +type logEntryKV struct { + Key string `json:"key"` + Value interface{} `json:"value"` +} + +// flattenKVPairs is used to flatten KVPair slice into []interface{} +// for hclog consumption. 
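+//
+// For example (illustrative values):
+//
+//	flattenKVPairs([]*logEntryKV{{Key: "path", Value: "/tmp/plugin"}})
+//	// => []interface{}{"path", "/tmp/plugin"}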
+func flattenKVPairs(kvs []*logEntryKV) []interface{} {
+	var result []interface{}
+	for _, kv := range kvs {
+		result = append(result, kv.Key)
+		result = append(result, kv.Value)
+	}
+
+	return result
+}
+
+// parseJSON handles parsing JSON output
+func parseJSON(input []byte) (*logEntry, error) {
+	var raw map[string]interface{}
+	entry := &logEntry{}
+
+	err := json.Unmarshal(input, &raw)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse hclog-specific objects
+	if v, ok := raw["@message"]; ok {
+		entry.Message = v.(string)
+		delete(raw, "@message")
+	}
+
+	if v, ok := raw["@level"]; ok {
+		entry.Level = v.(string)
+		delete(raw, "@level")
+	}
+
+	if v, ok := raw["@timestamp"]; ok {
+		t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
+		if err != nil {
+			return nil, err
+		}
+		entry.Timestamp = t
+		delete(raw, "@timestamp")
+	}
+
+	// Parse dynamic KV args from the hclog payload.
+	for k, v := range raw {
+		entry.KVPairs = append(entry.KVPairs, &logEntryKV{
+			Key:   k,
+			Value: v,
+		})
+	}
+
+	return entry, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go
new file mode 100644
index 00000000..88955245
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mtls.go
@@ -0,0 +1,73 @@
+package plugin
+
+import (
+	"bytes"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"math/big"
+	"time"
+)
+
+// generateCert generates a temporary certificate for plugin authentication. The
+// certificate and private key are returned in PEM format.
+func generateCert() (cert []byte, privateKey []byte, err error) {
+	key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	sn, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	host := "localhost"
+
+	template := &x509.Certificate{
+		Subject: pkix.Name{
+			CommonName:   host,
+			Organization: []string{"HashiCorp"},
+		},
+		DNSNames: []string{host},
+		ExtKeyUsage: []x509.ExtKeyUsage{
+			x509.ExtKeyUsageClientAuth,
+			x509.ExtKeyUsageServerAuth,
+		},
+		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
+		BasicConstraintsValid: true,
+		SerialNumber:          sn,
+		NotBefore:             time.Now().Add(-30 * time.Second),
+		NotAfter:              time.Now().Add(262980 * time.Hour),
+		IsCA:                  true,
+	}
+
+	der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var certOut bytes.Buffer
+	if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
+		return nil, nil, err
+	}
+
+	keyBytes, err := x509.MarshalECPrivateKey(key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var keyOut bytes.Buffer
+	if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
+		return nil, nil, err
+	}
+
+	cert = certOut.Bytes()
+	privateKey = keyOut.Bytes()
+
+	return cert, privateKey, nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
index 37c8fd65..79d96746 100644
--- a/vendor/github.com/hashicorp/go-plugin/plugin.go
+++ b/vendor/github.com/hashicorp/go-plugin/plugin.go
@@ -9,7 +9,11 @@
 package plugin
 
 import (
+	"context"
+	"errors"
 	"net/rpc"
+
+	"google.golang.org/grpc"
 )
 
 // Plugin is the
interface that is implemented to serve/connect to an @@ -23,3 +27,32 @@ type Plugin interface { // serving that communicates to the server end of the plugin. Client(*MuxBroker, *rpc.Client) (interface{}, error) } + +// GRPCPlugin is the interface that is implemented to serve/connect to +// a plugin over gRPC. +type GRPCPlugin interface { + // GRPCServer should register this plugin for serving with the + // given GRPCServer. Unlike Plugin.Server, this is only called once + // since gRPC plugins serve singletons. + GRPCServer(*GRPCBroker, *grpc.Server) error + + // GRPCClient should return the interface implementation for the plugin + // you're serving via gRPC. The provided context will be canceled by + // go-plugin in the event of the plugin process exiting. + GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error) +} + +// NetRPCUnsupportedPlugin implements Plugin but returns errors for the +// Server and Client functions. This will effectively disable support for +// net/rpc based plugins. +// +// This struct can be embedded in your struct. +type NetRPCUnsupportedPlugin struct{} + +func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} + +func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) { + return nil, errors.New("net/rpc plugin protocol not supported") +} diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go new file mode 100644 index 00000000..0cfc19e5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-plugin/protocol.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + "net" +) + +// Protocol is an enum representing the types of protocols. +type Protocol string + +const ( + ProtocolInvalid Protocol = "" + ProtocolNetRPC Protocol = "netrpc" + ProtocolGRPC Protocol = "grpc" +) + +// ServerProtocol is an interface that must be implemented for new plugin +// protocols to be servers. +type ServerProtocol interface { + // Init is called once to configure and initialize the protocol, but + // not start listening. This is the point at which all validation should + // be done and errors returned. + Init() error + + // Config is extra configuration to be outputted to stdout. This will + // be automatically base64 encoded to ensure it can be parsed properly. + // This can be an empty string if additional configuration is not needed. + Config() string + + // Serve is called to serve connections on the given listener. This should + // continue until the listener is closed. + Serve(net.Listener) +} + +// ClientProtocol is an interface that must be implemented for new plugin +// protocols to be clients. +type ClientProtocol interface { + io.Closer + + // Dispense dispenses a new instance of the plugin with the given name. + Dispense(string) (interface{}, error) + + // Ping checks that the client connection is still healthy. + Ping() error +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go index 29f9bf06..f30a4b1d 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_client.go @@ -1,6 +1,7 @@ package plugin import ( + "crypto/tls" "fmt" "io" "net" @@ -19,6 +20,42 @@ type RPCClient struct { stdout, stderr net.Conn } +// newRPCClient creates a new RPCClient. 
The Client argument is expected +// to be successfully started already with a lock held. +func newRPCClient(c *Client) (*RPCClient, error) { + // Connect to the client + conn, err := net.Dial(c.address.Network(), c.address.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + tcpConn.SetKeepAlive(true) + } + + if c.config.TLSConfig != nil { + conn = tls.Client(conn, c.config.TLSConfig) + } + + // Create the actual RPC client + result, err := NewRPCClient(conn, c.config.Plugins) + if err != nil { + conn.Close() + return nil, err + } + + // Begin the stream syncing so that stdin, out, err work properly + err = result.SyncStreams( + c.config.SyncStdout, + c.config.SyncStderr) + if err != nil { + result.Close() + return nil, err + } + + return result, nil +} + // NewRPCClient creates a client from an already-open connection-like value. // Dial is typically used instead. func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) { @@ -121,3 +158,13 @@ func (c *RPCClient) Dispense(name string) (interface{}, error) { return p.Client(c.broker, rpc.NewClient(conn)) } + +// Ping pings the connection to ensure it is still alive. +// +// The error from the RPC call is returned exactly if you want to inspect +// it for further error analysis. Any error returned from here would indicate +// that the connection to the plugin is not healthy. +func (c *RPCClient) Ping() error { + var empty struct{} + return c.control.Call("Control.Ping", true, &empty) +} diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go index 3984dc89..5bb18dd5 100644 --- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go +++ b/vendor/github.com/hashicorp/go-plugin/rpc_server.go @@ -34,10 +34,14 @@ type RPCServer struct { lock sync.Mutex } -// Accept accepts connections on a listener and serves requests for -// each incoming connection. Accept blocks; the caller typically invokes -// it in a go statement. -func (s *RPCServer) Accept(lis net.Listener) { +// ServerProtocol impl. +func (s *RPCServer) Init() error { return nil } + +// ServerProtocol impl. +func (s *RPCServer) Config() string { return "" } + +// ServerProtocol impl. +func (s *RPCServer) Serve(lis net.Listener) { for { conn, err := lis.Accept() if err != nil { @@ -122,6 +126,14 @@ type controlServer struct { server *RPCServer } +// Ping can be called to verify the connection (and likely the binary) +// is still alive to a plugin. +func (c *controlServer) Ping( + null bool, response *struct{}) error { + *response = struct{}{} + return nil +} + func (c *controlServer) Quit( null bool, response *struct{}) error { // End the server diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index b5c5270a..4c230e3a 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -1,6 +1,9 @@ package plugin import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" "errors" "fmt" "io/ioutil" @@ -9,8 +12,14 @@ import ( "os" "os/signal" "runtime" + "sort" "strconv" + "strings" "sync/atomic" + + "github.com/hashicorp/go-hclog" + + "google.golang.org/grpc" ) // CoreProtocolVersion is the ProtocolVersion of the plugin system itself. 
@@ -30,6 +39,8 @@ type HandshakeConfig struct { // ProtocolVersion is the version that clients must match on to // agree they can communicate. This should match the ProtocolVersion // set on ClientConfig when using a plugin. + // This field is not required if VersionedPlugins are being used in the + // Client or Server configurations. ProtocolVersion uint // MagicCookieKey and value are used as a very basic verification @@ -40,19 +51,125 @@ type HandshakeConfig struct { MagicCookieValue string } +// PluginSet is a set of plugins provided to be registered in the plugin +// server. +type PluginSet map[string]Plugin + // ServeConfig configures what sorts of plugins are served. type ServeConfig struct { // HandshakeConfig is the configuration that must match clients. HandshakeConfig + // TLSProvider is a function that returns a configured tls.Config. + TLSProvider func() (*tls.Config, error) + // Plugins are the plugins that are served. - Plugins map[string]Plugin + // The implied version of this PluginSet is the Handshake.ProtocolVersion. + Plugins PluginSet + + // VersionedPlugins is a map of PluginSets for specific protocol versions. + // These can be used to negotiate a compatible version between client and + // server. If this is set, Handshake.ProtocolVersion is not required. + VersionedPlugins map[int]PluginSet + + // GRPCServer should be non-nil to enable serving the plugins over + // gRPC. This is a function to create the server when needed with the + // given server options. The server options populated by go-plugin will + // be for TLS if set. You may modify the input slice. + // + // Note that the grpc.Server will automatically be registered with + // the gRPC health checking service. This is not optional since go-plugin + // relies on this to implement Ping(). + GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger +} + +// protocolVersion determines the protocol version and plugin set to be used by +// the server. In the event that there is no suitable version, the last version +// in the config is returned leaving the client to report the incompatibility. +func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) { + protoVersion := int(opts.ProtocolVersion) + pluginSet := opts.Plugins + protoType := ProtocolNetRPC + // Check if the client sent a list of acceptable versions + var clientVersions []int + if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" { + for _, s := range strings.Split(vs, ",") { + v, err := strconv.Atoi(s) + if err != nil { + fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s) + continue + } + clientVersions = append(clientVersions, v) + } + } + + // We want to iterate in reverse order, to ensure we match the newest + // compatible plugin version. 
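+	// For instance (hypothetical values): a client advertising
+	// PLUGIN_PROTOCOL_VERSIONS="1,2,3" against a server built with
+	// VersionedPlugins for versions 2 and 3 settles on 3 below.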
+ sort.Sort(sort.Reverse(sort.IntSlice(clientVersions))) + + // set the old un-versioned fields as if they were versioned plugins + if opts.VersionedPlugins == nil { + opts.VersionedPlugins = make(map[int]PluginSet) + } + + if pluginSet != nil { + opts.VersionedPlugins[protoVersion] = pluginSet + } + + // Sort the versions to make sure we match the latest first + var versions []int + for v := range opts.VersionedPlugins { + versions = append(versions, v) + } + + sort.Sort(sort.Reverse(sort.IntSlice(versions))) + + // See if we have multiple versions of Plugins to choose from + for _, version := range versions { + // Record each version, since we guarantee that this returns valid + // values even if they are not a protocol match. + protoVersion = version + pluginSet = opts.VersionedPlugins[version] + + // If we have a configured gRPC server we should select a protocol + if opts.GRPCServer != nil { + // All plugins in a set must use the same transport, so check the first + // for the protocol type + for _, p := range pluginSet { + switch p.(type) { + case GRPCPlugin: + protoType = ProtocolGRPC + default: + protoType = ProtocolNetRPC + } + break + } + } + + for _, clientVersion := range clientVersions { + if clientVersion == protoVersion { + return protoVersion, protoType, pluginSet + } + } + } + + // Return the lowest version as the fallback. + // Since we iterated over all the versions in reverse order above, these + // values are from the lowest version number plugins (which may be from + // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins + // fields). This allows serving the oldest version of our plugins to a + // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list. + return protoVersion, protoType, pluginSet } // Serve serves the plugins given by ServeConfig. // // Serve doesn't return until the plugin is done being executed. Any -// errors will be outputted to the log. +// errors will be output to os.Stderr. // // This is the method that plugins should call in their main() functions. func Serve(opts *ServeConfig) { @@ -74,9 +191,23 @@ func Serve(opts *ServeConfig) { os.Exit(1) } + // negotiate the version and plugins + // start with default version in the handshake config + protoVersion, protoType, pluginSet := protocolVersion(opts) + // Logging goes to the original stderr log.SetOutput(os.Stderr) + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } + // Create our new stdout, stderr files. These will override our built-in // stdout/stderr so that it works across the stream boundary. stdout_r, stdout_w, err := os.Pipe() @@ -93,30 +224,113 @@ func Serve(opts *ServeConfig) { // Register a listener so we can accept a connection listener, err := serverListener() if err != nil { - log.Printf("[ERR] plugin: plugin init: %s", err) + logger.Error("plugin init error", "error", err) return } - defer listener.Close() + + // Close the listener on return. We wrap this in a func() on purpose + because the "listener" reference may change to TLS. 
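Before the TLS setup continues below, it may help to see the negotiation above from the plugin author's side. A minimal, hedged sketch of a plugin `main()` serving two protocol versions through the new `VersionedPlugins` field (the `kvPlugin` type and the cookie values are hypothetical placeholders, not part of this diff):

```go
package main

import (
	"net/rpc"

	plugin "github.com/hashicorp/go-plugin"
)

// kvPlugin is a hypothetical no-op net/rpc plugin, present only so the sketch
// compiles; a real plugin would return its RPC server and client here.
type kvPlugin struct{}

func (kvPlugin) Server(*plugin.MuxBroker) (interface{}, error) { return struct{}{}, nil }

func (kvPlugin) Client(*plugin.MuxBroker, *rpc.Client) (interface{}, error) {
	return struct{}{}, nil
}

func main() {
	plugin.Serve(&plugin.ServeConfig{
		HandshakeConfig: plugin.HandshakeConfig{
			// ProtocolVersion may be omitted when VersionedPlugins is set.
			MagicCookieKey:   "EXAMPLE_PLUGIN", // hypothetical cookie
			MagicCookieValue: "example",
		},
		// The client advertises acceptable versions via the
		// PLUGIN_PROTOCOL_VERSIONS environment variable; protocolVersion
		// picks the newest match and falls back to the lowest configured
		// version for legacy clients.
		VersionedPlugins: map[int]plugin.PluginSet{
			2: {"kv": kvPlugin{}},
			3: {"kv": kvPlugin{}},
		},
	})
}
```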
+ defer func() { + listener.Close() + }() + + var tlsConfig *tls.Config + if opts.TLSProvider != nil { + tlsConfig, err = opts.TLSProvider() + if err != nil { + logger.Error("plugin tls init", "error", err) + return + } + } + + var serverCert string + clientCert := os.Getenv("PLUGIN_CLIENT_CERT") + // If the client is configured using AutoMTLS, the certificate will be here, + // and we need to generate our own in response. + if tlsConfig == nil && clientCert != "" { + logger.Info("configuring server automatic mTLS") + clientCertPool := x509.NewCertPool() + if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) { + logger.Error("client cert provided but failed to parse", "cert", clientCert) + } + + certPEM, keyPEM, err := generateCert() + if err != nil { + logger.Error("failed to generate server certificate", "error", err) + panic(err) + } + + cert, err := tls.X509KeyPair(certPEM, keyPEM) + if err != nil { + logger.Error("failed to parse server certificate", "error", err) + panic(err) + } + + tlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: clientCertPool, + MinVersion: tls.VersionTLS12, + } + + // We send back the raw leaf cert data for the client rather than the + // PEM, since the protocol can't handle newlines. + serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0]) + } // Create the channel to tell us when we're done doneCh := make(chan struct{}) - // Create the RPC server to dispense - server := &RPCServer{ - Plugins: opts.Plugins, - Stdout: stdout_r, - Stderr: stderr_r, - DoneCh: doneCh, + // Build the server type + var server ServerProtocol + switch protoType { + case ProtocolNetRPC: + // If we have a TLS configuration then we wrap the listener + // ourselves and do it at that level. + if tlsConfig != nil { + listener = tls.NewListener(listener, tlsConfig) + } + + // Create the RPC server to dispense + server = &RPCServer{ + Plugins: pluginSet, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + } + + case ProtocolGRPC: + // Create the gRPC server + server = &GRPCServer{ + Plugins: pluginSet, + Server: opts.GRPCServer, + TLS: tlsConfig, + Stdout: stdout_r, + Stderr: stderr_r, + DoneCh: doneCh, + logger: logger, + } + + default: + panic("unknown server protocol: " + protoType) + } + + // Initialize the servers + if err := server.Init(); err != nil { + logger.Error("protocol init", "error", err) + return } - // Output the address and service name to stdout so that core can bring it up. - log.Printf("[DEBUG] plugin: plugin address: %s %s\n", - listener.Addr().Network(), listener.Addr().String()) - fmt.Printf("%d|%d|%s|%s\n", + logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) + + // Output the address and service name to stdout so that the client can bring it up. + fmt.Printf("%d|%d|%s|%s|%s|%s\n", CoreProtocolVersion, - opts.ProtocolVersion, + protoVersion, listener.Addr().Network(), - listener.Addr().String()) + listener.Addr().String(), + protoType, + serverCert) os.Stdout.Sync() // Eat the interrupts @@ -127,9 +341,7 @@ func Serve(opts *ServeConfig) { for { <-ch newCount := atomic.AddInt32(&count, 1) - log.Printf( - "[DEBUG] plugin: received interrupt signal (count: %d). 
Ignoring.", - newCount) + logger.Debug("plugin received interrupt signal, ignoring", "count", newCount) } }() @@ -137,10 +349,8 @@ func Serve(opts *ServeConfig) { os.Stdout = stdout_w os.Stderr = stderr_w - // Serve - go server.Accept(listener) - - // Wait for the graceful exit + // Accept connections and wait for completion + go server.Serve(listener) <-doneCh } @@ -153,14 +363,34 @@ func serverListener() (net.Listener, error) { } func serverListener_tcp() (net.Listener, error) { - minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32) - if err != nil { - return nil, err + envMinPort := os.Getenv("PLUGIN_MIN_PORT") + envMaxPort := os.Getenv("PLUGIN_MAX_PORT") + + var minPort, maxPort int64 + var err error + + switch { + case len(envMinPort) == 0: + minPort = 0 + default: + minPort, err = strconv.ParseInt(envMinPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err) + } } - maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32) - if err != nil { - return nil, err + switch { + case len(envMaxPort) == 0: + maxPort = 0 + default: + maxPort, err = strconv.ParseInt(envMaxPort, 10, 32) + if err != nil { + return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err) + } + } + + if minPort > maxPort { + return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort) } for port := minPort; port <= maxPort; port++ { diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go index 9086a1b4..2cf2c26c 100644 --- a/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/vendor/github.com/hashicorp/go-plugin/testing.go @@ -2,17 +2,35 @@ package plugin import ( "bytes" + "context" + "io" "net" "net/rpc" - "testing" + + "github.com/mitchellh/go-testing-interface" + hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/plugin" + "google.golang.org/grpc" ) +// TestOptions allows specifying options that can affect the behavior of the +// test functions +type TestOptions struct { + //ServerStdout causes the given value to be used in place of a blank buffer + //for RPCServer's Stdout + ServerStdout io.ReadCloser + + //ServerStderr causes the given value to be used in place of a blank buffer + //for RPCServer's Stderr + ServerStderr io.ReadCloser +} + // The testing file contains test helpers that you can use outside of // this package for making it easier to test plugins themselves. // TestConn is a helper function for returning a client and server // net.Conn connected to each other. -func TestConn(t *testing.T) (net.Conn, net.Conn) { +func TestConn(t testing.T) (net.Conn, net.Conn) { // Listen to any local port. This listener will be closed // after a single connection is established. l, err := net.Listen("tcp", "127.0.0.1:0") @@ -46,7 +64,7 @@ func TestConn(t *testing.T) (net.Conn, net.Conn) { } // TestRPCConn returns a rpc client and server connected to each other. -func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { +func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) { clientConn, serverConn := TestConn(t) server := rpc.NewServer() @@ -58,12 +76,20 @@ func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) { // TestPluginRPCConn returns a plugin RPC client and server that are connected // together and configured. 
-func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { +func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) { // Create two net.Conns we can use to shuttle our control connection clientConn, serverConn := TestConn(t) // Start up the server server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} + if opts != nil { + if opts.ServerStdout != nil { + server.Stdout = opts.ServerStdout + } + if opts.ServerStderr != nil { + server.Stderr = opts.ServerStderr + } + } go server.ServeConn(serverConn) // Connect the client to the server @@ -74,3 +100,81 @@ func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServ return client, server } + +// TestGRPCConn returns a gRPC client conn and grpc server that are connected +// together and configured. The register function is used to register services +// prior to the Serve call. This is used to test gRPC connections. +func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + server := grpc.NewServer() + register(server) + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + // Connection successful, close the listener + l.Close() + + return conn, server +} + +// TestPluginGRPCConn returns a plugin gRPC client and server that are connected +// together and configured. This is used to test gRPC connections. +func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { + // Create a listener + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("err: %s", err) + } + + // Start up the server + server := &GRPCServer{ + Plugins: ps, + DoneCh: make(chan struct{}), + Server: DefaultGRPCServer, + Stdout: new(bytes.Buffer), + Stderr: new(bytes.Buffer), + logger: hclog.Default(), + } + if err := server.Init(); err != nil { + t.Fatalf("err: %s", err) + } + go server.Serve(l) + + // Connect to the server + conn, err := grpc.Dial( + l.Addr().String(), + grpc.WithBlock(), + grpc.WithInsecure()) + if err != nil { + t.Fatalf("err: %s", err) + } + + brokerGRPCClient := newGRPCBrokerClient(conn) + broker := newGRPCBroker(brokerGRPCClient, nil) + go broker.Run() + go brokerGRPCClient.StartStream() + + // Create the client + client := &GRPCClient{ + Conn: conn, + Plugins: ps, + broker: broker, + doneCtx: context.Background(), + controller: plugin.NewGRPCControllerClient(conn), + } + + return client, server +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/LICENSE b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-retryablehttp/Makefile b/vendor/github.com/hashicorp/go-retryablehttp/Makefile new file mode 100644 index 00000000..da17640e --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/Makefile @@ -0,0 +1,11 @@ +default: test + +test: + go vet ./... + go test -race ./... + +updatedeps: + go get -f -t -u ./... + go get -f -u ./... + +.PHONY: default test updatedeps diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md new file mode 100644 index 00000000..ccdc7e87 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -0,0 +1,46 @@ +go-retryablehttp +================ + +[![Build Status](http://img.shields.io/travis/hashicorp/go-retryablehttp.svg?style=flat-square)][travis] +[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs] + +[travis]: http://travis-ci.org/hashicorp/go-retryablehttp +[godocs]: http://godoc.org/github.com/hashicorp/go-retryablehttp + +The `retryablehttp` package provides a familiar HTTP client interface with +automatic retries and exponential backoff. It is a thin wrapper over the +standard `net/http` client library and exposes nearly the same public API. This +makes `retryablehttp` very easy to drop into existing programs. + +`retryablehttp` performs automatic retries under certain conditions. Mainly, if +an error is returned by the client (connection errors, etc.), or if a 500-range +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. + +The main difference from `net/http` is that requests which take a request body +(POST/PUT et al.) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. 
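To make the rewinding behavior described above concrete before the README's own example, here is a hedged sketch of a tuned client replaying a POST body on retry (the endpoint URL and JSON payload are hypothetical; the fields and helpers used are the ones defined in client.go later in this diff):

```go
package main

import (
	"log"
	"time"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	// Tune the retry policy; LinearJitterBackoff can be swapped in for the
	// default exponential DefaultBackoff.
	client := retryablehttp.NewClient()
	client.RetryMax = 3
	client.RetryWaitMin = 500 * time.Millisecond
	client.Backoff = retryablehttp.LinearJitterBackoff

	// A []byte body is wrapped in a fresh bytes.Reader on every attempt,
	// so the full request can be re-sent if an attempt fails.
	req, err := retryablehttp.NewRequest("POST", "https://example.com/api", []byte(`{"hello":"world"}`))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Printf("status: %s", resp.Status)
}
```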
+ +Example Use +=========== + +Using this library should look almost identical to what you would do with +`net/http`. The simplest example of a GET request is shown below: + +```go +resp, err := retryablehttp.Get("/foo") +if err != nil { + panic(err) +} +``` + +The returned response object is an `*http.Response`, the same thing you would +usually get from `net/http`. Had the request failed one or more times, the above +call would block and retry with exponential backoff. + +For more usage and examples see the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp). diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go new file mode 100644 index 00000000..d5e250a5 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -0,0 +1,549 @@ +// The retryablehttp package provides a familiar HTTP client interface with +// automatic retries and exponential backoff. It is a thin wrapper over the +// standard net/http client library and exposes nearly the same public API. +// This makes retryablehttp very easy to drop into existing programs. +// +// retryablehttp performs automatic retries under certain conditions. Mainly, if +// an error is returned by the client (connection errors, etc.), or if a 500-range +// response is received, then a retry is invoked. Otherwise, the response is +// returned and left to the caller to interpret. +// +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so it should be avoided if possible. +package retryablehttp + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "math/rand" + "net/http" + "net/url" + "os" + "strings" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +var ( + // Default retry configuration + defaultRetryWaitMin = 1 * time.Second + defaultRetryWaitMax = 30 * time.Second + defaultRetryMax = 4 + + // defaultClient is used for performing requests without explicitly making + // a new client. It is purposely private to avoid modifications. + defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) +) + +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + +// LenReader is an interface implemented by many in-memory io.Reader's. Used +// for automatically sending the right Content-Length header when possible. +type LenReader interface { + Len() int +} + +// Request wraps the metadata needed to create HTTP requests. +type Request struct { + // body is a seekable reader over the request body payload. 
This is + // used to rewind the request data in between retries. + body ReaderFunc + + // Embed an HTTP request directly. This makes a *Request act exactly + // like an *http.Request so that all meta methods are supported. + *http.Request +} + +// WithContext returns a wrapped Request with a shallow copy of the underlying +// *http.Request, with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + +// BodyBytes allows accessing the request body. It is an analogue to +// http.Request's Body variable, but it returns a copy of the underlying data +// rather than consuming it. +// +// This function is not thread-safe; do not call it at the same time as another +// call, or at the same time this request is being used with Client.Do. +func (r *Request) BodyBytes() ([]byte, error) { + if r.body == nil { + return nil, nil + } + body, err := r.body() + if err != nil { + return nil, err + } + buf := new(bytes.Buffer) + _, err = buf.ReadFrom(body) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func getBodyReaderAndContentLength(rawBody interface{}) (ReaderFunc, int64, error) { + var bodyReader ReaderFunc + var contentLength int64 + + if rawBody != nil { + switch body := rawBody.(type) { + // If they gave us a function already, great! Use it. + case ReaderFunc: + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + bodyReader = body + tmp, err := body() + if err != nil { + return nil, 0, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := body + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with its seeking, so we want it to match here instead of the + // io.ReadSeeker case. 
+ case *bytes.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := body + bodyReader = func() (io.Reader, error) { + _, err := raw.Seek(0, 0) + return ioutil.NopCloser(raw), err + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(body) + if err != nil { + return nil, 0, err + } + bodyReader = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + default: + return nil, 0, fmt.Errorf("cannot handle type %T", rawBody) + } + } + return bodyReader, contentLength, nil +} + +// FromRequest wraps an http.Request in a retryablehttp.Request +func FromRequest(r *http.Request) (*Request, error) { + bodyReader, _, err := getBodyReaderAndContentLength(r.Body) + if err != nil { + return nil, err + } + // Could assert contentLength == r.ContentLength + return &Request{bodyReader, r}, nil +} + +// NewRequest creates a new wrapped request. +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody) + if err != nil { + return nil, err + } + + httpReq, err := http.NewRequest(method, url, nil) + if err != nil { + return nil, err + } + httpReq.ContentLength = contentLength + + return &Request{bodyReader, httpReq}, nil +} + +// Logger is an interface that allows the use of loggers other than the +// standard log.Logger. +type Logger interface { + Printf(string, ...interface{}) +} + +// RequestLogHook allows a function to run before each retry. The HTTP +// request which will be made, and the retry number (0 for the initial +// request) are available to users. The internal logger is exposed to +// consumers. +type RequestLogHook func(Logger, *http.Request, int) + +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckRetry callback to properly close any +// response body before returning. +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. +type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called when retries are exhausted, and receives the last status +// from the http library. 
If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) + +// Client is used to make HTTP requests. It adds additional functionality +// like automatic retries to tolerate minor outages. +type Client struct { + HTTPClient *http.Client // Internal HTTP client. + Logger Logger // Custom logger instance. + + RetryWaitMin time.Duration // Minimum time to wait + RetryWaitMax time.Duration // Maximum time to wait + RetryMax int // Maximum number of retries + + // RequestLogHook allows a user-supplied function to be called + // before each retry. + RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler +} + +// NewClient creates a new Client with default settings. +func NewClient() *Client { + return &Client{ + HTTPClient: cleanhttp.DefaultClient(), + Logger: log.New(os.Stderr, "", log.LstdFlags), + RetryWaitMin: defaultRetryWaitMin, + RetryWaitMax: defaultRetryWaitMax, + RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, + } +} + +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + + if err != nil { + return true, err + } + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { + return true, nil + } + + return false, nil +} + +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multiplied by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) 
+// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) +func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // The bounds are inverted or equal, so fall back to min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + +// Do wraps calling an HTTP method with retries. +func (c *Client) Do(req *Request) (*http.Response, error) { + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + + var resp *http.Response + var err error + + for i := 0; ; i++ { + var code int // HTTP response code + + // Always rewind the request body when non-nil. + if req.body != nil { + body, err := req.body() + if err != nil { + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Body = c + } else { + req.Body = ioutil.NopCloser(body) + } + } + + if c.RequestLogHook != nil { + c.RequestLogHook(c.Logger, req.Request, i) + } + + // Attempt the request + resp, err = c.HTTPClient.Do(req.Request) + if resp != nil { + code = resp.StatusCode + } + + // Check if we should continue with retries. + checkOK, checkErr := c.CheckRetry(req.Context(), resp, err) + + if err != nil { + if c.Logger != nil { + c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + c.ResponseLogHook(c.Logger, resp) + } + } + + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + return resp, err + } + + // We do this before drainBody because there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + + // We're going to retry, consume any response to reuse the connection. 
+ if err == nil && resp != nil { + c.drainBody(resp.Body) + } + + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) + desc := fmt.Sprintf("%s %s", req.Method, req.URL) + if code > 0 { + desc = fmt.Sprintf("%s (status: %d)", desc, code) + } + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } + select { + case <-req.Context().Done(): + return nil, req.Context().Err() + case <-time.After(wait): + } + } + + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } + return nil, fmt.Errorf("%s %s giving up after %d attempts", + req.Method, req.URL, c.RetryMax+1) +} + +// Try to read the response body so we can reuse this connection. +func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + if c.Logger != nil { + c.Logger.Printf("[ERR] error reading response body: %v", err) + } + } +} + +// Get is a shortcut for doing a GET request without making a new client. +func Get(url string) (*http.Response, error) { + return defaultClient.Get(url) +} + +// Get is a convenience helper for doing simple GET requests. +func (c *Client) Get(url string) (*http.Response, error) { + req, err := NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Head is a shortcut for doing a HEAD request without making a new client. +func Head(url string) (*http.Response, error) { + return defaultClient.Head(url) +} + +// Head is a convenience method for doing simple HEAD requests. +func (c *Client) Head(url string) (*http.Response, error) { + req, err := NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return c.Do(req) +} + +// Post is a shortcut for doing a POST request without making a new client. +func Post(url, bodyType string, body interface{}) (*http.Response, error) { + return defaultClient.Post(url, bodyType, body) +} + +// Post is a convenience method for doing simple POST requests. +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { + req, err := NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return c.Do(req) +} + +// PostForm is a shortcut to perform a POST with form data without creating +// a new client. +func PostForm(url string, data url.Values) (*http.Response, error) { + return defaultClient.PostForm(url, data) +} + +// PostForm is a convenience method for doing simple POST operations using +// pre-filled url.Values form data. 
+func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { + return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.mod b/vendor/github.com/hashicorp/go-retryablehttp/go.mod new file mode 100644 index 00000000..d28c8c8e --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-retryablehttp + +require github.com/hashicorp/go-cleanhttp v0.5.0 diff --git a/vendor/github.com/hashicorp/go-retryablehttp/go.sum b/vendor/github.com/hashicorp/go-retryablehttp/go.sum new file mode 100644 index 00000000..3ed0fd98 --- /dev/null +++ b/vendor/github.com/hashicorp/go-retryablehttp/go.sum @@ -0,0 +1,2 @@ +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile new file mode 100644 index 00000000..c3989e78 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/Makefile @@ -0,0 +1,8 @@ +TEST?=./... 
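+# TEST defaults to all packages (./...); override per invocation, e.g. `make test TEST=./some/pkg` (illustrative path).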
+ +test: + go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4 + go vet $(TEST) + go test $(TEST) -race + +.PHONY: test diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md new file mode 100644 index 00000000..f5abffc2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/README.md @@ -0,0 +1,43 @@ +# rootcerts + +Functions for loading root certificates for TLS connections. + +----- + +Go's standard library `crypto/tls` provides a common mechanism for configuring +TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool +of certificates for the client to use as a trust store when verifying server +certificates. + +This library contains utility functions for loading certificates destined for +that field, as well as one other important thing: + +When the `RootCAs` field is `nil`, the standard library attempts to load the +host's root CA set. This behavior is OS-specific, and the Darwin +implementation contains [a bug that prevents trusted certificates from the +System and Login keychains from being loaded][1]. This library contains +Darwin-specific behavior that works around that bug. + +[1]: https://github.com/golang/go/issues/14514 + +## Example Usage + +Here's a snippet demonstrating how this library is meant to be used (it assumes the `rootcerts` and `cleanhttp` packages are imported): + +```go +func httpClient() (*http.Client, error) { + tlsConfig := &tls.Config{} + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv("MYAPP_CAFILE"), + CAPath: os.Getenv("MYAPP_CAPATH"), + }) + if err != nil { + return nil, err + } + c := cleanhttp.DefaultClient() + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + c.Transport = t + return c, nil +} +``` diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go new file mode 100644 index 00000000..b55cc628 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go @@ -0,0 +1,9 @@ +// Package rootcerts contains functions to aid in loading CA certificates for +// TLS connections. +// +// In addition, its default behavior on Darwin works around an open issue [1] +// in Go's crypto/x509 that prevents certificates from being loaded from the +// System or Login keychains.
+// +// [1] https://github.com/golang/go/issues/14514 +package rootcerts diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.mod b/vendor/github.com/hashicorp/go-rootcerts/go.mod new file mode 100644 index 00000000..e2dd0247 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/go.mod @@ -0,0 +1,5 @@ +module github.com/hashicorp/go-rootcerts + +go 1.12 + +require github.com/mitchellh/go-homedir v1.1.0 diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.sum b/vendor/github.com/hashicorp/go-rootcerts/go.sum new file mode 100644 index 00000000..ae38d147 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/go.sum @@ -0,0 +1,2 @@ +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go new file mode 100644 index 00000000..aeb30ece --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go @@ -0,0 +1,103 @@ +package rootcerts + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// Config determines where LoadCACerts will load certificates from. When both +// CAFile and CAPath are blank, this library's functions will either load +// system roots explicitly and return them, or set the CertPool to nil to allow +// Go's standard library to load system certs. +type Config struct { + // CAFile is a path to a PEM-encoded certificate file or bundle. Takes + // precedence over CAPath. + CAFile string + + // CAPath is a path to a directory populated with PEM-encoded certificates. + CAPath string +} + +// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the +// Config specified. +func ConfigureTLS(t *tls.Config, c *Config) error { + if t == nil { + return nil + } + pool, err := LoadCACerts(c) + if err != nil { + return err + } + t.RootCAs = pool + return nil +} + +// LoadCACerts loads a CertPool based on the Config specified. +func LoadCACerts(c *Config) (*x509.CertPool, error) { + if c == nil { + c = &Config{} + } + if c.CAFile != "" { + return LoadCAFile(c.CAFile) + } + if c.CAPath != "" { + return LoadCAPath(c.CAPath) + } + + return LoadSystemCAs() +} + +// LoadCAFile loads a single PEM-encoded file from the path specified. +func LoadCAFile(caFile string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("Error loading CA File: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile) + } + + return pool, nil +} + +// LoadCAPath walks the provided path and loads all certificates encountered into +// a pool.
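+// +// A minimal usage sketch (illustrative only; the directory path below is an +// assumption, not something the package requires): +// +// pool, err := rootcerts.LoadCAPath("/etc/ssl/certs") +// if err != nil { +// log.Fatal(err) +// } +// tlsConfig := &tls.Config{RootCAs: pool}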
+func LoadCAPath(caPath string) (*x509.CertPool, error) { + pool := x509.NewCertPool() + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() { + return nil + } + + pem, err := ioutil.ReadFile(path) + if err != nil { + return fmt.Errorf("Error loading file from CAPath: %s", err) + } + + ok := pool.AppendCertsFromPEM(pem) + if !ok { + return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path) + } + + return nil + } + + err := filepath.Walk(caPath, walkFn) + if err != nil { + return nil, err + } + + return pool, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go new file mode 100644 index 00000000..66b1472c --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go @@ -0,0 +1,12 @@ +// +build !darwin + +package rootcerts + +import "crypto/x509" + +// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that +// default behavior of standard TLS config libraries is triggered, which is to +// load system certs. +func LoadSystemCAs() (*x509.CertPool, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go new file mode 100644 index 00000000..a9a04065 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go @@ -0,0 +1,48 @@ +package rootcerts + +import ( + "crypto/x509" + "os/exec" + "path" + + "github.com/mitchellh/go-homedir" +) + +// LoadSystemCAs has special behavior on Darwin systems to work around the +// keychain certificate-loading issue in Go's crypto/x509 described in the +// package documentation: it builds the pool by reading the system and login +// keychains directly. +func LoadSystemCAs() (*x509.CertPool, error) { + pool := x509.NewCertPool() + + for _, keychain := range certKeychains() { + err := addCertsFromKeychain(pool, keychain) + if err != nil { + return nil, err + } + } + + return pool, nil +} + +func addCertsFromKeychain(pool *x509.CertPool, keychain string) error { + cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain) + data, err := cmd.Output() + if err != nil { + return err + } + + pool.AppendCertsFromPEM(data) + + return nil +} + +func certKeychains() []string { + keychains := []string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + "/Library/Keychains/System.keychain", + } + home, err := homedir.Dir() + if err == nil { + loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain") + keychains = append(keychains, loginKeychain) + } + return keychains +} diff --git a/vendor/github.com/hashicorp/go-safetemp/LICENSE b/vendor/github.com/hashicorp/go-safetemp/LICENSE new file mode 100644 index 00000000..be2cc4df --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4.
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-safetemp/README.md b/vendor/github.com/hashicorp/go-safetemp/README.md new file mode 100644 index 00000000..02ece331 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/README.md @@ -0,0 +1,10 @@ +# go-safetemp +[![Godoc](https://godoc.org/github.com/hashicorp/go-safetemp?status.svg)](https://godoc.org/github.com/hashicorp/go-safetemp) + +Functions for safely working with temporary directories and files. + +## Why? + +The Go standard library provides the excellent `ioutil` package for +working with temporary directories and files. This library builds on top +of it to provide safer abstractions. diff --git a/vendor/github.com/hashicorp/go-safetemp/go.mod b/vendor/github.com/hashicorp/go-safetemp/go.mod new file mode 100644 index 00000000..02bc5f5b --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-safetemp diff --git a/vendor/github.com/hashicorp/go-safetemp/safetemp.go b/vendor/github.com/hashicorp/go-safetemp/safetemp.go new file mode 100644 index 00000000..c4ae72b7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-safetemp/safetemp.go @@ -0,0 +1,40 @@ +package safetemp + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// Dir returns the path of a temporary directory that is not yet created. This +// can be used with calls that expect a non-existent directory. +// +// The directory is created as a child of a temporary directory created +// within the directory dir starting with prefix. The temporary directory +// returned is always named "temp". The parent directory has the specified +// prefix. +// +// The returned io.Closer should be used to clean up the returned directory. +// This will properly remove the returned directory and any other temporary +// files created. +// +// If an error is returned, the Closer does not need to be called (and will +// be nil).
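+// +// A minimal usage sketch (illustrative only; the prefix is arbitrary): +// +// td, closer, err := safetemp.Dir("", "terraform") +// if err != nil { +// return err +// } +// defer closer.Close() +// // td does not exist yet; pass it to a call that expects to create it.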
+func Dir(dir, prefix string) (string, io.Closer, error) { + // Create the temporary directory + td, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", nil, err + } + + return filepath.Join(td, "temp"), pathCloser(td), nil +} + +// pathCloser implements io.Closer to remove the given path on Close. +type pathCloser string + +// Close deletes this path. +func (p pathCloser) Close() error { + return os.RemoveAll(string(p)) +} diff --git a/vendor/github.com/hashicorp/go-slug/LICENSE b/vendor/github.com/hashicorp/go-slug/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. 
For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/go-slug/README.md b/vendor/github.com/hashicorp/go-slug/README.md new file mode 100644 index 00000000..64fabbd2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/README.md @@ -0,0 +1,70 @@ +# go-slug + +[![Build Status](https://travis-ci.org/hashicorp/go-slug.svg?branch=master)](https://travis-ci.org/hashicorp/go-slug) +[![GitHub license](https://img.shields.io/github/license/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/blob/master/LICENSE) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-slug?status.svg)](https://godoc.org/github.com/hashicorp/go-slug) +[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-slug)](https://goreportcard.com/report/github.com/hashicorp/go-slug) +[![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-slug.svg)](https://github.com/hashicorp/go-slug/issues) + +Package `go-slug` offers functions for packing and unpacking Terraform Enterprise +compatible slugs. Slugs are gzip compressed tar files containing Terraform configuration files. + +## Installation + +Installation can be done with a normal `go get`: + +``` +go get -u github.com/hashicorp/go-slug +``` + +## Documentation + +For the complete usage of `go-slug`, see the full [package docs](https://godoc.org/github.com/hashicorp/go-slug). + +## Example + +Packing or unpacking a slug is pretty straightforward as shown in the +following example: + +```go +package main + +import ( + "bytes" + "io/ioutil" + "log" + "os" + + slug "github.com/hashicorp/go-slug" +) + +func main() { + // First create a buffer for storing the slug. + buf := bytes.NewBuffer(nil) + + // Then call the Pack function with a directory path containing the + // configuration files and an io.Writer to write the slug to. The final + // argument controls whether symlinks pointing outside the source + // directory are dereferenced (true) or omitted (false). + if _, err := slug.Pack("testdata/archive-dir", buf, false); err != nil { + log.Fatal(err) + } + + // Create a directory to unpack the slug contents into. + dst, err := ioutil.TempDir("", "slug") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(dst) + + // Unpacking a slug is done by calling the Unpack function with an + // io.Reader to read the slug from and a directory path of an existing + // directory to store the unpacked configuration files. + if err := slug.Unpack(buf, dst); err != nil { + log.Fatal(err) + } +} +``` + +## Issues and Contributing + +If you find an issue with this package, please open an issue. Contributions +are welcome: fork this repository and submit a pull request. diff --git a/vendor/github.com/hashicorp/go-slug/go.mod b/vendor/github.com/hashicorp/go-slug/go.mod new file mode 100644 index 00000000..49ed430a --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/go-slug + +go 1.12 diff --git a/vendor/github.com/hashicorp/go-slug/slug.go b/vendor/github.com/hashicorp/go-slug/slug.go new file mode 100644 index 00000000..8dd40786 --- /dev/null +++ b/vendor/github.com/hashicorp/go-slug/slug.go @@ -0,0 +1,308 @@ +package slug + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "os" + "path/filepath" + "strings" +) + +// Meta provides detailed information about a slug. +type Meta struct { + // The list of files contained in the slug. + Files []string + + // Total size of the slug in bytes.
+ Size int64 +} + +// Pack creates a slug from a src directory, and writes the new slug +// to w. Returns metadata about the slug and any errors. +// +// When dereference is set to true, symlinks with a target outside of +// the src directory will be dereferenced. When dereference is set to +// false symlinks with a target outside the src directory are omitted +// from the slug. +func Pack(src string, w io.Writer, dereference bool) (*Meta, error) { + // Gzip compress all the output data. + gzipW := gzip.NewWriter(w) + + // Tar the file contents. + tarW := tar.NewWriter(gzipW) + + // Track the metadata details as we go. + meta := &Meta{} + + // Walk the tree of files. + err := filepath.Walk(src, packWalkFn(src, src, src, tarW, meta, dereference)) + if err != nil { + return nil, err + } + + // Flush the tar writer. + if err := tarW.Close(); err != nil { + return nil, fmt.Errorf("Failed to close the tar archive: %v", err) + } + + // Flush the gzip writer. + if err := gzipW.Close(); err != nil { + return nil, fmt.Errorf("Failed to close the gzip writer: %v", err) + } + + return meta, nil +} + +func packWalkFn(root, src, dst string, tarW *tar.Writer, meta *Meta, dereference bool) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Skip the .git directory. + if info.IsDir() && info.Name() == ".git" { + return filepath.SkipDir + } + + // Get the relative path from the current src directory. + subpath, err := filepath.Rel(src, path) + if err != nil { + return fmt.Errorf("Failed to get relative path for file %q: %v", path, err) + } + if subpath == "." { + return nil + } + + // Ignore the .terraform directory itself. + if info.IsDir() && info.Name() == ".terraform" { + return nil + } + + // Ignore any files in the .terraform directory. + if !info.IsDir() && filepath.Dir(subpath) == ".terraform" { + return nil + } + + // Skip .terraform subdirectories, except for the modules subdirectory. + if strings.HasPrefix(subpath, ".terraform"+string(filepath.Separator)) && + !strings.HasPrefix(subpath, filepath.Clean(".terraform/modules")) { + return filepath.SkipDir + } + + // Get the relative path from the initial root directory. + subpath, err = filepath.Rel(root, strings.Replace(path, src, dst, 1)) + if err != nil { + return fmt.Errorf("Failed to get relative path for file %q: %v", path, err) + } + if subpath == "." { + return nil + } + + // Check the file type and if we need to write the body. + keepFile, writeBody := checkFileMode(info.Mode()) + if !keepFile { + return nil + } + + fm := info.Mode() + header := &tar.Header{ + Name: filepath.ToSlash(subpath), + ModTime: info.ModTime(), + Mode: int64(fm.Perm()), + } + + switch { + case info.IsDir(): + header.Typeflag = tar.TypeDir + header.Name += "/" + + case fm.IsRegular(): + header.Typeflag = tar.TypeReg + header.Size = info.Size() + + case fm&os.ModeSymlink != 0: + target, err := filepath.EvalSymlinks(path) + if err != nil { + return fmt.Errorf("Failed to get symbolic link destination for %q: %v", path, err) + } + + // If the target is within the current source, we + // create the symlink using a relative path. + if strings.Contains(target, src) { + link, err := filepath.Rel(filepath.Dir(path), target) + if err != nil { + return fmt.Errorf("Failed to get relative path for symlink destination %q: %v", target, err) + } + + header.Typeflag = tar.TypeSymlink + header.Linkname = filepath.ToSlash(link) + + // Break out of the case as a symlink + // doesn't need any additional config. 
+ break + } + + if !dereference { + // Return early as the symlink has a target outside of the + // src directory and we don't want to dereference symlinks. + return nil + } + + // Get the file info for the target. + info, err = os.Lstat(target) + if err != nil { + return fmt.Errorf("Failed to get file info from file %q: %v", target, err) + } + + // If the target is a directory we can recurse into the target + // directory by calling the packWalkFn with updated arguments. + if info.IsDir() { + return filepath.Walk(target, packWalkFn(root, target, path, tarW, meta, dereference)) + } + + // Dereference this symlink by updating the header with the target file + // details and set writeBody to true so the body will be written. + header.Typeflag = tar.TypeReg + header.ModTime = info.ModTime() + header.Mode = int64(info.Mode().Perm()) + header.Size = info.Size() + writeBody = true + + default: + return fmt.Errorf("Unexpected file mode %v", fm) + } + + // Write the header first to the archive. + if err := tarW.WriteHeader(header); err != nil { + return fmt.Errorf("Failed writing archive header for file %q: %v", path, err) + } + + // Account for the file in the list. + meta.Files = append(meta.Files, header.Name) + + // Skip writing file data for certain file types (above). + if !writeBody { + return nil + } + + f, err := os.Open(path) + if err != nil { + return fmt.Errorf("Failed opening file %q for archiving: %v", path, err) + } + defer f.Close() + + size, err := io.Copy(tarW, f) + if err != nil { + return fmt.Errorf("Failed copying file %q to archive: %v", path, err) + } + + // Add the size we copied to the body. + meta.Size += size + + return nil + } +} + +// Unpack is used to read and extract the contents of a slug to +// the dst directory. Returns any errors. +func Unpack(r io.Reader, dst string) error { + // Decompress as we read. + uncompressed, err := gzip.NewReader(r) + if err != nil { + return fmt.Errorf("Failed to uncompress slug: %v", err) + } + + // Untar as we read. + untar := tar.NewReader(uncompressed) + + // Unpackage all the contents into the directory. + for { + header, err := untar.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("Failed to untar slug: %v", err) + } + + // Get rid of absolute paths. + path := header.Name + if path[0] == '/' { + path = path[1:] + } + path = filepath.Join(dst, path) + + // Make the directories to the path. + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("Failed to create directory %q: %v", dir, err) + } + + // If we have a symlink, just link it. + if header.Typeflag == tar.TypeSymlink { + if err := os.Symlink(header.Linkname, path); err != nil { + return fmt.Errorf("Failed creating symlink %q => %q: %v", + path, header.Linkname, err) + } + continue + } + + // Only unpack regular files from this point on. + if header.Typeflag == tar.TypeDir { + continue + } else if header.Typeflag != tar.TypeReg && header.Typeflag != tar.TypeRegA { + return fmt.Errorf("Failed creating %q: unsupported type %c", path, + header.Typeflag) + } + + // Open a handle to the destination. + fh, err := os.Create(path) + if err != nil { + // This mimics tar's behavior wrt the tar file containing duplicate files + // and it allowing later ones to clobber earlier ones even if the file + // has perms that don't allow overwriting. 
+ if os.IsPermission(err) { + os.Chmod(path, 0600) + fh, err = os.Create(path) + } + + if err != nil { + return fmt.Errorf("Failed creating file %q: %v", path, err) + } + } + + // Copy the contents. + _, err = io.Copy(fh, untar) + fh.Close() + if err != nil { + return fmt.Errorf("Failed to copy slug file %q: %v", path, err) + } + + // Restore the file mode. We have to do this after writing the file, + // since it is possible we have a read-only mode. + mode := header.FileInfo().Mode() + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("Failed setting permissions on %q: %v", path, err) + } + } + return nil +} + +// checkFileMode is used to examine an os.FileMode and determine if it should +// be included in the archive, and if it has a data body which needs writing. +func checkFileMode(m os.FileMode) (keep, body bool) { + switch { + case m.IsDir(): + return true, false + + case m.IsRegular(): + return true, true + + case m&os.ModeSymlink != 0: + return true, false + } + + return false, false +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile new file mode 100644 index 00000000..0f3ae166 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/GNUmakefile @@ -0,0 +1,65 @@ +TOOLS= golang.org/x/tools/cover +GOCOVER_TMPFILE?= $(GOCOVER_FILE).tmp +GOCOVER_FILE?= .cover.out +GOCOVERHTML?= coverage.html +FIND=`/usr/bin/which 2> /dev/null gfind find | /usr/bin/grep -v ^no | /usr/bin/head -n 1` +XARGS=`/usr/bin/which 2> /dev/null gxargs xargs | /usr/bin/grep -v ^no | /usr/bin/head -n 1` + +test:: $(GOCOVER_FILE) + @$(MAKE) -C cmd/sockaddr test + +cover:: coverage_report + +$(GOCOVER_FILE):: + @${FIND} . -type d ! -path '*cmd*' ! -path '*.git*' -print0 | ${XARGS} -0 -I % sh -ec "cd % && rm -f $(GOCOVER_TMPFILE) && go test -coverprofile=$(GOCOVER_TMPFILE)" + + @echo 'mode: set' > $(GOCOVER_FILE) + @${FIND} . -type f ! -path '*cmd*' ! -path '*.git*' -name "$(GOCOVER_TMPFILE)" -print0 | ${XARGS} -0 -n1 cat $(GOCOVER_TMPFILE) | grep -v '^mode: ' >> ${PWD}/$(GOCOVER_FILE) + +$(GOCOVERHTML): $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) -o $(GOCOVERHTML) + +coverage_report:: $(GOCOVER_FILE) + go tool cover -html=$(GOCOVER_FILE) + +audit_tools:: + @go get -u github.com/golang/lint/golint && echo "Installed golint:" + @go get -u github.com/fzipp/gocyclo && echo "Installed gocyclo:" + @go get -u github.com/remyoudompheng/go-misc/deadcode && echo "Installed deadcode:" + @go get -u github.com/client9/misspell/cmd/misspell && echo "Installed misspell:" + @go get -u github.com/gordonklaus/ineffassign && echo "Installed ineffassign:" + +audit:: + deadcode + go tool vet -all *.go + go tool vet -shadow=true *.go + golint *.go + ineffassign . 
+ gocyclo -over 65 *.go + misspell *.go + +clean:: + rm -f $(GOCOVER_FILE) $(GOCOVERHTML) + +dev:: + @go build + @$(MAKE) -B -C cmd/sockaddr sockaddr + +install:: + @go install + @$(MAKE) -C cmd/sockaddr install + +doc:: + @echo Visit: http://127.0.0.1:6161/pkg/github.com/hashicorp/go-sockaddr/ + godoc -http=:6161 -goroot $GOROOT + +world:: + @set -e; \ + for os in solaris darwin freebsd linux windows android; do \ + for arch in amd64; do \ + printf "Building on %s-%s\n" "$${os}" "$${arch}" ; \ + env GOOS="$${os}" GOARCH="$${arch}" go build -o /dev/null; \ + done; \ + done + + $(MAKE) -C cmd/sockaddr world diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. 
You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-sockaddr/README.md b/vendor/github.com/hashicorp/go-sockaddr/README.md new file mode 100644 index 00000000..a2e170ae --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/README.md @@ -0,0 +1,118 @@ +# go-sockaddr + +## `sockaddr` Library + +Socket address convenience functions for Go. `go-sockaddr` is a convenience +library that makes doing the right thing with IP addresses easy. 
`go-sockaddr` +is loosely modeled after the UNIX `sockaddr_t` and creates a union of the family +of `sockaddr_t` types (see below for an ascii diagram). Library documentation +is available +at +[https://godoc.org/github.com/hashicorp/go-sockaddr](https://godoc.org/github.com/hashicorp/go-sockaddr). +The primary intent of the library was to make it possible to define heuristics +for selecting the correct IP addresses when a configuration is evaluated at +runtime. See +the +[docs](https://godoc.org/github.com/hashicorp/go-sockaddr), +[`template` package](https://godoc.org/github.com/hashicorp/go-sockaddr/template), +tests, +and +[CLI utility](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr) +for details and hints as to how to use this library. + +For example, with this library it is possible to find an IP address that: + +* is attached to a default route + ([`GetDefaultInterfaces()`](https://godoc.org/github.com/hashicorp/go-sockaddr#GetDefaultInterfaces)) +* is contained within a CIDR block ([`IfByNetwork()`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByNetwork)) +* is an RFC1918 address + ([`IfByRFC("1918")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC)) +* is ordered + ([`OrderedIfAddrBy(args)`](https://godoc.org/github.com/hashicorp/go-sockaddr#OrderedIfAddrBy) where + `args` includes, but is not limited + to, + [`AscIfType`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscIfType), + [`AscNetworkSize`](https://godoc.org/github.com/hashicorp/go-sockaddr#AscNetworkSize)) +* excludes all IPv6 addresses + ([`IfByType("^(IPv4)$")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByType)) +* is larger than a `/32` + ([`IfByMaskSize(32)`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByMaskSize)) +* is not on a `down` interface + ([`ExcludeIfs("flags", "down")`](https://godoc.org/github.com/hashicorp/go-sockaddr#ExcludeIfs)) +* preferences an IPv6 address over an IPv4 address + ([`SortIfByType()`](https://godoc.org/github.com/hashicorp/go-sockaddr#SortIfByType) + + [`ReverseIfAddrs()`](https://godoc.org/github.com/hashicorp/go-sockaddr#ReverseIfAddrs)); and +* excludes any IP in RFC6890 address + ([`IfByRFC("6890")`](https://godoc.org/github.com/hashicorp/go-sockaddr#IfByRFC)) + +Or any combination or variation therein. + +There are also a few simple helper functions such as `GetPublicIP` and +`GetPrivateIP` which both return strings and select the first public or private +IP address on the default interface, respectively. Similarly, there is also a +helper function called `GetInterfaceIP` which returns the first usable IP +address on the named interface. + +## `sockaddr` CLI + +Given the possible complexity of the `sockaddr` library, there is a CLI utility +that accompanies the library, also +called +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr). +The +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr) +utility exposes nearly all of the functionality of the library and can be used +either as an administrative tool or testing tool. 
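The filtering pipelines described above can also be evaluated from Go rather than the CLI, via the library's `template` package mentioned earlier. A minimal sketch, assuming the canonical `github.com/hashicorp/go-sockaddr/template` import path (the vendored path used in this diff differs) and the `Parse` helper documented at the godoc link above:

```go
package main

import (
	"fmt"
	"log"

	template "github.com/hashicorp/go-sockaddr/template"
)

func main() {
	// Evaluate the same pipeline `sockaddr eval` would run: the first
	// private (RFC 6890) address on a default-route interface.
	out, err := template.Parse(`{{GetPrivateInterfaces | attr "address"}}`)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```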
To install +the +[`sockaddr`](https://github.com/hashicorp/go-sockaddr/tree/master/cmd/sockaddr), +run: + +```text +$ go get -u github.com/hashicorp/go-sockaddr/cmd/sockaddr +``` + +If you're familiar with UNIX's `sockaddr` struct's, the following diagram +mapping the C `sockaddr` (top) to `go-sockaddr` structs (bottom) and +interfaces will be helpful: + +``` ++-------------------------------------------------------+ +| | +| sockaddr | +| SockAddr | +| | +| +--------------+ +----------------------------------+ | +| | sockaddr_un | | | | +| | SockAddrUnix | | sockaddr_in{,6} | | +| +--------------+ | IPAddr | | +| | | | +| | +-------------+ +--------------+ | | +| | | sockaddr_in | | sockaddr_in6 | | | +| | | IPv4Addr | | IPv6Addr | | | +| | +-------------+ +--------------+ | | +| | | | +| +----------------------------------+ | +| | ++-------------------------------------------------------+ +``` + +## Inspiration and Design + +There were many subtle inspirations that led to this design, but the most direct +inspiration for the filtering syntax was +OpenBSD's +[`pf.conf(5)`](https://www.freebsd.org/cgi/man.cgi?query=pf.conf&apropos=0&sektion=0&arch=default&format=html#PARAMETERS) firewall +syntax that lets you select the first IP address on a given named interface. +The original problem stemmed from: + +* needing to create immutable images using [Packer](https://www.packer.io) that + ran the [Consul](https://www.consul.io) process (Consul can only use one IP + address at a time); +* images that may or may not have multiple interfaces or IP addresses at + runtime; and +* we didn't want to rely on configuration management to render out the correct + IP address if the VM image was being used in an auto-scaling group. + +Instead we needed some way to codify a heuristic that would correctly select the +right IP address but the input parameters were not known when the image was +created. diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go new file mode 100644 index 00000000..90671deb --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go @@ -0,0 +1,5 @@ +/* +Package sockaddr is a Go implementation of the UNIX socket family data types and +related helper functions. 
+*/ +package sockaddr diff --git a/vendor/github.com/hashicorp/go-sockaddr/go.mod b/vendor/github.com/hashicorp/go-sockaddr/go.mod new file mode 100644 index 00000000..21f8d8e8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/go.mod @@ -0,0 +1,8 @@ +module github.com/hashicorp/go-sockaddr + +require ( + github.com/hashicorp/errwrap v1.0.0 + github.com/mitchellh/cli v1.0.0 + github.com/mitchellh/go-wordwrap v1.0.0 + github.com/ryanuber/columnize v2.1.0+incompatible +) diff --git a/vendor/github.com/hashicorp/go-sockaddr/go.sum b/vendor/github.com/hashicorp/go-sockaddr/go.sum new file mode 100644 index 00000000..1b2bdd48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/go.sum @@ -0,0 +1,24 @@ +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc h1:MeuS1UDyZyFH++6vVy44PuufTeFF0d0nfI6XB87YGSk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go new file mode 100644 index 00000000..0811b275 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go @@ -0,0 +1,254 @@ +package sockaddr + +import "strings" + +// ifAddrAttrMap is a 
map of the IfAddr type-specific attributes.
+var ifAddrAttrMap map[AttrName]func(IfAddr) string
+var ifAddrAttrs []AttrName
+
+func init() {
+	ifAddrAttrInit()
+}
+
+// GetPrivateIP returns a string with a single IP address that is part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find an RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
+// ```
+func GetPrivateIP() (string, error) {
+	privateIfs, err := GetPrivateInterfaces()
+	if err != nil {
+		return "", err
+	}
+	if len(privateIfs) < 1 {
+		return "", nil
+	}
+
+	ifAddr := privateIfs[0]
+	ip := *ToIPAddr(ifAddr.SockAddr)
+	return ip.NetIP().String(), nil
+}
+
+// GetPrivateIPs returns a string with all IP addresses that are part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPrivateIP). If the system can't find any RFC 6890 IP addresses, an empty
+// string will be returned instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPrivateIPs() (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) < 1 {
+		return "", nil
+	}
+
+	ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+	ifAddrs, _, err = IfByRFC("6890", ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	_, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// GetPublicIP returns a string with a single IP address that is NOT part of
+// RFC 6890 and has a default route. If the system can't determine its IP
+// address or find a non RFC 6890 IP address, an empty string will be returned
+// instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}'
+// ```
+func GetPublicIP() (string, error) {
+	publicIfs, err := GetPublicInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(publicIfs) < 1 {
+		return "", nil
+	}
+
+	ifAddr := publicIfs[0]
+	ip := *ToIPAddr(ifAddr.SockAddr)
+	return ip.NetIP().String(), nil
+}
+
+// GetPublicIPs returns a string with all IP addresses that are NOT part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an
+// empty string will be returned instead. This function is the `eval`
+// equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPublicIPs() (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) < 1 {
+		return "", nil
+	}
+
+	ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+	_, ifAddrs, err = IfByRFC("6890", ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// GetInterfaceIP returns a string with a single IP address sorted by the size
+// of the network (i.e. IP addresses with a smaller netmask, larger network
+// size, are sorted first). This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | include "flag" "forwardable" | attr "address" }}'
+// ```
+func GetInterfaceIP(namedIfRE string) (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByFlag("forwardable", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	if len(ifAddrs) == 0 {
+		return "", err
+	}
+
+	ip := ToIPAddr(ifAddrs[0].SockAddr)
+	if ip == nil {
+		return "", err
+	}
+
+	return IPAddrAttr(*ip, "address"), nil
+}
+
+// GetInterfaceIPs returns a string with all IPs, sorted by the size of the
+// network (i.e. IP addresses with a smaller netmask, larger network size, are
+// sorted first), on a named interface. This function is the `eval` equivalent
+// of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <<ARG>> | sort "type,size" | join "address" " "}}'
+// ```
+func GetInterfaceIPs(namedIfRE string) (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	if len(ifAddrs) == 0 {
+		return "", err
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// IfAddrAttrs returns a list of attributes supported by the IfAddr type
+func IfAddrAttrs() []AttrName {
+	return ifAddrAttrs
+}
+
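Taken together, the three `Get*IP` helpers above cover the common "just give me an address" cases. A minimal usage sketch, assuming the canonical import path rather than the vendored one; the interface name `eth0` is only an example:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// First RFC 6890 (private) address on a default-route interface.
	private, err := sockaddr.GetPrivateIP()
	if err != nil {
		log.Fatal(err)
	}

	// First non-RFC 6890 (public) address, if any.
	public, err := sockaddr.GetPublicIP()
	if err != nil {
		log.Fatal(err)
	}

	// First usable address on a named interface; "eth0" is an assumption.
	ifIP, err := sockaddr.GetInterfaceIP("eth0")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("private=%q public=%q eth0=%q\n", private, public, ifIP)
}
```

Each helper returns an empty string (and no error) when nothing matches, so callers can treat "" as "no suitable address found".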
+// IfAddrAttr returns a string representation of an attribute for the given
+// IfAddr.
+func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string {
+	fn, found := ifAddrAttrMap[attrName]
+	if !found {
+		return ""
+	}
+
+	return fn(ifAddr)
+}
+
+// ifAddrAttrInit is called once at init()
+func ifAddrAttrInit() {
+	// Sorted for human readability
+	ifAddrAttrs = []AttrName{
+		"flags",
+		"name",
+	}
+
+	ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{
+		"flags": func(ifAddr IfAddr) string {
+			return ifAddr.Interface.Flags.String()
+		},
+		"name": func(ifAddr IfAddr) string {
+			return ifAddr.Interface.Name
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
new file mode 100644
index 00000000..80f61bef
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
@@ -0,0 +1,1304 @@
+package sockaddr
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	// Centralize all regexps and regexp.Copy() where necessary.
+	signRE       *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`)
+	whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`)
+	ifNameRE     *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`)
+	ipAddrRE     *regexp.Regexp = regexp.MustCompile(`^   IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`)
+)
+
+// IfAddrs is a slice of IfAddr
+type IfAddrs []IfAddr
+
+func (ifs IfAddrs) Len() int { return len(ifs) }
+
+// CmpIfAddrFunc is the function signature that must be met to be used in the
+// OrderedIfAddrBy multiIfAddrSorter
+type CmpIfAddrFunc func(p1, p2 *IfAddr) int
+
+// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
+type multiIfAddrSorter struct {
+	ifAddrs IfAddrs
+	cmp     []CmpIfAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedIfAddrBy.
+func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
+	ms.ifAddrs = ifAddrs
+	sort.Sort(ms)
+}
+
+// OrderedIfAddrBy sorts IfAddrs by the list of sort function pointers.
+func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
+	return &multiIfAddrSorter{
+		cmp: cmpFuncs,
+	}
+}
+
+// Len is part of sort.Interface.
+func (ms *multiIfAddrSorter) Len() int {
+	return len(ms.ifAddrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the Cmp()
+// functions until it finds a comparison that is either less than or greater
+// than. A return value of 0 defers sorting to the next function in the
+// multisorter (which means the sort may leave the results in a
+// non-deterministic order).
+func (ms *multiIfAddrSorter) Less(i, j int) bool {
+	p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
+	// Try all but the last comparison.
+	var k int
+	for k = 0; k < len(ms.cmp)-1; k++ {
+		cmp := ms.cmp[k]
+		x := cmp(p, q)
+		switch x {
+		case -1:
+			// p < q, so we have a decision.
+			return true
+		case 1:
+			// p > q, so we have a decision.
+			return false
+		}
+		// p == q; try the next comparison.
+	}
+	// All comparisons to here said "equal", so just return whatever the
+	// final comparison reports.
+	switch ms.cmp[k](p, q) {
+	case -1:
+		return true
+	case 1:
+		return false
+	default:
+		// Still a tie! Treat the operands as equal and leave them in
+		// their current order.
+		return false
+	}
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiIfAddrSorter) Swap(i, j int) {
+	ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
+}
+
+// AscIfAddress is a sorting function to sort IfAddrs by their respective
+// addresses. Non-equal types are deferred in the sort.
+func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+	return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
+// have a default route. Non-equal types are deferred in the sort.
+//
+// FIXME: This is a particularly expensive sorting operation because of the
+// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
+// would be gathered once at the start of the sort and passed along as a
+// context or by wrapping the IfAddr type with this information (this would
+// also solve the inability to return errors and the possibility of failing
+// silently). Fortunately, N*log(N) where N = 3 is only ~6.2 invocations. Not
+// ideal, but not worth optimizing today. The common case is this gets called
+// once or twice. Patches welcome.
+func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+	ri, err := NewRouteInfo()
+	if err != nil {
+		return sortDeferDecision
+	}
+
+	defaultIfName, err := ri.GetDefaultInterfaceName()
+	if err != nil {
+		return sortDeferDecision
+	}
+
+	switch {
+	case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName:
+		return sortDeferDecision
+	case p1Ptr.Interface.Name == defaultIfName:
+		return sortReceiverBeforeArg
+	case p2Ptr.Interface.Name == defaultIfName:
+		return sortArgBeforeReceiver
+	default:
+		return sortDeferDecision
+	}
+}
+
+// AscIfName is a sorting function to sort IfAddrs by their interface names.
+func AscIfName(p1Ptr, p2Ptr *IfAddr) int {
+	return strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective
+// network mask size.
+func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+	return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPort is a sorting function to sort IfAddrs by their respective
+// port type. Non-equal types are deferred in the sort.
+func AscIfPort(p1Ptr, p2Ptr *IfAddr) int {
+	return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPrivate is a sorting function to sort IfAddrs by "private" values
+// before "public" values. Both IPv4 and IPv6 are compared against RFC6890
+// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and
+// IPv6 includes RFC4193).
+func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+	return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfType is a sorting function to sort IfAddrs by their respective address
+// type. Non-equal types are deferred in the sort.
+func AscIfType(p1Ptr, p2Ptr *IfAddr) int {
+	return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfAddress is identical to AscIfAddress but reverse ordered.
+func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfDefault is identical to AscIfDefault but reverse ordered.
+func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscIfDefault(p1Ptr, p2Ptr)
+}
+
+// DescIfName is identical to AscIfName but reverse ordered.
+func DescIfName(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
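The comparators above are designed to be composed: `OrderedIfAddrBy` tries each function in turn and only falls through to the next one on a tie, which is how pipelines like `sort "default,type,size"` are assembled. A short sketch of sorting every interface address by type and then by network size (canonical import path assumed, not the vendored one):

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetAllInterfaces()
	if err != nil {
		log.Fatal(err)
	}

	// Ties on address type fall through to the network-size comparison.
	sockaddr.OrderedIfAddrBy(sockaddr.AscIfType, sockaddr.AscIfNetworkSize).Sort(ifAddrs)

	for _, ifAddr := range ifAddrs {
		fmt.Println(ifAddr.Name, ifAddr.SockAddr.String())
	}
}
```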
+// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered.
+func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPort is identical to AscIfPort but reverse ordered.
+func DescIfPort(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPrivate is identical to AscIfPrivate but reverse ordered.
+func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfType is identical to AscIfType but reverse ordered.
+func DescIfType(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// FilterIfByType filters IfAddrs and returns a list of the matching type
+func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) {
+	excludedIfs = make(IfAddrs, 0, len(ifAddrs))
+	matchedIfs = make(IfAddrs, 0, len(ifAddrs))
+
+	for _, ifAddr := range ifAddrs {
+		if ifAddr.SockAddr.Type()&type_ != 0 {
+			matchedIfs = append(matchedIfs, ifAddr)
+		} else {
+			excludedIfs = append(excludedIfs, ifAddr)
+		}
+	}
+	return matchedIfs, excludedIfs
+}
+
+// IfAttr forwards the selector to IfAddr.Attr() for resolution.
+func IfAttr(selectorName string, ifAddr IfAddr) (string, error) {
+	attrName := AttrName(strings.ToLower(selectorName))
+	attrVal, err := ifAddr.Attr(attrName)
+	return attrVal, err
+}
+
+// IfAttrs forwards the selector to IfAddrs.Attr() for resolution. If there is
+// more than one IfAddr, only the first IfAddr is used.
+func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) {
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	attrName := AttrName(strings.ToLower(selectorName))
+	attrVal, err := ifAddrs[0].Attr(attrName)
+	return attrVal, err
+}
+
+// GetAllInterfaces iterates over all available network interfaces and finds
+// all available IP addresses on each interface, converting them to
+// sockaddr.IPAddrs and returning the result as an array of IfAddr.
+func GetAllInterfaces() (IfAddrs, error) {
+	ifs, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	ifAddrs := make(IfAddrs, 0, len(ifs))
+	for _, intf := range ifs {
+		addrs, err := intf.Addrs()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, addr := range addrs {
+			var ipAddr IPAddr
+			ipAddr, err = NewIPAddr(addr.String())
+			if err != nil {
+				return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String())
+			}
+
+			ifAddr := IfAddr{
+				SockAddr:  ipAddr,
+				Interface: intf,
+			}
+			ifAddrs = append(ifAddrs, ifAddr)
+		}
+	}
+
+	return ifAddrs, nil
+}
+
+// GetDefaultInterfaces returns IfAddrs of the addresses attached to the
+// default route.
+func GetDefaultInterfaces() (IfAddrs, error) {
+	ri, err := NewRouteInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	defaultIfName, err := ri.GetDefaultInterfaceName()
+	if err != nil {
+		return nil, err
+	}
+
+	var defaultIfs, ifAddrs IfAddrs
+	ifAddrs, err = GetAllInterfaces()
+	if err != nil {
+		return nil, err
+	}
+	for _, ifAddr := range ifAddrs {
+		if ifAddr.Name == defaultIfName {
+			defaultIfs = append(defaultIfs, ifAddr)
+		}
+	}
+
+	return defaultIfs, nil
+}
+
+// GetPrivateInterfaces returns an IfAddrs that are part of RFC 6890 and have a
+// default route. If the system can't determine its IP address or find an RFC
+// 6890 IP address, an empty IfAddrs will be returned instead. This function is
+// the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}'
+// ```
+func GetPrivateInterfaces() (IfAddrs, error) {
+	privateIfs, err := GetAllInterfaces()
+	if err != nil {
+		return IfAddrs{}, err
+	}
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	privateIfs, _ = FilterIfByType(privateIfs, TypeIP)
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	privateIfs, _, err = IfByFlag("forwardable", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	privateIfs, _, err = IfByFlag("up", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs)
+
+	privateIfs, _, err = IfByRFC("6890", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	} else if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	return privateIfs, nil
+}
+
+// GetPublicInterfaces returns an IfAddrs that are NOT part of RFC 6890 and
+// have a default route. If the system can't determine its IP address or find
+// a non RFC 6890 IP address, an empty IfAddrs will be returned instead. This
+// function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}'
+// ```
+func GetPublicInterfaces() (IfAddrs, error) {
+	publicIfs, err := GetAllInterfaces()
+	if err != nil {
+		return IfAddrs{}, err
+	}
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	publicIfs, _ = FilterIfByType(publicIfs, TypeIP)
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	publicIfs, _, err = IfByFlag("forwardable", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	publicIfs, _, err = IfByFlag("up", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs)
+
+	_, publicIfs, err = IfByRFC("6890", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	} else if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	return publicIfs, nil
+}
+
+// IfByAddress returns a list of matched and non-matched IfAddrs, or an error
+// if the regexp fails to compile.
+func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+	re, err := regexp.Compile(inputRe)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err)
+	}
+
+	matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+	excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+	for _, addr := range ifAddrs {
+		if re.MatchString(addr.SockAddr.String()) {
+			matchedAddrs = append(matchedAddrs, addr)
+		} else {
+			excludedAddrs = append(excludedAddrs, addr)
+		}
+	}
+
+	return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByName returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
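All of the `IfBy*` filters in this file share the same `(matched, remainder, err)` shape, so they chain naturally. A hedged sketch that narrows the full interface list down to up, RFC 1918 addresses on interfaces matching a name pattern (the regexp `^eth` and the canonical import path are the only assumptions here):

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetAllInterfaces()
	if err != nil {
		log.Fatal(err)
	}

	// Keep only interfaces whose name matches the regexp "^eth".
	ifAddrs, _, err = sockaddr.IfByName("^eth", ifAddrs)
	if err != nil {
		log.Fatal(err)
	}

	// Keep only addresses on interfaces that are up.
	ifAddrs, _, err = sockaddr.IfByFlag("up", ifAddrs)
	if err != nil {
		log.Fatal(err)
	}

	// Keep only RFC 1918 private addresses.
	ifAddrs, _, err = sockaddr.IfByRFC("1918", ifAddrs)
	if err != nil {
		log.Fatal(err)
	}

	for _, ifAddr := range ifAddrs {
		fmt.Println(ifAddr.Name, ifAddr.SockAddr.String())
	}
}
```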
+func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.Name) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByPort returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + continue + } + + port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10) + if re.MatchString(port) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByRFC returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. +func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + inputRFC, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err) + } + + matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[uint(inputRFC)] + if !ok { + return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC) + } + + for _, ifAddr := range ifAddrs { + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(ifAddr.SockAddr) { + matchedIfAddrs = append(matchedIfAddrs, ifAddr) + contained = true + break + } + } + if !contained { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + + return matchedIfAddrs, remainingIfAddrs, nil +} + +// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. Multiple RFCs can be specified and separated +// by the `|` symbol. No protection is taken to ensure an IfAddr does not end +// up in both the included and excluded list. +func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + var includedIfs, excludedIfs IfAddrs + for _, rfcStr := range strings.Split(selectorParam, "|") { + includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err) + } + includedIfs = append(includedIfs, includedRFCIfs...) + excludedIfs = append(excludedIfs, excludedRFCIfs...) + } + + return includedIfs, excludedIfs, nil +} + +// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the +// matching mask size. 
+func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + maskSize, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String()) + } + + switch { + case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize) + case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128: + return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize) + } + + if (*ipAddr).Maskbits() == int(maskSize) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByType returns a list of matching and non-matching IfAddr that match the +// specified type. For instance: +// +// include "type" "IPv4,IPv6" +// +// will include any IfAddrs that is either an IPv4 or IPv6 address. Any +// addresses on those interfaces that don't match will be included in the +// remainder results. +func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + ifTypes := strings.Split(strings.ToLower(inputTypes), "|") + for _, ifType := range ifTypes { + switch ifType { + case "ip", "ipv4", "ipv6", "unix": + // Valid types + default: + return nil, nil, fmt.Errorf("unsupported type %q %q", ifType, inputTypes) + } + } + + for _, ifAddr := range ifAddrs { + for _, ifType := range ifTypes { + var matched bool + switch { + case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0: + matched = true + case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0: + matched = true + case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0: + matched = true + case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0: + matched = true + } + + if matched { + matchingIfAddrs = append(matchingIfAddrs, ifAddr) + } else { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + } + + return matchingIfAddrs, remainingIfAddrs, nil +} + +// IfByFlag returns a list of matching and non-matching IfAddrs that match the +// specified type. For instance: +// +// include "flag" "up,broadcast" +// +// will include any IfAddrs that have both the "up" and "broadcast" flags set. +// Any addresses on those interfaces that don't match will be omitted from the +// results. 
+func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + + var wantForwardable, + wantGlobalUnicast, + wantInterfaceLocalMulticast, + wantLinkLocalMulticast, + wantLinkLocalUnicast, + wantLoopback, + wantMulticast, + wantUnspecified bool + var ifFlags net.Flags + var checkFlags, checkAttrs bool + for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") { + switch flagName { + case "broadcast": + checkFlags = true + ifFlags = ifFlags | net.FlagBroadcast + case "down": + checkFlags = true + ifFlags = (ifFlags &^ net.FlagUp) + case "forwardable": + checkAttrs = true + wantForwardable = true + case "global unicast": + checkAttrs = true + wantGlobalUnicast = true + case "interface-local multicast": + checkAttrs = true + wantInterfaceLocalMulticast = true + case "link-local multicast": + checkAttrs = true + wantLinkLocalMulticast = true + case "link-local unicast": + checkAttrs = true + wantLinkLocalUnicast = true + case "loopback": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagLoopback + wantLoopback = true + case "multicast": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagMulticast + wantMulticast = true + case "point-to-point": + checkFlags = true + ifFlags = ifFlags | net.FlagPointToPoint + case "unspecified": + checkAttrs = true + wantUnspecified = true + case "up": + checkFlags = true + ifFlags = ifFlags | net.FlagUp + default: + return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName) + } + } + + for _, ifAddr := range ifAddrs { + var matched bool + if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags { + matched = true + } + if checkAttrs { + if ip := ToIPAddr(ifAddr.SockAddr); ip != nil { + netIP := (*ip).NetIP() + switch { + case wantGlobalUnicast && netIP.IsGlobalUnicast(): + matched = true + case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast(): + matched = true + case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast(): + matched = true + case wantLinkLocalUnicast && netIP.IsLinkLocalUnicast(): + matched = true + case wantLoopback && netIP.IsLoopback(): + matched = true + case wantMulticast && netIP.IsMulticast(): + matched = true + case wantUnspecified && netIP.IsUnspecified(): + matched = true + case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr): + matched = true + } + } + } + if matched { + matchedAddrs = append(matchedAddrs, ifAddr) + } else { + excludedAddrs = append(excludedAddrs, ifAddr) + } + } + return matchedAddrs, excludedAddrs, nil +} + +// IfByNetwork returns an IfAddrs that are equal to or included within the +// network passed in by selector. +func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) { + var includedIfs, excludedIfs IfAddrs + for _, netStr := range strings.Split(selectorParam, "|") { + netAddr, err := NewIPAddr(netStr) + if err != nil { + return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err) + } + + for _, ifAddr := range inputIfAddrs { + if netAddr.Contains(ifAddr.SockAddr) { + includedIfs = append(includedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + } + + return includedIfs, excludedIfs, nil +} + +// IfAddrMath will return a new IfAddr struct with a mutated value. 
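Because `IfAddrMath` below enforces a different grammar per operation (a leading sign is mandatory for "address" and "network" but rejected for "mask"), a small sketch may help; the address literal is an arbitrary example and the import path is the canonical one rather than the vendored path:

```go
package main

import (
	"fmt"
	"log"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	// Build an IfAddr by hand; both fields of IfAddr are embedded, so a
	// zero net.Interface is fine for pure address arithmetic.
	ipv4, err := sockaddr.NewIPv4Addr("192.168.1.10/24")
	if err != nil {
		log.Fatal(err)
	}
	ifAddr := sockaddr.IfAddr{SockAddr: ipv4}

	// "address" requires an explicit sign and wraps on the address width.
	next, err := sockaddr.IfAddrMath("address", "+1", ifAddr)
	if err != nil {
		log.Fatal(err)
	}

	// "network" offsets from the network address; "-1" wraps around to
	// the broadcast address.
	bcast, err := sockaddr.IfAddrMath("network", "-1", ifAddr)
	if err != nil {
		log.Fatal(err)
	}

	// "mask" takes an unsigned prefix length; a sign would be an error.
	masked, err := sockaddr.IfAddrMath("mask", "16", ifAddr)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(next, bcast, masked)
}
```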
+func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) { + // Regexp used to enforce the sign being a required part of the grammar for + // some values. + signRe := signRE.Copy() + + switch strings.ToLower(operation) { + case "address": + // "address" operates on the IP address and is allowed to overflow or + // underflow networks, however it will wrap along the underlying address's + // underlying type. + + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.Address) + ipv4Uint32 += uint32(i) + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. + i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigIntA := new(big.Int) + ipv6BigIntA.Set(ipv6.Address) + ipv6BigIntB := big.NewInt(i) + + ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB) + ipv6Addr.And(ipv6Addr, ipv6HostMask) + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6Addr), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "network": + // "network" operates on the network address. Positive values start at the + // network address and negative values wrap at the network address, which + // means a "-1" value on a network will be the broadcast address after + // wrapping is applied. + + if !signRe.MatchString(value) { + return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation) + } + + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + // 33 == Accept any uint32 value + // TODO(seanc@): Add the ability to parse hex + i, err := strconv.ParseInt(value, 10, 33) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + ipv4Uint32 := uint32(ipv4.NetworkAddress()) + + // Wrap along network mask boundaries. EZ-mode wrapping made possible by + // use of int64 vs a uint. + var wrappedMask int64 + if i >= 0 { + wrappedMask = i + } else { + wrappedMask = 1 + i + int64(^uint32(ipv4.Mask)) + } + + ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask)) + + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: ipv4.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + // 64 == Accept any int32 value + // TODO(seanc@): Add the ability to parse hex. Also parse a bignum int. 
+ i, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + ipv6BigInt := new(big.Int) + ipv6BigInt.Set(ipv6.NetworkAddress()) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + if i > 0 { + wrappedMask := new(big.Int) + wrappedMask.SetInt64(i) + + wrappedMask.AndNot(wrappedMask, mask) + ipv6BigInt.Add(ipv6BigInt, wrappedMask) + } else { + // Mask off any bits that exceed the network size. Subtract the + // wrappedMask from the last usable - 1 + wrappedMask := new(big.Int) + wrappedMask.SetInt64(-1 * i) + wrappedMask.Sub(wrappedMask, big.NewInt(1)) + + wrappedMask.AndNot(wrappedMask, mask) + + lastUsable := new(big.Int) + lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address) + + ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask) + } + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(ipv6BigInt), + Mask: ipv6.Mask, + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + case "mask": + // "mask" operates on the IP address and returns the IP address on + // which the given integer mask has been applied. If the applied mask + // corresponds to a larger network than the mask of the IP address, + // the latter will be replaced by the former. + switch sockType := inputIfAddr.SockAddr.Type(); sockType { + case TypeIPv4: + i, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + if i > 32 { + return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation) + } + + ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr) + + ipv4Mask := net.CIDRMask(int(i), 32) + ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask) + + maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask) + maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4) + + maskedIpv4MaskUint32 := uint32(ipv4.Mask) + + if ipv4MaskUint32 < maskedIpv4MaskUint32 { + maskedIpv4MaskUint32 = ipv4MaskUint32 + } + + return IfAddr{ + SockAddr: IPv4Addr{ + Address: IPv4Address(maskedIpv4Uint32), + Mask: IPv4Mask(maskedIpv4MaskUint32), + }, + Interface: inputIfAddr.Interface, + }, nil + case TypeIPv6: + i, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err) + } + + if i > 128 { + return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 128", operation) + } + + ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr) + + ipv6Mask := net.CIDRMask(int(i), 128) + ipv6MaskBigInt := new(big.Int) + ipv6MaskBigInt.SetBytes(ipv6Mask) + + maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask) + maskedIpv6BigInt := new(big.Int) + maskedIpv6BigInt.SetBytes(maskedIpv6) + + maskedIpv6MaskBigInt := new(big.Int) + maskedIpv6MaskBigInt.Set(ipv6.Mask) + + if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 { + maskedIpv6MaskBigInt = ipv6MaskBigInt + } + + return IfAddr{ + SockAddr: IPv6Addr{ + Address: IPv6Address(maskedIpv6BigInt), + Mask: IPv6Mask(maskedIpv6MaskBigInt), + }, + Interface: inputIfAddr.Interface, + }, nil + default: + return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType) + } + default: + return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation) + } +} + +// IfAddrsMath will apply an
IfAddrMath operation to each IfAddr struct. Any +// failure will result in zero results. +func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) { + outputAddrs := make(IfAddrs, 0, len(inputIfAddrs)) + for _, ifAddr := range inputIfAddrs { + result, err := IfAddrMath(operation, value, ifAddr) + if err != nil { + return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err) + } + outputAddrs = append(outputAddrs, result) + } + return outputAddrs, nil +} + +// IncludeIfs returns an IfAddrs based on the passed in selector. +func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var includedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + includedIfs, _, err = IfByName(selectorParam, inputIfAddrs) + case "network": + includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + includedIfs, _, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return includedIfs, nil +} + +// ExcludeIfs returns an IfAddrs based on the passed in selector. +func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + var excludedIfs IfAddrs + var err error + + switch strings.ToLower(selectorName) { + case "address": + _, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs) + case "flag", "flags": + _, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs) + case "name": + _, excludedIfs, err = IfByName(selectorParam, inputIfAddrs) + case "network": + _, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs) + case "port": + _, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs) + case "rfc", "rfcs": + _, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs) + case "size": + _, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs) + case "type": + _, excludedIfs, err = IfByType(selectorParam, inputIfAddrs) + default: + return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName) + } + + if err != nil { + return IfAddrs{}, err + } + + return excludedIfs, nil +} + +// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple +// sort clauses can be passed in as a comma delimited list without whitespace. +func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) { + sortedIfs := append(IfAddrs(nil), inputIfAddrs...) + + clauses := strings.Split(selectorParam, ",") + sortFuncs := make([]CmpIfAddrFunc, len(clauses)) + + for i, clause := range clauses { + switch strings.TrimSpace(strings.ToLower(clause)) { + case "+address", "address": + // The "address" selector returns an array of IfAddrs + // ordered by the network address. IfAddrs that are not + // comparable will be at the end of the list and in a + // non-deterministic order.
+ sortFuncs[i] = AscIfAddress + case "-address": + sortFuncs[i] = DescIfAddress + case "+default", "default": + sortFuncs[i] = AscIfDefault + case "-default": + sortFuncs[i] = DescIfDefault + case "+name", "name": + // The "name" selector returns an array of IfAddrs + // ordered by the interface name. + sortFuncs[i] = AscIfName + case "-name": + sortFuncs[i] = DescIfName + case "+port", "port": + // The "port" selector returns an array of IfAddrs + // ordered by the port, if included in the IfAddr. + // IfAddrs that are not comparable will be at the end of + // the list and in a non-deterministic order. + sortFuncs[i] = AscIfPort + case "-port": + sortFuncs[i] = DescIfPort + case "+private", "private": + // The "private" selector returns an array of IfAddrs + // ordered by private addresses first. IfAddrs that are + // not comparable will be at the end of the list and in + // a non-deterministic order. + sortFuncs[i] = AscIfPrivate + case "-private": + sortFuncs[i] = DescIfPrivate + case "+size", "size": + // The "size" selector returns an array of IfAddrs + // ordered by the size of the network mask, smaller mask + // (larger number of hosts per network) to largest + // (e.g. a /24 sorts before a /32). + sortFuncs[i] = AscIfNetworkSize + case "-size": + sortFuncs[i] = DescIfNetworkSize + case "+type", "type": + // The "type" selector returns an array of IfAddrs + // ordered by the type of the IfAddr. The sort order is + // Unix, IPv4, then IPv6. + sortFuncs[i] = AscIfType + case "-type": + sortFuncs[i] = DescIfType + default: + // Return an empty list for invalid sort types. + return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause) + } + } + + OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs) + + return sortedIfs, nil +} + +// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching +// selector. UniqueIfAddrsBy assumes the input has already been sorted. +func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) { + attrName := strings.ToLower(selectorName) + + ifs := make(IfAddrs, 0, len(inputIfAddrs)) + var lastMatch string + for _, ifAddr := range inputIfAddrs { + var out string + switch attrName { + case "address": + out = ifAddr.SockAddr.String() + case "name": + out = ifAddr.Name + default: + return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName) + } + + switch { + case lastMatch == "", lastMatch != out: + lastMatch = out + ifs = append(ifs, ifAddr) + case lastMatch == out: + continue + } + } + + return ifs, nil +} + +// JoinIfAddrs joins an IfAddrs and returns a string +func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) { + outputs := make([]string, 0, len(inputIfAddrs)) + attrName := AttrName(strings.ToLower(selectorName)) + + for _, ifAddr := range inputIfAddrs { + var attrVal string + var err error + attrVal, err = ifAddr.Attr(attrName) + if err != nil { + return "", err + } + outputs = append(outputs, attrVal) + } + return strings.Join(outputs, joinStr), nil +} + +// LimitIfAddrs returns a slice of IfAddrs based on the specified limit. +func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) { + // Clamp the limit to the length of the array + if int(lim) > len(in) { + lim = uint(len(in)) + } + + return in[0:lim], nil +} + +// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset. 
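The sort, unique, join, and limit helpers above compose into a small pipeline. A sketch, again assuming `GetAllInterfaces()` and the canonical `github.com/hashicorp/go-sockaddr` import path:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ifAddrs, err := sockaddr.GetAllInterfaces() // assumed helper
	if err != nil {
		panic(err)
	}

	// Comma-delimited clauses, exactly as SortIfBy parses them:
	// private addresses first, then by type, then by address.
	sorted, err := sockaddr.SortIfBy("+private,+type,+address", ifAddrs)
	if err != nil {
		panic(err)
	}

	// Dedup on the string form of the address; assumes sorted input.
	unique, err := sockaddr.UniqueIfAddrsBy("address", sorted)
	if err != nil {
		panic(err)
	}

	// Keep the first two results and join their "address" attributes.
	top, _ := sockaddr.LimitIfAddrs(2, unique)
	out, err := sockaddr.JoinIfAddrs("address", " ", top)
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```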
+func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) { + var end bool + if off < 0 { + end = true + off = off * -1 + } + + if off > len(in) { + return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in)) + } + + if end { + return in[len(in)-off:], nil + } + return in[off:], nil +} + +func (ifAddr IfAddr) String() string { + return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface) +} + +// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs +// and Solaris. +func parseDefaultIfNameFromRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + for _, line := range lines { + kvs := strings.SplitN(line, ":", 2) + if len(kvs) != 2 { + continue + } + + if strings.TrimSpace(kvs[0]) == "interface" { + ifName := strings.TrimSpace(kvs[1]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for +// Linux. +func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + for _, parsedLine := range parsedLines { + if parsedLine[0] == "default" && + parsedLine[1] == "via" && + parsedLine[3] == "dev" { + ifName := strings.TrimSpace(parsedLine[4]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmdAndroid parses the default interface from ip(8) for +// Android. +func parseDefaultIfNameFromIPCmdAndroid(routeOut string) (string, error) { + parsedLines := parseIfNameFromIPCmd(routeOut) + if (len(parsedLines) > 0) { + ifName := strings.TrimSpace(parsedLines[0][4]) + return ifName, nil + } + + return "", errors.New("No default interface found") +} + + +// parseIfNameFromIPCmd parses interfaces from ip(8) for +// Linux. +func parseIfNameFromIPCmd(routeOut string) [][]string { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + parsedLines := make([][]string, 0, len(lines)) + for _, line := range lines { + kvs := re.Split(line, -1) + if len(kvs) < 5 { + continue + } + parsedLines = append(parsedLines, kvs) + } + return parsedLines +} + +// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and +// `ipconfig` on Windows. +func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) { + defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut) + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut) + if err != nil { + return "", err + } + + return ifName, nil +} + +// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface +// `netstat -rn`. +// +// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an +// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with +// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6 +// support added. 
+func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + for _, line := range lines { + kvs := re.Split(strings.TrimSpace(line), -1) + if len(kvs) < 4 { + continue + } + + if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" { + defaultIPAddr := strings.TrimSpace(kvs[3]) + return defaultIPAddr, nil + } + } + + return "", errors.New("No IP on default interface found") +} + +// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the +// interface name forwarding traffic to the default gateway. +func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + ifNameRe := ifNameRE.Copy() + ipAddrRe := ipAddrRE.Copy() + var ifName string + for _, line := range lines { + switch ifNameMatches := ifNameRe.FindStringSubmatch(line); { + case len(ifNameMatches) > 1: + ifName = ifNameMatches[1] + continue + } + + switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); { + case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr: + return ifName, nil + } + } + + return "", errors.New("No default interface found with matching IP") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go new file mode 100644 index 00000000..6984cb4a --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go @@ -0,0 +1,65 @@ +package sockaddr + +import ( + "fmt" + "net" +) + +// IfAddr is a union of a SockAddr and a net.Interface. +type IfAddr struct { + SockAddr + net.Interface +} + +// Attr returns the named attribute as a string +func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) { + val := IfAddrAttr(ifAddr, attrName) + if val != "" { + return val, nil + } + + return Attr(ifAddr.SockAddr, attrName) +} + +// Attr returns the named attribute of the passed SockAddr as a string +func Attr(sa SockAddr, attrName AttrName) (string, error) { + switch sockType := sa.Type(); { + case sockType&TypeIP != 0: + ip := *ToIPAddr(sa) + attrVal := IPAddrAttr(ip, attrName) + if attrVal != "" { + return attrVal, nil + } + + if sockType == TypeIPv4 { + ipv4 := *ToIPv4Addr(sa) + attrVal := IPv4AddrAttr(ipv4, attrName) + if attrVal != "" { + return attrVal, nil + } + } else if sockType == TypeIPv6 { + ipv6 := *ToIPv6Addr(sa) + attrVal := IPv6AddrAttr(ipv6, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + case sockType == TypeUnix: + us := *ToUnixSock(sa) + attrVal := UnixSockAttr(us, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + // Non type-specific attributes + switch attrName { + case "string": + return sa.String(), nil + case "type": + return sa.Type().String(), nil + } + + return "", fmt.Errorf("unsupported attribute name %q", attrName) +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go new file mode 100644 index 00000000..b47d15c2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go @@ -0,0 +1,169 @@ +package sockaddr + +import ( + "fmt" + "math/big" + "net" + "strings" +) + +// Constants for the sizes of IPv3, IPv4, and IPv6 address types. +const ( + IPv3len = 6 + IPv4len = 4 + IPv6len = 16 +) + +// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses, +// networks, and socket endpoints.
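The `Attr` lookup above cascades from the type-specific attribute maps down to the generic fallbacks ("string", "type"). A minimal sketch of querying attributes through the exported helper, under the canonical import path:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	sa := sockaddr.MustIPv4Addr("192.168.1.10/24")

	// "address" is served by the IPAddr attribute map defined just below;
	// "string" and "type" are the generic fallbacks at the bottom of Attr().
	for _, attr := range []sockaddr.AttrName{"address", "string", "type"} {
		val, err := sockaddr.Attr(sa, attr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s = %s\n", attr, val)
	}
}
```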
+type IPAddr interface { + SockAddr + AddressBinString() string + AddressHexString() string + Cmp(SockAddr) int + CmpAddress(SockAddr) int + CmpPort(SockAddr) int + FirstUsable() IPAddr + Host() IPAddr + IPPort() IPPort + LastUsable() IPAddr + Maskbits() int + NetIP() *net.IP + NetIPMask() *net.IPMask + NetIPNet() *net.IPNet + Network() IPAddr + Octets() []int +} + +// IPPort is the type for an IP port number for the TCP and UDP IP transports. +type IPPort uint16 + +// IPPrefixLen is a typed integer representing the prefix length for a given +// IPAddr. +type IPPrefixLen byte + +// ipAddrAttrMap is a map of the IPAddr type-specific attributes. +var ipAddrAttrMap map[AttrName]func(IPAddr) string +var ipAddrAttrs []AttrName + +func init() { + ipAddrInit() +} + +// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is +// not an IPv4 or an IPv6 address. +func NewIPAddr(addr string) (IPAddr, error) { + ipv4Addr, err := NewIPv4Addr(addr) + if err == nil { + return ipv4Addr, nil + } + + ipv6Addr, err := NewIPv6Addr(addr) + if err == nil { + return ipv6Addr, nil + } + + return nil, fmt.Errorf("invalid IPAddr %v", addr) +} + +// IPAddrAttr returns a string representation of an attribute for the given +// IPAddr. +func IPAddrAttr(ip IPAddr, selector AttrName) string { + fn, found := ipAddrAttrMap[selector] + if !found { + return "" + } + + return fn(ip) +} + +// IPAttrs returns a list of attributes supported by the IPAddr type +func IPAttrs() []AttrName { + return ipAddrAttrs +} + +// MustIPAddr is a helper method that must return an IPAddr or panic on invalid +// input. +func MustIPAddr(addr string) IPAddr { + ip, err := NewIPAddr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err)) + } + return ip +} + +// ipAddrInit is called once at init() +func ipAddrInit() { + // Sorted for human readability + ipAddrAttrs = []AttrName{ + "host", + "address", + "port", + "netmask", + "network", + "mask_bits", + "binary", + "hex", + "first_usable", + "last_usable", + "octets", + } + + ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{ + "address": func(ip IPAddr) string { + return ip.NetIP().String() + }, + "binary": func(ip IPAddr) string { + return ip.AddressBinString() + }, + "first_usable": func(ip IPAddr) string { + return ip.FirstUsable().String() + }, + "hex": func(ip IPAddr) string { + return ip.AddressHexString() + }, + "host": func(ip IPAddr) string { + return ip.Host().String() + }, + "last_usable": func(ip IPAddr) string { + return ip.LastUsable().String() + }, + "mask_bits": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.Maskbits()) + }, + "netmask": func(ip IPAddr) string { + switch v := ip.(type) { + case IPv4Addr: + ipv4Mask := IPv4Addr{ + Address: IPv4Address(v.Mask), + Mask: IPv4HostMask, + } + return ipv4Mask.String() + case IPv6Addr: + ipv6Mask := new(big.Int) + ipv6Mask.Set(v.Mask) + ipv6MaskAddr := IPv6Addr{ + Address: IPv6Address(ipv6Mask), + Mask: ipv6HostMask, + } + return ipv6MaskAddr.String() + default: + return fmt.Sprintf("<unsupported type %T>", ip) + } + }, + "network": func(ip IPAddr) string { + return ip.Network().NetIP().String() + }, + "octets": func(ip IPAddr) string { + octets := ip.Octets() + octetStrs := make([]string, 0, len(octets)) + for _, octet := range octets { + octetStrs = append(octetStrs, fmt.Sprintf("%d", octet)) + } + return strings.Join(octetStrs, " ") + }, + "port": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.IPPort()) + }, + } +} diff --git
a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go new file mode 100644 index 00000000..6eeb7ddd --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go @@ -0,0 +1,98 @@ +package sockaddr + +import "bytes" + +type IPAddrs []IPAddr + +func (s IPAddrs) Len() int { return len(s) } +func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used +// // by the routines in this package. The SortIPAddrsByCmp type is used to +// // sort IPAddrs by Cmp() +// type SortIPAddrsByCmp struct{ IPAddrs } + +// // Less reports whether the element with index i should sort before the +// // element with index j. +// func (s SortIPAddrsByCmp) Less(i, j int) bool { +// // Sort by Type, then address, then port number. +// return Less(s.IPAddrs[i], s.IPAddrs[j]) +// } + +// SortIPAddrsByNetworkSize is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsByNetworkSize type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsByNetworkSize struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsByNetworkSize) Less(i, j int) bool { + // Sort masks with a larger binary value (i.e. fewer hosts per network + // prefix) after masks with a smaller value (larger number of hosts per + // prefix). +	switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) { + case 0: + // Fall through to the second test if the net.IPMasks are the + // same. + break + case 1: + return true + case -1: + return false + default: + panic("bad, m'kay?") + } + + // Sort IPs based on the length (i.e. prefer IPv4 over IPv6). + iLen := len(*s.IPAddrs[i].NetIP()) + jLen := len(*s.IPAddrs[j].NetIP()) + if iLen != jLen { + return iLen > jLen + } + + // Sort IPs based on their network address from lowest to highest. + switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) { + case 0: + break + case 1: + return false + case -1: + return true + default: + panic("lol wut?") + } + + // If a host does not have a port set, it always sorts after hosts + // that have a port (e.g. a host with a /32 and port number is more + // specific and should sort first over a host with a /32 but no port + // set). + if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 { + return false + } + return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort() +} + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsBySpecificMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits() +} + +// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can +// be used by the routines in this package. The SortIPAddrsByBroadMaskLen +// type is used to sort IPAddrs by largest network (i.e. largest subnets +// first). +type SortIPAddrsByBroadMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j.
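These wrapper types plug directly into the standard library's `sort` package. A sketch using `SortIPAddrsBySpecificMaskLen`, which per its doc comment orders most specific network first:

```go
package main

import (
	"fmt"
	"sort"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.IPAddrs{
		sockaddr.MustIPv4Addr("10.0.0.0/8"),
		sockaddr.MustIPv4Addr("192.168.1.0/24"),
		sockaddr.MustIPv4Addr("172.16.0.0/12"),
	}

	// Longest (most specific) mask first: /24, /12, /8.
	sort.Sort(sockaddr.SortIPAddrsBySpecificMaskLen{IPAddrs: addrs})
	for _, a := range addrs {
		fmt.Println(a)
	}
}
```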
+func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits() +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go new file mode 100644 index 00000000..4d395dc9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go @@ -0,0 +1,516 @@ +package sockaddr + +import ( + "encoding/binary" + "fmt" + "net" + "regexp" + "strconv" + "strings" +) + +type ( + // IPv4Address is a named type representing an IPv4 address. + IPv4Address uint32 + + // IPv4Network is a named type representing an IPv4 network. + IPv4Network uint32 + + // IPv4Mask is a named type representing an IPv4 network mask. + IPv4Mask uint32 +) + +// IPv4HostMask is a constant represents a /32 IPv4 Address +// (i.e. 255.255.255.255). +const IPv4HostMask = IPv4Mask(0xffffffff) + +// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes. +var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string +var ipv4AddrAttrs []AttrName +var trailingHexNetmaskRE *regexp.Regexp + +// IPv4Addr implements a convenience wrapper around the union of Go's +// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements +// `sockaddr` when the the address family is set to AF_INET +// (i.e. `sockaddr_in`). +type IPv4Addr struct { + IPAddr + Address IPv4Address + Mask IPv4Mask + Port IPPort +} + +func init() { + ipv4AddrInit() + trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`) +} + +// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form +// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is +// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32` +// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port +// initialized to zero). ipv4Str can not be a hostname. +// +// NOTE: Many net.*() routines will initialize and return an IPv6 address. +// To create uint32 values from net.IP, always test to make sure the address +// returned can be converted to a 4 byte array using To4(). +func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) { + // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In + // particular, clients with the Barracuda VPN client will see something like: + // `192.168.3.51/00ffffff` as their IP address. + trailingHexNetmaskRe := trailingHexNetmaskRE.Copy() + if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil { + ipv4Str = ipv4Str[:match[0]] + } + + // Parse as an IPv4 CIDR + ipAddr, network, err := net.ParseCIDR(ipv4Str) + if err == nil { + ipv4 := ipAddr.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str) + } + + // If we see an IPv6 netmask, convert it to an IPv4 mask. 
+ netmaskSepPos := strings.LastIndexByte(ipv4Str, '/') + if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) { + netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8) + if err != nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err) + } else if netMask > 128 { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str) + } + + if netMask >= 96 { + // Convert the IPv6 netmask to an IPv4 netmask + network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8) + } + } + ipv4Addr := IPv4Addr{ + Address: IPv4Address(binary.BigEndian.Uint32(ipv4)), + Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)), + } + return ipv4Addr, nil + } + + // Attempt to parse ipv4Str as a /32 host with a port number. + tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str) + if err == nil { + ipv4 := tcpAddr.IP.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + Port: IPPort(tcpAddr.Port), + } + + return ipv4Addr, nil + } + + // Parse as a naked IPv4 address + ip := net.ParseIP(ipv4Str) + if ip != nil { + ipv4 := ip.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + } + return ipv4Addr, nil + } + + return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err) +} + +// AddressBinString returns a string with the IPv4Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressBinString() string { + return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2)) +} + +// AddressHexString returns a string with the IPv4Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressHexString() string { + return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16)) +} + +// Broadcast is an IPv4Addr-only method that returns the broadcast address of +// the network. +// +// NOTE: IPv6 only supports multicast, so this method only exists for +// IPv4Addr. +func (ipv4 IPv4Addr) Broadcast() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: IPv4Address(ipv4.BroadcastAddress()), + Mask: IPv4HostMask, + } +} + +// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast +// address. +func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is +// of a different type. +// - 1 If the argument should sort first. 
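A quick sketch of the three input shapes `NewIPv4Addr` accepts per the doc comment above (CIDR, host:port, and naked address):

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	for _, s := range []string{"1.2.3.4/24", "1.2.3.4:80", "1.2.3.4"} {
		ipv4, err := sockaddr.NewIPv4Addr(s)
		if err != nil {
			panic(err)
		}
		// Maskbits is 24 for the CIDR form and 32 for the two host
		// forms; only the host:port form carries a non-zero port.
		fmt.Printf("%-12s -> addr=%s mask=/%d port=%d\n",
			s, ipv4.NetIP(), ipv4.Maskbits(), ipv4.IPPort())
	}
}
```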
+func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return sortDeferDecision + } + + switch { + case ipv4.Address == ipv4b.Address: + return sortDeferDecision + case ipv4.Address < ipv4b.Address: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr, +// regardless of type. +// - 1 If the argument should sort first. +func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv4.Port == saPort: + return sortDeferDecision + case ipv4.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv4) + ipv4b, ok := sa.(IPv4Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, return -1 so that the receiver sorts before + // the non-IPv4 SockAddr. Conversely, if the receiver is not + // part of the RFC, punt on sorting and leave it for the next + // sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv4b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv4 IPv4Addr) Contains(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + return ipv4.ContainsNetwork(ipv4b) +} + +// ContainsAddress returns true if the IPv4Address is contained within the +// receiver. +func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool { + return IPv4Address(ipv4.NetworkAddress()) <= x && + IPv4Address(ipv4.BroadcastAddress()) >= x +} + +// ContainsNetwork returns true if the network from IPv4Addr is contained +// within the receiver. +func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool { + return ipv4.NetworkAddress() <= x.NetworkAddress() && + ipv4.BroadcastAddress() >= x.BroadcastAddress() +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialStreamArgs() will fail. 
See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. +func (ipv4 IPv4Addr) Equal(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + if ipv4.Port != ipv4b.Port { + return false + } + + if ipv4.Address != ipv4b.Address { + return false + } + + if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() { + return false + } + + return true +} + +// FirstUsable returns an IPv4Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "192.168.1.10/24" would +// return the address "192.168.1.1/24". +func (ipv4 IPv4Addr) FirstUsable() IPAddr { + addr := ipv4.NetworkAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the lower address. + if ipv4.Maskbits() < 31 { + addr++ + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// Host returns a copy of ipv4 with its mask set to /32 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv4 IPv4Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: ipv4.Address, + Mask: IPv4HostMask, + Port: ipv4.Port, + } +} + +// IPPort returns the Port number attached to the IPv4Addr +func (ipv4 IPv4Addr) IPPort() IPPort { + return ipv4.Port +} + +// LastUsable returns the last address before the broadcast address in a +// given network. +func (ipv4 IPv4Addr) LastUsable() IPAddr { + addr := ipv4.BroadcastAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the upper address. + if ipv4.Maskbits() < 31 { + addr-- + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv4Addr. For +// example, the Maskbits() of "192.168.1.1/24" would return 24. 
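The usable-range helpers above encode the /31 point-to-point convention directly. A minimal sketch of their behavior:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ipv4 := sockaddr.MustIPv4Addr("192.168.1.10/24")

	fmt.Println(ipv4.FirstUsable()) // 192.168.1.1
	fmt.Println(ipv4.LastUsable())  // 192.168.1.254
	fmt.Println(ipv4.Broadcast())   // 192.168.1.255

	// /31s are treated as point-to-point links: both addresses are usable.
	p2p := sockaddr.MustIPv4Addr("10.0.0.0/31")
	fmt.Println(p2p.FirstUsable(), p2p.LastUsable()) // 10.0.0.0 10.0.0.1
}
```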
+func (ipv4 IPv4Addr) Maskbits() int { + mask := make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask)) + maskOnes, _ := mask.Size() + return maskOnes +} + +// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on +// invalid input. +func MustIPv4Addr(addr string) IPv4Addr { + ipv4, err := NewIPv4Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err)) + } + return ipv4 +} + +// NetIP returns the address as a net.IP (address is always presized to +// IPv4). +func (ipv4 IPv4Addr) NetIP() *net.IP { + x := make(net.IP, IPv4len) + binary.BigEndian.PutUint32(x, uint32(ipv4.Address)) + return &x +} + +// NetIPMask create a new net.IPMask from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPMask() *net.IPMask { + ipv4Mask := net.IPMask{} + ipv4Mask = make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask)) + return &ipv4Mask +} + +// NetIPNet create a new net.IPNet from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPNet() *net.IPNet { + ipv4net := &net.IPNet{} + ipv4net.IP = make(net.IP, IPv4len) + binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress())) + ipv4net.Mask = *ipv4.NetIPMask() + return ipv4net +} + +// Network returns the network prefix or network address for a given network. +func (ipv4 IPv4Addr) Network() IPAddr { + return IPv4Addr{ + Address: IPv4Address(ipv4.NetworkAddress()), + Mask: ipv4.Mask, + } +} + +// NetworkAddress returns an IPv4Network of the IPv4Addr's network address. +func (ipv4 IPv4Addr) NetworkAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask)) +} + +// Octets returns a slice of the four octets in an IPv4Addr's Address. The +// order of the bytes is big endian. +func (ipv4 IPv4Addr) Octets() []int { + return []int{ + int(ipv4.Address >> 24), + int((ipv4.Address >> 16) & 0xff), + int((ipv4.Address >> 8) & 0xff), + int(ipv4.Address & 0xff), + } +} + +// String returns a string representation of the IPv4Addr +func (ipv4 IPv4Addr) String() string { + if ipv4.Port != 0 { + return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) + } + + if ipv4.Maskbits() == 32 { + return ipv4.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv4 +func (IPv4Addr) Type() SockAddrType { + return TypeIPv4 +} + +// IPv4AddrAttr returns a string representation of an attribute for the given +// IPv4Addr. 
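The accessors above round-trip between the packed uint32 representation and the standard library's `net` types. A short sketch:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ipv4 := sockaddr.MustIPv4Addr("192.168.1.10/24")

	fmt.Println(ipv4.Octets())   // [192 168 1 10], big endian
	fmt.Println(ipv4.Network())  // 192.168.1.0/24
	fmt.Println(ipv4.NetIPNet()) // 192.168.1.0/24 as a *net.IPNet
	fmt.Println(ipv4.String())   // 192.168.1.10/24 (port 0, mask below /32)
}
```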
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string { + fn, found := ipv4AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv4) +} + +// IPv4Attrs returns a list of attributes supported by the IPv4Addr type +func IPv4Attrs() []AttrName { + return ipv4AddrAttrs +} + +// ipv4AddrInit is called once at init() +func ipv4AddrInit() { + // Sorted for human readability + ipv4AddrAttrs = []AttrName{ + "size", // Same position as in IPv6 for output consistency + "broadcast", + "uint32", + } + + ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{ + "broadcast": func(ipv4 IPv4Addr) string { + return ipv4.Broadcast().String() + }, + "size": func(ipv4 IPv4Addr) string { + return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits())) + }, [extraction gap: the "<<" shift above was mis-parsed as an HTML tag in the source, which swallowed the rest of ipv4addr.go's attribute map (the "uint32" function) and the opening of vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go, up to the middle of NewIPv6Addr; the hunk resumes below mid-function] + if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' { + ipv6Str = ipv6Str[1 : len(ipv6Str)-1] + } + ip := net.ParseIP(ipv6Str) + if ip != nil { + ipv6 := ip.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.Set(ipv6HostMask) + + return IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + }, nil + } + + // Parse as an IPv6 CIDR + ipAddr, network, err := net.ParseCIDR(ipv6Str) + if err == nil { + ipv6 := ipAddr.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.SetBytes(network.Mask) + + ipv6Addr := IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + } + return ipv6Addr, nil + } + + return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err) +} + +// AddressBinString returns a string with the IPv6Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressBinString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%0128s", bi.Text(2)) +} + +// AddressHexString returns a string with the IPv6Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressHexString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%032s", bi.Text(16)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv6Addr or the argument is of a +// different type. +// - 1 If the argument should sort first. +func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return sortDeferDecision + } + + ipv6aBigInt := new(big.Int) + ipv6aBigInt.Set(ipv6.Address) + ipv6bBigInt := new(big.Int) + ipv6bBigInt.Set(ipv6b.Address) + + return ipv6aBigInt.Cmp(ipv6bBigInt) +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr, +// regardless of type. +// - 1 If the argument should sort first.
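The IPv4 attribute map registered above can be walked generically. A sketch; the expected values in the comment assume the "uint32" attribute renders the packed big-endian address, per the attribute list shown earlier:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ipv4 := sockaddr.MustIPv4Addr("192.168.1.10/24")

	// Walk the attributes registered in ipv4AddrAttrMap above.
	for _, attr := range sockaddr.IPv4Attrs() {
		fmt.Printf("%-10s %s\n", attr, sockaddr.IPv4AddrAttr(ipv4, attr))
	}
	// Expected: size 256, broadcast 192.168.1.255, uint32 3232235786.
}
```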
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv6.Port == saPort: + return sortDeferDecision + case ipv6.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv6) + ipv6b, ok := sa.(IPv6Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, sort receiver before the non-IPv6 SockAddr. + // Conversely, if the receiver is not part of the RFC, punt on + // sorting and leave it for the next sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv6b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv6 IPv6Addr) Contains(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + return ipv6.ContainsNetwork(ipv6b) +} + +// ContainsAddress returns true if the IPv6Address is contained within the +// receiver. +func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool { + xAddr := IPv6Addr{ + Address: x, + Mask: ipv6HostMask, + } + + { + xIPv6 := xAddr.FirstUsable().(IPv6Addr) + yIPv6 := ipv6.FirstUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) >= 1 { + return false + } + } + + { + xIPv6 := xAddr.LastUsable().(IPv6Addr) + yIPv6 := ipv6.LastUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) <= -1 { + return false + } + } + return true +} + +// ContainsNetwork returns true if the network from IPv6Addr is contained within +// the receiver. +func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool { + { + xIPv6 := x.FirstUsable().(IPv6Addr) + yIPv6 := y.FirstUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 { + return false + } + } + + { + xIPv6 := x.LastUsable().(IPv6Addr) + yIPv6 := y.LastUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 { + return false + } + } + return true +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. +func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. 
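Containment for IPv6 is checked against the first and last usable addresses, as the two helpers above show. A minimal sketch:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	net6 := sockaddr.MustIPv6Addr("2001:db8::/32")
	host := sockaddr.MustIPv6Addr("2001:db8::68")

	fmt.Println(net6.Contains(host)) // true: the host falls inside the /32
	fmt.Println(host.Contains(net6)) // false: a /128 cannot contain a /32
}
```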
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv6Addr. +func (ipv6a IPv6Addr) Equal(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + if ipv6a.NetIP().String() != ipv6b.NetIP().String() { + return false + } + + if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() { + return false + } + + if ipv6a.Port != ipv6b.Port { + return false + } + + return true +} + +// FirstUsable returns an IPv6Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "2001:0db8::0003/64" would +// return "2001:0db8::". +func (ipv6 IPv6Addr) FirstUsable() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6HostMask, + } +} + +// Host returns a copy of ipv6 with its mask set to /128 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv6 IPv6Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv6Addr{ + Address: ipv6.Address, + Mask: ipv6HostMask, + Port: ipv6.Port, + } +} + +// IPPort returns the Port number attached to the IPv6Addr +func (ipv6 IPv6Addr) IPPort() IPPort { + return ipv6.Port +} + +// LastUsable returns the last address in a given network. +func (ipv6 IPv6Addr) LastUsable() IPAddr { + addr := new(big.Int) + addr.Set(ipv6.Address) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + + negMask := new(big.Int) + negMask.Xor(ipv6HostMask, mask) + + lastAddr := new(big.Int) + lastAddr.And(addr, mask) + lastAddr.Or(lastAddr, negMask) + + return IPv6Addr{ + Address: IPv6Address(lastAddr), + Mask: ipv6HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv6Addr. For +// example, the Maskbits() of "2001:0db8::0003/64" would return 64. +func (ipv6 IPv6Addr) Maskbits() int { + maskOnes, _ := ipv6.NetIPNet().Mask.Size() + + return maskOnes +} + +// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on +// invalid input.
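The IPv6 range helpers mirror their IPv4 counterparts, and the Listen/Dial argument builders insist on a /128 mask, which `Host()` provides. A sketch; the type assertion on `Host()`'s `IPAddr` return value is this example's construction, not an API requirement:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ipv6 := sockaddr.MustIPv6Addr("2001:db8::68/64")

	fmt.Println(ipv6.FirstUsable()) // 2001:db8:: (the network address)
	fmt.Println(ipv6.LastUsable())  // 2001:db8::ffff:ffff:ffff:ffff

	// Listen args require a /128; Host() narrows the mask first.
	host := ipv6.Host().(sockaddr.IPv6Addr)
	network, args := host.ListenStreamArgs()
	fmt.Println(network, args) // tcp6 [2001:db8::68]:0
}
```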
+func MustIPv6Addr(addr string) IPv6Addr { + ipv6, err := NewIPv6Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err)) + } + return ipv6 +} + +// NetIP returns the address as a net.IP. +func (ipv6 IPv6Addr) NetIP() *net.IP { + return bigIntToNetIPv6(ipv6.Address) +} + +// NetIPMask create a new net.IPMask from the IPv6Addr. +func (ipv6 IPv6Addr) NetIPMask() *net.IPMask { + ipv6Mask := make(net.IPMask, IPv6len) + m := big.Int(*ipv6.Mask) + copy(ipv6Mask, m.Bytes()) + return &ipv6Mask +} + +// NetIPNet create a new net.IPNet from the IPv6Addr. +func (ipv6 IPv6Addr) NetIPNet() *net.IPNet { + ipv6net := &net.IPNet{} + ipv6net.IP = make(net.IP, IPv6len) + copy(ipv6net.IP, *ipv6.NetIP()) + ipv6net.Mask = *ipv6.NetIPMask() + return ipv6net +} + +// Network returns the network prefix or network address for a given network. +func (ipv6 IPv6Addr) Network() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6.Mask, + } +} + +// NetworkAddress returns an IPv6Network of the IPv6Addr's network address. +func (ipv6 IPv6Addr) NetworkAddress() IPv6Network { + addr := new(big.Int) + addr.SetBytes((*ipv6.Address).Bytes()) + + mask := new(big.Int) + mask.SetBytes(*ipv6.NetIPMask()) + + netAddr := new(big.Int) + netAddr.And(addr, mask) + + return IPv6Network(netAddr) +} + +// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The +// order of the bytes is big endian. +func (ipv6 IPv6Addr) Octets() []int { + x := make([]int, IPv6len) + for i, b := range *bigIntToNetIPv6(ipv6.Address) { + x[i] = int(b) + } + + return x +} + +// String returns a string representation of the IPv6Addr +func (ipv6 IPv6Addr) String() string { + if ipv6.Port != 0 { + return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) + } + + if ipv6.Maskbits() == 128 { + return ipv6.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv6 +func (IPv6Addr) Type() SockAddrType { + return TypeIPv6 +} + +// IPv6Attrs returns a list of attributes supported by the IPv6Addr type +func IPv6Attrs() []AttrName { + return ipv6AddrAttrs +} + +// IPv6AddrAttr returns a string representation of an attribute for the given +// IPv6Addr. +func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string { + fn, found := ipv6AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv6) +} + +// ipv6AddrInit is called once at init() +func ipv6AddrInit() { + // Sorted for human readability + ipv6AddrAttrs = []AttrName{ + "size", // Same position as in IPv4 for output consistency + "uint128", + } + + ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{ + "size": func(ipv6 IPv6Addr) string { + netSize := big.NewInt(1) + netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits())) + return netSize.Text(10) + }, + "uint128": func(ipv6 IPv6Addr) string { + b := big.Int(*ipv6.Address) + return b.Text(10) + }, + } +} + +// bigIntToNetIPv6 is a helper function that returns a net.IP with correctly +// padded values. +func bigIntToNetIPv6(bi *big.Int) *net.IP { + x := make(net.IP, IPv6len) + ipv6Bytes := bi.Bytes() + + // It's possible for ipv6Bytes to be less than IPv6len bytes in size. If + // they are different sizes we need to pad the response.
+ if len(ipv6Bytes) < IPv6len { + buf := new(bytes.Buffer) + buf.Grow(IPv6len) + + for i := len(ipv6Bytes); i < IPv6len; i++ { + if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil { + panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err)) + } + } + + for _, b := range ipv6Bytes { + if err := binary.Write(buf, binary.BigEndian, b); err != nil { + panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err)) + } + } + + ipv6Bytes = buf.Bytes() + } + i := copy(x, ipv6Bytes) + if i != IPv6len { + panic("IPv6 wrong size") + } + return &x +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go new file mode 100644 index 00000000..02e188f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go @@ -0,0 +1,948 @@ +package sockaddr + +// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP +// blocks. +const ForwardingBlacklist = 4294967295 +const ForwardingBlacklistRFC = "4294967295" + +// IsRFC tests to see if an SockAddr matches the specified RFC +func IsRFC(rfcNum uint, sa SockAddr) bool { + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[rfcNum] + if !ok { + return false + } + + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(sa) { + contained = true + break + } + } + return contained +} + +// KnownRFCs returns an initial set of known RFCs. +// +// NOTE (sean@): As this list evolves over time, please submit patches to keep +// this list current. If something isn't right, inquire, as it may just be a +// bug on my part. Some of the inclusions were based on my judgement as to what +// would be a useful value (e.g. RFC3330). +// +// Useful resources: +// +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +func KnownRFCs() map[uint]SockAddrs { + // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a + // RADIX tree, but `ENOTIME`. Patches welcome. + return map[uint]SockAddrs{ + 919: { + // [RFC919] Broadcasting Internet Datagrams + MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards + }, + 1122: { + // [RFC1122] Requirements for Internet Hosts -- Communication Layers + MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3 + MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3 + }, + 1112: { + // [RFC1112] Host Extensions for IP Multicasting + MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses + }, + 1918: { + // [RFC1918] Address Allocation for Private Internets + MustIPv4Addr("10.0.0.0/8"), + MustIPv4Addr("172.16.0.0/12"), + MustIPv4Addr("192.168.0.0/16"), + }, + 2544: { + // [RFC2544] Benchmarking Methodology for Network + // Interconnect Devices + MustIPv4Addr("198.18.0.0/15"), + }, + 2765: { + // [RFC2765] Stateless IP/ICMP Translation Algorithm + // (SIIT) (obsoleted by RFCs 6145, which itself was + // later obsoleted by 7915). 
+ + // [RFC2765], §2.1 Addresses + MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"), + }, + 2928: { + // [RFC2928] Initial IPv6 Sub-TLA ID Assignments + MustIPv6Addr("2001::/16"), // Superblock + //MustIPv6Addr("2001:0000::/23"), // IANA + //MustIPv6Addr("2001:0200::/23"), // APNIC + //MustIPv6Addr("2001:0400::/23"), // ARIN + //MustIPv6Addr("2001:0600::/23"), // RIPE NCC + //MustIPv6Addr("2001:0800::/23"), // (future assignment) + // ... + //MustIPv6Addr("2001:FE00::/23"), // (future assignment) + }, + 3056: { // 6to4 address + // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds + + // [RFC3056], §2 IPv6 Prefix Allocation + MustIPv6Addr("2002::/16"), + }, + 3068: { + // [RFC3068] An Anycast Prefix for 6to4 Relay Routers + // (obsoleted by RFC7526) + + // [RFC3068], § 6to4 Relay anycast address + MustIPv4Addr("192.88.99.0/24"), + + // [RFC3068], §2.5 6to4 IPv6 relay anycast address + // + // NOTE: /120 == 128-(32-24) + MustIPv6Addr("2002:c058:6301::/120"), + }, + 3171: { + // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments + MustIPv4Addr("224.0.0.0/4"), + }, + 3330: { + // [RFC3330] Special-Use IPv4 Addresses + + // Addresses in this block refer to source hosts on + // "this" network. Address 0.0.0.0/32 may be used as a + // source address for this host on this network; other + // addresses within 0.0.0.0/8 may be used to refer to + // specified hosts on this network [RFC1700, page 4]. + MustIPv4Addr("0.0.0.0/8"), + + // 10.0.0.0/8 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("10.0.0.0/8"), + + // 14.0.0.0/8 - This block is set aside for assignments + // to the international system of Public Data Networks + // [RFC1700, page 181]. The registry of assignments + // within this block can be accessed from the "Public + // Data Network Numbers" link on the web page at + // http://www.iana.org/numbers.html. Addresses within + // this block are assigned to users and should be + // treated as such. + + // 24.0.0.0/8 - This block was allocated in early 1996 + // for use in provisioning IP service over cable + // television systems. Although the IANA initially was + // involved in making assignments to cable operators, + // this responsibility was transferred to American + // Registry for Internet Numbers (ARIN) in May 2001. + // Addresses within this block are assigned in the + // normal manner and should be treated as such. + + // 39.0.0.0/8 - This block was used in the "Class A + // Subnet Experiment" that commenced in May 1995, as + // documented in [RFC1797]. The experiment has been + // completed and this block has been returned to the + // pool of addresses reserved for future allocation or + // assignment. This block therefore no longer has a + // special use and is subject to allocation to a + // Regional Internet Registry for assignment in the + // normal manner. + + // 127.0.0.0/8 - This block is assigned for use as the Internet host + // loopback address. A datagram sent by a higher level protocol to an + // address anywhere within this block should loop back inside the host. + // This is ordinarily implemented using only 127.0.0.1/32 for loopback, + // but no addresses within this block should ever appear on any network + // anywhere [RFC1700, page 5].
+ MustIPv4Addr("127.0.0.0/8"), + + // 128.0.0.0/16 - This block, corresponding to the + // numerically lowest of the former Class B addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 169.254.0.0/16 - This is the "link local" block. It + // is allocated for communication between hosts on a + // single link. Hosts obtain these addresses by + // auto-configuration, such as when a DHCP server may + // not be found. + MustIPv4Addr("169.254.0.0/16"), + + // 172.16.0.0/12 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("172.16.0.0/12"), + + // 191.255.0.0/16 - This block, corresponding to the numerically highest + // to the former Class B addresses, was initially and is still reserved + // by the IANA. Given the present classless nature of the IP address + // space, the basis for the reservation no longer applies and addresses + // in this block are subject to future allocation to a Regional Internet + // Registry for assignment in the normal manner. + + // 192.0.0.0/24 - This block, corresponding to the + // numerically lowest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in + // documentation and example code. It is often used in conjunction with + // domain names example.com or example.net in vendor and protocol + // documentation. Addresses within this block should not appear on the + // public Internet. + MustIPv4Addr("192.0.2.0/24"), + + // 192.88.99.0/24 - This block is allocated for use as 6to4 relay + // anycast addresses, according to [RFC3068]. + MustIPv4Addr("192.88.99.0/24"), + + // 192.168.0.0/16 - This block is set aside for use in private networks. + // Its intended use is documented in [RFC1918]. Addresses within this + // block should not appear on the public Internet. + MustIPv4Addr("192.168.0.0/16"), + + // 198.18.0.0/15 - This block has been allocated for use + // in benchmark tests of network interconnect devices. + // Its use is documented in [RFC2544]. + MustIPv4Addr("198.18.0.0/15"), + + // 223.255.255.0/24 - This block, corresponding to the + // numerically highest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 224.0.0.0/4 - This block, formerly known as the Class + // D address space, is allocated for use in IPv4 + // multicast address assignments. The IANA guidelines + // for assignments from this space are described in + // [RFC3171]. + MustIPv4Addr("224.0.0.0/4"), + + // 240.0.0.0/4 - This block, formerly known as the Class E address + // space, is reserved. 
The "limited broadcast" destination address + // 255.255.255.255 should never be forwarded outside the (sub-)net of + // the source. The remainder of this space is reserved + // for future use. [RFC1700, page 4] + MustIPv4Addr("240.0.0.0/4"), + }, + 3849: { + // [RFC3849] IPv6 Address Prefix Reserved for Documentation + MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations + }, + 3927: { + // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses + MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection + }, + 4038: { + // [RFC4038] Application Aspects of IPv6 Transition + + // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node + MustIPv6Addr("0:0:0:0:0:ffff::/96"), + }, + 4193: { + // [RFC4193] Unique Local IPv6 Unicast Addresses + MustIPv6Addr("fc00::/7"), + }, + 4291: { + // [RFC4291] IP Version 6 Addressing Architecture + + // [RFC4291], §2.5.2 The Unspecified Address + MustIPv6Addr("::/128"), + + // [RFC4291], §2.5.3 The Loopback Address + MustIPv6Addr("::1/128"), + + // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address + MustIPv6Addr("::/96"), + + // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address + MustIPv6Addr("::ffff:0:0/96"), + + // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses + MustIPv6Addr("fe80::/10"), + + // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses + // (depreciated) + MustIPv6Addr("fec0::/10"), + + // [RFC4291], §2.7 Multicast Addresses + MustIPv6Addr("ff00::/8"), + + // IPv6 Multicast Information. + // + // In the following "table" below, `ff0x` is replaced + // with the following values depending on the scope of + // the query: + // + // IPv6 Multicast Scopes: + // * ff00/9 // reserved + // * ff01/9 // interface-local + // * ff02/9 // link-local + // * ff03/9 // realm-local + // * ff04/9 // admin-local + // * ff05/9 // site-local + // * ff08/9 // organization-local + // * ff0e/9 // global + // * ff0f/9 // reserved + // + // IPv6 Multicast Addresses: + // * ff0x::2 // All routers + // * ff02::5 // OSPFIGP + // * ff02::6 // OSPFIGP Designated Routers + // * ff02::9 // RIP Routers + // * ff02::a // EIGRP Routers + // * ff02::d // All PIM Routers + // * ff02::1a // All RPL Routers + // * ff0x::fb // mDNSv6 + // * ff0x::101 // All Network Time Protocol (NTP) servers + // * ff02::1:1 // Link Name + // * ff02::1:2 // All-dhcp-agents + // * ff02::1:3 // Link-local Multicast Name Resolution + // * ff05::1:3 // All-dhcp-servers + // * ff02::1:ff00:0/104 // Solicited-node multicast address. 
+ // * ff02::2:ff00:0/104 // Node Information Queries + }, + 4380: { + // [RFC4380] Teredo: Tunneling IPv6 over UDP through + // Network Address Translations (NATs) + + // [RFC4380], §2.6 Global Teredo IPv6 Service Prefix + MustIPv6Addr("2001:0000::/32"), + }, + 4773: { + // [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block + MustIPv6Addr("2001:0000::/23"), // IANA + }, + 4843: { + // [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID) + MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations + }, + 5180: { + // [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices + MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations + }, + 5735: { + // [RFC5735] Special Use IPv4 Addresses + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + MustIPv4Addr("198.18.0.0/15"), // Benchmarks + }, + 5737: { + // [RFC5737] IPv4 Address Blocks Reserved for Documentation + MustIPv4Addr("192.0.2.0/24"), // TEST-NET-1 + MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2 + MustIPv4Addr("203.0.113.0/24"), // TEST-NET-3 + }, + 6052: { + // [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators + MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix + }, + 6333: { + // [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion + MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address + }, + 6598: { + // [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space + MustIPv4Addr("100.64.0.0/10"), + }, + 6666: { + // [RFC6666] A Discard Prefix for IPv6 + MustIPv6Addr("0100::/64"), + }, + 6890: { + // [RFC6890] Special-Purpose IP Address Registries + + // From "RFC6890 §2.2.1 Information Requirements": + /* + The IPv4 and IPv6 Special-Purpose Address Registries maintain the + following information regarding each entry: + + o Address Block - A block of IPv4 or IPv6 addresses that has been + registered for a special purpose. + + o Name - A descriptive name for the special-purpose address block. + + o RFC - The RFC through which the special-purpose address block was + requested. + + o Allocation Date - The date upon which the special-purpose address + block was allocated. + + o Termination Date - The date upon which the allocation is to be + terminated. This field is applicable for limited-use allocations + only. + + o Source - A boolean value indicating whether an address from the + allocated special-purpose address block is valid when used as the + source address of an IP datagram that transits two devices. + + o Destination - A boolean value indicating whether an address from + the allocated special-purpose address block is valid when used as + the destination address of an IP datagram that transits two + devices. + + o Forwardable - A boolean value indicating whether a router may + forward an IP datagram whose destination address is drawn from the + allocated special-purpose address block between external + interfaces. + + o Global - A boolean value indicating whether an IP datagram whose + destination address is drawn from the allocated special-purpose + address block is forwardable beyond a specified administrative + domain. + + o Reserved-by-Protocol - A boolean value indicating whether the + special-purpose address block is reserved by IP, itself. 
This + value is "TRUE" if the RFC that created the special-purpose + address block requires all compliant IP implementations to behave + in a special way when processing packets either to or from + addresses contained by the address block. + + If the value of "Destination" is FALSE, the values of "Forwardable" + and "Global" must also be false. + */ + + /*+----------------------+----------------------------+ + * | Attribute | Value | + * +----------------------+----------------------------+ + * | Address Block | 0.0.0.0/8 | + * | Name | "This host on this network"| + * | RFC | [RFC1122], Section 3.2.1.3 | + * | Allocation Date | September 1981 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | False | + * | Forwardable | False | + * | Global | False | + * | Reserved-by-Protocol | True | + * +----------------------+----------------------------+*/ + MustIPv4Addr("0.0.0.0/8"), + + /*+----------------------+---------------+ + * | Attribute | Value | + * +----------------------+---------------+ + * | Address Block | 10.0.0.0/8 | + * | Name | Private-Use | + * | RFC | [RFC1918] | + * | Allocation Date | February 1996 | + * | Termination Date | N/A | + * | Source | True | + * | Destination | True | + * | Forwardable | True | + * | Global | False | + * | Reserved-by-Protocol | False | + * +----------------------+---------------+ */ + MustIPv4Addr("10.0.0.0/8"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 100.64.0.0/10 | + | Name | Shared Address Space | + | RFC | [RFC6598] | + | Allocation Date | April 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("100.64.0.0/10"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 127.0.0.0/8 | + | Name | Loopback | + | RFC | [RFC1122], Section 3.2.1.3 | + | Allocation Date | September 1981 | + | Termination Date | N/A | + | Source | False [1] | + | Destination | False [1] | + | Forwardable | False [1] | + | Global | False [1] | + | Reserved-by-Protocol | True | + +----------------------+----------------------------+*/ + // [1] Several protocols have been granted exceptions to + // this rule. For examples, see [RFC4379] and + // [RFC5884]. 
+ MustIPv4Addr("127.0.0.0/8"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 169.254.0.0/16 | + | Name | Link Local | + | RFC | [RFC3927] | + | Allocation Date | May 2005 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------+*/ + MustIPv4Addr("169.254.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 172.16.0.0/12 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("172.16.0.0/12"), + + /*+----------------------+---------------------------------+ + | Attribute | Value | + +----------------------+---------------------------------+ + | Address Block | 192.0.0.0/24 [2] | + | Name | IETF Protocol Assignments | + | RFC | Section 2.1 of this document | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------------------------+*/ + // [2] Not usable unless by virtue of a more specific + // reservation. + MustIPv4Addr("192.0.0.0/24"), + + /*+----------------------+--------------------------------+ + | Attribute | Value | + +----------------------+--------------------------------+ + | Address Block | 192.0.0.0/29 | + | Name | IPv4 Service Continuity Prefix | + | RFC | [RFC6333], [RFC7335] | + | Allocation Date | June 2011 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------------------------+*/ + MustIPv4Addr("192.0.0.0/29"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 192.0.2.0/24 | + | Name | Documentation (TEST-NET-1) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("192.0.2.0/24"), + + /*+----------------------+--------------------+ + | Attribute | Value | + +----------------------+--------------------+ + | Address Block | 192.88.99.0/24 | + | Name | 6to4 Relay Anycast | + | RFC | [RFC3068] | + | Allocation Date | June 2001 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+--------------------+*/ + MustIPv4Addr("192.88.99.0/24"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 192.168.0.0/16 | + | Name | Private-Use | + | RFC | [RFC1918] | + | Allocation Date | February 1996 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + 
+----------------------+----------------+*/ + MustIPv4Addr("192.168.0.0/16"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 198.18.0.0/15 | + | Name | Benchmarking | + | RFC | [RFC2544] | + | Allocation Date | March 1999 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + MustIPv4Addr("198.18.0.0/15"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 198.51.100.0/24 | + | Name | Documentation (TEST-NET-2) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("198.51.100.0/24"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 203.0.113.0/24 | + | Name | Documentation (TEST-NET-3) | + | RFC | [RFC5737] | + | Allocation Date | January 2010 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv4Addr("203.0.113.0/24"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 240.0.0.0/4 | + | Name | Reserved | + | RFC | [RFC1112], Section 4 | + | Allocation Date | August 1989 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+----------------------+*/ + MustIPv4Addr("240.0.0.0/4"), + + /*+----------------------+----------------------+ + | Attribute | Value | + +----------------------+----------------------+ + | Address Block | 255.255.255.255/32 | + | Name | Limited Broadcast | + | RFC | [RFC0919], Section 7 | + | Allocation Date | October 1984 | + | Termination Date | N/A | + | Source | False | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------+*/ + MustIPv4Addr("255.255.255.255/32"), + + /*+----------------------+------------------+ + | Attribute | Value | + +----------------------+------------------+ + | Address Block | ::1/128 | + | Name | Loopback Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+------------------+*/ + MustIPv6Addr("::1/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::/128 | + | Name | Unspecified Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + 
MustIPv6Addr("::/128"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | 64:ff9b::/96 | + | Name | IPv4-IPv6 Translat. | + | RFC | [RFC6052] | + | Allocation Date | October 2010 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | True | + | Reserved-by-Protocol | False | + +----------------------+---------------------+*/ + MustIPv6Addr("64:ff9b::/96"), + + /*+----------------------+---------------------+ + | Attribute | Value | + +----------------------+---------------------+ + | Address Block | ::ffff:0:0/96 | + | Name | IPv4-mapped Address | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+---------------------+*/ + MustIPv6Addr("::ffff:0:0/96"), + + /*+----------------------+----------------------------+ + | Attribute | Value | + +----------------------+----------------------------+ + | Address Block | 100::/64 | + | Name | Discard-Only Address Block | + | RFC | [RFC6666] | + | Allocation Date | June 2012 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------------------+*/ + MustIPv6Addr("100::/64"), + + /*+----------------------+---------------------------+ + | Attribute | Value | + +----------------------+---------------------------+ + | Address Block | 2001::/23 | + | Name | IETF Protocol Assignments | + | RFC | [RFC2928] | + | Allocation Date | September 2000 | + | Termination Date | N/A | + | Source | False[1] | + | Destination | False[1] | + | Forwardable | False[1] | + | Global | False[1] | + | Reserved-by-Protocol | False | + +----------------------+---------------------------+*/ + // [1] Unless allowed by a more specific allocation. + MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001::/32 | + | Name | TEREDO | + | RFC | [RFC4380] | + | Allocation Date | January 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001::/16"), + + /*+----------------------+----------------+ + | Attribute | Value | + +----------------------+----------------+ + | Address Block | 2001:2::/48 | + | Name | Benchmarking | + | RFC | [RFC5180] | + | Allocation Date | April 2008 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+----------------+*/ + // Covered by previous entry, included for completeness. 
+ // + // MustIPv6Addr("2001:2::/48"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 2001:db8::/32 | + | Name | Documentation | + | RFC | [RFC3849] | + | Allocation Date | July 2004 | + | Termination Date | N/A | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001:db8::/32"), + + /*+----------------------+--------------+ + | Attribute | Value | + +----------------------+--------------+ + | Address Block | 2001:10::/28 | + | Name | ORCHID | + | RFC | [RFC4843] | + | Allocation Date | March 2007 | + | Termination Date | March 2014 | + | Source | False | + | Destination | False | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------+*/ + // Covered by previous entry, included for completeness. + // + // MustIPv6Addr("2001:10::/28"), + + /*+----------------------+---------------+ + | Attribute | Value | + +----------------------+---------------+ + | Address Block | 2002::/16 [2] | + | Name | 6to4 | + | RFC | [RFC3056] | + | Allocation Date | February 2001 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | N/A [2] | + | Reserved-by-Protocol | False | + +----------------------+---------------+*/ + // [2] See [RFC3056] for details. + MustIPv6Addr("2002::/16"), + + /*+----------------------+--------------+ + | Attribute | Value | + +----------------------+--------------+ + | Address Block | fc00::/7 | + | Name | Unique-Local | + | RFC | [RFC4193] | + | Allocation Date | October 2005 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | True | + | Global | False | + | Reserved-by-Protocol | False | + +----------------------+--------------+*/ + MustIPv6Addr("fc00::/7"), + + /*+----------------------+-----------------------+ + | Attribute | Value | + +----------------------+-----------------------+ + | Address Block | fe80::/10 | + | Name | Linked-Scoped Unicast | + | RFC | [RFC4291] | + | Allocation Date | February 2006 | + | Termination Date | N/A | + | Source | True | + | Destination | True | + | Forwardable | False | + | Global | False | + | Reserved-by-Protocol | True | + +----------------------+-----------------------+*/ + MustIPv6Addr("fe80::/10"), + }, + 7335: { + // [RFC7335] IPv4 Service Continuity Prefix + MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations + }, + ForwardingBlacklist: { // Pseudo-RFC + // Blacklist of non-forwardable IP blocks taken from RFC6890 + // + // TODO: the attributes for forwardable should be + // searchable and embedded in the main list of RFCs + // above. + MustIPv4Addr("0.0.0.0/8"), + MustIPv4Addr("127.0.0.0/8"), + MustIPv4Addr("169.254.0.0/16"), + MustIPv4Addr("192.0.0.0/24"), + MustIPv4Addr("192.0.2.0/24"), + MustIPv4Addr("198.51.100.0/24"), + MustIPv4Addr("203.0.113.0/24"), + MustIPv4Addr("240.0.0.0/4"), + MustIPv4Addr("255.255.255.255/32"), + MustIPv6Addr("::1/128"), + MustIPv6Addr("::/128"), + MustIPv6Addr("::ffff:0:0/96"), + + // There is no way of expressing a whitelist per RFC2928 + // atm without creating a negative mask, which I don't + // want to do atm.
+ //MustIPv6Addr("2001::/23"), + + MustIPv6Addr("2001:db8::/32"), + MustIPv6Addr("2001:10::/28"), + MustIPv6Addr("fe80::/10"), + }, + } +} + +// VisitAllRFCs iterates over all known RFCs and calls the visitor +func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) { + rfcNetMap := KnownRFCs() + + // Blacklist of faux-RFCs. Don't show the world that we're abusing the + // RFC system in this library. + rfcBlacklist := map[uint]struct{}{ + ForwardingBlacklist: {}, + } + + for rfcNum, sas := range rfcNetMap { + if _, found := rfcBlacklist[rfcNum]; !found { + fn(rfcNum, sas) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go new file mode 100644 index 00000000..2a3ee1db --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go @@ -0,0 +1,19 @@ +package sockaddr + +// RouteInterface specifies an interface for obtaining memoized route table and +// network information from a given OS. +type RouteInterface interface { + // GetDefaultInterfaceName returns the name of the interface that has a + // default route or an error and an empty string if a problem was + // encountered. + GetDefaultInterfaceName() (string, error) +} + +// VisitCommands visits each command used by the platform-specific RouteInfo +// implementation. +func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) { + for k, v := range ri.cmds { + cmds := append([]string(nil), v...) + fn(k, cmds) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go new file mode 100644 index 00000000..9885915a --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_android.go @@ -0,0 +1,34 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Android-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: map[string][]string{"ip": {"/system/bin/ip", "route", "get", "8.8.8.8"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmdAndroid(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go new file mode 100644 index 00000000..705757ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go @@ -0,0 +1,36 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "route": {"/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. 
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", err + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go new file mode 100644 index 00000000..d1b009f6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go @@ -0,0 +1,10 @@ +// +build android nacl plan9 + +package sockaddr + +import "errors" + +// getDefaultIfName is the default interface function for unsupported platforms. +func getDefaultIfName() (string, error) { + return "", errors.New("No default interface found (unsupported platform)") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go new file mode 100644 index 00000000..b62ce6ec --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go @@ -0,0 +1,42 @@ +// +build !android + +package sockaddr + +import ( + "errors" + "os/exec" +) + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Linux-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + // CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on + // $PATH and fall back to /sbin/ip on error. + path, _ := exec.LookPath("ip") + if path == "" { + path = "/sbin/ip" + } + + return routeInfo{ + cmds: map[string][]string{"ip": {path, "route"}}, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go new file mode 100644 index 00000000..ee8e7984 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go @@ -0,0 +1,37 @@ +package sockaddr + +import ( + "errors" + "os/exec" +) + +var cmds map[string][]string = map[string][]string{ + "route": {"/usr/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Solaris-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", errors.New("No default interface found") + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go new file mode 100644 index 00000000..3da97288 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go @@ -0,0 +1,41 @@ +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "netstat": {"netstat", "-rn"}, + "ipconfig": {"ipconfig"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a Windows-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output() + if err != nil { + return "", err + } + + ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output() + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut)) + if err != nil { + return "", err + } + + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go new file mode 100644 index 00000000..826c91c2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go @@ -0,0 +1,206 @@ +package sockaddr + +import ( + "encoding/json" + "fmt" + "strings" +) + +type SockAddrType int +type AttrName string + +const ( + TypeUnknown SockAddrType = 0x0 + TypeUnix = 0x1 + TypeIPv4 = 0x2 + TypeIPv6 = 0x4 + + // TypeIP is the union of TypeIPv4 and TypeIPv6 + TypeIP = 0x6 +) + +type SockAddr interface { + // CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC + // networks, -1 if the receiver is contained within the RFC network, or + // 1 if the address is not contained within the RFC. + CmpRFC(rfcNum uint, sa SockAddr) int + + // Contains returns true if the SockAddr arg is contained within the + // receiver + Contains(SockAddr) bool + + // Equal allows for the comparison of two SockAddrs + Equal(SockAddr) bool + + DialPacketArgs() (string, string) + DialStreamArgs() (string, string) + ListenPacketArgs() (string, string) + ListenStreamArgs() (string, string) + + // String returns the string representation of SockAddr + String() string + + // Type returns the SockAddrType + Type() SockAddrType +} + +// sockAddrAttrMap is a map of the SockAddr type-specific attributes. +var sockAddrAttrMap map[AttrName]func(SockAddr) string +var sockAddrAttrs []AttrName + +func init() { + sockAddrInit() +} + +// NewSockAddr creates a new SockAddr from the string. The order in which +// NewSockAddr() attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, +// UnixSock. +// +// NOTE: NewSockAddr() relies on the heuristic that the path begins with either a +// '.' or '/' character before creating a new UnixSock.
For UNIX sockets that +// are absolute paths or are nested within a sub-directory, this works as +// expected; however, if the UNIX socket is contained in the current working +// directory, this will fail unless the path begins with "./" +// (e.g. "./my-local-socket"). Calls directly to NewUnixSock() do not suffer +// this limitation. Invalid IP addresses such as "256.0.0.0/-1" will run afoul +// of this heuristic and be assumed to be a valid UNIX socket path (which they +// are, but it is probably not what you want and you won't realize it until you +// stat(2) the file system to discover it doesn't exist). +func NewSockAddr(s string) (SockAddr, error) { + ipv4Addr, err := NewIPv4Addr(s) + if err == nil { + return ipv4Addr, nil + } + + ipv6Addr, err := NewIPv6Addr(s) + if err == nil { + return ipv6Addr, nil + } + + // Check to make sure the string begins with either a '.' or '/', or + // contains a '/'. + if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) { + unixSock, err := NewUnixSock(s) + if err == nil { + return unixSock, nil + } + } + + return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s) +} + +// ToIPAddr returns an IPAddr type or nil if the type conversion fails. +func ToIPAddr(sa SockAddr) *IPAddr { + ipa, ok := sa.(IPAddr) + if !ok { + return nil + } + return &ipa +} + +// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails. +func ToIPv4Addr(sa SockAddr) *IPv4Addr { + switch v := sa.(type) { + case IPv4Addr: + return &v + default: + return nil + } +} + +// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails. +func ToIPv6Addr(sa SockAddr) *IPv6Addr { + switch v := sa.(type) { + case IPv6Addr: + return &v + default: + return nil + } +} + +// ToUnixSock returns a UnixSock type or nil if the type conversion fails. +func ToUnixSock(sa SockAddr) *UnixSock { + switch v := sa.(type) { + case UnixSock: + return &v + default: + return nil + } +} + +// SockAddrAttr returns a string representation of an attribute for the given +// SockAddr. +func SockAddrAttr(sa SockAddr, selector AttrName) string { + fn, found := sockAddrAttrMap[selector] + if !found { + return "" + } + + return fn(sa) +} + +// String() for SockAddrType returns a string representation of the +// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown"). +func (sat SockAddrType) String() string { + switch sat { + case TypeIPv4: + return "IPv4" + case TypeIPv6: + return "IPv6" + // There is no concrete "IP" type. Leaving here as a reminder. + // case TypeIP: + // return "IP" + case TypeUnix: + return "UNIX" + default: + panic("unsupported type") + } +} + +// sockAddrInit is called once at init() +func sockAddrInit() { + sockAddrAttrs = []AttrName{ + "type", // type should be first + "string", + } + + sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{ + "string": func(sa SockAddr) string { + return sa.String() + }, + "type": func(sa SockAddr) string { + return sa.Type().String() + }, + } +} + +// SockAddrAttrs returns a list of attributes supported by the SockAddr type +func SockAddrAttrs() []AttrName { + return sockAddrAttrs +} + +// Although this is pretty trivial to do in a program, having the logic here is +// useful all around. Note that this marshals into a *string* -- the underlying +// string representation of the sockaddr. If you then unmarshal into this type +// in Go, all will work as expected, but externally you can take what comes out +// and use the string value directly.
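+// +// A minimal round-trip sketch (the address below is only an example): +// +// sm := &SockAddrMarshaler{SockAddr: MustIPv4Addr("10.0.0.0/8")} +// b, _ := json.Marshal(sm) // a JSON string such as `"10.0.0.0/8"` +// _ = json.Unmarshal(b, sm)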
+type SockAddrMarshaler struct { + SockAddr +} + +func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) { + return json.Marshal(s.SockAddr.String()) +} + +func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error { + var str string + err := json.Unmarshal(in, &str) + if err != nil { + return err + } + sa, err := NewSockAddr(str) + if err != nil { + return err + } + s.SockAddr = sa + return nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go new file mode 100644 index 00000000..75fbffb1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go @@ -0,0 +1,193 @@ +package sockaddr + +import ( + "bytes" + "sort" +) + +// SockAddrs is a slice of SockAddrs +type SockAddrs []SockAddr + +func (s SockAddrs) Len() int { return len(s) } +func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// CmpAddrFunc is the function signature that must be met to be used in the +// OrderedAddrBy multiAddrSorter +type CmpAddrFunc func(p1, p2 *SockAddr) int + +// multiAddrSorter implements the Sort interface, sorting the SockAddrs within. +type multiAddrSorter struct { + addrs SockAddrs + cmp []CmpAddrFunc +} + +// Sort sorts the argument slice according to the Cmp functions passed to +// OrderedAddrBy. +func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) { + ms.addrs = sockAddrs + sort.Sort(ms) +} + +// OrderedAddrBy sorts SockAddrs by the list of sort function pointers. +func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter { + return &multiAddrSorter{ + cmp: cmpFuncs, + } +} + +// Len is part of sort.Interface. +func (ms *multiAddrSorter) Len() int { + return len(ms.addrs) +} + +// Less is part of sort.Interface. It is implemented by looping along the +// Cmp() functions until it finds a comparison that is either less than, +// equal to, or greater than. +func (ms *multiAddrSorter) Less(i, j int) bool { + p, q := &ms.addrs[i], &ms.addrs[j] + // Try all but the last comparison. + var k int + for k = 0; k < len(ms.cmp)-1; k++ { + cmp := ms.cmp[k] + x := cmp(p, q) + switch x { + case -1: + // p < q, so we have a decision. + return true + case 1: + // p > q, so we have a decision. + return false + } + // p == q; try the next comparison. + } + // All comparisons to here said "equal", so just return whatever the + // final comparison reports. + switch ms.cmp[k](p, q) { + case -1: + return true + case 1: + return false + default: + // Still a tie! Now what? + return false + } +} + +// Swap is part of sort.Interface. +func (ms *multiAddrSorter) Swap(i, j int) { + ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i] +} + +const ( + // NOTE (sean@): These constants are here to spruce up the code for + // readability purposes. Some of the Cmp*() variants have confusing + // logic (especially when dealing with mixed-type comparisons) and + // this, I think, makes it easier to grok the code faster. + sortReceiverBeforeArg = -1 + sortDeferDecision = 0 + sortArgBeforeReceiver = 1 +) + +// AscAddress is a sorting function to sort SockAddrs by their respective +// address. Non-equal types are deferred in the sort.
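+// +// A minimal usage sketch with OrderedAddrBy (the addresses are examples only): +// +// addrs := SockAddrs{MustIPv4Addr("192.0.2.10/24"), MustIPv4Addr("10.0.0.1/8")} +// OrderedAddrBy(AscAddress).Sort(addrs)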
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpAddress(p2) + case IPv6Addr: + return v.CmpAddress(p2) + case UnixSock: + return v.CmpAddress(p2) + default: + return sortDeferDecision + } +} + +// AscPort is a sorting function to sort SockAddrs by their respective port. +// Non-equal types are deferred in the sort. +func AscPort(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr: + return v.CmpPort(p2) + case IPv6Addr: + return v.CmpPort(p2) + default: + return sortDeferDecision + } +} + +// AscPrivate is a sorting function to sort "more secure" private values before +// "more public" values. Both IPv4 and IPv6 are compared against RFC6890 +// (for IPv4, RFC6890 includes, but is not limited to, RFC1918 and RFC6598; +// for IPv6 it includes RFC4193). +func AscPrivate(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + + switch v := p1.(type) { + case IPv4Addr, IPv6Addr: + return v.CmpRFC(6890, p2) + default: + return sortDeferDecision + } +} + +// AscNetworkSize is a sorting function to sort SockAddrs based on their network +// size. Non-equal types are deferred in the sort. +func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + + // Network size operations on non-IP types make no sense + if p1Type != p2Type && p1Type != TypeIP { + return sortDeferDecision + } + + ipA := p1.(IPAddr) + ipB := p2.(IPAddr) + + return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask())) +} + +// AscType is a sorting function to sort "more secure" types before +// "less-secure" types. +func AscType(p1Ptr, p2Ptr *SockAddr) int { + p1 := *p1Ptr + p2 := *p2Ptr + p1Type := p1.Type() + p2Type := p2.Type() + switch { + case p1Type < p2Type: + return sortReceiverBeforeArg + case p1Type == p2Type: + return sortDeferDecision + case p1Type > p2Type: + return sortArgBeforeReceiver + default: + return sortDeferDecision + } +} + +// FilterByType returns two lists: a list of matched and a list of excluded +// SockAddrs +func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) { + matched = make(SockAddrs, 0, len(sas)) + excluded = make(SockAddrs, 0, len(sas)) + + for _, sa := range sas { + if sa.Type()&type_ != 0 { + matched = append(matched, sa) + } else { + excluded = append(excluded, sa) + } + } + return matched, excluded +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go new file mode 100644 index 00000000..f3be3f67 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go @@ -0,0 +1,135 @@ +package sockaddr + +import ( + "fmt" + "strings" +) + +type UnixSock struct { + SockAddr + path string +} +type UnixSocks []*UnixSock + +// unixAttrMap is a map of the UnixSock type-specific attributes. +var unixAttrMap map[AttrName]func(UnixSock) string +var unixAttrs []AttrName + +func init() { + unixAttrInit() +} + +// NewUnixSock creates a UnixSock from a string path. The string can be in the +// form of either a URI-based string (e.g. `file:///etc/passwd`), an absolute +// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
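+// +// A minimal sketch (the socket path is hypothetical): +// +// us, _ := NewUnixSock("./my-local-socket") +// network, addr := us.DialStreamArgs() // "unix", "./my-local-socket"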
+func NewUnixSock(s string) (ret UnixSock, err error) { + ret.path = s + return ret, nil +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 if the receiver should sort first because its name lexically sorts before arg +// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same path. +// - 1 if the argument should sort first. +func (us UnixSock) CmpAddress(sa SockAddr) int { + usb, ok := sa.(UnixSock) + if !ok { + return sortDeferDecision + } + + return strings.Compare(us.Path(), usb.Path()) +} + +// DialPacketArgs returns the arguments required to be passed to net.DialUnix() +// with the `unixgram` network type. +func (us UnixSock) DialPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// DialStreamArgs returns the arguments required to be passed to net.DialUnix() +// with the `unix` network type. +func (us UnixSock) DialStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// Equal returns true if a SockAddr is equal to the receiving UnixSock. +func (us UnixSock) Equal(sa SockAddr) bool { + usb, ok := sa.(UnixSock) + if !ok { + return false + } + + if us.Path() != usb.Path() { + return false + } + + return true +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUnixgram() with the `unixgram` network type. +func (us UnixSock) ListenPacketArgs() (network, dialArgs string) { + return "unixgram", us.path +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenUnix() with the `unix` network type. +func (us UnixSock) ListenStreamArgs() (network, dialArgs string) { + return "unix", us.path +} + +// MustUnixSock is a helper method that must return a UnixSock or panic on +// invalid input. +func MustUnixSock(addr string) UnixSock { + us, err := NewUnixSock(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err)) + } + return us +} + +// Path returns the given path of the UnixSock +func (us UnixSock) Path() string { + return us.path +} + +// String returns the path of the UnixSock +func (us UnixSock) String() string { + return fmt.Sprintf("%+q", us.path) +} + +// Type is used as a type switch and returns TypeUnix +func (UnixSock) Type() SockAddrType { + return TypeUnix +} + +// UnixSockAttrs returns a list of attributes supported by the UnixSock type +func UnixSockAttrs() []AttrName { + return unixAttrs +} + +// UnixSockAttr returns a string representation of an attribute for the given +// UnixSock. +func UnixSockAttr(us UnixSock, attrName AttrName) string { + fn, found := unixAttrMap[attrName] + if !found { + return "" + } + + return fn(us) +} + +// unixAttrInit is called once at init() +func unixAttrInit() { + // Sorted for human readability + unixAttrs = []AttrName{ + "path", + } + + unixAttrMap = map[AttrName]func(us UnixSock) string{ + "path": func(us UnixSock) string { + return us.Path() + }, + } +} diff --git a/vendor/github.com/hashicorp/go-tfe/LICENSE b/vendor/github.com/hashicorp/go-tfe/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2.
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. 
Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+   If you create software not governed by this License, and you want to
+   create a new license for such software, you may create and use a modified
+   version of this License if you rename the license and remove any
+   references to the name of the license steward (except to note that such
+   modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+   If You choose to distribute Source Code Form that is Incompatible With
+   Secondary Licenses under the terms of this version of the License, the
+   notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+   This Source Code Form is subject to the
+   terms of the Mozilla Public License, v.
+   2.0. If a copy of the MPL was not
+   distributed with this file, You can
+   obtain one at
+   http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+   This Source Code Form is “Incompatible
+   With Secondary Licenses”, as defined by
+   the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-tfe/README.md b/vendor/github.com/hashicorp/go-tfe/README.md
new file mode 100644
index 00000000..83245e48
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/README.md
@@ -0,0 +1,155 @@
+Terraform Enterprise Go Client
+==============================
+
+[![Build Status](https://travis-ci.org/hashicorp/go-tfe.svg?branch=master)](https://travis-ci.org/hashicorp/go-tfe)
+[![GitHub license](https://img.shields.io/github/license/hashicorp/go-tfe.svg)](https://github.com/hashicorp/go-tfe/blob/master/LICENSE)
+[![GoDoc](https://godoc.org/github.com/hashicorp/go-tfe?status.svg)](https://godoc.org/github.com/hashicorp/go-tfe)
+[![Go Report Card](https://goreportcard.com/badge/github.com/hashicorp/go-tfe)](https://goreportcard.com/report/github.com/hashicorp/go-tfe)
+[![GitHub issues](https://img.shields.io/github/issues/hashicorp/go-tfe.svg)](https://github.com/hashicorp/go-tfe/issues)
+
+This is an API client for [Terraform Enterprise](https://www.hashicorp.com/products/terraform).
+
+## NOTE
+
+The Terraform Enterprise API endpoints are in beta and are subject to change!
+This means the API client is also in beta and subject to change. We will
+indicate any breaking changes by releasing new versions. Until the release of
+v1.0, any minor version changes will indicate possible breaking changes. Patch
+version changes will be used for both bugfixes and non-breaking changes.
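+
+Because minor releases may be breaking until v1.0, it can be prudent to pin an
+exact version in your `go.mod` rather than tracking the latest release. A
+minimal sketch (the version shown is illustrative, not a recommendation):
+
+```
+require github.com/hashicorp/go-tfe v0.3.0 // pinned: pre-v1.0 minor bumps may break
+```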
+
+## Coverage
+
+Currently the following endpoints are supported:
+
+- [x] [Accounts](https://www.terraform.io/docs/enterprise/api/account.html)
+- [x] [Configuration Versions](https://www.terraform.io/docs/enterprise/api/configuration-versions.html)
+- [x] [OAuth Clients](https://www.terraform.io/docs/enterprise/api/oauth-clients.html)
+- [x] [OAuth Tokens](https://www.terraform.io/docs/enterprise/api/oauth-tokens.html)
+- [x] [Organizations](https://www.terraform.io/docs/enterprise/api/organizations.html)
+- [x] [Organization Tokens](https://www.terraform.io/docs/enterprise/api/organization-tokens.html)
+- [x] [Policies](https://www.terraform.io/docs/enterprise/api/policies.html)
+- [x] [Policy Sets](https://www.terraform.io/docs/enterprise/api/policy-sets.html)
+- [x] [Policy Checks](https://www.terraform.io/docs/enterprise/api/policy-checks.html)
+- [ ] [Registry Modules](https://www.terraform.io/docs/enterprise/api/modules.html)
+- [x] [Runs](https://www.terraform.io/docs/enterprise/api/run.html)
+- [x] [SSH Keys](https://www.terraform.io/docs/enterprise/api/ssh-keys.html)
+- [x] [State Versions](https://www.terraform.io/docs/enterprise/api/state-versions.html)
+- [x] [Team Access](https://www.terraform.io/docs/enterprise/api/team-access.html)
+- [x] [Team Memberships](https://www.terraform.io/docs/enterprise/api/team-members.html)
+- [x] [Team Tokens](https://www.terraform.io/docs/enterprise/api/team-tokens.html)
+- [x] [Teams](https://www.terraform.io/docs/enterprise/api/teams.html)
+- [x] [Variables](https://www.terraform.io/docs/enterprise/api/variables.html)
+- [x] [Workspaces](https://www.terraform.io/docs/enterprise/api/workspaces.html)
+- [ ] [Admin](https://www.terraform.io/docs/enterprise/api/admin/index.html)
+
+## Installation
+
+Installation can be done with a normal `go get`:
+
+```
+go get -u github.com/hashicorp/go-tfe
+```
+
+## Documentation
+
+For complete usage of the API client, see the full [package docs](https://godoc.org/github.com/hashicorp/go-tfe).
+
+## Usage
+
+```go
+import tfe "github.com/hashicorp/go-tfe"
+```
+
+Construct a new TFE client, then use the various endpoints on the client to
+access different parts of the Terraform Enterprise API. For example, to list
+all organizations:
+
+```go
+config := &tfe.Config{
+	Token: "insert-your-token-here",
+}
+
+client, err := tfe.NewClient(config)
+if err != nil {
+	log.Fatal(err)
+}
+
+orgs, err := client.Organizations.List(context.Background(), tfe.OrganizationListOptions{})
+if err != nil {
+	log.Fatal(err)
+}
+```
+
+## Examples
+
+The [examples](https://github.com/hashicorp/go-tfe/tree/master/examples) directory
+contains a couple of examples, one of which is listed here as well:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	tfe "github.com/hashicorp/go-tfe"
+)
+
+func main() {
+	config := &tfe.Config{
+		Token: "insert-your-token-here",
+	}
+
+	client, err := tfe.NewClient(config)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Create a context
+	ctx := context.Background()
+
+	// Create a new organization
+	options := tfe.OrganizationCreateOptions{
+		Name:  tfe.String("example"),
+		Email: tfe.String("info@example.com"),
+	}
+
+	org, err := client.Organizations.Create(ctx, options)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Delete an organization
+	err = client.Organizations.Delete(ctx, org.Name)
+	if err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+## Running tests
+
+Tests are run against an actual backend so they require a valid backend address
+and token. In addition, a GitHub token is needed for running the OAuth client
+tests:
+
+```sh
+$ export TFE_ADDRESS=https://tfe.local
+$ export TFE_TOKEN=xxxxxxxxxxxxxxxxxxx
+$ export GITHUB_TOKEN=xxxxxxxxxxxxxxxx
+$ export GITHUB_IDENTIFIER=xxxxxxxxxxx
+```
+
+In order for the tests relating to queuing and capacity to pass, FRQ should be
+enabled with a limit of 2 concurrent runs per organization.
+
+As running the tests takes about 10 minutes, make sure to add a timeout to your
+command (as the default timeout is 10m):
+
+```sh
+$ go test ./... -timeout=15m
+```
+
+## Issues and Contributing
+
+If you find an issue with this package, please report it. We also welcome any
+contributions: fork this repository and submit a pull request.
diff --git a/vendor/github.com/hashicorp/go-tfe/apply.go b/vendor/github.com/hashicorp/go-tfe/apply.go
new file mode 100644
index 00000000..2c92896b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/apply.go
@@ -0,0 +1,132 @@
+package tfe
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"time"
+)
+
+// Compile-time proof of interface implementation.
+var _ Applies = (*applies)(nil)
+
+// Applies describes all the apply related methods that the Terraform
+// Enterprise API supports.
+//
+// TFE API docs: https://www.terraform.io/docs/enterprise/api/apply.html
+type Applies interface {
+	// Read an apply by its ID.
+	Read(ctx context.Context, applyID string) (*Apply, error)
+
+	// Logs retrieves the logs of an apply.
+	Logs(ctx context.Context, applyID string) (io.Reader, error)
+}
+
+// applies implements Applies.
+type applies struct {
+	client *Client
+}
+
+// ApplyStatus represents an apply state.
+type ApplyStatus string
+
+// List all available apply statuses.
+const (
+	ApplyCanceled    ApplyStatus = "canceled"
+	ApplyCreated     ApplyStatus = "created"
+	ApplyErrored     ApplyStatus = "errored"
+	ApplyFinished    ApplyStatus = "finished"
+	ApplyMFAWaiting  ApplyStatus = "mfa_waiting"
+	ApplyPending     ApplyStatus = "pending"
+	ApplyQueued      ApplyStatus = "queued"
+	ApplyRunning     ApplyStatus = "running"
+	ApplyUnreachable ApplyStatus = "unreachable"
+)
+
+// Apply represents a Terraform Enterprise apply.
+type Apply struct {
+	ID                   string                 `jsonapi:"primary,applies"`
+	LogReadURL           string                 `jsonapi:"attr,log-read-url"`
+	ResourceAdditions    int                    `jsonapi:"attr,resource-additions"`
+	ResourceChanges      int                    `jsonapi:"attr,resource-changes"`
+	ResourceDestructions int                    `jsonapi:"attr,resource-destructions"`
+	Status               ApplyStatus            `jsonapi:"attr,status"`
+	StatusTimestamps     *ApplyStatusTimestamps `jsonapi:"attr,status-timestamps"`
+}
+
+// ApplyStatusTimestamps holds the timestamps for individual apply statuses.
+type ApplyStatusTimestamps struct {
+	CanceledAt      time.Time `json:"canceled-at"`
+	ErroredAt       time.Time `json:"errored-at"`
+	FinishedAt      time.Time `json:"finished-at"`
+	ForceCanceledAt time.Time `json:"force-canceled-at"`
+	QueuedAt        time.Time `json:"queued-at"`
+	StartedAt       time.Time `json:"started-at"`
+}
+
+// Read an apply by its ID.
+func (s *applies) Read(ctx context.Context, applyID string) (*Apply, error) {
+	if !validStringID(&applyID) {
+		return nil, errors.New("invalid value for apply ID")
+	}
+
+	u := fmt.Sprintf("applies/%s", url.QueryEscape(applyID))
+	req, err := s.client.newRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	a := &Apply{}
+	err = s.client.do(ctx, req, a)
+	if err != nil {
+		return nil, err
+	}
+
+	return a, nil
+}
+
+// Logs retrieves the logs of an apply.
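+// The returned reader streams the log in chunks, polling the log URL until
+// the apply reaches a final state (canceled, errored, finished, or
+// unreachable). A hedged usage sketch, assuming the Client wires this
+// service up as client.Applies:
+//
+//	logs, err := client.Applies.Logs(ctx, applyID)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if _, err := io.Copy(os.Stdout, logs); err != nil {
+//		log.Fatal(err)
+//	}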
+func (s *applies) Logs(ctx context.Context, applyID string) (io.Reader, error) {
+	if !validStringID(&applyID) {
+		return nil, errors.New("invalid value for apply ID")
+	}
+
+	// Get the apply to make sure it exists.
+	a, err := s.Read(ctx, applyID)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return an error if the log URL is empty.
+	if a.LogReadURL == "" {
+		return nil, fmt.Errorf("apply %s does not have a log URL", applyID)
+	}
+
+	u, err := url.Parse(a.LogReadURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid log URL: %v", err)
+	}
+
+	done := func() (bool, error) {
+		a, err := s.Read(ctx, a.ID)
+		if err != nil {
+			return false, err
+		}
+
+		switch a.Status {
+		case ApplyCanceled, ApplyErrored, ApplyFinished, ApplyUnreachable:
+			return true, nil
+		default:
+			return false, nil
+		}
+	}
+
+	return &LogReader{
+		client: s.client,
+		ctx:    ctx,
+		done:   done,
+		logURL: u,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/configuration_version.go b/vendor/github.com/hashicorp/go-tfe/configuration_version.go
new file mode 100644
index 00000000..9a88b2d4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/configuration_version.go
@@ -0,0 +1,208 @@
+package tfe
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"os"
+	"time"
+
+	slug "github.com/hashicorp/go-slug"
+)
+
+// Compile-time proof of interface implementation.
+var _ ConfigurationVersions = (*configurationVersions)(nil)
+
+// ConfigurationVersions describes all the configuration version related
+// methods that the Terraform Enterprise API supports.
+//
+// TFE API docs:
+// https://www.terraform.io/docs/enterprise/api/configuration-versions.html
+type ConfigurationVersions interface {
+	// List returns all configuration versions of a workspace.
+	List(ctx context.Context, workspaceID string, options ConfigurationVersionListOptions) (*ConfigurationVersionList, error)
+
+	// Create is used to create a new configuration version. The created
+	// configuration version will be usable once data is uploaded to it.
+	Create(ctx context.Context, workspaceID string, options ConfigurationVersionCreateOptions) (*ConfigurationVersion, error)
+
+	// Read a configuration version by its ID.
+	Read(ctx context.Context, cvID string) (*ConfigurationVersion, error)
+
+	// Upload packages and uploads Terraform configuration files. It requires
+	// the upload URL from a configuration version and the full path to the
+	// configuration files on disk.
+	Upload(ctx context.Context, url string, path string) error
+}
+
+// configurationVersions implements ConfigurationVersions.
+type configurationVersions struct {
+	client *Client
+}
+
+// ConfigurationStatus represents a configuration version status.
+type ConfigurationStatus string
+
+// List all available configuration version statuses.
+const (
+	ConfigurationErrored  ConfigurationStatus = "errored"
+	ConfigurationPending  ConfigurationStatus = "pending"
+	ConfigurationUploaded ConfigurationStatus = "uploaded"
+)
+
+// ConfigurationSource represents a source of a configuration version.
+type ConfigurationSource string
+
+// List all available configuration version sources.
+const (
+	ConfigurationSourceAPI       ConfigurationSource = "tfe-api"
+	ConfigurationSourceBitbucket ConfigurationSource = "bitbucket"
+	ConfigurationSourceGithub    ConfigurationSource = "github"
+	ConfigurationSourceGitlab    ConfigurationSource = "gitlab"
+	ConfigurationSourceTerraform ConfigurationSource = "terraform"
+)
+
+// ConfigurationVersionList represents a list of configuration versions.
+type ConfigurationVersionList struct {
+	*Pagination
+	Items []*ConfigurationVersion
+}
+
+// ConfigurationVersion is a representation of an uploaded or ingressed
+// Terraform configuration in TFE. A workspace must have at least one
+// configuration version before any runs may be queued on it.
+type ConfigurationVersion struct {
+	ID               string              `jsonapi:"primary,configuration-versions"`
+	AutoQueueRuns    bool                `jsonapi:"attr,auto-queue-runs"`
+	Error            string              `jsonapi:"attr,error"`
+	ErrorMessage     string              `jsonapi:"attr,error-message"`
+	Source           ConfigurationSource `jsonapi:"attr,source"`
+	Speculative      bool                `jsonapi:"attr,speculative"`
+	Status           ConfigurationStatus `jsonapi:"attr,status"`
+	StatusTimestamps *CVStatusTimestamps `jsonapi:"attr,status-timestamps"`
+	UploadURL        string              `jsonapi:"attr,upload-url"`
+}
+
+// CVStatusTimestamps holds the timestamps for individual configuration version
+// statuses.
+type CVStatusTimestamps struct {
+	FinishedAt time.Time `json:"finished-at"`
+	QueuedAt   time.Time `json:"queued-at"`
+	StartedAt  time.Time `json:"started-at"`
+}
+
+// ConfigurationVersionListOptions represents the options for listing
+// configuration versions.
+type ConfigurationVersionListOptions struct {
+	ListOptions
+}
+
+// List returns all configuration versions of a workspace.
+func (s *configurationVersions) List(ctx context.Context, workspaceID string, options ConfigurationVersionListOptions) (*ConfigurationVersionList, error) {
+	if !validStringID(&workspaceID) {
+		return nil, errors.New("invalid value for workspace ID")
+	}
+
+	u := fmt.Sprintf("workspaces/%s/configuration-versions", url.QueryEscape(workspaceID))
+	req, err := s.client.newRequest("GET", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	cvl := &ConfigurationVersionList{}
+	err = s.client.do(ctx, req, cvl)
+	if err != nil {
+		return nil, err
+	}
+
+	return cvl, nil
+}
+
+// ConfigurationVersionCreateOptions represents the options for creating a
+// configuration version.
+type ConfigurationVersionCreateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,configuration-versions"`
+
+	// When true, runs are queued automatically when the configuration version
+	// is uploaded.
+	AutoQueueRuns *bool `jsonapi:"attr,auto-queue-runs,omitempty"`
+
+	// When true, this configuration version can only be used for planning.
+	Speculative *bool `jsonapi:"attr,speculative,omitempty"`
+}
+
+// Create is used to create a new configuration version. The created
+// configuration version will be usable once data is uploaded to it.
+func (s *configurationVersions) Create(ctx context.Context, workspaceID string, options ConfigurationVersionCreateOptions) (*ConfigurationVersion, error) {
+	if !validStringID(&workspaceID) {
+		return nil, errors.New("invalid value for workspace ID")
+	}
+
+	// Make sure we don't send a user provided ID.
+ options.ID = "" + + u := fmt.Sprintf("workspaces/%s/configuration-versions", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + cv := &ConfigurationVersion{} + err = s.client.do(ctx, req, cv) + if err != nil { + return nil, err + } + + return cv, nil +} + +// Read a configuration version by its ID. +func (s *configurationVersions) Read(ctx context.Context, cvID string) (*ConfigurationVersion, error) { + if !validStringID(&cvID) { + return nil, errors.New("invalid value for configuration version ID") + } + + u := fmt.Sprintf("configuration-versions/%s", url.QueryEscape(cvID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + cv := &ConfigurationVersion{} + err = s.client.do(ctx, req, cv) + if err != nil { + return nil, err + } + + return cv, nil +} + +// Upload packages and uploads Terraform configuration files. It requires the +// upload URL from a configuration version and the path to the configuration +// files on disk. +func (s *configurationVersions) Upload(ctx context.Context, url, path string) error { + file, err := os.Stat(path) + if err != nil { + return err + } + if !file.Mode().IsDir() { + return errors.New("path needs to be an existing directory") + } + + body := bytes.NewBuffer(nil) + + _, err = slug.Pack(path, body, true) + if err != nil { + return err + } + + req, err := s.client.newRequest("PUT", url, body) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/go.mod b/vendor/github.com/hashicorp/go-tfe/go.mod new file mode 100644 index 00000000..f428ce27 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/go.mod @@ -0,0 +1,13 @@ +module github.com/hashicorp/go-tfe + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/google/go-querystring v1.0.0 + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-retryablehttp v0.5.2 + github.com/hashicorp/go-slug v0.3.0 + github.com/hashicorp/go-uuid v1.0.1 + github.com/stretchr/testify v1.3.0 + github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 +) diff --git a/vendor/github.com/hashicorp/go-tfe/go.sum b/vendor/github.com/hashicorp/go-tfe/go.sum new file mode 100644 index 00000000..cc8c7bbb --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/go.sum @@ -0,0 +1,23 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= +github.com/hashicorp/go-retryablehttp v0.5.2/go.mod 
h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-slug v0.3.0 h1:L0c+AvH/J64iMNF4VqRaRku2DMTEuHioPVS7kMjWIU8=
+github.com/hashicorp/go-slug v0.3.0/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI=
+github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
diff --git a/vendor/github.com/hashicorp/go-tfe/logreader.go b/vendor/github.com/hashicorp/go-tfe/logreader.go
new file mode 100644
index 00000000..aee4472f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/logreader.go
@@ -0,0 +1,143 @@
+package tfe
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// LogReader implements io.Reader for streaming logs.
+type LogReader struct {
+	client      *Client
+	ctx         context.Context
+	done        func() (bool, error)
+	logURL      *url.URL
+	offset      int64
+	reads       int
+	startOfText bool
+	endOfText   bool
+}
+
+// backoff will perform exponential backoff based on the iteration and
+// limited by the provided min and max (in milliseconds) durations.
+func backoff(min, max float64, iter int) time.Duration {
+	backoff := math.Pow(2, float64(iter)/5) * min
+	if backoff > max {
+		backoff = max
+	}
+	return time.Duration(backoff) * time.Millisecond
+}
+
+func (r *LogReader) Read(l []byte) (int, error) {
+	if written, err := r.read(l); err != io.ErrNoProgress {
+		return written, err
+	}
+
+	// Loop until we can read any data, the context is canceled or the
+	// run is finished. If we would return right away without any data,
+	// we could end up causing an io.ErrNoProgress error.
+	for r.reads = 1; ; r.reads++ {
+		select {
+		case <-r.ctx.Done():
+			return 0, r.ctx.Err()
+		case <-time.After(backoff(500, 2000, r.reads)):
+			if written, err := r.read(l); err != io.ErrNoProgress {
+				return written, err
+			}
+		}
+	}
+}
+
+func (r *LogReader) read(l []byte) (int, error) {
+	// Update the query string.
+	r.logURL.RawQuery = fmt.Sprintf("limit=%d&offset=%d", len(l), r.offset)
+
+	// Create a new request.
+	req, err := http.NewRequest("GET", r.logURL.String(), nil)
+	if err != nil {
+		return 0, err
+	}
+	req = req.WithContext(r.ctx)
+
+	// Attach the default headers.
+	for k, v := range r.client.headers {
+		req.Header[k] = v
+	}
+
+	// Retrieve the next chunk.
+	resp, err := r.client.http.HTTPClient.Do(req)
+	if err != nil {
+		return 0, err
+	}
+	defer resp.Body.Close()
+
+	// Basic response checking.
+	if err := checkResponseCode(resp); err != nil {
+		return 0, err
+	}
+
+	// Read the retrieved chunk. Ignore io.EOF errors returned when reading
+	// from the response body, as these indicate the end of the chunk and
+	// not the end of the logfile.
+	written, err := resp.Body.Read(l)
+	if err != nil && err != io.EOF {
+		return written, err
+	}
+
+	if written > 0 {
+		// Check for an STX (Start of Text) ASCII control marker.
+		if !r.startOfText && l[0] == byte(2) {
+			r.startOfText = true
+
+			// Remove the STX marker from the received chunk.
+			copy(l[:written-1], l[1:])
+			l[written-1] = byte(0)
+			r.offset++
+			written--
+
+			// Return early if we only received the STX marker.
+			if written == 0 {
+				return 0, io.ErrNoProgress
+			}
+		}
+
+		// If we found an STX ASCII control character, start looking for
+		// the ETX (End of Text) control character.
+		if r.startOfText && l[written-1] == byte(3) {
+			r.endOfText = true
+
+			// Remove the ETX marker from the received chunk.
+			l[written-1] = byte(0)
+			r.offset++
+			written--
+		}
+	}
+
+	// Check if we need to continue the loop and wait (using the backoff
+	// above) before checking if there is a new chunk available or the
+	// run is finished and we are done reading all chunks.
+	if written == 0 {
+		if (r.startOfText && r.endOfText) || // The logstream finished without issues.
+			(r.startOfText && r.reads%10 == 0) || // The logstream terminated unexpectedly.
+			(!r.startOfText && r.reads > 1) { // The logstream doesn't support STX/ETX.
+			done, err := r.done()
+			if err != nil {
+				return 0, err
+			}
+			if done {
+				return 0, io.EOF
+			}
+		}
+		return 0, io.ErrNoProgress
+	}
+
+	// Update the offset for the next read.
+	r.offset += int64(written)
+
+	return written, nil
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/notification_configuration.go b/vendor/github.com/hashicorp/go-tfe/notification_configuration.go
new file mode 100644
index 00000000..fe269e2b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/notification_configuration.go
@@ -0,0 +1,294 @@
+package tfe
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// Compile-time proof of interface implementation.
+var _ NotificationConfigurations = (*notificationConfigurations)(nil)
+
+// NotificationConfigurations describes all the Notification Configuration
+// related methods that the Terraform Enterprise API supports.
+//
+// TFE API docs:
+// https://www.terraform.io/docs/enterprise/api/notification-configurations.html
+type NotificationConfigurations interface {
+	// List all the notification configurations within a workspace.
+	List(ctx context.Context, workspaceID string, options NotificationConfigurationListOptions) (*NotificationConfigurationList, error)
+
+	// Create a new notification configuration with the given options.
+	Create(ctx context.Context, workspaceID string, options NotificationConfigurationCreateOptions) (*NotificationConfiguration, error)
+
+	// Read a notification configuration by its ID.
+	Read(ctx context.Context, notificationConfigurationID string) (*NotificationConfiguration, error)
+
+	// Update an existing notification configuration.
+	Update(ctx context.Context, notificationConfigurationID string, options NotificationConfigurationUpdateOptions) (*NotificationConfiguration, error)
+
+	// Delete a notification configuration by its ID.
+	Delete(ctx context.Context, notificationConfigurationID string) error
+
+	// Verify a notification configuration by its ID.
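+	// Verification is done by delivering a test payload to the
+	// configured URL of the notification configuration.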
+ Verify(ctx context.Context, notificationConfigurationID string) (*NotificationConfiguration, error) +} + +// notificationConfigurations implements NotificationConfigurations. +type notificationConfigurations struct { + client *Client +} + +// List of available notification triggers. +const ( + NotificationTriggerCreated string = "run:created" + NotificationTriggerPlanning string = "run:planning" + NotificationTriggerNeedsAttention string = "run:needs_attention" + NotificationTriggerApplying string = "run:applying" + NotificationTriggerCompleted string = "run:completed" + NotificationTriggerErrored string = "run:errored" +) + +// NotificationDestinationType represents the destination type of the +// notification configuration. +type NotificationDestinationType string + +// List of available notification destination types. +const ( + NotificationDestinationTypeSlack NotificationDestinationType = "slack" + NotificationDestinationTypeGeneric NotificationDestinationType = "generic" +) + +// NotificationConfigurationList represents a list of Notification +// Configurations. +type NotificationConfigurationList struct { + *Pagination + Items []*NotificationConfiguration +} + +// NotificationConfiguration represents a Notification Configuration. +type NotificationConfiguration struct { + ID string `jsonapi:"primary,notification-configurations"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + DeliveryResponses []*DeliveryResponse `jsonapi:"attr,delivery-responses"` + DestinationType NotificationDestinationType `jsonapi:"attr,destination-type"` + Enabled bool `jsonapi:"attr,enabled"` + Name string `jsonapi:"attr,name"` + Token string `jsonapi:"attr,token"` + Triggers []string `jsonapi:"attr,triggers"` + UpdatedAt time.Time `jsonapi:"attr,updated-at,iso8601"` + URL string `jsonapi:"attr,url"` +} + +// DeliveryResponse represents a notification configuration delivery response. +type DeliveryResponse struct { + Body string `json:"body"` + Code int `json:"code"` + Headers http.Header `json:"headers"` + SentAt time.Time `json:"sent-at,iso8601"` + Successful bool `json:"successful"` + URL string `json:"url"` +} + +// NotificationConfigurationListOptions represents the options for listing +// notification configurations. +type NotificationConfigurationListOptions struct { + ListOptions +} + +// List all the notification configurations associated with a workspace. +func (s *notificationConfigurations) List(ctx context.Context, workspaceID string, options NotificationConfigurationListOptions) (*NotificationConfigurationList, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/notification-configurations", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, options) + if err != nil { + return nil, err + } + + ncl := &NotificationConfigurationList{} + err = s.client.do(ctx, req, ncl) + if err != nil { + return nil, err + } + + return ncl, nil +} + +// NotificationConfigurationCreateOptions represents the options for +// creating a new notification configuration. +type NotificationConfigurationCreateOptions struct { + // For internal use only! 
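+	// Create clears this field before the request is sent, so callers
+	// can leave it empty.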
+	ID string `jsonapi:"primary,notification-configurations"`
+
+	// The destination type of the notification configuration
+	DestinationType *NotificationDestinationType `jsonapi:"attr,destination-type"`
+
+	// Whether the notification configuration should be enabled or not
+	Enabled *bool `jsonapi:"attr,enabled"`
+
+	// The name of the notification configuration
+	Name *string `jsonapi:"attr,name"`
+
+	// The token of the notification configuration
+	Token *string `jsonapi:"attr,token,omitempty"`
+
+	// The events that should trigger the notification configuration
+	Triggers []string `jsonapi:"attr,triggers,omitempty"`
+
+	// The url of the notification configuration
+	URL *string `jsonapi:"attr,url"`
+}
+
+func (o NotificationConfigurationCreateOptions) valid() error {
+	if o.DestinationType == nil {
+		return errors.New("destination type is required")
+	}
+	if o.Enabled == nil {
+		return errors.New("enabled is required")
+	}
+	if !validString(o.Name) {
+		return errors.New("name is required")
+	}
+	if !validString(o.URL) {
+		return errors.New("url is required")
+	}
+	return nil
+}
+
+// Creates a notification configuration with the given options.
+func (s *notificationConfigurations) Create(ctx context.Context, workspaceID string, options NotificationConfigurationCreateOptions) (*NotificationConfiguration, error) {
+	if !validStringID(&workspaceID) {
+		return nil, errors.New("invalid value for workspace ID")
+	}
+	if err := options.valid(); err != nil {
+		return nil, err
+	}
+
+	// Make sure we don't send a user provided ID.
+	options.ID = ""
+
+	u := fmt.Sprintf("workspaces/%s/notification-configurations", url.QueryEscape(workspaceID))
+	req, err := s.client.newRequest("POST", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	nc := &NotificationConfiguration{}
+	err = s.client.do(ctx, req, nc)
+	if err != nil {
+		return nil, err
+	}
+
+	return nc, nil
+}
+
+// Read a notification configuration by its ID.
+func (s *notificationConfigurations) Read(ctx context.Context, notificationConfigurationID string) (*NotificationConfiguration, error) {
+	if !validStringID(&notificationConfigurationID) {
+		return nil, errors.New("invalid value for notification configuration ID")
+	}
+
+	u := fmt.Sprintf("notification-configurations/%s", url.QueryEscape(notificationConfigurationID))
+	req, err := s.client.newRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	nc := &NotificationConfiguration{}
+	err = s.client.do(ctx, req, nc)
+	if err != nil {
+		return nil, err
+	}
+
+	return nc, nil
+}
+
+// NotificationConfigurationUpdateOptions represents the options for
+// updating an existing notification configuration.
+type NotificationConfigurationUpdateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,notification-configurations"`
+
+	// Whether the notification configuration should be enabled or not
+	Enabled *bool `jsonapi:"attr,enabled,omitempty"`
+
+	// The name of the notification configuration
+	Name *string `jsonapi:"attr,name,omitempty"`
+
+	// The token of the notification configuration
+	Token *string `jsonapi:"attr,token,omitempty"`
+
+	// The events that should trigger the notification configuration
+	Triggers []string `jsonapi:"attr,triggers,omitempty"`
+
+	// The url of the notification configuration
+	URL *string `jsonapi:"attr,url,omitempty"`
+}
+
+// Updates a notification configuration with the given options.
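+// Fields left nil in the options are tagged omitempty and are not sent,
+// so only the attributes you set are changed.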
+func (s *notificationConfigurations) Update(ctx context.Context, notificationConfigurationID string, options NotificationConfigurationUpdateOptions) (*NotificationConfiguration, error) {
+	if !validStringID(&notificationConfigurationID) {
+		return nil, errors.New("invalid value for notification configuration ID")
+	}
+
+	// Make sure we don't send a user provided ID.
+	options.ID = ""
+
+	u := fmt.Sprintf("notification-configurations/%s", url.QueryEscape(notificationConfigurationID))
+	req, err := s.client.newRequest("PATCH", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	nc := &NotificationConfiguration{}
+	err = s.client.do(ctx, req, nc)
+	if err != nil {
+		return nil, err
+	}
+
+	return nc, nil
+}
+
+// Delete a notification configuration by its ID.
+func (s *notificationConfigurations) Delete(ctx context.Context, notificationConfigurationID string) error {
+	if !validStringID(&notificationConfigurationID) {
+		return errors.New("invalid value for notification configuration ID")
+	}
+
+	u := fmt.Sprintf("notification-configurations/%s", url.QueryEscape(notificationConfigurationID))
+	req, err := s.client.newRequest("DELETE", u, nil)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
+
+// Verifies a notification configuration by delivering a verification
+// payload to the configured URL.
+func (s *notificationConfigurations) Verify(ctx context.Context, notificationConfigurationID string) (*NotificationConfiguration, error) {
+	if !validStringID(&notificationConfigurationID) {
+		return nil, errors.New("invalid value for notification configuration ID")
+	}
+
+	u := fmt.Sprintf(
+		"notification-configurations/%s/actions/verify", url.QueryEscape(notificationConfigurationID))
+	req, err := s.client.newRequest("POST", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	nc := &NotificationConfiguration{}
+	err = s.client.do(ctx, req, nc)
+	if err != nil {
+		return nil, err
+	}
+
+	return nc, nil
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_client.go b/vendor/github.com/hashicorp/go-tfe/oauth_client.go
new file mode 100644
index 00000000..b0b16bfb
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/oauth_client.go
@@ -0,0 +1,199 @@
+package tfe
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"time"
+)
+
+// Compile-time proof of interface implementation.
+var _ OAuthClients = (*oAuthClients)(nil)
+
+// OAuthClients describes all the OAuth client related methods that the
+// Terraform Enterprise API supports.
+//
+// TFE API docs:
+// https://www.terraform.io/docs/enterprise/api/oauth-clients.html
+type OAuthClients interface {
+	// List all the OAuth clients for a given organization.
+	List(ctx context.Context, organization string, options OAuthClientListOptions) (*OAuthClientList, error)
+
+	// Create an OAuth client to connect an organization and a VCS provider.
+	Create(ctx context.Context, organization string, options OAuthClientCreateOptions) (*OAuthClient, error)
+
+	// Read an OAuth client by its ID.
+	Read(ctx context.Context, oAuthClientID string) (*OAuthClient, error)
+
+	// Delete an OAuth client by its ID.
+	Delete(ctx context.Context, oAuthClientID string) error
+}
+
+// oAuthClients implements OAuthClients.
+type oAuthClients struct {
+	client *Client
+}
+
+// ServiceProviderType represents a VCS type.
+type ServiceProviderType string
+
+// List of available VCS types.
+const ( + ServiceProviderBitbucket ServiceProviderType = "bitbucket_hosted" + ServiceProviderBitbucketServer ServiceProviderType = "bitbucket_server" + ServiceProviderGithub ServiceProviderType = "github" + ServiceProviderGithubEE ServiceProviderType = "github_enterprise" + ServiceProviderGitlab ServiceProviderType = "gitlab_hosted" + ServiceProviderGitlabCE ServiceProviderType = "gitlab_community_edition" + ServiceProviderGitlabEE ServiceProviderType = "gitlab_enterprise_edition" +) + +// OAuthClientList represents a list of OAuth clients. +type OAuthClientList struct { + *Pagination + Items []*OAuthClient +} + +// OAuthClient represents a connection between an organization and a VCS +// provider. +type OAuthClient struct { + ID string `jsonapi:"primary,oauth-clients"` + APIURL string `jsonapi:"attr,api-url"` + CallbackURL string `jsonapi:"attr,callback-url"` + ConnectPath string `jsonapi:"attr,connect-path"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + HTTPURL string `jsonapi:"attr,http-url"` + Key string `jsonapi:"attr,key"` + RSAPublicKey string `jsonapi:"attr,rsa-public-key"` + ServiceProvider ServiceProviderType `jsonapi:"attr,service-provider"` + ServiceProviderName string `jsonapi:"attr,service-provider-display-name"` + + // Relations + Organization *Organization `jsonapi:"relation,organization"` + OAuthTokens []*OAuthToken `jsonapi:"relation,oauth-tokens"` +} + +// OAuthClientListOptions represents the options for listing +// OAuth clients. +type OAuthClientListOptions struct { + ListOptions +} + +// List all the OAuth clients for a given organization. +func (s *oAuthClients) List(ctx context.Context, organization string, options OAuthClientListOptions) (*OAuthClientList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/oauth-clients", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + ocl := &OAuthClientList{} + err = s.client.do(ctx, req, ocl) + if err != nil { + return nil, err + } + + return ocl, nil +} + +// OAuthClientCreateOptions represents the options for creating an OAuth client. +type OAuthClientCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,oauth-clients"` + + // The base URL of your VCS provider's API. + APIURL *string `jsonapi:"attr,api-url"` + + // The homepage of your VCS provider. + HTTPURL *string `jsonapi:"attr,http-url"` + + // The token string you were given by your VCS provider. + OAuthToken *string `jsonapi:"attr,oauth-token-string"` + + // The VCS provider being connected with. + ServiceProvider *ServiceProviderType `jsonapi:"attr,service-provider"` +} + +func (o OAuthClientCreateOptions) valid() error { + if !validString(o.APIURL) { + return errors.New("API URL is required") + } + if !validString(o.HTTPURL) { + return errors.New("HTTP URL is required") + } + if !validString(o.OAuthToken) { + return errors.New("OAuth token is required") + } + if o.ServiceProvider == nil { + return errors.New("service provider is required") + } + return nil +} + +// Create an OAuth client to connect an organization and a VCS provider. 
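+// The required fields (API URL, HTTP URL, OAuth token, and service provider)
+// are validated client-side before the request is sent.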
+func (s *oAuthClients) Create(ctx context.Context, organization string, options OAuthClientCreateOptions) (*OAuthClient, error) {
+	if !validStringID(&organization) {
+		return nil, errors.New("invalid value for organization")
+	}
+	if err := options.valid(); err != nil {
+		return nil, err
+	}
+
+	// Make sure we don't send a user provided ID.
+	options.ID = ""
+
+	u := fmt.Sprintf("organizations/%s/oauth-clients", url.QueryEscape(organization))
+	req, err := s.client.newRequest("POST", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	oc := &OAuthClient{}
+	err = s.client.do(ctx, req, oc)
+	if err != nil {
+		return nil, err
+	}
+
+	return oc, nil
+}
+
+// Read an OAuth client by its ID.
+func (s *oAuthClients) Read(ctx context.Context, oAuthClientID string) (*OAuthClient, error) {
+	if !validStringID(&oAuthClientID) {
+		return nil, errors.New("invalid value for OAuth client ID")
+	}
+
+	u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID))
+	req, err := s.client.newRequest("GET", u, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	oc := &OAuthClient{}
+	err = s.client.do(ctx, req, oc)
+	if err != nil {
+		return nil, err
+	}
+
+	return oc, err
+}
+
+// Delete an OAuth client by its ID.
+func (s *oAuthClients) Delete(ctx context.Context, oAuthClientID string) error {
+	if !validStringID(&oAuthClientID) {
+		return errors.New("invalid value for OAuth client ID")
+	}
+
+	u := fmt.Sprintf("oauth-clients/%s", url.QueryEscape(oAuthClientID))
+	req, err := s.client.newRequest("DELETE", u, nil)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/oauth_token.go b/vendor/github.com/hashicorp/go-tfe/oauth_token.go
new file mode 100644
index 00000000..73ab4801
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/oauth_token.go
@@ -0,0 +1,150 @@
+package tfe
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"time"
+)
+
+// Compile-time proof of interface implementation.
+var _ OAuthTokens = (*oAuthTokens)(nil)
+
+// OAuthTokens describes all the OAuth token related methods that the
+// Terraform Enterprise API supports.
+//
+// TFE API docs:
+// https://www.terraform.io/docs/enterprise/api/oauth-tokens.html
+type OAuthTokens interface {
+	// List all the OAuth tokens for a given organization.
+	List(ctx context.Context, organization string, options OAuthTokenListOptions) (*OAuthTokenList, error)
+
+	// Read an OAuth token by its ID.
+	Read(ctx context.Context, oAuthTokenID string) (*OAuthToken, error)
+
+	// Update an existing OAuth token.
+	Update(ctx context.Context, oAuthTokenID string, options OAuthTokenUpdateOptions) (*OAuthToken, error)
+
+	// Delete an OAuth token by its ID.
+	Delete(ctx context.Context, oAuthTokenID string) error
+}
+
+// oAuthTokens implements OAuthTokens.
+type oAuthTokens struct {
+	client *Client
+}
+
+// OAuthTokenList represents a list of OAuth tokens.
+type OAuthTokenList struct {
+	*Pagination
+	Items []*OAuthToken
+}
+
+// OAuthToken represents a VCS configuration including the associated
+// OAuth token.
+type OAuthToken struct {
+	ID                  string    `jsonapi:"primary,oauth-tokens"`
+	UID                 string    `jsonapi:"attr,uid"`
+	CreatedAt           time.Time `jsonapi:"attr,created-at,iso8601"`
+	HasSSHKey           bool      `jsonapi:"attr,has-ssh-key"`
+	ServiceProviderUser string    `jsonapi:"attr,service-provider-user"`
+
+	// Relations
+	OAuthClient *OAuthClient `jsonapi:"relation,oauth-client"`
+}
+
+// OAuthTokenListOptions represents the options for listing
+// OAuth tokens.
+type OAuthTokenListOptions struct { + ListOptions +} + +// List all the OAuth tokens for a given organization. +func (s *oAuthTokens) List(ctx context.Context, organization string, options OAuthTokenListOptions) (*OAuthTokenList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/oauth-tokens", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + otl := &OAuthTokenList{} + err = s.client.do(ctx, req, otl) + if err != nil { + return nil, err + } + + return otl, nil +} + +// Read an OAuth token by its ID. +func (s *oAuthTokens) Read(ctx context.Context, oAuthTokenID string) (*OAuthToken, error) { + if !validStringID(&oAuthTokenID) { + return nil, errors.New("invalid value for OAuth token ID") + } + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ot := &OAuthToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// OAuthTokenUpdateOptions represents the options for updating an OAuth token. +type OAuthTokenUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,oauth-tokens"` + + // A private SSH key to be used for git clone operations. + PrivateSSHKey *string `jsonapi:"attr,ssh-key"` +} + +// Update an existing OAuth token. +func (s *oAuthTokens) Update(ctx context.Context, oAuthTokenID string, options OAuthTokenUpdateOptions) (*OAuthToken, error) { + if !validStringID(&oAuthTokenID) { + return nil, errors.New("invalid value for OAuth token ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + ot := &OAuthToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Delete an OAuth token by its ID. +func (s *oAuthTokens) Delete(ctx context.Context, oAuthTokenID string) error { + if !validStringID(&oAuthTokenID) { + return errors.New("invalid value for OAuth token ID") + } + + u := fmt.Sprintf("oauth-tokens/%s", url.QueryEscape(oAuthTokenID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/organization.go b/vendor/github.com/hashicorp/go-tfe/organization.go new file mode 100644 index 00000000..e3a14f69 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/organization.go @@ -0,0 +1,360 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Organizations = (*organizations)(nil) + +// Organizations describes all the organization related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/organizations.html +type Organizations interface { + // List all the organizations visible to the current user. + List(ctx context.Context, options OrganizationListOptions) (*OrganizationList, error) + + // Create a new organization with the given options. + Create(ctx context.Context, options OrganizationCreateOptions) (*Organization, error) + + // Read an organization by its name. 
+ Read(ctx context.Context, organization string) (*Organization, error) + + // Update attributes of an existing organization. + Update(ctx context.Context, organization string, options OrganizationUpdateOptions) (*Organization, error) + + // Delete an organization by its name. + Delete(ctx context.Context, organization string) error + + // Capacity shows the current run capacity of an organization. + Capacity(ctx context.Context, organization string) (*Capacity, error) + + // Entitlements shows the entitlements of an organization. + Entitlements(ctx context.Context, organization string) (*Entitlements, error) + + // RunQueue shows the current run queue of an organization. + RunQueue(ctx context.Context, organization string, options RunQueueOptions) (*RunQueue, error) +} + +// organizations implements Organizations. +type organizations struct { + client *Client +} + +// AuthPolicyType represents an authentication policy type. +type AuthPolicyType string + +// List of available authentication policies. +const ( + AuthPolicyPassword AuthPolicyType = "password" + AuthPolicyTwoFactor AuthPolicyType = "two_factor_mandatory" +) + +// EnterprisePlanType represents an enterprise plan type. +type EnterprisePlanType string + +// List of available enterprise plan types. +const ( + EnterprisePlanDisabled EnterprisePlanType = "disabled" + EnterprisePlanPremium EnterprisePlanType = "premium" + EnterprisePlanPro EnterprisePlanType = "pro" + EnterprisePlanTrial EnterprisePlanType = "trial" +) + +// OrganizationList represents a list of organizations. +type OrganizationList struct { + *Pagination + Items []*Organization +} + +// Organization represents a Terraform Enterprise organization. +type Organization struct { + Name string `jsonapi:"primary,organizations"` + CollaboratorAuthPolicy AuthPolicyType `jsonapi:"attr,collaborator-auth-policy"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Email string `jsonapi:"attr,email"` + EnterprisePlan EnterprisePlanType `jsonapi:"attr,enterprise-plan"` + OwnersTeamSAMLRoleID string `jsonapi:"attr,owners-team-saml-role-id"` + Permissions *OrganizationPermissions `jsonapi:"attr,permissions"` + SAMLEnabled bool `jsonapi:"attr,saml-enabled"` + SessionRemember int `jsonapi:"attr,session-remember"` + SessionTimeout int `jsonapi:"attr,session-timeout"` + TrialExpiresAt time.Time `jsonapi:"attr,trial-expires-at,iso8601"` + TwoFactorConformant bool `jsonapi:"attr,two-factor-conformant"` +} + +// Capacity represents the current run capacity of an organization. +type Capacity struct { + Organization string `jsonapi:"primary,organization-capacity"` + Pending int `jsonapi:"attr,pending"` + Running int `jsonapi:"attr,running"` +} + +// Entitlements represents the entitlements of an organization. +type Entitlements struct { + ID string `jsonapi:"primary,entitlement-sets"` + Operations bool `jsonapi:"attr,operations"` + PrivateModuleRegistry bool `jsonapi:"attr,private-module-registry"` + Sentinel bool `jsonapi:"attr,sentinel"` + StateStorage bool `jsonapi:"attr,state-storage"` + Teams bool `jsonapi:"attr,teams"` + VCSIntegrations bool `jsonapi:"attr,vcs-integrations"` +} + +// RunQueue represents the current run queue of an organization. +type RunQueue struct { + *Pagination + Items []*Run +} + +// OrganizationPermissions represents the organization permissions. 
+type OrganizationPermissions struct { + CanCreateTeam bool `json:"can-create-team"` + CanCreateWorkspace bool `json:"can-create-workspace"` + CanCreateWorkspaceMigration bool `json:"can-create-workspace-migration"` + CanDestroy bool `json:"can-destroy"` + CanTraverse bool `json:"can-traverse"` + CanUpdate bool `json:"can-update"` + CanUpdateAPIToken bool `json:"can-update-api-token"` + CanUpdateOAuth bool `json:"can-update-oauth"` + CanUpdateSentinel bool `json:"can-update-sentinel"` +} + +// OrganizationListOptions represents the options for listing organizations. +type OrganizationListOptions struct { + ListOptions +} + +// List all the organizations visible to the current user. +func (s *organizations) List(ctx context.Context, options OrganizationListOptions) (*OrganizationList, error) { + req, err := s.client.newRequest("GET", "organizations", &options) + if err != nil { + return nil, err + } + + orgl := &OrganizationList{} + err = s.client.do(ctx, req, orgl) + if err != nil { + return nil, err + } + + return orgl, nil +} + +// OrganizationCreateOptions represents the options for creating an organization. +type OrganizationCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,organizations"` + + // Name of the organization. + Name *string `jsonapi:"attr,name"` + + // Admin email address. + Email *string `jsonapi:"attr,email"` + + // Session expiration (minutes). + SessionRemember *int `jsonapi:"attr,session-remember,omitempty"` + + // Session timeout after inactivity (minutes). + SessionTimeout *int `jsonapi:"attr,session-timeout,omitempty"` + + // Authentication policy. + CollaboratorAuthPolicy *AuthPolicyType `jsonapi:"attr,collaborator-auth-policy,omitempty"` + + // The name of the "owners" team + OwnersTeamSAMLRoleID *string `jsonapi:"attr,owners-team-saml-role-id,omitempty"` +} + +func (o OrganizationCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("name is required") + } + if !validStringID(o.Name) { + return errors.New("invalid value for name") + } + if !validString(o.Email) { + return errors.New("email is required") + } + return nil +} + +// Create a new organization with the given options. +func (s *organizations) Create(ctx context.Context, options OrganizationCreateOptions) (*Organization, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "organizations", &options) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// Read an organization by its name. +func (s *organizations) Read(ctx context.Context, organization string) (*Organization, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// OrganizationUpdateOptions represents the options for updating an organization. +type OrganizationUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,organizations"` + + // New name for the organization. + Name *string `jsonapi:"attr,name,omitempty"` + + // New admin email address. 
+ Email *string `jsonapi:"attr,email,omitempty"` + + // Session expiration (minutes). + SessionRemember *int `jsonapi:"attr,session-remember,omitempty"` + + // Session timeout after inactivity (minutes). + SessionTimeout *int `jsonapi:"attr,session-timeout,omitempty"` + + // Authentication policy. + CollaboratorAuthPolicy *AuthPolicyType `jsonapi:"attr,collaborator-auth-policy,omitempty"` + + // The name of the "owners" team + OwnersTeamSAMLRoleID *string `jsonapi:"attr,owners-team-saml-role-id,omitempty"` +} + +// Update attributes of an existing organization. +func (s *organizations) Update(ctx context.Context, organization string, options OrganizationUpdateOptions) (*Organization, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + org := &Organization{} + err = s.client.do(ctx, req, org) + if err != nil { + return nil, err + } + + return org, nil +} + +// Delete an organization by its name. +func (s *organizations) Delete(ctx context.Context, organization string) error { + if !validStringID(&organization) { + return errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s", url.QueryEscape(organization)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Capacity shows the currently used capacity of an organization. +func (s *organizations) Capacity(ctx context.Context, organization string) (*Capacity, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/capacity", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + c := &Capacity{} + err = s.client.do(ctx, req, c) + if err != nil { + return nil, err + } + + return c, nil +} + +// Entitlements shows the entitlements of an organization. +func (s *organizations) Entitlements(ctx context.Context, organization string) (*Entitlements, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/entitlement-set", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + e := &Entitlements{} + err = s.client.do(ctx, req, e) + if err != nil { + return nil, err + } + + return e, nil +} + +// RunQueueOptions represents the options for showing the queue. +type RunQueueOptions struct { + ListOptions +} + +// RunQueue shows the current run queue of an organization. 
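+//
+// A minimal usage sketch (illustrative; assumes a configured *Client named
+// client and a ctx context.Context):
+//
+//	rq, err := client.Organizations.RunQueue(ctx, "my-org", RunQueueOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, r := range rq.Items {
+//		fmt.Println(r.ID, r.Status)
+//	}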
+func (s *organizations) RunQueue(ctx context.Context, organization string, options RunQueueOptions) (*RunQueue, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/runs/queue", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + rq := &RunQueue{} + err = s.client.do(ctx, req, rq) + if err != nil { + return nil, err + } + + return rq, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/organization_token.go b/vendor/github.com/hashicorp/go-tfe/organization_token.go new file mode 100644 index 00000000..e2b0e0df --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/organization_token.go @@ -0,0 +1,99 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ OrganizationTokens = (*organizationTokens)(nil) + +// OrganizationTokens describes all the organization token related methods +// that the Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/organization-tokens.html +type OrganizationTokens interface { + // Generate a new organization token, replacing any existing token. + Generate(ctx context.Context, organization string) (*OrganizationToken, error) + + // Read an organization token. + Read(ctx context.Context, organization string) (*OrganizationToken, error) + + // Delete an organization token. + Delete(ctx context.Context, organization string) error +} + +// organizationTokens implements OrganizationTokens. +type organizationTokens struct { + client *Client +} + +// OrganizationToken represents a Terraform Enterprise organization token. +type OrganizationToken struct { + ID string `jsonapi:"primary,authentication-tokens"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Description string `jsonapi:"attr,description"` + LastUsedAt time.Time `jsonapi:"attr,last-used-at,iso8601"` + Token string `jsonapi:"attr,token"` +} + +// Generate a new organization token, replacing any existing token. +func (s *organizationTokens) Generate(ctx context.Context, organization string) (*OrganizationToken, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + ot := &OrganizationToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Read an organization token. +func (s *organizationTokens) Read(ctx context.Context, organization string) (*OrganizationToken, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ot := &OrganizationToken{} + err = s.client.do(ctx, req, ot) + if err != nil { + return nil, err + } + + return ot, err +} + +// Delete an organization token. 
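+//
+// A minimal usage sketch (illustrative; assumes a configured *Client named
+// client):
+//
+//	if err := client.OrganizationTokens.Delete(ctx, "my-org"); err != nil {
+//		log.Fatal(err)
+//	}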
+func (s *organizationTokens) Delete(ctx context.Context, organization string) error { + if !validStringID(&organization) { + return errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/authentication-token", url.QueryEscape(organization)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/plan.go b/vendor/github.com/hashicorp/go-tfe/plan.go new file mode 100644 index 00000000..328380f4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/plan.go @@ -0,0 +1,136 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "io" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Plans = (*plans)(nil) + +// Plans describes all the plan related methods that the Terraform Enterprise +// API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/plan.html +type Plans interface { + // Read a plan by its ID. + Read(ctx context.Context, planID string) (*Plan, error) + + // Logs retrieves the logs of a plan. + Logs(ctx context.Context, planID string) (io.Reader, error) +} + +// plans implements Plans. +type plans struct { + client *Client +} + +// PlanStatus represents a plan state. +type PlanStatus string + +//List all available plan statuses. +const ( + PlanCanceled PlanStatus = "canceled" + PlanCreated PlanStatus = "created" + PlanErrored PlanStatus = "errored" + PlanFinished PlanStatus = "finished" + PlanMFAWaiting PlanStatus = "mfa_waiting" + PlanPending PlanStatus = "pending" + PlanQueued PlanStatus = "queued" + PlanRunning PlanStatus = "running" + PlanUnreachable PlanStatus = "unreachable" +) + +// Plan represents a Terraform Enterprise plan. +type Plan struct { + ID string `jsonapi:"primary,plans"` + HasChanges bool `jsonapi:"attr,has-changes"` + LogReadURL string `jsonapi:"attr,log-read-url"` + ResourceAdditions int `jsonapi:"attr,resource-additions"` + ResourceChanges int `jsonapi:"attr,resource-changes"` + ResourceDestructions int `jsonapi:"attr,resource-destructions"` + Status PlanStatus `jsonapi:"attr,status"` + StatusTimestamps *PlanStatusTimestamps `jsonapi:"attr,status-timestamps"` + + // Relations + Exports []*PlanExport `jsonapi:"relation,exports"` +} + +// PlanStatusTimestamps holds the timestamps for individual plan statuses. +type PlanStatusTimestamps struct { + CanceledAt time.Time `json:"canceled-at"` + ErroredAt time.Time `json:"errored-at"` + FinishedAt time.Time `json:"finished-at"` + ForceCanceledAt time.Time `json:"force-canceled-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` +} + +// Read a plan by its ID. +func (s *plans) Read(ctx context.Context, planID string) (*Plan, error) { + if !validStringID(&planID) { + return nil, errors.New("invalid value for plan ID") + } + + u := fmt.Sprintf("plans/%s", url.QueryEscape(planID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + p := &Plan{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, nil +} + +// Logs retrieves the logs of a plan. +func (s *plans) Logs(ctx context.Context, planID string) (io.Reader, error) { + if !validStringID(&planID) { + return nil, errors.New("invalid value for plan ID") + } + + // Get the plan to make sure it exists. + p, err := s.Read(ctx, planID) + if err != nil { + return nil, err + } + + // Return an error if the log URL is empty. 
+	if p.LogReadURL == "" {
+		return nil, fmt.Errorf("plan %s does not have a log URL", planID)
+	}
+
+	u, err := url.Parse(p.LogReadURL)
+	if err != nil {
+		return nil, fmt.Errorf("invalid log URL: %v", err)
+	}
+
+	done := func() (bool, error) {
+		p, err := s.Read(ctx, p.ID)
+		if err != nil {
+			return false, err
+		}
+
+		switch p.Status {
+		case PlanCanceled, PlanErrored, PlanFinished, PlanUnreachable:
+			return true, nil
+		default:
+			return false, nil
+		}
+	}
+
+	return &LogReader{
+		client: s.client,
+		ctx:    ctx,
+		done:   done,
+		logURL: u,
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/plan_export.go b/vendor/github.com/hashicorp/go-tfe/plan_export.go
new file mode 100644
index 00000000..cf9e9ced
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/plan_export.go
@@ -0,0 +1,175 @@
+package tfe
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+	"time"
+)
+
+// Compile-time proof of interface implementation.
+var _ PlanExports = (*planExports)(nil)
+
+// PlanExports describes all the plan export related methods that the Terraform
+// Enterprise API supports.
+//
+// TFE API docs: https://www.terraform.io/docs/enterprise/api/plan-exports.html
+type PlanExports interface {
+	// Export a plan by its ID with the given options.
+	Create(ctx context.Context, options PlanExportCreateOptions) (*PlanExport, error)
+
+	// Read a plan export by its ID.
+	Read(ctx context.Context, planExportID string) (*PlanExport, error)
+
+	// Delete a plan export by its ID.
+	Delete(ctx context.Context, planExportID string) error
+
+	// Download the data of a plan export.
+	Download(ctx context.Context, planExportID string) ([]byte, error)
+}
+
+// planExports implements PlanExports.
+type planExports struct {
+	client *Client
+}
+
+// PlanExportDataType represents the type of data exported from a plan.
+type PlanExportDataType string
+
+// List all available plan export data types.
+const (
+	PlanExportSentinelMockBundleV0 PlanExportDataType = "sentinel-mock-bundle-v0"
+)
+
+// PlanExportStatus represents a plan export state.
+type PlanExportStatus string
+
+// List all available plan export statuses.
+const (
+	PlanExportCanceled PlanExportStatus = "canceled"
+	PlanExportErrored  PlanExportStatus = "errored"
+	PlanExportExpired  PlanExportStatus = "expired"
+	PlanExportFinished PlanExportStatus = "finished"
+	PlanExportPending  PlanExportStatus = "pending"
+	PlanExportQueued   PlanExportStatus = "queued"
+)
+
+// PlanExportStatusTimestamps holds the timestamps for plan export statuses.
+type PlanExportStatusTimestamps struct {
+	CanceledAt time.Time `json:"canceled-at"`
+	ErroredAt  time.Time `json:"errored-at"`
+	ExpiredAt  time.Time `json:"expired-at"`
+	FinishedAt time.Time `json:"finished-at"`
+	QueuedAt   time.Time `json:"queued-at"`
+}
+
+// PlanExport represents an export of Terraform Enterprise plan data.
+type PlanExport struct {
+	ID               string                      `jsonapi:"primary,plan-exports"`
+	DataType         PlanExportDataType          `jsonapi:"attr,data-type"`
+	Status           PlanExportStatus            `jsonapi:"attr,status"`
+	StatusTimestamps *PlanExportStatusTimestamps `jsonapi:"attr,status-timestamps"`
+}
+
+// PlanExportCreateOptions represents the options for exporting data from a plan.
+type PlanExportCreateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,plan-exports"`
+
+	// The plan to export.
+	Plan *Plan `jsonapi:"relation,plan"`
+
+	// The data type of the plan export.
+ DataType *PlanExportDataType `jsonapi:"attr,data-type"` +} + +func (o PlanExportCreateOptions) valid() error { + if o.Plan == nil { + return errors.New("plan is required") + } + if o.DataType == nil { + return errors.New("data type is required") + } + return nil +} + +func (s *planExports) Create(ctx context.Context, options PlanExportCreateOptions) (*PlanExport, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "plan-exports", &options) + if err != nil { + return nil, err + } + + pe := &PlanExport{} + err = s.client.do(ctx, req, pe) + if err != nil { + return nil, err + } + + return pe, err +} + +// Read a plan export by its ID. +func (s *planExports) Read(ctx context.Context, planExportID string) (*PlanExport, error) { + if !validStringID(&planExportID) { + return nil, errors.New("invalid value for plan export ID") + } + + u := fmt.Sprintf("plan-exports/%s", url.QueryEscape(planExportID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + pe := &PlanExport{} + err = s.client.do(ctx, req, pe) + if err != nil { + return nil, err + } + + return pe, nil +} + +// Delete a plan export by ID. +func (s *planExports) Delete(ctx context.Context, planExportID string) error { + if !validStringID(&planExportID) { + return errors.New("invalid value for plan export ID") + } + + u := fmt.Sprintf("plan-exports/%s", url.QueryEscape(planExportID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Download a plan export's data. Data is exported in a .tar.gz format. +func (s *planExports) Download(ctx context.Context, planExportID string) ([]byte, error) { + if !validStringID(&planExportID) { + return nil, errors.New("invalid value for plan export ID") + } + + u := fmt.Sprintf("plan-exports/%s/download", url.QueryEscape(planExportID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = s.client.do(ctx, req, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/policy.go b/vendor/github.com/hashicorp/go-tfe/policy.go new file mode 100644 index 00000000..b558b260 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy.go @@ -0,0 +1,286 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Policies = (*policies)(nil) + +// Policies describes all the policy related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/policies.html +type Policies interface { + // List all the policies for a given organization + List(ctx context.Context, organization string, options PolicyListOptions) (*PolicyList, error) + + // Create a policy and associate it with an organization. + Create(ctx context.Context, organization string, options PolicyCreateOptions) (*Policy, error) + + // Read a policy by its ID. + Read(ctx context.Context, policyID string) (*Policy, error) + + // Update an existing policy. + Update(ctx context.Context, policyID string, options PolicyUpdateOptions) (*Policy, error) + + // Delete a policy by its ID. + Delete(ctx context.Context, policyID string) error + + // Upload the policy content of the policy. 
+	Upload(ctx context.Context, policyID string, content []byte) error
+
+	// Download the policy content of the policy.
+	Download(ctx context.Context, policyID string) ([]byte, error)
+}
+
+// policies implements Policies.
+type policies struct {
+	client *Client
+}
+
+// EnforcementLevel represents an enforcement level.
+type EnforcementLevel string
+
+// List the available enforcement types.
+const (
+	EnforcementAdvisory EnforcementLevel = "advisory"
+	EnforcementHard     EnforcementLevel = "hard-mandatory"
+	EnforcementSoft     EnforcementLevel = "soft-mandatory"
+)
+
+// PolicyList represents a list of policies.
+type PolicyList struct {
+	*Pagination
+	Items []*Policy
+}
+
+// Policy represents a Terraform Enterprise policy.
+type Policy struct {
+	ID             string         `jsonapi:"primary,policies"`
+	Name           string         `jsonapi:"attr,name"`
+	Description    string         `jsonapi:"attr,description"`
+	Enforce        []*Enforcement `jsonapi:"attr,enforce"`
+	PolicySetCount int            `jsonapi:"attr,policy-set-count"`
+	UpdatedAt      time.Time      `jsonapi:"attr,updated-at,iso8601"`
+
+	// Relations
+	Organization *Organization `jsonapi:"relation,organization"`
+}
+
+// Enforcement describes an enforcement.
+type Enforcement struct {
+	Path string           `json:"path"`
+	Mode EnforcementLevel `json:"mode"`
+}
+
+// PolicyListOptions represents the options for listing policies.
+type PolicyListOptions struct {
+	ListOptions
+
+	// A search string (partial policy name) used to filter the results.
+	Search *string `url:"search[name],omitempty"`
+}
+
+// List all the policies for a given organization.
+func (s *policies) List(ctx context.Context, organization string, options PolicyListOptions) (*PolicyList, error) {
+	if !validStringID(&organization) {
+		return nil, errors.New("invalid value for organization")
+	}
+
+	u := fmt.Sprintf("organizations/%s/policies", url.QueryEscape(organization))
+	req, err := s.client.newRequest("GET", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	pl := &PolicyList{}
+	err = s.client.do(ctx, req, pl)
+	if err != nil {
+		return nil, err
+	}
+
+	return pl, nil
+}
+
+// PolicyCreateOptions represents the options for creating a new policy.
+type PolicyCreateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,policies"`
+
+	// The name of the policy.
+	Name *string `jsonapi:"attr,name"`
+
+	// A description of the policy's purpose.
+	Description *string `jsonapi:"attr,description,omitempty"`
+
+	// The enforcements of the policy.
+	Enforce []*EnforcementOptions `jsonapi:"attr,enforce"`
+}
+
+// EnforcementOptions represents the enforcement options of a policy.
+type EnforcementOptions struct {
+	Path *string           `json:"path,omitempty"`
+	Mode *EnforcementLevel `json:"mode"`
+}
+
+func (o PolicyCreateOptions) valid() error {
+	if !validString(o.Name) {
+		return errors.New("name is required")
+	}
+	if !validStringID(o.Name) {
+		return errors.New("invalid value for name")
+	}
+	if o.Enforce == nil {
+		return errors.New("enforce is required")
+	}
+	for _, e := range o.Enforce {
+		if !validString(e.Path) {
+			return errors.New("enforcement path is required")
+		}
+		if e.Mode == nil {
+			return errors.New("enforcement mode is required")
+		}
+	}
+	return nil
+}
+
+// Create a policy and associate it with an organization.
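+//
+// A minimal usage sketch (illustrative; assumes a configured *Client named
+// client and this package's String pointer helper):
+//
+//	mode := EnforcementSoft
+//	p, err := client.Policies.Create(ctx, "my-org", PolicyCreateOptions{
+//		Name: String("my-policy"),
+//		Enforce: []*EnforcementOptions{
+//			{Path: String("my-policy.sentinel"), Mode: &mode},
+//		},
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}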
+func (s *policies) Create(ctx context.Context, organization string, options PolicyCreateOptions) (*Policy, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/policies", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// Read a policy by its ID. +func (s *policies) Read(ctx context.Context, policyID string) (*Policy, error) { + if !validStringID(&policyID) { + return nil, errors.New("invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// PolicyUpdateOptions represents the options for updating a policy. +type PolicyUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policies"` + + // A description of the policy's purpose. + Description *string `jsonapi:"attr,description,omitempty"` + + // The enforcements of the policy. + Enforce []*EnforcementOptions `jsonapi:"attr,enforce,omitempty"` +} + +// Update an existing policy. +func (s *policies) Update(ctx context.Context, policyID string, options PolicyUpdateOptions) (*Policy, error) { + if !validStringID(&policyID) { + return nil, errors.New("invalid value for policy ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + p := &Policy{} + err = s.client.do(ctx, req, p) + if err != nil { + return nil, err + } + + return p, err +} + +// Delete a policy by its ID. +func (s *policies) Delete(ctx context.Context, policyID string) error { + if !validStringID(&policyID) { + return errors.New("invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s", url.QueryEscape(policyID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Upload the policy content of the policy. +func (s *policies) Upload(ctx context.Context, policyID string, content []byte) error { + if !validStringID(&policyID) { + return errors.New("invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s/upload", url.QueryEscape(policyID)) + req, err := s.client.newRequest("PUT", u, content) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// Download the policy content of the policy. 
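+//
+// A minimal round-trip sketch (illustrative; the policy ID and content are
+// hypothetical):
+//
+//	content := []byte("main = rule { true }")
+//	if err := client.Policies.Upload(ctx, "pol-123abc", content); err != nil {
+//		log.Fatal(err)
+//	}
+//	data, err := client.Policies.Download(ctx, "pol-123abc")
+//	if err != nil {
+//		log.Fatal(err)
+//	}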
+func (s *policies) Download(ctx context.Context, policyID string) ([]byte, error) { + if !validStringID(&policyID) { + return nil, errors.New("invalid value for policy ID") + } + + u := fmt.Sprintf("policies/%s/download", url.QueryEscape(policyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = s.client.do(ctx, req, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/policy_check.go b/vendor/github.com/hashicorp/go-tfe/policy_check.go new file mode 100644 index 00000000..59a5eb80 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy_check.go @@ -0,0 +1,221 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ PolicyChecks = (*policyChecks)(nil) + +// PolicyChecks describes all the policy check related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/policy-checks.html +type PolicyChecks interface { + // List all policy checks of the given run. + List(ctx context.Context, runID string, options PolicyCheckListOptions) (*PolicyCheckList, error) + + // Read a policy check by its ID. + Read(ctx context.Context, policyCheckID string) (*PolicyCheck, error) + + // Override a soft-mandatory or warning policy. + Override(ctx context.Context, policyCheckID string) (*PolicyCheck, error) + + // Logs retrieves the logs of a policy check. + Logs(ctx context.Context, policyCheckID string) (io.Reader, error) +} + +// policyChecks implements PolicyChecks. +type policyChecks struct { + client *Client +} + +// PolicyScope represents a policy scope. +type PolicyScope string + +// List all available policy scopes. +const ( + PolicyScopeOrganization PolicyScope = "organization" + PolicyScopeWorkspace PolicyScope = "workspace" +) + +// PolicyStatus represents a policy check state. +type PolicyStatus string + +//List all available policy check statuses. +const ( + PolicyCanceled PolicyStatus = "canceled" + PolicyErrored PolicyStatus = "errored" + PolicyHardFailed PolicyStatus = "hard_failed" + PolicyOverridden PolicyStatus = "overridden" + PolicyPasses PolicyStatus = "passed" + PolicyPending PolicyStatus = "pending" + PolicyQueued PolicyStatus = "queued" + PolicySoftFailed PolicyStatus = "soft_failed" + PolicyUnreachable PolicyStatus = "unreachable" +) + +// PolicyCheckList represents a list of policy checks. +type PolicyCheckList struct { + *Pagination + Items []*PolicyCheck +} + +// PolicyCheck represents a Terraform Enterprise policy check.. +type PolicyCheck struct { + ID string `jsonapi:"primary,policy-checks"` + Actions *PolicyActions `jsonapi:"attr,actions"` + Permissions *PolicyPermissions `jsonapi:"attr,permissions"` + Result *PolicyResult `jsonapi:"attr,result"` + Scope PolicyScope `jsonapi:"attr,scope"` + Status PolicyStatus `jsonapi:"attr,status"` + StatusTimestamps *PolicyStatusTimestamps `jsonapi:"attr,status-timestamps"` +} + +// PolicyActions represents the policy check actions. +type PolicyActions struct { + IsOverridable bool `json:"is-overridable"` +} + +// PolicyPermissions represents the policy check permissions. 
+type PolicyPermissions struct { + CanOverride bool `json:"can-override"` +} + +// PolicyResult represents the complete policy check result, +type PolicyResult struct { + AdvisoryFailed int `json:"advisory-failed"` + Duration int `json:"duration"` + HardFailed int `json:"hard-failed"` + Passed int `json:"passed"` + Result bool `json:"result"` + // Sentinel *sentinel.EvalResult `json:"sentinel"` + SoftFailed int `json:"soft-failed"` + TotalFailed int `json:"total-failed"` +} + +// PolicyStatusTimestamps holds the timestamps for individual policy check +// statuses. +type PolicyStatusTimestamps struct { + ErroredAt time.Time `json:"errored-at"` + HardFailedAt time.Time `json:"hard-failed-at"` + PassedAt time.Time `json:"passed-at"` + QueuedAt time.Time `json:"queued-at"` + SoftFailedAt time.Time `json:"soft-failed-at"` +} + +// PolicyCheckListOptions represents the options for listing policy checks. +type PolicyCheckListOptions struct { + ListOptions +} + +// List all policy checks of the given run. +func (s *policyChecks) List(ctx context.Context, runID string, options PolicyCheckListOptions) (*PolicyCheckList, error) { + if !validStringID(&runID) { + return nil, errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/policy-checks", url.QueryEscape(runID)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + pcl := &PolicyCheckList{} + err = s.client.do(ctx, req, pcl) + if err != nil { + return nil, err + } + + return pcl, nil +} + +// Read a policy check by its ID. +func (s *policyChecks) Read(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("invalid value for policy check ID") + } + + u := fmt.Sprintf("policy-checks/%s", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + pc := &PolicyCheck{} + err = s.client.do(ctx, req, pc) + if err != nil { + return nil, err + } + + return pc, nil +} + +// Override a soft-mandatory or warning policy. +func (s *policyChecks) Override(ctx context.Context, policyCheckID string) (*PolicyCheck, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("invalid value for policy check ID") + } + + u := fmt.Sprintf("policy-checks/%s/actions/override", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + pc := &PolicyCheck{} + err = s.client.do(ctx, req, pc) + if err != nil { + return nil, err + } + + return pc, nil +} + +// Logs retrieves the logs of a policy check. +func (s *policyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { + if !validStringID(&policyCheckID) { + return nil, errors.New("invalid value for policy check ID") + } + + // Loop until the context is canceled or the policy check is finished + // running. The policy check logs are not streamed and so only available + // once the check is finished. 
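+	// Each iteration below re-reads the policy check; while its status is
+	// still pending or queued, we wait 500ms before checking again.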
+ for { + pc, err := s.Read(ctx, policyCheckID) + if err != nil { + return nil, err + } + + switch pc.Status { + case PolicyPending, PolicyQueued: + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(500 * time.Millisecond): + continue + } + } + + u := fmt.Sprintf("policy-checks/%s/output", url.QueryEscape(policyCheckID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + logs := bytes.NewBuffer(nil) + err = s.client.do(ctx, req, logs) + if err != nil { + return nil, err + } + + return logs, nil + } +} diff --git a/vendor/github.com/hashicorp/go-tfe/policy_set.go b/vendor/github.com/hashicorp/go-tfe/policy_set.go new file mode 100644 index 00000000..b49e1bd2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/policy_set.go @@ -0,0 +1,401 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ PolicySets = (*policySets)(nil) + +// PolicySets describes all the policy set related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/policies.html +type PolicySets interface { + // List all the policy sets for a given organization. + List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) + + // Create a policy set and associate it with an organization. + Create(ctx context.Context, organization string, options PolicySetCreateOptions) (*PolicySet, error) + + // Read a policy set by its ID. + Read(ctx context.Context, policySetID string) (*PolicySet, error) + + // Update an existing policy set. + Update(ctx context.Context, policySetID string, options PolicySetUpdateOptions) (*PolicySet, error) + + // Add policies to a policy set. This function can only be used when + // there is no VCS repository associated with the policy set. + AddPolicies(ctx context.Context, policySetID string, options PolicySetAddPoliciesOptions) error + + // Remove policies from a policy set. This function can only be used + // when there is no VCS repository associated with the policy set. + RemovePolicies(ctx context.Context, policySetID string, options PolicySetRemovePoliciesOptions) error + + // Add workspaces to a policy set. + AddWorkspaces(ctx context.Context, policySetID string, options PolicySetAddWorkspacesOptions) error + + // Remove workspaces from a policy set. + RemoveWorkspaces(ctx context.Context, policySetID string, options PolicySetRemoveWorkspacesOptions) error + + // Delete a policy set by its ID. + Delete(ctx context.Context, policyID string) error +} + +// policySets implements PolicySets. +type policySets struct { + client *Client +} + +// PolicySetList represents a list of policy sets. +type PolicySetList struct { + *Pagination + Items []*PolicySet +} + +// PolicySet represents a Terraform Enterprise policy set. 
+type PolicySet struct {
+	ID             string    `jsonapi:"primary,policy-sets"`
+	Name           string    `jsonapi:"attr,name"`
+	Description    string    `jsonapi:"attr,description"`
+	Global         bool      `jsonapi:"attr,global"`
+	PoliciesPath   string    `jsonapi:"attr,policies-path"`
+	PolicyCount    int       `jsonapi:"attr,policy-count"`
+	VCSRepo        *VCSRepo  `jsonapi:"attr,vcs-repo"`
+	WorkspaceCount int       `jsonapi:"attr,workspace-count"`
+	CreatedAt      time.Time `jsonapi:"attr,created-at,iso8601"`
+	UpdatedAt      time.Time `jsonapi:"attr,updated-at,iso8601"`
+
+	// Relations
+	Organization *Organization `jsonapi:"relation,organization"`
+	Policies     []*Policy     `jsonapi:"relation,policies"`
+	Workspaces   []*Workspace  `jsonapi:"relation,workspaces"`
+}
+
+// PolicySetListOptions represents the options for listing policy sets.
+type PolicySetListOptions struct {
+	ListOptions
+
+	// A search string (partial policy set name) used to filter the results.
+	Search *string `url:"search[name],omitempty"`
+}
+
+// List all the policy sets for a given organization.
+func (s *policySets) List(ctx context.Context, organization string, options PolicySetListOptions) (*PolicySetList, error) {
+	if !validStringID(&organization) {
+		return nil, errors.New("invalid value for organization")
+	}
+
+	u := fmt.Sprintf("organizations/%s/policy-sets", url.QueryEscape(organization))
+	req, err := s.client.newRequest("GET", u, &options)
+	if err != nil {
+		return nil, err
+	}
+
+	psl := &PolicySetList{}
+	err = s.client.do(ctx, req, psl)
+	if err != nil {
+		return nil, err
+	}
+
+	return psl, nil
+}
+
+// PolicySetCreateOptions represents the options for creating a new policy set.
+type PolicySetCreateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,policy-sets"`
+
+	// The name of the policy set.
+	Name *string `jsonapi:"attr,name"`
+
+	// The description of the policy set.
+	Description *string `jsonapi:"attr,description,omitempty"`
+
+	// Whether or not the policy set is global.
+	Global *bool `jsonapi:"attr,global,omitempty"`
+
+	// The sub-path within the attached VCS repository to ingress. All
+	// files and directories outside of this sub-path will be ignored.
+	// This option may only be specified when a VCS repo is present.
+	PoliciesPath *string `jsonapi:"attr,policies-path,omitempty"`
+
+	// The initial members of the policy set.
+	Policies []*Policy `jsonapi:"relation,policies,omitempty"`
+
+	// VCS repository information. When present, the policies and
+	// configuration will be sourced from the specified VCS repository
+	// instead of being defined within the policy set itself. Note that
+	// this option is mutually exclusive with the Policies option and
+	// both cannot be used at the same time.
+	VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo,omitempty"`
+
+	// The initial list of workspaces for which the policy set should be enforced.
+	Workspaces []*Workspace `jsonapi:"relation,workspaces,omitempty"`
+}
+
+func (o PolicySetCreateOptions) valid() error {
+	if !validString(o.Name) {
+		return errors.New("name is required")
+	}
+	if !validStringID(o.Name) {
+		return errors.New("invalid value for name")
+	}
+	return nil
+}
+
+// Create a policy set and associate it with an organization.
+func (s *policySets) Create(ctx context.Context, organization string, options PolicySetCreateOptions) (*PolicySet, error) {
+	if !validStringID(&organization) {
+		return nil, errors.New("invalid value for organization")
+	}
+	if err := options.valid(); err != nil {
+		return nil, err
+	}
+
+	// Make sure we don't send a user provided ID.
+ options.ID = "" + + u := fmt.Sprintf("organizations/%s/policy-sets", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// Read a policy set by its ID. +func (s *policySets) Read(ctx context.Context, policySetID string) (*PolicySet, error) { + if !validStringID(&policySetID) { + return nil, errors.New("invalid value for policy set ID") + } + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// PolicySetUpdateOptions represents the options for updating a policy set. +type PolicySetUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,policy-sets"` + + /// The name of the policy set. + Name *string `jsonapi:"attr,name,omitempty"` + + // The description of the policy set. + Description *string `jsonapi:"attr,description,omitempty"` + + // Whether or not the policy set is global. + Global *bool `jsonapi:"attr,global,omitempty"` +} + +func (o PolicySetUpdateOptions) valid() error { + if o.Name != nil && !validStringID(o.Name) { + return errors.New("invalid value for name") + } + return nil +} + +// Update an existing policy set. +func (s *policySets) Update(ctx context.Context, policySetID string, options PolicySetUpdateOptions) (*PolicySet, error) { + if !validStringID(&policySetID) { + return nil, errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + ps := &PolicySet{} + err = s.client.do(ctx, req, ps) + if err != nil { + return nil, err + } + + return ps, err +} + +// PolicySetAddPoliciesOptions represents the options for adding policies +// to a policy set. +type PolicySetAddPoliciesOptions struct { + /// The policies to add to the policy set. + Policies []*Policy +} + +func (o PolicySetAddPoliciesOptions) valid() error { + if o.Policies == nil { + return errors.New("policies is required") + } + if len(o.Policies) == 0 { + return errors.New("must provide at least one policy") + } + return nil +} + +// Add policies to a policy set +func (s *policySets) AddPolicies(ctx context.Context, policySetID string, options PolicySetAddPoliciesOptions) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + if err := options.valid(); err != nil { + return err + } + + u := fmt.Sprintf("policy-sets/%s/relationships/policies", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("POST", u, options.Policies) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// PolicySetRemovePoliciesOptions represents the options for removing +// policies from a policy set. +type PolicySetRemovePoliciesOptions struct { + /// The policies to remove from the policy set. 
+	Policies []*Policy
+}
+
+func (o PolicySetRemovePoliciesOptions) valid() error {
+	if o.Policies == nil {
+		return errors.New("policies is required")
+	}
+	if len(o.Policies) == 0 {
+		return errors.New("must provide at least one policy")
+	}
+	return nil
+}
+
+// Remove policies from a policy set.
+func (s *policySets) RemovePolicies(ctx context.Context, policySetID string, options PolicySetRemovePoliciesOptions) error {
+	if !validStringID(&policySetID) {
+		return errors.New("invalid value for policy set ID")
+	}
+	if err := options.valid(); err != nil {
+		return err
+	}
+
+	u := fmt.Sprintf("policy-sets/%s/relationships/policies", url.QueryEscape(policySetID))
+	req, err := s.client.newRequest("DELETE", u, options.Policies)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
+
+// PolicySetAddWorkspacesOptions represents the options for adding workspaces
+// to a policy set.
+type PolicySetAddWorkspacesOptions struct {
+	// The workspaces to add to the policy set.
+	Workspaces []*Workspace
+}
+
+func (o PolicySetAddWorkspacesOptions) valid() error {
+	if o.Workspaces == nil {
+		return errors.New("workspaces is required")
+	}
+	if len(o.Workspaces) == 0 {
+		return errors.New("must provide at least one workspace")
+	}
+	return nil
+}
+
+// Add workspaces to a policy set.
+func (s *policySets) AddWorkspaces(ctx context.Context, policySetID string, options PolicySetAddWorkspacesOptions) error {
+	if !validStringID(&policySetID) {
+		return errors.New("invalid value for policy set ID")
+	}
+	if err := options.valid(); err != nil {
+		return err
+	}
+
+	u := fmt.Sprintf("policy-sets/%s/relationships/workspaces", url.QueryEscape(policySetID))
+	req, err := s.client.newRequest("POST", u, options.Workspaces)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
+
+// PolicySetRemoveWorkspacesOptions represents the options for removing
+// workspaces from a policy set.
+type PolicySetRemoveWorkspacesOptions struct {
+	// The workspaces to remove from the policy set.
+	Workspaces []*Workspace
+}
+
+func (o PolicySetRemoveWorkspacesOptions) valid() error {
+	if o.Workspaces == nil {
+		return errors.New("workspaces is required")
+	}
+	if len(o.Workspaces) == 0 {
+		return errors.New("must provide at least one workspace")
+	}
+	return nil
+}
+
+// Remove workspaces from a policy set.
+func (s *policySets) RemoveWorkspaces(ctx context.Context, policySetID string, options PolicySetRemoveWorkspacesOptions) error {
+	if !validStringID(&policySetID) {
+		return errors.New("invalid value for policy set ID")
+	}
+	if err := options.valid(); err != nil {
+		return err
+	}
+
+	u := fmt.Sprintf("policy-sets/%s/relationships/workspaces", url.QueryEscape(policySetID))
+	req, err := s.client.newRequest("DELETE", u, options.Workspaces)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
+
+// Delete a policy set by its ID.
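+//
+// A minimal usage sketch (illustrative; the policy set ID is hypothetical):
+//
+//	if err := client.PolicySets.Delete(ctx, "polset-7xAwnqLBGVcy4tF1"); err != nil {
+//		log.Fatal(err)
+//	}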
+func (s *policySets) Delete(ctx context.Context, policySetID string) error { + if !validStringID(&policySetID) { + return errors.New("invalid value for policy set ID") + } + + u := fmt.Sprintf("policy-sets/%s", url.QueryEscape(policySetID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/run.go b/vendor/github.com/hashicorp/go-tfe/run.go new file mode 100644 index 00000000..ef0657e2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/run.go @@ -0,0 +1,319 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Runs = (*runs)(nil) + +// Runs describes all the run related methods that the Terraform Enterprise +// API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/run.html +type Runs interface { + // List all the runs of the given workspace. + List(ctx context.Context, workspaceID string, options RunListOptions) (*RunList, error) + + // Create a new run with the given options. + Create(ctx context.Context, options RunCreateOptions) (*Run, error) + + // Read a run by its ID. + Read(ctx context.Context, runID string) (*Run, error) + + // Apply a run by its ID. + Apply(ctx context.Context, runID string, options RunApplyOptions) error + + // Cancel a run by its ID. + Cancel(ctx context.Context, runID string, options RunCancelOptions) error + + // Force-cancel a run by its ID. + ForceCancel(ctx context.Context, runID string, options RunForceCancelOptions) error + + // Discard a run by its ID. + Discard(ctx context.Context, runID string, options RunDiscardOptions) error +} + +// runs implements Runs. +type runs struct { + client *Client +} + +// RunStatus represents a run state. +type RunStatus string + +//List all available run statuses. +const ( + RunApplied RunStatus = "applied" + RunApplyQueued RunStatus = "apply_queued" + RunApplying RunStatus = "applying" + RunCanceled RunStatus = "canceled" + RunConfirmed RunStatus = "confirmed" + RunDiscarded RunStatus = "discarded" + RunErrored RunStatus = "errored" + RunPending RunStatus = "pending" + RunPlanQueued RunStatus = "plan_queued" + RunPlanned RunStatus = "planned" + RunPlannedAndFinished RunStatus = "planned_and_finished" + RunPlanning RunStatus = "planning" + RunPolicyChecked RunStatus = "policy_checked" + RunPolicyChecking RunStatus = "policy_checking" + RunPolicyOverride RunStatus = "policy_override" + RunPolicySoftFailed RunStatus = "policy_soft_failed" +) + +// RunSource represents a source type of a run. +type RunSource string + +// List all available run sources. +const ( + RunSourceAPI RunSource = "tfe-api" + RunSourceConfigurationVersion RunSource = "tfe-configuration-version" + RunSourceUI RunSource = "tfe-ui" +) + +// RunList represents a list of runs. +type RunList struct { + *Pagination + Items []*Run +} + +// Run represents a Terraform Enterprise run. 
+type Run struct { + ID string `jsonapi:"primary,runs"` + Actions *RunActions `jsonapi:"attr,actions"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + ForceCancelAvailableAt time.Time `jsonapi:"attr,force-cancel-available-at,iso8601"` + HasChanges bool `jsonapi:"attr,has-changes"` + IsDestroy bool `jsonapi:"attr,is-destroy"` + Message string `jsonapi:"attr,message"` + Permissions *RunPermissions `jsonapi:"attr,permissions"` + PositionInQueue int `jsonapi:"attr,position-in-queue"` + Source RunSource `jsonapi:"attr,source"` + Status RunStatus `jsonapi:"attr,status"` + StatusTimestamps *RunStatusTimestamps `jsonapi:"attr,status-timestamps"` + + // Relations + Apply *Apply `jsonapi:"relation,apply"` + ConfigurationVersion *ConfigurationVersion `jsonapi:"relation,configuration-version"` + Plan *Plan `jsonapi:"relation,plan"` + PolicyChecks []*PolicyCheck `jsonapi:"relation,policy-checks"` + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +// RunActions represents the run actions. +type RunActions struct { + IsCancelable bool `json:"is-cancelable"` + IsConfirmable bool `json:"is-confirmable"` + IsDiscardable bool `json:"is-discardable"` + IsForceCancelable bool `json:"is-force-cancelable"` +} + +// RunPermissions represents the run permissions. +type RunPermissions struct { + CanApply bool `json:"can-apply"` + CanCancel bool `json:"can-cancel"` + CanDiscard bool `json:"can-discard"` + CanForceCancel bool `json:"can-force-cancel"` + CanForceExecute bool `json:"can-force-execute"` +} + +// RunStatusTimestamps holds the timestamps for individual run statuses. +type RunStatusTimestamps struct { + ErroredAt time.Time `json:"errored-at"` + FinishedAt time.Time `json:"finished-at"` + QueuedAt time.Time `json:"queued-at"` + StartedAt time.Time `json:"started-at"` + ApplyingAt time.Time `json:"applying-at"` + AppliedAt time.Time `json:"applied-at"` + PlanningAt time.Time `json:"planning-at"` + PlannedAt time.Time `json:"planned-at"` + PlannedAndFinishedAt time.Time `json:"planned-and-finished-at"` + PlanQueuabledAt time.Time `json:"plan-queueable-at"` +} + +// RunListOptions represents the options for listing runs. +type RunListOptions struct { + ListOptions +} + +// List all the runs of the given workspace. +func (s *runs) List(ctx context.Context, workspaceID string, options RunListOptions) (*RunList, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/runs", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + rl := &RunList{} + err = s.client.do(ctx, req, rl) + if err != nil { + return nil, err + } + + return rl, nil +} + +// RunCreateOptions represents the options for creating a new run. +type RunCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,runs"` + + // Specifies if this plan is a destroy plan, which will destroy all + // provisioned resources. + IsDestroy *bool `jsonapi:"attr,is-destroy,omitempty"` + + // Specifies the message to be associated with this run. + Message *string `jsonapi:"attr,message,omitempty"` + + // Specifies the configuration version to use for this run. If the + // configuration version object is omitted, the run will be created using the + // workspace's latest configuration version. + ConfigurationVersion *ConfigurationVersion `jsonapi:"relation,configuration-version"` + + // Specifies the workspace where the run will be executed. 
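+	//
+	// A minimal create sketch (illustrative; assumes a configured *Client
+	// named client, an existing *Workspace ws, and this package's String
+	// helper):
+	//
+	//	r, err := client.Runs.Create(ctx, RunCreateOptions{
+	//		Message:   String("queued via the API"),
+	//		Workspace: ws,
+	//	})
+	//	if err != nil {
+	//		log.Fatal(err)
+	//	}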
+ Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o RunCreateOptions) valid() error { + if o.Workspace == nil { + return errors.New("workspace is required") + } + return nil +} + +// Create a new run with the given options. +func (s *runs) Create(ctx context.Context, options RunCreateOptions) (*Run, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "runs", &options) + if err != nil { + return nil, err + } + + r := &Run{} + err = s.client.do(ctx, req, r) + if err != nil { + return nil, err + } + + return r, nil +} + +// Read a run by its ID. +func (s *runs) Read(ctx context.Context, runID string) (*Run, error) { + if !validStringID(&runID) { + return nil, errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s", url.QueryEscape(runID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + r := &Run{} + err = s.client.do(ctx, req, r) + if err != nil { + return nil, err + } + + return r, nil +} + +// RunApplyOptions represents the options for applying a run. +type RunApplyOptions struct { + // An optional comment about the run. + Comment *string `json:"comment,omitempty"` +} + +// Apply a run by its ID. +func (s *runs) Apply(ctx context.Context, runID string, options RunApplyOptions) error { + if !validStringID(&runID) { + return errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/apply", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunCancelOptions represents the options for canceling a run. +type RunCancelOptions struct { + // An optional explanation for why the run was canceled. + Comment *string `json:"comment,omitempty"` +} + +// Cancel a run by its ID. +func (s *runs) Cancel(ctx context.Context, runID string, options RunCancelOptions) error { + if !validStringID(&runID) { + return errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/cancel", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunForceCancelOptions represents the options for force-canceling a run. +type RunForceCancelOptions struct { + // An optional comment explaining the reason for the force-cancel. + Comment *string `json:"comment,omitempty"` +} + +// ForceCancel is used to forcefully cancel a run by its ID. +func (s *runs) ForceCancel(ctx context.Context, runID string, options RunForceCancelOptions) error { + if !validStringID(&runID) { + return errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/force-cancel", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// RunDiscardOptions represents the options for discarding a run. +type RunDiscardOptions struct { + // An optional explanation for why the run was discarded. + Comment *string `json:"comment,omitempty"` +} + +// Discard a run by its ID. 
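+//
+// A minimal usage sketch (illustrative; the run ID is hypothetical and this
+// package's String helper is assumed):
+//
+//	err := client.Runs.Discard(ctx, "run-CZcmD7eagjhyX0vN", RunDiscardOptions{
+//		Comment: String("superseded by a newer run"),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}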
+func (s *runs) Discard(ctx context.Context, runID string, options RunDiscardOptions) error { + if !validStringID(&runID) { + return errors.New("invalid value for run ID") + } + + u := fmt.Sprintf("runs/%s/actions/discard", url.QueryEscape(runID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/ssh_key.go b/vendor/github.com/hashicorp/go-tfe/ssh_key.go new file mode 100644 index 00000000..8735d071 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/ssh_key.go @@ -0,0 +1,198 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ SSHKeys = (*sshKeys)(nil) + +// SSHKeys describes all the SSH key related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/ssh-keys.html +type SSHKeys interface { + // List all the SSH keys for a given organization + List(ctx context.Context, organization string, options SSHKeyListOptions) (*SSHKeyList, error) + + // Create an SSH key and associate it with an organization. + Create(ctx context.Context, organization string, options SSHKeyCreateOptions) (*SSHKey, error) + + // Read an SSH key by its ID. + Read(ctx context.Context, sshKeyID string) (*SSHKey, error) + + // Update an SSH key by its ID. + Update(ctx context.Context, sshKeyID string, options SSHKeyUpdateOptions) (*SSHKey, error) + + // Delete an SSH key by its ID. + Delete(ctx context.Context, sshKeyID string) error +} + +// sshKeys implements SSHKeys. +type sshKeys struct { + client *Client +} + +// SSHKeyList represents a list of SSH keys. +type SSHKeyList struct { + *Pagination + Items []*SSHKey +} + +// SSHKey represents a SSH key. +type SSHKey struct { + ID string `jsonapi:"primary,ssh-keys"` + Name string `jsonapi:"attr,name"` +} + +// SSHKeyListOptions represents the options for listing SSH keys. +type SSHKeyListOptions struct { + ListOptions +} + +// List all the SSH keys for a given organization +func (s *sshKeys) List(ctx context.Context, organization string, options SSHKeyListOptions) (*SSHKeyList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/ssh-keys", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + kl := &SSHKeyList{} + err = s.client.do(ctx, req, kl) + if err != nil { + return nil, err + } + + return kl, nil +} + +// SSHKeyCreateOptions represents the options for creating an SSH key. +type SSHKeyCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,ssh-keys"` + + // A name to identify the SSH key. + Name *string `jsonapi:"attr,name"` + + // The content of the SSH private key. + Value *string `jsonapi:"attr,value"` +} + +func (o SSHKeyCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("name is required") + } + if !validString(o.Value) { + return errors.New("value is required") + } + return nil +} + +// Create an SSH key and associate it with an organization. 
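+//
+// A minimal usage sketch (illustrative; assumes a configured *Client named
+// client and this package's String helper; the key material is a placeholder):
+//
+//	k, err := client.SSHKeys.Create(ctx, "my-org", SSHKeyCreateOptions{
+//		Name:  String("my-ssh-key"),
+//		Value: String("-----BEGIN RSA PRIVATE KEY-----\n..."),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}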
+func (s *sshKeys) Create(ctx context.Context, organization string, options SSHKeyCreateOptions) (*SSHKey, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/ssh-keys", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// Read an SSH key by its ID. +func (s *sshKeys) Read(ctx context.Context, sshKeyID string) (*SSHKey, error) { + if !validStringID(&sshKeyID) { + return nil, errors.New("invalid value for SSH key ID") + } + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// SSHKeyUpdateOptions represents the options for updating an SSH key. +type SSHKeyUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,ssh-keys"` + + // A new name to identify the SSH key. + Name *string `jsonapi:"attr,name,omitempty"` + + // Updated content of the SSH private key. + Value *string `jsonapi:"attr,value,omitempty"` +} + +// Update an SSH key by its ID. +func (s *sshKeys) Update(ctx context.Context, sshKeyID string, options SSHKeyUpdateOptions) (*SSHKey, error) { + if !validStringID(&sshKeyID) { + return nil, errors.New("invalid value for SSH key ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + k := &SSHKey{} + err = s.client.do(ctx, req, k) + if err != nil { + return nil, err + } + + return k, nil +} + +// Delete an SSH key by its ID. +func (s *sshKeys) Delete(ctx context.Context, sshKeyID string) error { + if !validStringID(&sshKeyID) { + return errors.New("invalid value for SSH key ID") + } + + u := fmt.Sprintf("ssh-keys/%s", url.QueryEscape(sshKeyID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/state_version.go b/vendor/github.com/hashicorp/go-tfe/state_version.go new file mode 100644 index 00000000..36c493ac --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/state_version.go @@ -0,0 +1,220 @@ +package tfe + +import ( + "bytes" + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ StateVersions = (*stateVersions)(nil) + +// StateVersions describes all the state version related methods that +// the Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/state-versions.html +type StateVersions interface { + // List all the state versions for a given workspace. + List(ctx context.Context, options StateVersionListOptions) (*StateVersionList, error) + + // Create a new state version for the given workspace. + Create(ctx context.Context, workspaceID string, options StateVersionCreateOptions) (*StateVersion, error) + + // Read a state version by its ID. 
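+	//
+	// An illustrative sketch (not upstream code; the ID is a placeholder,
+	// and client/ctx are assumed to exist):
+	//
+	//	sv, err := client.StateVersions.Read(ctx, "sv-12345")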
+ Read(ctx context.Context, svID string) (*StateVersion, error) + + // Current reads the latest available state from the given workspace. + Current(ctx context.Context, workspaceID string) (*StateVersion, error) + + // Download retrieves the actual stored state of a state version + Download(ctx context.Context, url string) ([]byte, error) +} + +// stateVersions implements StateVersions. +type stateVersions struct { + client *Client +} + +// StateVersionList represents a list of state versions. +type StateVersionList struct { + *Pagination + Items []*StateVersion +} + +// StateVersion represents a Terraform Enterprise state version. +type StateVersion struct { + ID string `jsonapi:"primary,state-versions"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + DownloadURL string `jsonapi:"attr,hosted-state-download-url"` + Serial int64 `jsonapi:"attr,serial"` + VCSCommitSHA string `jsonapi:"attr,vcs-commit-sha"` + VCSCommitURL string `jsonapi:"attr,vcs-commit-url"` + + // Relations + Run *Run `jsonapi:"relation,run"` +} + +// StateVersionListOptions represents the options for listing state versions. +type StateVersionListOptions struct { + ListOptions + Organization *string `url:"filter[organization][name]"` + Workspace *string `url:"filter[workspace][name]"` +} + +func (o StateVersionListOptions) valid() error { + if !validString(o.Organization) { + return errors.New("organization is required") + } + if !validString(o.Workspace) { + return errors.New("workspace is required") + } + return nil +} + +// List all the state versions for a given workspace. +func (s *stateVersions) List(ctx context.Context, options StateVersionListOptions) (*StateVersionList, error) { + if err := options.valid(); err != nil { + return nil, err + } + + req, err := s.client.newRequest("GET", "state-versions", &options) + if err != nil { + return nil, err + } + + svl := &StateVersionList{} + err = s.client.do(ctx, req, svl) + if err != nil { + return nil, err + } + + return svl, nil +} + +// StateVersionCreateOptions represents the options for creating a state version. +type StateVersionCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,state-versions"` + + // The lineage of the state. + Lineage *string `jsonapi:"attr,lineage,omitempty"` + + // The MD5 hash of the state version. + MD5 *string `jsonapi:"attr,md5"` + + // The serial of the state. + Serial *int64 `jsonapi:"attr,serial"` + + // The base64 encoded state. + State *string `jsonapi:"attr,state"` + + // Force can be set to skip certain validations. Wrong use + // of this flag can cause data loss, so USE WITH CAUTION! + Force *bool `jsonapi:"attr,force"` + + // Specifies the run to associate the state with. + Run *Run `jsonapi:"relation,run,omitempty"` +} + +func (o StateVersionCreateOptions) valid() error { + if !validString(o.MD5) { + return errors.New("MD5 is required") + } + if o.Serial == nil { + return errors.New("serial is required") + } + if !validString(o.State) { + return errors.New("state is required") + } + return nil +} + +// Create a new state version for the given workspace. +func (s *stateVersions) Create(ctx context.Context, workspaceID string, options StateVersionCreateOptions) (*StateVersion, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. 
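+	// The field exists only so the jsonapi marshaler can emit the correct
+	// resource type ("state-versions"); the server assigns the real ID.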
+ options.ID = "" + + u := fmt.Sprintf("workspaces/%s/state-versions", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Read a state version by its ID. +func (s *stateVersions) Read(ctx context.Context, svID string) (*StateVersion, error) { + if !validStringID(&svID) { + return nil, errors.New("invalid value for state version ID") + } + + u := fmt.Sprintf("state-versions/%s", url.QueryEscape(svID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Current reads the latest available state from the given workspace. +func (s *stateVersions) Current(ctx context.Context, workspaceID string) (*StateVersion, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/current-state-version", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + sv := &StateVersion{} + err = s.client.do(ctx, req, sv) + if err != nil { + return nil, err + } + + return sv, nil +} + +// Download retrieves the actual stored state of a state version +func (s *stateVersions) Download(ctx context.Context, url string) ([]byte, error) { + req, err := s.client.newRequest("GET", url, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + + var buf bytes.Buffer + err = s.client.do(ctx, req, &buf) + if err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/team.go b/vendor/github.com/hashicorp/go-tfe/team.go new file mode 100644 index 00000000..7bd93a4f --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team.go @@ -0,0 +1,219 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ Teams = (*teams)(nil) + +// Teams describes all the team related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/teams.html +type Teams interface { + // List all the teams of the given organization. + List(ctx context.Context, organization string, options TeamListOptions) (*TeamList, error) + + // Create a new team with the given options. + Create(ctx context.Context, organization string, options TeamCreateOptions) (*Team, error) + + // Read a team by its ID. + Read(ctx context.Context, teamID string) (*Team, error) + + // Update a team by its ID. + Update(ctx context.Context, teamID string, options TeamUpdateOptions) (*Team, error) + + // Delete a team by its ID. + Delete(ctx context.Context, teamID string) error +} + +// teams implements Teams. +type teams struct { + client *Client +} + +// TeamList represents a list of teams. +type TeamList struct { + *Pagination + Items []*Team +} + +// Team represents a Terraform Enterprise team. 
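+//
+// A lookup sketch (illustrative only, not upstream code; the team ID is a
+// placeholder):
+//
+//	team, err := client.Teams.Read(ctx, "team-987zyx")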
+type Team struct { + ID string `jsonapi:"primary,teams"` + Name string `jsonapi:"attr,name"` + OrganizationAccess *OrganizationAccess `jsonapi:"attr,organization-access"` + Permissions *TeamPermissions `jsonapi:"attr,permissions"` + UserCount int `jsonapi:"attr,users-count"` + + // Relations + Users []*User `jsonapi:"relation,users"` +} + +// OrganizationAccess represents the team's permissions on its organization +type OrganizationAccess struct { + ManagePolicies bool `json:"manage-policies"` + ManageWorkspaces bool `json:"manage-workspaces"` + ManageVCSSettings bool `json:"manage-vcs-settings"` +} + +// TeamPermissions represents the current user's permissions on the team. +type TeamPermissions struct { + CanDestroy bool `json:"can-destroy"` + CanUpdateMembership bool `json:"can-update-membership"` +} + +// TeamListOptions represents the options for listing teams. +type TeamListOptions struct { + ListOptions +} + +// List all the teams of the given organization. +func (s *teams) List(ctx context.Context, organization string, options TeamListOptions) (*TeamList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/teams", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + tl := &TeamList{} + err = s.client.do(ctx, req, tl) + if err != nil { + return nil, err + } + + return tl, nil +} + +// TeamCreateOptions represents the options for creating a team. +type TeamCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,teams"` + + // Name of the team. + Name *string `jsonapi:"attr,name"` + + // The team's organization access + OrganizationAccess *OrganizationAccessOptions `jsonapi:"attr,organization-access,omitempty"` +} + +// OrganizationAccessOptions represents the organization access options of a team. +type OrganizationAccessOptions struct { + ManagePolicies *bool `json:"manage-policies,omitempty"` + ManageWorkspaces *bool `json:"manage-workspaces,omitempty"` + ManageVCSSettings *bool `json:"manage-vcs-settings,omitempty"` +} + +func (o TeamCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("name is required") + } + return nil +} + +// Create a new team with the given options. +func (s *teams) Create(ctx context.Context, organization string, options TeamCreateOptions) (*Team, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/teams", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// Read a single team by its ID. +func (s *teams) Read(ctx context.Context, teamID string) (*Team, error) { + if !validStringID(&teamID) { + return nil, errors.New("invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// TeamUpdateOptions represents the options for updating a team. 
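+//
+// An illustrative sketch (not upstream code): renaming a team while leaving
+// its organization access untouched.
+//
+//	team, err := client.Teams.Update(ctx, "team-987zyx", tfe.TeamUpdateOptions{
+//		Name: tfe.String("platform-engineering"),
+//	})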
+type TeamUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,teams"` + + // New name for the team + Name *string `jsonapi:"attr,name,omitempty"` + + // The team's organization access + OrganizationAccess *OrganizationAccessOptions `jsonapi:"attr,organization-access,omitempty"` +} + +// Update a team by its ID. +func (s *teams) Update(ctx context.Context, teamID string, options TeamUpdateOptions) (*Team, error) { + if !validStringID(&teamID) { + return nil, errors.New("invalid value for team ID") + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t, nil +} + +// Delete a team by its ID. +func (s *teams) Delete(ctx context.Context, teamID string) error { + if !validStringID(&teamID) { + return errors.New("invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_access.go b/vendor/github.com/hashicorp/go-tfe/team_access.go new file mode 100644 index 00000000..a69ffa17 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_access.go @@ -0,0 +1,185 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. +var _ TeamAccesses = (*teamAccesses)(nil) + +// TeamAccesses describes all the team access related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-access.html +type TeamAccesses interface { + // List all the team accesses for a given workspace. + List(ctx context.Context, options TeamAccessListOptions) (*TeamAccessList, error) + + // Add team access for a workspace. + Add(ctx context.Context, options TeamAccessAddOptions) (*TeamAccess, error) + + // Read a team access by its ID. + Read(ctx context.Context, teamAccessID string) (*TeamAccess, error) + + // Remove team access from a workspace. + Remove(ctx context.Context, teamAccessID string) error +} + +// teamAccesses implements TeamAccesses. +type teamAccesses struct { + client *Client +} + +// AccessType represents a team access type. +type AccessType string + +// List all available team access types. +const ( + AccessAdmin AccessType = "admin" + AccessPlan AccessType = "plan" + AccessRead AccessType = "read" + AccessWrite AccessType = "write" +) + +// TeamAccessList represents a list of team accesses. +type TeamAccessList struct { + *Pagination + Items []*TeamAccess +} + +// TeamAccess represents the workspace access for a team. +type TeamAccess struct { + ID string `jsonapi:"primary,team-workspaces"` + Access AccessType `jsonapi:"attr,access"` + + // Relations + Team *Team `jsonapi:"relation,team"` + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +// TeamAccessListOptions represents the options for listing team accesses. 
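+//
+// A hedged example (not upstream code; the workspace ID is a placeholder):
+// listing the accesses of a single workspace. The WorkspaceID filter below
+// is required.
+//
+//	tal, err := client.TeamAccess.List(ctx, tfe.TeamAccessListOptions{
+//		WorkspaceID: tfe.String("ws-abc123"),
+//	})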
+type TeamAccessListOptions struct { + ListOptions + WorkspaceID *string `url:"filter[workspace][id],omitempty"` +} + +func (o TeamAccessListOptions) valid() error { + if !validString(o.WorkspaceID) { + return errors.New("workspace ID is required") + } + if !validStringID(o.WorkspaceID) { + return errors.New("invalid value for workspace ID") + } + return nil +} + +// List all the team accesses for a given workspace. +func (s *teamAccesses) List(ctx context.Context, options TeamAccessListOptions) (*TeamAccessList, error) { + if err := options.valid(); err != nil { + return nil, err + } + + req, err := s.client.newRequest("GET", "team-workspaces", &options) + if err != nil { + return nil, err + } + + tal := &TeamAccessList{} + err = s.client.do(ctx, req, tal) + if err != nil { + return nil, err + } + + return tal, nil +} + +// TeamAccessAddOptions represents the options for adding team access. +type TeamAccessAddOptions struct { + // For internal use only! + ID string `jsonapi:"primary,team-workspaces"` + + // The type of access to grant. + Access *AccessType `jsonapi:"attr,access"` + + // The team to add to the workspace + Team *Team `jsonapi:"relation,team"` + + // The workspace to which the team is to be added. + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o TeamAccessAddOptions) valid() error { + if o.Access == nil { + return errors.New("access is required") + } + if o.Team == nil { + return errors.New("team is required") + } + if o.Workspace == nil { + return errors.New("workspace is required") + } + return nil +} + +// Add team access for a workspace. +func (s *teamAccesses) Add(ctx context.Context, options TeamAccessAddOptions) (*TeamAccess, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "team-workspaces", &options) + if err != nil { + return nil, err + } + + ta := &TeamAccess{} + err = s.client.do(ctx, req, ta) + if err != nil { + return nil, err + } + + return ta, nil +} + +// Read a team access by its ID. +func (s *teamAccesses) Read(ctx context.Context, teamAccessID string) (*TeamAccess, error) { + if !validStringID(&teamAccessID) { + return nil, errors.New("invalid value for team access ID") + } + + u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + ta := &TeamAccess{} + err = s.client.do(ctx, req, ta) + if err != nil { + return nil, err + } + + return ta, nil +} + +// Remove team access from a workspace. +func (s *teamAccesses) Remove(ctx context.Context, teamAccessID string) error { + if !validStringID(&teamAccessID) { + return errors.New("invalid value for team access ID") + } + + u := fmt.Sprintf("team-workspaces/%s", url.QueryEscape(teamAccessID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_member.go b/vendor/github.com/hashicorp/go-tfe/team_member.go new file mode 100644 index 00000000..8ab97fda --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_member.go @@ -0,0 +1,139 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" +) + +// Compile-time proof of interface implementation. 
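+// The assignment below fails to compile if *teamMembers ever stops
+// satisfying the TeamMembers interface.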
+var _ TeamMembers = (*teamMembers)(nil) + +// TeamMembers describes all the team member related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-members.html +type TeamMembers interface { + // List all members of a team. + List(ctx context.Context, teamID string) ([]*User, error) + + // Add multiple users to a team. + Add(ctx context.Context, teamID string, options TeamMemberAddOptions) error + + // Remove multiple users from a team. + Remove(ctx context.Context, teamID string, options TeamMemberRemoveOptions) error +} + +// teamMembers implements TeamMembers. +type teamMembers struct { + client *Client +} + +type teamMember struct { + Username string `jsonapi:"primary,users"` +} + +// List all members of a team. +func (s *teamMembers) List(ctx context.Context, teamID string) ([]*User, error) { + if !validStringID(&teamID) { + return nil, errors.New("invalid value for team ID") + } + + options := struct { + Include string `url:"include"` + }{ + Include: "users", + } + + u := fmt.Sprintf("teams/%s", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, options) + if err != nil { + return nil, err + } + + t := &Team{} + err = s.client.do(ctx, req, t) + if err != nil { + return nil, err + } + + return t.Users, nil +} + +// TeamMemberAddOptions represents the options for adding team members. +type TeamMemberAddOptions struct { + Usernames []string +} + +func (o *TeamMemberAddOptions) valid() error { + if o.Usernames == nil { + return errors.New("usernames is required") + } + if len(o.Usernames) == 0 { + return errors.New("invalid value for usernames") + } + return nil +} + +// Add multiple users to a team. +func (s *teamMembers) Add(ctx context.Context, teamID string, options TeamMemberAddOptions) error { + if !validStringID(&teamID) { + return errors.New("invalid value for team ID") + } + if err := options.valid(); err != nil { + return err + } + + var tms []*teamMember + for _, name := range options.Usernames { + tms = append(tms, &teamMember{Username: name}) + } + + u := fmt.Sprintf("teams/%s/relationships/users", url.QueryEscape(teamID)) + req, err := s.client.newRequest("POST", u, tms) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// TeamMemberRemoveOptions represents the options for deleting team members. +type TeamMemberRemoveOptions struct { + Usernames []string +} + +func (o *TeamMemberRemoveOptions) valid() error { + if o.Usernames == nil { + return errors.New("usernames is required") + } + if len(o.Usernames) == 0 { + return errors.New("invalid value for usernames") + } + return nil +} + +// Remove multiple users from a team. 
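+//
+// An illustrative sketch (not upstream code; the team ID and usernames are
+// placeholders):
+//
+//	err := client.TeamMembers.Remove(ctx, "team-987zyx", tfe.TeamMemberRemoveOptions{
+//		Usernames: []string{"alice", "bob"},
+//	})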
+func (s *teamMembers) Remove(ctx context.Context, teamID string, options TeamMemberRemoveOptions) error { + if !validStringID(&teamID) { + return errors.New("invalid value for team ID") + } + if err := options.valid(); err != nil { + return err + } + + var tms []*teamMember + for _, name := range options.Usernames { + tms = append(tms, &teamMember{Username: name}) + } + + u := fmt.Sprintf("teams/%s/relationships/users", url.QueryEscape(teamID)) + req, err := s.client.newRequest("DELETE", u, tms) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/team_token.go b/vendor/github.com/hashicorp/go-tfe/team_token.go new file mode 100644 index 00000000..6763b660 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/team_token.go @@ -0,0 +1,99 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ TeamTokens = (*teamTokens)(nil) + +// TeamTokens describes all the team token related methods that the +// Terraform Enterprise API supports. +// +// TFE API docs: +// https://www.terraform.io/docs/enterprise/api/team-tokens.html +type TeamTokens interface { + // Generate a new team token, replacing any existing token. + Generate(ctx context.Context, teamID string) (*TeamToken, error) + + // Read a team token by its ID. + Read(ctx context.Context, teamID string) (*TeamToken, error) + + // Delete a team token by its ID. + Delete(ctx context.Context, teamID string) error +} + +// teamTokens implements TeamTokens. +type teamTokens struct { + client *Client +} + +// TeamToken represents a Terraform Enterprise team token. +type TeamToken struct { + ID string `jsonapi:"primary,authentication-tokens"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Description string `jsonapi:"attr,description"` + LastUsedAt time.Time `jsonapi:"attr,last-used-at,iso8601"` + Token string `jsonapi:"attr,token"` +} + +// Generate a new team token, replacing any existing token. +func (s *teamTokens) Generate(ctx context.Context, teamID string) (*TeamToken, error) { + if !validStringID(&teamID) { + return nil, errors.New("invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + tt := &TeamToken{} + err = s.client.do(ctx, req, tt) + if err != nil { + return nil, err + } + + return tt, err +} + +// Read a team token by its ID. +func (s *teamTokens) Read(ctx context.Context, teamID string) (*TeamToken, error) { + if !validStringID(&teamID) { + return nil, errors.New("invalid value for team ID") + } + + u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + tt := &TeamToken{} + err = s.client.do(ctx, req, tt) + if err != nil { + return nil, err + } + + return tt, err +} + +// Delete a team token by its ID. 
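+//
+// An illustrative sketch (not upstream code): revoking a team's token. Note
+// that the token is addressed via its team ID, not a separate token ID.
+//
+//	err := client.TeamTokens.Delete(ctx, "team-987zyx")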
+func (s *teamTokens) Delete(ctx context.Context, teamID string) error {
+	if !validStringID(&teamID) {
+		return errors.New("invalid value for team ID")
+	}
+
+	u := fmt.Sprintf("teams/%s/authentication-token", url.QueryEscape(teamID))
+	req, err := s.client.newRequest("DELETE", u, nil)
+	if err != nil {
+		return err
+	}
+
+	return s.client.do(ctx, req, nil)
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/tfe.go b/vendor/github.com/hashicorp/go-tfe/tfe.go
new file mode 100644
index 00000000..761cdaf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/tfe.go
@@ -0,0 +1,586 @@
+package tfe
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/google/go-querystring/query"
+	"github.com/hashicorp/go-cleanhttp"
+	retryablehttp "github.com/hashicorp/go-retryablehttp"
+	"github.com/svanharmelen/jsonapi"
+	"golang.org/x/time/rate"
+)
+
+const (
+	userAgent       = "go-tfe"
+	headerRateLimit = "X-RateLimit-Limit"
+	headerRateReset = "X-RateLimit-Reset"
+
+	// DefaultAddress of Terraform Enterprise.
+	DefaultAddress = "https://app.terraform.io"
+	// DefaultBasePath on which the API is served.
+	DefaultBasePath = "/api/v2/"
+	// PingEndpoint is a no-op API endpoint used to configure the rate limiter.
+	PingEndpoint = "ping"
+)
+
+var (
+	// ErrWorkspaceLocked is returned when trying to lock a
+	// locked workspace.
+	ErrWorkspaceLocked = errors.New("workspace already locked")
+	// ErrWorkspaceNotLocked is returned when trying to unlock
+	// an unlocked workspace.
+	ErrWorkspaceNotLocked = errors.New("workspace already unlocked")
+
+	// ErrUnauthorized is returned when receiving a 401.
+	ErrUnauthorized = errors.New("unauthorized")
+	// ErrResourceNotFound is returned when receiving a 404.
+	ErrResourceNotFound = errors.New("resource not found")
+)
+
+// RetryLogHook allows a function to run before each retry.
+type RetryLogHook func(attemptNum int, resp *http.Response)
+
+// Config provides configuration details to the API client.
+type Config struct {
+	// The address of the Terraform Enterprise API.
+	Address string
+
+	// The base path on which the API is served.
+	BasePath string
+
+	// API token used to access the Terraform Enterprise API.
+	Token string
+
+	// Headers that will be added to every request.
+	Headers http.Header
+
+	// A custom HTTP client to use.
+	HTTPClient *http.Client
+
+	// RetryLogHook is invoked each time a request is retried.
+	RetryLogHook RetryLogHook
+}
+
+// DefaultConfig returns a default config structure.
+func DefaultConfig() *Config {
+	config := &Config{
+		Address:    os.Getenv("TFE_ADDRESS"),
+		BasePath:   DefaultBasePath,
+		Token:      os.Getenv("TFE_TOKEN"),
+		Headers:    make(http.Header),
+		HTTPClient: cleanhttp.DefaultPooledClient(),
+	}
+
+	// Set the default address if none is given.
+	if config.Address == "" {
+		config.Address = DefaultAddress
+	}
+
+	// Set the default user agent.
+	config.Headers.Set("User-Agent", userAgent)
+
+	return config
+}
+
+// Client is the Terraform Enterprise API client. It provides the basic
+// connectivity and configuration for accessing the TFE API.
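+//
+// A minimal construction sketch (illustrative, not upstream code; the token
+// is a placeholder, and DefaultConfig can also pick it up from TFE_TOKEN):
+//
+//	client, err := tfe.NewClient(&tfe.Config{
+//		Token: "my-tfe-token",
+//	})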
+type Client struct {
+	baseURL           *url.URL
+	token             string
+	headers           http.Header
+	http              *retryablehttp.Client
+	limiter           *rate.Limiter
+	retryLogHook      RetryLogHook
+	retryServerErrors bool
+
+	Applies                    Applies
+	ConfigurationVersions      ConfigurationVersions
+	NotificationConfigurations NotificationConfigurations
+	OAuthClients               OAuthClients
+	OAuthTokens                OAuthTokens
+	Organizations              Organizations
+	OrganizationTokens         OrganizationTokens
+	Plans                      Plans
+	PlanExports                PlanExports
+	Policies                   Policies
+	PolicyChecks               PolicyChecks
+	PolicySets                 PolicySets
+	Runs                       Runs
+	SSHKeys                    SSHKeys
+	StateVersions              StateVersions
+	Teams                      Teams
+	TeamAccess                 TeamAccesses
+	TeamMembers                TeamMembers
+	TeamTokens                 TeamTokens
+	Users                      Users
+	Variables                  Variables
+	Workspaces                 Workspaces
+}
+
+// NewClient creates a new Terraform Enterprise API client.
+func NewClient(cfg *Config) (*Client, error) {
+	config := DefaultConfig()
+
+	// Layer in the provided config for any non-blank values.
+	if cfg != nil {
+		if cfg.Address != "" {
+			config.Address = cfg.Address
+		}
+		if cfg.BasePath != "" {
+			config.BasePath = cfg.BasePath
+		}
+		if cfg.Token != "" {
+			config.Token = cfg.Token
+		}
+		for k, v := range cfg.Headers {
+			config.Headers[k] = v
+		}
+		if cfg.HTTPClient != nil {
+			config.HTTPClient = cfg.HTTPClient
+		}
+		if cfg.RetryLogHook != nil {
+			config.RetryLogHook = cfg.RetryLogHook
+		}
+	}
+
+	// Parse the address to make sure it's a valid URL.
+	baseURL, err := url.Parse(config.Address)
+	if err != nil {
+		return nil, fmt.Errorf("invalid address: %v", err)
+	}
+
+	baseURL.Path = config.BasePath
+	if !strings.HasSuffix(baseURL.Path, "/") {
+		baseURL.Path += "/"
+	}
+
+	// This value must be provided by the user.
+	if config.Token == "" {
+		return nil, fmt.Errorf("missing API token")
+	}
+
+	// Create the client.
+	client := &Client{
+		baseURL:      baseURL,
+		token:        config.Token,
+		headers:      config.Headers,
+		retryLogHook: config.RetryLogHook,
+	}
+
+	client.http = &retryablehttp.Client{
+		Backoff:      client.retryHTTPBackoff,
+		CheckRetry:   client.retryHTTPCheck,
+		ErrorHandler: retryablehttp.PassthroughErrorHandler,
+		HTTPClient:   config.HTTPClient,
+		RetryWaitMin: 100 * time.Millisecond,
+		RetryWaitMax: 400 * time.Millisecond,
+		RetryMax:     30,
+	}
+
+	// Configure the rate limiter.
+	if err := client.configureLimiter(); err != nil {
+		return nil, err
+	}
+
+	// Create the services.
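+	// Each service below shares this one *Client, so the token, retry
+	// policy, and rate limiter configured above apply to all of them.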
+	client.Applies = &applies{client: client}
+	client.ConfigurationVersions = &configurationVersions{client: client}
+	client.NotificationConfigurations = &notificationConfigurations{client: client}
+	client.OAuthClients = &oAuthClients{client: client}
+	client.OAuthTokens = &oAuthTokens{client: client}
+	client.Organizations = &organizations{client: client}
+	client.OrganizationTokens = &organizationTokens{client: client}
+	client.Plans = &plans{client: client}
+	client.PlanExports = &planExports{client: client}
+	client.Policies = &policies{client: client}
+	client.PolicyChecks = &policyChecks{client: client}
+	client.PolicySets = &policySets{client: client}
+	client.Runs = &runs{client: client}
+	client.SSHKeys = &sshKeys{client: client}
+	client.StateVersions = &stateVersions{client: client}
+	client.Teams = &teams{client: client}
+	client.TeamAccess = &teamAccesses{client: client}
+	client.TeamMembers = &teamMembers{client: client}
+	client.TeamTokens = &teamTokens{client: client}
+	client.Users = &users{client: client}
+	client.Variables = &variables{client: client}
+	client.Workspaces = &workspaces{client: client}
+
+	return client, nil
+}
+
+// RetryServerErrors configures the retry HTTP check to also retry
+// unexpected errors or requests that failed with a server error.
+func (c *Client) RetryServerErrors(retry bool) {
+	c.retryServerErrors = retry
+}
+
+// retryHTTPCheck provides a callback for Client.CheckRetry which
+// will retry both rate limit (429) and server (>= 500) errors.
+func (c *Client) retryHTTPCheck(ctx context.Context, resp *http.Response, err error) (bool, error) {
+	if ctx.Err() != nil {
+		return false, ctx.Err()
+	}
+	if err != nil {
+		return c.retryServerErrors, err
+	}
+	if resp.StatusCode == 429 || (c.retryServerErrors && resp.StatusCode >= 500) {
+		return true, nil
+	}
+	return false, nil
+}
+
+// retryHTTPBackoff provides a generic callback for Client.Backoff which
+// will pass through all calls based on the status code of the response.
+func (c *Client) retryHTTPBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	if c.retryLogHook != nil {
+		c.retryLogHook(attemptNum, resp)
+	}
+
+	// Use the rate limit backoff function when we are rate limited.
+	if resp != nil && resp.StatusCode == 429 {
+		return rateLimitBackoff(min, max, attemptNum, resp)
+	}
+
+	// Set custom durations when we experience a service interruption.
+	min = 700 * time.Millisecond
+	max = 900 * time.Millisecond
+
+	return retryablehttp.LinearJitterBackoff(min, max, attemptNum, resp)
+}
+
+// rateLimitBackoff provides a callback for Client.Backoff which will use the
+// X-RateLimit-Reset header to determine the time to wait. We add some jitter
+// to prevent a thundering herd.
+//
+// min and max are mainly used for bounding the jitter that will be added to
+// the reset time retrieved from the headers. But if the final wait time is
+// less than min, min will be used instead.
+func rateLimitBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration {
+	// rnd is used to generate pseudo-random numbers.
+	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+	// First create some jitter bounded by the min and max durations.
+	jitter := time.Duration(rnd.Float64() * float64(max-min))
+
+	if resp != nil {
+		if v := resp.Header.Get(headerRateReset); v != "" {
+			if reset, _ := strconv.ParseFloat(v, 64); reset > 0 {
+				// Only update min if the given time to wait is longer.
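+				// (reset is the number of seconds until the limit resets,
+				// so multiplying by 1e9 converts it to nanoseconds for
+				// time.Duration)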
+ if wait := time.Duration(reset * 1e9); wait > min { + min = wait + } + } + } + } + + return min + jitter +} + +// configureLimiter configures the rate limiter. +func (c *Client) configureLimiter() error { + // Create a new request. + u, err := c.baseURL.Parse(PingEndpoint) + if err != nil { + return err + } + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return err + } + + // Attach the default headers. + for k, v := range c.headers { + req.Header[k] = v + } + req.Header.Set("Accept", "application/vnd.api+json") + req.Header.Set("Authorization", "Bearer "+c.token) + + // Make a single request to retrieve the rate limit headers. + resp, err := c.http.HTTPClient.Do(req) + if err != nil { + return err + } + resp.Body.Close() + + // Set default values for when rate limiting is disabled. + limit := rate.Inf + burst := 0 + + if v := resp.Header.Get(headerRateLimit); v != "" { + if rateLimit, _ := strconv.ParseFloat(v, 64); rateLimit > 0 { + // Configure the limit and burst using a split of 2/3 for the limit and + // 1/3 for the burst. This enables clients to burst 1/3 of the allowed + // calls before the limiter kicks in. The remaining calls will then be + // spread out evenly using intervals of time.Second / limit which should + // prevent hitting the rate limit. + limit = rate.Limit(rateLimit * 0.66) + burst = int(rateLimit * 0.33) + } + } + + // Create a new limiter using the calculated values. + c.limiter = rate.NewLimiter(limit, burst) + + return nil +} + +// newRequest creates an API request. A relative URL path can be provided in +// path, in which case it is resolved relative to the apiVersionPath of the +// Client. Relative URL paths should always be specified without a preceding +// slash. +// If v is supplied, the value will be JSONAPI encoded and included as the +// request body. If the method is GET, the value will be parsed and added as +// query parameters. +func (c *Client) newRequest(method, path string, v interface{}) (*retryablehttp.Request, error) { + u, err := c.baseURL.Parse(path) + if err != nil { + return nil, err + } + + // Create a request specific headers map. + reqHeaders := make(http.Header) + reqHeaders.Set("Authorization", "Bearer "+c.token) + + var body interface{} + switch method { + case "GET": + reqHeaders.Set("Accept", "application/vnd.api+json") + + if v != nil { + q, err := query.Values(v) + if err != nil { + return nil, err + } + u.RawQuery = q.Encode() + } + case "DELETE", "PATCH", "POST": + reqHeaders.Set("Accept", "application/vnd.api+json") + reqHeaders.Set("Content-Type", "application/vnd.api+json") + + if v != nil { + buf := bytes.NewBuffer(nil) + if err := jsonapi.MarshalPayloadWithoutIncluded(buf, v); err != nil { + return nil, err + } + body = buf + } + case "PUT": + reqHeaders.Set("Accept", "application/json") + reqHeaders.Set("Content-Type", "application/octet-stream") + body = v + } + + req, err := retryablehttp.NewRequest(method, u.String(), body) + if err != nil { + return nil, err + } + + // Set the default headers. + for k, v := range c.headers { + req.Header[k] = v + } + + // Set the request specific headers. + for k, v := range reqHeaders { + req.Header[k] = v + } + + return req, nil +} + +// do sends an API request and returns the API response. The API response +// is JSONAPI decoded and the document's primary data is stored in the value +// pointed to by v, or returned as an error if an API error has occurred. 
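+//
+// (Decoding distinguishes a single resource from a paginated list by
+// checking v for Items and Pagination fields, as implemented below.)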
+ +// If v implements the io.Writer interface, the raw response body will be +// written to v, without attempting to first decode it. +// +// The provided ctx must be non-nil. If it is canceled or times out, ctx.Err() +// will be returned. +func (c *Client) do(ctx context.Context, req *retryablehttp.Request, v interface{}) error { + // Wait will block until the limiter can obtain a new token + // or returns an error if the given context is canceled. + if err := c.limiter.Wait(ctx); err != nil { + return err + } + + // Add the context to the request. + req = req.WithContext(ctx) + + // Execute the request and check the response. + resp, err := c.http.Do(req) + if err != nil { + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + select { + case <-ctx.Done(): + return ctx.Err() + default: + return err + } + } + defer resp.Body.Close() + + // Basic response checking. + if err := checkResponseCode(resp); err != nil { + return err + } + + // Return here if decoding the response isn't needed. + if v == nil { + return nil + } + + // If v implements io.Writer, write the raw response body. + if w, ok := v.(io.Writer); ok { + _, err = io.Copy(w, resp.Body) + return err + } + + // Get the value of v so we can test if it's a struct. + dst := reflect.Indirect(reflect.ValueOf(v)) + + // Return an error if v is not a struct or an io.Writer. + if dst.Kind() != reflect.Struct { + return fmt.Errorf("v must be a struct or an io.Writer") + } + + // Try to get the Items and Pagination struct fields. + items := dst.FieldByName("Items") + pagination := dst.FieldByName("Pagination") + + // Unmarshal a single value if v does not contain the + // Items and Pagination struct fields. + if !items.IsValid() || !pagination.IsValid() { + return jsonapi.UnmarshalPayload(resp.Body, v) + } + + // Return an error if v.Items is not a slice. + if items.Type().Kind() != reflect.Slice { + return fmt.Errorf("v.Items must be a slice") + } + + // Create a temporary buffer and copy all the read data into it. + body := bytes.NewBuffer(nil) + reader := io.TeeReader(resp.Body, body) + + // Unmarshal as a list of values as v.Items is a slice. + raw, err := jsonapi.UnmarshalManyPayload(reader, items.Type().Elem()) + if err != nil { + return err + } + + // Make a new slice to hold the results. + sliceType := reflect.SliceOf(items.Type().Elem()) + result := reflect.MakeSlice(sliceType, 0, len(raw)) + + // Add all of the results to the new slice. + for _, v := range raw { + result = reflect.Append(result, reflect.ValueOf(v)) + } + + // Pointer-swap the result. + items.Set(result) + + // As we are getting a list of values, we need to decode + // the pagination details out of the response body. + p, err := parsePagination(body) + if err != nil { + return err + } + + // Pointer-swap the decoded pagination details. + pagination.Set(reflect.ValueOf(p)) + + return nil +} + +// ListOptions is used to specify pagination options when making API requests. +// Pagination allows breaking up large result sets into chunks, or "pages". +type ListOptions struct { + // The page number to request. The results vary based on the PageSize. + PageNumber int `url:"page[number],omitempty"` + + // The number of elements returned in a single page. + PageSize int `url:"page[size],omitempty"` +} + +// Pagination is used to return the pagination details of an API request. 
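+//
+// A paging sketch (illustrative, not upstream code; "my-org" and process are
+// placeholders): walk every page of an organization's workspaces.
+//
+//	opts := tfe.WorkspaceListOptions{ListOptions: tfe.ListOptions{PageSize: 50}}
+//	for {
+//		wl, err := client.Workspaces.List(ctx, "my-org", opts)
+//		if err != nil {
+//			break
+//		}
+//		process(wl.Items)
+//		if wl.NextPage == 0 {
+//			break
+//		}
+//		opts.PageNumber = wl.NextPage
+//	}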
+type Pagination struct {
+	CurrentPage  int `json:"current-page"`
+	PreviousPage int `json:"prev-page"`
+	NextPage     int `json:"next-page"`
+	TotalPages   int `json:"total-pages"`
+	TotalCount   int `json:"total-count"`
+}
+
+func parsePagination(body io.Reader) (*Pagination, error) {
+	var raw struct {
+		Meta struct {
+			Pagination Pagination `json:"pagination"`
+		} `json:"meta"`
+	}
+
+	// JSON decode the raw response.
+	if err := json.NewDecoder(body).Decode(&raw); err != nil {
+		return &Pagination{}, err
+	}
+
+	return &raw.Meta.Pagination, nil
+}
+
+// checkResponseCode can be used to check the status code of an HTTP request.
+func checkResponseCode(r *http.Response) error {
+	if r.StatusCode >= 200 && r.StatusCode <= 299 {
+		return nil
+	}
+
+	switch r.StatusCode {
+	case 401:
+		return ErrUnauthorized
+	case 404:
+		return ErrResourceNotFound
+	case 409:
+		switch {
+		case strings.HasSuffix(r.Request.URL.Path, "actions/lock"):
+			return ErrWorkspaceLocked
+		case strings.HasSuffix(r.Request.URL.Path, "actions/unlock"):
+			return ErrWorkspaceNotLocked
+		case strings.HasSuffix(r.Request.URL.Path, "actions/force-unlock"):
+			return ErrWorkspaceNotLocked
+		}
+	}
+
+	// Decode the error payload.
+	errPayload := &jsonapi.ErrorsPayload{}
+	err := json.NewDecoder(r.Body).Decode(errPayload)
+	if err != nil || len(errPayload.Errors) == 0 {
+		return fmt.Errorf(r.Status)
+	}
+
+	// Parse and format the errors.
+	var errs []string
+	for _, e := range errPayload.Errors {
+		if e.Detail == "" {
+			errs = append(errs, e.Title)
+		} else {
+			errs = append(errs, fmt.Sprintf("%s\n\n%s", e.Title, e.Detail))
+		}
+	}
+
+	return fmt.Errorf(strings.Join(errs, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/type_helpers.go b/vendor/github.com/hashicorp/go-tfe/type_helpers.go
new file mode 100644
index 00000000..9296e4e6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/type_helpers.go
@@ -0,0 +1,56 @@
+package tfe
+
+// Access returns a pointer to the given team access type.
+func Access(v AccessType) *AccessType {
+	return &v
+}
+
+// AuthPolicy returns a pointer to the given authentication policy.
+func AuthPolicy(v AuthPolicyType) *AuthPolicyType {
+	return &v
+}
+
+// Bool returns a pointer to the given bool.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Category returns a pointer to the given category type.
+func Category(v CategoryType) *CategoryType {
+	return &v
+}
+
+// EnforcementMode returns a pointer to the given enforcement level.
+func EnforcementMode(v EnforcementLevel) *EnforcementLevel {
+	return &v
+}
+
+// Int returns a pointer to the given int.
+func Int(v int) *int {
+	return &v
+}
+
+// Int64 returns a pointer to the given int64.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// NotificationDestination returns a pointer to the given notification configuration destination type.
+func NotificationDestination(v NotificationDestinationType) *NotificationDestinationType {
+	return &v
+}
+
+// PlanExportType returns a pointer to the given plan export data type.
+func PlanExportType(v PlanExportDataType) *PlanExportDataType {
+	return &v
+}
+
+// ServiceProvider returns a pointer to the given service provider type.
+func ServiceProvider(v ServiceProviderType) *ServiceProviderType {
+	return &v
+}
+
+// String returns a pointer to the given string.
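+//
+// These helpers exist because options structs take pointers, so that unset
+// fields can be omitted; a typical call site (illustrative only) looks like:
+//
+//	opts := tfe.VariableUpdateOptions{
+//		Key:       tfe.String("region"),
+//		Sensitive: tfe.Bool(false),
+//	}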
+func String(v string) *string { + return &v +} diff --git a/vendor/github.com/hashicorp/go-tfe/user.go b/vendor/github.com/hashicorp/go-tfe/user.go new file mode 100644 index 00000000..f0ca28ee --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/user.go @@ -0,0 +1,93 @@ +package tfe + +import ( + "context" +) + +// Compile-time proof of interface implementation. +var _ Users = (*users)(nil) + +// Users describes all the user related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/user.html +type Users interface { + // ReadCurrent reads the details of the currently authenticated user. + ReadCurrent(ctx context.Context) (*User, error) + + // Update attributes of the currently authenticated user. + Update(ctx context.Context, options UserUpdateOptions) (*User, error) +} + +// users implements Users. +type users struct { + client *Client +} + +// User represents a Terraform Enterprise user. +type User struct { + ID string `jsonapi:"primary,users"` + AvatarURL string `jsonapi:"attr,avatar-url"` + Email string `jsonapi:"attr,email"` + IsServiceAccount bool `jsonapi:"attr,is-service-account"` + TwoFactor *TwoFactor `jsonapi:"attr,two-factor"` + UnconfirmedEmail string `jsonapi:"attr,unconfirmed-email"` + Username string `jsonapi:"attr,username"` + V2Only bool `jsonapi:"attr,v2-only"` + + // Relations + // AuthenticationTokens *AuthenticationTokens `jsonapi:"relation,authentication-tokens"` +} + +// TwoFactor represents the organization permissions. +type TwoFactor struct { + Enabled bool `json:"enabled"` + Verified bool `json:"verified"` +} + +// ReadCurrent reads the details of the currently authenticated user. +func (s *users) ReadCurrent(ctx context.Context) (*User, error) { + req, err := s.client.newRequest("GET", "account/details", nil) + if err != nil { + return nil, err + } + + u := &User{} + err = s.client.do(ctx, req, u) + if err != nil { + return nil, err + } + + return u, nil +} + +// UserUpdateOptions represents the options for updating a user. +type UserUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,users"` + + // New username. + Username *string `jsonapi:"attr,username,omitempty"` + + // New email address (must be consumed afterwards to take effect). + Email *string `jsonapi:"attr,email,omitempty"` +} + +// Update attributes of the currently authenticated user. +func (s *users) Update(ctx context.Context, options UserUpdateOptions) (*User, error) { + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("PATCH", "account/update", &options) + if err != nil { + return nil, err + } + + u := &User{} + err = s.client.do(ctx, req, u) + if err != nil { + return nil, err + } + + return u, nil +} diff --git a/vendor/github.com/hashicorp/go-tfe/validations.go b/vendor/github.com/hashicorp/go-tfe/validations.go new file mode 100644 index 00000000..38d95a68 --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/validations.go @@ -0,0 +1,19 @@ +package tfe + +import ( + "regexp" +) + +// A regular expression used to validate common string ID patterns. +var reStringID = regexp.MustCompile(`^[a-zA-Z0-9\-\._]+$`) + +// validString checks if the given input is present and non-empty. +func validString(v *string) bool { + return v != nil && *v != "" +} + +// validStringID checks if the given string pointer is non-nil and +// contains a typical string identifier. 
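+// Per reStringID above, valid IDs contain only ASCII letters, digits,
+// hyphens, underscores, and dots.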
+func validStringID(v *string) bool {
+	return v != nil && reStringID.MatchString(*v)
+}
diff --git a/vendor/github.com/hashicorp/go-tfe/variable.go b/vendor/github.com/hashicorp/go-tfe/variable.go
new file mode 100644
index 00000000..eb3dbc6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-tfe/variable.go
@@ -0,0 +1,240 @@
+package tfe
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// Compile-time proof of interface implementation.
+var _ Variables = (*variables)(nil)
+
+// Variables describes all the variable related methods that the Terraform
+// Enterprise API supports.
+//
+// TFE API docs: https://www.terraform.io/docs/enterprise/api/variables.html
+type Variables interface {
+	// List all the variables associated with the given workspace.
+	List(ctx context.Context, options VariableListOptions) (*VariableList, error)
+
+	// Create is used to create a new variable.
+	Create(ctx context.Context, options VariableCreateOptions) (*Variable, error)
+
+	// Read a variable by its ID.
+	Read(ctx context.Context, variableID string) (*Variable, error)
+
+	// Update values of an existing variable.
+	Update(ctx context.Context, variableID string, options VariableUpdateOptions) (*Variable, error)
+
+	// Delete a variable by its ID.
+	Delete(ctx context.Context, variableID string) error
+}
+
+// variables implements Variables.
+type variables struct {
+	client *Client
+}
+
+// CategoryType represents a category type.
+type CategoryType string
+
+// List all available categories.
+const (
+	CategoryEnv       CategoryType = "env"
+	CategoryTerraform CategoryType = "terraform"
+)
+
+// VariableList represents a list of variables.
+type VariableList struct {
+	*Pagination
+	Items []*Variable
+}
+
+// Variable represents a Terraform Enterprise variable.
+type Variable struct {
+	ID        string       `jsonapi:"primary,vars"`
+	Key       string       `jsonapi:"attr,key"`
+	Value     string       `jsonapi:"attr,value"`
+	Category  CategoryType `jsonapi:"attr,category"`
+	HCL       bool         `jsonapi:"attr,hcl"`
+	Sensitive bool         `jsonapi:"attr,sensitive"`
+
+	// Relations
+	Workspace *Workspace `jsonapi:"relation,workspace"`
+}
+
+// VariableListOptions represents the options for listing variables.
+type VariableListOptions struct {
+	ListOptions
+	Organization *string `url:"filter[organization][name]"`
+	Workspace    *string `url:"filter[workspace][name]"`
+}
+
+func (o VariableListOptions) valid() error {
+	if !validString(o.Organization) {
+		return errors.New("organization is required")
+	}
+	if !validString(o.Workspace) {
+		return errors.New("workspace is required")
+	}
+	return nil
+}
+
+// List all the variables associated with the given workspace.
+func (s *variables) List(ctx context.Context, options VariableListOptions) (*VariableList, error) {
+	if err := options.valid(); err != nil {
+		return nil, err
+	}
+
+	req, err := s.client.newRequest("GET", "vars", &options)
+	if err != nil {
+		return nil, err
+	}
+
+	vl := &VariableList{}
+	err = s.client.do(ctx, req, vl)
+	if err != nil {
+		return nil, err
+	}
+
+	return vl, nil
+}
+
+// VariableCreateOptions represents the options for creating a new variable.
+type VariableCreateOptions struct {
+	// For internal use only!
+	ID string `jsonapi:"primary,vars"`
+
+	// The name of the variable.
+	Key *string `jsonapi:"attr,key"`
+
+	// The value of the variable.
+	Value *string `jsonapi:"attr,value,omitempty"`
+
+	// Whether this is a Terraform or environment variable.
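+	// Valid values are CategoryTerraform and CategoryEnv.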
+ Category *CategoryType `jsonapi:"attr,category"` + + // Whether to evaluate the value of the variable as a string of HCL code. + HCL *bool `jsonapi:"attr,hcl,omitempty"` + + // Whether the value is sensitive. + Sensitive *bool `jsonapi:"attr,sensitive,omitempty"` + + // The workspace that owns the variable. + Workspace *Workspace `jsonapi:"relation,workspace"` +} + +func (o VariableCreateOptions) valid() error { + if !validString(o.Key) { + return errors.New("key is required") + } + if o.Category == nil { + return errors.New("category is required") + } + if o.Workspace == nil { + return errors.New("workspace is required") + } + return nil +} + +// Create is used to create a new variable. +func (s *variables) Create(ctx context.Context, options VariableCreateOptions) (*Variable, error) { + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + req, err := s.client.newRequest("POST", "vars", &options) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, nil +} + +// Read a variable by its ID. +func (s *variables) Read(ctx context.Context, variableID string) (*Variable, error) { + if !validStringID(&variableID) { + return nil, errors.New("invalid value for variable ID") + } + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, err +} + +// VariableUpdateOptions represents the options for updating a variable. +type VariableUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,vars"` + + // The name of the variable. + Key *string `jsonapi:"attr,key,omitempty"` + + // The value of the variable. + Value *string `jsonapi:"attr,value,omitempty"` + + // Whether to evaluate the value of the variable as a string of HCL code. + HCL *bool `jsonapi:"attr,hcl,omitempty"` + + // Whether the value is sensitive. + Sensitive *bool `jsonapi:"attr,sensitive,omitempty"` +} + +// Update values of an existing variable. +func (s *variables) Update(ctx context.Context, variableID string, options VariableUpdateOptions) (*Variable, error) { + if !validStringID(&variableID) { + return nil, errors.New("invalid value for variable ID") + } + + // Make sure we don't send a user provided ID. + options.ID = variableID + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + v := &Variable{} + err = s.client.do(ctx, req, v) + if err != nil { + return nil, err + } + + return v, nil +} + +// Delete a variable by its ID. 
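+//
+// An illustrative sketch (not upstream code; the variable ID is a
+// placeholder):
+//
+//	err := client.Variables.Delete(ctx, "var-abc123")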
+func (s *variables) Delete(ctx context.Context, variableID string) error { + if !validStringID(&variableID) { + return errors.New("invalid value for variable ID") + } + + u := fmt.Sprintf("vars/%s", url.QueryEscape(variableID)) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} diff --git a/vendor/github.com/hashicorp/go-tfe/workspace.go b/vendor/github.com/hashicorp/go-tfe/workspace.go new file mode 100644 index 00000000..cdcd228c --- /dev/null +++ b/vendor/github.com/hashicorp/go-tfe/workspace.go @@ -0,0 +1,544 @@ +package tfe + +import ( + "context" + "errors" + "fmt" + "net/url" + "time" +) + +// Compile-time proof of interface implementation. +var _ Workspaces = (*workspaces)(nil) + +// Workspaces describes all the workspace related methods that the Terraform +// Enterprise API supports. +// +// TFE API docs: https://www.terraform.io/docs/enterprise/api/workspaces.html +type Workspaces interface { + // List all the workspaces within an organization. + List(ctx context.Context, organization string, options WorkspaceListOptions) (*WorkspaceList, error) + + // Create is used to create a new workspace. + Create(ctx context.Context, organization string, options WorkspaceCreateOptions) (*Workspace, error) + + // Read a workspace by its name. + Read(ctx context.Context, organization string, workspace string) (*Workspace, error) + + // Update settings of an existing workspace. + Update(ctx context.Context, organization string, workspace string, options WorkspaceUpdateOptions) (*Workspace, error) + + // Delete a workspace by its name. + Delete(ctx context.Context, organization string, workspace string) error + + // RemoveVCSConnection from a workspace. + RemoveVCSConnection(ctx context.Context, organization, workspace string) (*Workspace, error) + + // Lock a workspace by its ID. + Lock(ctx context.Context, workspaceID string, options WorkspaceLockOptions) (*Workspace, error) + + // Unlock a workspace by its ID. + Unlock(ctx context.Context, workspaceID string) (*Workspace, error) + + // ForceUnlock a workspace by its ID. + ForceUnlock(ctx context.Context, workspaceID string) (*Workspace, error) + + // AssignSSHKey to a workspace. + AssignSSHKey(ctx context.Context, workspaceID string, options WorkspaceAssignSSHKeyOptions) (*Workspace, error) + + // UnassignSSHKey from a workspace. + UnassignSSHKey(ctx context.Context, workspaceID string) (*Workspace, error) +} + +// workspaces implements Workspaces. +type workspaces struct { + client *Client +} + +// WorkspaceList represents a list of workspaces. +type WorkspaceList struct { + *Pagination + Items []*Workspace +} + +// Workspace represents a Terraform Enterprise workspace. 
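+//
+// Workspaces are read by organization and workspace name; a lookup sketch
+// (illustrative only, both names are placeholders):
+//
+//	ws, err := client.Workspaces.Read(ctx, "my-org", "networking-prod")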
+type Workspace struct { + ID string `jsonapi:"primary,workspaces"` + Actions *WorkspaceActions `jsonapi:"attr,actions"` + AutoApply bool `jsonapi:"attr,auto-apply"` + CanQueueDestroyPlan bool `jsonapi:"attr,can-queue-destroy-plan"` + CreatedAt time.Time `jsonapi:"attr,created-at,iso8601"` + Environment string `jsonapi:"attr,environment"` + FileTriggersEnabled bool `jsonapi:"attr,file-triggers-enabled"` + Locked bool `jsonapi:"attr,locked"` + MigrationEnvironment string `jsonapi:"attr,migration-environment"` + Name string `jsonapi:"attr,name"` + Operations bool `jsonapi:"attr,operations"` + Permissions *WorkspacePermissions `jsonapi:"attr,permissions"` + QueueAllRuns bool `jsonapi:"attr,queue-all-runs"` + TerraformVersion string `jsonapi:"attr,terraform-version"` + TriggerPrefixes []string `jsonapi:"attr,trigger-prefixes"` + VCSRepo *VCSRepo `jsonapi:"attr,vcs-repo"` + WorkingDirectory string `jsonapi:"attr,working-directory"` + + // Relations + CurrentRun *Run `jsonapi:"relation,current-run"` + Organization *Organization `jsonapi:"relation,organization"` + SSHKey *SSHKey `jsonapi:"relation,ssh-key"` +} + +// VCSRepo contains the configuration of a VCS integration. +type VCSRepo struct { + Branch string `json:"branch"` + Identifier string `json:"identifier"` + IngressSubmodules bool `json:"ingress-submodules"` + OAuthTokenID string `json:"oauth-token-id"` +} + +// WorkspaceActions represents the workspace actions. +type WorkspaceActions struct { + IsDestroyable bool `json:"is-destroyable"` +} + +// WorkspacePermissions represents the workspace permissions. +type WorkspacePermissions struct { + CanDestroy bool `json:"can-destroy"` + CanForceUnlock bool `json:"can-force-unlock"` + CanLock bool `json:"can-lock"` + CanQueueApply bool `json:"can-queue-apply"` + CanQueueDestroy bool `json:"can-queue-destroy"` + CanQueueRun bool `json:"can-queue-run"` + CanReadSettings bool `json:"can-read-settings"` + CanUnlock bool `json:"can-unlock"` + CanUpdate bool `json:"can-update"` + CanUpdateVariable bool `json:"can-update-variable"` +} + +// WorkspaceListOptions represents the options for listing workspaces. +type WorkspaceListOptions struct { + ListOptions + + // A search string (partial workspace name) used to filter the results. + Search *string `url:"search[name],omitempty"` +} + +// List all the workspaces within an organization. +func (s *workspaces) List(ctx context.Context, organization string, options WorkspaceListOptions) (*WorkspaceList, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + + u := fmt.Sprintf("organizations/%s/workspaces", url.QueryEscape(organization)) + req, err := s.client.newRequest("GET", u, &options) + if err != nil { + return nil, err + } + + wl := &WorkspaceList{} + err = s.client.do(ctx, req, wl) + if err != nil { + return nil, err + } + + return wl, nil +} + +// WorkspaceCreateOptions represents the options for creating a new workspace. +type WorkspaceCreateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Whether to automatically apply changes when a Terraform plan is successful. + AutoApply *bool `jsonapi:"attr,auto-apply,omitempty"` + + // Whether to filter runs based on the changed files in a VCS push. If + // enabled, the working directory and trigger prefixes describe a set of + // paths which must contain changes for a VCS push to trigger a run. If + // disabled, any push will trigger a run. 
+ FileTriggersEnabled *bool `jsonapi:"attr,file-triggers-enabled,omitempty"` + + // The legacy TFE environment to use as the source of the migration, in the + // form organization/environment. Omit this unless you are migrating a legacy + // environment. + MigrationEnvironment *string `jsonapi:"attr,migration-environment,omitempty"` + + // The name of the workspace, which can only include letters, numbers, -, + // and _. This will be used as an identifier and must be unique in the + // organization. + Name *string `jsonapi:"attr,name"` + + // Whether to queue all runs. Unless this is set to true, runs triggered by + // a webhook will not be queued until at least one run is manually queued. + QueueAllRuns *bool `jsonapi:"attr,queue-all-runs,omitempty"` + + // The version of Terraform to use for this workspace. Upon creating a + // workspace, the latest version is selected unless otherwise specified. + TerraformVersion *string `jsonapi:"attr,terraform-version,omitempty"` + + // List of repository-root-relative paths which list all locations to be + // tracked for changes. See FileTriggersEnabled above for more details. + TriggerPrefixes []string `jsonapi:"attr,trigger-prefixes,omitempty"` + + // Settings for the workspace's VCS repository. If omitted, the workspace is + // created without a VCS repo. If included, you must specify at least the + // oauth-token-id and identifier keys below. + VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo,omitempty"` + + // A relative path that Terraform will execute within. This defaults to the + // root of your repository and is typically set to a subdirectory matching the + // environment when multiple environments exist within the same repository. + WorkingDirectory *string `jsonapi:"attr,working-directory,omitempty"` +} + +// VCSRepoOptions represents the configuration options of a VCS integration. +type VCSRepoOptions struct { + Branch *string `json:"branch,omitempty"` + Identifier *string `json:"identifier,omitempty"` + IngressSubmodules *bool `json:"ingress-submodules,omitempty"` + OAuthTokenID *string `json:"oauth-token-id,omitempty"` +} + +func (o WorkspaceCreateOptions) valid() error { + if !validString(o.Name) { + return errors.New("name is required") + } + if !validStringID(o.Name) { + return errors.New("invalid value for name") + } + return nil +} + +// Create is used to create a new workspace. +func (s *workspaces) Create(ctx context.Context, organization string, options WorkspaceCreateOptions) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("organizations/%s/workspaces", url.QueryEscape(organization)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Read a workspace by its name. 
+func (s *workspaces) Read(ctx context.Context, organization, workspace string) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if !validStringID(&workspace) { + return nil, errors.New("invalid value for workspace") + } + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("GET", u, nil) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// WorkspaceUpdateOptions represents the options for updating a workspace. +type WorkspaceUpdateOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Whether to automatically apply changes when a Terraform plan is successful. + AutoApply *bool `jsonapi:"attr,auto-apply,omitempty"` + + // A new name for the workspace, which can only include letters, numbers, -, + // and _. This will be used as an identifier and must be unique in the + // organization. Warning: Changing a workspace's name changes its URL in the + // API and UI. + Name *string `jsonapi:"attr,name,omitempty"` + + // Whether to filter runs based on the changed files in a VCS push. If + // enabled, the working directory and trigger prefixes describe a set of + // paths which must contain changes for a VCS push to trigger a run. If + // disabled, any push will trigger a run. + FileTriggersEnabled *bool `jsonapi:"attr,file-triggers-enabled,omitempty"` + + // Whether to queue all runs. Unless this is set to true, runs triggered by + // a webhook will not be queued until at least one run is manually queued. + QueueAllRuns *bool `jsonapi:"attr,queue-all-runs,omitempty"` + + // The version of Terraform to use for this workspace. + TerraformVersion *string `jsonapi:"attr,terraform-version,omitempty"` + + // List of repository-root-relative paths which list all locations to be + // tracked for changes. See FileTriggersEnabled above for more details. + TriggerPrefixes []string `jsonapi:"attr,trigger-prefixes,omitempty"` + + // To delete a workspace's existing VCS repo, specify null instead of an + // object. To modify a workspace's existing VCS repo, include whichever of + // the keys below you wish to modify. To add a new VCS repo to a workspace + // that didn't previously have one, include at least the oauth-token-id and + // identifier keys. + VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo,omitempty"` + + // A relative path that Terraform will execute within. This defaults to the + // root of your repository and is typically set to a subdirectory matching + // the environment when multiple environments exist within the same + // repository. + WorkingDirectory *string `jsonapi:"attr,working-directory,omitempty"` +} + +// Update settings of an existing workspace. +func (s *workspaces) Update(ctx context.Context, organization, workspace string, options WorkspaceUpdateOptions) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if !validStringID(&workspace) { + return nil, errors.New("invalid value for workspace") + } + + // Make sure we don't send a user provided ID. 
+ options.ID = "" + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Delete a workspace by its name. +func (s *workspaces) Delete(ctx context.Context, organization, workspace string) error { + if !validStringID(&organization) { + return errors.New("invalid value for organization") + } + if !validStringID(&workspace) { + return errors.New("invalid value for workspace") + } + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + req, err := s.client.newRequest("DELETE", u, nil) + if err != nil { + return err + } + + return s.client.do(ctx, req, nil) +} + +// workspaceRemoveVCSConnectionOptions +type workspaceRemoveVCSConnectionOptions struct { + ID string `jsonapi:"primary,workspaces"` + VCSRepo *VCSRepoOptions `jsonapi:"attr,vcs-repo"` +} + +// RemoveVCSConnection from a workspace. +func (s *workspaces) RemoveVCSConnection(ctx context.Context, organization, workspace string) (*Workspace, error) { + if !validStringID(&organization) { + return nil, errors.New("invalid value for organization") + } + if !validStringID(&workspace) { + return nil, errors.New("invalid value for workspace") + } + + u := fmt.Sprintf( + "organizations/%s/workspaces/%s", + url.QueryEscape(organization), + url.QueryEscape(workspace), + ) + + req, err := s.client.newRequest("PATCH", u, &workspaceRemoveVCSConnectionOptions{}) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// WorkspaceLockOptions represents the options for locking a workspace. +type WorkspaceLockOptions struct { + // Specifies the reason for locking the workspace. + Reason *string `json:"reason,omitempty"` +} + +// Lock a workspace by its ID. +func (s *workspaces) Lock(ctx context.Context, workspaceID string, options WorkspaceLockOptions) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/actions/lock", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// Unlock a workspace by its ID. +func (s *workspaces) Unlock(ctx context.Context, workspaceID string) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/actions/unlock", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// ForceUnlock a workspace by its ID. 
+func (s *workspaces) ForceUnlock(ctx context.Context, workspaceID string) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/actions/force-unlock", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("POST", u, nil) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// WorkspaceAssignSSHKeyOptions represents the options to assign an SSH key to +// a workspace. +type WorkspaceAssignSSHKeyOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // The SSH key ID to assign. + SSHKeyID *string `jsonapi:"attr,id"` +} + +func (o WorkspaceAssignSSHKeyOptions) valid() error { + if !validString(o.SSHKeyID) { + return errors.New("SSH key ID is required") + } + if !validStringID(o.SSHKeyID) { + return errors.New("invalid value for SSH key ID") + } + return nil +} + +// AssignSSHKey to a workspace. +func (s *workspaces) AssignSSHKey(ctx context.Context, workspaceID string, options WorkspaceAssignSSHKeyOptions) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + if err := options.valid(); err != nil { + return nil, err + } + + // Make sure we don't send a user provided ID. + options.ID = "" + + u := fmt.Sprintf("workspaces/%s/relationships/ssh-key", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("PATCH", u, &options) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} + +// workspaceUnassignSSHKeyOptions represents the options to unassign an SSH key +// to a workspace. +type workspaceUnassignSSHKeyOptions struct { + // For internal use only! + ID string `jsonapi:"primary,workspaces"` + + // Must be nil to unset the currently assigned SSH key. + SSHKeyID *string `jsonapi:"attr,id"` +} + +// UnassignSSHKey from a workspace. +func (s *workspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*Workspace, error) { + if !validStringID(&workspaceID) { + return nil, errors.New("invalid value for workspace ID") + } + + u := fmt.Sprintf("workspaces/%s/relationships/ssh-key", url.QueryEscape(workspaceID)) + req, err := s.client.newRequest("PATCH", u, &workspaceUnassignSSHKeyOptions{}) + if err != nil { + return nil, err + } + + w := &Workspace{} + err = s.client.do(ctx, req, w) + if err != nil { + return nil, err + } + + return w, nil +} diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md index 21fdda4a..fbde8b9a 100644 --- a/vendor/github.com/hashicorp/go-uuid/README.md +++ b/vendor/github.com/hashicorp/go-uuid/README.md @@ -1,6 +1,6 @@ -# uuid +# uuid [![Build Status](https://travis-ci.org/hashicorp/go-uuid.svg?branch=master)](https://travis-ci.org/hashicorp/go-uuid) -Generates UUID-format strings using purely high quality random bytes. +Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes. 
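The go-uuid API being vendored here is small: `GenerateRandomBytes`, `GenerateUUID`, `FormatUUID`, and `ParseUUID`. A minimal usage sketch against the upstream import path `github.com/hashicorp/go-uuid` (illustrative only; this program is not part of the repository):

```go
package main

import (
	"fmt"
	"log"

	uuid "github.com/hashicorp/go-uuid"
)

func main() {
	// Generate a UUID-format string from 16 purely random bytes.
	// Well-formed, but deliberately not RFC 4122 compliant.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(id)

	// Round-trip: parse the string back into its 16 component bytes,
	// then format those bytes into the same string again.
	buf, err := uuid.ParseUUID(id)
	if err != nil {
		log.Fatal(err)
	}
	s, err := uuid.FormatUUID(buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s == id) // true
}
```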
Documentation ============= diff --git a/vendor/github.com/hashicorp/go-uuid/go.mod b/vendor/github.com/hashicorp/go-uuid/go.mod new file mode 100644 index 00000000..dd57f9d2 --- /dev/null +++ b/vendor/github.com/hashicorp/go-uuid/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-uuid diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go index 322b522c..911227f6 100644 --- a/vendor/github.com/hashicorp/go-uuid/uuid.go +++ b/vendor/github.com/hashicorp/go-uuid/uuid.go @@ -6,22 +6,32 @@ import ( "fmt" ) -// GenerateUUID is used to generate a random UUID -func GenerateUUID() (string, error) { - buf := make([]byte, 16) +// GenerateRandomBytes is used to generate random bytes of given size. +func GenerateRandomBytes(size int) ([]byte, error) { + buf := make([]byte, size) if _, err := rand.Read(buf); err != nil { - return "", fmt.Errorf("failed to read random bytes: %v", err) + return nil, fmt.Errorf("failed to read random bytes: %v", err) } + return buf, nil +} +const uuidLen = 16 + +// GenerateUUID is used to generate a random UUID +func GenerateUUID() (string, error) { + buf, err := GenerateRandomBytes(uuidLen) + if err != nil { + return "", err + } return FormatUUID(buf) } func FormatUUID(buf []byte) (string, error) { - if len(buf) != 16 { - return "", fmt.Errorf("wrong length byte slice (%d)", len(buf)) + if buflen := len(buf); buflen != uuidLen { + return "", fmt.Errorf("wrong length byte slice (%d)", buflen) } - return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", + return fmt.Sprintf("%x-%x-%x-%x-%x", buf[0:4], buf[4:6], buf[6:8], @@ -30,16 +40,14 @@ func FormatUUID(buf []byte) (string, error) { } func ParseUUID(uuid string) ([]byte, error) { - if len(uuid) != 36 { + if len(uuid) != 2 * uuidLen + 4 { return nil, fmt.Errorf("uuid string is wrong length") } - hyph := []byte("-") - - if uuid[8] != hyph[0] || - uuid[13] != hyph[0] || - uuid[18] != hyph[0] || - uuid[23] != hyph[0] { + if uuid[8] != '-' || + uuid[13] != '-' || + uuid[18] != '-' || + uuid[23] != '-' { return nil, fmt.Errorf("uuid is improperly formatted") } @@ -49,7 +57,7 @@ func ParseUUID(uuid string) ([]byte, error) { if err != nil { return nil, err } - if len(ret) != 16 { + if len(ret) != uuidLen { return nil, fmt.Errorf("decoded hex is the wrong length") } diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go index 8c73df06..d0557596 100644 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -2,6 +2,7 @@ package version import ( "fmt" + "reflect" "regexp" "strings" ) @@ -113,6 +114,26 @@ func parseSingle(v string) (*Constraint, error) { }, nil } +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. 
+ return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + //------------------------------------------------------------------- // Constraint functions //------------------------------------------------------------------- @@ -126,22 +147,27 @@ func constraintNotEqual(v, c *Version) bool { } func constraintGreaterThan(v, c *Version) bool { - return v.Compare(c) == 1 + return prereleaseCheck(v, c) && v.Compare(c) == 1 } func constraintLessThan(v, c *Version) bool { - return v.Compare(c) == -1 + return prereleaseCheck(v, c) && v.Compare(c) == -1 } func constraintGreaterThanEqual(v, c *Version) bool { - return v.Compare(c) >= 0 + return prereleaseCheck(v, c) && v.Compare(c) >= 0 } func constraintLessThanEqual(v, c *Version) bool { - return v.Compare(c) <= 0 + return prereleaseCheck(v, c) && v.Compare(c) <= 0 } func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + // If the version being checked is naturally less than the constraint, then there // is no way for the version to be valid against the constraint if v.LessThan(c) { diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod new file mode 100644 index 00000000..f5285555 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go index ae2f6b63..30454e1b 100644 --- a/vendor/github.com/hashicorp/go-version/version.go +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -10,14 +10,25 @@ import ( ) // The compiled regular expression used to test the validity of a version. -var versionRegexp *regexp.Regexp +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) // The raw regular expression string used for testing the validity // of a version. -const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + - `?` +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) // Version represents a single version. type Version struct { @@ -25,16 +36,29 @@ type Version struct { pre string segments []int64 si int + original string } func init() { versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") } // NewVersion parses the given version and returns a new // Version. 
func NewVersion(v string) (*Version, error) { - matches := versionRegexp.FindStringSubmatch(v) + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) if matches == nil { return nil, fmt.Errorf("Malformed version: %s", v) } @@ -59,11 +83,17 @@ func NewVersion(v string) (*Version, error) { segments = append(segments, 0) } + pre := matches[7] + if pre == "" { + pre = matches[4] + } + return &Version{ - metadata: matches[7], - pre: matches[4], + metadata: matches[10], + pre: pre, segments: segments, si: si, + original: v, }, nil } @@ -82,7 +112,7 @@ func Must(v *Version, err error) *Version { // or larger than the other version, respectively. // // If you want boolean results, use the LessThan, Equal, -// or GreaterThan methods. +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. func (v *Version) Compare(other *Version) int { // A quick, efficient equality check if v.String() == other.String() { @@ -166,24 +196,42 @@ func comparePart(preSelf string, preOther string) int { return 0 } + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + // if a part is empty, we use the other to decide if preSelf == "" { - _, notIsNumeric := strconv.ParseInt(preOther, 10, 64) - if notIsNumeric == nil { + if otherNumeric { return -1 } return 1 } if preOther == "" { - _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64) - if notIsNumeric == nil { + if selfNumeric { return 1 } return -1 } - if preSelf > preOther { + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { return 1 } @@ -240,11 +288,21 @@ func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } +// GreaterThanOrEqual tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + // LessThan tests if this version is less than another version. func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } +// LessThanOrEqual tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + // Metadata returns any metadata that was part of the version // string. // @@ -283,11 +341,19 @@ func (v *Version) Segments() []int { // for a version "1.2.3-beta", segments will return a slice of // 1, 2, 3. func (v *Version) Segments64() []int64 { - return v.segments + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result } // String returns the full version string included pre-release // and metadata information. +// +// This value is rebuilt according to the parsed segments and other +// information. 
Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. func (v *Version) String() string { var buf bytes.Buffer fmtParts := make([]string, len(v.segments)) @@ -306,3 +372,9 @@ func (v *Version) String() string { return buf.String() } + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 00000000..be2cc4df --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
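The simplelru package vendored below is a plain, non-thread-safe, fixed-size LRU: `NewLRU(size, onEvict)` plus `Add`/`Get`/`Peek`/`Remove` and friends. A short consumer sketch using only the functions defined in the file that follows (illustrative, not part of this repository):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires whenever an entry is dropped
	// to keep the cache within its size bound.
	onEvict := func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	// A fixed-size LRU holding at most two entries.
	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // third entry: "a", the oldest, is evicted

	if _, ok := cache.Get("a"); !ok {
		fmt.Println(`"a" is gone`)
	}
	fmt.Println(cache.Keys()) // oldest to newest: [b c]
}
```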
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 00000000..ff2f6388 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,164 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. 
+func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 00000000..ab85d3db --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. 
+ Purge() +} diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml index 3c8cdf8e..4db0b711 100644 --- a/vendor/github.com/hashicorp/hcl/appveyor.yml +++ b/vendor/github.com/hashicorp/hcl/appveyor.yml @@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl environment: GOPATH: c:\gopath init: - - git config --global core.autocrlf true + - git config --global core.autocrlf false install: - cmd: >- echo %Path% diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go index 0b39c1b9..6ed2fb9b 100644 --- a/vendor/github.com/hashicorp/hcl/decoder.go +++ b/vendor/github.com/hashicorp/hcl/decoder.go @@ -89,7 +89,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error switch k.Kind() { case reflect.Bool: return d.decodeBool(name, node, result) - case reflect.Float64: + case reflect.Float32, reflect.Float64: return d.decodeFloat(name, node, result) case reflect.Int, reflect.Int32, reflect.Int64: return d.decodeInt(name, node, result) @@ -117,10 +117,17 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: - if n.Token.Type == token.BOOL { - v, err := strconv.ParseBool(n.Token.Text) - if err != nil { - return err + switch n.Token.Type { + case token.BOOL, token.STRING, token.NUMBER: + var v bool + s := strings.ToLower(strings.Replace(n.Token.Text, "\"", "", -1)) + switch s { + case "1", "true": + v = true + case "0", "false": + v = false + default: + return fmt.Errorf("decodeBool: Unknown value for boolean: %s", n.Token.Text) } result.Set(reflect.ValueOf(v)) @@ -137,13 +144,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error { switch n := node.(type) { case *ast.LiteralType: - if n.Token.Type == token.FLOAT { + if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER { v, err := strconv.ParseFloat(n.Token.Text, 64) if err != nil { return err } - result.Set(reflect.ValueOf(v)) + result.Set(reflect.ValueOf(v).Convert(result.Type())) return nil } } @@ -397,6 +404,11 @@ func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) er } func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error { + // if pointer is not nil, decode into existing value + if !result.IsNil() { + return d.decode(name, node, result.Elem()) + } + // Create an element of the concrete (non pointer) type and decode // into that. Then set the value of the pointer to this type. 
resultType := result.Type() @@ -505,7 +517,7 @@ func expandObject(node ast.Node, result reflect.Value) ast.Node { // we need to un-flatten the ast enough to decode newNode := &ast.ObjectItem{ Keys: []*ast.ObjectKey{ - &ast.ObjectKey{ + { Token: keyToken, }, }, @@ -523,7 +535,7 @@ func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) switch n := node.(type) { case *ast.LiteralType: switch n.Token.Type { - case token.NUMBER: + case token.NUMBER, token.FLOAT, token.BOOL: result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type())) return nil case token.STRING, token.HEREDOC: @@ -573,7 +585,11 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) // Compile the list of all the fields that we're going to be decoding // from all the structs. - fields := make(map[*reflect.StructField]reflect.Value) + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} for len(structs) > 0 { structVal := structs[0] structs = structs[1:] @@ -616,34 +632,44 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) } // Normal struct field, store it away - fields[&fieldType] = structVal.Field(i) + fields = append(fields, field{fieldType, structVal.Field(i)}) } } - usedKeys := make(map[string]struct{}) decodedFields := make([]string, 0, len(fields)) decodedFieldsVal := make([]reflect.Value, 0) unusedKeysVal := make([]reflect.Value, 0) - for fieldType, field := range fields { - if !field.IsValid() { + + // fill unusedNodeKeys with keys from the AST + // a slice because we have to do equals case fold to match Filter + unusedNodeKeys := make([]string, 0) + for _, item := range list.Items { + for _, k := range item.Keys { + unusedNodeKeys = append(unusedNodeKeys, k.Token.Value().(string)) + } + } + + for _, f := range fields { + field, fieldValue := f.field, f.val + if !fieldValue.IsValid() { // This should never happen panic("field is not valid") } // If we can't set the field, then it is unexported or something, // and we just continue onwards. - if !field.CanSet() { + if !fieldValue.CanSet() { continue } - fieldName := fieldType.Name + fieldName := field.Name - tagValue := fieldType.Tag.Get(tagName) + tagValue := field.Tag.Get(tagName) tagParts := strings.SplitN(tagValue, ",", 2) if len(tagParts) >= 2 { switch tagParts[1] { case "decodedFields": - decodedFieldsVal = append(decodedFieldsVal, field) + decodedFieldsVal = append(decodedFieldsVal, fieldValue) continue case "key": if item == nil { @@ -654,10 +680,10 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) } } - field.SetString(item.Keys[0].Token.Value().(string)) + fieldValue.SetString(item.Keys[0].Token.Value().(string)) continue case "unusedKeys": - unusedKeysVal = append(unusedKeysVal, field) + unusedKeysVal = append(unusedKeysVal, fieldValue) continue } } @@ -677,14 +703,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) continue } - // Track the used key - usedKeys[fieldName] = struct{}{} + // Track the used keys + unusedNodeKeys = removeCaseFold(unusedNodeKeys, fieldName) // Create the field name and decode. We range over the elements // because we actually want the value. 
fieldName = fmt.Sprintf("%s.%s", name, fieldName) if len(prefixMatches.Items) > 0 { - if err := d.decode(fieldName, prefixMatches, field); err != nil { + if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil { return err } } @@ -694,12 +720,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) decodeNode = &ast.ObjectList{Items: ot.List.Items} } - if err := d.decode(fieldName, decodeNode, field); err != nil { + if err := d.decode(fieldName, decodeNode, fieldValue); err != nil { return err } } - decodedFields = append(decodedFields, fieldType.Name) + decodedFields = append(decodedFields, field.Name) } if len(decodedFieldsVal) > 0 { @@ -711,6 +737,14 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) } } + if len(unusedNodeKeys) > 0 { + // like decodedFields, populated the unusedKeys field(s) + sort.Strings(unusedNodeKeys) + for _, v := range unusedKeysVal { + v.Set(reflect.ValueOf(unusedNodeKeys)) + } + } + return nil } @@ -722,3 +756,12 @@ func findNodeType() reflect.Type { value := reflect.ValueOf(nodeContainer).FieldByName("Node") return value.Type() } + +func removeCaseFold(xs []string, y string) []string { + for i, x := range xs { + if strings.EqualFold(x, y) { + return append(xs[:i], xs[i+1:]...) + } + } + return xs +} diff --git a/vendor/github.com/hashicorp/hcl/go.mod b/vendor/github.com/hashicorp/hcl/go.mod new file mode 100644 index 00000000..4debbbe3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/go.mod @@ -0,0 +1,3 @@ +module github.com/hashicorp/hcl + +require github.com/davecgh/go-spew v1.1.1 diff --git a/vendor/github.com/hashicorp/hcl/go.sum b/vendor/github.com/hashicorp/hcl/go.sum new file mode 100644 index 00000000..b5e2922e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/go.sum @@ -0,0 +1,2 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go new file mode 100644 index 00000000..2380d71e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/fmtcmd/fmtcmd.go @@ -0,0 +1,162 @@ +// Derivative work from: +// - https://golang.org/src/cmd/gofmt/gofmt.go +// - https://github.com/fatih/hclfmt + +package fmtcmd + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl/hcl/printer" +) + +var ( + ErrWriteStdin = errors.New("cannot use write option with standard input") +) + +type Options struct { + List bool // list files whose formatting differs + Write bool // write result to (source) file instead of stdout + Diff bool // display diffs of formatting changes +} + +func isValidFile(f os.FileInfo, extensions []string) bool { + if !f.IsDir() && !strings.HasPrefix(f.Name(), ".") { + for _, ext := range extensions { + if strings.HasSuffix(f.Name(), "."+ext) { + return true + } + } + } + + return false +} + +// If in == nil, the source is the contents of the file with the given filename. 
+func processFile(filename string, in io.Reader, out io.Writer, stdin bool, opts Options) error { + if in == nil { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + in = f + } + + src, err := ioutil.ReadAll(in) + if err != nil { + return err + } + + res, err := printer.Format(src) + if err != nil { + return fmt.Errorf("In %s: %s", filename, err) + } + + if !bytes.Equal(src, res) { + // formatting has changed + if opts.List { + fmt.Fprintln(out, filename) + } + if opts.Write { + err = ioutil.WriteFile(filename, res, 0644) + if err != nil { + return err + } + } + if opts.Diff { + data, err := diff(src, res) + if err != nil { + return fmt.Errorf("computing diff: %s", err) + } + fmt.Fprintf(out, "diff a/%s b/%s\n", filename, filename) + out.Write(data) + } + } + + if !opts.List && !opts.Write && !opts.Diff { + _, err = out.Write(res) + } + + return err +} + +func walkDir(path string, extensions []string, stdout io.Writer, opts Options) error { + visitFile := func(path string, f os.FileInfo, err error) error { + if err == nil && isValidFile(f, extensions) { + err = processFile(path, nil, stdout, false, opts) + } + return err + } + + return filepath.Walk(path, visitFile) +} + +func Run( + paths, extensions []string, + stdin io.Reader, + stdout io.Writer, + opts Options, +) error { + if len(paths) == 0 { + if opts.Write { + return ErrWriteStdin + } + if err := processFile("", stdin, stdout, true, opts); err != nil { + return err + } + return nil + } + + for _, path := range paths { + switch dir, err := os.Stat(path); { + case err != nil: + return err + case dir.IsDir(): + if err := walkDir(path, extensions, stdout, opts); err != nil { + return err + } + default: + if err := processFile(path, nil, stdout, false, opts); err != nil { + return err + } + } + } + + return nil +} + +func diff(b1, b2 []byte) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index 6e54bed9..64c83bcf 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -3,6 +3,7 @@ package parser import ( + "bytes" "errors" "fmt" "strings" @@ -36,6 +37,11 @@ func newParser(src []byte) *Parser { // Parse returns the fully parsed source and returns the abstract syntax tree. func Parse(src []byte) (*ast.File, error) { + // normalize all line endings + // since the scanner and output only work with "\n" line endings, we may + // end up with dangling "\r" characters in the parsed data. 
+	src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
 	p := newParser(src)
 	return p.Parse()
 }
@@ -191,9 +197,18 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
 			keyStr = append(keyStr, k.Token.Text)
 		}
 
-		return nil, fmt.Errorf(
-			"key '%s' expected start of object ('{') or assignment ('=')",
-			strings.Join(keyStr, " "))
+		return nil, &PosError{
+			Pos: p.tok.Pos,
+			Err: fmt.Errorf(
+				"key '%s' expected start of object ('{') or assignment ('=')",
+				strings.Join(keyStr, " ")),
+		}
+	}
+
+	// key=#comment
+	// val
+	if p.lineComment != nil {
+		o.LineComment, p.lineComment = p.lineComment, nil
 	}
 
 	// do a look-ahead for line comment
@@ -313,7 +328,10 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
 
 	// No error, scan and expect the ending to be a brace
 	if tok := p.scan(); tok.Type != token.RBRACE {
-		return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+		return nil, &PosError{
+			Pos: tok.Pos,
+			Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+		}
 	}
 
 	o.List = l
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 00000000..7c038d12
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+	blank    = byte(' ')
+	newline  = byte('\n')
+	tab      = byte('\t')
+	infinity = 1 << 30 // offset or line
+)
+
+var (
+	unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+	cfg  Config
+	prev token.Pos
+
+	comments           []*ast.CommentGroup // may be nil, contains all comments
+	standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+	enableTrace bool
+	indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int           { return len(b) }
+func (b ByPosition) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, i.e. comments which are
+// not lead or line comments
+func (p *printer) collectComments(node ast.Node) {
+	// first collect all comments. This is already stored in
+	// ast.File.(comments)
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.File:
+			p.comments = t.Comments
+			return nn, false
+		}
+		return nn, true
+	})
+
+	standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
+	for _, c := range p.comments {
+		standaloneComments[c.Pos()] = c
+	}
+
+	// next remove all lead and line comments from the overall comment map.
+	// This will give us comments which are standalone, comments which are not
+	// assigned to any kind of node.
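Because lead and line comments are filtered out here, only truly standalone comments remain in `standaloneComments`, and the printer can re-emit each kind in its proper place. A small end-to-end sketch, assuming the `printer.Format` helper added later in this patch:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	// A standalone comment (attached to no item) and a line comment
	// (attached to its item) are both preserved by formatting.
	src := []byte(`// standalone comment

foo = "bar" // line comment
`)
	out, err := printer.Format(src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}
```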
+	ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+		switch t := nn.(type) {
+		case *ast.LiteralType:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		case *ast.ObjectItem:
+			if t.LeadComment != nil {
+				for _, comment := range t.LeadComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+
+			if t.LineComment != nil {
+				for _, comment := range t.LineComment.List {
+					if _, ok := standaloneComments[comment.Pos()]; ok {
+						delete(standaloneComments, comment.Pos())
+					}
+				}
+			}
+		}
+
+		return nn, true
+	})
+
+	for _, c := range standaloneComments {
+		p.standaloneComments = append(p.standaloneComments, c)
+	}
+
+	sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates the printable HCL output and returns it.
+func (p *printer) output(n interface{}) []byte {
+	var buf bytes.Buffer
+
+	switch t := n.(type) {
+	case *ast.File:
+		// File doesn't trace so we add the tracing here
+		defer un(trace(p, "File"))
+		return p.output(t.Node)
+	case *ast.ObjectList:
+		defer un(trace(p, "ObjectList"))
+
+		var index int
+		for {
+			// Determine the location of the next actual non-comment
+			// item. If we're at the end, the next item is at "infinity"
+			var nextItem token.Pos
+			if index != len(t.Items) {
+				nextItem = t.Items[index].Pos()
+			} else {
+				nextItem = token.Pos{Offset: infinity, Line: infinity}
+			}
+
+			// Go through the standalone comments in the file and print out
+			// the comments that should be printed for this object item.
+			for _, c := range p.standaloneComments {
+				// Go through all the comments in the group. The group
+				// should be printed together, not separated by double newlines.
+				printed := false
+				newlinePrinted := false
+				for _, comment := range c.List {
+					// We only care about comments after the previous item
+					// we've printed so that comments are printed in the
+					// correct locations (between two objects for example).
+					// And before the next item.
+					if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+						// if we hit the end add newlines so we can print the comment
+						// we don't do this if prev is invalid which means the
+						// beginning of the file since the first comment should
+						// be at the first line.
+						if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+							buf.Write([]byte{newline, newline})
+							newlinePrinted = true
+						}
+
+						// Write the actual comment.
+						buf.WriteString(comment.Text)
+						buf.WriteByte(newline)
+
+						// Set printed to true to note that we printed something
+						printed = true
+					}
+				}
+
+				// If we're not at the last item, write a new line so
+				// that there is a newline separating this comment from
+				// the next object.
+				if printed && index != len(t.Items) {
+					buf.WriteByte(newline)
+				}
+			}
+
+			if index == len(t.Items) {
+				break
+			}
+
+			buf.Write(p.output(t.Items[index]))
+			if index != len(t.Items)-1 {
+				// Always write a newline to separate us from the next item
+				buf.WriteByte(newline)
+
+				// Need to determine if we're going to separate the next item
+				// with a blank line. The logic here is simple, though there
+				// are a few conditions:
+				//
+				//   1. The next object is more than one line away anyways,
+				//      so we need an empty line.
+				//
+				//   2. 
The next object is not a "single line" object, so
+				//      we need an empty line.
+				//
+				//   3. This current object is not a single line object,
+				//      so we need an empty line.
+				current := t.Items[index]
+				next := t.Items[index+1]
+				if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+					!p.isSingleLineObject(next) ||
+					!p.isSingleLineObject(current) {
+					buf.WriteByte(newline)
+				}
+			}
+			index++
+		}
+	case *ast.ObjectKey:
+		buf.WriteString(t.Token.Text)
+	case *ast.ObjectItem:
+		p.prev = t.Pos()
+		buf.Write(p.objectItem(t))
+	case *ast.LiteralType:
+		buf.Write(p.literalType(t))
+	case *ast.ListType:
+		buf.Write(p.list(t))
+	case *ast.ObjectType:
+		buf.Write(p.objectType(t))
+	default:
+		fmt.Printf(" unknown type: %T\n", n)
+	}
+
+	return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+	result := []byte(lit.Token.Text)
+	switch lit.Token.Type {
+	case token.HEREDOC:
+		// Clear the trailing newline from heredocs
+		if result[len(result)-1] == '\n' {
+			result = result[:len(result)-1]
+		}
+
+		// Poison lines 2+ so that we don't indent them
+		result = p.heredocIndent(result)
+	case token.STRING:
+		// If this is a multiline string, poison lines 2+ so we don't
+		// indent them.
+		if bytes.IndexRune(result, '\n') >= 0 {
+			result = p.heredocIndent(result)
+		}
+	}
+
+	return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object item
+// starts with one or multiple keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+	defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+	var buf bytes.Buffer
+
+	if o.LeadComment != nil {
+		for _, comment := range o.LeadComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	// If key and val are on different lines, treat line comments like lead comments.
+	if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+			buf.WriteByte(newline)
+		}
+	}
+
+	for i, k := range o.Keys {
+		buf.WriteString(k.Token.Text)
+		buf.WriteByte(blank)
+
+		// reach end of key
+		if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+			buf.WriteString("=")
+			buf.WriteByte(blank)
+		}
+	}
+
+	buf.Write(p.output(o.Val))
+
+	if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+		buf.WriteByte(blank)
+		for _, comment := range o.LineComment.List {
+			buf.WriteString(comment.Text)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+	defer un(trace(p, "ObjectType"))
+	var buf bytes.Buffer
+	buf.WriteString("{")
+
+	var index int
+	var nextItem token.Pos
+	var commented, newlinePrinted bool
+	for {
+		// Determine the location of the next actual non-comment
+		// item. If we're at the end, the next item is the closing brace
+		if index != len(o.List.Items) {
+			nextItem = o.List.Items[index].Pos()
+		} else {
+			nextItem = o.Rbrace
+		}
+
+		// Go through the standalone comments in the file and print out
+		// the comments that should be printed for this object item.
+		for _, c := range p.standaloneComments {
+			printed := false
+			var lastCommentPos token.Pos
+			for _, comment := range c.List {
+				// We only care about comments after the previous item
+				// we've printed so that comments are printed in the
+				// correct locations (between two objects for example).
+				// And before the next item.
+				if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+					// If there are standalone comments and the initial newline has not
+					// been printed yet, do it now.
+					if !newlinePrinted {
+						newlinePrinted = true
+						buf.WriteByte(newline)
+					}
+
+					// add newline if it's between other printed nodes
+					if index > 0 {
+						commented = true
+						buf.WriteByte(newline)
+					}
+
+					// Store this position
+					lastCommentPos = comment.Pos()
+
+					// output the comment itself
+					buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+					// Set printed to true to note that we printed something
+					printed = true
+
+					/*
+						if index != len(o.List.Items) {
+							buf.WriteByte(newline) // do not print on the end
+						}
+					*/
+				}
+			}
+
+			// Stuff to do if we had comments
+			if printed {
+				// Always write a newline
+				buf.WriteByte(newline)
+
+				// If there is another item in the object and our comment
+				// didn't hug it directly, then make sure there is a blank
+				// line separating them.
+				if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+					buf.WriteByte(newline)
+				}
+			}
+		}
+
+		if index == len(o.List.Items) {
+			p.prev = o.Rbrace
+			break
+		}
+
+		// At this point we are sure that it's not a totally empty block: print
+		// the initial newline if it hasn't been printed yet by the previous
+		// block about standalone comments.
+		if !newlinePrinted {
+			buf.WriteByte(newline)
+			newlinePrinted = true
+		}
+
+		// check if we have adjacent one liner items. If so, we're going to align
+		// the comments.
+		var aligned []*ast.ObjectItem
+		for _, item := range o.List.Items[index:] {
+			// we don't group one line lists
+			if len(o.List.Items) == 1 {
+				break
+			}
+
+			// one means a oneliner without any lead comment
+			// two means a oneliner with lead comment
+			// anything else might be something else
+			cur := lines(string(p.objectItem(item)))
+			if cur > 2 {
+				break
+			}
+
+			curPos := item.Pos()
+
+			nextPos := token.Pos{}
+			if index != len(o.List.Items)-1 {
+				nextPos = o.List.Items[index+1].Pos()
+			}
+
+			prevPos := token.Pos{}
+			if index != 0 {
+				prevPos = o.List.Items[index-1].Pos()
+			}
+
+			// fmt.Println("DEBUG ----------------")
+			// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+			// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+			// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+			if curPos.Line+1 == nextPos.Line {
+				aligned = append(aligned, item)
+				index++
+				continue
+			}
+
+			if curPos.Line-1 == prevPos.Line {
+				aligned = append(aligned, item)
+				index++
+
+				// finish if we have a new line or comment next. This happens
+				// if the next item is not adjacent
+				if curPos.Line+1 != nextPos.Line {
+					break
+				}
+				continue
+			}
+
+			break
+		}
+
+		// put newlines if the items are between other non aligned items.
+		// newlines are also added if there is a standalone comment already, so
+		// check it too
+		if !commented && index != len(aligned) {
+			buf.WriteByte(newline)
+		}
+
+		if len(aligned) >= 1 {
+			p.prev = aligned[len(aligned)-1].Pos()
+
+			items := p.alignedItems(aligned)
+			buf.Write(p.indent(items))
+		} else {
+			p.prev = o.List.Items[index].Pos()
+
+			buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+			index++
+		}
+
+		buf.WriteByte(newline)
+	}
+
+	buf.WriteString("}")
+	return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+	var buf bytes.Buffer
+
+	// find the longest key and value length, needed for alignment
+	var longestKeyLen int // longest key length
+	var longestValLen int // longest value length
+	for _, item := range items {
+		key := len(item.Keys[0].Token.Text)
+		val := len(p.output(item.Val))
+
+		if key > longestKeyLen {
+			longestKeyLen = key
+		}
+
+		if val > longestValLen {
+			longestValLen = val
+		}
+	}
+
+	for i, item := range items {
+		if item.LeadComment != nil {
+			for _, comment := range item.LeadComment.List {
+				buf.WriteString(comment.Text)
+				buf.WriteByte(newline)
+			}
+		}
+
+		for i, k := range item.Keys {
+			keyLen := len(k.Token.Text)
+			buf.WriteString(k.Token.Text)
+			for i := 0; i < longestKeyLen-keyLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			// reach end of key
+			if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+				buf.WriteString("=")
+				buf.WriteByte(blank)
+			}
+		}
+
+		val := p.output(item.Val)
+		valLen := len(val)
+		buf.Write(val)
+
+		if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+			for i := 0; i < longestValLen-valLen+1; i++ {
+				buf.WriteByte(blank)
+			}
+
+			for _, comment := range item.LineComment.List {
+				buf.WriteString(comment.Text)
+			}
+		}
+
+		// do not print for the last item
+		if i != len(items)-1 {
+			buf.WriteByte(newline)
+		}
+	}
+
+	return buf.Bytes()
+}
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+	if p.isSingleLineList(l) {
+		return p.singleLineList(l)
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString("[")
+	buf.WriteByte(newline)
+
+	var longestLine int
+	for _, item := range l.List {
+		// for now we assume that the list only contains literal types
+		if lit, ok := item.(*ast.LiteralType); ok {
+			lineLen := len(lit.Token.Text)
+			if lineLen > longestLine {
+				longestLine = lineLen
+			}
+		}
+	}
+
+	haveEmptyLine := false
+	for i, item := range l.List {
+		// If we have a lead comment, then we want to write that first
+		leadComment := false
+		if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+			leadComment = true
+
+			// Ensure an empty line before every element with a
+			// lead comment (except the first item in a list).
+			if !haveEmptyLine && i != 0 {
+				buf.WriteByte(newline)
+			}
+
+			for _, comment := range lit.LeadComment.List {
+				buf.Write(p.indent([]byte(comment.Text)))
+				buf.WriteByte(newline)
+			}
+		}
+
+		// also indent each line
+		val := p.output(item)
+		curLen := len(val)
+		buf.Write(p.indent(val))
+
+		// if this item is a heredoc, then we output the comma on
+		// the next line. This is the only case where this happens.
+ comma := []byte{','} + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + comma = p.indent(comma) + } + + buf.Write(comma) + + if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil { + // if the next item doesn't have any comments, do not align + buf.WriteByte(blank) // align one space + for i := 0; i < longestLine-curLen; i++ { + buf.WriteByte(blank) + } + + for _, comment := range lit.LineComment.List { + buf.WriteString(comment.Text) + } + } + + buf.WriteByte(newline) + + // Ensure an empty line after every element with a + // lead comment (except the first item in a list). + haveEmptyLine = leadComment && i != len(l.List)-1 + if haveEmptyLine { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// isSingleLineList returns true if: +// * they were previously formatted entirely on one line +// * they consist entirely of literals +// * there are either no heredoc strings or the list has exactly one element +// * there are no line comments +func (printer) isSingleLineList(l *ast.ListType) bool { + for _, item := range l.List { + if item.Pos().Line != l.Lbrack.Line { + return false + } + + lit, ok := item.(*ast.LiteralType) + if !ok { + return false + } + + if lit.Token.Type == token.HEREDOC && len(l.List) != 1 { + return false + } + + if lit.LineComment != nil { + return false + } + } + + return true +} + +// singleLineList prints a simple single line list. +// For a definition of "simple", see isSingleLineList above. +func (p *printer) singleLineList(l *ast.ListType) []byte { + buf := &bytes.Buffer{} + + buf.WriteString("[") + for i, item := range l.List { + if i != 0 { + buf.WriteString(", ") + } + + // Output the item itself + buf.Write(p.output(item)) + + // The heredoc marker needs to be at the end of line. + if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC { + buf.WriteByte(newline) + } + } + + buf.WriteString("]") + return buf.Bytes() +} + +// indent indents the lines of the given buffer for each non-empty line +func (p *printer) indent(buf []byte) []byte { + var prefix []byte + if p.cfg.SpacesWidth != 0 { + for i := 0; i < p.cfg.SpacesWidth; i++ { + prefix = append(prefix, blank) + } + } else { + prefix = []byte{tab} + } + + var res []byte + bol := true + for _, c := range buf { + if bol && c != '\n' { + res = append(res, prefix...) + } + + res = append(res, c) + bol = c == '\n' + } + return res +} + +// unindent removes all the indentation from the tombstoned lines +func (p *printer) unindent(buf []byte) []byte { + var res []byte + for i := 0; i < len(buf); i++ { + skip := len(buf)-i <= len(unindent) + if !skip { + skip = !bytes.Equal(unindent, buf[i:i+len(unindent)]) + } + if skip { + res = append(res, buf[i]) + continue + } + + // We have a marker. we have to backtrace here and clean out + // any whitespace ahead of our tombstone up to a \n + for j := len(res) - 1; j >= 0; j-- { + if res[j] == '\n' { + break + } + + res = res[:j] + } + + // Skip the entire unindent marker + i += len(unindent) - 1 + } + + return res +} + +// heredocIndent marks all the 2nd and further lines as unindentable +func (p *printer) heredocIndent(buf []byte) []byte { + var res []byte + bol := false + for _, c := range buf { + if bol && c != '\n' { + res = append(res, unindent...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// isSingleLineObject tells whether the given object item is a single +// line object such as "obj {}". 
+// +// A single line object: +// +// * has no lead comments (hence multi-line) +// * has no assignment +// * has no values in the stanza (within {}) +// +func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool { + // If there is a lead comment, can't be one line + if val.LeadComment != nil { + return false + } + + // If there is assignment, we always break by line + if val.Assign.IsValid() { + return false + } + + // If it isn't an object type, then its not a single line object + ot, ok := val.Val.(*ast.ObjectType) + if !ok { + return false + } + + // If the object has no items, it is single line! + return len(ot.List.Items) == 0 +} + +func lines(txt string) int { + endline := 1 + for i := 0; i < len(txt); i++ { + if txt[i] == '\n' { + endline++ + } + } + return endline +} + +// ---------------------------------------------------------------------------- +// Tracing support + +func (p *printer) printTrace(a ...interface{}) { + if !p.enableTrace { + return + } + + const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . " + const n = len(dots) + i := 2 * p.indentTrace + for i > n { + fmt.Print(dots) + i -= n + } + // i <= n + fmt.Print(dots[0:i]) + fmt.Println(a...) +} + +func trace(p *printer, msg string) *printer { + p.printTrace(msg, "(") + p.indentTrace++ + return p +} + +// Usage pattern: defer un(trace(p, "...")) +func un(p *printer) { + p.indentTrace-- + p.printTrace(")") +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go new file mode 100644 index 00000000..6617ab8e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go @@ -0,0 +1,66 @@ +// Package printer implements printing of AST nodes to HCL format. +package printer + +import ( + "bytes" + "io" + "text/tabwriter" + + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/hcl/hcl/parser" +) + +var DefaultConfig = Config{ + SpacesWidth: 2, +} + +// A Config node controls the output of Fprint. +type Config struct { + SpacesWidth int // if set, it will use spaces instead of tabs for alignment +} + +func (c *Config) Fprint(output io.Writer, node ast.Node) error { + p := &printer{ + cfg: *c, + comments: make([]*ast.CommentGroup, 0), + standaloneComments: make([]*ast.CommentGroup, 0), + // enableTrace: true, + } + + p.collectComments(node) + + if _, err := output.Write(p.unindent(p.output(node))); err != nil { + return err + } + + // flush tabwriter, if any + var err error + if tw, _ := output.(*tabwriter.Writer); tw != nil { + err = tw.Flush() + } + + return err +} + +// Fprint "pretty-prints" an HCL node to output +// It calls Config.Fprint with default settings. +func Fprint(output io.Writer, node ast.Node) error { + return DefaultConfig.Fprint(output, node) +} + +// Format formats src HCL and returns the result. 
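The `Format` helper that follows wires the parser and this printer together with `DefaultConfig`. A short sketch of the configurable variant, assuming the `parser` and `printer` packages from this patch:

```go
package main

import (
	"os"

	"github.com/hashicorp/hcl/hcl/parser"
	"github.com/hashicorp/hcl/hcl/printer"
)

func main() {
	node, err := parser.Parse([]byte("b {\na = 1\n}"))
	if err != nil {
		panic(err)
	}

	// DefaultConfig indents nested items with two spaces; a custom
	// Config switches to four (tabs are used when SpacesWidth is zero).
	cfg := &printer.Config{SpacesWidth: 4}
	if err := cfg.Fprint(os.Stdout, node); err != nil {
		panic(err)
	}
}
```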
+func Format(src []byte) ([]byte, error) { + node, err := parser.Parse(src) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + if err := DefaultConfig.Fprint(&buf, node); err != nil { + return nil, err + } + + // Add trailing newline to result + buf.WriteString("\n") + return buf.Bytes(), nil +} diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index 69662367..624a18fe 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -74,14 +74,6 @@ func (s *Scanner) next() rune { return eof } - if ch == utf8.RuneError && size == 1 { - s.srcPos.Column++ - s.srcPos.Offset += size - s.lastCharLen = size - s.err("illegal UTF-8 encoding") - return ch - } - // remember last position s.prevPos = s.srcPos @@ -89,18 +81,27 @@ func (s *Scanner) next() rune { s.lastCharLen = size s.srcPos.Offset += size + if ch == utf8.RuneError && size == 1 { + s.err("illegal UTF-8 encoding") + return ch + } + if ch == '\n' { s.srcPos.Line++ s.lastLineLen = s.srcPos.Column s.srcPos.Column = 0 } - // If we see a null character with data left, then that is an error - if ch == '\x00' && s.buf.Len() > 0 { + if ch == '\x00' { s.err("unexpected null character (0x00)") return eof } + if ch == '\uE123' { + s.err("unicode code point U+E123 reserved for internal use") + return utf8.RuneError + } + // debug // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column) return ch @@ -351,7 +352,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type { return token.NUMBER } -// scanMantissa scans the mantissa begining from the rune. It returns the next +// scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. 
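A sketch of the new scanner guards, assuming the v1 scanner's exported `Error` hook and `ErrorCount` field: the reserved code point U+E123 is now rejected at scan time so it cannot collide with the printer's internal unindent marker (see nodes.go above), and the same path reports stray null characters.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/scanner"
	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	s := scanner.New([]byte("a = \"\uE123\""))
	s.Error = func(pos token.Pos, msg string) {
		fmt.Printf("%s: %s\n", pos, msg)
	}
	for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
		// drain the token stream; errors arrive via the hook above
	}
	fmt.Println("errors:", s.ErrorCount)
}
```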
func (s *Scanner) scanMantissa(ch rune) rune { scanned := false @@ -432,16 +433,16 @@ func (s *Scanner) scanHeredoc() { // Read the identifier identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen] - if len(identBytes) == 0 { + if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') { s.err("zero-length heredoc anchor") return } var identRegexp *regexp.Regexp if identBytes[0] == '-' { - identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:])) + identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:])) } else { - identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes)) + identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes)) } // Read the actual string value @@ -551,7 +552,7 @@ func (s *Scanner) scanDigits(ch rune, base, n int) rune { s.err("illegal char escape") } - if n != start { + if n != start && ch != eof { // we scanned all digits, put the last non digit char back, // only if we read anything at all s.unread() diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go index 6f460853..125a5f07 100644 --- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go @@ -147,7 +147,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) { // Done return keys, nil case token.ILLEGAL: - fmt.Println("illegal") + return nil, errors.New("illegal") default: return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type) } diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go index dd5c72bb..fe3f0f09 100644 --- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go @@ -246,7 +246,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type { return token.NUMBER } -// scanMantissa scans the mantissa begining from the rune. It returns the next +// scanMantissa scans the mantissa beginning from the rune. It returns the next // non decimal rune. It's used to determine wheter it's a fraction or exponent. func (s *Scanner) scanMantissa(ch rune) rune { scanned := false diff --git a/vendor/github.com/hashicorp/hcl2/LICENSE b/vendor/github.com/hashicorp/hcl2/LICENSE new file mode 100644 index 00000000..82b4de97 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md new file mode 100644 index 00000000..2b24fdbe --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md @@ -0,0 +1,184 @@ +# HCL Dynamic Blocks Extension + +This HCL extension implements a special block type named "dynamic" that can +be used to dynamically generate blocks of other types by iterating over +collection values. + +Normally the block structure in an HCL configuration file is rigid, even +though dynamic expressions can be used within attribute values. This is +convenient for most applications since it allows the overall structure of +the document to be decoded easily, but in some applications it is desirable +to allow dynamic block generation within certain portions of the configuration. + +Dynamic block generation is performed using the `dynamic` block type: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + dynamic "nested" { + for_each = ["a", "b", "c"] + iterator = nested + content { + foo = "dynamic block ${nested.value}" + } + } + + nested { + foo = "static block 2" + } +} +``` + +The above is interpreted as if it were written as follows: + +```hcl +toplevel { + nested { + foo = "static block 1" + } + + nested { + foo = "dynamic block a" + } + + nested { + foo = "dynamic block b" + } + + nested { + foo = "dynamic block c" + } + + nested { + foo = "static block 2" + } +} +``` + +Since HCL block syntax is not normally exposed to the possibility of unknown +values, this extension must make some compromises when asked to iterate over +an unknown collection. If the length of the collection cannot be statically +recognized (because it is an unknown value of list, map, or set type) then +the `dynamic` construct will generate a _single_ dynamic block whose iterator +key and value are both unknown values of the dynamic pseudo-type, thus causing +any attribute values derived from iteration to appear as unknown values. There +is no explicit representation of the fact that the length of the collection may +eventually be different than one. + +## Usage + +Pass a body to function `Expand` to obtain a new body that will, on access +to its content, evaluate and expand any nested `dynamic` blocks. 
+Dynamic block processing is also automatically propagated into any nested
+blocks that are returned, allowing users to nest dynamic blocks inside
+one another and to nest dynamic blocks inside other static blocks.
+
+HCL structural decoding does not normally have access to an `EvalContext`, so
+any variables and functions that should be available to the `for_each`
+and `labels` expressions must be passed in when calling `Expand`. Expressions
+within the `content` block are evaluated separately and so can be passed a
+separate `EvalContext` if desired, during normal attribute expression
+evaluation.
+
+## Detecting Variables
+
+Some applications dynamically generate an `EvalContext` by analyzing which
+variables are referenced by an expression before evaluating it.
+
+This unfortunately requires some extra effort when this analysis is required
+for the context passed to `Expand`: the HCL API requires a schema to be
+provided in order to do any analysis of the blocks in a body, but the low-level
+schema model provides a description of only one level of nested blocks at
+a time, and thus a new schema must be provided for each additional level of
+nesting.
+
+To make this arduous process as convenient as possible, this package provides
+a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
+instance that can be used to find variables directly in a given body and also
+determine which nested blocks require recursive calls. Using this mechanism
+requires that the caller be able to look up a schema given a nested block type.
+For _simple_ formats where a specific block type name always has the same schema
+regardless of context, a walk can be implemented as follows:
+
+```go
+func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal {
+	vars, children := node.Visit(schema)
+
+	for _, child := range children {
+		var childSchema *hcl.BodySchema
+		switch child.BlockTypeName {
+		case "a":
+			childSchema = &hcl.BodySchema{
+				Blocks: []hcl.BlockHeaderSchema{
+					{
+						Type:       "b",
+						LabelNames: []string{"key"},
+					},
+				},
+			}
+		case "b":
+			childSchema = &hcl.BodySchema{
+				Attributes: []hcl.AttributeSchema{
+					{
+						Name:     "val",
+						Required: true,
+					},
+				},
+			}
+		default:
+			// Should never happen, because the above cases should be exhaustive
+			// for the application's configuration format.
+			panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName))
+		}
+
+		vars = append(vars, walkVariables(child.Node, childSchema)...)
+	}
+
+	return vars
+}
+```
+
+### Detecting Variables with `hcldec` Specifications
+
+For applications that use the higher-level `hcldec` package to decode nested
+configuration structures into `cty` values, the same specification can be used
+to automatically drive the recursive variable-detection walk described above.
+
+The helper function `ForEachVariablesHCLDec` allows an entire recursive
+configuration structure to be analyzed in a single call given a `hcldec.Spec`
+that describes the nested block structure.
This means a `hcldec`-based +application can support dynamic blocks with only a little additional effort: + +```go +func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) { + // Determine which variables are needed to expand dynamic blocks + neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec) + + // Build a suitable EvalContext and expand dynamic blocks + dynCtx := buildEvalContext(neededForDynamic) + dynBody := dynblock.Expand(body, dynCtx) + + // Determine which variables are needed to fully decode the expanded body + // This will analyze expressions that came both from static blocks in the + // original body and from blocks that were dynamically added by Expand. + neededForDecode := hcldec.Variables(dynBody, spec) + + // Build a suitable EvalContext and then fully decode the body as per the + // hcldec specification. + decCtx := buildEvalContext(neededForDecode) + return hcldec.Decode(dynBody, spec, decCtx) +} + +func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext { + // (to be implemented by your application) +} +``` + +# Performance + +This extension is going quite harshly against the grain of the HCL API, and +so it uses lots of wrapping objects and temporary data structures to get its +work done. HCL in general is not suitable for use in high-performance situations +or situations sensitive to memory pressure, but that is _especially_ true for +this extension. diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go new file mode 100644 index 00000000..dd308223 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go @@ -0,0 +1,262 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// expandBody wraps another hcl.Body and expands any "dynamic" blocks found +// inside whenever Content or PartialContent is called. +type expandBody struct { + original hcl.Body + forEachCtx *hcl.EvalContext + iteration *iteration // non-nil if we're nested inside another "dynamic" block + + // These are used with PartialContent to produce a "remaining items" + // body to return. They are nil on all bodies fresh out of the transformer. + // + // Note that this is re-implemented here rather than delegating to the + // existing support required by the underlying body because we need to + // retain access to the entire original body on subsequent decode operations + // so we can retain any "dynamic" blocks for types we didn't take consume + // on the first pass. + hiddenAttrs map[string]struct{} + hiddenBlocks map[string]hcl.BlockHeaderSchema +} + +func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + extSchema := b.extendSchema(schema) + rawContent, diags := b.original.Content(extSchema) + + blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false) + diags = append(diags, blockDiags...) 
+ attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + return content, diags +} + +func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + extSchema := b.extendSchema(schema) + rawContent, _, diags := b.original.PartialContent(extSchema) + // We discard the "remain" argument above because we're going to construct + // our own remain that also takes into account remaining "dynamic" blocks. + + blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true) + diags = append(diags, blockDiags...) + attrs := b.prepareAttributes(rawContent.Attributes) + + content := &hcl.BodyContent{ + Attributes: attrs, + Blocks: blocks, + MissingItemRange: b.original.MissingItemRange(), + } + + remain := &expandBody{ + original: b.original, + forEachCtx: b.forEachCtx, + iteration: b.iteration, + hiddenAttrs: make(map[string]struct{}), + hiddenBlocks: make(map[string]hcl.BlockHeaderSchema), + } + for name := range b.hiddenAttrs { + remain.hiddenAttrs[name] = struct{}{} + } + for typeName, blockS := range b.hiddenBlocks { + remain.hiddenBlocks[typeName] = blockS + } + for _, attrS := range schema.Attributes { + remain.hiddenAttrs[attrS.Name] = struct{}{} + } + for _, blockS := range schema.Blocks { + remain.hiddenBlocks[blockS.Type] = blockS + } + + return content, remain, diags +} + +func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + // If we have any hiddenBlocks then we also need to register those here + // so that a call to "Content" on the underlying body won't fail. + // (We'll filter these out again once we process the result of either + // Content or PartialContent.) + for _, blockS := range b.hiddenBlocks { + extSchema.Blocks = append(extSchema.Blocks, blockS) + } + + // If we have any hiddenAttrs then we also need to register these, for + // the same reason as we deal with hiddenBlocks above. + if len(b.hiddenAttrs) != 0 { + newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs)) + copy(newAttrs, extSchema.Attributes) + for name := range b.hiddenAttrs { + newAttrs = append(newAttrs, hcl.AttributeSchema{ + Name: name, + Required: false, + }) + } + extSchema.Attributes = newAttrs + } + + return extSchema +} + +func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes { + if len(b.hiddenAttrs) == 0 && b.iteration == nil { + // Easy path: just pass through the attrs from the original body verbatim + return rawAttrs + } + + // Otherwise we have some work to do: we must filter out any attributes + // that are hidden (since a previous PartialContent call already saw these) + // and wrap the expressions of the inner attributes so that they will + // have access to our iteration variables. 
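To make the machinery above concrete, here is a minimal sketch of expanding a `dynamic` block through `Expand` and `Content`, assuming the vendored packages under their upstream `github.com/hashicorp/hcl2/...` import paths:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/ext/dynblock"
	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
)

func main() {
	const src = `
dynamic "port" {
  for_each = [80, 443]
  content {
    number = port.value
  }
}
`
	f, diags := hclparse.NewParser().ParseHCL([]byte(src), "ports.hcl")
	if diags.HasErrors() {
		panic(diags)
	}

	// Expand only wraps the body; "dynamic" blocks are expanded when
	// Content is called with a schema that names the real block type.
	body := dynblock.Expand(f.Body, &hcl.EvalContext{})
	content, diags := body.Content(&hcl.BodySchema{
		Blocks: []hcl.BlockHeaderSchema{{Type: "port"}},
	})
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(len(content.Blocks)) // 2: one block per for_each element
}
```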
+ attrs := make(hcl.Attributes, len(rawAttrs)) + for name, rawAttr := range rawAttrs { + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + if b.iteration != nil { + attr := *rawAttr // shallow copy so we can mutate it + attr.Expr = exprWrap{ + Expression: attr.Expr, + i: b.iteration, + } + attrs[name] = &attr + } else { + // If we have no active iteration then no wrapping is required. + attrs[name] = rawAttr + } + } + return attrs +} + +func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) { + var blocks hcl.Blocks + var diags hcl.Diagnostics + + for _, rawBlock := range rawBlocks { + switch rawBlock.Type { + case "dynamic": + realBlockType := rawBlock.Labels[0] + if _, hidden := b.hiddenBlocks[realBlockType]; hidden { + continue + } + + var blockS *hcl.BlockHeaderSchema + for _, candidate := range schema.Blocks { + if candidate.Type == realBlockType { + blockS = &candidate + break + } + } + if blockS == nil { + // Not a block type that the caller requested. + if !partial { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType), + Subject: &rawBlock.LabelRanges[0], + }) + } + continue + } + + spec, specDiags := b.decodeSpec(blockS, rawBlock) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + continue + } + + if spec.forEachVal.IsKnown() { + for it := spec.forEachVal.ElementIterator(); it.Next(); { + key, value := it.Element() + i := b.iteration.MakeChild(spec.iteratorName, key, value) + + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + // Attach our new iteration context so that attributes + // and other nested blocks can refer to our iterator. + block.Body = b.expandChild(block.Body, i) + blocks = append(blocks, block) + } + } + } else { + // If our top-level iteration value isn't known then we're forced + // to compromise since HCL doesn't have any concept of an + // "unknown block". In this case then, we'll produce a single + // dynamic block with the iterator values set to DynamicVal, + // which at least makes the potential for a block visible + // in our result, even though it's not represented in a fully-accurate + // way. + i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal) + block, blockDiags := spec.newBlock(i, b.forEachCtx) + diags = append(diags, blockDiags...) + if block != nil { + block.Body = b.expandChild(block.Body, i) + + // We additionally force all of the leaf attribute values + // in the result to be unknown so the calling application + // can, if necessary, use that as a heuristic to detect + // when a single nested block might be standing in for + // multiple blocks yet to be expanded. This retains the + // structure of the generated body but forces all of its + // leaf attribute values to be unknown. + block.Body = unknownBody{block.Body} + + blocks = append(blocks, block) + } + } + + default: + if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden { + // A static block doesn't create a new iteration context, but + // it does need to inherit _our own_ iteration context in + // case it contains expressions that refer to our inherited + // iterators, or nested "dynamic" blocks. 
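+ // expandChild re-wraps the nested body with our own iteration, so
+ // both situations are handled without any special casing here.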
+ expandedBlock := *rawBlock // shallow copy + expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration) + blocks = append(blocks, &expandedBlock) + } + } + } + + return blocks, diags +} + +func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body { + chiCtx := i.EvalContext(b.forEachCtx) + ret := Expand(child, chiCtx) + ret.(*expandBody).iteration = i + return ret +} + +func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // blocks aren't allowed in JustAttributes mode and this body can + // only produce blocks, so we'll just pass straight through to our + // underlying body here. + return b.original.JustAttributes() +} + +func (b *expandBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go new file mode 100644 index 00000000..41c0be26 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go @@ -0,0 +1,215 @@ +package dynblock + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type expandSpec struct { + blockType string + blockTypeRange hcl.Range + defRange hcl.Range + forEachVal cty.Value + iteratorName string + labelExprs []hcl.Expression + contentBody hcl.Body + inherited map[string]*iteration +} + +func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var schema *hcl.BodySchema + if len(blockS.LabelNames) != 0 { + schema = dynamicBlockBodySchemaLabels + } else { + schema = dynamicBlockBodySchemaNoLabels + } + + specContent, specDiags := rawSpec.Body.Content(schema) + diags = append(diags, specDiags...) + if specDiags.HasErrors() { + return nil, diags + } + + //// for_each attribute + + eachAttr := specContent.Attributes["for_each"] + eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx) + diags = append(diags, eachDiags...) + + if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType { + // We skip this error for DynamicPseudoType because that means we either + // have a null (which is checked immediately below) or an unknown + // (which is handled in the expandBody Content methods). + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()), + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + if eachVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic for_each value", + Detail: "Cannot use a null value in for_each.", + Subject: eachAttr.Expr.Range().Ptr(), + Expression: eachAttr.Expr, + EvalContext: b.forEachCtx, + }) + return nil, diags + } + + //// iterator attribute + + iteratorName := blockS.Type + if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil { + itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr) + diags = append(diags, itDiags...) 
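+ // The iterator argument, when present, must be a single bare name,
+ // which overrides the default iterator name (the block type name
+ // assigned above).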
+ if itDiags.HasErrors() { + return nil, diags + } + + if len(itTraversal) != 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic iterator name", + Detail: "Dynamic iterator must be a single variable name.", + Subject: itTraversal.SourceRange().Ptr(), + }) + return nil, diags + } + + iteratorName = itTraversal.RootName() + } + + var labelExprs []hcl.Expression + if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil { + var labelDiags hcl.Diagnostics + labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr) + diags = append(diags, labelDiags...) + if labelDiags.HasErrors() { + return nil, diags + } + + if len(labelExprs) > len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic block label", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(), + }) + return nil, diags + } else if len(labelExprs) < len(blockS.LabelNames) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Insufficient dynamic block labels", + Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)), + Subject: labelsAttr.Expr.Range().Ptr(), + }) + return nil, diags + } + } + + // Since our schema requests only blocks of type "content", we can assume + // that all entries in specContent.Blocks are content blocks. + if len(specContent.Blocks) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing dynamic content block", + Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.", + Subject: &specContent.MissingItemRange, + }) + return nil, diags + } + if len(specContent.Blocks) > 1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous dynamic content block", + Detail: "Only one nested content block is allowed for each dynamic block.", + Subject: &specContent.Blocks[1].DefRange, + }) + return nil, diags + } + + return &expandSpec{ + blockType: blockS.Type, + blockTypeRange: rawSpec.LabelRanges[0], + defRange: rawSpec.DefRange, + forEachVal: eachVal, + iteratorName: iteratorName, + labelExprs: labelExprs, + contentBody: specContent.Blocks[0].Body, + }, diags +} + +func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) { + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + lCtx := i.EvalContext(ctx) + for _, labelExpr := range s.labelExprs { + labelVal, labelDiags := labelExpr.Value(lCtx) + diags = append(diags, labelDiags...) 
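+ // Labels are evaluated in lCtx, which includes the iteration
+ // variables, so each generated block can carry different labels.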
+ if labelDiags.HasErrors() { + return nil, diags + } + + var convErr error + labelVal, convErr = convert.Convert(labelVal, cty.String) + if convErr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr), + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if labelVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "Cannot use a null value as a dynamic block label.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + if !labelVal.IsKnown() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid dynamic block label", + Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.", + Subject: labelExpr.Range().Ptr(), + Expression: labelExpr, + EvalContext: lCtx, + }) + return nil, diags + } + + labels = append(labels, labelVal.AsString()) + labelRanges = append(labelRanges, labelExpr.Range()) + } + + block := &hcl.Block{ + Type: s.blockType, + TypeRange: s.blockTypeRange, + Labels: labels, + LabelRanges: labelRanges, + DefRange: s.defRange, + Body: s.contentBody, + } + + return block, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go new file mode 100644 index 00000000..6916fc15 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go @@ -0,0 +1,42 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type exprWrap struct { + hcl.Expression + i *iteration +} + +func (e exprWrap) Variables() []hcl.Traversal { + raw := e.Expression.Variables() + ret := make([]hcl.Traversal, 0, len(raw)) + + // Filter out traversals that refer to our iterator name or any + // iterator we've inherited; we're going to provide those in + // our Value wrapper, so the caller doesn't need to know about them. + for _, traversal := range raw { + rootName := traversal.RootName() + if rootName == e.i.IteratorName { + continue + } + if _, inherited := e.i.Inherited[rootName]; inherited { + continue + } + ret = append(ret, traversal) + } + return ret +} + +func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + extCtx := e.i.EvalContext(ctx) + return e.Expression.Value(extCtx) +} + +// UnwrapExpression returns the expression being wrapped by this instance. +// This allows the original expression to be recovered by hcl.UnwrapExpression. 
+func (e exprWrap) UnwrapExpression() hcl.Expression { + return e.Expression +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go new file mode 100644 index 00000000..7056d336 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go @@ -0,0 +1,66 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type iteration struct { + IteratorName string + Key cty.Value + Value cty.Value + Inherited map[string]*iteration +} + +func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration { + return &iteration{ + IteratorName: s.iteratorName, + Key: key, + Value: value, + Inherited: s.inherited, + } +} + +func (i *iteration) Object() cty.Value { + return cty.ObjectVal(map[string]cty.Value{ + "key": i.Key, + "value": i.Value, + }) +} + +func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext { + new := base.NewChild() + + if i != nil { + new.Variables = map[string]cty.Value{} + for name, otherIt := range i.Inherited { + new.Variables[name] = otherIt.Object() + } + new.Variables[i.IteratorName] = i.Object() + } + + return new +} + +func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration { + if i == nil { + // Create entirely new root iteration, then + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + } + } + + inherited := map[string]*iteration{} + for name, otherIt := range i.Inherited { + inherited[name] = otherIt + } + inherited[i.IteratorName] = i + return &iteration{ + IteratorName: iteratorName, + Key: key, + Value: value, + Inherited: inherited, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go new file mode 100644 index 00000000..b7e8ca95 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go @@ -0,0 +1,44 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Expand "dynamic" blocks in the given body, returning a new body that +// has those blocks expanded. +// +// The given EvalContext is used when evaluating "for_each" and "labels" +// attributes within dynamic blocks, allowing those expressions access to +// variables and functions beyond the iterator variable created by the +// iteration. +// +// Expand returns no diagnostics because no blocks are actually expanded +// until a call to Content or PartialContent on the returned body, which +// will then expand only the blocks selected by the schema. +// +// "dynamic" blocks are also expanded automatically within nested blocks +// in the given body, including within other dynamic blocks, thus allowing +// multi-dimensional iteration. However, it is not possible to +// dynamically-generate the "dynamic" blocks themselves except through nesting. 
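+// For example: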
+//
+// parent {
+// dynamic "child" {
+// for_each = child_objs
+// content {
+// dynamic "grandchild" {
+// for_each = child.value.children
+// labels = [grandchild.key]
+// content {
+// parent_key = child.key
+// value = grandchild.value
+// }
+// }
+// }
+// }
+// }
+func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body {
+ return &expandBody{
+ original: body,
+ forEachCtx: ctx,
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
new file mode 100644
index 00000000..dc8ed5a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
@@ -0,0 +1,50 @@
+package dynblock
+
+import "github.com/hashicorp/hcl2/hcl"
+
+var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{
+ Type: "dynamic",
+ LabelNames: []string{"type"},
+}
+
+var dynamicBlockBodySchemaLabels = &hcl.BodySchema{
+ Attributes: []hcl.AttributeSchema{
+ {
+ Name: "for_each",
+ Required: true,
+ },
+ {
+ Name: "iterator",
+ Required: false,
+ },
+ {
+ Name: "labels",
+ Required: true,
+ },
+ },
+ Blocks: []hcl.BlockHeaderSchema{
+ {
+ Type: "content",
+ LabelNames: nil,
+ },
+ },
+}
+
+var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{
+ Attributes: []hcl.AttributeSchema{
+ {
+ Name: "for_each",
+ Required: true,
+ },
+ {
+ Name: "iterator",
+ Required: false,
+ },
+ },
+ Blocks: []hcl.BlockHeaderSchema{
+ {
+ Type: "content",
+ LabelNames: nil,
+ },
+ },
+}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
new file mode 100644
index 00000000..932f6a32
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
@@ -0,0 +1,84 @@
+package dynblock
+
+import (
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/zclconf/go-cty/cty"
+)
+
+// unknownBody is a funny body that just reports everything inside it as
+// unknown. It uses a given other body as a sort of template for what attributes
+// and blocks are inside -- including source location information -- but
+// substitutes unknown values of unknown type for all attributes.
+//
+// This rather odd process is used to handle expansion of dynamic blocks whose
+// for_each expression is unknown. Since a block cannot itself be unknown,
+// we instead arrange for everything _inside_ the block to be unknown,
+// to give the best possible approximation.
+type unknownBody struct {
+ template hcl.Body
+}
+
+var _ hcl.Body = unknownBody{}
+
+func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+ content, diags := b.template.Content(schema)
+ content = b.fixupContent(content)
+
+ // We're intentionally preserving the diagnostics reported from the
+ // inner body so that we can still report where the template body doesn't
+ // match the requested schema.
+ return content, diags
+}
+
+func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
+ content, remain, diags := b.template.PartialContent(schema)
+ content = b.fixupContent(content)
+ remain = unknownBody{remain} // remaining content must also be wrapped
+
+ // We're intentionally preserving the diagnostics reported from the
+ // inner body so that we can still report where the template body doesn't
+ // match the requested schema.
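+ // Wrapping "remain" above ensures that any later calls against the
+ // leftover body also report unknown leaf values.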
+ return content, remain, diags +} + +func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + attrs, diags := b.template.JustAttributes() + attrs = b.fixupAttrs(attrs) + + // We're intentionally preserving the diagnostics reported from the + // inner body so that we can still report where the template body doesn't + // match the requested schema. + return attrs, diags +} + +func (b unknownBody) MissingItemRange() hcl.Range { + return b.template.MissingItemRange() +} + +func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent { + ret := &hcl.BodyContent{} + ret.Attributes = b.fixupAttrs(got.Attributes) + if len(got.Blocks) > 0 { + ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks)) + for _, gotBlock := range got.Blocks { + new := *gotBlock // shallow copy + new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown + ret.Blocks = append(ret.Blocks, &new) + } + } + + return ret +} + +func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes { + if len(got) == 0 { + return nil + } + ret := make(hcl.Attributes, len(got)) + for name, gotAttr := range got { + new := *gotAttr // shallow copy + new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range()) + ret[name] = &new + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go new file mode 100644 index 00000000..ad838f3e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go @@ -0,0 +1,209 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// WalkVariables begins the recursive process of walking all expressions and +// nested blocks in the given body and its child bodies while taking into +// account any "dynamic" blocks. +// +// This function requires that the caller walk through the nested block +// structure in the given body level-by-level so that an appropriate schema +// can be provided at each level to inform further processing. This workflow +// is thus easiest to use for calling applications that have some higher-level +// schema representation available with which to drive this multi-step +// process. If your application uses the hcldec package, you may be able to +// use VariablesHCLDec instead for a more automatic approach. +func WalkVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + includeContent: true, + } +} + +// WalkExpandVariables is like Variables but it includes only the variables +// required for successful block expansion, ignoring any variables referenced +// inside block contents. The result is the minimal set of all variables +// required for a call to Expand, excluding variables that would only be +// needed to subsequently call Content or PartialContent on the expanded +// body. +func WalkExpandVariables(body hcl.Body) WalkVariablesNode { + return WalkVariablesNode{ + body: body, + } +} + +type WalkVariablesNode struct { + body hcl.Body + it *iteration + + includeContent bool +} + +type WalkVariablesChild struct { + BlockTypeName string + Node WalkVariablesNode +} + +// Body returns the HCL Body associated with the child node, in case the caller +// wants to do some sort of inspection of it in order to decide what schema +// to pass to Visit. +// +// Most implementations should just fetch a fixed schema based on the +// BlockTypeName field and not access this. 
Deciding on a schema dynamically +// based on the body is a strange thing to do and generally necessary only if +// your caller is already doing other bizarre things with HCL bodies. +func (c WalkVariablesChild) Body() hcl.Body { + return c.Node.body +} + +// Visit returns the variable traversals required for any "dynamic" blocks +// directly in the body associated with this node, and also returns any child +// nodes that must be visited in order to continue the walk. +// +// Each child node has its associated block type name given in its BlockTypeName +// field, which the calling application should use to determine the appropriate +// schema for the content of each child node and pass it to the child node's +// own Visit method to continue the walk recursively. +func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) { + extSchema := n.extendSchema(schema) + container, _, _ := n.body.PartialContent(extSchema) + if container == nil { + return vars, children + } + + children = make([]WalkVariablesChild, 0, len(container.Blocks)) + + if n.includeContent { + for _, attr := range container.Attributes { + for _, traversal := range attr.Expr.Variables() { + var ours, inherited bool + if n.it != nil { + ours = traversal.RootName() == n.it.IteratorName + _, inherited = n.it.Inherited[traversal.RootName()] + } + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + } + + for _, block := range container.Blocks { + switch block.Type { + + case "dynamic": + blockTypeName := block.Labels[0] + inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema) + if inner == nil { + continue + } + + iteratorName := blockTypeName + if attr, exists := inner.Attributes["iterator"]; exists { + iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr) + if len(iterTraversal) == 0 { + // Ignore this invalid dynamic block, since it'll produce + // an error if someone tries to extract content from it + // later anyway. + continue + } + iteratorName = iterTraversal.RootName() + } + blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal) + + if attr, exists := inner.Attributes["for_each"]; exists { + // Filter out iterator names inherited from parent blocks + for _, traversal := range attr.Expr.Variables() { + if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited { + vars = append(vars, traversal) + } + } + } + if attr, exists := inner.Attributes["labels"]; exists { + // Filter out both our own iterator name _and_ those inherited + // from parent blocks, since we provide _both_ of these to the + // label expressions. + for _, traversal := range attr.Expr.Variables() { + ours := traversal.RootName() == iteratorName + _, inherited := blockIt.Inherited[traversal.RootName()] + + if !(ours || inherited) { + vars = append(vars, traversal) + } + } + } + + for _, contentBlock := range inner.Blocks { + // We only request "content" blocks in our schema, so we know + // any blocks we find here will be content blocks. We require + // exactly one content block for actual expansion, but we'll + // be more liberal here so that callers can still collect + // variables from erroneous "dynamic" blocks. 
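+ // Each content block becomes a child node carrying blockIt, so the
+ // child's own Visit call can filter out references to this iterator
+ // and anything it inherits.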
+ children = append(children, WalkVariablesChild{ + BlockTypeName: blockTypeName, + Node: WalkVariablesNode{ + body: contentBlock.Body, + it: blockIt, + includeContent: n.includeContent, + }, + }) + } + + default: + children = append(children, WalkVariablesChild{ + BlockTypeName: block.Type, + Node: WalkVariablesNode{ + body: block.Body, + it: n.it, + includeContent: n.includeContent, + }, + }) + + } + } + + return vars, children +} + +func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema { + // We augment the requested schema to also include our special "dynamic" + // block type, since then we'll get instances of it interleaved with + // all of the literal child blocks we must also include. + extSchema := &hcl.BodySchema{ + Attributes: schema.Attributes, + Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1), + } + copy(extSchema.Blocks, schema.Blocks) + extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema) + + return extSchema +} + +// This is a more relaxed schema than what's in schema.go, since we +// want to maximize the amount of variables we can find even if there +// are erroneous blocks. +var variableDetectionInnerSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "for_each", + Required: false, + }, + { + Name: "labels", + Required: false, + }, + { + Name: "iterator", + Required: false, + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "content", + }, + }, +} diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go new file mode 100644 index 00000000..a078d915 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go @@ -0,0 +1,43 @@ +package dynblock + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" +) + +// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec +// specification to automatically drive the recursive walk through nested +// blocks in the given body. +// +// This is a drop-in replacement for hcldec.Variables which is able to treat +// blocks of type "dynamic" in the same special way that dynblock.Expand would, +// exposing both the variables referenced in the "for_each" and "labels" +// arguments and variables used in the nested "content" block. +func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the +// minimal set of variables required to call Expand, ignoring variables that +// are referenced only inside normal block contents. See WalkExpandVariables +// for more information. +func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal { + rootNode := WalkExpandVariables(body) + return walkVariablesWithHCLDec(rootNode, spec) +} + +func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal { + vars, children := node.Visit(hcldec.ImpliedSchema(spec)) + + if len(children) > 0 { + childSpecs := hcldec.ChildBlockTypes(spec) + for _, child := range children { + if childSpec, exists := childSpecs[child.BlockTypeName]; exists { + vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...) 
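+ // Child block types with no corresponding child spec are not
+ // descended into, since hcldec could not decode them anyway.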
+ }
+ }
+ }
+
+ return vars
+}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md
new file mode 100644
index 00000000..ff2b3f22
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md
@@ -0,0 +1,67 @@
+# HCL Type Expressions Extension
+
+This HCL extension defines a convention for describing HCL types using function
+call and variable reference syntax, allowing configuration formats to include
+type information provided by users.
+
+The type syntax is processed statically from an hcl.Expression, so it cannot
+use any of the usual language operators. This is similar to type expressions
+in statically-typed programming languages.
+
+```hcl
+variable "example" {
+ type = list(string)
+}
+```
+
+The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
+functions, and so it relies on the underlying syntax to define how "keyword"
+and "call" are interpreted. The above shows how they are interpreted in
+the HCL native syntax, while the following shows the same information
+expressed in JSON:
+
+```json
+{
+ "variable": {
+ "example": {
+ "type": "list(string)"
+ }
+ }
+}
+```
+
+Notice that since we have the additional contextual information that only
+calls and keywords are allowed, the JSON syntax is able to parse the given
+string directly as an expression, rather than as a template as would be
+the case for normal expression evaluation.
+
+For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl2/ext/typeexpr).
+
+## Type Expression Syntax
+
+When expressed in the native syntax, the following expressions are permitted
+in a type expression:
+
+* `string` - string
+* `bool` - boolean
+* `number` - number
+* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
+* `list(<type>)` - list of the type given as an argument
+* `set(<type>)` - set of the type given as an argument
+* `map(<type>)` - map of the type given as an argument
+* `tuple([<types...>])` - tuple with the element types given in the single list argument
+* `object({<attr name>=<type>, ...})` - object with the attributes and corresponding types given in the single map argument
+
+For example:
+
+* `list(string)`
+* `object({name=string,age=number})`
+* `map(object({name=string,age=number}))`
+
+Note that the object constructor syntax is not fully-general for all possible
+object types because it requires the attribute names to be valid identifiers.
+In practice it is expected that any time an object type is being fixed for
+type checking it will be one that has identifiers as its attributes; object
+types with weird attributes generally show up only from arbitrary object
+constructors in configuration files, which are usually treated either as maps
+or as the dynamic pseudo-type.
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go
new file mode 100644
index 00000000..c4b37957
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go
@@ -0,0 +1,11 @@
+// Package typeexpr extends HCL with a convention for describing HCL types
+// within configuration files.
+//
+// The type syntax is processed statically from an hcl.Expression, so it cannot
+// use any of the usual language operators. This is similar to type expressions
+// in statically-typed programming languages.
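+// For example: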
+// +// variable "example" { +// type = list(string) +// } +package typeexpr diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go new file mode 100644 index 00000000..a84338a8 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go @@ -0,0 +1,196 @@ +package typeexpr + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +const invalidTypeSummary = "Invalid type specification" + +// getType is the internal implementation of both Type and TypeConstraint, +// using the passed flag to distinguish. When constraint is false, the "any" +// keyword will produce an error. +func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) { + // First we'll try for one of our keywords + kw := hcl.ExprAsKeyword(expr) + switch kw { + case "bool": + return cty.Bool, nil + case "string": + return cty.String, nil + case "number": + return cty.Number, nil + case "any": + if constraint { + return cty.DynamicPseudoType, nil + } + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw), + Subject: expr.Range().Ptr(), + }} + case "list", "map", "set": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw), + Subject: expr.Range().Ptr(), + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: expr.Range().Ptr(), + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: expr.Range().Ptr(), + }} + case "": + // okay! we'll fall through and try processing as a call, then. + default: + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw), + Subject: expr.Range().Ptr(), + }} + } + + // If we get down here then our expression isn't just a keyword, so we'll + // try to process it as a call instead. 
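+ // hcl.ExprCall performs static analysis of the expression syntax, so
+ // only a literal call like list(string) is accepted here.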
+ call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).", + Subject: expr.Range().Ptr(), + }} + } + + switch call.Name { + case "bool", "string", "number", "any": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name), + Subject: &call.ArgsRange, + }} + } + + if len(call.Arguments) != 1 { + contextRange := call.ArgsRange + subjectRange := call.ArgsRange + if len(call.Arguments) > 1 { + // If we have too many arguments (as opposed to too _few_) then + // we'll highlight the extraneous arguments as the diagnostic + // subject. + subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range()) + } + + switch call.Name { + case "list", "set", "map": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name), + Subject: &subjectRange, + Context: &contextRange, + }} + case "object": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.", + Subject: &subjectRange, + Context: &contextRange, + }} + case "tuple": + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "The tuple type constructor requires one argument specifying the element types as a list.", + Subject: &subjectRange, + Context: &contextRange, + }} + } + } + + switch call.Name { + + case "list": + ety, diags := getType(call.Arguments[0], constraint) + return cty.List(ety), diags + case "set": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Set(ety), diags + case "map": + ety, diags := getType(call.Arguments[0], constraint) + return cty.Map(ety), diags + case "object": + attrDefs, diags := hcl.ExprMap(call.Arguments[0]) + if diags.HasErrors() { + return cty.DynamicPseudoType, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.", + Subject: call.Arguments[0].Range().Ptr(), + Context: expr.Range().Ptr(), + }} + } + + atys := make(map[string]cty.Type) + for _, attrDef := range attrDefs { + attrName := hcl.ExprAsKeyword(attrDef.Key) + if attrName == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: invalidTypeSummary, + Detail: "Object constructor map keys must be attribute names.", + Subject: attrDef.Key.Range().Ptr(), + Context: expr.Range().Ptr(), + }) + continue + } + aty, attrDiags := getType(attrDef.Value, constraint) + diags = append(diags, attrDiags...) 
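+ // Attribute types recurse through getType, so the constraint flag
+ // (which permits "any") propagates into nested attribute types.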
+ atys[attrName] = aty
+ }
+ return cty.Object(atys), diags
+ case "tuple":
+ elemDefs, diags := hcl.ExprList(call.Arguments[0])
+ if diags.HasErrors() {
+ return cty.DynamicPseudoType, hcl.Diagnostics{{
+ Severity: hcl.DiagError,
+ Summary: invalidTypeSummary,
+ Detail: "Tuple type constructor requires a list of element types.",
+ Subject: call.Arguments[0].Range().Ptr(),
+ Context: expr.Range().Ptr(),
+ }}
+ }
+ etys := make([]cty.Type, len(elemDefs))
+ for i, defExpr := range elemDefs {
+ ety, elemDiags := getType(defExpr, constraint)
+ diags = append(diags, elemDiags...)
+ etys[i] = ety
+ }
+ return cty.Tuple(etys), diags
+ default:
+ // Can't access call.Arguments in this path because we've not validated
+ // that it contains exactly one expression here.
+ return cty.DynamicPseudoType, hcl.Diagnostics{{
+ Severity: hcl.DiagError,
+ Summary: invalidTypeSummary,
+ Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
+ Subject: expr.Range().Ptr(),
+ }}
+ }
+}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
new file mode 100644
index 00000000..e3f5eef5
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
@@ -0,0 +1,129 @@
+package typeexpr
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/zclconf/go-cty/cty"
+)
+
+// Type attempts to process the given expression as a type expression and, if
+// successful, returns the resulting type. If unsuccessful, error diagnostics
+// are returned.
+func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+ return getType(expr, false)
+}
+
+// TypeConstraint attempts to parse the given expression as a type constraint
+// and, if successful, returns the resulting type. If unsuccessful, error
+// diagnostics are returned.
+//
+// A type constraint has the same structure as a type, but it additionally
+// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
+// used as a wildcard in type checking and type conversion operations.
+func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
+ return getType(expr, true)
+}
+
+// TypeString returns a string rendering of the given type as it would be
+// expected to appear in the HCL native syntax.
+//
+// This is primarily intended for showing types to the user in an application
+// that uses typeexpr, where the user can be assumed to be familiar with the
+// type expression syntax. In applications that do not use typeexpr these
+// results may be confusing to the user, and so cty.Type's FriendlyName method
+// may be preferable, even though it's less precise.
+//
+// TypeString produces reasonable results only for types like what would be
+// produced by the Type and TypeConstraint functions. In particular, it cannot
+// support capsule types.
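+//
+// For example, TypeString(cty.List(cty.String)) returns "list(string)".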
+func TypeString(ty cty.Type) string { + // Easy cases first + switch ty { + case cty.String: + return "string" + case cty.Bool: + return "bool" + case cty.Number: + return "number" + case cty.DynamicPseudoType: + return "any" + } + + if ty.IsCapsuleType() { + panic("TypeString does not support capsule types") + } + + if ty.IsCollectionType() { + ety := ty.ElementType() + etyString := TypeString(ety) + switch { + case ty.IsListType(): + return fmt.Sprintf("list(%s)", etyString) + case ty.IsSetType(): + return fmt.Sprintf("set(%s)", etyString) + case ty.IsMapType(): + return fmt.Sprintf("map(%s)", etyString) + default: + // Should never happen because the above is exhaustive + panic("unsupported collection type") + } + } + + if ty.IsObjectType() { + var buf bytes.Buffer + buf.WriteString("object({") + atys := ty.AttributeTypes() + names := make([]string, 0, len(atys)) + for name := range atys { + names = append(names, name) + } + sort.Strings(names) + first := true + for _, name := range names { + aty := atys[name] + if !first { + buf.WriteByte(',') + } + if !hclsyntax.ValidIdentifier(name) { + // Should never happen for any type produced by this package, + // but we'll do something reasonable here just so we don't + // produce garbage if someone gives us a hand-assembled object + // type that has weird attribute names. + // Using Go-style quoting here isn't perfect, since it doesn't + // exactly match HCL syntax, but it's fine for an edge-case. + buf.WriteString(fmt.Sprintf("%q", name)) + } else { + buf.WriteString(name) + } + buf.WriteByte('=') + buf.WriteString(TypeString(aty)) + first = false + } + buf.WriteString("})") + return buf.String() + } + + if ty.IsTupleType() { + var buf bytes.Buffer + buf.WriteString("tuple([") + etys := ty.TupleElementTypes() + first := true + for _, ety := range etys { + if !first { + buf.WriteByte(',') + } + buf.WriteString(TypeString(ety)) + first = false + } + buf.WriteString("])") + return buf.String() + } + + // Should never happen because we covered all cases above. + panic(fmt.Errorf("unsupported type %#v", ty)) +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/decode.go b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go new file mode 100644 index 00000000..3a149a8c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/decode.go @@ -0,0 +1,304 @@ +package gohcl + +import ( + "fmt" + "reflect" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" +) + +// DecodeBody extracts the configuration within the given body into the given +// value. This value must be a non-nil pointer to either a struct or +// a map, where in the former case the configuration will be decoded using +// struct tags and in the latter case only attributes are allowed and their +// values are decoded into the map. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
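+//
+// A minimal usage sketch (the Config type here is illustrative, not part
+// of this package, and file is assumed to be a previously-parsed *hcl.File):
+//
+//     type Config struct {
+//         Address string `hcl:"address"`
+//         Port    int    `hcl:"port"`
+//     }
+//     var c Config
+//     diags := gohcl.DecodeBody(file.Body, nil, &c)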
+func DecodeBody(body hcl.Body, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+ rv := reflect.ValueOf(val)
+ if rv.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("target value must be a pointer, not %s", rv.Type().String()))
+ }
+
+ return decodeBodyToValue(body, ctx, rv.Elem())
+}
+
+func decodeBodyToValue(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+ et := val.Type()
+ switch et.Kind() {
+ case reflect.Struct:
+ return decodeBodyToStruct(body, ctx, val)
+ case reflect.Map:
+ return decodeBodyToMap(body, ctx, val)
+ default:
+ panic(fmt.Sprintf("target value must be pointer to struct or map, not %s", et.String()))
+ }
+}
+
+func decodeBodyToStruct(body hcl.Body, ctx *hcl.EvalContext, val reflect.Value) hcl.Diagnostics {
+ schema, partial := ImpliedBodySchema(val.Interface())
+
+ var content *hcl.BodyContent
+ var leftovers hcl.Body
+ var diags hcl.Diagnostics
+ if partial {
+ content, leftovers, diags = body.PartialContent(schema)
+ } else {
+ content, diags = body.Content(schema)
+ }
+ if content == nil {
+ return diags
+ }
+
+ tags := getFieldTags(val.Type())
+
+ if tags.Remain != nil {
+ fieldIdx := *tags.Remain
+ field := val.Type().Field(fieldIdx)
+ fieldV := val.Field(fieldIdx)
+ switch {
+ case bodyType.AssignableTo(field.Type):
+ fieldV.Set(reflect.ValueOf(leftovers))
+ case attrsType.AssignableTo(field.Type):
+ attrs, attrsDiags := leftovers.JustAttributes()
+ if len(attrsDiags) > 0 {
+ diags = append(diags, attrsDiags...)
+ }
+ fieldV.Set(reflect.ValueOf(attrs))
+ default:
+ diags = append(diags, decodeBodyToValue(leftovers, ctx, fieldV)...)
+ }
+ }
+
+ for name, fieldIdx := range tags.Attributes {
+ attr := content.Attributes[name]
+ field := val.Type().Field(fieldIdx)
+ fieldV := val.Field(fieldIdx)
+
+ if attr == nil {
+ if !exprType.AssignableTo(field.Type) {
+ continue
+ }
+
+ // As a special case, if the target is of type hcl.Expression then
+ // we'll assign an actual expression that evaluates to a cty null,
+ // so the caller can deal with it within the cty realm rather
+ // than within the Go realm.
+ synthExpr := hcl.StaticExpr(cty.NullVal(cty.DynamicPseudoType), body.MissingItemRange())
+ fieldV.Set(reflect.ValueOf(synthExpr))
+ continue
+ }
+
+ switch {
+ case attrType.AssignableTo(field.Type):
+ fieldV.Set(reflect.ValueOf(attr))
+ case exprType.AssignableTo(field.Type):
+ fieldV.Set(reflect.ValueOf(attr.Expr))
+ default:
+ diags = append(diags, DecodeExpression(
+ attr.Expr, ctx, fieldV.Addr().Interface(),
+ )...)
+ }
+ }
+
+ blocksByType := content.Blocks.ByType()
+
+ for typeName, fieldIdx := range tags.Blocks {
+ blocks := blocksByType[typeName]
+ field := val.Type().Field(fieldIdx)
+
+ ty := field.Type
+ isSlice := false
+ isPtr := false
+ if ty.Kind() == reflect.Slice {
+ isSlice = true
+ ty = ty.Elem()
+ }
+ if ty.Kind() == reflect.Ptr {
+ isPtr = true
+ ty = ty.Elem()
+ }
+
+ if len(blocks) > 1 && !isSlice {
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: fmt.Sprintf("Duplicate %s block", typeName),
+ Detail: fmt.Sprintf(
+ "Only one %s block is allowed. 
Another was defined at %s.", + typeName, blocks[0].DefRange.String(), + ), + Subject: &blocks[1].DefRange, + }) + continue + } + + if len(blocks) == 0 { + if isSlice || isPtr { + val.Field(fieldIdx).Set(reflect.Zero(field.Type)) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", typeName), + Detail: fmt.Sprintf("A %s block is required.", typeName), + Subject: body.MissingItemRange().Ptr(), + }) + } + continue + } + + switch { + + case isSlice: + elemType := ty + if isPtr { + elemType = reflect.PtrTo(ty) + } + sli := reflect.MakeSlice(reflect.SliceOf(elemType), len(blocks), len(blocks)) + + for i, block := range blocks { + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + sli.Index(i).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, sli.Index(i))...) + } + } + + val.Field(fieldIdx).Set(sli) + + default: + block := blocks[0] + if isPtr { + v := reflect.New(ty) + diags = append(diags, decodeBlockToValue(block, ctx, v.Elem())...) + val.Field(fieldIdx).Set(v) + } else { + diags = append(diags, decodeBlockToValue(block, ctx, val.Field(fieldIdx))...) + } + + } + + } + + return diags +} + +func decodeBodyToMap(body hcl.Body, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + attrs, diags := body.JustAttributes() + if attrs == nil { + return diags + } + + mv := reflect.MakeMap(v.Type()) + + for k, attr := range attrs { + switch { + case attrType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr)) + case exprType.AssignableTo(v.Type().Elem()): + mv.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(attr.Expr)) + default: + ev := reflect.New(v.Type().Elem()) + diags = append(diags, DecodeExpression(attr.Expr, ctx, ev.Interface())...) + mv.SetMapIndex(reflect.ValueOf(k), ev.Elem()) + } + } + + v.Set(mv) + + return diags +} + +func decodeBlockToValue(block *hcl.Block, ctx *hcl.EvalContext, v reflect.Value) hcl.Diagnostics { + var diags hcl.Diagnostics + + ty := v.Type() + + switch { + case blockType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block)) + case bodyType.AssignableTo(ty): + v.Elem().Set(reflect.ValueOf(block.Body)) + case attrsType.AssignableTo(ty): + attrs, attrsDiags := block.Body.JustAttributes() + if len(attrsDiags) > 0 { + diags = append(diags, attrsDiags...) + } + v.Elem().Set(reflect.ValueOf(attrs)) + default: + diags = append(diags, decodeBodyToValue(block.Body, ctx, v)...) + + if len(block.Labels) > 0 { + blockTags := getFieldTags(ty) + for li, lv := range block.Labels { + lfieldIdx := blockTags.Labels[li].FieldIndex + v.Field(lfieldIdx).Set(reflect.ValueOf(lv)) + } + } + + } + + return diags +} + +// DecodeExpression extracts the value of the given expression into the given +// value. This value must be something that gocty is able to decode into, +// since the final decoding is delegated to that package. +// +// The given EvalContext is used to resolve any variables or functions in +// expressions encountered while decoding. This may be nil to require only +// constant values, for simple applications that do not support variables or +// functions. +// +// The returned diagnostics should be inspected with its HasErrors method to +// determine if the populated value is valid and complete. If error diagnostics +// are returned then the given value may have been partially-populated but +// may still be accessed by a careful caller for static analysis and editor +// integration use-cases. 
+func DecodeExpression(expr hcl.Expression, ctx *hcl.EvalContext, val interface{}) hcl.Diagnostics {
+ srcVal, diags := expr.Value(ctx)
+
+ convTy, err := gocty.ImpliedType(val)
+ if err != nil {
+ panic(fmt.Sprintf("unsuitable DecodeExpression target: %s", err))
+ }
+
+ srcVal, err = convert.Convert(srcVal, convTy)
+ if err != nil {
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unsuitable value type",
+ Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
+ Subject: expr.StartRange().Ptr(),
+ Context: expr.Range().Ptr(),
+ })
+ return diags
+ }
+
+ err = gocty.FromCtyValue(srcVal, val)
+ if err != nil {
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unsuitable value type",
+ Detail: fmt.Sprintf("Unsuitable value: %s", err.Error()),
+ Subject: expr.StartRange().Ptr(),
+ Context: expr.Range().Ptr(),
+ })
+ }
+
+ return diags
+}
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
new file mode 100644
index 00000000..aa3c6ea9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -0,0 +1,53 @@
+// Package gohcl allows decoding HCL configurations into Go data structures.
+//
+// It provides a convenient and concise way of describing the schema for
+// configuration and then accessing the resulting data via native Go
+// types.
+//
+// A struct field tag scheme is used, similar to other decoding and
+// unmarshalling libraries. The tags are formatted as in the following example:
+//
+// ThingType string `hcl:"thing_type,attr"`
+//
+// Within each tag there are two comma-separated tokens. The first is the
+// name of the corresponding construct in configuration, while the second
+// is a keyword giving the kind of construct expected. The following
+// kind keywords are supported:
+//
+// attr (the default) indicates that the value is to be populated from an attribute
+// block indicates that the value is to be populated from a block
+// label indicates that the value is to be populated from a block label
+// remain indicates that the value is to be populated from the remaining body after populating other fields
+//
+// "attr" fields may either be of type *hcl.Expression, in which case the raw
+// expression is assigned, or of any type accepted by gocty, in which case
+// gocty will be used to assign the value to a native Go type.
+//
+// "block" fields may be of type *hcl.Block or hcl.Body, in which case the
+// corresponding raw value is assigned, or may be a struct that recursively
+// uses the same tags. Block fields may also be slices of any of these types,
+// in which case multiple blocks of the corresponding type are decoded into
+// the slice.
+//
+// "label" fields are considered only in a struct used as the type of a field
+// marked as "block", and are used sequentially to capture the labels of
+// the blocks being decoded. In this case, the name token is used only as
+// an identifier for the label in diagnostic messages.
+//
+// "remain" can be placed on a single field that may be either of type
+// hcl.Body or hcl.Attributes, in which case any remaining body content is
+// placed into this field for delayed processing. If no "remain" field is
+// present then any attributes or blocks not matched by another valid tag
+// will cause an error diagnostic.
+//
+// Only a subset of this tagging/typing vocabulary is supported for the
+// "Encode" family of functions. 
See the EncodeIntoBody docs for full details
+// on the constraints there.
+//
+// Broadly-speaking this package deals with two types of error. The first is
+// errors in the configuration itself, which are returned as diagnostics
+// written with the configuration author as the target audience. The second
+// is bugs in the calling program, such as invalid struct tags, which are
+// surfaced via panics since there can be no useful runtime handling of such
+// errors and they should certainly not be returned to the user as diagnostics.
+package gohcl
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
new file mode 100644
index 00000000..3cbf7e48
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
@@ -0,0 +1,191 @@
+package gohcl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "github.com/hashicorp/hcl2/hclwrite"
+ "github.com/zclconf/go-cty/cty/gocty"
+)
+
+// EncodeIntoBody replaces the contents of the given hclwrite Body with
+// attributes and blocks derived from the given value, which must be a
+// struct value or a pointer to a struct value with the struct tags defined
+// in this package.
+//
+// This function can work only with fully-decoded data. It will ignore any
+// fields tagged as "remain", any fields that decode attributes into either
+// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
+// into hcl.Attributes values. This function does not have enough information
+// to complete the decoding of these types.
+//
+// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
+// to produce a whole hclwrite.Block including block labels.
+//
+// As long as a suitable value is given to encode and the destination body
+// is non-nil, this function will always complete. It will panic in case of
+// any errors in the calling program, such as passing an inappropriate type
+// or a nil body.
+//
+// The layout of the resulting HCL source is derived from the ordering of
+// the struct fields, with blank lines around nested blocks of different types.
+// Fields representing attributes should usually precede those representing
+// blocks so that the attributes can group together in the result. For more
+// control, use the hclwrite API directly.
+func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
+ rv := reflect.ValueOf(val)
+ ty := rv.Type()
+ if ty.Kind() == reflect.Ptr {
+ rv = rv.Elem()
+ ty = rv.Type()
+ }
+ if ty.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
+ }
+
+ tags := getFieldTags(ty)
+ populateBody(rv, ty, tags, dst)
+}
+
+// EncodeAsBlock creates a new hclwrite.Block populated with the data from
+// the given value, which must be a struct or pointer to struct with the
+// struct tags defined in this package.
+//
+// If the given struct type has fields tagged with "label" tags then they
+// will be used in order to annotate the created block with labels.
+//
+// This function has the same constraints as EncodeIntoBody and will panic
+// if they are violated.
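+//
+// A hypothetical usage sketch (service is an illustrative struct value
+// with gohcl struct tags):
+//
+//     f := hclwrite.NewEmptyFile()
+//     f.Body().AppendBlock(gohcl.EncodeAsBlock(&service, "service"))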
+func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block { + rv := reflect.ValueOf(val) + ty := rv.Type() + if ty.Kind() == reflect.Ptr { + rv = rv.Elem() + ty = rv.Type() + } + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("value is %s, not struct", ty.Kind())) + } + + tags := getFieldTags(ty) + labels := make([]string, len(tags.Labels)) + for i, lf := range tags.Labels { + lv := rv.Field(lf.FieldIndex) + // We just stringify whatever we find. It should always be a string + // but if not then we'll still do something reasonable. + labels[i] = fmt.Sprintf("%s", lv.Interface()) + } + + block := hclwrite.NewBlock(blockType, labels) + populateBody(rv, ty, tags, block.Body()) + return block +} + +func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) { + nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks)) + namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks)) + for n, i := range tags.Attributes { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + for n, i := range tags.Blocks { + nameIdxs[n] = i + namesOrder = append(namesOrder, n) + } + sort.SliceStable(namesOrder, func(i, j int) bool { + ni, nj := namesOrder[i], namesOrder[j] + return nameIdxs[ni] < nameIdxs[nj] + }) + + dst.Clear() + + prevWasBlock := false + for _, name := range namesOrder { + fieldIdx := nameIdxs[name] + field := ty.Field(fieldIdx) + fieldTy := field.Type + fieldVal := rv.Field(fieldIdx) + + if fieldTy.Kind() == reflect.Ptr { + fieldTy = fieldTy.Elem() + fieldVal = fieldVal.Elem() + } + + if _, isAttr := tags.Attributes[name]; isAttr { + + if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) { + continue // ignore undecoded fields + } + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + if prevWasBlock { + dst.AppendNewline() + prevWasBlock = false + } + + valTy, err := gocty.ImpliedType(fieldVal.Interface()) + if err != nil { + panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err)) + } + + val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy) + if err != nil { + // This should never happen, since we should always be able + // to decode into the implied type. 
+ panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err)) + } + + dst.SetAttributeValue(name, val) + + } else { // must be a block, then + elemTy := fieldTy + isSeq := false + if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array { + isSeq = true + elemTy = elemTy.Elem() + } + + if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) { + continue // ignore undecoded fields + } + prevWasBlock = false + + if isSeq { + l := fieldVal.Len() + for i := 0; i < l; i++ { + elemVal := fieldVal.Index(i) + if !elemVal.IsValid() { + continue // ignore (elem value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(elemVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } else { + if !fieldVal.IsValid() { + continue // ignore (field value is nil pointer) + } + if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() { + continue // ignore + } + block := EncodeAsBlock(fieldVal.Interface(), name) + if !prevWasBlock { + dst.AppendNewline() + prevWasBlock = true + } + dst.AppendBlock(block) + } + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/schema.go b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go new file mode 100644 index 00000000..88164cb0 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/schema.go @@ -0,0 +1,174 @@ +package gohcl + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedBodySchema produces a hcl.BodySchema derived from the type of the +// given value, which must be a struct value or a pointer to one. If an +// inappropriate value is passed, this function will panic. +// +// The second return argument indicates whether the given struct includes +// a "remain" field, and thus the returned schema is non-exhaustive. +// +// This uses the tags on the fields of the struct to discover how each +// field's value should be expressed within configuration. If an invalid +// mapping is attempted, this function will panic. +func ImpliedBodySchema(val interface{}) (schema *hcl.BodySchema, partial bool) { + ty := reflect.TypeOf(val) + + if ty.Kind() == reflect.Ptr { + ty = ty.Elem() + } + + if ty.Kind() != reflect.Struct { + panic(fmt.Sprintf("given value must be struct, not %T", val)) + } + + var attrSchemas []hcl.AttributeSchema + var blockSchemas []hcl.BlockHeaderSchema + + tags := getFieldTags(ty) + + attrNames := make([]string, 0, len(tags.Attributes)) + for n := range tags.Attributes { + attrNames = append(attrNames, n) + } + sort.Strings(attrNames) + for _, n := range attrNames { + idx := tags.Attributes[n] + optional := tags.Optional[n] + field := ty.Field(idx) + + var required bool + + switch { + case field.Type.AssignableTo(exprType): + // If we're decoding to hcl.Expression then absense can be + // indicated via a null value, so we don't specify that + // the field is required during decoding. 
+ required = false + case field.Type.Kind() != reflect.Ptr && !optional: + required = true + default: + required = false + } + + attrSchemas = append(attrSchemas, hcl.AttributeSchema{ + Name: n, + Required: required, + }) + } + + blockNames := make([]string, 0, len(tags.Blocks)) + for n := range tags.Blocks { + blockNames = append(blockNames, n) + } + sort.Strings(blockNames) + for _, n := range blockNames { + idx := tags.Blocks[n] + field := ty.Field(idx) + fty := field.Type + if fty.Kind() == reflect.Slice { + fty = fty.Elem() + } + if fty.Kind() == reflect.Ptr { + fty = fty.Elem() + } + if fty.Kind() != reflect.Struct { + panic(fmt.Sprintf( + "hcl 'block' tag kind cannot be applied to %s field %s: struct required", field.Type.String(), field.Name, + )) + } + ftags := getFieldTags(fty) + var labelNames []string + if len(ftags.Labels) > 0 { + labelNames = make([]string, len(ftags.Labels)) + for i, l := range ftags.Labels { + labelNames[i] = l.Name + } + } + + blockSchemas = append(blockSchemas, hcl.BlockHeaderSchema{ + Type: n, + LabelNames: labelNames, + }) + } + + partial = tags.Remain != nil + schema = &hcl.BodySchema{ + Attributes: attrSchemas, + Blocks: blockSchemas, + } + return schema, partial +} + +type fieldTags struct { + Attributes map[string]int + Blocks map[string]int + Labels []labelField + Remain *int + Optional map[string]bool +} + +type labelField struct { + FieldIndex int + Name string +} + +func getFieldTags(ty reflect.Type) *fieldTags { + ret := &fieldTags{ + Attributes: map[string]int{}, + Blocks: map[string]int{}, + Optional: map[string]bool{}, + } + + ct := ty.NumField() + for i := 0; i < ct; i++ { + field := ty.Field(i) + tag := field.Tag.Get("hcl") + if tag == "" { + continue + } + + comma := strings.Index(tag, ",") + var name, kind string + if comma != -1 { + name = tag[:comma] + kind = tag[comma+1:] + } else { + name = tag + kind = "attr" + } + + switch kind { + case "attr": + ret.Attributes[name] = i + case "block": + ret.Blocks[name] = i + case "label": + ret.Labels = append(ret.Labels, labelField{ + FieldIndex: i, + Name: name, + }) + case "remain": + if ret.Remain != nil { + panic("only one 'remain' tag is permitted") + } + idx := i // copy, because this loop will continue assigning to i + ret.Remain = &idx + case "optional": + ret.Attributes[name] = i + ret.Optional[name] = true + default: + panic(fmt.Sprintf("invalid hcl field tag kind %q on %s %q", kind, field.Type.String(), field.Name)) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/types.go b/vendor/github.com/hashicorp/hcl2/gohcl/types.go new file mode 100644 index 00000000..a94f275a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/gohcl/types.go @@ -0,0 +1,16 @@ +package gohcl + +import ( + "reflect" + + "github.com/hashicorp/hcl2/hcl" +) + +var victimExpr hcl.Expression +var victimBody hcl.Body + +var exprType = reflect.TypeOf(&victimExpr).Elem() +var bodyType = reflect.TypeOf(&victimBody).Elem() +var blockType = reflect.TypeOf((*hcl.Block)(nil)) +var attrType = reflect.TypeOf((*hcl.Attribute)(nil)) +var attrsType = reflect.TypeOf(hcl.Attributes(nil)) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go new file mode 100644 index 00000000..c320961e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go @@ -0,0 +1,143 @@ +package hcl + +import ( + "fmt" +) + +// DiagnosticSeverity represents the severity of a diagnostic. 
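The required/optional rules implemented in `ImpliedBodySchema` above can be observed directly by handing it a tagged struct. A small sketch; the `Backend` type is invented for illustration only.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/gohcl"
)

// Backend is a hypothetical tagged struct used only to show how each
// field kind maps onto the derived schema.
type Backend struct {
	Type    string   `hcl:"type,label"`
	Address string   `hcl:"address"`       // non-pointer, not optional => required
	Timeout *int     `hcl:"timeout"`       // pointer => not required
	Tags    []string `hcl:"tags,optional"` // explicitly optional
	Extra   struct{} `hcl:"extra,block"`
}

func main() {
	schema, partial := gohcl.ImpliedBodySchema(&Backend{})
	for _, a := range schema.Attributes {
		fmt.Printf("attribute %q required=%v\n", a.Name, a.Required)
	}
	for _, b := range schema.Blocks {
		fmt.Printf("block %q labels=%v\n", b.Type, b.LabelNames)
	}
	fmt.Println("partial (has remain field):", partial)
}
```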
+type DiagnosticSeverity int + +const ( + // DiagInvalid is the invalid zero value of DiagnosticSeverity + DiagInvalid DiagnosticSeverity = iota + + // DiagError indicates that the problem reported by a diagnostic prevents + // further progress in parsing and/or evaluating the subject. + DiagError + + // DiagWarning indicates that the problem reported by a diagnostic warrants + // user attention but does not prevent further progress. It is most + // commonly used for showing deprecation notices. + DiagWarning +) + +// Diagnostic represents information to be presented to a user about an +// error or anomoly in parsing or evaluating configuration. +type Diagnostic struct { + Severity DiagnosticSeverity + + // Summary and Detail contain the English-language description of the + // problem. Summary is a terse description of the general problem and + // detail is a more elaborate, often-multi-sentence description of + // the probem and what might be done to solve it. + Summary string + Detail string + + // Subject and Context are both source ranges relating to the diagnostic. + // + // Subject is a tight range referring to exactly the construct that + // is problematic, while Context is an optional broader range (which should + // fully contain Subject) that ought to be shown around Subject when + // generating isolated source-code snippets in diagnostic messages. + // If Context is nil, the Subject is also the Context. + // + // Some diagnostics have no source ranges at all. If Context is set then + // Subject should always also be set. + Subject *Range + Context *Range + + // For diagnostics that occur when evaluating an expression, Expression + // may refer to that expression and EvalContext may point to the + // EvalContext that was active when evaluating it. This may allow for the + // inclusion of additional useful information when rendering a diagnostic + // message to the user. + // + // It is not always possible to select a single EvalContext for a + // diagnostic, and so in some cases this field may be nil even when an + // expression causes a problem. + // + // EvalContexts form a tree, so the given EvalContext may refer to a parent + // which in turn refers to another parent, etc. For a full picture of all + // of the active variables and functions the caller must walk up this + // chain, preferring definitions that are "closer" to the expression in + // case of colliding names. + Expression Expression + EvalContext *EvalContext +} + +// Diagnostics is a list of Diagnostic instances. +type Diagnostics []*Diagnostic + +// error implementation, so that diagnostics can be returned via APIs +// that normally deal in vanilla Go errors. +// +// This presents only minimal context about the error, for compatibility +// with usual expectations about how errors will present as strings. +func (d *Diagnostic) Error() string { + return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail) +} + +// error implementation, so that sets of diagnostics can be returned via +// APIs that normally deal in vanilla Go errors. +func (d Diagnostics) Error() string { + count := len(d) + switch { + case count == 0: + return "no diagnostics" + case count == 1: + return d[0].Error() + default: + return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1) + } +} + +// Append appends a new error to a Diagnostics and return the whole Diagnostics. 
+// +// This is provided as a convenience for returning from a function that +// collects and then returns a set of diagnostics: +// +// return nil, diags.Append(&hcl.Diagnostic{ ... }) +// +// Note that this modifies the array underlying the diagnostics slice, so +// must be used carefully within a single codepath. It is incorrect (and rude) +// to extend a diagnostics created by a different subsystem. +func (d Diagnostics) Append(diag *Diagnostic) Diagnostics { + return append(d, diag) +} + +// Extend concatenates the given Diagnostics with the receiver and returns +// the whole new Diagnostics. +// +// This is similar to Append but accepts multiple diagnostics to add. It has +// all the same caveats and constraints. +func (d Diagnostics) Extend(diags Diagnostics) Diagnostics { + return append(d, diags...) +} + +// HasErrors returns true if the receiver contains any diagnostics of +// severity DiagError. +func (d Diagnostics) HasErrors() bool { + for _, diag := range d { + if diag.Severity == DiagError { + return true + } + } + return false +} + +func (d Diagnostics) Errs() []error { + var errs []error + for _, diag := range d { + if diag.Severity == DiagError { + errs = append(errs, diag) + } + } + + return errs +} + +// A DiagnosticWriter emits diagnostics somehow. +type DiagnosticWriter interface { + WriteDiagnostic(*Diagnostic) error + WriteDiagnostics(Diagnostics) error +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go new file mode 100644 index 00000000..0b4a2629 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -0,0 +1,311 @@ +package hcl + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sort" + + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +type diagnosticTextWriter struct { + files map[string]*File + wr io.Writer + width uint + color bool +} + +// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics +// to the given writer as formatted text. +// +// It is designed to produce text appropriate to print in a monospaced font +// in a terminal of a particular width, or optionally with no width limit. +// +// The given width may be zero to disable word-wrapping of the detail text +// and truncation of source code snippets. +// +// If color is set to true, the output will include VT100 escape sequences to +// color-code the severity indicators. It is suggested to turn this off if +// the target writer is not a terminal. +func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter { + return &diagnosticTextWriter{ + files: files, + wr: wr, + width: width, + color: color, + } +} + +func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { + if diag == nil { + return errors.New("nil diagnostic") + } + + var colorCode, highlightCode, resetCode string + if w.color { + switch diag.Severity { + case DiagError: + colorCode = "\x1b[31m" + case DiagWarning: + colorCode = "\x1b[33m" + } + resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" + } + + var severityStr string + switch diag.Severity { + case DiagError: + severityStr = "Error" + case DiagWarning: + severityStr = "Warning" + default: + // should never happen + severityStr = "???????" 
+ } + + fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) + + if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. + if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + file := w.files[diag.Subject.Filename] + if file == nil || file.Bytes == nil { + fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) + } else { + + var contextLine string + if diag.Subject != nil { + contextLine = contextString(file, diag.Subject.Start.Byte) + if contextLine != "" { + contextLine = ", in " + contextLine + } + } + + fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) + + src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) + + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue + } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + + } + + w.wr.Write([]byte{'\n'}) + } + + if diag.Expression != nil && diag.EvalContext != nil { + // We will attempt to render the values for any variables + // referenced in the given expression as additional context, for + // situations where the same expression is evaluated multiple + // times in different scopes. + expr := diag.Expression + ctx := diag.EvalContext + + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + for _, traversal := range vars { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + continue + } + + traversalStr := w.traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val))) + } + seen[traversalStr] = struct{}{} + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
+ last := len(stmts) - 1 + + for i, stmt := range stmts { + switch i { + case 0: + w.wr.Write([]byte{'w', 'i', 't', 'h', ' '}) + default: + w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '}) + } + w.wr.Write([]byte(stmt)) + switch i { + case last: + w.wr.Write([]byte{'.', '\n', '\n'}) + default: + w.wr.Write([]byte{',', '\n'}) + } + } + } + } + + if diag.Detail != "" { + detail := diag.Detail + if w.width != 0 { + detail = wordwrap.WrapString(detail, w.width) + } + fmt.Fprintf(w.wr, "%s\n\n", detail) + } + + return nil +} + +func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error { + for _, diag := range diags { + err := w.WriteDiagnostic(diag) + if err != nil { + return err + } + } + return nil +} + +func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case TraverseRoot: + buf.WriteString(tStep.Name) + case TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(w.valueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +func (w *diagnosticTextWriter) valueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} + +func contextString(file *File, offset int) string { + type contextStringer interface { + ContextString(offset int) string + } + + if cser, ok := file.Nav.(contextStringer); ok { + return cser.ContextString(offset) + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go new file mode 100644 index 00000000..c1283344 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/didyoumean.go @@ -0,0 +1,24 @@ +package hcl + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/doc.go new file mode 100644 index 00000000..01318c96 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/doc.go @@ -0,0 +1 @@ +package hcl diff --git a/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go new file mode 100644 index 00000000..915910ad --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/eval_context.go @@ -0,0 +1,25 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// An EvalContext provides the variables and functions that should be used +// to evaluate an expression. +type EvalContext struct { + Variables map[string]cty.Value + Functions map[string]function.Function + parent *EvalContext +} + +// NewChild returns a new EvalContext that is a child of the receiver. +func (ctx *EvalContext) NewChild() *EvalContext { + return &EvalContext{parent: ctx} +} + +// Parent returns the parent of the receiver, or nil if the receiver has +// no parent. 
+func (ctx *EvalContext) Parent() *EvalContext { + return ctx.parent +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go new file mode 100644 index 00000000..6963fbae --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_call.go @@ -0,0 +1,46 @@ +package hcl + +// ExprCall tests if the given expression is a function call and, +// if so, extracts the function name and the expressions that represent +// the arguments. If the given expression is not statically a function call, +// error diagnostics are returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprCall that takes no arguments and returns +// *StaticCall. This method should return nil if a static call cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprCall(expr Expression) (*StaticCall, Diagnostics) { + type exprCall interface { + ExprCall() *StaticCall + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprCall) + return supported + }) + + if exC, supported := physExpr.(exprCall); supported { + if call := exC.ExprCall(); call != nil { + return call, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static function call is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// StaticCall represents a function call that was extracted statically from +// an expression using ExprCall. +type StaticCall struct { + Name string + NameRange Range + Arguments []Expression + ArgsRange Range +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go new file mode 100644 index 00000000..d05cca0b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go @@ -0,0 +1,37 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. +// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. 
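A sketch of the static-analysis entry point above: extracting a function call without evaluating anything. The function name `md5` and variable `path` are placeholders, not functions or values this provider defines.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// "md5" and "path" are placeholder names; nothing is evaluated here.
	f, diags := hclsyntax.ParseConfig([]byte("checksum = md5(path)\n"), "calls.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}
	attrs, _ := f.Body.JustAttributes()

	call, callDiags := hcl.ExprCall(attrs["checksum"].Expr)
	if callDiags.HasErrors() {
		panic(callDiags)
	}
	fmt.Printf("call to %q with %d argument(s)\n", call.Name, len(call.Arguments))
}
```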
+func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprList) + return supported + }) + + if exL, supported := physExpr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go new file mode 100644 index 00000000..96d1ce4b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_map.go @@ -0,0 +1,44 @@ +package hcl + +// ExprMap tests if the given expression is a static map construct and, +// if so, extracts the expressions that represent the map elements. +// If the given expression is not a static map, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprMap that takes no arguments and returns +// []KeyValuePair. This method should return nil if a static map cannot +// be extracted. Alternatively, an implementation can support +// UnwrapExpression to delegate handling of this function to a wrapped +// Expression object. +func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) { + type exprMap interface { + ExprMap() []KeyValuePair + } + + physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool { + _, supported := expr.(exprMap) + return supported + }) + + if exM, supported := physExpr.(exprMap); supported { + if pairs := exM.ExprMap(); pairs != nil { + return pairs, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static map expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} + +// KeyValuePair represents a pair of expressions that serve as a single item +// within a map or object definition construct. +type KeyValuePair struct { + Key Expression + Value Expression +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go new file mode 100644 index 00000000..6d5d205c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_unwrap.go @@ -0,0 +1,68 @@ +package hcl + +type unwrapExpression interface { + UnwrapExpression() Expression +} + +// UnwrapExpression removes any "wrapper" expressions from the given expression, +// to recover the representation of the physical expression given in source +// code. +// +// Sometimes wrapping expressions are used to modify expression behavior, e.g. +// in extensions that need to make some local variables available to certain +// sub-trees of the configuration. This can make it difficult to reliably +// type-assert on the physical AST types used by the underlying syntax. +// +// Unwrapping an expression may modify its behavior by stripping away any +// additional constraints or capabilities being applied to the Value and +// Variables methods, so this function should generally only be used prior +// to operations that concern themselves with the static syntax of the input +// configuration, and not with the effective value of the expression. 
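`ExprList` and `ExprMap` can likewise be used together for static inspection of literal constructs. A sketch with invented attribute names; each element comes back as an unevaluated `hcl.Expression`.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

const src = `
ports  = [80, 443]
labels = { app = "web", tier = "frontend" }
`

func main() {
	f, diags := hclsyntax.ParseConfig([]byte(src), "static.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}
	attrs, _ := f.Body.JustAttributes()

	// Elements come back as unevaluated hcl.Expression values.
	elems, _ := hcl.ExprList(attrs["ports"].Expr)
	fmt.Println("ports has", len(elems), "element expressions")

	pairs, _ := hcl.ExprMap(attrs["labels"].Expr)
	for _, kv := range pairs {
		fmt.Printf("key at %s, value at %s\n", kv.Key.Range(), kv.Value.Range())
	}
}
```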
+// +// Wrapper expression types must support unwrapping by implementing a method +// called UnwrapExpression that takes no arguments and returns the embedded +// Expression. Implementations of this method should peel away only one level +// of wrapping, if multiple are present. This method may return nil to +// indicate _dynamically_ that no wrapped expression is available, for +// expression types that might only behave as wrappers in certain cases. +func UnwrapExpression(expr Expression) Expression { + for { + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return expr + } + innerExpr := unwrap.UnwrapExpression() + if innerExpr == nil { + return expr + } + expr = innerExpr + } +} + +// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the +// caller an opportunity to test each level of unwrapping to see each a +// particular expression is accepted. +// +// This could be used, for example, to unwrap until a particular other +// interface is satisfied, regardless of wrap wrapping level it is satisfied +// at. +// +// The given callback function must return false to continue wrapping, or +// true to accept and return the proposed expression given. If the callback +// function rejects even the final, physical expression then the result of +// this function is nil. +func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression { + for { + if until(expr) { + return expr + } + unwrap, wrapped := expr.(unwrapExpression) + if !wrapped { + return nil + } + expr = unwrap.UnwrapExpression() + if expr == nil { + return nil + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go new file mode 100644 index 00000000..94eaf589 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go @@ -0,0 +1,23 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// setDiagEvalContext is an internal helper that will impose a particular +// EvalContext on a set of diagnostics in-place, for any diagnostic that +// does not already have an EvalContext set. +// +// We generally expect diagnostics to be immutable, but this is safe to use +// on any Diagnostics where none of the contained Diagnostic objects have yet +// been seen by a caller. Its purpose is to apply additional context to a +// set of diagnostics produced by a "deeper" component as the stack unwinds +// during expression evaluation. +func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) { + for _, diag := range diags { + if diag.Expression == nil { + diag.Expression = expr + diag.EvalContext = ctx + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go new file mode 100644 index 00000000..ccc1c0ae --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/didyoumean.go @@ -0,0 +1,24 @@ +package hclsyntax + +import ( + "github.com/agext/levenshtein" +) + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. 
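A sketch of the wrapper contract described above: a hypothetical `annotatedExpr` type (not part of this library) that delegates to an embedded expression and supports one level of unwrapping, which lets `UnwrapExpressionUntil` reach the physical syntax node.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// annotatedExpr wraps another expression, e.g. to attach extra metadata,
// while delegating all hcl.Expression behavior to the wrapped node.
type annotatedExpr struct {
	hcl.Expression        // embedded: Value, Variables, Range, StartRange
	note           string // hypothetical metadata carried by the wrapper
}

// UnwrapExpression peels away exactly one level of wrapping.
func (e *annotatedExpr) UnwrapExpression() hcl.Expression {
	return e.Expression
}

func main() {
	f, _ := hclsyntax.ParseConfig([]byte("x = a.b\n"), "w.hcl", hcl.Pos{Line: 1, Column: 1})
	attrs, _ := f.Body.JustAttributes()

	wrapped := &annotatedExpr{Expression: attrs["x"].Expr, note: "example"}

	// Static analysis can see through the wrapper: this stops at the
	// first expression that is a native-syntax traversal.
	phys := hcl.UnwrapExpressionUntil(wrapped, func(expr hcl.Expression) bool {
		_, ok := expr.(*hclsyntax.ScopeTraversalExpr)
		return ok
	})
	fmt.Printf("found physical expression of type %T\n", phys)
}
```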
+// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go new file mode 100644 index 00000000..617bc29d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/doc.go @@ -0,0 +1,7 @@ +// Package hclsyntax contains the parser, AST, etc for HCL's native language, +// as opposed to the JSON variant. +// +// In normal use applications should rarely depend on this package directly, +// instead preferring the higher-level interface of the main hcl package and +// its companion package hclparse. +package hclsyntax diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go new file mode 100644 index 00000000..b9bd6ace --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go @@ -0,0 +1,1459 @@ +package hclsyntax + +import ( + "fmt" + "sync" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// Expression is the abstract type for nodes that behave as HCL expressions. +type Expression interface { + Node + + // The hcl.Expression methods are duplicated here, rather than simply + // embedded, because both Node and hcl.Expression have a Range method + // and so they conflict. + + Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + Variables() []hcl.Traversal + StartRange() hcl.Range +} + +// Assert that Expression implements hcl.Expression +var assertExprImplExpr hcl.Expression = Expression(nil) + +// LiteralValueExpr is an expression that just always returns a given value. +type LiteralValueExpr struct { + Val cty.Value + SrcRange hcl.Range +} + +func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) { + // Literal values have no child nodes +} + +func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Val, nil +} + +func (e *LiteralValueExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *LiteralValueExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *LiteralValueExpr) AsTraversal() hcl.Traversal { + // This one's a little weird: the contract for AsTraversal is to interpret + // an expression as if it were traversal syntax, and traversal syntax + // doesn't have the special keywords "null", "true", and "false" so these + // are expected to be treated like variables in that case. + // Since our parser already turned them into LiteralValueExpr by the time + // we get here, we need to undo this and infer the name that would've + // originally led to our value. + // We don't do anything for any other values, since they don't overlap + // with traversal roots. + + if e.Val.IsNull() { + // In practice the parser only generates null values of the dynamic + // pseudo-type for literals, so we can safely assume that any null + // was orignally the keyword "null". 
+ return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "null", + SrcRange: e.SrcRange, + }, + } + } + + switch e.Val { + case cty.True: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "true", + SrcRange: e.SrcRange, + }, + } + case cty.False: + return hcl.Traversal{ + hcl.TraverseRoot{ + Name: "false", + SrcRange: e.SrcRange, + }, + } + default: + // No traversal is possible for any other value. + return nil + } +} + +// ScopeTraversalExpr is an Expression that retrieves a value from the scope +// using a traversal. +type ScopeTraversalExpr struct { + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) { + // Scope traversals have no child nodes +} + +func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, diags := e.Traversal.TraverseAbs(ctx) + setDiagEvalContext(diags, e, ctx) + return val, diags +} + +func (e *ScopeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ScopeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal { + return e.Traversal +} + +// RelativeTraversalExpr is an Expression that retrieves a value from another +// value using a _relative_ traversal. +type RelativeTraversalExpr struct { + Source Expression + Traversal hcl.Traversal + SrcRange hcl.Range +} + +func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) +} + +func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + src, diags := e.Source.Value(ctx) + ret, travDiags := e.Traversal.TraverseRel(src) + setDiagEvalContext(travDiags, e, ctx) + diags = append(diags, travDiags...) + return ret, diags +} + +func (e *RelativeTraversalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *RelativeTraversalExpr) StartRange() hcl.Range { + return e.SrcRange +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our source can. + st, diags := hcl.AbsTraversalForExpr(e.Source) + if diags.HasErrors() { + return nil + } + + ret := make(hcl.Traversal, len(st)+len(e.Traversal)) + copy(ret, st) + copy(ret[len(st):], e.Traversal) + return ret +} + +// FunctionCallExpr is an Expression that calls a function from the EvalContext +// and returns its result. +type FunctionCallExpr struct { + Name string + Args []Expression + + // If true, the final argument should be a tuple, list or set which will + // expand to be one argument per element. 
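The expand-final behavior documented in the comment above is visible from the caller's side: a trailing `...` on the last argument spreads a list into one argument per element. A sketch using a stand-in variadic `join` function, which is defined here for illustration and is not part of this library.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/function"
)

// joinFunc is a hypothetical variadic function used only for this sketch.
var joinFunc = function.New(&function.Spec{
	VarParam: &function.Parameter{Name: "parts", Type: cty.String},
	Type:     function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, _ cty.Type) (cty.Value, error) {
		parts := make([]string, len(args))
		for i, v := range args {
			parts[i] = v.AsString()
		}
		return cty.StringVal(strings.Join(parts, "-")), nil
	},
})

func main() {
	f, _ := hclsyntax.ParseConfig([]byte("id = join(parts...)\n"), "e.hcl", hcl.Pos{Line: 1, Column: 1})
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"parts": cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c")}),
		},
		Functions: map[string]function.Function{"join": joinFunc},
	}
	attrs, _ := f.Body.JustAttributes()
	v, diags := attrs["id"].Expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(v.AsString()) // a-b-c: three separate arguments after expansion
}
```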
+ ExpandFinal bool + + NameRange hcl.Range + OpenParenRange hcl.Range + CloseParenRange hcl.Range +} + +func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { + for _, arg := range e.Args { + w(arg) + } +} + +func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var f function.Function + exists := false + hasNonNilMap := false + thisCtx := ctx + for thisCtx != nil { + if thisCtx.Functions == nil { + thisCtx = thisCtx.Parent() + continue + } + hasNonNilMap = true + f, exists = thisCtx.Functions[e.Name] + if exists { + break + } + thisCtx = thisCtx.Parent() + } + + if !exists { + if !hasNonNilMap { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Function calls not allowed", + Detail: "Functions may not be called here.", + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + avail := make([]string, 0, len(ctx.Functions)) + for name := range ctx.Functions { + avail = append(avail, name) + } + suggestion := nameSuggestion(e.Name, avail) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Call to unknown function", + Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), + Subject: &e.NameRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + params := f.Params() + varParam := f.VarParam() + + args := e.Args + if e.ExpandFinal { + if len(args) < 1 { + // should never happen if the parser is behaving + panic("ExpandFinal set on function call with no arguments") + } + expandExpr := args[len(args)-1] + expandVal, expandDiags := expandExpr.Value(ctx) + diags = append(diags, expandDiags...) + if expandDiags.HasErrors() { + return cty.DynamicVal, diags + } + + switch { + case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): + if expandVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must not be null.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !expandVal.IsKnown() { + return cty.DynamicVal, diags + } + + newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt()) + newArgs = append(newArgs, args[:len(args)-1]...) + it := expandVal.ElementIterator() + for it.Next() { + _, val := it.Element() + newArgs = append(newArgs, &LiteralValueExpr{ + Val: val, + SrcRange: expandExpr.Range(), + }) + } + args = newArgs + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expanding argument value", + Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", + Subject: expandExpr.Range().Ptr(), + Context: e.Range().Ptr(), + Expression: expandExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + } + + if len(args) < len(params) { + missing := params[len(args)] + qual := "" + if varParam != nil { + qual = " at least" + } + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Not enough function arguments", + Detail: fmt.Sprintf( + "Function %q expects%s %d argument(s). 
Missing value for %q.", + e.Name, qual, len(params), missing.Name, + ), + Subject: &e.CloseParenRange, + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + if varParam == nil && len(args) > len(params) { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Too many function arguments", + Detail: fmt.Sprintf( + "Function %q expects only %d argument(s).", + e.Name, len(params), + ), + Subject: args[len(params)].StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }, + } + } + + argVals := make([]cty.Value, len(args)) + + for i, argExpr := range args { + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + + val, argDiags := argExpr.Value(ctx) + if len(argDiags) > 0 { + diags = append(diags, argDiags...) + } + + // Try to convert our value to the parameter type + val, err := convert.Convert(val, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + } + + argVals[i] = val + } + + if diags.HasErrors() { + // Don't try to execute the function if we already have errors with + // the arguments, because the result will probably be a confusing + // error message. + return cty.DynamicVal, diags + } + + resultVal, err := f.Call(argVals) + if err != nil { + switch terr := err.(type) { + case function.ArgError: + i := terr.Index + var param *function.Parameter + if i < len(params) { + param = ¶ms[i] + } else { + param = varParam + } + argExpr := e.Args[i] + + // TODO: we should also unpick a PathError here and show the + // path to the deep value where the error was detected. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid function argument", + Detail: fmt.Sprintf( + "Invalid value for %q parameter: %s.", + param.Name, err, + ), + Subject: argExpr.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: argExpr, + EvalContext: ctx, + }) + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Error in function call", + Detail: fmt.Sprintf( + "Call to function %q failed: %s.", + e.Name, err, + ), + Subject: e.StartRange().Ptr(), + Context: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + } + + return cty.DynamicVal, diags + } + + return resultVal, diags +} + +func (e *FunctionCallExpr) Range() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.CloseParenRange) +} + +func (e *FunctionCallExpr) StartRange() hcl.Range { + return hcl.RangeBetween(e.NameRange, e.OpenParenRange) +} + +// Implementation for hcl.ExprCall. +func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall { + ret := &hcl.StaticCall{ + Name: e.Name, + NameRange: e.NameRange, + Arguments: make([]hcl.Expression, len(e.Args)), + ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange), + } + // Need to convert our own Expression objects into hcl.Expression. 
+ for i, arg := range e.Args { + ret.Arguments[i] = arg + } + return ret +} + +type ConditionalExpr struct { + Condition Expression + TrueResult Expression + FalseResult Expression + + SrcRange hcl.Range +} + +func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { + w(e.Condition) + w(e.TrueResult) + w(e.FalseResult) +} + +func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + trueResult, trueDiags := e.TrueResult.Value(ctx) + falseResult, falseDiags := e.FalseResult.Value(ctx) + var diags hcl.Diagnostics + + var resultType cty.Type + convs := make([]convert.Conversion, 2) + + switch { + // If either case is a dynamic null value (which would result from a + // literal null in the config), we know that it can convert to the expected + // type of the opposite case, and we don't need to speculatively reduce the + // final result type to DynamicPseudoType. + case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = falseResult.Type() + convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)): + resultType = trueResult.Type() + convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType) + + default: + // Try to find a type that both results can be converted to. + resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()}) + } + + if resultType == cty.NilType { + return cty.DynamicVal, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + // FIXME: Need a helper function for showing natural-language type diffs, + // since this will generate some useless messages in some cases, like + // "These expressions are object and object respectively" if the + // object types don't exactly match. + "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", + trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(), + ), + Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), + Context: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }, + } + } + + condResult, condDiags := e.Condition.Value(ctx) + diags = append(diags, condDiags...) + if condResult.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null condition", + Detail: "The condition value is null. Conditions must either be true or false.", + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + if !condResult.IsKnown() { + return cty.UnknownVal(resultType), diags + } + condResult, err := convert.Convert(condResult, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect condition type", + Detail: fmt.Sprintf("The condition expression must be of type bool."), + Subject: e.Condition.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Condition, + EvalContext: ctx, + }) + return cty.UnknownVal(resultType), diags + } + + if condResult.True() { + diags = append(diags, trueDiags...) 
+ if convs[0] != nil { + var err error + trueResult, err = convs[0](trueResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The true result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.TrueResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.TrueResult, + EvalContext: ctx, + }) + trueResult = cty.UnknownVal(resultType) + } + } + return trueResult, diags + } else { + diags = append(diags, falseDiags...) + if convs[1] != nil { + var err error + falseResult, err = convs[1](falseResult) + if err != nil { + // Unsafe conversion failed with the concrete result value + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Inconsistent conditional result types", + Detail: fmt.Sprintf( + "The false result value has the wrong type: %s.", + err.Error(), + ), + Subject: e.FalseResult.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.FalseResult, + EvalContext: ctx, + }) + falseResult = cty.UnknownVal(resultType) + } + } + return falseResult, diags + } +} + +func (e *ConditionalExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ConditionalExpr) StartRange() hcl.Range { + return e.Condition.StartRange() +} + +type IndexExpr struct { + Collection Expression + Key Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { + w(e.Collection) + w(e.Key) +} + +func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + coll, collDiags := e.Collection.Value(ctx) + key, keyDiags := e.Key.Value(ctx) + diags = append(diags, collDiags...) + diags = append(diags, keyDiags...) + + val, indexDiags := hcl.Index(coll, key, &e.SrcRange) + setDiagEvalContext(indexDiags, e, ctx) + diags = append(diags, indexDiags...) + return val, diags +} + +func (e *IndexExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *IndexExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type TupleConsExpr struct { + Exprs []Expression + + SrcRange hcl.Range + OpenRange hcl.Range +} + +func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) { + for _, expr := range e.Exprs { + w(expr) + } +} + +func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals []cty.Value + var diags hcl.Diagnostics + + vals = make([]cty.Value, len(e.Exprs)) + for i, expr := range e.Exprs { + val, valDiags := expr.Value(ctx) + vals[i] = val + diags = append(diags, valDiags...) 
+ } + + return cty.TupleVal(vals), diags +} + +func (e *TupleConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TupleConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprList +func (e *TupleConsExpr) ExprList() []hcl.Expression { + ret := make([]hcl.Expression, len(e.Exprs)) + for i, expr := range e.Exprs { + ret[i] = expr + } + return ret +} + +type ObjectConsExpr struct { + Items []ObjectConsItem + + SrcRange hcl.Range + OpenRange hcl.Range +} + +type ObjectConsItem struct { + KeyExpr Expression + ValueExpr Expression +} + +func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { + for _, item := range e.Items { + w(item.KeyExpr) + w(item.ValueExpr) + } +} + +func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var vals map[string]cty.Value + var diags hcl.Diagnostics + + // This will get set to true if we fail to produce any of our keys, + // either because they are actually unknown or if the evaluation produces + // errors. In all of these case we must return DynamicPseudoType because + // we're unable to know the full set of keys our object has, and thus + // we can't produce a complete value of the intended type. + // + // We still evaluate all of the item keys and values to make sure that we + // get as complete as possible a set of diagnostics. + known := true + + vals = make(map[string]cty.Value, len(e.Items)) + for _, item := range e.Items { + key, keyDiags := item.KeyExpr.Value(ctx) + diags = append(diags, keyDiags...) + + val, valDiags := item.ValueExpr.Value(ctx) + diags = append(diags, valDiags...) + + if keyDiags.HasErrors() { + known = false + continue + } + + if key.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Null value as key", + Detail: "Can't use a null value as a key.", + Subject: item.ValueExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + var err error + key, err = convert.Convert(key, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect key type", + Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), + Subject: item.KeyExpr.Range().Ptr(), + Expression: item.KeyExpr, + EvalContext: ctx, + }) + known = false + continue + } + + if !key.IsKnown() { + known = false + continue + } + + keyStr := key.AsString() + + vals[keyStr] = val + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.ObjectVal(vals), diags +} + +func (e *ObjectConsExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ObjectConsExpr) StartRange() hcl.Range { + return e.OpenRange +} + +// Implementation for hcl.ExprMap +func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair { + ret := make([]hcl.KeyValuePair, len(e.Items)) + for i, item := range e.Items { + ret[i] = hcl.KeyValuePair{ + Key: item.KeyExpr, + Value: item.ValueExpr, + } + } + return ret +} + +// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys, +// which deals with the special case that a naked identifier in that position +// must be interpreted as a literal string rather than evaluated directly. +type ObjectConsKeyExpr struct { + Wrapped Expression +} + +func (e *ObjectConsKeyExpr) literalName() string { + // This is our logic for deciding whether to behave like a literal string. 
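The `known` handling in `ObjectConsExpr.Value` above has a visible effect: if any key cannot be produced, the whole result degrades to `cty.DynamicVal` because the object's full set of keys, and therefore its type, cannot be determined. A sketch; the variable name `k` is invented.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	f, _ := hclsyntax.ParseConfig([]byte("obj = { \"${k}\" = 1 }\n"), "o.hcl", hcl.Pos{Line: 1, Column: 1})
	attrs, _ := f.Body.JustAttributes()

	// The key is a template whose value is not yet known.
	ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"k": cty.UnknownVal(cty.String)}}
	v, diags := attrs["obj"].Expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Println(v.RawEquals(cty.DynamicVal)) // true: keys unknown, so type unknown
}
```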
+ // We lean on our AbsTraversalForExpr implementation here, which already + // deals with some awkward cases like the expression being the result + // of the keywords "null", "true" and "false" which we'd want to interpret + // as keys here too. + return hcl.ExprAsKeyword(e.Wrapped) +} + +func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) { + // We only treat our wrapped expression as a real expression if we're + // not going to interpret it as a literal. + if e.literalName() == "" { + w(e.Wrapped) + } +} + +func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // Because we accept a naked identifier as a literal key rather than a + // reference, it's confusing to accept a traversal containing periods + // here since we can't tell if the user intends to create a key with + // periods or actually reference something. To avoid confusing downstream + // errors we'll just prohibit a naked multi-step traversal here and + // require the user to state their intent more clearly. + // (This is handled at evaluation time rather than parse time because + // an application using static analysis _can_ accept a naked multi-step + // traversal here, if desired.) + if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 { + var diags hcl.Diagnostics + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Ambiguous attribute key", + Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.", + Subject: e.Range().Ptr(), + }) + return cty.DynamicVal, diags + } + + if ln := e.literalName(); ln != "" { + return cty.StringVal(ln), nil + } + return e.Wrapped.Value(ctx) +} + +func (e *ObjectConsKeyExpr) Range() hcl.Range { + return e.Wrapped.Range() +} + +func (e *ObjectConsKeyExpr) StartRange() hcl.Range { + return e.Wrapped.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal { + // We can produce a traversal only if our wrappee can. + st, diags := hcl.AbsTraversalForExpr(e.Wrapped) + if diags.HasErrors() { + return nil + } + + return st +} + +func (e *ObjectConsKeyExpr) UnwrapExpression() Expression { + return e.Wrapped +} + +// ForExpr represents iteration constructs: +// +// tuple = [for i, v in list: upper(v) if i > 2] +// object = {for k, v in map: k => upper(v)} +// object_of_tuples = {for v in list: v.key: v...} +type ForExpr struct { + KeyVar string // empty if ignoring the key + ValVar string + + CollExpr Expression + + KeyExpr Expression // nil when producing a tuple + ValExpr Expression + CondExpr Expression // null if no "if" clause is present + + Group bool // set if the ellipsis is used on the value in an object for + + SrcRange hcl.Range + OpenRange hcl.Range + CloseRange hcl.Range +} + +func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + collVal, collDiags := e.CollExpr.Value(ctx) + diags = append(diags, collDiags...) 
+ + if collVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over null value", + Detail: "A null value cannot be used as the collection in a 'for' expression.", + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if collVal.Type() == cty.DynamicPseudoType { + return cty.DynamicVal, diags + } + if !collVal.CanIterateElements() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Iteration over non-iterable value", + Detail: fmt.Sprintf( + "A value of type %s cannot be used as the collection in a 'for' expression.", + collVal.Type().FriendlyName(), + ), + Subject: e.CollExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CollExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if !collVal.IsKnown() { + return cty.DynamicVal, diags + } + + // Before we start we'll do an early check to see if any CondExpr we've + // been given is of the wrong type. This isn't 100% reliable (it may + // be DynamicVal until real values are given) but it should catch some + // straightforward cases and prevent a barrage of repeated errors. + if e.CondExpr != nil { + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = cty.DynamicVal + } + childCtx.Variables[e.ValVar] = cty.DynamicVal + + result, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) + if result.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Condition is null", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + _, err := convert.Convert(result, cty.Bool) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + if condDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + if e.KeyExpr != nil { + // Producing an object + var vals map[string]cty.Value + var groupVals map[string][]cty.Value + if e.Group { + groupVals = map[string][]cty.Value{} + } else { + vals = map[string]cty.Value{} + } + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
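ForExpr evaluates the collection once and then evaluates the value expression, plus the optional `if` condition, in a child scope per element. A hedged sketch of the tuple-producing form through the public API; the variable name `nums` is invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Keys of a list iteration are the element indices, so `i > 0` skips
	// the first element.
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`[for i, v in nums: v * v if i > 0]`),
		"demo.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"nums": cty.ListVal([]cty.Value{
				cty.NumberIntVal(2),
				cty.NumberIntVal(3),
				cty.NumberIntVal(4),
			}),
		},
	}
	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Produces a tuple (not a list) containing 9 and 16.
	fmt.Println(val.GoString())
}
```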
+ if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !include.IsKnown() { + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + keyRaw, keyDiags := e.KeyExpr.Value(childCtx) + diags = append(diags, keyDiags...) + if keyRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: "Key expression in 'for' expression must not produce a null value.", + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !keyRaw.IsKnown() { + known = false + continue + } + + key, err := convert.Convert(keyRaw, cty.String) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key", + Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + + if e.Group { + k := key.AsString() + groupVals[k] = append(groupVals[k], val) + } else { + k := key.AsString() + if _, exists := vals[k]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object key", + Detail: fmt.Sprintf( + "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", + k, + ), + Subject: e.KeyExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.KeyExpr, + EvalContext: childCtx, + }) + } else { + vals[key.AsString()] = val + } + } + } + + if !known { + return cty.DynamicVal, diags + } + + if e.Group { + vals = map[string]cty.Value{} + for k, gvs := range groupVals { + vals[k] = cty.TupleVal(gvs) + } + } + + return cty.ObjectVal(vals), diags + + } else { + // Producing a tuple + vals := []cty.Value{} + + it := collVal.ElementIterator() + + known := true + for it.Next() { + k, v := it.Element() + childCtx := ctx.NewChild() + childCtx.Variables = map[string]cty.Value{} + if e.KeyVar != "" { + childCtx.Variables[e.KeyVar] = k + } + childCtx.Variables[e.ValVar] = v + + if e.CondExpr != nil { + includeRaw, condDiags := e.CondExpr.Value(childCtx) + diags = append(diags, condDiags...) 
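The grouping branch above collects values into a per-key tuple when the ellipsis is present, instead of raising "Duplicate object key". A sketch with invented names showing the grouped result:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Without `...` this would fail, because "a" is produced twice; with
	// it, each key maps to a tuple of every value produced for that key.
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`{for i, v in names: v => i...}`),
		"demo.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"names": cty.ListVal([]cty.Value{
				cty.StringVal("a"),
				cty.StringVal("b"),
				cty.StringVal("a"),
			}),
		},
	}
	val, diags := expr.Value(ctx)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Roughly: {"a" = [0, 2], "b" = [1]}
	fmt.Println(val.GoString())
}
```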
+ if includeRaw.IsNull() { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: "The value of the 'if' clause must not be null.", + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + if !includeRaw.IsKnown() { + // We will eventually return DynamicVal, but we'll continue + // iterating in case there are other diagnostics to gather + // for later elements. + known = false + continue + } + + include, err := convert.Convert(includeRaw, cty.Bool) + if err != nil { + if known { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' condition", + Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), + Subject: e.CondExpr.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.CondExpr, + EvalContext: childCtx, + }) + } + known = false + continue + } + + if include.False() { + // Skip this element + continue + } + } + + val, valDiags := e.ValExpr.Value(childCtx) + diags = append(diags, valDiags...) + vals = append(vals, val) + } + + if !known { + return cty.DynamicVal, diags + } + + return cty.TupleVal(vals), diags + } +} + +func (e *ForExpr) walkChildNodes(w internalWalkFunc) { + w(e.CollExpr) + + scopeNames := map[string]struct{}{} + if e.KeyVar != "" { + scopeNames[e.KeyVar] = struct{}{} + } + if e.ValVar != "" { + scopeNames[e.ValVar] = struct{}{} + } + + if e.KeyExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.KeyExpr, + }) + } + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.ValExpr, + }) + if e.CondExpr != nil { + w(ChildScope{ + LocalNames: scopeNames, + Expr: e.CondExpr, + }) + } +} + +func (e *ForExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *ForExpr) StartRange() hcl.Range { + return e.OpenRange +} + +type SplatExpr struct { + Source Expression + Each Expression + Item *AnonSymbolExpr + + SrcRange hcl.Range + MarkerRange hcl.Range +} + +func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + sourceVal, diags := e.Source.Value(ctx) + if diags.HasErrors() { + // We'll evaluate our "Each" expression here just to see if it + // produces any more diagnostics we can report. Since we're not + // assigning a value to our AnonSymbolExpr here it will return + // DynamicVal, which should short-circuit any use of it. + _, itemDiags := e.Item.Value(ctx) + diags = append(diags, itemDiags...) + return cty.DynamicVal, diags + } + + sourceTy := sourceVal.Type() + if sourceTy == cty.DynamicPseudoType { + // If we don't even know the _type_ of our source value yet then + // we'll need to defer all processing, since we can't decide our + // result type either. + return cty.DynamicVal, diags + } + + // A "special power" of splat expressions is that they can be applied + // both to tuples/lists and to other values, and in the latter case + // the value will be treated as an implicit single-item tuple, or as + // an empty tuple if the value is null. 
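A sketch of the splat "auto-upgrade" behavior described in the comment above: a non-sequence value behaves as a single-item tuple, and a typed null as an empty tuple (a null of cty.DynamicPseudoType would instead short-circuit to DynamicVal earlier in the function). The values here are invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func eval(src string, vars map[string]cty.Value) {
	expr, diags := hclsyntax.ParseExpression([]byte(src), "demo.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, diags := expr.Value(&hcl.EvalContext{Variables: vars})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%s -> %s\n", src, val.GoString())
}

func main() {
	obj := cty.ObjectVal(map[string]cty.Value{"name": cty.StringVal("web")})

	// Normal case: splat over a tuple of objects yields a tuple of names.
	eval(`xs[*].name`, map[string]cty.Value{
		"xs": cty.TupleVal([]cty.Value{obj, obj}),
	})

	// Auto-upgrade: a single object acts like a one-item tuple...
	eval(`xs[*].name`, map[string]cty.Value{"xs": obj})

	// ...and a typed null acts like an empty tuple.
	eval(`xs[*].name`, map[string]cty.Value{"xs": cty.NullVal(cty.String)})
}
```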
+ autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType()) + + if sourceVal.IsNull() { + if autoUpgrade { + return cty.EmptyTupleVal, diags + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Splat of null value", + Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.", + Subject: e.Source.Range().Ptr(), + Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), + Expression: e.Source, + EvalContext: ctx, + }) + return cty.DynamicVal, diags + } + + if autoUpgrade { + sourceVal = cty.TupleVal([]cty.Value{sourceVal}) + sourceTy = sourceVal.Type() + } + + // We'll compute our result type lazily if we need it. In the normal case + // it's inferred automatically from the value we construct. + resultTy := func() (cty.Type, hcl.Diagnostics) { + chiCtx := ctx.NewChild() + var diags hcl.Diagnostics + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + ety := sourceTy.ElementType() + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + return cty.List(val.Type()), diags + case sourceTy.IsTupleType(): + etys := sourceTy.TupleElementTypes() + resultTys := make([]cty.Type, 0, len(etys)) + for _, ety := range etys { + e.Item.setValue(chiCtx, cty.UnknownVal(ety)) + val, itemDiags := e.Each.Value(chiCtx) + diags = append(diags, itemDiags...) + e.Item.clearValue(chiCtx) // clean up our temporary value + resultTys = append(resultTys, val.Type()) + } + return cty.Tuple(resultTys), diags + default: + // Should never happen because of our promotion to list above. + return cty.DynamicPseudoType, diags + } + } + + if !sourceVal.IsKnown() { + // We can't produce a known result in this case, but we'll still + // indicate what the result type would be, allowing any downstream type + // checking to proceed. + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.UnknownVal(ty), diags + } + + vals := make([]cty.Value, 0, sourceVal.LengthInt()) + it := sourceVal.ElementIterator() + if ctx == nil { + // we need a context to use our AnonSymbolExpr, so we'll just + // make an empty one here to use as a placeholder. + ctx = ctx.NewChild() + } + isKnown := true + for it.Next() { + _, sourceItem := it.Element() + e.Item.setValue(ctx, sourceItem) + newItem, itemDiags := e.Each.Value(ctx) + diags = append(diags, itemDiags...) + if itemDiags.HasErrors() { + isKnown = false + } + vals = append(vals, newItem) + } + e.Item.clearValue(ctx) // clean up our temporary value + + if !isKnown { + // We'll ingore the resultTy diagnostics in this case since they + // will just be the same errors we saw while iterating above. + ty, _ := resultTy() + return cty.UnknownVal(ty), diags + } + + switch { + case sourceTy.IsListType() || sourceTy.IsSetType(): + if len(vals) == 0 { + ty, tyDiags := resultTy() + diags = append(diags, tyDiags...) + return cty.ListValEmpty(ty.ElementType()), diags + } + return cty.ListVal(vals), diags + default: + return cty.TupleVal(vals), diags + } +} + +func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { + w(e.Source) + w(e.Each) +} + +func (e *SplatExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *SplatExpr) StartRange() hcl.Range { + return e.MarkerRange +} + +// AnonSymbolExpr is used as a placeholder for a value in an expression that +// can be applied dynamically to any value at runtime. 
+// +// This is a rather odd, synthetic expression. It is used as part of the +// representation of splat expressions as a placeholder for the current item +// being visited in the splat evaluation. +// +// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called +// directly then cty.DynamicVal will be returned. Instead, it is evaluated +// in terms of another node (i.e. a splat expression) which temporarily +// assigns it a value. +type AnonSymbolExpr struct { + SrcRange hcl.Range + + // values and its associated lock are used to isolate concurrent + // evaluations of a symbol from one another. It is the calling application's + // responsibility to ensure that the same splat expression is not evalauted + // concurrently within the _same_ EvalContext, but it is fine and safe to + // do cuncurrent evaluations with distinct EvalContexts. + values map[*hcl.EvalContext]cty.Value + valuesLock sync.RWMutex +} + +func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + if ctx == nil { + return cty.DynamicVal, nil + } + + e.valuesLock.RLock() + defer e.valuesLock.RUnlock() + + val, exists := e.values[ctx] + if !exists { + return cty.DynamicVal, nil + } + return val, nil +} + +// setValue sets a temporary local value for the expression when evaluated +// in the given context, which must be non-nil. +func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + e.values = make(map[*hcl.EvalContext]cty.Value) + } + if ctx == nil { + panic("can't setValue for a nil EvalContext") + } + e.values[ctx] = val +} + +func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { + e.valuesLock.Lock() + defer e.valuesLock.Unlock() + + if e.values == nil { + return + } + if ctx == nil { + panic("can't clearValue for a nil EvalContext") + } + delete(e.values, ctx) +} + +func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) { + // AnonSymbolExpr is a leaf node in the tree +} + +func (e *AnonSymbolExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *AnonSymbolExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go new file mode 100644 index 00000000..7f59f1a2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go @@ -0,0 +1,268 @@ +package hclsyntax + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" +) + +type Operation struct { + Impl function.Function + Type cty.Type +} + +var ( + OpLogicalOr = &Operation{ + Impl: stdlib.OrFunc, + Type: cty.Bool, + } + OpLogicalAnd = &Operation{ + Impl: stdlib.AndFunc, + Type: cty.Bool, + } + OpLogicalNot = &Operation{ + Impl: stdlib.NotFunc, + Type: cty.Bool, + } + + OpEqual = &Operation{ + Impl: stdlib.EqualFunc, + Type: cty.Bool, + } + OpNotEqual = &Operation{ + Impl: stdlib.NotEqualFunc, + Type: cty.Bool, + } + + OpGreaterThan = &Operation{ + Impl: stdlib.GreaterThanFunc, + Type: cty.Bool, + } + OpGreaterThanOrEqual = &Operation{ + Impl: stdlib.GreaterThanOrEqualToFunc, + Type: cty.Bool, + } + OpLessThan = &Operation{ + Impl: stdlib.LessThanFunc, + Type: cty.Bool, + } + OpLessThanOrEqual = &Operation{ + Impl: 
stdlib.LessThanOrEqualToFunc, + Type: cty.Bool, + } + + OpAdd = &Operation{ + Impl: stdlib.AddFunc, + Type: cty.Number, + } + OpSubtract = &Operation{ + Impl: stdlib.SubtractFunc, + Type: cty.Number, + } + OpMultiply = &Operation{ + Impl: stdlib.MultiplyFunc, + Type: cty.Number, + } + OpDivide = &Operation{ + Impl: stdlib.DivideFunc, + Type: cty.Number, + } + OpModulo = &Operation{ + Impl: stdlib.ModuloFunc, + Type: cty.Number, + } + OpNegate = &Operation{ + Impl: stdlib.NegateFunc, + Type: cty.Number, + } +) + +var binaryOps []map[TokenType]*Operation + +func init() { + // This operation table maps from the operator's token type + // to the AST operation type. All expressions produced from + // binary operators are BinaryOp nodes. + // + // Binary operator groups are listed in order of precedence, with + // the *lowest* precedence first. Operators within the same group + // have left-to-right associativity. + binaryOps = []map[TokenType]*Operation{ + { + TokenOr: OpLogicalOr, + }, + { + TokenAnd: OpLogicalAnd, + }, + { + TokenEqualOp: OpEqual, + TokenNotEqual: OpNotEqual, + }, + { + TokenGreaterThan: OpGreaterThan, + TokenGreaterThanEq: OpGreaterThanOrEqual, + TokenLessThan: OpLessThan, + TokenLessThanEq: OpLessThanOrEqual, + }, + { + TokenPlus: OpAdd, + TokenMinus: OpSubtract, + }, + { + TokenStar: OpMultiply, + TokenSlash: OpDivide, + TokenPercent: OpModulo, + }, + } +} + +type BinaryOpExpr struct { + LHS Expression + Op *Operation + RHS Expression + + SrcRange hcl.Range +} + +func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.LHS) + w(e.RHS) +} + +func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly two arguments + params := impl.Params() + lhsParam := params[0] + rhsParam := params[1] + + var diags hcl.Diagnostics + + givenLHSVal, lhsDiags := e.LHS.Value(ctx) + givenRHSVal, rhsDiags := e.RHS.Value(ctx) + diags = append(diags, lhsDiags...) + diags = append(diags, rhsDiags...) + + lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), + Subject: e.LHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.LHS, + EvalContext: ctx, + }) + } + rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), + Subject: e.RHS.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.RHS, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{lhsVal, rhsVal} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. 
+ Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *BinaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *BinaryOpExpr) StartRange() hcl.Range { + return e.LHS.StartRange() +} + +type UnaryOpExpr struct { + Op *Operation + Val Expression + + SrcRange hcl.Range + SymbolRange hcl.Range +} + +func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { + w(e.Val) +} + +func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + impl := e.Op.Impl // assumed to be a function taking exactly one argument + params := impl.Params() + param := params[0] + + givenVal, diags := e.Val.Value(ctx) + + val, err := convert.Convert(givenVal, param.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid operand", + Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), + Subject: e.Val.Range().Ptr(), + Context: &e.SrcRange, + Expression: e.Val, + EvalContext: ctx, + }) + } + + if diags.HasErrors() { + // Don't actually try the call if we have errors already, since the + // this will probably just produce a confusing duplicative diagnostic. + return cty.UnknownVal(e.Op.Type), diags + } + + args := []cty.Value{val} + result, err := impl.Call(args) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + // FIXME: This diagnostic is useless. + Severity: hcl.DiagError, + Summary: "Operation failed", + Detail: fmt.Sprintf("Error during operation: %s.", err), + Subject: &e.SrcRange, + Expression: e, + EvalContext: ctx, + }) + return cty.UnknownVal(e.Op.Type), diags + } + + return result, diags +} + +func (e *UnaryOpExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *UnaryOpExpr) StartRange() hcl.Range { + return e.SymbolRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go new file mode 100644 index 00000000..ca3dae18 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go @@ -0,0 +1,220 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +type TemplateExpr struct { + Parts []Expression + + SrcRange hcl.Range +} + +func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { + for _, part := range e.Parts { + w(part) + } +} + +func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + buf := &bytes.Buffer{} + var diags hcl.Diagnostics + isKnown := true + + for _, part := range e.Parts { + partVal, partDiags := part.Value(ctx) + diags = append(diags, partDiags...) + + if partVal.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "The expression result is null. Cannot include a null value in a string template.", + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + if !partVal.IsKnown() { + // If any part is unknown then the result as a whole must be + // unknown too. 
We'll keep on processing the rest of the parts + // anyway, because we want to still emit any diagnostics resulting + // from evaluating those. + isKnown = false + continue + } + + strVal, err := convert.Convert(partVal, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include the given value in a string template: %s.", + err.Error(), + ), + Subject: part.Range().Ptr(), + Context: &e.SrcRange, + Expression: part, + EvalContext: ctx, + }) + continue + } + + buf.WriteString(strVal.AsString()) + } + + if !isKnown { + return cty.UnknownVal(cty.String), diags + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateExpr) StartRange() hcl.Range { + return e.Parts[0].StartRange() +} + +// IsStringLiteral returns true if and only if the template consists only of +// single string literal, as would be created for a simple quoted string like +// "foo". +// +// If this function returns true, then calling Value on the same expression +// with a nil EvalContext will return the literal value. +// +// Note that "${"foo"}", "${1}", etc aren't considered literal values for the +// purposes of this method, because the intent of this method is to identify +// situations where the user seems to be explicitly intending literal string +// interpretation, not situations that result in literals as a technicality +// of the template expression unwrapping behavior. +func (e *TemplateExpr) IsStringLiteral() bool { + if len(e.Parts) != 1 { + return false + } + _, ok := e.Parts[0].(*LiteralValueExpr) + return ok +} + +// TemplateJoinExpr is used to convert tuples of strings produced by template +// constructs (i.e. for loops) into flat strings, by converting the values +// tos strings and joining them. This AST node is not used directly; it's +// produced as part of the AST of a "for" loop in a template. +type TemplateJoinExpr struct { + Tuple Expression +} + +func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { + w(e.Tuple) +} + +func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + tuple, diags := e.Tuple.Value(ctx) + + if tuple.IsNull() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got null tuple") + } + if tuple.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + if !tuple.Type().IsTupleType() { + // This indicates a bug in the code that constructed the AST. + panic("TemplateJoinExpr got non-tuple tuple") + } + if !tuple.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf := &bytes.Buffer{} + it := tuple.ElementIterator() + for it.Next() { + _, val := it.Element() + + if val.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "An iteration result is null. 
Cannot include a null value in a string template.", + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if val.Type() == cty.DynamicPseudoType { + return cty.UnknownVal(cty.String), diags + } + strVal, err := convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template interpolation value", + Detail: fmt.Sprintf( + "Cannot include one of the interpolation results into the string template: %s.", + err.Error(), + ), + Subject: e.Range().Ptr(), + Expression: e, + EvalContext: ctx, + }) + continue + } + if !val.IsKnown() { + return cty.UnknownVal(cty.String), diags + } + + buf.WriteString(strVal.AsString()) + } + + return cty.StringVal(buf.String()), diags +} + +func (e *TemplateJoinExpr) Range() hcl.Range { + return e.Tuple.Range() +} + +func (e *TemplateJoinExpr) StartRange() hcl.Range { + return e.Tuple.StartRange() +} + +// TemplateWrapExpr is used instead of a TemplateExpr when a template +// consists _only_ of a single interpolation sequence. In that case, the +// template's result is the single interpolation's result, verbatim with +// no type conversions. +type TemplateWrapExpr struct { + Wrapped Expression + + SrcRange hcl.Range +} + +func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { + w(e.Wrapped) +} + +func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return e.Wrapped.Value(ctx) +} + +func (e *TemplateWrapExpr) Range() hcl.Range { + return e.SrcRange +} + +func (e *TemplateWrapExpr) StartRange() hcl.Range { + return e.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go new file mode 100644 index 00000000..9177092c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_vars.go @@ -0,0 +1,76 @@ +package hclsyntax + +// Generated by expression_vars_get.go. DO NOT EDIT. +// Run 'go generate' on this package to update the set of functions here. 
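A sketch of the distinction between TemplateExpr and TemplateWrapExpr above, assuming the package's public ParseTemplate helper (defined elsewhere in this library, not in this hunk): a mixed template concatenates string-converted parts, while a template that is exactly one interpolation passes its value through with no conversion.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"name":  cty.StringVal("world"),
			"count": cty.NumberIntVal(3),
		},
	}

	// Mixed literal and interpolation parts: a TemplateExpr, so each part
	// is evaluated, converted to string, and concatenated.
	expr, diags := hclsyntax.ParseTemplate(
		[]byte(`Hello, ${name}! You have ${count} items.`),
		"demo.tmpl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(ctx)
	fmt.Println(val.AsString()) // Hello, world! You have 3 items.

	// A lone interpolation: a TemplateWrapExpr, so the number is returned
	// verbatim rather than being converted to a string.
	expr2, _ := hclsyntax.ParseTemplate(
		[]byte(`${count}`), "demo.tmpl", hcl.Pos{Line: 1, Column: 1},
	)
	val2, _ := expr2.Value(ctx)
	fmt.Println(val2.Type().FriendlyName()) // number
}
```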
+ +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func (e *AnonSymbolExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *BinaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ConditionalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ForExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *FunctionCallExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *IndexExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *LiteralValueExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *RelativeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *ScopeTraversalExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *SplatExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateJoinExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TemplateWrapExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *TupleConsExpr) Variables() []hcl.Traversal { + return Variables(e) +} + +func (e *UnaryOpExpr) Variables() []hcl.Traversal { + return Variables(e) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go new file mode 100644 index 00000000..490c0255 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/file.go @@ -0,0 +1,20 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// File is the top-level object resulting from parsing a configuration file. 
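Each generated method above delegates to the package-level Variables function (defined elsewhere in this library), which walks the whole node tree. A sketch of what callers get back; the expression text is invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`a + b[0] + "${c}"`), "demo.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// The walk finds references nested inside index operations and
	// template interpolations too, so this prints a, b, and c.
	for _, traversal := range expr.Variables() {
		fmt.Println(traversal.RootName())
	}
}
```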
+type File struct { + Body *Body + Bytes []byte +} + +func (f *File) AsHCLFile() *hcl.File { + return &hcl.File{ + Body: f.Body, + Bytes: f.Bytes, + + // TODO: The Nav object, once we have an implementation of it + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go new file mode 100644 index 00000000..841656a6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/generate.go @@ -0,0 +1,9 @@ +package hclsyntax + +//go:generate go run expression_vars_gen.go +//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl +//go:generate ragel -Z scan_tokens.rl +//go:generate gofmt -w scan_tokens.go +//go:generate ragel -Z scan_string_lit.rl +//go:generate gofmt -w scan_string_lit.go +//go:generate stringer -type TokenType -output token_type_string.go diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go new file mode 100644 index 00000000..eef8b962 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/keywords.go @@ -0,0 +1,21 @@ +package hclsyntax + +import ( + "bytes" +) + +type Keyword []byte + +var forKeyword = Keyword([]byte{'f', 'o', 'r'}) +var inKeyword = Keyword([]byte{'i', 'n'}) +var ifKeyword = Keyword([]byte{'i', 'f'}) +var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'}) +var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'}) +var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'}) + +func (kw Keyword) TokenMatches(token Token) bool { + if token.Type != TokenIdent { + return false + } + return bytes.Equal([]byte(kw), token.Bytes) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go new file mode 100644 index 00000000..c8c97f37 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go @@ -0,0 +1,59 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/hcl2/hcl" +) + +type navigation struct { + root *Body +} + +// Implementation of hcled.ContextString +func (n navigation) ContextString(offset int) string { + // We will walk our top-level blocks until we find one that contains + // the given offset, and then construct a representation of the header + // of the block. + + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return "" + } + + if len(block.Labels) == 0 { + // Easy case! 
+ return block.Type + } + + buf := &bytes.Buffer{} + buf.WriteString(block.Type) + for _, label := range block.Labels { + fmt.Fprintf(buf, " %q", label) + } + return buf.String() +} + +func (n navigation) ContextDefRange(offset int) hcl.Range { + var block *Block + for _, candidate := range n.root.Blocks { + if candidate.Range().ContainsOffset(offset) { + block = candidate + break + } + } + + if block == nil { + return hcl.Range{} + } + + return block.DefRange() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go new file mode 100644 index 00000000..75812e63 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go @@ -0,0 +1,22 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// Node is the abstract type that every AST node implements. +// +// This is a closed interface, so it cannot be implemented from outside of +// this package. +type Node interface { + // This is the mechanism by which the public-facing walk functions + // are implemented. Implementations should call the given function + // for each child node and then replace that node with its return value. + // The return value might just be the same node, for non-transforming + // walks. + walkChildNodes(w internalWalkFunc) + + Range() hcl.Range +} + +type internalWalkFunc func(Node) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go new file mode 100644 index 00000000..772ebae2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go @@ -0,0 +1,2044 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "strconv" + "unicode/utf8" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +type parser struct { + *peeker + + // set to true if any recovery is attempted. The parser can use this + // to attempt to reduce error noise by suppressing "bad token" errors + // in recovery mode, assuming that the recovery heuristics have failed + // in this case and left the peeker in a wrong place. + recovery bool +} + +func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) { + attrs := Attributes{} + blocks := Blocks{} + var diags hcl.Diagnostics + + startRange := p.PrevRange() + var endRange hcl.Range + +Token: + for { + next := p.Peek() + if next.Type == end { + endRange = p.NextRange() + p.Read() + break Token + } + + switch next.Type { + case TokenNewline: + p.Read() + continue + case TokenIdent: + item, itemDiags := p.ParseBodyItem() + diags = append(diags, itemDiags...) + switch titem := item.(type) { + case *Block: + blocks = append(blocks, titem) + case *Attribute: + if existing, exists := attrs[titem.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute redefined", + Detail: fmt.Sprintf( + "The argument %q was already set at %s. Each argument may be set only once.", + titem.Name, existing.NameRange.String(), + ), + Subject: &titem.NameRange, + }) + } else { + attrs[titem.Name] = titem + } + default: + // This should never happen for valid input, but may if a + // syntax error was detected in ParseBodyItem that prevented + // it from even producing a partially-broken item. In that + // case, it would've left at least one error in the diagnostics + // slice we already dealt with above. 
+ // + // We'll assume ParseBodyItem attempted recovery to leave + // us in a reasonable position to try parsing the next item. + continue + } + default: + bad := p.Read() + if !p.recovery { + if bad.Type == TokenOQuote { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid argument name", + Detail: "Argument names must not be quoted.", + Subject: &bad.Range, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &bad.Range, + }) + } + } + endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics + + p.recover(end) // attempt to recover to the token after the end of this body + break Token + } + } + + return &Body{ + Attributes: attrs, + Blocks: blocks, + + SrcRange: hcl.RangeBetween(startRange, endRange), + EndRange: hcl.Range{ + Filename: endRange.Filename, + Start: endRange.End, + End: endRange.End, + }, + }, diags +} + +func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + next := p.Peek() + + switch next.Type { + case TokenEqual: + return p.finishParsingBodyAttribute(ident, false) + case TokenOQuote, TokenOBrace, TokenIdent: + return p.finishParsingBodyBlock(ident) + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return nil, nil +} + +// parseSingleAttrBody is a weird variant of ParseBody that deals with the +// body of a nested block containing only one attribute value all on a single +// line, like foo { bar = baz } . It expects to find a single attribute item +// immediately followed by the end token type with no intervening newlines. +func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) { + ident := p.Read() + if ident.Type != TokenIdent { + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here.", + Subject: &ident.Range, + }, + } + } + + var attr *Attribute + var diags hcl.Diagnostics + + next := p.Peek() + + switch next.Type { + case TokenEqual: + node, attrDiags := p.finishParsingBodyAttribute(ident, true) + diags = append(diags, attrDiags...) + attr = node.(*Attribute) + case TokenOQuote, TokenOBrace, TokenIdent: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument definition required", + Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. 
To define a nested block, place it on a line of its own within its parent block.", ident.Bytes), + Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(), + }, + } + default: + p.recoverAfterBodyItem() + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Argument or block definition required", + Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.", + Subject: &ident.Range, + }, + } + } + + return &Body{ + Attributes: Attributes{ + string(ident.Bytes): attr, + }, + + SrcRange: attr.SrcRange, + EndRange: hcl.Range{ + Filename: attr.SrcRange.Filename, + Start: attr.SrcRange.End, + End: attr.SrcRange.End, + }, + }, diags + +} + +func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) { + eqTok := p.Read() // eat equals token + if eqTok.Type != TokenEqual { + // should never happen if caller behaves + panic("finishParsingBodyAttribute called with next not equals") + } + + var endRange hcl.Range + + expr, diags := p.ParseExpression() + if p.recovery && diags.HasErrors() { + // recovery within expressions tends to be tricky, so we've probably + // landed somewhere weird. We'll try to reset to the start of a body + // item so parsing can continue. + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + if !singleLine { + end := p.Peek() + if end.Type != TokenNewline && end.Type != TokenEOF { + if !p.recovery { + summary := "Missing newline after argument" + detail := "An argument definition must end with a newline." + + if end.Type == TokenComma { + summary = "Unexpected comma after argument" + detail = "Argument definitions must be separated by newlines, not commas. " + detail + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: summary, + Detail: detail, + Subject: &end.Range, + Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), + }) + } + endRange = p.PrevRange() + p.recoverAfterBodyItem() + } else { + endRange = p.PrevRange() + p.Read() // eat newline + } + } + } + + return &Attribute{ + Name: string(ident.Bytes), + Expr: expr, + + SrcRange: hcl.RangeBetween(ident.Range, endRange), + NameRange: ident.Range, + EqualsRange: eqTok.Range, + }, diags +} + +func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) { + var blockType = string(ident.Bytes) + var diags hcl.Diagnostics + var labels []string + var labelRanges []hcl.Range + + var oBrace Token + +Token: + for { + tok := p.Peek() + + switch tok.Type { + + case TokenOBrace: + oBrace = p.Read() + break Token + + case TokenOQuote: + label, labelRange, labelDiags := p.parseQuotedStringLiteral() + diags = append(diags, labelDiags...) + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + // parseQuoteStringLiteral recovers up to the closing quote + // if it encounters problems, so we can continue looking for + // more labels and eventually the block body even. 
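A sketch of the block-header parsing above as seen from the public API: a quoted label followed by a brace-delimited body, read back through the concrete *hclsyntax.Body. The configuration contents are invented; single-line blocks with exactly one argument, handled later in this function, parse to the same structures.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

const src = `
service "web" {
  port = 8080
}
`

func main() {
	file, diags := hclsyntax.ParseConfig([]byte(src), "demo.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// ParseConfig returns the concrete body type, which exposes the
	// parsed blocks and attributes directly.
	body := file.Body.(*hclsyntax.Body)
	for _, block := range body.Blocks {
		fmt.Println(block.Type, block.Labels) // service [web]
		for name, attr := range block.Body.Attributes {
			val, _ := attr.Expr.Value(nil)
			fmt.Println(name, "=", val.GoString()) // port = 8080
		}
	}
}
```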
+ + case TokenIdent: + tok = p.Read() // eat token + label, labelRange := string(tok.Bytes), tok.Range + labels = append(labels, label) + labelRanges = append(labelRanges, labelRange) + + default: + switch tok.Type { + case TokenEqual: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + case TokenNewline: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid block definition", + Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.", + Subject: &tok.Range, + Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), + }) + } + } + + p.recoverAfterBodyItem() + + return &Block{ + Type: blockType, + Labels: labels, + Body: &Body{ + SrcRange: ident.Range, + EndRange: ident.Range, + }, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: ident.Range, // placeholder + CloseBraceRange: ident.Range, // placeholder + }, diags + } + } + + // Once we fall out here, the peeker is pointed just after our opening + // brace, so we can begin our nested body parsing. + var body *Body + var bodyDiags hcl.Diagnostics + switch p.Peek().Type { + case TokenNewline, TokenEOF, TokenCBrace: + body, bodyDiags = p.ParseBody(TokenCBrace) + default: + // Special one-line, single-attribute block parsing mode. + body, bodyDiags = p.parseSingleAttrBody(TokenCBrace) + switch p.Peek().Type { + case TokenCBrace: + p.Read() // the happy path - just consume the closing brace + case TokenComma: + // User seems to be trying to use the object-constructor + // comma-separated style, which isn't permitted for blocks. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + case TokenNewline: + // We don't allow weird mixtures of single and multi-line syntax. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + p.recover(TokenCBrace) + default: + // Some other weird thing is going on. Since we can't guess a likely + // user intent for this one, we'll skip it if we're already in + // recovery mode. 
+ if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid single-argument block definition", + Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenCBrace) + } + } + diags = append(diags, bodyDiags...) + cBraceRange := p.PrevRange() + + eol := p.Peek() + if eol.Type == TokenNewline || eol.Type == TokenEOF { + p.Read() // eat newline + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing newline after block definition", + Detail: "A block definition must end with a newline.", + Subject: &eol.Range, + Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(), + }) + } + p.recoverAfterBodyItem() + } + + // We must never produce a nil body, since the caller may attempt to + // do analysis of a partial result when there's an error, so we'll + // insert a placeholder if we otherwise failed to produce a valid + // body due to one of the syntax error paths above. + if body == nil && diags.HasErrors() { + body = &Body{ + SrcRange: hcl.RangeBetween(oBrace.Range, cBraceRange), + EndRange: cBraceRange, + } + } + + return &Block{ + Type: blockType, + Labels: labels, + Body: body, + + TypeRange: ident.Range, + LabelRanges: labelRanges, + OpenBraceRange: oBrace.Range, + CloseBraceRange: cBraceRange, + }, diags +} + +func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) { + return p.parseTernaryConditional() +} + +func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) { + // The ternary conditional operator (.. ? .. : ..) behaves somewhat + // like a binary operator except that the "symbol" is itself + // an expression enclosed in two punctuation characters. + // The middle expression is parsed as if the ? and : symbols + // were parentheses. The "rhs" (the "false expression") is then + // treated right-associatively so it behaves similarly to the + // middle in terms of precedence. + + startRange := p.NextRange() + var condExpr, trueExpr, falseExpr Expression + var diags hcl.Diagnostics + + condExpr, condDiags := p.parseBinaryOps(binaryOps) + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + return condExpr, diags + } + + questionMark := p.Peek() + if questionMark.Type != TokenQuestion { + return condExpr, diags + } + + p.Read() // eat question mark + + trueExpr, trueDiags := p.ParseExpression() + diags = append(diags, trueDiags...) + if p.recovery && trueDiags.HasErrors() { + return condExpr, diags + } + + colon := p.Peek() + if colon.Type != TokenColon { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing false expression in conditional", + Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.", + Subject: &colon.Range, + Context: hcl.RangeBetween(startRange, colon.Range).Ptr(), + }) + return condExpr, diags + } + + p.Read() // eat colon + + falseExpr, falseDiags := p.ParseExpression() + diags = append(diags, falseDiags...) 
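A sketch of the ternary parsing above: the condition is parsed by parseBinaryOps first, so the comparison binds before `?` and `:`. The variable `n` is invented for the demo:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Reads as (n > 2) ? "big" : "small".
	expr, diags := hclsyntax.ParseExpression(
		[]byte(`n > 2 ? "big" : "small"`), "demo.hcl", hcl.Pos{Line: 1, Column: 1},
	)
	if diags.HasErrors() {
		panic(diags.Error())
	}

	for _, n := range []int64{1, 5} {
		ctx := &hcl.EvalContext{
			Variables: map[string]cty.Value{"n": cty.NumberIntVal(n)},
		}
		val, diags := expr.Value(ctx)
		if diags.HasErrors() {
			panic(diags.Error())
		}
		fmt.Printf("n=%d -> %s\n", n, val.AsString()) // small, then big
	}
}
```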
+ if p.recovery && falseDiags.HasErrors() { + return condExpr, diags + } + + return &ConditionalExpr{ + Condition: condExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()), + }, diags +} + +// parseBinaryOps calls itself recursively to work through all of the +// operator precedence groups, and then eventually calls parseExpressionTerm +// for each operand. +func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) { + if len(ops) == 0 { + // We've run out of operators, so now we'll just try to parse a term. + return p.parseExpressionWithTraversals() + } + + thisLevel := ops[0] + remaining := ops[1:] + + var lhs, rhs Expression + var operation *Operation + var diags hcl.Diagnostics + + // Parse a term that might be the first operand of a binary + // operation or it might just be a standalone term. + // We won't know until we've parsed it and can look ahead + // to see if there's an operator token for this level. + lhs, lhsDiags := p.parseBinaryOps(remaining) + diags = append(diags, lhsDiags...) + if p.recovery && lhsDiags.HasErrors() { + return lhs, diags + } + + // We'll keep eating up operators until we run out, so that operators + // with the same precedence will combine in a left-associative manner: + // a+b+c => (a+b)+c, not a+(b+c) + // + // Should we later want to have right-associative operators, a way + // to achieve that would be to call back up to ParseExpression here + // instead of iteratively parsing only the remaining operators. + for { + next := p.Peek() + var newOp *Operation + var ok bool + if newOp, ok = thisLevel[next.Type]; !ok { + break + } + + // Are we extending an expression started on the previous iteration? + if operation != nil { + lhs = &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + } + } + + operation = newOp + p.Read() // eat operator token + var rhsDiags hcl.Diagnostics + rhs, rhsDiags = p.parseBinaryOps(remaining) + diags = append(diags, rhsDiags...) + if p.recovery && rhsDiags.HasErrors() { + return lhs, diags + } + } + + if operation == nil { + return lhs, diags + } + + return &BinaryOpExpr{ + LHS: lhs, + Op: operation, + RHS: rhs, + + SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()), + }, diags +} + +func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { + term, diags := p.parseExpressionTerm() + ret, moreDiags := p.parseExpressionTraversals(term) + diags = append(diags, moreDiags...) + return ret, diags +} + +func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := from + +Traversal: + for { + next := p.Peek() + + switch next.Type { + case TokenDot: + // Attribute access or splat + dot := p.Read() + attrTok := p.Peek() + + switch attrTok.Type { + case TokenIdent: + attrTok = p.Read() // eat token + name := string(attrTok.Bytes) + rng := hcl.RangeBetween(dot.Range, attrTok.Range) + step := hcl.TraverseAttr{ + Name: name, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenNumberLit: + // This is a weird form we inherited from HIL, allowing numbers + // to be used as attributes as a weird way of writing [n]. + // This was never actually a first-class thing in HIL, but + // HIL tolerated sequences like .0. in its variable names and + // calling applications like Terraform exploited that to + // introduce indexing syntax where none existed. 
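A sketch of the precedence and left-associativity rules that parseBinaryOps implements above; the operands are plain literals, so a nil EvalContext suffices:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func eval(src string) {
	expr, diags := hclsyntax.ParseExpression([]byte(src), "demo.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, diags := expr.Value(nil)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Printf("%-14s = %s\n", src, val.GoString())
}

func main() {
	eval(`2 + 3 * 4`)     // 14: * is in a higher-precedence group than +
	eval(`10 - 3 - 2`)    // 5: same-group operators combine left-to-right
	eval(`1 < 2 && true`) // true: comparisons bind tighter than &&
}
```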
+ numTok := p.Read() // eat token + attrTok = numTok + + // This syntax is ambiguous if multiple indices are used in + // succession, like foo.0.1.baz: that actually parses as + // a fractional number 0.1. Since we're only supporting this + // syntax for compatibility with legacy Terraform + // configurations, and Terraform does not tend to have lists + // of lists, we'll choose to reject that here with a helpful + // error message, rather than failing later because the index + // isn't a whole number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + break + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) + + rng := hcl.RangeBetween(dot.Range, numTok.Range) + step := hcl.TraverseIndex{ + Key: numVal, + SrcRange: rng, + } + + ret = makeRelativeTraversal(ret, step, rng) + + case TokenStar: + // "Attribute-only" splat expression. + // (This is a kinda weird construct inherited from HIL, which + // behaves a bit like a [*] splat except that it is only able + // to do attribute traversals into each of its elements, + // whereas foo[*] can support _any_ traversal. + marker := p.Read() // eat star + trav := make(hcl.Traversal, 0, 1) + var firstRange, lastRange hcl.Range + firstRange = p.NextRange() + for p.Peek().Type == TokenDot { + dot := p.Read() + + if p.Peek().Type == TokenNumberLit { + // Continuing the "weird stuff inherited from HIL" + // theme, we also allow numbers as attribute names + // inside splats and interpret them as indexing + // into a list, for expressions like: + // foo.bar.*.baz.0.foo + numTok := p.Read() + + // Weird special case if the user writes something + // like foo.bar.*.baz.0.0.foo, where 0.0 parses + // as a number. + if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 { + first := numTok.Bytes[:dotIdx] + second := numTok.Bytes[dotIdx+1:] + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid legacy index syntax", + Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second), + Subject: &attrTok.Range, + }) + trav = append(trav, hcl.TraverseIndex{ + Key: cty.DynamicVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + numVal, numDiags := p.numberLitValue(numTok) + diags = append(diags, numDiags...) 
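A sketch of the legacy attribute-as-index behavior handled above, assuming an invented `subnets` value; both spellings resolve to the same traversal:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"subnets": cty.TupleVal([]cty.Value{
				cty.ObjectVal(map[string]cty.Value{"cidr": cty.StringVal("10.0.0.0/24")}),
				cty.ObjectVal(map[string]cty.Value{"cidr": cty.StringVal("10.0.1.0/24")}),
			}),
		},
	}

	// The HIL-style .1. attribute is reinterpreted as the index [1].
	for _, src := range []string{`subnets[1].cidr`, `subnets.1.cidr`} {
		expr, diags := hclsyntax.ParseExpression([]byte(src), "demo.hcl", hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			panic(diags.Error())
		}
		val, diags := expr.Value(ctx)
		if diags.HasErrors() {
			panic(diags.Error())
		}
		fmt.Printf("%s -> %s\n", src, val.AsString()) // 10.0.1.0/24 both times
	}
}
```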
+ trav = append(trav, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(dot.Range, numTok.Range), + }) + lastRange = numTok.Range + continue + } + + if p.Peek().Type != TokenIdent { + if !p.recovery { + if p.Peek().Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Nested splat expression not allowed", + Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.", + Subject: p.Peek().Range.Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + } + } + p.setRecovery() + continue Traversal + } + + attrTok := p.Read() + trav = append(trav, hcl.TraverseAttr{ + Name: string(attrTok.Bytes), + SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range), + }) + lastRange = attrTok.Range + } + + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(dot.Range, marker.Range), + } + var travExpr Expression + if len(trav) == 0 { + travExpr = itemExpr + } else { + travExpr = &RelativeTraversalExpr{ + Source: itemExpr, + Traversal: trav, + SrcRange: hcl.RangeBetween(firstRange, lastRange), + } + } + + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(dot.Range, lastRange), + MarkerRange: hcl.RangeBetween(dot.Range, marker.Range), + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid attribute name", + Detail: "An attribute name is required after a dot.", + Subject: &attrTok.Range, + }) + // This leaves the peeker in a bad place, so following items + // will probably be misparsed until we hit something that + // allows us to re-sync. + // + // We will probably need to do something better here eventually + // in order to support autocomplete triggered by typing a + // period. + p.setRecovery() + } + + case TokenOBrack: + // Indexing of a collection. + // This may or may not be a hcl.Traverser, depending on whether + // the key value is something constant. + + open := p.Read() + switch p.Peek().Type { + case TokenStar: + // This is a full splat expression, like foo[*], which consumes + // the rest of the traversal steps after it using a recursive + // call to this function. + p.Read() // consume star + close := p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on splat index", + Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + // Splat expressions use a special "anonymous symbol" as a + // placeholder in an expression to be evaluated once for each + // item in the source expression. + itemExpr := &AnonSymbolExpr{ + SrcRange: hcl.RangeBetween(open.Range, close.Range), + } + // Now we'll recursively call this same function to eat any + // remaining traversal steps against the anonymous symbol. + travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr) + diags = append(diags, nestedDiags...) 
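A sketch contrasting the attribute-only `.*` splat with the full `[*]` splat, whose recursive call above lets it consume any following traversal steps; the `users` value is invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	user := func(name, role string) cty.Value {
		return cty.ObjectVal(map[string]cty.Value{
			"name":  cty.StringVal(name),
			"roles": cty.TupleVal([]cty.Value{cty.StringVal(role)}),
		})
	}
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"users": cty.TupleVal([]cty.Value{user("ana", "admin"), user("bo", "dev")}),
		},
	}

	// users.*.name can only step through attributes; users[*].roles[0]
	// applies an attribute step and an index step to each element.
	for _, src := range []string{`users.*.name`, `users[*].roles[0]`} {
		expr, diags := hclsyntax.ParseExpression([]byte(src), "demo.hcl", hcl.Pos{Line: 1, Column: 1})
		if diags.HasErrors() {
			panic(diags.Error())
		}
		val, diags := expr.Value(ctx)
		if diags.HasErrors() {
			panic(diags.Error())
		}
		fmt.Printf("%s -> %s\n", src, val.GoString())
	}
}
```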
+ + ret = &SplatExpr{ + Source: ret, + Each: travExpr, + Item: itemExpr, + + SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()), + MarkerRange: hcl.RangeBetween(open.Range, close.Range), + } + + default: + + var close Token + p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets + keyExpr, keyDiags := p.ParseExpression() + diags = append(diags, keyDiags...) + if p.recovery && keyDiags.HasErrors() { + close = p.recover(TokenCBrack) + } else { + close = p.Read() + if close.Type != TokenCBrack && !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing close bracket on index", + Detail: "The index operator must end with a closing bracket (\"]\").", + Subject: &close.Range, + }) + close = p.recover(TokenCBrack) + } + } + p.PopIncludeNewlines() + + if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { + litKey, _ := lit.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() { + litKey, _ := tmpl.Value(nil) + rng := hcl.RangeBetween(open.Range, close.Range) + step := hcl.TraverseIndex{ + Key: litKey, + SrcRange: rng, + } + ret = makeRelativeTraversal(ret, step, rng) + } else { + rng := hcl.RangeBetween(open.Range, close.Range) + ret = &IndexExpr{ + Collection: ret, + Key: keyExpr, + + SrcRange: rng, + OpenRange: open.Range, + } + } + } + + default: + break Traversal + } + } + + return ret, diags +} + +// makeRelativeTraversal takes an expression and a traverser and returns +// a traversal expression that combines the two. If the given expression +// is already a traversal, it is extended in place (mutating it) and +// returned. If it isn't, a new RelativeTraversalExpr is created and returned. +func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression { + switch texpr := expr.(type) { + case *ScopeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + case *RelativeTraversalExpr: + texpr.Traversal = append(texpr.Traversal, next) + texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng) + return texpr + default: + return &RelativeTraversalExpr{ + Source: expr, + Traversal: hcl.Traversal{next}, + SrcRange: rng, + } + } +} + +func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) { + start := p.Peek() + + switch start.Type { + case TokenOParen: + p.Read() // eat open paren + + p.PushIncludeNewlines(false) + + expr, diags := p.ParseExpression() + if diags.HasErrors() { + // attempt to place the peeker after our closing paren + // before we return, so that the next parser has some + // chance of finding a valid expression. 
+ p.recover(TokenCParen) + p.PopIncludeNewlines() + return expr, diags + } + + close := p.Peek() + if close.Type != TokenCParen { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unbalanced parentheses", + Detail: "Expected a closing parenthesis to terminate the expression.", + Subject: &close.Range, + Context: hcl.RangeBetween(start.Range, close.Range).Ptr(), + }) + p.setRecovery() + } + + p.Read() // eat closing paren + p.PopIncludeNewlines() + + return expr, diags + + case TokenNumberLit: + tok := p.Read() // eat number token + + numVal, diags := p.numberLitValue(tok) + return &LiteralValueExpr{ + Val: numVal, + SrcRange: tok.Range, + }, diags + + case TokenIdent: + tok := p.Read() // eat identifier token + + if p.Peek().Type == TokenOParen { + return p.finishParsingFunctionCall(tok) + } + + name := string(tok.Bytes) + switch name { + case "true": + return &LiteralValueExpr{ + Val: cty.True, + SrcRange: tok.Range, + }, nil + case "false": + return &LiteralValueExpr{ + Val: cty.False, + SrcRange: tok.Range, + }, nil + case "null": + return &LiteralValueExpr{ + Val: cty.NullVal(cty.DynamicPseudoType), + SrcRange: tok.Range, + }, nil + default: + return &ScopeTraversalExpr{ + Traversal: hcl.Traversal{ + hcl.TraverseRoot{ + Name: name, + SrcRange: tok.Range, + }, + }, + SrcRange: tok.Range, + }, nil + } + + case TokenOQuote, TokenOHeredoc: + open := p.Read() // eat opening marker + closer := p.oppositeBracket(open.Type) + exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open)) + + closeRange := p.PrevRange() + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: hcl.RangeBetween(open.Range, closeRange), + }, diags + + case TokenMinus: + tok := p.Read() // eat minus token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + // e.g. -46+5 should parse as (-46)+5, not -(46+5) + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpNegate, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenBang: + tok := p.Read() // eat bang token + + // Important to use parseExpressionWithTraversals rather than parseExpression + // here, otherwise we can capture a following binary expression into + // our negation. + operand, diags := p.parseExpressionWithTraversals() + return &UnaryOpExpr{ + Op: OpLogicalNot, + Val: operand, + + SrcRange: hcl.RangeBetween(tok.Range, operand.Range()), + SymbolRange: tok.Range, + }, diags + + case TokenOBrack: + return p.parseTupleCons() + + case TokenOBrace: + return p.parseObjectCons() + + default: + var diags hcl.Diagnostics + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid expression", + Detail: "Expected the start of an expression, but found an invalid expression token.", + Subject: &start.Range, + }) + } + p.setRecovery() + + // Return a placeholder so that the AST is still structurally sound + // even in the presence of parse errors. 
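The precedence note above (`-46+5` parsing as `(-46)+5`) and the keyword literals can be checked with a small sketch; the expected values in the comments are my reading of the code, not output recorded from the source:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// Unary negation binds only to the term that follows it, so -46+5
	// parses as (-46)+5 rather than -(46+5).
	expr, _ := hclsyntax.ParseExpression([]byte("-46+5"), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	val, _ := expr.Value(nil)
	fmt.Println(val.GoString()) // expected: cty.NumberIntVal(-41)

	// true, false and null are handled as keywords, not variable references.
	expr, _ = hclsyntax.ParseExpression([]byte("!false"), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	val, _ = expr.Value(nil)
	fmt.Println(val.GoString()) // expected: cty.True
}
```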
+ return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: start.Range, + }, diags + } +} + +func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { + // The cty.ParseNumberVal is always the same behavior as converting a + // string to a number, ensuring we always interpret decimal numbers in + // the same way. + numVal, err := cty.ParseNumberVal(string(tok.Bytes)) + if err != nil { + ret := cty.UnknownVal(cty.Number) + return ret, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid number literal", + // FIXME: not a very good error message, but convert only + // gives us "a number is required", so not much help either. + Detail: "Failed to recognize the value of this number literal.", + Subject: &tok.Range, + }, + } + } + return numVal, nil +} + +// finishParsingFunctionCall parses a function call assuming that the function +// name was already read, and so the peeker should be pointing at the opening +// parenthesis after the name. +func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) { + openTok := p.Read() + if openTok.Type != TokenOParen { + // should never happen if callers behave + panic("finishParsingFunctionCall called with non-parenthesis as next token") + } + + var args []Expression + var diags hcl.Diagnostics + var expandFinal bool + var closeTok Token + + // Arbitrary newlines are allowed inside the function call parentheses. + p.PushIncludeNewlines(false) + +Token: + for { + tok := p.Peek() + + if tok.Type == TokenCParen { + closeTok = p.Read() // eat closing paren + break Token + } + + arg, argDiags := p.ParseExpression() + args = append(args, arg) + diags = append(diags, argDiags...) + if p.recovery && argDiags.HasErrors() { + // if there was a parse error in the argument then we've + // probably been left in a weird place in the token stream, + // so we'll bail out with a partial argument list. + p.recover(TokenCParen) + break Token + } + + sep := p.Read() + if sep.Type == TokenCParen { + closeTok = sep + break Token + } + + if sep.Type == TokenEllipsis { + expandFinal = true + + if p.Peek().Type != TokenCParen { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing closing parenthesis", + Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + } + closeTok = p.recover(TokenCParen) + } else { + closeTok = p.Read() // eat closing paren + } + break Token + } + + if sep.Type != TokenComma { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing argument separator", + Detail: "A comma is required to separate each function argument from the next.", + Subject: &sep.Range, + Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(), + }) + closeTok = p.recover(TokenCParen) + break Token + } + + if p.Peek().Type == TokenCParen { + // A trailing comma after the last argument gets us in here. 
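A parse-only sketch of the expansion-marker rule above: `...` is accepted only immediately before the closing parenthesis. Evaluating such a call would additionally need a `Functions` table in the eval context, which is omitted here:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	for _, src := range []string{"min(nums...)", "min(nums..., 1)"} {
		_, diags := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
		fmt.Printf("%-16s parse errors: %v\n", src, diags.HasErrors())
	}
	// expected: min(nums...) parses cleanly, while min(nums..., 1) is
	// rejected with "Missing closing parenthesis".
}
```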
+ closeTok = p.Read() // eat closing paren + break Token + } + + } + + p.PopIncludeNewlines() + + return &FunctionCallExpr{ + Name: string(name.Bytes), + Args: args, + + ExpandFinal: expandFinal, + + NameRange: name.Range, + OpenParenRange: openTok.Range, + CloseParenRange: closeTok.Range, + }, diags +} + +func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrack { + // Should never happen if callers are behaving + panic("parseTupleCons called without peeker pointing to open bracket") + } + + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + + if forKeyword.TokenMatches(p.Peek()) { + return p.finishParsingForExpr(open) + } + + var close Token + + var diags hcl.Diagnostics + var exprs []Expression + + for { + next := p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + expr, exprDiags := p.ParseExpression() + exprs = append(exprs, expr) + diags = append(diags, exprDiags...) + + if p.recovery && exprDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing bracket to allow parsing to continue. + close = p.recover(TokenCBrack) + break + } + + next = p.Peek() + if next.Type == TokenCBrack { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing item separator", + Detail: "Expected a comma to mark the beginning of the next item.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrack) + break + } + + p.Read() // eat comma + + } + + return &TupleConsExpr{ + Exprs: exprs, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) { + open := p.Read() + if open.Type != TokenOBrace { + // Should never happen if callers are behaving + panic("parseObjectCons called without peeker pointing to open brace") + } + + // We must temporarily stop looking at newlines here while we check for + // a "for" keyword, since for expressions are _not_ newline-sensitive, + // even though object constructors are. + p.PushIncludeNewlines(false) + isFor := forKeyword.TokenMatches(p.Peek()) + p.PopIncludeNewlines() + if isFor { + return p.finishParsingForExpr(open) + } + + p.PushIncludeNewlines(true) + defer p.PopIncludeNewlines() + + var close Token + + var diags hcl.Diagnostics + var items []ObjectConsItem + + for { + next := p.Peek() + if next.Type == TokenNewline { + p.Read() // eat newline + continue + } + + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + var key Expression + var keyDiags hcl.Diagnostics + key, keyDiags = p.ParseExpression() + diags = append(diags, keyDiags...) + + if p.recovery && keyDiags.HasErrors() { + // If expression parsing failed then we are probably in a strange + // place in the token stream, so we'll bail out and try to reset + // to after our closing brace to allow parsing to continue. + close = p.recover(TokenCBrace) + break + } + + // We wrap up the key expression in a special wrapper that deals + // with our special case that naked identifiers as object keys + // are interpreted as literal strings. 
+ key = &ObjectConsKeyExpr{Wrapped: key}
+
+ next = p.Peek()
+ if next.Type != TokenEqual && next.Type != TokenColon {
+ if !p.recovery {
+ switch next.Type {
+ case TokenNewline, TokenComma:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing attribute value",
+ Detail: "Expected an attribute value, introduced by an equals sign (\"=\").",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ case TokenIdent:
+ // Although this might just be a plain old missing equals
+ // sign before a reference, one way to get here is to try
+ // to write an attribute name containing a period followed
+ // by a digit, which was valid in HCL1, like this:
+ // foo1.2_bar = "baz"
+ // We can't know exactly what the user intended here, but
+ // we'll augment our message with an extra hint in this case
+ // in case it is helpful.
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing key/value separator",
+ Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to give an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ default:
+ diags = append(diags, &hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Missing key/value separator",
+ Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value.",
+ Subject: &next.Range,
+ Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
+ })
+ }
+ }
+ close = p.recover(TokenCBrace)
+ break
+ }
+
+ p.Read() // eat equals sign or colon
+
+ value, valueDiags := p.ParseExpression()
+ diags = append(diags, valueDiags...)
+
+ if p.recovery && valueDiags.HasErrors() {
+ // If expression parsing failed then we are probably in a strange
+ // place in the token stream, so we'll bail out and try to reset
+ // to after our closing brace to allow parsing to continue.
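A small sketch of the object-constructor rules discussed above: naked identifier keys act as literal strings, and names containing spaces or periods must be quoted. The sample values are invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// "a" is a naked identifier key, interpreted as a literal string via
	// ObjectConsKeyExpr; "b c" needs quotes, as the diagnostic above hints.
	src := `{a = 1, "b c" = 2}`
	expr, diags := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil)
	fmt.Println(val.GoString()) // an object with attributes "a" and "b c"
}
```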
+ close = p.recover(TokenCBrace) + break + } + + items = append(items, ObjectConsItem{ + KeyExpr: key, + ValueExpr: value, + }) + + next = p.Peek() + if next.Type == TokenCBrace { + close = p.Read() // eat closer + break + } + + if next.Type != TokenComma && next.Type != TokenNewline { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing attribute separator", + Detail: "Expected a newline or comma to mark the beginning of the next attribute.", + Subject: &next.Range, + Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), + }) + } + close = p.recover(TokenCBrace) + break + } + + p.Read() // eat comma or newline + + } + + return &ObjectConsExpr{ + Items: items, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + }, diags +} + +func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { + p.PushIncludeNewlines(false) + defer p.PopIncludeNewlines() + introducer := p.Read() + if !forKeyword.TokenMatches(introducer) { + // Should never happen if callers are behaving + panic("finishParsingForExpr called without peeker pointing to 'for' identifier") + } + + var makeObj bool + var closeType TokenType + switch open.Type { + case TokenOBrace: + makeObj = true + closeType = TokenCBrace + case TokenOBrack: + makeObj = false // making a tuple + closeType = TokenCBrack + default: + // Should never happen if callers are behaving + panic("finishParsingForExpr called with invalid open token") + } + + var diags hcl.Diagnostics + var keyName, valName string + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires the 'in' keyword after its name declarations.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
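Both for-expression forms parsed by this function can be sketched as follows; the grouping-ellipsis result shape (a tuple of values per key) is my understanding of the evaluator, not asserted by the vendored code:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	examples := []string{
		// Tuple form: brackets, optional index variable before the comma.
		`[for i, v in ["a", "b"] : i]`,
		// Object form: braces, key => value, with the grouping ellipsis
		// collecting values that share a key.
		`{for s in ["a", "a", "b"] : s => s...}`,
	}
	for _, src := range examples {
		expr, _ := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
		val, diags := expr.Value(nil)
		fmt.Println(src, "=>", val.GoString(), "errors:", diags.HasErrors())
	}
}
```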
+ if p.recovery && collDiags.HasErrors() { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + if p.Peek().Type != TokenColon { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "For expression requires a colon after the collection expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + p.Read() // eat colon + + var keyExpr, valExpr Expression + var keyDiags, valDiags hcl.Diagnostics + valExpr, valDiags = p.ParseExpression() + if p.Peek().Type == TokenFatArrow { + // What we just parsed was actually keyExpr + p.Read() // eat the fat arrow + keyExpr, keyDiags = valExpr, valDiags + + valExpr, valDiags = p.ParseExpression() + } + diags = append(diags, keyDiags...) + diags = append(diags, valDiags...) + if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) { + close := p.recover(closeType) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + + group := false + var ellipsis Token + if p.Peek().Type == TokenEllipsis { + ellipsis = p.Read() + group = true + } + + var condExpr Expression + var condDiags hcl.Diagnostics + if ifKeyword.TokenMatches(p.Peek()) { + p.Read() // eat "if" + condExpr, condDiags = p.ParseExpression() + diags = append(diags, condDiags...) + if p.recovery && condDiags.HasErrors() { + close := p.recover(p.oppositeBracket(open.Type)) + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }, diags + } + } + + var close Token + if p.Peek().Type == closeType { + close = p.Read() + } else { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Extra characters after the end of the 'for' expression.", + Subject: p.Peek().Range.Ptr(), + Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), + }) + } + close = p.recover(closeType) + } + + if !makeObj { + if keyExpr != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is not valid when building a tuple.", + Subject: keyExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + if group { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Grouping ellipsis (...) 
cannot be used when building a tuple.", + Subject: &ellipsis.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } else { + if keyExpr == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' expression", + Detail: "Key expression is required when building an object.", + Subject: valExpr.Range().Ptr(), + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + } + + return &ForExpr{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + KeyExpr: keyExpr, + ValExpr: valExpr, + CondExpr: condExpr, + Group: group, + + SrcRange: hcl.RangeBetween(open.Range, close.Range), + OpenRange: open.Range, + CloseRange: close.Range, + }, diags +} + +// parseQuotedStringLiteral is a helper for parsing quoted strings that +// aren't allowed to contain any interpolations, such as block labels. +func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) { + oQuote := p.Read() + if oQuote.Type != TokenOQuote { + return "", oQuote.Range, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "A quoted string is required here.", + Subject: &oQuote.Range, + }, + } + } + + var diags hcl.Diagnostics + ret := &bytes.Buffer{} + var cQuote Token + +Token: + for { + tok := p.Read() + switch tok.Type { + + case TokenCQuote: + cQuote = tok + break Token + + case TokenQuotedLit: + s, sDiags := p.decodeStringLit(tok) + diags = append(diags, sDiags...) + ret.WriteString(s) + + case TokenTemplateControl, TokenTemplateInterp: + which := "$" + if tok.Type == TokenTemplateControl { + which = "%" + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: fmt.Sprintf( + "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.", + which, which, which, + ), + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + + // Now that we're returning an error callers won't attempt to use + // the result for any real operations, but they might try to use + // the partial AST for other analyses, so we'll leave a marker + // to indicate that there was something invalid in the string to + // help avoid misinterpretation of the partial result + ret.WriteString(which) + ret.WriteString("{ ... }") + + p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends + + case TokenEOF: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated string literal", + Detail: "Unable to find the closing quote mark before the end of the file.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + break Token + + default: + // Should never happen, as long as the scanner is behaving itself + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid string literal", + Detail: "This item is not valid in a string literal.", + Subject: &tok.Range, + Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), + }) + p.recover(TokenCQuote) + break Token + + } + + } + + return ret.String(), hcl.RangeBetween(oQuote.Range, cQuote.Range), diags +} + +// decodeStringLit processes the given token, which must be either a +// TokenQuotedLit or a TokenStringLit, returning the string resulting from +// resolving any escape sequences. +// +// If any error diagnostics are returned, the returned string may be incomplete +// or otherwise invalid. 
+func (p *parser) decodeStringLit(tok Token) (string, hcl.Diagnostics) { + var quoted bool + switch tok.Type { + case TokenQuotedLit: + quoted = true + case TokenStringLit: + quoted = false + default: + panic("decodeQuotedLit can only be used with TokenStringLit and TokenQuotedLit tokens") + } + var diags hcl.Diagnostics + + ret := make([]byte, 0, len(tok.Bytes)) + slices := scanStringLit(tok.Bytes, quoted) + + // We will mutate rng constantly as we walk through our token slices below. + // Any diagnostics must take a copy of this rng rather than simply pointing + // to it, e.g. by using rng.Ptr() rather than &rng. + rng := tok.Range + rng.End = rng.Start + +Slices: + for _, slice := range slices { + if len(slice) == 0 { + continue + } + + // Advance the start of our range to where the previous token ended + rng.Start = rng.End + + // Advance the end of our range to after our token. + b := slice + for len(b) > 0 { + adv, ch, _ := textseg.ScanGraphemeClusters(b, true) + rng.End.Byte += adv + switch ch[0] { + case '\r', '\n': + rng.End.Line++ + rng.End.Column = 1 + default: + rng.End.Column++ + } + b = b[adv:] + } + + TokenType: + switch slice[0] { + case '\\': + if !quoted { + // If we're not in quoted mode then just treat this token as + // normal. (Slices can still start with backslash even if we're + // not specifically looking for backslash sequences.) + break TokenType + } + if len(slice) < 2 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "Backslash must be followed by an escape sequence selector character.", + Subject: rng.Ptr(), + }) + break TokenType + } + + switch slice[1] { + + case 'n': + ret = append(ret, '\n') + continue Slices + case 'r': + ret = append(ret, '\r') + continue Slices + case 't': + ret = append(ret, '\t') + continue Slices + case '"': + ret = append(ret, '"') + continue Slices + case '\\': + ret = append(ret, '\\') + continue Slices + case 'u', 'U': + if slice[1] == 'u' && len(slice) != 6 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\u escape sequence must be followed by four hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } else if slice[1] == 'U' && len(slice) != 10 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.", + Subject: rng.Ptr(), + }) + break TokenType + } + + numHex := string(slice[2:]) + num, err := strconv.ParseUint(numHex, 16, 32) + if err != nil { + // Should never happen because the scanner won't match + // a sequence of digits that isn't valid. + panic(err) + } + + r := rune(num) + l := utf8.RuneLen(r) + if l == -1 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num), + Subject: rng.Ptr(), + }) + break TokenType + } + for i := 0; i < l; i++ { + ret = append(ret, 0) + } + rb := ret[len(ret)-l:] + utf8.EncodeRune(rb, r) + + continue Slices + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid escape sequence", + Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]), + Subject: rng.Ptr(), + }) + ret = append(ret, slice[1:]...) 
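A sketch exercising the escape decoding above: `\u0041` becomes `A`, `\n` a newline, and (per the `$`/`%` case that follows) `$${` collapses to a literal `${`. The expected output is an assumption:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// The raw Go string keeps the backslashes intact so that HCL, not Go,
	// performs the escape decoding.
	src := `"a\u0041\n$${x}"`
	expr, _ := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	val, _ := expr.Value(nil)
	fmt.Printf("%q\n", val.AsString()) // expected: "aA\n${x}"
}
```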
+ continue Slices + } + + case '$', '%': + if len(slice) != 3 { + // Not long enough to be our escape sequence, so it's literal. + break TokenType + } + + if slice[1] == slice[0] && slice[2] == '{' { + ret = append(ret, slice[0]) + ret = append(ret, '{') + continue Slices + } + + break TokenType + } + + // If we fall out here or break out of here from the switch above + // then this slice is just a literal. + ret = append(ret, slice...) + } + + return string(ret), diags +} + +// setRecovery turns on recovery mode without actually doing any recovery. +// This can be used when a parser knowingly leaves the peeker in a useless +// place and wants to suppress errors that might result from that decision. +func (p *parser) setRecovery() { + p.recovery = true +} + +// recover seeks forward in the token stream until it finds TokenType "end", +// then returns with the peeker pointed at the following token. +// +// If the given token type is a bracketer, this function will additionally +// count nested instances of the brackets to try to leave the peeker at +// the end of the _current_ instance of that bracketer, skipping over any +// nested instances. This is a best-effort operation and may have +// unpredictable results on input with bad bracketer nesting. +func (p *parser) recover(end TokenType) Token { + start := p.oppositeBracket(end) + p.recovery = true + + nest := 0 + for { + tok := p.Read() + ty := tok.Type + if end == TokenTemplateSeqEnd && ty == TokenTemplateControl { + // normalize so that our matching behavior can work, since + // TokenTemplateControl/TokenTemplateInterp are asymmetrical + // with TokenTemplateSeqEnd and thus we need to count both + // openers if that's the closer we're looking for. + ty = TokenTemplateInterp + } + + switch ty { + case start: + nest++ + case end: + if nest < 1 { + return tok + } + + nest-- + case TokenEOF: + return tok + } + } +} + +// recoverOver seeks forward in the token stream until it finds a block +// starting with TokenType "start", then finds the corresponding end token, +// leaving the peeker pointed at the token after that end token. +// +// The given token type _must_ be a bracketer. For example, if the given +// start token is TokenOBrace then the parser will be left at the _end_ of +// the next brace-delimited block encountered, or at EOF if no such block +// is found or it is unclosed. +func (p *parser) recoverOver(start TokenType) { + end := p.oppositeBracket(start) + + // find the opening bracket first +Token: + for { + tok := p.Read() + switch tok.Type { + case start, TokenEOF: + break Token + } + } + + // Now use our existing recover function to locate the _end_ of the + // container we've found. 
+ p.recover(end)
+}
+
+func (p *parser) recoverAfterBodyItem() {
+ p.recovery = true
+ var open []TokenType
+
+Token:
+ for {
+ tok := p.Read()
+
+ switch tok.Type {
+
+ case TokenNewline:
+ if len(open) == 0 {
+ break Token
+ }
+
+ case TokenEOF:
+ break Token
+
+ case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl:
+ open = append(open, tok.Type)
+
+ case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc:
+ opener := p.oppositeBracket(tok.Type)
+ for len(open) > 0 && open[len(open)-1] != opener {
+ open = open[:len(open)-1]
+ }
+ if len(open) > 0 {
+ open = open[:len(open)-1]
+ }
+
+ case TokenTemplateSeqEnd:
+ for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl {
+ open = open[:len(open)-1]
+ }
+ if len(open) > 0 {
+ open = open[:len(open)-1]
+ }
+
+ }
+ }
+}
+
+// oppositeBracket finds the bracket that opposes the given bracketer, or
+// NilToken if the given token isn't a bracketer.
+//
+// "Bracketer", for the sake of this function, is one end of a matching
+// open/close set of tokens that establish a bracketing context.
+func (p *parser) oppositeBracket(ty TokenType) TokenType {
+ switch ty {
+
+ case TokenOBrace:
+ return TokenCBrace
+ case TokenOBrack:
+ return TokenCBrack
+ case TokenOParen:
+ return TokenCParen
+ case TokenOQuote:
+ return TokenCQuote
+ case TokenOHeredoc:
+ return TokenCHeredoc
+
+ case TokenCBrace:
+ return TokenOBrace
+ case TokenCBrack:
+ return TokenOBrack
+ case TokenCParen:
+ return TokenOParen
+ case TokenCQuote:
+ return TokenOQuote
+ case TokenCHeredoc:
+ return TokenOHeredoc
+
+ case TokenTemplateControl:
+ return TokenTemplateSeqEnd
+ case TokenTemplateInterp:
+ return TokenTemplateSeqEnd
+ case TokenTemplateSeqEnd:
+ // This is ambiguous, but we return Interp here because that's
+ // what's assumed by the "recover" method.
+ return TokenTemplateInterp + + default: + return TokenNil + } +} + +func errPlaceholderExpr(rng hcl.Range) Expression { + return &LiteralValueExpr{ + Val: cty.DynamicVal, + SrcRange: rng, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go new file mode 100644 index 00000000..a141626f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go @@ -0,0 +1,799 @@ +package hclsyntax + +import ( + "fmt" + "strings" + "unicode" + + "github.com/apparentlymart/go-textseg/textseg" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { + return p.parseTemplate(TokenEOF, false) +} + +func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) { + exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc) + + if passthru { + if len(exprs) != 1 { + panic("passthru set with len(exprs) != 1") + } + return &TemplateWrapExpr{ + Wrapped: exprs[0], + SrcRange: rng, + }, diags + } + + return &TemplateExpr{ + Parts: exprs, + SrcRange: rng, + }, diags +} + +func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { + parts, diags := p.parseTemplateParts(end) + if flushHeredoc { + flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec + } + tp := templateParser{ + Tokens: parts.Tokens, + SrcRange: parts.SrcRange, + } + exprs, exprsDiags := tp.parseRoot() + diags = append(diags, exprsDiags...) + + passthru := false + if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token + if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp { + passthru = true + } + } + + return exprs, passthru, parts.SrcRange, diags +} + +type templateParser struct { + Tokens []templateToken + SrcRange hcl.Range + + pos int +} + +func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) { + var exprs []Expression + var diags hcl.Diagnostics + + for { + next := p.Peek() + if _, isEnd := next.(*templateEndToken); isEnd { + break + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) + exprs = append(exprs, expr) + } + + return exprs, diags +} + +func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) { + next := p.Peek() + switch tok := next.(type) { + + case *templateLiteralToken: + p.Read() // eat literal + return &LiteralValueExpr{ + Val: cty.StringVal(tok.Val), + SrcRange: tok.SrcRange, + }, nil + + case *templateInterpToken: + p.Read() // eat interp + return tok.Expr, nil + + case *templateIfToken: + return p.parseIf() + + case *templateForToken: + return p.parseFor() + + case *templateEndToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + // This is a particularly unhelpful diagnostic, so callers + // should attempt to pre-empt it and produce a more helpful + // diagnostic that is context-aware. 
+ Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + case *templateEndCtrlToken: + p.Read() // eat erroneous token + return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()), + Detail: "The control directives within this template are unbalanced.", + Subject: &tok.SrcRange, + }, + } + + default: + // should never happen, because above should be exhaustive + panic(fmt.Sprintf("unhandled template token type %T", next)) + } +} + +func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) { + open := p.Read() + openIf, isIf := open.(*templateIfToken) + if !isIf { + // should never happen if caller is behaving + panic("parseIf called with peeker not pointing at if token") + } + + var ifExprs, elseExprs []Expression + var diags hcl.Diagnostics + var endifRange hcl.Range + + currentExprs := &ifExprs +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The if directive at %s is missing its corresponding endif directive.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + if currentExprs == &ifExprs { + currentExprs = &elseExprs + continue Token + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: fmt.Sprintf( + "Already in the else clause for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + + case templateEndIf: + endifRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endif directive for the if started at %s.", + openIf.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
+ *currentExprs = append(*currentExprs, expr) + } + + if len(ifExprs) == 0 { + ifExprs = append(ifExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openIf.SrcRange.Filename, + Start: openIf.SrcRange.End, + End: openIf.SrcRange.End, + }, + }) + } + if len(elseExprs) == 0 { + elseExprs = append(elseExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: endifRange.Filename, + Start: endifRange.Start, + End: endifRange.Start, + }, + }) + } + + trueExpr := &TemplateExpr{ + Parts: ifExprs, + SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()), + } + falseExpr := &TemplateExpr{ + Parts: elseExprs, + SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()), + } + + return &ConditionalExpr{ + Condition: openIf.CondExpr, + TrueResult: trueExpr, + FalseResult: falseExpr, + + SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange), + }, diags +} + +func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) { + open := p.Read() + openFor, isFor := open.(*templateForToken) + if !isFor { + // should never happen if caller is behaving + panic("parseFor called with peeker not pointing at for token") + } + + var contentExprs []Expression + var diags hcl.Diagnostics + var endforRange hcl.Range + +Token: + for { + next := p.Peek() + if end, isEnd := next.(*templateEndToken); isEnd { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected end of template", + Detail: fmt.Sprintf( + "The for directive at %s is missing its corresponding endfor directive.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + return errPlaceholderExpr(end.SrcRange), diags + } + if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd { + p.Read() // eat end directive + + switch end.Type { + + case templateElse: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unexpected else directive", + Detail: "An else clause is not expected for a for directive.", + Subject: &end.SrcRange, + }) + + case templateEndFor: + endforRange = end.SrcRange + break Token + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Unexpected %s directive", end.Name()), + Detail: fmt.Sprintf( + "Expecting an endfor directive corresponding to the for directive at %s.", + openFor.SrcRange, + ), + Subject: &end.SrcRange, + }) + } + + return errPlaceholderExpr(end.SrcRange), diags + } + + expr, exprDiags := p.parseExpr() + diags = append(diags, exprDiags...) 
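The if directive machinery above can be sketched from the public API; with a false condition and literal branches, the `ConditionalExpr` built here should select the else template:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	// A missing else branch would default to the empty string, per the
	// elseExprs fallback above.
	src := `"%{ if 1 == 2 }yes%{ else }no%{ endif }"`
	expr, _ := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	val, _ := expr.Value(nil)
	fmt.Println(val.AsString()) // expected: no
}
```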
+ contentExprs = append(contentExprs, expr) + } + + if len(contentExprs) == 0 { + contentExprs = append(contentExprs, &LiteralValueExpr{ + Val: cty.StringVal(""), + SrcRange: hcl.Range{ + Filename: openFor.SrcRange.Filename, + Start: openFor.SrcRange.End, + End: openFor.SrcRange.End, + }, + }) + } + + contentExpr := &TemplateExpr{ + Parts: contentExprs, + SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()), + } + + forExpr := &ForExpr{ + KeyVar: openFor.KeyVar, + ValVar: openFor.ValVar, + + CollExpr: openFor.CollExpr, + ValExpr: contentExpr, + + SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange), + OpenRange: openFor.SrcRange, + CloseRange: endforRange, + } + + return &TemplateJoinExpr{ + Tuple: forExpr, + }, diags +} + +func (p *templateParser) Peek() templateToken { + return p.Tokens[p.pos] +} + +func (p *templateParser) Read() templateToken { + ret := p.Peek() + if _, end := ret.(*templateEndToken); !end { + p.pos++ + } + return ret +} + +// parseTemplateParts produces a flat sequence of "template tokens", which are +// either literal values (with any "trimming" already applied), interpolation +// sequences, or control flow markers. +// +// A further pass is required on the result to turn it into an AST. +func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) { + var parts []templateToken + var diags hcl.Diagnostics + + startRange := p.NextRange() + ltrimNext := false + nextCanTrimPrev := false + var endRange hcl.Range + +Token: + for { + next := p.Read() + if next.Type == end { + // all done! + endRange = next.Range + break + } + + ltrim := ltrimNext + ltrimNext = false + canTrimPrev := nextCanTrimPrev + nextCanTrimPrev = false + + switch next.Type { + case TokenStringLit, TokenQuotedLit: + str, strDiags := p.decodeStringLit(next) + diags = append(diags, strDiags...) + + if ltrim { + str = strings.TrimLeftFunc(str, unicode.IsSpace) + } + + parts = append(parts, &templateLiteralToken{ + Val: str, + SrcRange: next.Range, + }) + nextCanTrimPrev = true + + case TokenTemplateInterp: + // if the opener is ${~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + + p.PushIncludeNewlines(false) + expr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extra characters after interpolation expression", + Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. 
+ if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + parts = append(parts, &templateInterpToken{ + Expr: expr, + SrcRange: hcl.RangeBetween(next.Range, close.Range), + }) + + case TokenTemplateControl: + // if the opener is %{~ then we want to eat any trailing whitespace + // in the preceding literal token, assuming it is indeed a literal + // token. + if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { + prevExpr := parts[len(parts)-1] + if lexpr, ok := prevExpr.(*templateLiteralToken); ok { + lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace) + } + } + p.PushIncludeNewlines(false) + + kw := p.Peek() + if kw.Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template directive", + Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.", + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat keyword token + + switch { + + case ifKeyword.TokenMatches(kw): + condExpr, exprDiags := p.ParseExpression() + diags = append(diags, exprDiags...) + parts = append(parts, &templateIfToken{ + CondExpr: condExpr, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case elseKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateElse, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endifKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndIf, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case forKeyword.TokenMatches(kw): + var keyName, valName string + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires variable name after 'for'.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + + if p.Peek().Type == TokenComma { + // What we just read was actually the key, then. + keyName = valName + p.Read() // eat comma + + if p.Peek().Type != TokenIdent { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires value variable name after comma.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + + valName = string(p.Read().Bytes) + } + + if !inKeyword.TokenMatches(p.Peek()) { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid 'for' directive", + Detail: "For directive requires 'in' keyword after names.", + Subject: p.Peek().Range.Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + } + p.Read() // eat 'in' keyword + + collExpr, collDiags := p.ParseExpression() + diags = append(diags, collDiags...) 
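A sketch of the for directive whose header was just parsed above; each iteration renders the body template, and `TemplateJoinExpr` (built in `parseFor` earlier) concatenates the per-item results. The expected output is an assumption:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := `"%{ for s in ["a", "b"] }${s}-%{ endfor }"`
	expr, _ := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	val, diags := expr.Value(nil)
	fmt.Println(val.AsString(), diags.HasErrors()) // expected: a-b- false
}
```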
+ parts = append(parts, &templateForToken{ + KeyVar: keyName, + ValVar: valName, + CollExpr: collExpr, + + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + case endforKeyword.TokenMatches(kw): + parts = append(parts, &templateEndCtrlToken{ + Type: templateEndFor, + SrcRange: hcl.RangeBetween(next.Range, p.NextRange()), + }) + + default: + if !p.recovery { + suggestions := []string{"if", "for", "else", "endif", "endfor"} + given := string(kw.Bytes) + suggestion := nameSuggestion(given, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid template control keyword", + Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion), + Subject: &kw.Range, + Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + p.PopIncludeNewlines() + continue Token + + } + + close := p.Peek() + if close.Type != TokenTemplateSeqEnd { + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes), + Detail: "Expected a closing brace to end the sequence, but found extra characters.", + Subject: &close.Range, + Context: hcl.RangeBetween(startRange, close.Range).Ptr(), + }) + } + p.recover(TokenTemplateSeqEnd) + } else { + p.Read() // eat closing brace + + // If the closer is ~} then we want to eat any leading + // whitespace on the next token, if it turns out to be a + // literal token. + if len(close.Bytes) == 2 && close.Bytes[0] == '~' { + ltrimNext = true + } + } + p.PopIncludeNewlines() + + default: + if !p.recovery { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unterminated template string", + Detail: "No closing marker was found for the string.", + Subject: &next.Range, + Context: hcl.RangeBetween(startRange, next.Range).Ptr(), + }) + } + final := p.recover(end) + endRange = final.Range + break Token + } + } + + if len(parts) == 0 { + // If a sequence has no content, we'll treat it as if it had an + // empty string in it because that's what the user probably means + // if they write "" in configuration. + parts = append(parts, &templateLiteralToken{ + Val: "", + SrcRange: hcl.Range{ + // Range is the zero-character span immediately after the + // opening quote. + Filename: startRange.Filename, + Start: startRange.End, + End: startRange.End, + }, + }) + } + + // Always end with an end token, so the parser can produce diagnostics + // about unclosed items with proper position information. + parts = append(parts, &templateEndToken{ + SrcRange: endRange, + }) + + ret := &templateParts{ + Tokens: parts, + SrcRange: hcl.RangeBetween(startRange, endRange), + } + + return ret, diags +} + +// flushHeredocTemplateParts modifies in-place the line-leading literal strings +// to apply the flush heredoc processing rule: find the line with the smallest +// number of whitespace characters as prefix and then trim that number of +// characters from all of the lines. +// +// This rule is applied to static tokens rather than to the rendered result, +// so interpolating a string with leading whitespace cannot affect the chosen +// prefix length. 
+func flushHeredocTemplateParts(parts *templateParts) { + if len(parts.Tokens) == 0 { + // Nothing to do + return + } + + const maxInt = int((^uint(0)) >> 1) + + minSpaces := maxInt + newline := true + var adjust []*templateLiteralToken + for _, ttok := range parts.Tokens { + if newline { + newline = false + var spaces int + if lit, ok := ttok.(*templateLiteralToken); ok { + orig := lit.Val + trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace) + // If a token is entirely spaces and ends with a newline + // then it's a "blank line" and thus not considered for + // space-prefix-counting purposes. + if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") { + spaces = maxInt + } else { + spaceBytes := len(lit.Val) - len(trimmed) + spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters) + adjust = append(adjust, lit) + } + } else if _, ok := ttok.(*templateEndToken); ok { + break // don't process the end token since it never has spaces before it + } + if spaces < minSpaces { + minSpaces = spaces + } + } + if lit, ok := ttok.(*templateLiteralToken); ok { + if strings.HasSuffix(lit.Val, "\n") { + newline = true // The following token, if any, begins a new line + } + } + } + + for _, lit := range adjust { + // Since we want to count space _characters_ rather than space _bytes_, + // we can't just do a straightforward slice operation here and instead + // need to hunt for the split point with a scanner. + valBytes := []byte(lit.Val) + spaceByteCount := 0 + for i := 0; i < minSpaces; i++ { + adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true) + spaceByteCount += adv + valBytes = valBytes[adv:] + } + lit.Val = lit.Val[spaceByteCount:] + lit.SrcRange.Start.Column += minSpaces + lit.SrcRange.Start.Byte += spaceByteCount + } +} + +type templateParts struct { + Tokens []templateToken + SrcRange hcl.Range +} + +// templateToken is a higher-level token that represents a single atom within +// the template language. Our template parsing first raises the raw token +// stream to a sequence of templateToken, and then transforms the result into +// an expression tree. 
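A hedged sketch of the flush-heredoc rule implemented just above: with a `<<-` opener, the smallest leading-space prefix across the lines (four spaces here) should be trimmed from every line. The expected result is my reading of the algorithm, not recorded output:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	src := "<<-EOT\n    hello\n      world\nEOT\n"
	expr, diags := hclsyntax.ParseExpression([]byte(src), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	val, _ := expr.Value(nil)
	fmt.Printf("%q\n", val.AsString()) // expected: "hello\n  world\n"
}
```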
+type templateToken interface { + templateToken() templateToken +} + +type templateLiteralToken struct { + Val string + SrcRange hcl.Range + isTemplateToken +} + +type templateInterpToken struct { + Expr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateIfToken struct { + CondExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateForToken struct { + KeyVar string // empty if ignoring key + ValVar string + CollExpr Expression + SrcRange hcl.Range + isTemplateToken +} + +type templateEndCtrlType int + +const ( + templateEndIf templateEndCtrlType = iota + templateElse + templateEndFor +) + +type templateEndCtrlToken struct { + Type templateEndCtrlType + SrcRange hcl.Range + isTemplateToken +} + +func (t *templateEndCtrlToken) Name() string { + switch t.Type { + case templateEndIf: + return "endif" + case templateElse: + return "else" + case templateEndFor: + return "endfor" + default: + // should never happen + panic("invalid templateEndCtrlType") + } +} + +type templateEndToken struct { + SrcRange hcl.Range + isTemplateToken +} + +type isTemplateToken [0]int + +func (t isTemplateToken) templateToken() templateToken { + return t +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go new file mode 100644 index 00000000..2ff3ed6c --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_traversal.go @@ -0,0 +1,159 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// ParseTraversalAbs parses an absolute traversal that is assumed to consume +// all of the remaining tokens in the peeker. The usual parser recovery +// behavior is not supported here because traversals are not expected to +// be parsed as part of a larger program. 
+func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) { + var ret hcl.Traversal + var diags hcl.Diagnostics + + // Absolute traversal must always begin with a variable name + varTok := p.Read() + if varTok.Type != TokenIdent { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable name required", + Detail: "Must begin with a variable name.", + Subject: &varTok.Range, + }) + return ret, diags + } + + varName := string(varTok.Bytes) + ret = append(ret, hcl.TraverseRoot{ + Name: varName, + SrcRange: varTok.Range, + }) + + for { + next := p.Peek() + + if next.Type == TokenEOF { + return ret, diags + } + + switch next.Type { + case TokenDot: + // Attribute access + dot := p.Read() // eat dot + nameTok := p.Read() + if nameTok.Type != TokenIdent { + if nameTok.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions (.*) may not be used here.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Dot must be followed by attribute name.", + Subject: &nameTok.Range, + Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(), + }) + } + return ret, diags + } + + attrName := string(nameTok.Bytes) + ret = append(ret, hcl.TraverseAttr{ + Name: attrName, + SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range), + }) + case TokenOBrack: + // Index + open := p.Read() // eat open bracket + next := p.Peek() + + switch next.Type { + case TokenNumberLit: + tok := p.Read() // eat number + numVal, numDiags := p.numberLitValue(tok) + diags = append(diags, numDiags...) + + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: numVal, + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + case TokenOQuote: + str, _, strDiags := p.parseQuotedStringLiteral() + diags = append(diags, strDiags...) 
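Assuming the public wrapper `hclsyntax.ParseTraversalAbs` that normally accompanies this vendored package, the traversal parser above can be sketched like so; the traversal source is arbitrary:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	trav, diags := hclsyntax.ParseTraversalAbs([]byte(`foo.bar[0]["baz"]`), "ex.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Expected steps: root foo, attribute bar, index 0, index "baz".
	for _, step := range trav {
		switch s := step.(type) {
		case hcl.TraverseRoot:
			fmt.Println("root:", s.Name)
		case hcl.TraverseAttr:
			fmt.Println("attr:", s.Name)
		case hcl.TraverseIndex:
			fmt.Println("index:", s.Key.GoString())
		}
	}
}
```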
+ + close := p.Read() + if close.Type != TokenCBrack { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed index brackets", + Detail: "Index key must be followed by a closing bracket.", + Subject: &close.Range, + Context: hcl.RangeBetween(open.Range, close.Range).Ptr(), + }) + } + + ret = append(ret, hcl.TraverseIndex{ + Key: cty.StringVal(str), + SrcRange: hcl.RangeBetween(open.Range, close.Range), + }) + + if diags.HasErrors() { + return ret, diags + } + + default: + if next.Type == TokenStar { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Attribute name required", + Detail: "Splat expressions ([*]) may not be used here.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Index value required", + Detail: "Index brackets must contain either a literal number or a literal string.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + } + return ret, diags + } + + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid character", + Detail: "Expected an attribute access or an index operator.", + Subject: &next.Range, + Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(), + }) + return ret, diags + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go new file mode 100644 index 00000000..5a4b50e2 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/peeker.go @@ -0,0 +1,212 @@ +package hclsyntax + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// This is set to true at init() time in tests, to enable more useful output +// if a stack discipline error is detected. It should not be enabled in +// normal mode since there is a performance penalty from accessing the +// runtime stack to produce the traces, but could be temporarily set to +// true for debugging if desired. +var tracePeekerNewlinesStack = false + +type peeker struct { + Tokens Tokens + NextIndex int + + IncludeComments bool + IncludeNewlinesStack []bool + + // used only when tracePeekerNewlinesStack is set + newlineStackChanges []peekerNewlineStackChange +} + +// for use in debugging the stack usage only +type peekerNewlineStackChange struct { + Pushing bool // if false, then popping + Frame runtime.Frame + Include bool +} + +func newPeeker(tokens Tokens, includeComments bool) *peeker { + return &peeker{ + Tokens: tokens, + IncludeComments: includeComments, + + IncludeNewlinesStack: []bool{true}, + } +} + +func (p *peeker) Peek() Token { + ret, _ := p.nextToken() + return ret +} + +func (p *peeker) Read() Token { + ret, nextIdx := p.nextToken() + p.NextIndex = nextIdx + return ret +} + +func (p *peeker) NextRange() hcl.Range { + return p.Peek().Range +} + +func (p *peeker) PrevRange() hcl.Range { + if p.NextIndex == 0 { + return p.NextRange() + } + + return p.Tokens[p.NextIndex-1].Range +} + +func (p *peeker) nextToken() (Token, int) { + for i := p.NextIndex; i < len(p.Tokens); i++ { + tok := p.Tokens[i] + switch tok.Type { + case TokenComment: + if !p.IncludeComments { + // Single-line comment tokens, starting with # or //, absorb + // the trailing newline that terminates them as part of their + // bytes. 
When we're filtering out comments, we must as a
+				// special case transform these to newline tokens in order
+				// to properly parse newline-terminated block items.
+
+				if p.includingNewlines() {
+					if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+						fakeNewline := Token{
+							Type:  TokenNewline,
+							Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
+
+							// We use the whole token range as the newline
+							// range, even though that's a little... weird,
+							// because otherwise we'd need to go count
+							// characters again in order to figure out the
+							// column of the newline, and that complexity
+							// isn't justified when ranges of newlines are
+							// so rarely printed anyway.
+							Range: tok.Range,
+						}
+						return fakeNewline, i + 1
+					}
+				}
+
+				continue
+			}
+		case TokenNewline:
+			if !p.includingNewlines() {
+				continue
+			}
+		}
+
+		return tok, i + 1
+	}
+
+	// if we fall out here then we'll return the EOF token, and leave
+	// our index pointed off the end of the array so we'll keep
+	// returning EOF in future too.
+	return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
+}
+
+func (p *peeker) includingNewlines() bool {
+	return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
+}
+
+func (p *peeker) PushIncludeNewlines(include bool) {
+	if tracePeekerNewlinesStack {
+		// Record who called us so that we can more easily track down any
+		// mismanagement of the stack in the parser.
+		callers := []uintptr{0}
+		runtime.Callers(2, callers)
+		frames := runtime.CallersFrames(callers)
+		frame, _ := frames.Next()
+		p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+			true, frame, include,
+		})
+	}
+
+	p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
+}
+
+func (p *peeker) PopIncludeNewlines() bool {
+	stack := p.IncludeNewlinesStack
+	remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
+	p.IncludeNewlinesStack = remain
+
+	if tracePeekerNewlinesStack {
+		// Record who called us so that we can more easily track down any
+		// mismanagement of the stack in the parser.
+		callers := []uintptr{0}
+		runtime.Callers(2, callers)
+		frames := runtime.CallersFrames(callers)
+		frame, _ := frames.Next()
+		p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
+			false, frame, ret,
+		})
+	}
+
+	return ret
+}
+
+// AssertEmptyIncludeNewlinesStack checks if the IncludeNewlinesStack is
+// empty, panicking if it is not. This can be used to catch stack
+// mismanagement that might otherwise just cause confusing downstream errors.
+//
+// This function is a no-op if the stack contains only its initial element
+// when called.
+//
+// If newlines stack tracing is enabled by setting the global variable
+// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
+// calls will be produced to help identify which caller in the parser is
+// misbehaving.
+func (p *peeker) AssertEmptyIncludeNewlinesStack() {
+	if len(p.IncludeNewlinesStack) != 1 {
+		// Should never happen; indicates mismanagement of the stack inside
+		// the parser.
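+		//
+		// (Editorial sketch, not from the upstream file: every entry point
+		// is expected to balance its pushes and pops, as ParseExpression in
+		// public.go does:
+		//
+		//	parser.PushIncludeNewlines(false)
+		//	expr, parseDiags := parser.ParseExpression()
+		//	parser.PopIncludeNewlines()
+		//
+		// A missing Pop leaves a stale entry on the stack and lands here.)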
+		if p.newlineStackChanges != nil { // only if tracePeekerNewlinesStack is enabled above
+			panic(fmt.Errorf(
+				"non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
+				len(p.IncludeNewlinesStack)-1,
+				formatPeekerNewlineStackChanges(p.newlineStackChanges),
+			))
+		} else {
+			panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
+		}
+	}
+}
+
+func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
+	indent := 0
+	var buf bytes.Buffer
+	for _, change := range changes {
+		funcName := change.Frame.Function
+		if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
+			funcName = funcName[idx+1:]
+		}
+		filename := change.Frame.File
+		if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
+			filename = filename[idx+1:]
+		}
+
+		switch change.Pushing {
+
+		case true:
+			buf.WriteString(strings.Repeat(" ", indent))
+			fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+			indent++
+
+		case false:
+			indent--
+			buf.WriteString(strings.Repeat(" ", indent))
+			fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
+
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
new file mode 100644
index 00000000..cf0ee297
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/public.go
@@ -0,0 +1,171 @@
+package hclsyntax
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// ParseConfig parses the given buffer as a whole HCL config file, returning
+// a *hcl.File representing its contents. If HasErrors called on the returned
+// diagnostics returns true, the returned body is likely to be incomplete
+// and should therefore be used with care.
+//
+// The body in the returned file has dynamic type *hclsyntax.Body, so callers
+// may freely type-assert this to get access to the full hclsyntax API in
+// situations where detailed access is required. However, most common use-cases
+// should be served using the hcl.Body interface to ensure compatibility with
+// other configuration syntaxes, such as JSON.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
+	tokens, diags := LexConfig(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	body, parseDiags := parser.ParseBody(TokenEOF)
+	diags = append(diags, parseDiags...)
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return &hcl.File{
+		Body:  body,
+		Bytes: src,
+
+		Nav: navigation{
+			root: body,
+		},
+	}, diags
+}
+
+// ParseExpression parses the given buffer as a standalone HCL expression,
+// returning it as an instance of Expression.
+func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare expressions are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseExpression()
+	diags = append(diags, parseDiags...)
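+
+	// Any leftover token other than EOF at this point is trailing garbage
+	// after an otherwise-complete expression; it is reported below unless
+	// the parser is already recovering from an earlier error.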
+
+	next := parser.Peek()
+	if next.Type != TokenEOF && !parser.recovery {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Extra characters after expression",
+			Detail:   "An expression was successfully parsed, but extra characters were found after it.",
+			Subject:  &next.Range,
+		})
+	}
+
+	parser.PopIncludeNewlines()
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// ParseTemplate parses the given buffer as a standalone HCL template,
+// returning it as an instance of Expression.
+func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
+	tokens, diags := LexTemplate(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+	expr, parseDiags := parser.ParseTemplate()
+	diags = append(diags, parseDiags...)
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
+//
+// Parsing as a traversal is more limited than parsing as an expression since
+// it allows only attribute and indexing operations on variables. Traversals
+// are useful as a syntax for referring to objects without necessarily
+// evaluating them.
+func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
+	tokens, diags := LexExpression(src, filename, start)
+	peeker := newPeeker(tokens, false)
+	parser := &parser{peeker: peeker}
+
+	// Bare traversals are always parsed in "ignore newlines" mode, as if
+	// they were wrapped in parentheses.
+	parser.PushIncludeNewlines(false)
+
+	expr, parseDiags := parser.ParseTraversalAbs()
+	diags = append(diags, parseDiags...)
+
+	parser.PopIncludeNewlines()
+
+	// Panic if the parser uses incorrect stack discipline with the peeker's
+	// newlines stack, since otherwise it will produce confusing downstream
+	// errors.
+	peeker.AssertEmptyIncludeNewlinesStack()
+
+	return expr, diags
+}
+
+// LexConfig performs lexical analysis on the given buffer, treating it as a
+// whole HCL config file, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	tokens := scanTokens(src, filename, start, scanNormal)
+	diags := checkInvalidTokens(tokens)
+	return tokens, diags
+}
+
+// LexExpression performs lexical analysis on the given buffer, treating it as
+// a standalone HCL expression, and returns the resulting tokens.
+//
+// Only minimal validation is done during lexical analysis, so the returned
+// diagnostics may include errors about lexical issues such as bad character
+// encodings or unrecognized characters, but full parsing is required to
+// detect _all_ syntax errors.
+func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
+	// This is actually just the same thing as LexConfig, since configs
+	// and expressions lex in the same way.
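+	// (Both delegate to scanTokens in scanNormal mode; see scan_tokens.go.)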
+ tokens := scanTokens(src, filename, start, scanNormal) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// LexTemplate performs lexical analysis on the given buffer, treating it as a +// standalone HCL template, and returns the resulting tokens. +// +// Only minimal validation is done during lexical analysis, so the returned +// diagnostics may include errors about lexical issues such as bad character +// encodings or unrecognized characters, but full parsing is required to +// detect _all_ syntax errors. +func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) { + tokens := scanTokens(src, filename, start, scanTemplate) + diags := checkInvalidTokens(tokens) + return tokens, diags +} + +// ValidIdentifier tests if the given string could be a valid identifier in +// a native syntax expression. +// +// This is useful when accepting names from the user that will be used as +// variable or attribute names in the scope, to ensure that any name chosen +// will be traversable using the variable or attribute traversal syntax. +func ValidIdentifier(s string) bool { + // This is a kinda-expensive way to do something pretty simple, but it + // is easiest to do with our existing scanner-related infrastructure here + // and nobody should be validating identifiers in a tight loop. + tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) + return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go new file mode 100644 index 00000000..2895ade7 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go @@ -0,0 +1,301 @@ +//line scan_string_lit.rl:1 + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. 
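+//
+// Editorial sketch, not part of the generated output: scanStringLit (defined
+// at the bottom of this file) partitions the raw bytes of a string literal
+// into literal runs and escape/template sequences, each as a sub-slice of
+// the input. Assuming the quoted machine described in scan_string_lit.rl,
+// one would expect:
+//
+//	scanStringLit([]byte(`ab\ncd`), true)
+//	// => three segments: "ab", the two-byte escape `\n`, and "cd"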
+ +//line scan_string_lit.go:9 +var _hclstrtok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 2, 1, 0, +} + +var _hclstrtok_key_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 10, 14, 18, + 22, 27, 31, 36, 41, 46, 51, 57, + 62, 74, 85, 96, 107, 118, 129, 140, + 151, +} + +var _hclstrtok_trans_keys []byte = []byte{ + 128, 191, 128, 191, 128, 191, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 10, 13, 36, 37, 10, 13, + 36, 37, 123, 10, 13, 36, 37, 10, + 13, 36, 37, 92, 10, 13, 36, 37, + 92, 10, 13, 36, 37, 92, 10, 13, + 36, 37, 92, 10, 13, 36, 37, 92, + 123, 10, 13, 36, 37, 92, 85, 117, + 128, 191, 192, 223, 224, 239, 240, 247, + 248, 255, 10, 13, 36, 37, 92, 48, + 57, 65, 70, 97, 102, 10, 13, 36, + 37, 92, 48, 57, 65, 70, 97, 102, + 10, 13, 36, 37, 92, 48, 57, 65, + 70, 97, 102, 10, 13, 36, 37, 92, + 48, 57, 65, 70, 97, 102, 10, 13, + 36, 37, 92, 48, 57, 65, 70, 97, + 102, 10, 13, 36, 37, 92, 48, 57, + 65, 70, 97, 102, 10, 13, 36, 37, + 92, 48, 57, 65, 70, 97, 102, 10, + 13, 36, 37, 92, 48, 57, 65, 70, + 97, 102, +} + +var _hclstrtok_single_lengths []byte = []byte{ + 0, 0, 0, 0, 4, 4, 4, 4, + 5, 4, 5, 5, 5, 5, 6, 5, + 2, 5, 5, 5, 5, 5, 5, 5, + 5, +} + +var _hclstrtok_range_lengths []byte = []byte{ + 0, 1, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 5, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +var _hclstrtok_index_offsets []byte = []byte{ + 0, 0, 2, 4, 6, 11, 16, 21, + 26, 32, 37, 43, 49, 55, 61, 68, + 74, 82, 91, 100, 109, 118, 127, 136, + 145, +} + +var _hclstrtok_indicies []byte = []byte{ + 0, 1, 2, 1, 3, 1, 5, 6, + 7, 8, 4, 10, 11, 12, 13, 9, + 14, 11, 12, 13, 9, 10, 11, 15, + 13, 9, 10, 11, 12, 13, 14, 9, + 10, 11, 12, 15, 9, 17, 18, 19, + 20, 21, 16, 23, 24, 25, 26, 27, + 22, 0, 24, 25, 26, 27, 22, 23, + 24, 28, 26, 27, 22, 23, 24, 25, + 26, 27, 0, 22, 23, 24, 25, 28, + 27, 22, 29, 30, 22, 2, 3, 31, + 22, 0, 23, 24, 25, 26, 27, 32, + 32, 32, 22, 23, 24, 25, 26, 27, + 33, 33, 33, 22, 23, 24, 25, 26, + 27, 34, 34, 34, 22, 23, 24, 25, + 26, 27, 30, 30, 30, 22, 23, 24, + 25, 26, 27, 35, 35, 35, 22, 23, + 24, 25, 26, 27, 36, 36, 36, 22, + 23, 24, 25, 26, 27, 37, 37, 37, + 22, 23, 24, 25, 26, 27, 0, 0, + 0, 22, +} + +var _hclstrtok_trans_targs []byte = []byte{ + 11, 0, 1, 2, 4, 5, 6, 7, + 9, 4, 5, 6, 7, 9, 5, 8, + 10, 11, 12, 13, 15, 16, 10, 11, + 12, 13, 15, 16, 14, 17, 21, 3, + 18, 19, 20, 22, 23, 24, +} + +var _hclstrtok_trans_actions []byte = []byte{ + 0, 0, 0, 0, 0, 1, 1, 1, + 1, 3, 5, 5, 5, 5, 0, 0, + 0, 1, 1, 1, 1, 1, 3, 5, + 5, 5, 5, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hclstrtok_eof_actions []byte = []byte{ + 0, 0, 0, 0, 0, 3, 3, 3, + 3, 3, 0, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, + 3, +} + +const hclstrtok_start int = 4 +const hclstrtok_first_final int = 4 +const hclstrtok_error int = 0 + +const hclstrtok_en_quoted int = 10 +const hclstrtok_en_unquoted int = 4 + +//line scan_string_lit.rl:10 + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + +//line scan_string_lit.rl:61 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + eof := pe + + var cs int // current state + switch { + case quoted: + cs = hclstrtok_en_quoted + default: + cs = hclstrtok_en_unquoted + } + + // Make Go compiler happy + _ = ts + _ = eof + + /*token := func () { + ret = append(ret, data[ts:te]) + }*/ + +//line scan_string_lit.go:154 + { + } + +//line scan_string_lit.go:158 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } 
+ _resume: + _keys = int(_hclstrtok_key_offsets[cs]) + _trans = int(_hclstrtok_index_offsets[cs]) + + _klen = int(_hclstrtok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen - 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hclstrtok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hclstrtok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hclstrtok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hclstrtok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hclstrtok_indicies[_trans]) + cs = int(_hclstrtok_trans_targs[_trans]) + + if _hclstrtok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hclstrtok_trans_actions[_trans]) + _nacts = uint(_hclstrtok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hclstrtok_actions[_acts-1] { + case 0: +//line scan_string_lit.rl:40 + + // If te is behind p then we've skipped over some literal + // characters which we must now return. + if te < p { + ret = append(ret, data[te:p]) + } + ts = p + + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:253 + } + } + + _again: + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + __acts := _hclstrtok_eof_actions[cs] + __nacts := uint(_hclstrtok_actions[__acts]) + __acts++ + for ; __nacts > 0; __nacts-- { + __acts++ + switch _hclstrtok_actions[__acts-1] { + case 1: +//line scan_string_lit.rl:48 + + te = p + ret = append(ret, data[ts:te]) + +//line scan_string_lit.go:278 + } + } + } + + _out: + { + } + } + +//line scan_string_lit.rl:89 + + if te < p { + // Collect any leftover literal characters at the end of the input + ret = append(ret, data[te:p]) + } + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which should + // be impossible (the scanner matches all bytes _somehow_) but we'll + // tolerate it and let the caller deal with it. + if cs < hclstrtok_first_final { + ret = append(ret, data[p:len(data)]) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl new file mode 100644 index 00000000..f8ac1175 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.rl @@ -0,0 +1,105 @@ + +package hclsyntax + +// This file is generated from scan_string_lit.rl. DO NOT EDIT. +%%{ + # (except you are actually in scan_string_lit.rl here, so edit away!) + + machine hclstrtok; + write data; +}%% + +func scanStringLit(data []byte, quoted bool) [][]byte { + var ret [][]byte + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . 
UTF8Cont
+        );
+        BadUTF8 = any - AnyUTF8;
+
+        Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
+
+        # Our goal with these patterns is to capture user intent as best as
+        # possible, even if the input is invalid. The caller will then verify
+        # whether each token is valid and generate suitable error messages
+        # if not.
+        UnicodeEscapeShort = "\\u" . Hex{0,4};
+        UnicodeEscapeLong = "\\U" . Hex{0,8};
+        UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
+        SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
+        TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
+        Newline = ("\r\n" | "\r" | "\n");
+
+        action Begin {
+            // If te is behind p then we've skipped over some literal
+            // characters which we must now return.
+            if te < p {
+                ret = append(ret, data[te:p])
+            }
+            ts = p;
+        }
+        action End {
+            te = p;
+            ret = append(ret, data[ts:te]);
+        }
+
+        QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
+        UnquotedToken = (TemplateEscape | Newline) >Begin %End;
+        QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
+        UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
+
+        quoted := (QuotedToken | QuotedLiteral)**;
+        unquoted := (UnquotedToken | UnquotedLiteral)**;
+
+    }%%
+
+    // Ragel state
+    p := 0  // "Pointer" into data
+    pe := len(data) // End-of-data "pointer"
+    ts := 0
+    te := 0
+    eof := pe
+
+    var cs int // current state
+    switch {
+    case quoted:
+        cs = hclstrtok_en_quoted
+    default:
+        cs = hclstrtok_en_unquoted
+    }
+
+    // Make Go compiler happy
+    _ = ts
+    _ = eof
+
+    /*token := func () {
+        ret = append(ret, data[ts:te])
+    }*/
+
+    %%{
+        write init nocs;
+        write exec;
+    }%%
+
+    if te < p {
+        // Collect any leftover literal characters at the end of the input
+        ret = append(ret, data[te:p])
+    }
+
+    // If we fall out here without being in a final state then we've
+    // encountered something that the scanner can't match, which should
+    // be impossible (the scanner matches all bytes _somehow_) but we'll
+    // tolerate it and let the caller deal with it.
+    if cs < hclstrtok_first_final {
+        ret = append(ret, data[p:len(data)])
+    }
+
+    return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
new file mode 100644
index 00000000..581e35e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
@@ -0,0 +1,5265 @@
+//line scan_tokens.rl:1
+
+package hclsyntax
+
+import (
+	"bytes"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// This file is generated from scan_tokens.rl. DO NOT EDIT.
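+//
+// Editorial note: these generated tables drive scanTokens, which the entry
+// points in public.go invoke with a scan mode, e.g. (calls copied from
+// public.go):
+//
+//	tokens := scanTokens(src, filename, start, scanNormal)   // LexConfig, LexExpression
+//	tokens := scanTokens(src, filename, start, scanTemplate) // LexTemplate
+//	tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly) // ValidIdentifier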
+ +//line scan_tokens.go:15 +var _hcltok_actions []byte = []byte{ + 0, 1, 0, 1, 1, 1, 3, 1, 4, + 1, 7, 1, 8, 1, 9, 1, 10, + 1, 11, 1, 12, 1, 13, 1, 14, + 1, 15, 1, 16, 1, 17, 1, 18, + 1, 19, 1, 20, 1, 23, 1, 24, + 1, 25, 1, 26, 1, 27, 1, 28, + 1, 29, 1, 30, 1, 31, 1, 32, + 1, 35, 1, 36, 1, 37, 1, 38, + 1, 39, 1, 40, 1, 41, 1, 42, + 1, 43, 1, 44, 1, 47, 1, 48, + 1, 49, 1, 50, 1, 51, 1, 52, + 1, 53, 1, 56, 1, 57, 1, 58, + 1, 59, 1, 60, 1, 61, 1, 62, + 1, 63, 1, 64, 1, 65, 1, 66, + 1, 67, 1, 68, 1, 69, 1, 70, + 1, 71, 1, 72, 1, 73, 1, 74, + 1, 75, 1, 76, 1, 77, 1, 78, + 1, 79, 1, 80, 1, 81, 1, 82, + 1, 83, 1, 84, 1, 85, 2, 0, + 14, 2, 0, 25, 2, 0, 29, 2, + 0, 37, 2, 0, 41, 2, 1, 2, + 2, 4, 5, 2, 4, 6, 2, 4, + 21, 2, 4, 22, 2, 4, 33, 2, + 4, 34, 2, 4, 45, 2, 4, 46, + 2, 4, 54, 2, 4, 55, +} + +var _hcltok_key_offsets []int16 = []int16{ + 0, 0, 1, 2, 4, 9, 13, 15, + 57, 98, 144, 145, 149, 155, 155, 157, + 159, 168, 174, 181, 182, 185, 186, 190, + 195, 204, 208, 212, 220, 222, 224, 226, + 229, 261, 263, 265, 269, 273, 276, 287, + 300, 319, 332, 348, 360, 376, 391, 412, + 422, 434, 445, 459, 474, 484, 496, 505, + 517, 519, 523, 544, 553, 563, 569, 575, + 576, 625, 627, 631, 633, 639, 646, 654, + 661, 664, 670, 674, 678, 680, 684, 688, + 692, 698, 706, 714, 720, 722, 726, 728, + 734, 738, 742, 746, 750, 755, 762, 768, + 770, 772, 776, 778, 784, 788, 792, 802, + 807, 821, 836, 838, 846, 848, 853, 867, + 872, 874, 878, 879, 883, 889, 895, 905, + 915, 926, 934, 937, 940, 944, 948, 950, + 953, 953, 956, 958, 988, 990, 992, 996, + 1001, 1005, 1010, 1012, 1014, 1016, 1025, 1029, + 1033, 1039, 1041, 1049, 1057, 1069, 1072, 1078, + 1082, 1084, 1088, 1108, 1110, 1112, 1123, 1129, + 1131, 1133, 1135, 1139, 1145, 1151, 1153, 1158, + 1162, 1164, 1172, 1190, 1230, 1240, 1244, 1246, + 1248, 1249, 1253, 1257, 1261, 1265, 1269, 1274, + 1278, 1282, 1286, 1288, 1290, 1294, 1304, 1308, + 1310, 1314, 1318, 1322, 1335, 1337, 1339, 1343, + 1345, 1349, 1351, 1353, 1383, 1387, 1391, 1395, + 1398, 1405, 1410, 1421, 1425, 1441, 1455, 1459, + 1464, 1468, 1472, 1478, 1480, 1486, 1488, 1492, + 1494, 1500, 1505, 1510, 1520, 1522, 1524, 1528, + 1532, 1534, 1547, 1549, 1553, 1557, 1565, 1567, + 1571, 1573, 1574, 1577, 1582, 1584, 1586, 1590, + 1592, 1596, 1602, 1622, 1628, 1634, 1636, 1637, + 1647, 1648, 1656, 1663, 1665, 1668, 1670, 1672, + 1674, 1679, 1683, 1687, 1692, 1702, 1712, 1716, + 1720, 1734, 1760, 1770, 1772, 1774, 1777, 1779, + 1782, 1784, 1788, 1790, 1791, 1795, 1797, 1800, + 1807, 1815, 1817, 1819, 1823, 1825, 1831, 1842, + 1845, 1847, 1851, 1856, 1886, 1891, 1893, 1896, + 1901, 1915, 1922, 1936, 1941, 1954, 1958, 1971, + 1976, 1994, 1995, 2004, 2008, 2020, 2025, 2032, + 2039, 2046, 2048, 2052, 2074, 2079, 2080, 2084, + 2086, 2136, 2139, 2150, 2154, 2156, 2162, 2168, + 2170, 2175, 2177, 2181, 2183, 2184, 2186, 2188, + 2194, 2196, 2198, 2202, 2208, 2221, 2223, 2229, + 2233, 2241, 2252, 2260, 2263, 2293, 2299, 2302, + 2307, 2309, 2313, 2317, 2321, 2323, 2330, 2332, + 2341, 2348, 2356, 2358, 2378, 2390, 2394, 2396, + 2414, 2453, 2455, 2459, 2461, 2468, 2472, 2500, + 2502, 2504, 2506, 2508, 2511, 2513, 2517, 2521, + 2523, 2526, 2528, 2530, 2533, 2535, 2537, 2538, + 2540, 2542, 2546, 2550, 2553, 2566, 2568, 2574, + 2578, 2580, 2584, 2588, 2602, 2605, 2614, 2616, + 2620, 2626, 2626, 2628, 2630, 2639, 2645, 2652, + 2653, 2656, 2657, 2661, 2666, 2675, 2679, 2683, + 2691, 2693, 2695, 2697, 2700, 2732, 2734, 2736, + 2740, 2744, 2747, 2758, 2771, 2790, 2803, 2819, + 2831, 2847, 2862, 2883, 2893, 2905, 2916, 2930, + 2945, 2955, 
2967, 2976, 2988, 2990, 2994, 3015, + 3024, 3034, 3040, 3046, 3047, 3096, 3098, 3102, + 3104, 3110, 3117, 3125, 3132, 3135, 3141, 3145, + 3149, 3151, 3155, 3159, 3163, 3169, 3177, 3185, + 3191, 3193, 3197, 3199, 3205, 3209, 3213, 3217, + 3221, 3226, 3233, 3239, 3241, 3243, 3247, 3249, + 3255, 3259, 3263, 3273, 3278, 3292, 3307, 3309, + 3317, 3319, 3324, 3338, 3343, 3345, 3349, 3350, + 3354, 3360, 3366, 3376, 3386, 3397, 3405, 3408, + 3411, 3415, 3419, 3421, 3424, 3424, 3427, 3429, + 3459, 3461, 3463, 3467, 3472, 3476, 3481, 3483, + 3485, 3487, 3496, 3500, 3504, 3510, 3512, 3520, + 3528, 3540, 3543, 3549, 3553, 3555, 3559, 3579, + 3581, 3583, 3594, 3600, 3602, 3604, 3606, 3610, + 3616, 3622, 3624, 3629, 3633, 3635, 3643, 3661, + 3701, 3711, 3715, 3717, 3719, 3720, 3724, 3728, + 3732, 3736, 3740, 3745, 3749, 3753, 3757, 3759, + 3761, 3765, 3775, 3779, 3781, 3785, 3789, 3793, + 3806, 3808, 3810, 3814, 3816, 3820, 3822, 3824, + 3854, 3858, 3862, 3866, 3869, 3876, 3881, 3892, + 3896, 3912, 3926, 3930, 3935, 3939, 3943, 3949, + 3951, 3957, 3959, 3963, 3965, 3971, 3976, 3981, + 3991, 3993, 3995, 3999, 4003, 4005, 4018, 4020, + 4024, 4028, 4036, 4038, 4042, 4044, 4045, 4048, + 4053, 4055, 4057, 4061, 4063, 4067, 4073, 4093, + 4099, 4105, 4107, 4108, 4118, 4119, 4127, 4134, + 4136, 4139, 4141, 4143, 4145, 4150, 4154, 4158, + 4163, 4173, 4183, 4187, 4191, 4205, 4231, 4241, + 4243, 4245, 4248, 4250, 4253, 4255, 4259, 4261, + 4262, 4266, 4268, 4270, 4277, 4281, 4288, 4295, + 4304, 4320, 4332, 4350, 4361, 4373, 4381, 4399, + 4407, 4437, 4440, 4450, 4460, 4472, 4483, 4492, + 4505, 4517, 4521, 4527, 4554, 4563, 4566, 4571, + 4577, 4582, 4603, 4607, 4613, 4613, 4620, 4629, + 4637, 4640, 4644, 4650, 4656, 4659, 4663, 4670, + 4676, 4685, 4694, 4698, 4702, 4706, 4710, 4717, + 4721, 4725, 4735, 4741, 4745, 4751, 4755, 4758, + 4764, 4770, 4782, 4786, 4790, 4800, 4804, 4815, + 4817, 4819, 4823, 4835, 4840, 4864, 4868, 4874, + 4896, 4905, 4909, 4912, 4913, 4921, 4929, 4935, + 4945, 4952, 4970, 4973, 4976, 4984, 4990, 4994, + 4998, 5002, 5008, 5016, 5021, 5027, 5031, 5039, + 5046, 5050, 5057, 5063, 5071, 5079, 5085, 5091, + 5102, 5106, 5118, 5127, 5144, 5161, 5164, 5168, + 5170, 5176, 5178, 5182, 5197, 5201, 5205, 5209, + 5213, 5217, 5219, 5225, 5230, 5234, 5240, 5247, + 5250, 5268, 5270, 5315, 5321, 5327, 5331, 5335, + 5341, 5345, 5351, 5357, 5364, 5366, 5372, 5378, + 5382, 5386, 5394, 5407, 5413, 5420, 5428, 5434, + 5443, 5449, 5453, 5458, 5462, 5470, 5474, 5478, + 5508, 5514, 5520, 5526, 5532, 5539, 5545, 5552, + 5557, 5567, 5571, 5578, 5584, 5588, 5595, 5599, + 5605, 5608, 5612, 5616, 5620, 5624, 5629, 5634, + 5638, 5649, 5653, 5657, 5663, 5671, 5675, 5692, + 5696, 5702, 5712, 5718, 5724, 5727, 5732, 5741, + 5745, 5749, 5755, 5759, 5765, 5773, 5791, 5792, + 5802, 5803, 5812, 5820, 5822, 5825, 5827, 5829, + 5831, 5836, 5849, 5853, 5868, 5897, 5908, 5910, + 5914, 5918, 5923, 5927, 5929, 5936, 5940, 5948, + 5952, 5964, 5966, 5968, 5970, 5972, 5974, 5975, + 5977, 5979, 5981, 5983, 5985, 5986, 5988, 5990, + 5992, 5994, 5996, 6000, 6006, 6006, 6008, 6010, + 6019, 6025, 6032, 6033, 6036, 6037, 6041, 6046, + 6055, 6059, 6063, 6071, 6073, 6075, 6077, 6080, + 6112, 6114, 6116, 6120, 6124, 6127, 6138, 6151, + 6170, 6183, 6199, 6211, 6227, 6242, 6263, 6273, + 6285, 6296, 6310, 6325, 6335, 6347, 6356, 6368, + 6370, 6374, 6395, 6404, 6414, 6420, 6426, 6427, + 6476, 6478, 6482, 6484, 6490, 6497, 6505, 6512, + 6515, 6521, 6525, 6529, 6531, 6535, 6539, 6543, + 6549, 6557, 6565, 6571, 6573, 6577, 6579, 6585, + 6589, 6593, 
6597, 6601, 6606, 6613, 6619, 6621, + 6623, 6627, 6629, 6635, 6639, 6643, 6653, 6658, + 6672, 6687, 6689, 6697, 6699, 6704, 6718, 6723, + 6725, 6729, 6730, 6734, 6740, 6746, 6756, 6766, + 6777, 6785, 6788, 6791, 6795, 6799, 6801, 6804, + 6804, 6807, 6809, 6839, 6841, 6843, 6847, 6852, + 6856, 6861, 6863, 6865, 6867, 6876, 6880, 6884, + 6890, 6892, 6900, 6908, 6920, 6923, 6929, 6933, + 6935, 6939, 6959, 6961, 6963, 6974, 6980, 6982, + 6984, 6986, 6990, 6996, 7002, 7004, 7009, 7013, + 7015, 7023, 7041, 7081, 7091, 7095, 7097, 7099, + 7100, 7104, 7108, 7112, 7116, 7120, 7125, 7129, + 7133, 7137, 7139, 7141, 7145, 7155, 7159, 7161, + 7165, 7169, 7173, 7186, 7188, 7190, 7194, 7196, + 7200, 7202, 7204, 7234, 7238, 7242, 7246, 7249, + 7256, 7261, 7272, 7276, 7292, 7306, 7310, 7315, + 7319, 7323, 7329, 7331, 7337, 7339, 7343, 7345, + 7351, 7356, 7361, 7371, 7373, 7375, 7379, 7383, + 7385, 7398, 7400, 7404, 7408, 7416, 7418, 7422, + 7424, 7425, 7428, 7433, 7435, 7437, 7441, 7443, + 7447, 7453, 7473, 7479, 7485, 7487, 7488, 7498, + 7499, 7507, 7514, 7516, 7519, 7521, 7523, 7525, + 7530, 7534, 7538, 7543, 7553, 7563, 7567, 7571, + 7585, 7611, 7621, 7623, 7625, 7628, 7630, 7633, + 7635, 7639, 7641, 7642, 7646, 7648, 7650, 7657, + 7661, 7668, 7675, 7684, 7700, 7712, 7730, 7741, + 7753, 7761, 7779, 7787, 7817, 7820, 7830, 7840, + 7852, 7863, 7872, 7885, 7897, 7901, 7907, 7934, + 7943, 7946, 7951, 7957, 7962, 7983, 7987, 7993, + 7993, 8000, 8009, 8017, 8020, 8024, 8030, 8036, + 8039, 8043, 8050, 8056, 8065, 8074, 8078, 8082, + 8086, 8090, 8097, 8101, 8105, 8115, 8121, 8125, + 8131, 8135, 8138, 8144, 8150, 8162, 8166, 8170, + 8180, 8184, 8195, 8197, 8199, 8203, 8215, 8220, + 8244, 8248, 8254, 8276, 8285, 8289, 8292, 8293, + 8301, 8309, 8315, 8325, 8332, 8350, 8353, 8356, + 8364, 8370, 8374, 8378, 8382, 8388, 8396, 8401, + 8407, 8411, 8419, 8426, 8430, 8437, 8443, 8451, + 8459, 8465, 8471, 8482, 8486, 8498, 8507, 8524, + 8541, 8544, 8548, 8550, 8556, 8558, 8562, 8577, + 8581, 8585, 8589, 8593, 8597, 8599, 8605, 8610, + 8614, 8620, 8627, 8630, 8648, 8650, 8695, 8701, + 8707, 8711, 8715, 8721, 8725, 8731, 8737, 8744, + 8746, 8752, 8758, 8762, 8766, 8774, 8787, 8793, + 8800, 8808, 8814, 8823, 8829, 8833, 8838, 8842, + 8850, 8854, 8858, 8888, 8894, 8900, 8906, 8912, + 8919, 8925, 8932, 8937, 8947, 8951, 8958, 8964, + 8968, 8975, 8979, 8985, 8988, 8992, 8996, 9000, + 9004, 9009, 9014, 9018, 9029, 9033, 9037, 9043, + 9051, 9055, 9072, 9076, 9082, 9092, 9098, 9104, + 9107, 9112, 9121, 9125, 9129, 9135, 9139, 9145, + 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202, + 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248, + 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309, + 9316, 9320, 9328, 9332, 9407, 9409, 9410, 9411, + 9412, 9413, 9414, 9416, 9421, 9423, 9425, 9426, + 9470, 9471, 9472, 9474, 9479, 9483, 9483, 9485, + 9487, 9498, 9508, 9516, 9517, 9519, 9520, 9524, + 9528, 9538, 9542, 9549, 9560, 9567, 9571, 9577, + 9588, 9620, 9669, 9684, 9699, 9704, 9706, 9711, + 9743, 9751, 9753, 9775, 9797, 9799, 9815, 9831, + 9833, 9835, 9835, 9836, 9837, 9838, 9840, 9841, + 9853, 9855, 9857, 9859, 9873, 9887, 9889, 9892, + 9895, 9897, 9898, 9899, 9901, 9903, 9905, 9919, + 9933, 9935, 9938, 9941, 9943, 9944, 9945, 9947, + 9949, 9951, 10000, 10044, 10046, 10051, 10055, 10055, + 10057, 10059, 10070, 10080, 10088, 10089, 10091, 10092, + 10096, 10100, 10110, 10114, 10121, 10132, 10139, 10143, + 10149, 10160, 10192, 10241, 10256, 10271, 10276, 10278, + 10283, 10315, 10323, 10325, 10347, 10369, +} + +var _hcltok_trans_keys []byte = 
[]byte{ + 46, 42, 42, 47, 46, 69, 101, 48, + 57, 43, 45, 48, 57, 48, 57, 45, + 95, 194, 195, 198, 199, 203, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 239, 240, 65, + 90, 97, 122, 196, 202, 208, 218, 229, + 236, 95, 194, 195, 198, 199, 203, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 239, 240, + 65, 90, 97, 122, 196, 202, 208, 218, + 229, 236, 10, 13, 45, 95, 194, 195, + 198, 199, 203, 204, 205, 206, 207, 210, + 212, 213, 214, 215, 216, 217, 219, 220, + 221, 222, 223, 224, 225, 226, 227, 228, + 233, 234, 237, 239, 240, 243, 48, 57, + 65, 90, 97, 122, 196, 218, 229, 236, + 10, 170, 181, 183, 186, 128, 150, 152, + 182, 184, 255, 192, 255, 128, 255, 173, + 130, 133, 146, 159, 165, 171, 175, 255, + 181, 190, 184, 185, 192, 255, 140, 134, + 138, 142, 161, 163, 255, 182, 130, 136, + 137, 176, 151, 152, 154, 160, 190, 136, + 144, 192, 255, 135, 129, 130, 132, 133, + 144, 170, 176, 178, 144, 154, 160, 191, + 128, 169, 174, 255, 148, 169, 157, 158, + 189, 190, 192, 255, 144, 255, 139, 140, + 178, 255, 186, 128, 181, 160, 161, 162, + 163, 164, 165, 166, 167, 168, 169, 170, + 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, + 187, 188, 189, 190, 191, 128, 173, 128, + 155, 160, 180, 182, 189, 148, 161, 163, + 255, 176, 164, 165, 132, 169, 177, 141, + 142, 145, 146, 179, 181, 186, 187, 158, + 133, 134, 137, 138, 143, 150, 152, 155, + 164, 165, 178, 255, 188, 129, 131, 133, + 138, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 182, 184, 185, 190, 255, 157, + 131, 134, 137, 138, 142, 144, 146, 152, + 159, 165, 182, 255, 129, 131, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 255, 134, 138, 142, 143, + 145, 159, 164, 165, 176, 184, 186, 255, + 129, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 177, 128, 132, 135, 136, 139, 141, 150, + 151, 156, 157, 159, 163, 166, 175, 156, + 130, 131, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 190, 191, 144, 151, 128, 130, + 134, 136, 138, 141, 166, 175, 128, 131, + 133, 140, 142, 144, 146, 168, 170, 185, + 189, 255, 133, 137, 151, 142, 148, 155, + 159, 164, 165, 176, 255, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 179, 181, + 185, 188, 191, 158, 128, 132, 134, 136, + 138, 141, 149, 150, 160, 163, 166, 175, + 177, 178, 129, 131, 133, 140, 142, 144, + 146, 186, 189, 255, 133, 137, 143, 147, + 152, 158, 164, 165, 176, 185, 192, 255, + 189, 130, 131, 133, 150, 154, 177, 179, + 187, 138, 150, 128, 134, 143, 148, 152, + 159, 166, 175, 178, 179, 129, 186, 128, + 142, 144, 153, 132, 138, 141, 165, 167, + 129, 130, 135, 136, 148, 151, 153, 159, + 161, 163, 170, 171, 173, 185, 187, 189, + 134, 128, 132, 136, 141, 144, 153, 156, + 159, 128, 181, 183, 185, 152, 153, 160, + 169, 190, 191, 128, 135, 137, 172, 177, + 191, 128, 132, 134, 151, 153, 188, 134, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 173, 175, + 176, 177, 178, 179, 181, 182, 183, 188, + 189, 190, 191, 132, 152, 172, 184, 185, + 187, 128, 191, 128, 137, 144, 255, 158, + 159, 134, 187, 136, 140, 142, 143, 137, + 151, 153, 142, 143, 158, 159, 137, 177, + 142, 143, 182, 183, 191, 255, 128, 130, + 133, 136, 150, 152, 255, 145, 150, 151, + 155, 156, 160, 168, 178, 255, 128, 143, + 160, 255, 182, 183, 190, 255, 129, 255, + 173, 
174, 192, 255, 129, 154, 160, 255, + 171, 173, 185, 255, 128, 140, 142, 148, + 160, 180, 128, 147, 160, 172, 174, 176, + 178, 179, 148, 150, 152, 155, 158, 159, + 170, 255, 139, 141, 144, 153, 160, 255, + 184, 255, 128, 170, 176, 255, 182, 255, + 128, 158, 160, 171, 176, 187, 134, 173, + 176, 180, 128, 171, 176, 255, 138, 143, + 155, 255, 128, 155, 160, 255, 159, 189, + 190, 192, 255, 167, 128, 137, 144, 153, + 176, 189, 140, 143, 154, 170, 180, 255, + 180, 255, 128, 183, 128, 137, 141, 189, + 128, 136, 144, 146, 148, 182, 184, 185, + 128, 181, 187, 191, 150, 151, 158, 159, + 152, 154, 156, 158, 134, 135, 142, 143, + 190, 255, 190, 128, 180, 182, 188, 130, + 132, 134, 140, 144, 147, 150, 155, 160, + 172, 178, 180, 182, 188, 128, 129, 130, + 131, 132, 133, 134, 176, 177, 178, 179, + 180, 181, 182, 183, 191, 255, 129, 147, + 149, 176, 178, 190, 192, 255, 144, 156, + 161, 144, 156, 165, 176, 130, 135, 149, + 164, 166, 168, 138, 147, 152, 157, 170, + 185, 188, 191, 142, 133, 137, 160, 255, + 137, 255, 128, 174, 176, 255, 159, 165, + 170, 180, 255, 167, 173, 128, 165, 176, + 255, 168, 174, 176, 190, 192, 255, 128, + 150, 160, 166, 168, 174, 176, 182, 184, + 190, 128, 134, 136, 142, 144, 150, 152, + 158, 160, 191, 128, 129, 130, 131, 132, + 133, 134, 135, 144, 145, 255, 133, 135, + 161, 175, 177, 181, 184, 188, 160, 151, + 152, 187, 192, 255, 133, 173, 177, 255, + 143, 159, 187, 255, 176, 191, 182, 183, + 184, 191, 192, 255, 150, 255, 128, 146, + 147, 148, 152, 153, 154, 155, 156, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 129, 255, 141, 255, 144, 189, + 141, 143, 172, 255, 191, 128, 175, 180, + 189, 151, 159, 162, 255, 175, 137, 138, + 184, 255, 183, 255, 168, 255, 128, 179, + 188, 134, 143, 154, 159, 184, 186, 190, + 255, 128, 173, 176, 255, 148, 159, 189, + 255, 129, 142, 154, 159, 191, 255, 128, + 182, 128, 141, 144, 153, 160, 182, 186, + 255, 128, 130, 155, 157, 160, 175, 178, + 182, 129, 134, 137, 142, 145, 150, 160, + 166, 168, 174, 176, 255, 155, 166, 175, + 128, 170, 172, 173, 176, 185, 158, 159, + 160, 255, 164, 175, 135, 138, 188, 255, + 164, 169, 171, 172, 173, 174, 175, 180, + 181, 182, 183, 184, 185, 187, 188, 189, + 190, 191, 165, 186, 174, 175, 154, 255, + 190, 128, 134, 147, 151, 157, 168, 170, + 182, 184, 188, 128, 129, 131, 132, 134, + 255, 147, 255, 190, 255, 144, 145, 136, + 175, 188, 255, 128, 143, 160, 175, 179, + 180, 141, 143, 176, 180, 182, 255, 189, + 255, 191, 144, 153, 161, 186, 129, 154, + 166, 255, 191, 255, 130, 135, 138, 143, + 146, 151, 154, 156, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 135, 138, 139, 140, 141, + 142, 143, 144, 145, 146, 147, 148, 149, + 152, 156, 157, 160, 161, 162, 163, 164, + 166, 168, 169, 170, 171, 172, 173, 174, + 176, 177, 153, 155, 178, 179, 128, 139, + 141, 166, 168, 186, 188, 189, 191, 255, + 142, 143, 158, 255, 187, 255, 128, 180, + 189, 128, 156, 160, 255, 145, 159, 161, + 255, 128, 159, 176, 255, 139, 143, 187, + 255, 128, 157, 160, 255, 144, 132, 135, + 150, 255, 158, 159, 170, 175, 148, 151, + 188, 255, 128, 167, 176, 255, 164, 255, + 183, 255, 128, 149, 160, 167, 136, 188, + 128, 133, 138, 181, 183, 184, 191, 255, + 150, 159, 183, 255, 128, 158, 160, 178, + 180, 181, 128, 149, 160, 185, 128, 183, + 190, 191, 191, 128, 131, 133, 134, 140, + 147, 149, 151, 153, 179, 184, 186, 160, + 188, 128, 156, 128, 135, 137, 166, 128, + 181, 128, 149, 160, 178, 128, 145, 128, + 178, 129, 130, 131, 132, 133, 
135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 155, 156, 162, + 163, 171, 176, 177, 178, 128, 134, 135, + 165, 176, 190, 144, 168, 176, 185, 128, + 180, 182, 191, 182, 144, 179, 155, 133, + 137, 141, 143, 157, 255, 190, 128, 145, + 147, 183, 136, 128, 134, 138, 141, 143, + 157, 159, 168, 176, 255, 171, 175, 186, + 255, 128, 131, 133, 140, 143, 144, 147, + 168, 170, 176, 178, 179, 181, 185, 188, + 191, 144, 151, 128, 132, 135, 136, 139, + 141, 157, 163, 166, 172, 176, 180, 128, + 138, 144, 153, 134, 136, 143, 154, 255, + 128, 181, 184, 255, 129, 151, 158, 255, + 129, 131, 133, 143, 154, 255, 128, 137, + 128, 153, 157, 171, 176, 185, 160, 255, + 170, 190, 192, 255, 128, 184, 128, 136, + 138, 182, 184, 191, 128, 144, 153, 178, + 255, 168, 144, 145, 183, 255, 128, 142, + 145, 149, 129, 141, 144, 146, 147, 148, + 175, 255, 132, 255, 128, 144, 129, 143, + 144, 153, 145, 152, 135, 255, 160, 168, + 169, 171, 172, 173, 174, 188, 189, 190, + 191, 161, 167, 185, 255, 128, 158, 160, + 169, 144, 173, 176, 180, 128, 131, 144, + 153, 163, 183, 189, 255, 144, 255, 133, + 143, 191, 255, 143, 159, 160, 128, 129, + 255, 159, 160, 171, 172, 255, 173, 255, + 179, 255, 128, 176, 177, 178, 128, 129, + 171, 175, 189, 255, 128, 136, 144, 153, + 157, 158, 133, 134, 137, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 168, 169, 170, 150, 153, 165, 169, + 173, 178, 187, 255, 131, 132, 140, 169, + 174, 255, 130, 132, 149, 157, 173, 186, + 188, 160, 161, 163, 164, 167, 168, 132, + 134, 149, 157, 186, 139, 140, 191, 255, + 134, 128, 132, 138, 144, 146, 255, 166, + 167, 129, 155, 187, 149, 181, 143, 175, + 137, 169, 131, 140, 141, 192, 255, 128, + 182, 187, 255, 173, 180, 182, 255, 132, + 155, 159, 161, 175, 128, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 128, 134, + 136, 152, 155, 161, 163, 164, 166, 170, + 133, 143, 151, 255, 139, 143, 154, 255, + 164, 167, 185, 187, 128, 131, 133, 159, + 161, 162, 169, 178, 180, 183, 130, 135, + 137, 139, 148, 151, 153, 155, 157, 159, + 164, 190, 141, 143, 145, 146, 161, 162, + 167, 170, 172, 178, 180, 183, 185, 188, + 128, 137, 139, 155, 161, 163, 165, 169, + 171, 187, 155, 156, 151, 255, 156, 157, + 160, 181, 255, 186, 187, 255, 162, 255, + 160, 168, 161, 167, 158, 255, 160, 132, + 135, 133, 134, 176, 255, 170, 181, 186, + 191, 176, 180, 182, 183, 186, 189, 134, + 140, 136, 138, 142, 161, 163, 255, 130, + 137, 136, 255, 144, 170, 176, 178, 160, + 191, 128, 138, 174, 175, 177, 255, 148, + 150, 164, 167, 173, 176, 185, 189, 190, + 192, 255, 144, 146, 175, 141, 255, 166, + 176, 178, 255, 186, 138, 170, 180, 181, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 184, 186, + 187, 188, 189, 190, 183, 185, 154, 164, + 168, 128, 149, 128, 152, 189, 132, 185, + 144, 152, 161, 177, 255, 169, 177, 129, + 132, 141, 142, 145, 146, 179, 181, 186, + 188, 190, 255, 142, 156, 157, 159, 161, + 176, 177, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 158, 153, 156, 178, 180, 189, 133, 141, + 143, 145, 147, 168, 170, 176, 178, 179, + 181, 185, 144, 185, 160, 161, 189, 133, + 140, 143, 144, 147, 168, 170, 176, 178, + 179, 181, 185, 177, 156, 157, 159, 161, + 131, 156, 133, 138, 142, 144, 146, 149, + 153, 154, 158, 159, 163, 164, 168, 170, + 174, 185, 144, 189, 133, 140, 142, 144, + 146, 168, 170, 185, 152, 154, 160, 161, + 128, 189, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 158, 160, 161, 177, + 178, 189, 133, 140, 142, 144, 146, 186, + 142, 148, 150, 
159, 161, 186, 191, 189, + 133, 150, 154, 177, 179, 187, 128, 134, + 129, 176, 178, 179, 132, 138, 141, 165, + 167, 189, 129, 130, 135, 136, 148, 151, + 153, 159, 161, 163, 170, 171, 173, 176, + 178, 179, 134, 128, 132, 156, 159, 128, + 128, 135, 137, 172, 136, 140, 128, 129, + 130, 131, 137, 138, 139, 140, 141, 142, + 143, 144, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 170, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 184, + 188, 189, 190, 191, 132, 152, 185, 187, + 191, 128, 170, 161, 144, 149, 154, 157, + 165, 166, 174, 176, 181, 255, 130, 141, + 143, 159, 155, 255, 128, 140, 142, 145, + 160, 177, 128, 145, 160, 172, 174, 176, + 151, 156, 170, 128, 168, 176, 255, 138, + 255, 128, 150, 160, 255, 149, 255, 167, + 133, 179, 133, 139, 131, 160, 174, 175, + 186, 255, 166, 255, 128, 163, 141, 143, + 154, 189, 169, 172, 174, 177, 181, 182, + 129, 130, 132, 133, 134, 176, 177, 178, + 179, 180, 181, 182, 183, 177, 191, 165, + 170, 175, 177, 180, 255, 168, 174, 176, + 255, 128, 134, 136, 142, 144, 150, 152, + 158, 128, 129, 130, 131, 132, 133, 134, + 135, 144, 145, 255, 133, 135, 161, 169, + 177, 181, 184, 188, 160, 151, 154, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 255, 141, 143, 160, + 169, 172, 255, 191, 128, 174, 130, 134, + 139, 163, 255, 130, 179, 187, 189, 178, + 183, 138, 165, 176, 255, 135, 159, 189, + 255, 132, 178, 143, 160, 164, 166, 175, + 186, 190, 128, 168, 186, 128, 130, 132, + 139, 160, 182, 190, 255, 176, 178, 180, + 183, 184, 190, 255, 128, 130, 155, 157, + 160, 170, 178, 180, 128, 162, 164, 169, + 171, 172, 173, 174, 175, 180, 181, 182, + 183, 185, 186, 187, 188, 189, 190, 191, + 165, 179, 157, 190, 128, 134, 147, 151, + 159, 168, 170, 182, 184, 188, 176, 180, + 182, 255, 161, 186, 144, 145, 146, 147, + 148, 150, 151, 152, 155, 157, 158, 160, + 170, 171, 172, 175, 161, 169, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 145, 255, 139, + 143, 182, 255, 158, 175, 128, 144, 147, + 149, 151, 153, 179, 128, 135, 137, 164, + 128, 130, 131, 132, 133, 134, 135, 136, + 138, 139, 140, 141, 144, 145, 146, 147, + 150, 151, 152, 153, 154, 156, 162, 163, + 171, 176, 177, 178, 131, 183, 131, 175, + 144, 168, 131, 166, 182, 144, 178, 131, + 178, 154, 156, 129, 132, 128, 145, 147, + 171, 159, 255, 144, 157, 161, 135, 138, + 128, 175, 135, 132, 133, 128, 174, 152, + 155, 132, 128, 170, 128, 153, 160, 190, + 192, 255, 128, 136, 138, 174, 128, 178, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 144, 173, + 128, 131, 163, 183, 189, 255, 133, 143, + 145, 255, 147, 159, 128, 176, 177, 178, + 128, 136, 144, 153, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 150, 153, 131, 140, 255, 160, 163, 164, + 165, 184, 185, 186, 161, 162, 133, 255, + 170, 181, 183, 186, 128, 150, 152, 182, + 184, 255, 192, 255, 128, 255, 173, 130, + 133, 146, 159, 165, 171, 175, 255, 181, + 190, 184, 185, 192, 255, 140, 134, 138, + 142, 161, 163, 255, 182, 130, 136, 137, + 176, 151, 152, 154, 160, 190, 136, 144, + 192, 255, 135, 129, 130, 132, 133, 144, + 170, 176, 178, 144, 154, 160, 191, 128, + 169, 174, 255, 148, 169, 157, 158, 189, + 190, 192, 255, 144, 255, 139, 140, 178, + 255, 186, 128, 181, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 
172, 173, 174, 175, 176, 177, 178, 179, + 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, 190, 191, 128, 173, 128, 155, + 160, 180, 182, 189, 148, 161, 163, 255, + 176, 164, 165, 132, 169, 177, 141, 142, + 145, 146, 179, 181, 186, 187, 158, 133, + 134, 137, 138, 143, 150, 152, 155, 164, + 165, 178, 255, 188, 129, 131, 133, 138, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 182, 184, 185, 190, 255, 157, 131, + 134, 137, 138, 142, 144, 146, 152, 159, + 165, 182, 255, 129, 131, 133, 141, 143, + 145, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 255, 134, 138, 142, 143, 145, + 159, 164, 165, 176, 184, 186, 255, 129, + 131, 133, 140, 143, 144, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 191, 177, + 128, 132, 135, 136, 139, 141, 150, 151, + 156, 157, 159, 163, 166, 175, 156, 130, + 131, 133, 138, 142, 144, 146, 149, 153, + 154, 158, 159, 163, 164, 168, 170, 174, + 185, 190, 191, 144, 151, 128, 130, 134, + 136, 138, 141, 166, 175, 128, 131, 133, + 140, 142, 144, 146, 168, 170, 185, 189, + 255, 133, 137, 151, 142, 148, 155, 159, + 164, 165, 176, 255, 128, 131, 133, 140, + 142, 144, 146, 168, 170, 179, 181, 185, + 188, 191, 158, 128, 132, 134, 136, 138, + 141, 149, 150, 160, 163, 166, 175, 177, + 178, 129, 131, 133, 140, 142, 144, 146, + 186, 189, 255, 133, 137, 143, 147, 152, + 158, 164, 165, 176, 185, 192, 255, 189, + 130, 131, 133, 150, 154, 177, 179, 187, + 138, 150, 128, 134, 143, 148, 152, 159, + 166, 175, 178, 179, 129, 186, 128, 142, + 144, 153, 132, 138, 141, 165, 167, 129, + 130, 135, 136, 148, 151, 153, 159, 161, + 163, 170, 171, 173, 185, 187, 189, 134, + 128, 132, 136, 141, 144, 153, 156, 159, + 128, 181, 183, 185, 152, 153, 160, 169, + 190, 191, 128, 135, 137, 172, 177, 191, + 128, 132, 134, 151, 153, 188, 134, 128, + 129, 130, 131, 137, 138, 139, 140, 141, + 142, 143, 144, 153, 154, 155, 156, 157, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 173, 175, 176, + 177, 178, 179, 181, 182, 183, 188, 189, + 190, 191, 132, 152, 172, 184, 185, 187, + 128, 191, 128, 137, 144, 255, 158, 159, + 134, 187, 136, 140, 142, 143, 137, 151, + 153, 142, 143, 158, 159, 137, 177, 142, + 143, 182, 183, 191, 255, 128, 130, 133, + 136, 150, 152, 255, 145, 150, 151, 155, + 156, 160, 168, 178, 255, 128, 143, 160, + 255, 182, 183, 190, 255, 129, 255, 173, + 174, 192, 255, 129, 154, 160, 255, 171, + 173, 185, 255, 128, 140, 142, 148, 160, + 180, 128, 147, 160, 172, 174, 176, 178, + 179, 148, 150, 152, 155, 158, 159, 170, + 255, 139, 141, 144, 153, 160, 255, 184, + 255, 128, 170, 176, 255, 182, 255, 128, + 158, 160, 171, 176, 187, 134, 173, 176, + 180, 128, 171, 176, 255, 138, 143, 155, + 255, 128, 155, 160, 255, 159, 189, 190, + 192, 255, 167, 128, 137, 144, 153, 176, + 189, 140, 143, 154, 170, 180, 255, 180, + 255, 128, 183, 128, 137, 141, 189, 128, + 136, 144, 146, 148, 182, 184, 185, 128, + 181, 187, 191, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 255, 190, 128, 180, 182, 188, 130, 132, + 134, 140, 144, 147, 150, 155, 160, 172, + 178, 180, 182, 188, 128, 129, 130, 131, + 132, 133, 134, 176, 177, 178, 179, 180, + 181, 182, 183, 191, 255, 129, 147, 149, + 176, 178, 190, 192, 255, 144, 156, 161, + 144, 156, 165, 176, 130, 135, 149, 164, + 166, 168, 138, 147, 152, 157, 170, 185, + 188, 191, 142, 133, 137, 160, 255, 137, + 255, 128, 174, 176, 255, 159, 165, 170, + 180, 255, 167, 173, 128, 165, 176, 255, + 168, 174, 176, 190, 192, 255, 128, 150, + 160, 166, 168, 174, 176, 182, 184, 190, + 128, 134, 136, 142, 144, 150, 152, 158, + 160, 191, 128, 129, 130, 
131, 132, 133, + 134, 135, 144, 145, 255, 133, 135, 161, + 175, 177, 181, 184, 188, 160, 151, 152, + 187, 192, 255, 133, 173, 177, 255, 143, + 159, 187, 255, 176, 191, 182, 183, 184, + 191, 192, 255, 150, 255, 128, 146, 147, + 148, 152, 153, 154, 155, 156, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 129, 255, 141, 255, 144, 189, 141, + 143, 172, 255, 191, 128, 175, 180, 189, + 151, 159, 162, 255, 175, 137, 138, 184, + 255, 183, 255, 168, 255, 128, 179, 188, + 134, 143, 154, 159, 184, 186, 190, 255, + 128, 173, 176, 255, 148, 159, 189, 255, + 129, 142, 154, 159, 191, 255, 128, 182, + 128, 141, 144, 153, 160, 182, 186, 255, + 128, 130, 155, 157, 160, 175, 178, 182, + 129, 134, 137, 142, 145, 150, 160, 166, + 168, 174, 176, 255, 155, 166, 175, 128, + 170, 172, 173, 176, 185, 158, 159, 160, + 255, 164, 175, 135, 138, 188, 255, 164, + 169, 171, 172, 173, 174, 175, 180, 181, + 182, 183, 184, 185, 187, 188, 189, 190, + 191, 165, 186, 174, 175, 154, 255, 190, + 128, 134, 147, 151, 157, 168, 170, 182, + 184, 188, 128, 129, 131, 132, 134, 255, + 147, 255, 190, 255, 144, 145, 136, 175, + 188, 255, 128, 143, 160, 175, 179, 180, + 141, 143, 176, 180, 182, 255, 189, 255, + 191, 144, 153, 161, 186, 129, 154, 166, + 255, 191, 255, 130, 135, 138, 143, 146, + 151, 154, 156, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 161, 169, 128, 129, 130, + 131, 133, 135, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 153, 155, 178, 179, 128, 139, 141, + 166, 168, 186, 188, 189, 191, 255, 142, + 143, 158, 255, 187, 255, 128, 180, 189, + 128, 156, 160, 255, 145, 159, 161, 255, + 128, 159, 176, 255, 139, 143, 187, 255, + 128, 157, 160, 255, 144, 132, 135, 150, + 255, 158, 159, 170, 175, 148, 151, 188, + 255, 128, 167, 176, 255, 164, 255, 183, + 255, 128, 149, 160, 167, 136, 188, 128, + 133, 138, 181, 183, 184, 191, 255, 150, + 159, 183, 255, 128, 158, 160, 178, 180, + 181, 128, 149, 160, 185, 128, 183, 190, + 191, 191, 128, 131, 133, 134, 140, 147, + 149, 151, 153, 179, 184, 186, 160, 188, + 128, 156, 128, 135, 137, 166, 128, 181, + 128, 149, 160, 178, 128, 145, 128, 178, + 129, 130, 131, 132, 133, 135, 136, 138, + 139, 140, 141, 144, 145, 146, 147, 150, + 151, 152, 153, 154, 155, 156, 162, 163, + 171, 176, 177, 178, 128, 134, 135, 165, + 176, 190, 144, 168, 176, 185, 128, 180, + 182, 191, 182, 144, 179, 155, 133, 137, + 141, 143, 157, 255, 190, 128, 145, 147, + 183, 136, 128, 134, 138, 141, 143, 157, + 159, 168, 176, 255, 171, 175, 186, 255, + 128, 131, 133, 140, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 185, 188, 191, + 144, 151, 128, 132, 135, 136, 139, 141, + 157, 163, 166, 172, 176, 180, 128, 138, + 144, 153, 134, 136, 143, 154, 255, 128, + 181, 184, 255, 129, 151, 158, 255, 129, + 131, 133, 143, 154, 255, 128, 137, 128, + 153, 157, 171, 176, 185, 160, 255, 170, + 190, 192, 255, 128, 184, 128, 136, 138, + 182, 184, 191, 128, 144, 153, 178, 255, + 168, 144, 145, 183, 255, 128, 142, 145, + 149, 129, 141, 144, 146, 147, 148, 175, + 255, 132, 255, 128, 144, 129, 143, 144, + 153, 145, 152, 135, 255, 160, 168, 169, + 171, 172, 173, 174, 188, 189, 190, 191, + 161, 167, 185, 255, 128, 158, 160, 169, + 144, 173, 176, 180, 128, 131, 144, 153, + 163, 183, 189, 255, 144, 255, 133, 143, + 191, 255, 143, 159, 160, 128, 129, 255, + 159, 160, 171, 172, 255, 173, 255, 179, + 255, 128, 176, 177, 178, 128, 129, 171, + 175, 189, 
255, 128, 136, 144, 153, 157, + 158, 133, 134, 137, 144, 145, 146, 147, + 148, 149, 154, 155, 156, 157, 158, 159, + 168, 169, 170, 150, 153, 165, 169, 173, + 178, 187, 255, 131, 132, 140, 169, 174, + 255, 130, 132, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 139, 140, 191, 255, 134, + 128, 132, 138, 144, 146, 255, 166, 167, + 129, 155, 187, 149, 181, 143, 175, 137, + 169, 131, 140, 141, 192, 255, 128, 182, + 187, 255, 173, 180, 182, 255, 132, 155, + 159, 161, 175, 128, 160, 163, 164, 165, + 184, 185, 186, 161, 162, 128, 134, 136, + 152, 155, 161, 163, 164, 166, 170, 133, + 143, 151, 255, 139, 143, 154, 255, 164, + 167, 185, 187, 128, 131, 133, 159, 161, + 162, 169, 178, 180, 183, 130, 135, 137, + 139, 148, 151, 153, 155, 157, 159, 164, + 190, 141, 143, 145, 146, 161, 162, 167, + 170, 172, 178, 180, 183, 185, 188, 128, + 137, 139, 155, 161, 163, 165, 169, 171, + 187, 155, 156, 151, 255, 156, 157, 160, + 181, 255, 186, 187, 255, 162, 255, 160, + 168, 161, 167, 158, 255, 160, 132, 135, + 133, 134, 176, 255, 128, 191, 154, 164, + 168, 128, 149, 150, 191, 128, 152, 153, + 191, 181, 128, 159, 160, 189, 190, 191, + 189, 128, 131, 132, 185, 186, 191, 144, + 128, 151, 152, 161, 162, 176, 177, 255, + 169, 177, 129, 132, 141, 142, 145, 146, + 179, 181, 186, 188, 190, 191, 192, 255, + 142, 158, 128, 155, 156, 161, 162, 175, + 176, 177, 178, 191, 169, 177, 180, 183, + 128, 132, 133, 138, 139, 142, 143, 144, + 145, 146, 147, 185, 186, 191, 157, 128, + 152, 153, 158, 159, 177, 178, 180, 181, + 191, 142, 146, 169, 177, 180, 189, 128, + 132, 133, 185, 186, 191, 144, 185, 128, + 159, 160, 161, 162, 191, 169, 177, 180, + 189, 128, 132, 133, 140, 141, 142, 143, + 144, 145, 146, 147, 185, 186, 191, 158, + 177, 128, 155, 156, 161, 162, 191, 131, + 145, 155, 157, 128, 132, 133, 138, 139, + 141, 142, 149, 150, 152, 153, 159, 160, + 162, 163, 164, 165, 167, 168, 170, 171, + 173, 174, 185, 186, 191, 144, 128, 191, + 141, 145, 169, 189, 128, 132, 133, 185, + 186, 191, 128, 151, 152, 154, 155, 159, + 160, 161, 162, 191, 128, 141, 145, 169, + 180, 189, 129, 132, 133, 185, 186, 191, + 158, 128, 159, 160, 161, 162, 176, 177, + 178, 179, 191, 141, 145, 189, 128, 132, + 133, 186, 187, 191, 142, 128, 147, 148, + 150, 151, 158, 159, 161, 162, 185, 186, + 191, 178, 188, 128, 132, 133, 150, 151, + 153, 154, 189, 190, 191, 128, 134, 135, + 191, 128, 177, 129, 179, 180, 191, 128, + 131, 137, 141, 152, 160, 164, 166, 172, + 177, 189, 129, 132, 133, 134, 135, 138, + 139, 147, 148, 167, 168, 169, 170, 179, + 180, 191, 133, 128, 134, 135, 155, 156, + 159, 160, 191, 128, 129, 191, 136, 128, + 172, 173, 191, 128, 135, 136, 140, 141, + 191, 191, 128, 170, 171, 190, 161, 128, + 143, 144, 149, 150, 153, 154, 157, 158, + 164, 165, 166, 167, 173, 174, 176, 177, + 180, 181, 255, 130, 141, 143, 159, 134, + 187, 136, 140, 142, 143, 137, 151, 153, + 142, 143, 158, 159, 137, 177, 191, 142, + 143, 182, 183, 192, 255, 129, 151, 128, + 133, 134, 135, 136, 255, 145, 150, 151, + 155, 191, 192, 255, 128, 143, 144, 159, + 160, 255, 182, 183, 190, 191, 192, 255, + 128, 129, 255, 173, 174, 192, 255, 128, + 129, 154, 155, 159, 160, 255, 171, 173, + 185, 191, 192, 255, 141, 128, 145, 146, + 159, 160, 177, 178, 191, 173, 128, 145, + 146, 159, 160, 176, 177, 191, 128, 179, + 180, 191, 151, 156, 128, 191, 128, 159, + 160, 255, 184, 191, 192, 255, 169, 128, + 170, 171, 175, 176, 255, 182, 191, 192, + 255, 128, 158, 159, 191, 128, 143, 144, + 173, 174, 175, 176, 180, 181, 191, 128, + 171, 172, 175, 176, 255, 138, 191, 
192, + 255, 128, 150, 151, 159, 160, 255, 149, + 191, 192, 255, 167, 128, 191, 128, 132, + 133, 179, 180, 191, 128, 132, 133, 139, + 140, 191, 128, 130, 131, 160, 161, 173, + 174, 175, 176, 185, 186, 255, 166, 191, + 192, 255, 128, 163, 164, 191, 128, 140, + 141, 143, 144, 153, 154, 189, 190, 191, + 128, 136, 137, 191, 173, 128, 168, 169, + 177, 178, 180, 181, 182, 183, 191, 0, + 127, 192, 255, 150, 151, 158, 159, 152, + 154, 156, 158, 134, 135, 142, 143, 190, + 191, 192, 255, 181, 189, 191, 128, 190, + 133, 181, 128, 129, 130, 140, 141, 143, + 144, 147, 148, 149, 150, 155, 156, 159, + 160, 172, 173, 177, 178, 188, 189, 191, + 177, 191, 128, 190, 128, 143, 144, 156, + 157, 191, 130, 135, 148, 164, 166, 168, + 128, 137, 138, 149, 150, 151, 152, 157, + 158, 169, 170, 185, 186, 187, 188, 191, + 142, 128, 132, 133, 137, 138, 159, 160, + 255, 137, 191, 192, 255, 175, 128, 255, + 159, 165, 170, 175, 177, 180, 191, 192, + 255, 166, 173, 128, 167, 168, 175, 176, + 255, 168, 174, 176, 191, 192, 255, 167, + 175, 183, 191, 128, 150, 151, 159, 160, + 190, 135, 143, 151, 128, 158, 159, 191, + 128, 132, 133, 135, 136, 160, 161, 169, + 170, 176, 177, 181, 182, 183, 184, 188, + 189, 191, 160, 151, 154, 187, 192, 255, + 128, 132, 133, 173, 174, 176, 177, 255, + 143, 159, 187, 191, 192, 255, 128, 175, + 176, 191, 150, 191, 192, 255, 141, 191, + 192, 255, 128, 143, 144, 189, 190, 191, + 141, 143, 160, 169, 172, 191, 192, 255, + 191, 128, 174, 175, 190, 128, 157, 158, + 159, 160, 255, 176, 191, 192, 255, 128, + 150, 151, 159, 160, 161, 162, 255, 175, + 137, 138, 184, 191, 192, 255, 128, 182, + 183, 255, 130, 134, 139, 163, 191, 192, + 255, 128, 129, 130, 179, 180, 191, 187, + 189, 128, 177, 178, 183, 184, 191, 128, + 137, 138, 165, 166, 175, 176, 255, 135, + 159, 189, 191, 192, 255, 128, 131, 132, + 178, 179, 191, 143, 165, 191, 128, 159, + 160, 175, 176, 185, 186, 190, 128, 168, + 169, 191, 131, 186, 128, 139, 140, 159, + 160, 182, 183, 189, 190, 255, 176, 178, + 180, 183, 184, 190, 191, 192, 255, 129, + 128, 130, 131, 154, 155, 157, 158, 159, + 160, 170, 171, 177, 178, 180, 181, 191, + 128, 167, 175, 129, 134, 135, 136, 137, + 142, 143, 144, 145, 150, 151, 159, 160, + 255, 155, 166, 175, 128, 162, 163, 191, + 164, 175, 135, 138, 188, 191, 192, 255, + 174, 175, 154, 191, 192, 255, 157, 169, + 183, 189, 191, 128, 134, 135, 146, 147, + 151, 152, 158, 159, 190, 130, 133, 128, + 255, 178, 191, 192, 255, 128, 146, 147, + 255, 190, 191, 192, 255, 128, 143, 144, + 255, 144, 145, 136, 175, 188, 191, 192, + 255, 181, 128, 175, 176, 255, 189, 191, + 192, 255, 128, 160, 161, 186, 187, 191, + 128, 129, 154, 155, 165, 166, 255, 191, + 192, 255, 128, 129, 130, 135, 136, 137, + 138, 143, 144, 145, 146, 151, 152, 153, + 154, 156, 157, 191, 128, 191, 128, 129, + 130, 131, 133, 138, 139, 140, 141, 142, + 143, 144, 145, 146, 147, 148, 149, 152, + 156, 157, 160, 161, 162, 163, 164, 166, + 168, 169, 170, 171, 172, 173, 174, 176, + 177, 132, 151, 153, 155, 158, 175, 178, + 179, 180, 191, 140, 167, 187, 190, 128, + 255, 142, 143, 158, 191, 192, 255, 187, + 191, 192, 255, 128, 180, 181, 191, 128, + 156, 157, 159, 160, 255, 145, 191, 192, + 255, 128, 159, 160, 175, 176, 255, 139, + 143, 182, 191, 192, 255, 144, 132, 135, + 150, 191, 192, 255, 158, 175, 148, 151, + 188, 191, 192, 255, 128, 167, 168, 175, + 176, 255, 164, 191, 192, 255, 183, 191, + 192, 255, 128, 149, 150, 159, 160, 167, + 168, 191, 136, 182, 188, 128, 133, 134, + 137, 138, 184, 185, 190, 191, 255, 150, + 159, 183, 191, 192, 255, 179, 128, 159, + 160, 181, 182, 191, 
128, 149, 150, 159, + 160, 185, 186, 191, 128, 183, 184, 189, + 190, 191, 128, 148, 152, 129, 143, 144, + 179, 180, 191, 128, 159, 160, 188, 189, + 191, 128, 156, 157, 191, 136, 128, 164, + 165, 191, 128, 181, 182, 191, 128, 149, + 150, 159, 160, 178, 179, 191, 128, 145, + 146, 191, 128, 178, 179, 191, 128, 130, + 131, 132, 133, 134, 135, 136, 138, 139, + 140, 141, 144, 145, 146, 147, 150, 151, + 152, 153, 154, 156, 162, 163, 171, 176, + 177, 178, 129, 191, 128, 130, 131, 183, + 184, 191, 128, 130, 131, 175, 176, 191, + 128, 143, 144, 168, 169, 191, 128, 130, + 131, 166, 167, 191, 182, 128, 143, 144, + 178, 179, 191, 128, 130, 131, 178, 179, + 191, 128, 154, 156, 129, 132, 133, 191, + 146, 128, 171, 172, 191, 135, 137, 142, + 158, 128, 168, 169, 175, 176, 255, 159, + 191, 192, 255, 144, 128, 156, 157, 161, + 162, 191, 128, 134, 135, 138, 139, 191, + 128, 175, 176, 191, 134, 128, 131, 132, + 135, 136, 191, 128, 174, 175, 191, 128, + 151, 152, 155, 156, 191, 132, 128, 191, + 128, 170, 171, 191, 128, 153, 154, 191, + 160, 190, 192, 255, 128, 184, 185, 191, + 137, 128, 174, 175, 191, 128, 129, 177, + 178, 255, 144, 191, 192, 255, 128, 142, + 143, 144, 145, 146, 149, 129, 148, 150, + 191, 175, 191, 192, 255, 132, 191, 192, + 255, 128, 144, 129, 143, 145, 191, 144, + 153, 128, 143, 145, 152, 154, 191, 135, + 191, 192, 255, 160, 168, 169, 171, 172, + 173, 174, 188, 189, 190, 191, 128, 159, + 161, 167, 170, 187, 185, 191, 192, 255, + 128, 143, 144, 173, 174, 191, 128, 131, + 132, 162, 163, 183, 184, 188, 189, 255, + 133, 143, 145, 191, 192, 255, 128, 146, + 147, 159, 160, 191, 160, 128, 191, 128, + 129, 191, 192, 255, 159, 160, 171, 128, + 170, 172, 191, 192, 255, 173, 191, 192, + 255, 179, 191, 192, 255, 128, 176, 177, + 178, 129, 191, 128, 129, 130, 191, 171, + 175, 189, 191, 192, 255, 128, 136, 137, + 143, 144, 153, 154, 191, 144, 145, 146, + 147, 148, 149, 154, 155, 156, 157, 158, + 159, 128, 143, 150, 153, 160, 191, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 191, + 139, 140, 192, 255, 133, 145, 128, 134, + 135, 137, 138, 255, 166, 167, 129, 155, + 187, 149, 181, 143, 175, 137, 169, 131, + 140, 191, 192, 255, 160, 163, 164, 165, + 184, 185, 186, 128, 159, 161, 162, 166, + 191, 133, 191, 192, 255, 132, 160, 163, + 167, 179, 184, 186, 128, 164, 165, 168, + 169, 187, 188, 191, 130, 135, 137, 139, + 144, 147, 151, 153, 155, 157, 159, 163, + 171, 179, 184, 189, 191, 128, 140, 141, + 148, 149, 160, 161, 164, 165, 166, 167, + 190, 138, 164, 170, 128, 155, 156, 160, + 161, 187, 188, 191, 128, 191, 155, 156, + 128, 191, 151, 191, 192, 255, 156, 157, + 160, 128, 191, 181, 191, 192, 255, 158, + 159, 186, 128, 185, 187, 191, 192, 255, + 162, 191, 192, 255, 160, 168, 128, 159, + 161, 167, 169, 191, 158, 191, 192, 255, + 10, 13, 128, 191, 192, 223, 224, 239, + 240, 247, 248, 255, 128, 191, 128, 191, + 128, 191, 128, 191, 128, 191, 10, 128, + 191, 128, 191, 128, 191, 36, 123, 37, + 123, 10, 128, 191, 128, 191, 128, 191, + 36, 123, 37, 123, 170, 181, 183, 186, + 128, 150, 152, 182, 184, 255, 192, 255, + 128, 255, 173, 130, 133, 146, 159, 165, + 171, 175, 255, 181, 190, 184, 185, 192, + 255, 140, 134, 138, 142, 161, 163, 255, + 182, 130, 136, 137, 176, 151, 152, 154, + 160, 190, 136, 144, 192, 255, 135, 129, + 130, 132, 133, 144, 170, 176, 178, 144, + 154, 160, 191, 128, 169, 174, 255, 148, + 169, 157, 158, 189, 190, 192, 255, 144, + 255, 139, 140, 178, 255, 186, 128, 181, + 160, 161, 162, 163, 164, 165, 166, 167, + 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 
178, 179, 180, 181, 182, 183, + 184, 185, 186, 187, 188, 189, 190, 191, + 128, 173, 128, 155, 160, 180, 182, 189, + 148, 161, 163, 255, 176, 164, 165, 132, + 169, 177, 141, 142, 145, 146, 179, 181, + 186, 187, 158, 133, 134, 137, 138, 143, + 150, 152, 155, 164, 165, 178, 255, 188, + 129, 131, 133, 138, 143, 144, 147, 168, + 170, 176, 178, 179, 181, 182, 184, 185, + 190, 255, 157, 131, 134, 137, 138, 142, + 144, 146, 152, 159, 165, 182, 255, 129, + 131, 133, 141, 143, 145, 147, 168, 170, + 176, 178, 179, 181, 185, 188, 255, 134, + 138, 142, 143, 145, 159, 164, 165, 176, + 184, 186, 255, 129, 131, 133, 140, 143, + 144, 147, 168, 170, 176, 178, 179, 181, + 185, 188, 191, 177, 128, 132, 135, 136, + 139, 141, 150, 151, 156, 157, 159, 163, + 166, 175, 156, 130, 131, 133, 138, 142, + 144, 146, 149, 153, 154, 158, 159, 163, + 164, 168, 170, 174, 185, 190, 191, 144, + 151, 128, 130, 134, 136, 138, 141, 166, + 175, 128, 131, 133, 140, 142, 144, 146, + 168, 170, 185, 189, 255, 133, 137, 151, + 142, 148, 155, 159, 164, 165, 176, 255, + 128, 131, 133, 140, 142, 144, 146, 168, + 170, 179, 181, 185, 188, 191, 158, 128, + 132, 134, 136, 138, 141, 149, 150, 160, + 163, 166, 175, 177, 178, 129, 131, 133, + 140, 142, 144, 146, 186, 189, 255, 133, + 137, 143, 147, 152, 158, 164, 165, 176, + 185, 192, 255, 189, 130, 131, 133, 150, + 154, 177, 179, 187, 138, 150, 128, 134, + 143, 148, 152, 159, 166, 175, 178, 179, + 129, 186, 128, 142, 144, 153, 132, 138, + 141, 165, 167, 129, 130, 135, 136, 148, + 151, 153, 159, 161, 163, 170, 171, 173, + 185, 187, 189, 134, 128, 132, 136, 141, + 144, 153, 156, 159, 128, 181, 183, 185, + 152, 153, 160, 169, 190, 191, 128, 135, + 137, 172, 177, 191, 128, 132, 134, 151, + 153, 188, 134, 128, 129, 130, 131, 137, + 138, 139, 140, 141, 142, 143, 144, 153, + 154, 155, 156, 157, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 173, 175, 176, 177, 178, 179, 181, + 182, 183, 188, 189, 190, 191, 132, 152, + 172, 184, 185, 187, 128, 191, 128, 137, + 144, 255, 158, 159, 134, 187, 136, 140, + 142, 143, 137, 151, 153, 142, 143, 158, + 159, 137, 177, 142, 143, 182, 183, 191, + 255, 128, 130, 133, 136, 150, 152, 255, + 145, 150, 151, 155, 156, 160, 168, 178, + 255, 128, 143, 160, 255, 182, 183, 190, + 255, 129, 255, 173, 174, 192, 255, 129, + 154, 160, 255, 171, 173, 185, 255, 128, + 140, 142, 148, 160, 180, 128, 147, 160, + 172, 174, 176, 178, 179, 148, 150, 152, + 155, 158, 159, 170, 255, 139, 141, 144, + 153, 160, 255, 184, 255, 128, 170, 176, + 255, 182, 255, 128, 158, 160, 171, 176, + 187, 134, 173, 176, 180, 128, 171, 176, + 255, 138, 143, 155, 255, 128, 155, 160, + 255, 159, 189, 190, 192, 255, 167, 128, + 137, 144, 153, 176, 189, 140, 143, 154, + 170, 180, 255, 180, 255, 128, 183, 128, + 137, 141, 189, 128, 136, 144, 146, 148, + 182, 184, 185, 128, 181, 187, 191, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 255, 190, 128, 180, + 182, 188, 130, 132, 134, 140, 144, 147, + 150, 155, 160, 172, 178, 180, 182, 188, + 128, 129, 130, 131, 132, 133, 134, 176, + 177, 178, 179, 180, 181, 182, 183, 191, + 255, 129, 147, 149, 176, 178, 190, 192, + 255, 144, 156, 161, 144, 156, 165, 176, + 130, 135, 149, 164, 166, 168, 138, 147, + 152, 157, 170, 185, 188, 191, 142, 133, + 137, 160, 255, 137, 255, 128, 174, 176, + 255, 159, 165, 170, 180, 255, 167, 173, + 128, 165, 176, 255, 168, 174, 176, 190, + 192, 255, 128, 150, 160, 166, 168, 174, + 176, 182, 184, 190, 128, 134, 136, 142, + 144, 150, 152, 158, 160, 191, 128, 129, + 130, 131, 132, 133, 134, 135, 144, 
145, + 255, 133, 135, 161, 175, 177, 181, 184, + 188, 160, 151, 152, 187, 192, 255, 133, + 173, 177, 255, 143, 159, 187, 255, 176, + 191, 182, 183, 184, 191, 192, 255, 150, + 255, 128, 146, 147, 148, 152, 153, 154, + 155, 156, 158, 159, 160, 161, 162, 163, + 164, 165, 166, 167, 168, 169, 170, 171, + 172, 173, 174, 175, 176, 129, 255, 141, + 255, 144, 189, 141, 143, 172, 255, 191, + 128, 175, 180, 189, 151, 159, 162, 255, + 175, 137, 138, 184, 255, 183, 255, 168, + 255, 128, 179, 188, 134, 143, 154, 159, + 184, 186, 190, 255, 128, 173, 176, 255, + 148, 159, 189, 255, 129, 142, 154, 159, + 191, 255, 128, 182, 128, 141, 144, 153, + 160, 182, 186, 255, 128, 130, 155, 157, + 160, 175, 178, 182, 129, 134, 137, 142, + 145, 150, 160, 166, 168, 174, 176, 255, + 155, 166, 175, 128, 170, 172, 173, 176, + 185, 158, 159, 160, 255, 164, 175, 135, + 138, 188, 255, 164, 169, 171, 172, 173, + 174, 175, 180, 181, 182, 183, 184, 185, + 187, 188, 189, 190, 191, 165, 186, 174, + 175, 154, 255, 190, 128, 134, 147, 151, + 157, 168, 170, 182, 184, 188, 128, 129, + 131, 132, 134, 255, 147, 255, 190, 255, + 144, 145, 136, 175, 188, 255, 128, 143, + 160, 175, 179, 180, 141, 143, 176, 180, + 182, 255, 189, 255, 191, 144, 153, 161, + 186, 129, 154, 166, 255, 191, 255, 130, + 135, 138, 143, 146, 151, 154, 156, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 161, + 169, 128, 129, 130, 131, 133, 135, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 153, 155, 178, + 179, 128, 139, 141, 166, 168, 186, 188, + 189, 191, 255, 142, 143, 158, 255, 187, + 255, 128, 180, 189, 128, 156, 160, 255, + 145, 159, 161, 255, 128, 159, 176, 255, + 139, 143, 187, 255, 128, 157, 160, 255, + 144, 132, 135, 150, 255, 158, 159, 170, + 175, 148, 151, 188, 255, 128, 167, 176, + 255, 164, 255, 183, 255, 128, 149, 160, + 167, 136, 188, 128, 133, 138, 181, 183, + 184, 191, 255, 150, 159, 183, 255, 128, + 158, 160, 178, 180, 181, 128, 149, 160, + 185, 128, 183, 190, 191, 191, 128, 131, + 133, 134, 140, 147, 149, 151, 153, 179, + 184, 186, 160, 188, 128, 156, 128, 135, + 137, 166, 128, 181, 128, 149, 160, 178, + 128, 145, 128, 178, 129, 130, 131, 132, + 133, 135, 136, 138, 139, 140, 141, 144, + 145, 146, 147, 150, 151, 152, 153, 154, + 155, 156, 162, 163, 171, 176, 177, 178, + 128, 134, 135, 165, 176, 190, 144, 168, + 176, 185, 128, 180, 182, 191, 182, 144, + 179, 155, 133, 137, 141, 143, 157, 255, + 190, 128, 145, 147, 183, 136, 128, 134, + 138, 141, 143, 157, 159, 168, 176, 255, + 171, 175, 186, 255, 128, 131, 133, 140, + 143, 144, 147, 168, 170, 176, 178, 179, + 181, 185, 188, 191, 144, 151, 128, 132, + 135, 136, 139, 141, 157, 163, 166, 172, + 176, 180, 128, 138, 144, 153, 134, 136, + 143, 154, 255, 128, 181, 184, 255, 129, + 151, 158, 255, 129, 131, 133, 143, 154, + 255, 128, 137, 128, 153, 157, 171, 176, + 185, 160, 255, 170, 190, 192, 255, 128, + 184, 128, 136, 138, 182, 184, 191, 128, + 144, 153, 178, 255, 168, 144, 145, 183, + 255, 128, 142, 145, 149, 129, 141, 144, + 146, 147, 148, 175, 255, 132, 255, 128, + 144, 129, 143, 144, 153, 145, 152, 135, + 255, 160, 168, 169, 171, 172, 173, 174, + 188, 189, 190, 191, 161, 167, 185, 255, + 128, 158, 160, 169, 144, 173, 176, 180, + 128, 131, 144, 153, 163, 183, 189, 255, + 144, 255, 133, 143, 191, 255, 143, 159, + 160, 128, 129, 255, 159, 160, 171, 172, + 255, 173, 255, 179, 255, 128, 176, 177, + 178, 128, 129, 171, 175, 189, 255, 128, + 136, 144, 153, 157, 
158, 133, 134, 137, + 144, 145, 146, 147, 148, 149, 154, 155, + 156, 157, 158, 159, 168, 169, 170, 150, + 153, 165, 169, 173, 178, 187, 255, 131, + 132, 140, 169, 174, 255, 130, 132, 149, + 157, 173, 186, 188, 160, 161, 163, 164, + 167, 168, 132, 134, 149, 157, 186, 139, + 140, 191, 255, 134, 128, 132, 138, 144, + 146, 255, 166, 167, 129, 155, 187, 149, + 181, 143, 175, 137, 169, 131, 140, 141, + 192, 255, 128, 182, 187, 255, 173, 180, + 182, 255, 132, 155, 159, 161, 175, 128, + 160, 163, 164, 165, 184, 185, 186, 161, + 162, 128, 134, 136, 152, 155, 161, 163, + 164, 166, 170, 133, 143, 151, 255, 139, + 143, 154, 255, 164, 167, 185, 187, 128, + 131, 133, 159, 161, 162, 169, 178, 180, + 183, 130, 135, 137, 139, 148, 151, 153, + 155, 157, 159, 164, 190, 141, 143, 145, + 146, 161, 162, 167, 170, 172, 178, 180, + 183, 185, 188, 128, 137, 139, 155, 161, + 163, 165, 169, 171, 187, 155, 156, 151, + 255, 156, 157, 160, 181, 255, 186, 187, + 255, 162, 255, 160, 168, 161, 167, 158, + 255, 160, 132, 135, 133, 134, 176, 255, + 128, 191, 154, 164, 168, 128, 149, 150, + 191, 128, 152, 153, 191, 181, 128, 159, + 160, 189, 190, 191, 189, 128, 131, 132, + 185, 186, 191, 144, 128, 151, 152, 161, + 162, 176, 177, 255, 169, 177, 129, 132, + 141, 142, 145, 146, 179, 181, 186, 188, + 190, 191, 192, 255, 142, 158, 128, 155, + 156, 161, 162, 175, 176, 177, 178, 191, + 169, 177, 180, 183, 128, 132, 133, 138, + 139, 142, 143, 144, 145, 146, 147, 185, + 186, 191, 157, 128, 152, 153, 158, 159, + 177, 178, 180, 181, 191, 142, 146, 169, + 177, 180, 189, 128, 132, 133, 185, 186, + 191, 144, 185, 128, 159, 160, 161, 162, + 191, 169, 177, 180, 189, 128, 132, 133, + 140, 141, 142, 143, 144, 145, 146, 147, + 185, 186, 191, 158, 177, 128, 155, 156, + 161, 162, 191, 131, 145, 155, 157, 128, + 132, 133, 138, 139, 141, 142, 149, 150, + 152, 153, 159, 160, 162, 163, 164, 165, + 167, 168, 170, 171, 173, 174, 185, 186, + 191, 144, 128, 191, 141, 145, 169, 189, + 128, 132, 133, 185, 186, 191, 128, 151, + 152, 154, 155, 159, 160, 161, 162, 191, + 128, 141, 145, 169, 180, 189, 129, 132, + 133, 185, 186, 191, 158, 128, 159, 160, + 161, 162, 176, 177, 178, 179, 191, 141, + 145, 189, 128, 132, 133, 186, 187, 191, + 142, 128, 147, 148, 150, 151, 158, 159, + 161, 162, 185, 186, 191, 178, 188, 128, + 132, 133, 150, 151, 153, 154, 189, 190, + 191, 128, 134, 135, 191, 128, 177, 129, + 179, 180, 191, 128, 131, 137, 141, 152, + 160, 164, 166, 172, 177, 189, 129, 132, + 133, 134, 135, 138, 139, 147, 148, 167, + 168, 169, 170, 179, 180, 191, 133, 128, + 134, 135, 155, 156, 159, 160, 191, 128, + 129, 191, 136, 128, 172, 173, 191, 128, + 135, 136, 140, 141, 191, 191, 128, 170, + 171, 190, 161, 128, 143, 144, 149, 150, + 153, 154, 157, 158, 164, 165, 166, 167, + 173, 174, 176, 177, 180, 181, 255, 130, + 141, 143, 159, 134, 187, 136, 140, 142, + 143, 137, 151, 153, 142, 143, 158, 159, + 137, 177, 191, 142, 143, 182, 183, 192, + 255, 129, 151, 128, 133, 134, 135, 136, + 255, 145, 150, 151, 155, 191, 192, 255, + 128, 143, 144, 159, 160, 255, 182, 183, + 190, 191, 192, 255, 128, 129, 255, 173, + 174, 192, 255, 128, 129, 154, 155, 159, + 160, 255, 171, 173, 185, 191, 192, 255, + 141, 128, 145, 146, 159, 160, 177, 178, + 191, 173, 128, 145, 146, 159, 160, 176, + 177, 191, 128, 179, 180, 191, 151, 156, + 128, 191, 128, 159, 160, 255, 184, 191, + 192, 255, 169, 128, 170, 171, 175, 176, + 255, 182, 191, 192, 255, 128, 158, 159, + 191, 128, 143, 144, 173, 174, 175, 176, + 180, 181, 191, 128, 171, 172, 175, 176, + 255, 138, 191, 192, 255, 128, 150, 151, + 159, 
160, 255, 149, 191, 192, 255, 167, + 128, 191, 128, 132, 133, 179, 180, 191, + 128, 132, 133, 139, 140, 191, 128, 130, + 131, 160, 161, 173, 174, 175, 176, 185, + 186, 255, 166, 191, 192, 255, 128, 163, + 164, 191, 128, 140, 141, 143, 144, 153, + 154, 189, 190, 191, 128, 136, 137, 191, + 173, 128, 168, 169, 177, 178, 180, 181, + 182, 183, 191, 0, 127, 192, 255, 150, + 151, 158, 159, 152, 154, 156, 158, 134, + 135, 142, 143, 190, 191, 192, 255, 181, + 189, 191, 128, 190, 133, 181, 128, 129, + 130, 140, 141, 143, 144, 147, 148, 149, + 150, 155, 156, 159, 160, 172, 173, 177, + 178, 188, 189, 191, 177, 191, 128, 190, + 128, 143, 144, 156, 157, 191, 130, 135, + 148, 164, 166, 168, 128, 137, 138, 149, + 150, 151, 152, 157, 158, 169, 170, 185, + 186, 187, 188, 191, 142, 128, 132, 133, + 137, 138, 159, 160, 255, 137, 191, 192, + 255, 175, 128, 255, 159, 165, 170, 175, + 177, 180, 191, 192, 255, 166, 173, 128, + 167, 168, 175, 176, 255, 168, 174, 176, + 191, 192, 255, 167, 175, 183, 191, 128, + 150, 151, 159, 160, 190, 135, 143, 151, + 128, 158, 159, 191, 128, 132, 133, 135, + 136, 160, 161, 169, 170, 176, 177, 181, + 182, 183, 184, 188, 189, 191, 160, 151, + 154, 187, 192, 255, 128, 132, 133, 173, + 174, 176, 177, 255, 143, 159, 187, 191, + 192, 255, 128, 175, 176, 191, 150, 191, + 192, 255, 141, 191, 192, 255, 128, 143, + 144, 189, 190, 191, 141, 143, 160, 169, + 172, 191, 192, 255, 191, 128, 174, 175, + 190, 128, 157, 158, 159, 160, 255, 176, + 191, 192, 255, 128, 150, 151, 159, 160, + 161, 162, 255, 175, 137, 138, 184, 191, + 192, 255, 128, 182, 183, 255, 130, 134, + 139, 163, 191, 192, 255, 128, 129, 130, + 179, 180, 191, 187, 189, 128, 177, 178, + 183, 184, 191, 128, 137, 138, 165, 166, + 175, 176, 255, 135, 159, 189, 191, 192, + 255, 128, 131, 132, 178, 179, 191, 143, + 165, 191, 128, 159, 160, 175, 176, 185, + 186, 190, 128, 168, 169, 191, 131, 186, + 128, 139, 140, 159, 160, 182, 183, 189, + 190, 255, 176, 178, 180, 183, 184, 190, + 191, 192, 255, 129, 128, 130, 131, 154, + 155, 157, 158, 159, 160, 170, 171, 177, + 178, 180, 181, 191, 128, 167, 175, 129, + 134, 135, 136, 137, 142, 143, 144, 145, + 150, 151, 159, 160, 255, 155, 166, 175, + 128, 162, 163, 191, 164, 175, 135, 138, + 188, 191, 192, 255, 174, 175, 154, 191, + 192, 255, 157, 169, 183, 189, 191, 128, + 134, 135, 146, 147, 151, 152, 158, 159, + 190, 130, 133, 128, 255, 178, 191, 192, + 255, 128, 146, 147, 255, 190, 191, 192, + 255, 128, 143, 144, 255, 144, 145, 136, + 175, 188, 191, 192, 255, 181, 128, 175, + 176, 255, 189, 191, 192, 255, 128, 160, + 161, 186, 187, 191, 128, 129, 154, 155, + 165, 166, 255, 191, 192, 255, 128, 129, + 130, 135, 136, 137, 138, 143, 144, 145, + 146, 151, 152, 153, 154, 156, 157, 191, + 128, 191, 128, 129, 130, 131, 133, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 152, 156, 157, 160, 161, + 162, 163, 164, 166, 168, 169, 170, 171, + 172, 173, 174, 176, 177, 132, 151, 153, + 155, 158, 175, 178, 179, 180, 191, 140, + 167, 187, 190, 128, 255, 142, 143, 158, + 191, 192, 255, 187, 191, 192, 255, 128, + 180, 181, 191, 128, 156, 157, 159, 160, + 255, 145, 191, 192, 255, 128, 159, 160, + 175, 176, 255, 139, 143, 182, 191, 192, + 255, 144, 132, 135, 150, 191, 192, 255, + 158, 175, 148, 151, 188, 191, 192, 255, + 128, 167, 168, 175, 176, 255, 164, 191, + 192, 255, 183, 191, 192, 255, 128, 149, + 150, 159, 160, 167, 168, 191, 136, 182, + 188, 128, 133, 134, 137, 138, 184, 185, + 190, 191, 255, 150, 159, 183, 191, 192, + 255, 179, 128, 159, 160, 181, 182, 191, + 128, 149, 150, 159, 160, 185, 
186, 191, + 128, 183, 184, 189, 190, 191, 128, 148, + 152, 129, 143, 144, 179, 180, 191, 128, + 159, 160, 188, 189, 191, 128, 156, 157, + 191, 136, 128, 164, 165, 191, 128, 181, + 182, 191, 128, 149, 150, 159, 160, 178, + 179, 191, 128, 145, 146, 191, 128, 178, + 179, 191, 128, 130, 131, 132, 133, 134, + 135, 136, 138, 139, 140, 141, 144, 145, + 146, 147, 150, 151, 152, 153, 154, 156, + 162, 163, 171, 176, 177, 178, 129, 191, + 128, 130, 131, 183, 184, 191, 128, 130, + 131, 175, 176, 191, 128, 143, 144, 168, + 169, 191, 128, 130, 131, 166, 167, 191, + 182, 128, 143, 144, 178, 179, 191, 128, + 130, 131, 178, 179, 191, 128, 154, 156, + 129, 132, 133, 191, 146, 128, 171, 172, + 191, 135, 137, 142, 158, 128, 168, 169, + 175, 176, 255, 159, 191, 192, 255, 144, + 128, 156, 157, 161, 162, 191, 128, 134, + 135, 138, 139, 191, 128, 175, 176, 191, + 134, 128, 131, 132, 135, 136, 191, 128, + 174, 175, 191, 128, 151, 152, 155, 156, + 191, 132, 128, 191, 128, 170, 171, 191, + 128, 153, 154, 191, 160, 190, 192, 255, + 128, 184, 185, 191, 137, 128, 174, 175, + 191, 128, 129, 177, 178, 255, 144, 191, + 192, 255, 128, 142, 143, 144, 145, 146, + 149, 129, 148, 150, 191, 175, 191, 192, + 255, 132, 191, 192, 255, 128, 144, 129, + 143, 145, 191, 144, 153, 128, 143, 145, + 152, 154, 191, 135, 191, 192, 255, 160, + 168, 169, 171, 172, 173, 174, 188, 189, + 190, 191, 128, 159, 161, 167, 170, 187, + 185, 191, 192, 255, 128, 143, 144, 173, + 174, 191, 128, 131, 132, 162, 163, 183, + 184, 188, 189, 255, 133, 143, 145, 191, + 192, 255, 128, 146, 147, 159, 160, 191, + 160, 128, 191, 128, 129, 191, 192, 255, + 159, 160, 171, 128, 170, 172, 191, 192, + 255, 173, 191, 192, 255, 179, 191, 192, + 255, 128, 176, 177, 178, 129, 191, 128, + 129, 130, 191, 171, 175, 189, 191, 192, + 255, 128, 136, 137, 143, 144, 153, 154, + 191, 144, 145, 146, 147, 148, 149, 154, + 155, 156, 157, 158, 159, 128, 143, 150, + 153, 160, 191, 149, 157, 173, 186, 188, + 160, 161, 163, 164, 167, 168, 132, 134, + 149, 157, 186, 191, 139, 140, 192, 255, + 133, 145, 128, 134, 135, 137, 138, 255, + 166, 167, 129, 155, 187, 149, 181, 143, + 175, 137, 169, 131, 140, 191, 192, 255, + 160, 163, 164, 165, 184, 185, 186, 128, + 159, 161, 162, 166, 191, 133, 191, 192, + 255, 132, 160, 163, 167, 179, 184, 186, + 128, 164, 165, 168, 169, 187, 188, 191, + 130, 135, 137, 139, 144, 147, 151, 153, + 155, 157, 159, 163, 171, 179, 184, 189, + 191, 128, 140, 141, 148, 149, 160, 161, + 164, 165, 166, 167, 190, 138, 164, 170, + 128, 155, 156, 160, 161, 187, 188, 191, + 128, 191, 155, 156, 128, 191, 151, 191, + 192, 255, 156, 157, 160, 128, 191, 181, + 191, 192, 255, 158, 159, 186, 128, 185, + 187, 191, 192, 255, 162, 191, 192, 255, + 160, 168, 128, 159, 161, 167, 169, 191, + 158, 191, 192, 255, 9, 10, 13, 32, + 33, 34, 35, 38, 46, 47, 60, 61, + 62, 64, 92, 95, 123, 124, 125, 126, + 127, 194, 195, 198, 199, 203, 204, 205, + 206, 207, 210, 212, 213, 214, 215, 216, + 217, 219, 220, 221, 222, 223, 224, 225, + 226, 227, 228, 233, 234, 237, 238, 239, + 240, 0, 36, 37, 45, 48, 57, 58, + 63, 65, 90, 91, 96, 97, 122, 192, + 193, 196, 218, 229, 236, 241, 247, 9, + 32, 10, 61, 10, 38, 46, 42, 47, + 46, 69, 101, 48, 57, 60, 61, 61, + 62, 61, 45, 95, 194, 195, 198, 199, + 203, 204, 205, 206, 207, 210, 212, 213, + 214, 215, 216, 217, 219, 220, 221, 222, + 223, 224, 225, 226, 227, 228, 233, 234, + 237, 239, 240, 243, 48, 57, 65, 90, + 97, 122, 196, 218, 229, 236, 124, 125, + 128, 191, 170, 181, 186, 128, 191, 151, + 183, 128, 255, 192, 255, 0, 127, 173, + 130, 133, 146, 159, 165, 171, 
175, 191, + 192, 255, 181, 190, 128, 175, 176, 183, + 184, 185, 186, 191, 134, 139, 141, 162, + 128, 135, 136, 255, 182, 130, 137, 176, + 151, 152, 154, 160, 136, 191, 192, 255, + 128, 143, 144, 170, 171, 175, 176, 178, + 179, 191, 128, 159, 160, 191, 176, 128, + 138, 139, 173, 174, 255, 148, 150, 164, + 167, 173, 176, 185, 189, 190, 192, 255, + 144, 128, 145, 146, 175, 176, 191, 128, + 140, 141, 255, 166, 176, 178, 191, 192, + 255, 186, 128, 137, 138, 170, 171, 179, + 180, 181, 182, 191, 160, 161, 162, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 128, 191, 128, 129, 130, 131, + 137, 138, 139, 140, 141, 142, 143, 144, + 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 182, 183, 184, 188, + 189, 190, 191, 132, 187, 129, 130, 132, + 133, 134, 176, 177, 178, 179, 180, 181, + 182, 183, 128, 191, 128, 129, 130, 131, + 132, 133, 134, 135, 144, 136, 143, 145, + 191, 192, 255, 182, 183, 184, 128, 191, + 128, 191, 191, 128, 190, 192, 255, 128, + 146, 147, 148, 152, 153, 154, 155, 156, + 158, 159, 160, 161, 162, 163, 164, 165, + 166, 167, 168, 169, 170, 171, 172, 173, + 174, 175, 176, 129, 191, 192, 255, 158, + 159, 128, 157, 160, 191, 192, 255, 128, + 191, 164, 169, 171, 172, 173, 174, 175, + 180, 181, 182, 183, 184, 185, 187, 188, + 189, 190, 191, 128, 163, 165, 186, 144, + 145, 146, 147, 148, 150, 151, 152, 155, + 157, 158, 160, 170, 171, 172, 175, 128, + 159, 161, 169, 173, 191, 128, 191, 10, + 13, 34, 36, 37, 92, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 34, 92, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 123, 123, 126, 126, 37, 123, + 126, 10, 13, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 128, 191, 128, + 191, 128, 191, 10, 13, 36, 37, 128, + 191, 192, 223, 224, 239, 240, 247, 248, + 255, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 10, 13, 123, 10, 13, 126, 10, + 13, 126, 126, 128, 191, 128, 191, 128, + 191, 10, 13, 36, 37, 128, 191, 192, + 223, 224, 239, 240, 247, 248, 255, 10, + 13, 36, 37, 128, 191, 192, 223, 224, + 239, 240, 247, 248, 255, 10, 13, 10, + 13, 123, 10, 13, 126, 10, 13, 126, + 126, 128, 191, 128, 191, 128, 191, 95, + 194, 195, 198, 199, 203, 204, 205, 206, + 207, 210, 212, 213, 214, 215, 216, 217, + 219, 220, 221, 222, 223, 224, 225, 226, + 227, 228, 233, 234, 237, 238, 239, 240, + 65, 90, 97, 122, 128, 191, 192, 193, + 196, 218, 229, 236, 241, 247, 248, 255, + 45, 95, 194, 195, 198, 199, 203, 204, + 205, 206, 207, 210, 212, 213, 214, 215, + 216, 217, 219, 220, 221, 222, 223, 224, + 225, 226, 227, 228, 233, 234, 237, 239, + 240, 243, 48, 57, 65, 90, 97, 122, + 196, 218, 229, 236, 128, 191, 170, 181, + 186, 128, 191, 151, 183, 128, 255, 192, + 255, 0, 127, 173, 130, 133, 146, 159, + 165, 171, 175, 191, 192, 255, 181, 190, + 128, 175, 176, 183, 184, 185, 186, 191, + 134, 139, 141, 162, 128, 135, 136, 255, + 182, 130, 137, 176, 151, 152, 154, 160, + 136, 191, 192, 255, 128, 143, 144, 170, + 171, 175, 176, 178, 179, 191, 128, 159, + 160, 191, 176, 128, 138, 139, 173, 174, + 255, 148, 150, 164, 167, 173, 176, 185, + 189, 190, 192, 255, 144, 128, 145, 146, + 175, 176, 191, 128, 140, 141, 255, 166, + 176, 178, 191, 192, 255, 186, 128, 137, + 138, 170, 171, 179, 180, 181, 182, 191, + 160, 161, 162, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, + 177, 178, 179, 180, 181, 182, 183, 
184, + 185, 186, 187, 188, 189, 190, 128, 191, + 128, 129, 130, 131, 137, 138, 139, 140, + 141, 142, 143, 144, 153, 154, 155, 156, + 157, 158, 159, 160, 161, 162, 163, 164, + 165, 166, 167, 168, 169, 170, 171, 172, + 173, 174, 175, 176, 177, 178, 179, 180, + 182, 183, 184, 188, 189, 190, 191, 132, + 187, 129, 130, 132, 133, 134, 176, 177, + 178, 179, 180, 181, 182, 183, 128, 191, + 128, 129, 130, 131, 132, 133, 134, 135, + 144, 136, 143, 145, 191, 192, 255, 182, + 183, 184, 128, 191, 128, 191, 191, 128, + 190, 192, 255, 128, 146, 147, 148, 152, + 153, 154, 155, 156, 158, 159, 160, 161, + 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 172, 173, 174, 175, 176, 129, + 191, 192, 255, 158, 159, 128, 157, 160, + 191, 192, 255, 128, 191, 164, 169, 171, + 172, 173, 174, 175, 180, 181, 182, 183, + 184, 185, 187, 188, 189, 190, 191, 128, + 163, 165, 186, 144, 145, 146, 147, 148, + 150, 151, 152, 155, 157, 158, 160, 170, + 171, 172, 175, 128, 159, 161, 169, 173, + 191, 128, 191, +} + +var _hcltok_single_lengths []byte = []byte{ + 0, 1, 1, 2, 3, 2, 0, 32, + 31, 36, 1, 4, 0, 0, 0, 0, + 1, 2, 1, 1, 1, 1, 0, 1, + 1, 0, 0, 2, 0, 0, 0, 1, + 32, 0, 0, 0, 0, 1, 3, 1, + 1, 1, 0, 2, 0, 1, 1, 2, + 0, 3, 0, 1, 0, 2, 1, 2, + 0, 0, 5, 1, 4, 0, 0, 1, + 43, 0, 0, 0, 2, 3, 2, 1, + 1, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 1, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 4, 1, + 0, 15, 0, 0, 0, 1, 6, 1, + 0, 0, 1, 0, 2, 0, 0, 0, + 9, 0, 1, 1, 0, 0, 0, 3, + 0, 1, 0, 28, 0, 0, 0, 1, + 0, 1, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 2, + 0, 0, 18, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 16, 36, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 2, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 28, 0, 0, 0, 1, + 1, 1, 1, 0, 0, 2, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 4, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 0, + 0, 1, 1, 3, 0, 0, 4, 0, + 0, 0, 18, 0, 0, 0, 1, 4, + 1, 4, 1, 0, 3, 2, 2, 2, + 1, 0, 0, 1, 8, 0, 0, 0, + 4, 12, 0, 2, 0, 3, 0, 1, + 0, 2, 0, 1, 2, 0, 3, 1, + 2, 0, 0, 0, 0, 0, 1, 1, + 0, 0, 1, 28, 3, 0, 1, 1, + 2, 1, 0, 1, 1, 2, 1, 1, + 2, 1, 1, 0, 2, 1, 1, 1, + 1, 0, 0, 6, 1, 1, 0, 0, + 46, 1, 1, 0, 0, 0, 0, 2, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 13, 2, 0, 0, + 0, 9, 0, 1, 28, 0, 1, 3, + 0, 2, 0, 0, 0, 1, 0, 1, + 1, 2, 0, 18, 2, 0, 0, 16, + 35, 0, 0, 0, 1, 0, 28, 0, + 0, 0, 0, 1, 0, 2, 0, 0, + 1, 0, 0, 1, 0, 0, 1, 0, + 0, 0, 0, 1, 11, 0, 0, 0, + 0, 4, 0, 12, 1, 7, 0, 4, + 0, 0, 0, 0, 1, 2, 1, 1, + 1, 1, 0, 1, 1, 0, 0, 2, + 0, 0, 0, 1, 32, 0, 0, 0, + 0, 1, 3, 1, 1, 1, 0, 2, + 0, 1, 1, 2, 0, 3, 0, 1, + 0, 2, 1, 2, 0, 0, 5, 1, + 4, 0, 0, 1, 43, 0, 0, 0, + 2, 3, 2, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 4, 1, 0, 15, 0, 0, + 0, 1, 6, 1, 0, 0, 1, 0, + 2, 0, 0, 0, 9, 0, 1, 1, + 0, 0, 0, 3, 0, 1, 0, 28, + 0, 0, 0, 1, 0, 1, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 2, 0, 0, 18, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 16, 36, + 0, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 2, 0, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 0, 0, 28, + 0, 0, 0, 1, 1, 1, 1, 0, + 0, 2, 0, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 4, + 0, 0, 2, 2, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 1, 1, 3, + 0, 0, 4, 0, 0, 0, 18, 0, + 0, 0, 1, 4, 1, 4, 1, 0, + 3, 2, 2, 2, 1, 0, 0, 1, + 8, 0, 0, 0, 4, 12, 0, 2, + 0, 3, 0, 1, 0, 2, 0, 1, + 2, 0, 0, 3, 0, 1, 1, 1, + 2, 2, 4, 1, 6, 2, 4, 2, + 4, 1, 4, 0, 6, 1, 3, 1, + 2, 0, 2, 11, 1, 1, 1, 0, + 1, 1, 0, 2, 0, 3, 3, 2, + 1, 0, 0, 0, 1, 0, 1, 0, + 1, 1, 0, 2, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 4, 
3, 2, 2, 0, 6, + 1, 0, 1, 1, 0, 2, 0, 4, + 3, 0, 1, 1, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 1, + 0, 3, 0, 2, 0, 0, 0, 3, + 0, 2, 1, 1, 3, 1, 0, 0, + 0, 0, 0, 5, 2, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 1, 1, + 0, 0, 35, 4, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 0, + 0, 0, 3, 0, 1, 0, 0, 3, + 0, 0, 1, 0, 0, 0, 0, 28, + 0, 0, 0, 0, 1, 0, 3, 1, + 4, 0, 1, 0, 0, 1, 0, 0, + 1, 0, 0, 0, 0, 1, 1, 0, + 7, 0, 0, 2, 2, 0, 11, 0, + 0, 0, 0, 0, 1, 1, 3, 0, + 0, 4, 0, 0, 0, 12, 1, 4, + 1, 5, 2, 0, 3, 2, 2, 2, + 1, 7, 0, 7, 17, 3, 0, 2, + 0, 3, 0, 0, 1, 0, 2, 0, + 2, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 2, 2, 1, 0, 0, 0, + 2, 2, 4, 0, 0, 0, 0, 1, + 2, 1, 1, 1, 1, 0, 1, 1, + 0, 0, 2, 0, 0, 0, 1, 32, + 0, 0, 0, 0, 1, 3, 1, 1, + 1, 0, 2, 0, 1, 1, 2, 0, + 3, 0, 1, 0, 2, 1, 2, 0, + 0, 5, 1, 4, 0, 0, 1, 43, + 0, 0, 0, 2, 3, 2, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 4, 1, 0, + 15, 0, 0, 0, 1, 6, 1, 0, + 0, 1, 0, 2, 0, 0, 0, 9, + 0, 1, 1, 0, 0, 0, 3, 0, + 1, 0, 28, 0, 0, 0, 1, 0, + 1, 0, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 1, 0, 2, 0, + 0, 18, 0, 0, 1, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 16, 36, 0, 0, 0, 0, 1, + 0, 0, 0, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 2, 0, 0, 0, + 0, 0, 1, 0, 0, 0, 0, 0, + 0, 0, 28, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 2, 0, 1, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 4, 0, 0, 2, 2, 0, + 11, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 3, 0, 0, 4, 0, 0, + 0, 18, 0, 0, 0, 1, 4, 1, + 4, 1, 0, 3, 2, 2, 2, 1, + 0, 0, 1, 8, 0, 0, 0, 4, + 12, 0, 2, 0, 3, 0, 1, 0, + 2, 0, 1, 2, 0, 0, 3, 0, + 1, 1, 1, 2, 2, 4, 1, 6, + 2, 4, 2, 4, 1, 4, 0, 6, + 1, 3, 1, 2, 0, 2, 11, 1, + 1, 1, 0, 1, 1, 0, 2, 0, + 3, 3, 2, 1, 0, 0, 0, 1, + 0, 1, 0, 1, 1, 0, 2, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 0, 4, 3, 2, + 2, 0, 6, 1, 0, 1, 1, 0, + 2, 0, 4, 3, 0, 1, 1, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 1, 0, 3, 0, 2, 0, + 0, 0, 3, 0, 2, 1, 1, 3, + 1, 0, 0, 0, 0, 0, 5, 2, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 1, 1, 0, 0, 35, 4, 0, + 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 0, 0, 0, 3, 0, 1, + 0, 0, 3, 0, 0, 1, 0, 0, + 0, 0, 28, 0, 0, 0, 0, 1, + 0, 3, 1, 4, 0, 1, 0, 0, + 1, 0, 0, 1, 0, 0, 0, 0, + 1, 1, 0, 7, 0, 0, 2, 2, + 0, 11, 0, 0, 0, 0, 0, 1, + 1, 3, 0, 0, 4, 0, 0, 0, + 12, 1, 4, 1, 5, 2, 0, 3, + 2, 2, 2, 1, 7, 0, 7, 17, + 3, 0, 2, 0, 3, 0, 0, 1, + 0, 2, 0, 53, 2, 1, 1, 1, + 1, 1, 2, 3, 2, 2, 1, 34, + 1, 1, 0, 3, 2, 0, 0, 0, + 1, 2, 4, 1, 0, 1, 0, 0, + 0, 0, 1, 1, 1, 0, 0, 1, + 30, 47, 13, 9, 3, 0, 1, 28, + 2, 0, 18, 16, 0, 6, 4, 2, + 2, 0, 1, 1, 1, 2, 1, 2, + 0, 0, 0, 4, 2, 2, 3, 3, + 2, 1, 1, 0, 0, 0, 4, 2, + 2, 3, 3, 2, 1, 1, 0, 0, + 0, 33, 34, 0, 3, 2, 0, 0, + 0, 1, 2, 4, 1, 0, 1, 0, + 0, 0, 0, 1, 1, 1, 0, 0, + 1, 30, 47, 13, 9, 3, 0, 1, + 28, 2, 0, 18, 16, 0, +} + +var _hcltok_range_lengths []byte = []byte{ + 0, 0, 0, 0, 1, 1, 1, 5, + 5, 5, 0, 0, 3, 0, 1, 1, + 4, 2, 3, 0, 1, 0, 2, 2, + 4, 2, 2, 3, 1, 1, 1, 1, + 0, 1, 1, 2, 2, 1, 4, 6, + 9, 6, 8, 5, 8, 7, 10, 4, + 6, 4, 7, 7, 5, 5, 4, 5, + 1, 2, 8, 4, 3, 3, 3, 0, + 3, 1, 2, 1, 2, 2, 3, 3, + 1, 3, 2, 2, 1, 2, 2, 2, + 3, 4, 4, 3, 1, 2, 1, 3, + 2, 2, 2, 2, 2, 3, 3, 1, + 1, 2, 1, 3, 2, 2, 3, 2, + 7, 0, 1, 4, 1, 2, 4, 2, + 1, 2, 0, 2, 2, 3, 5, 5, + 1, 4, 1, 1, 2, 2, 1, 0, + 0, 1, 1, 1, 1, 1, 2, 2, + 2, 2, 1, 1, 1, 4, 2, 2, + 3, 1, 4, 4, 6, 1, 3, 1, + 1, 2, 1, 1, 1, 5, 3, 1, + 1, 1, 2, 3, 3, 1, 2, 2, + 1, 4, 1, 2, 5, 2, 1, 1, + 0, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 1, 1, 2, 4, 2, 1, + 2, 2, 2, 6, 1, 1, 2, 1, + 2, 1, 1, 1, 2, 2, 2, 1, + 3, 2, 5, 2, 8, 6, 2, 2, + 2, 2, 3, 1, 3, 1, 2, 1, + 3, 2, 2, 3, 1, 1, 1, 1, + 1, 1, 1, 2, 2, 4, 1, 2, + 1, 0, 1, 1, 1, 1, 0, 1, 
+ 2, 3, 1, 3, 3, 1, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 2, 2, 2, 1, 5, 2, 2, + 5, 7, 5, 0, 1, 0, 1, 1, + 1, 1, 1, 0, 1, 1, 0, 3, + 3, 1, 1, 2, 1, 3, 5, 1, + 1, 2, 2, 1, 1, 1, 1, 2, + 6, 3, 7, 2, 6, 1, 6, 2, + 8, 0, 4, 2, 5, 2, 3, 3, + 3, 1, 2, 8, 2, 0, 2, 1, + 2, 1, 5, 2, 1, 3, 3, 0, + 2, 1, 2, 1, 0, 1, 1, 3, + 1, 1, 2, 3, 0, 0, 3, 2, + 4, 1, 4, 1, 1, 3, 1, 1, + 1, 1, 2, 2, 1, 3, 1, 4, + 3, 3, 1, 1, 5, 2, 1, 1, + 2, 1, 2, 1, 3, 2, 0, 1, + 1, 1, 1, 1, 1, 1, 2, 1, + 1, 1, 1, 1, 1, 1, 0, 1, + 1, 2, 2, 1, 1, 1, 3, 2, + 1, 0, 2, 1, 1, 1, 1, 0, + 3, 0, 1, 1, 4, 2, 3, 0, + 1, 0, 2, 2, 4, 2, 2, 3, + 1, 1, 1, 1, 0, 1, 1, 2, + 2, 1, 4, 6, 9, 6, 8, 5, + 8, 7, 10, 4, 6, 4, 7, 7, + 5, 5, 4, 5, 1, 2, 8, 4, + 3, 3, 3, 0, 3, 1, 2, 1, + 2, 2, 3, 3, 1, 3, 2, 2, + 1, 2, 2, 2, 3, 4, 4, 3, + 1, 2, 1, 3, 2, 2, 2, 2, + 2, 3, 3, 1, 1, 2, 1, 3, + 2, 2, 3, 2, 7, 0, 1, 4, + 1, 2, 4, 2, 1, 2, 0, 2, + 2, 3, 5, 5, 1, 4, 1, 1, + 2, 2, 1, 0, 0, 1, 1, 1, + 1, 1, 2, 2, 2, 2, 1, 1, + 1, 4, 2, 2, 3, 1, 4, 4, + 6, 1, 3, 1, 1, 2, 1, 1, + 1, 5, 3, 1, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 4, 1, 2, + 5, 2, 1, 1, 0, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 1, 1, + 2, 4, 2, 1, 2, 2, 2, 6, + 1, 1, 2, 1, 2, 1, 1, 1, + 2, 2, 2, 1, 3, 2, 5, 2, + 8, 6, 2, 2, 2, 2, 3, 1, + 3, 1, 2, 1, 3, 2, 2, 3, + 1, 1, 1, 1, 1, 1, 1, 2, + 2, 4, 1, 2, 1, 0, 1, 1, + 1, 1, 0, 1, 2, 3, 1, 3, + 3, 1, 0, 3, 0, 2, 3, 1, + 0, 0, 0, 0, 2, 2, 2, 2, + 1, 5, 2, 2, 5, 7, 5, 0, + 1, 0, 1, 1, 1, 1, 1, 0, + 1, 1, 1, 2, 2, 3, 3, 4, + 7, 5, 7, 5, 3, 3, 7, 3, + 13, 1, 3, 5, 3, 5, 3, 6, + 5, 2, 2, 8, 4, 1, 2, 3, + 2, 10, 2, 2, 0, 2, 3, 3, + 1, 2, 3, 3, 1, 2, 3, 3, + 4, 4, 2, 1, 2, 2, 3, 2, + 2, 5, 3, 2, 3, 2, 1, 3, + 3, 6, 2, 2, 5, 2, 5, 1, + 1, 2, 4, 1, 11, 1, 3, 8, + 4, 2, 1, 0, 4, 3, 3, 3, + 2, 9, 1, 1, 4, 3, 2, 2, + 2, 3, 4, 2, 3, 2, 4, 3, + 2, 2, 3, 3, 4, 3, 3, 4, + 2, 5, 4, 8, 7, 1, 2, 1, + 3, 1, 2, 5, 1, 2, 2, 2, + 2, 1, 3, 2, 2, 3, 3, 1, + 9, 1, 5, 1, 3, 2, 2, 3, + 2, 3, 3, 3, 1, 3, 3, 2, + 2, 4, 5, 3, 3, 4, 3, 3, + 3, 2, 2, 2, 4, 2, 2, 1, + 3, 3, 3, 3, 3, 3, 2, 2, + 3, 2, 3, 3, 2, 3, 2, 3, + 1, 2, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 2, 3, 2, 3, 2, + 3, 5, 3, 3, 1, 2, 3, 2, + 2, 1, 2, 3, 4, 3, 0, 3, + 0, 2, 3, 1, 0, 0, 0, 0, + 2, 3, 2, 4, 6, 4, 1, 1, + 2, 1, 2, 1, 3, 2, 3, 2, + 5, 1, 1, 1, 1, 1, 0, 1, + 1, 1, 0, 0, 0, 1, 1, 1, + 0, 0, 0, 3, 0, 1, 1, 4, + 2, 3, 0, 1, 0, 2, 2, 4, + 2, 2, 3, 1, 1, 1, 1, 0, + 1, 1, 2, 2, 1, 4, 6, 9, + 6, 8, 5, 8, 7, 10, 4, 6, + 4, 7, 7, 5, 5, 4, 5, 1, + 2, 8, 4, 3, 3, 3, 0, 3, + 1, 2, 1, 2, 2, 3, 3, 1, + 3, 2, 2, 1, 2, 2, 2, 3, + 4, 4, 3, 1, 2, 1, 3, 2, + 2, 2, 2, 2, 3, 3, 1, 1, + 2, 1, 3, 2, 2, 3, 2, 7, + 0, 1, 4, 1, 2, 4, 2, 1, + 2, 0, 2, 2, 3, 5, 5, 1, + 4, 1, 1, 2, 2, 1, 0, 0, + 1, 1, 1, 1, 1, 2, 2, 2, + 2, 1, 1, 1, 4, 2, 2, 3, + 1, 4, 4, 6, 1, 3, 1, 1, + 2, 1, 1, 1, 5, 3, 1, 1, + 1, 2, 3, 3, 1, 2, 2, 1, + 4, 1, 2, 5, 2, 1, 1, 0, + 2, 2, 2, 2, 2, 2, 2, 2, + 2, 1, 1, 2, 4, 2, 1, 2, + 2, 2, 6, 1, 1, 2, 1, 2, + 1, 1, 1, 2, 2, 2, 1, 3, + 2, 5, 2, 8, 6, 2, 2, 2, + 2, 3, 1, 3, 1, 2, 1, 3, + 2, 2, 3, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 4, 1, 2, 1, + 0, 1, 1, 1, 1, 0, 1, 2, + 3, 1, 3, 3, 1, 0, 3, 0, + 2, 3, 1, 0, 0, 0, 0, 2, + 2, 2, 2, 1, 5, 2, 2, 5, + 7, 5, 0, 1, 0, 1, 1, 1, + 1, 1, 0, 1, 1, 1, 2, 2, + 3, 3, 4, 7, 5, 7, 5, 3, + 3, 7, 3, 13, 1, 3, 5, 3, + 5, 3, 6, 5, 2, 2, 8, 4, + 1, 2, 3, 2, 10, 2, 2, 0, + 2, 3, 3, 1, 2, 3, 3, 1, + 2, 3, 3, 4, 4, 2, 1, 2, + 2, 3, 2, 2, 5, 3, 2, 3, + 2, 1, 3, 3, 6, 2, 2, 5, + 2, 5, 1, 1, 2, 4, 1, 11, + 1, 3, 8, 4, 2, 1, 0, 4, + 3, 3, 3, 2, 9, 1, 1, 4, + 3, 2, 2, 2, 3, 4, 2, 3, + 2, 4, 3, 2, 2, 3, 3, 4, + 3, 3, 4, 2, 5, 4, 8, 7, + 1, 2, 1, 
3, 1, 2, 5, 1, + 2, 2, 2, 2, 1, 3, 2, 2, + 3, 3, 1, 9, 1, 5, 1, 3, + 2, 2, 3, 2, 3, 3, 3, 1, + 3, 3, 2, 2, 4, 5, 3, 3, + 4, 3, 3, 3, 2, 2, 2, 4, + 2, 2, 1, 3, 3, 3, 3, 3, + 3, 2, 2, 3, 2, 3, 3, 2, + 3, 2, 3, 1, 2, 2, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 3, + 2, 3, 2, 3, 5, 3, 3, 1, + 2, 3, 2, 2, 1, 2, 3, 4, + 3, 0, 3, 0, 2, 3, 1, 0, + 0, 0, 0, 2, 3, 2, 4, 6, + 4, 1, 1, 2, 1, 2, 1, 3, + 2, 3, 2, 11, 0, 0, 0, 0, + 0, 0, 0, 1, 0, 0, 0, 5, + 0, 0, 1, 1, 1, 0, 1, 1, + 5, 4, 2, 0, 1, 0, 2, 2, + 5, 2, 3, 5, 3, 2, 3, 5, + 1, 1, 1, 3, 1, 1, 2, 2, + 3, 1, 2, 3, 1, 5, 6, 0, + 0, 0, 0, 0, 0, 0, 0, 5, + 1, 1, 1, 5, 6, 0, 0, 0, + 0, 0, 0, 1, 1, 1, 5, 6, + 0, 0, 0, 0, 0, 0, 1, 1, + 1, 8, 5, 1, 1, 1, 0, 1, + 1, 5, 4, 2, 0, 1, 0, 2, + 2, 5, 2, 3, 5, 3, 2, 3, + 5, 1, 1, 1, 3, 1, 1, 2, + 2, 3, 1, 2, 3, 1, +} + +var _hcltok_index_offsets []int16 = []int16{ + 0, 0, 2, 4, 7, 12, 16, 18, + 56, 93, 135, 137, 142, 146, 147, 149, + 151, 157, 162, 167, 169, 172, 174, 177, + 181, 187, 190, 193, 199, 201, 203, 205, + 208, 241, 243, 245, 248, 251, 254, 262, + 270, 281, 289, 298, 306, 315, 324, 336, + 343, 350, 358, 366, 375, 381, 389, 395, + 403, 405, 408, 422, 428, 436, 440, 444, + 446, 493, 495, 498, 500, 505, 511, 517, + 522, 525, 529, 532, 535, 537, 540, 543, + 546, 550, 555, 560, 564, 566, 569, 571, + 575, 578, 581, 584, 587, 591, 596, 600, + 602, 604, 607, 609, 613, 616, 619, 627, + 631, 639, 655, 657, 662, 664, 668, 679, + 683, 685, 688, 690, 693, 698, 702, 708, + 714, 725, 730, 733, 736, 739, 742, 744, + 748, 749, 752, 754, 784, 786, 788, 791, + 795, 798, 802, 804, 806, 808, 814, 817, + 820, 824, 826, 831, 836, 843, 846, 850, + 854, 856, 859, 879, 881, 883, 890, 894, + 896, 898, 900, 903, 907, 911, 913, 917, + 920, 922, 927, 945, 984, 990, 993, 995, + 997, 999, 1002, 1005, 1008, 1011, 1014, 1018, + 1021, 1024, 1027, 1029, 1031, 1034, 1041, 1044, + 1046, 1049, 1052, 1055, 1063, 1065, 1067, 1070, + 1072, 1075, 1077, 1079, 1109, 1112, 1115, 1118, + 1121, 1126, 1130, 1137, 1140, 1149, 1158, 1161, + 1165, 1168, 1171, 1175, 1177, 1181, 1183, 1186, + 1188, 1192, 1196, 1200, 1208, 1210, 1212, 1216, + 1220, 1222, 1235, 1237, 1240, 1243, 1248, 1250, + 1253, 1255, 1257, 1260, 1265, 1267, 1269, 1274, + 1276, 1279, 1283, 1303, 1307, 1311, 1313, 1315, + 1323, 1325, 1332, 1337, 1339, 1343, 1346, 1349, + 1352, 1356, 1359, 1362, 1366, 1376, 1382, 1385, + 1388, 1398, 1418, 1424, 1427, 1429, 1433, 1435, + 1438, 1440, 1444, 1446, 1448, 1452, 1454, 1458, + 1463, 1469, 1471, 1473, 1476, 1478, 1482, 1489, + 1492, 1494, 1497, 1501, 1531, 1536, 1538, 1541, + 1545, 1554, 1559, 1567, 1571, 1579, 1583, 1591, + 1595, 1606, 1608, 1614, 1617, 1625, 1629, 1634, + 1639, 1644, 1646, 1649, 1664, 1668, 1670, 1673, + 1675, 1724, 1727, 1734, 1737, 1739, 1743, 1747, + 1750, 1754, 1756, 1759, 1761, 1763, 1765, 1767, + 1771, 1773, 1775, 1778, 1782, 1796, 1799, 1803, + 1806, 1811, 1822, 1827, 1830, 1860, 1864, 1867, + 1872, 1874, 1878, 1881, 1884, 1886, 1891, 1893, + 1899, 1904, 1910, 1912, 1932, 1940, 1943, 1945, + 1963, 2001, 2003, 2006, 2008, 2013, 2016, 2045, + 2047, 2049, 2051, 2053, 2056, 2058, 2062, 2065, + 2067, 2070, 2072, 2074, 2077, 2079, 2081, 2083, + 2085, 2087, 2090, 2093, 2096, 2109, 2111, 2115, + 2118, 2120, 2125, 2128, 2142, 2145, 2154, 2156, + 2161, 2165, 2166, 2168, 2170, 2176, 2181, 2186, + 2188, 2191, 2193, 2196, 2200, 2206, 2209, 2212, + 2218, 2220, 2222, 2224, 2227, 2260, 2262, 2264, + 2267, 2270, 2273, 2281, 2289, 2300, 2308, 2317, + 2325, 2334, 2343, 2355, 2362, 2369, 2377, 2385, + 2394, 2400, 2408, 2414, 2422, 2424, 2427, 2441, + 
2447, 2455, 2459, 2463, 2465, 2512, 2514, 2517, + 2519, 2524, 2530, 2536, 2541, 2544, 2548, 2551, + 2554, 2556, 2559, 2562, 2565, 2569, 2574, 2579, + 2583, 2585, 2588, 2590, 2594, 2597, 2600, 2603, + 2606, 2610, 2615, 2619, 2621, 2623, 2626, 2628, + 2632, 2635, 2638, 2646, 2650, 2658, 2674, 2676, + 2681, 2683, 2687, 2698, 2702, 2704, 2707, 2709, + 2712, 2717, 2721, 2727, 2733, 2744, 2749, 2752, + 2755, 2758, 2761, 2763, 2767, 2768, 2771, 2773, + 2803, 2805, 2807, 2810, 2814, 2817, 2821, 2823, + 2825, 2827, 2833, 2836, 2839, 2843, 2845, 2850, + 2855, 2862, 2865, 2869, 2873, 2875, 2878, 2898, + 2900, 2902, 2909, 2913, 2915, 2917, 2919, 2922, + 2926, 2930, 2932, 2936, 2939, 2941, 2946, 2964, + 3003, 3009, 3012, 3014, 3016, 3018, 3021, 3024, + 3027, 3030, 3033, 3037, 3040, 3043, 3046, 3048, + 3050, 3053, 3060, 3063, 3065, 3068, 3071, 3074, + 3082, 3084, 3086, 3089, 3091, 3094, 3096, 3098, + 3128, 3131, 3134, 3137, 3140, 3145, 3149, 3156, + 3159, 3168, 3177, 3180, 3184, 3187, 3190, 3194, + 3196, 3200, 3202, 3205, 3207, 3211, 3215, 3219, + 3227, 3229, 3231, 3235, 3239, 3241, 3254, 3256, + 3259, 3262, 3267, 3269, 3272, 3274, 3276, 3279, + 3284, 3286, 3288, 3293, 3295, 3298, 3302, 3322, + 3326, 3330, 3332, 3334, 3342, 3344, 3351, 3356, + 3358, 3362, 3365, 3368, 3371, 3375, 3378, 3381, + 3385, 3395, 3401, 3404, 3407, 3417, 3437, 3443, + 3446, 3448, 3452, 3454, 3457, 3459, 3463, 3465, + 3467, 3471, 3473, 3475, 3481, 3484, 3489, 3494, + 3500, 3510, 3518, 3530, 3537, 3547, 3553, 3565, + 3571, 3589, 3592, 3600, 3606, 3616, 3623, 3630, + 3638, 3646, 3649, 3654, 3674, 3680, 3683, 3687, + 3691, 3695, 3707, 3710, 3715, 3716, 3722, 3729, + 3735, 3738, 3741, 3745, 3749, 3752, 3755, 3760, + 3764, 3770, 3776, 3779, 3783, 3786, 3789, 3794, + 3797, 3800, 3806, 3810, 3813, 3817, 3820, 3823, + 3827, 3831, 3838, 3841, 3844, 3850, 3853, 3860, + 3862, 3864, 3867, 3876, 3881, 3895, 3899, 3903, + 3918, 3924, 3927, 3930, 3932, 3937, 3943, 3947, + 3955, 3961, 3971, 3974, 3977, 3982, 3986, 3989, + 3992, 3995, 3999, 4004, 4008, 4012, 4015, 4020, + 4025, 4028, 4034, 4038, 4044, 4049, 4053, 4057, + 4065, 4068, 4076, 4082, 4092, 4103, 4106, 4109, + 4111, 4115, 4117, 4120, 4131, 4135, 4138, 4141, + 4144, 4147, 4149, 4153, 4157, 4160, 4164, 4169, + 4172, 4182, 4184, 4225, 4231, 4235, 4238, 4241, + 4245, 4248, 4252, 4256, 4261, 4263, 4267, 4271, + 4274, 4277, 4282, 4291, 4295, 4300, 4305, 4309, + 4316, 4320, 4323, 4327, 4330, 4335, 4338, 4341, + 4371, 4375, 4379, 4383, 4387, 4392, 4396, 4402, + 4406, 4414, 4417, 4422, 4426, 4429, 4434, 4437, + 4441, 4444, 4447, 4450, 4453, 4456, 4460, 4464, + 4467, 4477, 4480, 4483, 4488, 4494, 4497, 4512, + 4515, 4519, 4525, 4529, 4533, 4536, 4540, 4547, + 4550, 4553, 4559, 4562, 4566, 4571, 4587, 4589, + 4597, 4599, 4607, 4613, 4615, 4619, 4622, 4625, + 4628, 4632, 4643, 4646, 4658, 4682, 4690, 4692, + 4696, 4699, 4704, 4707, 4709, 4714, 4717, 4723, + 4726, 4734, 4736, 4738, 4740, 4742, 4744, 4746, + 4748, 4750, 4752, 4755, 4758, 4760, 4762, 4764, + 4766, 4769, 4772, 4777, 4781, 4782, 4784, 4786, + 4792, 4797, 4802, 4804, 4807, 4809, 4812, 4816, + 4822, 4825, 4828, 4834, 4836, 4838, 4840, 4843, + 4876, 4878, 4880, 4883, 4886, 4889, 4897, 4905, + 4916, 4924, 4933, 4941, 4950, 4959, 4971, 4978, + 4985, 4993, 5001, 5010, 5016, 5024, 5030, 5038, + 5040, 5043, 5057, 5063, 5071, 5075, 5079, 5081, + 5128, 5130, 5133, 5135, 5140, 5146, 5152, 5157, + 5160, 5164, 5167, 5170, 5172, 5175, 5178, 5181, + 5185, 5190, 5195, 5199, 5201, 5204, 5206, 5210, + 5213, 5216, 5219, 5222, 5226, 5231, 5235, 5237, + 
5239, 5242, 5244, 5248, 5251, 5254, 5262, 5266, + 5274, 5290, 5292, 5297, 5299, 5303, 5314, 5318, + 5320, 5323, 5325, 5328, 5333, 5337, 5343, 5349, + 5360, 5365, 5368, 5371, 5374, 5377, 5379, 5383, + 5384, 5387, 5389, 5419, 5421, 5423, 5426, 5430, + 5433, 5437, 5439, 5441, 5443, 5449, 5452, 5455, + 5459, 5461, 5466, 5471, 5478, 5481, 5485, 5489, + 5491, 5494, 5514, 5516, 5518, 5525, 5529, 5531, + 5533, 5535, 5538, 5542, 5546, 5548, 5552, 5555, + 5557, 5562, 5580, 5619, 5625, 5628, 5630, 5632, + 5634, 5637, 5640, 5643, 5646, 5649, 5653, 5656, + 5659, 5662, 5664, 5666, 5669, 5676, 5679, 5681, + 5684, 5687, 5690, 5698, 5700, 5702, 5705, 5707, + 5710, 5712, 5714, 5744, 5747, 5750, 5753, 5756, + 5761, 5765, 5772, 5775, 5784, 5793, 5796, 5800, + 5803, 5806, 5810, 5812, 5816, 5818, 5821, 5823, + 5827, 5831, 5835, 5843, 5845, 5847, 5851, 5855, + 5857, 5870, 5872, 5875, 5878, 5883, 5885, 5888, + 5890, 5892, 5895, 5900, 5902, 5904, 5909, 5911, + 5914, 5918, 5938, 5942, 5946, 5948, 5950, 5958, + 5960, 5967, 5972, 5974, 5978, 5981, 5984, 5987, + 5991, 5994, 5997, 6001, 6011, 6017, 6020, 6023, + 6033, 6053, 6059, 6062, 6064, 6068, 6070, 6073, + 6075, 6079, 6081, 6083, 6087, 6089, 6091, 6097, + 6100, 6105, 6110, 6116, 6126, 6134, 6146, 6153, + 6163, 6169, 6181, 6187, 6205, 6208, 6216, 6222, + 6232, 6239, 6246, 6254, 6262, 6265, 6270, 6290, + 6296, 6299, 6303, 6307, 6311, 6323, 6326, 6331, + 6332, 6338, 6345, 6351, 6354, 6357, 6361, 6365, + 6368, 6371, 6376, 6380, 6386, 6392, 6395, 6399, + 6402, 6405, 6410, 6413, 6416, 6422, 6426, 6429, + 6433, 6436, 6439, 6443, 6447, 6454, 6457, 6460, + 6466, 6469, 6476, 6478, 6480, 6483, 6492, 6497, + 6511, 6515, 6519, 6534, 6540, 6543, 6546, 6548, + 6553, 6559, 6563, 6571, 6577, 6587, 6590, 6593, + 6598, 6602, 6605, 6608, 6611, 6615, 6620, 6624, + 6628, 6631, 6636, 6641, 6644, 6650, 6654, 6660, + 6665, 6669, 6673, 6681, 6684, 6692, 6698, 6708, + 6719, 6722, 6725, 6727, 6731, 6733, 6736, 6747, + 6751, 6754, 6757, 6760, 6763, 6765, 6769, 6773, + 6776, 6780, 6785, 6788, 6798, 6800, 6841, 6847, + 6851, 6854, 6857, 6861, 6864, 6868, 6872, 6877, + 6879, 6883, 6887, 6890, 6893, 6898, 6907, 6911, + 6916, 6921, 6925, 6932, 6936, 6939, 6943, 6946, + 6951, 6954, 6957, 6987, 6991, 6995, 6999, 7003, + 7008, 7012, 7018, 7022, 7030, 7033, 7038, 7042, + 7045, 7050, 7053, 7057, 7060, 7063, 7066, 7069, + 7072, 7076, 7080, 7083, 7093, 7096, 7099, 7104, + 7110, 7113, 7128, 7131, 7135, 7141, 7145, 7149, + 7152, 7156, 7163, 7166, 7169, 7175, 7178, 7182, + 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231, + 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274, + 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325, + 7330, 7333, 7339, 7342, 7407, 7410, 7412, 7414, + 7416, 7418, 7420, 7423, 7428, 7431, 7434, 7436, + 7476, 7478, 7480, 7482, 7487, 7491, 7492, 7494, + 7496, 7503, 7510, 7517, 7519, 7521, 7523, 7526, + 7529, 7535, 7538, 7543, 7550, 7555, 7558, 7562, + 7569, 7601, 7650, 7665, 7678, 7683, 7685, 7689, + 7720, 7726, 7728, 7749, 7769, 7771, 7783, 7794, + 7797, 7800, 7801, 7803, 7805, 7807, 7810, 7812, + 7820, 7822, 7824, 7826, 7836, 7845, 7848, 7852, + 7856, 7859, 7861, 7863, 7865, 7867, 7869, 7879, + 7888, 7891, 7895, 7899, 7902, 7904, 7906, 7908, + 7910, 7912, 7954, 7994, 7996, 8001, 8005, 8006, + 8008, 8010, 8017, 8024, 8031, 8033, 8035, 8037, + 8040, 8043, 8049, 8052, 8057, 8064, 8069, 8072, + 8076, 8083, 8115, 8164, 8179, 8192, 8197, 8199, + 8203, 8234, 8240, 8242, 8263, 8283, +} + +var _hcltok_indicies []int16 = []int16{ + 1, 0, 3, 2, 3, 4, 2, 6, + 8, 8, 7, 5, 9, 9, 7, 5, + 7, 5, 10, 11, 12, 
13, 15, 16, + 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 39, 40, 41, + 42, 43, 11, 11, 14, 14, 38, 0, + 11, 12, 13, 15, 16, 17, 18, 19, + 20, 21, 22, 23, 24, 25, 26, 27, + 28, 29, 30, 31, 32, 33, 34, 35, + 36, 37, 39, 40, 41, 42, 43, 11, + 11, 14, 14, 38, 0, 44, 45, 11, + 11, 46, 13, 15, 16, 17, 16, 47, + 48, 20, 49, 22, 23, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, + 61, 62, 37, 39, 63, 41, 64, 65, + 66, 11, 11, 11, 14, 38, 0, 44, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 0, 11, 0, 11, 11, 0, 0, + 0, 0, 0, 0, 11, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 0, + 11, 0, 0, 11, 0, 11, 0, 0, + 11, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 11, 11, 11, 0, + 67, 68, 69, 70, 14, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 93, 94, 95, 96, 97, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 0, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 0, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 98, 99, + 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 16, + 115, 116, 117, 118, 119, 120, 121, 122, + 123, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 14, 15, 133, 134, 135, 136, + 137, 14, 16, 14, 0, 11, 0, 11, + 11, 0, 0, 11, 0, 0, 0, 0, + 11, 0, 0, 0, 0, 0, 11, 0, + 0, 0, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 0, + 11, 11, 11, 0, 0, 0, 11, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 0, 0, 11, 11, 11, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 0, 11, 11, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 11, 11, 11, 0, 0, 0, 0, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 0, 11, 11, 11, 0, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 138, + 139, 140, 141, 142, 143, 144, 145, 146, + 14, 147, 148, 149, 150, 151, 0, 11, + 0, 0, 0, 0, 0, 11, 11, 0, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 11, 11, + 11, 0, 0, 0, 0, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 0, 152, 109, 153, 154, 155, 14, + 156, 157, 16, 14, 0, 11, 11, 11, + 11, 0, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 119, 0, 16, 14, 14, 158, 0, 14, + 0, 11, 16, 159, 160, 16, 161, 162, + 16, 57, 163, 164, 165, 166, 167, 16, + 168, 169, 170, 16, 171, 172, 173, 15, + 174, 175, 176, 15, 177, 16, 14, 0, + 0, 11, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 0, 0, 0, + 0, 11, 11, 0, 0, 11, 11, 0, + 0, 0, 0, 0, 0, 11, 11, 11, + 0, 0, 0, 11, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 11, 11, 11, + 11, 11, 0, 0, 0, 11, 11, 11, + 11, 0, 178, 179, 0, 14, 0, 11, + 0, 0, 11, 16, 180, 181, 182, 183, + 57, 184, 185, 55, 186, 187, 188, 189, + 190, 191, 192, 193, 194, 14, 0, 0, + 11, 0, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 0, 
11, 0, + 0, 11, 0, 11, 0, 0, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 0, + 11, 11, 11, 11, 0, 11, 11, 0, + 0, 11, 11, 11, 11, 11, 0, 195, + 196, 197, 198, 199, 200, 201, 202, 203, + 204, 205, 201, 206, 207, 208, 209, 38, + 0, 210, 211, 16, 212, 213, 214, 215, + 216, 217, 218, 219, 220, 16, 14, 221, + 222, 223, 224, 16, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, + 237, 238, 239, 16, 144, 14, 240, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 11, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 11, 11, 0, 0, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 11, 11, 0, 0, 11, 0, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 0, 11, 11, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 0, 11, 0, 11, 11, 0, 11, 0, + 11, 11, 0, 11, 0, 11, 0, 241, + 212, 242, 243, 244, 245, 246, 247, 248, + 249, 250, 98, 251, 16, 252, 253, 254, + 16, 255, 129, 256, 257, 258, 259, 260, + 261, 262, 263, 16, 0, 0, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 11, + 0, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 0, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 0, 11, 11, 11, 0, + 0, 0, 11, 0, 0, 0, 11, 11, + 0, 11, 11, 11, 0, 11, 0, 0, + 0, 11, 11, 0, 11, 11, 11, 0, + 11, 11, 11, 0, 0, 0, 0, 11, + 16, 181, 264, 265, 14, 16, 14, 0, + 0, 11, 0, 11, 16, 264, 14, 0, + 16, 266, 14, 0, 0, 11, 16, 267, + 268, 269, 172, 270, 271, 16, 272, 273, + 274, 14, 0, 0, 11, 11, 11, 0, + 11, 11, 0, 11, 11, 11, 11, 0, + 0, 11, 0, 0, 11, 11, 0, 11, + 0, 16, 14, 0, 275, 16, 276, 0, + 14, 0, 11, 0, 11, 277, 16, 278, + 279, 0, 11, 0, 0, 0, 11, 11, + 11, 11, 0, 280, 281, 282, 16, 283, + 284, 285, 286, 287, 288, 289, 290, 291, + 292, 293, 294, 295, 296, 14, 0, 11, + 11, 11, 0, 0, 0, 0, 11, 11, + 0, 0, 11, 0, 0, 0, 0, 0, + 0, 0, 11, 0, 11, 0, 0, 0, + 0, 0, 0, 11, 11, 11, 11, 11, + 0, 0, 11, 0, 0, 0, 11, 0, + 0, 11, 0, 0, 11, 0, 0, 11, + 0, 0, 0, 11, 11, 11, 0, 0, + 0, 11, 11, 11, 11, 0, 297, 16, + 298, 16, 299, 300, 301, 302, 14, 0, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 11, 0, + 303, 16, 14, 0, 11, 304, 16, 100, + 14, 0, 11, 305, 0, 14, 0, 11, + 16, 306, 14, 0, 0, 11, 307, 0, + 16, 308, 14, 0, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 0, 0, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 11, 0, 0, 0, 0, 0, 0, 0, + 11, 11, 11, 0, 11, 0, 0, 0, + 11, 11, 11, 11, 0, 309, 310, 69, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 328, 329, 331, 332, 333, 334, 335, + 336, 330, 0, 11, 11, 11, 11, 0, + 11, 0, 11, 11, 0, 11, 11, 11, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 0, 11, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 11, 0, 11, + 11, 11, 0, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 11, 11, 11, 11, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 0, 11, 11, 11, 0, 11, 11, 11, + 11, 0, 11, 11, 11, 11, 0, 11, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 11, 11, 11, 11, 11, 11, + 11, 11, 11, 11, 11, 11, 11, 0, + 11, 11, 11, 0, 11, 0, 11, 11, + 0, 11, 0, 337, 338, 339, 101, 102, + 103, 104, 105, 340, 107, 108, 109, 110, + 111, 112, 341, 342, 167, 343, 258, 117, + 344, 119, 229, 269, 122, 345, 346, 347, + 348, 349, 350, 351, 352, 353, 354, 131, + 355, 16, 14, 15, 16, 134, 135, 136, + 137, 14, 14, 0, 11, 11, 0, 11, + 11, 11, 11, 11, 11, 0, 0, 0, + 11, 0, 11, 11, 
11, 11, 0, 11, + 11, 11, 0, 11, 11, 0, 11, 11, + 11, 0, 0, 11, 11, 11, 0, 0, + 11, 11, 0, 11, 0, 11, 0, 11, + 11, 11, 0, 0, 11, 11, 0, 11, + 11, 0, 11, 11, 11, 0, 356, 140, + 142, 143, 144, 145, 146, 14, 357, 148, + 358, 150, 359, 0, 11, 11, 0, 0, + 0, 0, 11, 0, 0, 11, 11, 11, + 11, 11, 0, 360, 109, 361, 154, 155, + 14, 156, 157, 16, 14, 0, 11, 11, + 11, 11, 0, 0, 0, 11, 16, 159, + 160, 16, 362, 363, 219, 308, 163, 164, + 165, 364, 167, 365, 366, 367, 368, 369, + 370, 371, 372, 373, 374, 175, 176, 15, + 375, 16, 14, 0, 0, 0, 0, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 0, 11, 11, 0, 11, 11, + 11, 11, 0, 11, 0, 11, 11, 11, + 11, 11, 0, 0, 0, 0, 0, 11, + 11, 11, 11, 11, 11, 0, 11, 0, + 16, 180, 181, 376, 183, 57, 184, 185, + 55, 186, 187, 377, 14, 190, 378, 192, + 193, 194, 14, 0, 11, 11, 11, 11, + 11, 11, 11, 0, 11, 11, 0, 11, + 0, 379, 380, 197, 198, 199, 381, 201, + 202, 382, 383, 384, 201, 206, 207, 208, + 209, 38, 0, 210, 211, 16, 212, 213, + 215, 385, 217, 386, 219, 220, 16, 14, + 387, 222, 223, 224, 16, 225, 226, 227, + 228, 229, 230, 231, 232, 388, 234, 235, + 389, 237, 238, 239, 16, 144, 14, 240, + 0, 0, 11, 0, 0, 11, 0, 11, + 11, 11, 11, 11, 0, 11, 11, 0, + 390, 391, 392, 393, 394, 395, 396, 397, + 247, 398, 319, 399, 213, 400, 401, 402, + 403, 404, 401, 405, 406, 407, 258, 408, + 260, 409, 410, 271, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 11, 0, + 11, 0, 11, 11, 11, 0, 11, 11, + 0, 0, 11, 11, 11, 0, 11, 0, + 11, 0, 11, 11, 0, 11, 0, 11, + 0, 11, 0, 11, 0, 11, 0, 0, + 0, 11, 11, 11, 0, 11, 11, 0, + 16, 267, 229, 411, 401, 412, 271, 16, + 413, 414, 274, 14, 0, 11, 0, 11, + 11, 11, 0, 0, 0, 11, 11, 0, + 277, 16, 278, 415, 0, 11, 11, 0, + 16, 283, 284, 285, 286, 287, 288, 289, + 290, 291, 292, 416, 14, 0, 0, 0, + 11, 16, 417, 16, 265, 300, 301, 302, + 14, 0, 0, 11, 419, 419, 419, 419, + 418, 419, 419, 419, 418, 419, 418, 419, + 419, 418, 418, 418, 418, 418, 418, 419, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 418, 418, 419, 418, 418, 419, 418, + 419, 418, 418, 419, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 419, + 419, 419, 418, 421, 422, 423, 424, 425, + 426, 427, 428, 429, 430, 431, 432, 433, + 434, 435, 436, 437, 438, 439, 440, 441, + 442, 443, 444, 445, 446, 447, 448, 449, + 450, 451, 452, 418, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 418, 418, + 418, 418, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 418, 418, 418, 418, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 418, 418, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 418, + 418, 418, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 419, 418, 419, 419, + 419, 419, 419, 419, 419, 418, 419, 418, + 419, 419, 418, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 419, + 418, 453, 454, 455, 456, 457, 458, 459, + 460, 461, 462, 463, 464, 465, 466, 467, + 468, 469, 470, 471, 
472, 473, 474, 475, + 476, 477, 478, 479, 480, 481, 482, 483, + 484, 485, 486, 487, 488, 425, 489, 490, + 491, 492, 493, 494, 425, 470, 425, 418, + 419, 418, 419, 419, 418, 418, 419, 418, + 418, 418, 418, 419, 418, 418, 418, 418, + 418, 419, 418, 418, 418, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 418, 419, 419, 419, 418, 418, + 418, 419, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 418, 418, 418, + 418, 418, 419, 419, 419, 419, 418, 418, + 419, 419, 419, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 418, 419, 419, 418, + 418, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 419, 419, 419, 418, 418, + 418, 418, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 418, 419, 419, 419, 418, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 495, 496, 497, 498, 499, 500, + 501, 502, 503, 425, 504, 505, 506, 507, + 508, 418, 419, 418, 418, 418, 418, 418, + 419, 419, 418, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 419, 419, 418, 418, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 419, 419, 419, 418, 418, 418, 418, + 419, 419, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 418, 509, 464, 510, + 511, 512, 425, 513, 514, 470, 425, 418, + 419, 419, 419, 419, 418, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 418, + 419, 419, 418, 475, 418, 470, 425, 425, + 515, 418, 425, 418, 419, 470, 516, 517, + 470, 518, 519, 470, 520, 521, 522, 523, + 524, 525, 470, 526, 527, 528, 470, 529, + 530, 531, 489, 532, 533, 534, 489, 535, + 470, 425, 418, 418, 419, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 418, 418, 418, 418, 419, 419, 418, 418, + 419, 419, 418, 418, 418, 418, 418, 418, + 419, 419, 419, 418, 418, 418, 419, 418, + 418, 418, 419, 419, 418, 419, 419, 419, + 419, 418, 419, 419, 419, 419, 418, 419, + 419, 419, 419, 419, 419, 418, 418, 418, + 419, 419, 419, 419, 418, 536, 537, 418, + 425, 418, 419, 418, 418, 419, 470, 538, + 539, 540, 541, 520, 542, 543, 544, 545, + 546, 547, 548, 549, 550, 551, 552, 553, + 425, 418, 418, 419, 418, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 418, 419, 418, 418, 419, 418, 419, 418, + 418, 419, 419, 419, 419, 418, 419, 419, + 419, 418, 418, 419, 419, 419, 419, 418, + 419, 419, 418, 418, 419, 419, 419, 419, + 419, 418, 554, 555, 556, 557, 558, 559, + 560, 561, 562, 563, 564, 560, 566, 567, + 568, 569, 565, 418, 570, 571, 470, 572, + 573, 574, 575, 576, 577, 578, 579, 580, + 470, 425, 581, 582, 583, 584, 470, 585, + 586, 587, 588, 589, 590, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 470, 501, + 425, 600, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 419, 419, 418, + 419, 418, 419, 419, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 419, 419, + 418, 418, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 419, 419, 418, 418, 419, + 418, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 418, 419, 419, 418, 419, 419, 418, 419, + 419, 418, 419, 419, 419, 419, 419, 419, + 419, 418, 419, 418, 419, 418, 419, 419, + 418, 419, 418, 419, 419, 418, 419, 418, + 419, 418, 601, 572, 602, 603, 604, 605, + 606, 607, 608, 609, 610, 453, 611, 470, + 612, 613, 614, 470, 615, 485, 616, 617, + 618, 619, 620, 621, 622, 623, 470, 418, + 418, 418, 419, 419, 419, 418, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 418, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 419, 418, 418, 418, 419, 419, + 419, 
419, 419, 419, 419, 419, 419, 418, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 418, 418, 418, 418, 419, + 419, 419, 418, 418, 418, 419, 418, 418, + 418, 419, 419, 418, 419, 419, 419, 418, + 419, 418, 418, 418, 419, 419, 418, 419, + 419, 419, 418, 419, 419, 419, 418, 418, + 418, 418, 419, 470, 539, 624, 625, 425, + 470, 425, 418, 418, 419, 418, 419, 470, + 624, 425, 418, 470, 626, 425, 418, 418, + 419, 470, 627, 628, 629, 530, 630, 631, + 470, 632, 633, 634, 425, 418, 418, 419, + 419, 419, 418, 419, 419, 418, 419, 419, + 419, 419, 418, 418, 419, 418, 418, 419, + 419, 418, 419, 418, 470, 425, 418, 635, + 470, 636, 418, 425, 418, 419, 418, 419, + 637, 470, 638, 639, 418, 419, 418, 418, + 418, 419, 419, 419, 419, 418, 640, 641, + 642, 470, 643, 644, 645, 646, 647, 648, + 649, 650, 651, 652, 653, 654, 655, 656, + 425, 418, 419, 419, 419, 418, 418, 418, + 418, 419, 419, 418, 418, 419, 418, 418, + 418, 418, 418, 418, 418, 419, 418, 419, + 418, 418, 418, 418, 418, 418, 419, 419, + 419, 419, 419, 418, 418, 419, 418, 418, + 418, 419, 418, 418, 419, 418, 418, 419, + 418, 418, 419, 418, 418, 418, 419, 419, + 419, 418, 418, 418, 419, 419, 419, 419, + 418, 657, 470, 658, 470, 659, 660, 661, + 662, 425, 418, 419, 419, 419, 419, 419, + 418, 418, 418, 419, 418, 418, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 418, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 418, 419, 419, 419, + 419, 419, 418, 663, 470, 425, 418, 419, + 664, 470, 455, 425, 418, 419, 665, 418, + 425, 418, 419, 470, 666, 425, 418, 418, + 419, 667, 418, 470, 668, 425, 418, 418, + 419, 670, 669, 419, 419, 419, 419, 670, + 669, 419, 670, 669, 670, 670, 419, 670, + 669, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 670, 670, 670, 670, 669, 419, 419, 670, + 670, 419, 670, 419, 670, 669, 670, 670, + 670, 670, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 670, 419, 670, 419, 670, + 669, 670, 670, 670, 670, 670, 419, 670, + 419, 670, 669, 419, 419, 670, 419, 670, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 670, 419, 670, + 419, 670, 669, 419, 670, 670, 670, 670, + 419, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 669, 419, 670, 669, + 670, 670, 670, 419, 670, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 670, 670, 670, 419, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 670, 669, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 670, 419, 670, 419, 669, 670, 670, + 670, 419, 670, 419, 670, 669, 419, 670, + 669, 670, 670, 419, 670, 669, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 670, + 419, 419, 670, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 419, 670, 669, 419, + 670, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 670, 670, + 670, 670, 419, 419, 670, 670, 670, 670, + 670, 419, 670, 670, 670, 670, 670, 669, + 419, 670, 670, 419, 670, 419, 669, 670, + 670, 419, 670, 669, 419, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 670, 419, 670, 419, 669, + 670, 670, 669, 419, 670, 419, 670, 419, + 670, 669, 670, 419, 670, 419, 670, 669, + 419, 670, 669, 419, 419, 670, 669, 670, + 419, 669, 670, 669, 419, 670, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 419, 670, 669, 419, 670, + 419, 669, 670, 669, 419, 419, 670, 419, + 669, 670, 669, 419, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 419, 670, 419, 669, 
670, 669, + 419, 419, 670, 669, 670, 419, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 419, 670, 669, 669, 419, 669, 419, + 670, 670, 419, 670, 670, 670, 670, 670, + 670, 670, 669, 419, 670, 670, 670, 419, + 669, 670, 670, 670, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 670, 669, 419, + 419, 670, 669, 670, 419, 670, 669, 419, + 419, 670, 419, 419, 419, 670, 419, 670, + 419, 670, 419, 670, 419, 669, 419, 670, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 669, 670, 419, 670, 670, 670, 669, + 419, 670, 419, 419, 670, 419, 669, 670, + 670, 669, 419, 670, 670, 670, 670, 419, + 670, 419, 669, 670, 670, 670, 419, 670, + 669, 670, 419, 670, 419, 670, 419, 670, + 419, 670, 669, 670, 670, 419, 670, 669, + 419, 670, 419, 670, 419, 669, 670, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 670, + 670, 670, 669, 419, 419, 419, 670, 669, + 419, 670, 419, 669, 670, 669, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 669, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 669, 419, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 670, 670, 419, 670, 419, + 669, 419, 670, 669, 670, 419, 419, 670, + 419, 670, 419, 669, 670, 670, 670, 670, + 669, 419, 670, 419, 670, 419, 670, 419, + 670, 419, 670, 669, 670, 670, 670, 419, + 670, 419, 670, 419, 670, 419, 669, 670, + 670, 419, 419, 670, 669, 670, 419, 670, + 670, 669, 419, 670, 419, 670, 669, 419, + 419, 670, 670, 670, 670, 419, 670, 419, + 670, 419, 669, 670, 670, 419, 669, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 670, 419, 669, 670, 419, 670, 670, 669, + 419, 670, 670, 419, 669, 670, 669, 419, + 670, 419, 670, 669, 670, 419, 670, 419, + 669, 670, 669, 419, 670, 419, 670, 419, + 670, 419, 670, 419, 670, 669, 671, 669, + 672, 673, 674, 675, 676, 677, 678, 679, + 680, 681, 682, 674, 683, 684, 685, 686, + 687, 674, 688, 689, 690, 691, 692, 693, + 694, 695, 696, 697, 698, 699, 700, 701, + 702, 674, 703, 671, 683, 671, 704, 671, + 669, 670, 670, 670, 670, 419, 669, 670, + 670, 669, 419, 670, 669, 419, 419, 670, + 669, 419, 670, 419, 669, 670, 669, 419, + 419, 670, 419, 669, 670, 670, 669, 419, + 670, 670, 670, 669, 419, 670, 419, 670, + 670, 669, 419, 419, 670, 419, 669, 670, + 669, 419, 670, 669, 419, 419, 670, 419, + 670, 669, 419, 670, 419, 419, 670, 419, + 670, 419, 669, 670, 670, 669, 419, 670, + 670, 419, 670, 669, 419, 670, 419, 670, + 669, 419, 670, 419, 669, 419, 670, 670, + 670, 419, 670, 669, 670, 419, 670, 669, + 419, 670, 669, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 419, 670, 669, 419, + 670, 669, 419, 670, 669, 705, 706, 707, + 708, 709, 710, 711, 712, 713, 714, 715, + 716, 676, 717, 718, 719, 720, 721, 718, + 722, 723, 724, 725, 726, 727, 728, 729, + 730, 671, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 670, 419, 670, 669, 670, + 419, 670, 669, 419, 670, 419, 670, 669, + 670, 419, 670, 669, 670, 419, 419, 419, + 670, 669, 670, 419, 670, 669, 670, 670, + 670, 670, 419, 670, 419, 669, 670, 669, + 419, 419, 670, 419, 670, 669, 670, 419, + 670, 669, 419, 670, 669, 670, 670, 419, + 670, 669, 419, 670, 669, 670, 419, 670, + 669, 419, 670, 669, 419, 670, 669, 419, + 670, 669, 670, 669, 419, 419, 670, 669, + 670, 419, 670, 669, 419, 670, 419, 669, + 670, 669, 419, 674, 731, 671, 674, 732, + 674, 733, 683, 671, 669, 670, 669, 419, + 670, 669, 419, 674, 732, 683, 671, 669, + 674, 734, 671, 683, 671, 669, 670, 669, + 419, 674, 735, 692, 736, 718, 737, 730, + 674, 738, 739, 740, 671, 683, 671, 669, + 670, 669, 419, 
670, 419, 670, 669, 419, + 670, 419, 670, 419, 669, 670, 670, 669, + 419, 670, 419, 670, 669, 419, 670, 669, + 674, 683, 425, 669, 741, 674, 742, 683, + 671, 669, 425, 670, 669, 419, 670, 669, + 419, 743, 674, 744, 745, 671, 669, 419, + 670, 669, 670, 670, 669, 419, 419, 670, + 419, 670, 669, 674, 746, 747, 748, 749, + 750, 751, 752, 753, 754, 755, 756, 671, + 683, 671, 669, 670, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 670, 670, 670, 670, 670, 669, 419, 670, + 670, 419, 670, 419, 669, 670, 419, 670, + 670, 670, 419, 670, 670, 419, 670, 670, + 419, 670, 670, 419, 670, 670, 669, 419, + 674, 757, 674, 733, 758, 759, 760, 671, + 683, 671, 669, 670, 669, 419, 670, 670, + 670, 419, 670, 670, 670, 419, 670, 419, + 670, 669, 419, 419, 419, 419, 670, 670, + 419, 419, 419, 419, 419, 670, 670, 670, + 670, 670, 670, 670, 419, 670, 419, 670, + 419, 669, 670, 670, 670, 419, 670, 419, + 670, 669, 683, 425, 761, 674, 683, 425, + 670, 669, 419, 762, 674, 763, 683, 425, + 670, 669, 419, 670, 419, 764, 683, 671, + 669, 425, 670, 669, 419, 674, 765, 671, + 683, 671, 669, 670, 669, 419, 766, 766, + 766, 768, 769, 770, 766, 767, 767, 771, + 768, 771, 769, 771, 767, 772, 773, 772, + 775, 774, 776, 774, 777, 774, 779, 778, + 781, 782, 780, 781, 783, 780, 785, 784, + 786, 784, 787, 784, 789, 788, 791, 792, + 790, 791, 793, 790, 795, 795, 795, 795, + 794, 795, 795, 795, 794, 795, 794, 795, + 795, 794, 794, 794, 794, 794, 794, 795, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 794, 794, 795, 794, 794, 795, 794, + 795, 794, 794, 795, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 795, + 795, 795, 794, 797, 798, 799, 800, 801, + 802, 803, 804, 805, 806, 807, 808, 809, + 810, 811, 812, 813, 814, 815, 816, 817, + 818, 819, 820, 821, 822, 823, 824, 825, + 826, 827, 828, 794, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 794, 794, + 794, 794, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 794, 794, 794, 794, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 794, 794, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 794, + 794, 794, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 795, 794, 795, 795, + 795, 795, 795, 795, 795, 794, 795, 794, + 795, 795, 794, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 795, + 794, 829, 830, 831, 832, 833, 834, 835, + 836, 837, 838, 839, 840, 841, 842, 843, + 844, 845, 846, 847, 848, 849, 850, 851, + 852, 853, 854, 855, 856, 857, 858, 859, + 860, 861, 862, 863, 864, 801, 865, 866, + 867, 868, 869, 870, 801, 846, 801, 794, + 795, 794, 795, 795, 794, 794, 795, 794, + 794, 794, 794, 795, 794, 794, 794, 794, + 794, 795, 794, 794, 794, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 794, 795, 795, 795, 794, 794, + 794, 795, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 794, 794, 794, + 
794, 794, 795, 795, 795, 795, 794, 794, + 795, 795, 795, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 794, 795, 795, 794, + 794, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 795, 795, 795, 794, 794, + 794, 794, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 794, 795, 795, 795, 794, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 871, 872, 873, 874, 875, 876, + 877, 878, 879, 801, 880, 881, 882, 883, + 884, 794, 795, 794, 794, 794, 794, 794, + 795, 795, 794, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 795, 795, 794, 794, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 795, 795, 795, 794, 794, 794, 794, + 795, 795, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 794, 885, 840, 886, + 887, 888, 801, 889, 890, 846, 801, 794, + 795, 795, 795, 795, 794, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 794, + 795, 795, 794, 851, 794, 846, 801, 801, + 891, 794, 801, 794, 795, 846, 892, 893, + 846, 894, 895, 846, 896, 897, 898, 899, + 900, 901, 846, 902, 903, 904, 846, 905, + 906, 907, 865, 908, 909, 910, 865, 911, + 846, 801, 794, 794, 795, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 794, 794, 794, 794, 795, 795, 794, 794, + 795, 795, 794, 794, 794, 794, 794, 794, + 795, 795, 795, 794, 794, 794, 795, 794, + 794, 794, 795, 795, 794, 795, 795, 795, + 795, 794, 795, 795, 795, 795, 794, 795, + 795, 795, 795, 795, 795, 794, 794, 794, + 795, 795, 795, 795, 794, 912, 913, 794, + 801, 794, 795, 794, 794, 795, 846, 914, + 915, 916, 917, 896, 918, 919, 920, 921, + 922, 923, 924, 925, 926, 927, 928, 929, + 801, 794, 794, 795, 794, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 794, 795, 794, 794, 795, 794, 795, 794, + 794, 795, 795, 795, 795, 794, 795, 795, + 795, 794, 794, 795, 795, 795, 795, 794, + 795, 795, 794, 794, 795, 795, 795, 795, + 795, 794, 930, 931, 932, 933, 934, 935, + 936, 937, 938, 939, 940, 936, 942, 943, + 944, 945, 941, 794, 946, 947, 846, 948, + 949, 950, 951, 952, 953, 954, 955, 956, + 846, 801, 957, 958, 959, 960, 846, 961, + 962, 963, 964, 965, 966, 967, 968, 969, + 970, 971, 972, 973, 974, 975, 846, 877, + 801, 976, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 795, 795, 794, + 795, 794, 795, 795, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 795, 795, + 794, 794, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 795, 795, 794, 794, 795, + 794, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 794, 795, 795, 794, 795, 795, 794, 795, + 795, 794, 795, 795, 795, 795, 795, 795, + 795, 794, 795, 794, 795, 794, 795, 795, + 794, 795, 794, 795, 795, 794, 795, 794, + 795, 794, 977, 948, 978, 979, 980, 981, + 982, 983, 984, 985, 986, 829, 987, 846, + 988, 989, 990, 846, 991, 861, 992, 993, + 994, 995, 996, 997, 998, 999, 846, 794, + 794, 794, 795, 795, 795, 794, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 794, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 795, 794, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 794, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 794, 794, 794, 794, 795, + 795, 795, 794, 794, 794, 795, 794, 794, + 794, 795, 795, 794, 795, 795, 795, 794, + 795, 794, 794, 794, 795, 795, 794, 795, + 795, 795, 794, 795, 795, 795, 794, 794, + 794, 794, 795, 846, 915, 1000, 1001, 801, + 846, 801, 794, 794, 795, 794, 795, 846, + 1000, 801, 794, 846, 1002, 801, 794, 794, + 795, 846, 1003, 1004, 1005, 906, 1006, 1007, + 846, 1008, 1009, 
1010, 801, 794, 794, 795, + 795, 795, 794, 795, 795, 794, 795, 795, + 795, 795, 794, 794, 795, 794, 794, 795, + 795, 794, 795, 794, 846, 801, 794, 1011, + 846, 1012, 794, 801, 794, 795, 794, 795, + 1013, 846, 1014, 1015, 794, 795, 794, 794, + 794, 795, 795, 795, 795, 794, 1016, 1017, + 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024, + 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032, + 801, 794, 795, 795, 795, 794, 794, 794, + 794, 795, 795, 794, 794, 795, 794, 794, + 794, 794, 794, 794, 794, 795, 794, 795, + 794, 794, 794, 794, 794, 794, 795, 795, + 795, 795, 795, 794, 794, 795, 794, 794, + 794, 795, 794, 794, 795, 794, 794, 795, + 794, 794, 795, 794, 794, 794, 795, 795, + 795, 794, 794, 794, 795, 795, 795, 795, + 794, 1033, 846, 1034, 846, 1035, 1036, 1037, + 1038, 801, 794, 795, 795, 795, 795, 795, + 794, 794, 794, 795, 794, 794, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 794, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 794, 795, 795, 795, + 795, 795, 794, 1039, 846, 801, 794, 795, + 1040, 846, 831, 801, 794, 795, 1041, 794, + 801, 794, 795, 846, 1042, 801, 794, 794, + 795, 1043, 794, 846, 1044, 801, 794, 794, + 795, 1046, 1045, 795, 795, 795, 795, 1046, + 1045, 795, 1046, 1045, 1046, 1046, 795, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1045, 795, 795, 1046, + 1046, 795, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 1046, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046, + 795, 1046, 1045, 795, 795, 1046, 795, 1046, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 1046, 795, 1046, + 795, 1046, 1045, 795, 1046, 1046, 1046, 1046, + 795, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1046, 1046, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1046, 1045, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046, + 795, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1046, 1046, + 1046, 1046, 795, 795, 1046, 1046, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1046, 795, 1046, 795, 1045, + 1046, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 1045, 1046, 795, 1046, 795, 1046, 1045, + 795, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1045, 1046, 1045, 795, 1046, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 795, 1046, 1045, 795, 1046, + 795, 1045, 1046, 1045, 795, 795, 1046, 795, + 1045, 1046, 1045, 795, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 1045, 1046, 795, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 795, 1046, 1045, 1045, 795, 1045, 795, + 1046, 1046, 795, 1046, 1046, 1046, 1046, 
1046, + 1046, 1046, 1045, 795, 1046, 1046, 1046, 795, + 1045, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 795, 1046, 795, 795, 795, 1046, 795, 1046, + 795, 1046, 795, 1046, 795, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1045, 1046, 795, 1046, 1046, 1046, 1045, + 795, 1046, 795, 795, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1046, 1046, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1046, 795, 1046, + 1045, 1046, 795, 1046, 795, 1046, 795, 1046, + 795, 1046, 1045, 1046, 1046, 795, 1046, 1045, + 795, 1046, 795, 1046, 795, 1045, 1046, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 1046, + 1046, 1046, 1045, 795, 795, 795, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1045, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1045, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 1045, 795, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 1046, 1046, 795, 1046, 795, + 1045, 795, 1046, 1045, 1046, 795, 795, 1046, + 795, 1046, 795, 1045, 1046, 1046, 1046, 1046, + 1045, 795, 1046, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 1045, 1046, 1046, 1046, 795, + 1046, 795, 1046, 795, 1046, 795, 1045, 1046, + 1046, 795, 795, 1046, 1045, 1046, 795, 1046, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 795, 1046, 1046, 1046, 1046, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 795, 1045, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1045, 1046, 795, 1046, 1046, 1045, + 795, 1046, 1046, 795, 1045, 1046, 1045, 795, + 1046, 795, 1046, 1045, 1046, 795, 1046, 795, + 1045, 1046, 1045, 795, 1046, 795, 1046, 795, + 1046, 795, 1046, 795, 1046, 1045, 1047, 1045, + 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, + 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062, + 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069, + 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047, + 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046, + 1046, 1045, 795, 1046, 1045, 795, 795, 1046, + 1045, 795, 1046, 795, 1045, 1046, 1045, 795, + 795, 1046, 795, 1045, 1046, 1046, 1045, 795, + 1046, 1046, 1046, 1045, 795, 1046, 795, 1046, + 1046, 1045, 795, 795, 1046, 795, 1045, 1046, + 1045, 795, 1046, 1045, 795, 795, 1046, 795, + 1046, 1045, 795, 1046, 795, 795, 1046, 795, + 1046, 795, 1045, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 1045, 795, 1046, 795, 1046, + 1045, 795, 1046, 795, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1045, 1046, 795, 1046, 1045, + 795, 1046, 1045, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083, + 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, + 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094, + 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, + 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 1046, 795, 1046, 1045, 1046, + 795, 1046, 1045, 795, 1046, 795, 1046, 1045, + 1046, 795, 1046, 1045, 1046, 795, 795, 795, + 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046, + 1046, 1046, 795, 1046, 795, 1045, 1046, 1045, + 795, 795, 1046, 795, 1046, 1045, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 1046, 795, + 1046, 1045, 795, 1046, 1045, 1046, 795, 1046, + 1045, 795, 1046, 1045, 795, 1046, 1045, 795, + 1046, 1045, 1046, 1045, 795, 795, 1046, 1045, + 1046, 795, 1046, 1045, 795, 1046, 795, 1045, + 1046, 1045, 795, 1050, 
1107, 1047, 1050, 1108, + 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795, + 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045, + 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045, + 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106, + 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045, + 1046, 1045, 795, 1046, 795, 1046, 1045, 795, + 1046, 795, 1046, 795, 1045, 1046, 1046, 1045, + 795, 1046, 795, 1046, 1045, 795, 1046, 1045, + 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059, + 1047, 1045, 801, 1046, 1045, 795, 1046, 1045, + 795, 1119, 1050, 1120, 1121, 1047, 1045, 795, + 1046, 1045, 1046, 1046, 1045, 795, 795, 1046, + 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125, + 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047, + 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046, + 1046, 795, 1046, 795, 1045, 1046, 795, 1046, + 1046, 1046, 795, 1046, 1046, 795, 1046, 1046, + 795, 1046, 1046, 795, 1046, 1046, 1045, 795, + 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046, + 1046, 795, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 795, 795, 795, 795, 1046, 1046, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 795, 1046, 795, 1046, + 795, 1045, 1046, 1046, 1046, 795, 1046, 795, + 1046, 1045, 1059, 801, 1137, 1050, 1059, 801, + 1046, 1045, 795, 1138, 1050, 1139, 1059, 801, + 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, + 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, + 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, + 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151, + 1152, 1153, 1154, 670, 670, 419, 1155, 1156, + 1157, 1158, 670, 1161, 1162, 1164, 1165, 1166, + 1160, 1167, 1168, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, + 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190, + 1191, 1192, 1193, 670, 1148, 7, 1148, 419, + 1148, 419, 1160, 1163, 1187, 1194, 1159, 1142, + 1142, 1195, 1143, 1196, 1198, 1197, 4, 1147, + 1200, 1197, 1201, 1197, 2, 1147, 1197, 6, + 8, 8, 7, 1202, 1203, 1204, 1197, 1205, + 1206, 1197, 1207, 1197, 419, 419, 1209, 1210, + 489, 470, 1211, 470, 1212, 1213, 1214, 1215, + 1216, 1217, 1218, 1219, 1220, 1221, 1222, 544, + 1223, 520, 1224, 1225, 1226, 1227, 1228, 1229, + 1230, 1231, 1232, 1233, 1234, 1235, 419, 419, + 419, 425, 565, 1208, 1236, 1197, 1237, 1197, + 670, 1238, 419, 419, 419, 670, 1238, 670, + 670, 419, 1238, 419, 1238, 419, 1238, 419, + 670, 670, 670, 670, 670, 1238, 419, 670, + 670, 670, 419, 670, 419, 1238, 419, 670, + 670, 670, 670, 419, 1238, 670, 419, 670, + 419, 670, 419, 670, 670, 419, 670, 1238, + 419, 670, 419, 670, 419, 670, 1238, 670, + 419, 1238, 670, 419, 670, 419, 1238, 670, + 670, 670, 670, 670, 1238, 419, 419, 670, + 419, 670, 1238, 670, 419, 1238, 670, 670, + 1238, 419, 419, 670, 419, 670, 419, 670, + 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245, + 1246, 1247, 1248, 1249, 715, 1250, 1251, 1252, + 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260, + 1261, 1260, 1262, 1263, 1264, 1265, 1266, 671, + 1238, 1267, 1268, 1269, 1270, 1271, 1272, 1273, + 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281, + 1282, 1283, 1284, 1285, 725, 1286, 1287, 1288, + 692, 1289, 1290, 1291, 1292, 1293, 1294, 671, + 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302, + 674, 1303, 671, 674, 1304, 1305, 1306, 1307, + 683, 1238, 1308, 1309, 1310, 1311, 703, 1312, + 1313, 683, 1314, 1315, 1316, 1317, 1318, 671, + 1238, 1319, 1278, 1320, 1321, 1322, 683, 1323, + 1324, 674, 671, 683, 425, 1238, 1288, 671, + 674, 683, 425, 
683, 425, 1325, 683, 1238, + 425, 674, 1326, 1327, 674, 1328, 1329, 681, + 1330, 1331, 1332, 1333, 1334, 1284, 1335, 1336, + 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344, + 1345, 1346, 1303, 1347, 674, 683, 425, 1238, + 1348, 1349, 683, 671, 1238, 425, 671, 1238, + 674, 1350, 731, 1351, 1352, 1353, 1354, 1355, + 1356, 1357, 1358, 671, 1359, 1360, 1361, 1362, + 1363, 1364, 671, 683, 1238, 1366, 1367, 1368, + 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376, + 1372, 1378, 1379, 1380, 1381, 1365, 1377, 1365, + 1238, 1365, 1238, 1382, 1382, 1383, 1384, 1385, + 1386, 1387, 1388, 1389, 1390, 1387, 767, 1391, + 1391, 1391, 1392, 1391, 1391, 768, 769, 770, + 1391, 767, 1382, 1382, 1393, 1396, 1397, 1395, + 1398, 1399, 1398, 1400, 1391, 1402, 1401, 1396, + 1403, 1395, 1405, 1404, 1394, 1394, 1394, 768, + 769, 770, 1394, 767, 767, 1406, 773, 1406, + 1407, 1406, 775, 1408, 1409, 1410, 1411, 1412, + 1413, 1414, 1411, 776, 775, 1408, 1415, 1415, + 777, 779, 1416, 1415, 776, 1418, 1419, 1417, + 1418, 1419, 1420, 1417, 775, 1408, 1421, 1415, + 775, 1408, 1415, 1423, 1422, 1425, 1424, 776, + 1426, 777, 1426, 779, 1426, 785, 1427, 1428, + 1429, 1430, 1431, 1432, 1433, 1430, 786, 785, + 1427, 1434, 1434, 787, 789, 1435, 1434, 786, + 1437, 1438, 1436, 1437, 1438, 1439, 1436, 785, + 1427, 1440, 1434, 785, 1427, 1434, 1442, 1441, + 1444, 1443, 786, 1445, 787, 1445, 789, 1445, + 795, 1448, 1449, 1451, 1452, 1453, 1447, 1454, + 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, + 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470, + 1471, 1472, 1473, 1475, 1476, 1477, 1478, 1479, + 1480, 795, 795, 1446, 1447, 1450, 1474, 1481, + 1446, 1046, 795, 795, 1483, 1484, 865, 846, + 1485, 846, 1486, 1487, 1488, 1489, 1490, 1491, + 1492, 1493, 1494, 1495, 1496, 920, 1497, 896, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1509, 795, 795, 795, 801, + 941, 1482, 1046, 1510, 795, 795, 795, 1046, + 1510, 1046, 1046, 795, 1510, 795, 1510, 795, + 1510, 795, 1046, 1046, 1046, 1046, 1046, 1510, + 795, 1046, 1046, 1046, 795, 1046, 795, 1510, + 795, 1046, 1046, 1046, 1046, 795, 1510, 1046, + 795, 1046, 795, 1046, 795, 1046, 1046, 795, + 1046, 1510, 795, 1046, 795, 1046, 795, 1046, + 1510, 1046, 795, 1510, 1046, 795, 1046, 795, + 1510, 1046, 1046, 1046, 1046, 1046, 1510, 795, + 795, 1046, 795, 1046, 1510, 1046, 795, 1510, + 1046, 1046, 1510, 795, 795, 1046, 795, 1046, + 795, 1046, 1510, 1511, 1512, 1513, 1514, 1515, + 1516, 1517, 1518, 1519, 1520, 1521, 1091, 1522, + 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530, + 1531, 1532, 1533, 1532, 1534, 1535, 1536, 1537, + 1538, 1047, 1510, 1539, 1540, 1541, 1542, 1543, + 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551, + 1552, 1553, 1554, 1555, 1556, 1557, 1101, 1558, + 1559, 1560, 1068, 1561, 1562, 1563, 1564, 1565, + 1566, 1047, 1567, 1568, 1569, 1570, 1571, 1572, + 1573, 1574, 1050, 1575, 1047, 1050, 1576, 1577, + 1578, 1579, 1059, 1510, 1580, 1581, 1582, 1583, + 1079, 1584, 1585, 1059, 1586, 1587, 1588, 1589, + 1590, 1047, 1510, 1591, 1550, 1592, 1593, 1594, + 1059, 1595, 1596, 1050, 1047, 1059, 801, 1510, + 1560, 1047, 1050, 1059, 801, 1059, 801, 1597, + 1059, 1510, 801, 1050, 1598, 1599, 1050, 1600, + 1601, 1057, 1602, 1603, 1604, 1605, 1606, 1556, + 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614, + 1615, 1616, 1617, 1618, 1575, 1619, 1050, 1059, + 801, 1510, 1620, 1621, 1059, 1047, 1510, 801, + 1047, 1510, 1050, 1622, 1107, 1623, 1624, 1625, + 1626, 1627, 1628, 1629, 1630, 1047, 1631, 1632, + 1633, 1634, 1635, 1636, 1047, 1059, 1510, 1638, + 1639, 1640, 1641, 1642, 
1643, 1644, 1645, 1646, + 1647, 1648, 1644, 1650, 1651, 1652, 1653, 1637, + 1649, 1637, 1510, 1637, 1510, +} + +var _hcltok_trans_targs []int16 = []int16{ + 1459, 1459, 2, 3, 1459, 1459, 4, 1467, + 5, 6, 8, 9, 286, 12, 13, 14, + 15, 16, 287, 288, 19, 289, 21, 22, + 290, 291, 292, 293, 294, 295, 296, 297, + 298, 299, 328, 348, 353, 127, 128, 129, + 356, 151, 371, 375, 1459, 10, 11, 17, + 18, 20, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 64, 105, 120, 131, + 154, 170, 283, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 65, 66, 67, 68, 69, 70, + 71, 72, 73, 74, 75, 76, 77, 78, + 79, 80, 81, 82, 83, 84, 85, 86, + 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 106, 107, 108, 109, 110, 111, + 112, 113, 114, 115, 116, 117, 118, 119, + 121, 122, 123, 124, 125, 126, 130, 132, + 133, 134, 135, 136, 137, 138, 139, 140, + 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 152, 153, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, + 167, 168, 169, 171, 203, 227, 230, 231, + 233, 242, 243, 246, 250, 268, 275, 277, + 279, 281, 172, 173, 174, 175, 176, 177, + 178, 179, 180, 181, 182, 183, 184, 185, + 186, 187, 188, 189, 190, 191, 192, 193, + 194, 195, 196, 197, 198, 199, 200, 201, + 202, 204, 205, 206, 207, 208, 209, 210, + 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, + 228, 229, 232, 234, 235, 236, 237, 238, + 239, 240, 241, 244, 245, 247, 248, 249, + 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, + 267, 269, 270, 271, 272, 273, 274, 276, + 278, 280, 282, 284, 285, 300, 301, 302, + 303, 304, 305, 306, 307, 308, 309, 310, + 311, 312, 313, 314, 315, 316, 317, 318, + 319, 320, 321, 322, 323, 324, 325, 326, + 327, 329, 330, 331, 332, 333, 334, 335, + 336, 337, 338, 339, 340, 341, 342, 343, + 344, 345, 346, 347, 349, 350, 351, 352, + 354, 355, 357, 358, 359, 360, 361, 362, + 363, 364, 365, 366, 367, 368, 369, 370, + 372, 373, 374, 376, 382, 404, 409, 411, + 413, 377, 378, 379, 380, 381, 383, 384, + 385, 386, 387, 388, 389, 390, 391, 392, + 393, 394, 395, 396, 397, 398, 399, 400, + 401, 402, 403, 405, 406, 407, 408, 410, + 412, 414, 1459, 1471, 1459, 437, 438, 439, + 440, 417, 441, 442, 443, 444, 445, 446, + 447, 448, 449, 450, 451, 452, 453, 454, + 455, 456, 457, 458, 459, 460, 461, 462, + 463, 464, 465, 466, 467, 469, 470, 471, + 472, 473, 474, 475, 476, 477, 478, 479, + 480, 481, 482, 483, 484, 485, 419, 486, + 487, 488, 489, 490, 491, 492, 493, 494, + 495, 496, 497, 498, 499, 500, 501, 502, + 503, 418, 504, 505, 506, 507, 508, 510, + 511, 512, 513, 514, 515, 516, 517, 518, + 519, 520, 521, 522, 523, 525, 526, 527, + 528, 529, 530, 534, 536, 537, 538, 539, + 434, 540, 541, 542, 543, 544, 545, 546, + 547, 548, 549, 550, 551, 552, 553, 554, + 556, 557, 559, 560, 561, 562, 563, 564, + 432, 565, 566, 567, 568, 569, 570, 571, + 572, 573, 575, 607, 631, 634, 635, 637, + 646, 647, 650, 654, 672, 532, 679, 681, + 683, 685, 576, 577, 578, 579, 580, 581, + 582, 583, 584, 585, 586, 587, 588, 589, + 590, 591, 592, 593, 594, 595, 596, 597, + 598, 599, 600, 601, 602, 603, 604, 605, + 606, 608, 609, 610, 611, 612, 613, 614, + 615, 616, 617, 618, 619, 620, 621, 622, + 623, 624, 625, 626, 627, 628, 629, 630, + 632, 633, 636, 638, 639, 640, 641, 642, + 643, 644, 645, 648, 649, 651, 652, 653, + 655, 656, 657, 658, 659, 660, 661, 662, + 663, 664, 665, 666, 667, 668, 669, 670, + 671, 673, 674, 675, 676, 677, 678, 680, + 682, 
684, 686, 688, 689, 1459, 1459, 690, + 827, 828, 759, 829, 830, 831, 832, 833, + 834, 788, 835, 724, 836, 837, 838, 839, + 840, 841, 842, 843, 744, 844, 845, 846, + 847, 848, 849, 850, 851, 852, 853, 769, + 854, 856, 857, 858, 859, 860, 861, 862, + 863, 864, 865, 702, 866, 867, 868, 869, + 870, 871, 872, 873, 874, 740, 875, 876, + 877, 878, 879, 810, 881, 882, 885, 887, + 888, 889, 890, 891, 892, 895, 896, 898, + 899, 900, 902, 903, 904, 905, 906, 907, + 908, 909, 910, 911, 912, 914, 915, 916, + 917, 920, 922, 923, 925, 927, 1509, 1510, + 929, 930, 931, 1509, 1509, 932, 1523, 1523, + 1524, 935, 1523, 936, 1525, 1526, 1529, 1530, + 1534, 1534, 1535, 941, 1534, 942, 1536, 1537, + 1540, 1541, 1545, 1546, 1545, 968, 969, 970, + 971, 948, 972, 973, 974, 975, 976, 977, + 978, 979, 980, 981, 982, 983, 984, 985, + 986, 987, 988, 989, 990, 991, 992, 993, + 994, 995, 996, 997, 998, 1000, 1001, 1002, + 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, + 1011, 1012, 1013, 1014, 1015, 1016, 950, 1017, + 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, + 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033, + 1034, 949, 1035, 1036, 1037, 1038, 1039, 1041, + 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, + 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058, + 1059, 1060, 1061, 1065, 1067, 1068, 1069, 1070, + 965, 1071, 1072, 1073, 1074, 1075, 1076, 1077, + 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, + 1087, 1088, 1090, 1091, 1092, 1093, 1094, 1095, + 963, 1096, 1097, 1098, 1099, 1100, 1101, 1102, + 1103, 1104, 1106, 1138, 1162, 1165, 1166, 1168, + 1177, 1178, 1181, 1185, 1203, 1063, 1210, 1212, + 1214, 1216, 1107, 1108, 1109, 1110, 1111, 1112, + 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, + 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128, + 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136, + 1137, 1139, 1140, 1141, 1142, 1143, 1144, 1145, + 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153, + 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161, + 1163, 1164, 1167, 1169, 1170, 1171, 1172, 1173, + 1174, 1175, 1176, 1179, 1180, 1182, 1183, 1184, + 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, + 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, + 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211, + 1213, 1215, 1217, 1219, 1220, 1545, 1545, 1221, + 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364, + 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370, + 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377, + 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1300, + 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393, + 1394, 1395, 1396, 1233, 1397, 1398, 1399, 1400, + 1401, 1402, 1403, 1404, 1405, 1271, 1406, 1407, + 1408, 1409, 1410, 1341, 1412, 1413, 1416, 1418, + 1419, 1420, 1421, 1422, 1423, 1426, 1427, 1429, + 1430, 1431, 1433, 1434, 1435, 1436, 1437, 1438, + 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447, + 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459, + 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466, + 1468, 1469, 1470, 1459, 1472, 1459, 1473, 1459, + 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481, + 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489, + 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497, + 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505, + 1506, 1507, 1508, 1459, 1459, 1459, 1459, 1459, + 1459, 1, 1459, 7, 1459, 1459, 1459, 1459, + 1459, 415, 416, 420, 421, 422, 423, 424, + 425, 426, 427, 428, 429, 430, 431, 433, + 435, 436, 468, 509, 524, 531, 533, 535, + 555, 558, 574, 687, 1459, 1459, 1459, 691, + 692, 693, 694, 695, 696, 697, 698, 699, + 700, 701, 703, 704, 705, 706, 707, 708, + 709, 710, 711, 712, 713, 714, 715, 716, + 717, 718, 
719, 720, 721, 722, 723, 725, + 726, 727, 728, 729, 730, 731, 732, 733, + 734, 735, 736, 737, 738, 739, 741, 742, + 743, 745, 746, 747, 748, 749, 750, 751, + 752, 753, 754, 755, 756, 757, 758, 760, + 761, 762, 763, 764, 765, 766, 767, 768, + 770, 771, 772, 773, 774, 775, 776, 777, + 778, 779, 780, 781, 782, 783, 784, 785, + 786, 787, 789, 790, 791, 792, 793, 794, + 795, 796, 797, 798, 799, 800, 801, 802, + 803, 804, 805, 806, 807, 808, 809, 811, + 812, 813, 814, 815, 816, 817, 818, 819, + 820, 821, 822, 823, 824, 825, 826, 855, + 880, 883, 884, 886, 893, 894, 897, 901, + 913, 918, 919, 921, 924, 926, 1511, 1509, + 1512, 1517, 1519, 1509, 1520, 1521, 1522, 1509, + 928, 1509, 1509, 1513, 1514, 1516, 1509, 1515, + 1509, 1509, 1509, 1518, 1509, 1509, 1509, 933, + 934, 938, 939, 1523, 1531, 1532, 1533, 1523, + 937, 1523, 1523, 934, 1527, 1528, 1523, 1523, + 1523, 1523, 1523, 940, 944, 945, 1534, 1542, + 1543, 1544, 1534, 943, 1534, 1534, 940, 1538, + 1539, 1534, 1534, 1534, 1534, 1534, 1545, 1547, + 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555, + 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563, + 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, + 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579, + 1580, 1581, 1545, 946, 947, 951, 952, 953, + 954, 955, 956, 957, 958, 959, 960, 961, + 962, 964, 966, 967, 999, 1040, 1055, 1062, + 1064, 1066, 1086, 1089, 1105, 1218, 1545, 1222, + 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230, + 1231, 1232, 1234, 1235, 1236, 1237, 1238, 1239, + 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, + 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1256, + 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264, + 1265, 1266, 1267, 1268, 1269, 1270, 1272, 1273, + 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282, + 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1291, + 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299, + 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308, + 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, + 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, + 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, + 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342, + 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350, + 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1386, + 1411, 1414, 1415, 1417, 1424, 1425, 1428, 1432, + 1444, 1449, 1450, 1452, 1455, 1457, +} + +var _hcltok_trans_actions []byte = []byte{ + 145, 107, 0, 0, 91, 141, 0, 7, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 121, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 
0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 143, 193, 149, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 147, 125, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 31, 169, + 0, 0, 0, 35, 33, 0, 55, 41, + 175, 0, 53, 0, 175, 175, 0, 0, + 75, 61, 181, 0, 73, 0, 181, 181, + 0, 0, 85, 187, 89, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 87, 79, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 93, + 0, 0, 119, 0, 111, 0, 7, 7, + 7, 0, 0, 113, 0, 115, 0, 123, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 7, 7, + 7, 196, 196, 196, 196, 196, 196, 7, + 7, 196, 7, 127, 139, 135, 97, 133, + 103, 0, 129, 0, 101, 95, 109, 99, + 131, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 105, 117, 137, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 13, + 0, 0, 172, 17, 0, 7, 7, 23, + 0, 25, 27, 0, 0, 0, 151, 0, + 15, 19, 9, 0, 21, 11, 29, 0, + 0, 0, 0, 43, 0, 178, 178, 49, + 0, 157, 154, 1, 175, 175, 45, 37, + 47, 39, 51, 0, 0, 0, 
63, 0, + 184, 184, 69, 0, 163, 160, 1, 181, + 181, 65, 57, 67, 59, 71, 77, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 7, 7, 7, + 190, 190, 190, 190, 190, 190, 7, 7, + 190, 7, 81, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 83, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_to_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 
0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 3, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 166, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 166, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 3, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_from_state_actions []byte = []byte{ + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 
0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 5, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 5, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 5, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, +} + +var _hcltok_eof_trans []int16 = []int16{ + 0, 1, 1, 1, 6, 6, 6, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 419, + 419, 421, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 419, 419, 419, 419, 419, 419, + 419, 419, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 
670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 670, 670, 670, 670, 670, 670, 670, 670, + 767, 772, 772, 772, 773, 773, 775, 775, + 775, 779, 0, 0, 785, 785, 785, 789, + 0, 0, 795, 795, 797, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 795, 795, 795, + 795, 795, 795, 795, 795, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 
1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, + 1046, 1046, 1046, 0, 1196, 1197, 1198, 1200, + 1198, 1198, 1198, 1203, 1198, 1198, 1198, 1209, + 1198, 1198, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, + 1239, 1239, 1239, 1239, 1239, 0, 1392, 1394, + 1395, 1399, 1399, 1392, 1402, 1395, 1405, 1395, + 1407, 1407, 1407, 0, 1416, 1418, 1418, 1416, + 1416, 1423, 1425, 1427, 1427, 1427, 0, 1435, + 1437, 1437, 1435, 1435, 1442, 1444, 1446, 1446, + 1446, 0, 1483, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511, + 1511, 1511, 1511, 1511, 1511, 1511, +} + +const hcltok_start int = 1459 +const hcltok_first_final int = 1459 +const hcltok_error int = 0 + +const hcltok_en_stringTemplate int = 1509 +const hcltok_en_heredocTemplate int = 1523 +const hcltok_en_bareTemplate int = 1534 +const hcltok_en_identOnly int = 1545 +const hcltok_en_main int = 1459 + +//line scan_tokens.rl:16 + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, + } + +//line scan_tokens.rl:305 + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + +//line scan_tokens.rl:340 + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func(ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func() { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + +//line scan_tokens.go:4289 + { + top = 0 + ts = 0 + te = 0 + act = 0 + } + +//line scan_tokens.go:4297 + { + var _klen int + var _trans int + var _acts int + var _nacts uint + var _keys int + if p == pe { + goto _test_eof + } + if cs == 0 { + goto _out + } + _resume: + _acts = int(_hcltok_from_state_actions[cs]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 3: +//line NONE:1 + ts = p + +//line scan_tokens.go:4320 + } + } + + _keys = int(_hcltok_key_offsets[cs]) + _trans = int(_hcltok_index_offsets[cs]) + + _klen = int(_hcltok_single_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + _klen 
- 1) + for { + if _upper < _lower { + break + } + + _mid = _lower + ((_upper - _lower) >> 1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 1 + case data[p] > _hcltok_trans_keys[_mid]: + _lower = _mid + 1 + default: + _trans += int(_mid - int(_keys)) + goto _match + } + } + _keys += _klen + _trans += _klen + } + + _klen = int(_hcltok_range_lengths[cs]) + if _klen > 0 { + _lower := int(_keys) + var _mid int + _upper := int(_keys + (_klen << 1) - 2) + for { + if _upper < _lower { + break + } + + _mid = _lower + (((_upper - _lower) >> 1) & ^1) + switch { + case data[p] < _hcltok_trans_keys[_mid]: + _upper = _mid - 2 + case data[p] > _hcltok_trans_keys[_mid+1]: + _lower = _mid + 2 + default: + _trans += int((_mid - int(_keys)) >> 1) + goto _match + } + } + _trans += _klen + } + + _match: + _trans = int(_hcltok_indicies[_trans]) + _eof_trans: + cs = int(_hcltok_trans_targs[_trans]) + + if _hcltok_trans_actions[_trans] == 0 { + goto _again + } + + _acts = int(_hcltok_trans_actions[_trans]) + _nacts = uint(_hcltok_actions[_acts]) + _acts++ + for ; _nacts > 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 0: +//line scan_tokens.rl:224 + p-- + + case 4: +//line NONE:1 + te = p + 1 + + case 5: +//line scan_tokens.rl:248 + act = 4 + case 6: +//line scan_tokens.rl:250 + act = 6 + case 7: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 8: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 9: +//line scan_tokens.rl:84 + te = p + 1 + { + token(TokenCQuote) + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + case 10: +//line scan_tokens.rl:248 + te = p + 1 + { + token(TokenQuotedLit) + } + case 11: +//line scan_tokens.rl:251 + te = p + 1 + { + token(TokenBadUTF8) + } + case 12: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 13: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 14: +//line scan_tokens.rl:248 + te = p + p-- + { + token(TokenQuotedLit) + } + case 15: +//line scan_tokens.rl:249 + te = p + p-- + { + token(TokenQuotedNewline) + } + case 16: +//line scan_tokens.rl:250 + te = p + p-- + { + token(TokenInvalid) + } + case 17: +//line scan_tokens.rl:251 + te = p + p-- + { + token(TokenBadUTF8) + } + case 18: +//line scan_tokens.rl:248 + p = (te) - 1 + { + token(TokenQuotedLit) + } + case 19: +//line scan_tokens.rl:251 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 20: +//line NONE:1 + switch act { + case 4: + { + p = (te) - 1 + token(TokenQuotedLit) + } + case 6: + { + p = (te) - 1 + token(TokenInvalid) + } + } + + case 21: +//line 
scan_tokens.rl:148 + act = 11 + case 22: +//line scan_tokens.rl:259 + act = 12 + case 23: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 24: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 25: +//line scan_tokens.rl:111 + te = p + 1 + { + // This action is called specificially when a heredoc literal + // ends with a newline character. + + // This might actually be our end marker. + topdoc := &heredocs[len(heredocs)-1] + if topdoc.StartOfLine { + maybeMarker := bytes.TrimSpace(data[ts:te]) + if bytes.Equal(maybeMarker, topdoc.Marker) { + // We actually emit two tokens here: the end-of-heredoc + // marker first, and then separately the newline that + // follows it. This then avoids issues with the closing + // marker consuming a newline that would normally be used + // to mark the end of an attribute definition. + // We might have either a \n sequence or an \r\n sequence + // here, so we must handle both. + nls := te - 1 + nle := te + te-- + if data[te-1] == '\r' { + // back up one more byte + nls-- + te-- + } + token(TokenCHeredoc) + ts = nls + te = nle + token(TokenNewline) + heredocs = heredocs[:len(heredocs)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } + } + + topdoc.StartOfLine = true + token(TokenStringLit) + } + case 26: +//line scan_tokens.rl:259 + te = p + 1 + { + token(TokenBadUTF8) + } + case 27: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 28: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 29: +//line scan_tokens.rl:148 + te = p + p-- + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 30: +//line scan_tokens.rl:259 + te = p + p-- + { + token(TokenBadUTF8) + } + case 31: +//line scan_tokens.rl:148 + p = (te) - 1 + { + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. + heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 32: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 11: + { + p = (te) - 1 + + // This action is called when a heredoc literal _doesn't_ end + // with a newline character, e.g. because we're about to enter + // an interpolation sequence. 
+ heredocs[len(heredocs)-1].StartOfLine = false + token(TokenStringLit) + } + case 12: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 33: +//line scan_tokens.rl:156 + act = 15 + case 34: +//line scan_tokens.rl:266 + act = 16 + case 35: +//line scan_tokens.rl:160 + te = p + 1 + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 36: +//line scan_tokens.rl:170 + te = p + 1 + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 37: +//line scan_tokens.rl:156 + te = p + 1 + { + token(TokenStringLit) + } + case 38: +//line scan_tokens.rl:266 + te = p + 1 + { + token(TokenBadUTF8) + } + case 39: +//line scan_tokens.rl:160 + te = p + p-- + { + token(TokenTemplateInterp) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 40: +//line scan_tokens.rl:170 + te = p + p-- + { + token(TokenTemplateControl) + braces++ + retBraces = append(retBraces, braces) + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false + } + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1459 + goto _again + } + } + case 41: +//line scan_tokens.rl:156 + te = p + p-- + { + token(TokenStringLit) + } + case 42: +//line scan_tokens.rl:266 + te = p + p-- + { + token(TokenBadUTF8) + } + case 43: +//line scan_tokens.rl:156 + p = (te) - 1 + { + token(TokenStringLit) + } + case 44: +//line NONE:1 + switch act { + case 0: + { + cs = 0 + goto _again + } + case 15: + { + p = (te) - 1 + + token(TokenStringLit) + } + case 16: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 45: +//line scan_tokens.rl:270 + act = 17 + case 46: +//line scan_tokens.rl:271 + act = 18 + case 47: +//line scan_tokens.rl:271 + te = p + 1 + { + token(TokenBadUTF8) + } + case 48: +//line scan_tokens.rl:272 + te = p + 1 + { + token(TokenInvalid) + } + case 49: +//line scan_tokens.rl:270 + te = p + p-- + { + token(TokenIdent) + } + case 50: +//line scan_tokens.rl:271 + te = p + p-- + { + token(TokenBadUTF8) + } + case 51: +//line scan_tokens.rl:270 + p = (te) - 1 + { + token(TokenIdent) + } + case 52: +//line scan_tokens.rl:271 + p = (te) - 1 + { + token(TokenBadUTF8) + } + case 53: +//line NONE:1 + switch act { + case 17: + { + p = (te) - 1 + token(TokenIdent) + } + case 18: + { + p = (te) - 1 + token(TokenBadUTF8) + } + } + + case 54: +//line scan_tokens.rl:278 + act = 22 + case 55: +//line scan_tokens.rl:301 + act = 39 + case 56: +//line scan_tokens.rl:280 + te = p + 1 + { + token(TokenComment) + } + case 57: +//line scan_tokens.rl:281 + te = p + 1 + { + token(TokenNewline) + } + case 58: +//line scan_tokens.rl:283 + te = p + 1 + { + token(TokenEqualOp) + } + case 59: +//line scan_tokens.rl:284 + te = p + 1 + { + token(TokenNotEqual) + } + case 60: +//line scan_tokens.rl:285 + te = p + 1 + { + token(TokenGreaterThanEq) + } + case 61: +//line scan_tokens.rl:286 + te = p + 1 + { + token(TokenLessThanEq) + } + case 62: +//line scan_tokens.rl:287 + te = p + 1 + { + token(TokenAnd) + } + case 63: +//line scan_tokens.rl:288 + te = p + 1 + { + 
token(TokenOr) + } + case 64: +//line scan_tokens.rl:289 + te = p + 1 + { + token(TokenEllipsis) + } + case 65: +//line scan_tokens.rl:290 + te = p + 1 + { + token(TokenFatArrow) + } + case 66: +//line scan_tokens.rl:291 + te = p + 1 + { + selfToken() + } + case 67: +//line scan_tokens.rl:180 + te = p + 1 + { + token(TokenOBrace) + braces++ + } + case 68: +//line scan_tokens.rl:185 + te = p + 1 + { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + token(TokenCBrace) + braces-- + } + } + case 69: +//line scan_tokens.rl:197 + te = p + 1 + { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd) + braces-- + retBraces = retBraces[0 : len(retBraces)-1] + top-- + cs = stack[top] + { + stack = stack[:len(stack)-1] + } + goto _again + + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd) + braces-- + } + } + case 70: +//line scan_tokens.rl:79 + te = p + 1 + { + token(TokenOQuote) + { + stack = append(stack, 0) + stack[top] = cs + top++ + cs = 1509 + goto _again + } + } + case 71: +//line scan_tokens.rl:89 + te = p + 1 + { + token(TokenOHeredoc) + // the token is currently the whole heredoc introducer, like + // < 0; _nacts-- { + _acts++ + switch _hcltok_actions[_acts-1] { + case 1: +//line NONE:1 + ts = 0 + + case 2: +//line NONE:1 + act = 0 + +//line scan_tokens.go:5073 + } + } + + if cs == 0 { + goto _out + } + p++ + if p != pe { + goto _resume + } + _test_eof: + { + } + if p == eof { + if _hcltok_eof_trans[cs] > 0 { + _trans = int(_hcltok_eof_trans[cs] - 1) + goto _eof_trans + } + } + + _out: + { + } + } + +//line scan_tokens.rl:363 + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. 
+ f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl new file mode 100644 index 00000000..4443dc48 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl @@ -0,0 +1,395 @@ + +package hclsyntax + +import ( + "bytes" + + "github.com/hashicorp/hcl2/hcl" +) + +// This file is generated from scan_tokens.rl. DO NOT EDIT. +%%{ + # (except when you are actually in scan_tokens.rl here, so edit away!) + + machine hcltok; + write data; +}%% + +func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { + stripData := stripUTF8BOM(data) + start.Byte += len(data) - len(stripData) + data = stripData + + f := &tokenAccum{ + Filename: filename, + Bytes: data, + Pos: start, + StartByte: start.Byte, + } + + %%{ + include UnicodeDerived "unicode_derived.rl"; + + UTF8Cont = 0x80 .. 0xBF; + AnyUTF8 = ( + 0x00..0x7F | + 0xC0..0xDF . UTF8Cont | + 0xE0..0xEF . UTF8Cont . UTF8Cont | + 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont + ); + BrokenUTF8 = any - AnyUTF8; + + NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit); + NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.'))); + Ident = (ID_Start | '_') (ID_Continue | '-')*; + + # Symbols that just represent themselves are handled as a single rule. + SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'"; + + EqualOp = "=="; + NotEqual = "!="; + GreaterThanEqual = ">="; + LessThanEqual = "<="; + LogicalAnd = "&&"; + LogicalOr = "||"; + + Ellipsis = "..."; + FatArrow = "=>"; + + Newline = '\r' ? '\n'; + EndOfLine = Newline; + + BeginStringTmpl = '"'; + BeginHeredocTmpl = '<<' ('-')? Ident Newline; + + Comment = ( + # The :>> operator in these is a "finish-guarded concatenation", + # which terminates the sequence on its left when it completes + # the sequence on its right. + # In the single-line comment cases this is allowing us to make + # the trailing EndOfLine optional while still having the overall + # pattern terminate. In the multi-line case it ensures that + # the first comment in the file ends at the first */, rather than + # gobbling up all of the "any*" until the _final_ */ in the file. + ("#" (any - EndOfLine)* :>> EndOfLine?) | + ("//" (any - EndOfLine)* :>> EndOfLine?) | + ("/*" any* :>> "*/") + ); + + # Note: hclwrite assumes that only ASCII spaces appear between tokens, + # and uses this assumption to recreate the spaces between tokens by + # looking at byte offset differences. This means it will produce + # incorrect results in the presence of tabs, but that's acceptable + # because the canonical style (which hclwrite itself can impose + # automatically is to never use tabs). 
+ Spaces = (' ' | 0x09)+; + + action beginStringTemplate { + token(TokenOQuote); + fcall stringTemplate; + } + + action endStringTemplate { + token(TokenCQuote); + fret; + } + + action beginHeredocTemplate { + token(TokenOHeredoc); + // the token is currently the whole heredoc introducer, like + // < 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action beginTemplateControl { + token(TokenTemplateControl); + braces++; + retBraces = append(retBraces, braces); + if len(heredocs) > 0 { + heredocs[len(heredocs)-1].StartOfLine = false; + } + fcall main; + } + + action openBrace { + token(TokenOBrace); + braces++; + } + + action closeBrace { + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + token(TokenCBrace); + braces--; + } + } + + action closeTemplateSeqEatWhitespace { + // Only consume from the retBraces stack and return if we are at + // a suitable brace nesting level, otherwise things will get + // confused. (Not entering this branch indicates a syntax error, + // which we will catch in the parser.) + if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { + token(TokenTemplateSeqEnd); + braces--; + retBraces = retBraces[0:len(retBraces)-1] + fret; + } else { + // We intentionally generate a TokenTemplateSeqEnd here, + // even though the user apparently wanted a brace, because + // we want to allow the parser to catch the incorrect use + // of a ~} to balance a generic opening brace, rather than + // a template sequence. + token(TokenTemplateSeqEnd); + braces--; + } + } + + TemplateInterp = "${" ("~")?; + TemplateControl = "%{" ("~")?; + EndStringTmpl = '"'; + NewlineChars = ("\r"|"\n"); + NewlineCharsSeq = NewlineChars+; + StringLiteralChars = (AnyUTF8 - NewlineChars); + TemplateIgnoredNonBrace = (^'{' %{ fhold; }); + TemplateNotInterp = '$' (TemplateIgnoredNonBrace | TemplateInterp); + TemplateNotControl = '%' (TemplateIgnoredNonBrace | TemplateControl); + QuotedStringLiteralWithEsc = ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"' | "\\")); + TemplateStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (QuotedStringLiteralWithEsc)+ + ); + HeredocStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (StringLiteralChars - ("$" | '%'))* + ); + BareStringLiteral = ( + (TemplateNotInterp) | + (TemplateNotControl) | + (StringLiteralChars - ("$" | '%'))* + ) Newline?; + + stringTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + EndStringTmpl => endStringTemplate; + TemplateStringLiteral => { token(TokenQuotedLit); }; + NewlineCharsSeq => { token(TokenQuotedNewline); }; + AnyUTF8 => { token(TokenInvalid); }; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + heredocTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + HeredocStringLiteral EndOfLine => heredocLiteralEOL; + HeredocStringLiteral => heredocLiteralMidline; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + bareTemplate := |* + TemplateInterp => beginTemplateInterp; + TemplateControl => beginTemplateControl; + BareStringLiteral => bareTemplateLiteral; + BrokenUTF8 => { token(TokenBadUTF8); }; + *|; + + identOnly := |* + Ident => { token(TokenIdent) }; + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + main := |* + Spaces => {}; + NumberLit => { token(TokenNumberLit) }; + Ident => { 
token(TokenIdent) }; + + Comment => { token(TokenComment) }; + Newline => { token(TokenNewline) }; + + EqualOp => { token(TokenEqualOp); }; + NotEqual => { token(TokenNotEqual); }; + GreaterThanEqual => { token(TokenGreaterThanEq); }; + LessThanEqual => { token(TokenLessThanEq); }; + LogicalAnd => { token(TokenAnd); }; + LogicalOr => { token(TokenOr); }; + Ellipsis => { token(TokenEllipsis); }; + FatArrow => { token(TokenFatArrow); }; + SelfToken => { selfToken() }; + + "{" => openBrace; + "}" => closeBrace; + + "~}" => closeTemplateSeqEatWhitespace; + + BeginStringTmpl => beginStringTemplate; + BeginHeredocTmpl => beginHeredocTemplate; + + BrokenUTF8 => { token(TokenBadUTF8) }; + AnyUTF8 => { token(TokenInvalid) }; + *|; + + }%% + + // Ragel state + p := 0 // "Pointer" into data + pe := len(data) // End-of-data "pointer" + ts := 0 + te := 0 + act := 0 + eof := pe + var stack []int + var top int + + var cs int // current state + switch mode { + case scanNormal: + cs = hcltok_en_main + case scanTemplate: + cs = hcltok_en_bareTemplate + case scanIdentOnly: + cs = hcltok_en_identOnly + default: + panic("invalid scanMode") + } + + braces := 0 + var retBraces []int // stack of brace levels that cause us to use fret + var heredocs []heredocInProgress // stack of heredocs we're currently processing + + %%{ + prepush { + stack = append(stack, 0); + } + postpop { + stack = stack[:len(stack)-1]; + } + }%% + + // Make Go compiler happy + _ = ts + _ = te + _ = act + _ = eof + + token := func (ty TokenType) { + f.emitToken(ty, ts, te) + } + selfToken := func () { + b := data[ts:te] + if len(b) != 1 { + // should never happen + panic("selfToken only works for single-character tokens") + } + f.emitToken(TokenType(b[0]), ts, te) + } + + %%{ + write init nocs; + write exec; + }%% + + // If we fall out here without being in a final state then we've + // encountered something that the scanner can't match, which we'll + // deal with as an invalid. + if cs < hcltok_first_final { + if mode == scanTemplate && len(stack) == 0 { + // If we're scanning a bare template then any straggling + // top-level stuff is actually literal string, rather than + // invalid. This handles the case where the template ends + // with a single "$" or "%", which trips us up because we + // want to see another character to decide if it's a sequence + // or an escape. + f.emitToken(TokenStringLit, ts, len(data)) + } else { + f.emitToken(TokenInvalid, ts, len(data)) + } + } + + // We always emit a synthetic EOF token at the end, since it gives the + // parser position information for an "unexpected EOF" diagnostic. + f.emitToken(TokenEOF, len(data), len(data)) + + return f.Tokens +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md new file mode 100644 index 00000000..091c1c23 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md @@ -0,0 +1,926 @@ +# HCL Native Syntax Specification + +This is the specification of the syntax and semantics of the native syntax +for HCL. HCL is a system for defining configuration languages for applications. +The HCL information model is designed to support multiple concrete syntaxes +for configuration, but this native syntax is considered the primary format +and is optimized for human authoring and maintenance, as opposed to machine +generation of configuration. 
+
+The language consists of three integrated sub-languages:
+
+- The _structural_ language defines the overall hierarchical configuration
+  structure, and is a serialization of HCL bodies, blocks and attributes.
+
+- The _expression_ language is used to express attribute values, either as
+  literals or as derivations of other values.
+
+- The _template_ language is used to compose values together into strings,
+  as one of several types of expression in the expression language.
+
+In normal use these three sub-languages are used together within configuration
+files to describe an overall configuration, with the structural language
+being used at the top level. The expression and template languages can also
+be used in isolation, to implement features such as REPLs, debuggers, and
+integration into more limited HCL syntaxes such as the JSON profile.
+
+## Syntax Notation
+
+Within this specification a semi-formal notation is used to illustrate the
+details of syntax. This notation is intended for human consumption rather
+than machine consumption, with the following conventions:
+
+- A naked name starting with an uppercase letter is a global production,
+  common to all of the syntax specifications in this document.
+- A naked name starting with a lowercase letter is a local production,
+  meaningful only within the specification where it is defined.
+- Double and single quotes (`"` and `'`) are used to mark literal character
+  sequences, which may be either punctuation markers or keywords.
+- The default operator for combining items, which has no punctuation,
+  is concatenation.
+- The symbol `|` indicates that any one of its left and right operands may
+  be present.
+- The `*` symbol indicates zero or more repetitions of the item to its left.
+- The `?` symbol indicates zero or one of the item to its left.
+- Parentheses (`(` and `)`) are used to group items together to apply
+  the `|`, `*` and `?` operators to them collectively.
+
+The grammar notation does not fully describe the language. The prose may
+augment or conflict with the illustrated grammar. In case of conflict, prose
+has priority.
+
+## Source Code Representation
+
+Source code is unicode text expressed in the UTF-8 encoding. The language
+itself does not perform unicode normalization, so syntax features such as
+identifiers are sequences of unicode code points and so e.g. a precombined
+accented character is distinct from a letter associated with a combining
+accent. (String literals have some special handling with regard to Unicode
+normalization which will be covered later in the relevant section.)
+
+UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
+non-normalized UTF-8 encoding is always a parse error.
+
+## Lexical Elements
+
+### Comments and Whitespace
+
+Comments and Whitespace are recognized as lexical elements but are ignored
+except as described below.
+
+Whitespace is defined as a sequence of zero or more space characters
+(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
+are _not_ considered whitespace but are ignored as such in certain contexts.
+
+Horizontal tab characters (U+0009) are not considered to be whitespace and
+are not valid within HCL native syntax.
+
+Comments serve as program documentation and come in two forms:
+
+- _Line comments_ start with either the `//` or `#` sequences and end with
+  the next newline sequence. A line comment is considered equivalent to a
+  newline sequence.
+
+- _Inline comments_ start with the `/*` sequence and end with the `*/`
+  sequence, and may have any characters within except the ending sequence.
+  An inline comment is considered equivalent to a whitespace sequence.
+
+Comments and whitespace cannot begin within other comments, or within
+template literals except inside an interpolation sequence or template directive.
+
+### Identifiers
+
+Identifiers name entities such as blocks, attributes and expression variables.
+Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
+their syntax is defined in terms of the `ID_Start` and `ID_Continue`
+character properties as follows:
+
+```ebnf
+Identifier = ID_Start (ID_Continue | '-')*;
+```
+
+The Unicode specification provides the normative requirements for identifier
+parsing. Non-normatively, the spirit of this specification is that `ID_Start`
+consists of Unicode letters and certain unambiguous punctuation tokens, while
+`ID_Continue` augments that set with Unicode digits, combining marks, etc.
+
+The dash character `-` is additionally allowed in identifiers, even though
+that is not part of the unicode `ID_Continue` definition. This is to allow
+attribute names and block type names to contain dashes, although underscores
+as word separators are considered the idiomatic usage.
+
+[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
+
+### Keywords
+
+There are no globally-reserved words, but in some contexts certain identifiers
+are reserved to function as keywords. These are discussed further in the
+relevant documentation sections that follow. In such situations, the
+identifier's role as a keyword supersedes any other valid interpretation that
+may be possible. Outside of these specific situations, the keywords have no
+special meaning and are interpreted as regular identifiers.
+
+### Operators and Delimiters
+
+The following character sequences represent operators, delimiters, and other
+special tokens:
+
+```
++ && == < : { [ ( ${
+- || != > ? } ] ) %{
+* ! <= = .
+/ >= => ,
+% ...
+```
+
+### Numeric Literals
+
+A numeric literal is a decimal representation of a
+real number. It has an integer part, an optional fractional part,
+and an optional exponent part.
+
+```ebnf
+NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
+decimal = '0' .. '9';
+expmark = ('e' | 'E') ("+" | "-")?;
+```
+
+## Structural Elements
+
+The structural language consists of syntax representing the following
+constructs:
+
+- _Attributes_, which assign a value to a specified name.
+- _Blocks_, which create a child body annotated by a type and optional labels.
+- _Body Content_, which consists of a collection of attributes and blocks.
+
+These constructs correspond to the similarly-named concepts in the
+language-agnostic HCL information model.
+
+```ebnf
+ConfigFile = Body;
+Body = (Attribute | Block | OneLineBlock)*;
+Attribute = Identifier "=" Expression Newline;
+Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
+OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
+```
+
+### Configuration Files
+
+A _configuration file_ is a sequence of characters whose top-level is
+interpreted as a Body.
+
+### Bodies
+
+A _body_ is a collection of associated attributes and blocks. The meaning of
+this association is defined by the calling application.
+
+### Attribute Definitions
+
+An _attribute definition_ assigns a value to a particular attribute name within
+a body. Each distinct attribute name may be defined no more than once within a
+single body.
+
+The attribute value is given as an expression, which is retained literally
+for later evaluation by the calling application.
+
+### Blocks
+
+A _block_ creates a child body that is annotated with a block _type_ and
+zero or more block _labels_. Blocks create a structural hierarchy which can be
+interpreted by the calling application.
+
+Block labels can either be quoted literal strings or naked identifiers.
+
+## Expressions
+
+The expression sub-language is used within attribute definitions to specify
+values.
+
+```ebnf
+Expression = (
+    ExprTerm |
+    Operation |
+    Conditional
+);
+```
+
+### Types
+
+The value types used within the expression language are those defined by the
+syntax-agnostic HCL information model. An expression may return any valid
+type, but only a subset of the available types have first-class syntax.
+A calling application may make other types available via _variables_ and
+_functions_.
+
+### Expression Terms
+
+Expression _terms_ are the operands for unary and binary expressions, as well
+as acting as expressions in their own right.
+
+```ebnf
+ExprTerm = (
+    LiteralValue |
+    CollectionValue |
+    TemplateExpr |
+    VariableExpr |
+    FunctionCall |
+    ForExpr |
+    ExprTerm Index |
+    ExprTerm GetAttr |
+    ExprTerm Splat |
+    "(" Expression ")"
+);
+```
+
+The productions for these different term types are given in their corresponding
+sections.
+
+Between the `(` and `)` characters denoting a sub-expression, newline
+characters are ignored as whitespace.
+
+### Literal Values
+
+A _literal value_ immediately represents a particular value of a primitive
+type.
+
+```ebnf
+LiteralValue = (
+    NumericLit |
+    "true" |
+    "false" |
+    "null"
+);
+```
+
+- Numeric literals represent values of type _number_.
+- The `true` and `false` keywords represent values of type _bool_.
+- The `null` keyword represents a null value of the dynamic pseudo-type.
+
+String literals are not directly available in the expression sub-language, but
+are available via the template sub-language, which can in turn be incorporated
+via _template expressions_.
+
+### Collection Values
+
+A _collection value_ combines zero or more other expressions to produce a
+collection value.
+
+```ebnf
+CollectionValue = tuple | object;
+tuple = "[" (
+    (Expression ("," Expression)* ","?)?
+) "]";
+object = "{" (
+    (objectelem ("," objectelem)* ","?)?
+) "}";
+objectelem = (Identifier | Expression) "=" Expression;
+```
+
+Only tuple and object values can be directly constructed via native syntax.
+Tuple and object values can in turn be converted to list, set and map values
+with other operations, which behave as defined by the syntax-agnostic HCL
+information model.
+
+When specifying an object element, an identifier is interpreted as a literal
+attribute name as opposed to a variable reference. To populate an item key
+from a variable, use parentheses to disambiguate:
+
+- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
+- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
+  from the variable named `foo`.
+
+Between the open and closing delimiters of these sequences, newline sequences
+are ignored as whitespace.
+
+There is a syntax ambiguity between _for expressions_ and collection values
+whose first element is a reference to a variable named `for`.
The +_for expression_ interpretation has priority, so to produce a tuple whose +first element is the value of a variable named `for`, or an object with a +key named `for`, use parentheses to disambiguate: + +- `[for, foo, baz]` is a syntax error. +- `[(for), foo, baz]` is a tuple whose first element is the value of variable + `for`. +- `{for: 1, baz: 2}` is a syntax error. +- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. +- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the + ambiguity by reordering. + +### Template Expressions + +A _template expression_ embeds a program written in the template sub-language +as an expression. Template expressions come in two forms: + +- A _quoted_ template expression is delimited by quote characters (`"`) and + defines a template as a single-line expression with escape characters. +- A _heredoc_ template expression is introduced by a `<<` sequence and + defines a template via a multi-line sequence terminated by a user-chosen + delimiter. + +In both cases the template interpolation and directive syntax is available for +use within the delimiters, and any text outside of these special sequences is +interpreted as a literal string. + +In _quoted_ template expressions any literal string sequences within the +template behave in a special way: literal newline sequences are not permitted +and instead _escape sequences_ can be included, starting with the +backslash `\`: + +``` + \n Unicode newline control character + \r Unicode carriage return control character + \t Unicode tab control character + \" Literal quote mark, used to prevent interpretation as end of string + \\ Literal backslash, used to prevent interpretation as escape sequence + \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits) + \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits) +``` + +The _heredoc_ template expression type is introduced by either `<<` or `<<-`, +followed by an identifier. The template expression ends when the given +identifier subsequently appears again on a line of its own. + +If a heredoc template is introduced with the `<<-` symbol, any literal string +at the start of each line is analyzed to find the minimum number of leading +spaces, and then that number of prefix spaces is removed from all line-leading +literal strings. The final closing marker may also have an arbitrary number +of spaces preceding it on its line. + +```ebnf +TemplateExpr = quotedTemplate | heredocTemplate; +quotedTemplate = (as defined in prose above); +heredocTemplate = ( + ("<<" | "<<-") Identifier Newline + (content as defined in prose above) + Identifier Newline +); +``` + +A quoted template expression containing only a single literal string serves +as a syntax for defining literal string _expressions_. In certain contexts +the template syntax is restricted in this manner: + +```ebnf +StringLit = '"' (quoted literals as defined in prose above) '"'; +``` + +The `StringLit` production permits the escape sequences discussed for quoted +template expressions as above, but does _not_ permit template interpolation +or directive sequences. + +### Variables and Variable Expressions + +A _variable_ is a value that has been assigned a symbolic name. Variables are +made available for use in expressions by the calling application, by populating +the _global scope_ used for expression evaluation. 
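+
+As a non-normative illustration (an editor's sketch, not part of this
+specification), the following Go program populates the global scope before
+evaluating an expression. It assumes the upstream `hashicorp/hcl2` and
+`zclconf/go-cty` packages (vendored in this repository under
+`github.com/...` paths); the filename passed to the parser is arbitrary:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	// Parse a standalone expression that refers to a variable named "name".
+	expr, diags := hclsyntax.ParseExpression(
+		[]byte(`"hello, ${name}"`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// The calling application defines the global scope by populating an
+	// evaluation context with variable values.
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"name": cty.StringVal("world"),
+		},
+	}
+
+	val, moreDiags := expr.Value(ctx)
+	if moreDiags.HasErrors() {
+		panic(moreDiags.Error())
+	}
+	fmt.Println(val.AsString()) // hello, world
+}
+```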
+
+Variables can also be created by expressions themselves, which always creates
+a _child scope_ that incorporates the variables from its parent scope but
+(re-)defines zero or more names with new values.
+
+The value of a variable is accessed using a _variable expression_, which is
+a standalone `Identifier` whose name corresponds to a defined variable:
+
+```ebnf
+VariableExpr = Identifier;
+```
+
+Variables in a particular scope are immutable, but child scopes may _hide_
+a variable from an ancestor scope by defining a new variable of the same name.
+When looking up variables, the most locally-defined variable of the given name
+is used, and ancestor-scoped variables of the same name cannot be accessed.
+
+No direct syntax is provided for declaring or assigning variables, but other
+expression constructs implicitly create child scopes and define variables as
+part of their evaluation.
+
+### Functions and Function Calls
+
+A _function_ is an operation that has been assigned a symbolic name. Functions
+are made available for use in expressions by the calling application, by
+populating the _function table_ used for expression evaluation.
+
+The namespace of functions is distinct from the namespace of variables. A
+function and a variable may share the same name with no implication that they
+are in any way related.
+
+A function can be executed via a _function call_ expression:
+
+```ebnf
+FunctionCall = Identifier "(" Arguments ")";
+Arguments = (
+    () |
+    (Expression ("," Expression)* ("," | "...")?)
+);
+```
+
+The definition of functions and the semantics of calling them are defined by
+the language-agnostic HCL information model. The given arguments are mapped
+onto the function's _parameters_ and the result of a function call expression
+is the return value of the named function when given those arguments.
+
+If the final argument expression is followed by the ellipsis symbol (`...`),
+the final argument expression must evaluate to either a list or tuple value.
+The elements of the value are each mapped to a single parameter of the
+named function, beginning at the first parameter remaining after all other
+argument expressions have been mapped.
+
+Within the parentheses that delimit the function arguments, newline sequences
+are ignored as whitespace.
+
+### For Expressions
+
+A _for expression_ is a construct for constructing a collection by projecting
+the items from another collection.
+
+```ebnf
+ForExpr = forTupleExpr | forObjectExpr;
+forTupleExpr = "[" forIntro Expression forCond? "]";
+forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
+forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
+forCond = "if" Expression;
+```
+
+The punctuation used to delimit a for expression decides whether it will produce
+a tuple value (`[` and `]`) or an object value (`{` and `}`).
+
+The "introduction" is equivalent in both cases: the keyword `for` followed by
+either one or two identifiers separated by a comma which define the temporary
+variable names used for iteration, followed by the keyword `in` and then
+an expression that must evaluate to a value that can be iterated. The
+introduction is then terminated by the colon (`:`) symbol.
+
+If only one identifier is provided, it is the name of a variable that will
+be temporarily assigned the value of each element during iteration. If both
+are provided, the first is the key and the second is the value.
+
+Tuple, object, list, map, and set types are iterable. The type of collection
+used defines how the key and value variables are populated:
+
+- For tuple and list types, the _key_ is the zero-based index into the
+  sequence for each element, and the _value_ is the element value. The
+  elements are visited in index order.
+- For object and map types, the _key_ is the string attribute name or element
+  key, and the _value_ is the attribute or element value. The elements are
+  visited in the order defined by a lexicographic sort of the attribute names
+  or keys.
+- For set types, the _key_ and _value_ are both the element value. The elements
+  are visited in an undefined but consistent order.
+
+The expression after the colon and (in the case of object `for`) the expression
+after the `=>` are both evaluated once for each element of the source
+collection, in a local scope that defines the key and value variable names
+specified.
+
+The results of evaluating these expressions for each input element are used
+to populate an element in the new collection. In the case of tuple `for`, the
+single expression becomes an element, appending values to the tuple in visit
+order. In the case of object `for`, the pair of expressions is used as an
+attribute name and value respectively, creating an element in the resulting
+object.
+
+In the case of object `for`, it is an error if two input elements produce
+the same result from the attribute name expression, since duplicate
+attributes are not possible. If the ellipsis symbol (`...`) appears
+immediately after the value expression, this activates the grouping mode in
+which each value in the resulting object is a _tuple_ of all of the values
+that were produced against each distinct key.
+
+- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
+- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
+- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
+- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
+  `a` is defined twice.
+- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
+
+If the `if` keyword is used after the element expression(s), it applies an
+additional predicate that can be used to conditionally filter elements from
+the source collection from consideration. The expression following `if` is
+evaluated once for each source element, in the same scope used for the
+element expression(s). It must evaluate to a boolean value; if `true`, the
+element will be evaluated as normal, while if `false` the element will be
+skipped.
+
+- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
+
+If the collection value, element expression(s) or condition expression return
+unknown values that are otherwise type-valid, the result is a value of the
+dynamic pseudo-type.
+
+### Index Operator
+
+The _index_ operator returns the value of a single element of a collection
+value. It is a postfix operator and can be applied to any value that has
+a tuple, object, map, or list type.
+
+```ebnf
+Index = "[" Expression "]";
+```
+
+The expression delimited by the brackets is the _key_ by which an element
+will be looked up.
+
+If the index operator is applied to a value of tuple or list type, the
+key expression must be a non-negative integer number representing the
+zero-based element index to access. If applied to a value of object or map
+type, the key expression must be a string representing the attribute name
+or element key.
If the given key value is not of the appropriate type, a +conversion is attempted using the conversion rules from the HCL +syntax-agnostic information model. + +An error is produced if the given key expression does not correspond to +an element in the collection, either because it is of an unconvertable type, +because it is outside the range of elements for a tuple or list, or because +the given attribute or key does not exist. + +If either the collection or the key are an unknown value of an +otherwise-suitable type, the return value is an unknown value whose type +matches what type would be returned given known values, or a value of the +dynamic pseudo-type if type information alone cannot determine a suitable +return type. + +Within the brackets that delimit the index key, newline sequences are ignored +as whitespace. + +### Attribute Access Operator + +The _attribute access_ operator returns the value of a single attribute in +an object value. It is a postfix operator and can be applied to any value +that has an object type. + +```ebnf +GetAttr = "." Identifier; +``` + +The given identifier is interpreted as the name of the attribute to access. +An error is produced if the object to which the operator is applied does not +have an attribute with the given name. + +If the object is an unknown value of a type that has the attribute named, the +result is an unknown value of the attribute's type. + +### Splat Operators + +The _splat operators_ allow convenient access to attributes or elements of +elements in a tuple, list, or set value. + +There are two kinds of "splat" operator: + +- The _attribute-only_ splat operator supports only attribute lookups into + the elements from a list, but supports an arbitrary number of them. + +- The _full_ splat operator additionally supports indexing into the elements + from a list, and allows any combination of attribute access and index + operations. + +```ebnf +Splat = attrSplat | fullSplat; +attrSplat = "." "*" GetAttr*; +fullSplat = "[" "*" "]" (GetAttr | Index)*; +``` + +The splat operators can be thought of as shorthands for common operations that +could otherwise be performed using _for expressions_: + +- `tuple.*.foo.bar[0]` is approximately equivalent to + `[for v in tuple: v.foo.bar][0]`. +- `tuple[*].foo.bar[0]` is approximately equivalent to + `[for v in tuple: v.foo.bar[0]]` + +Note the difference in how the trailing index operator is interpreted in +each case. This different interpretation is the key difference between the +_attribute-only_ and _full_ splat operators. + +Splat operators have one additional behavior compared to the equivalent +_for expressions_ shown above: if a splat operator is applied to a value that +is _not_ of tuple, list, or set type, the value is coerced automatically into +a single-value list of the value type: + +- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object` + is a single object. +- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number` + is a single number. + +If applied to a null value that is not tuple, list, or set, the result is always +an empty tuple, which allows conveniently converting a possibly-null scalar +value into a tuple of zero or one elements. It is illegal to apply a splat +operator to a null value of tuple, list, or set type. + +### Operations + +Operations apply a particular operator to either one or two expression terms. 
+
+```ebnf
+Operation = unaryOp | binaryOp;
+unaryOp = ("-" | "!") ExprTerm;
+binaryOp = ExprTerm binaryOperator ExprTerm;
+binaryOperator = compareOperator | arithmeticOperator | logicOperator;
+compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
+arithmeticOperator = "+" | "-" | "*" | "/" | "%";
+logicOperator = "&&" | "||" | "!";
+```
+
+The unary operators have the highest precedence.
+
+The binary operators are grouped into the following precedence levels:
+
+```
+Level    Operators
+  6      * / %
+  5      + -
+  4      > >= < <=
+  3      == !=
+  2      &&
+  1      ||
+```
+
+Higher values of "level" bind tighter. Operators within the same precedence
+level have left-to-right associativity. For example, `x / y * z` is equivalent
+to `(x / y) * z`.
+
+### Comparison Operators
+
+Comparison operators always produce boolean values, as a result of testing
+the relationship between two values.
+
+The two equality operators apply to values of any type:
+
+```
+a == b    equal
+a != b    not equal
+```
+
+Two values are equal if they are of identical types and their values are
+equal as defined in the HCL syntax-agnostic information model. The equality
+operators are commutative and opposite, such that `(a == b) == !(a != b)`
+and `(a == b) == (b == a)` for all values `a` and `b`.
+
+The four numeric comparison operators apply only to numbers:
+
+```
+a < b     less than
+a <= b    less than or equal to
+a > b     greater than
+a >= b    greater than or equal to
+```
+
+If either operand of a comparison operator is a correctly-typed unknown value
+or a value of the dynamic pseudo-type, the result is an unknown boolean.
+
+### Arithmetic Operators
+
+Arithmetic operators apply only to number values and always produce number
+values as results.
+
+```
+a + b    sum        (addition)
+a - b    difference (subtraction)
+a * b    product    (multiplication)
+a / b    quotient   (division)
+a % b    remainder  (modulo)
+-a       negation
+```
+
+Arithmetic operations are considered to be performed in an arbitrary-precision
+number space.
+
+If either operand of an arithmetic operator is an unknown number or a value
+of the dynamic pseudo-type, the result is an unknown number.
+
+### Logic Operators
+
+Logic operators apply only to boolean values and always produce boolean values
+as results.
+
+```
+a && b    logical AND
+a || b    logical OR
+!a        logical NOT
+```
+
+If either operand of a logic operator is an unknown bool value or a value
+of the dynamic pseudo-type, the result is an unknown bool value.
+
+### Conditional Operator
+
+The conditional operator allows selecting from one of two expressions based on
+the outcome of a boolean expression.
+
+```ebnf
+Conditional = Expression "?" Expression ":" Expression;
+```
+
+The first expression is the _predicate_, which is evaluated and must produce
+a boolean result. If the predicate value is `true`, the result of the second
+expression is the result of the conditional. If the predicate value is
+`false`, the result of the third expression is the result of the conditional.
+
+The second and third expressions must be of the same type or must be able to
+unify into a common type using the type unification rules defined in the
+HCL syntax-agnostic information model. This unified type is the result type
+of the conditional, with both expressions converted as necessary to the
+unified type.
+
+If the predicate is an unknown boolean value or a value of the dynamic
+pseudo-type then the result is an unknown value of the unified type of the
+other two expressions.
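+
+Non-normatively, this unknown-predicate rule can be observed directly; the
+following is an editor's sketch under the same package assumptions as the
+earlier example:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	expr, diags := hclsyntax.ParseExpression(
+		[]byte(`cond ? "a" : "b"`), "cond.hcl", hcl.Pos{Line: 1, Column: 1})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// The predicate is a correctly-typed unknown boolean...
+	ctx := &hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"cond": cty.UnknownVal(cty.Bool),
+		},
+	}
+
+	val, moreDiags := expr.Value(ctx)
+	if moreDiags.HasErrors() {
+		panic(moreDiags.Error())
+	}
+
+	// ...so the result is an unknown value of the unified type of the
+	// other two expressions; here both branches are strings.
+	fmt.Println(val.IsKnown())               // false
+	fmt.Println(val.Type().Equals(cty.String)) // true
+}
+```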
+
+## Templates
+
+The template sub-language is used within template expressions to concisely
+combine strings and other values to produce other strings. It can also be
+used in isolation as a standalone template language.
+
+```ebnf
+Template = (
+    TemplateLiteral |
+    TemplateInterpolation |
+    TemplateDirective
+)*
+TemplateDirective = TemplateIf | TemplateFor;
+```
+
+A template behaves like an expression that always returns a string value.
+The different elements of the template are evaluated and combined into a
+single string to return. If any of the elements produce an unknown string
+or a value of the dynamic pseudo-type, the result is an unknown string.
+
+An important use-case for standalone templates is to enable the use of
+expressions in alternative HCL syntaxes where a native expression grammar is
+not available. For example, the HCL JSON profile treats the values of JSON
+strings as standalone templates when attributes are evaluated in expression
+mode.
+
+### Template Literals
+
+A template literal is a literal sequence of characters to include in the
+resulting string. When the template sub-language is used standalone, a
+template literal can contain any unicode character, with the exception
+of the sequences that introduce interpolations and directives, and for the
+sequences that escape those introductions.
+
+The interpolation and directive introductions are escaped by doubling their
+leading characters. The `${` sequence is escaped as `$${` and the `%{`
+sequence is escaped as `%%{`.
+
+When the template sub-language is embedded in the expression language via
+_template expressions_, additional constraints and transforms are applied to
+template literals as described in the definition of template expressions.
+
+The value of a template literal can be modified by _strip markers_ in any
+interpolations or directives that are adjacent to it. A strip marker is
+a tilde (`~`) placed immediately after the opening `{` or before the closing
+`}` of a template sequence:
+
+- `hello ${~ "world" }` produces `"helloworld"`.
+- `%{ if true ~} hello %{~ endif }` produces `"hello"`.
+
+When a strip marker is present, any spaces adjacent to it in the corresponding
+string literal (if any) are removed before producing the final value. Space
+characters are interpreted as per Unicode's definition.
+
+Stripping is done at syntax level rather than value level. Values returned
+by interpolations or directives are not subject to stripping:
+
+- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
+  because the space is not in a template literal directly adjacent to the
+  strip marker.
+
+### Template Interpolations
+
+An _interpolation sequence_ evaluates an expression (written in the
+expression sub-language), converts the result to a string value, and
+replaces itself with the resulting string.
+
+```ebnf
+TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}");
+```
+
+If the expression result cannot be converted to a string, an error is
+produced.
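+
+A minimal Go sketch of standalone template evaluation, under the same import
+path assumptions as the earlier conditional example, might look like:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	// One literal and one interpolation; the interpolation result is
+	// converted to string and spliced into the surrounding literal.
+	src := []byte(`Hello, ${name}!`)
+
+	tmpl, diags := hclsyntax.ParseTemplate(src, "greeting.tmpl", hcl.Pos{Line: 1, Column: 1, Byte: 0})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	val, diags := tmpl.Value(&hcl.EvalContext{
+		Variables: map[string]cty.Value{
+			"name": cty.StringVal("world"),
+		},
+	})
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	fmt.Println(val.AsString()) // Hello, world!
+}
+```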
+
+### Template If Directive
+
+The template `if` directive is the template equivalent of the
+_conditional expression_, allowing selection of one of two sub-templates based
+on the value of a predicate expression.
+
+```ebnf
+TemplateIf = (
+    ("%{" | "%{~") "if" Expression ("}" | "~}")
+    Template
+    (
+        ("%{" | "%{~") "else" ("}" | "~}")
+        Template
+    )?
+    ("%{" | "%{~") "endif" ("}" | "~}")
+);
+```
+
+The evaluation of the `if` directive is equivalent to the conditional
+expression, with the following exceptions:
+
+- The two sub-templates always produce strings, and thus the result value is
+  also always a string.
+- The `else` clause may be omitted, in which case the conditional's third
+  expression result is implied to be the empty string.
+
+### Template For Directive
+
+The template `for` directive is the template equivalent of the _for expression_,
+producing zero or more copies of its sub-template based on the elements of
+a collection.
+
+```ebnf
+TemplateFor = (
+    ("%{" | "%{~") "for" Identifier ("," Identifier)? "in" Expression ("}" | "~}")
+    Template
+    ("%{" | "%{~") "endfor" ("}" | "~}")
+);
+```
+
+The evaluation of the `for` directive is equivalent to the _for expression_
+when producing a tuple, with the following exceptions:
+
+- The sub-template always produces a string.
+- There is no equivalent of the "if" clause on the for expression.
+- The elements of the resulting tuple are all converted to strings and
+  concatenated to produce a flat string result.
+
+### Template Interpolation Unwrapping
+
+As a special case, a template that consists only of a single interpolation,
+with no surrounding literals, directives or other interpolations, is
+"unwrapped". In this case, the result of the interpolation expression is
+returned verbatim, without conversion to string.
+
+This special case exists primarily to enable the native template language
+to be used inside strings in alternative HCL syntaxes that lack a first-class
+template or expression syntax. Unwrapping allows arbitrary expressions to be
+used to populate attributes when strings in such languages are interpreted
+as templates.
+
+- `${true}` produces the boolean value `true`.
+- `${"${true}"}` produces the boolean value `true`, because both the inner
+  and outer interpolations are subject to unwrapping.
+- `hello ${true}` produces the string `"hello true"`.
+- `${""}${true}` produces the string `"true"` because there are two
+  interpolation sequences, even though one produces an empty result.
+- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because
+  the presence of the `for` directive circumvents the unwrapping even though
+  the final result is a single value.
+
+In some contexts this unwrapping behavior may be circumvented by the calling
+application, by converting the final template result to string. This is
+necessary, for example, if a standalone template is being used to produce
+the direct contents of a file, since the result in that case must always be a
+string.
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for some expression types
+in the native syntax, as described in the following sections.
+
+A goal for static analysis of the native syntax is for the interpretation to
+be as consistent as possible with the dynamic evaluation interpretation of
+the given expression, though some deviations are intentionally made in order
+to maximize the potential for analysis.
+
+### Static List
+
+The tuple construction syntax can be interpreted as a static list. All of
+the expression elements given are returned as the static list elements,
+with no further interpretation.
+
+### Static Map
+
+The object construction syntax can be interpreted as a static map. All of the
+key/value pairs given are returned as the static pairs, with no further
+interpretation.
+
+The usual requirement that an attribute name be interpretable as a string
+does not apply to this static analysis, allowing callers to provide map-like
+constructs with different key types by building on the map syntax.
+
+### Static Call
+
+The function call syntax can be interpreted as a static call. The called
+function name is returned verbatim and the given argument expressions are
+returned as the static arguments, with no further interpretation.
+
+### Static Traversal
+
+A variable expression and any attached attribute access operations and
+constant index operations can be interpreted as a static traversal.
+
+The keywords `true`, `false` and `null` can also be interpreted as
+static traversals, behaving as if they were references to variables of those
+names, to allow callers to redefine the meaning of those keywords in certain
+contexts.
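+
+A brief Go sketch of the static-list and static-traversal analyses, assuming
+the generic `hcl.ExprList` and `hcl.AbsTraversalForExpr` helpers are
+available in the vendored `hcl` package revision:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+func main() {
+	expr, diags := hclsyntax.ParseExpression(
+		[]byte(`[name, region.primary]`), "example.hcl",
+		hcl.Pos{Line: 1, Column: 1, Byte: 0},
+	)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// Static list: recover the element expressions without evaluation.
+	elems, diags := hcl.ExprList(expr)
+	if diags.HasErrors() {
+		panic(diags.Error())
+	}
+
+	// Static traversal: each element is analyzed as a variable reference.
+	for _, e := range elems {
+		traversal, diags := hcl.AbsTraversalForExpr(e)
+		if diags.HasErrors() {
+			panic(diags.Error())
+		}
+		fmt.Println(traversal.RootName()) // name, then region
+	}
+}
+```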
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
new file mode 100644
index 00000000..476025d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -0,0 +1,394 @@
+package hclsyntax
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// AsHCLBlock returns the block data expressed as a *hcl.Block.
+func (b *Block) AsHCLBlock() *hcl.Block {
+	if b == nil {
+		return nil
+	}
+
+	lastHeaderRange := b.TypeRange
+	if len(b.LabelRanges) > 0 {
+		lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
+	}
+
+	return &hcl.Block{
+		Type:   b.Type,
+		Labels: b.Labels,
+		Body:   b.Body,
+
+		DefRange:    hcl.RangeBetween(b.TypeRange, lastHeaderRange),
+		TypeRange:   b.TypeRange,
+		LabelRanges: b.LabelRanges,
+	}
+}
+
+// Body is the implementation of hcl.Body for the HCL native syntax.
+type Body struct {
+	Attributes Attributes
+	Blocks     Blocks
+
+	// These are used with PartialContent to produce a "remaining items"
+	// body to return. They are nil on all bodies fresh out of the parser.
+	hiddenAttrs  map[string]struct{}
+	hiddenBlocks map[string]struct{}
+
+	SrcRange hcl.Range
+	EndRange hcl.Range // Final token of the body, for reporting missing items
+}
+
+// Assert that *Body implements hcl.Body
+var assertBodyImplBody hcl.Body = &Body{}
+
+func (b *Body) walkChildNodes(w internalWalkFunc) {
+	w(b.Attributes)
+	w(b.Blocks)
+}
+
+func (b *Body) Range() hcl.Range {
+	return b.SrcRange
+}
+
+func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
+	content, remainHCL, diags := b.PartialContent(schema)
+
+	// Now we'll see if anything actually remains, to produce errors about
+	// extraneous items.
+	remain := remainHCL.(*Body)
+
+	for name, attr := range b.Attributes {
+		if _, hidden := remain.hiddenAttrs[name]; !hidden {
+			var suggestions []string
+			for _, attrS := range schema.Attributes {
+				if _, defined := content.Attributes[attrS.Name]; defined {
+					continue
+				}
+				suggestions = append(suggestions, attrS.Name)
+			}
+			suggestion := nameSuggestion(name, suggestions)
+			if suggestion != "" {
+				suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
+			} else {
+				// Is there a block of the same name?
+ for _, blockS := range schema.Blocks { + if blockS.Type == name { + suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported argument", + Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion), + Subject: &attr.NameRange, + }) + } + } + + for _, block := range b.Blocks { + blockTy := block.Type + if _, hidden := remain.hiddenBlocks[blockTy]; !hidden { + var suggestions []string + for _, blockS := range schema.Blocks { + suggestions = append(suggestions, blockS.Type) + } + suggestion := nameSuggestion(blockTy, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + // Is there an attribute of the same name? + for _, attrS := range schema.Attributes { + if attrS.Name == blockTy { + suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy) + break + } + } + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion), + Subject: &block.TypeRange, + }) + } + } + + return content, diags +} + +func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + attrs := make(hcl.Attributes) + var blocks hcl.Blocks + var diags hcl.Diagnostics + hiddenAttrs := make(map[string]struct{}) + hiddenBlocks := make(map[string]struct{}) + + if b.hiddenAttrs != nil { + for k, v := range b.hiddenAttrs { + hiddenAttrs[k] = v + } + } + if b.hiddenBlocks != nil { + for k, v := range b.hiddenBlocks { + hiddenBlocks[k] = v + } + } + + for _, attrS := range schema.Attributes { + name := attrS.Name + attr, exists := b.Attributes[name] + _, hidden := hiddenAttrs[name] + if hidden || !exists { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + continue + } + + hiddenAttrs[name] = struct{}{} + attrs[name] = attr.AsHCLAttribute() + } + + blocksWanted := make(map[string]hcl.BlockHeaderSchema) + for _, blockS := range schema.Blocks { + blocksWanted[blockS.Type] = blockS + } + + for _, block := range b.Blocks { + if _, hidden := hiddenBlocks[block.Type]; hidden { + continue + } + blockS, wanted := blocksWanted[block.Type] + if !wanted { + continue + } + + if len(block.Labels) > len(blockS.LabelNames) { + name := block.Type + if len(blockS.LabelNames) == 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "No labels are expected for %s blocks.", name, + ), + Subject: block.LabelRanges[0].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Extraneous label for %s", name), + Detail: fmt.Sprintf( + "Only %d labels (%s) are expected for %s blocks.", + len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name, + ), + Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(), + Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(), + }) + } + continue + } + + if len(block.Labels) < 
len(blockS.LabelNames) {
+			name := block.Type
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name),
+				Detail: fmt.Sprintf(
+					"All %s blocks must have %d labels (%s).",
+					name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "),
+				),
+				Subject: &block.OpenBraceRange,
+				Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
+			})
+			continue
+		}
+
+		blocks = append(blocks, block.AsHCLBlock())
+	}
+
+	// We hide blocks only after we've processed all of them, since otherwise
+	// we can't process more than one of the same type.
+	for _, blockS := range schema.Blocks {
+		hiddenBlocks[blockS.Type] = struct{}{}
+	}
+
+	remain := &Body{
+		Attributes: b.Attributes,
+		Blocks:     b.Blocks,
+
+		hiddenAttrs:  hiddenAttrs,
+		hiddenBlocks: hiddenBlocks,
+
+		SrcRange: b.SrcRange,
+		EndRange: b.EndRange,
+	}
+
+	return &hcl.BodyContent{
+		Attributes: attrs,
+		Blocks:     blocks,
+
+		MissingItemRange: b.MissingItemRange(),
+	}, remain, diags
+}
+
+func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
+	attrs := make(hcl.Attributes)
+	var diags hcl.Diagnostics
+
+	if len(b.Blocks) > 0 {
+		example := b.Blocks[0]
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Unexpected %q block", example.Type),
+			Detail:   "Blocks are not allowed here.",
+			Subject:  &example.TypeRange,
+		})
+		// we will continue processing anyway, and return the attributes
+		// we are able to find so that certain analyses can still be done
+		// in the face of errors.
+	}
+
+	if b.Attributes == nil {
+		return attrs, diags
+	}
+
+	for name, attr := range b.Attributes {
+		if _, hidden := b.hiddenAttrs[name]; hidden {
+			continue
+		}
+		attrs[name] = attr.AsHCLAttribute()
+	}
+
+	return attrs, diags
+}
+
+func (b *Body) MissingItemRange() hcl.Range {
+	return hcl.Range{
+		Filename: b.SrcRange.Filename,
+		Start:    b.SrcRange.Start,
+		End:      b.SrcRange.Start,
+	}
+}
+
+// Attributes is the collection of attribute definitions within a body.
+type Attributes map[string]*Attribute
+
+func (a Attributes) walkChildNodes(w internalWalkFunc) {
+	for _, attr := range a {
+		w(attr)
+	}
+}
+
+// Range returns the range of some arbitrary point within the set of
+// attributes, or an invalid range if there are no attributes.
+//
+// This is provided only to complete the Node interface, but has no practical
+// use.
+func (a Attributes) Range() hcl.Range {
+	// An attributes doesn't really have a useful range to report, since
+	// it's just a grouping construct. So we'll arbitrarily take the
+	// range of one of the attributes, or produce an invalid range if we have
+	// none. In practice, there's little reason to ask for the range of
+	// an Attributes.
+	for _, attr := range a {
+		return attr.Range()
+	}
+	return hcl.Range{
+		Filename: "",
+	}
+}
+
+// Attribute represents a single attribute definition within a body.
+type Attribute struct {
+	Name string
+	Expr Expression
+
+	SrcRange    hcl.Range
+	NameRange   hcl.Range
+	EqualsRange hcl.Range
+}
+
+func (a *Attribute) walkChildNodes(w internalWalkFunc) {
+	w(a.Expr)
+}
+
+func (a *Attribute) Range() hcl.Range {
+	return a.SrcRange
+}
+
+// AsHCLAttribute returns the attribute data expressed as a *hcl.Attribute.
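+// The syntax-specific EqualsRange is discarded in the conversion, since the
+// generic hcl.Attribute type does not record it.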
+func (a *Attribute) AsHCLAttribute() *hcl.Attribute { + if a == nil { + return nil + } + return &hcl.Attribute{ + Name: a.Name, + Expr: a.Expr, + + Range: a.SrcRange, + NameRange: a.NameRange, + } +} + +// Blocks is the list of nested blocks within a body. +type Blocks []*Block + +func (bs Blocks) walkChildNodes(w internalWalkFunc) { + for _, block := range bs { + w(block) + } +} + +// Range returns the range of some arbitrary point within the list of +// blocks, or an invalid range if there are no blocks. +// +// This is provided only to complete the Node interface, but has no practical +// use. +func (bs Blocks) Range() hcl.Range { + if len(bs) > 0 { + return bs[0].Range() + } + return hcl.Range{ + Filename: "", + } +} + +// Block represents a nested block structure +type Block struct { + Type string + Labels []string + Body *Body + + TypeRange hcl.Range + LabelRanges []hcl.Range + OpenBraceRange hcl.Range + CloseBraceRange hcl.Range +} + +func (b *Block) walkChildNodes(w internalWalkFunc) { + w(b.Body) +} + +func (b *Block) Range() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) +} + +func (b *Block) DefRange() hcl.Range { + return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go new file mode 100644 index 00000000..d8f023ba --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go @@ -0,0 +1,118 @@ +package hclsyntax + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ----------------------------------------------------------------------------- +// The methods in this file are all optional extension methods that serve to +// implement the methods of the same name on *hcl.File when its root body +// is provided by this package. +// ----------------------------------------------------------------------------- + +// BlocksAtPos implements the method of the same name for an *hcl.File that +// is backed by a *Body. +func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block { + list, _ := b.blocksAtPos(pos, true) + return list +} + +// InnermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block { + _, innermost := b.blocksAtPos(pos, false) + return innermost.AsHCLBlock() +} + +// OutermostBlockAtPos implements the method of the same name for an *hcl.File +// that is backed by a *Body. +func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block { + return b.outermostBlockAtPos(pos).AsHCLBlock() +} + +// blocksAtPos is the internal engine of both BlocksAtPos and +// InnermostBlockAtPos, which both need to do the same logic but return a +// differently-shaped result. +// +// list is nil if makeList is false, avoiding an allocation. Innermost is +// always set, and if the returned list is non-nil it will always match the +// final element from that list. 
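+//
+// Note that the returned list contains the converted *hcl.Block values,
+// while innermost remains a syntax-level *Block so that callers can do
+// further syntax-specific analysis on it.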
+func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) {
+	current := b
+
+Blocks:
+	for current != nil {
+		for _, block := range current.Blocks {
+			wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
+			if wholeRange.ContainsPos(pos) {
+				innermost = block
+				if makeList {
+					list = append(list, innermost.AsHCLBlock())
+				}
+				current = block.Body
+				continue Blocks
+			}
+		}
+
+		// If we fall out here then none of the current body's nested blocks
+		// contain the position we are looking for, and so we're done.
+		break
+	}
+
+	return
+}
+
+// outermostBlockAtPos is the internal version of OutermostBlockAtPos that
+// returns a hclsyntax.Block rather than an hcl.Block, allowing for further
+// analysis if necessary.
+func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block {
+	// This is similar to blocksAtPos, but simpler because we know it only
+	// ever needs to search the first level of nested blocks.
+
+	for _, block := range b.Blocks {
+		wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
+		if wholeRange.ContainsPos(pos) {
+			return block
+		}
+	}
+
+	return nil
+}
+
+// AttributeAtPos implements the method of the same name for an *hcl.File
+// that is backed by a *Body.
+func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
+	return b.attributeAtPos(pos).AsHCLAttribute()
+}
+
+// attributeAtPos is the internal version of AttributeAtPos that returns a
+// hclsyntax.Attribute rather than an hcl.Attribute, allowing for further
+// analysis if necessary.
+func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
+	searchBody := b
+	_, block := b.blocksAtPos(pos, false)
+	if block != nil {
+		searchBody = block.Body
+	}
+
+	for _, attr := range searchBody.Attributes {
+		if attr.SrcRange.ContainsPos(pos) {
+			return attr
+		}
+	}
+
+	return nil
+}
+
+// OutermostExprAtPos implements the method of the same name for an *hcl.File
+// that is backed by a *Body.
+func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
+	attr := b.attributeAtPos(pos)
+	if attr == nil {
+		return nil
+	}
+	if !attr.Expr.Range().ContainsPos(pos) {
+		return nil
+	}
+	return attr.Expr
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
new file mode 100644
index 00000000..3d898fd7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
@@ -0,0 +1,320 @@
+package hclsyntax
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Token represents a sequence of bytes from some HCL code that has been
+// tagged with a type and its range within the source file.
+type Token struct {
+	Type  TokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// Tokens is a slice of Token.
+type Tokens []Token
+
+// TokenType is an enumeration used for the Type field on Token.
+type TokenType rune
+
+const (
+	// Single-character tokens are represented by their own character, for
+	// convenience in producing these within the scanner. However, the values
+	// are otherwise arbitrary and just intended to be mnemonic for humans
+	// who might see them in debug output.
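+	//
+	// Tokens for multi-character sequences are likewise assigned visually
+	// similar single runes (for example, "<=" scans as TokenLessThanEq,
+	// whose value is '≤'); these values are mnemonics only and are
+	// unrelated to the bytes actually scanned.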
+ + TokenOBrace TokenType = '{' + TokenCBrace TokenType = '}' + TokenOBrack TokenType = '[' + TokenCBrack TokenType = ']' + TokenOParen TokenType = '(' + TokenCParen TokenType = ')' + TokenOQuote TokenType = '«' + TokenCQuote TokenType = '»' + TokenOHeredoc TokenType = 'H' + TokenCHeredoc TokenType = 'h' + + TokenStar TokenType = '*' + TokenSlash TokenType = '/' + TokenPlus TokenType = '+' + TokenMinus TokenType = '-' + TokenPercent TokenType = '%' + + TokenEqual TokenType = '=' + TokenEqualOp TokenType = '≔' + TokenNotEqual TokenType = '≠' + TokenLessThan TokenType = '<' + TokenLessThanEq TokenType = '≤' + TokenGreaterThan TokenType = '>' + TokenGreaterThanEq TokenType = '≥' + + TokenAnd TokenType = '∧' + TokenOr TokenType = '∨' + TokenBang TokenType = '!' + + TokenDot TokenType = '.' + TokenComma TokenType = ',' + + TokenEllipsis TokenType = '…' + TokenFatArrow TokenType = '⇒' + + TokenQuestion TokenType = '?' + TokenColon TokenType = ':' + + TokenTemplateInterp TokenType = '∫' + TokenTemplateControl TokenType = 'λ' + TokenTemplateSeqEnd TokenType = '∎' + + TokenQuotedLit TokenType = 'Q' // might contain backslash escapes + TokenStringLit TokenType = 'S' // cannot contain backslash escapes + TokenNumberLit TokenType = 'N' + TokenIdent TokenType = 'I' + + TokenComment TokenType = 'C' + + TokenNewline TokenType = '\n' + TokenEOF TokenType = '␄' + + // The rest are not used in the language but recognized by the scanner so + // we can generate good diagnostics in the parser when users try to write + // things that might work in other languages they are familiar with, or + // simply make incorrect assumptions about the HCL language. + + TokenBitwiseAnd TokenType = '&' + TokenBitwiseOr TokenType = '|' + TokenBitwiseNot TokenType = '~' + TokenBitwiseXor TokenType = '^' + TokenStarStar TokenType = '➚' + TokenApostrophe TokenType = '\'' + TokenBacktick TokenType = '`' + TokenSemicolon TokenType = ';' + TokenTabs TokenType = '␉' + TokenInvalid TokenType = '�' + TokenBadUTF8 TokenType = '💩' + TokenQuotedNewline TokenType = '␤' + + // TokenNil is a placeholder for when a token is required but none is + // available, e.g. when reporting errors. The scanner will never produce + // this as part of a token stream. + TokenNil TokenType = '\x00' +) + +func (t TokenType) GoString() string { + return fmt.Sprintf("hclsyntax.%s", t.String()) +} + +type scanMode int + +const ( + scanNormal scanMode = iota + scanTemplate + scanIdentOnly +) + +type tokenAccum struct { + Filename string + Bytes []byte + Pos hcl.Pos + Tokens []Token + StartByte int +} + +func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { + // Walk through our buffer to figure out how much we need to adjust + // the start pos to get our end pos. 
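+	// Column positions are advanced per grapheme cluster (via textseg), not
+	// per byte or per rune, so a multi-byte character still counts as a
+	// single column.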
+
+	start := f.Pos
+	start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
+	start.Byte = startOfs + f.StartByte
+
+	end := start
+	end.Byte = endOfs + f.StartByte
+	b := f.Bytes[startOfs:endOfs]
+	for len(b) > 0 {
+		advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
+		if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
+			end.Line++
+			end.Column = 1
+		} else {
+			end.Column++
+		}
+		b = b[advance:]
+	}
+
+	f.Pos = end
+
+	f.Tokens = append(f.Tokens, Token{
+		Type:  ty,
+		Bytes: f.Bytes[startOfs:endOfs],
+		Range: hcl.Range{
+			Filename: f.Filename,
+			Start:    start,
+			End:      end,
+		},
+	})
+}
+
+type heredocInProgress struct {
+	Marker      []byte
+	StartOfLine bool
+}
+
+func tokenOpensFlushHeredoc(tok Token) bool {
+	if tok.Type != TokenOHeredoc {
+		return false
+	}
+	return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'})
+}
+
+// checkInvalidTokens does a simple pass across the given tokens and generates
+// diagnostics for tokens that should _never_ appear in HCL source. This
+// is intended to avoid the need for the parser to have special support
+// for them all over.
+//
+// Returns a diagnostics with no errors if everything seems acceptable.
+// Otherwise, returns zero or more error diagnostics, though tries to limit
+// repetition of the same information.
+func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
+	var diags hcl.Diagnostics
+
+	toldBitwise := 0
+	toldExponent := 0
+	toldBacktick := 0
+	toldApostrophe := 0
+	toldSemicolon := 0
+	toldTabs := 0
+	toldBadUTF8 := 0
+
+	for _, tok := range tokens {
+		// copy token so it's safe to point to it
+		tok := tok
+
+		switch tok.Type {
+		case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
+			if toldBitwise < 4 {
+				var suggestion string
+				switch tok.Type {
+				case TokenBitwiseAnd:
+					suggestion = " Did you mean boolean AND (\"&&\")?"
+				case TokenBitwiseOr:
+					suggestion = " Did you mean boolean OR (\"||\")?"
+				case TokenBitwiseNot:
+					suggestion = " Did you mean boolean NOT (\"!\")?"
+				}
+
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
+					Subject:  &tok.Range,
+				})
+				toldBitwise++
+			}
+		case TokenStarStar:
+			if toldExponent < 1 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Unsupported operator",
+					Detail:   "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
+					Subject:  &tok.Range,
+				})
+
+				toldExponent++
+			}
+		case TokenBacktick:
+			// Only report for alternating (even) backticks, so we won't report both start and ends of the same
+			// backtick-quoted string.
+			if (toldBacktick % 2) == 0 {
+				diags = append(diags, &hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid character",
+					Detail:   "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<
+#
+# This script uses the unicode spec to generate a Ragel state machine
+# that recognizes unicode alphanumeric characters. It generates 5
+# character classes: uupper, ulower, ualpha, udigit, and ualnum.
+# Currently supported encodings are UTF-8 [default] and UCS-4.
+#
+# Usage: unicode2ragel.rb [options]
+#        -e, --encoding [ucs4 | utf8]     Data encoding
+#        -h, --help                       Show this message
+#
+# This script was originally written as part of the Ferret search
+# engine library.
+# +# Author: Rakan El-Khalil + +require 'optparse' +require 'open-uri' + +ENCODINGS = [ :utf8, :ucs4 ] +ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" } +DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt" +DEFAULT_MACHINE_NAME= "WChar" + +### +# Display vars & default option + +TOTAL_WIDTH = 80 +RANGE_WIDTH = 23 +@encoding = :utf8 +@chart_url = DEFAULT_CHART_URL +machine_name = DEFAULT_MACHINE_NAME +properties = [] +@output = $stdout + +### +# Option parsing + +cli_opts = OptionParser.new do |opts| + opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o| + @encoding = o.downcase.to_sym + end + opts.on("-h", "--help", "Show this message") do + puts opts + exit + end + opts.on("-u", "--url URL", "URL to process") do |o| + @chart_url = o + end + opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o| + machine_name = o + end + opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o| + properties = o + end + opts.on("-o", "--output FILE", "output file") do |o| + @output = File.new(o, "w+") + end +end + +cli_opts.parse(ARGV) +unless ENCODINGS.member? @encoding + puts "Invalid encoding: #{@encoding}" + puts cli_opts + exit +end + +## +# Downloads the document at url and yields every alpha line's hex +# range and description. + +def each_alpha( url, property ) + open( url ) do |file| + file.each_line do |line| + next if line =~ /^#/; + next if line !~ /; #{property} #/; + + range, description = line.split(/;/) + range.strip! + description.gsub!(/.*#/, '').strip! + + if range =~ /\.\./ + start, stop = range.split '..' + else start = stop = range + end + + yield start.hex .. stop.hex, description + end + end +end + +### +# Formats to hex at minimum width + +def to_hex( n ) + r = "%0X" % n + r = "0#{r}" unless (r.length % 2).zero? + r +end + +### +# UCS4 is just a straight hex conversion of the unicode codepoint. + +def to_ucs4( range ) + rangestr = "0x" + to_hex(range.begin) + rangestr << "..0x" + to_hex(range.end) if range.begin != range.end + [ rangestr ] +end + +## +# 0x00 - 0x7f -> 0zzzzzzz[7] +# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6] +# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6] +# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6] + +UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff] + +def to_utf8_enc( n ) + r = 0 + if n <= 0x7f + r = n + elsif n <= 0x7ff + y = 0xc0 | (n >> 6) + z = 0x80 | (n & 0x3f) + r = y << 8 | z + elsif n <= 0xffff + x = 0xe0 | (n >> 12) + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = x << 16 | y << 8 | z + elsif n <= 0x10ffff + w = 0xf0 | (n >> 18) + x = 0x80 | (n >> 12) & 0x3f + y = 0x80 | (n >> 6) & 0x3f + z = 0x80 | n & 0x3f + r = w << 24 | x << 16 | y << 8 | z + end + + to_hex(r) +end + +def from_utf8_enc( n ) + n = n.hex + r = 0 + if n <= 0x7f + r = n + elsif n <= 0xdfff + y = (n >> 8) & 0x1f + z = n & 0x3f + r = y << 6 | z + elsif n <= 0xefffff + x = (n >> 16) & 0x0f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = x << 10 | y << 6 | z + elsif n <= 0xf7ffffff + w = (n >> 24) & 0x07 + x = (n >> 16) & 0x3f + y = (n >> 8) & 0x3f + z = n & 0x3f + r = w << 18 | x << 12 | y << 6 | z + end + r +end + +### +# Given a range, splits it up into ranges that can be continuously +# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff] +# This is not strictly needed since the current [5.1] unicode standard +# doesn't have ranges that straddle utf8 boundaries. This is included +# for completeness as there is no telling if that will ever change. 
+ +def utf8_ranges( range ) + ranges = [] + UTF8_BOUNDARIES.each do |max| + if range.begin <= max + if range.end <= max + ranges << range + return ranges + end + + ranges << (range.begin .. max) + range = (max + 1) .. range.end + end + end + ranges +end + +def build_range( start, stop ) + size = start.size/2 + left = size - 1 + return [""] if size < 1 + + a = start[0..1] + b = stop[0..1] + + ### + # Shared prefix + + if a == b + return build_range(start[2..-1], stop[2..-1]).map do |elt| + "0x#{a} " + elt + end + end + + ### + # Unshared prefix, end of run + + return ["0x#{a}..0x#{b} "] if left.zero? + + ### + # Unshared prefix, not end of run + # Range can be 0x123456..0x56789A + # Which is equivalent to: + # 0x123456 .. 0x12FFFF + # 0x130000 .. 0x55FFFF + # 0x560000 .. 0x56789A + + ret = [] + ret << build_range(start, a + "FF" * left) + + ### + # Only generate middle range if need be. + + if a.hex+1 != b.hex + max = to_hex(b.hex - 1) + max = "FF" if b == "FF" + ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left + end + + ### + # Don't generate last range if it is covered by first range + + ret << build_range(b + "00" * left, stop) unless b == "FF" + ret.flatten! +end + +def to_utf8( range ) + utf8_ranges( range ).map do |r| + begin_enc = to_utf8_enc(r.begin) + end_enc = to_utf8_enc(r.end) + build_range begin_enc, end_enc + end.flatten! +end + +## +# Perform a 3-way comparison of the number of codepoints advertised by +# the unicode spec for the given range, the originally parsed range, +# and the resulting utf8 encoded range. + +def count_codepoints( code ) + code.split(' ').inject(1) do |acc, elt| + if elt =~ /0x(.+)\.\.0x(.+)/ + if @encoding == :utf8 + acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1) + else + acc * ($2.hex - $1.hex + 1) + end + else + acc + end + end +end + +def is_valid?( range, desc, codes ) + spec_count = 1 + spec_count = $1.to_i if desc =~ /\[(\d+)\]/ + range_count = range.end - range.begin + 1 + + sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) } + sum == spec_count and sum == range_count +end + +## +# Generate the state maching to stdout + +def generate_machine( name, property ) + pipe = " " + @output.puts " #{name} = " + each_alpha( @chart_url, property ) do |range, desc| + + codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range) + + #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless + # is_valid? range, desc, codes + + range_width = codes.map { |a| a.size }.max + range_width = RANGE_WIDTH if range_width < RANGE_WIDTH + + desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11 + desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH + + if desc.size > desc_width + desc = desc[0..desc_width - 4] + "..." + end + + codes.each_with_index do |r, idx| + desc = "" unless idx.zero? + code = "%-#{range_width}s" % r + @output.puts " #{pipe} #{code} ##{desc}" + pipe = "|" + end + end + @output.puts " ;" + @output.puts "" +end + +@output.puts < 0 && ret[0] == '.' { + ret = ret[1:] + } + return ret +} + +func navigationStepsRev(v node, offset int) []string { + switch tv := v.(type) { + case *objectVal: + // Do any of our properties have an object that contains the target + // offset? + for _, attr := range tv.Attrs { + k := attr.Name + av := attr.Value + + switch av.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if av.Range().ContainsOffset(offset) { + return append(navigationStepsRev(av, offset), "."+k) + } + } + case *arrayVal: + // Do any of our elements contain the target offset? 
+ for i, elem := range tv.Values { + + switch elem.(type) { + case *objectVal, *arrayVal: + // okay + default: + continue + } + + if elem.Range().ContainsOffset(offset) { + return append(navigationStepsRev(elem, offset), fmt.Sprintf("[%d]", i)) + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go new file mode 100644 index 00000000..d368ea8f --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go @@ -0,0 +1,496 @@ +package json + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { + tokens := scan(buf, pos{ + Filename: filename, + Pos: hcl.Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + }) + p := newPeeker(tokens) + node, diags := parseValue(p) + if len(diags) == 0 && p.Peek().Type != tokenEOF { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous data after value", + Detail: "Extra characters appear after the JSON value.", + Subject: p.Peek().Range.Ptr(), + }) + } + return node, diags +} + +func parseValue(p *peeker) (node, hcl.Diagnostics) { + tok := p.Peek() + + wrapInvalid := func(n node, diags hcl.Diagnostics) (node, hcl.Diagnostics) { + if n != nil { + return n, diags + } + return invalidVal{tok.Range}, diags + } + + switch tok.Type { + case tokenBraceO: + return wrapInvalid(parseObject(p)) + case tokenBrackO: + return wrapInvalid(parseArray(p)) + case tokenNumber: + return wrapInvalid(parseNumber(p)) + case tokenString: + return wrapInvalid(parseString(p)) + case tokenKeyword: + return wrapInvalid(parseKeyword(p)) + case tokenBraceC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing JSON value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenBrackC: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing array element value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + case tokenEOF: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Missing value", + Detail: "The JSON data ends prematurely.", + Subject: &tok.Range, + }, + }) + default: + return wrapInvalid(nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid start of value", + Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", + Subject: &tok.Range, + }, + }) + } +} + +func tokenCanStartValue(tok token) bool { + switch tok.Type { + case tokenBraceO, tokenBrackO, tokenNumber, tokenString, tokenKeyword: + return true + default: + return false + } +} + +func parseObject(p *peeker) (node, hcl.Diagnostics) { + var diags hcl.Diagnostics + + open := p.Read() + attrs := []*objectAttr{} + + // recover is used to shift the peeker to what seems to be the end of + // our object, so that when we encounter an error we leave the peeker + // at a reasonable point in the token stream to continue parsing. 
+ recover := func(tok token) { + open := 1 + for { + switch tok.Type { + case tokenBraceO: + open++ + case tokenBraceC: + open-- + if open <= 1 { + return + } + case tokenEOF: + // Ran out of source before we were able to recover, + // so we'll bail here and let the caller deal with it. + return + } + tok = p.Read() + } + } + +Token: + for { + if p.Peek().Type == tokenBraceC { + break Token + } + + keyNode, keyDiags := parseValue(p) + diags = diags.Extend(keyDiags) + if keyNode == nil { + return nil, diags + } + + keyStrNode, ok := keyNode.(*stringVal) + if !ok { + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object property name", + Detail: "A JSON object property name must be a string", + Subject: keyNode.StartRange().Ptr(), + }) + } + + key := keyStrNode.Value + + colon := p.Read() + if colon.Type != tokenColon { + recover(colon) + + if colon.Type == tokenBraceC || colon.Type == tokenComma { + // Catch common mistake of using braces instead of brackets + // for an object. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing object value", + Detail: "A JSON object attribute must have a value, introduced by a colon.", + Subject: &colon.Range, + }) + } + + if colon.Type == tokenEquals { + // Possible confusion with native HCL syntax. + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", + Subject: &colon.Range, + }) + } + + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing property value colon", + Detail: "A colon must appear between an object property's name and its value.", + Subject: &colon.Range, + }) + } + + valNode, valDiags := parseValue(p) + diags = diags.Extend(valDiags) + if valNode == nil { + return nil, diags + } + + attrs = append(attrs, &objectAttr{ + Name: key, + Value: valNode, + NameRange: keyStrNode.SrcRange, + }) + + switch p.Peek().Type { + case tokenComma: + comma := p.Read() + if p.Peek().Type == tokenBraceC { + // Special error message for this common mistake + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Trailing comma in object", + Detail: "JSON does not permit a trailing comma after the final property in an object.", + Subject: &comma.Range, + }) + } + continue Token + case tokenEOF: + return nil, diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unclosed object", + Detail: "No closing brace was found for this JSON object.", + Subject: &open.Range, + }) + case tokenBrackC: + // Consume the bracket anyway, so that we don't return with the peeker + // at a strange place. 
+			p.Read()
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Mismatched braces",
+				Detail:   "A JSON object must be closed with a brace, not a bracket.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenBraceC:
+			break Token
+		default:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing attribute separator comma",
+				Detail:   "A comma must appear between each property definition in an object.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		}
+
+	}
+
+	close := p.Read()
+	return &objectVal{
+		Attrs:      attrs,
+		SrcRange:   hcl.RangeBetween(open.Range, close.Range),
+		OpenRange:  open.Range,
+		CloseRange: close.Range,
+	}, diags
+}
+
+func parseArray(p *peeker) (node, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	open := p.Read()
+	vals := []node{}
+
+	// recover is used to shift the peeker to what seems to be the end of
+	// our array, so that when we encounter an error we leave the peeker
+	// at a reasonable point in the token stream to continue parsing.
+	recover := func(tok token) {
+		open := 1
+		for {
+			switch tok.Type {
+			case tokenBrackO:
+				open++
+			case tokenBrackC:
+				open--
+				if open <= 1 {
+					return
+				}
+			case tokenEOF:
+				// Ran out of source before we were able to recover,
+				// so we'll bail here and let the caller deal with it.
+				return
+			}
+			tok = p.Read()
+		}
+	}
+
+Token:
+	for {
+		if p.Peek().Type == tokenBrackC {
+			break Token
+		}
+
+		valNode, valDiags := parseValue(p)
+		diags = diags.Extend(valDiags)
+		if valNode == nil {
+			return nil, diags
+		}
+
+		vals = append(vals, valNode)
+
+		switch p.Peek().Type {
+		case tokenComma:
+			comma := p.Read()
+			if p.Peek().Type == tokenBrackC {
+				// Special error message for this common mistake
+				return nil, diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Trailing comma in array",
+					Detail:   "JSON does not permit a trailing comma after the final value in an array.",
+					Subject:  &comma.Range,
+				})
+			}
+			continue Token
+		case tokenColon:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid array value",
+				Detail:   "A colon is not used to introduce values in a JSON array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenEOF:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Unclosed array",
+				Detail:   "No closing bracket was found for this JSON array.",
+				Subject:  &open.Range,
+			})
+		case tokenBraceC:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Mismatched brackets",
+				Detail:   "A JSON array must be closed with a bracket, not a brace.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		case tokenBrackC:
+			break Token
+		default:
+			recover(p.Read())
+			return nil, diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Missing attribute separator comma",
+				Detail:   "A comma must appear between each value in an array.",
+				Subject:  p.Peek().Range.Ptr(),
+			})
+		}
+
+	}
+
+	close := p.Read()
+	return &arrayVal{
+		Values:    vals,
+		SrcRange:  hcl.RangeBetween(open.Range, close.Range),
+		OpenRange: open.Range,
+	}, diags
+}
+
+func parseNumber(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+
+	// Use encoding/json to validate the number syntax.
+	// TODO: Do this more directly to produce better diagnostics.
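+	// Unmarshalling into json.Number checks the syntax without forcing a
+	// lossy conversion to float64.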
+	var num json.Number
+	err := json.Unmarshal(tok.Bytes, &num)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   fmt.Sprintf("There is a syntax error in the given JSON number."),
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	// We want to guarantee that we parse numbers the same way as cty (and thus
+	// native syntax HCL) would here, so we'll use the cty parser even though
+	// in most other cases we don't actually introduce cty concepts until
+	// decoding time. We'll unwrap the parsed float immediately afterwards, so
+	// the cty value is just a temporary helper.
+	nv, err := cty.ParseNumberVal(string(num))
+	if err != nil {
+		// Should never happen if above passed, since JSON numbers are a subset
+		// of what cty can parse...
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid JSON number",
+				Detail:   fmt.Sprintf("There is a syntax error in the given JSON number."),
+				Subject:  &tok.Range,
+			},
+		}
+	}
+
+	return &numberVal{
+		Value:    nv.AsBigFloat(),
+		SrcRange: tok.Range,
+	}, nil
+}
+
+func parseString(p *peeker) (node, hcl.Diagnostics) {
+	tok := p.Read()
+	var str string
+	err := json.Unmarshal(tok.Bytes, &str)
+
+	if err != nil {
+		var errRange hcl.Range
+		if serr, ok := err.(*json.SyntaxError); ok {
+			errOfs := serr.Offset
+			errPos := tok.Range.Start
+			errPos.Byte += int(errOfs)
+
+			// TODO: Use the byte offset to properly count unicode
+			// characters for the column, and mark the whole of the
+			// character that was wrong as part of our range.
+			errPos.Column += int(errOfs)
+
+			errEndPos := errPos
+			errEndPos.Byte++
+			errEndPos.Column++
+
+			errRange = hcl.Range{
+				Filename: tok.Range.Filename,
+				Start:    errPos,
+				End:      errEndPos,
+			}
+		} else {
+			errRange = tok.Range
+		}
+
+		var contextRange *hcl.Range
+		if errRange != tok.Range {
+			contextRange = &tok.Range
+		}
+
+		// FIXME: Eventually we should parse strings directly here so
+		// we can produce a more useful error message in the face of things
+		// such as invalid escapes, etc.
+ return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON string", + Detail: fmt.Sprintf("There is a syntax error in the given JSON string."), + Subject: &errRange, + Context: contextRange, + }, + } + } + + return &stringVal{ + Value: str, + SrcRange: tok.Range, + }, nil +} + +func parseKeyword(p *peeker) (node, hcl.Diagnostics) { + tok := p.Read() + s := string(tok.Bytes) + + switch s { + case "true": + return &booleanVal{ + Value: true, + SrcRange: tok.Range, + }, nil + case "false": + return &booleanVal{ + Value: false, + SrcRange: tok.Range, + }, nil + case "null": + return &nullVal{ + SrcRange: tok.Range, + }, nil + case "undefined", "NaN", "Infinity": + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("The JavaScript identifier %q cannot be used in JSON.", s), + Subject: &tok.Range, + }, + } + default: + var dym string + if suggest := keywordSuggestion(s); suggest != "" { + dym = fmt.Sprintf(" Did you mean %q?", suggest) + } + + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Invalid JSON keyword", + Detail: fmt.Sprintf("%q is not a valid JSON keyword.%s", s, dym), + Subject: &tok.Range, + }, + } + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go new file mode 100644 index 00000000..fc7bbf58 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/peeker.go @@ -0,0 +1,25 @@ +package json + +type peeker struct { + tokens []token + pos int +} + +func newPeeker(tokens []token) *peeker { + return &peeker{ + tokens: tokens, + pos: 0, + } +} + +func (p *peeker) Peek() token { + return p.tokens[p.pos] +} + +func (p *peeker) Read() token { + ret := p.tokens[p.pos] + if ret.Type != tokenEOF { + p.pos++ + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/public.go b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go new file mode 100644 index 00000000..2728aa13 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/public.go @@ -0,0 +1,94 @@ +package json + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/hcl2/hcl" +) + +// Parse attempts to parse the given buffer as JSON and, if successful, returns +// a hcl.File for the HCL configuration represented by it. +// +// This is not a generic JSON parser. Instead, it deals only with the profile +// of JSON used to express HCL configuration. +// +// The returned file is valid only if the returned diagnostics returns false +// from its HasErrors method. If HasErrors returns true, the file represents +// the subset of data that was able to be parsed, which may be none. +func Parse(src []byte, filename string) (*hcl.File, hcl.Diagnostics) { + rootNode, diags := parseFileContent(src, filename) + + switch rootNode.(type) { + case *objectVal, *arrayVal: + // okay + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Root value must be object", + Detail: "The root value in a JSON-based configuration must be either a JSON object or a JSON array of objects.", + Subject: rootNode.StartRange().Ptr(), + }) + + // Since we've already produced an error message for this being + // invalid, we'll return an empty placeholder here so that trying to + // extract content from our root body won't produce a redundant + // error saying the same thing again in more general terms. 
+		fakePos := hcl.Pos{
+			Byte:   0,
+			Line:   1,
+			Column: 1,
+		}
+		fakeRange := hcl.Range{
+			Filename: filename,
+			Start:    fakePos,
+			End:      fakePos,
+		}
+		rootNode = &objectVal{
+			Attrs:     []*objectAttr{},
+			SrcRange:  fakeRange,
+			OpenRange: fakeRange,
+		}
+	}
+
+	file := &hcl.File{
+		Body: &body{
+			val: rootNode,
+		},
+		Bytes: src,
+		Nav:   navigation{rootNode},
+	}
+	return file, diags
+}
+
+// ParseFile is a convenience wrapper around Parse that first attempts to load
+// data from the given filename, passing the result to Parse if successful.
+//
+// If the file cannot be read, an error diagnostic with nil context is returned.
+func ParseFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to open file",
+				Detail:   fmt.Sprintf("The file %q could not be opened.", filename),
+			},
+		}
+	}
+	defer f.Close()
+
+	src, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to read file",
+				Detail:   fmt.Sprintf("The file %q was opened, but an error occurred while reading it.", filename),
+			},
+		}
+	}
+
+	return Parse(src, filename)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
new file mode 100644
index 00000000..da728842
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -0,0 +1,297 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/apparentlymart/go-textseg/textseg"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+//go:generate stringer -type tokenType scanner.go
+type tokenType rune
+
+const (
+	tokenBraceO  tokenType = '{'
+	tokenBraceC  tokenType = '}'
+	tokenBrackO  tokenType = '['
+	tokenBrackC  tokenType = ']'
+	tokenComma   tokenType = ','
+	tokenColon   tokenType = ':'
+	tokenKeyword tokenType = 'K'
+	tokenString  tokenType = 'S'
+	tokenNumber  tokenType = 'N'
+	tokenEOF     tokenType = '␄'
+	tokenInvalid tokenType = 0
+	tokenEquals  tokenType = '=' // used only for reminding the user of JSON syntax
+)
+
+type token struct {
+	Type  tokenType
+	Bytes []byte
+	Range hcl.Range
+}
+
+// scan returns the primary tokens for the given JSON buffer in sequence.
+//
+// The responsibility of this pass is to just mark the slices of the buffer
+// as being of various types. It is lax in how it interprets the multi-byte
+// token types keyword, string and number, preferring to capture erroneous
+// extra bytes that we presume the user intended to be part of the token
+// so that we can generate more helpful diagnostics in the parser.
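+//
+// The returned stream normally ends with a tokenEOF token, but scanning
+// stops early after emitting a tokenInvalid token, since the parser will
+// not proceed beyond that point anyway.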
+func scan(buf []byte, start pos) []token { + var tokens []token + p := start + for { + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + buf, p = skipWhitespace(buf, p) + + if len(buf) == 0 { + tokens = append(tokens, token{ + Type: tokenEOF, + Bytes: nil, + Range: posRange(p, p), + }) + return tokens + } + + start = p + + first := buf[0] + switch { + case first == '{' || first == '}' || first == '[' || first == ']' || first == ',' || first == ':' || first == '=': + p.Pos.Column++ + p.Pos.Byte++ + tokens = append(tokens, token{ + Type: tokenType(first), + Bytes: buf[0:1], + Range: posRange(start, p), + }) + buf = buf[1:] + case first == '"': + var tokBuf []byte + tokBuf, buf, p = scanString(buf, p) + tokens = append(tokens, token{ + Type: tokenString, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartNumber(first): + var tokBuf []byte + tokBuf, buf, p = scanNumber(buf, p) + tokens = append(tokens, token{ + Type: tokenNumber, + Bytes: tokBuf, + Range: posRange(start, p), + }) + case byteCanStartKeyword(first): + var tokBuf []byte + tokBuf, buf, p = scanKeyword(buf, p) + tokens = append(tokens, token{ + Type: tokenKeyword, + Bytes: tokBuf, + Range: posRange(start, p), + }) + default: + tokens = append(tokens, token{ + Type: tokenInvalid, + Bytes: buf[:1], + Range: start.Range(1, 1), + }) + // If we've encountered an invalid then we might as well stop + // scanning since the parser won't proceed beyond this point. + return tokens + } + } +} + +func byteCanStartNumber(b byte) bool { + switch b { + // We are slightly more tolerant than JSON requires here since we + // expect the parser will make a stricter interpretation of the + // number bytes, but we specifically don't allow 'e' or 'E' here + // since we want the scanner to treat that as the start of an + // invalid keyword instead, to produce more intelligible error messages. + case '-', '+', '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return true + default: + return false + } +} + +func scanNumber(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't check that the sequence of digit-ish bytes is + // in a valid order. The parser must do this when decoding a number + // token. + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case '-', '+', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func byteCanStartKeyword(b byte) bool { + switch { + // We allow any sequence of alphabetical characters here, even though + // JSON is more constrained, so that we can collect what we presume + // the user intended to be a single keyword and then check its validity + // in the parser, where we can generate better diagnostics. + // So e.g. we want to be able to say: + // unrecognized keyword "True". Did you mean "true"? + case isAlphabetical(b): + return true + default: + return false + } +} + +func scanKeyword(buf []byte, start pos) ([]byte, []byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + b := buf[i] + switch { + case isAlphabetical(b) || b == '_': + p.Pos.Byte++ + p.Pos.Column++ + default: + break Byte + } + } + return buf[:i], buf[i:], p +} + +func scanString(buf []byte, start pos) ([]byte, []byte, pos) { + // The scanner doesn't validate correct use of escapes, etc. 
It pays + // attention to escapes only for the purpose of identifying the closing + // quote character. It's the parser's responsibility to do proper + // validation. + // + // The scanner also doesn't specifically detect unterminated string + // literals, though they can be identified in the parser by checking if + // the final byte in a string token is the double-quote character. + + // Skip the opening quote symbol + i := 1 + p := start + p.Pos.Byte++ + p.Pos.Column++ + escaping := false +Byte: + for i < len(buf) { + b := buf[i] + + switch { + case b == '\\': + escaping = !escaping + p.Pos.Byte++ + p.Pos.Column++ + i++ + case b == '"': + p.Pos.Byte++ + p.Pos.Column++ + i++ + if !escaping { + break Byte + } + escaping = false + case b < 32: + break Byte + default: + // Advance by one grapheme cluster, so that we consider each + // grapheme to be a "column". + // Ignoring error because this scanner cannot produce errors. + advance, _, _ := textseg.ScanGraphemeClusters(buf[i:], true) + + p.Pos.Byte += advance + p.Pos.Column++ + i += advance + + escaping = false + } + } + return buf[:i], buf[i:], p +} + +func skipWhitespace(buf []byte, start pos) ([]byte, pos) { + var i int + p := start +Byte: + for i = 0; i < len(buf); i++ { + switch buf[i] { + case ' ': + p.Pos.Byte++ + p.Pos.Column++ + case '\n': + p.Pos.Byte++ + p.Pos.Column = 1 + p.Pos.Line++ + case '\r': + // For the purpose of line/column counting we consider a + // carriage return to take up no space, assuming that it will + // be paired up with a newline (on Windows, for example) that + // will account for both of them. + p.Pos.Byte++ + case '\t': + // We arbitrarily count a tab as if it were two spaces, because + // we need to choose _some_ number here. This means any system + // that renders code on-screen with markers must itself treat + // tabs as a pair of spaces for rendering purposes, or instead + // use the byte offset and back into its own column position. + p.Pos.Byte++ + p.Pos.Column += 2 + default: + break Byte + } + } + return buf[i:], p +} + +type pos struct { + Filename string + Pos hcl.Pos +} + +func (p *pos) Range(byteLen, charLen int) hcl.Range { + start := p.Pos + end := p.Pos + end.Byte += byteLen + end.Column += charLen + return hcl.Range{ + Filename: p.Filename, + Start: start, + End: end, + } +} + +func posRange(start, end pos) hcl.Range { + return hcl.Range{ + Filename: start.Filename, + Start: start.Pos, + End: end.Pos, + } +} + +func (t token) GoString() string { + return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) +} + +func isAlphabetical(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md new file mode 100644 index 00000000..dac5729d --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md @@ -0,0 +1,405 @@ +# HCL JSON Syntax Specification + +This is the specification for the JSON serialization for hcl. HCL is a system +for defining configuration languages for applications. The HCL information +model is designed to support multiple concrete syntaxes for configuration, +and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) +by being easy to machine-generate, whereas the native syntax is oriented +towards human authoring and maintenance + +This syntax is defined in terms of JSON as defined in +[RFC7159](https://tools.ietf.org/html/rfc7159). 
As such it inherits the JSON
+grammar as-is, and merely defines a specific methodology for interpreting
+JSON constructs into HCL structural elements and expressions.
+
+This mapping is defined such that valid JSON-serialized HCL input can be
+_produced_ using standard JSON implementations in various programming languages.
+_Parsing_ such JSON has some additional constraints beyond what is normally
+supported by JSON parsers, so a specialized parser may be required that
+is able to:
+
+- Preserve the relative ordering of properties defined in an object.
+- Preserve multiple definitions of the same property name.
+- Preserve numeric values to the precision required by the number type
+  in [the HCL syntax-agnostic information model](../spec.md).
+- Retain source location information for parsed tokens/constructs in order
+  to produce good error messages.
+
+## Structural Elements
+
+[The HCL syntax-agnostic information model](../spec.md) defines a _body_ as an
+abstract container for attribute definitions and child blocks. A body is
+represented in JSON as either a single JSON object or a JSON array of objects.
+
+Body processing is in terms of JSON object properties, visited in the order
+they appear in the input. Where a body is represented by a single JSON object,
+the properties of that object are visited in order. Where a body is
+represented by a JSON array, each of its elements is visited in order and
+each element has its properties visited in order. If any element of the array
+is not a JSON object then the input is erroneous.
+
+When a body is being processed in the _dynamic attributes_ mode, the allowance
+of a JSON array in the previous paragraph does not apply and instead a single
+JSON object is always required.
+
+As defined in the language-agnostic model, body processing is in terms
+of a schema which provides context for interpreting the body's content. For
+JSON bodies, the schema is crucial to allow differentiation of attribute
+definitions and block definitions, both of which are represented via object
+properties.
+
+The special property name `"//"`, when used in an object representing a HCL
+body, is parsed and ignored. A property with this name can be used to
+include human-readable comments. (This special property name is _not_
+processed in this way for any _other_ HCL constructs that are represented as
+JSON objects.)
+
+### Attributes
+
+Where the given schema describes an attribute with a given name, the object
+property with the matching name — if present — serves as the attribute's
+definition.
+
+When a body is being processed in the _dynamic attributes_ mode, each object
+property serves as an attribute definition for the attribute whose name
+matches the property name.
+
+The value of an attribute definition property is interpreted as an _expression_,
+as described in a later section.
+
+Given a schema that calls for an attribute named "foo", a JSON object like
+the following provides a definition for that attribute:
+
+```json
+{
+  "foo": "bar baz"
+}
+```
+
+### Blocks
+
+Where the given schema describes a block with a given type name, each object
+property with the matching name serves as a definition of zero or more blocks
+of that type.
+
+Processing of child blocks is in terms of nested JSON objects and arrays.
+If the schema defines one or more _labels_ for the block type, a nested JSON
+object or JSON array of objects is required for each labelling level.
These +are flattened to a single ordered sequence of object properties using the +same algorithm as for body content as defined above. Each object property +serves as a label value at the corresponding level. + +After any labelling levels, the next nested value is either a JSON object +representing a single block body, or a JSON array of JSON objects that each +represent a single block body. Use of an array accommodates the definition +of multiple blocks that have identical type and labels. + +Given a schema that calls for a block type named "foo" with no labels, the +following JSON objects are all valid definitions of zero or more blocks of this +type: + +```json +{ + "foo": { + "child_attr": "baz" + } +} +``` + +```json +{ + "foo": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] +} +``` + +```json +{ + "foo": [] +} +``` + +The first of these defines a single child block of type "foo". The second +defines _two_ such blocks. The final example shows a degenerate definition +of zero blocks, though generators should prefer to omit the property entirely +in this scenario. + +Given a schema that calls for a block type named "foo" with _two_ labels, the +extra label levels must be represented as objects or arrays of objects as in +the following examples: + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "boz": { + "baz": { + "child_attr": "baz" + } + } + } +} +``` + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "boz": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } +} +``` + +```json +{ + "foo": [ + { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + } + }, + { + "bar": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } + ] +} +``` + +```json +{ + "foo": { + "bar": { + "baz": { + "child_attr": "baz" + }, + "boz": { + "child_attr": "baz" + } + }, + "bar": { + "baz": [ + { + "child_attr": "baz" + }, + { + "child_attr": "boz" + } + ] + } + } +} +``` + +Arrays can be introduced at either the label definition or block body +definition levels to define multiple definitions of the same block type +or labels while preserving order. + +A JSON HCL parser _must_ support duplicate definitions of the same property +name within a single object, preserving all of them and the relative ordering +between them. The array-based forms are also required so that JSON HCL +configurations can be produced with JSON producing libraries that are not +able to preserve property definition order and multiple definitions of +the same property. + +## Expressions + +JSON lacks a native expression syntax, so the HCL JSON syntax instead defines +a mapping for each of the JSON value types, including a special mapping for +strings that allows optional use of arbitrary expressions. + +### Objects + +When interpreted as an expression, a JSON object represents a value of a HCL +object type. + +Each property of the JSON object represents an attribute of the HCL object type. +The property name string given in the JSON input is interpreted as a string +expression as described below, and its result is converted to string as defined +by the syntax-agnostic information model. If such a conversion is not possible, +an error is produced and evaluation fails. 
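+
+As a hypothetical illustration (assuming full expression mode and a variable
+`prefix` in scope), the following object derives its single attribute name
+from a template, whose result is then converted to string:
+
+```json
+{
+  "${prefix}_timeout": 30
+}
+```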
+
+An instance of the constructed object type is then created, whose values
+are interpreted by again recursively applying the mapping rules defined in
+this section to each of the property values.
+
+If any evaluated property name strings produce null values, an error is
+produced and evaluation fails. If any produce _unknown_ values, the _entire
+object's_ result is an unknown value of the dynamic pseudo-type, signalling
+that the type of the object cannot be determined.
+
+It is an error to define the same property name multiple times within a single
+JSON object interpreted as an expression. In full expression mode, this
+constraint applies to the name expression results after conversion to string,
+rather than the raw string that may contain interpolation expressions.
+
+### Arrays
+
+When interpreted as an expression, a JSON array represents a value of a HCL
+tuple type.
+
+Each element of the JSON array represents an element of the HCL tuple type.
+The tuple type is constructed by enumerating the JSON array elements, creating
+for each an element whose type is the result of recursively applying the
+expression mapping rules. Correspondence is preserved between the array element
+indices and the tuple element indices.
+
+An instance of the constructed tuple type is then created, whose values are
+interpreted by again recursively applying the mapping rules defined in this
+section.
+
+### Numbers
+
+When interpreted as an expression, a JSON number represents a HCL number value.
+
+HCL numbers are arbitrary-precision decimal values, so a JSON HCL parser must
+be able to translate exactly the value given to a number of corresponding
+precision, within the constraints set by the HCL syntax-agnostic information
+model.
+
+In practice, off-the-shelf JSON serializers often do not support customizing the
+processing of numbers, and instead force processing as 32-bit or 64-bit
+floating point values.
+
+A _producer_ of JSON HCL that uses such a serializer can provide numeric values
+as JSON strings where they have precision too great for representation in the
+serializer's chosen numeric type in situations where the result will be
+converted to number (using the standard conversion rules) by a calling
+application.
+
+Alternatively, for expressions that are evaluated in full expression mode an
+embedded template interpolation can be used to faithfully represent a number,
+such as `"${1e150}"`, which will then be evaluated by the underlying HCL native
+syntax expression evaluator.
+
+### Boolean Values
+
+The JSON boolean values `true` and `false`, when interpreted as expressions,
+represent the corresponding HCL boolean values.
+
+### The Null Value
+
+The JSON value `null`, when interpreted as an expression, represents a
+HCL null value of the dynamic pseudo-type.
+
+### Strings
+
+When interpreted as an expression, a JSON string may be interpreted in one of
+two ways depending on the evaluation mode.
+
+If evaluating in literal-only mode (as defined by the syntax-agnostic
+information model) the literal string is interpreted directly as a HCL string
+value, by directly using the exact sequence of unicode characters represented.
+Template interpolations and directives MUST NOT be processed in this mode,
+allowing any characters that appear as introduction sequences to pass through
+literally:
+
+```json
+"Hello world! Template sequences like ${ are not interpreted here."
+```
+
+When evaluating in full expression mode (again, as defined by the syntax-
+agnostic information model) the literal string is instead interpreted as a
+_standalone template_ in the HCL Native Syntax. The expression evaluation
+result is then the direct result of evaluating that template with the current
+variable scope and function table.
+
+```json
+"Hello, ${name}! Template sequences are interpreted in full expression mode."
+```
+
+In particular the _Template Interpolation Unwrapping_ requirement from the
+HCL native syntax specification must be implemented, allowing the use of
+single-interpolation templates to represent expressions that would not
+otherwise be representable in JSON, such as the following example where
+the result must be a number, rather than a string representation of a number:
+
+```json
+"${ a + b }"
+```
+
+## Static Analysis
+
+The HCL static analysis operations are implemented for JSON values that
+represent expressions, as described in the following sections.
+
+Due to the limited expressive power of the JSON syntax alone, these static
+analysis functions are used in place of normal expression evaluation to
+provide additional context for how a JSON value is to be interpreted, which
+means that a static analysis can result in a different interpretation of a
+given expression than normal evaluation.
+
+### Static List
+
+An expression interpreted as a static list must be a JSON array. Each of the
+values in the array is interpreted as an expression and returned.
+
+### Static Map
+
+An expression interpreted as a static map must be a JSON object. Each of the
+key/value pairs in the object is presented as a pair of expressions. Since
+object property names are always strings, evaluating the key expression with
+a non-`nil` evaluation context will evaluate any template sequences given
+in the property name.
+
+### Static Call
+
+An expression interpreted as a static call must be a string. The content of
+the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then the static call analysis is delegated to
+that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static call analysis is not supported.
+
+### Static Traversal
+
+An expression interpreted as a static traversal must be a string. The content
+of the string is interpreted as a native syntax expression (not a _template_,
+unlike normal evaluation) and then static traversal analysis is delegated
+to that expression.
+
+If the original expression is not a string or its contents cannot be parsed
+as a native syntax expression then static traversal analysis is not supported.
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
new file mode 100644
index 00000000..74847c79
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
@@ -0,0 +1,637 @@
+package json
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// body is the implementation of "Body" used for files processed with the JSON
+// parser.
+type body struct {
+	val node
+
+	// If non-nil, the keys of this map cause the corresponding attributes to
+	// be treated as non-existing.
This is used when Body.PartialContent is + // called, to produce the "remaining content" Body. + hiddenAttrs map[string]struct{} +} + +// expression is the implementation of "Expression" used for files processed +// with the JSON parser. +type expression struct { + src node +} + +func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, newBody, diags := b.PartialContent(schema) + + hiddenAttrs := newBody.(*body).hiddenAttrs + + var nameSuggestions []string + for _, attrS := range schema.Attributes { + if _, ok := hiddenAttrs[attrS.Name]; !ok { + // Only suggest an attribute name if we didn't use it already. + nameSuggestions = append(nameSuggestions, attrS.Name) + } + } + for _, blockS := range schema.Blocks { + // Blocks can appear multiple times, so we'll suggest their type + // names regardless of whether they've already been used. + nameSuggestions = append(nameSuggestions, blockS.Type) + } + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + for _, attr := range jsonAttrs { + k := attr.Name + if k == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, ok := hiddenAttrs[k]; !ok { + suggestion := nameSuggestion(k, nameSuggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Extraneous JSON object property", + Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion), + Subject: &attr.NameRange, + Context: attr.Range().Ptr(), + }) + } + } + + return content, diags +} + +func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + + jsonAttrs, attrDiags := b.collectDeepAttrs(b.val, nil) + diags = append(diags, attrDiags...) + + usedNames := map[string]struct{}{} + if b.hiddenAttrs != nil { + for k := range b.hiddenAttrs { + usedNames[k] = struct{}{} + } + } + + content := &hcl.BodyContent{ + Attributes: map[string]*hcl.Attribute{}, + Blocks: nil, + + MissingItemRange: b.MissingItemRange(), + } + + // Create some more convenient data structures for our work below. 
+ attrSchemas := map[string]hcl.AttributeSchema{} + blockSchemas := map[string]hcl.BlockHeaderSchema{} + for _, attrS := range schema.Attributes { + attrSchemas[attrS.Name] = attrS + } + for _, blockS := range schema.Blocks { + blockSchemas[blockS.Type] = blockS + } + + for _, jsonAttr := range jsonAttrs { + attrName := jsonAttr.Name + if _, used := b.hiddenAttrs[attrName]; used { + continue + } + + if attrS, defined := attrSchemas[attrName]; defined { + if existing, exists := content.Attributes[attrName]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range), + Subject: &jsonAttr.NameRange, + Context: jsonAttr.Range().Ptr(), + }) + continue + } + + content.Attributes[attrS.Name] = &hcl.Attribute{ + Name: attrS.Name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + usedNames[attrName] = struct{}{} + + } else if blockS, defined := blockSchemas[attrName]; defined { + bv := jsonAttr.Value + blockDiags := b.unpackBlock(bv, blockS.Type, &jsonAttr.NameRange, blockS.LabelNames, nil, nil, &content.Blocks) + diags = append(diags, blockDiags...) + usedNames[attrName] = struct{}{} + } + + // We ignore anything that isn't defined because that's the + // PartialContent contract. The Content method will catch leftovers. + } + + // Make sure we got all the required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + if _, defined := content.Attributes[attrS.Name]; !defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name), + Subject: b.MissingItemRange().Ptr(), + }) + } + } + + unusedBody := &body{ + val: b.val, + hiddenAttrs: usedNames, + } + + return content, unusedBody, diags +} + +// JustAttributes for JSON bodies interprets all properties of the wrapped +// JSON object as attributes and returns them. +func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + attrs := make(map[string]*hcl.Attribute) + + obj, ok := b.val.(*objectVal) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, setting the arguments for this block.", + Subject: b.val.StartRange().Ptr(), + }) + return attrs, diags + } + + for _, jsonAttr := range obj.Attrs { + name := jsonAttr.Name + if name == "//" { + // Ignore "//" keys in objects representing bodies, to allow + // their use as comments. + continue + } + + if _, hidden := b.hiddenAttrs[name]; hidden { + continue + } + + if existing, exists := attrs[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate attribute definition", + Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range), + Subject: &jsonAttr.NameRange, + }) + continue + } + + attrs[name] = &hcl.Attribute{ + Name: name, + Expr: &expression{src: jsonAttr.Value}, + Range: hcl.RangeBetween(jsonAttr.NameRange, jsonAttr.Value.Range()), + NameRange: jsonAttr.NameRange, + } + } + + // No diagnostics possible here, since the parser already took care of + // finding duplicates and every JSON value can be a valid attribute value. 
+ return attrs, diags +} + +func (b *body) MissingItemRange() hcl.Range { + switch tv := b.val.(type) { + case *objectVal: + return tv.CloseRange + case *arrayVal: + return tv.OpenRange + default: + // Should not happen in correct operation, but might show up if the + // input is invalid and we are producing partial results. + return tv.StartRange() + } +} + +func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labelsLeft []string, labelsUsed []string, labelRanges []hcl.Range, blocks *hcl.Blocks) (diags hcl.Diagnostics) { + if len(labelsLeft) > 0 { + labelName := labelsLeft[0] + jsonAttrs, attrDiags := b.collectDeepAttrs(v, &labelName) + diags = append(diags, attrDiags...) + + if len(jsonAttrs) == 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing block label", + Detail: fmt.Sprintf("At least one object property is required, whose name represents the %s block's %s.", typeName, labelName), + Subject: v.StartRange().Ptr(), + }) + return + } + labelsUsed := append(labelsUsed, "") + labelRanges := append(labelRanges, hcl.Range{}) + for _, p := range jsonAttrs { + pk := p.Name + labelsUsed[len(labelsUsed)-1] = pk + labelRanges[len(labelRanges)-1] = p.NameRange + diags = append(diags, b.unpackBlock(p.Value, typeName, typeRange, labelsLeft[1:], labelsUsed, labelRanges, blocks)...) + } + return + } + + // By the time we get here, we've peeled off all the labels and we're ready + // to deal with the block's actual content. + + // need to copy the label slices because their underlying arrays will + // continue to be mutated after we return. + labels := make([]string, len(labelsUsed)) + copy(labels, labelsUsed) + labelR := make([]hcl.Range, len(labelRanges)) + copy(labelR, labelRanges) + + switch tv := v.(type) { + case *nullVal: + // There is no block content, e.g the value is null. + return + case *objectVal: + // Single instance of the block + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: tv, + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + case *arrayVal: + // Multiple instances of the block + for _, av := range tv.Values { + *blocks = append(*blocks, &hcl.Block{ + Type: typeName, + Labels: labels, + Body: &body{ + val: av, // might be mistyped; we'll find out when content is requested for this body + }, + + DefRange: tv.OpenRange, + TypeRange: *typeRange, + LabelRanges: labelR, + }) + } + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or a JSON array is required, representing the contents of one or more %q blocks.", typeName), + Subject: v.StartRange().Ptr(), + }) + } + return +} + +// collectDeepAttrs takes either a single object or an array of objects and +// flattens it into a list of object attributes, collecting attributes from +// all of the objects in a given array. +// +// Ordering is preserved, so a list of objects that each have one property +// will result in those properties being returned in the same order as the +// objects appeared in the array. +// +// This is appropriate for use only for objects representing bodies or labels +// within a block. +// +// The labelName argument, if non-null, is used to tailor returned error +// messages to refer to block labels rather than attributes and child blocks. +// It has no other effect. 
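+//
+// As an illustrative sketch (not taken from the upstream sources): the JSON
+// input `[{"a": 1}, {"b": 2}]` flattens to the attribute sequence a, b, in
+// that order.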
+func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.Diagnostics) { + var diags hcl.Diagnostics + var attrs []*objectAttr + + switch tv := v.(type) { + case *nullVal: + // If a value is null, then we don't return any attributes or return an error. + + case *objectVal: + attrs = append(attrs, tv.Attrs...) + + case *arrayVal: + for _, ev := range tv.Values { + switch tev := ev.(type) { + case *objectVal: + attrs = append(attrs, tev.Attrs...) + default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("A JSON object is required here, to specify %s labels for this block.", *labelName), + Subject: ev.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "A JSON object is required here, to define arguments and child blocks.", + Subject: ev.StartRange().Ptr(), + }) + } + } + } + + default: + if labelName != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: fmt.Sprintf("Either a JSON object or JSON array of objects is required here, to specify %s labels for this block.", *labelName), + Subject: v.StartRange().Ptr(), + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect JSON value type", + Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.", + Subject: v.StartRange().Ptr(), + }) + } + } + + return attrs, diags +} + +func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + switch v := e.src.(type) { + case *stringVal: + if ctx != nil { + // Parse string contents as a HCL native language expression. + // We only do this if we have a context, so passing a nil context + // is how the caller specifies that interpolations are not allowed + // and that the string should just be returned verbatim. + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, evalDiags := expr.Value(ctx) + diags = append(diags, evalDiags...) + return val, diags + } + + return cty.StringVal(v.Value), nil + case *numberVal: + return cty.NumberVal(v.Value), nil + case *booleanVal: + return cty.BoolVal(v.Value), nil + case *arrayVal: + var diags hcl.Diagnostics + vals := []cty.Value{} + for _, jsonVal := range v.Values { + val, valDiags := (&expression{src: jsonVal}).Value(ctx) + vals = append(vals, val) + diags = append(diags, valDiags...) + } + return cty.TupleVal(vals), diags + case *objectVal: + var diags hcl.Diagnostics + attrs := map[string]cty.Value{} + attrRanges := map[string]hcl.Range{} + known := true + for _, jsonAttr := range v.Attrs { + // In this one context we allow keys to contain interpolation + // expressions too, assuming we're evaluating in interpolation + // mode. This achieves parity with the native syntax where + // object expressions can have dynamic keys, while block contents + // may not. 
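+			// (For example, a hypothetical key "${var.name}" would be
+			// evaluated as a template here when an EvalContext is provided.)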
+ name, nameDiags := (&expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}).Value(ctx) + valExpr := &expression{src: jsonAttr.Value} + val, valDiags := valExpr.Value(ctx) + diags = append(diags, nameDiags...) + diags = append(diags, valDiags...) + + var err error + name, err = convert.Convert(name, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if name.IsNull() { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid object key expression", + Detail: "Cannot use null value as an object key.", + Subject: &jsonAttr.NameRange, + Expression: valExpr, + EvalContext: ctx, + }) + continue + } + if !name.IsKnown() { + // This is a bit of a weird case, since our usual rules require + // us to tolerate unknowns and just represent the result as + // best we can but if we don't know the key then we can't + // know the type of our object at all, and thus we must turn + // the whole thing into cty.DynamicVal. This is consistent with + // how this situation is handled in the native syntax. + // We'll keep iterating so we can collect other errors in + // subsequent attributes. + known = false + continue + } + nameStr := name.AsString() + if _, defined := attrs[nameStr]; defined { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate object attribute", + Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), + Subject: &jsonAttr.NameRange, + Expression: e, + EvalContext: ctx, + }) + continue + } + attrs[nameStr] = val + attrRanges[nameStr] = jsonAttr.NameRange + } + if !known { + // We encountered an unknown key somewhere along the way, so + // we can't know what our type will eventually be. + return cty.DynamicVal, diags + } + return cty.ObjectVal(attrs), diags + case *nullVal: + return cty.NullVal(cty.DynamicPseudoType), nil + default: + // Default to DynamicVal so that ASTs containing invalid nodes can + // still be partially-evaluated. + return cty.DynamicVal, nil + } +} + +func (e *expression) Variables() []hcl.Traversal { + var vars []hcl.Traversal + + switch v := e.src.(type) { + case *stringVal: + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the hclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() + + case *arrayVal: + for _, jsonVal := range v.Values { + vars = append(vars, (&expression{src: jsonVal}).Variables()...) + } + case *objectVal: + for _, jsonAttr := range v.Attrs { + keyExpr := &stringVal{ // we're going to treat key as an expression in this context + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + } + vars = append(vars, (&expression{src: keyExpr}).Variables()...) + vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) 
+ } + } + + return vars +} + +func (e *expression) Range() hcl.Range { + return e.src.Range() +} + +func (e *expression) StartRange() hcl.Range { + return e.src.StartRange() +} + +// Implementation for hcl.AbsTraversalForExpr. +func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. + + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprCall. +func (e *expression) ExprCall() *hcl.StaticCall { + // In JSON-based syntax a static call is given as a string containing + // an expression in the native syntax that also supports ExprCall. + + switch v := e.src.(type) { + case *stringVal: + expr, diags := hclsyntax.ParseExpression([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + + call, diags := hcl.ExprCall(expr) + if diags.HasErrors() { + return nil + } + + return call + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} + +// Implementation for hcl.ExprMap. +func (e *expression) ExprMap() []hcl.KeyValuePair { + switch v := e.src.(type) { + case *objectVal: + ret := make([]hcl.KeyValuePair, len(v.Attrs)) + for i, jsonAttr := range v.Attrs { + ret[i] = hcl.KeyValuePair{ + Key: &expression{src: &stringVal{ + Value: jsonAttr.Name, + SrcRange: jsonAttr.NameRange, + }}, + Value: &expression{src: jsonAttr.Value}, + } + } + return ret + default: + return nil + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go new file mode 100644 index 00000000..bbcce5b3 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/tokentype_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type tokenType scanner.go"; DO NOT EDIT. + +package json + +import "strconv" + +const _tokenType_name = "tokenInvalidtokenCommatokenColontokenEqualstokenKeywordtokenNumbertokenStringtokenBrackOtokenBrackCtokenBraceOtokenBraceCtokenEOF" + +var _tokenType_map = map[tokenType]string{ + 0: _tokenType_name[0:12], + 44: _tokenType_name[12:22], + 58: _tokenType_name[22:32], + 61: _tokenType_name[32:43], + 75: _tokenType_name[43:55], + 78: _tokenType_name[55:66], + 83: _tokenType_name[66:77], + 91: _tokenType_name[77:88], + 93: _tokenType_name[88:99], + 123: _tokenType_name[99:110], + 125: _tokenType_name[110:121], + 9220: _tokenType_name[121:129], +} + +func (i tokenType) String() string { + if str, ok := _tokenType_map[i]; ok { + return str + } + return "tokenType(" + strconv.FormatInt(int64(i), 10) + ")" +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go new file mode 100644 index 00000000..96e62a58 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go @@ -0,0 +1,226 @@ +package hcl + +import ( + "fmt" +) + +// MergeFiles combines the given files to produce a single body that contains +// configuration from all of the given files. 
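+// (A typical use, as a hypothetical example: merging every configuration
+// file in a directory so they can be decoded as one body.)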
+// +// The ordering of the given files decides the order in which contained +// elements will be returned. If any top-level attributes are defined with +// the same name across multiple files, a diagnostic will be produced from +// the Content and PartialContent methods describing this error in a +// user-friendly way. +func MergeFiles(files []*File) Body { + var bodies []Body + for _, file := range files { + bodies = append(bodies, file.Body) + } + return MergeBodies(bodies) +} + +// MergeBodies is like MergeFiles except it deals directly with bodies, rather +// than with entire files. +func MergeBodies(bodies []Body) Body { + if len(bodies) == 0 { + // Swap out for our singleton empty body, to reduce the number of + // empty slices we have hanging around. + return emptyBody + } + + // If any of the given bodies are already merged bodies, we'll unpack + // to flatten to a single mergedBodies, since that's conceptually simpler. + // This also, as a side-effect, eliminates any empty bodies, since + // empties are merged bodies with no inner bodies. + var newLen int + var flatten bool + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + newLen += len(children) + flatten = true + } else { + newLen++ + } + } + + if !flatten { // not just newLen == len, because we might have mergedBodies with single bodies inside + return mergedBodies(bodies) + } + + if newLen == 0 { + // Don't allocate a new empty when we already have one + return emptyBody + } + + new := make([]Body, 0, newLen) + for _, body := range bodies { + if children, merged := body.(mergedBodies); merged { + new = append(new, children...) + } else { + new = append(new, body) + } + } + return mergedBodies(new) +} + +var emptyBody = mergedBodies([]Body{}) + +// EmptyBody returns a body with no content. This body can be used as a +// placeholder when a body is required but no body content is available. +func EmptyBody() Body { + return emptyBody +} + +type mergedBodies []Body + +// Content returns the content produced by applying the given schema to all +// of the merged bodies and merging the result. +// +// Although required attributes _are_ supported, they should be used sparingly +// with merged bodies since in this case there is no contextual information +// with which to return good diagnostics. Applications working with merged +// bodies may wish to mark all attributes as optional and then check for +// required attributes afterwards, to produce better diagnostics. +func (mb mergedBodies) Content(schema *BodySchema) (*BodyContent, Diagnostics) { + // the returned body will always be empty in this case, because mergedContent + // will only ever call Content on the child bodies. + content, _, diags := mb.mergedContent(schema, false) + return content, diags +} + +func (mb mergedBodies) PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) { + return mb.mergedContent(schema, true) +} + +func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) { + attrs := make(map[string]*Attribute) + var diags Diagnostics + + for _, body := range mb { + thisAttrs, thisDiags := body.JustAttributes() + + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) 
+ } + + if thisAttrs != nil { + for name, attr := range thisAttrs { + if existing := attrs[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + + attrs[name] = attr + } + } + } + + return attrs, diags +} + +func (mb mergedBodies) MissingItemRange() Range { + if len(mb) == 0 { + // Nothing useful to return here, so we'll return some garbage. + return Range{ + Filename: "", + } + } + + // arbitrarily use the first body's missing item range + return mb[0].MissingItemRange() +} + +func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) { + // We need to produce a new schema with none of the attributes marked as + // required, since _any one_ of our bodies can contribute an attribute value. + // We'll separately check that all required attributes are present at + // the end. + mergedSchema := &BodySchema{ + Blocks: schema.Blocks, + } + for _, attrS := range schema.Attributes { + mergedAttrS := attrS + mergedAttrS.Required = false + mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS) + } + + var mergedLeftovers []Body + content := &BodyContent{ + Attributes: map[string]*Attribute{}, + } + + var diags Diagnostics + for _, body := range mb { + var thisContent *BodyContent + var thisLeftovers Body + var thisDiags Diagnostics + + if partial { + thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema) + } else { + thisContent, thisDiags = body.Content(mergedSchema) + } + + if thisLeftovers != nil { + mergedLeftovers = append(mergedLeftovers, thisLeftovers) + } + if len(thisDiags) != 0 { + diags = append(diags, thisDiags...) + } + + if thisContent.Attributes != nil { + for name, attr := range thisContent.Attributes { + if existing := content.Attributes[name]; existing != nil { + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Duplicate argument", + Detail: fmt.Sprintf( + "Argument %q was already set at %s", + name, existing.NameRange.String(), + ), + Subject: &attr.NameRange, + }) + continue + } + content.Attributes[name] = attr + } + } + + if len(thisContent.Blocks) != 0 { + content.Blocks = append(content.Blocks, thisContent.Blocks...) + } + } + + // Finally, we check for required attributes. + for _, attrS := range schema.Attributes { + if !attrS.Required { + continue + } + + if content.Attributes[attrS.Name] == nil { + // We don't have any context here to produce a good diagnostic, + // which is why we warn in the Content docstring to minimize the + // use of required attributes on merged bodies. 
+ diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Missing required argument", + Detail: fmt.Sprintf( + "The argument %q is required, but was not set.", + attrS.Name, + ), + }) + } + } + + leftoverBody := MergeBodies(mergedLeftovers) + return content, leftoverBody, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go new file mode 100644 index 00000000..5d2910c1 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go @@ -0,0 +1,288 @@ +package hcl + +import ( + "fmt" + "math/big" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// Index is a helper function that performs the same operation as the index +// operator in the HCL expression language. That is, the result is the +// same as it would be for collection[key] in a configuration expression. +// +// This is exported so that applications can perform indexing in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. +func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) { + if collection.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to index null value", + Detail: "This value is null, so it does not have any indices.", + Subject: srcRange, + }, + } + } + if key.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "Can't use a null value as an indexing key.", + Subject: srcRange, + }, + } + } + ty := collection.Type() + kty := key.Type() + if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType { + return cty.DynamicVal, nil + } + + switch { + + case ty.IsListType() || ty.IsTupleType() || ty.IsMapType(): + var wantType cty.Type + switch { + case ty.IsListType() || ty.IsTupleType(): + wantType = cty.Number + case ty.IsMapType(): + wantType = cty.String + default: + // should never happen + panic("don't know what key type we want") + } + + key, keyErr := convert.Convert(key, wantType) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + + has := collection.HasIndex(key) + if !has.IsKnown() { + if ty.IsTupleType() { + return cty.DynamicVal, nil + } else { + return cty.UnknownVal(ty.ElementType()), nil + } + } + if has.False() { + // We have a more specialized error message for the situation of + // using a fractional number to index into a sequence, because + // that will tend to happen if the user is trying to use division + // to calculate an index and not realizing that HCL does float + // division rather than integer division. 
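+			// For example, 7 / 2 yields 3.5 in HCL, which is not a valid
+			// index into a sequence.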
+ if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) { + if key.IsKnown() && !key.IsNull() { + bf := key.AsBigFloat() + if _, acc := bf.Int(nil); acc != big.Exact { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf("The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index (%g) has a fractional part.", bf), + Subject: srcRange, + }, + } + } + } + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.Index(key), nil + + case ty.IsObjectType(): + key, keyErr := convert.Convert(key, cty.String) + if keyErr != nil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: fmt.Sprintf( + "The given key does not identify an element in this collection value: %s.", + keyErr.Error(), + ), + Subject: srcRange, + }, + } + } + if !collection.IsKnown() { + return cty.DynamicVal, nil + } + if !key.IsKnown() { + return cty.DynamicVal, nil + } + + attrName := key.AsString() + + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "The given key does not identify an element in this collection value.", + Subject: srcRange, + }, + } + } + + return collection.GetAttr(attrName), nil + + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Invalid index", + Detail: "This value does not have any indices.", + Subject: srcRange, + }, + } + } + +} + +// GetAttr is a helper function that performs the same operation as the +// attribute access in the HCL expression language. That is, the result is the +// same as it would be for obj.attr in a configuration expression. +// +// This is exported so that applications can access attributes in a manner +// consistent with how the language does it, including handling of null and +// unknown values, etc. +// +// Diagnostics are produced if the given combination of values is not valid. +// Therefore a pointer to a source range must be provided to use in diagnostics, +// though nil can be provided if the calling application is going to +// ignore the subject of the returned diagnostics anyway. 
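+//
+// As a usage sketch: GetAttr(obj, "name", nil) corresponds to the
+// expression obj.name in the HCL language.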
+func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) { + if obj.IsNull() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Attempt to get attribute from null value", + Detail: "This value is null, so it does not have any attributes.", + Subject: srcRange, + }, + } + } + + ty := obj.Type() + switch { + case ty.IsObjectType(): + if !ty.HasAttribute(attrName) { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName), + Subject: srcRange, + }, + } + } + + if !obj.IsKnown() { + return cty.UnknownVal(ty.AttributeType(attrName)), nil + } + + return obj.GetAttr(attrName), nil + case ty.IsMapType(): + if !obj.IsKnown() { + return cty.UnknownVal(ty.ElementType()), nil + } + + idx := cty.StringVal(attrName) + if obj.HasIndex(idx).False() { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Missing map element", + Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName), + Subject: srcRange, + }, + } + } + + return obj.Index(idx), nil + case ty == cty.DynamicPseudoType: + return cty.DynamicVal, nil + default: + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unsupported attribute", + Detail: "This value does not have any attributes.", + Subject: srcRange, + }, + } + } + +} + +// ApplyPath is a helper function that applies a cty.Path to a value using the +// indexing and attribute access operations from HCL. +// +// This is similar to calling the path's own Apply method, but ApplyPath uses +// the more relaxed typing rules that apply to these operations in HCL, rather +// than cty's relatively-strict rules. ApplyPath is implemented in terms of +// Index and GetAttr, and so it has the same behavior for individual steps +// but will stop and return any errors returned by intermediate steps. +// +// Diagnostics are produced if the given path cannot be applied to the given +// value. Therefore a pointer to a source range must be provided to use in +// diagnostics, though nil can be provided if the calling application is going +// to ignore the subject of the returned diagnostics anyway. +func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) { + var diags Diagnostics + + for _, step := range path { + var stepDiags Diagnostics + switch ts := step.(type) { + case cty.IndexStep: + val, stepDiags = Index(val, ts.Key, srcRange) + case cty.GetAttrStep: + val, stepDiags = GetAttr(val, ts.Name, srcRange) + default: + // Should never happen because the above are all of the step types. + diags = diags.Append(&Diagnostic{ + Severity: DiagError, + Summary: "Invalid path step", + Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step), + Subject: srcRange, + }) + return cty.DynamicVal, diags + } + + diags = append(diags, stepDiags...) + if stepDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + + return val, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go new file mode 100644 index 00000000..06db8bfb --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go @@ -0,0 +1,275 @@ +package hcl + +import "fmt" + +// Pos represents a single position in a source file, by addressing the +// start byte of a unicode character encoded in UTF-8. 
+// +// Pos is generally used only in the context of a Range, which then defines +// which source file the position is within. +type Pos struct { + // Line is the source code line where this position points. Lines are + // counted starting at 1 and incremented for each newline character + // encountered. + Line int + + // Column is the source code column where this position points, in + // unicode characters, with counting starting at 1. + // + // Column counts characters as they appear visually, so for example a + // latin letter with a combining diacritic mark counts as one character. + // This is intended for rendering visual markers against source code in + // contexts where these diacritics would be rendered in a single character + // cell. Technically speaking, Column is counting grapheme clusters as + // used in unicode normalization. + Column int + + // Byte is the byte offset into the file where the indicated character + // begins. This is a zero-based offset to the first byte of the first + // UTF-8 codepoint sequence in the character, and thus gives a position + // that can be resolved _without_ awareness of Unicode characters. + Byte int +} + +// InitialPos is a suitable position to use to mark the start of a file. +var InitialPos = Pos{Byte: 0, Line: 1, Column: 1} + +// Range represents a span of characters between two positions in a source +// file. +// +// This struct is usually used by value in types that represent AST nodes, +// but by pointer in types that refer to the positions of other objects, +// such as in diagnostics. +type Range struct { + // Filename is the name of the file into which this range's positions + // point. + Filename string + + // Start and End represent the bounds of this range. Start is inclusive + // and End is exclusive. + Start, End Pos +} + +// RangeBetween returns a new range that spans from the beginning of the +// start range to the end of the end range. +// +// The result is meaningless if the two ranges do not belong to the same +// source file or if the end range appears before the start range. +func RangeBetween(start, end Range) Range { + return Range{ + Filename: start.Filename, + Start: start.Start, + End: end.End, + } +} + +// RangeOver returns a new range that covers both of the given ranges and +// possibly additional content between them if the two ranges do not overlap. +// +// If either range is empty then it is ignored. The result is empty if both +// given ranges are empty. +// +// The result is meaningless if the two ranges to not belong to the same +// source file. +func RangeOver(a, b Range) Range { + if a.Empty() { + return b + } + if b.Empty() { + return a + } + + var start, end Pos + if a.Start.Byte < b.Start.Byte { + start = a.Start + } else { + start = b.Start + } + if a.End.Byte > b.End.Byte { + end = a.End + } else { + end = b.End + } + return Range{ + Filename: a.Filename, + Start: start, + End: end, + } +} + +// ContainsPos returns true if and only if the given position is contained within +// the receiving range. +// +// In the unlikely case that the line/column information disagree with the byte +// offset information in the given position or receiving range, the byte +// offsets are given priority. +func (r Range) ContainsPos(pos Pos) bool { + return r.ContainsOffset(pos.Byte) +} + +// ContainsOffset returns true if and only if the given byte offset is within +// the receiving Range. 
+func (r Range) ContainsOffset(offset int) bool { + return offset >= r.Start.Byte && offset < r.End.Byte +} + +// Ptr returns a pointer to a copy of the receiver. This is a convenience when +// ranges in places where pointers are required, such as in Diagnostic, but +// the range in question is returned from a method. Go would otherwise not +// allow one to take the address of a function call. +func (r Range) Ptr() *Range { + return &r +} + +// String returns a compact string representation of the receiver. +// Callers should generally prefer to present a range more visually, +// e.g. via markers directly on the relevant portion of source code. +func (r Range) String() string { + if r.Start.Line == r.End.Line { + return fmt.Sprintf( + "%s:%d,%d-%d", + r.Filename, + r.Start.Line, r.Start.Column, + r.End.Column, + ) + } else { + return fmt.Sprintf( + "%s:%d,%d-%d,%d", + r.Filename, + r.Start.Line, r.Start.Column, + r.End.Line, r.End.Column, + ) + } +} + +func (r Range) Empty() bool { + return r.Start.Byte == r.End.Byte +} + +// CanSliceBytes returns true if SliceBytes could return an accurate +// sub-slice of the given slice. +// +// This effectively tests whether the start and end offsets of the range +// are within the bounds of the slice, and thus whether SliceBytes can be +// trusted to produce an accurate start and end position within that slice. +func (r Range) CanSliceBytes(b []byte) bool { + switch { + case r.Start.Byte < 0 || r.Start.Byte > len(b): + return false + case r.End.Byte < 0 || r.End.Byte > len(b): + return false + case r.End.Byte < r.Start.Byte: + return false + default: + return true + } +} + +// SliceBytes returns a sub-slice of the given slice that is covered by the +// receiving range, assuming that the given slice is the source code of the +// file indicated by r.Filename. +// +// If the receiver refers to any byte offsets that are outside of the slice +// then the result is constrained to the overlapping portion only, to avoid +// a panic. Use CanSliceBytes to determine if the result is guaranteed to +// be an accurate span of the requested range. +func (r Range) SliceBytes(b []byte) []byte { + start := r.Start.Byte + end := r.End.Byte + if start < 0 { + start = 0 + } else if start > len(b) { + start = len(b) + } + if end < 0 { + end = 0 + } else if end > len(b) { + end = len(b) + } + if end < start { + end = start + } + return b[start:end] +} + +// Overlaps returns true if the receiver and the other given range share any +// characters in common. +func (r Range) Overlaps(other Range) bool { + switch { + case r.Filename != other.Filename: + // If the ranges are in different files then they can't possibly overlap + return false + case r.Empty() || other.Empty(): + // Empty ranges can never overlap + return false + case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte): + return true + case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte): + return true + default: + return false + } +} + +// Overlap finds a range that is either identical to or a sub-range of both +// the receiver and the other given range. It returns an empty range +// within the receiver if there is no overlap between the two ranges. +// +// A non-empty result is either identical to or a subset of the receiver. 
+func (r Range) Overlap(other Range) Range { + if !r.Overlaps(other) { + // Start == End indicates an empty range + return Range{ + Filename: r.Filename, + Start: r.Start, + End: r.Start, + } + } + + var start, end Pos + if r.Start.Byte > other.Start.Byte { + start = r.Start + } else { + start = other.Start + } + if r.End.Byte < other.End.Byte { + end = r.End + } else { + end = other.End + } + + return Range{ + Filename: r.Filename, + Start: start, + End: end, + } +} + +// PartitionAround finds the portion of the given range that overlaps with +// the receiver and returns three ranges: the portion of the receiver that +// precedes the overlap, the overlap itself, and then the portion of the +// receiver that comes after the overlap. +// +// If the two ranges do not overlap then all three returned ranges are empty. +// +// If the given range aligns with or extends beyond either extent of the +// receiver then the corresponding outer range will be empty. +func (r Range) PartitionAround(other Range) (before, overlap, after Range) { + overlap = r.Overlap(other) + if overlap.Empty() { + return overlap, overlap, overlap + } + + before = Range{ + Filename: r.Filename, + Start: r.Start, + End: overlap.Start, + } + after = Range{ + Filename: r.Filename, + Start: overlap.End, + End: r.End, + } + + return before, overlap, after +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go new file mode 100644 index 00000000..17c0d7c6 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go @@ -0,0 +1,152 @@ +package hcl + +import ( + "bufio" + "bytes" + + "github.com/apparentlymart/go-textseg/textseg" +) + +// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc +// and visit a source range for each token matched. +// +// For example, this can be used with bufio.ScanLines to find the source range +// for each line in the file, skipping over the actual newline characters, which +// may be useful when printing source code snippets as part of diagnostic +// messages. +// +// The line and column information in the returned ranges is produced by +// counting newline characters and grapheme clusters respectively, which +// mimics the behavior we expect from a parser when producing ranges. +type RangeScanner struct { + filename string + b []byte + cb bufio.SplitFunc + + pos Pos // position of next byte to process in b + cur Range // latest range + tok []byte // slice of b that is covered by cur + err error // error from last scan, if any +} + +// NewRangeScanner creates a new RangeScanner for the given buffer, producing +// ranges for the given filename. +// +// Since ranges have grapheme-cluster granularity rather than byte granularity, +// the scanner will produce incorrect results if the given SplitFunc creates +// tokens between grapheme cluster boundaries. In particular, it is incorrect +// to use RangeScanner with bufio.ScanRunes because it will produce tokens +// around individual UTF-8 sequences, which will split any multi-sequence +// grapheme clusters. +func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner { + return NewRangeScannerFragment(b, filename, InitialPos, cb) +} + +// NewRangeScannerFragment is like NewRangeScanner but the ranges it produces +// will be offset by the given starting position, which is appropriate for +// sub-slices of a file, whereas NewRangeScanner assumes it is scanning an +// entire file.
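+//
+// As a hedged usage sketch (the buffer and filename here are hypothetical,
+// not part of the original documentation), scanning a whole file
+// line-by-line might look like:
+//
+//     sc := hcl.NewRangeScanner(src, "config.hcl", bufio.ScanLines)
+//     for sc.Scan() {
+//         fmt.Printf("%s: %s\n", sc.Range(), sc.Bytes())
+//     }
+//     if err := sc.Err(); err != nil {
+//         // handle the scanning error
+//     }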
+func NewRangeScannerFragment(b []byte, filename string, start Pos, cb bufio.SplitFunc) *RangeScanner { + return &RangeScanner{ + filename: filename, + b: b, + cb: cb, + pos: start, + } +} + +func (sc *RangeScanner) Scan() bool { + if sc.pos.Byte >= len(sc.b) || sc.err != nil { + // All done + return false + } + + // Since we're operating on an in-memory buffer, we always pass the whole + // remainder of the buffer to our SplitFunc and set isEOF to let it know + // that it has the whole thing. + advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true) + + // Since we are setting isEOF to true this should never happen, but + // if it does we will just abort and assume the SplitFunc is misbehaving. + if advance == 0 && token == nil && err == nil { + return false + } + + if err != nil { + sc.err = err + sc.cur = Range{ + Filename: sc.filename, + Start: sc.pos, + End: sc.pos, + } + sc.tok = nil + return false + } + + sc.tok = token + start := sc.pos + end := sc.pos + new := sc.pos + + // adv is similar to token but it also includes any subsequent characters + // we're being asked to skip over by the SplitFunc. + // adv is a slice covering any additional bytes we are skipping over, based + // on what the SplitFunc told us to do with advance. + adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance] + + // We now need to scan over our token to count the grapheme clusters + // so we can correctly advance Column, and count the newlines so we + // can correctly advance Line. + advR := bytes.NewReader(adv) + gsc := bufio.NewScanner(advR) + advanced := 0 + gsc.Split(textseg.ScanGraphemeClusters) + for gsc.Scan() { + gr := gsc.Bytes() + new.Byte += len(gr) + new.Column++ + + // We rely here on the fact that \r\n is considered a grapheme cluster + // and so we don't need to worry about miscounting additional lines + // on files with Windows-style line endings. + if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') { + new.Column = 1 + new.Line++ + } + + if advanced < len(token) { + // If we've not yet found the end of our token then we'll + // also push our "end" marker along. + // (if advance > len(token) then we'll stop moving "end" early + // so that the caller only sees the range covered by token.) + end = new + } + advanced += len(gr) + } + + sc.cur = Range{ + Filename: sc.filename, + Start: start, + End: end, + } + sc.pos = new + return true +} + +// Range returns a range that covers the latest token obtained after a call +// to Scan returns true. +func (sc *RangeScanner) Range() Range { + return sc.cur +} + +// Bytes returns the slice of the input buffer that is covered by the range +// that would be returned by Range. +func (sc *RangeScanner) Bytes() []byte { + return sc.tok +} + +// Err can be called after Scan returns false to determine if the latest read +// resulted in an error, and obtain that error if so. +func (sc *RangeScanner) Err() error { + return sc.err +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/schema.go b/vendor/github.com/hashicorp/hcl2/hcl/schema.go new file mode 100644 index 00000000..891257ac --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/schema.go @@ -0,0 +1,21 @@ +package hcl + +// BlockHeaderSchema represents the shape of a block header, and is +// used for matching blocks within bodies. +type BlockHeaderSchema struct { + Type string + LabelNames []string +} + +// AttributeSchema represents the requirements for an attribute, and is used +// for matching attributes within bodies. 
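+//
+// As a brief illustration (the attribute names are hypothetical), a body
+// expected to contain a required "source" attribute and an optional
+// "version" attribute could be described as:
+//
+//     schema := &hcl.BodySchema{
+//         Attributes: []hcl.AttributeSchema{
+//             {Name: "source", Required: true},
+//             {Name: "version", Required: false},
+//         },
+//     }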
+type AttributeSchema struct { + Name string + Required bool +} + +// BodySchema represents the desired shallow structure of a body. +type BodySchema struct { + Attributes []AttributeSchema + Blocks []BlockHeaderSchema +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md new file mode 100644 index 00000000..8bbaff81 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md @@ -0,0 +1,691 @@ +# HCL Syntax-Agnostic Information Model + +This is the specification for the general information model (abstract types and +semantics) for hcl. HCL is a system for defining configuration languages for +applications. The HCL information model is designed to support multiple +concrete syntaxes for configuration, each with a mapping to the model defined +in this specification. + +The two primary syntaxes intended for use in conjunction with this model are +[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md). +In principle other syntaxes are possible as long as either their language model +is sufficiently rich to express the concepts described in this specification +or the language targets a well-defined subset of the specification. + +## Structural Elements + +The primary structural element is the _body_, which is a container representing +a set of zero or more _attributes_ and a set of zero or more _blocks_. + +A _configuration file_ is the top-level object, and will usually be produced +by reading a file from disk and parsing it as a particular syntax. A +configuration file has its own _body_, representing the top-level attributes +and blocks. + +An _attribute_ is a name and value pair associated with a body. Attribute names +are unique within a given body. Attribute values are provided as _expressions_, +which are discussed in detail in a later section. + +A _block_ is a nested structure that has a _type name_, zero or more string +_labels_ (e.g. identifiers), and a nested body. + +Together the structural elements create a hierarchical data structure, with +attributes intended to represent the direct properties of a particular object +in the calling application, and blocks intended to represent child objects +of a particular object. + +## Body Content + +To support the expression of the HCL concepts in languages whose information +model is a subset of HCL's, such as JSON, a _body_ is an opaque container +whose content can only be accessed by providing information on the expected +structure of the content. + +The specification for each syntax must describe how its physical constructs +are mapped on to body content given a schema. For syntaxes that have +first-class syntax distinguishing attributes and bodies this can be relatively +straightforward, while more detailed mapping rules may be required in syntaxes +where the representation of attributes vs. blocks is ambiguous. + +### Schema-driven Processing + +Schema-driven processing is the primary way to access body content. +A _body schema_ is a description of what is expected within a particular body, +which can then be used to extract the _body content_, which then provides +access to the specific attributes and blocks requested. + +A _body schema_ consists of a list of _attribute schemata_ and +_block header schemata_: + +- An _attribute schema_ provides the name of an attribute and whether its + presence is required. 
+ +- A _block header schema_ provides a block type name and the semantic names + assigned to each of the labels of that block type, if any. + +Within a schema, it is an error to request the same attribute name twice or +to request a block type whose name is also an attribute name. While this can +in principle be supported in some syntaxes, in other syntaxes the attribute +and block namespaces are combined and so an attribute cannot coexist with +a block whose type name is identical to the attribute name. + +The result of applying a body schema to a body is _body content_, which +consists of an _attribute map_ and a _block sequence_: + +- The _attribute map_ is a map data structure whose keys are attribute names + and whose values are _expressions_ that represent the corresponding attribute + values. + +- The _block sequence_ is an ordered sequence of blocks, with each specifying + a block _type name_, the sequence of _labels_ specified for the block, + and the body object (not body _content_) representing the block's own body. + +After obtaining _body content_, the calling application may continue processing +by evaluating attribute expressions and/or recursively applying further +schema-driven processing to the child block bodies. + +**Note:** The _body schema_ is intentionally minimal, to reduce the set of +mapping rules that must be defined for each syntax. Higher-level utility +libraries may be provided to assist in the construction of a schema and +perform additional processing, such as automatically evaluating attribute +expressions and assigning their result values into a data structure, or +recursively applying a schema to child blocks. Such utilities are not part of +this core specification and will vary depending on the capabilities and idiom +of the implementation language. + +### _Dynamic Attributes_ Processing + +The _schema-driven_ processing model is useful when the expected structure +of a body is known a priori by the calling application. Some blocks are +instead more free-form, such as a user-provided set of arbitrary key/value +pairs. + +The alternative _dynamic attributes_ processing mode allows for this more +ad-hoc approach. Processing in this mode behaves as if a schema had been +constructed without any _block header schemata_ and with an attribute +schema for each distinct key provided within the physical representation +of the body. + +The means by which _distinct keys_ are identified is dependent on the +physical syntax; this processing mode assumes that the syntax has a way +to enumerate keys provided by the author and identify expressions that +correspond with those keys, but does not define the means by which this is +done. + +The result of _dynamic attributes_ processing is an _attribute map_ as +defined in the previous section. No _block sequence_ is produced in this +processing mode. + +### Partial Processing of Body Content + +Under _schema-driven processing_, by default the given schema is assumed +to be exhaustive, such that any attribute or block not matched by schema +elements is considered an error. This allows feedback about unsupported +attributes and blocks (such as typos) to be provided. + +An alternative is _partial processing_, where any additional elements within +the body are not considered an error. + +Under partial processing, the result is both body content as described +above _and_ a new body that represents any body elements that remain after +the schema has been processed.
+ +Specifically: + +- Any attribute whose name is specified in the schema is returned in body + content and elided from the new body. + +- Any block whose type is specified in the schema is returned in body content + and elided from the new body. + +- Any attribute or block _not_ meeting the above conditions is placed into + the new body, unmodified. + +The new body can then be recursively processed using any of the body +processing models. This facility allows different subsets of body content +to be processed by different parts of the calling application. + +Processing a body in two steps — first partial processing of a source body, +then exhaustive processing of the returned body — is equivalent to single-step +processing with a schema that is the union of the schemata used +across the two steps. + +## Expressions + +Attribute values are represented by _expressions_. Depending on the concrete +syntax in use, an expression may just be a literal value or it may describe +a computation in terms of literal values, variables, and functions. + +Each syntax defines its own representation of expressions. For syntaxes based +in languages that do not have any non-literal expression syntax, it is +recommended to embed the template language from +[the native syntax](./hclsyntax/spec.md) e.g. as a post-processing step on +string literals. + +### Expression Evaluation + +In order to obtain a concrete value, each expression must be _evaluated_. +Evaluation is performed in terms of an evaluation context, which +consists of the following: + +- An _evaluation mode_, which is defined below. +- A _variable scope_, which provides a set of named variables for use in + expressions. +- A _function table_, which provides a set of named functions for use in + expressions. + +The _evaluation mode_ allows for two different interpretations of an +expression: + +- In _literal-only mode_, variables and functions are not available and it + is assumed that the calling application's intent is to treat the attribute + value as a literal. + +- In _full expression mode_, variables and functions are defined and it is + assumed that the calling application wishes to provide a full expression + language for definition of the attribute value. + +The actual behavior of these two modes depends on the syntax in use. For +languages with first-class expression syntax, these two modes may be considered +equivalent, with _literal-only mode_ simply not defining any variables or +functions. For languages that embed arbitrary expressions via string templates, +_literal-only mode_ may disable such processing, allowing literal strings to +pass through without interpretation as templates. + +Since literal-only mode does not support variables and functions, it is an +error for the calling application to enable this mode and yet provide a +variable scope and/or function table. + +## Values and Value Types + +The result of expression evaluation is a _value_. Each value has a _type_, +which is dynamically determined during evaluation. The _variable scope_ in +the evaluation context is a map from variable name to value, using the same +definition of value. + +The type system for HCL values is intended to be of a level of abstraction +suitable for configuration of various applications. A well-defined, +implementation-language-agnostic type system is defined to allow for +consistent processing of configuration across many implementation languages.
+Concrete implementations may provide additional functionality to lower +HCL values and types to corresponding native language types, which may then +impose additional constraints on the values outside of the scope of this +specification. + +Two values are _equal_ if and only if they have identical types and their +values are equal according to the rules of their shared type. + +### Primitive Types + +The primitive types are _string_, _bool_, and _number_. + +A _string_ is a sequence of unicode characters. Two strings are equal if +NFC normalization ([UAX#15](http://unicode.org/reports/tr15/)) +of each string produces two identical sequences of characters. +NFC normalization ensures that, for example, a precomposed combination of a +latin letter and a diacritic compares equal with the letter followed by +a combining diacritic. + +The _bool_ type has only two non-null values: _true_ and _false_. Two bool +values are equal if and only if they are either both true or both false. + +A _number_ is an arbitrary-precision floating point value. An implementation +_must_ make the full-precision values available to the calling application +for interpretation into any suitable number representation. An implementation +may in practice implement numbers with limited precision so long as the +following constraints are met: + +- Integers are represented with at least 256 bits. +- Non-integer numbers are represented as floating point values with a + mantissa of at least 256 bits and a signed binary exponent of at least + 16 bits. +- An error is produced if an integer value given in source cannot be + represented precisely. +- An error is produced if a non-integer value cannot be represented due to + overflow. +- A non-integer number is rounded to the nearest possible value when a + value is of too high a precision to be represented. + +The _number_ type also requires representation of both positive and negative +infinity. A "not a number" (NaN) value is _not_ provided nor used. + +Two number values are equal if they are numerically equal to the precision +associated with the number. Positive infinity and negative infinity are +equal to themselves but not to each other. Positive infinity is greater than +any other number value, and negative infinity is less than any other number +value. + +Some syntaxes may be unable to represent numeric literals of arbitrary +precision. This must be defined in the syntax specification as part of its +description of mapping numeric literals to HCL values. + +### Structural Types + +_Structural types_ are types that are constructed by combining other types. +Each distinct combination of other types is itself a distinct type. There +are two structural type _kinds_: + +- _Object types_ are constructed of a set of named attributes, each of which + has a type. Attribute names are always strings. (_Object_ attributes are a + distinct idea from _body_ attributes, though calling applications + may choose to blur the distinction by use of common naming schemes.) +- _Tuple types_ are constructed of a sequence of elements, each of which + has a type. + +Values of structural types are compared for equality in terms of their +attributes or elements. A structural type value is equal to another if and +only if all of the corresponding attributes or elements are equal. + +Two structural types are identical if they are of the same kind and +have attributes or elements with identical types.
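+
+The Go implementation of this model builds on the `go-cty` library (vendored
+alongside this package), where these identity rules can be observed directly.
+The following sketch is illustrative only; the attribute names are
+hypothetical:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+)
+
+func main() {
+	// Two object types with identical attribute names and types are the
+	// same (identical) type.
+	a := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.Number})
+	b := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.Number})
+	// Changing one attribute's type yields a distinct type.
+	c := cty.Object(map[string]cty.Type{"name": cty.String, "port": cty.String})
+
+	fmt.Println(a.Equals(b)) // true
+	fmt.Println(a.Equals(c)) // false
+}
+```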
+ +### Collection Types + +_Collection types_ are types that combine together an arbitrary number of +values of some other single type. There are three collection type _kinds_: + +- _List types_ represent ordered sequences of values of their element type. +- _Map types_ represent values of their element type accessed via string keys. +- _Set types_ represent unordered sets of distinct values of their element type. + +For each of these kinds and each distinct element type there is a distinct +collection type. For example, "list of string" is a distinct type from +"set of string", and "list of number" is a distinct type from "list of string". + +Values of collection types are compared for equality in terms of their +elements. A collection type value is equal to another if and only if both +have the same number of elements and their corresponding elements are equal. + +Two collection types are identical if they are of the same kind and have +the same element type. + +### Null values + +Each type has a null value. The null value of a type represents the absence +of a value, but with type information retained to allow for type checking. + +Null values are used primarily to represent the conditional absence of a +body attribute. In a syntax with a conditional operator, one of the result +values of that conditional may be null to indicate that the attribute should be +considered not present in that case. + +Calling applications _should_ consider an attribute with a null value as +equivalent to the value not being present at all. + +A null value of a particular type is equal to itself. + +### Unknown Values and the Dynamic Pseudo-type + +An _unknown value_ is a placeholder for a value that is not yet known. +Operations on unknown values themselves return unknown values that have a +type appropriate to the operation. For example, adding together two unknown +numbers yields an unknown number, while comparing two unknown values of any +type for equality yields an unknown bool. + +Each type has a distinct unknown value. For example, an unknown _number_ is +a distinct value from an unknown _string_. + +_The dynamic pseudo-type_ is a placeholder for a type that is not yet known. +The only values of this type are its null value and its unknown value. It is +referred to as a _pseudo-type_ because it should not be considered a type in +its own right, but rather as a placeholder for a type yet to be established. +The unknown value of the dynamic pseudo-type is referred to as _the dynamic +value_. + +Operations on values of the dynamic pseudo-type behave as if it is a value +of the expected type, optimistically assuming that once the value and type +are known they will be valid for the operation. For example, adding together +a number and the dynamic value produces an unknown number. + +Unknown values and the dynamic pseudo-type can be used as a mechanism for +partial type checking and semantic checking: by evaluating an expression with +all variables set to an unknown value, the expression can be evaluated to +produce an unknown value of a given type, or produce an error if any operation +is provably invalid with only type information. + +Unknown values and the dynamic pseudo-type must never be returned from +operations unless at least one operand is unknown or dynamic. Calling +applications are guaranteed that unless the global scope includes unknown +values, or the function table includes functions that return unknown values, +no expression will evaluate to an unknown value. 
The calling application is +thus in total control over the use and meaning of unknown values. + +The dynamic pseudo-type is identical only to itself. + +### Capsule Types + +A _capsule type_ is a custom type defined by the calling application. A value +of a capsule type is considered opaque to HCL, but may be accepted +by functions provided by the calling application. + +A particular capsule type is identical only to itself. The equality of two +values of the same capsule type is defined by the calling application. No +other operations are supported for values of capsule types. + +Support for capsule types in a HCL implementation is optional. Capsule types +are intended to allow calling applications to pass through values that are +not part of the standard type system. For example, an application that +deals with raw binary data may define a capsule type representing a byte +array, and provide functions that produce or operate on byte arrays. + +### Type Specifications + +In certain situations it is necessary to define expectations about the +type of a value. Whereas two _types_ have a commutative _identity_ relationship, +a type has a non-commutative _matches_ relationship with a _type specification_. +A type specification is, in practice, just a different interpretation of a +type such that: + +- Any type _matches_ any type that it is identical to. + +- Any type _matches_ the dynamic pseudo-type. + +For example, given a type specification "list of dynamic pseudo-type", the +concrete types "list of string" and "list of map" match, but the +type "set of string" does not. + +## Functions and Function Calls + +The evaluation context used to evaluate an expression includes a function +table, which represents an application-defined set of named functions +available for use in expressions. + +Each syntax defines whether function calls are supported and how they are +physically represented in source code, but the semantics of function calls are +defined here to ensure consistent results across syntaxes and to allow +applications to provide functions that are interoperable with all syntaxes. + +A _function_ is defined from the following elements: + +- Zero or more _positional parameters_, each with a name used for documentation, + a type specification for expected argument values, and a flag for whether + each of null values, unknown values, and values of the dynamic pseudo-type + are accepted. + +- Zero or one _variadic parameter_, with the same structure as the _positional_ + parameters, which if present collects any additional arguments provided at + the function call site. + +- A _result type definition_, which specifies the value type returned for each + valid sequence of argument values. + +- A _result value definition_, which specifies the value returned for each + valid sequence of argument values. + +A _function call_, regardless of source syntax, consists of a sequence of +argument values. The argument values are each mapped to a corresponding +parameter as follows: + +- For each of the function's positional parameters in sequence, take the next + argument. If there are no more arguments, the call is erroneous. + +- If the function has a variadic parameter, take all remaining arguments that + were not yet assigned to a positional parameter and collect them into + a sequence of variadic arguments that each correspond to the variadic + parameter.
+ +- If the function has _no_ variadic parameter, it is an error if any arguments + remain after taking one argument for each positional parameter. + +After mapping each argument to a parameter, semantic checking proceeds +for each argument: + +- If the argument value corresponding to a parameter does not match the + parameter's type specification, the call is erroneous. + +- If the argument value corresponding to a parameter is null and the parameter + is not specified as accepting nulls, the call is erroneous. + +- If the argument value corresponding to a parameter is the dynamic value + and the parameter is not specified as accepting values of the dynamic + pseudo-type, the call is valid but its _result type_ is forced to be the + dynamic pseudo type. + +- If neither of the above conditions holds for any argument, the call is + valid and the function's value type definition is used to determine the + call's _result type_. A function _may_ vary its result type depending on + the argument _values_ as well as the argument _types_; for example, a + function that decodes a JSON value will return a different result type + depending on the data structure described by the given JSON source code. + +If semantic checking succeeds without error, the call is _executed_: + +- For each argument, if its value is unknown and its corresponding parameter + is not specified as accepting unknowns, the _result value_ is forced to be an + unknown value of the result type. + +- If the previous condition does not apply, the function's result value + definition is used to determine the call's _result value_. + +The result of a function call expression is either an error, if one of the +erroneous conditions above applies, or the _result value_. + +## Type Conversions and Unification + +Values given in configuration may not always match the expectations of the +operations applied to them or to the calling application. In such situations, +automatic type conversion is attempted as a convenience to the user. + +Along with conversions to a _specified_ type, it is sometimes necessary to +ensure that a selection of values are all of the _same_ type, without any +constraint on which type that is. This is the process of _type unification_, +which attempts to find the most general type that all of the given types can +be converted to. + +Both type conversions and unification are defined in the syntax-agnostic +model to ensure consistency of behavior between syntaxes. + +Type conversions are broadly characterized into two categories: _safe_ and +_unsafe_. A conversion is "safe" if any distinct value of the source type +has a corresponding distinct value in the target type. A conversion is +"unsafe" if either the target type values are _not_ distinct (information +may be lost in conversion) or if some values of the source type do not have +any corresponding value in the target type. An unsafe conversion may result +in an error. + +A given type can always be converted to itself, which is a no-op. + +### Conversion of Null Values + +All null values are safely convertable to a null value of any other type, +regardless of other type-specific rules specified in the sections below. + +### Conversion to and from the Dynamic Pseudo-type + +Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds, +producing an unknown value of the target type. + +Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result +is the input value, verbatim. 
This is the only situation where the conversion +result value is not of the given target type. + +### Primitive Type Conversions + +Bidirectional conversions are available between the string and number types, +and between the string and boolean types. + +The bool value true corresponds to the string containing the characters "true", +while the bool value false corresponds to the string containing the characters +"false". Conversion from bool to string is safe, while the converse is +unsafe. The strings "1" and "0" are alternative string representations +of true and false respectively. It is an error to convert a string other than +the four in this paragraph to type bool. + +A number value is converted to string by translating its integer portion +into a sequence of decimal digits (`0` through `9`), and then if it has a +non-zero fractional part, a period `.` followed by a sequence of decimal +digits representing its fractional part. No exponent portion is included. +The number is converted at its full precision. Conversion from number to +string is safe. + +A string is converted to a number value by reversing the above mapping. +No exponent portion is allowed. Conversion from string to number is unsafe. +It is an error to convert a string that does not comply with the expected +syntax to type number. + +No direct conversion is available between the bool and number types. + +### Collection and Structural Type Conversions + +Conversion from set types to list types is _safe_, as long as their +element types are safely convertable. If the element types are _unsafely_ +convertable, then the collection conversion is also unsafe. Each set element +becomes a corresponding list element, in an undefined order. Although no +particular ordering is required, implementations _should_ produce list +elements in a consistent order for a given input set, as a convenience +to calling applications. + +Conversion from list types to set types is _unsafe_, as long as their element +types are convertable. Each distinct list item becomes a distinct set item. +If two list items are equal, one of the two is lost in the conversion. + +Conversion from tuple types to list types is permitted if all of the +tuple element types are convertable to the target list element type. +The safety of the conversion depends on the safety of each of the element +conversions. Each element in turn is converted to the list element type, +producing a list of identical length. + +Conversion from tuple types to set types is permitted, behaving as if the +tuple type was first converted to a list of the same element type and then +that list converted to the target set type. + +Conversion from object types to map types is permitted if all of the object +attribute types are convertable to the target map element type. The safety +of the conversion depends on the safety of each of the attribute conversions. +Each attribute in turn is converted to the map element type, and map element +keys are set to the name of each corresponding object attribute. + +Conversion from list and set types to tuple types is permitted, following +the opposite steps as the converse conversions. Such conversions are _unsafe_. +It is an error to convert a list or set to a tuple type whose number of +elements does not match the list or set length. + +Conversion from map types to object types is permitted if each map key +corresponds to an attribute in the target object type.
It is an error to +convert from a map value whose set of keys does not exactly match the target +type's attributes. The conversion takes the opposite steps of the converse +conversion. + +Conversion from one object type to another is permitted as long as the +common attribute names have convertable types. Any attribute present in the +target type but not in the source type is populated with a null value of +the appropriate type. + +Conversion from one tuple type to another is permitted as long as the +tuples have the same length and the elements have convertable types. + +### Type Unification + +Type unification is an operation that takes a list of types and attempts +to find a single type to which they can all be converted. Since some +type pairs have bidirectional conversions, preference is given to _safe_ +conversions. In technical terms, all possible types are arranged into +a lattice, from which a most general supertype is selected where possible. + +The type resulting from type unification may be one of the input types, or +it may be an entirely new type produced by combination of two or more +input types. + +The following rules do not guarantee a valid result. In addition to these +rules, unification fails if any of the given types are not convertable +(per the above rules) to the selected result type. + +The following unification rules apply transitively. That is, if a rule is +defined from A to B, and one from B to C, then A can unify to C. + +Number and bool types both unify with string by preferring string. + +Two collection types of the same kind unify according to the unification +of their element types. + +List and set types unify by preferring the list type. + +Map and object types unify by preferring the object type. + +List, set and tuple types unify by preferring the tuple type. + +The dynamic pseudo-type unifies with any other type by selecting that other +type. The dynamic pseudo-type is the result type only if _all_ input types +are the dynamic pseudo-type. + +Two object types unify by constructing a new type whose attributes are +the union of those of the two input types. Any common attributes themselves +have their types unified. + +Two tuple types of the same length unify by constructing a new type of the +same length whose elements are the unification of the corresponding elements +in the two input types. + +## Static Analysis + +In most applications, full expression evaluation is sufficient for understanding +the provided configuration. However, some specialized applications require more +direct access to the physical structures in the expressions, which can for +example allow the construction of new language constructs in terms of the +existing syntax elements. + +Since static analysis analyses the physical structure of configuration, the +details will vary depending on syntax. Each syntax must decide which of its +physical structures corresponds to the following analyses, producing error +diagnostics if they are applied to inappropriate expressions. + +The following are the required static analysis functions: + +- **Static List**: Require list/tuple construction syntax to be used and + return a list of expressions for each of the elements given. + +- **Static Map**: Require map/object construction syntax to be used and + return a list of key/value pairs -- both expressions -- for each of + the elements given.
The usual constraint that a map key must be a string + must not apply to this analysis, thus allowing applications to interpret + arbitrary keys as they see fit. + +- **Static Call**: Require function call syntax to be used and return an + object describing the called function name and a list of expressions + representing each of the call arguments. + +- **Static Traversal**: Require a reference to a symbol in the variable + scope and return a description of the path from the root scope to the + accessed attribute or index. + +The intent of a calling application using these features is to require a more +rigid interpretation of the configuration than in expression evaluation. +Syntax implementations should make use of the extra contextual information +provided in order to make an intuitive mapping onto the constructs of the +underlying syntax, possibly interpreting the expression slightly differently +than it would be interpreted in normal evaluation. + +Each syntax must define which of its expression elements each of the analyses +above applies to, and how those analyses behave given those expression elements. + +## Implementation Considerations + +Implementations of this specification are free to adopt any strategy that +produces behavior consistent with the specification. This non-normative +section describes some possible implementation strategies that are consistent +with the goals of this specification. + +### Language-agnosticism + +The language-agnosticism of this specification assumes that certain behaviors +are implemented separately for each syntax: + +- Matching of a body schema with the physical elements of a body in the + source language, to determine correspondence between physical constructs + and schema elements. + +- Implementing the _dynamic attributes_ body processing mode by either + interpreting all physical constructs as attributes or producing an error + if non-attribute constructs are present. + +- Providing an evaluation function for all possible expressions that produces + a value given an evaluation context. + +- Providing the static analysis functionality described above in a manner that + makes sense within the convention of the syntax. + +The suggested implementation strategy is to use an implementation language's +closest concept to an _abstract type_, _virtual type_ or _interface type_ +to represent both Body and Expression. Each language-specific implementation +can then provide an implementation of each of these types wrapping AST nodes +or other physical constructs from the language parser. diff --git a/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go new file mode 100644 index 00000000..98ada87b --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/static_expr.go @@ -0,0 +1,40 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +type staticExpr struct { + val cty.Value + rng Range +} + +// StaticExpr returns an Expression that always evaluates to the given value. +// +// This is useful to substitute default values for expressions that are +// not explicitly given in configuration and thus would otherwise have no +// Expression to return. +// +// Since expressions are expected to have a source range, the caller must +// provide one. Ideally this should be a real source range, but it can +// be a synthetic one (with an empty-string filename) if no suitable range +// is available. 
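+//
+// A hypothetical use (the value and range here are illustrative),
+// substituting a default value with a synthetic range:
+//
+//     rng := hcl.Range{Filename: "", Start: hcl.InitialPos, End: hcl.InitialPos}
+//     expr := hcl.StaticExpr(cty.StringVal("default"), rng)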
+func StaticExpr(val cty.Value, rng Range) Expression { + return staticExpr{val, rng} +} + +func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) { + return e.val, nil +} + +func (e staticExpr) Variables() []Traversal { + return nil +} + +func (e staticExpr) Range() Range { + return e.rng +} + +func (e staticExpr) StartRange() Range { + return e.rng +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/structure.go new file mode 100644 index 00000000..b336f300 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/structure.go @@ -0,0 +1,151 @@ +package hcl + +import ( + "github.com/zclconf/go-cty/cty" +) + +// File is the top-level node that results from parsing a HCL file. +type File struct { + Body Body + Bytes []byte + + // Nav is used to integrate with the "hcled" editor integration package, + // and with diagnostic information formatters. It is not for direct use + // by a calling application. + Nav interface{} +} + +// Block represents a nested block within a Body. +type Block struct { + Type string + Labels []string + Body Body + + DefRange Range // Range that can be considered the "definition" for seeking in an editor + TypeRange Range // Range for the block type declaration specifically. + LabelRanges []Range // Ranges for the label values specifically. +} + +// Blocks is a sequence of Block. +type Blocks []*Block + +// Attributes is a set of attributes keyed by their names. +type Attributes map[string]*Attribute + +// Body is a container for attributes and blocks. It serves as the primary +// unit of hierarchical structure within configuration. +// +// The content of a body cannot be meaningfully interpreted without a schema, +// so Body represents the raw body content and has methods that allow the +// content to be extracted in terms of a given schema. +type Body interface { + // Content verifies that the entire body content conforms to the given + // schema and then returns it, and/or returns diagnostics. The returned + // body content is valid if non-nil, regardless of whether Diagnostics + // are provided, but diagnostics should still be eventually shown to + // the user. + Content(schema *BodySchema) (*BodyContent, Diagnostics) + + // PartialContent is like Content except that it permits the configuration + // to contain additional blocks or attributes not specified in the + // schema. If any are present, the returned Body is non-nil and contains + // the remaining items from the body that were not selected by the schema. + PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics) + + // JustAttributes attempts to interpret all of the contents of the body + // as attributes, allowing for the contents to be accessed without a priori + // knowledge of the structure. + // + // The behavior of this method depends on the body's source language. + // Some languages, like JSON, can't distinguish between attributes and + // blocks without schema hints, but for languages that _can_ error + // diagnostics will be generated if any blocks are present in the body. + // + // Diagnostics may be produced for other reasons too, such as duplicate + // declarations of the same attribute. + JustAttributes() (Attributes, Diagnostics) + + // MissingItemRange returns a range that represents where a missing item + // might hypothetically be inserted. This is used when producing + // diagnostics about missing required attributes or blocks.
Not all bodies + // will have an obvious single insertion point, so the result here may + // be rather arbitrary. + MissingItemRange() Range +} + +// BodyContent is the result of applying a BodySchema to a Body. +type BodyContent struct { + Attributes Attributes + Blocks Blocks + + MissingItemRange Range +} + +// Attribute represents an attribute from within a body. +type Attribute struct { + Name string + Expr Expression + + Range Range + NameRange Range +} + +// Expression is a literal value or an expression provided in the +// configuration, which can be evaluated within a scope to produce a value. +type Expression interface { + // Value returns the value resulting from evaluating the expression + // in the given evaluation context. + // + // The context may be nil, in which case the expression may contain + // only constants and diagnostics will be produced for any non-constant + // sub-expressions. (The exact definition of this depends on the source + // language.) + // + // The context may instead be set but have either its Variables or + // Functions maps set to nil, in which case only use of these features + // will return diagnostics. + // + // Different diagnostics are provided depending on whether the given + // context maps are nil or empty. In the former case, the message + // tells the user that variables/functions are not permitted at all, + // while in the latter case usage will produce a "not found" error for + // the specific symbol in question. + Value(ctx *EvalContext) (cty.Value, Diagnostics) + + // Variables returns a list of variables referenced in the receiving + // expression. These are expressed as absolute Traversals, so may include + // additional information about how the variable is used, such as + // attribute lookups, which the calling application can potentially use + // to only selectively populate the scope. + Variables() []Traversal + + Range() Range + StartRange() Range +} + +// OfType filters the receiving block sequence by block type name, +// returning a new block sequence including only the blocks of the +// requested type. +func (els Blocks) OfType(typeName string) Blocks { + ret := make(Blocks, 0) + for _, el := range els { + if el.Type == typeName { + ret = append(ret, el) + } + } + return ret +} + +// ByType transforms the receiving block sequence into a map from type +// name to block sequences of only that type. +func (els Blocks) ByType() map[string]Blocks { + ret := make(map[string]Blocks) + for _, el := range els { + ty := el.Type + if ret[ty] == nil { + ret[ty] = make(Blocks, 0, 1) + } + ret[ty] = append(ret[ty], el) + } + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go b/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go new file mode 100644 index 00000000..8521814e --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go @@ -0,0 +1,117 @@ +package hcl + +// ----------------------------------------------------------------------------- +// The methods in this file all have the general pattern of making a best-effort +// to find one or more constructs that contain a given source position. +// +// These all operate by delegating to an optional method of the same name and +// signature on the file's root body, allowing each syntax to potentially +// provide its own implementations of these. For syntaxes that don't implement +// them, the result is always nil. 
+// ----------------------------------------------------------------------------- + +// BlocksAtPos attempts to find all of the blocks that contain the given +// position, ordered so that the outermost block is first and the innermost +// block is last. This is a best-effort method that may not be able to produce +// a complete result for all positions or for all HCL syntaxes. +// +// If the returned slice is non-empty, the first element is guaranteed to +// represent the same block as would be the result of OutermostBlockAtPos and +// the last element the result of InnermostBlockAtPos. However, the +// implementation may return two different objects describing the same block, +// so comparison by pointer identity is not possible. +// +// The result is nil if no blocks at all contain the given position. +func (f *File) BlocksAtPos(pos Pos) []*Block { + // The root body of the file must implement this interface in order + // to support BlocksAtPos. + type Interface interface { + BlocksAtPos(pos Pos) []*Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.BlocksAtPos(pos) +} + +// OutermostBlockAtPos attempts to find a top-level block in the receiving file +// that contains the given position. This is a best-effort method that may not +// be able to produce a result for all positions or for all HCL syntaxes. +// +// The result is nil if no single block could be selected for any reason. +func (f *File) OutermostBlockAtPos(pos Pos) *Block { + // The root body of the file must implement this interface in order + // to support OutermostBlockAtPos. + type Interface interface { + OutermostBlockAtPos(pos Pos) *Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.OutermostBlockAtPos(pos) +} + +// InnermostBlockAtPos attempts to find the most deeply-nested block in the +// receiving file that contains the given position. This is a best-effort +// method that may not be able to produce a result for all positions or for +// all HCL syntaxes. +// +// The result is nil if no single block could be selected for any reason. +func (f *File) InnermostBlockAtPos(pos Pos) *Block { + // The root body of the file must implement this interface in order + // to support InnermostBlockAtPos. + type Interface interface { + InnermostBlockAtPos(pos Pos) *Block + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.InnermostBlockAtPos(pos) +} + +// OutermostExprAtPos attempts to find an expression in the receiving file +// that contains the given position. This is a best-effort method that may not +// be able to produce a result for all positions or for all HCL syntaxes. +// +// Since expressions are often nested inside one another, this method returns +// the outermost "root" expression that is not contained by any other. +// +// The result is nil if no single expression could be selected for any reason. +func (f *File) OutermostExprAtPos(pos Pos) Expression { + // The root body of the file must implement this interface in order + // to support OutermostExprAtPos. + type Interface interface { + OutermostExprAtPos(pos Pos) Expression + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.OutermostExprAtPos(pos) +} + +// AttributeAtPos attempts to find an attribute definition in the receiving +// file that contains the given position. This is a best-effort method that may +// not be able to produce a result for all positions or for all HCL syntaxes. 
+// +// The result is nil if no single attribute could be selected for any reason. +func (f *File) AttributeAtPos(pos Pos) *Attribute { + // The root body of the file must implement this interface in order + // to support AttributeAtPos. + type Interface interface { + AttributeAtPos(pos Pos) *Attribute + } + + impl, ok := f.Body.(Interface) + if !ok { + return nil + } + return impl.AttributeAtPos(pos) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go new file mode 100644 index 00000000..d7101970 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go @@ -0,0 +1,293 @@ +package hcl + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty" +) + +// A Traversal is a description of traversing through a value via a +// series of operations such as attribute lookup, index lookup, etc. +// +// It is used to look up values in scopes, for example. +// +// The traversal operations are implementations of interface Traverser. +// This is a closed set of implementations, so the interface cannot be +// implemented from outside this package. +// +// A traversal can be absolute (its first value is a symbol name) or relative +// (starts from an existing value). +type Traversal []Traverser + +// TraversalJoin appends a relative traversal to an absolute traversal to +// produce a new absolute traversal. +func TraversalJoin(abs Traversal, rel Traversal) Traversal { + if abs.IsRelative() { + panic("first argument to TraversalJoin must be absolute") + } + if !rel.IsRelative() { + panic("second argument to TraversalJoin must be relative") + } + + ret := make(Traversal, len(abs)+len(rel)) + copy(ret, abs) + copy(ret[len(abs):], rel) + return ret +} + +// TraverseRel applies the receiving traversal to the given value, returning +// the resulting value. This is supported only for relative traversals, +// and will panic if applied to an absolute traversal. +func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) { + if !t.IsRelative() { + panic("can't use TraverseRel on an absolute traversal") + } + + current := val + var diags Diagnostics + for _, tr := range t { + var newDiags Diagnostics + current, newDiags = tr.TraversalStep(current) + diags = append(diags, newDiags...) + if newDiags.HasErrors() { + return cty.DynamicVal, diags + } + } + return current, diags +} + +// TraverseAbs applies the receiving traversal to the given eval context, +// returning the resulting value. This is supported only for absolute +// traversals, and will panic if applied to a relative traversal.
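+//
+// As a hedged sketch (variable names invented for this example), resolving
+// a traversal equivalent to "foo.bar" against a scope could look like:
+//
+//     ctx := &hcl.EvalContext{
+//         Variables: map[string]cty.Value{
+//             "foo": cty.ObjectVal(map[string]cty.Value{
+//                 "bar": cty.StringVal("baz"),
+//             }),
+//         },
+//     }
+//     val, diags := traversal.TraverseAbs(ctx)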
+func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) { + if t.IsRelative() { + panic("can't use TraverseAbs on a relative traversal") + } + + split := t.SimpleSplit() + root := split.Abs[0].(TraverseRoot) + name := root.Name + + thisCtx := ctx + hasNonNil := false + for thisCtx != nil { + if thisCtx.Variables == nil { + thisCtx = thisCtx.parent + continue + } + hasNonNil = true + val, exists := thisCtx.Variables[name] + if exists { + return split.Rel.TraverseRel(val) + } + thisCtx = thisCtx.parent + } + + if !hasNonNil { + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Variables not allowed", + Detail: "Variables may not be used here.", + Subject: &root.SrcRange, + }, + } + } + + suggestions := make([]string, 0, len(ctx.Variables)) + thisCtx = ctx + for thisCtx != nil { + for k := range thisCtx.Variables { + suggestions = append(suggestions, k) + } + thisCtx = thisCtx.parent + } + suggestion := nameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + return cty.DynamicVal, Diagnostics{ + { + Severity: DiagError, + Summary: "Unknown variable", + Detail: fmt.Sprintf("There is no variable named %q.%s", name, suggestion), + Subject: &root.SrcRange, + }, + } +} + +// IsRelative returns true if the receiver is a relative traversal, or false +// otherwise. +func (t Traversal) IsRelative() bool { + if len(t) == 0 { + return true + } + if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot { + return false + } + return true +} + +// SimpleSplit returns a TraversalSplit where the name lookup is the absolute +// part and the remainder is the relative part. Supported only for +// absolute traversals, and will panic if applied to a relative traversal. +// +// This can be used by applications that have a relatively-simple variable +// namespace where only the top-level is directly populated in the scope, with +// everything else handled by relative lookups from those initial values. +func (t Traversal) SimpleSplit() TraversalSplit { + if t.IsRelative() { + panic("can't use SimpleSplit on a relative traversal") + } + return TraversalSplit{ + Abs: t[0:1], + Rel: t[1:], + } +} + +// RootName returns the root name for an absolute traversal. Will panic if +// called on a relative traversal. +func (t Traversal) RootName() string { + if t.IsRelative() { + panic("can't use RootName on a relative traversal") + } + return t[0].(TraverseRoot).Name +} + +// SourceRange returns the source range for the traversal. +func (t Traversal) SourceRange() Range { + if len(t) == 0 { + // Nothing useful to return here, but we'll return something + // that's correctly-typed at least. + return Range{} + } + + return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange()) +} + +// TraversalSplit represents a pair of traversals, the first of which is +// an absolute traversal and the second of which is relative to the first. +// +// This is used by calling applications that only populate prefixes of the +// traversals in the scope, with Abs representing the part coming from the +// scope and Rel representing the remaining steps once that part is +// retrieved. +type TraversalSplit struct { + Abs Traversal + Rel Traversal +} + +// TraverseAbs traverses from a scope to the value resulting from the +// absolute traversal.
+func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
+	return t.Abs.TraverseAbs(ctx)
+}
+
+// TraverseRel traverses from a given value, assumed to be the result of
+// TraverseAbs on some scope, to a final result for the entire split traversal.
+func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
+	return t.Rel.TraverseRel(val)
+}
+
+// Traverse is a convenience function to apply TraverseAbs followed by
+// TraverseRel.
+func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
+	v1, diags := t.TraverseAbs(ctx)
+	if diags.HasErrors() {
+		return cty.DynamicVal, diags
+	}
+	v2, newDiags := t.TraverseRel(v1)
+	diags = append(diags, newDiags...)
+	return v2, diags
+}
+
+// Join concatenates together the Abs and Rel parts to produce a single
+// absolute traversal.
+func (t TraversalSplit) Join() Traversal {
+	return TraversalJoin(t.Abs, t.Rel)
+}
+
+// RootName returns the root name for the absolute part of the split.
+func (t TraversalSplit) RootName() string {
+	return t.Abs.RootName()
+}
+
+// A Traverser is a step within a Traversal.
+type Traverser interface {
+	TraversalStep(cty.Value) (cty.Value, Diagnostics)
+	SourceRange() Range
+	isTraverserSigil() isTraverser
+}
+
+// Embed this in a struct to declare it as a Traverser.
+type isTraverser struct {
+}
+
+func (tr isTraverser) isTraverserSigil() isTraverser {
+	return isTraverser{}
+}
+
+// TraverseRoot looks up a root name in a scope. It is used as the first step
+// of an absolute Traversal, and cannot itself be traversed directly.
+type TraverseRoot struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+// TraversalStep on a TraverseRoot immediately panics, because absolute
+// traversals cannot be directly traversed.
+func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
+	panic("Cannot traverse an absolute traversal")
+}
+
+func (tn TraverseRoot) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseAttr looks up an attribute in its initial value.
+type TraverseAttr struct {
+	isTraverser
+	Name     string
+	SrcRange Range
+}
+
+func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return GetAttr(val, tn.Name, &tn.SrcRange)
+}
+
+func (tn TraverseAttr) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseIndex applies the index operation to its initial value.
+type TraverseIndex struct {
+	isTraverser
+	Key      cty.Value
+	SrcRange Range
+}
+
+func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	return Index(val, tn.Key, &tn.SrcRange)
+}
+
+func (tn TraverseIndex) SourceRange() Range {
+	return tn.SrcRange
+}
+
+// TraverseSplat applies the splat operation to its initial value.
+type TraverseSplat struct {
+	isTraverser
+	Each     Traversal
+	SrcRange Range
+}
+
+func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
+	panic("TraverseSplat not yet implemented")
+}
+
+func (tn TraverseSplat) SourceRange() Range {
+	return tn.SrcRange
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
new file mode 100644
index 00000000..f69d5fe9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -0,0 +1,124 @@
+package hcl
+
+// AbsTraversalForExpr attempts to interpret the given expression as
+// an absolute traversal, or returns error diagnostic(s) if that is
+// not possible for the given expression.
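+//
+// For example (an illustrative sketch, not part of the original
+// documentation), an expression written as foo.bar[0] would yield a
+// Traversal whose steps are a TraverseRoot for "foo", a TraverseAttr for
+// "bar", and a TraverseIndex for the key 0:
+//
+//	traversal, diags := AbsTraversalForExpr(expr)
+//	if !diags.HasErrors() {
+//		rootName := traversal.RootName() // "foo"
+//	}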
+//
+// A particular Expression implementation can support this function by
+// offering a method called AsTraversal that takes no arguments and
+// returns either a valid absolute traversal or nil to indicate that
+// no traversal is possible. Alternatively, an implementation can support
+// UnwrapExpression to delegate handling of this function to a wrapped
+// Expression object.
+//
+// In most cases the calling application is interested in the value
+// that results from an expression, but in rarer cases the application
+// needs to see the name of the variable and subsequent
+// attributes/indexes itself, for example to allow users to give references
+// to the variables themselves rather than to their values. An implementer
+// of this function should at least support attribute and index steps.
+func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	type asTraversal interface {
+		AsTraversal() Traversal
+	}
+
+	physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
+		_, supported := expr.(asTraversal)
+		return supported
+	})
+
+	if asT, supported := physExpr.(asTraversal); supported {
+		if traversal := asT.AsTraversal(); traversal != nil {
+			return traversal, nil
+		}
+	}
+	return nil, Diagnostics{
+		&Diagnostic{
+			Severity: DiagError,
+			Summary:  "Invalid expression",
+			Detail:   "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.",
+			Subject:  expr.Range().Ptr(),
+		},
+	}
+}
+
+// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
+// a relative traversal instead. Due to the nature of HCL expressions, the
+// first element of the returned traversal is always a TraverseAttr, and
+// then it will be followed by zero or more other steps.
+//
+// Any expression accepted by AbsTraversalForExpr is also accepted by
+// RelTraversalForExpr.
+func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
+	traversal, diags := AbsTraversalForExpr(expr)
+	if len(traversal) > 0 {
+		ret := make(Traversal, len(traversal))
+		copy(ret, traversal)
+		root := traversal[0].(TraverseRoot)
+		ret[0] = TraverseAttr{
+			Name:     root.Name,
+			SrcRange: root.SrcRange,
+		}
+		return ret, diags
+	}
+	return traversal, diags
+}
+
+// ExprAsKeyword attempts to interpret the given expression as a static keyword,
+// returning the keyword string if possible, and the empty string if not.
+//
+// A static keyword, for the sake of this function, is a single identifier.
+// For example, the following attribute has an expression that would produce
+// the keyword "foo":
+//
+//	example = foo
+//
+// This function is a variant of AbsTraversalForExpr, which uses the same
+// interface on the given expression. This helper constrains the result
+// further by requiring only a single root identifier.
+//
+// This function is intended to be used with the following idiom, to recognize
+// situations where one of a fixed set of keywords is required and arbitrary
+// expressions are not allowed:
+//
+//	switch hcl.ExprAsKeyword(expr) {
+//	case "allow":
+//		// (take suitable action for keyword "allow")
+//	case "deny":
+//		// (take suitable action for keyword "deny")
+//	default:
+//		diags = append(diags, &hcl.Diagnostic{
+//			// ... "invalid keyword" diagnostic message ...
+//		})
++//	}
+//
+// The above approach will generate the same message for both the use of an
+// unrecognized keyword and for not using a keyword at all, which is usually
+// reasonable if the message specifies that the given value must be a keyword
+// from that fixed list.
+//
+// Note that in the native syntax the keywords "true", "false", and "null" are
+// recognized as literal values during parsing and so these reserved words
+// cannot be accepted as keywords by this function.
+//
+// Since interpreting an expression as a keyword bypasses usual expression
+// evaluation, it should be used sparingly for situations where e.g. one of
+// a fixed set of keywords is used in a structural way in a special attribute
+// to affect the further processing of a block.
+func ExprAsKeyword(expr Expression) string {
+	type asTraversal interface {
+		AsTraversal() Traversal
+	}
+
+	physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
+		_, supported := expr.(asTraversal)
+		return supported
+	})
+
+	if asT, supported := physExpr.(asTraversal); supported {
+		if traversal := asT.AsTraversal(); len(traversal) == 1 {
+			return traversal.RootName()
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go
new file mode 100644
index 00000000..7e652e9b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/block_labels.go
@@ -0,0 +1,21 @@
+package hcldec
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+type blockLabel struct {
+	Value string
+	Range hcl.Range
+}
+
+func labelsForBlock(block *hcl.Block) []blockLabel {
+	ret := make([]blockLabel, len(block.Labels))
+	for i := range block.Labels {
+		ret[i] = blockLabel{
+			Value: block.Labels[i],
+			Range: block.LabelRanges[i],
+		}
+	}
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/decode.go b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go
new file mode 100644
index 00000000..6cf93fed
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/decode.go
@@ -0,0 +1,36 @@
+package hcldec
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+)
+
+func decode(body hcl.Body, blockLabels []blockLabel, ctx *hcl.EvalContext, spec Spec, partial bool) (cty.Value, hcl.Body, hcl.Diagnostics) {
+	schema := ImpliedSchema(spec)
+
+	var content *hcl.BodyContent
+	var diags hcl.Diagnostics
+	var leftovers hcl.Body
+
+	if partial {
+		content, leftovers, diags = body.PartialContent(schema)
+	} else {
+		content, diags = body.Content(schema)
+	}
+
+	val, valDiags := spec.decode(content, blockLabels, ctx)
+	diags = append(diags, valDiags...)
+
+	return val, leftovers, diags
+}
+
+func impliedType(spec Spec) cty.Type {
+	return spec.impliedType()
+}
+
+func sourceRange(body hcl.Body, blockLabels []blockLabel, spec Spec) hcl.Range {
+	schema := ImpliedSchema(spec)
+	content, _, _ := body.PartialContent(schema)
+
+	return spec.sourceRange(content, blockLabels)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/doc.go b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go
new file mode 100644
index 00000000..23bfe542
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/doc.go
@@ -0,0 +1,12 @@
+// Package hcldec provides a higher-level API for unpacking the content of
+// HCL bodies, implemented in terms of the low-level "Content" API exposed
+// by the bodies themselves.
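+//
+// As a rough illustration (a hypothetical schema, not part of the original
+// documentation), a body with a single string attribute "name" could be
+// decoded like so:
+//
+//	spec := hcldec.ObjectSpec{
+//		"name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: true},
+//	}
+//	val, diags := hcldec.Decode(body, spec, nil)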
+// +// It allows decoding an entire nested configuration in a single operation +// by providing a description of the intended structure. +// +// For some applications it may be more convenient to use the "gohcl" +// package, which has a similar purpose but decodes directly into native +// Go data types. hcldec instead targets the cty type system, and thus allows +// a cty-driven application to remain within that type system. +package hcldec diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/gob.go b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go new file mode 100644 index 00000000..e2027cfd --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/gob.go @@ -0,0 +1,23 @@ +package hcldec + +import ( + "encoding/gob" +) + +func init() { + // Every Spec implementation should be registered with gob, so that + // specs can be sent over gob channels, such as using + // github.com/hashicorp/go-plugin with plugins that need to describe + // what shape of configuration they are expecting. + gob.Register(ObjectSpec(nil)) + gob.Register(TupleSpec(nil)) + gob.Register((*AttrSpec)(nil)) + gob.Register((*LiteralSpec)(nil)) + gob.Register((*ExprSpec)(nil)) + gob.Register((*BlockSpec)(nil)) + gob.Register((*BlockListSpec)(nil)) + gob.Register((*BlockSetSpec)(nil)) + gob.Register((*BlockMapSpec)(nil)) + gob.Register((*BlockLabelSpec)(nil)) + gob.Register((*DefaultSpec)(nil)) +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go new file mode 100644 index 00000000..3c803632 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go @@ -0,0 +1,81 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// Decode interprets the given body using the given specification and returns +// the resulting value. If the given body is not valid per the spec, error +// diagnostics are returned and the returned value is likely to be incomplete. +// +// The ctx argument may be nil, in which case any references to variables or +// functions will produce error diagnostics. +func Decode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + val, _, diags := decode(body, nil, ctx, spec, false) + return val, diags +} + +// PartialDecode is like Decode except that it permits "leftover" items in +// the top-level body, which are returned as a new body to allow for +// further processing. +// +// Any descendent block bodies are _not_ decoded partially and thus must +// be fully described by the given specification. +func PartialDecode(body hcl.Body, spec Spec, ctx *hcl.EvalContext) (cty.Value, hcl.Body, hcl.Diagnostics) { + return decode(body, nil, ctx, spec, true) +} + +// ImpliedType returns the value type that should result from decoding the +// given spec. +func ImpliedType(spec Spec) cty.Type { + return impliedType(spec) +} + +// SourceRange interprets the given body using the given specification and +// then returns the source range of the value that would be used to +// fulfill the spec. +// +// This can be used if application-level validation detects value errors, to +// obtain a reasonable SourceRange to use for generated diagnostics. It works +// best when applied to specific body items (e.g. using AttrSpec, BlockSpec, ...) +// as opposed to entire bodies using ObjectSpec, TupleSpec. The result will +// be less useful the broader the specification, so e.g. 
a spec that returns +// the entirety of all of the blocks of a given type is likely to be +// _particularly_ arbitrary and useless. +// +// If the given body is not valid per the given spec, the result is best-effort +// and may not actually be something ideal. It's expected that an application +// will already have used Decode or PartialDecode earlier and thus had an +// opportunity to detect and report spec violations. +func SourceRange(body hcl.Body, spec Spec) hcl.Range { + return sourceRange(body, nil, spec) +} + +// ChildBlockTypes returns a map of all of the child block types declared +// by the given spec, with block type names as keys and the associated +// nested body specs as values. +func ChildBlockTypes(spec Spec) map[string]Spec { + ret := map[string]Spec{} + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if bs, ok := s.(blockSpec); ok { + for _, blockS := range bs.blockHeaderSchemata() { + nested := bs.nestedSpec() + if nested != nil { // nil can be returned to dynamically opt out of this interface + ret[blockS.Type] = nested + } + } + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return ret +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/schema.go b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go new file mode 100644 index 00000000..b57bd969 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/schema.go @@ -0,0 +1,36 @@ +package hcldec + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// ImpliedSchema returns the *hcl.BodySchema implied by the given specification. +// This is the schema that the Decode function will use internally to +// access the content of a given body. +func ImpliedSchema(spec Spec) *hcl.BodySchema { + var attrs []hcl.AttributeSchema + var blocks []hcl.BlockHeaderSchema + + // visitSameBodyChildren walks through the spec structure, calling + // the given callback for each descendent spec encountered. We are + // interested in the specs that reference attributes and blocks. + var visit visitFunc + visit = func(s Spec) { + if as, ok := s.(attrSpec); ok { + attrs = append(attrs, as.attrSchemata()...) + } + + if bs, ok := s.(blockSpec); ok { + blocks = append(blocks, bs.blockHeaderSchemata()...) + } + + s.visitSameBodyChildren(visit) + } + + visit(spec) + + return &hcl.BodySchema{ + Attributes: attrs, + Blocks: blocks, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go new file mode 100644 index 00000000..f9da7f65 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go @@ -0,0 +1,1567 @@ +package hcldec + +import ( + "bytes" + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// A Spec is a description of how to decode a hcl.Body to a cty.Value. +// +// The various other types in this package whose names end in "Spec" are +// the spec implementations. The most common top-level spec is ObjectSpec, +// which decodes body content into a cty.Value of an object type. +type Spec interface { + // Perform the decode operation on the given body, in the context of + // the given block (which might be null), using the given eval context. 
+ // + // "block" is provided only by the nested calls performed by the spec + // types that work on block bodies. + decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) + + // Return the cty.Type that should be returned when decoding a body with + // this spec. + impliedType() cty.Type + + // Call the given callback once for each of the nested specs that would + // get decoded with the same body and block as the receiver. This should + // not descend into the nested specs used when decoding blocks. + visitSameBodyChildren(cb visitFunc) + + // Determine the source range of the value that would be returned for the + // spec in the given content, in the context of the given block + // (which might be null). If the corresponding item is missing, return + // a place where it might be inserted. + sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range +} + +type visitFunc func(spec Spec) + +// An ObjectSpec is a Spec that produces a cty.Value of an object type whose +// attributes correspond to the keys of the spec map. +type ObjectSpec map[string]Spec + +// attrSpec is implemented by specs that require attributes from the body. +type attrSpec interface { + attrSchemata() []hcl.AttributeSchema +} + +// blockSpec is implemented by specs that require blocks from the body. +type blockSpec interface { + blockHeaderSchemata() []hcl.BlockHeaderSchema + nestedSpec() Spec +} + +// specNeedingVariables is implemented by specs that can use variables +// from the EvalContext, to declare which variables they need. +type specNeedingVariables interface { + variablesNeeded(content *hcl.BodyContent) []hcl.Traversal +} + +func (s ObjectSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s ObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make(map[string]cty.Value, len(s)) + var diags hcl.Diagnostics + + for k, spec := range s { + var kd hcl.Diagnostics + vals[k], kd = spec.decode(content, blockLabels, ctx) + diags = append(diags, kd...) + } + + return cty.ObjectVal(vals), diags +} + +func (s ObjectSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyObject + } + + attrTypes := make(map[string]cty.Type) + for k, childSpec := range s { + attrTypes[k] = childSpec.impliedType() + } + return cty.Object(attrTypes) +} + +func (s ObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// A TupleSpec is a Spec that produces a cty.Value of a tuple type whose +// elements correspond to the elements of the spec slice. +type TupleSpec []Spec + +func (s TupleSpec) visitSameBodyChildren(cb visitFunc) { + for _, c := range s { + cb(c) + } +} + +func (s TupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + vals := make([]cty.Value, len(s)) + var diags hcl.Diagnostics + + for i, spec := range s { + var ed hcl.Diagnostics + vals[i], ed = spec.decode(content, blockLabels, ctx) + diags = append(diags, ed...) 
+ } + + return cty.TupleVal(vals), diags +} + +func (s TupleSpec) impliedType() cty.Type { + if len(s) == 0 { + return cty.EmptyTuple + } + + attrTypes := make([]cty.Type, len(s)) + for i, childSpec := range s { + attrTypes[i] = childSpec.impliedType() + } + return cty.Tuple(attrTypes) +} + +func (s TupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // This is not great, but the best we can do. In practice, it's rather + // strange to ask for the source range of an entire top-level body, since + // that's already readily available to the caller. + return content.MissingItemRange +} + +// An AttrSpec is a Spec that evaluates a particular attribute expression in +// the body and returns its resulting value converted to the requested type, +// or produces a diagnostic if the type is incorrect. +type AttrSpec struct { + Name string + Type cty.Type + Required bool +} + +func (s *AttrSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *AttrSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + attr, exists := content.Attributes[s.Name] + if !exists { + return nil + } + + return attr.Expr.Variables() +} + +// attrSpec implementation +func (s *AttrSpec) attrSchemata() []hcl.AttributeSchema { + return []hcl.AttributeSchema{ + { + Name: s.Name, + Required: s.Required, + }, + } +} + +func (s *AttrSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + attr, exists := content.Attributes[s.Name] + if !exists { + return content.MissingItemRange + } + + return attr.Expr.Range() +} + +func (s *AttrSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + attr, exists := content.Attributes[s.Name] + if !exists { + // We don't need to check required and emit a diagnostic here, because + // that would already have happened when building "content". + return cty.NullVal(s.Type), nil + } + + val, diags := attr.Expr.Value(ctx) + + convVal, err := convert.Convert(val, s.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect attribute value type", + Detail: fmt.Sprintf( + "Inappropriate value for attribute %q: %s.", + s.Name, err.Error(), + ), + Subject: attr.Expr.StartRange().Ptr(), + Context: hcl.RangeBetween(attr.NameRange, attr.Expr.StartRange()).Ptr(), + }) + // We'll return an unknown value of the _correct_ type so that the + // incomplete result can still be used for some analysis use-cases. + val = cty.UnknownVal(s.Type) + } else { + val = convVal + } + + return val, diags +} + +func (s *AttrSpec) impliedType() cty.Type { + return s.Type +} + +// A LiteralSpec is a Spec that produces the given literal value, ignoring +// the given body. +type LiteralSpec struct { + Value cty.Value +} + +func (s *LiteralSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +func (s *LiteralSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Value, nil +} + +func (s *LiteralSpec) impliedType() cty.Type { + return s.Value.Type() +} + +func (s *LiteralSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // No sensible range to return for a literal, so the caller had better + // ensure it doesn't cause any diagnostics. + return hcl.Range{ + Filename: "", + } +} + +// An ExprSpec is a Spec that evaluates the given expression, ignoring the +// given body. 
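+//
+// A brief sketch (hypothetical names, not part of the original
+// documentation): ExprSpec can splice an already-parsed expression into a
+// decoded result, alongside values decoded from the body itself:
+//
+//	spec := ObjectSpec{
+//		"addr": &ExprSpec{Expr: addrExpr}, // addrExpr obtained elsewhere
+//	}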
+type ExprSpec struct { + Expr hcl.Expression +} + +func (s *ExprSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// specNeedingVariables implementation +func (s *ExprSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + return s.Expr.Variables() +} + +func (s *ExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + return s.Expr.Value(ctx) +} + +func (s *ExprSpec) impliedType() cty.Type { + // We can't know the type of our expression until we evaluate it + return cty.DynamicPseudoType +} + +func (s *ExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + return s.Expr.Range() +} + +// A BlockSpec is a Spec that produces a cty.Value by decoding the contents +// of a single nested block of a given type, using a nested spec. +// +// If the Required flag is not set, the nested block may be omitted, in which +// case a null value is produced. If it _is_ set, an error diagnostic is +// produced if there are no nested blocks of the given type. +type BlockSpec struct { + TypeName string + Nested Spec + Required bool +} + +func (s *BlockSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: findLabelSpecs(s.Nested), + }, + } +} + +// blockSpec implementation +func (s *BlockSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return nil + } + + return Variables(childBlock.Body, s.Nested) +} + +func (s *BlockSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + if childBlock != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "Only one block of type %q is allowed. Previous definition was at %s.", + s.TypeName, childBlock.DefRange.String(), + ), + Subject: &candidate.DefRange, + }) + break + } + + childBlock = candidate + } + + if childBlock == nil { + if s.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Missing %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block of type %q is required here.", s.TypeName, + ), + Subject: &content.MissingItemRange, + }) + } + return cty.NullVal(s.Nested.impliedType()), diags + } + + if s.Nested == nil { + panic("BlockSpec with no Nested Spec") + } + val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false) + diags = append(diags, childDiags...) 
+	return val, diags
+}
+
+func (s *BlockSpec) impliedType() cty.Type {
+	return s.Nested.impliedType()
+}
+
+func (s *BlockSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockListSpec is a Spec that produces a cty list of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+type BlockListSpec struct {
+	TypeName string
+	Nested   Spec
+	MinItems int
+	MaxItems int
+}
+
+func (s *BlockListSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockListSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: findLabelSpecs(s.Nested),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockListSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockListSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockListSpec with no Nested Spec")
+	}
+
+	var elems []cty.Value
+	var sourceRanges []hcl.Range
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
+		diags = append(diags, childDiags...)
+		elems = append(elems, val)
+		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
+	}
+
+	if len(elems) < s.MinItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
+			Subject:  &content.MissingItemRange,
+		})
+	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
+			Subject:  &sourceRanges[s.MaxItems],
+		})
+	}
+
+	var ret cty.Value
+
+	if len(elems) == 0 {
+		ret = cty.ListValEmpty(s.Nested.impliedType())
+	} else {
+		// Since our target is a list, all of the decoded elements must have the
+		// same type or cty.ListVal will panic below. Different types can arise
+		// if there is an attribute spec of type cty.DynamicPseudoType in the
+		// nested spec; all given values must be convertible to a single type
+		// in order for the result to be considered valid.
+		etys := make([]cty.Type, len(elems))
+		for i, v := range elems {
+			etys[i] = v.Type()
+		}
+		ety, convs := convert.UnifyUnsafe(etys)
+		if ety == cty.NilType {
+			// FIXME: This is a pretty terrible error message.
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
+				Detail:   "Corresponding attributes in all blocks of this type must be the same.",
+				Subject:  &sourceRanges[0],
+			})
+			return cty.DynamicVal, diags
+		}
+		for i, v := range elems {
+			if convs[i] != nil {
+				newV, err := convs[i](v)
+				if err != nil {
+					// FIXME: This is a pretty terrible error message.
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
+						Detail:   fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
+						Subject:  &sourceRanges[i],
+					})
+					// Bail early here so we won't panic below in cty.ListVal
+					return cty.DynamicVal, diags
+				}
+				elems[i] = newV
+			}
+		}
+
+		ret = cty.ListVal(elems)
+	}
+
+	return ret, diags
+}
+
+func (s *BlockListSpec) impliedType() cty.Type {
+	return cty.List(s.Nested.impliedType())
+}
+
+func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockTupleSpec is a Spec that produces a cty tuple of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+//
+// This is similar to BlockListSpec, but it permits the nested blocks to have
+// different result types in situations where cty.DynamicPseudoType attributes
+// are present.
+type BlockTupleSpec struct {
+	TypeName string
+	Nested   Spec
+	MinItems int
+	MaxItems int
+}
+
+func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: findLabelSpecs(s.Nested),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockTupleSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockTupleSpec with no Nested Spec")
+	}
+
+	var elems []cty.Value
+	var sourceRanges []hcl.Range
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
+		diags = append(diags, childDiags...)
+		elems = append(elems, val)
+		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
+	}
+
+	if len(elems) < s.MinItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
+			Subject:  &content.MissingItemRange,
+		})
+	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
+			Subject:  &sourceRanges[s.MaxItems],
+		})
+	}
+
+	var ret cty.Value
+
+	if len(elems) == 0 {
+		ret = cty.EmptyTupleVal
+	} else {
+		ret = cty.TupleVal(elems)
+	}
+
+	return ret, diags
+}
+
+func (s *BlockTupleSpec) impliedType() cty.Type {
+	// We can't predict our type, because we don't know how many blocks
+	// there will be until we decode.
+	return cty.DynamicPseudoType
+}
+
+func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockSetSpec is a Spec that produces a cty set of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+type BlockSetSpec struct {
+	TypeName string
+	Nested   Spec
+	MinItems int
+	MaxItems int
+}
+
+func (s *BlockSetSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockSetSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: findLabelSpecs(s.Nested),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockSetSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockSetSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockSetSpec with no Nested Spec")
+	}
+
+	var elems []cty.Value
+	var sourceRanges []hcl.Range
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
+		diags = append(diags, childDiags...)
+		elems = append(elems, val)
+		sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
+	}
+
+	if len(elems) < s.MinItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Insufficient %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
+			Subject:  &content.MissingItemRange,
+		})
+	} else if s.MaxItems > 0 && len(elems) > s.MaxItems {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Too many %s blocks", s.TypeName),
+			Detail:   fmt.Sprintf("No more than %d %q blocks are allowed.", s.MaxItems, s.TypeName),
+			Subject:  &sourceRanges[s.MaxItems],
+		})
+	}
+
+	var ret cty.Value
+
+	if len(elems) == 0 {
+		ret = cty.SetValEmpty(s.Nested.impliedType())
+	} else {
+		// Since our target is a set, all of the decoded elements must have the
+		// same type or cty.SetVal will panic below. Different types can arise
+		// if there is an attribute spec of type cty.DynamicPseudoType in the
+		// nested spec; all given values must be convertible to a single type
+		// in order for the result to be considered valid.
+		etys := make([]cty.Type, len(elems))
+		for i, v := range elems {
+			etys[i] = v.Type()
+		}
+		ety, convs := convert.UnifyUnsafe(etys)
+		if ety == cty.NilType {
+			// FIXME: This is a pretty terrible error message.
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
+				Detail:   "Corresponding attributes in all blocks of this type must be the same.",
+				Subject:  &sourceRanges[0],
+			})
+			return cty.DynamicVal, diags
+		}
+		for i, v := range elems {
+			if convs[i] != nil {
+				newV, err := convs[i](v)
+				if err != nil {
+					// FIXME: This is a pretty terrible error message.
+					diags = append(diags, &hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  fmt.Sprintf("Inconsistent argument types in %s blocks", s.TypeName),
+						Detail:   fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
+						Subject:  &sourceRanges[i],
+					})
+					// Bail early here so we won't panic below in cty.SetVal
+					return cty.DynamicVal, diags
+				}
+				elems[i] = newV
+			}
+		}
+
+		ret = cty.SetVal(elems)
+	}
+
+	return ret, diags
+}
+
+func (s *BlockSetSpec) impliedType() cty.Type {
+	return cty.Set(s.Nested.impliedType())
+}
+
+func (s *BlockSetSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+
+	var childBlock *hcl.Block
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+
+		childBlock = candidate
+		break
+	}
+
+	if childBlock == nil {
+		return content.MissingItemRange
+	}
+
+	return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
+}
+
+// A BlockMapSpec is a Spec that produces a cty map of the results of
+// decoding all of the nested blocks of a given type, using a nested spec.
+//
+// One level of map structure is created for each of the given label names.
+// There must be at least one given label name.
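+//
+// An illustrative sketch (hypothetical block type and labels, not part of
+// the original documentation): blocks written as
+//
+//	instance "web" "a" {
+//	  ami = "ami-123456"
+//	}
+//
+// could be decoded into a map of maps keyed by the two labels using:
+//
+//	spec := &BlockMapSpec{
+//		TypeName:   "instance",
+//		LabelNames: []string{"group", "name"},
+//		Nested: ObjectSpec{
+//			"ami": &AttrSpec{Name: "ami", Type: cty.String},
+//		},
+//	}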
+type BlockMapSpec struct {
+	TypeName   string
+	LabelNames []string
+	Nested     Spec
+}
+
+func (s *BlockMapSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node ("Nested" does not use the same body)
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	return []hcl.BlockHeaderSchema{
+		{
+			Type:       s.TypeName,
+			LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
+		},
+	}
+}
+
+// blockSpec implementation
+func (s *BlockMapSpec) nestedSpec() Spec {
+	return s.Nested
+}
+
+// specNeedingVariables implementation
+func (s *BlockMapSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
+	var ret []hcl.Traversal
+
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		ret = append(ret, Variables(childBlock.Body, s.Nested)...)
+	}
+
+	return ret
+}
+
+func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	if s.Nested == nil {
+		panic("BlockMapSpec with no Nested Spec")
+	}
+	if ImpliedType(s).HasDynamicTypes() {
+		panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec")
+	}
+
+	elems := map[string]interface{}{}
+	for _, childBlock := range content.Blocks {
+		if childBlock.Type != s.TypeName {
+			continue
+		}
+
+		childLabels := labelsForBlock(childBlock)
+		val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
+		targetMap := elems
+		for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
+			if _, exists := targetMap[key]; !exists {
+				targetMap[key] = make(map[string]interface{})
+			}
+			targetMap = targetMap[key].(map[string]interface{})
+		}
+
+		diags = append(diags, childDiags...)
+
+		key := childBlock.Labels[len(s.LabelNames)-1]
+		if _, exists := targetMap[key]; exists {
+			labelsBuf := bytes.Buffer{}
+			for _, label := range childBlock.Labels {
+				fmt.Fprintf(&labelsBuf, " %q", label)
+			}
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Duplicate %s block", s.TypeName),
+				Detail: fmt.Sprintf(
+					"A block for %s%s was already defined. The %s labels must be unique.",
+					s.TypeName, labelsBuf.String(), s.TypeName,
+				),
+				Subject: &childBlock.DefRange,
+			})
+			continue
+		}
+
+		targetMap[key] = val
+	}
+
+	if len(elems) == 0 {
+		return cty.MapValEmpty(s.Nested.impliedType()), diags
+	}
+
+	var ctyMap func(map[string]interface{}, int) cty.Value
+	ctyMap = func(raw map[string]interface{}, depth int) cty.Value {
+		vals := make(map[string]cty.Value, len(raw))
+		if depth == 1 {
+			for k, v := range raw {
+				vals[k] = v.(cty.Value)
+			}
+		} else {
+			for k, v := range raw {
+				vals[k] = ctyMap(v.(map[string]interface{}), depth-1)
+			}
+		}
+		return cty.MapVal(vals)
+	}
+
+	return ctyMap(elems, len(s.LabelNames)), diags
+}
+
+func (s *BlockMapSpec) impliedType() cty.Type {
+	ret := s.Nested.impliedType()
+	for range s.LabelNames {
+		ret = cty.Map(ret)
+	}
+	return ret
+}
+
+func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We return the source range of the _first_ block of the given type,
+	// since they are not guaranteed to form a contiguous range.
+ + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockObjectSpec is a Spec that produces a cty object of the results of +// decoding all of the nested blocks of a given type, using a nested spec. +// +// One level of object structure is created for each of the given label names. +// There must be at least one given label name. +// +// This is similar to BlockMapSpec, but it permits the nested blocks to have +// different result types in situations where cty.DynamicPseudoType attributes +// are present. +type BlockObjectSpec struct { + TypeName string + LabelNames []string + Nested Spec +} + +func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node ("Nested" does not use the same body) +} + +// blockSpec implementation +func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...), + }, + } +} + +// blockSpec implementation +func (s *BlockObjectSpec) nestedSpec() Spec { + return s.Nested +} + +// specNeedingVariables implementation +func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + var ret []hcl.Traversal + + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + ret = append(ret, Variables(childBlock.Body, s.Nested)...) + } + + return ret +} + +func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + var diags hcl.Diagnostics + + if s.Nested == nil { + panic("BlockObjectSpec with no Nested Spec") + } + + elems := map[string]interface{}{} + for _, childBlock := range content.Blocks { + if childBlock.Type != s.TypeName { + continue + } + + childLabels := labelsForBlock(childBlock) + val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false) + targetMap := elems + for _, key := range childBlock.Labels[:len(s.LabelNames)-1] { + if _, exists := targetMap[key]; !exists { + targetMap[key] = make(map[string]interface{}) + } + targetMap = targetMap[key].(map[string]interface{}) + } + + diags = append(diags, childDiags...) + + key := childBlock.Labels[len(s.LabelNames)-1] + if _, exists := targetMap[key]; exists { + labelsBuf := bytes.Buffer{} + for _, label := range childBlock.Labels { + fmt.Fprintf(&labelsBuf, " %q", label) + } + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate %s block", s.TypeName), + Detail: fmt.Sprintf( + "A block for %s%s was already defined. 
The %s labels must be unique.", + s.TypeName, labelsBuf.String(), s.TypeName, + ), + Subject: &childBlock.DefRange, + }) + continue + } + + targetMap[key] = val + } + + if len(elems) == 0 { + return cty.EmptyObjectVal, diags + } + + var ctyObj func(map[string]interface{}, int) cty.Value + ctyObj = func(raw map[string]interface{}, depth int) cty.Value { + vals := make(map[string]cty.Value, len(raw)) + if depth == 1 { + for k, v := range raw { + vals[k] = v.(cty.Value) + } + } else { + for k, v := range raw { + vals[k] = ctyObj(v.(map[string]interface{}), depth-1) + } + } + return cty.ObjectVal(vals) + } + + return ctyObj(elems, len(s.LabelNames)), diags +} + +func (s *BlockObjectSpec) impliedType() cty.Type { + // We can't predict our type, since we don't know how many blocks are + // present and what labels they have until we decode. + return cty.DynamicPseudoType +} + +func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { + // We return the source range of the _first_ block of the given type, + // since they are not guaranteed to form a contiguous range. + + var childBlock *hcl.Block + for _, candidate := range content.Blocks { + if candidate.Type != s.TypeName { + continue + } + + childBlock = candidate + break + } + + if childBlock == nil { + return content.MissingItemRange + } + + return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) +} + +// A BlockAttrsSpec is a Spec that interprets a single block as if it were +// a map of some element type. That is, each attribute within the block +// becomes a key in the resulting map and the attribute's value becomes the +// element value, after conversion to the given element type. The resulting +// value is a cty.Map of the given element type. +// +// This spec imposes a validation constraint that there be exactly one block +// of the given type name and that this block may contain only attributes. The +// block does not accept any labels. +// +// This is an alternative to an AttrSpec of a map type for situations where +// block syntax is desired. Note that block syntax does not permit dynamic +// keys, construction of the result via a "for" expression, etc. In most cases +// an AttrSpec is preferred if the desired result is a map whose keys are +// chosen by the user rather than by schema. +type BlockAttrsSpec struct { + TypeName string + ElementType cty.Type + Required bool +} + +func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) { + // leaf node +} + +// blockSpec implementation +func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema { + return []hcl.BlockHeaderSchema{ + { + Type: s.TypeName, + LabelNames: nil, + }, + } +} + +// blockSpec implementation +func (s *BlockAttrsSpec) nestedSpec() Spec { + // This is an odd case: we aren't actually going to apply a nested spec + // in this case, since we're going to interpret the body directly as + // attributes, but we need to return something non-nil so that the + // decoder will recognize this as a block spec. We won't actually be + // using this for anything at decode time. + return noopSpec{} +} + +// specNeedingVariables implementation +func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal { + + block, _ := s.findBlock(content) + if block == nil { + return nil + } + + var vars []hcl.Traversal + + attrs, diags := block.Body.JustAttributes() + if diags.HasErrors() { + return nil + } + + for _, attr := range attrs { + vars = append(vars, attr.Expr.Variables()...) 
+	}
+
+	// We'll return the variable references in source order so that any
+	// error messages that result are also in source order.
+	sort.Slice(vars, func(i, j int) bool {
+		return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte
+	})
+
+	return vars
+}
+
+func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	var diags hcl.Diagnostics
+
+	block, other := s.findBlock(content)
+	if block == nil {
+		if s.Required {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Missing %s block", s.TypeName),
+				Detail: fmt.Sprintf(
+					"A block of type %q is required here.", s.TypeName,
+				),
+				Subject: &content.MissingItemRange,
+			})
+		}
+		return cty.NullVal(cty.Map(s.ElementType)), diags
+	}
+	if other != nil {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  fmt.Sprintf("Duplicate %s block", s.TypeName),
+			Detail: fmt.Sprintf(
+				"Only one block of type %q is allowed. Previous definition was at %s.",
+				s.TypeName, block.DefRange.String(),
+			),
+			Subject: &other.DefRange,
+		})
+	}
+
+	attrs, attrDiags := block.Body.JustAttributes()
+	diags = append(diags, attrDiags...)
+
+	if len(attrs) == 0 {
+		return cty.MapValEmpty(s.ElementType), diags
+	}
+
+	vals := make(map[string]cty.Value, len(attrs))
+	for name, attr := range attrs {
+		attrVal, attrDiags := attr.Expr.Value(ctx)
+		diags = append(diags, attrDiags...)
+
+		attrVal, err := convert.Convert(attrVal, s.ElementType)
+		if err != nil {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid attribute value",
+				Detail:   fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err),
+				Subject:  attr.Expr.Range().Ptr(),
+			})
+			attrVal = cty.UnknownVal(s.ElementType)
+		}
+
+		vals[name] = attrVal
+	}
+
+	return cty.MapVal(vals), diags
+}
+
+func (s *BlockAttrsSpec) impliedType() cty.Type {
+	return cty.Map(s.ElementType)
+}
+
+func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	block, _ := s.findBlock(content)
+	if block == nil {
+		return content.MissingItemRange
+	}
+	return block.DefRange
+}
+
+func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) {
+	for _, candidate := range content.Blocks {
+		if candidate.Type != s.TypeName {
+			continue
+		}
+		if block != nil {
+			return block, candidate
+		}
+		block = candidate
+	}
+
+	return block, nil
+}
+
+// A BlockLabelSpec is a Spec that returns a cty.String representing the
+// label of the block its given body belongs to, if indeed its given body
+// belongs to a block. It is a programming error to use this in a non-block
+// context, so this spec will panic in that case.
+//
+// This spec only works in the nested spec within a BlockSpec, BlockListSpec,
+// BlockSetSpec or BlockMapSpec.
+//
+// The full set of label specs used against a particular block must have a
+// consecutive set of indices starting at zero. The maximum index found
+// defines how many labels the corresponding blocks must have in the source
+// configuration.
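+//
+// A minimal sketch (hypothetical names, not part of the original
+// documentation): capturing a single block label alongside an attribute in
+// the result object:
+//
+//	spec := &BlockSpec{
+//		TypeName: "service",
+//		Nested: ObjectSpec{
+//			"name": &BlockLabelSpec{Index: 0, Name: "name"},
+//			"port": &AttrSpec{Name: "port", Type: cty.Number},
+//		},
+//	}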
+type BlockLabelSpec struct {
+	Index int
+	Name  string
+}
+
+func (s *BlockLabelSpec) visitSameBodyChildren(cb visitFunc) {
+	// leaf node
+}
+
+func (s *BlockLabelSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return cty.StringVal(blockLabels[s.Index].Value), nil
+}
+
+func (s *BlockLabelSpec) impliedType() cty.Type {
+	return cty.String // labels are always strings
+}
+
+func (s *BlockLabelSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	if s.Index >= len(blockLabels) {
+		panic("BlockLabelSpec used in non-block context")
+	}
+
+	return blockLabels[s.Index].Range
+}
+
+func findLabelSpecs(spec Spec) []string {
+	maxIdx := -1
+	var names map[int]string
+
+	var visit visitFunc
+	visit = func(s Spec) {
+		if ls, ok := s.(*BlockLabelSpec); ok {
+			if maxIdx < ls.Index {
+				maxIdx = ls.Index
+			}
+			if names == nil {
+				names = make(map[int]string)
+			}
+			names[ls.Index] = ls.Name
+		}
+		s.visitSameBodyChildren(visit)
+	}
+
+	visit(spec)
+
+	if maxIdx < 0 {
+		return nil // no labels at all
+	}
+
+	ret := make([]string, maxIdx+1)
+	for i := range ret {
+		name := names[i]
+		if name == "" {
+			// Should never happen if the spec is conformant, since we require
+			// consecutive indices starting at zero.
+			name = fmt.Sprintf("missing%02d", i)
+		}
+		ret[i] = name
+	}
+
+	return ret
+}
+
+// DefaultSpec is a spec that wraps two specs, evaluating the primary first
+// and then evaluating the default if the primary returns a null value.
+//
+// The two specifications must have the same implied result type for correct
+// operation. If not, the result is undefined.
+//
+// Any requirements imposed by the "Default" spec apply even if "Primary" does
+// not return null. For example, if the "Default" spec is for a required
+// attribute then that attribute is always required, regardless of the result
+// of the "Primary" spec.
+//
+// The "Default" spec must not describe a nested block, since otherwise the
+// result of ChildBlockTypes would not be decidable without evaluation. If
+// the default spec _does_ describe a nested block then the result is
+// undefined.
+type DefaultSpec struct {
+	Primary Spec
+	Default Spec
+}
+
+func (s *DefaultSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Primary)
+	cb(s.Default)
+}
+
+func (s *DefaultSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	val, diags := s.Primary.decode(content, blockLabels, ctx)
+	if val.IsNull() {
+		var moreDiags hcl.Diagnostics
+		val, moreDiags = s.Default.decode(content, blockLabels, ctx)
+		diags = append(diags, moreDiags...)
+	}
+	return val, diags
+}
+
+func (s *DefaultSpec) impliedType() cty.Type {
+	return s.Primary.impliedType()
+}
+
+// attrSpec implementation
+func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema {
+	// We must pass through the union of both of our nested specs so that
+	// we'll have both values available in the result.
+	var ret []hcl.AttributeSchema
+	if as, ok := s.Primary.(attrSpec); ok {
+		ret = append(ret, as.attrSchemata()...)
+	}
+	if as, ok := s.Default.(attrSpec); ok {
+		ret = append(ret, as.attrSchemata()...)
+	}
+	return ret
+}
+
+// blockSpec implementation
+func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
+	// Only the primary spec may describe a block, since otherwise
+	// our nestedSpec method below can't know which to return.
+	if bs, ok := s.Primary.(blockSpec); ok {
+		return bs.blockHeaderSchemata()
+	}
+	return nil
+}
+
+// blockSpec implementation
+func (s *DefaultSpec) nestedSpec() Spec {
+	if bs, ok := s.Primary.(blockSpec); ok {
+		return bs.nestedSpec()
+	}
+	return nil
+}
+
+func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We can't tell from here which of the two specs will ultimately be used
+	// in our result, so we'll just assume the first. This is usually the right
+	// choice because the default is often a literal spec that doesn't have a
+	// reasonable source range to return anyway.
+	return s.Primary.sourceRange(content, blockLabels)
+}
+
+// TransformExprSpec is a spec that wraps another and then evaluates a given
+// hcl.Expression on the result.
+//
+// The implied type of this spec is determined by evaluating the expression
+// with an unknown value of the nested spec's implied type, which may cause
+// the result to be imprecise. This spec should not be used in situations where
+// precise result type information is needed.
+type TransformExprSpec struct {
+	Wrapped      Spec
+	Expr         hcl.Expression
+	TransformCtx *hcl.EvalContext
+	VarName      string
+}
+
+func (s *TransformExprSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Wrapped)
+}
+
+func (s *TransformExprSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
+	if diags.HasErrors() {
+		// We won't try to run our function in this case, because it'll probably
+		// generate confusing additional errors that will distract from the
+		// root cause.
+		return cty.UnknownVal(s.impliedType()), diags
+	}
+
+	chiCtx := s.TransformCtx.NewChild()
+	chiCtx.Variables = map[string]cty.Value{
+		s.VarName: wrappedVal,
+	}
+	resultVal, resultDiags := s.Expr.Value(chiCtx)
+	diags = append(diags, resultDiags...)
+	return resultVal, diags
+}
+
+func (s *TransformExprSpec) impliedType() cty.Type {
+	wrappedTy := s.Wrapped.impliedType()
+	chiCtx := s.TransformCtx.NewChild()
+	chiCtx.Variables = map[string]cty.Value{
+		s.VarName: cty.UnknownVal(wrappedTy),
+	}
+	resultVal, _ := s.Expr.Value(chiCtx)
+	return resultVal.Type()
+}
+
+func (s *TransformExprSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We'll just pass through our wrapped range here, even though that's
+	// not super-accurate, because there's nothing better to return.
+	return s.Wrapped.sourceRange(content, blockLabels)
+}
+
+// TransformFuncSpec is a spec that wraps another and then evaluates a given
+// cty function with the result. The given function must expect exactly one
+// argument, where the result of the wrapped spec will be passed.
+//
+// The implied type of this spec is determined by type-checking the function
+// with an unknown value of the nested spec's implied type, which may cause
+// the result to be imprecise. This spec should not be used in situations where
+// precise result type information is needed.
+//
+// If the given function produces an error when run, this spec will produce
+// a non-user-actionable diagnostic message. It's the caller's responsibility
+// to ensure that the given function cannot fail for any non-error result
+// of the wrapped spec.
+type TransformFuncSpec struct {
+	Wrapped Spec
+	Func    function.Function
+}
+
+func (s *TransformFuncSpec) visitSameBodyChildren(cb visitFunc) {
+	cb(s.Wrapped)
+}
+
+func (s *TransformFuncSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	wrappedVal, diags := s.Wrapped.decode(content, blockLabels, ctx)
+	if diags.HasErrors() {
+		// We won't try to run our function in this case, because it'll probably
+		// generate confusing additional errors that will distract from the
+		// root cause.
+		return cty.UnknownVal(s.impliedType()), diags
+	}
+
+	resultVal, err := s.Func.Call([]cty.Value{wrappedVal})
+	if err != nil {
+		// This is not a good example of a diagnostic because it is reporting
+		// a programming error in the calling application, rather than something
+		// an end-user could act on.
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Transform function failed",
+			Detail:   fmt.Sprintf("Decoder transform returned an error: %s", err),
+			Subject:  s.sourceRange(content, blockLabels).Ptr(),
+		})
+		return cty.UnknownVal(s.impliedType()), diags
+	}
+
+	return resultVal, diags
+}
+
+func (s *TransformFuncSpec) impliedType() cty.Type {
+	wrappedTy := s.Wrapped.impliedType()
+	resultTy, err := s.Func.ReturnType([]cty.Type{wrappedTy})
+	if err != nil {
+		// Should never happen with a correctly-configured spec
+		return cty.DynamicPseudoType
+	}
+
+	return resultTy
+}
+
+func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// We'll just pass through our wrapped range here, even though that's
+	// not super-accurate, because there's nothing better to return.
+	return s.Wrapped.sourceRange(content, blockLabels)
+}
+
+// noopSpec is a placeholder spec that does nothing, used in situations where
+// a non-nil placeholder spec is required. It is not exported because there is
+// no reason to use it directly; it is always an implementation detail only.
+type noopSpec struct {
+}
+
+func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
+	return cty.NullVal(cty.DynamicPseudoType), nil
+}
+
+func (s noopSpec) impliedType() cty.Type {
+	return cty.DynamicPseudoType
+}
+
+func (s noopSpec) visitSameBodyChildren(cb visitFunc) {
+	// nothing to do
+}
+
+func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
+	// No useful range for a noopSpec, and nobody should be calling this anyway.
+	return hcl.Range{
+		Filename: "noopSpec",
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
new file mode 100644
index 00000000..7662516c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
@@ -0,0 +1,36 @@
+package hcldec
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Variables processes the given body with the given spec and returns a
+// list of the variable traversals that would be required to decode
+// the same pairing of body and spec.
+//
+// This can be used to conditionally populate the variables in the EvalContext
+// passed to Decode, for applications where a static scope is insufficient.
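+//
+// A minimal sketch of the intended use (the spec and body here are
+// placeholders for whatever the calling application has built):
+//
+//	spec := &AttrSpec{Name: "greeting", Type: cty.String}
+//	for _, traversal := range Variables(body, spec) {
+//		// e.g. look up traversal.RootName() and add it to the EvalContext
+//	}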
+//
+// If the given body is not compliant with the given schema, the result may
+// be incomplete, but that's assumed to be okay because the eventual call
+// to Decode will produce error diagnostics anyway.
+func Variables(body hcl.Body, spec Spec) []hcl.Traversal {
+	var vars []hcl.Traversal
+	schema := ImpliedSchema(spec)
+	content, _, _ := body.PartialContent(schema)
+
+	if vs, ok := spec.(specNeedingVariables); ok {
+		vars = append(vars, vs.variablesNeeded(content)...)
+	}
+
+	var visitFn visitFunc
+	visitFn = func(s Spec) {
+		if vs, ok := s.(specNeedingVariables); ok {
+			vars = append(vars, vs.variablesNeeded(content)...)
+		}
+		s.visitSameBodyChildren(visitFn)
+	}
+	spec.visitSameBodyChildren(visitFn)
+
+	return vars
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hcled/doc.go b/vendor/github.com/hashicorp/hcl2/hcled/doc.go
new file mode 100644
index 00000000..1a801448
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcled/doc.go
@@ -0,0 +1,4 @@
+// Package hcled provides functionality intended to help an application
+// that embeds HCL to deliver relevant information to a text editor or IDE
+// for navigating around and analyzing configuration files.
+package hcled
diff --git a/vendor/github.com/hashicorp/hcl2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go
new file mode 100644
index 00000000..5d10cd86
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go
@@ -0,0 +1,34 @@
+package hcled
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+type contextStringer interface {
+	ContextString(offset int) string
+}
+
+// ContextString returns a string describing the context of the given byte
+// offset, if available. An empty string is returned if no such information
+// is available, or otherwise the returned string is in a form that depends
+// on the language used to write the referenced file.
+func ContextString(file *hcl.File, offset int) string {
+	if cser, ok := file.Nav.(contextStringer); ok {
+		return cser.ContextString(offset)
+	}
+	return ""
+}
+
+type contextDefRanger interface {
+	ContextDefRange(offset int) hcl.Range
+}
+
+func ContextDefRange(file *hcl.File, offset int) hcl.Range {
+	if cser, ok := file.Nav.(contextDefRanger); ok {
+		defRange := cser.ContextDefRange(offset)
+		if !defRange.Empty() {
+			return defRange
+		}
+	}
+	return file.Body.MissingItemRange()
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclparse/parser.go b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go
new file mode 100644
index 00000000..6d47f126
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclparse/parser.go
@@ -0,0 +1,123 @@
+package hclparse
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/hashicorp/hcl2/hcl/json"
+)
+
+// NOTE: This is the public interface for parsing. The actual parsers are
+// in other packages alongside this one, with this package just wrapping them
+// to provide a unified interface for the caller across all supported formats.
+
+// Parser is the main interface for parsing configuration files. As well as
+// parsing files, a parser also retains a registry of all of the files it
+// has parsed so that multiple attempts to parse the same file will return
+// the same object and so the collected files can be used when printing
+// diagnostics.
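+//
+// A typical round trip looks something like this (the filename is just an
+// example):
+//
+//	p := NewParser()
+//	f, diags := p.ParseHCLFile("config.hcl")
+//	// use f.Body to decode, and report diags to the user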
+//
+// Any diagnostics for parsing a file are only returned once on the first
+// call to parse that file. Callers are expected to collect up diagnostics
+// and present them together, so returning diagnostics for the same file
+// multiple times would create a confusing result.
+type Parser struct {
+	files map[string]*hcl.File
+}
+
+// NewParser creates a new parser, ready to parse configuration files.
+func NewParser() *Parser {
+	return &Parser{
+		files: map[string]*hcl.File{},
+	}
+}
+
+// ParseHCL parses the given buffer (which is assumed to have been loaded from
+// the given filename) as a native-syntax configuration file and returns the
+// hcl.File object representing it.
+func (p *Parser) ParseHCL(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
+	if existing := p.files[filename]; existing != nil {
+		return existing, nil
+	}
+
+	file, diags := hclsyntax.ParseConfig(src, filename, hcl.Pos{Byte: 0, Line: 1, Column: 1})
+	p.files[filename] = file
+	return file, diags
+}
+
+// ParseHCLFile reads the given filename and parses it as a native-syntax HCL
+// configuration file. An error diagnostic is returned if the given file
+// cannot be read.
+func (p *Parser) ParseHCLFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	if existing := p.files[filename]; existing != nil {
+		return existing, nil
+	}
+
+	src, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Failed to read file",
+				Detail:   fmt.Sprintf("The configuration file %q could not be read.", filename),
+			},
+		}
+	}
+
+	return p.ParseHCL(src, filename)
+}
+
+// ParseJSON parses the given JSON buffer (which is assumed to have been loaded
+// from the given filename) and returns the hcl.File object representing it.
+func (p *Parser) ParseJSON(src []byte, filename string) (*hcl.File, hcl.Diagnostics) {
+	if existing := p.files[filename]; existing != nil {
+		return existing, nil
+	}
+
+	file, diags := json.Parse(src, filename)
+	p.files[filename] = file
+	return file, diags
+}
+
+// ParseJSONFile reads the given filename and parses it as JSON, similarly to
+// ParseJSON. An error diagnostic is returned if the given file cannot be read.
+func (p *Parser) ParseJSONFile(filename string) (*hcl.File, hcl.Diagnostics) {
+	if existing := p.files[filename]; existing != nil {
+		return existing, nil
+	}
+
+	file, diags := json.ParseFile(filename)
+	p.files[filename] = file
+	return file, diags
+}
+
+// AddFile allows a caller to record in a parser a file that was parsed some
+// other way, thus allowing it to be included in the registry of sources.
+func (p *Parser) AddFile(filename string, file *hcl.File) {
+	p.files[filename] = file
+}
+
+// Sources returns a map from filenames to the raw source code that was
+// read from them. This is intended to be used, for example, to print
+// diagnostics with contextual information.
+//
+// The arrays underlying the returned slices should not be modified.
+func (p *Parser) Sources() map[string][]byte {
+	ret := make(map[string][]byte)
+	for fn, f := range p.files {
+		ret[fn] = f.Bytes
+	}
+	return ret
+}
+
+// Files returns a map from filenames to the File objects produced from them.
+// This is intended to be used, for example, to print diagnostics with
+// contextual information.
+//
+// The returned map and all of the objects it refers to directly or indirectly
+// must not be modified.
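+//
+// As a hedged sketch, the result is suitable for passing to
+// hcl.NewDiagnosticTextWriter so that diagnostics can be printed with
+// source context (the writer, width and color settings here are arbitrary):
+//
+//	wr := hcl.NewDiagnosticTextWriter(os.Stderr, p.Files(), 80, true)
+//	wr.WriteDiagnostics(diags)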
+func (p *Parser) Files() map[string]*hcl.File {
+	return p.files
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
new file mode 100644
index 00000000..09041652
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
@@ -0,0 +1,121 @@
+package hclwrite
+
+import (
+	"bytes"
+	"io"
+)
+
+type File struct {
+	inTree
+
+	srcBytes []byte
+	body     *node
+}
+
+// NewEmptyFile constructs a new file with no content, ready to be mutated
+// by other calls that append to its body.
+func NewEmptyFile() *File {
+	f := &File{
+		inTree: newInTree(),
+	}
+	body := newBody()
+	f.body = f.children.Append(body)
+	return f
+}
+
+// Body returns the root body of the file, which contains the top-level
+// attributes and blocks.
+func (f *File) Body() *Body {
+	return f.body.content.(*Body)
+}
+
+// WriteTo writes the tokens underlying the receiving file to the given writer.
+//
+// The tokens first have a simple formatting pass applied that adjusts only
+// the spaces between them.
+func (f *File) WriteTo(wr io.Writer) (int64, error) {
+	tokens := f.inTree.children.BuildTokens(nil)
+	format(tokens)
+	return tokens.WriteTo(wr)
+}
+
+// Bytes returns a buffer containing the source code resulting from the
+// tokens underlying the receiving file. If any updates have been made via
+// the AST API, these will be reflected in the result.
+func (f *File) Bytes() []byte {
+	buf := &bytes.Buffer{}
+	f.WriteTo(buf)
+	return buf.Bytes()
+}
+
+type comments struct {
+	leafNode
+
+	parent *node
+	tokens Tokens
+}
+
+func newComments(tokens Tokens) *comments {
+	return &comments{
+		tokens: tokens,
+	}
+}
+
+func (c *comments) BuildTokens(to Tokens) Tokens {
+	return c.tokens.BuildTokens(to)
+}
+
+type identifier struct {
+	leafNode
+
+	parent *node
+	token  *Token
+}
+
+func newIdentifier(token *Token) *identifier {
+	return &identifier{
+		token: token,
+	}
+}
+
+func (i *identifier) BuildTokens(to Tokens) Tokens {
+	return append(to, i.token)
+}
+
+func (i *identifier) hasName(name string) bool {
+	return name == string(i.token.Bytes)
+}
+
+type number struct {
+	leafNode
+
+	parent *node
+	token  *Token
+}
+
+func newNumber(token *Token) *number {
+	return &number{
+		token: token,
+	}
+}
+
+func (n *number) BuildTokens(to Tokens) Tokens {
+	return append(to, n.token)
+}
+
+type quoted struct {
+	leafNode
+
+	parent *node
+	tokens Tokens
+}
+
+func newQuoted(tokens Tokens) *quoted {
+	return &quoted{
+		tokens: tokens,
+	}
+}
+
+func (q *quoted) BuildTokens(to Tokens) Tokens {
+	return q.tokens.BuildTokens(to)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
new file mode 100644
index 00000000..975fa742
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
@@ -0,0 +1,48 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+type Attribute struct {
+	inTree
+
+	leadComments *node
+	name         *node
+	expr         *node
+	lineComments *node
+}
+
+func newAttribute() *Attribute {
+	return &Attribute{
+		inTree: newInTree(),
+	}
+}
+
+func (a *Attribute) init(name string, expr *Expression) {
+	expr.assertUnattached()
+
+	nameTok := newIdentToken(name)
+	nameObj := newIdentifier(nameTok)
+	a.leadComments = a.children.Append(newComments(nil))
+	a.name = a.children.Append(nameObj)
+	a.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenEqual,
+			Bytes: []byte{'='},
+		},
+	})
+	a.expr = a.children.Append(expr)
+	a.expr.list = a.children
+	a.lineComments = a.children.Append(newComments(nil))
+	a.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
+
+func (a *Attribute) Expr() *Expression {
+	return a.expr.content.(*Expression)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
new file mode 100644
index 00000000..d5fd32bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
@@ -0,0 +1,74 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Block struct {
+	inTree
+
+	leadComments *node
+	typeName     *node
+	labels       nodeSet
+	open         *node
+	body         *node
+	close        *node
+}
+
+func newBlock() *Block {
+	return &Block{
+		inTree: newInTree(),
+		labels: newNodeSet(),
+	}
+}
+
+// NewBlock constructs a new, empty block with the given type name and labels.
+func NewBlock(typeName string, labels []string) *Block {
+	block := newBlock()
+	block.init(typeName, labels)
+	return block
+}
+
+func (b *Block) init(typeName string, labels []string) {
+	nameTok := newIdentToken(typeName)
+	nameObj := newIdentifier(nameTok)
+	b.leadComments = b.children.Append(newComments(nil))
+	b.typeName = b.children.Append(nameObj)
+	for _, label := range labels {
+		labelToks := TokensForValue(cty.StringVal(label))
+		labelObj := newQuoted(labelToks)
+		labelNode := b.children.Append(labelObj)
+		b.labels.Add(labelNode)
+	}
+	b.open = b.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenOBrace,
+			Bytes: []byte{'{'},
+		},
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+	body := newBody() // initially totally empty; caller can append to it subsequently
+	b.body = b.children.Append(body)
+	b.close = b.children.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenCBrace,
+			Bytes: []byte{'}'},
+		},
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
+
+// Body returns the body that represents the content of the receiving block.
+//
+// Appending to or otherwise modifying this body will make changes to the
+// tokens that are generated between the block's open and close braces.
+func (b *Block) Body() *Body {
+	return b.body.content.(*Body)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
new file mode 100644
index 00000000..cf69fee2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
@@ -0,0 +1,153 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Body struct {
+	inTree
+
+	items nodeSet
+}
+
+func newBody() *Body {
+	return &Body{
+		inTree: newInTree(),
+		items:  newNodeSet(),
+	}
+}
+
+func (b *Body) appendItem(c nodeContent) *node {
+	nn := b.children.Append(c)
+	b.items.Add(nn)
+	return nn
+}
+
+func (b *Body) appendItemNode(nn *node) *node {
+	nn.assertUnattached()
+	b.children.AppendNode(nn)
+	b.items.Add(nn)
+	return nn
+}
+
+// Clear removes all of the items from the body, making it empty.
+func (b *Body) Clear() {
+	b.children.Clear()
+}
+
+func (b *Body) AppendUnstructuredTokens(ts Tokens) {
+	b.inTree.children.Append(ts)
+}
+
+// Attributes returns a new map of all of the attributes in the body, with
+// the attribute names as the keys.
+func (b *Body) Attributes() map[string]*Attribute {
+	ret := make(map[string]*Attribute)
+	for n := range b.items {
+		if attr, isAttr := n.content.(*Attribute); isAttr {
+			nameObj := attr.name.content.(*identifier)
+			name := string(nameObj.token.Bytes)
+			ret[name] = attr
+		}
+	}
+	return ret
+}
+
+// Blocks returns a new slice of all the blocks in the body.
+func (b *Body) Blocks() []*Block {
+	ret := make([]*Block, 0, len(b.items))
+	for n := range b.items {
+		if block, isBlock := n.content.(*Block); isBlock {
+			ret = append(ret, block)
+		}
+	}
+	return ret
+}
+
+// GetAttribute returns the attribute from the body that has the given name,
+// or returns nil if there is currently no matching attribute.
+func (b *Body) GetAttribute(name string) *Attribute {
+	for n := range b.items {
+		if attr, isAttr := n.content.(*Attribute); isAttr {
+			nameObj := attr.name.content.(*identifier)
+			if nameObj.hasName(name) {
+				// We've found it!
+				return attr
+			}
+		}
+	}
+
+	return nil
+}
+
+// SetAttributeValue either replaces the expression of an existing attribute
+// of the given name or adds a new attribute definition to the end of the block.
+//
+// The value is given as a cty.Value, and must therefore be a literal. To set
+// a variable reference or other traversal, use SetAttributeTraversal.
+//
+// The return value is the attribute that was either modified in-place or
+// created.
+func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
+	attr := b.GetAttribute(name)
+	expr := NewExpressionLiteral(val)
+	if attr != nil {
+		attr.expr = attr.expr.ReplaceWith(expr)
+	} else {
+		attr = newAttribute()
+		attr.init(name, expr)
+		b.appendItem(attr)
+	}
+	return attr
+}
+
+// SetAttributeTraversal either replaces the expression of an existing attribute
+// of the given name or adds a new attribute definition to the end of the body.
+//
+// The new expression is given as a hcl.Traversal, which must be an absolute
+// traversal. To set a literal value, use SetAttributeValue.
+//
+// The return value is the attribute that was either modified in-place or
+// created.
+func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
+	attr := b.GetAttribute(name)
+	expr := NewExpressionAbsTraversal(traversal)
+	if attr != nil {
+		attr.expr = attr.expr.ReplaceWith(expr)
+	} else {
+		attr = newAttribute()
+		attr.init(name, expr)
+		b.appendItem(attr)
+	}
+	return attr
+}
+
+// AppendBlock appends an existing block (which must not be already attached
+// to a body) to the end of the receiving body.
+func (b *Body) AppendBlock(block *Block) *Block {
+	b.appendItem(block)
+	return block
+}
+
+// AppendNewBlock appends a new nested block to the end of the receiving body
+// with the given type name and labels.
+func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
+	block := newBlock()
+	block.init(typeName, labels)
+	b.appendItem(block)
+	return block
+}
+
+// AppendNewline appends a newline token to the end of the receiving body,
+// which generally serves as a separator between different sets of body
+// contents.
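+//
+// For example (illustrative only; the block type names are arbitrary), two
+// generated blocks can be separated by a blank line like this:
+//
+//	body.AppendNewBlock("servers", nil)
+//	body.AppendNewline()
+//	body.AppendNewBlock("clients", nil)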
+func (b *Body) AppendNewline() {
+	b.AppendUnstructuredTokens(Tokens{
+		{
+			Type:  hclsyntax.TokenNewline,
+			Bytes: []byte{'\n'},
+		},
+	})
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
new file mode 100644
index 00000000..62d89fbe
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
@@ -0,0 +1,201 @@
+package hclwrite
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+type Expression struct {
+	inTree
+
+	absTraversals nodeSet
+}
+
+func newExpression() *Expression {
+	return &Expression{
+		inTree:        newInTree(),
+		absTraversals: newNodeSet(),
+	}
+}
+
+// NewExpressionLiteral constructs an expression that represents the given
+// literal value.
+//
+// Since an unknown value cannot be represented in source code, this function
+// will panic if the given value is unknown or contains a nested unknown value.
+// Use val.IsWhollyKnown before calling to be sure.
+//
+// HCL native syntax does not directly represent lists, maps, and sets, and
+// instead relies on the automatic conversions to those collection types from
+// either list or tuple constructor syntax. Therefore converting collection
+// values to source code and re-reading them will lose type information, and
+// the reader must provide a suitable type at decode time to recover the
+// original value.
+func NewExpressionLiteral(val cty.Value) *Expression {
+	toks := TokensForValue(val)
+	expr := newExpression()
+	expr.children.AppendUnstructuredTokens(toks)
+	return expr
+}
+
+// NewExpressionAbsTraversal constructs an expression that represents the
+// given traversal, which must be absolute or this function will panic.
+func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
+	if traversal.IsRelative() {
+		panic("can't construct expression from relative traversal")
+	}
+
+	physT := newTraversal()
+	rootName := traversal.RootName()
+	steps := traversal[1:]
+
+	{
+		tn := newTraverseName()
+		tn.name = tn.children.Append(newIdentifier(&Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(rootName),
+		}))
+		physT.steps.Add(physT.children.Append(tn))
+	}
+
+	for _, step := range steps {
+		switch ts := step.(type) {
+		case hcl.TraverseAttr:
+			tn := newTraverseName()
+			tn.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenDot,
+					Bytes: []byte{'.'},
+				},
+			})
+			tn.name = tn.children.Append(newIdentifier(&Token{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte(ts.Name),
+			}))
+			physT.steps.Add(physT.children.Append(tn))
+		case hcl.TraverseIndex:
+			ti := newTraverseIndex()
+			ti.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenOBrack,
+					Bytes: []byte{'['},
+				},
+			})
+			indexExpr := NewExpressionLiteral(ts.Key)
+			ti.key = ti.children.Append(indexExpr)
+			ti.children.AppendUnstructuredTokens(Tokens{
+				{
+					Type:  hclsyntax.TokenCBrack,
+					Bytes: []byte{']'},
+				},
+			})
+			physT.steps.Add(physT.children.Append(ti))
+		}
+	}
+
+	expr := newExpression()
+	expr.absTraversals.Add(expr.children.Append(physT))
+	return expr
+}
+
+// Variables returns the absolute traversals that exist within the receiving
+// expression.
+func (e *Expression) Variables() []*Traversal {
+	nodes := e.absTraversals.List()
+	ret := make([]*Traversal, len(nodes))
+	for i, node := range nodes {
+		ret[i] = node.content.(*Traversal)
+	}
+	return ret
+}
+
+// RenameVariablePrefix examines each of the absolute traversals in the
+// receiving expression to see if they have the given sequence of names as
+// a prefix. If so, they are updated in place to have the given
+// replacement names instead of that prefix.
+//
+// This can be used to implement symbol renaming. The calling application can
+// visit all relevant expressions in its input and apply the same renaming
+// to implement a global symbol rename.
+//
+// The search and replacement traversals must be the same length, or this
+// method will panic. Only attribute access operations can be matched and
+// replaced. Index steps never match the prefix.
+func (e *Expression) RenameVariablePrefix(search, replacement []string) {
+	if len(search) != len(replacement) {
+		panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
+	}
+Traversals:
+	for node := range e.absTraversals {
+		traversal := node.content.(*Traversal)
+		if len(traversal.steps) < len(search) {
+			// If it's shorter then it can't have our prefix
+			continue
+		}
+
+		stepNodes := traversal.steps.List()
+		for i, name := range search {
+			step, isName := stepNodes[i].content.(*TraverseName)
+			if !isName {
+				continue Traversals // only name nodes can match
+			}
+			foundNameBytes := step.name.content.(*identifier).token.Bytes
+			if len(foundNameBytes) != len(name) {
+				continue Traversals
+			}
+			if string(foundNameBytes) != name {
+				continue Traversals
+			}
+		}
+
+		// If we get here then the prefix matched, so now we'll swap in
+		// the replacement strings.
+		for i, name := range replacement {
+			step := stepNodes[i].content.(*TraverseName)
+			token := step.name.content.(*identifier).token
+			token.Bytes = []byte(name)
+		}
+	}
+}
+
+// Traversal represents a sequence of variable, attribute, and/or index
+// operations.
+type Traversal struct {
+	inTree
+
+	steps nodeSet
+}
+
+func newTraversal() *Traversal {
+	return &Traversal{
+		inTree: newInTree(),
+		steps:  newNodeSet(),
+	}
+}
+
+type TraverseName struct {
+	inTree
+
+	name *node
+}
+
+func newTraverseName() *TraverseName {
+	return &TraverseName{
+		inTree: newInTree(),
+	}
+}
+
+type TraverseIndex struct {
+	inTree
+
+	key *node
+}
+
+func newTraverseIndex() *TraverseIndex {
+	return &TraverseIndex{
+		inTree: newInTree(),
+	}
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
new file mode 100644
index 00000000..56d5b775
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
@@ -0,0 +1,11 @@
+// Package hclwrite deals with the problem of generating HCL configuration
+// and of making specific surgical changes to existing HCL configurations.
+//
+// It operates at a different level of abstraction than the main HCL parser
+// and AST, since details such as the placement of comments and newlines
+// are preserved when unchanged.
+//
+// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes
+// to be read out, created and inserted, etc. Nodes represent syntax constructs
+// rather than semantic concepts.
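+//
+// As a brief, hedged sketch of generation (the attribute and block names
+// below are arbitrary examples, not a fixed schema):
+//
+//	f := hclwrite.NewEmptyFile()
+//	body := f.Body()
+//	body.SetAttributeValue("region", cty.StringVal("us-west-2"))
+//	block := body.AppendNewBlock("settings", []string{"example"})
+//	block.Body().SetAttributeValue("enabled", cty.True)
+//	f.WriteTo(os.Stdout)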
+package hclwrite
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
new file mode 100644
index 00000000..f20ae23a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
@@ -0,0 +1,492 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})
+
+// placeholder token used when we don't have a token but we don't want
+// to pass a real "nil" and complicate things with nil pointer checks
+var nilToken = &Token{
+	Type:         hclsyntax.TokenNil,
+	Bytes:        []byte{},
+	SpacesBefore: 0,
+}
+
+// format rewrites tokens within the given sequence, in-place, to adjust the
+// whitespace around their content to achieve canonical formatting.
+func format(tokens Tokens) {
+	// Formatting is a multi-pass process. More details on the passes below,
+	// but this is the overview:
+	// - adjust the leading space on each line to create appropriate
+	//   indentation
+	// - adjust spaces between tokens in a single cell using a set of rules
+	// - adjust the leading space in the "assign" and "comment" cells on each
+	//   line to vertically align with neighboring lines.
+	// All of these steps operate in-place on the given tokens, so a caller
+	// may collect a flat sequence of all of the tokens underlying an AST
+	// and pass it here and we will then indirectly modify the AST itself.
+	// Formatting must change only whitespace. Specifically, that means
+	// changing the SpacesBefore attribute on a token while leaving the
+	// other token attributes unchanged.
+
+	lines := linesForFormat(tokens)
+	formatIndent(lines)
+	formatSpaces(lines)
+	formatCells(lines)
+}
+
+func formatIndent(lines []formatLine) {
+	// Our methodology for indents is to take the input one line at a time
+	// and count the bracketing delimiters on each line. If a line has a net
+	// increase in open brackets, we increase the indent level by one and
+	// remember how many new openers we had. If the line has a net _decrease_,
+	// we'll compare it to the most recent number of openers and decrease the
+	// dedent level by one each time we pass an indent level remembered
+	// earlier.
+	// The "indent stack" used here allows for us to recognize degenerate
+	// input where brackets are not symmetrical within lines and avoid
+	// pushing things too far left or right, creating confusion.
+
+	// We'll start our indent stack at a reasonable capacity to minimize the
+	// chance of us needing to grow it; 10 here means 10 levels of indent,
+	// which should be more than enough for reasonable HCL uses.
+	indents := make([]int, 0, 10)
+
+	inHeredoc := false
+	for i := range lines {
+		line := &lines[i]
+		if len(line.lead) == 0 {
+			continue
+		}
+
+		if inHeredoc {
+			for _, token := range line.lead {
+				if token.Type == hclsyntax.TokenCHeredoc {
+					inHeredoc = false
+				}
+			}
+			continue // don't touch indentation inside heredocs
+		}
+
+		if line.lead[0].Type == hclsyntax.TokenNewline {
+			// Never place spaces before a newline
+			line.lead[0].SpacesBefore = 0
+			continue
+		}
+
+		netBrackets := 0
+		for _, token := range line.lead {
+			netBrackets += tokenBracketChange(token)
+			if token.Type == hclsyntax.TokenOHeredoc {
+				inHeredoc = true
+			}
+		}
+		for _, token := range line.assign {
+			netBrackets += tokenBracketChange(token)
+		}
+
+		switch {
+		case netBrackets > 0:
+			line.lead[0].SpacesBefore = 2 * len(indents)
+			indents = append(indents, netBrackets)
+		case netBrackets < 0:
+			closed := -netBrackets
+			for closed > 0 && len(indents) > 0 {
+				switch {
+
+				case closed > indents[len(indents)-1]:
+					closed -= indents[len(indents)-1]
+					indents = indents[:len(indents)-1]
+
+				case closed < indents[len(indents)-1]:
+					indents[len(indents)-1] -= closed
+					closed = 0
+
+				default:
+					indents = indents[:len(indents)-1]
+					closed = 0
+				}
+			}
+			line.lead[0].SpacesBefore = 2 * len(indents)
+		default:
+			line.lead[0].SpacesBefore = 2 * len(indents)
+		}
+	}
+}
+
+func formatSpaces(lines []formatLine) {
+	for _, line := range lines {
+		for i, token := range line.lead {
+			var before, after *Token
+			if i > 0 {
+				before = line.lead[i-1]
+			} else {
+				before = nilToken
+			}
+			if i < (len(line.lead) - 1) {
+				after = line.lead[i+1]
+			} else {
+				after = nilToken
+			}
+			if spaceAfterToken(token, before, after) {
+				after.SpacesBefore = 1
+			} else {
+				after.SpacesBefore = 0
+			}
+		}
+		for i, token := range line.assign {
+			if i == 0 {
+				// first token in "assign" always has one space before to
+				// separate the equals sign from what it's assigning.
+				token.SpacesBefore = 1
+			}
+
+			var before, after *Token
+			if i > 0 {
+				before = line.assign[i-1]
+			} else {
+				before = nilToken
+			}
+			if i < (len(line.assign) - 1) {
+				after = line.assign[i+1]
+			} else {
+				after = nilToken
+			}
+			if spaceAfterToken(token, before, after) {
+				after.SpacesBefore = 1
+			} else {
+				after.SpacesBefore = 0
+			}
+		}
+
+	}
+}
+
+func formatCells(lines []formatLine) {
+
+	chainStart := -1
+	maxColumns := 0
+
+	// We'll deal with the "assign" cell first, since moving that will
+	// also impact the "comment" cell.
+	closeAssignChain := func(i int) {
+		for _, chainLine := range lines[chainStart:i] {
+			columns := chainLine.lead.Columns()
+			spaces := (maxColumns - columns) + 1
+			chainLine.assign[0].SpacesBefore = spaces
+		}
+		chainStart = -1
+		maxColumns = 0
+	}
+	for i, line := range lines {
+		if line.assign == nil {
+			if chainStart != -1 {
+				closeAssignChain(i)
+			}
+		} else {
+			if chainStart == -1 {
+				chainStart = i
+			}
+			columns := line.lead.Columns()
+			if columns > maxColumns {
+				maxColumns = columns
+			}
+		}
+	}
+	if chainStart != -1 {
+		closeAssignChain(len(lines))
+	}
+
+	// Now we'll deal with the comments
+	closeCommentChain := func(i int) {
+		for _, chainLine := range lines[chainStart:i] {
+			columns := chainLine.lead.Columns() + chainLine.assign.Columns()
+			spaces := (maxColumns - columns) + 1
+			chainLine.comment[0].SpacesBefore = spaces
+		}
+		chainStart = -1
+		maxColumns = 0
+	}
+	for i, line := range lines {
+		if line.comment == nil {
+			if chainStart != -1 {
+				closeCommentChain(i)
+			}
+		} else {
+			if chainStart == -1 {
+				chainStart = i
+			}
+			columns := line.lead.Columns() + line.assign.Columns()
+			if columns > maxColumns {
+				maxColumns = columns
+			}
+		}
+	}
+	if chainStart != -1 {
+		closeCommentChain(len(lines))
+	}
+
+}
+
+// spaceAfterToken decides whether a particular subject token should have a
+// space after it when surrounded by the given before and after tokens.
+// "before" can be TokenNil, if the subject token is at the start of a sequence.
+func spaceAfterToken(subject, before, after *Token) bool {
+	switch {
+
+	case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil:
+		// Never add spaces before a newline
+		return false
+
+	case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen:
+		// Don't split a function name from open paren in a call
+		return false
+
+	case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot:
+		// Don't use spaces around attribute access dots
+		return false
+
+	case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis:
+		// No space right before a comma or ... in an argument list
+		return false
+
+	case subject.Type == hclsyntax.TokenComma:
+		// Always a space after a comma
+		return true
+
+	case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc:
+		// No extra spaces within templates
+		return false
+
+	case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent:
+		// This is a special case for inside for expressions where a user
+		// might want to use a literal tuple constructor:
+		// [for x in [foo]: x]
+		// ... in that case, we would normally produce in[foo] thinking that
+		// in is a reference, but we'll recognize it as a keyword here instead
+		// to make the result less confusing.
+		return true
+
+	case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0):
+		return false
+
+	case subject.Type == hclsyntax.TokenMinus:
+		// Since a minus can either be subtraction or negation, and the latter
+		// should _not_ have a space after it, we need to use some heuristics
+		// to decide which case this is.
+		// We guess that we have a negation if the token before doesn't look
+		// like it could be the end of an expression.
+
+		switch before.Type {
+
+		case hclsyntax.TokenNil:
+			// Minus at the start of input must be a negation
+			return false
+
+		case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion:
+			// Minus immediately after an opening bracket or separator must be a negation.
+			return false
+
+		case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus:
+			// Minus immediately after another arithmetic operator must be negation.
+			return false
+
+		case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq:
+			// Minus immediately after another comparison operator must be negation.
+			return false
+
+		case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang:
+			// Minus immediately after logical operator doesn't make sense but probably intended as negation.
+			return false
+
+		default:
+			return true
+		}
+
+	case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace:
+		// Unlike other bracket types, braces have spaces on both sides of them,
+		// both in single-line nested blocks foo { bar = baz } and in object
+		// constructor expressions foo = { bar = baz }.
+		if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace {
+			// An open brace followed by a close brace is an exception, however.
+			// e.g. foo {} rather than foo { }
+			return false
+		}
+		return true
+
+	// In the unlikely event that an interpolation expression is just
+	// a single object constructor, we'll put a space between the ${ and
+	// the following { to make this more obvious, and then the same
+	// thing for the two braces at the end.
+	case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace:
+		return true
+	case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd:
+		return true
+
+	// Don't add spaces between interpolated items
+	case subject.Type == hclsyntax.TokenTemplateSeqEnd && after.Type == hclsyntax.TokenTemplateInterp:
+		return false
+
+	case tokenBracketChange(subject) > 0:
+		// No spaces after open brackets
+		return false
+
+	case tokenBracketChange(after) < 0:
+		// No spaces before close brackets
+		return false
+
+	default:
+		// Most tokens are space-separated
+		return true
+
+	}
+}
+
+func linesForFormat(tokens Tokens) []formatLine {
+	if len(tokens) == 0 {
+		return make([]formatLine, 0)
+	}
+
+	// first we'll count our lines, so we can allocate the array for them in
+	// a single block. (We want to minimize memory pressure in this codepath,
+	// so it can be run somewhat-frequently by editor integrations.)
+	lineCount := 1 // if there are zero newlines then there is one line
+	for _, tok := range tokens {
+		if tokenIsNewline(tok) {
+			lineCount++
+		}
+	}
+
+	// To start, we'll just put everything in the "lead" cell on each line,
+	// and then do another pass over the lines afterwards to adjust.
+	lines := make([]formatLine, lineCount)
+	li := 0
+	lineStart := 0
+	for i, tok := range tokens {
+		if tok.Type == hclsyntax.TokenEOF {
+			// The EOF token doesn't belong to any line, and terminates the
+			// token sequence.
+			lines[li].lead = tokens[lineStart:i]
+			break
+		}
+
+		if tokenIsNewline(tok) {
+			lines[li].lead = tokens[lineStart : i+1]
+			lineStart = i + 1
+			li++
+		}
+	}
+
+	// If a set of tokens doesn't end in TokenEOF (e.g. because it's a
+	// fragment of tokens from the middle of a file) then we might fall
+	// out here with a line still pending.
+	if lineStart < len(tokens) {
+		lines[li].lead = tokens[lineStart:]
+		if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF {
+			lines[li].lead = lines[li].lead[:len(lines[li].lead)-1]
+		}
+	}
+
+	// Now we'll pick off any trailing comments and attribute assignments
+	// to shuffle off into the "comment" and "assign" cells.
+	inHeredoc := false
+	for i := range lines {
+		line := &lines[i]
+		if len(line.lead) == 0 {
+			// if the line is empty then there's nothing for us to do
+			// (this should happen only for the final line, because all other
+			// lines would have a newline token of some kind)
+			continue
+		}
+
+		if inHeredoc {
+			for _, tok := range line.lead {
+				if tok.Type == hclsyntax.TokenCHeredoc {
+					inHeredoc = false
+					break
+				}
+			}
+			// Inside a heredoc everything is "lead", even if there's a
+			// template interpolation embedded in there that might otherwise
+			// confuse our logic below.
+			continue
+		}
+
+		for _, tok := range line.lead {
+			if tok.Type == hclsyntax.TokenOHeredoc {
+				inHeredoc = true
+				break
+			}
+		}
+
+		if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
+			line.comment = line.lead[len(line.lead)-1:]
+			line.lead = line.lead[:len(line.lead)-1]
+		}
+
+		for i, tok := range line.lead {
+			if i > 0 && tok.Type == hclsyntax.TokenEqual {
+				// We only move the tokens into "assign" if the RHS seems to
+				// be a whole expression, which we determine by counting
+				// brackets. If there's a net positive number of brackets
+				// then that suggests we're introducing a multi-line expression.
+				netBrackets := 0
+				for _, token := range line.lead[i:] {
+					netBrackets += tokenBracketChange(token)
+				}
+
+				if netBrackets == 0 {
+					line.assign = line.lead[i:]
+					line.lead = line.lead[:i]
+				}
+				break
+			}
+		}
+	}
+
+	return lines
+}
+
+func tokenIsNewline(tok *Token) bool {
+	if tok.Type == hclsyntax.TokenNewline {
+		return true
+	} else if tok.Type == hclsyntax.TokenComment {
+		// Single line tokens (# and //) consume their terminating newline,
+		// so we need to treat them as newline tokens as well.
+		if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+			return true
+		}
+	}
+	return false
+}
+
+func tokenBracketChange(tok *Token) int {
+	switch tok.Type {
+	case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
+		return 1
+	case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
+		return -1
+	default:
+		return 0
+	}
+}
+
+// formatLine represents a single line of source code for formatting purposes,
+// splitting its tokens into up to three "cells":
+//
+// lead: always present, representing everything up to one of the others
+// assign: if line contains an attribute assignment, represents the tokens
+// starting at (and including) the equals symbol
+// comment: if line contains any non-comment tokens and ends with a
+// single-line comment token, represents the comment.
+//
+// When formatting, the leading spaces of the first tokens in each of these
+// cells are adjusted to align their occurrences vertically on consecutive
+// rows.
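+//
+// For example, in a line such as:
+//
+//	foo = "bar" # set foo
+//
+// the tokens for foo belong to the lead cell, = "bar" to the assign cell,
+// and # set foo to the comment cell.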
+type formatLine struct {
+	lead    Tokens
+	assign  Tokens
+	comment Tokens
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
new file mode 100644
index 00000000..d249cfdf
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
@@ -0,0 +1,250 @@
+package hclwrite
+
+import (
+	"fmt"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// TokensForValue returns a sequence of tokens that represents the given
+// constant value.
+//
+// This function only supports types that are used by HCL. In particular, it
+// does not support capsule types and will panic if given one.
+//
+// It is not possible to express an unknown value in source code, so this
+// function will panic if the given value is unknown or contains any unknown
+// values. A caller can call the value's IsWhollyKnown method to verify that
+// no unknown values are present before calling TokensForValue.
+func TokensForValue(val cty.Value) Tokens {
+	toks := appendTokensForValue(val, nil)
+	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
+	return toks
+}
+
+// TokensForTraversal returns a sequence of tokens that represents the given
+// traversal.
+//
+// If the traversal is absolute then the result is a self-contained, valid
+// reference expression. If the traversal is relative then the returned tokens
+// could be appended to some other expression tokens to traverse into the
+// represented expression.
+func TokensForTraversal(traversal hcl.Traversal) Tokens {
+	toks := appendTokensForTraversal(traversal, nil)
+	format(toks) // fiddle with the SpacesBefore field to get canonical spacing
+	return toks
+}
+
+func appendTokensForValue(val cty.Value, toks Tokens) Tokens {
+	switch {
+
+	case !val.IsKnown():
+		panic("cannot produce tokens for unknown value")
+
+	case val.IsNull():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(`null`),
+		})
+
+	case val.Type() == cty.Bool:
+		var src []byte
+		if val.True() {
+			src = []byte(`true`)
+		} else {
+			src = []byte(`false`)
+		}
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: src,
+		})
+
+	case val.Type() == cty.Number:
+		bf := val.AsBigFloat()
+		srcStr := bf.Text('f', -1)
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenNumberLit,
+			Bytes: []byte(srcStr),
+		})
+
+	case val.Type() == cty.String:
+		// TODO: If it's a multi-line string ending in a newline, format
+		// it as a HEREDOC instead.
+		src := escapeQuotedStringLit(val.AsString())
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOQuote,
+			Bytes: []byte{'"'},
+		})
+		if len(src) > 0 {
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenQuotedLit,
+				Bytes: src,
+			})
+		}
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCQuote,
+			Bytes: []byte{'"'},
+		})
+
+	case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			_, eVal := it.Element()
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+
+	case val.Type().IsMapType() || val.Type().IsObjectType():
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrace,
+			Bytes: []byte{'{'},
+		})
+
+		i := 0
+		for it := val.ElementIterator(); it.Next(); {
+			if i > 0 {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenComma,
+					Bytes: []byte{','},
+				})
+			}
+			eKey, eVal := it.Element()
+			if hclsyntax.ValidIdentifier(eKey.AsString()) {
+				toks = append(toks, &Token{
+					Type:  hclsyntax.TokenIdent,
+					Bytes: []byte(eKey.AsString()),
+				})
+			} else {
+				toks = appendTokensForValue(eKey, toks)
+			}
+			toks = append(toks, &Token{
+				Type:  hclsyntax.TokenEqual,
+				Bytes: []byte{'='},
+			})
+			toks = appendTokensForValue(eVal, toks)
+			i++
+		}
+
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrace,
+			Bytes: []byte{'}'},
+		})
+
+	default:
+		panic(fmt.Sprintf("cannot produce tokens for %#v", val))
+	}
+
+	return toks
+}
+
+func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
+	for _, step := range traversal {
+		toks = appendTokensForTraversalStep(step, toks)
+	}
+	return toks
+}
+
+func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) Tokens {
+	switch ts := step.(type) {
+	case hcl.TraverseRoot:
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenIdent,
+			Bytes: []byte(ts.Name),
+		})
+	case hcl.TraverseAttr:
+		toks = append(
+			toks,
+			&Token{
+				Type:  hclsyntax.TokenDot,
+				Bytes: []byte{'.'},
+			},
+			&Token{
+				Type:  hclsyntax.TokenIdent,
+				Bytes: []byte(ts.Name),
+			},
+		)
+	case hcl.TraverseIndex:
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenOBrack,
+			Bytes: []byte{'['},
+		})
+		toks = appendTokensForValue(ts.Key, toks)
+		toks = append(toks, &Token{
+			Type:  hclsyntax.TokenCBrack,
+			Bytes: []byte{']'},
+		})
+	default:
+		panic(fmt.Sprintf("unsupported traversal step type %T", step))
+	}
+	return toks
+}
+
+func escapeQuotedStringLit(s string) []byte {
+	if len(s) == 0 {
+		return nil
+	}
+	buf := make([]byte, 0, len(s))
+	for i, r := range s {
+		switch r {
+		case '\n':
+			buf = append(buf, '\\', 'n')
+		case '\r':
+			buf = append(buf, '\\', 'r')
+		case '\t':
+			buf = append(buf, '\\', 't')
+		case '"':
+			buf = append(buf, '\\', '"')
+		case '\\':
+			buf = append(buf, '\\', '\\')
+		case '$', '%':
+			buf = appendRune(buf, r)
+			remain := s[i+1:]
+			if len(remain) > 0 && remain[0] == '{' {
+				// Double up our template introducer symbol to escape it.
+				buf = appendRune(buf, r)
+			}
+		default:
+			if !unicode.IsPrint(r) {
+				var fmted string
+				if r < 65536 {
+					fmted = fmt.Sprintf("\\u%04x", r)
+				} else {
+					fmted = fmt.Sprintf("\\U%08x", r)
+				}
+				buf = append(buf, fmted...)
+			} else {
+				buf = appendRune(buf, r)
+			}
+		}
+	}
+	return buf
+}
+
+func appendRune(b []byte, r rune) []byte {
+	l := utf8.RuneLen(r)
+	for i := 0; i < l; i++ {
+		b = append(b, 0) // make room at the end of our buffer
+	}
+	ch := b[len(b)-l:]
+	utf8.EncodeRune(ch, r)
+	return b
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
new file mode 100644
index 00000000..a13c0ec4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
@@ -0,0 +1,23 @@
+package hclwrite
+
+import (
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+type nativeNodeSorter struct {
+	Nodes []hclsyntax.Node
+}
+
+func (s nativeNodeSorter) Len() int {
+	return len(s.Nodes)
+}
+
+func (s nativeNodeSorter) Less(i, j int) bool {
+	rangeI := s.Nodes[i].Range()
+	rangeJ := s.Nodes[j].Range()
+	return rangeI.Start.Byte < rangeJ.Start.Byte
+}
+
+func (s nativeNodeSorter) Swap(i, j int) {
+	s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
new file mode 100644
index 00000000..71fd00fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
@@ -0,0 +1,236 @@
+package hclwrite
+
+import (
+	"fmt"
+
+	"github.com/google/go-cmp/cmp"
+)
+
+// node represents a node in the AST.
+type node struct {
+	content nodeContent
+
+	list          *nodes
+	before, after *node
+}
+
+func newNode(c nodeContent) *node {
+	return &node{
+		content: c,
+	}
+}
+
+func (n *node) Equal(other *node) bool {
+	return cmp.Equal(n.content, other.content)
+}
+
+func (n *node) BuildTokens(to Tokens) Tokens {
+	return n.content.BuildTokens(to)
+}
+
+// Detach removes the receiver from the list it currently belongs to. If the
+// node is not currently in a list, this is a no-op.
+func (n *node) Detach() {
+	if n.list == nil {
+		return
+	}
+	if n.before != nil {
+		n.before.after = n.after
+	}
+	if n.after != nil {
+		n.after.before = n.before
+	}
+	if n.list.first == n {
+		n.list.first = n.after
+	}
+	if n.list.last == n {
+		n.list.last = n.before
+	}
+	n.list = nil
+	n.before = nil
+	n.after = nil
+}
+
+// ReplaceWith removes the receiver from the list it currently belongs to and
+// inserts a new node with the given content in its place. If the node is not
+// currently in a list, this function will panic.
+//
+// The return value is the newly-constructed node, containing the given content.
+// After this function returns, the receiver is no longer attached to a list.
+func (n *node) ReplaceWith(c nodeContent) *node {
+	if n.list == nil {
+		panic("can't replace node that is not in a list")
+	}
+
+	before := n.before
+	after := n.after
+	list := n.list
+	n.before, n.after, n.list = nil, nil, nil
+
+	nn := newNode(c)
+	nn.before = before
+	nn.after = after
+	nn.list = list
+	if before != nil {
+		before.after = nn
+	}
+	if after != nil {
+		after.before = nn
+	}
+	return nn
+}
+
+func (n *node) assertUnattached() {
+	if n.list != nil {
+		panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
+	}
+}
+
+// nodeContent is the interface type implemented by all AST content types.
+type nodeContent interface {
+	walkChildNodes(w internalWalkFunc)
+	BuildTokens(to Tokens) Tokens
+}
+
+// nodes is a list of nodes.
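+// It is implemented as a doubly-linked list so that nodes can be detached
+// and replaced in place without copying the rest of the list.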
+type nodes struct {
+	first, last *node
+}
+
+func (ns *nodes) BuildTokens(to Tokens) Tokens {
+	for n := ns.first; n != nil; n = n.after {
+		to = n.BuildTokens(to)
+	}
+	return to
+}
+
+func (ns *nodes) Clear() {
+	ns.first = nil
+	ns.last = nil
+}
+
+func (ns *nodes) Append(c nodeContent) *node {
+	n := &node{
+		content: c,
+	}
+	ns.AppendNode(n)
+	n.list = ns
+	return n
+}
+
+func (ns *nodes) AppendNode(n *node) {
+	if ns.last != nil {
+		n.before = ns.last
+		ns.last.after = n
+	}
+	n.list = ns
+	ns.last = n
+	if ns.first == nil {
+		ns.first = n
+	}
+}
+
+func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node {
+	if len(tokens) == 0 {
+		return nil
+	}
+	n := newNode(tokens)
+	ns.AppendNode(n)
+	n.list = ns
+	return n
+}
+
+// nodeSet is an unordered set of nodes. It is used to describe a set of nodes
+// that all belong to the same list that have some role or characteristic
+// in common.
+type nodeSet map[*node]struct{}
+
+func newNodeSet() nodeSet {
+	return make(nodeSet)
+}
+
+func (ns nodeSet) Has(n *node) bool {
+	if ns == nil {
+		return false
+	}
+	_, exists := ns[n]
+	return exists
+}
+
+func (ns nodeSet) Add(n *node) {
+	ns[n] = struct{}{}
+}
+
+func (ns nodeSet) Remove(n *node) {
+	delete(ns, n)
+}
+
+func (ns nodeSet) List() []*node {
+	if len(ns) == 0 {
+		return nil
+	}
+
+	ret := make([]*node, 0, len(ns))
+
+	// Determine which list we are working with. We assume here that all of
+	// the nodes belong to the same list, since that is part of the contract
+	// for nodeSet.
+	var list *nodes
+	for n := range ns {
+		list = n.list
+		break
+	}
+
+	// We recover the order by iterating over the whole list. This is not
+	// the most efficient way to do it, but our node lists should always be
+	// small so not worth making things more complex.
+	for n := list.first; n != nil; n = n.after {
+		if ns.Has(n) {
+			ret = append(ret, n)
+		}
+	}
+	return ret
+}
+
+type internalWalkFunc func(*node)
+
+// inTree can be embedded into a content struct that has child nodes to get
+// a standard implementation of the NodeContent interface and a record of
+// a potential parent node.
+type inTree struct {
+	parent   *node
+	children *nodes
+}
+
+func newInTree() inTree {
+	return inTree{
+		children: &nodes{},
+	}
+}
+
+func (it *inTree) assertUnattached() {
+	if it.parent != nil {
+		panic(fmt.Sprintf("node is already attached to %T", it.parent.content))
+	}
+}
+
+func (it *inTree) walkChildNodes(w internalWalkFunc) {
+	for n := it.children.first; n != nil; n = n.after {
+		w(n)
+	}
+}
+
+func (it *inTree) BuildTokens(to Tokens) Tokens {
+	for n := it.children.first; n != nil; n = n.after {
+		to = n.BuildTokens(to)
+	}
+	return to
+}
+
+// leafNode can be embedded into a content struct to give it a do-nothing
+// implementation of walkChildNodes
+type leafNode struct {
+}
+
+func (n *leafNode) walkChildNodes(w internalWalkFunc) {
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
new file mode 100644
index 00000000..1876818f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
@@ -0,0 +1,594 @@
+package hclwrite
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Our "parser" here is actually not doing any parsing of its own. Instead,
Instead,
+// it leans on the native parser in hclsyntax, and then uses the source ranges
+// from the AST to partition the raw token sequence to match the raw tokens
+// up to AST nodes.
+//
+// This strategy feels somewhat counter-intuitive, since most of the work the
+// parser does is thrown away here, but this strategy is chosen because the
+// normal parsing work done by hclsyntax is considered to be the "main case",
+// while modifying and re-printing source is more of an edge case, used only
+// in ancillary tools, and so it's good to keep all the main parsing logic
+// with the main case but keep all of the extra complexity of token wrangling
+// out of the main parser, which is already rather complex just serving the
+// use-cases it already serves.
+//
+// If the parsing step produces any errors, the returned File is nil because
+// we can't reliably extract tokens from the partial AST produced by an
+// erroneous parse.
+func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
+ file, diags := hclsyntax.ParseConfig(src, filename, start)
+ if diags.HasErrors() {
+ return nil, diags
+ }
+
+ // To do our work here, we use the "native" tokens (those from hclsyntax)
+ // to match against source ranges in the AST, but ultimately produce
+ // slices from our sequence of "writer" tokens, which contain only
+ // *relative* position information that is more appropriate for
+ // transformation/writing use-cases.
+ nativeTokens, diags := hclsyntax.LexConfig(src, filename, start)
+ if diags.HasErrors() {
+ // should never happen, since we would've caught these diags in
+ // the first call above.
+ return nil, diags
+ }
+ writerTokens := writerTokens(nativeTokens)
+
+ from := inputTokens{
+ nativeTokens: nativeTokens,
+ writerTokens: writerTokens,
+ }
+
+ before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
+ ret := &File{
+ inTree: newInTree(),
+
+ srcBytes: src,
+ body: root,
+ }
+
+ nodes := ret.inTree.children
+ nodes.Append(before.Tokens())
+ nodes.AppendNode(root)
+ nodes.Append(after.Tokens())
+
+ return ret, diags
+}
+
+type inputTokens struct {
+ nativeTokens hclsyntax.Tokens
+ writerTokens Tokens
+}
+
+func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
+ start, end := partitionTokens(it.nativeTokens, rng)
+ before = it.Slice(0, start)
+ within = it.Slice(start, end)
+ after = it.Slice(end, len(it.nativeTokens))
+ return
+}
+
+func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
+ for i, t := range it.writerTokens {
+ if t.Type == ty {
+ return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
+ }
+ }
+ panic(fmt.Sprintf("didn't find any token of type %s", ty))
+}
+
+func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
+ before, within, after := it.PartitionType(ty)
+ if within.Len() != 1 {
+ panic("PartitionType found more than one token")
+ }
+ return before, within.Tokens()[0], after
+}
+
+// PartitionIncludingComments is like Partition except the returned "within"
+// range includes any lead and line comments associated with the range.
+func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
+ start, end := partitionTokens(it.nativeTokens, rng)
+ start = partitionLeadCommentTokens(it.nativeTokens[:start])
+ _, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
+ end += afterNewline
+
+ before = it.Slice(0, start)
+ within = it.Slice(start, end)
+ after = it.Slice(end, len(it.nativeTokens))
+ return
+
+}
+
+// PartitionBlockItem is similar to PartitionIncludingComments but it returns
+// the comments as separate token sequences so that they can be captured into
+// AST attributes. It makes assumptions that apply only to block items, so
+// should not be used for other constructs.
+func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
+ before, within, after = it.Partition(rng)
+ before, leadComments = before.PartitionLeadComments()
+ lineComments, newline, after = after.PartitionLineEndTokens()
+ return
+}
+
+func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
+ start := partitionLeadCommentTokens(it.nativeTokens)
+ before = it.Slice(0, start)
+ within = it.Slice(start, len(it.nativeTokens))
+ return
+}
+
+func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
+ afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
+ comments = it.Slice(0, afterComments)
+ newline = it.Slice(afterComments, afterNewline)
+ after = it.Slice(afterNewline, len(it.nativeTokens))
+ return
+}
+
+func (it inputTokens) Slice(start, end int) inputTokens {
+ // When we slice, we create a new slice with no additional capacity because
+ // we expect that these slices will be mutated in order to insert
+ // new code into the AST, and we want to ensure that a new underlying
+ // array gets allocated in that case, rather than writing into some
+ // following slice and corrupting it.
+ return inputTokens{
+ nativeTokens: it.nativeTokens[start:end:end],
+ writerTokens: it.writerTokens[start:end:end],
+ }
+}
+
+func (it inputTokens) Len() int {
+ return len(it.nativeTokens)
+}
+
+func (it inputTokens) Tokens() Tokens {
+ return it.writerTokens
+}
+
+func (it inputTokens) Types() []hclsyntax.TokenType {
+ ret := make([]hclsyntax.TokenType, len(it.nativeTokens))
+ for i, tok := range it.nativeTokens {
+ ret[i] = tok.Type
+ }
+ return ret
+}
+
+// parseBody locates the given body within the given input tokens and returns
+// the resulting *Body object as well as the tokens that appeared before and
+// after it.
+func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) {
+ before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange)
+
+ // The main AST doesn't retain the original source ordering of the
+ // body items, so we need to reconstruct that ordering by inspecting
+ // their source ranges.
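The reordering described in that comment is performed just below using `sort.Sort` and the `nativeNodeSorter` shown earlier. Schematically, with stand-in types rather than the vendored ones:

```go
package main

import (
	"fmt"
	"sort"
)

// item stands in for an hclsyntax.Node: all we need is the byte
// offset where it starts in the source file.
type item struct {
	name      string
	startByte int
}

func main() {
	// The native AST hands attributes and blocks back grouped by kind,
	// not in source order...
	items := []item{{"block1", 40}, {"attr1", 0}, {"attr2", 12}}

	// ...so source order is recovered by sorting on range start, which
	// is what nativeNodeSorter.Less does with rangeI.Start.Byte.
	sort.Slice(items, func(i, j int) bool {
		return items[i].startByte < items[j].startByte
	})

	fmt.Println(items) // [{attr1 0} {attr2 12} {block1 40}]
}
```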
+ nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks)) + for _, nativeAttr := range nativeBody.Attributes { + nativeItems = append(nativeItems, nativeAttr) + } + for _, nativeBlock := range nativeBody.Blocks { + nativeItems = append(nativeItems, nativeBlock) + } + sort.Sort(nativeNodeSorter{nativeItems}) + + body := &Body{ + inTree: newInTree(), + items: newNodeSet(), + } + + remain := within + for _, nativeItem := range nativeItems { + beforeItem, item, afterItem := parseBodyItem(nativeItem, remain) + + if beforeItem.Len() > 0 { + body.AppendUnstructuredTokens(beforeItem.Tokens()) + } + body.appendItemNode(item) + + remain = afterItem + } + + if remain.Len() > 0 { + body.AppendUnstructuredTokens(remain.Tokens()) + } + + return before, newNode(body), after +} + +func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) { + before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range()) + + var item *node + + switch tItem := nativeItem.(type) { + case *hclsyntax.Attribute: + item = parseAttribute(tItem, within, leadComments, lineComments, newline) + case *hclsyntax.Block: + item = parseBlock(tItem, within, leadComments, lineComments, newline) + default: + // should never happen if caller is behaving + panic("unsupported native item type") + } + + return before, item, after +} + +func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node { + attr := &Attribute{ + inTree: newInTree(), + } + children := attr.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + attr.leadComments = cn + children.AppendNode(cn) + } + + before, nameTokens, from := from.Partition(nativeAttr.NameRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if nameTokens.Len() != 1 { + // Should never happen with valid input + panic("attribute name is not exactly one token") + } + token := nameTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + attr.name = in + children.AppendNode(in) + } + + before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(equalsTokens.Tokens()) + + before, exprTokens, from := from.Partition(nativeAttr.Expr.Range()) + { + children.AppendUnstructuredTokens(before.Tokens()) + exprNode := parseExpression(nativeAttr.Expr, exprTokens) + attr.expr = exprNode + children.AppendNode(exprNode) + } + + { + cn := newNode(newComments(lineComments.Tokens())) + attr.lineComments = cn + children.AppendNode(cn) + } + + children.AppendUnstructuredTokens(newline.Tokens()) + + // Collect any stragglers, though there shouldn't be any + children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(attr) +} + +func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node { + block := &Block{ + inTree: newInTree(), + labels: newNodeSet(), + } + children := block.inTree.children + + { + cn := newNode(newComments(leadComments.Tokens())) + block.leadComments = cn + children.AppendNode(cn) + } + + before, typeTokens, from := from.Partition(nativeBlock.TypeRange) + { + children.AppendUnstructuredTokens(before.Tokens()) + if typeTokens.Len() != 1 { + // Should never happen with valid input + panic("block type name is not exactly one token") + } + token := typeTokens.Tokens()[0] + in := newNode(newIdentifier(token)) + block.typeName = in + children.AppendNode(in) + } 
+ + for _, rng := range nativeBlock.LabelRanges { + var labelTokens inputTokens + before, labelTokens, from = from.Partition(rng) + children.AppendUnstructuredTokens(before.Tokens()) + tokens := labelTokens.Tokens() + ln := newNode(newQuoted(tokens)) + block.labels.Add(ln) + children.AppendNode(ln) + } + + before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendUnstructuredTokens(oBrace.Tokens()) + + // We go a bit out of order here: we go hunting for the closing brace + // so that we have a delimited body, but then we'll deal with the body + // before we actually append the closing brace and any straggling tokens + // that appear after it. + bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange) + before, body, after := parseBody(nativeBlock.Body, bodyTokens) + children.AppendUnstructuredTokens(before.Tokens()) + block.body = body + children.AppendNode(body) + children.AppendUnstructuredTokens(after.Tokens()) + + children.AppendUnstructuredTokens(cBrace.Tokens()) + + // stragglers + children.AppendUnstructuredTokens(from.Tokens()) + if lineComments.Len() > 0 { + // blocks don't actually have line comments, so we'll just treat + // them as extra stragglers + children.AppendUnstructuredTokens(lineComments.Tokens()) + } + children.AppendUnstructuredTokens(newline.Tokens()) + + return newNode(block) +} + +func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node { + expr := newExpression() + children := expr.inTree.children + + nativeVars := nativeExpr.Variables() + + for _, nativeTraversal := range nativeVars { + before, traversal, after := parseTraversal(nativeTraversal, from) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(traversal) + expr.absTraversals.Add(traversal) + from = after + } + // Attach any stragglers that don't belong to a traversal to the expression + // itself. In an expression with no traversals at all, this is just the + // entirety of "from". 
+ children.AppendUnstructuredTokens(from.Tokens()) + + return newNode(expr) +} + +func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) { + traversal := newTraversal() + children := traversal.inTree.children + before, from, after = from.Partition(nativeTraversal.SourceRange()) + + stepAfter := from + for _, nativeStep := range nativeTraversal { + before, step, after := parseTraversalStep(nativeStep, stepAfter) + children.AppendUnstructuredTokens(before.Tokens()) + children.AppendNode(step) + traversal.steps.Add(step) + stepAfter = after + } + + return before, newNode(traversal), after +} + +func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) { + var children *nodes + switch tNativeStep := nativeStep.(type) { + + case hcl.TraverseRoot, hcl.TraverseAttr: + step := newTraverseName() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent) + name := newIdentifier(token) + children.AppendUnstructuredTokens(inBefore.Tokens()) + step.name = children.Append(name) + children.AppendUnstructuredTokens(inAfter.Tokens()) + return before, newNode(step), after + + case hcl.TraverseIndex: + step := newTraverseIndex() + children = step.inTree.children + before, from, after = from.Partition(nativeStep.SourceRange()) + + var inBefore, oBrack, keyTokens, cBrack inputTokens + inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack) + children.AppendUnstructuredTokens(inBefore.Tokens()) + children.AppendUnstructuredTokens(oBrack.Tokens()) + keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack) + + keyVal := tNativeStep.Key + switch keyVal.Type() { + case cty.String: + key := newQuoted(keyTokens.Tokens()) + step.key = children.Append(key) + case cty.Number: + valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit) + children.AppendUnstructuredTokens(valBefore.Tokens()) + key := newNumber(valToken) + step.key = children.Append(key) + children.AppendUnstructuredTokens(valAfter.Tokens()) + } + + children.AppendUnstructuredTokens(cBrack.Tokens()) + children.AppendUnstructuredTokens(from.Tokens()) + + return before, newNode(step), after + default: + panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep)) + } + +} + +// writerTokens takes a sequence of tokens as produced by the main hclsyntax +// package and transforms it into an equivalent sequence of tokens using +// this package's own token model. +// +// The resulting list contains the same number of tokens and uses the same +// indices as the input, allowing the two sets of tokens to be correlated +// by index. +func writerTokens(nativeTokens hclsyntax.Tokens) Tokens { + // Ultimately we want a slice of token _pointers_, but since we can + // predict how much memory we're going to devote to tokens we'll allocate + // it all as a single flat buffer and thus give the GC less work to do. + tokBuf := make([]Token, len(nativeTokens)) + var lastByteOffset int + for i, mainToken := range nativeTokens { + // Create a copy of the bytes so that we can mutate without + // corrupting the original token stream. 
+ bytes := make([]byte, len(mainToken.Bytes))
+ copy(bytes, mainToken.Bytes)
+
+ tokBuf[i] = Token{
+ Type: mainToken.Type,
+ Bytes: bytes,
+
+ // We assume here that spaces are always ASCII spaces, since
+ // that's what the scanner also assumes, and thus the number
+ // of bytes skipped is also the number of space characters.
+ SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset,
+ }
+
+ lastByteOffset = mainToken.Range.End.Byte
+ }
+
+ // Now make a slice of pointers into the previous slice.
+ ret := make(Tokens, len(tokBuf))
+ for i := range ret {
+ ret[i] = &tokBuf[i]
+ }
+
+ return ret
+}
+
+// partitionTokens takes a sequence of tokens and a hcl.Range and returns
+// two indices within the token sequence that correspond with the range
+// boundaries, such that the slice operator could be used to produce
+// three token sequences for before, within, and after respectively:
+//
+// start, end := partitionTokens(toks, rng)
+// before := toks[:start]
+// within := toks[start:end]
+// after := toks[end:]
+//
+// This works best when the range is aligned with token boundaries (e.g.
+// because it was produced in terms of the scanner's result) but if that isn't
+// true then it will make a best effort that may produce strange results at
+// the boundaries.
+//
+// Native hclsyntax tokens are used here, because they contain the necessary
+// absolute position information. However, since writerTokens produces a
+// correlatable sequence of writer tokens, the resulting indices can be
+// used also to index into its result, allowing the partitioning of writer
+// tokens to be driven by the partitioning of native tokens.
+//
+// The tokens are assumed to be in source order and non-overlapping, which
+// will be true if the token sequence from the scanner is used directly.
+func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) {
+ // We use a linear search here because we assume that in most cases our
+ // target range is close to the beginning of the sequence, and the sequences
+ // are generally small for most reasonable files anyway.
+ for i := 0; ; i++ {
+ if i >= len(toks) {
+ // No tokens for the given range at all!
+ return len(toks), len(toks)
+ }
+
+ if toks[i].Range.Start.Byte >= rng.Start.Byte {
+ start = i
+ break
+ }
+ }
+
+ for i := start; ; i++ {
+ if i >= len(toks) {
+ // The range "hangs off" the end of the token sequence
+ return start, len(toks)
+ }
+
+ if toks[i].Range.Start.Byte >= rng.End.Byte {
+ end = i // end marker is exclusive
+ break
+ }
+ }
+
+ return start, end
+}
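To make the before/within/after contract in the comment above concrete, here is a compact, self-contained sketch that does the same three-way partition over plain stand-in tokens (illustrative types, not the vendored code):

```go
package main

import "fmt"

// tok is a stand-in for hclsyntax.Token: just a start byte offset.
type tok struct{ start int }

// partition mirrors partitionTokens above: it returns the indices that
// split toks into before/within/after for the byte range [from, to).
func partition(toks []tok, from, to int) (start, end int) {
	start, end = len(toks), len(toks)
	for i, t := range toks {
		if t.start >= from {
			start = i
			break
		}
	}
	for i := start; i < len(toks); i++ {
		if toks[i].start >= to {
			return start, i // end marker is exclusive
		}
	}
	return start, end
}

func main() {
	toks := []tok{{0}, {4}, {9}, {15}, {22}}
	start, end := partition(toks, 4, 15)
	before, within, after := toks[:start], toks[start:end], toks[end:]
	fmt.Println(before, within, after) // [{0}] [{4} {9}] [{15} {22}]
}
```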
+
+// partitionLeadCommentTokens takes a sequence of tokens that is assumed
+// to immediately precede a construct that can have lead comment tokens,
+// and returns the index into that sequence where the lead comments begin.
+//
+// Lead comments are defined as whole lines containing only comment tokens
+// with no blank lines between. If no such lines are found, the returned
+// index will be len(toks).
+func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
+ // single-line comments (which is what we're interested in here)
+ // consume their trailing newline, so we can just walk backwards
+ // until we stop seeing comment tokens.
+ for i := len(toks) - 1; i >= 0; i-- {
+ if toks[i].Type != hclsyntax.TokenComment {
+ return i + 1
+ }
+ }
+ return 0
+}
+
+// partitionLineEndTokens takes a sequence of tokens that is assumed
+// to immediately follow a construct that can have a line comment, and
+// returns first the index where any line comments end and then second
+// the index immediately after the trailing newline.
+//
+// Line comments are defined as comments that appear immediately after
+// a construct on the same line where its significant tokens ended.
+//
+// Since single-line comment tokens (# and //) include the newline that
+// terminates them, in the presence of these the two returned indices
+// will be the same since the comment itself serves as the line end.
+func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
+ for i := 0; i < len(toks); i++ {
+ tok := toks[i]
+ if tok.Type != hclsyntax.TokenComment {
+ switch tok.Type {
+ case hclsyntax.TokenNewline:
+ return i, i + 1
+ case hclsyntax.TokenEOF:
+ // Although this is valid, we mustn't include the EOF
+ // itself as our "newline" or else strange things will
+ // happen when we try to append new items.
+ return i, i
+ default:
+ // If we have well-formed input here then nothing else should be
+ // possible. This path should never happen, because we only try
+ // to extract tokens from the sequence if the parser succeeded,
+ // and it should catch this problem itself.
+ panic("malformed line trailers: expected only comments and newlines")
+ }
+ }
+
+ if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
+ // Newline at the end of a single-line comment serves both as
+ // the end of comments *and* the end of the line.
+ return i + 1, i + 1
+ }
+ }
+ return len(toks), len(toks)
+}
+
+// lexConfig uses the hclsyntax scanner to get a token stream and then
+// rewrites it into this package's token model.
+//
+// Any errors produced during scanning are ignored, so the results of this
+// function should be used with care.
+func lexConfig(src []byte) Tokens {
+ mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
+ return writerTokens(mainTokens)
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
new file mode 100644
index 00000000..4d5ce2a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
@@ -0,0 +1,44 @@
+package hclwrite
+
+import (
+ "bytes"
+
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+// NewFile creates a new file object that is empty and ready to have constructs
+// added to it.
+func NewFile() *File {
+ body := &Body{
+ inTree: newInTree(),
+ items: newNodeSet(),
+ }
+ file := &File{
+ inTree: newInTree(),
+ }
+ file.body = file.inTree.children.Append(body)
+ return file
+}
+
+// ParseConfig interprets the given source bytes into a *hclwrite.File. The
+// resulting AST can be used to perform surgical edits on the source code
+// before turning it back into bytes again.
+func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
+ return parse(src, filename, start)
+}
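Before the formatting entry point that follows, a brief usage sketch of these two public functions may help. This is an illustrative fragment, not part of the vendored file; the file name and source contents are assumed for the example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclwrite"
)

func main() {
	src := []byte("a   = 1\nb = \"two\"\n")

	// Parse into a writable AST; diagnostics behave like hclsyntax's.
	f, diags := hclwrite.ParseConfig(src, "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		log.Fatal(diags)
	}
	_ = f // surgical edits on f would go here

	// Format only adjusts whitespace, so it can run without a full parse.
	fmt.Printf("%s", hclwrite.Format(src))
}
```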
+
+// Format takes source code and performs simple whitespace changes to transform
+// it to a canonical layout style.
+//
+// Format skips constructing an AST and works directly with tokens, so it
+// is less expensive than formatting via the AST for situations where no other
+// changes will be made. It also ignores syntax errors and can thus be applied
+// to partial source code, although the result in that case may not be
+// desirable.
+func Format(src []byte) []byte {
+ tokens := lexConfig(src)
+ format(tokens)
+ buf := &bytes.Buffer{}
+ tokens.WriteTo(buf)
+ return buf.Bytes()
+}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
new file mode 100644
index 00000000..d87f8185
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
@@ -0,0 +1,122 @@
+package hclwrite
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/apparentlymart/go-textseg/textseg"
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+// Token is a single sequence of bytes annotated with a type. It is similar
+// in purpose to hclsyntax.Token, but discards the source position information
+// since that is not useful in code generation.
+type Token struct {
+ Type hclsyntax.TokenType
+ Bytes []byte
+
+ // We record the number of spaces before each token so that we can
+ // reproduce the exact layout of the original file when we're making
+ // surgical changes in-place. When _new_ code is created it will always
+ // be in the canonical style, but we preserve layout of existing code.
+ SpacesBefore int
+}
+
+// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token.
+// A complete token is not possible since we don't have source location
+// information here, and so this method is unexported so we can be sure it will
+// only be used for internal purposes where we know the range isn't important.
+//
+// This is primarily intended to allow us to re-use certain functionality from
+// hclsyntax rather than re-implementing it against our own token type here.
+func (t *Token) asHCLSyntax() hclsyntax.Token {
+ return hclsyntax.Token{
+ Type: t.Type,
+ Bytes: t.Bytes,
+ Range: hcl.Range{
+ Filename: "<generated>",
+ },
+ }
+}
+
+// Tokens is a flat list of tokens.
+type Tokens []*Token
+
+func (ts Tokens) Bytes() []byte {
+ buf := &bytes.Buffer{}
+ ts.WriteTo(buf)
+ return buf.Bytes()
+}
+
+func (ts Tokens) testValue() string {
+ return string(ts.Bytes())
+}
+
+// Columns returns the number of columns (grapheme clusters) the token sequence
+// occupies. The result is not meaningful if there are newline or single-line
+// comment tokens in the sequence.
+func (ts Tokens) Columns() int {
+ ret := 0
+ for _, token := range ts {
+ ret += token.SpacesBefore // spaces are always worth one column each
+ ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters)
+ ret += ct
+ }
+ return ret
+}
+
+// WriteTo takes an io.Writer and writes the bytes for each token to it,
+// along with the spacing that separates each token. In other words, this
+// allows serializing the tokens to a file or other such byte stream.
+func (ts Tokens) WriteTo(wr io.Writer) (int64, error) {
+ // We know we're going to be writing a lot of small chunks of repeated
+ // space characters, so we'll prepare a buffer of these that we can
+ // easily pass to wr.Write without any further allocation.
+ spaces := make([]byte, 40)
+ for i := range spaces {
+ spaces[i] = ' '
+ }
+
+ var n int64
+ var err error
+ for _, token := range ts {
+ if err != nil {
+ return n, err
+ }
+
+ for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) {
+ thisChunk := spacesBefore
+ if thisChunk > len(spaces) {
+ thisChunk = len(spaces)
+ }
+ var thisN int
+ thisN, err = wr.Write(spaces[:thisChunk])
+ n += int64(thisN)
+ if err != nil {
+ return n, err
+ }
+ }
+
+ var thisN int
+ thisN, err = wr.Write(token.Bytes)
+ n += int64(thisN)
+ }
+
+ return n, err
+}
+
+func (ts Tokens) walkChildNodes(w internalWalkFunc) {
+ // Unstructured tokens have no child nodes
+}
+
+func (ts Tokens) BuildTokens(to Tokens) Tokens {
+ return append(to, ts...)
+}
+
+func newIdentToken(name string) *Token {
+ return &Token{
+ Type: hclsyntax.TokenIdent,
+ Bytes: []byte(name),
+ }
+}
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
index 186ed251..97d2292a 100644
--- a/vendor/github.com/hashicorp/hil/README.md
+++ b/vendor/github.com/hashicorp/hil/README.md
@@ -43,7 +43,7 @@ better tested for general purpose use.
 ## Syntax
 
 For a complete grammar, please see the parser itself. A high-level overview
-of the syntax and grammer is listed here.
+of the syntax and grammar is listed here.
 
 Code begins within `${` and `}`. Outside of this, text is treated literally.
 For example, `foo` is a valid HIL program that is just the
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
index 8149d495..da6014fe 100644
--- a/vendor/github.com/hashicorp/hil/ast/literal.go
+++ b/vendor/github.com/hashicorp/hil/ast/literal.go
@@ -77,3 +77,12 @@ func (n *LiteralNode) String() string {
 func (n *LiteralNode) Type(Scope) (Type, error) {
 	return n.Typex, nil
 }
+
+// IsUnknown returns true either if the node's value is itself unknown
+// or if it is a collection containing any unknown elements, deeply.
+func (n *LiteralNode) IsUnknown() bool {
+ return IsUnknown(Variable{
+ Type: n.Typex,
+ Value: n.Value,
+ })
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
index 40aa534c..06bd18de 100644
--- a/vendor/github.com/hashicorp/hil/ast/variables_helper.go
+++ b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
@@ -3,54 +3,61 @@ package ast
 
 import "fmt"
 
 func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
-	listTypes := make(map[Type]struct{})
+ if len(list) == 0 {
+ return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+ }
+
+ elemType := TypeUnknown
 for _, v := range list {
- // Allow unknown
 if v.Type == TypeUnknown {
 continue
 }
 
- if _, ok := listTypes[v.Type]; ok {
+ if elemType == TypeUnknown {
+ elemType = v.Type
 continue
 }
 
- listTypes[v.Type] = struct{}{}
- }
-
- if len(listTypes) != 1 && len(list) != 0 {
- return TypeInvalid, fmt.Errorf("list %q does not have homogenous types. found %s", variableName, reportTypes(listTypes))
- }
+ if v.Type != elemType {
+ return TypeInvalid, fmt.Errorf(
+ "list %q does not have homogenous types. 
found %s and then %s", + variableName, + elemType, v.Type, + ) + } - if len(list) > 0 { - return list[0].Type, nil + elemType = v.Type } - return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName) + return elemType, nil } func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) { - valueTypes := make(map[Type]struct{}) + if len(vmap) == 0 { + return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) + } + + elemType := TypeUnknown for _, v := range vmap { - // Allow unknown if v.Type == TypeUnknown { continue } - if _, ok := valueTypes[v.Type]; ok { + if elemType == TypeUnknown { + elemType = v.Type continue } - valueTypes[v.Type] = struct{}{} - } - - if len(valueTypes) != 1 && len(vmap) != 0 { - return TypeInvalid, fmt.Errorf("map %q does not have homogenous value types. found %s", variableName, reportTypes(valueTypes)) - } + if v.Type != elemType { + return TypeInvalid, fmt.Errorf( + "map %q does not have homogenous types. found %s and then %s", + variableName, + elemType, v.Type, + ) + } - // For loop here is an easy way to get a single key, we return immediately. - for _, v := range vmap { - return v.Type, nil + elemType = v.Type } - // This means the map is empty - return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName) + return elemType, nil } diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go index a8ca44e0..f16da391 100644 --- a/vendor/github.com/hashicorp/hil/check_types.go +++ b/vendor/github.com/hashicorp/hil/check_types.go @@ -98,10 +98,6 @@ func (v *TypeCheck) visit(raw ast.Node) ast.Node { pos.Column, pos.Line, err) } - if v.StackPeek() == ast.TypeUnknown { - v.err = errExitUnknown - } - return result } @@ -116,6 +112,14 @@ func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) { exprs[len(tc.n.Exprs)-1-i] = v.StackPop() } + // If any operand is unknown then our result is automatically unknown + for _, ty := range exprs { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + switch tc.n.Op { case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr: return tc.checkLogical(v, exprs) @@ -333,6 +337,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { continue } + if args[i] == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + if args[i] != expected { cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i]) if cn != nil { @@ -350,6 +359,11 @@ func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) { if function.Variadic && function.VariadicType != ast.TypeAny { args = args[len(function.ArgTypes):] for i, t := range args { + if t == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + if t != function.VariadicType { realI := i + len(function.ArgTypes) cn := v.ImplicitConversion( @@ -384,6 +398,11 @@ func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { trueType := v.StackPop() condType := v.StackPop() + if condType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + if condType != ast.TypeBool { cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr) if cn == nil { @@ -395,7 +414,7 @@ func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { } // The types of the true and false expression 
must match - if trueType != falseType { + if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown { // Since passing around stringified versions of other types is // common, we pragmatically allow the false expression to dictate @@ -441,7 +460,13 @@ func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) { } // Result type (guaranteed to also match falseType due to the above) - v.StackPush(trueType) + if trueType == ast.TypeUnknown { + // falseType may also be unknown, but that's okay because two + // unknowns means our result is unknown anyway. + v.StackPush(falseType) + } else { + v.StackPush(trueType) + } return tc.n, nil } @@ -457,6 +482,13 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { types[len(n.Exprs)-1-i] = v.StackPop() } + for _, ty := range types { + if ty == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + } + // If there is only one argument and it is a list, we evaluate to a list if len(types) == 1 { switch t := types[0]; t { @@ -469,7 +501,14 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { } // Otherwise, all concat args must be strings, so validate that + resultType := ast.TypeString for i, t := range types { + + if t == ast.TypeUnknown { + resultType = ast.TypeUnknown + continue + } + if t != ast.TypeString { cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i]) if cn != nil { @@ -482,8 +521,8 @@ func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) { } } - // This always results in type string - v.StackPush(ast.TypeString) + // This always results in type string, unless there are unknowns + v.StackPush(resultType) return n, nil } @@ -509,13 +548,6 @@ func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) { "unknown variable accessed: %s", tc.n.Name) } - // Check if the variable contains any unknown types. If so, then - // mark it as unknown. 
- if ast.IsUnknown(variable) { - v.StackPush(ast.TypeUnknown) - return tc.n, nil - } - // Add the type to the stack v.StackPush(variable.Type) @@ -530,6 +562,11 @@ func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) { keyType := v.StackPop() targetType := v.StackPop() + if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown { + v.StackPush(ast.TypeUnknown) + return tc.n, nil + } + // Ensure we have a VariableAccess as the target varAccessNode, ok := tc.n.Target.(*ast.VariableAccess) if !ok { diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go index f2024d01..184e029b 100644 --- a/vendor/github.com/hashicorp/hil/convert.go +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -47,8 +47,23 @@ func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { } func InterfaceToVariable(input interface{}) (ast.Variable, error) { - if inputVariable, ok := input.(ast.Variable); ok { - return inputVariable, nil + if iv, ok := input.(ast.Variable); ok { + return iv, nil + } + + // This is just to maintain backward compatibility + // after https://github.com/mitchellh/mapstructure/pull/98 + if v, ok := input.([]ast.Variable); ok { + return ast.Variable{ + Type: ast.TypeList, + Value: v, + }, nil + } + if v, ok := input.(map[string]ast.Variable); ok { + return ast.Variable{ + Type: ast.TypeMap, + Value: v, + }, nil } var stringVal string diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go index 5c7afb02..27820769 100644 --- a/vendor/github.com/hashicorp/hil/eval.go +++ b/vendor/github.com/hashicorp/hil/eval.go @@ -54,6 +54,14 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { return InvalidResult, err } + // If the result contains any nested unknowns then the result as a whole + // is unknown, so that callers only have to deal with "entirely known" + // or "entirely unknown" as outcomes. + if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) { + outputType = ast.TypeUnknown + output = UnknownValue + } + switch outputType { case ast.TypeList: val, err := VariableToInterface(ast.Variable{ @@ -264,6 +272,10 @@ func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, e args := make([]interface{}, len(v.Args)) for i, _ := range v.Args { node := stack.Pop().(*ast.LiteralNode) + if node.IsUnknown() { + // If any arguments are unknown then the result is automatically unknown + return UnknownValue, ast.TypeUnknown, nil + } args[len(v.Args)-1-i] = node.Value } @@ -286,6 +298,11 @@ func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast. trueLit := stack.Pop().(*ast.LiteralNode) condLit := stack.Pop().(*ast.LiteralNode) + if condLit.IsUnknown() { + // If our conditional is unknown then our result is also unknown + return UnknownValue, ast.TypeUnknown, nil + } + if condLit.Value.(bool) { return trueLit.Value, trueLit.Typex, nil } else { @@ -301,6 +318,17 @@ func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Ty variableName := v.Index.Target.(*ast.VariableAccess).Name + if key.IsUnknown() { + // If our key is unknown then our result is also unknown + return UnknownValue, ast.TypeUnknown, nil + } + + // For target, we'll accept collections containing unknown values but + // we still need to catch when the collection itself is unknown, shallowly. 
+ if target.Typex == ast.TypeUnknown { + return UnknownValue, ast.TypeUnknown, nil + } + switch target.Typex { case ast.TypeList: return v.evalListIndex(variableName, target.Value, key.Value) @@ -377,8 +405,22 @@ func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, // The expressions should all be on the stack in reverse // order. So pop them off, reverse their order, and concatenate. nodes := make([]*ast.LiteralNode, 0, len(v.Exprs)) + haveUnknown := false for range v.Exprs { - nodes = append(nodes, stack.Pop().(*ast.LiteralNode)) + n := stack.Pop().(*ast.LiteralNode) + nodes = append(nodes, n) + + // If we have any unknowns then the whole result is unknown + // (we must deal with this first, because the type checker can + // skip type conversions in the presence of unknowns, and thus + // any of our other nodes may be incorrectly typed.) + if n.IsUnknown() { + haveUnknown = true + } + } + + if haveUnknown { + return UnknownValue, ast.TypeUnknown, nil } // Special case the single list and map @@ -396,6 +438,14 @@ func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, // Otherwise concatenate the strings var buf bytes.Buffer for i := len(nodes) - 1; i >= 0; i-- { + if nodes[i].Typex != ast.TypeString { + return nil, ast.TypeInvalid, fmt.Errorf( + "invalid output with %s value at index %d: %#v", + nodes[i].Typex, + i, + nodes[i].Value, + ) + } buf.WriteString(nodes[i].Value.(string)) } @@ -418,11 +468,5 @@ func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, a "unknown variable accessed: %s", v.Name) } - // Check if the variable contains any unknown types. If so, then - // mark it as unknown and return that type. - if ast.IsUnknown(variable) { - return nil, ast.TypeUnknown, nil - } - return variable.Value, variable.Type, nil } diff --git a/vendor/github.com/hashicorp/hil/go.mod b/vendor/github.com/hashicorp/hil/go.mod new file mode 100644 index 00000000..45719a69 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/go.mod @@ -0,0 +1,6 @@ +module github.com/hashicorp/hil + +require ( + github.com/mitchellh/mapstructure v1.1.2 + github.com/mitchellh/reflectwalk v1.0.0 +) diff --git a/vendor/github.com/hashicorp/hil/go.sum b/vendor/github.com/hashicorp/hil/go.sum new file mode 100644 index 00000000..83639b69 --- /dev/null +++ b/vendor/github.com/hashicorp/hil/go.sum @@ -0,0 +1,4 @@ +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go index bab86c67..86085de0 100644 --- a/vendor/github.com/hashicorp/hil/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hil/scanner/scanner.go @@ -395,6 +395,12 @@ func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) { pos.Column = pos.Column + 2 litLen = litLen + 2 continue + } else if follow == '\\' { + // \\ escapes \ + // so we will consume both characters here. 
+ pos.Column = pos.Column + 2 + litLen = litLen + 2 + continue } } } diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod new file mode 100644 index 00000000..ba38a457 --- /dev/null +++ b/vendor/github.com/hashicorp/logutils/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/logutils diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE new file mode 100644 index 00000000..c33dcc7c --- /dev/null +++ b/vendor/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 00000000..3582ee4d --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,243 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/armon/go-metrics" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. 
+	adjustmentIndex uint
+
+	// adjustment is used to store samples for the adjustment calculation.
+	adjustmentSamples []float64
+
+	// latencyFilterSamples is used to store the last several RTT samples,
+	// keyed by node name. We will use the config's LatencyFilterSize
+	// value to determine how many samples we keep, per node.
+	latencyFilterSamples map[string][]float64
+
+	// stats is used to record events that occur when updating coordinates.
+	stats ClientStats
+
+	// mutex enables safe concurrent access to the client.
+	mutex sync.RWMutex
+}
+
+// ClientStats is used to record events that occur when updating coordinates.
+type ClientStats struct {
+	// Resets is incremented any time we reset our local coordinate because
+	// our calculations have resulted in an invalid state.
+	Resets int
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+	if !(config.Dimensionality > 0) {
+		return nil, fmt.Errorf("dimensionality must be >0")
+	}
+
+	return &Client{
+		coord:                NewCoordinate(config),
+		origin:               NewCoordinate(config),
+		config:               config,
+		adjustmentIndex:      0,
+		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
+		latencyFilterSamples: make(map[string][]float64),
+	}, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) error {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if err := c.checkCoordinate(coord); err != nil {
+		return err
+	}
+
+	c.coord = coord.Clone()
+	return nil
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// Stats returns a copy of stats for the client.
+func (c *Client) Stats() ClientStats {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	return c.stats
+}
+
+// checkCoordinate returns an error if the coordinate isn't compatible with
+// this client, or if the coordinate itself isn't valid. This assumes the mutex
+// has been locked already.
+func (c *Client) checkCoordinate(coord *Coordinate) error {
+	if !c.coord.IsCompatibleWith(coord) {
+		return fmt.Errorf("dimensions aren't compatible")
+	}
+
+	if !coord.IsValid() {
+		return fmt.Errorf("coordinate is invalid")
+	}
+
+	return nil
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
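+//
+// A sketch of the update rule from the Vivaldi paper (see the Config docs;
+// notation here is informal, with ce = VivaldiCE and cc = VivaldiCC):
+//
+//	w     = localError / (localError + remoteError)
+//	eps   = |dist - rtt| / rtt
+//	error <- ce*w*eps + error*(1 - ce*w)
+//	force = cc * w * (rtt - dist)
+//
+// and the coordinate is then moved by applying that force toward the other
+// node's coordinate.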
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { + const zeroThreshold = 1.0e-6 + + dist := c.coord.DistanceTo(other).Seconds() + if rttSeconds < zeroThreshold { + rttSeconds = zeroThreshold + } + wrongness := math.Abs(dist-rttSeconds) / rttSeconds + + totalError := c.coord.Error + other.Error + if totalError < zeroThreshold { + totalError = zeroThreshold + } + weight := c.coord.Error / totalError + + c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) + if c.coord.Error > c.config.VivaldiErrorMax { + c.coord.Error = c.config.VivaldiErrorMax + } + + delta := c.config.VivaldiCC * weight + force := delta * (rttSeconds - dist) + c.coord = c.coord.ApplyForce(c.config, force, other) +} + +// updateAdjustment updates the adjustment portion of the client's coordinate, if +// the feature is enabled. This assumes that the mutex has been locked already. +func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { + if c.config.AdjustmentWindowSize == 0 { + return + } + + // Note that the existing adjustment factors don't figure in to this + // calculation so we use the raw distance here. + dist := c.coord.rawDistanceTo(other) + c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist + c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize + + sum := 0.0 + for _, sample := range c.adjustmentSamples { + sum += sample + } + c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) +} + +// updateGravity applies a small amount of gravity to pull coordinates towards +// the center of the coordinate system to combat drift. This assumes that the +// mutex is locked already. +func (c *Client) updateGravity() { + dist := c.origin.DistanceTo(c.coord).Seconds() + force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) + c.coord = c.coord.ApplyForce(c.config, force, c.origin) +} + +// Update takes other, a coordinate for another node, and rtt, a round trip +// time observation for a ping to that node, and updates the estimated position of +// the client's coordinate. Returns the updated coordinate. +func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) { + c.mutex.Lock() + defer c.mutex.Unlock() + + if err := c.checkCoordinate(other); err != nil { + return nil, err + } + + // The code down below can handle zero RTTs, which we have seen in + // https://github.com/hashicorp/consul/issues/3789, presumably in + // environments with coarse-grained monotonic clocks (we are still + // trying to pin this down). In any event, this is ok from a code PoV + // so we don't need to alert operators with spammy messages. We did + // add a counter so this is still observable, though. + const maxRTT = 10 * time.Second + if rtt < 0 || rtt > maxRTT { + return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT) + } + if rtt == 0 { + metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1) + } + + rttSeconds := c.latencyFilter(node, rtt.Seconds()) + c.updateVivaldi(other, rttSeconds) + c.updateAdjustment(other, rttSeconds) + c.updateGravity() + if !c.coord.IsValid() { + c.stats.Resets++ + c.coord = NewCoordinate(c.config) + } + + return c.coord.Clone(), nil +} + +// DistanceTo returns the estimated RTT from the client's coordinate to other, the +// coordinate for another node. 
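+//
+// A minimal usage sketch (the node name and RTT are hypothetical, and
+// otherCoord is assumed to be a coordinate received from another node):
+//
+//	client, _ := NewClient(DefaultConfig())
+//	updated, _ := client.Update("node_a", otherCoord, 35*time.Millisecond)
+//	estimate := client.DistanceTo(otherCoord)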
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 00000000..b85a8ab7
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improve the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations. It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since we probe any given node fairly
+	// infrequently. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how strongly gravity acts to
+	// re-center coordinates. See [2] for more details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 00000000..fbe792c9
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,203 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is panicked if you try to perform operations
+// with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// componentIsValid returns false if a floating point value is a NaN or an
+// infinity.
+func componentIsValid(f float64) bool {
+	return !math.IsInf(f, 0) && !math.IsNaN(f)
+}
+
+// IsValid returns false if any component of a coordinate isn't valid, per the
+// componentIsValid() helper above.
+func (c *Coordinate) IsValid() bool {
+	for i := range c.Vec {
+		if !componentIsValid(c.Vec[i]) {
+			return false
+		}
+	}
+
+	return componentIsValid(c.Error) &&
+		componentIsValid(c.Adjustment) &&
+		componentIsValid(c.Height)
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally.
If this returns true then you are guaranteed to not get +// any runtime errors operating on them. +func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { + return len(c.Vec) == len(other.Vec) +} + +// ApplyForce returns the result of applying the force from the direction of the +// other coordinate. +func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. +func add(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] + vec2[i] + } + return ret +} + +// diff returns the difference between the vec1 and vec2. This assumes the +// dimensions have already been checked to be compatible. +func diff(vec1 []float64, vec2 []float64) []float64 { + ret := make([]float64, len(vec1)) + for i := range ret { + ret[i] = vec1[i] - vec2[i] + } + return ret +} + +// mul returns vec multiplied by a scalar factor. +func mul(vec []float64, factor float64) []float64 { + ret := make([]float64, len(vec)) + for i := range vec { + ret[i] = vec[i] * factor + } + return ret +} + +// magnitude computes the magnitude of the vec. +func magnitude(vec []float64) float64 { + sum := 0.0 + for i := range vec { + sum += vec[i] * vec[i] + } + return math.Sqrt(sum) +} + +// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two +// positions are the same then a random unit vector is returned. We also return +// the distance between the points for use in the later height calculation. +func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) { + ret := diff(vec1, vec2) + + // If the coordinates aren't on top of each other we can normalize. + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), mag + } + + // Otherwise, just return a random unit vector. + for i := range ret { + ret[i] = rand.Float64() - 0.5 + } + if mag := magnitude(ret); mag > zeroThreshold { + return mul(ret, 1.0/mag), 0.0 + } + + // And finally just give up and make a unit vector along the first + // dimension. This should be exceedingly rare. 
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 00000000..6fb033c0
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
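+//
+// Like the other generators in this file, the returned matrix can be fed to
+// Simulate and Evaluate below. A rough sketch (node count and radius are
+// arbitrary):
+//
+//	truth := GenerateCircle(25, 100*time.Millisecond)
+//	clients, _ := GenerateClients(25, DefaultConfig())
+//	Simulate(clients, truth, 1000)
+//	stats := Evaluate(clients, truth)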
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			var rtt time.Duration
+			if i == 0 {
+				rtt = 2 * radius
+			} else {
+				t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
+				x1, y1 := math.Cos(t1), math.Sin(t1)
+				t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
+				x2, y2 := math.Cos(t2), math.Sin(t2)
+				dx, dy := x2-x1, y2-y1
+				dist := math.Sqrt(dx*dx + dy*dy)
+				rtt = time.Duration(dist * float64(radius))
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateRandom returns a truth matrix for a set of nodes with normally
+// distributed delays, with the given mean and deviation. The RNG is re-seeded
+// so you always get the same matrix for a given size.
+func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
+	rand.Seed(1)
+
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
+			rtt := time.Duration(rttSeconds * secondsToNanoseconds)
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// Simulate runs the given number of cycles using the given list of clients and
+// truth matrix. On each cycle, each client will pick a random node and observe
+// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
+// each simulation run to get deterministic results (for this algorithm and the
+// underlying algorithm which will use random numbers for position vectors when
+// starting out with everything at the origin).
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
+	rand.Seed(1)
+
+	nodes := len(clients)
+	for cycle := 0; cycle < cycles; cycle++ {
+		for i := range clients {
+			if j := rand.Intn(nodes); j != i {
+				c := clients[j].GetCoordinate()
+				rtt := truth[i][j]
+				node := fmt.Sprintf("node_%d", j)
+				clients[i].Update(node, c, rtt)
+			}
+		}
+	}
+}
+
+// Stats is returned from the Evaluate function with a summary of the algorithm
+// performance.
+type Stats struct {
+	ErrorMax float64
+	ErrorAvg float64
+}
+
+// Evaluate uses the coordinates of the given clients to calculate estimated
+// distances and compares them with the given truth matrix, returning summary
+// stats.
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+	nodes := len(clients)
+	count := 0
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+			actual := truth[i][j].Seconds()
+			error := math.Abs(est-actual) / actual
+			stats.ErrorMax = math.Max(stats.ErrorMax, error)
+			stats.ErrorAvg += error
+			count++
+		}
+	}
+
+	stats.ErrorAvg /= float64(count)
+	fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+	return
+}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
new file mode 100644
index 00000000..82b4de97
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1.
“Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
new file mode 100644
index 00000000..8d04ad4d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
@@ -0,0 +1,138 @@
+package tfconfig
+
+import (
+	"fmt"
+
+	legacyhclparser "github.com/hashicorp/hcl/hcl/parser"
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// Diagnostic describes a problem (error or warning) encountered during
+// configuration loading.
+type Diagnostic struct {
+	Severity DiagSeverity `json:"severity"`
+	Summary  string       `json:"summary"`
+	Detail   string       `json:"detail,omitempty"`
+
+	// Pos is not populated for all diagnostics, but when populated should
+	// indicate a particular line that the described problem relates to.
+	Pos *SourcePos `json:"pos,omitempty"`
+}
+
+// Diagnostics represents a sequence of diagnostics. This is the type that
+// should be returned from a function that might generate diagnostics.
+type Diagnostics []Diagnostic
+
+// HasErrors returns true if there is at least one Diagnostic of severity
+// DiagError in the receiver.
+//
+// If a function returns a Diagnostics without errors then the result can
+// be assumed to be complete within the "best effort" constraints of this
+// library. If errors are present then the caller may wish to employ more
+// caution in relying on the result.
+func (diags Diagnostics) HasErrors() bool {
+	for _, diag := range diags {
+		if diag.Severity == DiagError {
+			return true
+		}
+	}
+	return false
+}
+
+func (diags Diagnostics) Error() string {
+	switch len(diags) {
+	case 0:
+		return "no problems"
+	case 1:
+		return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail)
+	default:
+		return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1)
+	}
+}
+
+// Err returns an error representing the receiver if the receiver HasErrors, or
+// nil otherwise.
+//
+// The returned error can be type-asserted back to a Diagnostics if needed.
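+//
+// A caller-side sketch (the module path is hypothetical):
+//
+//	mod, diags := LoadModule("./modules/network")
+//	if err := diags.Err(); err != nil {
+//		// Inspect err, or type-assert back to Diagnostics for details.
+//	}
+//	_ = mod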
+func (diags Diagnostics) Err() error {
+	if diags.HasErrors() {
+		return diags
+	}
+	return nil
+}
+
+// DiagSeverity describes the severity of a Diagnostic.
+type DiagSeverity rune
+
+// DiagError indicates a problem that prevented proper processing of the
+// configuration. In the presence of DiagError diagnostics the result is
+// likely to be incomplete.
+const DiagError DiagSeverity = 'E'
+
+// DiagWarning indicates a problem that the user may wish to consider but
+// that did not prevent proper processing of the configuration.
+const DiagWarning DiagSeverity = 'W'
+
+// MarshalJSON is an implementation of encoding/json.Marshaler
+func (s DiagSeverity) MarshalJSON() ([]byte, error) {
+	switch s {
+	case DiagError:
+		return []byte(`"error"`), nil
+	case DiagWarning:
+		return []byte(`"warning"`), nil
+	default:
+		return []byte(`"invalid"`), nil
+	}
+}
+
+func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics {
+	if len(diags) == 0 {
+		return nil
+	}
+	ret := make(Diagnostics, len(diags))
+	for i, diag := range diags {
+		ret[i] = Diagnostic{
+			Summary: diag.Summary,
+			Detail:  diag.Detail,
+		}
+		switch diag.Severity {
+		case hcl.DiagError:
+			ret[i].Severity = DiagError
+		case hcl.DiagWarning:
+			ret[i].Severity = DiagWarning
+		}
+		if diag.Subject != nil {
+			pos := sourcePosHCL(*diag.Subject)
+			ret[i].Pos = &pos
+		}
+	}
+	return ret
+}
+
+func diagnosticsError(err error) Diagnostics {
+	if err == nil {
+		return nil
+	}
+
+	if posErr, ok := err.(*legacyhclparser.PosError); ok {
+		pos := sourcePosLegacyHCL(posErr.Pos, "")
+		return Diagnostics{
+			Diagnostic{
+				Severity: DiagError,
+				Summary:  posErr.Err.Error(),
+				Pos:      &pos,
+			},
+		}
+	}
+
+	return Diagnostics{
+		Diagnostic{
+			Severity: DiagError,
+			Summary:  err.Error(),
+		},
+	}
+}
+
+func diagnosticsErrorf(format string, args ...interface{}) Diagnostics {
+	return diagnosticsError(fmt.Errorf(format, args...))
+}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
new file mode 100644
index 00000000..1604a6e0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
@@ -0,0 +1,21 @@
+// Package tfconfig is a helper library that does careful, shallow parsing of
+// Terraform modules to provide access to high-level metadata while
+// remaining broadly compatible with configurations targeting various
+// different Terraform versions.
+//
+// This package focuses on describing top-level objects only, and in particular
+// does not attempt any sort of processing that would require access to plugins.
+// Currently it allows callers to extract high-level information about
+// variables, outputs, resource blocks, provider dependencies, and Terraform
+// Core dependencies.
+//
+// This package only works at the level of single modules. A full configuration
+// is a tree of potentially several modules, some of which may be references
+// to remote packages. There are some basic helpers for traversing calls to
+// modules at relative local paths, however.
+//
+// This package employs a "best effort" parsing strategy, producing as complete
+// a result as possible even though the input may not be entirely valid. The
+// intended use-case is high-level analysis and indexing of externally-facing
+// module characteristics, as opposed to validating or even applying the module.
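+//
+// A minimal usage sketch (the module path and printed fields are illustrative):
+//
+//	mod, _ := tfconfig.LoadModule("./modules/network")
+//	for name, v := range mod.Variables {
+//		fmt.Printf("variable %q has type %q\n", name, v.Type)
+//	}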
+package tfconfig
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
new file mode 100644
index 00000000..2d13fe12
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
@@ -0,0 +1,130 @@
+package tfconfig
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/hcl2/hcl"
+)
+
+// LoadModule reads the directory at the given path and attempts to interpret
+// it as a Terraform module.
+func LoadModule(dir string) (*Module, Diagnostics) {
+
+	// For broad compatibility here we actually have two separate loader
+	// codepaths. The main one uses the new HCL parser and API and is intended
+	// for configurations from Terraform 0.12 onwards (though will work for
+	// many older configurations too), but we'll also fall back on one that
+	// uses the _old_ HCL implementation so we can deal with some edge-cases
+	// that are not valid in new HCL.
+
+	module, diags := loadModule(dir)
+	if diags.HasErrors() {
+		// Try using the legacy HCL parser and see if we fare better.
+		legacyModule, legacyDiags := loadModuleLegacyHCL(dir)
+		if !legacyDiags.HasErrors() {
+			legacyModule.init(legacyDiags)
+			return legacyModule, legacyDiags
+		}
+	}
+
+	module.init(diags)
+	return module, diags
+}
+
+// IsModuleDir checks if the given path contains terraform configuration files.
+// This allows the caller to decide how to handle directories that do not have tf files.
+func IsModuleDir(dir string) bool {
+	primaryPaths, _ := dirFiles(dir)
+	if len(primaryPaths) == 0 {
+		return false
+	}
+	return true
+}
+
+func (m *Module) init(diags Diagnostics) {
+	// Fill in any additional provider requirements that are implied by
+	// resource configurations, to save the caller from needing to apply
+	// this logic itself. Implied requirements don't have version constraints,
+	// but we'll make sure the requirement value is still non-nil in this
+	// case so callers can easily recognize it.
+	for _, r := range m.ManagedResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+	for _, r := range m.DataResources {
+		if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
+			m.RequiredProviders[r.Provider.Name] = []string{}
+		}
+	}
+
+	// We also redundantly reference the diagnostics from inside the module
+	// object, primarily so that they can easily be included in JSON-serialized
+	// versions of the module object.
+ m.Diagnostics = diags +} + +func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) { + infos, err := ioutil.ReadDir(dir) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read module directory", + Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), + }) + return + } + + var override []string + for _, info := range infos { + if info.IsDir() { + // We only care about files + continue + } + + name := info.Name() + ext := fileExt(name) + if ext == "" || isIgnoredFile(name) { + continue + } + + baseName := name[:len(name)-len(ext)] // strip extension + isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") + + fullPath := filepath.Join(dir, name) + if isOverride { + override = append(override, fullPath) + } else { + primary = append(primary, fullPath) + } + } + + // We are assuming that any _override files will be logically named, + // and processing the files in alphabetical order. Primaries first, then overrides. + primary = append(primary, override...) + + return +} + +// fileExt returns the Terraform configuration extension of the given +// path, or a blank string if it is not a recognized extension. +func fileExt(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +// isIgnoredFile returns true if the given filename (which must not have a +// directory path ahead of it) should be ignored as e.g. an editor swap file. +func isIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go new file mode 100644 index 00000000..72b5d4af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go @@ -0,0 +1,322 @@ +package tfconfig + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hclparse" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +func loadModule(dir string) (*Module, Diagnostics) { + mod := newModule(dir) + primaryPaths, diags := dirFiles(dir) + + parser := hclparse.NewParser() + + for _, filename := range primaryPaths { + var file *hcl.File + var fileDiags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, fileDiags = parser.ParseJSONFile(filename) + } else { + file, fileDiags = parser.ParseHCLFile(filename) + } + diags = append(diags, fileDiags...) + if file == nil { + continue + } + + content, _, contentDiags := file.Body.PartialContent(rootSchema) + diags = append(diags, contentDiags...) + + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema) + diags = append(diags, contentDiags...) + + if attr, defined := content.Attributes["required_version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) 
+ if !valDiags.HasErrors() { + mod.RequiredCore = append(mod.RequiredCore, version) + } + } + + for _, block := range content.Blocks { + // Our schema only allows required_providers here, so we + // assume that we'll only get that block type. + attrs, attrDiags := block.Body.JustAttributes() + diags = append(diags, attrDiags...) + + for name, attr := range attrs { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + } + + case "variable": + content, _, contentDiags := block.Body.PartialContent(variableSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + v := &Variable{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Variables[name] = v + + if attr, defined := content.Attributes["type"]; defined { + // We handle this particular attribute in a somewhat-tricky way: + // since Terraform may evolve its type expression syntax in + // future versions, we don't want to be overly-strict in how + // we handle it here, and so we'll instead just take the raw + // source provided by the user, using the source location + // information in the expression object. + // + // However, older versions of Terraform expected the type + // to be a string containing a keyword, so we'll need to + // handle that as a special case first for backward compatibility. + + var typeExpr string + + var typeExprAsStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr) + if !valDiags.HasErrors() { + typeExpr = typeExprAsStr + } else { + + rng := attr.Expr.Range() + sourceFilename := rng.Filename + source, exists := parser.Sources()[sourceFilename] + if exists { + typeExpr = string(rng.SliceBytes(source)) + } else { + // This should never happen, so we'll just warn about it and leave the type unspecified. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Source code not available", + Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name), + Subject: &block.DefRange, + }) + typeExpr = "" + } + + } + + v.Type = typeExpr + } + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + v.Description = description + } + + if attr, defined := content.Attributes["default"]; defined { + // To avoid the caller needing to deal with cty here, we'll + // use its JSON encoding to convert into an + // approximately-equivalent plain Go interface{} value + // to return. + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + if val.IsWhollyKnown() { // should only be false if there are errors in the input + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + // Should never happen, since all possible known + // values have a JSON mapping. + panic(fmt.Errorf("failed to serialize default value as JSON: %s", err)) + } + var def interface{} + err = json.Unmarshal(valJSON, &def) + if err != nil { + // Again should never happen, because valJSON is + // guaranteed valid by ctyjson.Marshal. + panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err)) + } + v.Default = def + } + } + + case "output": + + content, _, contentDiags := block.Body.PartialContent(outputSchema) + diags = append(diags, contentDiags...) 
+ + name := block.Labels[0] + o := &Output{ + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + mod.Outputs[name] = o + + if attr, defined := content.Attributes["description"]; defined { + var description string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description) + diags = append(diags, valDiags...) + o.Description = description + } + + case "provider": + + content, _, contentDiags := block.Body.PartialContent(providerConfigSchema) + diags = append(diags, contentDiags...) + + name := block.Labels[0] + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + if !valDiags.HasErrors() { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version) + } + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + case "resource", "data": + + content, _, contentDiags := block.Body.PartialContent(resourceSchema) + diags = append(diags, contentDiags...) + + typeName := block.Labels[0] + name := block.Labels[1] + + r := &Resource{ + Type: typeName, + Name: name, + Pos: sourcePosHCL(block.DefRange), + } + + var resourcesMap map[string]*Resource + + switch block.Type { + case "resource": + r.Mode = ManagedResourceMode + resourcesMap = mod.ManagedResources + case "data": + r.Mode = DataResourceMode + resourcesMap = mod.DataResources + } + + key := r.MapKey() + + resourcesMap[key] = r + + if attr, defined := content.Attributes["provider"]; defined { + // New style here is to provide this as a naked traversal + // expression, but we also support quoted references for + // older configurations that predated this convention. + traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr) + if travDiags.HasErrors() { + traversal = nil // in case we got any partial results + + // Fall back on trying to parse as a string + var travStr string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr) + if !valDiags.HasErrors() { + var strDiags hcl.Diagnostics + traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{}) + if strDiags.HasErrors() { + traversal = nil + } + } + } + + // If we get out here with a nil traversal then we didn't + // succeed in processing the input. + if len(traversal) > 0 { + providerName := traversal.RootName() + alias := "" + if len(traversal) > 1 { + if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok { + alias = getAttr.Name + } + } + r.Provider = ProviderRef{ + Name: providerName, + Alias: alias, + } + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider reference", + Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".", + Subject: attr.Expr.Range().Ptr(), + }) + } + } else { + // If provider _isn't_ set then we'll infer it from the + // resource type. + r.Provider = ProviderRef{ + Name: resourceTypeDefaultProviderName(r.Type), + } + } + + case "module": + + content, _, contentDiags := block.Body.PartialContent(moduleCallSchema) + diags = append(diags, contentDiags...) 
+ + name := block.Labels[0] + mc := &ModuleCall{ + Name: block.Labels[0], + Pos: sourcePosHCL(block.DefRange), + } + + // check if this is overriding an existing module + var origSource string + if origMod, exists := mod.ModuleCalls[name]; exists { + origSource = origMod.Source + } + + mod.ModuleCalls[name] = mc + + if attr, defined := content.Attributes["source"]; defined { + var source string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source) + diags = append(diags, valDiags...) + mc.Source = source + } + + if mc.Source == "" { + mc.Source = origSource + } + + if attr, defined := content.Attributes["version"]; defined { + var version string + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version) + diags = append(diags, valDiags...) + mc.Version = version + } + + default: + // Should never happen because our cases above should be + // exhaustive for our schema. + panic(fmt.Errorf("unhandled block type %q", block.Type)) + } + } + } + + return mod, diagnosticsHCL(diags) +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go new file mode 100644 index 00000000..86ffdf11 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go @@ -0,0 +1,325 @@ +package tfconfig + +import ( + "io/ioutil" + "strings" + + legacyhcl "github.com/hashicorp/hcl" + legacyast "github.com/hashicorp/hcl/hcl/ast" +) + +func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) { + // This implementation is intentionally more quick-and-dirty than the + // main loader. In particular, it doesn't bother to keep careful track + // of multiple error messages because we always fall back on returning + // the main parser's error message if our fallback parsing produces + // an error, and thus the errors here are not seen by the end-caller. 
+ mod := newModule(dir) + + primaryPaths, diags := dirFiles(dir) + if diags.HasErrors() { + return mod, diagnosticsHCL(diags) + } + + for _, filename := range primaryPaths { + src, err := ioutil.ReadFile(filename) + if err != nil { + return mod, diagnosticsErrorf("Error reading %s: %s", filename, err) + } + + hclRoot, err := legacyhcl.Parse(string(src)) + if err != nil { + return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err) + } + + list, ok := hclRoot.Node.(*legacyast.ObjectList) + if !ok { + return mod, diagnosticsErrorf("Error parsing %s: no root object", filename) + } + + for _, item := range list.Filter("terraform").Items { + if len(item.Keys) > 0 { + item = &legacyast.ObjectItem{ + Val: &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{item}, + }, + }, + } + } + + type TerraformBlock struct { + RequiredVersion string `hcl:"required_version"` + } + var block TerraformBlock + err = legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("terraform block: %s", err) + } + + if block.RequiredVersion != "" { + mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion) + } + } + + if vars := list.Filter("variable"); len(vars.Items) > 0 { + vars = vars.Children() + type VariableBlock struct { + Type string `hcl:"type"` + Default interface{} + Description string + Fields []string `hcl:",decodedFields"` + } + + for _, item := range vars.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block VariableBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err) + } + + // Clean up legacy HCL decoding ambiguity by unwrapping list of maps + if ms, ok := block.Default.([]map[string]interface{}); ok { + def := make(map[string]interface{}) + for _, m := range ms { + for k, v := range m { + def[k] = v + } + } + block.Default = def + } + + v := &Variable{ + Name: name, + Type: block.Type, + Description: block.Description, + Default: block.Default, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Variables[name]; exists { + return nil, diagnosticsErrorf("duplicate variable block for %q", name) + } + mod.Variables[name] = v + + } + } + + if outputs := list.Filter("output"); len(outputs.Items) > 0 { + outputs = outputs.Children() + type OutputBlock struct { + Description string + } + + for _, item := range outputs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("output block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block OutputBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err) + } + + o := &Output{ + Name: name, + Description: block.Description, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + if _, exists := mod.Outputs[name]; exists { + return nil, diagnosticsErrorf("duplicate output block for %q", name) + } + mod.Outputs[name] = o + } + } + + for _, blockType := range []string{"resource", "data"} { + if resources := list.Filter(blockType); len(resources.Items) > 0 { + resources = resources.Children() + type ResourceBlock struct { + Provider string + } + + for _, item := range resources.Items 
{ + unwrapLegacyHCLObjectKeysFromJSON(item, 2) + + if len(item.Keys) != 2 { + return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos()) + } + + typeName := item.Keys[0].Token.Value().(string) + name := item.Keys[1].Token.Value().(string) + var mode ResourceMode + var rMap map[string]*Resource + switch blockType { + case "resource": + mode = ManagedResourceMode + rMap = mod.ManagedResources + case "data": + mode = DataResourceMode + rMap = mod.DataResources + } + + var block ResourceBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err) + } + + var providerName, providerAlias string + if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 { + providerName = block.Provider[:dotPos] + providerAlias = block.Provider[dotPos+1:] + } else { + providerName = block.Provider + } + if providerName == "" { + providerName = resourceTypeDefaultProviderName(typeName) + } + + r := &Resource{ + Mode: mode, + Type: typeName, + Name: name, + Provider: ProviderRef{ + Name: providerName, + Alias: providerAlias, + }, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + key := r.MapKey() + if _, exists := rMap[key]; exists { + return nil, diagnosticsErrorf("duplicate resource block for %q", key) + } + rMap[key] = r + } + } + + } + + if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 { + moduleCalls = moduleCalls.Children() + type ModuleBlock struct { + Source string + Version string + } + + for _, item := range moduleCalls.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("module block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ModuleBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err) + } + + mc := &ModuleCall{ + Name: name, + Source: block.Source, + Version: block.Version, + Pos: sourcePosLegacyHCL(item.Pos(), filename), + } + // it's possible this module call is from an override file + if origMod, exists := mod.ModuleCalls[name]; exists { + if mc.Source == "" { + mc.Source = origMod.Source + } + } + mod.ModuleCalls[name] = mc + } + } + + if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 { + providerConfigs = providerConfigs.Children() + type ProviderBlock struct { + Version string + } + + for _, item := range providerConfigs.Items { + unwrapLegacyHCLObjectKeysFromJSON(item, 1) + + if len(item.Keys) != 1 { + return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos()) + } + + name := item.Keys[0].Token.Value().(string) + + var block ProviderBlock + err := legacyhcl.DecodeObject(&block, item.Val) + if err != nil { + return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err) + } + + if block.Version != "" { + mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version) + } + + // Even if there wasn't an explicit version required, we still + // need an entry in our map to signal the unversioned dependency. + if _, exists := mod.RequiredProviders[name]; !exists { + mod.RequiredProviders[name] = []string{} + } + + } + } + } + + return mod, nil +} + +// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when +// parsing JSON as input: if we're parsing JSON then directly nested +// items will show up as additional "keys". 
+// +// For objects that expect a fixed number of keys, this breaks the +// decoding process. This function unwraps the object into what it would've +// looked like if it came directly from HCL by specifying the number of keys +// you expect. +// +// Example: +// +// { "foo": { "baz": {} } } +// +// Will show up with Keys being: []string{"foo", "baz"} +// when we really just want the first two. This function will fix this. +func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) { + if len(item.Keys) > depth && item.Keys[0].Token.JSON { + for len(item.Keys) > depth { + // Pop off the last key + n := len(item.Keys) + key := item.Keys[n-1] + item.Keys[n-1] = nil + item.Keys = item.Keys[:n-1] + + // Wrap our value in a list + item.Val = &legacyast.ObjectType{ + List: &legacyast.ObjectList{ + Items: []*legacyast.ObjectItem{ + &legacyast.ObjectItem{ + Keys: []*legacyast.ObjectKey{key}, + Val: item.Val, + }, + }, + }, + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go new file mode 100644 index 00000000..65ddb230 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go @@ -0,0 +1,35 @@ +package tfconfig + +// Module is the top-level type representing a parsed and processed Terraform +// module. +type Module struct { + // Path is the local filesystem directory where the module was loaded from. + Path string `json:"path"` + + Variables map[string]*Variable `json:"variables"` + Outputs map[string]*Output `json:"outputs"` + + RequiredCore []string `json:"required_core,omitempty"` + RequiredProviders map[string][]string `json:"required_providers"` + + ManagedResources map[string]*Resource `json:"managed_resources"` + DataResources map[string]*Resource `json:"data_resources"` + ModuleCalls map[string]*ModuleCall `json:"module_calls"` + + // Diagnostics records any errors and warnings that were detected during + // loading, primarily for inclusion in serialized forms of the module + // since this slice is also returned as a second argument from LoadModule. + Diagnostics Diagnostics `json:"diagnostics,omitempty"` +} + +func newModule(path string) *Module { + return &Module{ + Path: path, + Variables: make(map[string]*Variable), + Outputs: make(map[string]*Output), + RequiredProviders: make(map[string][]string), + ManagedResources: make(map[string]*Resource), + DataResources: make(map[string]*Resource), + ModuleCalls: make(map[string]*ModuleCall), + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go new file mode 100644 index 00000000..5e1e05a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go @@ -0,0 +1,11 @@ +package tfconfig + +// ModuleCall represents a "module" block within a module. That is, a +// declaration of a child module from inside its parent. 
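+//
+// As a hypothetical illustration (not part of the upstream source), a
+// call like
+//
+//     module "network" {
+//       source  = "./modules/network"
+//       version = "~> 1.0"
+//     }
+//
+// is recorded as a ModuleCall with Name "network", Source
+// "./modules/network", and Version "~> 1.0".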
+type ModuleCall struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go new file mode 100644 index 00000000..890b25e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go @@ -0,0 +1,9 @@ +package tfconfig + +// Output represents a single output from a Terraform module. +type Output struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go new file mode 100644 index 00000000..d9248377 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go @@ -0,0 +1,9 @@ +package tfconfig + +// ProviderRef is a reference to a provider configuration within a module. +// It represents the contents of a "provider" argument in a resource, or +// a value in the "providers" map for a module call. +type ProviderRef struct { + Name string `json:"name"` + Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go new file mode 100644 index 00000000..401c8fce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go @@ -0,0 +1,64 @@ +package tfconfig + +import ( + "fmt" + "strconv" + "strings" +) + +// Resource represents a single "resource" or "data" block within a module. +type Resource struct { + Mode ResourceMode `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + + Provider ProviderRef `json:"provider"` + + Pos SourcePos `json:"pos"` +} + +// MapKey returns a string that can be used to uniquely identify the receiver +// in a map[string]*Resource. +func (r *Resource) MapKey() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // should never happen + return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name) + } +} + +// ResourceMode represents the "mode" of a resource, which is used to +// distinguish between managed resources ("resource" blocks in config) and +// data resources ("data" blocks in config). +type ResourceMode rune + +const InvalidResourceMode ResourceMode = 0 +const ManagedResourceMode ResourceMode = 'M' +const DataResourceMode ResourceMode = 'D' + +func (m ResourceMode) String() string { + switch m { + case ManagedResourceMode: + return "managed" + case DataResourceMode: + return "data" + default: + return "" + } +} + +// MarshalJSON implements encoding/json.Marshaler. 
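+// The mode is encoded as the quoted form of String, so a managed
+// resource serializes to "managed", a data resource to "data", and an
+// invalid mode to "".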
+func (m ResourceMode) MarshalJSON() ([]byte, error) { + return []byte(strconv.Quote(m.String())), nil +} + +func resourceTypeDefaultProviderName(typeName string) string { + if underPos := strings.IndexByte(typeName, '_'); underPos != -1 { + return typeName[:underPos] + } + return typeName +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go new file mode 100644 index 00000000..3af742ff --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go @@ -0,0 +1,106 @@ +package tfconfig + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +var rootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + LabelNames: nil, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + }, +} + +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "required_providers", + }, + }, +} + +var providerConfigSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "version", + }, + { + Name: "alias", + }, + }, +} + +var variableSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "type", + }, + { + Name: "description", + }, + { + Name: "default", + }, + }, +} + +var outputSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + }, +} + +var moduleCallSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + }, + { + Name: "version", + }, + { + Name: "providers", + }, + }, +} + +var resourceSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "provider", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go new file mode 100644 index 00000000..883914eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go @@ -0,0 +1,50 @@ +package tfconfig + +import ( + legacyhcltoken "github.com/hashicorp/hcl/hcl/token" + "github.com/hashicorp/hcl2/hcl" +) + +// SourcePos is a pointer to a particular location in a source file. +// +// This type is embedded into other structs to allow callers to locate the +// definition of each described module element. The SourcePos of an element +// is usually the first line of its definition, although the definition can +// be a little "fuzzy" with JSON-based config files. +type SourcePos struct { + Filename string `json:"filename"` + Line int `json:"line"` +} + +func sourcePos(filename string, line int) SourcePos { + return SourcePos{ + Filename: filename, + Line: line, + } +} + +func sourcePosHCL(rng hcl.Range) SourcePos { + // We intentionally throw away the column information here because + // current and legacy HCL both disagree on the definition of a column + // and so a line-only reference is the best granularity we can do + // such that the result is consistent between both parsers. 
+ return SourcePos{ + Filename: rng.Filename, + Line: rng.Start.Line, + } +} + +func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos { + useFilename := pos.Filename + // We'll try to use the filename given in legacy HCL position, but + // in practice there's no way to actually get this populated via + // the HCL API so it's usually empty except in some specialized + // situations, such as positions in error objects. + if useFilename == "" { + useFilename = filename + } + return SourcePos{ + Filename: useFilename, + Line: pos.Line, + } +} diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go new file mode 100644 index 00000000..0f73fc99 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go @@ -0,0 +1,16 @@ +package tfconfig + +// Variable represents a single variable from a Terraform module. +type Variable struct { + Name string `json:"name"` + Type string `json:"type,omitempty"` + Description string `json:"description,omitempty"` + + // Default is an approximate representation of the default value in + // the native Go type system. The conversion from the value given in + // configuration may be slightly lossy. Only values that can be + // serialized by json.Marshal will be included here. + Default interface{} `json:"default,omitempty"` + + Pos SourcePos `json:"pos"` +} diff --git a/vendor/github.com/hashicorp/terraform/CHANGELOG.md b/vendor/github.com/hashicorp/terraform/CHANGELOG.md index 9c789b85..f4d0f794 100644 --- a/vendor/github.com/hashicorp/terraform/CHANGELOG.md +++ b/vendor/github.com/hashicorp/terraform/CHANGELOG.md @@ -1,4205 +1,169 @@ -## 0.9.11 (Jul 3, 2017) +## 0.12.4 (Unreleased) -BUG FIXES: - -* core: Hotfix for issue where a state from a plan was reported as not equal to the same state stored to a backend. This arose from the fix for the previous issue where the incorrect copy of the state was being used when applying with a plan. ([#15460](https://github.com/hashicorp/terraform/issues/15460)) - - -## 0.9.10 (June 29, 2017) - -BUG FIXES: - -* core: Hotfix for issue where state index wasn't getting properly incremented when applying a change containing only data source updates and/or resource drift. (That is, state changes made during refresh.) - This issue is significant only for the "atlas" backend, since that backend verifies on the server that state serial numbers are being used consistently. ([#15423](https://github.com/hashicorp/terraform/issues/15423)) - -## 0.9.9 (June 26, 2017) - -BUG FIXES: - -* provisioner/file: Refactor the provisioner validation function to prevent false positives ([#15273](https://github.com/hashicorp/terraform/issues/15273)) -* provisioner/chef: Prevent a panic while trying to read the connection info ([#15271](https://github.com/hashicorp/terraform/issues/15271)) - -## 0.9.8 (June 7, 2017) - -NOTE: - -* The 0.9.7 release had a bug with its new feature of periodically persisting state to the backend during an apply, as part of [[#14834](https://github.com/hashicorp/terraform/issues/14834)]. This change has been reverted in this release and will be re-introduced at a later time once it has been made to work properly. 
- -IMPROVEMENTS: - -* provider/google: `network` argument in `google_compute_instance_group` is now optional ([#13493](https://github.com/hashicorp/terraform/issues/13493)) -* provider/google: Add support for `draining_timeout_sec` to `google_compute_backend_service`. ([#14559](https://github.com/hashicorp/terraform/issues/14559)) - -BUG FIXES: - -* provider/aws: fixed reading network configurations for `spot_fleet_request` ([#13748](https://github.com/hashicorp/terraform/issues/13748)) - -## 0.9.7 (June 7, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - -* The `lock_table` attribute in the S3 backend configuration has been deprecated in favor of `dynamodb_table`, which better reflects that the table is no longer only used for locks. ([#14949](https://github.com/hashicorp/terraform/issues/14949)) - -FEATURES: - - * **New Data Source:** `aws_elastic_beanstalk_solution_stack` ([#14944](https://github.com/hashicorp/terraform/issues/14944)) - * **New Data Source:** `aws_elasticache_cluster` ([#14895](https://github.com/hashicorp/terraform/issues/14895)) - * **New Data Source:** `aws_ssm_parameter` ([#15035](https://github.com/hashicorp/terraform/issues/15035)) - * **New Data Source:** `azurerm_public_ip` ([#15110](https://github.com/hashicorp/terraform/issues/15110)) - * **New Resource:** `aws_ssm_parameter` ([#15035](https://github.com/hashicorp/terraform/issues/15035)) - * **New Resource:** `aws_ssm_patch_baseline` ([#14954](https://github.com/hashicorp/terraform/issues/14954)) - * **New Resource:** `aws_ssm_patch_group` ([#14954](https://github.com/hashicorp/terraform/issues/14954)) - * **New Resource:** `librato_metric` ([#14562](https://github.com/hashicorp/terraform/issues/14562)) - * **New Resource:** `digitalocean_certificate` ([#14578](https://github.com/hashicorp/terraform/issues/14578)) - * **New Resource:** `vcd_edgegateway_vpn` ([#13123](https://github.com/hashicorp/terraform/issues/13123)) - * **New Resource:** `vault_mount` ([#14456](https://github.com/hashicorp/terraform/issues/14456)) - * **New Interpolation Function:** `bcrypt` ([#14725](https://github.com/hashicorp/terraform/issues/14725)) - -IMPROVEMENTS: - -* backend/consul: Storing state to Consul now uses Check-And-Set (CAS) by default to avoid inconsistent state, and will automatically attempt to re-acquire a lock if it is lost during Terraform execution. ([#14930](https://github.com/hashicorp/terraform/issues/14930)) -* core: Remote state is now persisted more frequently to minimize data loss in the event of a crash. 
([#14834](https://github.com/hashicorp/terraform/issues/14834))
-* provider/alicloud: Add the function of replacing ecs instance's system disk ([#15048](https://github.com/hashicorp/terraform/issues/15048))
-* provider/aws: Expose RDS instance and cluster resource id ([#14882](https://github.com/hashicorp/terraform/issues/14882))
-* provider/aws: Export internal tunnel addresses + document ([#14835](https://github.com/hashicorp/terraform/issues/14835))
-* provider/aws: Fix misleading error in aws_route validation ([#14972](https://github.com/hashicorp/terraform/issues/14972))
-* provider/aws: Support import of aws_lambda_event_source_mapping ([#14898](https://github.com/hashicorp/terraform/issues/14898))
-* provider/aws: Add support for a configurable timeout in db_option_group ([#15023](https://github.com/hashicorp/terraform/issues/15023))
-* provider/aws: Add task_parameters parameter to aws_ssm_maintenance_window_task resource ([#15104](https://github.com/hashicorp/terraform/issues/15104))
-* provider/aws: Expose reason of EMR cluster termination ([#15117](https://github.com/hashicorp/terraform/issues/15117))
-* provider/aws: `data.aws_acm_certificate` can now filter by `type` ([#15063](https://github.com/hashicorp/terraform/issues/15063))
-* provider/azurerm: Ignore case sensitivity in Azurerm resource enums ([#14861](https://github.com/hashicorp/terraform/issues/14861))
-* provider/digitalocean: Add support for changing TTL on DigitalOcean domain records. ([#14805](https://github.com/hashicorp/terraform/issues/14805))
-* provider/google: Add ability to import Google Compute persistent disks ([#14573](https://github.com/hashicorp/terraform/issues/14573))
-* provider/google: `google_container_cluster.master_auth` should be optional ([#14630](https://github.com/hashicorp/terraform/issues/14630))
-* provider/google: Add CORS support for `google_storage_bucket` ([#14695](https://github.com/hashicorp/terraform/issues/14695))
-* provider/google: Allow resizing of Google Cloud persistent disks ([#15077](https://github.com/hashicorp/terraform/issues/15077))
-* provider/google: Add private_ip_google_access update support to google_compute_subnetwork ([#15125](https://github.com/hashicorp/terraform/issues/15125))
-* provider/heroku: can now import Heroku Spaces ([#14973](https://github.com/hashicorp/terraform/issues/14973))
-* provider/kubernetes: Upgrade K8S from 1.5.3 to 1.6.1 ([#14923](https://github.com/hashicorp/terraform/issues/14923))
-* provider/kubernetes: Provide more details about why PVC failed to bind ([#15019](https://github.com/hashicorp/terraform/issues/15019))
-* provider/kubernetes: Allow sourcing config_path from `KUBECONFIG` env var ([#14889](https://github.com/hashicorp/terraform/issues/14889))
-* provider/openstack: Add support for provider networks ([#10265](https://github.com/hashicorp/terraform/issues/10265))
-* provider/openstack: Allow numerical protocols in security group rules ([#14917](https://github.com/hashicorp/terraform/issues/14917))
-* provider/openstack: Sort request/response headers in debug output ([#14956](https://github.com/hashicorp/terraform/issues/14956))
-* provider/openstack: Add support for FWaaS routerinsertion extension ([#12589](https://github.com/hashicorp/terraform/issues/12589))
-* provider/openstack: Add Terraform version to UserAgent string ([#14955](https://github.com/hashicorp/terraform/issues/14955))
-* provider/openstack: Optimize the printing of debug output ([#15086](https://github.com/hashicorp/terraform/issues/15086))
-* provisioner/chef: Use `helpers.shema.Provisoner` in Chef provisioner V2 ([#14681](https://github.com/hashicorp/terraform/issues/14681))
-
-BUG FIXES:
-
-* provider/alicloud: set `alicloud_nat_gateway` zone to be Computed to avoid perpetual diffs ([#15050](https://github.com/hashicorp/terraform/issues/15050))
-* provider/alicloud: set provider to read env vars for access key and secret key if empty strings ([#15050](https://github.com/hashicorp/terraform/issues/15050))
-* provider/alicloud: Fix vpc and vswitch bugs while creating vpc and vswitch ([#15082](https://github.com/hashicorp/terraform/issues/15082))
-* provider/alicloud: Fix allocating public ip bug ([#15049](https://github.com/hashicorp/terraform/issues/15049))
-* provider/alicloud: Fix security group rules nic_type bug ([#15114](https://github.com/hashicorp/terraform/issues/15114))
-* provider/aws: ForceNew aws_launch_config on ebs_block_device change ([#14899](https://github.com/hashicorp/terraform/issues/14899))
-* provider/aws: Avoid crash when EgressOnly IGW disappears ([#14929](https://github.com/hashicorp/terraform/issues/14929))
-* provider/aws: Allow IPv6/IPv4 addresses to coexist ([#13702](https://github.com/hashicorp/terraform/issues/13702))
-* provider/aws: Expect exception on deletion of APIG Usage Plan Key ([#14958](https://github.com/hashicorp/terraform/issues/14958))
-* provider/aws: Fix panic on nil dead_letter_config ([#14964](https://github.com/hashicorp/terraform/issues/14964))
-* provider/aws: Work around IAM eventual consistency in CW Log Subs ([#14959](https://github.com/hashicorp/terraform/issues/14959))
-* provider/aws: Fix ModifyInstanceAttribute on new instances ([#14992](https://github.com/hashicorp/terraform/issues/14992))
-* provider/aws: Fix issue with removing tags in aws_cloudwatch_log_group ([#14886](https://github.com/hashicorp/terraform/issues/14886))
-* provider/aws: Raise timeout for VPC DHCP options creation to 5 mins ([#15084](https://github.com/hashicorp/terraform/issues/15084))
-* provider/aws: Retry Redshift cluster deletion on InvalidClusterState ([#15068](https://github.com/hashicorp/terraform/issues/15068))
-* provider/aws: Retry Lambda func creation on IAM error ([#15067](https://github.com/hashicorp/terraform/issues/15067))
-* provider/aws: Retry ECS service creation on ClusterNotFoundException ([#15066](https://github.com/hashicorp/terraform/issues/15066))
-* provider/aws: Retry ECS service update on ServiceNotFoundException ([#15073](https://github.com/hashicorp/terraform/issues/15073))
-* provider/aws: Retry DB parameter group delete on InvalidDBParameterGroupState ([#15071](https://github.com/hashicorp/terraform/issues/15071))
-* provider/aws: Guard against panic when no aws_default_vpc found ([#15070](https://github.com/hashicorp/terraform/issues/15070))
-* provider/aws: Guard against panic if no NodeGroupMembers returned in `elasticache_replication_group` ([#13488](https://github.com/hashicorp/terraform/issues/13488))
-* provider/aws: Revoke default ipv6 egress rule for aws_security_group ([#15075](https://github.com/hashicorp/terraform/issues/15075))
-* provider/aws: Lambda ENI deletion fails
on destroy ([#11849](https://github.com/hashicorp/terraform/issues/11849)) -* provider/aws: Add gov and cn hosted zone Ids to aws_elb_hosted_zone data source ([#15149](https://github.com/hashicorp/terraform/issues/15149)) -* provider/azurerm: VM - making `os_profile` optional ([#14176](https://github.com/hashicorp/terraform/issues/14176)) -* provider/azurerm: Preserve the Subnet properties on Update ([#13877](https://github.com/hashicorp/terraform/issues/13877)) -* provider/datadog: make datadog_user verified a computed attribute ([#15034](https://github.com/hashicorp/terraform/issues/15034)) -* provider/datadog: use correct evaluation_delay parameter ([#14878](https://github.com/hashicorp/terraform/issues/14878)) -* provider/digitalocean: Refresh DO loadbalancer from state if 404 ([#14897](https://github.com/hashicorp/terraform/issues/14897)) -* provider/github: Do not set incorrect values in github_team data source ([#14859](https://github.com/hashicorp/terraform/issues/14859)) -* provider/google: use a mutex to prevent concurrent sql instance operations ([#14424](https://github.com/hashicorp/terraform/issues/14424)) -* provider/google: Set instances to computed in compute_instance_group ([#15025](https://github.com/hashicorp/terraform/issues/15025)) -* provider/google: Make google_compute_autoscaler use Update instead of Patch. ([#15101](https://github.com/hashicorp/terraform/issues/15101)) -* provider/kubernetes: Ignore internal k8s labels in `kubernetes_persistent_volume` ([#13716](https://github.com/hashicorp/terraform/issues/13716)) -* provider/librato: Add retry to librato_alert ([#15118](https://github.com/hashicorp/terraform/issues/15118)) -* provider/postgresql: Fix for leaking credentials in the provider ([#14817](https://github.com/hashicorp/terraform/issues/14817)) -* provider/postgresql: Drop the optional WITH token from CREATE ROLE. ([#14864](https://github.com/hashicorp/terraform/issues/14864)) -* provider/rancher: refresh rancher_host from state on nil or removed host ([#15015](https://github.com/hashicorp/terraform/issues/15015)) - -## 0.9.6 (May 25, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: +NEW FEATURES: -* When assigning a "splat variable" to a resource attribute, like `foo = "${some_resource.foo.*.baz}"`, it is no longer required (nor recommended) to wrap the string in list brackets. The extra brackets continue to be allowed for resource attributes for compatibility, but this will cease to be allowed in a future version. ([#14737](https://github.com/hashicorp/terraform/issues/14737)) -* provider/aws: Allow lightsail resources to work in other regions. Previously Terraform would automatically configure lightsail resources to run solely in `us-east-1`. This means that if a provider was initialized with a different region than `us-east-1`, users will need to create a provider alias to maintain their lightsail resources in us-east-1 [[#14685](https://github.com/hashicorp/terraform/issues/14685)]. -* provider/aws: Users of `aws_cloudfront_distribution` `default_cache_behavior` will notice that cookies is now a required value - even if that value is none ([#12628](https://github.com/hashicorp/terraform/issues/12628)) -* provider/google: Users of `google_compute_health_check` who were not setting a value for the `host` property of `http_health_check` or `https_health_check` previously had a faulty default value. 
This has been fixed and will show as a change in terraform plan/apply. ([#14441](https://github.com/hashicorp/terraform/issues/14441)) - -FEATURES: - -* **New Provider:** `ovh` ([#12669](https://github.com/hashicorp/terraform/issues/12669)) -* **New Resource:** `aws_default_subnet` ([#14476](https://github.com/hashicorp/terraform/issues/14476)) -* **New Resource:** `aws_default_vpc` ([#11710](https://github.com/hashicorp/terraform/issues/11710)) -* **New Resource:** `aws_default_vpc_dhcp_options` ([#14475](https://github.com/hashicorp/terraform/issues/14475)) -* **New Resource:** `aws_devicefarm_project` ([#14288](https://github.com/hashicorp/terraform/issues/14288)) -* **New Resource:** `aws_wafregional_ipset` ([#13705](https://github.com/hashicorp/terraform/issues/13705)) -* **New Resource:** `aws_wafregional_byte_match_set` ([#13705](https://github.com/hashicorp/terraform/issues/13705)) -* **New Resource:** `azurerm_express_route_circuit` ([#14265](https://github.com/hashicorp/terraform/issues/14265)) -* **New Resource:** `gitlab_deploy_key` ([#14734](https://github.com/hashicorp/terraform/issues/14734)) -* **New Resource:** `gitlab_group` ([#14490](https://github.com/hashicorp/terraform/issues/14490)) -* **New Resource:** `google_compute_router` ([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `google_compute_router_interface` ([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `google_compute_router_peer` ([#12411](https://github.com/hashicorp/terraform/issues/12411)) -* **New Resource:** `kubernetes_horizontal_pod_autoscaler` ([#14763](https://github.com/hashicorp/terraform/issues/14763)) -* **New Resource:** `kubernetes_service` ([#14554](https://github.com/hashicorp/terraform/issues/14554)) -* **New Resource:** `openstack_dns_zone_v2` ([#14721](https://github.com/hashicorp/terraform/issues/14721)) -* **New Resource:** `openstack_dns_recordset_v2` ([#14813](https://github.com/hashicorp/terraform/issues/14813)) -* **New Data Source:** `aws_db_snapshot` ([#10291](https://github.com/hashicorp/terraform/issues/10291)) -* **New Data Source:** `aws_kms_ciphertext` ([#14691](https://github.com/hashicorp/terraform/issues/14691)) -* **New Data Source:** `github_user` ([#14570](https://github.com/hashicorp/terraform/issues/14570)) -* **New Data Source:** `github_team` ([#14614](https://github.com/hashicorp/terraform/issues/14614)) -* **New Data Source:** `google_storage_object_signed_url` ([#14643](https://github.com/hashicorp/terraform/issues/14643)) -* **New Interpolation Function:** `pow` ([#14598](https://github.com/hashicorp/terraform/issues/14598)) - -IMPROVEMENTS: - -* core: After `apply`, if the state cannot be persisted to remote for some reason then write out a local state file for recovery ([#14423](https://github.com/hashicorp/terraform/issues/14423)) -* core: It's no longer required to surround an attribute value that is just a "splat" variable with a redundant set of array brackets. 
([#14737](https://github.com/hashicorp/terraform/issues/14737))
-* core/provider-split: Split out the Oracle OPC provider to new structure ([#14362](https://github.com/hashicorp/terraform/issues/14362))
-* provider/aws: Show state reason when EC2 instance fails to launch ([#14479](https://github.com/hashicorp/terraform/issues/14479))
-* provider/aws: Show last scaling activity when ASG creation/update fails ([#14480](https://github.com/hashicorp/terraform/issues/14480))
-* provider/aws: Add `tags` (list of maps) for `aws_autoscaling_group` ([#13574](https://github.com/hashicorp/terraform/issues/13574))
-* provider/aws: Support filtering in ASG data source ([#14501](https://github.com/hashicorp/terraform/issues/14501))
-* provider/aws: Add ability to 'terraform import' aws_kms_alias resources ([#14679](https://github.com/hashicorp/terraform/issues/14679))
-* provider/aws: Allow lightsail resources to work in other regions ([#14685](https://github.com/hashicorp/terraform/issues/14685))
-* provider/aws: Configurable timeouts for EC2 instance + spot instance ([#14711](https://github.com/hashicorp/terraform/issues/14711))
-* provider/aws: Add ability to define timeouts for DMS replication instance ([#14729](https://github.com/hashicorp/terraform/issues/14729))
-* provider/aws: Add support for X-Ray tracing to aws_lambda_function ([#14728](https://github.com/hashicorp/terraform/issues/14728))
-* provider/azurerm: Virtual Machine Scale Sets with managed disk support ([#13717](https://github.com/hashicorp/terraform/issues/13717))
-* provider/azurerm: Virtual Machine Scale Sets with single placement option support ([#14510](https://github.com/hashicorp/terraform/issues/14510))
-* provider/azurerm: Adding support for VMSS Data Disks using Managed Disk feature ([#14608](https://github.com/hashicorp/terraform/issues/14608))
-* provider/azurerm: Adding support for 4TB disks ([#14688](https://github.com/hashicorp/terraform/issues/14688))
-* provider/cloudstack: Load the provider configuration from a CloudMonkey config file ([#13926](https://github.com/hashicorp/terraform/issues/13926))
-* provider/datadog: Add last aggregator to datadog_timeboard resource ([#14391](https://github.com/hashicorp/terraform/issues/14391))
-* provider/datadog: Added new evaluation_delay parameter ([#14433](https://github.com/hashicorp/terraform/issues/14433))
-* provider/docker: Allow Windows Docker containers to map volumes ([#13584](https://github.com/hashicorp/terraform/issues/13584))
-* provider/docker: Add `network_alias` to `docker_container` resource ([#14710](https://github.com/hashicorp/terraform/issues/14710))
-* provider/fastly: Mark the `s3_access_key`, `s3_secret_key`, & `secret_key` fields as sensitive ([#14634](https://github.com/hashicorp/terraform/issues/14634))
-* provider/gitlab: Add namespace ID attribute to `gitlab_project` ([#14483](https://github.com/hashicorp/terraform/issues/14483))
-* provider/google: Add a `url` attribute to `google_storage_bucket` ([#14393](https://github.com/hashicorp/terraform/issues/14393))
-* provider/google: Make google resource storage bucket importable ([#14455](https://github.com/hashicorp/terraform/issues/14455))
-* provider/google: Add support for privateIpGoogleAccess on subnetworks ([#14234](https://github.com/hashicorp/terraform/issues/14234))
-* provider/google: Add import support to `google_sql_user` ([#14457](https://github.com/hashicorp/terraform/issues/14457))
-* provider/google: add failover parameter to `google_sql_database_instance` ([#14336](https://github.com/hashicorp/terraform/issues/14336))
-* provider/google: resource_compute_disks can now reference snapshots using the snapshot URL ([#14774](https://github.com/hashicorp/terraform/issues/14774))
-* provider/heroku: Add import support for `heroku_pipeline` resource ([#14486](https://github.com/hashicorp/terraform/issues/14486))
-* provider/heroku: Add import support for `heroku_pipeline_coupling` resource ([#14495](https://github.com/hashicorp/terraform/issues/14495))
-* provider/heroku: Add import support for `heroku_addon` resource ([#14508](https://github.com/hashicorp/terraform/issues/14508))
-* provider/openstack: Add support for all protocols in Security Group Rules ([#14307](https://github.com/hashicorp/terraform/issues/14307))
-* provider/openstack: Add support for updating Subnet Allocation Pools ([#14782](https://github.com/hashicorp/terraform/issues/14782))
-* provider/openstack: Enable Security Group Updates ([#14815](https://github.com/hashicorp/terraform/issues/14815))
-* provider/rancher: Add member support to `rancher_environment` ([#14563](https://github.com/hashicorp/terraform/issues/14563))
-* provider/rundeck: adds `description` to `command` schema in `rundeck_job` resource ([#14352](https://github.com/hashicorp/terraform/issues/14352))
-* provider/scaleway: allow public_ip to be set on server resource ([#14515](https://github.com/hashicorp/terraform/issues/14515))
-* provider/vsphere: Exposing moid value from vm resource ([#14793](https://github.com/hashicorp/terraform/issues/14793))

+NEW FEATURES:

+* lang/funcs: new `abspath` function returns the absolute path to a given file [GH-21409]
+* backend/swift: support for user-configured state object names in swift containers [GH-17465]

BUG FIXES:

-* core: Store and verify checksums for S3 remote state to prevent fetching a stale state ([#14746](https://github.com/hashicorp/terraform/issues/14746))
-* core: Allow -force-unlock of an S3 named state ([#14680](https://github.com/hashicorp/terraform/issues/14680))
-* core: Fix incorrect errors when validating nested objects ([#14784](https://github.com/hashicorp/terraform/issues/14784)] [[#14801](https://github.com/hashicorp/terraform/issues/14801))
-* core: When using `-target`, any outputs that include attributes of the targeted resources are now updated ([#14186](https://github.com/hashicorp/terraform/issues/14186))
-* core: Fixed 0.9.5 regression with the conditional operator `.. ? .. : ..` failing to type check with unknown/computed values ([#14454](https://github.com/hashicorp/terraform/issues/14454))
-* core: Fixed 0.9 regression causing issues during refresh when adding new data resource instances using `count` ([#14098](https://github.com/hashicorp/terraform/issues/14098))
-* core: Fixed crasher when populating a "splat variable" from an empty (nil) module state. ([#14526](https://github.com/hashicorp/terraform/issues/14526))
-* core: fix bad Sprintf in backend migration message ([#14601](https://github.com/hashicorp/terraform/issues/14601))
-* core: Addressed 0.9.5 issue with passing partially-unknown splat results through module variables, by removing the requirement to pass a redundant list level. ([#14737](https://github.com/hashicorp/terraform/issues/14737))
-* provider/aws: Allow updating constraints in WAF SizeConstraintSet + no constraints ([#14661](https://github.com/hashicorp/terraform/issues/14661))
-* provider/aws: Allow updating tuples in WAF ByteMatchSet + no tuples ([#14071](https://github.com/hashicorp/terraform/issues/14071))
-* provider/aws: Allow updating tuples in WAF SQLInjectionMatchSet + no tuples ([#14667](https://github.com/hashicorp/terraform/issues/14667))
-* provider/aws: Allow updating tuples in WAF XssMatchSet + no tuples ([#14671](https://github.com/hashicorp/terraform/issues/14671))
-* provider/aws: Increase EIP update timeout ([#14381](https://github.com/hashicorp/terraform/issues/14381))
-* provider/aws: Increase timeout for creating security group ([#14380](https://github.com/hashicorp/terraform/issues/14380)] [[#14724](https://github.com/hashicorp/terraform/issues/14724))
-* provider/aws: Increase timeout for (dis)associating IPv6 addr to/from subnet ([#14401](https://github.com/hashicorp/terraform/issues/14401))
-* provider/aws: Increase timeout for retrying creation of IAM server cert ([#14609](https://github.com/hashicorp/terraform/issues/14609))
-* provider/aws: Increase timeout for deleting IGW ([#14705](https://github.com/hashicorp/terraform/issues/14705))
-* provider/aws: Increase timeout for retrying creation of CW log subs ([#14722](https://github.com/hashicorp/terraform/issues/14722))
-* provider/aws: Using the new time schema helper for RDS Instance lifecycle mgmt ([#14369](https://github.com/hashicorp/terraform/issues/14369))
-* provider/aws: Using the timeout schema helper to make alb timeout configurable ([#14375](https://github.com/hashicorp/terraform/issues/14375))
-* provider/aws: Refresh from state when CodePipeline Not Found ([#14431](https://github.com/hashicorp/terraform/issues/14431))
-* provider/aws: Override spot_instance_requests volume_tags schema ([#14481](https://github.com/hashicorp/terraform/issues/14481))
-* provider/aws: Allow Internet Gateway IPv6 routes ([#14484](https://github.com/hashicorp/terraform/issues/14484))
-* provider/aws: ForceNew aws_launch_config when root_block_device changes ([#14507](https://github.com/hashicorp/terraform/issues/14507))
-* provider/aws: Pass IAM Roles to codepipeline actions ([#14263](https://github.com/hashicorp/terraform/issues/14263))
-* provider/aws: Create rule(s) for prefix-list-only AWS security group permissions on 'terraform import' ([#14528](https://github.com/hashicorp/terraform/issues/14528))
-* provider/aws: Set aws_subnet ipv6_cidr_block to computed ([#14542](https://github.com/hashicorp/terraform/issues/14542))
-* provider/aws: Change of aws_subnet ipv6 causing update failure ([#14545](https://github.com/hashicorp/terraform/issues/14545))
-* provider/aws: Nothing to update in cloudformation should not result in errors ([#14463](https://github.com/hashicorp/terraform/issues/14463))
-* provider/aws: Handling data migration in RDS snapshot restoring ([#14622](https://github.com/hashicorp/terraform/issues/14622))
-* provider/aws: Mark cookies in `default_cache_behaviour` of cloudfront_distribution as required ([#12628](https://github.com/hashicorp/terraform/issues/12628))
-* provider/aws: Fall back to old tagging mechanism for AWS gov and aws China ([#14627](https://github.com/hashicorp/terraform/issues/14627))
-* provider/aws: Change AWS ssm_maintenance_window Read func ([#14665](https://github.com/hashicorp/terraform/issues/14665))
-* provider/aws: Increase timeout for creation of route_table ([#14701](https://github.com/hashicorp/terraform/issues/14701))
-* provider/aws: Retry ElastiCache cluster deletion when it's snapshotting ([#14700](https://github.com/hashicorp/terraform/issues/14700))
-* provider/aws: Retry ECS service update on InvalidParameterException ([#14708](https://github.com/hashicorp/terraform/issues/14708))
-* provider/aws: Retry IAM Role deletion on DeleteConflict ([#14707](https://github.com/hashicorp/terraform/issues/14707))
-* provider/aws: Do not dereference source_Dest_check in aws_instance ([#14723](https://github.com/hashicorp/terraform/issues/14723))
-* provider/aws: Add validation function for IAM Policies ([#14669](https://github.com/hashicorp/terraform/issues/14669))
-* provider/aws: Fix panic on instance shutdown ([#14727](https://github.com/hashicorp/terraform/issues/14727))
-* provider/aws: Handle migration when restoring db cluster from snapshot ([#14766](https://github.com/hashicorp/terraform/issues/14766))
-* provider/aws: Provide ability to enable snapshotting on ElastiCache RG ([#14757](https://github.com/hashicorp/terraform/issues/14757))
-* provider/cloudstack: `cloudstack_firewall` panicked when used with older (< v4.6) CloudStack versions ([#14044](https://github.com/hashicorp/terraform/issues/14044))
-* provider/datadog: Allowed method on aggregator is `avg` ! `average` ([#14414](https://github.com/hashicorp/terraform/issues/14414))
-* provider/digitalocean: Fix parsing of digitalocean dns records ([#14215](https://github.com/hashicorp/terraform/issues/14215))
-* provider/github: Log HTTP requests and responses in DEBUG mode ([#14363](https://github.com/hashicorp/terraform/issues/14363))
-* provider/github: Check for potentially nil response from GitHub API client ([#14683](https://github.com/hashicorp/terraform/issues/14683))
-* provider/google: Fix health check http/https defaults ([#14441](https://github.com/hashicorp/terraform/issues/14441))
-* provider/google: Fix issue with GCP Cloud SQL Instance `disk_autoresize` ([#14582](https://github.com/hashicorp/terraform/issues/14582))
-* provider/google: Fix crash creating Google Cloud SQL 2nd Generation replication instance ([#14373](https://github.com/hashicorp/terraform/issues/14373))
-* provider/google: Disks now detach before getting deleted ([#14651](https://github.com/hashicorp/terraform/issues/14651))
-* provider/google: Update `google_compute_target_pool`'s session_affinity default ([#14807](https://github.com/hashicorp/terraform/issues/14807))
-* provider/heroku: Fix issue with setting correct CName in heroku_domain ([#14443](https://github.com/hashicorp/terraform/issues/14443))
-* provider/opc: Correctly export `ip_address` in IP Addr Reservation ([#14543](https://github.com/hashicorp/terraform/issues/14543))
-* provider/openstack: Handle Deleted Resources in Floating IP Association ([#14533](https://github.com/hashicorp/terraform/issues/14533))
-* provider/openstack: Catch error during instance network parsing ([#14704](https://github.com/hashicorp/terraform/issues/14704))
-* provider/vault: Prevent panic when no secret found ([#14435](https://github.com/hashicorp/terraform/issues/14435))
-
-## 0.9.5 (May 11, 2017)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
-* provider/aws: Users of aws_cloudfront_distributions with custom_origins have been broken due to changes in the AWS API requiring `OriginReadTimeout` being set for updates. This has been fixed and will show as a change in terraform plan / apply. ([#13367](https://github.com/hashicorp/terraform/issues/13367))
-* provider/aws: Users of China and Gov clouds cannot use the new tagging of volumes created as part of aws_instances ([#14055](https://github.com/hashicorp/terraform/issues/14055))
-* provider/aws: Skip tag operations on cloudwatch logs in govcloud partition. Currently not supported by Amazon. ([#12414](https://github.com/hashicorp/terraform/issues/12414))
-* provider/aws: More consistent (un)quoting of long TXT/SPF `aws_route53_record`s.
-  Previously we were trimming first 2 quotes and now we're (correctly) trimming first and last one.
-  Depending on the use of quotes in your TXT/SPF records this may result in extra diff in plan/apply ([#14170](https://github.com/hashicorp/terraform/issues/14170))
-
-FEATURES:

+* core: Prevent crash when a resource has no current valid instance [GH-21979]
+* plugin/sdk: Prevent empty strings from being replaced with default values [GH-21806]
+* plugin/sdk: Ensure resource timeouts are not lost when there is an empty plan [GH-21814]
+* plugin/sdk: Don't add null elements to diagnostic paths when validating config [GH-21884]
+* lang/funcs: Add missing map of bool support for `lookup` [GH-21863]
+* config: Fix issue with downloading BitBucket modules from deprecated V1 API by updating go-getter dependency [GH-21948]
+* config: Fix conditionals to evaluate to the correct type when using null [GH-21957]

-* **New Provider:** `gitlab` ([#13898](https://github.com/hashicorp/terraform/issues/13898))
-* **New Resource:** `aws_emr_security_configuration` ([#14080](https://github.com/hashicorp/terraform/issues/14080))
-* **New Resource:** `aws_ssm_maintenance_window` ([#14087](https://github.com/hashicorp/terraform/issues/14087))
-* **New Resource:** `aws_ssm_maintenance_window_target` ([#14087](https://github.com/hashicorp/terraform/issues/14087))
-* **New Resource:** `aws_ssm_maintenance_window_task` ([#14087](https://github.com/hashicorp/terraform/issues/14087))
-* **New Resource:** `azurerm_sql_elasticpool` ([#14099](https://github.com/hashicorp/terraform/issues/14099))
-* **New Resource:** `google_bigquery_table` ([#13743](https://github.com/hashicorp/terraform/issues/13743))
-* **New Resource:** `google_compute_backend_bucket` ([#14015](https://github.com/hashicorp/terraform/issues/14015))
-* **New Resource:** `google_compute_snapshot` ([#12482](https://github.com/hashicorp/terraform/issues/12482))
-* **New Resource:** `heroku_app_feature` ([#14035](https://github.com/hashicorp/terraform/issues/14035))
-* **New Resource:** `heroku_pipeline` ([#14078](https://github.com/hashicorp/terraform/issues/14078))
-* **New Resource:** `heroku_pipeline_coupling` ([#14078](https://github.com/hashicorp/terraform/issues/14078))
-* **New Resource:** `kubernetes_limit_range` ([#14285](https://github.com/hashicorp/terraform/issues/14285))
-* **New Resource:** `kubernetes_resource_quota` ([#13914](https://github.com/hashicorp/terraform/issues/13914))
-* **New Resource:** `vault_auth_backend`
([#10988](https://github.com/hashicorp/terraform/issues/10988)) -* **New Data Source:** `aws_efs_file_system` ([#14041](https://github.com/hashicorp/terraform/issues/14041)) -* **New Data Source:** `http`, for retrieving text data from generic HTTP servers ([#14270](https://github.com/hashicorp/terraform/issues/14270)) -* **New Data Source:** `google_container_engine_versions`, for retrieving valid versions for clusters ([#14280](https://github.com/hashicorp/terraform/issues/14280)) -* **New Interpolation Function:** `log`, for computing logarithms ([#12872](https://github.com/hashicorp/terraform/issues/12872)) +## 0.12.3 (June 24, 2019) -IMPROVEMENTS: +ENHANCEMENTS: -* core: `sha512` and `base64sha512` interpolation functions, similar to their `sha256` equivalents. ([#14100](https://github.com/hashicorp/terraform/issues/14100)) -* core: It's now possible to use the index operator `[ ]` to select a known value out of a partially-known list, such as using "splat syntax" and increasing the `count`. ([#14135](https://github.com/hashicorp/terraform/issues/14135)) -* provider/aws: Add support for CustomOrigin timeouts to aws_cloudfront_distribution ([#13367](https://github.com/hashicorp/terraform/issues/13367)) -* provider/aws: Add support for IAMDatabaseAuthenticationEnabled ([#14092](https://github.com/hashicorp/terraform/issues/14092)) -* provider/aws: aws_dynamodb_table Add support for TimeToLive ([#14104](https://github.com/hashicorp/terraform/issues/14104)) -* provider/aws: Add `security_configuration` support to `aws_emr_cluster` ([#14133](https://github.com/hashicorp/terraform/issues/14133)) -* provider/aws: Add support for the tenancy placement option in `aws_spot_fleet_request` ([#14163](https://github.com/hashicorp/terraform/issues/14163)) -* provider/aws: `aws_db_option_group` normalizes name to lowercase ([#14192](https://github.com/hashicorp/terraform/issues/14192), [#14366](https://github.com/hashicorp/terraform/issues/14366)) -* provider/aws: Add support description to aws_iam_role ([#14208](https://github.com/hashicorp/terraform/issues/14208)) -* provider/aws: Add support for SSM Documents to aws_cloudwatch_event_target ([#14067](https://github.com/hashicorp/terraform/issues/14067)) -* provider/aws: add additional custom service endpoint options for CloudFormation, KMS, RDS, SNS & SQS ([#14097](https://github.com/hashicorp/terraform/issues/14097)) -* provider/aws: Add ARN to security group data source ([#14245](https://github.com/hashicorp/terraform/issues/14245)) -* provider/aws: Improve the wording of DynamoDB Validation error message ([#14256](https://github.com/hashicorp/terraform/issues/14256)) -* provider/aws: Add support for importing Kinesis Streams ([#14278](https://github.com/hashicorp/terraform/issues/14278)) -* provider/aws: Add `arn` attribute to `aws_ses_domain_identity` resource ([#14306](https://github.com/hashicorp/terraform/issues/14306)) -* provider/aws: Add support for targets to aws_ssm_association ([#14246](https://github.com/hashicorp/terraform/issues/14246)) -* provider/aws: native redis clustering support for elasticache ([#14317](https://github.com/hashicorp/terraform/issues/14317)) -* provider/aws: Support updating `aws_waf_rule` predicates ([#14089](https://github.com/hashicorp/terraform/issues/14089)) -* provider/azurerm: `azurerm_template_deployment` now 
supports String/Int/Boolean outputs ([#13670](https://github.com/hashicorp/terraform/issues/13670)) -* provider/azurerm: Expose the Private IP Address for a Load Balancer, if available ([#13965](https://github.com/hashicorp/terraform/issues/13965)) -* provider/dns: Fix data dns txt record set ([#14271](https://github.com/hashicorp/terraform/issues/14271)) -* provider/dnsimple: Add support for import for dnsimple_records ([#9130](https://github.com/hashicorp/terraform/issues/9130)) -* provider/dyn: Add verbose Dyn provider logs ([#14076](https://github.com/hashicorp/terraform/issues/14076)) -* provider/google: Add support for networkIP in compute instance templates ([#13515](https://github.com/hashicorp/terraform/issues/13515)) -* provider/google: google_dns_managed_zone is now importable ([#13824](https://github.com/hashicorp/terraform/issues/13824)) -* provider/google: Add support for `compute_route` ([#14065](https://github.com/hashicorp/terraform/issues/14065)) -* provider/google: Add `path` to `google_pubsub_subscription` ([#14238](https://github.com/hashicorp/terraform/issues/14238)) -* provider/google: Improve Service Account by offering to recreate if missing ([#14282](https://github.com/hashicorp/terraform/issues/14282)) -* provider/google: Log HTTP requests and responses in DEBUG mode ([#14281](https://github.com/hashicorp/terraform/issues/14281)) -* provider/google: Add additional properties for google resource storage bucket object ([#14259](https://github.com/hashicorp/terraform/issues/14259)) -* provider/google: Handle all 404 checks in read functions via the new function ([#14335](https://github.com/hashicorp/terraform/issues/14335)) -* provider/heroku: import heroku_app resource ([#14248](https://github.com/hashicorp/terraform/issues/14248)) -* provider/nomad: Add TLS options ([#13956](https://github.com/hashicorp/terraform/issues/13956)) -* provider/triton: Add support for reading provider configuration from `TRITON_*` environment variables in addition to `SDC_*`([#14000](https://github.com/hashicorp/terraform/issues/14000)) -* provider/triton: Add `cloud_config` argument to `triton_machine` resources for Linux containers ([#12840](https://github.com/hashicorp/terraform/issues/12840)) -* provider/triton: Add `insecure_skip_tls_verify` ([#14077](https://github.com/hashicorp/terraform/issues/14077)) +* config: add GCS source support for modules ([#21254](https://github.com/hashicorp/terraform/issues/21254)) +* command/format: Reduce extra whitespaces & new lines ([#21334](https://github.com/hashicorp/terraform/issues/21334)) +* backend/s3: Support for chaining assume IAM role from AWS shared configuration files ([#21815](https://github.com/hashicorp/terraform/issues/21815)) BUG FIXES: -* core: `module` blocks without names are now caught in validation, along with various other block types ([#14162](https://github.com/hashicorp/terraform/issues/14162)) -* core: no longer will errors and normal log output get garbled together on Windows ([#14194](https://github.com/hashicorp/terraform/issues/14194)) -* core: Avoid crash on empty TypeSet blocks ([#14305](https://github.com/hashicorp/terraform/issues/14305)) -* provider/aws: Update aws_ebs_volume when attached ([#14005](https://github.com/hashicorp/terraform/issues/14005)) -* provider/aws: Set aws_instance volume_tags to be Computed 
([#14007](https://github.com/hashicorp/terraform/issues/14007)) -* provider/aws: Fix issue getting partition for federated users ([#13992](https://github.com/hashicorp/terraform/issues/13992)) -* provider/aws: aws_spot_instance_request not forcenew on volume_tags ([#14046](https://github.com/hashicorp/terraform/issues/14046)) -* provider/aws: Exclude aws_instance volume tagging for China and Gov Clouds ([#14055](https://github.com/hashicorp/terraform/issues/14055)) -* provider/aws: Fix source_dest_check with network_interface ([#14079](https://github.com/hashicorp/terraform/issues/14079)) -* provider/aws: Fixes the bug where SNS delivery policy get always recreated ([#14064](https://github.com/hashicorp/terraform/issues/14064)) -* provider/aws: Increase timeouts for Route Table retries ([#14345](https://github.com/hashicorp/terraform/issues/14345)) -* provider/aws: Prevent Crash when importing aws_route53_record ([#14218](https://github.com/hashicorp/terraform/issues/14218)) -* provider/aws: More consistent (un)quoting of long TXT/SPF `aws_route53_record`s ([#14170](https://github.com/hashicorp/terraform/issues/14170)) -* provider/aws: Retry deletion of AWSConfig Rule on ResourceInUseException ([#14269](https://github.com/hashicorp/terraform/issues/14269)) -* provider/aws: Refresh ssm document from state on 404 ([#14279](https://github.com/hashicorp/terraform/issues/14279)) -* provider/aws: Allow zero-value ELB and ALB names ([#14304](https://github.com/hashicorp/terraform/issues/14304)) -* provider/aws: Update the ignoring of AWS specific tags ([#14321](https://github.com/hashicorp/terraform/issues/14321)) -* provider/aws: Adding IPv6 address to instance causes perpetual diff ([#14355](https://github.com/hashicorp/terraform/issues/14355)) -* provider/aws: Fix SG update on instance with multiple network interfaces ([#14299](https://github.com/hashicorp/terraform/issues/14299)) -* provider/azurerm: Fixing a bug in `azurerm_network_interface` ([#14365](https://github.com/hashicorp/terraform/issues/14365)) -* provider/digitalocean: Prevent diffs when using IDs of images instead of slugs ([#13879](https://github.com/hashicorp/terraform/issues/13879)) -* provider/fastly: Changes setting conditionals to optional ([#14103](https://github.com/hashicorp/terraform/issues/14103)) -* provider/google: Ignore certain project services that can't be enabled directly via the api ([#13730](https://github.com/hashicorp/terraform/issues/13730)) -* provider/google: Ability to add more than 25 project services ([#13758](https://github.com/hashicorp/terraform/issues/13758)) -* provider/google: Fix compute instance panic with bad disk config ([#14169](https://github.com/hashicorp/terraform/issues/14169)) -* provider/google: Handle `google_storage_bucket_object` not being found ([#14203](https://github.com/hashicorp/terraform/issues/14203)) -* provider/google: Handle `google_compute_instance_group_manager` not being found ([#14190](https://github.com/hashicorp/terraform/issues/14190)) -* provider/google: better visibility for compute_region_backend_service ([#14301](https://github.com/hashicorp/terraform/issues/14301)) -* provider/heroku: Configure buildpacks correctly for both Org Apps and non-org Apps ([#13990](https://github.com/hashicorp/terraform/issues/13990)) -* provider/heroku: Fix `heroku_cert` update of ssl 
cert ([#14240](https://github.com/hashicorp/terraform/issues/14240))
-* provider/openstack: Handle disassociating deleted FloatingIP's from a server ([#14210](https://github.com/hashicorp/terraform/issues/14210))
-* provider/postgres grant role when creating database ([#11452](https://github.com/hashicorp/terraform/issues/11452))
-* provider/triton: Make triton machine deletes synchronous. ([#14368](https://github.com/hashicorp/terraform/issues/14368))
-* provisioner/remote-exec: Fix panic from remote_exec provisioner ([#14134](https://github.com/hashicorp/terraform/issues/14134))
+* configs: Can now use references like `tags["foo"]` in `ignore_changes` to ignore in-place updates to specific keys in a map ([#21788](https://github.com/hashicorp/terraform/issues/21788))
+* configs: Fix panic on missing value for `version` attribute in `provider` blocks. ([#21825](https://github.com/hashicorp/terraform/issues/21825))
+* lang/funcs: Fix `merge` panic on null values. Now will give an error if null is used ([#21695](https://github.com/hashicorp/terraform/issues/21695))
+* backend/remote: Fix "Conflict" error if the first state snapshot written after a Terraform CLI upgrade has the same content as the prior state. ([#21811](https://github.com/hashicorp/terraform/issues/21811))
+* backend/s3: Fix AWS shared configuration file credential source not assuming a role with environment and ECS credentials ([#21815](https://github.com/hashicorp/terraform/issues/21815))
-## 0.9.4 (April 26, 2017)
+## 0.12.2 (June 12, 2019)
-BACKWARDS INCOMPATIBILITIES / NOTES:
+NEW FEATURES:
- * provider/template: Fix invalid MIME formatting in `template_cloudinit_config`. While the change itself is not breaking, the data source may be referenced e.g. in `aws_launch_configuration` and similar resources which are immutable, and the formatting change will therefore trigger recreation ([#13752](https://github.com/hashicorp/terraform/issues/13752))
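As a quick illustration of the `ignore_changes` map-key fix in the 0.12.3 bug fixes above, here is a minimal sketch; the resource type, AMI ID, and tag names are hypothetical:

```hcl
resource "aws_instance" "example" {
  ami           = "ami-0abc1234"  # placeholder
  instance_type = "t3.micro"

  tags = {
    Name = "managed-by-terraform"
    foo  = "updated-out-of-band"
  }

  lifecycle {
    # Since 0.12.3, a single map key can be ignored,
    # rather than having to ignore the whole "tags" attribute.
    ignore_changes = [tags["foo"]]
  }
}
```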
+* provisioners: new provisioner: `puppet` ([#18851](https://github.com/hashicorp/terraform/issues/18851))
+* `range` function for generating a sequence of numbers as a list ([#21461](https://github.com/hashicorp/terraform/issues/21461))
+* `yamldecode` and *experimental* `yamlencode` functions for working with YAML-serialized data ([#21459](https://github.com/hashicorp/terraform/issues/21459))
+* `uuidv5` function for generating name-based (as opposed to pseudorandom) UUIDs ([#21244](https://github.com/hashicorp/terraform/issues/21244))
+* backend/oss: Add support for Alibaba OSS remote state ([#16927](https://github.com/hashicorp/terraform/issues/16927))
-FEATURES:
+ENHANCEMENTS:
-* **New Provider:** `opc` - Oracle Public Cloud ([#13468](https://github.com/hashicorp/terraform/issues/13468))
-* **New Provider:** `oneandone` ([#13633](https://github.com/hashicorp/terraform/issues/13633))
-* **New Data Source:** `aws_ami_ids` ([#13844](https://github.com/hashicorp/terraform/issues/13844), [#13866](https://github.com/hashicorp/terraform/issues/13866))
-* **New Data Source:** `aws_ebs_snapshot_ids` ([#13844](https://github.com/hashicorp/terraform/issues/13844), [#13866](https://github.com/hashicorp/terraform/issues/13866))
-* **New Data Source:** `aws_kms_alias` ([#13669](https://github.com/hashicorp/terraform/issues/13669))
-* **New Data Source:** `aws_kinesis_stream` ([#13562](https://github.com/hashicorp/terraform/issues/13562))
-* **New Data Source:** `digitalocean_image` ([#13787](https://github.com/hashicorp/terraform/issues/13787))
-* **New Data Source:** `google_compute_network` ([#12442](https://github.com/hashicorp/terraform/issues/12442))
-* **New Data Source:** `google_compute_subnetwork` ([#12442](https://github.com/hashicorp/terraform/issues/12442))
-* **New Resource:** `local_file` for creating local files (please see the docs for caveats) ([#12757](https://github.com/hashicorp/terraform/issues/12757))
-* **New Resource:** `alicloud_ess_scalinggroup` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `alicloud_ess_scalingconfiguration` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `alicloud_ess_scalingrule` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `alicloud_ess_schedule` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `alicloud_snat_entry` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `alicloud_forward_entry` ([#13731](https://github.com/hashicorp/terraform/issues/13731))
-* **New Resource:** `aws_cognito_identity_pool` ([#13783](https://github.com/hashicorp/terraform/issues/13783))
-* **New Resource:** `aws_network_interface_attachment` ([#13861](https://github.com/hashicorp/terraform/issues/13861))
-* **New Resource:** `github_branch_protection` ([#10476](https://github.com/hashicorp/terraform/issues/10476))
-* **New Resource:** `google_bigquery_dataset` ([#13436](https://github.com/hashicorp/terraform/issues/13436))
-* **New Resource:** `heroku_space` ([#13921](https://github.com/hashicorp/terraform/issues/13921))
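The three new language functions listed under NEW FEATURES above are easiest to see side by side. A minimal sketch, assuming an adjacent `settings.yaml` file; names and values are illustrative:

```hcl
locals {
  ports    = range(8080, 8083)  # produces [8080, 8081, 8082]
  settings = yamldecode(file("${path.module}/settings.yaml"))

  # uuidv5 is deterministic: the same namespace and name always
  # yield the same UUID, unlike the pseudorandom uuid().
  stable_id = uuidv5("dns", "example.com")
}

output "settings_roundtrip" {
  # yamlencode is still flagged as experimental in 0.12.2.
  value = yamlencode(local.settings)
}
```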
-* **New Resource:** `template_dir` for producing a directory from templates ([#13652](https://github.com/hashicorp/terraform/issues/13652))
-* **New Interpolation Function:** `coalescelist()` ([#12537](https://github.com/hashicorp/terraform/issues/12537))
-
-
-IMPROVEMENTS:
-
- * core: Add a `-reconfigure` flag to the `init` command, to configure a backend while ignoring any saved configuration ([#13825](https://github.com/hashicorp/terraform/issues/13825))
- * helper/schema: Disallow validation+diff suppression on computed fields ([#13878](https://github.com/hashicorp/terraform/issues/13878))
- * config: The interpolation function `cidrhost` now accepts a negative host number to count backwards from the end of the range ([#13765](https://github.com/hashicorp/terraform/issues/13765))
- * config: New interpolation function `matchkeys` for using values from one list to filter corresponding values from another list using a matching set. ([#13847](https://github.com/hashicorp/terraform/issues/13847))
- * state/remote/swift: Support Openstack request logging ([#13583](https://github.com/hashicorp/terraform/issues/13583))
- * provider/aws: Add an option to skip getting the supported EC2 platforms ([#13672](https://github.com/hashicorp/terraform/issues/13672))
- * provider/aws: Add `name_prefix` support to `aws_cloudwatch_log_group` ([#13273](https://github.com/hashicorp/terraform/issues/13273))
- * provider/aws: Add `bucket_prefix` to `aws_s3_bucket` ([#13274](https://github.com/hashicorp/terraform/issues/13274))
- * provider/aws: Add replica_source_db to the aws_db_instance datasource ([#13842](https://github.com/hashicorp/terraform/issues/13842))
- * provider/aws: Add IPv6 outputs to aws_subnet datasource ([#13841](https://github.com/hashicorp/terraform/issues/13841))
- * provider/aws: Exercise SecondaryPrivateIpAddressCount for network interface ([#10590](https://github.com/hashicorp/terraform/issues/10590))
- * provider/aws: Expose execution ARN + invoke URL for APIG deployment ([#13889](https://github.com/hashicorp/terraform/issues/13889))
- * provider/aws: Expose invoke ARN from Lambda function (for API Gateway) ([#13890](https://github.com/hashicorp/terraform/issues/13890))
- * provider/aws: Add tagging support to the 'aws_lambda_function' resource ([#13873](https://github.com/hashicorp/terraform/issues/13873))
- * provider/aws: Validate WAF metric names ([#13885](https://github.com/hashicorp/terraform/issues/13885))
- * provider/aws: Allow AWS Subnet to change IPv6 CIDR Block without ForceNew ([#13909](https://github.com/hashicorp/terraform/issues/13909))
- * provider/aws: Allow filtering of aws_subnet_ids by tags ([#13937](https://github.com/hashicorp/terraform/issues/13937))
- * provider/aws: Support aws_instance and volume tagging on creation ([#13945](https://github.com/hashicorp/terraform/issues/13945))
- * provider/aws: Add network_interface to aws_instance ([#12933](https://github.com/hashicorp/terraform/issues/12933))
- * provider/azurerm: VM Scale Sets - import support ([#13464](https://github.com/hashicorp/terraform/issues/13464))
- * provider/azurerm: Allow Azure China region support ([#13767](https://github.com/hashicorp/terraform/issues/13767))
- * provider/digitalocean: Export droplet prices 
([#13720](https://github.com/hashicorp/terraform/issues/13720)) - * provider/fastly: Add support for GCS logging ([#13553](https://github.com/hashicorp/terraform/issues/13553)) - * provider/google: `google_compute_address` and `google_compute_global_address` are now importable ([#13270](https://github.com/hashicorp/terraform/issues/13270)) - * provider/google: `google_compute_network` is now importable ([#13834](https://github.com/hashicorp/terraform/issues/13834)) - * provider/google: add attached_disk field to google_compute_instance ([#13443](https://github.com/hashicorp/terraform/issues/13443)) - * provider/heroku: Set App buildpacks from config ([#13910](https://github.com/hashicorp/terraform/issues/13910)) - * provider/heroku: Create Heroku app in a private space ([#13862](https://github.com/hashicorp/terraform/issues/13862)) - * provider/vault: `vault_generic_secret` resource can now optionally detect drift if it has appropriate access ([#11776](https://github.com/hashicorp/terraform/issues/11776)) +* config: consider build metadata when interpreting module versions ([#21640](https://github.com/hashicorp/terraform/issues/21640)) +* backend/http: implement retries for the http backend ([#19702](https://github.com/hashicorp/terraform/issues/19702)) +* backend/swift: authentication mechanisms now more consistent with other OpenStack-compatible tools ([#18671](https://github.com/hashicorp/terraform/issues/18671)) +* backend/swift: add application credential support ([#20914](https://github.com/hashicorp/terraform/pull/20914)) BUG FIXES: - * core: Prevent resource.Retry from adding untracked resources after the timeout: ([#13778](https://github.com/hashicorp/terraform/issues/13778)) - * core: Allow a schema.TypeList to be ForceNew and computed ([#13863](https://github.com/hashicorp/terraform/issues/13863)) - * core: Fix crash when refresh or apply build an invalid graph ([#13665](https://github.com/hashicorp/terraform/issues/13665)) - * core: Add the close provider/provisioner transformers back ([#13102](https://github.com/hashicorp/terraform/issues/13102)) - * core: Fix a crash condition by improving the flatmap.Expand() logic ([#13541](https://github.com/hashicorp/terraform/issues/13541)) - * provider/alicloud: Fix create PrePaid instance ([#13662](https://github.com/hashicorp/terraform/issues/13662)) - * provider/alicloud: Fix allocate public ip error ([#13268](https://github.com/hashicorp/terraform/issues/13268)) - * provider/alicloud: alicloud_security_group_rule: check ptr before use it [[#13731](https://github.com/hashicorp/terraform/issues/13731)) - * provider/alicloud: alicloud_instance: fix ecs internet_max_bandwidth_out cannot set zero bug ([#13731](https://github.com/hashicorp/terraform/issues/13731)) - * provider/aws: Allow force-destroying `aws_route53_zone` which has trailing dot ([#12421](https://github.com/hashicorp/terraform/issues/12421)) - * provider/aws: Allow GovCloud KMS ARNs to pass validation in `kms_key_id` attributes ([#13699](https://github.com/hashicorp/terraform/issues/13699)) - * provider/aws: Changing aws_opsworks_instance should ForceNew ([#13839](https://github.com/hashicorp/terraform/issues/13839)) - * provider/aws: Fix DB Parameter Group Name ([#13279](https://github.com/hashicorp/terraform/issues/13279)) - * provider/aws: Fix issue importing some Security 
Groups and Rules based on rule structure ([#13630](https://github.com/hashicorp/terraform/issues/13630)) - * provider/aws: Fix issue for cross account IAM role with `aws_lambda_permission` ([#13865](https://github.com/hashicorp/terraform/issues/13865)) - * provider/aws: Fix WAF IPSet descriptors removal on update ([#13766](https://github.com/hashicorp/terraform/issues/13766)) - * provider/aws: Increase default number of retries from 11 to 25 ([#13673](https://github.com/hashicorp/terraform/issues/13673)) - * provider/aws: Remove aws_vpc_dhcp_options if not found ([#13610](https://github.com/hashicorp/terraform/issues/13610)) - * provider/aws: Remove aws_network_acl_rule if not found ([#13608](https://github.com/hashicorp/terraform/issues/13608)) - * provider/aws: Use mutex & retry for WAF change operations ([#13656](https://github.com/hashicorp/terraform/issues/13656)) - * provider/aws: Adding support for ipv6 to aws_subnets needs migration ([#13876](https://github.com/hashicorp/terraform/issues/13876)) - * provider/aws: Fix validation of the `name_prefix` parameter of the `aws_alb` resource ([#13441](https://github.com/hashicorp/terraform/issues/13441)) - * provider/azurerm: azurerm_redis_cache resource missing hostname ([#13650](https://github.com/hashicorp/terraform/issues/13650)) - * provider/azurerm: Locking around Network Security Group / Subnets ([#13637](https://github.com/hashicorp/terraform/issues/13637)) - * provider/azurerm: Locking route table on subnet create/delete ([#13791](https://github.com/hashicorp/terraform/issues/13791)) - * provider/azurerm: VM's - fixes a bug where ssh_keys could contain a null entry ([#13755](https://github.com/hashicorp/terraform/issues/13755)) - * provider/azurerm: VM's - ignoring the case on the `create_option` field during Diff's ([#13933](https://github.com/hashicorp/terraform/issues/13933)) - * provider/azurerm: fixing a bug refreshing the `azurerm_redis_cache` ([#13899](https://github.com/hashicorp/terraform/issues/13899)) - * provider/fastly: Fix issue with using 0 for `default_ttl` ([#13648](https://github.com/hashicorp/terraform/issues/13648)) - * provider/google: Fix panic in GKE provisioning with addons ([#13954](https://github.com/hashicorp/terraform/issues/13954)) - * provider/fastly: Add ability to associate a healthcheck to a backend ([#13539](https://github.com/hashicorp/terraform/issues/13539)) - * provider/google: Stop setting the id when project creation fails ([#13644](https://github.com/hashicorp/terraform/issues/13644)) - * provider/google: Make ports in resource_compute_forwarding_rule ForceNew ([#13833](https://github.com/hashicorp/terraform/issues/13833)) - * provider/google: Validation fixes for forwarding rules ([#13952](https://github.com/hashicorp/terraform/issues/13952)) - * provider/ignition: Internal cache moved to global, instead per provider instance ([#13919](https://github.com/hashicorp/terraform/issues/13919)) - * provider/logentries: Refresh from state when resources not found ([#13810](https://github.com/hashicorp/terraform/issues/13810)) - * provider/newrelic: newrelic_alert_condition - `condition_scope` must be `application` or `instance` ([#12972](https://github.com/hashicorp/terraform/issues/12972)) - * provider/opc: fixed issue with unqualifying nats 
([#13826](https://github.com/hashicorp/terraform/issues/13826))
- * provider/opc: Fix instance label if unset ([#13846](https://github.com/hashicorp/terraform/issues/13846))
- * provider/openstack: Fix updating Ports ([#13604](https://github.com/hashicorp/terraform/issues/13604))
- * provider/rabbitmq: Allow users without tags ([#13798](https://github.com/hashicorp/terraform/issues/13798))
-
-## 0.9.3 (April 12, 2017)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * provider/aws: Fix a critical bug in `aws_emr_cluster` in order to preserve the ordering of any arguments in `bootstrap_action`. Terraform will now enforce the ordering from the configuration. As a result, `aws_emr_cluster` resources may need to be recreated, as there is no API to update them in-place ([#13580](https://github.com/hashicorp/terraform/issues/13580))
-
-FEATURES:
-
- * **New Resource:** `aws_api_gateway_method_settings` ([#13542](https://github.com/hashicorp/terraform/issues/13542))
- * **New Resource:** `aws_api_gateway_stage` ([#13540](https://github.com/hashicorp/terraform/issues/13540))
- * **New Resource:** `aws_iam_openid_connect_provider` ([#13456](https://github.com/hashicorp/terraform/issues/13456))
- * **New Resource:** `aws_lightsail_static_ip` ([#13175](https://github.com/hashicorp/terraform/issues/13175))
- * **New Resource:** `aws_lightsail_static_ip_attachment` ([#13207](https://github.com/hashicorp/terraform/issues/13207))
- * **New Resource:** `aws_ses_domain_identity` ([#13098](https://github.com/hashicorp/terraform/issues/13098))
- * **New Resource:** `azurerm_managed_disk` ([#12455](https://github.com/hashicorp/terraform/issues/12455))
- * **New Resource:** `kubernetes_persistent_volume` ([#13277](https://github.com/hashicorp/terraform/issues/13277))
- * **New Resource:** `kubernetes_persistent_volume_claim` ([#13527](https://github.com/hashicorp/terraform/issues/13527))
- * **New Resource:** `kubernetes_secret` ([#12960](https://github.com/hashicorp/terraform/issues/12960))
- * **New Data Source:** `aws_iam_role` ([#13213](https://github.com/hashicorp/terraform/issues/13213))
-
-IMPROVEMENTS:
+* command/show: use the state snapshot included in the planfile when rendering a plan to json ([#21597](https://github.com/hashicorp/terraform/issues/21597))
+* config: Fix issue with empty dynamic blocks failing when using ConfigModeAttr ([#21549](https://github.com/hashicorp/terraform/issues/21549))
+* core: Re-validate resource config during final plan ([#21555](https://github.com/hashicorp/terraform/issues/21555))
+* core: Fix missing resource timeouts during destroy ([#21611](https://github.com/hashicorp/terraform/issues/21611))
+* core: Don't panic when encountering an invalid `depends_on` ([#21590](https://github.com/hashicorp/terraform/issues/21590))
+* backend: Fix panic when upgrading from a state with a hash value greater than MaxInt ([#21484](https://github.com/hashicorp/terraform/issues/21484))
- * core: add `-lock-timeout` option, which will block and retry locks for the given duration ([#13262](https://github.com/hashicorp/terraform/issues/13262))
- * core: new `chomp` interpolation function which returns the given string with any trailing newline characters removed ([#13419](https://github.com/hashicorp/terraform/issues/13419))
- * backend/remote-state: Add support for assume role extensions to s3 
backend ([#13236](https://github.com/hashicorp/terraform/issues/13236)) - * backend/remote-state: Filter extra entries from s3 environment listings ([#13596](https://github.com/hashicorp/terraform/issues/13596)) - * config: New interpolation functions `basename` and `dirname`, for file path manipulation ([#13080](https://github.com/hashicorp/terraform/issues/13080)) - * helper/resource: Allow unknown "pending" states ([#13099](https://github.com/hashicorp/terraform/issues/13099)) - * command/hook_ui: Increase max length of state IDs from 20 to 80 ([#13317](https://github.com/hashicorp/terraform/issues/13317)) - * provider/aws: Add support to set iam_role_arn on cloudformation Stack ([#12547](https://github.com/hashicorp/terraform/issues/12547)) - * provider/aws: Support priority and listener_arn update of alb_listener_rule ([#13125](https://github.com/hashicorp/terraform/issues/13125)) - * provider/aws: Deprecate roles in favour of role in iam_instance_profile ([#13130](https://github.com/hashicorp/terraform/issues/13130)) - * provider/aws: Make alb_target_group_attachment port optional ([#13139](https://github.com/hashicorp/terraform/issues/13139)) - * provider/aws: `aws_api_gateway_domain_name` `certificate_private_key` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_directory_service_directory` `password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_kinesis_firehose_delivery_stream` `password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_opsworks_application` `app_source.0.password` & `ssl_configuration.0.private_key` fields marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: `aws_opsworks_stack` `custom_cookbooks_source.0.password` field marked as sensitive ([#13147](https://github.com/hashicorp/terraform/issues/13147)) - * provider/aws: Support the ability to enable / disable ipv6 support in VPC ([#12527](https://github.com/hashicorp/terraform/issues/12527)) - * provider/aws: Added API Gateway integration update ([#13249](https://github.com/hashicorp/terraform/issues/13249)) - * provider/aws: Add `identifier` | `name_prefix` to RDS resources ([#13232](https://github.com/hashicorp/terraform/issues/13232)) - * provider/aws: Validate `aws_ecs_task_definition.container_definitions` ([#12161](https://github.com/hashicorp/terraform/issues/12161)) - * provider/aws: Update caller_identity data source ([#13092](https://github.com/hashicorp/terraform/issues/13092)) - * provider/aws: `aws_subnet_ids` data source for getting a list of subnet ids matching certain criteria ([#13188](https://github.com/hashicorp/terraform/issues/13188)) - * provider/aws: Support ip_address_type for aws_alb ([#13227](https://github.com/hashicorp/terraform/issues/13227)) - * provider/aws: Migrate `aws_dms_*` resources away from AWS waiters ([#13291](https://github.com/hashicorp/terraform/issues/13291)) - * provider/aws: Add support for treat_missing_data to cloudwatch_metric_alarm ([#13358](https://github.com/hashicorp/terraform/issues/13358)) - * provider/aws: Add support for evaluate_low_sample_count_percentiles to cloudwatch_metric_alarm 
([#13371](https://github.com/hashicorp/terraform/issues/13371)) - * provider/aws: Add `name_prefix` to `aws_alb_target_group` ([#13442](https://github.com/hashicorp/terraform/issues/13442)) - * provider/aws: Add support for EMR clusters to aws_appautoscaling_target ([#13368](https://github.com/hashicorp/terraform/issues/13368)) - * provider/aws: Add import capabilities to codecommit_repository ([#13577](https://github.com/hashicorp/terraform/issues/13577)) - * provider/bitbucket: Improved error handling ([#13390](https://github.com/hashicorp/terraform/issues/13390)) - * provider/cloudstack: Do not force a new resource when updating `cloudstack_loadbalancer_rule` members ([#11786](https://github.com/hashicorp/terraform/issues/11786)) - * provider/fastly: Add support for Sumologic logging ([#12541](https://github.com/hashicorp/terraform/issues/12541)) - * provider/github: Handle the case when issue labels already exist ([#13182](https://github.com/hashicorp/terraform/issues/13182)) - * provider/google: Mark `google_container_cluster`'s `client_key` & `password` inside `master_auth` as sensitive ([#13148](https://github.com/hashicorp/terraform/issues/13148)) - * provider/google: Add node_pool field in resource_container_cluster ([#13402](https://github.com/hashicorp/terraform/issues/13402)) - * provider/kubernetes: Allow defining custom config context ([#12958](https://github.com/hashicorp/terraform/issues/12958)) - * provider/openstack: Add support for 'value_specs' options to `openstack_compute_servergroup_v2` ([#13380](https://github.com/hashicorp/terraform/issues/13380)) - * provider/statuscake: Add support for StatusCake TriggerRate field ([#13340](https://github.com/hashicorp/terraform/issues/13340)) - * provider/triton: Move to joyent/triton-go ([#13225](https://github.com/hashicorp/terraform/issues/13225)) - * provisioner/chef: Make sure we add new Chef-Vault clients as clients ([#13525](https://github.com/hashicorp/terraform/issues/13525)) +## 0.12.1 (June 3, 2019) BUG FIXES: - * core: Escaped interpolation-like sequences (like `$${foo}`) now permitted in variable defaults ([#13137](https://github.com/hashicorp/terraform/issues/13137)) - * core: Fix strange issues with computed values in provider configuration that were worked around with `-input=false` ([#11264](https://github.com/hashicorp/terraform/issues/11264)], [[#13264](https://github.com/hashicorp/terraform/issues/13264)) - * core: Fix crash when providing nested maps as variable values in a `module` block ([#13343](https://github.com/hashicorp/terraform/issues/13343)) - * core: `connection` block attributes are now subject to basic validation of attribute names during validate walk ([#13400](https://github.com/hashicorp/terraform/issues/13400)) - * provider/aws: Add Support for maintenance_window and back_window to rds_cluster_instance ([#13134](https://github.com/hashicorp/terraform/issues/13134)) - * provider/aws: Increase timeout for AMI registration ([#13159](https://github.com/hashicorp/terraform/issues/13159)) - * provider/aws: Increase timeouts for ELB ([#13161](https://github.com/hashicorp/terraform/issues/13161)) - * provider/aws: `volume_type` of `aws_elasticsearch_domain.0.ebs_options` marked as `Computed` which prevents spurious diffs ([#13160](https://github.com/hashicorp/terraform/issues/13160)) - * provider/aws: Don't set 
DBName on `aws_db_instance` from snapshot ([#13140](https://github.com/hashicorp/terraform/issues/13140)) - * provider/aws: Add DiffSuppression to aws_ecs_service placement_strategies ([#13220](https://github.com/hashicorp/terraform/issues/13220)) - * provider/aws: Refresh aws_alb_target_group stickiness on manual updates ([#13199](https://github.com/hashicorp/terraform/issues/13199)) - * provider/aws: Preserve default retain_on_delete in cloudfront import ([#13209](https://github.com/hashicorp/terraform/issues/13209)) - * provider/aws: Refresh aws_alb_target_group tags ([#13200](https://github.com/hashicorp/terraform/issues/13200)) - * provider/aws: Set aws_vpn_connection to recreate when in deleted state ([#13204](https://github.com/hashicorp/terraform/issues/13204)) - * provider/aws: Wait for aws_opsworks_instance to be running when it's specified ([#13218](https://github.com/hashicorp/terraform/issues/13218)) - * provider/aws: Handle `aws_lambda_function` missing s3 key error ([#10960](https://github.com/hashicorp/terraform/issues/10960)) - * provider/aws: Set stickiness to computed in alb_target_group ([#13278](https://github.com/hashicorp/terraform/issues/13278)) - * provider/aws: Increase timeout for deploying `cloudfront_distribution` from 40 to 70 mins ([#13319](https://github.com/hashicorp/terraform/issues/13319)) - * provider/aws: Increase AMI retry timeouts ([#13324](https://github.com/hashicorp/terraform/issues/13324)) - * provider/aws: Increase subnet deletion timeout ([#13356](https://github.com/hashicorp/terraform/issues/13356)) - * provider/aws: Increase launch_configuration creation timeout ([#13357](https://github.com/hashicorp/terraform/issues/13357)) - * provider/aws: Increase Beanstalk env 'ready' timeout ([#13359](https://github.com/hashicorp/terraform/issues/13359)) - * provider/aws: Raise timeout for deleting APIG REST API ([#13414](https://github.com/hashicorp/terraform/issues/13414)) - * provider/aws: Raise timeout for attaching/detaching VPN Gateway ([#13457](https://github.com/hashicorp/terraform/issues/13457)) - * provider/aws: Recreate opsworks_stack on change of service_role_arn ([#13325](https://github.com/hashicorp/terraform/issues/13325)) - * provider/aws: Fix KMS Key reading with Exists method ([#13348](https://github.com/hashicorp/terraform/issues/13348)) - * provider/aws: Fix DynamoDB issues about GSIs indexes ([#13256](https://github.com/hashicorp/terraform/issues/13256)) - * provider/aws: Fix `aws_s3_bucket` drift detection of logging options ([#13281](https://github.com/hashicorp/terraform/issues/13281)) - * provider/aws: Update ElasticTranscoderPreset to have default for MaxFrameRate ([#13422](https://github.com/hashicorp/terraform/issues/13422)) - * provider/aws: Fix aws_ami_launch_permission refresh when AMI disappears ([#13469](https://github.com/hashicorp/terraform/issues/13469)) - * provider/aws: Add support for updating SSM documents ([#13491](https://github.com/hashicorp/terraform/issues/13491)) - * provider/aws: Fix panic on nil route configs ([#13548](https://github.com/hashicorp/terraform/issues/13548)) - * provider/azurerm: Network Security Group - ignoring protocol casing at Import time ([#13153](https://github.com/hashicorp/terraform/issues/13153)) - * provider/azurerm: Fix crash when importing Local Network Gateways 
([#13261](https://github.com/hashicorp/terraform/issues/13261)) - * provider/azurerm: Defaulting the value of `duplicate_detection_history_time_window` for `azurerm_servicebus_topic` ([#13223](https://github.com/hashicorp/terraform/issues/13223)) - * provider/azurerm: Event Hubs making the Location field idempotent ([#13570](https://github.com/hashicorp/terraform/issues/13570)) - * provider/bitbucket: Fixed issue where provider would fail with an "EOF" error on some operations ([#13390](https://github.com/hashicorp/terraform/issues/13390)) - * provider/dnsimple: Handle 404 on DNSimple records ([#13131](https://github.com/hashicorp/terraform/issues/13131)) - * provider/kubernetes: Use PATCH to update namespace ([#13114](https://github.com/hashicorp/terraform/issues/13114)) - * provider/ns1: No splitting answer on SPF records. ([#13260](https://github.com/hashicorp/terraform/issues/13260)) - * provider/openstack: Refresh volume_attachment from state if NotFound ([#13342](https://github.com/hashicorp/terraform/issues/13342)) - * provider/openstack: Add SOFT_DELETED to delete status ([#13444](https://github.com/hashicorp/terraform/issues/13444)) - * provider/profitbricks: Changed output type of ips variable of ip_block ProfitBricks resource ([#13290](https://github.com/hashicorp/terraform/issues/13290)) - * provider/template: Fix panic in cloudinit config ([#13581](https://github.com/hashicorp/terraform/issues/13581)) +* core: Always try to select a workspace after initialization ([#21234](https://github.com/hashicorp/terraform/issues/21234)) +* command/show: fix inconsistent json output causing a panic ([#21541](https://github.com/hashicorp/terraform/issues/21541)) +* config: `distinct` function no longer panics when given an empty list ([#21538](https://github.com/hashicorp/terraform/issues/21538)) +* config: Don't panic when a `version` constraint is added to a module that was previously initialized without one ([#21542](https://github.com/hashicorp/terraform/issues/21542)) +* config: `matchkeys` function argument type checking will no longer fail incorrectly during validation ([#21576](https://github.com/hashicorp/terraform/issues/21576)) +* backend/local: Don't panic if an instance in the state only has deposed instances, and no current instance ([#21575](https://github.com/hashicorp/terraform/issues/21575)) -## 0.9.2 (March 28, 2017) +## 0.12.0 (May 22, 2019) -BACKWARDS INCOMPATIBILITIES / NOTES: +--- - * provider/openstack: Port Fixed IPs are able to be read again using the original numerical notation. However, Fixed IP configurations which are obtaining addresses via DHCP must now use the `all_fixed_ips` attribute to reference the returned IP address. - * Environment names must be safe to use as a URL path segment without escaping, and is enforced by the CLI. +This is the aggregated summary of changes compared to v0.11.14. If you'd like to see the incremental changelog through each of the v0.12.0 prereleases, please refer to [the v0.12.0-rc1 changelog](https://github.com/hashicorp/terraform/blob/v0.12.0-rc1/CHANGELOG.md). 
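A small sketch of the module `version` fix from the 0.12.1 bug fixes above; the registry module shown is purely illustrative:

```hcl
module "consul" {
  source  = "hashicorp/consul/aws"  # illustrative registry module

  # Adding a version constraint to a module that was previously
  # initialized without one no longer panics as of 0.12.1.
  version = ">= 0.3.0"
}
```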
-FEATURES: +--- - * **New Resource:** `alicloud_db_instance` ([#12913](https://github.com/hashicorp/terraform/issues/12913)) - * **New Resource:** `aws_api_gateway_usage_plan` ([#12542](https://github.com/hashicorp/terraform/issues/12542)) - * **New Resource:** `aws_api_gateway_usage_plan_key` ([#12851](https://github.com/hashicorp/terraform/issues/12851)) - * **New Resource:** `github_repository_webhook` ([#12924](https://github.com/hashicorp/terraform/issues/12924)) - * **New Resource:** `random_pet` ([#12903](https://github.com/hashicorp/terraform/issues/12903)) - * **New Interpolation:** `substr` ([#12870](https://github.com/hashicorp/terraform/issues/12870)) - * **S3 Environments:** The S3 remote state backend now supports named environments +The focus of v0.12.0 was on improvements to the Terraform language made in response to all of the feedback and experience gathered on prior versions. We hope that these language improvements will help to make configurations for more complex situations more readable, and improve the usability of re-usable modules. -IMPROVEMENTS: +However, an overhaul of this kind inevitably means that 100% compatibility is not possible. The updated language is designed to be broadly compatible with the 0.11 language as documented, but some of the improvements required a slightly stricter parser and language model in order to resolve ambiguity or to give better feedback in error messages. - * core: fix interpolation error when referencing computed values from an `aws_instance` `cidr_block` ([#13046](https://github.com/hashicorp/terraform/issues/13046)) - * core: fix `ignore_changes` causing fields to be removed during apply ([#12897](https://github.com/hashicorp/terraform/issues/12897)) - * core: add `-force-copy` option to `terraform init` to supress prompts for copying state ([#12939](https://github.com/hashicorp/terraform/issues/12939)) - * helper/acctest: Add NewSSHKeyPair function ([#12894](https://github.com/hashicorp/terraform/issues/12894)) - * provider/alicloud: simplify validators ([#12982](https://github.com/hashicorp/terraform/issues/12982)) - * provider/aws: Added support for EMR AutoScalingRole ([#12823](https://github.com/hashicorp/terraform/issues/12823)) - * provider/aws: Add `name_prefix` to `aws_autoscaling_group` and `aws_elb` resources ([#12629](https://github.com/hashicorp/terraform/issues/12629)) - * provider/aws: Updated default configuration manager version in `aws_opsworks_stack` ([#12979](https://github.com/hashicorp/terraform/issues/12979)) - * provider/aws: Added aws_api_gateway_api_key value attribute ([#9462](https://github.com/hashicorp/terraform/issues/9462)) - * provider/aws: Allow aws_alb subnets to change ([#12850](https://github.com/hashicorp/terraform/issues/12850)) - * provider/aws: Support Attachment of ALB Target Groups to Autoscaling Groups ([#12855](https://github.com/hashicorp/terraform/issues/12855)) - * provider/aws: Support Import of iam_server_certificate ([#13065](https://github.com/hashicorp/terraform/issues/13065)) - * provider/azurerm: Add support for setting the primary network interface ([#11290](https://github.com/hashicorp/terraform/issues/11290)) - * provider/cloudstack: Add `zone_id` to `cloudstack_ipaddress` resource ([#11306](https://github.com/hashicorp/terraform/issues/11306)) - * provider/consul: Add support for basic auth to the provider 
([#12679](https://github.com/hashicorp/terraform/issues/12679)) - * provider/digitalocean: Support disk only resize ([#13059](https://github.com/hashicorp/terraform/issues/13059)) - * provider/dnsimple: Allow dnsimple_record.priority attribute to be set ([#12843](https://github.com/hashicorp/terraform/issues/12843)) - * provider/google: Add support for service_account, metadata, and image_type fields in GKE cluster config ([#12743](https://github.com/hashicorp/terraform/issues/12743)) - * provider/google: Add local ssd count support for container clusters ([#12281](https://github.com/hashicorp/terraform/issues/12281)) - * provider/ignition: ignition_filesystem, explicit option to create the filesystem ([#12980](https://github.com/hashicorp/terraform/issues/12980)) - * provider/kubernetes: Internal K8S annotations are ignored in `config_map` ([#12945](https://github.com/hashicorp/terraform/issues/12945)) - * provider/ns1: Ensure provider checks for credentials ([#12920](https://github.com/hashicorp/terraform/issues/12920)) - * provider/openstack: Adding Timeouts to Blockstorage Resources ([#12862](https://github.com/hashicorp/terraform/issues/12862)) - * provider/openstack: Adding Timeouts to FWaaS v1 Resources ([#12863](https://github.com/hashicorp/terraform/issues/12863)) - * provider/openstack: Adding Timeouts to Image v2 and LBaaS v2 Resources ([#12865](https://github.com/hashicorp/terraform/issues/12865)) - * provider/openstack: Adding Timeouts to Network Resources ([#12866](https://github.com/hashicorp/terraform/issues/12866)) - * provider/openstack: Adding Timeouts to LBaaS v1 Resources ([#12867](https://github.com/hashicorp/terraform/issues/12867)) - * provider/openstack: Deprecating Instance Volume attribute ([#13062](https://github.com/hashicorp/terraform/issues/13062)) - * provider/openstack: Decprecating Instance Floating IP attribute ([#13063](https://github.com/hashicorp/terraform/issues/13063)) - * provider/openstack: Don't log the catalog ([#13075](https://github.com/hashicorp/terraform/issues/13075)) - * provider/openstack: Handle 409/500 Response on Pool Create ([#13074](https://github.com/hashicorp/terraform/issues/13074)) - * provider/pagerduty: Validate credentials ([#12854](https://github.com/hashicorp/terraform/issues/12854)) - * provider/openstack: Adding all_metadata attribute ([#13061](https://github.com/hashicorp/terraform/issues/13061)) - * provider/profitbricks: Handling missing resources ([#13053](https://github.com/hashicorp/terraform/issues/13053)) +If you are upgrading to v0.12.0, we strongly recommend reading [the upgrade guide](https://www.terraform.io/upgrade-guides/0-12.html) to learn the recommended upgrade process, which includes a tool to automatically upgrade many improved language constructs and to indicate situations where human intuition is required to complete the upgrade. -BUG FIXES: - - * core: Remove legacy remote state configuration on state migration. This fixes errors when saving plans. 
([#12888](https://github.com/hashicorp/terraform/issues/12888)) - * provider/arukas: Default timeout for launching container increased to 15mins (was 10mins) ([#12849](https://github.com/hashicorp/terraform/issues/12849)) - * provider/aws: Fix flattened cloudfront lambda function associations to be a set not a slice ([#11984](https://github.com/hashicorp/terraform/issues/11984)) - * provider/aws: Consider ACTIVE as pending state during ECS svc deletion ([#12986](https://github.com/hashicorp/terraform/issues/12986)) - * provider/aws: Deprecate the usage of Api Gateway Key Stages in favor of Usage Plans ([#12883](https://github.com/hashicorp/terraform/issues/12883)) - * provider/aws: prevent panic in resourceAwsSsmDocumentRead ([#12891](https://github.com/hashicorp/terraform/issues/12891)) - * provider/aws: Prevent panic when setting AWS CodeBuild Source to state ([#12915](https://github.com/hashicorp/terraform/issues/12915)) - * provider/aws: Only call replace Iam Instance Profile on existing machines ([#12922](https://github.com/hashicorp/terraform/issues/12922)) - * provider/aws: Increase AWS AMI Destroy timeout ([#12943](https://github.com/hashicorp/terraform/issues/12943)) - * provider/aws: Set aws_vpc ipv6 for associated only ([#12899](https://github.com/hashicorp/terraform/issues/12899)) - * provider/aws: Fix AWS ECS placement strategy spread fields ([#12998](https://github.com/hashicorp/terraform/issues/12998)) - * provider/aws: Specify that aws_network_acl_rule requires a cidr block ([#13013](https://github.com/hashicorp/terraform/issues/13013)) - * provider/aws: aws_network_acl_rule treat all and -1 for protocol the same ([#13049](https://github.com/hashicorp/terraform/issues/13049)) - * provider/aws: Only allow 1 value in alb_listener_rule condition ([#13051](https://github.com/hashicorp/terraform/issues/13051)) - * provider/aws: Correct handling of network ACL default IPv6 ingress/egress rules ([#12835](https://github.com/hashicorp/terraform/issues/12835)) - * provider/aws: aws_ses_receipt_rule: fix off-by-one errors ([#12961](https://github.com/hashicorp/terraform/issues/12961)) - * provider/aws: Fix issue upgrading to Terraform v0.9+ with AWS OpsWorks Stacks ([#13024](https://github.com/hashicorp/terraform/issues/13024)) - * provider/fastly: Fix issue importing Fastly Services with Backends ([#12538](https://github.com/hashicorp/terraform/issues/12538)) - * provider/google: turn compute_instance_group.instances into a set ([#12790](https://github.com/hashicorp/terraform/issues/12790)) - * provider/mysql: recreate user/grant if user/grant got deleted manually ([#12791](https://github.com/hashicorp/terraform/issues/12791)) - * provider/openstack: Fix monitor_id typo in LBaaS v1 Pool ([#13069](https://github.com/hashicorp/terraform/issues/13069)) - * provider/openstack: Resolve issues with Port Fixed IPs ([#13056](https://github.com/hashicorp/terraform/issues/13056)) - * provider/rancher: error when no api_url is provided ([#13086](https://github.com/hashicorp/terraform/issues/13086)) - * provider/scaleway: work around parallel request limitation ([#13045](https://github.com/hashicorp/terraform/issues/13045)) - -## 0.9.1 (March 17, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: +### Incompatibilities and Notes - * provider/pagerduty: the deprecated `name_regex` field has been removed from 
vendor data source ([#12396](https://github.com/hashicorp/terraform/issues/12396)) +* As noted above, the language overhaul means that several aspects of the language are now parsed or evaluated more strictly than before, so configurations that employ workarounds for prior version limitations or that followed conventions other than what was shown in documentation may require some updates. For more information, please refer to [the upgrade guide](https://www.terraform.io/upgrade-guides/0-12.html). -FEATURES: +* In order to give better feedback about mistakes, Terraform now validates that all variable names set via `-var` and `-var-file` options correspond to declared variables, generating errors or warnings if not. In situations where automation is providing a fixed set of variables to all configurations (whether they are using them or not), use [`TF_VAR_` environment variables](https://www.terraform.io/docs/commands/environment-variables.html#tf_var_name) instead, which are ignored if they do not correspond to a declared variable. - * **New Provider:** `kubernetes` ([#12372](https://github.com/hashicorp/terraform/issues/12372)) - * **New Resource:** `kubernetes_namespace` ([#12372](https://github.com/hashicorp/terraform/issues/12372)) - * **New Resource:** `kubernetes_config_map` ([#12753](https://github.com/hashicorp/terraform/issues/12753)) - * **New Data Source:** `dns_a_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744)) - * **New Data Source:** `dns_cname_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744)) - * **New Data Source:** `dns_txt_record_set` ([#12744](https://github.com/hashicorp/terraform/issues/12744)) +* The wire protocol for provider and provisioner plugins has changed, so plugins built against prior versions of Terraform are not compatible with Terraform v0.12. The most commonly-downloaded providers already had v0.12-compatible releases at the time of v0.12.0 release, but some other providers (particularly those distributed independently of the `terraform init` installation mechanism) will need to make new releases before they can be used with Terraform v0.12 or later. -IMPROVEMENTS: +* The index API for automatic provider installation in `terraform init` is now provided by the Terraform Registry at `registry.terraform.io`, rather than the indexes directly on `releases.hashicorp.com`. The "releases" server is still currently the distribution source for the release archives themselves at the time of writing, but that may change over time. 
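As a minimal illustration of the `TF_VAR_` guidance above (the variable name and provider are illustrative):

```hcl
variable "region" {
  type = string
}

# With the environment variable set, e.g.
#   TF_VAR_region=us-west-2 terraform plan
# Terraform populates var.region. An exported TF_VAR_* name that matches
# no declared variable is silently ignored, whereas an unknown name passed
# via -var or -var-file now produces an error or warning.

provider "aws" {
  region = var.region
}
```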
- * command/init: `-backend-config` accepts `key=value` pairs - * provider/aws: Improved error when failing to get S3 tags ([#12759](https://github.com/hashicorp/terraform/issues/12759)) - * provider/aws: Validate CIDR Blocks in SG and SG rule resources ([#12765](https://github.com/hashicorp/terraform/issues/12765)) - * provider/aws: Add KMS key tag support ([#12243](https://github.com/hashicorp/terraform/issues/12243)) - * provider/aws: Allow `name_prefix` to be used with various IAM resources ([#12658](https://github.com/hashicorp/terraform/issues/12658)) - * provider/openstack: Add timeout support for Compute resources ([#12794](https://github.com/hashicorp/terraform/issues/12794)) - * provider/scaleway: expose public IPv6 information on scaleway_server ([#12748](https://github.com/hashicorp/terraform/issues/12748)) +* The serialization formats for persisted state snapshots and saved plans have changed. Third-party tools that parse these artifacts will need to be updated to support these new serialization formats. -BUG FIXES: - - * core: Fix panic when an undefined module is reference ([#12793](https://github.com/hashicorp/terraform/issues/12793)) - * core: Fix regression from 0.8.x when using a data source in a module ([#12837](https://github.com/hashicorp/terraform/issues/12837)) - * command/apply: Applies from plans with backends set will reuse the backend rather than local ([#12785](https://github.com/hashicorp/terraform/issues/12785)) - * command/init: Changing only `-backend-config` detects changes and reconfigures ([#12776](https://github.com/hashicorp/terraform/issues/12776)) - * command/init: Fix legacy backend init error that could occur when upgrading ([#12818](https://github.com/hashicorp/terraform/issues/12818)) - * command/push: Detect local state and error properly ([#12773](https://github.com/hashicorp/terraform/issues/12773)) - * command/refresh: Allow empty and non-existent state ([#12777](https://github.com/hashicorp/terraform/issues/12777)) - * provider/aws: Get the aws_lambda_function attributes when there are great than 50 versions of a function ([#11745](https://github.com/hashicorp/terraform/issues/11745)) - * provider/aws: Correctly check for nil cidr_block in aws_network_acl ([#12735](https://github.com/hashicorp/terraform/issues/12735)) - * provider/aws: Stop setting weight property on route53_record read ([#12756](https://github.com/hashicorp/terraform/issues/12756)) - * provider/google: Fix the Google provider asking for account_file input on every run ([#12729](https://github.com/hashicorp/terraform/issues/12729)) - * provider/profitbricks: Prevent panic on profitbricks volume ([#12819](https://github.com/hashicorp/terraform/issues/12819)) + For most use-cases, we recommend instead using [`terraform show -json`](https://www.terraform.io/docs/commands/show.html#json-output) to read the content of state or plan, in a form that is less likely to see significant breaking changes in future releases. +* [`terraform validate`](https://www.terraform.io/docs/commands/validate.html) now has a slightly smaller scope than before, focusing only on configuration syntax and type/value checking. This makes it safe to run in unattended scenarios, such as on save in a text editor. -## 0.9.0 (March 15, 2017) +### New Features -**This is the complete 0.8.8 to 0.9 CHANGELOG. 
Below this section we also have a 0.9.0-beta2 to 0.9.0 final CHANGELOG.**
+The full set of language improvements is too large to list them all out exhaustively, so the list below covers some highlights:
-BACKWARDS INCOMPATIBILITIES / NOTES:
+* **First-class expressions:** Prior to v0.12, expressions could be used only via string interpolation, like `"${var.foo}"`. Expressions are now fully integrated into the language, allowing them to be used directly as argument values, like `ami = var.ami`.
- * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503))
- * provider/azurerm: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store a hash of custom_data, not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214))
- * provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive
- * provider/azurerm: sql_server `administrator_login_password` now marked as sensitive
- * provider/dnsimple: Provider has been upgraded to APIv2; therefore, you will need to use the APIv2 auth token
- * provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044))
- * provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223))
- * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663))
- * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668))
- * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659))
+* **`for` expressions:** This new expression construct allows the construction of a list or map by transforming and filtering elements from another list or map. For more information, refer to [the _`for` expressions_ documentation](https://www.terraform.io/docs/configuration/expressions.html#for-expressions).
-FEATURES:
+* **Dynamic configuration blocks:** For nested configuration blocks accepted as part of a resource configuration, it is now possible to dynamically generate zero or more blocks corresponding to items in a list or map using the special new `dynamic` block construct. This is the official replacement for the common (but buggy) unofficial workaround of treating a block type name as if it were an attribute expecting a list of maps value, which worked sometimes before as a result of some unintended coincidences in the implementation.
- * **Remote Backends:** This is a successor to "remote state" and includes file-based configuration, an improved setup process (just run `terraform init`), no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286))
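To make the three language highlights above concrete, here is a short sketch that combines them; the resource types, names, and values are illustrative:

```hcl
variable "ami" {
  type = string
}

variable "ingress_ports" {
  type    = list(number)
  default = [22, 80, 443]
}

resource "aws_security_group" "example" {
  name = "example"

  # Dynamic blocks: one ingress block per element of var.ingress_ports.
  dynamic "ingress" {
    for_each = var.ingress_ports
    content {
      from_port   = ingress.value
      to_port     = ingress.value
      protocol    = "tcp"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }
}

resource "aws_instance" "example" {
  ami           = var.ami  # first-class expression: no "${...}" wrapper needed
  instance_type = "t3.micro"
}

output "port_names" {
  # A `for` expression transforming one list into another.
  value = [for p in var.ingress_ports : "tcp-${p}"]
}
```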
([#11286](https://github.com/hashicorp/terraform/issues/11286)) - * **Destroy Provisioners:** Provisioners can now be configured to run - on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329)) - * **State Locking:** State will be automatically locked when supported by the backend. - Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187)) - * **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration. - * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578)) - * **New Data Source:** `openstack_networking_network_v2` ([#12304](https://github.com/hashicorp/terraform/issues/12304)) - * **New Resource:** `aws_iam_account_alias` ([#12648](https://github.com/hashicorp/terraform/issues/12648)) - * **New Resource:** `datadog_downtime` ([#10994](https://github.com/hashicorp/terraform/issues/10994)) - * **New Resource:** `ns1_notifylist` ([#12373](https://github.com/hashicorp/terraform/issues/12373)) - * **New Resource:** `google_container_node_pool` ([#11802](https://github.com/hashicorp/terraform/issues/11802)) - * **New Resource:** `rancher_certificate` ([#12717](https://github.com/hashicorp/terraform/issues/12717)) - * **New Resource:** `rancher_host` ([#11545](https://github.com/hashicorp/terraform/issues/11545)) - * helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311)) +* **Generalised "splat" operator:** The `aws_instance.foo.*.id` syntax was previously a special case only for resources with `count` set. It is now an operator within the expression language that can be applied to any list value. There is also an optional new splat variant that allows both index and attribute access operations on each item in the list. For more information, refer to [the _Splat Expressions_ documentation](https://www.terraform.io/docs/configuration/expressions.html#splat-expressions). -IMPROVEMENTS: +* **Nullable argument values:** It is now possible to use a conditional expression like `var.foo != "" ? var.foo : null` to conditionally leave an argument value unset, whereas before Terraform required the configuration author to provide a specific default value in this case. Assigning `null` to an argument is equivalent to omitting that argument entirely. These expression features, together with the `for` expressions and `dynamic` blocks above, are combined in the sketch below. - * core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482)) - * core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929)) - * core: report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383)) - * command: CLI args can be specified via env vars. Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922)) - * command/init: previous behavior is retained, but init now also configures - the new remote backends as well as downloads modules. It is the single - command to initialize a new or existing Terraform configuration.
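As a combined illustration of the expression features above, here is a minimal sketch. The provider, resource types, and names (`ingress_ports`, `subnet_ids`, `aws_instance.app`) are hypothetical choices for the example, not anything prescribed by the release notes:

```hcl
variable "ami" {
  type = string
}

variable "subnet_ids" {
  type = list(string)
}

variable "ingress_ports" {
  type    = list(number)
  default = [22, 443]
}

variable "description" {
  type    = string
  default = ""
}

resource "aws_security_group" "example" {
  name = "example"

  # Nullable argument: leave description unset when the variable is empty.
  description = var.description != "" ? var.description : null

  # Dynamic block: generate one ingress block per port in the list.
  dynamic "ingress" {
    for_each = var.ingress_ports
    content {
      from_port   = ingress.value
      to_port     = ingress.value
      protocol    = "tcp"
      cidr_blocks = ["0.0.0.0/0"]
    }
  }
}

resource "aws_instance" "app" {
  count = length(var.subnet_ids)

  # First-class expressions: no "${...}" wrapping required.
  ami           = var.ami
  subnet_id     = var.subnet_ids[count.index]
  instance_type = "t2.micro"
}

# for expression: build a map from instance ID to private IP.
output "private_ips" {
  value = { for inst in aws_instance.app : inst.id => inst.private_ip }
}

# Generalised splat: collect one attribute from every element of a list.
output "instance_ids" {
  value = aws_instance.app[*].id
}
```

Under 0.11 each of these would have required string interpolation, hand-duplicated blocks, or a sentinel default value.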
- * command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261)) - * provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188)) - * provider/aws: Return errors from Elastic Beanstalk ([#12425](https://github.com/hashicorp/terraform/issues/12425)) - * provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668)) - * provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694)) - * provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695)) - * provider/aws: Add support for ACM certificates to `api_gateway_domain_name` ([#12592](https://github.com/hashicorp/terraform/issues/12592)) - * provider/aws: Add support for IPv6 to aws\_security\_group\_rule ([#12645](https://github.com/hashicorp/terraform/issues/12645)) - * provider/aws: Add IPv6 Support to aws\_route\_table ([#12640](https://github.com/hashicorp/terraform/issues/12640)) - * provider/aws: Add support for IPv6 to aws\_network\_acl\_rule ([#12644](https://github.com/hashicorp/terraform/issues/12644)) - * provider/aws: Add support for IPv6 to aws\_default\_route\_table ([#12642](https://github.com/hashicorp/terraform/issues/12642)) - * provider/aws: Add support for IPv6 to aws\_network\_acl ([#12641](https://github.com/hashicorp/terraform/issues/12641)) - * provider/aws: Add support for IPv6 in aws\_route ([#12639](https://github.com/hashicorp/terraform/issues/12639)) - * provider/aws: Add support for IPv6 to aws\_security\_group ([#12655](https://github.com/hashicorp/terraform/issues/12655)) - * provider/aws: Add replace\_unhealthy\_instances to spot\_fleet\_request ([#12681](https://github.com/hashicorp/terraform/issues/12681)) - * provider/aws: Remove restriction on running aws\_opsworks\_* on us-east-1 ([#12688](https://github.com/hashicorp/terraform/issues/12688)) - * provider/aws: Improve error message on S3 Bucket Object deletion ([#12712](https://github.com/hashicorp/terraform/issues/12712)) - * provider/aws: Add log message about if changes are being applied now or later ([#12691](https://github.com/hashicorp/terraform/issues/12691)) - * provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982)) - * provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004)) - * provider/azurerm: Add support for managed availability sets. 
([#12532](https://github.com/hashicorp/terraform/issues/12532)) - * provider/azurerm: Add support for extensions on virtual machine scale sets ([#12124](https://github.com/hashicorp/terraform/issues/12124)) - * provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760)) - * provider/docker: added support for linux capabilities ([#12045](https://github.com/hashicorp/terraform/issues/12045)) - * provider/fastly: Add Fastly SSL validation fields ([#12578](https://github.com/hashicorp/terraform/issues/12578)) - * provider/ignition: Migrate all of the igition resources to data sources ([#11851](https://github.com/hashicorp/terraform/issues/11851)) - * provider/openstack: Set Availability Zone in Instances ([#12610](https://github.com/hashicorp/terraform/issues/12610)) - * provider/openstack: Force Deletion of Instances ([#12689](https://github.com/hashicorp/terraform/issues/12689)) - * provider/rancher: Better comparison of compose files ([#12561](https://github.com/hashicorp/terraform/issues/12561)) - * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * provider/vault: read vault token from `~/.vault-token` as a fallback for the - `VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529)) - * provisioners: All provisioners now respond very quickly to interrupts for - fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934)) +* **Rich types in module input variables and output values:** Terraform v0.7 added support for returning flat lists and maps of strings, but this is now generalized to allow returning arbitrary nested data structures with mixed types. Module authors can specify an expected [type constraint](https://www.terraform.io/docs/configuration/types.html) for each input variable to allow early type checking of arguments. -BUG FIXES: +* **Resource and module object values:** An entire resource or module can now be treated as an object value within expressions, including passing them through input variables and output values to other modules, using an attribute-less reference syntax, like `aws_instance.foo`. Both features are sketched below.
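A minimal sketch of these two changes, assuming a hypothetical child module at `./modules/network`; the file boundaries are marked in comments and all names are illustrative:

```hcl
# --- modules/network/main.tf ---

# Rich type constraint: a map of objects rather than flat strings.
variable "subnets" {
  type = map(object({
    cidr_block = string
    public     = bool
  }))
}

resource "aws_vpc" "main" {
  cidr_block = "10.0.0.0/16"
}

# Resource object value: return the whole aws_vpc resource as one object.
output "vpc" {
  value = aws_vpc.main
}

# --- root module ---

module "network" {
  source = "./modules/network"

  subnets = {
    web = { cidr_block = "10.0.1.0/24", public = true }
    db  = { cidr_block = "10.0.2.0/24", public = false }
  }
}

# The caller can reach into the returned object with normal attribute syntax.
output "vpc_id" {
  value = module.network.vpc.id
}
```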
- * core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050)) - * core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210)) - * provider/aws: Fixes issue for aws_lb_ssl_negotiation_policy of already deleted ELB ([#12360](https://github.com/hashicorp/terraform/issues/12360)) - * provider/aws: Populate the iam_instance_profile uniqueId ([#12449](https://github.com/hashicorp/terraform/issues/12449)) - * provider/aws: Only send iops when creating io1 devices ([#12392](https://github.com/hashicorp/terraform/issues/12392)) - * provider/aws: Fix spurious aws_spot_fleet_request diffs ([#12437](https://github.com/hashicorp/terraform/issues/12437)) - * provider/aws: Changing volumes in ECS task definition should force new revision ([#11403](https://github.com/hashicorp/terraform/issues/11403)) - * provider/aws: Ignore whitespace in json diff for aws_dms_replication_task options ([#12380](https://github.com/hashicorp/terraform/issues/12380)) - * provider/aws: Check spot instance is running before trying to attach volumes ([#12459](https://github.com/hashicorp/terraform/issues/12459)) - * provider/aws: Add the IPV6 cidr block to the vpc datasource ([#12529](https://github.com/hashicorp/terraform/issues/12529)) - * provider/aws: Error on trying to recreate an existing customer gateway ([#12501](https://github.com/hashicorp/terraform/issues/12501)) - * provider/aws: Prevent aws_dms_replication_task panic ([#12539](https://github.com/hashicorp/terraform/issues/12539)) - * provider/aws: output the task definition name when errors occur during refresh ([#12609](https://github.com/hashicorp/terraform/issues/12609)) - * provider/aws: Refresh iam saml provider from state on 404 ([#12602](https://github.com/hashicorp/terraform/issues/12602)) - * provider/aws: Add address, port, hosted_zone_id and endpoint for aws_db_instance datasource ([#12623](https://github.com/hashicorp/terraform/issues/12623)) - * provider/aws: Allow recreation of `aws_opsworks_user_profile` when the `user_arn` is changed ([#12595](https://github.com/hashicorp/terraform/issues/12595)) - * provider/aws: Guard clause to prevent panic on ELB connectionSettings ([#12685](https://github.com/hashicorp/terraform/issues/12685)) - * provider/azurerm: bug fix to prevent crashes during azurerm_container_service provisioning ([#12516](https://github.com/hashicorp/terraform/issues/12516)) - * provider/cobbler: Fix Profile Repos ([#12452](https://github.com/hashicorp/terraform/issues/12452)) - * provider/datadog: Update to datadog_monitor to use default values ([#12497](https://github.com/hashicorp/terraform/issues/12497)) - * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903)) - * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336)) - * provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387)) - * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662)) - * provider/google: Minor 
correction : "Deleting disk" message in Delete method ([#12521](https://github.com/hashicorp/terraform/issues/12521)) - * provider/mysql: Avoid crash on un-interpolated provider cfg ([#12391](https://github.com/hashicorp/terraform/issues/12391)) - * provider/ns1: Fix incorrect schema (causing crash) for 'ns1_user.notify' ([#12721](https://github.com/hashicorp/terraform/issues/12721)) - * provider/openstack: Handle cases where volumes are disabled ([#12374](https://github.com/hashicorp/terraform/issues/12374)) - * provider/openstack: Toggle Creation of Default Security Group Rules ([#12119](https://github.com/hashicorp/terraform/issues/12119)) - * provider/openstack: Change Port fixed_ip to a Set ([#12613](https://github.com/hashicorp/terraform/issues/12613)) - * provider/openstack: Add network_id to Network data source ([#12615](https://github.com/hashicorp/terraform/issues/12615)) - * provider/openstack: Check for ErrDefault500 when creating/deleting pool member ([#12664](https://github.com/hashicorp/terraform/issues/12664)) - * provider/rancher: Apply the set value for finish_upgrade to set to prevent recurring plans ([#12545](https://github.com/hashicorp/terraform/issues/12545)) - * provider/scaleway: work around API concurrency issue ([#12707](https://github.com/hashicorp/terraform/issues/12707)) - * provider/statuscake: use default status code list when updating test ([#12375](https://github.com/hashicorp/terraform/issues/12375)) +* **Extended template syntax:** The simple interpolation syntax from prior versions is extended to become a simple template language, with support for conditional interpolations and repeated interpolations through iteration. For more information, see [the _String Templates_ documentation](https://www.terraform.io/docs/configuration/expressions.html#string-templates). -## 0.9.0 from 0.9.0-beta2 (March 15, 2017) +* **`jsondecode` and `csvdecode` interpolation functions:** Due to the richer type system in the new configuration language implementation, we can now offer functions for decoding serialization formats. [`jsondecode`](https://www.terraform.io/docs/configuration/functions/jsondecode.html) is the opposite of [`jsonencode`](https://www.terraform.io/docs/configuration/functions/jsonencode.html), while [`csvdecode`](https://www.terraform.io/docs/configuration/functions/csvdecode.html) provides a way to load lists of maps from a compact tabular representation. -**This only includes changes from 0.9.0-beta2 to 0.9.0 final. The section above has the complete 0.8.x to 0.9.0 CHANGELOG.** +* **Revamped error messages:** Error messages relating to configuration now always include information about where in the configuration the problem was found, along with other contextual information. We have also revisited many of the most common error messages to reword them for clarity, consistency, and actionability. -FEATURES: +* **Structural plan output:** When Terraform renders the set of changes it plans to make, it will now use formatting designed to be similar to the input configuration language, including nested rendering of individual changes within multi-line strings, JSON strings, and nested collections. - * **New Provider:** `Circonus` ([#12578](https://github.com/hashicorp/terraform/issues/12578)) +### Other Improvements -BACKWARDS INCOMPATIBILITIES / NOTES: +* `terraform validate` now accepts an argument `-json` which produces machine-readable output.
Please refer to the documentation for this command for details on the format and some caveats that consumers must consider when using this interface. ([#17539](https://github.com/hashicorp/terraform/issues/17539)) - * provider/aws: `aws_codebuild_project` renamed `timeout` to `build_timeout` ([#12503](https://github.com/hashicorp/terraform/issues/12503)) - * provider/azurem: `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` now store has of custom_data not all custom_data ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * provider/google: compute_instance, compute_instance_template, and compute_disk all have a subtly changed logic when specifying an image family as the image; in 0.8.x they would pin to the latest image in the family when the resource is created; in 0.9.x they pass the family to the API and use its behaviour. New input formats are also supported. ([#12223](https://github.com/hashicorp/terraform/issues/12223)) - * provider/google: removed the unused and deprecated region field from google_compute_backend_service ([#12663](https://github.com/hashicorp/terraform/issues/12663)) - * provider/google: removed the deprecated account_file field for the Google Cloud provider ([#12668](https://github.com/hashicorp/terraform/issues/12668)) - * provider/google: removed the deprecated fields from google_project ([#12659](https://github.com/hashicorp/terraform/issues/12659)) +* The JSON-based variant of the Terraform language now has a more tightly-specified and reliable mapping to the native syntax variant. In prior versions, certain Terraform configuration features did not function as expected or were not usable via the JSON-based forms. For more information, see [the _JSON Configuration Syntax_ documentation](https://www.terraform.io/docs/configuration/syntax-json.html). -IMPROVEMENTS: +* The new built-in function [`templatefile`](https://www.terraform.io/docs/configuration/functions/templatefile.html) allows rendering a template from a file directly in the language, without installing the separate Template provider and using the `template_file` data source. - * provider/azurerm: store only hash of `azurerm_virtual_machine` and `azurerm_virtual_machine_scale_set` custom_data - reduces size of state ([#12214](https://github.com/hashicorp/terraform/issues/12214)) - * report all errors encountered during config validation ([#12383](https://github.com/hashicorp/terraform/issues/12383)) +* The new built-in function [`formatdate`](https://www.terraform.io/docs/configuration/functions/formatdate.html) is a specialized string formatting function for creating machine-oriented timestamp strings in various formats. -BUG FIXES: +* The new built-in functions [`reverse`](https://www.terraform.io/docs/configuration/functions/reverse.html) and [`strrev`](https://www.terraform.io/docs/configuration/functions/strrev.html) reverse the order of items in a list and the order of Unicode characters in a string, respectively. These functions, along with `templatefile`, appear in the sketch below.
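A minimal sketch exercising these functions together; the file names (`config.json`, `users.csv`, `greeting.tmpl`) and their contents are assumptions made for illustration:

```hcl
locals {
  # jsondecode: parse a JSON document into a real Terraform value.
  config = jsondecode(file("${path.module}/config.json"))

  # csvdecode: load a list of objects from tabular data, e.g. a
  # users.csv with a "name,role" header row followed by data rows.
  users = csvdecode(file("${path.module}/users.csv"))

  # formatdate: machine-oriented timestamp strings.
  build_date = formatdate("YYYY-MM-DD", timestamp())

  # reverse works on lists; strrev works on strings.
  reversed_list   = reverse(["a", "b", "c"]) # ["c", "b", "a"]
  reversed_string = strrev("abc")            # "cba"
}

# templatefile: render an external template without the Template provider.
# greeting.tmpl might contain, using the extended template syntax:
#   %{ for name in names ~}
#   Hello, ${name}!
#   %{ endfor ~}
output "greetings" {
  value = templatefile("${path.module}/greeting.tmpl", {
    names = local.users[*].name
  })
}
```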
- * provider/google: Correct the incorrect instance group manager URL returned from GKE ([#4336](https://github.com/hashicorp/terraform/issues/4336)) - * provider/google: Fix a plan/apply cycle in IAM policies ([#12387](https://github.com/hashicorp/terraform/issues/12387)) - * provider/google: Fix a plan/apply cycle in forwarding rules when only a single port is specified ([#12662](https://github.com/hashicorp/terraform/issues/12662)) +* A new `pg` state storage backend allows storing state in a PostgreSQL database; a minimal configuration sketch follows this list. -## 0.9.0-beta2 (March 2, 2017) +* The `azurerm` state storage backend supports new authentication mechanisms, custom resource manager endpoints, and HTTP proxies. -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/azurerm: scale_sets `os_profile_master_password` now marked as sensitive - * provider/azurerm: sql_server `administrator_login_password` now marked as sensitive - * provider/google: storage buckets have been updated with the new storage classes. The old classes will continue working as before, but should be migrated as soon as possible, as there's no guarantee they'll continue working forever. ([#12044](https://github.com/hashicorp/terraform/issues/12044)) - * provider/dnsimple: Provider has been upgraded to APIv2 therefore, you will need to use the APIv2 auth token - -FEATURES: - - * **State Environments:** You can now create named "environments" for states. This allows you to manage distinct infrastructure resources from the same configuration. - * helper/schema: Added Timeouts to allow Provider/Resource developers to expose configurable timeouts for actions ([#12311](https://github.com/hashicorp/terraform/issues/12311)) - -IMPROVEMENTS: - - * core: "terraformrc" can contain env var references with $FOO ([#11929](https://github.com/hashicorp/terraform/issues/11929)) - * command: Display resource state ID in refresh/plan/destroy output ([#12261](https://github.com/hashicorp/terraform/issues/12261)) - * provider/aws: AWS Lambda DeadLetterConfig support ([#12188](https://github.com/hashicorp/terraform/issues/12188)) - * provider/azurerm: Mark the azurerm_scale_set machine password as sensitive ([#11982](https://github.com/hashicorp/terraform/issues/11982)) - * provider/azurerm: Mark the azurerm_sql_server admin password as sensitive ([#12004](https://github.com/hashicorp/terraform/issues/12004)) - * provider/dnsimple: Upgrade DNSimple provider to API v2 ([#10760](https://github.com/hashicorp/terraform/issues/10760)) - -BUG FIXES: +* The `s3` state storage backend now supports `credential_source` in AWS configuration files and the new AWS regions `eu-north-1` and `ap-east-1`, and includes several other improvements previously made in the `aws` provider. - * core: targeting will remove untargeted providers ([#12050](https://github.com/hashicorp/terraform/issues/12050)) - * core: doing a map lookup in a resource config with a computed set no longer crashes ([#12210](https://github.com/hashicorp/terraform/issues/12210)) +* The `swift` state storage backend now supports locking and workspaces.
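For the new `pg` backend, here is a minimal configuration sketch, assuming the backend's `conn_str` setting; the connection string itself is a placeholder:

```hcl
terraform {
  backend "pg" {
    # Placeholder connection string; supply real credentials out of band,
    # for example via `terraform init -backend-config=...` rather than
    # committing them to version control.
    conn_str = "postgres://user:pass@db.example.com/terraform_backend"
  }
}
```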
-0.9.0-beta1 FIXES: - - * core: backends are validated to not contain interpolations ([#12067](https://github.com/hashicorp/terraform/issues/12067)) - * core: fix local state locking on Windows ([#12059](https://github.com/hashicorp/terraform/issues/12059)) - * core: destroy provisioners dependent on module variables work ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: resource destruction happens after dependent resources' destroy provisioners ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: invalid resource attribute interpolation in a destroy provisioner errors ([#12063](https://github.com/hashicorp/terraform/issues/12063)) - * core: legacy backend loading of Consul now works properly ([#12320](https://github.com/hashicorp/terraform/issues/12320)) - * command/init: allow unsetting a backend properly ([#11988](https://github.com/hashicorp/terraform/issues/11988)) - * command/apply: fix crash that could happen with an empty directory ([#11989](https://github.com/hashicorp/terraform/issues/11989)) - * command/refresh: fix crash when no configs were in the pwd ([#12178](https://github.com/hashicorp/terraform/issues/12178)) - * command/{state,taint}: work properly with backend state ([#12155](https://github.com/hashicorp/terraform/issues/12155)) - * providers/terraform: remote state data source works with new backends ([#12173](https://github.com/hashicorp/terraform/issues/12173)) - -## 0.9.0-beta1 (February 15, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Once an environment is updated to use the new "remote backend" feature - (from a prior remote state), it cannot be used with prior Terraform versions. - Remote backends themselves are fully backwards compatible with prior - Terraform versions. - * provider/aws: `aws_db_instance` now defaults to making a final snapshot on delete - * provider/aws: `aws_rds_cluster` now defaults to making a final snapshot on delete - * provider/aws: `aws_redshift_cluster` now defaults to making a final snapshot on delete - * provider/aws: Deprecated fields `kinesis_endpoint` & `dynamodb_endpoint` were removed. Use `kinesis` & `dynamodb` inside the `endpoints` block instead. ([#11778](https://github.com/hashicorp/terraform/issues/11778)) - * provider/datadog: `datadog_monitor` now defaults `notify_no_data` to `false` as per the datadog API - -FEATURES: - - * **Remote Backends:** This is a successor to "remote state" and includes - file-based configuration, an improved setup process (just run `terraform init`), - no more local caching of remote state, and more. ([#11286](https://github.com/hashicorp/terraform/issues/11286)) - * **Destroy Provisioners:** Provisioners can now be configured to run - on resource destruction. ([#11329](https://github.com/hashicorp/terraform/issues/11329)) - * **State Locking:** State will be automatically locked when supported by the backend. - Backends supporting locking in this release are Local, S3 (via DynamoDB), and Consul. ([#11187](https://github.com/hashicorp/terraform/issues/11187)) - -IMPROVEMENTS: - - * core: Data source values can now be used as part of a `count` calculation. ([#11482](https://github.com/hashicorp/terraform/issues/11482)) - * command: CLI args can be specified via env vars. 
Specify `TF_CLI_ARGS` or `TF_CLI_ARGS_name` (where name is the name of a command) to specify additional CLI args ([#11922](https://github.com/hashicorp/terraform/issues/11922)) - * command/init: previous behavior is retained, but init now also configures - the new remote backends as well as downloads modules. It is the single - command to initialize a new or existing Terraform configuration. - * provisioners: All provisioners now respond very quickly to interrupts for - fast cancellation. ([#10934](https://github.com/hashicorp/terraform/issues/10934)) - * provider/aws: Set aws_db_cluster to snapshot by default ([#11668](https://github.com/hashicorp/terraform/issues/11668)) - * provider/aws: Enable final snapshots for aws_rds_cluster by default ([#11694](https://github.com/hashicorp/terraform/issues/11694)) - * provider/aws: Enable snapshotting by default on aws_redshift_cluster ([#11695](https://github.com/hashicorp/terraform/issues/11695)) - * provider/vault: read vault token from `~/.vault-token` as a fallback for the - `VAULT_TOKEN` environment variable. ([#11529](https://github.com/hashicorp/terraform/issues/11529)) - -BUG FIXES: - - * provider/datadog: Default notify_no_data on datadog_monitor to false ([#11903](https://github.com/hashicorp/terraform/issues/11903)) - -## 0.8.8 (March 2, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - * provider/aws: Potential breaking change for `root_block_device` ([#12379](https://github.com/hashicorp/terraform/issues/12379)) - -FEATURES: - - * **New Provider:** `spotinst` ([#5001](https://github.com/hashicorp/terraform/issues/5001)) - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Data Source:** `aws_sns_topic` ([#11752](https://github.com/hashicorp/terraform/issues/11752)) - * **New Data Source:** `openstack_images_image_v2` ([#12097](https://github.com/hashicorp/terraform/issues/12097)) - * **New Resource:** `aws_elastic_beanstalk_application_version` ([#5770](https://github.com/hashicorp/terraform/issues/5770)) - * **New Resource:** `aws_cloudwatch_log_destination` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_cloudwatch_log_destination_policy` ([#11940](https://github.com/hashicorp/terraform/issues/11940)) - * **New Resource:** `aws_codepipeline` ([#11814](https://github.com/hashicorp/terraform/issues/11814)) - * **New Resource:** `aws_egress_only_internet_gateway` ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * **New Resource:** `datadog_user` ([#12268](https://github.com/hashicorp/terraform/issues/12268)) - * **New Resource:** `digitalocean_loadbalancer` ([#12077](https://github.com/hashicorp/terraform/issues/12077)) - * **New Resource:** `openstack_images_image_v2` ([#11942](https://github.com/hashicorp/terraform/issues/11942)) - * **New Resource:** `openstack_compute_floatingip_associate_v2` ([#12190](https://github.com/hashicorp/terraform/issues/12190)) - -IMPROVEMENTS: - - * provider/aws: Add support for AWS EBS Elastic Volumes ([#11981](https://github.com/hashicorp/terraform/issues/11981)) - * provider/aws: Allow aws_instances to be resized rather than forcing a new instance ([#11998](https://github.com/hashicorp/terraform/issues/11998)) - * provider/aws: Report bucket name in S3 Error message 
([#12122](https://github.com/hashicorp/terraform/issues/12122)) - * provider/aws: Implement IPV6 Support for ec2 / VPC ([#10538](https://github.com/hashicorp/terraform/issues/10538)) - * provider/aws: Add support for import of aws_elasticsearch_domain ([#12330](https://github.com/hashicorp/terraform/issues/12330)) - * provider/aws: improve redshift cluster validation ([#12313](https://github.com/hashicorp/terraform/issues/12313)) - * provider/aws: Support IAM role attachment and replacement for existing EC2 instance ([#11852](https://github.com/hashicorp/terraform/issues/11852)) - * provider/azurerm: Auto base64encode virtual_machine custom data ([#12164](https://github.com/hashicorp/terraform/issues/12164)) - * provider/datadog: add support for new host delay to the datadog_monitor resource ([#11975](https://github.com/hashicorp/terraform/issues/11975)) - * provider/datadog: Upgrade to Datadog API v2 ([#12098](https://github.com/hashicorp/terraform/issues/12098)) - * provider/fastly: Make Backends optional if used in VCL ([#12025](https://github.com/hashicorp/terraform/issues/12025)) - * provider/fastly: Add support for custom `response_object` ([#12032](https://github.com/hashicorp/terraform/issues/12032)) - * provider/google: Add support for maintenance window in `sql_database_instance` ([#12042](https://github.com/hashicorp/terraform/issues/12042)) - * provider/google: google_project supports billing account ([#11653](https://github.com/hashicorp/terraform/issues/11653)) - * provider/openstack: Don't allow floating IP and port ([#12099](https://github.com/hashicorp/terraform/issues/12099)) - * provider/openstack: Enable HTTP Logging ([#12089](https://github.com/hashicorp/terraform/issues/12089)) - * provider/openstack: Add Additional Targets for LBaaS v1 Member ([#12266](https://github.com/hashicorp/terraform/issues/12266)) - * provider/openstack: Redesign openstack_blockstorage_volume_attach_v2 ([#12071](https://github.com/hashicorp/terraform/issues/12071)) - * provider/pagerduty: Import support for service integrations ([#12141](https://github.com/hashicorp/terraform/issues/12141)) - * provider/pagerduty: Updated implementation of pagerduty_vendor & pagerduty_service_integration ([#12357](https://github.com/hashicorp/terraform/issues/12357)) - * provider/random_id: Add prefix attribute ([#12016](https://github.com/hashicorp/terraform/issues/12016)) - * provider/statuscake: Add support for Port in statuscake_test ([#11966](https://github.com/hashicorp/terraform/issues/11966)) - -BUG FIXES: - - * core: Fix a hang that could occur at the end of a Terraform command with custom plugins used ([#12048](https://github.com/hashicorp/terraform/issues/12048)) - * command/fmt: Fix incorrect formatting with single line object following complex object ([#12049](https://github.com/hashicorp/terraform/issues/12049)) - * command/state: `-backup` flags work with `mv` and `rm` ([#12156](https://github.com/hashicorp/terraform/issues/12156)) - * provider/aws: add bucket name to delete error notification ([#11952](https://github.com/hashicorp/terraform/issues/11952)) - * provider/aws: Use proper Set for source.Auth in resource_aws_codebuild_project ([#11741](https://github.com/hashicorp/terraform/issues/11741)) - * provider/aws: aws_ecs_service should output service name along with err 
([#12072](https://github.com/hashicorp/terraform/issues/12072)) - * provider/aws: Add VRRP to allowed protocols in network ACL rules ([#12107](https://github.com/hashicorp/terraform/issues/12107)) - * provider/aws: Add owner_account option to aws_redshift_cluster ([#12062](https://github.com/hashicorp/terraform/issues/12062)) - * provider/aws: Update of inspector_assessment_target should use ARN not Name ([#12115](https://github.com/hashicorp/terraform/issues/12115)) - * provider/aws: Fix the panic in ssm_association with parameters ([#12215](https://github.com/hashicorp/terraform/issues/12215)) - * provider/aws: Fix update of environment_variable in codebuild_project ([#12169](https://github.com/hashicorp/terraform/issues/12169)) - * provider/aws: Refresh aws_autoscaling_schedule from state when autoscaling_group not found ([#12312](https://github.com/hashicorp/terraform/issues/12312)) - * provider/aws: No longer ForceNew resource on lambda_function runtime update ([#12329](https://github.com/hashicorp/terraform/issues/12329)) - * provider/aws: reading multiple pages of aws_efs_file_system tags ([#12328](https://github.com/hashicorp/terraform/issues/12328)) - * provider/aws: Refresh cloudwatch log subscription filter on 404 ([#12333](https://github.com/hashicorp/terraform/issues/12333)) - * provider/aws: more details on which s3 bucket had an error ([#12314](https://github.com/hashicorp/terraform/issues/12314)) - * provider/azurerm: Ignore case on protocol and allocation types ([#12176](https://github.com/hashicorp/terraform/issues/12176)) - * provider/cloudflare: add validation for proxied record types ([#11993](https://github.com/hashicorp/terraform/issues/11993)) - * provider/datadog: Adding default values to datadog_monitor ([#12168](https://github.com/hashicorp/terraform/issues/12168)) - * provider/google: make local_traffic_selector computed ([#11631](https://github.com/hashicorp/terraform/issues/11631)) - * provider/google: Write the raw disk encryption key in the state file to avoid diffs on plan ([#12068](https://github.com/hashicorp/terraform/issues/12068)) - * provider/google: fix url map test and update logic ([#12317](https://github.com/hashicorp/terraform/issues/12317)) - * provider/openstack: Rename provider to loadbalancer_provider ([#12239](https://github.com/hashicorp/terraform/issues/12239)) - * provider/pagerduty: Setting incident_urgency_rule as optional ([#12211](https://github.com/hashicorp/terraform/issues/12211)) - * provider/profitbricks: Fixing how primary_nic is added to profitbricks server ([#12197](https://github.com/hashicorp/terraform/issues/12197)) - * state/azure: add environment option for non-public cloud usage ([#12364](https://github.com/hashicorp/terraform/issues/12364)) - -## 0.8.7 (February 15, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `kinesis_endpoint` & `dynamodb_endpoint` fields in the provider schema were deprecated in favour of `kinesis` & `dynamodb` inside the `endpoints` block. 
Deprecated fields will be removed in 0.9 ([#11768](https://github.com/hashicorp/terraform/issues/11768)) - -FEATURES: - - * **New Interpolation:** `slice` ([#9729](https://github.com/hashicorp/terraform/issues/9729)) - * **New Provider:** `arukas` ([#11171](https://github.com/hashicorp/terraform/issues/11171)) - * **New Data Source:** `aws_db_instance` ([#11717](https://github.com/hashicorp/terraform/issues/11717)) - * **New Data Source:** `aws_vpn_gateway` ([#11886](https://github.com/hashicorp/terraform/issues/11886)) - * **New Data Source:** `consul_agent_self`, `consul_catalog_service`, `consul_catalog_services`, `consul_catalog_nodes` ([#11729](https://github.com/hashicorp/terraform/pull/11729)) - * **New Data Source:** `google_compute_zones` ([#11954](https://github.com/hashicorp/terraform/issues/11954)) - * **New Resource:** `aws_elasticsearch_domain_policy` ([#8648](https://github.com/hashicorp/terraform/issues/8648)) - * **New Resource:** `aws_vpc_peering_connection_accepter` ([#11505](https://github.com/hashicorp/terraform/issues/11505)) - * **New Resource:** `aws_config_config_rule` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_configuration_recorder_status` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `aws_config_delivery_channel` ([#5850](https://github.com/hashicorp/terraform/issues/5850)) - * **New Resource:** `azurerm_container_service` ([#10820](https://github.com/hashicorp/terraform/issues/10820)) - * **New Resource:** `vault_policy` ([#10980](https://github.com/hashicorp/terraform/issues/10980)) - -IMPROVEMENTS: - - * provider/aws: Update aws_ssm_document to include `document_type`, `latest_version` and `default_version` ([#11671](https://github.com/hashicorp/terraform/issues/11671)) - * provider/aws: Support import of aws_opsworks_instance ([#11783](https://github.com/hashicorp/terraform/issues/11783)) - * provider/aws Add S3 bucket object tag support ([#11344](https://github.com/hashicorp/terraform/issues/11344)) - * provider/aws: Add validation for aws_iam_role ([#11915](https://github.com/hashicorp/terraform/issues/11915)) - * provider/fastly Allows for conditional settings across fastly ([#11843](https://github.com/hashicorp/terraform/issues/11843)) - * provider/openstack: Allow OpenStack SSL certs + keys to take path or content ([#10271](https://github.com/hashicorp/terraform/issues/10271)) - * provider/pagerduty: Add support for `incident_urgency_rule`, `support_hours` and `scheduled_actions` to `pagerduty_service` ([#11856](https://github.com/hashicorp/terraform/issues/11856)) - * provider/rancher: parse Rancher client cli.json config file ([#11658](https://github.com/hashicorp/terraform/issues/11658)) - * provider/vault: Use Vault api.DefaultConfig() ([#11523](https://github.com/hashicorp/terraform/issues/11523)) - -Bug FIXES: - - * core: resources that depend on create-before-destroy resources don't create cycles ([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: create-before-destroy resources with a count > 1 create proper edges ([#11753](https://github.com/hashicorp/terraform/issues/11753)) - * core: fix "diffs didn't match 
issue" for removing or empty collections that force new ([#11732](https://github.com/hashicorp/terraform/issues/11732)) - * core: module sources ended in archive extensions without a "." won't be treated as archives ([#11438](https://github.com/hashicorp/terraform/issues/11438)) - * core: destroy ordering of resources within modules is correct ([#11765](https://github.com/hashicorp/terraform/issues/11765)) - * core: Fix crash if count interpolates into a non-int ([#11864](https://github.com/hashicorp/terraform/issues/11864)) - * core: Targeting a module will properly exclude untargeted module outputs ([#11921](https://github.com/hashicorp/terraform/issues/11921)) - * state/remote/s3: Fix Bug with Assume Role for Federated IAM Account ([#10067](https://github.com/hashicorp/terraform/issues/10067)) - * provider/aws: Fix security_group_rule resource timeout errors ([#11809](https://github.com/hashicorp/terraform/issues/11809)) - * provider/aws: Fix diff suppress function for aws_db_instance ([#11909](https://github.com/hashicorp/terraform/issues/11909)) - * provider/aws: Fix default values for AMI volume size ([#11842](https://github.com/hashicorp/terraform/issues/11842)) - * provider/aws: Fix aws_db_event_subscription import ([#11744](https://github.com/hashicorp/terraform/issues/11744)) - * provider/aws: Respect 400 returned from AWS API on RDS Cluster termination ([#11795](https://github.com/hashicorp/terraform/issues/11795)) - * provider/aws: Raise the codebuild_project create timeout ([#11777](https://github.com/hashicorp/terraform/issues/11777)) - * provider/aws: Make aws_dms_endpoint database_name optional ([#11792](https://github.com/hashicorp/terraform/issues/11792)) - * provider/aws: Bump Create and Delete timeouts to 60 mins on directory_service ([#11793](https://github.com/hashicorp/terraform/issues/11793)) - * provider/aws: aws_codecommit_trigger fix typo that causes serialization to fail when events is non-empty ([#11839](https://github.com/hashicorp/terraform/issues/11839)) - * provider/aws: Fix bug to allow update of maintenance_window in elasticache_replication_group ([#11850](https://github.com/hashicorp/terraform/issues/11850)) - * provider/azurerm: Don't push an empty set of ssh keys to virtual machine or they cannot be ammended ([#11804](https://github.com/hashicorp/terraform/issues/11804)) - * provider/azurerm: Refresh from state when VM Extension Resource not found ([#11894](https://github.com/hashicorp/terraform/issues/11894)) - * provider/cloudstack: Ensure consistent hashes of `cloudstack_port_forward` forward items. 
([#11546](https://github.com/hashicorp/terraform/issues/11546)) - * provider/google: set additional_zones to computed and disallow the original zone from appearing in the list ([#11650](https://github.com/hashicorp/terraform/issues/11650)) - * provider/google: set subnetwork_project to computed ([#11646](https://github.com/hashicorp/terraform/issues/11646)) - * provider/openstack BlockStorage v1 availability_zone Fix ([#11949](https://github.com/hashicorp/terraform/issues/11949)) - -## 0.8.6 (07 February 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: `aws_appautoscaling_policy` no longer has default values for `scalable_dimension` and `service_namespace` - - -FEATURES: - - * **New Data Source:** `aws_kms_secret` ([#11460](https://github.com/hashicorp/terraform/issues/11460)) - * **New Data Source:** `aws_ecs_task_definition` ([#8509](https://github.com/hashicorp/terraform/issues/8509)) - * **New Data Source:** `aws_ecs_cluster` ([#11558](https://github.com/hashicorp/terraform/issues/11558)) - * **New Data Source:** `aws_partition` ([#11675](https://github.com/hashicorp/terraform/issues/11675)) - * **New Data Source:** `pagerduty_escalation_policy` ([#11616](https://github.com/hashicorp/terraform/issues/11616)) - * **New Data Source:** `pagerduty_schedule` ([#11614](https://github.com/hashicorp/terraform/issues/11614)) - * **New Data Source:** `profitbricks_datacenter` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_location` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Data Source:** `profitbricks_image` ([#11520](https://github.com/hashicorp/terraform/issues/11520)) - * **New Resource:** `aws_sfn_activity` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_sfn_state_machine` ([#11420](https://github.com/hashicorp/terraform/issues/11420)) - * **New Resource:** `aws_codebuild_project` ([#11560](https://github.com/hashicorp/terraform/issues/11560)) - * **New Resource:** `aws_dms_certificate` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_endpoint` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_instance` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `aws_dms_replication_subnet_group` ([#11122](https://github.com/hashicorp/terraform/issues/11122)) - * **New Resource:** `pagerduty_addon` ([#11620](https://github.com/hashicorp/terraform/issues/11620)) - - -IMPROVEMENTS: - - * core: Interaction with Atlas now supports the `ATLAS_TLS_NOVERIFY` environment variable ([#11576](https://github.com/hashicorp/terraform/issues/11576)) - * provider/aws: Add EBS Volume support for EMR Instance Groups ([#11411](https://github.com/hashicorp/terraform/issues/11411)) - * provider/aws: Add support for policy to AWS provider assume_role ([#11501](https://github.com/hashicorp/terraform/issues/11501)) - * provider/aws: Add support for more sns_topic_subscription parameters on import command ([#10408](https://github.com/hashicorp/terraform/issues/10408)) - * provider/aws: Add support for Sever Side 
Encryption with default S3 KMS key to `aws_s3_bucket_object` ([#11261](https://github.com/hashicorp/terraform/issues/11261)) - * provider/aws: Add support for Cross Region RDS Cluster Replica ([#11428](https://github.com/hashicorp/terraform/issues/11428)) - * provider/aws: Add sensitive attribute in master_password ([#11584](https://github.com/hashicorp/terraform/issues/11584)) - * provider/aws: Application Auto Scaling now supports scaling an Amazon EC2 Spot fleet ([#8697](https://github.com/hashicorp/terraform/issues/8697)) - * provider/aws: Add tag support to DynamoDb tables ([#11617](https://github.com/hashicorp/terraform/issues/11617)) - * provider/aws: Provide the certificate ID in the aws data source ([#11693](https://github.com/hashicorp/terraform/issues/11693)) - * provider/aws: Wait for instance_profile creation to complete ([#11678](https://github.com/hashicorp/terraform/issues/11678)) - * provider/azurerm: Add support for scale sets overprovision ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: support import for load balancer and sub resources ([#11610](https://github.com/hashicorp/terraform/issues/11610)) - * provider/fastly: Adds papertrail logging ([#11491](https://github.com/hashicorp/terraform/issues/11491)) - * provider/fastly: Adds format_version for s3logging ([#11725](https://github.com/hashicorp/terraform/issues/11725)) - * provider/fastly: Adds healthcheck service ([#11709](https://github.com/hashicorp/terraform/issues/11709)) - * provider/google: allow instance group managers in region other than project ([#11294](https://github.com/hashicorp/terraform/issues/11294)) - * provider/google: Add second generation disk specification options ([#11571](https://github.com/hashicorp/terraform/issues/11571)) - * provider/google: remote_traffic_selector for google_compute_vpn_tunnel ([#11020](https://github.com/hashicorp/terraform/issues/11020)) - * provider/nomad: Update jobspec dependency to allow parsing parameterized nomad jobfiles ([#11691](https://github.com/hashicorp/terraform/issues/11691)) - * provider/google: No default root user for SQL ([#11590](https://github.com/hashicorp/terraform/issues/11590)) - * provider/opsgenie: Descriptions for Teams ([#11391](https://github.com/hashicorp/terraform/issues/11391)) - * provider/rancher: rancher_registration_token add image parameter ([#11551](https://github.com/hashicorp/terraform/issues/11551)) - * provider/rancher: allow for importing resources using environment ID to target ([#11688](https://github.com/hashicorp/terraform/issues/11688)) - -BUG FIXES: - - * core: Remove missed subfields when parent list is removed ([#11498](https://github.com/hashicorp/terraform/issues/11498)) - * command/fmt: Trailing blocks of comments at the end of files are formatted properly ([#11585](https://github.com/hashicorp/terraform/issues/11585)) - * provider/aws: Fix issue with `path` not updated when modifying AWS API Gateway Resource ([#11443](https://github.com/hashicorp/terraform/issues/11443)) - * provider/aws: Fix AWS Lambda Qualifier Regexp for `aws_lambda_permission` ([#11383](https://github.com/hashicorp/terraform/issues/11383)) - * provider/aws: allow destroy of LB stickiness policy with missing LB ([#11462](https://github.com/hashicorp/terraform/issues/11462)) - * provider/aws: ECS Placement 
constraints fix ([#11475](https://github.com/hashicorp/terraform/issues/11475)) - * provider/aws: retry kms_key CreateKey if arn in policy not yet seen ([#11509](https://github.com/hashicorp/terraform/issues/11509)) - * provider/aws: Fix ALB Listener Rule Import ([#1174](https://github.com/hashicorp/terraform/issues/1174)) - * provider/aws: Fix issue with ECS Placement Strat. and type casing ([#11565](https://github.com/hashicorp/terraform/issues/11565)) - * provider/aws: aws_route53_record import error processing ([#11603](https://github.com/hashicorp/terraform/issues/11603)) - * provider/aws: Fix panic in aws_rds_cluster missing parameter error message ([#11600](https://github.com/hashicorp/terraform/issues/11600)) - * provider/aws: Succeed creating aws_volume_attachment if identical attachment exists ([#11060](https://github.com/hashicorp/terraform/issues/11060)) - * provider/aws: Guard against panic in aws_vpc_endpoint_association ([#11613](https://github.com/hashicorp/terraform/issues/11613)) - * provider/aws: Allow root volume size changes in aws_instance ([#11619](https://github.com/hashicorp/terraform/issues/11619)) - * provider/aws: Fix spot instance request block device configs ([#11649](https://github.com/hashicorp/terraform/issues/11649)) - * provider/aws: Fix validation issues for onceAWeek and onceADay validation functions ([#11679](https://github.com/hashicorp/terraform/issues/11679)) - * provider/aws: Return route_table_id from aws_route_table data source ([#11703](https://github.com/hashicorp/terraform/issues/11703)) - * provider/aws: validate aws_alb_target_group name is less than 32 characters ([#11699](https://github.com/hashicorp/terraform/issues/11699)) - * provider/azurerm: Scale Sets Load balancer pools should not be computed ([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: Scale Sets ip configuration handling and update support for load balancer backend pools. 
([#11516](https://github.com/hashicorp/terraform/issues/11516)) - * provider/azurerm: check if lb sub resources exist when reading ([#11553](https://github.com/hashicorp/terraform/issues/11553)) - * provider/google: Fix master_instance_name to prevent slave rebuilds ([#11477](https://github.com/hashicorp/terraform/issues/11477)) - * provider/google: Refresh google_compute_instance machine_type on read ([#11645](https://github.com/hashicorp/terraform/issues/11645)) - * provider/google: Added forceNew on accessConfig in google_compute_instance_template ([#11548](https://github.com/hashicorp/terraform/issues/11548)) - * provider/ignition: Allow to add authorized keys without user creation ([#11406](https://github.com/hashicorp/terraform/issues/11406)) - * provider/ignition: mount and path are mutually exclusive ([#11409](https://github.com/hashicorp/terraform/issues/11409)) - * provider/ns1: Fix "use_client_subnet" in ns1_record ([#11368](https://github.com/hashicorp/terraform/issues/11368)) - * provider/openstack: Remove Default Security Group Rules on Create ([#11466](https://github.com/hashicorp/terraform/issues/11466)) - * provider/pagerduty: Allow timeouts to be disabled (pagerduty_service) ([#11483](https://github.com/hashicorp/terraform/issues/11483)) - * provider/rancher: Use environment specific client for accessing resources ([#11503](https://github.com/hashicorp/terraform/issues/11503)) - * provider/rancher: Refresh rancher stack from state on delete ([#11539](https://github.com/hashicorp/terraform/issues/11539)) - * provider/rancher: Refresh rancher token and registry from state on not found ([#11543](https://github.com/hashicorp/terraform/issues/11543)) - * provider/rancher: return error when Rancher template not found ([#11544](https://github.com/hashicorp/terraform/issues/11544)) - * provider/rancher: rancher_stack set docker_compose and rancher_compose ([#11550](https://github.com/hashicorp/terraform/issues/11550)) - * provider/rancher: Handle deleted/purged resources from Rancher ([#11607](https://github.com/hashicorp/terraform/issues/11607)) - * provider/statuscake: Remove computed from statuscake_test timeout parameter ([#11541](https://github.com/hashicorp/terraform/issues/11541)) - * provider/vsphere: vSphere virtual machine don't ignore VM power on errors ([#11604](https://github.com/hashicorp/terraform/issues/11604)) - * provisioner/remote-exec: Revert change in 0.8.5 that treated each line as a script since that doesn't work for stateful scripts. ([#11692](https://github.com/hashicorp/terraform/issues/11692)) - * provisioner/chef: Attributes JSON coming from computed source validates ([#11502](https://github.com/hashicorp/terraform/issues/11502)) - -## 0.8.5 (26 January 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/aws: We no longer prefix an ECR repository address with `https://` - * provider/google: `google_project` has undergone significant changes. Existing configs and state should continue to work as they always have, but new configs and state will exhibit some new behaviour, including actually creating and deleting projects, instead of just referencing them. See https://www.terraform.io/docs/providers/google/r/google_project.html for more details. 
- -FEATURES: - - * **New Data Source:** `aws_autoscaling_groups` ([#11303](https://github.com/hashicorp/terraform/issues/11303)) - * **New Data Source:** `aws_elb_hosted_zone_id ` ([#11027](https://github.com/hashicorp/terraform/issues/11027)) - * **New Data Source:** `aws_instance` ([#11272](https://github.com/hashicorp/terraform/issues/11272)) - * **New Data Source:** `aws_canonical_user_id` ([#11332](https://github.com/hashicorp/terraform/issues/11332)) - * **New Data Source:** `aws_vpc_endpoint` ([#11323](https://github.com/hashicorp/terraform/issues/11323)) - * **New Provider:** `profitbricks` ([#7943](https://github.com/hashicorp/terraform/issues/7943)) - * **New Provider:** `alicloud` ([#11235](https://github.com/hashicorp/terraform/issues/11235)) - * **New Provider:** `ns1` ([#10782](https://github.com/hashicorp/terraform/issues/10782)) - * **New Resource:** `aws_inspector_assessment_target` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_assessment_template` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `aws_inspector_resource_group` ([#11217](https://github.com/hashicorp/terraform/issues/11217)) - * **New Resource:** `google_project_iam_policy` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Resource:** `google_project_services` ([#10425](https://github.com/hashicorp/terraform/issues/10425)) - * **New Interpolation Function:** `pathexpand()` ([#11277](https://github.com/hashicorp/terraform/issues/11277)) - -IMPROVEMENTS: - - * command/fmt: Single line objects (such as `variable "foo" {}`) aren't separated by newlines - * provider/aws: Add 'route_table_id' to route_table data source ([#11157](https://github.com/hashicorp/terraform/pull/11157)) - * provider/aws: Add Support for aws_cloudwatch_metric_alarm extended statistic ([#11193](https://github.com/hashicorp/terraform/issues/11193)) - * provider/aws: Make the type of a route53_record modifiable without recreating the resource ([#11164](https://github.com/hashicorp/terraform/issues/11164)) - * provider/aws: Add Placement Strategy to aws_ecs_service resource ([#11201](https://github.com/hashicorp/terraform/issues/11201)) - * provider/aws: Add support for placement_constraint to aws_ecs_service ([#11242](https://github.com/hashicorp/terraform/issues/11242)) - * provider/aws: allow ALB target group stickiness to be enabled/disabled ([#11251](https://github.com/hashicorp/terraform/issues/11251)) - * provider/aws: ALBs now wait for provisioning to complete before proceeding ([#11333](https://github.com/hashicorp/terraform/issues/11333)) - * provider/aws: Add support for setting MSSQL Timezone in aws_db_instance ([#11247](https://github.com/hashicorp/terraform/issues/11247)) - * provider/aws: CloudFormation YAML template support ([#11121](https://github.com/hashicorp/terraform/issues/11121)) - * provider/aws: Remove hardcoded https from the ecr repository ([#11307](https://github.com/hashicorp/terraform/issues/11307)) - * provider/aws: Implement CloudFront Lambda Function Associations ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Remove MaxFrameRate default on ElasticTranscoderPreset ([#11340](https://github.com/hashicorp/terraform/issues/11340)) - * provider/aws: Allow ARN 
Identifier to be set for different partitions ([#11359](https://github.com/hashicorp/terraform/issues/11359)) - * provider/aws: Allow bypassing region validation ([#11358](https://github.com/hashicorp/terraform/issues/11358)) - * provider/aws: Added an s3_bucket domain name attribute ([#10088](https://github.com/hashicorp/terraform/issues/10088)) - * provider/aws: Add DiffSuppressFunction to aws_db_instance's engine_version ([#11369](https://github.com/hashicorp/terraform/issues/11369)) - * provider/archive: Adding support for multiple source contents ([#11271](https://github.com/hashicorp/terraform/issues/11271)) - * provider/azurerm: add caching support for virtual_machine data_disks ([#11142](https://github.com/hashicorp/terraform/issues/11142)) - * provider/azurerm: make lb sub resources idempotent ([#11128](https://github.com/hashicorp/terraform/issues/11128)) - * provider/cloudflare: Add verification for record types and content ([#11197](https://github.com/hashicorp/terraform/issues/11197)) - * provider/datadog: Add aggregator method to timeboard graph resource ([#11206](https://github.com/hashicorp/terraform/issues/11206)) - * provider/fastly: Add request_condition to backend definition ([#11238](https://github.com/hashicorp/terraform/issues/11238)) - * provider/google: Add subnetwork_project field to enable cross-project networking in instance templates ([#11110](https://github.com/hashicorp/terraform/issues/11110)) - * provider/google: Add support for encrypting a disk ([#11167](https://github.com/hashicorp/terraform/issues/11167)) - * provider/google: Add support for session_affinity to google_compute_region_backend_service ([#11228](https://github.com/hashicorp/terraform/issues/11228)) - * provider/google: Allow additional zones to be configured in GKE ([#11018](https://github.com/hashicorp/terraform/issues/11018)) - * provider/ignition: Allow empty dropin and content for systemd_units ([#11327](https://github.com/hashicorp/terraform/issues/11327)) - * provider/openstack: LoadBalancer Security Groups ([#11074](https://github.com/hashicorp/terraform/issues/11074)) - * provider/openstack: Volume Attachment Updates ([#11285](https://github.com/hashicorp/terraform/issues/11285)) - * provider/scaleway: improve bootscript data source ([#11183](https://github.com/hashicorp/terraform/issues/11183)) - * provider/statuscake: Add support for StatusCake confirmation servers ([#11179](https://github.com/hashicorp/terraform/issues/11179)) - * provider/statuscake: Add support for Updating StatusCake contact_ids ([#7115](https://github.com/hashicorp/terraform/issues/7115)) - * provisioner/chef: Add support for named run-lists when using policyfiles ([#11215](https://github.com/hashicorp/terraform/issues/11215)) - * core: Add basic HTTP Auth for remote state backend ([#11301](https://github.com/hashicorp/terraform/issues/11301)) - -BUG FIXES: - - * command/fmt: Multiple `#` comments won't be separated by newlines. ([#11209](https://github.com/hashicorp/terraform/issues/11209)) - * command/fmt: Lists with a heredoc element that starts on the same line as the opening brace are formatted properly. 
([#11208](https://github.com/hashicorp/terraform/issues/11208)) - * command/import: Provider configuration inheritance into modules works properly ([#11393](https://github.com/hashicorp/terraform/issues/11393)) - * command/import: Update help text to note that `-var` and `-var-file` work - * provider/aws: Fix panic when querying VPC's main route table via data source ([#11134](https://github.com/hashicorp/terraform/issues/11134)) - * provider/aws: Allow creating aws_codecommit repository outside of us-east-1 ([#11177](https://github.com/hashicorp/terraform/issues/11177)) - * provider/aws: Fix issue destroying or updating CloudFront due to missing Lambda Function Associations parameters ([#11291](https://github.com/hashicorp/terraform/issues/11291)) - * provider/aws: Correct error messages are now returned if an `aws_autoscaling_lifecycle_hook` fails during creation ([#11360](https://github.com/hashicorp/terraform/issues/11360)) - * provider/aws: Fix issue updating/destroying Spot Fleet requests when using `terminate_instances_with_expiration` ([#10953](https://github.com/hashicorp/terraform/issues/10953)) - * provider/azurerm: use configured environment for storage clients ([#11159](https://github.com/hashicorp/terraform/issues/11159)) - * provider/google: removes region param from google_compute_backend_service ([#10903](https://github.com/hashicorp/terraform/issues/10903)) - * provider/ignition: allowing empty systemd.content when a dropin is provided ([#11216](https://github.com/hashicorp/terraform/issues/11216)) - * provider/openstack: Increase deletion timeout for router interfaces ([#11250](https://github.com/hashicorp/terraform/issues/11250)) - * provider/openstack: Fix Instance Metadata Deletion ([#11252](https://github.com/hashicorp/terraform/issues/11252)) - * provider/scaleway: Rename Scaleway provider parameters to match more closely to the API ([#10874](https://github.com/hashicorp/terraform/issues/10874)) - * provider/vault: Remove user input for optional vault provider fields ([#11082](https://github.com/hashicorp/terraform/issues/11082)) - * provider/vsphere: Set deviceID to 0 if one 1 network interface in vsphere_virtual_machine ([#8276](https://github.com/hashicorp/terraform/issues/8276)) - * provisioner/remote-exec: fail on first inline script with bad exit code ([#11155](https://github.com/hashicorp/terraform/issues/11155)) - -## 0.8.4 (January 11, 2017) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * We have removed the `Arukas` provider that was added in v0.8.3 for this release. Unfortunately we found the - new provider included a dependency that would not compile and run on Windows operating systems. For now the - provider has been removed and we hope to work to reintroduce it for all platforms in the near future. Going forward we will also be taking additional steps in our build testing to ensure Terraform builds on all platforms before release. 
- -## 0.8.3 (January 10, 2017) - -FEATURES: - - * **New Provider:** `Arukas` ([#10862](https://github.com/hashicorp/terraform/issues/10862)) - * **New Provider:** `Ignition` ([#6189](https://github.com/hashicorp/terraform/issues/6189)) - * **New Provider:** `OpsGenie` ([#11012](https://github.com/hashicorp/terraform/issues/11012)) - * **New Data Source:** `aws_vpc_peering_connection` ([#10913](https://github.com/hashicorp/terraform/issues/10913)) - * **New Resource:** `aws_codedeploy_deployment_config` ([#11062](https://github.com/hashicorp/terraform/issues/11062)) - * **New Resource:** `azurerm_container_registry` ([#10973](https://github.com/hashicorp/terraform/issues/10973)) - * **New Resource:** `azurerm_eventhub_authorization_rule` ([#10971](https://github.com/hashicorp/terraform/issues/10971)) - * **New Resource:** `azurerm_eventhub_consumer_group` ([#9902](https://github.com/hashicorp/terraform/issues/9902)) - -IMPROVEMENTS: - - * command/fmt: Show filename on parse error ([#10923](https://github.com/hashicorp/terraform/issues/10923)) - * provider/archive: `archive_file` now exports `output_md5` attribute in addition to existing SHA1 and Base64 SHA256 hashes. ([#10851](https://github.com/hashicorp/terraform/issues/10851)) - * provider/aws: Add `most_recent` to the `ebs_snapshot` data source ([#10986](https://github.com/hashicorp/terraform/issues/10986)) - * provider/aws: Add support for instance tenancy in `aws_opsworks_instance` ([#10885](https://github.com/hashicorp/terraform/issues/10885)) - * provider/aws: Added a validation for security group rule types ([#10864](https://github.com/hashicorp/terraform/issues/10864)) - * provider/aws: Add support for updating aws_emr_cluster parameters ([#11008](https://github.com/hashicorp/terraform/issues/11008)) - * provider/aws: Add Placement Constraints to `aws_ecs_task_definition` ([#11030](https://github.com/hashicorp/terraform/issues/11030)) - * provider/aws: Increasing timeout for redshift cluster creation to 75 minutes ([#11041](https://github.com/hashicorp/terraform/issues/11041)) - * provider/aws: Add support for content_handling to aws_api_gateway_integration_response ([#11002](https://github.com/hashicorp/terraform/issues/11002)) - * provider/aws: Add S3 bucket name validation ([#11116](https://github.com/hashicorp/terraform/issues/11116)) - * provider/aws: Add Route53 Record type validation ([#11119](https://github.com/hashicorp/terraform/issues/11119)) - * provider/azurerm: support non-public clouds ([#11026](https://github.com/hashicorp/terraform/issues/11026)) - * provider/azurerm: Azure resource providers which are already registered are no longer re-registered. ([#10991](https://github.com/hashicorp/terraform/issues/10991)) - * provider/docker: Add network create --internal flag support ([#10932](https://github.com/hashicorp/terraform/issues/10932)) - * provider/docker: Add support for a list of pull_triggers within the docker_image resource. 
([#10845](https://github.com/hashicorp/terraform/issues/10845)) - * provider/pagerduty: Add delete support to `pagerduty_service_integration` ([#10891](https://github.com/hashicorp/terraform/issues/10891)) - * provider/postgresql: Add permissions support to `postgresql_schema` as nested `policy` attributes ([#10808](https://github.com/hashicorp/terraform/issues/10808)) - -BUG FIXES: +### Bug Fixes - * core: Properly expand sets as lists from a flatmap ([#11042](https://github.com/hashicorp/terraform/issues/11042)) - * core: Disallow root modules named "root" as a temporary workaround ([#11099](https://github.com/hashicorp/terraform/issues/11099)) - * command/fmt: Lists of heredocs format properly ([#10947](https://github.com/hashicorp/terraform/issues/10947)) - * command/graph: Fix crash when `-type=legacy` ([#11095](https://github.com/hashicorp/terraform/issues/11095)) - * provider/aws: Guard against nil change output in `route53_zone` that causes panic ([#10798](https://github.com/hashicorp/terraform/issues/10798)) - * provider/aws: Reworked validateArn function to handle empty values ([#10833](https://github.com/hashicorp/terraform/issues/10833)) - * provider/aws: Set `aws_autoscaling_policy` `metric_aggregation_type` to be Computed ([#10904](https://github.com/hashicorp/terraform/issues/10904)) - * provider/aws: `storage_class` is now correctly treated as optional when configuring replication for `aws_s3_bucket` resources. ([#10921](https://github.com/hashicorp/terraform/issues/10921)) - * provider/aws: `user_data` on `aws_launch_configuration` resources is only base 64 encoded if the value provided is not already base 64 encoded. ([#10871](https://github.com/hashicorp/terraform/issues/10871)) - * provider/aws: Add snapshotting to the list of pending state for elasticache ([#10965](https://github.com/hashicorp/terraform/issues/10965)) - * provider/aws: Add support for updating tags in aws_emr_cluster ([#11003](https://github.com/hashicorp/terraform/issues/11003)) - * provider/aws: Fix the normalization of AWS policy statements ([#11009](https://github.com/hashicorp/terraform/issues/11009)) - * provider/aws: data_source_aws_iam_server_certificate latest should be bool not string causes panic ([#11016](https://github.com/hashicorp/terraform/issues/11016)) - * provider/aws: Fix typo in aws_redshift_cluster causing security groups to not allow update ([#11025](https://github.com/hashicorp/terraform/issues/11025)) - * provider/aws: Set `key_name` in `aws_key_pair` if omitted in configuration ([#10987](https://github.com/hashicorp/terraform/issues/10987)) - * provider/aws: Updating the aws_efs_mount_target dns_name ([#11023](https://github.com/hashicorp/terraform/issues/11023)) - * provider/aws: Validate window time format for snapshot times and backup windows on RDS and ElastiCache resources ([#11089](https://github.com/hashicorp/terraform/issues/11089)) - * provider/aws: aws_db_instance restored from snapshot had problem with subnet_group ([#11050](https://github.com/hashicorp/terraform/issues/11050)) - * provider/aws: Allow disabled access_log in ELB ([#11120](https://github.com/hashicorp/terraform/issues/11120)) - * provider/azurerm: fix update protocol for lb_probe ([#11125](https://github.com/hashicorp/terraform/issues/11125)) - * provider/google: Fix backwards incompatibility around create_timeout in 
instances ([#10858](https://github.com/hashicorp/terraform/issues/10858)) - * provider/google: google_compute_instance_group_manager update_strategy not properly read ([#10174](https://github.com/hashicorp/terraform/issues/10174)) - * provider/openstack: Handle `PENDING_UPDATE` status with LBaaS v2 members ([#10875](https://github.com/hashicorp/terraform/issues/10875)) - * provider/rancher: Add 'finishing-upgrade' state to rancher stack ([#11019](https://github.com/hashicorp/terraform/issues/11019)) +Quite a few bugs were fixed indirectly as a result of improvements to the underlying language engine, so a fully-comprehensive list of fixed bugs is not possible, but some of the more commonly-encountered bugs that are fixed in this release include: +* config: The conditional operator `... ? ... : ...` now works with result values of any type and only returns evaluation errors for the chosen result expression, as those familiar with this operator in other languages might expect. -## 0.8.2 (December 21, 2016) +* config: Accept and ignore UTF-8 byte-order mark for configuration files ([#19715](https://github.com/hashicorp/terraform/issues/19715)) -BACKWARDS INCOMPATIBILITIES / NOTES: +* config: When using a splat expression like `aws_instance.foo.*.id`, the addition of a new instance to the set (whose `id` is therefore not known until after apply) will no longer cause all of the other ids in the resulting list to appear unknown. - * `aws_lambda_function` Please note that `runtime` is now a required field as AWS have deprecated the use of nodejs 0.10 in lambda functions ([#9724](https://github.com/hashicorp/terraform/issues/9724)) +* config: The `jsonencode` function now preserves the types of values passed to it, even inside nested structures, whereas before it had a tendency to convert primitive-typed values to string representations. -FEATURES: +* config: The `format` and `formatlist` functions now attempt automatic type conversions when the given values do not match the "verbs" in the format string, rather than producing a result with error placeholders in it. 
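To make the first of these fixes concrete, here is a minimal sketch in Terraform configuration language (hypothetical variable and local names) of a conditional whose branches are lists rather than strings; under the new behaviour only the chosen branch is evaluated, so the index in the false branch cannot fail while `multi_az` is true:

```hcl
variable "multi_az" {
  type    = bool
  default = false
}

variable "subnet_ids" {
  type    = list(string)
  default = ["subnet-aaaa", "subnet-bbbb"] # hypothetical IDs
}

locals {
  # Both branches are lists; previously the conditional only
  # supported primitive (string-like) results.
  effective_subnets = var.multi_az ? var.subnet_ids : [var.subnet_ids[0]]
}
```

And a sketch of the `jsonencode` type-preservation fix, with illustrative values only; numbers and booleans now survive encoding as JSON numbers and booleans instead of being stringified:

```hcl
output "settings_json" {
  # Produces {"enabled":true,"port":8080} rather than
  # {"enabled":"true","port":"8080"}.
  value = jsonencode({
    port    = 8080
    enabled = true
  })
}
```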
- * **New Provider:** `New Relic` ([#10317](https://github.com/hashicorp/terraform/issues/10317)) - * **New Resource:** `aws_ses_configuration_set` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `aws_ses_event_destination` ([#10735](https://github.com/hashicorp/terraform/issues/10735)) - * **New Resource:** `azurerm_redis_cache` ([#10184](https://github.com/hashicorp/terraform/issues/10184)) - * **New Resource:** `ultradns_dirpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_http` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_probe_ping` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_record` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Resource:** `ultradns_tcpool` ([#9788](https://github.com/hashicorp/terraform/issues/9788)) - * **New Data Source:** `aws_iam_account_alias` ([#10804](https://github.com/hashicorp/terraform/issues/10804)) +* config: Assigning a list containing one or more unknown values to an argument expecting a list no longer produces the incorrect error message "should be a list", because Terraform is now able to track the individual elements as being unknown rather than the list as a whole, and to track the type of each unknown value. (This also avoids any need to place seemingly-redundant list brackets around values that are already lists, which would now be interpreted as a list of lists.) -IMPROVEMENTS: +* cli: When `create_before_destroy` is enabled for a resource, replacement actions are reflected correctly in rendered plans as `+/-` rather than `-/+`, and described as such in the UI messages; see the sketch after this list. - * provider/aws: Add support for BinaryMediaTypes and ContentHandling to AWS API Gateway ([#10776](https://github.com/hashicorp/terraform/issues/10776)) - * provider/aws: Deprecated aws_lambda_function nodejs runtime in favor of nodejs4.3 ([#9724](https://github.com/hashicorp/terraform/issues/9724)) - * provider/aws: Support updating of aws_db_instance db_subnet_group_name ([#10818](https://github.com/hashicorp/terraform/issues/10818)) - * provider/aws: Allow update to RDS password when restoring from snapshot ([#8622](https://github.com/hashicorp/terraform/issues/8622)) - * provider/azurerm: add support for tags to dns_zone ([#10750](https://github.com/hashicorp/terraform/issues/10750)) - * provider/pagerduty: pagerduty_schedule - support for start_day_of_week (schedule restriction) ([#10069](https://github.com/hashicorp/terraform/issues/10069)) - * state/remote/swift: add support for token authentication ([#10866](https://github.com/hashicorp/terraform/issues/10866)) +* core: Various root causes of the "diffs didn't match during apply" class of error are now checked at their source, allowing Terraform to either avoid the problem occurring altogether (ideally) or to provide a more actionable error message to help with reporting, finding, and fixing the bug. -BUG FIXES: - - * core: Improve validation for provider aliases to allow inheritance in modules. ([#10807](https://github.com/hashicorp/terraform/issues/10807)) - * core: Math operations always prefer floating point if an argument is floating point. ([#10886](https://github.com/hashicorp/terraform/issues/10886))
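As a point of reference for the `create_before_destroy` plan-rendering fix noted above, a minimal sketch (hypothetical resource and values) of where the setting lives:

```hcl
resource "aws_instance" "web" {
  ami           = "ami-0123456789abcdef0" # hypothetical values
  instance_type = "t2.micro"

  lifecycle {
    # With this enabled, a forced replacement is planned as +/-
    # (create the replacement first, then destroy the old instance)
    # instead of -/+.
    create_before_destroy = true
  }
}
```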
- * core: Strings are implicitly converted to integers/floats for comparison. ([#10886](https://github.com/hashicorp/terraform/issues/10886)) - * provider/aws: Fixed crash in `data_source_ami` with empty `owner` value ([#10763](https://github.com/hashicorp/terraform/issues/10763)) - * provider/aws: Require `master_username` and `master_password` if no snapshot given in Redshift Cluster ([#9837](https://github.com/hashicorp/terraform/issues/9837)) - * provider/azurerm: fix network_interface.ip_configuration hash for load balancers ([#10834](https://github.com/hashicorp/terraform/issues/10834)) - * provider/docker: Fix regression, 'cert_path' stopped working ([#10801](https://github.com/hashicorp/terraform/issues/10801)) - * provider/google: Use node_version during google_container_cluster creation ([#10817](https://github.com/hashicorp/terraform/issues/10817)) - * provider/openstack: Handle Volume Creation Errors ([#10821](https://github.com/hashicorp/terraform/issues/10821)) - -## 0.8.1 (December 14, 2016) - -IMPROVEMENTS: - - * provider/aws: Support eu-west-2 ([#10470](https://github.com/hashicorp/terraform/issues/10470)) - * provider/aws: Improved the SNS topic subscription protocols validation ([#10704](https://github.com/hashicorp/terraform/issues/10704)) - * provider/google: Add subnetwork_project field to enable cross-project networking ([#9662](https://github.com/hashicorp/terraform/issues/9662)) - * provider/pagerduty: Allow 'team_responder' role for pagerduty_user resource ([#10728](https://github.com/hashicorp/terraform/issues/10728)) - -BUG FIXES: - - * core: Handle whitespace around the key in the `-var` flag. ([#10717](https://github.com/hashicorp/terraform/issues/10717)) - * core: `terraform` block works in the presence of `_override` files ([#10715](https://github.com/hashicorp/terraform/issues/10715)) - * core: Fix error when a provider in a module only referenced a variable ([#10719](https://github.com/hashicorp/terraform/issues/10719)) - * core: Destroy ordering for resources that depend on each other across modules is correct ([#745](https://github.com/hashicorp/terraform/issues/745)) - -DEPRECATION REMOVALS: - - * provider/aws: Removed deprecated `parameter_group` from `aws_rds_cluster` ([#10733](https://github.com/hashicorp/terraform/issues/10733)) - -## 0.8.0 (December 13, 2016) - -**This is the complete 0.7.13 to 0.8 CHANGELOG. Below this section we -also have a 0.8.0-rc3 to 0.8.0 final CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * `template_file` _inline_ templates must escape their variable usage. What - was previously `${foo}` must now be `$${foo}`. Note that this is only - for _inline_ templates. Templates read from files are unchanged. ([#9698](https://github.com/hashicorp/terraform/issues/9698)) - * Escape sequences used to require double-escaping when used within interpolations. - You now must only escape once (which is the expected/typical behavior). - For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before, - that would cause very strange behavior. However, this may break existing - configurations which found a level of escape sequences to work. Check - `terraform plan` for incorrect output. - * Math operators now follow the standard order of operations: *, /, % followed - by +, -. 
See the updated interpolation docs for more information. You can - continue to force ordering with parentheses. - * Strings in configuration can no longer contain unescaped newlines. For - unescaped newlines, heredocs must be used - - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - * provider/chef: The chef provider now accepts `key_material` as an alternative to - `private_key_pem`. The `private_key_pem` attribute will be deprecated in a - future release - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -DEPRECATION REMOVALS: - - * The `template_file` resource no longer accepts a direct file path for the - `template` attribute. You may either specify a path wrapped in a `file` - function or specify a file path with the `filepath` attribute. This was - deprecated during 0.7.x. - -FEATURES: - - * **New command:** `terraform console`, an interactive console for experimenting - with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093)) - * **Terraform version requirement in configuration.** You can now specify - a Terraform version requirement in configuration and modules. ([#10080](https://github.com/hashicorp/terraform/issues/10080)) - * **Conditional values:** You can now use conditionals to determine the values - of attributes. For example: `count = "${var.env == "prod" ? 1 : 0}"`. - * **`depends_on` can reference modules.** This allows a resource or output - to depend on everything within a module. ([#10076](https://github.com/hashicorp/terraform/issues/10076)) - * **`output` supports `depends_on`.** This is useful when the output depends - on a certain ordering to happen that can't be represented with interpolations. - ([#10072](https://github.com/hashicorp/terraform/issues/10072)) - * Providers and resources are now notified by Terraform core to "stop" when - an interrupt is received, allowing resources to gracefully exit much, much - faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607)) - * The `import` command can now specify a provider alias to use. ([#10310](https://github.com/hashicorp/terraform/issues/10310)) - * The `import` command will now read provider configuration from Terraform - configuration files (including loading tfvars files and so on). 
- ([#9809](https://github.com/hashicorp/terraform/issues/9809)) - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538)) - * **New Provider:** `rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158)) - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) - * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` ([#10261](https://github.com/hashicorp/terraform/issues/10261)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - -IMPROVEMENTS: - - * core: Human-friendly error when a computed count is used. ([#10060](https://github.com/hashicorp/terraform/issues/10060)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. 
([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * core: Plan will show deposed-only destroys for create-before-destroy resources. ([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * helper/schema: only map, list, and set elements that are actually causing - a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/aws: Enforced kms_key_* attributes to be ARNs ([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. 
([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/datadog: Make monitor thresholds optional. ([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. 
([#10537](https://github.com/hashicorp/terraform/issues/10537)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' ([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709)) - * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549)) - * core: Direct indexing into a computed list no longer errors. ([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. ([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint settings take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206)) - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. 
([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. ([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * command/fmt: Multiline comments aren't indented every fmt. ([#6524](https://github.com/hashicorp/terraform/issues/6524)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303)) - * provider/aws: Do not return a root device for instance store backed AMIs. 
([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283)) - * provider/azurerm: azurerm_availability_set is no longer ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/datadog: Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297)) - -## 0.8.0 from 0.8.0-rc3 (December 13, 2016) - -**This only includes changes from 0.8.0-rc3 to 0.8.0 final. 
The section above -has the complete 0.7.x to 0.8.0 CHANGELOG.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * provider/postgres: `ssl_mode` has been renamed `sslmode` to match common usage ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - -FEATURES: - - * **New Provider:** `Icinga2` ([#8306](https://github.com/hashicorp/terraform/issues/8306)) - * **New Resource:** `aws_lightsail_domain` ([#10637](https://github.com/hashicorp/terraform/issues/10637)) - * **New Resource:** `aws_lightsail_key_pair` ([#10583](https://github.com/hashicorp/terraform/issues/10583)) - * **New Resource:** `aws_snapshot_create_volume_permission` ([#9891](https://github.com/hashicorp/terraform/issues/9891)) - * **New Resource:** `google_compute_health_check` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Resource:** `google_compute_region_backend_service` ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * **New Data Source:** `aws_eip` ([#9833](https://github.com/hashicorp/terraform/issues/9833)) - * **New Data Source:** `aws_route53_zone` ([#9766](https://github.com/hashicorp/terraform/issues/9766)) - * **New Data Source:** `aws_vpc_endpoint_services` ([#10261](https://github.com/hashicorp/terraform/issues/10261)) - -IMPROVEMENTS: - - * command/plan: Show warning when a plan file is given as input to make behavior clear. ([#10639](https://github.com/hashicorp/terraform/issues/10639)) - * core: Maps across multiple input sources (files, CLI, env vars) are merged. ([#10654](https://github.com/hashicorp/terraform/issues/10654)) - * provider/aws: Add support for AWS CA Central 1 Region ([#10618](https://github.com/hashicorp/terraform/issues/10618)) - * provider/aws: Added SQS FIFO queues ([#10614](https://github.com/hashicorp/terraform/issues/10614)) - * provider/aws: Support MFA delete for s3 bucket versioning ([#10020](https://github.com/hashicorp/terraform/issues/10020)) - * provider/aws: Enable DeleteOnTermination in ENI when created by spot fleet ([#9922](https://github.com/hashicorp/terraform/issues/9922)) - * provider/cloudstack: Add option to set a custom `network_domain` for `cloudstack_network` ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_port_forward` resource ([#10638](https://github.com/hashicorp/terraform/issues/10638)) - * provider/fastly add origin shielding ([#10677](https://github.com/hashicorp/terraform/issues/10677)) - * provider/google: Add support for Internal Load Balancing ([#10453](https://github.com/hashicorp/terraform/issues/10453)) - * provider/google: SSL certificates can now specify prefix instead of a full name ([#10684](https://github.com/hashicorp/terraform/issues/10684)) - * provider/postgresql: Improved support for many PostgreSQL resources ([#10682](https://github.com/hashicorp/terraform/issues/10682)) - * provider/rundeck: enable validation for multiple values in an array ([#8913](https://github.com/hashicorp/terraform/issues/8913)) - * provider/rundeck: Add support for scheduler to rundeck_job ([#9449](https://github.com/hashicorp/terraform/issues/9449)) - -BUG FIXES: - - * core: Direct indexing into a computed list no longer errors. 
([#10657](https://github.com/hashicorp/terraform/issues/10657)) - * core: Validate fails on invalid keys in `variable` blocks. ([#10658](https://github.com/hashicorp/terraform/issues/10658)) - * core: Validate that only a single `lifecycle` block exists per resource. ([#10656](https://github.com/hashicorp/terraform/issues/10656)) - * core: When destroying, the resources of a provider that depends on another resource are destroyed first. ([#10659](https://github.com/hashicorp/terraform/issues/10659)) - * provider/aws: Added Lambda function guard when needed attributes are not set ([#10663](https://github.com/hashicorp/terraform/issues/10663)) - * provider/aws: Allow import of aws_security_groups with more than one source_security_group_id rule ([#9477](https://github.com/hashicorp/terraform/issues/9477)) - * provider/aws: Allow setting the DB Instance name when restoring from a snapshot ([#10664](https://github.com/hashicorp/terraform/issues/10664)) - * provider/aws: Fix issue importing `aws_vpc_peering_connection` ([#10635](https://github.com/hashicorp/terraform/issues/10635)) - * provider/aws: Fixed deletion of aws_api_gateway_base_path_mapping with empty path ([#10177](https://github.com/hashicorp/terraform/issues/10177)) - * provider/aws: Fix issue removing Lambda environment variables ([#10492](https://github.com/hashicorp/terraform/issues/10492)) - * provider/azurerm: fix virtual_machine reading plan as the wrong type ([#10626](https://github.com/hashicorp/terraform/issues/10626)) - * provider/azurerm: set ForceNew for storage image and OS disk of virtual_machine ([#10340](https://github.com/hashicorp/terraform/issues/10340)) - * provider/openstack: More Import and Region Fixes ([#10662](https://github.com/hashicorp/terraform/issues/10662)) - -## 0.8.0-rc3 (December 8, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Variable, resource, provider, and module names may no longer start with - a number or hyphen. Please see the upgrade guide for more information. 
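To illustrate the identifier rule above (hypothetical names), a leading digit or hyphen is now rejected, while the same name beginning with a letter remains valid:

```hcl
# Invalid as of 0.8.0-rc3: names may no longer start with a number or hyphen.
# variable "1-instance-count" {}

# Valid: starts with a letter.
variable "instance_count" {
  default = 1
}
```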
- -FEATURES: - - * **New Provider:** `external` ([#8768](https://github.com/hashicorp/terraform/issues/8768)) - * **New Provider:** `Rancher` ([#9173](https://github.com/hashicorp/terraform/issues/9173)) - * **New Data Source:** `aws_iam_server_certificate` ([#10558](https://github.com/hashicorp/terraform/issues/10558)) - * **New Data Source:** `pagerduty_user` ([#10541](https://github.com/hashicorp/terraform/issues/10541)) - * **New Resource:** `aws_opsworks_rds_db_instance` ([#10294](https://github.com/hashicorp/terraform/issues/10294)) - * **New Resource:** `aws_vpc_endpoint_route_table_association` ([#10137](https://github.com/hashicorp/terraform/issues/10137)) -  * **New Resource:**  `aws_lightsail_instance` ([#10473](https://github.com/hashicorp/terraform/issues/10473)) -IMPROVEMENTS: - - * core: SIGTERM also triggers graceful shutdown in addition to SIGINT ([#10534](https://github.com/hashicorp/terraform/issues/10534)) - * provider/aws: Add support for termination protection and autotermination to EMR ([#10252](https://github.com/hashicorp/terraform/issues/10252)) - * provider/aws: Add "no_device" support to ephemeral block devices ([#10547](https://github.com/hashicorp/terraform/issues/10547)) - * provider/aws: Added S3 Bucket replication ([#10552](https://github.com/hashicorp/terraform/issues/10552)) - * provider/aws: Add `pgp_key` to `aws_iam_access_key` to protect key. ([#10615](https://github.com/hashicorp/terraform/issues/10615)) - * provider/azurerm: make DiskSizeGB optional for azurerm_virtual_machine data_disks ([#10232](https://github.com/hashicorp/terraform/issues/10232)) - * provider/azurerm support `license_type` virtual_machine property ([#10539](https://github.com/hashicorp/terraform/issues/10539)) - * provider/datadog: Make monitor thresholds optional. ([#10526](https://github.com/hashicorp/terraform/issues/10526)) - * provider/datadog: Improve datadog timeboard support ([#10027](https://github.com/hashicorp/terraform/issues/10027)) - * provider/docker: Upload files into container before first start ([#9520](https://github.com/hashicorp/terraform/issues/9520)) - * provider/fastly: add ssl_hostname option ([#9629](https://github.com/hashicorp/terraform/issues/9629)) - * provider/openstack: Detect Region for Importing Resources ([#10509](https://github.com/hashicorp/terraform/issues/10509)) - * provider/google: Instances and templates now both support `metadata_startup_script` and `metadata.startup-script`. ([#10537](https://github.com/hashicorp/terraform/issues/10537)) - -BUG FIXES: - - * core: Fix a diff mismatch error that could happen when a resource depends on a count resource being decreased. ([#10522](https://github.com/hashicorp/terraform/issues/10522)) - * core: On Unix machines if `getent` is not available, fall back to shell to find home dir. 
([#10515](https://github.com/hashicorp/terraform/issues/10515)) - * communicator/ssh: Avoid race that could cause parallel remote execs on the same host to overwrite each other ([#10549](https://github.com/hashicorp/terraform/issues/10549)) - * provider/aws: cloudfront distribution 404 should mark as gone ([#10281](https://github.com/hashicorp/terraform/issues/10281)) - * provider/aws: Assign correct number of core instances (n-1) to aws-emr-cluster on update ([#10529](https://github.com/hashicorp/terraform/issues/10529)) - * provider/aws: Allow update of Service role on a CodeDeploy deployment group ([#9866](https://github.com/hashicorp/terraform/issues/9866)) - * provider/aws: fixed the api_gw_domain_name replace operation ([#10179](https://github.com/hashicorp/terraform/issues/10179)) - * provider/aws: Forces the API GW domain name certificates to recreate the resource ([#10588](https://github.com/hashicorp/terraform/issues/10588)) - * provider/aws: Validate `effect` in aws_iam_policy_document data source ([#10021](https://github.com/hashicorp/terraform/issues/10021)) - * provider/azurerm: azurerm_availability_set is no longer ForceNew for UpdateDomain and FaultDomain ([#10545](https://github.com/hashicorp/terraform/issues/10545)) - * provider/azurerm: fix servicebus_topic max_size_in_megabytes for premium namespaces ([#10611](https://github.com/hashicorp/terraform/issues/10611)) - * provider/datadog: Refactor monitor tags to a list instead of a map. ([#10570](https://github.com/hashicorp/terraform/issues/10570)) - * provider/datadog 9869: Validate credentials when initialising client. ([#10567](https://github.com/hashicorp/terraform/issues/10567)) - * provider/openstack: Fix Ordering of Port Allowed Address Pairs ([#10250](https://github.com/hashicorp/terraform/issues/10250)) - -## 0.8.0-rc2 (December 2, 2016) - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * Strings in configuration can no longer contain unescaped newlines. For unescaped newlines, heredocs must be used - * provider/aws: Anywhere where we can specify kms_key_id must now be a valid KMS Key ID ARN to stop continual diffs - -FEATURES: - - * **New DataSource:** `aws_route_table` ([#10301](https://github.com/hashicorp/terraform/issues/10301)) - * **New Interpolation Function:** `timestamp` ([#10475](https://github.com/hashicorp/terraform/issues/10475)) - -IMPROVEMENTS: - - * core: Plan will show deposed-only destroys for create-before-destroy resources. 
([#10404](https://github.com/hashicorp/terraform/issues/10404)) - * provider/aws: Enforced kms_key_* attributes to be ARNs ([#10356](https://github.com/hashicorp/terraform/issues/10356)) - * provider/aws: IPv6 Support To Cloudfront ([#10332](https://github.com/hashicorp/terraform/issues/10332)) - * provider/aws: Support import of aws_iam_instance_profile ([#10436](https://github.com/hashicorp/terraform/issues/10436)) - * provider/aws: Increase `aws_emr_cluster` timeout ([#10444](https://github.com/hashicorp/terraform/issues/10444)) - * provider/aws: Support Automatic Rollback of CodeDeploy deployments and CloudWatch Alarms for a Deployment Group ([#9039](https://github.com/hashicorp/terraform/issues/9039)) - * provider/aws: Allow importing of aws_iam_role, aws_iam_role_policy and aws_iam_policy ([#9398](https://github.com/hashicorp/terraform/issues/9398)) - * provider/aws: Added s3 bucket region attribute management ([#10482](https://github.com/hashicorp/terraform/issues/10482)) - * provider/azurerm: support import of routes, fix route_table ([#10389](https://github.com/hashicorp/terraform/issues/10389)) - * provider/azurerm: create common schema for location field, add diff suppress ([#10409](https://github.com/hashicorp/terraform/issues/10409)) - * provider/github: supports importing resources ([#10382](https://github.com/hashicorp/terraform/issues/10382)) - * provider/postgresql: Added 'connect_timeout' argument to provider 'postgresql' ([#10380](https://github.com/hashicorp/terraform/issues/10380)) - * provider/cloudstack: Support using secondary IP addresses with the `cloudstack_static_nat` resource ([#10420](https://github.com/hashicorp/terraform/issues/10420)) - * provider/google: Added support for session affinity to compute_backend_service ([#10387](https://github.com/hashicorp/terraform/issues/10387)) - * provider/google: Projects are now importable ([#10469](https://github.com/hashicorp/terraform/issues/10469)) - -BUG FIXES: - - * core: Changed attribute console output shows up on Windows. ([#10417](https://github.com/hashicorp/terraform/issues/10417)) - * core: Destroying deposed resources in create before destroy waits until the creation step of its specific index. (0.8 regression) ([#10416](https://github.com/hashicorp/terraform/issues/10416)) - * core: Certain invalid configurations will no longer print "illegal". ([#10448](https://github.com/hashicorp/terraform/issues/10448)) - * core: Fix a crash that could occur when multiple deposed instances exist. ([#10504](https://github.com/hashicorp/terraform/issues/10504)) - * command/console: variable access works ([#10446](https://github.com/hashicorp/terraform/issues/10446)) - * provider/aws: Do not return a root device for instance store backed AMIs. ([#9483](https://github.com/hashicorp/terraform/issues/9483)) - * provider/aws: resource_aws_opsworks_application does not accept document_root parameter ([#10477](https://github.com/hashicorp/terraform/issues/10477)) - * provider/aws: bug fix when specifying level on aws_opsworks_permission ([#10394](https://github.com/hashicorp/terraform/issues/10394)) - -## 0.8.0-rc1 (November 23, 2016) - -BASED ON: 0.7.13 (includes any changes up to that point as well) - -**Please read prior beta notes, as those are also included. 
The 0.8 changes -will be coalesced for a 0.8 final, but will remain separate for the pre-release -period.** - -BACKWARDS INCOMPATIBILITIES / NOTES: - - * The chef provider now accepts `key_material` as an alternative to `private_key_pem`. The `private_key_pem` attribute will be deprecated in a future release - * The `template_file` resource no longer accepts a direct file path for the `template` attribute. You may either specify a path wrapped in a `file` function or specify a file path with the `filepath` attribute. This was deprecated during 0.7.x. - -FEATURES: - * core: allow outputs to have descriptions ([#9722](https://github.com/hashicorp/terraform/issues/9722)) - * state/azure: support passing of lease ID when writing storage blob ([#10115](https://github.com/hashicorp/terraform/issues/10115)) - * **New Resource:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * **New Resource:** `openstack_blockstorage_volume_attach_v2` ([#10259](https://github.com/hashicorp/terraform/issues/10259)) - * **New Resource:** `openstack_compute_volume_attach_v2` ([#10260](https://github.com/hashicorp/terraform/issues/10260)) - * **New Data Source:** `aws_ebs_snapshot` ([#10017](https://github.com/hashicorp/terraform/issues/10017)) - * The `import` command can now specify a provider alias to use. ([#10310](https://github.com/hashicorp/terraform/issues/10310)) - -IMPROVEMENTS: - - * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096)) - * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_insstance ([#10284](https://github.com/hashicorp/terraform/issues/10284)) - * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239)) - * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195)) - * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105)) - * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151)) - * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946)) - * provider/nomad: Update to support Nomad 0.5.0 - * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943)) - * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055)) - -BUG FIXES: - - * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134)) - * core: escape sequence for " works (0.8 beta regression) ([#10236](https://github.com/hashicorp/terraform/issues/10236)) - * core: Terraform starts on Windows (0.8 beta2 regression) ([#10266](https://github.com/hashicorp/terraform/issues/10266)) - * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300)) - * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318)) - * core: Disable checkpoint settings take effect 
-
-IMPROVEMENTS:
-
- * provider/aws: Addition of suspended_processes to aws_autoscaling_group ([#10096](https://github.com/hashicorp/terraform/issues/10096))
- * provider/aws: added auto_minor_version_upgrade on aws_rds_cluster_instance ([#10284](https://github.com/hashicorp/terraform/issues/10284))
- * provider/aws: Add JSON validation to the aws_iam_policy resource ([#10239](https://github.com/hashicorp/terraform/issues/10239))
- * provider/azurerm: enable import of more resources ([#10195](https://github.com/hashicorp/terraform/issues/10195))
- * provider/chef: Migrate Chef to use KEY_MATERIAL rather than using a Pem file ([#10105](https://github.com/hashicorp/terraform/issues/10105))
- * provider/docker: authentication via values instead of files ([#10151](https://github.com/hashicorp/terraform/issues/10151))
- * provider/google: Add Service Accounts resource ([#9946](https://github.com/hashicorp/terraform/issues/9946))
- * provider/nomad: Update to support Nomad 0.5.0
- * provider/openstack: Add Swauth/Swift Authentication ([#9943](https://github.com/hashicorp/terraform/issues/9943))
- * state/remote/swift: Add support for versioning state file in swift and expiring versioned state ([#10055](https://github.com/hashicorp/terraform/issues/10055))
-
-BUG FIXES:
-
- * core: Catch parse errors for null characters mid-file ([#9134](https://github.com/hashicorp/terraform/issues/9134))
- * core: escape sequence for " works (0.8 beta regression) ([#10236](https://github.com/hashicorp/terraform/issues/10236))
- * core: Terraform starts on Windows (0.8 beta2 regression) ([#10266](https://github.com/hashicorp/terraform/issues/10266))
- * core: Remove extra dot from state command backup files ([#10300](https://github.com/hashicorp/terraform/issues/10300))
- * core: Validate data sources do not have provisioners ([#10318](https://github.com/hashicorp/terraform/issues/10318))
- * core: Disabled checkpoint settings now take effect ([#10206](https://github.com/hashicorp/terraform/issues/10206))
- * provider/aws: Skip VPC endpoint routes when removing default route table's routes ([#10303](https://github.com/hashicorp/terraform/issues/10303))
- * provider/azurerm: Prevent null reference when reading boot_diagnostics settings in azurerm_virtual_machine ([#10283](https://github.com/hashicorp/terraform/issues/10283))
- * provider/template: No file path error when setting template to `/` ([#10297](https://github.com/hashicorp/terraform/issues/10297))
-
-PLUGIN CHANGES:
-
- * The protocol version has been incremented, requiring all plugins for
-   0.8 to be built with 0.8 sources (or newer). This should only require
-   a simple recompile for compatibility.
-
-## 0.8.0-beta2 (November 16, 2016)
-
-BASED ON: 0.7.11 (includes any changes up to that point as well)
-
-**Please read prior beta notes, as those are also included. The 0.8 changes
-will be coalesced for a 0.8 final, but will remain separate for the pre-release
-period.**
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * Math operators now follow the standard order of operations: *, /, % followed
-   by +, -. See the updated interpolation docs for more information. You can
-   continue to force ordering with parentheses (see the sketch below).
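
A hedged illustration of the precedence change (editor's addition; output names are made up, and the pre-0.8 behavior is paraphrased from the note above):

```hcl
# Hypothetical sketch of the 0.8 operator precedence.
output "with_precedence" {
  # *, / and % now bind tighter than + and -, so this is 1 + (2 * 3) = 7.
  value = "${1 + 2 * 3}"
}

output "forced_ordering" {
  # Parentheses still force a different grouping: (1 + 2) * 3 = 9.
  value = "${(1 + 2) * 3}"
}
```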
-
-FEATURES:
-
- * **New command:** `terraform console`, an interactive console for experimenting
-   with and using interpolations. ([#10093](https://github.com/hashicorp/terraform/issues/10093))
- * **Terraform version requirement in configuration.** You can now specify
-   a Terraform version requirement in configuration and modules (see the sketch below). ([#10080](https://github.com/hashicorp/terraform/issues/10080))
- * **`depends_on` can reference modules.** This allows a resource or output
-   to depend on everything within a module. ([#10076](https://github.com/hashicorp/terraform/issues/10076))
- * **`output` supports `depends_on`.** This is useful when the output depends
-   on a certain ordering to happen that can't be represented with interpolations.
-   ([#10072](https://github.com/hashicorp/terraform/issues/10072))
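
A sketch combining three of the features above (editor's addition; the module name, AMI ID, and version constraint are made up):

```hcl
# Hypothetical sketch: a version requirement plus module-level depends_on.
terraform {
  required_version = ">= 0.8.0"
}

resource "aws_instance" "app" {
  ami           = "ami-123456" # made-up ID
  instance_type = "t2.micro"

  # Wait for everything inside module "network" (a made-up module name).
  depends_on = ["module.network"]
}

output "app_id" {
  value      = "${aws_instance.app.id}"
  depends_on = ["module.network"]
}
```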
-
-## 0.8.0-beta1 (November 11, 2016)
-
-BASED ON: 0.7.10 (includes any changes up to that point as well)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * `template_file` _inline_ templates must escape their variable usage. What
-   was previously `${foo}` must now be `$${foo}`. Note that this is only
-   for _inline_ templates. Templates read from files are unchanged (see the
-   sketch below). ([#9698](https://github.com/hashicorp/terraform/issues/9698))
- * Escape sequences used to require double-escaping when used within interpolations.
-   You now must only escape once (which is the expected/typical behavior).
-   For example: `${replace(var.foo, "\\", "\\\\")}` is correct. Before,
-   that would cause very strange behavior. However, this may break existing
-   configurations which found a level of escape sequences to work. Check
-   `terraform plan` for incorrect output.
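
An illustration of the inline-template escaping (editor's sketch; the template and variable names are made up):

```hcl
# Hypothetical sketch of the 0.8.0-beta1 escaping rule for inline templates.
data "template_file" "inline" {
  # The template's own variable is written $${name}; a bare ${name} here
  # would be interpolated by Terraform itself before rendering.
  template = "Hello, $${name}!"

  vars {
    name = "world"
  }
}
```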
-
-FEATURES:
-
- * **New provider:** `nomad` ([#9538](https://github.com/hashicorp/terraform/issues/9538))
- * **New provider:** `vault` ([#9158](https://github.com/hashicorp/terraform/issues/9158))
- * The `import` command will now read provider configuration from Terraform
-   configuration files (including loading tfvars files and so on). ([#9809](https://github.com/hashicorp/terraform/issues/9809))
- * Providers and resources are now notified by Terraform core to "stop" when
-   an interrupt is received, allowing resources to gracefully exit much, much
-   faster. ([#9607](https://github.com/hashicorp/terraform/issues/9607))
-
-IMPROVEMENTS:
-
- * core: Human-friendly error when a computed count is used. ([#10060](https://github.com/hashicorp/terraform/issues/10060))
- * helper/schema: only map, list, and set elements that are actually causing
-   a resource to destroy/create are marked as "requires new". ([#9613](https://github.com/hashicorp/terraform/issues/9613))
-
-BUG FIXES:
-
- * core: Escape sequences in interpolations work in every case. ([#8709](https://github.com/hashicorp/terraform/issues/8709))
- * core: Maps in outputs with computed values are no longer removed. ([#9549](https://github.com/hashicorp/terraform/issues/9549))
- * command/fmt: Multiline comments aren't indented every fmt. ([#6524](https://github.com/hashicorp/terraform/issues/6524))
-
-## 0.7.13 (November 23, 2016)
-
-BUG FIXES:
-
- * core: New graph records dependencies for explicit self references ([#10319](https://github.com/hashicorp/terraform/issues/10319))
-
-## 0.7.12 (November 22, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * provider/cloudstack: `cloudstack_static_nat` has now deprecated `network_id` ([#10204](https://github.com/hashicorp/terraform/issues/10204))
-
-FEATURES:
-
- * *New Data Source:* `aws_alb_listener` ([#10181](https://github.com/hashicorp/terraform/issues/10181))
- * *New Resource:* `github_label` ([#10213](https://github.com/hashicorp/terraform/issues/10213))
-
-IMPROVEMENTS:
-
- * core: Experimental feature failures are less verbose. ([#10276](https://github.com/hashicorp/terraform/issues/10276))
- * provider/aws: Add name_prefix to aws_iam_policy ([#10178](https://github.com/hashicorp/terraform/issues/10178))
- * provider/aws: Add ability to select aws_prefix_list data source by name ([#10248](https://github.com/hashicorp/terraform/issues/10248))
- * provider/aws: Return service CIDR blocks from aws_vpc_endpoint resource ([#10254](https://github.com/hashicorp/terraform/issues/10254))
- * provider/aws: Added `environment` configuration for AWS Lambda Functions ([#10275](https://github.com/hashicorp/terraform/issues/10275))
-
-BUG FIXES:
-
- * core: Fix potential crashing race condition on state write ([#10277](https://github.com/hashicorp/terraform/issues/10277))
- * core: Data sources in modules lose their `data.` prefix when moved within the state ([#9996](https://github.com/hashicorp/terraform/issues/9996))
- * provider/aws: Fixed issue with `enable_dns_support` on creation in `aws_vpc` ([#10171](https://github.com/hashicorp/terraform/issues/10171))
- * provider/aws: Add CertificateNotFound retry waiter to aws_alb_listener ([#10180](https://github.com/hashicorp/terraform/issues/10180))
- * provider/aws: Remove IAM user's MFA devices with `force_destroy` ([#10262](https://github.com/hashicorp/terraform/issues/10262))
- * provider/scaleway: improve volume attachment ([#10084](https://github.com/hashicorp/terraform/issues/10084))
-
-## 0.7.11 (November 15, 2016)
-
-FEATURES:
-
-IMPROVEMENTS:
-
- * provider/aws: Expose RDS DB Instance HostedZoneId attribute ([#10000](https://github.com/hashicorp/terraform/issues/10000))
- * provider/aws: Ignore AWS internal tags ([#7454](https://github.com/hashicorp/terraform/issues/7454))
- * provider/aws: Exposed aws_iam_role create_date attribute ([#10091](https://github.com/hashicorp/terraform/issues/10091))
- * provider/aws: Added aws_api_gateway_api_key created_date & last_updated_date attributes ([#9530](https://github.com/hashicorp/terraform/issues/9530))
- * provider/aws: Added aws_api_gateway_rest_api created_date attribute ([#9532](https://github.com/hashicorp/terraform/issues/9532))
- * provider/aws: Exposed aws_api_gateway_deployment.created_date attribute ([#9534](https://github.com/hashicorp/terraform/issues/9534))
- * provider/aws: Added `retry_duration` to `redshift_configuration` in `kinesis_firehose_delivery_stream` ([#10113](https://github.com/hashicorp/terraform/issues/10113))
- * provider/azurerm: allow updating load balancer sub-resources ([#10016](https://github.com/hashicorp/terraform/issues/10016))
- * provider/openstack: Instance `user_data` will now detect if input is already Base64-encoded ([#9966](https://github.com/hashicorp/terraform/issues/9966))
-
-BUG FIXES:
-
- * core: Fix diff mismatch error on "Destroy: true to false" scenarios. ([#10139](https://github.com/hashicorp/terraform/issues/10139))
- * core: New destroy graph `-target` includes dependencies. ([#10036](https://github.com/hashicorp/terraform/issues/10036))
- * core: New destroy graph creates proper edges through module outputs ([#10068](https://github.com/hashicorp/terraform/issues/10068))
- * core: Fix shadow error when using uuid() ([#10106](https://github.com/hashicorp/terraform/issues/10106))
- * core: Fix an issue where applies with data sources could hang ([#10134](https://github.com/hashicorp/terraform/issues/10134))
- * core: Fix plan operation diff mismatch for computed keys in slices ([#10118](https://github.com/hashicorp/terraform/issues/10118))
- * provider/aws: fix the validation of aws_redshift_cluster database_name ([#10019](https://github.com/hashicorp/terraform/issues/10019))
- * provider/aws: Fix panic in aws_acm_certificate datasource ([#10051](https://github.com/hashicorp/terraform/issues/10051))
- * provider/aws: increase aws_lambda_function timeout ([#10116](https://github.com/hashicorp/terraform/issues/10116))
- * provider/aws: Fixed ES buffering_interval option in `kinesis_firehose_delivery_stream` ([#10112](https://github.com/hashicorp/terraform/issues/10112))
-
-## 0.7.10 (November 9, 2016)
-
-FEATURES:
-
- * **New Resource:** `azurerm_eventhub` ([#9889](https://github.com/hashicorp/terraform/issues/9889))
- * **New Resource:** `azurerm_virtual_machine_extension` ([#9962](https://github.com/hashicorp/terraform/issues/9962))
- * **Experimental new plan graph:** `terraform plan` is getting a new graph
-   creation process for 0.8. This is now available behind a flag `-Xnew-apply`
-   (on any command). This will become the default in 0.8. There may still be
-   bugs. ([#9973](https://github.com/hashicorp/terraform/issues/9973))
-
-IMPROVEMENTS:
-
- * provider/aws: Add support for Service Access Security Group in `aws_emr_cluster` ([#9600](https://github.com/hashicorp/terraform/issues/9600))
- * provider/aws: Add Enhanced VPC routing to Redshift ([#9950](https://github.com/hashicorp/terraform/issues/9950))
- * provider/aws: Add key_name_prefix argument to aws_key_pair resource ([#9993](https://github.com/hashicorp/terraform/issues/9993))
- * provider/openstack: Add `value_specs` to `openstack_fw_policy_v1` resource, allowing vendor information ([#9835](https://github.com/hashicorp/terraform/issues/9835))
- * provider/openstack: Add `value_specs` to `openstack_fw_firewall_v1` resource, allowing vendor information ([#9836](https://github.com/hashicorp/terraform/issues/9836))
- * provider/random: The `b64` attribute on `random_id` resources is deprecated, replaced by `b64_url` and `b64_std` ([#9903](https://github.com/hashicorp/terraform/issues/9903))
-
-BUG FIXES:
-
- * core: Splat variables (`foo.*.bar`) are now ordered by count index for deterministic ordering. ([#9883](https://github.com/hashicorp/terraform/issues/9883))
- * core: Prune orphan outputs (in the config but not in the state). ([#9971](https://github.com/hashicorp/terraform/issues/9971))
- * core: New apply graph doesn't prune module variables as aggressively. ([#9898](https://github.com/hashicorp/terraform/issues/9898))
- * core: New apply graph properly configures providers with aliases. ([#9894](https://github.com/hashicorp/terraform/issues/9894))
- * core: New destroy graph doesn't create edge loops to destroy nodes that reference themselves. ([#9968](https://github.com/hashicorp/terraform/issues/9968))
- * provider/aws: Fix crash when adding EBS volumes to spot fleet request. ([#9857](https://github.com/hashicorp/terraform/issues/9857))
- * provider/aws: Ignore NoSuchEntity error when IAM user does not have login profile ([#9900](https://github.com/hashicorp/terraform/issues/9900))
- * provider/aws: Setting static_routes_only on import of vpn_connection ([#9802](https://github.com/hashicorp/terraform/issues/9802))
- * provider/aws: aws_alb_target_group arn_suffix missing the targetgroup ([#9911](https://github.com/hashicorp/terraform/issues/9911))
- * provider/aws: Fix the validateFunc of aws_elasticache_replication_group ([#9918](https://github.com/hashicorp/terraform/issues/9918))
- * provider/aws: removing toLower when setting aws_db_parameter_group options ([#9820](https://github.com/hashicorp/terraform/issues/9820))
- * provider/aws: Fix panic when passing statuses to aws_acm_certificate ([#9990](https://github.com/hashicorp/terraform/issues/9990))
- * provider/aws: AWS IAM, User and Role allow + in the name ([#9991](https://github.com/hashicorp/terraform/issues/9991))
- * provider/scaleway: retry volume attachment ([#9972](https://github.com/hashicorp/terraform/issues/9972))
- * provider/scaleway: fix `scaleway_image` datasource returning unknown images ([#9899](https://github.com/hashicorp/terraform/issues/9899))
- * provider/google: fix crash when mistakenly configuring disks ([#9942](https://github.com/hashicorp/terraform/issues/9942))
-
-## 0.7.9 (November 4, 2016)
-
-FEATURES:
-
- * **New Data Source:** `aws_acm_certificate` ([#8359](https://github.com/hashicorp/terraform/issues/8359))
- * **New Resource:** `aws_autoscaling_attachment` ([#9146](https://github.com/hashicorp/terraform/issues/9146))
- * **New Resource:** `postgresql_extension` ([#9210](https://github.com/hashicorp/terraform/issues/9210))
-
-IMPROVEMENTS:
-
- * core: Improve shadow graph robustness by catching panics during graph evaluation. ([#9852](https://github.com/hashicorp/terraform/issues/9852))
- * provider/aws: Provide the option to skip_destroy on aws_volume_attachment ([#9792](https://github.com/hashicorp/terraform/issues/9792))
- * provider/aws: Allows aws_alb security_groups to be updated ([#9804](https://github.com/hashicorp/terraform/issues/9804))
- * provider/aws: Add the enable_sni attribute for Route53 health checks. ([#9822](https://github.com/hashicorp/terraform/issues/9822))
- * provider/openstack: Add `value_specs` to openstack_fw_rule_v1 resource, allowing vendor information ([#9834](https://github.com/hashicorp/terraform/issues/9834))
- * state/remote/swift: Enable OpenStack Identity/Keystone v3 authentication ([#9769](https://github.com/hashicorp/terraform/issues/9769))
- * state/remote/swift: Now supports all login/config options that the OpenStack Provider supports ([#9777](https://github.com/hashicorp/terraform/issues/9777))
-
-BUG FIXES:
-
- * core: Provisioners in modules do not crash during `apply` (regression). ([#9846](https://github.com/hashicorp/terraform/issues/9846))
- * core: Computed bool fields with non-bool values will not crash ([#9812](https://github.com/hashicorp/terraform/issues/9812))
- * core: `formatlist` interpolation function accepts an empty list ([#9795](https://github.com/hashicorp/terraform/issues/9795))
- * core: Validate outputs have a name ([#9823](https://github.com/hashicorp/terraform/issues/9823))
- * core: Validate variables have a name ([#9818](https://github.com/hashicorp/terraform/issues/9818))
- * command/apply: If a partial set of required variables are provided with `-var`, ask for the remainder ([#9794](https://github.com/hashicorp/terraform/issues/9794))
- * command/fmt: Multiline strings aren't erroneously indented ([#9859](https://github.com/hashicorp/terraform/issues/9859))
- * provider/aws: Fix issue setting `certificate_upload_date` in `aws_api_gateway_domain_name` ([#9815](https://github.com/hashicorp/terraform/issues/9815))
- * provider/azurerm: allow storage_account resource with name "$root" ([#9813](https://github.com/hashicorp/terraform/issues/9813))
- * provider/google: fix for looking up project image families ([#9243](https://github.com/hashicorp/terraform/issues/9243))
- * provider/openstack: Don't pass `shared` in FWaaS Policy unless it's set ([#9830](https://github.com/hashicorp/terraform/issues/9830))
- * provider/openstack: openstack_fw_firewall_v1 `admin_state_up` should default to true ([#9832](https://github.com/hashicorp/terraform/issues/9832))
-
-PLUGIN CHANGES:
-
- * Fields in resources can now have both `Optional` and `ConflictsWith` ([#9825](https://github.com/hashicorp/terraform/issues/9825))
-
-## 0.7.8 (November 1, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * provider/openstack: The OpenStack provider has switched to the new Gophercloud SDK.
-   No front-facing changes were made, but please be aware that there might be bugs.
-   Please report any if found.
- * `archive_file` is now a data source, instead of a resource ([#8492](https://github.com/hashicorp/terraform/issues/8492))
-
-FEATURES:
-
- * **Experimental new apply graph:** `terraform apply` is getting a new graph
-   creation process for 0.8. This is now available behind a flag `-Xnew-apply`
-   (on any command). This will become the default in 0.8. There may still be
-   bugs. ([#9388](https://github.com/hashicorp/terraform/issues/9388))
- * **Experimental new destroy graph:** `terraform destroy` is also getting
-   a new graph creation process for 0.8. This is now available behind a flag
-   `-Xnew-destroy`. This will become the default in 0.8. ([#9527](https://github.com/hashicorp/terraform/issues/9527))
- * **New Provider:** `pagerduty` ([#9022](https://github.com/hashicorp/terraform/issues/9022))
- * **New Resource:** `aws_iam_user_login_profile` ([#9605](https://github.com/hashicorp/terraform/issues/9605))
- * **New Resource:** `aws_waf_ipset` ([#8852](https://github.com/hashicorp/terraform/issues/8852))
- * **New Resource:** `aws_waf_rule` ([#8852](https://github.com/hashicorp/terraform/issues/8852))
- * **New Resource:** `aws_waf_web_acl` ([#8852](https://github.com/hashicorp/terraform/issues/8852))
- * **New Resource:** `aws_waf_byte_match_set` ([#9681](https://github.com/hashicorp/terraform/issues/9681))
- * **New Resource:** `aws_waf_size_constraint_set` ([#9689](https://github.com/hashicorp/terraform/issues/9689))
- * **New Resource:** `aws_waf_sql_injection_match_set` ([#9709](https://github.com/hashicorp/terraform/issues/9709))
- * **New Resource:** `aws_waf_xss_match_set` ([#9710](https://github.com/hashicorp/terraform/issues/9710))
- * **New Resource:** `aws_ssm_activation` ([#9111](https://github.com/hashicorp/terraform/issues/9111))
- * **New Resource:** `azurerm_key_vault` ([#9478](https://github.com/hashicorp/terraform/issues/9478))
- * **New Resource:** `azurerm_storage_share` ([#8674](https://github.com/hashicorp/terraform/issues/8674))
- * **New Resource:** `azurerm_eventhub_namespace` ([#9297](https://github.com/hashicorp/terraform/issues/9297))
- * **New Resource:** `cloudstack_security_group` ([#9103](https://github.com/hashicorp/terraform/issues/9103))
- * **New Resource:** `cloudstack_security_group_rule` ([#9645](https://github.com/hashicorp/terraform/issues/9645))
- * **New Resource:** `cloudstack_private_gateway` ([#9637](https://github.com/hashicorp/terraform/issues/9637))
- * **New Resource:** `cloudstack_static_route` ([#9637](https://github.com/hashicorp/terraform/issues/9637))
- * **New DataSource:** `aws_ebs_volume` ([#9753](https://github.com/hashicorp/terraform/issues/9753))
- * **New DataSource:** `aws_prefix_list` ([#9566](https://github.com/hashicorp/terraform/issues/9566))
- * **New DataSource:** `aws_security_group` ([#9604](https://github.com/hashicorp/terraform/issues/9604))
- * **New DataSource:** `azurerm_client_config` ([#9478](https://github.com/hashicorp/terraform/issues/9478))
- * **New Interpolation Function:** `ceil` ([#9692](https://github.com/hashicorp/terraform/issues/9692))
- * **New Interpolation Function:** `floor` ([#9692](https://github.com/hashicorp/terraform/issues/9692))
- * **New Interpolation Function:** `min` ([#9692](https://github.com/hashicorp/terraform/issues/9692))
- * **New Interpolation Function:** `max` ([#9692](https://github.com/hashicorp/terraform/issues/9692))
- * **New Interpolation Function:** `title` ([#9087](https://github.com/hashicorp/terraform/issues/9087))
- * **New Interpolation Function:** `zipmap` ([#9627](https://github.com/hashicorp/terraform/issues/9627)) (see the sketch after this list)
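
A hedged sketch of the new interpolation functions (editor's addition; variable names and values are made up, and argument shapes follow the functions' common definitions):

```hcl
# Hypothetical sketch of the interpolation functions added in 0.7.8.
variable "keys" {
  default = ["a", "b"]
}

variable "vals" {
  default = ["1", "2"]
}

output "rounding" {
  value = "${ceil(1.2)} and ${floor(1.8)}" # "2 and 1"
}

output "extremes" {
  value = "${min(3, 1, 2)} to ${max(3, 1, 2)}" # "1 to 3"
}

output "titled" {
  value = "${title("hello world")}" # "Hello World"
}

output "zipped" {
  # zipmap pairs a list of keys with a list of values into a map.
  value = "${zipmap(var.keys, var.vals)}"
}
```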
-
-IMPROVEMENTS:
-
- * provider/aws: No longer require `route_table_ids` list in `aws_vpc_endpoint` resources ([#9357](https://github.com/hashicorp/terraform/issues/9357))
- * provider/aws: Allow `description` in `aws_redshift_subnet_group` to be modified ([#9515](https://github.com/hashicorp/terraform/issues/9515))
- * provider/aws: Add tagging support to aws_redshift_subnet_group ([#9504](https://github.com/hashicorp/terraform/issues/9504))
- * provider/aws: Add validation to IAM User and Group Name ([#9584](https://github.com/hashicorp/terraform/issues/9584))
- * provider/aws: Add Ability To Enable / Disable ALB AccessLogs ([#9290](https://github.com/hashicorp/terraform/issues/9290))
- * provider/aws: Add support for `AutoMinorVersionUpgrade` to aws_elasticache_replication_group resource. ([#9657](https://github.com/hashicorp/terraform/issues/9657))
- * provider/aws: Fix import of RouteTable with destination prefixes ([#9686](https://github.com/hashicorp/terraform/issues/9686))
- * provider/aws: Add support for reference_name to aws_route53_health_check ([#9737](https://github.com/hashicorp/terraform/issues/9737))
- * provider/aws: Expose ARN suffix on ALB Target Group ([#9734](https://github.com/hashicorp/terraform/issues/9734))
- * provider/azurerm: add account_kind and access_tier to storage_account ([#9408](https://github.com/hashicorp/terraform/issues/9408))
- * provider/azurerm: write load_balancer attributes to network_interface_card hash ([#9207](https://github.com/hashicorp/terraform/issues/9207))
- * provider/azurerm: Add disk_size_gb param to VM storage_os_disk ([#9200](https://github.com/hashicorp/terraform/issues/9200))
- * provider/azurerm: support importing of subnet resource ([#9646](https://github.com/hashicorp/terraform/issues/9646))
- * provider/azurerm: Add support for *all* of the Azure regions e.g. Germany, China and Government ([#9765](https://github.com/hashicorp/terraform/issues/9765))
- * provider/digitalocean: Allow resizing DigitalOcean Droplets without increasing disk size. ([#9573](https://github.com/hashicorp/terraform/issues/9573))
- * provider/google: enhance service scope list ([#9442](https://github.com/hashicorp/terraform/issues/9442))
- * provider/google: Change default MySQL instance version to 5.6 ([#9674](https://github.com/hashicorp/terraform/issues/9674))
- * provider/google: Support MySQL 5.7 instances ([#9673](https://github.com/hashicorp/terraform/issues/9673))
- * provider/google: Add support for using source_disk to google_compute_image ([#9614](https://github.com/hashicorp/terraform/issues/9614))
- * provider/google: Add support for default-internet-gateway alias for google_compute_route ([#9676](https://github.com/hashicorp/terraform/issues/9676))
- * provider/openstack: Added value_specs to openstack_networking_port_v2, allowing vendor information ([#9551](https://github.com/hashicorp/terraform/issues/9551))
- * provider/openstack: Added value_specs to openstack_networking_floatingip_v2, allowing vendor information ([#9552](https://github.com/hashicorp/terraform/issues/9552))
- * provider/openstack: Added value_specs to openstack_compute_keypair_v2, allowing vendor information ([#9554](https://github.com/hashicorp/terraform/issues/9554))
- * provider/openstack: Allow any protocol in openstack_fw_rule_v1 ([#9617](https://github.com/hashicorp/terraform/issues/9617))
- * provider/openstack: expose LoadBalancer v2 VIP Port ID ([#9727](https://github.com/hashicorp/terraform/issues/9727))
- * provider/openstack: OpenStack Provider enhancements including environment variables ([#9725](https://github.com/hashicorp/terraform/issues/9725))
- * provider/scaleway: update sdk for ams1 region ([#9687](https://github.com/hashicorp/terraform/issues/9687))
- * provider/scaleway: server volume property ([#9695](https://github.com/hashicorp/terraform/issues/9695))
-
-BUG FIXES:
-
- * core: Resources suffixed with 'panic' won't falsely trigger crash detection. ([#9395](https://github.com/hashicorp/terraform/issues/9395))
- * core: Validate lifecycle options don't contain interpolations. ([#9576](https://github.com/hashicorp/terraform/issues/9576))
- * core: Tainted resources will not process `ignore_changes`. ([#7855](https://github.com/hashicorp/terraform/issues/7855))
- * core: Boolean looking values passed in via `-var` no longer cause type errors. ([#9642](https://github.com/hashicorp/terraform/issues/9642))
- * core: Computed primitives in certain cases no longer cause diff mismatch errors. ([#9618](https://github.com/hashicorp/terraform/issues/9618))
- * core: Empty arrays for list vars in JSON work ([#8886](https://github.com/hashicorp/terraform/issues/8886))
- * core: Boolean types in tfvars work properly ([#9751](https://github.com/hashicorp/terraform/issues/9751))
- * core: Deposed resource destruction is accounted for properly in `apply` counts. ([#9731](https://github.com/hashicorp/terraform/issues/9731))
- * core: Check for graph cycles on resource expansion to catch cycles between self-referenced resources. ([#9728](https://github.com/hashicorp/terraform/issues/9728))
- * core: `prevent_destroy` prevents decreasing count ([#9707](https://github.com/hashicorp/terraform/issues/9707))
- * core: removed optional items will trigger "requires new" if necessary ([#9699](https://github.com/hashicorp/terraform/issues/9699))
- * command/apply: `-backup` and `-state-out` work with plan files ([#9706](https://github.com/hashicorp/terraform/issues/9706))
- * command/fmt: Cleaner formatting for multiline standalone comments above resources
- * command/validate: respond to `--help` ([#9660](https://github.com/hashicorp/terraform/issues/9660))
- * provider/archive: Converting to datasource. ([#8492](https://github.com/hashicorp/terraform/issues/8492))
- * provider/aws: Fix issue importing AWS Instances and setting the correct `associate_public_ip_address` value ([#9453](https://github.com/hashicorp/terraform/issues/9453))
- * provider/aws: Fix issue with updating ElasticBeanstalk environment variables ([#9259](https://github.com/hashicorp/terraform/issues/9259))
- * provider/aws: Allow zero value for `scaling_adjustment` in `aws_autoscaling_policy` when using `SimpleScaling` ([#8893](https://github.com/hashicorp/terraform/issues/8893))
- * provider/aws: Increase ECS service drain timeout ([#9521](https://github.com/hashicorp/terraform/issues/9521))
- * provider/aws: Remove VPC Endpoint from state if it's not found ([#9561](https://github.com/hashicorp/terraform/issues/9561))
- * provider/aws: Delete Login Profile from IAM User on force_destroy ([#9583](https://github.com/hashicorp/terraform/issues/9583))
- * provider/aws: Exposed aws_api_gw_domain_name.certificate_upload_date attribute ([#9533](https://github.com/hashicorp/terraform/issues/9533))
- * provider/aws: fix aws_elasticache_replication_group for Redis in cluster mode ([#9601](https://github.com/hashicorp/terraform/issues/9601))
- * provider/aws: Validate regular expression passed via the ami data_source `name_regex` attribute. ([#9622](https://github.com/hashicorp/terraform/issues/9622))
- * provider/aws: Bug fix for NoSuchBucket on Destroy of aws_s3_bucket_policy ([#9641](https://github.com/hashicorp/terraform/issues/9641))
- * provider/aws: Refresh aws_autoscaling_schedule from state on 404 ([#9659](https://github.com/hashicorp/terraform/issues/9659))
- * provider/aws: Allow underscores in IAM user and group names ([#9684](https://github.com/hashicorp/terraform/issues/9684))
- * provider/aws: aws_ami: handle deletion of AMIs ([#9721](https://github.com/hashicorp/terraform/issues/9721))
- * provider/aws: Fix aws_route53_record alias perpetual diff ([#9704](https://github.com/hashicorp/terraform/issues/9704))
- * provider/aws: Allow `active` state while waiting for the VPC Peering Connection. ([#9754](https://github.com/hashicorp/terraform/issues/9754))
- * provider/aws: Normalize all-principals wildcard in `aws_iam_policy_document` ([#9720](https://github.com/hashicorp/terraform/issues/9720))
- * provider/azurerm: Fix Azure RM loadbalancer rules validation ([#9468](https://github.com/hashicorp/terraform/issues/9468))
- * provider/azurerm: Fix servicebus_topic values when using the Update func to stop perpetual diff ([#9323](https://github.com/hashicorp/terraform/issues/9323))
- * provider/azurerm: lower servicebus_topic max size to Azure limit ([#9649](https://github.com/hashicorp/terraform/issues/9649))
- * provider/azurerm: Fix VHD deletion when VM and Storage account are in separate resource groups ([#9631](https://github.com/hashicorp/terraform/issues/9631))
- * provider/azurerm: Guard against panic when importing arm_virtual_network ([#9739](https://github.com/hashicorp/terraform/issues/9739))
- * provider/azurerm: fix sql_database resource reading tags ([#9767](https://github.com/hashicorp/terraform/issues/9767))
- * provider/cloudflare: update client library to stop connection closed issues ([#9715](https://github.com/hashicorp/terraform/issues/9715))
- * provider/consul: Change to consul_service resource to introduce a `service_id` parameter ([#9366](https://github.com/hashicorp/terraform/issues/9366))
- * provider/datadog: Ignore float/int diffs on thresholds ([#9466](https://github.com/hashicorp/terraform/issues/9466))
- * provider/docker: Fixes for docker_container host object and documentation ([#9367](https://github.com/hashicorp/terraform/issues/9367))
- * provider/scaleway: improve the performance of server deletion ([#9491](https://github.com/hashicorp/terraform/issues/9491))
- * provider/scaleway: fix scaleway_volume_attachment with count > 1 ([#9493](https://github.com/hashicorp/terraform/issues/9493))
-
-
-## 0.7.7 (October 18, 2016)
-
-FEATURES:
-
- * **New Data Source:** `scaleway_bootscript`. ([#9386](https://github.com/hashicorp/terraform/issues/9386))
- * **New Data Source:** `scaleway_image`. ([#9386](https://github.com/hashicorp/terraform/issues/9386))
-
-IMPROVEMENTS:
-
- * core: When the environment variable TF_LOG_PATH is specified, debug logs are now appended to the specified file instead of being truncated. ([#9440](https://github.com/hashicorp/terraform/pull/9440))
- * provider/aws: Expose ARN for `aws_lambda_alias`. ([#9390](https://github.com/hashicorp/terraform/issues/9390))
- * provider/aws: Add support for AWS US East (Ohio) region. ([#9414](https://github.com/hashicorp/terraform/issues/9414))
- * provider/scaleway: `scaleway_ip`, `scaleway_security_group`, `scaleway_server` and `scaleway_volume` resources can now be imported. ([#9387](https://github.com/hashicorp/terraform/issues/9387))
-
-BUG FIXES:
-
- * core: List and map indexes support arithmetic. ([#9372](https://github.com/hashicorp/terraform/issues/9372))
- * core: List and map indexes are implicitly converted to the correct type if possible. ([#9372](https://github.com/hashicorp/terraform/issues/9372))
- * provider/aws: Read back `associate_public_ip_address` in `aws_launch_configuration` resources to enable importing. ([#9399](https://github.com/hashicorp/terraform/issues/9399))
- * provider/aws: Remove `aws_route` resources from state if their associated `aws_route_table` has been removed. ([#9431](https://github.com/hashicorp/terraform/issues/9431))
- * provider/azurerm: Load balancer resources now have their `id` attribute set to the resource URI instead of the load balancer URI. ([#9401](https://github.com/hashicorp/terraform/issues/9401))
- * provider/google: Fix a bug causing a crash when migrating `google_compute_target_pool` resources from 0.6.x releases. ([#9370](https://github.com/hashicorp/terraform/issues/9370))
-
-## 0.7.6 (October 14, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * `azurerm_virtual_machine` has deprecated the use of `diagnostics_profile` in favour of `boot_diagnostics`. ([#9122](https://github.com/hashicorp/terraform/issues/9122))
- * The deprecated `key_file` and `bastion_key_file` arguments to Provisioner Connections have been removed ([#9340](https://github.com/hashicorp/terraform/issues/9340))
-
-FEATURES:
- * **New Data Source:** `aws_billing_service_account` ([#8701](https://github.com/hashicorp/terraform/issues/8701))
- * **New Data Source:** `aws_availability_zone` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
- * **New Data Source:** `aws_region` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
- * **New Data Source:** `aws_subnet` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
- * **New Data Source:** `aws_vpc` ([#6819](https://github.com/hashicorp/terraform/issues/6819))
- * **New Resource:** `azurerm_lb` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `azurerm_lb_backend_address_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `azurerm_lb_nat_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `azurerm_lb_nat_pool` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `azurerm_lb_probe` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `azurerm_lb_rule` ([#9199](https://github.com/hashicorp/terraform/issues/9199))
- * **New Resource:** `github_repository` ([#9327](https://github.com/hashicorp/terraform/issues/9327))
-
-IMPROVEMENTS:
- * core-validation: create validation package to provide common validation functions ([#8103](https://github.com/hashicorp/terraform/issues/8103))
- * provider/aws: Support Import of OpsWorks Custom Layers ([#9252](https://github.com/hashicorp/terraform/issues/9252))
- * provider/aws: Automatically constructed ARNs now support partitions other than `aws`, allowing operation with `aws-cn` and `aws-us-gov` ([#9273](https://github.com/hashicorp/terraform/issues/9273))
- * provider/aws: Retry setTags operation for EC2 resources ([#7890](https://github.com/hashicorp/terraform/issues/7890))
- * provider/aws: Support refresh of EC2 instance `user_data` ([#6736](https://github.com/hashicorp/terraform/issues/6736))
- * provider/aws: Poll to confirm delete of `resource_aws_customer_gateway` ([#9346](https://github.com/hashicorp/terraform/issues/9346))
- * provider/azurerm: expose default keys for `servicebus_namespace` ([#9242](https://github.com/hashicorp/terraform/issues/9242))
- * provider/azurerm: add `enable_blob_encryption` to `azurerm_storage_account` resource ([#9233](https://github.com/hashicorp/terraform/issues/9233))
- * provider/azurerm: set `resource_group_name` on resource import across the provider ([#9073](https://github.com/hashicorp/terraform/issues/9073))
- * provider/azurerm: `azurerm_cdn_profile` resources can now be imported ([#9306](https://github.com/hashicorp/terraform/issues/9306))
- * provider/datadog: add support for Datadog dashboard "type" and "style" options ([#9228](https://github.com/hashicorp/terraform/issues/9228))
- * provider/scaleway: `region` is now supported for provider configuration
-
-BUG FIXES:
- * core: Local state can now be refreshed when no resources exist ([#7320](https://github.com/hashicorp/terraform/issues/7320))
- * core: Orphaned nested (depth 2+) modules will inherit provider configs ([#9318](https://github.com/hashicorp/terraform/issues/9318))
- * core: Fix crash when a map key contains an interpolation function ([#9282](https://github.com/hashicorp/terraform/issues/9282))
- * core: Numeric variables values were incorrectly converted to numbers ([#9263](https://github.com/hashicorp/terraform/issues/9263))
- * core: Fix input and output of map variables from HCL ([#9268](https://github.com/hashicorp/terraform/issues/9268))
- * core: Crash when interpolating a map value with a function in the key ([#9282](https://github.com/hashicorp/terraform/issues/9282))
- * core: Crash when copying a nil value in an InstanceState ([#9356](https://github.com/hashicorp/terraform/issues/9356))
- * command/fmt: Bare comment groups no longer have superfluous newlines
- * command/fmt: Leading comments on list items are formatted properly
- * provider/aws: Return correct AMI image when `most_recent` is set to `true`. ([#9277](https://github.com/hashicorp/terraform/issues/9277))
- * provider/aws: Fix issue with diff on import of `aws_eip` in EC2 Classic ([#9009](https://github.com/hashicorp/terraform/issues/9009))
- * provider/aws: Handle EC2 tags related errors in CloudFront Distribution resource. ([#9298](https://github.com/hashicorp/terraform/issues/9298))
- * provider/aws: Fix error caused when using `etag` and `kms_key_id` with `aws_s3_bucket_object` ([#9168](https://github.com/hashicorp/terraform/issues/9168))
- * provider/aws: Fix issue reassigning EIP instances appropriately ([#7686](https://github.com/hashicorp/terraform/issues/7686))
- * provider/azurerm: removing resources from state when the API returns a 404 for them ([#8859](https://github.com/hashicorp/terraform/issues/8859))
- * provider/azurerm: Fixed a panic in `azurerm_virtual_machine` when using `diagnostic_profile` ([#9122](https://github.com/hashicorp/terraform/issues/9122))
-
-## 0.7.5 (October 6, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * `tls_cert_request` is now a managed resource instead of a data source, restoring the pre-Terraform 0.7 behaviour ([#9035](https://github.com/hashicorp/terraform/issues/9035))
-
-FEATURES:
- * **New Provider:** `bitbucket` ([#7405](https://github.com/hashicorp/terraform/issues/7405))
- * **New Resource:** `aws_api_gateway_client_certificate` ([#8775](https://github.com/hashicorp/terraform/issues/8775))
- * **New Resource:** `azurerm_servicebus_topic` ([#9151](https://github.com/hashicorp/terraform/issues/9151))
- * **New Resource:** `azurerm_servicebus_subscription` ([#9185](https://github.com/hashicorp/terraform/issues/9185))
- * **New Resource:** `aws_emr_cluster` ([#9106](https://github.com/hashicorp/terraform/issues/9106))
- * **New Resource:** `aws_emr_instance_group` ([#9106](https://github.com/hashicorp/terraform/issues/9106))
-
-IMPROVEMENTS:
- * helper/schema: Adding of MinItems as a validation to Lists and Maps ([#9216](https://github.com/hashicorp/terraform/issues/9216))
- * provider/aws: Add JSON validation to the `aws_cloudwatch_event_rule` resource ([#8897](https://github.com/hashicorp/terraform/issues/8897))
- * provider/aws: S3 bucket policies are imported as separate resources ([#8915](https://github.com/hashicorp/terraform/issues/8915))
- * provider/aws: S3 bucket policies can now be removed via the `aws_s3_bucket` resource ([#8915](https://github.com/hashicorp/terraform/issues/8915))
- * provider/aws: Added a `cluster_address` attribute to aws elasticache ([#8935](https://github.com/hashicorp/terraform/issues/8935))
- * provider/aws: Add JSON validation to the `aws_elasticsearch_domain` resource. ([#8898](https://github.com/hashicorp/terraform/issues/8898))
- * provider/aws: Add JSON validation to the `aws_kms_key` resource. ([#8900](https://github.com/hashicorp/terraform/issues/8900))
- * provider/aws: Add JSON validation to the `aws_s3_bucket_policy` resource. ([#8901](https://github.com/hashicorp/terraform/issues/8901))
- * provider/aws: Add JSON validation to the `aws_sns_topic` resource. ([#8902](https://github.com/hashicorp/terraform/issues/8902))
- * provider/aws: Add JSON validation to the `aws_sns_topic_policy` resource. ([#8903](https://github.com/hashicorp/terraform/issues/8903))
- * provider/aws: Add JSON validation to the `aws_sqs_queue` resource. ([#8904](https://github.com/hashicorp/terraform/issues/8904))
- * provider/aws: Add JSON validation to the `aws_sqs_queue_policy` resource. ([#8905](https://github.com/hashicorp/terraform/issues/8905))
- * provider/aws: Add JSON validation to the `aws_vpc_endpoint` resource. ([#8906](https://github.com/hashicorp/terraform/issues/8906))
- * provider/aws: Update `aws_cloudformation_stack` data source with new helper function. ([#8907](https://github.com/hashicorp/terraform/issues/8907))
- * provider/aws: Add JSON validation to the `aws_s3_bucket` resource. ([#8908](https://github.com/hashicorp/terraform/issues/8908))
- * provider/aws: Add support for `cloudwatch_logging_options` to Firehose Delivery Streams ([#8671](https://github.com/hashicorp/terraform/issues/8671))
- * provider/aws: Add HTTP/2 support via the http_version parameter to CloudFront distribution ([#8777](https://github.com/hashicorp/terraform/issues/8777))
- * provider/aws: Add `query_string_cache_keys` to allow for selective caching of CloudFront keys ([#8777](https://github.com/hashicorp/terraform/issues/8777))
- * provider/aws: Support Import `aws_elasticache_cluster` ([#9010](https://github.com/hashicorp/terraform/issues/9010))
- * provider/aws: Add support for tags to `aws_cloudfront_distribution` ([#9011](https://github.com/hashicorp/terraform/issues/9011))
- * provider/aws: Support Import `aws_opsworks_stack` ([#9124](https://github.com/hashicorp/terraform/issues/9124))
- * provider/aws: Support Import `aws_elasticache_replication_groups` ([#9140](https://github.com/hashicorp/terraform/issues/9140))
- * provider/aws: Add new aws api-gateway integration types ([#9213](https://github.com/hashicorp/terraform/issues/9213))
- * provider/aws: Import `aws_db_event_subscription` ([#9220](https://github.com/hashicorp/terraform/issues/9220))
- * provider/azurerm: Add normalizeJsonString and validateJsonString functions ([#8909](https://github.com/hashicorp/terraform/issues/8909))
- * provider/azurerm: Support AzureRM Sql Database DataWarehouse ([#9196](https://github.com/hashicorp/terraform/issues/9196))
- * provider/openstack: Use proxy environment variables for communication with services ([#8948](https://github.com/hashicorp/terraform/issues/8948))
- * provider/vsphere: Adding `detach_unknown_disks_on_delete` flag for VM resource ([#8947](https://github.com/hashicorp/terraform/issues/8947))
- * provisioner/chef: Add `skip_register` attribute to allow skipping the registering steps ([#9127](https://github.com/hashicorp/terraform/issues/9127))
-
-BUG FIXES:
- * core: Fixed variables not being in scope for destroy -target on modules ([#9021](https://github.com/hashicorp/terraform/issues/9021))
- * core: Fixed issue that prevented diffs from being properly generated in a specific resource schema scenario ([#8891](https://github.com/hashicorp/terraform/issues/8891))
- * provider/aws: Remove support for `ah` and `esp` literals in Security Group Ingress/Egress rules; you must use the actual protocol number for protocols other than `tcp`, `udp`, `icmp`, or `all` ([#8975](https://github.com/hashicorp/terraform/issues/8975))
- * provider/aws: Do not report drift for effect values differing only by case in AWS policies ([#9139](https://github.com/hashicorp/terraform/issues/9139))
- * provider/aws: VPC ID, Port, Protocol and Name change on aws_alb_target_group will ForceNew resource ([#8989](https://github.com/hashicorp/terraform/issues/8989))
- * provider/aws: Wait for Spot Fleet to drain before removing from state ([#8938](https://github.com/hashicorp/terraform/issues/8938))
- * provider/aws: Fix issue when importing `aws_eip` resources by IP address ([#8970](https://github.com/hashicorp/terraform/issues/8970))
- * provider/aws: Ensure that origin_access_identity is a required value within the CloudFront distribution s3_config block ([#8777](https://github.com/hashicorp/terraform/issues/8777))
- * provider/aws: Corrected Seoul S3 Website Endpoint format ([#9032](https://github.com/hashicorp/terraform/issues/9032))
- * provider/aws: Fix failed remove S3 lifecycle_rule ([#9031](https://github.com/hashicorp/terraform/issues/9031))
- * provider/aws: Fix crashing bug in `aws_ami` data source when using `name_regex` ([#9033](https://github.com/hashicorp/terraform/issues/9033))
- * provider/aws: Fix reading dimensions on cloudwatch alarms ([#9029](https://github.com/hashicorp/terraform/issues/9029))
- * provider/aws: Changing snapshot_identifier on aws_db_instance resource should force… ([#8806](https://github.com/hashicorp/terraform/issues/8806))
- * provider/aws: Refresh AWS EIP association from state when not found ([#9056](https://github.com/hashicorp/terraform/issues/9056))
- * provider/aws: Make encryption in Aurora instances computed-only ([#9060](https://github.com/hashicorp/terraform/issues/9060))
- * provider/aws: Make sure that VPC Peering Connection in a failed state returns an error. ([#9038](https://github.com/hashicorp/terraform/issues/9038))
- * provider/aws: guard against aws_route53_record delete panic ([#9049](https://github.com/hashicorp/terraform/issues/9049))
- * provider/aws: aws_db_option_group flattenOptions failing due to missing values ([#9052](https://github.com/hashicorp/terraform/issues/9052))
- * provider/aws: Add retry logic to the aws_ecr_repository delete func ([#9050](https://github.com/hashicorp/terraform/issues/9050))
- * provider/aws: Modifying the parameter_group_name of aws_elasticache_replication_group caused a panic ([#9101](https://github.com/hashicorp/terraform/issues/9101))
- * provider/aws: Fix issue with updating ELB subnets for subnets in the same AZ ([#9131](https://github.com/hashicorp/terraform/issues/9131))
- * provider/aws: aws_route53_record alias refresh manually updated record ([#9125](https://github.com/hashicorp/terraform/issues/9125))
- * provider/aws: Fix issue detaching volumes that were already detached ([#9023](https://github.com/hashicorp/terraform/issues/9023))
- * provider/aws: Add retry to the `aws_ssm_document` delete func ([#9188](https://github.com/hashicorp/terraform/issues/9188))
- * provider/aws: Fix issue updating `search_string` in aws_cloudwatch_metric_alarm ([#9230](https://github.com/hashicorp/terraform/issues/9230))
- * provider/aws: Update EFS resource to read performance mode and creation_token ([#9234](https://github.com/hashicorp/terraform/issues/9234))
- * provider/azurerm: fix resource ID parsing for subscriptions resources ([#9163](https://github.com/hashicorp/terraform/issues/9163))
- * provider/librato: Mandatory name and conditions attributes weren't being sent on Update unless changed ([#8984](https://github.com/hashicorp/terraform/issues/8984))
- * provisioner/chef: Fix an error with parsing certain `vault_json` content ([#9114](https://github.com/hashicorp/terraform/issues/9114))
- * provisioner/chef: Change the order in which the user key is cleaned up so this is done before the Chef run starts ([#9114](https://github.com/hashicorp/terraform/issues/9114))
-
-## 0.7.4 (September 19, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * In previous releases, the `private_key` field in the connection provisioner
-   inadvertently accepted a path argument and would read the file contents.
-   This functionality has been removed in this release ([#8577](https://github.com/hashicorp/terraform/issues/8577)), and the documented
-   method of using the `file()` interpolation function should be used to load
-   the key from a file (see the sketch below).
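
An illustration of the `file()` pattern for connection keys (editor's sketch; the AMI ID, user, and key path are made up):

```hcl
# Hypothetical sketch: `private_key` takes key *contents*, so a key on
# disk must be loaded explicitly with the file() interpolation function.
resource "aws_instance" "example" {
  ami           = "ami-123456" # made-up ID
  instance_type = "t2.micro"

  provisioner "remote-exec" {
    inline = ["echo connected"]

    connection {
      type        = "ssh"
      user        = "ubuntu"
      private_key = "${file("~/.ssh/id_rsa")}"
    }
  }
}
```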
-
-FEATURES:
- * **New Resource:** `aws_codecommit_trigger` ([#8751](https://github.com/hashicorp/terraform/issues/8751))
- * **New Resource:** `aws_default_security_group` ([#8861](https://github.com/hashicorp/terraform/issues/8861))
- * **New Remote State Backend:** `manta` ([#8830](https://github.com/hashicorp/terraform/issues/8830))
-
-IMPROVEMENTS:
- * provider/aws: Support 'publish' attribute in `lambda_function` ([#8653](https://github.com/hashicorp/terraform/issues/8653))
- * provider/aws: Add `reader_endpoint` RDS Clusters ([#8884](https://github.com/hashicorp/terraform/issues/8884))
- * provider/aws: Export AWS ELB service account ARN ([#8700](https://github.com/hashicorp/terraform/issues/8700))
- * provider/aws: Allow `aws_alb` to have the name auto-generated ([#8673](https://github.com/hashicorp/terraform/issues/8673))
- * provider/aws: Expose `arn_suffix` on `aws_alb` ([#8833](https://github.com/hashicorp/terraform/issues/8833))
- * provider/aws: Add JSON validation to the `aws_cloudformation_stack` resource ([#8896](https://github.com/hashicorp/terraform/issues/8896))
- * provider/aws: Add JSON validation to the `aws_glacier_vault` resource ([#8899](https://github.com/hashicorp/terraform/issues/8899))
- * provider/azurerm: support Diagnostics Profile ([#8277](https://github.com/hashicorp/terraform/issues/8277))
- * provider/google: Resources depending on the `network` attribute can now reference the network by `self_link` or `name` ([#8639](https://github.com/hashicorp/terraform/issues/8639))
- * provider/postgresql: The standard environment variables PGHOST, PGUSER, PGPASSWORD and PGSSLMODE are now supported for provider configuration ([#8666](https://github.com/hashicorp/terraform/issues/8666))
- * helper/resource: Add timeout duration to timeout error message ([#8773](https://github.com/hashicorp/terraform/issues/8773))
- * provisioner/chef: Support recreating Chef clients by setting `recreate_client=true` ([#8577](https://github.com/hashicorp/terraform/issues/8577))
- * provisioner/chef: Support encrypting existing Chef-Vaults for newly created clients ([#8577](https://github.com/hashicorp/terraform/issues/8577))
-
-BUG FIXES:
- * core: Fix regression when loading variables from json ([#8820](https://github.com/hashicorp/terraform/issues/8820))
- * provider/aws: Prevent crash creating an `aws_sns_topic` with an empty policy ([#8834](https://github.com/hashicorp/terraform/issues/8834))
- * provider/aws: Bump `aws_elasticsearch_domain` timeout values ([#672](https://github.com/hashicorp/terraform/issues/672))
- * provider/aws: `aws_nat_gateways` will now recreate on `failed` state ([#8689](https://github.com/hashicorp/terraform/issues/8689))
- * provider/aws: Prevent crash on account ID validation ([#8731](https://github.com/hashicorp/terraform/issues/8731))
- * provider/aws: `aws_db_instance` unexpected state when configuring enhanced monitoring ([#8707](https://github.com/hashicorp/terraform/issues/8707))
- * provider/aws: Remove region condition from `aws_codecommit_repository` ([#8778](https://github.com/hashicorp/terraform/issues/8778))
- * provider/aws: Support Policy DiffSuppression in `aws_kms_key` policy ([#8675](https://github.com/hashicorp/terraform/issues/8675))
- * provider/aws: Fix issue updating Elastic Beanstalk Environment variables ([#8848](https://github.com/hashicorp/terraform/issues/8848))
- * provider/scaleway: Fix `security_group_rule` identification ([#8661](https://github.com/hashicorp/terraform/issues/8661))
- * provider/cloudstack: Fix renaming a VPC with the `cloudstack_vpc` resource ([#8784](https://github.com/hashicorp/terraform/issues/8784))
-
-## 0.7.3 (September 5, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * Terraform now validates the uniqueness of variable and output names in your configurations. In prior versions certain ways of duplicating variable names would work. This is now a configuration error (and should've always been). If you get an error running Terraform you may need to remove the duplicates. Done right, this should not affect the behavior of Terraform.
- * The internal structure of `.terraform/modules` changed slightly. For configurations with modules, you'll need to run `terraform get` again.
-
-FEATURES:
- * **New Provider:** `rabbitmq` ([#7694](https://github.com/hashicorp/terraform/issues/7694))
- * **New Data Source:** `aws_cloudformation_stack` ([#8640](https://github.com/hashicorp/terraform/issues/8640))
- * **New Resource:** `aws_cloudwatch_log_stream` ([#8626](https://github.com/hashicorp/terraform/issues/8626))
- * **New Resource:** `aws_default_route_table` ([#8323](https://github.com/hashicorp/terraform/issues/8323))
- * **New Resource:** `aws_spot_datafeed_subscription` ([#8640](https://github.com/hashicorp/terraform/issues/8640))
- * **New Resource:** `aws_s3_bucket_policy` ([#8615](https://github.com/hashicorp/terraform/issues/8615))
- * **New Resource:** `aws_sns_topic_policy` ([#8654](https://github.com/hashicorp/terraform/issues/8654))
- * **New Resource:** `aws_sqs_queue_policy` ([#8657](https://github.com/hashicorp/terraform/issues/8657))
- * **New Resource:** `aws_ssm_association` ([#8376](https://github.com/hashicorp/terraform/issues/8376))
- * **New Resource:** `cloudstack_affinity_group` ([#8360](https://github.com/hashicorp/terraform/issues/8360))
- * **New Resource:** `librato_alert` ([#8170](https://github.com/hashicorp/terraform/issues/8170))
- * **New Resource:** `librato_service` ([#8170](https://github.com/hashicorp/terraform/issues/8170))
- * **New Remote State Backend:** `local` ([#8647](https://github.com/hashicorp/terraform/issues/8647))
- * Data source blocks can now have a count associated with them ([#8635](https://github.com/hashicorp/terraform/issues/8635))
- * The count of a resource can now be referenced for interpolations: `self.count` and `type.name.count` work (see the sketch below) ([#8581](https://github.com/hashicorp/terraform/issues/8581))
- * Provisioners now support connection using IPv6 in addition to IPv4 ([#6616](https://github.com/hashicorp/terraform/issues/6616))
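
A hedged sketch of referencing a resource's count (editor's addition; resource names and the AMI ID are made up):

```hcl
# Hypothetical sketch of the 0.7.3 count-reference feature.
resource "aws_instance" "web" {
  count         = 3
  ami           = "ami-123456" # made-up ID
  instance_type = "t2.micro"
}

output "web_count" {
  # The type.name.count form; self.count works similarly inside
  # the resource's own provisioners.
  value = "${aws_instance.web.count}"
}
```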
-
-IMPROVEMENTS:
- * core: Add wildcard (match all) support to `ignore_changes` (see the sketch after this list) ([#8599](https://github.com/hashicorp/terraform/issues/8599))
- * core: HTTP module sources can now use netrc files for auth
- * core: Show last resource state in a timeout error message ([#8510](https://github.com/hashicorp/terraform/issues/8510))
- * helper/schema: Add diff suppression callback ([#8585](https://github.com/hashicorp/terraform/issues/8585))
- * provider/aws: API Gateway Custom Authorizer ([#8535](https://github.com/hashicorp/terraform/issues/8535))
- * provider/aws: Add MemoryReservation To `aws_ecs_container_definition` data source ([#8437](https://github.com/hashicorp/terraform/issues/8437))
- * provider/aws: Add ability to Enable/Disable ELB Access logs ([#8438](https://github.com/hashicorp/terraform/issues/8438))
- * provider/aws: Add support for assuming a role prior to performing API operations ([#8638](https://github.com/hashicorp/terraform/issues/8638))
- * provider/aws: Export `arn` of `aws_autoscaling_group` ([#8503](https://github.com/hashicorp/terraform/issues/8503))
- * provider/aws: More robust handling of Lambda function archives hosted on S3 ([#6860](https://github.com/hashicorp/terraform/issues/6860))
- * provider/aws: Spurious diffs of `aws_s3_bucket` policy attributes due to JSON field ordering are reduced ([#8615](https://github.com/hashicorp/terraform/issues/8615))
- * provider/aws: `name_regex` attribute for local post-filtering of `aws_ami` data source results ([#8403](https://github.com/hashicorp/terraform/issues/8403))
- * provider/aws: Support for lifecycle hooks at ASG creation ([#5620](https://github.com/hashicorp/terraform/issues/5620))
- * provider/consul: Make provider settings truly optional ([#8551](https://github.com/hashicorp/terraform/issues/8551))
- * provider/statuscake: Add support for contact-group id in statuscake test ([#8417](https://github.com/hashicorp/terraform/issues/8417))
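
An illustration of the `ignore_changes` wildcard (editor's sketch; the resource and AMI ID are made up):

```hcl
# Hypothetical sketch: "*" tells Terraform to ignore drift on every
# attribute of this resource instead of listing attributes one by one.
resource "aws_instance" "example" {
  ami           = "ami-123456" # made-up ID
  instance_type = "t2.micro"

  lifecycle {
    ignore_changes = ["*"]
  }
}
```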
-BUG FIXES:
- * core: Changing a module source from file to VCS no longer errors ([#8398](https://github.com/hashicorp/terraform/issues/8398))
- * core: Configuration is now validated prior to input, fixing an obscure parse error when attempting to interpolate a count ([#8591](https://github.com/hashicorp/terraform/issues/8591))
- * core: JSON configuration with resources with a single key parses properly ([#8485](https://github.com/hashicorp/terraform/issues/8485))
- * core: States with duplicate modules are detected and an error is shown ([#8463](https://github.com/hashicorp/terraform/issues/8463))
- * core: Validate uniqueness of variables/outputs in a module ([#8482](https://github.com/hashicorp/terraform/issues/8482))
- * core: `-var` flag inputs starting with `/` work
- * core: `-var` flag inputs starting with a number now work, and the fix makes flag parsing more resilient to unusual inputs overall ([#8044](https://github.com/hashicorp/terraform/issues/8044))
- * provider/aws: Add AWS error message to retry APIGateway account update ([#8533](https://github.com/hashicorp/terraform/issues/8533))
- * provider/aws: Do not set empty string to state for `aws_vpn_gateway` availability zone ([#8645](https://github.com/hashicorp/terraform/issues/8645))
- * provider/aws: Adjust create and destroy timeouts in `aws_vpn_gateway_attachment` ([#8636](https://github.com/hashicorp/terraform/issues/8636))
- * provider/aws: Handle missing EFS mount target in `aws_efs_mount_target` ([#8529](https://github.com/hashicorp/terraform/issues/8529))
- * provider/aws: If an `aws_security_group` was used in a Lambda function, dangling ENIs created by the Lambda service may have prevented you from destroying the security group. These ENIs are now automatically cleaned up prior to deletion ([#8033](https://github.com/hashicorp/terraform/issues/8033))
- * provider/aws: Increase `aws_route_table` timeouts from 1 min to 2 mins ([#8465](https://github.com/hashicorp/terraform/issues/8465))
- * provider/aws: Increase `aws_rds_cluster` timeout to 40 minutes ([#8623](https://github.com/hashicorp/terraform/issues/8623))
- * provider/aws: Refresh `aws_route` from state if `aws_route_table` not found ([#8443](https://github.com/hashicorp/terraform/issues/8443))
- * provider/aws: Remove `aws_elasticsearch_domain` from state if it doesn't exist ([#8643](https://github.com/hashicorp/terraform/issues/8643))
- * provider/aws: Remove unsafe ptr dereferencing from ECS/ECR ([#8514](https://github.com/hashicorp/terraform/issues/8514))
- * provider/aws: Set `apply_method` to state in `aws_db_parameter_group` ([#8603](https://github.com/hashicorp/terraform/issues/8603))
- * provider/aws: Stop `aws_instance` `source_dest_check` triggering an API call on each terraform run ([#8450](https://github.com/hashicorp/terraform/issues/8450))
- * provider/aws: Wait for `aws_route_53_record` to be in-sync after a delete ([#8646](https://github.com/hashicorp/terraform/issues/8646))
- * provider/aws: `aws_volume_attachment` detachment errors are caught ([#8479](https://github.com/hashicorp/terraform/issues/8479))
- * provider/aws: Add resource retry to `aws_spot_instance_request` ([#8516](https://github.com/hashicorp/terraform/issues/8516))
- * provider/aws: Add validation of Health Check target to `aws_elb` ([#8578](https://github.com/hashicorp/terraform/issues/8578))
- * provider/aws: Skip detaching when `aws_internet_gateway` not found ([#8454](https://github.com/hashicorp/terraform/issues/8454))
- * provider/aws: Handle all kinds of CloudFormation stack failures ([#5606](https://github.com/hashicorp/terraform/issues/5606))
- * provider/azurerm: Reordering the checks after an Azure API Get ([#8607](https://github.com/hashicorp/terraform/issues/8607))
- * provider/chef: Fix "invalid header" errors that could occur ([#8382](https://github.com/hashicorp/terraform/issues/8382))
- * provider/github: Remove unsafe ptr dereferencing ([#8512](https://github.com/hashicorp/terraform/issues/8512))
- * provider/librato: Refresh space from state when not found ([#8596](https://github.com/hashicorp/terraform/issues/8596))
- * provider/mysql: Fix breakage in parsing MySQL version string ([#8571](https://github.com/hashicorp/terraform/issues/8571))
- * provider/template: `template_file` vars can be floating point ([#8590](https://github.com/hashicorp/terraform/issues/8590))
- * provider/triton: Fix bug where the ID of a `triton_key` was used prior to being set ([#8563](https://github.com/hashicorp/terraform/issues/8563))
-
-## 0.7.2 (August 25, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * provider/openstack: changes were made to how volumes attached to instances are detected. If you attached a volume to an instance outside of Terraform, it will be detached upon the next apply. You can resolve this by adding a `volume` entry for the attached volume.
- * provider/aws: `aws_spot_fleet_request` has changed the `associate_public_ip_address` default from `true` to `false`
-
-FEATURES:
- * **New Resource:** `aws_api_gateway_base_path_mapping` ([#8353](https://github.com/hashicorp/terraform/issues/8353))
- * **New Resource:** `aws_api_gateway_domain_name` ([#8353](https://github.com/hashicorp/terraform/issues/8353))
- * **New Resource:** `aws_ssm_document` ([#8460](https://github.com/hashicorp/terraform/issues/8460))
-
-IMPROVEMENTS:
- * core: Names generated with a unique prefix are now sortable based on age ([#8249](https://github.com/hashicorp/terraform/issues/8249))
- * provider/aws: Add Primary Endpoint Address attribute for `aws_elasticache_replication_group` ([#8385](https://github.com/hashicorp/terraform/issues/8385))
- * provider/aws: Add support for `network_mode` to `aws_ecs_task_definition` ([#8391](https://github.com/hashicorp/terraform/issues/8391))
- * provider/aws: Add support for LB target group to ECS service ([#8190](https://github.com/hashicorp/terraform/issues/8190))
- * provider/aws: Support Tags for `aws_alb` and `aws_alb_target_group` resources ([#8422](https://github.com/hashicorp/terraform/issues/8422))
- * provider/aws: Support `snapshot_name` for ElastiCache Cluster and Replication Groups ([#8419](https://github.com/hashicorp/terraform/issues/8419))
- * provider/aws: Add support to `aws_redshift_cluster` for restoring from snapshot ([#8414](https://github.com/hashicorp/terraform/issues/8414))
- * provider/aws: Add validation for `master_password` in `aws_redshift_cluster` ([#8434](https://github.com/hashicorp/terraform/issues/8434))
- * provider/openstack: Add `allowed_address_pairs` to `openstack_networking_port_v2` ([#8257](https://github.com/hashicorp/terraform/issues/8257))
-
-BUG FIXES:
- * core: fix crash case when malformed JSON given ([#8295](https://github.com/hashicorp/terraform/issues/8295))
- * core: when asking for input, spaces are allowed ([#8394](https://github.com/hashicorp/terraform/issues/8394))
- * core: module sources with URL encodings in the local file path won't error ([#8418](https://github.com/hashicorp/terraform/issues/8418))
- * command/apply: prefix destroying resources with module path ([#8396](https://github.com/hashicorp/terraform/issues/8396))
- * command/import: can import into specific indexes ([#8335](https://github.com/hashicorp/terraform/issues/8335))
- * command/push: -upload-modules=false works ([#8456](https://github.com/hashicorp/terraform/issues/8456))
- * command/state mv: nested modules can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304))
- * command/state mv: resources with a count > 1 can be moved ([#8304](https://github.com/hashicorp/terraform/issues/8304))
- * provider/aws: Refresh `aws_lambda_event_source_mapping` from state when NotFound ([#8378](https://github.com/hashicorp/terraform/issues/8378))
- * provider/aws: `aws_elasticache_replication_group_id` validation change ([#8381](https://github.com/hashicorp/terraform/issues/8381))
- * provider/aws: Fix possible crash if using duplicate Route53 records ([#8399](https://github.com/hashicorp/terraform/issues/8399))
- * provider/aws: Refresh `aws_autoscaling_policy` from state on 404 ([#8430](https://github.com/hashicorp/terraform/issues/8430))
- * provider/aws: Fix crash with VPC Peering connection accept/requests ([#8432](https://github.com/hashicorp/terraform/issues/8432))
- * provider/aws: AWS SpotFleet Requests now work with Subnets and AZs ([#8320](https://github.com/hashicorp/terraform/issues/8320))
- * provider/aws: Refresh `aws_cloudwatch_event_target` from state on `ResourceNotFoundException` ([#8442](https://github.com/hashicorp/terraform/issues/8442))
- * provider/aws: Validate that the `aws_iam_policy_attachment` Name parameter is not empty ([#8441](https://github.com/hashicorp/terraform/issues/8441))
- * provider/aws: Fix segmentation fault in `aws_api_gateway_base_path_mapping` resource ([#8466](https://github.com/hashicorp/terraform/issues/8466))
- * provider/google: fix crash regression from Terraform 0.7.1 on `google_compute_firewall` resource ([#8390](https://github.com/hashicorp/terraform/issues/8390))
- * provider/openstack: Volume Attachment and Detachment Fixes ([#8172](https://github.com/hashicorp/terraform/issues/8172))
-
-## 0.7.1 (August 19, 2016)
-
-FEATURES:
- * **New Command:** `terraform state rm` ([#8200](https://github.com/hashicorp/terraform/issues/8200))
- * **New Provider:** `archive` ([#7322](https://github.com/hashicorp/terraform/issues/7322))
- * **New Resource:** `aws_alb`; see the sketch after this list ([#8254](https://github.com/hashicorp/terraform/issues/8254))
- * **New Resource:** `aws_alb_listener` ([#8269](https://github.com/hashicorp/terraform/issues/8269))
- * **New Resource:** `aws_alb_target_group` ([#8254](https://github.com/hashicorp/terraform/issues/8254))
- * **New Resource:** `aws_alb_target_group_attachment` ([#8254](https://github.com/hashicorp/terraform/issues/8254))
- * **New Resource:** `aws_alb_target_group_rule` ([#8321](https://github.com/hashicorp/terraform/issues/8321))
- * **New Resource:** `aws_vpn_gateway_attachment` ([#7870](https://github.com/hashicorp/terraform/issues/7870))
- * **New Resource:** `aws_load_balancer_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458))
- * **New Resource:** `aws_load_balancer_backend_server_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458))
- * **New Resource:** `aws_load_balancer_listener_policy` ([#7458](https://github.com/hashicorp/terraform/issues/7458))
- * **New Resource:** `aws_lb_ssl_negotiation_policy` ([#8084](https://github.com/hashicorp/terraform/issues/8084))
- * **New Resource:** `aws_elasticache_replication_groups` ([#8275](https://github.com/hashicorp/terraform/issues/8275))
- * **New Resource:** `azurerm_virtual_network_peering` ([#8168](https://github.com/hashicorp/terraform/issues/8168))
- * **New Resource:** `azurerm_servicebus_namespace` ([#8195](https://github.com/hashicorp/terraform/issues/8195))
- * **New Resource:** `google_compute_image` ([#7960](https://github.com/hashicorp/terraform/issues/7960))
- * **New Resource:** `packet_volume` ([#8142](https://github.com/hashicorp/terraform/issues/8142))
- * **New Resource:** `consul_prepared_query` ([#7474](https://github.com/hashicorp/terraform/issues/7474))
- * **New Data Source:** `aws_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984))
- * **New Data Source:** `fastly_ip_ranges` ([#7984](https://github.com/hashicorp/terraform/issues/7984))
- * **New Data Source:** `aws_caller_identity` ([#8206](https://github.com/hashicorp/terraform/issues/8206))
- * **New Data Source:** `aws_elb_service_account` ([#8221](https://github.com/hashicorp/terraform/issues/8221))
- * **New Data Source:** `aws_redshift_service_account` ([#8224](https://github.com/hashicorp/terraform/issues/8224))
-
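As a rough illustration of how the new ALB resources compose, here is a minimal sketch wiring a load balancer to a target group through a listener. All names, subnet IDs, and the VPC ID are hypothetical placeholders:

```hcl
resource "aws_alb" "front" {
  name    = "front-alb"                            # hypothetical name
  subnets = ["subnet-aaaa1111", "subnet-bbbb2222"] # hypothetical subnet IDs
}

resource "aws_alb_target_group" "web" {
  name     = "web-tg"
  port     = 80
  protocol = "HTTP"
  vpc_id   = "vpc-cccc3333"                        # hypothetical VPC ID
}

resource "aws_alb_listener" "http" {
  load_balancer_arn = "${aws_alb.front.arn}"
  port              = 80
  protocol          = "HTTP"

  # forward incoming requests to the target group above
  default_action {
    target_group_arn = "${aws_alb_target_group.web.arn}"
    type             = "forward"
  }
}
```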
-IMPROVEMENTS:
- * provider/archive: Support folders in output_path ([#8278](https://github.com/hashicorp/terraform/issues/8278))
- * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) ([#7860](https://github.com/hashicorp/terraform/issues/7860))
- * provider/aws: Add support for TargetGroups (`aws_alb_target_groups`) to `aws_autoscaling_group` [8327]
- * provider/aws: CloudWatch Metrics are now supported for `aws_route53_health_check` resources ([#8319](https://github.com/hashicorp/terraform/issues/8319))
- * provider/aws: Query all pages of group membership ([#6726](https://github.com/hashicorp/terraform/issues/6726))
- * provider/aws: Query all pages of IAM Policy attachments ([#7779](https://github.com/hashicorp/terraform/issues/7779))
- * provider/aws: Change the way ARNs are built ([#7151](https://github.com/hashicorp/terraform/issues/7151))
- * provider/aws: Add support for Elasticsearch destination to firehose delivery streams ([#7839](https://github.com/hashicorp/terraform/issues/7839))
- * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` ([#7891](https://github.com/hashicorp/terraform/issues/7891))
- * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` ([#8038](https://github.com/hashicorp/terraform/issues/8038))
- * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` ([#8065](https://github.com/hashicorp/terraform/issues/8065))
- * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` ([#8091](https://github.com/hashicorp/terraform/issues/8091))
- * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check ([#7874](https://github.com/hashicorp/terraform/issues/7874))
- * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated ([#7794](https://github.com/hashicorp/terraform/issues/7794))
- * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` ([#8087](https://github.com/hashicorp/terraform/issues/8087))
- * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options ([#7871](https://github.com/hashicorp/terraform/issues/7871))
- * provider/aws: Add ability to set Storage Class in `aws_s3_bucket_object` ([#8174](https://github.com/hashicorp/terraform/issues/8174))
- * provider/aws: Treat `aws_lambda_function` w/ empty `subnet_ids` & `security_groups_ids` in `vpc_config` as VPC-disabled function ([#6191](https://github.com/hashicorp/terraform/issues/6191))
- * provider/aws: Allow `source_ids` in `aws_db_event_subscription` to be Updatable ([#7892](https://github.com/hashicorp/terraform/issues/7892))
- * provider/aws: Make `aws_efs_mount_target` creation fail for 2+ targets per AZ ([#8205](https://github.com/hashicorp/terraform/issues/8205))
- * provider/aws: Add `force_destroy` option to `aws_route53_zone` ([#8239](https://github.com/hashicorp/terraform/issues/8239))
- * provider/aws: Support import of `aws_s3_bucket` ([#8262](https://github.com/hashicorp/terraform/issues/8262))
- * provider/aws: Increase timeout for retrying creation of IAM role ([#7733](https://github.com/hashicorp/terraform/issues/7733))
- * provider/aws: Add ability to set peering options in `aws_vpc_peering_connection` ([#8310](https://github.com/hashicorp/terraform/issues/8310))
- * provider/azure: add custom_data argument for azure_instance resource ([#8158](https://github.com/hashicorp/terraform/issues/8158))
- * provider/azurerm: Adds support for uploading blobs to azure storage from local source ([#7994](https://github.com/hashicorp/terraform/issues/7994))
- * provider/azurerm: Storage blob contents can be copied from an existing blob ([#8126](https://github.com/hashicorp/terraform/issues/8126))
- * provider/datadog: Allow `tags` to be configured for monitor resources. ([#8284](https://github.com/hashicorp/terraform/issues/8284))
- * provider/google: allows atomic Cloud DNS record changes ([#6575](https://github.com/hashicorp/terraform/issues/6575))
- * provider/google: Move URLMap hosts to TypeSet from TypeList ([#7472](https://github.com/hashicorp/terraform/issues/7472))
- * provider/google: Support static private IP addresses in `resource_compute_instance` ([#6310](https://github.com/hashicorp/terraform/issues/6310))
- * provider/google: Add support for using a GCP Image Family ([#8083](https://github.com/hashicorp/terraform/issues/8083))
- * provider/openstack: Support updating the External Gateway assigned to a Neutron router ([#8070](https://github.com/hashicorp/terraform/issues/8070))
- * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` ([#8155](https://github.com/hashicorp/terraform/issues/8155))
- * provider/openstack: Add `value_specs` param on `openstack_networking_subnet_v2` ([#8181](https://github.com/hashicorp/terraform/issues/8181))
- * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` ([#7908](https://github.com/hashicorp/terraform/issues/7908))
- * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` ([#7916](https://github.com/hashicorp/terraform/issues/7916))
- * provider/vsphere: Standardizing datastore references to use builtin Path func ([#8075](https://github.com/hashicorp/terraform/issues/8075))
- * provider/consul: add tls config support to consul provider ([#7015](https://github.com/hashicorp/terraform/issues/7015))
- * remote/consul: Support setting datacenter when using consul remote state ([#8102](https://github.com/hashicorp/terraform/issues/8102))
- * provider/google: Support import of `google_compute_instance_template` ([#8147](https://github.com/hashicorp/terraform/issues/8147)), `google_compute_firewall` ([#8236](https://github.com/hashicorp/terraform/issues/8236)), `google_compute_target_pool` ([#8133](https://github.com/hashicorp/terraform/issues/8133)), `google_compute_forwarding_rule` ([#8122](https://github.com/hashicorp/terraform/issues/8122)), `google_compute_http_health_check` ([#8121](https://github.com/hashicorp/terraform/issues/8121)), `google_compute_autoscaler` ([#8115](https://github.com/hashicorp/terraform/issues/8115))
-
-BUG FIXES:
- * core: Fix issue preventing `taint` from working with resources that had no other attributes in their diff ([#8167](https://github.com/hashicorp/terraform/issues/8167))
- * core: CLI will only run exact match commands ([#7983](https://github.com/hashicorp/terraform/issues/7983))
- * core: Fix panic when a resource ends up null in the state file ([#8120](https://github.com/hashicorp/terraform/issues/8120))
- * core: Fix panic when validating a count with an unprefixed variable ([#8243](https://github.com/hashicorp/terraform/issues/8243))
- * core: Divide by zero in interpolations no longer panics ([#7701](https://github.com/hashicorp/terraform/issues/7701))
- * core: Fix panic on some invalid interpolation syntax ([#5672](https://github.com/hashicorp/terraform/issues/5672))
- * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` ([#7966](https://github.com/hashicorp/terraform/issues/7966))
- * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation ([#7997](https://github.com/hashicorp/terraform/issues/7997))
- * provider/aws: Prevent errors when `aws_s3_bucket` `acceleration_status` is not available in a given region ([#7999](https://github.com/hashicorp/terraform/issues/7999))
- * provider/aws: Add state filter to `aws_availability_zones` data source ([#7965](https://github.com/hashicorp/terraform/issues/7965))
- * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` ([#7995](https://github.com/hashicorp/terraform/issues/7995))
- * provider/aws: Retry association of IAM Role & instance profile ([#7938](https://github.com/hashicorp/terraform/issues/7938))
- * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action ([#7883](https://github.com/hashicorp/terraform/issues/7883))
- * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings ([#7777](https://github.com/hashicorp/terraform/issues/7777))
- * provider/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes ([#8052](https://github.com/hashicorp/terraform/issues/8052))
- * provider/aws: Update ElasticTranscoder to allow empty notifications, removing notifications, etc ([#8207](https://github.com/hashicorp/terraform/issues/8207))
- * provider/aws: Fix line ending errors/diffs with IAM Server Certs ([#8074](https://github.com/hashicorp/terraform/issues/8074))
- * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs ([#6956](https://github.com/hashicorp/terraform/issues/6956))
- * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` ([#8096](https://github.com/hashicorp/terraform/issues/8096))
- * provider/aws: allow numeric characters in RedshiftClusterDbName ([#8178](https://github.com/hashicorp/terraform/issues/8178))
- * provider/aws: `aws_security_group` now creates tags as early as possible in the process ([#7849](https://github.com/hashicorp/terraform/issues/7849))
- * provider/aws: Defensively code around `db_security_group` ingress rules ([#7893](https://github.com/hashicorp/terraform/issues/7893))
- * provider/aws: Fix panic in `aws_spot_fleet_request` on missing subnet_id or availability_zone ([#8217](https://github.com/hashicorp/terraform/issues/8217))
- * provider/aws: Terraform fails during Redshift delete if FinalSnapshot is being taken. ([#8270](https://github.com/hashicorp/terraform/issues/8270))
- * provider/azurerm: `azurerm_storage_account` will interrupt for Ctrl-C ([#8215](https://github.com/hashicorp/terraform/issues/8215))
- * provider/azurerm: Public IP - Setting idle timeout value caused panic. #8283
- * provider/digitalocean: trim whitespace from ssh key ([#8173](https://github.com/hashicorp/terraform/issues/8173))
- * provider/digitalocean: Enforce Lowercase on IPV6 Addresses ([#7652](https://github.com/hashicorp/terraform/issues/7652))
- * provider/google: Use resource specific project when making queries/changes ([#7029](https://github.com/hashicorp/terraform/issues/7029))
- * provider/google: Fix read for the backend service resource ([#7476](https://github.com/hashicorp/terraform/issues/7476))
- * provider/mysql: `mysql_user` works with MySQL versions before 5.7.6 ([#8251](https://github.com/hashicorp/terraform/issues/8251))
- * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource ([#8179](https://github.com/hashicorp/terraform/issues/8179))
- * provider/vSphere: Fix for IPv6 only environment creation ([#7643](https://github.com/hashicorp/terraform/issues/7643))
- * provider/google: Correct update process for authorized networks in `google_sql_database_instance` ([#8290](https://github.com/hashicorp/terraform/issues/8290))
-
-## 0.7.0 (August 2, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * Terraform Core
-   * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7.
-   * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.)
-   * The `concat()` interpolation function can no longer be used to join strings.
-   * Quotation marks may no longer be escaped in HIL expressions ([#7201](https://github.com/hashicorp/terraform/issues/7201))
-   * Lists materialized using splat syntax, for example `aws_instance.foo.*.id`, are now ordered by the count index rather than lexicographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour.
-   * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`; see the sketch after this list.
-   * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`.
- * AWS Provider
-   * `aws_elb` now defaults `cross_zone_load_balancing` to `true`
-   * `aws_instance`: EC2 Classic users may continue to use `security_groups` to reference Security Groups by their `name`. Users who are managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, and reference the security groups by their `id`. Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065
-   * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Check out the documentation on [AWS Kinesis Firehose](http://localhost:4567/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information ([#7375](https://github.com/hashicorp/terraform/issues/7375))
-   * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. With these additions we've renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . ([#6954](https://github.com/hashicorp/terraform/issues/6954))
-   * `aws_db_instance` now defaults `publicly_accessible` to false
- * Microsoft Azure Provider
-   * In documentation, the "Azure (Resource Manager)" provider has been renamed to the "Microsoft Azure" provider.
-   * `azurerm_dns_cname_record` now accepts a single record rather than a list of records
-   * `azurerm_virtual_machine` `computer_name` is now required
- * Openstack Provider
-   * `openstack_networking_subnet_v2` now defaults to turning DHCP on.
-   * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered.
-   * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please use the new `openstack_lb_member_v1` resource.
- * Docker Provider
-   * `keep_updated` parameter removed from `docker_image` - This parameter never did what it was supposed to do. See relevant docs, specifically `pull_trigger` & new `docker_registry_image` data source to understand how to keep your `docker_image` updated.
- * Atlas Provider
-   * `atlas_artifact` resource has been deprecated. Please use the new `atlas_artifact` Data Source.
- * CloudStack Provider
-   * All deprecated parameters are removed from all `CloudStack` resources
-
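The map-access change above is the one most likely to require edits to existing configurations. A minimal before/after sketch, using hypothetical AMI IDs:

```hcl
variable "amis" {
  type = "map"                 # maps are now first-class types
  default = {
    east = "ami-aaaa1111"      # hypothetical AMI IDs
    west = "ami-bbbb2222"
  }
}

# 0.6 style, no longer valid in 0.7:  "${var.amis.east}"

output "ami_east" {
  value = "${var.amis["east"]}"           # new index syntax
}

output "ami_west" {
  value = "${lookup(var.amis, "west")}"   # lookup() still works
}
```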
-FEATURES:
-
- * **Data sources** are a new kind of primitive in Terraform. Attributes for data sources are refreshed and available during the planning stage; see the sketch after this list. ([#6598](https://github.com/hashicorp/terraform/issues/6598))
- * **Lists and maps** can now be used as first class types for variables and may also be passed between modules. ([#6322](https://github.com/hashicorp/terraform/issues/6322))
- * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. ([#5811](https://github.com/hashicorp/terraform/issues/5811))
- * **State Import** allows a way to import existing resources into Terraform state for many types of resource. Initial coverage of AWS is quite high, and it is straightforward to add support for new resources.
- * **New Command:** `terraform state` to provide access to a variety of state manipulation functions ([#5811](https://github.com/hashicorp/terraform/issues/5811))
- * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs ([#7608](https://github.com/hashicorp/terraform/issues/7608))
- * **New Data Source:** `aws_ami` ([#6911](https://github.com/hashicorp/terraform/issues/6911))
- * **New Data Source:** `aws_availability_zones` ([#6805](https://github.com/hashicorp/terraform/issues/6805))
- * **New Data Source:** `aws_iam_policy_document` ([#6881](https://github.com/hashicorp/terraform/issues/6881))
- * **New Data Source:** `aws_s3_bucket_object` ([#6946](https://github.com/hashicorp/terraform/issues/6946))
- * **New Data Source:** `aws_ecs_container_definition` ([#7230](https://github.com/hashicorp/terraform/issues/7230))
- * **New Data Source:** `atlas_artifact` ([#7419](https://github.com/hashicorp/terraform/issues/7419))
- * **New Data Source:** `docker_registry_image` ([#7000](https://github.com/hashicorp/terraform/issues/7000))
- * **New Data Source:** `consul_keys` ([#7678](https://github.com/hashicorp/terraform/issues/7678))
- * **New Interpolation Function:** `sort` ([#7128](https://github.com/hashicorp/terraform/issues/7128))
- * **New Interpolation Function:** `distinct` ([#7174](https://github.com/hashicorp/terraform/issues/7174))
- * **New Interpolation Function:** `list` ([#7528](https://github.com/hashicorp/terraform/issues/7528))
- * **New Interpolation Function:** `map` ([#7832](https://github.com/hashicorp/terraform/issues/7832))
- * **New Provider:** `grafana` ([#6206](https://github.com/hashicorp/terraform/issues/6206))
- * **New Provider:** `logentries` ([#7067](https://github.com/hashicorp/terraform/issues/7067))
- * **New Provider:** `scaleway` ([#7331](https://github.com/hashicorp/terraform/issues/7331))
- * **New Provider:** `random` - allows generation of random values without constantly generating diffs ([#6672](https://github.com/hashicorp/terraform/issues/6672))
- * **New Remote State Provider:** - `gcs` - Google Cloud Storage ([#6814](https://github.com/hashicorp/terraform/issues/6814))
- * **New Remote State Provider:** - `azure` - Microsoft Azure Storage ([#7064](https://github.com/hashicorp/terraform/issues/7064))
- * **New Resource:** `aws_elb_attachment` ([#6879](https://github.com/hashicorp/terraform/issues/6879))
- * **New Resource:** `aws_elastictranscoder_preset` ([#6965](https://github.com/hashicorp/terraform/issues/6965))
- * **New Resource:** `aws_elastictranscoder_pipeline` ([#6965](https://github.com/hashicorp/terraform/issues/6965))
- * **New Resource:** `aws_iam_group_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858))
- * **New Resource:** `aws_iam_role_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858))
- * **New Resource:** `aws_iam_user_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858))
- * **New Resource:** `aws_rds_cluster_parameter_group` ([#5269](https://github.com/hashicorp/terraform/issues/5269))
- * **New Resource:** `aws_spot_fleet_request` ([#7243](https://github.com/hashicorp/terraform/issues/7243))
- * **New Resource:** `aws_ses_active_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387))
- * **New Resource:** `aws_ses_receipt_filter` ([#5387](https://github.com/hashicorp/terraform/issues/5387))
- * **New Resource:** `aws_ses_receipt_rule` ([#5387](https://github.com/hashicorp/terraform/issues/5387))
- * **New Resource:** `aws_ses_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387))
- * **New Resource:** `aws_simpledb_domain` ([#7600](https://github.com/hashicorp/terraform/issues/7600))
- * **New Resource:** `aws_opsworks_user_profile` ([#6304](https://github.com/hashicorp/terraform/issues/6304))
- * **New Resource:** `aws_opsworks_permission` ([#6304](https://github.com/hashicorp/terraform/issues/6304))
- * **New Resource:** `aws_ami_launch_permission` ([#7365](https://github.com/hashicorp/terraform/issues/7365))
- * **New Resource:** `aws_appautoscaling_policy` ([#7663](https://github.com/hashicorp/terraform/issues/7663))
- * **New Resource:** `aws_appautoscaling_target` ([#7663](https://github.com/hashicorp/terraform/issues/7663))
- * **New Resource:** `openstack_blockstorage_volume_v2` ([#6693](https://github.com/hashicorp/terraform/issues/6693))
- * **New Resource:** `openstack_lb_loadbalancer_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012))
- * **New Resource:** `openstack_lb_listener_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012))
- * **New Resource:** `openstack_lb_pool_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012))
- * **New Resource:** `openstack_lb_member_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012))
- * **New Resource:** `openstack_lb_monitor_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012))
- * **New Resource:** `vsphere_virtual_disk` ([#6273](https://github.com/hashicorp/terraform/issues/6273))
- * **New Resource:** `github_repository_collaborator` ([#6861](https://github.com/hashicorp/terraform/issues/6861))
- * **New Resource:** `datadog_timeboard` ([#6900](https://github.com/hashicorp/terraform/issues/6900))
- * **New Resource:** `digitalocean_tag` ([#7500](https://github.com/hashicorp/terraform/issues/7500))
- * **New Resource:** `digitalocean_volume` ([#7560](https://github.com/hashicorp/terraform/issues/7560))
- * **New Resource:** `consul_agent_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508))
- * **New Resource:** `consul_catalog_entry` ([#7508](https://github.com/hashicorp/terraform/issues/7508))
- * **New Resource:** `consul_node` ([#7508](https://github.com/hashicorp/terraform/issues/7508))
- * **New Resource:** `consul_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508))
- * **New Resource:** `mysql_grant` ([#7656](https://github.com/hashicorp/terraform/issues/7656))
- * **New Resource:** `mysql_user` ([#7656](https://github.com/hashicorp/terraform/issues/7656))
- * **New Resource:** `azurerm_storage_table` ([#7327](https://github.com/hashicorp/terraform/issues/7327))
- * **New Resource:** `azurerm_virtual_machine_scale_set` ([#6711](https://github.com/hashicorp/terraform/issues/6711))
- * **New Resource:** `azurerm_traffic_manager_endpoint` ([#7826](https://github.com/hashicorp/terraform/issues/7826))
- * **New Resource:** `azurerm_traffic_manager_profile` ([#7826](https://github.com/hashicorp/terraform/issues/7826))
- * core: Tainted resources now show up in the plan and respect dependency ordering ([#6600](https://github.com/hashicorp/terraform/issues/6600))
- * core: The `lookup` interpolation function can now have a default fall-back value specified ([#6884](https://github.com/hashicorp/terraform/issues/6884))
- * core: The `terraform plan` command no longer persists state. ([#6811](https://github.com/hashicorp/terraform/issues/6811))
-
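Since data sources are the headline feature here, a minimal sketch of the pattern may help: a `data` block is read during planning, and its attributes feed resources. The filter value below is a hypothetical name pattern:

```hcl
data "aws_ami" "ubuntu" {
  most_recent = true

  filter {
    name   = "name"
    values = ["ubuntu/images/hvm-ssd/*"]   # hypothetical name pattern
  }
}

resource "aws_instance" "web" {
  # resolved during `terraform plan`, before any resource is created
  ami           = "${data.aws_ami.ubuntu.id}"
  instance_type = "t2.micro"
}
```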
DB parameter group with RDS Cluster Instances (Aurora) ([#6865](https://github.com/hashicorp/terraform/issues/6865)) - * provider/aws: Add `name_prefix` to `aws_iam_instance_profile` and `aws_iam_role` ([#6939](https://github.com/hashicorp/terraform/issues/6939)) - * provider/aws: Allow authentication & credentials validation for federated IAM Roles and EC2 instance profiles ([#6536](https://github.com/hashicorp/terraform/issues/6536)) - * provider/aws: Rename parameter_group_name to db_cluster_parameter_group_name ([#7083](https://github.com/hashicorp/terraform/issues/7083)) - * provider/aws: Retry RouteTable Route/Assocation creation ([#7156](https://github.com/hashicorp/terraform/issues/7156)) - * provider/aws: `delegation_set_id` conflicts w/ `vpc_id` in `aws_route53_zone` as delegation sets can only be used for public zones ([#7213](https://github.com/hashicorp/terraform/issues/7213)) - * provider/aws: Support Elastic Beanstalk scheduledaction ([#7376](https://github.com/hashicorp/terraform/issues/7376)) - * provider/aws: Add support for NewInstancesProtectedFromScaleIn to `aws_autoscaling_group` ([#6490](https://github.com/hashicorp/terraform/issues/6490)) - * provider/aws: Added support for `snapshot_identifier` parameter in aws_rds_cluster ([#7158](https://github.com/hashicorp/terraform/issues/7158)) - * provider/aws: Add inplace edit/update DB Security Group Rule Ingress ([#7245](https://github.com/hashicorp/terraform/issues/7245)) - * provider/aws: Added support for redshift destination to firehose delivery streams ([#7375](https://github.com/hashicorp/terraform/issues/7375)) - * provider/aws: Allow `aws_redshift_security_group` ingress rules to change ([#5939](https://github.com/hashicorp/terraform/issues/5939)) - * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` ([#7181](https://github.com/hashicorp/terraform/issues/7181)) - * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint ([#7511](https://github.com/hashicorp/terraform/issues/7511)) - * provider/aws: Retry creation of IAM role depending on new IAM user ([#7324](https://github.com/hashicorp/terraform/issues/7324)) - * provider/aws: Allow `port` on `aws_db_instance` to be updated ([#7441](https://github.com/hashicorp/terraform/issues/7441)) - * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs ([#7470](https://github.com/hashicorp/terraform/issues/7470)) - * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition ([#7653](https://github.com/hashicorp/terraform/issues/7653)) - * provider/aws: Support Tags on `aws_rds_cluster` ([#7695](https://github.com/hashicorp/terraform/issues/7695)) - * provider/aws: Support kms_key_id for `aws_rds_cluster` ([#7662](https://github.com/hashicorp/terraform/issues/7662)) - * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` ([#7523](https://github.com/hashicorp/terraform/issues/7523)) - * provider/aws: Add support for Kinesis streams shard-level metrics ([#7684](https://github.com/hashicorp/terraform/issues/7684)) - * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` ([#7364](https://github.com/hashicorp/terraform/issues/7364)) - * provider/aws: expose network interface id in `aws_instance` 
([#6751](https://github.com/hashicorp/terraform/issues/6751)) - * provider/aws: Adding passthrough behavior for API Gateway integration ([#7801](https://github.com/hashicorp/terraform/issues/7801)) - * provider/aws: Enable Redshift Cluster Logging ([#7813](https://github.com/hashicorp/terraform/issues/7813)) - * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` ([#7791](https://github.com/hashicorp/terraform/issues/7791)) - * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` ([#6807](https://github.com/hashicorp/terraform/issues/6807)) - * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys ([#6742](https://github.com/hashicorp/terraform/issues/6742)) - * provider/azurerm: The Azure SDK now exposes better error messages ([#6976](https://github.com/hashicorp/terraform/issues/6976)) - * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` ([#7434](https://github.com/hashicorp/terraform/issues/7434)) - * provider/azurerm: dump entire Request/Response in autorest Decorator ([#7719](https://github.com/hashicorp/terraform/issues/7719)) - * provider/azurerm: add option to delete VMs Data disks on termination ([#7793](https://github.com/hashicorp/terraform/issues/7793)) - * provider/clc: Add support for hyperscale and bareMetal server types and package installation - * provider/clc: Fix optional server password ([#6414](https://github.com/hashicorp/terraform/issues/6414)) - * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` ([#6898](https://github.com/hashicorp/terraform/issues/6898)) - * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier ([#6741](https://github.com/hashicorp/terraform/issues/6741)) - * provider/cloudstack: Improve ACL swapping ([#7315](https://github.com/hashicorp/terraform/issues/7315)) - * provider/cloudstack: Add project support to `cloudstack_network_acl` and `cloudstack_network_acl_rule` ([#7612](https://github.com/hashicorp/terraform/issues/7612)) - * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` ([#7070](https://github.com/hashicorp/terraform/issues/7070)) - * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` ([#7074](https://github.com/hashicorp/terraform/issues/7074)) - * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` ([#7240](https://github.com/hashicorp/terraform/issues/7240)) - * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` ([#7242](https://github.com/hashicorp/terraform/issues/7242)) - * provider/datadog: Add support for 'require full window' and 'locked' ([#6738](https://github.com/hashicorp/terraform/issues/6738)) - * provider/docker: Docker Container DNS Setting Enhancements ([#7392](https://github.com/hashicorp/terraform/issues/7392)) - * provider/docker: Add `destroy_grace_seconds` option to stop container before delete ([#7513](https://github.com/hashicorp/terraform/issues/7513)) - * provider/docker: Add `pull_trigger` option to `docker_image` to trigger pulling layers of a given image ([#7000](https://github.com/hashicorp/terraform/issues/7000)) - * provider/fastly: Add support for Cache Settings 
([#6781](https://github.com/hashicorp/terraform/issues/6781)) - * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources ([#6622](https://github.com/hashicorp/terraform/issues/6622)) - * provider/fastly: Add support for custom VCL configuration ([#6662](https://github.com/hashicorp/terraform/issues/6662)) - * provider/google: Support optional uuid naming for Instance Template ([#6604](https://github.com/hashicorp/terraform/issues/6604)) - * provider/openstack: Add support for client certificate authentication ([#6279](https://github.com/hashicorp/terraform/issues/6279)) - * provider/openstack: Allow Neutron-based Floating IP to target a specific tenant ([#6454](https://github.com/hashicorp/terraform/issues/6454)) - * provider/openstack: Enable DHCP By Default ([#6838](https://github.com/hashicorp/terraform/issues/6838)) - * provider/openstack: Implement fixed_ip on Neutron floating ip allocations ([#6837](https://github.com/hashicorp/terraform/issues/6837)) - * provider/openstack: Increase timeouts for image resize, subnets, and routers ([#6764](https://github.com/hashicorp/terraform/issues/6764)) - * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource ([#6919](https://github.com/hashicorp/terraform/issues/6919)) - * provider/openstack: Enforce `ForceNew` on Instance Block Device ([#6921](https://github.com/hashicorp/terraform/issues/6921)) - * provider/openstack: Can now stop instances before destroying them ([#7184](https://github.com/hashicorp/terraform/issues/7184)) - * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion ([#6997](https://github.com/hashicorp/terraform/issues/6997)) - * provider/powerdns: Add support for PowerDNS 4 API ([#7819](https://github.com/hashicorp/terraform/issues/7819)) - * provider/triton: add `triton_machine` `domain names` ([#7149](https://github.com/hashicorp/terraform/issues/7149)) - * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` ([#6785](https://github.com/hashicorp/terraform/issues/6785)) - * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip ([#6377](https://github.com/hashicorp/terraform/issues/6377)) - * provider/vsphere: Virtual machine update disk ([#6619](https://github.com/hashicorp/terraform/issues/6619)) - * provider/vsphere: `vsphere_virtual_machine` adding controller creation logic ([#6853](https://github.com/hashicorp/terraform/issues/6853)) - * provider/vsphere: `vsphere_virtual_machine` added support for `mac address` on `network_interface` ([#6966](https://github.com/hashicorp/terraform/issues/6966)) - * provider/vsphere: Enhanced `vsphere` logging capabilities ([#6893](https://github.com/hashicorp/terraform/issues/6893)) - * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` ([#7088](https://github.com/hashicorp/terraform/issues/7088)) - * provider/vsphere: Virtual Machine and File resources handle Read errors properley ([#7220](https://github.com/hashicorp/terraform/issues/7220)) - * provider/vsphere: set uuid as `vsphere_virtual_machine` output ([#4382](https://github.com/hashicorp/terraform/issues/4382)) - * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` ([#7169](https://github.com/hashicorp/terraform/issues/7169)) - * provider/vsphere: Add support for 
additional `vsphere_virtial_machine` SCSI controller types ([#7525](https://github.com/hashicorp/terraform/issues/7525)) - * provisioner/file: File provisioners may now have file content set as an attribute ([#7561](https://github.com/hashicorp/terraform/issues/7561)) - -BUG FIXES: - - * core: Correct the previous fix for a bug causing "attribute not found" messages during destroy, as it was insufficient ([#6599](https://github.com/hashicorp/terraform/issues/6599)) - * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules ([#6833](https://github.com/hashicorp/terraform/issues/6833)) - * core: Fix "diffs didn't match during apply" error for computed sets ([#7205](https://github.com/hashicorp/terraform/issues/7205)) - * core: Fix issue where `terraform init .` would truncate existing files ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Don't compare diffs between maps with computed values ([#7249](https://github.com/hashicorp/terraform/issues/7249)) - * core: Don't copy existing files over themselves when fetching modules ([#7273](https://github.com/hashicorp/terraform/issues/7273)) - * core: Always increment the state serial number when upgrading the version ([#7402](https://github.com/hashicorp/terraform/issues/7402)) - * core: Fix a crash during eval when we're upgrading an empty state ([#7403](https://github.com/hashicorp/terraform/issues/7403)) - * core: Honor the `-state-out` flag when applying with a plan file ([#7443](https://github.com/hashicorp/terraform/issues/7443)) - * core: Fix a panic when a `terraform_remote_state` data source doesn't exist ([#7464](https://github.com/hashicorp/terraform/issues/7464)) - * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources ([#7563](https://github.com/hashicorp/terraform/issues/7563)) - * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected ([#7530](https://github.com/hashicorp/terraform/issues/7530)) - * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources ([#6829](https://github.com/hashicorp/terraform/issues/6829)) - * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation ([#7227](https://github.com/hashicorp/terraform/issues/7227)) - * provider/aws: Fix crash in `aws_elasticache_parameter_group` occuring following edits in the console ([#6687](https://github.com/hashicorp/terraform/issues/6687)) - * provider/aws: Fix issue reattaching a VPN gateway to a VPC ([#6987](https://github.com/hashicorp/terraform/issues/6987)) - * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations ([#6512](https://github.com/hashicorp/terraform/issues/6512)) - * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) ([#6761](https://github.com/hashicorp/terraform/issues/6761)) - * provider/aws: Increased lambda event mapping creation timeout ([#7657](https://github.com/hashicorp/terraform/issues/7657)) - * provider/aws: Handle spurious failures in resourceAwsSecurityGroupRuleRead ([#7377](https://github.com/hashicorp/terraform/issues/7377)) - * provider/aws: Make 'stage_name' required in 
api_gateway_deployment ([#6797](https://github.com/hashicorp/terraform/issues/6797))
- * provider/aws: Mark Lambda function as gone when it's gone ([#6924](https://github.com/hashicorp/terraform/issues/6924))
- * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs ([#6592](https://github.com/hashicorp/terraform/issues/6592))
- * provider/aws: Update Lambda functions on name change ([#7081](https://github.com/hashicorp/terraform/issues/7081))
- * provider/aws: Updating state when `aws_sns_topic_subscription` is missing ([#6629](https://github.com/hashicorp/terraform/issues/6629))
- * provider/aws: `aws_codedeploy_deployment_group` panic when setting `on_premises_instance_tag_filter` ([#6617](https://github.com/hashicorp/terraform/issues/6617))
- * provider/aws: `aws_db_instance` now defaults `publicly_accessible` to false ([#7117](https://github.com/hashicorp/terraform/issues/7117))
- * provider/aws: `aws_opsworks_application.app_source` SSH key is write-only ([#6649](https://github.com/hashicorp/terraform/issues/6649))
- * provider/aws: fix Elastic Beanstalk `cname_prefix` continual plans ([#6653](https://github.com/hashicorp/terraform/issues/6653))
- * provider/aws: Bundle IOPs and Allocated Storage update for DB Instances ([#7203](https://github.com/hashicorp/terraform/issues/7203))
- * provider/aws: Fix case when instanceId is absent in network interfaces ([#6851](https://github.com/hashicorp/terraform/issues/6851))
- * provider/aws: fix aws_security_group_rule refresh ([#6730](https://github.com/hashicorp/terraform/issues/6730))
- * provider/aws: Fix issue with Elastic Beanstalk and invalid settings ([#7222](https://github.com/hashicorp/terraform/issues/7222))
- * provider/aws: Fix issue where aws_app_cookie_stickiness_policy fails on destroy if LoadBalancer doesn't exist ([#7166](https://github.com/hashicorp/terraform/issues/7166))
- * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB ([#7188](https://github.com/hashicorp/terraform/issues/7188))
- * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` ([#7219](https://github.com/hashicorp/terraform/issues/7219))
- * provider/aws: Set Elastic Beanstalk stack name back to state ([#7445](https://github.com/hashicorp/terraform/issues/7445))
- * provider/aws: Allow recreation of VPC Peering Connection when state is rejected ([#7466](https://github.com/hashicorp/terraform/issues/7466))
- * provider/aws: Remove EFS File System from State when NotFound ([#7437](https://github.com/hashicorp/terraform/issues/7437))
- * provider/aws: `aws_customer_gateway` refreshing from state on deleted state ([#7482](https://github.com/hashicorp/terraform/issues/7482))
- * provider/aws: Retry finding `aws_route` after creating it ([#7463](https://github.com/hashicorp/terraform/issues/7463))
- * provider/aws: Refresh CloudWatch Group from state on 404 ([#7576](https://github.com/hashicorp/terraform/issues/7576))
- * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` ([#7312](https://github.com/hashicorp/terraform/issues/7312))
- * provider/aws: Safely get ELB values ([#7585](https://github.com/hashicorp/terraform/issues/7585))
- * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk ([#6491](https://github.com/hashicorp/terraform/issues/6491))
- * provider/aws: Bump rds_cluster timeout to 15 mins ([#7604](https://github.com/hashicorp/terraform/issues/7604))
- * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured ([#7669](https://github.com/hashicorp/terraform/issues/7669))
- * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` ([#7698](https://github.com/hashicorp/terraform/issues/7698))
- * provider/aws: Ignore IOPS on non io1 AWS root_block_device ([#7783](https://github.com/hashicorp/terraform/issues/7783))
- * provider/aws: Ignore missing ENI attachment when trying to detach ENI ([#7185](https://github.com/hashicorp/terraform/issues/7185))
- * provider/aws: Fix issue updating ElasticBeanstalk Environment templates ([#7811](https://github.com/hashicorp/terraform/issues/7811))
- * provider/aws: Restore Defaults to SQS Queues ([#7818](https://github.com/hashicorp/terraform/issues/7818))
- * provider/aws: Don't delete Lambda function from state on initial call of the Read func ([#7829](https://github.com/hashicorp/terraform/issues/7829))
- * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state ([#7861](https://github.com/hashicorp/terraform/issues/7861))
- * provider/aws: Fix aws_route53_record 0-2 migration ([#7907](https://github.com/hashicorp/terraform/issues/7907))
- * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` ([#6766](https://github.com/hashicorp/terraform/issues/6766))
- * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources ([#6790](https://github.com/hashicorp/terraform/issues/6790))
- * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` ([#6768](https://github.com/hashicorp/terraform/issues/6768))
- * provider/azurerm: Add support for storage container name validation ([#6852](https://github.com/hashicorp/terraform/issues/6852))
- * provider/azurerm: Remove storage containers and blobs when storage accounts are not found ([#6855](https://github.com/hashicorp/terraform/issues/6855))
- * provider/azurerm: `azurerm_virtual_machine` fix `additional_unattend_rm` Windows config option ([#7105](https://github.com/hashicorp/terraform/issues/7105))
- * provider/azurerm: Fix `azurerm_virtual_machine` windows_config ([#7123](https://github.com/hashicorp/terraform/issues/7123))
- * provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again ([#7113](https://github.com/hashicorp/terraform/issues/7113))
- * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding ([#7307](https://github.com/hashicorp/terraform/issues/7307))
- * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources ([#7308](https://github.com/hashicorp/terraform/issues/7308))
- * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion ([#7584](https://github.com/hashicorp/terraform/issues/7584))
- * provider/azurerm: catch `azurerm_template_deployment` erroring silently ([#7644](https://github.com/hashicorp/terraform/issues/7644))
- * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource ([#7646](https://github.com/hashicorp/terraform/issues/7646))
- * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names ([#7674](https://github.com/hashicorp/terraform/issues/7674))
- * provider/azurerm: `azurerm_virtual_machine` computer_name now Required ([#7308](https://github.com/hashicorp/terraform/issues/7308))
- * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew ([#7650](https://github.com/hashicorp/terraform/issues/7650))
- * provider/azurerm: Wait for `azurerm_storage_account` to be available ([#7329](https://github.com/hashicorp/terraform/issues/7329))
- * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 ([#6969](https://github.com/hashicorp/terraform/issues/6969))
- * provider/cloudstack: Fix using `cloudstack_network_acl` within a project ([#6743](https://github.com/hashicorp/terraform/issues/6743))
- * provider/cloudstack: Fix refreshing `cloudstack_network_acl_rule` when the associated ACL is deleted ([#7612](https://github.com/hashicorp/terraform/issues/7612))
- * provider/cloudstack: Fix refreshing `cloudstack_port_forward` when the associated IP address is no longer associated ([#7612](https://github.com/hashicorp/terraform/issues/7612))
- * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges ([#7612](https://github.com/hashicorp/terraform/issues/7612))
- * provider/digitalocean: Stop `digitalocean_droplet` forcing new resource on uppercase region ([#7044](https://github.com/hashicorp/terraform/issues/7044))
- * provider/digitalocean: Reassign Floating IP when droplet changes ([#7411](https://github.com/hashicorp/terraform/issues/7411))
- * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` ([#6689](https://github.com/hashicorp/terraform/issues/6689))
- * provider/mysql: Specifying empty provider credentials no longer causes a panic ([#7211](https://github.com/hashicorp/terraform/issues/7211))
- * provider/openstack: Reassociate Floating IP on network changes ([#6579](https://github.com/hashicorp/terraform/issues/6579))
- * provider/openstack: Ensure CIDRs Are Lower Case ([#6864](https://github.com/hashicorp/terraform/issues/6864))
- * provider/openstack: Rebuild Instances On Network Changes ([#6844](https://github.com/hashicorp/terraform/issues/6844))
- * provider/openstack: Firewall rules are applied in the correct order ([#7194](https://github.com/hashicorp/terraform/issues/7194))
- * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups ([#7468](https://github.com/hashicorp/terraform/issues/7468))
- * provider/openstack: Fixing boot volumes interfering with block storage volumes list ([#7649](https://github.com/hashicorp/terraform/issues/7649))
- * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources ([#6522](https://github.com/hashicorp/terraform/issues/6522))
- * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` ([#6635](https://github.com/hashicorp/terraform/issues/6635))
- * provider/vsphere: adding a `vsphere_virtual_machine` migration ([#7023](https://github.com/hashicorp/terraform/issues/7023))
- * provider/vsphere: Don't require vsphere debug paths to be set ([#7027](https://github.com/hashicorp/terraform/issues/7027))
- * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources ([#7275](https://github.com/hashicorp/terraform/issues/7275))
- * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional ([#7410](https://github.com/hashicorp/terraform/issues/7410))
- * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller ([#7167](https://github.com/hashicorp/terraform/issues/7167))
- * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete ([#7206](https://github.com/hashicorp/terraform/issues/7206))
- * provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks ([#7804](https://github.com/hashicorp/terraform/issues/7804))
- * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs ([#7413](https://github.com/hashicorp/terraform/issues/7413))
-
-## 0.6.16 (May 9, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * provider/aws: `aws_eip` field `private_ip` is now a computed value, and cannot be set in your configuration. Use `associate_with_private_ip` instead (see the sketch below). See ([#6521](https://github.com/hashicorp/terraform/issues/6521))
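For reference, a minimal sketch of the replacement field, assuming an instance with a known secondary private IP; the AMI, subnet, and address values are placeholders, not taken from the changelog:

```hcl
resource "aws_instance" "web" {
  ami           = "ami-12345678"    # placeholder AMI ID
  instance_type = "t2.micro"
  subnet_id     = "subnet-12345678" # placeholder subnet ID
  private_ip    = "10.0.0.12"
}

resource "aws_eip" "web" {
  vpc      = true
  instance = "${aws_instance.web.id}"

  # `private_ip` on aws_eip is now read-only; select the target
  # private address via this argument instead.
  associate_with_private_ip = "10.0.0.12"
}
```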
-
-FEATURES:
-
- * **New provider:** `librato` ([#3371](https://github.com/hashicorp/terraform/issues/3371))
- * **New provider:** `softlayer` ([#4327](https://github.com/hashicorp/terraform/issues/4327))
- * **New resource:** `aws_api_gateway_account` ([#6321](https://github.com/hashicorp/terraform/issues/6321))
- * **New resource:** `aws_api_gateway_authorizer` ([#6320](https://github.com/hashicorp/terraform/issues/6320))
- * **New resource:** `aws_db_event_subscription` ([#6367](https://github.com/hashicorp/terraform/issues/6367))
- * **New resource:** `aws_db_option_group` ([#4401](https://github.com/hashicorp/terraform/issues/4401))
- * **New resource:** `aws_eip_association` ([#6552](https://github.com/hashicorp/terraform/issues/6552))
- * **New resource:** `openstack_networking_secgroup_rule_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410))
- * **New resource:** `openstack_networking_secgroup_v2` ([#6410](https://github.com/hashicorp/terraform/issues/6410))
- * **New resource:** `vsphere_file` ([#6401](https://github.com/hashicorp/terraform/issues/6401))
-
-IMPROVEMENTS:
-
- * core: update HCL dependency to improve whitespace handling in `terraform fmt` ([#6347](https://github.com/hashicorp/terraform/issues/6347))
- * core: Add support for marking outputs as sensitive ([#6559](https://github.com/hashicorp/terraform/issues/6559))
- * provider/aws: Add agent_version argument to `aws_opswork_stack` ([#6493](https://github.com/hashicorp/terraform/issues/6493))
- * provider/aws: Add support for request parameters to `api_gateway_method` & `api_gateway_integration` ([#6501](https://github.com/hashicorp/terraform/issues/6501))
- * provider/aws: Add support for response parameters to `api_gateway_method_response` & `api_gateway_integration_response` ([#6344](https://github.com/hashicorp/terraform/issues/6344))
- * provider/aws: Allow empty S3 config in Cloudfront Origin ([#6487](https://github.com/hashicorp/terraform/issues/6487))
- * provider/aws: Improve error handling in IAM Server Certificates ([#6442](https://github.com/hashicorp/terraform/issues/6442))
- * provider/aws: Use `sts:GetCallerIdentity` as additional method for getting AWS account ID ([#6385](https://github.com/hashicorp/terraform/issues/6385))
- * provider/aws: `aws_redshift_cluster` `automated_snapshot_retention_period` didn't allow 0 value ([#6537](https://github.com/hashicorp/terraform/issues/6537))
- * provider/aws: Add CloudFront `hosted_zone_id` attribute ([#6530](https://github.com/hashicorp/terraform/issues/6530))
- * provider/azurerm: Increase timeout for ARM Template deployments to 40 minutes ([#6319](https://github.com/hashicorp/terraform/issues/6319))
- * provider/azurerm: Make `private_ip_address` an exported field on `azurerm_network_interface` ([#6538](https://github.com/hashicorp/terraform/issues/6538))
- * provider/azurerm: Add support for `tags` to `azurerm_virtual_machine` ([#6556](https://github.com/hashicorp/terraform/issues/6556))
- * provider/azurerm: Add `os_type` and `image_uri` in `azurerm_virtual_machine` ([#6553](https://github.com/hashicorp/terraform/issues/6553))
- * provider/cloudflare: Add proxied option to `cloudflare_record` ([#5508](https://github.com/hashicorp/terraform/issues/5508))
- * provider/docker: Add ability to keep docker image locally on terraform destroy ([#6376](https://github.com/hashicorp/terraform/issues/6376))
- * provider/fastly: Add S3 Log Streaming to Fastly Service ([#6378](https://github.com/hashicorp/terraform/issues/6378))
- * provider/fastly: Add Conditions to Fastly Service ([#6481](https://github.com/hashicorp/terraform/issues/6481))
- * provider/github: Add support for Github Enterprise via base_url configuration option ([#6434](https://github.com/hashicorp/terraform/issues/6434))
- * provider/triton: Add support for specifying network interfaces on `triton machine` resources ([#6418](https://github.com/hashicorp/terraform/issues/6418))
- * provider/triton: Deleted firewall rules no longer prevent refresh ([#6529](https://github.com/hashicorp/terraform/issues/6529))
- * provider/vsphere: Add `skip_customization` option to `vsphere_virtual_machine` resources ([#6355](https://github.com/hashicorp/terraform/issues/6355))
- * provider/vsphere: Add ability to specify and mount bootable vmdk in `vsphere_virtual_machine` ([#6146](https://github.com/hashicorp/terraform/issues/6146))
- * provider/vsphere: Add support for IPV6 to `vsphere_virtual_machine` ([#6457](https://github.com/hashicorp/terraform/issues/6457))
- * provider/vsphere: Add support for `memory_reservation` to `vsphere_virtual_machine` ([#6036](https://github.com/hashicorp/terraform/issues/6036))
- * provider/vsphere: Checking for empty diskPath in `vsphere_virtual_machine` before creating ([#6400](https://github.com/hashicorp/terraform/issues/6400))
- * provider/vsphere: Support updates to vcpu and memory on `vsphere_virtual_machine` ([#6356](https://github.com/hashicorp/terraform/issues/6356))
- * remote/s3: Logic for loading credentials now follows the same [conventions as AWS provider](https://www.terraform.io/docs/providers/aws/index.html#authentication) which means it also supports EC2 role auth and session token (e.g. assumed IAM Roles) ([#5270](https://github.com/hashicorp/terraform/issues/5270))
-
-BUG FIXES:
-
- * core: Boolean values in diffs are normalized to `true` and `false`, eliminating some erroneous diffs ([#6499](https://github.com/hashicorp/terraform/issues/6499))
- * core: Fix a bug causing "attribute not found" messages during destroy ([#6557](https://github.com/hashicorp/terraform/issues/6557))
- * provider/aws: Allow account ID checks on EC2 instances & w/ federated accounts ([#5030](https://github.com/hashicorp/terraform/issues/5030))
- * provider/aws: Fix an eventual consistency issue with aws_security_group_rule and possible duplications ([#6325](https://github.com/hashicorp/terraform/issues/6325))
- * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` ignored `wait_for_ready_timeout` ([#6358](https://github.com/hashicorp/terraform/issues/6358))
- * provider/aws: Fix bug where `aws_elastic_beanstalk_environment` update config template didn't work ([#6342](https://github.com/hashicorp/terraform/issues/6342))
- * provider/aws: Fix issue in updating CloudFront distribution LoggingConfig ([#6407](https://github.com/hashicorp/terraform/issues/6407))
- * provider/aws: Fix issue in upgrading AutoScaling Policy to use `min_adjustment_magnitude` ([#6440](https://github.com/hashicorp/terraform/issues/6440))
- * provider/aws: Fix issue replacing Network ACL Relationship ([#6421](https://github.com/hashicorp/terraform/issues/6421))
- * provider/aws: Fix issue with KMS Alias keys and name prefixes ([#6328](https://github.com/hashicorp/terraform/issues/6328))
- * provider/aws: Fix issue with encrypted snapshots of block devices in `aws_launch_configuration` resources ([#6452](https://github.com/hashicorp/terraform/issues/6452))
- * provider/aws: Fix read of `aws_cloudwatch_log_group` after an update is applied ([#6384](https://github.com/hashicorp/terraform/issues/6384))
- * provider/aws: Fix updating `number_of_nodes` on `aws_redshift_cluster` ([#6333](https://github.com/hashicorp/terraform/issues/6333))
- * provider/aws: Omit `aws_cloudfront_distribution` custom_error fields when not explicitly set ([#6382](https://github.com/hashicorp/terraform/issues/6382))
- * provider/aws: Refresh state on `aws_sqs_queue` not found ([#6381](https://github.com/hashicorp/terraform/issues/6381))
- * provider/aws: Respect `selection_pattern` in `aws_api_gateway_integration_response` (previously ignored field) ([#5893](https://github.com/hashicorp/terraform/issues/5893))
- * provider/aws: `aws_cloudfront_distribution` resources now require the `cookies` argument ([#6505](https://github.com/hashicorp/terraform/issues/6505))
- * provider/aws: `aws_route` crash when used with `aws_vpc_endpoint` ([#6338](https://github.com/hashicorp/terraform/issues/6338))
- * provider/aws: validate `cluster_id` length for `aws_elasticache_cluster` ([#6330](https://github.com/hashicorp/terraform/issues/6330))
- * provider/azurerm: `ssh_keys` can now be set for `azurerm_virtual_machine` resources, allowing provisioning ([#6541](https://github.com/hashicorp/terraform/issues/6541))
- * provider/azurerm: Fix issue that updating `azurerm_virtual_machine` was failing due to empty adminPassword ([#6528](https://github.com/hashicorp/terraform/issues/6528))
- * provider/azurerm: `storage_data_disk` settings now work correctly on `azurerm_virtual_machine` resources ([#6543](https://github.com/hashicorp/terraform/issues/6543))
- * provider/cloudflare: can manage apex records ([#6449](https://github.com/hashicorp/terraform/issues/6449))
- * provider/cloudflare: won't refresh with incorrect record if names match ([#6449](https://github.com/hashicorp/terraform/issues/6449))
- * provider/datadog: `notify_no_data` and `no_data_timeframe` are set correctly for `datadog_monitor` resources ([#6509](https://github.com/hashicorp/terraform/issues/6509))
- * provider/docker: Fix crash when using empty string in the `command` list in `docker_container` resources ([#6424](https://github.com/hashicorp/terraform/issues/6424))
- * provider/vsphere: Memory reservations are now set correctly in `vsphere_virtual_machine` resources ([#6482](https://github.com/hashicorp/terraform/issues/6482))
-
-## 0.6.15 (April 22, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
- * `aws_instance` - if you still use the `security_groups` field for SG IDs - i.e. inside a VPC - this will generate diffs during `plan`, and `apply` will **recreate** the resource. Terraform expects IDs (VPC SGs) inside `vpc_security_group_ids` (see the sketch below).
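A minimal sketch of the expected layout, assuming an instance inside a VPC; the AMI and VPC IDs are placeholders:

```hcl
resource "aws_security_group" "web" {
  name   = "web"
  vpc_id = "vpc-12345678" # placeholder VPC ID
}

resource "aws_instance" "web" {
  ami           = "ami-12345678" # placeholder AMI ID
  instance_type = "t2.micro"

  # Inside a VPC, pass SG *IDs* here. The `security_groups` field takes
  # EC2-Classic group *names*, and using it for IDs forces recreation.
  vpc_security_group_ids = ["${aws_security_group.web.id}"]
}
```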
-
-FEATURES:
-
- * **New command:** `terraform fmt` to automatically normalize config file style ([#4955](https://github.com/hashicorp/terraform/issues/4955))
- * **New interpolation function:** `jsonencode` ([#5890](https://github.com/hashicorp/terraform/issues/5890)) (see the sketch after this list)
- * **New provider:** `cobbler` ([#5969](https://github.com/hashicorp/terraform/issues/5969))
- * **New provider:** `fastly` ([#5814](https://github.com/hashicorp/terraform/issues/5814))
- * **New resource:** `aws_cloudfront_distribution` ([#5221](https://github.com/hashicorp/terraform/issues/5221))
- * **New resource:** `aws_cloudfront_origin_access_identity` ([#5221](https://github.com/hashicorp/terraform/issues/5221))
- * **New resource:** `aws_iam_user_ssh_key` ([#5774](https://github.com/hashicorp/terraform/issues/5774))
- * **New resource:** `aws_s3_bucket_notification` ([#5473](https://github.com/hashicorp/terraform/issues/5473))
- * **New resource:** `cloudstack_static_nat` ([#6004](https://github.com/hashicorp/terraform/issues/6004))
- * **New resource:** `consul_key_prefix` ([#5988](https://github.com/hashicorp/terraform/issues/5988))
- * **New resource:** `aws_default_network_acl` ([#6165](https://github.com/hashicorp/terraform/issues/6165))
- * **New resource:** `triton_fabric` ([#5920](https://github.com/hashicorp/terraform/issues/5920))
- * **New resource:** `triton_vlan` ([#5920](https://github.com/hashicorp/terraform/issues/5920))
- * **New resource:** `aws_opsworks_application` ([#4419](https://github.com/hashicorp/terraform/issues/4419))
- * **New resource:** `aws_opsworks_instance` ([#4276](https://github.com/hashicorp/terraform/issues/4276))
- * **New resource:** `aws_cloudwatch_log_subscription_filter` ([#5996](https://github.com/hashicorp/terraform/issues/5996))
- * **New resource:** `openstack_networking_router_route_v2` ([#6207](https://github.com/hashicorp/terraform/issues/6207))
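A minimal sketch of `jsonencode`, assuming a string map; the variable name and values are illustrative:

```hcl
variable "tags" {
  default = {
    Name = "example"
    Env  = "dev"
  }
}

output "tags_json" {
  # Renders the map as a JSON string, e.g. {"Env":"dev","Name":"example"}
  value = "${jsonencode(var.tags)}"
}
```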
-
-IMPROVEMENTS:
-
- * command/apply: Output will now show periodic status updates of slow resources. ([#6163](https://github.com/hashicorp/terraform/issues/6163))
- * core: Variables passed between modules are now type checked ([#6185](https://github.com/hashicorp/terraform/issues/6185))
- * core: Smaller release binaries by stripping debug information ([#6238](https://github.com/hashicorp/terraform/issues/6238))
- * provider/aws: Add support for Step Scaling in `aws_autoscaling_policy` ([#4277](https://github.com/hashicorp/terraform/issues/4277))
- * provider/aws: Add support for `cname_prefix` to `aws_elastic_beanstalk_environment` resource ([#5966](https://github.com/hashicorp/terraform/issues/5966))
- * provider/aws: Add support for trigger_configuration to `aws_codedeploy_deployment_group` ([#5599](https://github.com/hashicorp/terraform/issues/5599))
- * provider/aws: Adding outputs for elastic_beanstalk_environment resource ([#5915](https://github.com/hashicorp/terraform/issues/5915))
- * provider/aws: Adds `wait_for_ready_timeout` option to `aws_elastic_beanstalk_environment` ([#5967](https://github.com/hashicorp/terraform/issues/5967))
- * provider/aws: Allow `aws_db_subnet_group` description to be updated ([#5921](https://github.com/hashicorp/terraform/issues/5921))
- * provider/aws: Allow multiple EIPs to associate to single ENI ([#6070](https://github.com/hashicorp/terraform/issues/6070))
- * provider/aws: Change `aws_elb` access_logs to list type ([#5065](https://github.com/hashicorp/terraform/issues/5065))
- * provider/aws: Check that InternetGateway exists before returning from creation ([#6105](https://github.com/hashicorp/terraform/issues/6105))
- * provider/aws: Don't Base64-encode EC2 userdata if it is already Base64 encoded ([#6140](https://github.com/hashicorp/terraform/issues/6140))
- * provider/aws: Making the Cloudwatch Event Rule Target `target_id` optional ([#5787](https://github.com/hashicorp/terraform/issues/5787))
- * provider/aws: Timeouts for `elasticsearch_domain` are increased ([#5910](https://github.com/hashicorp/terraform/issues/5910))
- * provider/aws: `aws_codecommit_repository` set `default_branch` only if defined ([#5904](https://github.com/hashicorp/terraform/issues/5904))
- * provider/aws: `aws_redshift_cluster` allows usernames with underscore in it ([#5935](https://github.com/hashicorp/terraform/issues/5935))
- * provider/aws: normalise json for `aws_sns_topic` ([#6089](https://github.com/hashicorp/terraform/issues/6089))
- * provider/aws: normalize json for `aws_cloudwatch_event_rule` ([#6025](https://github.com/hashicorp/terraform/issues/6025))
- * provider/aws: increase timeout for aws_redshift_cluster ([#6305](https://github.com/hashicorp/terraform/issues/6305))
- * provider/aws: Opsworks layers now support `custom_json` argument ([#4272](https://github.com/hashicorp/terraform/issues/4272))
- * provider/aws: Added migration for `tier` attribute in `aws_elastic_beanstalk_environment` ([#6167](https://github.com/hashicorp/terraform/issues/6167))
- * provider/aws: Use resource.Retry for route creation and deletion ([#6225](https://github.com/hashicorp/terraform/issues/6225))
- * provider/aws: Add support S3 Bucket Lifecycle Rule ([#6220](https://github.com/hashicorp/terraform/issues/6220))
- * provider/clc: Override default `account` alias in provider config ([#5785](https://github.com/hashicorp/terraform/issues/5785))
- * provider/cloudstack: Deprecate `ipaddress` in favour of `ip_address` in all resources ([#6010](https://github.com/hashicorp/terraform/issues/6010))
- * provider/cloudstack: Deprecate allowing names (instead of IDs) for parameters that reference other resources ([#6123](https://github.com/hashicorp/terraform/issues/6123))
- * provider/datadog: Add heredoc support to message, escalation_message, and query ([#5788](https://github.com/hashicorp/terraform/issues/5788))
- * provider/docker: Add support for docker run --user option ([#5300](https://github.com/hashicorp/terraform/issues/5300))
- * provider/github: Add support for privacy to `github_team` ([#6116](https://github.com/hashicorp/terraform/issues/6116))
- * provider/google: Accept GOOGLE_CLOUD_KEYFILE_JSON env var for credentials ([#6007](https://github.com/hashicorp/terraform/issues/6007))
- * provider/google: Add "project" argument and attribute to all GCP compute resources which inherit from the provider's value ([#6112](https://github.com/hashicorp/terraform/issues/6112))
- * provider/google: Make "project" attribute on provider configuration optional ([#6112](https://github.com/hashicorp/terraform/issues/6112))
- * provider/google: Read more common configuration values from the environment and clarify precedence ordering ([#6114](https://github.com/hashicorp/terraform/issues/6114))
- * provider/google: `addons_config` and `subnetwork` added as attributes to `google_container_cluster` ([#5871](https://github.com/hashicorp/terraform/issues/5871))
- * provider/fastly: Add support for Request Headers ([#6197](https://github.com/hashicorp/terraform/issues/6197))
- * provider/fastly: Add support for Gzip rules ([#6247](https://github.com/hashicorp/terraform/issues/6247))
- * provider/openstack: Add value_specs argument and attribute for routers ([#4898](https://github.com/hashicorp/terraform/issues/4898))
- * provider/openstack: Allow subnets with no gateway ([#6060](https://github.com/hashicorp/terraform/issues/6060))
- * provider/openstack: Enable Token Authentication ([#6081](https://github.com/hashicorp/terraform/issues/6081))
- * provider/postgresql: New `ssl_mode` argument allowing different SSL usage tradeoffs ([#6008](https://github.com/hashicorp/terraform/issues/6008))
- * provider/vsphere: Support for linked clones and Windows-specific guest config options ([#6087](https://github.com/hashicorp/terraform/issues/6087))
- * provider/vsphere: Checking for Powered Off State before `vsphere_virtual_machine` deletion ([#6283](https://github.com/hashicorp/terraform/issues/6283))
- * provider/vsphere: Support mounting ISO images to virtual cdrom drives ([#4243](https://github.com/hashicorp/terraform/issues/4243))
- * provider/vsphere: Fix missing ssh connection info ([#4283](https://github.com/hashicorp/terraform/issues/4283))
- * provider/google: Deprecate unused "region" attribute in `global_forwarding_rule`; this attribute was never used anywhere in the computation of the resource ([#6112](https://github.com/hashicorp/terraform/issues/6112))
- * provider/cloudstack: Add group attribute to `cloudstack_instance` resource ([#6023](https://github.com/hashicorp/terraform/issues/6023))
- * provider/azurerm: Provide meaningful error message when credentials are not correct ([#6290](https://github.com/hashicorp/terraform/issues/6290))
- * provider/cloudstack: Improve support for using projects ([#6282](https://github.com/hashicorp/terraform/issues/6282))
-
-BUG FIXES:
-
- * core: Providers are now correctly inherited down a nested module tree ([#6186](https://github.com/hashicorp/terraform/issues/6186))
- * provider/aws: Convert protocols to standard format for Security Groups ([#5881](https://github.com/hashicorp/terraform/issues/5881))
- * provider/aws: Fix Lambda VPC integration (missing `vpc_id` field in schema) ([#6157](https://github.com/hashicorp/terraform/issues/6157))
- * provider/aws: Fix `aws_route` panic when destination CIDR block is nil ([#5781](https://github.com/hashicorp/terraform/issues/5781))
- * provider/aws: Fix issue re-creating deleted VPC peering connections ([#5959](https://github.com/hashicorp/terraform/issues/5959))
- * provider/aws: Fix issue with changing iops when also changing storage type to io1 on RDS ([#5676](https://github.com/hashicorp/terraform/issues/5676))
- * provider/aws: Fix issue with retrying deletion of Network ACLs ([#5954](https://github.com/hashicorp/terraform/issues/5954))
- * provider/aws: Fix potential crash when receiving malformed `aws_route` API responses ([#5867](https://github.com/hashicorp/terraform/issues/5867))
- * provider/aws: Guard against empty responses from Lambda Permissions ([#5838](https://github.com/hashicorp/terraform/issues/5838))
- * provider/aws: Normalize and compact SQS Redrive, Policy JSON ([#5888](https://github.com/hashicorp/terraform/issues/5888))
- * provider/aws: Fix issue updating ElasticBeanstalk Configuration Templates ([#6307](https://github.com/hashicorp/terraform/issues/6307))
- * provider/aws: Remove CloudTrail Trail from state if not found ([#6024](https://github.com/hashicorp/terraform/issues/6024))
- * provider/aws: Fix crash in AWS S3 Bucket when website index/error is empty ([#6269](https://github.com/hashicorp/terraform/issues/6269))
- * provider/aws: Report better error message in `aws_route53_record` when `set_identifier` is required ([#5777](https://github.com/hashicorp/terraform/issues/5777))
- * provider/aws: Show human-readable error message when failing to read an EBS volume ([#6038](https://github.com/hashicorp/terraform/issues/6038))
- * provider/aws: set ASG `health_check_grace_period` default to 300 ([#5830](https://github.com/hashicorp/terraform/issues/5830))
- * provider/aws: Fix issue with Opsworks and empty Custom Cook Book sources ([#6078](https://github.com/hashicorp/terraform/issues/6078))
- * provider/aws: wait for IAM instance profile to propagate when creating Opsworks stacks ([#6049](https://github.com/hashicorp/terraform/issues/6049))
- * provider/aws: Don't read back `aws_opsworks_stack` cookbooks source password ([#6203](https://github.com/hashicorp/terraform/issues/6203))
- * provider/aws: Resolves DefaultOS and ConfigurationManager conflict on `aws_opsworks_stack` ([#6244](https://github.com/hashicorp/terraform/issues/6244))
- * provider/aws: Renaming `aws_elastic_beanstalk_configuration_template` `option_settings` to `setting` ([#6043](https://github.com/hashicorp/terraform/issues/6043))
- * provider/aws: `aws_customer_gateway` will properly populate `bgp_asn` on refresh. [no issue]
- * provider/aws: Refresh state on `aws_directory_service_directory` not found ([#6294](https://github.com/hashicorp/terraform/issues/6294))
- * provider/aws: `aws_elb` `cross_zone_load_balancing` is not refreshed in the state file ([#6295](https://github.com/hashicorp/terraform/issues/6295))
- * provider/aws: `aws_autoscaling_group` will properly populate `tag` on refresh. [no issue]
- * provider/azurerm: Fix detection of `azurerm_storage_account` resources removed manually ([#5878](https://github.com/hashicorp/terraform/issues/5878))
- * provider/docker: Docker Image will be deleted on destroy ([#5801](https://github.com/hashicorp/terraform/issues/5801))
- * provider/openstack: Fix Disabling DHCP on Subnets ([#6052](https://github.com/hashicorp/terraform/issues/6052))
- * provider/openstack: Fix resizing when Flavor Name changes ([#6020](https://github.com/hashicorp/terraform/issues/6020))
- * provider/openstack: Fix Access Address Detection ([#6181](https://github.com/hashicorp/terraform/issues/6181))
- * provider/openstack: Fix admin_state_up on openstack_lb_member_v1 ([#6267](https://github.com/hashicorp/terraform/issues/6267))
- * provider/triton: Firewall status on `triton_machine` resources is reflected correctly ([#6119](https://github.com/hashicorp/terraform/issues/6119))
- * provider/triton: Fix time out when applying updates to Triton machine metadata ([#6149](https://github.com/hashicorp/terraform/issues/6149))
- * provider/vsphere: Add error handling to `vsphere_folder` ([#6095](https://github.com/hashicorp/terraform/issues/6095))
- * provider/cloudstack: Fix marshalling errors when using CloudStack 4.7.x (or newer) [GH-#226]
-
-## 0.6.14 (March 21, 2016)
-
-FEATURES:
-
- * **New provider:** `triton` - Manage Joyent Triton public cloud or on-premise installations ([#5738](https://github.com/hashicorp/terraform/issues/5738))
- * **New provider:** `clc` - Manage CenturyLink Cloud resources ([#4893](https://github.com/hashicorp/terraform/issues/4893))
- * **New provider:** `github` - Manage GitHub Organization permissions with Terraform config ([#5194](https://github.com/hashicorp/terraform/issues/5194))
- * **New provider:** `influxdb` - Manage InfluxDB databases ([#3478](https://github.com/hashicorp/terraform/issues/3478))
- * **New provider:** `ultradns` - Manage UltraDNS records ([#5716](https://github.com/hashicorp/terraform/issues/5716))
- * **New resource:** `aws_cloudwatch_log_metric_filter` ([#5444](https://github.com/hashicorp/terraform/issues/5444))
- * **New resource:** `azurerm_virtual_machine` ([#5514](https://github.com/hashicorp/terraform/issues/5514))
- * **New resource:** `azurerm_template_deployment` ([#5758](https://github.com/hashicorp/terraform/issues/5758))
- * **New interpolation function:** `uuid` ([#5575](https://github.com/hashicorp/terraform/issues/5575))
-
-IMPROVEMENTS:
-
- * core: provisioners connecting via WinRM now respect HTTPS settings ([#5761](https://github.com/hashicorp/terraform/issues/5761))
- * provider/aws: `aws_db_instance` now makes `identifier` optional and generates a unique ID when it is omitted ([#5723](https://github.com/hashicorp/terraform/issues/5723))
- * provider/aws: `aws_redshift_cluster` now allows `publicly_accessible` to be modified ([#5721](https://github.com/hashicorp/terraform/issues/5721))
- * provider/aws: `aws_kms_alias` now allows name to be auto-generated with a `name_prefix` ([#5594](https://github.com/hashicorp/terraform/issues/5594)) (see the sketch below)
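A minimal sketch of the `name_prefix` behavior, assuming a key managed in the same config; the names are illustrative:

```hcl
resource "aws_kms_key" "app" {
  description = "example application key"
}

resource "aws_kms_alias" "app" {
  # With name_prefix, Terraform appends a unique suffix instead of
  # requiring a fixed `name` for the alias.
  name_prefix   = "alias/app-"
  target_key_id = "${aws_kms_key.app.key_id}"
}
```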
-
-BUG FIXES:
-
- * core: Color output is now shown correctly when running Terraform on Windows ([#5718](https://github.com/hashicorp/terraform/issues/5718))
- * core: HEREDOCs can now be indented in line with configuration using `<<-` and hanging indent is removed ([#5740](https://github.com/hashicorp/terraform/issues/5740)) (see the sketch after this list)
- * core: Invalid HCL syntax of nested object blocks no longer causes a crash ([#5740](https://github.com/hashicorp/terraform/issues/5740))
- * core: Local directory-based modules now use junctions instead of symbolic links on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739))
- * core: Modules sourced from a Mercurial repository now work correctly on Windows ([#5739](https://github.com/hashicorp/terraform/issues/5739))
- * core: Address some issues with ignore_changes ([#5635](https://github.com/hashicorp/terraform/issues/5635))
- * core: Add a lock to fix an interpolation issue caught by the Go 1.6 concurrent map access detector ([#5772](https://github.com/hashicorp/terraform/issues/5772))
- * provider/aws: Fix crash when an `aws_rds_cluster_instance` is removed outside of Terraform ([#5717](https://github.com/hashicorp/terraform/issues/5717))
- * provider/aws: `aws_cloudformation_stack` use `timeout_in_minutes` for retry timeout to prevent unnecessary timeouts ([#5712](https://github.com/hashicorp/terraform/issues/5712))
- * provider/aws: `aws_lambda_function` resources no longer error on refresh if deleted externally to Terraform ([#5668](https://github.com/hashicorp/terraform/issues/5668))
- * provider/aws: `aws_vpn_connection` resources deleted via the console no longer cause a crash ([#5747](https://github.com/hashicorp/terraform/issues/5747))
- * provider/aws: Fix crasher in Elastic Beanstalk Configuration when using options ([#5756](https://github.com/hashicorp/terraform/issues/5756))
- * provider/aws: Fix issue preventing `aws_opsworks_stack` from working with Windows set as the OS ([#5724](https://github.com/hashicorp/terraform/issues/5724))
- * provider/digitalocean: `digitalocean_ssh_key` resources no longer cause a panic if there is no network connectivity ([#5748](https://github.com/hashicorp/terraform/issues/5748))
- * provider/google: Default description `google_dns_managed_zone` resources to "Managed By Terraform" ([#5428](https://github.com/hashicorp/terraform/issues/5428))
- * provider/google: Fix error message on invalid instance URL for `google_compute_instance_group` ([#5715](https://github.com/hashicorp/terraform/issues/5715))
- * provider/vsphere: provide `host` to provisioner connections ([#5558](https://github.com/hashicorp/terraform/issues/5558))
- * provisioner/remote-exec: Address race condition in the script cleanup step introduced in 0.6.13 ([#5751](https://github.com/hashicorp/terraform/issues/5751))
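A minimal sketch of the `<<-` indented heredoc fixed above; the AMI ID and script contents are placeholders:

```hcl
resource "aws_instance" "web" {
  ami           = "ami-12345678" # placeholder AMI ID
  instance_type = "t2.micro"

  # `<<-` strips the hanging indent, so the script can be indented
  # in line with the surrounding configuration.
  user_data = <<-EOF
              #!/bin/bash
              echo "hello from terraform" > /tmp/hello
              EOF
}
```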
-
-## 0.6.13 (March 16, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * provider/aws: `aws_s3_bucket_object` field `etag` is now trimming off quotes (returns raw MD5 hash) ([#5305](https://github.com/hashicorp/terraform/issues/5305))
- * provider/aws: `aws_autoscaling_group` now supports metrics collection, so a diff installing the default value of `1Minute` for the `metrics_granularity` field is expected. This diff should resolve in the next `terraform apply` w/ no AWS API calls ([#4688](https://github.com/hashicorp/terraform/issues/4688))
- * provider/consul: `consul_keys` `key` blocks now respect `delete` flag for removing individual blocks. Previously keys would be deleted only when the entire resource was removed (see the sketch after these notes).
- * provider/google: `next_hop_network` on `google_compute_route` is now read-only, to mirror the behavior in the official docs ([#5564](https://github.com/hashicorp/terraform/issues/5564))
- * state/remote/http: PUT requests for this backend will now have `Content-Type: application/json` instead of `application/octet-stream` ([#5499](https://github.com/hashicorp/terraform/issues/5499))
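A minimal sketch of per-key deletion in `consul_keys`; the key name, path, and value are placeholders:

```hcl
resource "consul_keys" "app" {
  key {
    name  = "environment"               # illustrative key name
    path  = "apps/example/environment"  # placeholder K/V path
    value = "production"

    # With delete = true, removing this `key` block later also
    # removes the key from Consul, per the 0.6.13 note above.
    delete = true
  }
}
```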
-
-FEATURES:
-
- * **New command:** `terraform untaint` ([#5527](https://github.com/hashicorp/terraform/issues/5527))
- * **New resource:** `aws_api_gateway_api_key` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_deployment` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_integration_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_integration` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_method_response` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_method` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_model` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_resource` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_api_gateway_rest_api` ([#4295](https://github.com/hashicorp/terraform/issues/4295))
- * **New resource:** `aws_elastic_beanstalk_application` ([#3157](https://github.com/hashicorp/terraform/issues/3157))
- * **New resource:** `aws_elastic_beanstalk_configuration_template` ([#3157](https://github.com/hashicorp/terraform/issues/3157))
- * **New resource:** `aws_elastic_beanstalk_environment` ([#3157](https://github.com/hashicorp/terraform/issues/3157))
- * **New resource:** `aws_iam_account_password_policy` ([#5029](https://github.com/hashicorp/terraform/issues/5029))
- * **New resource:** `aws_kms_alias` ([#3928](https://github.com/hashicorp/terraform/issues/3928))
- * **New resource:** `aws_kms_key` ([#3928](https://github.com/hashicorp/terraform/issues/3928))
- * **New resource:** `google_compute_instance_group` ([#4087](https://github.com/hashicorp/terraform/issues/4087))
-
-IMPROVEMENTS:
-
- * provider/aws: Add `repository_link` as a computed field for `aws_ecr_repository` ([#5524](https://github.com/hashicorp/terraform/issues/5524))
- * provider/aws: Add ability to update Route53 zone comments ([#5318](https://github.com/hashicorp/terraform/issues/5318))
- * provider/aws: Add support for Metrics Collection to `aws_autoscaling_group` ([#4688](https://github.com/hashicorp/terraform/issues/4688))
- * provider/aws: Add support for `description` to `aws_network_interface` ([#5523](https://github.com/hashicorp/terraform/issues/5523))
- * provider/aws: Add support for `storage_encrypted` to `aws_rds_cluster` ([#5520](https://github.com/hashicorp/terraform/issues/5520))
- * provider/aws: Add support for routing rules on `aws_s3_bucket` resources ([#5327](https://github.com/hashicorp/terraform/issues/5327))
- * provider/aws: Enable updates & versioning for `aws_s3_bucket_object` ([#5305](https://github.com/hashicorp/terraform/issues/5305))
- * provider/aws: Guard against Nil Reference in Redshift Endpoints ([#5593](https://github.com/hashicorp/terraform/issues/5593))
- * provider/aws: Lambda S3 object version defaults to `$LATEST` if unspecified ([#5370](https://github.com/hashicorp/terraform/issues/5370))
- * provider/aws: Retry DB Creation on IAM propagation error ([#5515](https://github.com/hashicorp/terraform/issues/5515))
- * provider/aws: Support KMS encryption of S3 objects ([#5453](https://github.com/hashicorp/terraform/issues/5453))
- * provider/aws: `aws_autoscaling_lifecycle_hook` now has `notification_target_arn` and `role_arn` as optional ([#5616](https://github.com/hashicorp/terraform/issues/5616))
- * provider/aws: `aws_ecs_service` validates number of `load_balancer`s before creation/updates ([#5605](https://github.com/hashicorp/terraform/issues/5605))
- * provider/aws: send Terraform version in User-Agent ([#5621](https://github.com/hashicorp/terraform/issues/5621))
- * provider/cloudflare: Change `cloudflare_record` type to ForceNew ([#5353](https://github.com/hashicorp/terraform/issues/5353))
- * provider/consul: `consul_keys` now detects drift and supports deletion of individual `key` blocks ([#5210](https://github.com/hashicorp/terraform/issues/5210))
- * provider/digitalocean: Guard against Nil reference in `digitalocean_droplet` ([#5588](https://github.com/hashicorp/terraform/issues/5588))
- * provider/docker: Add support for `unless-stopped` to docker container `restart_policy` ([#5337](https://github.com/hashicorp/terraform/issues/5337)) (see the sketch after this list)
- * provider/google: Mark `next_hop_network` as read-only on `google_compute_route` ([#5564](https://github.com/hashicorp/terraform/issues/5564))
- * provider/google: Validate VPN tunnel peer_ip at plan time ([#5501](https://github.com/hashicorp/terraform/issues/5501))
- * provider/openstack: Add Support for Domain ID and Domain Name environment variables ([#5355](https://github.com/hashicorp/terraform/issues/5355))
- * provider/openstack: Add support for instances to have multiple ephemeral disks. ([#5131](https://github.com/hashicorp/terraform/issues/5131))
- * provider/openstack: Re-Add server.AccessIPv4 and server.AccessIPv6 ([#5366](https://github.com/hashicorp/terraform/issues/5366))
- * provider/vsphere: Add support for disk init types ([#4284](https://github.com/hashicorp/terraform/issues/4284))
- * provisioner/remote-exec: Clear out scripts after uploading ([#5577](https://github.com/hashicorp/terraform/issues/5577))
- * state/remote/http: Change content type of PUT requests to the more appropriate `application/json` ([#5499](https://github.com/hashicorp/terraform/issues/5499))
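A minimal sketch of the restart policy, assuming the `restart` argument on `docker_container` (the changelog's `restart_policy` wording is loose); the image and names are illustrative:

```hcl
resource "docker_image" "app" {
  name = "nginx:latest"
}

resource "docker_container" "app" {
  name  = "app"
  image = "${docker_image.app.latest}"

  # Keep the container running across daemon restarts
  # unless it is explicitly stopped.
  restart = "unless-stopped"
}
```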
-
-BUG FIXES:
-
- * core: Disallow negative indices in the element() interpolation function, preventing crash ([#5263](https://github.com/hashicorp/terraform/issues/5263))
- * core: Fix issue that caused tainted resource destroys to be improperly filtered out when using -target and a plan file ([#5516](https://github.com/hashicorp/terraform/issues/5516))
- * core: Fix several issues with retry logic causing spurious "timeout while waiting for state to become ..." errors and unnecessary retry loops ([#5460](https://github.com/hashicorp/terraform/issues/5460)), ([#5538](https://github.com/hashicorp/terraform/issues/5538)), ([#5543](https://github.com/hashicorp/terraform/issues/5543)), ([#5553](https://github.com/hashicorp/terraform/issues/5553))
- * core: Includes upstream HCL fix to properly detect unbalanced braces and throw an error ([#5400](https://github.com/hashicorp/terraform/issues/5400))
- * provider/aws: Allow recovering from failed CloudWatch Event Target creation ([#5395](https://github.com/hashicorp/terraform/issues/5395))
- * provider/aws: Fix EC2 Classic SG Rule issue when referencing rules by name ([#5533](https://github.com/hashicorp/terraform/issues/5533))
- * provider/aws: Fix `aws_cloudformation_stack` update for `parameters` & `capabilities` if unmodified ([#5603](https://github.com/hashicorp/terraform/issues/5603))
- * provider/aws: Fix a bug where AWS Kinesis Stream includes closed shards in the shard_count ([#5401](https://github.com/hashicorp/terraform/issues/5401))
- * provider/aws: Fix a bug where ElasticSearch Domain tags were not being set correctly ([#5361](https://github.com/hashicorp/terraform/issues/5361))
- * provider/aws: Fix a bug where `aws_route` would show continual changes in the plan when not computed ([#5321](https://github.com/hashicorp/terraform/issues/5321))
- * provider/aws: Fix a bug where `publicly_accessible` wasn't being set to state in `aws_db_instance` ([#5535](https://github.com/hashicorp/terraform/issues/5535))
- * provider/aws: Fix a bug where listener protocol on `aws_elb` resources was case insensitive ([#5376](https://github.com/hashicorp/terraform/issues/5376))
- * provider/aws: Fix a bug which caused panics creating rules on security groups in EC2 Classic ([#5329](https://github.com/hashicorp/terraform/issues/5329))
- * provider/aws: Fix crash when `aws_lambda_function` VpcId is nil ([#5182](https://github.com/hashicorp/terraform/issues/5182))
- * provider/aws: Fix error with parsing JSON in `aws_s3_bucket` policy attribute ([#5474](https://github.com/hashicorp/terraform/issues/5474))
- * provider/aws: `aws_lambda_function` can be properly updated, either via `s3_object_version` or via `filename` & `source_code_hash` as described in docs ([#5239](https://github.com/hashicorp/terraform/issues/5239))
- * provider/google: Fix managed instance group preemptible instance creation ([#4834](https://github.com/hashicorp/terraform/issues/4834))
- * provider/openstack: Account for a 403 reply when os-tenant-networks is disabled ([#5432](https://github.com/hashicorp/terraform/issues/5432))
- * provider/openstack: Fix crashing during certain network updates in instances ([#5365](https://github.com/hashicorp/terraform/issues/5365))
- * provider/openstack: Fix create/delete statuses in load balancing resources ([#5557](https://github.com/hashicorp/terraform/issues/5557))
- * provider/openstack: Fix race condition between instance deletion and volume detachment ([#5359](https://github.com/hashicorp/terraform/issues/5359))
- * provider/template: Warn when `template` attribute specified as path ([#5563](https://github.com/hashicorp/terraform/issues/5563))
-
-INTERNAL IMPROVEMENTS:
-
- * helper/schema: `MaxItems` attribute on schema lists and sets ([#5218](https://github.com/hashicorp/terraform/issues/5218))
-
-## 0.6.12 (February 24, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * The `publicly_accessible` attribute on `aws_redshift_cluster` resources now defaults to true
-
-FEATURES:
-
- * **New command:** `validate` to perform syntax validation ([#3783](https://github.com/hashicorp/terraform/issues/3783))
- * **New provider:** `datadog` ([#5251](https://github.com/hashicorp/terraform/issues/5251))
- * **New interpolation function:** `md5` ([#5267](https://github.com/hashicorp/terraform/issues/5267)) (see the sketch after this list)
- * **New interpolation function:** `signum` ([#4854](https://github.com/hashicorp/terraform/issues/4854))
- * **New resource:** `aws_cloudwatch_event_rule` ([#4986](https://github.com/hashicorp/terraform/issues/4986))
- * **New resource:** `aws_cloudwatch_event_target` ([#4986](https://github.com/hashicorp/terraform/issues/4986))
- * **New resource:** `aws_lambda_permission` ([#4826](https://github.com/hashicorp/terraform/issues/4826))
- * **New resource:** `azurerm_dns_a_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013))
- * **New resource:** `azurerm_dns_aaaa_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013))
- * **New resource:** `azurerm_dns_cname_record` ([#5013](https://github.com/hashicorp/terraform/issues/5013))
- * **New resource:** `azurerm_dns_mx_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041))
- * **New resource:** `azurerm_dns_ns_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041))
- * **New resource:** `azurerm_dns_srv_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041))
- * **New resource:** `azurerm_dns_txt_record` ([#5041](https://github.com/hashicorp/terraform/issues/5041))
- * **New resource:** `azurerm_dns_zone` ([#4979](https://github.com/hashicorp/terraform/issues/4979))
- * **New resource:** `azurerm_search_service` ([#5203](https://github.com/hashicorp/terraform/issues/5203))
- * **New resource:** `azurerm_sql_database` ([#5003](https://github.com/hashicorp/terraform/issues/5003))
- * **New resource:** `azurerm_sql_firewall_rule` ([#5057](https://github.com/hashicorp/terraform/issues/5057))
- * **New resource:** `azurerm_sql_server` ([#4991](https://github.com/hashicorp/terraform/issues/4991))
- * **New resource:** `google_compute_subnetwork` ([#5130](https://github.com/hashicorp/terraform/issues/5130))
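A minimal sketch of the two new interpolation functions; the variable and output names are illustrative:

```hcl
variable "payload" {
  default = "hello world"
}

output "payload_md5" {
  # md5() returns the hex MD5 digest of the given string
  value = "${md5(var.payload)}"
}

output "delta_sign" {
  # signum() returns -1, 0, or 1 depending on the sign of its argument
  value = "${signum(-4)}"
}
```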
-
-IMPROVEMENTS:
-
- * core: Backend names are now down cased during `init` in the same manner as `remote config` ([#5012](https://github.com/hashicorp/terraform/issues/5012))
- * core: Upgrade resource name validation warning to an error as planned ([#5272](https://github.com/hashicorp/terraform/issues/5272))
- * core: output "diffs didn't match" error details ([#5276](https://github.com/hashicorp/terraform/issues/5276))
- * provider/aws: Add `is_multi_region_trail` option to CloudTrail ([#4939](https://github.com/hashicorp/terraform/issues/4939))
- * provider/aws: Add support for HTTP(S) endpoints that auto confirm SNS subscription ([#4711](https://github.com/hashicorp/terraform/issues/4711))
- * provider/aws: Add support for Tags to CloudTrail ([#5135](https://github.com/hashicorp/terraform/issues/5135))
- * provider/aws: Add support for Tags to ElasticSearch ([#4973](https://github.com/hashicorp/terraform/issues/4973))
- * provider/aws: Add support for deployment configuration to `aws_ecs_service` ([#5220](https://github.com/hashicorp/terraform/issues/5220))
- * provider/aws: Add support for log validation + KMS encryption to `aws_cloudtrail` ([#5051](https://github.com/hashicorp/terraform/issues/5051))
- * provider/aws: Allow name-prefix and auto-generated names for IAM Server Cert ([#5178](https://github.com/hashicorp/terraform/issues/5178))
- * provider/aws: Expose additional VPN Connection attributes ([#5032](https://github.com/hashicorp/terraform/issues/5032))
- * provider/aws: Return an error if no matching route is found for an AWS Route ([#5155](https://github.com/hashicorp/terraform/issues/5155))
- * provider/aws: Support custom endpoints for AWS EC2 ELB and IAM ([#5114](https://github.com/hashicorp/terraform/issues/5114))
- * provider/aws: The `cluster_type` on `aws_redshift_cluster` resources is now computed ([#5238](https://github.com/hashicorp/terraform/issues/5238))
- * provider/aws: `aws_lambda_function` resources now support VPC configuration ([#5149](https://github.com/hashicorp/terraform/issues/5149)) (see the sketch after this list)
- * provider/aws: Add support for Enhanced Monitoring to RDS Instances ([#4945](https://github.com/hashicorp/terraform/issues/4945))
- * provider/aws: Improve vpc cidr_block err message ([#5255](https://github.com/hashicorp/terraform/issues/5255))
- * provider/aws: Implement Retention Period for `aws_kinesis_stream` ([#5223](https://github.com/hashicorp/terraform/issues/5223))
- * provider/aws: Enable `stream_arn` output for DynamoDB Table when streams are enabled ([#5271](https://github.com/hashicorp/terraform/issues/5271))
- * provider/digitalocean: `digitalocean_record` resources now export a computed `fqdn` attribute ([#5071](https://github.com/hashicorp/terraform/issues/5071))
- * provider/google: Add assigned IP Address to CloudSQL Instance `google_sql_database_instance` ([#5245](https://github.com/hashicorp/terraform/issues/5245))
- * provider/openstack: Add support for Distributed Routers ([#4878](https://github.com/hashicorp/terraform/issues/4878))
- * provider/openstack: Add support for optional cacert_file parameter ([#5106](https://github.com/hashicorp/terraform/issues/5106))
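A minimal sketch of the Lambda `vpc_config` block; the artifact, IAM role, subnet, and security group references are placeholders assumed to exist elsewhere in the config:

```hcl
resource "aws_lambda_function" "app" {
  filename      = "lambda.zip"                  # placeholder artifact
  function_name = "app"
  role          = "${aws_iam_role.lambda.arn}"  # assumes a role defined elsewhere
  handler       = "index.handler"
  runtime       = "nodejs"                      # era-appropriate runtime value

  # Attach the function to a VPC by supplying subnets and security groups.
  vpc_config {
    subnet_ids         = ["subnet-12345678"] # placeholder IDs
    security_group_ids = ["sg-12345678"]
  }
}
```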
-
-BUG FIXES:
-
- * core: Fix bug detecting deeply nested module orphans ([#5022](https://github.com/hashicorp/terraform/issues/5022))
- * core: Fix bug where `ignore_changes` could produce "diffs didn't match during apply" errors ([#4965](https://github.com/hashicorp/terraform/issues/4965))
- * core: Fix race condition when handling tainted resource destroys ([#5026](https://github.com/hashicorp/terraform/issues/5026))
- * core: Improve handling of Provisioners in the graph, fixing "Provisioner already initialized" errors ([#4877](https://github.com/hashicorp/terraform/issues/4877))
- * core: Skip `create_before_destroy` processing during a `terraform destroy`, solving several issues preventing `destroy` from working properly with CBD resources ([#5096](https://github.com/hashicorp/terraform/issues/5096))
- * core: Error instead of panic on self var in wrong scope ([#5273](https://github.com/hashicorp/terraform/issues/5273))
- * provider/aws: Fix Copy of Tags to DB Instance when created from Snapshot ([#5197](https://github.com/hashicorp/terraform/issues/5197))
- * provider/aws: Fix DynamoDB Table Refresh to ensure deleted tables are removed from state ([#4943](https://github.com/hashicorp/terraform/issues/4943))
- * provider/aws: Fix ElasticSearch `domain_name` validation ([#4973](https://github.com/hashicorp/terraform/issues/4973))
- * provider/aws: Fix issue applying security group changes in EC2 Classic RDS for aws_db_instance ([#4969](https://github.com/hashicorp/terraform/issues/4969))
- * provider/aws: Fix reading auto scaling group availability zones ([#5044](https://github.com/hashicorp/terraform/issues/5044))
- * provider/aws: Fix reading auto scaling group load balancers ([#5045](https://github.com/hashicorp/terraform/issues/5045))
- * provider/aws: Fix `aws_redshift_cluster` to allow `publicly_accessible` to be false ([#5262](https://github.com/hashicorp/terraform/issues/5262))
- * provider/aws: Wait longer for internet gateways to detach ([#5120](https://github.com/hashicorp/terraform/issues/5120))
- * provider/aws: Fix issue reading auto scaling group termination policies ([#5101](https://github.com/hashicorp/terraform/issues/5101))
- * provider/cloudflare: `ttl` no longer shows a change on each plan on `cloudflare_record` resources ([#5042](https://github.com/hashicorp/terraform/issues/5042))
- * provider/docker: Fix the default docker_host value ([#5088](https://github.com/hashicorp/terraform/issues/5088))
- * provider/google: Fix backend service max_utilization attribute ([#5075](https://github.com/hashicorp/terraform/issues/5075))
- * provider/google: Fix reading of `google_compute_vpn_gateway` without an explicit ([#5125](https://github.com/hashicorp/terraform/issues/5125))
- * provider/google: Fix crash when setting `ack_deadline_seconds` on `google_pubsub_subscription` ([#5110](https://github.com/hashicorp/terraform/issues/5110))
- * provider/openstack: Fix crash when `access_network` was not defined in instances ([#4966](https://github.com/hashicorp/terraform/issues/4966))
- * provider/powerdns: Refresh of `powerdns_record` no longer fails if the record name contains a `-` ([#5228](https://github.com/hashicorp/terraform/issues/5228))
- * provider/vcd: Wait for DHCP assignment when creating `vcd_vapp` resources with no static IP assignment ([#5195](https://github.com/hashicorp/terraform/issues/5195))
-
-## 0.6.11 (February 1, 2016)
-
-BACKWARDS INCOMPATIBILITIES / NOTES:
-
- * The `max_size`, `min_size` and `desired_capacity` attributes on `aws_autoscaling_schedule` resources now default to 0
-
-FEATURES:
-
- * **New provider: `powerdns` - PowerDNS REST API** ([#4885](https://github.com/hashicorp/terraform/issues/4885))
- * **New builtin function:** `trimspace` for trimming whitespace ([#4910](https://github.com/hashicorp/terraform/issues/4910)) (see the sketch after this list)
- * **New builtin function:** `base64sha256` for base64 encoding the raw sha256 sum of a given string ([#4899](https://github.com/hashicorp/terraform/issues/4899))
- * **New resource:** `openstack_lb_member_v1` ([#4359](https://github.com/hashicorp/terraform/issues/4359))
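A minimal sketch of the two new builtin functions; the variable and output names are illustrative:

```hcl
variable "secret" {
  default = "  s3cr3t value  "
}

output "trimmed" {
  # trimspace strips leading and trailing whitespace
  value = "${trimspace(var.secret)}"
}

output "token" {
  # base64sha256 base64-encodes the raw sha256 digest of the input
  value = "${base64sha256(var.secret)}"
}
```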
This release remedies that regression by un-deprecating `min_elb_capacity` and restoring the prior behavior. - * Users of `aws_security_group` may notice new diffs in initial plans with 0.6.10 due to a bugfix to drift detection on nested security group rules. These new diffs should reflect the actual state of the resources, which Terraform previously was unable to see. - - -FEATURES: - - * **New resource: `aws_lambda_alias`** ([#4664](https://github.com/hashicorp/terraform/issues/4664)) - * **New resource: `aws_redshift_cluster`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_parameter_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_security_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `aws_redshift_subnet_group`** ([#3862](https://github.com/hashicorp/terraform/issues/3862)) - * **New resource: `azurerm_cdn_endpoint`** ([#4759](https://github.com/hashicorp/terraform/issues/4759)) - * **New resource: `azurerm_cdn_profile`** ([#4740](https://github.com/hashicorp/terraform/issues/4740)) - * **New resource: `azurerm_network_interface`** ([#4598](https://github.com/hashicorp/terraform/issues/4598)) - * **New resource: `azurerm_network_security_rule`** ([#4586](https://github.com/hashicorp/terraform/issues/4586)) - * **New resource: `azurerm_route_table`** ([#4602](https://github.com/hashicorp/terraform/issues/4602)) - * **New resource: `azurerm_route`** ([#4604](https://github.com/hashicorp/terraform/issues/4604)) - * **New resource: `azurerm_storage_account`** ([#4698](https://github.com/hashicorp/terraform/issues/4698)) - * **New resource: `azurerm_storage_blob`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_container`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_storage_queue`** ([#4862](https://github.com/hashicorp/terraform/issues/4862)) - * **New resource: `azurerm_subnet`** ([#4595](https://github.com/hashicorp/terraform/issues/4595)) - * **New resource: `docker_network`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `docker_volume`** ([#4483](https://github.com/hashicorp/terraform/issues/4483)) - * **New resource: `google_sql_user`** ([#4669](https://github.com/hashicorp/terraform/issues/4669)) - -IMPROVEMENTS: - - * core: Add `sha256()` interpolation function ([#4704](https://github.com/hashicorp/terraform/issues/4704)) - * core: Validate lifecycle keys to show helpful error messages when they are mistyped ([#4745](https://github.com/hashicorp/terraform/issues/4745)) - * core: Default `module-depth` parameter to `-1`, which expands resources within modules in command output ([#4763](https://github.com/hashicorp/terraform/issues/4763)) - * core: Variable types may now be specified explicitly using the `type` argument ([#4795](https://github.com/hashicorp/terraform/issues/4795)) - * provider/aws: Add new parameters `az_mode` and `availability_zone(s)` in ElastiCache ([#4631](https://github.com/hashicorp/terraform/issues/4631)) - * provider/aws: Allow ap-northeast-2 (Seoul) as valid region ([#4637](https://github.com/hashicorp/terraform/issues/4637)) - * provider/aws: Limit SNS Topic Subscription
protocols ([#4639](https://github.com/hashicorp/terraform/issues/4639)) - * provider/aws: Add support for configuring logging on `aws_s3_bucket` resources ([#4482](https://github.com/hashicorp/terraform/issues/4482)) - * provider/aws: Add AWS Classiclink for AWS VPC resource ([#3994](https://github.com/hashicorp/terraform/issues/3994)) - * provider/aws: Support new AWS Route53 HealthCheck additions ([#4564](https://github.com/hashicorp/terraform/issues/4564)) - * provider/aws: Store instance state ([#3261](https://github.com/hashicorp/terraform/issues/3261)) - * provider/aws: Add support for updating ELB availability zones and subnets ([#4597](https://github.com/hashicorp/terraform/issues/4597)) - * provider/aws: Enable specifying aws s3 redirect protocol ([#4098](https://github.com/hashicorp/terraform/issues/4098)) - * provider/aws: Added support for `encrypted` on `ebs_block_devices` in Launch Configurations ([#4481](https://github.com/hashicorp/terraform/issues/4481)) - * provider/aws: Retry Listener Creation for ELBs ([#4825](https://github.com/hashicorp/terraform/issues/4825)) - * provider/aws: Add support for creating Managed Microsoft Active Directory - and Directory Connectors ([#4388](https://github.com/hashicorp/terraform/issues/4388)) - * provider/aws: Mark some `aws_db_instance` fields as optional ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/digitalocean: Add support for reassigning `digitalocean_floating_ip` resources ([#4476](https://github.com/hashicorp/terraform/issues/4476)) - * provider/dme: Add support for Global Traffic Director locations on `dme_record` resources ([#4305](https://github.com/hashicorp/terraform/issues/4305)) - * provider/docker: Add support for adding host entries on `docker_container` resources ([#3463](https://github.com/hashicorp/terraform/issues/3463)) - * provider/docker: Add support for mounting named volumes on `docker_container` resources ([#4480](https://github.com/hashicorp/terraform/issues/4480)) - * provider/google: Add content field to bucket object ([#3893](https://github.com/hashicorp/terraform/issues/3893)) - * provider/google: Add support for `named_port` blocks on `google_compute_instance_group_manager` resources ([#4605](https://github.com/hashicorp/terraform/issues/4605)) - * provider/openstack: Add "personality" support to instance resource ([#4623](https://github.com/hashicorp/terraform/issues/4623)) - * provider/packet: Handle external state changes for Packet resources gracefully ([#4676](https://github.com/hashicorp/terraform/issues/4676)) - * provider/tls: `tls_private_key` now exports attributes with public key in both PEM and OpenSSH format ([#4606](https://github.com/hashicorp/terraform/issues/4606)) - * provider/vcd: Add `allow_unverified_ssl` for connections to vCloud API ([#4811](https://github.com/hashicorp/terraform/issues/4811)) - * state/remote: Allow KMS Key Encryption to be used with S3 backend ([#2903](https://github.com/hashicorp/terraform/issues/2903)) - -BUG FIXES: - - * core: Fix handling of literals with escaped interpolations `$${var}` ([#4747](https://github.com/hashicorp/terraform/issues/4747)) - * core: Fix diff mismatch when RequiresNew field and list both change ([#4749](https://github.com/hashicorp/terraform/issues/4749)) - * core: Respect module target path argument on `terraform
init` ([#4753](https://github.com/hashicorp/terraform/issues/4753)) - * core: Write planfile even on empty plans ([#4766](https://github.com/hashicorp/terraform/issues/4766)) - * core: Add validation error when output is missing value field ([#4762](https://github.com/hashicorp/terraform/issues/4762)) - * core: Fix improper handling of orphan resources when targeting ([#4574](https://github.com/hashicorp/terraform/issues/4574)) - * core: Properly handle references to computed set attributes ([#4840](https://github.com/hashicorp/terraform/issues/4840)) - * config: Detect a specific JSON edge case and show a helpful workaround ([#4746](https://github.com/hashicorp/terraform/issues/4746)) - * provider/openstack: Ensure valid Security Group Rule attribute combination ([#4466](https://github.com/hashicorp/terraform/issues/4466)) - * provider/openstack: Don't put fixed_ip in port creation request if not defined ([#4617](https://github.com/hashicorp/terraform/issues/4617)) - * provider/google: Clarify SQL Database Instance recent name restriction ([#4577](https://github.com/hashicorp/terraform/issues/4577)) - * provider/google: Split Instance network interface into two fields ([#4265](https://github.com/hashicorp/terraform/issues/4265)) - * provider/aws: Error with empty list item on security group ([#4140](https://github.com/hashicorp/terraform/issues/4140)) - * provider/aws: Fix issue with detecting drift in AWS Security Groups rules ([#4779](https://github.com/hashicorp/terraform/issues/4779)) - * provider/aws: Trap Instance error from mismatched SG IDs and Names ([#4240](https://github.com/hashicorp/terraform/issues/4240)) - * provider/aws: EBS optimised to force new resource in AWS Instance ([#4627](https://github.com/hashicorp/terraform/issues/4627)) - * provider/aws: Wait for NACL rule to be visible ([#4734](https://github.com/hashicorp/terraform/issues/4734)) - * provider/aws: `default_result` on `aws_autoscaling_lifecycle_hook` resources is now computed ([#4695](https://github.com/hashicorp/terraform/issues/4695)) - * provider/aws: fix ASG capacity waiting regression by un-deprecating `min_elb_capacity` ([#4864](https://github.com/hashicorp/terraform/issues/4864)) - * provider/consul: fix several bugs surrounding update behavior ([#4787](https://github.com/hashicorp/terraform/issues/4787)) - * provider/mailgun: Handle the fact that the domain destroy API is eventually consistent ([#4777](https://github.com/hashicorp/terraform/issues/4777)) - * provider/template: Fix race causing sporadic crashes in template_file with count > 1 ([#4694](https://github.com/hashicorp/terraform/issues/4694)) - * provider/template: Add support for updating `template_cloudinit_config` resources ([#4757](https://github.com/hashicorp/terraform/issues/4757)) - * provisioner/chef: Add ENV['no_proxy'] to chef provisioner if no_proxy is detected ([#4661](https://github.com/hashicorp/terraform/issues/4661)) - -## 0.6.9 (January 8, 2016) - -FEATURES: - - * **New provider: `vcd` - VMware vCloud Director** ([#3785](https://github.com/hashicorp/terraform/issues/3785)) - * **New provider: `postgresql` - Create PostgreSQL databases and roles** ([#3653](https://github.com/hashicorp/terraform/issues/3653)) - * **New provider: `chef` - Create chef environments, roles, etc** 
([#3084](https://github.com/hashicorp/terraform/issues/3084)) - * **New provider: `azurerm` - Preliminary support for Azure Resource Manager** ([#4226](https://github.com/hashicorp/terraform/issues/4226)) - * **New provider: `mysql` - Create MySQL databases** ([#3122](https://github.com/hashicorp/terraform/issues/3122)) - * **New resource: `aws_autoscaling_schedule`** ([#4256](https://github.com/hashicorp/terraform/issues/4256)) - * **New resource: `aws_nat_gateway`** ([#4381](https://github.com/hashicorp/terraform/issues/4381)) - * **New resource: `aws_network_acl_rule`** ([#4286](https://github.com/hashicorp/terraform/issues/4286)) - * **New resources: `aws_ecr_repository` and `aws_ecr_repository_policy`** ([#4415](https://github.com/hashicorp/terraform/issues/4415)) - * **New resource: `google_pubsub_topic`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `google_pubsub_subscription`** ([#3671](https://github.com/hashicorp/terraform/issues/3671)) - * **New resource: `template_cloudinit_config`** ([#4095](https://github.com/hashicorp/terraform/issues/4095)) - * **New resource: `tls_locally_signed_cert`** ([#3930](https://github.com/hashicorp/terraform/issues/3930)) - * **New remote state backend: `artifactory`** ([#3684](https://github.com/hashicorp/terraform/issues/3684)) - -IMPROVEMENTS: - - * core: Change set internals for performance improvements ([#3992](https://github.com/hashicorp/terraform/issues/3992)) - * core: Support HTTP basic auth in consul remote state ([#4166](https://github.com/hashicorp/terraform/issues/4166)) - * core: Improve error message on resource arity mismatch ([#4244](https://github.com/hashicorp/terraform/issues/4244)) - * core: Add support for unary operators + and - to the interpolation syntax ([#3621](https://github.com/hashicorp/terraform/issues/3621)) - * core: Add SSH agent support for Windows ([#4323](https://github.com/hashicorp/terraform/issues/4323)) - * core: Add `sha1()` interpolation function ([#4450](https://github.com/hashicorp/terraform/issues/4450)) - * provider/aws: Add `placement_group` as an option for `aws_autoscaling_group` ([#3704](https://github.com/hashicorp/terraform/issues/3704)) - * provider/aws: Add support for DynamoDB Table StreamSpecifications ([#4208](https://github.com/hashicorp/terraform/issues/4208)) - * provider/aws: Add `name_prefix` to Security Groups ([#4167](https://github.com/hashicorp/terraform/issues/4167)) - * provider/aws: Add support for removing nodes from `aws_elasticache_cluster` ([#3809](https://github.com/hashicorp/terraform/issues/3809)) - * provider/aws: Add support for `skip_final_snapshot` to `aws_db_instance` ([#3853](https://github.com/hashicorp/terraform/issues/3853)) - * provider/aws: Adding support for Tags to DB SecurityGroup ([#4260](https://github.com/hashicorp/terraform/issues/4260)) - * provider/aws: Adding Tag support for DB Param Groups ([#4259](https://github.com/hashicorp/terraform/issues/4259)) - * provider/aws: Fix issue with updated route ids for VPC Endpoints ([#4264](https://github.com/hashicorp/terraform/issues/4264)) - * provider/aws: Added measure_latency option to Route 53 Health Check resource ([#3688](https://github.com/hashicorp/terraform/issues/3688)) - * provider/aws: Validate IOPs for EBS Volumes
([#4146](https://github.com/hashicorp/terraform/issues/4146)) - * provider/aws: DB Subnet group arn output ([#4261](https://github.com/hashicorp/terraform/issues/4261)) - * provider/aws: Get full Kinesis streams view with pagination ([#4368](https://github.com/hashicorp/terraform/issues/4368)) - * provider/aws: Allow changing private IPs for ENIs ([#4307](https://github.com/hashicorp/terraform/issues/4307)) - * provider/aws: Retry MalformedPolicy errors due to newly created principals in S3 Buckets ([#4315](https://github.com/hashicorp/terraform/issues/4315)) - * provider/aws: Validate `name` on `db_subnet_group` against AWS requirements ([#4340](https://github.com/hashicorp/terraform/issues/4340)) - * provider/aws: wait for ASG capacity on update ([#3947](https://github.com/hashicorp/terraform/issues/3947)) - * provider/aws: Add validation for ECR repository name ([#4431](https://github.com/hashicorp/terraform/issues/4431)) - * provider/cloudstack: performance improvements ([#4150](https://github.com/hashicorp/terraform/issues/4150)) - * provider/docker: Add support for setting the entry point on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting the restart policy on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting memory, swap and CPU shares on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting labels on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting log driver and options on `docker_container` resources ([#3761](https://github.com/hashicorp/terraform/issues/3761)) - * provider/docker: Add support for setting the network mode on `docker_container` resources ([#4475](https://github.com/hashicorp/terraform/issues/4475)) - * provider/heroku: Improve handling of Applications within an Organization ([#4495](https://github.com/hashicorp/terraform/issues/4495)) - * provider/vsphere: Add support for custom vm params on `vsphere_virtual_machine` ([#3867](https://github.com/hashicorp/terraform/issues/3867)) - * provider/vsphere: Rename vcenter_server config parameter to something clearer ([#3718](https://github.com/hashicorp/terraform/issues/3718)) - * provider/vsphere: Make allow_unverified_ssl configurable on the provider ([#3933](https://github.com/hashicorp/terraform/issues/3933)) - * provider/vsphere: Add folder handling for folder-qualified vm names ([#3939](https://github.com/hashicorp/terraform/issues/3939)) - * provider/vsphere: Change ip_address parameter for ipv6 support ([#4035](https://github.com/hashicorp/terraform/issues/4035)) - * provider/openstack: Increase instance timeout from 10 to 30 minutes ([#4223](https://github.com/hashicorp/terraform/issues/4223)) - * provider/google: Add `restart_policy` attribute to `google_managed_instance_group` ([#3892](https://github.com/hashicorp/terraform/issues/3892)) - -BUG FIXES: - - * core: skip provider input for deprecated fields ([#4193](https://github.com/hashicorp/terraform/issues/4193)) - * core: Fix issue which could cause fields that become empty to retain old values in the state
([#3257](https://github.com/hashicorp/terraform/issues/3257)) - * provider/docker: Fix an issue running with Docker Swarm by looking up containers by ID instead of name ([#4148](https://github.com/hashicorp/terraform/issues/4148)) - * provider/openstack: Better handling of load balancing resource state changes ([#3926](https://github.com/hashicorp/terraform/issues/3926)) - * provider/aws: Treat `INACTIVE` ECS cluster as deleted ([#4364](https://github.com/hashicorp/terraform/issues/4364)) - * provider/aws: Skip `source_security_group_id` determination logic for Classic ELBs ([#4075](https://github.com/hashicorp/terraform/issues/4075)) - * provider/aws: Fix issue destroying Route 53 zone/record if it no longer exists ([#4198](https://github.com/hashicorp/terraform/issues/4198)) - * provider/aws: Fix issue force destroying a versioned S3 bucket ([#4168](https://github.com/hashicorp/terraform/issues/4168)) - * provider/aws: Update DB Replica to honor storage type ([#4155](https://github.com/hashicorp/terraform/issues/4155)) - * provider/aws: Fix issue creating AWS RDS replicas across regions ([#4215](https://github.com/hashicorp/terraform/issues/4215)) - * provider/aws: Fix issue with Route53 and zero weighted records ([#4427](https://github.com/hashicorp/terraform/issues/4427)) - * provider/aws: Fix issue with iam_profile in aws_instance when a path is specified ([#3663](https://github.com/hashicorp/terraform/issues/3663)) - * provider/aws: Refactor AWS Authentication chain to fix issue with authentication and IAM ([#4254](https://github.com/hashicorp/terraform/issues/4254)) - * provider/aws: Fix issue with finding S3 Hosted Zone ID for eu-central-1 region ([#4236](https://github.com/hashicorp/terraform/issues/4236)) - * provider/aws: Fix missing AMI issue with Launch Configurations ([#4242](https://github.com/hashicorp/terraform/issues/4242)) - * provider/aws: Opsworks stack SSH key is write-only ([#4241](https://github.com/hashicorp/terraform/issues/4241)) - * provider/aws: Update VPC Endpoint to correctly set route table ids ([#4392](https://github.com/hashicorp/terraform/issues/4392)) - * provider/aws: Fix issue with ElasticSearch Domain `access_policies` always appearing changed ([#4245](https://github.com/hashicorp/terraform/issues/4245)) - * provider/aws: Fix issue with nil parameter group value causing panic in `aws_db_parameter_group` ([#4318](https://github.com/hashicorp/terraform/issues/4318)) - * provider/aws: Fix issue with Elastic IPs not recognizing when they have been unassigned manually ([#4387](https://github.com/hashicorp/terraform/issues/4387)) - * provider/aws: Use body or URL for all CloudFormation stack updates ([#4370](https://github.com/hashicorp/terraform/issues/4370)) - * provider/aws: Fix template_url/template_body conflict ([#4540](https://github.com/hashicorp/terraform/issues/4540)) - * provider/aws: Fix bug with changing ECS service/ELB association ([#4366](https://github.com/hashicorp/terraform/issues/4366)) - * provider/aws: Fix RDS unexpected state config ([#4490](https://github.com/hashicorp/terraform/issues/4490)) - * provider/digitalocean: Fix issue where a floating IP attached to a missing droplet causes a panic ([#4214](https://github.com/hashicorp/terraform/issues/4214)) - * provider/google: Prevent project metadata sshKeys from showing up and causing unnecessary diffs
([#4512](https://github.com/hashicorp/terraform/issues/4512)) - * provider/heroku: Retry drain create until log channel is assigned ([#4823](https://github.com/hashicorp/terraform/issues/4823)) - * provider/openstack: Handle volumes in "deleting" state ([#4204](https://github.com/hashicorp/terraform/issues/4204)) - * provider/rundeck: Tolerate Rundeck server not returning project name when reading a job ([#4301](https://github.com/hashicorp/terraform/issues/4301)) - * provider/vsphere: Create and attach additional disks before bootup ([#4196](https://github.com/hashicorp/terraform/issues/4196)) - * provider/openstack: Convert block_device from a Set to a List ([#4288](https://github.com/hashicorp/terraform/issues/4288)) - * provider/google: Terraform identifies deleted resources and handles them appropriately on Read ([#3913](https://github.com/hashicorp/terraform/issues/3913)) - -## 0.6.8 (December 2, 2015) - -FEATURES: - - * **New provider: `statuscake`** ([#3340](https://github.com/hashicorp/terraform/issues/3340)) - * **New resource: `digitalocean_floating_ip`** ([#3748](https://github.com/hashicorp/terraform/issues/3748)) - * **New resource: `aws_lambda_event_source_mapping`** ([#4093](https://github.com/hashicorp/terraform/issues/4093)) - -IMPROVEMENTS: - - * provider/cloudstack: Reduce the number of network calls required for common operations ([#4051](https://github.com/hashicorp/terraform/issues/4051)) - * provider/aws: Make `publicly_accessible` on an `aws_db_instance` update existing instances instead of forcing new ones ([#3895](https://github.com/hashicorp/terraform/issues/3895)) - * provider/aws: Allow `block_duration_minutes` to be set for spot instance requests ([#4071](https://github.com/hashicorp/terraform/issues/4071)) - * provider/aws: Make setting `acl` on S3 buckets update existing buckets instead of forcing new ones ([#4080](https://github.com/hashicorp/terraform/issues/4080)) - * provider/aws: Make updates to `assume_role_policy` modify existing IAM roles instead of forcing new ones ([#4107](https://github.com/hashicorp/terraform/issues/4107)) - -BUG FIXES: - - * core: Fix a bug which prevented HEREDOC syntax being used in lists ([#4078](https://github.com/hashicorp/terraform/issues/4078)) - * core: Fix a bug which prevented HEREDOC syntax where the anchor ends in a number ([#4128](https://github.com/hashicorp/terraform/issues/4128)) - * core: Fix a bug which prevented HEREDOC syntax being used with Windows line endings ([#4069](https://github.com/hashicorp/terraform/issues/4069)) - * provider/aws: Fix a bug which could result in a panic when reading EC2 metadata ([#4024](https://github.com/hashicorp/terraform/issues/4024)) - * provider/aws: Fix issue recreating security group rule if it has been destroyed ([#4050](https://github.com/hashicorp/terraform/issues/4050)) - * provider/aws: Fix issue with some attributes in Spot Instance Requests returning as nil ([#4132](https://github.com/hashicorp/terraform/issues/4132)) - * provider/aws: Fix issue where SPF records in Route 53 could show differences with no modification to the configuration ([#4108](https://github.com/hashicorp/terraform/issues/4108)) - * provisioner/chef: Fix issue with path separators breaking the Chef provisioner on Windows ([#4041](https://github.com/hashicorp/terraform/issues/4041)) - -## 0.6.7 (November 23, 2015)
- -FEATURES: - - * **New provider: `tls`** - A utility provider for generating TLS keys/self-signed certificates for development and testing ([#2778](https://github.com/hashicorp/terraform/issues/2778)) - * **New provider: `dyn`** - Manage DNS records on Dyn - * **New resource: `aws_cloudformation_stack`** ([#2636](https://github.com/hashicorp/terraform/issues/2636)) - * **New resource: `aws_cloudtrail`** ([#3094](https://github.com/hashicorp/terraform/issues/3094)), ([#4010](https://github.com/hashicorp/terraform/issues/4010)) - * **New resource: `aws_route`** ([#3548](https://github.com/hashicorp/terraform/issues/3548)) - * **New resource: `aws_codecommit_repository`** ([#3274](https://github.com/hashicorp/terraform/issues/3274)) - * **New resource: `aws_kinesis_firehose_delivery_stream`** ([#3833](https://github.com/hashicorp/terraform/issues/3833)) - * **New resource: `google_sql_database` and `google_sql_database_instance`** ([#3617](https://github.com/hashicorp/terraform/issues/3617)) - * **New resource: `google_compute_global_address`** ([#3701](https://github.com/hashicorp/terraform/issues/3701)) - * **New resource: `google_compute_https_health_check`** ([#3883](https://github.com/hashicorp/terraform/issues/3883)) - * **New resource: `google_compute_ssl_certificate`** ([#3723](https://github.com/hashicorp/terraform/issues/3723)) - * **New resource: `google_compute_url_map`** ([#3722](https://github.com/hashicorp/terraform/issues/3722)) - * **New resource: `google_compute_target_http_proxy`** ([#3727](https://github.com/hashicorp/terraform/issues/3727)) - * **New resource: `google_compute_target_https_proxy`** ([#3728](https://github.com/hashicorp/terraform/issues/3728)) - * **New resource: `google_compute_global_forwarding_rule`** ([#3702](https://github.com/hashicorp/terraform/issues/3702)) - * **New resource: `openstack_networking_port_v2`** ([#3731](https://github.com/hashicorp/terraform/issues/3731)) - * New interpolation function: `coalesce` ([#3814](https://github.com/hashicorp/terraform/issues/3814)) - -IMPROVEMENTS: - - * core: Improve message to list only resources which will be destroyed when using `--target` ([#3859](https://github.com/hashicorp/terraform/issues/3859)) - * connection/ssh: Accept `private_key` contents instead of paths ([#3846](https://github.com/hashicorp/terraform/issues/3846)) - * provider/google: `preemptible` option for instance_template ([#3667](https://github.com/hashicorp/terraform/issues/3667)) - * provider/google: Accurate Terraform Version ([#3554](https://github.com/hashicorp/terraform/issues/3554)) - * provider/google: Simplified auth (DefaultClient support) ([#3553](https://github.com/hashicorp/terraform/issues/3553)) - * provider/google: `automatic_restart`, `preemptible`, `on_host_maintenance` options ([#3643](https://github.com/hashicorp/terraform/issues/3643)) - * provider/google: Read credentials as contents instead of path ([#3901](https://github.com/hashicorp/terraform/issues/3901)) - * null_resource: Enhance and document ([#3244](https://github.com/hashicorp/terraform/issues/3244), [#3659](https://github.com/hashicorp/terraform/issues/3659)) - * provider/aws: Add CORS settings to S3 bucket ([#3387](https://github.com/hashicorp/terraform/issues/3387)) - * provider/aws: Add notification topic ARN for ElastiCache clusters 
([#3674](https://github.com/hashicorp/terraform/issues/3674)) - * provider/aws: Add `kinesis_endpoint` for configuring Kinesis ([#3255](https://github.com/hashicorp/terraform/issues/3255)) - * provider/aws: Add a computed ARN for S3 Buckets ([#3685](https://github.com/hashicorp/terraform/issues/3685)) - * provider/aws: Add S3 support for Lambda Function resource ([#3794](https://github.com/hashicorp/terraform/issues/3794)) - * provider/aws: Add `name_prefix` option to launch configurations ([#3802](https://github.com/hashicorp/terraform/issues/3802)) - * provider/aws: Add support for group name and path changes with IAM group update function ([#3237](https://github.com/hashicorp/terraform/issues/3237)) - * provider/aws: Provide `source_security_group_id` for ELBs inside a VPC ([#3780](https://github.com/hashicorp/terraform/issues/3780)) - * provider/aws: Add snapshot window and retention limits for ElastiCache (Redis) ([#3707](https://github.com/hashicorp/terraform/issues/3707)) - * provider/aws: Add username updates for `aws_iam_user` ([#3227](https://github.com/hashicorp/terraform/issues/3227)) - * provider/aws: Add AutoMinorVersionUpgrade to RDS Instances ([#3677](https://github.com/hashicorp/terraform/issues/3677)) - * provider/aws: Add `access_logs` to ELB resource ([#3756](https://github.com/hashicorp/terraform/issues/3756)) - * provider/aws: Add a retry to recover from errors when creating Autoscaling Lifecycle Hooks ([#3694](https://github.com/hashicorp/terraform/issues/3694)) - * provider/aws: `engine_version` is now optional for DB Instance ([#3744](https://github.com/hashicorp/terraform/issues/3744)) - * provider/aws: Add configuration to enable copying RDS tags to final snapshot ([#3529](https://github.com/hashicorp/terraform/issues/3529)) - * provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) ([#3757](https://github.com/hashicorp/terraform/issues/3757)) - * provider/aws: Document and validate ELB `ssl_certificate_id` and protocol requirements ([#3887](https://github.com/hashicorp/terraform/issues/3887)) - * provider/azure: Read `publish_settings` as contents instead of path ([#3899](https://github.com/hashicorp/terraform/issues/3899)) - * provider/openstack: Use IPv4 as the default IP version for subnets ([#3091](https://github.com/hashicorp/terraform/issues/3091)) - * provider/aws: Apply security group after restoring `db_instance` from snapshot ([#3513](https://github.com/hashicorp/terraform/issues/3513)) - * provider/aws: Make the AutoScalingGroup `name` optional ([#3710](https://github.com/hashicorp/terraform/issues/3710)) - * provider/openstack: Add "delete on termination" boot-from-volume option ([#3232](https://github.com/hashicorp/terraform/issues/3232)) - * provider/digitalocean: Make `user_data` force a new droplet ([#3740](https://github.com/hashicorp/terraform/issues/3740)) - * provider/vsphere: Do not add network interfaces by default ([#3652](https://github.com/hashicorp/terraform/issues/3652)) - * provider/openstack: Configure Fixed IPs through ports ([#3772](https://github.com/hashicorp/terraform/issues/3772)) - * provider/openstack: Specify a port ID on a Router Interface ([#3903](https://github.com/hashicorp/terraform/issues/3903)) - * provider/openstack: Make LBaaS Virtual IP computed
([#3927](https://github.com/hashicorp/terraform/issues/3927)) - -BUG FIXES: - - * `terraform remote config`: update `--help` output ([#3632](https://github.com/hashicorp/terraform/issues/3632)) - * core: Modules on Git branches now update properly ([#1568](https://github.com/hashicorp/terraform/issues/1568)) - * core: Fix issue preventing input prompts for unset variables during plan ([#3843](https://github.com/hashicorp/terraform/issues/3843)) - * core: Fix issue preventing input prompts for unset variables during refresh ([#4017](https://github.com/hashicorp/terraform/issues/4017)) - * core: Orphan resources can now be targets ([#3912](https://github.com/hashicorp/terraform/issues/3912)) - * helper/schema: Skip StateFunc when value is nil ([#4002](https://github.com/hashicorp/terraform/issues/4002)) - * provider/google: Timeout when deleting large `instance_group_manager` ([#3591](https://github.com/hashicorp/terraform/issues/3591)) - * provider/aws: Fix issue with order of Termination Policies in AutoScaling Groups. - This will introduce plans on upgrade to this version, in order to correct the ordering ([#2890](https://github.com/hashicorp/terraform/issues/2890)) - * provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` ([#3668](https://github.com/hashicorp/terraform/issues/3668)) - * provider/aws: Fix a bug where a non-lower-cased `maintenance_window` can cause unnecessary planned changes ([#4020](https://github.com/hashicorp/terraform/issues/4020)) - * provider/aws: Only set `weight` on an `aws_route53_record` if it has been set in configuration ([#3900](https://github.com/hashicorp/terraform/issues/3900)) - * provider/aws: Ignore association not existing on route table destroy ([#3615](https://github.com/hashicorp/terraform/issues/3615)) - * provider/aws: Fix policy encoding issue with SNS Topics ([#3700](https://github.com/hashicorp/terraform/issues/3700)) - * provider/aws: Correctly export ARN in `aws_iam_saml_provider` ([#3827](https://github.com/hashicorp/terraform/issues/3827)) - * provider/aws: Fix issue deleting users who are attached to a group ([#4005](https://github.com/hashicorp/terraform/issues/4005)) - * provider/aws: Fix crash in Route53 Record if Zone not found ([#3945](https://github.com/hashicorp/terraform/issues/3945)) - * provider/aws: Retry deleting IAM Server Cert on dependency violation ([#3898](https://github.com/hashicorp/terraform/issues/3898)) - * provider/aws: Update Spot Instance request to provide connection information ([#3940](https://github.com/hashicorp/terraform/issues/3940)) - * provider/aws: Fix typo in error checking for IAM Policy Attachments ([#3970](https://github.com/hashicorp/terraform/issues/3970)) - * provider/aws: Fix issue with LB Cookie Stickiness and empty expiration period ([#3908](https://github.com/hashicorp/terraform/issues/3908)) - * provider/aws: Tolerate ElastiCache clusters being deleted outside Terraform ([#3767](https://github.com/hashicorp/terraform/issues/3767)) - * provider/aws: Downcase Route 53 record names in state file to match API output ([#3574](https://github.com/hashicorp/terraform/issues/3574)) - * provider/aws: Fix issue that could occur if no ECS Cluster was found for a given name ([#3829](https://github.com/hashicorp/terraform/issues/3829)) - * provider/aws: Fix issue with SNS topic policy if omitted 
([#3777](https://github.com/hashicorp/terraform/issues/3777)) - * provider/aws: Support scratch volumes in `aws_ecs_task_definition` ([#3810](https://github.com/hashicorp/terraform/issues/3810)) - * provider/aws: Treat `aws_ecs_service` w/ Status==INACTIVE as deleted ([#3828](https://github.com/hashicorp/terraform/issues/3828)) - * provider/aws: Expand ~ to homedir in `aws_s3_bucket_object.source` ([#3910](https://github.com/hashicorp/terraform/issues/3910)) - * provider/aws: Fix issue with updating the `aws_ecs_task_definition` where `aws_ecs_service` didn't wait for a new computed ARN ([#3924](https://github.com/hashicorp/terraform/issues/3924)) - * provider/aws: Prevent crashing when deleting `aws_ecs_service` that is already gone ([#3914](https://github.com/hashicorp/terraform/issues/3914)) - * provider/aws: Allow spaces in `aws_db_subnet_group.name` (undocumented in the API) ([#3955](https://github.com/hashicorp/terraform/issues/3955)) - * provider/aws: Make VPC ID required on subnets ([#4021](https://github.com/hashicorp/terraform/issues/4021)) - * provider/azure: Various bug fixes ([#3695](https://github.com/hashicorp/terraform/issues/3695)) - * provider/digitalocean: Fix issue preventing SSH fingerprints from working ([#3633](https://github.com/hashicorp/terraform/issues/3633)) - * provider/digitalocean: Fix the DigitalOcean Droplet 404 potential on refresh of state ([#3768](https://github.com/hashicorp/terraform/issues/3768)) - * provider/openstack: Fix several issues causing unresolvable diffs ([#3440](https://github.com/hashicorp/terraform/issues/3440)) - * provider/openstack: Safely delete security groups ([#3696](https://github.com/hashicorp/terraform/issues/3696)) - * provider/openstack: Ignore order of `security_groups` in instance ([#3651](https://github.com/hashicorp/terraform/issues/3651)) - * provider/vsphere: Fix d.SetConnInfo error in case of a missing IP address ([#3636](https://github.com/hashicorp/terraform/issues/3636)) - * provider/openstack: Fix boot from volume ([#3206](https://github.com/hashicorp/terraform/issues/3206)) - * provider/openstack: Fix crashing when image is no longer accessible ([#2189](https://github.com/hashicorp/terraform/issues/2189)) - * provider/openstack: Better handling of network resource state changes ([#3712](https://github.com/hashicorp/terraform/issues/3712)) - * provider/openstack: Fix crashing when no security group is specified ([#3801](https://github.com/hashicorp/terraform/issues/3801)) - * provider/packet: Fix issue that could cause errors when provisioning many devices at once ([#3847](https://github.com/hashicorp/terraform/issues/3847)) - * provider/packet: Fix connection information for devices, allowing provisioners to run ([#3948](https://github.com/hashicorp/terraform/issues/3948)) - * provider/openstack: Fix issue preventing security group rules from being removed ([#3796](https://github.com/hashicorp/terraform/issues/3796)) - * provider/template: `template_file`: source contents instead of path ([#3909](https://github.com/hashicorp/terraform/issues/3909)) - -## 0.6.6 (October 23, 2015) - -FEATURES: - - * New interpolation functions: `cidrhost`, `cidrnetmask` and `cidrsubnet` ([#3127](https://github.com/hashicorp/terraform/issues/3127)) - -IMPROVEMENTS: - - * "forces new resource" now highlighted in plan output 
([#3136](https://github.com/hashicorp/terraform/issues/3136)) - -BUG FIXES: - - * helper/schema: Better error message for assigning list/map to string ([#3009](https://github.com/hashicorp/terraform/issues/3009)) - * remote/state/atlas: Additional remote state conflict handling for semantically neutral state changes ([#3603](https://github.com/hashicorp/terraform/issues/3603)) - -## 0.6.5 (October 21, 2015) - -FEATURES: - - * **New resources: `aws_codeploy_app` and `aws_codeploy_deployment_group`** ([#2783](https://github.com/hashicorp/terraform/issues/2783)) - * New remote state backend: `etcd` ([#3487](https://github.com/hashicorp/terraform/issues/3487)) - * New interpolation functions: `upper` and `lower` ([#3558](https://github.com/hashicorp/terraform/issues/3558)) - -BUG FIXES: - - * core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules ([#3573](https://github.com/hashicorp/terraform/issues/3573)) - * core: Fix remote state conflicts caused by state metadata differences ([#3569](https://github.com/hashicorp/terraform/issues/3569)) - * core: Avoid using http.DefaultClient ([#3532](https://github.com/hashicorp/terraform/issues/3532)) - -INTERNAL IMPROVEMENTS: - - * provider/digitalocean: use official Go client ([#3333](https://github.com/hashicorp/terraform/issues/3333)) - * core: extract module fetching to external library ([#3516](https://github.com/hashicorp/terraform/issues/3516)) - -## 0.6.4 (October 15, 2015) - -FEATURES: - - * **New provider: `rundeck`** ([#2412](https://github.com/hashicorp/terraform/issues/2412)) - * **New provider: `packet`** ([#2260](https://github.com/hashicorp/terraform/issues/2260)), ([#3472](https://github.com/hashicorp/terraform/issues/3472)) - * **New provider: `vsphere`**: Initial support for a VM resource ([#3419](https://github.com/hashicorp/terraform/issues/3419)) - * **New resource: `cloudstack_loadbalancer_rule`** ([#2934](https://github.com/hashicorp/terraform/issues/2934)) - * **New resource: `google_compute_project_metadata`** ([#3065](https://github.com/hashicorp/terraform/issues/3065)) - * **New resources: `aws_ami`, `aws_ami_copy`, `aws_ami_from_instance`** ([#2784](https://github.com/hashicorp/terraform/issues/2784)) - * **New resources: `aws_cloudwatch_log_group`** ([#2415](https://github.com/hashicorp/terraform/issues/2415)) - * **New resource: `google_storage_bucket_object`** ([#3192](https://github.com/hashicorp/terraform/issues/3192)) - * **New resources: `google_compute_vpn_gateway`, `google_compute_vpn_tunnel`** ([#3213](https://github.com/hashicorp/terraform/issues/3213)) - * **New resources: `google_storage_bucket_acl`, `google_storage_object_acl`** ([#3272](https://github.com/hashicorp/terraform/issues/3272)) - * **New resource: `aws_iam_saml_provider`** ([#3156](https://github.com/hashicorp/terraform/issues/3156)) - * **New resources: `aws_efs_file_system` and `aws_efs_mount_target`** ([#2196](https://github.com/hashicorp/terraform/issues/2196)) - * **New resources: `aws_opsworks_*`** ([#2162](https://github.com/hashicorp/terraform/issues/2162)) - * **New resource: `aws_elasticsearch_domain`** ([#3443](https://github.com/hashicorp/terraform/issues/3443)) - * **New resource: `aws_directory_service_directory`** ([#3228](https://github.com/hashicorp/terraform/issues/3228)) - * **New 
resource: `aws_autoscaling_lifecycle_hook`** ([#3351](https://github.com/hashicorp/terraform/issues/3351)) - * **New resource: `aws_placement_group`** ([#3457](https://github.com/hashicorp/terraform/issues/3457)) - * **New resource: `aws_glacier_vault`** ([#3491](https://github.com/hashicorp/terraform/issues/3491)) - * **New lifecycle flag: `ignore_changes`** ([#2525](https://github.com/hashicorp/terraform/issues/2525)) - -IMPROVEMENTS: - - * core: Add a function to find the index of an element in a list. ([#2704](https://github.com/hashicorp/terraform/issues/2704)) - * core: Print all outputs when `terraform output` is called with no arguments ([#2920](https://github.com/hashicorp/terraform/issues/2920)) - * core: In plan output summary, count resource replacement as Add/Remove instead of Change ([#3173](https://github.com/hashicorp/terraform/issues/3173)) - * core: Add interpolation functions for base64 encoding and decoding. ([#3325](https://github.com/hashicorp/terraform/issues/3325)) - * core: Expose parallelism as a CLI option instead of hard-coding the default of 10 ([#3365](https://github.com/hashicorp/terraform/issues/3365)) - * core: Add interpolation function `compact`, to remove empty elements from a list. ([#3239](https://github.com/hashicorp/terraform/issues/3239)), ([#3479](https://github.com/hashicorp/terraform/issues/3479)) - * core: Allow filtering of log output by level, using e.g. ``TF_LOG=INFO`` ([#3380](https://github.com/hashicorp/terraform/issues/3380)) - * provider/aws: Add `instance_initiated_shutdown_behavior` to AWS Instance ([#2887](https://github.com/hashicorp/terraform/issues/2887)) - * provider/aws: Support IAM role names (previously just ARNs) in `aws_ecs_service.iam_role` ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Add update method to RDS Subnet groups, can modify subnets without recreating ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Paginate notifications returned for ASG Notifications ([#3043](https://github.com/hashicorp/terraform/issues/3043)) - * provider/aws: Adds additional S3 Bucket Object inputs ([#3265](https://github.com/hashicorp/terraform/issues/3265)) - * provider/aws: add `ses_smtp_password` to `aws_iam_access_key` ([#3165](https://github.com/hashicorp/terraform/issues/3165)) - * provider/aws: read `iam_instance_profile` for `aws_instance` and save to state ([#3167](https://github.com/hashicorp/terraform/issues/3167)) - * provider/aws: allow `instance` to be computed in `aws_eip` ([#3036](https://github.com/hashicorp/terraform/issues/3036)) - * provider/aws: Add `versioning` option to `aws_s3_bucket` ([#2942](https://github.com/hashicorp/terraform/issues/2942)) - * provider/aws: Add `configuration_endpoint` to `aws_elasticache_cluster` ([#3250](https://github.com/hashicorp/terraform/issues/3250)) - * provider/aws: Add validation for `app_cookie_stickiness_policy.name` ([#3277](https://github.com/hashicorp/terraform/issues/3277)) - * provider/aws: Add validation for `db_parameter_group.name` ([#3279](https://github.com/hashicorp/terraform/issues/3279)) - * provider/aws: Set DynamoDB Table ARN after creation ([#3500](https://github.com/hashicorp/terraform/issues/3500)) - * provider/aws: `aws_s3_bucket_object` allows interpolated content to be set with new `content`
attribute. ([#3200](https://github.com/hashicorp/terraform/issues/3200)) - * provider/aws: Allow tags for `aws_kinesis_stream` resource. ([#3397](https://github.com/hashicorp/terraform/issues/3397)) - * provider/aws: Configurable capacity waiting duration for ASGs ([#3191](https://github.com/hashicorp/terraform/issues/3191)) - * provider/aws: Allow non-persistent Spot Requests ([#3311](https://github.com/hashicorp/terraform/issues/3311)) - * provider/aws: Support tags for AWS DB subnet group ([#3138](https://github.com/hashicorp/terraform/issues/3138)) - * provider/cloudstack: Add `project` parameter to `cloudstack_vpc`, `cloudstack_network`, `cloudstack_ipaddress` and `cloudstack_disk` ([#3035](https://github.com/hashicorp/terraform/issues/3035)) - * provider/openstack: add functionality to attach FloatingIP to Port ([#1788](https://github.com/hashicorp/terraform/issues/1788)) - * provider/google: Can now do multi-region deployments without using multiple providers ([#3258](https://github.com/hashicorp/terraform/issues/3258)) - * remote/s3: Allow canned ACLs to be set on state objects. ([#3233](https://github.com/hashicorp/terraform/issues/3233)) - * remote/s3: Remote state is stored in S3 with `Content-Type: application/json` ([#3385](https://github.com/hashicorp/terraform/issues/3385)) - -BUG FIXES: - - * core: Fix problems referencing list attributes in interpolations ([#2157](https://github.com/hashicorp/terraform/issues/2157)) - * core: don't error on computed value during input walk ([#2988](https://github.com/hashicorp/terraform/issues/2988)) - * core: Ignore missing variables during destroy phase ([#3393](https://github.com/hashicorp/terraform/issues/3393)) - * provider/google: Crashes with interface conversion in GCE Instance Template ([#3027](https://github.com/hashicorp/terraform/issues/3027)) - * provider/google: Convert int to int64 when building the GKE cluster.NodeConfig struct ([#2978](https://github.com/hashicorp/terraform/issues/2978)) - * provider/google: google_compute_instance_template.network_interface.network should be a URL ([#3226](https://github.com/hashicorp/terraform/issues/3226)) - * provider/aws: Retry creation of `aws_ecs_service` if IAM policy isn't ready yet ([#3061](https://github.com/hashicorp/terraform/issues/3061)) - * provider/aws: Fix issue with mixed capitalization for RDS Instances ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix issue with RDS to allow major version upgrades ([#3053](https://github.com/hashicorp/terraform/issues/3053)) - * provider/aws: Fix shard_count in `aws_kinesis_stream` ([#2986](https://github.com/hashicorp/terraform/issues/2986)) - * provider/aws: Fix issue with `key_name` and using VPCs with spot instance requests ([#2954](https://github.com/hashicorp/terraform/issues/2954)) - * provider/aws: Fix unresolvable diffs coming from `aws_elasticache_cluster` names being downcased - by AWS ([#3120](https://github.com/hashicorp/terraform/issues/3120)) - * provider/aws: Read instance source_dest_check and save to state ([#3152](https://github.com/hashicorp/terraform/issues/3152)) - * provider/aws: Allow `weight = 0` in Route53 records ([#3196](https://github.com/hashicorp/terraform/issues/3196)) - * provider/aws: Normalize aws_elasticache_cluster id to lowercase, allowing convergence. 
([#3235](https://github.com/hashicorp/terraform/issues/3235)) - * provider/aws: Fix ValidateAccountId for IAM Instance Profiles ([#3313](https://github.com/hashicorp/terraform/issues/3313)) - * provider/aws: Update Security Group Rules to Version 2 ([#3019](https://github.com/hashicorp/terraform/issues/3019)) - * provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` ([#3470](https://github.com/hashicorp/terraform/issues/3470)) - * provider/aws: Fix force_delete on autoscaling groups ([#3485](https://github.com/hashicorp/terraform/issues/3485)) - * provider/aws: Fix crash with VPC Peering connections ([#3490](https://github.com/hashicorp/terraform/issues/3490)) - * provider/aws: fix bug with reading GSIs from dynamodb ([#3300](https://github.com/hashicorp/terraform/issues/3300)) - * provider/docker: Fix issue preventing private images from being referenced ([#2619](https://github.com/hashicorp/terraform/issues/2619)) - * provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case ([#3284](https://github.com/hashicorp/terraform/issues/3284)) - * provider/openstack: add state 'downloading' to list of expected states in - `blockstorage_volume_v1` creation ([#2866](https://github.com/hashicorp/terraform/issues/2866)) - * provider/openstack: remove security groups (by name) before adding security - groups (by id) ([#2008](https://github.com/hashicorp/terraform/issues/2008)) - -INTERNAL IMPROVEMENTS: - - * core: Makefile target "plugin-dev" for building just one plugin. ([#3229](https://github.com/hashicorp/terraform/issues/3229)) - * helper/schema: Don't allow ``Update`` func if no attributes can actually be updated, per schema. ([#3288](https://github.com/hashicorp/terraform/issues/3288)) - * helper/schema: Default hashing function for sets ([#3018](https://github.com/hashicorp/terraform/issues/3018)) - * helper/multierror: Remove in favor of [github.com/hashicorp/go-multierror](http://github.com/hashicorp/go-multierror). 
([#3336](https://github.com/hashicorp/terraform/issues/3336)) - -## 0.6.3 (August 11, 2015) - -BUG FIXES: - - * core: Skip all descendants after error, not just children; helps prevent confusing - additional errors/crashes after initial failure ([#2963](https://github.com/hashicorp/terraform/issues/2963)) - * core: fix deadlock possibility when both a module and a dependent resource are - removed in the same run ([#2968](https://github.com/hashicorp/terraform/issues/2968)) - * provider/aws: Fix issue with authenticating when using IAM profiles ([#2959](https://github.com/hashicorp/terraform/issues/2959)) - -## 0.6.2 (August 6, 2015) - -FEATURES: - - * **New resource: `google_compute_instance_group_manager`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `google_compute_autoscaler`** ([#2868](https://github.com/hashicorp/terraform/issues/2868)) - * **New resource: `aws_s3_bucket_object`** ([#2898](https://github.com/hashicorp/terraform/issues/2898)) - -IMPROVEMENTS: - - * core: Add resource IDs to errors coming from `apply`/`refresh` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * provider/aws: Validate credentials before walking the graph ([#2730](https://github.com/hashicorp/terraform/issues/2730)) - * provider/aws: Added website_domain for S3 buckets ([#2210](https://github.com/hashicorp/terraform/issues/2210)) - * provider/aws: ELB names are now optional, and generated by Terraform if omitted ([#2571](https://github.com/hashicorp/terraform/issues/2571)) - * provider/aws: Downcase RDS engine names to prevent continuous diffs ([#2745](https://github.com/hashicorp/terraform/issues/2745)) - * provider/aws: Added `source_dest_check` attribute to the aws_network_interface ([#2741](https://github.com/hashicorp/terraform/issues/2741)) - * provider/aws: Clean up externally removed Launch Configurations ([#2806](https://github.com/hashicorp/terraform/issues/2806)) - * provider/aws: Allow configuration of the DynamoDB Endpoint ([#2825](https://github.com/hashicorp/terraform/issues/2825)) - * provider/aws: Compute private ip addresses of ENIs if they are not specified ([#2743](https://github.com/hashicorp/terraform/issues/2743)) - * provider/aws: Add `arn` attribute for DynamoDB tables ([#2924](https://github.com/hashicorp/terraform/issues/2924)) - * provider/aws: Fail silently when account validation fails when running from an instance profile ([#3001](https://github.com/hashicorp/terraform/issues/3001)) - * provider/azure: Allow `settings_file` to accept XML string ([#2922](https://github.com/hashicorp/terraform/issues/2922)) - * provider/azure: Provide a simpler error when using a Platform Image without a - Storage Service ([#2861](https://github.com/hashicorp/terraform/issues/2861)) - * provider/google: `account_file` is now expected to be JSON. Paths are still supported for - backwards compatibility.
([#2839](https://github.com/hashicorp/terraform/issues/2839)) - -BUG FIXES: - - * core: Prevent error duplication in `apply` ([#2815](https://github.com/hashicorp/terraform/issues/2815)) - * core: Fix crash when a provider validation adds a warning ([#2878](https://github.com/hashicorp/terraform/issues/2878)) - * provider/aws: Fix issue with toggling monitoring in AWS Instances ([#2794](https://github.com/hashicorp/terraform/issues/2794)) - * provider/aws: Fix issue with Spot Instance Requests and cancellation ([#2805](https://github.com/hashicorp/terraform/issues/2805)) - * provider/aws: Fix issue with checking for ElastiCache cluster cache node status ([#2842](https://github.com/hashicorp/terraform/issues/2842)) - * provider/aws: Fix issue when unable to find a Root Block Device name of an Instance Backed - AMI ([#2646](https://github.com/hashicorp/terraform/issues/2646)) - * provider/dnsimple: Domain and type should force new records ([#2777](https://github.com/hashicorp/terraform/issues/2777)) - * provider/aws: Fix issue with IAM Server Certificates and Chains ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue with IAM Server Certificates when using `path` ([#2871](https://github.com/hashicorp/terraform/issues/2871)) - * provider/aws: Fix issue in Security Group Rules when the Security Group is not found ([#2897](https://github.com/hashicorp/terraform/issues/2897)) - * provider/aws: allow external ENI attachments ([#2943](https://github.com/hashicorp/terraform/issues/2943)) - * provider/aws: Fix issue with S3 Buckets throwing an error when not found ([#2925](https://github.com/hashicorp/terraform/issues/2925)) - -## 0.6.1 (July 20, 2015) - -FEATURES: - - * **New resource: `google_container_cluster`** ([#2357](https://github.com/hashicorp/terraform/issues/2357)) - * **New resource: `aws_vpc_endpoint`** ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - -IMPROVEMENTS: - - * connection/ssh: Print SSH bastion host details to output ([#2684](https://github.com/hashicorp/terraform/issues/2684)) - * provider/aws: Create RDS databases from snapshots ([#2062](https://github.com/hashicorp/terraform/issues/2062)) - * provider/aws: Add support for restoring from Redis backup stored in S3 ([#2634](https://github.com/hashicorp/terraform/issues/2634)) - * provider/aws: Add `maintenance_window` to ElastiCache cluster ([#2642](https://github.com/hashicorp/terraform/issues/2642)) - * provider/aws: Availability Zones are optional when specifying VPC Zone Identifiers in - Auto Scaling Group updates ([#2724](https://github.com/hashicorp/terraform/issues/2724)) - * provider/google: Add metadata_startup_script to google_compute_instance ([#2375](https://github.com/hashicorp/terraform/issues/2375)) - -BUG FIXES: - - * core: Don't prompt for variables with defaults ([#2613](https://github.com/hashicorp/terraform/issues/2613)) - * core: Return correct number of planned updates ([#2620](https://github.com/hashicorp/terraform/issues/2620)) - * core: Fix "provider not found" error that can occur while running - a destroy plan with grandchildren modules ([#2755](https://github.com/hashicorp/terraform/issues/2755)) - * core: Fix UUID showing up in diff for computed splat (`foo.*.bar`) - variables.
([#2788](https://github.com/hashicorp/terraform/issues/2788)) - * core: Orphan modules that contain no resources (only other modules) - are properly destroyed up to arbitrary depth ([#2786](https://github.com/hashicorp/terraform/issues/2786)) - * core: Fix "attribute not available" during destroy plans in - cases where the parameter is passed between modules ([#2775](https://github.com/hashicorp/terraform/issues/2775)) - * core: Record schema version when destroy fails ([#2923](https://github.com/hashicorp/terraform/issues/2923)) - * connection/ssh: fix issue on machines with an SSH Agent available - preventing `key_file` from being read without explicitly - setting `agent = false` ([#2615](https://github.com/hashicorp/terraform/issues/2615)) - * provider/aws: Allow uppercase characters in `aws_elb.name` ([#2580](https://github.com/hashicorp/terraform/issues/2580)) - * provider/aws: Allow underscores in `aws_db_subnet_group.name` (undocumented by AWS) ([#2604](https://github.com/hashicorp/terraform/issues/2604)) - * provider/aws: Allow dots in `aws_db_subnet_group.name` (undocumented by AWS) ([#2665](https://github.com/hashicorp/terraform/issues/2665)) - * provider/aws: Fix issue with pending Spot Instance requests ([#2640](https://github.com/hashicorp/terraform/issues/2640)) - * provider/aws: Fix issue in AWS Classic environment with referencing external - Security Groups ([#2644](https://github.com/hashicorp/terraform/issues/2644)) - * provider/aws: Bump internet gateway detach timeout ([#2669](https://github.com/hashicorp/terraform/issues/2669)) - * provider/aws: Fix issue with detecting differences in DB Parameters ([#2728](https://github.com/hashicorp/terraform/issues/2728)) - * provider/aws: `ecs_cluster` rename (recreation) and deletion is handled correctly ([#2698](https://github.com/hashicorp/terraform/issues/2698)) - * provider/aws: `aws_route_table` ignores routes generated for VPC endpoints ([#2695](https://github.com/hashicorp/terraform/issues/2695)) - * provider/aws: Fix issue with Launch Configurations and enable_monitoring ([#2735](https://github.com/hashicorp/terraform/issues/2735)) - * provider/openstack: allow empty api_key and endpoint_type ([#2626](https://github.com/hashicorp/terraform/issues/2626)) - * provisioner/chef: Fix permission denied error with ohai hints ([#2781](https://github.com/hashicorp/terraform/issues/2781)) - -## 0.6.0 (June 30, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * command/push: If a variable is already set within Atlas, it won't be - updated unless the `-overwrite` flag is present ([#2373](https://github.com/hashicorp/terraform/issues/2373)) - * connection/ssh: The `agent` field now defaults to `true` if - the `SSH_AGENT_SOCK` environment variable is present. In other words, - `ssh-agent` support is now opt-out instead of opt-in functionality. ([#2408](https://github.com/hashicorp/terraform/issues/2408)) - * provider/aws: If you were setting access and secret key to blank ("") - to force Terraform to load credentials from another source such as the - EC2 role, this will now error. Remove the blank lines and Terraform - will load from other sources. 
- * `concat()` has been repurposed to combine lists instead of strings (old behavior - of joining strings is maintained in this version but is deprecated, strings - should be combined using interpolation syntax, like "${var.foo}{var.bar}") - ([#1790](https://github.com/hashicorp/terraform/issues/1790)) - -FEATURES: - - * **New provider: `azure`** ([#2052](https://github.com/hashicorp/terraform/issues/2052), [#2053](https://github.com/hashicorp/terraform/issues/2053), [#2372](https://github.com/hashicorp/terraform/issues/2372), [#2380](https://github.com/hashicorp/terraform/issues/2380), [#2394](https://github.com/hashicorp/terraform/issues/2394), [#2515](https://github.com/hashicorp/terraform/issues/2515), [#2530](https://github.com/hashicorp/terraform/issues/2530), [#2562](https://github.com/hashicorp/terraform/issues/2562)) - * **New resource: `aws_autoscaling_notification`** ([#2197](https://github.com/hashicorp/terraform/issues/2197)) - * **New resource: `aws_autoscaling_policy`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_cloudwatch_metric_alarm`** ([#2201](https://github.com/hashicorp/terraform/issues/2201)) - * **New resource: `aws_dynamodb_table`** ([#2121](https://github.com/hashicorp/terraform/issues/2121)) - * **New resource: `aws_ecs_cluster`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_service`** ([#1803](https://github.com/hashicorp/terraform/issues/1803)) - * **New resource: `aws_ecs_task_definition`** ([#1803](https://github.com/hashicorp/terraform/issues/1803), [#2402](https://github.com/hashicorp/terraform/issues/2402)) - * **New resource: `aws_elasticache_parameter_group`** ([#2276](https://github.com/hashicorp/terraform/issues/2276)) - * **New resource: `aws_flow_log`** ([#2384](https://github.com/hashicorp/terraform/issues/2384)) - * **New resource: `aws_iam_group_association`** ([#2273](https://github.com/hashicorp/terraform/issues/2273)) - * **New resource: `aws_iam_policy_attachment`** ([#2395](https://github.com/hashicorp/terraform/issues/2395)) - * **New resource: `aws_lambda_function`** ([#2170](https://github.com/hashicorp/terraform/issues/2170)) - * **New resource: `aws_route53_delegation_set`** ([#1999](https://github.com/hashicorp/terraform/issues/1999)) - * **New resource: `aws_route53_health_check`** ([#2226](https://github.com/hashicorp/terraform/issues/2226)) - * **New resource: `aws_spot_instance_request`** ([#2263](https://github.com/hashicorp/terraform/issues/2263)) - * **New resource: `cloudstack_ssh_keypair`** ([#2004](https://github.com/hashicorp/terraform/issues/2004)) - * **New remote state backend: `swift`**: You can now store remote state in - a OpenStack Swift. ([#2254](https://github.com/hashicorp/terraform/issues/2254)) - * command/output: support display of module outputs ([#2102](https://github.com/hashicorp/terraform/issues/2102)) - * core: `keys()` and `values()` funcs for map variables ([#2198](https://github.com/hashicorp/terraform/issues/2198)) - * connection/ssh: SSH bastion host support and ssh-agent forwarding ([#2425](https://github.com/hashicorp/terraform/issues/2425)) - -IMPROVEMENTS: - - * core: HTTP remote state now accepts `skip_cert_verification` - option to ignore TLS cert verification. 
([#2214](https://github.com/hashicorp/terraform/issues/2214)) - * core: S3 remote state now accepts the 'encrypt' option for SSE ([#2405](https://github.com/hashicorp/terraform/issues/2405)) - * core: `plan` now reports sum of resources to be changed/created/destroyed ([#2458](https://github.com/hashicorp/terraform/issues/2458)) - * core: Change string list representation so we can distinguish empty, single - element lists ([#2504](https://github.com/hashicorp/terraform/issues/2504)) - * core: Properly close provider and provisioner plugin connections ([#2406](https://github.com/hashicorp/terraform/issues/2406), [#2527](https://github.com/hashicorp/terraform/issues/2527)) - * provider/aws: AutoScaling groups now support updating Load Balancers without - recreation ([#2472](https://github.com/hashicorp/terraform/issues/2472)) - * provider/aws: Allow more in-place updates for ElastiCache cluster without recreating - ([#2469](https://github.com/hashicorp/terraform/issues/2469)) - * provider/aws: ElastiCache Subnet Groups can be updated - without destroying first ([#2191](https://github.com/hashicorp/terraform/issues/2191)) - * provider/aws: Normalize `certificate_chain` in `aws_iam_server_certificate` to - prevent unnecessary replacement. ([#2411](https://github.com/hashicorp/terraform/issues/2411)) - * provider/aws: `aws_instance` supports `monitoring' ([#2489](https://github.com/hashicorp/terraform/issues/2489)) - * provider/aws: `aws_launch_configuration` now supports `enable_monitoring` ([#2410](https://github.com/hashicorp/terraform/issues/2410)) - * provider/aws: Show outputs after `terraform refresh` ([#2347](https://github.com/hashicorp/terraform/issues/2347)) - * provider/aws: Add backoff/throttling during DynamoDB creation ([#2462](https://github.com/hashicorp/terraform/issues/2462)) - * provider/aws: Add validation for aws_vpc.cidr_block ([#2514](https://github.com/hashicorp/terraform/issues/2514)) - * provider/aws: Add validation for aws_db_subnet_group.name ([#2513](https://github.com/hashicorp/terraform/issues/2513)) - * provider/aws: Add validation for aws_db_instance.identifier ([#2516](https://github.com/hashicorp/terraform/issues/2516)) - * provider/aws: Add validation for aws_elb.name ([#2517](https://github.com/hashicorp/terraform/issues/2517)) - * provider/aws: Add validation for aws_security_group (name+description) ([#2518](https://github.com/hashicorp/terraform/issues/2518)) - * provider/aws: Add validation for aws_launch_configuration ([#2519](https://github.com/hashicorp/terraform/issues/2519)) - * provider/aws: Add validation for aws_autoscaling_group.name ([#2520](https://github.com/hashicorp/terraform/issues/2520)) - * provider/aws: Add validation for aws_iam_role.name ([#2521](https://github.com/hashicorp/terraform/issues/2521)) - * provider/aws: Add validation for aws_iam_role_policy.name ([#2552](https://github.com/hashicorp/terraform/issues/2552)) - * provider/aws: Add validation for aws_iam_instance_profile.name ([#2553](https://github.com/hashicorp/terraform/issues/2553)) - * provider/aws: aws_auto_scaling_group.default_cooldown no longer requires - resource replacement ([#2510](https://github.com/hashicorp/terraform/issues/2510)) - * provider/aws: add AH and ESP protocol integers ([#2321](https://github.com/hashicorp/terraform/issues/2321)) - * 
provider/docker: `docker_container` has the `privileged` - option. ([#2227](https://github.com/hashicorp/terraform/issues/2227)) - * provider/openstack: allow `OS_AUTH_TOKEN` environment variable - to set the openstack `api_key` field ([#2234](https://github.com/hashicorp/terraform/issues/2234)) - * provider/openstack: Can now configure endpoint type (public, admin, - internal) ([#2262](https://github.com/hashicorp/terraform/issues/2262)) - * provider/cloudstack: `cloudstack_instance` now supports projects ([#2115](https://github.com/hashicorp/terraform/issues/2115)) - * provisioner/chef: Added a `os_type` to specifically specify the target OS ([#2483](https://github.com/hashicorp/terraform/issues/2483)) - * provisioner/chef: Added a `ohai_hints` option to upload hint files ([#2487](https://github.com/hashicorp/terraform/issues/2487)) - -BUG FIXES: - - * core: lifecycle `prevent_destroy` can be any value that can be - coerced into a bool ([#2268](https://github.com/hashicorp/terraform/issues/2268)) - * core: matching provider types in sibling modules won't override - each other's config. ([#2464](https://github.com/hashicorp/terraform/issues/2464)) - * core: computed provider configurations now properly validate ([#2457](https://github.com/hashicorp/terraform/issues/2457)) - * core: orphan (commented out) resource dependencies are destroyed in - the correct order ([#2453](https://github.com/hashicorp/terraform/issues/2453)) - * core: validate object types in plugins are actually objects ([#2450](https://github.com/hashicorp/terraform/issues/2450)) - * core: fix `-no-color` flag in subcommands ([#2414](https://github.com/hashicorp/terraform/issues/2414)) - * core: Fix error of 'attribute not found for variable' when a computed - resource attribute is used as a parameter to a module ([#2477](https://github.com/hashicorp/terraform/issues/2477)) - * core: moduled orphans will properly inherit provider configs ([#2476](https://github.com/hashicorp/terraform/issues/2476)) - * core: modules with provider aliases work properly if the parent - doesn't implement those aliases ([#2475](https://github.com/hashicorp/terraform/issues/2475)) - * core: unknown resource attributes passed in as parameters to modules - now error ([#2478](https://github.com/hashicorp/terraform/issues/2478)) - * core: better error messages for missing variables ([#2479](https://github.com/hashicorp/terraform/issues/2479)) - * core: removed set items now properly appear in diffs and applies ([#2507](https://github.com/hashicorp/terraform/issues/2507)) - * core: '*' will not be added as part of the variable name when you - attempt multiplication without a space ([#2505](https://github.com/hashicorp/terraform/issues/2505)) - * core: fix target dependency calculation across module boundaries ([#2555](https://github.com/hashicorp/terraform/issues/2555)) - * command/*: fixed bug where variable input was not asked for unset - vars if terraform.tfvars existed ([#2502](https://github.com/hashicorp/terraform/issues/2502)) - * command/apply: prevent output duplication when reporting errors ([#2267](https://github.com/hashicorp/terraform/issues/2267)) - * command/apply: destroyed orphan resources are properly counted ([#2506](https://github.com/hashicorp/terraform/issues/2506)) - * provider/aws: loading credentials from the environment (vars, EC2 role, - 
etc.) is more robust and will not ask for credentials from stdin ([#1841](https://github.com/hashicorp/terraform/issues/1841)) - * provider/aws: fix panic when route has no `cidr_block` ([#2215](https://github.com/hashicorp/terraform/issues/2215)) - * provider/aws: fix issue preventing destruction of IAM Roles ([#2177](https://github.com/hashicorp/terraform/issues/2177)) - * provider/aws: fix issue where Security Group Rules could collide and fail - to save to the state file correctly ([#2376](https://github.com/hashicorp/terraform/issues/2376)) - * provider/aws: fix issue preventing destruction self referencing Securtity - Group Rules ([#2305](https://github.com/hashicorp/terraform/issues/2305)) - * provider/aws: fix issue causing perpetual diff on ELB listeners - when non-lowercase protocol strings were used ([#2246](https://github.com/hashicorp/terraform/issues/2246)) - * provider/aws: corrected frankfurt S3 website region ([#2259](https://github.com/hashicorp/terraform/issues/2259)) - * provider/aws: `aws_elasticache_cluster` port is required ([#2160](https://github.com/hashicorp/terraform/issues/2160)) - * provider/aws: Handle AMIs where RootBlockDevice does not appear in the - BlockDeviceMapping, preventing root_block_device from working ([#2271](https://github.com/hashicorp/terraform/issues/2271)) - * provider/aws: fix `terraform show` with remote state ([#2371](https://github.com/hashicorp/terraform/issues/2371)) - * provider/aws: detect `instance_type` drift on `aws_instance` ([#2374](https://github.com/hashicorp/terraform/issues/2374)) - * provider/aws: fix crash when `security_group_rule` referenced non-existent - security group ([#2434](https://github.com/hashicorp/terraform/issues/2434)) - * provider/aws: `aws_launch_configuration` retries if IAM instance - profile is not ready yet. 
([#2452](https://github.com/hashicorp/terraform/issues/2452)) - * provider/aws: `fqdn` is populated during creation for `aws_route53_record` ([#2528](https://github.com/hashicorp/terraform/issues/2528)) - * provider/aws: retry VPC delete on DependencyViolation due to eventual - consistency ([#2532](https://github.com/hashicorp/terraform/issues/2532)) - * provider/aws: VPC peering connections in "failed" state are deleted ([#2544](https://github.com/hashicorp/terraform/issues/2544)) - * provider/aws: EIP deletion works if it was manually disassociated ([#2543](https://github.com/hashicorp/terraform/issues/2543)) - * provider/aws: `elasticache_subnet_group.subnet_ids` is now a required argument ([#2534](https://github.com/hashicorp/terraform/issues/2534)) - * provider/aws: handle nil response from VPN connection describes ([#2533](https://github.com/hashicorp/terraform/issues/2533)) - * provider/cloudflare: manual record deletion doesn't cause error ([#2545](https://github.com/hashicorp/terraform/issues/2545)) - * provider/digitalocean: handle case where droplet is deleted outside of - terraform ([#2497](https://github.com/hashicorp/terraform/issues/2497)) - * provider/dme: No longer an error if record deleted manually ([#2546](https://github.com/hashicorp/terraform/issues/2546)) - * provider/docker: Fix issues when using containers with links ([#2327](https://github.com/hashicorp/terraform/issues/2327)) - * provider/openstack: fix panic case if API returns nil network ([#2448](https://github.com/hashicorp/terraform/issues/2448)) - * provider/template: fix issue causing "unknown variable" rendering errors - when an existing set of template variables is changed ([#2386](https://github.com/hashicorp/terraform/issues/2386)) - * provisioner/chef: improve the decoding logic to prevent parameter not found errors ([#2206](https://github.com/hashicorp/terraform/issues/2206)) - -## 0.5.3 (June 1, 2015) - -IMPROVEMENTS: - - * **New resource: `aws_kinesis_stream`** ([#2110](https://github.com/hashicorp/terraform/issues/2110)) - * **New resource: `aws_iam_server_certificate`** ([#2086](https://github.com/hashicorp/terraform/issues/2086)) - * **New resource: `aws_sqs_queue`** ([#1939](https://github.com/hashicorp/terraform/issues/1939)) - * **New resource: `aws_sns_topic`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_sns_topic_subscription`** ([#1974](https://github.com/hashicorp/terraform/issues/1974)) - * **New resource: `aws_volume_attachment`** ([#2050](https://github.com/hashicorp/terraform/issues/2050)) - * **New resource: `google_storage_bucket`** ([#2060](https://github.com/hashicorp/terraform/issues/2060)) - * provider/aws: support ec2 termination protection ([#1988](https://github.com/hashicorp/terraform/issues/1988)) - * provider/aws: support for RDS Read Replicas ([#1946](https://github.com/hashicorp/terraform/issues/1946)) - * provider/aws: `aws_s3_bucket` add support for `policy` ([#1992](https://github.com/hashicorp/terraform/issues/1992)) - * provider/aws: `aws_ebs_volume` add support for `tags` ([#2135](https://github.com/hashicorp/terraform/issues/2135)) - * provider/aws: `aws_elasticache_cluster` Confirm node status before reporting - available - * provider/aws: `aws_network_acl` Add support for ICMP Protocol 
([#2148](https://github.com/hashicorp/terraform/issues/2148)) - * provider/aws: New `force_destroy` parameter for S3 buckets, to destroy - Buckets that contain objects ([#2007](https://github.com/hashicorp/terraform/issues/2007)) - * provider/aws: switching `health_check_type` on ASGs no longer requires - resource refresh ([#2147](https://github.com/hashicorp/terraform/issues/2147)) - * provider/aws: ignore empty `vpc_security_group_ids` on `aws_instance` ([#2311](https://github.com/hashicorp/terraform/issues/2311)) - -BUG FIXES: - - * provider/aws: Correctly handle AWS keypairs which no longer exist ([#2032](https://github.com/hashicorp/terraform/issues/2032)) - * provider/aws: Fix issue with restoring an Instance from snapshot ID ([#2120](https://github.com/hashicorp/terraform/issues/2120)) - * provider/template: store relative path in the state ([#2038](https://github.com/hashicorp/terraform/issues/2038)) - * provisioner/chef: fix interpolation in the Chef provisioner ([#2168](https://github.com/hashicorp/terraform/issues/2168)) - * provisioner/remote-exec: Don't prepend shebang on scripts that already - have one ([#2041](https://github.com/hashicorp/terraform/issues/2041)) - -## 0.5.2 (May 15, 2015) - -FEATURES: - - * **Chef provisioning**: You can now provision new hosts (both Linux and - Windows) with [Chef](https://chef.io) using a native provisioner ([#1868](https://github.com/hashicorp/terraform/issues/1868)) - -IMPROVEMENTS: - - * **New config function: `formatlist`** - Format lists in a similar way to `format`. - Useful for creating URLs from a list of IPs. ([#1829](https://github.com/hashicorp/terraform/issues/1829)) - * **New resource: `aws_route53_zone_association`** - * provider/aws: `aws_autoscaling_group` can wait for capacity in ELB - via `min_elb_capacity` ([#1970](https://github.com/hashicorp/terraform/issues/1970)) - * provider/aws: `aws_db_instances` supports `license_model` ([#1966](https://github.com/hashicorp/terraform/issues/1966)) - * provider/aws: `aws_elasticache_cluster` add support for Tags ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: `aws_network_acl` Network ACLs can be applied to multiple subnets ([#1931](https://github.com/hashicorp/terraform/issues/1931)) - * provider/aws: `aws_s3_bucket` exports `hosted_zone_id` and `region` ([#1865](https://github.com/hashicorp/terraform/issues/1865)) - * provider/aws: `aws_s3_bucket` add support for website `redirect_all_requests_to` ([#1909](https://github.com/hashicorp/terraform/issues/1909)) - * provider/aws: `aws_route53_record` exports `fqdn` ([#1847](https://github.com/hashicorp/terraform/issues/1847)) - * provider/aws: `aws_route53_zone` can create private hosted zones ([#1526](https://github.com/hashicorp/terraform/issues/1526)) - * provider/google: `google_compute_instance` `scratch` attribute added ([#1920](https://github.com/hashicorp/terraform/issues/1920)) - -BUG FIXES: - - * core: fix "resource not found" for interpolation issues with modules - * core: fix unflattenable error for orphans ([#1922](https://github.com/hashicorp/terraform/issues/1922)) - * core: fix deadlock with create-before-destroy + modules ([#1949](https://github.com/hashicorp/terraform/issues/1949)) - * core: fix "no roots found" error with create-before-destroy 
([#1953](https://github.com/hashicorp/terraform/issues/1953)) - * core: variables set with environment variables won't validate as - not set without a default ([#1930](https://github.com/hashicorp/terraform/issues/1930)) - * core: resources with a blank ID in the state are now assumed to not exist ([#1905](https://github.com/hashicorp/terraform/issues/1905)) - * command/push: local vars override remote ones ([#1881](https://github.com/hashicorp/terraform/issues/1881)) - * provider/aws: Mark `aws_security_group` description as `ForceNew` ([#1871](https://github.com/hashicorp/terraform/issues/1871)) - * provider/aws: `aws_db_instance` ARN value is correct ([#1910](https://github.com/hashicorp/terraform/issues/1910)) - * provider/aws: `aws_db_instance` only submit modify request if there - is a change. ([#1906](https://github.com/hashicorp/terraform/issues/1906)) - * provider/aws: `aws_elasticache_cluster` export missing information on cluster nodes ([#1965](https://github.com/hashicorp/terraform/issues/1965)) - * provider/aws: bad AMI on a launch configuration won't block refresh ([#1901](https://github.com/hashicorp/terraform/issues/1901)) - * provider/aws: `aws_security_group` + `aws_subnet` - destroy timeout increased - to prevent DependencyViolation errors. ([#1886](https://github.com/hashicorp/terraform/issues/1886)) - * provider/google: `google_compute_instance` Local SSDs no-longer cause crash - ([#1088](https://github.com/hashicorp/terraform/issues/1088)) - * provider/google: `google_http_health_check` Defaults now driven from Terraform, - avoids errors on update ([#1894](https://github.com/hashicorp/terraform/issues/1894)) - * provider/google: `google_compute_template` Update Instance Template network - definition to match changes to Instance ([#980](https://github.com/hashicorp/terraform/issues/980)) - * provider/template: Fix infinite diff ([#1898](https://github.com/hashicorp/terraform/issues/1898)) - -## 0.5.1 (never released) - -This version was never released since we accidentally skipped it! - -## 0.5.0 (May 7, 2015) - -BACKWARDS INCOMPATIBILITIES: - - * provider/aws: Terraform now remove the default egress rule created by AWS in - a new security group. - -FEATURES: - - * **Multi-provider (a.k.a multi-region)**: Multiple instances of a single - provider can be configured so resources can apply to different settings. - As an example, this allows Terraform to manage multiple regions with AWS. - * **Environmental variables to set variables**: Environment variables can be - used to set variables. The environment variables must be in the format - `TF_VAR_name` and this will be checked last for a value. - * **New remote state backend: `s3`**: You can now store remote state in - an S3 bucket. ([#1723](https://github.com/hashicorp/terraform/issues/1723)) - * **Automatic AWS retries**: This release includes a lot of improvement - around automatic retries of transient errors in AWS. The number of - retry attempts is also configurable. - * **Templates**: A new `template_file` resource allows long strings needing - variable interpolation to be moved into files. ([#1778](https://github.com/hashicorp/terraform/issues/1778)) - * **Provision with WinRM**: Provisioners can now run remote commands on - Windows hosts. 
([#1483](https://github.com/hashicorp/terraform/issues/1483)) - -IMPROVEMENTS: - - * **New config function: `length`** - Get the length of a string or a list. - Useful in conjunction with `split`. ([#1495](https://github.com/hashicorp/terraform/issues/1495)) - * **New resource: `aws_app_cookie_stickiness_policy`** - * **New resource: `aws_customer_gateway`** - * **New resource: `aws_ebs_volume`** - * **New resource: `aws_elasticache_cluster`** - * **New resource: `aws_elasticache_security_group`** - * **New resource: `aws_elasticache_subnet_group`** - * **New resource: `aws_iam_access_key`** - * **New resource: `aws_iam_group_policy`** - * **New resource: `aws_iam_group`** - * **New resource: `aws_iam_instance_profile`** - * **New resource: `aws_iam_policy`** - * **New resource: `aws_iam_role_policy`** - * **New resource: `aws_iam_role`** - * **New resource: `aws_iam_user_policy`** - * **New resource: `aws_iam_user`** - * **New resource: `aws_lb_cookie_stickiness_policy`** - * **New resource: `aws_proxy_protocol_policy`** - * **New resource: `aws_security_group_rule`** - * **New resource: `aws_vpc_dhcp_options_association`** - * **New resource: `aws_vpc_dhcp_options`** - * **New resource: `aws_vpn_connection_route`** - * **New resource: `google_dns_managed_zone`** - * **New resource: `google_dns_record_set`** - * **Migrate to upstream AWS SDK:** Migrate the AWS provider to - [awslabs/aws-sdk-go](https://github.com/awslabs/aws-sdk-go), - the official `awslabs` library. Previously we had forked the library for - stability while `awslabs` refactored. Now that work has completed, and we've - migrated back to the upstream version. - * core: Improve error message on diff mismatch ([#1501](https://github.com/hashicorp/terraform/issues/1501)) - * provisioner/file: expand `~` in source path ([#1569](https://github.com/hashicorp/terraform/issues/1569)) - * provider/aws: Better retry logic, now retries up to 11 times by default - with exponentional backoff. This number is configurable. ([#1787](https://github.com/hashicorp/terraform/issues/1787)) - * provider/aws: Improved credential detection ([#1470](https://github.com/hashicorp/terraform/issues/1470)) - * provider/aws: Can specify a `token` via the config file ([#1601](https://github.com/hashicorp/terraform/issues/1601)) - * provider/aws: Added new `vpc_security_group_ids` attribute for AWS - Instances. If using a VPC, you can now modify the security groups for that - Instance without destroying it ([#1539](https://github.com/hashicorp/terraform/issues/1539)) - * provider/aws: White or blacklist account IDs that can be used to - protect against accidents. 
([#1595](https://github.com/hashicorp/terraform/issues/1595)) - * provider/aws: Add a subset of IAM resources ([#939](https://github.com/hashicorp/terraform/issues/939)) - * provider/aws: `aws_autoscaling_group` retries deletes through "in progress" - errors ([#1840](https://github.com/hashicorp/terraform/issues/1840)) - * provider/aws: `aws_autoscaling_group` waits for healthy capacity during - ASG creation ([#1839](https://github.com/hashicorp/terraform/issues/1839)) - * provider/aws: `aws_instance` supports placement groups ([#1358](https://github.com/hashicorp/terraform/issues/1358)) - * provider/aws: `aws_eip` supports network interface attachment ([#1681](https://github.com/hashicorp/terraform/issues/1681)) - * provider/aws: `aws_elb` supports in-place changing of listeners ([#1619](https://github.com/hashicorp/terraform/issues/1619)) - * provider/aws: `aws_elb` supports connection draining settings ([#1502](https://github.com/hashicorp/terraform/issues/1502)) - * provider/aws: `aws_elb` increase default idle timeout to 60s ([#1646](https://github.com/hashicorp/terraform/issues/1646)) - * provider/aws: `aws_key_pair` name can be omitted and generated ([#1751](https://github.com/hashicorp/terraform/issues/1751)) - * provider/aws: `aws_network_acl` improved validation for network ACL ports - and protocols ([#1798](https://github.com/hashicorp/terraform/issues/1798)) ([#1808](https://github.com/hashicorp/terraform/issues/1808)) - * provider/aws: `aws_route_table` can target network interfaces ([#968](https://github.com/hashicorp/terraform/issues/968)) - * provider/aws: `aws_route_table` can specify propagating VGWs ([#1516](https://github.com/hashicorp/terraform/issues/1516)) - * provider/aws: `aws_route53_record` supports weighted sets ([#1578](https://github.com/hashicorp/terraform/issues/1578)) - * provider/aws: `aws_route53_zone` exports nameservers ([#1525](https://github.com/hashicorp/terraform/issues/1525)) - * provider/aws: `aws_s3_bucket` website support ([#1738](https://github.com/hashicorp/terraform/issues/1738)) - * provider/aws: `aws_security_group` name becomes optional and can be - automatically set to a unique identifier; this helps with - `create_before_destroy` scenarios ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: `aws_security_group` description becomes optional with a - static default value ([#1632](https://github.com/hashicorp/terraform/issues/1632)) - * provider/aws: automatically set the private IP as the SSH address - if not specified and no public IP is available ([#1623](https://github.com/hashicorp/terraform/issues/1623)) - * provider/aws: `aws_elb` exports `source_security_group` field ([#1708](https://github.com/hashicorp/terraform/issues/1708)) - * provider/aws: `aws_route53_record` supports alias targeting ([#1775](https://github.com/hashicorp/terraform/issues/1775)) - * provider/aws: Remove default AWS egress rule for newly created Security Groups ([#1765](https://github.com/hashicorp/terraform/issues/1765)) - * provider/consul: add `scheme` configuration argument ([#1838](https://github.com/hashicorp/terraform/issues/1838)) - * provider/docker: `docker_container` can specify links ([#1564](https://github.com/hashicorp/terraform/issues/1564)) - * provider/google: `resource_compute_disk` supports snapshots 
([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/google: `resource_compute_instance` supports specifying the - device name ([#1426](https://github.com/hashicorp/terraform/issues/1426)) - * provider/openstack: Floating IP support for LBaaS ([#1550](https://github.com/hashicorp/terraform/issues/1550)) - * provider/openstack: Add AZ to `openstack_blockstorage_volume_v1` ([#1726](https://github.com/hashicorp/terraform/issues/1726)) - -BUG FIXES: +--- - * core: Fix graph cycle issues surrounding modules ([#1582](https://github.com/hashicorp/terraform/issues/1582)) ([#1637](https://github.com/hashicorp/terraform/issues/1637)) - * core: math on arbitrary variables works if first operand isn't a - numeric primitive. ([#1381](https://github.com/hashicorp/terraform/issues/1381)) - * core: avoid unnecessary cycles by pruning tainted destroys from - graph if there are no tainted resources ([#1475](https://github.com/hashicorp/terraform/issues/1475)) - * core: fix issue where destroy nodes weren't pruned in specific - edge cases around matching prefixes, which could cause cycles ([#1527](https://github.com/hashicorp/terraform/issues/1527)) - * core: fix issue causing diff mismatch errors in certain scenarios during - resource replacement ([#1515](https://github.com/hashicorp/terraform/issues/1515)) - * core: dependencies on resources with a different index work when - count > 1 ([#1540](https://github.com/hashicorp/terraform/issues/1540)) - * core: don't panic if variable default type is invalid ([#1344](https://github.com/hashicorp/terraform/issues/1344)) - * core: fix perpetual diff issue for computed maps that are empty ([#1607](https://github.com/hashicorp/terraform/issues/1607)) - * core: validation added to check for `self` variables in modules ([#1609](https://github.com/hashicorp/terraform/issues/1609)) - * core: fix edge case where validation didn't pick up unknown fields - if the value was computed ([#1507](https://github.com/hashicorp/terraform/issues/1507)) - * core: Fix issue where values in sets on resources couldn't contain - hyphens. 
([#1641](https://github.com/hashicorp/terraform/issues/1641)) - * core: Outputs removed from the config are removed from the state ([#1714](https://github.com/hashicorp/terraform/issues/1714)) - * core: Validate against the worst-case graph during plan phase to catch cycles - that would previously only show up during apply ([#1655](https://github.com/hashicorp/terraform/issues/1655)) - * core: Referencing invalid module output in module validates ([#1448](https://github.com/hashicorp/terraform/issues/1448)) - * command: remote states with uppercase types work ([#1356](https://github.com/hashicorp/terraform/issues/1356)) - * provider/aws: Support `AWS_SECURITY_TOKEN` env var again ([#1785](https://github.com/hashicorp/terraform/issues/1785)) - * provider/aws: Don't save "instance" for EIP if association fails ([#1776](https://github.com/hashicorp/terraform/issues/1776)) - * provider/aws: launch configuration ID set after create success ([#1518](https://github.com/hashicorp/terraform/issues/1518)) - * provider/aws: Fixed an issue with creating ELBs without any tags ([#1580](https://github.com/hashicorp/terraform/issues/1580)) - * provider/aws: Fix issue in Security Groups with empty IPRanges ([#1612](https://github.com/hashicorp/terraform/issues/1612)) - * provider/aws: manually deleted S3 buckets are refreshed properly ([#1574](https://github.com/hashicorp/terraform/issues/1574)) - * provider/aws: only check for EIP allocation ID in VPC ([#1555](https://github.com/hashicorp/terraform/issues/1555)) - * provider/aws: raw protocol numbers work in `aws_network_acl` ([#1435](https://github.com/hashicorp/terraform/issues/1435)) - * provider/aws: Block devices can be encrypted ([#1718](https://github.com/hashicorp/terraform/issues/1718)) - * provider/aws: ASG health check grace period can be updated in-place ([#1682](https://github.com/hashicorp/terraform/issues/1682)) - * provider/aws: ELB security groups can be updated in-place ([#1662](https://github.com/hashicorp/terraform/issues/1662)) - * provider/aws: `aws_main_route_table +For information on v0.11 and prior releases, please see [the v0.11 branch changelog](https://github.com/hashicorp/terraform/blob/v0.11/CHANGELOG.md). diff --git a/vendor/github.com/hashicorp/terraform/CODEOWNERS b/vendor/github.com/hashicorp/terraform/CODEOWNERS new file mode 100644 index 00000000..218352e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/CODEOWNERS @@ -0,0 +1,4 @@ +# remote-state backends. +/backend/remote-state/azure @terraform-azure +/backend/remote-state/gcs @terraform-google +/backend/remote-state/s3 @terraform-aws diff --git a/vendor/github.com/hashicorp/terraform/Dockerfile b/vendor/github.com/hashicorp/terraform/Dockerfile new file mode 100644 index 00000000..c5afc0d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/Dockerfile @@ -0,0 +1,24 @@ +# This Dockerfile builds on golang:alpine by building Terraform from source +# using the current working directory. +# +# This produces a docker image that contains a working Terraform binary along +# with all of its source code, which is what gets released on hub.docker.com +# as terraform:full. 
+# the release tags) are lighter images including only the officially-released
+# binary from releases.hashicorp.com; these are built instead from
+# scripts/docker-release/Dockerfile-release.
+
+FROM golang:alpine
+LABEL maintainer="HashiCorp Terraform Team "
+
+RUN apk add --update git bash openssh
+
+ENV TF_DEV=true
+ENV TF_RELEASE=1
+
+WORKDIR $GOPATH/src/github.com/hashicorp/terraform
+COPY . .
+RUN /bin/bash scripts/build.sh
+
+WORKDIR $GOPATH
+ENTRYPOINT ["terraform"]
diff --git a/vendor/github.com/hashicorp/terraform/Makefile b/vendor/github.com/hashicorp/terraform/Makefile
index 0eba369d..d92fec42 100644
--- a/vendor/github.com/hashicorp/terraform/Makefile
+++ b/vendor/github.com/hashicorp/terraform/Makefile
@@ -1,12 +1,14 @@
-TEST?=$$(go list ./... | grep -v '/terraform/vendor/' | grep -v '/builtin/bins/')
+VERSION?="0.3.32"
+TEST?=./...
 GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
+WEBSITE_REPO=github.com/hashicorp/terraform-website
 
-default: test vet
+default: test
 
 tools:
-	go get -u github.com/kardianos/govendor
-	go get -u golang.org/x/tools/cmd/stringer
-	go get -u golang.org/x/tools/cmd/cover
+	GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer
+	GO111MODULE=off go get -u golang.org/x/tools/cmd/cover
+	GO111MODULE=off go get -u github.com/golang/mock/mockgen
 
 # bin generates the releaseable binaries for Terraform
 bin: fmtcheck generate
@@ -15,21 +17,10 @@
 # dev creates binaries for testing Terraform locally. These are put
 # into ./bin/ as well as $GOPATH/bin
 dev: fmtcheck generate
-	@TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'"
+	go install -mod=vendor .
 
 quickdev: generate
-	@TF_DEV=1 sh -c "'$(CURDIR)/scripts/build.sh'"
-
-# Shorthand for quickly building the core of Terraform. Note that some
-# changes will require a rebuild of everything, in which case the dev
-# target should be used.
-core-dev: generate
-	go install -tags 'core' github.com/hashicorp/terraform
-
-# Shorthand for quickly testing the core of Terraform (i.e. "not providers")
-core-test: generate
-	@echo "Testing core packages..." && \
-		go test -tags 'core' $(TESTARGS) $(shell go list ./... | grep -v -E 'terraform/(builtin|vendor)')
+	go install -mod=vendor .
 
 # Shorthand for building and installing just one plugin for local testing.
 # Run as (for example): make plugin-dev PLUGIN=provider-aws
@@ -38,31 +29,37 @@ plugin-dev: generate
 	mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
 
 # test runs the unit tests
-test: fmtcheck errcheck generate
-	go test -i $(TEST) || exit 1
-	echo $(TEST) | \
-		xargs -t -n4 go test $(TESTARGS) -timeout=60s -parallel=4
+# we run this one package at a time here because running the entire suite in
+# one command creates memory usage issues when running in Travis-CI.
+test: fmtcheck generate
	go list -mod=vendor $(TEST) | xargs -t -n4 go test $(TESTARGS) -mod=vendor -timeout=2m -parallel=4
 
 # testacc runs acceptance tests
 testacc: fmtcheck generate
 	@if [ "$(TEST)" = "./..." ]; then \
 		echo "ERROR: Set TEST to a specific package. For example,"; \
-		echo "  make testacc TEST=./builtin/providers/aws"; \
+		echo "  make testacc TEST=./builtin/providers/test"; \
 		exit 1; \
 	fi
-	TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m
+	TF_ACC=1 go test $(TEST) -v $(TESTARGS) -mod=vendor -timeout 120m
+
+# e2etest runs the end-to-end tests against a generated Terraform binary
+# The TF_ACC here allows network access, but does not require any special
+# credentials since the e2etests use local-only providers such as "null".
+e2etest: generate
+	TF_ACC=1 go test -mod=vendor -v ./command/e2etest
 
 test-compile: fmtcheck generate
 	@if [ "$(TEST)" = "./..." ]; then \
 		echo "ERROR: Set TEST to a specific package. For example,"; \
-		echo "  make test-compile TEST=./builtin/providers/aws"; \
+		echo "  make test-compile TEST=./builtin/providers/test"; \
 		exit 1; \
 	fi
-	go test -c $(TEST) $(TESTARGS)
+	go test -mod=vendor -c $(TEST) $(TESTARGS)
 
 # testrace runs the race checker
 testrace: fmtcheck generate
-	TF_ACC= go test -race $(TEST) $(TESTARGS)
+	TF_ACC= go test -mod=vendor -race $(TEST) $(TESTARGS)
 
 cover:
 	@go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \
@@ -72,25 +69,27 @@
 	go tool cover -html=coverage.out
 	rm coverage.out
 
-# vet runs the Go source code static analysis tool `vet` to find
-# any common errors.
-vet:
-	@echo 'go vet $$(go list ./... | grep -v /terraform/vendor/)'
-	@go vet $$(go list ./... | grep -v /terraform/vendor/) ; if [ $$? -eq 1 ]; then \
-		echo ""; \
-		echo "Vet found suspicious constructs. Please check the reported constructs"; \
-		echo "and fix them if necessary before submitting the code for review."; \
-		exit 1; \
-	fi
-
 # generate runs `go generate` to build the dynamically generated
-# source files.
-generate:
-	@which stringer > /dev/null; if [ $$? -ne 0 ]; then \
-		go get -u golang.org/x/tools/cmd/stringer; \
-	fi
-	go generate $$(go list ./... | grep -v /terraform/vendor/)
-	@go fmt command/internal_plugin_list.go > /dev/null
+# source files, except the protobuf stubs which are built instead with
+# "make protobuf".
+generate: tools
+	GOFLAGS=-mod=vendor go generate ./...
+	# go fmt doesn't support -mod=vendor but it still wants to populate the
+	# module cache with everything in go.mod even though formatting requires
+	# no dependencies, and so we're disabling modules mode for this right
+	# now until the "go fmt" behavior is rationalized to either support the
+	# -mod= argument or _not_ try to install things.
+	GO111MODULE=off go fmt command/internal_plugin_list.go > /dev/null
+
+# We separate the protobuf generation because most development tasks on
+# Terraform do not involve changing protobuf files and protoc is not a
+# go-gettable dependency and so getting it installed can be inconvenient.
+#
+# If you are working on changes to protobuf interfaces you may either use
+# this target or run the individual scripts below directly.
+protobuf:
+	bash internal/tfplugin5/generate.sh
+	bash plans/internal/planproto/generate.sh
 
 fmt:
 	gofmt -w $(GOFMT_FILES)
 
@@ -98,15 +97,53 @@ fmt:
 fmtcheck:
 	@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
 
-errcheck:
-	@sh -c "'$(CURDIR)/scripts/errcheck.sh'"
-
-vendor-status:
-	@govendor status
+website:
+ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO)))
+	echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..."
+	git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO)
+endif
+	$(eval WEBSITE_PATH := $(GOPATH)/src/$(WEBSITE_REPO))
+	@echo "==> Starting core website in Docker..."
+	@docker run \
+		--interactive \
+		--rm \
+		--tty \
+		--publish "4567:4567" \
+		--publish "35729:35729" \
+		--volume "$(shell pwd)/website:/website" \
+		--volume "$(shell pwd):/ext/terraform" \
+		--volume "$(WEBSITE_PATH)/content:/terraform-website" \
+		--volume "$(WEBSITE_PATH)/content/source/assets:/website/docs/assets" \
+		--volume "$(WEBSITE_PATH)/content/source/layouts:/website/docs/layouts" \
+		--workdir /terraform-website \
+		hashicorp/middleman-hashicorp:${VERSION}
+
+website-test:
+ifeq (,$(wildcard $(GOPATH)/src/$(WEBSITE_REPO)))
+	echo "$(WEBSITE_REPO) not found in your GOPATH (necessary for layouts and assets), get-ting..."
+	git clone https://$(WEBSITE_REPO) $(GOPATH)/src/$(WEBSITE_REPO)
+endif
+	$(eval WEBSITE_PATH := $(GOPATH)/src/$(WEBSITE_REPO))
+	@echo "==> Testing core website in Docker..."
+	-@docker stop "tf-website-core-temp"
+	@docker run \
+		--detach \
+		--rm \
+		--name "tf-website-core-temp" \
+		--publish "4567:4567" \
+		--volume "$(shell pwd)/website:/website" \
+		--volume "$(shell pwd):/ext/terraform" \
+		--volume "$(WEBSITE_PATH)/content:/terraform-website" \
+		--volume "$(WEBSITE_PATH)/content/source/assets:/website/docs/assets" \
+		--volume "$(WEBSITE_PATH)/content/source/layouts:/website/docs/layouts" \
+		--workdir /terraform-website \
+		hashicorp/middleman-hashicorp:${VERSION}
+	$(WEBSITE_PATH)/content/scripts/check-links.sh "http://127.0.0.1:4567" "/" "/docs/providers/*"
+	@docker stop "tf-website-core-temp"
 
 # disallow any parallelism (-j) for Make. This is necessary since some
 # commands during the build process create temporary files that collide
 # under parallel conditions.
 .NOTPARALLEL:
 
-.PHONY: bin core-dev core-test cover default dev errcheck fmt fmtcheck generate plugin-dev quickdev test-compile test testacc testrace tools vendor-status vet
+.PHONY: bin cover default dev e2etest fmt fmtcheck generate protobuf plugin-dev quickdev test-compile test testacc testrace tools vendor-status website website-test
diff --git a/vendor/github.com/hashicorp/terraform/README.md b/vendor/github.com/hashicorp/terraform/README.md
index 351cf0e8..3a3cc919 100644
--- a/vendor/github.com/hashicorp/terraform/README.md
+++ b/vendor/github.com/hashicorp/terraform/README.md
@@ -5,7 +5,7 @@ Terraform
 - [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
 - Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
 
-![Terraform](https://rawgithub.com/hashicorp/terraform/master/website/source/assets/images/logo-hashicorp.svg)
+Terraform
 
 Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
 
@@ -24,19 +24,31 @@ For more information, see the [introduction section](http://www.terraform.io/int
 Getting Started & Documentation
 -------------------------------
 
-All documentation is available on the [Terraform website](http://www.terraform.io).
+If you're new to Terraform and want to get started creating infrastructure, please checkout our [Getting Started](https://www.terraform.io/intro/getting-started/install.html) guide, available on the [Terraform website](http://www.terraform.io).
+
+All documentation is available on the [Terraform website](http://www.terraform.io):
+
+  - [Intro](https://www.terraform.io/intro/index.html)
+  - [Docs](https://www.terraform.io/docs/index.html)
 
 Developing Terraform
 --------------------
 
-If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.8+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.11+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
+
+This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
+
+For local development of Terraform core, first make sure Go is properly installed and that a
+[GOPATH](http://golang.org/doc/code.html#GOPATH) has been set. You will also need to add `$GOPATH/bin` to your `$PATH`.
 
-For local dev first make sure Go is properly installed, including setting up a [GOPATH](http://golang.org/doc/code.html#GOPATH). You will also need to add `$GOPATH/bin` to your `$PATH`.
+Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`.
 
-Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`. All the necessary dependencies are either vendored or automatically installed, so you just need to type `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
+You'll need to run `make tools` to install some required tools, then `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
+You only need to run `make tools` once (or when the tools change).
 
 ```sh
 $ cd "$GOPATH/src/github.com/hashicorp/terraform"
+$ make tools
 $ make
 ```
 
@@ -56,21 +68,17 @@ $ make test TEST=./terraform
 ...
 ```
 
-If you're working on a specific provider and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Azure provider:
+If you're working on a specific provider which has not been separated into an individual repository and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Test provider:
 
 ```sh
-$ make plugin-dev PLUGIN=provider-azure
-```
-
-If you're working on the core of Terraform, and only wish to rebuild that without rebuilding providers, you can use the `core-dev` target. It is important to note that some types of changes may require both core and providers to be rebuilt - for example work on the RPC interface. To build just the core of Terraform:
-
-```sh
-$ make core-dev
+$ make plugin-dev PLUGIN=provider-test
 ```
 
 ### Dependencies
 
-Terraform stores its dependencies under `vendor/`, which [Go 1.6+ will automatically recognize and load](https://golang.org/cmd/go/#hdr-Vendor_Directories). We use [`govendor`](https://github.com/kardianos/govendor) to manage the vendored dependencies.
+Terraform uses Go Modules for dependency management, but for the moment is
+continuing to use Go 1.6-style vendoring for compatibility with tools that
+have not yet been updated for full Go Modules support.
 
 If you're developing Terraform, there are a few tasks you might need to perform.
 
@@ -82,17 +90,11 @@ To add a dependency:
 
 Assuming your work is on a branch called `my-feature-branch`, the steps look like this:
 
-1. Add the new package to your GOPATH:
-
-    ```bash
-    go get github.com/hashicorp/my-project
-    ```
-
-2. Add the new package to your `vendor/` directory:
+1. Add an `import` statement to a suitable package in the Terraform code.
 
-    ```bash
-    govendor add github.com/hashicorp/my-project/package
-    ```
+2. Run `go mod vendor` to download the latest version of the module containing
+   the imported package into the `vendor/` directory, and update the `go.mod`
+   and `go.sum` files.
 
 3. Review the changes in git and commit them.
 
@@ -100,13 +102,11 @@ Assuming your work is on a branch called `my-feature-branch`, the steps look lik
 
 To update a dependency:
 
-1. Fetch the dependency:
+1. Run `go get -u module-path@version-number`, such as `go get -u github.com/hashicorp/hcl@2.0.0`
 
-    ```bash
-    govendor fetch github.com/hashicorp/my-project
-    ```
+2. Run `go mod vendor` to update the vendored copy in the `vendor/` directory.
 
-2. Review the changes in git and commit them.
+3. Review the changes in git and commit them.
 
 ### Acceptance Tests
 
@@ -162,3 +162,7 @@ For example, run the following command to build terraform in a linux-based conta
 ```sh
 docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
 ```
+
+
+## License
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large)
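As a rough illustration of step 1 of the "add a dependency" workflow in the README diff above, a new dependency enters the build simply by being imported; `go mod vendor` then does the rest. This is a sketch only, and the `hclparse` package is used purely as an illustrative stand-in, not something this diff adds:

```go
// Illustrative only: importing a package is what makes it a dependency.
package example

import (
	"github.com/hashicorp/hcl2/hclparse" // stand-in dependency for this sketch
)

// NewConfigParser references the import so that `go mod vendor` and
// `go mod tidy` retain the module in go.mod / vendor/.
func NewConfigParser() *hclparse.Parser {
	return hclparse.NewParser()
}
```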
diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
new file mode 100644
index 00000000..90a5faf0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
@@ -0,0 +1,12 @@
+package addrs
+
+// CountAttr is the address of an attribute of the "count" object in
+// the interpolation scope, like "count.index".
+type CountAttr struct {
+	referenceable
+	Name string
+}
+
+func (ca CountAttr) String() string {
+	return "count." + ca.Name
+}
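A quick sketch of how the `CountAttr` type just added renders, assuming the `addrs` package as introduced in this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// count.index is the attribute configurations reference most often.
	ca := addrs.CountAttr{Name: "index"}
	fmt.Println(ca.String()) // prints "count.index"
}
```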
diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform/addrs/doc.go
new file mode 100644
index 00000000..46093314
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/doc.go
@@ -0,0 +1,17 @@
+// Package addrs contains types that represent "addresses", which are
+// references to specific objects within a Terraform configuration or
+// state.
+//
+// All addresses have string representations based on HCL traversal syntax
+// which should be used in the user-interface, and also in-memory
+// representations that can be used internally.
+//
+// For object types that exist within Terraform modules a pair of types is
+// used. The "local" part of the address is represented by a type, and then
+// an absolute path to that object in the context of its module is represented
+// by a type of the same name with an "Abs" prefix added, for "absolute".
+//
+// All types within this package should be treated as immutable, even if this
+// is not enforced by the Go compiler. It is always an implementation error
+// to modify an address object in-place after it is initially constructed.
+package addrs
diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
new file mode 100644
index 00000000..d2c046c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
@@ -0,0 +1,41 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// InputVariable is the address of an input variable.
+type InputVariable struct {
+	referenceable
+	Name string
+}
+
+func (v InputVariable) String() string {
+	return "var." + v.Name
+}
+
+// AbsInputVariableInstance is the address of an input variable within a
+// particular module instance.
+type AbsInputVariableInstance struct {
+	Module   ModuleInstance
+	Variable InputVariable
+}
+
+// InputVariable returns the absolute address of the input variable of the
+// given name inside the receiving module instance.
+func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance {
+	return AbsInputVariableInstance{
+		Module: m,
+		Variable: InputVariable{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsInputVariableInstance) String() string {
+	if len(v.Module) == 0 {
+		return v.Variable.String()
+	}
+
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String())
+}
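The `InputVariable`/`AbsInputVariableInstance` pair is a concrete instance of the local-versus-"Abs" pattern described in the package documentation above. A minimal sketch of the two forms (this assumes `RootModuleInstance`, which the `addrs` package defines in a file not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// The "local" part of the address on its own:
	v := addrs.InputVariable{Name: "region"}
	fmt.Println(v.String()) // var.region

	// The same variable made absolute within the root module instance;
	// the root module contributes no prefix, so this also prints var.region.
	abs := addrs.RootModuleInstance.InputVariable("region")
	fmt.Println(abs.String())
}
```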
+func ParseInstanceKey(key cty.Value) (InstanceKey, error) {
+	switch key.Type() {
+	case cty.String:
+		return StringKey(key.AsString()), nil
+	case cty.Number:
+		var idx int
+		err := gocty.FromCtyValue(key, &idx)
+		return IntKey(idx), err
+	default:
+		return NoKey, fmt.Errorf("either a string or an integer is required")
+	}
+}
+
+// NoKey represents the absence of an InstanceKey, for the single instance
+// of a configuration object that does not use "count" or "for_each" at all.
+var NoKey InstanceKey
+
+// IntKey is the InstanceKey representation for integer indices, as
+// used when the "count" argument is specified or if for_each is used with
+// a sequence type.
+type IntKey int
+
+func (k IntKey) instanceKeySigil() {
+}
+
+func (k IntKey) String() string {
+	return fmt.Sprintf("[%d]", int(k))
+}
+
+// StringKey is the InstanceKey representation for string indices, as
+// used when the "for_each" argument is specified with a map or object type.
+type StringKey string
+
+func (k StringKey) instanceKeySigil() {
+}
+
+func (k StringKey) String() string {
+	// FIXME: This isn't _quite_ right because Go's quoted string syntax is
+	// slightly different than HCL's, but we'll accept it for now.
+	return fmt.Sprintf("[%q]", string(k))
+}
+
+// InstanceKeyLess returns true if the first given instance key i should sort
+// before the second key j, and false otherwise.
+func InstanceKeyLess(i, j InstanceKey) bool {
+	iTy := instanceKeyType(i)
+	jTy := instanceKeyType(j)
+
+	switch {
+	case i == j:
+		return false
+	case i == NoKey:
+		return true
+	case j == NoKey:
+		return false
+	case iTy != jTy:
+		// The ordering here is arbitrary except that we want NoKeyType
+		// to sort before the others, so we'll just use the enum values
+		// of InstanceKeyType here (where NoKey is zero, sorting before
+		// any other).
+		return uint32(iTy) < uint32(jTy)
+	case iTy == IntKeyType:
+		return int(i.(IntKey)) < int(j.(IntKey))
+	case iTy == StringKeyType:
+		return string(i.(StringKey)) < string(j.(StringKey))
+	default:
+		// Shouldn't be possible to get down here in practice, since the
+		// above is exhaustive.
+		return false
+	}
+}
+
+func instanceKeyType(k InstanceKey) InstanceKeyType {
+	if _, ok := k.(StringKey); ok {
+		return StringKeyType
+	}
+	if _, ok := k.(IntKey); ok {
+		return IntKeyType
+	}
+	return NoKeyType
+}
+
+// InstanceKeyType represents the different types of instance key that are
+// supported. Usually it is sufficient to simply type-assert an InstanceKey
+// value to either IntKey or StringKey, but this type and its values can be
+// used to represent the types themselves, rather than specific values
+// of those types.
+type InstanceKeyType rune
+
+const (
+	NoKeyType     InstanceKeyType = 0
+	IntKeyType    InstanceKeyType = 'I'
+	StringKeyType InstanceKeyType = 'S'
+)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
new file mode 100644
index 00000000..61a07b9c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
@@ -0,0 +1,51 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// LocalValue is the address of a local value.
+type LocalValue struct {
+	referenceable
+	Name string
+}
+
+func (v LocalValue) String() string {
+	return "local." + v.Name
+}
+
+// Absolute converts the receiver into an absolute address within the given
+// module instance.
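+//
+// For example, an illustrative local value "local.greeting" in the module
+// instance "module.web" has the absolute address "module.web.local.greeting".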
+func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue {
+	return AbsLocalValue{
+		Module:     m,
+		LocalValue: v,
+	}
+}
+
+// AbsLocalValue is the absolute address of a local value within a module instance.
+type AbsLocalValue struct {
+	Module     ModuleInstance
+	LocalValue LocalValue
+}
+
+// LocalValue returns the absolute address of a local value of the given
+// name within the receiving module instance.
+func (m ModuleInstance) LocalValue(name string) AbsLocalValue {
+	return AbsLocalValue{
+		Module: m,
+		LocalValue: LocalValue{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsLocalValue) String() string {
+	if len(v.Module) == 0 {
+		return v.LocalValue.String()
+	}
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String())
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go
new file mode 100644
index 00000000..6420c630
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module.go
@@ -0,0 +1,78 @@
+package addrs
+
+import (
+	"strings"
+)
+
+// Module is an address for a module call within configuration. This is
+// the static counterpart of ModuleInstance, representing a traversal through
+// the static module call tree in configuration and does not take into account
+// the potentially-multiple instances of a module that might be created by
+// "count" and "for_each" arguments within those calls.
+//
+// This type should be used only in very specialized cases when working with
+// the static module call tree. Type ModuleInstance is appropriate in more cases.
+//
+// Although Module is a slice, it should be treated as immutable after creation.
+type Module []string
+
+// RootModule is the module address representing the root of the static module
+// call tree, which is also the zero value of Module.
+//
+// Note that this is not the root of the dynamic module tree, which is instead
+// represented by RootModuleInstance.
+var RootModule Module
+
+// IsRoot returns true if the receiver is the address of the root module,
+// or false otherwise.
+func (m Module) IsRoot() bool {
+	return len(m) == 0
+}
+
+func (m Module) String() string {
+	if len(m) == 0 {
+		return ""
+	}
+	return strings.Join([]string(m), ".")
+}
+
+// Child returns the address of a child call in the receiver, identified by the
+// given name.
+func (m Module) Child(name string) Module {
+	ret := make(Module, 0, len(m)+1)
+	ret = append(ret, m...)
+	return append(ret, name)
+}
+
+// Parent returns the address of the parent module of the receiver, or the
+// receiver itself if there is no parent (if it's the root module address).
+func (m Module) Parent() Module {
+	if len(m) == 0 {
+		return m
+	}
+	return m[:len(m)-1]
+}
+
+// Call returns the module call address that corresponds to the given module
+// instance, along with the address of the module that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCall and then returns a slice of the receiver that excludes that
+// last part. This is just a convenience for situations where a call address
+// is required, such as when dealing with *Reference and Referenceable values.
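+//
+// For example, the module address "a.b" yields the containing module
+// address "a" together with the call address "module.b".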
+func (m Module) Call() (Module, ModuleCall) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCall for root module")
+	}
+
+	caller, callName := m[:len(m)-1], m[len(m)-1]
+	return caller, ModuleCall{
+		Name: callName,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
new file mode 100644
index 00000000..09596cc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
@@ -0,0 +1,81 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// ModuleCall is the address of a call from the current module to a child
+// module.
+//
+// There is no "Abs" version of ModuleCall because an absolute module path
+// is represented by ModuleInstance.
+type ModuleCall struct {
+	referenceable
+	Name string
+}
+
+func (c ModuleCall) String() string {
+	return "module." + c.Name
+}
+
+// Instance returns the address of an instance of the receiver identified by
+// the given key.
+func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance {
+	return ModuleCallInstance{
+		Call: c,
+		Key:  key,
+	}
+}
+
+// ModuleCallInstance is the address of one instance of a module created from
+// a module call, which might create multiple instances using "count" or
+// "for_each" arguments.
+type ModuleCallInstance struct {
+	referenceable
+	Call ModuleCall
+	Key  InstanceKey
+}
+
+func (c ModuleCallInstance) String() string {
+	if c.Key == NoKey {
+		return c.Call.String()
+	}
+	return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key)
+}
+
+// ModuleInstance returns the address of the module instance that corresponds
+// to the receiving call instance when resolved in the given calling module.
+// In other words, it returns the child module instance that the receiving
+// call instance creates.
+func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance {
+	return caller.Child(c.Call.Name, c.Key)
+}
+
+// Output returns the address of an output of the receiver identified by its
+// name.
+func (c ModuleCallInstance) Output(name string) ModuleCallOutput {
+	return ModuleCallOutput{
+		Call: c,
+		Name: name,
+	}
+}
+
+// ModuleCallOutput is the address of a particular named output produced by
+// an instance of a module call.
+type ModuleCallOutput struct {
+	referenceable
+	Call ModuleCallInstance
+	Name string
+}
+
+func (co ModuleCallOutput) String() string {
+	return fmt.Sprintf("%s.%s", co.Call.String(), co.Name)
+}
+
+// AbsOutputValue returns the absolute output value address that corresponds
+// to the receiving module call output address, once resolved in the given
+// calling module.
+func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
+	moduleAddr := co.Call.ModuleInstance(caller)
+	return moduleAddr.OutputValue(co.Name)
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
new file mode 100644
index 00000000..67e73e52
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
@@ -0,0 +1,415 @@
+package addrs
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// ModuleInstance is an address for a particular module instance within the
+// dynamic module tree. This is an extension of the static traversals
+// represented by type Module that deals with the possibility of a single
+// module call producing multiple instances via the "count" and "for_each"
+// arguments.
+//
+// Although ModuleInstance is a slice, it should be treated as immutable after
+// creation.
+type ModuleInstance []ModuleInstanceStep
+
+var (
+	_ Targetable = ModuleInstance(nil)
+)
+
+func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) {
+	mi, remain, diags := parseModuleInstancePrefix(traversal)
+	if len(remain) != 0 {
+		if len(remain) == len(traversal) {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid module instance address",
+				Detail:   "A module instance address must begin with \"module.\".",
+				Subject:  remain.SourceRange().Ptr(),
+			})
+		} else {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid module instance address",
+				Detail:   "The module instance address is followed by additional invalid content.",
+				Subject:  remain.SourceRange().Ptr(),
+			})
+		}
+	}
+	return mi, diags
+}
+
+// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseModuleInstance.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// then the returned address is invalid.
+func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return nil, diags
+	}
+
+	addr, addrDiags := ParseModuleInstance(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
+	remain := traversal
+	var mi ModuleInstance
+	var diags tfdiags.Diagnostics
+
+	for len(remain) > 0 {
+		var next string
+		switch tt := remain[0].(type) {
+		case hcl.TraverseRoot:
+			next = tt.Name
+		case hcl.TraverseAttr:
+			next = tt.Name
+		default:
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address operator",
+				Detail:   "Module address prefix must be followed by dot and then a name.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+			break
+		}
+
+		if next != "module" {
+			break
+		}
+
+		kwRange := remain[0].SourceRange()
+		remain = remain[1:]
+		// If we have the prefix "module" then we should be followed by a
+		// module call name, as an attribute, and then optionally an index step
+		// giving the instance key.
+		if len(remain) == 0 {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address operator",
+				Detail:   "Prefix \"module.\" must be followed by a module name.",
+				Subject:  &kwRange,
+			})
+			break
+		}
+
+		var moduleName string
+		switch tt := remain[0].(type) {
+		case hcl.TraverseAttr:
+			moduleName = tt.Name
+		default:
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address operator",
+				Detail:   "Prefix \"module.\" must be followed by a module name.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+			break
+		}
+		remain = remain[1:]
+		step := ModuleInstanceStep{
+			Name: moduleName,
+		}
+
+		if len(remain) > 0 {
+			if idx, ok := remain[0].(hcl.TraverseIndex); ok {
+				remain = remain[1:]
+
+				switch idx.Key.Type() {
+				case cty.String:
+					step.InstanceKey = StringKey(idx.Key.AsString())
+				case cty.Number:
+					var idxInt int
+					err := gocty.FromCtyValue(idx.Key, &idxInt)
+					if err == nil {
+						step.InstanceKey = IntKey(idxInt)
+					} else {
+						diags = diags.Append(&hcl.Diagnostic{
+							Severity: hcl.DiagError,
+							Summary:  "Invalid address operator",
+							Detail:   fmt.Sprintf("Invalid module index: %s.", err),
+							Subject:  idx.SourceRange().Ptr(),
+						})
+					}
+				default:
+					// Should never happen, because no other types are allowed in traversal indices.
+					diags = diags.Append(&hcl.Diagnostic{
+						Severity: hcl.DiagError,
+						Summary:  "Invalid address operator",
+						Detail:   "Invalid module key: must be either a string or an integer.",
+						Subject:  idx.SourceRange().Ptr(),
+					})
+				}
+			}
+		}
+
+		mi = append(mi, step)
+	}
+
+	var retRemain hcl.Traversal
+	if len(remain) > 0 {
+		retRemain = make(hcl.Traversal, len(remain))
+		copy(retRemain, remain)
+		// The first element here might be either a TraverseRoot or a
+		// TraverseAttr, depending on whether we had a module address on the
+		// front. To make life easier for callers, we'll normalize to always
+		// start with a TraverseRoot.
+		if tt, ok := retRemain[0].(hcl.TraverseAttr); ok {
+			retRemain[0] = hcl.TraverseRoot{
+				Name:     tt.Name,
+				SrcRange: tt.SrcRange,
+			}
+		}
+	}
+
+	return mi, retRemain, diags
+}
+
+// UnkeyedInstanceShim is a shim method for converting a Module address to the
+// equivalent ModuleInstance address that assumes that no modules have
+// keyed instances.
+//
+// This is a temporary allowance for the fact that Terraform does not presently
+// support "count" and "for_each" on modules, and thus graph building code that
+// derives graph nodes from configuration must just assume unkeyed modules
+// in order to construct the graph. At a later time when "count" and "for_each"
+// support is added for modules, all callers of this method will need to be
+// reworked to allow for keyed module instances.
+func (m Module) UnkeyedInstanceShim() ModuleInstance {
+	path := make(ModuleInstance, len(m))
+	for i, name := range m {
+		path[i] = ModuleInstanceStep{Name: name}
+	}
+	return path
+}
+
+// ModuleInstanceStep is a single traversal step through the dynamic module
+// tree. It is used only as part of ModuleInstance.
+type ModuleInstanceStep struct {
+	Name        string
+	InstanceKey InstanceKey
+}
+
+// RootModuleInstance is the module instance address representing the root
+// module, which is also the zero value of ModuleInstance.
+var RootModuleInstance ModuleInstance
+
+// IsRoot returns true if the receiver is the address of the root module instance,
+// or false otherwise.
+func (m ModuleInstance) IsRoot() bool {
+	return len(m) == 0
+}
+
+// Child returns the address of a child module instance of the receiver,
+// identified by the given name and key.
+func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance {
+	ret := make(ModuleInstance, 0, len(m)+1)
+	ret = append(ret, m...)
+	return append(ret, ModuleInstanceStep{
+		Name:        name,
+		InstanceKey: key,
+	})
+}
+
+// Parent returns the address of the parent module instance of the receiver, or
+// the receiver itself if there is no parent (if it's the root module address).
+func (m ModuleInstance) Parent() ModuleInstance {
+	if len(m) == 0 {
+		return m
+	}
+	return m[:len(m)-1]
+}
+
+// String returns a string representation of the receiver, in the format used
+// within e.g. user-provided resource addresses.
+//
+// The address of the root module has the empty string as its representation.
+func (m ModuleInstance) String() string {
+	var buf bytes.Buffer
+	sep := ""
+	for _, step := range m {
+		buf.WriteString(sep)
+		buf.WriteString("module.")
+		buf.WriteString(step.Name)
+		if step.InstanceKey != NoKey {
+			buf.WriteString(step.InstanceKey.String())
+		}
+		sep = "."
+	}
+	return buf.String()
+}
+
+// Equal returns true if the receiver and the given other value
+// contain the exact same parts.
+func (m ModuleInstance) Equal(o ModuleInstance) bool {
+	return m.String() == o.String()
+}
+
+// Less returns true if the receiver should sort before the given other value
+// in a sorted list of addresses.
+func (m ModuleInstance) Less(o ModuleInstance) bool {
+	if len(m) != len(o) {
+		// Shorter path sorts first.
+		return len(m) < len(o)
+	}
+
+	for i := range m {
+		mS, oS := m[i], o[i]
+		switch {
+		case mS.Name != oS.Name:
+			return mS.Name < oS.Name
+		case mS.InstanceKey != oS.InstanceKey:
+			return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
+		}
+	}
+
+	return false
+}
+
+// Ancestors returns a slice containing the receiver and all of its ancestor
+// module instances, all the way up to (and including) the root module.
+// The result is ordered by depth, with the root module always first.
+//
+// Since the result always includes the root module, a caller may choose to
+// ignore it by slicing the result with [1:].
+func (m ModuleInstance) Ancestors() []ModuleInstance {
+	ret := make([]ModuleInstance, 0, len(m)+1)
+	for i := 0; i <= len(m); i++ {
+		ret = append(ret, m[:i])
+	}
+	return ret
+}
+
+// IsAncestor returns true if the receiver is an ancestor of the given
+// other value.
+func (m ModuleInstance) IsAncestor(o ModuleInstance) bool {
+	// Longer or equal sized paths means the receiver cannot
+	// be an ancestor of the given module instance.
+	if len(m) >= len(o) {
+		return false
+	}
+
+	for i, ms := range m {
+		if ms.Name != o[i].Name {
+			return false
+		}
+		if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Call returns the module call address that corresponds to the given module
+// instance, along with the address of the module instance that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// A single module call can produce potentially many module instances, so the
+// result discards any instance key that might be present on the last step
+// of the instance. To retain this, use CallInstance instead.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCall and then returns a slice of the receiver that excludes that
+// last part. This is just a convenience for situations where a call address
+// is required, such as when dealing with *Reference and Referenceable values.
+func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCall for root module")
+	}
+
+	inst, lastStep := m[:len(m)-1], m[len(m)-1]
+	return inst, ModuleCall{
+		Name: lastStep.Name,
+	}
+}
+
+// CallInstance returns the module call instance address that corresponds to
+// the given module instance, along with the address of the module instance
+// that contains it.
+//
+// There is no call for the root module, so this method will panic if called
+// on the root module address.
+//
+// In practice, this just turns the last element of the receiver into a
+// ModuleCallInstance and then returns a slice of the receiver that excludes
+// that last part. This is just a convenience for situations where a call
+// address is required, such as when dealing with *Reference and Referenceable
+// values.
+func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
+	if len(m) == 0 {
+		panic("cannot produce ModuleCallInstance for root module")
+	}
+
+	inst, lastStep := m[:len(m)-1], m[len(m)-1]
+	return inst, ModuleCallInstance{
+		Call: ModuleCall{
+			Name: lastStep.Name,
+		},
+		Key: lastStep.InstanceKey,
+	}
+}
+
+// TargetContains implements Targetable by returning true if the given other
+// address either matches the receiver, is a sub-module-instance of the
+// receiver, or is a targetable absolute address within a module that
+// is contained within the receiver.
+func (m ModuleInstance) TargetContains(other Targetable) bool {
+	switch to := other.(type) {
+
+	case ModuleInstance:
+		if len(to) < len(m) {
+			// Can't be contained if the path is shorter
+			return false
+		}
+		// Other is contained if its steps match for the length of our own path.
+		for i, ourStep := range m {
+			otherStep := to[i]
+			if ourStep != otherStep {
+				return false
+			}
+		}
+		// If we fall out here then the prefix matched, so it's contained.
+		return true
+
+	case AbsResource:
+		return m.TargetContains(to.Module)
+
+	case AbsResourceInstance:
+		return m.TargetContains(to.Module)
+
+	default:
+		return false
+	}
+}
+
+func (m ModuleInstance) targetableSigil() {
+	// ModuleInstance is targetable
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
new file mode 100644
index 00000000..bcd923ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
@@ -0,0 +1,78 @@
+package addrs
+
+import (
+	"fmt"
+)
+
+// OutputValue is the address of an output value, in the context of the module
+// that is defining it.
+//
+// This is related to but separate from ModuleCallOutput, which represents
+// a module output from the perspective of its parent module. Since output
+// values cannot be referenced from the module where they are defined,
+// OutputValue is not Referenceable, while ModuleCallOutput is.
+type OutputValue struct {
+	Name string
+}
+
+func (v OutputValue) String() string {
+	return "output." + v.Name
+}
+
+// Absolute converts the receiver into an absolute address within the given
+// module instance.
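+//
+// For example, an illustrative output value "output.ip" in the module
+// instance "module.web" has the absolute address "module.web.output.ip".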
+func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue {
+	return AbsOutputValue{
+		Module:      m,
+		OutputValue: v,
+	}
+}
+
+// AbsOutputValue is the absolute address of an output value within a module instance.
+//
+// This represents an output globally within the namespace of a particular
+// configuration. It is related to but separate from ModuleCallOutput, which
+// represents a module output from the perspective of its parent module.
+type AbsOutputValue struct {
+	Module      ModuleInstance
+	OutputValue OutputValue
+}
+
+// OutputValue returns the absolute address of an output value of the given
+// name within the receiving module instance.
+func (m ModuleInstance) OutputValue(name string) AbsOutputValue {
+	return AbsOutputValue{
+		Module: m,
+		OutputValue: OutputValue{
+			Name: name,
+		},
+	}
+}
+
+func (v AbsOutputValue) String() string {
+	if v.Module.IsRoot() {
+		return v.OutputValue.String()
+	}
+	return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String())
+}
+
+// ModuleCallOutput converts an AbsOutputValue into a ModuleCallOutput,
+// returning also the module instance that the ModuleCallOutput is relative
+// to.
+//
+// The root module does not have a call, and so this method cannot be used
+// with outputs in the root module, and will panic in that case.
+func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) {
+	if v.Module.IsRoot() {
+		panic("ReferenceFromCall used with root module output")
+	}
+
+	caller, call := v.Module.CallInstance()
+	return caller, ModuleCallOutput{
+		Call: call,
+		Name: v.OutputValue.Name,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
new file mode 100644
index 00000000..84fe8a0d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
@@ -0,0 +1,338 @@
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// Reference describes a reference to an address with source location
+// information.
+type Reference struct {
+	Subject     Referenceable
+	SourceRange tfdiags.SourceRange
+	Remaining   hcl.Traversal
+}
+
+// ParseRef attempts to extract a referenceable address from the prefix of the
+// given traversal, which must be an absolute traversal or this function
+// will panic.
+//
+// If no error diagnostics are returned, the returned reference includes the
+// address that was extracted, the source range it was extracted from, and any
+// remaining relative traversal that was not consumed as part of the
+// reference.
+//
+// If error diagnostics are returned then the Reference value is invalid and
+// must not be used.
+func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
+	ref, diags := parseRef(traversal)
+
+	// Normalize a little to make life easier for callers.
+	if ref != nil {
+		if len(ref.Remaining) == 0 {
+			ref.Remaining = nil
+		}
+	}
+
+	return ref, diags
+}
+
+// ParseRefStr is a helper wrapper around ParseRef that takes a string
+// and parses it with the HCL native syntax traversal parser before
+// interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseRef.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned reference may be nil or incomplete.
+func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return nil, diags
+	}
+
+	ref, targetDiags := ParseRef(traversal)
+	diags = diags.Append(targetDiags)
+	return ref, diags
+}
+
+func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	root := traversal.RootName()
+	rootRange := traversal[0].SourceRange()
+
+	switch root {
+
+	case "count":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     CountAttr{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
+	case "data":
+		if len(traversal) < 3 {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid reference",
+				Detail:   `The "data" object must be followed by two attribute names: the data source type and the resource name.`,
+				Subject:  traversal.SourceRange().Ptr(),
+			})
+			return nil, diags
+		}
+		remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser
+		return parseResourceRef(DataResourceMode, rootRange, remain)
+
+	case "local":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     LocalValue{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
+	case "module":
+		callName, callRange, remain, diags := parseSingleAttrRef(traversal)
+		if diags.HasErrors() {
+			return nil, diags
+		}
+
+		// A traversal starting with "module" can either be a reference to
+		// an entire module instance or to a single output from a module
+		// instance, depending on what we find after this introducer.
+
+		callInstance := ModuleCallInstance{
+			Call: ModuleCall{
+				Name: callName,
+			},
+			Key: NoKey,
+		}
+
+		if len(remain) == 0 {
+			// Reference to an entire module instance. Might alternatively
+			// be a reference to a collection of instances of a particular
+			// module, but the caller will need to deal with that ambiguity
+			// since we don't have enough context here.
+			return &Reference{
+				Subject:     callInstance,
+				SourceRange: tfdiags.SourceRangeFromHCL(callRange),
+				Remaining:   remain,
+			}, diags
+		}
+
+		if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
+			var err error
+			callInstance.Key, err = ParseInstanceKey(idxTrav.Key)
+			if err != nil {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid index key",
+					Detail:   fmt.Sprintf("Invalid index for module instance: %s.", err),
+					Subject:  &idxTrav.SrcRange,
+				})
+				return nil, diags
+			}
+			remain = remain[1:]
+
+			if len(remain) == 0 {
+				// Also a reference to an entire module instance, but we have a key
+				// now.
+				return &Reference{
+					Subject:     callInstance,
+					SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)),
+					Remaining:   remain,
+				}, diags
+			}
+		}
+
+		if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
+			remain = remain[1:]
+			return &Reference{
+				Subject: ModuleCallOutput{
+					Name: attrTrav.Name,
+					Call: callInstance,
+				},
+				SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)),
+				Remaining:   remain,
+			}, diags
+		}
+
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid reference",
+			Detail:   "Module instance objects do not support this operation.",
+			Subject:  remain[0].SourceRange().Ptr(),
+		})
+		return nil, diags
+
+	case "path":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     PathAttr{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
+	case "self":
+		return &Reference{
+			Subject:     Self,
+			SourceRange: tfdiags.SourceRangeFromHCL(rootRange),
+			Remaining:   traversal[1:],
+		}, diags
+
+	case "terraform":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     TerraformAttr{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
+	case "var":
+		name, rng, remain, diags := parseSingleAttrRef(traversal)
+		return &Reference{
+			Subject:     InputVariable{Name: name},
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+			Remaining:   remain,
+		}, diags
+
+	default:
+		return parseResourceRef(ManagedResourceMode, rootRange, traversal)
+	}
+}
+
+func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	if len(traversal) < 2 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid reference",
+			Detail:   `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`,
+			Subject:  hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(),
+		})
+		return nil, diags
+	}
+
+	var typeName, name string
+	switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode
+	case hcl.TraverseRoot:
+		typeName = tt.Name
+	case hcl.TraverseAttr:
+		typeName = tt.Name
+	default:
+		// If it isn't a TraverseRoot then it must be a "data" reference.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid reference",
+			Detail:   `The "data" object does not support this operation.`,
+			Subject:  traversal[0].SourceRange().Ptr(),
+		})
+		return nil, diags
+	}
+
+	attrTrav, ok := traversal[1].(hcl.TraverseAttr)
+	if !ok {
+		var what string
+		switch mode {
+		case DataResourceMode:
+			what = "data source"
+		default:
+			what = "resource type"
+		}
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid reference",
+			Detail:   fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what),
+			Subject:  traversal[1].SourceRange().Ptr(),
+		})
+		return nil, diags
+	}
+	name = attrTrav.Name
+	rng := hcl.RangeBetween(startRange, attrTrav.SrcRange)
+	remain := traversal[2:]
+
+	resourceAddr := Resource{
+		Mode: mode,
+		Type: typeName,
+		Name: name,
+	}
+	resourceInstAddr := ResourceInstance{
+		Resource: resourceAddr,
+		Key:      NoKey,
+	}
+
+	if len(remain) == 0 {
+		// This might actually be a reference to the collection of all instances
+		// of the resource, but we don't have enough context here to decide
+		// so we'll let the caller resolve that ambiguity.
+		return &Reference{
+			Subject:     resourceInstAddr,
+			SourceRange: tfdiags.SourceRangeFromHCL(rng),
+		}, diags
+	}
+
+	if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
+		var err error
+		resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key)
+		if err != nil {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid index key",
+				Detail:   fmt.Sprintf("Invalid index for resource instance: %s.", err),
+				Subject:  &idxTrav.SrcRange,
+			})
+			return nil, diags
+		}
+		remain = remain[1:]
+		rng = hcl.RangeBetween(rng, idxTrav.SrcRange)
+	}
+
+	return &Reference{
+		Subject:     resourceInstAddr,
+		SourceRange: tfdiags.SourceRangeFromHCL(rng),
+		Remaining:   remain,
+	}, diags
+}
+
+func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	root := traversal.RootName()
+	rootRange := traversal[0].SourceRange()
+
+	if len(traversal) < 2 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid reference",
+			Detail:   fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root),
+			Subject:  &rootRange,
+		})
+		return "", hcl.Range{}, nil, diags
+	}
+	if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok {
+		return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags
+	}
+	diags = diags.Append(&hcl.Diagnostic{
+		Severity: hcl.DiagError,
+		Summary:  "Invalid reference",
+		Detail:   fmt.Sprintf("The %q object does not support this operation.", root),
+		Subject:  traversal[1].SourceRange().Ptr(),
+	})
+	return "", hcl.Range{}, nil, diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
new file mode 100644
index 00000000..057443af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
@@ -0,0 +1,318 @@
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// Target describes a targeted address with source location information.
+type Target struct {
+	Subject     Targetable
+	SourceRange tfdiags.SourceRange
+}
+
+// ParseTarget attempts to interpret the given traversal as a targetable
+// address. The given traversal must be absolute, or this function will
+// panic.
+//
+// If no error diagnostics are returned, the returned target includes the
+// address that was extracted and the source range it was extracted from.
+//
+// If error diagnostics are returned then the Target value is invalid and
+// must not be used.
+func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
+	path, remain, diags := parseModuleInstancePrefix(traversal)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+
+	rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange())
+
+	if len(remain) == 0 {
+		return &Target{
+			Subject:     path,
+			SourceRange: rng,
+		}, diags
+	}
+
+	mode := ManagedResourceMode
+	if remain.RootName() == "data" {
+		mode = DataResourceMode
+		remain = remain[1:]
+	}
+
+	if len(remain) < 2 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "Resource specification must include a resource type and name.",
+			Subject:  remain.SourceRange().Ptr(),
+		})
+		return nil, diags
+	}
+
+	var typeName, name string
+	switch tt := remain[0].(type) {
+	case hcl.TraverseRoot:
+		typeName = tt.Name
+	case hcl.TraverseAttr:
+		typeName = tt.Name
+	default:
+		switch mode {
+		case ManagedResourceMode:
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address",
+				Detail:   "A resource type name is required.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+		case DataResourceMode:
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address",
+				Detail:   "A data source name is required.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+		default:
+			panic("unknown mode")
+		}
+		return nil, diags
+	}
+
+	switch tt := remain[1].(type) {
+	case hcl.TraverseAttr:
+		name = tt.Name
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource name is required.",
+			Subject:  remain[1].SourceRange().Ptr(),
+		})
+		return nil, diags
+	}
+
+	var subject Targetable
+	remain = remain[2:]
+	switch len(remain) {
+	case 0:
+		subject = path.Resource(mode, typeName, name)
+	case 1:
+		if tt, ok := remain[0].(hcl.TraverseIndex); ok {
+			key, err := ParseInstanceKey(tt.Key)
+			if err != nil {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary:  "Invalid address",
+					Detail:   fmt.Sprintf("Invalid resource instance key: %s.", err),
+					Subject:  remain[0].SourceRange().Ptr(),
+				})
+				return nil, diags
+			}
+
+			subject = path.ResourceInstance(mode, typeName, name, key)
+		} else {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid address",
+				Detail:   "Resource instance key must be given in square brackets.",
+				Subject:  remain[0].SourceRange().Ptr(),
+			})
+			return nil, diags
+		}
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "Unexpected extra operators after address.",
+			Subject:  remain[1].SourceRange().Ptr(),
+		})
+		return nil, diags
+	}
+
+	return &Target{
+		Subject:     subject,
+		SourceRange: rng,
+	}, diags
+}
+
+// ParseTargetStr is a helper wrapper around ParseTarget that takes a string
+// and parses it with the HCL native syntax traversal parser before
+// interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a target string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseTarget.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned target may be nil or incomplete.
+func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return nil, diags
+	}
+
+	target, targetDiags := ParseTarget(traversal)
+	diags = diags.Append(targetDiags)
+	return target, diags
+}
+
+// ParseAbsResource attempts to interpret the given traversal as an absolute
+// resource address, using the same syntax as expected by ParseTarget.
+//
+// If no error diagnostics are returned, the returned target includes the
+// address that was extracted and the source range it was extracted from.
+//
+// If error diagnostics are returned then the AbsResource value is invalid and
+// must not be used.
+func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) {
+	addr, diags := ParseTarget(traversal)
+	if diags.HasErrors() {
+		return AbsResource{}, diags
+	}
+
+	switch tt := addr.Subject.(type) {
+
+	case AbsResource:
+		return tt, diags
+
+	case AbsResourceInstance: // Catch likely user error with specialized message
+		// Assume that the last element of the traversal must be the index,
+		// since that's required for a valid resource instance address.
+		indexStep := traversal[len(traversal)-1]
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.",
+			Subject:  indexStep.SourceRange().Ptr(),
+		})
+		return AbsResource{}, diags
+
+	case ModuleInstance: // Catch likely user error with specialized message
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource address is required here. The module path must be followed by a resource specification.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResource{}, diags
+
+	default: // Generic message for other address types
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource address is required here.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResource{}, diags
+
+	}
+}
+
+// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a
+// string and parses it with the HCL native syntax traversal parser before
+// interpreting it.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned address may be incomplete.
+//
+// Since this function has no context about the source of the given string,
+// any returned diagnostics will not have meaningful source location
+// information.
+func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return AbsResource{}, diags
+	}
+
+	addr, addrDiags := ParseAbsResource(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// ParseAbsResourceInstance attempts to interpret the given traversal as an
+// absolute resource instance address, using the same syntax as expected by
+// ParseTarget.
+//
+// If no error diagnostics are returned, the returned target includes the
+// address that was extracted and the source range it was extracted from.
+//
+// If error diagnostics are returned then the AbsResourceInstance value is
+// invalid and must not be used.
+func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
+	addr, diags := ParseTarget(traversal)
+	if diags.HasErrors() {
+		return AbsResourceInstance{}, diags
+	}
+
+	switch tt := addr.Subject.(type) {
+
+	case AbsResource:
+		return tt.Instance(NoKey), diags
+
+	case AbsResourceInstance:
+		return tt, diags
+
+	case ModuleInstance: // Catch likely user error with specialized message
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource instance address is required here. The module path must be followed by a resource instance specification.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResourceInstance{}, diags
+
+	default: // Generic message for other address types
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid address",
+			Detail:   "A resource address is required here.",
+			Subject:  traversal.SourceRange().Ptr(),
+		})
+		return AbsResourceInstance{}, diags
+
+	}
+}
+
+// ParseAbsResourceInstanceStr is a helper wrapper around
+// ParseAbsResourceInstance that takes a string and parses it with the HCL
+// native syntax traversal parser before interpreting it.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned address may be incomplete.
+//
+// Since this function has no context about the source of the given string,
+// any returned diagnostics will not have meaningful source location
+// information.
+func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return AbsResourceInstance{}, diags
+	}
+
+	addr, addrDiags := ParseAbsResourceInstance(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
new file mode 100644
index 00000000..cfc13f4b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
@@ -0,0 +1,12 @@
+package addrs
+
+// PathAttr is the address of an attribute of the "path" object in
+// the interpolation scope, like "path.module".
+type PathAttr struct {
+	referenceable
+	Name string
+}
+
+func (pa PathAttr) String() string {
+	return "path." + pa.Name
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
new file mode 100644
index 00000000..340dd19e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
@@ -0,0 +1,297 @@
+package addrs
+
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/tfdiags"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+)
+
+// ProviderConfig is the address of a provider configuration.
+type ProviderConfig struct {
+	Type string
+
+	// If not empty, Alias identifies which non-default (aliased) provider
+	// configuration this address refers to.
+	Alias string
+}
+
+// NewDefaultProviderConfig returns the address of the default (un-aliased)
+// configuration for the provider with the given type name.
+func NewDefaultProviderConfig(typeName string) ProviderConfig {
+	return ProviderConfig{
+		Type: typeName,
+	}
+}
+
+// ParseProviderConfigCompact parses the given absolute traversal as a relative
+// provider address in compact form. The following are examples of traversals
+// that can be successfully parsed as compact relative provider configuration
+// addresses:
+//
+//     aws
+//     aws.foo
+//
+// This function will panic if given a relative traversal.
+//
+// If the returned diagnostics contains errors then the result value is invalid
+// and must not be used.
+func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	ret := ProviderConfig{
+		Type: traversal.RootName(),
+	}
+
+	if len(traversal) < 2 {
+		// Just a type name, then.
+		return ret, diags
+	}
+
+	aliasStep := traversal[1]
+	switch ts := aliasStep.(type) {
+	case hcl.TraverseAttr:
+		ret.Alias = ts.Name
+		return ret, diags
+	default:
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "The provider type name must either stand alone or be followed by an alias name separated with a dot.",
+			Subject:  aliasStep.SourceRange().Ptr(),
+		})
+	}
+
+	if len(traversal) > 2 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Extraneous operators after provider configuration address.",
+			Subject:  traversal[2:].SourceRange().Ptr(),
+		})
+	}
+
+	return ret, diags
+}
+
+// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseProviderConfigCompact.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// then the returned address is invalid.
+func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return ProviderConfig{}, diags
+	}
+
+	addr, addrDiags := ParseProviderConfigCompact(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// Absolute returns an AbsProviderConfig from the receiver and the given module
+// instance address.
+func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module:         module,
+		ProviderConfig: pc,
+	}
+}
+
+func (pc ProviderConfig) String() string {
+	if pc.Type == "" {
+		// Should never happen; always indicates a bug
+		return "provider.<invalid>"
+	}
+
+	if pc.Alias != "" {
+		return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias)
+	}
+
+	return "provider." + pc.Type
+}
+
+// StringCompact is an alternative to String that returns the form that can
+// be parsed by ParseProviderConfigCompact, without the "provider." prefix.
+func (pc ProviderConfig) StringCompact() string {
+	if pc.Alias != "" {
+		return fmt.Sprintf("%s.%s", pc.Type, pc.Alias)
+	}
+	return pc.Type
+}
+
+// AbsProviderConfig is the absolute address of a provider configuration
+// within a particular module instance.
+type AbsProviderConfig struct {
+	Module         ModuleInstance
+	ProviderConfig ProviderConfig
+}
+
+// ParseAbsProviderConfig parses the given traversal as an absolute provider
+// address. The following are examples of traversals that can be successfully
+// parsed as absolute provider configuration addresses:
+//
+//     provider.aws
+//     provider.aws.foo
+//     module.bar.provider.aws
+//     module.bar.module.baz.provider.aws.foo
+//     module.foo[1].provider.aws.foo
+//
+// This type of address is used, for example, to record the relationships
+// between resources and provider configurations in the state structure.
+// This type of address is not generally used in the UI, except in error
+// messages that refer to provider configurations.
+func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
+	modInst, remain, diags := parseModuleInstancePrefix(traversal)
+	ret := AbsProviderConfig{
+		Module: modInst,
+	}
+	if len(remain) < 2 || remain.RootName() != "provider" {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Provider address must begin with \"provider.\", followed by a provider type name.",
+			Subject:  remain.SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+	if len(remain) > 3 {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "Extraneous operators after provider configuration alias.",
+			Subject:  hcl.Traversal(remain[3:]).SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if tt, ok := remain[1].(hcl.TraverseAttr); ok {
+		ret.ProviderConfig.Type = tt.Name
+	} else {
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Invalid provider configuration address",
+			Detail:   "The prefix \"provider.\" must be followed by a provider type name.",
+			Subject:  remain[1].SourceRange().Ptr(),
+		})
+		return ret, diags
+	}
+
+	if len(remain) == 3 {
+		if tt, ok := remain[2].(hcl.TraverseAttr); ok {
+			ret.ProviderConfig.Alias = tt.Name
+		} else {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Invalid provider configuration address",
+				Detail:   "Provider type name must be followed by a configuration alias name.",
+				Subject:  remain[2].SourceRange().Ptr(),
+			})
+			return ret, diags
+		}
+	}
+
+	return ret, diags
+}
+
+// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig
+// that takes a string and parses it with the HCL native syntax traversal parser
+// before interpreting it.
+//
+// This should be used only in specialized situations since it will cause the
+// created references to not have any meaningful source location information.
+// If a reference string is coming from a source that should be identified in
+// error messages then the caller should instead parse it directly using a
+// suitable function from the HCL API and pass the traversal itself to
+// ParseAbsProviderConfig.
+//
+// Error diagnostics are returned if either the parsing fails or the analysis
+// of the traversal fails. There is no way for the caller to distinguish the
+// two kinds of diagnostics programmatically. If error diagnostics are returned
+// the returned address is invalid.
+func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(parseDiags)
+	if parseDiags.HasErrors() {
+		return AbsProviderConfig{}, diags
+	}
+
+	addr, addrDiags := ParseAbsProviderConfig(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags
+}
+
+// ProviderConfigDefault returns the address of the default provider config
+// of the given type inside the receiving module instance.
+func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module: m,
+		ProviderConfig: ProviderConfig{
+			Type: name,
+		},
+	}
+}
+
+// ProviderConfigAliased returns the address of an aliased provider config
+// with the given type and alias inside the receiving module instance.
+func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig {
+	return AbsProviderConfig{
+		Module: m,
+		ProviderConfig: ProviderConfig{
+			Type:  name,
+			Alias: alias,
+		},
+	}
+}
+
+// Inherited returns an address that the receiving configuration address might
+// inherit from in a parent module. The second bool return value indicates if
+// such inheritance is possible, and thus whether the returned address is valid.
+//
+// Inheritance is possible only for default (un-aliased) providers in modules
+// other than the root module. Even if a valid address is returned, inheritance
+// may not be performed for other reasons, such as if the calling module
+// provided explicit provider configurations within the call for this module.
+// The ProviderTransformer graph transform in the main terraform module has
+// the authoritative logic for provider inheritance, and this method is here
+// mainly just for its benefit.
+func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
+	// Can't inherit if we're already in the root.
+	if len(pc.Module) == 0 {
+		return AbsProviderConfig{}, false
+	}
+
+	// Can't inherit if we have an alias.
+	if pc.ProviderConfig.Alias != "" {
+		return AbsProviderConfig{}, false
+	}
+
+	// Otherwise, we might inherit from a configuration with the same
+	// provider name in the parent module instance.
+	parentMod := pc.Module.Parent()
+	return pc.ProviderConfig.Absolute(parentMod), true
+}
+
+func (pc AbsProviderConfig) String() string {
+	if len(pc.Module) == 0 {
+		return pc.ProviderConfig.String()
+	}
+	return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String())
+}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
new file mode 100644
index 00000000..211083a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
@@ -0,0 +1,20 @@
+package addrs
+
+// Referenceable is an interface implemented by all address types that can
+// appear as references in configuration language expressions.
+type Referenceable interface {
+	// All implementations of this interface must be covered by the type switch
+	// in lang.Scope.buildEvalContext.
+	referenceableSigil()
+
+	// String produces a string representation of the address that could be
+	// parsed as a HCL traversal and passed to ParseRef to produce an identical
+	// result.
+ String() string +} + +type referenceable struct { +} + +func (r referenceable) referenceableSigil() { +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go new file mode 100644 index 00000000..28667708 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/resource.go @@ -0,0 +1,270 @@ +package addrs + +import ( + "fmt" + "strings" +) + +// Resource is an address for a resource block within configuration, which +// contains potentially-multiple resource instances if that configuration +// block uses "count" or "for_each". +type Resource struct { + referenceable + Mode ResourceMode + Type string + Name string +} + +func (r Resource) String() string { + switch r.Mode { + case ManagedResourceMode: + return fmt.Sprintf("%s.%s", r.Type, r.Name) + case DataResourceMode: + return fmt.Sprintf("data.%s.%s", r.Type, r.Name) + default: + // Should never happen, but we'll return a string here rather than + // crashing just in case it does. + return fmt.Sprintf(".%s.%s", r.Type, r.Name) + } +} + +func (r Resource) Equal(o Resource) bool { + return r.String() == o.String() +} + +// Instance produces the address for a specific instance of the receiver +// that is identified by the given key. +func (r Resource) Instance(key InstanceKey) ResourceInstance { + return ResourceInstance{ + Resource: r, + Key: key, + } +} + +// Absolute returns an AbsResource from the receiver and the given module +// instance address. +func (r Resource) Absolute(module ModuleInstance) AbsResource { + return AbsResource{ + Module: module, + Resource: r, + } +} + +// DefaultProviderConfig returns the address of the provider configuration +// that should be used for the resource identified by the receiver if it +// does not have a provider configuration address explicitly set in +// configuration. +// +// This method is not able to verify that such a configuration exists, nor +// represent the behavior of automatically inheriting certain provider +// configurations from parent modules. It just does a static analysis of the +// receiving address and returns an address to start from, relative to the +// same module that contains the resource. +func (r Resource) DefaultProviderConfig() ProviderConfig { + typeName := r.Type + if under := strings.Index(typeName, "_"); under != -1 { + typeName = typeName[:under] + } + return ProviderConfig{ + Type: typeName, + } +} + +// ResourceInstance is an address for a specific instance of a resource. +// When a resource is defined in configuration with "count" or "for_each" it +// produces zero or more instances, which can be addressed using this type. +type ResourceInstance struct { + referenceable + Resource Resource + Key InstanceKey +} + +func (r ResourceInstance) ContainingResource() Resource { + return r.Resource +} + +func (r ResourceInstance) String() string { + if r.Key == NoKey { + return r.Resource.String() + } + return r.Resource.String() + r.Key.String() +} + +func (r ResourceInstance) Equal(o ResourceInstance) bool { + return r.String() == o.String() +} + +// Absolute returns an AbsResourceInstance from the receiver and the given module +// instance address. +func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance { + return AbsResourceInstance{ + Module: module, + Resource: r, + } +} + +// AbsResource is an absolute address for a resource under a given module path.
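For orientation, a hedged sketch (an editorial aside, not part of the diff) of the string forms these address types produce; the resource names are hypothetical, and it assumes the IntKey instance key type from the same package:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	r := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "web",
	}
	fmt.Println(r)                           // aws_instance.web
	fmt.Println(r.Instance(addrs.IntKey(0))) // aws_instance.web[0]

	// The implied default provider is derived from the type name prefix.
	fmt.Println(r.DefaultProviderConfig().Type) // aws
}
```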
+type AbsResource struct { + targetable + Module ModuleInstance + Resource Resource +} + +// Resource returns the address of a particular resource within the receiver. +func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource { + return AbsResource{ + Module: m, + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + } +} + +// Instance produces the address for a specific instance of the receiver +// that is idenfied by the given key. +func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: r.Module, + Resource: r.Resource.Instance(key), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is either equal to the receiver or is an instance of the +// receiver. +func (r AbsResource) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResource: + // We'll use our stringification as a cheat-ish way to test for equality. + return to.String() == r.String() + + case AbsResourceInstance: + return r.TargetContains(to.ContainingResource()) + + default: + return false + + } +} + +func (r AbsResource) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +func (r AbsResource) Equal(o AbsResource) bool { + return r.String() == o.String() +} + +// AbsResourceInstance is an absolute address for a resource instance under a +// given module path. +type AbsResourceInstance struct { + targetable + Module ModuleInstance + Resource ResourceInstance +} + +// ResourceInstance returns the address of a particular resource instance within the receiver. +func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance { + return AbsResourceInstance{ + Module: m, + Resource: ResourceInstance{ + Resource: Resource{ + Mode: mode, + Type: typeName, + Name: name, + }, + Key: key, + }, + } +} + +// ContainingResource returns the address of the resource that contains the +// receving resource instance. In other words, it discards the key portion +// of the address to produce an AbsResource value. +func (r AbsResourceInstance) ContainingResource() AbsResource { + return AbsResource{ + Module: r.Module, + Resource: r.Resource.ContainingResource(), + } +} + +// TargetContains implements Targetable by returning true if the given other +// address is equal to the receiver. +func (r AbsResourceInstance) TargetContains(other Targetable) bool { + switch to := other.(type) { + + case AbsResourceInstance: + // We'll use our stringification as a cheat-ish way to test for equality. + return to.String() == r.String() + + default: + return false + + } +} + +func (r AbsResourceInstance) String() string { + if len(r.Module) == 0 { + return r.Resource.String() + } + return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String()) +} + +func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool { + return r.String() == o.String() +} + +// Less returns true if the receiver should sort before the given other value +// in a sorted list of addresses. 
+func (r AbsResourceInstance) Less(o AbsResourceInstance) bool { + switch { + + case len(r.Module) != len(o.Module): + return len(r.Module) < len(o.Module) + + case r.Module.String() != o.Module.String(): + return r.Module.Less(o.Module) + + case r.Resource.Resource.Mode != o.Resource.Resource.Mode: + return r.Resource.Resource.Mode == DataResourceMode + + case r.Resource.Resource.Type != o.Resource.Resource.Type: + return r.Resource.Resource.Type < o.Resource.Resource.Type + + case r.Resource.Resource.Name != o.Resource.Resource.Name: + return r.Resource.Resource.Name < o.Resource.Resource.Name + + case r.Resource.Key != o.Resource.Key: + return InstanceKeyLess(r.Resource.Key, o.Resource.Key) + + default: + return false + + } +} + +// ResourceMode defines which lifecycle applies to a given resource. Each +// resource lifecycle has a slightly different address format. +type ResourceMode rune + +//go:generate stringer -type ResourceMode + +const ( + // InvalidResourceMode is the zero value of ResourceMode and is not + // a valid resource mode. + InvalidResourceMode ResourceMode = 0 + + // ManagedResourceMode indicates a managed resource, as defined by + // "resource" blocks in configuration. + ManagedResourceMode ResourceMode = 'M' + + // DataResourceMode indicates a data resource, as defined by + // "data" blocks in configuration. + DataResourceMode ResourceMode = 'D' +) diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go new file mode 100644 index 00000000..9bdbdc42 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go @@ -0,0 +1,105 @@ +package addrs + +import "fmt" + +// ResourceInstancePhase is a special kind of reference used only internally +// during graph building to represent resource instances that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via an instance phase +// or can declare that they reference an instance phase in order to accommodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourceInstancePhase struct { + referenceable + ResourceInstance ResourceInstance + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourceInstancePhase{} + +// Phase returns a special "phase address" for the receiving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase { + return ResourceInstancePhase{ + ResourceInstance: r, + Phase: rpt, + } +} + +// ContainingResource returns an address for the same phase of the resource +// that this instance belongs to. +func (rp ResourceInstancePhase) ContainingResource() ResourcePhase { + return rp.ResourceInstance.Resource.Phase(rp.Phase) +} + +func (rp ResourceInstancePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase) +} + +// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase.
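Since Less (above) defines a total ordering over absolute instance addresses, a caller might sort a slice with the standard library. A small sketch, under a hypothetical package name:

```go
package addrsutil

import (
	"sort"

	"github.com/hashicorp/terraform/addrs"
)

// SortInstances orders a slice of absolute resource instance addresses
// using the total ordering defined by the Less method shown above.
func SortInstances(insts []addrs.AbsResourceInstance) {
	sort.Slice(insts, func(i, j int) bool {
		return insts[i].Less(insts[j])
	})
}
```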
+type ResourceInstancePhaseType string + +const ( + // ResourceInstancePhaseDestroy represents the "destroy" phase of a + // resource instance. + ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy" + + // ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy + // but is used for resources that have "create_before_destroy" set, thus + // requiring a different dependency ordering. + ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd" +) + +func (rpt ResourceInstancePhaseType) String() string { + return string(rpt) +} + +// ResourcePhase is a special kind of reference used only internally +// during graph building to represent resources that are in a +// non-primary state. +// +// Graph nodes can declare themselves referenceable via a resource phase +// or can declare that they reference a resource phase in order to accommodate +// secondary graph nodes dealing with, for example, destroy actions. +// +// Since resources (as opposed to instances) aren't actually phased, this +// address type is used only as an approximation during initial construction +// of the resource-oriented plan graph, under the assumption that resource +// instances with ResourceInstancePhase addresses will be created in dynamic +// subgraphs during the graph walk. +// +// This special reference type cannot be accessed directly by end-users, and +// should never be shown in the UI. +type ResourcePhase struct { + referenceable + Resource Resource + Phase ResourceInstancePhaseType +} + +var _ Referenceable = ResourcePhase{} + +// Phase returns a special "phase address" for the receiving instance. See the +// documentation of ResourceInstancePhase for the limited situations where this +// is intended to be used. +func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase { + return ResourcePhase{ + Resource: r, + Phase: rpt, + } +} + +func (rp ResourcePhase) String() string { + // We use a different separator here than usual to ensure that we'll + // never conflict with any non-phased resource instance string. This + // is intentionally something that would fail parsing with ParseRef, + // because this special address type should never be exposed in the UI. + return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase) +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go new file mode 100644 index 00000000..0b5c33f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ResourceMode"; DO NOT EDIT. + +package addrs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again.
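A hedged sketch (an editorial aside) of the internal-only phase address strings these types produce; the resource name is hypothetical, and addrs.NoKey is assumed from the package's instance key support:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	r := addrs.Resource{Mode: addrs.ManagedResourceMode, Type: "aws_instance", Name: "web"}

	// The "#" separator guarantees these internal-only strings can never
	// collide with an address that ParseRef would accept.
	fmt.Println(r.Instance(addrs.NoKey).Phase(addrs.ResourceInstancePhaseDestroy)) // aws_instance.web#destroy
	fmt.Println(r.Phase(addrs.ResourceInstancePhaseDestroyCBD))                    // aws_instance.web#destroy-cbd
}
```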
+ var x [1]struct{} + _ = x[InvalidResourceMode-0] + _ = x[ManagedResourceMode-77] + _ = x[DataResourceMode-68] +} + +const ( + _ResourceMode_name_0 = "InvalidResourceMode" + _ResourceMode_name_1 = "DataResourceMode" + _ResourceMode_name_2 = "ManagedResourceMode" +) + +func (i ResourceMode) String() string { + switch { + case i == 0: + return _ResourceMode_name_0 + case i == 68: + return _ResourceMode_name_1 + case i == 77: + return _ResourceMode_name_2 + default: + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go new file mode 100644 index 00000000..7f24eaf0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/self.go @@ -0,0 +1,14 @@ +package addrs + +// Self is the address of the special object "self" that behaves as an alias +// for a containing object currently in scope. +const Self selfT = 0 + +type selfT int + +func (s selfT) referenceableSigil() { +} + +func (s selfT) String() string { + return "self" +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go new file mode 100644 index 00000000..16819a5a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/targetable.go @@ -0,0 +1,26 @@ +package addrs + +// Targetable is an interface implemented by all address types that can be +// used as "targets" for selecting sub-graphs of a graph. +type Targetable interface { + targetableSigil() + + // TargetContains returns true if the receiver is considered to contain + // the given other address. Containment, for the purpose of targeting, + // means that if a container address is targeted then all of the + // addresses within it are also implicitly targeted. + // + // A targetable address always contains at least itself. + TargetContains(other Targetable) bool + + // String produces a string representation of the address that could be + // parsed as a HCL traversal and passed to ParseTarget to produce an + // identical result. + String() string +} + +type targetable struct { +} + +func (r targetable) targetableSigil() { +} diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go new file mode 100644 index 00000000..a880182a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go @@ -0,0 +1,12 @@ +package addrs + +// TerraformAttr is the address of an attribute of the "terraform" object in +// the interpolation scope, like "terraform.workspace". +type TerraformAttr struct { + referenceable + Name string +} + +func (ta TerraformAttr) String() string { + return "terraform." 
+ ta.Name +} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go b/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go new file mode 100644 index 00000000..9cd15d42 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/backend.go @@ -0,0 +1,194 @@ +package atlas + +import ( + "fmt" + "net/url" + "os" + "strings" + "sync" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" +) + +const EnvVarToken = "ATLAS_TOKEN" +const EnvVarAddress = "ATLAS_ADDRESS" + +// Backend is an implementation of EnhancedBackend that performs all operations +// in Atlas. State must currently also be stored in Atlas, although it is worth +// investigating in the future if state storage can be external as well. +type Backend struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ContextOpts are the base context options to set when initializing a + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + //--------------------------------------------------------------- + // Internal fields, do not set + //--------------------------------------------------------------- + // stateClient is the legacy state client, set up in Configure + stateClient *stateClient + + // opLock locks operations + opLock sync.Mutex +} + +var _ backend.Backend = (*Backend)(nil) + +// New returns a new initialized Atlas backend.
+func New() *Backend { + return &Backend{} +} + +func (b *Backend) ConfigSchema() *configschema.Block { + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "name": { + Type: cty.String, + Required: true, + Description: "Full name of the environment in Terraform Enterprise, such as 'myorg/myenv'", + }, + "access_token": { + Type: cty.String, + Optional: true, + Description: "Access token to use to access Terraform Enterprise; the ATLAS_TOKEN environment variable is used if this argument is not set", + }, + "address": { + Type: cty.String, + Optional: true, + Description: "Base URL for your Terraform Enterprise installation; the ATLAS_ADDRESS environment variable is used if this argument is not set, finally falling back to a default of 'https://atlas.hashicorp.com/' if neither is set.", + }, + }, + } +} + +func (b *Backend) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + name := obj.GetAttr("name").AsString() + if ct := strings.Count(name, "/"); ct != 1 { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid workspace selector", + `The "name" argument must be an organization name and a workspace name separated by a slash, such as "acme/network-production".`, + cty.Path{cty.GetAttrStep{Name: "name"}}, + )) + } + + if v := obj.GetAttr("address"); !v.IsNull() { + addr := v.AsString() + _, err := url.Parse(addr) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid Terraform Enterprise URL", + fmt.Sprintf(`The "address" argument must be a valid URL: %s.`, err), + cty.Path{cty.GetAttrStep{Name: "address"}}, + )) + } + } + + return obj, diags +} + +func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + client := &stateClient{ + // This is optionally set during Atlas Terraform runs.
+ RunId: os.Getenv("ATLAS_RUN_ID"), + } + + name := obj.GetAttr("name").AsString() // assumed valid due to PrepareConfig method + slashIdx := strings.Index(name, "/") + client.User = name[:slashIdx] + client.Name = name[slashIdx+1:] + + if v := obj.GetAttr("access_token"); !v.IsNull() { + client.AccessToken = v.AsString() + } else { + client.AccessToken = os.Getenv(EnvVarToken) + if client.AccessToken == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Missing Terraform Enterprise access token", + `The "access_token" argument must be set unless the ATLAS_TOKEN environment variable is set to provide the authentication token for Terraform Enterprise.`, + cty.Path{cty.GetAttrStep{Name: "access_token"}}, + )) + } + } + + if v := obj.GetAttr("address"); !v.IsNull() { + addr := v.AsString() + addrURL, err := url.Parse(addr) + if err != nil { + // We already validated the URL in PrepareConfig, so this shouldn't happen + panic(err) + } + client.Server = addr + client.ServerURL = addrURL + } else { + addr := os.Getenv(EnvVarAddress) + if addr == "" { + addr = defaultAtlasServer + } + addrURL, err := url.Parse(addr) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid Terraform Enterprise URL", + fmt.Sprintf(`The ATLAS_ADDRESS environment variable must contain a valid URL: %s.`, err), + cty.Path{cty.GetAttrStep{Name: "address"}}, + )) + } + client.Server = addr + client.ServerURL = addrURL + } + + b.stateClient = client + + return diags +} + +func (b *Backend) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *Backend) DeleteWorkspace(name string) error { + return backend.ErrWorkspacesNotSupported +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + return &remote.State{Client: b.stateClient}, nil +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so is useful +// as a helper to wrap any potentially colored strings. +func (b *Backend) Colorize() *colorstring.Colorize { + if b.CLIColor != nil { + return b.CLIColor + } + + return &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go b/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go new file mode 100644 index 00000000..5b3656ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/cli.go @@ -0,0 +1,13 @@ +package atlas + +import ( + "github.com/hashicorp/terraform/backend" +) + +// backend.CLI impl.
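As an editorial aside, a hedged sketch of how this backend's PrepareConfig/Configure pair might be driven programmatically; the workspace name and token are hypothetical:

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/backend/atlas"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	b := atlas.New()

	cfg := cty.ObjectVal(map[string]cty.Value{
		"name":         cty.StringVal("acme/network-production"),
		"access_token": cty.StringVal("example-token"), // hypothetical credential
		"address":      cty.NullVal(cty.String),        // falls back to ATLAS_ADDRESS, then the default
	})

	cfg, diags := b.PrepareConfig(cfg)
	if !diags.HasErrors() {
		diags = diags.Append(b.Configure(cfg))
	}
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
}
```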
+func (b *Backend) CLIInit(opts *backend.CLIOpts) error { + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ContextOpts = opts.ContextOpts + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go b/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go new file mode 100644 index 00000000..0e15ff17 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/atlas/state_client.go @@ -0,0 +1,318 @@ +package atlas + +import ( + "bytes" + "context" + "crypto/md5" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "path" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/terraform" +) + +const ( + // defaultAtlasServer is used when no address is given + defaultAtlasServer = "https://atlas.hashicorp.com/" + atlasTokenHeader = "X-Atlas-Token" +) + +// AtlasClient implements the Client interface for an Atlas compatible server. +type stateClient struct { + Server string + ServerURL *url.URL + User string + Name string + AccessToken string + RunId string + HTTPClient *retryablehttp.Client + + conflictHandlingAttempted bool +} + +func (c *stateClient) Get() (*remote.Payload, error) { + // Make the HTTP request + req, err := retryablehttp.NewRequest("GET", c.url().String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to make HTTP request: %v", err) + } + + req.Header.Set(atlasTokenHeader, c.AccessToken) + + // Request the url + client, err := c.http() + if err != nil { + return nil, err + } + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf( + "Unexpected HTTP response code: %d\n\nBody: %s", + resp.StatusCode, c.readBody(resp.Body)) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %v", err) + } + + // Create the payload + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf("Failed to decode Content-MD5 '%s': %v", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := md5.Sum(payload.Data) + payload.MD5 = hash[:] + } + + return payload, nil +} + +func (c *stateClient) Put(state []byte) error { + // Get the target URL + base := c.url() + + // Generate the MD5 + hash := md5.Sum(state) + b64 := base64.StdEncoding.EncodeToString(hash[:]) + + // Make the HTTP client and request + req, err := retryablehttp.NewRequest("PUT", base.String(), bytes.NewReader(state)) + if err != nil { + return fmt.Errorf("Failed to make HTTP request: %v", 
err) + } + + // Prepare the request + req.Header.Set(atlasTokenHeader, c.AccessToken) + req.Header.Set("Content-MD5", b64) + req.Header.Set("Content-Type", "application/json") + req.ContentLength = int64(len(state)) + + // Make the request + client, err := c.http() + if err != nil { + return err + } + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to upload state: %v", err) + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusConflict: + return c.handleConflict(c.readBody(resp.Body), state) + default: + return fmt.Errorf( + "HTTP error: %d\n\nBody: %s", + resp.StatusCode, c.readBody(resp.Body)) + } +} + +func (c *stateClient) Delete() error { + // Make the HTTP request + req, err := retryablehttp.NewRequest("DELETE", c.url().String(), nil) + if err != nil { + return fmt.Errorf("Failed to make HTTP request: %v", err) + } + req.Header.Set(atlasTokenHeader, c.AccessToken) + + // Make the request + client, err := c.http() + if err != nil { + return err + } + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to delete state: %v", err) + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusNoContent: + return nil + case http.StatusNotFound: + return nil + default: + return fmt.Errorf( + "HTTP error: %d\n\nBody: %s", + resp.StatusCode, c.readBody(resp.Body)) + } +} + +func (c *stateClient) readBody(b io.Reader) string { + var buf bytes.Buffer + if _, err := io.Copy(&buf, b); err != nil { + return fmt.Sprintf("Error reading body: %s", err) + } + + result := buf.String() + if result == "" { + result = "<empty>" + } + + return result +} + +func (c *stateClient) url() *url.URL { + values := url.Values{} + + values.Add("atlas_run_id", c.RunId) + + return &url.URL{ + Scheme: c.ServerURL.Scheme, + Host: c.ServerURL.Host, + Path: path.Join("api/v1/terraform/state", c.User, c.Name), + RawQuery: values.Encode(), + } +} + +func (c *stateClient) http() (*retryablehttp.Client, error) { + if c.HTTPClient != nil { + return c.HTTPClient, nil + } + tlsConfig := &tls.Config{} + err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{ + CAFile: os.Getenv("ATLAS_CAFILE"), + CAPath: os.Getenv("ATLAS_CAPATH"), + }) + if err != nil { + return nil, err + } + rc := retryablehttp.NewClient() + + rc.CheckRetry = func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if err != nil { + // don't bother retrying if the certs don't match + if err, ok := err.(*url.Error); ok { + if _, ok := err.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + } + return retryablehttp.DefaultRetryPolicy(ctx, resp, err) + } + + t := cleanhttp.DefaultTransport() + t.TLSClientConfig = tlsConfig + rc.HTTPClient.Transport = t + + c.HTTPClient = rc + return rc, nil +} + +// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same +// Serial number but the checksum of the raw content differs. This can +// sometimes happen when Terraform changes state representation internally +// between versions in a way that's semantically neutral but affects the JSON +// output and therefore the checksum.
+// +// Here we detect and handle this situation by ticking the serial and retrying +// iff for the previous state and the proposed state: +// +// * the serials match +// * the parsed states are Equal (semantically equivalent) +// +// In other words, in this situation Terraform can override Atlas's detected +// conflict by asserting that the state it is pushing is indeed correct. +func (c *stateClient) handleConflict(msg string, state []byte) error { + log.Printf("[DEBUG] Handling Atlas conflict response: %s", msg) + + if c.conflictHandlingAttempted { + log.Printf("[DEBUG] Already attempted conflict resolution; returning conflict.") + } else { + c.conflictHandlingAttempted = true + log.Printf("[DEBUG] Atlas reported conflict, checking for equivalent states.") + + payload, err := c.Get() + if err != nil { + return conflictHandlingError(err) + } + + currentState, err := terraform.ReadState(bytes.NewReader(payload.Data)) + if err != nil { + return conflictHandlingError(err) + } + + proposedState, err := terraform.ReadState(bytes.NewReader(state)) + if err != nil { + return conflictHandlingError(err) + } + + if statesAreEquivalent(currentState, proposedState) { + log.Printf("[DEBUG] States are equivalent, incrementing serial and retrying.") + proposedState.Serial++ + var buf bytes.Buffer + if err := terraform.WriteState(proposedState, &buf); err != nil { + return conflictHandlingError(err) + + } + return c.Put(buf.Bytes()) + } else { + log.Printf("[DEBUG] States are not equivalent, returning conflict.") + } + } + + return fmt.Errorf( + "Atlas detected a remote state conflict.\n\nMessage: %s", msg) +} + +func conflictHandlingError(err error) error { + return fmt.Errorf( + "Error while handling a conflict response from Atlas: %s", err) +} + +func statesAreEquivalent(current, proposed *terraform.State) bool { + return current.Serial == proposed.Serial && current.Equal(proposed) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/backend.go b/vendor/github.com/hashicorp/terraform/backend/backend.go new file mode 100644 index 00000000..6f220b6b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/backend.go @@ -0,0 +1,283 @@ +// Package backend provides interfaces that the CLI uses to interact with +// Terraform. A backend provides the abstraction that allows the same CLI +// to simultaneously support both local and remote operations for seamlessly +// using Terraform in a team environment. +package backend + +import ( + "context" + "errors" + "time" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// DefaultStateName is the name of the default, initial state that every +// backend must have. This state cannot be deleted. +const DefaultStateName = "default" + +var ( + // ErrDefaultWorkspaceNotSupported is returned when an operation does not + // support using the default workspace, but requires a named workspace to + // be selected. 
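The state client above round-trips an MD5 checksum of the state payload via the Content-MD5 header. A standalone, hedged sketch of that verification logic, under the same base64 encoding assumptions (the helper name is hypothetical):

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// verifyContentMD5 checks a payload against a base64-encoded Content-MD5
// header value, mirroring the checks the Atlas state client performs.
func verifyContentMD5(data []byte, header string) (bool, error) {
	want, err := base64.StdEncoding.DecodeString(header)
	if err != nil {
		return false, err
	}
	got := md5.Sum(data)
	return bytes.Equal(want, got[:]), nil
}

func main() {
	state := []byte(`{"version": 3}`)
	sum := md5.Sum(state)
	header := base64.StdEncoding.EncodeToString(sum[:])

	ok, _ := verifyContentMD5(state, header)
	fmt.Println(ok) // true
}
```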
+ ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" + + "You can create a new workspace with the \"workspace new\" command.") + + // ErrOperationNotSupported is returned when an unsupported operation + // is detected by the configured backend. + ErrOperationNotSupported = errors.New("operation not supported") + + // ErrWorkspacesNotSupported is an error returned when a caller attempts + // to perform an operation on a workspace other than "default" for a + // backend that doesn't support multiple workspaces. + // + // The caller can detect this to do special fallback behavior or produce + // a specific, helpful error message. + ErrWorkspacesNotSupported = errors.New("workspaces not supported") +) + +// InitFn is used to initialize a new backend. +type InitFn func() Backend + +// Backend is the minimal interface that must be implemented to enable Terraform. +type Backend interface { + // ConfigSchema returns a description of the expected configuration + // structure for the receiving backend. + // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. + ConfigSchema() *configschema.Block + + // PrepareConfig checks the validity of the values in the given + // configuration, and inserts any missing defaults, assuming that its + // structure has already been validated per the schema returned by + // ConfigSchema. + // + // This method does not have any side-effects for the backend and can + // be safely used before configuring. It also does not consult any + // external data such as environment variables, disk files, etc. Validation + // that requires such external data should be deferred until the + // Configure call. + // + // If error diagnostics are returned then the configuration is not valid + // and must not subsequently be passed to the Configure method. + // + // This method may return configuration-contextual diagnostics such + // as tfdiags.AttributeValue, and so the caller should provide the + // necessary context via the diags.InConfigBody method before returning + // diagnostics to the user. + PrepareConfig(cty.Value) (cty.Value, tfdiags.Diagnostics) + + // Configure uses the provided configuration to set configuration fields + // within the backend. + // + // The given configuration is assumed to have already been validated + // against the schema returned by ConfigSchema and passed validation + // via PrepareConfig. + // + // This method may be called only once per backend instance, and must be + // called before all other methods except where otherwise stated. + // + // If error diagnostics are returned, the internal state of the instance + // is undefined and no other methods may be called. + Configure(cty.Value) tfdiags.Diagnostics + + // StateMgr returns the state manager for the given workspace name. + // + // If the returned state manager also implements statemgr.Locker then + // it's the caller's responsibility to call Lock and Unlock as appropriate. + // + // If the named workspace doesn't exist, or if it has no state, it will + // be created either immediately on this call or the first time + // PersistState is called, depending on the state manager implementation. + StateMgr(workspace string) (statemgr.Full, error) + + // DeleteWorkspace removes the workspace with the given name if it exists. + // + // DeleteWorkspace cannot prevent deleting a state that is in use. 
It is + the responsibility of the caller to hold a Lock for the state manager + belonging to this workspace before calling this method. + DeleteWorkspace(name string) error + + // Workspaces returns a list of the names of all of the workspaces that exist + // in this backend. + Workspaces() ([]string, error) +} + +// Enhanced implements additional behavior on top of a normal backend. +// +// Enhanced backends allow customizing the behavior of Terraform operations. +// This allows Terraform to potentially run operations remotely, load +// configurations from external sources, etc. +type Enhanced interface { + Backend + + // Operation performs a Terraform operation such as refresh, plan, apply. + // It is up to the implementation to determine what "performing" means. + // This DOES NOT BLOCK. The context returned as part of RunningOperation + // should be used to block for completion. + // If the state used in the operation can be locked, it is the + // responsibility of the Backend to lock the state for the duration of the + // running operation. + Operation(context.Context, *Operation) (*RunningOperation, error) +} + +// Local implements additional behavior on a Backend that allows local +// operations in addition to remote operations. +// +// This enables more behaviors of Terraform that require more data such +// as `console`, `import`, `graph`. These require direct access to +// configurations, variables, and more. Not all backends may support this +// so we separate it out into its own optional interface. +type Local interface { + // Context returns a runnable terraform Context. The operation parameter + // doesn't need a Type set but it needs other options set such as Module. + Context(*Operation) (*terraform.Context, statemgr.Full, tfdiags.Diagnostics) +} + +// An operation represents an operation for Terraform to execute. +// +// Note that not all fields are supported by all backends and can result +// in an error if set. All backend implementations should show user-friendly +// errors explaining any incorrectly set values. For example, the local +// backend doesn't support a PlanId being set. +// +// The operation options are purposely designed to have maximal compatibility +// between Terraform and Terraform Servers (a commercial product offered by +// HashiCorp). Therefore, it isn't expected that other implementations support +// every possible option. The struct here is generalized in order to allow +// even partial implementations to exist in the open, without walling off +// remote functionality 100% behind a commercial wall. Anyone can implement +// against this interface and have Terraform interact with it just as it +// would with HashiCorp-provided Terraform Servers. +type Operation struct { + // Type is the operation to perform. + Type OperationType + + // PlanId is an opaque value that backends can use to execute a specific + // plan for an apply operation. + // + // PlanOutBackend is the backend to store with the plan. This is the + // backend that will be used when applying the plan. + PlanId string + PlanRefresh bool // PlanRefresh will do a refresh before a plan + PlanOutPath string // PlanOutPath is the path to save the plan + PlanOutBackend *plans.Backend + + // ConfigDir is the path to the directory containing the configuration's + // root module. + ConfigDir string + + // ConfigLoader is a configuration loader that can be used to load + // configuration from ConfigDir. + ConfigLoader *configload.Loader + + // PlanFile is a plan file that was passed as an argument.
This is valid for + // plan and apply arguments but may not work for all backends. + PlanFile *planfile.Reader + + // The options below are more self-explanatory and affect the runtime + // behavior of the operation. + AutoApprove bool + Destroy bool + DestroyForce bool + Parallelism int + Targets []addrs.Targetable + Variables map[string]UnparsedVariableValue + + // Input/output/control options. + UIIn terraform.UIInput + UIOut terraform.UIOutput + + // If LockState is true, the Operation must Lock any + // state.Lockers for its duration, and Unlock when complete. + LockState bool + + // StateLocker is used to lock the state while providing UI feedback to the + // user. This will be supplied by the Backend itself. + StateLocker clistate.Locker + + // The duration to retry obtaining a State lock. + StateLockTimeout time.Duration + + // Workspace is the name of the workspace that this operation should run + // in, which controls which named state is used. + Workspace string +} + +// HasConfig returns true if and only if the operation has a ConfigDir value +// that refers to a directory containing at least one Terraform configuration +// file. +func (o *Operation) HasConfig() bool { + return o.ConfigLoader.IsConfigDir(o.ConfigDir) +} + +// Config loads the configuration that the operation applies to, using the +// ConfigDir and ConfigLoader fields within the receiving operation. +func (o *Operation) Config() (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + config, hclDiags := o.ConfigLoader.LoadConfig(o.ConfigDir) + diags = diags.Append(hclDiags) + return config, diags +} + +// RunningOperation is the result of starting an operation. +type RunningOperation struct { + // For implementers of a backend, this context should not wrap the + // passed in context. Otherwise, cancelling the parent context will + // immediately mark this context as "done" but those aren't the semantics + // we want: we want this context to be done only when the operation itself + // is fully done. + context.Context + + // Stop requests the operation to complete early, by calling Stop on all + // the plugins. If the process needs to terminate immediately, call Cancel. + Stop context.CancelFunc + + // Cancel is the context.CancelFunc associated with the embedded context, + // and can be called to terminate the operation early. + // Once Cancel is called, the operation should return as soon as possible + // to avoid running operations during process exit. + Cancel context.CancelFunc + + // Result is the exit status of the operation, populated only after the + // operation has completed. + Result OperationResult + + // PlanEmpty is populated after a Plan operation completes without error + // to note whether a plan is empty or has changes. + PlanEmpty bool + + // State is the final state after the operation completed. Persisting + // this state is managed by the backend. This should only be read + // after the operation completes to avoid read/write races. + State *states.State +} + +// OperationResult describes the result status of an operation. +type OperationResult int + +const ( + // OperationSuccess indicates that the operation completed as expected. + OperationSuccess OperationResult = 0 + + // OperationFailure indicates that the operation encountered some sort + // of error, and thus may have been only partially performed or not + // performed at all. 
+ OperationFailure OperationResult = 1 +) + +func (r OperationResult) ExitStatus() int { + return int(r) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/cli.go b/vendor/github.com/hashicorp/terraform/backend/cli.go new file mode 100644 index 00000000..cd29e386 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/cli.go @@ -0,0 +1,87 @@ +package backend + +import ( + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" +) + +// CLI is an optional interface that can be implemented to be initialized +// with information from the Terraform CLI. If this is implemented, this +// initialization function will be called with data to help interact better +// with a CLI. +// +// This interface was created to improve backend interaction with the +// official Terraform CLI while making it optional for API users to have +// to provide full CLI interaction to every backend. +// +// If you're implementing a Backend, it is acceptable to require CLI +// initialization. In this case, your backend should be coded to error +// on other methods (such as State, Operation) if CLI initialization was not +// done with all required fields. +type CLI interface { + Backend + + // CLIInit is called once with options. The options passed to this + // function may not be modified after calling this since they can be + // read/written at any time by the Backend implementation. + // + // This may be called before or after Configure is called, so if settings + // here affect configurable settings, care should be taken to handle + // whether they should be overwritten or not. + CLIInit(*CLIOpts) error +} + +// CLIOpts are the options passed into CLIInit for the CLI interface. +// +// These options represent the functionality the CLI exposes and often +// maps to meta-flags available on every CLI (such as -input). +// +// When implementing a backend, it isn't expected that every option applies. +// Your backend should be documented clearly to explain to end users what +// options have an effect and what won't. In some cases, it may even make sense +// to error in your backend when an option is set so that users don't make +// a critically incorrect assumption about behavior. +type CLIOpts struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ShowDiagnostics is a function that will format and print diagnostic + // messages to the UI. + ShowDiagnostics func(vals ...interface{}) + + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. + // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. + // If this is empty, no backup will be taken. + StatePath string + StateOutPath string + StateBackupPath string + + // ContextOpts are the base context options to set when initializing a + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // Input will ask for necessary input prior to performing any operations. + // + // Validation will perform validation prior to running an operation. The + // variable naming doesn't match the style of others since we have a func + // Validate.
+ Input bool + Validation bool + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because Terraform is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. + RunningInAutomation bool +} diff --git a/vendor/github.com/hashicorp/terraform/backend/init/init.go b/vendor/github.com/hashicorp/terraform/backend/init/init.go new file mode 100644 index 00000000..8f70a1f8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/init/init.go @@ -0,0 +1,142 @@ +// Package init contains the list of backends that can be initialized and +// basic helper functions for initializing those backends. +package init + +import ( + "sync" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + backendAtlas "github.com/hashicorp/terraform/backend/atlas" + backendLocal "github.com/hashicorp/terraform/backend/local" + backendRemote "github.com/hashicorp/terraform/backend/remote" + backendArtifactory "github.com/hashicorp/terraform/backend/remote-state/artifactory" + backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure" + backendConsul "github.com/hashicorp/terraform/backend/remote-state/consul" + backendEtcdv2 "github.com/hashicorp/terraform/backend/remote-state/etcdv2" + backendEtcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3" + backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs" + backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http" + backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem" + backendManta "github.com/hashicorp/terraform/backend/remote-state/manta" + backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss" + backendPg "github.com/hashicorp/terraform/backend/remote-state/pg" + backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3" + backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift" +) + +// backends is the list of available backends. This is a global variable +// because backends are currently hardcoded into Terraform and can't be +// modified without recompilation. +// +// To read an available backend, use the Backend function. This ensures +// safe concurrent read access to the list of built-in backends. +// +// Backends are hardcoded into Terraform because the API for backends uses +// complex structures and supporting that over the plugin system is currently +// prohibitively difficult. For those wanting to implement a custom backend, +// they can do so with recompilation. +var backends map[string]backend.InitFn +var backendsLock sync.Mutex + +// Init initializes the backends map with all our hardcoded backends. +func Init(services *disco.Disco) { + backendsLock.Lock() + defer backendsLock.Unlock() + + backends = map[string]backend.InitFn{ + // Enhanced backends. + "local": func() backend.Backend { return backendLocal.New() }, + "remote": func() backend.Backend { return backendRemote.New(services) }, + + // Remote State backends. 
+ "artifactory": func() backend.Backend { return backendArtifactory.New() }, + "atlas": func() backend.Backend { return backendAtlas.New() }, + "azurerm": func() backend.Backend { return backendAzure.New() }, + "consul": func() backend.Backend { return backendConsul.New() }, + "etcd": func() backend.Backend { return backendEtcdv2.New() }, + "etcdv3": func() backend.Backend { return backendEtcdv3.New() }, + "gcs": func() backend.Backend { return backendGCS.New() }, + "http": func() backend.Backend { return backendHTTP.New() }, + "inmem": func() backend.Backend { return backendInmem.New() }, + "manta": func() backend.Backend { return backendManta.New() }, + "oss": func() backend.Backend { return backendOSS.New() }, + "pg": func() backend.Backend { return backendPg.New() }, + "s3": func() backend.Backend { return backendS3.New() }, + "swift": func() backend.Backend { return backendSwift.New() }, + + // Deprecated backends. + "azure": func() backend.Backend { + return deprecateBackend( + backendAzure.New(), + `Warning: "azure" name is deprecated, please use "azurerm"`, + ) + }, + } +} + +// Backend returns the initialization factory for the given backend, or +// nil if none exists. +func Backend(name string) backend.InitFn { + backendsLock.Lock() + defer backendsLock.Unlock() + return backends[name] +} + +// Set sets a new backend in the list of backends. If f is nil then the +// backend will be removed from the map. If this backend already exists +// then it will be overwritten. +// +// This method sets this backend globally and care should be taken to do +// this only before Terraform is executing to prevent odd behavior of backends +// changing mid-execution. +func Set(name string, f backend.InitFn) { + backendsLock.Lock() + defer backendsLock.Unlock() + + if f == nil { + delete(backends, name) + return + } + + backends[name] = f +} + +// deprecatedBackendShim is used to wrap a backend and inject a deprecation +// warning into the Validate method. +type deprecatedBackendShim struct { + backend.Backend + Message string +} + +// PrepareConfig delegates to the wrapped backend to validate its config +// and then appends shim's deprecation warning. +func (b deprecatedBackendShim) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + newObj, diags := b.Backend.PrepareConfig(obj) + return newObj, diags.Append(tfdiags.SimpleWarning(b.Message)) +} + +// DeprecateBackend can be used to wrap a backend to retrun a deprecation +// warning during validation. +func deprecateBackend(b backend.Backend, message string) backend.Backend { + // Since a Backend wrapped by deprecatedBackendShim can no longer be + // asserted as an Enhanced or Local backend, disallow those types here + // entirely. If something other than a basic backend.Backend needs to be + // deprecated, we can add that functionality to schema.Backend or the + // backend itself. 
+ if _, ok := b.(backend.Enhanced); ok { + panic("cannot use DeprecateBackend on an Enhanced Backend") + } + + if _, ok := b.(backend.Local); ok { + panic("cannot use DeprecateBackend on a Local Backend") + } + + return deprecatedBackendShim{ + Backend: b, + Message: message, + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend.go b/vendor/github.com/hashicorp/terraform/backend/local/backend.go new file mode 100644 index 00000000..54cab6ec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend.go @@ -0,0 +1,595 @@ +package local + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "sync" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" +) + +const ( + DefaultWorkspaceDir = "terraform.tfstate.d" + DefaultWorkspaceFile = "environment" + DefaultStateFilename = "terraform.tfstate" + DefaultBackupExtension = ".backup" +) + +// Local is an implementation of EnhancedBackend that performs all operations +// locally. This is the "default" backend and implements normal Terraform +// behavior as it is well known. +type Local struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ShowDiagnostics prints diagnostic messages to the UI. + ShowDiagnostics func(vals ...interface{}) + + // The State* paths are set from the backend config, and may be left blank + // to use the defaults. If the actual paths for the local backend state are + // needed, use the StatePaths method. + // + // StatePath is the local path where state is read from. + // + // StateOutPath is the local path where the state will be written. + // If this is empty, it will default to StatePath. + // + // StateBackupPath is the local path where a backup file will be written. + // Set this to "-" to disable state backup. + // + // StateWorkspaceDir is the path to the folder containing data for + // non-default workspaces. This defaults to DefaultWorkspaceDir if not set. + StatePath string + StateOutPath string + StateBackupPath string + StateWorkspaceDir string + + // The OverrideState* paths are set based on per-operation CLI arguments + // and will override what'd be built from the State* fields if non-empty. + // While the interpretation of the State* fields depends on the active + // workspace, the OverrideState* fields are always used literally. + OverrideStatePath string + OverrideStateOutPath string + OverrideStateBackupPath string + + // We only want to create a single instance of a local state, so store them + // here as they're loaded. + states map[string]statemgr.Full + + // Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // OpInput will ask for necessary input prior to performing any operations. + // + // OpValidation will perform validation prior to running an operation. 
The + // variable naming doesn't match the style of others since we have a func + // Validate. + OpInput bool + OpValidation bool + + // Backend, if non-nil, will use this backend for non-enhanced behavior. + // This allows local behavior with remote state storage. It is a way to + // "upgrade" a non-enhanced backend to an enhanced backend with typical + // behavior. + // + // If this is nil, local performs normal state loading and storage. + Backend backend.Backend + + // RunningInAutomation indicates that commands are being run by an + // automated system rather than directly at a command prompt. + // + // This is a hint not to produce messages that expect that a user can + // run a follow-up command, perhaps because Terraform is running in + // some sort of workflow automation tool that abstracts away the + // exact commands that are being run. + RunningInAutomation bool + + // opLock locks operations + opLock sync.Mutex +} + +var _ backend.Backend = (*Local)(nil) + +// New returns a new initialized local backend. +func New() *Local { + return NewWithBackend(nil) +} + +// NewWithBackend returns a new local backend initialized with a +// dedicated backend for non-enhanced behavior. +func NewWithBackend(backend backend.Backend) *Local { + return &Local{ + Backend: backend, + } +} + +func (b *Local) ConfigSchema() *configschema.Block { + if b.Backend != nil { + return b.Backend.ConfigSchema() + } + return &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "path": { + Type: cty.String, + Optional: true, + }, + "workspace_dir": { + Type: cty.String, + Optional: true, + }, + }, + } +} + +func (b *Local) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) { + if b.Backend != nil { + return b.Backend.PrepareConfig(obj) + } + + var diags tfdiags.Diagnostics + + if val := obj.GetAttr("path"); !val.IsNull() { + p := val.AsString() + if p == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid local state file path", + `The "path" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "path"}}, + )) + } + } + + if val := obj.GetAttr("workspace_dir"); !val.IsNull() { + p := val.AsString() + if p == "" { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid local workspace directory path", + `The "workspace_dir" attribute value must not be empty.`, + cty.Path{cty.GetAttrStep{Name: "workspace_dir"}}, + )) + } + } + + return obj, diags +} + +func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics { + if b.Backend != nil { + return b.Backend.Configure(obj) + } + + var diags tfdiags.Diagnostics + + type Config struct { + Path string `hcl:"path,optional"` + WorkspaceDir string `hcl:"workspace_dir,optional"` + } + + if val := obj.GetAttr("path"); !val.IsNull() { + p := val.AsString() + b.StatePath = p + b.StateOutPath = p + } else { + b.StatePath = DefaultStateFilename + b.StateOutPath = DefaultStateFilename + } + + if val := obj.GetAttr("workspace_dir"); !val.IsNull() { + p := val.AsString() + b.StateWorkspaceDir = p + } else { + b.StateWorkspaceDir = DefaultWorkspaceDir + } + + return diags +} + +func (b *Local) Workspaces() ([]string, error) { + // If we have a backend handling state, defer to that. 
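As an editorial aside, a hedged sketch of configuring this local backend directly, mirroring the PrepareConfig/Configure logic above; the state path is hypothetical:

```go
package main

import (
	"log"

	backendLocal "github.com/hashicorp/terraform/backend/local"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	b := backendLocal.New()

	cfg := cty.ObjectVal(map[string]cty.Value{
		"path":          cty.StringVal("custom.tfstate"),
		"workspace_dir": cty.NullVal(cty.String), // defaults to terraform.tfstate.d
	})

	cfg, diags := b.PrepareConfig(cfg)
	if !diags.HasErrors() {
		diags = diags.Append(b.Configure(cfg))
	}
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
}
```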
+	if b.Backend != nil {
+		return b.Backend.Workspaces()
+	}
+
+	// the listing always starts with "default"
+	envs := []string{backend.DefaultStateName}
+
+	entries, err := ioutil.ReadDir(b.stateWorkspaceDir())
+	// no error if there are no envs configured
+	if os.IsNotExist(err) {
+		return envs, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	var listed []string
+	for _, entry := range entries {
+		if entry.IsDir() {
+			listed = append(listed, filepath.Base(entry.Name()))
+		}
+	}
+
+	sort.Strings(listed)
+	envs = append(envs, listed...)
+
+	return envs, nil
+}
+
+// DeleteWorkspace removes a workspace.
+//
+// The "default" workspace cannot be removed.
+func (b *Local) DeleteWorkspace(name string) error {
+	// If we have a backend handling state, defer to that.
+	if b.Backend != nil {
+		return b.Backend.DeleteWorkspace(name)
+	}
+
+	if name == "" {
+		return errors.New("empty state name")
+	}
+
+	if name == backend.DefaultStateName {
+		return errors.New("cannot delete default state")
+	}
+
+	delete(b.states, name)
+	return os.RemoveAll(filepath.Join(b.stateWorkspaceDir(), name))
+}
+
+func (b *Local) StateMgr(name string) (statemgr.Full, error) {
+	// If we have a backend handling state, delegate to that.
+	if b.Backend != nil {
+		return b.Backend.StateMgr(name)
+	}
+
+	if s, ok := b.states[name]; ok {
+		return s, nil
+	}
+
+	if err := b.createState(name); err != nil {
+		return nil, err
+	}
+
+	statePath, stateOutPath, backupPath := b.StatePaths(name)
+	log.Printf("[TRACE] backend/local: state manager for workspace %q will:\n - read initial snapshot from %s\n - write new snapshots to %s\n - create any backup at %s", name, statePath, stateOutPath, backupPath)
+
+	s := statemgr.NewFilesystemBetweenPaths(statePath, stateOutPath)
+	if backupPath != "" {
+		s.SetBackupPath(backupPath)
+	}
+
+	if b.states == nil {
+		b.states = map[string]statemgr.Full{}
+	}
+	b.states[name] = s
+	return s, nil
+}
+
+// Operation implements backend.Enhanced
+//
+// This will initialize an in-memory terraform.Context to perform the
+// operation within this process.
+//
+// The given operation parameter will be merged with the ContextOpts on
+// the structure with the following rules. If a rule isn't specified and the
+// name conflicts, assume that the field is overwritten if set.
+func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
+	// Determine the function to call for our operation
+	var f func(context.Context, context.Context, *backend.Operation, *backend.RunningOperation)
+	switch op.Type {
+	case backend.OperationTypeRefresh:
+		f = b.opRefresh
+	case backend.OperationTypePlan:
+		f = b.opPlan
+	case backend.OperationTypeApply:
+		f = b.opApply
+	default:
+		return nil, fmt.Errorf(
+			"Unsupported operation type: %s\n\n"+
+				"This is a bug in Terraform and should be reported. The local backend\n"+
+				"is built-in to Terraform and should always support all operations.",
+			op.Type)
+	}
+
+	// Lock
+	b.opLock.Lock()
+
+	// Build our running operation
+	// the runningCtx is only used to block until the operation returns.
+	runningCtx, done := context.WithCancel(context.Background())
+	runningOp := &backend.RunningOperation{
+		Context: runningCtx,
+	}
+
+	// stopCtx wraps the context passed in, and is used to signal a graceful Stop.
+	stopCtx, stop := context.WithCancel(ctx)
+	runningOp.Stop = stop
+
+	// cancelCtx is used to cancel the operation immediately, usually
+	// indicating that the process is exiting.
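+	// (By convention the CLI wires a first interrupt to runningOp.Stop and a
+	// second interrupt to runningOp.Cancel; that wiring lives in the caller,
+	// not in this package.)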
+ cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + if op.LockState { + op.StateLocker = clistate.NewLocker(stopCtx, op.StateLockTimeout, b.CLI, b.Colorize()) + } else { + op.StateLocker = clistate.NewNoopLocker() + } + + // Do it + go func() { + defer done() + defer stop() + defer cancel() + + // the state was locked during context creation, unlock the state when + // the operation completes + defer func() { + err := op.StateLocker.Unlock(nil) + if err != nil { + b.ShowDiagnostics(err) + runningOp.Result = backend.OperationFailure + } + }() + + defer b.opLock.Unlock() + f(stopCtx, cancelCtx, op, runningOp) + }() + + // Return + return runningOp, nil +} + +// opWait waits for the operation to complete, and a stop signal or a +// cancelation signal. +func (b *Local) opWait( + doneCh <-chan struct{}, + stopCtx context.Context, + cancelCtx context.Context, + tfCtx *terraform.Context, + opStateMgr statemgr.Persister) (canceled bool) { + // Wait for the operation to finish or for us to be interrupted so + // we can handle it properly. + select { + case <-stopCtx.Done(): + if b.CLI != nil { + b.CLI.Output("Stopping operation...") + } + + // try to force a PersistState just in case the process is terminated + // before we can complete. + if err := opStateMgr.PersistState(); err != nil { + // We can't error out from here, but warn the user if there was an error. + // If this isn't transient, we will catch it again below, and + // attempt to save the state another way. + if b.CLI != nil { + b.CLI.Error(fmt.Sprintf(earlyStateWriteErrorFmt, err)) + } + } + + // Stop execution + log.Println("[TRACE] backend/local: waiting for the running operation to stop") + go tfCtx.Stop() + + select { + case <-cancelCtx.Done(): + log.Println("[WARN] running operation was forcefully canceled") + // if the operation was canceled, we need to return immediately + canceled = true + case <-doneCh: + log.Println("[TRACE] backend/local: graceful stop has completed") + } + case <-cancelCtx.Done(): + // this should not be called without first attempting to stop the + // operation + log.Println("[ERROR] running operation canceled without Stop") + canceled = true + case <-doneCh: + } + return +} + +// ReportResult is a helper for the common chore of setting the status of +// a running operation and showing any diagnostics produced during that +// operation. +// +// If the given diagnostics contains errors then the operation's result +// will be set to backend.OperationFailure. It will be set to +// backend.OperationSuccess otherwise. It will then use b.ShowDiagnostics +// to show the given diagnostics before returning. +// +// Callers should feel free to do each of these operations separately in +// more complex cases where e.g. diagnostics are interleaved with other +// output, but terminating immediately after reporting error diagnostics is +// common and can be expressed concisely via this method. +func (b *Local) ReportResult(op *backend.RunningOperation, diags tfdiags.Diagnostics) { + if diags.HasErrors() { + op.Result = backend.OperationFailure + } else { + op.Result = backend.OperationSuccess + } + if b.ShowDiagnostics != nil { + b.ShowDiagnostics(diags) + } else { + // Shouldn't generally happen, but if it does then we'll at least + // make some noise in the logs to help us spot it. 
+		if len(diags) != 0 {
+			log.Printf(
+				"[ERROR] Local backend needs to report diagnostics but ShowDiagnostics is not set:\n%s",
+				diags.ErrWithWarnings(),
+			)
+		}
+	}
+}
+
+// Colorize returns the Colorize structure that can be used for colorizing
+// output. This is guaranteed to always return a non-nil value and so is useful
+// as a helper to wrap any potentially colored strings.
+func (b *Local) Colorize() *colorstring.Colorize {
+	if b.CLIColor != nil {
+		return b.CLIColor
+	}
+
+	return &colorstring.Colorize{
+		Colors:  colorstring.DefaultColors,
+		Disable: true,
+	}
+}
+
+func (b *Local) schemaConfigure(ctx context.Context) error {
+	d := schema.FromContextBackendConfig(ctx)
+
+	// Set the path if it is set
+	pathRaw, ok := d.GetOk("path")
+	if ok {
+		path := pathRaw.(string)
+		if path == "" {
+			return fmt.Errorf("configured path is empty")
+		}
+
+		b.StatePath = path
+		b.StateOutPath = path
+	}
+
+	if raw, ok := d.GetOk("workspace_dir"); ok {
+		path := raw.(string)
+		if path != "" {
+			b.StateWorkspaceDir = path
+		}
+	}
+
+	// Legacy name, which ConflictsWith workspace_dir
+	if raw, ok := d.GetOk("environment_dir"); ok {
+		path := raw.(string)
+		if path != "" {
+			b.StateWorkspaceDir = path
+		}
+	}
+
+	return nil
+}
+
+// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as
+// configured from the CLI.
+func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) {
+	statePath := b.OverrideStatePath
+	stateOutPath := b.OverrideStateOutPath
+	backupPath := b.OverrideStateBackupPath
+
+	isDefault := name == backend.DefaultStateName || name == ""
+
+	baseDir := ""
+	if !isDefault {
+		baseDir = filepath.Join(b.stateWorkspaceDir(), name)
+	}
+
+	if statePath == "" {
+		if isDefault {
+			statePath = b.StatePath // b.StatePath applies only to the default workspace, since StateWorkspaceDir is used otherwise
+		}
+		if statePath == "" {
+			statePath = filepath.Join(baseDir, DefaultStateFilename)
+		}
+	}
+	if stateOutPath == "" {
+		stateOutPath = statePath
+	}
+	if backupPath == "" {
+		backupPath = b.StateBackupPath
+	}
+	switch backupPath {
+	case "-":
+		backupPath = ""
+	case "":
+		backupPath = stateOutPath + DefaultBackupExtension
+	}
+
+	return statePath, stateOutPath, backupPath
+}
+
+// PathsConflictWith returns true if any state path used by a workspace in
+// the receiver is the same as any state path used by the other given
+// local backend instance.
+//
+// This should be used when "migrating" from one local backend configuration to
+// another in order to avoid deleting the "old" state snapshots if they are
+// in the same files as the "new" state snapshots.
+func (b *Local) PathsConflictWith(other *Local) bool {
+	otherPaths := map[string]struct{}{}
+	otherWorkspaces, err := other.Workspaces()
+	if err != nil {
+		// If we can't enumerate the workspaces then we'll conservatively
+		// assume that paths _do_ overlap, since we can't be certain.
+		return true
+	}
+	for _, name := range otherWorkspaces {
+		p, _, _ := other.StatePaths(name)
+		otherPaths[p] = struct{}{}
+	}
+
+	ourWorkspaces, err := b.Workspaces()
+	if err != nil {
+		// If we can't enumerate the workspaces then we'll conservatively
+		// assume that paths _do_ overlap, since we can't be certain.
+ return true + } + + for _, name := range ourWorkspaces { + p, _, _ := b.StatePaths(name) + if _, exists := otherPaths[p]; exists { + return true + } + } + return false +} + +// this only ensures that the named directory exists +func (b *Local) createState(name string) error { + if name == backend.DefaultStateName { + return nil + } + + stateDir := filepath.Join(b.stateWorkspaceDir(), name) + s, err := os.Stat(stateDir) + if err == nil && s.IsDir() { + // no need to check for os.IsNotExist, since that is covered by os.MkdirAll + // which will catch the other possible errors as well. + return nil + } + + err = os.MkdirAll(stateDir, 0755) + if err != nil { + return err + } + + return nil +} + +// stateWorkspaceDir returns the directory where state environments are stored. +func (b *Local) stateWorkspaceDir() string { + if b.StateWorkspaceDir != "" { + return b.StateWorkspaceDir + } + + return DefaultWorkspaceDir +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go new file mode 100644 index 00000000..b1244871 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_apply.go @@ -0,0 +1,293 @@ +package local + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Local) opApply( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + log.Printf("[INFO] backend/local: starting Apply operation") + + var diags tfdiags.Diagnostics + + // If we have a nil module at this point, then set it to an empty tree + // to avoid any potential crashes. + if op.PlanFile == nil && !op.Destroy && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Apply requires configuration to be present. Applying without a configuration "+ + "would mark everything for destruction, which is normally not what is desired. 
"+ + "If you would like to destroy everything, run 'terraform destroy' instead.", + )) + b.ReportResult(runningOp, diags) + return + } + + // Setup our count hook that keeps track of resource changes + countHook := new(CountHook) + stateHook := new(StateHook) + if b.ContextOpts == nil { + b.ContextOpts = new(terraform.ContextOpts) + } + old := b.ContextOpts.Hooks + defer func() { b.ContextOpts.Hooks = old }() + b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook) + + // Get our context + tfCtx, _, opState, contextDiags := b.context(op) + diags = diags.Append(contextDiags) + if contextDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + + // Setup the state + runningOp.State = tfCtx.State() + + // If we weren't given a plan, then we refresh/plan + if op.PlanFile == nil { + // If we're refreshing before apply, perform that + if op.PlanRefresh { + log.Printf("[INFO] backend/local: apply calling Refresh") + _, err := tfCtx.Refresh() + if err != nil { + diags = diags.Append(err) + runningOp.Result = backend.OperationFailure + b.ShowDiagnostics(diags) + return + } + } + + // Perform the plan + log.Printf("[INFO] backend/local: apply calling Plan") + plan, err := tfCtx.Plan() + if err != nil { + diags = diags.Append(err) + b.ReportResult(runningOp, diags) + return + } + + trivialPlan := plan.Changes.Empty() + hasUI := op.UIOut != nil && op.UIIn != nil + mustConfirm := hasUI && ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove && !trivialPlan)) + if mustConfirm { + var desc, query string + if op.Destroy { + if op.Workspace != "default" { + query = "Do you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you really want to destroy all resources?" + } + desc = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + if op.Workspace != "default" { + query = "Do you want to perform these actions in workspace \"" + op.Workspace + "\"?" + } else { + query = "Do you want to perform these actions?" + } + desc = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + if !trivialPlan { + // Display the plan of what we are going to apply/destroy. + b.renderPlan(plan, runningOp.State, tfCtx.Schemas()) + b.CLI.Output("") + } + + v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{ + Id: "approve", + Query: query, + Description: desc, + }) + if err != nil { + diags = diags.Append(errwrap.Wrapf("Error asking for approval: {{err}}", err)) + b.ReportResult(runningOp, diags) + return + } + if v != "yes" { + if op.Destroy { + b.CLI.Info("Destroy cancelled.") + } else { + b.CLI.Info("Apply cancelled.") + } + runningOp.Result = backend.OperationFailure + return + } + } + } + + // Setup our hook for continuous state updates + stateHook.StateMgr = opState + + // Start the apply in a goroutine so that we can be interrupted. 
+ var applyState *states.State + var applyDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + log.Printf("[INFO] backend/local: apply calling Apply") + _, applyDiags = tfCtx.Apply() + // we always want the state, even if apply failed + applyState = tfCtx.State() + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, tfCtx, opState) { + return + } + + // Store the final state + runningOp.State = applyState + err := statemgr.WriteAndPersist(opState, applyState) + if err != nil { + // Export the state file from the state manager and assign the new + // state. This is needed to preserve the existing serial and lineage. + stateFile := statemgr.Export(opState) + if stateFile == nil { + stateFile = &statefile.File{} + } + stateFile.State = applyState + + diags = diags.Append(b.backupStateForError(stateFile, err)) + b.ReportResult(runningOp, diags) + return + } + + diags = diags.Append(applyDiags) + if applyDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + + // If we've accumulated any warnings along the way then we'll show them + // here just before we show the summary and next steps. If we encountered + // errors then we would've returned early at some other point above. + b.ShowDiagnostics(diags) + + // If we have a UI, output the results + if b.CLI != nil { + if op.Destroy { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Destroy complete! Resources: %d destroyed.", + countHook.Removed))) + } else { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold][green]\n"+ + "Apply complete! Resources: %d added, %d changed, %d destroyed.", + countHook.Added, + countHook.Changed, + countHook.Removed))) + } + + // only show the state file help message if the state is local. + if (countHook.Added > 0 || countHook.Changed > 0) && b.StateOutPath != "" { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset]\n"+ + "The state of your infrastructure has been saved to the path\n"+ + "below. This state is required to modify and destroy your\n"+ + "infrastructure, so keep it safe. To inspect the complete state\n"+ + "use the `terraform show` command.\n\n"+ + "State path: %s", + b.StateOutPath))) + } + } +} + +// backupStateForError is called in a scenario where we're unable to persist the +// state for some reason, and will attempt to save a backup copy of the state +// to local disk to help the user recover. This is a "last ditch effort" sort +// of thing, so we really don't want to end up in this codepath; we should do +// everything we possibly can to get the state saved _somewhere_. +func (b *Local) backupStateForError(stateFile *statefile.File, err error) error { + b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err)) + + local := statemgr.NewFilesystem("errored.tfstate") + writeErr := local.WriteStateForMigration(stateFile, true) + if writeErr != nil { + b.CLI.Error(fmt.Sprintf( + "Also failed to create local state file for recovery: %s\n\n", writeErr, + )) + // To avoid leaving the user with no state at all, our last resort + // is to print the JSON state out onto the terminal. This is an awful + // UX, so we should definitely avoid doing this if at all possible, + // but at least the user has _some_ path to recover if we end up + // here for some reason. 
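+	// (The full fallback chain is: persist via the backend, then write
+	// errored.tfstate locally, then print the raw JSON below; the three
+	// const error messages at the bottom of this file correspond to those
+	// three stages.)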
+ stateBuf := new(bytes.Buffer) + jsonErr := statefile.Write(stateFile, stateBuf) + if jsonErr != nil { + b.CLI.Error(fmt.Sprintf( + "Also failed to JSON-serialize the state to print it: %s\n\n", jsonErr, + )) + return errors.New(stateWriteFatalError) + } + + b.CLI.Output(stateBuf.String()) + + return errors.New(stateWriteConsoleFallbackError) + } + + return errors.New(stateWriteBackedUpError) +} + +const stateWriteBackedUpError = `Failed to persist state to backend. + +The error shown above has prevented Terraform from writing the updated state +to the configured backend. To allow for recovery, the state has been written +to the file "errored.tfstate" in the current working directory. + +Running "terraform apply" again at this point will create a forked state, +making it harder to recover. + +To retry writing this state, use the following command: + terraform state push errored.tfstate +` + +const stateWriteConsoleFallbackError = `Failed to persist state to backend. + +The errors shown above prevented Terraform from writing the updated state to +the configured backend and from creating a local backup file. As a fallback, +the raw state data is printed above as a JSON object. + +To retry writing this state, copy the state data (from the first { to the +last } inclusive) and save it into a local file called errored.tfstate, then +run the following command: + terraform state push errored.tfstate +` + +const stateWriteFatalError = `Failed to save state after apply. + +A catastrophic error has prevented Terraform from persisting the state file +or creating a backup. Unfortunately this means that the record of any resources +created during this apply has been lost, and such resources may exist outside +of Terraform's management. + +For resources that support import, it is possible to recover by manually +importing each resource using its id from the target system. + +This is a serious bug in Terraform and should be reported. +` + +const earlyStateWriteErrorFmt = `Error saving current state: %s + +Terraform encountered an error attempting to save the state before cancelling +the current operation. Once the operation is complete another attempt will be +made to save the final state. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go new file mode 100644 index 00000000..58bc4f5c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_local.go @@ -0,0 +1,254 @@ +package local + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// backend.Local implementation. +func (b *Local) Context(op *backend.Operation) (*terraform.Context, statemgr.Full, tfdiags.Diagnostics) { + // Make sure the type is invalid. We use this as a way to know not + // to ask for input/validate. 
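+	// (b.context only performs Input/Validate when op.Type is something other
+	// than OperationTypeInvalid, so setting it here opts this path out of
+	// both steps.)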
+ op.Type = backend.OperationTypeInvalid + + if op.LockState { + op.StateLocker = clistate.NewLocker(context.Background(), op.StateLockTimeout, b.CLI, b.Colorize()) + } else { + op.StateLocker = clistate.NewNoopLocker() + } + + ctx, _, stateMgr, diags := b.context(op) + return ctx, stateMgr, diags +} + +func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload.Snapshot, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Get the latest state. + log.Printf("[TRACE] backend/local: requesting state manager for workspace %q", op.Workspace) + s, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err)) + return nil, nil, nil, diags + } + log.Printf("[TRACE] backend/local: requesting state lock for workspace %q", op.Workspace) + if err := op.StateLocker.Lock(s, op.Type.String()); err != nil { + diags = diags.Append(errwrap.Wrapf("Error locking state: {{err}}", err)) + return nil, nil, nil, diags + } + log.Printf("[TRACE] backend/local: reading remote state for workspace %q", op.Workspace) + if err := s.RefreshState(); err != nil { + diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err)) + return nil, nil, nil, diags + } + + // Initialize our context options + var opts terraform.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.Destroy = op.Destroy + opts.Targets = op.Targets + opts.UIInput = op.UIIn + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] backend/local: retrieving local state snapshot for workspace %q", op.Workspace) + opts.State = s.State() + + var tfCtx *terraform.Context + var ctxDiags tfdiags.Diagnostics + var configSnap *configload.Snapshot + if op.PlanFile != nil { + var stateMeta *statemgr.SnapshotMeta + // If the statemgr implements our optional PersistentMeta interface then we'll + // additionally verify that the state snapshot in the plan file has + // consistent metadata, as an additional safety check. + if sm, ok := s.(statemgr.PersistentMeta); ok { + m := sm.StateSnapshotMeta() + stateMeta = &m + } + log.Printf("[TRACE] backend/local: building context from plan file") + tfCtx, configSnap, ctxDiags = b.contextFromPlanFile(op.PlanFile, opts, stateMeta) + // Write sources into the cache of the main loader so that they are + // available if we need to generate diagnostic message snippets. + op.ConfigLoader.ImportSourcesFromSnapshot(configSnap) + } else { + log.Printf("[TRACE] backend/local: building context for current working directory") + tfCtx, configSnap, ctxDiags = b.contextDirect(op, opts) + } + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + return nil, nil, nil, diags + } + log.Printf("[TRACE] backend/local: finished building terraform.Context") + + // If we have an operation, then we automatically do the input/validate + // here since every option requires this. 
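+	// (Ordering matters below: interactive input runs before Validate so that
+	// any interactively-collected values are present when validation inspects
+	// the context.)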
+	if op.Type != backend.OperationTypeInvalid {
+		// If input asking is enabled, then do that
+		if op.PlanFile == nil && b.OpInput {
+			mode := terraform.InputModeProvider
+			mode |= terraform.InputModeVar
+			mode |= terraform.InputModeVarUnset
+
+			log.Printf("[TRACE] backend/local: requesting interactive input, if necessary")
+			inputDiags := tfCtx.Input(mode)
+			diags = diags.Append(inputDiags)
+			if inputDiags.HasErrors() {
+				return nil, nil, nil, diags
+			}
+		}
+
+		// If validation is enabled, validate
+		if b.OpValidation {
+			log.Printf("[TRACE] backend/local: running validation operation")
+			validateDiags := tfCtx.Validate()
+			diags = diags.Append(validateDiags)
+		}
+	}
+
+	return tfCtx, configSnap, s, diags
+}
+
+func (b *Local) contextDirect(op *backend.Operation, opts terraform.ContextOpts) (*terraform.Context, *configload.Snapshot, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	// Load the configuration using the caller-provided configuration loader.
+	config, configSnap, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, nil, diags
+	}
+	opts.Config = config
+
+	variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables)
+	diags = diags.Append(varDiags)
+	if diags.HasErrors() {
+		return nil, nil, diags
+	}
+	if op.Variables != nil {
+		opts.Variables = variables
+	}
+
+	tfCtx, ctxDiags := terraform.NewContext(&opts)
+	diags = diags.Append(ctxDiags)
+	return tfCtx, configSnap, diags
+}
+
+func (b *Local) contextFromPlanFile(pf *planfile.Reader, opts terraform.ContextOpts, currentStateMeta *statemgr.SnapshotMeta) (*terraform.Context, *configload.Snapshot, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
+	const errSummary = "Invalid plan file"
+
+	// A plan file has a snapshot of configuration embedded inside it, which
+	// is used instead of whatever configuration might be already present
+	// in the filesystem.
+	snap, err := pf.ReadConfigSnapshot()
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			errSummary,
+			fmt.Sprintf("Failed to read configuration snapshot from plan file: %s.", err),
+		))
+		return nil, snap, diags
+	}
+	loader := configload.NewLoaderFromSnapshot(snap)
+	config, configDiags := loader.LoadConfig(snap.Modules[""].Dir)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, snap, diags
+	}
+	opts.Config = config
+
+	// A plan file also contains a snapshot of the prior state the changes
+	// are intended to apply to.
+	priorStateFile, err := pf.ReadStateFile()
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			errSummary,
+			fmt.Sprintf("Failed to read prior state snapshot from plan file: %s.", err),
+		))
+		return nil, snap, diags
+	}
+	if currentStateMeta != nil {
+		// If the caller sets this, we require that the stored prior state
+		// has the same metadata, which is an extra safety check that nothing
+		// has changed since the plan was created. (All of the "real-world"
+		// state manager implementations support this, but simpler test backends
+		// may not.)
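+		// (A state's lineage is fixed when the state is first created, and
+		// its serial increments on every write; equal lineage with a
+		// different serial therefore means the state has been written since
+		// this plan was made.)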
+ if currentStateMeta.Lineage != "" && priorStateFile.Lineage != "" { + if priorStateFile.Serial != currentStateMeta.Serial || priorStateFile.Lineage != currentStateMeta.Lineage { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saved plan is stale", + "The given plan file can no longer be applied because the state was changed by another operation after the plan was created.", + )) + } + } + } + // The caller already wrote the "current state" here, but we're overriding + // it here with the prior state. These two should actually be identical in + // normal use, particularly if we validated the state meta above, but + // we do this here anyway to ensure consistent behavior. + opts.State = priorStateFile.State + + plan, err := pf.ReadPlan() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + errSummary, + fmt.Sprintf("Failed to read plan from plan file: %s.", err), + )) + return nil, snap, diags + } + + variables := terraform.InputValues{} + for name, dyVal := range plan.VariableValues { + val, err := dyVal.Decode(cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + errSummary, + fmt.Sprintf("Invalid value for variable %q recorded in plan file: %s.", name, err), + )) + continue + } + + variables[name] = &terraform.InputValue{ + Value: val, + SourceType: terraform.ValueFromPlan, + } + } + opts.Variables = variables + opts.Changes = plan.Changes + opts.Targets = plan.TargetAddrs + opts.ProviderSHA256s = plan.ProviderSHA256s + + tfCtx, ctxDiags := terraform.NewContext(&opts) + diags = diags.Append(ctxDiags) + return tfCtx, snap, diags +} + +const validateWarnHeader = ` +There are warnings related to your configuration. If no errors occurred, +Terraform will continue despite these warnings. It is a good idea to resolve +these warnings in the near future. + +Warnings: +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go new file mode 100644 index 00000000..0a7f28da --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_plan.go @@ -0,0 +1,338 @@ +package local + +import ( + "bytes" + "context" + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Local) opPlan( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + + log.Printf("[INFO] backend/local: starting Plan operation") + + var diags tfdiags.Diagnostics + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't re-plan a saved plan", + "The plan command was given a saved plan file as its input. This command generates "+ + "a new plan, and so it requires a configuration directory as its argument.", + )) + b.ReportResult(runningOp, diags) + return + } + + // Local planning requires a config, unless we're planning to destroy. 
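+	// (In CLI terms: a plain `terraform plan` needs at least one .tf file in
+	// the target directory, while `terraform plan -destroy` can be computed
+	// from state alone.)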
+ if !op.Destroy && !op.HasConfig() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + "Plan requires configuration to be present. Planning without a configuration would "+ + "mark everything for destruction, which is normally not what is desired. If you "+ + "would like to destroy everything, run plan with the -destroy option. Otherwise, "+ + "create a Terraform configuration file (.tf file) and try again.", + )) + b.ReportResult(runningOp, diags) + return + } + + // Setup our count hook that keeps track of resource changes + countHook := new(CountHook) + if b.ContextOpts == nil { + b.ContextOpts = new(terraform.ContextOpts) + } + old := b.ContextOpts.Hooks + defer func() { b.ContextOpts.Hooks = old }() + b.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook) + + // Get our context + tfCtx, configSnap, opState, ctxDiags := b.context(op) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + + // Setup the state + runningOp.State = tfCtx.State() + + // If we're refreshing before plan, perform that + baseState := runningOp.State + if op.PlanRefresh { + log.Printf("[INFO] backend/local: plan calling Refresh") + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n")) + } + + refreshedState, err := tfCtx.Refresh() + if err != nil { + diags = diags.Append(err) + b.ReportResult(runningOp, diags) + return + } + baseState = refreshedState // plan will be relative to our refreshed state + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + } + + // Perform the plan in a goroutine so we can be interrupted + var plan *plans.Plan + var planDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + log.Printf("[INFO] backend/local: plan calling Plan") + plan, planDiags = tfCtx.Plan() + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, tfCtx, opState) { + // If we get in here then the operation was cancelled, which is always + // considered to be a failure. + log.Printf("[INFO] backend/local: plan operation was force-cancelled by interrupt") + runningOp.Result = backend.OperationFailure + return + } + log.Printf("[INFO] backend/local: plan operation completed") + + diags = diags.Append(planDiags) + if planDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + // Record state + runningOp.PlanEmpty = plan.Changes.Empty() + + // Save the plan to disk + if path := op.PlanOutPath; path != "" { + if op.PlanOutBackend == nil { + // This is always a bug in the operation caller; it's not valid + // to set PlanOutPath without also setting PlanOutBackend. + diags = diags.Append(fmt.Errorf( + "PlanOutPath set without also setting PlanOutBackend (this is a bug in Terraform)"), + ) + b.ReportResult(runningOp, diags) + return + } + plan.Backend = *op.PlanOutBackend + + // We may have updated the state in the refresh step above, but we + // will freeze that updated state in the plan file for now and + // only write it if this plan is subsequently applied. 
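+	// (statemgr.PlannedStateUpdate pairs the refreshed snapshot with the
+	// serial/lineage metadata tracked by opState, which is what lets a later
+	// apply detect that the saved plan has gone stale.)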
+ plannedStateFile := statemgr.PlannedStateUpdate(opState, baseState) + + log.Printf("[INFO] backend/local: writing plan output to: %s", path) + err := planfile.Create(path, configSnap, plannedStateFile, plan) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write plan file", + fmt.Sprintf("The plan file could not be written: %s.", err), + )) + b.ReportResult(runningOp, diags) + return + } + } + + // Perform some output tasks if we have a CLI to output to. + if b.CLI != nil { + schemas := tfCtx.Schemas() + + if plan.Changes.Empty() { + b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges))) + return + } + + b.renderPlan(plan, baseState, schemas) + + // If we've accumulated any warnings along the way then we'll show them + // here just before we show the summary and next steps. If we encountered + // errors then we would've returned early at some other point above. + b.ShowDiagnostics(diags) + + // Give the user some next-steps, unless we're running in an automation + // tool which is presumed to provide its own UI for further actions. + if !b.RunningInAutomation { + + b.CLI.Output("\n------------------------------------------------------------------------") + + if path := op.PlanOutPath; path == "" { + b.CLI.Output(fmt.Sprintf( + "\n" + strings.TrimSpace(planHeaderNoOutput) + "\n", + )) + } else { + b.CLI.Output(fmt.Sprintf( + "\n"+strings.TrimSpace(planHeaderYesOutput)+"\n", + path, path, + )) + } + } + } +} + +func (b *Local) renderPlan(plan *plans.Plan, state *states.State, schemas *terraform.Schemas) { + counts := map[plans.Action]int{} + var rChanges []*plans.ResourceInstanceChangeSrc + for _, change := range plan.Changes.Resources { + if change.Action == plans.Delete && change.Addr.Resource.Resource.Mode == addrs.DataResourceMode { + // Avoid rendering data sources on deletion + continue + } + + rChanges = append(rChanges, change) + counts[change.Action]++ + } + + headerBuf := &bytes.Buffer{} + fmt.Fprintf(headerBuf, "\n%s\n", strings.TrimSpace(planHeaderIntro)) + if counts[plans.Create] > 0 { + fmt.Fprintf(headerBuf, "%s create\n", format.DiffActionSymbol(plans.Create)) + } + if counts[plans.Update] > 0 { + fmt.Fprintf(headerBuf, "%s update in-place\n", format.DiffActionSymbol(plans.Update)) + } + if counts[plans.Delete] > 0 { + fmt.Fprintf(headerBuf, "%s destroy\n", format.DiffActionSymbol(plans.Delete)) + } + if counts[plans.DeleteThenCreate] > 0 { + fmt.Fprintf(headerBuf, "%s destroy and then create replacement\n", format.DiffActionSymbol(plans.DeleteThenCreate)) + } + if counts[plans.CreateThenDelete] > 0 { + fmt.Fprintf(headerBuf, "%s create replacement and then destroy\n", format.DiffActionSymbol(plans.CreateThenDelete)) + } + if counts[plans.Read] > 0 { + fmt.Fprintf(headerBuf, "%s read (data resources)\n", format.DiffActionSymbol(plans.Read)) + } + + b.CLI.Output(b.Colorize().Color(headerBuf.String())) + + b.CLI.Output("Terraform will perform the following actions:\n") + + // Note: we're modifying the backing slice of this plan object in-place + // here. The ordering of resource changes in a plan is not significant, + // but we can only do this safely here because we can assume that nobody + // is concurrently modifying our changes while we're trying to print it. 
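+	// (The sort key is the resource address, with DeposedKey as a tiebreaker
+	// so that deposed instances order deterministically beside their current
+	// counterparts.)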
+ sort.Slice(rChanges, func(i, j int) bool { + iA := rChanges[i].Addr + jA := rChanges[j].Addr + if iA.String() == jA.String() { + return rChanges[i].DeposedKey < rChanges[j].DeposedKey + } + return iA.Less(jA) + }) + + for _, rcs := range rChanges { + if rcs.Action == plans.NoOp { + continue + } + providerSchema := schemas.ProviderSchema(rcs.ProviderAddr.ProviderConfig.Type) + if providerSchema == nil { + // Should never happen + b.CLI.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.ProviderAddr)) + continue + } + rSchema, _ := providerSchema.SchemaForResourceAddr(rcs.Addr.Resource.Resource) + if rSchema == nil { + // Should never happen + b.CLI.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.Addr)) + continue + } + + // check if the change is due to a tainted resource + tainted := false + if !state.Empty() { + if is := state.ResourceInstance(rcs.Addr); is != nil { + if obj := is.GetGeneration(rcs.DeposedKey.Generation()); obj != nil { + tainted = obj.Status == states.ObjectTainted + } + } + } + + b.CLI.Output(format.ResourceChange( + rcs, + tainted, + rSchema, + b.CLIColor, + )) + } + + // stats is similar to counts above, but: + // - it considers only resource changes + // - it simplifies "replace" into both a create and a delete + stats := map[plans.Action]int{} + for _, change := range rChanges { + switch change.Action { + case plans.CreateThenDelete, plans.DeleteThenCreate: + stats[plans.Create]++ + stats[plans.Delete]++ + default: + stats[change.Action]++ + } + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "[reset][bold]Plan:[reset] "+ + "%d to add, %d to change, %d to destroy.", + stats[plans.Create], stats[plans.Update], stats[plans.Delete], + ))) +} + +const planErrNoConfig = ` +No configuration files found! + +Plan requires configuration to be present. Planning without a configuration +would mark everything for destruction, which is normally not what is desired. +If you would like to destroy everything, please run plan with the "-destroy" +flag or create a single empty configuration file. Otherwise, please create +a Terraform configuration file in the path being executed and try again. +` + +const planHeaderIntro = ` +An execution plan has been generated and is shown below. +Resource actions are indicated with the following symbols: +` + +const planHeaderNoOutput = ` +Note: You didn't specify an "-out" parameter to save this plan, so Terraform +can't guarantee that exactly these actions will be performed if +"terraform apply" is subsequently run. +` + +const planHeaderYesOutput = ` +This plan was saved to: %s + +To perform exactly these actions, run the following command to apply: + terraform apply %q +` + +const planNoChanges = ` +[reset][bold][green]No changes. Infrastructure is up-to-date.[reset][green] + +This means that Terraform did not detect any differences between your +configuration and real physical resources that exist. As a result, no +actions need to be performed. +` + +const planRefreshing = ` +[reset][bold]Refreshing Terraform state in-memory prior to plan...[reset] +The refreshed state will be used to calculate this plan, but will not be +persisted to local or remote state storage. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go new file mode 100644 index 00000000..d1fd4a7d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/backend_refresh.go @@ -0,0 +1,102 @@ +package local + +import ( + "context" + "fmt" + "log" + "os" + "strings" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Local) opRefresh( + stopCtx context.Context, + cancelCtx context.Context, + op *backend.Operation, + runningOp *backend.RunningOperation) { + + var diags tfdiags.Diagnostics + + // Check if our state exists if we're performing a refresh operation. We + // only do this if we're managing state with this backend. + if b.Backend == nil { + if _, err := os.Stat(b.StatePath); err != nil { + if os.IsNotExist(err) { + err = nil + } + + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot read state file", + fmt.Sprintf("Failed to read %s: %s", b.StatePath, err), + )) + b.ReportResult(runningOp, diags) + return + } + } + } + + // Get our context + tfCtx, _, opState, contextDiags := b.context(op) + diags = diags.Append(contextDiags) + if contextDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + + // Set our state + runningOp.State = opState.State() + if !runningOp.State.HasResources() { + if b.CLI != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Empty or non-existent state", + "There are currently no resources tracked in the state, so there is nothing to refresh.", + )) + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(refreshNoState) + "\n")) + } + } + + // Perform the refresh in a goroutine so we can be interrupted + var newState *states.State + var refreshDiags tfdiags.Diagnostics + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + newState, refreshDiags = tfCtx.Refresh() + log.Printf("[INFO] backend/local: refresh calling Refresh") + }() + + if b.opWait(doneCh, stopCtx, cancelCtx, tfCtx, opState) { + return + } + + // write the resulting state to the running op + runningOp.State = newState + diags = diags.Append(refreshDiags) + if refreshDiags.HasErrors() { + b.ReportResult(runningOp, diags) + return + } + + err := statemgr.WriteAndPersist(opState, newState) + if err != nil { + diags = diags.Append(errwrap.Wrapf("Failed to write state: {{err}}", err)) + b.ReportResult(runningOp, diags) + return + } +} + +const refreshNoState = ` +[reset][bold][yellow]Empty or non-existent state file.[reset][yellow] + +Refresh will do nothing. Refresh does not error or return an erroneous +exit status because many automation scripts use refresh, plan, then apply +and may not have a state file yet for the first run. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/local/cli.go b/vendor/github.com/hashicorp/terraform/backend/local/cli.go new file mode 100644 index 00000000..c3d7a65a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/cli.go @@ -0,0 +1,36 @@ +package local + +import ( + "log" + + "github.com/hashicorp/terraform/backend" +) + +// backend.CLI impl. 
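+//
+// CLIInit copies CLI-level options onto the backend. A minimal caller
+// sketch (hypothetical, for illustration only):
+//
+//	var b backend.Backend = New()
+//	if cli, ok := b.(backend.CLI); ok {
+//		err := cli.CLIInit(opts) // opts is a *backend.CLIOpts
+//		// handle err
+//	}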
+func (b *Local) CLIInit(opts *backend.CLIOpts) error { + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ShowDiagnostics = opts.ShowDiagnostics + b.ContextOpts = opts.ContextOpts + b.OpInput = opts.Input + b.OpValidation = opts.Validation + b.RunningInAutomation = opts.RunningInAutomation + + // configure any new cli options + if opts.StatePath != "" { + log.Printf("[TRACE] backend/local: CLI option -state is overriding state path to %s", opts.StatePath) + b.OverrideStatePath = opts.StatePath + } + + if opts.StateOutPath != "" { + log.Printf("[TRACE] backend/local: CLI option -state-out is overriding state output path to %s", opts.StateOutPath) + b.OverrideStateOutPath = opts.StateOutPath + } + + if opts.StateBackupPath != "" { + log.Printf("[TRACE] backend/local: CLI option -backup is overriding state backup path to %s", opts.StateBackupPath) + b.OverrideStateBackupPath = opts.StateBackupPath + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go new file mode 100644 index 00000000..59100474 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/counthookaction_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type=countHookAction hook_count_action.go"; DO NOT EDIT. + +package local + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[countHookActionAdd-0] + _ = x[countHookActionChange-1] + _ = x[countHookActionRemove-2] +} + +const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove" + +var _countHookAction_index = [...]uint8{0, 18, 39, 60} + +func (i countHookAction) String() string { + if i >= countHookAction(len(_countHookAction_index)-1) { + return "countHookAction(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _countHookAction_name[_countHookAction_index[i]:_countHookAction_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go new file mode 100644 index 00000000..f9d619ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_count.go @@ -0,0 +1,106 @@ +package local + +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// CountHook is a hook that counts the number of resources +// added, removed, changed during the course of an apply. 
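+//
+// A hypothetical post-run summary using these counters (illustrative only):
+//
+//	h := new(CountHook)
+//	// ...run an operation with h appended to ContextOpts.Hooks...
+//	fmt.Printf("%d added, %d changed, %d destroyed", h.Added, h.Changed, h.Removed)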
+type CountHook struct { + Added int + Changed int + Removed int + + ToAdd int + ToChange int + ToRemove int + ToRemoveAndAdd int + + pending map[string]plans.Action + + sync.Mutex + terraform.NilHook +} + +var _ terraform.Hook = (*CountHook)(nil) + +func (h *CountHook) Reset() { + h.Lock() + defer h.Unlock() + + h.pending = nil + h.Added = 0 + h.Changed = 0 + h.Removed = 0 +} + +func (h *CountHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.pending == nil { + h.pending = make(map[string]plans.Action) + } + + h.pending[addr.String()] = action + + return terraform.HookActionContinue, nil +} + +func (h *CountHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + if h.pending != nil { + pendingKey := addr.String() + if action, ok := h.pending[pendingKey]; ok { + delete(h.pending, pendingKey) + + if err == nil { + switch action { + case plans.CreateThenDelete, plans.DeleteThenCreate: + h.Added++ + h.Removed++ + case plans.Create: + h.Added++ + case plans.Delete: + h.Removed++ + case plans.Update: + h.Changed++ + } + } + } + } + + return terraform.HookActionContinue, nil +} + +func (h *CountHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (terraform.HookAction, error) { + h.Lock() + defer h.Unlock() + + // We don't count anything for data resources + if addr.Resource.Resource.Mode == addrs.DataResourceMode { + return terraform.HookActionContinue, nil + } + + switch action { + case plans.CreateThenDelete, plans.DeleteThenCreate: + h.ToRemoveAndAdd += 1 + case plans.Create: + h.ToAdd += 1 + case plans.Delete: + h.ToRemove += 1 + case plans.Update: + h.ToChange += 1 + } + + return terraform.HookActionContinue, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go new file mode 100644 index 00000000..9a28464c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_count_action.go @@ -0,0 +1,11 @@ +package local + +//go:generate stringer -type=countHookAction hook_count_action.go + +type countHookAction byte + +const ( + countHookActionAdd countHookAction = iota + countHookActionChange + countHookActionRemove +) diff --git a/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go b/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go new file mode 100644 index 00000000..a2f78475 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/local/hook_state.go @@ -0,0 +1,33 @@ +package local + +import ( + "sync" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" +) + +// StateHook is a hook that continuously updates the state by calling +// WriteState on a state.State. 
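+//
+// (opApply assigns stateHook.StateMgr = opState so that each PostStateUpdate
+// fired during an apply is written straight through to the state manager.)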
+type StateHook struct {
+	terraform.NilHook
+	sync.Mutex
+
+	StateMgr statemgr.Writer
+}
+
+var _ terraform.Hook = (*StateHook)(nil)
+
+func (h *StateHook) PostStateUpdate(new *states.State) (terraform.HookAction, error) {
+	h.Lock()
+	defer h.Unlock()
+
+	if h.StateMgr != nil {
+		if err := h.StateMgr.WriteState(new); err != nil {
+			return terraform.HookActionHalt, err
+		}
+	}
+
+	return terraform.HookActionContinue, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/local/testing.go b/vendor/github.com/hashicorp/terraform/backend/local/testing.go
new file mode 100644
index 00000000..dccddb5d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/local/testing.go
@@ -0,0 +1,213 @@
+package local
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/states/statemgr"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// TestLocal returns a configured Local struct with temporary paths and
+// in-memory ContextOpts.
+//
+// No operations will be called on the returned value, so you can still set
+// public fields without any locks.
+func TestLocal(t *testing.T) (*Local, func()) {
+	t.Helper()
+	tempDir := testTempDir(t)
+
+	local := New()
+	local.StatePath = filepath.Join(tempDir, "state.tfstate")
+	local.StateOutPath = filepath.Join(tempDir, "state.tfstate")
+	local.StateBackupPath = filepath.Join(tempDir, "state.tfstate.bak")
+	local.StateWorkspaceDir = filepath.Join(tempDir, "state.tfstate.d")
+	local.ContextOpts = &terraform.ContextOpts{}
+
+	local.ShowDiagnostics = func(vals ...interface{}) {
+		var diags tfdiags.Diagnostics
+		diags = diags.Append(vals...)
+		for _, diag := range diags {
+			// NOTE: Since the caller here is not directly the TestLocal
+			// function, t.Helper doesn't apply and so the log source
+			// isn't correctly shown in the test log output. This seems
+			// unavoidable as long as this is happening so indirectly.
+			desc := diag.Description()
+			if desc.Detail != "" {
+				t.Logf("%s: %s", desc.Summary, desc.Detail)
+			} else {
+				t.Log(desc.Summary)
+			}
+			if local.CLI != nil {
+				local.CLI.Error(desc.Summary)
+			}
+		}
+	}
+
+	cleanup := func() {
+		if err := os.RemoveAll(tempDir); err != nil {
+			t.Fatal("error cleaning up test:", err)
+		}
+	}
+
+	return local, cleanup
+}
+
+// TestLocalProvider modifies the ContextOpts of the *Local parameter to
+// have a provider with the given name.
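+//
+// A typical test setup sketch (the provider name "test" is arbitrary):
+//
+//	b, cleanup := TestLocal(t)
+//	defer cleanup()
+//	p := TestLocalProvider(t, b, "test", nil) // nil schema defaults to empty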
+func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.ProviderSchema) *terraform.MockProvider {
+	// Build a mock resource provider for in-memory operations
+	p := new(terraform.MockProvider)
+
+	if schema == nil {
+		schema = &terraform.ProviderSchema{} // default schema is empty
+	}
+	p.GetSchemaReturn = schema
+
+	p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
+		rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName)
+		if rSchema == nil {
+			rSchema = &configschema.Block{} // default schema is empty
+		}
+		plannedVals := map[string]cty.Value{}
+		for name, attrS := range rSchema.Attributes {
+			val := req.ProposedNewState.GetAttr(name)
+			if attrS.Computed && val.IsNull() {
+				val = cty.UnknownVal(attrS.Type)
+			}
+			plannedVals[name] = val
+		}
+		for name := range rSchema.BlockTypes {
+			// For simplicity's sake we just copy the block attributes over
+			// verbatim, since this package's mock providers are all relatively
+			// simple -- we're testing the backend, not esoteric provider features.
+			plannedVals[name] = req.ProposedNewState.GetAttr(name)
+		}
+
+		return providers.PlanResourceChangeResponse{
+			PlannedState:   cty.ObjectVal(plannedVals),
+			PlannedPrivate: req.PriorPrivate,
+		}
+	}
+	p.ReadResourceFn = func(req providers.ReadResourceRequest) providers.ReadResourceResponse {
+		return providers.ReadResourceResponse{NewState: req.PriorState}
+	}
+	p.ReadDataSourceFn = func(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
+		return providers.ReadDataSourceResponse{State: req.Config}
+	}
+
+	// Initialize the opts
+	if b.ContextOpts == nil {
+		b.ContextOpts = &terraform.ContextOpts{}
+	}
+
+	// Setup our provider
+	b.ContextOpts.ProviderResolver = providers.ResolverFixed(
+		map[string]providers.Factory{
+			name: providers.FactoryFixed(p),
+		},
+	)
+
+	return p
+}
+
+// TestLocalSingleState is a backend implementation that wraps Local
+// and modifies it to only support single states (returns
+// ErrWorkspacesNotSupported for multi-state operations).
+//
+// This isn't an actual use case; it is exported just to provide an
+// easy way to test that behavior.
+type TestLocalSingleState struct {
+	*Local
+}
+
+// TestNewLocalSingle is a factory for creating a TestLocalSingleState.
+// This function matches the signature required for backend/init.
+func TestNewLocalSingle() backend.Backend {
+	return &TestLocalSingleState{Local: New()}
+}
+
+func (b *TestLocalSingleState) Workspaces() ([]string, error) {
+	return nil, backend.ErrWorkspacesNotSupported
+}
+
+func (b *TestLocalSingleState) DeleteWorkspace(string) error {
+	return backend.ErrWorkspacesNotSupported
+}
+
+func (b *TestLocalSingleState) StateMgr(name string) (statemgr.Full, error) {
+	if name != backend.DefaultStateName {
+		return nil, backend.ErrWorkspacesNotSupported
+	}
+
+	return b.Local.StateMgr(name)
+}
+
+// TestLocalNoDefaultState is a backend implementation that wraps
+// Local and modifies it to support named states, but not the
+// default state. It returns ErrDefaultWorkspaceNotSupported when
+// the DefaultStateName is used.
+type TestLocalNoDefaultState struct {
+	*Local
+}
+
+// TestNewLocalNoDefault is a factory for creating a TestLocalNoDefaultState.
+// This function matches the signature required for backend/init.
+func TestNewLocalNoDefault() backend.Backend { + return &TestLocalNoDefaultState{Local: New()} +} + +func (b *TestLocalNoDefaultState) Workspaces() ([]string, error) { + workspaces, err := b.Local.Workspaces() + if err != nil { + return nil, err + } + + filtered := workspaces[:0] + for _, name := range workspaces { + if name != backend.DefaultStateName { + filtered = append(filtered, name) + } + } + + return filtered, nil +} + +func (b *TestLocalNoDefaultState) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.DeleteWorkspace(name) +} + +func (b *TestLocalNoDefaultState) StateMgr(name string) (statemgr.Full, error) { + if name == backend.DefaultStateName { + return nil, backend.ErrDefaultWorkspaceNotSupported + } + return b.Local.StateMgr(name) +} + +func testTempDir(t *testing.T) string { + d, err := ioutil.TempDir("", "tf") + if err != nil { + t.Fatalf("err: %s", err) + } + + return d +} + +func testStateFile(t *testing.T, path string, s *states.State) { + stateFile := statemgr.NewFilesystem(path) + stateFile.WriteState(s) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/nil.go b/vendor/github.com/hashicorp/terraform/backend/nil.go new file mode 100644 index 00000000..8c46f49d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/nil.go @@ -0,0 +1,40 @@ +package backend + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Nil is a no-op implementation of Backend. +// +// This is useful to embed within another struct to implement all of the +// backend interface for testing. +type Nil struct{} + +func (Nil) ConfigSchema() *configschema.Block { + return &configschema.Block{} +} + +func (Nil) PrepareConfig(v cty.Value) (cty.Value, tfdiags.Diagnostics) { + return v, nil +} + +func (Nil) Configure(cty.Value) tfdiags.Diagnostics { + return nil +} + +func (Nil) StateMgr(string) (statemgr.Full, error) { + // We must return a non-nil manager to adhere to the interface, so + // we'll return an in-memory-only one. + return statemgr.NewFullFake(statemgr.NewTransientInMemory(nil), nil), nil +} + +func (Nil) DeleteWorkspace(string) error { + return nil +} + +func (Nil) Workspaces() ([]string, error) { + return []string{DefaultStateName}, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/operation_type.go b/vendor/github.com/hashicorp/terraform/backend/operation_type.go new file mode 100644 index 00000000..1739dc7f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/operation_type.go @@ -0,0 +1,14 @@ +package backend + +//go:generate stringer -type=OperationType operation_type.go + +// OperationType is an enum used with Operation to specify the operation +// type to perform for Terraform. 
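+// The zero value is OperationTypeInvalid; String is provided by the +// generated stringer in operationtype_string.go.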
+type OperationType uint + +const ( + OperationTypeInvalid OperationType = iota + OperationTypeRefresh + OperationTypePlan + OperationTypeApply +) diff --git a/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go new file mode 100644 index 00000000..fe84d848 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/operationtype_string.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -type=OperationType operation_type.go"; DO NOT EDIT. + +package backend + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[OperationTypeInvalid-0] + _ = x[OperationTypeRefresh-1] + _ = x[OperationTypePlan-2] + _ = x[OperationTypeApply-3] +} + +const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply" + +var _OperationType_index = [...]uint8{0, 20, 40, 57, 75} + +func (i OperationType) String() string { + if i >= OperationType(len(_OperationType_index)-1) { + return "OperationType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _OperationType_name[_OperationType_index[i]:_OperationType_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/backend.go new file mode 100644 index 00000000..d085f21b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/backend.go @@ -0,0 +1,100 @@ +package artifactory + +import ( + "context" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" +) + +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("ARTIFACTORY_USERNAME", nil), + Description: "Username", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("ARTIFACTORY_PASSWORD", nil), + Description: "Password", + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("ARTIFACTORY_URL", nil), + Description: "Artifactory base URL", + }, + "repo": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The repository name", + }, + "subpath": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Path within the repository", + }, + }, + } + + b := &Backend{Backend: s} + b.Backend.ConfigureFunc = b.configure + return b +} + +type Backend struct { + *schema.Backend + + client *ArtifactoryClient +} + +func (b *Backend) configure(ctx context.Context) error { + data := schema.FromContextBackendConfig(ctx) + + userName := data.Get("username").(string) + password := data.Get("password").(string) + url := data.Get("url").(string) + repo := data.Get("repo").(string) + subpath := data.Get("subpath").(string) + + clientConf := &artifactory.ClientConfig{ + BaseURL: url, + Username: userName, + Password: password, + } + nativeClient := artifactory.NewClient(clientConf) + + b.client =
&ArtifactoryClient{ + nativeClient: &nativeClient, + userName: userName, + password: password, + url: url, + repo: repo, + subpath: subpath, + } + return nil +} + +func (b *Backend) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *Backend) DeleteWorkspace(string) error { + return backend.ErrWorkspacesNotSupported +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + return &remote.State{ + Client: b.client, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/client.go new file mode 100644 index 00000000..030c54b7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/artifactory/client.go @@ -0,0 +1,63 @@ +package artifactory + +import ( + "crypto/md5" + "fmt" + "strings" + + "github.com/hashicorp/terraform/state/remote" + artifactory "github.com/lusis/go-artifactory/src/artifactory.v401" +) + +const ARTIF_TFSTATE_NAME = "terraform.tfstate" + +type ArtifactoryClient struct { + nativeClient *artifactory.ArtifactoryClient + userName string + password string + url string + repo string + subpath string +} + +func (c *ArtifactoryClient) Get() (*remote.Payload, error) { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + output, err := c.nativeClient.Get(p, make(map[string]string)) + if err != nil { + if strings.Contains(err.Error(), "404") { + return nil, nil + } + return nil, err + } + + // TODO: migrate to using X-Checksum-Md5 header from artifactory + // needs to be exposed by go-artifactory first + + hash := md5.Sum(output) + payload := &remote.Payload{ + Data: output, + MD5: hash[:md5.Size], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *ArtifactoryClient) Put(data []byte) error { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + if _, err := c.nativeClient.Put(p, string(data), make(map[string]string)); err == nil { + return nil + } else { + return fmt.Errorf("Failed to upload state: %v", err) + } +} + +func (c *ArtifactoryClient) Delete() error { + p := fmt.Sprintf("%s/%s/%s", c.repo, c.subpath, ARTIF_TFSTATE_NAME) + err := c.nativeClient.Delete(p) + return err +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/arm_client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/arm_client.go new file mode 100644 index 00000000..bcc511a4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/arm_client.go @@ -0,0 +1,171 @@ +package azure + +import ( + "context" + "fmt" + "log" + "net/url" + "os" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources" + armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage" + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/hashicorp/go-azure-helpers/authentication" + "github.com/hashicorp/terraform/httpclient" +) + +type ArmClient struct { + // These Clients are only initialized if an Access Key isn't 
provided + groupsClient *resources.GroupsClient + storageAccountsClient *armStorage.AccountsClient + + accessKey string + environment azure.Environment + resourceGroupName string + storageAccountName string + sasToken string +} + +func buildArmClient(config BackendConfig) (*ArmClient, error) { + env, err := buildArmEnvironment(config) + if err != nil { + return nil, err + } + + client := ArmClient{ + environment: *env, + resourceGroupName: config.ResourceGroupName, + storageAccountName: config.StorageAccountName, + } + + // if we have an Access Key - we don't need the other clients + if config.AccessKey != "" { + client.accessKey = config.AccessKey + return &client, nil + } + + // likewise with a SAS token + if config.SasToken != "" { + client.sasToken = config.SasToken + return &client, nil + } + + builder := authentication.Builder{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + SubscriptionID: config.SubscriptionID, + TenantID: config.TenantID, + CustomResourceManagerEndpoint: config.CustomResourceManagerEndpoint, + Environment: config.Environment, + MsiEndpoint: config.MsiEndpoint, + + // Feature Toggles + SupportsAzureCliToken: true, + SupportsClientSecretAuth: true, + SupportsManagedServiceIdentity: config.UseMsi, + // TODO: support for Client Certificate auth + } + armConfig, err := builder.Build() + if err != nil { + return nil, fmt.Errorf("Error building ARM Config: %+v", err) + } + + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, armConfig.TenantID) + if err != nil { + return nil, err + } + + auth, err := armConfig.GetAuthorizationToken(oauthConfig, env.TokenAudience) + if err != nil { + return nil, err + } + + accountsClient := armStorage.NewAccountsClientWithBaseURI(env.ResourceManagerEndpoint, armConfig.SubscriptionID) + client.configureClient(&accountsClient.Client, auth) + client.storageAccountsClient = &accountsClient + + groupsClient := resources.NewGroupsClientWithBaseURI(env.ResourceManagerEndpoint, armConfig.SubscriptionID) + client.configureClient(&groupsClient.Client, auth) + client.groupsClient = &groupsClient + + return &client, nil +} + +func buildArmEnvironment(config BackendConfig) (*azure.Environment, error) { + if config.CustomResourceManagerEndpoint != "" { + log.Printf("[DEBUG] Loading Environment from Endpoint %q", config.CustomResourceManagerEndpoint) + return authentication.LoadEnvironmentFromUrl(config.CustomResourceManagerEndpoint) + } + + log.Printf("[DEBUG] Loading Environment %q", config.Environment) + return authentication.DetermineEnvironment(config.Environment) +} + +func (c ArmClient) getBlobClient(ctx context.Context) (*storage.BlobStorageClient, error) { + if c.accessKey != "" { + log.Printf("[DEBUG] Building the Blob Client from an Access Token") + storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, c.accessKey, c.environment) + if err != nil { + return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err) + } + client := storageClient.GetBlobService() + return &client, nil + } + + if c.sasToken != "" { + log.Printf("[DEBUG] Building the Blob Client from a SAS Token") + token := strings.TrimPrefix(c.sasToken, "?") + uri, err := url.ParseQuery(token) + if err != nil { + return nil, fmt.Errorf("Error parsing SAS Token: %+v", err) + } + + storageClient := storage.NewAccountSASClient(c.storageAccountName, uri, c.environment) + client := storageClient.GetBlobService() + return &client, nil + } + + log.Printf("[DEBUG] Building the 
Blob Client from an Access Token (using user credentials)") + keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName) + if err != nil { + return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err) + } + + if keys.Keys == nil { + return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName) + } + + accessKeys := *keys.Keys + accessKey := accessKeys[0].Value + + storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, *accessKey, c.environment) + if err != nil { + return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err) + } + client := storageClient.GetBlobService() + return &client, nil +} + +func (c *ArmClient) configureClient(client *autorest.Client, auth autorest.Authorizer) { + client.UserAgent = buildUserAgent() + client.Authorizer = auth + client.Sender = buildSender() + client.SkipResourceProviderRegistration = false + client.PollingDuration = 60 * time.Minute +} + +func buildUserAgent() string { + userAgent := httpclient.UserAgentString() + + // append the CloudShell version to the user agent if it exists + if azureAgent := os.Getenv("AZURE_HTTP_USER_AGENT"); azureAgent != "" { + userAgent = fmt.Sprintf("%s %s", userAgent, azureAgent) + } + + return userAgent +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go new file mode 100644 index 00000000..a2135c44 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend.go @@ -0,0 +1,224 @@ +package azure + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" +) + +// New creates a new backend for Azure remote state. 
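+// Authentication can use a raw access key, a SAS token, or Azure AD +// credentials (service principal or MSI); the deprecated arm_* fields +// are still read as fallbacks for their renamed equivalents.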
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "storage_account_name": { + Type: schema.TypeString, + Required: true, + Description: "The name of the storage account.", + }, + + "container_name": { + Type: schema.TypeString, + Required: true, + Description: "The container name.", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The blob key.", + }, + + "environment": { + Type: schema.TypeString, + Optional: true, + Description: "The Azure cloud environment.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENVIRONMENT", "public"), + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "The access key.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ACCESS_KEY", ""), + }, + + "sas_token": { + Type: schema.TypeString, + Optional: true, + Description: "A SAS Token used to interact with the Blob Storage Account.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SAS_TOKEN", ""), + }, + + "resource_group_name": { + Type: schema.TypeString, + Optional: true, + Description: "The resource group name.", + }, + + "client_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""), + }, + + "client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""), + }, + + "subscription_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Subscription ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_SUBSCRIPTION_ID", ""), + }, + + "tenant_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""), + }, + + "use_msi": { + Type: schema.TypeBool, + Optional: true, + Description: "Should Managed Service Identity be used?", + DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false), + }, + + "msi_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "The Managed Service Identity Endpoint.", + DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom Endpoint used to access the Azure Resource Manager APIs.", + DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""), + }, + + // Deprecated fields + "arm_client_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Client ID.", + Deprecated: "`arm_client_id` has been replaced by `client_id`", + }, + + "arm_client_secret": { + Type: schema.TypeString, + Optional: true, + Description: "The Client Secret.", + Deprecated: "`arm_client_secret` has been replaced by `client_secret`", + }, + + "arm_subscription_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Subscription ID.", + Deprecated: "`arm_subscription_id` has been replaced by `subscription_id`", + }, + + "arm_tenant_id": { + Type: schema.TypeString, + Optional: true, + Description: "The Tenant ID.", + Deprecated: "`arm_tenant_id` has been replaced by `tenant_id`", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + armClient *ArmClient + containerName string + keyName string +} + +type BackendConfig struct { + // Required + StorageAccountName string + + // Optional + AccessKey string + ClientID string + ClientSecret string +
CustomResourceManagerEndpoint string + Environment string + MsiEndpoint string + ResourceGroupName string + SasToken string + SubscriptionID string + TenantID string + UseMsi bool +} + +func (b *Backend) configure(ctx context.Context) error { + if b.containerName != "" { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + b.containerName = data.Get("container_name").(string) + b.keyName = data.Get("key").(string) + + // support for previously deprecated fields + clientId := valueFromDeprecatedField(data, "client_id", "arm_client_id") + clientSecret := valueFromDeprecatedField(data, "client_secret", "arm_client_secret") + subscriptionId := valueFromDeprecatedField(data, "subscription_id", "arm_subscription_id") + tenantId := valueFromDeprecatedField(data, "tenant_id", "arm_tenant_id") + + config := BackendConfig{ + AccessKey: data.Get("access_key").(string), + ClientID: clientId, + ClientSecret: clientSecret, + CustomResourceManagerEndpoint: data.Get("endpoint").(string), + Environment: data.Get("environment").(string), + MsiEndpoint: data.Get("msi_endpoint").(string), + ResourceGroupName: data.Get("resource_group_name").(string), + SasToken: data.Get("sas_token").(string), + StorageAccountName: data.Get("storage_account_name").(string), + SubscriptionID: subscriptionId, + TenantID: tenantId, + UseMsi: data.Get("use_msi").(bool), + } + + armClient, err := buildArmClient(config) + if err != nil { + return err + } + + if config.AccessKey == "" && config.SasToken == "" && config.ResourceGroupName == "" { + return fmt.Errorf("Either an Access Key / SAS Token or the Resource Group for the Storage Account must be specified") + } + + b.armClient = armClient + return nil +} + +func valueFromDeprecatedField(d *schema.ResourceData, key, deprecatedFieldKey string) string { + v := d.Get(key).(string) + + if v == "" { + v = d.Get(deprecatedFieldKey).(string) + } + + return v +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go new file mode 100644 index 00000000..cd0054ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/backend_state.go @@ -0,0 +1,159 @@ +package azure + +import ( + "context" + "fmt" + "sort" + "strings" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +const ( + // This will be used as directory name, the odd looking colon is simply to + // reduce the chance of name conflicts with existing objects. 
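+ // Workspace states are stored as blobs named <key>env:<workspace>, +	// while the default workspace uses the configured key directly (see +	// path below).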
+ keyEnvPrefix = "env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + prefix := b.keyName + keyEnvPrefix + params := storage.ListBlobsParameters{ + Prefix: prefix, + } + + ctx := context.TODO() + client, err := b.armClient.getBlobClient(ctx) + if err != nil { + return nil, err + } + container := client.GetContainerReference(b.containerName) + resp, err := container.ListBlobs(params) + if err != nil { + return nil, err + } + + envs := map[string]struct{}{} + for _, obj := range resp.Blobs { + key := obj.Name + if strings.HasPrefix(key, prefix) { + name := strings.TrimPrefix(key, prefix) + // we store the state in a key, not a directory + if strings.Contains(name, "/") { + continue + } + + envs[name] = struct{}{} + } + } + + result := []string{backend.DefaultStateName} + for name := range envs { + result = append(result, name) + } + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + ctx := context.TODO() + client, err := b.armClient.getBlobClient(ctx) + if err != nil { + return err + } + + containerReference := client.GetContainerReference(b.containerName) + blobReference := containerReference.GetBlobReference(b.path(name)) + options := &storage.DeleteBlobOptions{} + + return blobReference.Delete(options) +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + ctx := context.TODO() + blobClient, err := b.armClient.getBlobClient(ctx) + if err != nil { + return nil, err + } + + client := &RemoteClient{ + blobClient: *blobClient, + containerName: b.containerName, + keyName: b.path(name), + } + + stateMgr := &remote.State{Client: client} + + //if this isn't the default state name, we need to create the object so + //it's listed by States. + if name != backend.DefaultStateName { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock azure state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return b.keyName + keyEnvPrefix + name +} + +const errStateUnlock = ` +Error unlocking Azure state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. 
+` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go new file mode 100644 index 00000000..7ccc9a94 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/client.go @@ -0,0 +1,272 @@ +package azure + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "log" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +const ( + leaseHeader = "x-ms-lease-id" + // Must be lower case + lockInfoMetaKey = "terraformlockid" +) + +type RemoteClient struct { + blobClient storage.BlobStorageClient + containerName string + keyName string + leaseID string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + options := &storage.GetBlobOptions{} + + if c.leaseID != "" { + options.LeaseID = c.leaseID + } + + blob, err := blobReference.Get(options) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok { + if storErr.Code == "BlobNotFound" { + return nil, nil + } + } + return nil, err + } + + defer blob.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, blob); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + getOptions := &storage.GetBlobMetadataOptions{} + setOptions := &storage.SetBlobPropertiesOptions{} + putOptions := &storage.PutBlobOptions{} + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + + blobReference.Properties.ContentType = "application/json" + blobReference.Properties.ContentLength = int64(len(data)) + + if c.leaseID != "" { + getOptions.LeaseID = c.leaseID + setOptions.LeaseID = c.leaseID + putOptions.LeaseID = c.leaseID + } + + exists, err := blobReference.Exists() + if err != nil { + return err + } + + if exists { + err = blobReference.GetMetadata(getOptions) + if err != nil { + return err + } + } + + reader := bytes.NewReader(data) + + err = blobReference.CreateBlockBlobFromReader(reader, putOptions) + if err != nil { + return err + } + + return blobReference.SetProperties(setOptions) +} + +func (c *RemoteClient) Delete() error { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + options := &storage.DeleteBlobOptions{} + + if c.leaseID != "" { + options.LeaseID = c.leaseID + } + + return blobReference.Delete(options) +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName) + info.Path = stateName + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + getLockInfoErr := func(err error) error { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = 
multierror.Append(err, infoErr) + } + + return &state.LockError{ + Err: err, + Info: lockInfo, + } + } + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + leaseID, err := blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) + if err != nil { + if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" { + return "", getLockInfoErr(err) + } + + // failed to lock as there was no state blob, write empty state + stateMgr := &remote.State{Client: c} + + // ensure state is actually empty + if err := stateMgr.RefreshState(); err != nil { + return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err) + } + + log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state") + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + return "", fmt.Errorf("Failed to write empty state for locking: %s", err) + } + if err := stateMgr.PersistState(); err != nil { + return "", fmt.Errorf("Failed to persist empty state for locking: %s", err) + } + } + + leaseID, err = blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{}) + if err != nil { + return "", getLockInfoErr(err) + } + } + + info.ID = leaseID + c.leaseID = leaseID + + if err := c.writeLockInfo(info); err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{}) + if err != nil { + return nil, err + } + + raw := blobReference.Metadata[lockInfoMetaKey] + if raw == "" { + return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey) + } + + data, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, err + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal(data, lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +// writes info to blob meta data, deletes metadata entry if info is nil +func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error { + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference := containerReference.GetBlobReference(c.keyName) + err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{ + LeaseID: c.leaseID, + }) + if err != nil { + return err + } + + if info == nil { + delete(blobReference.Metadata, lockInfoMetaKey) + } else { + value := base64.StdEncoding.EncodeToString(info.Marshal()) + blobReference.Metadata[lockInfoMetaKey] = value + } + + opts := &storage.SetBlobMetadataOptions{ + LeaseID: c.leaseID, + } + return blobReference.SetMetadata(opts) +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &state.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + c.leaseID = lockInfo.ID + if err := c.writeLockInfo(nil); err != nil { + lockErr.Err = fmt.Errorf("failed to delete lock info from metadata: %s", err) + return lockErr + } + + containerReference := c.blobClient.GetContainerReference(c.containerName) + blobReference 
:= containerReference.GetBlobReference(c.keyName) + err = blobReference.ReleaseLease(id, &storage.LeaseOptions{}) + if err != nil { + lockErr.Err = err + return lockErr + } + + c.leaseID = "" + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/sender.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/sender.go new file mode 100644 index 00000000..90a2fb5b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/azure/sender.go @@ -0,0 +1,64 @@ +package azure + +import ( + "log" + "net/http" + "net/http/httputil" + + "github.com/Azure/go-autorest/autorest" + "github.com/hashicorp/terraform/helper/logging" +) + +func buildSender() autorest.Sender { + return autorest.DecorateSender(&http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + }, withRequestLogging()) +} + +func withRequestLogging() autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + // only log if logging's enabled + logLevel := logging.LogLevel() + if logLevel == "" { + return s.Do(r) + } + + // strip the authorization header prior to printing + authHeaderName := "Authorization" + auth := r.Header.Get(authHeaderName) + if auth != "" { + r.Header.Del(authHeaderName) + } + + // dump request to wire format + if dump, err := httputil.DumpRequestOut(r, true); err == nil { + log.Printf("[DEBUG] Azure Backend Request: \n%s\n", dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] Azure Backend Request: %s to %s\n", r.Method, r.URL) + } + + // add the auth header back + if auth != "" { + r.Header.Add(authHeaderName, auth) + } + + resp, err := s.Do(r) + if resp != nil { + // dump response to wire format + if dump, err2 := httputil.DumpResponse(resp, true); err2 == nil { + log.Printf("[DEBUG] Azure Backend Response for %s: \n%s\n", r.URL, dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] Azure Backend Response: %s for %s\n", resp.Status, r.URL) + } + } else { + log.Printf("[DEBUG] Request to %s completed with no response", r.URL) + } + return resp, err + }) + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go new file mode 100644 index 00000000..271a60b6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend.go @@ -0,0 +1,180 @@ +package consul + +import ( + "context" + "net" + "strings" + "time" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" +) + +// New creates a new backend for Consul remote state. 
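+// State is stored under the configured path; locking is enabled by +// default and uses Consul sessions, and can be turned off with the +// lock option.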
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "path": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Path to store state in Consul", + }, + + "access_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Access token for a Consul ACL", + Default: "", // To prevent input + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Address to the Consul Cluster", + Default: "", // To prevent input + }, + + "scheme": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Scheme to communicate to Consul with", + Default: "", // To prevent input + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Datacenter to communicate with", + Default: "", // To prevent input + }, + + "http_auth": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "HTTP Auth in the format of 'username:password'", + Default: "", // To prevent input + }, + + "gzip": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Compress the state data using gzip", + Default: false, + }, + + "lock": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Lock state access", + Default: true, + }, + + "ca_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CACERT", ""), + }, + + "cert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded certificate provided to the remote agent; requires use of key_file.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_CERT", ""), + }, + + "key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "A path to a PEM-encoded private key, required if cert_file is specified.", + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CLIENT_KEY", ""), + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + client *consulapi.Client + configData *schema.ResourceData + lock bool +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + + // Store the lock information + b.lock = b.configData.Get("lock").(bool) + + data := b.configData + + // Configure the client + config := consulapi.DefaultConfig() + + // replace the default Transport Dialer to reduce the KeepAlive + config.Transport.DialContext = dialContext + + if v, ok := data.GetOk("access_token"); ok && v.(string) != "" { + config.Token = v.(string) + } + if v, ok := data.GetOk("address"); ok && v.(string) != "" { + config.Address = v.(string) + } + if v, ok := data.GetOk("scheme"); ok && v.(string) != "" { + config.Scheme = v.(string) + } + if v, ok := data.GetOk("datacenter"); ok && v.(string) != "" { + config.Datacenter = v.(string) + } + + if v, ok := data.GetOk("ca_file"); ok && v.(string) != "" { + config.TLSConfig.CAFile = v.(string) + } + if v, ok := data.GetOk("cert_file"); ok && v.(string) != "" { + config.TLSConfig.CertFile = v.(string) + } + if v, ok := data.GetOk("key_file"); ok && v.(string) != "" { + config.TLSConfig.KeyFile = v.(string) + } + + if v, ok := data.GetOk("http_auth"); ok && 
v.(string) != "" { + auth := v.(string) + + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &consulapi.HttpBasicAuth{ + Username: username, + Password: password, + } + } + + client, err := consulapi.NewClient(config) + if err != nil { + return err + } + + b.client = client + return nil +} + +// dialContext is the DialContext function for the consul client transport. +// This is stored in a package var to inject a different dialer for tests. +var dialContext = (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 17 * time.Second, +}).DialContext diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go new file mode 100644 index 00000000..bdcc9da9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/backend_state.go @@ -0,0 +1,155 @@ +package consul + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" +) + +const ( + keyEnvPrefix = "-env:" +) + +func (b *Backend) Workspaces() ([]string, error) { + // List our raw path + prefix := b.configData.Get("path").(string) + keyEnvPrefix + keys, _, err := b.client.KV().Keys(prefix, "/", nil) + if err != nil { + return nil, err + } + + // Find the envs, we use a map since we can get duplicates with + // path suffixes. + envs := map[string]struct{}{} + for _, key := range keys { + // Consul should ensure this but it doesn't hurt to check again + if strings.HasPrefix(key, prefix) { + key = strings.TrimPrefix(key, prefix) + + // Ignore anything with a "/" in it since we store the state + // directly in a key not a directory. + if idx := strings.IndexRune(key, '/'); idx >= 0 { + continue + } + + envs[key] = struct{}{} + } + } + + result := make([]string, 1, len(envs)+1) + result[0] = backend.DefaultStateName + for k, _ := range envs { + result = append(result, k) + } + + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + // Determine the path of the data + path := b.path(name) + + // Delete it. We just delete it without any locking since + // the DeleteState API is documented as such. + _, err := b.client.KV().Delete(path, nil) + return err +} + +func (b *Backend) StateMgr(name string) (statemgr.Full, error) { + // Determine the path of the data + path := b.path(name) + + // Determine whether to gzip or not + gzip := b.configData.Get("gzip").(bool) + + // Build the state client + var stateMgr = &remote.State{ + Client: &RemoteClient{ + Client: b.client, + Path: path, + GZip: gzip, + lockState: b.lock, + }, + } + + if !b.lock { + stateMgr.DisableLocks() + } + + // the default state always exists + if name == backend.DefaultStateName { + return stateMgr, nil + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so States() knows it exists. 
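+ // The lock is released again below, once the sentinel state exists.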
+ lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Consul: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + return stateMgr, nil +} + +func (b *Backend) path(name string) string { + path := b.configData.Get("path").(string) + if name != backend.DefaultStateName { + path += fmt.Sprintf("%s%s", keyEnvPrefix, name) + } + + return path +} + +const errStateUnlock = ` +Error unlocking Consul state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +The Consul backend acquires a lock during initialization to ensure +the minimum required key/values are prepared. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go new file mode 100644 index 00000000..bd37712f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/consul/client.go @@ -0,0 +1,468 @@ +package consul + +import ( + "bytes" + "compress/gzip" + "context" + "crypto/md5" + "encoding/json" + "errors" + "fmt" + "log" + "sync" + "time" + + consulapi "github.com/hashicorp/consul/api" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +const ( + lockSuffix = "/.lock" + lockInfoSuffix = "/.lockinfo" + + // The Session TTL associated with this lock. + lockSessionTTL = "15s" + + // the delay time from when a session is lost to when the + // lock is released by the server + lockDelay = 5 * time.Second + // interval between attempts to reacquire a lost lock + lockReacquireInterval = 2 * time.Second +) + +var lostLockErr = errors.New("consul lock was lost") + +// RemoteClient is a remote client that stores data in Consul. +type RemoteClient struct { + Client *consulapi.Client + Path string + GZip bool + + mu sync.Mutex + // lockState is true if we're using locks + lockState bool + + // The index of the last state we wrote. + // If this is > 0, Put will perform a CAS to ensure that the state wasn't + // changed during the operation. This is important even with locks, because + // if the client loses the lock for some reason, then reacquires it, we + // need to make sure that the state was not modified. + modifyIndex uint64 + + consulLock *consulapi.Lock + lockCh <-chan struct{} + + info *state.LockInfo + + // cancel our goroutine which is monitoring the lock to automatically + // reacquire it when possible. 
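+ // monitorWG allows unlock to wait for the monitor goroutine to exit + // before clearing lockCh.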
+ monitorCancel context.CancelFunc + monitorWG sync.WaitGroup + + // sessionCancel cancels the Context used for session.RenewPeriodic, and is + // called when unlocking, or before creating a new lock if the lock is + // lost. + sessionCancel context.CancelFunc +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + c.mu.Lock() + defer c.mu.Unlock() + + pair, _, err := c.Client.KV().Get(c.Path, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + c.modifyIndex = pair.ModifyIndex + + payload := pair.Value + // If the payload starts with 0x1f, it's gzip, not json + if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' { + if data, err := uncompressState(pair.Value); err == nil { + payload = data + } else { + return nil, err + } + } + + md5 := md5.Sum(pair.Value) + return &remote.Payload{ + Data: payload, + MD5: md5[:], + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + c.mu.Lock() + defer c.mu.Unlock() + + payload := data + if c.GZip { + if compressedState, err := compressState(data); err == nil { + payload = compressedState + } else { + return err + } + } + + kv := c.Client.KV() + + // default to doing a CAS + verb := consulapi.KVCAS + + // Assume a 0 index doesn't need a CAS for now, since we are either + // creating a new state or purposely overwriting one. + if c.modifyIndex == 0 { + verb = consulapi.KVSet + } + + // KV.Put doesn't return the new index, so we use a single operation + // transaction to get the new index with a single request. + txOps := consulapi.KVTxnOps{ + &consulapi.KVTxnOp{ + Verb: verb, + Key: c.Path, + Value: payload, + Index: c.modifyIndex, + }, + } + + ok, resp, _, err := kv.Txn(txOps, nil) + if err != nil { + return err + } + + // transaction was rolled back + if !ok { + return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors) + } + + if len(resp.Results) != 1 { + // this probably shouldn't happen + return fmt.Errorf("expected only 1 response value, got: %d", len(resp.Results)) + } + + c.modifyIndex = resp.Results[0].ModifyIndex + return nil +} + +func (c *RemoteClient) Delete() error { + c.mu.Lock() + defer c.mu.Unlock() + + kv := c.Client.KV() + _, err := kv.Delete(c.Path, nil) + return err +} + +func (c *RemoteClient) putLockInfo(info *state.LockInfo) error { + info.Path = c.Path + info.Created = time.Now().UTC() + + kv := c.Client.KV() + _, err := kv.Put(&consulapi.KVPair{ + Key: c.Path + lockInfoSuffix, + Value: info.Marshal(), + }, nil) + + return err +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + path := c.Path + lockInfoSuffix + pair, _, err := c.Client.KV().Get(path, nil) + if err != nil { + return nil, err + } + if pair == nil { + return nil, nil + } + + li := &state.LockInfo{} + err = json.Unmarshal(pair.Value, li) + if err != nil { + return nil, fmt.Errorf("error unmarshaling lock info: %s", err) + } + + return li, nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return "", nil + } + + c.info = info + + // These checks are only to ensure we strictly follow the specification. + // Terraform shouldn't ever re-lock, so provide errors for the 2 possible + // states if this is called. + select { + case <-c.lockCh: + // We had a lock, but lost it.
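+ // (lockCh is closed once a previously held lock has been lost)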
+ return "", errors.New("lost consul lock, cannot re-lock") + default: + if c.lockCh != nil { + // we have an active lock already + return "", fmt.Errorf("state %q already locked", c.Path) + } + } + + return c.lock() +} + +// the lock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) lock() (string, error) { + // We create a new session here, so it can be canceled when the lock is + // lost or unlocked. + lockSession, err := c.createSession() + if err != nil { + return "", err + } + + // store the session ID for correlation with consul logs + c.info.Info = "consul session: " + lockSession + + opts := &consulapi.LockOptions{ + Key: c.Path + lockSuffix, + Session: lockSession, + + // only wait briefly, so terraform has the choice to fail fast or + // retry as needed. + LockWaitTime: time.Second, + LockTryOnce: true, + + // Don't let the lock monitor give up right away, as it's possible the + // session is still OK. While the session is refreshed at a rate of + // TTL/2, the lock monitor is an idle blocking request and is more + // susceptible to being closed by a lower network layer. + MonitorRetries: 5, + // + // The delay between lock monitor retries. + // While the session has a 15s TTL plus a 5s wait period on a lost + // lock, if we can't get our lock back in 10+ seconds something is + // wrong so we're going to drop the session and start over. + MonitorRetryTime: 2 * time.Second, + } + + c.consulLock, err = c.Client.LockOpts(opts) + if err != nil { + return "", err + } + + lockErr := &state.LockError{} + + lockCh, err := c.consulLock.Lock(make(chan struct{})) + if err != nil { + lockErr.Err = err + return "", lockErr + } + + if lockCh == nil { + lockInfo, e := c.getLockInfo() + if e != nil { + lockErr.Err = e + return "", lockErr + } + + lockErr.Info = lockInfo + + return "", lockErr + } + + c.lockCh = lockCh + + err = c.putLockInfo(c.info) + if err != nil { + if unlockErr := c.unlock(c.info.ID); unlockErr != nil { + err = multierror.Append(err, unlockErr) + } + + return "", err + } + + // Start a goroutine to monitor the lock state. + // If we lose the lock due to communication issues with the consul agent, + // attempt to immediately reacquire the lock. Put will verify the integrity + // of the state by using a CAS operation. + ctx, cancel := context.WithCancel(context.Background()) + c.monitorCancel = cancel + c.monitorWG.Add(1) + go func() { + defer c.monitorWG.Done() + select { + case <-c.lockCh: + log.Println("[ERROR] lost consul lock") + for { + c.mu.Lock() + // We lost our lock, so we need to cancel the session too. + // The CancelFunc is only replaced while holding Client.mu, so + // this is safe to call here. This will be replaced by the + // lock() call below. + c.sessionCancel() + + c.consulLock = nil + _, err := c.lock() + c.mu.Unlock() + + if err != nil { + // We failed to get the lock, keep trying as long as + // terraform is running. There may be changes in progress, + // so there's no use in aborting. Either we eventually + // reacquire the lock, or a Put will fail on a CAS. + log.Printf("[ERROR] could not reacquire lock: %s", err) + time.Sleep(lockReacquireInterval) + + select { + case <-ctx.Done(): + return + default: + } + continue + } + + // if the error was nil, the new lock started a new copy of + // this goroutine.
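+ // This instance exits so only one monitor runs per lock.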
+ return + } + + case <-ctx.Done(): + return + } + }() + + if testLockHook != nil { + testLockHook() + } + + return c.info.ID, nil +} + +// called after a lock is acquired +var testLockHook func() + +func (c *RemoteClient) createSession() (string, error) { + // create the context first. Even if the session creation fails, we assume + // that the CancelFunc is always callable. + ctx, cancel := context.WithCancel(context.Background()) + c.sessionCancel = cancel + + session := c.Client.Session() + se := &consulapi.SessionEntry{ + Name: consulapi.DefaultLockSessionName, + TTL: lockSessionTTL, + LockDelay: lockDelay, + } + + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + + log.Println("[INFO] created consul lock session", id) + + // keep the session renewed + go session.RenewPeriodic(lockSessionTTL, id, nil, ctx.Done()) + + return id, nil +} + +func (c *RemoteClient) Unlock(id string) error { + c.mu.Lock() + defer c.mu.Unlock() + + if !c.lockState { + return nil + } + + return c.unlock(id) +} + +// the unlock implementation. +// Only to be called while holding Client.mu +func (c *RemoteClient) unlock(id string) error { + // this doesn't use the lock id, because the lock is tied to the consul client. + if c.consulLock == nil || c.lockCh == nil { + return nil + } + + // cancel our monitoring goroutine + c.monitorCancel() + + defer func() { + c.consulLock = nil + + // The consul session is only used for this single lock, so cancel it + // after we unlock. + // The session is only created and replaced holding Client.mu, so the + // CancelFunc must be non-nil. + c.sessionCancel() + }() + + select { + case <-c.lockCh: + return lostLockErr + default: + } + + kv := c.Client.KV() + + var errs error + + if _, err := kv.Delete(c.Path+lockInfoSuffix, nil); err != nil { + errs = multierror.Append(errs, err) + } + + if err := c.consulLock.Unlock(); err != nil { + errs = multierror.Append(errs, err) + } + + // the monitoring goroutine may be in a select on the lockCh, so we need to + // wait for it to return before changing the value. + c.monitorWG.Wait() + c.lockCh = nil + + // This is only cleanup, and will fail if the lock was immediately taken by + // another client, so we don't report an error to the user here. 
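+ // Destroy removes the lock's KV entry once it is no longer held by + // any session.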
+	c.consulLock.Destroy()
+
+	return errs
+}
+
+func compressState(data []byte) ([]byte, error) {
+	b := new(bytes.Buffer)
+	gz := gzip.NewWriter(b)
+	if _, err := gz.Write(data); err != nil {
+		return nil, err
+	}
+	if err := gz.Flush(); err != nil {
+		return nil, err
+	}
+	if err := gz.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func uncompressState(data []byte) ([]byte, error) {
+	b := new(bytes.Buffer)
+	gz, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	// surface read errors (e.g. a corrupt gzip stream) instead of silently
+	// returning truncated state
+	if _, err := b.ReadFrom(gz); err != nil {
+		return nil, err
+	}
+	if err := gz.Close(); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/backend.go
new file mode 100644
index 00000000..aa030563
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/backend.go
@@ -0,0 +1,96 @@
+// legacy etcd2.x backend
+
+package etcdv2
+
+import (
+	"context"
+	"strings"
+
+	etcdapi "github.com/coreos/etcd/client"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+)
+
+func New() backend.Backend {
+	s := &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			"path": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The path where to store the state",
+			},
+			"endpoints": &schema.Schema{
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "A space-separated list of the etcd endpoints",
+			},
+			"username": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Username",
+			},
+			"password": &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Password",
+			},
+		},
+	}
+
+	result := &Backend{Backend: s}
+	result.Backend.ConfigureFunc = result.configure
+	return result
+}
+
+type Backend struct {
+	*schema.Backend
+
+	client etcdapi.Client
+	path   string
+}
+
+func (b *Backend) configure(ctx context.Context) error {
+	data := schema.FromContextBackendConfig(ctx)
+
+	b.path = data.Get("path").(string)
+
+	endpoints := data.Get("endpoints").(string)
+	username := data.Get("username").(string)
+	password := data.Get("password").(string)
+
+	config := etcdapi.Config{
+		Endpoints: strings.Split(endpoints, " "),
+		Username:  username,
+		Password:  password,
+	}
+
+	client, err := etcdapi.New(config)
+	if err != nil {
+		return err
+	}
+
+	b.client = client
+	return nil
+}
+
+func (b *Backend) Workspaces() ([]string, error) {
+	return nil, backend.ErrWorkspacesNotSupported
+}
+
+func (b *Backend) DeleteWorkspace(string) error {
+	return backend.ErrWorkspacesNotSupported
+}
+
+func (b *Backend) StateMgr(name string) (state.State, error) {
+	if name != backend.DefaultStateName {
+		return nil, backend.ErrWorkspacesNotSupported
+	}
+	return &remote.State{
+		Client: &EtcdClient{
+			Client: b.client,
+			Path:   b.path,
+		},
+	}, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/client.go
new file mode 100644
index 00000000..faf891ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv2/client.go
@@ -0,0 +1,46 @@
+package etcdv2
+
+import (
+	"context"
+	"crypto/md5"
+	"fmt"
+
+	etcdapi "github.com/coreos/etcd/client"
+	"github.com/hashicorp/terraform/state/remote"
+)
+
+// EtcdClient is a remote client that stores data in etcd.
+type EtcdClient struct {
+	Client etcdapi.Client
+	Path   string
+}
+
+func (c *EtcdClient) Get() (*remote.Payload, error) {
+	resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true})
+	if err != nil {
+		if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound {
+			return nil, nil
+		}
+		return nil, err
+	}
+	if resp.Node.Dir {
+		return nil, fmt.Errorf("path is a directory")
+	}
+
+	data := []byte(resp.Node.Value)
+	md5 := md5.Sum(data)
+	return &remote.Payload{
+		Data: data,
+		MD5:  md5[:],
+	}, nil
+}
+
+func (c *EtcdClient) Put(data []byte) error {
+	_, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil)
+	return err
+}
+
+func (c *EtcdClient) Delete() error {
+	_, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil)
+	return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go
new file mode 100644
index 00000000..fb3f5e20
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend.go
@@ -0,0 +1,157 @@
+package etcd
+
+import (
+	"context"
+
+	etcdv3 "github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/pkg/transport"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/schema"
+)
+
+const (
+	endpointsKey       = "endpoints"
+	usernameKey        = "username"
+	usernameEnvVarName = "ETCDV3_USERNAME"
+	passwordKey        = "password"
+	passwordEnvVarName = "ETCDV3_PASSWORD"
+	prefixKey          = "prefix"
+	lockKey            = "lock"
+	cacertPathKey      = "cacert_path"
+	certPathKey        = "cert_path"
+	keyPathKey         = "key_path"
+)
+
+func New() backend.Backend {
+	s := &schema.Backend{
+		Schema: map[string]*schema.Schema{
+			endpointsKey: &schema.Schema{
+				Type: schema.TypeList,
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+				MinItems:    1,
+				Required:    true,
+				Description: "Endpoints for the etcd cluster.",
+			},
+
+			usernameKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Username used to connect to the etcd cluster.",
+				DefaultFunc: schema.EnvDefaultFunc(usernameEnvVarName, ""),
+			},
+
+			passwordKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Password used to connect to the etcd cluster.",
+				DefaultFunc: schema.EnvDefaultFunc(passwordEnvVarName, ""),
+			},
+
+			prefixKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "An optional prefix to be added to keys when storing state in etcd.",
+				Default:     "",
+			},
+
+			lockKey: &schema.Schema{
+				Type:        schema.TypeBool,
+				Optional:    true,
+				Description: "Whether to lock state access.",
+				Default:     true,
+			},
+
+			cacertPathKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The path to a PEM-encoded CA bundle with which to verify certificates of TLS-enabled etcd servers.",
+				Default:     "",
+			},
+
+			certPathKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The path to a PEM-encoded certificate to provide to etcd for secure client identification.",
+				Default:     "",
+			},
+
+			keyPathKey: &schema.Schema{
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The path to a PEM-encoded key to provide to etcd for secure client identification.",
+				Default:     "",
+			},
+		},
+	}
+
+	result := &Backend{Backend: s}
+	result.Backend.ConfigureFunc = result.configure
+	return result
+}
+
+type Backend struct {
+	*schema.Backend
+
+	// The fields below are set from configure.
+	client *etcdv3.Client
+	data   *schema.ResourceData
+	lock   bool
+	prefix string
+}
+
+func (b *Backend) configure(ctx context.Context) error {
+	var err error
+	// Grab the resource data.
+	b.data = schema.FromContextBackendConfig(ctx)
+	// Store the lock information.
+	b.lock = b.data.Get(lockKey).(bool)
+	// Store the prefix information.
+	b.prefix = b.data.Get(prefixKey).(string)
+	// Initialize a client to test config.
+	b.client, err = b.rawClient()
+	// Return err, if any.
+	return err
+}
+
+func (b *Backend) rawClient() (*etcdv3.Client, error) {
+	config := etcdv3.Config{}
+	tlsInfo := transport.TLSInfo{}
+
+	if v, ok := b.data.GetOk(endpointsKey); ok {
+		config.Endpoints = retrieveEndpoints(v)
+	}
+	if v, ok := b.data.GetOk(usernameKey); ok && v.(string) != "" {
+		config.Username = v.(string)
+	}
+	if v, ok := b.data.GetOk(passwordKey); ok && v.(string) != "" {
+		config.Password = v.(string)
+	}
+	if v, ok := b.data.GetOk(cacertPathKey); ok && v.(string) != "" {
+		tlsInfo.TrustedCAFile = v.(string)
+	}
+	if v, ok := b.data.GetOk(certPathKey); ok && v.(string) != "" {
+		tlsInfo.CertFile = v.(string)
+	}
+	if v, ok := b.data.GetOk(keyPathKey); ok && v.(string) != "" {
+		tlsInfo.KeyFile = v.(string)
+	}
+
+	if tlsCfg, err := tlsInfo.ClientConfig(); err != nil {
+		return nil, err
+	} else if !tlsInfo.Empty() {
+		config.TLS = tlsCfg // Assign the TLS configuration only if it is valid and non-empty.
+	}
+
+	return etcdv3.New(config)
+}
+
+func retrieveEndpoints(v interface{}) []string {
+	var endpoints []string
+	list := v.([]interface{})
+	for _, ep := range list {
+		endpoints = append(endpoints, ep.(string))
+	}
+	return endpoints
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go
new file mode 100644
index 00000000..44bf0c58
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/backend_state.go
@@ -0,0 +1,104 @@
+package etcd
+
+import (
+	"context"
+	"fmt"
+	"sort"
+	"strings"
+
+	etcdv3 "github.com/coreos/etcd/clientv3"
+
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/states"
+)
+
+func (b *Backend) Workspaces() ([]string, error) {
+	res, err := b.client.Get(context.TODO(), b.prefix, etcdv3.WithPrefix(), etcdv3.WithKeysOnly())
+	if err != nil {
+		return nil, err
+	}
+
+	result := make([]string, 1, len(res.Kvs)+1)
+	result[0] = backend.DefaultStateName
+	for _, kv := range res.Kvs {
+		result = append(result, strings.TrimPrefix(string(kv.Key), b.prefix))
+	}
+	sort.Strings(result[1:])
+
+	return result, nil
+}
+
+func (b *Backend) DeleteWorkspace(name string) error {
+	if name == backend.DefaultStateName || name == "" {
+		return fmt.Errorf("Can't delete default state.")
+	}
+
+	key := b.determineKey(name)
+
+	_, err := b.client.Delete(context.TODO(), key)
+	return err
+}
+
+func (b *Backend) StateMgr(name string) (state.State, error) {
+	var stateMgr state.State = &remote.State{
+		Client: &RemoteClient{
+			Client: b.client,
+			DoLock: b.lock,
+			Key:    b.determineKey(name),
+		},
+	}
+
+	if !b.lock {
+		stateMgr = &state.LockDisabled{Inner: stateMgr}
+	}
+
+	lockInfo := state.NewLockInfo()
+	lockInfo.Operation = "init"
+	lockId, err := stateMgr.Lock(lockInfo)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to lock state in etcd: %s.", err)
+	}
+
+	lockUnlock := func(parent error) error {
+		if err := stateMgr.Unlock(lockId); err != nil {
+			return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err)
+		}
+		return parent
+	}
+
+	if err := stateMgr.RefreshState(); err != nil {
+		err = lockUnlock(err)
+		return nil, err
+	}
+
+	if v := stateMgr.State(); v == nil {
+		if err := stateMgr.WriteState(states.NewState()); err != nil {
+			err = lockUnlock(err)
+			return nil, err
+		}
+		if err := stateMgr.PersistState(); err != nil {
+			err = lockUnlock(err)
+			return nil, err
+		}
+	}
+
+	if err := lockUnlock(nil); err != nil {
+		return nil, err
+	}
+
+	return stateMgr, nil
+}
+
+func (b *Backend) determineKey(name string) string {
+	return b.prefix + name
+}
+
+const errStateUnlock = `
+Error unlocking etcd state. Lock ID: %s
+
+Error: %s
+
+You may have to force-unlock this state in order to use it again.
+`
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go
new file mode 100644
index 00000000..155e8d8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/etcdv3/client.go
@@ -0,0 +1,211 @@
+package etcd
+
+import (
+	"context"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	etcdv3 "github.com/coreos/etcd/clientv3"
+	etcdv3sync "github.com/coreos/etcd/clientv3/concurrency"
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+)
+
+const (
+	lockAcquireTimeout = 2 * time.Second
+	lockInfoSuffix     = ".lockinfo"
+)
+
+// RemoteClient is a remote client that will store data in etcd.
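+// Locking is built on an etcd clientv3 concurrency session and mutex: the
+// session is created in lock() and closed in unlock(), so a lock held by a
+// crashed process is released once the session's lease expires.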
+type RemoteClient struct {
+	Client *etcdv3.Client
+	DoLock bool
+	Key    string
+
+	etcdMutex   *etcdv3sync.Mutex
+	etcdSession *etcdv3sync.Session
+	info        *state.LockInfo
+	mu          sync.Mutex
+	modRevision int64
+}
+
+func (c *RemoteClient) Get() (*remote.Payload, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	res, err := c.Client.KV.Get(context.TODO(), c.Key)
+	if err != nil {
+		return nil, err
+	}
+	if res.Count == 0 {
+		return nil, nil
+	}
+	if res.Count >= 2 {
+		return nil, fmt.Errorf("Expected a single result but got %d.", res.Count)
+	}
+
+	c.modRevision = res.Kvs[0].ModRevision
+
+	payload := res.Kvs[0].Value
+	md5 := md5.Sum(payload)
+
+	return &remote.Payload{
+		Data: payload,
+		MD5:  md5[:],
+	}, nil
+}
+
+func (c *RemoteClient) Put(data []byte) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	res, err := etcdv3.NewKV(c.Client).Txn(context.TODO()).If(
+		etcdv3.Compare(etcdv3.ModRevision(c.Key), "=", c.modRevision),
+	).Then(
+		etcdv3.OpPut(c.Key, string(data)),
+		etcdv3.OpGet(c.Key),
+	).Commit()
+
+	if err != nil {
+		return err
+	}
+	if !res.Succeeded {
+		return fmt.Errorf("The transaction did not succeed.")
+	}
+	if len(res.Responses) != 2 {
+		return fmt.Errorf("Expected two responses but got %d.", len(res.Responses))
+	}
+
+	c.modRevision = res.Responses[1].GetResponseRange().Kvs[0].ModRevision
+	return nil
+}
+
+func (c *RemoteClient) Delete() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	_, err := c.Client.KV.Delete(context.TODO(), c.Key)
+	return err
+}
+
+func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.DoLock {
+		return "", nil
+	}
+	if c.etcdSession != nil {
+		return "", fmt.Errorf("state %q already locked", c.Key)
+	}
+
+	c.info = info
+	return c.lock()
+}
+
+func (c *RemoteClient) Unlock(id string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if !c.DoLock {
+		return nil
+	}
+
+	return c.unlock(id)
+}
+
+func (c *RemoteClient) deleteLockInfo(info *state.LockInfo) error {
+	res, err := c.Client.KV.Delete(context.TODO(), c.Key+lockInfoSuffix)
+	if err != nil {
+		return err
+	}
+	if res.Deleted == 0 {
+		return fmt.Errorf("No keys deleted for %s when deleting lock info.", c.Key+lockInfoSuffix)
+	}
+	return nil
+}
+
+func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
+	res, err := c.Client.KV.Get(context.TODO(), c.Key+lockInfoSuffix)
+	if err != nil {
+		return nil, err
+	}
+	if res.Count == 0 {
+		return nil, nil
+	}
+
+	li := &state.LockInfo{}
+	err = json.Unmarshal(res.Kvs[0].Value, li)
+	if err != nil {
+		return nil, fmt.Errorf("Error unmarshaling lock info: %s.", err)
+	}
+
+	return li, nil
+}
+
+func (c *RemoteClient) putLockInfo(info *state.LockInfo) error {
+	c.info.Path = c.etcdMutex.Key()
+	c.info.Created = time.Now().UTC()
+
+	_, err := c.Client.KV.Put(context.TODO(), c.Key+lockInfoSuffix, string(c.info.Marshal()))
+	return err
+}
+
+func (c *RemoteClient) lock() (string, error) {
+	session, err := etcdv3sync.NewSession(c.Client)
+	if err != nil {
+		return "", err
+	}
+
+	ctx, cancel := context.WithTimeout(context.TODO(), lockAcquireTimeout)
+	defer cancel()
+
+	mutex := etcdv3sync.NewMutex(session, c.Key)
+	if err1 := mutex.Lock(ctx); err1 != nil {
+		lockInfo, err2 := c.getLockInfo()
+		if err2 != nil {
+			return "", &state.LockError{Err: err2}
+		}
+		return "", &state.LockError{Info: lockInfo, Err: err1}
+	}
+
+	c.etcdMutex = mutex
+	c.etcdSession = session
+
+	err = c.putLockInfo(c.info)
+	if err != nil {
+		if unlockErr := c.unlock(c.info.ID); unlockErr != nil {
+			err = multierror.Append(err, unlockErr)
+		}
+		return "", err
+	}
+
+	return c.info.ID, nil
+}
+
+func (c *RemoteClient) unlock(id string) error {
+	if c.etcdMutex == nil {
+		return nil
+	}
+
+	var errs error
+
+	if err := c.deleteLockInfo(c.info); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+	if err := c.etcdMutex.Unlock(context.TODO()); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+	if err := c.etcdSession.Close(); err != nil {
+		errs = multierror.Append(errs, err)
+	}
+
+	c.etcdMutex = nil
+	c.etcdSession = nil
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go
new file mode 100644
index 00000000..f1e6b372
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend.go
@@ -0,0 +1,190 @@
+// Package gcs implements remote storage of state on Google Cloud Storage (GCS).
+package gcs
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"os"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/helper/pathorcontents"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/httpclient"
+	"golang.org/x/oauth2/jwt"
+	"google.golang.org/api/option"
+)
+
+// Backend implements "backend".Backend for GCS.
+// Input(), Validate() and Configure() are implemented by embedding *schema.Backend.
+// StateMgr(), DeleteWorkspace() and Workspaces() are implemented explicitly.
+type Backend struct {
+	*schema.Backend
+
+	storageClient  *storage.Client
+	storageContext context.Context
+
+	bucketName       string
+	prefix           string
+	defaultStateFile string
+
+	encryptionKey []byte
+}
+
+func New() backend.Backend {
+	b := &Backend{}
+	b.Backend = &schema.Backend{
+		ConfigureFunc: b.configure,
+		Schema: map[string]*schema.Schema{
+			"bucket": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "The name of the Google Cloud Storage bucket",
+			},
+
+			"path": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Path of the default state file",
+				Deprecated:  "Use the \"prefix\" option instead",
+			},
+
+			"prefix": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "The directory where state files will be saved inside the bucket",
+			},
+
+			"credentials": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Google Cloud JSON Account Key",
+				Default:     "",
+			},
+
+			"encryption_key": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "A 32 byte base64 encoded 'customer supplied encryption key' used to encrypt all state.",
+				Default:     "",
+			},
+
+			"project": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Google Cloud Project ID",
+				Default:     "",
+				Removed:     "Please remove this attribute. It is not used since the backend no longer creates the bucket if it does not yet exist.",
+			},
+
+			"region": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Region / location in which to create the bucket",
+				Default:     "",
+				Removed:     "Please remove this attribute. It is not used since the backend no longer creates the bucket if it does not yet exist.",
+			},
+		},
+	}
+
+	return b
+}
+
+func (b *Backend) configure(ctx context.Context) error {
+	if b.storageClient != nil {
+		return nil
+	}
+
+	// ctx is a background context with the backend config added.
+	// Since no context is passed to remoteClient.Get(), .Lock(), etc. but
+	// one is required for calling the GCP API, we're holding on to this
+	// context here and re-use it later.
+	b.storageContext = ctx
+
+	data := schema.FromContextBackendConfig(b.storageContext)
+
+	b.bucketName = data.Get("bucket").(string)
+	b.prefix = strings.TrimLeft(data.Get("prefix").(string), "/")
+	if b.prefix != "" && !strings.HasSuffix(b.prefix, "/") {
+		b.prefix = b.prefix + "/"
+	}
+
+	b.defaultStateFile = strings.TrimLeft(data.Get("path").(string), "/")
+
+	var opts []option.ClientOption
+
+	creds := data.Get("credentials").(string)
+	if creds == "" {
+		creds = os.Getenv("GOOGLE_CREDENTIALS")
+	}
+
+	if creds != "" {
+		var account accountFile
+
+		// to mirror how the provider works, we accept the file path or the contents
+		contents, _, err := pathorcontents.Read(creds)
+		if err != nil {
+			return fmt.Errorf("Error loading credentials: %s", err)
+		}
+
+		if err := json.Unmarshal([]byte(contents), &account); err != nil {
+			return fmt.Errorf("Error parsing credentials '%s': %s", contents, err)
+		}
+
+		conf := jwt.Config{
+			Email:      account.ClientEmail,
+			PrivateKey: []byte(account.PrivateKey),
+			Scopes:     []string{storage.ScopeReadWrite},
+			TokenURL:   "https://accounts.google.com/o/oauth2/token",
+		}
+
+		opts = append(opts, option.WithHTTPClient(conf.Client(ctx)))
+	} else {
+		opts = append(opts, option.WithScopes(storage.ScopeReadWrite))
+	}
+
+	opts = append(opts, option.WithUserAgent(httpclient.UserAgentString()))
+	client, err := storage.NewClient(b.storageContext, opts...)
+	if err != nil {
+		return fmt.Errorf("storage.NewClient() failed: %v", err)
+	}
+
+	b.storageClient = client
+
+	key := data.Get("encryption_key").(string)
+	if key == "" {
+		key = os.Getenv("GOOGLE_ENCRYPTION_KEY")
+	}
+
+	if key != "" {
+		kc, _, err := pathorcontents.Read(key)
+		if err != nil {
+			return fmt.Errorf("Error loading encryption key: %s", err)
+		}
+
+		// The GCS client expects a customer supplied encryption key to be
+		// passed in as a 32 byte long byte slice. The byte slice is base64
+		// encoded before being passed to the API. We take a base64 encoded key
+		// to remain consistent with the GCS docs.
+		// https://cloud.google.com/storage/docs/encryption#customer-supplied
+		// https://github.com/GoogleCloudPlatform/google-cloud-go/blob/def681/storage/storage.go#L1181
+		k, err := base64.StdEncoding.DecodeString(kc)
+		if err != nil {
+			return fmt.Errorf("Error decoding encryption key: %s", err)
+		}
+		b.encryptionKey = k
+	}
+
+	return nil
+}
+
+// accountFile represents the structure of the account file JSON file.
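+// Its JSON tags correspond to the keys of a Google Cloud service account key
+// file (private_key_id, private_key, client_email, client_id).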
+type accountFile struct {
+	PrivateKeyId string `json:"private_key_id"`
+	PrivateKey   string `json:"private_key"`
+	ClientEmail  string `json:"client_email"`
+	ClientId     string `json:"client_id"`
+}
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go
new file mode 100644
index 00000000..835ad96a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/backend_state.go
@@ -0,0 +1,160 @@
+package gcs
+
+import (
+	"fmt"
+	"path"
+	"sort"
+	"strings"
+
+	"cloud.google.com/go/storage"
+	"google.golang.org/api/iterator"
+
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/state/remote"
+	"github.com/hashicorp/terraform/states"
+)
+
+const (
+	stateFileSuffix = ".tfstate"
+	lockFileSuffix  = ".tflock"
+)
+
+// Workspaces returns a list of names for the workspaces found on GCS. The default
+// state is always returned as the first element in the slice.
+func (b *Backend) Workspaces() ([]string, error) {
+	states := []string{backend.DefaultStateName}
+
+	bucket := b.storageClient.Bucket(b.bucketName)
+	objs := bucket.Objects(b.storageContext, &storage.Query{
+		Delimiter: "/",
+		Prefix:    b.prefix,
+	})
+	for {
+		attrs, err := objs.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			return nil, fmt.Errorf("querying Cloud Storage failed: %v", err)
+		}
+
+		name := path.Base(attrs.Name)
+		if !strings.HasSuffix(name, stateFileSuffix) {
+			continue
+		}
+		st := strings.TrimSuffix(name, stateFileSuffix)
+
+		if st != backend.DefaultStateName {
+			states = append(states, st)
+		}
+	}
+
+	sort.Strings(states[1:])
+	return states, nil
+}
+
+// DeleteWorkspace deletes the named workspace. The "default" state cannot be deleted.
+func (b *Backend) DeleteWorkspace(name string) error {
+	if name == backend.DefaultStateName {
+		return fmt.Errorf("cowardly refusing to delete the %q state", name)
+	}
+
+	c, err := b.client(name)
+	if err != nil {
+		return err
+	}
+
+	return c.Delete()
+}
+
+// client returns a remoteClient for the named state.
+func (b *Backend) client(name string) (*remoteClient, error) {
+	if name == "" {
+		return nil, fmt.Errorf("%q is not a valid state name", name)
+	}
+
+	return &remoteClient{
+		storageContext: b.storageContext,
+		storageClient:  b.storageClient,
+		bucketName:     b.bucketName,
+		stateFilePath:  b.stateFile(name),
+		lockFilePath:   b.lockFile(name),
+		encryptionKey:  b.encryptionKey,
+	}, nil
+}
+
+// StateMgr reads and returns the named state from GCS. If the named state does
+// not yet exist, a new state file is created.
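+// The initial empty state is written while holding the lock, so concurrent
+// inits do not clobber each other.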
+func (b *Backend) StateMgr(name string) (state.State, error) { + c, err := b.client(name) + if err != nil { + return nil, err + } + + st := &remote.State{Client: c} + + // Grab the value + if err := st.RefreshState(); err != nil { + return nil, err + } + + // If we have no state, we have to create an empty state + if v := st.State(); v == nil { + + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := st.Lock(lockInfo) + if err != nil { + return nil, err + } + + // Local helper function so we can call it multiple places + unlock := func(baseErr error) error { + if err := st.Unlock(lockID); err != nil { + const unlockErrMsg = `%v + Additionally, unlocking the state file on Google Cloud Storage failed: + + Error message: %q + Lock ID (gen): %v + Lock file URL: %v + + You may have to force-unlock this state in order to use it again. + The GCloud backend acquires a lock during initialization to ensure + the initial state file is created.` + return fmt.Errorf(unlockErrMsg, baseErr, err.Error(), lockID, c.lockFileURL()) + } + + return baseErr + } + + if err := st.WriteState(states.NewState()); err != nil { + return nil, unlock(err) + } + if err := st.PersistState(); err != nil { + return nil, unlock(err) + } + + // Unlock, the state should now be initialized + if err := unlock(nil); err != nil { + return nil, err + } + + } + + return st, nil +} + +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName && b.defaultStateFile != "" { + return b.defaultStateFile + } + return path.Join(b.prefix, name+stateFileSuffix) +} + +func (b *Backend) lockFile(name string) string { + if name == backend.DefaultStateName && b.defaultStateFile != "" { + return strings.TrimSuffix(b.defaultStateFile, stateFileSuffix) + lockFileSuffix + } + return path.Join(b.prefix, name+lockFileSuffix) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go new file mode 100644 index 00000000..5163ba0c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/gcs/client.go @@ -0,0 +1,186 @@ +package gcs + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "strconv" + + "cloud.google.com/go/storage" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "golang.org/x/net/context" +) + +// remoteClient is used by "state/remote".State to read and write +// blobs representing state. 
+// Implements "state/remote".ClientLocker +type remoteClient struct { + storageContext context.Context + storageClient *storage.Client + bucketName string + stateFilePath string + lockFilePath string + encryptionKey []byte +} + +func (c *remoteClient) Get() (payload *remote.Payload, err error) { + stateFileReader, err := c.stateFile().NewReader(c.storageContext) + if err != nil { + if err == storage.ErrObjectNotExist { + return nil, nil + } else { + return nil, fmt.Errorf("Failed to open state file at %v: %v", c.stateFileURL(), err) + } + } + defer stateFileReader.Close() + + stateFileContents, err := ioutil.ReadAll(stateFileReader) + if err != nil { + return nil, fmt.Errorf("Failed to read state file from %v: %v", c.stateFileURL(), err) + } + + stateFileAttrs, err := c.stateFile().Attrs(c.storageContext) + if err != nil { + return nil, fmt.Errorf("Failed to read state file attrs from %v: %v", c.stateFileURL(), err) + } + + result := &remote.Payload{ + Data: stateFileContents, + MD5: stateFileAttrs.MD5, + } + + return result, nil +} + +func (c *remoteClient) Put(data []byte) error { + err := func() error { + stateFileWriter := c.stateFile().NewWriter(c.storageContext) + if _, err := stateFileWriter.Write(data); err != nil { + return err + } + return stateFileWriter.Close() + }() + if err != nil { + return fmt.Errorf("Failed to upload state to %v: %v", c.stateFileURL(), err) + } + + return nil +} + +func (c *remoteClient) Delete() error { + if err := c.stateFile().Delete(c.storageContext); err != nil { + return fmt.Errorf("Failed to delete state file %v: %v", c.stateFileURL(), err) + } + + return nil +} + +// Lock writes to a lock file, ensuring file creation. Returns the generation +// number, which must be passed to Unlock(). +func (c *remoteClient) Lock(info *state.LockInfo) (string, error) { + // update the path we're using + // we can't set the ID until the info is written + info.Path = c.lockFileURL() + + infoJson, err := json.Marshal(info) + if err != nil { + return "", err + } + + lockFile := c.lockFile() + w := lockFile.If(storage.Conditions{DoesNotExist: true}).NewWriter(c.storageContext) + err = func() error { + if _, err := w.Write(infoJson); err != nil { + return err + } + return w.Close() + }() + + if err != nil { + return "", c.lockError(fmt.Errorf("writing %q failed: %v", c.lockFileURL(), err)) + } + + info.ID = strconv.FormatInt(w.Attrs().Generation, 10) + + return info.ID, nil +} + +func (c *remoteClient) Unlock(id string) error { + gen, err := strconv.ParseInt(id, 10, 64) + if err != nil { + return fmt.Errorf("Lock ID should be numerical value, got '%s'", id) + } + + if err := c.lockFile().If(storage.Conditions{GenerationMatch: gen}).Delete(c.storageContext); err != nil { + return c.lockError(err) + } + + return nil +} + +func (c *remoteClient) lockError(err error) *state.LockError { + lockErr := &state.LockError{ + Err: err, + } + + info, infoErr := c.lockInfo() + if infoErr != nil { + lockErr.Err = multierror.Append(lockErr.Err, infoErr) + } else { + lockErr.Info = info + } + return lockErr +} + +// lockInfo reads the lock file, parses its contents and returns the parsed +// LockInfo struct. 
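+// The ID field is populated from the lock file's GCS generation number,
+// matching the ID that Lock() hands out.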
+func (c *remoteClient) lockInfo() (*state.LockInfo, error) { + r, err := c.lockFile().NewReader(c.storageContext) + if err != nil { + return nil, err + } + defer r.Close() + + rawData, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + info := &state.LockInfo{} + if err := json.Unmarshal(rawData, info); err != nil { + return nil, err + } + + // We use the Generation as the ID, so overwrite the ID in the json. + // This can't be written into the Info, since the generation isn't known + // until it's written. + attrs, err := c.lockFile().Attrs(c.storageContext) + if err != nil { + return nil, err + } + info.ID = strconv.FormatInt(attrs.Generation, 10) + + return info, nil +} + +func (c *remoteClient) stateFile() *storage.ObjectHandle { + h := c.storageClient.Bucket(c.bucketName).Object(c.stateFilePath) + if len(c.encryptionKey) > 0 { + return h.Key(c.encryptionKey) + } + return h +} + +func (c *remoteClient) stateFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.stateFilePath) +} + +func (c *remoteClient) lockFile() *storage.ObjectHandle { + return c.storageClient.Bucket(c.bucketName).Object(c.lockFilePath) +} + +func (c *remoteClient) lockFileURL() string { + return fmt.Sprintf("gs://%v/%v", c.bucketName, c.lockFilePath) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/http/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/http/backend.go new file mode 100644 index 00000000..ea5d8772 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/http/backend.go @@ -0,0 +1,192 @@ +package http + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "net/url" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The address of the REST endpoint", + }, + "update_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "POST", + Description: "HTTP method to use when updating state", + }, + "lock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The address of the lock REST endpoint", + }, + "unlock_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The address of the unlock REST endpoint", + }, + "lock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "LOCK", + Description: "The HTTP method to use when locking", + }, + "unlock_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "UNLOCK", + Description: "The HTTP method to use when unlocking", + }, + "username": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The username for HTTP basic authentication", + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The password for HTTP basic authentication", + }, + "skip_cert_verification": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Whether to skip TLS verification.", + }, + "retry_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 
2, + Description: "The number of HTTP request retries.", + }, + "retry_wait_min": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 1, + Description: "The minimum time in seconds to wait between HTTP request attempts.", + }, + "retry_wait_max": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 30, + Description: "The maximum time in seconds to wait between HTTP request attempts.", + }, + }, + } + + b := &Backend{Backend: s} + b.Backend.ConfigureFunc = b.configure + return b +} + +type Backend struct { + *schema.Backend + + client *httpClient +} + +func (b *Backend) configure(ctx context.Context) error { + data := schema.FromContextBackendConfig(ctx) + + address := data.Get("address").(string) + updateURL, err := url.Parse(address) + if err != nil { + return fmt.Errorf("failed to parse address URL: %s", err) + } + if updateURL.Scheme != "http" && updateURL.Scheme != "https" { + return fmt.Errorf("address must be HTTP or HTTPS") + } + + updateMethod := data.Get("update_method").(string) + + var lockURL *url.URL + if v, ok := data.GetOk("lock_address"); ok && v.(string) != "" { + var err error + lockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse lockAddress URL: %s", err) + } + if lockURL.Scheme != "http" && lockURL.Scheme != "https" { + return fmt.Errorf("lockAddress must be HTTP or HTTPS") + } + } + + lockMethod := data.Get("lock_method").(string) + + var unlockURL *url.URL + if v, ok := data.GetOk("unlock_address"); ok && v.(string) != "" { + var err error + unlockURL, err = url.Parse(v.(string)) + if err != nil { + return fmt.Errorf("failed to parse unlockAddress URL: %s", err) + } + if unlockURL.Scheme != "http" && unlockURL.Scheme != "https" { + return fmt.Errorf("unlockAddress must be HTTP or HTTPS") + } + } + + unlockMethod := data.Get("unlock_method").(string) + + client := cleanhttp.DefaultPooledClient() + + if data.Get("skip_cert_verification").(bool) { + // ignores TLS verification + client.Transport.(*http.Transport).TLSClientConfig = &tls.Config{ + InsecureSkipVerify: true, + } + } + + rClient := retryablehttp.NewClient() + rClient.HTTPClient = client + rClient.RetryMax = data.Get("retry_max").(int) + rClient.RetryWaitMin = time.Duration(data.Get("retry_wait_min").(int)) * time.Second + rClient.RetryWaitMax = time.Duration(data.Get("retry_wait_max").(int)) * time.Second + + b.client = &httpClient{ + URL: updateURL, + UpdateMethod: updateMethod, + + LockURL: lockURL, + LockMethod: lockMethod, + UnlockURL: unlockURL, + UnlockMethod: unlockMethod, + + Username: data.Get("username").(string), + Password: data.Get("password").(string), + + // accessible only for testing use + Client: rClient, + } + return nil +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + if name != backend.DefaultStateName { + return nil, backend.ErrWorkspacesNotSupported + } + + return &remote.State{Client: b.client}, nil +} + +func (b *Backend) Workspaces() ([]string, error) { + return nil, backend.ErrWorkspacesNotSupported +} + +func (b *Backend) DeleteWorkspace(string) error { + return backend.ErrWorkspacesNotSupported +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/http/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/http/client.go new file mode 100644 index 00000000..152d2668 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/http/client.go @@ -0,0 +1,247 @@ +package http + +import ( + "bytes" + "crypto/md5" 
+ "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/go-retryablehttp" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +// httpClient is a remote client that stores data in Consul or HTTP REST. +type httpClient struct { + // Update & Retrieve + URL *url.URL + UpdateMethod string + + // Locking + LockURL *url.URL + LockMethod string + UnlockURL *url.URL + UnlockMethod string + + // HTTP + Client *retryablehttp.Client + Username string + Password string + + lockID string + jsonLockInfo []byte +} + +func (c *httpClient) httpRequest(method string, url *url.URL, data *[]byte, what string) (*http.Response, error) { + // If we have data we need a reader + var reader io.Reader = nil + if data != nil { + reader = bytes.NewReader(*data) + } + + // Create the request + req, err := retryablehttp.NewRequest(method, url.String(), reader) + if err != nil { + return nil, fmt.Errorf("Failed to make %s HTTP request: %s", what, err) + } + // Setup basic auth + if c.Username != "" { + req.SetBasicAuth(c.Username, c.Password) + } + + // Work with data/body + if data != nil { + req.Header.Set("Content-Type", "application/json") + req.ContentLength = int64(len(*data)) + + // Generate the MD5 + hash := md5.Sum(*data) + b64 := base64.StdEncoding.EncodeToString(hash[:]) + req.Header.Set("Content-MD5", b64) + } + + // Make the request + resp, err := c.Client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to %s: %v", what, err) + } + + return resp, nil +} + +func (c *httpClient) Lock(info *state.LockInfo) (string, error) { + if c.LockURL == nil { + return "", nil + } + c.lockID = "" + + jsonLockInfo := info.Marshal() + resp, err := c.httpRequest(c.LockMethod, c.LockURL, &jsonLockInfo, "lock") + if err != nil { + return "", err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + c.lockID = info.ID + c.jsonLockInfo = jsonLockInfo + return info.ID, nil + case http.StatusUnauthorized: + return "", fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return "", fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusConflict, http.StatusLocked: + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("HTTP remote state already locked, failed to read body") + } + existing := state.LockInfo{} + err = json.Unmarshal(body, &existing) + if err != nil { + return "", fmt.Errorf("HTTP remote state already locked, failed to unmarshal body") + } + return "", fmt.Errorf("HTTP remote state already locked: ID=%s", existing.ID) + default: + return "", fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Unlock(id string) error { + if c.UnlockURL == nil { + return nil + } + + resp, err := c.httpRequest(c.UnlockMethod, c.UnlockURL, &c.jsonLockInfo, "unlock") + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } +} + +func (c *httpClient) Get() (*remote.Payload, error) { + resp, err := c.httpRequest("GET", c.URL, nil, "get state") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Handle the common status codes + switch resp.StatusCode { + case http.StatusOK: + // Handled after + case http.StatusNoContent: + return nil, nil + case 
http.StatusNotFound: + return nil, nil + case http.StatusUnauthorized: + return nil, fmt.Errorf("HTTP remote state endpoint requires auth") + case http.StatusForbidden: + return nil, fmt.Errorf("HTTP remote state endpoint invalid auth") + case http.StatusInternalServerError: + return nil, fmt.Errorf("HTTP remote state internal server error") + default: + return nil, fmt.Errorf("Unexpected HTTP response code %d", resp.StatusCode) + } + + // Read in the body + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, resp.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + // Create the payload + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + // Check for the MD5 + if raw := resp.Header.Get("Content-MD5"); raw != "" { + md5, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return nil, fmt.Errorf( + "Failed to decode Content-MD5 '%s': %s", raw, err) + } + + payload.MD5 = md5 + } else { + // Generate the MD5 + hash := md5.Sum(payload.Data) + payload.MD5 = hash[:] + } + + return payload, nil +} + +func (c *httpClient) Put(data []byte) error { + // Copy the target URL + base := *c.URL + + if c.lockID != "" { + query := base.Query() + query.Set("ID", c.lockID) + base.RawQuery = query.Encode() + } + + /* + // Set the force query parameter if needed + if force { + values := base.Query() + values.Set("force", "true") + base.RawQuery = values.Encode() + } + */ + + var method string = "POST" + if c.UpdateMethod != "" { + method = c.UpdateMethod + } + resp, err := c.httpRequest(method, &base, &data, "upload state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} + +func (c *httpClient) Delete() error { + // Make the request + resp, err := c.httpRequest("DELETE", c.URL, nil, "delete state") + if err != nil { + return err + } + defer resp.Body.Close() + + // Handle the error codes + switch resp.StatusCode { + case http.StatusOK: + return nil + default: + return fmt.Errorf("HTTP error: %d", resp.StatusCode) + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go new file mode 100644 index 00000000..c25a8050 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/backend.go @@ -0,0 +1,208 @@ +package inmem + +import ( + "context" + "errors" + "fmt" + "sort" + "sync" + "time" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + statespkg "github.com/hashicorp/terraform/states" +) + +// we keep the states and locks in package-level variables, so that they can be +// accessed from multiple instances of the backend. This better emulates +// backend instances accessing a single remote data store. +var ( + states stateMap + locks lockMap +) + +func init() { + Reset() +} + +// Reset clears out all existing state and lock data. +// This is used to initialize the package during init, as well as between +// tests. 
+func Reset() { + states = stateMap{ + m: map[string]*remote.State{}, + } + + locks = lockMap{ + m: map[string]*state.LockInfo{}, + } +} + +// New creates a new backend for Inmem remote state. +func New() backend.Backend { + // Set the schema + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "lock_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "initializes the state in a locked configuration", + }, + }, + } + backend := &Backend{Backend: s} + backend.Backend.ConfigureFunc = backend.configure + return backend +} + +type Backend struct { + *schema.Backend +} + +func (b *Backend) configure(ctx context.Context) error { + states.Lock() + defer states.Unlock() + + defaultClient := &RemoteClient{ + Name: backend.DefaultStateName, + } + + states.m[backend.DefaultStateName] = &remote.State{ + Client: defaultClient, + } + + // set the default client lock info per the test config + data := schema.FromContextBackendConfig(ctx) + if v, ok := data.GetOk("lock_id"); ok && v.(string) != "" { + info := state.NewLockInfo() + info.ID = v.(string) + info.Operation = "test" + info.Info = "test config" + + locks.lock(backend.DefaultStateName, info) + } + + return nil +} + +func (b *Backend) Workspaces() ([]string, error) { + states.Lock() + defer states.Unlock() + + var workspaces []string + + for s := range states.m { + workspaces = append(workspaces, s) + } + + sort.Strings(workspaces) + return workspaces, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + states.Lock() + defer states.Unlock() + + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + delete(states.m, name) + return nil +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + states.Lock() + defer states.Unlock() + + s := states.m[name] + if s == nil { + s = &remote.State{ + Client: &RemoteClient{ + Name: name, + }, + } + states.m[name] = s + + // to most closely replicate other implementations, we are going to + // take a lock and create a new state if it doesn't exist. + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockID, err := s.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock inmem state: %s", err) + } + defer s.Unlock(lockID) + + // If we have no state, we have to create an empty state + if v := s.State(); v == nil { + if err := s.WriteState(statespkg.NewState()); err != nil { + return nil, err + } + if err := s.PersistState(); err != nil { + return nil, err + } + } + } + + return s, nil +} + +type stateMap struct { + sync.Mutex + m map[string]*remote.State +} + +// Global level locks for inmem backends. 
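+// lockMap maps a workspace name to its lock info; the embedded mutex guards
+// the map itself.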
+type lockMap struct { + sync.Mutex + m map[string]*state.LockInfo +} + +func (l *lockMap) lock(name string, info *state.LockInfo) (string, error) { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + if lockInfo != nil { + lockErr := &state.LockError{ + Info: lockInfo, + } + + lockErr.Err = errors.New("state locked") + // make a copy of the lock info to avoid any testing shenanigans + *lockErr.Info = *lockInfo + return "", lockErr + } + + info.Created = time.Now().UTC() + l.m[name] = info + + return info.ID, nil +} + +func (l *lockMap) unlock(name, id string) error { + l.Lock() + defer l.Unlock() + + lockInfo := l.m[name] + + if lockInfo == nil { + return errors.New("state not locked") + } + + lockErr := &state.LockError{ + Info: &state.LockInfo{}, + } + + if id != lockInfo.ID { + lockErr.Err = errors.New("invalid lock id") + *lockErr.Info = *lockInfo + return lockErr + } + + delete(l.m, name) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go new file mode 100644 index 00000000..51c8d725 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/inmem/client.go @@ -0,0 +1,47 @@ +package inmem + +import ( + "crypto/md5" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +// RemoteClient is a remote client that stores data in memory for testing. +type RemoteClient struct { + Data []byte + MD5 []byte + Name string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + if c.Data == nil { + return nil, nil + } + + return &remote.Payload{ + Data: c.Data, + MD5: c.MD5, + }, nil +} + +func (c *RemoteClient) Put(data []byte) error { + md5 := md5.Sum(data) + + c.Data = data + c.MD5 = md5[:] + return nil +} + +func (c *RemoteClient) Delete() error { + c.Data = nil + c.MD5 = nil + return nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + return locks.lock(c.Name, info) +} +func (c *RemoteClient) Unlock(id string) error { + return locks.unlock(c.Name, id) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go new file mode 100644 index 00000000..d4ec85c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend.go @@ -0,0 +1,206 @@ +package manta + +import ( + "context" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + triton "github.com/joyent/triton-go" + "github.com/joyent/triton-go/authentication" + "github.com/joyent/triton-go/storage" +) + +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "account": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_ACCOUNT", "SDC_ACCOUNT"}, ""), + }, + + "user": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_USER", "SDC_USER"}, ""), + }, + + "url": { + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"MANTA_URL"}, "https://us-east.manta.joyent.com"), + }, + + "key_material": { + Type: 
schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_MATERIAL", "SDC_KEY_MATERIAL"}, ""), + }, + + "key_id": { + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{"TRITON_KEY_ID", "SDC_KEY_ID"}, ""), + }, + + "insecure_skip_tls_verify": { + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("TRITON_SKIP_TLS_VERIFY", ""), + }, + + "path": { + Type: schema.TypeString, + Required: true, + }, + + "object_name": { + Type: schema.TypeString, + Optional: true, + Default: "terraform.tfstate", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + data *schema.ResourceData + + // The fields below are set from configure + storageClient *storage.StorageClient + path string + objectName string +} + +type BackendConfig struct { + AccountId string + Username string + KeyId string + AccountUrl string + KeyMaterial string + SkipTls bool +} + +func (b *Backend) configure(ctx context.Context) error { + if b.path != "" { + return nil + } + + data := schema.FromContextBackendConfig(ctx) + + config := &BackendConfig{ + AccountId: data.Get("account").(string), + AccountUrl: data.Get("url").(string), + KeyId: data.Get("key_id").(string), + SkipTls: data.Get("insecure_skip_tls_verify").(bool), + } + + if v, ok := data.GetOk("user"); ok { + config.Username = v.(string) + } + + if v, ok := data.GetOk("key_material"); ok { + config.KeyMaterial = v.(string) + } + + b.path = data.Get("path").(string) + b.objectName = data.Get("object_name").(string) + + // If object_name is not set, try the deprecated objectName. + if b.objectName == "" { + b.objectName = data.Get("objectName").(string) + } + + var validationError *multierror.Error + + if data.Get("account").(string) == "" { + validationError = multierror.Append(validationError, errors.New("`Account` must be configured for the Triton provider")) + } + if data.Get("key_id").(string) == "" { + validationError = multierror.Append(validationError, errors.New("`Key ID` must be configured for the Triton provider")) + } + if b.path == "" { + validationError = multierror.Append(validationError, errors.New("`Path` must be configured for the Triton provider")) + } + + if validationError != nil { + return validationError + } + + var signer authentication.Signer + var err error + + if config.KeyMaterial == "" { + input := authentication.SSHAgentSignerInput{ + KeyID: config.KeyId, + AccountName: config.AccountId, + Username: config.Username, + } + signer, err = authentication.NewSSHAgentSigner(input) + if err != nil { + return errwrap.Wrapf("Error Creating SSH Agent Signer: {{err}}", err) + } + } else { + var keyBytes []byte + if _, err = os.Stat(config.KeyMaterial); err == nil { + keyBytes, err = ioutil.ReadFile(config.KeyMaterial) + if err != nil { + return fmt.Errorf("Error reading key material from %s: %s", + config.KeyMaterial, err) + } + block, _ := pem.Decode(keyBytes) + if block == nil { + return fmt.Errorf( + "Failed to read key material '%s': no key found", config.KeyMaterial) + } + + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return fmt.Errorf( + "Failed to read key '%s': password protected keys are\n"+ + "not currently supported. 
Please decrypt the key prior to use.", config.KeyMaterial) + } + + } else { + keyBytes = []byte(config.KeyMaterial) + } + + input := authentication.PrivateKeySignerInput{ + KeyID: config.KeyId, + PrivateKeyMaterial: keyBytes, + AccountName: config.AccountId, + Username: config.Username, + } + + signer, err = authentication.NewPrivateKeySigner(input) + if err != nil { + return errwrap.Wrapf("Error Creating SSH Private Key Signer: {{err}}", err) + } + } + + clientConfig := &triton.ClientConfig{ + MantaURL: config.AccountUrl, + AccountName: config.AccountId, + Username: config.Username, + Signers: []authentication.Signer{signer}, + } + triton, err := storage.NewClient(clientConfig) + if err != nil { + return err + } + + b.storageClient = triton + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go new file mode 100644 index 00000000..1eec6f07 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/backend_state.go @@ -0,0 +1,145 @@ +package manta + +import ( + "context" + "errors" + "fmt" + "path" + "sort" + "strings" + + tritonErrors "github.com/joyent/triton-go/errors" + "github.com/joyent/triton-go/storage" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +func (b *Backend) Workspaces() ([]string, error) { + result := []string{backend.DefaultStateName} + + objs, err := b.storageClient.Dir().List(context.Background(), &storage.ListDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, b.path), + }) + if err != nil { + if tritonErrors.IsResourceNotFound(err) { + return result, nil + } + return nil, err + } + + for _, obj := range objs.Entries { + if obj.Type == "directory" && obj.Name != "" { + result = append(result, obj.Name) + } + } + + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + //firstly we need to delete the state file + err := b.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, b.statePath(name), b.objectName), + }) + if err != nil { + return err + } + + //then we need to delete the state folder + err = b.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, b.statePath(name)), + }) + if err != nil { + return err + } + + return nil +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + storageClient: b.storageClient, + directoryName: b.statePath(name), + keyName: b.objectName, + } + + stateMgr := &remote.State{Client: client} + + //if this isn't the default state name, we need to create the object so + //it's listed by States. 
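+	//as in the other backends, the new workspace's empty state is seeded
+	//while holding a lock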
+ if name != backend.DefaultStateName { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock manta state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) statePath(name string) string { + if name == backend.DefaultStateName { + return b.path + } + + return path.Join(b.path, name) +} + +const errStateUnlock = ` +Error unlocking Manta state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go new file mode 100644 index 00000000..4f889935 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/manta/client.go @@ -0,0 +1,200 @@ +package manta + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "log" + "path" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + tritonErrors "github.com/joyent/triton-go/errors" + "github.com/joyent/triton-go/storage" +) + +const ( + mantaDefaultRootStore = "/stor" + lockFileName = "tflock" +) + +type RemoteClient struct { + storageClient *storage.StorageClient + directoryName string + keyName string + statePath string +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + output, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), + }) + if err != nil { + if tritonErrors.IsResourceNotFound(err) { + return nil, nil + } + return nil, err + } + defer output.ObjectReader.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output.ObjectReader); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + payload := &remote.Payload{ + Data: buf.Bytes(), + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil + +} + +func (c *RemoteClient) Put(data []byte) error { + contentType := "application/json" + contentLength := int64(len(data)) + + params := &storage.PutObjectInput{ + ContentType: contentType, + ContentLength: uint64(contentLength), + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), + ObjectReader: bytes.NewReader(data), + } + + log.Printf("[DEBUG] Uploading remote state to Manta: %#v", params) + err := 
c.storageClient.Objects().Put(context.Background(), params) + if err != nil { + return err + } + + return nil +} + +func (c *RemoteClient) Delete() error { + err := c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName), + }) + + return err +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + //At Joyent, we want to make sure that the State directory exists before we interact with it + //We don't expect users to have to create it in advance + //The order of operations of Backend State as follows: + // * Get - if this doesn't exist then we continue as though it's new + // * Lock - we make sure that the state directory exists as it's the entrance to writing to Manta + // * Put - put the state up there + // * Unlock - unlock the directory + //We can always guarantee that the user can put their state in the specified location because of this + err := c.storageClient.Dir().Put(context.Background(), &storage.PutDirectoryInput{ + DirectoryName: path.Join(mantaDefaultRootStore, c.directoryName), + }) + if err != nil { + return "", err + } + + //firstly we want to check that a lock doesn't already exist + lockErr := &state.LockError{} + lockInfo, err := c.getLockInfo() + if err != nil { + if !tritonErrors.IsResourceNotFound(err) { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return "", lockErr + } + } + + if lockInfo != nil { + lockErr := &state.LockError{ + Err: fmt.Errorf("A lock is already acquired"), + Info: lockInfo, + } + return "", lockErr + } + + info.Path = path.Join(c.directoryName, lockFileName) + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + data := info.Marshal() + + contentType := "application/json" + contentLength := int64(len(data)) + + params := &storage.PutObjectInput{ + ContentType: contentType, + ContentLength: uint64(contentLength), + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), + ObjectReader: bytes.NewReader(data), + ForceInsert: true, + } + + log.Printf("[DEBUG] Creating manta state lock: %#v", params) + err = c.storageClient.Objects().Put(context.Background(), params) + if err != nil { + return "", err + } + + return info.ID, nil +} + +func (c *RemoteClient) Unlock(id string) error { + lockErr := &state.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + err = c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), + }) + + return err +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + output, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{ + ObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName), + }) + if err != nil { + return nil, err + } + + defer output.ObjectReader.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output.ObjectReader); err != nil { + return nil, fmt.Errorf("Failed to read lock info: %s", err) + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal(buf.Bytes(), lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} 
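All four backends vendored in this change expose the same small surface to Terraform: a `remote.Client` with `Get`/`Put`/`Delete`, plus the `state.Locker` pair `Lock`/`Unlock`, which each backend's `StateMgr` wraps in a `remote.State`. As a mental model for the Manta client above and the OSS, Postgres, and S3 clients below, here is a minimal in-memory sketch of that contract; it is editorial illustration only, not part of the vendored code, and the `memClient` name is invented for the example.

```go
package example

import (
	"crypto/md5"
	"fmt"
	"sync"

	"github.com/hashicorp/terraform/state"
	"github.com/hashicorp/terraform/state/remote"
)

// memClient is a toy remote.Client plus state.Locker.
type memClient struct {
	mu     sync.Mutex
	data   []byte
	lockID string
}

// Get returns the stored state; a nil payload means "no state yet", exactly
// as the real clients report a missing object or row.
func (c *memClient) Get() (*remote.Payload, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if len(c.data) == 0 {
		return nil, nil
	}
	sum := md5.Sum(c.data)
	return &remote.Payload{Data: c.data, MD5: sum[:]}, nil
}

func (c *memClient) Put(data []byte) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data = append([]byte(nil), data...)
	return nil
}

func (c *memClient) Delete() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data = nil
	return nil
}

// Lock fails if a lock is already held, standing in for the conditional
// writes (DynamoDB, TableStore), advisory locks (Postgres), or
// read-then-write checks (Manta) that the real clients use.
func (c *memClient) Lock(info *state.LockInfo) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.lockID != "" {
		return "", &state.LockError{Err: fmt.Errorf("a lock is already acquired")}
	}
	if info.ID == "" {
		info.ID = "example-lock" // the real clients generate a UUID here
	}
	c.lockID = info.ID
	return info.ID, nil
}

func (c *memClient) Unlock(id string) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if id != c.lockID {
		return fmt.Errorf("lock id %q does not match existing lock", id)
	}
	c.lockID = ""
	return nil
}
```

Wrapping such a client in `&remote.State{Client: c}` yields the `state.State` value that each backend's `StateMgr` hands back to Terraform.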
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend.go new file mode 100644 index 00000000..33c2cbf5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend.go @@ -0,0 +1,309 @@ +package oss + +import ( + "context" + "fmt" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + "os" + "strings" + + "github.com/aliyun/alibaba-cloud-sdk-go/sdk" + "github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials" + "github.com/aliyun/alibaba-cloud-sdk-go/services/location" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/version" + "log" + "net/http" + "strconv" + "time" +) + +// New creates a new backend for OSS remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "access_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Key ID", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_ID")), + }, + + "secret_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Access Secret Key", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")), + }, + + "security_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Alibaba Cloud Security Token", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", ""), + }, + + "region": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The region of the OSS bucket.", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_DEFAULT_REGION")), + }, + "tablestore_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the TableStore API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_TABLESTORE_ENDPOINT", ""), + }, + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the OSS API", + DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_OSS_ENDPOINT", os.Getenv("OSS_ENDPOINT")), + }, + + "bucket": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "The name of the OSS bucket", + }, + + "prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The directory where state files will be saved inside the bucket", + Default: "env:", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") { + return nil, []error{fmt.Errorf("workspace_key_prefix must not start with '/' or './'")} + } + return nil, nil + }, + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "The path of the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") { + return nil, []error{fmt.Errorf("key can not start and end with '/'")} + } + return nil, nil + }, + Default: "terraform.tfstate", + }, + + "tablestore_table": { + Type: schema.TypeString, + Optional: true, + Description: "TableStore table for state locking 
and consistency", + Default: "", + }, + + "encrypt": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: false, + }, + + "acl": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Object ACL to be applied to the state file", + Default: "", + ValidateFunc: func(v interface{}, k string) ([]string, []error) { + if value := v.(string); value != "" { + acls := oss.ACLType(value) + if acls != oss.ACLPrivate && acls != oss.ACLPublicRead && acls != oss.ACLPublicReadWrite { + return nil, []error{fmt.Errorf( + "%q must be a valid ACL value , expected %s, %s or %s, got %q", + k, oss.ACLPrivate, oss.ACLPublicRead, oss.ACLPublicReadWrite, acls)} + } + } + return nil, nil + }, + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + + bucketName string + statePrefix string + stateKey string + serverSideEncryption bool + acl string + endpoint string + otsEndpoint string + otsTable string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.ossClient != nil { + return nil + } + + // Grab the resource data + d := schema.FromContextBackendConfig(ctx) + + b.bucketName = d.Get("bucket").(string) + b.statePrefix = strings.TrimPrefix(strings.Trim(d.Get("prefix").(string), "/"), "./") + b.stateKey = d.Get("key").(string) + b.serverSideEncryption = d.Get("encrypt").(bool) + b.acl = d.Get("acl").(string) + + accessKey := d.Get("access_key").(string) + secretKey := d.Get("secret_key").(string) + securityToken := d.Get("security_token").(string) + region := d.Get("region").(string) + endpoint := d.Get("endpoint").(string) + schma := "https" + + if endpoint == "" { + endpointItem, _ := b.getOSSEndpointByRegion(accessKey, secretKey, securityToken, region) + if endpointItem != nil && len(endpointItem.Endpoint) > 0 { + if len(endpointItem.Protocols.Protocols) > 0 { + // HTTP or HTTPS + schma = strings.ToLower(endpointItem.Protocols.Protocols[0]) + for _, p := range endpointItem.Protocols.Protocols { + if strings.ToLower(p) == "https" { + schma = strings.ToLower(p) + break + } + } + } + endpoint = endpointItem.Endpoint + } else { + endpoint = fmt.Sprintf("oss-%s.aliyuncs.com", region) + } + } + if !strings.HasPrefix(endpoint, "http") { + endpoint = fmt.Sprintf("%s://%s", schma, endpoint) + } + log.Printf("[DEBUG] Instantiate OSS client using endpoint: %#v", endpoint) + var options []oss.ClientOption + if securityToken != "" { + options = append(options, oss.SecurityToken(securityToken)) + } + options = append(options, oss.UserAgent(fmt.Sprintf("%s/%s", TerraformUA, TerraformVersion))) + + client, err := oss.New(endpoint, accessKey, secretKey, options...) 
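+	// Note: the error from oss.New above is only returned at the very end of
+	// configure, after the optional TableStore client has been set up.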
+ b.ossClient = client + otsEndpoint := d.Get("tablestore_endpoint").(string) + if otsEndpoint != "" { + if !strings.HasPrefix(otsEndpoint, "http") { + otsEndpoint = fmt.Sprintf("%s://%s", schma, otsEndpoint) + } + b.otsEndpoint = otsEndpoint + parts := strings.Split(strings.TrimPrefix(strings.TrimPrefix(otsEndpoint, "https://"), "http://"), ".") + b.otsClient = tablestore.NewClientWithConfig(otsEndpoint, parts[0], accessKey, secretKey, securityToken, tablestore.NewDefaultTableStoreConfig()) + } + b.otsTable = d.Get("tablestore_table").(string) + + return err +} + +func (b *Backend) getOSSEndpointByRegion(access_key, secret_key, security_token, region string) (*location.DescribeEndpointResponse, error) { + args := location.CreateDescribeEndpointRequest() + args.ServiceCode = "oss" + args.Id = region + args.Domain = "location-readonly.aliyuncs.com" + + locationClient, err := location.NewClientWithOptions(region, getSdkConfig(), credentials.NewStsTokenCredential(access_key, secret_key, security_token)) + if err != nil { + return nil, fmt.Errorf("Unable to initialize the location client: %#v", err) + + } + locationClient.AppendUserAgent(TerraformUA, TerraformVersion) + endpointsResponse, err := locationClient.DescribeEndpoint(args) + if err != nil { + return nil, fmt.Errorf("Describe oss endpoint using region: %#v got an error: %#v.", region, err) + } + return endpointsResponse, nil +} + +func getSdkConfig() *sdk.Config { + return sdk.NewConfig(). + WithMaxRetryTime(5). + WithTimeout(time.Duration(30) * time.Second). + WithGoRoutinePoolSize(10). + WithDebug(false). + WithHttpTransport(getTransport()). + WithScheme("HTTPS") +} + +func getTransport() *http.Transport { + handshakeTimeout, err := strconv.Atoi(os.Getenv("TLSHandshakeTimeout")) + if err != nil { + handshakeTimeout = 120 + } + transport := cleanhttp.DefaultTransport() + transport.TLSHandshakeTimeout = time.Duration(handshakeTimeout) * time.Second + transport.Proxy = http.ProxyFromEnvironment + return transport +} + +type Invoker struct { + catchers []*Catcher +} + +type Catcher struct { + Reason string + RetryCount int + RetryWaitSeconds int +} + +const TerraformUA = "HashiCorp-Terraform" + +var TerraformVersion = strings.TrimSuffix(version.String(), "-dev") +var ClientErrorCatcher = Catcher{"AliyunGoClientFailure", 10, 3} +var ServiceBusyCatcher = Catcher{"ServiceUnavailable", 10, 3} + +func NewInvoker() Invoker { + i := Invoker{} + i.AddCatcher(ClientErrorCatcher) + i.AddCatcher(ServiceBusyCatcher) + return i +} + +func (a *Invoker) AddCatcher(catcher Catcher) { + a.catchers = append(a.catchers, &catcher) +} + +func (a *Invoker) Run(f func() error) error { + err := f() + + if err == nil { + return nil + } + + for _, catcher := range a.catchers { + if strings.Contains(err.Error(), catcher.Reason) { + catcher.RetryCount-- + + if catcher.RetryCount <= 0 { + return fmt.Errorf("Retry timeout and got an error: %#v.", err) + } else { + time.Sleep(time.Duration(catcher.RetryWaitSeconds) * time.Second) + return a.Run(f) + } + } + } + return err +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend_state.go new file mode 100644 index 00000000..28c40ee4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/backend_state.go @@ -0,0 +1,199 @@ +package oss + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/aliyun/aliyun-oss-go-sdk/oss" + 
"github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" + + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "log" + "path" +) + +const ( + lockFileSuffix = ".tflock" +) + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + ossClient: b.ossClient, + bucketName: b.bucketName, + stateFile: b.stateFile(name), + lockFile: b.lockFile(name), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + otsTable: b.otsTable, + otsClient: b.otsClient, + } + if b.otsEndpoint != "" && b.otsTable != "" { + table, err := b.otsClient.DescribeTable(&tablestore.DescribeTableRequest{ + TableName: b.otsTable, + }) + if err != nil { + return client, fmt.Errorf("Error describing table store %s: %#v", b.otsTable, err) + } + for _, t := range table.TableMeta.SchemaEntry { + pkMeta := TableStorePrimaryKeyMeta{ + PKName: *t.Name, + } + if *t.Type == tablestore.PrimaryKeyType_INTEGER { + pkMeta.PKType = "Integer" + } else if *t.Type == tablestore.PrimaryKeyType_STRING { + pkMeta.PKType = "String" + } else if *t.Type == tablestore.PrimaryKeyType_BINARY { + pkMeta.PKType = "Binary" + } else { + return client, fmt.Errorf("Unsupported PrimaryKey type: %d.", *t.Type) + } + client.otsTabkePK = pkMeta + break + } + } + + return client, nil +} + +func (b *Backend) Workspaces() ([]string, error) { + bucket, err := b.ossClient.Bucket(b.bucketName) + if err != nil { + return []string{""}, fmt.Errorf("Error getting bucket: %#v", err) + } + + var options []oss.Option + options = append(options, oss.Prefix(b.statePrefix+"/")) + resp, err := bucket.ListObjects(options...) + + if err != nil { + return nil, err + } + + result := []string{backend.DefaultStateName} + prefix := b.statePrefix + for _, obj := range resp.Objects { + // we have 3 parts, the state prefix, the workspace name, and the state file: // + if path.Join(b.statePrefix, b.stateKey) == obj.Key { + // filter the default workspace + continue + } + + parts := strings.Split(strings.TrimPrefix(obj.Key, prefix+"/"), "/") + if len(parts) > 0 && parts[0] != "" { + result = append(result, parts[0]) + } + } + + sort.Strings(result[1:]) + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + return client.Delete() +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + stateMgr := &remote.State{Client: client} + + // Check to see if this state already exists. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + log.Printf("[DEBUG] Current workspace name: %s. All workspaces:%#v", name, existing) + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + // We need to create the object so it's listed by States. 
+ if !exists { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("Failed to lock OSS state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(e error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(stateUnlockError), lockId, err) + } + return e + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + return stateMgr, nil +} + +func (b *Backend) stateFile(name string) string { + if name == backend.DefaultStateName { + return path.Join(b.statePrefix, b.stateKey) + } + return path.Join(b.statePrefix, name, b.stateKey) +} + +func (b *Backend) lockFile(name string) string { + return b.stateFile(name) + lockFileSuffix +} + +const stateUnlockError = ` +Error unlocking Alibaba Cloud OSS state file: + +Lock ID: %s +Error message: %#v + +You may have to force-unlock this state in order to use it again. +The Alibaba Cloud backend acquires a lock during initialization to ensure the initial state file is created. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/client.go new file mode 100644 index 00000000..e50f801e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/oss/client.go @@ -0,0 +1,484 @@ +package oss + +import ( + "bytes" + "crypto/md5" + "encoding/json" + "fmt" + "io" + + "encoding/hex" + "github.com/aliyun/aliyun-oss-go-sdk/oss" + "github.com/aliyun/aliyun-tablestore-go-sdk/tablestore" + "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/pkg/errors" + "log" + "sync" + "time" +) + +// Store the last saved serial in tablestore with this suffix for consistency checks. +const ( + stateIDSuffix = "-md5" + statePKValue = "terraform-remote-state-lock" +) + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. 
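+	// With a 10-second window polled every 2 seconds, Get makes roughly five
+	// attempts before failing with errBadChecksumFmt.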
+ consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +type TableStorePrimaryKeyMeta struct { + PKName string + PKType string +} + +type RemoteClient struct { + ossClient *oss.Client + otsClient *tablestore.TableStoreClient + bucketName string + stateFile string + lockFile string + serverSideEncryption bool + acl string + info *state.LockInfo + mu sync.Mutex + otsTable string + otsTabkePK TableStorePrimaryKeyMeta +} + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.getObj() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. + var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying OSS RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("Error getting bucket: %#v", err) + } + + body := bytes.NewReader(data) + + var options []oss.Option + if c.acl != "" { + options = append(options, oss.ACL(oss.ACLType(c.acl))) + } + options = append(options, oss.ContentType("application/json")) + if c.serverSideEncryption { + options = append(options, oss.ServerSideEncryption("AES256")) + } + options = append(options, oss.ContentLength(int64(len(data)))) + + if body != nil { + if err := bucket.PutObject(c.stateFile, body, options...); err != nil { + return fmt.Errorf("Failed to upload state %s: %#v", c.stateFile, err) + } + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
+ return fmt.Errorf("Failed to store state MD5: %s", err) + } + return nil +} + +func (c *RemoteClient) Delete() error { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err) + } + + log.Printf("[DEBUG] Deleting remote state from OSS: %#v", c.stateFile) + + if err := bucket.DeleteObject(c.stateFile); err != nil { + return fmt.Errorf("Error deleting state %s: %#v", c.stateFile, err) + } + + if err := c.deleteMD5(); err != nil { + log.Printf("[WARN] Error deleting state MD5: %s", err) + } + return nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + if c.otsTable == "" { + return "", nil + } + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "LockID", + Value: c.lockFile, + }, + { + ColumnName: "Info", + Value: string(info.Marshal()), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST, + }, + } + + log.Printf("[DEBUG] Recoring state lock in tablestore: %#v", putParams) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + if err != nil { + log.Printf("[WARN] Error storing state lock in tablestore: %#v", err) + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + log.Printf("[WARN] Error getting lock info: %#v", err) + err = multierror.Append(err, infoErr) + } + lockErr := &state.LockError{ + Err: err, + Info: lockInfo, + } + log.Printf("[WARN] state lock error: %#v", lockErr) + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5() ([]byte, error) { + if c.otsTable == "" { + return nil, nil + } + + getParams := &tablestore.SingleRowQueryCriteria{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + ColumnsToGet: []string{"LockID", "Digest"}, + MaxVersion: 1, + } + + log.Printf("[DEBUG] Retrieving state serial in tablestore: %#v", getParams) + + object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ + SingleRowQueryCriteria: getParams, + }) + + if err != nil { + return nil, err + } + + var val string + if v, ok := object.GetColumnMap().Columns["Digest"]; ok && len(v) > 0 { + val = v[0].Value.(string) + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state to that clients can check for stale state files. 
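+// Failures here are only logged: the digest is a best-effort consistency aid,
+// so putMD5 returns nil even when the TableStore write fails.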
+func (c *RemoteClient) putMD5(sum []byte) error { + if c.otsTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &tablestore.PutRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + Columns: []tablestore.AttributeColumn{ + { + ColumnName: "LockID", + Value: c.lockPath() + stateIDSuffix, + }, + { + ColumnName: "Digest", + Value: hex.EncodeToString(sum), + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_NOT_EXIST, + }, + } + + log.Printf("[DEBUG] Recoring state serial in tablestore: %#v", putParams) + + _, err := c.otsClient.PutRow(&tablestore.PutRowRequest{ + PutRowChange: putParams, + }) + + if err != nil { + log.Printf("[WARN] failed to record state serial in tablestore: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.otsTable == "" { + return nil + } + + params := &tablestore.DeleteRowRequest{ + DeleteRowChange: &tablestore.DeleteRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST, + }, + }, + } + + log.Printf("[DEBUG] Deleting state serial in tablestore: %#v", params) + + if _, err := c.otsClient.DeleteRow(params); err != nil { + return err + } + + return nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + getParams := &tablestore.SingleRowQueryCriteria{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + ColumnsToGet: []string{"LockID", "Info"}, + MaxVersion: 1, + } + + log.Printf("[DEBUG] Retrieving state lock info from tablestore: %#v", getParams) + + object, err := c.otsClient.GetRow(&tablestore.GetRowRequest{ + SingleRowQueryCriteria: getParams, + }) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := object.GetColumnMap().Columns["Info"]; ok && len(v) > 0 { + infoData = v[0].Value.(string) + } + lockInfo := &state.LockInfo{} + err = json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + return lockInfo, nil +} +func (c *RemoteClient) Unlock(id string) error { + if c.otsTable == "" { + return nil + } + + lockErr := &state.LockError{} + + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + params := &tablestore.DeleteRowRequest{ + DeleteRowChange: &tablestore.DeleteRowChange{ + TableName: c.otsTable, + PrimaryKey: &tablestore.PrimaryKey{ + PrimaryKeys: []*tablestore.PrimaryKeyColumn{ + { + ColumnName: c.otsTabkePK.PKName, + Value: c.getPKValue(), + }, + }, + }, + Condition: &tablestore.RowCondition{ + RowExistenceExpectation: tablestore.RowExistenceExpectation_EXPECT_EXIST, + }, + }, + } + + log.Printf("[DEBUG] Deleting state lock from tablestore: %#v", params) + + _, err = c.otsClient.DeleteRow(params) + + if 
err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.stateFile) +} + +func (c *RemoteClient) getObj() (*remote.Payload, error) { + bucket, err := c.ossClient.Bucket(c.bucketName) + if err != nil { + return nil, fmt.Errorf("Error getting bucket %s: %#v", c.bucketName, err) + } + + if exist, err := bucket.IsObjectExist(c.stateFile); err != nil { + return nil, fmt.Errorf("Estimating object %s is exist got an error: %#v", c.stateFile, err) + } else if !exist { + return nil, nil + } + + var options []oss.Option + output, err := bucket.GetObject(c.stateFile, options...) + if err != nil { + return nil, fmt.Errorf("Error getting object: %#v", err) + } + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + sum := md5.Sum(buf.Bytes()) + payload := &remote.Payload{ + Data: buf.Bytes(), + MD5: sum[:], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) getPKValue() (value interface{}) { + value = statePKValue + if c.otsTabkePK.PKType == "Integer" { + value = hashcode.String(statePKValue) + } else if c.otsTabkePK.PKType == "Binary" { + value = stringToBin(statePKValue) + } + return +} + +func stringToBin(s string) (binString string) { + for _, c := range s { + binString = fmt.Sprintf("%s%b", binString, c) + } + return +} + +const errBadChecksumFmt = `state data in OSS does not have the expected content. + +This may be caused by unusually long delays in OSS processing a previous state +update. Please wait for a minute or two and try again. If this problem +persists, and neither OSS nor TableStore are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +TableStore table to the following value: %x +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend.go new file mode 100644 index 00000000..34e0a46c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend.go @@ -0,0 +1,88 @@ +package pg + +import ( + "context" + "database/sql" + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + _ "github.com/lib/pq" +) + +const ( + statesTableName = "states" + statesIndexName = "states_by_name" +) + +// New creates a new backend for Postgres remote state. 
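+// Each workspace becomes one row in the "states" table inside the schema named
+// by schema_name (default "terraform_remote_state"); configure below creates
+// the schema, table, and unique index on demand.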
+func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "conn_str": &schema.Schema{ + Type: schema.TypeString, + Required: true, + Description: "Postgres connection string; a `postgres://` URL", + }, + + "schema_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: "Name of the automatically managed Postgres schema to store state", + Default: "terraform_remote_state", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + db *sql.DB + configData *schema.ResourceData + connStr string + schemaName string +} + +func (b *Backend) configure(ctx context.Context) error { + // Grab the resource data + b.configData = schema.FromContextBackendConfig(ctx) + data := b.configData + + b.connStr = data.Get("conn_str").(string) + b.schemaName = data.Get("schema_name").(string) + + db, err := sql.Open("postgres", b.connStr) + if err != nil { + return err + } + + // Prepare database schema, tables, & indexes. + var query string + query = `CREATE SCHEMA IF NOT EXISTS %s` + if _, err := db.Exec(fmt.Sprintf(query, b.schemaName)); err != nil { + return err + } + query = `CREATE TABLE IF NOT EXISTS %s.%s ( + id SERIAL PRIMARY KEY, + name TEXT, + data TEXT + )` + if _, err := db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName)); err != nil { + return err + } + query = `CREATE UNIQUE INDEX IF NOT EXISTS %s ON %s.%s (name)` + if _, err := db.Exec(fmt.Sprintf(query, statesIndexName, b.schemaName, statesTableName)); err != nil { + return err + } + + // Assign db after its schema is prepared. + b.db = db + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend_state.go new file mode 100644 index 00000000..a886d7f4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/backend_state.go @@ -0,0 +1,113 @@ +package pg + +import ( + "fmt" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +func (b *Backend) Workspaces() ([]string, error) { + query := `SELECT name FROM %s.%s ORDER BY name` + rows, err := b.db.Query(fmt.Sprintf(query, b.schemaName, statesTableName)) + if err != nil { + return nil, err + } + defer rows.Close() + + var result []string + + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return nil, err + } + result = append(result, name) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return result, nil +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + query := `DELETE FROM %s.%s WHERE name = $1` + _, err := b.db.Exec(fmt.Sprintf(query, b.schemaName, statesTableName), name) + if err != nil { + return err + } + + return nil +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + // Build the state client + var stateMgr state.State = &remote.State{ + Client: &RemoteClient{ + Client: b.db, + Name: name, + SchemaName: b.schemaName, + }, + } + + // Check to see if this state already exists. 
+ // If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // Grab a lock, we use this to write an empty state if one doesn't + // exist already. We have to write an empty state as a sentinel value + // so Workspaces() knows it exists. + if !exists { + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Postgres: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(`error unlocking Postgres state: %s`, err) + } + return parent + } + + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + + return stateMgr, nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/client.go new file mode 100644 index 00000000..a14e5ed4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/pg/client.go @@ -0,0 +1,142 @@ +package pg + +import ( + "crypto/md5" + "database/sql" + "fmt" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + _ "github.com/lib/pq" +) + +// RemoteClient is a remote client that stores data in a Postgres database +type RemoteClient struct { + Client *sql.DB + Name string + SchemaName string + + info *state.LockInfo +} + +func (c *RemoteClient) Get() (*remote.Payload, error) { + query := `SELECT data FROM %s.%s WHERE name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + var data []byte + err := row.Scan(&data) + switch { + case err == sql.ErrNoRows: + // No existing state returns empty. 
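+		// (a nil payload is treated by remote.State as "no state yet")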
+ return nil, nil + case err != nil: + return nil, err + default: + md5 := md5.Sum(data) + return &remote.Payload{ + Data: data, + MD5: md5[:], + }, nil + } +} + +func (c *RemoteClient) Put(data []byte) error { + query := `INSERT INTO %s.%s (name, data) VALUES ($1, $2) + ON CONFLICT (name) DO UPDATE + SET data = $2 WHERE %s.name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName, statesTableName), c.Name, data) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Delete() error { + query := `DELETE FROM %s.%s WHERE name = $1` + _, err := c.Client.Exec(fmt.Sprintf(query, c.SchemaName, statesTableName), c.Name) + if err != nil { + return err + } + return nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + var err error + var lockID string + + if info.ID == "" { + lockID, err = uuid.GenerateUUID() + if err != nil { + return "", err + } + info.ID = lockID + } + + // Local helper function so we can call it multiple places + // + lockUnlock := func(pgLockId string) error { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, pgLockId)) + var didUnlock []byte + err := row.Scan(&didUnlock) + if err != nil { + return &state.LockError{Info: info, Err: err} + } + return nil + } + + // Try to acquire locks for the existing row `id` and the creation lock `-1`. + query := `SELECT %s.id, pg_try_advisory_lock(%s.id), pg_try_advisory_lock(-1) FROM %s.%s WHERE %s.name = $1` + row := c.Client.QueryRow(fmt.Sprintf(query, statesTableName, statesTableName, c.SchemaName, statesTableName, statesTableName), c.Name) + var pgLockId, didLock, didLockForCreate []byte + err = row.Scan(&pgLockId, &didLock, &didLockForCreate) + switch { + case err == sql.ErrNoRows: + // No rows means we're creating the workspace. Take the creation lock. + innerRow := c.Client.QueryRow(`SELECT pg_try_advisory_lock(-1)`) + var innerDidLock []byte + err := innerRow.Scan(&innerDidLock) + if err != nil { + return "", &state.LockError{Info: info, Err: err} + } + if string(innerDidLock) == "false" { + return "", &state.LockError{Info: info, Err: fmt.Errorf("Already locked for workspace creation: %s", c.Name)} + } + info.Path = "-1" + case err != nil: + return "", &state.LockError{Info: info, Err: err} + case string(didLock) == "false": + // Existing workspace is already locked. Release the attempted creation lock. + lockUnlock("-1") + return "", &state.LockError{Info: info, Err: fmt.Errorf("Workspace is already locked: %s", c.Name)} + case string(didLockForCreate) == "false": + // Someone has the creation lock already. Release the existing workspace because it might not be safe to touch. + lockUnlock(string(pgLockId)) + return "", &state.LockError{Info: info, Err: fmt.Errorf("Cannot lock workspace; already locked for workspace creation: %s", c.Name)} + default: + // Existing workspace is now locked. Release the attempted creation lock. 
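+		// The row's own advisory lock stays held by this session; Unlock
+		// releases it later via pg_advisory_unlock using the id saved in
+		// info.Path.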
+ lockUnlock("-1") + info.Path = string(pgLockId) + } + c.info = info + + return info.ID, nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + return c.info, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.info != nil && c.info.Path != "" { + query := `SELECT pg_advisory_unlock(%s)` + row := c.Client.QueryRow(fmt.Sprintf(query, c.info.Path)) + var didUnlock []byte + err := row.Scan(&didUnlock) + if err != nil { + return &state.LockError{Info: c.info, Err: err} + } + c.info = nil + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go new file mode 100644 index 00000000..303a25cc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend.go @@ -0,0 +1,332 @@ +package s3 + +import ( + "context" + "errors" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + awsbase "github.com/hashicorp/aws-sdk-go-base" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/version" +) + +// New creates a new backend for S3 remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + Description: "The name of the S3 bucket", + }, + + "key": { + Type: schema.TypeString, + Required: true, + Description: "The path to the state file inside the bucket", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + // s3 will strip leading slashes from an object, so while this will + // technically be accepted by s3, it will break our workspace hierarchy. 
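+				// (non-default workspaces land at <workspace_key_prefix>/<name>/<key>,
+				// so the key itself must stay relative)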
+ if strings.HasPrefix(v.(string), "/") { + return nil, []error{errors.New("key must not start with '/'")} + } + return nil, nil + }, + }, + + "region": { + Type: schema.TypeString, + Required: true, + Description: "The region of the S3 bucket.", + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", + }, nil), + }, + + "dynamodb_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the DynamoDB API", + DefaultFunc: schema.EnvDefaultFunc("AWS_DYNAMODB_ENDPOINT", ""), + }, + + "endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the S3 API", + DefaultFunc: schema.EnvDefaultFunc("AWS_S3_ENDPOINT", ""), + }, + + "iam_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the IAM API", + DefaultFunc: schema.EnvDefaultFunc("AWS_IAM_ENDPOINT", ""), + }, + + "sts_endpoint": { + Type: schema.TypeString, + Optional: true, + Description: "A custom endpoint for the STS API", + DefaultFunc: schema.EnvDefaultFunc("AWS_STS_ENDPOINT", ""), + }, + + "encrypt": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable server side encryption of the state file", + Default: false, + }, + + "acl": { + Type: schema.TypeString, + Optional: true, + Description: "Canned ACL to be applied to the state file", + Default: "", + }, + + "access_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS access key", + Default: "", + }, + + "secret_key": { + Type: schema.TypeString, + Optional: true, + Description: "AWS secret key", + Default: "", + }, + + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Description: "The ARN of a KMS Key to use for encrypting the state", + Default: "", + }, + + "lock_table": { + Type: schema.TypeString, + Optional: true, + Description: "DynamoDB table for state locking", + Default: "", + Deprecated: "please use the dynamodb_table attribute", + }, + + "dynamodb_table": { + Type: schema.TypeString, + Optional: true, + Description: "DynamoDB table for state locking and consistency", + Default: "", + }, + + "profile": { + Type: schema.TypeString, + Optional: true, + Description: "AWS profile name", + Default: "", + }, + + "shared_credentials_file": { + Type: schema.TypeString, + Optional: true, + Description: "Path to a shared credentials file", + Default: "", + }, + + "token": { + Type: schema.TypeString, + Optional: true, + Description: "MFA token", + Default: "", + }, + + "skip_credentials_validation": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the credentials validation via STS API.", + Default: false, + }, + + "skip_get_ec2_platforms": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip getting the supported EC2 platforms.", + Default: false, + Deprecated: "The S3 Backend does not require EC2 functionality and this attribute is no longer used.", + }, + + "skip_region_validation": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip static validation of region name.", + Default: false, + }, + + "skip_requesting_account_id": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip requesting the account ID.", + Default: false, + Deprecated: "The S3 Backend no longer automatically looks up the AWS Account ID and this attribute is no longer used.", + }, + + "skip_metadata_api_check": { + Type: schema.TypeBool, + Optional: true, + Description: "Skip the AWS Metadata API check.", + Default: false, + }, + + "role_arn": { + 
Type: schema.TypeString, + Optional: true, + Description: "The role to be assumed", + Default: "", + }, + + "session_name": { + Type: schema.TypeString, + Optional: true, + Description: "The session name to use when assuming the role.", + Default: "", + }, + + "external_id": { + Type: schema.TypeString, + Optional: true, + Description: "The external ID to use when assuming the role", + Default: "", + }, + + "assume_role_policy": { + Type: schema.TypeString, + Optional: true, + Description: "The permissions applied when assuming a role.", + Default: "", + }, + + "workspace_key_prefix": { + Type: schema.TypeString, + Optional: true, + Description: "The prefix applied to the non-default state path inside the bucket.", + Default: "env:", + ValidateFunc: func(v interface{}, s string) ([]string, []error) { + prefix := v.(string) + if strings.HasPrefix(prefix, "/") || strings.HasSuffix(prefix, "/") { + return nil, []error{errors.New("workspace_key_prefix must not start or end with '/'")} + } + return nil, nil + }, + }, + + "force_path_style": { + Type: schema.TypeBool, + Optional: true, + Description: "Force s3 to use path style api.", + Default: false, + }, + + "max_retries": { + Type: schema.TypeInt, + Optional: true, + Description: "The maximum number of times an AWS API request is retried on retryable failure.", + Default: 5, + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +type Backend struct { + *schema.Backend + + // The fields below are set from configure + s3Client *s3.S3 + dynClient *dynamodb.DynamoDB + + bucketName string + keyName string + serverSideEncryption bool + acl string + kmsKeyID string + ddbTable string + workspaceKeyPrefix string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.s3Client != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + + if !data.Get("skip_region_validation").(bool) { + if err := awsbase.ValidateRegion(data.Get("region").(string)); err != nil { + return err + } + } + + b.bucketName = data.Get("bucket").(string) + b.keyName = data.Get("key").(string) + b.serverSideEncryption = data.Get("encrypt").(bool) + b.acl = data.Get("acl").(string) + b.kmsKeyID = data.Get("kms_key_id").(string) + b.workspaceKeyPrefix = data.Get("workspace_key_prefix").(string) + + b.ddbTable = data.Get("dynamodb_table").(string) + if b.ddbTable == "" { + // try the deprecated field + b.ddbTable = data.Get("lock_table").(string) + } + + cfg := &awsbase.Config{ + AccessKey: data.Get("access_key").(string), + AssumeRoleARN: data.Get("role_arn").(string), + AssumeRoleExternalID: data.Get("external_id").(string), + AssumeRolePolicy: data.Get("assume_role_policy").(string), + AssumeRoleSessionName: data.Get("session_name").(string), + CredsFilename: data.Get("shared_credentials_file").(string), + DebugLogging: logging.IsDebugOrHigher(), + IamEndpoint: data.Get("iam_endpoint").(string), + MaxRetries: data.Get("max_retries").(int), + Profile: data.Get("profile").(string), + Region: data.Get("region").(string), + SecretKey: data.Get("secret_key").(string), + SkipCredsValidation: data.Get("skip_credentials_validation").(bool), + SkipMetadataApiCheck: data.Get("skip_metadata_api_check").(bool), + StsEndpoint: data.Get("sts_endpoint").(string), + Token: data.Get("token").(string), + UserAgentProducts: []*awsbase.UserAgentProduct{ + {Name: "APN", Version: "1.0"}, + {Name: "HashiCorp", Version: "1.0"}, + {Name: "Terraform", Version: version.String()}, + 
}, + } + + sess, err := awsbase.GetSession(cfg) + if err != nil { + return err + } + + b.dynClient = dynamodb.New(sess.Copy(&aws.Config{ + Endpoint: aws.String(data.Get("dynamodb_endpoint").(string)), + })) + b.s3Client = s3.New(sess.Copy(&aws.Config{ + Endpoint: aws.String(data.Get("endpoint").(string)), + S3ForcePathStyle: aws.Bool(data.Get("force_path_style").(bool)), + })) + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go new file mode 100644 index 00000000..b9fe4d0c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/backend_state.go @@ -0,0 +1,218 @@ +package s3 + +import ( + "errors" + "fmt" + "path" + "sort" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/s3" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +func (b *Backend) Workspaces() ([]string, error) { + prefix := "" + + if b.workspaceKeyPrefix != "" { + prefix = b.workspaceKeyPrefix + "/" + } + + params := &s3.ListObjectsInput{ + Bucket: &b.bucketName, + Prefix: aws.String(prefix), + } + + resp, err := b.s3Client.ListObjects(params) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == s3.ErrCodeNoSuchBucket { + return nil, fmt.Errorf(errS3NoSuchBucket, err) + } + return nil, err + } + + wss := []string{backend.DefaultStateName} + for _, obj := range resp.Contents { + ws := b.keyEnv(*obj.Key) + if ws != "" { + wss = append(wss, ws) + } + } + + sort.Strings(wss[1:]) + return wss, nil +} + +func (b *Backend) keyEnv(key string) string { + prefix := b.workspaceKeyPrefix + + if prefix == "" { + parts := strings.SplitN(key, "/", 2) + if len(parts) > 1 && parts[1] == b.keyName { + return parts[0] + } else { + return "" + } + } + + // add a slash to treat this as a directory + prefix += "/" + + parts := strings.SplitAfterN(key, prefix, 2) + if len(parts) < 2 { + return "" + } + + // shouldn't happen since we listed by prefix + if parts[0] != prefix { + return "" + } + + parts = strings.SplitN(parts[1], "/", 2) + + if len(parts) < 2 { + return "" + } + + // not our key, so don't include it in our listing + if parts[1] != b.keyName { + return "" + } + + return parts[0] +} + +func (b *Backend) DeleteWorkspace(name string) error { + if name == backend.DefaultStateName || name == "" { + return fmt.Errorf("can't delete default state") + } + + client, err := b.remoteClient(name) + if err != nil { + return err + } + + return client.Delete() +} + +// get a remote client configured for this state +func (b *Backend) remoteClient(name string) (*RemoteClient, error) { + if name == "" { + return nil, errors.New("missing state name") + } + + client := &RemoteClient{ + s3Client: b.s3Client, + dynClient: b.dynClient, + bucketName: b.bucketName, + path: b.path(name), + serverSideEncryption: b.serverSideEncryption, + acl: b.acl, + kmsKeyID: b.kmsKeyID, + ddbTable: b.ddbTable, + } + + return client, nil +} + +func (b *Backend) StateMgr(name string) (state.State, error) { + client, err := b.remoteClient(name) + if err != nil { + return nil, err + } + + stateMgr := &remote.State{Client: client} + // Check to see if this state already exists. 
+ // If we're trying to force-unlock a state, we can't take the lock before + // fetching the state. If the state doesn't exist, we have to assume this + // is a normal create operation, and take the lock at that point. + // + // If we need to force-unlock, but for some reason the state no longer + // exists, the user will have to use aws tools to manually fix the + // situation. + existing, err := b.Workspaces() + if err != nil { + return nil, err + } + + exists := false + for _, s := range existing { + if s == name { + exists = true + break + } + } + + // We need to create the object so it's listed by States. + if !exists { + // take a lock on this state while we write it + lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := client.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock s3 state: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + return parent + } + + // Grab the value + // This is to ensure that no one beat us to writing a state between + // the `exists` check and taking the lock. + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + + } + + return stateMgr, nil +} + +func (b *Backend) client() *RemoteClient { + return &RemoteClient{} +} + +func (b *Backend) path(name string) string { + if name == backend.DefaultStateName { + return b.keyName + } + + return path.Join(b.workspaceKeyPrefix, name, b.keyName) +} + +const errStateUnlock = ` +Error unlocking S3 state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go new file mode 100644 index 00000000..12500183 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/s3/client.go @@ -0,0 +1,402 @@ +package s3 + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/dynamodb" + "github.com/aws/aws-sdk-go/service/s3" + multierror "github.com/hashicorp/go-multierror" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +// Store the last saved serial in dynamo with this suffix for consistency checks. +const ( + stateIDSuffix = "-md5" + s3ErrCodeInternalError = "InternalError" +) + +type RemoteClient struct { + s3Client *s3.S3 + dynClient *dynamodb.DynamoDB + bucketName string + path string + serverSideEncryption bool + acl string + kmsKeyID string + ddbTable string +} + +var ( + // The amount of time we will retry a state waiting for it to match the + // expected checksum. 
+ consistencyRetryTimeout = 10 * time.Second + + // delay when polling the state + consistencyRetryPollInterval = 2 * time.Second +) + +// test hook called when checksums don't match +var testChecksumHook func() + +func (c *RemoteClient) Get() (payload *remote.Payload, err error) { + deadline := time.Now().Add(consistencyRetryTimeout) + + // If we have a checksum, and the returned payload doesn't match, we retry + // up until deadline. + for { + payload, err = c.get() + if err != nil { + return nil, err + } + + // If the remote state was manually removed the payload will be nil, + // but if there's still a digest entry for that state we will still try + // to compare the MD5 below. + var digest []byte + if payload != nil { + digest = payload.MD5 + } + + // verify that this state is what we expect + if expected, err := c.getMD5(); err != nil { + log.Printf("[WARN] failed to fetch state md5: %s", err) + } else if len(expected) > 0 && !bytes.Equal(expected, digest) { + log.Printf("[WARN] state md5 mismatch: expected '%x', got '%x'", expected, digest) + + if testChecksumHook != nil { + testChecksumHook() + } + + if time.Now().Before(deadline) { + time.Sleep(consistencyRetryPollInterval) + log.Println("[INFO] retrying S3 RemoteClient.Get...") + continue + } + + return nil, fmt.Errorf(errBadChecksumFmt, digest) + } + + break + } + + return payload, err +} + +func (c *RemoteClient) get() (*remote.Payload, error) { + var output *s3.GetObjectOutput + var err error + + output, err = c.s3Client.GetObject(&s3.GetObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + }) + + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + switch awserr.Code() { + case s3.ErrCodeNoSuchBucket: + return nil, fmt.Errorf(errS3NoSuchBucket, err) + case s3.ErrCodeNoSuchKey: + return nil, nil + } + } + return nil, err + } + + defer output.Body.Close() + + buf := bytes.NewBuffer(nil) + if _, err := io.Copy(buf, output.Body); err != nil { + return nil, fmt.Errorf("Failed to read remote state: %s", err) + } + + sum := md5.Sum(buf.Bytes()) + payload := &remote.Payload{ + Data: buf.Bytes(), + MD5: sum[:], + } + + // If there was no data, then return nil + if len(payload.Data) == 0 { + return nil, nil + } + + return payload, nil +} + +func (c *RemoteClient) Put(data []byte) error { + contentType := "application/json" + contentLength := int64(len(data)) + + i := &s3.PutObjectInput{ + ContentType: &contentType, + ContentLength: &contentLength, + Body: bytes.NewReader(data), + Bucket: &c.bucketName, + Key: &c.path, + } + + if c.serverSideEncryption { + if c.kmsKeyID != "" { + i.SSEKMSKeyId = &c.kmsKeyID + i.ServerSideEncryption = aws.String("aws:kms") + } else { + i.ServerSideEncryption = aws.String("AES256") + } + } + + if c.acl != "" { + i.ACL = aws.String(c.acl) + } + + log.Printf("[DEBUG] Uploading remote state to S3: %#v", i) + + _, err := c.s3Client.PutObject(i) + if err != nil { + return fmt.Errorf("failed to upload state: %s", err) + } + + sum := md5.Sum(data) + if err := c.putMD5(sum[:]); err != nil { + // if this errors out, we unfortunately have to error out altogether, + // since the next Get will inevitably fail. 
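+		// a Digest entry that no longer matches the object would make every
+		// subsequent Get report a checksum mismatch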
+ return fmt.Errorf("failed to store state MD5: %s", err) + + } + + return nil +} + +func (c *RemoteClient) Delete() error { + _, err := c.s3Client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: &c.bucketName, + Key: &c.path, + }) + + if err != nil { + return err + } + + if err := c.deleteMD5(); err != nil { + log.Printf("error deleting state md5: %s", err) + } + + return nil +} + +func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) { + if c.ddbTable == "" { + return "", nil + } + + info.Path = c.lockPath() + + if info.ID == "" { + lockID, err := uuid.GenerateUUID() + if err != nil { + return "", err + } + + info.ID = lockID + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + "Info": {S: aws.String(string(info.Marshal()))}, + }, + TableName: aws.String(c.ddbTable), + ConditionExpression: aws.String("attribute_not_exists(LockID)"), + } + _, err := c.dynClient.PutItem(putParams) + + if err != nil { + lockInfo, infoErr := c.getLockInfo() + if infoErr != nil { + err = multierror.Append(err, infoErr) + } + + lockErr := &state.LockError{ + Err: err, + Info: lockInfo, + } + return "", lockErr + } + + return info.ID, nil +} + +func (c *RemoteClient) getMD5() ([]byte, error) { + if c.ddbTable == "" { + return nil, nil + } + + getParams := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + }, + ProjectionExpression: aws.String("LockID, Digest"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(getParams) + if err != nil { + return nil, err + } + + var val string + if v, ok := resp.Item["Digest"]; ok && v.S != nil { + val = *v.S + } + + sum, err := hex.DecodeString(val) + if err != nil || len(sum) != md5.Size { + return nil, errors.New("invalid md5") + } + + return sum, nil +} + +// store the hash of the state to that clients can check for stale state files. 
+func (c *RemoteClient) putMD5(sum []byte) error { + if c.ddbTable == "" { + return nil + } + + if len(sum) != md5.Size { + return errors.New("invalid payload md5") + } + + putParams := &dynamodb.PutItemInput{ + Item: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + "Digest": {S: aws.String(hex.EncodeToString(sum))}, + }, + TableName: aws.String(c.ddbTable), + } + _, err := c.dynClient.PutItem(putParams) + if err != nil { + log.Printf("[WARN] failed to record state serial in dynamodb: %s", err) + } + + return nil +} + +// remove the hash value for a deleted state +func (c *RemoteClient) deleteMD5() error { + if c.ddbTable == "" { + return nil + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath() + stateIDSuffix)}, + }, + TableName: aws.String(c.ddbTable), + } + if _, err := c.dynClient.DeleteItem(params); err != nil { + return err + } + return nil +} + +func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) { + getParams := &dynamodb.GetItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + ProjectionExpression: aws.String("LockID, Info"), + TableName: aws.String(c.ddbTable), + ConsistentRead: aws.Bool(true), + } + + resp, err := c.dynClient.GetItem(getParams) + if err != nil { + return nil, err + } + + var infoData string + if v, ok := resp.Item["Info"]; ok && v.S != nil { + infoData = *v.S + } + + lockInfo := &state.LockInfo{} + err = json.Unmarshal([]byte(infoData), lockInfo) + if err != nil { + return nil, err + } + + return lockInfo, nil +} + +func (c *RemoteClient) Unlock(id string) error { + if c.ddbTable == "" { + return nil + } + + lockErr := &state.LockError{} + + // TODO: store the path and lock ID in separate fields, and have proper + // projection expression only delete the lock if both match, rather than + // checking the ID from the info field first. + lockInfo, err := c.getLockInfo() + if err != nil { + lockErr.Err = fmt.Errorf("failed to retrieve lock info: %s", err) + return lockErr + } + lockErr.Info = lockInfo + + if lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock id %q does not match existing lock", id) + return lockErr + } + + params := &dynamodb.DeleteItemInput{ + Key: map[string]*dynamodb.AttributeValue{ + "LockID": {S: aws.String(c.lockPath())}, + }, + TableName: aws.String(c.ddbTable), + } + _, err = c.dynClient.DeleteItem(params) + + if err != nil { + lockErr.Err = err + return lockErr + } + return nil +} + +func (c *RemoteClient) lockPath() string { + return fmt.Sprintf("%s/%s", c.bucketName, c.path) +} + +const errBadChecksumFmt = `state data in S3 does not have the expected content. + +This may be caused by unusually long delays in S3 processing a previous state +update. Please wait for a minute or two and try again. If this problem +persists, and neither S3 nor DynamoDB are experiencing an outage, you may need +to manually verify the remote state and update the Digest value stored in the +DynamoDB table to the following value: %x +` + +const errS3NoSuchBucket = `S3 bucket does not exist. + +The referenced S3 bucket must have been previously created. If the S3 bucket +was created within the last minute, please wait for a minute or two and try +again. 
+ +Error: %s +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go new file mode 100644 index 00000000..b2dd7c21 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend.go @@ -0,0 +1,436 @@ +package swift + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/helper/schema" + tf_openstack "github.com/terraform-providers/terraform-provider-openstack/openstack" +) + +// New creates a new backend for Swift remote state. +func New() backend.Backend { + s := &schema.Backend{ + Schema: map[string]*schema.Schema{ + "auth_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OS_AUTH_URL", ""), + Description: descriptions["auth_url"], + }, + + "user_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_USER_ID", ""), + Description: descriptions["user_name"], + }, + + "user_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_USERNAME", ""), + Description: descriptions["user_name"], + }, + + "application_credential_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_ID", ""), + Description: descriptions["application_credential_id"], + }, + + "application_credential_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_NAME", ""), + Description: descriptions["application_credential_name"], + }, + + "application_credential_secret": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_APPLICATION_CREDENTIAL_SECRET", ""), + Description: descriptions["application_credential_secret"], + }, + + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "OS_TENANT_ID", + "OS_PROJECT_ID", + }, ""), + Description: descriptions["tenant_id"], + }, + + "tenant_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "OS_TENANT_NAME", + "OS_PROJECT_NAME", + }, ""), + Description: descriptions["tenant_name"], + }, + + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Sensitive: true, + DefaultFunc: schema.EnvDefaultFunc("OS_PASSWORD", ""), + Description: descriptions["password"], + }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "OS_TOKEN", + "OS_AUTH_TOKEN", + }, ""), + Description: descriptions["token"], + }, + + "user_domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_USER_DOMAIN_NAME", ""), + Description: descriptions["user_domain_name"], + }, + + "user_domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_USER_DOMAIN_ID", ""), + Description: descriptions["user_domain_id"], + }, + + "project_domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: 
schema.EnvDefaultFunc("OS_PROJECT_DOMAIN_NAME", ""), + Description: descriptions["project_domain_name"], + }, + + "project_domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_PROJECT_DOMAIN_ID", ""), + Description: descriptions["project_domain_id"], + }, + + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_DOMAIN_ID", ""), + Description: descriptions["domain_id"], + }, + + "domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_DOMAIN_NAME", ""), + Description: descriptions["domain_name"], + }, + + "default_domain": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_DEFAULT_DOMAIN", "default"), + Description: descriptions["default_domain"], + }, + + "cloud": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_CLOUD", ""), + Description: descriptions["cloud"], + }, + + "region_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("OS_REGION_NAME", ""), + Description: descriptions["region_name"], + }, + + "insecure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_INSECURE", nil), + Description: descriptions["insecure"], + }, + + "endpoint_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_ENDPOINT_TYPE", ""), + }, + + "cacert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_CACERT", ""), + Description: descriptions["cacert_file"], + }, + + "cert": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_CERT", ""), + Description: descriptions["cert"], + }, + + "key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("OS_KEY", ""), + Description: descriptions["key"], + }, + + "path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["path"], + Deprecated: "Use container instead", + ConflictsWith: []string{"container"}, + }, + + "container": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["container"], + }, + + "archive_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["archive_path"], + Deprecated: "Use archive_container instead", + ConflictsWith: []string{"archive_container"}, + }, + + "archive_container": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["archive_container"], + }, + + "expire_after": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["expire_after"], + }, + + "lock": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Description: "Lock state access", + Default: true, + }, + + "state_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Description: descriptions["state_name"], + Default: "tfstate.tf", + }, + }, + } + + result := &Backend{Backend: s} + result.Backend.ConfigureFunc = result.configure + return result +} + +var descriptions map[string]string + +func init() { + descriptions = map[string]string{ + "auth_url": "The Identity authentication URL.", + + "user_name": "Username to login with.", + + "user_id": "User ID to login with.", + + 
"application_credential_id": "Application Credential ID to login with.", + + "application_credential_name": "Application Credential name to login with.", + + "application_credential_secret": "Application Credential secret to login with.", + + "tenant_id": "The ID of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "tenant_name": "The name of the Tenant (Identity v2) or Project (Identity v3)\n" + + "to login with.", + + "password": "Password to login with.", + + "token": "Authentication token to use as an alternative to username/password.", + + "user_domain_name": "The name of the domain where the user resides (Identity v3).", + + "user_domain_id": "The ID of the domain where the user resides (Identity v3).", + + "project_domain_name": "The name of the domain where the project resides (Identity v3).", + + "project_domain_id": "The ID of the domain where the proejct resides (Identity v3).", + + "domain_id": "The ID of the Domain to scope to (Identity v3).", + + "domain_name": "The name of the Domain to scope to (Identity v3).", + + "default_domain": "The name of the Domain ID to scope to if no other domain is specified. Defaults to `default` (Identity v3).", + + "cloud": "An entry in a `clouds.yaml` file to use.", + + "region_name": "The name of the Region to use.", + + "insecure": "Trust self-signed certificates.", + + "cacert_file": "A Custom CA certificate.", + + "endpoint_type": "The catalog endpoint type to use.", + + "cert": "A client certificate to authenticate with.", + + "key": "A client private key to authenticate with.", + + "path": "Swift container path to use.", + + "container": "Swift container to create", + + "archive_path": "Swift container path to archive state to.", + + "archive_container": "Swift container to archive state to.", + + "expire_after": "Archive object expiry duration.", + + "state_name": "Name of state object in container", + } +} + +type Backend struct { + *schema.Backend + + // Fields below are set from configure + client *gophercloud.ServiceClient + archive bool + archiveContainer string + expireSecs int + container string + lock bool + stateName string +} + +func (b *Backend) configure(ctx context.Context) error { + if b.client != nil { + return nil + } + + // Grab the resource data + data := schema.FromContextBackendConfig(ctx) + config := &tf_openstack.Config{ + CACertFile: data.Get("cacert_file").(string), + ClientCertFile: data.Get("cert").(string), + ClientKeyFile: data.Get("key").(string), + Cloud: data.Get("cloud").(string), + DefaultDomain: data.Get("default_domain").(string), + DomainID: data.Get("domain_id").(string), + DomainName: data.Get("domain_name").(string), + EndpointType: data.Get("endpoint_type").(string), + IdentityEndpoint: data.Get("auth_url").(string), + Password: data.Get("password").(string), + ProjectDomainID: data.Get("project_domain_id").(string), + ProjectDomainName: data.Get("project_domain_name").(string), + Token: data.Get("token").(string), + TenantID: data.Get("tenant_id").(string), + TenantName: data.Get("tenant_name").(string), + UserDomainID: data.Get("user_domain_id").(string), + UserDomainName: data.Get("user_domain_name").(string), + Username: data.Get("user_name").(string), + UserID: data.Get("user_id").(string), + ApplicationCredentialID: data.Get("application_credential_id").(string), + ApplicationCredentialName: data.Get("application_credential_name").(string), + ApplicationCredentialSecret: data.Get("application_credential_secret").(string), + } + + if v, ok := 
data.GetOkExists("insecure"); ok { + insecure := v.(bool) + config.Insecure = &insecure + } + + if err := config.LoadAndValidate(); err != nil { + return err + } + + // Assign state name + b.stateName = data.Get("state_name").(string) + + // Assign Container + b.container = data.Get("container").(string) + if b.container == "" { + // Check deprecated field + b.container = data.Get("path").(string) + } + + // Store the lock information + b.lock = data.Get("lock").(bool) + + // Enable object archiving? + if archiveContainer, ok := data.GetOk("archive_container"); ok { + log.Printf("[DEBUG] Archive_container set, enabling object versioning") + b.archive = true + b.archiveContainer = archiveContainer.(string) + } else if archivePath, ok := data.GetOk("archive_path"); ok { + log.Printf("[DEBUG] Archive_path set, enabling object versioning") + b.archive = true + b.archiveContainer = archivePath.(string) + } + + // Enable object expiry? + if expireRaw, ok := data.GetOk("expire_after"); ok { + expire := expireRaw.(string) + log.Printf("[DEBUG] Requested that remote state expires after %s", expire) + + if strings.HasSuffix(expire, "d") { + log.Printf("[DEBUG] Got a days expire after duration. Converting to hours") + days, err := strconv.Atoi(expire[:len(expire)-1]) + if err != nil { + return fmt.Errorf("Error converting expire_after value %s to int: %s", expire, err) + } + + expire = fmt.Sprintf("%dh", days*24) + log.Printf("[DEBUG] Expire after %s hours", expire) + } + + expireDur, err := time.ParseDuration(expire) + if err != nil { + log.Printf("[DEBUG] Error parsing duration %s: %s", expire, err) + return fmt.Errorf("Error parsing expire_after duration '%s': %s", expire, err) + } + log.Printf("[DEBUG] Seconds duration = %d", int(expireDur.Seconds())) + b.expireSecs = int(expireDur.Seconds()) + } + + objClient, err := openstack.NewObjectStorageV1(config.OsClient, gophercloud.EndpointOpts{ + Region: data.Get("region_name").(string), + }) + if err != nil { + return err + } + + b.client = objClient + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go new file mode 100644 index 00000000..13df8838 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/backend_state.go @@ -0,0 +1,208 @@ +package swift + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states" +) + +const ( + objectEnvPrefix = "env-" + delimiter = "/" +) + +func (b *Backend) Workspaces() ([]string, error) { + client := &RemoteClient{ + client: b.client, + container: b.container, + archive: b.archive, + archiveContainer: b.archiveContainer, + expireSecs: b.expireSecs, + lockState: b.lock, + } + + // List our container objects + objectNames, err := client.ListObjectsNames(objectEnvPrefix, delimiter) + + if err != nil { + return nil, err + } + + // Find the envs, we use a map since we can get duplicates with + // path suffixes. 
+ envs := map[string]struct{}{}
+ for _, object := range objectNames {
+ object = strings.TrimPrefix(object, objectEnvPrefix)
+ object = strings.TrimSuffix(object, delimiter)
+
+ // Ignore objects that still contain a "/"
+ // as we don't store states in subdirectories
+ if idx := strings.Index(object, delimiter); idx >= 0 {
+ continue
+ }
+
+ // swift is eventually consistent, thus a deleted object may
+ // be listed in objectList. To ensure consistency, we query
+ // each object with a "newest" arg set to true
+ payload, err := client.get(b.objectName(object))
+ if err != nil {
+ return nil, err
+ }
+ if payload == nil {
+ // object doesn't exist anymore. skipping.
+ continue
+ }
+
+ envs[object] = struct{}{}
+ }
+
+ result := make([]string, 1, len(envs)+1)
+ result[0] = backend.DefaultStateName
+
+ for k := range envs {
+ result = append(result, k)
+ }
+
+ return result, nil
+}
+
+func (b *Backend) DeleteWorkspace(name string) error {
+ if name == backend.DefaultStateName || name == "" {
+ return fmt.Errorf("can't delete default state")
+ }
+
+ client := &RemoteClient{
+ client: b.client,
+ container: b.container,
+ archive: b.archive,
+ archiveContainer: b.archiveContainer,
+ expireSecs: b.expireSecs,
+ objectName: b.objectName(name),
+ lockState: b.lock,
+ }
+
+ // Delete our object
+ err := client.Delete()
+
+ return err
+}
+
+func (b *Backend) StateMgr(name string) (state.State, error) {
+ if name == "" {
+ return nil, fmt.Errorf("missing state name")
+ }
+
+ client := &RemoteClient{
+ client: b.client,
+ container: b.container,
+ archive: b.archive,
+ archiveContainer: b.archiveContainer,
+ expireSecs: b.expireSecs,
+ objectName: b.objectName(name),
+ lockState: b.lock,
+ }
+
+ var stateMgr state.State = &remote.State{Client: client}
+
+ // If we're not locking, disable it
+ if !b.lock {
+ stateMgr = &state.LockDisabled{Inner: stateMgr}
+ }
+
+ // Check to see if this state already exists.
+ // If we're trying to force-unlock a state, we can't take the lock before
+ // fetching the state. If the state doesn't exist, we have to assume this
+ // is a normal create operation, and take the lock at that point.
+ //
+ // If we need to force-unlock, but for some reason the state no longer
+ // exists, the user will have to use openstack tools to manually fix the
+ // situation.
+ existing, err := b.Workspaces()
+ if err != nil {
+ return nil, err
+ }
+
+ exists := false
+ for _, s := range existing {
+ if s == name {
+ exists = true
+ break
+ }
+ }
+
+ // We need to create the object so it's listed by States.
+ if !exists {
+ // the default state always exists
+ if name == backend.DefaultStateName {
+ return stateMgr, nil
+ }
+
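The sentinel-write sequence below depends on the naming scheme implemented by objectName at the bottom of this file: the default workspace maps straight to the configured state_name, and every other workspace is stored under `env-<name>/<state_name>`, which is also what Workspaces above trims back off. An illustrative sketch of that mapping, assuming the default state_name of "tfstate.tf" (the workspace names are made up):

```go
package main

import "fmt"

// Illustrative only: mirrors objectName's mapping with the default
// state_name of "tfstate.tf".
func exampleObjectName(workspace string) string {
	if workspace == "default" { // backend.DefaultStateName
		return "tfstate.tf"
	}
	return fmt.Sprintf("env-%s/%s", workspace, "tfstate.tf")
}

func main() {
	fmt.Println(exampleObjectName("default")) // tfstate.tf
	fmt.Println(exampleObjectName("staging")) // env-staging/tfstate.tf
}
```

+ // Grab a lock, we use this to write an empty state if one doesn't
+ // exist already. We have to write an empty state as a sentinel value
+ // so States() knows it exists.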
+ lockInfo := state.NewLockInfo() + lockInfo.Operation = "init" + lockId, err := stateMgr.Lock(lockInfo) + if err != nil { + return nil, fmt.Errorf("failed to lock state in Swift: %s", err) + } + + // Local helper function so we can call it multiple places + lockUnlock := func(parent error) error { + if err := stateMgr.Unlock(lockId); err != nil { + return fmt.Errorf(strings.TrimSpace(errStateUnlock), lockId, err) + } + + return parent + } + + // Grab the value + if err := stateMgr.RefreshState(); err != nil { + err = lockUnlock(err) + return nil, err + } + + // If we have no state, we have to create an empty state + if v := stateMgr.State(); v == nil { + if err := stateMgr.WriteState(states.NewState()); err != nil { + err = lockUnlock(err) + return nil, err + } + if err := stateMgr.PersistState(); err != nil { + err = lockUnlock(err) + return nil, err + } + } + + // Unlock, the state should now be initialized + if err := lockUnlock(nil); err != nil { + return nil, err + } + } + + return stateMgr, nil +} + +func (b *Backend) objectName(name string) string { + if name != backend.DefaultStateName { + name = fmt.Sprintf("%s%s/%s", objectEnvPrefix, name, b.stateName) + } else { + name = b.stateName + } + + return name +} + +const errStateUnlock = ` +Error unlocking Swift state. Lock ID: %s + +Error: %s + +You may have to force-unlock this state in order to use it again. +The Swift backend acquires a lock during initialization to ensure +the minimum required keys are prepared. +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go new file mode 100644 index 00000000..b0083d43 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote-state/swift/client.go @@ -0,0 +1,535 @@ +package swift + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/json" + "fmt" + "log" + "sync" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + "github.com/gophercloud/gophercloud/pagination" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" +) + +const ( + consistencyTimeout = 15 + + // Suffix that will be appended to state file paths + // when locking + lockSuffix = ".lock" + + // The TTL associated with this lock. + lockTTL = 60 * time.Second + + // The Interval associated with this lock periodic renew. + lockRenewInterval = 30 * time.Second + + // The amount of time we will retry to delete a container waiting for + // the objects to be deleted. + deleteRetryTimeout = 60 * time.Second + + // delay when polling the objects + deleteRetryPollInterval = 5 * time.Second +) + +// RemoteClient implements the Client interface for an Openstack Swift server. +// Implements "state/remote".ClientLocker +type RemoteClient struct { + client *gophercloud.ServiceClient + container string + archive bool + archiveContainer string + expireSecs int + objectName string + + mu sync.Mutex + // lockState is true if we're using locks + lockState bool + + info *state.LockInfo + + // lockCancel cancels the Context use for lockRenewPeriodic, and is + // called when unlocking, or before creating a new lock if the lock is + // lost. 
+ lockCancel context.CancelFunc
+}
+
+func (c *RemoteClient) ListObjectsNames(prefix string, delim string) ([]string, error) {
+ if err := c.ensureContainerExists(); err != nil {
+ return nil, err
+ }
+
+ // List our raw path
+ listOpts := objects.ListOpts{
+ Full: false,
+ Prefix: prefix,
+ Delimiter: delim,
+ }
+
+ result := []string{}
+ pager := objects.List(c.client, c.container, listOpts)
+ // Define an anonymous function to be executed on each page's iteration
+ err := pager.EachPage(func(page pagination.Page) (bool, error) {
+ objectList, err := objects.ExtractNames(page)
+ if err != nil {
+ return false, fmt.Errorf("Error extracting object names from page: %+v", err)
+ }
+ for _, object := range objectList {
+ result = append(result, object)
+ }
+ return true, nil
+ })
+
+ if err != nil {
+ return nil, err
+ }
+
+ return result, nil
+
+}
+
+func (c *RemoteClient) Get() (*remote.Payload, error) {
+ payload, err := c.get(c.objectName)
+
+ // 404 response is to be expected if the object doesn't already exist!
+ if _, ok := err.(gophercloud.ErrDefault404); ok {
+ log.Println("[DEBUG] Object doesn't exist to download.")
+ return nil, nil
+ }
+
+ return payload, err
+}
+
+// swift is eventually consistent. Consistency
+// is ensured by the Get func which will always try
+// to retrieve the most recent object
+func (c *RemoteClient) Put(data []byte) error {
+ if c.expireSecs != 0 {
+ log.Printf("[DEBUG] ExpireSecs = %d", c.expireSecs)
+ return c.put(c.objectName, data, c.expireSecs, "")
+ }
+
+ return c.put(c.objectName, data, -1, "")
+
+}
+
+func (c *RemoteClient) Delete() error {
+ return c.delete(c.objectName)
+}
+
+func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if !c.lockState {
+ return "", nil
+ }
+
+ log.Printf("[DEBUG] Acquiring Lock %#v on %s/%s", info, c.container, c.objectName)
+
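Before the re-lock check below, it is worth spelling out how the lock stays alive: writeLockInfo stores the lock object with DeleteAfter set to lockTTL (60 seconds), and lockRenewPeriodic rewrites it every lockRenewInterval (30 seconds), so a crashed holder loses the lock within about a minute while a healthy one holds it indefinitely. A minimal sketch of such a renew loop, assuming a renew callback that stands in for writeLockInfo (the 95-second lifetime in main is arbitrary):

```go
package main

import (
	"context"
	"log"
	"time"
)

// Sketch of a TTL-refresh loop; renew() stands in for writeLockInfo.
func renewPeriodic(ctx context.Context, renew func() error) {
	ticker := time.NewTicker(30 * time.Second) // lockRenewInterval
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			if err := renew(); err != nil {
				log.Printf("[ERROR] could not renew lock: %s", err)
				return
			}
		case <-ctx.Done(): // canceled by Unlock via lockCancel
			return
		}
	}
}

func main() {
	// Hold the "lock" for ~95s (three renewals), then stop, as Unlock's
	// lockCancel would.
	ctx, cancel := context.WithTimeout(context.Background(), 95*time.Second)
	defer cancel()
	renewPeriodic(ctx, func() error {
		log.Println("[DEBUG] lock renewed") // a real renew would rewrite the lock object
		return nil
	})
}
```

+ // This check is only to ensure we strictly follow the specification.
+ // Terraform shouldn't ever re-lock, so provide errors for the possible
+ // states if this is called.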
+ if c.info != nil {
+ // we have an active lock already
+ return "", fmt.Errorf("state %q already locked", c.lockFilePath())
+ }
+
+ // update the path we're using
+ info.Path = c.lockFilePath()
+
+ if err := c.writeLockInfo(info, lockTTL, "*"); err != nil {
+ return "", err
+ }
+
+ log.Printf("[DEBUG] Acquired Lock %s on %s", info.ID, c.objectName)
+
+ c.info = info
+
+ ctx, cancel := context.WithCancel(context.Background())
+ c.lockCancel = cancel
+
+ // keep the lock renewed
+ go c.lockRenewPeriodic(ctx, info)
+
+ return info.ID, nil
+}
+
+func (c *RemoteClient) Unlock(id string) error {
+ c.mu.Lock()
+
+ if !c.lockState {
+ return nil
+ }
+
+ defer func() {
+ // Cancel the periodic lock renew. The lockCancel func is usually
+ // non-nil here, but can be nil when a second client is used to
+ // ForceUnlock the state based on the same lock ID.
+ if c.lockCancel != nil {
+ c.lockCancel()
+ }
+ c.info = nil
+ c.mu.Unlock()
+ }()
+
+ log.Printf("[DEBUG] Releasing Lock %s on %s", id, c.objectName)
+
+ info, err := c.lockInfo()
+ if err != nil {
+ return c.lockError(fmt.Errorf("failed to retrieve lock info: %s", err), nil)
+ }
+
+ c.info = info
+
+ // conflicting lock
+ if info.ID != id {
+ return c.lockError(fmt.Errorf("lock id %q does not match existing lock", id), info)
+ }
+
+ // Before ordering the deletion of the lock object, stop the
+ // periodic renew.
+ if c.lockCancel != nil {
+ c.lockCancel()
+ }
+
+ if err = c.delete(c.lockFilePath()); err != nil {
+ return c.lockError(fmt.Errorf("error deleting lock with %q: %s", id, err), info)
+ }
+
+ // Swift is eventually consistent; we have to wait until
+ // the lock is effectively deleted to return, or raise
+ // an error if deadline is reached.
+
+ warning := `
+WARNING: Waiting for lock deletion timed out.
+Swift has accepted the deletion order of the lock %s/%s.
+But as it is eventually consistent, complete deletion
+may happen later.
+` + deadline := time.Now().Add(deleteRetryTimeout) + for { + if time.Now().Before(deadline) { + info, err := c.lockInfo() + + // 404 response is to be expected if the lock deletion + // has been processed + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Println("[DEBUG] Lock has been deleted.") + return nil + } + + if err != nil { + return err + } + + // conflicting lock + if info.ID != id { + log.Printf("[DEBUG] Someone else has acquired a lock: %v.", info) + return nil + } + + log.Printf("[DEBUG] Lock is still there, delete again and wait %v.", deleteRetryPollInterval) + c.delete(c.lockFilePath()) + time.Sleep(deleteRetryPollInterval) + continue + } + + return fmt.Errorf(warning, c.container, c.lockFilePath()) + } + +} + +func (c *RemoteClient) get(object string) (*remote.Payload, error) { + log.Printf("[DEBUG] Getting object %s/%s", c.container, object) + result := objects.Download(c.client, c.container, object, objects.DownloadOpts{Newest: true}) + + // Extract any errors from result + _, err := result.Extract() + if err != nil { + return nil, err + } + + bytes, err := result.ExtractContent() + if err != nil { + return nil, err + } + + hash := md5.Sum(bytes) + payload := &remote.Payload{ + Data: bytes, + MD5: hash[:md5.Size], + } + + return payload, nil +} + +func (c *RemoteClient) put(object string, data []byte, deleteAfter int, ifNoneMatch string) error { + log.Printf("[DEBUG] Writing object in %s/%s", c.container, object) + if err := c.ensureContainerExists(); err != nil { + return err + } + + contentType := "application/json" + contentLength := int64(len(data)) + + createOpts := objects.CreateOpts{ + Content: bytes.NewReader(data), + ContentType: contentType, + ContentLength: int64(contentLength), + } + + if deleteAfter >= 0 { + createOpts.DeleteAfter = deleteAfter + } + + if ifNoneMatch != "" { + createOpts.IfNoneMatch = ifNoneMatch + } + + result := objects.Create(c.client, c.container, object, createOpts) + if result.Err != nil { + return result.Err + } + + return nil +} + +func (c *RemoteClient) deleteContainer() error { + log.Printf("[DEBUG] Deleting container %s", c.container) + + warning := ` +WARNING: Waiting for container %s deletion timed out. +It may have been left in your Openstack account and may incur storage charges. 
+error was: %s
+`
+
+ deadline := time.Now().Add(deleteRetryTimeout)
+
+ // Swift is eventually consistent; we have to retry deleting the
+ // container until all of its objects are effectively deleted, or
+ // raise an error if the deadline is reached.
+ for {
+ if time.Now().Before(deadline) {
+ // Remove any objects
+ c.cleanObjects()
+
+ // Delete the container
+ log.Printf("[DEBUG] Deleting container %s", c.container)
+ deleteResult := containers.Delete(c.client, c.container)
+ if deleteResult.Err != nil {
+ // container is not found, thus has been deleted
+ if _, ok := deleteResult.Err.(gophercloud.ErrDefault404); ok {
+ return nil
+ }
+
+ // 409 http error is raised when deleting a container with
+ // remaining objects
+ if respErr, ok := deleteResult.Err.(gophercloud.ErrUnexpectedResponseCode); ok && respErr.Actual == 409 {
+ time.Sleep(deleteRetryPollInterval)
+ log.Printf("[DEBUG] Remaining objects, failed to delete container, retrying...")
+ continue
+ }
+
+ return fmt.Errorf(warning, c.container, deleteResult.Err)
+ }
+ return nil
+ }
+
+ return fmt.Errorf(warning, c.container, "timeout reached")
+ }
+
+}
+
+// Helper function to delete Swift objects within a container
+func (c *RemoteClient) cleanObjects() error {
+ // Get a slice of object names
+ objectNames, err := c.objectNames(c.container)
+ if err != nil {
+ return err
+ }
+
+ for _, object := range objectNames {
+ log.Printf("[DEBUG] Deleting object %s from container %s", object, c.container)
+ result := objects.Delete(c.client, c.container, object, nil)
+ if result.Err == nil {
+ continue
+ }
+
+ // if object is not found, it has already been deleted
+ if _, ok := result.Err.(gophercloud.ErrDefault404); !ok {
+ return fmt.Errorf("Error deleting object %s from container %s: %v", object, c.container, result.Err)
+ }
+ }
+ return nil
+
+}
+
+func (c *RemoteClient) delete(object string) error {
+ log.Printf("[DEBUG] Deleting object %s/%s", c.container, object)
+
+ result := objects.Delete(c.client, c.container, object, nil)
+
+ if result.Err != nil {
+ return result.Err
+ }
+ return nil
+}
+
+func (c *RemoteClient) writeLockInfo(info *state.LockInfo, deleteAfter time.Duration, ifNoneMatch string) error {
+ err := c.put(c.lockFilePath(), info.Marshal(), int(deleteAfter.Seconds()), ifNoneMatch)
+
+ if httpErr, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok && httpErr.Actual == 412 {
+ log.Printf("[DEBUG] Couldn't write lock %s. One already exists.", info.ID)
+ info2, err2 := c.lockInfo()
+ if err2 != nil {
+ return fmt.Errorf("Couldn't read lock info: %v", err2)
+ }
+
+ return c.lockError(err, info2)
+ }
+
+ if err != nil {
+ return c.lockError(err, nil)
+ }
+
+ return nil
+}
+
+func (c *RemoteClient) lockError(err error, conflictingLock *state.LockInfo) *state.LockError {
+ lockErr := &state.LockError{
+ Err: err,
+ Info: conflictingLock,
+ }
+
+ return lockErr
+}
+
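writeLockInfo above is what makes the Swift lock atomic: passing If-None-Match: "*" tells Swift to reject the PUT with a 412 when the lock object already exists, which is then surfaced as a LockError carrying the current holder's info. A hedged sketch of the same pattern using raw gophercloud calls; the container and object names would come from the caller, and the 60-second TTL mirrors lockTTL:

```go
package swiftlock

import (
	"bytes"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
)

// tryLock sketches writeLockInfo's atomic create: If-None-Match: "*"
// makes Swift reject the PUT with 412 when the lock object exists.
func tryLock(client *gophercloud.ServiceClient, container, object string, info []byte) (bool, error) {
	res := objects.Create(client, container, object, objects.CreateOpts{
		Content:     bytes.NewReader(info),
		ContentType: "application/json",
		DeleteAfter: 60, // lockTTL, in seconds
		IfNoneMatch: "*",
	})
	if respErr, ok := res.Err.(gophercloud.ErrUnexpectedResponseCode); ok && respErr.Actual == 412 {
		return false, nil // already locked by someone else
	}
	return res.Err == nil, res.Err
}
```

+// lockInfo reads the lock file, parses its contents and returns the parsed
+// LockInfo struct.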
+func (c *RemoteClient) lockInfo() (*state.LockInfo, error) { + raw, err := c.get(c.lockFilePath()) + if err != nil { + return nil, err + } + + info := &state.LockInfo{} + + if err := json.Unmarshal(raw.Data, info); err != nil { + return nil, err + } + + return info, nil +} + +func (c *RemoteClient) lockRenewPeriodic(ctx context.Context, info *state.LockInfo) error { + log.Printf("[DEBUG] Renew lock %v", info) + + waitDur := lockRenewInterval + lastRenewTime := time.Now() + var lastErr error + for { + if time.Since(lastRenewTime) > lockTTL { + return lastErr + } + select { + case <-time.After(waitDur): + c.mu.Lock() + // Unlock may have released the mu.Lock + // in which case we shouldn't renew the lock + select { + case <-ctx.Done(): + log.Printf("[DEBUG] Stopping Periodic renew of lock %v", info) + return nil + default: + } + + info2, err := c.lockInfo() + if _, ok := err.(gophercloud.ErrDefault404); ok { + log.Println("[DEBUG] Lock has expired trying to reacquire.") + err = nil + } + + if err == nil && (info2 == nil || info.ID == info2.ID) { + info2 = info + log.Printf("[DEBUG] Renewing lock %v.", info) + err = c.writeLockInfo(info, lockTTL, "") + } + + c.mu.Unlock() + + if err != nil { + log.Printf("[ERROR] could not reacquire lock (%v): %s", info, err) + waitDur = time.Second + lastErr = err + continue + } + + // conflicting lock + if info2.ID != info.ID { + return c.lockError(fmt.Errorf("lock id %q does not match existing lock %q", info.ID, info2.ID), info2) + } + + waitDur = lockRenewInterval + lastRenewTime = time.Now() + + case <-ctx.Done(): + log.Printf("[DEBUG] Stopping Periodic renew of lock %s", info.ID) + return nil + } + } +} + +func (c *RemoteClient) lockFilePath() string { + return c.objectName + lockSuffix +} + +func (c *RemoteClient) ensureContainerExists() error { + containerOpts := &containers.CreateOpts{} + + if c.archive { + log.Printf("[DEBUG] Creating archive container %s", c.archiveContainer) + result := containers.Create(c.client, c.archiveContainer, nil) + if result.Err != nil { + log.Printf("[DEBUG] Error creating archive container %s: %s", c.archiveContainer, result.Err) + return result.Err + } + + log.Printf("[DEBUG] Enabling Versioning on container %s", c.container) + containerOpts.VersionsLocation = c.archiveContainer + } + + log.Printf("[DEBUG] Creating container %s", c.container) + result := containers.Create(c.client, c.container, containerOpts) + if result.Err != nil { + return result.Err + } + + return nil +} + +// Helper function to get a list of objects in a Swift container +func (c *RemoteClient) objectNames(container string) (objectNames []string, err error) { + _ = objects.List(c.client, container, nil).EachPage(func(page pagination.Page) (bool, error) { + // Get a slice of object names + names, err := objects.ExtractNames(page) + if err != nil { + return false, fmt.Errorf("Error extracting object names from page: %s", err) + } + for _, object := range names { + objectNames = append(objectNames, object) + } + + return true, nil + }) + return +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend.go new file mode 100644 index 00000000..fc4040d3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend.go @@ -0,0 +1,920 @@ +package remote + +import ( + "context" + "fmt" + "log" + "net/http" + "net/url" + "os" + "sort" + "strings" + "sync" + "time" + + tfe "github.com/hashicorp/go-tfe" + version 
"github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + defaultHostname = "app.terraform.io" + defaultParallelism = 10 + stateServiceID = "state.v2" + tfeServiceID = "tfe.v2.1" +) + +// Remote is an implementation of EnhancedBackend that performs all +// operations in a remote backend. +type Remote struct { + // CLI and Colorize control the CLI output. If CLI is nil then no CLI + // output will be done. If CLIColor is nil then no coloring will be done. + CLI cli.Ui + CLIColor *colorstring.Colorize + + // ShowDiagnostics prints diagnostic messages to the UI. + ShowDiagnostics func(vals ...interface{}) + + // ContextOpts are the base context options to set when initializing a + // new Terraform context. Many of these will be overridden or merged by + // Operation. See Operation for more details. + ContextOpts *terraform.ContextOpts + + // client is the remote backend API client. + client *tfe.Client + + // lastRetry is set to the last time a request was retried. + lastRetry time.Time + + // hostname of the remote backend server. + hostname string + + // organization is the organization that contains the target workspaces. + organization string + + // workspace is used to map the default workspace to a remote workspace. + workspace string + + // prefix is used to filter down a set of workspaces that use a single + // configuration. + prefix string + + // services is used for service discovery + services *disco.Disco + + // local, if non-nil, will be used for all enhanced behavior. This + // allows local behavior with the remote backend functioning as remote + // state storage backend. + local backend.Enhanced + + // forceLocal, if true, will force the use of the local backend. + forceLocal bool + + // opLock locks operations + opLock sync.Mutex +} + +var _ backend.Backend = (*Remote)(nil) + +// New creates a new initialized remote backend. +func New(services *disco.Disco) *Remote { + return &Remote{ + services: services, + } +} + +// ConfigSchema implements backend.Enhanced. 
+func (b *Remote) ConfigSchema() *configschema.Block {
+ return &configschema.Block{
+ Attributes: map[string]*configschema.Attribute{
+ "hostname": {
+ Type: cty.String,
+ Optional: true,
+ Description: schemaDescriptions["hostname"],
+ },
+ "organization": {
+ Type: cty.String,
+ Required: true,
+ Description: schemaDescriptions["organization"],
+ },
+ "token": {
+ Type: cty.String,
+ Optional: true,
+ Description: schemaDescriptions["token"],
+ },
+ },
+
+ BlockTypes: map[string]*configschema.NestedBlock{
+ "workspaces": {
+ Block: configschema.Block{
+ Attributes: map[string]*configschema.Attribute{
+ "name": {
+ Type: cty.String,
+ Optional: true,
+ Description: schemaDescriptions["name"],
+ },
+ "prefix": {
+ Type: cty.String,
+ Optional: true,
+ Description: schemaDescriptions["prefix"],
+ },
+ },
+ },
+ Nesting: configschema.NestingSingle,
+ },
+ },
+ }
+}
+
+// PrepareConfig implements backend.Backend.
+func (b *Remote) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
+ var diags tfdiags.Diagnostics
+
+ if val := obj.GetAttr("organization"); val.IsNull() || val.AsString() == "" {
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ "Invalid organization value",
+ `The "organization" attribute value must not be empty.`,
+ cty.Path{cty.GetAttrStep{Name: "organization"}},
+ ))
+ }
+
+ var name, prefix string
+ if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() {
+ if val := workspaces.GetAttr("name"); !val.IsNull() {
+ name = val.AsString()
+ }
+ if val := workspaces.GetAttr("prefix"); !val.IsNull() {
+ prefix = val.AsString()
+ }
+ }
+
+ // Make sure that we have either a workspace name or a prefix.
+ if name == "" && prefix == "" {
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ "Invalid workspaces configuration",
+ `Either workspace "name" or "prefix" is required.`,
+ cty.Path{cty.GetAttrStep{Name: "workspaces"}},
+ ))
+ }
+
+ // Make sure that only one of workspace name or a prefix is configured.
+ if name != "" && prefix != "" {
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ "Invalid workspaces configuration",
+ `Only one of workspace "name" or "prefix" is allowed.`,
+ cty.Path{cty.GetAttrStep{Name: "workspaces"}},
+ ))
+ }
+
+ return obj, diags
+}
+
+// Configure implements backend.Enhanced.
+func (b *Remote) Configure(obj cty.Value) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ // Get the hostname.
+ if val := obj.GetAttr("hostname"); !val.IsNull() && val.AsString() != "" {
+ b.hostname = val.AsString()
+ } else {
+ b.hostname = defaultHostname
+ }
+
+ // Get the organization.
+ if val := obj.GetAttr("organization"); !val.IsNull() {
+ b.organization = val.AsString()
+ }
+
+ // Get the workspaces configuration block and retrieve the
+ // default workspace name and prefix.
+ if workspaces := obj.GetAttr("workspaces"); !workspaces.IsNull() {
+ if val := workspaces.GetAttr("name"); !val.IsNull() {
+ b.workspace = val.AsString()
+ }
+ if val := workspaces.GetAttr("prefix"); !val.IsNull() {
+ b.prefix = val.AsString()
+ }
+ }
+
+ // Determine if we are forced to use the local backend.
+ b.forceLocal = os.Getenv("TF_FORCE_LOCAL_BACKEND") != ""
+
+ serviceID := tfeServiceID
+ if b.forceLocal {
+ serviceID = stateServiceID
+ }
+
+ // Discover the service URL for this host to confirm that it provides
+ // a remote backend API and to get the version constraints.
+ service, constraints, err := b.discover(serviceID)
+
+ // First check any constraints we might have received.
+ if constraints != nil {
+ diags = diags.Append(b.checkConstraints(constraints))
+ if diags.HasErrors() {
+ return diags
+ }
+ }
+
+ // When we don't have any constraints errors, also check for discovery
+ // errors before we continue.
+ if err != nil {
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ strings.ToUpper(err.Error()[:1])+err.Error()[1:],
+ "", // no description is needed here, the error is clear
+ cty.Path{cty.GetAttrStep{Name: "hostname"}},
+ ))
+ return diags
+ }
+
+ // Retrieve the token for this host as configured in the credentials
+ // section of the CLI Config File.
+ token, err := b.token()
+ if err != nil {
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ strings.ToUpper(err.Error()[:1])+err.Error()[1:],
+ "", // no description is needed here, the error is clear
+ cty.Path{cty.GetAttrStep{Name: "hostname"}},
+ ))
+ return diags
+ }
+
+ // Get the token from the config if no token was configured for this
+ // host in credentials section of the CLI Config File.
+ if token == "" {
+ if val := obj.GetAttr("token"); !val.IsNull() {
+ token = val.AsString()
+ }
+ }
+
+ // Return an error if we still don't have a token at this point.
+ if token == "" {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Required token could not be found",
+ fmt.Sprintf(
+ "Make sure you configured a credentials block for %s in your CLI Config File.",
+ b.hostname,
+ ),
+ ))
+ return diags
+ }
+
+ cfg := &tfe.Config{
+ Address: service.String(),
+ BasePath: service.Path,
+ Token: token,
+ Headers: make(http.Header),
+ RetryLogHook: b.retryLogHook,
+ }
+
+ // Set the version header to the current version.
+ cfg.Headers.Set(tfversion.Header, tfversion.Version)
+
+ // Create the remote backend API client.
+ b.client, err = tfe.NewClient(cfg)
+ if err != nil {
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Failed to create the Terraform Enterprise client",
+ fmt.Sprintf(
+ `The "remote" backend encountered an unexpected error while creating the `+
+ `Terraform Enterprise client: %s.`, err,
+ ),
+ ))
+ return diags
+ }
+
+ // Check if the organization exists by reading its entitlements.
+ entitlements, err := b.client.Organizations.Entitlements(context.Background(), b.organization)
+ if err != nil {
+ if err == tfe.ErrResourceNotFound {
+ err = fmt.Errorf("organization %s does not exist", b.organization)
+ }
+ diags = diags.Append(tfdiags.AttributeValue(
+ tfdiags.Error,
+ "Failed to read organization entitlements",
+ fmt.Sprintf(
+ `The "remote" backend encountered an unexpected error while reading the `+
+ `organization settings: %s.`, err,
+ ),
+ cty.Path{cty.GetAttrStep{Name: "organization"}},
+ ))
+ return diags
+ }
+
+ // Configure a local backend for when we need to run operations locally.
+ b.local = backendLocal.NewWithBackend(b)
+ b.forceLocal = b.forceLocal || !entitlements.Operations
+
+ // Enable retries for server errors as the backend is now fully configured.
+ b.client.RetryServerErrors(true)
+
+ return diags
+}
+
+// discover the remote backend API service URL and version constraints.
+func (b *Remote) discover(serviceID string) (*url.URL, *disco.Constraints, error) {
+ hostname, err := svchost.ForComparison(b.hostname)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ host, err := b.services.Discover(hostname)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ service, err := host.ServiceURL(serviceID)
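Discover here performs Terraform's network service discovery: it fetches the discovery document at https://&lt;hostname&gt;/.well-known/terraform.json and resolves the requested service ID (tfe.v2.1 here, or state.v2 when forced local) to a base URL. A standalone sketch of the same lookup, assuming the default hostname:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	host, err := svchost.ForComparison("app.terraform.io")
	if err != nil {
		log.Fatal(err)
	}

	services := disco.New()
	h, err := services.Discover(host)
	if err != nil {
		log.Fatal(err)
	}

	// Resolve the backend API base URL for the tfe.v2.1 service ID.
	u, err := h.ServiceURL("tfe.v2.1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.String())
}
```

+ // Return the error, unless it's a disco.ErrVersionNotSupported error.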
+ if _, ok := err.(*disco.ErrVersionNotSupported); !ok && err != nil {
+ return nil, nil, err
+ }
+
+ // We purposefully ignore the error and return the previous error, as
+ // checking for version constraints is considered optional.
+ constraints, _ := host.VersionConstraints(serviceID, "terraform")
+
+ return service, constraints, err
+}
+
+// checkConstraints checks service version constraints against our own
+// version and returns rich and informational diagnostics in case any
+// incompatibilities are detected.
+func (b *Remote) checkConstraints(c *disco.Constraints) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ if c == nil || c.Minimum == "" || c.Maximum == "" {
+ return diags
+ }
+
+ // Generate a parsable constraints string.
+ excluding := ""
+ if len(c.Excluding) > 0 {
+ excluding = fmt.Sprintf(", != %s", strings.Join(c.Excluding, ", != "))
+ }
+ constStr := fmt.Sprintf(">= %s%s, <= %s", c.Minimum, excluding, c.Maximum)
+
+ // Create the constraints to check against.
+ constraints, err := version.NewConstraint(constStr)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ // Create the version to check.
+ v, err := version.NewVersion(tfversion.Version)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ // Return if we satisfy all constraints.
+ if constraints.Check(v) {
+ return diags
+ }
+
+ // Find out what action (upgrade/downgrade) we should advise.
+ minimum, err := version.NewVersion(c.Minimum)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ maximum, err := version.NewVersion(c.Maximum)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+
+ var excludes []*version.Version
+ for _, exclude := range c.Excluding {
+ v, err := version.NewVersion(exclude)
+ if err != nil {
+ return diags.Append(checkConstraintsWarning(err))
+ }
+ excludes = append(excludes, v)
+ }
+
+ // Sort all the excludes.
+ sort.Sort(version.Collection(excludes))
+
+ var action, toVersion string
+ switch {
+ case minimum.GreaterThan(v):
+ action = "upgrade"
+ toVersion = ">= " + minimum.String()
+ case maximum.LessThan(v):
+ action = "downgrade"
+ toVersion = "<= " + maximum.String()
+ case len(excludes) > 0:
+ // Get the latest excluded version.
+ action = "upgrade"
+ toVersion = "> " + excludes[len(excludes)-1].String()
+ }
+
+ switch {
+ case len(excludes) == 1:
+ excluding = fmt.Sprintf(", excluding version %s", excludes[0].String())
+ case len(excludes) > 1:
+ var vs []string
+ for _, v := range excludes {
+ vs = append(vs, v.String())
+ }
+ excluding = fmt.Sprintf(", excluding versions %s", strings.Join(vs, ", "))
+ default:
+ excluding = ""
+ }
+
+ summary := fmt.Sprintf("Incompatible Terraform version v%s", v.String())
+ details := fmt.Sprintf(
+ "The configured Terraform Enterprise backend is compatible with Terraform "+
+ "versions >= %s, <= %s%s.", c.Minimum, c.Maximum, excluding,
+ )
+
+ if action != "" && toVersion != "" {
+ summary = fmt.Sprintf("Please %s Terraform to %s", action, toVersion)
+ details += fmt.Sprintf(" Please %s to a supported version and try again.", action)
+ }
+
+ // Return the customized and informational error message.
+ return diags.Append(tfdiags.Sourceless(tfdiags.Error, summary, details))
+}
+
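checkConstraints above turns the discovery metadata into a go-version constraint string of the form ">= min[, != excluded...], <= max" and tests the running Terraform version against it. A small worked example of the same check, with made-up version bounds and one excluded release:

```go
package main

import (
	"fmt"
	"log"

	version "github.com/hashicorp/go-version"
)

func main() {
	// Same shape as the string built in checkConstraints, with
	// hypothetical bounds and one excluded release.
	constraints, err := version.NewConstraint(">= 0.11.0, != 0.11.7, <= 0.11.14")
	if err != nil {
		log.Fatal(err)
	}

	for _, s := range []string{"0.11.7", "0.11.10", "0.12.0"} {
		v, err := version.NewVersion(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(s, constraints.Check(v)) // false, true, false
	}
}
```

+// token returns the token for this host as configured in the credentials
+// section of the CLI Config File. If no token was configured, an empty
+// string will be returned instead.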
+func (b *Remote) token() (string, error) { + hostname, err := svchost.ForComparison(b.hostname) + if err != nil { + return "", err + } + creds, err := b.services.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", b.hostname, err) + return "", nil + } + if creds != nil { + return creds.Token(), nil + } + return "", nil +} + +// retryLogHook is invoked each time a request is retried allowing the +// backend to log any connection issues to prevent data loss. +func (b *Remote) retryLogHook(attemptNum int, resp *http.Response) { + if b.CLI != nil { + // Ignore the first retry to make sure any delayed output will + // be written to the console before we start logging retries. + // + // The retry logic in the TFE client will retry both rate limited + // requests and server errors, but in the remote backend we only + // care about server errors so we ignore rate limit (429) errors. + if attemptNum == 0 || resp.StatusCode == 429 { + // Reset the last retry time. + b.lastRetry = time.Now() + return + } + + if attemptNum == 1 { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(initialRetryError))) + } else { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace( + fmt.Sprintf(repeatedRetryError, time.Since(b.lastRetry).Round(time.Second))))) + } + } +} + +// Workspaces implements backend.Enhanced. +func (b *Remote) Workspaces() ([]string, error) { + if b.prefix == "" { + return nil, backend.ErrWorkspacesNotSupported + } + return b.workspaces() +} + +// workspaces returns a filtered list of remote workspace names. +func (b *Remote) workspaces() ([]string, error) { + options := tfe.WorkspaceListOptions{} + switch { + case b.workspace != "": + options.Search = tfe.String(b.workspace) + case b.prefix != "": + options.Search = tfe.String(b.prefix) + } + + // Create a slice to contain all the names. + var names []string + + for { + wl, err := b.client.Workspaces.List(context.Background(), b.organization, options) + if err != nil { + return nil, err + } + + for _, w := range wl.Items { + if b.workspace != "" && w.Name == b.workspace { + names = append(names, backend.DefaultStateName) + continue + } + if b.prefix != "" && strings.HasPrefix(w.Name, b.prefix) { + names = append(names, strings.TrimPrefix(w.Name, b.prefix)) + } + } + + // Exit the loop when we've seen all pages. + if wl.CurrentPage >= wl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = wl.NextPage + } + + // Sort the result so we have consistent output. + sort.StringSlice(names).Sort() + + return names, nil +} + +// DeleteWorkspace implements backend.Enhanced. +func (b *Remote) DeleteWorkspace(name string) error { + if b.workspace == "" && name == backend.DefaultStateName { + return backend.ErrDefaultWorkspaceNotSupported + } + if b.prefix == "" && name != backend.DefaultStateName { + return backend.ErrWorkspacesNotSupported + } + + // Configure the remote workspace name. + switch { + case name == backend.DefaultStateName: + name = b.workspace + case b.prefix != "" && !strings.HasPrefix(name, b.prefix): + name = b.prefix + name + } + + client := &remoteClient{ + client: b.client, + organization: b.organization, + workspace: &tfe.Workspace{ + Name: name, + }, + } + + return client.Delete() +} + +// StateMgr implements backend.Enhanced. 
+func (b *Remote) StateMgr(name string) (state.State, error) {
+ if b.workspace == "" && name == backend.DefaultStateName {
+ return nil, backend.ErrDefaultWorkspaceNotSupported
+ }
+ if b.prefix == "" && name != backend.DefaultStateName {
+ return nil, backend.ErrWorkspacesNotSupported
+ }
+
+ // Configure the remote workspace name.
+ switch {
+ case name == backend.DefaultStateName:
+ name = b.workspace
+ case b.prefix != "" && !strings.HasPrefix(name, b.prefix):
+ name = b.prefix + name
+ }
+
+ workspace, err := b.client.Workspaces.Read(context.Background(), b.organization, name)
+ if err != nil && err != tfe.ErrResourceNotFound {
+ return nil, fmt.Errorf("Failed to retrieve workspace %s: %v", name, err)
+ }
+
+ if err == tfe.ErrResourceNotFound {
+ options := tfe.WorkspaceCreateOptions{
+ Name: tfe.String(name),
+ }
+
+ // We only set the Terraform Version for the new workspace if this is
+ // a release candidate or a final release.
+ if tfversion.Prerelease == "" || strings.HasPrefix(tfversion.Prerelease, "rc") {
+ options.TerraformVersion = tfe.String(tfversion.String())
+ }
+
+ workspace, err = b.client.Workspaces.Create(context.Background(), b.organization, options)
+ if err != nil {
+ return nil, fmt.Errorf("Error creating workspace %s: %v", name, err)
+ }
+ }
+
+ client := &remoteClient{
+ client: b.client,
+ organization: b.organization,
+ workspace: workspace,
+
+ // This is optionally set during Terraform Enterprise runs.
+ runID: os.Getenv("TFE_RUN_ID"),
+ }
+
+ return &remote.State{Client: client}, nil
+}
+
+// Operation implements backend.Enhanced.
+func (b *Remote) Operation(ctx context.Context, op *backend.Operation) (*backend.RunningOperation, error) {
+ // Get the remote workspace name.
+ name := op.Workspace
+ switch {
+ case op.Workspace == backend.DefaultStateName:
+ name = b.workspace
+ case b.prefix != "" && !strings.HasPrefix(op.Workspace, b.prefix):
+ name = b.prefix + op.Workspace
+ }
+
+ // Retrieve the workspace for this operation.
+ w, err := b.client.Workspaces.Read(ctx, b.organization, name)
+ if err != nil {
+ switch err {
+ case context.Canceled:
+ return nil, err
+ case tfe.ErrResourceNotFound:
+ return nil, fmt.Errorf(
+ "workspace %s not found\n\n"+
+ "The configured \"remote\" backend returns '404 Not Found' errors for resources\n"+
+ "that do not exist, as well as for resources that a user doesn't have access\n"+
+ "to. When the resource does exist, please check the rights for the used token.",
+ name,
+ )
+ default:
+ return nil, fmt.Errorf(
+ "%s\n\n"+
+ "The configured \"remote\" backend encountered an unexpected error. Sometimes\n"+
+ "this is caused by network connection problems, in which case you could retry\n"+
+ "the command. If the issue persists please open a support ticket to get help\n"+
+ "resolving the problem.",
+ err,
+ )
+ }
+ }
+
+ // Check if we need to use the local backend to run the operation.
+ if b.forceLocal || !w.Operations {
+ return b.local.Operation(ctx, op)
+ }
+
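The name mapping used in StateMgr, DeleteWorkspace, and Operation is the same three-way switch: the local default workspace maps to the configured workspace name, and any other local name gets the configured prefix prepended unless it already carries it. A tiny illustration of that mapping; the prefix and workspace names are invented:

```go
package main

import (
	"fmt"
	"strings"
)

// remoteName mirrors the switch used by the remote backend, with
// an example prefix configuration.
func remoteName(local, workspace, prefix string) string {
	switch {
	case local == "default": // backend.DefaultStateName
		return workspace
	case prefix != "" && !strings.HasPrefix(local, prefix):
		return prefix + local
	}
	return local
}

func main() {
	fmt.Println(remoteName("default", "networking", "")) // networking
	fmt.Println(remoteName("staging", "", "app-"))       // app-staging
	fmt.Println(remoteName("app-staging", "", "app-"))   // app-staging
}
```

+ // Set the remote workspace name.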
+ + // Set the remote workspace name. + op.Workspace = w.Name + + // Determine the function to call for our operation. + var f func(context.Context, context.Context, *backend.Operation, *tfe.Workspace) (*tfe.Run, error) + switch op.Type { + case backend.OperationTypePlan: + f = b.opPlan + case backend.OperationTypeApply: + f = b.opApply + default: + return nil, fmt.Errorf( + "\n\nThe \"remote\" backend does not support the %q operation.", op.Type) + } + + // Lock + b.opLock.Lock() + + // Build our running operation. + // The runningCtx is only used to block until the operation returns. + runningCtx, done := context.WithCancel(context.Background()) + runningOp := &backend.RunningOperation{ + Context: runningCtx, + PlanEmpty: true, + } + + // stopCtx wraps the context passed in, and is used to signal a graceful Stop. + stopCtx, stop := context.WithCancel(ctx) + runningOp.Stop = stop + + // cancelCtx is used to cancel the operation immediately, usually + // indicating that the process is exiting. + cancelCtx, cancel := context.WithCancel(context.Background()) + runningOp.Cancel = cancel + + // Do it. + go func() { + defer done() + defer stop() + defer cancel() + + defer b.opLock.Unlock() + + r, opErr := f(stopCtx, cancelCtx, op, w) + if opErr != nil && opErr != context.Canceled { + b.ReportResult(runningOp, opErr) + return + } + + if r == nil && opErr == context.Canceled { + runningOp.Result = backend.OperationFailure + return + } + + if r != nil { + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + b.ReportResult(runningOp, generalError("Failed to retrieve run", err)) + return + } + + // Record if there are any changes. + runningOp.PlanEmpty = !r.HasChanges + + if opErr == context.Canceled { + if err := b.cancel(cancelCtx, op, r); err != nil { + b.ReportResult(runningOp, generalError("Failed to cancel run", err)) + return + } + } + + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + runningOp.Result = backend.OperationFailure + } + } + }() + + // Return the running operation. + return runningOp, nil +} + +func (b *Remote) cancel(cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if r.Actions.IsCancelable { + // Only ask if the remote operation should be canceled + // if the auto approve flag is not set. + if !op.AutoApprove { + v, err := op.UIIn.Input(cancelCtx, &terraform.InputOpts{ + Id: "cancel", + Query: "\nDo you want to cancel the remote operation?", + Description: "Only 'yes' will be accepted to cancel.", + }) + if err != nil { + return generalError("Failed asking to cancel", err) + } + if v != "yes" { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationNotCanceled))) + } + return nil + } + } else { + if b.CLI != nil { + // Insert a blank line to separate the outputs. + b.CLI.Output("") + } + } + + // Try to cancel the remote operation. + err := b.client.Runs.Cancel(cancelCtx, r.ID, tfe.RunCancelOptions{}) + if err != nil { + return generalError("Failed to cancel run", err) + } + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(operationCanceled))) + } + } + + return nil +}
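+ +// NOTE (editorial sketch, not part of the upstream file): the commented +// snippet below illustrates how a caller is expected to drive the +// RunningOperation that Operation returns: wait on Context, and use Stop +// (graceful) or Cancel (immediate) to interrupt it. Only identifiers defined +// by this package and the backend package are used; the interrupt channel is +// hypothetical. +// +// op, err := b.Operation(ctx, &backend.Operation{Type: backend.OperationTypePlan}) +// if err != nil { +// // handle the error +// } +// go func() { +// <-interrupt // first interrupt: request a graceful stop +// op.Stop() +// <-interrupt // second interrupt: abort immediately +// op.Cancel() +// }() +// <-op.Context.Done() // block until the operation has finished +// if op.Result != backend.OperationSuccess { +// // inspect the reported diagnostics +// }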
+ +// ReportResult is a helper for the common chore of setting the status of +// a running operation and showing any diagnostics produced during that +// operation. +// +// If the given diagnostics contains errors then the operation's result +// will be set to backend.OperationFailure. It will be set to +// backend.OperationSuccess otherwise. It will then use b.ShowDiagnostics +// to show the given diagnostics before returning. +// +// Callers should feel free to do each of these operations separately in +// more complex cases where e.g. diagnostics are interleaved with other +// output, but terminating immediately after reporting error diagnostics is +// common and can be expressed concisely via this method. +func (b *Remote) ReportResult(op *backend.RunningOperation, err error) { + var diags tfdiags.Diagnostics + + diags = diags.Append(err) + if diags.HasErrors() { + op.Result = backend.OperationFailure + } else { + op.Result = backend.OperationSuccess + } + + if b.ShowDiagnostics != nil { + b.ShowDiagnostics(diags) + } else { + // Shouldn't generally happen, but if it does then we'll at least + // make some noise in the logs to help us spot it. + if len(diags) != 0 { + log.Printf( + "[ERROR] Remote backend needs to report diagnostics but ShowDiagnostics is not set:\n%s", + diags.ErrWithWarnings(), + ) + } + } +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so is +// useful as a helper to wrap any potentially colored strings. +// +// TODO SvH: Rename this back to Colorize as soon as we can pass -no-color. +func (b *Remote) cliColorize() *colorstring.Colorize { + if b.CLIColor != nil { + return b.CLIColor + } + + return &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + } +} + +func generalError(msg string, err error) error { + var diags tfdiags.Diagnostics + + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + + switch err { + case context.Canceled: + return err + case tfe.ErrResourceNotFound: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend returns '404 Not Found' errors for resources `+ + `that do not exist, as well as for resources that a user doesn't have access `+ + `to. If the resource does exist, please check the rights for the used token.`, + )) + return diags.Err() + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("%s: %v", msg, err), + `The configured "remote" backend encountered an unexpected error. Sometimes `+ + `this is caused by network connection problems, in which case you could retry `+ + `the command. If the issue persists please open a support ticket to get help `+ + `resolving the problem.`, + )) + return diags.Err() + } +} + +func checkConstraintsWarning(err error) tfdiags.Diagnostic { + return tfdiags.Sourceless( + tfdiags.Warning, + fmt.Sprintf("Failed to check version constraints: %v", err), + "Checking version constraints is considered optional, but this is an "+ + "unexpected error which should be reported.", + ) +} + +// The newline in this error is to make it look good in the CLI! +const initialRetryError = ` +[reset][yellow]There was an error connecting to the remote backend. Please do not exit +Terraform to prevent data loss! Trying to restore the connection... +[reset] +` + +const repeatedRetryError = ` +[reset][yellow]Still trying to restore the connection...
(%s elapsed)[reset] +` + +const operationCanceled = ` +[reset][red]The remote operation was successfully cancelled.[reset] +` + +const operationNotCanceled = ` +[reset][red]The remote operation was not cancelled.[reset] +` + +var schemaDescriptions = map[string]string{ + "hostname": "The remote backend hostname to connect to (defaults to app.terraform.io).", + "organization": "The name of the organization containing the targeted workspace(s).", + "token": "The token used to authenticate with the remote backend. If credentials for the\n" + + "host are configured in the CLI Config File, then those will be used instead.", + "name": "A workspace name used to map the default workspace to a named remote workspace.\n" + + "When configured only the default workspace can be used. This option conflicts\n" + + "with \"prefix\"", + "prefix": "A prefix used to filter workspaces using a single configuration. New workspaces\n" + + "will automatically be prefixed with this prefix. If omitted only the default\n" + + "workspace can be used. This option conflicts with \"name\"", +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_apply.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_apply.go new file mode 100644 index 00000000..6e187fc3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_apply.go @@ -0,0 +1,249 @@ +package remote + +import ( + "bufio" + "context" + "fmt" + "io" + "log" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Apply operation") + + var diags tfdiags.Diagnostics + + // We should remove the `CanUpdate` part of this test, but for now + // (to remain compatible with tfe.v2.1) we'll leave it in here. + if !w.Permissions.CanUpdate && !w.Permissions.CanQueueApply { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to apply changes", + "The provided credentials have insufficient rights to apply changes. 
In order "+ + "to apply changes at least write permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if w.VCSRepo != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Apply not allowed for workspaces with a VCS connection", + "A workspace that is connected to a VCS requires the VCS-driven workflow "+ + "to ensure that the VCS remains the single source of truth.", + )) + return nil, diags.Err() + } + + if op.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if !op.PlanRefresh { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Applying without refresh is currently not supported", + `Currently the "remote" backend will always do an in-memory refresh of `+ + `the Terraform state prior to generating the plan.`, + )) + } + + if op.Targets != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is currently not supported", + `The "remote" backend does not support resource targeting at this time.`, + )) + } + + variables, parseDiags := b.parseVariableValues(op) + diags = diags.Append(parseDiags) + + if len(variables) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && !op.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Apply requires configuration to be present. Applying without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run 'terraform destroy' which `+ + `does not require any configuration files.`, + )) + } + + // Return if there are any errors. + if diags.HasErrors() { + return nil, diags.Err() + } + + // Run the plan phase. + r, err := b.plan(stopCtx, cancelCtx, op, w) + if err != nil { + return r, err + } + + // This check is also performed in the plan method to determine if + // the policies should be checked, but we need to check the values + // here again to determine if we are done and should return. + if !r.HasChanges || r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run cannot be confirmed. 
+ if !w.AutoApply && !r.Actions.IsConfirmable { + return r, nil + } + + // Since we already checked the permissions before creating the run + // this should never happen. But it doesn't hurt to keep this in as + // a safeguard for any unexpected situations. + if !w.AutoApply && !r.Permissions.CanApply { + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.Destroy { + return r, generalError("Failed to discard destroy", err) + } + return r, generalError("Failed to discard apply", err) + } + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to approve the pending changes", + fmt.Sprintf("There are pending changes, but the provided credentials have "+ + "insufficient rights to approve them. The run will be discarded to prevent "+ + "it from blocking the queue waiting for external approval. To queue a run "+ + "that can be approved by someone else, please use the 'Queue Plan' button in "+ + "the web UI:\nhttps://%s/app/%s/%s/runs", b.hostname, b.organization, op.Workspace), + )) + return r, diags.Err() + } + + mustConfirm := (op.UIIn != nil && op.UIOut != nil) && + ((op.Destroy && (!op.DestroyForce && !op.AutoApprove)) || (!op.Destroy && !op.AutoApprove)) + + if !w.AutoApply { + if mustConfirm { + opts := &terraform.InputOpts{Id: "approve"} + + if op.Destroy { + opts.Query = "\nDo you really want to destroy all resources in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will destroy all your managed infrastructure, as shown above.\n" + + "There is no undo. Only 'yes' will be accepted to confirm." + } else { + opts.Query = "\nDo you want to perform these actions in workspace \"" + op.Workspace + "\"?" + opts.Description = "Terraform will perform the actions described above.\n" + + "Only 'yes' will be accepted to approve." + } + + err = b.confirm(stopCtx, op, opts, r, "yes") + if err != nil && err != errRunApproved { + return r, err + } + } + + if err != errRunApproved { + if err = b.client.Runs.Apply(stopCtx, r.ID, tfe.RunApplyOptions{}); err != nil { + return r, generalError("Failed to approve the apply command", err) + } + } + } + + // If we don't need to ask for confirmation, insert a blank + // line to separate the outputs. + if w.AutoApply || !mustConfirm { + if b.CLI != nil { + b.CLI.Output("") + } + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "apply", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Applies.Logs(stopCtx, r.Apply.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + skip := 0 + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + // Skip the first 3 lines to prevent duplicate output. + if skip < 3 { + skip++ + continue + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + return r, nil +} + +const applyDefaultHeader = ` +[reset][yellow]Running apply in the remote backend. Output will stream here. Pressing Ctrl-C +will cancel the remote apply if it's still pending. If the apply started, it +will stop streaming the logs, but will not stop the apply running remotely.[reset] + +Preparing the remote apply... +`
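+ +// NOTE (editorial worked example, not part of the upstream files): the +// backend_common.go file below defines backoff(min, max, iter), which grows +// as math.Pow(2, float64(iter)/5) * min milliseconds and is capped at max. +// With the arguments waitForRun passes, backoff(1000, 3000, i), the polling +// interval therefore develops roughly as: +// +// i = 0 -> 1000ms +// i = 4 -> ~1741ms +// i = 5 -> 2000ms +// i = 7 -> ~2639ms +// i >= 8 -> 3000ms (capped)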
diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_common.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_common.go new file mode 100644 index 00000000..4e64830f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_common.go @@ -0,0 +1,438 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "math" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +var ( + errApplyDiscarded = errors.New("Apply discarded.") + errDestroyDiscarded = errors.New("Destroy discarded.") + errRunApproved = errors.New("approved using the UI or API") + errRunDiscarded = errors.New("discarded using the UI or API") + errRunOverridden = errors.New("overridden using the UI or API") +) + +// backoff will perform exponential backoff based on the iteration and +// limited by the provided min and max (in milliseconds) durations. +func backoff(min, max float64, iter int) time.Duration { + backoff := math.Pow(2, float64(iter)/5) * min + if backoff > max { + backoff = max + } + return time.Duration(backoff) * time.Millisecond +} + +func (b *Remote) waitForRun(stopCtx, cancelCtx context.Context, op *backend.Operation, opType string, r *tfe.Run, w *tfe.Workspace) (*tfe.Run, error) { + started := time.Now() + updated := started + for i := 0; ; i++ { + select { + case <-stopCtx.Done(): + return r, stopCtx.Err() + case <-cancelCtx.Done(): + return r, cancelCtx.Err() + case <-time.After(backoff(1000, 3000, i)): + // Timer up, show status + } + + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is no longer pending. + if r.Status != tfe.RunPending && r.Status != tfe.RunConfirmed { + if i == 0 && opType == "plan" && b.CLI != nil { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf("Waiting for the %s to start...\n", opType))) + } + if i > 0 && b.CLI != nil { + // Insert a blank line to separate the outputs. + b.CLI.Output("") + } + return r, nil + } + + // Check if 30 seconds have passed since the last update. + current := time.Now() + if b.CLI != nil && (i == 0 || current.Sub(updated).Seconds() > 30) { + updated = current + position := 0 + elapsed := "" + + // Calculate and set the elapsed time. + if i > 0 { + elapsed = fmt.Sprintf( + " (%s elapsed)", current.Sub(started).Truncate(30*time.Second)) + } + + // Retrieve the workspace used to run this operation in. + w, err = b.client.Workspaces.Read(stopCtx, b.organization, w.Name) + if err != nil { + return nil, generalError("Failed to retrieve workspace", err) + } + + // If the workspace is locked, the run will not be queued and we can + // update the status without making any expensive calls. + if w.Locked && w.CurrentRun != nil { + cr, err := b.client.Runs.Read(stopCtx, w.CurrentRun.ID) + if err != nil { + return r, generalError("Failed to retrieve current run", err) + } + if cr.Status == tfe.RunPending { + b.CLI.Output(b.Colorize().Color( + "Waiting for the manually locked workspace to be unlocked..." + elapsed)) + continue + } + } + + // Skip checking the workspace queue when we are the current run.
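+ // Otherwise, walk the run list and count the unfinished runs queued + // ahead of this one; that count becomes our position in the workspace queue.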
+ if w.CurrentRun == nil || w.CurrentRun.ID != r.ID { + found := false + options := tfe.RunListOptions{} + runlist: + for { + rl, err := b.client.Runs.List(stopCtx, w.ID, options) + if err != nil { + return r, generalError("Failed to retrieve run list", err) + } + + // Loop through all runs to calculate the workspace queue position. + for _, item := range rl.Items { + if !found { + if r.ID == item.ID { + found = true + } + continue + } + + // If the run is in a final state, ignore it and continue. + switch item.Status { + case tfe.RunApplied, tfe.RunCanceled, tfe.RunDiscarded, tfe.RunErrored: + continue + case tfe.RunPlanned: + if op.Type == backend.OperationTypePlan { + continue + } + } + + // Increase the workspace queue position. + position++ + + // Stop searching when we reached the current run. + if w.CurrentRun != nil && w.CurrentRun.ID == item.ID { + break runlist + } + } + + // Exit the loop when we've seen all pages. + if rl.CurrentPage >= rl.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rl.NextPage + } + + if position > 0 { + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d run(s) to finish before being queued...%s", + position, + elapsed, + ))) + continue + } + } + + options := tfe.RunQueueOptions{} + search: + for { + rq, err := b.client.Organizations.RunQueue(stopCtx, b.organization, options) + if err != nil { + return r, generalError("Failed to retrieve queue", err) + } + + // Search through all queued items to find our run. + for _, item := range rq.Items { + if r.ID == item.ID { + position = item.PositionInQueue + break search + } + } + + // Exit the loop when we've seen all pages. + if rq.CurrentPage >= rq.TotalPages { + break + } + + // Update the page number to get the next page. + options.PageNumber = rq.NextPage + } + + if position > 0 { + c, err := b.client.Organizations.Capacity(stopCtx, b.organization) + if err != nil { + return r, generalError("Failed to retrieve capacity", err) + } + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for %d queued run(s) to finish before starting...%s", + position-c.Running, + elapsed, + ))) + continue + } + + b.CLI.Output(b.Colorize().Color(fmt.Sprintf( + "Waiting for the %s to start...%s", opType, elapsed))) + } + } +} + +func (b *Remote) parseVariableValues(op *backend.Operation) (terraform.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := make(terraform.InputValues) + + // Load the configuration using the caller-provided configuration loader. + config, _, configDiags := op.ConfigLoader.LoadConfigWithSnapshot(op.ConfigDir) + diags = diags.Append(configDiags) + if diags.HasErrors() { + return nil, diags + } + + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, diags + } + + // Save only the explicitly defined variables. 
+ for k, v := range variables { + switch v.SourceType { + case terraform.ValueFromCLIArg, terraform.ValueFromNamedFile: + result[k] = v + } + } + + return result, diags +} + +func (b *Remote) checkPolicy(stopCtx, cancelCtx context.Context, op *backend.Operation, r *tfe.Run) error { + if b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------\n") + } + for i, pc := range r.PolicyChecks { + logs, err := b.client.PolicyChecks.Logs(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + // Retrieve the policy check to get its current status. + pc, err := b.client.PolicyChecks.Read(stopCtx, pc.ID) + if err != nil { + return generalError("Failed to retrieve policy check", err) + } + + var msgPrefix string + switch pc.Scope { + case tfe.PolicyScopeOrganization: + msgPrefix = "Organization policy check" + case tfe.PolicyScopeWorkspace: + msgPrefix = "Workspace policy check" + default: + msgPrefix = fmt.Sprintf("Unknown policy check (%s)", pc.Scope) + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(msgPrefix + ":\n")) + } + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + switch pc.Status { + case tfe.PolicyPasses: + if (r.HasChanges && op.Type == backend.OperationTypeApply || i < len(r.PolicyChecks)-1) && b.CLI != nil { + b.CLI.Output("\n------------------------------------------------------------------------") + } + continue + case tfe.PolicyErrored: + return fmt.Errorf(msgPrefix + " errored.") + case tfe.PolicyHardFailed: + return fmt.Errorf(msgPrefix + " hard failed.") + case tfe.PolicySoftFailed: + if op.Type == backend.OperationTypePlan || op.UIOut == nil || op.UIIn == nil || + op.AutoApprove || !pc.Actions.IsOverridable || !pc.Permissions.CanOverride { + return fmt.Errorf(msgPrefix + " soft failed.") + } + default: + return fmt.Errorf("Unknown or unexpected policy state: %s", pc.Status) + } + + opts := &terraform.InputOpts{ + Id: "override", + Query: "\nDo you want to override the soft failed policy check?", + Description: "Only 'override' will be accepted to override.", + } + + err = b.confirm(stopCtx, op, opts, r, "override") + if err != nil && err != errRunOverridden { + return err + } + + if err != errRunOverridden { + if _, err = b.client.PolicyChecks.Override(stopCtx, pc.ID); err != nil { + return generalError("Failed to override policy check", err) + } + } + + if b.CLI != nil { + b.CLI.Output("------------------------------------------------------------------------") + } + } + + return nil +} + +func (b *Remote) confirm(stopCtx context.Context, op *backend.Operation, opts *terraform.InputOpts, r *tfe.Run, keyword string) error { + doneCtx, cancel := context.WithCancel(stopCtx) + result := make(chan error, 2) + + go func() { + // Make sure we cancel doneCtx before we return + // so the input command is also canceled. + defer cancel() + + for { + select { + case <-doneCtx.Done(): + return + case <-stopCtx.Done(): + return + case <-time.After(3 * time.Second): + // Retrieve the run again to get its current status. 
+ r, err := b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + result <- generalError("Failed to retrieve run", err) + return + } + + switch keyword { + case "override": + if r.Status != tfe.RunPolicyOverride { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunOverridden + } + } + case "yes": + if !r.Actions.IsConfirmable { + if r.Status == tfe.RunDiscarded { + err = errRunDiscarded + } else { + err = errRunApproved + } + } + } + + if err != nil { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color( + fmt.Sprintf("[reset][yellow]%s[reset]", err.Error()))) + } + + if err == errRunDiscarded { + err = errApplyDiscarded + if op.Destroy { + err = errDestroyDiscarded + } + } + + result <- err + return + } + } + } + }() + + result <- func() error { + v, err := op.UIIn.Input(doneCtx, opts) + if err != nil && err != context.Canceled && stopCtx.Err() != context.Canceled { + return fmt.Errorf("Error asking %s: %v", opts.Id, err) + } + + // We return the error of our parent context as we don't + // care about the error of the doneCtx which is only used + // within this function. So if the doneCtx was canceled + // because stopCtx was canceled, this will properly return + // a context.Canceled error and otherwise it returns nil. + if doneCtx.Err() == context.Canceled || stopCtx.Err() == context.Canceled { + return stopCtx.Err() + } + + // Make sure we cancel the context here so the loop that + // checks for external changes to the run is ended before + // we start to make changes ourselves. + cancel() + + if v != keyword { + // Retrieve the run again to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return generalError("Failed to retrieve run", err) + } + + // Make sure we discard the run if possible. + if r.Actions.IsDiscardable { + err = b.client.Runs.Discard(stopCtx, r.ID, tfe.RunDiscardOptions{}) + if err != nil { + if op.Destroy { + return generalError("Failed to discard destroy", err) + } + return generalError("Failed to discard apply", err) + } + } + + // Even if the run was discarded successfully, we still + // return an error as the apply command was canceled. + if op.Destroy { + return errDestroyDiscarded + } + return errApplyDiscarded + } + + return nil + }() + + return <-result +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_context.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_context.go new file mode 100644 index 00000000..d548a4ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_context.go @@ -0,0 +1,121 @@ +package remote + +import ( + "context" + "log" + "strings" + + "github.com/hashicorp/errwrap" + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// Context implements backend.Enhanced. +func (b *Remote) Context(op *backend.Operation) (*terraform.Context, statemgr.Full, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if op.LockState { + op.StateLocker = clistate.NewLocker(context.Background(), op.StateLockTimeout, b.CLI, b.cliColorize()) + } else { + op.StateLocker = clistate.NewNoopLocker() + } + + // Get the remote workspace name.
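+ // (This applies the same default-name/prefix mapping as Operation and StateMgr.)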
+ workspace := op.Workspace + switch { + case op.Workspace == backend.DefaultStateName: + workspace = b.workspace + case b.prefix != "" && !strings.HasPrefix(op.Workspace, b.prefix): + workspace = b.prefix + op.Workspace + } + + // Get the latest state. + log.Printf("[TRACE] backend/remote: requesting state manager for workspace %q", workspace) + stateMgr, err := b.StateMgr(op.Workspace) + if err != nil { + diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] backend/remote: requesting state lock for workspace %q", workspace) + if err := op.StateLocker.Lock(stateMgr, op.Type.String()); err != nil { + diags = diags.Append(errwrap.Wrapf("Error locking state: {{err}}", err)) + return nil, nil, diags + } + + log.Printf("[TRACE] backend/remote: reading remote state for workspace %q", workspace) + if err := stateMgr.RefreshState(); err != nil { + diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err)) + return nil, nil, diags + } + + // Initialize our context options + var opts terraform.ContextOpts + if v := b.ContextOpts; v != nil { + opts = *v + } + + // Copy set options from the operation + opts.Destroy = op.Destroy + opts.Targets = op.Targets + opts.UIInput = op.UIIn + + // Load the latest state. If we enter contextFromPlanFile below then the + // state snapshot in the plan file must match this, or else it'll return + // error diagnostics. + log.Printf("[TRACE] backend/remote: retrieving remote state snapshot for workspace %q", workspace) + opts.State = stateMgr.State() + + log.Printf("[TRACE] backend/remote: loading configuration for the current working directory") + config, configDiags := op.ConfigLoader.LoadConfig(op.ConfigDir) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, nil, diags + } + opts.Config = config + + log.Printf("[TRACE] backend/remote: retrieving variables from workspace %q", workspace) + tfeVariables, err := b.client.Variables.List(context.Background(), tfe.VariableListOptions{ + Organization: tfe.String(b.organization), + Workspace: tfe.String(workspace), + }) + if err != nil && err != tfe.ErrResourceNotFound { + diags = diags.Append(errwrap.Wrapf("Error loading variables: {{err}}", err)) + return nil, nil, diags + } + + if tfeVariables != nil { + if op.Variables == nil { + op.Variables = make(map[string]backend.UnparsedVariableValue) + } + for _, v := range tfeVariables.Items { + if v.Sensitive { + v.Value = "" + } + op.Variables[v.Key] = &unparsedVariableValue{ + value: v.Value, + source: terraform.ValueFromEnvVar, + } + } + } + + if op.Variables != nil { + variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables) + diags = diags.Append(varDiags) + if diags.HasErrors() { + return nil, nil, diags + } + opts.Variables = variables + } + + tfCtx, ctxDiags := terraform.NewContext(&opts) + diags = diags.Append(ctxDiags) + + log.Printf("[TRACE] backend/remote: finished building terraform.Context") + + return tfCtx, stateMgr, diags +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_mock.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_mock.go new file mode 100644 index 00000000..8985dd0e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_mock.go @@ -0,0 +1,1049 @@ +package remote + +import ( + "bytes" + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + 
"time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/terraform" +) + +type mockClient struct { + Applies *mockApplies + ConfigurationVersions *mockConfigurationVersions + Organizations *mockOrganizations + Plans *mockPlans + PolicyChecks *mockPolicyChecks + Runs *mockRuns + StateVersions *mockStateVersions + Workspaces *mockWorkspaces +} + +func newMockClient() *mockClient { + c := &mockClient{} + c.Applies = newMockApplies(c) + c.ConfigurationVersions = newMockConfigurationVersions(c) + c.Organizations = newMockOrganizations(c) + c.Plans = newMockPlans(c) + c.PolicyChecks = newMockPolicyChecks(c) + c.Runs = newMockRuns(c) + c.StateVersions = newMockStateVersions(c) + c.Workspaces = newMockWorkspaces(c) + return c +} + +type mockApplies struct { + client *mockClient + applies map[string]*tfe.Apply + logs map[string]string +} + +func newMockApplies(client *mockClient) *mockApplies { + return &mockApplies{ + client: client, + applies: make(map[string]*tfe.Apply), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock apply that uses the configured +// working directory to find the logfile. +func (m *mockApplies) create(cvID, workspaceID string) (*tfe.Apply, error) { + c, ok := m.client.ConfigurationVersions.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if c.Speculative { + // Speculative means its plan-only so we don't create a Apply. + return nil, nil + } + + id := generateID("apply-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + a := &tfe.Apply{ + ID: id, + LogReadURL: url, + Status: tfe.ApplyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if w.AutoApply { + a.Status = tfe.ApplyRunning + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "apply.log", + ) + m.applies[a.ID] = a + + return a, nil +} + +func (m *mockApplies) Read(ctx context.Context, applyID string) (*tfe.Apply, error) { + a, ok := m.applies[applyID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. 
+ if a.Status == tfe.ApplyRunning { + a.Status = tfe.ApplyFinished + } + return a, nil +} + +func (m *mockApplies) Logs(ctx context.Context, applyID string) (io.Reader, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[a.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + a, err := m.Read(ctx, applyID) + if err != nil { + return false, err + } + if a.Status != tfe.ApplyFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +type mockConfigurationVersions struct { + client *mockClient + configVersions map[string]*tfe.ConfigurationVersion + uploadPaths map[string]string + uploadURLs map[string]*tfe.ConfigurationVersion +} + +func newMockConfigurationVersions(client *mockClient) *mockConfigurationVersions { + return &mockConfigurationVersions{ + client: client, + configVersions: make(map[string]*tfe.ConfigurationVersion), + uploadPaths: make(map[string]string), + uploadURLs: make(map[string]*tfe.ConfigurationVersion), + } +} + +func (m *mockConfigurationVersions) List(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionListOptions) (*tfe.ConfigurationVersionList, error) { + cvl := &tfe.ConfigurationVersionList{} + for _, cv := range m.configVersions { + cvl.Items = append(cvl.Items, cv) + } + + cvl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(cvl.Items), + } + + return cvl, nil +} + +func (m *mockConfigurationVersions) Create(ctx context.Context, workspaceID string, options tfe.ConfigurationVersionCreateOptions) (*tfe.ConfigurationVersion, error) { + id := generateID("cv-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + cv := &tfe.ConfigurationVersion{ + ID: id, + Status: tfe.ConfigurationPending, + UploadURL: url, + } + + m.configVersions[cv.ID] = cv + m.uploadURLs[url] = cv + + return cv, nil +} + +func (m *mockConfigurationVersions) Read(ctx context.Context, cvID string) (*tfe.ConfigurationVersion, error) { + cv, ok := m.configVersions[cvID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return cv, nil +} + +func (m *mockConfigurationVersions) Upload(ctx context.Context, url, path string) error { + cv, ok := m.uploadURLs[url] + if !ok { + return errors.New("404 not found") + } + m.uploadPaths[cv.ID] = path + cv.Status = tfe.ConfigurationUploaded + return nil +} + +// mockInput is a mock implementation of terraform.UIInput. 
+type mockInput struct { + answers map[string]string +} + +func (m *mockInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { + v, ok := m.answers[opts.Id] + if !ok { + return "", fmt.Errorf("unexpected input request in test: %s", opts.Id) + } + if v == "wait-for-external-update" { + select { + case <-ctx.Done(): + case <-time.After(time.Minute): + } + } + delete(m.answers, opts.Id) + return v, nil +} + +type mockOrganizations struct { + client *mockClient + organizations map[string]*tfe.Organization +} + +func newMockOrganizations(client *mockClient) *mockOrganizations { + return &mockOrganizations{ + client: client, + organizations: make(map[string]*tfe.Organization), + } +} + +func (m *mockOrganizations) List(ctx context.Context, options tfe.OrganizationListOptions) (*tfe.OrganizationList, error) { + orgl := &tfe.OrganizationList{} + for _, org := range m.organizations { + orgl.Items = append(orgl.Items, org) + } + + orgl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(orgl.Items), + } + + return orgl, nil +} + +// mockLogReader is a mock logreader that enables testing queued runs. +type mockLogReader struct { + done func() (bool, error) + logs *bytes.Buffer +} + +func (m *mockLogReader) Read(l []byte) (int, error) { + for { + if written, err := m.read(l); err != io.ErrNoProgress { + return written, err + } + time.Sleep(500 * time.Millisecond) + } +} + +func (m *mockLogReader) read(l []byte) (int, error) { + done, err := m.done() + if err != nil { + return 0, err + } + if !done { + return 0, io.ErrNoProgress + } + return m.logs.Read(l) +} + +func (m *mockOrganizations) Create(ctx context.Context, options tfe.OrganizationCreateOptions) (*tfe.Organization, error) { + org := &tfe.Organization{Name: *options.Name} + m.organizations[org.Name] = org + return org, nil +} + +func (m *mockOrganizations) Read(ctx context.Context, name string) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return org, nil +} + +func (m *mockOrganizations) Update(ctx context.Context, name string, options tfe.OrganizationUpdateOptions) (*tfe.Organization, error) { + org, ok := m.organizations[name] + if !ok { + return nil, tfe.ErrResourceNotFound + } + org.Name = *options.Name + return org, nil + +} + +func (m *mockOrganizations) Delete(ctx context.Context, name string) error { + delete(m.organizations, name) + return nil +} + +func (m *mockOrganizations) Capacity(ctx context.Context, name string) (*tfe.Capacity, error) { + var pending, running int + for _, r := range m.client.Runs.runs { + if r.Status == tfe.RunPending { + pending++ + continue + } + running++ + } + return &tfe.Capacity{Pending: pending, Running: running}, nil +} + +func (m *mockOrganizations) Entitlements(ctx context.Context, name string) (*tfe.Entitlements, error) { + return &tfe.Entitlements{ + Operations: true, + PrivateModuleRegistry: true, + Sentinel: true, + StateStorage: true, + Teams: true, + VCSIntegrations: true, + }, nil +} + +func (m *mockOrganizations) RunQueue(ctx context.Context, name string, options tfe.RunQueueOptions) (*tfe.RunQueue, error) { + rq := &tfe.RunQueue{} + + for _, r := range m.client.Runs.runs { + rq.Items = append(rq.Items, r) + } + + rq.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rq.Items), + } + + return rq, nil +} + +type mockPlans struct { + client *mockClient + logs 
map[string]string + plans map[string]*tfe.Plan +} + +func newMockPlans(client *mockClient) *mockPlans { + return &mockPlans{ + client: client, + logs: make(map[string]string), + plans: make(map[string]*tfe.Plan), + } +} + +// create is a helper function to create a mock plan that uses the configured +// working directory to find the logfile. +func (m *mockPlans) create(cvID, workspaceID string) (*tfe.Plan, error) { + id := generateID("plan-") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + p := &tfe.Plan{ + ID: id, + LogReadURL: url, + Status: tfe.PlanPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + m.logs[url] = filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "plan.log", + ) + m.plans[p.ID] = p + + return p, nil +} + +func (m *mockPlans) Read(ctx context.Context, planID string) (*tfe.Plan, error) { + p, ok := m.plans[planID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + // Together with the mockLogReader this allows testing queued runs. + if p.Status == tfe.PlanRunning { + p.Status = tfe.PlanFinished + } + return p, nil +} + +func (m *mockPlans) Logs(ctx context.Context, planID string) (io.Reader, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return nil, err + } + + logfile, ok := m.logs[p.LogReadURL] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + done := func() (bool, error) { + p, err := m.Read(ctx, planID) + if err != nil { + return false, err + } + if p.Status != tfe.PlanFinished { + return false, nil + } + return true, nil + } + + return &mockLogReader{ + done: done, + logs: bytes.NewBuffer(logs), + }, nil +} + +type mockPolicyChecks struct { + client *mockClient + checks map[string]*tfe.PolicyCheck + logs map[string]string +} + +func newMockPolicyChecks(client *mockClient) *mockPolicyChecks { + return &mockPolicyChecks{ + client: client, + checks: make(map[string]*tfe.PolicyCheck), + logs: make(map[string]string), + } +} + +// create is a helper function to create a mock policy check that uses the +// configured working directory to find the logfile. 
+func (m *mockPolicyChecks) create(cvID, workspaceID string) (*tfe.PolicyCheck, error) { + id := generateID("pc-") + + pc := &tfe.PolicyCheck{ + ID: id, + Actions: &tfe.PolicyActions{}, + Permissions: &tfe.PolicyPermissions{}, + Scope: tfe.PolicyScopeOrganization, + Status: tfe.PolicyPending, + } + + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile := filepath.Join( + m.client.ConfigurationVersions.uploadPaths[cvID], + w.WorkingDirectory, + "policy.log", + ) + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, nil + } + + m.logs[pc.ID] = logfile + m.checks[pc.ID] = pc + + return pc, nil +} + +func (m *mockPolicyChecks) List(ctx context.Context, runID string, options tfe.PolicyCheckListOptions) (*tfe.PolicyCheckList, error) { + _, ok := m.client.Runs.runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pcl := &tfe.PolicyCheckList{} + for _, pc := range m.checks { + pcl.Items = append(pcl.Items, pc) + } + + pcl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(pcl.Items), + } + + return pcl, nil +} + +func (m *mockPolicyChecks) Read(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return nil, fmt.Errorf("logfile does not exist") + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. + pc.Status = tfe.PolicyErrored + } + + return pc, nil +} + +func (m *mockPolicyChecks) Override(ctx context.Context, policyCheckID string) (*tfe.PolicyCheck, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + pc.Status = tfe.PolicyOverridden + return pc, nil +} + +func (m *mockPolicyChecks) Logs(ctx context.Context, policyCheckID string) (io.Reader, error) { + pc, ok := m.checks[policyCheckID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + logfile, ok := m.logs[pc.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if _, err := os.Stat(logfile); os.IsNotExist(err) { + return bytes.NewBufferString("logfile does not exist"), nil + } + + logs, err := ioutil.ReadFile(logfile) + if err != nil { + return nil, err + } + + switch { + case bytes.Contains(logs, []byte("Sentinel Result: true")): + pc.Status = tfe.PolicyPasses + case bytes.Contains(logs, []byte("Sentinel Result: false")): + switch { + case bytes.Contains(logs, []byte("hard-mandatory")): + pc.Status = tfe.PolicyHardFailed + case bytes.Contains(logs, []byte("soft-mandatory")): + pc.Actions.IsOverridable = true + pc.Permissions.CanOverride = true + pc.Status = tfe.PolicySoftFailed + } + default: + // As this is an unexpected state, we say the policy errored. 
+ pc.Status = tfe.PolicyErrored + } + + return bytes.NewBuffer(logs), nil +} + +type mockRuns struct { + client *mockClient + runs map[string]*tfe.Run + workspaces map[string][]*tfe.Run +} + +func newMockRuns(client *mockClient) *mockRuns { + return &mockRuns{ + client: client, + runs: make(map[string]*tfe.Run), + workspaces: make(map[string][]*tfe.Run), + } +} + +func (m *mockRuns) List(ctx context.Context, workspaceID string, options tfe.RunListOptions) (*tfe.RunList, error) { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + rl := &tfe.RunList{ + Items: m.workspaces[w.ID], + } + + rl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(rl.Items), + } + + return rl, nil +} + +func (m *mockRuns) Create(ctx context.Context, options tfe.RunCreateOptions) (*tfe.Run, error) { + a, err := m.client.Applies.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + p, err := m.client.Plans.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + pc, err := m.client.PolicyChecks.create(options.ConfigurationVersion.ID, options.Workspace.ID) + if err != nil { + return nil, err + } + + r := &tfe.Run{ + ID: generateID("run-"), + Actions: &tfe.RunActions{IsCancelable: true}, + Apply: a, + HasChanges: false, + Permissions: &tfe.RunPermissions{}, + Plan: p, + Status: tfe.RunPending, + } + + if pc != nil { + r.PolicyChecks = []*tfe.PolicyCheck{pc} + } + + if options.IsDestroy != nil { + r.IsDestroy = *options.IsDestroy + } + + w, ok := m.client.Workspaces.workspaceIDs[options.Workspace.ID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if w.CurrentRun == nil { + w.CurrentRun = r + } + + m.runs[r.ID] = r + m.workspaces[options.Workspace.ID] = append(m.workspaces[options.Workspace.ID], r) + + return r, nil +} + +func (m *mockRuns) Read(ctx context.Context, runID string) (*tfe.Run, error) { + r, ok := m.runs[runID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + pending := false + for _, r := range m.runs { + if r.ID != runID && r.Status == tfe.RunPending { + pending = true + break + } + } + + if !pending && r.Status == tfe.RunPending { + // Only update the status if there are no other pending runs. + r.Status = tfe.RunPlanning + r.Plan.Status = tfe.PlanRunning + } + + logs, _ := ioutil.ReadFile(m.client.Plans.logs[r.Plan.LogReadURL]) + if r.Status == tfe.RunPlanning && r.Plan.Status == tfe.PlanFinished { + if r.IsDestroy || bytes.Contains(logs, []byte("1 to add, 0 to change, 0 to destroy")) { + r.Actions.IsCancelable = false + r.Actions.IsConfirmable = true + r.HasChanges = true + r.Permissions.CanApply = true + } + + if bytes.Contains(logs, []byte("null_resource.foo: 1 error")) { + r.Actions.IsCancelable = false + r.HasChanges = false + r.Status = tfe.RunErrored + } + } + + return r, nil +} + +func (m *mockRuns) Apply(ctx context.Context, runID string, options tfe.RunApplyOptions) error { + r, ok := m.runs[runID] + if !ok { + return tfe.ErrResourceNotFound + } + if r.Status != tfe.RunPending { + // Only update the status if the run is not pending anymore. 
+ r.Status = tfe.RunApplying + r.Actions.IsConfirmable = false + r.Apply.Status = tfe.ApplyRunning + } + return nil +} + +func (m *mockRuns) Cancel(ctx context.Context, runID string, options tfe.RunCancelOptions) error { + panic("not implemented") +} + +func (m *mockRuns) ForceCancel(ctx context.Context, runID string, options tfe.RunForceCancelOptions) error { + panic("not implemented") +} + +func (m *mockRuns) Discard(ctx context.Context, runID string, options tfe.RunDiscardOptions) error { + r, ok := m.runs[runID] + if !ok { + return tfe.ErrResourceNotFound + } + r.Status = tfe.RunDiscarded + r.Actions.IsConfirmable = false + return nil +} + +type mockStateVersions struct { + client *mockClient + states map[string][]byte + stateVersions map[string]*tfe.StateVersion + workspaces map[string][]string +} + +func newMockStateVersions(client *mockClient) *mockStateVersions { + return &mockStateVersions{ + client: client, + states: make(map[string][]byte), + stateVersions: make(map[string]*tfe.StateVersion), + workspaces: make(map[string][]string), + } +} + +func (m *mockStateVersions) List(ctx context.Context, options tfe.StateVersionListOptions) (*tfe.StateVersionList, error) { + svl := &tfe.StateVersionList{} + for _, sv := range m.stateVersions { + svl.Items = append(svl.Items, sv) + } + + svl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 1, + PreviousPage: 1, + TotalPages: 1, + TotalCount: len(svl.Items), + } + + return svl, nil +} + +func (m *mockStateVersions) Create(ctx context.Context, workspaceID string, options tfe.StateVersionCreateOptions) (*tfe.StateVersion, error) { + id := generateID("sv-") + runID := os.Getenv("TFE_RUN_ID") + url := fmt.Sprintf("https://app.terraform.io/_archivist/%s", id) + + if runID != "" && (options.Run == nil || runID != options.Run.ID) { + return nil, fmt.Errorf("option.Run.ID does not contain the ID exported by TFE_RUN_ID") + } + + sv := &tfe.StateVersion{ + ID: id, + DownloadURL: url, + Serial: *options.Serial, + } + + state, err := base64.StdEncoding.DecodeString(*options.State) + if err != nil { + return nil, err + } + + m.states[sv.DownloadURL] = state + m.stateVersions[sv.ID] = sv + m.workspaces[workspaceID] = append(m.workspaces[workspaceID], sv.ID) + + return sv, nil +} + +func (m *mockStateVersions) Read(ctx context.Context, svID string) (*tfe.StateVersion, error) { + sv, ok := m.stateVersions[svID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return sv, nil +} + +func (m *mockStateVersions) Current(ctx context.Context, workspaceID string) (*tfe.StateVersion, error) { + w, ok := m.client.Workspaces.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + svs, ok := m.workspaces[w.ID] + if !ok || len(svs) == 0 { + return nil, tfe.ErrResourceNotFound + } + + sv, ok := m.stateVersions[svs[len(svs)-1]] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + return sv, nil +} + +func (m *mockStateVersions) Download(ctx context.Context, url string) ([]byte, error) { + state, ok := m.states[url] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return state, nil +} + +type mockWorkspaces struct { + client *mockClient + workspaceIDs map[string]*tfe.Workspace + workspaceNames map[string]*tfe.Workspace +} + +func newMockWorkspaces(client *mockClient) *mockWorkspaces { + return &mockWorkspaces{ + client: client, + workspaceIDs: make(map[string]*tfe.Workspace), + workspaceNames: make(map[string]*tfe.Workspace), + } +} + +func (m *mockWorkspaces) List(ctx context.Context, organization string, options 
tfe.WorkspaceListOptions) (*tfe.WorkspaceList, error) { + dummyWorkspaces := 10 + wl := &tfe.WorkspaceList{} + + // Get the prefix from the search options. + prefix := "" + if options.Search != nil { + prefix = *options.Search + } + + // Get all the workspaces that match the prefix. + var ws []*tfe.Workspace + for _, w := range m.workspaceIDs { + if strings.HasPrefix(w.Name, prefix) { + ws = append(ws, w) + } + } + + // Return an empty result if we have no matches. + if len(ws) == 0 { + wl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + } + return wl, nil + } + + // Return dummy workspaces for the first page to test pagination. + if options.PageNumber <= 1 { + for i := 0; i < dummyWorkspaces; i++ { + wl.Items = append(wl.Items, &tfe.Workspace{ + ID: generateID("ws-"), + Name: fmt.Sprintf("dummy-workspace-%d", i), + }) + } + + wl.Pagination = &tfe.Pagination{ + CurrentPage: 1, + NextPage: 2, + TotalPages: 2, + TotalCount: len(wl.Items) + len(ws), + } + + return wl, nil + } + + // Return the actual workspaces that matched as the second page. + wl.Items = ws + wl.Pagination = &tfe.Pagination{ + CurrentPage: 2, + PreviousPage: 1, + TotalPages: 2, + TotalCount: len(wl.Items) + dummyWorkspaces, + } + + return wl, nil +} + +func (m *mockWorkspaces) Create(ctx context.Context, organization string, options tfe.WorkspaceCreateOptions) (*tfe.Workspace, error) { + w := &tfe.Workspace{ + ID: generateID("ws-"), + Name: *options.Name, + Operations: !strings.HasSuffix(*options.Name, "no-operations"), + Permissions: &tfe.WorkspacePermissions{ + CanQueueApply: true, + CanQueueRun: true, + }, + } + if options.AutoApply != nil { + w.AutoApply = *options.AutoApply + } + if options.VCSRepo != nil { + w.VCSRepo = &tfe.VCSRepo{} + } + m.workspaceIDs[w.ID] = w + m.workspaceNames[w.Name] = w + return w, nil +} + +func (m *mockWorkspaces) Read(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + return w, nil +} + +func (m *mockWorkspaces) Update(ctx context.Context, organization, workspace string, options tfe.WorkspaceUpdateOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + + if options.Name != nil { + w.Name = *options.Name + } + if options.TerraformVersion != nil { + w.TerraformVersion = *options.TerraformVersion + } + if options.WorkingDirectory != nil { + w.WorkingDirectory = *options.WorkingDirectory + } + + delete(m.workspaceNames, workspace) + m.workspaceNames[w.Name] = w + + return w, nil +} + +func (m *mockWorkspaces) Delete(ctx context.Context, organization, workspace string) error { + if w, ok := m.workspaceNames[workspace]; ok { + delete(m.workspaceIDs, w.ID) + } + delete(m.workspaceNames, workspace) + return nil +} + +func (m *mockWorkspaces) RemoveVCSConnection(ctx context.Context, organization, workspace string) (*tfe.Workspace, error) { + w, ok := m.workspaceNames[workspace] + if !ok { + return nil, tfe.ErrResourceNotFound + } + w.VCSRepo = nil + return w, nil +} + +func (m *mockWorkspaces) Lock(ctx context.Context, workspaceID string, options tfe.WorkspaceLockOptions) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if w.Locked { + return nil, tfe.ErrWorkspaceLocked + } + w.Locked = true + return w, nil +} + +func (m *mockWorkspaces) Unlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := 
m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if !w.Locked { + return nil, tfe.ErrWorkspaceNotLocked + } + w.Locked = false + return w, nil +} + +func (m *mockWorkspaces) ForceUnlock(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + w, ok := m.workspaceIDs[workspaceID] + if !ok { + return nil, tfe.ErrResourceNotFound + } + if !w.Locked { + return nil, tfe.ErrWorkspaceNotLocked + } + w.Locked = false + return w, nil +} + +func (m *mockWorkspaces) AssignSSHKey(ctx context.Context, workspaceID string, options tfe.WorkspaceAssignSSHKeyOptions) (*tfe.Workspace, error) { + panic("not implemented") +} + +func (m *mockWorkspaces) UnassignSSHKey(ctx context.Context, workspaceID string) (*tfe.Workspace, error) { + panic("not implemented") +} + +const alphanumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +func generateID(s string) string { + b := make([]byte, 16) + for i := range b { + b[i] = alphanumeric[rand.Intn(len(alphanumeric))] + } + return s + string(b) +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_plan.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_plan.go new file mode 100644 index 00000000..200f7fd8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_plan.go @@ -0,0 +1,320 @@ +package remote + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/tfdiags" +) + +func (b *Remote) opPlan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + log.Printf("[INFO] backend/remote: starting Plan operation") + + var diags tfdiags.Diagnostics + + if !w.Permissions.CanQueueRun { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Insufficient rights to generate a plan", + "The provided credentials have insufficient rights to generate a plan. 
In order "+ + "to generate plans, at least plan permissions on the workspace are required.", + )) + return nil, diags.Err() + } + + if op.Parallelism != defaultParallelism { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Custom parallelism values are currently not supported", + `The "remote" backend does not support setting a custom parallelism `+ + `value at this time.`, + )) + } + + if op.PlanFile != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Displaying a saved plan is currently not supported", + `The "remote" backend currently requires configuration to be present and `+ + `does not accept an existing saved plan as an argument at this time.`, + )) + } + + if op.PlanOutPath != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Saving a generated plan is currently not supported", + `The "remote" backend does not support saving the generated execution `+ + `plan locally at this time.`, + )) + } + + if !op.PlanRefresh { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Planning without refresh is currently not supported", + `Currently the "remote" backend will always do an in-memory refresh of `+ + `the Terraform state prior to generating the plan.`, + )) + } + + if op.Targets != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource targeting is currently not supported", + `The "remote" backend does not support resource targeting at this time.`, + )) + } + + variables, parseDiags := b.parseVariableValues(op) + diags = diags.Append(parseDiags) + + if len(variables) > 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Run variables are currently not supported", + fmt.Sprintf( + "The \"remote\" backend does not support setting run variables at this time. "+ + "Currently the only to way to pass variables to the remote backend is by "+ + "creating a '*.auto.tfvars' variables file. This file will automatically "+ + "be loaded by the \"remote\" backend when the workspace is configured to use "+ + "Terraform v0.10.0 or later.\n\nAdditionally you can also set variables on "+ + "the workspace in the web UI:\nhttps://%s/app/%s/%s/variables", + b.hostname, b.organization, op.Workspace, + ), + )) + } + + if !op.HasConfig() && !op.Destroy { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files found", + `Plan requires configuration to be present. Planning without a configuration `+ + `would mark everything for destruction, which is normally not what is desired. `+ + `If you would like to destroy everything, please run plan with the "-destroy" `+ + `flag or create a single empty configuration file. Otherwise, please create `+ + `a Terraform configuration file in the path being executed and try again.`, + )) + } + + // Return if there are any errors. 
+ if diags.HasErrors() { + return nil, diags.Err() + } + + return b.plan(stopCtx, cancelCtx, op, w) +} + +func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation, w *tfe.Workspace) (*tfe.Run, error) { + if b.CLI != nil { + header := planDefaultHeader + if op.Type == backend.OperationTypeApply { + header = applyDefaultHeader + } + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(header) + "\n")) + } + + configOptions := tfe.ConfigurationVersionCreateOptions{ + AutoQueueRuns: tfe.Bool(false), + Speculative: tfe.Bool(op.Type == backend.OperationTypePlan), + } + + cv, err := b.client.ConfigurationVersions.Create(stopCtx, w.ID, configOptions) + if err != nil { + return nil, generalError("Failed to create configuration version", err) + } + + var configDir string + if op.ConfigDir != "" { + // Make sure to take the working directory into account by removing + // the working directory from the current path. This will result in + // a path that points to the expected root of the workspace. + configDir = filepath.Clean(strings.TrimSuffix( + filepath.Clean(op.ConfigDir), + filepath.Clean(w.WorkingDirectory), + )) + } else { + // We did a check earlier to make sure we either have a config dir, + // or the plan is run with -destroy. So this else clause will only + // be executed when we are destroying and doesn't need the config. + configDir, err = ioutil.TempDir("", "tf") + if err != nil { + return nil, generalError("Failed to create temporary directory", err) + } + defer os.RemoveAll(configDir) + + // Make sure the configured working directory exists. + err = os.MkdirAll(filepath.Join(configDir, w.WorkingDirectory), 0700) + if err != nil { + return nil, generalError( + "Failed to create temporary working directory", err) + } + } + + err = b.client.ConfigurationVersions.Upload(stopCtx, cv.UploadURL, configDir) + if err != nil { + return nil, generalError("Failed to upload configuration files", err) + } + + uploaded := false + for i := 0; i < 60 && !uploaded; i++ { + select { + case <-stopCtx.Done(): + return nil, context.Canceled + case <-cancelCtx.Done(): + return nil, context.Canceled + case <-time.After(500 * time.Millisecond): + cv, err = b.client.ConfigurationVersions.Read(stopCtx, cv.ID) + if err != nil { + return nil, generalError("Failed to retrieve configuration version", err) + } + + if cv.Status == tfe.ConfigurationUploaded { + uploaded = true + } + } + } + + if !uploaded { + return nil, generalError( + "Failed to upload configuration files", errors.New("operation timed out")) + } + + runOptions := tfe.RunCreateOptions{ + IsDestroy: tfe.Bool(op.Destroy), + Message: tfe.String("Queued manually using Terraform"), + ConfigurationVersion: cv, + Workspace: w, + } + + r, err := b.client.Runs.Create(stopCtx, runOptions) + if err != nil { + return r, generalError("Failed to create run", err) + } + + // When the lock timeout is set, watch the run and cancel it if it is + // still pending and cancelable once the timeout has elapsed. + if op.StateLockTimeout > 0 { + go func() { + select { + case <-stopCtx.Done(): + return + case <-cancelCtx.Done(): + return + case <-time.After(op.StateLockTimeout): + // Retrieve the run to get its current status. + r, err := b.client.Runs.Read(cancelCtx, r.ID) + if err != nil { + log.Printf("[ERROR] error reading run: %v", err) + return + } + + if r.Status == tfe.RunPending && r.Actions.IsCancelable { + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(lockTimeoutErr))) + } + + // We abuse the auto approve flag to indicate that we do not + // want to ask if the remote operation should be canceled.
+ op.AutoApprove = true + + p, err := os.FindProcess(os.Getpid()) + if err != nil { + log.Printf("[ERROR] error searching process ID: %v", err) + return + } + p.Signal(syscall.SIGINT) + } + } + }() + } + + if b.CLI != nil { + b.CLI.Output(b.Colorize().Color(strings.TrimSpace(fmt.Sprintf( + runHeader, b.hostname, b.organization, op.Workspace, r.ID)) + "\n")) + } + + r, err = b.waitForRun(stopCtx, cancelCtx, op, "plan", r, w) + if err != nil { + return r, err + } + + logs, err := b.client.Plans.Logs(stopCtx, r.Plan.ID) + if err != nil { + return r, generalError("Failed to retrieve logs", err) + } + reader := bufio.NewReaderSize(logs, 64*1024) + + if b.CLI != nil { + for next := true; next; { + var l, line []byte + + for isPrefix := true; isPrefix; { + l, isPrefix, err = reader.ReadLine() + if err != nil { + if err != io.EOF { + return r, generalError("Failed to read logs", err) + } + next = false + } + line = append(line, l...) + } + + if next || len(line) > 0 { + b.CLI.Output(b.Colorize().Color(string(line))) + } + } + } + + // Retrieve the run to get its current status. + r, err = b.client.Runs.Read(stopCtx, r.ID) + if err != nil { + return r, generalError("Failed to retrieve run", err) + } + + // Return if the run is canceled or errored. We return without + // an error, even if the run errored, as the error is already + // displayed by the output of the remote run. + if r.Status == tfe.RunCanceled || r.Status == tfe.RunErrored { + return r, nil + } + + // Check any configured sentinel policies. + if len(r.PolicyChecks) > 0 { + err = b.checkPolicy(stopCtx, cancelCtx, op, r) + if err != nil { + return r, err + } + } + + return r, nil +} + +const planDefaultHeader = ` +[reset][yellow]Running plan in the remote backend. Output will stream here. Pressing Ctrl-C +will stop streaming the logs, but will not stop the plan running remotely.[reset] + +Preparing the remote plan... +` + +const runHeader = ` +[reset][yellow]To view this run in a browser, visit: +https://%s/app/%s/%s/runs/%s[reset] +` + +// The newline in this error is to make it look good in the CLI! +const lockTimeoutErr = ` +[reset][red]Lock timeout exceeded, sending interrupt to cancel the remote operation. +[reset] +` diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/backend_state.go b/vendor/github.com/hashicorp/terraform/backend/remote/backend_state.go new file mode 100644 index 00000000..5089c407 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/backend_state.go @@ -0,0 +1,173 @@ +package remote + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/base64" + "fmt" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/states/statefile" +) + +type remoteClient struct { + client *tfe.Client + lockInfo *state.LockInfo + organization string + runID string + stateUploadErr bool + workspace *tfe.Workspace +} + +// Get the remote state. +func (r *remoteClient) Get() (*remote.Payload, error) { + ctx := context.Background() + + sv, err := r.client.StateVersions.Current(ctx, r.workspace.ID) + if err != nil { + if err == tfe.ErrResourceNotFound { + // If no state exists, then return nil. 
+ return nil, nil + } + return nil, fmt.Errorf("Error retrieving state: %v", err) + } + + state, err := r.client.StateVersions.Download(ctx, sv.DownloadURL) + if err != nil { + return nil, fmt.Errorf("Error downloading state: %v", err) + } + + // If the state is empty, then return nil. + if len(state) == 0 { + return nil, nil + } + + // Get the MD5 checksum of the state. + sum := md5.Sum(state) + + return &remote.Payload{ + Data: state, + MD5: sum[:], + }, nil +} + +// Put the remote state. +func (r *remoteClient) Put(state []byte) error { + ctx := context.Background() + + // Read the raw state into a Terraform state. + stateFile, err := statefile.Read(bytes.NewReader(state)) + if err != nil { + return fmt.Errorf("Error reading state: %s", err) + } + + options := tfe.StateVersionCreateOptions{ + Lineage: tfe.String(stateFile.Lineage), + Serial: tfe.Int64(int64(stateFile.Serial)), + MD5: tfe.String(fmt.Sprintf("%x", md5.Sum(state))), + State: tfe.String(base64.StdEncoding.EncodeToString(state)), + } + + // If we have a run ID, make sure to add it to the options + // so the state will be properly associated with the run. + if r.runID != "" { + options.Run = &tfe.Run{ID: r.runID} + } + + // Create the new state. + _, err = r.client.StateVersions.Create(ctx, r.workspace.ID, options) + if err != nil { + r.stateUploadErr = true + return fmt.Errorf("Error uploading state: %v", err) + } + + return nil +} + +// Delete the remote state. +func (r *remoteClient) Delete() error { + err := r.client.Workspaces.Delete(context.Background(), r.organization, r.workspace.Name) + if err != nil && err != tfe.ErrResourceNotFound { + return fmt.Errorf("Error deleting workspace %s: %v", r.workspace.Name, err) + } + + return nil +} + +// Lock the remote state. +func (r *remoteClient) Lock(info *state.LockInfo) (string, error) { + ctx := context.Background() + + lockErr := &state.LockError{Info: r.lockInfo} + + // Lock the workspace. + _, err := r.client.Workspaces.Lock(ctx, r.workspace.ID, tfe.WorkspaceLockOptions{ + Reason: tfe.String("Locked by Terraform"), + }) + if err != nil { + if err == tfe.ErrWorkspaceLocked { + err = fmt.Errorf("%s (lock ID: \"%s/%s\")", err, r.organization, r.workspace.Name) + } + lockErr.Err = err + return "", lockErr + } + + r.lockInfo = info + + return r.lockInfo.ID, nil +} + +// Unlock the remote state. +func (r *remoteClient) Unlock(id string) error { + ctx := context.Background() + + // We first check if there was an error while uploading the latest + // state. If so, we will not unlock the workspace to prevent any + // changes from being applied until the correct state is uploaded. + if r.stateUploadErr { + return nil + } + + lockErr := &state.LockError{Info: r.lockInfo} + + // With lock info this should be treated as a normal unlock. + if r.lockInfo != nil { + // Verify the expected lock ID. + if r.lockInfo.ID != id { + lockErr.Err = fmt.Errorf("lock ID does not match existing lock") + return lockErr + } + + // Unlock the workspace. + _, err := r.client.Workspaces.Unlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil + } + + // Verify the optional force-unlock lock ID. + if r.organization+"/"+r.workspace.Name != id { + lockErr.Err = fmt.Errorf( + "lock ID %q does not match existing lock ID \"%s/%s\"", + id, + r.organization, + r.workspace.Name, + ) + return lockErr + } + + // Force unlock the workspace. 
+ _, err := r.client.Workspaces.ForceUnlock(ctx, r.workspace.ID) + if err != nil { + lockErr.Err = err + return lockErr + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/cli.go b/vendor/github.com/hashicorp/terraform/backend/remote/cli.go new file mode 100644 index 00000000..5a6afa7e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/cli.go @@ -0,0 +1,21 @@ +package remote + +import ( + "github.com/hashicorp/terraform/backend" +) + +// CLIInit implements backend.CLI +func (b *Remote) CLIInit(opts *backend.CLIOpts) error { + if cli, ok := b.local.(backend.CLI); ok { + if err := cli.CLIInit(opts); err != nil { + return err + } + } + + b.CLI = opts.CLI + b.CLIColor = opts.CLIColor + b.ShowDiagnostics = opts.ShowDiagnostics + b.ContextOpts = opts.ContextOpts + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/colorize.go b/vendor/github.com/hashicorp/terraform/backend/remote/colorize.go new file mode 100644 index 00000000..97e24e32 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/colorize.go @@ -0,0 +1,50 @@ +package remote + +import ( + "regexp" + + "github.com/mitchellh/colorstring" +) + +// TODO SvH: This file should be deleted and the type cliColorize should be +// renamed back to Colorize as soon as we can pass -no-color to the backend. + +// colorsRe is used to find ANSI escaped color codes. +var colorsRe = regexp.MustCompile("\033\\[\\d{1,3}m") + +// Colorer is the interface that must be implemented to colorize strings. +type Colorer interface { + Color(v string) string +} + +// Colorize is used to print output when the -no-color flag is used. It will +// strip all ANSI escaped color codes which are set while the operation was +// executed in Terraform Enterprise. +// +// When Terraform Enterprise supports run specific variables, this code can be +// removed as we can then pass the CLI flag to the backend and prevent the color +// codes from being written to the output. +type Colorize struct { + cliColor *colorstring.Colorize +} + +// Color will strip all ANSI escaped color codes and return a uncolored string. +func (c *Colorize) Color(v string) string { + return colorsRe.ReplaceAllString(c.cliColor.Color(v), "") +} + +// Colorize returns the Colorize structure that can be used for colorizing +// output. This is guaranteed to always return a non-nil value and so is useful +// as a helper to wrap any potentially colored strings. 
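+//
+// For example (an illustrative sketch, not part of the surrounding code):
+//
+//	b.CLI.Output(b.Colorize().Color("[green]Apply complete![reset]"))
+//
+// prints the message with ANSI colors when color output is enabled, and
+// with all color codes stripped when it is disabled.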
+func (b *Remote) Colorize() Colorer { + if b.CLIColor != nil && !b.CLIColor.Disable { + return b.CLIColor + } + if b.CLIColor != nil { + return &Colorize{cliColor: b.CLIColor} + } + return &Colorize{cliColor: &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + }} +} diff --git a/vendor/github.com/hashicorp/terraform/backend/remote/testing.go b/vendor/github.com/hashicorp/terraform/backend/remote/testing.go new file mode 100644 index 00000000..09c54189 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/remote/testing.go @@ -0,0 +1,298 @@ +package remote + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "path" + "testing" + + tfe "github.com/hashicorp/go-tfe" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/state/remote" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/zclconf/go-cty/cty" + + backendLocal "github.com/hashicorp/terraform/backend/local" +) + +const ( + testCred = "test-auth-token" +) + +var ( + tfeHost = svchost.Hostname(defaultHostname) + credsSrc = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ + tfeHost: {"token": testCred}, + }) +) + +func testInput(t *testing.T, answers map[string]string) *mockInput { + return &mockInput{answers: answers} +} + +func testBackendDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoDefault(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("hashicorp"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.NullVal(cty.String), + "prefix": cty.StringVal("my-app-"), + }), + }) + return testBackend(t, obj) +} + +func testBackendNoOperations(t *testing.T) (*Remote, func()) { + obj := cty.ObjectVal(map[string]cty.Value{ + "hostname": cty.NullVal(cty.String), + "organization": cty.StringVal("no-operations"), + "token": cty.NullVal(cty.String), + "workspaces": cty.ObjectVal(map[string]cty.Value{ + "name": cty.StringVal("prod"), + "prefix": cty.NullVal(cty.String), + }), + }) + return testBackend(t, obj) +} + +func testRemoteClient(t *testing.T) remote.Client { + b, bCleanup := testBackendDefault(t) + defer bCleanup() + + raw, err := b.StateMgr(backend.DefaultStateName) + if err != nil { + t.Fatalf("error: %v", err) + } + + return raw.(*remote.State).Client +} + +func testBackend(t *testing.T, obj cty.Value) (*Remote, func()) { + s := testServer(t) + b := New(testDisco(s)) + + // Configure the backend so the client is created. 
+ newObj, valDiags := b.PrepareConfig(obj) + if len(valDiags) != 0 { + t.Fatal(valDiags.ErrWithWarnings()) + } + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + t.Fatal(confDiags.ErrWithWarnings()) + } + + // Get a new mock client. + mc := newMockClient() + + // Replace the services we use with our mock services. + b.CLI = cli.NewMockUi() + b.client.Applies = mc.Applies + b.client.ConfigurationVersions = mc.ConfigurationVersions + b.client.Organizations = mc.Organizations + b.client.Plans = mc.Plans + b.client.PolicyChecks = mc.PolicyChecks + b.client.Runs = mc.Runs + b.client.StateVersions = mc.StateVersions + b.client.Workspaces = mc.Workspaces + + b.ShowDiagnostics = func(vals ...interface{}) { + var diags tfdiags.Diagnostics + for _, diag := range diags.Append(vals...) { + b.CLI.Error(diag.Description().Summary) + } + } + + // Set local to a local test backend. + b.local = testLocalBackend(t, b) + + ctx := context.Background() + + // Create the organization. + _, err := b.client.Organizations.Create(ctx, tfe.OrganizationCreateOptions{ + Name: tfe.String(b.organization), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + + // Create the default workspace if required. + if b.workspace != "" { + _, err = b.client.Workspaces.Create(ctx, b.organization, tfe.WorkspaceCreateOptions{ + Name: tfe.String(b.workspace), + }) + if err != nil { + t.Fatalf("error: %v", err) + } + } + + return b, s.Close +} + +func testLocalBackend(t *testing.T, remote *Remote) backend.Enhanced { + b := backendLocal.NewWithBackend(remote) + + b.CLI = remote.CLI + b.ShowDiagnostics = remote.ShowDiagnostics + + // Add a test provider to the local backend. + p := backendLocal.TestLocalProvider(t, b, "null", &terraform.ProviderSchema{ + ResourceTypes: map[string]*configschema.Block{ + "null_resource": { + Attributes: map[string]*configschema.Attribute{ + "id": {Type: cty.String, Computed: true}, + }, + }, + }, + }) + p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{ + "id": cty.StringVal("yes"), + })} + + return b +} + +// testServer returns a *httptest.Server used for local testing. +func testServer(t *testing.T) *httptest.Server { + mux := http.NewServeMux() + + // Respond to service discovery calls. + mux.HandleFunc("/well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, `{ + "state.v2": "/api/v2/", + "tfe.v2.1": "/api/v2/", + "versions.v1": "/v1/versions/" +}`) + }) + + // Respond to service version constraints calls. + mux.HandleFunc("/v1/versions/", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + io.WriteString(w, fmt.Sprintf(`{ + "service": "%s", + "product": "terraform", + "minimum": "0.1.0", + "maximum": "10.0.0" +}`, path.Base(r.URL.Path))) + }) + + // Respond to the initial query to read the hashicorp org entitlements. + mux.HandleFunc("/api/v2/organizations/hashicorp/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-GExadygjSbKP8hsY", + "type": "entitlement-sets", + "attributes": { + "operations": true, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // Respond to the initial query to read the no-operations org entitlements. 
+ mux.HandleFunc("/api/v2/organizations/no-operations/entitlement-set", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/vnd.api+json") + io.WriteString(w, `{ + "data": { + "id": "org-ufxa3y8jSbKP8hsT", + "type": "entitlement-sets", + "attributes": { + "operations": false, + "private-module-registry": true, + "sentinel": true, + "state-storage": true, + "teams": true, + "vcs-integrations": true + } + } +}`) + }) + + // All tests that are assumed to pass will use the hashicorp organization, + // so for all other organization requests we will return a 404. + mux.HandleFunc("/api/v2/organizations/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(404) + io.WriteString(w, `{ + "errors": [ + { + "status": "404", + "title": "not found" + } + ] +}`) + }) + + return httptest.NewServer(mux) +} + +// testDisco returns a *disco.Disco mapping app.terraform.io and +// localhost to a local test server. +func testDisco(s *httptest.Server) *disco.Disco { + services := map[string]interface{}{ + "state.v2": fmt.Sprintf("%s/api/v2/", s.URL), + "tfe.v2.1": fmt.Sprintf("%s/api/v2/", s.URL), + "versions.v1": fmt.Sprintf("%s/v1/versions/", s.URL), + } + d := disco.NewWithCredentialsSource(credsSrc) + + d.ForceHostServices(svchost.Hostname(defaultHostname), services) + d.ForceHostServices(svchost.Hostname("localhost"), services) + return d +} + +type unparsedVariableValue struct { + value string + source terraform.ValueSourceType +} + +func (v *unparsedVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + return &terraform.InputValue{ + Value: cty.StringVal(v.value), + SourceType: v.source, + }, tfdiags.Diagnostics{} +} + +// testVariable returns a backend.UnparsedVariableValue used for testing. +func testVariables(s terraform.ValueSourceType, vs ...string) map[string]backend.UnparsedVariableValue { + vars := make(map[string]backend.UnparsedVariableValue, len(vs)) + for _, v := range vs { + vars[v] = &unparsedVariableValue{ + value: v, + source: s, + } + } + return vars +} diff --git a/vendor/github.com/hashicorp/terraform/backend/testing.go b/vendor/github.com/hashicorp/terraform/backend/testing.go new file mode 100644 index 00000000..79980deb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/testing.go @@ -0,0 +1,404 @@ +package backend + +import ( + "reflect" + "sort" + "testing" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/tfdiags" +) + +// TestBackendConfig validates and configures the backend with the +// given configuration. +func TestBackendConfig(t *testing.T, b Backend, c hcl.Body) Backend { + t.Helper() + + t.Logf("TestBackendConfig on %T with %#v", b, c) + + var diags tfdiags.Diagnostics + + // To make things easier for test authors, we'll allow a nil body here + // (even though that's not normally valid) and just treat it as an empty + // body. 
+ if c == nil { + c = hcl.EmptyBody() + } + + schema := b.ConfigSchema() + spec := schema.DecoderSpec() + obj, decDiags := hcldec.Decode(c, spec, nil) + diags = diags.Append(decDiags) + + newObj, valDiags := b.PrepareConfig(obj) + diags = diags.Append(valDiags.InConfigBody(c)) + + if len(diags) != 0 { + t.Fatal(diags.ErrWithWarnings()) + } + + obj = newObj + + confDiags := b.Configure(obj) + if len(confDiags) != 0 { + confDiags = confDiags.InConfigBody(c) + t.Fatal(confDiags.ErrWithWarnings()) + } + + return b +} + +// TestWrapConfig takes a raw data structure and converts it into a +// synthetic hcl.Body to use for testing. +// +// The given structure should only include values that can be accepted by +// hcl2shim.HCL2ValueFromConfigValue. If incompatible values are given, +// this function will panic. +func TestWrapConfig(raw map[string]interface{}) hcl.Body { + obj := hcl2shim.HCL2ValueFromConfigValue(raw) + return configs.SynthBody("", obj.AsValueMap()) +} + +// TestBackend will test the functionality of a Backend. The backend is +// assumed to already be configured. This will test state functionality. +// If the backend reports it doesn't support multi-state by returning the +// error ErrWorkspacesNotSupported, then it will not test that. +func TestBackendStates(t *testing.T, b Backend) { + t.Helper() + + noDefault := false + if _, err := b.StateMgr(DefaultStateName); err != nil { + if err == ErrDefaultWorkspaceNotSupported { + noDefault = true + } else { + t.Fatalf("error: %v", err) + } + } + + workspaces, err := b.Workspaces() + if err != nil { + if err == ErrWorkspacesNotSupported { + t.Logf("TestBackend: workspaces not supported in %T, skipping", b) + return + } + t.Fatalf("error: %v", err) + } + + // Test it starts with only the default + if !noDefault && (len(workspaces) != 1 || workspaces[0] != DefaultStateName) { + t.Fatalf("should only default to start: %#v", workspaces) + } + + // Create a couple states + foo, err := b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasResources() { + t.Fatalf("should be empty: %s", v) + } + + bar, err := b.StateMgr("bar") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := bar.State(); v.HasResources() { + t.Fatalf("should be empty: %s", v) + } + + // Verify they are distinct states that can be read back from storage + { + // We'll use two distinct states here and verify that changing one + // does not also change the other. + fooState := states.NewState() + barState := states.NewState() + + // write a known state to foo + if err := foo.WriteState(fooState); err != nil { + t.Fatal("error writing foo state:", err) + } + if err := foo.PersistState(); err != nil { + t.Fatal("error persisting foo state:", err) + } + + // We'll make "bar" different by adding a fake resource state to it. 
barState.SyncWrapper().SetResourceInstanceCurrent( + addrs.ResourceInstance{ + Resource: addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: "test_thing", + Name: "foo", + }, + }.Absolute(addrs.RootModuleInstance), + &states.ResourceInstanceObjectSrc{ + AttrsJSON: []byte("{}"), + Status: states.ObjectReady, + SchemaVersion: 0, + }, + addrs.ProviderConfig{ + Type: "test", + }.Absolute(addrs.RootModuleInstance), + ) + + // write a distinct known state to bar + if err := bar.WriteState(barState); err != nil { + t.Fatalf("bad: %s", err) + } + if err := bar.PersistState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // verify that foo is unchanged with the existing state manager + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasResources() { + t.Fatal("after writing a resource to bar, foo now has resources too") + } + + // fetch foo again from the backend + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatal("error refreshing foo:", err) + } + fooState = foo.State() + if fooState.HasResources() { + t.Fatal("after writing a resource to bar and re-reading foo, foo now has resources too") + } + + // fetch the bar again from the backend + bar, err = b.StateMgr("bar") + if err != nil { + t.Fatal("error re-fetching state:", err) + } + if err := bar.RefreshState(); err != nil { + t.Fatal("error refreshing bar:", err) + } + barState = bar.State() + if !barState.HasResources() { + t.Fatal("after writing a resource instance object to bar and re-reading it, the object has vanished") + } + } + + // Verify we can now list them + { + // we determined that named states are supported earlier + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"bar", "default", "foo"} + if noDefault { + expected = []string{"bar", "foo"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } + + // Delete some workspaces + if err := b.DeleteWorkspace("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify the default state can't be deleted + if err := b.DeleteWorkspace(DefaultStateName); err == nil { + t.Fatal("expected error") + } + + // Create and delete the foo workspace again. + // Make sure that there are no leftover artifacts from a deleted state + // preventing re-creation. + foo, err = b.StateMgr("foo") + if err != nil { + t.Fatalf("error: %s", err) + } + if err := foo.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + if v := foo.State(); v.HasResources() { + t.Fatalf("should be empty: %s", v) + } + // and delete it again + if err := b.DeleteWorkspace("foo"); err != nil { + t.Fatalf("err: %s", err) + } + + // Verify deletion + { + workspaces, err := b.Workspaces() + if err != nil { + t.Fatalf("err: %s", err) + } + + sort.Strings(workspaces) + expected := []string{"bar", "default"} + if noDefault { + expected = []string{"bar"} + } + if !reflect.DeepEqual(workspaces, expected) { + t.Fatalf("wrong workspaces list\ngot: %#v\nwant: %#v", workspaces, expected) + } + } +} + +// TestBackendStateLocks will test the locking functionality of the remote +// state backend.
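+//
+// An illustrative call from a backend's acceptance tests, with testBackend
+// standing in for whatever helper builds two configured instances of the
+// same backend:
+//
+//	b1 := testBackend(t)
+//	b2 := testBackend(t)
+//	backend.TestBackendStateLocks(t, b1, b2)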
+func TestBackendStateLocks(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, false) +} + +// TestBackendStateForceUnlock verifies that the lock error is the expected +// type, and the lock can be unlocked using the ID reported in the error. +// Remote state backends that support -force-unlock should call this in at +// least one of the acceptance tests. +func TestBackendStateForceUnlock(t *testing.T, b1, b2 Backend) { + t.Helper() + testLocks(t, b1, b2, true) +} + +func testLocks(t *testing.T, b1, b2 Backend, testForceUnlock bool) { + t.Helper() + + // Get the default state for each + b1StateMgr, err := b1.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b1StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Fast exit if this doesn't support locking at all + if _, ok := b1StateMgr.(state.Locker); !ok { + t.Logf("TestBackend: backend %T doesn't support state locking, not testing", b1) + return + } + + t.Logf("TestBackend: testing state locking for %T", b1) + + b2StateMgr, err := b2.StateMgr(DefaultStateName) + if err != nil { + t.Fatalf("error: %s", err) + } + if err := b2StateMgr.RefreshState(); err != nil { + t.Fatalf("bad: %s", err) + } + + // Reassign so it's obvious what's happening + lockerA := b1StateMgr.(state.Locker) + lockerB := b2StateMgr.(state.Locker) + + infoA := state.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := state.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + // Make sure we can still get the state.State from another instance even + // when locked. This should only happen when a state is loaded via the + // backend, and as a remote state. + _, err = b2.StateMgr(DefaultStateName) + if err != nil { + t.Errorf("failed to read locked state from another backend instance: %s", err) + } + + // If the lock ID is blank, assume locking is disabled + if lockIDA == "" { + t.Logf("TestBackend: %T: empty string returned for lock, assuming disabled", b1) + return + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B") + } + + if lockIDB == lockIDA { + t.Errorf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // test the equivalent of -force-unlock, by using the id from the error + // output.
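+ // On the command line this corresponds to running, for example:
+ //
+ //     terraform force-unlock <ID>
+ //
+ // with <ID> taken from the lock error message.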
+ if !testForceUnlock { + return + } + + // get a new ID + infoA.ID, err = uuid.GenerateUUID() + if err != nil { + panic(err) + } + + lockIDA, err = lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get re lock A:", err) + } + unlock := func() { + err := lockerA.Unlock(lockIDA) + if err != nil { + t.Fatal(err) + } + } + + _, err = lockerB.Lock(infoB) + if err == nil { + unlock() + t.Fatal("client B obtained lock while held by client A") + } + + infoErr, ok := err.(*statemgr.LockError) + if !ok { + unlock() + t.Fatalf("expected type *statemgr.LockError, got : %#v", err) + } + + // try to unlock with the second unlocker, using the ID from the error + if err := lockerB.Unlock(infoErr.Info.ID); err != nil { + unlock() + t.Fatalf("could not unlock with the reported ID %q: %s", infoErr.Info.ID, err) + } +} diff --git a/vendor/github.com/hashicorp/terraform/backend/unparsed_value.go b/vendor/github.com/hashicorp/terraform/backend/unparsed_value.go new file mode 100644 index 00000000..5d7b4d3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/backend/unparsed_value.go @@ -0,0 +1,111 @@ +package backend + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// UnparsedVariableValue represents a variable value provided by the caller +// whose parsing must be deferred until configuration is available. +// +// This exists to allow processing of variable-setting arguments (e.g. in the +// command package) to be separated from parsing (in the backend package). +type UnparsedVariableValue interface { + // ParseVariableValue information in the provided variable configuration + // to parse (if necessary) and return the variable value encapsulated in + // the receiver. + // + // If error diagnostics are returned, the resulting value may be invalid + // or incomplete. + ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) +} + +func ParseVariableValues(vv map[string]UnparsedVariableValue, decls map[string]*configs.Variable) (terraform.InputValues, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := make(terraform.InputValues, len(vv)) + + // Currently we're generating only warnings for undeclared variables + // defined in files (see below) but we only want to generate a few warnings + // at a time because existing deployments may have lots of these and + // the result can therefore be overwhelming. + seenUndeclaredInFile := 0 + + for name, rv := range vv { + var mode configs.VariableParsingMode + config, declared := decls[name] + if declared { + mode = config.ParsingMode + } else { + mode = configs.VariableParseLiteral + } + + val, valDiags := rv.ParseVariableValue(mode) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + continue + } + + if !declared { + switch val.SourceType { + case terraform.ValueFromConfig, terraform.ValueFromAutoFile, terraform.ValueFromNamedFile: + // These source types have source ranges, so we can produce + // a nice error message with good context. + // + // This one is a warning for now because there is an existing + // pattern of providing a file containing the superset of + // variables across all configurations in an organization. This + // is deprecated in v0.12.0 because it's more important to give + // feedback to users who make typos. 
Those using this approach + // should migrate to using environment variables instead before + // this becomes an error in a future major release. + if seenUndeclaredInFile < 3 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Value for undeclared variable", + Detail: fmt.Sprintf("The root module does not declare a variable named %q. To use this value, add a \"variable\" block to the configuration.\n\nUsing a variables file to set an undeclared variable is deprecated and will become an error in a future release. If you wish to provide certain \"global\" settings to all configurations in your organization, use TF_VAR_... environment variables to set these instead.", name), + Subject: val.SourceRange.ToHCL().Ptr(), + }) + } + seenUndeclaredInFile++ + + case terraform.ValueFromEnvVar: + // We allow and ignore undeclared names for environment + // variables, because users will often set these globally + // when they are used across many (but not necessarily all) + // configurations. + case terraform.ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value for undeclared variable", + fmt.Sprintf("A variable named %q was assigned on the command line, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name), + )) + default: + // For all other source types we are more vague, but other situations + // don't generally crop up at this layer in practice. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value for undeclared variable", + fmt.Sprintf("A variable named %q was assigned a value, but the root module does not declare a variable of that name. To use this value, add a \"variable\" block to the configuration.", name), + )) + } + continue + } + + ret[name] = val + } + + if seenUndeclaredInFile >= 3 { + extras := seenUndeclaredInFile - 2 + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Values for undeclared variables", + Detail: fmt.Sprintf("In addition to the other similar warnings shown, %d other variable(s) defined without being declared.", extras), + }) + } + + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go new file mode 100644 index 00000000..accc356d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/data_source_state.go @@ -0,0 +1,219 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + backendInit "github.com/hashicorp/terraform/backend/init" +) + +func dataSourceRemoteStateGetSchema() providers.Schema { + return providers.Schema{ + Block: &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + "backend": { + Type: cty.String, + Required: true, + }, + "config": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + "defaults": { + Type: cty.DynamicPseudoType, + Optional: true, + }, + "outputs": { + Type: cty.DynamicPseudoType, + Computed: true, + }, + "workspace": { + Type: cty.String, + Optional: true, + }, + }, + }, + } +} + +func dataSourceRemoteStateValidate(cfg cty.Value) tfdiags.Diagnostics { + 
var diags tfdiags.Diagnostics + + // Getting the backend implicitly validates the configuration for it, + // but we can only do that if it's all known already. + if cfg.GetAttr("config").IsWhollyKnown() && cfg.GetAttr("backend").IsKnown() { + _, moreDiags := getBackend(cfg) + diags = diags.Append(moreDiags) + } else { + // Otherwise we'll just type-check the config object itself. + configTy := cfg.GetAttr("config").Type() + if configTy != cty.DynamicPseudoType && !(configTy.IsObjectType() || configTy.IsMapType()) { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid backend configuration", + "The configuration must be an object value.", + cty.GetAttrPath("config"), + )) + } + } + + { + defaultsTy := cfg.GetAttr("defaults").Type() + if defaultsTy != cty.DynamicPseudoType && !(defaultsTy.IsObjectType() || defaultsTy.IsMapType()) { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid default values", + "Defaults must be given in an object value.", + cty.GetAttrPath("defaults"), + )) + } + } + + return diags +} + +func dataSourceRemoteStateRead(d cty.Value) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + b, moreDiags := getBackend(d) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + return cty.NilVal, diags + } + + newState := make(map[string]cty.Value) + newState["backend"] = d.GetAttr("backend") + newState["config"] = d.GetAttr("config") + + workspaceName := backend.DefaultStateName + + if workspaceVal := d.GetAttr("workspace"); !workspaceVal.IsNull() { + newState["workspace"] = workspaceVal + workspaceName = workspaceVal.AsString() + } + + newState["workspace"] = cty.StringVal(workspaceName) + + state, err := b.StateMgr(workspaceName) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Error loading state", + fmt.Sprintf("error loading the remote state: %s", err), + cty.Path(nil).GetAttr("backend"), + )) + return cty.NilVal, diags + } + + if err := state.RefreshState(); err != nil { + diags = diags.Append(err) + return cty.NilVal, diags + } + + outputs := make(map[string]cty.Value) + + if defaultsVal := d.GetAttr("defaults"); !defaultsVal.IsNull() { + newState["defaults"] = defaultsVal + it := defaultsVal.ElementIterator() + for it.Next() { + k, v := it.Element() + outputs[k.AsString()] = v + } + } else { + newState["defaults"] = cty.NullVal(cty.DynamicPseudoType) + } + + remoteState := state.State() + if remoteState == nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Unable to find remote state", + "No stored state was found for the given workspace in the given backend.", + cty.Path(nil).GetAttr("workspace"), + )) + newState["outputs"] = cty.EmptyObjectVal + return cty.ObjectVal(newState), diags + } + mod := remoteState.RootModule() + if mod != nil { // should always have a root module in any valid state + for k, os := range mod.OutputValues { + outputs[k] = os.Value + } + } + + newState["outputs"] = cty.ObjectVal(outputs) + + return cty.ObjectVal(newState), diags +} + +func getBackend(cfg cty.Value) (backend.Backend, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + backendType := cfg.GetAttr("backend").AsString() + + // Don't break people using the old _local syntax - but note warning above + if backendType == "_local" { + log.Println(`[INFO] Switching old (unsupported) backend "_local" to "local"`) + backendType = "local" + } + + // Create the client to access our remote state + log.Printf("[DEBUG] Initializing remote state backend: %s",
backendType) + f := backendInit.Backend(backendType) + if f == nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid backend configuration", + fmt.Sprintf("There is no backend type named %q.", backendType), + cty.Path(nil).GetAttr("backend"), + )) + return nil, diags + } + b := f() + + config := cfg.GetAttr("config") + if config.IsNull() { + // We'll treat this as an empty configuration and see if the backend's + // schema and validation code will accept it. + config = cty.EmptyObjectVal + } + + if config.Type().IsMapType() { // The code below expects an object type, so we'll convert + config = cty.ObjectVal(config.AsValueMap()) + } + + schema := b.ConfigSchema() + // Try to coerce the provided value into the desired configuration type. + configVal, err := schema.CoerceValue(config) + if err != nil { + diags = diags.Append(tfdiags.AttributeValue( + tfdiags.Error, + "Invalid backend configuration", + fmt.Sprintf("The given configuration is not valid for backend %q: %s.", backendType, + tfdiags.FormatError(err)), + cty.Path(nil).GetAttr("config"), + )) + return nil, diags + } + + newVal, validateDiags := b.PrepareConfig(configVal) + diags = diags.Append(validateDiags) + if validateDiags.HasErrors() { + return nil, diags + } + configVal = newVal + + configureDiags := b.Configure(configVal) + if configureDiags.HasErrors() { + diags = diags.Append(configureDiags.Err()) + return nil, diags + } + + return b, diags +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/flatten.go b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/flatten.go new file mode 100644 index 00000000..4766a4f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/flatten.go @@ -0,0 +1,76 @@ +package terraform + +import ( + "fmt" + "reflect" +) + +// remoteStateFlatten takes a structure and turns it into a flat map[string]string. +// +// Within the "thing" parameter, only primitive values are allowed. Structs are +// not supported. Therefore, it can only be slices, maps, primitives, and +// any combination of those together.
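+//
+// For example (illustrative only), the input
+//
+//	map[string]interface{}{"a": []interface{}{1, 2}, "b": map[string]interface{}{"c": "d"}}
+//
+// flattens to:
+//
+//	a.# = 2
+//	a.0 = 1
+//	a.1 = 2
+//	b.% = 1
+//	b.c = d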
+// +// The difference between this version and the version in package flatmap is that +// we add the count key for maps in this version, and return a normal +// map[string]string instead of a flatmap.Map +func remoteStateFlatten(thing map[string]interface{}) map[string]string { + result := make(map[string]string) + + for k, raw := range thing { + flatten(result, k, reflect.ValueOf(raw)) + } + + return result +} + +func flatten(result map[string]string, prefix string, v reflect.Value) { + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Bool: + if v.Bool() { + result[prefix] = "true" + } else { + result[prefix] = "false" + } + case reflect.Int: + result[prefix] = fmt.Sprintf("%d", v.Int()) + case reflect.Map: + flattenMap(result, prefix, v) + case reflect.Slice: + flattenSlice(result, prefix, v) + case reflect.String: + result[prefix] = v.String() + default: + panic(fmt.Sprintf("Unknown: %s", v)) + } +} + +func flattenMap(result map[string]string, prefix string, v reflect.Value) { + mapKeys := v.MapKeys() + + result[fmt.Sprintf("%s.%%", prefix)] = fmt.Sprintf("%d", len(mapKeys)) + for _, k := range mapKeys { + if k.Kind() == reflect.Interface { + k = k.Elem() + } + + if k.Kind() != reflect.String { + panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k)) + } + + flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k)) + } +} + +func flattenSlice(result map[string]string, prefix string, v reflect.Value) { + prefix = prefix + "." + + result[prefix+"#"] = fmt.Sprintf("%d", v.Len()) + for i := 0; i < v.Len(); i++ { + flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/provider.go b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/provider.go new file mode 100644 index 00000000..605362e3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/providers/terraform/provider.go @@ -0,0 +1,140 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/providers" +) + +// Provider is an implementation of providers.Interface +type Provider struct { + // Provider is the schema for the provider itself. + Schema providers.Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]providers.Schema +} + +// NewProvider returns a new terraform provider +func NewProvider() *Provider { + return &Provider{} +} + +// GetSchema returns the complete schema for the provider. +func (p *Provider) GetSchema() providers.GetSchemaResponse { + return providers.GetSchemaResponse{ + DataSources: map[string]providers.Schema{ + "terraform_remote_state": dataSourceRemoteStateGetSchema(), + }, + } +} + +// PrepareProviderConfig is used to validate the configuration values. +func (p *Provider) PrepareProviderConfig(req providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.PrepareProviderConfigResponse + res.PreparedConfig = req.Config + return res +} + +// ValidateDataSourceConfig is used to validate the data source configuration values.
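+//
+// A typical configuration exercising this data source looks like the
+// following (illustrative only; the path is hypothetical):
+//
+//	data "terraform_remote_state" "network" {
+//	  backend = "local"
+//	  config = {
+//	    path = "../network/terraform.tfstate"
+//	  }
+//	}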
+func (p *Provider) ValidateDataSourceConfig(req providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + // FIXME: move the backend configuration validate call that's currently + // inside the read method into here so that we can catch provider configuration + // errors in terraform validate as well as during terraform plan. + var res providers.ValidateDataSourceConfigResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + diags := dataSourceRemoteStateValidate(req.Config) + res.Diagnostics = diags + + return res +} + +// Configure configures and initializes the provider. +func (p *Provider) Configure(providers.ConfigureRequest) providers.ConfigureResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.ConfigureResponse + return res +} + +// ReadDataSource returns the data source's current state. +func (p *Provider) ReadDataSource(req providers.ReadDataSourceRequest) providers.ReadDataSourceResponse { + // call function + var res providers.ReadDataSourceResponse + + // This should not happen + if req.TypeName != "terraform_remote_state" { + res.Diagnostics.Append(fmt.Errorf("Error: unsupported data source %s", req.TypeName)) + return res + } + + newState, diags := dataSourceRemoteStateRead(req.Config) + + res.State = newState + res.Diagnostics = diags + + return res +} + +// Stop is called when the provider should halt any in-flight actions. +func (p *Provider) Stop() error { + log.Println("[DEBUG] terraform provider cannot Stop") + return nil +} + +// All the Resource-specific functions are below. +// The terraform provider supplies a single data source, `terraform_remote_state` +// and no resources. + +// UpgradeResourceState is called when the state loader encounters an +// instance state whose schema version is less than the one reported by the +// currently-used version of the corresponding provider, and the upgraded +// result is used for any further processing. +func (p *Provider) UpgradeResourceState(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + panic("unimplemented - terraform_remote_state has no resources") +} + +// ReadResource refreshes a resource and returns its current state. +func (p *Provider) ReadResource(providers.ReadResourceRequest) providers.ReadResourceResponse { + panic("unimplemented - terraform_remote_state has no resources") +} + +// PlanResourceChange takes the current state and proposed state of a +// resource, and returns the planned final state. +func (p *Provider) PlanResourceChange(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + panic("unimplemented - terraform_remote_state has no resources") +} + +// ApplyResourceChange takes the planned state for a resource, which may +// yet contain unknown computed values, and applies the changes returning +// the final state. +func (p *Provider) ApplyResourceChange(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse { + panic("unimplemented - terraform_remote_state has no resources") +} + +// ImportResourceState requests that the given resource be imported. 
+func (p *Provider) ImportResourceState(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse { + panic("unimplemented - terraform_remote_state has no resources") +} + +// ValidateResourceTypeConfig is used to validate the resource configuration values. +func (p *Provider) ValidateResourceTypeConfig(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + // At this moment there is nothing to configure for the terraform provider, + // so we will happily return without taking any action + var res providers.ValidateResourceTypeConfigResponse + return res +} + +// Close is a noop for this provider, since it's run in-process. +func (p *Provider) Close() error { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/linux_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/linux_provisioner.go new file mode 100644 index 00000000..39996770 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/linux_provisioner.go @@ -0,0 +1,115 @@ +package chef + +import ( + "fmt" + "path" + "strings" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/terraform" +) + +const ( + chmod = "find %s -maxdepth 1 -type f -exec /bin/chmod %d {} +" + installURL = "https://omnitruck.chef.io/install.sh" +) + +func (p *provisioner) linuxInstallChefClient(o terraform.UIOutput, comm communicator.Communicator) error { + // Build up the command prefix + prefix := "" + if p.HTTPProxy != "" { + prefix += fmt.Sprintf("http_proxy='%s' ", p.HTTPProxy) + } + if p.HTTPSProxy != "" { + prefix += fmt.Sprintf("https_proxy='%s' ", p.HTTPSProxy) + } + if len(p.NOProxy) > 0 { + prefix += fmt.Sprintf("no_proxy='%s' ", strings.Join(p.NOProxy, ",")) + } + + // First download the install.sh script from Chef + err := p.runCommand(o, comm, fmt.Sprintf("%scurl -LO %s", prefix, installURL)) + if err != nil { + return err + } + + // Then execute the install.sh script to download and install Chef Client + err = p.runCommand(o, comm, fmt.Sprintf("%sbash ./install.sh -v %q -c %s", prefix, p.Version, p.Channel)) + if err != nil { + return err + } + + // And finally cleanup the install.sh script again + return p.runCommand(o, comm, fmt.Sprintf("%srm -f install.sh", prefix)) +} + +func (p *provisioner) linuxCreateConfigFiles(o terraform.UIOutput, comm communicator.Communicator) error { + // Make sure the config directory exists + if err := p.runCommand(o, comm, "mkdir -p "+linuxConfDir); err != nil { + return err + } + + // Make sure we have enough rights to upload the files if using sudo + if p.useSudo { + if err := p.runCommand(o, comm, "chmod 777 "+linuxConfDir); err != nil { + return err + } + if err := p.runCommand(o, comm, fmt.Sprintf(chmod, linuxConfDir, 666)); err != nil { + return err + } + } + + if err := p.deployConfigFiles(o, comm, linuxConfDir); err != nil { + return err + } + + if len(p.OhaiHints) > 0 { + // Make sure the hints directory exists + hintsDir := path.Join(linuxConfDir, "ohai/hints") + if err := p.runCommand(o, comm, "mkdir -p "+hintsDir); err != nil { + return err + } + + // Make sure we have enough rights to upload the hints if using sudo + if p.useSudo { + if err := p.runCommand(o, comm, "chmod 777 "+hintsDir); err != nil { + return err + } + if err := p.runCommand(o, comm, fmt.Sprintf(chmod, hintsDir, 666)); err != nil { + return err + } + } + + if err := p.deployOhaiHints(o, comm, hintsDir); err !=
nil { + return err + } + + // When done copying the hints restore the rights and make sure root is owner + if p.useSudo { + if err := p.runCommand(o, comm, "chmod 755 "+hintsDir); err != nil { + return err + } + if err := p.runCommand(o, comm, fmt.Sprintf(chmod, hintsDir, 600)); err != nil { + return err + } + if err := p.runCommand(o, comm, "chown -R root:root "+hintsDir); err != nil { + return err + } + } + } + + // When done copying all files restore the rights and make sure root is owner + if p.useSudo { + if err := p.runCommand(o, comm, "chmod 755 "+linuxConfDir); err != nil { + return err + } + if err := p.runCommand(o, comm, fmt.Sprintf(chmod, linuxConfDir, 600)); err != nil { + return err + } + if err := p.runCommand(o, comm, "chown -R root:root "+linuxConfDir); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go new file mode 100644 index 00000000..c702c0f5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/resource_provisioner.go @@ -0,0 +1,813 @@ +package chef + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "os" + "path" + "regexp" + "strconv" + "strings" + "sync" + "text/template" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/go-homedir" + "github.com/mitchellh/go-linereader" +) + +const ( + clienrb = "client.rb" + defaultEnv = "_default" + firstBoot = "first-boot.json" + logfileDir = "logfiles" + linuxChefCmd = "chef-client" + linuxConfDir = "/etc/chef" + linuxNoOutput = "> /dev/null 2>&1" + linuxGemCmd = "/opt/chef/embedded/bin/gem" + linuxKnifeCmd = "knife" + secretKey = "encrypted_data_bag_secret" + windowsChefCmd = "cmd /c chef-client" + windowsConfDir = "C:/chef" + windowsNoOutput = "> nul 2>&1" + windowsGemCmd = "C:/opscode/chef/embedded/bin/gem" + windowsKnifeCmd = "cmd /c knife" +) + +const clientConf = ` +log_location STDOUT +chef_server_url "{{ .ServerURL }}" +node_name "{{ .NodeName }}" +{{ if .UsePolicyfile }} +use_policyfile true +policy_group "{{ .PolicyGroup }}" +policy_name "{{ .PolicyName }}" +{{ end -}} + +{{ if .HTTPProxy }} +http_proxy "{{ .HTTPProxy }}" +ENV['http_proxy'] = "{{ .HTTPProxy }}" +ENV['HTTP_PROXY'] = "{{ .HTTPProxy }}" +{{ end -}} + +{{ if .HTTPSProxy }} +https_proxy "{{ .HTTPSProxy }}" +ENV['https_proxy'] = "{{ .HTTPSProxy }}" +ENV['HTTPS_PROXY'] = "{{ .HTTPSProxy }}" +{{ end -}} + +{{ if .NOProxy }} +no_proxy "{{ join .NOProxy "," }}" +ENV['no_proxy'] = "{{ join .NOProxy "," }}" +{{ end -}} + +{{ if .SSLVerifyMode }} +ssl_verify_mode {{ .SSLVerifyMode }} +{{- end -}} + +{{ if .DisableReporting }} +enable_reporting false +{{ end -}} + +{{ if .ClientOptions }} +{{ join .ClientOptions "\n" }} +{{ end }} +` + +type provisionFn func(terraform.UIOutput, communicator.Communicator) error + +type provisioner struct { + Attributes map[string]interface{} + Channel string + ClientOptions []string + DisableReporting bool + Environment string + FetchChefCertificates bool + LogToFile bool + UsePolicyfile bool + PolicyGroup string + PolicyName string + HTTPProxy string + HTTPSProxy string + NamedRunList string + NOProxy []string + NodeName string + 
OhaiHints []string + OSType string + RecreateClient bool + PreventSudo bool + RunList []string + SecretKey string + ServerURL string + SkipInstall bool + SkipRegister bool + SSLVerifyMode string + UserName string + UserKey string + Vaults map[string][]string + Version string + + cleanupUserKeyCmd string + createConfigFiles provisionFn + installChefClient provisionFn + fetchChefCertificates provisionFn + generateClientKey provisionFn + configureVaults provisionFn + runChefClient provisionFn + useSudo bool +} + +// Provisioner returns a Chef provisioner +func Provisioner() terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "node_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "server_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "user_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "user_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "attributes_json": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "channel": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "stable", + }, + "client_options": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "disable_reporting": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "environment": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: defaultEnv, + }, + "fetch_chef_certificates": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "log_to_file": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "use_policyfile": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "policy_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "policy_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "http_proxy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "https_proxy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "no_proxy": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "named_run_list": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "ohai_hints": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "os_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "recreate_client": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "prevent_sudo": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "run_list": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "secret_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "skip_install": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "skip_register": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "ssl_verify_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "vault_json": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + + ApplyFunc: applyFn, + ValidateFunc: validateFn, + } +} + +// TODO: Support context cancelling (Provisioner Stop) +func applyFn(ctx context.Context) error { + o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + s := 
ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) + d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + + // Decode the provisioner config + p, err := decodeConfig(d) + if err != nil { + return err + } + + if p.OSType == "" { + switch t := s.Ephemeral.ConnInfo["type"]; t { + case "ssh", "": // The default connection type is ssh, so if the type is empty assume ssh + p.OSType = "linux" + case "winrm": + p.OSType = "windows" + default: + return fmt.Errorf("Unsupported connection type: %s", t) + } + } + + // Set some values based on the targeted OS + switch p.OSType { + case "linux": + p.cleanupUserKeyCmd = fmt.Sprintf("rm -f %s", path.Join(linuxConfDir, p.UserName+".pem")) + p.createConfigFiles = p.linuxCreateConfigFiles + p.installChefClient = p.linuxInstallChefClient + p.fetchChefCertificates = p.fetchChefCertificatesFunc(linuxKnifeCmd, linuxConfDir) + p.generateClientKey = p.generateClientKeyFunc(linuxKnifeCmd, linuxConfDir, linuxNoOutput) + p.configureVaults = p.configureVaultsFunc(linuxGemCmd, linuxKnifeCmd, linuxConfDir) + p.runChefClient = p.runChefClientFunc(linuxChefCmd, linuxConfDir) + p.useSudo = !p.PreventSudo && s.Ephemeral.ConnInfo["user"] != "root" + case "windows": + p.cleanupUserKeyCmd = fmt.Sprintf("cd %s && del /F /Q %s", windowsConfDir, p.UserName+".pem") + p.createConfigFiles = p.windowsCreateConfigFiles + p.installChefClient = p.windowsInstallChefClient + p.fetchChefCertificates = p.fetchChefCertificatesFunc(windowsKnifeCmd, windowsConfDir) + p.generateClientKey = p.generateClientKeyFunc(windowsKnifeCmd, windowsConfDir, windowsNoOutput) + p.configureVaults = p.configureVaultsFunc(windowsGemCmd, windowsKnifeCmd, windowsConfDir) + p.runChefClient = p.runChefClientFunc(windowsChefCmd, windowsConfDir) + p.useSudo = false + default: + return fmt.Errorf("Unsupported os type: %s", p.OSType) + } + + // Get a new communicator + comm, err := communicator.New(s) + if err != nil { + return err + } + + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err = communicator.Retry(retryCtx, func() error { + return comm.Connect(o) + }) + if err != nil { + return err + } + defer comm.Disconnect() + + // Make sure we always delete the user key from the new node! + var once sync.Once + cleanupUserKey := func() { + o.Output("Cleanup user key...") + if err := p.runCommand(o, comm, p.cleanupUserKeyCmd); err != nil { + o.Output("WARNING: Failed to cleanup user key on new node: " + err.Error()) + } + } + defer once.Do(cleanupUserKey) + + if !p.SkipInstall { + if err := p.installChefClient(o, comm); err != nil { + return err + } + } + + o.Output("Creating configuration files...") + if err := p.createConfigFiles(o, comm); err != nil { + return err + } + + if !p.SkipRegister { + if p.FetchChefCertificates { + o.Output("Fetch Chef certificates...") + if err := p.fetchChefCertificates(o, comm); err != nil { + return err + } + } + + o.Output("Generate the private key...") + if err := p.generateClientKey(o, comm); err != nil { + return err + } + } + + if p.Vaults != nil { + o.Output("Configure Chef vaults...") + if err := p.configureVaults(o, comm); err != nil { + return err + } + } + + // Cleanup the user key before we run Chef-Client to prevent issues + // with rights caused by changing settings during the run. 
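+	// The sync.Once wrapper guarantees the key is removed exactly once:
+	// either right here, before the chef-client run, or via the deferred
+	// call above when we return early with an error.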
+ once.Do(cleanupUserKey) + + o.Output("Starting initial Chef-Client run...") + if err := p.runChefClient(o, comm); err != nil { + return err + } + + return nil +} + +func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { + usePolicyFile := false + if usePolicyFileRaw, ok := c.Get("use_policyfile"); ok { + switch usePolicyFileRaw := usePolicyFileRaw.(type) { + case bool: + usePolicyFile = usePolicyFileRaw + case string: + usePolicyFileBool, err := strconv.ParseBool(usePolicyFileRaw) + if err != nil { + return ws, append(es, errors.New("\"use_policyfile\" must be a boolean")) + } + usePolicyFile = usePolicyFileBool + default: + return ws, append(es, errors.New("\"use_policyfile\" must be a boolean")) + } + } + + if !usePolicyFile && !c.IsSet("run_list") { + es = append(es, errors.New("\"run_list\": required field is not set")) + } + if usePolicyFile && !c.IsSet("policy_name") { + es = append(es, errors.New("using policyfile, but \"policy_name\" not set")) + } + if usePolicyFile && !c.IsSet("policy_group") { + es = append(es, errors.New("using policyfile, but \"policy_group\" not set")) + } + + return ws, es +} + +func (p *provisioner) deployConfigFiles(o terraform.UIOutput, comm communicator.Communicator, confDir string) error { + // Copy the user key to the new instance + pk := strings.NewReader(p.UserKey) + if err := comm.Upload(path.Join(confDir, p.UserName+".pem"), pk); err != nil { + return fmt.Errorf("Uploading user key failed: %v", err) + } + + if p.SecretKey != "" { + // Copy the secret key to the new instance + s := strings.NewReader(p.SecretKey) + if err := comm.Upload(path.Join(confDir, secretKey), s); err != nil { + return fmt.Errorf("Uploading %s failed: %v", secretKey, err) + } + } + + // Make sure the SSLVerifyMode value is written as a symbol + if p.SSLVerifyMode != "" && !strings.HasPrefix(p.SSLVerifyMode, ":") { + p.SSLVerifyMode = fmt.Sprintf(":%s", p.SSLVerifyMode) + } + + // Make strings.Join available for use within the template + funcMap := template.FuncMap{ + "join": strings.Join, + } + + // Create a new template and parse the client config into it + t := template.Must(template.New(clienrb).Funcs(funcMap).Parse(clientConf)) + + var buf bytes.Buffer + err := t.Execute(&buf, p) + if err != nil { + return fmt.Errorf("Error executing %s template: %s", clienrb, err) + } + + // Copy the client config to the new instance + if err = comm.Upload(path.Join(confDir, clienrb), &buf); err != nil { + return fmt.Errorf("Uploading %s failed: %v", clienrb, err) + } + + // Create a map with first boot settings + fb := make(map[string]interface{}) + if p.Attributes != nil { + fb = p.Attributes + } + + // Check if the run_list was also in the attributes and if so log a warning + // that it will be overwritten with the value of the run_list argument. + if _, found := fb["run_list"]; found { + log.Printf("[WARN] Found a 'run_list' specified in the configured attributes! 
" + + "This value will be overwritten by the value of the `run_list` argument!") + } + + // Add the initial runlist to the first boot settings + if !p.UsePolicyfile { + fb["run_list"] = p.RunList + } + + // Marshal the first boot settings to JSON + d, err := json.Marshal(fb) + if err != nil { + return fmt.Errorf("Failed to create %s data: %s", firstBoot, err) + } + + // Copy the first-boot.json to the new instance + if err := comm.Upload(path.Join(confDir, firstBoot), bytes.NewReader(d)); err != nil { + return fmt.Errorf("Uploading %s failed: %v", firstBoot, err) + } + + return nil +} + +func (p *provisioner) deployOhaiHints(o terraform.UIOutput, comm communicator.Communicator, hintDir string) error { + for _, hint := range p.OhaiHints { + // Open the hint file + f, err := os.Open(hint) + if err != nil { + return err + } + defer f.Close() + + // Copy the hint to the new instance + if err := comm.Upload(path.Join(hintDir, path.Base(hint)), f); err != nil { + return fmt.Errorf("Uploading %s failed: %v", path.Base(hint), err) + } + } + + return nil +} + +func (p *provisioner) fetchChefCertificatesFunc( + knifeCmd string, + confDir string) func(terraform.UIOutput, communicator.Communicator) error { + return func(o terraform.UIOutput, comm communicator.Communicator) error { + clientrb := path.Join(confDir, clienrb) + cmd := fmt.Sprintf("%s ssl fetch -c %s", knifeCmd, clientrb) + + return p.runCommand(o, comm, cmd) + } +} + +func (p *provisioner) generateClientKeyFunc(knifeCmd string, confDir string, noOutput string) provisionFn { + return func(o terraform.UIOutput, comm communicator.Communicator) error { + options := fmt.Sprintf("-c %s -u %s --key %s", + path.Join(confDir, clienrb), + p.UserName, + path.Join(confDir, p.UserName+".pem"), + ) + + // See if we already have a node object + getNodeCmd := fmt.Sprintf("%s node show %s %s %s", knifeCmd, p.NodeName, options, noOutput) + node := p.runCommand(o, comm, getNodeCmd) == nil + + // See if we already have a client object + getClientCmd := fmt.Sprintf("%s client show %s %s %s", knifeCmd, p.NodeName, options, noOutput) + client := p.runCommand(o, comm, getClientCmd) == nil + + // If we have a client, we can only continue if we are to recreate the client + if client && !p.RecreateClient { + return fmt.Errorf( + "Chef client %q already exists, set recreate_client=true to automatically recreate the client", p.NodeName) + } + + // If the node exists, try to delete it + if node { + deleteNodeCmd := fmt.Sprintf("%s node delete %s -y %s", + knifeCmd, + p.NodeName, + options, + ) + if err := p.runCommand(o, comm, deleteNodeCmd); err != nil { + return err + } + } + + // If the client exists, try to delete it + if client { + deleteClientCmd := fmt.Sprintf("%s client delete %s -y %s", + knifeCmd, + p.NodeName, + options, + ) + if err := p.runCommand(o, comm, deleteClientCmd); err != nil { + return err + } + } + + // Create the new client object + createClientCmd := fmt.Sprintf("%s client create %s -d -f %s %s", + knifeCmd, + p.NodeName, + path.Join(confDir, "client.pem"), + options, + ) + + return p.runCommand(o, comm, createClientCmd) + } +} + +func (p *provisioner) configureVaultsFunc(gemCmd string, knifeCmd string, confDir string) provisionFn { + return func(o terraform.UIOutput, comm communicator.Communicator) error { + if err := p.runCommand(o, comm, fmt.Sprintf("%s install chef-vault", gemCmd)); err != nil { + return err + } + + options := fmt.Sprintf("-c %s -u %s --key %s", + path.Join(confDir, clienrb), + p.UserName, + path.Join(confDir, 
p.UserName+".pem"), + ) + + // if client gets recreated, remove (old) client (with old keys) from vaults/items + // otherwise, the (new) client (with new keys) will not be able to decrypt the vault + if p.RecreateClient { + for vault, items := range p.Vaults { + for _, item := range items { + deleteCmd := fmt.Sprintf("%s vault remove %s %s -C \"%s\" -M client %s", + knifeCmd, + vault, + item, + p.NodeName, + options, + ) + if err := p.runCommand(o, comm, deleteCmd); err != nil { + return err + } + } + } + } + + for vault, items := range p.Vaults { + for _, item := range items { + updateCmd := fmt.Sprintf("%s vault update %s %s -C %s -M client %s", + knifeCmd, + vault, + item, + p.NodeName, + options, + ) + if err := p.runCommand(o, comm, updateCmd); err != nil { + return err + } + } + } + + return nil + } +} + +func (p *provisioner) runChefClientFunc(chefCmd string, confDir string) provisionFn { + return func(o terraform.UIOutput, comm communicator.Communicator) error { + fb := path.Join(confDir, firstBoot) + var cmd string + + // Policyfiles do not support chef environments, so don't pass the `-E` flag. + switch { + case p.UsePolicyfile && p.NamedRunList == "": + cmd = fmt.Sprintf("%s -j %q", chefCmd, fb) + case p.UsePolicyfile && p.NamedRunList != "": + cmd = fmt.Sprintf("%s -j %q -n %q", chefCmd, fb, p.NamedRunList) + default: + cmd = fmt.Sprintf("%s -j %q -E %q", chefCmd, fb, p.Environment) + } + + if p.LogToFile { + if err := os.MkdirAll(logfileDir, 0755); err != nil { + return fmt.Errorf("Error creating logfile directory %s: %v", logfileDir, err) + } + + logFile := path.Join(logfileDir, p.NodeName) + f, err := os.Create(path.Join(logFile)) + if err != nil { + return fmt.Errorf("Error creating logfile %s: %v", logFile, err) + } + f.Close() + + o.Output("Writing Chef Client output to " + logFile) + o = p + } + + return p.runCommand(o, comm, cmd) + } +} + +// Output implementation of terraform.UIOutput interface +func (p *provisioner) Output(output string) { + logFile := path.Join(logfileDir, p.NodeName) + f, err := os.OpenFile(logFile, os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + log.Printf("Error creating logfile %s: %v", logFile, err) + return + } + defer f.Close() + + // These steps are needed to remove any ANSI escape codes used to colorize + // the output and to make sure we have proper line endings before writing + // the string to the logfile. 
+ re := regexp.MustCompile(`\x1b\[[0-9;]+m`) + output = re.ReplaceAllString(output, "") + output = strings.Replace(output, "\r", "\n", -1) + + if _, err := f.WriteString(output); err != nil { + log.Printf("Error writing output to logfile %s: %v", logFile, err) + } + + if err := f.Sync(); err != nil { + log.Printf("Error saving logfile %s to disk: %v", logFile, err) + } +} + +// runCommand is used to run already prepared commands +func (p *provisioner) runCommand(o terraform.UIOutput, comm communicator.Communicator, command string) error { + // Unless prevented, prefix the command with sudo + if p.useSudo { + command = "sudo " + command + } + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + go p.copyOutput(o, outR) + go p.copyOutput(o, errR) + defer outW.Close() + defer errW.Close() + + cmd := &remote.Cmd{ + Command: command, + Stdout: outW, + Stderr: errW, + } + + err := comm.Start(cmd) + if err != nil { + return fmt.Errorf("Error executing command %q: %v", cmd.Command, err) + } + + if err := cmd.Wait(); err != nil { + return err + } + + return nil +} + +func (p *provisioner) copyOutput(o terraform.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} + +func decodeConfig(d *schema.ResourceData) (*provisioner, error) { + p := &provisioner{ + Channel: d.Get("channel").(string), + ClientOptions: getStringList(d.Get("client_options")), + DisableReporting: d.Get("disable_reporting").(bool), + Environment: d.Get("environment").(string), + FetchChefCertificates: d.Get("fetch_chef_certificates").(bool), + LogToFile: d.Get("log_to_file").(bool), + UsePolicyfile: d.Get("use_policyfile").(bool), + PolicyGroup: d.Get("policy_group").(string), + PolicyName: d.Get("policy_name").(string), + HTTPProxy: d.Get("http_proxy").(string), + HTTPSProxy: d.Get("https_proxy").(string), + NOProxy: getStringList(d.Get("no_proxy")), + NamedRunList: d.Get("named_run_list").(string), + NodeName: d.Get("node_name").(string), + OhaiHints: getStringList(d.Get("ohai_hints")), + OSType: d.Get("os_type").(string), + RecreateClient: d.Get("recreate_client").(bool), + PreventSudo: d.Get("prevent_sudo").(bool), + RunList: getStringList(d.Get("run_list")), + SecretKey: d.Get("secret_key").(string), + ServerURL: d.Get("server_url").(string), + SkipInstall: d.Get("skip_install").(bool), + SkipRegister: d.Get("skip_register").(bool), + SSLVerifyMode: d.Get("ssl_verify_mode").(string), + UserName: d.Get("user_name").(string), + UserKey: d.Get("user_key").(string), + Version: d.Get("version").(string), + } + + // Make sure the supplied URL has a trailing slash + p.ServerURL = strings.TrimSuffix(p.ServerURL, "/") + "/" + + for i, hint := range p.OhaiHints { + hintPath, err := homedir.Expand(hint) + if err != nil { + return nil, fmt.Errorf("Error expanding the path %s: %v", hint, err) + } + p.OhaiHints[i] = hintPath + } + + if attrs, ok := d.GetOk("attributes_json"); ok { + var m map[string]interface{} + if err := json.Unmarshal([]byte(attrs.(string)), &m); err != nil { + return nil, fmt.Errorf("Error parsing attributes_json: %v", err) + } + p.Attributes = m + } + + if vaults, ok := d.GetOk("vault_json"); ok { + var m map[string]interface{} + if err := json.Unmarshal([]byte(vaults.(string)), &m); err != nil { + return nil, fmt.Errorf("Error parsing vault_json: %v", err) + } + + v := make(map[string][]string) + for vault, items := range m { + switch items := items.(type) { + case []interface{}: + for _, item := range items { + if item, ok := item.(string); ok { + v[vault] = 
append(v[vault], item) + } + } + case interface{}: + if item, ok := items.(string); ok { + v[vault] = append(v[vault], item) + } + } + } + + p.Vaults = v + } + + return p, nil +} + +func getStringList(v interface{}) []string { + var result []string + + switch v := v.(type) { + case nil: + return result + case []interface{}: + for _, vv := range v { + if vv, ok := vv.(string); ok { + result = append(result, vv) + } + } + return result + default: + panic(fmt.Sprintf("Unsupported type: %T", v)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/windows_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/windows_provisioner.go new file mode 100644 index 00000000..02010acd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/chef/windows_provisioner.go @@ -0,0 +1,84 @@ +package chef + +import ( + "fmt" + "path" + "strings" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/terraform" +) + +const installScript = ` +$winver = [System.Environment]::OSVersion.Version | %% {"{0}.{1}" -f $_.Major,$_.Minor} + +switch ($winver) +{ + "6.0" {$machine_os = "2008"} + "6.1" {$machine_os = "2008r2"} + "6.2" {$machine_os = "2012"} + "6.3" {$machine_os = "2012"} + default {$machine_os = "2008r2"} +} + +if ([System.IntPtr]::Size -eq 4) {$machine_arch = "i686"} else {$machine_arch = "x86_64"} + +$url = "http://omnitruck.chef.io/%s/chef/download?p=windows&pv=$machine_os&m=$machine_arch&v=%s" +$dest = [System.IO.Path]::GetTempFileName() +$dest = [System.IO.Path]::ChangeExtension($dest, ".msi") +$downloader = New-Object System.Net.WebClient + +$http_proxy = '%s' +if ($http_proxy -ne '') { + $no_proxy = '%s' + if ($no_proxy -eq ''){ + $no_proxy = "127.0.0.1" + } + + $proxy = New-Object System.Net.WebProxy($http_proxy, $true, ,$no_proxy.Split(',')) + $downloader.proxy = $proxy +} + +Write-Host 'Downloading Chef Client...' +$downloader.DownloadFile($url, $dest) + +Write-Host 'Installing Chef Client...' 
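+# msiexec /qn runs the installer with no UI and /i installs the MSI saved
+# to $dest; -Wait blocks until the installation has finished.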
+Start-Process -FilePath msiexec -ArgumentList /qn, /i, $dest -Wait
+`
+
+func (p *provisioner) windowsInstallChefClient(o terraform.UIOutput, comm communicator.Communicator) error {
+	script := path.Join(path.Dir(comm.ScriptPath()), "ChefClient.ps1")
+	content := fmt.Sprintf(installScript, p.Channel, p.Version, p.HTTPProxy, strings.Join(p.NOProxy, ","))
+
+	// Copy the script to the new instance
+	if err := comm.UploadScript(script, strings.NewReader(content)); err != nil {
+		return fmt.Errorf("Uploading ChefClient.ps1 failed: %v", err)
+	}
+
+	// Execute the script to install Chef Client
+	installCmd := fmt.Sprintf("powershell -NoProfile -ExecutionPolicy Bypass -File %s", script)
+	return p.runCommand(o, comm, installCmd)
+}
+
+func (p *provisioner) windowsCreateConfigFiles(o terraform.UIOutput, comm communicator.Communicator) error {
+	// Make sure the config directory exists
+	cmd := fmt.Sprintf("cmd /c if not exist %q mkdir %q", windowsConfDir, windowsConfDir)
+	if err := p.runCommand(o, comm, cmd); err != nil {
+		return err
+	}
+
+	if len(p.OhaiHints) > 0 {
+		// Make sure the hints directory exists
+		hintsDir := path.Join(windowsConfDir, "ohai/hints")
+		cmd := fmt.Sprintf("cmd /c if not exist %q mkdir %q", hintsDir, hintsDir)
+		if err := p.runCommand(o, comm, cmd); err != nil {
+			return err
+		}
+
+		if err := p.deployOhaiHints(o, comm, hintsDir); err != nil {
+			return err
+		}
+	}
+
+	return p.deployConfigFiles(o, comm, windowsConfDir)
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/file/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/file/resource_provisioner.go
index 9b9e8a97..26f2f4da 100644
--- a/vendor/github.com/hashicorp/terraform/builtin/provisioners/file/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/file/resource_provisioner.go
@@ -4,9 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io/ioutil"
-	"log"
 	"os"
-	"time"
 
 	"github.com/hashicorp/terraform/communicator"
 	"github.com/hashicorp/terraform/helper/schema"
@@ -61,21 +59,11 @@ func applyFn(ctx context.Context) error {
 
 	// Begin the file copy
 	dst := data.Get("destination").(string)
-	resultCh := make(chan error, 1)
-	go func() {
-		resultCh <- copyFiles(comm, src, dst)
-	}()
 
-	// Allow the file copy to complete unless there is an interrupt.
-	// If there is an interrupt we make no attempt to cleanly close
-	// the connection currently. We just abruptly exit. Because Terraform
-	// taints the resource, this is fine.
- select { - case err := <-resultCh: + if err := copyFiles(ctx, comm, src, dst); err != nil { return err - case <-ctx.Done(): - return fmt.Errorf("file transfer interrupted") } + return nil } func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { @@ -107,16 +95,24 @@ func getSrc(data *schema.ResourceData) (string, bool, error) { } // copyFiles is used to copy the files from a source to a destination -func copyFiles(comm communicator.Communicator, src, dst string) error { +func copyFiles(ctx context.Context, comm communicator.Communicator, src, dst string) error { + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + // Wait and retry until we establish the connection - err := retryFunc(comm.Timeout(), func() error { - err := comm.Connect(nil) - return err + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(nil) }) if err != nil { return err } - defer comm.Disconnect() + + // disconnect when the context is canceled, which will close this after + // Apply as well. + go func() { + <-ctx.Done() + comm.Disconnect() + }() info, err := os.Stat(src) if err != nil { @@ -144,21 +140,3 @@ func copyFiles(comm communicator.Communicator, src, dst string) error { } return err } - -// retryFunc is used to retry a function for a given duration -func retryFunc(timeout time.Duration, f func() error) error { - finish := time.After(timeout) - for { - err := f() - if err == nil { - return nil - } - log.Printf("Retryable error: %v", err) - - select { - case <-finish: - return err - case <-time.After(3 * time.Second): - } - } -} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/habitat/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/habitat/resource_provisioner.go new file mode 100644 index 00000000..26691db5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/habitat/resource_provisioner.go @@ -0,0 +1,792 @@ +package habitat + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/url" + "path" + "strings" + "text/template" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + linereader "github.com/mitchellh/go-linereader" +) + +const installURL = "https://raw.githubusercontent.com/habitat-sh/habitat/master/components/hab/install.sh" +const systemdUnit = ` +[Unit] +Description=Habitat Supervisor + +[Service] +ExecStart=/bin/hab sup run {{ .SupOptions }} +Restart=on-failure +{{ if .BuilderAuthToken -}} +Environment="HAB_AUTH_TOKEN={{ .BuilderAuthToken }}" +{{ end -}} + +[Install] +WantedBy=default.target +` + +var serviceTypes = map[string]bool{"unmanaged": true, "systemd": true} +var updateStrategies = map[string]bool{"at-once": true, "rolling": true, "none": true} +var topologies = map[string]bool{"leader": true, "standalone": true} + +type provisionFn func(terraform.UIOutput, communicator.Communicator) error + +type provisioner struct { + Version string + Services []Service + PermanentPeer bool + ListenGossip string + ListenHTTP string + Peer string + RingKey string + RingKeyContent string + SkipInstall bool + UseSudo bool + ServiceType string + ServiceName string + URL string + Channel string + Events string + OverrideName string + Organization string + BuilderAuthToken string + SupOptions string +} + +func Provisioner() 
terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "peer": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "service_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "systemd", + }, + "service_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "hab-supervisor", + }, + "use_sudo": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "permanent_peer": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "listen_gossip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "listen_http": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "ring_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "ring_key_content": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "channel": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "events": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "override_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "organization": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "builder_auth_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "service": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "binds": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "bind": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alias": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Optional: true, + }, + "topology": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "user_toml": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "strategy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "channel": &schema.Schema{ + + Type: schema.TypeString, + Optional: true, + }, + "group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "application": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "environment": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "override_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "service_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Optional: true, + }, + }, + ApplyFunc: applyFn, + ValidateFunc: validateFn, + } +} + +func applyFn(ctx context.Context) error { + o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + s := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) + d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + + p, err := decodeConfig(d) + if err != nil { + return err + } + + comm, err := communicator.New(s) + if err != nil { + return err + } + + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + 
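+	// Keep retrying the connection until it succeeds or the timeout-bound
+	// context above expires, so provisioning can begin as soon as the
+	// target starts accepting connections.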
+ err = communicator.Retry(retryCtx, func() error { + return comm.Connect(o) + }) + + if err != nil { + return err + } + defer comm.Disconnect() + + if !p.SkipInstall { + o.Output("Installing habitat...") + if err := p.installHab(o, comm); err != nil { + return err + } + } + + if p.RingKeyContent != "" { + o.Output("Uploading supervisor ring key...") + if err := p.uploadRingKey(o, comm); err != nil { + return err + } + } + + o.Output("Starting the habitat supervisor...") + if err := p.startHab(o, comm); err != nil { + return err + } + + if p.Services != nil { + for _, service := range p.Services { + o.Output("Starting service: " + service.Name) + if err := p.startHabService(o, comm, service); err != nil { + return err + } + } + } + + return nil +} + +func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { + serviceType, ok := c.Get("service_type") + if ok { + if !serviceTypes[serviceType.(string)] { + es = append(es, errors.New(serviceType.(string)+" is not a valid service_type.")) + } + } + + builderURL, ok := c.Get("url") + if ok { + if _, err := url.ParseRequestURI(builderURL.(string)); err != nil { + es = append(es, errors.New(builderURL.(string)+" is not a valid URL.")) + } + } + + // Validate service level configs + services, ok := c.Get("service") + if ok { + for _, service := range services.([]map[string]interface{}) { + strategy, ok := service["strategy"].(string) + if ok && !updateStrategies[strategy] { + es = append(es, errors.New(strategy+" is not a valid update strategy.")) + } + + topology, ok := service["topology"].(string) + if ok && !topologies[topology] { + es = append(es, errors.New(topology+" is not a valid topology")) + } + + builderURL, ok := service["url"].(string) + if ok { + if _, err := url.ParseRequestURI(builderURL); err != nil { + es = append(es, errors.New(builderURL+" is not a valid URL.")) + } + } + } + } + return ws, es +} + +type Service struct { + Name string + Strategy string + Topology string + Channel string + Group string + URL string + Binds []Bind + BindStrings []string + UserTOML string + AppName string + Environment string + OverrideName string + ServiceGroupKey string +} + +type Bind struct { + Alias string + Service string + Group string +} + +func (s *Service) getPackageName(fullName string) string { + return strings.Split(fullName, "/")[1] +} + +func (b *Bind) toBindString() string { + return fmt.Sprintf("%s:%s.%s", b.Alias, b.Service, b.Group) +} + +func decodeConfig(d *schema.ResourceData) (*provisioner, error) { + p := &provisioner{ + Version: d.Get("version").(string), + Peer: d.Get("peer").(string), + Services: getServices(d.Get("service").(*schema.Set).List()), + UseSudo: d.Get("use_sudo").(bool), + ServiceType: d.Get("service_type").(string), + ServiceName: d.Get("service_name").(string), + RingKey: d.Get("ring_key").(string), + RingKeyContent: d.Get("ring_key_content").(string), + PermanentPeer: d.Get("permanent_peer").(bool), + ListenGossip: d.Get("listen_gossip").(string), + ListenHTTP: d.Get("listen_http").(string), + URL: d.Get("url").(string), + Channel: d.Get("channel").(string), + Events: d.Get("events").(string), + OverrideName: d.Get("override_name").(string), + Organization: d.Get("organization").(string), + BuilderAuthToken: d.Get("builder_auth_token").(string), + } + + return p, nil +} + +func getServices(v []interface{}) []Service { + services := make([]Service, 0, len(v)) + for _, rawServiceData := range v { + serviceData := rawServiceData.(map[string]interface{}) + name := (serviceData["name"].(string)) 
+ strategy := (serviceData["strategy"].(string)) + topology := (serviceData["topology"].(string)) + channel := (serviceData["channel"].(string)) + group := (serviceData["group"].(string)) + url := (serviceData["url"].(string)) + app := (serviceData["application"].(string)) + env := (serviceData["environment"].(string)) + override := (serviceData["override_name"].(string)) + userToml := (serviceData["user_toml"].(string)) + serviceGroupKey := (serviceData["service_key"].(string)) + var bindStrings []string + binds := getBinds(serviceData["bind"].(*schema.Set).List()) + for _, b := range serviceData["binds"].([]interface{}) { + bind, err := getBindFromString(b.(string)) + if err != nil { + return nil + } + binds = append(binds, bind) + } + + service := Service{ + Name: name, + Strategy: strategy, + Topology: topology, + Channel: channel, + Group: group, + URL: url, + UserTOML: userToml, + BindStrings: bindStrings, + Binds: binds, + AppName: app, + Environment: env, + OverrideName: override, + ServiceGroupKey: serviceGroupKey, + } + services = append(services, service) + } + return services +} + +func getBinds(v []interface{}) []Bind { + binds := make([]Bind, 0, len(v)) + for _, rawBindData := range v { + bindData := rawBindData.(map[string]interface{}) + alias := bindData["alias"].(string) + service := bindData["service"].(string) + group := bindData["group"].(string) + bind := Bind{ + Alias: alias, + Service: service, + Group: group, + } + binds = append(binds, bind) + } + return binds +} + +func (p *provisioner) uploadRingKey(o terraform.UIOutput, comm communicator.Communicator) error { + command := fmt.Sprintf("echo '%s' | hab ring key import", p.RingKeyContent) + if p.UseSudo { + command = fmt.Sprintf("echo '%s' | sudo hab ring key import", p.RingKeyContent) + } + return p.runCommand(o, comm, command) +} + +func (p *provisioner) installHab(o terraform.UIOutput, comm communicator.Communicator) error { + // Build the install command + command := fmt.Sprintf("curl -L0 %s > install.sh", installURL) + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + // Run the install script + if p.Version == "" { + command = fmt.Sprintf("env HAB_NONINTERACTIVE=true bash ./install.sh ") + } else { + command = fmt.Sprintf("env HAB_NONINTERACTIVE=true bash ./install.sh -v %s", p.Version) + } + + if p.UseSudo { + command = fmt.Sprintf("sudo %s", command) + } + + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + if err := p.createHabUser(o, comm); err != nil { + return err + } + + return p.runCommand(o, comm, fmt.Sprintf("rm -f install.sh")) +} + +func (p *provisioner) startHab(o terraform.UIOutput, comm communicator.Communicator) error { + // Install the supervisor first + var command string + if p.Version == "" { + command += fmt.Sprintf("hab install core/hab-sup") + } else { + command += fmt.Sprintf("hab install core/hab-sup/%s", p.Version) + } + + if p.UseSudo { + command = fmt.Sprintf("sudo -E %s", command) + } + + command = fmt.Sprintf("env HAB_NONINTERACTIVE=true %s", command) + + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + // Build up sup options + options := "" + if p.PermanentPeer { + options += " -I" + } + + if p.ListenGossip != "" { + options += fmt.Sprintf(" --listen-gossip %s", p.ListenGossip) + } + + if p.ListenHTTP != "" { + options += fmt.Sprintf(" --listen-http %s", p.ListenHTTP) + } + + if p.Peer != "" { + options += fmt.Sprintf(" --peer %s", p.Peer) + } + + if p.RingKey != "" { + options += fmt.Sprintf(" 
--ring %s", p.RingKey) + } + + if p.URL != "" { + options += fmt.Sprintf(" --url %s", p.URL) + } + + if p.Channel != "" { + options += fmt.Sprintf(" --channel %s", p.Channel) + } + + if p.Events != "" { + options += fmt.Sprintf(" --events %s", p.Events) + } + + if p.OverrideName != "" { + options += fmt.Sprintf(" --override-name %s", p.OverrideName) + } + + if p.Organization != "" { + options += fmt.Sprintf(" --org %s", p.Organization) + } + + p.SupOptions = options + + switch p.ServiceType { + case "unmanaged": + return p.startHabUnmanaged(o, comm, options) + case "systemd": + return p.startHabSystemd(o, comm, options) + default: + return errors.New("Unsupported service type") + } +} + +func (p *provisioner) startHabUnmanaged(o terraform.UIOutput, comm communicator.Communicator, options string) error { + // Create the sup directory for the log file + var command string + var token string + if p.UseSudo { + command = "sudo mkdir -p /hab/sup/default && sudo chmod o+w /hab/sup/default" + } else { + command = "mkdir -p /hab/sup/default && chmod o+w /hab/sup/default" + } + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + if p.BuilderAuthToken != "" { + token = fmt.Sprintf("env HAB_AUTH_TOKEN=%s", p.BuilderAuthToken) + } + + if p.UseSudo { + command = fmt.Sprintf("(%s setsid sudo -E hab sup run %s > /hab/sup/default/sup.log 2>&1 &) ; sleep 1", token, options) + } else { + command = fmt.Sprintf("(%s setsid hab sup run %s > /hab/sup/default/sup.log 2>&1 <&1 &) ; sleep 1", token, options) + } + return p.runCommand(o, comm, command) +} + +func (p *provisioner) startHabSystemd(o terraform.UIOutput, comm communicator.Communicator, options string) error { + // Create a new template and parse the client config into it + unitString := template.Must(template.New("hab-supervisor.service").Parse(systemdUnit)) + + var buf bytes.Buffer + err := unitString.Execute(&buf, p) + if err != nil { + return fmt.Errorf("Error executing %s template: %s", "hab-supervisor.service", err) + } + + var command string + if p.UseSudo { + command = fmt.Sprintf("sudo echo '%s' | sudo tee /etc/systemd/system/%s.service > /dev/null", &buf, p.ServiceName) + } else { + command = fmt.Sprintf("echo '%s' | tee /etc/systemd/system/%s.service > /dev/null", &buf, p.ServiceName) + } + + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + if p.UseSudo { + command = fmt.Sprintf("sudo systemctl enable hab-supervisor && sudo systemctl start hab-supervisor") + } else { + command = fmt.Sprintf("systemctl enable hab-supervisor && systemctl start hab-supervisor") + } + return p.runCommand(o, comm, command) +} + +func (p *provisioner) createHabUser(o terraform.UIOutput, comm communicator.Communicator) error { + addUser := false + // Install busybox to get us the user tools we need + command := fmt.Sprintf("env HAB_NONINTERACTIVE=true hab install core/busybox") + if p.UseSudo { + command = fmt.Sprintf("sudo %s", command) + } + if err := p.runCommand(o, comm, command); err != nil { + return err + } + + // Check for existing hab user + command = fmt.Sprintf("hab pkg exec core/busybox id hab") + if p.UseSudo { + command = fmt.Sprintf("sudo %s", command) + } + if err := p.runCommand(o, comm, command); err != nil { + o.Output("No existing hab user detected, creating...") + addUser = true + } + + if addUser { + command = fmt.Sprintf("hab pkg exec core/busybox adduser -D -g \"\" hab") + if p.UseSudo { + command = fmt.Sprintf("sudo %s", command) + } + return p.runCommand(o, comm, command) + } + + return nil 
+}
+
+// In the future we'll remove the dedicated install once the synchronous load feature in hab-sup is
+// available. Until then we install here to provide output and a noisy failure mechanism because
+// if you install with the pkg load, it occurs asynchronously and fails quietly.
+func (p *provisioner) installHabPackage(o terraform.UIOutput, comm communicator.Communicator, service Service) error {
+	var command string
+	options := ""
+	if service.Channel != "" {
+		options += fmt.Sprintf(" --channel %s", service.Channel)
+	}
+
+	if service.URL != "" {
+		options += fmt.Sprintf(" --url %s", service.URL)
+	}
+	if p.UseSudo {
+		command = fmt.Sprintf("env HAB_NONINTERACTIVE=true sudo -E hab pkg install %s %s", service.Name, options)
+	} else {
+		command = fmt.Sprintf("env HAB_NONINTERACTIVE=true hab pkg install %s %s", service.Name, options)
+	}
+
+	if p.BuilderAuthToken != "" {
+		command = fmt.Sprintf("env HAB_AUTH_TOKEN=%s %s", p.BuilderAuthToken, command)
+	}
+	return p.runCommand(o, comm, command)
+}
+
+func (p *provisioner) startHabService(o terraform.UIOutput, comm communicator.Communicator, service Service) error {
+	var command string
+	if err := p.installHabPackage(o, comm, service); err != nil {
+		return err
+	}
+	if err := p.uploadUserTOML(o, comm, service); err != nil {
+		return err
+	}
+
+	// Upload service group key; propagate the error instead of dropping it
+	if service.ServiceGroupKey != "" {
+		if err := p.uploadServiceGroupKey(o, comm, service.ServiceGroupKey); err != nil {
+			return err
+		}
+	}
+
+	options := ""
+	if service.Topology != "" {
+		options += fmt.Sprintf(" --topology %s", service.Topology)
+	}
+
+	if service.Strategy != "" {
+		options += fmt.Sprintf(" --strategy %s", service.Strategy)
+	}
+
+	if service.Channel != "" {
+		options += fmt.Sprintf(" --channel %s", service.Channel)
+	}
+
+	if service.URL != "" {
+		options += fmt.Sprintf(" --url %s", service.URL)
+	}
+
+	if service.Group != "" {
+		options += fmt.Sprintf(" --group %s", service.Group)
+	}
+
+	for _, bind := range service.Binds {
+		options += fmt.Sprintf(" --bind %s", bind.toBindString())
+	}
+	command = fmt.Sprintf("hab svc load %s %s", service.Name, options)
+	if p.UseSudo {
+		command = fmt.Sprintf("sudo -E %s", command)
+	}
+	if p.BuilderAuthToken != "" {
+		command = fmt.Sprintf("env HAB_AUTH_TOKEN=%s %s", p.BuilderAuthToken, command)
+	}
+	return p.runCommand(o, comm, command)
+}
+
+func (p *provisioner) uploadServiceGroupKey(o terraform.UIOutput, comm communicator.Communicator, key string) error {
+	keyName := strings.Split(key, "\n")[1]
+	o.Output("Uploading service group key: " + keyName)
+	keyFileName := fmt.Sprintf("%s.box.key", keyName)
+	destPath := path.Join("/hab/cache/keys", keyFileName)
+	keyContent := strings.NewReader(key)
+	if p.UseSudo {
+		tempPath := path.Join("/tmp", keyFileName)
+		if err := comm.Upload(tempPath, keyContent); err != nil {
+			return err
+		}
+		command := fmt.Sprintf("sudo mv %s %s", tempPath, destPath)
+		return p.runCommand(o, comm, command)
+	}
+
+	return comm.Upload(destPath, keyContent)
+}
+
+func (p *provisioner) uploadUserTOML(o terraform.UIOutput, comm communicator.Communicator, service Service) error {
+	// Create the hab svc directory to lay down the user.toml before loading the service
+	o.Output("Uploading user.toml for service: " + service.Name)
+	destDir := fmt.Sprintf("/hab/svc/%s", service.getPackageName(service.Name))
+	command := fmt.Sprintf("mkdir -p %s", destDir)
+	if p.UseSudo {
+		command = fmt.Sprintf("sudo %s", command)
+	}
+	if err := p.runCommand(o, comm, command); err != nil {
+		return err
+	}
+
+	userToml := 
strings.NewReader(service.UserTOML) + + if p.UseSudo { + if err := comm.Upload("/tmp/user.toml", userToml); err != nil { + return err + } + command = fmt.Sprintf("sudo mv /tmp/user.toml %s", destDir) + return p.runCommand(o, comm, command) + } + + return comm.Upload(path.Join(destDir, "user.toml"), userToml) + +} + +func (p *provisioner) copyOutput(o terraform.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} + +func (p *provisioner) runCommand(o terraform.UIOutput, comm communicator.Communicator, command string) error { + outR, outW := io.Pipe() + errR, errW := io.Pipe() + + go p.copyOutput(o, outR) + go p.copyOutput(o, errR) + defer outW.Close() + defer errW.Close() + + cmd := &remote.Cmd{ + Command: command, + Stdout: outW, + Stderr: errW, + } + + if err := comm.Start(cmd); err != nil { + return fmt.Errorf("Error executing command %q: %v", cmd.Command, err) + } + + if err := cmd.Wait(); err != nil { + return err + } + + return nil +} + +func getBindFromString(bind string) (Bind, error) { + t := strings.FieldsFunc(bind, func(d rune) bool { + switch d { + case ':', '.': + return true + } + return false + }) + if len(t) != 3 { + return Bind{}, errors.New("Invalid bind specification: " + bind) + } + return Bind{Alias: t[0], Service: t[1], Group: t[2]}, nil +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go new file mode 100644 index 00000000..f0ba28f3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/local-exec/resource_provisioner.go @@ -0,0 +1,158 @@ +package localexec + +import ( + "context" + "fmt" + "io" + "os" + "os/exec" + "runtime" + + "github.com/armon/circbuf" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/go-linereader" +) + +const ( + // maxBufSize limits how much output we collect from a local + // invocation. This is to prevent TF memory usage from growing + // to an enormous amount due to a faulty process. 
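+	// Because the buffer is circular, only the most recent maxBufSize
+	// bytes of output survive for the error message on failure.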
+ maxBufSize = 8 * 1024 +) + +func Provisioner() terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "command": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "interpreter": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "working_dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "environment": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + }, + + ApplyFunc: applyFn, + } +} + +func applyFn(ctx context.Context) error { + data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + + command := data.Get("command").(string) + if command == "" { + return fmt.Errorf("local-exec provisioner command must be a non-empty string") + } + + // Execute the command with env + environment := data.Get("environment").(map[string]interface{}) + + var env []string + for k := range environment { + entry := fmt.Sprintf("%s=%s", k, environment[k].(string)) + env = append(env, entry) + } + + // Execute the command using a shell + interpreter := data.Get("interpreter").([]interface{}) + + var cmdargs []string + if len(interpreter) > 0 { + for _, i := range interpreter { + if arg, ok := i.(string); ok { + cmdargs = append(cmdargs, arg) + } + } + } else { + if runtime.GOOS == "windows" { + cmdargs = []string{"cmd", "/C"} + } else { + cmdargs = []string{"/bin/sh", "-c"} + } + } + cmdargs = append(cmdargs, command) + + workingdir := data.Get("working_dir").(string) + + // Setup the reader that will read the output from the command. + // We use an os.Pipe so that the *os.File can be passed directly to the + // process, and not rely on goroutines copying the data which may block. + // See golang.org/issue/18874 + pr, pw, err := os.Pipe() + if err != nil { + return fmt.Errorf("failed to initialize pipe for output: %s", err) + } + + var cmdEnv []string + cmdEnv = os.Environ() + cmdEnv = append(cmdEnv, env...) + + // Setup the command + cmd := exec.CommandContext(ctx, cmdargs[0], cmdargs[1:]...) + cmd.Stderr = pw + cmd.Stdout = pw + // Dir specifies the working directory of the command. + // If Dir is the empty string (this is default), runs the command + // in the calling process's current directory. + cmd.Dir = workingdir + // Env specifies the environment of the command. + // By default will use the calling process's environment + cmd.Env = cmdEnv + + output, _ := circbuf.NewBuffer(maxBufSize) + + // Write everything we read from the pipe to the output buffer too + tee := io.TeeReader(pr, output) + + // copy the teed output to the UI output + copyDoneCh := make(chan struct{}) + go copyOutput(o, tee, copyDoneCh) + + // Output what we're about to run + o.Output(fmt.Sprintf("Executing: %q", cmdargs)) + + // Start the command + err = cmd.Start() + if err == nil { + err = cmd.Wait() + } + + // Close the write-end of the pipe so that the goroutine mirroring output + // ends properly. + pw.Close() + + // Cancelling the command may block the pipe reader if the file descriptor + // was passed to a child process which hasn't closed it. In this case the + // copyOutput goroutine will just hang out until exit. + select { + case <-copyDoneCh: + case <-ctx.Done(): + } + + if err != nil { + return fmt.Errorf("Error running command '%s': %v. 
Output: %s", + command, err, output.Bytes()) + } + + return nil +} + +func copyOutput(o terraform.UIOutput, r io.Reader, doneCh chan<- struct{}) { + defer close(doneCh) + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/bolt/bolt.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/bolt/bolt.go new file mode 100644 index 00000000..0fc70e32 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/bolt/bolt.go @@ -0,0 +1,74 @@ +package bolt + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "runtime" + "strings" + "time" +) + +type Result struct { + Items []struct { + Node string `json:"node"` + Status string `json:"status"` + Result map[string]string `json:"result"` + } `json:"items"` + NodeCount int `json:"node_count"` + ElapsedTime int `json:"elapsed_time"` +} + +func runCommand(command string, timeout time.Duration) ([]byte, error) { + var cmdargs []string + + if runtime.GOOS == "windows" { + cmdargs = []string{"cmd", "/C"} + } else { + cmdargs = []string{"/bin/sh", "-c"} + } + cmdargs = append(cmdargs, command) + + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + cmd := exec.CommandContext(ctx, cmdargs[0], cmdargs[1:]...) + return cmd.Output() +} + +func Task(connInfo map[string]string, timeout time.Duration, sudo bool, task string, args map[string]string) (*Result, error) { + cmdargs := []string{ + "bolt", "task", "run", "--nodes", connInfo["type"] + "://" + connInfo["host"], "-u", connInfo["user"], + } + + if connInfo["type"] == "winrm" { + cmdargs = append(cmdargs, "-p", "\""+connInfo["password"]+"\"", "--no-ssl") + } else { + if sudo { + cmdargs = append(cmdargs, "--run-as", "root") + } + + cmdargs = append(cmdargs, "--no-host-key-check") + } + + cmdargs = append(cmdargs, "--format", "json", "--connect-timeout", "120", task) + + if args != nil { + for key, value := range args { + cmdargs = append(cmdargs, strings.Join([]string{key, value}, "=")) + } + } + + out, err := runCommand(strings.Join(cmdargs, " "), timeout) + if err != nil { + return nil, fmt.Errorf("Bolt: \"%s\": %s: %s", strings.Join(cmdargs, " "), out, err) + } + + result := new(Result) + if err = json.Unmarshal(out, result); err != nil { + return nil, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/linux_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/linux_provisioner.go new file mode 100644 index 00000000..f480b0e7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/linux_provisioner.go @@ -0,0 +1,65 @@ +package puppet + +import ( + "fmt" + "io" + + "github.com/hashicorp/terraform/communicator/remote" +) + +func (p *provisioner) linuxUploadFile(f io.Reader, dir string, filename string) error { + _, err := p.runCommand("mkdir -p " + dir) + if err != nil { + return fmt.Errorf("Failed to make directory %s: %s", dir, err) + } + + err = p.comm.Upload("/tmp/"+filename, f) + if err != nil { + return fmt.Errorf("Failed to upload %s to /tmp: %s", filename, err) + } + + _, err = p.runCommand(fmt.Sprintf("mv /tmp/%s %s/%s", filename, dir, filename)) + return err +} + +func (p *provisioner) linuxDefaultCertname() (string, error) { + certname, err := p.runCommand("hostname -f") + if err != nil { + return "", err + } + + return certname, nil +} 
+ +func (p *provisioner) linuxInstallPuppetAgent() error { + _, err := p.runCommand(fmt.Sprintf("curl -kO https://%s:8140/packages/current/install.bash", p.Server)) + if err != nil { + return err + } + + _, err = p.runCommand("bash -- ./install.bash --puppet-service-ensure stopped") + if err != nil { + return err + } + + _, err = p.runCommand("rm -f install.bash") + return err +} + +func (p *provisioner) linuxRunPuppetAgent() error { + _, err := p.runCommand(fmt.Sprintf( + "/opt/puppetlabs/puppet/bin/puppet agent --test --server %s --environment %s", + p.Server, + p.Environment, + )) + + // Puppet exits 2 if changes have been successfully made. + if err != nil { + errStruct, _ := err.(*remote.ExitError) + if errStruct.ExitStatus == 2 { + return nil + } + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/resource_provisioner.go new file mode 100644 index 00000000..35c7dd2a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/resource_provisioner.go @@ -0,0 +1,332 @@ +package puppet + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + "time" + + "github.com/hashicorp/terraform/builtin/provisioners/puppet/bolt" + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/validation" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/go-linereader" + "gopkg.in/yaml.v2" +) + +type provisioner struct { + Server string + ServerUser string + OSType string + Certname string + Environment string + Autosign bool + OpenSource bool + UseSudo bool + BoltTimeout time.Duration + CustomAttributes map[string]interface{} + ExtensionRequests map[string]interface{} + + runPuppetAgent func() error + installPuppetAgent func() error + uploadFile func(f io.Reader, dir string, filename string) error + defaultCertname func() (string, error) + + instanceState *terraform.InstanceState + output terraform.UIOutput + comm communicator.Communicator +} + +type csrAttributes struct { + CustomAttributes map[string]string `yaml:"custom_attributes"` + ExtensionRequests map[string]string `yaml:"extension_requests"` +} + +// Provisioner returns a Puppet resource provisioner. 
+func Provisioner() terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "server": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "server_user": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "root", + }, + "os_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"linux", "windows"}, false), + }, + "use_sudo": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "autosign": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "open_source": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "certname": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "extension_requests": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + "custom_attributes": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + }, + "environment": &schema.Schema{ + Type: schema.TypeString, + Default: "production", + Optional: true, + }, + "bolt_timeout": &schema.Schema{ + Type: schema.TypeString, + Default: "5m", + Optional: true, + ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { + _, err := time.ParseDuration(val.(string)) + if err != nil { + errs = append(errs, err) + } + return warns, errs + }, + }, + }, + ApplyFunc: applyFn, + } +} + +func applyFn(ctx context.Context) error { + output := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + state := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) + configData := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + + p, err := decodeConfig(configData) + if err != nil { + return err + } + + p.instanceState = state + p.output = output + + if p.OSType == "" { + switch connType := state.Ephemeral.ConnInfo["type"]; connType { + case "ssh", "": + p.OSType = "linux" + case "winrm": + p.OSType = "windows" + default: + return fmt.Errorf("Unsupported connection type: %s", connType) + } + } + + switch p.OSType { + case "linux": + p.runPuppetAgent = p.linuxRunPuppetAgent + p.installPuppetAgent = p.linuxInstallPuppetAgent + p.uploadFile = p.linuxUploadFile + p.defaultCertname = p.linuxDefaultCertname + case "windows": + p.runPuppetAgent = p.windowsRunPuppetAgent + p.installPuppetAgent = p.windowsInstallPuppetAgent + p.uploadFile = p.windowsUploadFile + p.UseSudo = false + p.defaultCertname = p.windowsDefaultCertname + default: + return fmt.Errorf("Unsupported OS type: %s", p.OSType) + } + + comm, err := communicator.New(state) + if err != nil { + return err + } + + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + err = communicator.Retry(retryCtx, func() error { + return comm.Connect(output) + }) + if err != nil { + return err + } + defer comm.Disconnect() + + p.comm = comm + + if p.OpenSource { + p.installPuppetAgent = p.installPuppetAgentOpenSource + } + + csrAttrs := new(csrAttributes) + csrAttrs.CustomAttributes = make(map[string]string) + for k, v := range p.CustomAttributes { + csrAttrs.CustomAttributes[k] = v.(string) + } + + csrAttrs.ExtensionRequests = make(map[string]string) + for k, v := range p.ExtensionRequests { + csrAttrs.ExtensionRequests[k] = v.(string) + } + + if p.Autosign { + if p.Certname == "" { + p.Certname, _ = p.defaultCertname() + } + + autosignToken, err := p.generateAutosignToken(p.Certname) + if err != nil { + return fmt.Errorf("Failed to generate an autosign token: %s", err) + } + 
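+		// The token is passed to the Puppet CA as the "challengePassword"
+		// CSR attribute (written into csr_attributes.yaml below), which
+		// policy-based autosigning can validate to approve this node's
+		// certificate request automatically.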
csrAttrs.CustomAttributes["challengePassword"] = autosignToken + } + + if err = p.writeCSRAttributes(csrAttrs); err != nil { + return fmt.Errorf("Failed to write csr_attributes.yaml: %s", err) + } + + if err = p.installPuppetAgent(); err != nil { + return err + } + + if err = p.runPuppetAgent(); err != nil { + return err + } + + return nil +} + +func (p *provisioner) writeCSRAttributes(attrs *csrAttributes) (rerr error) { + content, err := yaml.Marshal(attrs) + if err != nil { + return fmt.Errorf("Failed to marshal CSR attributes to YAML: %s", err) + } + + configDir := map[string]string{ + "linux": "/etc/puppetlabs/puppet", + "windows": "C:\\ProgramData\\PuppetLabs\\Puppet\\etc", + } + + return p.uploadFile(bytes.NewBuffer(content), configDir[p.OSType], "csr_attributes.yaml") +} + +func (p *provisioner) generateAutosignToken(certname string) (string, error) { + task := "autosign::generate_token" + + masterConnInfo := map[string]string{ + "type": "ssh", + "host": p.Server, + "user": p.ServerUser, + } + + result, err := bolt.Task( + masterConnInfo, + p.BoltTimeout, + p.ServerUser != "root", + task, + map[string]string{"certname": certname}, + ) + if err != nil { + return "", err + } + + if result.Items[0].Status != "success" { + return "", fmt.Errorf("Bolt %s failed on %s: %v", + task, + result.Items[0].Node, + result.Items[0].Result["_error"], + ) + } + + return result.Items[0].Result["_output"], nil +} + +func (p *provisioner) installPuppetAgentOpenSource() error { + result, err := bolt.Task( + p.instanceState.Ephemeral.ConnInfo, + p.BoltTimeout, + p.UseSudo, + "puppet_agent::install", + nil, + ) + + if err != nil || result.Items[0].Status != "success" { + return fmt.Errorf("puppet_agent::install failed: %s\n%+v", err, result) + } + + return nil +} + +func (p *provisioner) runCommand(command string) (stdout string, err error) { + if p.UseSudo { + command = "sudo " + command + } + + var stdoutBuffer bytes.Buffer + outR, outW := io.Pipe() + errR, errW := io.Pipe() + outTee := io.TeeReader(outR, &stdoutBuffer) + go p.copyToOutput(outTee) + go p.copyToOutput(errR) + defer outW.Close() + defer errW.Close() + + cmd := &remote.Cmd{ + Command: command, + Stdout: outW, + Stderr: errW, + } + + err = p.comm.Start(cmd) + if err != nil { + err = fmt.Errorf("Error executing command %q: %v", cmd.Command, err) + return stdout, err + } + + err = cmd.Wait() + stdout = strings.TrimSpace(stdoutBuffer.String()) + + return stdout, err +} + +func (p *provisioner) copyToOutput(reader io.Reader) { + lr := linereader.New(reader) + for line := range lr.Ch { + p.output.Output(line) + } +} + +func decodeConfig(d *schema.ResourceData) (*provisioner, error) { + p := &provisioner{ + UseSudo: d.Get("use_sudo").(bool), + Server: d.Get("server").(string), + ServerUser: d.Get("server_user").(string), + OSType: strings.ToLower(d.Get("os_type").(string)), + Autosign: d.Get("autosign").(bool), + OpenSource: d.Get("open_source").(bool), + Certname: strings.ToLower(d.Get("certname").(string)), + ExtensionRequests: d.Get("extension_requests").(map[string]interface{}), + CustomAttributes: d.Get("custom_attributes").(map[string]interface{}), + Environment: d.Get("environment").(string), + } + p.BoltTimeout, _ = time.ParseDuration(d.Get("bolt_timeout").(string)) + + return p, nil +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/windows_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/windows_provisioner.go new file mode 100644 index 00000000..eeb2154e 
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/puppet/windows_provisioner.go
@@ -0,0 +1,71 @@
+package puppet
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/hashicorp/terraform/communicator/remote"
+)
+
+const (
+	getHostByName = "([System.Net.Dns]::GetHostByName(($env:computerName))).Hostname"
+	domainQuery   = "(Get-WmiObject -Query 'select DNSDomain from Win32_NetworkAdapterConfiguration where IPEnabled = True').DNSDomain"
+)
+
+func (p *provisioner) windowsUploadFile(f io.Reader, dir string, filename string) error {
+	_, err := p.runCommand("powershell.exe new-item -itemtype directory -force -path " + dir)
+	if err != nil {
+		return fmt.Errorf("Failed to make directory %s: %s", dir, err)
+	}
+
+	return p.comm.Upload(dir+"\\"+filename, f)
+}
+
+func (p *provisioner) windowsDefaultCertname() (string, error) {
+	certname, err := p.runCommand(fmt.Sprintf(`powershell -Command "& {%s}"`, getHostByName))
+	if err != nil {
+		return "", err
+	}
+
+	// Sometimes System.Net.Dns::GetHostByName does not return a full FQDN, so
+	// we have to look up the domain separately.
+	if strings.Contains(certname, ".") {
+		return certname, nil
+	}
+
+	domain, err := p.runCommand(fmt.Sprintf(`powershell -Command "& {%s}"`, domainQuery))
+	if err != nil {
+		return "", err
+	}
+
+	return strings.ToLower(certname + "." + domain), nil
+}
+
+func (p *provisioner) windowsInstallPuppetAgent() error {
+	_, err := p.runCommand(fmt.Sprintf(
+		`powershell -Command "& {[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}; `+
+			`(New-Object System.Net.WebClient).DownloadFile('https://%s:8140/packages/current/install.ps1', `+
+			`'install.ps1')}"`,
+		p.Server,
+	))
+	if err != nil {
+		return err
+	}
+
+	_, err = p.runCommand(`powershell -Command "& .\install.ps1 -PuppetServiceEnsure stopped"`)
+
+	return err
+}
+
+func (p *provisioner) windowsRunPuppetAgent() error {
+	_, err := p.runCommand(fmt.Sprintf("puppet agent --test --server %s --environment %s", p.Server, p.Environment))
+	if err != nil {
+		errStruct, _ := err.(*remote.ExitError)
+		if errStruct.ExitStatus == 2 {
+			return nil
+		}
+	}
+
+	return err
+}
diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go
new file mode 100644
index 00000000..50042977
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/remote-exec/resource_provisioner.go
@@ -0,0 +1,225 @@
+package remoteexec
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/terraform/communicator"
+	"github.com/hashicorp/terraform/communicator/remote"
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/mitchellh/go-linereader"
+)
+
+// maxBackoffDelay is the maximum delay between retry attempts
+var maxBackoffDelay = 10 * time.Second
+var initialBackoffDelay = time.Second
+
+func Provisioner() terraform.ResourceProvisioner {
+	return &schema.Provisioner{
+		Schema: map[string]*schema.Schema{
+			"inline": {
+				Type:          schema.TypeList,
+				Elem:          &schema.Schema{Type: schema.TypeString},
+				PromoteSingle: true,
+				Optional:      true,
+				ConflictsWith: []string{"script", "scripts"},
+			},
+
+			"script": {
+				Type:          schema.TypeString,
+				Optional:      true,
+				ConflictsWith: []string{"inline", "scripts"},
+			},
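+
+			// At most one of "inline", "script", and "scripts" may be set;
+			// the ConflictsWith entries on each attribute make Terraform
+			// reject configurations that combine them.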
+ + "scripts": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + ConflictsWith: []string{"script", "inline"}, + }, + }, + + ApplyFunc: applyFn, + } +} + +// Apply executes the remote exec provisioner +func applyFn(ctx context.Context) error { + connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState) + data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData) + o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput) + + // Get a new communicator + comm, err := communicator.New(connState) + if err != nil { + return err + } + + // Collect the scripts + scripts, err := collectScripts(data) + if err != nil { + return err + } + for _, s := range scripts { + defer s.Close() + } + + // Copy and execute each script + if err := runScripts(ctx, o, comm, scripts); err != nil { + return err + } + + return nil +} + +// generateScripts takes the configuration and creates a script from each inline config +func generateScripts(d *schema.ResourceData) ([]string, error) { + var lines []string + for _, l := range d.Get("inline").([]interface{}) { + line, ok := l.(string) + if !ok { + return nil, fmt.Errorf("Error parsing %v as a string", l) + } + lines = append(lines, line) + } + lines = append(lines, "") + + return []string{strings.Join(lines, "\n")}, nil +} + +// collectScripts is used to collect all the scripts we need +// to execute in preparation for copying them. +func collectScripts(d *schema.ResourceData) ([]io.ReadCloser, error) { + // Check if inline + if _, ok := d.GetOk("inline"); ok { + scripts, err := generateScripts(d) + if err != nil { + return nil, err + } + + var r []io.ReadCloser + for _, script := range scripts { + r = append(r, ioutil.NopCloser(bytes.NewReader([]byte(script)))) + } + + return r, nil + } + + // Collect scripts + var scripts []string + if script, ok := d.GetOk("script"); ok { + scr, ok := script.(string) + if !ok { + return nil, fmt.Errorf("Error parsing script %v as string", script) + } + scripts = append(scripts, scr) + } + + if scriptList, ok := d.GetOk("scripts"); ok { + for _, script := range scriptList.([]interface{}) { + scr, ok := script.(string) + if !ok { + return nil, fmt.Errorf("Error parsing script %v as string", script) + } + scripts = append(scripts, scr) + } + } + + // Open all the scripts + var fhs []io.ReadCloser + for _, s := range scripts { + fh, err := os.Open(s) + if err != nil { + for _, fh := range fhs { + fh.Close() + } + return nil, fmt.Errorf("Failed to open script '%s': %v", s, err) + } + fhs = append(fhs, fh) + } + + // Done, return the file handles + return fhs, nil +} + +// runScripts is used to copy and execute a set of scripts +func runScripts( + ctx context.Context, + o terraform.UIOutput, + comm communicator.Communicator, + scripts []io.ReadCloser) error { + + retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout()) + defer cancel() + + // Wait and retry until we establish the connection + err := communicator.Retry(retryCtx, func() error { + return comm.Connect(o) + }) + if err != nil { + return err + } + + // Wait for the context to end and then disconnect + go func() { + <-ctx.Done() + comm.Disconnect() + }() + + for _, script := range scripts { + var cmd *remote.Cmd + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + defer outW.Close() + defer errW.Close() + + go copyOutput(o, outR) + go copyOutput(o, errR) + + remotePath := comm.ScriptPath() + + if err := comm.UploadScript(remotePath, script); err != nil { + return fmt.Errorf("Failed to upload script: 
%v", err) + } + + cmd = &remote.Cmd{ + Command: remotePath, + Stdout: outW, + Stderr: errW, + } + if err := comm.Start(cmd); err != nil { + return fmt.Errorf("Error starting script: %v", err) + } + + if err := cmd.Wait(); err != nil { + return err + } + + // Upload a blank follow up file in the same path to prevent residual + // script contents from remaining on remote machine + empty := bytes.NewReader([]byte("")) + if err := comm.Upload(remotePath, empty); err != nil { + // This feature is best-effort. + log.Printf("[WARN] Failed to upload empty follow up script: %v", err) + } + } + + return nil +} + +func copyOutput( + o terraform.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go new file mode 100644 index 00000000..0be7ed00 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/builtin/provisioners/salt-masterless/resource_provisioner.go @@ -0,0 +1,525 @@ +// This package implements a provisioner for Terraform that executes a +// saltstack state within the remote machine +// +// Adapted from gitub.com/hashicorp/packer/provisioner/salt-masterless + +package saltmasterless + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/hashicorp/terraform/communicator" + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" + linereader "github.com/mitchellh/go-linereader" +) + +type provisionFn func(terraform.UIOutput, communicator.Communicator) error + +type provisioner struct { + SkipBootstrap bool + BootstrapArgs string + LocalStateTree string + DisableSudo bool + CustomState string + MinionConfig string + LocalPillarRoots string + RemoteStateTree string + RemotePillarRoots string + TempConfigDir string + NoExitOnFailure bool + LogLevel string + SaltCallArgs string + CmdArgs string +} + +const DefaultStateTreeDir = "/srv/salt" +const DefaultPillarRootDir = "/srv/pillar" + +// Provisioner returns a salt-masterless provisioner +func Provisioner() terraform.ResourceProvisioner { + return &schema.Provisioner{ + Schema: map[string]*schema.Schema{ + "local_state_tree": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "local_pillar_roots": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "remote_state_tree": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: DefaultStateTreeDir, + }, + "remote_pillar_roots": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: DefaultPillarRootDir, + }, + "temp_config_dir": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "/tmp/salt", + }, + "skip_bootstrap": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "no_exit_on_failure": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "bootstrap_args": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "disable_sudo": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "custom_state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "minion_config_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "cmd_args": &schema.Schema{ + Type: schema.TypeString, + 
+				Optional: true,
+			},
+			"salt_call_args": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"log_level": &schema.Schema{
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+		},
+
+		ApplyFunc:    applyFn,
+		ValidateFunc: validateFn,
+	}
+}
+
+// Apply executes the salt-masterless provisioner
+func applyFn(ctx context.Context) error {
+	// Decode the raw config for this provisioner
+	o := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput)
+	d := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData)
+	connState := ctx.Value(schema.ProvRawStateKey).(*terraform.InstanceState)
+
+	p, err := decodeConfig(d)
+	if err != nil {
+		return err
+	}
+
+	// Get a new communicator
+	comm, err := communicator.New(connState)
+	if err != nil {
+		return err
+	}
+
+	retryCtx, cancel := context.WithTimeout(ctx, comm.Timeout())
+	defer cancel()
+
+	// Wait and retry until we establish the connection
+	err = communicator.Retry(retryCtx, func() error {
+		return comm.Connect(o)
+	})
+
+	if err != nil {
+		return err
+	}
+
+	// Wait for the context to end and then disconnect
+	go func() {
+		<-ctx.Done()
+		comm.Disconnect()
+	}()
+
+	var src, dst string
+
+	o.Output("Provisioning with Salt...")
+	if !p.SkipBootstrap {
+		cmd := &remote.Cmd{
+			// Fallback on wget if curl failed for any reason (such as not being installed)
+			Command: fmt.Sprintf("curl -L https://bootstrap.saltstack.com -o /tmp/install_salt.sh || wget -O /tmp/install_salt.sh https://bootstrap.saltstack.com"),
+		}
+		o.Output(fmt.Sprintf("Downloading saltstack bootstrap to /tmp/install_salt.sh"))
+		if err = comm.Start(cmd); err != nil {
+			err = fmt.Errorf("Unable to download Salt: %s", err)
+		}
+
+		if err := cmd.Wait(); err != nil {
+			return err
+		}
+
+		outR, outW := io.Pipe()
+		errR, errW := io.Pipe()
+		go copyOutput(o, outR)
+		go copyOutput(o, errR)
+		defer outW.Close()
+		defer errW.Close()
+
+		cmd = &remote.Cmd{
+			Command: fmt.Sprintf("%s /tmp/install_salt.sh %s", p.sudo("sh"), p.BootstrapArgs),
+			Stdout:  outW,
+			Stderr:  errW,
+		}
+
+		o.Output(fmt.Sprintf("Installing Salt with command %s", cmd.Command))
+		if err := comm.Start(cmd); err != nil {
+			return fmt.Errorf("Unable to install Salt: %s", err)
+		}
+
+		if err := cmd.Wait(); err != nil {
+			return err
+		}
+	}
+
+	o.Output(fmt.Sprintf("Creating remote temporary directory: %s", p.TempConfigDir))
+	if err := p.createDir(o, comm, p.TempConfigDir); err != nil {
+		return fmt.Errorf("Error creating remote temporary directory: %s", err)
+	}
+
+	if p.MinionConfig != "" {
+		o.Output(fmt.Sprintf("Uploading minion config: %s", p.MinionConfig))
+		src = p.MinionConfig
+		dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion"))
+		if err = p.uploadFile(o, comm, dst, src); err != nil {
+			return fmt.Errorf("Error uploading local minion config file to remote: %s", err)
+		}
+
+		// move minion config into /etc/salt
+		o.Output(fmt.Sprintf("Make sure directory %s exists", "/etc/salt"))
+		if err := p.createDir(o, comm, "/etc/salt"); err != nil {
+			return fmt.Errorf("Error creating remote salt configuration directory: %s", err)
+		}
+		src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "minion"))
+		dst = "/etc/salt/minion"
+		if err = p.moveFile(o, comm, dst, src); err != nil {
+			return fmt.Errorf("Unable to move %s/minion to /etc/salt/minion: %s", p.TempConfigDir, err)
+		}
+	}
+
+	o.Output(fmt.Sprintf("Uploading local state tree: %s", p.LocalStateTree))
+	src = p.LocalStateTree
+	dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states"))
+	if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err !=
nil { + return fmt.Errorf("Error uploading local state tree to remote: %s", err) + } + + // move state tree from temporary directory + src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "states")) + dst = p.RemoteStateTree + if err = p.removeDir(o, comm, dst); err != nil { + return fmt.Errorf("Unable to clear salt tree: %s", err) + } + if err = p.moveFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Unable to move %s/states to %s: %s", p.TempConfigDir, dst, err) + } + + if p.LocalPillarRoots != "" { + o.Output(fmt.Sprintf("Uploading local pillar roots: %s", p.LocalPillarRoots)) + src = p.LocalPillarRoots + dst = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) + if err = p.uploadDir(o, comm, dst, src, []string{".git"}); err != nil { + return fmt.Errorf("Error uploading local pillar roots to remote: %s", err) + } + + // move pillar root from temporary directory + src = filepath.ToSlash(filepath.Join(p.TempConfigDir, "pillar")) + dst = p.RemotePillarRoots + + if err = p.removeDir(o, comm, dst); err != nil { + return fmt.Errorf("Unable to clear pillar root: %s", err) + } + if err = p.moveFile(o, comm, dst, src); err != nil { + return fmt.Errorf("Unable to move %s/pillar to %s: %s", p.TempConfigDir, dst, err) + } + } + + outR, outW := io.Pipe() + errR, errW := io.Pipe() + go copyOutput(o, outR) + go copyOutput(o, errR) + defer outW.Close() + defer errW.Close() + + o.Output(fmt.Sprintf("Running: salt-call --local %s", p.CmdArgs)) + cmd := &remote.Cmd{ + Command: p.sudo(fmt.Sprintf("salt-call --local %s", p.CmdArgs)), + Stdout: outW, + Stderr: errW, + } + if err = comm.Start(cmd); err != nil { + err = fmt.Errorf("Error executing salt-call: %s", err) + } + + if err := cmd.Wait(); err != nil { + return err + } + return nil +} + +// Prepends sudo to supplied command if config says to +func (p *provisioner) sudo(cmd string) string { + if p.DisableSudo { + return cmd + } + + return "sudo " + cmd +} + +func validateDirConfig(path string, name string, required bool) error { + if required == true && path == "" { + return fmt.Errorf("%s cannot be empty", name) + } else if required == false && path == "" { + return nil + } + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) + } else if !info.IsDir() { + return fmt.Errorf("%s: path '%s' must point to a directory", name, path) + } + return nil +} + +func validateFileConfig(path string, name string, required bool) error { + if required == true && path == "" { + return fmt.Errorf("%s cannot be empty", name) + } else if required == false && path == "" { + return nil + } + info, err := os.Stat(path) + if err != nil { + return fmt.Errorf("%s: path '%s' is invalid: %s", name, path, err) + } else if info.IsDir() { + return fmt.Errorf("%s: path '%s' must point to a file", name, path) + } + return nil +} + +func (p *provisioner) uploadFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { + f, err := os.Open(src) + if err != nil { + return fmt.Errorf("Error opening: %s", err) + } + defer f.Close() + + if err = comm.Upload(dst, f); err != nil { + return fmt.Errorf("Error uploading %s: %s", src, err) + } + return nil +} + +func (p *provisioner) moveFile(o terraform.UIOutput, comm communicator.Communicator, dst, src string) error { + o.Output(fmt.Sprintf("Moving %s to %s", src, dst)) + cmd := &remote.Cmd{Command: fmt.Sprintf(p.sudo("mv %s %s"), src, dst)} + if err := comm.Start(cmd); err != nil { + return fmt.Errorf("Unable to move %s to %s: %s", src, dst, 
err) + } + if err := cmd.Wait(); err != nil { + return err + } + return nil +} + +func (p *provisioner) createDir(o terraform.UIOutput, comm communicator.Communicator, dir string) error { + o.Output(fmt.Sprintf("Creating directory: %s", dir)) + cmd := &remote.Cmd{ + Command: fmt.Sprintf("mkdir -p '%s'", dir), + } + if err := comm.Start(cmd); err != nil { + return err + } + + if err := cmd.Wait(); err != nil { + return err + } + return nil +} + +func (p *provisioner) removeDir(o terraform.UIOutput, comm communicator.Communicator, dir string) error { + o.Output(fmt.Sprintf("Removing directory: %s", dir)) + cmd := &remote.Cmd{ + Command: fmt.Sprintf("rm -rf '%s'", dir), + } + if err := comm.Start(cmd); err != nil { + return err + } + if err := cmd.Wait(); err != nil { + return err + } + return nil +} + +func (p *provisioner) uploadDir(o terraform.UIOutput, comm communicator.Communicator, dst, src string, ignore []string) error { + if err := p.createDir(o, comm, dst); err != nil { + return err + } + + // Make sure there is a trailing "/" so that the directory isn't + // created on the other side. + if src[len(src)-1] != '/' { + src = src + "/" + } + return comm.UploadDir(dst, src) +} + +// Validate checks if the required arguments are configured +func validateFn(c *terraform.ResourceConfig) (ws []string, es []error) { + // require a salt state tree + localStateTreeTmp, ok := c.Get("local_state_tree") + var localStateTree string + if !ok { + es = append(es, + errors.New("Required local_state_tree is not set")) + } else { + localStateTree = localStateTreeTmp.(string) + } + err := validateDirConfig(localStateTree, "local_state_tree", true) + if err != nil { + es = append(es, err) + } + + var localPillarRoots string + localPillarRootsTmp, ok := c.Get("local_pillar_roots") + if !ok { + localPillarRoots = "" + } else { + localPillarRoots = localPillarRootsTmp.(string) + } + + err = validateDirConfig(localPillarRoots, "local_pillar_roots", false) + if err != nil { + es = append(es, err) + } + + var minionConfig string + minionConfigTmp, ok := c.Get("minion_config_file") + if !ok { + minionConfig = "" + } else { + minionConfig = minionConfigTmp.(string) + } + err = validateFileConfig(minionConfig, "minion_config_file", false) + if err != nil { + es = append(es, err) + } + + var remoteStateTree string + remoteStateTreeTmp, ok := c.Get("remote_state_tree") + if !ok { + remoteStateTree = DefaultStateTreeDir + } else { + remoteStateTree = remoteStateTreeTmp.(string) + } + + var remotePillarRoots string + remotePillarRootsTmp, ok := c.Get("remote_pillar_roots") + if !ok { + remotePillarRoots = DefaultPillarRootDir + } else { + remotePillarRoots = remotePillarRootsTmp.(string) + } + + if minionConfig != "" && (remoteStateTree != DefaultStateTreeDir || remotePillarRoots != DefaultPillarRootDir) { + es = append(es, + errors.New("remote_state_tree and remote_pillar_roots only apply when minion_config_file is not used")) + } + + if len(es) > 0 { + return ws, es + } + + return ws, es +} + +func decodeConfig(d *schema.ResourceData) (*provisioner, error) { + p := &provisioner{ + LocalStateTree: d.Get("local_state_tree").(string), + LogLevel: d.Get("log_level").(string), + SaltCallArgs: d.Get("salt_call_args").(string), + CmdArgs: d.Get("cmd_args").(string), + MinionConfig: d.Get("minion_config_file").(string), + CustomState: d.Get("custom_state").(string), + DisableSudo: d.Get("disable_sudo").(bool), + BootstrapArgs: d.Get("bootstrap_args").(string), + NoExitOnFailure: d.Get("no_exit_on_failure").(bool), + 
SkipBootstrap: d.Get("skip_bootstrap").(bool), + TempConfigDir: d.Get("temp_config_dir").(string), + RemotePillarRoots: d.Get("remote_pillar_roots").(string), + RemoteStateTree: d.Get("remote_state_tree").(string), + LocalPillarRoots: d.Get("local_pillar_roots").(string), + } + + // build the command line args to pass onto salt + var cmdArgs bytes.Buffer + + if p.CustomState == "" { + cmdArgs.WriteString(" state.highstate") + } else { + cmdArgs.WriteString(" state.sls ") + cmdArgs.WriteString(p.CustomState) + } + + if p.MinionConfig == "" { + // pass --file-root and --pillar-root if no minion_config_file is supplied + if p.RemoteStateTree != "" { + cmdArgs.WriteString(" --file-root=") + cmdArgs.WriteString(p.RemoteStateTree) + } else { + cmdArgs.WriteString(" --file-root=") + cmdArgs.WriteString(DefaultStateTreeDir) + } + if p.RemotePillarRoots != "" { + cmdArgs.WriteString(" --pillar-root=") + cmdArgs.WriteString(p.RemotePillarRoots) + } else { + cmdArgs.WriteString(" --pillar-root=") + cmdArgs.WriteString(DefaultPillarRootDir) + } + } + + if !p.NoExitOnFailure { + cmdArgs.WriteString(" --retcode-passthrough") + } + + if p.LogLevel == "" { + cmdArgs.WriteString(" -l info") + } else { + cmdArgs.WriteString(" -l ") + cmdArgs.WriteString(p.LogLevel) + } + + if p.SaltCallArgs != "" { + cmdArgs.WriteString(" ") + cmdArgs.WriteString(p.SaltCallArgs) + } + + p.CmdArgs = cmdArgs.String() + + return p, nil +} + +func copyOutput( + o terraform.UIOutput, r io.Reader) { + lr := linereader.New(r) + for line := range lr.Ch { + o.Output(line) + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/012_config_upgrade.go b/vendor/github.com/hashicorp/terraform/command/012_config_upgrade.go new file mode 100644 index 00000000..4f0dc7c4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/012_config_upgrade.go @@ -0,0 +1,250 @@ +package command + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configupgrade" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// ZeroTwelveUpgradeCommand is a Command implementation that can upgrade +// the configuration files for a module from pre-0.11 syntax to new 0.12 +// idiom, while also flagging any suspicious constructs that will require +// human review. +type ZeroTwelveUpgradeCommand struct { + Meta +} + +func (c *ZeroTwelveUpgradeCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + var skipConfirm, force bool + + flags := c.Meta.extendedFlagSet("0.12upgrade") + flags.BoolVar(&skipConfirm, "yes", false, "skip confirmation prompt") + flags.BoolVar(&force, "force", false, "override duplicate upgrade heuristic") + if err := flags.Parse(args); err != nil { + return 1 + } + + var diags tfdiags.Diagnostics + + var dir string + args = flags.Args() + switch len(args) { + case 0: + dir = "." 
+	case 1:
+		dir = args[0]
+	default:
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Too many arguments",
+			"The command 0.12upgrade expects only a single argument, giving the directory containing the module to upgrade.",
+		))
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	dir = c.normalizePath(dir)
+
+	sources, err := configupgrade.LoadModule(dir)
+	if err != nil {
+		if os.IsNotExist(err) {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Module directory not found",
+				fmt.Sprintf("The given directory %s does not exist.", dir),
+			))
+		} else {
+			diags = diags.Append(err)
+		}
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	if len(sources) == 0 {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Not a module directory",
+			fmt.Sprintf("The given directory %s does not contain any Terraform configuration files.", dir),
+		))
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// The config loader doesn't naturally populate our sources
+	// map, so we'll do it manually so our diagnostics can have
+	// source code snippets inside them.
+	// This is weird, but this whole upgrade codepath is pretty
+	// weird and temporary, so we'll accept it.
+	if loader, err := c.initConfigLoader(); err == nil {
+		parser := loader.Parser()
+		for name, src := range sources {
+			parser.ForceFileSource(filepath.Join(dir, name), src)
+		}
+	}
+
+	if !force {
+		// We'll check first if this directory already looks upgraded, so we
+		// don't waste the user's time dealing with an interactive prompt
+		// immediately followed by an error.
+		if already, rng := sources.MaybeAlreadyUpgraded(); already {
+			diags = diags.Append(&hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  "Module already upgraded",
+				Detail:   fmt.Sprintf("The module in directory %s has a version constraint that suggests it has already been upgraded for v0.12. If this is incorrect, either remove this constraint or override this heuristic with the -force argument. Upgrading a module that was already upgraded may change the meaning of that module.", dir),
+				Subject:  rng.ToHCL().Ptr(),
+			})
+			c.showDiagnostics(diags)
+			return 1
+		}
+	}
+
+	if !skipConfirm {
+		c.Ui.Output(fmt.Sprintf(`
+This command will rewrite the configuration files in the given directory so
+that they use the new syntax features from Terraform v0.12, and will identify
+any constructs that may need to be adjusted for correct operation with
+Terraform v0.12.
+
+We recommend using this command in a clean version control work tree, so that
+you can easily see the proposed changes as a diff against the latest commit.
+If you have uncommitted changes already present, we recommend aborting this
+command and dealing with them before running this command again.
+`))
+
+		query := "Would you like to upgrade the module in the current directory?"
+		if dir != "." {
+			query = fmt.Sprintf("Would you like to upgrade the module in %s?", dir)
+		}
+		v, err := c.UIInput().Input(context.Background(), &terraform.InputOpts{
+			Id:          "approve",
+			Query:       query,
+			Description: `Only 'yes' will be accepted to confirm.`,
+		})
+		if err != nil {
+			diags = diags.Append(err)
+			c.showDiagnostics(diags)
+			return 1
+		}
+		if v != "yes" {
+			c.Ui.Info("Upgrade cancelled.")
+			return 0
+		}
+
+		c.Ui.Output(`-----------------------------------------------------------------------------`)
+	}
+
+	upgrader := &configupgrade.Upgrader{
+		Providers:    c.providerResolver(),
+		Provisioners: c.provisionerFactories(),
+	}
+	newSources, upgradeDiags := upgrader.Upgrade(sources, dir)
+	diags = diags.Append(upgradeDiags)
+	if upgradeDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 2
+	}
+
+	// Now we'll write the contents of newSources into the filesystem.
+	for name, src := range newSources {
+		fn := filepath.Join(dir, name)
+		if src == nil {
+			// indicates a file to be deleted
+			err := os.Remove(fn)
+			if err != nil {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Failed to remove file",
+					fmt.Sprintf("The file %s must be renamed as part of the upgrade process, but the old file could not be deleted: %s.", fn, err),
+				))
+			}
+			continue
+		}
+
+		err := ioutil.WriteFile(fn, src, 0644)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to write file",
+				fmt.Sprintf("The file %s must be updated or created as part of the upgrade process, but there was an error while writing: %s.", fn, err),
+			))
+		}
+	}
+
+	c.showDiagnostics(diags)
+	if diags.HasErrors() {
+		return 2
+	}
+
+	if !skipConfirm {
+		if len(diags) != 0 {
+			c.Ui.Output(`-----------------------------------------------------------------------------`)
+		}
+		c.Ui.Output(c.Colorize().Color(`
+[bold][green]Upgrade complete![reset]
+
+The configuration files were upgraded successfully. Use your version control
+system to review the proposed changes, make any necessary adjustments, and
+then commit.
+`))
+		if len(diags) != 0 {
+			// We checked for errors above, so these must be warnings.
+			c.Ui.Output(`Some warnings were generated during the upgrade, as shown above. These
+indicate situations where Terraform could not decide on an appropriate course
+of action without further human input.
+
+Where possible, these have also been marked with TF-UPGRADE-TODO comments to
+mark the locations where a decision must be made. After reviewing and adjusting
+these, manually remove the TF-UPGRADE-TODO comment before continuing.
+`)
+		}
+
+	}
+	return 0
+}
+
+func (c *ZeroTwelveUpgradeCommand) Help() string {
+	helpText := `
+Usage: terraform 0.12upgrade [module-dir]
+
+  Rewrites the .tf files for a single module that was written for a Terraform
+  version prior to v0.12 so that it uses new syntax features from v0.12
+  and later.
+
+  Also rewrites constructs that behave differently after v0.12, and flags any
+  suspicious constructs that require human review.
+
+  By default, 0.12upgrade rewrites the files in the current working directory.
+  However, a path to a different directory can be provided. The command will
+  prompt for confirmation interactively unless the -yes option is given.
+
+Options:
+
+  -yes        Skip the initial introduction messages and interactive
+              confirmation. This can be used to run this command in
+              batch from a script.
+
+  -force      Override the heuristic that attempts to detect if a
+              configuration is already written for v0.12 or later.
+              Some of the transformations made by this command are
+              not idempotent, so re-running against the same module
+              may change the meaning of expressions in the module.
+`
+	return strings.TrimSpace(helpText)
+}
+
+func (c *ZeroTwelveUpgradeCommand) Synopsis() string {
+	return "Rewrites pre-0.12 module source code for v0.12"
+}
diff --git a/vendor/github.com/hashicorp/terraform/command/apply.go b/vendor/github.com/hashicorp/terraform/command/apply.go
index 1e49739a..01751b7b 100644
--- a/vendor/github.com/hashicorp/terraform/command/apply.go
+++ b/vendor/github.com/hashicorp/terraform/command/apply.go
@@ -2,17 +2,18 @@ package command

 import (
 	"bytes"
-	"context"
 	"fmt"
 	"os"
 	"sort"
 	"strings"

 	"github.com/hashicorp/go-getter"
+	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/backend"
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
-	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"github.com/hashicorp/terraform/repl"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
 )

 // ApplyCommand is a Command implementation that applies a Terraform
@@ -23,27 +24,27 @@ type ApplyCommand struct {
 	// If true, then this apply command will become the "destroy"
 	// command. It is just like apply but only processes a destroy.
 	Destroy bool
-
-	// When this channel is closed, the apply will be cancelled.
-	ShutdownCh <-chan struct{}
 }

 func (c *ApplyCommand) Run(args []string) int {
-	var destroyForce, refresh bool
-	args = c.Meta.process(args, true)
+	var destroyForce, refresh, autoApprove bool
+	args, err := c.Meta.process(args, true)
+	if err != nil {
+		return 1
+	}

 	cmdName := "apply"
 	if c.Destroy {
 		cmdName = "destroy"
 	}

-	cmdFlags := c.Meta.flagSet(cmdName)
+	cmdFlags := c.Meta.extendedFlagSet(cmdName)
+	cmdFlags.BoolVar(&autoApprove, "auto-approve", false, "skip interactive approval of plan before applying")
 	if c.Destroy {
-		cmdFlags.BoolVar(&destroyForce, "force", false, "force")
+		cmdFlags.BoolVar(&destroyForce, "force", false, "deprecated: same as auto-approve")
 	}
 	cmdFlags.BoolVar(&refresh, "refresh", true, "refresh")
-	cmdFlags.IntVar(
-		&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism")
+	cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism")
 	cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path")
 	cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path")
 	cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path")
@@ -54,6 +55,8 @@ func (c *ApplyCommand) Run(args []string) int {
 		return 1
 	}

+	var diags tfdiags.Diagnostics
+
 	// Get the args. The "maybeInit" flag tracks whether we may need to
 	// initialize the configuration from a remote path. This is true as long
 	// as we have an argument.
@@ -65,6 +68,12 @@ func (c *ApplyCommand) Run(args []string) int {
 		return 1
 	}

+	// Check for user-supplied plugin path
+	if c.pluginPath, err = c.loadPluginPath(); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err))
+		return 1
+	}
+
 	if !c.Destroy && maybeInit {
 		// We need the pwd for the getter operation below
 		pwd, err := os.Getwd()
@@ -75,8 +84,7 @@ func (c *ApplyCommand) Run(args []string) int {
 		// Do a detect to determine if we need to do an init + apply.
if detected, err := getter.Detect(configPath, pwd, getter.Detectors); err != nil { - c.Ui.Error(fmt.Sprintf( - "Invalid path: %s", err)) + c.Ui.Error(fmt.Sprintf("Invalid path: %s", err)) return 1 } else if !strings.HasPrefix(detected, "file") { // If this isn't a file URL then we're doing an init + @@ -93,145 +101,121 @@ func (c *ApplyCommand) Run(args []string) int { } // Check if the path is a plan - plan, err := c.Plan(configPath) + planFile, err := c.PlanFile(configPath) if err != nil { c.Ui.Error(err.Error()) return 1 } - if c.Destroy && plan != nil { - c.Ui.Error(fmt.Sprintf( - "Destroy can't be called with a plan file.")) + if c.Destroy && planFile != nil { + c.Ui.Error(fmt.Sprintf("Destroy can't be called with a plan file.")) return 1 } - if plan != nil { + if planFile != nil { // Reset the config path for backend loading configPath = "" - } - // Load the module if we don't have one yet (not running from plan) - var mod *module.Tree - if plan == nil { - mod, err = c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + if !c.variableArgs.Empty() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't set variables when applying a saved plan", + "The -var and -var-file options cannot be used when applying a saved plan file, because a saved plan includes the variable values that were set when it was created.", + )) + c.showDiagnostics(diags) return 1 } } - /* - terraform.SetDebugInfo(DefaultDataDir) - - // Check for the legacy graph - if experiment.Enabled(experiment.X_legacyGraph) { - c.Ui.Output(c.Colorize().Color( - "[reset][bold][yellow]" + - "Legacy graph enabled! This will use the graph from Terraform 0.7.x\n" + - "to execute this operation. This will be removed in the future so\n" + - "please report any issues causing you to use this to the Terraform\n" + - "project.\n\n")) - } - */ - // Load the backend - b, err := c.Backend(&BackendOpts{ - ConfigPath: configPath, - Plan: plan, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // If we're not forcing and we're destroying, verify with the - // user at this point. - if !destroyForce && c.Destroy { - // Default destroy message - desc := "Terraform will delete all your managed infrastructure.\n" + - "There is no undo. Only 'yes' will be accepted to confirm." - - // If targets are specified, list those to user - if c.Meta.targets != nil { - var descBuffer bytes.Buffer - descBuffer.WriteString("Terraform will delete the following infrastructure:\n") - for _, target := range c.Meta.targets { - descBuffer.WriteString("\t") - descBuffer.WriteString(target) - descBuffer.WriteString("\n") - } - descBuffer.WriteString("There is no undo. 
Only 'yes' will be accepted to confirm") - desc = descBuffer.String() + var be backend.Enhanced + var beDiags tfdiags.Diagnostics + if planFile == nil { + backendConfig, configDiags := c.loadBackendConfig(configPath) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 } - v, err := c.UIInput().Input(&terraform.InputOpts{ - Id: "destroy", - Query: "Do you really want to destroy?", - Description: desc, + be, beDiags = c.Backend(&BackendOpts{ + Config: backendConfig, }) + } else { + plan, err := planFile.ReadPlan() if err != nil { - c.Ui.Error(fmt.Sprintf("Error asking for confirmation: %s", err)) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + fmt.Sprintf("Cannot read the plan from the given plan file: %s.", err), + )) + c.showDiagnostics(diags) return 1 } - if v != "yes" { - c.Ui.Output("Destroy cancelled.") + if plan.Backend.Config == nil { + // Should never happen; always indicates a bug in the creation of the plan file + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read plan from plan file", + fmt.Sprintf("The given plan file does not have a valid backend configuration. This is a bug in the Terraform command that generated this plan file."), + )) + c.showDiagnostics(diags) return 1 } + be, beDiags = c.BackendForPlan(plan.Backend) + } + diags = diags.Append(beDiags) + if beDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 } + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + c.showDiagnostics(diags) + diags = nil + // Build the operation - opReq := c.Operation() + opReq := c.Operation(be) + opReq.AutoApprove = autoApprove + opReq.ConfigDir = configPath opReq.Destroy = c.Destroy - opReq.Module = mod - opReq.Plan = plan + opReq.DestroyForce = destroyForce + opReq.PlanFile = planFile opReq.PlanRefresh = refresh opReq.Type = backend.OperationTypeApply - // Perform the operation - ctx, ctxCancel := context.WithCancel(context.Background()) - defer ctxCancel() - op, err := b.Operation(ctx, opReq) + opReq.ConfigLoader, err = c.initConfigLoader() if err != nil { - c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) + c.showDiagnostics(err) return 1 } - // Wait for the operation to complete or an interrupt to occur - select { - case <-c.ShutdownCh: - // Cancel our context so we can start gracefully exiting - ctxCancel() - - // Notify the user - c.Ui.Output(outputInterrupt) - - // Still get the result, since there is still one - select { - case <-c.ShutdownCh: - c.Ui.Error( - "Two interrupts received. Exiting immediately. Note that data\n" + - "loss may have occurred.") - return 1 - case <-op.Done(): - } - case <-op.Done(): - if err := op.Err; err != nil { - c.Ui.Error(err.Error()) + { + var moreDiags tfdiags.Diagnostics + opReq.Variables, moreDiags = c.collectVariableValues() + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } } - if !c.Destroy { - // Get the right module that we used. If we ran a plan, then use - // that module. 
- if plan != nil { - mod = plan.Module - } + op, err := c.RunOperation(be, opReq) + if err != nil { + c.showDiagnostics(err) + return 1 + } + if op.Result != backend.OperationSuccess { + return op.Result.ExitStatus() + } - if outputs := outputsAsString(op.State, terraform.RootModulePath, mod.Config().Outputs, true); outputs != "" { + if !c.Destroy { + if outputs := outputsAsString(op.State, addrs.RootModuleInstance, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } } - return 0 + return op.Result.ExitStatus() } func (c *ApplyCommand) Help() string { @@ -262,18 +246,14 @@ Usage: terraform apply [options] [DIR-OR-PLAN] configuration or an execution plan can be provided. Execution plans can be used to only execute a pre-determined set of actions. - DIR can also be a SOURCE as given to the "init" command. In this case, - apply behaves as though "init" was called followed by "apply". This only - works for sources that aren't files, and only if the current working - directory is empty of Terraform files. This is a shortcut for getting - started. - Options: -backup=path Path to backup the existing state file before modifying. Defaults to the "-state-out" path with ".backup" extension. Set to "-" to disable backup. + -auto-approve Skip interactive approval of plan before applying. + -lock=true Lock the state file when locking is supported. -lock-timeout=0s Duration to retry a state lock. @@ -303,8 +283,8 @@ Options: flag can be set multiple times. -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. ` @@ -323,7 +303,9 @@ Options: modifying. Defaults to the "-state-out" path with ".backup" extension. Set to "-" to disable backup. - -force Don't ask for input for destroy confirmation. + -auto-approve Skip interactive approval before destroying. + + -force Deprecated: same as auto-approve. -lock=true Lock the state file when locking is supported. @@ -352,34 +334,27 @@ Options: flag can be set multiple times. -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. ` return strings.TrimSpace(helpText) } -func outputsAsString(state *terraform.State, modPath []string, schema []*config.Output, includeHeader bool) string { +func outputsAsString(state *states.State, modPath addrs.ModuleInstance, includeHeader bool) string { if state == nil { return "" } - ms := state.ModuleByPath(modPath) + ms := state.Module(modPath) if ms == nil { return "" } - outputs := ms.Outputs + outputs := ms.OutputValues outputBuf := new(bytes.Buffer) if len(outputs) > 0 { - schemaMap := make(map[string]*config.Output) - if schema != nil { - for _, s := range schema { - schemaMap[s.Name] = s - } - } - if includeHeader { outputBuf.WriteString("[reset][bold][green]\nOutputs:\n\n") } @@ -396,23 +371,24 @@ func outputsAsString(state *terraform.State, modPath []string, schema []*config. 
sort.Strings(ks) for _, k := range ks { - schema, ok := schemaMap[k] - if ok && schema.Sensitive { + v := outputs[k] + if v.Sensitive { outputBuf.WriteString(fmt.Sprintf("%s = \n", k)) continue } - v := outputs[k] - switch typedV := v.Value.(type) { - case string: - outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, typedV)) - case []interface{}: - outputBuf.WriteString(formatListOutput("", k, typedV)) - outputBuf.WriteString("\n") - case map[string]interface{}: - outputBuf.WriteString(formatMapOutput("", k, typedV)) - outputBuf.WriteString("\n") + // Our formatter still wants an old-style raw interface{} value, so + // for now we'll just shim it. + // FIXME: Port the formatter to work with cty.Value directly. + legacyVal := hcl2shim.ConfigValueFromHCL2(v.Value) + result, err := repl.FormatResult(legacyVal) + if err != nil { + // We can't really return errors from here, so we'll just have + // to stub this out. This shouldn't happen in practice anyway. + result = "" } + + outputBuf.WriteString(fmt.Sprintf("%s = %s\n", k, result)) } } diff --git a/vendor/github.com/hashicorp/terraform/command/autocomplete.go b/vendor/github.com/hashicorp/terraform/command/autocomplete.go new file mode 100644 index 00000000..4b19c1c9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/autocomplete.go @@ -0,0 +1,67 @@ +package command + +import ( + "github.com/posener/complete" +) + +// This file contains some re-usable predictors for auto-complete. The +// command-specific autocomplete configurations live within each command's +// own source file, as AutocompleteArgs and AutocompleteFlags methods on each +// Command implementation. + +// For completing the value of boolean flags like -foo false +var completePredictBoolean = complete.PredictSet("true", "false") + +// We don't currently have a real predictor for module sources, but +// we'll probably add one later. +var completePredictModuleSource = complete.PredictAnything + +type completePredictSequence []complete.Predictor + +func (s completePredictSequence) Predict(a complete.Args) []string { + // Only one level of command is stripped off the prefix of a.Completed + // here, so nested subcommands like "workspace new" will need to provide + // dummy entries (e.g. complete.PredictNothing) as placeholders for + // all but the first subcommand. For example, "workspace new" needs + // one placeholder for the argument "new". + idx := len(a.Completed) + if idx >= len(s) { + return nil + } + + return s[idx].Predict(a) +} + +func (m *Meta) completePredictWorkspaceName() complete.Predictor { + return complete.PredictFunc(func(a complete.Args) []string { + // There are lot of things that can fail in here, so if we encounter + // any error then we'll just return nothing and not support autocomplete + // until whatever error is fixed. (The user can't actually see the error + // here, but other commands should produce a user-visible error before + // too long.) + + // We assume here that we want to autocomplete for the current working + // directory, since we don't have enough context to know where to + // find any config path argument, and it might be _after_ the argument + // we're trying to complete here anyway. 
+		configPath, err := ModulePath(nil)
+		if err != nil {
+			return nil
+		}
+
+		backendConfig, diags := m.loadBackendConfig(configPath)
+		if diags.HasErrors() {
+			return nil
+		}
+
+		b, diags := m.Backend(&BackendOpts{
+			Config: backendConfig,
+		})
+		if diags.HasErrors() {
+			return nil
+		}
+
+		names, _ := b.Workspaces()
+		return names
+	})
+}
diff --git a/vendor/github.com/hashicorp/terraform/command/clistate/state.go b/vendor/github.com/hashicorp/terraform/command/clistate/state.go
index f02f053b..881377d4 100644
--- a/vendor/github.com/hashicorp/terraform/command/clistate/state.go
+++ b/vendor/github.com/hashicorp/terraform/command/clistate/state.go
@@ -8,11 +8,14 @@ import (
 	"context"
 	"fmt"
 	"strings"
+	"sync"
 	"time"

 	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/terraform/helper/slowmessage"
 	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/states/statemgr"
 	"github.com/mitchellh/cli"
 	"github.com/mitchellh/colorstring"
 )
@@ -48,47 +51,125 @@ that no one else is holding a lock.
 `
 )

-// Lock locks the given state and outputs to the user if locking
-// is taking longer than the threshold. The lock is retried until the context
-// is cancelled.
-func Lock(ctx context.Context, s state.State, info *state.LockInfo, ui cli.Ui, color *colorstring.Colorize) (string, error) {
-	var lockID string
+// Locker allows for more convenient usage of the lower-level state.Locker
+// implementations.
+// The state.Locker API requires passing in a state.LockInfo struct. Locker
+// implementations are expected to create the required LockInfo struct when
+// Lock is called, populate the Operation field with the "reason" string
+// provided, and pass that on to the underlying state.Locker.
+// Locker implementations are also expected to store any state required to call
+// Unlock, which is at a minimum the LockID string returned by the
+// state.Locker.
+type Locker interface {
+	// Lock the provided state manager, storing the reason string in the LockInfo.
+	Lock(s statemgr.Locker, reason string) error
+	// Unlock the previously locked state.
+	// An optional error can be passed in, and will be combined with any error
+	// from the Unlock operation.
+	Unlock(error) error
+}
+
+type locker struct {
+	mu      sync.Mutex
+	ctx     context.Context
+	timeout time.Duration
+	state   statemgr.Locker
+	ui      cli.Ui
+	color   *colorstring.Colorize
+	lockID  string
+}
+
+// Create a new Locker.
+// This Locker uses state.LockWithContext to retry the lock until the provided
+// timeout is reached, or the context is canceled. Lock progress will be
+// reported to the user through the provided UI.
+func NewLocker(
+	ctx context.Context,
+	timeout time.Duration,
+	ui cli.Ui,
+	color *colorstring.Colorize) Locker {
+
+	l := &locker{
+		ctx:     ctx,
+		timeout: timeout,
+		ui:      ui,
+		color:   color,
+	}
+	return l
+}
+
+// Lock locks the given state and outputs to the user if locking is taking
+// longer than the threshold. The lock is retried until the context is
+// cancelled.
+func (l *locker) Lock(s statemgr.Locker, reason string) error { + l.mu.Lock() + defer l.mu.Unlock() + + l.state = s + + ctx, cancel := context.WithTimeout(l.ctx, l.timeout) + defer cancel() + + lockInfo := state.NewLockInfo() + lockInfo.Operation = reason err := slowmessage.Do(LockThreshold, func() error { - id, err := state.LockWithContext(ctx, s, info) - lockID = id + id, err := statemgr.LockWithContext(ctx, s, lockInfo) + l.lockID = id return err }, func() { - if ui != nil { - ui.Output(color.Color(LockMessage)) + if l.ui != nil { + l.ui.Output(l.color.Color(LockMessage)) } }) if err != nil { - err = errwrap.Wrapf(strings.TrimSpace(LockErrorMessage), err) + return errwrap.Wrapf(strings.TrimSpace(LockErrorMessage), err) } - return lockID, err + return nil } -// Unlock unlocks the given state and outputs to the user if the -// unlock fails what can be done. -func Unlock(s state.State, id string, ui cli.Ui, color *colorstring.Colorize) error { +func (l *locker) Unlock(parentErr error) error { + l.mu.Lock() + defer l.mu.Unlock() + + if l.lockID == "" { + return parentErr + } + err := slowmessage.Do(LockThreshold, func() error { - return s.Unlock(id) + return l.state.Unlock(l.lockID) }, func() { - if ui != nil { - ui.Output(color.Color(UnlockMessage)) + if l.ui != nil { + l.ui.Output(l.color.Color(UnlockMessage)) } }) if err != nil { - ui.Output(color.Color(fmt.Sprintf( + l.ui.Output(l.color.Color(fmt.Sprintf( "\n"+strings.TrimSpace(UnlockErrorMessage)+"\n", err))) - err = fmt.Errorf( - "Error releasing the state lock. Please see the longer error message above.") + if parentErr != nil { + parentErr = multierror.Append(parentErr, err) + } } + return parentErr + +} + +type noopLocker struct{} + +// NewNoopLocker returns a valid Locker that does nothing. +func NewNoopLocker() Locker { + return noopLocker{} +} + +func (l noopLocker) Lock(statemgr.Locker, string) error { + return nil +} + +func (l noopLocker) Unlock(err error) error { return err } diff --git a/vendor/github.com/hashicorp/terraform/command/command.go b/vendor/github.com/hashicorp/terraform/command/command.go index 35a85d53..815a6fa6 100644 --- a/vendor/github.com/hashicorp/terraform/command/command.go +++ b/vendor/github.com/hashicorp/terraform/command/command.go @@ -4,9 +4,9 @@ import ( "fmt" "log" "os" + "runtime" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/cli" ) // Set to true when we're testing @@ -15,6 +15,18 @@ var test bool = false // DefaultDataDir is the default directory for storing local data. const DefaultDataDir = ".terraform" +// PluginPathFile is the name of the file in the data dir which stores the list +// of directories supplied by the user with the `-plugin-dir` flag during init. +const PluginPathFile = "plugin_path" + +// pluginMachineName is the directory name used in new plugin paths. +const pluginMachineName = runtime.GOOS + "_" + runtime.GOARCH + +// DefaultPluginVendorDir is the location in the config directory to look for +// user-added plugin binaries. Terraform only reads from this path if it +// exists, it is never created by terraform. +const DefaultPluginVendorDir = "terraform.d/plugins/" + pluginMachineName + // DefaultStateFilename is the default filename used for the state file. const DefaultStateFilename = "terraform.tfstate" @@ -36,10 +48,6 @@ The "backend" in Terraform defines how Terraform operates. The default backend performs all operations locally on your machine. 
Your configuration is configured to use a non-local backend. This backend doesn't support this operation. - -If you want to use the state from the backend but force all other data -(configuration, variables, etc.) to come locally, you can force local -behavior with the "-local" flag. ` // ModulePath returns the path to the root module from the CLI args. @@ -69,39 +77,18 @@ func ModulePath(args []string) (string, error) { return args[0], nil } -func validateContext(ctx *terraform.Context, ui cli.Ui) bool { +func (m *Meta) validateContext(ctx *terraform.Context) bool { log.Println("[INFO] Validating the context...") - ws, es := ctx.Validate() - log.Printf("[INFO] Validation result: %d warnings, %d errors", len(ws), len(es)) + diags := ctx.Validate() + log.Printf("[INFO] Validation result: %d diagnostics", len(diags)) - if len(ws) > 0 || len(es) > 0 { - ui.Output( + if len(diags) > 0 { + m.Ui.Output( "There are warnings and/or errors related to your configuration. Please\n" + "fix these before continuing.\n") - if len(ws) > 0 { - ui.Warn("Warnings:\n") - for _, w := range ws { - ui.Warn(fmt.Sprintf(" * %s", w)) - } - - if len(es) > 0 { - ui.Output("") - } - } - - if len(es) > 0 { - ui.Error("Errors:\n") - for _, e := range es { - ui.Error(fmt.Sprintf(" * %s", e)) - } - return false - } else { - ui.Warn(fmt.Sprintf("\n"+ - "No errors found. Continuing with %d warning(s).\n", len(ws))) - return true - } + m.showDiagnostics(diags) } - return true + return !diags.HasErrors() } diff --git a/vendor/github.com/hashicorp/terraform/command/console.go b/vendor/github.com/hashicorp/terraform/command/console.go index 4a6420b7..cf309621 100644 --- a/vendor/github.com/hashicorp/terraform/command/console.go +++ b/vendor/github.com/hashicorp/terraform/command/console.go @@ -2,12 +2,13 @@ package command import ( "bufio" - "fmt" "strings" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/helper/wrappedstreams" "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) @@ -16,14 +17,15 @@ import ( // configuration and actually builds or changes infrastructure. type ConsoleCommand struct { Meta - - // When this channel is closed, the apply will be cancelled. 
- ShutdownCh <-chan struct{} } func (c *ConsoleCommand) Run(args []string) int { - args = c.Meta.process(args, true) - cmdFlags := c.Meta.flagSet("console") + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + cmdFlags := c.Meta.defaultFlagSet("console") cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { @@ -36,47 +38,97 @@ func (c *ConsoleCommand) Run(args []string) int { return 1 } - // Load the module - mod, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } // Load the backend - b, err := c.Backend(&BackendOpts{ConfigPath: configPath}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } // We require a local backend local, ok := b.(backend.Local) if !ok { + c.showDiagnostics(diags) // in case of any warnings in here c.Ui.Error(ErrUnsupportedLocalOp) return 1 } // Build the operation - opReq := c.Operation() - opReq.Module = mod + opReq := c.Operation(b) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + + { + var moreDiags tfdiags.Diagnostics + opReq.Variables, moreDiags = c.collectVariableValues() + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } // Get the context - ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) + ctx, _, ctxDiags := local.Context(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } + defer func() { + err := opReq.StateLocker.Unlock(nil) + if err != nil { + c.Ui.Error(err.Error()) + } + }() + // Setup the UI so we can output directly to stdout ui := &cli.BasicUi{ Writer: wrappedstreams.Stdout(), ErrorWriter: wrappedstreams.Stderr(), } + // Before we can evaluate expressions, we must compute and populate any + // derived values (input variables, local values, output values) + // that are not stored in the persistent state. + scope, scopeDiags := ctx.Eval(addrs.RootModuleInstance) + diags = diags.Append(scopeDiags) + if scope == nil { + // scope is nil if there are errors so bad that we can't even build a scope. + // Otherwise, we'll try to eval anyway. + c.showDiagnostics(diags) + return 1 + } + if diags.HasErrors() { + diags = diags.Append(tfdiags.SimpleWarning("Due to the problems above, some expressions may produce unexpected results.")) + } + + // Before we become interactive we'll show any diagnostics we encountered + // during initialization, and then afterwards the driver will manage any + // further diagnostics itself. + c.showDiagnostics(diags) + // IO Loop session := &repl.Session{ - Interpolater: ctx.Interpolater(), + Scope: scope, } // Determine if stdin is a pipe. If so, we evaluate directly. 
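+	// (Illustrative aside: with the session holding an evaluation scope,
+	// both modes below drive it through the three-valued Handle API, e.g.
+	//
+	//	result, exit, diags := session.Handle(`join(",", ["a", "b"])`)
+	//
+	// where exit reports an explicit exit request and diags replaces the
+	// old error return.)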
@@ -91,12 +143,15 @@ func (c *ConsoleCommand) modePiped(session *repl.Session, ui cli.Ui) int { var lastResult string scanner := bufio.NewScanner(wrappedstreams.Stdin()) for scanner.Scan() { - // Handle it. If there is an error exit immediately - result, err := session.Handle(strings.TrimSpace(scanner.Text())) - if err != nil { - ui.Error(err.Error()) + result, exit, diags := session.Handle(strings.TrimSpace(scanner.Text())) + if diags.HasErrors() { + // In piped mode we'll exit immediately on error. + c.showDiagnostics(diags) return 1 } + if exit { + return 0 + } // Store the last result lastResult = result @@ -133,8 +188,8 @@ Options: flag can be set multiple times. -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. ` diff --git a/vendor/github.com/hashicorp/terraform/command/console_interactive.go b/vendor/github.com/hashicorp/terraform/command/console_interactive.go index f963528b..f8261bb5 100644 --- a/vendor/github.com/hashicorp/terraform/command/console_interactive.go +++ b/vendor/github.com/hashicorp/terraform/command/console_interactive.go @@ -45,13 +45,12 @@ func (c *ConsoleCommand) modeInteractive(session *repl.Session, ui cli.Ui) int { break } - out, err := session.Handle(line) - if err == repl.ErrSessionExit { - break + out, exit, diags := session.Handle(line) + if diags.HasErrors() { + c.showDiagnostics(diags) } - if err != nil { - ui.Error(err.Error()) - continue + if exit { + break } ui.Output(out) diff --git a/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go b/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go index f6ed214b..5b05e177 100644 --- a/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go +++ b/vendor/github.com/hashicorp/terraform/command/debug_json2dot.go @@ -16,8 +16,11 @@ type DebugJSON2DotCommand struct { } func (c *DebugJSON2DotCommand) Run(args []string) int { - args = c.Meta.process(args, true) - cmdFlags := c.Meta.flagSet("debug json2dot") + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + cmdFlags := c.Meta.extendedFlagSet("debug json2dot") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp diff --git a/vendor/github.com/hashicorp/terraform/command/flag_kv.go b/vendor/github.com/hashicorp/terraform/command/flag_kv.go index b084c513..5d120397 100644 --- a/vendor/github.com/hashicorp/terraform/command/flag_kv.go +++ b/vendor/github.com/hashicorp/terraform/command/flag_kv.go @@ -3,6 +3,11 @@ package command import ( "fmt" "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" ) // FlagStringKV is a flag.Value implementation for parsing user variables @@ -41,3 +46,34 @@ func (v *FlagStringSlice) Set(raw string) error { return nil } + +// FlagTargetSlice is a flag.Value implementation for parsing target addresses +// from the command line, such as -target=aws_instance.foo -target=aws_vpc.bar . 
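+//
+// A hypothetical wiring sketch (the flag set and usage string are assumed,
+// not defined here):
+//
+//	var targets FlagTargetSlice
+//	cmdFlags.Var(&targets, "target", "resource to target")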
+type FlagTargetSlice []addrs.Targetable + +func (v *FlagTargetSlice) String() string { + return "" +} + +func (v *FlagTargetSlice) Set(raw string) error { + // FIXME: This is not an ideal way to deal with this because it requires + // us to do parsing in a context where we can't nicely return errors + // to the user. + + var diags tfdiags.Diagnostics + synthFilename := fmt.Sprintf("-target=%q", raw) + traversal, syntaxDiags := hclsyntax.ParseTraversalAbs([]byte(raw), synthFilename, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(syntaxDiags) + if syntaxDiags.HasErrors() { + return diags.Err() + } + + target, targetDiags := addrs.ParseTarget(traversal) + diags = diags.Append(targetDiags) + if targetDiags.HasErrors() { + return diags.Err() + } + + *v = append(*v, target.Subject) + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/fmt.go b/vendor/github.com/hashicorp/terraform/command/fmt.go index 0b06d7b5..427f5fbd 100644 --- a/vendor/github.com/hashicorp/terraform/command/fmt.go +++ b/vendor/github.com/hashicorp/terraform/command/fmt.go @@ -1,27 +1,39 @@ package command import ( - "flag" + "bytes" "fmt" "io" + "io/ioutil" + "log" "os" + "os/exec" + "path/filepath" "strings" - "github.com/hashicorp/hcl/hcl/fmtcmd" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/hcl2/hclwrite" "github.com/mitchellh/cli" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" ) const ( - stdinArg = "-" - fileExtension = "tf" + stdinArg = "-" ) // FmtCommand is a Command implementation that rewrites Terraform config // files to a canonical format and style. type FmtCommand struct { Meta - opts fmtcmd.Options - input io.Reader // STDIN if nil + list bool + write bool + diff bool + check bool + recursive bool + input io.Reader // STDIN if nil } func (c *FmtCommand) Run(args []string) int { @@ -29,14 +41,18 @@ func (c *FmtCommand) Run(args []string) int { c.input = os.Stdin } - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - cmdFlags := flag.NewFlagSet("fmt", flag.ContinueOnError) - cmdFlags.BoolVar(&c.opts.List, "list", true, "list") - cmdFlags.BoolVar(&c.opts.Write, "write", true, "write") - cmdFlags.BoolVar(&c.opts.Diff, "diff", false, "diff") + cmdFlags := c.Meta.defaultFlagSet("fmt") + cmdFlags.BoolVar(&c.list, "list", true, "list") + cmdFlags.BoolVar(&c.write, "write", true, "write") + cmdFlags.BoolVar(&c.diff, "diff", false, "diff") + cmdFlags.BoolVar(&c.check, "check", false, "check") + cmdFlags.BoolVar(&c.recursive, "recursive", false, "recursive") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { return 1 } @@ -48,43 +64,234 @@ func (c *FmtCommand) Run(args []string) int { return 1 } - var dirs []string + var paths []string if len(args) == 0 { - dirs = []string{"."} + paths = []string{"."} } else if args[0] == stdinArg { - c.opts.List = false - c.opts.Write = false + c.list = false + c.write = false } else { - dirs = []string{args[0]} + paths = []string{args[0]} } - output := &cli.UiWriter{Ui: c.Ui} - err := fmtcmd.Run(dirs, []string{fileExtension}, c.input, output, c.opts) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error running fmt: %s", err)) + var output io.Writer + list := c.list // preserve the original value of -list + if c.check { + // set to true so we can use the list output to check + 
// if the input needs formatting + c.list = true + c.write = false + output = &bytes.Buffer{} + } else { + output = &cli.UiWriter{Ui: c.Ui} + } + + diags := c.fmt(paths, c.input, output) + c.showDiagnostics(diags) + if diags.HasErrors() { return 2 } + if c.check { + buf := output.(*bytes.Buffer) + ok := buf.Len() == 0 + if list { + io.Copy(&cli.UiWriter{Ui: c.Ui}, buf) + } + if ok { + return 0 + } else { + return 3 + } + } + return 0 } +func (c *FmtCommand) fmt(paths []string, stdin io.Reader, stdout io.Writer) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if len(paths) == 0 { // Assuming stdin, then. + if c.write { + diags = diags.Append(fmt.Errorf("Option -write cannot be used when reading from stdin")) + return diags + } + fileDiags := c.processFile("", stdin, stdout, true) + diags = diags.Append(fileDiags) + return diags + } + + for _, path := range paths { + path = c.normalizePath(path) + info, err := os.Stat(path) + if err != nil { + diags = diags.Append(fmt.Errorf("No file or directory at %s", path)) + return diags + } + if info.IsDir() { + dirDiags := c.processDir(path, stdout) + diags = diags.Append(dirDiags) + } else { + switch filepath.Ext(path) { + case ".tf", ".tfvars": + f, err := os.Open(path) + if err != nil { + // Open does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Failed to read file %s", path)) + continue + } + + fileDiags := c.processFile(c.normalizePath(path), f, stdout, false) + diags = diags.Append(fileDiags) + f.Close() + default: + diags = diags.Append(fmt.Errorf("Only .tf and .tfvars files can be processed with terraform fmt")) + continue + } + } + } + + return diags +} + +func (c *FmtCommand) processFile(path string, r io.Reader, w io.Writer, isStdout bool) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] terraform fmt: Formatting %s", path) + + src, err := ioutil.ReadAll(r) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to read %s", path)) + return diags + } + + // File must be parseable as HCL native syntax before we'll try to format + // it. If not, the formatter is likely to make drastic changes that would + // be hard for the user to undo. 
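+	// (Illustrative aside: the formatting primitive used below is simply
+	//
+	//	result := hclwrite.Format(src)
+	//
+	// which canonicalizes layout only; parsing first guards against
+	// mangling files that are not valid HCL native syntax.)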
+ _, syntaxDiags := hclsyntax.ParseConfig(src, path, hcl.Pos{Line: 1, Column: 1}) + if syntaxDiags.HasErrors() { + diags = diags.Append(syntaxDiags) + return diags + } + + result := hclwrite.Format(src) + + if !bytes.Equal(src, result) { + // Something was changed + if c.list { + fmt.Fprintln(w, path) + } + if c.write { + err := ioutil.WriteFile(path, result, 0644) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to write %s", path)) + return diags + } + } + if c.diff { + diff, err := bytesDiff(src, result, path) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to generate diff for %s: %s", path, err)) + return diags + } + w.Write(diff) + } + } + + if !c.list && !c.write && !c.diff { + _, err = w.Write(result) + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to write result")) + } + } + + return diags +} + +func (c *FmtCommand) processDir(path string, stdout io.Writer) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] terraform fmt: looking for files in %s", path) + + entries, err := ioutil.ReadDir(path) + if err != nil { + switch { + case os.IsNotExist(err): + diags = diags.Append(fmt.Errorf("There is no configuration directory at %s", path)) + default: + // ReadDir does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Cannot read directory %s", path)) + } + return diags + } + + for _, info := range entries { + name := info.Name() + if configs.IsIgnoredFile(name) { + continue + } + subPath := filepath.Join(path, name) + if info.IsDir() { + if c.recursive { + subDiags := c.processDir(subPath, stdout) + diags = diags.Append(subDiags) + } + + // We do not recurse into child directories by default because we + // want to mimic the file-reading behavior of "terraform plan", etc, + // operating on one module at a time. + continue + } + + ext := filepath.Ext(name) + switch ext { + case ".tf", ".tfvars": + f, err := os.Open(subPath) + if err != nil { + // Open does not produce error messages that are end-user-appropriate, + // so we'll need to simplify here. + diags = diags.Append(fmt.Errorf("Failed to read file %s", subPath)) + continue + } + + fileDiags := c.processFile(c.normalizePath(subPath), f, stdout, false) + diags = diags.Append(fileDiags) + f.Close() + } + } + + return diags +} + func (c *FmtCommand) Help() string { helpText := ` Usage: terraform fmt [options] [DIR] - Rewrites all Terraform configuration files to a canonical format. + Rewrites all Terraform configuration files to a canonical format. Both + configuration files (.tf) and variables files (.tfvars) are updated. + JSON files (.tf.json or .tfvars.json) are not modified. If DIR is not specified then the current working directory will be used. - If DIR is "-" then content will be read from STDIN. + If DIR is "-" then content will be read from STDIN. The given content must + be in the Terraform language native syntax; JSON is not supported. Options: - -list=true List files whose formatting differs (always false if using STDIN) + -list=false Don't list files whose formatting differs + (always disabled if using STDIN) + + -write=false Don't write to source files + (always disabled if using STDIN or -check) - -write=true Write result to source file instead of STDOUT (always false if using STDIN) + -diff Display diffs of formatting changes - -diff=false Display diffs of formatting changes + -check Check if the input is formatted. 
Exit status will be 0 if all + input is properly formatted and non-zero otherwise. + -recursive Also process files in subdirectories. By default, only the + given directory (or current directory) is processed. ` return strings.TrimSpace(helpText) } @@ -92,3 +299,30 @@ Options: func (c *FmtCommand) Synopsis() string { return "Rewrites config files to canonical format" } + +func bytesDiff(b1, b2 []byte, path string) (data []byte, err error) { + f1, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f1.Name()) + defer f1.Close() + + f2, err := ioutil.TempFile("", "") + if err != nil { + return + } + defer os.Remove(f2.Name()) + defer f2.Close() + + f1.Write(b1) + f2.Write(b2) + + data, err = exec.Command("diff", "--label=old/"+path, "--label=new/"+path, "-u", f1.Name(), f2.Name()).CombinedOutput() + if len(data) > 0 { + // diff exits with a non-zero status when the files don't match. + // Ignore that failure as long as we get output. + err = nil + } + return +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go new file mode 100644 index 00000000..3dd9238d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go @@ -0,0 +1,295 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcled" + "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/colorstring" + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/zclconf/go-cty/cty" +) + +// Diagnostic formats a single diagnostic message. +// +// The width argument specifies at what column the diagnostic messages will +// be wrapped. If set to zero, messages will not be wrapped by this function +// at all. Although the long-form text parts of the message are wrapped, +// not all aspects of the message are guaranteed to fit within the specified +// terminal width. +func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string { + if diag == nil { + // No good reason to pass a nil diagnostic in here... + return "" + } + + var buf bytes.Buffer + + switch diag.Severity() { + case tfdiags.Error: + buf.WriteString(color.Color("\n[bold][red]Error: [reset]")) + case tfdiags.Warning: + buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]")) + default: + // Clear out any coloring that might be applied by Terraform's UI helper, + // so our result is not context-sensitive. + buf.WriteString(color.Color("\n[reset]")) + } + + desc := diag.Description() + sourceRefs := diag.Source() + + // We don't wrap the summary, since we expect it to be terse, and since + // this is where we put the text of a native Go error it may not always + // be pure text that lends itself well to word-wrapping. + fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary) + + if sourceRefs.Subject != nil { + // We'll borrow HCL's range implementation here, because it has some + // handy features to help us produce a nice source code snippet. + highlightRange := sourceRefs.Subject.ToHCL() + snippetRange := highlightRange + if sourceRefs.Context != nil { + snippetRange = sourceRefs.Context.ToHCL() + } + + // Make sure the snippet includes the highlight. This should be true + // for any reasonable diagnostic, but we'll make sure. 
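+		// (hcl.RangeOver returns the smallest range that covers both of
+		// its arguments, so e.g. a context range of lines 2-3 and a
+		// subject on line 5 widen the snippet to lines 2-5.)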
+ snippetRange = hcl.RangeOver(snippetRange, highlightRange) + if snippetRange.Empty() { + snippetRange.End.Byte++ + snippetRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } + + var src []byte + if sources != nil { + src = sources[snippetRange.Filename] + } + if src == nil { + // This should generally not happen, as long as sources are always + // loaded through the main loader. We may load things in other + // ways in weird cases, so we'll tolerate it at the expense of + // a not-so-helpful error message. + fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line) + } else { + file, offset := parseRange(src, highlightRange) + + headerRange := highlightRange + + contextStr := hcled.ContextString(file, offset-1) + if contextStr != "" { + contextStr = ", in " + contextStr + } + + fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr) + + // Config snippet rendering + sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines) + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snippetRange) { + continue + } + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + &buf, color.Color("%4d: %s[underline]%s[reset]%s\n"), + lineRange.Start.Line, + before, highlighted, after, + ) + } + + } + + if fromExpr := diag.FromExpr(); fromExpr != nil { + // We may also be able to generate information about the dynamic + // values of relevant variables at the point of evaluation, then. + // This is particularly useful for expressions that get evaluated + // multiple times with different values, such as blocks using + // "count" and "for_each", or within "for" expressions. + expr := fromExpr.Expression + ctx := fromExpr.EvalContext + vars := expr.Variables() + stmts := make([]string, 0, len(vars)) + seen := make(map[string]struct{}, len(vars)) + Traversals: + for _, traversal := range vars { + for len(traversal) > 1 { + val, diags := traversal.TraverseAbs(ctx) + if diags.HasErrors() { + // Skip anything that generates errors, since we probably + // already have the same error in our diagnostics set + // already. + traversal = traversal[:len(traversal)-1] + continue + } + + traversalStr := traversalStr(traversal) + if _, exists := seen[traversalStr]; exists { + continue Traversals // don't show duplicates when the same variable is referenced multiple times + } + switch { + case !val.IsKnown(): + // Can't say anything about this yet, then. + continue Traversals + case val.IsNull(): + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr)) + default: + stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val))) + } + seen[traversalStr] = struct{}{} + } + } + + sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly? 
+ + if len(stmts) > 0 { + fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n")) + } + for _, stmt := range stmts { + fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt) + } + } + + buf.WriteByte('\n') + } + + if desc.Detail != "" { + detail := desc.Detail + if width != 0 { + detail = wordwrap.WrapString(detail, uint(width)) + } + fmt.Fprintf(&buf, "%s\n", detail) + } + + return buf.String() +} + +func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) { + filename := rng.Filename + offset := rng.Start.Byte + + // We need to re-parse here to get a *hcl.File we can interrogate. This + // is not awesome since we presumably already parsed the file earlier too, + // but this re-parsing is architecturally simpler than retaining all of + // the hcl.File objects and we only do this in the case of an error anyway + // so the overhead here is not a big problem. + parser := hclparse.NewParser() + var file *hcl.File + var diags hcl.Diagnostics + if strings.HasSuffix(filename, ".json") { + file, diags = parser.ParseJSON(src, filename) + } else { + file, diags = parser.ParseHCL(src, filename) + } + if diags.HasErrors() { + return file, offset + } + + return file, offset +} + +// traversalStr produces a representation of an HCL traversal that is compact, +// resembles HCL native syntax, and is suitable for display in the UI. +func traversalStr(traversal hcl.Traversal) string { + // This is a specialized subset of traversal rendering tailored to + // producing helpful contextual messages in diagnostics. It is not + // comprehensive nor intended to be used for other purposes. + + var buf bytes.Buffer + for _, step := range traversal { + switch tStep := step.(type) { + case hcl.TraverseRoot: + buf.WriteString(tStep.Name) + case hcl.TraverseAttr: + buf.WriteByte('.') + buf.WriteString(tStep.Name) + case hcl.TraverseIndex: + buf.WriteByte('[') + if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() { + buf.WriteString(compactValueStr(tStep.Key)) + } else { + // We'll just use a placeholder for more complex values, + // since otherwise our result could grow ridiculously long. + buf.WriteString("...") + } + buf.WriteByte(']') + } + } + return buf.String() +} + +// compactValueStr produces a compact, single-line summary of a given value +// that is suitable for display in the UI. +// +// For primitives it returns a full representation, while for more complex +// types it instead summarizes the type, size, etc to produce something +// that is hopefully still somewhat useful but not as verbose as a rendering +// of the entire data structure. +func compactValueStr(val cty.Value) string { + // This is a specialized subset of value rendering tailored to producing + // helpful but concise messages in diagnostics. It is not comprehensive + // nor intended to be used for other purposes. + + ty := val.Type() + switch { + case val.IsNull(): + return "null" + case !val.IsKnown(): + // Should never happen here because we should filter before we get + // in here, but we'll do something reasonable rather than panic. + return "(not yet known)" + case ty == cty.Bool: + if val.True() { + return "true" + } + return "false" + case ty == cty.Number: + bf := val.AsBigFloat() + return bf.Text('g', 10) + case ty == cty.String: + // Go string syntax is not exactly the same as HCL native string syntax, + // but we'll accept the minor edge-cases where this is different here + // for now, just to get something reasonable here. 
+ return fmt.Sprintf("%q", val.AsString()) + case ty.IsCollectionType() || ty.IsTupleType(): + l := val.LengthInt() + switch l { + case 0: + return "empty " + ty.FriendlyName() + case 1: + return ty.FriendlyName() + " with 1 element" + default: + return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l) + } + case ty.IsObjectType(): + atys := ty.AttributeTypes() + l := len(atys) + switch l { + case 0: + return "object with no attributes" + case 1: + var name string + for k := range atys { + name = k + } + return fmt.Sprintf("object with 1 attribute %q", name) + default: + return fmt.Sprintf("object with %d attributes", l) + } + default: + return ty.FriendlyName() + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/diff.go b/vendor/github.com/hashicorp/terraform/command/format/diff.go new file mode 100644 index 00000000..c726f0ed --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/diff.go @@ -0,0 +1,1192 @@ +package format + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strings" + + "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/states" +) + +// ResourceChange returns a string representation of a change to a particular +// resource, for inclusion in user-facing plan output. +// +// The resource schema must be provided along with the change so that the +// formatted change can reflect the configuration structure for the associated +// resource. +// +// If "color" is non-nil, it will be used to color the result. Otherwise, +// no color codes will be included. 
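+//
+// A minimal call sketch (changeSrc and schema are assumed values obtained
+// from a decoded plan; passing a nil color disables coloring):
+//
+//	out := format.ResourceChange(changeSrc, false, schema, nil)
+//	fmt.Print(out)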
+func ResourceChange( + change *plans.ResourceInstanceChangeSrc, + tainted bool, + schema *configschema.Block, + color *colorstring.Colorize, +) string { + addr := change.Addr + var buf bytes.Buffer + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, + Reset: false, + } + } + + dispAddr := addr.String() + if change.DeposedKey != states.NotDeposed { + dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey) + } + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr))) + case plans.Read: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr))) + case plans.Update: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr))) + case plans.CreateThenDelete, plans.DeleteThenCreate: + if tainted { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr))) + } else { + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr))) + } + case plans.Delete: + buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr))) + default: + // should never happen, since the above is exhaustive + buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr)) + } + buf.WriteString(color.Color("[reset]\n")) + + switch change.Action { + case plans.Create: + buf.WriteString(color.Color("[green] +[reset] ")) + case plans.Read: + buf.WriteString(color.Color("[cyan] <=[reset] ")) + case plans.Update: + buf.WriteString(color.Color("[yellow] ~[reset] ")) + case plans.DeleteThenCreate: + buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] ")) + case plans.CreateThenDelete: + buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] ")) + case plans.Delete: + buf.WriteString(color.Color("[red] -[reset] ")) + default: + buf.WriteString(color.Color("??? ")) + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + buf.WriteString(fmt.Sprintf( + "resource %q %q", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + case addrs.DataResourceMode: + buf.WriteString(fmt.Sprintf( + "data %q %q ", + addr.Resource.Resource.Type, + addr.Resource.Resource.Name, + )) + default: + // should never happen, since the above is exhaustive + buf.WriteString(addr.String()) + } + + buf.WriteString(" {") + + p := blockBodyDiffPrinter{ + buf: &buf, + color: color, + action: change.Action, + requiredReplace: change.RequiredReplace, + } + + // Most commonly-used resources have nested blocks that result in us + // going at least three traversals deep while we recurse here, so we'll + // start with that much capacity and then grow as needed for deeper + // structures. + path := make(cty.Path, 0, 3) + + changeV, err := change.Decode(schema.ImpliedType()) + if err != nil { + // Should never happen in here, since we've already been through + // loads of layers of encode/decode of the planned changes before now. + panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err)) + } + + // We currently have an opt-out that permits the legacy SDK to return values + // that defy our usual conventions around handling of nesting blocks. 
To + // avoid the rendering code from needing to handle all of these, we'll + // normalize first. + // (Ideally we'd do this as part of the SDK opt-out implementation in core, + // but we've added it here for now to reduce risk of unexpected impacts + // on other code in core.) + changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema) + changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema) + + bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path) + if bodyWritten { + buf.WriteString("\n") + buf.WriteString(strings.Repeat(" ", 4)) + } + buf.WriteString("}\n") + + return buf.String() +} + +type blockBodyDiffPrinter struct { + buf *bytes.Buffer + color *colorstring.Colorize + action plans.Action + requiredReplace cty.PathSet +} + +const forcesNewResourceCaption = " [red]# forces replacement[reset]" + +// writeBlockBodyDiff writes attribute or block differences +// and returns true if any differences were found and written +func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool { + path = ctyEnsurePathCapacity(path, 1) + + bodyWritten := false + blankBeforeBlocks := false + { + attrNames := make([]string, 0, len(schema.Attributes)) + attrNameLen := 0 + for name := range schema.Attributes { + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + if oldVal.IsNull() && newVal.IsNull() { + // Skip attributes where both old and new values are null + // (we do this early here so that we'll do our value alignment + // based on the longest attribute name that has a change, rather + // than the longest attribute name in the full set.) + continue + } + + attrNames = append(attrNames, name) + if len(name) > attrNameLen { + attrNameLen = len(name) + } + } + sort.Strings(attrNames) + if len(attrNames) > 0 { + blankBeforeBlocks = true + } + + for _, name := range attrNames { + attrS := schema.Attributes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path) + } + } + + { + blockTypeNames := make([]string, 0, len(schema.BlockTypes)) + for name := range schema.BlockTypes { + blockTypeNames = append(blockTypeNames, name) + } + sort.Strings(blockTypeNames) + + for _, name := range blockTypeNames { + blockS := schema.BlockTypes[name] + oldVal := ctyGetAttrMaybeNull(old, name) + newVal := ctyGetAttrMaybeNull(new, name) + + bodyWritten = true + p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path) + + // Always include a blank for any subsequent block types. 
+			blankBeforeBlocks = true
+		}
+	}
+
+	return bodyWritten
+}
+
+func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) {
+	path = append(path, cty.GetAttrStep{Name: name})
+	p.buf.WriteString("\n")
+	p.buf.WriteString(strings.Repeat(" ", indent))
+	showJustNew := false
+	var action plans.Action
+	switch {
+	case old.IsNull():
+		action = plans.Create
+		showJustNew = true
+	case new.IsNull():
+		action = plans.Delete
+	case ctyEqualWithUnknown(old, new):
+		action = plans.NoOp
+		showJustNew = true
+	default:
+		action = plans.Update
+	}
+
+	p.writeActionSymbol(action)
+
+	p.buf.WriteString(p.color.Color("[bold]"))
+	p.buf.WriteString(name)
+	p.buf.WriteString(p.color.Color("[reset]"))
+	p.buf.WriteString(strings.Repeat(" ", nameLen-len(name)))
+	p.buf.WriteString(" = ")
+
+	if attrS.Sensitive {
+		p.buf.WriteString("(sensitive value)")
+	} else {
+		switch {
+		case showJustNew:
+			p.writeValue(new, action, indent+2)
+			if p.pathForcesNewResource(path) {
+				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+			}
+		default:
+			// We show new even if it is null to emphasize the fact
+			// that it is being unset, since otherwise it is easy to
+			// misunderstand that the value is still set to the old value.
+			p.writeValueDiff(old, new, indent+2, path)
+		}
+	}
+}
+
+func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) {
+	path = append(path, cty.GetAttrStep{Name: name})
+	if old.IsNull() && new.IsNull() {
+		// Nothing to do if both old and new are null
+		return
+	}
+
+	// Where old/new are collections representing a nesting mode other than
+	// NestingSingle, we assume the collection value can never be unknown
+	// since we always produce the container for the nested objects, even if
+	// the objects within are computed.
+
+	switch blockS.Nesting {
+	case configschema.NestingSingle, configschema.NestingGroup:
+		var action plans.Action
+		eqV := new.Equals(old)
+		switch {
+		case old.IsNull():
+			action = plans.Create
+		case new.IsNull():
+			action = plans.Delete
+		case !new.IsWhollyKnown() || !old.IsWhollyKnown():
+			// "old" should actually always be known due to our contract
+			// that old values must never be unknown, but we'll allow it
+			// anyway to be robust.
+			action = plans.Update
+		case !eqV.IsKnown() || !eqV.True():
+			action = plans.Update
+		}
+
+		if blankBefore {
+			p.buf.WriteRune('\n')
+		}
+		p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path)
+	case configschema.NestingList:
+		// For the sake of handling nested blocks, we'll treat a null list
+		// the same as an empty list since the config language doesn't
+		// distinguish these anyway.
+		old = ctyNullBlockListAsEmpty(old)
+		new = ctyNullBlockListAsEmpty(new)
+
+		oldItems := ctyCollectionValues(old)
+		newItems := ctyCollectionValues(new)
+
+		// Here we intentionally preserve the index-based correspondence
+		// between old and new, rather than trying to detect insertions
+		// and removals in the list, because this more accurately reflects
+		// how Terraform Core and providers will understand the change,
+		// particularly when the nested block contains computed attributes
+		// that will themselves maintain correspondence by index.
+
+		// commonLen is the number of elements that exist in both lists, which
+		// will be presented as updates (~).
Any additional items in one + // of the lists will be presented as either creates (+) or deletes (-) + // depending on which list they belong to. + var commonLen int + switch { + case len(oldItems) < len(newItems): + commonLen = len(oldItems) + default: + commonLen = len(newItems) + } + + if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) { + p.buf.WriteRune('\n') + } + + for i := 0; i < commonLen; i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := newItems[i] + action := plans.Update + if oldItem.RawEquals(newItem) { + action = plans.NoOp + } + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(oldItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + oldItem := oldItems[i] + newItem := cty.NullVal(oldItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path) + } + for i := commonLen; i < len(newItems); i++ { + path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))}) + newItem := newItems[i] + oldItem := cty.NullVal(newItem.Type()) + p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path) + } + case configschema.NestingSet: + // For the sake of handling nested blocks, we'll treat a null set + // the same as an empty set since the config language doesn't + // distinguish these anyway. + old = ctyNullBlockSetAsEmpty(old) + new = ctyNullBlockSetAsEmpty(new) + + oldItems := ctyCollectionValues(old) + newItems := ctyCollectionValues(new) + + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both sets are empty + return + } + + allItems := make([]cty.Value, 0, len(oldItems)+len(newItems)) + allItems = append(allItems, oldItems...) + allItems = append(allItems, newItems...) + all := cty.SetVal(allItems) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + var action plans.Action + var oldValue, newValue cty.Value + switch { + case !val.IsKnown(): + action = plans.Update + newValue = val + case !old.HasElement(val).True(): + action = plans.Create + oldValue = cty.NullVal(val.Type()) + newValue = val + case !new.HasElement(val).True(): + action = plans.Delete + oldValue = val + newValue = cty.NullVal(val.Type()) + default: + action = plans.NoOp + oldValue = val + newValue = val + } + path := append(path, cty.IndexStep{Key: val}) + p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path) + } + + case configschema.NestingMap: + // For the sake of handling nested blocks, we'll treat a null map + // the same as an empty map since the config language doesn't + // distinguish these anyway. 
+ old = ctyNullBlockMapAsEmpty(old) + new = ctyNullBlockMapAsEmpty(new) + + oldItems := old.AsValueMap() + newItems := new.AsValueMap() + if (len(oldItems) + len(newItems)) == 0 { + // Nothing to do if both maps are empty + return + } + + allKeys := make(map[string]bool) + for k := range oldItems { + allKeys[k] = true + } + for k := range newItems { + allKeys[k] = true + } + allKeysOrder := make([]string, 0, len(allKeys)) + for k := range allKeys { + allKeysOrder = append(allKeysOrder, k) + } + sort.Strings(allKeysOrder) + + if blankBefore { + p.buf.WriteRune('\n') + } + + for _, k := range allKeysOrder { + var action plans.Action + oldValue := oldItems[k] + newValue := newItems[k] + switch { + case oldValue == cty.NilVal: + oldValue = cty.NullVal(newValue.Type()) + action = plans.Create + case newValue == cty.NilVal: + newValue = cty.NullVal(oldValue.Type()) + action = plans.Delete + case !newValue.RawEquals(oldValue): + action = plans.Update + default: + action = plans.NoOp + } + + path := append(path, cty.IndexStep{Key: cty.StringVal(k)}) + p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path) + } + } +} + +func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + p.writeActionSymbol(action) + + if label != nil { + fmt.Fprintf(p.buf, "%s %q {", name, *label) + } else { + fmt.Fprintf(p.buf, "%s {", name) + } + + if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + + bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path) + if bodyWritten { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + } + p.buf.WriteString("}") +} + +func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) { + if !val.IsKnown() { + p.buf.WriteString("(known after apply)") + return + } + if val.IsNull() { + p.buf.WriteString(p.color.Color("[dark_gray]null[reset]")) + return + } + + ty := val.Type() + + switch { + case ty.IsPrimitiveType(): + switch ty { + case cty.String: + { + // Special behavior for JSON strings containing array or object + src := []byte(val.AsString()) + ty, err := ctyjson.ImpliedType(src) + // check for the special case of "null", which decodes to nil, + // and just allow it to be printed out directly + if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" { + jv, err := ctyjson.Unmarshal(src, ty) + if err == nil { + p.buf.WriteString("jsonencode(") + if jv.LengthInt() == 0 { + p.writeValue(jv, action, 0) + } else { + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent+4)) + p.writeValue(jv, action, indent+4) + p.buf.WriteByte('\n') + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteByte(')') + break // don't *also* do the normal behavior below + } + } + } + fmt.Fprintf(p.buf, "%q", val.AsString()) + case cty.Bool: + if val.True() { + p.buf.WriteString("true") + } else { + p.buf.WriteString("false") + } + case cty.Number: + bf := val.AsBigFloat() + p.buf.WriteString(bf.Text('f', -1)) + default: + // should never happen, since the above is exhaustive + fmt.Fprintf(p.buf, "%#v", val) + } + case ty.IsListType() || ty.IsSetType() || ty.IsTupleType(): + p.buf.WriteString("[") + + it := val.ElementIterator() + for it.Next() { + _, val 
:= it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",") + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("]") + case ty.IsMapType(): + p.buf.WriteString("{") + + keyLen := 0 + for it := val.ElementIterator(); it.Next(); { + key, _ := it.Element() + if keyStr := key.AsString(); len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + for it := val.ElementIterator(); it.Next(); { + key, val := it.Element() + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.writeValue(key, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString()))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if val.LengthInt() > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + case ty.IsObjectType(): + p.buf.WriteString("{") + + atys := ty.AttributeTypes() + attrNames := make([]string, 0, len(atys)) + nameLen := 0 + for attrName := range atys { + attrNames = append(attrNames, attrName) + if len(attrName) > nameLen { + nameLen = len(attrName) + } + } + sort.Strings(attrNames) + + for _, attrName := range attrNames { + val := val.GetAttr(attrName) + + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(action) + p.buf.WriteString(attrName) + p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName))) + p.buf.WriteString(" = ") + p.writeValue(val, action, indent+4) + } + + if len(attrNames) > 0 { + p.buf.WriteString("\n") + p.buf.WriteString(strings.Repeat(" ", indent)) + } + p.buf.WriteString("}") + } +} + +func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) { + ty := old.Type() + typesEqual := ctyTypesEqual(ty, new.Type()) + + // We have some specialized diff implementations for certain complex + // values where it's useful to see a visualization of the diff of + // the nested elements rather than just showing the entire old and + // new values verbatim. + // However, these specialized implementations can apply only if both + // values are known and non-null. + if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual { + switch { + case ty == cty.String: + // We have special behavior for both multi-line strings in general + // and for strings that can parse as JSON. For the JSON handling + // to apply, both old and new must be valid JSON. + // For single-line strings that don't parse as JSON we just fall + // out of this switch block and do the default old -> new rendering. + oldS := old.AsString() + newS := new.AsString() + + { + // Special behavior for JSON strings containing object or + // list values. 
+				oldBytes := []byte(oldS)
+				newBytes := []byte(newS)
+				oldType, oldErr := ctyjson.ImpliedType(oldBytes)
+				newType, newErr := ctyjson.ImpliedType(newBytes)
+				if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) {
+					oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType)
+					newJV, newErr := ctyjson.Unmarshal(newBytes, newType)
+					if oldErr == nil && newErr == nil {
+						if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace
+							p.buf.WriteString("jsonencode(")
+							p.buf.WriteByte('\n')
+							p.buf.WriteString(strings.Repeat(" ", indent+2))
+							p.writeActionSymbol(plans.Update)
+							p.writeValueDiff(oldJV, newJV, indent+4, path)
+							p.buf.WriteByte('\n')
+							p.buf.WriteString(strings.Repeat(" ", indent))
+							p.buf.WriteByte(')')
+						} else {
+							// if they differ only in insignificant whitespace
+							// then we'll note that but still expand out the
+							// effective value.
+							if p.pathForcesNewResource(path) {
+								p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]"))
+							} else {
+								p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]"))
+							}
+							p.buf.WriteByte('\n')
+							p.buf.WriteString(strings.Repeat(" ", indent+4))
+							p.writeValue(oldJV, plans.NoOp, indent+4)
+							p.buf.WriteByte('\n')
+							p.buf.WriteString(strings.Repeat(" ", indent))
+							p.buf.WriteByte(')')
+						}
+						return
+					}
+				}
+			}
+
+			if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 {
+				break
+			}
+
+			p.buf.WriteString("<<~EOT")
+			if p.pathForcesNewResource(path) {
+				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+			}
+			p.buf.WriteString("\n")
+
+			var oldLines, newLines []cty.Value
+			{
+				r := strings.NewReader(oldS)
+				sc := bufio.NewScanner(r)
+				for sc.Scan() {
+					oldLines = append(oldLines, cty.StringVal(sc.Text()))
+				}
+			}
+			{
+				r := strings.NewReader(newS)
+				sc := bufio.NewScanner(r)
+				for sc.Scan() {
+					newLines = append(newLines, cty.StringVal(sc.Text()))
+				}
+			}
+
+			diffLines := ctySequenceDiff(oldLines, newLines)
+			for _, diffLine := range diffLines {
+				p.buf.WriteString(strings.Repeat(" ", indent+2))
+				p.writeActionSymbol(diffLine.Action)
+
+				switch diffLine.Action {
+				case plans.NoOp, plans.Delete:
+					p.buf.WriteString(diffLine.Before.AsString())
+				case plans.Create:
+					p.buf.WriteString(diffLine.After.AsString())
+				default:
+					// Should never happen since the above covers all
+					// actions that ctySequenceDiff can return for strings
+					p.buf.WriteString(diffLine.After.AsString())
+
+				}
+				p.buf.WriteString("\n")
+			}
+
+			p.buf.WriteString(strings.Repeat(" ", indent)) // +4 here because there's no symbol
+			p.buf.WriteString("EOT")
+
+			return
+
+		case ty.IsSetType():
+			p.buf.WriteString("[")
+			if p.pathForcesNewResource(path) {
+				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
+			}
+			p.buf.WriteString("\n")
+
+			var addedVals, removedVals, allVals []cty.Value
+			for it := old.ElementIterator(); it.Next(); {
+				_, val := it.Element()
+				allVals = append(allVals, val)
+				if new.HasElement(val).False() {
+					removedVals = append(removedVals, val)
+				}
+			}
+			for it := new.ElementIterator(); it.Next(); {
+				_, val := it.Element()
+				allVals = append(allVals, val)
+				if val.IsKnown() && old.HasElement(val).False() {
+					addedVals = append(addedVals, val)
+				}
+			}
+
+			var all, added, removed cty.Value
+			if len(allVals) > 0 {
+				all = cty.SetVal(allVals)
+			} else {
+				all = cty.SetValEmpty(ty.ElementType())
+			}
+			if len(addedVals) > 0 {
+				added = cty.SetVal(addedVals)
+			} else {
+				added = cty.SetValEmpty(ty.ElementType())
+			}
+
if len(removedVals) > 0 { + removed = cty.SetVal(removedVals) + } else { + removed = cty.SetValEmpty(ty.ElementType()) + } + + for it := all.ElementIterator(); it.Next(); { + _, val := it.Element() + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + + var action plans.Action + switch { + case !val.IsKnown(): + action = plans.Update + case added.HasElement(val).True(): + action = plans.Create + case removed.HasElement(val).True(): + action = plans.Delete + default: + action = plans.NoOp + } + + p.writeActionSymbol(action) + p.writeValue(val, action, indent+4) + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + case ty.IsListType() || ty.IsTupleType(): + p.buf.WriteString("[") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice()) + for _, elemDiff := range elemDiffs { + p.buf.WriteString(strings.Repeat(" ", indent+2)) + p.writeActionSymbol(elemDiff.Action) + switch elemDiff.Action { + case plans.NoOp, plans.Delete: + p.writeValue(elemDiff.Before, elemDiff.Action, indent+4) + case plans.Update: + p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path) + case plans.Create: + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + default: + // Should never happen since the above covers all + // actions that ctySequenceDiff can return. + p.writeValue(elemDiff.After, elemDiff.Action, indent+4) + } + + p.buf.WriteString(",\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("]") + return + + case ty.IsMapType(): + p.buf.WriteString("{") + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + p.buf.WriteString("\n") + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := cty.StringVal(k) + var action plans.Action + if old.HasIndex(kV).False() { + action = plans.Create + } else if new.HasIndex(kV).False() { + action = plans.Delete + } else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.IndexStep{Key: kV}) + + p.writeActionSymbol(action) + p.writeValue(kV, action, indent+4) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + switch action { + case plans.Create, plans.NoOp: + v := new.Index(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.Index(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.Index(kV) + newV := new.Index(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteByte('\n') + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + return + case ty.IsObjectType(): + p.buf.WriteString("{") + p.buf.WriteString("\n") + + forcesNewResource := 
p.pathForcesNewResource(path) + + var allKeys []string + keyLen := 0 + for it := old.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + for it := new.ElementIterator(); it.Next(); { + k, _ := it.Element() + keyStr := k.AsString() + allKeys = append(allKeys, keyStr) + if len(keyStr) > keyLen { + keyLen = len(keyStr) + } + } + + sort.Strings(allKeys) + + lastK := "" + for i, k := range allKeys { + if i > 0 && lastK == k { + continue // skip duplicates (list is sorted) + } + lastK = k + + p.buf.WriteString(strings.Repeat(" ", indent+2)) + kV := k + var action plans.Action + if !old.Type().HasAttribute(kV) { + action = plans.Create + } else if !new.Type().HasAttribute(kV) { + action = plans.Delete + } else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() { + action = plans.NoOp + } else { + action = plans.Update + } + + path := append(path, cty.GetAttrStep{Name: kV}) + + p.writeActionSymbol(action) + p.buf.WriteString(k) + p.buf.WriteString(strings.Repeat(" ", keyLen-len(k))) + p.buf.WriteString(" = ") + + switch action { + case plans.Create, plans.NoOp: + v := new.GetAttr(kV) + p.writeValue(v, action, indent+4) + case plans.Delete: + oldV := old.GetAttr(kV) + newV := cty.NullVal(oldV.Type()) + p.writeValueDiff(oldV, newV, indent+4, path) + default: + oldV := old.GetAttr(kV) + newV := new.GetAttr(kV) + p.writeValueDiff(oldV, newV, indent+4, path) + } + + p.buf.WriteString("\n") + } + + p.buf.WriteString(strings.Repeat(" ", indent)) + p.buf.WriteString("}") + + if forcesNewResource { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } + return + } + } + + // In all other cases, we just show the new and old values as-is + p.writeValue(old, plans.Delete, indent) + if new.IsNull() { + p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] ")) + } else { + p.buf.WriteString(p.color.Color(" [yellow]->[reset] ")) + } + + p.writeValue(new, plans.Create, indent) + if p.pathForcesNewResource(path) { + p.buf.WriteString(p.color.Color(forcesNewResourceCaption)) + } +} + +// writeActionSymbol writes a symbol to represent the given action, followed +// by a space. +// +// It only supports the actions that can be represented with a single character: +// Create, Delete, Update and NoOp. +func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) { + switch action { + case plans.Create: + p.buf.WriteString(p.color.Color("[green]+[reset] ")) + case plans.Delete: + p.buf.WriteString(p.color.Color("[red]-[reset] ")) + case plans.Update: + p.buf.WriteString(p.color.Color("[yellow]~[reset] ")) + case plans.NoOp: + p.buf.WriteString(" ") + default: + // Should never happen + p.buf.WriteString(p.color.Color("? ")) + } +} + +func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool { + if !p.action.IsReplace() { + // "requiredReplace" only applies when the instance is being replaced + return false + } + return p.requiredReplace.Has(path) +} + +func ctyEmptyString(value cty.Value) bool { + if !value.IsNull() && value.IsKnown() { + valueType := value.Type() + if valueType == cty.String && value.AsString() == "" { + return true + } + } + return false +} + +func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value { + attrType := val.Type().AttributeType(name) + + if val.IsNull() { + return cty.NullVal(attrType) + } + + // We treat "" as null here + // as existing SDK doesn't support null yet.
+ // This allows us to avoid spurious diffs + // until we introduce null to the SDK. + attrValue := val.GetAttr(name) + if ctyEmptyString(attrValue) { + return cty.NullVal(attrType) + } + + return attrValue +} + +func ctyCollectionValues(val cty.Value) []cty.Value { + if !val.IsKnown() || val.IsNull() { + return nil + } + + ret := make([]cty.Value, 0, val.LengthInt()) + for it := val.ElementIterator(); it.Next(); { + _, value := it.Element() + ret = append(ret, value) + } + return ret +} + +// ctySequenceDiff returns differences between given sequences of cty.Value(s) +// in the form of Create, Delete, or Update actions (for objects). +func ctySequenceDiff(old, new []cty.Value) []*plans.Change { + var ret []*plans.Change + lcs := objchange.LongestCommonSubsequence(old, new) + var oldI, newI, lcsI int + for oldI < len(old) || newI < len(new) || lcsI < len(lcs) { + for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) { + isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType()) + if isObjectDiff && newI < len(new) { + ret = append(ret, &plans.Change{ + Action: plans.Update, + Before: old[oldI], + After: new[newI], + }) + oldI++ + newI++ // we also consume the next "new" in this case + continue + } + + ret = append(ret, &plans.Change{ + Action: plans.Delete, + Before: old[oldI], + After: cty.NullVal(old[oldI].Type()), + }) + oldI++ + } + for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) { + ret = append(ret, &plans.Change{ + Action: plans.Create, + Before: cty.NullVal(new[newI].Type()), + After: new[newI], + }) + newI++ + } + if lcsI < len(lcs) { + ret = append(ret, &plans.Change{ + Action: plans.NoOp, + Before: lcs[lcsI], + After: lcs[lcsI], + }) + + // All of our indexes advance together now, since the line + // is common to all three sequences. + lcsI++ + oldI++ + newI++ + } + } + return ret +} + +func ctyEqualWithUnknown(old, new cty.Value) bool { + if !old.IsWhollyKnown() || !new.IsWhollyKnown() { + return false + } + return old.Equals(new).True() +} + +// ctyTypesEqual checks equality of two types more loosely +// by avoiding checks of object/tuple elements +// as we render differences on element-by-element basis anyway +func ctyTypesEqual(oldT, newT cty.Type) bool { + if oldT.IsObjectType() && newT.IsObjectType() { + return true + } + if oldT.IsTupleType() && newT.IsTupleType() { + return true + } + return oldT.Equals(newT) +} + +func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path { + if cap(path)-len(path) >= minExtra { + return path + } + newCap := cap(path) * 2 + if newCap < (len(path) + minExtra) { + newCap = len(path) + minExtra + } + newPath := make(cty.Path, len(path), newCap) + copy(newPath, path) + return newPath +} + +// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +// +// In particular, this function handles the special situation where a "list" is +// actually represented as a tuple type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockListAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsListType() { + return cty.ListValEmpty(ty.ElementType()) + } + return cty.EmptyTupleVal // must need a tuple, then +} + +// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. 
+// +// In particular, this function handles the special situation where a "map" is +// actually represented as an object type where nested blocks contain +// dynamically-typed values. +func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + if ty := in.Type(); ty.IsMapType() { + return cty.MapValEmpty(ty.ElementType()) + } + return cty.EmptyObjectVal // must need an object, then +} + +// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil +// or returns an empty value of a suitable type to serve as a placeholder for it. +func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value { + if !in.IsNull() { + return in + } + // Dynamically-typed attributes are not supported inside blocks backed by + // sets, so our result here is always a set. + return cty.SetValEmpty(in.Type().ElementType()) +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/object_id.go b/vendor/github.com/hashicorp/terraform/command/format/object_id.go new file mode 100644 index 00000000..85ebbfec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/format/object_id.go @@ -0,0 +1,123 @@ +package format + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ObjectValueID takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a unique identifier in the remote +// system that it belongs to which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the Terraform configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// This function will panic if the given value is not of an object type. +func ObjectValueID(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["id"] == cty.String: + v := obj.GetAttr("id") + if v.IsKnown() && !v.IsNull() { + return "id", v.AsString() + } + + case atys["name"] == cty.String: + // "name" isn't always globally unique, but if there isn't also an + // "id" then it _often_ is, in practice. + v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + } + + return "", "" +} + +// ObjectValueName takes a value that is assumed to be an object representation +// of some resource instance object and attempts to heuristically find an +// attribute of it that is likely to be a human-friendly name in the remote +// system that it belongs to which will be useful to the user. +// +// If such an attribute is found, its name and string value intended for +// display are returned. Both returned strings are empty if no such attribute +// exists, in which case the caller should assume that the resource instance +// address within the Terraform configuration is the best available identifier. +// +// This is only a best-effort sort of thing, relying on naming conventions in +// our resource type schemas. 
The result is not guaranteed to be unique, but +// should generally be suitable for display to an end-user anyway. +// +// Callers that use both ObjectValueName and ObjectValueID at the same time +// should be prepared to get the same attribute key and value from both in +// some cases, since there is overlap between the id-extraction and +// name-extraction heuristics. +// +// This function will panic if the given value is not of an object type. +func ObjectValueName(obj cty.Value) (k, v string) { + if obj.IsNull() || !obj.IsKnown() { + return "", "" + } + + atys := obj.Type().AttributeTypes() + + switch { + + case atys["name"] == cty.String: + v := obj.GetAttr("name") + if v.IsKnown() && !v.IsNull() { + return "name", v.AsString() + } + + case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String: + tags := obj.GetAttr("tags") + if tags.IsNull() || !tags.IsWhollyKnown() { + break + } + + switch { + case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True): + v := tags.Index(cty.StringVal("name")) + if v.IsKnown() && !v.IsNull() { + return "tags.name", v.AsString() + } + case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True): + // AWS-style naming convention + v := tags.Index(cty.StringVal("Name")) + if v.IsKnown() && !v.IsNull() { + return "tags.Name", v.AsString() + } + } + } + + return "", "" +} + +// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID +// and ObjectValueName (in that preference order) to try to extract some sort +// of human-friendly descriptive string value for an object as additional +// context about an object when it is being displayed in a compact way (where +// not all of the attributes are visible.) +// +// Just as with the two functions it wraps, it is a best-effort and may return +// two empty strings if no suitable attribute can be found for a given object. +func ObjectValueIDOrName(obj cty.Value) (k, v string) { + k, v = ObjectValueID(obj) + if k != "" { + return + } + k, v = ObjectValueName(obj) + return +} diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go index b270e2d3..098653fc 100644 --- a/vendor/github.com/hashicorp/terraform/command/format/plan.go +++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go @@ -3,229 +3,300 @@ package format import ( "bytes" "fmt" + "log" "sort" "strings" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) -// PlanOpts are the options for formatting a plan. -type PlanOpts struct { - // Plan is the plan to format. This is required. - Plan *terraform.Plan +// Plan is a representation of a plan optimized for display to +// an end-user, as opposed to terraform.Plan which is for internal use. +// +// Plan excludes implementation details that may otherwise appear +// in the main plan, such as destroy actions on data sources (which are +// there only to clean up the state). +type Plan struct { + Resources []*InstanceDiff +} + +// InstanceDiff is a representation of an instance diff optimized +// for display, in conjunction with Plan. +type InstanceDiff struct { + Addr *terraform.ResourceAddress + Action plans.Action - - // Color is the colorizer. This is optional.
- Color *colorstring.Colorize + // Attributes describes changes to the attributes of the instance. + // + // For destroy diffs this is always nil. + Attributes []*AttributeDiff - // ModuleDepth is the depth of the modules to expand. By default this - // is zero which will not expand modules at all. - ModuleDepth int + Tainted bool + Deposed bool } -// Plan takes a plan and returns a -func Plan(opts *PlanOpts) string { - p := opts.Plan - if p.Diff == nil || p.Diff.Empty() { - return "This plan does nothing." - } +// AttributeDiff is a representation of an attribute diff optimized +// for display, in conjunction with DisplayInstanceDiff. +type AttributeDiff struct { + // Path is a dot-delimited traversal through possibly many levels of list and map structure, + // intended for display purposes only. + Path string - if opts.Color == nil { - opts.Color = &colorstring.Colorize{ - Colors: colorstring.DefaultColors, - Reset: false, - } - } + Action plans.Action - buf := new(bytes.Buffer) - for _, m := range p.Diff.Modules { - if len(m.Path)-1 <= opts.ModuleDepth || opts.ModuleDepth == -1 { - formatPlanModuleExpand(buf, m, opts) - } else { - formatPlanModuleSingle(buf, m, opts) - } - } + OldValue string + NewValue string - return strings.TrimSpace(buf.String()) + NewComputed bool + Sensitive bool + ForcesNew bool } -// formatPlanModuleExpand will output the given module and all of its -// resources. -func formatPlanModuleExpand( - buf *bytes.Buffer, m *terraform.ModuleDiff, opts *PlanOpts) { - // Ignore empty diffs - if m.Empty() { - return - } +// PlanStats gives summary counts for a Plan. +type PlanStats struct { + ToAdd, ToChange, ToDestroy int +} - var moduleName string - if !m.IsRoot() { - moduleName = fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) +// NewPlan produces a display-oriented Plan from a terraform.Plan. +func NewPlan(changes *plans.Changes) *Plan { + log.Printf("[TRACE] NewPlan for %#v", changes) + ret := &Plan{} + if changes == nil { + // Nothing to do! + return ret } - // We want to output the resources in sorted order to make things - // easier to scan through, so get all the resource names and sort them. - names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { - names = append(names, name) - } - sort.Strings(names) + for _, rc := range changes.Resources { + addr := rc.Addr + log.Printf("[TRACE] NewPlan found %s (%s)", addr, rc.Action) + dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode - // Go through each sorted name and start building the output - for _, name := range names { - rdiff := m.Resources[name] - if rdiff.Empty() { + // We create "delete" actions for data resources so we can clean + // up their entries in state, but this is an implementation detail + // that users shouldn't see. + if dataSource && rc.Action == plans.Delete { continue } - dataSource := strings.HasPrefix(name, "data.") - - if moduleName != "" { - name = moduleName + "." + name + // For now we'll shim this to work with our old types. + // TODO: Update for the new plan types, ideally also switching over to + // a structural diff renderer instead of a flat renderer. + did := &InstanceDiff{ + Addr: terraform.NewLegacyResourceInstanceAddress(addr), + Action: rc.Action, } - // Determine the color for the text (green for adding, yellow - // for change, red for delete), and symbol, and output the - // resource header. 
- color := "yellow" - symbol := "~" - oldValues := true - switch rdiff.ChangeType() { - case terraform.DiffDestroyCreate: - color = "green" - symbol = "-/+" - case terraform.DiffCreate: - color = "green" - symbol = "+" - oldValues = false - - // If we're "creating" a data resource then we'll present it - // to the user as a "read" operation, so it's clear that this - // operation won't change anything outside of the Terraform state. - // Unfortunately by the time we get here we only have the name - // to work with, so we need to cheat and exploit knowledge of the - // naming scheme for data resources. - if dataSource { - symbol = "<=" - color = "cyan" - } - case terraform.DiffDestroy: - color = "red" - symbol = "-" + if rc.DeposedKey != states.NotDeposed { + did.Deposed = true } - var extraAttr []string - if rdiff.DestroyTainted { - extraAttr = append(extraAttr, "tainted") - } - if rdiff.DestroyDeposed { - extraAttr = append(extraAttr, "deposed") - } - var extraStr string - if len(extraAttr) > 0 { - extraStr = fmt.Sprintf(" (%s)", strings.Join(extraAttr, ", ")) + // Since this is just a temporary stub implementation on the way + // to us replacing this with the structural diff renderer, we currently + // don't include any attributes here. + // FIXME: Implement the structural diff renderer to replace this + // codepath altogether. + + ret.Resources = append(ret.Resources, did) + } + + // Sort the instance diffs by their addresses for display. + sort.Slice(ret.Resources, func(i, j int) bool { + iAddr := ret.Resources[i].Addr + jAddr := ret.Resources[j].Addr + return iAddr.Less(jAddr) + }) + + return ret +} + +// Format produces and returns a text representation of the receiving plan +// intended for display in a terminal. +// +// If color is not nil, it is used to colorize the output. +func (p *Plan) Format(color *colorstring.Colorize) string { + if p.Empty() { + return "This plan does nothing." + } + + if color == nil { + color = &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Reset: false, } + } - buf.WriteString(opts.Color.Color(fmt.Sprintf( - "[%s]%s %s%s\n", - color, symbol, name, extraStr))) - - // Get all the attributes that are changing, and sort them. Also - // determine the longest key so that we can align them all. - keyLen := 0 - keys := make([]string, 0, len(rdiff.Attributes)) - for key, _ := range rdiff.Attributes { - // Skip the ID since we do that specially - if key == "id" { - continue - } + // Find the longest path length of all the paths that are changing, + // so we can align them all. 
+ keyLen := 0 + for _, r := range p.Resources { + for _, attr := range r.Attributes { + key := attr.Path - - keys = append(keys, key) if len(key) > keyLen { keyLen = len(key) } } - sort.Strings(keys) - - // Go through and output each attribute - for _, attrK := range keys { - attrDiff := rdiff.Attributes[attrK] - - v := attrDiff.New - if v == "" && attrDiff.NewComputed { - v = "<computed>" - } + } - if attrDiff.Sensitive { - v = "<sensitive>" - } + buf := new(bytes.Buffer) + for _, r := range p.Resources { + formatPlanInstanceDiff(buf, r, keyLen, color) + } - updateMsg := "" - if attrDiff.RequiresNew && rdiff.Destroy { - updateMsg = opts.Color.Color(" [red](forces new resource)") - } else if attrDiff.Sensitive && oldValues { - updateMsg = opts.Color.Color(" [yellow](attribute changed)") - } + return strings.TrimSpace(buf.String()) +} - if oldValues { - var u string - if attrDiff.Sensitive { - u = "<sensitive>" - } else { - u = attrDiff.Old - } - buf.WriteString(fmt.Sprintf( - " %s:%s %#v => %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - u, - v, - updateMsg)) - } else { - buf.WriteString(fmt.Sprintf( - " %s:%s %#v%s\n", - attrK, - strings.Repeat(" ", keyLen-len(attrK)), - v, - updateMsg)) - } +// Stats returns statistics about the plan +func (p *Plan) Stats() PlanStats { + var ret PlanStats + for _, r := range p.Resources { + switch r.Action { + case plans.Create: + ret.ToAdd++ + case plans.Update: + ret.ToChange++ + case plans.DeleteThenCreate, plans.CreateThenDelete: + ret.ToAdd++ + ret.ToDestroy++ + case plans.Delete: + ret.ToDestroy++ } + } + return ret +} - // Write the reset color so we don't overload the user's terminal - buf.WriteString(opts.Color.Color("[reset]\n")) +// ActionCounts returns the number of diffs for each action type +func (p *Plan) ActionCounts() map[plans.Action]int { + ret := map[plans.Action]int{} + for _, r := range p.Resources { + ret[r.Action]++ } + return ret +} + +// Empty returns true if there are no resource diffs in the receiving plan. +func (p *Plan) Empty() bool { + return len(p.Resources) == 0 } -// formatPlanModuleSingle will output the given module and all of its -// resources. -func formatPlanModuleSingle( - buf *bytes.Buffer, m *terraform.ModuleDiff, opts *PlanOpts) { - // Ignore empty diffs - if m.Empty() { - return +// DiffActionSymbol returns a string that, once passed through a +// colorstring.Colorize, will produce a result that can be written +// to a terminal to produce a symbol made of three printable +// characters, possibly interspersed with VT100 color codes. +func DiffActionSymbol(action plans.Action) string { + switch action { + case plans.DeleteThenCreate: + return "[red]-[reset]/[green]+[reset]" + case plans.CreateThenDelete: + return "[green]+[reset]/[red]-[reset]" + case plans.Create: + return " [green]+[reset]" + case plans.Delete: + return " [red]-[reset]" + case plans.Read: + return " [cyan]<=[reset]" + case plans.Update: + return " [yellow]~[reset]" + default: + return " ?" } +} - moduleName := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) +// formatPlanInstanceDiff writes the text representation of the given instance diff +// to the given buffer, using the given colorizer. +func formatPlanInstanceDiff(buf *bytes.Buffer, r *InstanceDiff, keyLen int, colorizer *colorstring.Colorize) { + addrStr := r.Addr.String() // Determine the color for the text (green for adding, yellow // for change, red for delete), and symbol, and output the // resource header.
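For a sense of what the DiffActionSymbol helper defined above actually emits, here is a small standalone sketch; it assumes the vendored format and plans packages plus mitchellh/colorstring, and uses Disable to strip the color codes so the bare markers are visible:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/colorstring"

	"github.com/hashicorp/terraform/command/format"
	"github.com/hashicorp/terraform/plans"
)

func main() {
	// Disable strips the [green]/[reset] style codes, leaving only the
	// printable marker characters for this illustration.
	colorize := &colorstring.Colorize{
		Colors:  colorstring.DefaultColors,
		Disable: true,
	}

	for _, action := range []plans.Action{
		plans.Create, plans.Update, plans.DeleteThenCreate,
	} {
		fmt.Printf("%q\n", colorize.Color(format.DiffActionSymbol(action)))
	}
	// Prints " +", " ~", "-/+" once the color codes are stripped.
}
```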
color := "yellow" - symbol := "~" - switch m.ChangeType() { - case terraform.DiffCreate: + symbol := DiffActionSymbol(r.Action) + oldValues := true + switch r.Action { + case plans.DeleteThenCreate, plans.CreateThenDelete: + color = "yellow" + case plans.Create: color = "green" - symbol = "+" - case terraform.DiffDestroy: + oldValues = false + case plans.Delete: color = "red" - symbol = "-" + case plans.Read: + color = "cyan" + oldValues = false + } + + var extraStr string + if r.Tainted { + extraStr = extraStr + " (tainted)" + } + if r.Deposed { + extraStr = extraStr + " (deposed)" + } + if r.Action.IsReplace() { + extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)") + } + + buf.WriteString( + colorizer.Color(fmt.Sprintf( + "[%s]%s [%s]%s%s\n", + color, symbol, color, addrStr, extraStr, + )), + ) + + for _, attr := range r.Attributes { + + v := attr.NewValue + var dispV string + switch { + case v == "" && attr.NewComputed: + dispV = "" + case attr.Sensitive: + dispV = "" + default: + dispV = fmt.Sprintf("%q", v) + } + + updateMsg := "" + switch { + case attr.ForcesNew && r.Action.IsReplace(): + updateMsg = colorizer.Color(" [red](forces new resource)") + case attr.Sensitive && oldValues: + updateMsg = colorizer.Color(" [yellow](attribute changed)") + } + + if oldValues { + u := attr.OldValue + var dispU string + switch { + case attr.Sensitive: + dispU = "" + default: + dispU = fmt.Sprintf("%q", u) + } + buf.WriteString(fmt.Sprintf( + " %s:%s %s => %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispU, dispV, + updateMsg, + )) + } else { + buf.WriteString(fmt.Sprintf( + " %s:%s %s%s\n", + attr.Path, + strings.Repeat(" ", keyLen-len(attr.Path)), + dispV, + updateMsg, + )) + } } - buf.WriteString(opts.Color.Color(fmt.Sprintf( - "[%s]%s %s\n", - color, symbol, moduleName))) - buf.WriteString(fmt.Sprintf( - " %d resource(s)", - len(m.Resources))) - buf.WriteString(opts.Color.Color("[reset]\n")) + // Write the reset color so we don't bleed color into later text + buf.WriteString(colorizer.Color("[reset]\n")) } diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go index f8a658bc..0130f5cf 100644 --- a/vendor/github.com/hashicorp/terraform/command/format/state.go +++ b/vendor/github.com/hashicorp/terraform/command/format/state.go @@ -6,6 +6,12 @@ import ( "sort" "strings" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/colorstring" ) @@ -13,14 +19,13 @@ import ( // StateOpts are the options for formatting a state. type StateOpts struct { // State is the state to format. This is required. - State *terraform.State + State *states.State + + // Schemas are used to decode attributes. This is required. + Schemas *terraform.Schemas // Color is the colorizer. This is optional. Color *colorstring.Colorize - - // ModuleDepth is the depth of the modules to expand. By default this - // is zero which will not expand modules at all. 
- ModuleDepth int } // State takes a state and returns a string @@ -29,126 +34,145 @@ func State(opts *StateOpts) string { panic("colorize not given") } + if opts.Schemas == nil { + panic("schemas not given") + } + s := opts.State if len(s.Modules) == 0 { return "The state file is empty. No resources are represented." } - var buf bytes.Buffer - buf.WriteString("[reset]") + buf := bytes.NewBufferString("[reset]") + p := blockBodyDiffPrinter{ + buf: buf, + color: opts.Color, + action: plans.NoOp, + } // Format all the modules for _, m := range s.Modules { - if len(m.Path)-1 <= opts.ModuleDepth || opts.ModuleDepth == -1 { - formatStateModuleExpand(&buf, m, opts) - } else { - formatStateModuleSingle(&buf, m, opts) - } + formatStateModule(p, m, opts.Schemas) } // Write the outputs for the root module m := s.RootModule() - if len(m.Outputs) > 0 { - buf.WriteString("\nOutputs:\n\n") + + if m.OutputValues != nil { + if len(m.OutputValues) > 0 { + p.buf.WriteString("Outputs:\n\n") + } // Sort the outputs - ks := make([]string, 0, len(m.Outputs)) - for k, _ := range m.Outputs { + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { ks = append(ks, k) } sort.Strings(ks) // Output each output k/v pair for _, k := range ks { - v := m.Outputs[k] - switch output := v.Value.(type) { - case string: - buf.WriteString(fmt.Sprintf("%s = %s", k, output)) - buf.WriteString("\n") - case []interface{}: - buf.WriteString(formatListOutput("", k, output)) - buf.WriteString("\n") - case map[string]interface{}: - buf.WriteString(formatMapOutput("", k, output)) - buf.WriteString("\n") - } + v := m.OutputValues[k] + p.buf.WriteString(fmt.Sprintf("%s = ", k)) + p.writeValue(v.Value, plans.NoOp, 0) + p.buf.WriteString("\n") } } - return opts.Color.Color(strings.TrimSpace(buf.String())) -} + trimmedOutput := strings.TrimSpace(p.buf.String()) + trimmedOutput += "[reset]" -func formatStateModuleExpand( - buf *bytes.Buffer, m *terraform.ModuleState, opts *StateOpts) { - var moduleName string - if !m.IsRoot() { - moduleName = fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) - } + return opts.Color.Color(trimmedOutput) + +} +func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) { // First get the names of all the resources so we can show them // in alphabetical order. names := make([]string, 0, len(m.Resources)) - for name, _ := range m.Resources { + for name := range m.Resources { names = append(names, name) } sort.Strings(names) // Go through each resource and begin building up the output. - for _, k := range names { - name := k - if moduleName != "" { - name = moduleName + "." + name - } + for _, key := range names { + for k, v := range m.Resources[key].Instances { + addr := m.Resources[key].Addr - rs := m.Resources[k] - is := rs.Primary - var id string - if is != nil { - id = is.ID - } - if id == "" { - id = "" - } - - taintStr := "" - if rs.Primary != nil && rs.Primary.Tainted { - taintStr = " (tainted)" - } + taintStr := "" + if v.Current.Status == 'T' { + taintStr = " (tainted)" + } + p.buf.WriteString(fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr)) + + var schema *configschema.Block + provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact() + if _, exists := schemas.Providers[provider]; !exists { + // This should never happen in normal use because we should've + // loaded all of the schemas and checked things prior to this + // point. 
We can't return errors here, but since this is UI code + // we will try to do _something_ reasonable. + p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider)) + continue + } - buf.WriteString(fmt.Sprintf("%s:%s\n", name, taintStr)) - buf.WriteString(fmt.Sprintf(" id = %s\n", id)) + switch addr.Mode { + case addrs.ManagedResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q resource type %s\n\n", provider, addr.Type)) + continue + } - if is != nil { - // Sort the attributes - attrKeys := make([]string, 0, len(is.Attributes)) - for ak, _ := range is.Attributes { - // Skip the id attribute since we just show the id directly - if ak == "id" { + p.buf.WriteString(fmt.Sprintf( + "resource %q %q {", + addr.Type, + addr.Name, + )) + case addrs.DataResourceMode: + schema, _ = schemas.ResourceTypeConfig( + provider, + addr.Mode, + addr.Type, + ) + if schema == nil { + p.buf.WriteString(fmt.Sprintf( + "# missing schema for provider %q data source %s\n\n", provider, addr.Type)) continue } - attrKeys = append(attrKeys, ak) + p.buf.WriteString(fmt.Sprintf( + "data %q %q {", + addr.Type, + addr.Name, + )) + default: + // should never happen, since the above is exhaustive + p.buf.WriteString(addr.String()) } - sort.Strings(attrKeys) - // Output each attribute - for _, ak := range attrKeys { - av := is.Attributes[ak] - buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + val, err := v.Current.Decode(schema.ImpliedType()) + if err != nil { + fmt.Println(err.Error()) + break } - } - } - - buf.WriteString("[reset]\n") -} -func formatStateModuleSingle( - buf *bytes.Buffer, m *terraform.ModuleState, opts *StateOpts) { - // Header with the module name - buf.WriteString(fmt.Sprintf("module.%s\n", strings.Join(m.Path[1:], "."))) + path := make(cty.Path, 0, 3) + bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path) + if bodyWritten { + p.buf.WriteString("\n") + } - // Now just write how many resources are in here. 
- buf.WriteString(fmt.Sprintf(" %d resource(s)\n", len(m.Resources))) + p.buf.WriteString("}\n\n") + } + } + p.buf.WriteString("\n") } func formatNestedList(indent string, outputList []interface{}) string { @@ -210,7 +234,7 @@ func formatListOutput(indent, outputName string, outputList []interface{}) strin func formatNestedMap(indent string, outputMap map[string]interface{}) string { ks := make([]string, 0, len(outputMap)) - for k, _ := range outputMap { + for k := range outputMap { ks = append(ks, k) } sort.Strings(ks) @@ -235,7 +259,7 @@ func formatNestedMap(indent string, outputMap map[string]interface{}) string { func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { ks := make([]string, 0, len(outputMap)) - for k, _ := range outputMap { + for k := range outputMap { ks = append(ks, k) } sort.Strings(ks) diff --git a/vendor/github.com/hashicorp/terraform/command/get.go b/vendor/github.com/hashicorp/terraform/command/get.go index 982484f9..02269852 100644 --- a/vendor/github.com/hashicorp/terraform/command/get.go +++ b/vendor/github.com/hashicorp/terraform/command/get.go @@ -1,11 +1,9 @@ package command import ( - "flag" - "fmt" "strings" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/tfdiags" ) // GetCommand is a Command implementation that takes a Terraform @@ -17,29 +15,29 @@ type GetCommand struct { func (c *GetCommand) Run(args []string) int { var update bool - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - cmdFlags := flag.NewFlagSet("get", flag.ContinueOnError) + cmdFlags := c.Meta.defaultFlagSet("get") cmdFlags.BoolVar(&update, "update", false, "update") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } - var path string path, err := ModulePath(cmdFlags.Args()) if err != nil { c.Ui.Error(err.Error()) return 1 } - mode := module.GetModeGet - if update { - mode = module.GetModeUpdate - } + path = c.normalizePath(path) - if err := getModules(&c.Meta, path, mode); err != nil { - c.Ui.Error(err.Error()) + diags := getModules(&c.Meta, path, update) + c.showDiagnostics(diags) + if diags.HasErrors() { return 1 } @@ -60,10 +58,10 @@ Usage: terraform get [options] PATH Options: - -update=false If true, modules already downloaded will be checked - for updates and updated if necessary. + -update Check already-downloaded modules for available updates + and install the newest versions available. - -no-color If specified, output won't contain any color. + -no-color Disable text coloring in the output. 
` return strings.TrimSpace(helpText) @@ -73,16 +71,10 @@ func (c *GetCommand) Synopsis() string { return "Download and install modules for the configuration" } -func getModules(m *Meta, path string, mode module.GetMode) error { - mod, err := module.NewTreeModule("", path) - if err != nil { - return fmt.Errorf("Error loading configuration: %s", err) +func getModules(m *Meta, path string, upgrade bool) tfdiags.Diagnostics { + hooks := uiModuleInstallHooks{ + Ui: m.Ui, + ShowLocalPaths: true, } - - err = mod.Load(m.moduleStorage(m.DataDir()), mode) - if err != nil { - return fmt.Errorf("Error loading modules: %s", err) - } - - return nil + return m.installModules(path, upgrade, hooks) } diff --git a/vendor/github.com/hashicorp/terraform/command/graph.go b/vendor/github.com/hashicorp/terraform/command/graph.go index 45299787..f047e14d 100644 --- a/vendor/github.com/hashicorp/terraform/command/graph.go +++ b/vendor/github.com/hashicorp/terraform/command/graph.go @@ -1,12 +1,13 @@ package command import ( - "flag" "fmt" "strings" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" "github.com/hashicorp/terraform/terraform" ) @@ -18,18 +19,21 @@ type GraphCommand struct { } func (c *GraphCommand) Run(args []string) int { - var moduleDepth int - var verbose bool var drawCycles bool var graphTypeStr string + var moduleDepth int + var verbose bool - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - cmdFlags := flag.NewFlagSet("graph", flag.ContinueOnError) - c.addModuleDepthFlag(cmdFlags, &moduleDepth) - cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") + cmdFlags := c.Meta.defaultFlagSet("graph") cmdFlags.BoolVar(&drawCycles, "draw-cycles", false, "draw-cycles") cmdFlags.StringVar(&graphTypeStr, "type", "", "type") + cmdFlags.IntVar(&moduleDepth, "module-depth", -1, "module-depth") + cmdFlags.BoolVar(&verbose, "verbose", false, "verbose") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 @@ -41,56 +45,77 @@ func (c *GraphCommand) Run(args []string) int { return 1 } + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + // Check if the path is a plan - plan, err := c.Plan(configPath) + var plan *plans.Plan + planFile, err := c.PlanFile(configPath) if err != nil { c.Ui.Error(err.Error()) return 1 } - if plan != nil { + if planFile != nil { // Reset for backend loading configPath = "" } - // Load the module - var mod *module.Tree - if plan == nil { - mod, err = c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - return 1 - } + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 } // Load the backend - b, err := c.Backend(&BackendOpts{ - ConfigPath: configPath, - Plan: plan, + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) 
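The rewritten commands in this diff all follow the diagnostics discipline visible here: append tfdiags from each step, then test HasErrors once before aborting, so warnings travel alongside errors instead of being dropped. A rough sketch of the pattern in isolation, with made-up messages, assuming only the vendored tfdiags package:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/terraform/tfdiags"
)

func main() {
	var diags tfdiags.Diagnostics

	// Each phase appends whatever it produced; warnings and errors mix freely.
	diags = diags.Append(tfdiags.SimpleWarning("backend setting is deprecated")) // made-up warning
	diags = diags.Append(errors.New("failed to load backend"))                   // made-up error

	// Only errors abort; a run with nothing but warnings would continue.
	if diags.HasErrors() {
		for _, d := range diags {
			fmt.Println(d.Description().Summary)
		}
		return
	}
}
```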
return 1 } // We require a local backend local, ok := b.(backend.Local) if !ok { + c.showDiagnostics(diags) // in case of any warnings in here c.Ui.Error(ErrUnsupportedLocalOp) return 1 } // Build the operation - opReq := c.Operation() - opReq.Module = mod - opReq.Plan = plan + opReq := c.Operation(b) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + opReq.PlanFile = planFile + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } // Get the context - ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) + ctx, _, ctxDiags := local.Context(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } + defer func() { + err := opReq.StateLocker.Unlock(nil) + if err != nil { + c.Ui.Error(err.Error()) + } + }() + // Determine the graph type graphType := terraform.GraphTypePlan if plan != nil { @@ -109,12 +134,13 @@ func (c *GraphCommand) Run(args []string) int { // Skip validation during graph generation - we want to see the graph even if // it is invalid for some reason. - g, err := ctx.Graph(graphType, &terraform.ContextGraphOpts{ + g, graphDiags := ctx.Graph(graphType, &terraform.ContextGraphOpts{ Verbose: verbose, Validate: false, }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error creating graph: %s", err)) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -128,6 +154,14 @@ func (c *GraphCommand) Run(args []string) int { return 1 } + if diags.HasErrors() { + // For this command we only show diagnostics if there are errors, + // because printing out naked warnings could upset a naive program + // consuming our dot output. + c.showDiagnostics(diags) + return 1 + } + c.Ui.Output(graphStr) return 0 @@ -152,14 +186,14 @@ Usage: terraform graph [options] [DIR] Options: - -draw-cycles Highlight any cycles in the graph with colored edges. - This helps when diagnosing cycle errors. - - -no-color If specified, output won't contain any color. + -draw-cycles Highlight any cycles in the graph with colored edges. + This helps when diagnosing cycle errors. - -type=plan Type of graph to output. Can be: plan, plan-destroy, apply, - validate, input, refresh. + -module-depth=n Specifies the depth of modules to show in the output. + By default this is -1, which will expand all. + -type=plan Type of graph to output. Can be: plan, plan-destroy, apply, + validate, input, refresh. 
` return strings.TrimSpace(helpText) diff --git a/vendor/github.com/hashicorp/terraform/command/hook_module_install.go b/vendor/github.com/hashicorp/terraform/command/hook_module_install.go new file mode 100644 index 00000000..4afa7072 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/hook_module_install.go @@ -0,0 +1,33 @@ +package command + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/internal/initwd" + "github.com/mitchellh/cli" +) + +type uiModuleInstallHooks struct { + initwd.ModuleInstallHooksImpl + Ui cli.Ui + ShowLocalPaths bool +} + +var _ initwd.ModuleInstallHooks = uiModuleInstallHooks{} + +func (h uiModuleInstallHooks) Download(modulePath, packageAddr string, v *version.Version) { + if v != nil { + h.Ui.Info(fmt.Sprintf("Downloading %s %s for %s...", packageAddr, v, modulePath)) + } else { + h.Ui.Info(fmt.Sprintf("Downloading %s for %s...", packageAddr, modulePath)) + } +} + +func (h uiModuleInstallHooks) Install(modulePath string, v *version.Version, localDir string) { + if h.ShowLocalPaths { + h.Ui.Info(fmt.Sprintf("- %s in %s", modulePath, localDir)) + } else { + h.Ui.Info(fmt.Sprintf("- %s", modulePath)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/hook_ui.go b/vendor/github.com/hashicorp/terraform/command/hook_ui.go index a53edfa3..93613355 100644 --- a/vendor/github.com/hashicorp/terraform/command/hook_ui.go +++ b/vendor/github.com/hashicorp/terraform/command/hook_ui.go @@ -10,9 +10,16 @@ import ( "time" "unicode" - "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" ) const defaultPeriodicUiTimer = 10 * time.Second @@ -31,12 +38,14 @@ type UiHook struct { ui cli.Ui } +var _ terraform.Hook = (*UiHook)(nil) + // uiResourceState tracks the state of a single resource type uiResourceState struct { - Name string - ResourceId string - Op uiResourceOp - Start time.Time + DispAddr string + IDKey, IDValue string + Op uiResourceOp + Start time.Time DoneCh chan struct{} // To be used for cancellation @@ -53,35 +62,31 @@ const ( uiResourceDestroy ) -func (h *UiHook) PreApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - d *terraform.InstanceDiff) (terraform.HookAction, error) { +func (h *UiHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (terraform.HookAction, error) { h.once.Do(h.init) - // if there's no diff, there's nothing to output - if d.Empty() { - return terraform.HookActionContinue, nil - } - - id := n.HumanId() - - op := uiResourceModify - if d.Destroy { - op = uiResourceDestroy - } else if s.ID == "" { - op = uiResourceCreate + dispAddr := addr.String() + if gen != states.CurrentGen { + dispAddr = fmt.Sprintf("%s (%s)", dispAddr, gen) } var operation string - switch op { - case uiResourceModify: - operation = "Modifying..." 
- case uiResourceDestroy: + var op uiResourceOp + idKey, idValue := format.ObjectValueIDOrName(priorState) + switch action { + case plans.Delete: operation = "Destroying..." - case uiResourceCreate: + op = uiResourceDestroy + case plans.Create: operation = "Creating..." - case uiResourceUnknown: + op = uiResourceCreate + case plans.Update: + operation = "Modifying..." + op = uiResourceModify + default: + // We don't expect any other actions in here, so anything else is a + // bug in the caller but we'll ignore it in order to be robust. + h.ui.Output(fmt.Sprintf("(Unknown action %s for %s)", action, dispAddr)) return terraform.HookActionContinue, nil } @@ -91,14 +96,13 @@ func (h *UiHook) PreApply( // determine the longest key so that we can align them all. keyLen := 0 - dAttrs := d.CopyAttributes() + // FIXME: This is stubbed out in preparation for rewriting it to use + // a structural presentation rather than the old-style flatmap one. + // We just assume no attributes at all for now, pending new code to + // work with the two cty.Values we are given. + dAttrs := map[string]terraform.ResourceAttrDiff{} keys := make([]string, 0, len(dAttrs)) for key, _ := range dAttrs { - // Skip the ID since we do that specially - if key == "id" { - continue - } - keys = append(keys, key) if len(key) > keyLen { keyLen = len(key) @@ -108,7 +112,7 @@ func (h *UiHook) PreApply( // Go through and output each attribute for _, attrK := range keys { - attrDiff, _ := d.GetAttribute(attrK) + attrDiff := dAttrs[attrK] v := attrDiff.New u := attrDiff.Old @@ -134,30 +138,37 @@ func (h *UiHook) PreApply( attrString = "\n " + attrString } - var stateId, stateIdSuffix string - if s != nil && s.ID != "" { - stateId = s.ID - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) + var stateIdSuffix string + if idKey != "" && idValue != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", idKey, idValue) + } else { + // Make sure they are both empty so we can deal with this more + // easily in the other hook methods. 
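The `[id=...]` suffix being assembled in PreApply above comes from the ObjectValueIDOrName heuristic added in object_id.go earlier in this diff. A small sketch of how the hook output is derived, with illustrative attribute values, assuming the vendored command/format package:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"

	"github.com/hashicorp/terraform/command/format"
)

func main() {
	// Stand-in for a resource instance's prior state, with made-up values.
	priorState := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-0abc123"),
		"name": cty.StringVal("web"),
	})

	// Prefers "id" over "name" when both are present and known.
	idKey, idValue := format.ObjectValueIDOrName(priorState)

	stateIdSuffix := ""
	if idKey != "" && idValue != "" {
		stateIdSuffix = fmt.Sprintf(" [%s=%s]", idKey, idValue)
	}
	fmt.Println("aws_instance.web: Destroying..." + stateIdSuffix)
	// aws_instance.web: Destroying... [id=i-0abc123]
}
```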
+ idKey = "" + idValue = "" } h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: %s%s[reset]%s", - id, + dispAddr, operation, stateIdSuffix, - attrString))) + attrString, + ))) + key := addr.String() uiState := uiResourceState{ - Name: id, - ResourceId: stateId, - Op: op, - Start: time.Now().Round(time.Second), - DoneCh: make(chan struct{}), - done: make(chan struct{}), + DispAddr: key, + IDKey: idKey, + IDValue: idValue, + Op: op, + Start: time.Now().Round(time.Second), + DoneCh: make(chan struct{}), + done: make(chan struct{}), } h.l.Lock() - h.resources[id] = uiState + h.resources[key] = uiState h.l.Unlock() // Start goroutine that shows progress @@ -190,13 +201,13 @@ func (h *UiHook) stillApplying(state uiResourceState) { } idSuffix := "" - if v := state.ResourceId; v != "" { - idSuffix = fmt.Sprintf("ID: %s, ", truncateId(v, maxIdLen)) + if state.IDKey != "" { + idSuffix = fmt.Sprintf("%s=%s, ", state.IDKey, truncateId(state.IDValue, maxIdLen)) } h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s (%s%s elapsed)[reset]", - state.Name, + "[reset][bold]%s: %s [%s%s elapsed][reset]", + state.DispAddr, msg, idSuffix, time.Now().Round(time.Second).Sub(state.Start), @@ -204,12 +215,9 @@ func (h *UiHook) stillApplying(state uiResourceState) { } } -func (h *UiHook) PostApply( - n *terraform.InstanceInfo, - s *terraform.InstanceState, - applyerr error) (terraform.HookAction, error) { +func (h *UiHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, applyerr error) (terraform.HookAction, error) { - id := n.HumanId() + id := addr.String() h.l.Lock() state := h.resources[id] @@ -221,8 +229,8 @@ func (h *UiHook) PostApply( h.l.Unlock() var stateIdSuffix string - if s != nil && s.ID != "" { - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) + if k, v := format.ObjectValueID(newState); k != "" && v != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v) } var msg string @@ -243,39 +251,31 @@ func (h *UiHook) PostApply( } colorized := h.Colorize.Color(fmt.Sprintf( - "[reset][bold]%s: %s%s[reset]", - id, msg, stateIdSuffix)) + "[reset][bold]%s: %s after %s%s[reset]", + addr, msg, time.Now().Round(time.Second).Sub(state.Start), stateIdSuffix)) h.ui.Output(colorized) return terraform.HookActionContinue, nil } -func (h *UiHook) PreDiff( - n *terraform.InstanceInfo, - s *terraform.InstanceState) (terraform.HookAction, error) { +func (h *UiHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (terraform.HookAction, error) { return terraform.HookActionContinue, nil } -func (h *UiHook) PreProvision( - n *terraform.InstanceInfo, - provId string) (terraform.HookAction, error) { - id := n.HumanId() +func (h *UiHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (terraform.HookAction, error) { h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Provisioning with '%s'...[reset]", - id, provId))) + addr, typeName, + ))) return terraform.HookActionContinue, nil } -func (h *UiHook) ProvisionOutput( - n *terraform.InstanceInfo, - provId string, - msg string) { - id := n.HumanId() +func (h *UiHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, msg string) { var buf bytes.Buffer buf.WriteString(h.Colorize.Color("[reset]")) - prefix := fmt.Sprintf("%s (%s): ", id, provId) + prefix := fmt.Sprintf("%s (%s): ", addr, typeName) s := bufio.NewScanner(strings.NewReader(msg)) s.Split(scanLines) for s.Scan() { @@ -288,49 +288,40 @@ func (h 
*UiHook) ProvisionOutput( h.ui.Output(strings.TrimSpace(buf.String())) } -func (h *UiHook) PreRefresh( - n *terraform.InstanceInfo, - s *terraform.InstanceState) (terraform.HookAction, error) { +func (h *UiHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (terraform.HookAction, error) { h.once.Do(h.init) - id := n.HumanId() - var stateIdSuffix string - // Data resources refresh before they have ids, whereas managed - // resources are only refreshed when they have ids. - if s.ID != "" { - stateIdSuffix = fmt.Sprintf(" (ID: %s)", truncateId(s.ID, maxIdLen)) + if k, v := format.ObjectValueID(priorState); k != "" && v != "" { + stateIdSuffix = fmt.Sprintf(" [%s=%s]", k, v) } h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Refreshing state...%s", - id, stateIdSuffix))) + addr, stateIdSuffix))) return terraform.HookActionContinue, nil } -func (h *UiHook) PreImportState( - n *terraform.InstanceInfo, - id string) (terraform.HookAction, error) { +func (h *UiHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (terraform.HookAction, error) { h.once.Do(h.init) h.ui.Output(h.Colorize.Color(fmt.Sprintf( "[reset][bold]%s: Importing from ID %q...", - n.HumanId(), id))) + addr, importID, + ))) return terraform.HookActionContinue, nil } -func (h *UiHook) PostImportState( - n *terraform.InstanceInfo, - s []*terraform.InstanceState) (terraform.HookAction, error) { +func (h *UiHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (terraform.HookAction, error) { h.once.Do(h.init) - id := n.HumanId() h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][bold][green]%s: Import complete!", id))) - for _, s := range s { + "[reset][bold][green]%s: Import prepared!", addr))) + for _, s := range imported { h.ui.Output(h.Colorize.Color(fmt.Sprintf( - "[reset][green] Imported %s (ID: %s)", - s.Ephemeral.Type, s.ID))) + "[reset][green] Prepared %s for import", + s.TypeName, + ))) } return terraform.HookActionContinue, nil diff --git a/vendor/github.com/hashicorp/terraform/command/import.go b/vendor/github.com/hashicorp/terraform/command/import.go index 1636ab9d..b566ee9f 100644 --- a/vendor/github.com/hashicorp/terraform/command/import.go +++ b/vendor/github.com/hashicorp/terraform/command/import.go @@ -1,14 +1,20 @@ package command import ( + "errors" "fmt" "log" "os" "strings" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // ImportCommand is a cli.Command implementation that imports resources @@ -26,17 +32,21 @@ func (c *ImportCommand) Run(args []string) int { } var configPath string - args = c.Meta.process(args, true) + args, err = c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("import") - cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", 0, "parallelism") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags := c.Meta.extendedFlagSet("import") + cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") + cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", 
"", "path") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.StringVar(&configPath, "config", pwd, "path") cmdFlags.StringVar(&c.Meta.provider, "provider", "", "provider") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.BoolVar(&c.Meta.allowMissingConfig, "allow-missing-config", false, "allow missing config") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 @@ -49,25 +59,149 @@ func (c *ImportCommand) Run(args []string) int { return 1 } - // Load the module - var mod *module.Tree - if configPath != "" { - var err error - mod, err = c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + var diags tfdiags.Diagnostics + + // Parse the provided resource address. + traversalSrc := []byte(args[0]) + traversal, travDiags := hclsyntax.ParseTraversalAbs(traversalSrc, "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + c.registerSynthConfigSource("", traversalSrc) // so we can include a source snippet + c.showDiagnostics(diags) + c.Ui.Info(importCommandInvalidAddressReference) + return 1 + } + addr, addrDiags := addrs.ParseAbsResourceInstance(traversal) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + c.registerSynthConfigSource("", traversalSrc) // so we can include a source snippet + c.showDiagnostics(diags) + c.Ui.Info(importCommandInvalidAddressReference) + return 1 + } + + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + diags = diags.Append(errors.New("A managed resource address is required. Importing into a data resource is not allowed.")) + c.showDiagnostics(diags) + return 1 + } + + if !c.dirIsConfigPath(configPath) { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "No Terraform configuration files", + Detail: fmt.Sprintf( + "The directory %s does not contain any Terraform configuration files (.tf or .tf.json). To specify a different configuration directory, use the -config=\"...\" command line option.", + configPath, + ), + }) + c.showDiagnostics(diags) + return 1 + } + + // Load the full config, so we can verify that the target resource is + // already configured. + config, configDiags := c.loadConfig(configPath) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Verify that the given address points to something that exists in config. + // This is to reduce the risk that a typo in the resource address will + // import something that Terraform will want to immediately destroy on + // the next plan, and generally acts as a reassurance of user intent. + targetConfig := config.DescendentForInstance(addr.Module) + if targetConfig == nil { + modulePath := addr.Module.String() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Import to non-existent module", + Detail: fmt.Sprintf( + "%s is not defined in the configuration. 
Please add configuration for this module before importing into it.", + modulePath, + ), + }) + c.showDiagnostics(diags) + return 1 + } + targetMod := targetConfig.Module + rcs := targetMod.ManagedResources + var rc *configs.Resource + resourceRelAddr := addr.Resource.Resource + for _, thisRc := range rcs { + if resourceRelAddr.Type == thisRc.Type && resourceRelAddr.Name == thisRc.Name { + rc = thisRc + break + } + } + if !c.Meta.allowMissingConfig && rc == nil { + modulePath := addr.Module.String() + if modulePath == "" { + modulePath = "the root module" + } + + c.showDiagnostics(diags) + + // This is not a diagnostic because currently our diagnostics printer + // doesn't support having a code example in the detail, and there's + // a code example in this message. + // TODO: Improve the diagnostics printer so we can use it for this + // message. + c.Ui.Error(fmt.Sprintf( + importCommandMissingResourceFmt, + addr, modulePath, resourceRelAddr.Type, resourceRelAddr.Name, + )) + return 1 + } + + // Also parse the user-provided provider address, if any. + var providerAddr addrs.AbsProviderConfig + if c.Meta.provider != "" { + traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(c.Meta.provider), `-provider=...`, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(travDiags) + if travDiags.HasErrors() { + c.showDiagnostics(diags) + c.Ui.Info(importCommandInvalidAddressReference) return 1 } + relAddr, addrDiags := addrs.ParseProviderConfigCompact(traversal) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + providerAddr = relAddr.Absolute(addrs.RootModuleInstance) + } else { + // Use a default address inferred from the resource type. + // We assume the same module as the resource address here, which + // may get resolved to an inherited provider when we construct the + // import graph inside ctx.Import, called below. + providerAddr = resourceRelAddr.DefaultProviderConfig().Absolute(addr.Module) + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 } // Load the backend - b, err := c.Backend(&BackendOpts{ConfigPath: configPath}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(&BackendOpts{ + Config: config.Module.Backend, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } - // We require a local backend + // We require a backend.Local to build a context. + // This isn't necessarily a "local.Local" backend, which provides local + // operations, however that is the only current implementation. A + // "local.Local" backend also doesn't necessarily provide local state, as + // that may be delegated to a "remotestate.Backend". 
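Earlier in this file the ADDR argument is turned into a structured address by chaining hclsyntax.ParseTraversalAbs and addrs.ParseAbsResourceInstance, as shown above. A standalone sketch of that pipeline, using a hypothetical address string and the same vendored packages:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	src := []byte("module.app.aws_instance.web[0]") // hypothetical ADDR argument

	// First parse the raw string as an HCL traversal...
	traversal, travDiags := hclsyntax.ParseTraversalAbs(src, "", hcl.Pos{Line: 1, Column: 1})
	if travDiags.HasErrors() {
		panic(travDiags.Error())
	}

	// ...then interpret that traversal as an absolute resource instance address.
	addr, addrDiags := addrs.ParseAbsResourceInstance(traversal)
	if addrDiags.HasErrors() {
		panic(addrDiags.Err())
	}

	fmt.Println(addr.Module.String())   // module.app
	fmt.Println(addr.Resource.String()) // aws_instance.web[0]

	// The import command rejects anything that is not a managed resource.
	fmt.Println(addr.Resource.Resource.Mode == addrs.ManagedResourceMode) // true
}
```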
local, ok := b.(backend.Local) if !ok { c.Ui.Error(ErrUnsupportedLocalOp) @@ -75,30 +209,55 @@ func (c *ImportCommand) Run(args []string) int { } // Build the operation - opReq := c.Operation() - opReq.Module = mod + opReq := c.Operation(b) + opReq.ConfigDir = configPath + opReq.ConfigLoader, err = c.initConfigLoader() + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + { + var moreDiags tfdiags.Diagnostics + opReq.Variables, moreDiags = c.collectVariableValues() + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } // Get the context - ctx, state, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) + ctx, state, ctxDiags := local.Context(opReq) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } + // Make sure to unlock the state + defer func() { + err := opReq.StateLocker.Unlock(nil) + if err != nil { + c.Ui.Error(err.Error()) + } + }() + // Perform the import. Note that as you can see it is possible for this // API to import more than one resource at once. For now, we only allow // one while we stabilize this feature. - newState, err := ctx.Import(&terraform.ImportOpts{ + newState, importDiags := ctx.Import(&terraform.ImportOpts{ Targets: []*terraform.ImportTarget{ &terraform.ImportTarget{ - Addr: args[0], - ID: args[1], - Provider: c.Meta.provider, + Addr: addr, + ID: args[1], + ProviderAddr: providerAddr, }, }, }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error importing: %s", err)) + diags = diags.Append(importDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } @@ -113,13 +272,16 @@ func (c *ImportCommand) Run(args []string) int { return 1 } - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][green]\n" + - "Import success! The resources imported are shown above. These are\n" + - "now in your Terraform state. Import does not currently generate\n" + - "configuration, so you must do this next. If you do not create configuration\n" + - "for the above resources, then the next `terraform plan` will mark\n" + - "them for destruction."))) + c.Ui.Output(c.Colorize().Color("[reset][green]\n" + importCommandSuccessMsg)) + + if c.Meta.allowMissingConfig && rc == nil { + c.Ui.Output(c.Colorize().Color("[reset][yellow]\n" + importCommandAllowMissingResourceMsg)) + } + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } return 0 } @@ -141,11 +303,13 @@ Usage: terraform import [options] ADDR ID determine the ID syntax to use. It typically matches directly to the ID that the provider uses. - In the current state of Terraform import, the resource is only imported - into your state file. Once it is imported, you must manually write - configuration for the new resource or Terraform will mark it for destruction. - Future versions of Terraform will expand the functionality of Terraform - import. + The current implementation of Terraform import can only import resources + into the state. It does not generate configuration. A future version of + Terraform will also generate configuration. + + Because of this, prior to running terraform import it is necessary to write + a resource configuration block for the resource manually, to which the + imported object will be attached. 
This command will not modify your infrastructure, but it will make network requests to inspect parts of your infrastructure relevant to @@ -153,40 +317,43 @@ Usage: terraform import [options] ADDR ID Options: - -backup=path Path to backup the existing state file before - modifying. Defaults to the "-state-out" path with - ".backup" extension. Set to "-" to disable backup. + -backup=path Path to backup the existing state file before + modifying. Defaults to the "-state-out" path with + ".backup" extension. Set to "-" to disable backup. + + -config=path Path to a directory of Terraform configuration files + to use to configure the provider. Defaults to pwd. + If no config files are present, they must be provided + via the input prompts or env vars. - -config=path Path to a directory of Terraform configuration files - to use to configure the provider. Defaults to pwd. - If no config files are present, they must be provided - via the input prompts or env vars. + -allow-missing-config Allow import when no resource configuration block exists. - -input=true Ask for input for variables if not directly set. + -input=true Ask for input for variables if not directly set. - -lock=true Lock the state file when locking is supported. + -lock=true Lock the state file when locking is supported. - -lock-timeout=0s Duration to retry a state lock. + -lock-timeout=0s Duration to retry a state lock. - -no-color If specified, output won't contain any color. + -no-color If specified, output won't contain any color. - -provider=provider Specific provider to use for import. This is used for - specifying aliases, such as "aws.eu". Defaults to the - normal provider prefix of the resource being imported. + -provider=provider Specific provider to use for import. This is used for + specifying aliases, such as "aws.eu". Defaults to the + normal provider prefix of the resource being imported. - -state=path Path to read and save state (unless state-out - is specified). Defaults to "terraform.tfstate". + -state=PATH Path to the source state file. Defaults to the configured + backend, or "terraform.tfstate" - -state-out=path Path to write updated state file. By default, the - "-state" path will be used. + -state-out=PATH Path to the destination state file to write to. If this + isn't specified, the source state file will be used. This + can be a new or existing path. - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. This is only useful - with the "-config" flag. + -var 'foo=bar' Set a variable in the Terraform configuration. This + flag can be set multiple times. This is only useful + with the "-config" flag. - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + -var-file=foo Set variables in the Terraform configuration from + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. ` @@ -196,3 +363,30 @@ Options: func (c *ImportCommand) Synopsis() string { return "Import existing infrastructure into Terraform" } + +const importCommandInvalidAddressReference = `For information on valid syntax, see: +https://www.terraform.io/docs/internals/resource-addressing.html` + +const importCommandMissingResourceFmt = `[reset][bold][red]Error:[reset][bold] resource address %q does not exist in the configuration.[reset] + +Before importing this resource, please create its configuration in %s. 
For example:
+
+resource %q %q {
+  # (resource arguments)
+}
+`
+
+const importCommandSuccessMsg = `Import successful!
+
+The resources that were imported are shown above. These resources are now in
+your Terraform state and will henceforth be managed by Terraform.
+`
+
+const importCommandAllowMissingResourceMsg = `Import does not generate resource
+configuration; you must manually create a resource configuration block that
+matches the current or desired state.
+
+If there is no matching resource configuration block for the imported
+resource, Terraform will delete the resource on the next "terraform apply".
+It is recommended that you run "terraform plan" to verify that the
+configuration is correct and complete.
+`
diff --git a/vendor/github.com/hashicorp/terraform/command/init.go b/vendor/github.com/hashicorp/terraform/command/init.go
index ff0aadc9..c4e9554d 100644
--- a/vendor/github.com/hashicorp/terraform/command/init.go
+++ b/vendor/github.com/hashicorp/terraform/command/init.go
@@ -2,49 +2,108 @@ package command

 import (
 	"fmt"
+	"log"
 	"os"
-	"path/filepath"
+	"sort"
 	"strings"

-	"github.com/hashicorp/go-getter"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/posener/complete"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/errwrap"
+	"github.com/hashicorp/terraform/backend"
+	backendInit "github.com/hashicorp/terraform/backend/init"
 	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
-	"github.com/hashicorp/terraform/helper/variables"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/configs/configupgrade"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"github.com/hashicorp/terraform/internal/initwd"
+	"github.com/hashicorp/terraform/plugin/discovery"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/tfdiags"
 )

 // InitCommand is a Command implementation that takes a Terraform
 // module and clones it to the working directory.
 type InitCommand struct {
 	Meta
+
+	// getPlugins is for the -get-plugins flag
+	getPlugins bool
+
+	// providerInstaller is used to download and install providers that
+	// aren't found locally. This uses a discovery.ProviderInstaller instance
+	// by default, but it can be overridden here as a way to mock fetching
+	// providers for tests.
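+	//
+	// A test might, for example, assign a stub here before calling Run (an
+	// illustrative sketch; "stubInstaller" and "testMeta" are hypothetical
+	// test fixtures implementing discovery.Installer, not defined here):
+	//
+	//     c := &InitCommand{Meta: testMeta}
+	//     c.providerInstaller = &stubInstaller{}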
+ providerInstaller discovery.Installer } func (c *InitCommand) Run(args []string) int { - var flagBackend, flagGet bool - var flagConfigExtra map[string]interface{} + var flagFromModule string + var flagBackend, flagGet, flagUpgrade bool + var flagPluginPath FlagStringSlice + var flagVerifyPlugins bool + flagConfigExtra := newRawFlags("-backend-config") + + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - args = c.Meta.process(args, false) - cmdFlags := c.flagSet("init") + cmdFlags := c.Meta.extendedFlagSet("init") cmdFlags.BoolVar(&flagBackend, "backend", true, "") - cmdFlags.Var((*variables.FlagAny)(&flagConfigExtra), "backend-config", "") + cmdFlags.Var(flagConfigExtra, "backend-config", "") + cmdFlags.StringVar(&flagFromModule, "from-module", "", "copy the source of the given module into the directory before init") cmdFlags.BoolVar(&flagGet, "get", true, "") + cmdFlags.BoolVar(&c.getPlugins, "get-plugins", true, "") cmdFlags.BoolVar(&c.forceInitCopy, "force-copy", false, "suppress prompts about copying state data") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") cmdFlags.BoolVar(&c.reconfigure, "reconfigure", false, "reconfigure") - + cmdFlags.BoolVar(&flagUpgrade, "upgrade", false, "") + cmdFlags.Var(&flagPluginPath, "plugin-dir", "plugin directory") + cmdFlags.BoolVar(&flagVerifyPlugins, "verify-plugins", true, "verify plugins") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } + var diags tfdiags.Diagnostics + + if len(flagPluginPath) > 0 { + c.pluginPath = flagPluginPath + c.getPlugins = false + } + + // set providerInstaller if we don't have a test version already + if c.providerInstaller == nil { + c.providerInstaller = &discovery.ProviderInstaller{ + Dir: c.pluginDir(), + Cache: c.pluginCache(), + PluginProtocolVersion: discovery.PluginInstallProtocolVersion, + SkipVerify: !flagVerifyPlugins, + Ui: c.Ui, + Services: c.Services, + } + } + // Validate the arg count args = cmdFlags.Args() - if len(args) > 2 { - c.Ui.Error("The init command expects at most two arguments.\n") + if len(args) > 1 { + c.Ui.Error("The init command expects at most one argument.\n") cmdFlags.Usage() return 1 } + if err := c.storePluginPath(c.pluginPath); err != nil { + c.Ui.Error(fmt.Sprintf("Error saving -plugin-path values: %s", err)) + return 1 + } + // Get our pwd. We don't always need it but always getting it is easier // than the logic to determine if it is or isn't needed. pwd, err := os.Getwd() @@ -53,143 +112,638 @@ func (c *InitCommand) Run(args []string) int { return 1 } - // Get the path and source module to copy - var path string - var source string - switch len(args) { - case 0: - path = pwd - case 1: - path = pwd - source = args[0] - case 2: - source = args[0] - path = args[1] - default: - panic("assertion failed on arg count") + // If an argument is provided then it overrides our working directory. + path := pwd + if len(args) == 1 { + path = args[0] } - // Set the state out path to be the path requested for the module - // to be copied. This ensures any remote states gets setup in the - // proper directory. 
- c.Meta.dataDir = filepath.Join(path, DefaultDataDir) - // This will track whether we outputted anything so that we know whether // to output a newline before the success message var header bool - // If we have a source, copy it - if source != "" { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]"+ - "Initializing configuration from: %q...", source))) - if err := c.copySource(path, source, pwd); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error copying source: %s", err)) + if flagFromModule != "" { + src := flagFromModule + + empty, err := config.IsEmptyDir(path) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error validating destination directory: %s", err)) + return 1 + } + if !empty { + c.Ui.Error(strings.TrimSpace(errInitCopyNotEmpty)) return 1 } + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + "[reset][bold]Copying configuration[reset] from %q...", src, + ))) header = true + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: false, // since they are in a weird location for init + } + + initDiags := c.initDirFromModule(path, src, hooks) + diags = diags.Append(initDiags) + if initDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + c.Ui.Output("") } // If our directory is empty, then we're done. We can't get or setup // the backend with an empty directory. - if empty, err := config.IsEmptyDir(path); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error checking configuration: %s", err)) + empty, err := config.IsEmptyDir(path) + if err != nil { + diags = diags.Append(fmt.Errorf("Error checking configuration: %s", err)) return 1 - } else if empty { + } + if empty { c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitEmpty))) return 0 } - // If we're performing a get or loading the backend, then we perform - // some extra tasks. - if flagGet || flagBackend { - // Load the configuration in this directory so that we can know - // if we have anything to get or any backend to configure. We do - // this to improve the UX. Practically, we could call the functions - // below without checking this to the same effect. - conf, err := config.LoadDir(path) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading configuration: %s", err)) + // Before we do anything else, we'll try loading configuration with both + // our "normal" and "early" configuration codepaths. If early succeeds + // while normal fails, that strongly suggests that the configuration is + // using syntax that worked in 0.11 but no longer in 0.12, which requires + // some special behavior here to get the directory initialized just enough + // to run "terraform 0.12upgrade". + // + // FIXME: Once we reach 0.13 and remove 0.12upgrade, we should rework this + // so that we first use the early config to do a general compatibility + // check with dependencies, producing version-oriented error messages if + // dependencies aren't right, and only then use the real loader to deal + // with the backend configuration. + rootMod, confDiags := c.loadSingleModule(path) + rootModEarly, earlyConfDiags := c.loadSingleModuleEarly(path) + configUpgradeProbablyNeeded := false + if confDiags.HasErrors() { + if earlyConfDiags.HasErrors() { + // If both parsers produced errors then we'll assume the config + // is _truly_ invalid and produce error messages as normal. + // Since this may be the user's first ever interaction with Terraform, + // we'll provide some additional context in this case. 
+ c.Ui.Error(strings.TrimSpace(errInitConfigError)) + diags = diags.Append(confDiags) + c.showDiagnostics(diags) return 1 } - // If we requested downloading modules and have modules in the config - if flagGet && len(conf.Modules) > 0 { - header = true - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]" + - "Downloading modules (if any)..."))) - if err := getModules(&c.Meta, path, module.GetModeGet); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error downloading modules: %s", err)) + // If _only_ the main loader produced errors then that suggests an + // upgrade may help. To give us more certainty here, we'll use the + // same heuristic that "terraform 0.12upgrade" uses to guess if a + // configuration has already been upgraded, to reduce the risk that + // we'll produce a misleading message if the problem is just a regular + // syntax error that the early loader just didn't catch. + sources, err := configupgrade.LoadModule(path) + if err == nil { + if already, _ := sources.MaybeAlreadyUpgraded(); already { + // Just report the errors as normal, then. + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + diags = diags.Append(confDiags) + c.showDiagnostics(diags) return 1 } } + configUpgradeProbablyNeeded = true + } + if earlyConfDiags.HasErrors() { + // If _only_ the early loader encountered errors then that's unusual + // (it should generally be a superset of the normal loader) but we'll + // return those errors anyway since otherwise we'll probably get + // some weird behavior downstream. Errors from the early loader are + // generally not as high-quality since it has less context to work with. + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + diags = diags.Append(earlyConfDiags) + c.showDiagnostics(diags) + return 1 + } - // If we're requesting backend configuration and configure it - if flagBackend { + if flagGet { + modsOutput, modsDiags := c.getModules(path, rootModEarly, flagUpgrade) + diags = diags.Append(modsDiags) + if modsDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if modsOutput { header = true + } + } - // Only output that we're initializing a backend if we have - // something in the config. We can be UNSETTING a backend as well - // in which case we choose not to show this. - if conf.Terraform != nil && conf.Terraform.Backend != nil { - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold]" + - "Initializing the backend..."))) - } + // With all of the modules (hopefully) installed, we can now try to load + // the whole configuration tree. + // + // Just as above, we'll try loading both with the early and normal config + // loaders here. Subsequent work will only use the early config, but + // loading both gives us an opportunity to prefer the better error messages + // from the normal loader if both fail. 
+ _, confDiags = c.loadConfig(path) + earlyConfig, earlyConfDiags := c.loadConfigEarly(path) + if confDiags.HasErrors() && !configUpgradeProbablyNeeded { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + diags = diags.Append(confDiags) + c.showDiagnostics(diags) + return 1 + } + if earlyConfDiags.HasErrors() { + c.Ui.Error(strings.TrimSpace(errInitConfigError)) + diags = diags.Append(earlyConfDiags) + c.showDiagnostics(diags) + return 1 + } - opts := &BackendOpts{ - ConfigPath: path, - ConfigExtra: flagConfigExtra, - Init: true, - } - if _, err := c.Backend(opts); err != nil { - c.Ui.Error(err.Error()) + { + // Before we go further, we'll check to make sure none of the modules + // in the configuration declare that they don't support this Terraform + // version, so we can produce a version-related error message rather + // than potentially-confusing downstream errors. + versionDiags := initwd.CheckCoreVersionRequirements(earlyConfig) + diags = diags.Append(versionDiags) + if versionDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } + + var back backend.Backend + if flagBackend { + switch { + case configUpgradeProbablyNeeded: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Skipping backend initialization pending configuration upgrade", + // The "below" in this message is referring to the special + // note about running "terraform 0.12upgrade" that we'll + // print out at the end when configUpgradeProbablyNeeded is set. + "The root module configuration contains errors that may be fixed by running the configuration upgrade tool, so Terraform is skipping backend initialization. See below for more information.", + )) + default: + be, backendOutput, backendDiags := c.initBackend(rootMod, flagConfigExtra) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } + if backendOutput { + header = true + } + back = be } } + if back == nil { + // If we didn't initialize a backend then we'll try to at least + // instantiate one. This might fail if it wasn't already initialized + // by a previous run, so we must still expect that "back" may be nil + // in code that follows. + var backDiags tfdiags.Diagnostics + back, backDiags = c.Backend(nil) + if backDiags.HasErrors() { + // This is fine. We'll proceed with no backend, then. + back = nil + } + } + + var state *states.State + + // If we have a functional backend (either just initialized or initialized + // on a previous run) we'll use the current state as a potential source + // of provider dependencies. + if back != nil { + sMgr, err := back.StateMgr(c.Workspace()) + if err != nil { + c.Ui.Error(fmt.Sprintf("Error loading state: %s", err)) + return 1 + } + + if err := sMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err)) + return 1 + } + + state = sMgr.State() + } + + if v := os.Getenv(ProviderSkipVerifyEnvVar); v != "" { + c.ignorePluginChecksum = true + } + + // Now that we have loaded all modules, check the module tree for missing providers. + providersOutput, providerDiags := c.getProviders(earlyConfig, state, flagUpgrade) + diags = diags.Append(providerDiags) + if providerDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + if providersOutput { + header = true + } + // If we outputted information, then we need to output a newline // so that our success message is nicely spaced out from prior text. 
if header { c.Ui.Output("") } + // If we accumulated any warnings along the way that weren't accompanied + // by errors then we'll output them here so that the success message is + // still the final thing shown. + c.showDiagnostics(diags) + + if configUpgradeProbablyNeeded { + switch { + case c.RunningInAutomation: + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessConfigUpgrade))) + default: + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessConfigUpgradeCLI))) + } + return 0 + } c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccess))) + if !c.RunningInAutomation { + // If we're not running in an automation wrapper, give the user + // some more detailed next steps that are appropriate for interactive + // shell usage. + c.Ui.Output(c.Colorize().Color(strings.TrimSpace(outputInitSuccessCLI))) + } return 0 } -func (c *InitCommand) copySource(dst, src, pwd string) error { - // Verify the directory is empty - if empty, err := config.IsEmptyDir(dst); err != nil { - return fmt.Errorf("Error checking on destination path: %s", err) - } else if !empty { - return fmt.Errorf(strings.TrimSpace(errInitCopyNotEmpty)) +func (c *InitCommand) getModules(path string, earlyRoot *tfconfig.Module, upgrade bool) (output bool, diags tfdiags.Diagnostics) { + if len(earlyRoot.ModuleCalls) == 0 { + // Nothing to do + return false, nil + } + + if upgrade { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Upgrading modules..."))) + } else { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf("[reset][bold]Initializing modules..."))) + } + + hooks := uiModuleInstallHooks{ + Ui: c.Ui, + ShowLocalPaths: true, + } + instDiags := c.installModules(path, upgrade, hooks) + diags = diags.Append(instDiags) + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if c.configLoader != nil { + if err := c.configLoader.RefreshModules(); err != nil { + // Should never happen + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read module manifest", + fmt.Sprintf("After installing modules, Terraform could not re-read the manifest of installed modules. This is a bug in Terraform. 
%s.", err), + )) + } + } + + return true, diags +} + +func (c *InitCommand) initBackend(root *configs.Module, extraConfig rawFlags) (be backend.Backend, output bool, diags tfdiags.Diagnostics) { + c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[reset][bold]Initializing the backend..."))) + + var backendConfig *configs.Backend + var backendConfigOverride hcl.Body + if root.Backend != nil { + backendType := root.Backend.Type + bf := backendInit.Backend(backendType) + if bf == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported backend type", + Detail: fmt.Sprintf("There is no backend type named %q.", backendType), + Subject: &root.Backend.TypeRange, + }) + return nil, true, diags + } + + b := bf() + backendSchema := b.ConfigSchema() + backendConfig = root.Backend + + var overrideDiags tfdiags.Diagnostics + backendConfigOverride, overrideDiags = c.backendConfigOverrideBody(extraConfig, backendSchema) + diags = diags.Append(overrideDiags) + if overrideDiags.HasErrors() { + return nil, true, diags + } + } + + opts := &BackendOpts{ + Config: backendConfig, + ConfigOverride: backendConfigOverride, + Init: true, + } + + back, backDiags := c.Backend(opts) + diags = diags.Append(backDiags) + return back, true, diags +} + +// Load the complete module tree, and fetch any missing providers. +// This method outputs its own Ui. +func (c *InitCommand) getProviders(earlyConfig *earlyconfig.Config, state *states.State, upgrade bool) (output bool, diags tfdiags.Diagnostics) { + var available discovery.PluginMetaSet + if upgrade { + // If we're in upgrade mode, we ignore any auto-installed plugins + // in "available", causing us to reinstall and possibly upgrade them. + available = c.providerPluginManuallyInstalledSet() + } else { + available = c.providerPluginSet() + } + + configDeps, depsDiags := earlyConfig.ProviderDependencies() + diags = diags.Append(depsDiags) + if depsDiags.HasErrors() { + return false, diags + } + + configReqs := configDeps.AllPluginRequirements() + // FIXME: This is weird because ConfigTreeDependencies was written before + // we switched over to using earlyConfig as the main source of dependencies. + // In future we should clean this up to be a more reasoable API. 
+ stateReqs := terraform.ConfigTreeDependencies(nil, state).AllPluginRequirements() + + requirements := configReqs.Merge(stateReqs) + if len(requirements) == 0 { + // nothing to initialize + return false, nil + } + + c.Ui.Output(c.Colorize().Color( + "\n[reset][bold]Initializing provider plugins...", + )) + + missing := c.missingPlugins(available, requirements) + + if c.getPlugins { + if len(missing) > 0 { + c.Ui.Output("- Checking for available provider plugins...") + } + + for provider, reqd := range missing { + _, providerDiags, err := c.providerInstaller.Get(provider, reqd.Versions) + diags = diags.Append(providerDiags) + + if err != nil { + constraint := reqd.Versions.String() + if constraint == "" { + constraint = "(any version)" + } + + switch { + case err == discovery.ErrorServiceUnreachable, err == discovery.ErrorPublicRegistryUnreachable: + c.Ui.Error(errDiscoveryServiceUnreachable) + case err == discovery.ErrorNoSuchProvider: + c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir)) + case err == discovery.ErrorNoSuitableVersion: + if reqd.Versions.Unconstrained() { + // This should never happen, but might crop up if we catch + // the releases server in a weird state where the provider's + // directory is present but does not yet contain any + // versions. We'll treat it like ErrorNoSuchProvider, then. + c.Ui.Error(fmt.Sprintf(errProviderNotFound, provider, DefaultPluginVendorDir)) + } else { + c.Ui.Error(fmt.Sprintf(errProviderVersionsUnsuitable, provider, reqd.Versions)) + } + case errwrap.Contains(err, discovery.ErrorVersionIncompatible.Error()): + // Attempt to fetch nested error to display to the user which versions + // we considered and which versions might be compatible. Otherwise, + // we'll just display a generic version incompatible msg + incompatErr := errwrap.GetType(err, fmt.Errorf("")) + if incompatErr != nil { + c.Ui.Error(incompatErr.Error()) + } else { + // Generic version incompatible msg + c.Ui.Error(fmt.Sprintf(errProviderIncompatible, provider, constraint)) + } + // Reset nested errors + err = discovery.ErrorVersionIncompatible + case err == discovery.ErrorNoVersionCompatible: + // Generic version incompatible msg + c.Ui.Error(fmt.Sprintf(errProviderIncompatible, provider, constraint)) + case err == discovery.ErrorSignatureVerification: + c.Ui.Error(fmt.Sprintf(errSignatureVerification, provider)) + case err == discovery.ErrorChecksumVerification, + err == discovery.ErrorMissingChecksumVerification: + c.Ui.Error(fmt.Sprintf(errChecksumVerification, provider)) + default: + c.Ui.Error(fmt.Sprintf(errProviderInstallError, provider, err.Error(), DefaultPluginVendorDir)) + } + + diags = diags.Append(err) + } + } + + if diags.HasErrors() { + return true, diags + } + } else if len(missing) > 0 { + // we have missing providers, but aren't going to try and download them + var lines []string + for provider, reqd := range missing { + if reqd.Versions.Unconstrained() { + lines = append(lines, fmt.Sprintf("* %s (any version)\n", provider)) + } else { + lines = append(lines, fmt.Sprintf("* %s (%s)\n", provider, reqd.Versions)) + } + diags = diags.Append(fmt.Errorf("missing provider %q", provider)) + } + sort.Strings(lines) + c.Ui.Error(fmt.Sprintf(errMissingProvidersNoInstall, strings.Join(lines, ""), DefaultPluginVendorDir)) + return true, diags } - // Detect - source, err := getter.Detect(src, pwd, getter.Detectors) + // With all the providers downloaded, we'll generate our lock file + // that ensures the provider binaries remain unchanged until 
we init + // again. If anything changes, other commands that use providers will + // fail with an error instructing the user to re-run this command. + available = c.providerPluginSet() // re-discover to see newly-installed plugins + + // internal providers were already filtered out, since we don't need to get them. + chosen := choosePlugins(available, nil, requirements) + + digests := map[string][]byte{} + for name, meta := range chosen { + digest, err := meta.SHA256() + if err != nil { + diags = diags.Append(fmt.Errorf("Failed to read provider plugin %s: %s", meta.Path, err)) + return true, diags + } + digests[name] = digest + if c.ignorePluginChecksum { + digests[name] = nil + } + } + err := c.providerPluginsLock().Write(digests) if err != nil { - return fmt.Errorf("Error with module source: %s", err) + diags = diags.Append(fmt.Errorf("failed to save provider manifest: %s", err)) + return true, diags + } + + { + // Purge any auto-installed plugins that aren't being used. + purged, err := c.providerInstaller.PurgeUnused(chosen) + if err != nil { + // Failure to purge old plugins is not a fatal error + c.Ui.Warn(fmt.Sprintf("failed to purge unused plugins: %s", err)) + } + if purged != nil { + for meta := range purged { + log.Printf("[DEBUG] Purged unused %s plugin %s", meta.Name, meta.Path) + } + } + } + + // If any providers have "floating" versions (completely unconstrained) + // we'll suggest the user constrain with a pessimistic constraint to + // avoid implicitly adopting a later major release. + constraintSuggestions := make(map[string]discovery.ConstraintStr) + for name, meta := range chosen { + req := requirements[name] + if req == nil { + // should never happen, but we don't want to crash here, so we'll + // be cautious. + continue + } + + if req.Versions.Unconstrained() && meta.Version != discovery.VersionZero { + // meta.Version.MustParse is safe here because our "chosen" metas + // were already filtered for validity of versions. + constraintSuggestions[name] = meta.Version.MustParse().MinorUpgradeConstraintStr() + } + } + if len(constraintSuggestions) != 0 { + names := make([]string, 0, len(constraintSuggestions)) + for name := range constraintSuggestions { + names = append(names, name) + } + sort.Strings(names) + + c.Ui.Output(outputInitProvidersUnconstrained) + for _, name := range names { + c.Ui.Output(fmt.Sprintf("* provider.%s: version = %q", name, constraintSuggestions[name])) + } } - // Get it! - return module.GetCopy(dst, source) + return true, diags +} + +// backendConfigOverrideBody interprets the raw values of -backend-config +// arguments into a hcl Body that should override the backend settings given +// in the configuration. +// +// If the result is nil then no override needs to be provided. +// +// If the returned diagnostics contains errors then the returned body may be +// incomplete or invalid. 
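+//
+// As a sketch of the accepted forms (grounded in the parsing below;
+// "backend.hcl" and "bucket=my-state" are illustrative values, not
+// defaults):
+//
+//     terraform init -backend-config=backend.hcl        merge a file of key/value assignments
+//     terraform init -backend-config="bucket=my-state"  set one attribute from the backend schema
+//     terraform init -backend-config=""                 a single empty value yields an empty but
+//                                                       non-nil override (special-cased below)
+//
+// File items and key=value items may be mixed; they are merged in the
+// order given on the command line.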
+func (c *InitCommand) backendConfigOverrideBody(flags rawFlags, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + items := flags.AllItems() + if len(items) == 0 { + return nil, nil + } + + var ret hcl.Body + var diags tfdiags.Diagnostics + synthVals := make(map[string]cty.Value) + + mergeBody := func(newBody hcl.Body) { + if ret == nil { + ret = newBody + } else { + ret = configs.MergeBodies(ret, newBody) + } + } + flushVals := func() { + if len(synthVals) == 0 { + return + } + newBody := configs.SynthBody("-backend-config=...", synthVals) + mergeBody(newBody) + synthVals = make(map[string]cty.Value) + } + + if len(items) == 1 && items[0].Value == "" { + // Explicitly remove all -backend-config options. + // We do this by setting an empty but non-nil ConfigOverrides. + return configs.SynthBody("-backend-config=''", synthVals), diags + } + + for _, item := range items { + eq := strings.Index(item.Value, "=") + + if eq == -1 { + // The value is interpreted as a filename. + newBody, fileDiags := c.loadHCLFile(item.Value) + diags = diags.Append(fileDiags) + flushVals() // deal with any accumulated individual values first + mergeBody(newBody) + } else { + name := item.Value[:eq] + rawValue := item.Value[eq+1:] + attrS := schema.Attributes[name] + if attrS == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend configuration argument", + fmt.Sprintf("The backend configuration argument %q given on the command line is not expected for the selected backend type.", name), + )) + continue + } + value, valueDiags := configValueFromCLI(item.String(), rawValue, attrS.Type) + diags = diags.Append(valueDiags) + if valueDiags.HasErrors() { + continue + } + synthVals[name] = value + } + } + + flushVals() + + return ret, diags +} + +func (c *InitCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *InitCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-backend": completePredictBoolean, + "-backend-config": complete.PredictFiles("*.tfvars"), // can also be key=value, but we can't "predict" that + "-force-copy": complete.PredictNothing, + "-from-module": completePredictModuleSource, + "-get": completePredictBoolean, + "-get-plugins": completePredictBoolean, + "-input": completePredictBoolean, + "-lock": completePredictBoolean, + "-lock-timeout": complete.PredictAnything, + "-no-color": complete.PredictNothing, + "-plugin-dir": complete.PredictDirs(""), + "-reconfigure": complete.PredictNothing, + "-upgrade": completePredictBoolean, + "-verify-plugins": completePredictBoolean, + } } func (c *InitCommand) Help() string { helpText := ` -Usage: terraform init [options] [SOURCE] [PATH] +Usage: terraform init [options] [DIR] - Initialize a new or existing Terraform environment by creating + Initialize a new or existing Terraform working directory by creating initial files, loading any remote state, downloading modules, etc. This is the first command that should be run for any new or existing @@ -198,24 +752,16 @@ Usage: terraform init [options] [SOURCE] [PATH] control. This command is always safe to run multiple times. Though subsequent runs - may give errors, this command will never blow away your environment or state. - Even so, if you have important information, please back it up prior to - running this command just in case. + may give errors, this command will never delete your configuration or + state. 
Even so, if you have important information, please back it up prior + to running this command, just in case. If no arguments are given, the configuration in this working directory is initialized. - If one or two arguments are given, the first is a SOURCE of a module to - download to the second argument PATH. After downloading the module to PATH, - the configuration will be initialized as if this command were called pointing - only to that PATH. PATH must be empty of any Terraform files. Any - conflicting non-Terraform files will be overwritten. The module download - is a copy. If you're downloading a module from Git, it will not preserve - Git history. - Options: - -backend=true Configure the backend for this environment. + -backend=true Configure the backend for this configuration. -backend-config=path This can be either a path to an HCL file with key/value assignments (same format as terraform.tfvars) or a @@ -228,8 +774,13 @@ Options: equivalent to providing a "yes" to all confirmation prompts. + -from-module=SOURCE Copy the contents of the given module into the target + directory before initialization. + -get=true Download any modules for this configuration. + -get-plugins=true Download any missing plugins for this configuration. + -input=true Ask for input if necessary. If false, will error if input was required. @@ -239,21 +790,41 @@ Options: -no-color If specified, output won't contain any color. - -reconfigure Reconfigure the backend, ignoring any saved configuration. + -plugin-dir Directory containing plugin binaries. This overrides all + default search paths for plugins, and prevents the + automatic installation of plugins. This flag can be used + multiple times. + + -reconfigure Reconfigure the backend, ignoring any saved + configuration. + + -upgrade=false If installing modules (-get) or plugins (-get-plugins), + ignore previously-downloaded objects and install the + latest version allowed within configured constraints. + + -verify-plugins=true Verify the authenticity and integrity of automatically + downloaded plugins. ` return strings.TrimSpace(helpText) } func (c *InitCommand) Synopsis() string { - return "Initialize a new or existing Terraform configuration" + return "Initialize a Terraform working directory" } +const errInitConfigError = ` +There are some problems with the configuration, described below. + +The Terraform configuration must be valid before initialization so that +Terraform can determine which modules and providers need to be installed. +` + const errInitCopyNotEmpty = ` -The destination path contains Terraform configuration files. The init command -with a SOURCE parameter can only be used on a directory without existing -Terraform files. +The working directory already contains files. The -from-module option requires +an empty directory into which a copy of the referenced module will be placed. -Please resolve this issue and try again. +To initialize the configuration already in this working directory, omit the +-from-module option. ` const outputInitEmpty = ` @@ -265,12 +836,160 @@ with Terraform immediately by creating Terraform configuration files. const outputInitSuccess = ` [reset][bold][green]Terraform has been successfully initialized![reset][green] +` +const outputInitSuccessCLI = `[reset][green] You may now begin working with Terraform. Try running "terraform plan" to see any changes that are required for your infrastructure. All Terraform commands should now work. 
If you ever set or change modules or backend configuration for Terraform, -rerun this command to reinitialize your environment. If you forget, other +rerun this command to reinitialize your working directory. If you forget, other commands will detect it and remind you to do so if necessary. ` + +const outputInitSuccessConfigUpgrade = ` +[reset][bold]Terraform has initialized, but configuration upgrades may be needed.[reset] + +Terraform found syntax errors in the configuration that prevented full +initialization. If you've recently upgraded to Terraform v0.12, this may be +because your configuration uses syntax constructs that are no longer valid, +and so must be updated before full initialization is possible. + +Run terraform init for this configuration at a shell prompt for more information +on how to update it for Terraform v0.12 compatibility. +` + +const outputInitSuccessConfigUpgradeCLI = `[reset][green] +[reset][bold]Terraform has initialized, but configuration upgrades may be needed.[reset] + +Terraform found syntax errors in the configuration that prevented full +initialization. If you've recently upgraded to Terraform v0.12, this may be +because your configuration uses syntax constructs that are no longer valid, +and so must be updated before full initialization is possible. + +Terraform has installed the required providers to support the configuration +upgrade process. To begin upgrading your configuration, run the following: + terraform 0.12upgrade + +To see the full set of errors that led to this message, run: + terraform validate +` + +const outputInitProvidersUnconstrained = ` +The following providers do not have any version constraints in configuration, +so the latest version was installed. + +To prevent automatic upgrades to new major versions that may contain breaking +changes, it is recommended to add version = "..." constraints to the +corresponding provider blocks in configuration, with the constraint strings +suggested below. +` + +const errDiscoveryServiceUnreachable = ` +[reset][bold][red]Registry service unreachable.[reset][red] + +This may indicate a network issue, or an issue with the requested Terraform Registry. +` + +const errProviderNotFound = ` +[reset][bold][red]Provider %[1]q not available for installation.[reset][red] + +A provider named %[1]q could not be found in the Terraform Registry. + +This may result from mistyping the provider name, or the given provider may +be a third-party provider that cannot be installed automatically. + +In the latter case, the plugin must be installed manually by locating and +downloading a suitable distribution package and placing the plugin's executable +file in the following directory: + %[2]s + +Terraform detects necessary plugins by inspecting the configuration and state. +To view the provider versions requested by each module, run +"terraform providers". +` + +const errProviderVersionsUnsuitable = ` +[reset][bold][red]No provider %[1]q plugins meet the constraint %[2]q.[reset][red] + +The version constraint is derived from the "version" argument within the +provider %[1]q block in configuration. Child modules may also apply +provider version constraints. To view the provider versions requested by each +module in the current configuration, run "terraform providers". + +To proceed, the version constraints for this provider must be relaxed by +either adjusting or removing the "version" argument in the provider blocks +throughout the configuration. 
+`
+
+const errProviderIncompatible = `
+[reset][bold][red]No available provider %[1]q plugins are compatible with this Terraform version.[reset][red]
+
+From time to time, new Terraform major releases can change the requirements for
+plugins such that older plugins become incompatible.
+
+Terraform checked all of the plugin versions matching the given constraint:
+  %[2]s
+
+Unfortunately, none of the suitable versions are compatible with this version
+of Terraform. If you have recently upgraded Terraform, it may be necessary to
+move to a newer major release of this provider. Alternatively, if you are
+attempting to upgrade the provider to a new major version you may need to
+also upgrade Terraform to support the new version.
+
+Consult the documentation for this provider for more information on
+compatibility between provider versions and Terraform versions.
+`
+
+const errProviderInstallError = `
+[reset][bold][red]Error installing provider %[1]q: %[2]s.[reset][red]
+
+Terraform analyses the configuration and state and automatically downloads
+plugins for the providers used. However, when attempting to download this
+plugin an unexpected error occurred.
+
+This can happen when Terraform is unable to reach the plugin repository,
+for example because access is blocked by a firewall.
+
+If automatic installation is not possible or desirable in your environment,
+you may instead install plugins manually by downloading a suitable
+distribution package and placing the plugin's executable file in the
+following directory:
+  %[3]s
+`
+
+const errMissingProvidersNoInstall = `
+[reset][bold][red]Missing required providers.[reset][red]
+
+The following provider constraints are not met by the currently-installed
+provider plugins:
+
+%[1]s
+Terraform can automatically download and install plugins to meet the given
+constraints, but this step was skipped due to the use of -get-plugins=false
+and/or -plugin-dir on the command line.
+
+If automatic installation is not possible or desirable in your environment,
+you may manually install plugins by downloading a suitable distribution package
+and placing the plugin's executable file in one of the directories given
+by -plugin-dir on the command line, or in the following directory if custom
+plugin directories are not set:
+  %[2]s
+`
+
+const errChecksumVerification = `
+[reset][bold][red]Error verifying checksum for provider %[1]q[reset][red]
+The checksum for the provider distribution from the Terraform Registry
+did not match the source. This may mean that the distributed files
+were changed after this version was released to the Registry.
+`
+
+const errSignatureVerification = `
+[reset][bold][red]Error verifying GPG signature for provider %[1]q[reset][red]
+Terraform was unable to verify the GPG signature of the downloaded provider
+files using the keys downloaded from the Terraform Registry. This may mean that
+the publisher of the provider removed the key it was signed with, or that the
+distributed files were changed after this version was released.
+` diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin.go index 01d8c77b..b26ba1df 100644 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin.go +++ b/vendor/github.com/hashicorp/terraform/command/internal_plugin.go @@ -45,16 +45,6 @@ func (c *InternalPluginCommand) Run(args []string) int { log.SetPrefix(fmt.Sprintf("%s-%s (internal) ", pluginName, pluginType)) switch pluginType { - case "provider": - pluginFunc, found := InternalProviders[pluginName] - if !found { - log.Printf("[ERROR] Could not load provider: %s", pluginName) - return 1 - } - log.Printf("[INFO] Starting provider plugin %s", pluginName) - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: pluginFunc, - }) case "provisioner": pluginFunc, found := InternalProvisioners[pluginName] if !found { diff --git a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go index 271bc8f6..e26c2c19 100644 --- a/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go +++ b/vendor/github.com/hashicorp/terraform/command/internal_plugin_list.go @@ -1,172 +1,28 @@ -// +build !core - // // This file is automatically generated by scripts/generate-plugins.go -- Do not edit! // package command import ( - alicloudprovider "github.com/hashicorp/terraform/builtin/providers/alicloud" - archiveprovider "github.com/hashicorp/terraform/builtin/providers/archive" - arukasprovider "github.com/hashicorp/terraform/builtin/providers/arukas" - atlasprovider "github.com/hashicorp/terraform/builtin/providers/atlas" - awsprovider "github.com/hashicorp/terraform/builtin/providers/aws" - azureprovider "github.com/hashicorp/terraform/builtin/providers/azure" - azurermprovider "github.com/hashicorp/terraform/builtin/providers/azurerm" - bitbucketprovider "github.com/hashicorp/terraform/builtin/providers/bitbucket" - chefprovider "github.com/hashicorp/terraform/builtin/providers/chef" - circonusprovider "github.com/hashicorp/terraform/builtin/providers/circonus" - clcprovider "github.com/hashicorp/terraform/builtin/providers/clc" - cloudflareprovider "github.com/hashicorp/terraform/builtin/providers/cloudflare" - cloudstackprovider "github.com/hashicorp/terraform/builtin/providers/cloudstack" - cobblerprovider "github.com/hashicorp/terraform/builtin/providers/cobbler" - consulprovider "github.com/hashicorp/terraform/builtin/providers/consul" - datadogprovider "github.com/hashicorp/terraform/builtin/providers/datadog" - digitaloceanprovider "github.com/hashicorp/terraform/builtin/providers/digitalocean" - dmeprovider "github.com/hashicorp/terraform/builtin/providers/dme" - dnsprovider "github.com/hashicorp/terraform/builtin/providers/dns" - dnsimpleprovider "github.com/hashicorp/terraform/builtin/providers/dnsimple" - dockerprovider "github.com/hashicorp/terraform/builtin/providers/docker" - dynprovider "github.com/hashicorp/terraform/builtin/providers/dyn" - externalprovider "github.com/hashicorp/terraform/builtin/providers/external" - fastlyprovider "github.com/hashicorp/terraform/builtin/providers/fastly" - githubprovider "github.com/hashicorp/terraform/builtin/providers/github" - gitlabprovider 
"github.com/hashicorp/terraform/builtin/providers/gitlab" - googleprovider "github.com/hashicorp/terraform/builtin/providers/google" - grafanaprovider "github.com/hashicorp/terraform/builtin/providers/grafana" - herokuprovider "github.com/hashicorp/terraform/builtin/providers/heroku" - httpprovider "github.com/hashicorp/terraform/builtin/providers/http" - icinga2provider "github.com/hashicorp/terraform/builtin/providers/icinga2" - ignitionprovider "github.com/hashicorp/terraform/builtin/providers/ignition" - influxdbprovider "github.com/hashicorp/terraform/builtin/providers/influxdb" - kubernetesprovider "github.com/hashicorp/terraform/builtin/providers/kubernetes" - libratoprovider "github.com/hashicorp/terraform/builtin/providers/librato" - localprovider "github.com/hashicorp/terraform/builtin/providers/local" - logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries" - mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun" - mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql" - newrelicprovider "github.com/hashicorp/terraform/builtin/providers/newrelic" - nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad" - ns1provider "github.com/hashicorp/terraform/builtin/providers/ns1" - nullprovider "github.com/hashicorp/terraform/builtin/providers/null" - oneandoneprovider "github.com/hashicorp/terraform/builtin/providers/oneandone" - openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack" - opsgenieprovider "github.com/hashicorp/terraform/builtin/providers/opsgenie" - ovhprovider "github.com/hashicorp/terraform/builtin/providers/ovh" - packetprovider "github.com/hashicorp/terraform/builtin/providers/packet" - pagerdutyprovider "github.com/hashicorp/terraform/builtin/providers/pagerduty" - postgresqlprovider "github.com/hashicorp/terraform/builtin/providers/postgresql" - powerdnsprovider "github.com/hashicorp/terraform/builtin/providers/powerdns" - profitbricksprovider "github.com/hashicorp/terraform/builtin/providers/profitbricks" - rabbitmqprovider "github.com/hashicorp/terraform/builtin/providers/rabbitmq" - rancherprovider "github.com/hashicorp/terraform/builtin/providers/rancher" - randomprovider "github.com/hashicorp/terraform/builtin/providers/random" - rundeckprovider "github.com/hashicorp/terraform/builtin/providers/rundeck" - scalewayprovider "github.com/hashicorp/terraform/builtin/providers/scaleway" - softlayerprovider "github.com/hashicorp/terraform/builtin/providers/softlayer" - spotinstprovider "github.com/hashicorp/terraform/builtin/providers/spotinst" - statuscakeprovider "github.com/hashicorp/terraform/builtin/providers/statuscake" - templateprovider "github.com/hashicorp/terraform/builtin/providers/template" - terraformprovider "github.com/hashicorp/terraform/builtin/providers/terraform" - testprovider "github.com/hashicorp/terraform/builtin/providers/test" - tlsprovider "github.com/hashicorp/terraform/builtin/providers/tls" - tritonprovider "github.com/hashicorp/terraform/builtin/providers/triton" - ultradnsprovider "github.com/hashicorp/terraform/builtin/providers/ultradns" - vaultprovider 
"github.com/hashicorp/terraform/builtin/providers/vault" - vcdprovider "github.com/hashicorp/terraform/builtin/providers/vcd" - vsphereprovider "github.com/hashicorp/terraform/builtin/providers/vsphere" chefprovisioner "github.com/hashicorp/terraform/builtin/provisioners/chef" fileprovisioner "github.com/hashicorp/terraform/builtin/provisioners/file" + habitatprovisioner "github.com/hashicorp/terraform/builtin/provisioners/habitat" localexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/local-exec" + puppetprovisioner "github.com/hashicorp/terraform/builtin/provisioners/puppet" remoteexecprovisioner "github.com/hashicorp/terraform/builtin/provisioners/remote-exec" + saltmasterlessprovisioner "github.com/hashicorp/terraform/builtin/provisioners/salt-masterless" "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" - - //New Provider Builds - opcprovider "github.com/hashicorp/terraform-provider-opc/opc" ) -var InternalProviders = map[string]plugin.ProviderFunc{ - "alicloud": alicloudprovider.Provider, - "archive": archiveprovider.Provider, - "arukas": arukasprovider.Provider, - "atlas": atlasprovider.Provider, - "aws": awsprovider.Provider, - "azure": azureprovider.Provider, - "azurerm": azurermprovider.Provider, - "bitbucket": bitbucketprovider.Provider, - "chef": chefprovider.Provider, - "circonus": circonusprovider.Provider, - "clc": clcprovider.Provider, - "cloudflare": cloudflareprovider.Provider, - "cloudstack": cloudstackprovider.Provider, - "cobbler": cobblerprovider.Provider, - "consul": consulprovider.Provider, - "datadog": datadogprovider.Provider, - "digitalocean": digitaloceanprovider.Provider, - "dme": dmeprovider.Provider, - "dns": dnsprovider.Provider, - "dnsimple": dnsimpleprovider.Provider, - "docker": dockerprovider.Provider, - "dyn": dynprovider.Provider, - "external": externalprovider.Provider, - "fastly": fastlyprovider.Provider, - "github": githubprovider.Provider, - "gitlab": gitlabprovider.Provider, - "google": googleprovider.Provider, - "grafana": grafanaprovider.Provider, - "heroku": herokuprovider.Provider, - "http": httpprovider.Provider, - "icinga2": icinga2provider.Provider, - "ignition": ignitionprovider.Provider, - "influxdb": influxdbprovider.Provider, - "kubernetes": kubernetesprovider.Provider, - "librato": libratoprovider.Provider, - "local": localprovider.Provider, - "logentries": logentriesprovider.Provider, - "mailgun": mailgunprovider.Provider, - "mysql": mysqlprovider.Provider, - "newrelic": newrelicprovider.Provider, - "nomad": nomadprovider.Provider, - "ns1": ns1provider.Provider, - "null": nullprovider.Provider, - "oneandone": oneandoneprovider.Provider, - "openstack": openstackprovider.Provider, - "opsgenie": opsgenieprovider.Provider, - "ovh": ovhprovider.Provider, - "packet": packetprovider.Provider, - "pagerduty": pagerdutyprovider.Provider, - "postgresql": postgresqlprovider.Provider, - "powerdns": powerdnsprovider.Provider, - "profitbricks": profitbricksprovider.Provider, - "rabbitmq": rabbitmqprovider.Provider, - "rancher": rancherprovider.Provider, - "random": randomprovider.Provider, - "rundeck": rundeckprovider.Provider, - "scaleway": scalewayprovider.Provider, - "softlayer": softlayerprovider.Provider, - "spotinst": spotinstprovider.Provider, - "statuscake": statuscakeprovider.Provider, - "template": templateprovider.Provider, - "terraform": terraformprovider.Provider, - "test": 
testprovider.Provider, - "tls": tlsprovider.Provider, - "triton": tritonprovider.Provider, - "ultradns": ultradnsprovider.Provider, - "vault": vaultprovider.Provider, - "vcd": vcdprovider.Provider, - "vsphere": vsphereprovider.Provider, -} +var InternalProviders = map[string]plugin.ProviderFunc{} var InternalProvisioners = map[string]plugin.ProvisionerFunc{ - "chef": chefprovisioner.Provisioner, - "file": fileprovisioner.Provisioner, - "local-exec": localexecprovisioner.Provisioner, - "remote-exec": remoteexecprovisioner.Provisioner, -} - -func init() { - // New Provider Layouts - InternalProviders["opc"] = func() terraform.ResourceProvider { return opcprovider.Provider() } + "chef": chefprovisioner.Provisioner, + "file": fileprovisioner.Provisioner, + "habitat": habitatprovisioner.Provisioner, + "local-exec": localexecprovisioner.Provisioner, + "puppet": puppetprovisioner.Provisioner, + "remote-exec": remoteexecprovisioner.Provisioner, + "salt-masterless": saltmasterlessprovisioner.Provisioner, } diff --git a/vendor/github.com/hashicorp/terraform/command/jsonconfig/config.go b/vendor/github.com/hashicorp/terraform/command/jsonconfig/config.go new file mode 100644 index 00000000..0b27c7af --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonconfig/config.go @@ -0,0 +1,360 @@ +package jsonconfig + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/terraform" +) + +// Config represents the complete configuration source +type config struct { + ProviderConfigs map[string]providerConfig `json:"provider_config,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +// ProviderConfig describes all of the provider configurations throughout the +// configuration tree, flattened into a single map for convenience since +// provider configurations are the one concept in Terraform that can span across +// module boundaries. +type providerConfig struct { + Name string `json:"name,omitempty"` + Alias string `json:"alias,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` + ModuleAddress string `json:"module_address,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` +} + +type module struct { + Outputs map[string]output `json:"outputs,omitempty"` + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []resource `json:"resources,omitempty"` + ModuleCalls map[string]moduleCall `json:"module_calls,omitempty"` + Variables variables `json:"variables,omitempty"` +} + +type moduleCall struct { + Source string `json:"source,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + Module module `json:"module,omitempty"` + VersionConstraint string `json:"version_constraint,omitempty"` +} + +// variables is the JSON representation of the variables provided to the current +// plan. 
+type variables map[string]*variable + +type variable struct { + Default json.RawMessage `json:"default,omitempty"` + Description string `json:"description,omitempty"` +} + +// Resource is the representation of a resource in the config +type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // ProviderConfigKey is the key into "provider_configs" (shown above) for + // the provider configuration that this resource is associated with. + // + // NOTE: If a given resource is in a ModuleCall, and the provider was + // configured outside of the module (in a higher level configuration file), + // the ProviderConfigKey will not match a key in the ProviderConfigs map. + ProviderConfigKey string `json:"provider_config_key,omitempty"` + + // Provisioners is an optional field which describes any provisioners. + // Connection info will not be included here. + Provisioners []provisioner `json:"provisioners,omitempty"` + + // Expressions" describes the resource-type-specific content of the + // configuration block. + Expressions map[string]interface{} `json:"expressions,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // CountExpression and ForEachExpression describe the expressions given for + // the corresponding meta-arguments in the resource configuration block. + // These are omitted if the corresponding argument isn't set. + CountExpression *expression `json:"count_expression,omitempty"` + ForEachExpression *expression `json:"for_each_expression,omitempty"` + + DependsOn []string `json:"depends_on,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive,omitempty"` + Expression expression `json:"expression,omitempty"` + DependsOn []string `json:"depends_on,omitempty"` + Description string `json:"description,omitempty"` +} + +type provisioner struct { + Type string `json:"type,omitempty"` + Expressions map[string]interface{} `json:"expressions,omitempty"` +} + +// Marshal returns the json encoding of terraform configuration. +func Marshal(c *configs.Config, schemas *terraform.Schemas) ([]byte, error) { + var output config + + pcs := make(map[string]providerConfig) + marshalProviderConfigs(c, schemas, pcs) + output.ProviderConfigs = pcs + + rootModule, err := marshalModule(c, schemas, "") + if err != nil { + return nil, err + } + output.RootModule = rootModule + + ret, err := json.Marshal(output) + return ret, err +} + +func marshalProviderConfigs( + c *configs.Config, + schemas *terraform.Schemas, + m map[string]providerConfig, +) { + if c == nil { + return + } + + for k, pc := range c.Module.ProviderConfigs { + schema := schemas.ProviderConfig(pc.Name) + p := providerConfig{ + Name: pc.Name, + Alias: pc.Alias, + ModuleAddress: c.Path.String(), + Expressions: marshalExpressions(pc.Config, schema), + VersionConstraint: pc.Version.Required.String(), + } + absPC := opaqueProviderKey(k, c.Path.String()) + + m[absPC] = p + } + + // Must also visit our child modules, recursively. 
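Since the flattened `provider_config` map spans module boundaries, its keys are produced by the `opaqueProviderKey` helper defined at the bottom of this file: a root-level provider is keyed by its name alone, while one configured inside a module is prefixed with the module address. A minimal standalone sketch of that key scheme (the module address below is invented for illustration):

```go
package main

import "fmt"

// opaqueProviderKey mirrors the helper in jsonconfig/config.go: the key is
// just the provider name at the root, and "<module address>:<provider>"
// inside a child module.
func opaqueProviderKey(provider, moduleAddr string) string {
	if moduleAddr == "" {
		return provider
	}
	return fmt.Sprintf("%s:%s", moduleAddr, provider)
}

func main() {
	fmt.Println(opaqueProviderKey("aws", ""))             // aws
	fmt.Println(opaqueProviderKey("aws", "module.child")) // module.child:aws
}
```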
+ for _, cc := range c.Children { + marshalProviderConfigs(cc, schemas, m) + } +} + +func marshalModule(c *configs.Config, schemas *terraform.Schemas, addr string) (module, error) { + var module module + var rs []resource + + managedResources, err := marshalResources(c.Module.ManagedResources, schemas, addr) + if err != nil { + return module, err + } + dataResources, err := marshalResources(c.Module.DataResources, schemas, addr) + if err != nil { + return module, err + } + + rs = append(managedResources, dataResources...) + module.Resources = rs + + outputs := make(map[string]output) + for _, v := range c.Module.Outputs { + o := output{ + Sensitive: v.Sensitive, + Expression: marshalExpression(v.Expr), + } + if v.Description != "" { + o.Description = v.Description + } + if len(v.DependsOn) > 0 { + dependencies := make([]string, len(v.DependsOn)) + for i, d := range v.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `terraform validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. + if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + o.DependsOn = dependencies + } + + outputs[v.Name] = o + } + module.Outputs = outputs + + module.ModuleCalls = marshalModuleCalls(c, schemas) + + if len(c.Module.Variables) > 0 { + vars := make(variables, len(c.Module.Variables)) + for k, v := range c.Module.Variables { + var defaultValJSON []byte + if v.Default == cty.NilVal { + defaultValJSON = nil + } else { + defaultValJSON, err = ctyjson.Marshal(v.Default, v.Default.Type()) + if err != nil { + return module, err + } + } + vars[k] = &variable{ + Default: defaultValJSON, + Description: v.Description, + } + } + module.Variables = vars + } + + return module, nil +} + +func marshalModuleCalls(c *configs.Config, schemas *terraform.Schemas) map[string]moduleCall { + ret := make(map[string]moduleCall) + + for name, mc := range c.Module.ModuleCalls { + mcConfig := c.Children[name] + ret[name] = marshalModuleCall(mcConfig, mc, schemas) + } + + return ret +} + +func marshalModuleCall(c *configs.Config, mc *configs.ModuleCall, schemas *terraform.Schemas) moduleCall { + // It is possible to have a module call with a nil config. 
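Each output entry carries its sensitivity flag, its defining expression, and `depends_on` references normalized to their subject addresses via `addrs.ParseRef`. A stdlib sketch of the resulting JSON shape, with the expression type trimmed to its references form and all names invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the jsonconfig expression type: references form only.
type expression struct {
	References []string `json:"references,omitempty"`
}

// Same field layout and tags as the jsonconfig output struct.
type output struct {
	Sensitive   bool       `json:"sensitive,omitempty"`
	Expression  expression `json:"expression,omitempty"`
	DependsOn   []string   `json:"depends_on,omitempty"`
	Description string     `json:"description,omitempty"`
}

func main() {
	o := output{
		Expression:  expression{References: []string{"aws_instance.web"}},
		DependsOn:   []string{"aws_instance.web"},
		Description: "public IP of the web server",
	}
	b, _ := json.MarshalIndent(map[string]output{"web_ip": o}, "", "  ")
	fmt.Println(string(b))
}
```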
+ if c == nil { + return moduleCall{} + } + + ret := moduleCall{ + Source: mc.SourceAddr, + VersionConstraint: mc.Version.Required.String(), + } + cExp := marshalExpression(mc.Count) + if !cExp.Empty() { + ret.CountExpression = &cExp + } else { + fExp := marshalExpression(mc.ForEach) + if !fExp.Empty() { + ret.ForEachExpression = &fExp + } + } + + schema := &configschema.Block{} + schema.Attributes = make(map[string]*configschema.Attribute) + for _, variable := range c.Module.Variables { + schema.Attributes[variable.Name] = &configschema.Attribute{ + Required: variable.Default == cty.NilVal, + } + } + + ret.Expressions = marshalExpressions(mc.Config, schema) + module, _ := marshalModule(c, schemas, mc.Name) + ret.Module = module + + return ret +} + +func marshalResources(resources map[string]*configs.Resource, schemas *terraform.Schemas, moduleAddr string) ([]resource, error) { + var rs []resource + for _, v := range resources { + r := resource{ + Address: v.Addr().String(), + Type: v.Type, + Name: v.Name, + ProviderConfigKey: opaqueProviderKey(v.ProviderConfigAddr().StringCompact(), moduleAddr), + } + + switch v.Mode { + case addrs.ManagedResourceMode: + r.Mode = "managed" + case addrs.DataResourceMode: + r.Mode = "data" + default: + return rs, fmt.Errorf("resource %s has an unsupported mode %s", r.Address, v.Mode.String()) + } + + cExp := marshalExpression(v.Count) + if !cExp.Empty() { + r.CountExpression = &cExp + } else { + fExp := marshalExpression(v.ForEach) + if !fExp.Empty() { + r.ForEachExpression = &fExp + } + } + + schema, schemaVer := schemas.ResourceTypeConfig( + v.ProviderConfigAddr().Type, + v.Mode, + v.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s", v.Addr().String()) + } + r.SchemaVersion = schemaVer + + r.Expressions = marshalExpressions(v.Config, schema) + + // Managed is populated only for Mode = addrs.ManagedResourceMode + if v.Managed != nil && len(v.Managed.Provisioners) > 0 { + var provisioners []provisioner + for _, p := range v.Managed.Provisioners { + schema := schemas.ProvisionerConfig(p.Type) + prov := provisioner{ + Type: p.Type, + Expressions: marshalExpressions(p.Config, schema), + } + provisioners = append(provisioners, prov) + } + r.Provisioners = provisioners + } + + if len(v.DependsOn) > 0 { + dependencies := make([]string, len(v.DependsOn)) + for i, d := range v.DependsOn { + ref, diags := addrs.ParseRef(d) + // we should not get an error here, because `terraform validate` + // would have complained well before this point, but if we do we'll + // silenty skip it. 
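As with module calls above, a resource entry carries at most one of `count_expression` and `for_each_expression`, since the two meta-arguments are mutually exclusive in configuration. An illustrative sketch of a single entry using a trimmed copy of the resource struct (addresses and values invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed expression type: constant form only.
type expression struct {
	ConstantValue json.RawMessage `json:"constant_value,omitempty"`
}

// A cut-down version of the jsonconfig resource struct above.
type resource struct {
	Address           string      `json:"address,omitempty"`
	Mode              string      `json:"mode,omitempty"`
	Type              string      `json:"type,omitempty"`
	Name              string      `json:"name,omitempty"`
	ProviderConfigKey string      `json:"provider_config_key,omitempty"`
	SchemaVersion     uint64      `json:"schema_version"`
	CountExpression   *expression `json:"count_expression,omitempty"`
}

func main() {
	three := json.RawMessage(`3`)
	r := resource{
		Address:           "aws_instance.web",
		Mode:              "managed",
		Type:              "aws_instance",
		Name:              "web",
		ProviderConfigKey: "aws",
		CountExpression:   &expression{ConstantValue: three},
	}
	b, _ := json.MarshalIndent(r, "", "  ")
	fmt.Println(string(b))
}
```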
+ if !diags.HasErrors() { + dependencies[i] = ref.Subject.String() + } + } + r.DependsOn = dependencies + } + + rs = append(rs, r) + } + sort.Slice(rs, func(i, j int) bool { + return rs[i].Address < rs[j].Address + }) + return rs, nil +} + +// opaqueProviderKey generates a unique absProviderConfig-like string from the module +// address and provider +func opaqueProviderKey(provider string, addr string) (key string) { + key = provider + if addr != "" { + key = fmt.Sprintf("%s:%s", addr, provider) + } + return key +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonconfig/doc.go b/vendor/github.com/hashicorp/terraform/command/jsonconfig/doc.go new file mode 100644 index 00000000..28324a57 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonconfig/doc.go @@ -0,0 +1,3 @@ +// Package jsonconfig implements methods for outputting a configuration snapshot +// in machine-readable json format +package jsonconfig diff --git a/vendor/github.com/hashicorp/terraform/command/jsonconfig/expression.go b/vendor/github.com/hashicorp/terraform/command/jsonconfig/expression.go new file mode 100644 index 00000000..3288faf7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonconfig/expression.go @@ -0,0 +1,119 @@ +package jsonconfig + +import ( + "encoding/json" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" +) + +// expression represents any unparsed expression +type expression struct { + // "constant_value" is set only if the expression contains no references to + // other objects, in which case it gives the resulting constant value. This + // is mapped as for the individual values in the common value + // representation. + ConstantValue json.RawMessage `json:"constant_value,omitempty"` + + // Alternatively, "references" will be set to a list of references in the + // expression. Multi-step references will be unwrapped and duplicated for + // each significant traversal step, allowing callers to more easily + // recognize the objects they care about without attempting to parse the + // expressions. Callers should only use string equality checks here, since + // the syntax may be extended in future releases. + References []string `json:"references,omitempty"` +} + +func marshalExpression(ex hcl.Expression) expression { + var ret expression + if ex != nil { + val, _ := ex.Value(nil) + if val != cty.NilVal { + valJSON, _ := ctyjson.Marshal(val, val.Type()) + ret.ConstantValue = valJSON + } + vars, _ := lang.ReferencesInExpr(ex) + var varString []string + if len(vars) > 0 { + for _, v := range vars { + varString = append(varString, v.Subject.String()) + } + ret.References = varString + } + return ret + } + return ret +} + +func (e *expression) Empty() bool { + return e.ConstantValue == nil && e.References == nil +} + +// expressions is used to represent the entire content of a block. Attribute +// arguments are mapped directly with the attribute name as key and an +// expression as value. +type expressions map[string]interface{} + +func marshalExpressions(body hcl.Body, schema *configschema.Block) expressions { + // Since we want the raw, un-evaluated expressions we need to use the + // low-level HCL API here, rather than the hcldec decoder API. 
That means we + // need the low-level schema. + lowSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + // (lowSchema is an hcl.BodySchema: + // https://godoc.org/github.com/hashicorp/hcl2/hcl#BodySchema ) + + // Use the low-level schema with the body to decode one level We'll just + // ignore any additional content that's not covered by the schema, which + // will effectively ignore "dynamic" blocks, and may also ignore other + // unknown stuff but anything else would get flagged by Terraform as an + // error anyway, and so we wouldn't end up in here. + content, _, _ := body.PartialContent(lowSchema) + if content == nil { + // Should never happen for a valid body, but we'll just generate empty + // if there were any problems. + return nil + } + + ret := make(expressions) + + // Any attributes we encode directly as expression objects. + for name, attr := range content.Attributes { + ret[name] = marshalExpression(attr.Expr) // note: singular expression for this one + } + + // Any nested blocks require a recursive call to produce nested expressions + // objects. + for _, block := range content.Blocks { + typeName := block.Type + blockS, exists := schema.BlockTypes[typeName] + if !exists { + // Should never happen since only block types in the schema would be + // put in blocks list + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + ret[typeName] = marshalExpressions(block.Body, &blockS.Block) + case configschema.NestingList, configschema.NestingSet: + if _, exists := ret[typeName]; !exists { + ret[typeName] = make([]map[string]interface{}, 0, 1) + } + ret[typeName] = append(ret[typeName].([]map[string]interface{}), marshalExpressions(block.Body, &blockS.Block)) + case configschema.NestingMap: + if _, exists := ret[typeName]; !exists { + ret[typeName] = make(map[string]map[string]interface{}) + } + // NestingMap blocks always have the key in the first (and only) label + key := block.Labels[0] + retMap := ret[typeName].(map[string]map[string]interface{}) + retMap[key] = marshalExpressions(block.Body, &blockS.Block) + } + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonplan/doc.go b/vendor/github.com/hashicorp/terraform/command/jsonplan/doc.go new file mode 100644 index 00000000..db1f3fb0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonplan/doc.go @@ -0,0 +1,3 @@ +// Package jsonplan implements methods for outputting a plan in a +// machine-readable json format +package jsonplan diff --git a/vendor/github.com/hashicorp/terraform/command/jsonplan/module.go b/vendor/github.com/hashicorp/terraform/command/jsonplan/module.go new file mode 100644 index 00000000..a122e77d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonplan/module.go @@ -0,0 +1,16 @@ +package jsonplan + +// module is the representation of a module in state. This can be the root +// module or a child module. +type module struct { + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []resource `json:"resources,omitempty"` + + // Address is the absolute module address, omitted for the root module + Address string `json:"address,omitempty"` + + // Each module object can optionally have its own nested "child_modules", + // recursively describing the full module tree. 
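The expression representation defined above is deliberately shallow: a `constant_value` when nothing is referenced, otherwise a flat `references` list of traversal strings for callers to match by string equality. A stdlib sketch of the two shapes (values invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the expression struct in jsonconfig/expression.go.
type expression struct {
	ConstantValue json.RawMessage `json:"constant_value,omitempty"`
	References    []string        `json:"references,omitempty"`
}

func main() {
	// A literal with no references marshals as a constant value...
	literal := expression{ConstantValue: json.RawMessage(`"10.0.0.0/16"`)}
	// ...while a reference to another object lists the traversal instead.
	ref := expression{References: []string{"var.region"}}

	for _, e := range []expression{literal, ref} {
		b, _ := json.Marshal(e)
		fmt.Println(string(b))
	}
}
```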
+ ChildModules []module `json:"child_modules,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonplan/plan.go b/vendor/github.com/hashicorp/terraform/command/jsonplan/plan.go new file mode 100644 index 00000000..f1563abe --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonplan/plan.go @@ -0,0 +1,477 @@ +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/jsonconfig" + "github.com/hashicorp/terraform/command/jsonstate" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/version" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. +const FormatVersion = "0.1" + +// Plan is the top-level representation of the json format of a plan. It includes +// the complete config and current state. +type plan struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Variables variables `json:"variables,omitempty"` + PlannedValues stateValues `json:"planned_values,omitempty"` + // ResourceChanges are sorted in a user-friendly order that is undefined at + // this time, but consistent. + ResourceChanges []resourceChange `json:"resource_changes,omitempty"` + OutputChanges map[string]change `json:"output_changes,omitempty"` + PriorState json.RawMessage `json:"prior_state,omitempty"` + Config json.RawMessage `json:"configuration,omitempty"` +} + +func newPlan() *plan { + return &plan{ + FormatVersion: FormatVersion, + } +} + +// Change is the representation of a proposed change for an object. +type change struct { + // Actions are the actions that will be taken on the object selected by the + // properties below. Valid actions values are: + // ["no-op"] + // ["create"] + // ["read"] + // ["update"] + // ["delete", "create"] + // ["create", "delete"] + // ["delete"] + // The two "replace" actions are represented in this way to allow callers to + // e.g. just scan the list for "delete" to recognize all three situations + // where the object will be deleted, allowing for any new deletion + // combinations that might be added in future. + Actions []string `json:"actions,omitempty"` + + // Before and After are representations of the object value both before and + // after the action. For ["create"] and ["delete"] actions, either "before" + // or "after" is unset (respectively). For ["no-op"], the before and after + // values are identical. The "after" value will be incomplete if there are + // values within it that won't be known until after apply. + Before json.RawMessage `json:"before,omitempty"` + After json.RawMessage `json:"after,omitempty"` + AfterUnknown json.RawMessage `json:"after_unknown,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive"` + Value json.RawMessage `json:"value,omitempty"` +} + +// variables is the JSON representation of the variables provided to the current +// plan. 
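Given the actions encoding documented above, a consumer should scan the list for the verbs it cares about rather than matching whole tuples, so that future combinations keep working. A minimal consumer-side helper along those lines:

```go
package main

import "fmt"

// willBeDeleted reports whether a planned change destroys the object,
// covering ["delete"], ["delete", "create"] and ["create", "delete"] alike,
// as the format documentation above recommends.
func willBeDeleted(actions []string) bool {
	for _, a := range actions {
		if a == "delete" {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(willBeDeleted([]string{"delete", "create"})) // true
	fmt.Println(willBeDeleted([]string{"update"}))           // false
}
```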
+type variables map[string]*variable + +type variable struct { + Value json.RawMessage `json:"value,omitempty"` +} + +// Marshal returns the json encoding of a terraform plan. +func Marshal( + config *configs.Config, + p *plans.Plan, + sf *statefile.File, + schemas *terraform.Schemas, +) ([]byte, error) { + output := newPlan() + output.TerraformVersion = version.String() + + err := output.marshalPlanVariables(p.VariableValues, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshalPlanVariables: %s", err) + } + + // output.PlannedValues + err = output.marshalPlannedValues(p.Changes, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshalPlannedValues: %s", err) + } + + // output.ResourceChanges + err = output.marshalResourceChanges(p.Changes, schemas) + if err != nil { + return nil, fmt.Errorf("error in marshalResourceChanges: %s", err) + } + + // output.OutputChanges + err = output.marshalOutputChanges(p.Changes) + if err != nil { + return nil, fmt.Errorf("error in marshaling output changes: %s", err) + } + + // output.PriorState + if sf != nil && !sf.State.Empty() { + output.PriorState, err = jsonstate.Marshal(sf, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling prior state: %s", err) + } + } + + // output.Config + output.Config, err = jsonconfig.Marshal(config, schemas) + if err != nil { + return nil, fmt.Errorf("error marshaling config: %s", err) + } + + ret, err := json.Marshal(output) + return ret, err +} + +func (p *plan) marshalPlanVariables(vars map[string]plans.DynamicValue, schemas *terraform.Schemas) error { + if len(vars) == 0 { + return nil + } + + p.Variables = make(variables, len(vars)) + + for k, v := range vars { + val, err := v.Decode(cty.DynamicPseudoType) + if err != nil { + return err + } + valJSON, err := ctyjson.Marshal(val, val.Type()) + if err != nil { + return err + } + p.Variables[k] = &variable{ + Value: valJSON, + } + } + return nil +} + +func (p *plan) marshalResourceChanges(changes *plans.Changes, schemas *terraform.Schemas) error { + if changes == nil { + // Nothing to do! + return nil + } + for _, rc := range changes.Resources { + var r resourceChange + addr := rc.Addr + r.Address = addr.String() + + dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode + // We create "delete" actions for data resources so we can clean up + // their entries in state, but this is an implementation detail that + // users shouldn't see. 
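This plan document is what `terraform show -json <planfile>` prints in Terraform 0.12 and later, so a consumer only needs struct tags for the fields it reads. A minimal sketch that lists planned actions from a hand-written sample document:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Only the fields we need; encoding/json ignores the rest of the document.
type planDoc struct {
	FormatVersion   string `json:"format_version"`
	ResourceChanges []struct {
		Address string `json:"address"`
		Change  struct {
			Actions []string `json:"actions"`
		} `json:"change"`
	} `json:"resource_changes"`
}

func main() {
	raw := []byte(`{
	  "format_version": "0.1",
	  "resource_changes": [
	    {"address": "aws_instance.web", "change": {"actions": ["create"]}}
	  ]
	}`)

	var p planDoc
	if err := json.Unmarshal(raw, &p); err != nil {
		log.Fatal(err)
	}
	for _, rc := range p.ResourceChanges {
		fmt.Println(rc.Address, rc.Change.Actions)
	}
}
```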
+ if dataSource && rc.Action == plans.Delete { + continue + } + + schema, _ := schemas.ResourceTypeConfig( + rc.ProviderAddr.ProviderConfig.Type, + addr.Resource.Resource.Mode, + addr.Resource.Resource.Type, + ) + if schema == nil { + return fmt.Errorf("no schema found for %s", r.Address) + } + + changeV, err := rc.Decode(schema.ImpliedType()) + if err != nil { + return err + } + + var before, after []byte + var afterUnknown cty.Value + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return err + } + afterUnknown = cty.EmptyObjectVal + } else { + filteredAfter := omitUnknowns(changeV.After) + if filteredAfter.IsNull() { + after = nil + } else { + after, err = ctyjson.Marshal(filteredAfter, filteredAfter.Type()) + if err != nil { + return err + } + } + afterUnknown = unknownAsBool(changeV.After) + } + } + + a, err := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + if err != nil { + return err + } + + r.Change = change{ + Actions: actionString(rc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + } + + if rc.DeposedKey != states.NotDeposed { + r.Deposed = rc.DeposedKey.String() + } + + key := addr.Resource.Key + if key != nil { + r.Index = key + } + + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + r.Mode = "managed" + case addrs.DataResourceMode: + r.Mode = "data" + default: + return fmt.Errorf("resource %s has an unsupported mode %s", r.Address, addr.Resource.Resource.Mode.String()) + } + r.ModuleAddress = addr.Module.String() + r.Name = addr.Resource.Resource.Name + r.Type = addr.Resource.Resource.Type + r.ProviderName = rc.ProviderAddr.ProviderConfig.StringCompact() + + p.ResourceChanges = append(p.ResourceChanges, r) + + } + + sort.Slice(p.ResourceChanges, func(i, j int) bool { + return p.ResourceChanges[i].Address < p.ResourceChanges[j].Address + }) + + return nil +} + +func (p *plan) marshalOutputChanges(changes *plans.Changes) error { + if changes == nil { + // Nothing to do! 
+ return nil + } + + p.OutputChanges = make(map[string]change, len(changes.Outputs)) + for _, oc := range changes.Outputs { + changeV, err := oc.Decode() + if err != nil { + return err + } + + var before, after []byte + afterUnknown := cty.False + if changeV.Before != cty.NilVal { + before, err = ctyjson.Marshal(changeV.Before, changeV.Before.Type()) + if err != nil { + return err + } + } + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return err + } + } else { + afterUnknown = cty.True + } + } + + a, _ := ctyjson.Marshal(afterUnknown, afterUnknown.Type()) + + c := change{ + Actions: actionString(oc.Action.String()), + Before: json.RawMessage(before), + After: json.RawMessage(after), + AfterUnknown: a, + } + + p.OutputChanges[oc.Addr.OutputValue.Name] = c + } + + return nil +} + +func (p *plan) marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) error { + // marshal the planned changes into a module + plan, err := marshalPlannedValues(changes, schemas) + if err != nil { + return err + } + p.PlannedValues.RootModule = plan + + // marshalPlannedOutputs + outputs, err := marshalPlannedOutputs(changes) + if err != nil { + return err + } + p.PlannedValues.Outputs = outputs + + return nil +} + +// omitUnknowns recursively walks the src cty.Value and returns a new cty.Value, +// omitting any unknowns. +// +// The result also normalizes some types: all sequence types are turned into +// tuple types and all mapping types are converted to object types, since we +// assume the result of this is just going to be serialized as JSON (and thus +// lose those distinctions) anyway. +func omitUnknowns(val cty.Value) cty.Value { + ty := val.Type() + switch { + case val.IsNull(): + return val + case !val.IsKnown(): + return cty.NilVal + case ty.IsPrimitiveType(): + return val + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + var vals []cty.Value + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals = append(vals, newVal) + } else if newVal == cty.NilVal && ty.IsListType() { + // list length may be significant, so we will turn unknowns into nulls + vals = append(vals, cty.NullVal(v.Type())) + } + } + // We use tuple types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents all of these sequence types as an array. + return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + newVal := omitUnknowns(v) + if newVal != cty.NilVal { + vals[k.AsString()] = newVal + } + } + // We use object types always here, because the work we did above + // may have caused the individual elements to have different types, + // and we're doing this work to produce JSON anyway and JSON marshalling + // represents both of these mapping types as an object. + return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("omitUnknowns cannot handle %#v", val)) + } +} + +// recursively iterate through a cty.Value, replacing unknown values (including +// null) with cty.True and known values with cty.False. 
+// +// The result also normalizes some types: all sequence types are turned into +// tuple types and all mapping types are converted to object types, since we +// assume the result of this is just going to be serialized as JSON (and thus +// lose those distinctions) anyway. +func unknownAsBool(val cty.Value) cty.Value { + ty := val.Type() + switch { + case val.IsNull(): + return cty.False + case !val.IsKnown(): + if ty.IsPrimitiveType() || ty.Equals(cty.DynamicPseudoType) { + return cty.True + } + fallthrough + case ty.IsPrimitiveType(): + return cty.BoolVal(!val.IsKnown()) + case ty.IsListType() || ty.IsTupleType() || ty.IsSetType(): + length := val.LengthInt() + if length == 0 { + // If there are no elements then we can't have unknowns + return cty.EmptyTupleVal + } + vals := make([]cty.Value, 0, length) + it := val.ElementIterator() + for it.Next() { + _, v := it.Element() + vals = append(vals, unknownAsBool(v)) + } + // The above transform may have changed the types of some of the + // elements, so we'll always use a tuple here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these sequence types are + // indistinguishable in JSON. + return cty.TupleVal(vals) + case ty.IsMapType() || ty.IsObjectType(): + var length int + switch { + case ty.IsMapType(): + length = val.LengthInt() + default: + length = len(val.Type().AttributeTypes()) + } + if length == 0 { + // If there are no elements then we can't have unknowns + return cty.EmptyObjectVal + } + vals := make(map[string]cty.Value) + it := val.ElementIterator() + for it.Next() { + k, v := it.Element() + vAsBool := unknownAsBool(v) + if !vAsBool.RawEquals(cty.False) { // all of the "false"s for known values for more compact serialization + vals[k.AsString()] = unknownAsBool(v) + } + } + // The above transform may have changed the types of some of the + // elements, so we'll always use an object here in case we've now made + // different elements have different types. Our ultimate goal is to + // marshal to JSON anyway, and all of these mapping types are + // indistinguishable in JSON. 
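`omitUnknowns` and `unknownAsBool` are complementary: the first strips unknowns out of the after value, the second marks exactly those positions with `true` in `after_unknown`. A flat-object sketch of that pairing using go-cty directly (the real functions also recurse through nested collections and normalize sequence and mapping types):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// A planned object whose "id" is not known until apply.
	after := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("web"),
		"id":   cty.UnknownVal(cty.String),
	})

	known := map[string]cty.Value{}
	unknown := map[string]cty.Value{}
	for k, v := range after.AsValueMap() {
		if v.IsKnown() {
			known[k] = v // kept, as omitUnknowns does
		} else {
			unknown[k] = cty.True // flagged, as unknownAsBool does
		}
	}

	a, _ := ctyjson.Marshal(cty.ObjectVal(known), cty.ObjectVal(known).Type())
	u, _ := ctyjson.Marshal(cty.ObjectVal(unknown), cty.ObjectVal(unknown).Type())
	fmt.Println("after:        ", string(a)) // {"name":"web"}
	fmt.Println("after_unknown:", string(u)) // {"id":true}
}
```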
+ return cty.ObjectVal(vals) + default: + // Should never happen, since the above should cover all types + panic(fmt.Sprintf("unknownAsBool cannot handle %#v", val)) + } +} + +func actionString(action string) []string { + switch { + case action == "NoOp": + return []string{"no-op"} + case action == "Create": + return []string{"create"} + case action == "Delete": + return []string{"delete"} + case action == "Update": + return []string{"update"} + case action == "CreateThenDelete": + return []string{"create", "delete"} + case action == "Read": + return []string{"read"} + case action == "DeleteThenCreate": + return []string{"delete", "create"} + default: + return []string{action} + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonplan/resource.go b/vendor/github.com/hashicorp/terraform/command/jsonplan/resource.go new file mode 100644 index 00000000..97e4851a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonplan/resource.go @@ -0,0 +1,64 @@ +package jsonplan + +import ( + "github.com/hashicorp/terraform/addrs" +) + +// Resource is the representation of a resource in the json plan +type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each` + Index addrs.InstanceKey `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name,omitempty"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues attributeValues `json:"values,omitempty"` +} + +// resourceChange is a description of an individual change action that Terraform +// plans to use to move from the prior state to a new state matching the +// configuration. +type resourceChange struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // ModuleAddress is the module portion of the above address. Omitted if the + // instance is in the root module. + ModuleAddress string `json:"module_address,omitempty"` + + // "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + Index addrs.InstanceKey `json:"index,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + + // "deposed", if set, indicates that this action applies to a "deposed" + // object of the given instance rather than to its "current" object. Omitted + // for changes to the current object. 
+ Deposed string `json:"deposed,omitempty"` + + // Change describes the change that will be made to this object + Change change `json:"change,omitempty"` +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonplan/values.go b/vendor/github.com/hashicorp/terraform/command/jsonplan/values.go new file mode 100644 index 00000000..a6e96d1d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonplan/values.go @@ -0,0 +1,234 @@ +package jsonplan + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/terraform" +) + +// stateValues is the common representation of resolved values for both the +// prior state (which is always complete) and the planned new state. +type stateValues struct { + Outputs map[string]output `json:"outputs,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +// attributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. +type attributeValues map[string]interface{} + +func marshalAttributeValues(value cty.Value, schema *configschema.Block) attributeValues { + if value == cty.NilVal { + return nil + } + ret := make(attributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// marshalPlannedOutputs takes a list of changes and returns a map of output +// values +func marshalPlannedOutputs(changes *plans.Changes) (map[string]output, error) { + if changes.Outputs == nil { + // No changes - we're done here! + return nil, nil + } + + ret := make(map[string]output) + + for _, oc := range changes.Outputs { + if oc.ChangeSrc.Action == plans.Delete { + continue + } + + var after []byte + changeV, err := oc.Decode() + if err != nil { + return ret, err + } + + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + after, err = ctyjson.Marshal(changeV.After, changeV.After.Type()) + if err != nil { + return ret, err + } + } + } + + ret[oc.Addr.OutputValue.Name] = output{ + Value: json.RawMessage(after), + Sensitive: oc.Sensitive, + } + } + + return ret, nil + +} + +func marshalPlannedValues(changes *plans.Changes, schemas *terraform.Schemas) (module, error) { + var ret module + + // build two maps: + // module name -> [resource addresses] + // module -> [children modules] + moduleResourceMap := make(map[string][]addrs.AbsResourceInstance) + moduleMap := make(map[string][]addrs.ModuleInstance) + seenModules := make(map[string]bool) + + for _, resource := range changes.Resources { + // if the resource is being deleted, skip over it. + if resource.Action != plans.Delete { + containingModule := resource.Addr.Module.String() + moduleResourceMap[containingModule] = append(moduleResourceMap[containingModule], resource.Addr) + + // root has no parents. + if containingModule != "" { + parent := resource.Addr.Module.Parent().String() + // we likely will see multiple resources in one module, so we + // only need to report the "parent" module for each child module + // once. 
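The two maps built here drive the tree construction that follows: resources grouped by containing module, and each module recorded under its parent exactly once. A loose stdlib analogy of the dedup step, using plain string addresses in place of `addrs.ModuleInstance` (addresses invented):

```go
package main

import (
	"fmt"
	"strings"
)

// parentOf returns the parent module address, "" for a root-level module.
func parentOf(addr string) string {
	parts := strings.Split(addr, ".")
	if len(parts) <= 2 { // e.g. "module.a"
		return ""
	}
	return strings.Join(parts[:len(parts)-2], ".") // drop trailing "module.<name>"
}

func main() {
	// Duplicates stand in for multiple resources in the same module.
	modules := []string{"module.a", "module.a", "module.a.module.b"}
	moduleMap := map[string][]string{}
	seen := map[string]bool{}

	for _, m := range modules {
		if m == "" || seen[m] {
			continue // report each child under its parent only once
		}
		seen[m] = true
		moduleMap[parentOf(m)] = append(moduleMap[parentOf(m)], m)
	}
	fmt.Println(moduleMap) // map[:[module.a] module.a:[module.a.module.b]]
}
```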
+ if !seenModules[containingModule] { + moduleMap[parent] = append(moduleMap[parent], resource.Addr.Module) + seenModules[containingModule] = true + } + } + } + } + + // start with the root module + resources, err := marshalPlanResources(changes, moduleResourceMap[""], schemas) + if err != nil { + return ret, err + } + ret.Resources = resources + + childModules, err := marshalPlanModules(changes, schemas, moduleMap[""], moduleMap, moduleResourceMap) + if err != nil { + return ret, err + } + sort.Slice(childModules, func(i, j int) bool { + return childModules[i].Address < childModules[j].Address + }) + + ret.ChildModules = childModules + + return ret, nil +} + +// marshalPlanResources +func marshalPlanResources(changes *plans.Changes, ris []addrs.AbsResourceInstance, schemas *terraform.Schemas) ([]resource, error) { + var ret []resource + + for _, ri := range ris { + r := changes.ResourceInstance(ri) + if r.Action == plans.Delete { + continue + } + + resource := resource{ + Address: r.Addr.String(), + Type: r.Addr.Resource.Resource.Type, + Name: r.Addr.Resource.Resource.Name, + ProviderName: r.ProviderAddr.ProviderConfig.StringCompact(), + Index: r.Addr.Resource.Key, + } + + switch r.Addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + resource.Mode = "managed" + case addrs.DataResourceMode: + resource.Mode = "data" + default: + return nil, fmt.Errorf("resource %s has an unsupported mode %s", + r.Addr.String(), + r.Addr.Resource.Resource.Mode.String(), + ) + } + + schema, schemaVer := schemas.ResourceTypeConfig( + r.ProviderAddr.ProviderConfig.Type, + r.Addr.Resource.Resource.Mode, + resource.Type, + ) + if schema == nil { + return nil, fmt.Errorf("no schema found for %s", r.Addr.String()) + } + resource.SchemaVersion = schemaVer + changeV, err := r.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + if changeV.After != cty.NilVal { + if changeV.After.IsWhollyKnown() { + resource.AttributeValues = marshalAttributeValues(changeV.After, schema) + } else { + knowns := omitUnknowns(changeV.After) + resource.AttributeValues = marshalAttributeValues(knowns, schema) + } + } + + ret = append(ret, resource) + } + + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} + +// marshalPlanModules iterates over a list of modules to recursively describe +// the full module tree. +func marshalPlanModules( + changes *plans.Changes, + schemas *terraform.Schemas, + childModules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, + moduleResourceMap map[string][]addrs.AbsResourceInstance, +) ([]module, error) { + + var ret []module + + for _, child := range childModules { + moduleResources := moduleResourceMap[child.String()] + // cm for child module, naming things is hard. 
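Because `child_modules` is omitted when empty, the recursion in `marshalPlanModules` bottoms out naturally, and the output nests one module object per level. An illustrative sketch of that nesting with the resource list trimmed to plain addresses (names invented):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Same shape as the jsonplan module struct, with resources trimmed to
// address strings for brevity.
type module struct {
	Address      string   `json:"address,omitempty"`
	Resources    []string `json:"resources,omitempty"`
	ChildModules []module `json:"child_modules,omitempty"`
}

func main() {
	root := module{
		Resources: []string{"aws_vpc.main"},
		ChildModules: []module{{
			Address:   "module.app",
			Resources: []string{"module.app.aws_instance.web"},
		}},
	}
	b, _ := json.MarshalIndent(root, "", "  ")
	fmt.Println(string(b))
}
```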
+ var cm module + // don't populate the address for the root module + if child.String() != "" { + cm.Address = child.String() + } + rs, err := marshalPlanResources(changes, moduleResources, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + + if len(moduleMap[child.String()]) > 0 { + moreChildModules, err := marshalPlanModules(changes, schemas, moduleMap[child.String()], moduleMap, moduleResourceMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonprovider/attribute.go b/vendor/github.com/hashicorp/terraform/command/jsonprovider/attribute.go new file mode 100644 index 00000000..dbdd4590 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonprovider/attribute.go @@ -0,0 +1,31 @@ +package jsonprovider + +import ( + "encoding/json" + + "github.com/hashicorp/terraform/configs/configschema" +) + +type attribute struct { + AttributeType json.RawMessage `json:"type,omitempty"` + Description string `json:"description,omitempty"` + Required bool `json:"required,omitempty"` + Optional bool `json:"optional,omitempty"` + Computed bool `json:"computed,omitempty"` + Sensitive bool `json:"sensitive,omitempty"` +} + +func marshalAttribute(attr *configschema.Attribute) *attribute { + // we're not concerned about errors because at this point the schema has + // already been checked and re-checked. + attrTy, _ := attr.Type.MarshalJSON() + + return &attribute{ + AttributeType: attrTy, + Description: attr.Description, + Required: attr.Required, + Optional: attr.Optional, + Computed: attr.Computed, + Sensitive: attr.Sensitive, + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonprovider/block.go b/vendor/github.com/hashicorp/terraform/command/jsonprovider/block.go new file mode 100644 index 00000000..240724ca --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonprovider/block.go @@ -0,0 +1,69 @@ +package jsonprovider + +import ( + "github.com/hashicorp/terraform/configs/configschema" +) + +type block struct { + Attributes map[string]*attribute `json:"attributes,omitempty"` + BlockTypes map[string]*blockType `json:"block_types,omitempty"` +} + +type blockType struct { + NestingMode string `json:"nesting_mode,omitempty"` + Block *block `json:"block,omitempty"` + MinItems uint64 `json:"min_items,omitempty"` + MaxItems uint64 `json:"max_items,omitempty"` +} + +func marshalBlockTypes(nestedBlock *configschema.NestedBlock) *blockType { + if nestedBlock == nil { + return &blockType{} + } + ret := &blockType{ + Block: marshalBlock(&nestedBlock.Block), + MinItems: uint64(nestedBlock.MinItems), + MaxItems: uint64(nestedBlock.MaxItems), + } + + switch nestedBlock.Nesting { + case configschema.NestingSingle: + ret.NestingMode = "single" + case configschema.NestingGroup: + ret.NestingMode = "group" + case configschema.NestingList: + ret.NestingMode = "list" + case configschema.NestingSet: + ret.NestingMode = "set" + case configschema.NestingMap: + ret.NestingMode = "map" + default: + ret.NestingMode = "invalid" + } + return ret +} + +func marshalBlock(configBlock *configschema.Block) *block { + if configBlock == nil { + return &block{} + } + + var ret block + if len(configBlock.Attributes) > 0 { + attrs := make(map[string]*attribute, len(configBlock.Attributes)) + for k, attr := range configBlock.Attributes { + attrs[k] = marshalAttribute(attr) 
+ } + ret.Attributes = attrs + } + + if len(configBlock.BlockTypes) > 0 { + blockTypes := make(map[string]*blockType, len(configBlock.BlockTypes)) + for k, bt := range configBlock.BlockTypes { + blockTypes[k] = marshalBlockTypes(bt) + } + ret.BlockTypes = blockTypes + } + + return &ret +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonprovider/doc.go b/vendor/github.com/hashicorp/terraform/command/jsonprovider/doc.go new file mode 100644 index 00000000..f7be0ade --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonprovider/doc.go @@ -0,0 +1,3 @@ +// Package jsonprovider contains types and functions to marshal terraform +// provider schemas into a json formatted output. +package jsonprovider diff --git a/vendor/github.com/hashicorp/terraform/command/jsonprovider/provider.go b/vendor/github.com/hashicorp/terraform/command/jsonprovider/provider.go new file mode 100644 index 00000000..5c45fd47 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonprovider/provider.go @@ -0,0 +1,70 @@ +package jsonprovider + +import ( + "encoding/json" + + "github.com/hashicorp/terraform/terraform" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. +const FormatVersion = "0.1" + +// providers is the top-level object returned when exporting provider schemas +type providers struct { + FormatVersion string `json:"format_version"` + Schemas map[string]*Provider `json:"provider_schemas,omitempty"` +} + +type Provider struct { + Provider *schema `json:"provider,omitempty"` + ResourceSchemas map[string]*schema `json:"resource_schemas,omitempty"` + DataSourceSchemas map[string]*schema `json:"data_source_schemas,omitempty"` +} + +func newProviders() *providers { + schemas := make(map[string]*Provider) + return &providers{ + FormatVersion: FormatVersion, + Schemas: schemas, + } +} + +func Marshal(s *terraform.Schemas) ([]byte, error) { + providers := newProviders() + + for k, v := range s.Providers { + providers.Schemas[k] = marshalProvider(v) + } + + ret, err := json.Marshal(providers) + return ret, err +} + +func marshalProvider(tps *terraform.ProviderSchema) *Provider { + if tps == nil { + return &Provider{} + } + + var ps *schema + var rs, ds map[string]*schema + + if tps.Provider != nil { + ps = marshalSchema(tps.Provider) + } + + if tps.ResourceTypes != nil { + rs = marshalSchemas(tps.ResourceTypes, tps.ResourceTypeSchemaVersions) + } + + if tps.DataSources != nil { + ds = marshalSchemas(tps.DataSources, tps.ResourceTypeSchemaVersions) + } + + return &Provider{ + Provider: ps, + ResourceSchemas: rs, + DataSourceSchemas: ds, + } +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonprovider/schema.go b/vendor/github.com/hashicorp/terraform/command/jsonprovider/schema.go new file mode 100644 index 00000000..dc9e30ce --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonprovider/schema.go @@ -0,0 +1,38 @@ +package jsonprovider + +import ( + "github.com/hashicorp/terraform/configs/configschema" +) + +type schema struct { + Version uint64 `json:"version"` + Block *block `json:"block,omitempty"` +} + +// marshalSchema is a convenience wrapper around mashalBlock. Schema version +// should be set by the caller. 
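This is the document shape that `terraform providers schema -json` emits: one entry per provider, each with its provider, resource, and data source schemas keyed by type name. A trimmed sketch with an invented `aws_instance` schema:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Cut-down copies of the jsonprovider types above.
type attribute struct {
	AttributeType json.RawMessage `json:"type,omitempty"`
	Required      bool            `json:"required,omitempty"`
}

type block struct {
	Attributes map[string]*attribute `json:"attributes,omitempty"`
}

type schema struct {
	Version uint64 `json:"version"`
	Block   *block `json:"block,omitempty"`
}

func main() {
	doc := map[string]interface{}{
		"format_version": "0.1",
		"provider_schemas": map[string]interface{}{
			"aws": map[string]interface{}{
				"resource_schemas": map[string]*schema{
					"aws_instance": {
						Version: 1,
						Block: &block{Attributes: map[string]*attribute{
							"ami": {AttributeType: json.RawMessage(`"string"`), Required: true},
						}},
					},
				},
			},
		},
	}
	b, _ := json.MarshalIndent(doc, "", "  ")
	fmt.Println(string(b))
}
```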
+func marshalSchema(block *configschema.Block) *schema { + if block == nil { + return &schema{} + } + + var ret schema + ret.Block = marshalBlock(block) + + return &ret +} + +func marshalSchemas(blocks map[string]*configschema.Block, rVersions map[string]uint64) map[string]*schema { + if blocks == nil { + return map[string]*schema{} + } + ret := make(map[string]*schema, len(blocks)) + for k, v := range blocks { + ret[k] = marshalSchema(v) + version, ok := rVersions[k] + if ok { + ret[k].Version = version + } + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/command/jsonstate/doc.go b/vendor/github.com/hashicorp/terraform/command/jsonstate/doc.go new file mode 100644 index 00000000..54773d09 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonstate/doc.go @@ -0,0 +1,3 @@ +// Package jsonstate implements methods for outputting a state in a +// machine-readable json format +package jsonstate diff --git a/vendor/github.com/hashicorp/terraform/command/jsonstate/state.go b/vendor/github.com/hashicorp/terraform/command/jsonstate/state.go new file mode 100644 index 00000000..68ed520a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/jsonstate/state.go @@ -0,0 +1,311 @@ +package jsonstate + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/terraform" +) + +// FormatVersion represents the version of the json format and will be +// incremented for any change to this format that requires changes to a +// consuming parser. +const FormatVersion = "0.1" + +// state is the top-level representation of the json format of a terraform +// state. +type state struct { + FormatVersion string `json:"format_version,omitempty"` + TerraformVersion string `json:"terraform_version,omitempty"` + Values *stateValues `json:"values,omitempty"` +} + +// stateValues is the common representation of resolved values for both the prior +// state (which is always complete) and the planned new state. +type stateValues struct { + Outputs map[string]output `json:"outputs,omitempty"` + RootModule module `json:"root_module,omitempty"` +} + +type output struct { + Sensitive bool `json:"sensitive"` + Value json.RawMessage `json:"value,omitempty"` +} + +// module is the representation of a module in state. This can be the root module +// or a child module +type module struct { + // Resources are sorted in a user-friendly order that is undefined at this + // time, but consistent. + Resources []resource `json:"resources,omitempty"` + + // Address is the absolute module address, omitted for the root module + Address string `json:"address,omitempty"` + + // Each module object can optionally have its own nested "child_modules", + // recursively describing the full module tree. + ChildModules []module `json:"child_modules,omitempty"` +} + +// Resource is the representation of a resource in the state. 
+type resource struct { + // Address is the absolute resource address + Address string `json:"address,omitempty"` + + // Mode can be "managed" or "data" + Mode string `json:"mode,omitempty"` + + Type string `json:"type,omitempty"` + Name string `json:"name,omitempty"` + + // Index is omitted for a resource not using `count` or `for_each`. + Index addrs.InstanceKey `json:"index,omitempty"` + + // ProviderName allows the property "type" to be interpreted unambiguously + // in the unusual situation where a provider offers a resource type whose + // name does not start with its own name, such as the "googlebeta" provider + // offering "google_compute_instance". + ProviderName string `json:"provider_name"` + + // SchemaVersion indicates which version of the resource type schema the + // "values" property conforms to. + SchemaVersion uint64 `json:"schema_version"` + + // AttributeValues is the JSON representation of the attribute values of the + // resource, whose structure depends on the resource type schema. Any + // unknown values are omitted or set to null, making them indistinguishable + // from absent values. + AttributeValues attributeValues `json:"values,omitempty"` + + // DependsOn contains a list of the resource's dependencies. The entries are + // addresses relative to the containing module. + DependsOn []string `json:"depends_on,omitempty"` + + // Tainted is true if the resource is tainted in terraform state. + Tainted bool `json:"tainted,omitempty"` +} + +// attributeValues is the JSON representation of the attribute values of the +// resource, whose structure depends on the resource type schema. +type attributeValues map[string]interface{} + +func marshalAttributeValues(value cty.Value, schema *configschema.Block) attributeValues { + if value == cty.NilVal { + return nil + } + ret := make(attributeValues) + + it := value.ElementIterator() + for it.Next() { + k, v := it.Element() + vJSON, _ := ctyjson.Marshal(v, v.Type()) + ret[k.AsString()] = json.RawMessage(vJSON) + } + return ret +} + +// newState() returns a minimally-initialized state +func newState() *state { + return &state{ + FormatVersion: FormatVersion, + } +} + +// Marshal returns the json encoding of a terraform state. 
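jsonstate produces the same document `terraform show -json` prints for a state file (or for the current state when given no argument). A minimal consumer sketch that lists resource addresses and tainted flags from a hand-written sample:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Only the fields we read; the rest of the document is ignored.
type stateDoc struct {
	FormatVersion string `json:"format_version"`
	Values        struct {
		RootModule struct {
			Resources []struct {
				Address string `json:"address"`
				Tainted bool   `json:"tainted"`
			} `json:"resources"`
		} `json:"root_module"`
	} `json:"values"`
}

func main() {
	raw := []byte(`{
	  "format_version": "0.1",
	  "values": {
	    "root_module": {
	      "resources": [{"address": "aws_instance.web", "tainted": true}]
	    }
	  }
	}`)

	var s stateDoc
	if err := json.Unmarshal(raw, &s); err != nil {
		log.Fatal(err)
	}
	for _, r := range s.Values.RootModule.Resources {
		fmt.Println(r.Address, "tainted:", r.Tainted)
	}
}
```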
+func Marshal(sf *statefile.File, schemas *terraform.Schemas) ([]byte, error) { + output := newState() + + if sf == nil || sf.State.Empty() { + ret, err := json.Marshal(output) + return ret, err + } + + if sf.TerraformVersion != nil { + output.TerraformVersion = sf.TerraformVersion.String() + } + + // output.StateValues + err := output.marshalStateValues(sf.State, schemas) + if err != nil { + return nil, err + } + + ret, err := json.Marshal(output) + return ret, err +} + +func (jsonstate *state) marshalStateValues(s *states.State, schemas *terraform.Schemas) error { + var sv stateValues + var err error + + // only marshal the root module outputs + sv.Outputs, err = marshalOutputs(s.RootModule().OutputValues) + if err != nil { + return err + } + + // use the state and module map to build up the module structure + sv.RootModule, err = marshalRootModule(s, schemas) + if err != nil { + return err + } + + jsonstate.Values = &sv + return nil +} + +func marshalOutputs(outputs map[string]*states.OutputValue) (map[string]output, error) { + if outputs == nil { + return nil, nil + } + + ret := make(map[string]output) + for k, v := range outputs { + ov, err := ctyjson.Marshal(v.Value, v.Value.Type()) + if err != nil { + return ret, err + } + ret[k] = output{ + Value: ov, + Sensitive: v.Sensitive, + } + } + + return ret, nil +} + +func marshalRootModule(s *states.State, schemas *terraform.Schemas) (module, error) { + var ret module + var err error + + ret.Address = "" + ret.Resources, err = marshalResources(s.RootModule().Resources, schemas) + if err != nil { + return ret, err + } + + // build a map of module -> [child module addresses] + moduleMap := make(map[string][]addrs.ModuleInstance) + for _, mod := range s.Modules { + if mod.Addr.IsRoot() { + continue + } else { + parent := mod.Addr.Parent().String() + moduleMap[parent] = append(moduleMap[parent], mod.Addr) + } + } + + // use the state and module map to build up the module structure + ret.ChildModules, err = marshalModules(s, schemas, moduleMap[""], moduleMap) + return ret, err +} + +// marshalModules is an ungainly recursive function to build a module +// structure out of a teraform state. +func marshalModules( + s *states.State, + schemas *terraform.Schemas, + modules []addrs.ModuleInstance, + moduleMap map[string][]addrs.ModuleInstance, +) ([]module, error) { + var ret []module + for _, child := range modules { + stateMod := s.Module(child) + // cm for child module, naming things is hard. 
+ cm := module{Address: stateMod.Addr.String()} + rs, err := marshalResources(stateMod.Resources, schemas) + if err != nil { + return nil, err + } + cm.Resources = rs + if moduleMap[child.String()] != nil { + moreChildModules, err := marshalModules(s, schemas, moduleMap[child.String()], moduleMap) + if err != nil { + return nil, err + } + cm.ChildModules = moreChildModules + } + + ret = append(ret, cm) + } + + return ret, nil +} + +func marshalResources(resources map[string]*states.Resource, schemas *terraform.Schemas) ([]resource, error) { + var ret []resource + + for _, r := range resources { + for k, ri := range r.Instances { + + resource := resource{ + Address: r.Addr.String(), + Type: r.Addr.Type, + Name: r.Addr.Name, + ProviderName: r.ProviderConfig.ProviderConfig.StringCompact(), + } + + switch r.Addr.Mode { + case addrs.ManagedResourceMode: + resource.Mode = "managed" + case addrs.DataResourceMode: + resource.Mode = "data" + default: + return ret, fmt.Errorf("resource %s has an unsupported mode %s", + r.Addr.String(), + r.Addr.Mode.String(), + ) + } + + if r.EachMode != states.NoEach { + resource.Index = k + } + + schema, _ := schemas.ResourceTypeConfig( + r.ProviderConfig.ProviderConfig.Type, + r.Addr.Mode, + r.Addr.Type, + ) + resource.SchemaVersion = ri.Current.SchemaVersion + + if schema == nil { + return nil, fmt.Errorf("no schema found for %s", r.Addr.String()) + } + riObj, err := ri.Current.Decode(schema.ImpliedType()) + if err != nil { + return nil, err + } + + resource.AttributeValues = marshalAttributeValues(riObj.Value, schema) + + if len(riObj.Dependencies) > 0 { + dependencies := make([]string, len(riObj.Dependencies)) + for i, v := range riObj.Dependencies { + dependencies[i] = v.String() + } + resource.DependsOn = dependencies + } + + if riObj.Status == states.ObjectTainted { + resource.Tainted = true + } + + ret = append(ret, resource) + } + + } + + sort.Slice(ret, func(i, j int) bool { + return ret[i].Address < ret[j].Address + }) + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/meta.go b/vendor/github.com/hashicorp/terraform/command/meta.go index 0b9375f7..bb5f6d22 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta.go +++ b/vendor/github.com/hashicorp/terraform/command/meta.go @@ -3,6 +3,7 @@ package command import ( "bufio" "bytes" + "context" "errors" "flag" "fmt" @@ -15,13 +16,19 @@ import ( "strings" "time" - "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/backend/local" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/helper/experiment" - "github.com/hashicorp/terraform/helper/variables" "github.com/hashicorp/terraform/helper/wrappedstreams" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" "github.com/mitchellh/colorstring" ) @@ -32,35 +39,80 @@ type Meta struct { // command with a Meta field. These are expected to be set externally // (not from within the command itself). 
-	Color       bool                   // True if output should be colored
-	ContextOpts *terraform.ContextOpts // Opts copied to initialize
-	Ui          cli.Ui                 // Ui for output
+	Color            bool             // True if output should be colored
+	GlobalPluginDirs []string         // Additional paths to search for plugins
+	PluginOverrides  *PluginOverrides // legacy overrides from .terraformrc file
+	Ui               cli.Ui           // Ui for output
 
 	// ExtraHooks are extra hooks to add to the context.
 	ExtraHooks []terraform.Hook
 
+	// Services provides access to remote endpoint information for
+	// "terraform-native" services running at a specific user-facing hostname.
+	Services *disco.Disco
+
+	// RunningInAutomation indicates that commands are being run by an
+	// automated system rather than directly at a command prompt.
+	//
+	// This is a hint to various command routines that it may be confusing
+	// to print out messages that suggest running specific follow-up
+	// commands, since the user consuming the output will not be
+	// in a position to run such commands.
+	//
+	// The intended use-case of this flag is when Terraform is running in
+	// some sort of workflow orchestration tool which is abstracting away
+	// the specific commands being run.
+	RunningInAutomation bool
+
+	// PluginCacheDir, if non-empty, enables caching of downloaded plugins
+	// into the given directory.
+	PluginCacheDir string
+
+	// OverrideDataDir, if non-empty, overrides the return value of the
+	// DataDir method for situations where the local .terraform/ directory
+	// is not suitable, e.g. because of a read-only filesystem.
+	OverrideDataDir string
+
+	// When this channel is closed, the command will be cancelled.
+	ShutdownCh <-chan struct{}
+
 	//----------------------------------------------------------
 	// Protected: commands can set these
 	//----------------------------------------------------------
 
-	// Modify the data directory location. Defaults to DefaultDataDir
+	// Modify the data directory location. This should be accessed through the
+	// DataDir method.
 	dataDir string
 
+	// pluginPath is a user defined set of directories to look for plugins.
+	// This is set during init with the `-plugin-dir` flag, saved to a file in
+	// the data directory.
+	// This overrides all other search paths when discovering plugins.
+	pluginPath []string
+
+	ignorePluginChecksum bool
+
+	// Override certain behavior for tests within this package
+	testingOverrides *testingOverrides
+
 	//----------------------------------------------------------
 	// Private: do not set these
 	//----------------------------------------------------------
 
+	// configLoader is a shared configuration loader that is used by
+	// LoadConfig and other commands that access configuration files.
+	// It is initialized on first use.
+	configLoader *configload.Loader
+
 	// backendState is the currently active backend state
 	backendState *terraform.BackendState
 
 	// Variables for the context (private)
-	autoKey       string
-	autoVariables map[string]interface{}
-	input         bool
-	variables     map[string]interface{}
+	variableArgs rawFlags
+	input        bool
 
 	// Targets for this context (private)
-	targets []string
+	targets []addrs.Targetable
 
 	// Internal fields
 	color bool
@@ -76,7 +128,7 @@ type Meta struct {
 	//
 	// stateOutPath is used to override the output path for the state.
 	// If not provided, the StatePath is used causing the old state to
-	// be overriden.
+	// be overridden.
 	//
 	// backupPath is used to backup the state file before writing a modified
 	// version.
It defaults to stateOutPath + DefaultBackupExtension
@@ -84,8 +136,6 @@ type Meta struct {
 	// parallelism is used to control the number of concurrent operations
 	// allowed when walking the graph
 	//
-	// shadow is used to enable/disable the shadow graph
-	//
 	// provider is to specify specific resource providers
 	//
 	// stateLock is set to false to disable state locking
@@ -101,12 +151,24 @@
 	stateOutPath     string
 	backupPath       string
 	parallelism      int
-	shadow           bool
 	provider         string
 	stateLock        bool
 	stateLockTimeout time.Duration
 	forceInitCopy    bool
 	reconfigure      bool
+
+	// Used with the import command to allow import of state when no matching config exists.
+	allowMissingConfig bool
+}
+
+type PluginOverrides struct {
+	Providers    map[string]string
+	Provisioners map[string]string
+}
+
+type testingOverrides struct {
+	ProviderResolver providers.Resolver
+	Provisioners     map[string]provisioners.Factory
+}
 
 // initStatePaths is used to initialize the default values for
@@ -138,13 +200,12 @@ func (m *Meta) Colorize() *colorstring.Colorize {
 }
 
 // DataDir returns the directory where local data will be stored.
+// Defaults to DefaultDataDir in the current working directory.
 func (m *Meta) DataDir() string {
-	dataDir := DefaultDataDir
-	if m.dataDir != "" {
-		dataDir = m.dataDir
+	if m.OverrideDataDir != "" {
+		return m.OverrideDataDir
 	}
-
-	return dataDir
+	return DefaultDataDir
 }
 
 const (
@@ -195,61 +256,104 @@ func (m *Meta) StdinPiped() bool {
 	return fi.Mode()&os.ModeNamedPipe != 0
 }
 
-// contextOpts returns the options to use to initialize a Terraform
-// context with the settings from this Meta.
-func (m *Meta) contextOpts() *terraform.ContextOpts {
-	var opts terraform.ContextOpts
-	if v := m.ContextOpts; v != nil {
-		opts = *v
+// RunOperation executes the given operation on the given backend, blocking
+// until that operation completes or is interrupted, and then returns
+// the RunningOperation object representing the completed or
+// aborted operation that is, despite the name, no longer running.
+//
+// An error is returned if the operation either fails to start or is cancelled.
+// If the operation runs to completion then no error is returned even if the
+// operation itself is unsuccessful. Use the "Result" field of the
+// returned operation object to recognize operation-level failure.
+func (m *Meta) RunOperation(b backend.Enhanced, opReq *backend.Operation) (*backend.RunningOperation, error) {
+	if opReq.ConfigDir != "" {
+		opReq.ConfigDir = m.normalizePath(opReq.ConfigDir)
 	}
-	opts.Hooks = []terraform.Hook{m.uiHook(), &terraform.DebugHook{}}
-	if m.ContextOpts != nil {
-		opts.Hooks = append(opts.Hooks, m.ContextOpts.Hooks...)
+	op, err := b.Operation(context.Background(), opReq)
+	if err != nil {
+		return nil, fmt.Errorf("error starting operation: %s", err)
 	}
-	opts.Hooks = append(opts.Hooks, m.ExtraHooks...)
 
-	vs := make(map[string]interface{})
-	for k, v := range opts.Variables {
-		vs[k] = v
-	}
-	for k, v := range m.autoVariables {
-		vs[k] = v
-	}
-	for k, v := range m.variables {
-		vs[k] = v
+	// Wait for the operation to complete or an interrupt to occur
+	select {
+	case <-m.ShutdownCh:
+		// gracefully stop the operation
+		op.Stop()
+
+		// Notify the user
+		m.Ui.Output(outputInterrupt)
+
+		// Still get the result, since there is still one
+		select {
+		case <-m.ShutdownCh:
+			m.Ui.Error(
+				"Two interrupts received. Exiting immediately.
Note that data\n" +
+					"loss may have occurred.")
+
+			// cancel the operation completely
+			op.Cancel()
+
+			// the operation should return asap
+			// but timeout just in case
+			select {
+			case <-op.Done():
+			case <-time.After(5 * time.Second):
+			}
+
+			return nil, errors.New("operation canceled")
+
+		case <-op.Done():
+			// operation completed after Stop
+		}
+	case <-op.Done():
+		// operation completed normally
 	}
-	opts.Variables = vs
+
+	return op, nil
+}
+
+const (
+	ProviderSkipVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY"
+)
+
+// contextOpts returns the options to use to initialize a Terraform
+// context with the settings from this Meta.
+func (m *Meta) contextOpts() *terraform.ContextOpts {
+	var opts terraform.ContextOpts
+	opts.Hooks = []terraform.Hook{m.uiHook()}
+	opts.Hooks = append(opts.Hooks, m.ExtraHooks...)
 
 	opts.Targets = m.targets
 	opts.UIInput = m.UIInput()
 	opts.Parallelism = m.parallelism
-	opts.Shadow = m.shadow
+
+	// If testingOverrides are set, we'll skip the plugin discovery process
+	// and just work with what we've been given, thus allowing the tests
+	// to provide mock providers and provisioners.
+	if m.testingOverrides != nil {
+		opts.ProviderResolver = m.testingOverrides.ProviderResolver
+		opts.Provisioners = m.testingOverrides.Provisioners
+	} else {
+		opts.ProviderResolver = m.providerResolver()
+		opts.Provisioners = m.provisionerFactories()
+	}
+
+	opts.ProviderSHA256s = m.providerPluginsLock().Read()
+	if v := os.Getenv(ProviderSkipVerifyEnvVar); v != "" {
+		opts.SkipProviderVerify = true
+	}
 
 	opts.Meta = &terraform.ContextMeta{
-		Env: m.Env(),
+		Env: m.Workspace(),
 	}
 
 	return &opts
 }
 
-// flags adds the meta flags to the given FlagSet.
-func (m *Meta) flagSet(n string) *flag.FlagSet {
+// defaultFlagSet creates a default flag set for commands.
+func (m *Meta) defaultFlagSet(n string) *flag.FlagSet {
 	f := flag.NewFlagSet(n, flag.ContinueOnError)
-	f.BoolVar(&m.input, "input", true, "input")
-	f.Var((*variables.Flag)(&m.variables), "var", "variables")
-	f.Var((*variables.FlagFile)(&m.variables), "var-file", "variable file")
-	f.Var((*FlagStringSlice)(&m.targets), "target", "resource to target")
-
-	if m.autoKey != "" {
-		f.Var((*variables.FlagFile)(&m.autoVariables), m.autoKey, "variable file")
-	}
-
-	// Advanced (don't need documentation, or unlikely to be set)
-	f.BoolVar(&m.shadow, "shadow", true, "shadow graph")
-
-	// Experimental features
-	experiment.Flag(f)
 
 	// Create an io.Writer that writes to our Ui properly for errors.
 	// This is kind of a hack, but it does the job. Basically: create
@@ -258,6 +362,13 @@ func (m *Meta) flagSet(n string) *flag.FlagSet {
 	errR, errW := io.Pipe()
 	errScanner := bufio.NewScanner(errR)
 	go func() {
+		// This only needs to be alive long enough to write the help info if
+		// there is a flag error. Kill the scanner after a short duration to
+		// prevent these from accumulating during tests, and cluttering up the
+		// stack traces.
+		time.AfterFunc(2*time.Second, func() {
+			errW.Close()
+		})
 		for errScanner.Scan() {
 			m.Ui.Error(errScanner.Text())
 		}
@@ -267,8 +378,30 @@ func (m *Meta) flagSet(n string) *flag.FlagSet {
 	// Set the default Usage to empty
 	f.Usage = func() {}
 
-	// command that bypass locking will supply their own flag on this var, but
-	// set the initial meta value to true as a failsafe.
+	return f
+}
+
+// extendedFlagSet adds custom flags that are mostly used by commands
+// that are used to run an operation like plan or apply.
+func (m *Meta) extendedFlagSet(n string) *flag.FlagSet { + f := m.defaultFlagSet(n) + + f.BoolVar(&m.input, "input", true, "input") + f.Var((*FlagTargetSlice)(&m.targets), "target", "resource to target") + + if m.variableArgs.items == nil { + m.variableArgs = newRawFlags("-var") + } + varValues := m.variableArgs.Alias("-var") + varFiles := m.variableArgs.Alias("-var-file") + f.Var(varValues, "var", "variables") + f.Var(varFiles, "var-file", "variable file") + + // Experimental features + experiment.Flag(f) + + // commands that bypass locking will supply their own flag on this var, + // but set the initial meta value to true as a failsafe. m.stateLock = true return f @@ -276,21 +409,21 @@ func (m *Meta) flagSet(n string) *flag.FlagSet { // moduleStorage returns the module.Storage implementation used to store // modules for commands. -func (m *Meta) moduleStorage(root string) getter.Storage { - return &uiModuleStorage{ - Storage: &getter.FolderStorage{ - StorageDir: filepath.Join(root, "modules"), - }, - Ui: m.Ui, - } +func (m *Meta) moduleStorage(root string, mode module.GetMode) *module.Storage { + s := module.NewStorage(filepath.Join(root, "modules"), m.Services) + s.Ui = m.Ui + s.Mode = mode + return s } // process will process the meta-parameters out of the arguments. This // will potentially modify the args in-place. It will return the resulting // slice. // -// vars says whether or not we support variables. -func (m *Meta) process(args []string, vars bool) []string { +// vars is now ignored. It used to control whether to process variables, but +// that is no longer the responsibility of this function. (That happens +// instead in Meta.collectVariableValues.) +func (m *Meta) process(args []string, vars bool) ([]string, error) { // We do this so that we retain the ability to technically call // process multiple times, even if we have no plans to do so if m.oldUi != nil { @@ -319,28 +452,7 @@ func (m *Meta) process(args []string, vars bool) []string { }, } - // If we support vars and the default var file exists, add it to - // the args... - m.autoKey = "" - if vars { - if _, err := os.Stat(DefaultVarsFilename); err == nil { - m.autoKey = "var-file-default" - args = append(args, "", "") - copy(args[2:], args[0:]) - args[0] = "-" + m.autoKey - args[1] = DefaultVarsFilename - } - - if _, err := os.Stat(DefaultVarsFilename + ".json"); err == nil { - m.autoKey = "var-file-default" - args = append(args, "", "") - copy(args[2:], args[0:]) - args[0] = "-" + m.autoKey - args[1] = DefaultVarsFilename + ".json" - } - } - - return args + return args, nil } // uiHook returns the UiHook to use with the context. @@ -353,11 +465,12 @@ func (m *Meta) uiHook() *UiHook { // confirm asks a yes/no confirmation. func (m *Meta) confirm(opts *terraform.InputOpts) (bool, error) { - if !m.input { - return false, errors.New("input disabled") + if !m.Input() { + return false, errors.New("input is disabled") } - for { - v, err := m.UIInput().Input(opts) + + for i := 0; i < 2; i++ { + v, err := m.UIInput().Input(context.Background(), opts) if err != nil { return false, fmt.Errorf( "Error asking for confirmation: %s", err) @@ -370,23 +483,36 @@ func (m *Meta) confirm(opts *terraform.InputOpts) (bool, error) { return true, nil } } + return false, nil } -const ( - // ModuleDepthDefault is the default value for - // module depth, which can be overridden by flag - // or env var - ModuleDepthDefault = -1 - - // ModuleDepthEnvVar is the name of the environment variable that can be used to set module depth. 
-	ModuleDepthEnvVar = "TF_MODULE_DEPTH"
-)
-
-func (m *Meta) addModuleDepthFlag(flags *flag.FlagSet, moduleDepth *int) {
-	flags.IntVar(moduleDepth, "module-depth", ModuleDepthDefault, "module-depth")
-	if envVar := os.Getenv(ModuleDepthEnvVar); envVar != "" {
-		if md, err := strconv.Atoi(envVar); err == nil {
-			*moduleDepth = md
+// showDiagnostics displays error and warning messages in the UI.
+//
+// "Diagnostics" here means the Diagnostics type from the tfdiags package,
+// though as a convenience this function accepts anything that could be
+// passed to the "Append" method on that type, converting it to Diagnostics
+// before displaying it.
+//
+// Internally this function uses Diagnostics.Append, and so it will panic
+// if given unsupported value types, just as Append does.
+func (m *Meta) showDiagnostics(vals ...interface{}) {
+	var diags tfdiags.Diagnostics
+	diags = diags.Append(vals...)
+	diags.Sort()
+
+	for _, diag := range diags {
+		// TODO: Actually measure the terminal width and pass it here.
+		// For now, we don't have easy access to the writer that
+		// ui.Error (etc) are writing to and thus can't interrogate
+		// to see if it's a terminal and what size it is.
+		msg := format.Diagnostic(diag, m.configSources(), m.Colorize(), 78)
+		switch diag.Severity() {
+		case tfdiags.Error:
+			m.Ui.Error(msg)
+		case tfdiags.Warning:
+			m.Ui.Warn(msg)
+		default:
+			m.Ui.Output(msg)
 		}
 	}
 }
@@ -434,43 +560,60 @@ func (m *Meta) outputShadowError(err error, output bool) bool {
 	return true
 }
 
-// Env returns the name of the currently configured environment, corresponding
+// WorkspaceNameEnvVar is the name of the environment variable that can be used
+// to set the name of the Terraform workspace, overriding the workspace chosen
+// by `terraform workspace select`.
+//
+// Note that this environment variable is ignored by `terraform workspace new`
+// and `terraform workspace delete`.
+const WorkspaceNameEnvVar = "TF_WORKSPACE"
+
+// Workspace returns the name of the currently configured workspace, corresponding
 // to the desired named state.
-func (m *Meta) Env() string {
-	dataDir := m.dataDir
-	if m.dataDir == "" {
-		dataDir = DefaultDataDir
+func (m *Meta) Workspace() string {
+	current, _ := m.WorkspaceOverridden()
+	return current
+}
+
+// WorkspaceOverridden returns the name of the currently configured workspace,
+// corresponding to the desired named state, as well as a bool saying whether
+// this was set via the TF_WORKSPACE environment variable.
+func (m *Meta) WorkspaceOverridden() (string, bool) {
+	if envVar := os.Getenv(WorkspaceNameEnvVar); envVar != "" {
+		return envVar, true
 	}
 
-	envData, err := ioutil.ReadFile(filepath.Join(dataDir, local.DefaultEnvFile))
+	envData, err := ioutil.ReadFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile))
 	current := string(bytes.TrimSpace(envData))
 	if current == "" {
 		current = backend.DefaultStateName
 	}
 
 	if err != nil && !os.IsNotExist(err) {
-		// always return the default if we can't get an environment name
-		log.Printf("[ERROR] failed to read current environment: %s", err)
+		// always return the default if we can't get a workspace name
+		log.Printf("[ERROR] failed to read current workspace: %s", err)
 	}
 
-	return current
+	return current, false
 }
 
-// SetEnv saves the named environment to the local filesystem.
-func (m *Meta) SetEnv(name string) error { - dataDir := m.dataDir - if m.dataDir == "" { - dataDir = DefaultDataDir - } - - err := os.MkdirAll(dataDir, 0755) +// SetWorkspace saves the given name as the current workspace in the local +// filesystem. +func (m *Meta) SetWorkspace(name string) error { + err := os.MkdirAll(m.DataDir(), 0755) if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(dataDir, local.DefaultEnvFile), []byte(name), 0644) + err = ioutil.WriteFile(filepath.Join(m.DataDir(), local.DefaultWorkspaceFile), []byte(name), 0644) if err != nil { return err } return nil } + +// isAutoVarFile determines if the file ends with .auto.tfvars or .auto.tfvars.json +func isAutoVarFile(path string) bool { + return strings.HasSuffix(path, ".auto.tfvars") || + strings.HasSuffix(path, ".auto.tfvars.json") +} diff --git a/vendor/github.com/hashicorp/terraform/command/meta_backend.go b/vendor/github.com/hashicorp/terraform/command/meta_backend.go index b57999ce..c62be463 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta_backend.go +++ b/vendor/github.com/hashicorp/terraform/command/meta_backend.go @@ -5,46 +5,41 @@ package command import ( "context" + "encoding/json" "errors" "fmt" - "io/ioutil" "log" - "os" "path/filepath" + "strconv" "strings" "github.com/hashicorp/errwrap" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" "github.com/hashicorp/terraform/backend" "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/terraform" - "github.com/mitchellh/mapstructure" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" - backendinit "github.com/hashicorp/terraform/backend/init" - backendlocal "github.com/hashicorp/terraform/backend/local" + backendInit "github.com/hashicorp/terraform/backend/init" + backendLocal "github.com/hashicorp/terraform/backend/local" ) // BackendOpts are the options used to initialize a backend.Backend. type BackendOpts struct { - // ConfigPath is a path to a file or directory containing the backend - // configuration (declaration). - ConfigPath string + // Config is a representation of the backend configuration block given in + // the root module, or nil if no such block is present. + Config *configs.Backend - // ConfigFile is a path to a file that contains configuration that - // is merged directly into the backend configuration when loaded - // from a file. - ConfigFile string - - // ConfigExtra is extra configuration to merge into the backend - // configuration after the extra file above. - ConfigExtra map[string]interface{} - - // Plan is a plan that is being used. If this is set, the backend - // configuration and output configuration will come from this plan. - Plan *terraform.Plan + // ConfigOverride is an hcl.Body that, if non-nil, will be used with + // configs.MergeBodies to override the type-specific backend configuration + // arguments in Config. + ConfigOverride hcl.Body // Init should be set to true if initialization is allowed. 
If this is
 // false, then any configuration that requires configuration will show
@@ -70,7 +65,13 @@ type BackendOpts struct {
 // and is unsafe to create multiple backends used at once. This function
 // can be called multiple times with each backend being "live" (usable)
 // one at a time.
-func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, error) {
+//
+// A side-effect of this method is the population of m.backendState, recording
+// the final resolved backend configuration after dealing with overrides from
+// the "terraform init" command line, etc.
+func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
 	// If no opts are set, then initialize
 	if opts == nil {
 		opts = &BackendOpts{}
@@ -80,51 +81,45 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, error) {
 	// local operation.
 	var b backend.Backend
 	if !opts.ForceLocal {
-		var err error
-
-		// If we have a plan then, we get the the backend from there. Otherwise,
-		// the backend comes from the configuration.
-		if opts.Plan != nil {
-			b, err = m.backendFromPlan(opts)
-		} else {
-			b, err = m.backendFromConfig(opts)
-		}
-		if err != nil {
-			return nil, err
+		var backendDiags tfdiags.Diagnostics
+		b, backendDiags = m.backendFromConfig(opts)
+		diags = diags.Append(backendDiags)
+
+		if opts.Init && b != nil && !diags.HasErrors() {
+			// It's possible that the currently selected workspace doesn't exist, so
+			// we call selectWorkspace to ensure an existing workspace is selected.
+			if err := m.selectWorkspace(b); err != nil {
+				diags = diags.Append(err)
+			}
 		}
 
-		log.Printf("[INFO] command: backend initialized: %T", b)
-	}
+		if diags.HasErrors() {
+			return nil, diags
+		}
 
-	// Setup the CLI opts we pass into backends that support it
-	cliOpts := &backend.CLIOpts{
-		CLI:             m.Ui,
-		CLIColor:        m.Colorize(),
-		StatePath:       m.statePath,
-		StateOutPath:    m.stateOutPath,
-		StateBackupPath: m.backupPath,
-		ContextOpts:     m.contextOpts(),
-		Input:           m.Input(),
+		log.Printf("[TRACE] Meta.Backend: instantiated backend of type %T", b)
 	}
 
-	// Don't validate if we have a plan. Validation is normally harmless here,
-	// but validation requires interpolation, and `file()` function calls may
-	// not have the original files in the current execution context.
-	cliOpts.Validation = opts.Plan == nil
+	// Setup the CLI opts we pass into backends that support it.
+	cliOpts := m.backendCLIOpts()
+	cliOpts.Validation = true
 
 	// If the backend supports CLI initialization, do it.
 	if cli, ok := b.(backend.CLI); ok {
 		if err := cli.CLIInit(cliOpts); err != nil {
-			return nil, fmt.Errorf(
+			diags = diags.Append(fmt.Errorf(
 				"Error initializing backend %T: %s\n\n"+
-					"This is a bug, please report it to the backend developer",
-				b, err)
+					"This is a bug; please report it to the backend developer",
+				b, err,
+			))
+			return nil, diags
 		}
 	}
 
 	// If the result of loading the backend is an enhanced backend,
 	// then return that as-is. This works even if b == nil (it will be !ok).
 	if enhanced, ok := b.(backend.Enhanced); ok {
+		log.Printf("[TRACE] Meta.Backend: backend %T supports operations", b)
 		return enhanced, nil
 	}
 
@@ -133,24 +128,180 @@ func (m *Meta) Backend(opts *BackendOpts) (backend.Enhanced, error) {
 	// non-enhanced (if any) as the state backend.
if !opts.ForceLocal { - log.Printf("[INFO] command: backend %T is not enhanced, wrapping in local", b) + log.Printf("[TRACE] Meta.Backend: backend %T does not support operations, so wrapping it in a local backend", b) } // Build the local backend - local := &backendlocal.Local{Backend: b} + local := backendLocal.NewWithBackend(b) if err := local.CLIInit(cliOpts); err != nil { // Local backend isn't allowed to fail. It would be a bug. panic(err) } + // If we got here from backendFromConfig returning nil then m.backendState + // won't be set, since that codepath considers that to be no backend at all, + // but our caller considers that to be the local backend with no config + // and so we'll synthesize a backend state so other code doesn't need to + // care about this special case. + // + // FIXME: We should refactor this so that we more directly and explicitly + // treat the local backend as the default, including in the UI shown to + // the user, since the local backend should only be used when learning or + // in exceptional cases and so it's better to help the user learn that + // by introducing it as a concept. + if m.backendState == nil { + // NOTE: This synthetic object is intentionally _not_ retained in the + // on-disk record of the backend configuration, which was already dealt + // with inside backendFromConfig, because we still need that codepath + // to be able to recognize the lack of a config as distinct from + // explicitly setting local until we do some more refactoring here. + m.backendState = &terraform.BackendState{ + Type: "local", + ConfigRaw: json.RawMessage("{}"), + } + } + return local, nil } +// selectWorkspace gets a list of existing workspaces and then checks +// if the currently selected workspace is valid. If not, it will ask +// the user to select a workspace from the list. +func (m *Meta) selectWorkspace(b backend.Backend) error { + workspaces, err := b.Workspaces() + if err == backend.ErrWorkspacesNotSupported { + return nil + } + if err != nil { + return fmt.Errorf("Failed to get existing workspaces: %s", err) + } + if len(workspaces) == 0 { + return fmt.Errorf(strings.TrimSpace(errBackendNoExistingWorkspaces)) + } + + // Get the currently selected workspace. + workspace := m.Workspace() + + // Check if any of the existing workspaces matches the selected + // workspace and create a numbered list of existing workspaces. + var list strings.Builder + for i, w := range workspaces { + if w == workspace { + return nil + } + fmt.Fprintf(&list, "%d. %s\n", i+1, w) + } + + // If the selected workspace doesn't exist, ask the user to select + // a workspace from the list of existing workspaces. + v, err := m.UIInput().Input(context.Background(), &terraform.InputOpts{ + Id: "select-workspace", + Query: fmt.Sprintf( + "\n[reset][bold][yellow]The currently selected workspace (%s) does not exist.[reset]", + workspace), + Description: fmt.Sprintf( + strings.TrimSpace(inputBackendSelectWorkspace), list.String()), + }) + if err != nil { + return fmt.Errorf("Failed to select workspace: %s", err) + } + + idx, err := strconv.Atoi(v) + if err != nil || (idx < 1 || idx > len(workspaces)) { + return fmt.Errorf("Failed to select workspace: input not a valid number") + } + + return m.SetWorkspace(workspaces[idx-1]) +} + +// BackendForPlan is similar to Backend, but uses backend settings that were +// stored in a plan. 
+// +// The current workspace name is also stored as part of the plan, and so this +// method will check that it matches the currently-selected workspace name +// and produce error diagnostics if not. +func (m *Meta) BackendForPlan(settings plans.Backend) (backend.Enhanced, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + f := backendInit.Backend(settings.Type) + if f == nil { + diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), settings.Type)) + return nil, diags + } + b := f() + log.Printf("[TRACE] Meta.BackendForPlan: instantiated backend of type %T", b) + + schema := b.ConfigSchema() + configVal, err := settings.Config.Decode(schema.ImpliedType()) + if err != nil { + diags = diags.Append(errwrap.Wrapf("saved backend configuration is invalid: {{err}}", err)) + return nil, diags + } + + newVal, validateDiags := b.PrepareConfig(configVal) + diags = diags.Append(validateDiags) + if validateDiags.HasErrors() { + return nil, diags + } + + configureDiags := b.Configure(newVal) + diags = diags.Append(configureDiags) + + // If the backend supports CLI initialization, do it. + if cli, ok := b.(backend.CLI); ok { + cliOpts := m.backendCLIOpts() + if err := cli.CLIInit(cliOpts); err != nil { + diags = diags.Append(fmt.Errorf( + "Error initializing backend %T: %s\n\n"+ + "This is a bug; please report it to the backend developer", + b, err, + )) + return nil, diags + } + } + + // If the result of loading the backend is an enhanced backend, + // then return that as-is. This works even if b == nil (it will be !ok). + if enhanced, ok := b.(backend.Enhanced); ok { + log.Printf("[TRACE] Meta.BackendForPlan: backend %T supports operations", b) + return enhanced, nil + } + + // Otherwise, we'll wrap our state-only remote backend in the local backend + // to cause any operations to be run locally. + log.Printf("[TRACE] Meta.Backend: backend %T does not support operations, so wrapping it in a local backend", b) + cliOpts := m.backendCLIOpts() + cliOpts.Validation = false // don't validate here in case config contains file(...) calls where the file doesn't exist + local := backendLocal.NewWithBackend(b) + if err := local.CLIInit(cliOpts); err != nil { + // Local backend should never fail, so this is always a bug. + panic(err) + } + + return local, diags +} + +// backendCLIOpts returns a backend.CLIOpts object that should be passed to +// a backend that supports local CLI operations. +func (m *Meta) backendCLIOpts() *backend.CLIOpts { + return &backend.CLIOpts{ + CLI: m.Ui, + CLIColor: m.Colorize(), + ShowDiagnostics: m.showDiagnostics, + StatePath: m.statePath, + StateOutPath: m.stateOutPath, + StateBackupPath: m.backupPath, + ContextOpts: m.contextOpts(), + Input: m.Input(), + RunningInAutomation: m.RunningInAutomation, + } +} + // IsLocalBackend returns true if the backend is a local backend. We use this // for some checks that require a remote backend. func (m *Meta) IsLocalBackend(b backend.Backend) bool { // Is it a local backend? - bLocal, ok := b.(*backendlocal.Local) + bLocal, ok := b.(*backendLocal.Local) // If it is, does it not have an alternate state backend? if ok { @@ -165,176 +316,123 @@ func (m *Meta) IsLocalBackend(b backend.Backend) bool { // This prepares the operation. After calling this, the caller is expected // to modify fields of the operation such as Sequence to specify what will // be called. 
-func (m *Meta) Operation() *backend.Operation { +func (m *Meta) Operation(b backend.Backend) *backend.Operation { + schema := b.ConfigSchema() + workspace := m.Workspace() + planOutBackend, err := m.backendState.ForPlan(schema, workspace) + if err != nil { + // Always indicates an implementation error in practice, because + // errors here indicate invalid encoding of the backend configuration + // in memory, and we should always have validated that by the time + // we get here. + panic(fmt.Sprintf("failed to encode backend configuration for plan: %s", err)) + } + return &backend.Operation{ - PlanOutBackend: m.backendState, + PlanOutBackend: planOutBackend, + Parallelism: m.parallelism, Targets: m.targets, UIIn: m.UIInput(), - Environment: m.Env(), + UIOut: m.Ui, + Workspace: workspace, LockState: m.stateLock, StateLockTimeout: m.stateLockTimeout, } } // backendConfig returns the local configuration for the backend -func (m *Meta) backendConfig(opts *BackendOpts) (*config.Backend, error) { - // If no explicit path was given then it is okay for there to be - // no backend configuration found. - emptyOk := opts.ConfigPath == "" - - // Determine the path to the configuration. - path := opts.ConfigPath +func (m *Meta) backendConfig(opts *BackendOpts) (*configs.Backend, int, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics - // If we had no path set, it is an error. We can't initialize unset - if path == "" { - path = "." - } - - // Expand the path - if !filepath.IsAbs(path) { - var err error - path, err = filepath.Abs(path) + if opts.Config == nil { + // check if the config was missing, or just not required + conf, err := m.loadBackendConfig(".") if err != nil { - return nil, fmt.Errorf( - "Error expanding path to backend config %q: %s", path, err) - } - } - - log.Printf("[DEBUG] command: loading backend config file: %s", path) - - // We first need to determine if we're loading a file or a directory. - fi, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) && emptyOk { - log.Printf( - "[INFO] command: backend config not found, returning nil: %s", - path) - return nil, nil - } - - return nil, err - } - - var f func(string) (*config.Config, error) = config.LoadFile - if fi.IsDir() { - f = config.LoadDir - } - - // Load the configuration - c, err := f(path) - if err != nil { - // Check for the error where we have no config files and return nil - // as the configuration type. - if errwrap.ContainsType(err, new(config.ErrNoConfigsFound)) { - log.Printf( - "[INFO] command: backend config not found, returning nil: %s", - path) - return nil, nil + return nil, 0, err } - return nil, err - } - - // If there is no Terraform configuration block, no backend config - if c.Terraform == nil { - return nil, nil - } - - // Get the configuration for the backend itself. - backend := c.Terraform.Backend - if backend == nil { - return nil, nil - } - - // If we have a config file set, load that and merge. 
- if opts.ConfigFile != "" { - log.Printf( - "[DEBUG] command: loading extra backend config from: %s", - opts.ConfigFile) - rc, err := m.backendConfigFile(opts.ConfigFile) - if err != nil { - return nil, fmt.Errorf( - "Error loading extra configuration file for backend: %s", err) + if conf == nil { + log.Println("[TRACE] Meta.Backend: no config given or present on disk, so returning nil config") + return nil, 0, nil } - // Merge in the configuration - backend.RawConfig = backend.RawConfig.Merge(rc) + log.Printf("[TRACE] Meta.Backend: BackendOpts.Config not set, so using settings loaded from %s", conf.DeclRange) + opts.Config = conf } - // If we have extra config values, merge that - if len(opts.ConfigExtra) > 0 { - log.Printf( - "[DEBUG] command: adding extra backend config from CLI") - rc, err := config.NewRawConfig(opts.ConfigExtra) - if err != nil { - return nil, fmt.Errorf( - "Error adding extra configuration file for backend: %s", err) - } + c := opts.Config - // Merge in the configuration - backend.RawConfig = backend.RawConfig.Merge(rc) + if c == nil { + log.Println("[TRACE] Meta.Backend: no explicit backend config, so returning nil config") + return nil, 0, nil } - // Validate the backend early. We have to do this before the normal - // config validation pass since backend loading happens earlier. - if errs := backend.Validate(); len(errs) > 0 { - return nil, multierror.Append(nil, errs...) + bf := backendInit.Backend(c.Type) + if bf == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid backend type", + Detail: fmt.Sprintf("There is no backend type named %q.", c.Type), + Subject: &c.TypeRange, + }) + return nil, 0, diags } + b := bf() - // Return the configuration which may or may not be set - return backend, nil -} + configSchema := b.ConfigSchema() + configBody := c.Config + configHash := c.Hash(configSchema) -// backendConfigFile loads the extra configuration to merge with the -// backend configuration from an extra file if specified by -// BackendOpts.ConfigFile. -func (m *Meta) backendConfigFile(path string) (*config.RawConfig, error) { - // Read the file - d, err := ioutil.ReadFile(path) - if err != nil { - return nil, err + // If we have an override configuration body then we must apply it now. + if opts.ConfigOverride != nil { + log.Println("[TRACE] Meta.Backend: merging -backend-config=... CLI overrides into backend configuration") + configBody = configs.MergeBodies(configBody, opts.ConfigOverride) } - // Parse it - hclRoot, err := hcl.Parse(string(d)) - if err != nil { - return nil, err - } + log.Printf("[TRACE] Meta.Backend: built configuration for %q backend with hash value %d", c.Type, configHash) - // Decode it - var c map[string]interface{} - if err := hcl.DecodeObject(&c, hclRoot); err != nil { - return nil, err - } - - return config.NewRawConfig(c) + // We'll shallow-copy configs.Backend here so that we can replace the + // body without affecting others that hold this reference. + configCopy := *c + configCopy.Config = configBody + return &configCopy, configHash, diags } // backendFromConfig returns the initialized (not configured) backend // directly from the config/state.. // -// This function handles any edge cases around backend config loading. For -// example: legacy remote state, new config changes, backend type changes, -// etc. +// This function handles various edge cases around backend config loading. For +// example: new config changes, backend type changes, etc. 
+// +// As of the 0.12 release it can no longer migrate from legacy remote state +// to backends, and will instead instruct users to use 0.11 or earlier as +// a stepping-stone to do that migration. // // This function may query the user for input unless input is disabled, in // which case this function will error. -func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) { +func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, tfdiags.Diagnostics) { // Get the local backend configuration. - c, err := m.backendConfig(opts) - if err != nil { - return nil, fmt.Errorf("Error loading backend config: %s", err) + c, cHash, diags := m.backendConfig(opts) + if diags.HasErrors() { + return nil, diags } - // cHash defaults to zero unless c is set - var cHash uint64 - if c != nil { - // We need to rehash to get the value since we may have merged the - // config with an extra ConfigFile. We don't do this when merging - // because we do want the ORIGINAL value on c so that we store - // that to not detect drift. This is covered in tests. - cHash = c.Rehash() - } + // ------------------------------------------------------------------------ + // For historical reasons, current backend configuration for a working + // directory is kept in a *state-like* file, using the legacy state + // structures in the Terraform package. It is not actually a Terraform + // state, and so only the "backend" portion of it is actually used. + // + // The remainder of this code often confusingly refers to this as a "state", + // so it's unfortunately important to remember that this is not actually + // what we _usually_ think of as "state", and is instead a local working + // directory "backend configuration state" that is never persisted anywhere. + // + // Since the "real" state has since moved on to be represented by + // states.State, we can recognize the special meaning of state that applies + // to this function and its callees by their continued use of the + // otherwise-obsolete terraform.State. + // ------------------------------------------------------------------------ // Get the path to where we store a local cache of backend configuration // if we're using a remote backend. 
This may not yet exist which means @@ -342,14 +440,19 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) { statePath := filepath.Join(m.DataDir(), DefaultStateFilename) sMgr := &state.LocalState{Path: statePath} if err := sMgr.RefreshState(); err != nil { - return nil, fmt.Errorf("Error loading state: %s", err) + diags = diags.Append(fmt.Errorf("Failed to load state: %s", err)) + return nil, diags } // Load the state, it must be non-nil for the tests below but can be empty s := sMgr.State() if s == nil { - log.Printf("[DEBUG] command: no data state file found for backend config") + log.Printf("[TRACE] Meta.Backend: backend has not previously been initialized in this working directory") s = terraform.NewState() + } else if s.Backend != nil { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized for %q backend", s.Backend.Type) + } else { + log.Printf("[TRACE] Meta.Backend: working directory was previously initialized but has no backend (is using legacy remote state?)") } // if we want to force reconfiguration of the backend, we set the backend @@ -368,326 +471,94 @@ func (m *Meta) backendFromConfig(opts *BackendOpts) (backend.Backend, error) { } }() - // This giant switch statement covers all eight possible combinations - // of state settings between: configuring new backends, saved (previously- - // configured) backends, and legacy remote state. + if !s.Remote.Empty() { + // Legacy remote state is no longer supported. User must first + // migrate with Terraform 0.11 or earlier. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Legacy remote state not supported", + "This working directory is configured for legacy remote state, which is no longer supported from Terraform v0.12 onwards. To migrate this environment, first run \"terraform init\" under a Terraform 0.11 release, and then upgrade Terraform again.", + )) + return nil, diags + } + + // This switch statement covers all the different combinations of + // configuring new backends, updating previously-configured backends, etc. switch { // No configuration set at all. Pure local state. - case c == nil && s.Remote.Empty() && s.Backend.Empty(): + case c == nil && s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: using default local state only (no backend configuration, and no existing initialized backend)") return nil, nil // We're unsetting a backend (moving from backend => local) - case c == nil && s.Remote.Empty() && !s.Backend.Empty(): + case c == nil && !s.Backend.Empty(): + log.Printf("[TRACE] Meta.Backend: previously-initialized %q backend is no longer present in config", s.Backend.Type) if !opts.Init { initReason := fmt.Sprintf( "Unsetting the previously set backend %q", s.Backend.Type) m.backendInitRequired(initReason) - return nil, errBackendInitRequired + diags = diags.Append(errBackendInitRequired) + return nil, diags } - return m.backend_c_r_S(c, sMgr, true) - - // We have a legacy remote state configuration but no new backend config - case c == nil && !s.Remote.Empty() && s.Backend.Empty(): - return m.backend_c_R_s(c, sMgr) - - // We have a legacy remote state configuration simultaneously with a - // saved backend configuration while at the same time disabling backend - // configuration. 
-	//
-	// This is a naturally impossible case: Terraform will never put you
-	// in this state, though it is theoretically possible through manual edits
-	case c == nil && !s.Remote.Empty() && !s.Backend.Empty():
-		if !opts.Init {
-			initReason := fmt.Sprintf(
-				"Unsetting the previously set backend %q",
-				s.Backend.Type)
-			m.backendInitRequired(initReason)
-			return nil, errBackendInitRequired
-		}
-
-		return m.backend_c_R_S(c, sMgr)
+		return m.backend_c_r_S(c, cHash, sMgr, true)
 
 	// Configuring a backend for the first time.
-	case c != nil && s.Remote.Empty() && s.Backend.Empty():
+	case c != nil && s.Backend.Empty():
+		log.Printf("[TRACE] Meta.Backend: moving from default local state only to %q backend", c.Type)
 		if !opts.Init {
 			initReason := fmt.Sprintf(
 				"Initial configuration of the requested backend %q",
 				c.Type)
 			m.backendInitRequired(initReason)
-			return nil, errBackendInitRequired
+			diags = diags.Append(errBackendInitRequired)
+			return nil, diags
 		}
 
-		return m.backend_C_r_s(c, sMgr)
+		return m.backend_C_r_s(c, cHash, sMgr)
 
 	// Potentially changing a backend configuration
-	case c != nil && s.Remote.Empty() && !s.Backend.Empty():
+	case c != nil && !s.Backend.Empty():
+		// We are not going to migrate if we're not initializing and the hashes
+		// match, indicating that the stored config is valid. If we are
+		// initializing, then we also assume the backend config is OK if
+		// the hashes match, as long as we're not providing any new overrides.
+		if (uint64(cHash) == s.Backend.Hash) && (!opts.Init || opts.ConfigOverride == nil) {
+			log.Printf("[TRACE] Meta.Backend: using already-initialized, unchanged %q backend configuration", c.Type)
+			return m.backend_C_r_S_unchanged(c, cHash, sMgr)
+		}
+
 		// If our configuration is the same, then we're just initializing
 		// a previously configured remote backend.
-		if !s.Backend.Empty() {
-			hash := s.Backend.Hash
-			// on init we need an updated hash containing any extra options
-			// that were added after merging.
-			if opts.Init {
-				hash = s.Backend.Rehash()
-			}
-			if hash == cHash {
-				return m.backend_C_r_S_unchanged(c, sMgr)
-			}
+		if !m.backendConfigNeedsMigration(c, s.Backend) {
+			log.Printf("[TRACE] Meta.Backend: using already-initialized %q backend configuration", c.Type)
+			return m.backend_C_r_S_unchanged(c, cHash, sMgr)
 		}
+		log.Printf("[TRACE] Meta.Backend: backend configuration has changed (from type %q to type %q)", s.Backend.Type, c.Type)
 
 		if !opts.Init {
 			initReason := fmt.Sprintf(
 				"Backend configuration changed for %q",
 				c.Type)
 			m.backendInitRequired(initReason)
-			return nil, errBackendInitRequired
-		}
-
-		log.Printf(
-			"[WARN] command: backend config change! saved: %d, new: %d",
-			s.Backend.Hash, cHash)
-		return m.backend_C_r_S_changed(c, sMgr, true)
-
-	// Configuring a backend for the first time while having legacy
-	// remote state. This is very possible if a Terraform user configures
-	// a backend prior to ever running Terraform on an old state.
-	case c != nil && !s.Remote.Empty() && s.Backend.Empty():
-		if !opts.Init {
-			initReason := fmt.Sprintf(
-				"Initial configuration for backend %q",
-				c.Type)
-			m.backendInitRequired(initReason)
-			return nil, errBackendInitRequired
-		}
-
-		return m.backend_C_R_s(c, sMgr)
-
-	// Configuring a backend with both a legacy remote state set
-	// and a pre-existing backend saved.
-	case c != nil && !s.Remote.Empty() && !s.Backend.Empty():
-		// If the hashes are the same, we have a legacy remote state with
-		// an unchanged stored backend state.
- hash := s.Backend.Hash - if opts.Init { - hash = s.Backend.Rehash() - } - if hash == cHash { - if !opts.Init { - initReason := fmt.Sprintf( - "Legacy remote state found with configured backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } - - return m.backend_C_R_S_unchanged(c, sMgr, true) + diags = diags.Append(errBackendInitRequired) + return nil, diags } - if !opts.Init { - initReason := fmt.Sprintf( - "Reconfiguring the backend %q", - c.Type) - m.backendInitRequired(initReason) - return nil, errBackendInitRequired - } + log.Printf("[WARN] backend config has changed since last init") + return m.backend_C_r_S_changed(c, cHash, sMgr, true) - // We have change in all three - return m.backend_C_R_S_changed(c, sMgr) default: - // This should be impossible since all state possibilties are - // tested above, but we need a default case anyways and we should - // protect against the scenario where a case is somehow removed. - return nil, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Unhandled backend configuration state. This is a bug. Please\n"+ "report this error with the following information.\n\n"+ "Config Nil: %v\n"+ - "Saved Backend Empty: %v\n"+ - "Legacy Remote Empty: %v\n", - c == nil, s.Backend.Empty(), s.Remote.Empty()) - } -} - -// backendFromPlan loads the backend from a given plan file. -func (m *Meta) backendFromPlan(opts *BackendOpts) (backend.Backend, error) { - // Precondition check - if opts.Plan == nil { - panic("plan should not be nil") - } - - // We currently don't allow "-state" to be specified. - if m.statePath != "" { - return nil, fmt.Errorf( - "State path cannot be specified with a plan file. The plan itself contains\n" + - "the state to use. If you wish to change that, please create a new plan\n" + - "and specify the state path when creating the plan.") - } - - planBackend := opts.Plan.Backend - planState := opts.Plan.State - if planState == nil { - // The state can be nil, we just have to make it empty for the logic - // in this function. - planState = terraform.NewState() - } - - // Validation only for non-local plans - local := planState.Remote.Empty() && planBackend.Empty() - if !local { - // We currently don't allow "-state-out" to be specified. - if m.stateOutPath != "" { - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanStateFlag)) - } - } - - /* - // Determine the path where we'd be writing state - path := DefaultStateFilename - if !planState.Remote.Empty() || !planBackend.Empty() { - path = filepath.Join(m.DataDir(), DefaultStateFilename) - } - - // If the path exists, then we need to verify we're writing the same - // state lineage. If the path doesn't exist that's okay. - _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { - return nil, fmt.Errorf("Error checking state destination: %s", err) - } - if err == nil { - // The file exists, we need to read it and compare - if err := m.backendFromPlan_compareStates(state, path); err != nil { - return nil, err - } - } - */ - - // If we have a stateOutPath, we must also specify it as the - // input path so we can check it properly. We restore it after this - // function exits. 
- original := m.statePath - m.statePath = m.stateOutPath - defer func() { m.statePath = original }() - - var b backend.Backend - var err error - switch { - // No remote state at all, all local - case planState.Remote.Empty() && planBackend.Empty(): - log.Printf("[INFO] command: initializing local backend from plan (not set)") - - // Get the local backend - b, err = m.Backend(&BackendOpts{ForceLocal: true}) - - // New backend configuration set - case planState.Remote.Empty() && !planBackend.Empty(): - log.Printf( - "[INFO] command: initializing backend from plan: %s", - planBackend.Type) - - b, err = m.backendInitFromSaved(planBackend) - - // Legacy remote state set - case !planState.Remote.Empty() && planBackend.Empty(): - log.Printf( - "[INFO] command: initializing legacy remote backend from plan: %s", - planState.Remote.Type) - - // Write our current state to an inmemory state just so that we - // have it in the format of state.State - inmem := &state.InmemState{} - inmem.WriteState(planState) - - // Get the backend through the normal means of legacy state - b, err = m.backend_c_R_s(nil, inmem) - - // Both set, this can't happen in a plan. - case !planState.Remote.Empty() && !planBackend.Empty(): - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanBoth)) - } - - // If we had an error, return that - if err != nil { - return nil, err - } - - env := m.Env() - - // Get the state so we can determine the effect of using this plan - realMgr, err := b.State(env) - if err != nil { - return nil, fmt.Errorf("Error reading state: %s", err) - } - - if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() - - // Lock the state if we can - lockInfo := state.NewLockInfo() - lockInfo.Operation = "backend from plan" - - lockID, err := clistate.Lock(lockCtx, realMgr, lockInfo, m.Ui, m.Colorize()) - if err != nil { - return nil, fmt.Errorf("Error locking state: %s", err) - } - defer clistate.Unlock(realMgr, lockID, m.Ui, m.Colorize()) - } - - if err := realMgr.RefreshState(); err != nil { - return nil, fmt.Errorf("Error reading state: %s", err) - } - real := realMgr.State() - if real != nil { - // If they're not the same lineage, don't allow this - if !real.SameLineage(planState) { - return nil, fmt.Errorf(strings.TrimSpace(errBackendPlanLineageDiff)) - } - - // Compare ages - comp, err := real.CompareAges(planState) - if err != nil { - return nil, fmt.Errorf("Error comparing state ages for safety: %s", err) - } - switch comp { - case terraform.StateAgeEqual: - // State ages are equal, this is perfect - - case terraform.StateAgeReceiverOlder: - // Real state is somehow older, this is okay. - - case terraform.StateAgeReceiverNewer: - // If we have an older serial it is a problem but if we have a - // differing serial but are still identical, just let it through. - if real.Equal(planState) { - log.Printf( - "[WARN] command: state in plan has older serial, but Equal is true") - break - } - - // The real state is newer, this is not allowed. 
-		return nil, fmt.Errorf(
-			strings.TrimSpace(errBackendPlanOlder),
-			planState.Serial, real.Serial)
-		}
-	}
-
-	// Write the state
-	newState := opts.Plan.State.DeepCopy()
-	if newState != nil {
-		newState.Remote = nil
-		newState.Backend = nil
-	}
-
-	// realMgr locked above
-	if err := realMgr.WriteState(newState); err != nil {
-		return nil, fmt.Errorf("Error writing state: %s", err)
+			"Saved Backend Empty: %v\n",
+			c == nil, s.Backend.Empty(),
+		))
+		return nil, diags
 	}
-	if err := realMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf("Error writing state: %s", err)
-	}
-
-	return b, nil
 }

 //-------------------------------------------------------------------
@@ -708,63 +579,48 @@ func (m *Meta) backendFromPlan(opts *BackendOpts) (backend.Backend, error) {
 //-------------------------------------------------------------------
 // Unconfiguring a backend (moving from backend => local).

-func (m *Meta) backend_c_r_S(
-	c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) {
+func (m *Meta) backend_c_r_S(c *configs.Backend, cHash int, sMgr *state.LocalState, output bool) (backend.Backend, tfdiags.Diagnostics) {
 	s := sMgr.State()

 	// Get the backend type for output
 	backendType := s.Backend.Type

-	copy := m.forceInitCopy
-	if !copy {
-		var err error
-		// Confirm with the user that the copy should occur
-		copy, err = m.confirm(&terraform.InputOpts{
-			Id:    "backend-migrate-to-local",
-			Query: fmt.Sprintf("Do you want to copy the state from %q?", s.Backend.Type),
-			Description: fmt.Sprintf(
-				strings.TrimSpace(inputBackendMigrateLocal), s.Backend.Type),
-		})
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for state copy action: %s", err)
-		}
-	}
+	m.Ui.Output(fmt.Sprintf(strings.TrimSpace(outputBackendMigrateLocal), s.Backend.Type))

-	// If we're copying, perform the migration
-	if copy {
-		// Grab a purely local backend to get the local state if it exists
-		localB, err := m.Backend(&BackendOpts{ForceLocal: true})
-		if err != nil {
-			return nil, fmt.Errorf(strings.TrimSpace(errBackendLocalRead), err)
-		}
+	// Grab a purely local backend to get the local state if it exists
+	localB, diags := m.Backend(&BackendOpts{ForceLocal: true})
+	if diags.HasErrors() {
+		return nil, diags
+	}

-		// Initialize the configured backend
-		b, err := m.backend_C_r_S_unchanged(c, sMgr)
-		if err != nil {
-			return nil, fmt.Errorf(
-				strings.TrimSpace(errBackendSavedUnsetConfig), s.Backend.Type, err)
-		}
+	// Initialize the configured backend
+	b, moreDiags := m.backend_C_r_S_unchanged(c, cHash, sMgr)
+	diags = diags.Append(moreDiags)
+	if moreDiags.HasErrors() {
+		return nil, diags
+	}

-		// Perform the migration
-		err = m.backendMigrateState(&backendMigrateOpts{
-			OneType: s.Backend.Type,
-			TwoType: "local",
-			One:     b,
-			Two:     localB,
-		})
-		if err != nil {
-			return nil, err
-		}
+	// Perform the migration
+	err := m.backendMigrateState(&backendMigrateOpts{
+		OneType: s.Backend.Type,
+		TwoType: "local",
+		One:     b,
+		Two:     localB,
+	})
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
 	}

 	// Remove the stored metadata
 	s.Backend = nil
 	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err)
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err))
+		return nil, diags
 	}
 	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err)
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendClearSaved), err))
+		return nil, diags
 	}

 	if output {
@@ -774,252 +630,82 @@ func (m *Meta) backend_c_r_S(
 	}

 	// Return no backend
-	return nil, nil
+	return nil, diags
 }

 // Legacy remote state
-func (m *Meta) backend_c_R_s(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
-	s := sMgr.State()
-
-	// Warn the user
-	m.Ui.Warn(strings.TrimSpace(warnBackendLegacy) + "\n")
-
-	// We need to convert the config to map[string]interface{} since that
-	// is what the backends expect.
-	var configMap map[string]interface{}
-	if err := mapstructure.Decode(s.Remote.Config, &configMap); err != nil {
-		return nil, fmt.Errorf("Error configuring remote state: %s", err)
-	}
-
-	// Create the config
-	rawC, err := config.NewRawConfig(configMap)
-	if err != nil {
-		return nil, fmt.Errorf("Error configuring remote state: %s", err)
-	}
-	config := terraform.NewResourceConfig(rawC)
+func (m *Meta) backend_c_R_s(c *configs.Backend, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

-	// Get the backend
-	f := backendinit.Backend(s.Remote.Type)
-	if f == nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendLegacyUnknown), s.Remote.Type)
-	}
-	b := f()
+	m.Ui.Error(strings.TrimSpace(errBackendLegacy) + "\n")

-	// Configure
-	if err := b.Configure(config); err != nil {
-		return nil, fmt.Errorf(errBackendLegacyConfig, err)
-	}
-
-	return b, nil
+	diags = diags.Append(fmt.Errorf("Cannot initialize legacy remote state"))
+	return nil, diags
 }

 // Unsetting backend, saved backend, legacy remote state
-func (m *Meta) backend_c_R_S(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
-	// Notify the user
-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset]%s\n\n",
-		strings.TrimSpace(outputBackendUnsetWithLegacy))))
-
-	// Get the backend type for later
-	backendType := sMgr.State().Backend.Type
-
-	// First, perform the configured => local tranasition
-	if _, err := m.backend_c_r_S(c, sMgr, false); err != nil {
-		return nil, err
-	}
-
-	// Grab a purely local backend
-	localB, err := m.Backend(&BackendOpts{ForceLocal: true})
-	if err != nil {
-		return nil, fmt.Errorf(errBackendLocalRead, err)
-	}
-
-	// Grab the state
-	s := sMgr.State()
-
-	// Ask the user if they want to migrate their existing remote state
-	copy := m.forceInitCopy
-	if !copy {
-		copy, err = m.confirm(&terraform.InputOpts{
-			Id: "backend-migrate-to-new",
-			Query: fmt.Sprintf(
-				"Do you want to copy the legacy remote state from %q?",
-				s.Remote.Type),
-			Description: strings.TrimSpace(inputBackendMigrateLegacyLocal),
-		})
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for state copy action: %s", err)
-		}
-	}
+func (m *Meta) backend_c_R_S(c *configs.Backend, cHash int, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

-	// If the user wants a copy, copy!
-	if copy {
-		// Initialize the legacy backend
-		oldB, err := m.backendInitFromLegacy(s.Remote)
-		if err != nil {
-			return nil, err
-		}
-
-		// Perform the migration
-		err = m.backendMigrateState(&backendMigrateOpts{
-			OneType: s.Remote.Type,
-			TwoType: "local",
-			One:     oldB,
-			Two:     localB,
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
+	m.Ui.Error(strings.TrimSpace(errBackendLegacy) + "\n")

-	// Unset the remote state
-	s = sMgr.State()
-	if s == nil {
-		s = terraform.NewState()
-	}
-	s.Remote = nil
-	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
-	}
-	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
-	}
-
-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset][green]\n\n"+
-			strings.TrimSpace(successBackendUnset), backendType)))
-
-	return nil, nil
+	diags = diags.Append(fmt.Errorf("Cannot initialize legacy remote state"))
+	return nil, diags
 }

 // Configuring a backend for the first time with legacy remote state.
-func (m *Meta) backend_C_R_s(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
-	// Notify the user
-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset]%s\n\n",
-		strings.TrimSpace(outputBackendConfigureWithLegacy))))
-
-	// First, configure the new backend
-	b, err := m.backendInitFromConfig(c)
-	if err != nil {
-		return nil, err
-	}
-
-	// Next, save the new configuration. This will not overwrite our
-	// legacy remote state. We'll handle that after.
-	s := sMgr.State()
-	if s == nil {
-		s = terraform.NewState()
-	}
-	s.Backend = &terraform.BackendState{
-		Type:   c.Type,
-		Config: c.RawConfig.Raw,
-		Hash:   c.Hash,
-	}
-	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
-	}
-	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
-	}
-
-	// I don't know how this is possible but if we don't have remote
-	// state config anymore somehow, just return the backend. This
-	// shouldn't be possible, though.
-	if s.Remote.Empty() {
-		return b, nil
-	}
-
-	// Finally, ask the user if they want to copy the state from
-	// their old remote state location.
-	copy := m.forceInitCopy
-	if !copy {
-		copy, err = m.confirm(&terraform.InputOpts{
-			Id: "backend-migrate-to-new",
-			Query: fmt.Sprintf(
-				"Do you want to copy the legacy remote state from %q?",
-				s.Remote.Type),
-			Description: strings.TrimSpace(inputBackendMigrateLegacy),
-		})
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for state copy action: %s", err)
-		}
-	}
-
-	// If the user wants a copy, copy!
-	if copy {
-		// Initialize the legacy backend
-		oldB, err := m.backendInitFromLegacy(s.Remote)
-		if err != nil {
-			return nil, err
-		}
-
-		// Perform the migration
-		err = m.backendMigrateState(&backendMigrateOpts{
-			OneType: s.Remote.Type,
-			TwoType: c.Type,
-			One:     oldB,
-			Two:     b,
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Unset the remote state
-	s = sMgr.State()
-	if s == nil {
-		s = terraform.NewState()
-	}
-	s.Remote = nil
-	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
-	}
-	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
-	}
+func (m *Meta) backend_C_R_s(c *configs.Backend, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset][green]\n\n"+
-			strings.TrimSpace(successBackendSet), s.Backend.Type)))
+	m.Ui.Error(strings.TrimSpace(errBackendLegacy) + "\n")

-	return b, nil
+	diags = diags.Append(fmt.Errorf("Cannot initialize legacy remote state"))
+	return nil, diags
 }

 // Configuring a backend for the first time.
-func (m *Meta) backend_C_r_s(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
+func (m *Meta) backend_C_r_s(c *configs.Backend, cHash int, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
 	// Get the backend
-	b, err := m.backendInitFromConfig(c)
-	if err != nil {
-		return nil, err
+	b, configVal, diags := m.backendInitFromConfig(c)
+	if diags.HasErrors() {
+		return nil, diags
 	}

 	// Grab a purely local backend to get the local state if it exists
-	localB, err := m.Backend(&BackendOpts{ForceLocal: true})
-	if err != nil {
-		return nil, fmt.Errorf(errBackendLocalRead, err)
+	localB, localBDiags := m.Backend(&BackendOpts{ForceLocal: true})
+	if localBDiags.HasErrors() {
+		diags = diags.Append(localBDiags)
+		return nil, diags
 	}

-	env := m.Env()
-
-	localState, err := localB.State(env)
+	workspaces, err := localB.Workspaces()
 	if err != nil {
-		return nil, fmt.Errorf(errBackendLocalRead, err)
+		diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+		return nil, diags
 	}
-	if err := localState.RefreshState(); err != nil {
-		return nil, fmt.Errorf(errBackendLocalRead, err)
+
+	var localStates []state.State
+	for _, workspace := range workspaces {
+		localState, err := localB.StateMgr(workspace)
+		if err != nil {
+			diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+			return nil, diags
+		}
+		if err := localState.RefreshState(); err != nil {
+			diags = diags.Append(fmt.Errorf(errBackendLocalRead, err))
+			return nil, diags
+		}
+
+		// We only care about non-empty states.
+		if localS := localState.State(); !localS.Empty() {
+			log.Printf("[TRACE] Meta.Backend: will need to migrate workspace states because of existing %q workspace", workspace)
+			localStates = append(localStates, localState)
+		} else {
+			log.Printf("[TRACE] Meta.Backend: ignoring local %q workspace because its state is empty", workspace)
+		}
 	}

-	// If the local state is not empty, we need to potentially do a
-	// state migration to the new backend (with user permission), unless the
-	// destination is also "local"
-	if localS := localState.State(); !localS.Empty() {
+	if len(localStates) > 0 {
 		// Perform the migration
 		err = m.backendMigrateState(&backendMigrateOpts{
 			OneType: "local",
@@ -1028,7 +714,8 @@ func (m *Meta) backend_C_r_s(
 			Two:     b,
 		})
 		if err != nil {
-			return nil, err
+			diags = diags.Append(err)
+			return nil, diags
 		}

 		// we usually remove the local state after migration to prevent
@@ -1036,38 +723,44 @@ func (m *Meta) backend_C_r_s(
 		// can get us here too. Don't delete our state if the old and new paths
 		// are the same.
 		erase := true
-		if newLocalB, ok := b.(*backendlocal.Local); ok {
-			if localB, ok := localB.(*backendlocal.Local); ok {
-				if newLocalB.StatePath == localB.StatePath {
+		if newLocalB, ok := b.(*backendLocal.Local); ok {
+			if localB, ok := localB.(*backendLocal.Local); ok {
+				if newLocalB.PathsConflictWith(localB) {
 					erase = false
+					log.Printf("[TRACE] Meta.Backend: both old and new backends share the same local state paths, so not erasing old state")
 				}
 			}
 		}

 		if erase {
-			// We always delete the local state, unless that was our new state too.
-			if err := localState.WriteState(nil); err != nil {
-				return nil, fmt.Errorf(errBackendMigrateLocalDelete, err)
-			}
-			if err := localState.PersistState(); err != nil {
-				return nil, fmt.Errorf(errBackendMigrateLocalDelete, err)
+			log.Printf("[TRACE] Meta.Backend: removing old state snapshots from old backend")
+			for _, localState := range localStates {
+				// We always delete the local state, unless that was our new state too.
+				if err := localState.WriteState(nil); err != nil {
+					diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err))
+					return nil, diags
+				}
+				if err := localState.PersistState(); err != nil {
+					diags = diags.Append(fmt.Errorf(errBackendMigrateLocalDelete, err))
+					return nil, diags
+				}
 			}
 		}
 	}

 	if m.stateLock {
-		lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout)
-		defer cancel()
-
-		// Lock the state if we can
-		lockInfo := state.NewLockInfo()
-		lockInfo.Operation = "backend from config"
-
-		lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize())
-		if err != nil {
-			return nil, fmt.Errorf("Error locking state: %s", err)
+		stateLocker := clistate.NewLocker(context.Background(), m.stateLockTimeout, m.Ui, m.Colorize())
+		if err := stateLocker.Lock(sMgr, "backend from plan"); err != nil {
+			diags = diags.Append(fmt.Errorf("Error locking state: %s", err))
+			return nil, diags
 		}
-		defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize())
+		defer stateLocker.Unlock(nil)
+	}
+
+	configJSON, err := ctyjson.Marshal(configVal, b.ConfigSchema().ImpliedType())
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("Can't serialize backend configuration as JSON: %s", err))
+		return nil, diags
 	}

 	// Store the metadata in our saved state location
@@ -1076,29 +769,29 @@ func (m *Meta) backend_C_r_s(
 		s = terraform.NewState()
 	}
 	s.Backend = &terraform.BackendState{
-		Type:   c.Type,
-		Config: c.RawConfig.Raw,
-		Hash:   c.Hash,
+		Type:      c.Type,
+		ConfigRaw: json.RawMessage(configJSON),
+		Hash:      uint64(cHash),
 	}
 	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
 	}
 	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
 	}

+	// By now the backend is successfully configured.
 	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset][green]\n\n"+
-			strings.TrimSpace(successBackendSet), s.Backend.Type)))
+		"[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type)))

-	// Return the backend
-	return b, nil
+	return b, diags
 }
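// The hunks above and below both persist the decoded backend configuration
// by marshalling a cty.Value against the backend schema's implied type and
// storing the resulting JSON in terraform.BackendState.ConfigRaw. The
// following is a minimal, self-contained sketch of that round trip; the
// object type here is a stand-in for a real backend's ConfigSchema(), not
// Terraform's actual API surface.
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// Stand-in for b.ConfigSchema().ImpliedType() on a hypothetical backend.
	ty := cty.Object(map[string]cty.Type{
		"bucket": cty.String,
		"region": cty.String,
	})
	val := cty.ObjectVal(map[string]cty.Value{
		"bucket": cty.StringVal("example-bucket"),
		"region": cty.StringVal("us-east-1"),
	})

	// Serialize, as backend_C_r_s does before writing BackendState.
	buf, err := ctyjson.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", buf) // {"bucket":"example-bucket","region":"us-east-1"}

	// Decoding with the same type recovers an identical value, which is
	// what lets a later run rebuild the saved config from ConfigRaw.
	back, err := ctyjson.Unmarshal(buf, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.RawEquals(val)) // true
}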
 // Changing a previously saved backend.
-func (m *Meta) backend_C_r_S_changed(
-	c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) {
+func (m *Meta) backend_C_r_S_changed(c *configs.Backend, cHash int, sMgr *state.LocalState, output bool) (backend.Backend, tfdiags.Diagnostics) {
 	if output {
 		// Notify the user
 		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
@@ -1110,61 +803,48 @@ func (m *Meta) backend_C_r_S_changed(
 	s := sMgr.State()

 	// Get the backend
-	b, err := m.backendInitFromConfig(c)
-	if err != nil {
-		return nil, fmt.Errorf(
-			"Error initializing new backend: %s", err)
+	b, configVal, diags := m.backendInitFromConfig(c)
+	if diags.HasErrors() {
+		return nil, diags
 	}

-	// Check with the user if we want to migrate state
-	copy := m.forceInitCopy
-	if !copy {
-		copy, err = m.confirm(&terraform.InputOpts{
-			Id:          "backend-migrate-to-new",
-			Query:       fmt.Sprintf("Do you want to copy the state from %q?", c.Type),
-			Description: strings.TrimSpace(fmt.Sprintf(inputBackendMigrateChange, c.Type, s.Backend.Type)),
-		})
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for state copy action: %s", err)
-		}
+	// no need to confuse the user if the backend types are the same
+	if s.Backend.Type != c.Type {
+		m.Ui.Output(strings.TrimSpace(fmt.Sprintf(outputBackendMigrateChange, s.Backend.Type, c.Type)))
 	}

-	// If we are, then we need to initialize the old backend and
-	// perform the copy.
-	if copy {
-		// Grab the existing backend
-		oldB, err := m.backend_C_r_S_unchanged(c, sMgr)
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error loading previously configured backend: %s", err)
-		}
+	// Grab the existing backend
+	oldB, oldBDiags := m.backend_C_r_S_unchanged(c, cHash, sMgr)
+	diags = diags.Append(oldBDiags)
+	if oldBDiags.HasErrors() {
+		return nil, diags
+	}

-		// Perform the migration
-		err = m.backendMigrateState(&backendMigrateOpts{
-			OneType: s.Backend.Type,
-			TwoType: c.Type,
-			One:     oldB,
-			Two:     b,
-		})
-		if err != nil {
-			return nil, err
-		}
+	// Perform the migration
+	err := m.backendMigrateState(&backendMigrateOpts{
+		OneType: s.Backend.Type,
+		TwoType: c.Type,
+		One:     oldB,
+		Two:     b,
+	})
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
 	}

 	if m.stateLock {
-		lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout)
-		defer cancel()
-
-		// Lock the state if we can
-		lockInfo := state.NewLockInfo()
-		lockInfo.Operation = "backend from config"
-
-		lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize())
-		if err != nil {
-			return nil, fmt.Errorf("Error locking state: %s", err)
+		stateLocker := clistate.NewLocker(context.Background(), m.stateLockTimeout, m.Ui, m.Colorize())
+		if err := stateLocker.Lock(sMgr, "backend from plan"); err != nil {
+			diags = diags.Append(fmt.Errorf("Error locking state: %s", err))
+			return nil, diags
 		}
-		defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize())
+		defer stateLocker.Unlock(nil)
+	}
+
+	configJSON, err := ctyjson.Marshal(configVal, b.ConfigSchema().ImpliedType())
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("Can't serialize backend configuration as JSON: %s", err))
+		return nil, diags
 	}

 	// Update the backend state
@@ -1173,283 +853,236 @@ func (m *Meta) backend_C_r_S_changed(
 		s = terraform.NewState()
 	}
 	s.Backend = &terraform.BackendState{
-		Type:   c.Type,
-		Config: c.RawConfig.Raw,
-		Hash:   c.Hash,
+		Type:      c.Type,
+		ConfigRaw: json.RawMessage(configJSON),
+		Hash:      uint64(cHash),
 	}
 	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
 	}
 	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(errBackendWriteSaved, err)
+		diags = diags.Append(fmt.Errorf(errBackendWriteSaved, err))
+		return nil, diags
 	}

 	if output {
 		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-			"[reset][green]\n\n"+
-				strings.TrimSpace(successBackendSet), s.Backend.Type)))
+			"[reset][green]\n"+strings.TrimSpace(successBackendSet), s.Backend.Type)))
 	}

-	return b, nil
+	return b, diags
 }

 // Initiailizing an unchanged saved backend
-func (m *Meta) backend_C_r_S_unchanged(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
+func (m *Meta) backend_C_r_S_unchanged(c *configs.Backend, cHash int, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+
 	s := sMgr.State()

 	// it's possible for a backend to be unchanged, and the config itself to
 	// have changed by moving a parameter from the config to `-backend-config`
 	// In this case we only need to update the Hash.
-	if c != nil && s.Backend.Hash != c.Hash {
-		s.Backend.Hash = c.Hash
+	if c != nil && s.Backend.Hash != uint64(cHash) {
+		s.Backend.Hash = uint64(cHash)
 		if err := sMgr.WriteState(s); err != nil {
-			return nil, fmt.Errorf(errBackendWriteSaved, err)
+			diags = diags.Append(err)
+			return nil, diags
 		}
 	}

-	// Create the config. We do this from the backend state since this
-	// has the complete configuration data whereas the config itself
-	// may require input.
-	rawC, err := config.NewRawConfig(s.Backend.Config)
-	if err != nil {
-		return nil, fmt.Errorf("Error configuring backend: %s", err)
-	}
-	config := terraform.NewResourceConfig(rawC)
-
 	// Get the backend
-	f := backendinit.Backend(s.Backend.Type)
+	f := backendInit.Backend(s.Backend.Type)
 	if f == nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type)
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Backend.Type))
+		return nil, diags
 	}
 	b := f()

-	// Configure
-	if err := b.Configure(config); err != nil {
-		return nil, fmt.Errorf(errBackendSavedConfig, s.Backend.Type, err)
+	// The configuration saved in the working directory state file is used
+	// in this case, since it will contain any additional values that
+	// were provided via -backend-config arguments on terraform init.
+	schema := b.ConfigSchema()
+	configVal, err := s.Backend.Config(schema)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to decode current backend config",
+			fmt.Sprintf("The backend configuration created by the most recent run of \"terraform init\" could not be decoded: %s. The configuration may have been initialized by an earlier version that used an incompatible configuration structure. Run \"terraform init -reconfigure\" to force re-initialization of the backend.", err),
+		))
+		return nil, diags
 	}

-	return b, nil
-}
-
-// Initiailizing a changed saved backend with legacy remote state.
-func (m *Meta) backend_C_R_S_changed(
-	c *config.Backend, sMgr state.State) (backend.Backend, error) {
-	// Notify the user
-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset]%s\n\n",
-		strings.TrimSpace(outputBackendSavedWithLegacyChanged))))
-
-	// Reconfigure the backend first
-	if _, err := m.backend_C_r_S_changed(c, sMgr, false); err != nil {
-		return nil, err
+	// Validate the config and then configure the backend
+	newVal, validDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validDiags)
+	if validDiags.HasErrors() {
+		return nil, diags
 	}

-	// Handle the case where we have all set but unchanged
-	b, err := m.backend_C_R_S_unchanged(c, sMgr, false)
-	if err != nil {
-		return nil, err
+	configDiags := b.Configure(newVal)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		return nil, diags
 	}

-	// Output success message
-	m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-		"[reset][green]\n\n"+
-			strings.TrimSpace(successBackendReconfigureWithLegacy), c.Type)))
-
-	return b, nil
+	return b, diags
 }

-// Initiailizing an unchanged saved backend with legacy remote state.
-func (m *Meta) backend_C_R_S_unchanged(
-	c *config.Backend, sMgr state.State, output bool) (backend.Backend, error) {
-	if output {
-		// Notify the user
-		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-			"[reset]%s\n\n",
-			strings.TrimSpace(outputBackendSavedWithLegacy))))
-	}
+// Initiailizing a changed saved backend with legacy remote state.
+func (m *Meta) backend_C_R_S_changed(c *configs.Backend, sMgr *state.LocalState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

-	// Load the backend from the state
-	s := sMgr.State()
-	b, err := m.backendInitFromSaved(s.Backend)
-	if err != nil {
-		return nil, err
-	}
-
-	// Ask if the user wants to move their legacy remote state
-	copy := m.forceInitCopy
-	if !copy {
-		copy, err = m.confirm(&terraform.InputOpts{
-			Id: "backend-migrate-to-new",
-			Query: fmt.Sprintf(
-				"Do you want to copy the legacy remote state from %q?",
-				s.Remote.Type),
-			Description: strings.TrimSpace(inputBackendMigrateLegacy),
-		})
-		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for state copy action: %s", err)
-		}
-	}
+	m.Ui.Error(strings.TrimSpace(errBackendLegacy) + "\n")

-	// If the user wants a copy, copy!
-	if copy {
-		// Initialize the legacy backend
-		oldB, err := m.backendInitFromLegacy(s.Remote)
-		if err != nil {
-			return nil, err
-		}
+	diags = diags.Append(fmt.Errorf("Cannot initialize legacy remote state"))
+	return nil, diags
+}

-		// Perform the migration
-		err = m.backendMigrateState(&backendMigrateOpts{
-			OneType: s.Remote.Type,
-			TwoType: s.Backend.Type,
-			One:     oldB,
-			Two:     b,
-		})
-		if err != nil {
-			return nil, err
-		}
-	}
+// Initiailizing an unchanged saved backend with legacy remote state.
+func (m *Meta) backend_C_R_S_unchanged(c *configs.Backend, sMgr *state.LocalState, output bool) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

-	if m.stateLock {
-		lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout)
-		defer cancel()
+	m.Ui.Error(strings.TrimSpace(errBackendLegacy) + "\n")

-		// Lock the state if we can
-		lockInfo := state.NewLockInfo()
-		lockInfo.Operation = "backend from config"
+	diags = diags.Append(fmt.Errorf("Cannot initialize legacy remote state"))
+	return nil, diags
+}

-		lockID, err := clistate.Lock(lockCtx, sMgr, lockInfo, m.Ui, m.Colorize())
-		if err != nil {
-			return nil, fmt.Errorf("Error locking state: %s", err)
-		}
-		defer clistate.Unlock(sMgr, lockID, m.Ui, m.Colorize())
-	}
+//-------------------------------------------------------------------
+// Reusable helper functions for backend management
+//-------------------------------------------------------------------

-	// Unset the remote state
-	s = sMgr.State()
-	if s == nil {
-		s = terraform.NewState()
+// backendConfigNeedsMigration returns true if migration might be required to
+// move from the configured backend to the given cached backend config.
+//
+// This must be called with the synthetic *configs.Backend that results from
+// merging in any command-line options for correct behavior.
+//
+// If either the given configuration or cached configuration are invalid then
+// this function will conservatively assume that migration is required,
+// expecting that the migration code will subsequently deal with the same
+// errors.
+func (m *Meta) backendConfigNeedsMigration(c *configs.Backend, s *terraform.BackendState) bool {
+	if s == nil || s.Empty() {
+		log.Print("[TRACE] backendConfigNeedsMigration: no cached config, so migration is required")
+		return true
+	}
+	if c.Type != s.Type {
+		log.Printf("[TRACE] backendConfigNeedsMigration: type changed from %q to %q, so migration is required", s.Type, c.Type)
+		return true
+	}
+
+	// We need the backend's schema to do our comparison here.
+	f := backendInit.Backend(c.Type)
+	if f == nil {
+		log.Printf("[TRACE] backendConfigNeedsMigration: no backend of type %q, which migration codepath must handle", c.Type)
+		return true // let the migration codepath deal with the missing backend
 	}
-	s.Remote = nil
+	b := f()

-	if err := sMgr.WriteState(s); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
-	}
-	if err := sMgr.PersistState(); err != nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendClearLegacy), err)
+	schema := b.ConfigSchema()
+	decSpec := schema.NoneRequired().DecoderSpec()
+	givenVal, diags := hcldec.Decode(c.Config, decSpec, nil)
+	if diags.HasErrors() {
+		log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode given config; migration codepath must handle problem: %s", diags.Error())
+		return true // let the migration codepath deal with these errors
 	}

-	if output {
-		m.Ui.Output(m.Colorize().Color(fmt.Sprintf(
-			"[reset][green]\n\n"+
-				strings.TrimSpace(successBackendLegacyUnset), s.Backend.Type)))
+	cachedVal, err := s.Config(schema)
+	if err != nil {
+		log.Printf("[TRACE] backendConfigNeedsMigration: failed to decode cached config; migration codepath must handle problem: %s", err)
+		return true // let the migration codepath deal with the error
 	}

-	return b, nil
+	// If we get all the way down here then it's the exact equality of the
+	// two decoded values that decides our outcome. It's safe to use RawEquals
+	// here (rather than Equals) because we know that unknown values can
+	// never appear in backend configurations.
+	if cachedVal.RawEquals(givenVal) {
+		log.Print("[TRACE] backendConfigNeedsMigration: given configuration matches cached configuration, so no migration is required")
+		return false
+	}
+	log.Print("[TRACE] backendConfigNeedsMigration: configuration values have changed, so migration is required")
+	return true
 }

 //-------------------------------------------------------------------
 // Reusable helper functions for backend management
 //-------------------------------------------------------------------

-func (m *Meta) backendInitFromConfig(c *config.Backend) (backend.Backend, error) {
-	// Create the config.
-	config := terraform.NewResourceConfig(c.RawConfig)
+func (m *Meta) backendInitFromConfig(c *configs.Backend) (backend.Backend, cty.Value, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

 	// Get the backend
-	f := backendinit.Backend(c.Type)
+	f := backendInit.Backend(c.Type)
 	if f == nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type)
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendNewUnknown), c.Type))
+		return nil, cty.NilVal, diags
 	}
 	b := f()

+	schema := b.ConfigSchema()
+	decSpec := schema.NoneRequired().DecoderSpec()
+	configVal, hclDiags := hcldec.Decode(c.Config, decSpec, nil)
+	diags = diags.Append(hclDiags)
+	if hclDiags.HasErrors() {
+		return nil, cty.NilVal, diags
+	}
+
+	// TODO: test
-	// Ask for input if we have input enabled
 	if m.Input() {
 		var err error
-		config, err = b.Input(m.UIInput(), config)
+		configVal, err = m.inputForSchema(configVal, schema)
 		if err != nil {
-			return nil, fmt.Errorf(
-				"Error asking for input to configure the backend %q: %s",
-				c.Type, err)
+			diags = diags.Append(fmt.Errorf("Error asking for input to configure backend %q: %s", c.Type, err))
 		}
-	}

-	// Validate
-	warns, errs := b.Validate(config)
-	if len(errs) > 0 {
-		return nil, fmt.Errorf(
-			"Error configuring the backend %q: %s",
-			c.Type, multierror.Append(nil, errs...))
-	}
-	if len(warns) > 0 {
-		// TODO: warnings are currently ignored
+		// We get an unknown here if the if the user aborted input, but we can't
+		// turn that into a config value, so set it to null and let the provider
+		// handle it in PrepareConfig.
+		if !configVal.IsKnown() {
+			configVal = cty.NullVal(configVal.Type())
+		}
 	}

-	// Configure
-	if err := b.Configure(config); err != nil {
-		return nil, fmt.Errorf(errBackendNewConfig, c.Type, err)
+	newVal, validateDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validateDiags.InConfigBody(c.Config))
+	if validateDiags.HasErrors() {
+		return nil, cty.NilVal, diags
 	}

-	return b, nil
+	configureDiags := b.Configure(newVal)
+	diags = diags.Append(configureDiags.InConfigBody(c.Config))
+
+	return b, configVal, diags
 }

-func (m *Meta) backendInitFromLegacy(s *terraform.RemoteState) (backend.Backend, error) {
-	// We need to convert the config to map[string]interface{} since that
-	// is what the backends expect.
-	var configMap map[string]interface{}
-	if err := mapstructure.Decode(s.Config, &configMap); err != nil {
-		return nil, fmt.Errorf("Error configuring remote state: %s", err)
-	}
-
-	// Create the config
-	rawC, err := config.NewRawConfig(configMap)
-	if err != nil {
-		return nil, fmt.Errorf("Error configuring remote state: %s", err)
-	}
-	config := terraform.NewResourceConfig(rawC)
+func (m *Meta) backendInitFromSaved(s *terraform.BackendState) (backend.Backend, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics

 	// Get the backend
-	f := backendinit.Backend(s.Type)
+	f := backendInit.Backend(s.Type)
 	if f == nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendLegacyUnknown), s.Type)
+		diags = diags.Append(fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Type))
+		return nil, diags
 	}
 	b := f()

-	// Configure
-	if err := b.Configure(config); err != nil {
-		return nil, fmt.Errorf(errBackendLegacyConfig, err)
-	}
-
-	return b, nil
-}
-
-func (m *Meta) backendInitFromSaved(s *terraform.BackendState) (backend.Backend, error) {
-	// Create the config. We do this from the backend state since this
-	// has the complete configuration data whereas the config itself
-	// may require input.
-	rawC, err := config.NewRawConfig(s.Config)
+	schema := b.ConfigSchema()
+	configVal, err := s.Config(schema)
 	if err != nil {
-		return nil, fmt.Errorf("Error configuring backend: %s", err)
+		diags = diags.Append(errwrap.Wrapf("saved backend configuration is invalid: {{err}}", err))
+		return nil, diags
 	}
-	config := terraform.NewResourceConfig(rawC)

-	// Get the backend
-	f := backendinit.Backend(s.Type)
-	if f == nil {
-		return nil, fmt.Errorf(strings.TrimSpace(errBackendSavedUnknown), s.Type)
+	newVal, validateDiags := b.PrepareConfig(configVal)
+	diags = diags.Append(validateDiags)
+	if validateDiags.HasErrors() {
+		return nil, diags
 	}
-	b := f()

-	// Configure
-	if err := b.Configure(config); err != nil {
-		return nil, fmt.Errorf(errBackendSavedConfig, s.Type, err)
-	}
+	configureDiags := b.Configure(newVal)
+	diags = diags.Append(configureDiags)

-	return b, nil
+	return b, diags
 }

 func (m *Meta) backendInitRequired(reason string) {
@@ -1544,6 +1177,14 @@ If you'd like to run Terraform and store state
 locally, you can fix this error by removing the backend configuration
 from your configuration.
 `

+const errBackendNoExistingWorkspaces = `
+No existing workspaces.
+
+Use the "terraform workspace" command to create and select a new workspace.
+If the backend already contains existing workspaces, you may need to update
+the backend configuration.
+`
+
 const errBackendRemoteRead = `
 Error reading backend state: %s
@@ -1648,7 +1289,7 @@ different states. The most common cause of seeing this
 error is using a plan that was created against a different state. Perhaps the
 plan is very old and the
-state has since been recreated, or perhaps the plan was against a competely
+state has since been recreated, or perhaps the plan was against a completely
 different infrastructure.
 `

@@ -1674,27 +1315,16 @@ Plan Serial: %[1]d
 Current Serial: %[2]d
 `

-const inputBackendMigrateChange = `
-Would you like to copy the state from your prior backend %q to the
-newly configured %q backend? If you're reconfiguring the same backend,
-answering "yes" or "no" shouldn't make a difference. Please answer exactly
-"yes" or "no".
+const outputBackendMigrateChange = `
+Terraform detected that the backend type changed from %q to %q.
 `

-const inputBackendMigrateLegacy = `
-Terraform can copy the existing state in your legacy remote state
-backend to your newly configured backend. Please answer "yes" or "no".
+const outputBackendMigrateLegacy = `
+Terraform detected legacy remote state.
 `

-const inputBackendMigrateLegacyLocal = `
-Terraform can copy the existing state in your legacy remote state
-backend to your local state. Please answer "yes" or "no".
-`
-
-const inputBackendMigrateLocal = `
-Terraform has detected you're unconfiguring your previously set backend.
-Would you like to copy the state from %q to local state? Please answer
-"yes" or "no". If you answer "no", you will start with a blank local state.
+const outputBackendMigrateLocal = `
+Terraform has detected you're unconfiguring your previously set %q backend.
 `

 const outputBackendConfigureWithLegacy = `
@@ -1710,9 +1340,7 @@ const outputBackendReconfigure = `
 [reset][bold]Backend configuration changed![reset]

 Terraform has detected that the configuration specified for the backend
-has changed. Terraform will now reconfigure for this backend. If you didn't
-intend to reconfigure your backend please undo any changes to the "backend"
-section in your Terraform configuration.
+has changed. Terraform will now check for existing state in the backends.
 `

 const outputBackendSavedWithLegacy = `
@@ -1763,14 +1391,11 @@ Successfully configured the backend %q! Terraform will automatically
 use this backend unless the backend configuration changes.
 `

-const warnBackendLegacy = `
-Deprecation warning: This environment is configured to use legacy remote state.
-Remote state changed significantly in Terraform 0.9. Please update your remote
-state configuration to use the new 'backend' settings. For now, Terraform
-will continue to use your existing settings. Legacy remote state support
-will be removed in Terraform 0.11.
-
-You can find a guide for upgrading here:
+const errBackendLegacy = `
+This working directory is configured to use the legacy remote state features
+from Terraform 0.8 or earlier. Remote state changed significantly in Terraform
+0.9 and the automatic upgrade mechanism has now been removed.

-https://www.terraform.io/docs/backends/legacy-0-8.html
+To upgrade, please first use Terraform v0.11 to complete the upgrade steps:
+  https://www.terraform.io/docs/backends/legacy-0-8.html
 `
diff --git a/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go b/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go
index 8dce064a..9227844a 100644
--- a/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go
+++ b/vendor/github.com/hashicorp/terraform/command/meta_backend_migrate.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"os"
 	"path/filepath"
 	"sort"
@@ -13,9 +14,22 @@ import (
 	"github.com/hashicorp/terraform/backend"
 	"github.com/hashicorp/terraform/command/clistate"
 	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/states/statemgr"
 	"github.com/hashicorp/terraform/terraform"
 )

+type backendMigrateOpts struct {
+	OneType, TwoType string
+	One, Two         backend.Backend
+
+	// Fields below are set internally when migrate is called
+
+	oneEnv string // source env
+	twoEnv string // dest env
+	force  bool   // if true, won't ask for confirmation
+}
+
 // backendMigrateState handles migrating (copying) state from one backend
 // to another. This function handles asking the user for confirmation
 // as well as the copy itself.
@@ -28,11 +42,12 @@ import (
 //
 // This will attempt to lock both states for the migration.
 func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error {
+	log.Printf("[TRACE] backendMigrateState: need to migrate from %q to %q backend config", opts.OneType, opts.TwoType)
 	// We need to check what the named state status is. If we're converting
 	// from multi-state to single-state for example, we need to handle that.
 	var oneSingle, twoSingle bool
-	oneStates, err := opts.One.States()
-	if err == backend.ErrNamedStatesNotSupported {
+	oneStates, err := opts.One.Workspaces()
+	if err == backend.ErrWorkspacesNotSupported {
 		oneSingle = true
 		err = nil
 	}
@@ -41,8 +56,8 @@ func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error {
 			errMigrateLoadStates), opts.OneType, err)
 	}

-	_, err = opts.Two.States()
-	if err == backend.ErrNamedStatesNotSupported {
+	_, err = opts.Two.Workspaces()
+	if err == backend.ErrWorkspacesNotSupported {
 		twoSingle = true
 		err = nil
 	}
@@ -113,11 +128,13 @@ func (m *Meta) backendMigrateState(opts *backendMigrateOpts) error {

 // Multi-state to multi-state.
 func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error {
+	log.Print("[TRACE] backendMigrateState: migrating all named workspaces")
+
 	// Ask the user if they want to migrate their existing remote state
 	migrate, err := m.confirm(&terraform.InputOpts{
 		Id: "backend-migrate-multistate-to-multistate",
 		Query: fmt.Sprintf(
-			"Do you want to migrate all environments to %q?",
+			"Do you want to migrate all workspaces to %q?",
 			opts.TwoType),
 		Description: fmt.Sprintf(
 			strings.TrimSpace(inputBackendMigrateMultiToMulti),
@@ -132,7 +149,7 @@ func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error {
 	}

 	// Read all the states
-	oneStates, err := opts.One.States()
+	oneStates, err := opts.One.Workspaces()
 	if err != nil {
 		return fmt.Errorf(strings.TrimSpace(
 			errMigrateLoadStates), opts.OneType, err)
@@ -162,7 +179,9 @@ func (m *Meta) backendMigrateState_S_S(opts *backendMigrateOpts) error {

 // Multi-state to single state.
 func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error {
-	currentEnv := m.Env()
+	log.Printf("[TRACE] backendMigrateState: target backend type %q does not support named workspaces", opts.TwoType)
+
+	currentEnv := m.Workspace()

 	migrate := opts.force
 	if !migrate {
@@ -171,8 +190,8 @@ func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error {
 		migrate, err = m.confirm(&terraform.InputOpts{
 			Id: "backend-migrate-multistate-to-single",
 			Query: fmt.Sprintf(
-				"Destination state %q doesn't support environments (named states).\n"+
-					"Do you want to copy only your current environment?",
+				"Destination state %q doesn't support workspaces.\n"+
+					"Do you want to copy only your current workspace?",
 				opts.TwoType),
 			Description: fmt.Sprintf(
 				strings.TrimSpace(inputBackendMigrateMultiToSingle),
@@ -192,14 +211,16 @@ func (m *Meta) backendMigrateState_S_s(opts *backendMigrateOpts) error {
 	opts.oneEnv = currentEnv

 	// now switch back to the default env so we can acccess the new backend
-	m.SetEnv(backend.DefaultStateName)
+	m.SetWorkspace(backend.DefaultStateName)

 	return m.backendMigrateState_s_s(opts)
 }
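// backendMigrateState above decides between the _S_S, _S_s and _s_s
// strategies by probing each backend's Workspaces() method and treating
// backend.ErrWorkspacesNotSupported as "single state only" rather than as a
// real failure. A minimal sketch of that sentinel-error probe under assumed,
// simplified types (store, single and multi are illustrative stand-ins, not
// Terraform's backend interface):
package main

import (
	"errors"
	"fmt"
)

var errWorkspacesNotSupported = errors.New("workspaces not supported")

type store interface {
	Workspaces() ([]string, error)
}

type single struct{}

func (single) Workspaces() ([]string, error) { return nil, errWorkspacesNotSupported }

type multi struct{ names []string }

func (m multi) Workspaces() ([]string, error) { return m.names, nil }

// probe mirrors how oneSingle/twoSingle are derived above: the sentinel is
// swallowed and recorded as a capability flag, any other error propagates.
func probe(s store) (names []string, singleOnly bool, err error) {
	names, err = s.Workspaces()
	if err == errWorkspacesNotSupported {
		return nil, true, nil
	}
	return names, false, err
}

func main() {
	for _, s := range []store{single{}, multi{names: []string{"default", "dev"}}} {
		names, singleOnly, err := probe(s)
		fmt.Println(names, singleOnly, err)
	}
}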
func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { - stateOne, err := opts.One.State(opts.oneEnv) + log.Printf("[TRACE] backendMigrateState: migrating %q workspace to %q workspace", opts.oneEnv, opts.twoEnv) + + stateOne, err := opts.One.StateMgr(opts.oneEnv) if err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.OneType, err) @@ -209,7 +230,49 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { errMigrateSingleLoadDefault), opts.OneType, err) } - stateTwo, err := opts.Two.State(opts.twoEnv) + // Do not migrate workspaces without state. + if stateOne.State().Empty() { + log.Print("[TRACE] backendMigrateState: source workspace has empty state, so nothing to migrate") + return nil + } + + stateTwo, err := opts.Two.StateMgr(opts.twoEnv) + if err == backend.ErrDefaultWorkspaceNotSupported { + // If the backend doesn't support using the default state, we ask the user + // for a new name and migrate the default state to the given named state. + stateTwo, err = func() (statemgr.Full, error) { + log.Print("[TRACE] backendMigrateState: target doesn't support a default workspace, so we must prompt for a new name") + name, err := m.UIInput().Input(context.Background(), &terraform.InputOpts{ + Id: "new-state-name", + Query: fmt.Sprintf( + "[reset][bold][yellow]The %q backend configuration only allows "+ + "named workspaces![reset]", + opts.TwoType), + Description: strings.TrimSpace(inputBackendNewWorkspaceName), + }) + if err != nil { + return nil, fmt.Errorf("Error asking for new state name: %s", err) + } + + // Update the name of the target state. + opts.twoEnv = name + + stateTwo, err := opts.Two.StateMgr(opts.twoEnv) + if err != nil { + return nil, err + } + + // If the currently selected workspace is the default workspace, then set + // the named workspace as the new selected workspace. + if m.Workspace() == backend.DefaultStateName { + if err := m.SetWorkspace(opts.twoEnv); err != nil { + return nil, fmt.Errorf("Failed to set new workspace: %s", err) + } + } + + return stateTwo, nil + }() + } if err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.TwoType, err) @@ -227,41 +290,43 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { // no reason to migrate if the state is already there if one.Equal(two) { // Equal isn't identical; it doesn't check lineage. 
- if one != nil && two != nil && one.Lineage == two.Lineage { - return nil + sm1, _ := stateOne.(statemgr.PersistentMeta) + sm2, _ := stateTwo.(statemgr.PersistentMeta) + if one != nil && two != nil { + if sm1 == nil || sm2 == nil { + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have no state, so no migration is needed") + return nil + } + if sm1.StateSnapshotMeta().Lineage == sm2.StateSnapshotMeta().Lineage { + log.Printf("[TRACE] backendMigrateState: both source and destination workspaces have equal state with lineage %q, so no migration is needed", sm1.StateSnapshotMeta().Lineage) + return nil + } } } if m.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), m.stateLockTimeout) - defer cancel() + lockCtx := context.Background() - lockInfoOne := state.NewLockInfo() - lockInfoOne.Operation = "migration" - lockInfoOne.Info = "source state" - - lockIDOne, err := clistate.Lock(lockCtx, stateOne, lockInfoOne, m.Ui, m.Colorize()) - if err != nil { + lockerOne := clistate.NewLocker(lockCtx, m.stateLockTimeout, m.Ui, m.Colorize()) + if err := lockerOne.Lock(stateOne, "migration source state"); err != nil { return fmt.Errorf("Error locking source state: %s", err) } - defer clistate.Unlock(stateOne, lockIDOne, m.Ui, m.Colorize()) + defer lockerOne.Unlock(nil) - lockInfoTwo := state.NewLockInfo() - lockInfoTwo.Operation = "migration" - lockInfoTwo.Info = "destination state" - - lockIDTwo, err := clistate.Lock(lockCtx, stateTwo, lockInfoTwo, m.Ui, m.Colorize()) - if err != nil { + lockerTwo := clistate.NewLocker(lockCtx, m.stateLockTimeout, m.Ui, m.Colorize()) + if err := lockerTwo.Lock(stateTwo, "migration destination state"); err != nil { return fmt.Errorf("Error locking destination state: %s", err) } - defer clistate.Unlock(stateTwo, lockIDTwo, m.Ui, m.Colorize()) + defer lockerTwo.Unlock(nil) // We now own a lock, so double check that we have the version // corresponding to the lock. + log.Print("[TRACE] backendMigrateState: refreshing source workspace state") if err := stateOne.RefreshState(); err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.OneType, err) } + log.Print("[TRACE] backendMigrateState: refreshing target workspace state") if err := stateTwo.RefreshState(); err != nil { return fmt.Errorf(strings.TrimSpace( errMigrateSingleLoadDefault), opts.OneType, err) @@ -271,33 +336,28 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { two = stateTwo.State() } - // Clear the legacy remote state in both cases. If we're at the migration - // step then this won't be used anymore. - if one != nil { - one.Remote = nil - } - if two != nil { - two.Remote = nil - } - var confirmFunc func(state.State, state.State, *backendMigrateOpts) (bool, error) switch { // No migration necessary case one.Empty() && two.Empty(): + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have empty state, so no migration is required") return nil // No migration necessary if we're inheriting state. case one.Empty() && !two.Empty(): + log.Print("[TRACE] backendMigrateState: source workspace has empty state, so no migration is required") return nil // We have existing state moving into no state. Ask the user if // they'd like to do this. 
case !one.Empty() && two.Empty(): + log.Print("[TRACE] backendMigrateState: target workspace has empty state, so might copy source workspace state") confirmFunc = m.backendMigrateEmptyConfirm // Both states are non-empty, meaning we need to determine which // state should be used and update accordingly. case !one.Empty() && !two.Empty(): + log.Print("[TRACE] backendMigrateState: both source and destination workspaces have states, so might overwrite destination with source") confirmFunc = m.backendMigrateNonEmptyConfirm } @@ -308,21 +368,27 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { if !opts.force { // Abort if we can't ask for input. if !m.input { + log.Print("[TRACE] backendMigrateState: can't prompt for input, so aborting migration") return errors.New("error asking for state migration action: input disabled") } // Confirm with the user whether we want to copy state over confirm, err := confirmFunc(stateOne, stateTwo, opts) if err != nil { + log.Print("[TRACE] backendMigrateState: error reading input, so aborting migration") return err } if !confirm { + log.Print("[TRACE] backendMigrateState: user cancelled at confirmation prompt, so aborting migration") return nil } } - // Confirmed! Write. - if err := stateTwo.WriteState(one); err != nil { + // Confirmed! We'll have the statemgr package handle the migration, which + // includes preserving any lineage/serial information where possible, if + // both managers support such metadata. + log.Print("[TRACE] backendMigrateState: migration confirmed, so migrating") + if err := statemgr.Migrate(stateTwo, stateOne); err != nil { return fmt.Errorf(strings.TrimSpace(errBackendStateCopy), opts.OneType, opts.TwoType, err) } @@ -337,31 +403,14 @@ func (m *Meta) backendMigrateState_s_s(opts *backendMigrateOpts) error { func (m *Meta) backendMigrateEmptyConfirm(one, two state.State, opts *backendMigrateOpts) (bool, error) { inputOpts := &terraform.InputOpts{ - Id: "backend-migrate-copy-to-empty", - Query: fmt.Sprintf( - "Do you want to copy state from %q to %q?", - opts.OneType, opts.TwoType), + Id: "backend-migrate-copy-to-empty", + Query: "Do you want to copy existing state to the new backend?", Description: fmt.Sprintf( strings.TrimSpace(inputBackendMigrateEmpty), opts.OneType, opts.TwoType), } - // Confirm with the user that the copy should occur - for { - v, err := m.UIInput().Input(inputOpts) - if err != nil { - return false, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - - switch strings.ToLower(v) { - case "no": - return false, nil - - case "yes": - return true, nil - } - } + return m.confirm(inputOpts) } func (m *Meta) backendMigrateNonEmptyConfirm( @@ -378,14 +427,9 @@ func (m *Meta) backendMigrateNonEmptyConfirm( defer os.RemoveAll(td) // Helper to write the state - saveHelper := func(n, path string, s *terraform.State) error { - f, err := os.Create(path) - if err != nil { - return err - } - defer f.Close() - - return terraform.WriteState(s, f) + saveHelper := func(n, path string, s *states.State) error { + mgr := statemgr.NewFilesystem(path) + return mgr.WriteState(s) } // Write the states @@ -400,46 +444,20 @@ func (m *Meta) backendMigrateNonEmptyConfirm( // Ask for confirmation inputOpts := &terraform.InputOpts{ - Id: "backend-migrate-to-backend", - Query: fmt.Sprintf( - "Do you want to copy state from %q to %q?", - opts.OneType, opts.TwoType), + Id: "backend-migrate-to-backend", + Query: "Do you want to copy existing state to the new backend?", Description: fmt.Sprintf( 
strings.TrimSpace(inputBackendMigrateNonEmpty), opts.OneType, opts.TwoType, onePath, twoPath), } // Confirm with the user that the copy should occur - for { - v, err := m.UIInput().Input(inputOpts) - if err != nil { - return false, fmt.Errorf( - "Error asking for state copy action: %s", err) - } - - switch strings.ToLower(v) { - case "no": - return false, nil - - case "yes": - return true, nil - } - } -} - -type backendMigrateOpts struct { - OneType, TwoType string - One, Two backend.Backend - - // Fields below are set internally when migrate is called - - oneEnv string // source env - twoEnv string // dest env - force bool // if true, won't ask for confirmation + return m.confirm(inputOpts) } const errMigrateLoadStates = ` -Error inspecting state in %q: %s +Error inspecting states in the %q backend: + %s Prior to changing backends, Terraform inspects the source and destination states to determine what kind of migration steps need to be taken, if any. @@ -448,9 +466,10 @@ destination remain unmodified. Please resolve the above error and try again. ` const errMigrateSingleLoadDefault = ` -Error loading state from %q: %s +Error loading state: + %[2]s -Terraform failed to load the default state from %[1]q. +Terraform failed to load the default state from the %[1]q backend. State migration cannot occur unless the state can be loaded. Backend modification and state migration has been aborted. The state in both the source and the destination remain unmodified. Please resolve the @@ -458,63 +477,80 @@ above error and try again. ` const errMigrateMulti = ` -Error migrating the environment %q from %q to %q: +Error migrating the workspace %q from the previous %q backend +to the newly configured %q backend: + %s -%s - -Terraform copies environments in alphabetical order. Any environments -alphabetically earlier than this one have been copied. Any environments -later than this haven't been modified in the destination. No environments +Terraform copies workspaces in alphabetical order. Any workspaces +alphabetically earlier than this one have been copied. Any workspaces +later than this haven't been modified in the destination. No workspaces in the source state have been modified. Please resolve the error above and run the initialization command again. -This will attempt to copy (with permission) all environments again. +This will attempt to copy (with permission) all workspaces again. ` const errBackendStateCopy = ` -Error copying state from %q to %q: %s +Error copying state from the previous %q backend to the newly configured +%q backend: + %s -The state in %[1]q remains intact and unmodified. Please resolve the -error above and try again. +The state in the previous backend remains intact and unmodified. Please resolve +the error above and try again. ` const inputBackendMigrateEmpty = ` -Pre-existing state was found in %q while migrating to %q. No existing -state was found in %[2]q. Do you want to copy the state from %[1]q to -%[2]q? Enter "yes" to copy and "no" to start with an empty state. +Pre-existing state was found while migrating the previous %q backend to the +newly configured %q backend. No existing state was found in the newly +configured %[2]q backend. Do you want to copy this state to the new %[2]q +backend? Enter "yes" to copy and "no" to start with an empty state. ` const inputBackendMigrateNonEmpty = ` -Pre-existing state was found in %q while migrating to %q. An existing -non-empty state exists in %[2]q. 
The two states have been saved to temporary -files that will be removed after responding to this query. +Pre-existing state was found while migrating the previous %q backend to the +newly configured %q backend. An existing non-empty state already exists in +the new backend. The two states have been saved to temporary files that will be +removed after responding to this query. -One (%[1]q): %[3]s -Two (%[2]q): %[4]s +Previous (type %[1]q): %[3]s +New (type %[2]q): %[4]s -Do you want to copy the state from %[1]q to %[2]q? Enter "yes" to copy -and "no" to start with the existing state in %[2]q. +Do you want to overwrite the state in the new backend with the previous state? +Enter "yes" to copy and "no" to start with the existing state in the newly +configured %[2]q backend. ` const inputBackendMigrateMultiToSingle = ` -The existing backend %[1]q supports environments and you currently are -using more than one. The target backend %[2]q doesn't support environments. -If you continue, Terraform will offer to copy your current environment -%[3]q to the default environment in the target. Your existing environments -in the source backend won't be modified. If you want to switch environments, -back them up, or cancel altogether, answer "no" and Terraform will abort. +The existing %[1]q backend supports workspaces and you currently are +using more than one. The newly configured %[2]q backend doesn't support +workspaces. If you continue, Terraform will copy your current workspace %[3]q +to the default workspace in the target backend. Your existing workspaces in the +source backend won't be modified. If you want to switch workspaces, back them +up, or cancel altogether, answer "no" and Terraform will abort. ` const inputBackendMigrateMultiToMulti = ` -Both the existing backend %[1]q and the target backend %[2]q support -environments. When migrating between backends, Terraform will copy all -environments (with the same names). THIS WILL OVERWRITE any conflicting +Both the existing %[1]q backend and the newly configured %[2]q backend +support workspaces. When migrating between backends, Terraform will copy +all workspaces (with the same names). THIS WILL OVERWRITE any conflicting states in the destination. -Terraform initialization doesn't currently migrate only select environments. -If you want to migrate a select number of environments, you must manually +Terraform initialization doesn't currently migrate only select workspaces. +If you want to migrate a select number of workspaces, you must manually pull and push those states. If you answer "yes", Terraform will migrate all states. If you answer "no", Terraform will abort. ` + +const inputBackendNewWorkspaceName = ` +Please provide a new workspace name (e.g. dev, test) that will be used +to migrate the existing default workspace. +` + +const inputBackendSelectWorkspace = ` +This is expected behavior when the selected workspace did not have an +existing non-empty state. 
Please enter a number to select a workspace:
+
+%s
+`
diff --git a/vendor/github.com/hashicorp/terraform/command/meta_config.go b/vendor/github.com/hashicorp/terraform/command/meta_config.go
new file mode 100644
index 00000000..2989a7e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/meta_config.go
@@ -0,0 +1,480 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/configs/configload"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"github.com/hashicorp/terraform/internal/initwd"
+	"github.com/hashicorp/terraform/registry"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/tfdiags"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// normalizePath normalizes a given path so that it is, if possible, relative
+// to the current working directory. This is primarily used to prepare
+// paths used to load configuration, because we want to prefer recording
+// relative paths in source code references within the configuration.
+func (m *Meta) normalizePath(path string) string {
+	var err error
+
+	// First we will make it absolute so that we have a consistent place
+	// to start.
+	path, err = filepath.Abs(path)
+	if err != nil {
+		// We'll just accept what we were given, then.
+		return path
+	}
+
+	cwd, err := os.Getwd()
+	if err != nil || !filepath.IsAbs(cwd) {
+		return path
+	}
+
+	ret, err := filepath.Rel(cwd, path)
+	if err != nil {
+		return path
+	}
+
+	return ret
+}
+
+// loadConfig reads a configuration from the given directory, which should
+// contain a root module and already have any required descendent modules
+// installed.
+func (m *Meta) loadConfig(rootDir string) (*configs.Config, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	rootDir = m.normalizePath(rootDir)
+
+	loader, err := m.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
+	}
+
+	config, hclDiags := loader.LoadConfig(rootDir)
+	diags = diags.Append(hclDiags)
+	return config, diags
+}
+
+// loadConfigEarly is a variant of loadConfig that uses the special
+// "early config" loader that is more forgiving of unexpected constructs and
+// legacy syntax.
+//
+// Early-loaded config is not registered in the source code cache, so
+// diagnostics produced from it may render without source code snippets. In
+// practice this is not a big concern because the early config loader also
+// cannot generate detailed source locations, so it prefers to produce
+// diagnostics without explicit source location information and instead includes
+// approximate locations in the message text.
+//
+// Most callers should use loadConfig. This method exists to support early
+// initialization use-cases where the root module must be inspected in order
+// to determine what else needs to be installed before the full configuration
+// can be used.
+func (m *Meta) loadConfigEarly(rootDir string) (*earlyconfig.Config, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	rootDir = m.normalizePath(rootDir)
+
+	config, hclDiags := initwd.LoadConfig(rootDir, m.modulesDir())
+	diags = diags.Append(hclDiags)
+	return config, diags
+}
+
+// loadSingleModule reads configuration from the given directory and returns
+// a description of that module only, without attempting to assemble a module
+// tree for referenced child modules.
+//
+// Most callers should use loadConfig. This method exists to support early
+// initialization use-cases where the root module must be inspected in order
+// to determine what else needs to be installed before the full configuration
+// can be used.
+func (m *Meta) loadSingleModule(dir string) (*configs.Module, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	dir = m.normalizePath(dir)
+
+	loader, err := m.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
+	}
+
+	module, hclDiags := loader.Parser().LoadConfigDir(dir)
+	diags = diags.Append(hclDiags)
+	return module, diags
+}
+
+// loadSingleModuleEarly is a variant of loadSingleModule that uses the special
+// "early config" loader that is more forgiving of unexpected constructs and
+// legacy syntax.
+//
+// Early-loaded config is not registered in the source code cache, so
+// diagnostics produced from it may render without source code snippets. In
+// practice this is not a big concern because the early config loader also
+// cannot generate detailed source locations, so it prefers to produce
+// diagnostics without explicit source location information and instead includes
+// approximate locations in the message text.
+//
+// Most callers should use loadConfig. This method exists to support early
+// initialization use-cases where the root module must be inspected in order
+// to determine what else needs to be installed before the full configuration
+// can be used.
+func (m *Meta) loadSingleModuleEarly(dir string) (*tfconfig.Module, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	dir = m.normalizePath(dir)
+
+	module, moreDiags := earlyconfig.LoadModule(dir)
+	diags = diags.Append(moreDiags)
+
+	return module, diags
+}
+
+// dirIsConfigPath checks if the given path is a directory that contains at
+// least one Terraform configuration file (.tf or .tf.json), returning true
+// if so.
+//
+// In the unlikely event that the underlying config loader cannot be initialized,
+// this function optimistically returns true, assuming that the caller will
+// then do some other operation that requires the config loader and get an
+// error at that point.
+func (m *Meta) dirIsConfigPath(dir string) bool {
+	loader, err := m.initConfigLoader()
+	if err != nil {
+		return true
+	}
+
+	return loader.IsConfigDir(dir)
+}
+
+// loadBackendConfig reads configuration from the given directory and returns
+// the backend configuration defined by that module, if any. Nil is returned
+// if the specified module does not have an explicit backend configuration.
+//
+// This is a convenience method for command code that will delegate to the
+// configured backend to do most of its work, since in that case it is the
+// backend that will do the full configuration load.
+//
+// Although this method returns only the backend configuration, at present it
+// actually loads and validates the entire configuration first. Therefore errors
+// returned may be about other aspects of the configuration. This behavior may
+// change in future, so callers must not rely on it. (That is, they must expect
+// that a call to loadSingleModule or loadConfig could fail on the same
+// directory even if loadBackendConfig succeeded.)
+func (m *Meta) loadBackendConfig(rootDir string) (*configs.Backend, tfdiags.Diagnostics) {
+	mod, diags := m.loadSingleModule(rootDir)
+	if diags.HasErrors() {
+		return nil, diags
+	}
+	return mod.Backend, diags
+}
+
+// loadValuesFile loads a file that defines a single map of key/value pairs.
+// This is the format used for "tfvars" files.
+func (m *Meta) loadValuesFile(filename string) (map[string]cty.Value, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	filename = m.normalizePath(filename)
+
+	loader, err := m.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
+	}
+
+	vals, hclDiags := loader.Parser().LoadValuesFile(filename)
+	diags = diags.Append(hclDiags)
+	return vals, diags
+}
+
+// loadHCLFile reads an arbitrary HCL file and returns the unprocessed body
+// representing its toplevel. Most callers should use one of the more
+// specialized "load..." methods to get a higher-level representation.
+func (m *Meta) loadHCLFile(filename string) (hcl.Body, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	filename = m.normalizePath(filename)
+
+	loader, err := m.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		return nil, diags
+	}
+
+	body, hclDiags := loader.Parser().LoadHCLFile(filename)
+	diags = diags.Append(hclDiags)
+	return body, diags
+}
+
+// installModules reads a root module from the given directory and attempts
+// to recursively install all of its descendent modules.
+//
+// The given hooks object will be notified of installation progress, which
+// can then be relayed to the end-user. The moduleUiInstallHooks type in
+// this package has a reasonable implementation for displaying notifications
+// via a provided cli.Ui.
+func (m *Meta) installModules(rootDir string, upgrade bool, hooks initwd.ModuleInstallHooks) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	rootDir = m.normalizePath(rootDir)
+
+	err := os.MkdirAll(m.modulesDir(), os.ModePerm)
+	if err != nil {
+		diags = diags.Append(fmt.Errorf("failed to create local modules directory: %s", err))
+		return diags
+	}
+
+	inst := m.moduleInstaller()
+	_, moreDiags := inst.InstallModules(rootDir, upgrade, hooks)
+	diags = diags.Append(moreDiags)
+	return diags
+}
+
+// initDirFromModule initializes the given directory (which should be
+// pre-verified as empty by the caller) by copying the source code from the
+// given module address.
+//
+// Internally this runs similar steps to installModules.
+// The given hooks object will be notified of installation progress, which
+// can then be relayed to the end-user. The moduleUiInstallHooks type in
+// this package has a reasonable implementation for displaying notifications
+// via a provided cli.Ui.
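+//
+// An illustrative call, where hooks is any initwd.ModuleInstallHooks
+// implementation (the module address here is an example, not taken from
+// this file):
+//
+//	diags := m.initDirFromModule(".", "hashicorp/consul/aws", hooks)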
+func (m *Meta) initDirFromModule(targetDir string, addr string, hooks initwd.ModuleInstallHooks) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + targetDir = m.normalizePath(targetDir) + moreDiags := initwd.DirFromModule(targetDir, m.modulesDir(), addr, m.registryClient(), hooks) + diags = diags.Append(moreDiags) + return diags +} + +// inputForSchema uses interactive prompts to try to populate any +// not-yet-populated required attributes in the given object value to +// comply with the given schema. +// +// An error will be returned if input is disabled for this meta or if +// values cannot be obtained for some other operational reason. Errors are +// not returned for invalid input since the input loop itself will report +// that interactively. +// +// It is not guaranteed that the result will be valid, since certain attribute +// types and nested blocks are not supported for input. +// +// The given value must conform to the given schema. If not, this method will +// panic. +func (m *Meta) inputForSchema(given cty.Value, schema *configschema.Block) (cty.Value, error) { + if given.IsNull() || !given.IsKnown() { + // This is not reasonable input, but we'll tolerate it anyway and + // just pass it through for the caller to handle downstream. + return given, nil + } + + retVals := given.AsValueMap() + names := make([]string, 0, len(schema.Attributes)) + for name, attrS := range schema.Attributes { + if attrS.Required && retVals[name].IsNull() && attrS.Type.IsPrimitiveType() { + names = append(names, name) + } + } + sort.Strings(names) + + input := m.UIInput() + for _, name := range names { + attrS := schema.Attributes[name] + + for { + strVal, err := input.Input(context.Background(), &terraform.InputOpts{ + Id: name, + Query: name, + Description: attrS.Description, + }) + if err != nil { + return cty.UnknownVal(schema.ImpliedType()), fmt.Errorf("%s: %s", name, err) + } + + val := cty.StringVal(strVal) + val, err = convert.Convert(val, attrS.Type) + if err != nil { + m.showDiagnostics(fmt.Errorf("Invalid value: %s", err)) + continue + } + + retVals[name] = val + break + } + } + + return cty.ObjectVal(retVals), nil +} + +// configSources returns the source cache from the receiver's config loader, +// which the caller must not modify. +// +// If a config loader has not yet been instantiated then no files could have +// been loaded already, so this method returns a nil map in that case. +func (m *Meta) configSources() map[string][]byte { + if m.configLoader == nil { + return nil + } + + return m.configLoader.Sources() +} + +func (m *Meta) modulesDir() string { + return filepath.Join(m.DataDir(), "modules") +} + +// registerSynthConfigSource allows commands to add synthetic additional source +// buffers to the config loader's cache of sources (as returned by +// configSources), which is useful when a command is directly parsing something +// from the command line that may produce diagnostics, so that diagnostic +// snippets can still be produced. +// +// If this is called before a configLoader has been initialized then it will +// try to initialize the loader but ignore any initialization failure, turning +// the call into a no-op. (We presume that a caller will later call a different +// function that also initializes the config loader as a side effect, at which +// point those errors can be returned.) 
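+//
+// A hedged usage sketch (the synthetic filename and rawExpr are illustrative,
+// not part of this file):
+//
+//	m.registerSynthConfigSource("<value for -var>", []byte(rawExpr))
+//
+// after which diagnostics that reference "<value for -var>" can render
+// source snippets from rawExpr.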
+func (m *Meta) registerSynthConfigSource(filename string, src []byte) { + loader, err := m.initConfigLoader() + if err != nil || loader == nil { + return // treated as no-op, since this is best-effort + } + loader.Parser().ForceFileSource(filename, src) +} + +// initConfigLoader initializes the shared configuration loader if it isn't +// already initialized. +// +// If the loader cannot be created for some reason then an error is returned +// and no loader is created. Subsequent calls will presumably see the same +// error. Loader initialization errors will tend to prevent any further use +// of most Terraform features, so callers should report any error and safely +// terminate. +func (m *Meta) initConfigLoader() (*configload.Loader, error) { + if m.configLoader == nil { + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: m.modulesDir(), + Services: m.Services, + }) + if err != nil { + return nil, err + } + m.configLoader = loader + } + return m.configLoader, nil +} + +// moduleInstaller instantiates and returns a module installer for use by +// "terraform init" (directly or indirectly). +func (m *Meta) moduleInstaller() *initwd.ModuleInstaller { + reg := m.registryClient() + return initwd.NewModuleInstaller(m.modulesDir(), reg) +} + +// registryClient instantiates and returns a new Terraform Registry client. +func (m *Meta) registryClient() *registry.Client { + return registry.NewClient(m.Services, nil) +} + +// configValueFromCLI parses a configuration value that was provided in a +// context in the CLI where only strings can be provided, such as on the +// command line or in an environment variable, and returns the resulting +// value. +func configValueFromCLI(synthFilename, rawValue string, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + switch { + case wantType.IsPrimitiveType(): + // Primitive types are handled as conversions from string. + val := cty.StringVal(rawValue) + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend configuration value", + fmt.Sprintf("Invalid backend configuration argument %s: %s", synthFilename, err), + )) + val = cty.DynamicVal // just so we return something valid-ish + } + return val, diags + default: + // Non-primitives are parsed as HCL expressions + src := []byte(rawValue) + expr, hclDiags := hclsyntax.ParseExpression(src, synthFilename, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + return cty.DynamicVal, diags + } + val, hclDiags := expr.Value(nil) + diags = diags.Append(hclDiags) + if hclDiags.HasErrors() { + val = cty.DynamicVal + } + return val, diags + } +} + +// rawFlags is a flag.Value implementation that just appends raw flag +// names and values to a slice. 
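+//
+// A minimal wiring sketch (assuming a *flag.FlagSet named fs supplied by the
+// command's flag setup; these exact calls are illustrative, not part of this
+// file):
+//
+//	args := newRawFlags("-var")
+//	fs.Var(args, "var", "set a variable")
+//	fs.Var(args.Alias("-var-file"), "var-file", "load variables from a file")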
+type rawFlags struct { + flagName string + items *[]rawFlag +} + +func newRawFlags(flagName string) rawFlags { + var items []rawFlag + return rawFlags{ + flagName: flagName, + items: &items, + } +} + +func (f rawFlags) Empty() bool { + if f.items == nil { + return true + } + return len(*f.items) == 0 +} + +func (f rawFlags) AllItems() []rawFlag { + if f.items == nil { + return nil + } + return *f.items +} + +func (f rawFlags) Alias(flagName string) rawFlags { + return rawFlags{ + flagName: flagName, + items: f.items, + } +} + +func (f rawFlags) String() string { + return "" +} + +func (f rawFlags) Set(str string) error { + *f.items = append(*f.items, rawFlag{ + Name: f.flagName, + Value: str, + }) + return nil +} + +type rawFlag struct { + Name string + Value string +} + +func (f rawFlag) String() string { + return fmt.Sprintf("%s=%q", f.Name, f.Value) +} diff --git a/vendor/github.com/hashicorp/terraform/command/meta_new.go b/vendor/github.com/hashicorp/terraform/command/meta_new.go index 3138cd2d..9f87d792 100644 --- a/vendor/github.com/hashicorp/terraform/command/meta_new.go +++ b/vendor/github.com/hashicorp/terraform/command/meta_new.go @@ -2,13 +2,17 @@ package command import ( "fmt" + "log" "os" + "path/filepath" "strconv" + "github.com/hashicorp/terraform/plans/planfile" + "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // NOTE: Temporary file until this branch is cleaned up. @@ -28,11 +32,19 @@ func (m *Meta) Input() bool { return true } -// Module loads the module tree for the given root path. +// Module loads the module tree for the given root path using the legacy +// configuration loader. // // It expects the modules to already be downloaded. This will never // download any modules. -func (m *Meta) Module(path string) (*module.Tree, error) { +// +// The configuration is validated before returning, so the returned diagnostics +// may contain warnings and/or errors. If the diagnostics contains only +// warnings, the caller may treat the returned module.Tree as valid after +// presenting the warnings to the user. +func (m *Meta) Module(path string) (*module.Tree, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + mod, err := module.NewTreeModule("", path) if err != nil { // Check for the error where we have no config files @@ -40,62 +52,101 @@ func (m *Meta) Module(path string) (*module.Tree, error) { return nil, nil } - return nil, err + diags = diags.Append(err) + return nil, diags } - err = mod.Load(m.moduleStorage(m.DataDir()), module.GetModeNone) + err = mod.Load(m.moduleStorage(m.DataDir(), module.GetModeNone)) if err != nil { - return nil, fmt.Errorf("Error loading modules: %s", err) + diags = diags.Append(errwrap.Wrapf("Error loading modules: {{err}}", err)) + return nil, diags } - return mod, nil + diags = diags.Append(mod.Validate()) + + return mod, diags } -// Plan returns the plan for the given path. -// -// This only has an effect if the path itself looks like a plan. -// If error is nil and the plan is nil, then the path didn't look like -// a plan. +// Config loads the root config for the path specified, using the legacy +// configuration loader. // -// Error will be non-nil if path looks like a plan and loading the plan -// failed. 
-func (m *Meta) Plan(path string) (*terraform.Plan, error) { - // Open the path no matter if its a directory or file - f, err := os.Open(path) - defer f.Close() - if err != nil { - return nil, fmt.Errorf( - "Failed to load Terraform configuration or plan: %s", err) +// Path may be a directory or file. The absence of configuration is not an +// error and returns a nil Config. +func (m *Meta) Config(path string) (*config.Config, error) { + // If no explicit path was given then it is okay for there to be + // no backend configuration found. + emptyOk := path == "" + + // If we had no path set, it is an error. We can't initialize unset + if path == "" { + path = "." } - // Stat it so we can check if its a directory - fi, err := f.Stat() + // Expand the path + if !filepath.IsAbs(path) { + var err error + path, err = filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf( + "Error expanding path to backend config %q: %s", path, err) + } + } + + log.Printf("[DEBUG] command: loading backend config file: %s", path) + + // We first need to determine if we're loading a file or a directory. + fi, err := os.Stat(path) if err != nil { - return nil, fmt.Errorf( - "Failed to load Terraform configuration or plan: %s", err) + if os.IsNotExist(err) && emptyOk { + log.Printf( + "[INFO] command: backend config not found, returning nil: %s", + path) + return nil, nil + } + + return nil, err } - // If this path is a directory, then it can't be a plan. Not an error. + var f func(string) (*config.Config, error) = config.LoadFile if fi.IsDir() { - return nil, nil + f = config.LoadDir } - // Read the plan - p, err := terraform.ReadPlan(f) + // Load the configuration + c, err := f(path) if err != nil { + // Check for the error where we have no config files and return nil + // as the configuration type. + if errwrap.ContainsType(err, new(config.ErrNoConfigsFound)) { + log.Printf( + "[INFO] command: backend config not found, returning nil: %s", + path) + return nil, nil + } + return nil, err } - // We do a validation here that seems odd but if any plan is given, - // we must not have set any extra variables. The plan itself contains - // the variables and those aren't overwritten. - if len(m.variables) > 0 { - return nil, fmt.Errorf( - "You can't set variables with the '-var' or '-var-file' flag\n" + - "when you're applying a plan file. The variables used when\n" + - "the plan was created will be used. If you wish to use different\n" + - "variable values, create a new plan file.") + return c, nil +} + +// PlanFile returns a reader for the plan file at the given path. +// +// If the return value and error are both nil, the given path exists but seems +// to be a configuration directory instead. +// +// Error will be non-nil if path refers to something which looks like a plan +// file and loading the file fails. +func (m *Meta) PlanFile(path string) (*planfile.Reader, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + + if fi.IsDir() { + // Looks like a configuration directory. 
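+		// (A nil reader with a nil error tells the caller "this is
+		// configuration, not a saved plan", per the doc comment above.)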
+ return nil, nil } - return p, nil + return planfile.Open(path) } diff --git a/vendor/github.com/hashicorp/terraform/command/meta_vars.go b/vendor/github.com/hashicorp/terraform/command/meta_vars.go new file mode 100644 index 00000000..30fe9976 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/meta_vars.go @@ -0,0 +1,253 @@ +package command + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + hcljson "github.com/hashicorp/hcl2/hcl/json" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" +) + +// collectVariableValues inspects the various places that root module input variable +// values can come from and constructs a map ready to be passed to the +// backend as part of a backend.Operation. +// +// This method returns diagnostics relating to the collection of the values, +// but the values themselves may produce additional diagnostics when finally +// parsed. +func (m *Meta) collectVariableValues() (map[string]backend.UnparsedVariableValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := map[string]backend.UnparsedVariableValue{} + + // First we'll deal with environment variables, since they have the lowest + // precedence. + { + env := os.Environ() + for _, raw := range env { + if !strings.HasPrefix(raw, terraform.VarEnvPrefix) { + continue + } + raw = raw[len(terraform.VarEnvPrefix):] // trim the prefix + + eq := strings.Index(raw, "=") + if eq == -1 { + // Seems invalid, so we'll ignore it. + continue + } + + name := raw[:eq] + rawVal := raw[eq+1:] + + ret[name] = unparsedVariableValueString{ + str: rawVal, + name: name, + sourceType: terraform.ValueFromEnvVar, + } + } + } + + // Next up we have some implicit files that are loaded automatically + // if they are present. There's the original terraform.tfvars + // (DefaultVarsFilename) along with the later-added search for all files + // ending in .auto.tfvars. + if _, err := os.Stat(DefaultVarsFilename); err == nil { + moreDiags := m.addVarsFromFile(DefaultVarsFilename, terraform.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + const defaultVarsFilenameJSON = DefaultVarsFilename + ".json" + if _, err := os.Stat(defaultVarsFilenameJSON); err == nil { + moreDiags := m.addVarsFromFile(defaultVarsFilenameJSON, terraform.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + if infos, err := ioutil.ReadDir("."); err == nil { + // "infos" is already sorted by name, so we just need to filter it here. + for _, info := range infos { + name := info.Name() + if !isAutoVarFile(name) { + continue + } + moreDiags := m.addVarsFromFile(name, terraform.ValueFromAutoFile, ret) + diags = diags.Append(moreDiags) + } + } + + // Finally we process values given explicitly on the command line, either + // as individual literal settings or as additional files to read. + for _, rawFlag := range m.variableArgs.AllItems() { + switch rawFlag.Name { + case "-var": + // Value should be in the form "name=value", where value is a + // raw string whose interpretation will depend on the variable's + // parsing mode. + raw := rawFlag.Value + eq := strings.Index(raw, "=") + if eq == -1 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid -var option", + fmt.Sprintf("The given -var option %q is not correctly specified. 
Must be a variable name and value separated by an equals sign, like -var=\"key=value\".", raw), + )) + continue + } + name := raw[:eq] + rawVal := raw[eq+1:] + ret[name] = unparsedVariableValueString{ + str: rawVal, + name: name, + sourceType: terraform.ValueFromCLIArg, + } + + case "-var-file": + moreDiags := m.addVarsFromFile(rawFlag.Value, terraform.ValueFromNamedFile, ret) + diags = diags.Append(moreDiags) + + default: + // Should never happen; always a bug in the code that built up + // the contents of m.variableArgs. + diags = diags.Append(fmt.Errorf("unsupported variable option name %q (this is a bug in Terraform)", rawFlag.Name)) + } + } + + return ret, diags +} + +func (m *Meta) addVarsFromFile(filename string, sourceType terraform.ValueSourceType, to map[string]backend.UnparsedVariableValue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + src, err := ioutil.ReadFile(filename) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read variables file", + fmt.Sprintf("Given variables file %s does not exist.", filename), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read variables file", + fmt.Sprintf("Error while reading %s: %s.", filename, err), + )) + } + return diags + } + + loader, err := m.initConfigLoader() + if err != nil { + diags = diags.Append(err) + return diags + } + + // Record the file source code for snippets in diagnostic messages. + loader.Parser().ForceFileSource(filename, src) + + var f *hcl.File + if strings.HasSuffix(filename, ".json") { + var hclDiags hcl.Diagnostics + f, hclDiags = hcljson.Parse(src, filename) + diags = diags.Append(hclDiags) + if f == nil || f.Body == nil { + return diags + } + } else { + var hclDiags hcl.Diagnostics + f, hclDiags = hclsyntax.ParseConfig(src, filename, hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(hclDiags) + if f == nil || f.Body == nil { + return diags + } + } + + // Before we do our real decode, we'll probe to see if there are any blocks + // of type "variable" in this body, since it's a common mistake for new + // users to put variable declarations in tfvars rather than variable value + // definitions, and otherwise our error message for that case is not so + // helpful. + { + content, _, _ := f.Body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "variable", + LabelNames: []string{"name"}, + }, + }, + }) + for _, block := range content.Blocks { + name := block.Labels[0] + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Variable declaration in .tfvars file", + Detail: fmt.Sprintf("A .tfvars file is used to assign values to variables that have already been declared in .tf files, not to declare new variables. To declare variable %q, place this block in one of your .tf files, such as variables.tf.\n\nTo set a value for this variable in %s, use the definition syntax instead:\n %s = ", name, block.TypeRange.Filename, name), + Subject: &block.TypeRange, + }) + } + if diags.HasErrors() { + // If we already found problems then JustAttributes below will find + // the same problems with less-helpful messages, so we'll bail for + // now to let the user focus on the immediate problem. 
+ return diags + } + } + + attrs, hclDiags := f.Body.JustAttributes() + diags = diags.Append(hclDiags) + + for name, attr := range attrs { + to[name] = unparsedVariableValueExpression{ + expr: attr.Expr, + sourceType: sourceType, + } + } + return diags +} + +// unparsedVariableValueLiteral is a backend.UnparsedVariableValue +// implementation that was actually already parsed (!). This is +// intended to deal with expressions inside "tfvars" files. +type unparsedVariableValueExpression struct { + expr hcl.Expression + sourceType terraform.ValueSourceType +} + +func (v unparsedVariableValueExpression) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + val, hclDiags := v.expr.Value(nil) // nil because no function calls or variable references are allowed here + diags = diags.Append(hclDiags) + + rng := tfdiags.SourceRangeFromHCL(v.expr.Range()) + + return &terraform.InputValue{ + Value: val, + SourceType: v.sourceType, + SourceRange: rng, + }, diags +} + +// unparsedVariableValueString is a backend.UnparsedVariableValue +// implementation that parses its value from a string. This can be used +// to deal with values given directly on the command line and via environment +// variables. +type unparsedVariableValueString struct { + str string + name string + sourceType terraform.ValueSourceType +} + +func (v unparsedVariableValueString) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + val, hclDiags := mode.Parse(v.name, v.str) + diags = diags.Append(hclDiags) + + return &terraform.InputValue{ + Value: val, + SourceType: v.sourceType, + }, diags +} diff --git a/vendor/github.com/hashicorp/terraform/command/output.go b/vendor/github.com/hashicorp/terraform/command/output.go index 2f5f6468..789be53a 100644 --- a/vendor/github.com/hashicorp/terraform/command/output.go +++ b/vendor/github.com/hashicorp/terraform/command/output.go @@ -3,10 +3,17 @@ package command import ( "bytes" "encoding/json" - "flag" "fmt" "sort" "strings" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/repl" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // OutputCommand is a Command implementation that reads an output @@ -16,13 +23,16 @@ type OutputCommand struct { } func (c *OutputCommand) Run(args []string) int { - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - var module string + var module, statePath string var jsonOutput bool - cmdFlags := flag.NewFlagSet("output", flag.ContinueOnError) + cmdFlags := c.Meta.defaultFlagSet("output") cmdFlags.BoolVar(&jsonOutput, "json", false, "json") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&statePath, "state", "", "path") cmdFlags.StringVar(&module, "module", "", "module") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { @@ -43,17 +53,24 @@ func (c *OutputCommand) Run(args []string) int { name = args[0] } + if statePath != "" { + c.Meta.statePath = statePath + } + + var diags tfdiags.Diagnostics + // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: 
%s", err)) + b, backendDiags := c.Backend(nil) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } - env := c.Env() + env := c.Workspace() // Get the state - stateStore, err := b.State(env) + stateStore, err := b.StateMgr(env) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 @@ -64,17 +81,29 @@ func (c *OutputCommand) Run(args []string) int { return 1 } - if module == "" { - module = "root" - } else { - module = "root." + module + moduleAddr := addrs.RootModuleInstance + if module != "" { + // This option was supported prior to 0.12.0, but no longer supported + // because we only persist the root module outputs in state. + // (We could perhaps re-introduce this by doing an eval walk here to + // repopulate them, similar to how "terraform console" does it, but + // that requires more thought since it would imply this command + // supporting remote operations, which is a big change.) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported option", + "The -module option is no longer supported since Terraform 0.12, because now only root outputs are persisted in the state.", + )) + c.showDiagnostics(diags) + return 1 } - // Get the proper module we want to get outputs for - modPath := strings.Split(module, ".") - state := stateStore.State() - mod := state.ModuleByPath(modPath) + if state == nil { + state = states.NewState() + } + + mod := state.Module(moduleAddr) if mod == nil { c.Ui.Error(fmt.Sprintf( "The module %s could not be found. There is nothing to output.", @@ -82,7 +111,7 @@ func (c *OutputCommand) Run(args []string) int { return 1 } - if state.Empty() || len(mod.Outputs) == 0 { + if !jsonOutput && (state.Empty() || len(mod.OutputValues) == 0) { c.Ui.Error( "The state file either has no outputs defined, or all the defined\n" + "outputs are empty. Please define an output in your configuration\n" + @@ -95,20 +124,54 @@ func (c *OutputCommand) Run(args []string) int { if name == "" { if jsonOutput { - jsonOutputs, err := json.MarshalIndent(mod.Outputs, "", " ") + // Due to a historical accident, the switch from state version 2 to + // 3 caused our JSON output here to be the full metadata about the + // outputs rather than just the output values themselves as we'd + // show in the single value case. We must now maintain that behavior + // for compatibility, so this is an emulation of the JSON + // serialization of outputs used in state format version 3. 
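+			//
+			// Illustrative shape of that legacy serialization (names and
+			// values are examples only):
+			//     {"addr": {"sensitive": false, "type": "string", "value": "10.0.0.1"}}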
+ type OutputMeta struct { + Sensitive bool `json:"sensitive"` + Type json.RawMessage `json:"type"` + Value json.RawMessage `json:"value"` + } + outputs := map[string]OutputMeta{} + + for n, os := range mod.OutputValues { + jsonVal, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + jsonType, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) + return 1 + } + outputs[n] = OutputMeta{ + Sensitive: os.Sensitive, + Type: json.RawMessage(jsonType), + Value: json.RawMessage(jsonVal), + } + } + + jsonOutputs, err := json.MarshalIndent(outputs, "", " ") if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) return 1 } - c.Ui.Output(string(jsonOutputs)) return 0 } else { - c.Ui.Output(outputsAsString(state, modPath, nil, false)) + c.Ui.Output(outputsAsString(state, moduleAddr, false)) return 0 } } - v, ok := mod.Outputs[name] + os, ok := mod.OutputValues[name] if !ok { c.Ui.Error(fmt.Sprintf( "The output variable requested could not be found in the state\n" + @@ -117,29 +180,27 @@ func (c *OutputCommand) Run(args []string) int { "with new output variables until that command is run.")) return 1 } + v := os.Value if jsonOutput { - jsonOutputs, err := json.MarshalIndent(v, "", " ") + jsonOutput, err := ctyjson.Marshal(v, v.Type()) if err != nil { return 1 } - c.Ui.Output(string(jsonOutputs)) + c.Ui.Output(string(jsonOutput)) } else { - switch output := v.Value.(type) { - case string: - c.Ui.Output(output) - return 0 - case []interface{}: - c.Ui.Output(formatListOutput("", "", output)) - return 0 - case map[string]interface{}: - c.Ui.Output(formatMapOutput("", "", output)) - return 0 - default: - c.Ui.Error(fmt.Sprintf("Unknown output type: %T", v.Type)) + // Our formatter still wants an old-style raw interface{} value, so + // for now we'll just shim it. + // FIXME: Port the formatter to work with cty.Value directly. + legacyVal := hcl2shim.ConfigValueFromHCL2(v) + result, err := repl.FormatResult(legacyVal) + if err != nil { + diags = diags.Append(err) + c.showDiagnostics(diags) return 1 } + c.Ui.Output(result) } return 0 @@ -274,9 +335,6 @@ Options: -no-color If specified, output won't contain any color. 
- -module=name If specified, returns the outputs for a - specific module - -json If specified, machine readable output will be printed in JSON format diff --git a/vendor/github.com/hashicorp/terraform/command/plan.go b/vendor/github.com/hashicorp/terraform/command/plan.go index 0b66fdff..d3b15965 100644 --- a/vendor/github.com/hashicorp/terraform/command/plan.go +++ b/vendor/github.com/hashicorp/terraform/command/plan.go @@ -1,12 +1,13 @@ package command import ( - "context" "fmt" "strings" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/tfdiags" ) // PlanCommand is a Command implementation that compares a Terraform @@ -18,17 +19,17 @@ type PlanCommand struct { func (c *PlanCommand) Run(args []string) int { var destroy, refresh, detailed bool var outPath string - var moduleDepth int - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("plan") + cmdFlags := c.Meta.extendedFlagSet("plan") cmdFlags.BoolVar(&destroy, "destroy", false, "destroy") cmdFlags.BoolVar(&refresh, "refresh", true, "refresh") - c.addModuleDepthFlag(cmdFlags, &moduleDepth) cmdFlags.StringVar(&outPath, "out", "", "path") - cmdFlags.IntVar( - &c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") + cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") cmdFlags.BoolVar(&detailed, "detailed-exitcode", false, "detailed-exitcode") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") @@ -44,81 +45,153 @@ func (c *PlanCommand) Run(args []string) int { return 1 } - // Check if the path is a plan - plan, err := c.Plan(configPath) + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + + // Check if the path is a plan, which is not permitted + planFileReader, err := c.PlanFile(configPath) if err != nil { c.Ui.Error(err.Error()) return 1 } - if plan != nil { - // Disable refreshing no matter what since we only want to show the plan - refresh = false - - // Set the config path to empty for backend loading - configPath = "" + if planFileReader != nil { + c.showDiagnostics(tfdiags.Sourceless( + tfdiags.Error, + "Invalid configuration directory", + fmt.Sprintf("Cannot pass a saved plan file to the 'terraform plan' command. 
To apply a saved plan, use: terraform apply %s", configPath),
+		))
+		return 1
+	}
 
-	// Load the module if we don't have one yet (not running from plan)
-	var mod *module.Tree
-	if plan == nil {
-		mod, err = c.Module(configPath)
-		if err != nil {
-			c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err))
-			return 1
-		}
+	var diags tfdiags.Diagnostics
+
+	var backendConfig *configs.Backend
+	var configDiags tfdiags.Diagnostics
+	backendConfig, configDiags = c.loadBackendConfig(configPath)
+	diags = diags.Append(configDiags)
+	if configDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
 	}
 
 	// Load the backend
-	b, err := c.Backend(&BackendOpts{
-		ConfigPath: configPath,
-		Plan:       plan,
+	b, backendDiags := c.Backend(&BackendOpts{
+		Config: backendConfig,
 	})
-	if err != nil {
-		c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err))
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
 		return 1
 	}
 
+	// Emit any diagnostics we've accumulated before we delegate to the
+	// backend, since the backend will handle its own diagnostics internally.
+	c.showDiagnostics(diags)
+	diags = nil
+
 	// Build the operation
-	opReq := c.Operation()
+	opReq := c.Operation(b)
+	opReq.ConfigDir = configPath
 	opReq.Destroy = destroy
-	opReq.Module = mod
-	opReq.Plan = plan
 	opReq.PlanRefresh = refresh
 	opReq.PlanOutPath = outPath
 	opReq.Type = backend.OperationTypePlan
 
-	// Perform the operation
-	op, err := b.Operation(context.Background(), opReq)
+	opReq.ConfigLoader, err = c.initConfigLoader()
 	if err != nil {
-		c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err))
+		c.showDiagnostics(err)
 		return 1
 	}
 
-	// Wait for the operation to complete
-	<-op.Done()
-	if err := op.Err; err != nil {
-		c.Ui.Error(err.Error())
-		return 1
+	{
+		var moreDiags tfdiags.Diagnostics
+		opReq.Variables, moreDiags = c.collectVariableValues()
+		diags = diags.Append(moreDiags)
+		if moreDiags.HasErrors() {
+			c.showDiagnostics(diags)
+			return 1
+		}
 	}
 
-	/*
-		err = terraform.SetDebugInfo(DefaultDataDir)
+	// c.Backend above has a non-obvious side-effect of also populating
+	// c.backendState, which is the state-shaped formulation of the effective
+	// backend configuration after evaluation of the backend configuration.
+	// We will in turn adapt that to a plans.Backend to include in a plan file
+	// if opReq.PlanOutPath was set to a non-empty value above.
+	//
+	// FIXME: It's ugly to be doing this inline here, but it's also not really
+	// clear where would be better to do it. In future we should find a better
+	// home for this logic, and ideally also stop depending on the side-effect
+	// of c.Backend setting c.backendState.
+	{
+		// This is not actually a state in the usual sense, but rather a
+		// representation of part of the current working directory's
+		// "configuration state".
+		backendPseudoState := c.backendState
+		if backendPseudoState == nil {
+			// Should never happen if c.Backend is behaving properly.
+			diags = diags.Append(fmt.Errorf("Backend initialization didn't produce resolved configuration (This is a bug in Terraform)"))
+			c.showDiagnostics(diags)
+			return 1
+		}
+		var backendForPlan plans.Backend
+		backendForPlan.Type = backendPseudoState.Type
+		backendForPlan.Workspace = c.Workspace()
+
+		// Configuration is a little more awkward to handle here because it's
+		// stored in state as raw JSON but we need it as a plans.DynamicValue
+		// to save it in the state.
To do that conversion we need to know the + // configuration schema of the backend. + configSchema := b.ConfigSchema() + config, err := backendPseudoState.Config(configSchema) if err != nil { - c.Ui.Error(err.Error()) + // This means that the stored settings don't conform to the current + // schema, which could either be because we're reading something + // created by an older version that is no longer compatible, or + // because the user manually tampered with the stored config. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid backend initialization", + fmt.Sprintf("The backend configuration for this working directory is not valid: %s.\n\nIf you have recently upgraded Terraform, you may need to re-run \"terraform init\" to re-initialize this working directory.", err), + )) + c.showDiagnostics(diags) return 1 } - */ + configForPlan, err := plans.NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + // This should never happen, since we've just decoded this value + // using the same schema. + diags = diags.Append(fmt.Errorf("Failed to encode backend configuration to store in plan: %s", err)) + c.showDiagnostics(diags) + return 1 + } + backendForPlan.Config = configForPlan + } + // Perform the operation + op, err := c.RunOperation(b, opReq) + if err != nil { + c.showDiagnostics(err) + return 1 + } + + if op.Result != backend.OperationSuccess { + return op.Result.ExitStatus() + } if detailed && !op.PlanEmpty { return 2 } - return 0 + return op.Result.ExitStatus() } func (c *PlanCommand) Help() string { helpText := ` -Usage: terraform plan [options] [DIR-OR-PLAN] +Usage: terraform plan [options] [DIR] Generates an execution plan for Terraform. @@ -127,9 +200,6 @@ Usage: terraform plan [options] [DIR-OR-PLAN] a Terraform plan file, and apply can take this plan file to execute this plan exactly. - If a saved plan is passed as an argument, this command will output - the saved plan contents. It will not modify the given plan. - Options: -destroy If set, a plan will be generated to destroy all resources @@ -147,10 +217,6 @@ Options: -lock-timeout=0s Duration to retry a state lock. - -module-depth=n Specifies the depth of modules to show in the output. - This does not affect the plan itself, only the output - shown. By default, this is -1, which will expand all. - -no-color If specified, output won't contain any color. -out=path Write a plan file to the given path. This can be used as @@ -172,8 +238,8 @@ Options: flag can be set multiple times. -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. 
` return strings.TrimSpace(helpText) } diff --git a/vendor/github.com/hashicorp/terraform/command/plugins.go b/vendor/github.com/hashicorp/terraform/command/plugins.go new file mode 100644 index 00000000..bc0a0293 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/plugins.go @@ -0,0 +1,422 @@ +package command + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + + plugin "github.com/hashicorp/go-plugin" + "github.com/kardianos/osext" + + terraformProvider "github.com/hashicorp/terraform/builtin/providers/terraform" + tfplugin "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/terraform" +) + +// multiVersionProviderResolver is an implementation of +// terraform.ResourceProviderResolver that matches the given version constraints +// against a set of versioned provider plugins to find the newest version of +// each that satisfies the given constraints. +type multiVersionProviderResolver struct { + Available discovery.PluginMetaSet + + // Internal is a map that overrides the usual plugin selection process + // for internal plugins. These plugins do not support version constraints + // (will produce an error if one is set). This should be used only in + // exceptional circumstances since it forces the provider's release + // schedule to be tied to that of Terraform Core. + Internal map[string]providers.Factory +} + +func choosePlugins(avail discovery.PluginMetaSet, internal map[string]providers.Factory, reqd discovery.PluginRequirements) map[string]discovery.PluginMeta { + candidates := avail.ConstrainVersions(reqd) + ret := map[string]discovery.PluginMeta{} + for name, metas := range candidates { + // If the provider is in our internal map then we ignore any + // discovered plugins for it since these are dealt with separately. 
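+		// (ResolveProviders below wires internal providers up directly from
+		// its Internal map instead.)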
+ if _, isInternal := internal[name]; isInternal { + continue + } + + if len(metas) == 0 { + continue + } + ret[name] = metas.Newest() + } + return ret +} + +func (r *multiVersionProviderResolver) ResolveProviders( + reqd discovery.PluginRequirements, +) (map[string]providers.Factory, []error) { + factories := make(map[string]providers.Factory, len(reqd)) + var errs []error + + chosen := choosePlugins(r.Available, r.Internal, reqd) + for name, req := range reqd { + if factory, isInternal := r.Internal[name]; isInternal { + if !req.Versions.Unconstrained() { + errs = append(errs, fmt.Errorf("provider.%s: this provider is built in to Terraform and so it does not support version constraints", name)) + continue + } + factories[name] = factory + continue + } + + if newest, available := chosen[name]; available { + digest, err := newest.SHA256() + if err != nil { + errs = append(errs, fmt.Errorf("provider.%s: failed to load plugin to verify its signature: %s", name, err)) + continue + } + if !reqd[name].AcceptsSHA256(digest) { + errs = append(errs, fmt.Errorf("provider.%s: new or changed plugin executable", name)) + continue + } + + factories[name] = providerFactory(newest) + } else { + msg := fmt.Sprintf("provider.%s: no suitable version installed", name) + + required := req.Versions.String() + // no version is unconstrained + if required == "" { + required = "(any version)" + } + + foundVersions := []string{} + for meta := range r.Available.WithName(name) { + foundVersions = append(foundVersions, fmt.Sprintf("%q", meta.Version)) + } + + found := "none" + if len(foundVersions) > 0 { + found = strings.Join(foundVersions, ", ") + } + + msg += fmt.Sprintf("\n version requirements: %q\n versions installed: %s", required, found) + + errs = append(errs, errors.New(msg)) + } + } + + return factories, errs +} + +// store the user-supplied path for plugin discovery +func (m *Meta) storePluginPath(pluginPath []string) error { + if len(pluginPath) == 0 { + return nil + } + + path := filepath.Join(m.DataDir(), PluginPathFile) + + // remove the plugin dir record if the path was set to an empty string + if len(pluginPath) == 1 && (pluginPath[0] == "") { + err := os.Remove(path) + if !os.IsNotExist(err) { + return err + } + return nil + } + + js, err := json.MarshalIndent(pluginPath, "", " ") + if err != nil { + return err + } + + // if this fails, so will WriteFile + os.MkdirAll(m.DataDir(), 0755) + + return ioutil.WriteFile(path, js, 0644) +} + +// Load the user-defined plugin search path into Meta.pluginPath if the file +// exists. +func (m *Meta) loadPluginPath() ([]string, error) { + js, err := ioutil.ReadFile(filepath.Join(m.DataDir(), PluginPathFile)) + if os.IsNotExist(err) { + return nil, nil + } + + if err != nil { + return nil, err + } + + var pluginPath []string + if err := json.Unmarshal(js, &pluginPath); err != nil { + return nil, err + } + + return pluginPath, nil +} + +// the default location for automatically installed plugins +func (m *Meta) pluginDir() string { + return filepath.Join(m.DataDir(), "plugins", fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH)) +} + +// pluginDirs return a list of directories to search for plugins. +// +// Earlier entries in this slice get priority over later when multiple copies +// of the same plugin version are found, but newer versions always override +// older versions where both satisfy the provider version constraints. 
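+//
+// The effective order, derived from the body below, is: the user-supplied
+// plugin paths when present (used exclusively); otherwise ".", the
+// directory containing the terraform executable, DefaultPluginVendorDir,
+// the auto-install dir when requested, and finally any GlobalPluginDirs.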
+func (m *Meta) pluginDirs(includeAutoInstalled bool) []string { + // user defined paths take precedence + if len(m.pluginPath) > 0 { + return m.pluginPath + } + + // When searching the following directories, earlier entries get precedence + // if the same plugin version is found twice, but newer versions will + // always get preference below regardless of where they are coming from. + // TODO: Add auto-install dir, default vendor dir and optional override + // vendor dir(s). + dirs := []string{"."} + + // Look in the same directory as the Terraform executable. + // If found, this replaces what we found in the config path. + exePath, err := osext.Executable() + if err != nil { + log.Printf("[ERROR] Error discovering exe directory: %s", err) + } else { + dirs = append(dirs, filepath.Dir(exePath)) + } + + // add the user vendor directory + dirs = append(dirs, DefaultPluginVendorDir) + + if includeAutoInstalled { + dirs = append(dirs, m.pluginDir()) + } + dirs = append(dirs, m.GlobalPluginDirs...) + + return dirs +} + +func (m *Meta) pluginCache() discovery.PluginCache { + dir := m.PluginCacheDir + if dir == "" { + return nil // cache disabled + } + + dir = filepath.Join(dir, pluginMachineName) + + return discovery.NewLocalPluginCache(dir) +} + +// providerPluginSet returns the set of valid providers that were discovered in +// the defined search paths. +func (m *Meta) providerPluginSet() discovery.PluginMetaSet { + plugins := discovery.FindPlugins("provider", m.pluginDirs(true)) + + // Add providers defined in the legacy .terraformrc, + if m.PluginOverrides != nil { + for k, v := range m.PluginOverrides.Providers { + log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v) + } + plugins = plugins.OverridePaths(m.PluginOverrides.Providers) + } + + plugins, _ = plugins.ValidateVersions() + + for p := range plugins { + log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path) + } + + return plugins +} + +// providerPluginAutoInstalledSet returns the set of providers that exist +// within the auto-install directory. +func (m *Meta) providerPluginAutoInstalledSet() discovery.PluginMetaSet { + plugins := discovery.FindPlugins("provider", []string{m.pluginDir()}) + plugins, _ = plugins.ValidateVersions() + + for p := range plugins { + log.Printf("[DEBUG] found valid plugin: %q", p.Name) + } + + return plugins +} + +// providerPluginManuallyInstalledSet returns the set of providers that exist +// in all locations *except* the auto-install directory. 
+func (m *Meta) providerPluginManuallyInstalledSet() discovery.PluginMetaSet { + plugins := discovery.FindPlugins("provider", m.pluginDirs(false)) + + // Add providers defined in the legacy .terraformrc, + if m.PluginOverrides != nil { + for k, v := range m.PluginOverrides.Providers { + log.Printf("[DEBUG] found plugin override in .terraformrc: %q, %q", k, v) + } + + plugins = plugins.OverridePaths(m.PluginOverrides.Providers) + } + + plugins, _ = plugins.ValidateVersions() + + for p := range plugins { + log.Printf("[DEBUG] found valid plugin: %q, %q, %q", p.Name, p.Version, p.Path) + } + + return plugins +} + +func (m *Meta) providerResolver() providers.Resolver { + return &multiVersionProviderResolver{ + Available: m.providerPluginSet(), + Internal: m.internalProviders(), + } +} + +func (m *Meta) internalProviders() map[string]providers.Factory { + return map[string]providers.Factory{ + "terraform": func() (providers.Interface, error) { + return terraformProvider.NewProvider(), nil + }, + } +} + +// filter the requirements returning only the providers that we can't resolve +func (m *Meta) missingPlugins(avail discovery.PluginMetaSet, reqd discovery.PluginRequirements) discovery.PluginRequirements { + missing := make(discovery.PluginRequirements) + + candidates := avail.ConstrainVersions(reqd) + internal := m.internalProviders() + + for name, versionSet := range reqd { + // internal providers can't be missing + if _, ok := internal[name]; ok { + continue + } + + log.Printf("[DEBUG] plugin requirements: %q=%q", name, versionSet.Versions) + if metas := candidates[name]; metas.Count() == 0 { + missing[name] = versionSet + } + } + + return missing +} + +func (m *Meta) provisionerFactories() map[string]terraform.ProvisionerFactory { + dirs := m.pluginDirs(true) + plugins := discovery.FindPlugins("provisioner", dirs) + plugins, _ = plugins.ValidateVersions() + + // For now our goal is to just find the latest version of each plugin + // we have on the system. All provisioners should be at version 0.0.0 + // currently, so there should actually only be one instance of each plugin + // name here, even though the discovery interface forces us to pretend + // that might not be true. + + factories := make(map[string]terraform.ProvisionerFactory) + + // Wire up the internal provisioners first. These might be overridden + // by discovered provisioners below. + for name := range InternalProvisioners { + factories[name] = internalProvisionerFactory(discovery.PluginMeta{Name: name}) + } + + byName := plugins.ByName() + for name, metas := range byName { + // Since we validated versions above and we partitioned the sets + // by name, we're guaranteed that the metas in our set all have + // valid versions and that there's at least one meta. + newest := metas.Newest() + + factories[name] = provisionerFactory(newest) + } + + return factories +} + +func internalPluginClient(kind, name string) (*plugin.Client, error) { + cmdLine, err := BuildPluginCommandString(kind, name) + if err != nil { + return nil, err + } + + // See the docstring for BuildPluginCommandString for why we need to do + // this split here. 
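+	// (In short: the pieces were joined with the improbable TFSPACE marker
+	// rather than a plain space, so that executable paths containing
+	// ordinary spaces survive the round trip intact.)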
+ cmdArgv := strings.Split(cmdLine, TFSPACE) + + cfg := &plugin.ClientConfig{ + Cmd: exec.Command(cmdArgv[0], cmdArgv[1:]...), + HandshakeConfig: tfplugin.Handshake, + Managed: true, + VersionedPlugins: tfplugin.VersionedPlugins, + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + } + + return plugin.NewClient(cfg), nil +} + +func providerFactory(meta discovery.PluginMeta) providers.Factory { + return func() (providers.Interface, error) { + client := tfplugin.Client(meta) + // Request the RPC client so we can get the provider + // so we can build the actual RPC-implemented provider. + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) + if err != nil { + return nil, err + } + + // store the client so that the plugin can kill the child process + p := raw.(*tfplugin.GRPCProvider) + p.PluginClient = client + return p, nil + } +} + +func provisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory { + return func() (provisioners.Interface, error) { + client := tfplugin.Client(meta) + return newProvisionerClient(client) + } +} + +func internalProvisionerFactory(meta discovery.PluginMeta) terraform.ProvisionerFactory { + return func() (provisioners.Interface, error) { + client, err := internalPluginClient("provisioner", meta.Name) + if err != nil { + return nil, fmt.Errorf("[WARN] failed to build command line for internal plugin %q: %s", meta.Name, err) + } + return newProvisionerClient(client) + } +} + +func newProvisionerClient(client *plugin.Client) (provisioners.Interface, error) { + // Request the RPC client so we can get the provisioner + // so we can build the actual RPC-implemented provisioner. + rpcClient, err := client.Client() + if err != nil { + return nil, err + } + + raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName) + if err != nil { + return nil, err + } + + // store the client so that the plugin can kill the child process + p := raw.(*tfplugin.GRPCProvisioner) + p.PluginClient = client + return p, nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/plugins_lock.go b/vendor/github.com/hashicorp/terraform/command/plugins_lock.go new file mode 100644 index 00000000..ebdcee00 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/plugins_lock.go @@ -0,0 +1,86 @@ +package command + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" +) + +func (m *Meta) providerPluginsLock() *pluginSHA256LockFile { + return &pluginSHA256LockFile{ + Filename: filepath.Join(m.pluginDir(), "lock.json"), + } +} + +type pluginSHA256LockFile struct { + Filename string +} + +// Read loads the lock information from the file and returns it. If the file +// cannot be read, an empty map is returned to indicate that _no_ providers +// are acceptable, since the user must run "terraform init" to lock some +// providers before a context can be created. +func (pf *pluginSHA256LockFile) Read() map[string][]byte { + // Returning an empty map is different than nil because it causes + // us to reject all plugins as uninitialized, rather than applying no + // constraints at all. + // + // We don't surface any specific errors here because we want it to all + // roll up into our more-user-friendly error that appears when plugin + // constraint verification fails during context creation. 
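+	//
+	// On disk the lock file is plain JSON mapping provider names to hex
+	// digests, for example (values illustrative):
+	//     {"null": "18460f...", "random": "9d3a1c..."}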
+ digests := make(map[string][]byte) + + buf, err := ioutil.ReadFile(pf.Filename) + if err != nil { + // This is expected if the user runs any context-using command before + // running "terraform init". + log.Printf("[INFO] Failed to read plugin lock file %s: %s", pf.Filename, err) + return digests + } + + var strDigests map[string]string + err = json.Unmarshal(buf, &strDigests) + if err != nil { + // This should never happen unless the user directly edits the file. + log.Printf("[WARN] Plugin lock file %s failed to parse as JSON: %s", pf.Filename, err) + return digests + } + + for name, strDigest := range strDigests { + var digest []byte + _, err := fmt.Sscanf(strDigest, "%x", &digest) + if err == nil { + digests[name] = digest + } else { + // This should never happen unless the user directly edits the file. + log.Printf("[WARN] Plugin lock file %s has invalid digest for %q", pf.Filename, name) + } + } + + return digests +} + +// Write persists lock information to disk, where it will be retrieved by +// future calls to Read. This entirely replaces any previous lock information, +// so the given map must be comprehensive. +func (pf *pluginSHA256LockFile) Write(digests map[string][]byte) error { + strDigests := map[string]string{} + for name, digest := range digests { + strDigests[name] = fmt.Sprintf("%x", digest) + } + + buf, err := json.MarshalIndent(strDigests, "", " ") + if err != nil { + // should never happen + return fmt.Errorf("failed to serialize plugin lock as JSON: %s", err) + } + + os.MkdirAll( + filepath.Dir(pf.Filename), os.ModePerm, + ) // ignore error since WriteFile below will generate a better one anyway + + return ioutil.WriteFile(pf.Filename, buf, os.ModePerm) +} diff --git a/vendor/github.com/hashicorp/terraform/command/providers.go b/vendor/github.com/hashicorp/terraform/command/providers.go new file mode 100644 index 00000000..dcfcadf4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/providers.go @@ -0,0 +1,158 @@ +package command + +import ( + "fmt" + "path/filepath" + "sort" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/xlab/treeprint" +) + +// ProvidersCommand is a Command implementation that prints out information +// about the providers used in the current configuration/state. 
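+//
+// Illustrative output shape (provider and module names are examples):
+//
+//	.
+//	├── provider.oneview
+//	└── module.network
+//	    └── provider.oneview (inherited)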
+type ProvidersCommand struct { + Meta +} + +func (c *ProvidersCommand) Help() string { + return providersCommandHelp +} + +func (c *ProvidersCommand) Synopsis() string { + return "Prints a tree of the providers used in the configuration" +} + +func (c *ProvidersCommand) Run(args []string) int { + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } + + cmdFlags := c.Meta.defaultFlagSet("providers") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + configPath, err := ModulePath(cmdFlags.Args()) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + empty, err := config.IsEmptyDir(configPath) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error validating configuration directory", + fmt.Sprintf("Terraform encountered an unexpected error while verifying that the given configuration directory is valid: %s.", err), + )) + c.showDiagnostics(diags) + return 1 + } + if empty { + absPath, err := filepath.Abs(configPath) + if err != nil { + absPath = configPath + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No configuration files", + fmt.Sprintf("The directory %s contains no Terraform configuration files.", absPath), + )) + c.showDiagnostics(diags) + return 1 + } + + config, configDiags := c.loadConfig(configPath) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: config.Module.Backend, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Get the state + env := c.Workspace() + state, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + if err := state.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + return 1 + } + + s := state.State() + depTree := terraform.ConfigTreeDependencies(config, s) + depTree.SortDescendents() + + printRoot := treeprint.New() + providersCommandPopulateTreeNode(printRoot, depTree) + + c.Ui.Output(printRoot.String()) + + c.showDiagnostics(diags) + if diags.HasErrors() { + return 1 + } + + return 0 +} + +func providersCommandPopulateTreeNode(node treeprint.Tree, deps *moduledeps.Module) { + names := make([]string, 0, len(deps.Providers)) + for name := range deps.Providers { + names = append(names, string(name)) + } + sort.Strings(names) + + for _, name := range names { + dep := deps.Providers[moduledeps.ProviderInstance(name)] + versionsStr := dep.Constraints.String() + if versionsStr != "" { + versionsStr = " " + versionsStr + } + var reasonStr string + switch dep.Reason { + case moduledeps.ProviderDependencyInherited: + reasonStr = " (inherited)" + case moduledeps.ProviderDependencyFromState: + reasonStr = " (from state)" + } + node.AddNode(fmt.Sprintf("provider.%s%s%s", name, versionsStr, reasonStr)) + } + + for _, child := range deps.Children { + childNode := node.AddBranch(fmt.Sprintf("module.%s", child.Name)) + providersCommandPopulateTreeNode(childNode, child) + } +} + +const providersCommandHelp = ` +Usage: terraform providers [dir] + + Prints out a tree of modules in the referenced configuration annotated with + their provider requirements. 
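For reference, the tree rendering above comes from the `xlab/treeprint` package (vendored here under the `github.com` mirror path). A small sketch of the same AddNode/AddBranch pattern used by `providersCommandPopulateTreeNode`, with invented module and provider names:

```go
package main

import (
	"fmt"

	"github.com/xlab/treeprint"
)

func main() {
	// Providers become leaf nodes; child modules become branches,
	// mirroring providersCommandPopulateTreeNode above.
	root := treeprint.New()
	root.AddNode("provider.oneview ~> 1.0")

	child := root.AddBranch("module.network")
	child.AddNode("provider.oneview (inherited)")

	// Prints an ASCII tree rooted at ".".
	fmt.Print(root.String())
}
```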
+
+  This provides an overview of all of the provider requirements across all
+  referenced modules, as an aid to understanding why particular provider
+  plugins are needed and why particular versions are selected.
+
+`
diff --git a/vendor/github.com/hashicorp/terraform/command/providers_schema.go b/vendor/github.com/hashicorp/terraform/command/providers_schema.go
new file mode 100644
index 00000000..73d540e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/providers_schema.go
@@ -0,0 +1,113 @@
+package command
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/hashicorp/terraform/backend"
+	"github.com/hashicorp/terraform/command/jsonprovider"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// ProvidersSchemaCommand is a Command implementation that prints out the
+// schemas of the providers used in the current configuration/state.
+type ProvidersSchemaCommand struct {
+	Meta
+}
+
+func (c *ProvidersSchemaCommand) Help() string {
+	return providersSchemaCommandHelp
+}
+
+func (c *ProvidersSchemaCommand) Synopsis() string {
+	return "Prints the schemas of the providers used in the configuration"
+}
+
+func (c *ProvidersSchemaCommand) Run(args []string) int {
+	args, err := c.Meta.process(args, false)
+	if err != nil {
+		return 1
+	}
+
+	cmdFlags := c.Meta.defaultFlagSet("providers schema")
+	var jsonOutput bool
+	cmdFlags.BoolVar(&jsonOutput, "json", false, "produce JSON output")
+
+	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
+	if err := cmdFlags.Parse(args); err != nil {
+		return 1
+	}
+
+	if !jsonOutput {
+		c.Ui.Error(
+			"The `terraform providers schema` command requires the `-json` flag.\n")
+		cmdFlags.Usage()
+		return 1
+	}
+
+	// Check for user-supplied plugin path
+	if c.pluginPath, err = c.loadPluginPath(); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err))
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	// Load the backend
+	b, backendDiags := c.Backend(nil)
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// We require a local backend
+	local, ok := b.(backend.Local)
+	if !ok {
+		c.showDiagnostics(diags) // in case of any warnings in here
+		c.Ui.Error(ErrUnsupportedLocalOp)
+		return 1
+	}
+
+	// we expect that the config dir is the cwd
+	cwd, err := os.Getwd()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error getting cwd: %s", err))
+		return 1
+	}
+
+	// Build the operation
+	opReq := c.Operation(b)
+	opReq.ConfigDir = cwd
+	opReq.ConfigLoader, err = c.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Get the context
+	ctx, _, ctxDiags := local.Context(opReq)
+	diags = diags.Append(ctxDiags)
+	if ctxDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	schemas := ctx.Schemas()
+	jsonSchemas, err := jsonprovider.Marshal(schemas)
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Failed to marshal provider schemas to json: %s", err))
+		return 1
+	}
+	c.Ui.Output(string(jsonSchemas))
+
+	return 0
+}
+
+const providersSchemaCommandHelp = `
+Usage: terraform providers schema -json
+
+  Prints out a JSON representation of the schemas for all providers used
+  in the current configuration.
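A consumer of the new `-json` output can decode just the top-level document and dig into individual provider schemas lazily. A hedged sketch, assuming the top-level keys `format_version` and `provider_schemas` emitted by the `jsonprovider` package above, and an already-initialized working directory:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Run the command added above; this requires `terraform init` to have
	// been run so that provider plugins (and their schemas) can be loaded.
	out, err := exec.Command("terraform", "providers", "schema", "-json").Output()
	if err != nil {
		log.Fatalf("terraform providers schema failed: %s", err)
	}

	// Decode only the top level; the nested schema format is richer and
	// version-dependent, so it is kept as raw JSON here.
	var doc struct {
		FormatVersion   string                     `json:"format_version"`
		ProviderSchemas map[string]json.RawMessage `json:"provider_schemas"`
	}
	if err := json.Unmarshal(out, &doc); err != nil {
		log.Fatalf("failed to decode schema JSON: %s", err)
	}

	for name := range doc.ProviderSchemas {
		fmt.Println("provider:", name)
	}
}
```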
+` diff --git a/vendor/github.com/hashicorp/terraform/command/push.go b/vendor/github.com/hashicorp/terraform/command/push.go index 3a4c2060..45be43d6 100644 --- a/vendor/github.com/hashicorp/terraform/command/push.go +++ b/vendor/github.com/hashicorp/terraform/command/push.go @@ -1,538 +1,36 @@ package command import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" "strings" - "github.com/hashicorp/atlas-go/archive" - "github.com/hashicorp/atlas-go/v1" - "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) type PushCommand struct { Meta - - // client is the client to use for the actual push operations. - // If this isn't set, then the Atlas client is used. This should - // really only be set for testing reasons (and is hence not exported). - client pushClient } func (c *PushCommand) Run(args []string) int { - var atlasAddress, atlasToken string - var archiveVCS, moduleUpload bool - var name string - var overwrite []string - args = c.Meta.process(args, true) - cmdFlags := c.Meta.flagSet("push") - cmdFlags.StringVar(&atlasAddress, "atlas-address", "", "") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&atlasToken, "token", "", "") - cmdFlags.BoolVar(&moduleUpload, "upload-modules", true, "") - cmdFlags.StringVar(&name, "name", "", "") - cmdFlags.BoolVar(&archiveVCS, "vcs", true, "") - cmdFlags.Var((*FlagStringSlice)(&overwrite), "overwrite", "") - cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } - if err := cmdFlags.Parse(args); err != nil { - return 1 - } - - // Make a map of the set values - overwriteMap := make(map[string]struct{}, len(overwrite)) - for _, v := range overwrite { - overwriteMap[v] = struct{}{} - } - - // This is a map of variables specifically from the CLI that we want to overwrite. - // We need this because there is a chance that the user is trying to modify - // a variable we don't see in our context, but which exists in this atlas - // environment. - cliVars := make(map[string]string) - for k, v := range c.variables { - if _, ok := overwriteMap[k]; ok { - if val, ok := v.(string); ok { - cliVars[k] = val - } else { - c.Ui.Error(fmt.Sprintf("Error reading value for variable: %s", k)) - return 1 - } - } - } - - // Get the path to the configuration depending on the args. - configPath, err := ModulePath(cmdFlags.Args()) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Check if the path is a plan - plan, err := c.Plan(configPath) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if plan != nil { - c.Ui.Error( - "A plan file cannot be given as the path to the configuration.\n" + - "A path to a module (directory with configuration) must be given.") - return 1 - } - - // Load the module - mod, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) - return 1 - } - if mod == nil { - c.Ui.Error(fmt.Sprintf( - "No configuration files found in the directory: %s\n\n"+ - "This command requires configuration to run.", - configPath)) - return 1 - } - - // Load the backend - b, err := c.Backend(&BackendOpts{ - ConfigPath: configPath, - }) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) - return 1 - } - - // We require a non-local backend - if c.IsLocalBackend(b) { - c.Ui.Error( - "A remote backend is not enabled. 
For Atlas to run Terraform\n" + - "for you, remote state must be used and configured. Remote \n" + - "state via any backend is accepted, not just Atlas. To configure\n" + - "a backend, please see the documentation at the URL below:\n\n" + - "https://www.terraform.io/docs/state/remote.html") - return 1 - } - - // We require a local backend - local, ok := b.(backend.Local) - if !ok { - c.Ui.Error(ErrUnsupportedLocalOp) - return 1 - } - - // Build the operation - opReq := c.Operation() - opReq.Module = mod - opReq.Plan = plan - - // Get the context - ctx, _, err := local.Context(opReq) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - - // Get the configuration - config := ctx.Module().Config() - if name == "" { - if config.Atlas == nil || config.Atlas.Name == "" { - c.Ui.Error( - "The name of this Terraform configuration in Atlas must be\n" + - "specified within your configuration or the command-line. To\n" + - "set it on the command-line, use the `-name` parameter.") - return 1 - } - name = config.Atlas.Name - } - - // Initialize the client if it isn't given. - if c.client == nil { - // Make sure to nil out our client so our token isn't sitting around - defer func() { c.client = nil }() - - // Initialize it to the default client, we set custom settings later - client := atlas.DefaultClient() - if atlasAddress != "" { - client, err = atlas.NewClient(atlasAddress) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing Atlas client: %s", err)) - return 1 - } - } - - client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) - - if atlasToken != "" { - client.Token = atlasToken - } - - c.client = &atlasPushClient{Client: client} - } - - // Get the variables we already have in atlas - atlasVars, err := c.client.Get(name) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error looking up previously pushed configuration: %s", err)) - return 1 - } - - // Set remote variables in the context if we don't have a value here. These - // don't have to be correct, it just prevents the Input walk from prompting - // the user for input. - ctxVars := ctx.Variables() - atlasVarSentry := "ATLAS_78AC153CA649EAA44815DAD6CBD4816D" - for k, _ := range atlasVars { - if _, ok := ctxVars[k]; !ok { - ctx.SetVariable(k, atlasVarSentry) - } - } - - // Ask for input - if err := ctx.Input(c.InputMode()); err != nil { - c.Ui.Error(fmt.Sprintf( - "Error while asking for variable input:\n\n%s", err)) - return 1 - } - - // Now that we've gone through the input walk, we can be sure we have all - // the variables we're going to get. - // We are going to keep these separate from the atlas variables until - // upload, so we can notify the user which local variables we're sending. - serializedVars, err := tfVars(ctx.Variables()) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error has occurred while serializing the variables for uploading:\n"+ - "%s", err)) - return 1 - } - - // Get the absolute path for our data directory, since the Extra field - // value below needs to be absolute. - dataDirAbs, err := filepath.Abs(c.DataDir()) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error while expanding the data directory %q: %s", c.DataDir(), err)) - return 1 - } - - // Build the archiving options, which includes everything it can - // by default according to VCS rules but forcing the data directory. 
- archiveOpts := &archive.ArchiveOpts{ - VCS: archiveVCS, - Extra: map[string]string{ - DefaultDataDir: archive.ExtraEntryDir, - }, - } - - // Always store the state file in here so we can find state - statePathKey := fmt.Sprintf("%s/%s", DefaultDataDir, DefaultStateFilename) - archiveOpts.Extra[statePathKey] = filepath.Join(dataDirAbs, DefaultStateFilename) - if moduleUpload { - // If we're uploading modules, explicitly add that directory if exists. - moduleKey := fmt.Sprintf("%s/%s", DefaultDataDir, "modules") - moduleDir := filepath.Join(dataDirAbs, "modules") - _, err := os.Stat(moduleDir) - if err == nil { - archiveOpts.Extra[moduleKey] = filepath.Join(dataDirAbs, "modules") - } - if err != nil && !os.IsNotExist(err) { - c.Ui.Error(fmt.Sprintf( - "Error checking for module dir %q: %s", moduleDir, err)) - return 1 - } - } else { - // If we're not uploading modules, explicitly exclude add that - archiveOpts.Exclude = append( - archiveOpts.Exclude, - filepath.Join(c.DataDir(), "modules")) - } - - archiveR, err := archive.CreateArchive(configPath, archiveOpts) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error has occurred while archiving the module for uploading:\n"+ - "%s", err)) - return 1 - } - - // List of the vars we're uploading to display to the user. - // We always upload all vars from atlas, but only report them if they are overwritten. - var setVars []string - - // variables to upload - var uploadVars []atlas.TFVar - - // first add all the variables we want to send which have been serialized - // from the local context. - for _, sv := range serializedVars { - _, inOverwrite := overwriteMap[sv.Key] - _, inAtlas := atlasVars[sv.Key] - - // We have a variable that's not in atlas, so always send it. - if !inAtlas { - uploadVars = append(uploadVars, sv) - setVars = append(setVars, sv.Key) - } - - // We're overwriting an atlas variable. - // We also want to check that we - // don't send the dummy sentry value back to atlas. This could happen - // if it's specified as an overwrite on the cli, but we didn't set a - // new value. - if inAtlas && inOverwrite && sv.Value != atlasVarSentry { - uploadVars = append(uploadVars, sv) - setVars = append(setVars, sv.Key) - - // remove this value from the atlas vars, because we're going to - // send back the remainder regardless. - delete(atlasVars, sv.Key) - } - } - - // now send back all the existing atlas vars, inserting any overwrites from the cli. - for k, av := range atlasVars { - if v, ok := cliVars[k]; ok { - av.Value = v - setVars = append(setVars, k) - } - uploadVars = append(uploadVars, av) - } - - sort.Strings(setVars) - if len(setVars) > 0 { - c.Ui.Output( - "The following variables will be set or overwritten within Atlas from\n" + - "their local values. All other variables are already set within Atlas.\n" + - "If you want to modify the value of a variable, use the Atlas web\n" + - "interface or set it locally and use the -overwrite flag.\n\n") - for _, v := range setVars { - c.Ui.Output(fmt.Sprintf(" * %s", v)) - } - - // Newline - c.Ui.Output("") - } - - // Upsert! - opts := &pushUpsertOptions{ - Name: name, - Archive: archiveR, - Variables: ctx.Variables(), - TFVars: uploadVars, - } - - c.Ui.Output("Uploading Terraform configuration...") - vsn, err := c.client.Upsert(opts) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error occurred while uploading the module:\n\n%s", err)) - return 1 - } - - c.Ui.Output(c.Colorize().Color(fmt.Sprintf( - "[reset][bold][green]Configuration %q uploaded! 
(v%d)", - name, vsn))) - return 0 + // This command is no longer supported, but we'll retain it just to + // give the user some next-steps after upgrading. + c.showDiagnostics(tfdiags.Sourceless( + tfdiags.Error, + "Command \"terraform push\" is no longer supported", + "This command was used to push configuration to Terraform Enterprise legacy (v1), which has now reached end-of-life. To push configuration to Terraform Enterprise v2, use its REST API. Contact Terraform Enterprise support for more information.", + )) + return 1 } func (c *PushCommand) Help() string { helpText := ` Usage: terraform push [options] [DIR] - Upload this Terraform module to an Atlas server for remote - infrastructure management. - -Options: - - -atlas-address= An alternate address to an Atlas instance. Defaults - to https://atlas.hashicorp.com - - -upload-modules=true If true (default), then the modules are locked at - their current checkout and uploaded completely. This - prevents Atlas from running "terraform get". - - -name= Name of the configuration in Atlas. This can also - be set in the configuration itself. Format is - typically: "username/name". - - -token= Access token to use to upload. If blank or unspecified, - the ATLAS_TOKEN environmental variable will be used. - - -overwrite=foo Variable keys that should overwrite values in Atlas. - Otherwise, variables already set in Atlas will overwrite - local values. This flag can be repeated. - - -var 'foo=bar' Set a variable in the Terraform configuration. This - flag can be set multiple times. - - -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. - - -vcs=true If true (default), push will upload only files - committed to your VCS, if detected. - - -no-color If specified, output won't contain any color. - + This command was for the legacy version of Terraform Enterprise (v1), which + has now reached end-of-life. Therefore this command is no longer supported. ` return strings.TrimSpace(helpText) } -func sortedKeys(m map[string]interface{}) []string { - var keys []string - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// build the set of TFVars for push -func tfVars(vars map[string]interface{}) ([]atlas.TFVar, error) { - var tfVars []atlas.TFVar - var err error - -RANGE: - for _, k := range sortedKeys(vars) { - v := vars[k] - - var hcl []byte - tfv := atlas.TFVar{Key: k} - - switch v := v.(type) { - case string: - tfv.Value = v - - default: - // everything that's not a string is now HCL encoded - hcl, err = encodeHCL(v) - if err != nil { - break RANGE - } - - tfv.Value = string(hcl) - tfv.IsHCL = true - } - - tfVars = append(tfVars, tfv) - } - - return tfVars, err -} - func (c *PushCommand) Synopsis() string { - return "Upload this Terraform module to Atlas to run" -} - -// pushClient is implemented internally to control where pushes go. This is -// either to Atlas or a mock for testing. We still return a map to make it -// easier to check for variable existence when filtering the overrides. 
-type pushClient interface { - Get(string) (map[string]atlas.TFVar, error) - Upsert(*pushUpsertOptions) (int, error) -} - -type pushUpsertOptions struct { - Name string - Archive *archive.Archive - Variables map[string]interface{} - TFVars []atlas.TFVar -} - -type atlasPushClient struct { - Client *atlas.Client -} - -func (c *atlasPushClient) Get(name string) (map[string]atlas.TFVar, error) { - user, name, err := atlas.ParseSlug(name) - if err != nil { - return nil, err - } - - version, err := c.Client.TerraformConfigLatest(user, name) - if err != nil { - return nil, err - } - - variables := make(map[string]atlas.TFVar) - - if version == nil { - return variables, nil - } - - // Variables is superseded by TFVars - if version.TFVars == nil { - for k, v := range version.Variables { - variables[k] = atlas.TFVar{Key: k, Value: v} - } - } else { - for _, v := range version.TFVars { - variables[v.Key] = v - } - } - - return variables, nil -} - -func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) { - user, name, err := atlas.ParseSlug(opts.Name) - if err != nil { - return 0, err - } - - data := &atlas.TerraformConfigVersion{ - TFVars: opts.TFVars, - } - - version, err := c.Client.CreateTerraformConfigVersion( - user, name, data, opts.Archive, opts.Archive.Size) - if err != nil { - return 0, err - } - - return version, nil -} - -type mockPushClient struct { - File string - - GetCalled bool - GetName string - GetResult map[string]atlas.TFVar - GetError error - - UpsertCalled bool - UpsertOptions *pushUpsertOptions - UpsertVersion int - UpsertError error -} - -func (c *mockPushClient) Get(name string) (map[string]atlas.TFVar, error) { - c.GetCalled = true - c.GetName = name - return c.GetResult, c.GetError -} - -func (c *mockPushClient) Upsert(opts *pushUpsertOptions) (int, error) { - f, err := os.Create(c.File) - if err != nil { - return 0, err - } - defer f.Close() - - data := opts.Archive - size := opts.Archive.Size - if _, err := io.CopyN(f, data, size); err != nil { - return 0, err - } - - c.UpsertCalled = true - c.UpsertOptions = opts - return c.UpsertVersion, c.UpsertError + return "Obsolete command for Terraform Enterprise legacy (v1)" } diff --git a/vendor/github.com/hashicorp/terraform/command/refresh.go b/vendor/github.com/hashicorp/terraform/command/refresh.go index 3f1b8bf2..db7ca9c4 100644 --- a/vendor/github.com/hashicorp/terraform/command/refresh.go +++ b/vendor/github.com/hashicorp/terraform/command/refresh.go @@ -1,12 +1,12 @@ package command import ( - "context" "fmt" "strings" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/backend" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // RefreshCommand is a cli.Command implementation that refreshes the state @@ -16,11 +16,14 @@ type RefreshCommand struct { } func (c *RefreshCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("refresh") + cmdFlags := c.Meta.extendedFlagSet("refresh") cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", 0, "parallelism") + cmdFlags.IntVar(&c.Meta.parallelism, "parallelism", DefaultParallelism, "parallelism") cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") 
cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") @@ -36,45 +39,72 @@ func (c *RefreshCommand) Run(args []string) int { return 1 } - // Load the module - mod, err := c.Module(configPath) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load root config module: %s", err)) + var diags tfdiags.Diagnostics + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) + return 1 + } + + backendConfig, configDiags := c.loadBackendConfig(configPath) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } // Load the backend - b, err := c.Backend(&BackendOpts{ConfigPath: configPath}) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } + // Before we delegate to the backend, we'll print any warning diagnostics + // we've accumulated here, since the backend will start fresh with its own + // diagnostics. + c.showDiagnostics(diags) + diags = nil + // Build the operation - opReq := c.Operation() + opReq := c.Operation(b) + opReq.ConfigDir = configPath opReq.Type = backend.OperationTypeRefresh - opReq.Module = mod - // Perform the operation - op, err := b.Operation(context.Background(), opReq) + opReq.ConfigLoader, err = c.initConfigLoader() if err != nil { - c.Ui.Error(fmt.Sprintf("Error starting operation: %s", err)) + c.showDiagnostics(err) return 1 } - // Wait for the operation to complete - <-op.Done() - if err := op.Err; err != nil { - c.Ui.Error(err.Error()) + { + var moreDiags tfdiags.Diagnostics + opReq.Variables, moreDiags = c.collectVariableValues() + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + } + + op, err := c.RunOperation(b, opReq) + if err != nil { + c.showDiagnostics(err) return 1 } + if op.Result != backend.OperationSuccess { + return op.Result.ExitStatus() + } - // Output the outputs - if outputs := outputsAsString(op.State, terraform.RootModulePath, nil, true); outputs != "" { + if outputs := outputsAsString(op.State, addrs.RootModuleInstance, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } - return 0 + return op.Result.ExitStatus() } func (c *RefreshCommand) Help() string { @@ -116,8 +146,8 @@ Options: flag can be set multiple times. -var-file=foo Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be - automatically loaded if this flag is not specified. + a file. If "terraform.tfvars" or any ".auto.tfvars" + files are present, they will be automatically loaded. 
`
 	return strings.TrimSpace(helpText)
diff --git a/vendor/github.com/hashicorp/terraform/command/show.go b/vendor/github.com/hashicorp/terraform/command/show.go
index 9be48281..d72f9778 100644
--- a/vendor/github.com/hashicorp/terraform/command/show.go
+++ b/vendor/github.com/hashicorp/terraform/command/show.go
@@ -1,13 +1,19 @@
 package command
 
 import (
-	"flag"
 	"fmt"
 	"os"
 	"strings"
 
+	"github.com/hashicorp/terraform/backend"
 	"github.com/hashicorp/terraform/command/format"
-	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/command/jsonplan"
+	"github.com/hashicorp/terraform/command/jsonstate"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/plans/planfile"
+	"github.com/hashicorp/terraform/states/statefile"
+	"github.com/hashicorp/terraform/states/statemgr"
+	"github.com/hashicorp/terraform/tfdiags"
 )
 
 // ShowCommand is a Command implementation that reads and outputs the
@@ -17,109 +23,159 @@ type ShowCommand struct {
 }
 
 func (c *ShowCommand) Run(args []string) int {
-	var moduleDepth int
-
-	args = c.Meta.process(args, false)
+	args, err := c.Meta.process(args, false)
+	if err != nil {
+		return 1
+	}
 
-	cmdFlags := flag.NewFlagSet("show", flag.ContinueOnError)
-	c.addModuleDepthFlag(cmdFlags, &moduleDepth)
+	cmdFlags := c.Meta.defaultFlagSet("show")
+	var jsonOutput bool
+	cmdFlags.BoolVar(&jsonOutput, "json", false, "produce JSON output")
 	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
 	if err := cmdFlags.Parse(args); err != nil {
 		return 1
 	}
 
 	args = cmdFlags.Args()
-	if len(args) > 1 {
+	if len(args) > 2 {
 		c.Ui.Error(
-			"The show command expects at most one argument with the path\n" +
-				"to a Terraform state or plan file.\n")
+			"The show command expects at most two arguments: the path to a Terraform\n" +
+				"state or plan file, and optionally -json for JSON output.\n")
 		cmdFlags.Usage()
 		return 1
 	}
 
-	var planErr, stateErr error
-	var path string
-	var plan *terraform.Plan
-	var state *terraform.State
+	// Check for user-supplied plugin path
+	if c.pluginPath, err = c.loadPluginPath(); err != nil {
+		c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err))
+		return 1
+	}
+
+	var diags tfdiags.Diagnostics
+
+	// Load the backend
+	b, backendDiags := c.Backend(nil)
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// We require a local backend
+	local, ok := b.(backend.Local)
+	if !ok {
+		c.showDiagnostics(diags) // in case of any warnings in here
+		c.Ui.Error(ErrUnsupportedLocalOp)
+		return 1
+	}
+
+	// the show command expects the config dir to always be the cwd
+	cwd, err := os.Getwd()
+	if err != nil {
+		c.Ui.Error(fmt.Sprintf("Error getting cwd: %s", err))
+		return 1
+	}
+
+	// Determine if a planfile was passed to the command
+	var planFile *planfile.Reader
 	if len(args) > 0 {
-		path = args[0]
-		f, err := os.Open(path)
-		if err != nil {
-			c.Ui.Error(fmt.Sprintf("Error loading file: %s", err))
-			return 1
-		}
-		defer f.Close()
+		// We will handle error checking later on - this is just required to
+		// load the local context if the given path is successfully read as
+		// a planfile.
+		planFile, _ = c.PlanFile(args[0])
+	}
 
-		plan, err = terraform.ReadPlan(f)
-		if err != nil {
-			if _, err := f.Seek(0, 0); err != nil {
-				c.Ui.Error(fmt.Sprintf("Error reading file: %s", err))
-				return 1
-			}
+	// Build the operation
+	opReq := c.Operation(b)
+	opReq.ConfigDir = cwd
+	opReq.PlanFile = planFile
+	opReq.ConfigLoader, err = c.initConfigLoader()
+	if err != nil {
+		diags = diags.Append(err)
+		c.showDiagnostics(diags)
+		return 1
+	}
 
-			plan = nil
-			planErr = err
-		}
-		if plan == nil {
-			state, err = terraform.ReadState(f)
-			if err != nil {
-				stateErr = err
+	// Get the context
+	ctx, _, ctxDiags := local.Context(opReq)
+	diags = diags.Append(ctxDiags)
+	if ctxDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
+	// Get the schemas from the context
+	schemas := ctx.Schemas()
+
+	var planErr, stateErr error
+	var plan *plans.Plan
+	var stateFile *statefile.File
+
+	// if a path was provided, try to read it as a path to a planfile
+	// if that fails, try to read the cli argument as a path to a statefile
+	if len(args) > 0 {
+		path := args[0]
+		plan, stateFile, planErr = getPlanFromPath(path)
+		if planErr != nil {
+			stateFile, stateErr = getStateFromPath(path)
+			if stateErr != nil {
+				c.Ui.Error(fmt.Sprintf(
+					"Terraform couldn't read the given file as a state or plan file.\n"+
+						"The errors while attempting to read the file as each format are\n"+
+						"shown below.\n\n"+
+						"State read error: %s\n\nPlan read error: %s",
+					stateErr,
+					planErr))
+				return 1
 			}
 		}
 	} else {
-		// Load the backend
-		b, err := c.Backend(nil)
-		if err != nil {
-			c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err))
+		env := c.Workspace()
+		stateFile, stateErr = getStateFromEnv(b, env)
+		if stateErr != nil {
+			c.Ui.Error(stateErr.Error())
 			return 1
 		}
+	}
 
-		env := c.Env()
+	if plan != nil {
+		if jsonOutput {
+			config := ctx.Config()
+			jsonPlan, err := jsonplan.Marshal(config, plan, stateFile, schemas)
 
-		// Get the state
-		stateStore, err := b.State(env)
-		if err != nil {
-			c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
-			return 1
+			if err != nil {
+				c.Ui.Error(fmt.Sprintf("Failed to marshal plan to json: %s", err))
+				return 1
+			}
+			c.Ui.Output(string(jsonPlan))
+			return 0
 		}
+		dispPlan := format.NewPlan(plan.Changes)
+		c.Ui.Output(dispPlan.Format(c.Colorize()))
+		return 0
+	}
 
-		if err := stateStore.RefreshState(); err != nil {
-			c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
+	if jsonOutput {
+		// At this point, it is possible that there is neither state nor a plan.
+		// That's ok, we'll just return an empty object.
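The probing order above (planfile first, then statefile) can be reproduced outside the command. A sketch under the assumption that the `planfile` and `statefile` packages imported above are available as dependencies; the file name in `main` is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/terraform/plans/planfile"
	"github.com/hashicorp/terraform/states/statefile"
)

// classify reports whether path looks like a plan file or a state file,
// using the same probe order as ShowCommand.Run: try the planfile format
// first, then fall back to reading it as a state file.
func classify(path string) (string, error) {
	if pr, err := planfile.Open(path); err == nil {
		if _, err := pr.ReadPlan(); err == nil {
			return "plan", nil
		}
	}

	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	if _, err := statefile.Read(f); err == nil {
		return "state", nil
	}
	return "", fmt.Errorf("%s is neither a plan nor a state file", path)
}

func main() {
	kind, err := classify("terraform.tfstate")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("detected:", kind)
}
```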
+		jsonState, err := jsonstate.Marshal(stateFile, schemas)
+		if err != nil {
+			c.Ui.Error(fmt.Sprintf("Failed to marshal state to json: %s", err))
 			return 1
 		}
-
-		state = stateStore.State()
-		if state == nil {
+		c.Ui.Output(string(jsonState))
+	} else {
+		if stateFile == nil {
 			c.Ui.Output("No state.")
 			return 0
 		}
-	}
-
-	if plan == nil && state == nil {
-		c.Ui.Error(fmt.Sprintf(
-			"Terraform couldn't read the given file as a state or plan file.\n"+
-				"The errors while attempting to read the file as each format are\n"+
-				"shown below.\n\n"+
-				"State read error: %s\n\nPlan read error: %s",
-			stateErr,
-			planErr))
-		return 1
-	}
-
-	if plan != nil {
-		c.Ui.Output(format.Plan(&format.PlanOpts{
-			Plan:        plan,
-			Color:       c.Colorize(),
-			ModuleDepth: moduleDepth,
+		c.Ui.Output(format.State(&format.StateOpts{
+			State:   stateFile.State,
+			Color:   c.Colorize(),
+			Schemas: schemas,
 		}))
-		return 0
 	}
 
-	c.Ui.Output(format.State(&format.StateOpts{
-		State:       state,
-		Color:       c.Colorize(),
-		ModuleDepth: moduleDepth,
-	}))
 	return 0
 }
 
@@ -132,10 +188,9 @@ Usage: terraform show [options] [path]
 
 Options:
 
-  -module-depth=n     Specifies the depth of modules to show in the output.
-                      By default this is -1, which will expand all.
-
   -no-color           If specified, output won't contain any color.
+  -json               If specified, output the Terraform plan or state in
+                      a machine-readable form.
 
 `
 	return strings.TrimSpace(helpText)
@@ -144,3 +199,53 @@ Options:
 func (c *ShowCommand) Synopsis() string {
 	return "Inspect Terraform state or plan"
 }
+
+// getPlanFromPath returns a plan and statefile if the user-supplied path points
+// to a planfile. If both plan and error are nil, the path is likely a
+// directory. An error could suggest that the given path points to a statefile.
+func getPlanFromPath(path string) (*plans.Plan, *statefile.File, error) {
+	pr, err := planfile.Open(path)
+	if err != nil {
+		return nil, nil, err
+	}
+	plan, err := pr.ReadPlan()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	stateFile, err := pr.ReadStateFile()
+	return plan, stateFile, err
+}
+
+// getStateFromPath returns a statefile if the user-supplied path points to a statefile.
+func getStateFromPath(path string) (*statefile.File, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading statefile: %s", err)
+	}
+	defer f.Close()
+
+	var stateFile *statefile.File
+	stateFile, err = statefile.Read(f)
+	if err != nil {
+		return nil, fmt.Errorf("Error reading %s as a statefile: %s", path, err)
+	}
+	return stateFile, nil
+}
+
+// getStateFromEnv returns the State for the current workspace, if available.
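Downstream tooling can tell the two `-json` documents apart by their top-level keys: `jsonplan` output carries `resource_changes`, while `jsonstate` output carries only `values` (key names as emitted by this vendored version; treat them as assumptions). A small sniffing sketch, with a hypothetical input file name:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Output captured from `terraform show -json <path>`; the file
	// name here is hypothetical.
	buf, err := ioutil.ReadFile("show.json")
	if err != nil {
		log.Fatal(err)
	}

	var doc map[string]json.RawMessage
	if err := json.Unmarshal(buf, &doc); err != nil {
		log.Fatalf("not valid JSON: %s", err)
	}

	// A plan document carries resource_changes; a bare state document
	// only carries values (both carry format_version).
	switch {
	case doc["resource_changes"] != nil:
		fmt.Println("this is a plan")
	case doc["values"] != nil:
		fmt.Println("this is a state snapshot")
	default:
		fmt.Println("empty or unrecognized document")
	}
}
```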
+func getStateFromEnv(b backend.Backend, env string) (*statefile.File, error) { + // Get the state + stateStore, err := b.StateMgr(env) + if err != nil { + return nil, fmt.Errorf("Failed to load state manager: %s", err) + } + + if err := stateStore.RefreshState(); err != nil { + return nil, fmt.Errorf("Failed to load state: %s", err) + } + + sf := statemgr.Export(stateStore) + + return sf, nil +} diff --git a/vendor/github.com/hashicorp/terraform/command/state_list.go b/vendor/github.com/hashicorp/terraform/command/state_list.go index d7087d1b..adaff6fc 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_list.go +++ b/vendor/github.com/hashicorp/terraform/command/state_list.go @@ -4,7 +4,9 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) @@ -16,80 +18,104 @@ type StateListCommand struct { } func (c *StateListCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("state list") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + var statePath string + cmdFlags := c.Meta.defaultFlagSet("state list") + cmdFlags.StringVar(&statePath, "state", "", "path") + lookupId := cmdFlags.String("id", "", "Restrict output to paths with a resource having the specified ID.") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() + if statePath != "" { + c.Meta.statePath = statePath + } + // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(nil) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) return 1 } - env := c.Env() // Get the state - state, err := b.State(env) + env := c.Workspace() + stateMgr, err := b.StateMgr(env) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } - - if err := state.RefreshState(); err != nil { + if err := stateMgr.RefreshState(); err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 } - stateReal := state.State() - if stateReal == nil { + state := stateMgr.State() + if state == nil { c.Ui.Error(fmt.Sprintf(errStateNotFound)) return 1 } - filter := &terraform.StateFilter{State: stateReal} - results, err := filter.Filter(args...) - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateFilter, err)) - return cli.RunResultHelp + var addrs []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + if len(args) == 0 { + addrs, diags = c.lookupAllResourceInstanceAddrs(state) + } else { + addrs, diags = c.lookupResourceInstanceAddrs(state, args...) + } + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 } - for _, result := range results { - if _, ok := result.Value.(*terraform.InstanceState); ok { - c.Ui.Output(result.Address) + for _, addr := range addrs { + if is := state.ResourceInstance(addr); is != nil { + if *lookupId == "" || *lookupId == states.LegacyInstanceObjectID(is.Current) { + c.Ui.Output(addr.String()) + } } } + c.showDiagnostics(diags) + return 0 } func (c *StateListCommand) Help() string { helpText := ` -Usage: terraform state list [options] [pattern...] 
+Usage: terraform state list [options] [address...]
 
   List resources in the Terraform state.
 
-  This command lists resources in the Terraform state. The pattern argument
-  can be used to filter the resources by resource or module. If no pattern
-  is given, all resources are listed.
+  This command lists resource instances in the Terraform state. The address
+  argument can be used to filter the instances by resource or module. If
+  no address is given, all resource instances are listed.
 
-  The pattern argument is meant to provide very simple filtering. For
-  advanced filtering, please use tools such as "grep". The output of this
-  command is designed to be friendly for this usage.
+  The addresses must either be module addresses or absolute resource
+  addresses, such as:
+      aws_instance.example
+      module.example
+      module.example.module.child
+      module.example.aws_instance.example
 
-  The pattern argument accepts any resource targeting syntax. Please
-  refer to the documentation on resource targeting syntax for more
-  information.
+  An error will be returned if any of the resources or modules given as
+  filter addresses do not exist in the state.
 
 Options:
 
   -state=statefile    Path to a Terraform state file to use to look
-                      up Terraform-managed resources. By default it will
-                      use the state "terraform.tfstate" if it exists.
+                      up Terraform-managed resources. By default, Terraform
+                      will consult the state of the currently-selected
+                      workspace.
+
+  -id=ID              Filters the results to include only instances whose
+                      resource types have an attribute named "id" whose value
+                      equals the given id string.
 
 `
 	return strings.TrimSpace(helpText)
diff --git a/vendor/github.com/hashicorp/terraform/command/state_meta.go b/vendor/github.com/hashicorp/terraform/command/state_meta.go
index 879f8f64..25c17bdb 100644
--- a/vendor/github.com/hashicorp/terraform/command/state_meta.go
+++ b/vendor/github.com/hashicorp/terraform/command/state_meta.go
@@ -1,85 +1,198 @@
 package command
 
 import (
-	"errors"
 	"fmt"
+	"sort"
 	"time"
 
-	backendlocal "github.com/hashicorp/terraform/backend/local"
+	"github.com/hashicorp/terraform/addrs"
 	"github.com/hashicorp/terraform/state"
-	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/states/statemgr"
+	"github.com/hashicorp/terraform/tfdiags"
+
+	backendLocal "github.com/hashicorp/terraform/backend/local"
 )
 
 // StateMeta is the meta struct that should be embedded in state subcommands.
-type StateMeta struct{}
+type StateMeta struct {
+	Meta
+}
 
 // State returns the state for this meta. This gets the appropriate state from
 // the backend, but changes the way that backups are done. This configures
 // backups to be timestamped rather than just the original state path plus a
 // backup path.
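The timestamped backup naming described in this comment is a simple format string. A tiny sketch of the scheme used in `State` below; `terraform.tfstate` and the `.backup` extension are stand-ins for `stateOutPath` and `DefaultBackupExtension`:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirrors the backup naming in StateMeta.State:
	// <state-out-path>.<unix-timestamp><backup-extension>
	const stateOutPath = "terraform.tfstate" // hypothetical
	const backupExt = ".backup"              // stand-in for DefaultBackupExtension

	backupPath := fmt.Sprintf("%s.%d%s", stateOutPath, time.Now().UTC().Unix(), backupExt)
	fmt.Println(backupPath) // e.g. terraform.tfstate.1555555555.backup
}
```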
-func (c *StateMeta) State(m *Meta) (state.State, error) {
-	// Load the backend
-	b, err := m.Backend(nil)
-	if err != nil {
-		return nil, err
+func (c *StateMeta) State() (state.State, error) {
+	var realState state.State
+	backupPath := c.backupPath
+	stateOutPath := c.statePath
+
+	// use the specified state
+	if c.statePath != "" {
+		realState = statemgr.NewFilesystem(c.statePath)
+	} else {
+		// Load the backend
+		b, backendDiags := c.Backend(nil)
+		if backendDiags.HasErrors() {
+			return nil, backendDiags.Err()
+		}
+
+		workspace := c.Workspace()
+		// Get the state
+		s, err := b.StateMgr(workspace)
+		if err != nil {
+			return nil, err
+		}
+
+		// Get a local backend
+		localRaw, backendDiags := c.Backend(&BackendOpts{ForceLocal: true})
+		if backendDiags.HasErrors() {
+			// This should never fail
+			panic(backendDiags.Err())
+		}
+		localB := localRaw.(*backendLocal.Local)
+		_, stateOutPath, _ = localB.StatePaths(workspace)
+		if err != nil {
+			return nil, err
+		}
+
+		realState = s
 	}
 
-	env := m.Env()
-	// Get the state
-	s, err := b.State(env)
-	if err != nil {
-		return nil, err
+	// We always take a backup for state commands, so set the backup path if
+	// none was specified (the default is "-", but some tests bypass the flag
+	// parsing).
+	if backupPath == "-" || backupPath == "" {
+		// Determine the backup path. stateOutPath is set to the resulting
+		// file where state is written (cached in the case of remote state)
+		backupPath = fmt.Sprintf(
+			"%s.%d%s",
+			stateOutPath,
+			time.Now().UTC().Unix(),
+			DefaultBackupExtension)
 	}
 
-	// Get a local backend
-	localRaw, err := m.Backend(&BackendOpts{ForceLocal: true})
-	if err != nil {
-		// This should never fail
-		panic(err)
+	// If the backend is local (which it should always be, given our assertion
+	// of it above) we can now enable backups for it.
+	if lb, ok := realState.(*statemgr.Filesystem); ok {
+		lb.SetBackupPath(backupPath)
 	}
-	localB := localRaw.(*backendlocal.Local)
-	_, stateOutPath, _ := localB.StatePaths(env)
-	if err != nil {
-		return nil, err
+
+	return realState, nil
+}
+
+func (c *StateMeta) lookupResourceInstanceAddr(state *states.State, allowMissing bool, addrStr string) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) {
+	target, diags := addrs.ParseTargetStr(addrStr)
+	if diags.HasErrors() {
+		return nil, diags
 	}
 
-	// Determine the backup path. stateOutPath is set to the resulting
-	// file where state is written (cached in the case of remote state)
-	backupPath := fmt.Sprintf(
-		"%s.%d%s",
-		stateOutPath,
-		time.Now().UTC().Unix(),
-		DefaultBackupExtension)
-
-	// Wrap it for backups
-	s = &state.BackupState{
-		Real: s,
-		Path: backupPath,
+	targetAddr := target.Subject
+	var ret []addrs.AbsResourceInstance
+	switch addr := targetAddr.(type) {
+	case addrs.ModuleInstance:
+		// Matches all instances within the indicated module and all of its
+		// descendent modules.
+		ms := state.Module(addr)
+		if ms == nil {
+			if !allowMissing {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Unknown module",
+					fmt.Sprintf(`The current state contains no module at %s. If you've just added this module to the configuration, you must run "terraform apply" first to create the module's entry in the state.`, addr),
+				))
+			}
+			break
+		}
+		ret = append(ret, c.collectModuleResourceInstances(ms)...)
+		for _, cms := range state.Modules {
+			candidateAddr := cms.Addr
+			if len(candidateAddr) > len(addr) && candidateAddr[:len(addr)].Equal(addr) {
+				ret = append(ret, c.collectModuleResourceInstances(cms)...)
+ } + } + case addrs.AbsResource: + // Matches all instances of the specific selected resource. + rs := state.Resource(addr) + if rs == nil { + if !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown resource", + fmt.Sprintf(`The current state contains no resource %s. If you've just added this resource to the configuration, you must run "terraform apply" first to create the resource's entry in the state.`, addr), + )) + } + break + } + ret = append(ret, c.collectResourceInstances(addr.Module, rs)...) + case addrs.AbsResourceInstance: + is := state.ResourceInstance(addr) + if is == nil { + if !allowMissing { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unknown resource instance", + fmt.Sprintf(`The current state contains no resource instance %s. If you've just added its resource to the configuration or have changed the count or for_each arguments, you must run "terraform apply" first to update the resource's entry in the state.`, addr), + )) + } + break + } + ret = append(ret, addr) } + sort.Slice(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) - return s, nil + return ret, diags } -// filterInstance filters a single instance out of filter results. -func (c *StateMeta) filterInstance(rs []*terraform.StateFilterResult) (*terraform.StateFilterResult, error) { - var result *terraform.StateFilterResult - for _, r := range rs { - if _, ok := r.Value.(*terraform.InstanceState); !ok { - continue - } +func (c *StateMeta) lookupSingleResourceInstanceAddr(state *states.State, addrStr string) (addrs.AbsResourceInstance, tfdiags.Diagnostics) { + return addrs.ParseAbsResourceInstanceStr(addrStr) +} - if result != nil { - return nil, errors.New(errStateMultiple) - } +func (c *StateMeta) lookupSingleStateObjectAddr(state *states.State, addrStr string) (addrs.Targetable, tfdiags.Diagnostics) { + target, diags := addrs.ParseTargetStr(addrStr) + if diags.HasErrors() { + return nil, diags + } + return target.Subject, diags +} - result = r +func (c *StateMeta) lookupResourceInstanceAddrs(state *states.State, addrStrs ...string) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + var ret []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, addrStr := range addrStrs { + moreAddrs, moreDiags := c.lookupResourceInstanceAddr(state, false, addrStr) + ret = append(ret, moreAddrs...) + diags = diags.Append(moreDiags) } + return ret, diags +} - return result, nil +func (c *StateMeta) lookupAllResourceInstanceAddrs(state *states.State) ([]addrs.AbsResourceInstance, tfdiags.Diagnostics) { + var ret []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, ms := range state.Modules { + ret = append(ret, c.collectModuleResourceInstances(ms)...) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].Less(ret[j]) + }) + return ret, diags } -const errStateMultiple = `Multiple instances found for the given pattern! +func (c *StateMeta) collectModuleResourceInstances(ms *states.Module) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + for _, rs := range ms.Resources { + ret = append(ret, c.collectResourceInstances(ms.Addr, rs)...) + } + return ret +} -This command requires that the pattern match exactly one instance -of a resource. To view the matched instances, use "terraform state list". 
-Please modify the pattern to match only a single instance.` +func (c *StateMeta) collectResourceInstances(moduleAddr addrs.ModuleInstance, rs *states.Resource) []addrs.AbsResourceInstance { + var ret []addrs.AbsResourceInstance + for key := range rs.Instances { + ret = append(ret, rs.Addr.Instance(key).Absolute(moduleAddr)) + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/command/state_mv.go b/vendor/github.com/hashicorp/terraform/command/state_mv.go index 5b99dd2a..11005ad7 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_mv.go +++ b/vendor/github.com/hashicorp/terraform/command/state_mv.go @@ -1,29 +1,40 @@ package command import ( + "context" "fmt" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) // StateMvCommand is a Command implementation that shows a single resource. type StateMvCommand struct { - Meta StateMeta } func (c *StateMvCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } // We create two metas to track the two states - var meta1, meta2 Meta - cmdFlags := c.Meta.flagSet("state mv") - cmdFlags.StringVar(&meta1.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&meta1.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&meta2.backupPath, "backup-out", "-", "backup") - cmdFlags.StringVar(&meta2.statePath, "state-out", "", "path") + var backupPathOut, statePathOut string + + var dryRun bool + cmdFlags := c.Meta.defaultFlagSet("state mv") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.StringVar(&backupPathOut, "backup-out", "-", "backup") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock states") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.statePath, "state", "", "path") + cmdFlags.StringVar(&statePathOut, "state-out", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } @@ -33,178 +44,432 @@ func (c *StateMvCommand) Run(args []string) int { return cli.RunResultHelp } - // Copy the `-state` flag for output if we weren't given a custom one - if meta2.statePath == "" { - meta2.statePath = meta1.statePath - } - // Read the from state - stateFrom, err := c.StateMeta.State(&meta1) + stateFromMgr, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateFromMgr, "state-mv"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking source state: %s", err)) + return 1 + } + defer stateLocker.Unlock(nil) } - if err := stateFrom.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateFromMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh source state: %s", err)) return 1 } - stateFromReal := stateFrom.State() - if stateFromReal == nil { + stateFrom := stateFromMgr.State() + if stateFrom == nil { c.Ui.Error(fmt.Sprintf(errStateNotFound)) return 1 } // Read the destination state 
+ stateToMgr := stateFromMgr stateTo := stateFrom - stateToReal := stateFromReal - if meta2.statePath != meta1.statePath { - stateTo, err = c.StateMeta.State(&meta2) + + if statePathOut != "" { + c.statePath = statePathOut + c.backupPath = backupPathOut + + stateToMgr, err = c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateToMgr, "state-mv"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking destination state: %s", err)) + return 1 + } + defer stateLocker.Unlock(nil) } - if err := stateTo.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateToMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) return 1 } - stateToReal = stateTo.State() - if stateToReal == nil { - stateToReal = terraform.NewState() + stateTo = stateToMgr.State() + if stateTo == nil { + stateTo = states.NewState() } } - // Filter what we're moving - filter := &terraform.StateFilter{State: stateFromReal} - results, err := filter.Filter(args[0]) - if err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return cli.RunResultHelp - } - if len(results) == 0 { - c.Ui.Output(fmt.Sprintf("Item to move doesn't exist: %s", args[0])) + var diags tfdiags.Diagnostics + sourceAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[0]) + diags = diags.Append(moreDiags) + destAddr, moreDiags := c.lookupSingleStateObjectAddr(stateFrom, args[1]) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } - // Get the item to add to the state - add := c.addableResult(results) + prefix := "Move" + if dryRun { + prefix = "Would move" + } - // Do the actual move - if err := stateFromReal.Remove(args[0]); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 + const msgInvalidSource = "Invalid source address" + const msgInvalidTarget = "Invalid target address" + + var moved int + ssFrom := stateFrom.SyncWrapper() + sourceAddrs := c.sourceObjectAddrs(stateFrom, sourceAddr) + if len(sourceAddrs) == 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("Cannot move %s: does not match anything in the current state.", sourceAddr), + )) } + for _, rawAddrFrom := range sourceAddrs { + switch addrFrom := rawAddrFrom.(type) { + case addrs.ModuleInstance: + search := sourceAddr.(addrs.ModuleInstance) + addrTo, ok := destAddr.(addrs.ModuleInstance) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the target must also be a module.", addrFrom, addrTo), + )) + c.showDiagnostics(diags) + return 1 + } - if err := stateToReal.Add(args[0], args[1], add); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMv, err)) - return 1 + if len(search) < len(addrFrom) { + n := make(addrs.ModuleInstance, 0, len(addrTo)+len(addrFrom)-len(search)) + n = append(n, addrTo...) + n = append(n, addrFrom[len(search):]...) 
+ addrTo = n + } + + if stateTo.Module(addrTo) != nil { + c.Ui.Error(fmt.Sprintf(errStateMv, "destination module already exists")) + return 1 + } + + ms := ssFrom.Module(addrFrom) + if ms == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveModule(addrFrom) + + // Update the address before adding it to the state. + ms.Addr = addrTo + stateTo.Modules[addrTo.String()] = ms + } + + case addrs.AbsResource: + addrTo, ok := destAddr.(addrs.AbsResource) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the target must also be a whole resource.", addrFrom, addrTo), + )) + c.showDiagnostics(diags) + return 1 + } + diags = diags.Append(c.validateResourceMove(addrFrom, addrTo)) + if stateTo.Module(addrTo.Module) == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: %s does not exist in the current state.", addrTo, addrTo.Module), + )) + } + if stateTo.Resource(addrTo) != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: there is already a resource at that address in the current state.", addrTo), + )) + } + + rs := ssFrom.Resource(addrFrom) + if rs == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), addrTo.String())) + if !dryRun { + ssFrom.RemoveResource(addrFrom) + + // Update the address before adding it to the state. 
+ rs.Addr = addrTo.Resource + stateTo.Module(addrTo.Module).Resources[addrTo.Resource.String()] = rs + } + + case addrs.AbsResourceInstance: + addrTo, ok := destAddr.(addrs.AbsResourceInstance) + if !ok { + ra, ok := destAddr.(addrs.AbsResource) + if !ok { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move %s to %s: the target must also be a resource instance.", addrFrom, addrTo), + )) + c.showDiagnostics(diags) + return 1 + } + addrTo = ra.Instance(addrs.NoKey) + } + + diags = diags.Append(c.validateResourceMove(addrFrom.ContainingResource(), addrTo.ContainingResource())) + + if stateTo.Module(addrTo.Module) == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: %s does not exist in the current state.", addrTo, addrTo.Module), + )) + } + if stateTo.ResourceInstance(addrTo) != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: there is already a resource instance at that address in the current state.", addrTo), + )) + } + + is := ssFrom.ResourceInstance(addrFrom) + if is == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("The current state does not contain %s.", addrFrom), + )) + } + + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + moved++ + c.Ui.Output(fmt.Sprintf("%s %q to %q", prefix, addrFrom.String(), args[1])) + if !dryRun { + fromResourceAddr := addrFrom.ContainingResource() + fromProviderAddr := ssFrom.Resource(fromResourceAddr).ProviderConfig + ssFrom.ForgetResourceInstanceAll(addrFrom) + ssFrom.RemoveResourceIfEmpty(fromResourceAddr) + + rs := stateTo.Resource(addrTo.ContainingResource()) + if rs == nil { + // If we're moving to an address without an index then that + // suggests the user's intent is to establish both the + // resource and the instance at the same time (since the + // address covers both), but if there's an index in the + // target then the resource must already exist. 
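+					// For example (illustrative addresses): moving to
+					// "aws_instance.bar" can implicitly create that resource,
+					// while moving to "aws_instance.bar[1]" requires that
+					// aws_instance.bar already exists.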
+ if addrTo.Resource.Key != addrs.NoKey { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidTarget, + fmt.Sprintf("Cannot move to %s: %s does not exist in the current state.", addrTo, addrTo.ContainingResource()), + )) + c.showDiagnostics(diags) + return 1 + } + + resourceAddr := addrTo.ContainingResource() + stateTo.SyncWrapper().SetResourceMeta( + resourceAddr, + states.NoEach, + fromProviderAddr, // in this case, we bring the provider along as if we were moving the whole resource + ) + rs = stateTo.Resource(resourceAddr) + } + + rs.Instances[addrTo.Resource.Key] = is + } + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidSource, + fmt.Sprintf("Cannot move %s: Terraform doesn't know how to move this object.", rawAddrFrom), + )) + } + } + + if dryRun { + if moved == 0 { + c.Ui.Output("Would have moved nothing.") + } + return 0 // This is as far as we go in dry-run mode } // Write the new state - if err := stateTo.WriteState(stateToReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + if err := stateToMgr.WriteState(stateTo); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } - - if err := stateTo.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + if err := stateToMgr.PersistState(); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } // Write the old state if it is different if stateTo != stateFrom { - if err := stateFrom.WriteState(stateFromReal); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + if err := stateFromMgr.WriteState(stateFrom); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } - - if err := stateFrom.PersistState(); err != nil { - c.Ui.Error(fmt.Sprintf(errStateMvPersist, err)) + if err := stateFromMgr.PersistState(); err != nil { + c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } } - c.Ui.Output(fmt.Sprintf( - "Moved %s to %s", args[0], args[1])) + c.showDiagnostics(diags) + + if moved == 0 { + c.Ui.Output("No matching objects found.") + } else { + c.Ui.Output(fmt.Sprintf("Successfully moved %d object(s).", moved)) + } return 0 } -// addableResult takes the result from a filter operation and returns what to -// call State.Add with. The reason we do this is because in the module case -// we must add the list of all modules returned versus just the root module. -func (c *StateMvCommand) addableResult(results []*terraform.StateFilterResult) interface{} { - switch v := results[0].Value.(type) { - case *terraform.ModuleState: - // If a module state then we should add the full list of modules - result := []*terraform.ModuleState{v} - if len(results) > 1 { - for _, r := range results[1:] { - if ms, ok := r.Value.(*terraform.ModuleState); ok { - result = append(result, ms) - } +// sourceObjectAddrs takes a single source object address and expands it to +// potentially multiple objects that need to be handled within it. +// +// In particular, this handles the case where a module is requested directly: +// if it has any child modules, then they must also be moved. It also resolves +// the ambiguity that an index-less resource address could either be a resource +// address or a resource instance address, by making a decision about which +// is intended based on the current state of the resource in question. 
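+// For example, an index-less address like "aws_instance.foo" is resolved to
+// the single instance aws_instance.foo when the resource has neither "count"
+// nor "for_each" set, and to the whole resource (all of its instances)
+// otherwise.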
+func (c *StateMvCommand) sourceObjectAddrs(state *states.State, matched addrs.Targetable) []addrs.Targetable { + var ret []addrs.Targetable + + switch addr := matched.(type) { + case addrs.ModuleInstance: + for _, mod := range state.Modules { + if len(mod.Addr) < len(addr) { + continue // can't possibly be our selection or a child of it } - } - - return result - - case *terraform.ResourceState: - // If a resource state with more than one result, it has a multi-count - // and we need to add all of them. - result := []*terraform.ResourceState{v} - if len(results) > 1 { - for _, r := range results[1:] { - rs, ok := r.Value.(*terraform.ResourceState) - if !ok { - continue - } - - if rs.Type == v.Type { - result = append(result, rs) - } + if !mod.Addr[:len(addr)].Equal(addr) { + continue } + ret = append(ret, mod.Addr) } - - // If we only have one item, add it directly - if len(result) == 1 { - return result[0] + case addrs.AbsResource: + // If this refers to a resource without "count" or "for_each" set then + // we'll assume the user intended it to be a resource instance + // address instead, to allow for requests like this: + // terraform state mv aws_instance.foo aws_instance.bar[1] + // That wouldn't be allowed if aws_instance.foo had multiple instances + // since we can't move multiple instances into one. + if rs := state.Resource(addr); rs != nil && rs.EachMode == states.NoEach { + ret = append(ret, addr.Instance(addrs.NoKey)) + } else { + ret = append(ret, addr) } + default: + ret = append(ret, matched) + } - return result + return ret +} - default: - // By default just add the first result - return v +func (c *StateMvCommand) validateResourceMove(addrFrom, addrTo addrs.AbsResource) tfdiags.Diagnostics { + const msgInvalidRequest = "Invalid state move request" + + var diags tfdiags.Diagnostics + if addrFrom.Resource.Mode != addrTo.Resource.Mode { + switch addrFrom.Resource.Mode { + case addrs.ManagedResourceMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: a managed resource can be moved only to another managed resource address.", addrFrom, addrTo), + )) + case addrs.DataResourceMode: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: a data resource can be moved only to another data resource address.", addrFrom, addrTo), + )) + default: + // In case a new mode is added in future, this unhelpful error is better than nothing. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: cannot change resource mode.", addrFrom, addrTo), + )) + } + } + if addrFrom.Resource.Type != addrTo.Resource.Type { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + msgInvalidRequest, + fmt.Sprintf("Cannot move %s to %s: resource types don't match.", addrFrom, addrTo), + )) } + return diags } func (c *StateMvCommand) Help() string { helpText := ` -Usage: terraform state mv [options] ADDRESS ADDRESS +Usage: terraform state mv [options] SOURCE DESTINATION - Move an item in the state to another location or to a completely different - state file. + This command will move an item matched by the address given to the + destination address. This command can also move to a destination address + in a completely different state file. 
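+
+  For example (with illustrative addresses), a resource can be renamed
+  in place within the same state:
+
+      terraform state mv aws_instance.foo aws_instance.bar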
- This command is useful for module refactors (moving items into a module), - configuration refactors (moving items to a completely different or new - state file), or generally renaming of resources. + This can be used for simple resource renaming, moving items to and from + a module, moving entire modules, and more. And because this command can also + move data to a completely new state, it can also be used for refactoring + one configuration into multiple separately managed Terraform configurations. - This command creates a timestamped backup of the state on every invocation. - This can't be disabled. Due to the destructive nature of this command, - the backup is ensured by Terraform for safety reasons. + This command will output a backup copy of the state prior to saving any + changes. The backup cannot be disabled. Due to the destructive nature + of this command, backups are required. - If you're moving from one state file to a different state file, a backup - will be created for each state file. + If you're moving an item to a different state file, a backup will be created + for each state file. Options: + -dry-run If set, prints out what would've been moved but doesn't + actually move anything. + -backup=PATH Path where Terraform should write the backup for the original state. This can't be disabled. If not set, Terraform will write it to the same path as the statefile with - a backup extension. This backup will be made in addition - to the timestamped backup. + a ".backup" extension. -backup-out=PATH Path where Terraform should write the backup for the destination state. This can't be disabled. If not set, Terraform @@ -213,13 +478,16 @@ Options: to be specified if -state-out is set to a different path than -state. - -state=PATH Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. + -lock=true Lock the state files when locking is supported. + + -lock-timeout=0s Duration to retry a state lock. + + -state=PATH Path to the source state file. Defaults to the configured + backend, or "terraform.tfstate" - -state-out=PATH Path to the destination state file to move the item - to. This defaults to the same statefile. This will - overwrite the destination state file. + -state-out=PATH Path to the destination state file to write to. If this + isn't specified, the source state file will be used. This + can be a new or existing path. ` return strings.TrimSpace(helpText) @@ -229,7 +497,7 @@ func (c *StateMvCommand) Synopsis() string { return "Move an item in the state" } -const errStateMv = `Error moving state: %[1]s +const errStateMv = `Error moving state: %s Please ensure your addresses and state paths are valid. No state was persisted. 
Your existing states are untouched.` diff --git a/vendor/github.com/hashicorp/terraform/command/state_pull.go b/vendor/github.com/hashicorp/terraform/command/state_pull.go index 96c30d9b..fbd546b4 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_pull.go +++ b/vendor/github.com/hashicorp/terraform/command/state_pull.go @@ -5,7 +5,8 @@ import ( "fmt" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" ) @@ -16,48 +17,50 @@ type StatePullCommand struct { } func (c *StatePullCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("state pull") + cmdFlags := c.Meta.defaultFlagSet("state pull") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(nil) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) return 1 } - // Get the state - env := c.Env() - state, err := b.State(env) + // Get the state manager for the current workspace + env := c.Workspace() + stateMgr, err := b.StateMgr(env) if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) return 1 } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) return 1 } - s := state.State() - if s == nil { - // Output on "error" so it shows up on stderr - c.Ui.Error("Empty state (no state)") + // Get a statefile object representing the latest snapshot + stateFile := statemgr.Export(stateMgr) - return 0 - } + if stateFile != nil { // we produce no output if the statefile is nil + var buf bytes.Buffer + err = statefile.Write(stateFile, &buf) + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) + return 1 + } - var buf bytes.Buffer - if err := terraform.WriteState(s, &buf); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 + c.Ui.Output(buf.String()) } - c.Ui.Output(buf.String()) return 0 } diff --git a/vendor/github.com/hashicorp/terraform/command/state_push.go b/vendor/github.com/hashicorp/terraform/command/state_push.go index 95f2b6ed..9a3cf629 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_push.go +++ b/vendor/github.com/hashicorp/terraform/command/state_push.go @@ -1,12 +1,15 @@ package command import ( + "context" "fmt" "io" "os" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" "github.com/mitchellh/cli" ) @@ -17,19 +20,24 @@ type StatePushCommand struct { } func (c *StatePushCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } var flagForce bool - cmdFlags := c.Meta.flagSet("state push") + cmdFlags := c.Meta.defaultFlagSet("state push") cmdFlags.BoolVar(&flagForce, "force", false, "") + 
cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() if len(args) != 1 { - c.Ui.Error("Exactly one argument expected: path to state to push") - return 1 + c.Ui.Error("Exactly one argument expected.\n") + return cli.RunResultHelp } // Determine our reader for the input state. This is the filepath @@ -49,7 +57,7 @@ func (c *StatePushCommand) Run(args []string) int { } // Read the state - sourceState, err := terraform.ReadState(r) + srcStateFile, err := statefile.Read(r) if c, ok := r.(io.Closer); ok { // Close the reader if possible right now since we're done with it. c.Close() @@ -60,52 +68,52 @@ func (c *StatePushCommand) Run(args []string) int { } // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(nil) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) return 1 } - // Get the state - env := c.Env() - state, err := b.State(env) + // Get the state manager for the currently-selected workspace + env := c.Workspace() + stateMgr, err := b.StateMgr(env) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) return 1 } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load destination state: %s", err)) - return 1 - } - dstState := state.State() - // If we're not forcing, then perform safety checks - if !flagForce && !dstState.Empty() { - if !dstState.SameLineage(sourceState) { - c.Ui.Error(strings.TrimSpace(errStatePushLineage)) + if c.stateLock { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateMgr, "state-push"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) return 1 } + defer stateLocker.Unlock(nil) + } - age, err := dstState.CompareAges(sourceState) - if err != nil { - c.Ui.Error(err.Error()) - return 1 - } - if age == terraform.StateAgeReceiverNewer { - c.Ui.Error(strings.TrimSpace(errStatePushSerialNewer)) - return 1 - } + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh destination state: %s", err)) + return 1 } - // Overwrite it - if err := state.WriteState(sourceState); err != nil { + if srcStateFile == nil { + // We'll push a new empty state instead + srcStateFile = statemgr.NewStateFile() + } + + // Import it, forcing through the lineage/serial if requested and possible. + if err := statemgr.Import(srcStateFile, stateMgr, flagForce); err != nil { c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) return 1 } - if err := state.PersistState(); err != nil { + if err := stateMgr.WriteState(srcStateFile.State); err != nil { c.Ui.Error(fmt.Sprintf("Failed to write state: %s", err)) return 1 } + if err := stateMgr.PersistState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to persist state: %s", err)) + return 1 + } return 0 } @@ -133,6 +141,10 @@ Options: -force Write the state even if lineages don't match or the remote serial is higher. + -lock=true Lock the state file when locking is supported. + + -lock-timeout=0s Duration to retry a state lock. 
+ ` return strings.TrimSpace(helpText) } diff --git a/vendor/github.com/hashicorp/terraform/command/state_rm.go b/vendor/github.com/hashicorp/terraform/command/state_rm.go index b2491c4e..fd936364 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_rm.go +++ b/vendor/github.com/hashicorp/terraform/command/state_rm.go @@ -1,61 +1,126 @@ package command import ( + "context" "fmt" "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/cli" ) // StateRmCommand is a Command implementation that shows a single resource. type StateRmCommand struct { - Meta StateMeta } func (c *StateRmCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.Meta.backupPath, "backup", "-", "backup") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + var dryRun bool + cmdFlags := c.Meta.defaultFlagSet("state rm") + cmdFlags.BoolVar(&dryRun, "dry-run", false, "dry run") + cmdFlags.StringVar(&c.backupPath, "backup", "-", "backup") + cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&c.statePath, "state", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } + args = cmdFlags.Args() + if len(args) < 1 { + c.Ui.Error("At least one address is required.\n") + return cli.RunResultHelp + } - state, err := c.StateMeta.State(&c.Meta) + // Get the state + stateMgr, err := c.State() if err != nil { c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) - return cli.RunResultHelp + return 1 + } + + if c.stateLock { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateMgr, "state-rm"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) + return 1 + } + defer stateLocker.Unlock(nil) } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) return 1 } - stateReal := state.State() - if stateReal == nil { + state := stateMgr.State() + if state == nil { c.Ui.Error(fmt.Sprintf(errStateNotFound)) return 1 } - if err := stateReal.Remove(args...); err != nil { - c.Ui.Error(fmt.Sprintf(errStateRm, err)) + // This command primarily works with resource instances, though it will + // also clean up any modules and resources left empty by actions it takes. + var addrs []addrs.AbsResourceInstance + var diags tfdiags.Diagnostics + for _, addrStr := range args { + moreAddrs, moreDiags := c.lookupResourceInstanceAddr(state, true, addrStr) + addrs = append(addrs, moreAddrs...) 
+ diags = diags.Append(moreDiags) + } + if diags.HasErrors() { + c.showDiagnostics(diags) return 1 } - if err := state.WriteState(stateReal); err != nil { + prefix := "Removed " + if dryRun { + prefix = "Would remove " + } + + var isCount int + ss := state.SyncWrapper() + for _, addr := range addrs { + isCount++ + c.Ui.Output(prefix + addr.String()) + if !dryRun { + ss.ForgetResourceInstanceAll(addr) + ss.RemoveResourceIfEmpty(addr.ContainingResource()) + } + } + + if dryRun { + if isCount == 0 { + c.Ui.Output("Would have removed nothing.") + } + return 0 // This is as far as we go in dry-run mode + } + + if err := stateMgr.WriteState(state); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } - - if err := state.PersistState(); err != nil { + if err := stateMgr.PersistState(); err != nil { c.Ui.Error(fmt.Sprintf(errStateRmPersist, err)) return 1 } - c.Ui.Output("Item removal successful.") + if len(diags) > 0 { + c.showDiagnostics(diags) + } + + if isCount == 0 { + c.Ui.Output("No matching resource instances found.") + } else { + c.Ui.Output(fmt.Sprintf("Successfully removed %d resource instance(s).", isCount)) + } return 0 } @@ -63,34 +128,40 @@ func (c *StateRmCommand) Help() string { helpText := ` Usage: terraform state rm [options] ADDRESS... - Remove one or more items from the Terraform state. + Remove one or more items from the Terraform state, causing Terraform to + "forget" those items without first destroying them in the remote system. - This command removes one or more items from the Terraform state based - on the address given. You can view and list the available resources + This command removes one or more resource instances from the Terraform state + based on the addresses given. You can view and list the available instances with "terraform state list". - This command creates a timestamped backup of the state on every invocation. - This can't be disabled. Due to the destructive nature of this command, - the backup is ensured by Terraform for safety reasons. + If you give the address of an entire module then all of the instances in + that module and any of its child modules will be removed from the state. + + If you give the address of a resource that has "count" or "for_each" set, + all of the instances of that resource will be removed from the state. Options: + -dry-run If set, prints out what would've been removed but + doesn't actually remove anything. + -backup=PATH Path where Terraform should write the backup - state. This can't be disabled. If not set, Terraform - will write it to the same path as the statefile with - a backup extension. This backup will be made in addition - to the timestamped backup. + state. + + -lock=true Lock the state file when locking is supported. + + -lock-timeout=0s Duration to retry a state lock. - -state=statefile Path to a Terraform state file to use to look - up Terraform-managed resources. By default it will - use the state "terraform.tfstate" if it exists. + -state=PATH Path to the state file to update. Defaults to the current + workspace state. 
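+
+  For example (illustrative address), to make Terraform "forget" one
+  instance without destroying it:
+
+      terraform state rm aws_instance.foo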
` return strings.TrimSpace(helpText) } func (c *StateRmCommand) Synopsis() string { - return "Remove an item from the state" + return "Remove instances from the state" } const errStateRm = `Error removing items from the state: %s diff --git a/vendor/github.com/hashicorp/terraform/command/state_show.go b/vendor/github.com/hashicorp/terraform/command/state_show.go index 235481f2..e3aaf486 100644 --- a/vendor/github.com/hashicorp/terraform/command/state_show.go +++ b/vendor/github.com/hashicorp/terraform/command/state_show.go @@ -2,12 +2,14 @@ package command import ( "fmt" - "sort" + "os" "strings" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/backend" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/states" "github.com/mitchellh/cli" - "github.com/ryanuber/columnize" ) // StateShowCommand is a Command implementation that shows a single resource. @@ -17,83 +19,114 @@ type StateShowCommand struct { } func (c *StateShowCommand) Run(args []string) int { - args = c.Meta.process(args, true) + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } - cmdFlags := c.Meta.flagSet("state show") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags := c.Meta.defaultFlagSet("state show") + cmdFlags.StringVar(&c.Meta.statePath, "state", "", "path") if err := cmdFlags.Parse(args); err != nil { return cli.RunResultHelp } args = cmdFlags.Args() + if len(args) != 1 { + c.Ui.Error("Exactly one argument expected.\n") + return cli.RunResultHelp + } - // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + c.Ui.Error(fmt.Sprintf("Error loading plugin path: %s", err)) return 1 } - // Get the state - env := c.Env() - state, err := b.State(env) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + // Load the backend + b, backendDiags := c.Backend(nil) + if backendDiags.HasErrors() { + c.showDiagnostics(backendDiags) return 1 } - if err := state.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) + + // We require a local backend + local, ok := b.(backend.Local) + if !ok { + c.Ui.Error(ErrUnsupportedLocalOp) return 1 } - stateReal := state.State() - if stateReal == nil { - c.Ui.Error(fmt.Sprintf(errStateNotFound)) + // Check if the address can be parsed + addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) + if addrDiags.HasErrors() { + c.Ui.Error(fmt.Sprintf(errParsingAddress, args[0])) return 1 } - filter := &terraform.StateFilter{State: stateReal} - results, err := filter.Filter(args...) 
+ // We expect the config dir to always be the cwd + cwd, err := os.Getwd() if err != nil { - c.Ui.Error(fmt.Sprintf(errStateFilter, err)) + c.Ui.Error(fmt.Sprintf("Error getting cwd: %s", err)) return 1 } - if len(results) == 0 { - return 0 - } + // Build the operation (required to get the schemas) + opReq := c.Operation(b) + opReq.ConfigDir = cwd - instance, err := c.filterInstance(results) + opReq.ConfigLoader, err = c.initConfigLoader() if err != nil { - c.Ui.Error(err.Error()) + c.Ui.Error(fmt.Sprintf("Error initializing config loader: %s", err)) return 1 } - if instance == nil { - return 0 + // Get the context (required to get the schemas) + ctx, _, ctxDiags := local.Context(opReq) + if ctxDiags.HasErrors() { + c.showDiagnostics(ctxDiags) + return 1 } - is := instance.Value.(*terraform.InstanceState) + // Get the schemas from the context + schemas := ctx.Schemas() - // Sort the keys - var keys []string - for k, _ := range is.Attributes { - keys = append(keys, k) + // Get the state + env := c.Workspace() + stateMgr, err := b.StateMgr(env) + if err != nil { + c.Ui.Error(fmt.Sprintf(errStateLoadingState, err)) + return 1 + } + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to refresh state: %s", err)) + return 1 } - sort.Strings(keys) - - // Build the output - var output []string - output = append(output, fmt.Sprintf("id | %s", is.ID)) - for _, k := range keys { - if k != "id" { - output = append(output, fmt.Sprintf("%s | %s", k, is.Attributes[k])) - } + + state := stateMgr.State() + if state == nil { + c.Ui.Error(fmt.Sprintf(errStateNotFound)) + return 1 } - // Output - config := columnize.DefaultConfig() - config.Glue = " = " - c.Ui.Output(columnize.Format(output, config)) + is := state.ResourceInstance(addr) + if !is.HasCurrent() { + c.Ui.Error(errNoInstanceFound) + return 1 + } + + singleInstance := states.NewState() + singleInstance.EnsureModule(addr.Module).SetResourceInstanceCurrent( + addr.Resource, + is.Current, + addr.Resource.Resource.DefaultProviderConfig().Absolute(addr.Module), + ) + + output := format.State(&format.StateOpts{ + State: singleInstance, + Color: c.Colorize(), + Schemas: schemas, + }) + c.Ui.Output(output[strings.Index(output, "#"):]) + return 0 } @@ -120,3 +153,15 @@ Options: func (c *StateShowCommand) Synopsis() string { return "Show a resource in the state" } + +const errNoInstanceFound = `No instance found for the given address! + +This command requires that the address references one specific instance. +To view the available instances, use "terraform state list". Please modify +the address to reference a specific instance.` + +const errParsingAddress = `Error parsing instance address: %s + +This command requires that the address references one specific instance. +To view the available instances, use "terraform state list". 
Please modify +the address to reference a specific instance.` diff --git a/vendor/github.com/hashicorp/terraform/command/taint.go b/vendor/github.com/hashicorp/terraform/command/taint.go index e4e44362..80ecfb70 100644 --- a/vendor/github.com/hashicorp/terraform/command/taint.go +++ b/vendor/github.com/hashicorp/terraform/command/taint.go @@ -3,12 +3,12 @@ package command import ( "context" "fmt" - "log" "strings" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // TaintCommand is a cli.Command implementation that manually taints @@ -18,23 +18,28 @@ type TaintCommand struct { } func (c *TaintCommand) Run(args []string) int { - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - var allowMissing bool var module string - cmdFlags := c.Meta.flagSet("taint") + var allowMissing bool + cmdFlags := c.Meta.defaultFlagSet("taint") cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&module, "module", "", "module") + cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } + var diags tfdiags.Diagnostics + // Require the one argument for the resource to taint args = cmdFlags.Args() if len(args) != 1 { @@ -43,144 +48,142 @@ func (c *TaintCommand) Run(args []string) int { return 1 } - name := args[0] - if module == "" { - module = "root" - } else { - module = "root." + module + if module != "" { + c.Ui.Error("The -module option is no longer used. 
Instead, include the module path in the main resource address, like \"module.foo.module.bar.null_resource.baz\".") + return 1 } - rsk, err := terraform.ParseResourceStateKey(name) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to parse resource name: %s", err)) + addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0]) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } - if !rsk.Mode.Taintable() { - c.Ui.Error(fmt.Sprintf("Resource '%s' cannot be tainted", name)) + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + c.Ui.Error(fmt.Sprintf("Resource instance %s cannot be tainted", addr)) return 1 } // Load the backend - b, err := c.Backend(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + b, backendDiags := c.Backend(nil) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) return 1 } // Get the state - env := c.Env() - st, err := b.State(env) + env := c.Workspace() + stateMgr, err := b.StateMgr(env) if err != nil { c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 } - if err := st.RefreshState(); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) - return 1 - } if c.stateLock { - lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout) - defer cancel() - - lockInfo := state.NewLockInfo() - lockInfo.Operation = "taint" - lockID, err := clistate.Lock(lockCtx, st, lockInfo, c.Ui, c.Colorize()) - if err != nil { + stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateMgr, "taint"); err != nil { c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) return 1 } - - defer clistate.Unlock(st, lockID, c.Ui, c.Colorize()) + defer stateLocker.Unlock(nil) } - // Get the actual state structure - s := st.State() - if s.Empty() { - if allowMissing { - return c.allowMissingExit(name, module) - } - - c.Ui.Error(fmt.Sprintf( - "The state is empty. The most common reason for this is that\n" + - "an invalid state file path was given or Terraform has never\n " + - "been run for this infrastructure. Infrastructure must exist\n" + - "for it to be tainted.")) + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err)) return 1 } - // Get the proper module we want to taint - modPath := strings.Split(module, ".") - mod := s.ModuleByPath(modPath) - if mod == nil { + // Get the actual state structure + state := stateMgr.State() + if state.Empty() { if allowMissing { - return c.allowMissingExit(name, module) + return c.allowMissingExit(addr) } - c.Ui.Error(fmt.Sprintf( - "The module %s could not be found. There is nothing to taint.", - module)) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + "The state currently contains no resource instances whatsoever. This may occur if the configuration has never been applied or if it has recently been destroyed.", + )) + c.showDiagnostics(diags) return 1 } - // If there are no resources in this module, it is an error - if len(mod.Resources) == 0 { + ss := state.SyncWrapper() + + // Get the resource and instance we're going to taint + rs := ss.Resource(addr.ContainingResource()) + is := ss.ResourceInstance(addr) + if is == nil { if allowMissing { - return c.allowMissingExit(name, module) + return c.allowMissingExit(addr) } - c.Ui.Error(fmt.Sprintf( - "The module %s has no resources. 
There is nothing to taint.", - module)) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("There is no resource instance in the state with the address %s. If the resource configuration has just been added, you must run \"terraform apply\" once to create the corresponding instance(s) before they can be tainted.", addr), + )) + c.showDiagnostics(diags) return 1 } - // Get the resource we're looking for - rs, ok := mod.Resources[name] - if !ok { - if allowMissing { - return c.allowMissingExit(name, module) + obj := is.Current + if obj == nil { + if len(is.Deposed) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("Resource instance %s is currently part-way through a create_before_destroy replacement action. Run \"terraform apply\" to complete its replacement before tainting it.", addr), + )) + } else { + // Don't know why we're here, but we'll produce a generic error message anyway. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "No such resource instance", + fmt.Sprintf("Resource instance %s does not currently have a remote object associated with it, so it cannot be tainted.", addr), + )) } - - c.Ui.Error(fmt.Sprintf( - "The resource %s couldn't be found in the module %s.", - name, - module)) + c.showDiagnostics(diags) return 1 } - // Taint the resource - rs.Taint() + obj.Status = states.ObjectTainted + ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig) - log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath()) - if err := st.WriteState(s); err != nil { + if err := stateMgr.WriteState(state); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } - if err := st.PersistState(); err != nil { + if err := stateMgr.PersistState(); err != nil { c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err)) return 1 } - c.Ui.Output(fmt.Sprintf( - "The resource %s in the module %s has been marked as tainted!", - name, module)) + c.Ui.Output(fmt.Sprintf("Resource instance %s has been marked as tainted.", addr)) return 0 } func (c *TaintCommand) Help() string { helpText := ` -Usage: terraform taint [options] name +Usage: terraform taint [options]
  Manually mark a resource as tainted, forcing a destroy and recreate
  on the next plan/apply.

  This will not modify your infrastructure. This command changes your
  state to mark a resource as tainted so that during the next plan or
-  apply, that resource will be destroyed and recreated. This command on
-  its own will not modify infrastructure. This command can be undone by
-  reverting the state backup file that is created.
+  apply that resource will be destroyed and recreated. This command on
+  its own will not modify infrastructure. This command can be undone
+  using the "terraform untaint" command with the same address.
+
+  The address is in the usual resource address syntax, as shown in
+  the output from other commands, such as:
+      aws_instance.foo
+      aws_instance.bar[1]
+      module.foo.module.bar.aws_instance.baz

 Options:

@@ -195,12 +198,6 @@ Options:

   -lock-timeout=0s    Duration to retry a state lock.

-  -module=path        The module path where the resource lives. By
-                      default this will be root. Child modules can be specified
-                      by names. Ex. "consul" or "consul.vpc" (nested modules).
-
-  -no-color           If specified, output won't contain any color.
-
   -state=path         Path to read and save state (unless state-out is
                       specified). Defaults to "terraform.tfstate".

@@ -215,10 +212,11 @@ func (c *TaintCommand) Synopsis() string {
 	return "Manually mark a resource for recreation"
 }

-func (c *TaintCommand) allowMissingExit(name, module string) int {
-	c.Ui.Output(fmt.Sprintf(
-		"The resource %s in the module %s was not found, but\n"+
-			"-allow-missing is set, so we're exiting successfully.",
-		name, module))
+func (c *TaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int {
+	c.showDiagnostics(tfdiags.Sourceless(
+		tfdiags.Warning,
+		"No such resource instance",
+		fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name),
+	))
 	return 0
 }
diff --git a/vendor/github.com/hashicorp/terraform/command/ui_input.go b/vendor/github.com/hashicorp/terraform/command/ui_input.go
index 9c8873d4..5a09bb16 100644
--- a/vendor/github.com/hashicorp/terraform/command/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/command/ui_input.go
@@ -3,6 +3,7 @@ package command
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -11,6 +12,7 @@ import (
 	"os/signal"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"unicode"

 	"github.com/hashicorp/terraform/terraform"
@@ -29,16 +31,19 @@ type UIInput struct {
 	Colorize *colorstring.Colorize

 	// Reader and Writer for IO. If these aren't set, they will default to
-	// Stdout and Stderr respectively.
+	// Stdin and Stdout respectively.
 	Reader io.Reader
 	Writer io.Writer

+	listening int32
+	result    chan string
+
 	interrupted bool
 	l           sync.Mutex
 	once        sync.Once
 }

-func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
+func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) {
 	i.once.Do(i.init)

 	r := i.Reader
@@ -116,20 +121,24 @@ func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
 	}

 	// Listen for the input in a goroutine. This will allow us to
-	// interrupt this if we are interrupted (SIGINT)
-	result := make(chan string, 1)
+	// interrupt this if we are interrupted (SIGINT).
 	go func() {
+		if !atomic.CompareAndSwapInt32(&i.listening, 0, 1) {
+			return // We are already listening for input.
+		}
+		defer atomic.CompareAndSwapInt32(&i.listening, 1, 0)
+
 		buf := bufio.NewReader(r)
 		line, err := buf.ReadString('\n')
 		if err != nil {
 			log.Printf("[ERR] UIInput scan err: %s", err)
 		}
-		result <- strings.TrimRightFunc(line, unicode.IsSpace)
+		i.result <- strings.TrimRightFunc(line, unicode.IsSpace)
 	}()

 	select {
-	case line := <-result:
+	case line := <-i.result:
 		fmt.Fprint(w, "\n")

 		if line == "" {
@@ -137,6 +146,12 @@ func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
 		}

 		return line, nil
+	case <-ctx.Done():
+		// Print a newline so that any further output starts properly
+		// on a new line.
+		fmt.Fprintln(w)
+
+		return "", ctx.Err()
 	case <-sigCh:
 		// Print a newline so that any further output starts properly
 		// on a new line.
@@ -150,6 +165,8 @@ func (i *UIInput) init() {
+	i.result = make(chan string)
+
 	if i.Colorize == nil {
 		i.Colorize = &colorstring.Colorize{
 			Colors: colorstring.DefaultColors,
diff --git a/vendor/github.com/hashicorp/terraform/command/unlock.go b/vendor/github.com/hashicorp/terraform/command/unlock.go
index 666e4f34..8dc7cafc 100644
--- a/vendor/github.com/hashicorp/terraform/command/unlock.go
+++ b/vendor/github.com/hashicorp/terraform/command/unlock.go
@@ -1,11 +1,14 @@
 package command

 import (
+	"context"
 	"fmt"
 	"strings"

-	"github.com/hashicorp/terraform/state"
+	"github.com/hashicorp/terraform/states/statemgr"
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/tfdiags"
 	"github.com/mitchellh/cli"
 )
@@ -16,10 +19,13 @@ type UnlockCommand struct {
 }

 func (c *UnlockCommand) Run(args []string) int {
-	args = c.Meta.process(args, false)
+	args, err := c.Meta.process(args, false)
+	if err != nil {
+		return 1
+	}

-	force := false
-	cmdFlags := c.Meta.flagSet("force-unlock")
+	var force bool
+	cmdFlags := c.Meta.defaultFlagSet("force-unlock")
 	cmdFlags.BoolVar(&force, "force", false, "force")
 	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
 	if err := cmdFlags.Parse(args); err != nil {
@@ -43,31 +49,33 @@ func (c *UnlockCommand) Run(args []string) int {
 		return 1
 	}

+	var diags tfdiags.Diagnostics
+
+	backendConfig, backendDiags := c.loadBackendConfig(configPath)
+	diags = diags.Append(backendDiags)
+	if diags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}
+
 	// Load the backend
-	b, err := c.Backend(&BackendOpts{
-		ConfigPath: configPath,
+	b, backendDiags := c.Backend(&BackendOpts{
+		Config: backendConfig,
 	})
-	if err != nil {
-		c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err))
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
 		return 1
 	}

-	env := c.Env()
-	st, err := b.State(env)
+	env := c.Workspace()
+	stateMgr, err := b.StateMgr(env)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
 		return 1
 	}

-	isLocal := false
-	switch s := st.(type) {
-	case *state.BackupState:
-		if _, ok := s.Real.(*state.LocalState); ok {
-			isLocal = true
-		}
-	case *state.LocalState:
-		isLocal = true
-	}
+	_, isLocal := stateMgr.(*statemgr.Filesystem)

 	if !force {
 		// Forcing this doesn't do anything, but doesn't break anything either,
@@ -81,7 +89,7 @@ func (c *UnlockCommand) Run(args []string) int {
 			"This will allow local Terraform commands to modify this state, even though it\n" +
 			"may still be in use. Only 'yes' will be accepted to confirm."
- v, err := c.UIInput().Input(&terraform.InputOpts{ + v, err := c.UIInput().Input(context.Background(), &terraform.InputOpts{ Id: "force-unlock", Query: "Do you really want to force-unlock?", Description: desc, @@ -96,7 +104,7 @@ func (c *UnlockCommand) Run(args []string) int { } } - if err := st.Unlock(lockID); err != nil { + if err := stateMgr.Unlock(lockID); err != nil { c.Ui.Error(fmt.Sprintf("Failed to unlock state: %s", err)) return 1 } diff --git a/vendor/github.com/hashicorp/terraform/command/untaint.go b/vendor/github.com/hashicorp/terraform/command/untaint.go index adce511f..a23f3c3f 100644 --- a/vendor/github.com/hashicorp/terraform/command/untaint.go +++ b/vendor/github.com/hashicorp/terraform/command/untaint.go @@ -3,11 +3,12 @@ package command import ( "context" "fmt" - "log" "strings" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/command/clistate" - "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // UntaintCommand is a cli.Command implementation that manually untaints @@ -17,23 +18,28 @@ type UntaintCommand struct { } func (c *UntaintCommand) Run(args []string) int { - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } - var allowMissing bool var module string - cmdFlags := c.Meta.flagSet("untaint") + var allowMissing bool + cmdFlags := c.Meta.defaultFlagSet("untaint") cmdFlags.BoolVar(&allowMissing, "allow-missing", false, "module") - cmdFlags.StringVar(&module, "module", "", "module") - cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") - cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path") cmdFlags.BoolVar(&c.Meta.stateLock, "lock", true, "lock state") cmdFlags.DurationVar(&c.Meta.stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&module, "module", "", "module") + cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") + cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 } + var diags tfdiags.Diagnostics + // Require the one argument for the resource to untaint args = cmdFlags.Args() if len(args) != 1 { @@ -42,118 +48,125 @@ func (c *UntaintCommand) Run(args []string) int { return 1 } - name := args[0] - if module == "" { - module = "root" - } else { - module = "root." + module + if module != "" { + c.Ui.Error("The -module option is no longer used. 
Instead, include the module path in the main resource address, like \"module.foo.module.bar.null_resource.baz\".")
+		return 1
+	}
+
+	addr, addrDiags := addrs.ParseAbsResourceInstanceStr(args[0])
+	diags = diags.Append(addrDiags)
+	if addrDiags.HasErrors() {
+		c.showDiagnostics(diags)
+		return 1
+	}

 	// Load the backend
-	b, err := c.Backend(nil)
-	if err != nil {
-		c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err))
+	b, backendDiags := c.Backend(nil)
+	diags = diags.Append(backendDiags)
+	if backendDiags.HasErrors() {
+		c.showDiagnostics(diags)
 		return 1
 	}

 	// Get the state
-	env := c.Env()
-	st, err := b.State(env)
+	workspace := c.Workspace()
+	stateMgr, err := b.StateMgr(workspace)
 	if err != nil {
 		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
 		return 1
 	}

-	if err := st.RefreshState(); err != nil {
-		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
-		return 1
-	}

 	if c.stateLock {
-		lockCtx, cancel := context.WithTimeout(context.Background(), c.stateLockTimeout)
-		defer cancel()
-
-		lockInfo := state.NewLockInfo()
-		lockInfo.Operation = "untaint"
-		lockID, err := clistate.Lock(lockCtx, st, lockInfo, c.Ui, c.Colorize())
-		if err != nil {
+		stateLocker := clistate.NewLocker(context.Background(), c.stateLockTimeout, c.Ui, c.Colorize())
+		if err := stateLocker.Lock(stateMgr, "untaint"); err != nil {
 			c.Ui.Error(fmt.Sprintf("Error locking state: %s", err))
 			return 1
 		}
+		defer stateLocker.Unlock(nil)
+	}

-		defer clistate.Unlock(st, lockID, c.Ui, c.Colorize())
+	if err := stateMgr.RefreshState(); err != nil {
+		c.Ui.Error(fmt.Sprintf("Failed to load state: %s", err))
+		return 1
 	}

 	// Get the actual state structure
-	s := st.State()
-	if s.Empty() {
+	state := stateMgr.State()
+	if state.Empty() {
 		if allowMissing {
-			return c.allowMissingExit(name, module)
+			return c.allowMissingExit(addr)
 		}

-		c.Ui.Error(fmt.Sprintf(
-			"The state is empty. The most common reason for this is that\n" +
-				"an invalid state file path was given or Terraform has never\n " +
-				"been run for this infrastructure. Infrastructure must exist\n" +
-				"for it to be untainted."))
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"No such resource instance",
+			"The state currently contains no resource instances whatsoever. This may occur if the configuration has never been applied or if it has recently been destroyed.",
+		))
+		c.showDiagnostics(diags)
 		return 1
 	}

-	// Get the proper module holding the resource we want to untaint
-	modPath := strings.Split(module, ".")
-	mod := s.ModuleByPath(modPath)
-	if mod == nil {
+	ss := state.SyncWrapper()
+
+	// Get the resource and instance we're going to untaint
+	rs := ss.Resource(addr.ContainingResource())
+	is := ss.ResourceInstance(addr)
+	if is == nil {
 		if allowMissing {
-			return c.allowMissingExit(name, module)
+			return c.allowMissingExit(addr)
 		}

-		c.Ui.Error(fmt.Sprintf(
-			"The module %s could not be found. There is nothing to untaint.",
-			module))
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"No such resource instance",
+			fmt.Sprintf("There is no resource instance in the state with the address %s. If the resource configuration has just been added, you must run \"terraform apply\" once to create the corresponding instance(s) before they can be untainted.", addr),
+		))
+		c.showDiagnostics(diags)
+		return 1
+	}

-	// If there are no resources in this module, it is an error
-	if len(mod.Resources) == 0 {
-		if allowMissing {
-			return c.allowMissingExit(name, module)
+	obj := is.Current
+	if obj == nil {
+		if len(is.Deposed) != 0 {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"No such resource instance",
+				fmt.Sprintf("Resource instance %s is currently part-way through a create_before_destroy replacement action. Run \"terraform apply\" to complete its replacement before untainting it.", addr),
+			))
+		} else {
+			// Don't know why we're here, but we'll produce a generic error message anyway.
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"No such resource instance",
+				fmt.Sprintf("Resource instance %s does not currently have a remote object associated with it, so it cannot be untainted.", addr),
+			))
 		}
-
-		c.Ui.Error(fmt.Sprintf(
-			"The module %s has no resources. There is nothing to untaint.",
-			module))
+		c.showDiagnostics(diags)
 		return 1
 	}

-	// Get the resource we're looking for
-	rs, ok := mod.Resources[name]
-	if !ok {
-		if allowMissing {
-			return c.allowMissingExit(name, module)
-		}
-
-		c.Ui.Error(fmt.Sprintf(
-			"The resource %s couldn't be found in the module %s.",
-			name,
-			module))
+	if obj.Status != states.ObjectTainted {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Resource instance is not tainted",
+			fmt.Sprintf("Resource instance %s is not currently tainted, and so it cannot be untainted.", addr),
+		))
+		c.showDiagnostics(diags)
 		return 1
 	}
+	obj.Status = states.ObjectReady
+	ss.SetResourceInstanceCurrent(addr, obj, rs.ProviderConfig)

-	// Untaint the resource
-	rs.Untaint()
-
-	log.Printf("[INFO] Writing state output to: %s", c.Meta.StateOutPath())
-	if err := st.WriteState(s); err != nil {
+	if err := stateMgr.WriteState(state); err != nil {
 		c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err))
 		return 1
 	}
-	if err := st.PersistState(); err != nil {
+	if err := stateMgr.PersistState(); err != nil {
 		c.Ui.Error(fmt.Sprintf("Error writing state file: %s", err))
 		return 1
 	}

-	c.Ui.Output(fmt.Sprintf(
-		"The resource %s in the module %s has been successfully untainted!",
-		name, module))
+	c.Ui.Output(fmt.Sprintf("Resource instance %s has been successfully untainted.", addr))
 	return 0
 }
@@ -187,8 +200,6 @@ Options:
                       default this will be root. Child modules can be specified
                       by names. Ex. "consul" or "consul.vpc" (nested modules).

-  -no-color           If specified, output won't contain any color.
-
   -state=path         Path to read and save state (unless state-out is
                       specified). Defaults to "terraform.tfstate".
@@ -203,10 +214,11 @@ func (c *UntaintCommand) Synopsis() string {
 	return "Manually unmark a resource as tainted"
 }

-func (c *UntaintCommand) allowMissingExit(name, module string) int {
-	c.Ui.Output(fmt.Sprintf(
-		"The resource %s in the module %s was not found, but\n"+
-			"-allow-missing is set, so we're exiting successfully.",
-		name, module))
+func (c *UntaintCommand) allowMissingExit(name addrs.AbsResourceInstance) int {
+	c.showDiagnostics(tfdiags.Sourceless(
+		tfdiags.Warning,
+		"No such resource instance",
+		fmt.Sprintf("Resource instance %s was not found, but this is not an error because -allow-missing was set.", name),
+	))
 	return 0
 }
diff --git a/vendor/github.com/hashicorp/terraform/command/validate.go b/vendor/github.com/hashicorp/terraform/command/validate.go
index f693da24..d3488003 100644
--- a/vendor/github.com/hashicorp/terraform/command/validate.go
+++ b/vendor/github.com/hashicorp/terraform/command/validate.go
@@ -1,12 +1,15 @@
 package command

 import (
-	"flag"
+	"encoding/json"
 	"fmt"
 	"path/filepath"
 	"strings"

-	"github.com/hashicorp/terraform/config"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/terraform"
+	"github.com/hashicorp/terraform/tfdiags"
 )

 // ValidateCommand is a Command implementation that validates the terraform files
@@ -17,15 +20,36 @@ type ValidateCommand struct {

 const defaultPath = "."

 func (c *ValidateCommand) Run(args []string) int {
-	args = c.Meta.process(args, false)
-	var dirPath string
+	args, err := c.Meta.process(args, true)
+	if err != nil {
+		return 1
+	}
+
+	if c.Meta.variableArgs.items == nil {
+		c.Meta.variableArgs = newRawFlags("-var")
+	}
+	varValues := c.Meta.variableArgs.Alias("-var")
+	varFiles := c.Meta.variableArgs.Alias("-var-file")

-	cmdFlags := flag.NewFlagSet("validate", flag.ContinueOnError)
+	var jsonOutput bool
+	cmdFlags := c.Meta.defaultFlagSet("validate")
+	cmdFlags.BoolVar(&jsonOutput, "json", false, "produce JSON output")
+	cmdFlags.Var(varValues, "var", "variables")
+	cmdFlags.Var(varFiles, "var-file", "variable file")
 	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
 	if err := cmdFlags.Parse(args); err != nil {
 		return 1
 	}

+	// After this point, we must only produce JSON output if JSON mode is
+	// enabled, so all errors should be accumulated into diags and we'll
+	// print out a suitable result at the end, depending on the format
+	// selection. All returns from this point on must be tail-calls into
+	// c.showResults in order to produce the expected output.
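+	// For example, in JSON mode even a failure to resolve the module
+	// directory below is reported as a diagnostic inside the JSON
+	// document rather than as plain text on stderr.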
+ var diags tfdiags.Diagnostics + args = cmdFlags.Args() + + var dirPath string if len(args) == 1 { dirPath = args[0] } else { @@ -33,13 +57,164 @@ func (c *ValidateCommand) Run(args []string) int { } dir, err := filepath.Abs(dirPath) if err != nil { - c.Ui.Error(fmt.Sprintf( - "Unable to locate directory %v\n", err.Error())) + diags = diags.Append(fmt.Errorf("unable to locate module: %s", err)) + return c.showResults(diags, jsonOutput) + } + + // Check for user-supplied plugin path + if c.pluginPath, err = c.loadPluginPath(); err != nil { + diags = diags.Append(fmt.Errorf("error loading plugin path: %s", err)) + return c.showResults(diags, jsonOutput) } - rtnCode := c.validate(dir) + validateDiags := c.validate(dir) + diags = diags.Append(validateDiags) - return rtnCode + return c.showResults(diags, jsonOutput) +} + +func (c *ValidateCommand) validate(dir string) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg, cfgDiags := c.loadConfig(dir) + diags = diags.Append(cfgDiags) + + if diags.HasErrors() { + return diags + } + + // "validate" is to check if the given module is valid regardless of + // input values, current state, etc. Therefore we populate all of the + // input values with unknown values of the expected type, allowing us + // to perform a type check without assuming any particular values. + varValues := make(terraform.InputValues) + for name, variable := range cfg.Module.Variables { + ty := variable.Type + if ty == cty.NilType { + // Can't predict the type at all, so we'll just mark it as + // cty.DynamicVal (unknown value of cty.DynamicPseudoType). + ty = cty.DynamicPseudoType + } + varValues[name] = &terraform.InputValue{ + Value: cty.UnknownVal(ty), + SourceType: terraform.ValueFromCLIArg, + } + } + + opts := c.contextOpts() + opts.Config = cfg + opts.Variables = varValues + + tfCtx, ctxDiags := terraform.NewContext(opts) + diags = diags.Append(ctxDiags) + if ctxDiags.HasErrors() { + return diags + } + + validateDiags := tfCtx.Validate() + diags = diags.Append(validateDiags) + return diags +} + +func (c *ValidateCommand) showResults(diags tfdiags.Diagnostics, jsonOutput bool) int { + switch { + case jsonOutput: + // FIXME: Eventually we'll probably want to factor this out somewhere + // to support machine-readable outputs for other commands too, but for + // now it's simplest to do this inline here. + type Pos struct { + Line int `json:"line"` + Column int `json:"column"` + Byte int `json:"byte"` + } + type Range struct { + Filename string `json:"filename"` + Start Pos `json:"start"` + End Pos `json:"end"` + } + type Diagnostic struct { + Severity string `json:"severity,omitempty"` + Summary string `json:"summary,omitempty"` + Detail string `json:"detail,omitempty"` + Range *Range `json:"range,omitempty"` + } + type Output struct { + // We include some summary information that is actually redundant + // with the detailed diagnostics, but avoids the need for callers + // to re-implement our logic for deciding these. 
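+			// A rough sketch of the resulting document, assuming a single
+			// error and no warnings:
+			//   {"valid": false, "error_count": 1, "warning_count": 0,
+			//    "diagnostics": [{"severity": "error", "summary": "..."}]}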
+ Valid bool `json:"valid"` + ErrorCount int `json:"error_count"` + WarningCount int `json:"warning_count"` + Diagnostics []Diagnostic `json:"diagnostics"` + } + + var output Output + output.Valid = true // until proven otherwise + for _, diag := range diags { + var jsonDiag Diagnostic + switch diag.Severity() { + case tfdiags.Error: + jsonDiag.Severity = "error" + output.ErrorCount++ + output.Valid = false + case tfdiags.Warning: + jsonDiag.Severity = "warning" + output.WarningCount++ + } + + desc := diag.Description() + jsonDiag.Summary = desc.Summary + jsonDiag.Detail = desc.Detail + + ranges := diag.Source() + if ranges.Subject != nil { + subj := ranges.Subject + jsonDiag.Range = &Range{ + Filename: subj.Filename, + Start: Pos{ + Line: subj.Start.Line, + Column: subj.Start.Column, + Byte: subj.Start.Byte, + }, + End: Pos{ + Line: subj.End.Line, + Column: subj.End.Column, + Byte: subj.End.Byte, + }, + } + } + + output.Diagnostics = append(output.Diagnostics, jsonDiag) + } + if output.Diagnostics == nil { + // Make sure this always appears as an array in our output, since + // this is easier to consume for dynamically-typed languages. + output.Diagnostics = []Diagnostic{} + } + + j, err := json.MarshalIndent(&output, "", " ") + if err != nil { + // Should never happen because we fully-control the input here + panic(err) + } + c.Ui.Output(string(j)) + + default: + if len(diags) == 0 { + c.Ui.Output(c.Colorize().Color("[green][bold]Success![reset] The configuration is valid.\n")) + } else { + c.showDiagnostics(diags) + + if !diags.HasErrors() { + c.Ui.Output(c.Colorize().Color("[green][bold]Success![reset] The configuration is valid, but there were some validation warnings as shown above.\n")) + } + } + } + + if diags.HasErrors() { + return 1 + } + return 0 } func (c *ValidateCommand) Synopsis() string { @@ -48,35 +223,37 @@ func (c *ValidateCommand) Synopsis() string { func (c *ValidateCommand) Help() string { helpText := ` -Usage: terraform validate [options] [path] +Usage: terraform validate [options] [dir] + + Validate the configuration files in a directory, referring only to the + configuration and not accessing any remote services such as remote state, + provider APIs, etc. - Reads the Terraform files in the given path (directory) and - validates their syntax and basic semantics. + Validate runs checks that verify whether a configuration is + internally-consistent, regardless of any provided variables or existing + state. It is thus primarily useful for general verification of reusable + modules, including correctness of attribute names and value types. - This is not a full validation that is normally done with - a plan or apply operation, but can be used to verify the basic - syntax and usage of Terraform configurations is correct. + It is safe to run this command automatically, for example as a post-save + check in a text editor or as a test step for a re-usable module in a CI + system. + + Validation requires an initialized working directory with any referenced + plugins and modules installed. To initialize a working directory for + validation without accessing any configured remote backend, use: + terraform init -backend=false + + If dir is not specified, then the current directory will be used. + + To verify configuration in the context of a particular run (a particular + target workspace, operation variables, etc), use the terraform plan + subcommand instead, which includes an implied validation check. Options: - -no-color If specified, output won't contain any color. 
+ -json Produce output in a machine-readable JSON format, suitable for + use in e.g. text editor integrations. ` return strings.TrimSpace(helpText) } - -func (c *ValidateCommand) validate(dir string) int { - cfg, err := config.LoadDir(dir) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error loading files %v\n", err.Error())) - return 1 - } - err = cfg.Validate() - if err != nil { - c.Ui.Error(fmt.Sprintf( - "Error validating: %v\n", err.Error())) - return 1 - } - return 0 -} diff --git a/vendor/github.com/hashicorp/terraform/command/version.go b/vendor/github.com/hashicorp/terraform/command/version.go index d4c3a2f8..9c9e3072 100644 --- a/vendor/github.com/hashicorp/terraform/command/version.go +++ b/vendor/github.com/hashicorp/terraform/command/version.go @@ -3,6 +3,7 @@ package command import ( "bytes" "fmt" + "sort" ) // VersionCommand is a Command implementation prints the version. @@ -34,7 +35,10 @@ func (c *VersionCommand) Help() string { func (c *VersionCommand) Run(args []string) int { var versionString bytes.Buffer - args = c.Meta.process(args, false) + args, err := c.Meta.process(args, false) + if err != nil { + return 1 + } fmt.Fprintf(&versionString, "Terraform v%s", c.Version) if c.VersionPrerelease != "" { @@ -47,22 +51,64 @@ func (c *VersionCommand) Run(args []string) int { c.Ui.Output(versionString.String()) + // We'll also attempt to print out the selected plugin versions. We can + // do this only if "terraform init" was already run and thus we've committed + // to a specific set of plugins. If not, the plugins lock will be empty + // and so we'll show _no_ providers. + // + // Generally-speaking this is a best-effort thing that will give us a good + // result in the usual case where the user successfully ran "terraform init" + // and then hit a problem running _another_ command. + providerPlugins := c.providerPluginSet() + pluginsLockFile := c.providerPluginsLock() + pluginsLock := pluginsLockFile.Read() + var pluginVersions []string + for meta := range providerPlugins { + name := meta.Name + wantHash, wanted := pluginsLock[name] + if !wanted { + // Ignore providers that aren't used by the current config at all + continue + } + gotHash, err := meta.SHA256() + if err != nil { + // if we can't read the file to hash it, ignore it. + continue + } + if !bytes.Equal(gotHash, wantHash) { + // Not the plugin we've locked, so ignore it. + continue + } + + // If we get here then we've found a selected plugin, so we'll print + // out its details. + if meta.Version == "0.0.0" { + pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s (unversioned)", name)) + } else { + pluginVersions = append(pluginVersions, fmt.Sprintf("+ provider.%s v%s", name, meta.Version)) + } + } + if len(pluginVersions) != 0 { + sort.Strings(pluginVersions) + for _, str := range pluginVersions { + c.Ui.Output(str) + } + } + // If we have a version check function, then let's check for // the latest version as well. if c.CheckFunc != nil { - // Separate the prior output with a newline - c.Ui.Output("") // Check the latest version info, err := c.CheckFunc() if err != nil { c.Ui.Error(fmt.Sprintf( - "Error checking latest version: %s", err)) + "\nError checking latest version: %s", err)) } if info.Outdated { c.Ui.Output(fmt.Sprintf( - "Your version of Terraform is out of date! The latest version\n"+ - "is %s. You can update by downloading from www.terraform.io", + "\nYour version of Terraform is out of date! The latest version\n"+ + "is %s. 
You can update by downloading from www.terraform.io/downloads.html", info.Latest)) } } diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_command.go b/vendor/github.com/hashicorp/terraform/command/workspace_command.go new file mode 100644 index 00000000..ebf3aece --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_command.go @@ -0,0 +1,140 @@ +package command + +import ( + "net/url" + "strings" + + "github.com/mitchellh/cli" +) + +// WorkspaceCommand is a Command Implementation that manipulates workspaces, +// which allow multiple distinct states and variables from a single config. +type WorkspaceCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + envCommandShowWarning(c.Ui, c.LegacyName) + + cmdFlags := c.Meta.extendedFlagSet("workspace") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + + c.Ui.Output(c.Help()) + return 0 +} + +func (c *WorkspaceCommand) Help() string { + helpText := ` +Usage: terraform workspace + + New, list, select and delete Terraform workspaces. + +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceCommand) Synopsis() string { + return "Workspace management" +} + +// validWorkspaceName returns true is this name is valid to use as a workspace name. +// Since most named states are accessed via a filesystem path or URL, check if +// escaping the name would be required. +func validWorkspaceName(name string) bool { + return name == url.PathEscape(name) +} + +func envCommandShowWarning(ui cli.Ui, show bool) { + if !show { + return + } + + ui.Warn(`Warning: the "terraform env" family of commands is deprecated. + +"Workspace" is now the preferred term for what earlier Terraform versions +called "environment", to reduce ambiguity caused by the latter term colliding +with other concepts. + +The "terraform workspace" commands should be used instead. "terraform env" +will be removed in a future Terraform version. +`) +} + +const ( + envNotSupported = `Backend does not support multiple workspaces` + + envExists = `Workspace %q already exists` + + envDoesNotExist = ` +Workspace %q doesn't exist. + +You can create this workspace with the "new" subcommand.` + + envChanged = `[reset][green]Switched to workspace %q.` + + envCreated = ` +[reset][green][bold]Created and switched to workspace %q![reset][green] + +You're now on a new, empty workspace. Workspaces isolate their state, +so if you run "terraform plan" Terraform will not see any existing state +for this configuration. +` + + envDeleted = `[reset][green]Deleted workspace %q!` + + envNotEmpty = ` +Workspace %[1]q is not empty. + +Deleting %[1]q can result in dangling resources: resources that +exist but are no longer manageable by Terraform. Please destroy +these resources first. If you want to delete this workspace +anyway and risk dangling resources, use the '-force' flag. +` + + envWarnNotEmpty = `[reset][yellow]WARNING: %q was non-empty. +The resources managed by the deleted workspace may still exist, +but are no longer manageable by Terraform since the state has +been deleted. +` + + envDelCurrent = ` +Workspace %[1]q is your active workspace. + +You cannot delete the currently active workspace. Please switch +to another workspace and try again. +` + + envInvalidName = ` +The workspace name %q is not allowed. The name must contain only URL safe +characters, and no path separators. 
+` + + envIsOverriddenNote = ` + +The active workspace is being overridden using the TF_WORKSPACE environment +variable. +` + + envIsOverriddenSelectError = ` +The selected workspace is currently overridden using the TF_WORKSPACE +environment variable. + +To select a new workspace, either update this environment variable or unset +it and then run this command again. +` + + envIsOverriddenNewError = ` +The workspace is currently overridden using the TF_WORKSPACE environment +variable. You cannot create a new workspace when using this setting. + +To create a new workspace, either unset this environment variable or update it +to match the workspace name you are trying to create, and then run this command +again. +` +) diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_delete.go b/vendor/github.com/hashicorp/terraform/command/workspace_delete.go new file mode 100644 index 00000000..e363b5e6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_delete.go @@ -0,0 +1,201 @@ +package command + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +type WorkspaceDeleteCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceDeleteCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + envCommandShowWarning(c.Ui, c.LegacyName) + + var force bool + var stateLock bool + var stateLockTimeout time.Duration + cmdFlags := c.Meta.defaultFlagSet("workspace delete") + cmdFlags.BoolVar(&force, "force", false, "force removal of a non-empty workspace") + cmdFlags.BoolVar(&stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + if len(args) == 0 { + c.Ui.Error("expected NAME.\n") + return cli.RunResultHelp + } + + workspace := args[0] + + if !validWorkspaceName(workspace) { + c.Ui.Error(fmt.Sprintf(envInvalidName, workspace)) + return 1 + } + + configPath, err := ModulePath(args[1:]) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + workspaces, err := b.Workspaces() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + exists := false + for _, ws := range workspaces { + if workspace == ws { + exists = true + break + } + } + + if !exists { + c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDoesNotExist), workspace)) + return 1 + } + + if workspace == c.Workspace() { + c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envDelCurrent), workspace)) + return 1 + } + + // we need the actual state to see if it's empty + stateMgr, err := b.StateMgr(workspace) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var stateLocker clistate.Locker + if stateLock { + stateLocker = clistate.NewLocker(context.Background(), stateLockTimeout, c.Ui, c.Colorize()) + if err := 
stateLocker.Lock(stateMgr, "workspace_delete"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) + return 1 + } + } else { + stateLocker = clistate.NewNoopLocker() + } + + if err := stateMgr.RefreshState(); err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + hasResources := stateMgr.State().HasResources() + + if hasResources && !force { + c.Ui.Error(fmt.Sprintf(strings.TrimSpace(envNotEmpty), workspace)) + return 1 + } + + // We need to release the lock just before deleting the state, in case + // the backend can't remove the resource while holding the lock. This + // is currently true for Windows local files. + // + // TODO: While there is little safety in locking while deleting the + // state, it might be nice to be able to coordinate processes around + // state deletion, i.e. in a CI environment. Adding Delete() as a + // required method of States would allow the removal of the resource to + // be delegated from the Backend to the State itself. + stateLocker.Unlock(nil) + + err = b.DeleteWorkspace(workspace) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + c.Ui.Output( + c.Colorize().Color( + fmt.Sprintf(envDeleted, workspace), + ), + ) + + if hasResources { + c.Ui.Output( + c.Colorize().Color( + fmt.Sprintf(envWarnNotEmpty, workspace), + ), + ) + } + + return 0 +} + +func (c *WorkspaceDeleteCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "select" subcommand itself (already matched) + c.completePredictWorkspaceName(), + complete.PredictDirs(""), + } +} + +func (c *WorkspaceDeleteCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-force": complete.PredictNothing, + } +} + +func (c *WorkspaceDeleteCommand) Help() string { + helpText := ` +Usage: terraform workspace delete [OPTIONS] NAME [DIR] + + Delete a Terraform workspace + + +Options: + + -force remove a non-empty workspace. + + -lock=true Lock the state file when locking is supported. + + -lock-timeout=0s Duration to retry a state lock. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceDeleteCommand) Synopsis() string { + return "Delete a workspace" +} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_list.go b/vendor/github.com/hashicorp/terraform/command/workspace_list.go new file mode 100644 index 00000000..93eeb6c1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_list.go @@ -0,0 +1,103 @@ +package command + +import ( + "bytes" + "strings" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/posener/complete" +) + +type WorkspaceListCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceListCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + envCommandShowWarning(c.Ui, c.LegacyName) + + cmdFlags := c.Meta.defaultFlagSet("workspace list") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + configPath, err := ModulePath(args) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + states, err := b.Workspaces() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + env, isOverridden := c.WorkspaceOverridden() + + var out bytes.Buffer + for _, s := range states { + if s == env { + out.WriteString("* ") + } else { + out.WriteString(" ") + } + out.WriteString(s + "\n") + } + + c.Ui.Output(out.String()) + + if isOverridden { + c.Ui.Output(envIsOverriddenNote) + } + + return 0 +} + +func (c *WorkspaceListCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictDirs("") +} + +func (c *WorkspaceListCommand) AutocompleteFlags() complete.Flags { + return nil +} + +func (c *WorkspaceListCommand) Help() string { + helpText := ` +Usage: terraform workspace list [DIR] + + List Terraform workspaces. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceListCommand) Synopsis() string { + return "List Workspaces" +} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_new.go b/vendor/github.com/hashicorp/terraform/command/workspace_new.go new file mode 100644 index 00000000..e26984d8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_new.go @@ -0,0 +1,198 @@ +package command + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/hashicorp/terraform/command/clistate" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +type WorkspaceNewCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceNewCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + envCommandShowWarning(c.Ui, c.LegacyName) + + var stateLock bool + var stateLockTimeout time.Duration + var statePath string + cmdFlags := c.Meta.defaultFlagSet("workspace new") + cmdFlags.BoolVar(&stateLock, "lock", true, "lock state") + cmdFlags.DurationVar(&stateLockTimeout, "lock-timeout", 0, "lock timeout") + cmdFlags.StringVar(&statePath, "state", "", "terraform state file") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + if len(args) == 0 { + c.Ui.Error("Expected a single argument: NAME.\n") + return cli.RunResultHelp + } + + workspace := args[0] + + if !validWorkspaceName(workspace) { + c.Ui.Error(fmt.Sprintf(envInvalidName, workspace)) + return 1 + } + + // You can't ask to create a workspace when you're overriding the + // workspace name to be something different. 
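The comment above leads into the TF_WORKSPACE handling: when that environment variable is set it overrides the locally selected workspace, and "workspace new"/"workspace select" refuse to proceed unless the names agree. A small sketch of that override rule, with localSelection standing in for whatever Terraform reads from its data directory:

```go
package main

import (
	"fmt"
	"os"
)

// workspaceOverridden mirrors the precedence described above: TF_WORKSPACE,
// when non-empty, wins over the local selection.
func workspaceOverridden(localSelection string) (string, bool) {
	if v := os.Getenv("TF_WORKSPACE"); v != "" {
		return v, true
	}
	return localSelection, false
}

func main() {
	os.Setenv("TF_WORKSPACE", "ci")
	current, isOverridden := workspaceOverridden("default")
	if isOverridden && current != "staging" {
		// This is the situation envIsOverriddenNewError reports.
		fmt.Println("cannot create workspace \"staging\": overridden by TF_WORKSPACE =", current)
	}
}
```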
+ if current, isOverridden := c.WorkspaceOverridden(); current != workspace && isOverridden { + c.Ui.Error(envIsOverriddenNewError) + return 1 + } + + configPath, err := ModulePath(args[1:]) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + workspaces, err := b.Workspaces() + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to get configured named states: %s", err)) + return 1 + } + for _, ws := range workspaces { + if workspace == ws { + c.Ui.Error(fmt.Sprintf(envExists, workspace)) + return 1 + } + } + + _, err = b.StateMgr(workspace) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // now set the current workspace locally + if err := c.SetWorkspace(workspace); err != nil { + c.Ui.Error(fmt.Sprintf("Error selecting new workspace: %s", err)) + return 1 + } + + c.Ui.Output(c.Colorize().Color(fmt.Sprintf( + strings.TrimSpace(envCreated), workspace))) + + if statePath == "" { + // if we're not loading a state, then we're done + return 0 + } + + // load the new Backend state + stateMgr, err := b.StateMgr(workspace) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + if stateLock { + stateLocker := clistate.NewLocker(context.Background(), stateLockTimeout, c.Ui, c.Colorize()) + if err := stateLocker.Lock(stateMgr, "workspace_new"); err != nil { + c.Ui.Error(fmt.Sprintf("Error locking state: %s", err)) + return 1 + } + defer stateLocker.Unlock(nil) + } + + // read the existing state file + f, err := os.Open(statePath) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + stateFile, err := statefile.Read(f) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + // save the existing state in the new Backend. + err = stateMgr.WriteState(stateFile.State) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + err = stateMgr.PersistState() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + return 0 +} + +func (c *WorkspaceNewCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "new" subcommand itself (already matched) + complete.PredictAnything, + complete.PredictDirs(""), + } +} + +func (c *WorkspaceNewCommand) AutocompleteFlags() complete.Flags { + return complete.Flags{ + "-state": complete.PredictFiles("*.tfstate"), + } +} + +func (c *WorkspaceNewCommand) Help() string { + helpText := ` +Usage: terraform workspace new [OPTIONS] NAME [DIR] + + Create a new Terraform workspace. + + +Options: + + -lock=true Lock the state file when locking is supported. + + -lock-timeout=0s Duration to retry a state lock. + + -state=path Copy an existing state file into the new workspace. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceNewCommand) Synopsis() string { + return "Create a new workspace" +} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_select.go b/vendor/github.com/hashicorp/terraform/command/workspace_select.go new file mode 100644 index 00000000..28b1755d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_select.go @@ -0,0 +1,142 @@ +package command + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/tfdiags" + "github.com/mitchellh/cli" + "github.com/posener/complete" +) + +type WorkspaceSelectCommand struct { + Meta + LegacyName bool +} + +func (c *WorkspaceSelectCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + envCommandShowWarning(c.Ui, c.LegacyName) + + cmdFlags := c.Meta.defaultFlagSet("workspace select") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + args = cmdFlags.Args() + if len(args) == 0 { + c.Ui.Error("Expected a single argument: NAME.\n") + return cli.RunResultHelp + } + + configPath, err := ModulePath(args[1:]) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + var diags tfdiags.Diagnostics + + backendConfig, backendDiags := c.loadBackendConfig(configPath) + diags = diags.Append(backendDiags) + if diags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + current, isOverridden := c.WorkspaceOverridden() + if isOverridden { + c.Ui.Error(envIsOverriddenSelectError) + return 1 + } + + // Load the backend + b, backendDiags := c.Backend(&BackendOpts{ + Config: backendConfig, + }) + diags = diags.Append(backendDiags) + if backendDiags.HasErrors() { + c.showDiagnostics(diags) + return 1 + } + + if err != nil { + c.Ui.Error(fmt.Sprintf("Failed to load backend: %s", err)) + return 1 + } + + name := args[0] + if !validWorkspaceName(name) { + c.Ui.Error(fmt.Sprintf(envInvalidName, name)) + return 1 + } + + states, err := b.Workspaces() + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + if name == current { + // already using this workspace + return 0 + } + + found := false + for _, s := range states { + if name == s { + found = true + break + } + } + + if !found { + c.Ui.Error(fmt.Sprintf(envDoesNotExist, name)) + return 1 + } + + err = c.SetWorkspace(name) + if err != nil { + c.Ui.Error(err.Error()) + return 1 + } + + c.Ui.Output( + c.Colorize().Color( + fmt.Sprintf(envChanged, name), + ), + ) + + return 0 +} + +func (c *WorkspaceSelectCommand) AutocompleteArgs() complete.Predictor { + return completePredictSequence{ + complete.PredictNothing, // the "select" subcommand itself (already matched) + c.completePredictWorkspaceName(), + complete.PredictDirs(""), + } +} + +func (c *WorkspaceSelectCommand) AutocompleteFlags() complete.Flags { + return nil +} + +func (c *WorkspaceSelectCommand) Help() string { + helpText := ` +Usage: terraform workspace select NAME [DIR] + + Select a different Terraform workspace. 
+ +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceSelectCommand) Synopsis() string { + return "Select a workspace" +} diff --git a/vendor/github.com/hashicorp/terraform/command/workspace_show.go b/vendor/github.com/hashicorp/terraform/command/workspace_show.go new file mode 100644 index 00000000..cccc4f58 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/command/workspace_show.go @@ -0,0 +1,50 @@ +package command + +import ( + "strings" + + "github.com/posener/complete" +) + +type WorkspaceShowCommand struct { + Meta +} + +func (c *WorkspaceShowCommand) Run(args []string) int { + args, err := c.Meta.process(args, true) + if err != nil { + return 1 + } + + cmdFlags := c.Meta.extendedFlagSet("workspace show") + cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } + if err := cmdFlags.Parse(args); err != nil { + return 1 + } + + workspace := c.Workspace() + c.Ui.Output(workspace) + + return 0 +} + +func (c *WorkspaceShowCommand) AutocompleteArgs() complete.Predictor { + return complete.PredictNothing +} + +func (c *WorkspaceShowCommand) AutocompleteFlags() complete.Flags { + return nil +} + +func (c *WorkspaceShowCommand) Help() string { + helpText := ` +Usage: terraform workspace show + + Show the name of the current workspace. +` + return strings.TrimSpace(helpText) +} + +func (c *WorkspaceShowCommand) Synopsis() string { + return "Show the name of the current workspace" +} diff --git a/vendor/github.com/hashicorp/terraform/commands.go b/vendor/github.com/hashicorp/terraform/commands.go index 409f4d85..00240936 100644 --- a/vendor/github.com/hashicorp/terraform/commands.go +++ b/vendor/github.com/hashicorp/terraform/commands.go @@ -1,13 +1,23 @@ package main import ( + "log" "os" "os/signal" "github.com/hashicorp/terraform/command" + pluginDiscovery "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" "github.com/mitchellh/cli" ) +// runningInAutomationEnvName gives the name of an environment variable that +// can be set to any non-empty value in order to suppress certain messages +// that assume that Terraform is being run from a command prompt. +const runningInAutomationEnvName = "TF_IN_AUTOMATION" + // Commands is the mapping of all the available Terraform commands. var Commands map[string]cli.CommandFactory var PlumbingCommands map[string]struct{} @@ -20,19 +30,37 @@ const ( OutputPrefix = "o:" ) -func init() { - Ui = &cli.PrefixedUi{ - AskPrefix: OutputPrefix, - OutputPrefix: OutputPrefix, - InfoPrefix: OutputPrefix, - ErrorPrefix: ErrorPrefix, - Ui: &cli.BasicUi{Writer: os.Stdout}, +func initCommands(config *Config, services *disco.Disco) { + var inAutomation bool + if v := os.Getenv(runningInAutomationEnvName); v != "" { + inAutomation = true } + for userHost, hostConfig := range config.Hosts { + host, err := svchost.ForComparison(userHost) + if err != nil { + // We expect the config was already validated by the time we get + // here, so we'll just ignore invalid hostnames. 
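The workspace commands above all gate names through validWorkspaceName, which treats a name as usable only if URL path-escaping it is a no-op; that rules out spaces, slashes, and anything else that would break the filesystem paths and URLs used to address named states. The same check, lifted into a runnable demo:

```go
package main

import (
	"fmt"
	"net/url"
)

// validWorkspaceName is the check from workspace_command.go above: a name
// is valid only if escaping it for a URL path changes nothing.
func validWorkspaceName(name string) bool {
	return name == url.PathEscape(name)
}

func main() {
	for _, n := range []string{"default", "staging-eu", "has space", "a/b"} {
		fmt.Printf("%-14q valid: %v\n", n, validWorkspaceName(n))
	}
}
```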
+ continue + } + services.ForceHostServices(host, hostConfig.Services) + } + + dataDir := os.Getenv("TF_DATA_DIR") + meta := command.Meta{ - Color: true, - ContextOpts: &ContextOpts, - Ui: Ui, + Color: true, + GlobalPluginDirs: globalPluginDirs(), + PluginOverrides: &PluginOverrides, + Ui: Ui, + + Services: services, + + RunningInAutomation: inAutomation, + PluginCacheDir: config.PluginCacheDir, + OverrideDataDir: dataDir, + + ShutdownCh: makeShutdownCh(), } // The command list is included in the terraform -help @@ -45,58 +73,62 @@ func init() { "state": struct{}{}, // includes all subcommands "debug": struct{}{}, // includes all subcommands "force-unlock": struct{}{}, + "push": struct{}{}, + "0.12upgrade": struct{}{}, } Commands = map[string]cli.CommandFactory{ "apply": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "console": func() (cli.Command, error) { return &command.ConsoleCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "destroy": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - Destroy: true, - ShutdownCh: makeShutdownCh(), + Meta: meta, + Destroy: true, }, nil }, "env": func() (cli.Command, error) { - return &command.EnvCommand{ - Meta: meta, + return &command.WorkspaceCommand{ + Meta: meta, + LegacyName: true, }, nil }, "env list": func() (cli.Command, error) { - return &command.EnvListCommand{ - Meta: meta, + return &command.WorkspaceListCommand{ + Meta: meta, + LegacyName: true, }, nil }, "env select": func() (cli.Command, error) { - return &command.EnvSelectCommand{ - Meta: meta, + return &command.WorkspaceSelectCommand{ + Meta: meta, + LegacyName: true, }, nil }, "env new": func() (cli.Command, error) { - return &command.EnvNewCommand{ - Meta: meta, + return &command.WorkspaceNewCommand{ + Meta: meta, + LegacyName: true, }, nil }, "env delete": func() (cli.Command, error) { - return &command.EnvDeleteCommand{ - Meta: meta, + return &command.WorkspaceDeleteCommand{ + Meta: meta, + LegacyName: true, }, nil }, @@ -148,6 +180,18 @@ func init() { }, nil }, + "providers": func() (cli.Command, error) { + return &command.ProvidersCommand{ + Meta: meta, + }, nil + }, + + "providers schema": func() (cli.Command, error) { + return &command.ProvidersSchemaCommand{ + Meta: meta, + }, nil + }, + "push": func() (cli.Command, error) { return &command.PushCommand{ Meta: meta, @@ -194,10 +238,52 @@ func init() { }, nil }, + "workspace": func() (cli.Command, error) { + return &command.WorkspaceCommand{ + Meta: meta, + }, nil + }, + + "workspace list": func() (cli.Command, error) { + return &command.WorkspaceListCommand{ + Meta: meta, + }, nil + }, + + "workspace select": func() (cli.Command, error) { + return &command.WorkspaceSelectCommand{ + Meta: meta, + }, nil + }, + + "workspace show": func() (cli.Command, error) { + return &command.WorkspaceShowCommand{ + Meta: meta, + }, nil + }, + + "workspace new": func() (cli.Command, error) { + return &command.WorkspaceNewCommand{ + Meta: meta, + }, nil + }, + + "workspace delete": func() (cli.Command, error) { + return &command.WorkspaceDeleteCommand{ + Meta: meta, + }, nil + }, + //----------------------------------------------------------- // Plumbing //----------------------------------------------------------- + "0.12upgrade": func() (cli.Command, error) { + return &command.ZeroTwelveUpgradeCommand{ + Meta: meta, + }, nil + }, + "debug": func() (cli.Command, error) { return &command.DebugCommand{ Meta: 
meta, @@ -228,13 +314,17 @@ func init() { "state rm": func() (cli.Command, error) { return &command.StateRmCommand{ - Meta: meta, + StateMeta: command.StateMeta{ + Meta: meta, + }, }, nil }, "state mv": func() (cli.Command, error) { return &command.StateMvCommand{ - Meta: meta, + StateMeta: command.StateMeta{ + Meta: meta, + }, }, nil }, @@ -275,3 +365,45 @@ func makeShutdownCh() <-chan struct{} { return resultCh } + +func credentialsSource(config *Config) auth.CredentialsSource { + creds := auth.NoCredentials + if len(config.Credentials) > 0 { + staticTable := map[svchost.Hostname]map[string]interface{}{} + for userHost, creds := range config.Credentials { + host, err := svchost.ForComparison(userHost) + if err != nil { + // We expect the config was already validated by the time we get + // here, so we'll just ignore invalid hostnames. + continue + } + staticTable[host] = creds + } + creds = auth.StaticCredentialsSource(staticTable) + } + + for helperType, helperConfig := range config.CredentialsHelpers { + log.Printf("[DEBUG] Searching for credentials helper named %q", helperType) + available := pluginDiscovery.FindPlugins("credentials", globalPluginDirs()) + available = available.WithName(helperType) + if available.Count() == 0 { + log.Printf("[ERROR] Unable to find credentials helper %q; ignoring", helperType) + break + } + + selected := available.Newest() + + helperSource := auth.HelperProgramCredentialsSource(selected.Path, helperConfig.Args...) + creds = auth.Credentials{ + creds, + auth.CachingCredentialsSource(helperSource), // cached because external operation may be slow/expensive + } + + // There should only be zero or one "credentials_helper" blocks. We + // assume that the config was validated earlier and so we don't check + // for extras here. 
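credentialsSource above layers a static table built from the CLI config under an optional external credentials helper, with the first source to answer winning. A simplified stand-in for that chain; these interfaces are sketches, not terraform's svchost/auth types:

```go
package main

import "fmt"

// credentialsSource is a sketch of the lookup contract: given a hostname,
// return a token if this source knows one.
type credentialsSource interface {
	TokenForHost(host string) (string, bool)
}

// staticCredentials plays the role of the config-file table.
type staticCredentials map[string]string

func (s staticCredentials) TokenForHost(host string) (string, bool) {
	tok, ok := s[host]
	return tok, ok
}

// sourceChain consults each source in order; first hit wins.
type sourceChain []credentialsSource

func (c sourceChain) TokenForHost(host string) (string, bool) {
	for _, src := range c {
		if tok, ok := src.TokenForHost(host); ok {
			return tok, true
		}
	}
	return "", false
}

func main() {
	creds := sourceChain{
		staticCredentials{"app.terraform.io": "token-from-cli-config"},
		// a credentials-helper-backed source would be appended here
	}
	fmt.Println(creds.TokenForHost("app.terraform.io"))
}
```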
+ break + } + + return creds +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/communicator.go b/vendor/github.com/hashicorp/terraform/communicator/communicator.go new file mode 100644 index 00000000..12b725b3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/communicator.go @@ -0,0 +1,149 @@ +package communicator + +import ( + "context" + "fmt" + "io" + "log" + "sync/atomic" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/communicator/ssh" + "github.com/hashicorp/terraform/communicator/winrm" + "github.com/hashicorp/terraform/terraform" +) + +// Communicator is an interface that must be implemented by all communicators +// used for any of the provisioners +type Communicator interface { + // Connect is used to setup the connection + Connect(terraform.UIOutput) error + + // Disconnect is used to terminate the connection + Disconnect() error + + // Timeout returns the configured connection timeout + Timeout() time.Duration + + // ScriptPath returns the configured script path + ScriptPath() string + + // Start executes a remote command in a new session + Start(*remote.Cmd) error + + // Upload is used to upload a single file + Upload(string, io.Reader) error + + // UploadScript is used to upload a file as an executable script + UploadScript(string, io.Reader) error + + // UploadDir is used to upload a directory + UploadDir(string, string) error +} + +// New returns a configured Communicator or an error if the connection type is not supported +func New(s *terraform.InstanceState) (Communicator, error) { + connType := s.Ephemeral.ConnInfo["type"] + switch connType { + case "ssh", "": // The default connection type is ssh, so if connType is empty use ssh + return ssh.New(s) + case "winrm": + return winrm.New(s) + default: + return nil, fmt.Errorf("connection type '%s' not supported", connType) + } +} + +// maxBackoffDelay is the maximum delay between retry attempts +var maxBackoffDelay = 20 * time.Second +var initialBackoffDelay = time.Second + +// Fatal is an interface that error values can return to halt Retry +type Fatal interface { + FatalError() error +} + +// Retry retries the function f until it returns a nil error, a Fatal error, or +// the context expires. +func Retry(ctx context.Context, f func() error) error { + // container for atomic error value + type errWrap struct { + E error + } + + // Try the function in a goroutine + var errVal atomic.Value + doneCh := make(chan struct{}) + go func() { + defer close(doneCh) + + delay := time.Duration(0) + for { + // If our context ended, we want to exit right away. 
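The loop this comment introduces implements capped exponential backoff: the delay doubles after each failed attempt, starts at initialBackoffDelay, tops out at maxBackoffDelay, and the context can cancel the wait at any point. A condensed, runnable sketch of that loop with the durations scaled down so it finishes quickly:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retry is a condensed version of the backoff loop above: wait (zero on the
// first pass), try f, double the delay on failure, cap it, repeat until f
// succeeds or the context ends.
func retry(ctx context.Context, f func() error) error {
	const (
		initial = 10 * time.Millisecond
		max     = 80 * time.Millisecond
	)
	var delay time.Duration
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}

		if err := f(); err == nil {
			return nil
		}

		delay *= 2
		if delay == 0 {
			delay = initial
		}
		if delay > max {
			delay = max
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	attempts := 0
	err := retry(ctx, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("not ready yet")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```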
+ select { + case <-ctx.Done(): + return + case <-time.After(delay): + } + + // Try the function call + err := f() + + // return if we have no error, or a FatalError + done := false + switch e := err.(type) { + case nil: + done = true + case Fatal: + err = e.FatalError() + done = true + } + + errVal.Store(errWrap{err}) + + if done { + return + } + + log.Printf("[WARN] retryable error: %v", err) + + delay *= 2 + + if delay == 0 { + delay = initialBackoffDelay + } + + if delay > maxBackoffDelay { + delay = maxBackoffDelay + } + + log.Printf("[INFO] sleeping for %s", delay) + } + }() + + // Wait for completion + select { + case <-ctx.Done(): + case <-doneCh: + } + + var lastErr error + // Check if we got an error executing + if ev, ok := errVal.Load().(errWrap); ok { + lastErr = ev.E + } + + // Check if we have a context error to check if we're interrupted or timeout + switch ctx.Err() { + case context.Canceled: + return fmt.Errorf("interrupted - last error: %v", lastErr) + case context.DeadlineExceeded: + return fmt.Errorf("timeout - last error: %v", lastErr) + } + + if lastErr != nil { + return lastErr + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/communicator_mock.go b/vendor/github.com/hashicorp/terraform/communicator/communicator_mock.go new file mode 100644 index 00000000..49304a07 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/communicator_mock.go @@ -0,0 +1,106 @@ +package communicator + +import ( + "bytes" + "fmt" + "io" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/terraform" +) + +// MockCommunicator is an implementation of Communicator that can be used for tests. +type MockCommunicator struct { + RemoteScriptPath string + Commands map[string]bool + Uploads map[string]string + UploadScripts map[string]string + UploadDirs map[string]string + CommandFunc func(*remote.Cmd) error + DisconnectFunc func() error + ConnTimeout time.Duration +} + +// Connect implementation of communicator.Communicator interface +func (c *MockCommunicator) Connect(o terraform.UIOutput) error { + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func (c *MockCommunicator) Disconnect() error { + if c.DisconnectFunc != nil { + return c.DisconnectFunc() + } + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *MockCommunicator) Timeout() time.Duration { + if c.ConnTimeout != 0 { + return c.ConnTimeout + } + return time.Duration(5 * time.Second) +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *MockCommunicator) ScriptPath() string { + return c.RemoteScriptPath +} + +// Start implementation of communicator.Communicator interface +func (c *MockCommunicator) Start(r *remote.Cmd) error { + r.Init() + + if c.CommandFunc != nil { + return c.CommandFunc(r) + } + + if !c.Commands[r.Command] { + return fmt.Errorf("Command not found!") + } + + r.SetExitStatus(0, nil) + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *MockCommunicator) Upload(path string, input io.Reader) error { + f, ok := c.Uploads[path] + if !ok { + return fmt.Errorf("Path %q not found!", path) + } + + var buf bytes.Buffer + buf.ReadFrom(input) + content := strings.TrimSpace(buf.String()) + + f = strings.TrimSpace(f) + if f != content { + return fmt.Errorf("expected: %q\n\ngot: %q\n", f, content) + } + + return nil +} + +// UploadScript 
implementation of communicator.Communicator interface +func (c *MockCommunicator) UploadScript(path string, input io.Reader) error { + c.Uploads = c.UploadScripts + return c.Upload(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *MockCommunicator) UploadDir(dst string, src string) error { + v, ok := c.UploadDirs[src] + if !ok { + return fmt.Errorf("Directory not found!") + } + + if v != dst { + return fmt.Errorf("expected: %q\n\ngot: %q\n", v, dst) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/remote/command.go b/vendor/github.com/hashicorp/terraform/communicator/remote/command.go new file mode 100644 index 00000000..4c90368e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/remote/command.go @@ -0,0 +1,96 @@ +package remote + +import ( + "fmt" + "io" + "sync" +) + +// Cmd represents a remote command being prepared or run. +type Cmd struct { + // Command is the command to run remotely. This is executed as if + // it were a shell command, so you are expected to do any shell escaping + // necessary. + Command string + + // Stdin specifies the process's standard input. If Stdin is + // nil, the process reads from an empty bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr represent the process's standard output and + // error. + // + // If either is nil, it will be set to ioutil.Discard. + Stdout io.Writer + Stderr io.Writer + + // Once Wait returns, his will contain the exit code of the process. + exitStatus int + + // Internal fields + exitCh chan struct{} + + // err is used to store any error reported by the Communicator during + // execution. + err error + + // This thing is a mutex, lock when making modifications concurrently + sync.Mutex +} + +// Init must be called by the Communicator before executing the command. +func (c *Cmd) Init() { + c.Lock() + defer c.Unlock() + + c.exitCh = make(chan struct{}) +} + +// SetExitStatus stores the exit status of the remote command as well as any +// communicator related error. SetExitStatus then unblocks any pending calls +// to Wait. +// This should only be called by communicators executing the remote.Cmd. +func (c *Cmd) SetExitStatus(status int, err error) { + c.Lock() + defer c.Unlock() + + c.exitStatus = status + c.err = err + + close(c.exitCh) +} + +// Wait waits for the remote command to complete. +// Wait may return an error from the communicator, or an ExitError if the +// process exits with a non-zero exit status. +func (c *Cmd) Wait() error { + <-c.exitCh + + c.Lock() + defer c.Unlock() + + if c.err != nil || c.exitStatus != 0 { + return &ExitError{ + Command: c.Command, + ExitStatus: c.exitStatus, + Err: c.err, + } + } + + return nil +} + +// ExitError is returned by Wait to indicate and error executing the remote +// command, or a non-zero exit status. 
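remote.Cmd above relies on the close-a-channel idiom: SetExitStatus records the result under the mutex and closes exitCh, so any number of Wait callers unblock exactly once. The same shape, reduced to a standalone demo:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// cmd is a cut-down remote.Cmd: the mutex guards the fields, and closing
// exitCh is the broadcast that the command finished.
type cmd struct {
	sync.Mutex
	exitStatus int
	err        error
	exitCh     chan struct{}
}

func (c *cmd) init() { c.exitCh = make(chan struct{}) }

// setExitStatus stores the result and wakes every waiter.
func (c *cmd) setExitStatus(status int, err error) {
	c.Lock()
	defer c.Unlock()
	c.exitStatus = status
	c.err = err
	close(c.exitCh)
}

// wait blocks until setExitStatus has run, then reads the result.
func (c *cmd) wait() (int, error) {
	<-c.exitCh
	c.Lock()
	defer c.Unlock()
	return c.exitStatus, c.err
}

func main() {
	c := &cmd{}
	c.init()
	go func() {
		time.Sleep(10 * time.Millisecond) // pretend the remote command runs
		c.setExitStatus(0, nil)
	}()
	status, err := c.wait()
	fmt.Println(status, err) // 0 <nil>
}
```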
+type ExitError struct { + Command string + ExitStatus int + Err error +} + +func (e *ExitError) Error() string { + if e.Err != nil { + return fmt.Sprintf("error executing %q: %v", e.Command, e.Err) + } + return fmt.Sprintf("%q exit status: %d", e.Command, e.ExitStatus) +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/shared/shared.go b/vendor/github.com/hashicorp/terraform/communicator/shared/shared.go new file mode 100644 index 00000000..39cb1696 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/shared/shared.go @@ -0,0 +1,17 @@ +package shared + +import ( + "fmt" + "net" +) + +// IpFormat formats the IP correctly, so we don't provide IPv6 address in an IPv4 format during node communication. We return the ip parameter as is if it's an IPv4 address or a hostname. +func IpFormat(ip string) string { + ipObj := net.ParseIP(ip) + // Return the ip/host as is if it's either a hostname or an IPv4 address. + if ipObj == nil || ipObj.To4() != nil { + return ip + } + + return fmt.Sprintf("[%s]", ip) +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go b/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go new file mode 100644 index 00000000..5b814e3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/ssh/communicator.go @@ -0,0 +1,769 @@ +package ssh + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/terraform" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +const ( + // DefaultShebang is added at the top of a SSH script file + DefaultShebang = "#!/bin/sh\n" + + // enable ssh keeplive probes by default + keepAliveInterval = 2 * time.Second +) + +// randShared is a global random generator object that is shared. +// This must be shared since it is seeded by the current time and +// creating multiple can result in the same values. By using a shared +// RNG we assure different numbers per call. +var randLock sync.Mutex +var randShared *rand.Rand + +// Communicator represents the SSH communicator +type Communicator struct { + connInfo *connectionInfo + client *ssh.Client + config *sshConfig + conn net.Conn + address string + cancelKeepAlive context.CancelFunc + + lock sync.Mutex +} + +type sshConfig struct { + // The configuration of the Go SSH connection + config *ssh.ClientConfig + + // connection returns a new connection. The current connection + // in use will be closed as part of the Close method, or in the + // case an error occurs. + connection func() (net.Conn, error) + + // noPty, if true, will not request a pty from the remote end. + noPty bool + + // sshAgent is a struct surrounding the agent.Agent client and the net.Conn + // to the SSH Agent. It is nil if no SSH agent is configured + sshAgent *sshAgent +} + +type fatalError struct { + error +} + +func (e fatalError) FatalError() error { + return e.error +} + +// New creates a new communicator implementation over SSH. +func New(s *terraform.InstanceState) (*Communicator, error) { + connInfo, err := parseConnectionInfo(s) + if err != nil { + return nil, err + } + + config, err := prepareSSHConfig(connInfo) + if err != nil { + return nil, err + } + + // Setup the random number generator once. 
The seed value is the + // time multiplied by the PID. This can overflow the int64 but that + // is okay. We multiply by the PID in case we have multiple processes + // grabbing this at the same time. This is possible with Terraform and + // if we communicate to the same host at the same instance, we could + // overwrite the same files. Multiplying by the PID prevents this. + randLock.Lock() + defer randLock.Unlock() + if randShared == nil { + randShared = rand.New(rand.NewSource( + time.Now().UnixNano() * int64(os.Getpid()))) + } + + comm := &Communicator{ + connInfo: connInfo, + config: config, + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o terraform.UIOutput) (err error) { + // Grab a lock so we can modify our internal attributes + c.lock.Lock() + defer c.lock.Unlock() + + if c.conn != nil { + c.conn.Close() + } + + // Set the conn and client to nil since we'll recreate it + c.conn = nil + c.client = nil + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via SSH...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " Certificate: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: %t", + c.connInfo.Host, c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.PrivateKey != "", + c.connInfo.Certificate != "", + c.connInfo.Agent, + c.connInfo.HostKey != "", + )) + + if c.connInfo.BastionHost != "" { + o.Output(fmt.Sprintf( + "Using configured bastion host...\n"+ + " Host: %s\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " Private key: %t\n"+ + " SSH Agent: %t\n"+ + " Checking Host Key: %t", + c.connInfo.BastionHost, c.connInfo.BastionUser, + c.connInfo.BastionPassword != "", + c.connInfo.BastionPrivateKey != "", + c.connInfo.Agent, + c.connInfo.BastionHostKey != "", + )) + } + } + + log.Printf("[DEBUG] connecting to TCP connection for SSH") + c.conn, err = c.config.connection() + if err != nil { + // Explicitly set this to the REAL nil. Connection() can return + // a nil implementation of net.Conn which will make the + // "if c.conn == nil" check fail above. Read here for more information + // on this psychotic language feature: + // + // http://golang.org/doc/faq#nil_error + c.conn = nil + + log.Printf("[ERROR] connection error: %s", err) + return err + } + + log.Printf("[DEBUG] handshaking with SSH") + host := fmt.Sprintf("%s:%d", c.connInfo.Host, c.connInfo.Port) + sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, host, c.config.config) + if err != nil { + // While in theory this should be a fatal error, some hosts may start + // the ssh service before it is properly configured, or before user + // authentication data is available. + // Log the error, and allow the provisioner to retry. 
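This is where the Fatal interface from communicator.Retry pays off: ordinary errors, like a host whose sshd is not up yet, stay retryable, while errors wrapped in fatalError abort the retry loop immediately. A small demo of that classification; classify is a demo-only helper, while Fatal and fatalError match the definitions above:

```go
package main

import (
	"errors"
	"fmt"
)

// Fatal is the contract Retry checks for: an error exposing FatalError()
// halts retrying.
type Fatal interface {
	FatalError() error
}

// fatalError is the thin wrapper the ssh package uses to mark an error
// as non-retryable.
type fatalError struct{ error }

func (e fatalError) FatalError() error { return e.error }

// classify does the same type switch Retry performs on each attempt's error.
func classify(err error) string {
	switch e := err.(type) {
	case Fatal:
		return "fatal, stop retrying: " + e.FatalError().Error()
	default:
		return "retryable: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(errors.New("connection refused")))
	fmt.Println(classify(fatalError{errors.New("ssh agent forwarding failed")}))
}
```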
+ log.Printf("[WARN] %s", err) + return err + } + + c.client = ssh.NewClient(sshConn, sshChan, req) + + if c.config.sshAgent != nil { + log.Printf("[DEBUG] Telling SSH config to forward to agent") + if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil { + return fatalError{err} + } + + log.Printf("[DEBUG] Setting up a session to request agent forwarding") + session, err := c.newSession() + if err != nil { + return err + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + + if err == nil { + log.Printf("[INFO] agent forwarding enabled") + } else { + log.Printf("[WARN] error forwarding agent: %s", err) + } + } + + if err != nil { + return err + } + + if o != nil { + o.Output("Connected!") + } + + ctx, cancelKeepAlive := context.WithCancel(context.TODO()) + c.cancelKeepAlive = cancelKeepAlive + + // Start a keepalive goroutine to help maintain the connection for + // long-running commands. + log.Printf("[DEBUG] starting ssh KeepAlives") + go func() { + t := time.NewTicker(keepAliveInterval) + defer t.Stop() + for { + select { + case <-t.C: + // there's no useful response to these, just abort when there's + // an error. + _, _, err := c.client.SendRequest("keepalive@terraform.io", true, nil) + if err != nil { + return + } + case <-ctx.Done(): + return + } + } + }() + + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func (c *Communicator) Disconnect() error { + c.lock.Lock() + defer c.lock.Unlock() + + if c.cancelKeepAlive != nil { + c.cancelKeepAlive() + } + + if c.config.sshAgent != nil { + if err := c.config.sshAgent.Close(); err != nil { + return err + } + } + + if c.conn != nil { + conn := c.conn + c.conn = nil + return conn.Close() + } + + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + randLock.Lock() + defer randLock.Unlock() + + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(randShared.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(cmd *remote.Cmd) error { + cmd.Init() + + session, err := c.newSession() + if err != nil { + return err + } + + // Setup our session + session.Stdin = cmd.Stdin + session.Stdout = cmd.Stdout + session.Stderr = cmd.Stderr + + if !c.config.noPty { + // Request a PTY + termModes := ssh.TerminalModes{ + ssh.ECHO: 0, // do not echo + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + + if err := session.RequestPty("xterm", 80, 40, termModes); err != nil { + return err + } + } + + log.Printf("[DEBUG] starting remote command: %s", cmd.Command) + err = session.Start(strings.TrimSpace(cmd.Command) + "\n") + if err != nil { + return err + } + + // Start a goroutine to wait for the session to end and set the + // exit boolean and status. 
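Earlier in Connect, a keepalive goroutine protects long-running commands: a ticker fires every keepAliveInterval, each tick sends a probe over the SSH connection, and either a failed probe or cancelling the context stops the loop. A condensed sketch, with sendProbe standing in for the SSH keepalive request:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// startKeepAlive mirrors the loop in Connect above: probe on a fixed
// interval until the probe fails or the context is cancelled.
func startKeepAlive(ctx context.Context, interval time.Duration, sendProbe func() error) {
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				if err := sendProbe(); err != nil {
					return // the connection is gone; just stop probing
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	startKeepAlive(ctx, 20*time.Millisecond, func() error {
		fmt.Println("keepalive")
		return nil
	})
	time.Sleep(70 * time.Millisecond)
	cancel()
	time.Sleep(30 * time.Millisecond)
}
```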
+	go func() {
+		defer session.Close()
+
+		err := session.Wait()
+		exitStatus := 0
+		if err != nil {
+			exitErr, ok := err.(*ssh.ExitError)
+			if ok {
+				exitStatus = exitErr.ExitStatus()
+			}
+		}
+
+		cmd.SetExitStatus(exitStatus, err)
+		log.Printf("[DEBUG] remote command exited with '%d': %s", exitStatus, cmd.Command)
+	}()
+
+	return nil
+}
+
+// Upload implementation of communicator.Communicator interface
+func (c *Communicator) Upload(path string, input io.Reader) error {
+	// The target directory and file for talking the SCP protocol
+	targetDir := filepath.Dir(path)
+	targetFile := filepath.Base(path)
+
+	// On windows, filepath.Dir uses backslash separators (i.e. "\tmp").
+	// This does not work when the target host is unix. Switch to forward slash
+	// which works for unix and windows
+	targetDir = filepath.ToSlash(targetDir)
+
+	// Skip copying if we can get the file size directly from common io.Readers
+	size := int64(0)
+
+	switch src := input.(type) {
+	case *os.File:
+		fi, err := src.Stat()
+		if err == nil {
+			size = fi.Size()
+		}
+	case *bytes.Buffer:
+		size = int64(src.Len())
+	case *bytes.Reader:
+		size = int64(src.Len())
+	case *strings.Reader:
+		size = int64(src.Len())
+	}
+
+	scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
+		return scpUploadFile(targetFile, input, w, stdoutR, size)
+	}
+
+	return c.scpSession("scp -vt "+targetDir, scpFunc)
+}
+
+// UploadScript implementation of communicator.Communicator interface
+func (c *Communicator) UploadScript(path string, input io.Reader) error {
+	reader := bufio.NewReader(input)
+	prefix, err := reader.Peek(2)
+	if err != nil {
+		return fmt.Errorf("Error reading script: %s", err)
+	}
+
+	var script bytes.Buffer
+	if string(prefix) != "#!" {
+		script.WriteString(DefaultShebang)
+	}
+
+	script.ReadFrom(reader)
+	if err := c.Upload(path, &script); err != nil {
+		return err
+	}
+
+	var stdout, stderr bytes.Buffer
+	cmd := &remote.Cmd{
+		Command: fmt.Sprintf("chmod 0777 %s", path),
+		Stdout:  &stdout,
+		Stderr:  &stderr,
+	}
+	if err := c.Start(cmd); err != nil {
+		return fmt.Errorf(
+			"Error chmodding script file to 0777 in remote "+
+				"machine: %s", err)
+	}
+
+	if err := cmd.Wait(); err != nil {
+		return fmt.Errorf(
+			"Error chmodding script file to 0777 in remote "+
+				"machine %v: %s %s", err, stdout.String(), stderr.String())
+	}
+
+	return nil
+}
+
+// UploadDir implementation of communicator.Communicator interface
+func (c *Communicator) UploadDir(dst string, src string) error {
+	log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst)
+	scpFunc := func(w io.Writer, r *bufio.Reader) error {
+		uploadEntries := func() error {
+			f, err := os.Open(src)
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+
+			entries, err := f.Readdir(-1)
+			if err != nil {
+				return err
+			}
+
+			return scpUploadDir(src, entries, w, r)
+		}
+
+		if src[len(src)-1] != '/' {
+			log.Printf("[DEBUG] No trailing slash, creating the source directory name")
+			return scpUploadDirProtocol(filepath.Base(src), w, r, uploadEntries)
+		}
+		// Trailing slash, so only upload the contents
+		return uploadEntries()
+	}
+
+	return c.scpSession("scp -rvt "+dst, scpFunc)
+}
+
+func (c *Communicator) newSession() (session *ssh.Session, err error) {
+	log.Println("[DEBUG] opening new ssh session")
+	if c.client == nil {
+		err = errors.New("ssh client is not connected")
+	} else {
+		session, err = c.client.NewSession()
+	}
+
+	if err != nil {
+		log.Printf("[WARN] ssh session open error: '%s', attempting reconnect", err)
+		if err := c.Connect(nil); err != nil {
+			return nil, err
+		}
+
+		return c.client.NewSession()
+	}
+
+	return session, nil
+}
+
+func (c *Communicator) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error {
+	session, err := c.newSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+
+	// Get a pipe to stdin so that we can send data down
+	stdinW, err := session.StdinPipe()
+	if err != nil {
+		return err
+	}
+
+	// We only want to close once, so we nil w after we close it,
+	// and only close in the defer if it hasn't been closed already.
+	defer func() {
+		if stdinW != nil {
+			stdinW.Close()
+		}
+	}()
+
+	// Get a pipe to stdout so that we can get responses back
+	stdoutPipe, err := session.StdoutPipe()
+	if err != nil {
+		return err
+	}
+	stdoutR := bufio.NewReader(stdoutPipe)
+
+	// Set stderr to a bytes buffer
+	stderr := new(bytes.Buffer)
+	session.Stderr = stderr
+
+	// Start the sink mode on the other side
+	// TODO(mitchellh): There are probably issues with shell escaping the path
+	log.Println("[DEBUG] Starting remote scp process: ", scpCommand)
+	if err := session.Start(scpCommand); err != nil {
+		return err
+	}
+
+	// Call our callback that executes in the context of SCP. We ignore
+	// EOF errors if they occur because it usually means that SCP prematurely
+	// ended on the other side.
+	log.Println("[DEBUG] Started SCP session, beginning transfers...")
+	if err := f(stdinW, stdoutR); err != nil && err != io.EOF {
+		return err
+	}
+
+	// Close the stdin, which sends an EOF, and then set w to nil so that
+	// our defer func doesn't close it again since that is unsafe with
+	// the Go SSH package.
+	log.Println("[DEBUG] SCP session complete, closing stdin pipe.")
+	stdinW.Close()
+	stdinW = nil
+
+	// Wait for the SCP connection to close, meaning it has consumed all
+	// our data and has completed. Or has errored.
+	log.Println("[DEBUG] Waiting for SSH session to complete.")
+	err = session.Wait()
+
+	// log any stderr before exiting on an error
+	scpErr := stderr.String()
+	if len(scpErr) > 0 {
+		log.Printf("[ERROR] scp stderr: %q", stderr)
+	}
+
+	if err != nil {
+		if exitErr, ok := err.(*ssh.ExitError); ok {
+			// We have an ExitError, meaning we can just read
+			// the exit status
+			log.Printf("[ERROR] %s", exitErr)
+
+			// If we exited with status 127, it means SCP isn't available.
+			// Return a more descriptive error for that.
+			if exitErr.ExitStatus() == 127 {
+				return errors.New(
+					"SCP failed to start. This usually means that SCP is not\n" +
+						"properly installed on the remote system.")
+			}
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+// checkSCPStatus checks that a prior command sent to SCP completed
+// successfully. If it did not complete successfully, an error will
+// be returned.
+func checkSCPStatus(r *bufio.Reader) error {
+	code, err := r.ReadByte()
+	if err != nil {
+		return err
+	}
+
+	if code != 0 {
+		// Treat any non-zero (really 1 and 2) as fatal errors
+		message, _, err := r.ReadLine()
+		if err != nil {
+			return fmt.Errorf("Error reading error message: %s", err)
+		}
+
+		return errors.New(string(message))
+	}
+
+	return nil
+}
+
+func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, size int64) error {
+	if size == 0 {
+		// Create a temporary file where we can copy the contents of the src
+		// so that we can determine the length, since SCP is length-prefixed.
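+		// (The "C" record written below, e.g. "C0644 1234 script.sh",
+		// must announce the exact byte count up front, so a reader of
+		// unknown length is first spooled to disk to measure it.)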
+ tf, err := ioutil.TempFile("", "terraform-upload") + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + defer os.Remove(tf.Name()) + defer tf.Close() + + log.Println("[DEBUG] Copying input data into temporary file so we can read the length") + if _, err := io.Copy(tf, src); err != nil { + return err + } + + // Sync the file so that the contents are definitely on disk, then + // read the length of it. + if err := tf.Sync(); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + // Seek the file to the beginning so we can re-read all of it + if _, err := tf.Seek(0, 0); err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + fi, err := tf.Stat() + if err != nil { + return fmt.Errorf("Error creating temporary file for upload: %s", err) + } + + src = tf + size = fi.Size() + } + + // Start the protocol + log.Println("[DEBUG] Beginning file upload...") + fmt.Fprintln(w, "C0644", size, dst) + if err := checkSCPStatus(r); err != nil { + return err + } + + if _, err := io.Copy(w, src); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + if err := checkSCPStatus(r); err != nil { + return err + } + + return nil +} + +func scpUploadDirProtocol(name string, w io.Writer, r *bufio.Reader, f func() error) error { + log.Printf("[DEBUG] SCP: starting directory upload: %s", name) + fmt.Fprintln(w, "D0755 0", name) + err := checkSCPStatus(r) + if err != nil { + return err + } + + if err := f(); err != nil { + return err + } + + fmt.Fprintln(w, "E") + if err != nil { + return err + } + + return nil +} + +func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) error { + for _, fi := range fs { + realPath := filepath.Join(root, fi.Name()) + + // Track if this is actually a symlink to a directory. If it is + // a symlink to a file we don't do any special behavior because uploading + // a file just works. If it is a directory, we need to know so we + // treat it as such. + isSymlinkToDir := false + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + symPath, err := filepath.EvalSymlinks(realPath) + if err != nil { + return err + } + + symFi, err := os.Lstat(symPath) + if err != nil { + return err + } + + isSymlinkToDir = symFi.IsDir() + } + + if !fi.IsDir() && !isSymlinkToDir { + // It is a regular file (or symlink to a file), just upload it + f, err := os.Open(realPath) + if err != nil { + return err + } + + err = func() error { + defer f.Close() + return scpUploadFile(fi.Name(), f, w, r, fi.Size()) + }() + + if err != nil { + return err + } + + continue + } + + // It is a directory, recursively upload + err := scpUploadDirProtocol(fi.Name(), w, r, func() error { + f, err := os.Open(realPath) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(realPath, entries, w, r) + }) + if err != nil { + return err + } + } + + return nil +} + +// ConnectFunc is a convenience method for returning a function +// that just uses net.Dial to communicate with the remote end that +// is suitable for use with the SSH communicator configuration. 
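+//
+// A minimal usage sketch (the address is illustrative only):
+//
+//	dial := ConnectFunc("tcp", "203.0.113.10:22")
+//	conn, err := dial()
+//	if err != nil {
+//		// handle dial failure
+//	}
+//	defer conn.Close()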
+func ConnectFunc(network, addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + c, err := net.DialTimeout(network, addr, 15*time.Second) + if err != nil { + return nil, err + } + + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + } + + return c, nil + } +} + +// BastionConnectFunc is a convenience method for returning a function +// that connects to a host over a bastion connection. +func BastionConnectFunc( + bProto string, + bAddr string, + bConf *ssh.ClientConfig, + proto string, + addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + log.Printf("[DEBUG] Connecting to bastion: %s", bAddr) + bastion, err := ssh.Dial(bProto, bAddr, bConf) + if err != nil { + return nil, fmt.Errorf("Error connecting to bastion: %s", err) + } + + log.Printf("[DEBUG] Connecting via bastion (%s) to host: %s", bAddr, addr) + conn, err := bastion.Dial(proto, addr) + if err != nil { + bastion.Close() + return nil, err + } + + // Wrap it up so we close both things properly + return &bastionConn{ + Conn: conn, + Bastion: bastion, + }, nil + } +} + +type bastionConn struct { + net.Conn + Bastion *ssh.Client +} + +func (c *bastionConn) Close() error { + c.Conn.Close() + return c.Bastion.Close() +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/ssh/password.go b/vendor/github.com/hashicorp/terraform/communicator/ssh/password.go new file mode 100644 index 00000000..8b32c8d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/ssh/password.go @@ -0,0 +1,28 @@ +package ssh + +import ( + "log" + + "golang.org/x/crypto/ssh" +) + +// An implementation of ssh.KeyboardInteractiveChallenge that simply sends +// back the password for all questions. The questions are logged. +func PasswordKeyboardInteractive(password string) ssh.KeyboardInteractiveChallenge { + return func(user, instruction string, questions []string, echos []bool) ([]string, error) { + log.Printf("Keyboard interactive challenge: ") + log.Printf("-- User: %s", user) + log.Printf("-- Instructions: %s", instruction) + for i, question := range questions { + log.Printf("-- Question %d: %s", i+1, question) + } + + // Just send the password back for all questions + answers := make([]string, len(questions)) + for i := range answers { + answers[i] = string(password) + } + + return answers, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/ssh/provisioner.go b/vendor/github.com/hashicorp/terraform/communicator/ssh/provisioner.go new file mode 100644 index 00000000..21d73b27 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/ssh/provisioner.go @@ -0,0 +1,480 @@ +package ssh + +import ( + "bytes" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/shared" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/mapstructure" + "github.com/xanzy/ssh-agent" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + "golang.org/x/crypto/ssh/knownhosts" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "root" + + // DefaultPort is used if there is no port given + DefaultPort = 22 + + // DefaultScriptPath is used as the path to copy the file to + // for remote execution if not provided otherwise. 
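+	// The %RAND% token is replaced with a random integer by ScriptPath in
+	// communicator.go, so concurrent provisioners get distinct paths.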
+ DefaultScriptPath = "/tmp/terraform_%RAND%.sh" + + // DefaultTimeout is used if there is no timeout given + DefaultTimeout = 5 * time.Minute +) + +// connectionInfo is decoded from the ConnInfo of the resource. These are the +// only keys we look at. If a PrivateKey is given, that is used instead +// of a password. +type connectionInfo struct { + User string + Password string + PrivateKey string `mapstructure:"private_key"` + Certificate string `mapstructure:"certificate"` + Host string + HostKey string `mapstructure:"host_key"` + Port int + Agent bool + Timeout string + ScriptPath string `mapstructure:"script_path"` + TimeoutVal time.Duration `mapstructure:"-"` + + BastionUser string `mapstructure:"bastion_user"` + BastionPassword string `mapstructure:"bastion_password"` + BastionPrivateKey string `mapstructure:"bastion_private_key"` + BastionHost string `mapstructure:"bastion_host"` + BastionHostKey string `mapstructure:"bastion_host_key"` + BastionPort int `mapstructure:"bastion_port"` + + AgentIdentity string `mapstructure:"agent_identity"` +} + +// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into +// a ConnectionInfo struct +func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) { + connInfo := &connectionInfo{} + decConf := &mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + Result: connInfo, + } + dec, err := mapstructure.NewDecoder(decConf) + if err != nil { + return nil, err + } + if err := dec.Decode(s.Ephemeral.ConnInfo); err != nil { + return nil, err + } + + // To default Agent to true, we need to check the raw string, since the + // decoded boolean can't represent "absence of config". + // + // And if SSH_AUTH_SOCK is not set, there's no agent to connect to, so we + // shouldn't try. + if s.Ephemeral.ConnInfo["agent"] == "" && os.Getenv("SSH_AUTH_SOCK") != "" { + connInfo.Agent = true + } + + if connInfo.User == "" { + connInfo.User = DefaultUser + } + + // Format the host if needed. + // Needed for IPv6 support. + connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + connInfo.Port = DefaultPort + } + if connInfo.ScriptPath == "" { + connInfo.ScriptPath = DefaultScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + // Default all bastion config attrs to their non-bastion counterparts + if connInfo.BastionHost != "" { + // Format the bastion host if needed. + // Needed for IPv6 support. + connInfo.BastionHost = shared.IpFormat(connInfo.BastionHost) + + if connInfo.BastionUser == "" { + connInfo.BastionUser = connInfo.User + } + if connInfo.BastionPassword == "" { + connInfo.BastionPassword = connInfo.Password + } + if connInfo.BastionPrivateKey == "" { + connInfo.BastionPrivateKey = connInfo.PrivateKey + } + if connInfo.BastionPort == 0 { + connInfo.BastionPort = connInfo.Port + } + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +// prepareSSHConfig is used to turn the *ConnectionInfo provided into a +// usable *SSHConfig for client initialization. 
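+//
+// When a bastion host is configured, the returned connection function dials
+// the bastion first and tunnels the target connection through it; otherwise
+// the target host is dialed directly.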
+func prepareSSHConfig(connInfo *connectionInfo) (*sshConfig, error) { + sshAgent, err := connectToAgent(connInfo) + if err != nil { + return nil, err + } + + host := fmt.Sprintf("%s:%d", connInfo.Host, connInfo.Port) + + sshConf, err := buildSSHClientConfig(sshClientConfigOpts{ + user: connInfo.User, + host: host, + privateKey: connInfo.PrivateKey, + password: connInfo.Password, + hostKey: connInfo.HostKey, + certificate: connInfo.Certificate, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + connectFunc := ConnectFunc("tcp", host) + + var bastionConf *ssh.ClientConfig + if connInfo.BastionHost != "" { + bastionHost := fmt.Sprintf("%s:%d", connInfo.BastionHost, connInfo.BastionPort) + + bastionConf, err = buildSSHClientConfig(sshClientConfigOpts{ + user: connInfo.BastionUser, + host: bastionHost, + privateKey: connInfo.BastionPrivateKey, + password: connInfo.BastionPassword, + hostKey: connInfo.HostKey, + sshAgent: sshAgent, + }) + if err != nil { + return nil, err + } + + connectFunc = BastionConnectFunc("tcp", bastionHost, bastionConf, "tcp", host) + } + + config := &sshConfig{ + config: sshConf, + connection: connectFunc, + sshAgent: sshAgent, + } + return config, nil +} + +type sshClientConfigOpts struct { + privateKey string + password string + sshAgent *sshAgent + certificate string + user string + host string + hostKey string +} + +func buildSSHClientConfig(opts sshClientConfigOpts) (*ssh.ClientConfig, error) { + hkCallback := ssh.InsecureIgnoreHostKey() + + if opts.hostKey != "" { + // The knownhosts package only takes paths to files, but terraform + // generally wants to handle config data in-memory. Rather than making + // the known_hosts file an exception, write out the data to a temporary + // file to create the HostKeyCallback. + tf, err := ioutil.TempFile("", "tf-known_hosts") + if err != nil { + return nil, fmt.Errorf("failed to create temp known_hosts file: %s", err) + } + defer tf.Close() + defer os.RemoveAll(tf.Name()) + + // we mark this as a CA as well, but the host key fallback will still + // use it as a direct match if the remote host doesn't return a + // certificate. 
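+		// The resulting known_hosts entry looks roughly like:
+		//   @cert-authority example.com:22 ssh-ed25519 AAAA...
+		// (hostname and key type here are illustrative only).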
+ if _, err := tf.WriteString(fmt.Sprintf("@cert-authority %s %s\n", opts.host, opts.hostKey)); err != nil { + return nil, fmt.Errorf("failed to write temp known_hosts file: %s", err) + } + tf.Sync() + + hkCallback, err = knownhosts.New(tf.Name()) + if err != nil { + return nil, err + } + } + + conf := &ssh.ClientConfig{ + HostKeyCallback: hkCallback, + User: opts.user, + } + + if opts.privateKey != "" { + if opts.certificate != "" { + log.Println("using client certificate for authentication") + + certSigner, err := signCertWithPrivateKey(opts.privateKey, opts.certificate) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, certSigner) + } else { + log.Println("using private key for authentication") + + pubKeyAuth, err := readPrivateKey(opts.privateKey) + if err != nil { + return nil, err + } + conf.Auth = append(conf.Auth, pubKeyAuth) + } + } + + if opts.password != "" { + conf.Auth = append(conf.Auth, ssh.Password(opts.password)) + conf.Auth = append(conf.Auth, ssh.KeyboardInteractive( + PasswordKeyboardInteractive(opts.password))) + } + + if opts.sshAgent != nil { + conf.Auth = append(conf.Auth, opts.sshAgent.Auth()) + } + + return conf, nil +} + +// Create a Cert Signer and return ssh.AuthMethod +func signCertWithPrivateKey(pk string, certificate string) (ssh.AuthMethod, error) { + rawPk, err := ssh.ParseRawPrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("failed to parse private key %q: %s", pk, err) + } + + pcert, _, _, _, err := ssh.ParseAuthorizedKey([]byte(certificate)) + if err != nil { + return nil, fmt.Errorf("failed to parse certificate %q: %s", certificate, err) + } + + usigner, err := ssh.NewSignerFromKey(rawPk) + if err != nil { + return nil, fmt.Errorf("failed to create signer from raw private key %q: %s", rawPk, err) + } + + ucertSigner, err := ssh.NewCertSigner(pcert.(*ssh.Certificate), usigner) + if err != nil { + return nil, fmt.Errorf("failed to create cert signer %q: %s", usigner, err) + } + + return ssh.PublicKeys(ucertSigner), nil +} + +func readPrivateKey(pk string) (ssh.AuthMethod, error) { + // We parse the private key on our own first so that we can + // show a nicer error if the private key has a password. + block, _ := pem.Decode([]byte(pk)) + if block == nil { + return nil, errors.New("Failed to read ssh private key: no key found") + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, errors.New( + "Failed to read ssh private key: password protected keys are\n" + + "not supported. Please decrypt the key prior to use.") + } + + signer, err := ssh.ParsePrivateKey([]byte(pk)) + if err != nil { + return nil, fmt.Errorf("Failed to parse ssh private key: %s", err) + } + + return ssh.PublicKeys(signer), nil +} + +func connectToAgent(connInfo *connectionInfo) (*sshAgent, error) { + if connInfo.Agent != true { + // No agent configured + return nil, nil + } + + agent, conn, err := sshagent.New() + if err != nil { + return nil, err + } + + // connection close is handled over in Communicator + return &sshAgent{ + agent: agent, + conn: conn, + id: connInfo.AgentIdentity, + }, nil + +} + +// A tiny wrapper around an agent.Agent to expose the ability to close its +// associated connection on request. +type sshAgent struct { + agent agent.Agent + conn net.Conn + id string +} + +func (a *sshAgent) Close() error { + if a.conn == nil { + return nil + } + + return a.conn.Close() +} + +// make an attempt to either read the identity file or find a corresponding +// public key file using the typical openssh naming convention. 
+// This returns the public key in wire format, or nil when a key is not found. +func findIDPublicKey(id string) []byte { + for _, d := range idKeyData(id) { + signer, err := ssh.ParsePrivateKey(d) + if err == nil { + log.Println("[DEBUG] parsed id private key") + pk := signer.PublicKey() + return pk.Marshal() + } + + // try it as a publicKey + pk, err := ssh.ParsePublicKey(d) + if err == nil { + log.Println("[DEBUG] parsed id public key") + return pk.Marshal() + } + + // finally try it as an authorized key + pk, _, _, _, err = ssh.ParseAuthorizedKey(d) + if err == nil { + log.Println("[DEBUG] parsed id authorized key") + return pk.Marshal() + } + } + + return nil +} + +// Try to read an id file using the id as the file path. Also read the .pub +// file if it exists, as the id file may be encrypted. Return only the file +// data read. We don't need to know what data came from which path, as we will +// try parsing each as a private key, a public key and an authorized key +// regardless. +func idKeyData(id string) [][]byte { + idPath, err := filepath.Abs(id) + if err != nil { + return nil + } + + var fileData [][]byte + + paths := []string{idPath} + + if !strings.HasSuffix(idPath, ".pub") { + paths = append(paths, idPath+".pub") + } + + for _, p := range paths { + d, err := ioutil.ReadFile(p) + if err != nil { + log.Printf("[DEBUG] error reading %q: %s", p, err) + continue + } + log.Printf("[DEBUG] found identity data at %q", p) + fileData = append(fileData, d) + } + + return fileData +} + +// sortSigners moves a signer with an agent comment field matching the +// agent_identity to the head of the list when attempting authentication. This +// helps when there are more keys loaded in an agent than the host will allow +// attempts. +func (s *sshAgent) sortSigners(signers []ssh.Signer) { + if s.id == "" || len(signers) < 2 { + return + } + + // if we can locate the public key, either by extracting it from the id or + // locating the .pub file, then we can more easily determine an exact match + idPk := findIDPublicKey(s.id) + + // if we have a signer with a connect field that matches the id, send that + // first, otherwise put close matches at the front of the list. + head := 0 + for i := range signers { + pk := signers[i].PublicKey() + k, ok := pk.(*agent.Key) + if !ok { + continue + } + + // check for an exact match first + if bytes.Equal(pk.Marshal(), idPk) || s.id == k.Comment { + signers[0], signers[i] = signers[i], signers[0] + break + } + + // no exact match yet, move it to the front if it's close. The agent + // may have loaded as a full filepath, while the config refers to it by + // filename only. 
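+		// e.g. an agent comment of "/home/user/.ssh/id_rsa" should still
+		// match an agent_identity of "id_rsa" (paths are hypothetical).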
+ if strings.HasSuffix(k.Comment, s.id) { + signers[head], signers[i] = signers[i], signers[head] + head++ + continue + } + } + + ss := []string{} + for _, signer := range signers { + pk := signer.PublicKey() + k := pk.(*agent.Key) + ss = append(ss, k.Comment) + } +} + +func (s *sshAgent) Signers() ([]ssh.Signer, error) { + signers, err := s.agent.Signers() + if err != nil { + return nil, err + } + + s.sortSigners(signers) + return signers, nil +} + +func (a *sshAgent) Auth() ssh.AuthMethod { + return ssh.PublicKeysCallback(a.Signers) +} + +func (a *sshAgent) ForwardToAgent(client *ssh.Client) error { + return agent.ForwardToAgent(client, a.agent) +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go b/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go new file mode 100644 index 00000000..1669e595 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/winrm/communicator.go @@ -0,0 +1,201 @@ +package winrm + +import ( + "fmt" + "io" + "log" + "math/rand" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/remote" + "github.com/hashicorp/terraform/terraform" + "github.com/masterzen/winrm" + "github.com/packer-community/winrmcp/winrmcp" +) + +// Communicator represents the WinRM communicator +type Communicator struct { + connInfo *connectionInfo + client *winrm.Client + endpoint *winrm.Endpoint + rand *rand.Rand +} + +// New creates a new communicator implementation over WinRM. +func New(s *terraform.InstanceState) (*Communicator, error) { + connInfo, err := parseConnectionInfo(s) + if err != nil { + return nil, err + } + + endpoint := &winrm.Endpoint{ + Host: connInfo.Host, + Port: connInfo.Port, + HTTPS: connInfo.HTTPS, + Insecure: connInfo.Insecure, + } + if len(connInfo.CACert) > 0 { + endpoint.CACert = []byte(connInfo.CACert) + } + + comm := &Communicator{ + connInfo: connInfo, + endpoint: endpoint, + // Seed our own rand source so that script paths are not deterministic + rand: rand.New(rand.NewSource(time.Now().UnixNano())), + } + + return comm, nil +} + +// Connect implementation of communicator.Communicator interface +func (c *Communicator) Connect(o terraform.UIOutput) error { + if c.client != nil { + return nil + } + + params := winrm.DefaultParameters + params.Timeout = formatDuration(c.Timeout()) + if c.connInfo.NTLM == true { + params.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + client, err := winrm.NewClientWithParameters( + c.endpoint, c.connInfo.User, c.connInfo.Password, params) + if err != nil { + return err + } + + if o != nil { + o.Output(fmt.Sprintf( + "Connecting to remote host via WinRM...\n"+ + " Host: %s\n"+ + " Port: %d\n"+ + " User: %s\n"+ + " Password: %t\n"+ + " HTTPS: %t\n"+ + " Insecure: %t\n"+ + " NTLM: %t\n"+ + " CACert: %t", + c.connInfo.Host, + c.connInfo.Port, + c.connInfo.User, + c.connInfo.Password != "", + c.connInfo.HTTPS, + c.connInfo.Insecure, + c.connInfo.NTLM, + c.connInfo.CACert != "", + )) + } + + log.Printf("[DEBUG] connecting to remote shell using WinRM") + shell, err := client.CreateShell() + if err != nil { + log.Printf("[ERROR] error creating shell: %s", err) + return err + } + + err = shell.Close() + if err != nil { + log.Printf("[ERROR] error closing shell: %s", err) + return err + } + + if o != nil { + o.Output("Connected!") + } + + c.client = client + + return nil +} + +// Disconnect implementation of communicator.Communicator interface +func 
(c *Communicator) Disconnect() error { + c.client = nil + return nil +} + +// Timeout implementation of communicator.Communicator interface +func (c *Communicator) Timeout() time.Duration { + return c.connInfo.TimeoutVal +} + +// ScriptPath implementation of communicator.Communicator interface +func (c *Communicator) ScriptPath() string { + return strings.Replace( + c.connInfo.ScriptPath, "%RAND%", + strconv.FormatInt(int64(c.rand.Int31()), 10), -1) +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(rc *remote.Cmd) error { + rc.Init() + log.Printf("[DEBUG] starting remote command: %s", rc.Command) + + // TODO: make sure communicators always connect first, so we can get output + // from the connection. + if c.client == nil { + log.Println("[WARN] winrm client not connected, attempting to connect") + if err := c.Connect(nil); err != nil { + return err + } + } + + status, err := c.client.Run(rc.Command, rc.Stdout, rc.Stderr) + rc.SetExitStatus(status, err) + + return nil +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader) error { + wcp, err := c.newCopyClient() + if err != nil { + return err + } + log.Printf("[DEBUG] Uploading file to '%s'", path) + return wcp.Write(path, input) +} + +// UploadScript implementation of communicator.Communicator interface +func (c *Communicator) UploadScript(path string, input io.Reader) error { + return c.Upload(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string) error { + log.Printf("[DEBUG] Uploading dir '%s' to '%s'", src, dst) + wcp, err := c.newCopyClient() + if err != nil { + return err + } + return wcp.Copy(src, dst) +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + + config := winrmcp.Config{ + Auth: winrmcp.Auth{ + User: c.connInfo.User, + Password: c.connInfo.Password, + }, + Https: c.connInfo.HTTPS, + Insecure: c.connInfo.Insecure, + OperationTimeout: c.Timeout(), + MaxOperationsPerShell: 15, // lowest common denominator + } + + if c.connInfo.NTLM == true { + config.TransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} } + } + + if c.connInfo.CACert != "" { + config.CACertBytes = []byte(c.connInfo.CACert) + } + + return winrmcp.New(addr, &config) +} diff --git a/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go b/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go new file mode 100644 index 00000000..5cef1309 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/communicator/winrm/provisioner.go @@ -0,0 +1,131 @@ +package winrm + +import ( + "fmt" + "log" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/terraform/communicator/shared" + "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/mapstructure" +) + +const ( + // DefaultUser is used if there is no user given + DefaultUser = "Administrator" + + // DefaultPort is used if there is no port given + DefaultPort = 5985 + + // DefaultHTTPSPort is used if there is no port given and HTTPS is true + DefaultHTTPSPort = 5986 + + // DefaultScriptPath is used as the path to copy the file to + // for remote execution if not provided otherwise. 
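+	// As with the SSH communicator, the %RAND% token is replaced with a
+	// random integer by ScriptPath.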
+	DefaultScriptPath = "C:/Temp/terraform_%RAND%.cmd"
+
+	// DefaultTimeout is used if there is no timeout given
+	DefaultTimeout = 5 * time.Minute
+)
+
+// connectionInfo is decoded from the ConnInfo of the resource. These are the
+// only keys we look at.
+type connectionInfo struct {
+	User       string
+	Password   string
+	Host       string
+	Port       int
+	HTTPS      bool
+	Insecure   bool
+	NTLM       bool   `mapstructure:"use_ntlm"`
+	CACert     string `mapstructure:"cacert"`
+	Timeout    string
+	ScriptPath string        `mapstructure:"script_path"`
+	TimeoutVal time.Duration `mapstructure:"-"`
+}
+
+// parseConnectionInfo is used to convert the ConnInfo of the InstanceState into
+// a ConnectionInfo struct
+func parseConnectionInfo(s *terraform.InstanceState) (*connectionInfo, error) {
+	connInfo := &connectionInfo{}
+	decConf := &mapstructure.DecoderConfig{
+		WeaklyTypedInput: true,
+		Result:           connInfo,
+	}
+	dec, err := mapstructure.NewDecoder(decConf)
+	if err != nil {
+		return nil, err
+	}
+	if err := dec.Decode(s.Ephemeral.ConnInfo); err != nil {
+		return nil, err
+	}
+
+	// Check on script paths which point to the default Windows TEMP folder because files
+	// which are put in there very early in the boot process could get cleaned/deleted
+	// before you had the chance to execute them.
+	//
+	// TODO (SvH) Needs some more debugging to fully understand the exact sequence of events
+	// causing this...
+	if strings.HasPrefix(filepath.ToSlash(connInfo.ScriptPath), "C:/Windows/Temp") {
+		return nil, fmt.Errorf(
+			`Using the C:\Windows\Temp folder is not supported. Please use a different 'script_path'.`)
+	}
+
+	if connInfo.User == "" {
+		connInfo.User = DefaultUser
+	}
+
+	// Format the host if needed.
+	// Needed for IPv6 support.
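+	// (Assumed behavior of shared.IpFormat: a bare IPv6 address such as
+	// "2001:db8::1" is bracketed to "[2001:db8::1]" so a port can be
+	// appended later; the address is illustrative only.)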
+ connInfo.Host = shared.IpFormat(connInfo.Host) + + if connInfo.Port == 0 { + if connInfo.HTTPS { + connInfo.Port = DefaultHTTPSPort + } else { + connInfo.Port = DefaultPort + } + } + if connInfo.ScriptPath == "" { + connInfo.ScriptPath = DefaultScriptPath + } + if connInfo.Timeout != "" { + connInfo.TimeoutVal = safeDuration(connInfo.Timeout, DefaultTimeout) + } else { + connInfo.TimeoutVal = DefaultTimeout + } + + return connInfo, nil +} + +// safeDuration returns either the parsed duration or a default value +func safeDuration(dur string, defaultDur time.Duration) time.Duration { + d, err := time.ParseDuration(dur) + if err != nil { + log.Printf("Invalid duration '%s', using default of %s", dur, defaultDur) + return defaultDur + } + return d +} + +func formatDuration(duration time.Duration) string { + h := int(duration.Hours()) + m := int(duration.Minutes()) - h*60 + s := int(duration.Seconds()) - (h*3600 + m*60) + + res := "PT" + if h > 0 { + res = fmt.Sprintf("%s%dH", res, h) + } + if m > 0 { + res = fmt.Sprintf("%s%dM", res, m) + } + if s > 0 { + res = fmt.Sprintf("%s%dS", res, s) + } + + return res +} diff --git a/vendor/github.com/hashicorp/terraform/config.go b/vendor/github.com/hashicorp/terraform/config.go index d0df909a..8c18ef95 100644 --- a/vendor/github.com/hashicorp/terraform/config.go +++ b/vendor/github.com/hashicorp/terraform/config.go @@ -6,19 +6,17 @@ import ( "io/ioutil" "log" "os" - "os/exec" "path/filepath" - "strings" - "github.com/hashicorp/go-plugin" "github.com/hashicorp/hcl" + "github.com/hashicorp/terraform/command" - tfplugin "github.com/hashicorp/terraform/plugin" - "github.com/hashicorp/terraform/terraform" - "github.com/kardianos/osext" - "github.com/mitchellh/cli" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/tfdiags" ) +const pluginCacheDirEnvVar = "TF_PLUGIN_CACHE_DIR" + // Config is the structure of the configuration for the Terraform CLI. // // This is not the configuration for Terraform itself. That is in the @@ -29,14 +27,37 @@ type Config struct { DisableCheckpoint bool `hcl:"disable_checkpoint"` DisableCheckpointSignature bool `hcl:"disable_checkpoint_signature"` + + // If set, enables local caching of plugins in this directory to + // avoid repeatedly re-downloading over the Internet. + PluginCacheDir string `hcl:"plugin_cache_dir"` + + Hosts map[string]*ConfigHost `hcl:"host"` + + Credentials map[string]map[string]interface{} `hcl:"credentials"` + CredentialsHelpers map[string]*ConfigCredentialsHelper `hcl:"credentials_helper"` +} + +// ConfigHost is the structure of the "host" nested block within the CLI +// configuration, which can be used to override the default service host +// discovery behavior for a particular hostname. +type ConfigHost struct { + Services map[string]interface{} `hcl:"services"` +} + +// ConfigCredentialsHelper is the structure of the "credentials_helper" +// nested block within the CLI configuration. +type ConfigCredentialsHelper struct { + Args []string `hcl:"args"` } // BuiltinConfig is the built-in defaults for the configuration. These // can be overridden by user configurations. var BuiltinConfig Config -// ContextOpts are the global ContextOpts we use to initialize the CLI. -var ContextOpts terraform.ContextOpts +// PluginOverrides are paths that override discovered plugins, set from +// the config file. 
+var PluginOverrides command.PluginOverrides // ConfigFile returns the default path to the configuration file. // @@ -52,26 +73,65 @@ func ConfigDir() (string, error) { return configDir() } -// LoadConfig loads the CLI configuration from ".terraformrc" files. -func LoadConfig(path string) (*Config, error) { +// LoadConfig reads the CLI configuration from the various filesystem locations +// and from the environment, returning a merged configuration along with any +// diagnostics (errors and warnings) encountered along the way. +func LoadConfig() (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + configVal := BuiltinConfig // copy + config := &configVal + + if mainFilename, err := cliConfigFile(); err == nil { + if _, err := os.Stat(mainFilename); err == nil { + mainConfig, mainDiags := loadConfigFile(mainFilename) + diags = diags.Append(mainDiags) + config = config.Merge(mainConfig) + } + } + + if configDir, err := ConfigDir(); err == nil { + if info, err := os.Stat(configDir); err == nil && info.IsDir() { + dirConfig, dirDiags := loadConfigDir(configDir) + diags = diags.Append(dirDiags) + config = config.Merge(dirConfig) + } + } + + if envConfig := EnvConfig(); envConfig != nil { + // envConfig takes precedence + config = envConfig.Merge(config) + } + + diags = diags.Append(config.Validate()) + + return config, diags +} + +// loadConfigFile loads the CLI configuration from ".terraformrc" files. +func loadConfigFile(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + log.Printf("Loading CLI configuration from %s", path) + // Read the HCL file and prepare for parsing d, err := ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf( - "Error reading %s: %s", path, err) + diags = diags.Append(fmt.Errorf("Error reading %s: %s", path, err)) + return result, diags } // Parse it obj, err := hcl.Parse(string(d)) if err != nil { - return nil, fmt.Errorf( - "Error parsing %s: %s", path, err) + diags = diags.Append(fmt.Errorf("Error parsing %s: %s", path, err)) + return result, diags } // Build up the result - var result Config if err := hcl.DecodeObject(&result, obj); err != nil { - return nil, err + diags = diags.Append(fmt.Errorf("Error parsing %s: %s", path, err)) + return result, diags } // Replace all env vars @@ -82,89 +142,105 @@ func LoadConfig(path string) (*Config, error) { result.Provisioners[k] = os.ExpandEnv(v) } - return &result, nil + if result.PluginCacheDir != "" { + result.PluginCacheDir = os.ExpandEnv(result.PluginCacheDir) + } + + return result, diags } -// Discover plugins located on disk, and fall back on plugins baked into the -// Terraform binary. -// -// We look in the following places for plugins: -// -// 1. Terraform configuration path -// 2. Path where Terraform is installed -// 3. Path where Terraform is invoked -// -// Whichever file is discoverd LAST wins. -// -// Finally, we look at the list of plugins compiled into Terraform. If any of -// them has not been found on disk we use the internal version. This allows -// users to add / replace plugins without recompiling the main binary. 
-func (c *Config) Discover(ui cli.Ui) error { - // Look in ~/.terraform.d/plugins/ - dir, err := ConfigDir() +func loadConfigDir(path string) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + result := &Config{} + + entries, err := ioutil.ReadDir(path) if err != nil { - log.Printf("[ERR] Error loading config directory: %s", err) - } else { - if err := c.discover(filepath.Join(dir, "plugins")); err != nil { - return err - } + diags = diags.Append(fmt.Errorf("Error reading %s: %s", path, err)) + return result, diags } - // Next, look in the same directory as the Terraform executable, usually - // /usr/local/bin. If found, this replaces what we found in the config path. - exePath, err := osext.Executable() - if err != nil { - log.Printf("[ERR] Error loading exe directory: %s", err) - } else { - if err := c.discover(filepath.Dir(exePath)); err != nil { - return err + for _, entry := range entries { + name := entry.Name() + // Ignoring errors here because it is used only to indicate pattern + // syntax errors, and our patterns are hard-coded here. + hclMatched, _ := filepath.Match("*.tfrc", name) + jsonMatched, _ := filepath.Match("*.tfrc.json", name) + if !(hclMatched || jsonMatched) { + continue } + + filePath := filepath.Join(path, name) + fileConfig, fileDiags := loadConfigFile(filePath) + diags = diags.Append(fileDiags) + result = result.Merge(fileConfig) } - // Finally look in the cwd (where we are invoke Terraform). If found, this - // replaces anything we found in the config / install paths. - if err := c.discover("."); err != nil { - return err + return result, diags +} + +// EnvConfig returns a Config populated from environment variables. +// +// Any values specified in this config should override those set in the +// configuration file. +func EnvConfig() *Config { + config := &Config{} + + if envPluginCacheDir := os.Getenv(pluginCacheDirEnvVar); envPluginCacheDir != "" { + // No Expandenv here, because expanding environment variables inside + // an environment variable would be strange and seems unnecessary. + // (User can expand variables into the value while setting it using + // standard shell features.) + config.PluginCacheDir = envPluginCacheDir } - // Finally, if we have a plugin compiled into Terraform and we didn't find - // a replacement on disk, we'll just use the internal version. Only do this - // from the main process, or the log output will break the plugin handshake. - if os.Getenv("TF_PLUGIN_MAGIC_COOKIE") == "" { - for name, _ := range command.InternalProviders { - if path, found := c.Providers[name]; found { - // Allow these warnings to be suppressed via TF_PLUGIN_DEV=1 or similar - if os.Getenv("TF_PLUGIN_DEV") == "" { - ui.Warn(fmt.Sprintf("[WARN] %s overrides an internal plugin for %s-provider.\n"+ - " If you did not expect to see this message you will need to remove the old plugin.\n"+ - " See https://www.terraform.io/docs/internals/internal-plugins.html", path, name)) - } - } else { - cmd, err := command.BuildPluginCommandString("provider", name) - if err != nil { - return err - } - c.Providers[name] = cmd - } + return config +} + +// Validate checks for errors in the configuration that cannot be detected +// just by HCL decoding, returning any problems as diagnostics. +// +// On success, the returned diagnostics will return false from the HasErrors +// method. A non-nil diagnostics is not necessarily an error, since it may +// contain just warnings. 
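+//
+// A typical call site looks roughly like this (a sketch, not code from
+// this file):
+//
+//	if diags := config.Validate(); diags.HasErrors() {
+//		return diags.Err()
+//	}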
+func (c *Config) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + if c == nil { + return diags + } + + // FIXME: Right now our config parsing doesn't retain enough information + // to give proper source references to any errors. We should improve + // on this when we change the CLI config parser to use HCL2. + + // Check that all "host" blocks have valid hostnames. + for givenHost := range c.Hosts { + _, err := svchost.ForComparison(givenHost) + if err != nil { + diags = diags.Append( + fmt.Errorf("The host %q block has an invalid hostname: %s", givenHost, err), + ) } - for name, _ := range command.InternalProvisioners { - if path, found := c.Provisioners[name]; found { - if os.Getenv("TF_PLUGIN_DEV") == "" { - ui.Warn(fmt.Sprintf("[WARN] %s overrides an internal plugin for %s-provisioner.\n"+ - " If you did not expect to see this message you will need to remove the old plugin.\n"+ - " See https://www.terraform.io/docs/internals/internal-plugins.html", path, name)) - } - } else { - cmd, err := command.BuildPluginCommandString("provisioner", name) - if err != nil { - return err - } - c.Provisioners[name] = cmd - } + } + + // Check that all "credentials" blocks have valid hostnames. + for givenHost := range c.Credentials { + _, err := svchost.ForComparison(givenHost) + if err != nil { + diags = diags.Append( + fmt.Errorf("The credentials %q block has an invalid hostname: %s", givenHost, err), + ) } } - return nil + // Should have zero or one "credentials_helper" blocks + if len(c.CredentialsHelpers) > 1 { + diags = diags.Append( + fmt.Errorf("No more than one credentials_helper block may be specified"), + ) + } + + return diags } // Merge merges two configurations and returns a third entirely @@ -194,175 +270,43 @@ func (c1 *Config) Merge(c2 *Config) *Config { result.DisableCheckpoint = c1.DisableCheckpoint || c2.DisableCheckpoint result.DisableCheckpointSignature = c1.DisableCheckpointSignature || c2.DisableCheckpointSignature - return &result -} - -func (c *Config) discover(path string) error { - var err error - - if !filepath.IsAbs(path) { - path, err = filepath.Abs(path) - if err != nil { - return err - } - } - - err = c.discoverSingle( - filepath.Join(path, "terraform-provider-*"), &c.Providers) - if err != nil { - return err - } - - err = c.discoverSingle( - filepath.Join(path, "terraform-provisioner-*"), &c.Provisioners) - if err != nil { - return err - } - - return nil -} - -func (c *Config) discoverSingle(glob string, m *map[string]string) error { - matches, err := filepath.Glob(glob) - if err != nil { - return err - } - - if *m == nil { - *m = make(map[string]string) + result.PluginCacheDir = c1.PluginCacheDir + if result.PluginCacheDir == "" { + result.PluginCacheDir = c2.PluginCacheDir } - for _, match := range matches { - file := filepath.Base(match) - - // If the filename has a ".", trim up to there - if idx := strings.Index(file, "."); idx >= 0 { - file = file[:idx] + if (len(c1.Hosts) + len(c2.Hosts)) > 0 { + result.Hosts = make(map[string]*ConfigHost) + for name, host := range c1.Hosts { + result.Hosts[name] = host } - - // Look for foo-bar-baz. 
The plugin name is "baz" - parts := strings.SplitN(file, "-", 3) - if len(parts) != 3 { - continue + for name, host := range c2.Hosts { + result.Hosts[name] = host } - - log.Printf("[DEBUG] Discovered plugin: %s = %s", parts[2], match) - (*m)[parts[2]] = match - } - - return nil -} - -// ProviderFactories returns the mapping of prefixes to -// ResourceProviderFactory that can be used to instantiate a -// binary-based plugin. -func (c *Config) ProviderFactories() map[string]terraform.ResourceProviderFactory { - result := make(map[string]terraform.ResourceProviderFactory) - for k, v := range c.Providers { - result[k] = c.providerFactory(v) } - return result -} - -func (c *Config) providerFactory(path string) terraform.ResourceProviderFactory { - // Build the plugin client configuration and init the plugin - var config plugin.ClientConfig - config.Cmd = pluginCmd(path) - config.HandshakeConfig = tfplugin.Handshake - config.Managed = true - config.Plugins = tfplugin.PluginMap - client := plugin.NewClient(&config) - - return func() (terraform.ResourceProvider, error) { - // Request the RPC client so we can get the provider - // so we can build the actual RPC-implemented provider. - rpcClient, err := client.Client() - if err != nil { - return nil, err + if (len(c1.Credentials) + len(c2.Credentials)) > 0 { + result.Credentials = make(map[string]map[string]interface{}) + for host, creds := range c1.Credentials { + result.Credentials[host] = creds } - - raw, err := rpcClient.Dispense(tfplugin.ProviderPluginName) - if err != nil { - return nil, err + for host, creds := range c2.Credentials { + // We just clobber an entry from the other file right now. Will + // improve on this later using the more-robust merging behavior + // built in to HCL2. + result.Credentials[host] = creds } - - return raw.(terraform.ResourceProvider), nil } -} -// ProvisionerFactories returns the mapping of prefixes to -// ResourceProvisionerFactory that can be used to instantiate a -// binary-based plugin. -func (c *Config) ProvisionerFactories() map[string]terraform.ResourceProvisionerFactory { - result := make(map[string]terraform.ResourceProvisionerFactory) - for k, v := range c.Provisioners { - result[k] = c.provisionerFactory(v) - } - - return result -} - -func (c *Config) provisionerFactory(path string) terraform.ResourceProvisionerFactory { - // Build the plugin client configuration and init the plugin - var config plugin.ClientConfig - config.HandshakeConfig = tfplugin.Handshake - config.Cmd = pluginCmd(path) - config.Managed = true - config.Plugins = tfplugin.PluginMap - client := plugin.NewClient(&config) - - return func() (terraform.ResourceProvisioner, error) { - rpcClient, err := client.Client() - if err != nil { - return nil, err + if (len(c1.CredentialsHelpers) + len(c2.CredentialsHelpers)) > 0 { + result.CredentialsHelpers = make(map[string]*ConfigCredentialsHelper) + for name, helper := range c1.CredentialsHelpers { + result.CredentialsHelpers[name] = helper } - - raw, err := rpcClient.Dispense(tfplugin.ProvisionerPluginName) - if err != nil { - return nil, err + for name, helper := range c2.CredentialsHelpers { + result.CredentialsHelpers[name] = helper } - - return raw.(terraform.ResourceProvisioner), nil - } -} - -func pluginCmd(path string) *exec.Cmd { - cmdPath := "" - - // If the path doesn't contain a separator, look in the same - // directory as the Terraform executable first. 
- if !strings.ContainsRune(path, os.PathSeparator) { - exePath, err := osext.Executable() - if err == nil { - temp := filepath.Join( - filepath.Dir(exePath), - filepath.Base(path)) - - if _, err := os.Stat(temp); err == nil { - cmdPath = temp - } - } - - // If we still haven't found the executable, look for it - // in the PATH. - if v, err := exec.LookPath(path); err == nil { - cmdPath = v - } - } - - // No plugin binary found, so try to use an internal plugin. - if strings.Contains(path, command.TFSPACE) { - parts := strings.Split(path, command.TFSPACE) - return exec.Command(parts[0], parts[1:]...) } - // If we still don't have a path, then just set it to the original - // given path. - if cmdPath == "" { - cmdPath = path - } - - // Build the command to execute the plugin - return exec.Command(cmdPath) + return &result } diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go index 5f4e89ee..9d80c42b 100644 --- a/vendor/github.com/hashicorp/terraform/config/append.go +++ b/vendor/github.com/hashicorp/terraform/config/append.go @@ -82,5 +82,11 @@ func Append(c1, c2 *Config) (*Config, error) { c.Variables = append(c.Variables, c2.Variables...) } + if len(c1.Locals) > 0 || len(c2.Locals) > 0 { + c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals)) + c.Locals = append(c.Locals, c1.Locals...) + c.Locals = append(c.Locals, c2.Locals...) + } + return c, nil } diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index f944cadd..1772fd7e 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -8,10 +8,11 @@ import ( "strconv" "strings" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/hil" + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" "github.com/mitchellh/reflectwalk" ) @@ -33,6 +34,7 @@ type Config struct { ProviderConfigs []*ProviderConfig Resources []*Resource Variables []*Variable + Locals []*Local Outputs []*Output // The fields below can be filled in by loaders for validation @@ -54,6 +56,8 @@ type AtlasConfig struct { type Module struct { Name string Source string + Version string + Providers map[string]string RawConfig *RawConfig } @@ -64,6 +68,7 @@ type Module struct { type ProviderConfig struct { Name string Alias string + Version string RawConfig *RawConfig } @@ -145,7 +150,7 @@ func (p *Provisioner) Copy() *Provisioner { } } -// Variable is a variable defined within the configuration. +// Variable is a module argument defined within the configuration. type Variable struct { Name string DeclaredType string `mapstructure:"type"` @@ -153,6 +158,12 @@ type Variable struct { Description string } +// Local is a local value defined within the configuration. +type Local struct { + Name string + RawConfig *RawConfig +} + // Output is an output defined within the configuration. An output is // resulting data that is highlighted by Terraform when finished. 
An // output marked Sensitive will be output in a masked form following @@ -220,7 +231,10 @@ func (r *Resource) Count() (int, error) { v, err := strconv.ParseInt(count, 0, 0) if err != nil { - return 0, err + return 0, fmt.Errorf( + "cannot parse %q as an integer", + count, + ) } return int(v), nil @@ -238,31 +252,65 @@ func (r *Resource) Id() string { } } +// ProviderFullName returns the full name of the provider for this resource, +// which may either be specified explicitly using the "provider" meta-argument +// or implied by the prefix on the resource type name. +func (r *Resource) ProviderFullName() string { + return ResourceProviderFullName(r.Type, r.Provider) +} + +// ResourceProviderFullName returns the full (dependable) name of the +// provider for a hypothetical resource with the given resource type and +// explicit provider string. If the explicit provider string is empty then +// the provider name is inferred from the resource type name. +func ResourceProviderFullName(resourceType, explicitProvider string) string { + if explicitProvider != "" { + // check for an explicit provider name, or return the original + parts := strings.SplitAfter(explicitProvider, "provider.") + return parts[len(parts)-1] + } + + idx := strings.IndexRune(resourceType, '_') + if idx == -1 { + // If no underscores, the resource name is assumed to be + // also the provider name, e.g. if the provider exposes + // only a single resource of each type. + return resourceType + } + + return resourceType[:idx] +} + // Validate does some basic semantic checking of the configuration. -func (c *Config) Validate() error { +func (c *Config) Validate() tfdiags.Diagnostics { if c == nil { return nil } - var errs []error + var diags tfdiags.Diagnostics for _, k := range c.unknownKeys { - errs = append(errs, fmt.Errorf( - "Unknown root level key: %s", k)) + diags = diags.Append( + fmt.Errorf("Unknown root level key: %s", k), + ) } // Validate the Terraform config if tf := c.Terraform; tf != nil { - errs = append(errs, c.Terraform.Validate()...) + errs := c.Terraform.Validate() + for _, err := range errs { + diags = diags.Append(err) + } } vars := c.InterpolatedVariables() varMap := make(map[string]*Variable) for _, v := range c.Variables { if _, ok := varMap[v.Name]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': duplicate found. 
Variable names must be unique.", - v.Name)) + v.Name, + )) } varMap[v.Name] = v @@ -270,17 +318,19 @@ func (c *Config) Validate() error { for k, _ := range varMap { if !NameRegexp.MatchString(k) { - errs = append(errs, fmt.Errorf( - "variable %q: variable name must match regular expresion %s", - k, NameRegexp)) + diags = diags.Append(fmt.Errorf( + "variable %q: variable name must match regular expression %s", + k, NameRegexp, + )) } } for _, v := range c.Variables { if v.Type() == VariableTypeUnknown { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Variable '%s': must be a string or a map", - v.Name)) + v.Name, + )) continue } @@ -301,9 +351,10 @@ func (c *Config) Validate() error { if v.Default != nil { if err := reflectwalk.Walk(v.Default, w); err == nil { if interp { - errs = append(errs, fmt.Errorf( - "Variable '%s': cannot contain interpolations", - v.Name)) + diags = diags.Append(fmt.Errorf( + "variable %q: default may not contain interpolations", + v.Name, + )) } } } @@ -319,10 +370,11 @@ func (c *Config) Validate() error { } if _, ok := varMap[uv.Name]; !ok { - errs = append(errs, fmt.Errorf( - "%s: unknown variable referenced: '%s'. define it with 'variable' blocks", + diags = diags.Append(fmt.Errorf( + "%s: unknown variable referenced: '%s'; define it with a 'variable' block", source, - uv.Name)) + uv.Name, + )) } } } @@ -333,34 +385,55 @@ func (c *Config) Validate() error { switch v := rawV.(type) { case *CountVariable: if v.Type == CountValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid count variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } case *PathVariable: if v.Type == PathValueInvalid { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: invalid path variable: %s", source, - v.FullKey())) + v.FullKey(), + )) } } } } - // Check that providers aren't declared multiple times. - providerSet := make(map[string]struct{}) + // Check that providers aren't declared multiple times and that their + // version constraints, where present, are syntactically valid. + providerSet := make(map[string]bool) for _, p := range c.ProviderConfigs { name := p.FullName() if _, ok := providerSet[name]; ok { - errs = append(errs, fmt.Errorf( - "provider.%s: declared multiple times, you can only declare a provider once", - name)) + diags = diags.Append(fmt.Errorf( + "provider.%s: multiple configurations present; only one configuration is allowed per provider", + name, + )) continue } - providerSet[name] = struct{}{} + if p.Version != "" { + _, err := discovery.ConstraintStr(p.Version).Parse() + if err != nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf( + "The value %q given for provider.%s is not a valid version constraint.", + p.Version, name, + ), + // TODO: include a "Subject" source reference in here, + // once the config loader is able to retain source + // location information. 
+ }) + } + } + + providerSet[name] = true } // Check that all references to modules are valid @@ -372,9 +445,10 @@ func (c *Config) Validate() error { if _, ok := dupped[m.Id()]; !ok { dupped[m.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( - "%s: module repeated multiple times", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module repeated multiple times", + m.Id(), + )) } // Already seen this module, just skip it @@ -388,21 +462,23 @@ func (c *Config) Validate() error { "root": m.Source, }) if err != nil { - errs = append(errs, fmt.Errorf( - "%s: module source error: %s", - m.Id(), err)) + diags = diags.Append(fmt.Errorf( + "module %q: module source error: %s", + m.Id(), err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( - "%s: module source cannot contain interpolations", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module source cannot contain interpolations", + m.Id(), + )) } // Check that the name matches our regexp if !NameRegexp.Match([]byte(m.Name)) { - errs = append(errs, fmt.Errorf( - "%s: module name can only contain letters, numbers, "+ - "dashes, and underscores", - m.Id())) + diags = diags.Append(fmt.Errorf( + "module %q: module name must be a letter or underscore followed by only letters, numbers, dashes, and underscores", + m.Id(), + )) } // Check that the configuration can all be strings, lists or maps @@ -426,30 +502,47 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: variable %s must be a string, list or map value", - m.Id(), k)) + diags = diags.Append(fmt.Errorf( + "module %q: argument %s must have a string, list, or map value", + m.Id(), k, + )) } // Check for invalid count variables for _, v := range m.RawConfig.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: count variables are only valid within resources", + m.Name, + )) case *SelfVariable: - errs = append(errs, fmt.Errorf( - "%s: self variables are only valid within resources", m.Name)) + diags = diags.Append(fmt.Errorf( + "module %q: self variables are only valid within resources", + m.Name, + )) } } // Update the raw configuration to only contain the string values m.RawConfig, err = NewRawConfig(raw) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: can't initialize configuration: %s", - m.Id(), err)) + m.Id(), err, + )) + } + + // check that all named providers actually exist + for _, p := range m.Providers { + if !providerSet[p] { + diags = diags.Append(fmt.Errorf( + "module %q: cannot pass non-existent provider %q", + m.Name, p, + )) + } } + } dupped = nil @@ -463,10 +556,10 @@ func (c *Config) Validate() error { } if _, ok := modules[mv.Name]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown module referenced: %s", - source, - mv.Name)) + source, mv.Name, + )) } } } @@ -479,9 +572,10 @@ func (c *Config) Validate() error { if _, ok := dupped[r.Id()]; !ok { dupped[r.Id()] = struct{}{} - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource repeated multiple times", - r.Id())) + r.Id(), + )) } } @@ -495,57 +589,46 @@ func (c *Config) Validate() error { for _, v := range r.RawCount.Variables { switch v.(type) { case *CountVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference count variable: 
%s", - n, - v.FullKey())) + n, v.FullKey(), + )) case *SimpleVariable: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: resource count can't reference variable: %s", - n, - v.FullKey())) + n, v.FullKey(), + )) // Good case *ModuleVariable: case *ResourceVariable: case *TerraformVariable: case *UserVariable: + case *LocalVariable: default: - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "Internal error. Unknown type in count var in %s: %T", - n, v)) + n, v, + )) } } - // Interpolate with a fixed number to verify that its a number. - r.RawCount.interpolate(func(root ast.Node) (interface{}, error) { - // Execute the node but transform the AST so that it returns - // a fixed value of "5" for all interpolations. - result, err := hil.Eval( - hil.FixedValueTransform( - root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}), - nil) - if err != nil { - return "", err - } - - return result.Value, nil - }) - _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0) - if err != nil { - errs = append(errs, fmt.Errorf( - "%s: resource count must be an integer", - n)) + if !r.RawCount.couldBeInteger() { + diags = diags.Append(fmt.Errorf( + "%s: resource count must be an integer", n, + )) } r.RawCount.init() // Validate DependsOn - errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...) + for _, err := range c.validateDependsOn(n, r.DependsOn, resources, modules) { + diags = diags.Append(err) + } // Verify provisioners for _, p := range r.Provisioners { - // This validation checks that there are now splat variables + // This validation checks that there are no splat variables // referencing ourself. This currently is not allowed. for _, v := range p.ConnInfo.Variables { @@ -555,9 +638,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -569,9 +653,10 @@ func (c *Config) Validate() error { } if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name { - errs = append(errs, fmt.Errorf( - "%s: connection info cannot contain splat variable "+ - "referencing itself", n)) + diags = diags.Append(fmt.Errorf( + "%s: connection info cannot contain splat variable referencing itself", + n, + )) break } } @@ -579,21 +664,24 @@ func (c *Config) Validate() error { // Check for invalid when/onFailure values, though this should be // picked up by the loader we check here just in case. 
if p.When == ProvisionerWhenInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'when' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'when' value is invalid", n, + )) } if p.OnFailure == ProvisionerOnFailureInvalid { - errs = append(errs, fmt.Errorf( - "%s: provisioner 'on_failure' value is invalid", n)) + diags = diags.Append(fmt.Errorf( + "%s: provisioner 'on_failure' value is invalid", n, + )) } } // Verify ignore_changes contains valid entries for _, v := range r.Lifecycle.IgnoreChanges { if strings.Contains(v, "*") && v != "*" { - errs = append(errs, fmt.Errorf( - "%s: ignore_changes does not support using a partial string "+ - "together with a wildcard: %s", n, v)) + diags = diags.Append(fmt.Errorf( + "%s: ignore_changes does not support using a partial string together with a wildcard: %s", + n, v, + )) } } @@ -602,21 +690,24 @@ func (c *Config) Validate() error { "root": r.Lifecycle.IgnoreChanges, }) if err != nil { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes error: %s", - n, err)) + n, err, + )) } else if len(rc.Interpolations) > 0 { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: lifecycle ignore_changes cannot contain interpolations", - n)) + n, + )) } // If it is a data source then it can't have provisioners if r.Mode == DataResourceMode { if _, ok := r.RawConfig.Raw["provisioner"]; ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: data sources cannot have provisioners", - n)) + n, + )) } } } @@ -630,25 +721,50 @@ func (c *Config) Validate() error { id := rv.ResourceId() if _, ok := resources[id]; !ok { - errs = append(errs, fmt.Errorf( + diags = diags.Append(fmt.Errorf( "%s: unknown resource '%s' referenced in variable %s", source, id, - rv.FullKey())) + rv.FullKey(), + )) continue } } } + // Check that all locals are valid + { + found := make(map[string]struct{}) + for _, l := range c.Locals { + if _, ok := found[l.Name]; ok { + diags = diags.Append(fmt.Errorf( + "%s: duplicate local. local value names must be unique", + l.Name, + )) + continue + } + found[l.Name] = struct{}{} + + for _, v := range l.RawConfig.Variables { + if _, ok := v.(*CountVariable); ok { + diags = diags.Append(fmt.Errorf( + "local %s: count variables are only valid within resources", l.Name, + )) + } + } + } + } + // Check that all outputs are valid { found := make(map[string]struct{}) for _, o := range c.Outputs { // Verify the output is new if _, ok := found[o.Name]; ok { - errs = append(errs, fmt.Errorf( - "%s: duplicate output. 
output names must be unique.", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: an output of this name was already defined", + o.Name, + )) continue } found[o.Name] = struct{}{} @@ -668,9 +784,10 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'sensitive' must be boolean", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'sensitive' must be boolean", + o.Name, + )) continue } if k == "description" { @@ -679,40 +796,80 @@ func (c *Config) Validate() error { continue } - errs = append(errs, fmt.Errorf( - "%s: value for 'description' must be string", - o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: value for 'description' must be string", + o.Name, + )) continue } invalidKeys = append(invalidKeys, k) } if len(invalidKeys) > 0 { - errs = append(errs, fmt.Errorf( - "%s: output has invalid keys: %s", - o.Name, strings.Join(invalidKeys, ", "))) + diags = diags.Append(fmt.Errorf( + "output %q: invalid keys: %s", + o.Name, strings.Join(invalidKeys, ", "), + )) } if !valueKeyFound { - errs = append(errs, fmt.Errorf( - "%s: output is missing required 'value' key", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: missing required 'value' argument", o.Name, + )) } for _, v := range o.RawConfig.Variables { if _, ok := v.(*CountVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: count variables are only valid within resources", o.Name)) + diags = diags.Append(fmt.Errorf( + "output %q: count variables are only valid within resources", + o.Name, + )) } } - } - } - // Check that all variables are in the proper context - for source, rc := range c.rawConfigs() { - walker := &interpolationWalker{ - ContextF: c.validateVarContextFn(source, &errs), - } - if err := reflectwalk.Walk(rc.Raw, walker); err != nil { - errs = append(errs, fmt.Errorf( - "%s: error reading config: %s", source, err)) + // Detect a common mistake of using a "count"ed resource in + // an output value without using the splat or index form. + // Prior to 0.11 this error was silently ignored, but outputs + // now have their errors checked like all other contexts. + // + // TODO: Remove this in 0.12. + for _, v := range o.RawConfig.Variables { + rv, ok := v.(*ResourceVariable) + if !ok { + continue + } + + // If the variable seems to be treating the referenced + // resource as a singleton (no count specified) then + // we'll check to make sure it is indeed a singleton. + // It's a warning if not. + + if rv.Multi || rv.Index != 0 { + // This reference is treating the resource as a + // multi-resource, so the warning doesn't apply. + continue + } + + for _, r := range c.Resources { + if r.Id() != rv.ResourceId() { + continue + } + + // We test specifically for the raw string "1" here + // because we _do_ want to generate this warning if + // the user has provided an expression that happens + // to return 1 right now, to catch situations where + // a count might dynamically be set to something + // other than 1 and thus splat syntax is still needed + // to be safe. 
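+					// (A hypothetical illustration: with resource "aws_instance" "web"
+					// { count = 3 }, an output value of "${aws_instance.web.id}" draws
+					// this warning, while the splat form "${aws_instance.web.*.id}"
+					// yields the list of IDs across all three instances and is safe.)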
+ if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { + diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( + "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", + o.Name, + r.Id(), rv.Field, + r.Id(), rv.Field, + ))) + } + } + } } } @@ -726,17 +883,15 @@ func (c *Config) Validate() error { for _, v := range rc.Variables { if _, ok := v.(*SelfVariable); ok { - errs = append(errs, fmt.Errorf( - "%s: cannot contain self-reference %s", source, v.FullKey())) + diags = diags.Append(fmt.Errorf( + "%s: cannot contain self-reference %s", + source, v.FullKey(), + )) } } } - if len(errs) > 0 { - return &multierror.Error{Errors: errs} - } - - return nil + return diags } // InterpolatedVariables is a helper that returns a mapping of all the interpolated @@ -787,57 +942,6 @@ func (c *Config) rawConfigs() map[string]*RawConfig { return result } -func (c *Config) validateVarContextFn( - source string, errs *[]error) interpolationWalkerContextFunc { - return func(loc reflectwalk.Location, node ast.Node) { - // If we're in a slice element, then its fine, since you can do - // anything in there. - if loc == reflectwalk.SliceElem { - return - } - - // Otherwise, let's check if there is a splat resource variable - // at the top level in here. We do this by doing a transform that - // replaces everything with a noop node unless its a variable - // access or concat. This should turn the AST into a flat tree - // of Concat(Noop, ...). If there are any variables left that are - // multi-access, then its still broken. - node = node.Accept(func(n ast.Node) ast.Node { - // If it is a concat or variable access, we allow it. - switch n.(type) { - case *ast.Output: - return n - case *ast.VariableAccess: - return n - } - - // Otherwise, noop - return &noopNode{} - }) - - vars, err := DetectVariables(node) - if err != nil { - // Ignore it since this will be caught during parse. This - // actually probably should never happen by the time this - // is called, but its okay. 
- return - } - - for _, v := range vars { - rv, ok := v.(*ResourceVariable) - if !ok { - return - } - - if rv.Multi && rv.Index == -1 { - *errs = append(*errs, fmt.Errorf( - "%s: use of the splat ('*') operator must be wrapped in a list declaration", - source)) - } - } - } -} - func (c *Config) validateDependsOn( n string, v []string, @@ -1017,7 +1121,16 @@ func (v *Variable) ValidateTypeAndDefault() error { // If an explicit type is declared, ensure it is valid if v.DeclaredType != "" { if _, ok := typeStringMap[v.DeclaredType]; !ok { - return fmt.Errorf("Variable '%s' must be of type string or map - '%s' is not a valid type", v.Name, v.DeclaredType) + validTypes := []string{} + for k := range typeStringMap { + validTypes = append(validTypes, k) + } + return fmt.Errorf( + "Variable '%s' type must be one of [%s] - '%s' is not a valid type", + v.Name, + strings.Join(validTypes, ", "), + v.DeclaredType, + ) } } diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go index 0b3abbcd..a6933c2a 100644 --- a/vendor/github.com/hashicorp/terraform/config/config_string.go +++ b/vendor/github.com/hashicorp/terraform/config/config_string.go @@ -143,6 +143,46 @@ func outputsStr(os []*Output) string { result += fmt.Sprintf(" %s: %s\n", kind, str) } } + + if o.Description != "" { + result += fmt.Sprintf(" description\n %s\n", o.Description) + } + } + + return strings.TrimSpace(result) +} + +func localsStr(ls []*Local) string { + ns := make([]string, 0, len(ls)) + m := make(map[string]*Local) + for _, l := range ls { + ns = append(ns, l.Name) + m[l.Name] = l + } + sort.Strings(ns) + + result := "" + for _, n := range ns { + l := m[n] + + result += fmt.Sprintf("%s\n", n) + + if len(l.RawConfig.Variables) > 0 { + result += fmt.Sprintf(" vars\n") + for _, rawV := range l.RawConfig.Variables { + kind := "unknown" + str := rawV.FullKey() + + switch rawV.(type) { + case *ResourceVariable: + kind = "resource" + case *UserVariable: + kind = "user" + } + + result += fmt.Sprintf(" %s: %s\n", kind, str) + } + } } return strings.TrimSpace(result) diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go new file mode 100644 index 00000000..207d1059 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2_shim_util.go @@ -0,0 +1,134 @@ +package config + +import ( + "fmt" + + "github.com/zclconf/go-cty/cty/function/stdlib" + + "github.com/hashicorp/hil/ast" + "github.com/hashicorp/terraform/config/hcl2shim" + + hcl2 "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// --------------------------------------------------------------------------- +// This file contains some helper functions that are used to shim between +// HCL2 concepts and HCL/HIL concepts, to help us mostly preserve the existing +// public API that was built around HCL/HIL-oriented approaches. 
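+// For example, hcl2InterpolationFuncShim below adapts a HIL ast.Function,
+// whose arguments arrive as plain Go values typed by ast.Type, into a cty
+// function.Function whose parameter and return types are derived via
+// HCL2TypeForHILType.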
+// --------------------------------------------------------------------------- + +func hcl2InterpolationFuncs() map[string]function.Function { + hcl2Funcs := map[string]function.Function{} + + for name, hilFunc := range Funcs() { + hcl2Funcs[name] = hcl2InterpolationFuncShim(hilFunc) + } + + // Some functions in the old world are dealt with inside langEvalConfig + // due to their legacy reliance on direct access to the symbol table. + // Since 0.7 they don't actually need it anymore and just ignore it, + // so we're cheating a bit here and exploiting that detail by passing nil. + hcl2Funcs["lookup"] = hcl2InterpolationFuncShim(interpolationFuncLookup(nil)) + hcl2Funcs["keys"] = hcl2InterpolationFuncShim(interpolationFuncKeys(nil)) + hcl2Funcs["values"] = hcl2InterpolationFuncShim(interpolationFuncValues(nil)) + + // As a bonus, we'll provide the JSON-handling functions from the cty + // function library since its "jsonencode" is more complete (doesn't force + // weird type conversions) and HIL's type system can't represent + // "jsondecode" at all. The result of jsondecode will eventually be forced + // to conform to the HIL type system on exit into the rest of Terraform due + // to our shimming right now, but it should be usable for decoding _within_ + // an expression. + hcl2Funcs["jsonencode"] = stdlib.JSONEncodeFunc + hcl2Funcs["jsondecode"] = stdlib.JSONDecodeFunc + + return hcl2Funcs +} + +func hcl2InterpolationFuncShim(hilFunc ast.Function) function.Function { + spec := &function.Spec{} + + for i, hilArgType := range hilFunc.ArgTypes { + spec.Params = append(spec.Params, function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilArgType), + Name: fmt.Sprintf("arg%d", i+1), // HIL args don't have names, so we'll fudge it + }) + } + + if hilFunc.Variadic { + spec.VarParam = &function.Parameter{ + Type: hcl2shim.HCL2TypeForHILType(hilFunc.VariadicType), + Name: "varargs", // HIL args don't have names, so we'll fudge it + } + } + + spec.Type = func(args []cty.Value) (cty.Type, error) { + return hcl2shim.HCL2TypeForHILType(hilFunc.ReturnType), nil + } + spec.Impl = func(args []cty.Value, retType cty.Type) (cty.Value, error) { + hilArgs := make([]interface{}, len(args)) + for i, arg := range args { + hilV := hcl2shim.HILVariableFromHCL2Value(arg) + + // Although the cty function system does automatic type conversions + // to match the argument types, cty doesn't distinguish int and + // float and so we may need to adjust here to ensure that the + // wrapped function gets exactly the Go type it was expecting. + var wantType ast.Type + if i < len(hilFunc.ArgTypes) { + wantType = hilFunc.ArgTypes[i] + } else { + wantType = hilFunc.VariadicType + } + switch { + case hilV.Type == ast.TypeInt && wantType == ast.TypeFloat: + hilV.Type = wantType + hilV.Value = float64(hilV.Value.(int)) + case hilV.Type == ast.TypeFloat && wantType == ast.TypeInt: + hilV.Type = wantType + hilV.Value = int(hilV.Value.(float64)) + } + + // HIL functions actually expect to have the outermost variable + // "peeled" but any nested values (in lists or maps) will + // still have their ast.Variable wrapping. + hilArgs[i] = hilV.Value + } + + hilResult, err := hilFunc.Callback(hilArgs) + if err != nil { + return cty.DynamicVal, err + } + + // Just as on the way in, we get back a partially-peeled ast.Variable + // which we need to re-wrap in order to convert it back into what + // we're calling a "config value". 
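+		// (For instance, a HIL callback returning
+		// []ast.Variable{{Type: ast.TypeString, Value: "a"}} with ReturnType
+		// ast.TypeList re-wraps into an ast.Variable of TypeList, which
+		// HCL2ValueFromHILVariable then turns into
+		// cty.TupleVal([]cty.Value{cty.StringVal("a")}).)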
+ rv := hcl2shim.HCL2ValueFromHILVariable(ast.Variable{ + Type: hilFunc.ReturnType, + Value: hilResult, + }) + + return convert.Convert(rv, retType) // if result is unknown we'll force the correct type here + } + return function.New(spec) +} + +func hcl2EvalWithUnknownVars(expr hcl2.Expression) (cty.Value, hcl2.Diagnostics) { + trs := expr.Variables() + vars := map[string]cty.Value{} + val := cty.DynamicVal + + for _, tr := range trs { + name := tr.RootName() + vars[name] = val + } + + ctx := &hcl2.EvalContext{ + Variables: vars, + Functions: hcl2InterpolationFuncs(), + } + return expr.Value(ctx) +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go new file mode 100644 index 00000000..bb4228d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go @@ -0,0 +1,424 @@ +package hcl2shim + +import ( + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty/convert" + + "github.com/zclconf/go-cty/cty" +) + +// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a map compatible with what would be +// produced by the "flatmap" package. +// +// The type of the given value informs the structure of the resulting map. +// The value must be of an object type or this function will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given value must not have any maps of complex types or the result +// is undefined. +func FlatmapValueFromHCL2(v cty.Value) map[string]string { + if v.IsNull() { + return nil + } + + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type())) + } + + m := make(map[string]string) + flatmapValueFromHCL2Map(m, "", v) + return m +} + +func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) { + ty := val.Type() + switch { + case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType: + flatmapValueFromHCL2Primitive(m, key, val) + case ty.IsObjectType() || ty.IsMapType(): + flatmapValueFromHCL2Map(m, key+".", val) + case ty.IsTupleType() || ty.IsListType() || ty.IsSetType(): + flatmapValueFromHCL2Seq(m, key+".", val) + default: + panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName())) + } +} + +func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) { + if !val.IsKnown() { + m[key] = UnknownVariableValue + return + } + if val.IsNull() { + // Omit entirely + return + } + + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + // Should not be possible, since all primitive types can convert to string. + panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err)) + } + m[key] = val.AsString() +} + +func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + switch { + case val.Type().IsObjectType(): + // Whole objects can't be unknown in flatmap, so instead we'll + // just write all of the attribute values out as unknown. 
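+			// (e.g. an unknown value of cty.Object({"name": cty.String}) is
+			// written as a single "name" entry set to UnknownVariableValue,
+			// with no "%" entry at all.)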
+ for name, aty := range val.Type().AttributeTypes() { + flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty)) + } + default: + m[prefix+"%"] = UnknownVariableValue + } + return + } + + len := 0 + for it := val.ElementIterator(); it.Next(); { + ak, av := it.Element() + name := ak.AsString() + flatmapValueFromHCL2Value(m, prefix+name, av) + len++ + } + if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed + m[prefix+"%"] = strconv.Itoa(len) + } +} + +func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) { + if val.IsNull() { + // Omit entirely + return + } + if !val.IsKnown() { + m[prefix+"#"] = UnknownVariableValue + return + } + + // For sets this won't actually generate exactly what helper/schema would've + // generated, because we don't have access to the set key function it + // would've used. However, in practice it doesn't actually matter what the + // keys are as long as they are unique, so we'll just generate sequential + // indexes for them as if it were a list. + // + // An important implication of this, however, is that the set ordering will + // not be consistent across mutations and so different keys may be assigned + // to the same value when round-tripping. Since this shim is intended to + // be short-lived and not used for round-tripping, we accept this. + i := 0 + for it := val.ElementIterator(); it.Next(); { + _, av := it.Element() + key := prefix + strconv.Itoa(i) + flatmapValueFromHCL2Value(m, key, av) + i++ + } + m[prefix+"#"] = strconv.Itoa(i) +} + +// HCL2ValueFromFlatmap converts a map compatible with what would be produced +// by the "flatmap" package to a HCL2 (really, the cty dynamic types library +// that HCL2 uses) object type. +// +// The intended result type must be provided in order to guide how the +// map contents are decoded. This must be an object type or this function +// will panic. +// +// Flatmap values can only represent maps when they are of primitive types, +// so the given type must not have any maps of complex types or the result +// is undefined. +// +// The result may contain null values if the given map does not contain keys +// for all of the different key paths implied by the given type. 
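+// To illustrate the encoding being reversed here (a hypothetical value, not
+// part of this package): given
+//
+//	obj := cty.ObjectVal(map[string]cty.Value{
+//		"name":  cty.StringVal("web"),
+//		"ports": cty.ListVal([]cty.Value{cty.StringVal("80"), cty.StringVal("443")}),
+//		"tags":  cty.MapVal(map[string]cty.Value{"env": cty.StringVal("prod")}),
+//	})
+//
+// FlatmapValueFromHCL2(obj) produces
+//
+//	map[string]string{
+//		"name":    "web",
+//		"ports.#": "2", "ports.0": "80", "ports.1": "443",
+//		"tags.%":  "1", "tags.env": "prod",
+//	}
+//
+// and HCL2ValueFromFlatmap recovers the original value when given the
+// matching object type.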
+func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) { + if m == nil { + return cty.NullVal(ty), nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty)) + } + + return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes()) +} + +func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + var val cty.Value + var err error + switch { + case ty.IsPrimitiveType(): + val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty) + case ty.IsObjectType(): + val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes()) + case ty.IsTupleType(): + val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes()) + case ty.IsMapType(): + val, err = hcl2ValueFromFlatmapMap(m, key+".", ty) + case ty.IsListType(): + val, err = hcl2ValueFromFlatmapList(m, key+".", ty) + case ty.IsSetType(): + val, err = hcl2ValueFromFlatmapSet(m, key+".", ty) + default: + err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName()) + } + + if err != nil { + return cty.DynamicVal, err + } + return val, nil +} + +func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) { + rawVal, exists := m[key] + if !exists { + return cty.NullVal(ty), nil + } + if rawVal == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + var err error + val := cty.StringVal(rawVal) + val, err = convert.Convert(val, ty) + if err != nil { + // This should never happen for _valid_ input, but flatmap data might + // be tampered with by the user and become invalid. + return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err) + } + + return val, nil +} + +func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + for name, aty := range atys { + val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty) + if err != nil { + return cty.DynamicVal, err + } + vals[name] = val + } + return cty.ObjectVal(vals), nil +} + +func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(cty.Tuple(etys)), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(cty.Tuple(etys)), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + if count != len(etys) { + return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys)) + } + + vals = make([]cty.Value, len(etys)) + for i, ety := range etys { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + return cty.TupleVal(vals), nil +} + +func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + vals := make(map[string]cty.Value) + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // We actually don't really care 
about the "count" of a map for our + // purposes here, but we do need to check if it _exists_ in order to + // recognize the difference between null (not set at all) and empty. + if strCount, exists := m[prefix+"%"]; !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value. + key := fullKey[len(prefix):] + if key == "%" { + // Ignore the "count" key + continue + } + + val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[key] = val + } + + if len(vals) == 0 { + return cty.MapValEmpty(ety), nil + } + return cty.MapVal(vals), nil +} + +func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + countStr, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } + if countStr == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + count, err := strconv.Atoi(countStr) + if err != nil { + return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err) + } + + ety := ty.ElementType() + if count == 0 { + return cty.ListValEmpty(ety), nil + } + + vals = make([]cty.Value, count) + for i := 0; i < count; i++ { + key := prefix + strconv.Itoa(i) + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals[i] = val + } + + return cty.ListVal(vals), nil +} + +func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) { + var vals []cty.Value + ety := ty.ElementType() + + // if the container is unknown, there is no count string + listName := strings.TrimRight(prefix, ".") + if m[listName] == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + strCount, exists := m[prefix+"#"] + if !exists { + return cty.NullVal(ty), nil + } else if strCount == UnknownVariableValue { + return cty.UnknownVal(ty), nil + } + + // Keep track of keys we've seen, so we don't add the same set value + // multiple times. The cty.Set will normally de-duplicate values, but we may + // have unknown values that would not show as equivalent. + seen := map[string]bool{} + + for fullKey := range m { + if !strings.HasPrefix(fullKey, prefix) { + continue + } + subKey := fullKey[len(prefix):] + if subKey == "#" { + // Ignore the "count" key + continue + } + key := fullKey + if dot := strings.IndexByte(subKey, '.'); dot != -1 { + key = fullKey[:dot+len(prefix)] + } + + if seen[key] { + continue + } + + seen[key] = true + + // The flatmap format doesn't allow us to distinguish between keys + // that contain periods and nested objects, so by convention a + // map is only ever of primitive type in flatmap, and we just assume + // that the remainder of the raw key (dots and all) is the key we + // want in the result value.
+ + val, err := hcl2ValueFromFlatmapValue(m, key, ety) + if err != nil { + return cty.DynamicVal, err + } + vals = append(vals, val) + } + + if len(vals) == 0 && strCount == "1" { + // An empty set wouldn't be represented in the flatmap, so this must be + // a single empty object since the count is actually 1. + // Add an appropriately typed null value to the set. + var val cty.Value + switch { + case ety.IsMapType(): + val = cty.MapValEmpty(ety) + case ety.IsListType(): + val = cty.ListValEmpty(ety) + case ety.IsSetType(): + val = cty.SetValEmpty(ety) + case ety.IsObjectType(): + // TODO: cty.ObjectValEmpty + objectMap := map[string]cty.Value{} + for attr, ty := range ety.AttributeTypes() { + objectMap[attr] = cty.NullVal(ty) + } + val = cty.ObjectVal(objectMap) + default: + val = cty.NullVal(ety) + } + vals = append(vals, val) + + } else if len(vals) == 0 { + return cty.SetValEmpty(ety), nil + } + + return cty.SetVal(vals), nil +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go new file mode 100644 index 00000000..3403c026 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go @@ -0,0 +1,276 @@ +package hcl2shim + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" +) + +// RequiresReplace takes a list of flatmapped paths from an +// InstanceDiff.Attributes along with the corresponding cty.Type, and returns +// the list of the cty.Paths that are flagged as causing the resource +// replacement (RequiresNew). +// This will filter out redundant paths, paths that refer to flatmapped indexes +// (e.g. "#", "%"), and will return any changes within a set as the path to the +// set itself. +func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) { + var paths []cty.Path + + for _, attr := range attrs { + p, err := requiresReplacePath(attr, ty) + if err != nil { + return nil, err + } + + paths = append(paths, p) + } + + // now trim off any trailing paths that aren't GetAttrSteps, since only an + // attribute itself can require replacement + paths = trimPaths(paths) + + // There may be redundant paths due to set elements or index attributes + // Do some ugly n^2 filtering, but these are always fairly small sets. + for i := 0; i < len(paths)-1; i++ { + for j := i + 1; j < len(paths); j++ { + if reflect.DeepEqual(paths[i], paths[j]) { + // swap the tail and slice it off + paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j] + paths = paths[:len(paths)-1] + j-- + } + } + } + + return paths, nil +} + +// trimPaths removes any trailing steps that aren't of type GetAttrStep, since +// only an attribute itself can require replacement +func trimPaths(paths []cty.Path) []cty.Path { + var trimmed []cty.Path + for _, path := range paths { + path = trimPath(path) + if len(path) > 0 { + trimmed = append(trimmed, path) + } + } + return trimmed +} + +func trimPath(path cty.Path) cty.Path { + for len(path) > 0 { + _, isGetAttr := path[len(path)-1].(cty.GetAttrStep) + if isGetAttr { + break + } + path = path[:len(path)-1] + } + return path +} + +// requiresReplacePath takes a key from a flatmap along with the cty.Type +// describing the structure, and returns the cty.Path that would be used to +// reference the nested value in the data structure. +// This is used specifically to record the RequiresReplace attributes from a +// ResourceInstanceDiff.
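+// A small worked example (hypothetical type, for illustration): with
+//
+//	ty := cty.Object(map[string]cty.Type{
+//		"ami":  cty.String,
+//		"tags": cty.Map(cty.String),
+//	})
+//
+// RequiresReplace([]string{"ami", "tags.%"}, ty) returns the two single-step
+// attribute paths for "ami" and "tags": the "%" count key contributes no
+// index step of its own, so that change collapses to the "tags" attribute
+// itself.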
+func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) { + if k == "" { + return nil, nil + } + if !ty.IsObjectType() { + panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty)) + } + + path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes()) + if err != nil { + return path, fmt.Errorf("[%s] %s", k, err) + } + return path, nil +} + +func pathSplit(p string) (string, string) { + parts := strings.SplitN(p, ".", 2) + head := parts[0] + rest := "" + if len(parts) > 1 { + rest = parts[1] + } + return head, rest +} + +func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) { + k, rest := pathSplit(key) + + path := cty.Path{cty.GetAttrStep{Name: k}} + + ty, ok := atys[k] + if !ok { + return path, fmt.Errorf("attribute %q not found", k) + } + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + switch { + case ty.IsPrimitiveType(): + err = fmt.Errorf("invalid step %q with type %#v", key, ty) + case ty.IsObjectType(): + path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes()) + case ty.IsTupleType(): + path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes()) + case ty.IsMapType(): + path, err = pathFromFlatmapKeyMap(key, ty) + case ty.IsListType(): + path, err = pathFromFlatmapKeyList(key, ty) + case ty.IsSetType(): + path, err = pathFromFlatmapKeySet(key, ty) + default: + err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName()) + } + + if err != nil { + return path, err + } + + return path, nil +} + +func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if k == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if idx >= len(etys) { + return path, fmt.Errorf("index %s out of range in %#v", key, etys) + } + + if rest == "" { + return path, nil + } + + // etys[idx] is already the element's own type, so recurse on it directly. + ty := etys[idx] + + p, err := pathFromFlatmapKeyValue(rest, ty) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := key, "" + if !ty.ElementType().IsPrimitiveType() { + k, rest = pathSplit(key) + } + + // we don't need to convert the index keys to paths + if k == "%" { + return path, nil + } + + path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) { + var path cty.Path + var err error + + k, rest := pathSplit(key) + + // we don't need to convert the index keys to paths + if key == "#" { + return path, nil + } + + idx, err := strconv.Atoi(k) + if err != nil { + return path, err + } + + path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}} + + if rest == "" { + return path, nil + } + + p, err := pathFromFlatmapKeyValue(rest, ty.ElementType()) + if err != nil { + return path, err + } + + return append(path, p...), nil +} + +func
pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) { + // once we hit a set, we can't return consistent paths, so just mark the + // set as a whole changed. + return nil, nil +} + +// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for +// use in generating legacy style diffs. +func FlatmapKeyFromPath(path cty.Path) string { + var parts []string + + for _, step := range path { + switch step := step.(type) { + case cty.GetAttrStep: + parts = append(parts, step.Name) + case cty.IndexStep: + switch ty := step.Key.Type(); { + case ty == cty.String: + parts = append(parts, step.Key.AsString()) + case ty == cty.Number: + i, _ := step.Key.AsBigFloat().Int64() + parts = append(parts, strconv.Itoa(int(i))) + } + } + } + + return strings.Join(parts, ".") +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go new file mode 100644 index 00000000..19651c81 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/single_attr_body.go @@ -0,0 +1,85 @@ +package hcl2shim + +import ( + "fmt" + + hcl2 "github.com/hashicorp/hcl2/hcl" +) + +// SingleAttrBody is a weird implementation of hcl2.Body that acts as if +// it has a single attribute whose value is the given expression. +// +// This is used to shim Resource.RawCount and Output.RawConfig to behave +// more like they do in the old HCL loader. +type SingleAttrBody struct { + Name string + Expr hcl2.Expression +} + +var _ hcl2.Body = SingleAttrBody{} + +func (b SingleAttrBody) Content(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + if !all { + // This should never happen because this body implementation should only + // be used by code that is aware that it's using a single-attr body. + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid attribute", + Detail: fmt.Sprintf("The correct attribute name is %q.", b.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + return content, diags +} + +func (b SingleAttrBody) PartialContent(schema *hcl2.BodySchema) (*hcl2.BodyContent, hcl2.Body, hcl2.Diagnostics) { + content, all, diags := b.content(schema) + var remain hcl2.Body + if all { + // If the request matched the one attribute we represent, then the + // remaining body is empty. 
+ remain = hcl2.EmptyBody() + } else { + remain = b + } + return content, remain, diags +} + +func (b SingleAttrBody) content(schema *hcl2.BodySchema) (*hcl2.BodyContent, bool, hcl2.Diagnostics) { + ret := &hcl2.BodyContent{} + all := false + var diags hcl2.Diagnostics + + for _, attrS := range schema.Attributes { + if attrS.Name == b.Name { + attrs, _ := b.JustAttributes() + ret.Attributes = attrs + all = true + } else if attrS.Required { + diags = append(diags, &hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Missing attribute", + Detail: fmt.Sprintf("The attribute %q is required.", attrS.Name), + Subject: b.Expr.Range().Ptr(), + }) + } + } + + return ret, all, diags +} + +func (b SingleAttrBody) JustAttributes() (hcl2.Attributes, hcl2.Diagnostics) { + return hcl2.Attributes{ + b.Name: { + Expr: b.Expr, + Name: b.Name, + NameRange: b.Expr.Range(), + Range: b.Expr.Range(), + }, + }, nil +} + +func (b SingleAttrBody) MissingItemRange() hcl2.Range { + return b.Expr.Range() +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go new file mode 100644 index 00000000..daeb0b8e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go @@ -0,0 +1,353 @@ +package hcl2shim + +import ( + "fmt" + "math/big" + + "github.com/hashicorp/hil/ast" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" +) + +// UnknownVariableValue is a sentinel value that can be used +// to denote that the value of a variable is unknown at this time. +// RawConfig uses this information to build up data about +// unknown keys. +const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" + +// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for +// known object values and uses the provided block schema to perform some +// additional normalization to better mimic the shape of value that the old +// HCL1/HIL-based codepaths would've produced. +// +// In particular, it discards the collections that we use to represent nested +// blocks (other than NestingSingle) if they are empty, which better mimics +// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't +// know that an unspecified block _could_ exist. +// +// The given object value must conform to the schema's implied type or this +// function will panic or produce incorrect results. +// +// This is primarily useful for the final transition from new-style values to +// terraform.ResourceConfig before calling to a legacy provider, since +// helper/schema (the old provider SDK) is particularly sensitive to these +// subtle differences within its validation code. 
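+// A sketch of the effect (hypothetical schema and value): for a schema with a
+// string attribute "ami" and a NestingList block type "listener", a value
+// whose "listener" list is empty converts to just
+//
+//	map[string]interface{}{"ami": "ami-0123"}
+//
+// with the empty collection omitted entirely, just as HCL1 would have behaved
+// for a block that was never written.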
+func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} { + if v.IsNull() { + return nil + } + if !v.IsKnown() { + panic("ConfigValueFromHCL2Block used with unknown value") + } + if !v.Type().IsObjectType() { + panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v)) + } + + atys := v.Type().AttributeTypes() + ret := make(map[string]interface{}) + + for name := range schema.Attributes { + if _, exists := atys[name]; !exists { + continue + } + + av := v.GetAttr(name) + if av.IsNull() { + // Skip nulls altogether, to better mimic how HCL1 would behave + continue + } + ret[name] = ConfigValueFromHCL2(av) + } + + for name, blockS := range schema.BlockTypes { + if _, exists := atys[name]; !exists { + continue + } + bv := v.GetAttr(name) + if !bv.IsKnown() { + ret[name] = UnknownVariableValue + continue + } + if bv.IsNull() { + continue + } + + switch blockS.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block) + + case configschema.NestingList, configschema.NestingSet: + l := bv.LengthInt() + if l == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make([]interface{}, 0, l) + for it := bv.ElementIterator(); it.Next(); { + _, ev := it.Element() + if !ev.IsKnown() { + elems = append(elems, UnknownVariableValue) + continue + } + elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block)) + } + ret[name] = elems + + case configschema.NestingMap: + if bv.LengthInt() == 0 { + // skip empty collections to better mimic how HCL1 would behave + continue + } + + elems := make(map[string]interface{}) + for it := bv.ElementIterator(); it.Next(); { + ek, ev := it.Element() + if !ev.IsKnown() { + elems[ek.AsString()] = UnknownVariableValue + continue + } + elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block) + } + ret[name] = elems + } + } + + return ret +} + +// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic +// types library that HCL2 uses) to a value type that matches what would've +// been produced from the HCL-based interpolator for an equivalent structure. +// +// This function will transform a cty null value into a Go nil value, which +// isn't a possible outcome of the HCL/HIL-based decoder and so callers may +// need to detect and reject any null values. +func ConfigValueFromHCL2(v cty.Value) interface{} { + if !v.IsKnown() { + return UnknownVariableValue + } + if v.IsNull() { + return nil + } + + switch v.Type() { + case cty.Bool: + return v.True() // like HCL.BOOL + case cty.String: + return v.AsString() // like HCL token.STRING or token.HEREDOC + case cty.Number: + // We can't match HCL _exactly_ here because it distinguishes between + // int and float values, but we'll get as close as we can by using + // an int if the number is exactly representable, and a float if not. + // The conversion to float will force precision to that of a float64, + // which is potentially losing information from the specific number + // given, but no worse than what HCL would've done in its own conversion + // to float. + + f := v.AsBigFloat() + if i, acc := f.Int64(); acc == big.Exact { + // if we're on a 32-bit system and the number is too big for 32-bit + // int then we'll fall through here and use a float64. 
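+			// (e.g. 2^40 = 1099511627776 is exact in int64, but on a 32-bit
+			// platform it exceeds MaxInt and so is returned as the float64
+			// 1.099511627776e+12 instead.)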
+ const MaxInt = int(^uint(0) >> 1) + const MinInt = -MaxInt - 1 + if i <= int64(MaxInt) && i >= int64(MinInt) { + return int(i) // Like HCL token.NUMBER + } + } + + f64, _ := f.Float64() + return f64 // like HCL token.FLOAT + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]interface{}, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, ConfigValueFromHCL2(ev)) + } + return l + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]interface{}) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + cv := ConfigValueFromHCL2(ev) + if cv != nil { + l[ek.AsString()] = cv + } + } + return l + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to config value", v)) +} + +// HCL2ValueFromConfigValue is the opposite of configValueFromHCL2: it takes +// a value as would be returned from the old interpolator and turns it into +// a cty.Value so it can be used within, for example, an HCL2 EvalContext. +func HCL2ValueFromConfigValue(v interface{}) cty.Value { + if v == nil { + return cty.NullVal(cty.DynamicPseudoType) + } + if v == UnknownVariableValue { + return cty.DynamicVal + } + + switch tv := v.(type) { + case bool: + return cty.BoolVal(tv) + case string: + return cty.StringVal(tv) + case int: + return cty.NumberIntVal(int64(tv)) + case float64: + return cty.NumberFloatVal(tv) + case []interface{}: + vals := make([]cty.Value, len(tv)) + for i, ev := range tv { + vals[i] = HCL2ValueFromConfigValue(ev) + } + return cty.TupleVal(vals) + case map[string]interface{}: + vals := map[string]cty.Value{} + for k, ev := range tv { + vals[k] = HCL2ValueFromConfigValue(ev) + } + return cty.ObjectVal(vals) + default: + // HCL/HIL should never generate anything that isn't caught by + // the above, so if we get here something has gone very wrong. + panic(fmt.Errorf("can't convert %#v to cty.Value", v)) + } +} + +func HILVariableFromHCL2Value(v cty.Value) ast.Variable { + if v.IsNull() { + // Caller should guarantee/check this before calling + panic("Null values cannot be represented in HIL") + } + if !v.IsKnown() { + return ast.Variable{ + Type: ast.TypeUnknown, + Value: UnknownVariableValue, + } + } + + switch v.Type() { + case cty.Bool: + return ast.Variable{ + Type: ast.TypeBool, + Value: v.True(), + } + case cty.Number: + v := ConfigValueFromHCL2(v) + switch tv := v.(type) { + case int: + return ast.Variable{ + Type: ast.TypeInt, + Value: tv, + } + case float64: + return ast.Variable{ + Type: ast.TypeFloat, + Value: tv, + } + default: + // should never happen + panic("invalid return value for configValueFromHCL2") + } + case cty.String: + return ast.Variable{ + Type: ast.TypeString, + Value: v.AsString(), + } + } + + if v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType() { + l := make([]ast.Variable, 0, v.LengthInt()) + it := v.ElementIterator() + for it.Next() { + _, ev := it.Element() + l = append(l, HILVariableFromHCL2Value(ev)) + } + // If we were given a tuple then this could actually produce an invalid + // list with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous list would be. 
+ return ast.Variable{ + Type: ast.TypeList, + Value: l, + } + } + + if v.Type().IsMapType() || v.Type().IsObjectType() { + l := make(map[string]ast.Variable) + it := v.ElementIterator() + for it.Next() { + ek, ev := it.Element() + l[ek.AsString()] = HILVariableFromHCL2Value(ev) + } + // If we were given an object then this could actually produce an invalid + // map with non-homogenous types, which we expect to be caught inside + // HIL just like a user-supplied non-homogenous map would be. + return ast.Variable{ + Type: ast.TypeMap, + Value: l, + } + } + + // If we fall out here then we have some weird type that we haven't + // accounted for. This should never happen unless the caller is using + // capsule types, and we don't currently have any such types defined. + panic(fmt.Errorf("can't convert %#v to HIL variable", v)) +} + +func HCL2ValueFromHILVariable(v ast.Variable) cty.Value { + switch v.Type { + case ast.TypeList: + vals := make([]cty.Value, len(v.Value.([]ast.Variable))) + for i, ev := range v.Value.([]ast.Variable) { + vals[i] = HCL2ValueFromHILVariable(ev) + } + return cty.TupleVal(vals) + case ast.TypeMap: + vals := make(map[string]cty.Value, len(v.Value.(map[string]ast.Variable))) + for k, ev := range v.Value.(map[string]ast.Variable) { + vals[k] = HCL2ValueFromHILVariable(ev) + } + return cty.ObjectVal(vals) + default: + return HCL2ValueFromConfigValue(v.Value) + } +} + +func HCL2TypeForHILType(hilType ast.Type) cty.Type { + switch hilType { + case ast.TypeAny: + return cty.DynamicPseudoType + case ast.TypeUnknown: + return cty.DynamicPseudoType + case ast.TypeBool: + return cty.Bool + case ast.TypeInt: + return cty.Number + case ast.TypeFloat: + return cty.Number + case ast.TypeString: + return cty.String + case ast.TypeList: + return cty.List(cty.DynamicPseudoType) + case ast.TypeMap: + return cty.Map(cty.DynamicPseudoType) + default: + return cty.NilType // equivalent to ast.TypeInvalid + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go new file mode 100644 index 00000000..92f0213d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go @@ -0,0 +1,214 @@ +package hcl2shim + +import ( + "github.com/zclconf/go-cty/cty" +) + +// ValuesSDKEquivalent returns true if both of the given values seem equivalent +// as far as the legacy SDK diffing code would be concerned. +// +// Since SDK diffing is a fuzzy, inexact operation, this function is also +// fuzzy and inexact. It will err on the side of returning false if it +// encounters an ambiguous situation. Ambiguity is most common in the presence +// of sets because in practice it is impossible to exactly correlate +// nonequal-but-equivalent set elements because they have no identity separate +// from their value. +// +// This must be used _only_ for comparing values for equivalence within the +// SDK planning code. It is only meaningful to compare the "prior state" +// provided by Terraform Core with the "planned new state" produced by the +// legacy SDK code via shims. In particular it is not valid to use this +// function with either the config value or the "proposed new state" value +// because they contain only the subset of data that Terraform Core itself is +// able to determine.
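+// A few illustrative cases (hypothetical values):
+//
+//	ValuesSDKEquivalent(cty.NumberIntVal(0), cty.NullVal(cty.Number))  // true: both are the SDK "zero"
+//	ValuesSDKEquivalent(cty.StringVal(""), cty.NullVal(cty.String))    // true: both are the SDK "zero"
+//	ValuesSDKEquivalent(cty.UnknownVal(cty.String), cty.StringVal("")) // false: known vs. unknown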
+func ValuesSDKEquivalent(a, b cty.Value) bool { + if a == cty.NilVal || b == cty.NilVal { + // We don't generally expect nils to appear, but we'll allow them + // for robustness since the data structures produced by legacy SDK code + // can sometimes be non-ideal. + return a == b // equivalent if they are _both_ nil + } + if a.RawEquals(b) { + // Easy case. We use RawEquals because we want two unknowns to be + // considered equal here, whereas "Equals" would return unknown. + return true + } + if !a.IsKnown() || !b.IsKnown() { + // Two unknown values are equivalent regardless of type. A known is + // never equivalent to an unknown. + return a.IsKnown() == b.IsKnown() + } + if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero { + // Two null/zero values are equivalent regardless of type. A non-zero is + // never equivalent to a zero. + return aZero == bZero + } + + // If we get down here then we are guaranteed that both a and b are known, + // non-null values. + + aTy := a.Type() + bTy := b.Type() + switch { + case aTy.IsSetType() && bTy.IsSetType(): + return valuesSDKEquivalentSets(a, b) + case aTy.IsListType() && bTy.IsListType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsTupleType() && bTy.IsTupleType(): + return valuesSDKEquivalentSequences(a, b) + case aTy.IsMapType() && bTy.IsMapType(): + return valuesSDKEquivalentMappings(a, b) + case aTy.IsObjectType() && bTy.IsObjectType(): + return valuesSDKEquivalentMappings(a, b) + case aTy == cty.Number && bTy == cty.Number: + return valuesSDKEquivalentNumbers(a, b) + default: + // We've now covered all the interesting cases, so anything that falls + // down here cannot be equivalent. + return false + } +} + +// valuesSDKEquivalentIsNullOrZero returns true if the given value is either +// null or is the "zero value" (in the SDK/Go sense) for its type. +func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool { + if v == cty.NilVal { + return true + } + + ty := v.Type() + switch { + case !v.IsKnown(): + return false + case v.IsNull(): + return true + + // After this point, v is always known and non-null + case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType(): + return v.LengthInt() == 0 + case ty == cty.String: + return v.RawEquals(cty.StringVal("")) + case ty == cty.Number: + return v.RawEquals(cty.Zero) + case ty == cty.Bool: + return v.RawEquals(cty.False) + default: + // The above is exhaustive, but for robustness we'll consider anything + // else to _not_ be zero unless it is null. + return false + } +} + +// valuesSDKEquivalentSets returns true only if each of the elements in a can +// be correlated with at least one equivalent element in b and vice-versa. +// This is a fuzzy operation that prefers to signal non-equivalence if it cannot +// be certain that all elements are accounted for. +func valuesSDKEquivalentSets(a, b cty.Value) bool { + if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen { + return false + } + + // Our methodology here is a little tricky, to deal with the fact that + // it's impossible to directly correlate two non-equal set elements because + // they don't have identities separate from their values. + // The approach is to count the number of equivalent elements each element + // of a has in b and vice-versa, and then return true only if each element + // in both sets has at least one equivalent. 
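+	// (e.g. {"a", unknown} vs. {unknown, "a"}: "a" pairs with "a" and unknown
+	// pairs with unknown, so every element on each side has at least one
+	// equivalent partner and the two sets are deemed equivalent.)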
+ as := a.AsValueSlice() + bs := b.AsValueSlice() + aeqs := make([]bool, len(as)) + beqs := make([]bool, len(bs)) + for ai, av := range as { + for bi, bv := range bs { + if ValuesSDKEquivalent(av, bv) { + aeqs[ai] = true + beqs[bi] = true + } + } + } + + for _, eq := range aeqs { + if !eq { + return false + } + } + for _, eq := range beqs { + if !eq { + return false + } + } + return true +} + +// valuesSDKEquivalentSequences decides equivalence for two sequence values +// (lists or tuples). +func valuesSDKEquivalentSequences(a, b cty.Value) bool { + as := a.AsValueSlice() + bs := b.AsValueSlice() + if len(as) != len(bs) { + return false + } + + for i := range as { + if !ValuesSDKEquivalent(as[i], bs[i]) { + return false + } + } + return true +} + +// valuesSDKEquivalentMappings decides equivalence for two mapping values +// (maps or objects). +func valuesSDKEquivalentMappings(a, b cty.Value) bool { + as := a.AsValueMap() + bs := b.AsValueMap() + if len(as) != len(bs) { + return false + } + + for k, av := range as { + bv, ok := bs[k] + if !ok { + return false + } + if !ValuesSDKEquivalent(av, bv) { + return false + } + } + return true +} + +// valuesSDKEquivalentNumbers decides equivalence for two number values based +// on the fact that the SDK uses int and float64 representations while +// cty (and thus Terraform Core) uses big.Float, and so we expect to lose +// precision in the round-trip. +// +// This does _not_ attempt to allow for an epsilon difference that may be +// caused by accumulated inaccuracy in a float calculation, under the +// expectation that providers generally do not actually do computations on +// floats and instead just pass string representations of them on verbatim +// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's +// a problem for the provider itself to deal with, based on its knowledge of +// the remote system, e.g. using DiffSuppressFunc. +func valuesSDKEquivalentNumbers(a, b cty.Value) bool { + if a.RawEquals(b) { + return true // easy + } + + af := a.AsBigFloat() + bf := b.AsBigFloat() + + if af.IsInt() != bf.IsInt() { + return false + } + if af.IsInt() && bf.IsInt() { + return false // a.RawEquals(b) test above is good enough for integers + } + + // The SDK supports only int and float64, so if it's not an integer + // we know that only a float64-level of precision can possibly be + // significant. + af64, _ := af.Float64() + bf64, _ := bf.Float64() + return af64 == bf64 +} diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go index 37ec11a1..08cbc773 100644 --- a/vendor/github.com/hashicorp/terraform/config/import_tree.go +++ b/vendor/github.com/hashicorp/terraform/config/import_tree.go @@ -1,8 +1,12 @@ package config import ( + "bufio" "fmt" "io" + "os" + + "github.com/hashicorp/errwrap" ) // configurable is an interface that must be implemented by any configuration @@ -27,15 +31,52 @@ type importTree struct { // imports. type fileLoaderFunc func(path string) (configurable, []string, error) +// Set this to a non-empty value at link time to enable the HCL2 experiment. +// This is not currently enabled for release builds. +// +// For example: +// go install -ldflags="-X github.com/hashicorp/terraform/config.enableHCL2Experiment=true" github.com/hashicorp/terraform +var enableHCL2Experiment = "" + +// loadTree takes a single file and loads the entire importTree for that +// file.
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
func loadTree(root string) (*importTree, error) {
 var f fileLoaderFunc
- switch ext(root) {
- case ".tf", ".tf.json":
- f = loadFileHcl
- default:
+
+ // HCL2 experiment is currently activated at build time via the linker.
+ // See the comment on this variable for more information.
+ if enableHCL2Experiment == "" {
+ // Main-line behavior: always use the original HCL parser
+ switch ext(root) {
+ case ".tf", ".tf.json":
+ f = loadFileHcl
+ default:
+ }
+ } else {
+ // Experimental behavior: use the HCL2 parser if the opt-in comment
+ // is present.
+ switch ext(root) {
+ case ".tf":
+ // We need to sniff the file for the opt-in comment line to decide
+ // if the file is participating in the HCL2 experiment.
+ cf, err := os.Open(root)
+ if err != nil {
+ return nil, err
+ }
+ sc := bufio.NewScanner(cf)
+ for sc.Scan() {
+ if sc.Text() == "#terraform:hcl2" {
+ f = globalHCL2Loader.loadFile
+ }
+ }
+ if f == nil {
+ f = loadFileHcl
+ }
+ case ".tf.json":
+ f = loadFileHcl
+ default:
+ }
 }
 
 if f == nil {
@@ -86,10 +127,7 @@ func (t *importTree) Close() error {
 func (t *importTree) ConfigTree() (*configTree, error) {
 config, err := t.Raw.Config()
 if err != nil {
- return nil, fmt.Errorf(
- "Error loading %s: %s",
- t.Path,
- err)
+ return nil, errwrap.Wrapf(fmt.Sprintf("Error loading %s: {{err}}", t.Path), err)
 }
 
 // Build our result
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
index bbb35554..599e5ecd 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate.go
@@ -5,6 +5,8 @@ import (
 "strconv"
 "strings"
 
+ "github.com/hashicorp/terraform/tfdiags"
+
 "github.com/hashicorp/hil/ast"
 )
 
@@ -14,6 +16,21 @@ import (
 // variables can come from: user variables, resources, etc.
 type InterpolatedVariable interface {
 FullKey() string
+ SourceRange() tfdiags.SourceRange
+}
+
+// varRange can be embedded into an InterpolatedVariable implementation to
+// implement the SourceRange method.
+type varRange struct {
+ rng tfdiags.SourceRange
+}
+
+func (r varRange) SourceRange() tfdiags.SourceRange {
+ return r.rng
+}
+
+func makeVarRange(rng tfdiags.SourceRange) varRange {
+ return varRange{rng}
 }
 
 // CountVariable is a variable for referencing information about
@@ -21,6 +38,7 @@ type InterpolatedVariable interface {
 type CountVariable struct {
 Type CountValueType
 key string
+ varRange
 }
 
 // CountValueType is the type of the count variable that is referenced.
@@ -37,6 +55,7 @@ type ModuleVariable struct {
 Name string
 Field string
 key string
+ varRange
 }
 
 // A PathVariable is a variable that references path information about the
@@ -44,6 +63,7 @@ type ModuleVariable struct {
 type PathVariable struct {
 Type PathValueType
 key string
+ varRange
 }
 
 type PathValueType byte
@@ -67,6 +87,7 @@ type ResourceVariable struct {
 Index int // Index for multi-variable: aws_instance.foo.1.id == 1
 
 key string
+ varRange
 }
 
 // SelfVariable is a variable that is referencing the same resource
@@ -75,6 +96,7 @@ type SelfVariable struct {
 Field string
 
 key string
+ varRange
 }
 
 // SimpleVariable is an unprefixed variable, which can show up when users have
@@ -82,6 +104,7 @@ type SelfVariable struct {
 // internally. The template_file resource is an example of this.
type SimpleVariable struct { Key string + varRange } // TerraformVariable is a "terraform."-prefixed variable used to access @@ -89,6 +112,7 @@ type SimpleVariable struct { type TerraformVariable struct { Field string key string + varRange } // A UserVariable is a variable that is referencing a user variable @@ -99,6 +123,14 @@ type UserVariable struct { Elem string key string + varRange +} + +// A LocalVariable is a variable that references a local value defined within +// the current module, via a "locals" block. This looks like "${local.foo}". +type LocalVariable struct { + Name string + varRange } func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { @@ -112,6 +144,8 @@ func NewInterpolatedVariable(v string) (InterpolatedVariable, error) { return NewTerraformVariable(v) } else if strings.HasPrefix(v, "var.") { return NewUserVariable(v) + } else if strings.HasPrefix(v, "local.") { + return NewLocalVariable(v) } else if strings.HasPrefix(v, "module.") { return NewModuleVariable(v) } else if !strings.ContainsRune(v, '.') { @@ -276,7 +310,7 @@ func (v *SelfVariable) GoString() string { } func NewSimpleVariable(key string) (*SimpleVariable, error) { - return &SimpleVariable{key}, nil + return &SimpleVariable{Key: key}, nil } func (v *SimpleVariable) FullKey() string { @@ -331,6 +365,25 @@ func (v *UserVariable) GoString() string { return fmt.Sprintf("*%#v", *v) } +func NewLocalVariable(key string) (*LocalVariable, error) { + name := key[len("local."):] + if idx := strings.Index(name, "."); idx > -1 { + return nil, fmt.Errorf("Can't use dot (.) attribute access in local.%s; use square bracket indexing", name) + } + + return &LocalVariable{ + Name: name, + }, nil +} + +func (v *LocalVariable) FullKey() string { + return fmt.Sprintf("local.%s", v.Name) +} + +func (v *LocalVariable) GoString() string { + return fmt.Sprintf("*%#v", *v) +} + // DetectVariables takes an AST root and returns all the interpolated // variables that are detected in the AST tree. func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) { diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index cc09e384..6a2050c9 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -1,16 +1,23 @@ package config import ( + "bytes" + "compress/gzip" "crypto/md5" + "crypto/rsa" "crypto/sha1" "crypto/sha256" + "crypto/sha512" + "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" "math" "net" + "net/url" "path/filepath" "regexp" "sort" @@ -23,6 +30,7 @@ import ( "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" "github.com/mitchellh/go-homedir" + "golang.org/x/crypto/bcrypt" ) // stringSliceToVariableValue converts a string slice into the value @@ -39,6 +47,20 @@ func stringSliceToVariableValue(values []string) []ast.Variable { return output } +// listVariableSliceToVariableValue converts a list of lists into the value +// required to be returned from interpolation functions which return TypeList. 
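+//
+// A hedged illustration (not part of the original comment): an input
+// [][]ast.Variable{{a}, {b, c}} becomes []ast.Variable{
+// {Type: ast.TypeList, Value: []ast.Variable{a}},
+// {Type: ast.TypeList, Value: []ast.Variable{b, c}},
+// }.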
+func listVariableSliceToVariableValue(values [][]ast.Variable) []ast.Variable {
+ output := make([]ast.Variable, len(values))
+
+ for index, value := range values {
+ output[index] = ast.Variable{
+ Type: ast.TypeList,
+ Value: value,
+ }
+ }
+ return output
+}
+
 func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
 output := make([]string, len(values))
 for index, value := range values {
@@ -53,29 +75,41 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
 // Funcs is the mapping of built-in functions for configuration.
 func Funcs() map[string]ast.Function {
 return map[string]ast.Function{
+ "abs": interpolationFuncAbs(),
 "basename": interpolationFuncBasename(),
 "base64decode": interpolationFuncBase64Decode(),
 "base64encode": interpolationFuncBase64Encode(),
+ "base64gzip": interpolationFuncBase64Gzip(),
 "base64sha256": interpolationFuncBase64Sha256(),
+ "base64sha512": interpolationFuncBase64Sha512(),
+ "bcrypt": interpolationFuncBcrypt(),
 "ceil": interpolationFuncCeil(),
+ "chomp": interpolationFuncChomp(),
 "cidrhost": interpolationFuncCidrHost(),
 "cidrnetmask": interpolationFuncCidrNetmask(),
 "cidrsubnet": interpolationFuncCidrSubnet(),
 "coalesce": interpolationFuncCoalesce(),
+ "coalescelist": interpolationFuncCoalesceList(),
 "compact": interpolationFuncCompact(),
 "concat": interpolationFuncConcat(),
+ "contains": interpolationFuncContains(),
 "dirname": interpolationFuncDirname(),
 "distinct": interpolationFuncDistinct(),
 "element": interpolationFuncElement(),
+ "chunklist": interpolationFuncChunklist(),
 "file": interpolationFuncFile(),
+ "matchkeys": interpolationFuncMatchKeys(),
+ "flatten": interpolationFuncFlatten(),
 "floor": interpolationFuncFloor(),
 "format": interpolationFuncFormat(),
 "formatlist": interpolationFuncFormatList(),
+ "indent": interpolationFuncIndent(),
 "index": interpolationFuncIndex(),
 "join": interpolationFuncJoin(),
 "jsonencode": interpolationFuncJSONEncode(),
 "length": interpolationFuncLength(),
 "list": interpolationFuncList(),
+ "log": interpolationFuncLog(),
 "lower": interpolationFuncLower(),
 "map": interpolationFuncMap(),
 "max": interpolationFuncMax(),
@@ -83,19 +117,26 @@ func Funcs() map[string]ast.Function {
 "merge": interpolationFuncMerge(),
 "min": interpolationFuncMin(),
 "pathexpand": interpolationFuncPathExpand(),
+ "pow": interpolationFuncPow(),
 "uuid": interpolationFuncUUID(),
 "replace": interpolationFuncReplace(),
+ "reverse": interpolationFuncReverse(),
+ "rsadecrypt": interpolationFuncRsaDecrypt(),
 "sha1": interpolationFuncSha1(),
 "sha256": interpolationFuncSha256(),
+ "sha512": interpolationFuncSha512(),
 "signum": interpolationFuncSignum(),
 "slice": interpolationFuncSlice(),
 "sort": interpolationFuncSort(),
 "split": interpolationFuncSplit(),
 "substr": interpolationFuncSubstr(),
 "timestamp": interpolationFuncTimestamp(),
+ "timeadd": interpolationFuncTimeAdd(),
 "title": interpolationFuncTitle(),
+ "transpose": interpolationFuncTranspose(),
 "trimspace": interpolationFuncTrimSpace(),
 "upper": interpolationFuncUpper(),
+ "urlencode": interpolationFuncURLEncode(),
 "zipmap": interpolationFuncZipMap(),
 }
}
@@ -322,6 +363,46 @@ func interpolationFuncCoalesce() ast.Function {
 }
 }
 
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non-empty list from the provided input.
+func interpolationFuncCoalesceList() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: true,
+ VariadicType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ if len(args) < 2 {
+ return nil, fmt.Errorf("must provide at least two arguments")
+ }
+ for _, arg := range args {
+ argument := arg.([]ast.Variable)
+
+ if len(argument) > 0 {
+ return argument, nil
+ }
+ }
+ return make([]ast.Variable, 0), nil
+ },
+ }
+}
+
+// interpolationFuncContains returns true if an element is in the list
+// and returns false otherwise
+func interpolationFuncContains() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeString},
+ ReturnType: ast.TypeBool,
+ Callback: func(args []interface{}) (interface{}, error) {
+ _, err := interpolationFuncIndex().Callback(args)
+ if err != nil {
+ return false, nil
+ }
+ return true, nil
+ },
+ }
+}
+
 // interpolationFuncConcat implements the "concat" function that concatenates
 // multiple lists.
 func interpolationFuncConcat() ast.Function {
@@ -363,6 +444,17 @@ func interpolationFuncConcat() ast.Function {
 }
 }
 
+// interpolationFuncPow returns x raised to the power of y.
+func interpolationFuncPow() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat, ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return math.Pow(args[0].(float64), args[1].(float64)), nil
+ },
+ }
+}
+
 // interpolationFuncFile implements the "file" function that allows
 // loading contents from a file.
 func interpolationFuncFile() ast.Function {
@@ -459,6 +551,29 @@ func interpolationFuncCeil() ast.Function {
 }
 }
 
+// interpolationFuncLog returns the logarithm of the first argument in the
+// base given by the second argument.
+func interpolationFuncLog() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeFloat, ast.TypeFloat},
+ ReturnType: ast.TypeFloat,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
+ },
+ }
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+ newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ return newlines.ReplaceAllString(args[0].(string), ""), nil
+ },
+ }
+}
+
 // interpolationFuncFloor returns the greatest integer value less than or equal to the argument
 func interpolationFuncFloor() ast.Function {
 return ast.Function{
@@ -584,6 +699,21 @@ func interpolationFuncFormatList() ast.Function {
 }
 }
 
+// interpolationFuncIndent indents a multi-line string with the
+// specified number of spaces
+func interpolationFuncIndent() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeInt, ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ spaces := args[0].(int)
+ data := args[1].(string)
+ pad := strings.Repeat(" ", spaces)
+ return strings.Replace(data, "\n", "\n"+pad, -1), nil
+ },
+ }
+}
+
 // interpolationFuncIndex implements the "index" function that allows one to
 // find the index of a specific element in a list
 func interpolationFuncIndex() ast.Function {
@@ -655,6 +785,57 @@ func appendIfMissing(slice []string, element string) []string {
 return append(slice, element)
 }
 
+// for two lists `keys` and `values` of equal length, returns all elements
+// from `values` where the corresponding element from `keys` is in `searchset`.
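+//
+// A hedged example (not in the original comment): with values
+// ["i-123", "i-456"], keys ["us-west", "us-east"] and searchset
+// ["us-east"], the result is ["i-456"].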
+func interpolationFuncMatchKeys() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ output := make([]ast.Variable, 0)
+
+ values, _ := args[0].([]ast.Variable)
+ keys, _ := args[1].([]ast.Variable)
+ searchset, _ := args[2].([]ast.Variable)
+
+ if len(keys) != len(values) {
+ return nil, fmt.Errorf("length of keys and values should be equal")
+ }
+
+ for i, key := range keys {
+ for _, search := range searchset {
+ if res, err := compareSimpleVariables(key, search); err != nil {
+ return nil, err
+ } else if res {
+ output = append(output, values[i])
+ break
+ }
+ }
+ }
+ // if searchset is empty, then output is an empty list as well.
+ // if we haven't matched any key, then output is an empty list.
+ return output, nil
+ },
+ }
+}
+
+// compare two variables of the same simple type, i.e. not complex types
+// such as TypeList or TypeMap
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+ if a.Type != b.Type {
+ return false, fmt.Errorf(
+ "won't compare items of different types %s and %s",
+ a.Type.Printable(), b.Type.Printable())
+ }
+ switch a.Type {
+ case ast.TypeString:
+ return a.Value.(string) == b.Value.(string), nil
+ default:
+ return false, fmt.Errorf(
+ "can't compare items of type %s",
+ a.Type.Printable())
+ }
+}
+
 // interpolationFuncJoin implements the "join" function that allows
 // multi-variable values to be joined by some character.
 func interpolationFuncJoin() ast.Function {
@@ -687,8 +868,7 @@ func interpolationFuncJoin() ast.Function {
 }
 
 // interpolationFuncJSONEncode implements the "jsonencode" function that encodes
-// a string, list, or map as its JSON representation. For now, values in the
-// list or map may only be strings.
+// a string, list, or map as its JSON representation.
 func interpolationFuncJSONEncode() ast.Function {
 return ast.Function{
 ArgTypes: []ast.Type{ast.TypeAny},
@@ -701,28 +881,36 @@ func interpolationFuncJSONEncode() ast.Function {
 toEncode = typedArg
 
 case []ast.Variable:
- // We preallocate the list here. Note that it's important that in
- // the length 0 case, we have an empty list rather than nil, as
- // they encode differently.
- // XXX It would be nice to support arbitrarily nested data here. Is
- // there an inverse of hil.InterfaceToVariable?
 strings := make([]string, len(typedArg))
 for i, v := range typedArg {
 if v.Type != ast.TypeString {
- return "", fmt.Errorf("list elements must be strings")
+ variable, _ := hil.InterfaceToVariable(typedArg)
+ toEncode, _ = hil.VariableToInterface(variable)
+
+ jEnc, err := json.Marshal(toEncode)
+ if err != nil {
+ return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+ }
+ return string(jEnc), nil
+
 }
 strings[i] = v.Value.(string)
 }
 toEncode = strings
 
 case map[string]ast.Variable:
- // XXX It would be nice to support arbitrarily nested data here. Is
- // there an inverse of hil.InterfaceToVariable?
 stringMap := make(map[string]string)
 for k, v := range typedArg {
 if v.Type != ast.TypeString {
- return "", fmt.Errorf("map values must be strings")
+ variable, _ := hil.InterfaceToVariable(typedArg)
+ toEncode, _ = hil.VariableToInterface(variable)
+
+ jEnc, err := json.Marshal(toEncode)
+ if err != nil {
+ return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+ }
+ return string(jEnc), nil
 }
 stringMap[k] = v.Value.(string)
 }
 toEncode = stringMap
@@ -768,6 +956,25 @@ func interpolationFuncReplace() ast.Function {
 }
 }
 
+// interpolationFuncReverse implements the "reverse" function that does list reversal
+func interpolationFuncReverse() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeList},
+ ReturnType: ast.TypeList,
+ Variadic: false,
+ Callback: func(args []interface{}) (interface{}, error) {
+ inputList := args[0].([]ast.Variable)
+
+ reversedList := make([]ast.Variable, len(inputList))
+ for idx := range inputList {
+ reversedList[len(inputList)-1-idx] = inputList[idx]
+ }
+
+ return reversedList, nil
+ },
+ }
+}
+
 func interpolationFuncLength() ast.Function {
 return ast.Function{
 ArgTypes: []ast.Type{ast.TypeAny},
@@ -962,6 +1169,56 @@ func interpolationFuncElement() ast.Function {
 }
 }
 
+// returns the `list` items chunked by `size`.
+func interpolationFuncChunklist() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeList, // inputList
+ ast.TypeInt, // size
+ },
+ ReturnType: ast.TypeList,
+ Callback: func(args []interface{}) (interface{}, error) {
+ output := make([]ast.Variable, 0)
+
+ values, _ := args[0].([]ast.Variable)
+ size, _ := args[1].(int)
+
+ // errors if size is negative
+ if size < 0 {
+ return nil, fmt.Errorf("The size argument must not be negative")
+ }
+
+ // if size is 0, returns a list made of the initial list
+ if size == 0 {
+ output = append(output, ast.Variable{
+ Type: ast.TypeList,
+ Value: values,
+ })
+ return output, nil
+ }
+
+ variables := make([]ast.Variable, 0)
+ chunk := ast.Variable{
+ Type: ast.TypeList,
+ Value: variables,
+ }
+ l := len(values)
+ for i, v := range values {
+ variables = append(variables, v)
+
+ // Emit a chunk when it reaches the requested size, or when we
+ // reach the end of the values
+ if (i+1)%size == 0 || (i+1) == l {
+ chunk.Value = variables
+ output = append(output, chunk)
+ variables = make([]ast.Variable, 0)
+ }
+ }
+
+ return output, nil
+ },
+ }
+}
+
 // interpolationFuncKeys implements the "keys" function that yields a list of
 // keys of map types within a Terraform configuration.
 func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
@@ -1061,6 +1318,32 @@ func interpolationFuncBase64Decode() ast.Function {
 }
 }
 
+// interpolationFuncBase64Gzip implements the "base64gzip" function that
+// gzip-compresses a string and encodes the result using base64
+func interpolationFuncBase64Gzip() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+
+ var b bytes.Buffer
+ gz := gzip.NewWriter(&b)
+ if _, err := gz.Write([]byte(s)); err != nil {
+ return "", fmt.Errorf("failed to write gzip raw data: '%s'", s)
+ }
+ if err := gz.Flush(); err != nil {
+ return "", fmt.Errorf("failed to flush gzip writer: '%s'", s)
+ }
+ if err := gz.Close(); err != nil {
+ return "", fmt.Errorf("failed to close gzip writer: '%s'", s)
+ }
+
+ return base64.StdEncoding.EncodeToString(b.Bytes()), nil
+ },
+ }
+}
+
 // interpolationFuncLower implements the "lower" function that does
 // string lower casing.
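// For example, lower("TERRAFORM") returns "terraform".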
func interpolationFuncLower() ast.Function {
@@ -1150,6 +1433,20 @@ func interpolationFuncSha256() ast.Function {
 }
 }
 
+func interpolationFuncSha512() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha512.New()
+ h.Write([]byte(s))
+ hash := hex.EncodeToString(h.Sum(nil))
+ return hash, nil
+ },
+ }
+}
+
 func interpolationFuncTrimSpace() ast.Function {
 return ast.Function{
 ArgTypes: []ast.Type{ast.TypeString},
@@ -1176,6 +1473,55 @@ func interpolationFuncBase64Sha256() ast.Function {
 }
 }
 
+func interpolationFuncBase64Sha512() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ s := args[0].(string)
+ h := sha512.New()
+ h.Write([]byte(s))
+ shaSum := h.Sum(nil)
+ encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+ return encoded, nil
+ },
+ }
+}
+
+func interpolationFuncBcrypt() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{ast.TypeString},
+ Variadic: true,
+ VariadicType: ast.TypeString,
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+ defaultCost := 10
+
+ if len(args) > 1 {
+ costStr := args[1].(string)
+ cost, err := strconv.Atoi(costStr)
+ if err != nil {
+ return "", err
+ }
+
+ defaultCost = cost
+ }
+
+ if len(args) > 2 {
+ return "", fmt.Errorf("bcrypt() takes no more than two arguments")
+ }
+
+ input := args[0].(string)
+ out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+ if err != nil {
+ return "", fmt.Errorf("error occurred generating password %s", err.Error())
+ }
+
+ return string(out), nil
+ },
+ }
+}
+
 func interpolationFuncUUID() ast.Function {
 return ast.Function{
 ArgTypes: []ast.Type{},
@@ -1197,6 +1543,29 @@ func interpolationFuncTimestamp() ast.Function {
 }
 }
 
+func interpolationFuncTimeAdd() ast.Function {
+ return ast.Function{
+ ArgTypes: []ast.Type{
+ ast.TypeString, // input timestamp string in RFC3339 format
+ ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration
+ },
+ ReturnType: ast.TypeString,
+ Callback: func(args []interface{}) (interface{}, error) {
+
+ ts, err := time.Parse(time.RFC3339, args[0].(string))
+ if err != nil {
+ return nil, err
+ }
+ duration, err := time.ParseDuration(args[1].(string))
+ if err != nil {
+ return nil, err
+ }
+
+ return ts.Add(duration).Format(time.RFC3339), nil
+ },
+ }
+}
+
 // interpolationFuncTitle implements the "title" function that returns a copy of the
 // string in which first characters of all the words are capitalized.
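// For example, title("hello world") returns "Hello World".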
func interpolationFuncTitle() ast.Function { @@ -1242,7 +1611,7 @@ func interpolationFuncSubstr() ast.Function { return nil, fmt.Errorf("length should be a non-negative integer") } - if offset > len(str) { + if offset > len(str) || offset < 0 { return nil, fmt.Errorf("offset cannot be larger than the length of the string") } @@ -1254,3 +1623,139 @@ func interpolationFuncSubstr() ast.Function { }, } } + +// Flatten until it's not ast.TypeList +func flattener(finalList []ast.Variable, flattenList []ast.Variable) []ast.Variable { + for _, val := range flattenList { + if val.Type == ast.TypeList { + finalList = flattener(finalList, val.Value.([]ast.Variable)) + } else { + finalList = append(finalList, val) + } + } + return finalList +} + +// Flatten to single list +func interpolationFuncFlatten() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeList}, + ReturnType: ast.TypeList, + Variadic: false, + Callback: func(args []interface{}) (interface{}, error) { + inputList := args[0].([]ast.Variable) + + var outputList []ast.Variable + return flattener(outputList, inputList), nil + }, + } +} + +func interpolationFuncURLEncode() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + return url.QueryEscape(s), nil + }, + } +} + +// interpolationFuncTranspose implements the "transpose" function +// that converts a map (string,list) to a map (string,list) where +// the unique values of the original lists become the keys of the +// new map and the keys of the original map become values for the +// corresponding new keys. +func interpolationFuncTranspose() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + + inputMap := args[0].(map[string]ast.Variable) + outputMap := make(map[string]ast.Variable) + tmpMap := make(map[string][]string) + + for inKey, inVal := range inputMap { + if inVal.Type != ast.TypeList { + return nil, fmt.Errorf("transpose requires a map of lists of strings") + } + values := inVal.Value.([]ast.Variable) + for _, listVal := range values { + if listVal.Type != ast.TypeString { + return nil, fmt.Errorf("transpose requires the given map values to be lists of strings") + } + outKey := listVal.Value.(string) + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]ast.Variable, 0) + for _, v := range outVal { + values = append(values, ast.Variable{Type: ast.TypeString, Value: v}) + } + outputMap[outKey] = ast.Variable{Type: ast.TypeList, Value: values} + } + return outputMap, nil + }, + } +} + +// interpolationFuncAbs returns the absolute value of a given float. +func interpolationFuncAbs() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeFloat}, + ReturnType: ast.TypeFloat, + Callback: func(args []interface{}) (interface{}, error) { + return math.Abs(args[0].(float64)), nil + }, + } +} + +// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does +// RSA decryption. 
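+//
+// For example, with hypothetical file names:
+//
+// ${rsadecrypt(file("ciphertext.b64"), file("privatekey.pem"))}
+//
+// where ciphertext.b64 holds base64-encoded PKCS#1 v1.5 ciphertext and
+// privatekey.pem holds the matching unencrypted PEM-encoded private key.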
+func interpolationFuncRsaDecrypt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + key := args[1].(string) + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", s) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return "", fmt.Errorf("Failed to read key %q: no key found", key) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return "", fmt.Errorf( + "Failed to read key %q: password protected keys are\n"+ + "not supported. Please decrypt the key prior to use.", key) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return "", err + } + + return string(out), nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go index ead3d102..66a677d5 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go @@ -271,9 +271,7 @@ func (w *interpolationWalker) splitSlice() { result = append(result, val.Value) } case []interface{}: - for _, element := range val { - result = append(result, element) - } + result = append(result, val...) default: result = append(result, v) } diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go index 0bfa89c2..6e347816 100644 --- a/vendor/github.com/hashicorp/terraform/config/loader.go +++ b/vendor/github.com/hashicorp/terraform/config/loader.go @@ -80,7 +80,7 @@ func LoadDir(root string) (*Config, error) { if err != nil { return nil, err } - if len(files) == 0 { + if len(files) == 0 && len(overrides) == 0 { return nil, &ErrNoConfigsFound{Dir: root} } @@ -112,6 +112,9 @@ func LoadDir(root string) (*Config, error) { result = c } } + if len(files) == 0 { + result = &Config{} + } // Load all the overrides, and merge them into the config for _, f := range overrides { @@ -194,7 +197,7 @@ func dirFiles(dir string) ([]string, []string, error) { // Only care about files that are valid to load name := fi.Name() extValue := ext(name) - if extValue == "" || isIgnoredFile(name) { + if extValue == "" || IsIgnoredFile(name) { continue } @@ -215,9 +218,9 @@ func dirFiles(dir string) ([]string, []string, error) { return files, overrides, nil } -// isIgnoredFile returns true or false depending on whether the +// IsIgnoredFile returns true or false depending on whether the // provided file name is a file that should be ignored. 
-func isIgnoredFile(name string) bool {
+func IsIgnoredFile(name string) bool {
 return strings.HasPrefix(name, ".") || // Unix-like hidden files
 strings.HasSuffix(name, "~") || // vim
 strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
index a40ad5ba..68cffe2c 100644
--- a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
+++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
@@ -17,10 +17,35 @@ type hclConfigurable struct {
 Root *ast.File
 }
 
+var ReservedDataSourceFields = []string{
+ "connection",
+ "count",
+ "depends_on",
+ "lifecycle",
+ "provider",
+ "provisioner",
+}
+
+var ReservedResourceFields = []string{
+ "connection",
+ "count",
+ "depends_on",
+ "id",
+ "lifecycle",
+ "provider",
+ "provisioner",
+}
+
+var ReservedProviderFields = []string{
+ "alias",
+ "version",
+}
+
 func (t *hclConfigurable) Config() (*Config, error) {
 validKeys := map[string]struct{}{
 "atlas": struct{}{},
 "data": struct{}{},
+ "locals": struct{}{},
 "module": struct{}{},
 "output": struct{}{},
 "provider": struct{}{},
@@ -56,6 +81,15 @@ func (t *hclConfigurable) Config() (*Config, error) {
 }
 }
 
+ // Build local values
+ if locals := list.Filter("locals"); len(locals.Items) > 0 {
+ var err error
+ config.Locals, err = loadLocalsHcl(locals)
+ if err != nil {
+ return nil, err
+ }
+ }
+
 // Get Atlas configuration
 if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
 var err error
@@ -327,6 +361,10 @@ func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
 // represents exactly one module definition in the HCL configuration.
 // We leave it up to another pass to merge them together.
 func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+ if err := assertAllBlocksHaveNames("module", list); err != nil {
+ return nil, err
+ }
+
 list = list.Children()
 if len(list.Items) == 0 {
 return nil, nil
@@ -355,9 +393,6 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
 err)
 }
 
- // Remove the fields we handle specially
- delete(config, "source")
-
 rawConfig, err := NewRawConfig(config)
 if err != nil {
 return nil, fmt.Errorf(
@@ -366,7 +401,11 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
 err)
 }
 
- // If we have a count, then figure it out
+ // Remove the fields we handle specially
+ delete(config, "source")
+ delete(config, "version")
+ delete(config, "providers")
+
 var source string
 if o := listVal.Filter("source"); len(o.Items) > 0 {
 err = hcl.DecodeObject(&source, o.Items[0].Val)
@@ -378,9 +417,33 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
 }
 }
 
+ var version string
+ if o := listVal.Filter("version"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&version, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing version for %s: %s",
+ k,
+ err)
+ }
+ }
+
+ var providers map[string]string
+ if o := listVal.Filter("providers"); len(o.Items) > 0 {
+ err = hcl.DecodeObject(&providers, o.Items[0].Val)
+ if err != nil {
+ return nil, fmt.Errorf(
+ "Error parsing providers for %s: %s",
+ k,
+ err)
+ }
+ }
+
 result = append(result, &Module{
 Name: k,
 Source: source,
+ Version: version,
+ Providers: providers,
 RawConfig: rawConfig,
 })
 }
@@ -388,15 +451,68 @@ func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
 return result, nil
 }
 
+// loadLocalsHcl recurses into the given HCL object and turns it into
+// a list of locals.
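+//
+// For example (an illustrative configuration, not from this changeset), it
+// accepts input of the form:
+//
+// locals {
+// greeting = "Hello, ${var.name}"
+// }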
+func loadLocalsHcl(list *ast.ObjectList) ([]*Local, error) { + + result := make([]*Local, 0, len(list.Items)) + + for _, block := range list.Items { + if len(block.Keys) > 0 { + return nil, fmt.Errorf( + "locals block at %s should not have label %q", + block.Pos(), block.Keys[0].Token.Value(), + ) + } + + blockObj, ok := block.Val.(*ast.ObjectType) + if !ok { + return nil, fmt.Errorf("locals value at %s should be a block", block.Val.Pos()) + } + + // blockObj now contains directly our local decls + for _, item := range blockObj.List.Items { + if len(item.Keys) != 1 { + return nil, fmt.Errorf("local declaration at %s may not be a block", item.Val.Pos()) + } + + // By the time we get here there can only be one item left, but + // we'll decode into a map anyway because it's a convenient way + // to extract both the key and the value robustly. + kv := map[string]interface{}{} + hcl.DecodeObject(&kv, item) + for k, v := range kv { + rawConfig, err := NewRawConfig(map[string]interface{}{ + "value": v, + }) + + if err != nil { + return nil, fmt.Errorf( + "error parsing local value %q at %s: %s", + k, item.Val.Pos(), err, + ) + } + + result = append(result, &Local{ + Name: k, + RawConfig: rawConfig, + }) + } + } + } + + return result, nil +} + // LoadOutputsHcl recurses into the given HCL object and turns // it into a mapping of outputs. func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, fmt.Errorf( - "'output' must be followed by exactly one string: a name") + if err := assertAllBlocksHaveNames("output", list); err != nil { + return nil, err } + list = list.Children() + // Go through each object and turn it into an actual result. result := make([]*Output, 0, len(list.Items)) for _, item := range list.Items { @@ -416,6 +532,7 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { // Delete special keys delete(config, "depends_on") + delete(config, "description") rawConfig, err := NewRawConfig(config) if err != nil { @@ -437,10 +554,23 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { } } + // If we have a description field, then filter that + var description string + if o := listVal.Filter("description"); len(o.Items) > 0 { + err := hcl.DecodeObject(&description, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading description for output %q: %s", + n, + err) + } + } + result = append(result, &Output{ - Name: n, - RawConfig: rawConfig, - DependsOn: dependsOn, + Name: n, + RawConfig: rawConfig, + DependsOn: dependsOn, + Description: description, }) } @@ -450,12 +580,12 @@ func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) { // LoadVariablesHcl recurses into the given HCL object and turns // it into a list of variables. func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) { - list = list.Children() - if len(list.Items) == 0 { - return nil, fmt.Errorf( - "'variable' must be followed by exactly one strings: a name") + if err := assertAllBlocksHaveNames("variable", list); err != nil { + return nil, err } + list = list.Children() + // hclVariable is the structure each variable is decoded into type hclVariable struct { DeclaredType string `hcl:"type"` @@ -531,6 +661,10 @@ func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) { // LoadProvidersHcl recurses into the given HCL object and turns // it into a mapping of provider configs. 
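// For example (illustrative only), a block such as
//
// provider "aws" {
// alias = "west"
// version = "~> 1.2"
// }
//
// yields a ProviderConfig with Name "aws", Alias "west" and Version "~> 1.2".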
func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { + if err := assertAllBlocksHaveNames("provider", list); err != nil { + return nil, err + } + list = list.Children() if len(list.Items) == 0 { return nil, nil @@ -554,6 +688,7 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { } delete(config, "alias") + delete(config, "version") rawConfig, err := NewRawConfig(config) if err != nil { @@ -575,9 +710,22 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { } } + // If we have a version field then extract it + var version string + if a := listVal.Filter("version"); len(a.Items) > 0 { + err := hcl.DecodeObject(&version, a.Items[0].Val) + if err != nil { + return nil, fmt.Errorf( + "Error reading version for provider[%s]: %s", + n, + err) + } + } + result = append(result, &ProviderConfig{ Name: n, Alias: alias, + Version: version, RawConfig: rawConfig, }) } @@ -592,6 +740,10 @@ func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) { // represents exactly one data definition in the HCL configuration. // We leave it up to another pass to merge them together. func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { + if err := assertAllBlocksHaveNames("data", list); err != nil { + return nil, err + } + list = list.Children() if len(list.Items) == 0 { return nil, nil @@ -901,6 +1053,10 @@ func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) { } func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) { + if err := assertAllBlocksHaveNames("provisioner", list); err != nil { + return nil, err + } + list = list.Children() if len(list.Items) == 0 { return nil, nil @@ -1023,6 +1179,29 @@ func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode { } */ +// assertAllBlocksHaveNames returns an error if any of the items in +// the given object list are blocks without keys (like "module {}") +// or simple assignments (like "module = 1"). It returns nil if +// neither of these things are true. +// +// The given name is used in any generated error messages, and should +// be the name of the block we're dealing with. The given list should +// be the result of calling .Filter on an object list with that same +// name. 
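+//
+// For example, `output "ip" { ... }` is accepted, while `output {}` and
+// `output = 1` produce the errors below.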
+func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error { + if elem := list.Elem(); len(elem.Items) != 0 { + switch et := elem.Items[0].Val.(type) { + case *ast.ObjectType: + pos := et.Lbrace + return fmt.Errorf("%s: %q must be followed by a name", pos, name) + default: + pos := elem.Items[0].Val.Pos() + return fmt.Errorf("%s: %q must be a configuration block", pos, name) + } + } + return nil +} + func checkHCLKeys(node ast.Node, valid []string) error { var list *ast.ObjectList switch n := node.(type) { diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go new file mode 100644 index 00000000..4f9f129f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/loader_hcl2.go @@ -0,0 +1,473 @@ +package config + +import ( + "fmt" + "sort" + "strings" + + gohcl2 "github.com/hashicorp/hcl2/gohcl" + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2parse "github.com/hashicorp/hcl2/hclparse" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/zclconf/go-cty/cty" +) + +// hcl2Configurable is an implementation of configurable that knows +// how to turn a HCL Body into a *Config object. +type hcl2Configurable struct { + SourceFilename string + Body hcl2.Body +} + +// hcl2Loader is a wrapper around a HCL parser that provides a fileLoaderFunc. +type hcl2Loader struct { + Parser *hcl2parse.Parser +} + +// For the moment we'll just have a global loader since we don't have anywhere +// better to stash this. +// TODO: refactor the loader API so that it uses some sort of object we can +// stash the parser inside. +var globalHCL2Loader = newHCL2Loader() + +// newHCL2Loader creates a new hcl2Loader containing a new HCL Parser. +// +// HCL parsers retain information about files that are loaded to aid in +// producing diagnostic messages, so all files within a single configuration +// should be loaded with the same parser to ensure the availability of +// full diagnostic information. +func newHCL2Loader() hcl2Loader { + return hcl2Loader{ + Parser: hcl2parse.NewParser(), + } +} + +// loadFile is a fileLoaderFunc that knows how to read a HCL2 file and turn it +// into a hcl2Configurable. +func (l hcl2Loader) loadFile(filename string) (configurable, []string, error) { + var f *hcl2.File + var diags hcl2.Diagnostics + if strings.HasSuffix(filename, ".json") { + f, diags = l.Parser.ParseJSONFile(filename) + } else { + f, diags = l.Parser.ParseHCLFile(filename) + } + if diags.HasErrors() { + // Return diagnostics as an error; callers may type-assert this to + // recover the original diagnostics, if it doesn't end up wrapped + // in another error. + return nil, nil, diags + } + + return &hcl2Configurable{ + SourceFilename: filename, + Body: f.Body, + }, nil, nil +} + +func (t *hcl2Configurable) Config() (*Config, error) { + config := &Config{} + + // these structs are used only for the initial shallow decoding; we'll + // expand this into the main, public-facing config structs afterwards. 
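+ // As a reading aid (a hedged summary of gohcl tag semantics): a tag of
+ // the form `hcl:"name,label"` binds a block label, `hcl:"x,attr"` binds
+ // an attribute (optional when the field is a pointer), `hcl:"x,block"`
+ // binds a nested block, and `hcl:",remain"` captures the remaining body
+ // for later decoding.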
+ type atlas struct {
+ Name string `hcl:"name"`
+ Include *[]string `hcl:"include"`
+ Exclude *[]string `hcl:"exclude"`
+ }
+ type provider struct {
+ Name string `hcl:"name,label"`
+ Alias *string `hcl:"alias,attr"`
+ Version *string `hcl:"version,attr"`
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type module struct {
+ Name string `hcl:"name,label"`
+ Source string `hcl:"source,attr"`
+ Version *string `hcl:"version,attr"`
+ Providers *map[string]string `hcl:"providers,attr"`
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type resourceLifecycle struct {
+ CreateBeforeDestroy *bool `hcl:"create_before_destroy,attr"`
+ PreventDestroy *bool `hcl:"prevent_destroy,attr"`
+ IgnoreChanges *[]string `hcl:"ignore_changes,attr"`
+ }
+ type connection struct {
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type provisioner struct {
+ Type string `hcl:"type,label"`
+
+ When *string `hcl:"when,attr"`
+ OnFailure *string `hcl:"on_failure,attr"`
+
+ Connection *connection `hcl:"connection,block"`
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type managedResource struct {
+ Type string `hcl:"type,label"`
+ Name string `hcl:"name,label"`
+
+ CountExpr hcl2.Expression `hcl:"count,attr"`
+ Provider *string `hcl:"provider,attr"`
+ DependsOn *[]string `hcl:"depends_on,attr"`
+
+ Lifecycle *resourceLifecycle `hcl:"lifecycle,block"`
+ Provisioners []provisioner `hcl:"provisioner,block"`
+ Connection *connection `hcl:"connection,block"`
+
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type dataResource struct {
+ Type string `hcl:"type,label"`
+ Name string `hcl:"name,label"`
+
+ CountExpr hcl2.Expression `hcl:"count,attr"`
+ Provider *string `hcl:"provider,attr"`
+ DependsOn *[]string `hcl:"depends_on,attr"`
+
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type variable struct {
+ Name string `hcl:"name,label"`
+
+ DeclaredType *string `hcl:"type,attr"`
+ Default *cty.Value `hcl:"default,attr"`
+ Description *string `hcl:"description,attr"`
+ Sensitive *bool `hcl:"sensitive,attr"`
+ }
+ type output struct {
+ Name string `hcl:"name,label"`
+
+ ValueExpr hcl2.Expression `hcl:"value,attr"`
+ DependsOn *[]string `hcl:"depends_on,attr"`
+ Description *string `hcl:"description,attr"`
+ Sensitive *bool `hcl:"sensitive,attr"`
+ }
+ type locals struct {
+ Definitions hcl2.Attributes `hcl:",remain"`
+ }
+ type backend struct {
+ Type string `hcl:"type,label"`
+ Config hcl2.Body `hcl:",remain"`
+ }
+ type terraform struct {
+ RequiredVersion *string `hcl:"required_version,attr"`
+ Backend *backend `hcl:"backend,block"`
+ }
+ type topLevel struct {
+ Atlas *atlas `hcl:"atlas,block"`
+ Datas []dataResource `hcl:"data,block"`
+ Modules []module `hcl:"module,block"`
+ Outputs []output `hcl:"output,block"`
+ Providers []provider `hcl:"provider,block"`
+ Resources []managedResource `hcl:"resource,block"`
+ Terraform *terraform `hcl:"terraform,block"`
+ Variables []variable `hcl:"variable,block"`
+ Locals []*locals `hcl:"locals,block"`
+ }
+
+ var raw topLevel
+ diags := gohcl2.DecodeBody(t.Body, nil, &raw)
+ if diags.HasErrors() {
+ // Do some minimal decoding to see if we can at least get the
+ // required Terraform version, which might help explain why we
+ // couldn't parse the rest.
+ if raw.Terraform != nil && raw.Terraform.RequiredVersion != nil {
+ config.Terraform = &Terraform{
+ RequiredVersion: *raw.Terraform.RequiredVersion,
+ }
+ }
+
+ // We return the diags as an implementation of error, which the
+ // caller can then type-assert if desired to recover the individual
+ // diagnostics.
+ // FIXME: The current API gives us no way to return warnings in the
+ // absence of any errors.
+ return config, diags
+ }
+
+ if raw.Terraform != nil {
+ var reqdVersion string
+ var backend *Backend
+
+ if raw.Terraform.RequiredVersion != nil {
+ reqdVersion = *raw.Terraform.RequiredVersion
+ }
+ if raw.Terraform.Backend != nil {
+ backend = new(Backend)
+ backend.Type = raw.Terraform.Backend.Type
+
+ // We don't permit interpolations or nested blocks inside the
+ // backend config, so we can decode the config early here and
+ // get direct access to the values, which is important for the
+ // config hashing to work as expected.
+ var config map[string]string
+ configDiags := gohcl2.DecodeBody(raw.Terraform.Backend.Config, nil, &config)
+ diags = append(diags, configDiags...)
+
+ raw := make(map[string]interface{}, len(config))
+ for k, v := range config {
+ raw[k] = v
+ }
+
+ var err error
+ backend.RawConfig, err = NewRawConfig(raw)
+ if err != nil {
+ diags = append(diags, &hcl2.Diagnostic{
+ Severity: hcl2.DiagError,
+ Summary: "Invalid backend configuration",
+ Detail: fmt.Sprintf("Error in backend configuration: %s", err),
+ })
+ }
+ }
+
+ config.Terraform = &Terraform{
+ RequiredVersion: reqdVersion,
+ Backend: backend,
+ }
+ }
+
+ if raw.Atlas != nil {
+ var include, exclude []string
+ if raw.Atlas.Include != nil {
+ include = *raw.Atlas.Include
+ }
+ if raw.Atlas.Exclude != nil {
+ exclude = *raw.Atlas.Exclude
+ }
+ config.Atlas = &AtlasConfig{
+ Name: raw.Atlas.Name,
+ Include: include,
+ Exclude: exclude,
+ }
+ }
+
+ for _, rawM := range raw.Modules {
+ m := &Module{
+ Name: rawM.Name,
+ Source: rawM.Source,
+ RawConfig: NewRawConfigHCL2(rawM.Config),
+ }
+
+ if rawM.Version != nil {
+ m.Version = *rawM.Version
+ }
+
+ if rawM.Providers != nil {
+ m.Providers = *rawM.Providers
+ }
+
+ config.Modules = append(config.Modules, m)
+ }
+
+ for _, rawV := range raw.Variables {
+ v := &Variable{
+ Name: rawV.Name,
+ }
+ if rawV.DeclaredType != nil {
+ v.DeclaredType = *rawV.DeclaredType
+ }
+ if rawV.Default != nil {
+ v.Default = hcl2shim.ConfigValueFromHCL2(*rawV.Default)
+ }
+ if rawV.Description != nil {
+ v.Description = *rawV.Description
+ }
+
+ config.Variables = append(config.Variables, v)
+ }
+
+ for _, rawO := range raw.Outputs {
+ o := &Output{
+ Name: rawO.Name,
+ }
+
+ if rawO.Description != nil {
+ o.Description = *rawO.Description
+ }
+ if rawO.DependsOn != nil {
+ o.DependsOn = *rawO.DependsOn
+ }
+ if rawO.Sensitive != nil {
+ o.Sensitive = *rawO.Sensitive
+ }
+
+ // The result is expected to be a map like map[string]interface{}{"value": something},
+ // so we'll fake that with our hcl2shim.SingleAttrBody shim.
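+ // In effect the shim below behaves like a body containing only
+ // `value = <the output's value expression>`, so the downstream
+ // RawConfig handling sees the familiar single-entry map shape.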
+ o.RawConfig = NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+ Name: "value",
+ Expr: rawO.ValueExpr,
+ })
+
+ config.Outputs = append(config.Outputs, o)
+ }
+
+ for _, rawR := range raw.Resources {
+ r := &Resource{
+ Mode: ManagedResourceMode,
+ Type: rawR.Type,
+ Name: rawR.Name,
+ }
+ if rawR.Lifecycle != nil {
+ var l ResourceLifecycle
+ if rawR.Lifecycle.CreateBeforeDestroy != nil {
+ l.CreateBeforeDestroy = *rawR.Lifecycle.CreateBeforeDestroy
+ }
+ if rawR.Lifecycle.PreventDestroy != nil {
+ l.PreventDestroy = *rawR.Lifecycle.PreventDestroy
+ }
+ if rawR.Lifecycle.IgnoreChanges != nil {
+ l.IgnoreChanges = *rawR.Lifecycle.IgnoreChanges
+ }
+ r.Lifecycle = l
+ }
+ if rawR.Provider != nil {
+ r.Provider = *rawR.Provider
+ }
+ if rawR.DependsOn != nil {
+ r.DependsOn = *rawR.DependsOn
+ }
+
+ var defaultConnInfo *RawConfig
+ if rawR.Connection != nil {
+ defaultConnInfo = NewRawConfigHCL2(rawR.Connection.Config)
+ }
+
+ for _, rawP := range rawR.Provisioners {
+ p := &Provisioner{
+ Type: rawP.Type,
+ }
+
+ switch {
+ case rawP.When == nil:
+ p.When = ProvisionerWhenCreate
+ case *rawP.When == "create":
+ p.When = ProvisionerWhenCreate
+ case *rawP.When == "destroy":
+ p.When = ProvisionerWhenDestroy
+ default:
+ p.When = ProvisionerWhenInvalid
+ }
+
+ switch {
+ case rawP.OnFailure == nil:
+ p.OnFailure = ProvisionerOnFailureFail
+ case *rawP.OnFailure == "fail":
+ p.OnFailure = ProvisionerOnFailureFail
+ case *rawP.OnFailure == "continue":
+ p.OnFailure = ProvisionerOnFailureContinue
+ default:
+ p.OnFailure = ProvisionerOnFailureInvalid
+ }
+
+ if rawP.Connection != nil {
+ p.ConnInfo = NewRawConfigHCL2(rawP.Connection.Config)
+ } else {
+ p.ConnInfo = defaultConnInfo
+ }
+
+ p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+ r.Provisioners = append(r.Provisioners, p)
+ }
+
+ // The old loader records the count expression as a weird RawConfig with
+ // a single-element map inside. Since the rest of the world is assuming
+ // that, we'll mimic it here.
+ {
+ countBody := hcl2shim.SingleAttrBody{
+ Name: "count",
+ Expr: rawR.CountExpr,
+ }
+
+ r.RawCount = NewRawConfigHCL2(countBody)
+ r.RawCount.Key = "count"
+ }
+
+ r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+ config.Resources = append(config.Resources, r)
+
+ }
+
+ for _, rawR := range raw.Datas {
+ r := &Resource{
+ Mode: DataResourceMode,
+ Type: rawR.Type,
+ Name: rawR.Name,
+ }
+
+ if rawR.Provider != nil {
+ r.Provider = *rawR.Provider
+ }
+ if rawR.DependsOn != nil {
+ r.DependsOn = *rawR.DependsOn
+ }
+
+ // The old loader records the count expression as a weird RawConfig with
+ // a single-element map inside. Since the rest of the world is assuming
+ // that, we'll mimic it here.
+ {
+ countBody := hcl2shim.SingleAttrBody{
+ Name: "count",
+ Expr: rawR.CountExpr,
+ }
+
+ r.RawCount = NewRawConfigHCL2(countBody)
+ r.RawCount.Key = "count"
+ }
+
+ r.RawConfig = NewRawConfigHCL2(rawR.Config)
+
+ config.Resources = append(config.Resources, r)
+ }
+
+ for _, rawP := range raw.Providers {
+ p := &ProviderConfig{
+ Name: rawP.Name,
+ }
+
+ if rawP.Alias != nil {
+ p.Alias = *rawP.Alias
+ }
+ if rawP.Version != nil {
+ p.Version = *rawP.Version
+ }
+
+ p.RawConfig = NewRawConfigHCL2(rawP.Config)
+
+ config.ProviderConfigs = append(config.ProviderConfigs, p)
+ }
+
+ for _, rawL := range raw.Locals {
+ names := make([]string, 0, len(rawL.Definitions))
+ for n := range rawL.Definitions {
+ names = append(names, n)
+ }
+ sort.Strings(names)
+ for _, n := range names {
+ attr := rawL.Definitions[n]
+ l := &Local{
+ Name: n,
+ RawConfig: NewRawConfigHCL2(hcl2shim.SingleAttrBody{
+ Name: "value",
+ Expr: attr.Expr,
+ }),
+ }
+ config.Locals = append(config.Locals, l)
+ }
+ }
+
+ // FIXME: The current API gives us no way to return warnings in the
+ // absence of any errors.
+ var err error
+ if diags.HasErrors() {
+ err = diags
+ }
+
+ return config, err
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
index db214be4..55fc864f 100644
--- a/vendor/github.com/hashicorp/terraform/config/merge.go
+++ b/vendor/github.com/hashicorp/terraform/config/merge.go
@@ -137,6 +137,17 @@ func Merge(c1, c2 *Config) (*Config, error) {
 }
 }
 
+ // Local Values
+ // These are simpler than the other config elements because they are just
+ // flat values and so no deep merging is required.
+ if localsCount := len(c1.Locals) + len(c2.Locals); localsCount != 0 {
+ // Explicit length check above because we want c.Locals to remain
+ // nil if the result would be empty.
+ c.Locals = make([]*Local, 0, len(c1.Locals)+len(c2.Locals))
+ c.Locals = append(c.Locals, c1.Locals...)
+ c.Locals = append(c.Locals, c2.Locals...)
+ }
+
 return c, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
index 96b4a63c..5073d0d2 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/get.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/get.go
@@ -3,6 +3,7 @@ package module
 import (
 "io/ioutil"
 "os"
+ "path/filepath"
 
 "github.com/hashicorp/go-getter"
 )
@@ -37,13 +38,10 @@ func GetCopy(dst, src string) error {
 if err != nil {
 return err
 }
- // FIXME: This isn't completely safe. Creating and removing our temp path
- // exposes where to race to inject files.
- if err := os.RemoveAll(tmpDir); err != nil {
- return err
- }
 defer os.RemoveAll(tmpDir)
 
+ tmpDir = filepath.Join(tmpDir, "module")
+
 // Get to that temporary dir
 if err := getter.Get(tmpDir, src); err != nil {
 return err
@@ -57,15 +55,3 @@ func GetCopy(dst, src string) error {
 // Copy to the final location
 return copyDir(dst, tmpDir)
 }
-
-func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
- // Get the module with the level specified if we were told to.
- if mode > GetModeNone {
- if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
- return "", false, err
- }
- }
-
- // Get the directory where the module is.
- return s.Dir(key)
-}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
index 8603ee26..da520abc 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/inode.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go
@@ -1,4 +1,4 @@
-// +build linux darwin openbsd netbsd solaris
+// +build linux darwin openbsd netbsd solaris dragonfly
 
 package module
 
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
index f8649f6e..7dc8fccd 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/module.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/module.go
@@ -2,6 +2,8 @@ package module
 
 // Module represents the metadata for a single module.
 type Module struct {
- Name string
- Source string
+ Name string
+ Source string
+ Version string
+ Providers map[string]string
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go
new file mode 100644
index 00000000..7734cbc0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go
@@ -0,0 +1,346 @@
+package module
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+
+ getter "github.com/hashicorp/go-getter"
+ "github.com/hashicorp/terraform/registry"
+ "github.com/hashicorp/terraform/registry/regsrc"
+ "github.com/hashicorp/terraform/svchost/disco"
+ "github.com/mitchellh/cli"
+)
+
+const manifestName = "modules.json"
+
+// moduleManifest is the serialization structure used to record the stored
+// module's metadata.
+type moduleManifest struct {
+ Modules []moduleRecord
+}
+
+// moduleRecord represents the stored module's metadata.
+// This is compared for equality using '==', so all fields need to remain
+// comparable.
+type moduleRecord struct {
+ // Source is the module source string from the config, minus any
+ // subdirectory.
+ Source string
+
+ // Key is the locally unique identifier for this module.
+ Key string
+
+ // Version is the exact version string for the stored module.
+ Version string
+
+ // Dir is the directory name returned by the FileStorage. This is what
+ // allows us to correlate a particular module version with the location on
+ // disk.
+ Dir string
+
+ // Root is the root directory containing the module. If the module is
+ // unpacked from an archive, and not located in the root directory, this is
+ // used to direct the loader to the correct subdirectory. This is
+ // independent from any subdirectory in the original source string, which
+ // may traverse further into the module tree.
+ Root string
+
+ // url is the location of the module source
+ url string
+
+ // Registry is true if this module is sourced from a registry
+ registry bool
+}
+
+// Storage implements methods to manage the storage of modules.
+// This is used by Tree.Load to query registries, authenticate requests, and
+// store modules locally.
+type Storage struct {
+ // StorageDir is the full path to the directory where all modules will be
+ // stored.
+ StorageDir string
+
+ // Ui is an optional cli.Ui for user output
+ Ui cli.Ui
+
+ // Mode is the GetMode that will be used for various operations.
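+ // (one of GetModeNone, GetModeGet or GetModeUpdate; see getStorage and
+ // findRegistryModule below)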
+ Mode GetMode + + registry *registry.Client +} + +// NewStorage returns a new initialized Storage object. +func NewStorage(dir string, services *disco.Disco) *Storage { + regClient := registry.NewClient(services, nil) + + return &Storage{ + StorageDir: dir, + registry: regClient, + } +} + +// loadManifest returns the moduleManifest file from the parent directory. +func (s Storage) loadManifest() (moduleManifest, error) { + manifest := moduleManifest{} + + manifestPath := filepath.Join(s.StorageDir, manifestName) + data, err := ioutil.ReadFile(manifestPath) + if err != nil && !os.IsNotExist(err) { + return manifest, err + } + + if len(data) == 0 { + return manifest, nil + } + + if err := json.Unmarshal(data, &manifest); err != nil { + return manifest, err + } + return manifest, nil +} + +// Store the location of the module, along with the version used and the module +// root directory. The storage method loads the entire file and rewrites it +// each time. This is only done a few times during init, so efficiency is +// not a concern. +func (s Storage) recordModule(rec moduleRecord) error { + manifest, err := s.loadManifest() + if err != nil { + // if there was a problem with the file, we will attempt to write a new + // one. Any non-data related error should surface there. + log.Printf("[WARN] error reading module manifest: %s", err) + } + + // do nothing if we already have the exact module + for i, stored := range manifest.Modules { + if rec == stored { + return nil + } + + // they are not equal, but if the storage path is the same we need to + // remove this rec to be replaced. + if rec.Dir == stored.Dir { + manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1] + manifest.Modules = manifest.Modules[:len(manifest.Modules)-1] + break + } + } + + manifest.Modules = append(manifest.Modules, rec) + + js, err := json.Marshal(manifest) + if err != nil { + panic(err) + } + + manifestPath := filepath.Join(s.StorageDir, manifestName) + return ioutil.WriteFile(manifestPath, js, 0644) +} + +// load the manifest from dir, and return all module versions matching the +// provided source. Records with no version info will be skipped, as they need +// to be uniquely identified by other means. +func (s Storage) moduleVersions(source string) ([]moduleRecord, error) { + manifest, err := s.loadManifest() + if err != nil { + return manifest.Modules, err + } + + var matching []moduleRecord + + for _, m := range manifest.Modules { + if m.Source == source && m.Version != "" { + log.Printf("[DEBUG] found local version %q for module %s", m.Version, m.Source) + matching = append(matching, m) + } + } + + return matching, nil +} + +func (s Storage) moduleDir(key string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, m := range manifest.Modules { + if m.Key == key { + return m.Dir, nil + } + } + + return "", nil +} + +// return only the root directory of the module stored in dir. +func (s Storage) getModuleRoot(dir string) (string, error) { + manifest, err := s.loadManifest() + if err != nil { + return "", err + } + + for _, mod := range manifest.Modules { + if mod.Dir == dir { + return mod.Root, nil + } + } + return "", nil +} + +// record only the Root directory for the module stored at dir. 
+func (s Storage) recordModuleRoot(dir, root string) error {
+	rec := moduleRecord{
+		Dir:  dir,
+		Root: root,
+	}
+
+	return s.recordModule(rec)
+}
+
+func (s Storage) output(msg string) {
+	if s.Ui == nil || s.Mode == GetModeNone {
+		return
+	}
+	s.Ui.Output(msg)
+}
+
+func (s Storage) getStorage(key string, src string) (string, bool, error) {
+	storage := &getter.FolderStorage{
+		StorageDir: s.StorageDir,
+	}
+
+	log.Printf("[DEBUG] fetching module from %s", src)
+
+	// Get the module with the level specified if we were told to.
+	if s.Mode > GetModeNone {
+		log.Printf("[DEBUG] fetching %q with key %q", src, key)
+		if err := storage.Get(key, src, s.Mode == GetModeUpdate); err != nil {
+			return "", false, err
+		}
+	}
+
+	// Get the directory where the module is.
+	dir, found, err := storage.Dir(key)
+	log.Printf("[DEBUG] found %q in %q: %t", src, dir, found)
+	return dir, found, err
+}
+
+// find a stored module that's not from a registry
+func (s Storage) findModule(key string) (string, error) {
+	if s.Mode == GetModeUpdate {
+		return "", nil
+	}
+
+	return s.moduleDir(key)
+}
+
+// GetModule fetches a module source into the specified directory. This is used
+// as a convenience function by the CLI to initialize a configuration.
+func (s Storage) GetModule(dst, src string) error {
+	// reset this in case the caller was going to re-use it
+	mode := s.Mode
+	s.Mode = GetModeUpdate
+	defer func() {
+		s.Mode = mode
+	}()
+
+	rec, err := s.findRegistryModule(src, anyVersion)
+	if err != nil {
+		return err
+	}
+
+	pwd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+
+	source := rec.url
+	if source == "" {
+		source, err = getter.Detect(src, pwd, getter.Detectors)
+		if err != nil {
+			return fmt.Errorf("module %s: %s", src, err)
+		}
+	}
+
+	if source == "" {
+		return fmt.Errorf("module %q not found", src)
+	}
+
+	return GetCopy(dst, source)
+}
+
+// find a registry module
+func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, error) {
+	rec := moduleRecord{
+		Source: mSource,
+	}
+	// detect if we have a registry source
+	mod, err := regsrc.ParseModuleSource(mSource)
+	switch err {
+	case nil:
+		//ok
+	case regsrc.ErrInvalidModuleSource:
+		return rec, nil
+	default:
+		return rec, err
+	}
+	rec.registry = true
+
+	log.Printf("[TRACE] %q is a registry module", mod.Display())
+
+	versions, err := s.moduleVersions(mod.String())
+	if err != nil {
+		log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err)
+		return rec, err
+	}
+
+	match, err := newestRecord(versions, constraint)
+	if err != nil {
+		log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err)
+	}
+	log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint)
+
+	rec.Dir = match.Dir
+	rec.Version = match.Version
+	found := rec.Dir != ""
+
+	// we need to look up available versions
+	// only on Get if it's not found, and unconditionally on Update
+	if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
+		resp, err := s.registry.ModuleVersions(mod)
+		if err != nil {
+			return rec, err
+		}
+
+		if len(resp.Modules) == 0 {
+			return rec, fmt.Errorf("module %q not found in registry", mod.Display())
+		}
+
+		match, err := newestVersion(resp.Modules[0].Versions, constraint)
+		if err != nil {
+			return rec, err
+		}
+
+		if match == nil {
+			return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint)
+		}
+
+		rec.Version = match.Version
+
+		rec.url, err = s.registry.ModuleLocation(mod, rec.Version)
+		if err != nil {
+			return rec, err
+		}
+
+		// we've already validated this by now
+		host, _ := mod.SvcHost()
+		s.output(fmt.Sprintf("  Found version %s of %s on %s", rec.Version, mod.Module(), host.ForDisplay()))
+
+	}
+	return rec, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
index fc9e7331..6f1ff050 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/testing.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/testing.go
@@ -4,8 +4,6 @@ import (
 	"io/ioutil"
 	"os"
 	"testing"
-
-	"github.com/hashicorp/go-getter"
 )
 
 // TestTree loads a module at the given path and returns the tree as well
@@ -26,8 +24,8 @@ func TestTree(t *testing.T, path string) (*Tree, func()) {
 	}
 
 	// Get the child modules
-	s := &getter.FolderStorage{StorageDir: dir}
-	if err := mod.Load(s, GetModeGet); err != nil {
+	s := &Storage{StorageDir: dir, Mode: GetModeGet}
+	if err := mod.Load(s); err != nil {
 		t.Fatalf("err: %s", err)
 		return nil, nil
 	}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
index b6f90fd9..f56d69b7 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/tree.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/tree.go
@@ -4,11 +4,14 @@ import (
 	"bufio"
 	"bytes"
 	"fmt"
+	"log"
 	"path/filepath"
 	"strings"
 	"sync"
 
-	"github.com/hashicorp/go-getter"
+	"github.com/hashicorp/terraform/tfdiags"
+
+	getter "github.com/hashicorp/go-getter"
 	"github.com/hashicorp/terraform/config"
 )
 
@@ -26,6 +29,17 @@ type Tree struct {
 	children map[string]*Tree
 	path     []string
 	lock     sync.RWMutex
+
+	// version is the final version of the config loaded for the Tree's module
+	version string
+	// source is the "source" string used to load this module. It's possible
+	// for a module source to change, but the path remains the same, preventing
+	// it from being reloaded.
+	source string
+	// parent allows us to walk back up the tree and determine if there are any
+	// versioned ancestor modules which may affect the stored location of
+	// submodules
+	parent *Tree
 }
 
 // NewTree returns a new Tree for the given config structure.
@@ -40,7 +54,7 @@ func NewEmptyTree() *Tree {
 	// We do this dummy load so that the tree is marked as "loaded". It
 	// should never fail because this is just about a no-op. If it does fail
 	// we panic so we can know its a bug.
-	if err := t.Load(nil, GetModeGet); err != nil {
+	if err := t.Load(&Storage{Mode: GetModeGet}); err != nil {
 		panic(err)
 	}
 
@@ -92,6 +106,25 @@ func (t *Tree) Children() map[string]*Tree {
 	return t.children
 }
 
+// DeepEach calls the provided callback for the receiver and then all of
+// its descendents in the tree, allowing an operation to be performed on
+// all modules in the tree.
+//
+// Parents will be visited before their children but otherwise the order is
+// not defined.
+func (t *Tree) DeepEach(cb func(*Tree)) {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+	t.deepEach(cb)
+}
+
+func (t *Tree) deepEach(cb func(*Tree)) {
+	cb(t)
+	for _, c := range t.children {
+		c.deepEach(cb)
+	}
+}
+
 // Loaded says whether or not this tree has been loaded or not yet.
func (t *Tree) Loaded() bool { t.lock.RLock() @@ -107,8 +140,10 @@ func (t *Tree) Modules() []*Module { result := make([]*Module, len(t.config.Modules)) for i, m := range t.config.Modules { result[i] = &Module{ - Name: m.Name, - Source: m.Source, + Name: m.Name, + Version: m.Version, + Source: m.Source, + Providers: m.Providers, } } @@ -136,81 +171,178 @@ func (t *Tree) Name() string { // module trees inherently require the configuration to be in a reasonably // sane state: no circular dependencies, proper module sources, etc. A full // suite of validations can be done by running Validate (after loading). -func (t *Tree) Load(s getter.Storage, mode GetMode) error { +func (t *Tree) Load(s *Storage) error { t.lock.Lock() defer t.lock.Unlock() - // Reset the children if we have any - t.children = nil + children, err := t.getChildren(s) + if err != nil { + return err + } + + // Go through all the children and load them. + for _, c := range children { + if err := c.Load(s); err != nil { + return err + } + } - modules := t.Modules() + // Set our tree up + t.children = children + + return nil +} + +func (t *Tree) getChildren(s *Storage) (map[string]*Tree, error) { children := make(map[string]*Tree) // Go through all the modules and get the directory for them. - for _, m := range modules { + for _, m := range t.Modules() { if _, ok := children[m.Name]; ok { - return fmt.Errorf( + return nil, fmt.Errorf( "module %s: duplicated. module names must be unique", m.Name) } // Determine the path to this child - path := make([]string, len(t.path), len(t.path)+1) - copy(path, t.path) - path = append(path, m.Name) + modPath := make([]string, len(t.path), len(t.path)+1) + copy(modPath, t.path) + modPath = append(modPath, m.Name) + + log.Printf("[TRACE] module source: %q", m.Source) - // Split out the subdir if we have one - source, subDir := getter.SourceDirSubdir(m.Source) + // add the module path to help indicate where modules with relative + // paths are being loaded from + s.output(fmt.Sprintf("- module.%s", strings.Join(modPath, "."))) - source, err := getter.Detect(source, t.config.Dir, getter.Detectors) + // Lookup the local location of the module. + // dir is the local directory where the module is stored + mod, err := s.findRegistryModule(m.Source, m.Version) if err != nil { - return fmt.Errorf("module %s: %s", m.Name, err) + return nil, err + } + + // The key is the string that will be used to uniquely id the Source in + // the local storage. The prefix digit can be incremented to + // invalidate the local module storage. + key := "1." + t.versionedPathKey(m) + if mod.Version != "" { + key += "." + mod.Version + } + + // Check for the exact key if it's not a registry module + if !mod.registry { + mod.Dir, err = s.findModule(key) + if err != nil { + return nil, err + } + } + + if mod.Dir != "" && s.Mode != GetModeUpdate { + // We found it locally, but in order to load the Tree we need to + // find out if there was another subDir stored from detection. + subDir, err := s.getModuleRoot(mod.Dir) + if err != nil { + // If there's a problem with the subdir record, we'll let the + // recordSubdir method fix it up. Any other filesystem errors + // will turn up again below. 
+ log.Println("[WARN] error reading subdir record:", err) + } + + fullDir := filepath.Join(mod.Dir, subDir) + + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + continue + } + + // Split out the subdir if we have one. + // Terraform keeps the entire requested tree, so that modules can + // reference sibling modules from the same archive or repo. + rawSource, subDir := getter.SourceDirSubdir(m.Source) + + // we haven't found a source, so fallback to the go-getter detectors + source := mod.url + if source == "" { + source, err = getter.Detect(rawSource, t.config.Dir, getter.Detectors) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } } + log.Printf("[TRACE] detected module source %q", source) + // Check if the detector introduced something new. - source, subDir2 := getter.SourceDirSubdir(source) - if subDir2 != "" { - subDir = filepath.Join(subDir2, subDir) + // For example, the registry always adds a subdir of `//*`, + // indicating that we need to strip off the first component from the + // tar archive, though we may not yet know what it is called. + source, detectedSubDir := getter.SourceDirSubdir(source) + if detectedSubDir != "" { + subDir = filepath.Join(detectedSubDir, subDir) } - // Get the directory where this module is so we can load it - key := strings.Join(path, ".") - key = fmt.Sprintf("root.%s-%s", key, m.Source) - dir, ok, err := getStorage(s, key, source, mode) + output := "" + switch s.Mode { + case GetModeUpdate: + output = fmt.Sprintf(" Updating source %q", m.Source) + default: + output = fmt.Sprintf(" Getting source %q", m.Source) + } + s.output(output) + + dir, ok, err := s.getStorage(key, source) if err != nil { - return err + return nil, err } if !ok { - return fmt.Errorf( - "module %s: not found, may need to be downloaded using 'terraform get'", m.Name) + return nil, fmt.Errorf("module %s: not found, may need to run 'terraform init'", m.Name) } - // If we have a subdirectory, then merge that in + log.Printf("[TRACE] %q stored in %q", source, dir) + + // expand and record the subDir for later + fullDir := dir if subDir != "" { - dir = filepath.Join(dir, subDir) - } + fullDir, err = getter.SubdirGlob(dir, subDir) + if err != nil { + return nil, err + } - // Load the configurations.Dir(source) - children[m.Name], err = NewTreeModule(m.Name, dir) - if err != nil { - return fmt.Errorf( - "module %s: %s", m.Name, err) + // +1 to account for the pathsep + if len(dir)+1 > len(fullDir) { + return nil, fmt.Errorf("invalid module storage path %q", fullDir) + } + subDir = fullDir[len(dir)+1:] } - // Set the path of this child - children[m.Name].path = path - } + // add new info to the module record + mod.Key = key + mod.Dir = dir + mod.Root = subDir - // Go through all the children and load them. - for _, c := range children { - if err := c.Load(s, mode); err != nil { - return err + // record the module in our manifest + if err := s.recordModule(mod); err != nil { + return nil, err } - } - // Set our tree up - t.children = children + child, err := NewTreeModule(m.Name, fullDir) + if err != nil { + return nil, fmt.Errorf("module %s: %s", m.Name, err) + } + child.path = modPath + child.parent = t + child.version = mod.Version + child.source = m.Source + children[m.Name] = child + } - return nil + return children, nil } // Path is the full path to this tree. 
@@ -253,32 +385,35 @@ func (t *Tree) String() string { // as verifying things such as parameters/outputs between the various modules. // // Load must be called prior to calling Validate or an error will be returned. -func (t *Tree) Validate() error { +func (t *Tree) Validate() tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if !t.Loaded() { - return fmt.Errorf("tree must be loaded before calling Validate") + diags = diags.Append(fmt.Errorf( + "tree must be loaded before calling Validate", + )) + return diags } - // If something goes wrong, here is our error template - newErr := &treeError{Name: []string{t.Name()}} - // Terraform core does not handle root module children named "root". // We plan to fix this in the future but this bug was brought up in // the middle of a release and we don't want to introduce wide-sweeping // changes at that time. if len(t.path) == 1 && t.name == "root" { - return fmt.Errorf("root module cannot contain module named 'root'") + diags = diags.Append(fmt.Errorf( + "root module cannot contain module named 'root'", + )) + return diags } // Validate our configuration first. - if err := t.config.Validate(); err != nil { - newErr.Add(err) - } + diags = diags.Append(t.config.Validate()) // If we're the root, we do extra validation. This validation usually // requires the entire tree (since children don't have parent pointers). if len(t.path) == 0 { if err := t.validateProviderAlias(); err != nil { - newErr.Add(err) + diags = diags.Append(err) } } @@ -287,20 +422,11 @@ func (t *Tree) Validate() error { // Validate all our children for _, c := range children { - err := c.Validate() - if err == nil { + childDiags := c.Validate() + diags = diags.Append(childDiags) + if diags.HasErrors() { continue } - - verr, ok := err.(*treeError) - if !ok { - // Unknown error, just return... - return err - } - - // Append ourselves to the error and then return - verr.Name = append(verr.Name, t.Name()) - newErr.AddChild(verr) } // Go over all the modules and verify that any parameters are valid @@ -326,9 +452,10 @@ func (t *Tree) Validate() error { // Compare to the keys in our raw config for the module for k, _ := range m.RawConfig.Raw { if _, ok := varMap[k]; !ok { - newErr.Add(fmt.Errorf( - "module %s: %s is not a valid parameter", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: %q is not a valid argument", + m.Name, k, + )) } // Remove the required @@ -337,9 +464,10 @@ func (t *Tree) Validate() error { // If we have any required left over, they aren't set. for k, _ := range requiredMap { - newErr.Add(fmt.Errorf( - "module %s: required variable %q not set", - m.Name, k)) + diags = diags.Append(fmt.Errorf( + "module %q: missing required argument %q", + m.Name, k, + )) } } @@ -354,9 +482,10 @@ func (t *Tree) Validate() error { tree, ok := children[mv.Name] if !ok { - newErr.Add(fmt.Errorf( - "%s: undefined module referenced %s", - source, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: reference to undefined module %q", + source, mv.Name, + )) continue } @@ -368,14 +497,56 @@ func (t *Tree) Validate() error { } } if !found { - newErr.Add(fmt.Errorf( - "%s: %s is not a valid output for module %s", - source, mv.Field, mv.Name)) + diags = diags.Append(fmt.Errorf( + "%s: %q is not a valid output for module %q", + source, mv.Field, mv.Name, + )) } } } - return newErr.ErrOrNil() + return diags +} + +// versionedPathKey returns a path string with every levels full name, version +// and source encoded. 
This is to provide a unique key for our module storage, +// since submodules need to know which versions of their ancestor modules they +// are loaded from. +// For example, if module A has a subdirectory B, if module A's source or +// version is updated B's storage key must reflect this change in order for the +// correct version of B's source to be loaded. +func (t *Tree) versionedPathKey(m *Module) string { + path := make([]string, len(t.path)+1) + path[len(path)-1] = m.Name + ";" + m.Source + // We're going to load these in order for easier reading and debugging, but + // in practice they only need to be unique and consistent. + + p := t + i := len(path) - 2 + for ; i >= 0; i-- { + if p == nil { + break + } + // we may have been loaded under a blank Tree, so always check for a name + // too. + if p.name == "" { + break + } + seg := p.name + if p.version != "" { + seg += "#" + p.version + } + + if p.source != "" { + seg += ";" + p.source + } + + path[i] = seg + p = p.parent + } + + key := strings.Join(path, "|") + return key } // treeError is an error use by Tree.Validate to accumulates all diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go index 090d4f7e..f203556c 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go +++ b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go @@ -67,7 +67,7 @@ func (t *Tree) validateProviderAlias() error { // We didn't find the alias, error! err = multierror.Append(err, fmt.Errorf( - "module %s: provider alias must be defined by the module or a parent: %s", + "module %s: provider alias must be defined by the module: %s", strings.Join(pv.Path, "."), k)) } } diff --git a/vendor/github.com/hashicorp/terraform/config/module/versions.go b/vendor/github.com/hashicorp/terraform/config/module/versions.go new file mode 100644 index 00000000..29701b93 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/module/versions.go @@ -0,0 +1,129 @@ +package module + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/registry/response" +) + +const anyVersion = ">=0.0.0" + +var explicitEqualityConstraint = regexp.MustCompile("^=[0-9]") + +// return the newest version that satisfies the provided constraint +func newest(versions []string, constraint string) (string, error) { + if constraint == "" { + constraint = anyVersion + } + cs, err := version.NewConstraint(constraint) + if err != nil { + return "", err + } + + // Find any build metadata in the constraints, and + // store whether the constraint is an explicit equality that + // contains a build metadata requirement, so we can return a specific, + // if requested, build metadata version + var constraintMetas []string + var equalsConstraint bool + for i := range cs { + constraintMeta := strings.SplitAfterN(cs[i].String(), "+", 2) + if len(constraintMeta) > 1 { + constraintMetas = append(constraintMetas, constraintMeta[1]) + } + } + + if len(cs) == 1 { + equalsConstraint = explicitEqualityConstraint.MatchString(cs.String()) + } + + // If the version string includes metadata, this is valid in go-version, + // However, it's confusing as to what expected behavior should be, + // so give an error so the user can do something more logical + if (len(cs) > 1 || 
!equalsConstraint) && len(constraintMetas) > 0 { + return "", fmt.Errorf("Constraints including build metadata must have explicit equality, or are otherwise too ambiguous: %s", cs.String()) + } + + switch len(versions) { + case 0: + return "", errors.New("no versions found") + case 1: + v, err := version.NewVersion(versions[0]) + if err != nil { + return "", err + } + + if !cs.Check(v) { + return "", fmt.Errorf("no version found matching constraint %q", constraint) + } + return versions[0], nil + } + + sort.Slice(versions, func(i, j int) bool { + // versions should have already been validated + // sort invalid version strings to the end + iv, err := version.NewVersion(versions[i]) + if err != nil { + return true + } + jv, err := version.NewVersion(versions[j]) + if err != nil { + return true + } + return iv.GreaterThan(jv) + }) + + // versions are now in order, so just find the first which satisfies the + // constraint + for i := range versions { + v, err := version.NewVersion(versions[i]) + if err != nil { + continue + } + if cs.Check(v) { + // Constraint has metadata and is explicit equality + if equalsConstraint && len(constraintMetas) > 0 { + if constraintMetas[0] != v.Metadata() { + continue + } + } + return versions[i], nil + } + } + + return "", nil +} + +// return the newest *moduleVersion that matches the given constraint +// TODO: reconcile these two types and newest* functions +func newestVersion(moduleVersions []*response.ModuleVersion, constraint string) (*response.ModuleVersion, error) { + var versions []string + modules := make(map[string]*response.ModuleVersion) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} + +// return the newest moduleRecord that matches the given constraint +func newestRecord(moduleVersions []moduleRecord, constraint string) (moduleRecord, error) { + var versions []string + modules := make(map[string]moduleRecord) + + for _, m := range moduleVersions { + versions = append(versions, m.Version) + modules[m.Version] = m + } + + match, err := newest(versions, constraint) + return modules[match], err +} diff --git a/vendor/github.com/hashicorp/terraform/config/providers.go b/vendor/github.com/hashicorp/terraform/config/providers.go new file mode 100644 index 00000000..7a50782f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/config/providers.go @@ -0,0 +1,103 @@ +package config + +import "github.com/blang/semver" + +// ProviderVersionConstraint presents a constraint for a particular +// provider, identified by its full name. +type ProviderVersionConstraint struct { + Constraint string + ProviderType string +} + +// ProviderVersionConstraints is a map from provider full name to its associated +// ProviderVersionConstraint, as produced by Config.RequiredProviders. +type ProviderVersionConstraints map[string]ProviderVersionConstraint + +// RequiredProviders returns the ProviderVersionConstraints for this +// module. +// +// This includes both providers that are explicitly requested by provider +// blocks and those that are used implicitly by instantiating one of their +// resource types. In the latter case, the returned semver Range will +// accept any version of the provider. 
+func (c *Config) RequiredProviders() ProviderVersionConstraints { + ret := make(ProviderVersionConstraints, len(c.ProviderConfigs)) + + configs := c.ProviderConfigsByFullName() + + // In order to find the *implied* dependencies (those without explicit + // "provider" blocks) we need to walk over all of the resources and + // cross-reference with the provider configs. + for _, rc := range c.Resources { + providerName := rc.ProviderFullName() + var providerType string + + // Default to (effectively) no constraint whatsoever, but we might + // override if there's an explicit constraint in config. + constraint := ">=0.0.0" + + config, ok := configs[providerName] + if ok { + if config.Version != "" { + constraint = config.Version + } + providerType = config.Name + } else { + providerType = providerName + } + + ret[providerName] = ProviderVersionConstraint{ + ProviderType: providerType, + Constraint: constraint, + } + } + + return ret +} + +// RequiredRanges returns a semver.Range for each distinct provider type in +// the constraint map. If the same provider type appears more than once +// (e.g. because aliases are in use) then their respective constraints are +// combined such that they must *all* apply. +// +// The result of this method can be passed to the +// PluginMetaSet.ConstrainVersions method within the plugin/discovery +// package in order to filter down the available plugins to those which +// satisfy the given constraints. +// +// This function will panic if any of the constraints within cannot be +// parsed as semver ranges. This is guaranteed to never happen for a +// constraint set that was built from a configuration that passed validation. +func (cons ProviderVersionConstraints) RequiredRanges() map[string]semver.Range { + ret := make(map[string]semver.Range, len(cons)) + + for _, con := range cons { + spec := semver.MustParseRange(con.Constraint) + if existing, exists := ret[con.ProviderType]; exists { + ret[con.ProviderType] = existing.AND(spec) + } else { + ret[con.ProviderType] = spec + } + } + + return ret +} + +// ProviderConfigsByFullName returns a map from provider full names (as +// returned by ProviderConfig.FullName()) to the corresponding provider +// configs. +// +// This function returns no new information than what's already in +// c.ProviderConfigs, but returns it in a more convenient shape. If there +// is more than one provider config with the same full name then the result +// is undefined, but that is guaranteed not to happen for any config that +// has passed validation. 
+func (c *Config) ProviderConfigsByFullName() map[string]*ProviderConfig {
+	ret := make(map[string]*ProviderConfig, len(c.ProviderConfigs))
+
+	for _, pc := range c.ProviderConfigs {
+		ret[pc.FullName()] = pc
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
index f8498d85..1854a8b2 100644
--- a/vendor/github.com/hashicorp/terraform/config/raw_config.go
+++ b/vendor/github.com/hashicorp/terraform/config/raw_config.go
@@ -3,8 +3,14 @@ package config
 import (
 	"bytes"
 	"encoding/gob"
+	"errors"
+	"strconv"
 	"sync"
 
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	hcl2 "github.com/hashicorp/hcl2/hcl"
 	"github.com/hashicorp/hil"
 	"github.com/hashicorp/hil/ast"
 	"github.com/mitchellh/copystructure"
@@ -27,8 +33,24 @@ const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
 // RawConfig supports a query-like interface to request
 // information from deep within the structure.
 type RawConfig struct {
-	Key string
-	Raw map[string]interface{}
+	Key string
+
+	// Only _one_ of Raw and Body may be populated at a time.
+	//
+	// In the normal case, Raw is populated and Body is nil.
+	//
+	// When the experimental HCL2 parsing mode is enabled, "Body"
+	// is populated and RawConfig serves only to transport the hcl2.Body
+	// through the rest of Terraform core so we can ultimately decode it
+	// once its schema is known.
+	//
+	// Once we transition to HCL2 as the primary representation, RawConfig
+	// should be removed altogether and the hcl2.Body should be passed
+	// around directly.
+
+	Raw  map[string]interface{}
+	Body hcl2.Body
+
 	Interpolations []ast.Node
 	Variables      map[string]InterpolatedVariable
 
@@ -48,6 +70,26 @@ func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
 	return result, nil
 }
 
+// NewRawConfigHCL2 creates a new RawConfig that is serving as a capsule
+// to transport a hcl2.Body. In this mode, the publicly-readable struct
+// fields are not populated since all operations should instead be diverted
+// to the HCL2 body.
+//
+// For a RawConfig object constructed with this function, the only valid use
+// is to later retrieve the Body value and call its own methods. Callers
+// may choose to set and then later handle the Key field, in a manner
+// consistent with how it is handled by the Value method, but the Value
+// method itself must not be used.
+//
+// This is an experimental codepath to be used only by the HCL2 config loader.
+// Non-experimental parsing should _always_ use NewRawConfig to produce a
+// fully-functional RawConfig object.
+func NewRawConfigHCL2(body hcl2.Body) *RawConfig {
+	return &RawConfig{
+		Body: body,
+	}
+}
+
 // RawMap returns a copy of the RawConfig.Raw map.
 func (r *RawConfig) RawMap() map[string]interface{} {
 	r.lock.Lock()
@@ -69,6 +111,10 @@ func (r *RawConfig) Copy() *RawConfig {
 	r.lock.Lock()
 	defer r.lock.Unlock()
 
+	if r.Body != nil {
+		return NewRawConfigHCL2(r.Body)
+	}
+
 	newRaw := make(map[string]interface{})
 	for k, v := range r.Raw {
 		newRaw[k] = v
@@ -223,6 +269,13 @@ func (r *RawConfig) init() error {
 }
 
 func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+	if r.Body != nil {
+		// For RawConfigs created for the HCL2 experiment, callers must
+		// use the HCL2 Body API directly rather than interpolating via
+		// the RawConfig.
+		return errors.New("this feature is not yet supported under the HCL2 experiment")
+	}
+
 	config, err := copystructure.Copy(r.Raw)
 	if err != nil {
 		return err
@@ -268,6 +321,74 @@ func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
 	return result
 }
 
+// couldBeInteger is a helper that determines if the represented value could
+// result in an integer.
+//
+// This function only works for RawConfigs that have "Key" set, meaning that
+// a single result can be produced. Calling this function will overwrite
+// the Config and Value results to be a test value.
+//
+// This function is conservative. If there is some doubt about whether the
+// result could be an integer -- for example, if it depends on a variable
+// whose type we don't know yet -- it will still return true.
+func (r *RawConfig) couldBeInteger() bool {
+	if r.Key == "" {
+		// un-keyed RawConfigs can never produce numbers
+		return false
+	}
+	if r.Body == nil {
+		// Normal path: using the interpolator in this package
+		// Interpolate with a fixed number to verify that it's a number.
+		r.interpolate(func(root ast.Node) (interface{}, error) {
+			// Execute the node but transform the AST so that it returns
+			// a fixed value of "5" for all interpolations.
+			result, err := hil.Eval(
+				hil.FixedValueTransform(
+					root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+				nil)
+			if err != nil {
+				return "", err
+			}
+
+			return result.Value, nil
+		})
+		_, err := strconv.ParseInt(r.Value().(string), 0, 0)
+		return err == nil
+	} else {
+		// HCL2 experiment path: using the HCL2 API via shims
+		//
+		// This path catches fewer situations because we have to assume all
+		// variables are entirely unknown in HCL2, rather than the assumption
+		// above that all variables can be numbers because names like "var.foo"
+		// are considered a single variable rather than an attribute access.
+		// This is fine in practice, because we get a definitive answer
+		// during the graph walk when we have real values to work with.
+		attrs, diags := r.Body.JustAttributes()
+		if diags.HasErrors() {
+			// This body is not just a single attribute with a value, so
+			// this can't be a number.
+			return false
+		}
+		attr, hasAttr := attrs[r.Key]
+		if !hasAttr {
+			return false
+		}
+		result, diags := hcl2EvalWithUnknownVars(attr.Expr)
+		if diags.HasErrors() {
+			// We'll conservatively assume that this error is a result of
+			// us not being ready to fully-populate the scope, and catch
+			// any further problems during the main graph walk.
+			return true
+		}
+
+		// If the result is convertible to number then we'll allow it.
+		// We do this because an unknown string is optimistically convertible
+		// to number (might be "5") but a _known_ string "hello" is not.
+		_, err := convert.Convert(result, cty.Number)
+		return err == nil
+	}
+}
+
 // UnknownKeys returns the keys of the configuration that are unknown
 // because they had interpolated variables that must be computed.
 func (r *RawConfig) UnknownKeys() []string {
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
index ea68b4fc..01052782 100644
--- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -2,7 +2,15 @@
 
 package config
 
-import "fmt"
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[ManagedResourceMode-0]
+	_ = x[DataResourceMode-1]
+}
 
 const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
 
@@ -10,7 +18,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35}
 
 func (i ResourceMode) String() string {
 	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
-		return fmt.Sprintf("ResourceMode(%d)", i)
+		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
 	}
 	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
 }
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
index f7bfadd9..831fc778 100644
--- a/vendor/github.com/hashicorp/terraform/config/testing.go
+++ b/vendor/github.com/hashicorp/terraform/config/testing.go
@@ -6,6 +6,8 @@ import (
 
 // TestRawConfig is used to create a RawConfig for testing.
 func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+	t.Helper()
+
 	cfg, err := NewRawConfig(c)
 	if err != nil {
 		t.Fatalf("err: %s", err)
diff --git a/vendor/github.com/hashicorp/terraform/config_unix.go b/vendor/github.com/hashicorp/terraform/config_unix.go
index 4694d511..e8ec1a2a 100644
--- a/vendor/github.com/hashicorp/terraform/config_unix.go
+++ b/vendor/github.com/hashicorp/terraform/config_unix.go
@@ -3,13 +3,10 @@
 package main
 
 import (
-	"bytes"
 	"errors"
-	"log"
 	"os"
-	"os/exec"
+	"os/user"
 	"path/filepath"
-	"strings"
 )
 
 func configFile() (string, error) {
@@ -33,22 +30,23 @@ func configDir() (string, error) {
 
 func homeDir() (string, error) {
 	// First prefer the HOME environmental variable
 	if home := os.Getenv("HOME"); home != "" {
-		log.Printf("[DEBUG] Detected home directory from env var: %s", home)
+		// FIXME: homeDir gets called from globalPluginDirs during init, before
+		// the logging is setup. We should move meta initialization outside of
+		// init, but in the meantime we just need to silence this output.
+		//log.Printf("[DEBUG] Detected home directory from env var: %s", home)
+
 		return home, nil
 	}
 
-	// If that fails, try the shell
-	var stdout bytes.Buffer
-	cmd := exec.Command("sh", "-c", "eval echo ~$USER")
-	cmd.Stdout = &stdout
-	if err := cmd.Run(); err != nil {
+	// If that fails, try the built-in os/user package
+	user, err := user.Current()
+	if err != nil {
 		return "", err
 	}
 
-	result := strings.TrimSpace(stdout.String())
-	if result == "" {
+	if user.HomeDir == "" {
 		return "", errors.New("blank output")
 	}
 
-	return result, nil
+	return user.HomeDir, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go
new file mode 100644
index 00000000..6df7ddd0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/backend.go
@@ -0,0 +1,55 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcldec"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Backend represents a "backend" block inside a "terraform" block in a module
+// or file.
+type Backend struct {
+	Type   string
+	Config hcl.Body
+
+	TypeRange hcl.Range
+	DeclRange hcl.Range
+}
+
+func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
+	return &Backend{
+		Type:      block.Labels[0],
+		TypeRange: block.LabelRanges[0],
+		Config:    block.Body,
+		DeclRange: block.DefRange,
+	}, nil
+}
+
+// Hash produces a hash value for the receiver that covers the type and the
+// portions of the config that conform to the given schema.
+//
+// If the config does not conform to the schema then the result is not
+// meaningful for comparison since it will be based on an incomplete result.
+//
+// As an exception, required attributes in the schema are treated as optional
+// for the purpose of hashing, so that an incomplete configuration can still
+// be hashed. Other errors, such as extraneous attributes, have no such special
+// case.
+func (b *Backend) Hash(schema *configschema.Block) int {
+	// Don't fail if required attributes are not set. Instead, we'll just
+	// hash them as nulls.
+	schema = schema.NoneRequired()
+	spec := schema.DecoderSpec()
+	val, _ := hcldec.Decode(b.Config, spec, nil)
+	if val == cty.NilVal {
+		val = cty.UnknownVal(schema.ImpliedType())
+	}
+
+	toHash := cty.TupleVal([]cty.Value{
+		cty.StringVal(b.Type),
+		val,
+	})
+
+	return toHash.Hash()
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
new file mode 100644
index 00000000..66037fcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
@@ -0,0 +1,116 @@
+package configs
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// -------------------------------------------------------------------------
+// Functions in this file are compatibility shims intended to ease conversion
+// from the old configuration loader. Any use of these functions that makes
+// a change should generate a deprecation warning explaining to the user how
+// to update their code for new patterns.
+//
+// Shims are particularly important for any patterns that have been widely
+// documented in books, tutorials, etc. Users will still be starting from
+// these examples and we want to help them adopt the latest patterns rather
+// than leave them stranded.
+// -------------------------------------------------------------------------
+
+// shimTraversalInString takes any arbitrary expression and checks if it is
+// a quoted string in the native syntax. If it _is_, then it is parsed as a
+// traversal and re-wrapped into a synthetic traversal expression and a
+// warning is generated. Otherwise, the given expression is just returned
+// verbatim.
+//
+// This function has no effect on expressions from the JSON syntax, since
+// traversals in strings are the required pattern in that syntax.
+//
+// If wantKeyword is set, the generated warning diagnostic will talk about
+// keywords rather than references. The behavior is otherwise unchanged, and
+// the caller remains responsible for checking that the result is indeed
+// a keyword, e.g. using hcl.ExprAsKeyword.
+func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) {
+	// ObjectConsKeyExpr is a special wrapper type used for keys on object
+	// constructors to deal with the fact that naked identifiers are normally
+	// handled as "bareword" strings rather than as variable references.
Since + // we know we're interpreting as a traversal anyway (and thus it won't + // matter whether it's a string or an identifier) we can safely just unwrap + // here and then process whatever we find inside as normal. + if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok { + expr = ocke.Wrapped + } + + if !exprIsNativeQuotedString(expr) { + return expr, nil + } + + strVal, diags := expr.Value(nil) + if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() { + // Since we're not even able to attempt a shim here, we'll discard + // the diagnostics we saw so far and let the caller's own error + // handling take care of reporting the invalid expression. + return expr, nil + } + + // The position handling here isn't _quite_ right because it won't + // take into account any escape sequences in the literal string, but + // it should be close enough for any error reporting to make sense. + srcRange := expr.Range() + startPos := srcRange.Start // copy + startPos.Column++ // skip initial quote + startPos.Byte++ // skip initial quote + + traversal, tDiags := hclsyntax.ParseTraversalAbs( + []byte(strVal.AsString()), + srcRange.Filename, + startPos, + ) + diags = append(diags, tDiags...) + + // For initial release our deprecation warnings are disabled to allow + // a period where modules can be compatible with both old and new + // conventions. + // FIXME: Re-enable these deprecation warnings in a release prior to + // Terraform 0.13 and then remove the shims altogether for 0.13. + /* + if wantKeyword { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted keywords are deprecated", + Detail: "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.", + Subject: &srcRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Quoted references are deprecated", + Detail: "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.", + Subject: &srcRange, + }) + } + */ + + return &hclsyntax.ScopeTraversalExpr{ + Traversal: traversal, + SrcRange: srcRange, + }, diags +} + +// shimIsIgnoreChangesStar returns true if the given expression seems to be +// a string literal whose value is "*". This is used to support a legacy +// form of ignore_changes = all . +// +// This function does not itself emit any diagnostics, so it's the caller's +// responsibility to emit a warning diagnostic when this function returns true. 
+func shimIsIgnoreChangesStar(expr hcl.Expression) bool { + val, valDiags := expr.Value(nil) + if valDiags.HasErrors() { + return false + } + if val.Type() != cty.String || val.IsNull() || !val.IsKnown() { + return false + } + return val.AsString() == "*" +} diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go new file mode 100644 index 00000000..82943122 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config.go @@ -0,0 +1,205 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. +// +// A module tree described in *this* package represents the static tree +// represented by configuration. During evaluation a static ModuleNode may +// expand into zero or more module instances depending on the use of count and +// for_each configuration attributes within each call. +type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *Module + + // CallRange is the source range for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallRange hcl.Range + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // SourceAddrRange is the location in the configuration source where the + // SourceAddr value was set, for use in diagnostic messages. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddrRange hcl.Range + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. 
+ // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// NewEmptyConfig constructs a single-node configuration tree with an empty +// root module. This is generally a pretty useless thing to do, so most callers +// should instead use BuildConfig. +func NewEmptyConfig() *Config { + ret := &Config{} + ret.Root = ret + ret.Children = make(map[string]*Config) + ret.Module = &Module{} + return ret +} + +// Depth returns the number of "hops" the receiver is from the root of its +// module tree, with the root module having a depth of zero. +func (c *Config) Depth() int { + ret := 0 + this := c + for this.Parent != nil { + ret++ + this = this.Parent + } + return ret +} + +// DeepEach calls the given function once for each module in the tree, starting +// with the receiver. +// +// A parent is always called before its children and children of a particular +// node are visited in lexicographic order by their names. +func (c *Config) DeepEach(cb func(c *Config)) { + cb(c) + + names := make([]string, 0, len(c.Children)) + for name := range c.Children { + names = append(names, name) + } + + for _, name := range names { + c.Children[name].DeepEach(cb) + } +} + +// AllModules returns a slice of all the receiver and all of its descendent +// nodes in the module tree, in the same order they would be visited by +// DeepEach. +func (c *Config) AllModules() []*Config { + var ret []*Config + c.DeepEach(func(c *Config) { + ret = append(ret, c) + }) + return ret +} + +// Descendent returns the descendent config that has the given path beneath +// the receiver, or nil if there is no such module. +// +// The path traverses the static module tree, prior to any expansion to handle +// count and for_each arguments. +// +// An empty path will just return the receiver, and is therefore pointless. +func (c *Config) Descendent(path addrs.Module) *Config { + current := c + for _, name := range path { + current = current.Children[name] + if current == nil { + return nil + } + } + return current +} + +// DescendentForInstance is like Descendent except that it accepts a path +// to a particular module instance in the dynamic module graph, returning +// the node from the static module graph that corresponds to it. +// +// All instances created by a particular module call share the same +// configuration, so the keys within the given path are disregarded. +func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config { + current := c + for _, step := range path { + current = current.Children[step.Name] + if current == nil { + return nil + } + } + return current +} + +// ProviderTypes returns the names of each distinct provider type referenced +// in the receiving configuration. +// +// This is a helper for easily determining which provider types are required +// to fully interpret the configuration, though it does not include version +// information and so callers are expected to have already dealt with +// provider version selection in an earlier step and have identified suitable +// versions for each provider. 
+func (c *Config) ProviderTypes() []string { + m := make(map[string]struct{}) + c.gatherProviderTypes(m) + + ret := make([]string, 0, len(m)) + for k := range m { + ret = append(ret, k) + } + sort.Strings(ret) + return ret +} +func (c *Config) gatherProviderTypes(m map[string]struct{}) { + if c == nil { + return + } + + for _, pc := range c.Module.ProviderConfigs { + m[pc.Name] = struct{}{} + } + for _, rc := range c.Module.ManagedResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + for _, rc := range c.Module.DataResources { + providerAddr := rc.ProviderConfigAddr() + m[providerAddr.Type] = struct{}{} + } + + // Must also visit our child modules, recursively. + for _, cc := range c.Children { + cc.gatherProviderTypes(m) + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go new file mode 100644 index 00000000..948b2c8f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go @@ -0,0 +1,179 @@ +package configs + +import ( + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +// +// The result is a module tree that has so far only had basic module- and +// file-level invariants validated. If the returned diagnostics contains errors, +// the returned module tree may be incomplete but can still be used carefully +// for static analysis. +func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := map[string]*Config{} + + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + VersionConstraint: call.Version, + Parent: parent, + CallRange: call.DeclRange, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. 
+ continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallRange: call.DeclRange, + SourceAddr: call.SourceAddr, + SourceAddrRange: call.SourceAddrRange, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + + ret[call.Name] = child + } + + return ret, diags +} + +// A ModuleWalker knows how to find and load a child module given details about +// the module to be loaded and a reference to its partially-loaded parent +// Config. +type ModuleWalker interface { + // LoadModule finds and loads a requested child module. + // + // If errors are detected during loading, implementations should return them + // in the diagnostics object. If the diagnostics object contains any errors + // then the caller will tolerate the returned module being nil or incomplete. + // If no errors are returned, it should be non-nil and complete. + // + // Full validation need not have been performed but an implementation should + // ensure that the basic file- and module-validations performed by the + // LoadConfigDir function (valid syntax, no namespace collisions, etc) have + // been performed before returning a module. + LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. +type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) + +// LoadModule implements ModuleWalker. +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) { + return f(req) +} + +// ModuleRequest is used with the ModuleWalker interface to describe a child +// module that must be loaded. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // SourceAddrRange is the source range for the SourceAddr value as it + // was provided in configuration. This can and should be used to generate + // diagnostics about the source address having invalid syntax, referring + // to a non-existent object, etc. + SourceAddrRange hcl.Range + + // VersionConstraint is the version constraint applied to the module in + // configuration. This data structure includes the source range for + // the constraint, which can and should be used to generate diagnostics + // about constraint-related issues, such as constraints that eliminate all + // available versions of a module whose source is otherwise valid. + VersionConstraint VersionConstraint + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. 
Callers may refer to any field of this
+	// structure except Children, which is still under construction when
+	// ModuleRequest objects are created and thus has undefined content.
+	// The main reason this is provided is so that full module paths can
+	// be constructed for uniqueness.
+	Parent *Config
+
+	// CallRange is the source range for the header of the "module" block
+	// in configuration that prompted this request. This can be used as the
+	// subject of an error diagnostic that relates to the module call itself,
+	// rather than to either its source address or its version number.
+	CallRange hcl.Range
+}
+
+// DisabledModuleWalker is a ModuleWalker that doesn't support
+// child modules at all, and so will return an error if asked to load one.
+//
+// This is provided primarily for testing. There is no good reason to use this
+// in the main application.
+var DisabledModuleWalker ModuleWalker
+
+func init() {
+	DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
+		return nil, nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Child modules are not supported",
+				Detail:   "Child module calls are not allowed in this context.",
+				Subject:  &req.CallRange,
+			},
+		}
+	})
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
new file mode 100644
index 00000000..ebbeb3b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
@@ -0,0 +1,125 @@
+package configload
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+func copyDir(dst, src string) error {
+	src, err := filepath.EvalSymlinks(src)
+	if err != nil {
+		return err
+	}
+
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if path == src {
+			return nil
+		}
+
+		if strings.HasPrefix(filepath.Base(path), ".") {
+			// Skip any dot files
+			if info.IsDir() {
+				return filepath.SkipDir
+			} else {
+				return nil
+			}
+		}
+
+		// The "path" has the src prefixed to it. We need to join our
+		// destination with the path without the src on it.
+		dstPath := filepath.Join(dst, path[len(src):])
+
+		// we don't want to try and copy the same file over itself.
+		if eq, err := sameFile(path, dstPath); eq {
+			return nil
+		} else if err != nil {
+			return err
+		}
+
+		// If we have a directory, make that subdirectory, then continue
+		// the walk.
+		if info.IsDir() {
+			if path == filepath.Join(src, dst) {
+				// dst is in src; don't walk it.
+				return nil
+			}
+
+			if err := os.MkdirAll(dstPath, 0755); err != nil {
+				return err
+			}
+
+			return nil
+		}
+
+		// If the current path is a symlink, recreate the symlink relative to
+		// the dst directory
+		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
+			target, err := os.Readlink(path)
+			if err != nil {
+				return err
+			}
+
+			return os.Symlink(target, dstPath)
+		}
+
+		// If we have a file, copy the contents.
+		srcF, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer srcF.Close()
+
+		dstF, err := os.Create(dstPath)
+		if err != nil {
+			return err
+		}
+		defer dstF.Close()
+
+		if _, err := io.Copy(dstF, srcF); err != nil {
+			return err
+		}
+
+		// Chmod it
+		return os.Chmod(dstPath, info.Mode())
+	}
+
+	return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths are the same file. 
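+// (For example, a file reached both directly and through a symlinked parent
+// directory yields two different path strings for the same underlying file.)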
+// If the paths don't match, we lookup the inode on supported systems. +func sameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aIno, err := inode(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bIno, err := inode(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if aIno > 0 && aIno == bIno { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go new file mode 100644 index 00000000..8b615f90 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go @@ -0,0 +1,4 @@ +// Package configload knows how to install modules into the .terraform/modules +// directory and to load modules from those installed locations. It is used +// in conjunction with the LoadConfig function in the parent package. +package configload diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go new file mode 100644 index 00000000..75c7ef1f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go @@ -0,0 +1,152 @@ +package configload + +import ( + "fmt" + "log" + "os" + "path/filepath" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of Terraform's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable. + +var goGetterDetectors = []getter.Detector{ + new(getter.GitHubDetector), + new(getter.BitBucketDetector), + new(getter.GCSDetector), + new(getter.S3Detector), + new(getter.FileDetector), +} + +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "gcs": new(getter.GCSGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. 
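+//
+// A sketch of the intended use, with illustrative paths and a placeholder
+// addr:
+//
+//	g := make(reusingGetter)
+//	dir1, _ := g.getWithGoGetter(".terraform/modules/a", addr) // fetches addr
+//	dir2, _ := g.getWithGoGetter(".terraform/modules/b", addr) // reuses dir1's files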
+//
+// The keys in a reusingGetter are resolved and trimmed source addresses
+// (with a scheme always present, and without any "subdir" component),
+// and the values are the paths where each source was previously installed.
+type reusingGetter map[string]string
+
+// getWithGoGetter retrieves the package referenced in the given address
+// into the installation path and then returns the full path to any subdir
+// indicated in the address.
+//
+// The errors returned by this function are those surfaced by the underlying
+// go-getter library, which have very inconsistent quality as
+// end-user-actionable error messages. At this time we do not have any
+// reasonable way to improve these error messages at this layer because
+// the underlying errors are not separately recognizable.
+func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
+	packageAddr, subDir := splitAddrSubdir(addr)
+
+	log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
+
+	realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
+	if err != nil {
+		return "", err
+	}
+
+	var realSubDir string
+	realAddr, realSubDir = splitAddrSubdir(realAddr)
+	if realSubDir != "" {
+		subDir = filepath.Join(realSubDir, subDir)
+	}
+
+	if realAddr != packageAddr {
+		log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
+	}
+
+	if prevDir, exists := g[realAddr]; exists {
+		log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
+		err := os.Mkdir(instPath, os.ModePerm)
+		if err != nil {
+			return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
+		}
+		err = copyDir(instPath, prevDir)
+		if err != nil {
+			return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
+		}
+	} else {
+		log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
+		client := getter.Client{
+			Src: realAddr,
+			Dst: instPath,
+			Pwd: instPath,
+
+			Mode: getter.ClientModeDir,
+
+			Detectors:     goGetterNoDetectors, // we already did detection above
+			Decompressors: goGetterDecompressors,
+			Getters:       goGetterGetters,
+		}
+		err = client.Get()
+		if err != nil {
+			return "", err
+		}
+		// Remember where we installed this so we might reuse this directory
+		// on subsequent calls to avoid re-downloading.
+		g[realAddr] = instPath
+	}
+
+	// Our subDir string can contain wildcards until this point, so that
+	// e.g. a subDir of * can expand to one top-level directory in a .tar.gz
+	// archive. Now that we've expanded the archive successfully we must
+	// resolve that into a concrete path.
+	var finalDir string
+	if subDir != "" {
+		finalDir, err = getter.SubdirGlob(instPath, subDir)
+		log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
+		if err != nil {
+			return "", err
+		}
+	} else {
+		finalDir = instPath
+	}
+
+	// If we got this far then we have apparently succeeded in downloading
+	// the requested object! 
+ return filepath.Clean(finalDir), nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go new file mode 100644 index 00000000..57df0414 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go new file mode 100644 index 00000000..4dc28eaa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package configload + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go new file mode 100644 index 00000000..0d22e672 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package configload + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go new file mode 100644 index 00000000..416b48fc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go @@ -0,0 +1,150 @@ +package configload + +import ( + "fmt" + "path/filepath" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/spf13/afero" +) + +// A Loader instance is the main entry-point for loading configurations via +// this package. +// +// It extends the general config-loading functionality in the parent package +// "configs" to support installation of modules from remote sources and +// loading full configurations using modules that were previously installed. +type Loader struct { + // parser is used to read configuration + parser *configs.Parser + + // modules is used to install and locate descendent modules that are + // referenced (directly or indirectly) from the root module. + modules moduleMgr +} + +// Config is used with NewLoader to specify configuration arguments for the +// loader. +type Config struct { + // ModulesDir is a path to a directory where descendent modules are + // (or should be) installed. (This is usually the + // .terraform/modules directory, in the common case where this package + // is being loaded from the main Terraform CLI package.) 
+ ModulesDir string + + // Services is the service discovery client to use when locating remote + // module registry endpoints. If this is nil then registry sources are + // not supported, which should be true only in specialized circumstances + // such as in tests. + Services *disco.Disco +} + +// NewLoader creates and returns a loader that reads configuration from the +// real OS filesystem. +// +// The loader has some internal state about the modules that are currently +// installed, which is read from disk as part of this function. If that +// manifest cannot be read then an error will be returned. +func NewLoader(config *Config) (*Loader, error) { + fs := afero.NewOsFs() + parser := configs.NewParser(fs) + reg := registry.NewClient(config.Services, nil) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: true, + Dir: config.ModulesDir, + Services: config.Services, + Registry: reg, + }, + } + + err := ret.modules.readModuleManifestSnapshot() + if err != nil { + return nil, fmt.Errorf("failed to read module manifest: %s", err) + } + + return ret, nil +} + +// ModulesDir returns the path to the directory where the loader will look for +// the local cache of remote module packages. +func (l *Loader) ModulesDir() string { + return l.modules.Dir +} + +// RefreshModules updates the in-memory cache of the module manifest from the +// module manifest file on disk. This is not necessary in normal use because +// module installation and configuration loading are separate steps, but it +// can be useful in tests where module installation is done as a part of +// configuration loading by a helper function. +// +// Call this function after any module installation where an existing loader +// is already alive and may be used again later. +// +// An error is returned if the manifest file cannot be read. +func (l *Loader) RefreshModules() error { + if l == nil { + // Nothing to do, then. + return nil + } + return l.modules.readModuleManifestSnapshot() +} + +// Parser returns the underlying parser for this loader. +// +// This is useful for loading other sorts of files than the module directories +// that a loader deals with, since then they will share the source code cache +// for this loader and can thus be shown as snippets in diagnostic messages. +func (l *Loader) Parser() *configs.Parser { + return l.parser +} + +// Sources returns the source code cache for the underlying parser of this +// loader. This is a shorthand for l.Parser().Sources(). +func (l *Loader) Sources() map[string][]byte { + return l.parser.Sources() +} + +// IsConfigDir returns true if and only if the given directory contains at +// least one Terraform configuration file. This is a wrapper around calling +// the same method name on the loader's parser. +func (l *Loader) IsConfigDir(path string) bool { + return l.parser.IsConfigDir(path) +} + +// ImportSources writes into the receiver's source code the given source +// code buffers. +// +// This is useful in the situation where an ancillary loader is created for +// some reason (e.g. loading config from a plan file) but the cached source +// code from that loader must be imported into the "main" loader in order +// to return source code snapshots in diagnostic messages. 
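+//
+// For example: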
+//
+//	loader.ImportSources(otherLoader.Sources())
+func (l *Loader) ImportSources(sources map[string][]byte) {
+	p := l.Parser()
+	for name, src := range sources {
+		p.ForceFileSource(name, src)
+	}
+}
+
+// ImportSourcesFromSnapshot writes into the receiver's source code the
+// source files from the given snapshot.
+//
+// This is similar to ImportSources but knows how to unpack and flatten a
+// snapshot data structure to get the corresponding flat source file map.
+func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) {
+	p := l.Parser()
+	for _, m := range snap.Modules {
+		baseDir := m.Dir
+		for fn, src := range m.Files {
+			fullPath := filepath.Join(baseDir, fn)
+			p.ForceFileSource(fullPath, src)
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
new file mode 100644
index 00000000..0e6cba93
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
@@ -0,0 +1,105 @@
+package configload
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/configs"
+)
+
+// LoadConfig reads the Terraform module in the given directory and uses it as the
+// root module to build the static module tree that represents a configuration,
+// assuming that all required descendent modules have already been installed.
+//
+// If error diagnostics are returned, the returned configuration may be either
+// nil or incomplete. In the latter case, cautious static analysis is possible
+// in spite of the errors.
+//
+// LoadConfig performs the basic syntax and uniqueness validations that are
+// required to process the individual modules, and also detects any
+// disagreement between the configuration and the module manifest, so that
+// the user can be prompted to re-run "terraform init" when needed.
+func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
+	rootMod, diags := l.parser.LoadConfigDir(rootDir)
+	if rootMod == nil {
+		return nil, diags
+	}
+
+	cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
+	diags = append(diags, cDiags...)
+
+	return cfg, diags
+}
+
+// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
+// are presumed to have already been installed. A different function
+// (moduleWalkerInstall) is used for installation.
+func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
+	// Since we're just loading here, we expect that all referenced modules
+	// will be already installed and described in our manifest. However, we
+	// do verify that the manifest and the configuration are in agreement
+	// so that we can prompt the user to run "terraform init" if not.
+
+	key := l.modules.manifest.ModuleKey(req.Path)
+	record, exists := l.modules.manifest[key]
+
+	if !exists {
+		return nil, nil, hcl.Diagnostics{
+			{
+				Severity: hcl.DiagError,
+				Summary:  "Module not installed",
+				Detail:   "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.",
+				Subject:  &req.CallRange,
+			},
+		}
+	}
+
+	var diags hcl.Diagnostics
+
+	// Check for inconsistencies between manifest and config
+	if req.SourceAddr != record.SourceAddr {
+		diags = append(diags, &hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary:  "Module source has changed",
+			Detail:   "The source address was changed since this module was installed. 
Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if len(req.VersionConstraint.Required) > 0 && record.Version == nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + Subject: &req.SourceAddrRange, + }) + } + if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Module version requirements have changed", + Detail: fmt.Sprintf( + "The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.", + record.Version, + ), + Subject: &req.SourceAddrRange, + }) + } + + mod, mDiags := l.parser.LoadConfigDir(record.Dir) + diags = append(diags, mDiags...) + if mod == nil { + // nil specifically indicates that the directory does not exist or + // cannot be read, so in this case we'll discard any generic diagnostics + // returned from LoadConfigDir and produce our own context-sensitive + // error message. + return nil, nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Module not installed", + Detail: fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir), + Subject: &req.CallRange, + }, + } + } + + return mod, record.Version, diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go new file mode 100644 index 00000000..44c64397 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go @@ -0,0 +1,504 @@ +package configload + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + "time" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/spf13/afero" +) + +// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously +// creates an in-memory snapshot of the configuration files used, which can +// be later used to create a loader that may read only from this snapshot. +func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) { + rootMod, diags := l.parser.LoadConfigDir(rootDir) + if rootMod == nil { + return nil, nil, diags + } + + snap := &Snapshot{ + Modules: map[string]*SnapshotModule{}, + } + walker := l.makeModuleWalkerSnapshot(snap) + cfg, cDiags := configs.BuildConfig(rootMod, walker) + diags = append(diags, cDiags...) + + addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil) + diags = append(diags, addDiags...) + + return cfg, snap, diags +} + +// NewLoaderFromSnapshot creates a Loader that reads files only from the +// given snapshot. +// +// A snapshot-based loader cannot install modules, so calling InstallModules +// on the return value will cause a panic. +// +// A snapshot-based loader also has access only to configuration files. 
Its +// underlying parser does not have access to other files in the native +// filesystem, such as values files. For those, either use a normal loader +// (created by NewLoader) or use the configs.Parser API directly. +func NewLoaderFromSnapshot(snap *Snapshot) *Loader { + fs := snapshotFS{snap} + parser := configs.NewParser(fs) + + ret := &Loader{ + parser: parser, + modules: moduleMgr{ + FS: afero.Afero{Fs: fs}, + CanInstall: false, + manifest: snap.moduleManifest(), + }, + } + + return ret +} + +// Snapshot is an in-memory representation of the source files from a +// configuration, which can be used as an alternative configurations source +// for a loader with NewLoaderFromSnapshot. +// +// The primary purpose of a Snapshot is to build the configuration portion +// of a plan file (see ../../plans/planfile) so that it can later be reloaded +// and used to recover the exact configuration that the plan was built from. +type Snapshot struct { + // Modules is a map from opaque module keys (suitable for use as directory + // names on all supported operating systems) to the snapshot information + // about each module. + Modules map[string]*SnapshotModule +} + +// NewEmptySnapshot constructs and returns a snapshot containing only an empty +// root module. This is not useful for anything except placeholders in tests. +func NewEmptySnapshot() *Snapshot { + return &Snapshot{ + Modules: map[string]*SnapshotModule{ + "": &SnapshotModule{ + Files: map[string][]byte{}, + }, + }, + } +} + +// SnapshotModule represents a single module within a Snapshot. +type SnapshotModule struct { + // Dir is the path, relative to the root directory given when the + // snapshot was created, where the module appears in the snapshot's + // virtual filesystem. + Dir string + + // Files is a map from each configuration file filename for the + // module to a raw byte representation of the source file contents. + Files map[string][]byte + + // SourceAddr is the source address given for this module in configuration. + SourceAddr string `json:"Source"` + + // Version is the version of the module that is installed, or nil if + // the module is installed from a source that does not support versions. + Version *version.Version `json:"-"` +} + +// moduleManifest constructs a module manifest based on the contents of +// the receiving snapshot. +func (s *Snapshot) moduleManifest() modsdir.Manifest { + ret := make(modsdir.Manifest) + + for k, modSnap := range s.Modules { + ret[k] = modsdir.Record{ + Key: k, + Dir: modSnap.Dir, + SourceAddr: modSnap.SourceAddr, + Version: modSnap.Version, + } + } + + return ret +} + +// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit +// the same lookup behaviors as l.moduleWalkerLoad but will additionally write +// source files from the referenced modules into the given snapshot. +func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker { + return configs.ModuleWalkerFunc( + func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) { + mod, v, diags := l.moduleWalkerLoad(req) + if diags.HasErrors() { + return mod, v, diags + } + + key := l.modules.manifest.ModuleKey(req.Path) + record, exists := l.modules.manifest[key] + + if !exists { + // Should never happen, since otherwise moduleWalkerLoader would've + // returned an error and we would've returned already. 
+ panic(fmt.Sprintf("module %s is not present in manifest", key)) + } + + addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version) + diags = append(diags, addDiags...) + + return mod, v, diags + }, + ) +} + +func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics { + var diags hcl.Diagnostics + + primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir) + if moreDiags.HasErrors() { + // Any diagnostics we get here should be already present + // in diags, so it's weird if we get here but we'll allow it + // and return a general error message in that case. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read directory for module", + Detail: fmt.Sprintf("The source directory %s could not be read", dir), + }) + return diags + } + + snapMod := &SnapshotModule{ + Dir: dir, + Files: map[string][]byte{}, + SourceAddr: sourceAddr, + Version: v, + } + + files := make([]string, 0, len(primaryFiles)+len(overrideFiles)) + files = append(files, primaryFiles...) + files = append(files, overrideFiles...) + sources := l.Sources() // should be populated with all the files we need by now + for _, filePath := range files { + filename := filepath.Base(filePath) + src, exists := sources[filePath] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing source file for snapshot", + Detail: fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath), + }) + continue + } + snapMod.Files[filepath.Clean(filename)] = src + } + + snap.Modules[key] = snapMod + + return diags +} + +// snapshotFS is an implementation of afero.Fs that reads from a snapshot. +// +// This is not intended as a general-purpose filesystem implementation. Instead, +// it just supports the minimal functionality required to support the +// configuration loader and parser as an implementation detail of creating +// a loader from a snapshot. +type snapshotFS struct { + snap *Snapshot +} + +var _ afero.Fs = snapshotFS{} + +func (fs snapshotFS) Create(name string) (afero.File, error) { + return nil, fmt.Errorf("cannot create file inside configuration snapshot") +} + +func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directory inside configuration snapshot") +} + +func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error { + return fmt.Errorf("cannot create directories inside configuration snapshot") +} + +func (fs snapshotFS) Open(name string) (afero.File, error) { + + // Our "filesystem" is sparsely populated only with the directories + // mentioned by modules in our snapshot, so the high-level process + // for opening a file is: + // - Find the module snapshot corresponding to the containing directory + // - Find the file within that snapshot + // - Wrap the resulting byte slice in a snapshotFile to return + // + // The other possibility handled here is if the given name is for the + // module directory itself, in which case we'll return a snapshotDir + // instead. + // + // This function doesn't try to be incredibly robust in supporting + // different permutations of paths, etc because in practice we only + // need to support the path forms that our own loader and parser will + // generate. 
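+	//
+	// For example (illustrative paths): with a module snapshot whose Dir is
+	// ".terraform/modules/vpc", Open(".terraform/modules/vpc") returns a
+	// snapshotDir listing the module's filenames, while
+	// Open(".terraform/modules/vpc/main.tf") returns a snapshotFile over
+	// that file's bytes.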
+ + dir := filepath.Dir(name) + fn := filepath.Base(name) + directDir := filepath.Clean(name) + + // First we'll check to see if this is an exact path for a module directory. + // We need to do this first (rather than as part of the next loop below) + // because a module in a child directory of another module can otherwise + // appear to be a file in that parent directory. + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == directDir { + // We've matched the module directory itself + filenames := make([]string, 0, len(candidate.Files)) + for n := range candidate.Files { + filenames = append(filenames, n) + } + sort.Strings(filenames) + return snapshotDir{ + filenames: filenames, + }, nil + } + } + + // If we get here then the given path isn't a module directory exactly, so + // we'll treat it as a file path and try to find a module directory it + // could be located in. + var modSnap *SnapshotModule + for _, candidate := range fs.snap.Modules { + modDir := filepath.Clean(candidate.Dir) + if modDir == dir { + modSnap = candidate + break + } + } + if modSnap == nil { + return nil, os.ErrNotExist + } + + src, exists := modSnap.Files[fn] + if !exists { + return nil, os.ErrNotExist + } + + return &snapshotFile{ + src: src, + }, nil +} + +func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) { + return fs.Open(name) +} + +func (fs snapshotFS) Remove(name string) error { + return fmt.Errorf("cannot remove file inside configuration snapshot") +} + +func (fs snapshotFS) RemoveAll(path string) error { + return fmt.Errorf("cannot remove files inside configuration snapshot") +} + +func (fs snapshotFS) Rename(old, new string) error { + return fmt.Errorf("cannot rename file inside configuration snapshot") +} + +func (fs snapshotFS) Stat(name string) (os.FileInfo, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + _, isDir := f.(snapshotDir) + return snapshotFileInfo{ + name: filepath.Base(name), + isDir: isDir, + }, nil +} + +func (fs snapshotFS) Name() string { + return "ConfigSnapshotFS" +} + +func (fs snapshotFS) Chmod(name string, mode os.FileMode) error { + return fmt.Errorf("cannot set file mode inside configuration snapshot") +} + +func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error { + return fmt.Errorf("cannot set file times inside configuration snapshot") +} + +type snapshotFile struct { + snapshotFileStub + src []byte + at int64 +} + +var _ afero.File = (*snapshotFile)(nil) + +func (f *snapshotFile) Read(p []byte) (n int, err error) { + if len(p) > 0 && f.at == int64(len(f.src)) { + return 0, io.EOF + } + if f.at > int64(len(f.src)) { + return 0, io.ErrUnexpectedEOF + } + if int64(len(f.src))-f.at >= int64(len(p)) { + n = len(p) + } else { + n = int(int64(len(f.src)) - f.at) + } + copy(p, f.src[f.at:f.at+int64(n)]) + f.at += int64(n) + return +} + +func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) { + f.at = off + return f.Read(p) +} + +func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case 0: + f.at = offset + case 1: + f.at += offset + case 2: + f.at = int64(len(f.src)) + offset + } + return f.at, nil +} + +type snapshotDir struct { + snapshotFileStub + filenames []string + at int +} + +var _ afero.File = snapshotDir{} + +func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) { + names, err := f.Readdirnames(count) + if err != nil { + return nil, err + } + ret := make([]os.FileInfo, 
len(names)) + for i, name := range names { + ret[i] = snapshotFileInfo{ + name: name, + isDir: false, + } + } + return ret, nil +} + +func (f snapshotDir) Readdirnames(count int) ([]string, error) { + var outLen int + names := f.filenames[f.at:] + if count > 0 { + if len(names) < count { + outLen = len(names) + } else { + outLen = count + } + if len(names) == 0 { + return nil, io.EOF + } + } else { + outLen = len(names) + } + f.at += outLen + + return names[:outLen], nil +} + +// snapshotFileInfo is a minimal implementation of os.FileInfo to support our +// virtual filesystem from snapshots. +type snapshotFileInfo struct { + name string + isDir bool +} + +var _ os.FileInfo = snapshotFileInfo{} + +func (fi snapshotFileInfo) Name() string { + return fi.name +} + +func (fi snapshotFileInfo) Size() int64 { + // In practice, our parser and loader never call Size + return -1 +} + +func (fi snapshotFileInfo) Mode() os.FileMode { + return os.ModePerm +} + +func (fi snapshotFileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi snapshotFileInfo) IsDir() bool { + return fi.isDir +} + +func (fi snapshotFileInfo) Sys() interface{} { + return nil +} + +type snapshotFileStub struct{} + +func (f snapshotFileStub) Close() error { + return nil +} + +func (f snapshotFileStub) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot read") +} + +func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) { + return 0, fmt.Errorf("cannot seek") +} + +func (f snapshotFileStub) Write(p []byte) (n int, err error) { + return f.WriteAt(p, 0) +} + +func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) WriteString(s string) (n int, err error) { + return 0, fmt.Errorf("cannot write to file in snapshot") +} + +func (f snapshotFileStub) Name() string { + // in practice, the loader and parser never use this + return "" +} + +func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Readdirnames(count int) ([]string, error) { + return nil, fmt.Errorf("cannot use Readdir on a file") +} + +func (f snapshotFileStub) Stat() (os.FileInfo, error) { + return nil, fmt.Errorf("cannot stat") +} + +func (f snapshotFileStub) Sync() error { + return nil +} + +func (f snapshotFileStub) Truncate(size int64) error { + return fmt.Errorf("cannot write to file in snapshot") +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go new file mode 100644 index 00000000..3c410eeb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go @@ -0,0 +1,76 @@ +package configload + +import ( + "os" + "path/filepath" + + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/spf13/afero" +) + +type moduleMgr struct { + FS afero.Afero + + // CanInstall is true for a module manager that can support installation. 
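+	// For example, loaders created by NewLoader set CanInstall, while
+	// snapshot-based loaders created by NewLoaderFromSnapshot leave it unset.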
+ // + // This must be set only if FS is an afero.OsFs, because the installer + // (which uses go-getter) is not aware of the virtual filesystem + // abstraction and will always write into the "real" filesystem. + CanInstall bool + + // Dir is the path where descendent modules are (or will be) installed. + Dir string + + // Services is a service discovery client that will be used to find + // remote module registry endpoints. This object may be pre-loaded with + // cached discovery information. + Services *disco.Disco + + // Registry is a client for the module registry protocol, which is used + // when a module is requested from a registry source. + Registry *registry.Client + + // manifest tracks the currently-installed modules for this manager. + // + // The loader may read this. Only the installer may write to it, and + // after a set of updates are completed the installer must call + // writeModuleManifestSnapshot to persist a snapshot of the manifest + // to disk for use on subsequent runs. + manifest modsdir.Manifest +} + +func (m *moduleMgr) manifestSnapshotPath() string { + return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename) +} + +// readModuleManifestSnapshot loads a manifest snapshot from the filesystem. +func (m *moduleMgr) readModuleManifestSnapshot() error { + r, err := m.FS.Open(m.manifestSnapshotPath()) + if err != nil { + if os.IsNotExist(err) { + // We'll treat a missing file as an empty manifest + m.manifest = make(modsdir.Manifest) + return nil + } + return err + } + + m.manifest, err = modsdir.ReadManifestSnapshot(r) + return err +} + +// writeModuleManifestSnapshot writes a snapshot of the current manifest +// to the filesystem. +// +// The caller must guarantee no concurrent modifications of the manifest for +// the duration of a call to this function, or the behavior is undefined. +func (m *moduleMgr) writeModuleManifestSnapshot() error { + w, err := m.FS.Create(m.manifestSnapshotPath()) + if err != nil { + return err + } + + return m.manifest.WriteSnapshot(w) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go new file mode 100644 index 00000000..594cf640 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go @@ -0,0 +1,45 @@ +package configload + +import ( + "strings" + + "github.com/hashicorp/go-getter" + + "github.com/hashicorp/terraform/registry/regsrc" +) + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. +// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. 
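+//
+// For example (an illustrative address):
+//
+//	packageAddr, subDir := splitAddrSubdir("git::https://example.com/network.git//modules/vpc")
+//	// packageAddr is "git::https://example.com/network.git"
+//	// subDir is "modules/vpc"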
+func splitAddrSubdir(addr string) (packageAddr, subDir string) {
+	return getter.SourceDirSubdir(addr)
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
new file mode 100644
index 00000000..86ca9d10
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
@@ -0,0 +1,43 @@
+package configload
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+)
+
+// NewLoaderForTests is a variant of NewLoader that is intended to be more
+// convenient for unit tests.
+//
+// The loader's modules directory is a separate temporary directory created
+// for each call. Along with the created loader, this function returns a
+// cleanup function that should be called before the test completes in order
+// to remove that temporary directory.
+//
+// In the case of any errors, t.Fatal (or similar) will be called to halt
+// execution of the test, so the calling test does not need to handle errors
+// itself.
+func NewLoaderForTests(t *testing.T) (*Loader, func()) {
+	t.Helper()
+
+	modulesDir, err := ioutil.TempDir("", "tf-configs")
+	if err != nil {
+		t.Fatalf("failed to create temporary modules dir: %s", err)
+		return nil, func() {}
+	}
+
+	cleanup := func() {
+		os.RemoveAll(modulesDir)
+	}
+
+	loader, err := NewLoader(&Config{
+		ModulesDir: modulesDir,
+	})
+	if err != nil {
+		cleanup()
+		t.Fatalf("failed to create config loader: %s", err)
+		return nil, func() {}
+	}
+
+	return loader, cleanup
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
new file mode 100644
index 00000000..e59f58d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
@@ -0,0 +1,274 @@
+package configschema
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// CoerceValue attempts to force the given value to conform to the type
+// implied by the receiver, while also applying the same validation and
+// transformation rules that would be applied by the decoder specification
+// returned by method DecoderSpec.
+//
+// This is useful in situations where a configuration must be derived from
+// an already-decoded value. It is always better to decode directly from
+// configuration where possible since then source location information is
+// still available to produce diagnostics, but in special situations this
+// function allows a compatible result to be obtained even if the
+// configuration objects are not available.
+//
+// If the given value cannot be converted to conform to the receiving schema
+// then an error is returned describing one of possibly many problems. This
+// error may be a cty.PathError indicating a position within the nested
+// data structure where the problem applies. 
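+//
+// A minimal sketch (the attribute name is illustrative):
+//
+//	in := cty.ObjectVal(map[string]cty.Value{
+//		"name": cty.StringVal("example"),
+//	})
+//	out, err := b.CoerceValue(in)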
+func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) { + var path cty.Path + return b.coerceValue(in, path) +} + +func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + switch { + case in.IsNull(): + return cty.NullVal(b.ImpliedType()), nil + case !in.IsKnown(): + return cty.UnknownVal(b.ImpliedType()), nil + } + + ty := in.Type() + if !ty.IsObjectType() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required") + } + + for name := range ty.AttributeTypes() { + if _, defined := b.Attributes[name]; defined { + continue + } + if _, defined := b.BlockTypes[name]; defined { + continue + } + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name) + } + + attrs := make(map[string]cty.Value) + + for name, attrS := range b.Attributes { + var val cty.Value + switch { + case ty.HasAttribute(name): + val = in.GetAttr(name) + case attrS.Computed || attrS.Optional: + val = cty.NullVal(attrS.Type) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name) + } + + val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + + attrs[name] = val + } + for typeName, blockS := range b.BlockTypes { + switch blockS.Nesting { + + case NestingSingle, NestingGroup: + switch { + case ty.HasAttribute(typeName): + var err error + val := in.GetAttr(typeName) + attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + case blockS.MinItems != 1 && blockS.MaxItems != 1: + if blockS.Nesting == NestingGroup { + attrs[typeName] = blockS.EmptyValue() + } else { + attrs[typeName] = cty.NullVal(blockS.ImpliedType()) + } + default: + // We use the word "attribute" here because we're talking about + // the cty sense of that word rather than the HCL sense. 
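+				// For example, a NestingSingle block type "network" with
+				// MinItems = MaxItems = 1 must be present in the input
+				// object as an attribute named "network".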
+ return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingList: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list") + } + l := coll.LengthInt() + if l < blockS.MinItems { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) + } + if l > blockS.MaxItems && blockS.MaxItems > 0 { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) + } + if l == 0 { + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.ListVal(elems) + case blockS.MinItems == 0: + attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType()) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingSet: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set") + } + l := coll.LengthInt() + if l < blockS.MinItems { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems) + } + if l > blockS.MaxItems && blockS.MaxItems > 0 { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems) + } + if l == 0 { + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + continue + } + elems := make([]cty.Value, 0, l) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + idx, val := it.Element() + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems = append(elems, val) + } + } + attrs[typeName] = cty.SetVal(elems) + case blockS.MinItems == 0: + attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType()) + default: + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName) + } + + case NestingMap: + switch { + case ty.HasAttribute(typeName): + coll := in.GetAttr(typeName) + + switch { + case coll.IsNull(): + attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType())) + continue + case !coll.IsKnown(): + attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType())) + continue + } + + if !coll.CanIterateElements() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") 
+ } + l := coll.LengthInt() + if l == 0 { + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + continue + } + elems := make(map[string]cty.Value) + { + path = append(path, cty.GetAttrStep{Name: typeName}) + for it := coll.ElementIterator(); it.Next(); { + var err error + key, val := it.Element() + if key.Type() != cty.String || key.IsNull() || !key.IsKnown() { + return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map") + } + val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key})) + if err != nil { + return cty.UnknownVal(b.ImpliedType()), err + } + elems[key.AsString()] = val + } + } + + // If the attribute values here contain any DynamicPseudoTypes, + // the concrete type must be an object. + useObject := false + switch { + case coll.Type().IsObjectType(): + useObject = true + default: + // It's possible that we were given a map, and need to coerce it to an object + ety := coll.Type().ElementType() + for _, v := range elems { + if !v.Type().Equals(ety) { + useObject = true + break + } + } + } + + if useObject { + attrs[typeName] = cty.ObjectVal(elems) + } else { + attrs[typeName] = cty.MapVal(elems) + } + default: + attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType()) + } + + default: + // should never happen because above is exhaustive + panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting)) + } + } + + return cty.ObjectVal(attrs), nil +} + +func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) { + val, err := convert.Convert(in, a.Type) + if err != nil { + return cty.UnknownVal(a.Type), path.NewError(err) + } + return val, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go new file mode 100644 index 00000000..d8f41eab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go @@ -0,0 +1,117 @@ +package configschema + +import ( + "github.com/hashicorp/hcl2/hcldec" +) + +var mapLabelNames = []string{"key"} + +// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body +// using the facilities in the hcldec package. +// +// The returned specification is guaranteed to return a value of the same type +// returned by method ImpliedType, but it may contain null values if any of the +// block attributes are defined as optional and/or computed respectively. +func (b *Block) DecoderSpec() hcldec.Spec { + ret := hcldec.ObjectSpec{} + if b == nil { + return ret + } + + for name, attrS := range b.Attributes { + ret[name] = attrS.decoderSpec(name) + } + + for name, blockS := range b.BlockTypes { + if _, exists := ret[name]; exists { + // This indicates an invalid schema, since it's not valid to + // define both an attribute and a block type of the same name. + // However, we don't raise this here since it's checked by + // InternalValidate. 
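+			// (For example, a schema defining both Attributes["tags"] and
+			// BlockTypes["tags"] would take this path; InternalValidate
+			// reports it as an error.)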
+			continue
+		}
+
+		childSpec := blockS.Block.DecoderSpec()
+
+		switch blockS.Nesting {
+		case NestingSingle, NestingGroup:
+			ret[name] = &hcldec.BlockSpec{
+				TypeName: name,
+				Nested:   childSpec,
+				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
+			}
+			if blockS.Nesting == NestingGroup {
+				ret[name] = &hcldec.DefaultSpec{
+					Primary: ret[name],
+					Default: &hcldec.LiteralSpec{
+						Value: blockS.EmptyValue(),
+					},
+				}
+			}
+		case NestingList:
+			// We prefer to use a list where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use a tuple
+			// instead, at the expense of our type then not being predictable.
+			if blockS.Block.ImpliedType().HasDynamicTypes() {
+				ret[name] = &hcldec.BlockTupleSpec{
+					TypeName: name,
+					Nested:   childSpec,
+					MinItems: blockS.MinItems,
+					MaxItems: blockS.MaxItems,
+				}
+			} else {
+				ret[name] = &hcldec.BlockListSpec{
+					TypeName: name,
+					Nested:   childSpec,
+					MinItems: blockS.MinItems,
+					MaxItems: blockS.MaxItems,
+				}
+			}
+		case NestingSet:
+			// We forbid dynamically-typed attributes inside NestingSet in
+			// InternalValidate, so we don't do anything special to handle
+			// that here. (There is no set analog to tuple and object types,
+			// because cty's set implementation depends on knowing the static
+			// type in order to properly compute its internal hashes.)
+			ret[name] = &hcldec.BlockSetSpec{
+				TypeName: name,
+				Nested:   childSpec,
+				MinItems: blockS.MinItems,
+				MaxItems: blockS.MaxItems,
+			}
+		case NestingMap:
+			// We prefer to use a map where possible, since it makes our
+			// implied type more complete, but if there are any
+			// dynamically-typed attributes inside we must use an object
+			// instead, at the expense of our type then not being predictable.
+			if blockS.Block.ImpliedType().HasDynamicTypes() {
+				ret[name] = &hcldec.BlockObjectSpec{
+					TypeName:   name,
+					Nested:     childSpec,
+					LabelNames: mapLabelNames,
+				}
+			} else {
+				ret[name] = &hcldec.BlockMapSpec{
+					TypeName:   name,
+					Nested:     childSpec,
+					LabelNames: mapLabelNames,
+				}
+			}
+		default:
+			// Invalid nesting type is just ignored. It's checked by
+			// InternalValidate.
+			continue
+		}
+	}
+
+	return ret
+}
+
+func (a *Attribute) decoderSpec(name string) hcldec.Spec {
+	return &hcldec.AttrSpec{
+		Name:     name,
+		Type:     a.Type,
+		Required: a.Required,
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
new file mode 100644
index 00000000..caf8d730
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
@@ -0,0 +1,14 @@
+// Package configschema contains types for describing the expected structure
+// of a configuration block whose shape is not known until runtime.
+//
+// For example, this is used to describe the expected contents of a resource
+// configuration block, which is defined by the corresponding provider plugin
+// and thus not compiled into Terraform core.
+//
+// A configschema primarily describes the shape of configuration, but it is
+// also suitable for use with other structures derived from the configuration,
+// such as the cached state of a resource or a resource diff.
+//
+// This package should not be confused with the package helper/schema, which
+// is the higher-level helper library used to implement providers themselves. 
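+//
+// A small schema gives a sense of the shape (a sketch; names and types are
+// illustrative):
+//
+//	schema := &configschema.Block{
+//		Attributes: map[string]*configschema.Attribute{
+//			"name": {Type: cty.String, Required: true},
+//		},
+//	}
+//	spec := schema.DecoderSpec() // an hcldec.Spec for decoding HCL bodies
+//	ty := schema.ImpliedType()   // the cty object type of decoded values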
+package configschema
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
new file mode 100644
index 00000000..005da56b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
@@ -0,0 +1,59 @@
+package configschema
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// EmptyValue returns the "empty value" for the receiving block, which for
+// a block type is a non-null object where all of the attribute values are
+// the empty values of the block's attributes and nested block types.
+//
+// In other words, it returns the value that would be returned if an empty
+// block were decoded against the receiving schema, assuming that no required
+// attribute or block constraints were honored.
+func (b *Block) EmptyValue() cty.Value {
+	vals := make(map[string]cty.Value)
+	for name, attrS := range b.Attributes {
+		vals[name] = attrS.EmptyValue()
+	}
+	for name, blockS := range b.BlockTypes {
+		vals[name] = blockS.EmptyValue()
+	}
+	return cty.ObjectVal(vals)
+}
+
+// EmptyValue returns the "empty value" for the receiving attribute, which is
+// the value that would be returned if there were no definition of the attribute
+// at all, ignoring any required constraint.
+func (a *Attribute) EmptyValue() cty.Value {
+	return cty.NullVal(a.Type)
+}
+
+// EmptyValue returns the "empty value" for when there are zero nested blocks
+// present of the receiving type.
+func (b *NestedBlock) EmptyValue() cty.Value {
+	switch b.Nesting {
+	case NestingSingle:
+		return cty.NullVal(b.Block.ImpliedType())
+	case NestingGroup:
+		return b.Block.EmptyValue()
+	case NestingList:
+		if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
+			return cty.EmptyTupleVal
+		} else {
+			return cty.ListValEmpty(ty)
+		}
+	case NestingMap:
+		if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
+			return cty.EmptyObjectVal
+		} else {
+			return cty.MapValEmpty(ty)
+		}
+	case NestingSet:
+		return cty.SetValEmpty(b.Block.ImpliedType())
+	default:
+		// Should never get here because the above is intended to be exhaustive,
+		// but we'll be robust and return a result nonetheless.
+		return cty.NullVal(cty.DynamicPseudoType)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
new file mode 100644
index 00000000..c0ee8419
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
@@ -0,0 +1,42 @@
+package configschema
+
+import (
+	"github.com/hashicorp/hcl2/hcldec"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// ImpliedType returns the cty.Type that would result from decoding a
+// configuration block using the receiving block schema.
+//
+// ImpliedType always returns a result, even if the given schema is
+// inconsistent. Code that creates configschema.Block objects should be
+// tested using the InternalValidate method to detect any inconsistencies
+// that would cause this method to fall back on defaults and assumptions.
+func (b *Block) ImpliedType() cty.Type {
+	if b == nil {
+		return cty.EmptyObject
+	}
+
+	return hcldec.ImpliedType(b.DecoderSpec())
+}
+
+// ContainsSensitive returns true if any of the attributes of the receiving
+// block or any of its descendent blocks are marked as sensitive. 
+//
+// Blocks themselves cannot be sensitive as a whole -- sensitivity is a
+// per-attribute idea -- but sometimes we want to include a whole object
+// decoded from a block in some UI output, and that is safe to do only if
+// none of the contained attributes are sensitive.
+func (b *Block) ContainsSensitive() bool {
+	for _, attrS := range b.Attributes {
+		if attrS.Sensitive {
+			return true
+		}
+	}
+	for _, blockS := range b.BlockTypes {
+		if blockS.ContainsSensitive() {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
new file mode 100644
index 00000000..ebf1abba
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
@@ -0,0 +1,105 @@
+package configschema
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/zclconf/go-cty/cty"
+
+	multierror "github.com/hashicorp/go-multierror"
+)
+
+var validName = regexp.MustCompile(`^[a-z0-9_]+$`)
+
+// InternalValidate returns an error if the receiving block and its child
+// schema definitions have any inconsistencies with the documented rules for
+// valid schema.
+//
+// This is intended to be used within unit tests to detect when a given
+// schema is invalid.
+func (b *Block) InternalValidate() error {
+	if b == nil {
+		return fmt.Errorf("top-level block schema is nil")
+	}
+	return b.internalValidate("", nil)
+
+}
+
+func (b *Block) internalValidate(prefix string, err error) error {
+	for name, attrS := range b.Attributes {
+		if attrS == nil {
+			err = multierror.Append(err, fmt.Errorf("%s%s: attribute schema is nil", prefix, name))
+			continue
+		}
+		if !validName.MatchString(name) {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+		}
+		if attrS.Optional == false && attrS.Required == false && attrS.Computed == false {
+			err = multierror.Append(err, fmt.Errorf("%s%s: must set Optional, Required or Computed", prefix, name))
+		}
+		if attrS.Optional && attrS.Required {
+			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Optional and Required", prefix, name))
+		}
+		if attrS.Computed && attrS.Required {
+			err = multierror.Append(err, fmt.Errorf("%s%s: cannot set both Computed and Required", prefix, name))
+		}
+		if attrS.Type == cty.NilType {
+			err = multierror.Append(err, fmt.Errorf("%s%s: Type must be set to something other than cty.NilType", prefix, name))
+		}
+	}
+
+	for name, blockS := range b.BlockTypes {
+		if blockS == nil {
+			err = multierror.Append(err, fmt.Errorf("%s%s: block schema is nil", prefix, name))
+			continue
+		}
+
+		if _, isAttr := b.Attributes[name]; isAttr {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name defined as both attribute and child block type", prefix, name))
+		} else if !validName.MatchString(name) {
+			err = multierror.Append(err, fmt.Errorf("%s%s: name may contain only lowercase letters, digits and underscores", prefix, name))
+		}
+
+		if blockS.MinItems < 0 || blockS.MaxItems < 0 {
+			err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be greater than or equal to zero", prefix, name))
+		}
+
+		switch blockS.Nesting {
+		case NestingSingle:
+			switch {
+			case blockS.MinItems != blockS.MaxItems:
+				err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must match in NestingSingle mode", prefix, name))
+			case blockS.MinItems < 0 || blockS.MinItems > 1:
+				err =
multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) + } + case NestingGroup: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name)) + } + case NestingList, NestingSet: + if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) + } + if blockS.Nesting == NestingSet { + ety := blockS.Block.ImpliedType() + if ety.HasDynamicTypes() { + // This is not permitted because the HCL (cty) set implementation + // needs to know the exact type of set elements in order to + // properly hash them, and so can't support mixed types. + err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name)) + } + } + case NestingMap: + if blockS.MinItems != 0 || blockS.MaxItems != 0 { + err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) + } + default: + err = multierror.Append(err, fmt.Errorf("%s%s: invalid nesting mode %s", prefix, name, blockS.Nesting)) + } + + subPrefix := prefix + name + "." + err = blockS.Block.internalValidate(subPrefix, err) + } + + return err +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go new file mode 100644 index 00000000..febe743e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go @@ -0,0 +1,28 @@ +// Code generated by "stringer -type=NestingMode"; DO NOT EDIT. + +package configschema + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[nestingModeInvalid-0] + _ = x[NestingSingle-1] + _ = x[NestingGroup-2] + _ = x[NestingList-3] + _ = x[NestingSet-4] + _ = x[NestingMap-5] +} + +const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap" + +var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74} + +func (i NestingMode) String() string { + if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go new file mode 100644 index 00000000..0be3b8fa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go @@ -0,0 +1,38 @@ +package configschema + +// NoneRequired returns a deep copy of the receiver with any required +// attributes translated to optional. 
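To make the relationship between `ImpliedType` and `EmptyValue` concrete before the `NoneRequired` helper below, here is a hedged sketch for a small schema. The `ami` and `ebs_block_device` names are invented for illustration and are not part of this diff; canonical import paths are assumed.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"ami": {Type: cty.String, Optional: true},
		},
		BlockTypes: map[string]*configschema.NestedBlock{
			"ebs_block_device": {
				Nesting: configschema.NestingSet,
				Block: configschema.Block{
					Attributes: map[string]*configschema.Attribute{
						"size": {Type: cty.Number, Optional: true},
					},
				},
			},
		},
	}

	// Roughly: object({ami: string, ebs_block_device: set(object({size: number}))})
	fmt.Println(schema.ImpliedType().GoString())

	// A non-null object with a null "ami" and an empty set for
	// "ebs_block_device", per the NestingSet case of EmptyValue above.
	fmt.Println(schema.EmptyValue().GoString())
}
```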
+func (b *Block) NoneRequired() *Block {
+	ret := &Block{}
+
+	if b.Attributes != nil {
+		ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
+	}
+	for name, attrS := range b.Attributes {
+		ret.Attributes[name] = attrS.forceOptional()
+	}
+
+	if b.BlockTypes != nil {
+		ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes))
+	}
+	for name, blockS := range b.BlockTypes {
+		ret.BlockTypes[name] = blockS.noneRequired()
+	}
+
+	return ret
+}
+
+func (b *NestedBlock) noneRequired() *NestedBlock {
+	ret := *b
+	ret.Block = *(ret.Block.NoneRequired())
+	ret.MinItems = 0
+	ret.MaxItems = 0
+	return &ret
+}
+
+func (a *Attribute) forceOptional() *Attribute {
+	ret := *a
+	ret.Optional = true
+	ret.Required = false
+	return &ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
new file mode 100644
index 00000000..5a67334d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
@@ -0,0 +1,130 @@
+package configschema
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// Block represents a configuration block.
+//
+// "Block" here is a logical grouping construct, though it happens to map
+// directly onto the physical block syntax of Terraform's native configuration
+// syntax. It may be more a matter of convention in other syntaxes, such as
+// JSON.
+//
+// When converted to a value, a Block always becomes an instance of an object
+// type derived from its defined attributes and nested blocks.
+type Block struct {
+	// Attributes describes any attributes that may appear directly inside
+	// the block.
+	Attributes map[string]*Attribute
+
+	// BlockTypes describes any nested block types that may appear directly
+	// inside the block.
+	BlockTypes map[string]*NestedBlock
+}
+
+// Attribute represents a configuration attribute, within a block.
+type Attribute struct {
+	// Type is a type specification that the attribute's value must conform to.
+	Type cty.Type
+
+	// Description is an English-language description of the purpose and
+	// usage of the attribute. A description should be concise and use only
+	// one or two sentences, leaving full definition to longer-form
+	// documentation defined elsewhere.
+	Description string
+
+	// Required, if set to true, specifies that an omitted or null value is
+	// not permitted.
+	Required bool
+
+	// Optional, if set to true, specifies that an omitted or null value is
+	// permitted. This field conflicts with Required.
+	Optional bool
+
+	// Computed, if set to true, specifies that the value comes from the
+	// provider rather than from configuration. If combined with Optional,
+	// then the config may optionally provide an overridden value.
+	Computed bool
+
+	// Sensitive, if set to true, indicates that an attribute may contain
+	// sensitive information.
+	//
+	// At present nothing is done with this information, but callers are
+	// encouraged to set it where appropriate so that it may be used in the
+	// future to help Terraform mask sensitive information. (Terraform
+	// currently achieves this in a limited sense via other mechanisms.)
+	Sensitive bool
+}
+
+// NestedBlock represents the embedding of one block within another.
+type NestedBlock struct {
+	// Block is the description of the block that's nested.
+	Block
+
+	// Nesting provides the nesting mode for the child block, which determines
+	// how many instances of the block are allowed, how many labels it expects,
+	// and how the resulting data will be converted into a data structure.
+	Nesting NestingMode
+
+	// MinItems and MaxItems set, for the NestingList and NestingSet nesting
+	// modes, lower and upper limits on the number of child blocks allowed
+	// of the given type. If both are left at zero, no limit is applied.
+	//
+	// As a special case, both values can be set to 1 for NestingSingle in
+	// order to indicate that a particular single block is required.
+	//
+	// These fields are ignored for other nesting modes and must both be left
+	// at zero.
+	MinItems, MaxItems int
+}
+
+// NestingMode is an enumeration of modes for nesting blocks inside other
+// blocks.
+type NestingMode int
+
+//go:generate stringer -type=NestingMode
+
+const (
+	nestingModeInvalid NestingMode = iota
+
+	// NestingSingle indicates that only a single instance of a given
+	// block type is permitted, with no labels, and its content should be
+	// provided directly as an object value.
+	NestingSingle
+
+	// NestingGroup is similar to NestingSingle in that it calls for only a
+	// single instance of a given block type with no labels, but it additionally
+	// guarantees that its result will never be null, even if the block is
+	// absent, and instead the nested attributes and blocks will be treated
+	// as absent in that case. (Any required attributes or blocks within the
+	// nested block are not enforced unless the block is explicitly present
+	// in the configuration, so they are all effectively optional when the
+	// block is not present.)
+	//
+	// This is useful for the situation where a remote API has a feature that
+	// is always enabled but has a group of settings related to that feature
+	// that themselves have default values. By using NestingGroup instead of
+	// NestingSingle in that case, generated plans will show the block as
+	// present even when not present in configuration, thus allowing any
+	// default values within to be displayed to the user.
+	NestingGroup
+
+	// NestingList indicates that multiple blocks of the given type are
+	// permitted, with no labels, and that their corresponding objects should
+	// be provided in a list.
+	NestingList
+
+	// NestingSet indicates that multiple blocks of the given type are
+	// permitted, with no labels, and that their corresponding objects should
+	// be provided in a set.
+	NestingSet
+
+	// NestingMap indicates that multiple blocks of the given type are
+	// permitted, each with a single label, and that their corresponding
+	// objects should be provided in a map whose keys are the labels.
+	//
+	// It's an error, therefore, to use the same label value on multiple
+	// blocks.
+	NestingMap
+)
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
new file mode 100644
index 00000000..a41e930b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
@@ -0,0 +1,173 @@
+package configschema
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/helper/didyoumean"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// StaticValidateTraversal checks whether the given traversal (which must be
+// relative) refers to a construct in the receiving schema, returning error
+// diagnostics if any problems are found.
+//
+// This method is "optimistic" in that it will not return errors for possible
+// problems that cannot be detected statically. It is possible that a
+// traversal which passed static validation will still fail when evaluated.
+func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
+	if !traversal.IsRelative() {
+		panic("StaticValidateTraversal on absolute traversal")
+	}
+	if len(traversal) == 0 {
+		return nil
+	}
+
+	var diags tfdiags.Diagnostics
+
+	next := traversal[0]
+	after := traversal[1:]
+
+	var name string
+	switch step := next.(type) {
+	case hcl.TraverseAttr:
+		name = step.Name
+	case hcl.TraverseIndex:
+		// No other traversal step types are allowed directly at a block.
+		// If it looks like the user was trying to use index syntax to
+		// access an attribute then we'll produce a specialized message.
+		key := step.Key
+		if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
+			maybeName := key.AsString()
+			if hclsyntax.ValidIdentifier(maybeName) {
+				diags = diags.Append(&hcl.Diagnostic{
+					Severity: hcl.DiagError,
+					Summary: `Invalid index operation`,
+					Detail: fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName),
+					Subject: &step.SrcRange,
+				})
+				return diags
+			}
+		}
+		// If it looks like some other kind of index then we'll use a generic error.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary: `Invalid index operation`,
+			Detail: `Only attribute access is allowed here, using the dot operator.`,
+			Subject: &step.SrcRange,
+		})
+		return diags
+	default:
+		// No other traversal types should appear in a normal valid traversal,
+		// but we'll handle this with a generic error anyway to be robust.
+		diags = diags.Append(&hcl.Diagnostic{
+			Severity: hcl.DiagError,
+			Summary: `Invalid operation`,
+			Detail: `Only attribute access is allowed here, using the dot operator.`,
+			Subject: next.SourceRange().Ptr(),
+		})
+		return diags
+	}
+
+	if attrS, exists := b.Attributes[name]; exists {
+		// For attribute validation we will just apply the rest of the
+		// traversal to an unknown value of the attribute type and pass
+		// through HCL's own errors, since we don't want to replicate all of
+		// HCL's type checking rules here.
+		val := cty.UnknownVal(attrS.Type)
+		_, hclDiags := after.TraverseRel(val)
+		diags = diags.Append(hclDiags)
+		return diags
+	}
+
+	if blockS, exists := b.BlockTypes[name]; exists {
+		moreDiags := blockS.staticValidateTraversal(name, after)
+		diags = diags.Append(moreDiags)
+		return diags
+	}
+
+	// If we get here then the name isn't valid at all.
We'll collect up + // all of the names that _are_ valid to use as suggestions. + var suggestions []string + for name := range b.Attributes { + suggestions = append(suggestions, name) + } + for name := range b.BlockTypes { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unsupported attribute`, + Detail: fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion), + Subject: next.SourceRange().Ptr(), + }) + + return diags +} + +func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics { + if b.Nesting == NestingSingle || b.Nesting == NestingGroup { + // Single blocks are easy: just pass right through. + return b.Block.StaticValidateTraversal(traversal) + } + + if len(traversal) == 0 { + // It's always valid to access a nested block's attribute directly. + return nil + } + + var diags tfdiags.Diagnostics + next := traversal[0] + after := traversal[1:] + + switch b.Nesting { + + case NestingSet: + // Can't traverse into a set at all, since it does not have any keys + // to index with. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Cannot index a set value`, + Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName), + Subject: next.SourceRange().Ptr(), + }) + return diags + + case NestingList: + if _, ok := next.(hcl.TraverseIndex); ok { + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + } else { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid operation`, + Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName), + Subject: next.SourceRange().Ptr(), + }) + } + return diags + + case NestingMap: + // Both attribute and index steps are valid for maps, so we'll just + // pass through here and let normal evaluation catch an + // incorrectly-typed index key later, if present. + moreDiags := b.Block.StaticValidateTraversal(after) + diags = diags.Append(moreDiags) + return diags + + default: + // Invalid nesting type is just ignored. It's checked by + // InternalValidate. (Note that we handled NestingSingle separately + // back at the start of this function.) 
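Stepping outside the vendored source for a moment, the did-you-mean suggestion path above is easy to demonstrate. A sketch, assuming a hand-built traversal with a zero source range is acceptable; the attribute names here are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"ami": {Type: cty.String, Optional: true},
		},
	}

	// A relative traversal equivalent to `.amy` -- note the typo, which
	// should trip the suggestion path in StaticValidateTraversal above.
	traversal := hcl.Traversal{
		hcl.TraverseAttr{Name: "amy"},
	}

	diags := schema.StaticValidateTraversal(traversal)
	fmt.Println(diags.Err())
	// Expected: an "Unsupported attribute" error suggesting "ami".
}
```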
+ return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis.go new file mode 100644 index 00000000..93209fcd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis.go @@ -0,0 +1,286 @@ +package configupgrade + +import ( + "fmt" + "log" + "strings" + + hcl1 "github.com/hashicorp/hcl" + hcl1ast "github.com/hashicorp/hcl/hcl/ast" + hcl1parser "github.com/hashicorp/hcl/hcl/parser" + hcl1token "github.com/hashicorp/hcl/hcl/token" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/terraform" +) + +// analysis is a container for the various different information gathered +// by Upgrader.analyze. +type analysis struct { + ProviderSchemas map[string]*terraform.ProviderSchema + ProvisionerSchemas map[string]*configschema.Block + ResourceProviderType map[addrs.Resource]string + ResourceHasCount map[addrs.Resource]bool + VariableTypes map[string]string + ModuleDir string +} + +// analyze processes the configuration files included inside the receiver +// and returns an assortment of information required to make decisions during +// a configuration upgrade. +func (u *Upgrader) analyze(ms ModuleSources) (*analysis, error) { + ret := &analysis{ + ProviderSchemas: make(map[string]*terraform.ProviderSchema), + ProvisionerSchemas: make(map[string]*configschema.Block), + ResourceProviderType: make(map[addrs.Resource]string), + ResourceHasCount: make(map[addrs.Resource]bool), + VariableTypes: make(map[string]string), + } + + m := &moduledeps.Module{ + Providers: make(moduledeps.Providers), + } + + // This is heavily based on terraform.ModuleTreeDependencies but + // differs in that it works directly with the HCL1 AST rather than + // the legacy config structs (and can thus outlive those) and that + // it only works on one module at a time, and so doesn't need to + // recurse into child calls. + for name, src := range ms { + if ext := fileExt(name); ext != ".tf" { + continue + } + + log.Printf("[TRACE] configupgrade: Analyzing %q", name) + + f, err := hcl1parser.Parse(src) + if err != nil { + // If we encounter a syntax error then we'll just skip for now + // and assume that we'll catch this again when we do the upgrade. + // If not, we'll break the upgrade step of renaming .tf files to + // .tf.json if they seem to be JSON syntax. 
+ log.Printf("[ERROR] Failed to parse %q: %s", name, err) + continue + } + + list, ok := f.Node.(*hcl1ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing: file doesn't contain a root object") + } + + if providersList := list.Filter("provider"); len(providersList.Items) > 0 { + providerObjs := providersList.Children() + for _, providerObj := range providerObjs.Items { + if len(providerObj.Keys) != 1 { + return nil, fmt.Errorf("provider block has wrong number of labels") + } + name := providerObj.Keys[0].Token.Value().(string) + + var listVal *hcl1ast.ObjectList + if ot, ok := providerObj.Val.(*hcl1ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("provider %q: must be a block", name) + } + + var versionStr string + if a := listVal.Filter("version"); len(a.Items) > 0 { + err := hcl1.DecodeObject(&versionStr, a.Items[0].Val) + if err != nil { + return nil, fmt.Errorf("Error reading version for provider %q: %s", name, err) + } + } + var constraints discovery.Constraints + if versionStr != "" { + constraints, err = discovery.ConstraintStr(versionStr).Parse() + if err != nil { + return nil, fmt.Errorf("Error parsing version for provider %q: %s", name, err) + } + } + + var alias string + if a := listVal.Filter("alias"); len(a.Items) > 0 { + err := hcl1.DecodeObject(&alias, a.Items[0].Val) + if err != nil { + return nil, fmt.Errorf("Error reading alias for provider %q: %s", name, err) + } + } + + inst := moduledeps.ProviderInstance(name) + if alias != "" { + inst = moduledeps.ProviderInstance(name + "." + alias) + } + log.Printf("[TRACE] Provider block requires provider %q", inst) + m.Providers[inst] = moduledeps.ProviderDependency{ + Constraints: constraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + } + + { + resourceConfigsList := list.Filter("resource") + dataResourceConfigsList := list.Filter("data") + // list.Filter annoyingly strips off the key used for matching, + // so we'll put it back here so we can distinguish our two types + // of blocks below. + for _, obj := range resourceConfigsList.Items { + obj.Keys = append([]*hcl1ast.ObjectKey{ + {Token: hcl1token.Token{Type: hcl1token.IDENT, Text: "resource"}}, + }, obj.Keys...) + } + for _, obj := range dataResourceConfigsList.Items { + obj.Keys = append([]*hcl1ast.ObjectKey{ + {Token: hcl1token.Token{Type: hcl1token.IDENT, Text: "data"}}, + }, obj.Keys...) + } + // Now we can merge the two lists together, since we can distinguish + // them just by their keys[0]. + resourceConfigsList.Items = append(resourceConfigsList.Items, dataResourceConfigsList.Items...) 
+ + resourceObjs := resourceConfigsList.Children() + for _, resourceObj := range resourceObjs.Items { + if len(resourceObj.Keys) != 3 { + return nil, fmt.Errorf("resource or data block has wrong number of labels") + } + typeName := resourceObj.Keys[1].Token.Value().(string) + name := resourceObj.Keys[2].Token.Value().(string) + rAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: typeName, + Name: name, + } + if resourceObj.Keys[0].Token.Value() == "data" { + rAddr.Mode = addrs.DataResourceMode + } + + var listVal *hcl1ast.ObjectList + if ot, ok := resourceObj.Val.(*hcl1ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("config for %q must be a block", rAddr) + } + + if o := listVal.Filter("count"); len(o.Items) > 0 { + ret.ResourceHasCount[rAddr] = true + } else { + ret.ResourceHasCount[rAddr] = false + } + + var providerKey string + if o := listVal.Filter("provider"); len(o.Items) > 0 { + err := hcl1.DecodeObject(&providerKey, o.Items[0].Val) + if err != nil { + return nil, fmt.Errorf("Error reading provider for resource %s: %s", rAddr, err) + } + } + + if providerKey == "" { + providerKey = rAddr.DefaultProviderConfig().StringCompact() + } + + inst := moduledeps.ProviderInstance(providerKey) + log.Printf("[TRACE] Resource block for %s requires provider %q", rAddr, inst) + if _, exists := m.Providers[inst]; !exists { + m.Providers[inst] = moduledeps.ProviderDependency{ + Reason: moduledeps.ProviderDependencyImplicit, + } + } + ret.ResourceProviderType[rAddr] = inst.Type() + } + } + + if variablesList := list.Filter("variable"); len(variablesList.Items) > 0 { + variableObjs := variablesList.Children() + for _, variableObj := range variableObjs.Items { + if len(variableObj.Keys) != 1 { + return nil, fmt.Errorf("variable block has wrong number of labels") + } + name := variableObj.Keys[0].Token.Value().(string) + + var listVal *hcl1ast.ObjectList + if ot, ok := variableObj.Val.(*hcl1ast.ObjectType); ok { + listVal = ot.List + } else { + return nil, fmt.Errorf("variable %q: must be a block", name) + } + + var typeStr string + if a := listVal.Filter("type"); len(a.Items) > 0 { + err := hcl1.DecodeObject(&typeStr, a.Items[0].Val) + if err != nil { + return nil, fmt.Errorf("Error reading type for variable %q: %s", name, err) + } + } else if a := listVal.Filter("default"); len(a.Items) > 0 { + switch a.Items[0].Val.(type) { + case *hcl1ast.ObjectType: + typeStr = "map" + case *hcl1ast.ListType: + typeStr = "list" + default: + typeStr = "string" + } + } else { + typeStr = "string" + } + + ret.VariableTypes[name] = strings.TrimSpace(typeStr) + } + } + } + + providerFactories, errs := u.Providers.ResolveProviders(m.PluginRequirements()) + if len(errs) > 0 { + var errorsMsg string + for _, err := range errs { + errorsMsg += fmt.Sprintf("\n- %s", err) + } + return nil, fmt.Errorf("error resolving providers:\n%s", errorsMsg) + } + + for name, fn := range providerFactories { + log.Printf("[TRACE] Fetching schema from provider %q", name) + provider, err := fn() + if err != nil { + return nil, fmt.Errorf("failed to load provider %q: %s", name, err) + } + + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + return nil, resp.Diagnostics.Err() + } + + schema := &terraform.ProviderSchema{ + Provider: resp.Provider.Block, + ResourceTypes: map[string]*configschema.Block{}, + DataSources: map[string]*configschema.Block{}, + } + for t, s := range resp.ResourceTypes { + schema.ResourceTypes[t] = s.Block + } + for t, s := range resp.DataSources { + 
schema.DataSources[t] = s.Block + } + ret.ProviderSchemas[name] = schema + } + + for name, fn := range u.Provisioners { + log.Printf("[TRACE] Fetching schema from provisioner %q", name) + provisioner, err := fn() + if err != nil { + return nil, fmt.Errorf("failed to load provisioner %q: %s", name, err) + } + + resp := provisioner.GetSchema() + if resp.Diagnostics.HasErrors() { + return nil, resp.Diagnostics.Err() + } + + ret.ProvisionerSchemas[name] = resp.Provisioner + } + + return ret, nil +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis_expr.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis_expr.go new file mode 100644 index 00000000..44a65af1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/analysis_expr.go @@ -0,0 +1,167 @@ +package configupgrade + +import ( + "log" + + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2syntax "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" +) + +// InferExpressionType attempts to determine a result type for the given +// expression source code, which should already have been upgraded to new +// expression syntax. +// +// If self is non-nil, it will determine the meaning of the special "self" +// reference. +// +// If such an inference isn't possible, either because of limitations of +// static analysis or because of errors in the expression, the result is +// cty.DynamicPseudoType indicating "unknown". +func (an *analysis) InferExpressionType(src []byte, self addrs.Referenceable) cty.Type { + expr, diags := hcl2syntax.ParseExpression(src, "", hcl2.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + // If there's a syntax error then analysis is impossible. + return cty.DynamicPseudoType + } + + data := analysisData{an} + scope := &lang.Scope{ + Data: data, + SelfAddr: self, + PureOnly: false, + BaseDir: ".", + } + val, _ := scope.EvalExpr(expr, cty.DynamicPseudoType) + + // Value will be cty.DynamicVal if either inference was impossible or + // if there was an error, leading to cty.DynamicPseudoType here. + return val.Type() +} + +// analysisData is an implementation of lang.Data that returns unknown values +// of suitable types in order to achieve approximate dynamic analysis of +// expression result types, which we need for some upgrade rules. +// +// Unlike a usual implementation of this interface, this one never returns +// errors and will instead just return cty.DynamicVal if it can't produce +// an exact type for any reason. This can then allow partial upgrading to +// proceed and the caller can emit warning comments for ambiguous situations. +// +// N.B.: Source ranges in the data methods are meaningless, since they are +// just relative to the byte array passed to InferExpressionType, not to +// any real input file. +type analysisData struct { + an *analysis +} + +var _ lang.Data = (*analysisData)(nil) + +func (d analysisData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + // This implementation doesn't do any static validation. 
+	return nil
+}
+
+func (d analysisData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+	// All valid count attributes are numbers
+	return cty.UnknownVal(cty.Number), nil
+}
+
+func (d analysisData) GetResourceInstance(instAddr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+	log.Printf("[TRACE] configupgrade: Determining type for %s", instAddr)
+	addr := instAddr.Resource
+
+	// Our analysis pass should've found a suitable schema for every resource
+	// type in the module.
+	providerType, ok := d.an.ResourceProviderType[addr]
+	if !ok {
+		// Should not be possible, since analysis visits every resource block.
+		log.Printf("[TRACE] configupgrade: analysis.GetResourceInstance doesn't have a provider type for %s", addr)
+		return cty.DynamicVal, nil
+	}
+	providerSchema, ok := d.an.ProviderSchemas[providerType]
+	if !ok {
+		// Should not be possible, since analysis loads schema for every provider.
+		log.Printf("[TRACE] configupgrade: analysis.GetResourceInstance doesn't have a provider schema for %q", providerType)
+		return cty.DynamicVal, nil
+	}
+	schema, _ := providerSchema.SchemaForResourceAddr(addr)
+	if schema == nil {
+		// Should not be possible, since analysis loads schema for every provider.
+		log.Printf("[TRACE] configupgrade: analysis.GetResourceInstance doesn't have a schema for %s", addr)
+		return cty.DynamicVal, nil
+	}
+
+	objTy := schema.ImpliedType()
+
+	// We'll emulate the normal evaluator's behavior of deciding whether to
+	// return a list or a single object type depending on whether count is
+	// set and whether an instance key is given in the address.
+	if d.an.ResourceHasCount[addr] {
+		if instAddr.Key == addrs.NoKey {
+			log.Printf("[TRACE] configupgrade: %s refers to counted instance without a key, so result is a list of %#v", instAddr, objTy)
+			return cty.UnknownVal(cty.List(objTy)), nil
+		}
+		log.Printf("[TRACE] configupgrade: %s refers to counted instance with a key, so result is single object", instAddr)
+		return cty.UnknownVal(objTy), nil
+	}
+
+	if instAddr.Key != addrs.NoKey {
+		log.Printf("[TRACE] configupgrade: %s refers to non-counted instance with a key, which is invalid", instAddr)
+		return cty.DynamicVal, nil
+	}
+	log.Printf("[TRACE] configupgrade: %s refers to non-counted instance without a key, so result is single object", instAddr)
+	return cty.UnknownVal(objTy), nil
+}
+
+func (d analysisData) GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+	// We can only predict these in general by recursively evaluating their
+	// expressions, which creates some undesirable complexity here so for
+	// now we'll just skip analyses with locals and see if this complexity
+	// is warranted with real-world testing.
+	return cty.DynamicVal, nil
+}
+
+func (d analysisData) GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+	// We only work on one module at a time during upgrading, so we have no
+	// information about the outputs of a child module.
+	return cty.DynamicVal, nil
+}
+
+func (d analysisData) GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+	// We only work on one module at a time during upgrading, so we have no
+	// information about the outputs of a child module.
+ return cty.DynamicVal, nil +} + +func (d analysisData) GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // All valid path attributes are strings + return cty.UnknownVal(cty.String), nil +} + +func (d analysisData) GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // All valid "terraform" attributes are strings + return cty.UnknownVal(cty.String), nil +} + +func (d analysisData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + // TODO: Collect shallow type information (list vs. map vs. string vs. unknown) + // in analysis and then return a similarly-approximate type here. + log.Printf("[TRACE] configupgrade: Determining type for %s", addr) + name := addr.Name + typeName := d.an.VariableTypes[name] + switch typeName { + case "list": + return cty.UnknownVal(cty.List(cty.DynamicPseudoType)), nil + case "map": + return cty.UnknownVal(cty.Map(cty.DynamicPseudoType)), nil + case "string": + return cty.UnknownVal(cty.String), nil + default: + return cty.DynamicVal, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/doc.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/doc.go new file mode 100644 index 00000000..aa326743 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/doc.go @@ -0,0 +1,16 @@ +// Package configupgrade upgrades configurations targeting our legacy +// configuration loader (in package "config") to be compatible with and +// idiomatic for the newer configuration loader (in package "configs"). +// +// It works on one module directory at a time, producing new content for +// each existing .tf file and possibly creating new files as needed. The +// legacy HCL and HIL parsers are used to read the existing configuration +// for maximum compatibility with any non-idiomatic constructs that were +// accepted by those implementations but not accepted by the new HCL parsers. +// +// Unlike the loaders and validators elsewhere in Terraform, this package +// always generates diagnostics with paths relative to the module directory +// currently being upgraded, with no intermediate paths. This means that the +// filenames in these ranges can be used directly as keys into the ModuleSources +// map that the file was parsed from. +package configupgrade diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/module_sources.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/module_sources.go new file mode 100644 index 00000000..d7b8ed09 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/module_sources.go @@ -0,0 +1,216 @@ +package configupgrade + +import ( + "fmt" + "io/ioutil" + "path/filepath" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/hcl2/hcl" + hcl2syntax "github.com/hashicorp/hcl2/hcl/hclsyntax" + + version "github.com/hashicorp/go-version" +) + +type ModuleSources map[string][]byte + +// LoadModule looks for Terraform configuration files in the given directory +// and loads each of them into memory as source code, in preparation for +// further analysis and conversion. +// +// At this stage the files are not parsed at all. 
Instead, we just read the +// raw bytes from the file so that they can be passed into a parser in a +// separate step. +// +// If the given directory or any of the files cannot be read, an error is +// returned. It is not safe to proceed with processing in that case because +// we cannot "see" all of the source code for the configuration. +func LoadModule(dir string) (ModuleSources, error) { + entries, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + + ret := make(ModuleSources) + for _, entry := range entries { + name := entry.Name() + if entry.IsDir() { + continue + } + if configs.IsIgnoredFile(name) { + continue + } + ext := fileExt(name) + if ext == "" { + continue + } + + fullPath := filepath.Join(dir, name) + src, err := ioutil.ReadFile(fullPath) + if err != nil { + return nil, err + } + + ret[name] = src + } + + return ret, nil +} + +// UnusedFilename finds a filename that isn't already used by a file in +// the receiving sources and returns it. +// +// The given "proposed" name is returned verbatim if it isn't already used. +// Otherwise, the function will try appending incrementing integers to the +// proposed name until an unused name is found. Callers should propose names +// that they do not expect to already be in use so that numeric suffixes are +// only used in rare cases. +// +// The proposed name must end in either ".tf" or ".tf.json" because a +// ModuleSources only has visibility into such files. This function will +// panic if given a file whose name does not end with one of these +// extensions. +// +// A ModuleSources only works on one directory at a time, so the proposed +// name must not contain any directory separator characters. +func (ms ModuleSources) UnusedFilename(proposed string) string { + ext := fileExt(proposed) + if ext == "" { + panic(fmt.Errorf("method UnusedFilename used with invalid proposal %q", proposed)) + } + + if _, exists := ms[proposed]; !exists { + return proposed + } + + base := proposed[:len(proposed)-len(ext)] + for i := 1; ; i++ { + try := fmt.Sprintf("%s-%d%s", base, i, ext) + if _, exists := ms[try]; !exists { + return try + } + } +} + +// MaybeAlreadyUpgraded is a heuristic to see if a given module may have +// already been upgraded by this package. +// +// The heuristic used is to look for a Terraform Core version constraint in +// any of the given sources that seems to be requiring a version greater than +// or equal to v0.12.0. If true is returned then the source range of the found +// version constraint is returned in case the caller wishes to present it to +// the user as context for a warning message. The returned range is not +// meaningful if false is returned. +func (ms ModuleSources) MaybeAlreadyUpgraded() (bool, tfdiags.SourceRange) { + for name, src := range ms { + f, diags := hcl2syntax.ParseConfig(src, name, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + // If we can't parse at all then that's a reasonable signal that + // we _haven't_ been upgraded yet, but we'll continue checking + // other files anyway. + continue + } + + content, _, diags := f.Body.PartialContent(&hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + }, + }) + if diags.HasErrors() { + // Suggests that the file has an invalid "terraform" block, such + // as one with labels. 
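As a side note on the `UnusedFilename` helper shown above, a short usage sketch; the file names and contents are placeholders invented for the example:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configupgrade"
)

func main() {
	ms := configupgrade.ModuleSources{
		"main.tf":     []byte(`resource "null_resource" "a" {}`),
		"versions.tf": []byte(`terraform {}`),
	}

	fmt.Println(ms.UnusedFilename("versions.tf")) // versions-1.tf (proposal taken)
	fmt.Println(ms.UnusedFilename("outputs.tf"))  // outputs.tf (returned verbatim)
}
```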
+ continue + } + + for _, block := range content.Blocks { + content, _, diags := block.Body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + }) + if diags.HasErrors() { + continue + } + + attr, present := content.Attributes["required_version"] + if !present { + continue + } + + constraintVal, diags := attr.Expr.Value(nil) + if diags.HasErrors() { + continue + } + if constraintVal.Type() != cty.String || constraintVal.IsNull() { + continue + } + + constraints, err := version.NewConstraint(constraintVal.AsString()) + if err != nil { + continue + } + + // The go-version package doesn't actually let us see the details + // of the parsed constraints here, so we now need a bit of an + // abstraction inversion to decide if any of the given constraints + // match our heuristic. However, we do at least get to benefit + // from go-version's ability to extract multiple constraints from + // a single string and the fact that it's already validated each + // constraint to match its expected pattern. + Constraints: + for _, constraint := range constraints { + str := strings.TrimSpace(constraint.String()) + // Want to match >, >= and ~> here. + if !(strings.HasPrefix(str, ">") || strings.HasPrefix(str, "~>")) { + continue + } + + // Try to find something in this string that'll parse as a version. + for i := 1; i < len(str); i++ { + candidate := str[i:] + v, err := version.NewVersion(candidate) + if err != nil { + continue + } + + if v.Equal(firstVersionWithNewParser) || v.GreaterThan(firstVersionWithNewParser) { + // This constraint appears to be preventing the old + // parser from being used, so we suspect it was + // already upgraded. + return true, tfdiags.SourceRangeFromHCL(attr.Range) + } + + // If we fall out here then we _did_ find something that + // parses as a version, so we'll stop and move on to the + // next constraint. (Otherwise we'll pass by 0.7.0 and find + // 7.0, which is also a valid version.) + continue Constraints + } + } + } + } + return false, tfdiags.SourceRange{} +} + +var firstVersionWithNewParser = version.Must(version.NewVersion("0.12.0")) + +// fileExt returns the Terraform configuration extension of the given +// path, or a blank string if it is not a recognized extension. +func fileExt(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade.go new file mode 100644 index 00000000..e2236a6c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade.go @@ -0,0 +1,123 @@ +package configupgrade + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" + + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2write "github.com/hashicorp/hcl2/hclwrite" +) + +// Upgrade takes some input module sources and produces a new ModuleSources +// that should be equivalent to the input but use the configuration idioms +// associated with the new configuration loader. +// +// The result of this function will probably not be accepted by this function, +// because it will contain constructs that are known only to the new +// loader. +// +// The result may include additional files that were not present in the +// input. 
The result may also include nil entries for filenames that were +// present in the input, indicating that these files should be deleted. +// In particular, file renames are represented as a new entry accompanied +// by a nil entry for the old name. +// +// If the returned diagnostics contains errors, the caller should not write +// the resulting sources to disk since they will probably be incomplete. If +// only warnings are present then the files may be written to disk. Most +// warnings are also represented as "TF-UPGRADE-TODO:" comments in the +// generated source files so that users can visit them all and decide what to +// do with them. +func (u *Upgrader) Upgrade(input ModuleSources, dir string) (ModuleSources, tfdiags.Diagnostics) { + ret := make(ModuleSources) + var diags tfdiags.Diagnostics + + an, err := u.analyze(input) + if err != nil { + diags = diags.Append(err) + return ret, diags + } + an.ModuleDir = dir + + for name, src := range input { + ext := fileExt(name) + if ext == "" { + // This should never happen because we ignore files that don't + // have our conventional extensions during LoadModule, but we'll + // silently pass through such files assuming that the caller + // has been tampering with the sources map somehow. + ret[name] = src + continue + } + + isJSON := (ext == ".tf.json") + + // The legacy loader allowed JSON syntax inside files named just .tf, + // so we'll detect that case and rename them here so that the new + // loader will accept the JSON. However, JSON files are usually + // generated so we'll also generate a warning to the user to update + // whatever program generated the file to use the new name. + if !isJSON { + trimSrc := bytes.TrimSpace(src) + if len(trimSrc) > 0 && (trimSrc[0] == '{' || trimSrc[0] == '[') { + isJSON = true + + // Rename in the output + ret[name] = nil // mark for deletion + oldName := name + name = input.UnusedFilename(name + ".json") + ret[name] = src + + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "JSON configuration file was renamed", + Detail: fmt.Sprintf( + "The file %q appears to be in JSON format, so it was renamed to %q. If this file is generated by another program, that program must be updated to use this new name.", + oldName, name, + ), + }) + continue + } + } + + if isJSON { + // We don't do any automatic rewriting for JSON files, since they + // are usually generated and thus it's the generating program that + // needs to be updated, rather than its output. + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "JSON configuration file was not rewritten", + Detail: fmt.Sprintf( + "The JSON configuration file %q was skipped, because JSON files are assumed to be generated. The program that generated this file may need to be updated for changes to the configuration language.", + name, + ), + }) + ret[name] = src // unchanged + continue + } + + // TODO: Actually rewrite this .tf file. + result, fileDiags := u.upgradeNativeSyntaxFile(name, src, an) + diags = diags.Append(fileDiags) + if fileDiags.HasErrors() { + // Leave unchanged, then. 
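The constraint-scanning heuristic in MaybeAlreadyUpgraded above is easier to follow in isolation. A sketch using the same go-version API; the constraint string is an arbitrary example, not taken from the diff:

```go
package main

import (
	"fmt"
	"strings"

	version "github.com/hashicorp/go-version"
)

func main() {
	firstNew := version.Must(version.NewVersion("0.12.0"))

	constraints, err := version.NewConstraint(">= 0.12.0, < 0.14.0")
	if err != nil {
		panic(err)
	}

	for _, c := range constraints {
		str := strings.TrimSpace(c.String())
		// Only >, >= and ~> constraints can rule out the old parser.
		if !(strings.HasPrefix(str, ">") || strings.HasPrefix(str, "~>")) {
			continue
		}
		// Scan for an embedded version, as the upgrader does.
		for i := 1; i < len(str); i++ {
			if v, err := version.NewVersion(str[i:]); err == nil {
				fmt.Println(str, "implies already upgraded:", v.Equal(firstNew) || v.GreaterThan(firstNew))
				break
			}
		}
	}
}
```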
+ ret[name] = src + continue + } + + ret[name] = hcl2write.Format(result.Content) + } + + versionsName := ret.UnusedFilename("versions.tf") + ret[versionsName] = []byte(newVersionConstraint) + + return ret, diags +} + +const newVersionConstraint = ` +terraform { + required_version = ">= 0.12" +} +` diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_body.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_body.go new file mode 100644 index 00000000..39f71f3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_body.go @@ -0,0 +1,989 @@ +package configupgrade + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + hcl1ast "github.com/hashicorp/hcl/hcl/ast" + hcl1token "github.com/hashicorp/hcl/hcl/token" + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2syntax "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/blocktoattr" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// bodyContentRules is a mapping from item names (argument names and block type +// names) to a "rule" function defining what to do with an item of that type. +type bodyContentRules map[string]bodyItemRule + +// bodyItemRule is just a function to write an upgraded representation of a +// particular given item to the given buffer. This is generic to handle various +// different mapping rules, though most values will be those constructed by +// other helper functions below. +type bodyItemRule func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics + +func normalAttributeRule(filename string, wantTy cty.Type, an *analysis) bodyItemRule { + exprRule := func(val interface{}) ([]byte, tfdiags.Diagnostics) { + return upgradeExpr(val, filename, true, an) + } + return attributeRule(filename, wantTy, an, exprRule) +} + +func noInterpAttributeRule(filename string, wantTy cty.Type, an *analysis) bodyItemRule { + exprRule := func(val interface{}) ([]byte, tfdiags.Diagnostics) { + return upgradeExpr(val, filename, false, an) + } + return attributeRule(filename, wantTy, an, exprRule) +} + +func maybeBareKeywordAttributeRule(filename string, an *analysis, specials map[string]string) bodyItemRule { + exprRule := func(val interface{}) ([]byte, tfdiags.Diagnostics) { + // If the expression is a literal that would be valid as a naked keyword + // then we'll turn it into one. + if lit, isLit := val.(*hcl1ast.LiteralType); isLit { + if lit.Token.Type == hcl1token.STRING { + kw := lit.Token.Value().(string) + if hcl2syntax.ValidIdentifier(kw) { + + // If we have a special mapping rule for this keyword, + // we'll let that override what the user gave. 
+ if override := specials[kw]; override != "" { + kw = override + } + + return []byte(kw), nil + } + } + } + + return upgradeExpr(val, filename, false, an) + } + return attributeRule(filename, cty.String, an, exprRule) +} + +func maybeBareTraversalAttributeRule(filename string, an *analysis) bodyItemRule { + exprRule := func(val interface{}) ([]byte, tfdiags.Diagnostics) { + return upgradeTraversalExpr(val, filename, an) + } + return attributeRule(filename, cty.String, an, exprRule) +} + +func dependsOnAttributeRule(filename string, an *analysis) bodyItemRule { + return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + val, ok := item.Val.(*hcl1ast.ListType) + if !ok { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid depends_on argument", + Detail: `The "depends_on" argument must be a list of strings containing references to resources and modules.`, + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + return diags + } + + var exprBuf bytes.Buffer + multiline := len(val.List) > 1 + exprBuf.WriteByte('[') + if multiline { + exprBuf.WriteByte('\n') + } + for _, node := range val.List { + lit, ok := node.(*hcl1ast.LiteralType) + if (!ok) || lit.Token.Type != hcl1token.STRING { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid depends_on argument", + Detail: `The "depends_on" argument must be a list of strings containing references to resources and modules.`, + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + continue + } + refStr := lit.Token.Value().(string) + if refStr == "" { + continue + } + refParts := strings.Split(refStr, ".") + var maxNames int + switch refParts[0] { + case "data", "module": + maxNames = 3 + default: // resource references + maxNames = 2 + } + + exprBuf.WriteString(refParts[0]) + for i, part := range refParts[1:] { + if part == "*" { + // We used to allow test_instance.foo.* as a reference + // but now that's expressed instead as test_instance.foo, + // referring to the tuple of instances. This also + // always marks the end of the reference part of the + // traversal, so anything after this would be resource + // attributes that don't belong on depends_on. + break + } + if i, err := strconv.Atoi(part); err == nil { + fmt.Fprintf(&exprBuf, "[%d]", i) + // An index always marks the end of the reference part. + break + } + if (i + 1) >= maxNames { + // We've reached the end of the reference part, so anything + // after this would be invalid in 0.12. + break + } + exprBuf.WriteByte('.') + exprBuf.WriteString(part) + } + + if multiline { + exprBuf.WriteString(",\n") + } + } + exprBuf.WriteByte(']') + + printAttribute(buf, item.Keys[0].Token.Value().(string), exprBuf.Bytes(), item.LineComment) + + return diags + } +} + +func attributeRule(filename string, wantTy cty.Type, an *analysis, exprRule func(val interface{}) ([]byte, tfdiags.Diagnostics)) bodyItemRule { + return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + name := item.Keys[0].Token.Value().(string) + + // We'll tolerate a block with no labels here as a degenerate + // way to assign a map, but we can't migrate a block that has + // labels. In practice this should never happen because + // nested blocks in resource blocks did not accept labels + // prior to v0.12. 
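The depends_on rule above packs a lot of string handling into one loop. Below is a simplified, standalone approximation of just the reference-trimming logic; `upgradeDependsOnRef` is a hypothetical helper written for this illustration, not part of the diff:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// upgradeDependsOnRef approximates the reference rewriting in
// dependsOnAttributeRule above: it trims a v0.11-style reference string
// down to the v0.12 resource/module address form.
func upgradeDependsOnRef(refStr string) string {
	parts := strings.Split(refStr, ".")
	maxNames := 2 // managed resources: type.name
	if parts[0] == "data" || parts[0] == "module" {
		maxNames = 3 // e.g. data.type.name
	}

	var buf strings.Builder
	buf.WriteString(parts[0])
	for i, part := range parts[1:] {
		if part == "*" {
			break // test_instance.foo.* becomes test_instance.foo
		}
		if n, err := strconv.Atoi(part); err == nil {
			fmt.Fprintf(&buf, "[%d]", n) // an index ends the reference
			break
		}
		if i+1 >= maxNames {
			break // anything further would be attribute access
		}
		buf.WriteByte('.')
		buf.WriteString(part)
	}
	return buf.String()
}

func main() {
	fmt.Println(upgradeDependsOnRef("test_instance.foo.*")) // test_instance.foo
	fmt.Println(upgradeDependsOnRef("test_instance.foo.0")) // test_instance.foo[0]
	fmt.Println(upgradeDependsOnRef("module.net.vpc_id"))   // module.net.vpc_id
}
```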
+ if len(item.Keys) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Block where attribute was expected", + Detail: fmt.Sprintf("Within %s the name %q is an attribute name, not a block type.", blockAddr, name), + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + return diags + } + + val := item.Val + + if typeIsSettableFromTupleCons(wantTy) && !typeIsSettableFromTupleCons(wantTy.ElementType()) { + // In Terraform circa 0.10 it was required to wrap any expression + // that produces a list in HCL list brackets to allow type analysis + // to complete successfully, even though logically that ought to + // have produced a list of lists. + // + // In Terraform 0.12 this construct _does_ produce a list of lists, so + // we need to update expressions that look like older usage. We can't + // do this exactly with static analysis, but we can make a best-effort + // and produce a warning if type inference is impossible for a + // particular expression. This should give good results for common + // simple examples, like splat expressions. + // + // There are four possible cases here: + // - The value isn't an HCL1 list expression at all, or is one that + // contains more than one item, in which case this special case + // does not apply. + // - The inner expression after upgrading can be proven to return + // a sequence type, in which case we must definitely remove + // the wrapping brackets. + // - The inner expression after upgrading can be proven to return + // a non-sequence type, in which case we fall through and treat + // the whole item like a normal expression. + // - Static type analysis is impossible (it returns cty.DynamicPseudoType), + // in which case we will make no changes but emit a warning and + // a TODO comment for the user to decide whether a change needs + // to be made in practice. + if list, ok := val.(*hcl1ast.ListType); ok { + if len(list.List) == 1 { + maybeAlsoList := list.List[0] + if exprSrc, diags := upgradeExpr(maybeAlsoList, filename, true, an); !diags.HasErrors() { + // Ideally we would set "self" here but we don't have + // enough context to set it and in practice not setting + // it only affects expressions inside provisioner and + // connection blocks, and the list-wrapping thing isn't + // common there. + gotTy := an.InferExpressionType(exprSrc, nil) + if typeIsSettableFromTupleCons(gotTy) { + // Since this expression was already inside HCL list brackets, + // the ultimate result would be a list of lists and so we + // need to unwrap it by taking just the portion within + // the brackets here. + val = maybeAlsoList + } + if gotTy == cty.DynamicPseudoType { + // User must decide. + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Possible legacy dynamic list usage", + Detail: "This list may be using the legacy redundant-list expression style from Terraform v0.10 and earlier. If the expression within these brackets returns a list itself, remove these brackets.", + Subject: hcl1PosRange(filename, list.Lbrack).Ptr(), + }) + buf.WriteString( + "# TF-UPGRADE-TODO: In Terraform v0.10 and earlier, it was sometimes necessary to\n" + + "# force an interpolation expression to be interpreted as a list by wrapping it\n" + + "# in an extra set of list brackets. 
That form was supported for compatibility in\n" +
+ "# v0.11, but is no longer supported in Terraform v0.12.\n" +
+ "#\n" +
+ "# If the expression in the following list itself returns a list, remove the\n" +
+ "# brackets to avoid interpretation as a list of lists. If the expression\n" +
+ "# returns a single list item then leave it as-is and remove this TODO comment.\n",
+ )
+ }
+ }
+ }
+ }
+
+ valSrc, valDiags := exprRule(val)
+ diags = diags.Append(valDiags)
+ printAttribute(buf, item.Keys[0].Token.Value().(string), valSrc, item.LineComment)
+
+ return diags
+ }
+}
+
+func nestedBlockRule(filename string, nestedRules bodyContentRules, an *analysis, adhocComments *commentQueue) bodyItemRule {
+ return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics {
+ // This simpler nestedBlockRule is for contexts where the special
+ // "dynamic" block type is not accepted and so only HCL1 object
+ // constructs can be accepted. Attempts to assign arbitrary HIL
+ // expressions will be rejected as errors.
+
+ var diags tfdiags.Diagnostics
+ declRange := hcl1PosRange(filename, item.Keys[0].Pos())
+ blockType := item.Keys[0].Token.Value().(string)
+ labels := make([]string, len(item.Keys)-1)
+ for i, key := range item.Keys[1:] {
+ labels[i] = key.Token.Value().(string)
+ }
+
+ var blockItems []*hcl1ast.ObjectType
+
+ switch val := item.Val.(type) {
+
+ case *hcl1ast.ObjectType:
+ blockItems = []*hcl1ast.ObjectType{val}
+
+ case *hcl1ast.ListType:
+ for _, node := range val.List {
+ switch listItem := node.(type) {
+ case *hcl1ast.ObjectType:
+ blockItems = append(blockItems, listItem)
+ default:
+ diags = diags.Append(&hcl2.Diagnostic{
+ Severity: hcl2.DiagError,
+ Summary: "Invalid value for nested block",
+ Detail: fmt.Sprintf("In %s the name %q is a nested block type, so any value assigned to it must be an object.", blockAddr, blockType),
+ Subject: hcl1PosRange(filename, node.Pos()).Ptr(),
+ })
+ }
+ }
+
+ default:
+ diags = diags.Append(&hcl2.Diagnostic{
+ Severity: hcl2.DiagError,
+ Summary: "Invalid value for nested block",
+ Detail: fmt.Sprintf("In %s the name %q is a nested block type, so any value assigned to it must be an object.", blockAddr, blockType),
+ Subject: &declRange,
+ })
+ return diags
+ }
+
+ for _, blockItem := range blockItems {
+ printBlockOpen(buf, blockType, labels, item.LineComment)
+ bodyDiags := upgradeBlockBody(
+ filename, fmt.Sprintf("%s.%s", blockAddr, blockType), buf,
+ blockItem.List.Items, blockItem.Rbrace, nestedRules, adhocComments,
+ )
+ diags = diags.Append(bodyDiags)
+ buf.WriteString("}\n")
+ }
+
+ return diags
+ }
+}
+
+func nestedBlockRuleWithDynamic(filename string, nestedRules bodyContentRules, nestedSchema *configschema.NestedBlock, emptyAsAttr bool, an *analysis, adhocComments *commentQueue) bodyItemRule {
+ return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics {
+ // In Terraform v0.11 it was possible in some cases to trick Terraform
+ // and providers into accepting HCL's attribute syntax and some HIL
+ // expressions in places where blocks or sequences of blocks were
+ // expected, since the information about the heritage of the values
+ // was lost during decoding and interpolation.
+ //
+ // In order to avoid all of the weird rough edges that resulted from
+ // those misinterpretations, Terraform v0.12 is stricter and requires
+ // the use of block syntax for blocks in all cases.
However, because + // various abuses of attribute syntax _did_ work (with some caveats) + // in v0.11 we will upgrade them as best we can to use proper block + // syntax. + // + // There are a few different permutations supported by this code: + // + // - Assigning a single HCL1 "object" using attribute syntax. This is + // straightforward to migrate just by dropping the equals sign. + // + // - Assigning a HCL1 list of objects using attribute syntax. Each + // object in that list can be translated to a separate block. + // + // - Assigning a HCL1 list containing HIL expressions that evaluate + // to maps. This is a hard case because we can't know the internal + // structure of those maps during static analysis, and so we must + // generate a worst-case dynamic block structure for it. + // + // - Assigning a single HIL expression that evaluates to a list of + // maps. This is just like the previous case except additionally + // we cannot even predict the number of generated blocks, so we must + // generate a single "dynamic" block to iterate over the list at + // runtime. + + var diags tfdiags.Diagnostics + blockType := item.Keys[0].Token.Value().(string) + labels := make([]string, len(item.Keys)-1) + for i, key := range item.Keys[1:] { + labels[i] = key.Token.Value().(string) + } + + var blockItems []hcl1ast.Node + + switch val := item.Val.(type) { + + case *hcl1ast.ObjectType: + blockItems = append(blockItems, val) + + case *hcl1ast.ListType: + for _, node := range val.List { + switch listItem := node.(type) { + case *hcl1ast.ObjectType: + blockItems = append(blockItems, listItem) + default: + // We're going to cheat a bit here and construct a synthetic + // HCL1 list just because that makes our logic + // simpler below where we can just treat all non-objects + // in the same way when producing "dynamic" blocks. + synthList := &hcl1ast.ListType{ + List: []hcl1ast.Node{listItem}, + Lbrack: listItem.Pos(), + Rbrack: hcl1NodeEndPos(listItem), + } + blockItems = append(blockItems, synthList) + } + } + + default: + blockItems = append(blockItems, item.Val) + } + + if len(blockItems) == 0 && emptyAsAttr { + // Terraform v0.12's config decoder allows using block syntax for + // certain attribute types, which we prefer as idiomatic usage + // causing us to end up in this function in such cases, but as + // a special case users can still use the attribute syntax to + // explicitly write an empty list. For more information, see + // the lang/blocktoattr package. + printAttribute(buf, item.Keys[0].Token.Value().(string), []byte{'[', ']'}, item.LineComment) + return diags + } + + for _, blockItem := range blockItems { + switch ti := blockItem.(type) { + case *hcl1ast.ObjectType: + // If we have an object then we'll pass through its content + // as a block directly. This is the most straightforward mapping + // from the source input, since we know exactly which keys + // are present. + printBlockOpen(buf, blockType, labels, item.LineComment) + bodyDiags := upgradeBlockBody( + filename, fmt.Sprintf("%s.%s", blockAddr, blockType), buf, + ti.List.Items, ti.Rbrace, nestedRules, adhocComments, + ) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n") + default: + // For any other sort of value we can't predict what shape it + // will have at runtime, so we must generate a very conservative + // "dynamic" block that tries to assign everything from the + // schema. The result of this is likely to be pretty ugly. 
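+ // As a rough sketch of the result (the "ebs_block_device" and
+ // "var.extra_devices" names here are invented for illustration),
+ // a v0.11-style assignment like
+ //
+ // ebs_block_device = "${var.extra_devices}"
+ //
+ // is rewritten to something like
+ //
+ // dynamic "ebs_block_device" {
+ // for_each = var.extra_devices
+ // content {
+ // # ...one argument per attribute in the block schema...
+ // }
+ // }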
+ printBlockOpen(buf, "dynamic", []string{blockType}, item.LineComment) + eachSrc, eachDiags := upgradeExpr(blockItem, filename, true, an) + diags = diags.Append(eachDiags) + printAttribute(buf, "for_each", eachSrc, nil) + if nestedSchema.Nesting == configschema.NestingMap { + // This is a pretty odd situation since map-based blocks + // didn't exist prior to Terraform v0.12, but we'll support + // this anyway in case we decide to add support in a later + // SDK release that is still somehow compatible with + // Terraform v0.11. + printAttribute(buf, "labels", []byte(fmt.Sprintf(`[%s.key]`, blockType)), nil) + } + printBlockOpen(buf, "content", nil, nil) + buf.WriteString("# TF-UPGRADE-TODO: The automatic upgrade tool can't predict\n") + buf.WriteString("# which keys might be set in maps assigned here, so it has\n") + buf.WriteString("# produced a comprehensive set here. Consider simplifying\n") + buf.WriteString("# this after confirming which keys can be set in practice.\n\n") + printDynamicBlockBody(buf, blockType, &nestedSchema.Block) + buf.WriteString("}\n") + buf.WriteString("}\n") + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "Approximate migration of invalid block type assignment", + Detail: fmt.Sprintf("In %s the name %q is a nested block type, but this configuration is exploiting some missing validation rules from Terraform v0.11 and prior to trick Terraform into creating blocks dynamically.\n\nThis has been upgraded to use the new Terraform v0.12 dynamic blocks feature, but since the upgrade tool cannot predict which map keys will be present a fully-comprehensive set has been generated.", blockAddr, blockType), + Subject: hcl1PosRange(filename, blockItem.Pos()).Ptr(), + }) + } + } + + return diags + } +} + +// schemaDefaultBodyRules constructs standard body content rules for the given +// schema. Each call is guaranteed to produce a distinct object so that +// callers can safely mutate the result in order to impose custom rules +// in addition to or instead of those created by default, for situations +// where schema-based and predefined items mix in a single body. +func schemaDefaultBodyRules(filename string, schema *configschema.Block, an *analysis, adhocComments *commentQueue) bodyContentRules { + ret := make(bodyContentRules) + if schema == nil { + // Shouldn't happen in any real case, but often crops up in tests + // where the mock schemas tend to be incomplete. + return ret + } + + for name, attrS := range schema.Attributes { + if aty := attrS.Type; blocktoattr.TypeCanBeBlocks(aty) { + // Terraform's standard body processing rules for arbitrary schemas + // have a special case where list-of-object or set-of-object + // attributes can be specified as a sequence of nested blocks + // instead of a single list attribute. We prefer that form during + // upgrade for historical reasons, to avoid making large changes + // to existing configurations that were following documented idiom. 
+ synthSchema := blocktoattr.SchemaForCtyContainerType(aty) + nestedRules := schemaDefaultBodyRules(filename, &synthSchema.Block, an, adhocComments) + ret[name] = nestedBlockRuleWithDynamic(filename, nestedRules, synthSchema, true, an, adhocComments) + continue + } + ret[name] = normalAttributeRule(filename, attrS.Type, an) + } + for name, blockS := range schema.BlockTypes { + nestedRules := schemaDefaultBodyRules(filename, &blockS.Block, an, adhocComments) + ret[name] = nestedBlockRuleWithDynamic(filename, nestedRules, blockS, false, an, adhocComments) + } + + return ret +} + +// schemaNoInterpBodyRules constructs standard body content rules for the given +// schema. Each call is guaranteed to produce a distinct object so that +// callers can safely mutate the result in order to impose custom rules +// in addition to or instead of those created by default, for situations +// where schema-based and predefined items mix in a single body. +func schemaNoInterpBodyRules(filename string, schema *configschema.Block, an *analysis, adhocComments *commentQueue) bodyContentRules { + ret := make(bodyContentRules) + if schema == nil { + // Shouldn't happen in any real case, but often crops up in tests + // where the mock schemas tend to be incomplete. + return ret + } + + for name, attrS := range schema.Attributes { + ret[name] = noInterpAttributeRule(filename, attrS.Type, an) + } + for name, blockS := range schema.BlockTypes { + nestedRules := schemaDefaultBodyRules(filename, &blockS.Block, an, adhocComments) + ret[name] = nestedBlockRule(filename, nestedRules, an, adhocComments) + } + + return ret +} + +// justAttributesBodyRules constructs body content rules that just use the +// standard interpolated attribute mapping for every name already present +// in the given body object. +// +// This is a little weird vs. just processing directly the attributes, but +// has the advantage that the caller can then apply overrides to the result +// as necessary to deal with any known names that need special handling. +// +// Any attribute rules created by this function do not have a specific wanted +// value type specified, instead setting it to just cty.DynamicPseudoType. +func justAttributesBodyRules(filename string, body *hcl1ast.ObjectType, an *analysis) bodyContentRules { + rules := make(bodyContentRules, len(body.List.Items)) + args := body.List.Items + for _, arg := range args { + name := arg.Keys[0].Token.Value().(string) + rules[name] = normalAttributeRule(filename, cty.DynamicPseudoType, an) + } + return rules +} + +func lifecycleBlockBodyRules(filename string, an *analysis) bodyContentRules { + return bodyContentRules{ + "create_before_destroy": noInterpAttributeRule(filename, cty.Bool, an), + "prevent_destroy": noInterpAttributeRule(filename, cty.Bool, an), + "ignore_changes": func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + val, ok := item.Val.(*hcl1ast.ListType) + if !ok { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid ignore_changes argument", + Detail: `The "ignore_changes" argument must be a list of attribute expressions relative to this resource.`, + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + return diags + } + + // As a special case, we'll map the single-element list ["*"] to + // the new keyword "all". 
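+ // That is:
+ //
+ // ignore_changes = ["*"] # v0.11
+ //
+ // becomes
+ //
+ // ignore_changes = all # v0.12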
+ if len(val.List) == 1 { + if lit, ok := val.List[0].(*hcl1ast.LiteralType); ok { + if lit.Token.Value() == "*" { + printAttribute(buf, item.Keys[0].Token.Value().(string), []byte("all"), item.LineComment) + return diags + } + } + } + + var exprBuf bytes.Buffer + multiline := len(val.List) > 1 + exprBuf.WriteByte('[') + if multiline { + exprBuf.WriteByte('\n') + } + for _, node := range val.List { + itemSrc, moreDiags := upgradeTraversalExpr(node, filename, an) + diags = diags.Append(moreDiags) + exprBuf.Write(itemSrc) + if multiline { + exprBuf.WriteString(",\n") + } + } + exprBuf.WriteByte(']') + + printAttribute(buf, item.Keys[0].Token.Value().(string), exprBuf.Bytes(), item.LineComment) + + return diags + }, + } +} + +func provisionerBlockRule(filename string, resourceType string, an *analysis, adhocComments *commentQueue) bodyItemRule { + // Unlike some other examples above, this is a rule for the entire + // provisioner block, rather than just for its contents. Therefore it must + // also produce the block header and body delimiters. + return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + body := item.Val.(*hcl1ast.ObjectType) + declRange := hcl1PosRange(filename, item.Keys[0].Pos()) + + if len(item.Keys) < 2 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provisioner block", + Detail: "A provisioner block must have one label: the provisioner type.", + Subject: &declRange, + }) + return diags + } + + typeName := item.Keys[1].Token.Value().(string) + schema := an.ProvisionerSchemas[typeName] + if schema == nil { + // This message is assuming that if the user _is_ using a third-party + // provisioner plugin they already know how to install it for normal + // use and so we don't need to spell out those instructions in detail + // here. + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Unknown provisioner type", + Detail: fmt.Sprintf("The provisioner type %q is not supported. If this is a third-party plugin, make sure its plugin executable is available in one of the usual plugin search paths.", typeName), + Subject: &declRange, + }) + return diags + } + + rules := schemaDefaultBodyRules(filename, schema, an, adhocComments) + rules["when"] = maybeBareTraversalAttributeRule(filename, an) + rules["on_failure"] = maybeBareTraversalAttributeRule(filename, an) + rules["connection"] = connectionBlockRule(filename, resourceType, an, adhocComments) + + printComments(buf, item.LeadComment) + printBlockOpen(buf, "provisioner", []string{typeName}, item.LineComment) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("%s.provisioner[%q]", blockAddr, typeName), buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n") + + return diags + } +} + +func connectionBlockRule(filename string, resourceType string, an *analysis, adhocComments *commentQueue) bodyItemRule { + // Unlike some other examples above, this is a rule for the entire + // connection block, rather than just for its contents. Therefore it must + // also produce the block header and body delimiters. 
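+ //
+ // As a sketch of the overall effect (using the aws_instance entry
+ // from the defaults table further below; the surrounding resource
+ // is hypothetical), an empty v0.11 connection block inside an
+ // aws_instance resource would be upgraded to roughly:
+ //
+ // connection {
+ // host = coalesce(self.public_ip, self.private_ip)
+ // type = "ssh"
+ // }
+ //
+ // with the defaulted names emitted in sorted order, as below.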
+ return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + body := item.Val.(*hcl1ast.ObjectType) + + // TODO: For the few resource types that were setting ConnInfo in + // state after create/update in prior versions, generate the additional + // explicit connection settings that are now required if and only if + // there's at least one provisioner block. + // For now, we just pass this through as-is. + + schema := terraform.ConnectionBlockSupersetSchema() + rules := schemaDefaultBodyRules(filename, schema, an, adhocComments) + rules["type"] = noInterpAttributeRule(filename, cty.String, an) // type is processed early in the config loader, so cannot interpolate + + printComments(buf, item.LeadComment) + printBlockOpen(buf, "connection", nil, item.LineComment) + + // Terraform 0.12 no longer supports "magical" configuration of defaults + // in the connection block from logic in the provider because explicit + // is better than implicit, but for backward-compatibility we'll populate + // an existing connection block with any settings that would've been + // previously set automatically for a set of instance types we know + // had this behavior in versions prior to the v0.12 release. + if defaults := resourceTypeAutomaticConnectionExprs[resourceType]; len(defaults) > 0 { + names := make([]string, 0, len(defaults)) + for name := range defaults { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + exprSrc := defaults[name] + if existing := body.List.Filter(name); len(existing.Items) > 0 { + continue // Existing explicit value, so no need for a default + } + printAttribute(buf, name, []byte(exprSrc), nil) + } + } + + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("%s.connection", blockAddr), buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n") + + return diags + } +} + +func moduleSourceRule(filename string, an *analysis) bodyItemRule { + return func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + val, ok := item.Val.(*hcl1ast.LiteralType) + if !ok { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid source argument", + Detail: `The "source" argument must be a single string containing the module source.`, + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + return diags + } + if val.Token.Type != hcl1token.STRING { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid source argument", + Detail: `The "source" argument must be a single string containing the module source.`, + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + return diags + } + + litVal := val.Token.Value().(string) + + if isMaybeRelativeLocalPath(litVal, an.ModuleDir) { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "Possible relative module source", + Detail: "Terraform cannot determine the given module source, but it appears to be a relative path", + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + buf.WriteString( + "# TF-UPGRADE-TODO: In Terraform v0.11 and earlier, it was possible to\n" + + "# reference a relative module source without a preceding ./, but it is no\n" + + "# longer supported in Terraform v0.12.\n" + + "#\n" + + "# If the below module source is indeed a relative local path, add ./ to the\n" + + "# start of the 
source string. If that is not the case, then leave it as-is\n" +
+ "# and remove this TODO comment.\n",
+ )
+ }
+ newVal, exprDiags := upgradeExpr(val, filename, false, an)
+ diags = diags.Append(exprDiags)
+ buf.WriteString("source = " + string(newVal) + "\n")
+ return diags
+ }
+}
+
+// Prior to Terraform 0.12 providers were able to supply default connection
+// settings that would partially populate the "connection" block with
+// automatically-selected values.
+//
+// In practice, this feature was often confusing in that the provider would not
+// have enough information to select a suitable host address or protocol from
+// multiple possible options and so would just make an arbitrary decision.
+//
+// With our principle of "explicit is better than implicit", as of Terraform 0.12
+// we now require all connection settings to be configured explicitly by the
+// user so that it's clear and explicit in the configuration which protocol and
+// IP address are being selected. To avoid generating errors immediately after
+// upgrade, though, we'll make a best effort to populate something functionally
+// equivalent to what the provider would've done automatically for any resource
+// types we know about in this table.
+//
+// The leaf values in this data structure are raw expressions to be inserted,
+// and so they must use valid expression syntax as understood by Terraform 0.12.
+// They should generally be expressions using only constant values or expressions
+// in terms of attributes accessed via the special "self" object. These should
+// mimic as closely as possible the logic that the provider itself used to
+// implement.
+//
+// NOTE: Because provider releases are independent from Terraform Core releases,
+// there could potentially be new 0.11-compatible provider releases that
+// introduce new uses of default connection info that this map doesn't know
+// about. The upgrade tool will not handle these, and so we will advise
+// provider developers that this mechanism is not to be used for any new
+// resource types, even in 0.11 mode.
+var resourceTypeAutomaticConnectionExprs = map[string]map[string]string{
+ "aws_instance": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.public_ip, self.private_ip)`,
+ },
+ "aws_spot_instance_request": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.public_ip, self.private_ip)`,
+ "user": `self.username != "" ? self.username : null`,
+ "password": `self.password != "" ? self.password : null`,
+ },
+ "azure_instance": map[string]string{
+ "type": `"ssh" # TF-UPGRADE-TODO: If this is a windows instance without an SSH server, change to "winrm"`,
+ "host": `coalesce(self.vip_address, self.ip_address)`,
+ },
+ "azurerm_virtual_machine": map[string]string{
+ "type": `"ssh" # TF-UPGRADE-TODO: If this is a windows instance without an SSH server, change to "winrm"`,
+ // The azurerm_virtual_machine resource type does not expose a remote
+ // access IP address directly, instead requiring the user to separately
+ // fetch the network interface.
+ // (If we can add a "default_ssh_ip" or similar attribute to this
+ // resource type before its first 0.12-compatible release then we should
+ // update this to use that instead, for simplicity's sake.)
+ "host": `"" # TF-UPGRADE-TODO: Set this to the IP address of the machine's primary network interface`,
+ },
+ "brightbox_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.public_hostname, self.ipv6_hostname, self.fqdn)`,
+ },
+ "cloudscale_server": map[string]string{
+ "type": `"ssh"`,
+ // The logic for selecting this is pretty complicated for this resource
+ // type, and the result is not exposed as an isolated attribute, so
+ // the conversion here is a little messy. We include newlines in this
+ // one so that the auto-formatter can indent it nicely for readability.
+ // NOTE: In v1.0.1 of this provider (the latest at the time of this
+ // writing) it has a possible bug where it selects _public_ IPv4
+ // addresses but _private_ IPv6 addresses. That behavior is followed
+ // here to maximize compatibility with existing configurations.
+ "host": `coalesce( # TF-UPGRADE-TODO: Simplify this to reference a specific desired IP address, if possible.
+ concat(
+ flatten([
+ for i in self.network_interface : [
+ for a in i.addresses : a.address
+ if a.version == 4
+ ]
+ if i.type == "public"
+ ]),
+ flatten([
+ for i in self.network_interface : [
+ for a in i.addresses : a.address
+ if a.version == 6
+ ]
+ if i.type == "private"
+ ]),
+ )...
+ )`,
+ },
+ "cloudstack_instance": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ip_address`,
+ },
+ "digitalocean_droplet": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ipv4_address`,
+ },
+ "flexibleengine_compute_bms_server_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "flexibleengine_compute_instance_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "google_compute_instance": map[string]string{
+ "type": `"ssh"`,
+ // The logic for selecting this is pretty complicated for this resource
+ // type, and the result is not exposed as an isolated attribute, so
+ // the conversion here is a little messy. We include newlines in this
+ // one so that the auto-formatter can indent it nicely for readability.
+ // (If we can add a "default_ssh_ip" or similar attribute to this
+ // resource type before its first 0.12-compatible release then we should
+ // update this to use that instead, for simplicity's sake.)
+ "host": `coalesce( # TF-UPGRADE-TODO: Simplify this to reference a specific desired IP address, if possible.
+ concat(
+ # Prefer any available NAT IP address
+ flatten([
+ for ni in self.network_interface : [
+ for ac in ni.access_config : ac.nat_ip
+ ]
+ ]),
+
+ # Otherwise, use the first available LAN IP address
+ [
+ for ni in self.network_interface : ni.network_ip
+ ],
+ )...
+ )`,
+ },
+ "hcloud_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ipv4_address`,
+ },
+ "huaweicloud_compute_instance_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "linode_instance": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ipv4[0]`,
+ },
+ "oneandone_baremetal_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ips[0].ip`,
+ "password": `self.password != "" ? self.password : null`,
+ "private_key": `self.ssh_key_path != "" ? file(self.ssh_key_path) : null`,
+ },
+ "oneandone_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.ips[0].ip`,
+ "password": `self.password != "" ? self.password : null`,
+ "private_key": `self.ssh_key_path != "" ? file(self.ssh_key_path) : null`,
+ },
+ "openstack_compute_instance_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "opentelekomcloud_compute_bms_server_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "opentelekomcloud_compute_instance_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "packet_device": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.access_public_ipv4`,
+ },
+ "profitbricks_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.primary_nic.ips...)`,
+ // The value for this isn't exported anywhere on the object, so we'll
+ // need to have the user fix it up manually.
+ "password": `"" # TF-UPGRADE-TODO: set this to a suitable value, such as the boot image password`,
+ },
+ "scaleway_server": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.public_ip`,
+ },
+ "telefonicaopencloud_compute_bms_server_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "telefonicaopencloud_compute_instance_v2": map[string]string{
+ "type": `"ssh"`,
+ "host": `coalesce(self.access_ip_v4, self.access_ip_v6)`,
+ },
+ "triton_machine": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.primaryip`, // convention would call for this to be named "primary_ip", but "primaryip" is the name this resource type uses
+ },
+ "vsphere_virtual_machine": map[string]string{
+ "type": `"ssh"`,
+ "host": `self.default_ip_address`,
+ },
+ "yandex_compute_instance": map[string]string{
+ "type": `"ssh"`,
+ // The logic for selecting this is pretty complicated for this resource
+ // type, and the result is not exposed as an isolated attribute, so
+ // the conversion here is a little messy. We include newlines in this
+ // one so that the auto-formatter can indent it nicely for readability.
+ "host": `coalesce( # TF-UPGRADE-TODO: Simplify this to reference a specific desired IP address, if possible.
+ concat(
+ # Prefer any available NAT IP address
+ [
+ for i in self.network_interface : i.nat_ip_address
+ ],
+
+ # Otherwise, use the first available internal IP address
+ [
+ for i in self.network_interface : i.ip_address
+ ],
+ )...
+ )`,
+ },
+}
+
+// copied directly from internal/initwd/getter.go
+var localSourcePrefixes = []string{
+ "./",
+ "../",
+ ".\\",
+ "..\\",
+}
+
+// isMaybeRelativeLocalPath tries to catch situations where a module source is
+// an improperly-referenced relative path, such as "module" instead of
+// "./module". This is a simple check that could return a false positive in the
+// unlikely-yet-plausible case that a module source for e.g. a github
+// repository also looks exactly like an existing relative path. This
+// should only be used to return a warning.
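+//
+// For example (hypothetical source string): source = "modules/network"
+// has no ./ or ../ prefix and does not parse as a registry address, so
+// if ./modules/network exists on disk relative to the module directory
+// we warn that the author probably intended "./modules/network".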
+func isMaybeRelativeLocalPath(addr, dir string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + // it is _definitely_ a relative path + return false + } + } + + _, err := regsrc.ParseModuleSource(addr) + if err == nil { + // it is a registry source + return false + } + + possibleRelPath := filepath.Join(dir, addr) + _, err = os.Stat(possibleRelPath) + if err == nil { + // If there is no error, something exists at what would be the relative + // path, if the module source started with ./ + return true + } + + return false +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_expr.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_expr.go new file mode 100644 index 00000000..6bfa2566 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_expr.go @@ -0,0 +1,930 @@ +package configupgrade + +import ( + "bytes" + "fmt" + "log" + "strconv" + "strings" + + hcl2 "github.com/hashicorp/hcl2/hcl" + hcl2syntax "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + + hcl1ast "github.com/hashicorp/hcl/hcl/ast" + hcl1printer "github.com/hashicorp/hcl/hcl/printer" + hcl1token "github.com/hashicorp/hcl/hcl/token" + + "github.com/hashicorp/hil" + hilast "github.com/hashicorp/hil/ast" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" +) + +func upgradeExpr(val interface{}, filename string, interp bool, an *analysis) ([]byte, tfdiags.Diagnostics) { + var buf bytes.Buffer + var diags tfdiags.Diagnostics + + // "val" here can be either a hcl1ast.Node or a hilast.Node, since both + // of these correspond to expressions in HCL2. Therefore we need to + // comprehensively handle every possible HCL1 *and* HIL AST node type + // and, at minimum, print it out as-is in HCL2 syntax. +Value: + switch tv := val.(type) { + + case *hcl1ast.LiteralType: + return upgradeExpr(tv.Token, filename, interp, an) + + case hcl1token.Token: + litVal := tv.Value() + switch tv.Type { + case hcl1token.STRING: + if !interp { + // Easy case, then. + printQuotedString(&buf, litVal.(string)) + break + } + + hilNode, err := hil.Parse(litVal.(string)) + if err != nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid interpolated string", + Detail: fmt.Sprintf("Interpolation parsing failed: %s", err), + Subject: hcl1PosRange(filename, tv.Pos).Ptr(), + }) + return nil, diags + } + + interpSrc, interpDiags := upgradeExpr(hilNode, filename, interp, an) + buf.Write(interpSrc) + diags = diags.Append(interpDiags) + + case hcl1token.HEREDOC: + // HCL1's "Value" method for tokens pulls out the body and removes + // any indents in the source for a flush heredoc, which throws away + // information we need to upgrade. Therefore we're going to + // re-implement a subset of that logic here where we want to retain + // the whitespace verbatim even in flush mode. + + firstNewlineIdx := strings.IndexByte(tv.Text, '\n') + if firstNewlineIdx < 0 { + // Should never happen, because tv.Value would already have + // panicked above in this case. 
+ panic("heredoc doesn't contain newline") + } + introducer := tv.Text[:firstNewlineIdx+1] + marker := introducer[2:] // trim off << prefix + if marker[0] == '-' { + marker = marker[1:] // also trim of - prefix for flush heredoc + } + body := tv.Text[len(introducer) : len(tv.Text)-len(marker)] + flush := introducer[2] == '-' + if flush { + // HCL1 treats flush heredocs differently, trimming off any + // spare whitespace that might appear after the trailing + // newline, and so we must replicate that here to avoid + // introducing additional whitespace in the output. + body = strings.TrimRight(body, " \t") + } + + // Now we have: + // - introducer is the first line, like "<<-FOO\n" + // - marker is the end marker, like "FOO\n" + // - body is the raw data between the introducer and the marker, + // which we need to do recursive upgrading for. + + buf.WriteString(introducer) + if !interp { + // Easy case: escape all interpolation-looking sequences. + printHeredocLiteralFromHILOutput(&buf, body) + } else { + hilNode, err := hil.Parse(body) + if err != nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid interpolated string", + Detail: fmt.Sprintf("Interpolation parsing failed: %s", err), + Subject: hcl1PosRange(filename, tv.Pos).Ptr(), + }) + } + if _, ok := hilNode.(*hilast.Output); !ok { + // hil.Parse usually produces an output, but it can sometimes + // produce an isolated expression if the input is entirely + // a single interpolation. + hilNode = &hilast.Output{ + Exprs: []hilast.Node{hilNode}, + Posx: hilNode.Pos(), + } + } + interpDiags := upgradeHeredocBody(&buf, hilNode.(*hilast.Output), filename, an) + diags = diags.Append(interpDiags) + } + if !strings.HasSuffix(body, "\n") { + // The versions of HCL1 vendored into Terraform <=0.11 + // incorrectly allowed the end marker to appear at the end of + // the final line of the body, rather than on a line of its own. + // That is no longer valid in HCL2, so we need to fix it up. + buf.WriteByte('\n') + } + // NOTE: Marker intentionally contains an extra newline here because + // we need to ensure that any follow-on expression bits end up on + // a separate line, or else the HCL2 parser won't be able to + // recognize the heredoc marker. This causes an extra empty line + // in some cases, which we accept for simplicity's sake. + buf.WriteString(marker) + + case hcl1token.BOOL: + if litVal.(bool) { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + + case hcl1token.NUMBER: + num := tv.Value() + buf.WriteString(strconv.FormatInt(num.(int64), 10)) + + case hcl1token.FLOAT: + num := tv.Value() + buf.WriteString(strconv.FormatFloat(num.(float64), 'f', -1, 64)) + + default: + // For everything else we'll just pass through the given bytes verbatim, + // but we should't get here because the above is intended to be exhaustive. 
+ buf.WriteString(tv.Text) + + } + + case *hcl1ast.ListType: + multiline := tv.Lbrack.Line != tv.Rbrack.Line + buf.WriteString("[") + if multiline { + buf.WriteString("\n") + } + for i, node := range tv.List { + src, moreDiags := upgradeExpr(node, filename, interp, an) + diags = diags.Append(moreDiags) + buf.Write(src) + if lit, ok := node.(*hcl1ast.LiteralType); ok && lit.LineComment != nil { + for _, comment := range lit.LineComment.List { + buf.WriteString(", " + comment.Text) + buf.WriteString("\n") + } + } else { + if multiline { + buf.WriteString(",\n") + } else if i < len(tv.List)-1 { + buf.WriteString(", ") + } + } + } + buf.WriteString("]") + + case *hcl1ast.ObjectType: + if len(tv.List.Items) == 0 { + buf.WriteString("{}") + break + } + buf.WriteString("{\n") + for _, item := range tv.List.Items { + if len(item.Keys) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid map element", + Detail: "A map element may not have any block-style labels.", + Subject: hcl1PosRange(filename, item.Pos()).Ptr(), + }) + continue + } + keySrc, moreDiags := upgradeExpr(item.Keys[0].Token, filename, interp, an) + diags = diags.Append(moreDiags) + valueSrc, moreDiags := upgradeExpr(item.Val, filename, interp, an) + diags = diags.Append(moreDiags) + if item.LeadComment != nil { + for _, c := range item.LeadComment.List { + buf.WriteString(c.Text) + buf.WriteByte('\n') + } + } + + buf.Write(keySrc) + buf.WriteString(" = ") + buf.Write(valueSrc) + if item.LineComment != nil { + for _, c := range item.LineComment.List { + buf.WriteByte(' ') + buf.WriteString(c.Text) + } + } + buf.WriteString("\n") + } + buf.WriteString("}") + + case hcl1ast.Node: + // If our more-specific cases above didn't match this then we'll + // ask the hcl1printer package to print the expression out + // itself, and assume it'll still be valid in HCL2. + // (We should rarely end up here, since our cases above should + // be comprehensive.) + log.Printf("[TRACE] configupgrade: Don't know how to upgrade %T as expression, so just passing it through as-is", tv) + hcl1printer.Fprint(&buf, tv) + + case *hilast.LiteralNode: + switch tl := tv.Value.(type) { + case string: + // This shouldn't generally happen because literal strings are + // always wrapped in hilast.Output in HIL, but we'll allow it anyway. + printQuotedString(&buf, tl) + case int: + buf.WriteString(strconv.Itoa(tl)) + case float64: + buf.WriteString(strconv.FormatFloat(tl, 'f', -1, 64)) + case bool: + if tl { + buf.WriteString("true") + } else { + buf.WriteString("false") + } + } + + case *hilast.VariableAccess: + // In HIL a variable access is just a single string which might contain + // a mixture of identifiers, dots, integer indices, and splat expressions. + // All of these concepts were formerly interpreted by Terraform itself, + // rather than by HIL. We're going to process this one chunk at a time + // here so we can normalize and introduce some newer syntax where it's + // safe to do so. 
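+ //
+ // A sketch of the normalizations applied by the helpers below,
+ // reusing their own example addresses (and assuming the analysis
+ // knows the referenced resources and their count usage):
+ //
+ // test_instance.foo.0.id => test_instance.foo[0].id (count set)
+ // test_instance.foo.count => length(test_instance.foo)
+ // data.terraform_remote_state.x.foo => data.terraform_remote_state.x.outputs.foo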
+ parts := strings.Split(tv.Name, ".") + + transformed := transformCountPseudoAttribute(&buf, parts, an) + if transformed { + break Value + } + + parts = upgradeTraversalParts(parts, an) // might add/remove/change parts + + vDiags := validateHilAddress(tv.Name, filename) + if len(vDiags) > 0 { + diags = diags.Append(vDiags) + break + } + + printHilTraversalPartsAsHcl2(&buf, parts) + + case *hilast.Arithmetic: + op, exists := hilArithmeticOpSyms[tv.Op] + if !exists { + panic(fmt.Errorf("arithmetic node with unsupported operator %#v", tv.Op)) + } + + lhsExpr := tv.Exprs[0] + rhsExpr := tv.Exprs[1] + lhsSrc, exprDiags := upgradeExpr(lhsExpr, filename, true, an) + diags = diags.Append(exprDiags) + rhsSrc, exprDiags := upgradeExpr(rhsExpr, filename, true, an) + diags = diags.Append(exprDiags) + + // HIL's AST represents -foo as (0 - foo), so we'll recognize + // that here and normalize it back. + if tv.Op == hilast.ArithmeticOpSub && len(lhsSrc) == 1 && lhsSrc[0] == '0' { + buf.WriteString("-") + buf.Write(rhsSrc) + break + } + + buf.Write(lhsSrc) + buf.WriteString(op) + buf.Write(rhsSrc) + + case *hilast.Call: + name := tv.Func + args := tv.Args + + // Some adaptations must happen prior to upgrading the arguments, + // because they depend on the original argument AST nodes. + switch name { + case "base64sha256", "base64sha512", "md5", "sha1", "sha256", "sha512": + // These functions were sometimes used in conjunction with the + // file() function to take the hash of the contents of a file. + // Prior to Terraform 0.11 there was a chance of silent corruption + // of strings containing non-UTF8 byte sequences, and so we have + // made it illegal to use file() with non-text files in 0.12 even + // though in this _particular_ situation (passing the function + // result directly to another function) there would not be any + // corruption; the general rule keeps things consistent. + // However, to still meet those use-cases we now have variants of + // the hashing functions that have a "file" prefix on their names + // and read the contents of a given file, rather than hashing + // directly the given string. + if len(args) > 0 { + if subCall, ok := args[0].(*hilast.Call); ok && subCall.Func == "file" { + // We're going to flatten this down into a single call, so + // we actually want the arguments of the sub-call here. + name = "file" + name + args = subCall.Args + + // For this one, we'll fall through to the normal upgrade + // handling now that we've fixed up the name and args... + } + } + + } + + argExprs := make([][]byte, len(args)) + multiline := false + totalLen := 0 + for i, arg := range args { + if i > 0 { + totalLen += 2 + } + exprSrc, exprDiags := upgradeExpr(arg, filename, true, an) + diags = diags.Append(exprDiags) + argExprs[i] = exprSrc + if bytes.Contains(exprSrc, []byte{'\n'}) { + // If any of our arguments are multi-line then we'll also be multiline + multiline = true + } + totalLen += len(exprSrc) + } + + if totalLen > 60 { // heuristic, since we don't know here how indented we are already + multiline = true + } + + // Some functions are now better expressed as native language constructs. + // These cases will return early if they emit anything, or otherwise + // fall through to the default emitter. 
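+ // For illustration (hypothetical arguments; real output may be
+ // formatted across multiple lines), the rewrites handled below
+ // include:
+ //
+ // list(a, b) => [a, b]
+ // map("k", v) => { "k" = v }
+ // lookup(m, "k") => m["k"]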
+ switch name { + case "list": + // Should now use tuple constructor syntax + buf.WriteByte('[') + if multiline { + buf.WriteByte('\n') + } + for i, exprSrc := range argExprs { + buf.Write(exprSrc) + if multiline { + buf.WriteString(",\n") + } else { + if i < len(args)-1 { + buf.WriteString(", ") + } + } + } + buf.WriteByte(']') + break Value + case "map": + // Should now use object constructor syntax, but we can only + // achieve that if the call is valid, which requires an even + // number of arguments. + if len(argExprs) == 0 { + buf.WriteString("{}") + break Value + } else if len(argExprs)%2 == 0 { + buf.WriteString("{\n") + for i := 0; i < len(argExprs); i += 2 { + k := argExprs[i] + v := argExprs[i+1] + + buf.Write(k) + buf.WriteString(" = ") + buf.Write(v) + buf.WriteByte('\n') + } + buf.WriteByte('}') + break Value + } + case "lookup": + // A lookup call with only two arguments is equivalent to native + // index syntax. (A third argument would specify a default value, + // so calls like that must be left alone.) + // (Note that we can't safely do this for element(...) because + // the user may be relying on its wraparound behavior.) + if len(argExprs) == 2 { + buf.Write(argExprs[0]) + buf.WriteByte('[') + buf.Write(argExprs[1]) + buf.WriteByte(']') + break Value + } + case "element": + // We cannot replace element with index syntax safely in general + // because users may be relying on its special modulo wraparound + // behavior that the index syntax doesn't do. However, if it seems + // like the user is trying to use element with a set, we'll insert + // an explicit conversion to list to mimic the implicit conversion + // that we used to do as an unintended side-effect of how functions + // work in HIL. + if len(argExprs) > 0 { + argTy := an.InferExpressionType(argExprs[0], nil) + if argTy.IsSetType() { + newExpr := []byte(`tolist(`) + newExpr = append(newExpr, argExprs[0]...) + newExpr = append(newExpr, ')') + argExprs[0] = newExpr + } + } + + // HIL used some undocumented special functions to implement certain + // operations, but since those were actually callable in real expressions + // some users inevitably depended on them, so we'll fix them up here. + // These each become two function calls to preserve the old behavior + // of implicitly converting to the source type first. Usage of these + // is relatively rare, so the result doesn't need to be too pretty. + case "__builtin_BoolToString": + buf.WriteString("tostring(tobool(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_FloatToString": + buf.WriteString("tostring(tonumber(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_IntToString": + buf.WriteString("tostring(floor(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_StringToInt": + buf.WriteString("floor(tostring(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_StringToFloat": + buf.WriteString("tonumber(tostring(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_StringToBool": + buf.WriteString("tobool(tostring(") + buf.Write(argExprs[0]) + buf.WriteString("))") + break Value + case "__builtin_FloatToInt", "__builtin_IntToFloat": + // Since "floor" already has an implicit conversion of its argument + // to number, and the result is a whole number in either case, + // these ones are easier. (We no longer distinguish int and float + // as types in HCL2, even though HIL did.) 
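+ // e.g. (hypothetical argument) __builtin_FloatToInt(var.n)
+ // becomes floor(var.n).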
+ name = "floor" + } + + buf.WriteString(name) + buf.WriteByte('(') + if multiline { + buf.WriteByte('\n') + } + for i, exprSrc := range argExprs { + buf.Write(exprSrc) + if multiline { + buf.WriteString(",\n") + } else { + if i < len(args)-1 { + buf.WriteString(", ") + } + } + } + buf.WriteByte(')') + + case *hilast.Conditional: + condSrc, exprDiags := upgradeExpr(tv.CondExpr, filename, true, an) + diags = diags.Append(exprDiags) + trueSrc, exprDiags := upgradeExpr(tv.TrueExpr, filename, true, an) + diags = diags.Append(exprDiags) + falseSrc, exprDiags := upgradeExpr(tv.FalseExpr, filename, true, an) + diags = diags.Append(exprDiags) + + buf.Write(condSrc) + buf.WriteString(" ? ") + buf.Write(trueSrc) + buf.WriteString(" : ") + buf.Write(falseSrc) + + case *hilast.Index: + target, ok := tv.Target.(*hilast.VariableAccess) + if !ok { + panic(fmt.Sprintf("Index node with unsupported target type (%T)", tv.Target)) + } + parts := strings.Split(target.Name, ".") + + keySrc, exprDiags := upgradeExpr(tv.Key, filename, true, an) + diags = diags.Append(exprDiags) + + transformed := transformCountPseudoAttribute(&buf, parts, an) + if transformed { + break Value + } + + parts = upgradeTraversalParts(parts, an) // might add/remove/change parts + + vDiags := validateHilAddress(target.Name, filename) + if len(vDiags) > 0 { + diags = diags.Append(vDiags) + break + } + + first, remain := parts[0], parts[1:] + + var rAddr addrs.Resource + switch parts[0] { + case "data": + if len(parts) == 5 && parts[3] == "*" { + rAddr.Mode = addrs.DataResourceMode + rAddr.Type = parts[1] + rAddr.Name = parts[2] + } + default: + if len(parts) == 4 && parts[2] == "*" { + rAddr.Mode = addrs.ManagedResourceMode + rAddr.Type = parts[0] + rAddr.Name = parts[1] + } + } + + // We need to check if the thing being referenced has count + // to retain backward compatibility + hasCount := false + if v, exists := an.ResourceHasCount[rAddr]; exists { + hasCount = v + } + + hasSplat := false + + buf.WriteString(first) + for _, part := range remain { + // Attempt to convert old-style splat indexing to new one + // e.g. res.label.*.attr[idx] to res.label[idx].attr + if part == "*" && hasCount { + hasSplat = true + buf.WriteString(fmt.Sprintf("[%s]", keySrc)) + continue + } + + buf.WriteByte('.') + buf.WriteString(part) + } + + if !hasSplat { + buf.WriteString("[") + buf.Write(keySrc) + buf.WriteString("]") + } + + case *hilast.Output: + if len(tv.Exprs) == 1 { + item := tv.Exprs[0] + naked := true + if lit, ok := item.(*hilast.LiteralNode); ok { + if _, ok := lit.Value.(string); ok { + naked = false + } + } + if naked { + // If there's only one expression and it isn't a literal string + // then we'll just output it naked, since wrapping a single + // expression in interpolation is no longer idiomatic. + interped, interpDiags := upgradeExpr(item, filename, true, an) + diags = diags.Append(interpDiags) + buf.Write(interped) + break + } + } + + buf.WriteString(`"`) + for _, item := range tv.Exprs { + if lit, ok := item.(*hilast.LiteralNode); ok { + if litStr, ok := lit.Value.(string); ok { + printStringLiteralFromHILOutput(&buf, litStr) + continue + } + } + + interped, interpDiags := upgradeExpr(item, filename, true, an) + diags = diags.Append(interpDiags) + + buf.WriteString("${") + buf.Write(interped) + buf.WriteString("}") + } + buf.WriteString(`"`) + + case hilast.Node: + // Nothing reasonable we can do here, so we should've handled all of + // the possibilities above. 
+ panic(fmt.Errorf("upgradeExpr doesn't handle HIL node type %T", tv)) + + default: + // If we end up in here then the caller gave us something completely invalid. + panic(fmt.Errorf("upgradeExpr on unsupported type %T", val)) + + } + + return buf.Bytes(), diags +} + +func validateHilAddress(address, filename string) tfdiags.Diagnostics { + parts := strings.Split(address, ".") + var diags tfdiags.Diagnostics + + label, ok := getResourceLabel(parts) + if ok && !hcl2syntax.ValidIdentifier(label) { + // We can't get any useful source location out of HIL unfortunately + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + fmt.Sprintf("Invalid address (%s) in ./%s", address, filename), + // The label could be invalid for another reason + // but this is the most likely, so we add it as hint + "Names of objects (resources, modules, etc) may no longer start with digits.")) + } + + return diags +} + +func getResourceLabel(parts []string) (string, bool) { + if len(parts) < 1 { + return "", false + } + + if parts[0] == "data" { + if len(parts) < 3 { + return "", false + } + return parts[2], true + } + + if len(parts) < 2 { + return "", false + } + + return parts[1], true +} + +// transformCountPseudoAttribute deals with the .count pseudo-attributes +// that 0.11 and prior allowed for resources. These no longer exist, +// because they don't do anything we can't do with the length(...) function. +func transformCountPseudoAttribute(buf *bytes.Buffer, parts []string, an *analysis) (transformed bool) { + if len(parts) > 0 { + var rAddr addrs.Resource + switch parts[0] { + case "data": + if len(parts) == 4 && parts[3] == "count" { + rAddr.Mode = addrs.DataResourceMode + rAddr.Type = parts[1] + rAddr.Name = parts[2] + } + default: + if len(parts) == 3 && parts[2] == "count" { + rAddr.Mode = addrs.ManagedResourceMode + rAddr.Type = parts[0] + rAddr.Name = parts[1] + } + } + // We need to check if the thing being referenced is actually an + // existing resource, because other three-part traversals might + // coincidentally end with "count". + if hasCount, exists := an.ResourceHasCount[rAddr]; exists { + if hasCount { + buf.WriteString("length(") + buf.WriteString(rAddr.String()) + buf.WriteString(")") + } else { + // If the resource does not have count, the .count + // attr would've always returned 1 before. + buf.WriteString("1") + } + transformed = true + return + } + } + return +} + +func printHilTraversalPartsAsHcl2(buf *bytes.Buffer, parts []string) { + first, remain := parts[0], parts[1:] + buf.WriteString(first) + seenSplat := false + for _, part := range remain { + if part == "*" { + seenSplat = true + buf.WriteString(".*") + continue + } + + // Other special cases apply only if we've not previously + // seen a splat expression marker, since attribute vs. index + // syntax have different interpretations after a simple splat. + if !seenSplat { + if v, err := strconv.Atoi(part); err == nil { + // Looks like it's old-style index traversal syntax foo.0.bar + // so we'll replace with canonical index syntax foo[0].bar. + fmt.Fprintf(buf, "[%d]", v) + continue + } + if !hcl2syntax.ValidIdentifier(part) { + // This should be rare since HIL's identifier syntax is _close_ + // to HCL2's, but we'll get here if one of the intervening + // parts is not a valid identifier in isolation, since HIL + // did not consider these to be separate identifiers. + // e.g. foo.1bar would be invalid in HCL2; must instead be foo["1bar"]. 
+ buf.WriteByte('[') + printQuotedString(buf, part) + buf.WriteByte(']') + continue + } + } + + buf.WriteByte('.') + buf.WriteString(part) + } +} + +func upgradeHeredocBody(buf *bytes.Buffer, val *hilast.Output, filename string, an *analysis) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for _, item := range val.Exprs { + if lit, ok := item.(*hilast.LiteralNode); ok { + if litStr, ok := lit.Value.(string); ok { + printHeredocLiteralFromHILOutput(buf, litStr) + continue + } + } + interped, interpDiags := upgradeExpr(item, filename, true, an) + diags = diags.Append(interpDiags) + + buf.WriteString("${") + buf.Write(interped) + buf.WriteString("}") + } + + return diags +} + +func upgradeTraversalExpr(val interface{}, filename string, an *analysis) ([]byte, tfdiags.Diagnostics) { + if lit, ok := val.(*hcl1ast.LiteralType); ok && lit.Token.Type == hcl1token.STRING { + trStr := lit.Token.Value().(string) + if strings.HasSuffix(trStr, ".%") || strings.HasSuffix(trStr, ".#") { + // Terraform 0.11 would often not validate traversals given in + // strings and so users would get away with this sort of + // flatmap-implementation-detail reference, particularly inside + // ignore_changes. We'll just trim these off to tolerate it, + // rather than failing below in ParseTraversalAbs. + trStr = trStr[:len(trStr)-2] + } + trSrc := []byte(trStr) + _, trDiags := hcl2syntax.ParseTraversalAbs(trSrc, "", hcl2.Pos{}) + if !trDiags.HasErrors() { + return trSrc, nil + } + } + return upgradeExpr(val, filename, false, an) +} + +var hilArithmeticOpSyms = map[hilast.ArithmeticOp]string{ + hilast.ArithmeticOpAdd: " + ", + hilast.ArithmeticOpSub: " - ", + hilast.ArithmeticOpMul: " * ", + hilast.ArithmeticOpDiv: " / ", + hilast.ArithmeticOpMod: " % ", + + hilast.ArithmeticOpLogicalAnd: " && ", + hilast.ArithmeticOpLogicalOr: " || ", + + hilast.ArithmeticOpEqual: " == ", + hilast.ArithmeticOpNotEqual: " != ", + hilast.ArithmeticOpLessThan: " < ", + hilast.ArithmeticOpLessThanOrEqual: " <= ", + hilast.ArithmeticOpGreaterThan: " > ", + hilast.ArithmeticOpGreaterThanOrEqual: " >= ", +} + +// upgradeTraversalParts might alter the given split parts from a HIL-style +// variable access to account for renamings made in Terraform v0.12. +func upgradeTraversalParts(parts []string, an *analysis) []string { + parts = upgradeCountTraversalParts(parts, an) + parts = upgradeTerraformRemoteStateTraversalParts(parts, an) + return parts +} + +func upgradeCountTraversalParts(parts []string, an *analysis) []string { + // test_instance.foo.id needs to become test_instance.foo[0].id if + // count is set for test_instance.foo. Likewise, if count _isn't_ set + // then test_instance.foo.0.id must become test_instance.foo.id. + if len(parts) < 3 { + return parts + } + var addr addrs.Resource + var idxIdx int + switch parts[0] { + case "data": + addr.Mode = addrs.DataResourceMode + addr.Type = parts[1] + addr.Name = parts[2] + idxIdx = 3 + default: + addr.Mode = addrs.ManagedResourceMode + addr.Type = parts[0] + addr.Name = parts[1] + idxIdx = 2 + } + + hasCount, exists := an.ResourceHasCount[addr] + if !exists { + // Probably not actually a resource instance at all, then. + return parts + } + + // Since at least one attribute is required after a resource reference + // prior to Terraform v0.12, we can assume there will be at least enough + // parts to contain the index even if no index is actually present. 
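+ //
+ // Concretely, reusing the examples from the comment at the top of
+ // this function:
+ //
+ // test_instance.foo.id => test_instance.foo[0].id (count set)
+ // test_instance.foo.0.id => test_instance.foo.id (count not set)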
+ if idxIdx >= len(parts) { + return parts + } + + maybeIdx := parts[idxIdx] + switch { + case hasCount: + if _, err := strconv.Atoi(maybeIdx); err == nil || maybeIdx == "*" { + // Has an index already, so no changes required. + return parts + } + // Need to insert index zero at idxIdx. + log.Printf("[TRACE] configupgrade: %s has count but reference does not have index, so adding one", addr) + newParts := make([]string, len(parts)+1) + copy(newParts, parts[:idxIdx]) + newParts[idxIdx] = "0" + copy(newParts[idxIdx+1:], parts[idxIdx:]) + return newParts + default: + // For removing indexes we'll be more conservative and only remove + // exactly index "0", because other indexes on a resource without + // count are invalid anyway and we're better off letting the normal + // configuration parser deal with that. + if maybeIdx != "0" { + return parts + } + + // Need to remove the index zero. + log.Printf("[TRACE] configupgrade: %s does not have count but reference has index, so removing it", addr) + newParts := make([]string, len(parts)-1) + copy(newParts, parts[:idxIdx]) + copy(newParts[idxIdx:], parts[idxIdx+1:]) + return newParts + } +} + +func upgradeTerraformRemoteStateTraversalParts(parts []string, an *analysis) []string { + // data.terraform_remote_state.x.foo needs to become + // data.terraform_remote_state.x.outputs.foo unless "foo" is a real + // attribute in the object type implied by the remote state schema. + if len(parts) < 4 { + return parts + } + if parts[0] != "data" || parts[1] != "terraform_remote_state" { + return parts + } + + attrIdx := 3 + if parts[attrIdx] == "*" { + attrIdx = 4 // data.terraform_remote_state.x.*.foo + } else if _, err := strconv.Atoi(parts[attrIdx]); err == nil { + attrIdx = 4 // data.terraform_remote_state.x.1.foo + } + if attrIdx >= len(parts) { + return parts + } + + attrName := parts[attrIdx] + + // Now we'll use the schema of data.terraform_remote_state to decide if + // the user intended this to be an output, or whether it's one of the real + // attributes of this data source. + var schema *configschema.Block + if providerSchema := an.ProviderSchemas["terraform"]; providerSchema != nil { + schema, _ = providerSchema.SchemaForResourceType(addrs.DataResourceMode, "terraform_remote_state") + } + // Schema should be available in all reasonable cases, but might be nil + // if input configuration contains a reference to a remote state data resource + // without actually defining that data resource. In that weird edge case, + // we'll just assume all attributes are outputs. + if schema != nil && schema.ImpliedType().HasAttribute(attrName) { + // User is accessing one of the real attributes, then, and we have + // no need to rewrite it. + return parts + } + + // If we get down here then our task is to produce a new parts slice + // that has the fixed additional attribute name "outputs" inserted at + // attrIdx, retaining all other parts. 
+ newParts := make([]string, len(parts)+1) + copy(newParts, parts[:attrIdx]) + newParts[attrIdx] = "outputs" + copy(newParts[attrIdx+1:], parts[attrIdx:]) + return newParts +} + +func typeIsSettableFromTupleCons(ty cty.Type) bool { + return ty.IsListType() || ty.IsTupleType() || ty.IsSetType() +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_native.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_native.go new file mode 100644 index 00000000..ec55064a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrade_native.go @@ -0,0 +1,796 @@ +package configupgrade + +import ( + "bytes" + "fmt" + "io" + "log" + "regexp" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + + hcl1ast "github.com/hashicorp/hcl/hcl/ast" + hcl1parser "github.com/hashicorp/hcl/hcl/parser" + hcl1printer "github.com/hashicorp/hcl/hcl/printer" + hcl1token "github.com/hashicorp/hcl/hcl/token" + + hcl2 "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + backendinit "github.com/hashicorp/terraform/backend/init" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" +) + +type upgradeFileResult struct { + Content []byte + ProviderRequirements map[string]version.Constraints +} + +func (u *Upgrader) upgradeNativeSyntaxFile(filename string, src []byte, an *analysis) (upgradeFileResult, tfdiags.Diagnostics) { + var result upgradeFileResult + var diags tfdiags.Diagnostics + + log.Printf("[TRACE] configupgrade: Working on %q", filename) + + var buf bytes.Buffer + + f, err := hcl1parser.Parse(src) + if err != nil { + return result, diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Syntax error in configuration file", + Detail: fmt.Sprintf("Error while parsing: %s", err), + Subject: hcl1ErrSubjectRange(filename, err), + }) + } + + rootList := f.Node.(*hcl1ast.ObjectList) + rootItems := rootList.Items + adhocComments := collectAdhocComments(f) + + for _, item := range rootItems { + comments := adhocComments.TakeBefore(item) + for _, group := range comments { + printComments(&buf, group) + buf.WriteByte('\n') // Extra separator after each group + } + + blockType := item.Keys[0].Token.Value().(string) + labels := make([]string, len(item.Keys)-1) + for i, key := range item.Keys[1:] { + labels[i] = key.Token.Value().(string) + } + body, isObject := item.Val.(*hcl1ast.ObjectType) + if !isObject { + // Should never happen for valid input, since we don't expect + // any non-block items at our top level. + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "Unsupported top-level attribute", + Detail: fmt.Sprintf("Attribute %q is not expected here, so its expression was not upgraded.", blockType), + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + // Preserve the item as-is, using the hcl1printer package. + buf.WriteString("# TF-UPGRADE-TODO: Top-level attributes are not valid, so this was not automatically upgraded.\n") + hcl1printer.Fprint(&buf, item) + buf.WriteString("\n\n") + continue + } + declRange := hcl1PosRange(filename, item.Keys[0].Pos()) + + switch blockType { + + case "resource", "data": + if len(labels) != 2 { + // Should never happen for valid input. 
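+ // (Valid input always has exactly two labels here; for example,
+ // `resource "null_resource" "example"` yields ["null_resource", "example"].)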
+ diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must have two labels: the type and the name.", blockType), + Subject: &declRange, + }) + continue + } + + rAddr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: labels[0], + Name: labels[1], + } + if blockType == "data" { + rAddr.Mode = addrs.DataResourceMode + } + + log.Printf("[TRACE] configupgrade: Upgrading %s at %s", rAddr, declRange) + moreDiags := u.upgradeNativeSyntaxResource(filename, &buf, rAddr, item, an, adhocComments) + diags = diags.Append(moreDiags) + + case "provider": + if len(labels) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must have one label: the provider type.", blockType), + Subject: &declRange, + }) + continue + } + + pType := labels[0] + log.Printf("[TRACE] configupgrade: Upgrading provider.%s at %s", pType, declRange) + moreDiags := u.upgradeNativeSyntaxProvider(filename, &buf, pType, item, an, adhocComments) + diags = diags.Append(moreDiags) + + case "terraform": + if len(labels) != 0 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must not have any labels.", blockType), + Subject: &declRange, + }) + continue + } + moreDiags := u.upgradeNativeSyntaxTerraformBlock(filename, &buf, item, an, adhocComments) + diags = diags.Append(moreDiags) + + case "variable": + if len(labels) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must have one label: the variable name.", blockType), + Subject: &declRange, + }) + continue + } + + printComments(&buf, item.LeadComment) + printBlockOpen(&buf, blockType, labels, item.LineComment) + rules := bodyContentRules{ + "description": noInterpAttributeRule(filename, cty.String, an), + "default": noInterpAttributeRule(filename, cty.DynamicPseudoType, an), + "type": maybeBareKeywordAttributeRule(filename, an, map[string]string{ + // "list" and "map" in older versions were documented to + // mean list and map of strings, so we'll migrate to that + // and let the user adjust it to some other type if desired. 
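+ // For example, the 0.11-style declaration `type = "list"` becomes
+ // `type = list(string)` after upgrading, per the mapping below.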
+ "list": `list(string)`, + "map": `map(string)`, + }), + } + log.Printf("[TRACE] configupgrade: Upgrading var.%s at %s", labels[0], declRange) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("var.%s", labels[0]), &buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + case "output": + if len(labels) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must have one label: the output name.", blockType), + Subject: &declRange, + }) + continue + } + + printComments(&buf, item.LeadComment) + if invalidLabel(labels[0]) { + printLabelTodo(&buf, labels[0]) + } + printBlockOpen(&buf, blockType, labels, item.LineComment) + + rules := bodyContentRules{ + "description": noInterpAttributeRule(filename, cty.String, an), + "value": normalAttributeRule(filename, cty.DynamicPseudoType, an), + "sensitive": noInterpAttributeRule(filename, cty.Bool, an), + "depends_on": dependsOnAttributeRule(filename, an), + } + log.Printf("[TRACE] configupgrade: Upgrading output.%s at %s", labels[0], declRange) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("output.%s", labels[0]), &buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + case "module": + if len(labels) != 1 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: fmt.Sprintf("Invalid %s block", blockType), + Detail: fmt.Sprintf("A %s block must have one label: the module call name.", blockType), + Subject: &declRange, + }) + continue + } + + // Since upgrading is a single-module endeavor, we don't have access + // to the configuration of the child module here, but we know that + // in practice all arguments that aren't reserved meta-arguments + // in a module block are normal expression attributes so we'll + // start with the straightforward mapping of those and override + // the special lifecycle arguments below. + rules := justAttributesBodyRules(filename, body, an) + rules["source"] = moduleSourceRule(filename, an) + rules["version"] = noInterpAttributeRule(filename, cty.String, an) + rules["providers"] = func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + subBody, ok := item.Val.(*hcl1ast.ObjectType) + if !ok { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid providers argument", + Detail: `The "providers" argument must be a map from provider addresses in the child module to corresponding provider addresses in this module.`, + Subject: &declRange, + }) + return diags + } + + // We're gonna cheat here and use justAttributesBodyRules to + // find all the attribute names but then just rewrite them all + // to be our specialized traversal-style mapping instead. 
+ subRules := justAttributesBodyRules(filename, subBody, an) + for k := range subRules { + subRules[k] = maybeBareTraversalAttributeRule(filename, an) + } + buf.WriteString("providers = {\n") + bodyDiags := upgradeBlockBody(filename, blockAddr, buf, subBody.List.Items, body.Rbrace, subRules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n") + + return diags + } + + printComments(&buf, item.LeadComment) + printBlockOpen(&buf, blockType, labels, item.LineComment) + log.Printf("[TRACE] configupgrade: Upgrading module.%s at %s", labels[0], declRange) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("module.%s", labels[0]), &buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + case "locals": + log.Printf("[TRACE] configupgrade: Upgrading locals block at %s", declRange) + printComments(&buf, item.LeadComment) + printBlockOpen(&buf, blockType, labels, item.LineComment) + + // The "locals" block contents are free-form declarations, so + // we'll just use the default attribute mapping rule for everything + // inside it. + rules := justAttributesBodyRules(filename, body, an) + log.Printf("[TRACE] configupgrade: Upgrading locals block at %s", declRange) + bodyDiags := upgradeBlockBody(filename, "locals", &buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + default: + // Should never happen for valid input, because the above cases + // are exhaustive for valid blocks as of Terraform 0.11. + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagWarning, + Summary: "Unsupported root block type", + Detail: fmt.Sprintf("The block type %q is not expected here, so its content was not upgraded.", blockType), + Subject: hcl1PosRange(filename, item.Keys[0].Pos()).Ptr(), + }) + + // Preserve the block as-is, using the hcl1printer package. + buf.WriteString("# TF-UPGRADE-TODO: Block type was not recognized, so this block and its contents were not automatically upgraded.\n") + hcl1printer.Fprint(&buf, item) + buf.WriteString("\n\n") + continue + } + } + + // Print out any leftover comments + for _, group := range *adhocComments { + printComments(&buf, group) + } + + result.Content = buf.Bytes() + + return result, diags +} + +func (u *Upgrader) upgradeNativeSyntaxResource(filename string, buf *bytes.Buffer, addr addrs.Resource, item *hcl1ast.ObjectItem, an *analysis, adhocComments *commentQueue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + body := item.Val.(*hcl1ast.ObjectType) + declRange := hcl1PosRange(filename, item.Keys[0].Pos()) + + // We should always have a schema for each provider in our analysis + // object. If not, it's a bug in the analyzer. 
+ providerType, ok := an.ResourceProviderType[addr] + if !ok { + panic(fmt.Sprintf("unknown provider type for %s", addr.String())) + } + providerSchema, ok := an.ProviderSchemas[providerType] + if !ok { + panic(fmt.Sprintf("missing schema for provider type %q", providerType)) + } + schema, _ := providerSchema.SchemaForResourceAddr(addr) + if schema == nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Unknown resource type", + Detail: fmt.Sprintf("The resource type %q is not known to the currently-selected version of provider %q.", addr.Type, providerType), + Subject: &declRange, + }) + return diags + } + + var blockType string + switch addr.Mode { + case addrs.ManagedResourceMode: + blockType = "resource" + case addrs.DataResourceMode: + blockType = "data" + } + labels := []string{addr.Type, addr.Name} + + rules := schemaDefaultBodyRules(filename, schema, an, adhocComments) + rules["count"] = normalAttributeRule(filename, cty.Number, an) + rules["depends_on"] = dependsOnAttributeRule(filename, an) + rules["provider"] = maybeBareTraversalAttributeRule(filename, an) + rules["lifecycle"] = nestedBlockRule(filename, lifecycleBlockBodyRules(filename, an), an, adhocComments) + if addr.Mode == addrs.ManagedResourceMode { + rules["connection"] = connectionBlockRule(filename, addr.Type, an, adhocComments) + rules["provisioner"] = provisionerBlockRule(filename, addr.Type, an, adhocComments) + } + + printComments(buf, item.LeadComment) + if invalidLabel(labels[1]) { + printLabelTodo(buf, labels[1]) + } + printBlockOpen(buf, blockType, labels, item.LineComment) + bodyDiags := upgradeBlockBody(filename, addr.String(), buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + return diags +} + +func (u *Upgrader) upgradeNativeSyntaxProvider(filename string, buf *bytes.Buffer, typeName string, item *hcl1ast.ObjectItem, an *analysis, adhocComments *commentQueue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + body := item.Val.(*hcl1ast.ObjectType) + + // We should always have a schema for each provider in our analysis + // object. If not, it's a bug in the analyzer. 
+ providerSchema, ok := an.ProviderSchemas[typeName] + if !ok { + panic(fmt.Sprintf("missing schema for provider type %q", typeName)) + } + schema := providerSchema.Provider + rules := schemaDefaultBodyRules(filename, schema, an, adhocComments) + rules["alias"] = noInterpAttributeRule(filename, cty.String, an) + rules["version"] = noInterpAttributeRule(filename, cty.String, an) + + printComments(buf, item.LeadComment) + printBlockOpen(buf, "provider", []string{typeName}, item.LineComment) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("provider.%s", typeName), buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + return diags +} + +func (u *Upgrader) upgradeNativeSyntaxTerraformBlock(filename string, buf *bytes.Buffer, item *hcl1ast.ObjectItem, an *analysis, adhocComments *commentQueue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + body := item.Val.(*hcl1ast.ObjectType) + + rules := bodyContentRules{ + "required_version": noInterpAttributeRule(filename, cty.String, an), + "backend": func(buf *bytes.Buffer, blockAddr string, item *hcl1ast.ObjectItem) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + declRange := hcl1PosRange(filename, item.Keys[0].Pos()) + if len(item.Keys) != 2 { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: `Invalid backend block`, + Detail: `A backend block must have one label: the backend type name.`, + Subject: &declRange, + }) + return diags + } + + typeName := item.Keys[1].Token.Value().(string) + beFn := backendinit.Backend(typeName) + if beFn == nil { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Unsupported backend type", + Detail: fmt.Sprintf("Terraform does not support a backend type named %q.", typeName), + Subject: &declRange, + }) + return diags + } + be := beFn() + schema := be.ConfigSchema() + rules := schemaNoInterpBodyRules(filename, schema, an, adhocComments) + + body := item.Val.(*hcl1ast.ObjectType) + + printComments(buf, item.LeadComment) + printBlockOpen(buf, "backend", []string{typeName}, item.LineComment) + bodyDiags := upgradeBlockBody(filename, fmt.Sprintf("terraform.backend.%s", typeName), buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n") + + return diags + }, + } + + printComments(buf, item.LeadComment) + printBlockOpen(buf, "terraform", nil, item.LineComment) + bodyDiags := upgradeBlockBody(filename, "terraform", buf, body.List.Items, body.Rbrace, rules, adhocComments) + diags = diags.Append(bodyDiags) + buf.WriteString("}\n\n") + + return diags +} + +func upgradeBlockBody(filename string, blockAddr string, buf *bytes.Buffer, args []*hcl1ast.ObjectItem, end hcl1token.Pos, rules bodyContentRules, adhocComments *commentQueue) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + for i, arg := range args { + comments := adhocComments.TakeBefore(arg) + for _, group := range comments { + printComments(buf, group) + buf.WriteByte('\n') // Extra separator after each group + } + + printComments(buf, arg.LeadComment) + + name := arg.Keys[0].Token.Value().(string) + + rule, expected := rules[name] + if !expected { + if arg.Assign.IsValid() { + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Unrecognized attribute name", + Detail: fmt.Sprintf("No attribute named %q is expected in %s.", name, blockAddr), + Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(), + }) + } else { + diags = 
diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Unrecognized block type", + Detail: fmt.Sprintf("Blocks of type %q are not expected in %s.", name, blockAddr), + Subject: hcl1PosRange(filename, arg.Keys[0].Pos()).Ptr(), + }) + } + continue + } + + itemDiags := rule(buf, blockAddr, arg) + diags = diags.Append(itemDiags) + + // If we have another item and it's more than one line away + // from the current one then we'll print an extra blank line + // to retain that separation. + if (i + 1) < len(args) { + next := args[i+1] + thisPos := hcl1NodeEndPos(arg) + nextPos := next.Pos() + if nextPos.Line-thisPos.Line > 1 { + buf.WriteByte('\n') + } + } + } + + // Before we return, we must also print any remaining adhocComments that + // appear between our last item and the closing brace. + comments := adhocComments.TakeBeforePos(end) + for i, group := range comments { + printComments(buf, group) + if i < len(comments)-1 { + buf.WriteByte('\n') // Extra separator after each group + } + } + + return diags +} + +// printDynamicBody prints out a conservative, exhaustive dynamic block body +// for every attribute and nested block in the given schema, for situations +// when a dynamic expression was being assigned to a block type name in input +// configuration and so we can assume it's a list of maps but can't make +// any assumptions about what subset of the schema-specified keys might be +// present in the map values. +func printDynamicBlockBody(buf *bytes.Buffer, iterName string, schema *configschema.Block) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + attrNames := make([]string, 0, len(schema.Attributes)) + for name := range schema.Attributes { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + for _, name := range attrNames { + attrS := schema.Attributes[name] + if !(attrS.Required || attrS.Optional) { // no Computed-only attributes + continue + } + if attrS.Required { + // For required attributes we can generate a simpler expression + // that just assumes the presence of the key representing the + // attribute value. + printAttribute(buf, name, []byte(fmt.Sprintf(`%s.value.%s`, iterName, name)), nil) + } else { + // Otherwise we must be conservative and generate a conditional + // lookup that will just populate nothing at all if the expected + // key is not present. + printAttribute(buf, name, []byte(fmt.Sprintf(`lookup(%s.value, %q, null)`, iterName, name)), nil) + } + } + + blockTypeNames := make([]string, 0, len(schema.BlockTypes)) + for name := range schema.BlockTypes { + blockTypeNames = append(blockTypeNames, name) + } + sort.Strings(blockTypeNames) + for i, name := range blockTypeNames { + blockS := schema.BlockTypes[name] + + // We'll disregard any block type that consists only of computed + // attributes, since otherwise we'll just create weird empty blocks + // that do nothing except create confusion. + if !schemaHasSettableArguments(&blockS.Block) { + continue + } + + if i > 0 || len(attrNames) > 0 { + buf.WriteByte('\n') + } + printBlockOpen(buf, "dynamic", []string{name}, nil) + switch blockS.Nesting { + case configschema.NestingMap: + printAttribute(buf, "for_each", []byte(fmt.Sprintf(`lookup(%s.value, %q, {})`, iterName, name)), nil) + printAttribute(buf, "labels", []byte(fmt.Sprintf(`[%s.key]`, name)), nil) + case configschema.NestingSingle, configschema.NestingGroup: + printAttribute(buf, "for_each", []byte(fmt.Sprintf(`lookup(%s.value, %q, null) != null ? 
[%s.value.%s] : []`, iterName, name, iterName, name)), nil) + default: + printAttribute(buf, "for_each", []byte(fmt.Sprintf(`lookup(%s.value, %q, [])`, iterName, name)), nil) + } + printBlockOpen(buf, "content", nil, nil) + moreDiags := printDynamicBlockBody(buf, name, &blockS.Block) + diags = diags.Append(moreDiags) + buf.WriteString("}\n") + buf.WriteString("}\n") + } + + return diags +} + +func printComments(buf *bytes.Buffer, group *hcl1ast.CommentGroup) { + if group == nil { + return + } + for _, comment := range group.List { + buf.WriteString(comment.Text) + buf.WriteByte('\n') + } +} + +func printBlockOpen(buf *bytes.Buffer, blockType string, labels []string, commentGroup *hcl1ast.CommentGroup) { + buf.WriteString(blockType) + for _, label := range labels { + buf.WriteByte(' ') + printQuotedString(buf, label) + } + buf.WriteString(" {") + if commentGroup != nil { + for _, c := range commentGroup.List { + buf.WriteByte(' ') + buf.WriteString(c.Text) + } + } + buf.WriteByte('\n') +} + +func printAttribute(buf *bytes.Buffer, name string, valSrc []byte, commentGroup *hcl1ast.CommentGroup) { + buf.WriteString(name) + buf.WriteString(" = ") + buf.Write(valSrc) + if commentGroup != nil { + for _, c := range commentGroup.List { + buf.WriteByte(' ') + buf.WriteString(c.Text) + } + } + buf.WriteByte('\n') +} + +func printQuotedString(buf *bytes.Buffer, val string) { + buf.WriteByte('"') + printStringLiteralFromHILOutput(buf, val) + buf.WriteByte('"') +} + +func printStringLiteralFromHILOutput(buf *bytes.Buffer, val string) { + val = strings.Replace(val, `\`, `\\`, -1) + val = strings.Replace(val, `"`, `\"`, -1) + val = strings.Replace(val, "\n", `\n`, -1) + val = strings.Replace(val, "\r", `\r`, -1) + val = strings.Replace(val, `${`, `$${`, -1) + val = strings.Replace(val, `%{`, `%%{`, -1) + buf.WriteString(val) +} + +func printHeredocLiteralFromHILOutput(buf *bytes.Buffer, val string) { + val = strings.Replace(val, `${`, `$${`, -1) + val = strings.Replace(val, `%{`, `%%{`, -1) + buf.WriteString(val) +} + +func collectAdhocComments(f *hcl1ast.File) *commentQueue { + comments := make(map[hcl1token.Pos]*hcl1ast.CommentGroup) + for _, c := range f.Comments { + comments[c.Pos()] = c + } + + // We'll remove from our map any comments that are attached to specific + // nodes as lead or line comments, since we'll find those during our + // walk anyway. 
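+ // In HCL1 terms, a lead comment sits on the line(s) above a node, while a
+ // line comment trails the node on the same line, e.g.:
+ //   # lead comment
+ //   foo = "bar" # line comment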
+ hcl1ast.Walk(f, func(nn hcl1ast.Node) (hcl1ast.Node, bool) { + switch t := nn.(type) { + case *hcl1ast.LiteralType: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + delete(comments, comment.Pos()) + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + delete(comments, comment.Pos()) + } + } + case *hcl1ast.ObjectItem: + if t.LeadComment != nil { + for _, comment := range t.LeadComment.List { + delete(comments, comment.Pos()) + } + } + + if t.LineComment != nil { + for _, comment := range t.LineComment.List { + delete(comments, comment.Pos()) + } + } + } + + return nn, true + }) + + if len(comments) == 0 { + var ret commentQueue + return &ret + } + + ret := make([]*hcl1ast.CommentGroup, 0, len(comments)) + for _, c := range comments { + ret = append(ret, c) + } + sort.Slice(ret, func(i, j int) bool { + return ret[i].Pos().Before(ret[j].Pos()) + }) + queue := commentQueue(ret) + return &queue +} + +type commentQueue []*hcl1ast.CommentGroup + +func (q *commentQueue) TakeBeforeToken(token hcl1token.Token) []*hcl1ast.CommentGroup { + return q.TakeBeforePos(token.Pos) +} + +func (q *commentQueue) TakeBefore(node hcl1ast.Node) []*hcl1ast.CommentGroup { + return q.TakeBeforePos(node.Pos()) +} + +func (q *commentQueue) TakeBeforePos(pos hcl1token.Pos) []*hcl1ast.CommentGroup { + toPos := pos + var i int + for i = 0; i < len(*q); i++ { + if (*q)[i].Pos().After(toPos) { + break + } + } + if i == 0 { + return nil + } + + ret := (*q)[:i] + *q = (*q)[i:] + + return ret +} + +// hcl1NodeEndPos tries to find the latest possible position in the given +// node. This is primarily to try to find the last line number of a multi-line +// construct and is a best-effort sort of thing because HCL1 only tracks +// start positions for tokens and has no generalized way to find the full +// range for a single node. +func hcl1NodeEndPos(node hcl1ast.Node) hcl1token.Pos { + switch tn := node.(type) { + case *hcl1ast.ObjectItem: + if tn.LineComment != nil && len(tn.LineComment.List) > 0 { + return tn.LineComment.List[len(tn.LineComment.List)-1].Start + } + return hcl1NodeEndPos(tn.Val) + case *hcl1ast.ListType: + return tn.Rbrack + case *hcl1ast.ObjectType: + return tn.Rbrace + default: + // If all else fails, we'll just return the position of what we were given. 
+ return tn.Pos() + } +} + +func hcl1ErrSubjectRange(filename string, err error) *hcl2.Range { + if pe, isPos := err.(*hcl1parser.PosError); isPos { + return hcl1PosRange(filename, pe.Pos).Ptr() + } + return nil +} + +func hcl1PosRange(filename string, pos hcl1token.Pos) hcl2.Range { + return hcl2.Range{ + Filename: filename, + Start: hcl2.Pos{ + Line: pos.Line, + Column: pos.Column, + Byte: pos.Offset, + }, + End: hcl2.Pos{ + Line: pos.Line, + Column: pos.Column, + Byte: pos.Offset, + }, + } +} + +func passthruBlockTodo(w io.Writer, node hcl1ast.Node, msg string) { + fmt.Fprintf(w, "\n# TF-UPGRADE-TODO: %s\n", msg) + hcl1printer.Fprint(w, node) + w.Write([]byte{'\n', '\n'}) +} + +func schemaHasSettableArguments(schema *configschema.Block) bool { + for _, attrS := range schema.Attributes { + if attrS.Optional || attrS.Required { + return true + } + } + for _, blockS := range schema.BlockTypes { + if schemaHasSettableArguments(&blockS.Block) { + return true + } + } + return false +} + +func invalidLabel(name string) bool { + matched, err := regexp.Match(`[0-9]`, []byte{name[0]}) + if err == nil { + return matched + } + // This isn't likely, but if there's an error here we'll just ignore it and + // move on. + return false +} + +func printLabelTodo(buf *bytes.Buffer, label string) { + buf.WriteString("# TF-UPGRADE-TODO: In Terraform v0.11 and earlier, it was possible to begin a\n" + + "# resource name with a number, but it is no longer possible in Terraform v0.12.\n" + + "#\n" + + "# Rename the resource and run `terraform state mv` to apply the rename in the\n" + + "# state. Detailed information on the `state move` command can be found in the\n" + + "# documentation online: https://www.terraform.io/docs/commands/state/mv.html\n", + ) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrader.go b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrader.go new file mode 100644 index 00000000..67ca6af1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/configupgrade/upgrader.go @@ -0,0 +1,13 @@ +package configupgrade + +import ( + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" +) + +// Upgrader is the main type in this package, containing all of the +// dependencies that are needed to perform upgrades. +type Upgrader struct { + Providers providers.Resolver + Provisioners map[string]terraform.ProvisionerFactory +} diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go new file mode 100644 index 00000000..b1984768 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/depends_on.go @@ -0,0 +1,23 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) { + var ret []hcl.Traversal + exprs, diags := hcl.ExprList(attr.Expr) + + for _, expr := range exprs { + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + diags = append(diags, travDiags...) 
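+ // Both the legacy quoted form depends_on = ["aws_instance.web"] and the
+ // 0.12 form depends_on = [aws_instance.web] should yield the traversal
+ // aws_instance.web here, thanks to the string shim above.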
+ if len(traversal) != 0 { + ret = append(ret, traversal) + } + } + + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go new file mode 100644 index 00000000..f01eb79f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/doc.go @@ -0,0 +1,19 @@ +// Package configs contains types that represent Terraform configurations and +// the different elements thereof. +// +// The functionality in this package can be used for some static analyses of +// Terraform configurations, but this package generally exposes representations +// of the configuration source code rather than the result of evaluating these +// objects. The sibling package "lang" deals with evaluation of structures +// and expressions in the configuration. +// +// Due to its close relationship with HCL, this package makes frequent use +// of types from the HCL API, including raw HCL diagnostic messages. Such +// diagnostics can be converted into Terraform-flavored diagnostics, if needed, +// using functions in the sibling package tfdiags. +// +// The Parser type is the main entry-point into this package. The LoadConfigDir +// method can be used to load a single module directory, and then a full +// configuration (including any descendent modules) can be produced using +// the top-level BuildConfig method. +package configs diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go new file mode 100644 index 00000000..250f9d34 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module.go @@ -0,0 +1,404 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" +) + +// Module is a container for a set of configuration constructs that are +// evaluated within a common namespace. +type Module struct { + // SourceDir is the filesystem directory that the module was loaded from. + // + // This is populated automatically only for configurations loaded with + // LoadConfigDir. If the parser is using a virtual filesystem then the + // path here will be in terms of that virtual filesystem. + + // Any other caller that constructs a module directly with NewModule may + // assign a suitable value to this attribute before using it for other + // purposes. It should be treated as immutable by all consumers of Module + // values. + SourceDir string + + CoreVersionConstraints []VersionConstraint + + Backend *Backend + ProviderConfigs map[string]*Provider + ProviderRequirements map[string][]VersionConstraint + + Variables map[string]*Variable + Locals map[string]*Local + Outputs map[string]*Output + + ModuleCalls map[string]*ModuleCall + + ManagedResources map[string]*Resource + DataResources map[string]*Resource +} + +// File describes the contents of a single configuration file. +// +// Individual files are not usually used alone, but rather combined together +// with other files (conventionally, those in the same directory) to produce +// a *Module, using NewModule. +// +// At the level of an individual file we represent directly the structural +// elements present in the file, without any attempt to detect conflicting +// declarations. A File object can therefore be used for some basic static +// analysis of individual elements, but must be built into a Module to detect +// duplicate declarations. 
+type File struct { + CoreVersionConstraints []VersionConstraint + + Backends []*Backend + ProviderConfigs []*Provider + ProviderRequirements []*ProviderRequirement + + Variables []*Variable + Locals []*Local + Outputs []*Output + + ModuleCalls []*ModuleCall + + ManagedResources []*Resource + DataResources []*Resource +} + +// NewModule takes a list of primary files and a list of override files and +// produces a *Module by combining the files together. +// +// If there are any conflicting declarations in the given files -- for example, +// if the same variable name is defined twice -- then the resulting module +// will be incomplete and error diagnostics will be returned. Careful static +// analysis of the returned Module is still possible in this case, but the +// module will probably not be semantically valid. +func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) { + var diags hcl.Diagnostics + mod := &Module{ + ProviderConfigs: map[string]*Provider{}, + ProviderRequirements: map[string][]VersionConstraint{}, + Variables: map[string]*Variable{}, + Locals: map[string]*Local{}, + Outputs: map[string]*Output{}, + ModuleCalls: map[string]*ModuleCall{}, + ManagedResources: map[string]*Resource{}, + DataResources: map[string]*Resource{}, + } + + for _, file := range primaryFiles { + fileDiags := mod.appendFile(file) + diags = append(diags, fileDiags...) + } + + for _, file := range overrideFiles { + fileDiags := mod.mergeFile(file) + diags = append(diags, fileDiags...) + } + + return mod, diags +} + +// ResourceByAddr returns the configuration for the resource with the given +// address, or nil if there is no such resource. +func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource { + key := addr.String() + switch addr.Mode { + case addrs.ManagedResourceMode: + return m.ManagedResources[key] + case addrs.DataResourceMode: + return m.DataResources[key] + default: + return nil + } +} + +func (m *Module) appendFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + for _, constraint := range file.CoreVersionConstraints { + // If there are any conflicting requirements then we'll catch them + // when we actually check these constraints. + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + + for _, b := range file.Backends { + if m.Backend != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange), + Subject: &b.DeclRange, + }) + continue + } + m.Backend = b + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + if existing, exists := m.ProviderConfigs[key]; exists { + if existing.Alias == "" { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } else { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider configuration", + Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. 
Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange), + Subject: &pc.DeclRange, + }) + } + continue + } + m.ProviderConfigs[key] = pc + } + + for _, reqd := range file.ProviderRequirements { + m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement) + } + + for _, v := range file.Variables { + if existing, exists := m.Variables[v.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate variable declaration", + Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &v.DeclRange, + }) + } + m.Variables[v.Name] = v + } + + for _, l := range file.Locals { + if existing, exists := m.Locals[l.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate local value definition", + Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &l.DeclRange, + }) + } + m.Locals[l.Name] = l + } + + for _, o := range file.Outputs { + if existing, exists := m.Outputs[o.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate output definition", + Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange), + Subject: &o.DeclRange, + }) + } + m.Outputs[o.Name] = o + } + + for _, mc := range file.ModuleCalls { + if existing, exists := m.ModuleCalls[mc.Name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate module call", + Detail: fmt.Sprintf("A module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange), + Subject: &mc.DeclRange, + }) + } + m.ModuleCalls[mc.Name] = mc + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + if existing, exists := m.ManagedResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.ManagedResources[key] = r + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + if existing, exists := m.DataResources[key]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type), + Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange), + Subject: &r.DeclRange, + }) + continue + } + m.DataResources[key] = r + } + + return diags +} + +func (m *Module) mergeFile(file *File) hcl.Diagnostics { + var diags hcl.Diagnostics + + if len(file.CoreVersionConstraints) != 0 { + // This is a bit of a strange case for overriding since we normally + // would union together across multiple files anyway, but we'll + // allow it and have each override file clobber any existing list. 
+ m.CoreVersionConstraints = nil + for _, constraint := range file.CoreVersionConstraints { + m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint) + } + } + + if len(file.Backends) != 0 { + switch len(file.Backends) { + case 1: + m.Backend = file.Backends[0] + default: + // An override file with multiple backends is still invalid, even + // though it can override backends from _other_ files. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate backend configuration", + Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange), + Subject: &file.Backends[1].DeclRange, + }) + } + } + + for _, pc := range file.ProviderConfigs { + key := pc.moduleUniqueKey() + existing, exists := m.ProviderConfigs[key] + if pc.Alias == "" { + // We allow overriding a non-existing _default_ provider configuration + // because the user model is that an absent provider configuration + // implies an empty provider configuration, which is what the user + // is therefore overriding here. + if exists { + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } else { + m.ProviderConfigs[key] = pc + } + } else { + // For aliased providers, there must be a base configuration to + // override. This allows us to detect and report alias typos + // that might otherwise cause the override to not apply. + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base provider configuration for override", + Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias), + Subject: &pc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(pc) + diags = append(diags, mergeDiags...) + } + } + + if len(file.ProviderRequirements) != 0 { + mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements) + } + + for _, v := range file.Variables { + existing, exists := m.Variables[v.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base variable declaration to override", + Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name), + Subject: &v.DeclRange, + }) + continue + } + mergeDiags := existing.merge(v) + diags = append(diags, mergeDiags...) + } + + for _, l := range file.Locals { + existing, exists := m.Locals[l.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base local value definition to override", + Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name), + Subject: &l.DeclRange, + }) + continue + } + mergeDiags := existing.merge(l) + diags = append(diags, mergeDiags...) + } + + for _, o := range file.Outputs { + existing, exists := m.Outputs[o.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing base output definition to override", + Detail: fmt.Sprintf("There is no output named %q. 
An override file can only override an output that was already defined in a primary configuration file.", o.Name), + Subject: &o.DeclRange, + }) + continue + } + mergeDiags := existing.merge(o) + diags = append(diags, mergeDiags...) + } + + for _, mc := range file.ModuleCalls { + existing, exists := m.ModuleCalls[mc.Name] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing module call to override", + Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name), + Subject: &mc.DeclRange, + }) + continue + } + mergeDiags := existing.merge(mc) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.ManagedResources { + key := r.moduleUniqueKey() + existing, exists := m.ManagedResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing resource to override", + Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + for _, r := range file.DataResources { + key := r.moduleUniqueKey() + existing, exists := m.DataResources[key] + if !exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing data resource to override", + Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name), + Subject: &r.DeclRange, + }) + continue + } + mergeDiags := existing.merge(r) + diags = append(diags, mergeDiags...) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go new file mode 100644 index 00000000..8c3ba67c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_call.go @@ -0,0 +1,188 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// ModuleCall represents a "module" block in a module or file. +type ModuleCall struct { + Name string + + SourceAddr string + SourceAddrRange hcl.Range + SourceSet bool + + Config hcl.Body + + Version VersionConstraint + + Count hcl.Expression + ForEach hcl.Expression + + Providers []PassedProviderConfig + + DependsOn []hcl.Traversal + + DeclRange hcl.Range +} + +func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) { + mc := &ModuleCall{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := moduleBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, remain, diags := block.Body.PartialContent(schema) + mc.Config = remain + + if !hclsyntax.ValidIdentifier(mc.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid module instance name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["source"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr) + diags = append(diags, valDiags...) 
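+ // (SourceAddr is kept as the raw string from configuration, e.g. a local
+ // path like "./modules/network" or a registry address; resolving it is
+ // handled elsewhere, not during decoding.)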
+ mc.SourceAddrRange = attr.Expr.Range() + mc.SourceSet = true + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + mc.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) + } + + if attr, exists := content.Attributes["count"]; exists { + mc.Count = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["for_each"]; exists { + mc.ForEach = attr.Expr + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + mc.DependsOn = append(mc.DependsOn, deps...) + + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in module block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["providers"]; exists { + seen := make(map[string]hcl.Range) + pairs, pDiags := hcl.ExprMap(attr.Expr) + diags = append(diags, pDiags...) + for _, pair := range pairs { + key, keyDiags := decodeProviderConfigRef(pair.Key, "providers") + diags = append(diags, keyDiags...) + value, valueDiags := decodeProviderConfigRef(pair.Value, "providers") + diags = append(diags, valueDiags...) + if keyDiags.HasErrors() || valueDiags.HasErrors() { + continue + } + + matchKey := key.String() + if prev, exists := seen[matchKey]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate provider address", + Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev), + Subject: pair.Value.Range().Ptr(), + }) + continue + } + + rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range()) + seen[matchKey] = rng + mc.Providers = append(mc.Providers, PassedProviderConfig{ + InChild: key, + InParent: value, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in module block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return mc, diags +} + +// PassedProviderConfig represents a provider config explicitly passed down to +// a child module, possibly giving it a new local address in the process. 
+type PassedProviderConfig struct { + InChild *ProviderConfigRef + InParent *ProviderConfigRef +} + +var moduleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "source", + Required: true, + }, + { + Name: "version", + }, + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "depends_on", + }, + { + Name: "providers", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + // These are all reserved for future use. + {Type: "lifecycle"}, + {Type: "locals"}, + {Type: "provider", LabelNames: []string{"type"}}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go new file mode 100644 index 00000000..12614c1d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge.go @@ -0,0 +1,247 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// The methods in this file are used by Module.mergeFile to apply overrides +// to our different configuration elements. These methods all follow the +// pattern of mutating the receiver to incorporate settings from the parameter, +// returning error diagnostics if any aspect of the parameter cannot be merged +// into the receiver for some reason. +// +// User expectation is that anything _explicitly_ set in the given object +// should take precedence over the corresponding settings in the receiver, +// but that anything omitted in the given object should be left unchanged. +// In some cases it may be reasonable to do a "deep merge" of certain nested +// features, if it is possible to unambiguously correlate the nested elements +// and their behaviors are orthogonal to each other. + +func (p *Provider) merge(op *Provider) hcl.Diagnostics { + var diags hcl.Diagnostics + + if op.Version.Required != nil { + p.Version = op.Version + } + + p.Config = MergeBodies(p.Config, op.Config) + + return diags +} + +func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) { + // Any provider name that's mentioned in the override gets nilled out in + // our map so that we'll rebuild it below. Any provider not mentioned is + // left unchanged. + for _, reqd := range ovrd { + delete(recv, reqd.Name) + } + for _, reqd := range ovrd { + recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement) + } +} + +func (v *Variable) merge(ov *Variable) hcl.Diagnostics { + var diags hcl.Diagnostics + + if ov.DescriptionSet { + v.Description = ov.Description + v.DescriptionSet = ov.DescriptionSet + } + if ov.Default != cty.NilVal { + v.Default = ov.Default + } + if ov.Type != cty.NilType { + v.Type = ov.Type + } + if ov.ParsingMode != 0 { + v.ParsingMode = ov.ParsingMode + } + + // If the override file overrode type without default or vice-versa then + // it may have created an invalid situation, which we'll catch now by + // attempting to re-convert the value. + // + // Note that here we may be re-converting an already-converted base value + // from the base config. This will be a no-op if the type was not changed, + // but in particular might be user-observable in the edge case where the + // literal value in config could've been converted to the overridden type + // constraint but the converted value cannot. 
In practice, this situation + // should be rare since most of our conversions are interchangeable. + if v.Default != cty.NilVal { + val, err := convert.Convert(v.Default, v.Type) + if err != nil { + // What exactly we'll say in the error message here depends on whether + // it was Default or Type that was overridden here. + switch { + case ov.Type != cty.NilType && ov.Default == cty.NilVal: + // If only the type was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err), + Subject: &ov.DeclRange, + }) + case ov.Type == cty.NilType && ov.Default != cty.NilVal: + // Only the default was overridden + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err), + Subject: &ov.DeclRange, + }) + } + } else { + v.Default = val + } + } + + return diags +} + +func (l *Local) merge(ol *Local) hcl.Diagnostics { + var diags hcl.Diagnostics + + // Since a local is just a single expression in configuration, the + // override definition entirely replaces the base definition, including + // the source range so that we'll send the user to the right place if + // there is an error. + l.Expr = ol.Expr + l.DeclRange = ol.DeclRange + + return diags +} + +func (o *Output) merge(oo *Output) hcl.Diagnostics { + var diags hcl.Diagnostics + + if oo.Description != "" { + o.Description = oo.Description + } + if oo.Expr != nil { + o.Expr = oo.Expr + } + if oo.SensitiveSet { + o.Sensitive = oo.Sensitive + o.SensitiveSet = oo.SensitiveSet + } + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(oo.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics { + var diags hcl.Diagnostics + + if omc.SourceSet { + mc.SourceAddr = omc.SourceAddr + mc.SourceAddrRange = omc.SourceAddrRange + mc.SourceSet = omc.SourceSet + } + + if omc.Count != nil { + mc.Count = omc.Count + } + + if omc.ForEach != nil { + mc.ForEach = omc.ForEach + } + + if len(omc.Version.Required) != 0 { + mc.Version = omc.Version + } + + mc.Config = MergeBodies(mc.Config, omc.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. 
+ if len(omc.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: omc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} + +func (r *Resource) merge(or *Resource) hcl.Diagnostics { + var diags hcl.Diagnostics + + if r.Mode != or.Mode { + // This is always a programming error, since managed and data resources + // are kept in separate maps in the configuration structures. + panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode)) + } + + if or.Count != nil { + r.Count = or.Count + } + if or.ForEach != nil { + r.ForEach = or.ForEach + } + if or.ProviderConfigRef != nil { + r.ProviderConfigRef = or.ProviderConfigRef + } + if r.Mode == addrs.ManagedResourceMode { + // or.Managed is always non-nil for managed resource mode + + if or.Managed.Connection != nil { + r.Managed.Connection = or.Managed.Connection + } + if or.Managed.CreateBeforeDestroySet { + r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy + r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet + } + if len(or.Managed.IgnoreChanges) != 0 { + r.Managed.IgnoreChanges = or.Managed.IgnoreChanges + } + if or.Managed.PreventDestroySet { + r.Managed.PreventDestroy = or.Managed.PreventDestroy + r.Managed.PreventDestroySet = or.Managed.PreventDestroySet + } + if len(or.Managed.Provisioners) != 0 { + r.Managed.Provisioners = or.Managed.Provisioners + } + } + + r.Config = MergeBodies(r.Config, or.Config) + + // We don't allow depends_on to be overridden because that is likely to + // cause confusing misbehavior. + if len(or.DependsOn) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported override", + Detail: "The depends_on argument may not be overridden.", + Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have + }) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go new file mode 100644 index 00000000..0ed561ee --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go @@ -0,0 +1,143 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// MergeBodies creates a new HCL body that contains a combination of the +// given base and override bodies. Attributes and blocks defined in the +// override body take precedence over those of the same name defined in +// the base body. +// +// If any block of a particular type appears in "override" then it will +// replace _all_ of the blocks of the same type in "base" in the new +// body. +func MergeBodies(base, override hcl.Body) hcl.Body { + return mergeBody{ + Base: base, + Override: override, + } +} + +// mergeBody is a hcl.Body implementation that wraps a pair of other bodies +// and allows attributes and blocks within the override to take precedence +// over those defined in the base body. +// +// This is used to deal with dynamically-processed bodies in Module.mergeFile. +// It uses a shallow-only merging strategy where direct attributes defined +// in Override will override attributes of the same name in Base, while any +// blocks defined in Override will hide all blocks of the same type in Base. 
+// +// This cannot possibly "do the right thing" in all cases, because we don't +// have enough information about user intent. However, this behavior is intended +// to be reasonable for simple overriding use-cases. +type mergeBody struct { + Base hcl.Body + Override hcl.Body +} + +var _ hcl.Body = mergeBody{} + +func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, _, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + return content, diags +} + +func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + baseSchema := schemaWithDynamic(schema) + overrideSchema := schemaWithDynamic(schemaForOverrides(schema)) + + baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema) + diags = append(diags, cDiags...) + overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema) + diags = append(diags, cDiags...) + + content := b.prepareContent(baseContent, overrideContent) + + remain := MergeBodies(baseRemain, overrideRemain) + + return content, remain, diags +} + +func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent { + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + } + + // For attributes we just assign from each map in turn and let the override + // map clobber any matching entries from base. + for k, a := range base.Attributes { + content.Attributes[k] = a + } + for k, a := range override.Attributes { + content.Attributes[k] = a + } + + // Things are a little more interesting for blocks because they arrive + // as a flat list. Our merging semantics call for us to suppress blocks + // from base if at least one block of the same type appears in override. + // We explicitly do not try to correlate and deeply merge nested blocks, + // since we don't have enough context here to infer user intent. + + overriddenBlockTypes := make(map[string]bool) + for _, block := range override.Blocks { + if block.Type == "dynamic" { + overriddenBlockTypes[block.Labels[0]] = true + continue + } + overriddenBlockTypes[block.Type] = true + } + for _, block := range base.Blocks { + // We skip over dynamic blocks whose type label is an overridden type + // but note that below we do still leave them as dynamic blocks in + // the result because expanding the dynamic blocks that are left is + // done much later during the core graph walks, where we can safely + // evaluate the expressions. + if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] { + continue + } + if overriddenBlockTypes[block.Type] { + continue + } + content.Blocks = append(content.Blocks, block) + } + for _, block := range override.Blocks { + content.Blocks = append(content.Blocks, block) + } + + return content +} + +func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + var diags hcl.Diagnostics + ret := make(hcl.Attributes) + + baseAttrs, aDiags := b.Base.JustAttributes() + diags = append(diags, aDiags...) + overrideAttrs, aDiags := b.Override.JustAttributes() + diags = append(diags, aDiags...) 
+ + for k, a := range baseAttrs { + ret[k] = a + } + for k, a := range overrideAttrs { + ret[k] = a + } + + return ret, diags +} + +func (b mergeBody) MissingItemRange() hcl.Range { + return b.Base.MissingItemRange() +} diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go new file mode 100644 index 00000000..6f6b469f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/named_values.go @@ -0,0 +1,364 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/ext/typeexpr" + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/addrs" +) + +// A consistent detail message for all "not a valid identifier" diagnostics. +const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes." + +// Variable represents a "variable" block in a module or file. +type Variable struct { + Name string + Description string + Default cty.Value + Type cty.Type + ParsingMode VariableParsingMode + + DescriptionSet bool + + DeclRange hcl.Range +} + +func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) { + v := &Variable{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + // Unless we're building an override, we'll set some defaults + // which we might override with attributes below. We leave these + // as zero-value in the override case so we can recognize whether + // or not they are set when we merge. + if !override { + v.Type = cty.DynamicPseudoType + v.ParsingMode = VariableParseLiteral + } + + content, diags := block.Body.Content(variableBlockSchema) + + if !hclsyntax.ValidIdentifier(v.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + // Don't allow declaration of variables that would conflict with the + // reserved attribute and block type names in a "module" block, since + // these won't be usable for child modules. + for _, attr := range moduleBlockSchema.Attributes { + if attr.Name == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name), + Subject: &block.LabelRanges[0], + }) + } + } + for _, blockS := range moduleBlockSchema.Blocks { + if blockS.Type == v.Name { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid variable name", + Detail: fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type), + Subject: &block.LabelRanges[0], + }) + } + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description) + diags = append(diags, valDiags...) + v.DescriptionSet = true + } + + if attr, exists := content.Attributes["type"]; exists { + ty, parseMode, tyDiags := decodeVariableType(attr.Expr) + diags = append(diags, tyDiags...) 
+ v.Type = ty + v.ParsingMode = parseMode + } + + if attr, exists := content.Attributes["default"]; exists { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + + // Convert the default to the expected type so we can catch invalid + // defaults early and allow later code to assume validity. + // Note that this depends on us having already processed any "type" + // attribute above. + // However, we can't do this if we're in an override file where + // the type might not be set; we'll catch that during merge. + if v.Type != cty.NilType { + var err error + val, err = convert.Convert(val, v.Type) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid default value for variable", + Detail: fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err), + Subject: attr.Expr.Range().Ptr(), + }) + val = cty.DynamicVal + } + } + + v.Default = val + } + + return v, diags +} + +func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) { + if exprIsNativeQuotedString(expr) { + // Here we're accepting the pre-0.12 form of variable type argument where + // the string values "string", "list" and "map" are accepted as a hint + // about the type used primarily for deciding how to parse values + // given on the command line and in environment variables. + // Only the native syntax ends up in this codepath; we handle the + // JSON syntax (which is, of course, quoted even in the new format) + // in the normal codepath below. + val, diags := expr.Value(nil) + if diags.HasErrors() { + return cty.DynamicPseudoType, VariableParseHCL, diags + } + str := val.AsString() + switch str { + case "string": + return cty.String, VariableParseLiteral, diags + case "list": + return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags + case "map": + return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags + default: + return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{ + Severity: hcl.DiagError, + Summary: "Invalid legacy variable type hint", + Detail: `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`, + Subject: expr.Range().Ptr(), + }} + } + } + + // First we'll deal with some shorthand forms that the HCL-level type + // expression parser doesn't include. These both emulate pre-0.12 behavior + // of allowing a list or map of any element type as long as all of the + // elements are consistent. This is the same as list(any) or map(any). + switch hcl.ExprAsKeyword(expr) { + case "list": + return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil + case "map": + return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil + } + + ty, diags := typeexpr.TypeConstraint(expr) + if diags.HasErrors() { + return cty.DynamicPseudoType, VariableParseHCL, diags + } + + switch { + case ty.IsPrimitiveType(): + // Primitive types use literal parsing. + return ty, VariableParseLiteral, diags + default: + // Everything else uses HCL parsing + return ty, VariableParseHCL, diags + } +} + +// VariableParsingMode defines how values of a particular variable given by +// text-only mechanisms (command line arguments and environment variables) +// should be parsed to produce the final value.
+type VariableParsingMode rune + +// VariableParseLiteral is a variable parsing mode that just takes the given +// string directly as a cty.String value. +const VariableParseLiteral VariableParsingMode = 'L' + +// VariableParseHCL is a variable parsing mode that attempts to parse the given +// string as an HCL expression and returns the result. +const VariableParseHCL VariableParsingMode = 'H' + +// Parse uses the receiving parsing mode to process the given variable value +// string, returning the result along with any diagnostics. +// +// A VariableParsingMode does not know the expected type of the corresponding +// variable, so it's the caller's responsibility to attempt to convert the +// result to the appropriate type and return to the user any diagnostics that +// conversion may produce. +// +// The given name is used to create a synthetic filename in case any diagnostics +// must be generated about the given string value. This should be the name +// of the root module variable whose value will be populated from the given +// string. +// +// If the returned diagnostics has errors, the returned value may not be +// valid. +func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) { + switch m { + case VariableParseLiteral: + return cty.StringVal(value), nil + case VariableParseHCL: + fakeFilename := fmt.Sprintf("<value for var.%s>", name) + expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1}) + if diags.HasErrors() { + return cty.DynamicVal, diags + } + val, valDiags := expr.Value(nil) + diags = append(diags, valDiags...) + return val, diags + default: + // Should never happen + panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m)) + } +} + +// Output represents an "output" block in a module or file. +type Output struct { + Name string + Description string + Expr hcl.Expression + DependsOn []hcl.Traversal + Sensitive bool + + DescriptionSet bool + SensitiveSet bool + + DeclRange hcl.Range +} + +func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) { + o := &Output{ + Name: block.Labels[0], + DeclRange: block.DefRange, + } + + schema := outputBlockSchema + if override { + schema = schemaForOverrides(schema) + } + + content, diags := block.Body.Content(schema) + + if !hclsyntax.ValidIdentifier(o.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid output name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + if attr, exists := content.Attributes["description"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description) + diags = append(diags, valDiags...) + o.DescriptionSet = true + } + + if attr, exists := content.Attributes["value"]; exists { + o.Expr = attr.Expr + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive) + diags = append(diags, valDiags...) + o.SensitiveSet = true + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + o.DependsOn = append(o.DependsOn, deps...) + } + + return o, diags +} + +// Local represents a single entry from a "locals" block in a module or file. +// The "locals" block itself is not represented, because it serves only to +// provide context for us to interpret its contents.
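A quick usage sketch (not in the patch) for the `Parse` method shown above; the variable names and values are invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	// Literal mode: the raw string is wrapped as a cty.String unchanged.
	lit, _ := configs.VariableParseLiteral.Parse("region", "us-east-1")
	fmt.Println(lit.AsString()) // us-east-1

	// HCL mode: the string is first parsed as an HCL expression, so list
	// and map syntax round-trips into real cty collection values.
	val, diags := configs.VariableParseHCL.Parse("regions", `["us-east-1", "us-west-2"]`)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	fmt.Println(val.LengthInt()) // 2
}
```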
+type Local struct { + Name string + Expr hcl.Expression + + DeclRange hcl.Range +} + +func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + if len(attrs) == 0 { + return nil, diags + } + + locals := make([]*Local, 0, len(attrs)) + for name, attr := range attrs { + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local value name", + Detail: badIdentifierDetail, + Subject: &attr.NameRange, + }) + } + + locals = append(locals, &Local{ + Name: name, + Expr: attr.Expr, + DeclRange: attr.Range, + }) + } + return locals, diags +} + +// Addr returns the address of the local value declared by the receiver, +// relative to its containing module. +func (l *Local) Addr() addrs.LocalValue { + return addrs.LocalValue{ + Name: l.Name, + } +} + +var variableBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "default", + }, + { + Name: "type", + }, + }, +} + +var outputBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "description", + }, + { + Name: "value", + Required: true, + }, + { + Name: "depends_on", + }, + { + Name: "sensitive", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go new file mode 100644 index 00000000..8176fa1b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser.go @@ -0,0 +1,100 @@ +package configs + +import ( + "fmt" + "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hclparse" + "github.com/spf13/afero" +) + +// Parser is the main interface to read configuration files and other related +// files from disk. +// +// It retains a cache of all files that are loaded so that they can be used +// to create source code snippets in diagnostics, etc. +type Parser struct { + fs afero.Afero + p *hclparse.Parser +} + +// NewParser creates and returns a new Parser that reads files from the given +// filesystem. If a nil filesystem is passed then the system's "real" filesystem +// will be used, via afero.OsFs. +func NewParser(fs afero.Fs) *Parser { + if fs == nil { + fs = afero.OsFs{} + } + + return &Parser{ + fs: afero.Afero{Fs: fs}, + p: hclparse.NewParser(), + } +} + +// LoadHCLFile is a low-level method that reads the file at the given path, +// parses it, and returns the hcl.Body representing its root. In many cases +// it is better to use one of the other Load*File methods on this type, +// which additionally decode the root body in some way and return a higher-level +// construct. +// +// If the file cannot be read at all -- e.g. because it does not exist -- then +// this method will return a nil body and error diagnostics. In this case +// callers may wish to ignore the provided error diagnostics and produce +// a more context-sensitive error instead. +// +// The file will be parsed using the HCL native syntax unless the filename +// ends with ".json", in which case the HCL JSON syntax will be used. 
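For orientation, a small sketch (not in the patch) of the `Parser` entry point, using an in-memory afero filesystem so the example is hermetic; the filename and attribute are invented, and passing nil to `NewParser` would use the real OS filesystem instead:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	afero.WriteFile(fs, "main.tf", []byte(`greeting = "hello"`), 0644)

	p := configs.NewParser(fs)
	body, diags := p.LoadHCLFile("main.tf")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	attrs, _ := body.JustAttributes()
	fmt.Println(len(attrs), "top-level attribute(s)") // 1 top-level attribute(s)
}
```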
+func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) { + src, err := p.fs.ReadFile(path) + + if err != nil { + return nil, hcl.Diagnostics{ + { + Severity: hcl.DiagError, + Summary: "Failed to read file", + Detail: fmt.Sprintf("The file %q could not be read.", path), + }, + } + } + + var file *hcl.File + var diags hcl.Diagnostics + switch { + case strings.HasSuffix(path, ".json"): + file, diags = p.p.ParseJSON(src, path) + default: + file, diags = p.p.ParseHCL(src, path) + } + + // If the returned file or body is nil, then we'll return a non-nil empty + // body so we'll meet our contract that nil means an error reading the file. + if file == nil || file.Body == nil { + return hcl.EmptyBody(), diags + } + + return file.Body, diags +} + +// Sources returns a map of the cached source buffers for all files that +// have been loaded through this parser, with source filenames (as requested +// when each file was opened) as the keys. +func (p *Parser) Sources() map[string][]byte { + return p.p.Sources() +} + +// ForceFileSource artificially adds source code to the cache of file sources, +// as if it had been loaded from the given filename. +// +// This should be used only in special situations where configuration is loaded +// some other way. Most callers should load configuration via methods of +// Parser, which will update the sources cache automatically. +func (p *Parser) ForceFileSource(filename string, src []byte) { + // We'll make a synthetic hcl.File here just so we can reuse the + // existing cache. + p.p.AddFile(filename, &hcl.File{ + Body: hcl.EmptyBody(), + Bytes: src, + }) +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go new file mode 100644 index 00000000..7f2ff271 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_config.go @@ -0,0 +1,247 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// LoadConfigFile reads the file at the given path and parses it as a config +// file. +// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil *File will be returned along with error diagnostics. Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil map is returned +// then the map may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, false) +} + +// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes +// certain required attribute constraints in order to interpret the given +// file as an overrides file. +func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) { + return p.loadConfigFile(path, true) +} + +func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) { + + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + file := &File{} + + var reqDiags hcl.Diagnostics + file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body) + diags = append(diags, reqDiags...) + + content, contentDiags := body.Content(configFileSchema) + diags = append(diags, contentDiags...) 
+ + for _, block := range content.Blocks { + switch block.Type { + + case "terraform": + content, contentDiags := block.Body.Content(terraformBlockSchema) + diags = append(diags, contentDiags...) + + // We ignore the "required_version" attribute here because + // sniffCoreVersionRequirements already dealt with that above. + + for _, innerBlock := range content.Blocks { + switch innerBlock.Type { + + case "backend": + backendCfg, cfgDiags := decodeBackendBlock(innerBlock) + diags = append(diags, cfgDiags...) + if backendCfg != nil { + file.Backends = append(file.Backends, backendCfg) + } + + case "required_providers": + reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock) + diags = append(diags, reqsDiags...) + file.ProviderRequirements = append(file.ProviderRequirements, reqs...) + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + case "provider": + cfg, cfgDiags := decodeProviderBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ProviderConfigs = append(file.ProviderConfigs, cfg) + } + + case "variable": + cfg, cfgDiags := decodeVariableBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Variables = append(file.Variables, cfg) + } + + case "locals": + defs, defsDiags := decodeLocalsBlock(block) + diags = append(diags, defsDiags...) + file.Locals = append(file.Locals, defs...) + + case "output": + cfg, cfgDiags := decodeOutputBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.Outputs = append(file.Outputs, cfg) + } + + case "module": + cfg, cfgDiags := decodeModuleBlock(block, override) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ModuleCalls = append(file.ModuleCalls, cfg) + } + + case "resource": + cfg, cfgDiags := decodeResourceBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.ManagedResources = append(file.ManagedResources, cfg) + } + + case "data": + cfg, cfgDiags := decodeDataBlock(block) + diags = append(diags, cfgDiags...) + if cfg != nil { + file.DataResources = append(file.DataResources, cfg) + } + + default: + // Should never happen because the above cases should be exhaustive + // for all block type names in our schema. + continue + + } + } + + return file, diags +} + +// sniffCoreVersionRequirements does minimal parsing of the given body for +// "terraform" blocks with "required_version" attributes, returning the +// requirements found. +// +// This is intended to maximize the chance that we'll be able to read the +// requirements (syntax errors notwithstanding) even if the config file contains +// constructs that might've been added in future Terraform versions. +// +// This is a "best effort" sort of method which will return constraints it is +// able to find, but may return no constraints at all if the given body is +// so invalid that it cannot be decoded at all. +func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) { + rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema) + + var constraints []VersionConstraint + + for _, block := range rootContent.Blocks { + content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema) + diags = append(diags, blockDiags...) + + attr, exists := content.Attributes["required_version"] + if !exists { + continue + } + + constraint, constraintDiags := decodeVersionConstraint(attr) + diags = append(diags, constraintDiags...)
+ if !constraintDiags.HasErrors() { + constraints = append(constraints, constraint) + } + } + + return constraints, diags +} + +// configFileSchema is the schema for the top-level of a config file. We use +// the low-level HCL API for this level so we can easily deal with each +// block type separately with its own decoding logic. +var configFileSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + { + Type: "provider", + LabelNames: []string{"name"}, + }, + { + Type: "variable", + LabelNames: []string{"name"}, + }, + { + Type: "locals", + }, + { + Type: "output", + LabelNames: []string{"name"}, + }, + { + Type: "module", + LabelNames: []string{"name"}, + }, + { + Type: "resource", + LabelNames: []string{"type", "name"}, + }, + { + Type: "data", + LabelNames: []string{"type", "name"}, + }, + }, +} + +// terraformBlockSchema is the schema for a top-level "terraform" block in +// a configuration file. +var terraformBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "backend", + LabelNames: []string{"type"}, + }, + { + Type: "required_providers", + }, + }, +} + +// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffRootSchema = &hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "terraform", + }, + }, +} + +// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements +var configFileVersionSniffBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "required_version", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go new file mode 100644 index 00000000..3014cb4b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go @@ -0,0 +1,142 @@ +package configs + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/hashicorp/hcl2/hcl" +) + +// LoadConfigDir reads the .tf and .tf.json files in the given directory +// as config files (using LoadConfigFile) and then combines these files into +// a single Module. +// +// If this method returns nil, that indicates that the given directory does not +// exist at all or could not be opened for some reason. Callers may wish to +// detect this case and ignore the returned diagnostics so that they can +// produce a more context-aware error message in that case. +// +// If this method returns a non-nil module while error diagnostics are returned +// then the module may be incomplete but can be used carefully for static +// analysis. +// +// This file does not consider a directory with no files to be an error, and +// will simply return an empty module in that case. Callers should first call +// Parser.IsConfigDir if they wish to recognize that situation. +// +// .tf files are parsed using the HCL native syntax while .tf.json files are +// parsed using the HCL JSON syntax. +func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) { + primaryPaths, overridePaths, diags := p.dirFiles(path) + if diags.HasErrors() { + return nil, diags + } + + primary, fDiags := p.loadFiles(primaryPaths, false) + diags = append(diags, fDiags...) + override, fDiags := p.loadFiles(overridePaths, true) + diags = append(diags, fDiags...) + + mod, modDiags := NewModule(primary, override) + diags = append(diags, modDiags...) 
+ + mod.SourceDir = path + + return mod, diags +} + +// ConfigDirFiles returns lists of the primary and override configuration +// files in the given directory. +// +// If the given directory does not exist or cannot be read, error diagnostics +// are returned. If errors are returned, the resulting lists may be incomplete. +func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { + return p.dirFiles(dir) +} + +// IsConfigDir determines whether the given path refers to a directory that +// exists and contains at least one Terraform config file (with a .tf or +// .tf.json extension.) +func (p *Parser) IsConfigDir(path string) bool { + primaryPaths, overridePaths, _ := p.dirFiles(path) + return (len(primaryPaths) + len(overridePaths)) > 0 +} + +func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) { + var files []*File + var diags hcl.Diagnostics + + for _, path := range paths { + var f *File + var fDiags hcl.Diagnostics + if override { + f, fDiags = p.LoadConfigFileOverride(path) + } else { + f, fDiags = p.LoadConfigFile(path) + } + diags = append(diags, fDiags...) + if f != nil { + files = append(files, f) + } + } + + return files, diags +} + +func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) { + infos, err := p.fs.ReadDir(dir) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Failed to read module directory", + Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir), + }) + return + } + + for _, info := range infos { + if info.IsDir() { + // We only care about files + continue + } + + name := info.Name() + ext := fileExt(name) + if ext == "" || IsIgnoredFile(name) { + continue + } + + baseName := name[:len(name)-len(ext)] // strip extension + isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override") + + fullPath := filepath.Join(dir, name) + if isOverride { + override = append(override, fullPath) + } else { + primary = append(primary, fullPath) + } + } + + return +} + +// fileExt returns the Terraform configuration extension of the given +// path, or a blank string if it is not a recognized extension. +func fileExt(path string) string { + if strings.HasSuffix(path, ".tf") { + return ".tf" + } else if strings.HasSuffix(path, ".tf.json") { + return ".tf.json" + } else { + return "" + } +} + +// IsIgnoredFile returns true if the given filename (which must not have a +// directory path ahead of it) should be ignored as e.g. an editor swap file. +func IsIgnoredFile(name string) bool { + return strings.HasPrefix(name, ".") || // Unix-like hidden files + strings.HasSuffix(name, "~") || // vim + strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs +} diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go new file mode 100644 index 00000000..b7f1c1c5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/parser_values.go @@ -0,0 +1,43 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" +) + +// LoadValuesFile reads the file at the given path and parses it as a "values +// file", which is an HCL config file whose top-level attributes are treated +// as arbitrary key/value pairs.
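A sketch (not in the patch) of the primary/override split implemented by `dirFiles` above: the `_override` filename suffix routes a file through `LoadConfigFileOverride`, so its settings replace matching ones during `NewModule`. Filenames and the variable are invented.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	// A primary file plus an override file in the same directory.
	afero.WriteFile(fs, "cfg/main.tf", []byte(`
variable "region" {
  default = "us-east-1"
}
`), 0644)
	afero.WriteFile(fs, "cfg/main_override.tf", []byte(`
variable "region" {
  default = "us-west-2"
}
`), 0644)

	p := configs.NewParser(fs)
	mod, diags := p.LoadConfigDir("cfg")
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// mod reflects main.tf with main_override.tf merged over it, so the
	// "region" default now comes from the override file.
	fmt.Println("loaded module from", mod.SourceDir)
}
```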
+// +// If the file cannot be read -- for example, if it does not exist -- then +// a nil map will be returned along with error diagnostics. Callers may wish +// to disregard the returned diagnostics in this case and instead generate +// their own error message(s) with additional context. +// +// If the returned diagnostics has errors when a non-nil map is returned +// then the map may be incomplete but should be valid enough for careful +// static analysis. +// +// This method wraps LoadHCLFile, and so it inherits the syntax selection +// behaviors documented for that method. +func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) { + body, diags := p.LoadHCLFile(path) + if body == nil { + return nil, diags + } + + vals := make(map[string]cty.Value) + attrs, attrDiags := body.JustAttributes() + diags = append(diags, attrDiags...) + if attrs == nil { + return vals, diags + } + + for name, attr := range attrs { + val, valDiags := attr.Expr.Value(nil) + diags = append(diags, valDiags...) + vals[name] = val + } + + return vals, diags +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go new file mode 100644 index 00000000..d01d5cf2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provider.go @@ -0,0 +1,144 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" +) + +// Provider represents a "provider" block in a module or file. A provider +// block is a provider configuration, and there can be zero or more +// configurations for each actual provider. +type Provider struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if no alias set + + Version VersionConstraint + + Config hcl.Body + + DeclRange hcl.Range +} + +func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) { + content, config, diags := block.Body.PartialContent(providerBlockSchema) + + provider := &Provider{ + Name: block.Labels[0], + NameRange: block.LabelRanges[0], + Config: config, + DeclRange: block.DefRange, + } + + if attr, exists := content.Attributes["alias"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias) + diags = append(diags, valDiags...) + provider.AliasRange = attr.Expr.Range().Ptr() + + if !hclsyntax.ValidIdentifier(provider.Alias) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration alias", + Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail), + }) + } + } + + if attr, exists := content.Attributes["version"]; exists { + var versionDiags hcl.Diagnostics + provider.Version, versionDiags = decodeVersionConstraint(attr) + diags = append(diags, versionDiags...) 
+ } + + // Reserved attribute names + for _, name := range []string{"count", "depends_on", "for_each", "source"} { + if attr, exists := content.Attributes[name]; exists { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in provider block", + Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name), + Subject: &attr.NameRange, + }) + } + } + + // Reserved block types (all of them) + for _, block := range content.Blocks { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provider block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + + return provider, diags +} + +// Addr returns the address of the receiving provider configuration, relative +// to its containing module. +func (p *Provider) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: p.Name, + Alias: p.Alias, + } +} + +func (p *Provider) moduleUniqueKey() string { + if p.Alias != "" { + return fmt.Sprintf("%s.%s", p.Name, p.Alias) + } + return p.Name +} + +// ProviderRequirement represents a declaration of a dependency on a particular +// provider version without actually configuring that provider. This is used in +// child modules that expect a provider to be passed in from their parent. +type ProviderRequirement struct { + Name string + Requirement VersionConstraint +} + +func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) { + attrs, diags := block.Body.JustAttributes() + var reqs []*ProviderRequirement + for name, attr := range attrs { + req, reqDiags := decodeVersionConstraint(attr) + diags = append(diags, reqDiags...) + if !diags.HasErrors() { + reqs = append(reqs, &ProviderRequirement{ + Name: name, + Requirement: req, + }) + } + } + return reqs, diags +} + +var providerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "alias", + }, + { + Name: "version", + }, + + // Attribute names reserved for future expansion. + {Name: "count"}, + {Name: "depends_on"}, + {Name: "for_each"}, + {Name: "source"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + // _All_ of these are reserved for future expansion. + {Type: "lifecycle"}, + {Type: "locals"}, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go new file mode 100644 index 00000000..b031dd0b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioner.go @@ -0,0 +1,150 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" +) + +// Provisioner represents a "provisioner" block when used within a +// "resource" block in a module or file. 
+type Provisioner struct { + Type string + Config hcl.Body + Connection *Connection + When ProvisionerWhen + OnFailure ProvisionerOnFailure + + DeclRange hcl.Range + TypeRange hcl.Range +} + +func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) { + pv := &Provisioner{ + Type: block.Labels[0], + TypeRange: block.LabelRanges[0], + DeclRange: block.DefRange, + When: ProvisionerWhenCreate, + OnFailure: ProvisionerOnFailureFail, + } + + content, config, diags := block.Body.PartialContent(provisionerBlockSchema) + pv.Config = config + + if attr, exists := content.Attributes["when"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "create": + pv.When = ProvisionerWhenCreate + case "destroy": + pv.When = ProvisionerWhenDestroy + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"when\" keyword", + Detail: "The \"when\" argument requires one of the following keywords: create or destroy.", + Subject: expr.Range().Ptr(), + }) + } + } + + if attr, exists := content.Attributes["on_failure"]; exists { + expr, shimDiags := shimTraversalInString(attr.Expr, true) + diags = append(diags, shimDiags...) + + switch hcl.ExprAsKeyword(expr) { + case "continue": + pv.OnFailure = ProvisionerOnFailureContinue + case "fail": + pv.OnFailure = ProvisionerOnFailureFail + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid \"on_failure\" keyword", + Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.", + Subject: attr.Expr.Range().Ptr(), + }) + } + } + + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + //conn, connDiags := decodeConnectionBlock(block) + //diags = append(diags, connDiags...) + pv.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in provisioner block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return pv, diags +} + +// Connection represents a "connection" block when used within either a +// "resource" or "provisioner" block in a module or file. +type Connection struct { + Config hcl.Body + + DeclRange hcl.Range +} + +// ProvisionerWhen is an enum for valid values for when to run provisioners. +type ProvisionerWhen int + +//go:generate stringer -type ProvisionerWhen + +const ( + ProvisionerWhenInvalid ProvisionerWhen = iota + ProvisionerWhenCreate + ProvisionerWhenDestroy +) + +// ProvisionerOnFailure is an enum for valid values for on_failure options +// for provisioners. 
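The `when`/`on_failure` decoding above leans on `hcl.ExprAsKeyword`, which yields the bare identifier text only when the expression is a plain keyword. A tiny sketch (not in the patch; filename invented):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte("destroy"), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	// Prints "destroy"; a non-keyword expression would yield "".
	fmt.Println(hcl.ExprAsKeyword(expr))
}
```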
+type ProvisionerOnFailure int + +//go:generate stringer -type ProvisionerOnFailure + +const ( + ProvisionerOnFailureInvalid ProvisionerOnFailure = iota + ProvisionerOnFailureContinue + ProvisionerOnFailureFail +) + +var provisionerBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + {Name: "when"}, + {Name: "on_failure"}, + }, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "connection"}, + {Type: "lifecycle"}, // reserved for future use + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go new file mode 100644 index 00000000..7ff5a6e0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ProvisionerOnFailureInvalid-0] + _ = x[ProvisionerOnFailureContinue-1] + _ = x[ProvisionerOnFailureFail-2] +} + +const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail" + +var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79} + +func (i ProvisionerOnFailure) String() string { + if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) { + return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go new file mode 100644 index 00000000..9f21b3ac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go @@ -0,0 +1,25 @@ +// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT. + +package configs + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ProvisionerWhenInvalid-0] + _ = x[ProvisionerWhenCreate-1] + _ = x[ProvisionerWhenDestroy-2] +} + +const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy" + +var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65} + +func (i ProvisionerWhen) String() string { + if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) { + return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]] +} diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go new file mode 100644 index 00000000..de1a3434 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/resource.go @@ -0,0 +1,486 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/gohcl" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource represents a "resource" or "data" block in a module or file. 
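The two generated stringer files above give the enum constants readable names via the standard `fmt.Stringer` interface; a short sketch (not in the patch):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	fmt.Println(configs.ProvisionerWhenCreate)    // ProvisionerWhenCreate
	fmt.Println(configs.ProvisionerOnFailureFail) // ProvisionerOnFailureFail
	// Out-of-range values fall back to a numeric form:
	fmt.Println(configs.ProvisionerWhen(42)) // ProvisionerWhen(42)
}
```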
+type Resource struct { + Mode addrs.ResourceMode + Name string + Type string + Config hcl.Body + Count hcl.Expression + ForEach hcl.Expression + + ProviderConfigRef *ProviderConfigRef + + DependsOn []hcl.Traversal + + // Managed is populated only for Mode = addrs.ManagedResourceMode, + // containing the additional fields that apply to managed resources. + // For all other resource modes, this field is nil. + Managed *ManagedResource + + DeclRange hcl.Range + TypeRange hcl.Range +} + +// ManagedResource represents a "resource" block in a module or file. +type ManagedResource struct { + Connection *Connection + Provisioners []*Provisioner + + CreateBeforeDestroy bool + PreventDestroy bool + IgnoreChanges []hcl.Traversal + IgnoreAllChanges bool + + CreateBeforeDestroySet bool + PreventDestroySet bool +} + +func (r *Resource) moduleUniqueKey() string { + return r.Addr().String() +} + +// Addr returns a resource address for the receiver that is relative to the +// resource's containing module. +func (r *Resource) Addr() addrs.Resource { + return addrs.Resource{ + Mode: r.Mode, + Type: r.Type, + Name: r.Name, + } +} + +// ProviderConfigAddr returns the address for the provider configuration +// that should be used for this resource. This function implements the +// default behavior of extracting the type from the resource type name if +// an explicit "provider" argument was not provided. +func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig { + if r.ProviderConfigRef == nil { + return r.Addr().DefaultProviderConfig() + } + + return addrs.ProviderConfig{ + Type: r.ProviderConfigRef.Name, + Alias: r.ProviderConfigRef.Alias, + } +} + +func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.ManagedResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + Managed: &ManagedResource{}, + } + + content, remain, diags := block.Body.PartialContent(resourceBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in resource block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) 
+ } + + var seenLifecycle *hcl.Block + var seenConnection *hcl.Block + for _, block := range content.Blocks { + switch block.Type { + case "lifecycle": + if seenLifecycle != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate lifecycle block", + Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenLifecycle = block + + lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema) + diags = append(diags, lcDiags...) + + if attr, exists := lcContent.Attributes["create_before_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy) + diags = append(diags, valDiags...) + r.Managed.CreateBeforeDestroySet = true + } + + if attr, exists := lcContent.Attributes["prevent_destroy"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy) + diags = append(diags, valDiags...) + r.Managed.PreventDestroySet = true + } + + if attr, exists := lcContent.Attributes["ignore_changes"]; exists { + + // ignore_changes can either be a list of relative traversals + // or it can be just the keyword "all" to ignore changes to this + // resource entirely. + // ignore_changes = [ami, instance_type] + // ignore_changes = all + // We also allow two legacy forms for compatibility with earlier + // versions: + // ignore_changes = ["ami", "instance_type"] + // ignore_changes = ["*"] + + kw := hcl.ExprAsKeyword(attr.Expr) + + switch { + case kw == "all": + r.Managed.IgnoreAllChanges = true + default: + exprs, listDiags := hcl.ExprList(attr.Expr) + diags = append(diags, listDiags...) + + var ignoreAllRange hcl.Range + + for _, expr := range exprs { + + // our expr might be the literal string "*", which + // we accept as a deprecated way of saying "all". + if shimIsIgnoreChangesStar(expr) { + r.Managed.IgnoreAllChanges = true + ignoreAllRange = expr.Range() + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagWarning, + Summary: "Deprecated ignore_changes wildcard", + Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.", + Subject: attr.Expr.Range().Ptr(), + }) + continue + } + + expr, shimDiags := shimTraversalInString(expr, false) + diags = append(diags, shimDiags...) + + traversal, travDiags := hcl.RelTraversalForExpr(expr) + diags = append(diags, travDiags...) + if len(traversal) != 0 { + r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal) + } + } + + if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid ignore_changes ruleset", + Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.", + Subject: &ignoreAllRange, + Context: attr.Expr.Range().Ptr(), + }) + } + + } + + } + + case "connection": + if seenConnection != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Duplicate connection block", + Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange), + Subject: &block.DefRange, + }) + continue + } + seenConnection = block + + r.Managed.Connection = &Connection{ + Config: block.Body, + DeclRange: block.DefRange, + } + + case "provisioner": + pv, pvDiags := decodeProvisionerBlock(block) + diags = append(diags, pvDiags...) 
+ if pv != nil { + r.Managed.Provisioners = append(r.Managed.Provisioners, pv) + } + + default: + // Any other block types are ones we've reserved for future use, + // so they get a generic message. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in resource block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) { + r := &Resource{ + Mode: addrs.DataResourceMode, + Type: block.Labels[0], + Name: block.Labels[1], + DeclRange: block.DefRange, + TypeRange: block.LabelRanges[0], + } + + content, remain, diags := block.Body.PartialContent(dataBlockSchema) + r.Config = remain + + if !hclsyntax.ValidIdentifier(r.Type) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + if !hclsyntax.ValidIdentifier(r.Name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data resource name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[1], + }) + } + + if attr, exists := content.Attributes["count"]; exists { + r.Count = attr.Expr + } + + if attr, exists := content.Attributes["for_each"]; exists { + r.ForEach = attr.Expr + // We currently parse this, but don't yet do anything with it. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved argument name in data block", + Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name), + Subject: &attr.NameRange, + }) + } + + if attr, exists := content.Attributes["provider"]; exists { + var providerDiags hcl.Diagnostics + r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider") + diags = append(diags, providerDiags...) + } + + if attr, exists := content.Attributes["depends_on"]; exists { + deps, depsDiags := decodeDependsOn(attr) + diags = append(diags, depsDiags...) + r.DependsOn = append(r.DependsOn, deps...) + } + + for _, block := range content.Blocks { + // All of the block types we accept are just reserved for future use, but some get a specialized error message. + switch block.Type { + case "lifecycle": + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported lifecycle block", + Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.", + Subject: &block.DefRange, + }) + default: + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Reserved block type name in data block", + Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type), + Subject: &block.TypeRange, + }) + } + } + + return r, diags +} + +type ProviderConfigRef struct { + Name string + NameRange hcl.Range + Alias string + AliasRange *hcl.Range // nil if alias not set +} + +func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) { + var diags hcl.Diagnostics + + var shimDiags hcl.Diagnostics + expr, shimDiags = shimTraversalInString(expr, false) + diags = append(diags, shimDiags...)
+ + traversal, travDiags := hcl.AbsTraversalForExpr(expr) + + // AbsTraversalForExpr produces only generic errors, so we'll discard + // the errors given and produce our own with extra context. If we didn't + // get any errors then we might still have warnings, though. + if !travDiags.HasErrors() { + diags = append(diags, travDiags...) + } + + if len(traversal) < 1 || len(traversal) > 2 { + // A provider reference was given as a string literal in the legacy + // configuration language and there are lots of examples out there + // showing that usage, so we'll sniff for that situation here and + // produce a specialized error message for it to help users find + // the new correct form. + if exprIsNativeQuotedString(expr) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "A provider configuration reference must not be given in quotes.", + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName), + Subject: expr.Range().Ptr(), + }) + return nil, diags + } + + ret := &ProviderConfigRef{ + Name: traversal.RootName(), + NameRange: traversal[0].SourceRange(), + } + + if len(traversal) > 1 { + aliasStep, ok := traversal[1].(hcl.TraverseAttr) + if !ok { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider configuration reference", + Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.", + Subject: traversal[1].SourceRange().Ptr(), + }) + return ret, diags + } + + ret.Alias = aliasStep.Name + ret.AliasRange = aliasStep.SourceRange().Ptr() + } + + return ret, diags +} + +// Addr returns the provider config address corresponding to the receiving +// config reference. +// +// This is a trivial conversion, essentially just discarding the source +// location information and keeping just the addressing information. 
+func (r *ProviderConfigRef) Addr() addrs.ProviderConfig { + return addrs.ProviderConfig{ + Type: r.Name, + Alias: r.Alias, + } +} + +func (r *ProviderConfigRef) String() string { + if r == nil { + return "" + } + if r.Alias != "" { + return fmt.Sprintf("%s.%s", r.Name, r.Alias) + } + return r.Name +} + +var commonResourceAttributes = []hcl.AttributeSchema{ + { + Name: "count", + }, + { + Name: "for_each", + }, + { + Name: "provider", + }, + { + Name: "depends_on", + }, +} + +var resourceBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "locals"}, // reserved for future use + {Type: "lifecycle"}, + {Type: "connection"}, + {Type: "provisioner", LabelNames: []string{"type"}}, + }, +} + +var dataBlockSchema = &hcl.BodySchema{ + Attributes: commonResourceAttributes, + Blocks: []hcl.BlockHeaderSchema{ + {Type: "lifecycle"}, // reserved for future use + {Type: "locals"}, // reserved for future use + }, +} + +var resourceLifecycleBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "create_before_destroy", + }, + { + Name: "prevent_destroy", + }, + { + Name: "ignore_changes", + }, + }, +} diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go new file mode 100644 index 00000000..3ae1bff6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/synth_body.go @@ -0,0 +1,118 @@ +package configs + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/zclconf/go-cty/cty" +) + +// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes +// corresponding to the elements given in the values map. +// +// This is useful in situations where, for example, values provided on the +// command line can override values given in configuration, using MergeBodies. +// +// The given filename is used in case any diagnostics are returned. Since +// the created body is synthetic, it is likely that this will not be a "real" +// filename. For example, if from a command line argument it could be +// a representation of that argument's name, such as "-var=...". 
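A small sketch (not in the patch) of the `ProviderConfigRef` accessors defined above; the provider name and alias are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	ref := &configs.ProviderConfigRef{Name: "oneview", Alias: "east"}
	fmt.Println(ref.String())     // oneview.east
	fmt.Println(ref.Addr().Type)  // oneview
	fmt.Println(ref.Addr().Alias) // east

	// String is nil-safe, returning "" for a nil receiver.
	var nilRef *configs.ProviderConfigRef
	fmt.Println(nilRef.String() == "") // true
}
```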
+func SynthBody(filename string, values map[string]cty.Value) hcl.Body { + return synthBody{ + Filename: filename, + Values: values, + } +} + +type synthBody struct { + Filename string + Values map[string]cty.Value +} + +func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + content, remain, diags := b.PartialContent(schema) + remainS := remain.(synthBody) + for name := range remainS.Values { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Unsupported attribute", + Detail: fmt.Sprintf("An attribute named %q is not expected here.", name), + Subject: b.synthRange().Ptr(), + }) + } + return content, diags +} + +func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + var diags hcl.Diagnostics + content := &hcl.BodyContent{ + Attributes: make(hcl.Attributes), + MissingItemRange: b.synthRange(), + } + + remainValues := make(map[string]cty.Value) + for attrName, val := range b.Values { + remainValues[attrName] = val + } + + for _, attrS := range schema.Attributes { + delete(remainValues, attrS.Name) + val, defined := b.Values[attrS.Name] + if !defined { + if attrS.Required { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing required attribute", + Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), + Subject: b.synthRange().Ptr(), + }) + } + continue + } + content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val) + } + + // We just ignore blocks altogether, because this body type never has + // nested blocks. + + remain := synthBody{ + Filename: b.Filename, + Values: remainValues, + } + + return content, remain, diags +} + +func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + ret := make(hcl.Attributes) + for name, val := range b.Values { + ret[name] = b.synthAttribute(name, val) + } + return ret, nil +} + +func (b synthBody) MissingItemRange() hcl.Range { + return b.synthRange() +} + +func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute { + rng := b.synthRange() + return &hcl.Attribute{ + Name: name, + Expr: &hclsyntax.LiteralValueExpr{ + Val: val, + SrcRange: rng, + }, + NameRange: rng, + Range: rng, + } +} + +func (b synthBody) synthRange() hcl.Range { + return hcl.Range{ + Filename: b.Filename, + Start: hcl.Pos{Line: 1, Column: 1}, + End: hcl.Pos{Line: 1, Column: 1}, + } +} diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go new file mode 100644 index 00000000..5fbde431 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/configs/util.go @@ -0,0 +1,63 @@ +package configs + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" +) + +// exprIsNativeQuotedString determines whether the given expression looks like +// it's a quoted string in the HCL native syntax. +// +// This should be used sparingly only for situations where our legacy HCL +// decoding would've expected a keyword or reference in quotes but our new +// decoding expects the keyword or reference to be provided directly as +// an identifier-based expression. +func exprIsNativeQuotedString(expr hcl.Expression) bool { + _, ok := expr.(*hclsyntax.TemplateExpr) + return ok +} + +// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is +// equivalent except that any required attributes are forced to not be required. 
+//
+// This is useful for dealing with "override" config files, which are allowed
+// to omit things that they don't wish to override from the main configuration.
+//
+// The returned schema may have some pointers in common with the given schema,
+// so neither the given schema nor the returned schema should be modified after
+// using this function in order to avoid confusion.
+//
+// Overrides are rarely used, so it's recommended to just create the override
+// schema on the fly only when it's needed, rather than storing it in a global
+// variable as we tend to do for a primary schema.
+func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema {
+	ret := &hcl.BodySchema{
+		Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
+		Blocks:     schema.Blocks,
+	}
+
+	for i, attrS := range schema.Attributes {
+		ret.Attributes[i] = attrS
+		ret.Attributes[i].Required = false
+	}
+
+	return ret
+}
+
+// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that
+// is equivalent except that it accepts an additional block type "dynamic" with
+// a single label, used to recognize usage of the HCL dynamic block extension.
+func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema {
+	ret := &hcl.BodySchema{
+		Attributes: schema.Attributes,
+		Blocks:     make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
+	}
+
+	copy(ret.Blocks, schema.Blocks)
+	ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
+		Type:       "dynamic",
+		LabelNames: []string{"type"},
+	})
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
new file mode 100644
index 00000000..204efd12
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
@@ -0,0 +1,45 @@
+package configs
+
+// VariableTypeHint is an enumeration used for the Variable.TypeHint field,
+// which is an incompletely-specified type for the variable that is used
+// as a hint for whether a value provided in an ambiguous context (on the
+// command line or in an environment variable) should be taken literally as a
+// string or parsed as an HCL expression to produce a data structure.
+//
+// The type hint is applied to runtime values as well, but since it does not
+// accurately describe a precise type it is not fully sufficient to infer
+// the dynamic type of a value passed through a variable.
+//
+// These hints use inaccurate terminology for historical reasons. Full details
+// are in the documentation for each constant in this enumeration, but in
+// summary:
+//
+//     TypeHintString requires a primitive type
+//     TypeHintList requires a type that could be converted to a tuple
+//     TypeHintMap requires a type that could be converted to an object
+type VariableTypeHint rune
+
+//go:generate stringer -type VariableTypeHint
+
+// TypeHintNone indicates the absence of a type hint. Values specified in
+// ambiguous contexts will be treated as literal strings, as if TypeHintString
+// were selected, but no runtime value checks will be applied. This is a
+// reasonable type hint for a module that is never intended to be used at the
+// top-level of a configuration, since descendant modules never receive values
+// from ambiguous contexts.
+const TypeHintNone VariableTypeHint = 0
+
+// TypeHintString indicates that a value provided in an ambiguous context
+// should be treated as a literal string, and additionally requires that the
+// runtime value for the variable is of a primitive type (string, number, bool).
+const TypeHintString VariableTypeHint = 'S'
+
+// TypeHintList indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of a tuple, list, or set type.
+const TypeHintList VariableTypeHint = 'L'
+
+// TypeHintMap indicates that a value provided in an ambiguous context should
+// be treated as an HCL expression, and additionally requires that the
+// runtime value for the variable is of an object or map type.
+const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
new file mode 100644
index 00000000..2b50428c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
@@ -0,0 +1,39 @@
+// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
+
+package configs
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[TypeHintNone-0]
+	_ = x[TypeHintString-83]
+	_ = x[TypeHintList-76]
+	_ = x[TypeHintMap-77]
+}
+
+const (
+	_VariableTypeHint_name_0 = "TypeHintNone"
+	_VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
+	_VariableTypeHint_name_2 = "TypeHintString"
+)
+
+var (
+	_VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
+)
+
+func (i VariableTypeHint) String() string {
+	switch {
+	case i == 0:
+		return _VariableTypeHint_name_0
+	case 76 <= i && i <= 77:
+		i -= 76
+		return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
+	case i == 83:
+		return _VariableTypeHint_name_2
+	default:
+		return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
new file mode 100644
index 00000000..e40ce163
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
@@ -0,0 +1,71 @@
+package configs
+
+import (
+	"fmt"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+)
+
+// VersionConstraint represents a version constraint on some resource
+// (e.g. Terraform Core, a provider, a module, ...) that carries with it
+// a source range so that a helpful diagnostic can be printed in the event
+// that a particular constraint does not match.
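+//
+// For example (illustrative values, not taken from this package): the
+// constraint string ">= 1.2.0, < 2.0.0" decodes into a version.Constraints
+// that accepts 1.3.5 but rejects 2.0.1:
+//
+//     cs, _ := version.NewConstraint(">= 1.2.0, < 2.0.0")
+//     ok := cs.Check(version.Must(version.NewVersion("1.3.5"))) // ok == true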
+type VersionConstraint struct { + Required version.Constraints + DeclRange hcl.Range +} + +func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) { + ret := VersionConstraint{ + DeclRange: attr.Range, + } + + val, diags := attr.Expr.Value(nil) + if diags.HasErrors() { + return ret, diags + } + var err error + val, err = convert.Convert(val, cty.String) + if err != nil { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("A string value is required for %s.", attr.Name), + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + if val.IsNull() { + // A null version constraint is strange, but we'll just treat it + // like an empty constraint set. + return ret, diags + } + + if !val.IsWhollyKnown() { + // If there is a syntax error, HCL sets the value of the given attribute + // to cty.DynamicVal. A diagnostic for the syntax error will already + // bubble up, so we will move forward gracefully here. + return ret, diags + } + + constraintStr := val.AsString() + constraints, err := version.NewConstraint(constraintStr) + if err != nil { + // NewConstraint doesn't return user-friendly errors, so we'll just + // ignore the provided error and produce our own generic one. + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid version constraint", + Detail: "This string does not use correct version constraint syntax.", // Not very actionable :( + Subject: attr.Expr.Range().Ptr(), + }) + return ret, diags + } + + ret.Required = constraints + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go index f8776bc5..77c67eff 100644 --- a/vendor/github.com/hashicorp/terraform/dag/dag.go +++ b/vendor/github.com/hashicorp/terraform/dag/dag.go @@ -5,6 +5,8 @@ import ( "sort" "strings" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/go-multierror" ) @@ -15,7 +17,7 @@ type AcyclicGraph struct { } // WalkFunc is the callback used for walking the graph. -type WalkFunc func(Vertex) error +type WalkFunc func(Vertex) tfdiags.Diagnostics // DepthWalkFunc is a walk function that also receives the current depth of the // walk as an argument @@ -106,7 +108,7 @@ func (g *AcyclicGraph) TransitiveReduction() { uTargets := g.DownEdges(u) vs := AsVertexList(g.DownEdges(u)) - g.DepthFirstWalk(vs, func(v Vertex, d int) error { + g.depthFirstWalk(vs, false, func(v Vertex, d int) error { shared := uTargets.Intersection(g.DownEdges(v)) for _, vPrime := range AsVertexList(shared) { g.RemoveEdge(BasicEdge(u, vPrime)) @@ -161,9 +163,9 @@ func (g *AcyclicGraph) Cycles() [][]Vertex { } // Walk walks the graph, calling your callback as each node is visited. -// This will walk nodes in parallel if it can. Because the walk is done -// in parallel, the error returned will be a multierror. -func (g *AcyclicGraph) Walk(cb WalkFunc) error { +// This will walk nodes in parallel if it can. The resulting diagnostics +// contains problems from all graphs visited, in no particular order. +func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics { defer g.debug.BeginOperation(typeWalk, "").End("") w := &Walker{Callback: cb, Reverse: true} @@ -187,9 +189,18 @@ type vertexAtDepth struct { } // depthFirstWalk does a depth-first walk of the graph starting from -// the vertices in start. 
This is not exported now but it would make sense -// to export this publicly at some point. +// the vertices in start. func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { + return g.depthFirstWalk(start, true, f) +} + +// This internal method provides the option of not sorting the vertices during +// the walk, which we use for the Transitive reduction. +// Some configurations can lead to fully-connected subgraphs, which makes our +// transitive reduction algorithm O(n^3). This is still passable for the size +// of our graphs, but the additional n^2 sort operations would make this +// uncomputable in a reasonable amount of time. +func (g *AcyclicGraph) depthFirstWalk(start []Vertex, sorted bool, f DepthWalkFunc) error { defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("") seen := make(map[Vertex]struct{}) @@ -219,7 +230,11 @@ func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error { // Visit targets of this in a consistent order. targets := AsVertexList(g.DownEdges(current.Vertex)) - sort.Sort(byVertexName(targets)) + + if sorted { + sort.Sort(byVertexName(targets)) + } + for _, t := range targets { frontier = append(frontier, &vertexAtDepth{ Vertex: t, diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go index 16d5dd6d..c567d271 100644 --- a/vendor/github.com/hashicorp/terraform/dag/marshal.go +++ b/vendor/github.com/hashicorp/terraform/dag/marshal.go @@ -273,6 +273,9 @@ func (e *encoder) Encode(i interface{}) { } func (e *encoder) Add(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddVertex: newMarshalVertex(v), @@ -281,6 +284,9 @@ func (e *encoder) Add(v Vertex) { // Remove records the removal of Vertex v. func (e *encoder) Remove(v Vertex) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveVertex: newMarshalVertex(v), @@ -288,6 +294,9 @@ func (e *encoder) Remove(v Vertex) { } func (e *encoder) Connect(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, AddEdge: newMarshalEdge(edge), @@ -295,6 +304,9 @@ func (e *encoder) Connect(edge Edge) { } func (e *encoder) RemoveEdge(edge Edge) { + if e == nil { + return + } e.Encode(marshalTransform{ Type: typeTransform, RemoveEdge: newMarshalEdge(edge), diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go index 3929c9d0..92b42151 100644 --- a/vendor/github.com/hashicorp/terraform/dag/set.go +++ b/vendor/github.com/hashicorp/terraform/dag/set.go @@ -81,6 +81,20 @@ func (s *Set) Difference(other *Set) *Set { return result } +// Filter returns a set that contains the elements from the receiver +// where the given callback returns true. +func (s *Set) Filter(cb func(interface{}) bool) *Set { + result := new(Set) + + for _, v := range s.m { + if cb(v) { + result.Add(v) + } + } + + return result +} + // Len is the number of items in the set. 
func (s *Set) Len() int { if s == nil { diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go index a74f1142..1c926c2c 100644 --- a/vendor/github.com/hashicorp/terraform/dag/walk.go +++ b/vendor/github.com/hashicorp/terraform/dag/walk.go @@ -2,12 +2,11 @@ package dag import ( "errors" - "fmt" "log" "sync" "time" - "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/tfdiags" ) // Walker is used to walk every vertex of a graph in parallel. @@ -54,10 +53,15 @@ type Walker struct { // if new vertices are added. wait sync.WaitGroup - // errMap contains the errors recorded so far for execution. Reading - // and writing should hold errLock. - errMap map[Vertex]error - errLock sync.Mutex + // diagsMap contains the diagnostics recorded so far for execution, + // and upstreamFailed contains all the vertices whose problems were + // caused by upstream failures, and thus whose diagnostics should be + // excluded from the final set. + // + // Readers and writers of either map must hold diagsLock. + diagsMap map[Vertex]tfdiags.Diagnostics + upstreamFailed map[Vertex]struct{} + diagsLock sync.Mutex } type walkerVertex struct { @@ -98,31 +102,30 @@ type walkerVertex struct { // user-returned error. var errWalkUpstream = errors.New("upstream dependency failed") -// Wait waits for the completion of the walk and returns any errors ( -// in the form of a multierror) that occurred. Update should be called -// to populate the walk with vertices and edges prior to calling this. +// Wait waits for the completion of the walk and returns diagnostics describing +// any problems that arose. Update should be called to populate the walk with +// vertices and edges prior to calling this. // // Wait will return as soon as all currently known vertices are complete. // If you plan on calling Update with more vertices in the future, you // should not call Wait until after this is done. -func (w *Walker) Wait() error { +func (w *Walker) Wait() tfdiags.Diagnostics { // Wait for completion w.wait.Wait() - // Grab the error lock - w.errLock.Lock() - defer w.errLock.Unlock() - - // Build the error - var result error - for v, err := range w.errMap { - if err != nil && err != errWalkUpstream { - result = multierror.Append(result, fmt.Errorf( - "%s: %s", VertexName(v), err)) + var diags tfdiags.Diagnostics + w.diagsLock.Lock() + for v, vDiags := range w.diagsMap { + if _, upstream := w.upstreamFailed[v]; upstream { + // Ignore diagnostics for nodes that had failed upstreams, since + // the downstream diagnostics are likely to be redundant. + continue } + diags = diags.Append(vDiags) } + w.diagsLock.Unlock() - return result + return diags } // Update updates the currently executing walk with the given graph. @@ -136,6 +139,7 @@ func (w *Walker) Wait() error { // Multiple Updates can be called in parallel. Update can be called at any // time during a walk. 
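+//
+// A hypothetical usage sketch (the callback body is an assumption; Callback,
+// Update and Wait are the API surface shown in this package):
+//
+//     w := &Walker{Callback: func(v Vertex) tfdiags.Diagnostics {
+//         var diags tfdiags.Diagnostics
+//         // ... visit v, appending any problems to diags ...
+//         return diags
+//     }}
+//     w.Update(g)       // supply or update the graph being walked
+//     diags := w.Wait() // block until all known vertices complete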
func (w *Walker) Update(g *AcyclicGraph) { + log.Print("[TRACE] dag/walk: updating graph") var v, e *Set if g != nil { v, e = g.vertices, g.edges @@ -166,7 +170,7 @@ func (w *Walker) Update(g *AcyclicGraph) { w.wait.Add(1) // Add to our own set so we know about it already - log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: added new vertex: %q", VertexName(v)) w.vertices.Add(raw) // Initialize the vertex info @@ -198,7 +202,7 @@ func (w *Walker) Update(g *AcyclicGraph) { // Delete it out of the map delete(w.vertexMap, v) - log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v)) + log.Printf("[TRACE] dag/walk: removed vertex: %q", VertexName(v)) w.vertices.Delete(raw) } @@ -229,7 +233,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: added edge: %q waiting on %q", + "[TRACE] dag/walk: added edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Add(raw) } @@ -253,7 +257,7 @@ func (w *Walker) Update(g *AcyclicGraph) { changedDeps.Add(waiter) log.Printf( - "[DEBUG] dag/walk: removed edge: %q waiting on %q", + "[TRACE] dag/walk: removed edge: %q waiting on %q", VertexName(waiter), VertexName(dep)) w.edges.Delete(raw) } @@ -296,7 +300,7 @@ func (w *Walker) Update(g *AcyclicGraph) { info.depsCancelCh = cancelCh log.Printf( - "[DEBUG] dag/walk: dependencies changed for %q, sending new deps", + "[TRACE] dag/walk: dependencies changed for %q, sending new deps", VertexName(v)) // Start the waiter @@ -352,7 +356,7 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { } // Check if we have updated dependencies. This can happen if the - // dependencies were satisfied exactly prior to an Update occuring. + // dependencies were satisfied exactly prior to an Update occurring. // In that case, we'd like to take into account new dependencies // if possible. info.DepsLock.Lock() @@ -381,25 +385,34 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) { } // Run our callback or note that our upstream failed - var err error + var diags tfdiags.Diagnostics + var upstreamFailed bool if depsSuccess { - log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v)) - err = w.Callback(v) + log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v)) + diags = w.Callback(v) } else { - log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v)) - err = errWalkUpstream + log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v)) + // This won't be displayed to the user because we'll set upstreamFailed, + // but we need to ensure there's at least one error in here so that + // the failures will cascade downstream. + diags = diags.Append(errors.New("upstream dependencies failed")) + upstreamFailed = true } - // Record the error - if err != nil { - w.errLock.Lock() - defer w.errLock.Unlock() - - if w.errMap == nil { - w.errMap = make(map[Vertex]error) - } - w.errMap[v] = err + // Record the result (we must do this after execution because we mustn't + // hold diagsLock while visiting a vertex.) 
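+	// Note: diagsMap and upstreamFailed are allocated lazily below, which
+	// keeps a zero-value Walker usable without explicit initialization.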
+	w.diagsLock.Lock()
+	if w.diagsMap == nil {
+		w.diagsMap = make(map[Vertex]tfdiags.Diagnostics)
+	}
+	w.diagsMap[v] = diags
+	if w.upstreamFailed == nil {
+		w.upstreamFailed = make(map[Vertex]struct{})
	}
+	if upstreamFailed {
+		w.upstreamFailed[v] = struct{}{}
+	}
+	w.diagsLock.Unlock()
 }
 
 func (w *Walker) waitDeps(
@@ -407,6 +420,7 @@ func (w *Walker) waitDeps(
 	deps map[Vertex]<-chan struct{},
 	doneCh chan<- bool,
 	cancelCh <-chan struct{}) {
+
 	// For each dependency given to us, wait for it to complete
 	for dep, depCh := range deps {
 	DepSatisfied:
@@ -423,17 +437,17 @@
 				return
 
 			case <-time.After(time.Second * 5):
-				log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
+				log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
 					VertexName(v), VertexName(dep))
 			}
 		}
 	}
 
 	// Dependencies satisfied! We need to check if any errored
-	w.errLock.Lock()
-	defer w.errLock.Unlock()
-	for dep, _ := range deps {
-		if w.errMap[dep] != nil {
+	w.diagsLock.Lock()
+	defer w.diagsLock.Unlock()
+	for dep := range deps {
+		if w.diagsMap[dep].HasErrors() {
 			// One of our dependencies failed, so return false
 			doneCh <- false
 			return
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
index e325077e..1449065e 100644
--- a/vendor/github.com/hashicorp/terraform/flatmap/expand.go
+++ b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
@@ -37,7 +37,7 @@ func Expand(m map[string]string, key string) interface{} {
 
 	// Check if this is a prefix in the map
 	prefix := key + "."
-	for k, _ := range m {
+	for k := range m {
 		if strings.HasPrefix(k, prefix) {
 			return expandMap(m, prefix)
 		}
@@ -52,9 +52,22 @@ func expandArray(m map[string]string, prefix string) []interface{} {
 		panic(err)
 	}
 
-	// The Schema "Set" type stores its values in an array format, but using
-	// numeric hash values instead of ordinal keys. Take the set of keys
-	// regardless of value, and expand them in numeric order.
+	// If the number of elements in this array is 0, then return an
+	// empty slice as there is nothing to expand. Trying to expand it
+	// anyway could lead to crashes as any child maps, arrays or sets
+	// that no longer exist are still shown as empty with a count of 0.
+	if num == 0 {
+		return []interface{}{}
+	}
+
+	// NOTE: "num" is not necessarily accurate, e.g. if a user tampers
+	// with state, so the following code should not crash when given a
+	// number of items more or less than what's given in num. The
+	// num key is mainly just a hint that this is a list or set.
+
+	// The Schema "Set" type stores its values in an array format, but
+	// using numeric hash values instead of ordinal keys. Take the set
+	// of keys regardless of value, and expand them in numeric order.
 	// See GH-11042 for more details.
 	keySet := map[int]bool{}
 	computed := map[string]bool{}
@@ -93,7 +106,7 @@
 	}
 	sort.Ints(keysList)
 
-	result := make([]interface{}, num)
+	result := make([]interface{}, len(keysList))
 	for i, key := range keysList {
 		keyString := strconv.Itoa(key)
 		if computed[keyString] {
@@ -106,8 +119,14 @@
 }
 
 func expandMap(m map[string]string, prefix string) map[string]interface{} {
+	// Submaps may not have a '%' key, so we can't count on this value being
+	// here. If we don't have a count, just proceed as if we have a map.
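+	// For illustration (assumed keys, not taken from this package's tests):
+	// the flatmap {"tags.%": "2", "tags.a": "b", "tags.c": "d"} expands, for
+	// prefix "tags.", into map[string]interface{}{"a": "b", "c": "d"}.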
+ if count, ok := m[prefix+"%"]; ok && count == "0" { + return map[string]interface{}{} + } + result := make(map[string]interface{}) - for k, _ := range m { + for k := range m { if !strings.HasPrefix(k, prefix) { continue } @@ -125,6 +144,7 @@ func expandMap(m map[string]string, prefix string) map[string]interface{} { if key == "%" { continue } + result[key] = Expand(m, k[:len(prefix)+len(key)]) } diff --git a/vendor/github.com/hashicorp/terraform/go.mod b/vendor/github.com/hashicorp/terraform/go.mod new file mode 100644 index 00000000..b491d159 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/go.mod @@ -0,0 +1,130 @@ +module github.com/hashicorp/terraform + +require ( + cloud.google.com/go v0.36.0 + github.com/Azure/azure-sdk-for-go v21.3.0+incompatible + github.com/Azure/go-autorest v10.15.4+incompatible + github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 // indirect + github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect + github.com/agext/levenshtein v1.2.2 + github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 // indirect + github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a + github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 + github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible + github.com/apparentlymart/go-cidr v1.0.0 + github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 + github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 + github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/aws/aws-sdk-go v1.20.4 + github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f // indirect + github.com/blang/semver v3.5.1+incompatible + github.com/boltdb/bolt v1.3.1 // indirect + github.com/chzyer/logex v1.1.10 // indirect + github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect + github.com/coreos/bbolt v1.3.0 // indirect + github.com/coreos/etcd v3.3.10+incompatible + github.com/coreos/go-semver v0.2.0 // indirect + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/davecgh/go-spew v1.1.1 + github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31 // indirect + github.com/dylanmei/iso8601 v0.1.0 // indirect + github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 + github.com/go-test/deep v1.0.1 + github.com/gogo/protobuf v1.2.0 // indirect + github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 // indirect + github.com/golang/mock v1.3.1 + github.com/golang/protobuf v1.3.0 + github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect + github.com/google/go-cmp v0.3.0 + github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 + github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 // indirect + github.com/gorilla/websocket v1.4.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.5.1 // indirect + 
github.com/hashicorp/aws-sdk-go-base v0.2.0 + github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 + github.com/hashicorp/errwrap v1.0.0 + github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2 + github.com/hashicorp/go-checkpoint v0.5.0 + github.com/hashicorp/go-cleanhttp v0.5.0 + github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e + github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f + github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa // indirect + github.com/hashicorp/go-msgpack v0.5.4 // indirect + github.com/hashicorp/go-multierror v1.0.0 + github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26 + github.com/hashicorp/go-retryablehttp v0.5.2 + github.com/hashicorp/go-rootcerts v1.0.0 + github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 // indirect + github.com/hashicorp/go-tfe v0.3.16 + github.com/hashicorp/go-uuid v1.0.1 + github.com/hashicorp/go-version v1.1.0 + github.com/hashicorp/golang-lru v0.5.0 // indirect + github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f + github.com/hashicorp/hcl2 v0.0.0-20190702185634-5b39d9ff3a9a + github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 + github.com/hashicorp/logutils v1.0.0 + github.com/hashicorp/memberlist v0.1.0 // indirect + github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb // indirect + github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70 + github.com/hashicorp/vault v0.10.4 + github.com/jonboulle/clockwork v0.1.0 // indirect + github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 + github.com/json-iterator/go v1.1.5 // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 + github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba // indirect + github.com/lib/pq v1.0.0 + github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 + github.com/marstr/guid v1.1.0 // indirect + github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b + github.com/mattn/go-colorable v0.1.1 + github.com/mattn/go-shellwords v1.0.4 + github.com/miekg/dns v1.0.8 // indirect + github.com/mitchellh/cli v1.0.0 + github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db + github.com/mitchellh/copystructure v1.0.0 + github.com/mitchellh/go-homedir v1.0.0 + github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb + github.com/mitchellh/go-wordwrap v1.0.0 + github.com/mitchellh/hashstructure v1.0.0 + github.com/mitchellh/mapstructure v1.1.2 + github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 + github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 + github.com/mitchellh/reflectwalk v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 + github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c // indirect + github.com/pkg/errors 
v0.0.0-20170505043639-c605e284fe17 + github.com/posener/complete v1.2.1 + github.com/satori/go.uuid v1.2.0 + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/sirupsen/logrus v1.1.1 // indirect + github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect + github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect + github.com/soheilhy/cmux v0.1.4 // indirect + github.com/spf13/afero v1.2.1 + github.com/terraform-providers/terraform-provider-openstack v1.15.0 + github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 // indirect + github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 // indirect + github.com/vmihailenco/msgpack v4.0.1+incompatible // indirect + github.com/xanzy/ssh-agent v0.2.1 + github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 // indirect + github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 + github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f + github.com/zclconf/go-cty-yaml v0.1.0 + go.uber.org/atomic v1.3.2 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.9.1 // indirect + golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 + golang.org/x/net v0.0.0-20190502183928-7f726cade0ab + golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9 + google.golang.org/api v0.1.0 + google.golang.org/grpc v1.18.0 + gopkg.in/ini.v1 v1.42.0 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/hashicorp/terraform/go.sum b/vendor/github.com/hashicorp/terraform/go.sum new file mode 100644 index 00000000..3c39d6c7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/go.sum @@ -0,0 +1,557 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8= +cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/Azure/azure-sdk-for-go v21.3.0+incompatible h1:YFvAka2WKAl2xnJkYV1e1b7E2z88AgFszDzWU18ejMY= +github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v10.15.4+incompatible h1:q+DRrRdbCnkY7f2WxQBx58TwCGkEdMAK/hkZ10g0Pzk= +github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA= 
+github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8= +github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4= +github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 h1:tuQ7w+my8a8mkwN7x2TSd7OzTjkZ7rAeSyH4xncuAMI= +github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= +github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 h1:LoeFxdq5zUCBQPhbQKE6zvoGwHMxCBlqwbH9+9kHoHA= +github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a h1:APorzFpCcv6wtD5vmRWYqNm4N55kbepL7c7kTq9XI6A= +github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70 h1:FrF4uxA24DF3ARNXVbUin3wa5fDLaB1Cy8mKks/LRz4= +github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= +github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible h1:ABQ7FF+IxSFHDMOTtjCfmMDMHiCq6EsAoCV/9sFinaM= +github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e h1:ptBAamGVd6CfRsUtyHD+goy2JGhv1QC32v3gqM8mYAM= +github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk= +github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0 h1:JaCC8jz0zdMLk2m+qCCVLLLM/PL93p84w4pK3aJWj60= +github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M= +github.com/apparentlymart/go-cidr v1.0.0 h1:lGDvXx8Lv9QHjrAVP7jyzleG4F9+FkRhJcEsDFxeb8w= +github.com/apparentlymart/go-cidr v1.0.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3 h1:ZSTrOEhiM5J5RFxEaFvMZVEAM1KvT1YzbEOwB2EAGjA= +github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 
h1:MzVXffFUye+ZcSR6opIgz9Co7WcDx6ZcY+RjfFHoA0I= +github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM= +github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0= +github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= +github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.16.36 h1:POeH34ZME++pr7GBGh+ZO6Y5kOwSMQpqp5BGUgooJ6k= +github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.4 h1:czX3oqFyqz/AELrK/tneNuyZgNIrWnyqP+iQXsQ32E0= +github.com/aws/aws-sdk-go v1.20.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA= +github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= 
+github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d h1:aG5FcWiZTOhPQzYIxwxSR1zEOxzL32fwr1CsaCfhO6w= +github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.0 h1:HIgH5xUWXT914HCI671AxuTTqjj64UOFr7pHn48LUTI= +github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc= +github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31 h1:Dzuw9GtbmllUqEcoHfScT9YpKFUssSiZ5PgZkIGf/YQ= +github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI= +github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ= +github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1 h1:r1oACdS2XYiAWcfF8BJXkoU8l1J71KehGR+d99yWEDA= +github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg= +github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= +github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= 
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc= +github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= +github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 h1:Pu+HW4kcQozw0QyrTTgLE+3RXNqFhQNNzhbnoLFL83c= +github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4= +github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.5.1 h1:3scN4iuXkNOyP98jF55Lv8a9j1o/IwvnDIZ0LHJK1nk= +github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/aws-sdk-go-base v0.2.0 h1:5bjZnWCvQg9Im5CHZr9t90IaFC4uvVlMl2fTh23IoCk= +github.com/hashicorp/aws-sdk-go-base v0.2.0/go.mod h1:ZIWACGGi0N7a4DZbf15yuE1JQORmWLtBcVM6F5SXNFU= +github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+1kc9/gfSI2pxRERDsTk/lNGolwHn8= +github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI= +github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2 h1:VBRx+yPYUZaobnn5ANBcOUf4hhWpTHSQgftG4TcDkhI= +github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA= 
+github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e h1:6krcdHPiS+aIP9XKzJzSahfjD7jG7Z+4+opm0z39V1M= +github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho= +github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa h1:0nA8i+6Rwqaq9xlpmVxxTwk6rxiEhX+E6Wh4vPNHiS8= +github.com/hashicorp/go-immutable-radix v0.0.0-20180129170900-7f3cd4390caa/go.mod h1:6ij3Z20p+OhOkCSrA0gImAWoHYQRGbnlcuk6XYTiaRw= +github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA= +github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26 h1:hRho44SAoNu1CBtn5r8Q9J3rCs4ZverWZ4R+UeeNuWM= +github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4= +github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-slug v0.3.0 h1:L0c+AvH/J64iMNF4VqRaRku2DMTEuHioPVS7kMjWIU8= +github.com/hashicorp/go-slug v0.3.0/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8= +github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86 h1:7YOlAIO2YWnJZkQp7B5eFykaIY7C9JndqAFQyVV5BhM= +github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-tfe v0.3.16 h1:GS2yv580p0co4j3FBVaC6Zahd9mxdCGehhJ0qqzFMH0= +github.com/hashicorp/go-tfe v0.3.16/go.mod h1:SuPHR+OcxvzBZNye7nGPfwZTEyd3rWPfLVbCgyZPezM= +github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= +github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws= +github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE= +github.com/hashicorp/hcl2 v0.0.0-20190618163856-0b64543c968c h1:P96avlEdjyi6kpx6kTbTbuQb5GuZvVTrLK9FWKwTy6A= +github.com/hashicorp/hcl2 v0.0.0-20190618163856-0b64543c968c/go.mod h1:FSQTwDi9qesxGBsII2VqhIzKQ4r0bHvBkOczWfD7llg= +github.com/hashicorp/hcl2 v0.0.0-20190702185634-5b39d9ff3a9a h1:1KfDwkIXrxrfMpqwuW//ujObiYNuR2DqaETSK2NB8Ug= +github.com/hashicorp/hcl2 v0.0.0-20190702185634-5b39d9ff3a9a/go.mod h1:FSQTwDi9qesxGBsII2VqhIzKQ4r0bHvBkOczWfD7llg= +github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI= +github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE= +github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/memberlist v0.1.0 h1:qSsCiC0WYD39lbSitKNt40e30uorm2Ss/d4JGU1hzH8= +github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE= +github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb h1:ZbgmOQt8DOg796figP87/EFCVx2v2h9yRvwHF/zceX4= +github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE= +github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70 h1:oZm5nE11yhzsTRz/YrUyDMSvixePqjoZihwn8ipuOYI= +github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70/go.mod h1:ItvqtvbC3K23FFET62ZwnkwtpbKZm8t8eMcWjmVVjD8= +github.com/hashicorp/vault v0.10.4 h1:4x0lHxui/ZRp/B3E0Auv1QNBJpzETqHR2kQD3mHSBJU= +github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= 
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg= +github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1 h1:PJPDf8OUfOK1bb/NeTKd4f1QXZItOX389VN3B6qC8ro= +github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoMeNPHhPFt1SBt1VMznA3Gnz9d0qj+co= +github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4= +github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48= +github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84= +github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI= +github.com/marstr/guid 
v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM= +github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc= +github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y= +github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.4 h1:xmZZyxuP+bYKAKkA9ABYXVNJ+G/Wf3R8d8vAP3LDJJk= +github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= +github.com/miekg/dns v1.0.8 h1:Zi8HNpze3NeRWH1PQV6O71YcvJRQ6j0lORO6DAEmAAI= +github.com/miekg/dns v1.0.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ= +github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw= +github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb h1:GRiLv4rgyqjqzxbhJke65IYUf4NCOOvrPOJbV/sPxkM= +github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg= +github.com/mitchellh/go-testing-interface 
v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y= +github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4= +github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I= +github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8= +github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo= +github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= +github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= +github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 h1:m3CEgv3ah1Rhy82L+c0QG/U3VyY1UsvsIdkh0/rU97Y= +github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk= 
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo= +github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= +github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= +github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= +github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= +github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw= +github.com/shurcooL/gopherjslib 
v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI= +github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU= +github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag= +github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg= +github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw= +github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y= +github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= +github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q= +github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ= +github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I= +github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0= +github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ= +github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk= +github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= +github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= +github.com/sirupsen/logrus v1.1.1 h1:VzGj7lhU7KEB9e9gMpAV/v5XT2NVSvLJhJLCWbnkgXg= +github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= +github.com/spf13/afero v1.2.1 h1:qgMbHoJbPbw579P+1zVY+6n4nIFuIchaIjzZ/I/Yq8M= +github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI= +github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/terraform-providers/terraform-provider-openstack v1.15.0 h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y= +github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6 h1:lYIiVDtZnyTWlNwiAxLj0bbpTcx1BWCFhXjfsvmPdNc= +github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ= +github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= +github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok= +github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU= +github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18 h1:MPPkRncZLN9Kh4MEFmbnK4h3BD7AUmskWv2+EeZJCCs= +github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs= +github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/zclconf/go-cty v0.0.0-20181129180422-88fbe721e0f8/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec h1:MSeYjmyjucsFbecMTxg63ASg23lcSARP/kr9sClTFfk= +github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk= +github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= +github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f h1:sq2p8SN6ji66CFEQFIWLlD/gFmGtr5hBrOzv5nLlGfA= +github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s= 
+github.com/zclconf/go-cty-yaml v0.1.0 h1:OP5nkApyAuXB88t8mRUqxD9gbKZocSLuVovrBAt8z10= +github.com/zclconf/go-cty-yaml v0.1.0/go.mod h1:Lk26EcRlO3XbaQ8U2fxIJbEtbgEteSZFUpEr3XFTtsU= +go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/crypto v0.0.0-20180816225734-aabede6cba87/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2 h1:NwxKRvbkH5MsNkvOtPZi3/3kmI8CAzs3mtv+GLQMkNo= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f h1:qWFY9ZxP3tfI37wYIs/MnIAqK0vlXp1xnYEa5HxFSSY= +golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76 h1:xx5MUFyRQRbPk6VjWjIE1epE/K5AoDD8QUN116NCy8k= +golang.org/x/net 
v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9 h1:pfyU+l9dEu0vZzDDMsdAKa1gZbJYEn6urYXj/+Xkz7s= +golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0 h1:bzeyCHgoAyjZjAhvTpks+qM7sdlh4cCSitmXeCEO3B4= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI= +google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8= +google.golang.org/genproto 
v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.42.0 h1:7N3gPTt50s8GuLortA00n8AqRTk75qOP98+mTPpgzRk= +gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/hashicorp/terraform/help.go b/vendor/github.com/hashicorp/terraform/help.go index cd4e6714..588a621d 100644 --- a/vendor/github.com/hashicorp/terraform/help.go +++ b/vendor/github.com/hashicorp/terraform/help.go @@ -32,7 +32,7 @@ func helpFunc(commands map[string]cli.CommandFactory) string { // website/source/docs/commands/index.html.markdown; if you // change this then consider updating that to match. helpText := fmt.Sprintf(` -Usage: terraform [--version] [--help] [args] +Usage: terraform [-version] [-help] [args] The available commands for execution are listed below. 
The most common, useful commands are shown first, followed by diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/compose.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/compose.go new file mode 100644 index 00000000..f7b31fb2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/compose.go @@ -0,0 +1,72 @@ +package customdiff + +import ( + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/schema" +) + +// All returns a CustomizeDiffFunc that runs all of the given +// CustomizeDiffFuncs and returns all of the errors produced. +// +// If one function produces an error, functions after it are still run. +// If this is not desirable, use function Sequence instead. +// +// If multiple functions return errors, the result is a multierror. +// +// For example: +// +// &schema.Resource{ +// // ... +// CustomizeDiff: customdiff.All( +// customdiff.ValidateChange("size", func (old, new, meta interface{}) error { +// // If we are increasing "size" then the new value must be +// // a multiple of the old value. +// if new.(int) <= old.(int) { +// return nil +// } +// if (new.(int) % old.(int)) != 0 { +// return fmt.Errorf("new size value must be an integer multiple of old value %d", old.(int)) +// } +// return nil +// }), +// customdiff.ForceNewIfChange("size", func (old, new, meta interface{}) bool { +// // "size" can only increase in-place, so we must create a new resource +// // if it is decreased. +// return new.(int) < old.(int) +// }), +// customdiff.ComputedIf("version_id", func (d *schema.ResourceDiff, meta interface{}) bool { +// // Any change to "content" causes a new "version_id" to be allocated. +// return d.HasChange("content") +// }), +// ), +// } +// +func All(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + var err error + for _, f := range funcs { + thisErr := f(d, meta) + if thisErr != nil { + err = multierror.Append(err, thisErr) + } + } + return err + } +} + +// Sequence returns a CustomizeDiffFunc that runs all of the given +// CustomizeDiffFuncs in sequence, stopping at the first one that returns +// an error and returning that error. +// +// If all functions succeed, the combined function also succeeds. +func Sequence(funcs ...schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + for _, f := range funcs { + err := f(d, meta) + if err != nil { + return err + } + } + return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/computed.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/computed.go new file mode 100644 index 00000000..a6fa1a82 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/computed.go @@ -0,0 +1,16 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +// ComputedIf returns a CustomizeDiffFunc that sets the given key's new value +// as computed if the given condition function returns true.
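For context on the compose.go helpers above, here is a minimal, self-contained sketch condensed from the example in All's own doc comment. The resourceDisk function and its "size" field are hypothetical wiring, not part of this patch:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/customdiff"
	"github.com/hashicorp/terraform/helper/schema"
)

// resourceDisk is a hypothetical resource whose "size" may only grow in
// place, and then only to an integer multiple of its previous value.
func resourceDisk() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"size": {Type: schema.TypeInt, Required: true},
		},
		// All runs every customization even if an earlier one fails,
		// and merges the errors into a single multierror.
		CustomizeDiff: customdiff.All(
			customdiff.ValidateChange("size", func(old, new, meta interface{}) error {
				if new.(int) > old.(int) && new.(int)%old.(int) != 0 {
					return fmt.Errorf("new size must be a multiple of old size %d", old.(int))
				}
				return nil
			}),
			// Shrinking cannot happen in place, so force a replacement.
			customdiff.ForceNewIfChange("size", func(old, new, meta interface{}) bool {
				return new.(int) < old.(int)
			}),
		),
	}
}
```

Swapping All for Sequence in the same position stops at the first error instead of collecting them.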
+func ComputedIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if f(d, meta) { + d.SetNewComputed(key) + } + return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/condition.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/condition.go new file mode 100644 index 00000000..2271c402 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/condition.go @@ -0,0 +1,60 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +// ResourceConditionFunc is a function type that makes a boolean decision based +// on an entire resource diff. +type ResourceConditionFunc func(d *schema.ResourceDiff, meta interface{}) bool + +// ValueChangeConditionFunc is a function type that makes a boolean decision +// by comparing two values. +type ValueChangeConditionFunc func(old, new, meta interface{}) bool + +// ValueConditionFunc is a function type that makes a boolean decision based +// on a given value. +type ValueConditionFunc func(value, meta interface{}) bool + +// If returns a CustomizeDiffFunc that calls the given condition +// function and then calls the given CustomizeDiffFunc only if the condition +// function returns true. +// +// This can be used to include conditional customizations when composing +// customizations using All and Sequence, but should generally be used only in +// simple scenarios. Prefer directly writing a CustomizeDiffFunc containing +// a conditional branch if the given CustomizeDiffFunc is already a +// locally-defined function, since this avoids obscuring the control flow. +func If(cond ResourceConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if cond(d, meta) { + return f(d, meta) + } + return nil + } +} + +// IfValueChange returns a CustomizeDiffFunc that calls the given condition +// function with the old and new values of the given key and then calls the +// given CustomizeDiffFunc only if the condition function returns true. +func IfValueChange(key string, cond ValueChangeConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + if cond(old, new, meta) { + return f(d, meta) + } + return nil + } +} + +// IfValue returns a CustomizeDiffFunc that calls the given condition +// function with the new values of the given key and then calls the +// given CustomizeDiffFunc only if the condition function returns true. +func IfValue(key string, cond ValueConditionFunc, f schema.CustomizeDiffFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if cond(d.Get(key), meta) { + return f(d, meta) + } + return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/doc.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/doc.go new file mode 100644 index 00000000..c6ad1199 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/doc.go @@ -0,0 +1,11 @@ +// Package customdiff provides a set of reusable and composable functions +// to enable more "declarative" use of the CustomizeDiff mechanism available +// for resources in package helper/schema. 
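The condition helpers in condition.go above add no behavior of their own; they only gate another CustomizeDiffFunc. A brief sketch, with hypothetical field names, of wrapping an expensive check with If (IfValueChange and IfValue follow the same shape):

```go
package example

import (
	"github.com/hashicorp/terraform/helper/customdiff"
	"github.com/hashicorp/terraform/helper/schema"
)

// Run a (hypothetical) remote image lookup only when "image" changes.
// If evaluates the ResourceConditionFunc first and skips the wrapped
// CustomizeDiffFunc entirely when it returns false.
var imageCheck = customdiff.If(
	func(d *schema.ResourceDiff, meta interface{}) bool {
		return d.HasChange("image")
	},
	func(d *schema.ResourceDiff, meta interface{}) error {
		// expensive validation would go here
		return nil
	},
)
```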
+// +// The intent of these helpers is to make the intent of a set of diff +// customizations easier to see, rather than lost in a sea of Go function +// boilerplate. They should _not_ be used in situations where they _obscure_ +// intent, e.g. by over-using the composition functions where a single +// function containing normal Go control flow statements would be more +// straightforward. +package customdiff diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/force_new.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/force_new.go new file mode 100644 index 00000000..29e02c98 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/force_new.go @@ -0,0 +1,40 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +// ForceNewIf returns a CustomizeDiffFunc that flags the given key as +// requiring a new resource if the given condition function returns true. +// +// The return value of the condition function is ignored if the old and new +// values of the field compare equal, since no attribute diff is generated in +// that case. +func ForceNewIf(key string, f ResourceConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + if f(d, meta) { + d.ForceNew(key) + } + return nil + } +} + +// ForceNewIfChange returns a CustomizeDiffFunc that flags the given key as +// requiring a new resource if the given condition function returns true. +// +// The return value of the condition function is ignored if the old and new +// values compare equal, since no attribute diff is generated in that case. +// +// This function is similar to ForceNewIf but provides the condition function +// only the old and new values of the given key, which leads to more compact +// and explicit code in the common case where the decision can be made with +// only the specific field value. +func ForceNewIfChange(key string, f ValueChangeConditionFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + if f(old, new, meta) { + d.ForceNew(key) + } + return nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/customdiff/validate.go b/vendor/github.com/hashicorp/terraform/helper/customdiff/validate.go new file mode 100644 index 00000000..bdf46f5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/customdiff/validate.go @@ -0,0 +1,38 @@ +package customdiff + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +// ValueChangeValidationFunc is a function type that validates the difference +// (or lack thereof) between two values, returning an error if the change +// is invalid. +type ValueChangeValidationFunc func(old, new, meta interface{}) error + +// ValueValidationFunc is a function type that validates a particular value, +// returning an error if the value is invalid. +type ValueValidationFunc func(value, meta interface{}) error + +// ValidateChange returns a CustomizeDiffFunc that applies the given validation +// function to the change for the given key, returning any error produced. 
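ForceNewIfChange already appeared in the compose.go example; the ForceNewIf variant in force_new.go above differs in seeing the whole diff, so the replacement decision can depend on other fields. A sketch with hypothetical attribute names:

```go
package example

import (
	"github.com/hashicorp/terraform/helper/customdiff"
	"github.com/hashicorp/terraform/helper/schema"
)

// Allow "subnet" to change in place only while the resource's
// (hypothetical) "stateful" flag is false; otherwise plan a replacement.
// Note the condition result is ignored when "subnet" itself is unchanged.
var subnetReplacement = customdiff.ForceNewIf("subnet",
	func(d *schema.ResourceDiff, meta interface{}) bool {
		return d.Get("stateful").(bool)
	},
)
```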
+func ValidateChange(key string, f ValueChangeValidationFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + old, new := d.GetChange(key) + return f(old, new, meta) + } +} + +// ValidateValue returns a CustomizeDiffFunc that applies the given validation +// function to the value of the given key, returning any error produced. +// +// This should generally not be used since it is functionally equivalent to +// a validation function applied directly to the schema attribute in question, +// but is provided for situations where composing multiple CustomizeDiffFuncs +// together makes intent clearer than spreading that validation across the +// schema. +func ValidateValue(key string, f ValueValidationFunc) schema.CustomizeDiffFunc { + return func(d *schema.ResourceDiff, meta interface{}) error { + val := d.Get(key) + return f(val, meta) + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go new file mode 100644 index 00000000..54899bc6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go @@ -0,0 +1,24 @@ +package didyoumean + +import ( + "github.com/agext/levenshtein" +) + +// NameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func NameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go index 64d8263e..6ccc5231 100644 --- a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go +++ b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go @@ -1,6 +1,8 @@ package hashcode import ( + "bytes" + "fmt" "hash/crc32" ) @@ -20,3 +22,14 @@ func String(s string) int { // v == MinInt return 0 } + +// Strings hashes a list of strings to a unique hashcode. +func Strings(strings []string) string { + var buf bytes.Buffer + + for _, s := range strings { + buf.WriteString(fmt.Sprintf("%s-", s)) + } + + return fmt.Sprintf("%d", String(buf.String())) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go index 433cd77d..6bd92f77 100644 --- a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go +++ b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go @@ -18,7 +18,7 @@ const ( EnvLogFile = "TF_LOG_PATH" // Set to a file ) -var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} +var ValidLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"} // LogOutput determines where we should send logs (if anywhere) and the log level.
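The didyoumean.NameSuggestion helper introduced above is typically used for attribute-name typo hints. A small sketch of a caller, assuming hypothetical attribute names (the distance-3 threshold is fixed inside the function):

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/didyoumean"
)

func main() {
	// Suggestions are tried in order; the first one within Levenshtein
	// distance 3 of the input wins, otherwise "" comes back.
	attrs := []string{"size", "content", "version_id"}
	if s := didyoumean.NameSuggestion("contnet", attrs); s != "" {
		fmt.Printf("unknown attribute; did you mean %q?\n", s)
	}
}
```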
func LogOutput() (logOutput io.Writer, err error) { @@ -40,7 +40,7 @@ func LogOutput() (logOutput io.Writer, err error) { // This was the default since the beginning logOutput = &logutils.LevelFilter{ - Levels: validLevels, + Levels: ValidLevels, MinLevel: logutils.LogLevel(logLevel), Writer: logOutput, } @@ -77,7 +77,7 @@ func LogLevel() string { logLevel = strings.ToUpper(envLevel) } else { log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v", - envLevel, validLevels) + envLevel, ValidLevels) } return logLevel @@ -90,7 +90,7 @@ func IsDebugOrHigher() bool { } func isValidLogLevel(level string) bool { - for _, l := range validLevels { + for _, l := range ValidLevels { if strings.ToUpper(level) == string(l) { return true } diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go new file mode 100644 index 00000000..bddabe64 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go @@ -0,0 +1,70 @@ +package logging + +import ( + "bytes" + "encoding/json" + "log" + "net/http" + "net/http/httputil" + "strings" +) + +type transport struct { + name string + transport http.RoundTripper +} + +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + if IsDebugOrHigher() { + reqData, err := httputil.DumpRequestOut(req, true) + if err == nil { + log.Printf("[DEBUG] "+logReqMsg, t.name, prettyPrintJsonLines(reqData)) + } else { + log.Printf("[ERROR] %s API Request error: %#v", t.name, err) + } + } + + resp, err := t.transport.RoundTrip(req) + if err != nil { + return resp, err + } + + if IsDebugOrHigher() { + respData, err := httputil.DumpResponse(resp, true) + if err == nil { + log.Printf("[DEBUG] "+logRespMsg, t.name, prettyPrintJsonLines(respData)) + } else { + log.Printf("[ERROR] %s API Response error: %#v", t.name, err) + } + } + + return resp, nil +} + +func NewTransport(name string, t http.RoundTripper) *transport { + return &transport{name, t} +} + +// prettyPrintJsonLines iterates through a []byte line-by-line, +// transforming any lines that are complete json into pretty-printed json. +func prettyPrintJsonLines(b []byte) string { + parts := strings.Split(string(b), "\n") + for i, p := range parts { + if b := []byte(p); json.Valid(b) { + var out bytes.Buffer + json.Indent(&out, b, "", " ") + parts[i] = out.String() + } + } + return strings.Join(parts, "\n") +} + +const logReqMsg = `%s API Request Details: +---[ REQUEST ]--------------------------------------- +%s +-----------------------------------------------------` + +const logRespMsg = `%s API Response Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` diff --git a/vendor/github.com/hashicorp/terraform/helper/pathorcontents/read.go b/vendor/github.com/hashicorp/terraform/helper/pathorcontents/read.go new file mode 100644 index 00000000..d0d4847d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/pathorcontents/read.go @@ -0,0 +1,40 @@ +// Helpers for dealing with file paths and their contents +package pathorcontents + +import ( + "io/ioutil" + "os" + + "github.com/mitchellh/go-homedir" +) + +// If the argument is a path, Read loads it and returns the contents, +// otherwise the argument is assumed to be the desired contents and is simply +// returned. 
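The logging transport added above is meant to wrap a provider's HTTP client. A minimal sketch; the "OneView" string is just an illustrative tag that prefixes the log entries:

```go
package example

import (
	"net/http"

	"github.com/hashicorp/terraform/helper/logging"
)

// newHTTPClient wraps the default transport so every request and response
// is dumped (with JSON bodies pretty-printed) to the log. The dump only
// happens when TF_LOG is DEBUG or TRACE, per IsDebugOrHigher.
func newHTTPClient() *http.Client {
	return &http.Client{
		Transport: logging.NewTransport("OneView", http.DefaultTransport),
	}
}
```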
+// +// The boolean second return value can be called `wasPath` - it indicates if a +// path was detected and a file loaded. +func Read(poc string) (string, bool, error) { + if len(poc) == 0 { + return poc, false, nil + } + + path := poc + if path[0] == '~' { + var err error + path, err = homedir.Expand(path) + if err != nil { + return path, true, err + } + } + + if _, err := os.Stat(path); err == nil { + contents, err := ioutil.ReadFile(path) + if err != nil { + return string(contents), true, err + } + return string(contents), true, nil + } + + return poc, false, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go new file mode 100644 index 00000000..82b5937b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go @@ -0,0 +1,6 @@ +// Package plugin contains types and functions to help Terraform plugins +// implement the plugin rpc interface. +// The primary Provider type will be responsible for converting from the grpc +// wire protocol to the types and methods known to the provider +// implementations. +package plugin diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go new file mode 100644 index 00000000..104c8f5f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go @@ -0,0 +1,1398 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "log" + "strconv" + + "github.com/zclconf/go-cty/cty" + ctyconvert "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/msgpack" + context "golang.org/x/net/context" + + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/helper/schema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plans/objchange" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/terraform" +) + +const newExtraKey = "_new_extra_shim" + +// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a +// proto.ProviderServer implementation. If the provided provider is not a +// *schema.Provider, this will return nil. +func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer { + sp, ok := p.(*schema.Provider) + if !ok { + return nil + } + + return &GRPCProviderServer{ + provider: sp, + } +} + +// GRPCProviderServer handles the server, or plugin side of the rpc connection.
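Returning to pathorcontents.Read above: it lets a single configuration field accept either a file path or inline contents. A sketch with a hypothetical loadKey caller:

```go
package example

import (
	"log"

	"github.com/hashicorp/terraform/helper/pathorcontents"
)

// loadKey accepts either a literal PEM block or a path such as
// "~/.ssh/id_rsa"; wasPath reports which interpretation Read chose.
func loadKey(pathOrPEM string) string {
	contents, wasPath, err := pathorcontents.Read(pathOrPEM)
	if err != nil {
		log.Fatalf("reading key (wasPath=%v): %v", wasPath, err)
	}
	return contents
}
```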
+type GRPCProviderServer struct { + provider *schema.Provider +} + +func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) { + // Here we are certain that the provider is being called through grpc, so + // make sure the feature flag for helper/schema is set + schema.SetProto5() + + resp := &proto.GetProviderSchema_Response{ + ResourceSchemas: make(map[string]*proto.Schema), + DataSourceSchemas: make(map[string]*proto.Schema), + } + + resp.Provider = &proto.Schema{ + Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()), + } + + for typ, res := range s.provider.ResourcesMap { + resp.ResourceSchemas[typ] = &proto.Schema{ + Version: int64(res.SchemaVersion), + Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()), + } + } + + for typ, dat := range s.provider.DataSourcesMap { + resp.DataSourceSchemas[typ] = &proto.Schema{ + Version: int64(dat.SchemaVersion), + Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()), + } + } + + return resp, nil +} + +func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block { + return schema.InternalMap(s.provider.Schema).CoreConfigSchema() +} + +func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block { + res := s.provider.ResourcesMap[name] + return res.CoreConfigSchema() +} + +func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block { + dat := s.provider.DataSourcesMap[name] + return dat.CoreConfigSchema() +} + +func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) { + resp := &proto.PrepareProviderConfig_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := s.provider.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return resp, nil + } + + configVal, err = schemaBlock.CoerceValue(configVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. 
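The default-filling pass above rests on `cty.Transform`, which rewrites a value leaf by leaf. A standalone sketch of the same idea, using invented attribute names and an assumed default of 30:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	config := cty.ObjectVal(map[string]cty.Value{
		"endpoint": cty.StringVal("https://oneview.example.com"), // hypothetical values
		"timeout":  cty.NullVal(cty.Number),
	})

	config, _ = cty.Transform(config, func(p cty.Path, v cty.Value) (cty.Value, error) {
		// only top-level null attributes are candidates for a default
		if len(p) == 1 && v.IsNull() && v.Type() == cty.Number {
			return cty.NumberIntVal(30), nil
		}
		return v, nil
	})

	fmt.Printf("%#v\n", config.GetAttr("timeout")) // cty.NumberIntVal(30)
}
```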
+ if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.Validate(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP} + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) { + resp := &proto.ValidateResourceTypeConfig_Response{} + + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateResource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) { + resp := &proto.ValidateDataSourceConfig_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + warns, errs := s.provider.ValidateDataSource(req.TypeName, config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs)) + + return resp, nil +} + +func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) { + resp := &proto.UpgradeResourceState_Response{} + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + version := int(req.Version) + + jsonMap := map[string]interface{}{} + var err error + + switch { + // We first need to upgrade a flatmap state if it exists. + // There should never be both a JSON and Flatmap state in the request. + case len(req.RawState.Flatmap) > 0: + jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + // if there's a JSON state, we need to decode it. 
+	case len(req.RawState.Json) > 0:
+		err = json.Unmarshal(req.RawState.Json, &jsonMap)
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	default:
+		log.Println("[DEBUG] no state provided to upgrade")
+		return resp, nil
+	}
+
+	// complete the upgrade of the JSON states
+	jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// The provider isn't required to clean out removed fields
+	s.removeAttributes(jsonMap, schemaBlock.ImpliedType())
+
+	// now we need to turn the state into the default json representation, so
+	// that it can be re-decoded using the actual schema.
+	val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// Now we need to make sure blocks are represented correctly, which means
+	// that missing blocks are empty collections, rather than null.
+	// First we need to CoerceValue to ensure that all object types match.
+	val, err = schemaBlock.CoerceValue(val)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+	// Normalize the value and fill in any missing blocks.
+	val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)
+
+	// encode the final state to the expected msgpack format
+	newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
+	return resp, nil
+}
+
+// upgradeFlatmapState takes a legacy flatmap state, upgrades it using
+// MigrateState if necessary, and converts it to the new JSON state format
+// decoded as a map[string]interface{}.
+// upgradeFlatmapState returns the json map along with the corresponding schema
+// version.
+func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
+	// this will be the version we've upgraded to, defaulting to the given
+	// version in case no migration was called.
+	upgradedVersion := version
+
+	// first determine if we need to call the legacy MigrateState func
+	requiresMigrate := version < res.SchemaVersion
+
+	schemaType := res.CoreConfigSchema().ImpliedType()
+
+	// if there are any StateUpgraders, then we need to only compare
+	// against the first version there
+	if len(res.StateUpgraders) > 0 {
+		requiresMigrate = version < res.StateUpgraders[0].Version
+	}
+
+	if requiresMigrate && res.MigrateState == nil {
+		// Providers were previously allowed to bump the version
+		// without declaring MigrateState.
+		// If there are further upgraders, then we've only updated that far.
+		if len(res.StateUpgraders) > 0 {
+			schemaType = res.StateUpgraders[0].Type
+			upgradedVersion = res.StateUpgraders[0].Version
+		}
+	} else if requiresMigrate {
+		is := &terraform.InstanceState{
+			ID: m["id"],
+			Attributes: m,
+			Meta: map[string]interface{}{
+				"schema_version": strconv.Itoa(version),
+			},
+		}
+
+		is, err := res.MigrateState(version, is, s.provider.Meta())
+		if err != nil {
+			return nil, 0, err
+		}
+
+		// re-assign the map in case there was a copy made, making sure to keep
+		// the ID
+		m := is.Attributes
+		m["id"] = is.ID
+
+		// if there are further upgraders, then we've only updated that far
+		if len(res.StateUpgraders) > 0 {
+			schemaType = res.StateUpgraders[0].Type
+			upgradedVersion = res.StateUpgraders[0].Version
+		}
+	} else {
+		// the schema version may be newer than the MigrateState functions
+		// handled and older than the current, but still stored in the flatmap
+		// form. If that's the case, we need to find the correct schema type to
+		// convert the state.
+		for _, upgrader := range res.StateUpgraders {
+			if upgrader.Version == version {
+				schemaType = upgrader.Type
+				break
+			}
+		}
+	}
+
+	// now we know the state is up to the latest version that handled the
+	// flatmap format state. Now we can upgrade the format and continue from
+	// there.
+	newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType)
+	return jsonMap, upgradedVersion, err
+}
+
+func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) {
+	var err error
+
+	for _, upgrader := range res.StateUpgraders {
+		if version != upgrader.Version {
+			continue
+		}
+
+		m, err = upgrader.Upgrade(m, s.provider.Meta())
+		if err != nil {
+			return nil, err
+		}
+		version++
+	}
+
+	return m, nil
+}
+
+// Remove any attributes no longer present in the schema, so that the json can
+// be correctly decoded.
+func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) {
+	// we're only concerned with finding maps that correspond to object
+	// attributes
+	switch v := v.(type) {
+	case []interface{}:
+		// If these aren't blocks the next call will be a noop
+		if ty.IsListType() || ty.IsSetType() {
+			eTy := ty.ElementType()
+			for _, eV := range v {
+				s.removeAttributes(eV, eTy)
+			}
+		}
+		return
+	case map[string]interface{}:
+		// map blocks aren't yet supported, but handle this just in case
+		if ty.IsMapType() {
+			eTy := ty.ElementType()
+			for _, eV := range v {
+				s.removeAttributes(eV, eTy)
+			}
+			return
+		}
+
+		if ty == cty.DynamicPseudoType {
+			log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v)
+			return
+		}
+
+		if !ty.IsObjectType() {
+			// This shouldn't happen, and will fail to decode further on, so
+			// there's no need to handle it here.
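For context, the `StateUpgraders` consumed by `upgradeFlatmapState` and `upgradeJSONState` are declared on a resource roughly as below; the `display_name` to `name` rename is invented for illustration:

```go
package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/zclconf/go-cty/cty"
)

func resourceExample() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
		StateUpgraders: []schema.StateUpgrader{{
			Version: 0, // upgrades version 0 state to version 1
			Type: cty.Object(map[string]cty.Type{
				"id":           cty.String,
				"display_name": cty.String, // hypothetical old attribute
			}),
			Upgrade: func(raw map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
				raw["name"] = raw["display_name"]
				delete(raw, "display_name")
				return raw, nil
			},
		}},
	}
}
```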
+ log.Printf("[WARN] unexpected type %#v for map in json state", ty) + return + } + + attrTypes := ty.AttributeTypes() + for attr, attrV := range v { + attrTy, ok := attrTypes[attr] + if !ok { + log.Printf("[DEBUG] attribute %q no longer present in schema", attr) + delete(v, attr) + continue + } + + s.removeAttributes(attrV, attrTy) + } + } +} + +func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provider.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} + +func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) { + resp := &proto.Configure_Response{} + + schemaBlock := s.getProviderSchemaBlock() + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + s.provider.TerraformVersion = req.TerraformVersion + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + err = s.provider.Configure(config) + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + + return resp, nil +} + +func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) { + resp := &proto.ReadResource_Response{ + // helper/schema did previously handle private data during refresh, but + // core is now going to expect this to be maintained in order to + // persist it in the state. + Private: req.Private, + } + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + instanceState, err := res.ShimInstanceStateFromValue(stateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + private := make(map[string]interface{}) + if len(req.Private) > 0 { + if err := json.Unmarshal(req.Private, &private); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + instanceState.Meta = private + + newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if newInstanceState == nil || newInstanceState.ID == "" { + // The old provider API used an empty id to signal that the remote + // object appears to have been deleted, but our new protocol expects + // to see a null value (in the cty sense) in that case. 
+ newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // helper/schema should always copy the ID over, but do it again just to be safe + newInstanceState.Attributes["id"] = newInstanceState.ID + + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, stateVal, false) + newStateVal = copyTimeoutValues(newStateVal, stateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + return resp, nil +} + +func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) { + resp := &proto.PlanResourceChange_Response{} + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + res := s.provider.ResourcesMap[req.TypeName] + schemaBlock := s.getResourceSchemaBlock(req.TypeName) + + priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + create := priorStateVal.IsNull() + + proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // We don't usually plan destroys, but this can return early in any case. + if proposedNewStateVal.IsNull() { + resp.PlannedState = req.ProposedNewState + resp.PlannedPrivate = req.PriorPrivate + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + priorState, err := res.ShimInstanceStateFromValue(priorStateVal) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + priorPrivate := make(map[string]interface{}) + if len(req.PriorPrivate) > 0 { + if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + } + + priorState.Meta = priorPrivate + + // Ensure there are no nulls that will cause helper/schema to panic. 
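State crosses the plugin boundary as msgpack encoded against the schema's implied type, as in `ReadResource` above. A self-contained round trip, independent of any real provider:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/msgpack"
)

func main() {
	ty := cty.Object(map[string]cty.Type{"id": cty.String})
	val := cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("abc123")})

	// Marshal/Unmarshal both require the type so null and unknown
	// values survive the encoding.
	b, err := msgpack.Marshal(val, ty)
	if err != nil {
		panic(err)
	}
	roundTripped, err := msgpack.Unmarshal(b, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(roundTripped.RawEquals(val)) // true
}
```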
+	if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// turn the proposed state into a legacy configuration
+	cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock)
+
+	diff, err := s.provider.SimpleDiff(info, priorState, cfg)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// if this is a new instance, we need to make sure ID is going to be computed
+	if create {
+		if diff == nil {
+			diff = terraform.NewInstanceDiff()
+		}
+
+		diff.Attributes["id"] = &terraform.ResourceAttrDiff{
+			NewComputed: true,
+		}
+	}
+
+	if diff == nil || len(diff.Attributes) == 0 {
+		// schema.Provider.Diff returns nil if it ends up making a diff with no
+		// changes, but our new interface wants us to return an actual change
+		// description that _shows_ there are no changes. This is always the
+		// prior state, because we force a diff above if this is a new instance.
+		resp.PlannedState = req.PriorState
+		resp.PlannedPrivate = req.PriorPrivate
+		return resp, nil
+	}
+
+	if priorState == nil {
+		priorState = &terraform.InstanceState{}
+	}
+
+	// now we need to apply the diff to the prior state, so get the planned state
+	plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock)
+
+	plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false)
+
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)
+
+	// The old SDK code has some imprecisions that cause it to sometimes
+	// generate differences that the SDK itself does not consider significant
+	// but Terraform Core would. To avoid producing weird do-nothing diffs
+	// in that case, we'll check if the provider has produced something we
+	// think is "equivalent" to the prior state and just return the prior state
+	// itself if so, thus ensuring that Terraform Core will treat this as
+	// a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
+	// accuracy.
+ forceNoChanges := false + if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) { + plannedStateVal = priorStateVal + forceNoChanges = true + } + + // if this was creating the resource, we need to set any remaining computed + // fields + if create { + plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock) + } + + plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedState = &proto.DynamicValue{ + Msgpack: plannedMP, + } + + // encode any timeouts into the diff Meta + t := &schema.ResourceTimeout{} + if err := t.ConfigDecode(res, cfg); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + if err := t.DiffEncode(diff); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Now we need to store any NewExtra values, which are where any actual + // StateFunc modified config fields are hidden. + privateMap := diff.Meta + if privateMap == nil { + privateMap = map[string]interface{}{} + } + + newExtra := map[string]interface{}{} + + for k, v := range diff.Attributes { + if v.NewExtra != nil { + newExtra[k] = v.NewExtra + } + } + privateMap[newExtraKey] = newExtra + + // the Meta field gets encoded into PlannedPrivate + plannedPrivate, err := json.Marshal(privateMap) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.PlannedPrivate = plannedPrivate + + // collect the attributes that require instance replacement, and convert + // them to cty.Paths. + var requiresNew []string + if !forceNoChanges { + for attr, d := range diff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + } + + // If anything requires a new resource already, or the "id" field indicates + // that we will be creating a new resource, then we need to add that to + // RequiresReplace so that core can tell if the instance is being replaced + // even if changes are being suppressed via "ignore_changes". 
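The `ConfigDecode`/`DiffEncode` pair above shuttles a resource's declared timeouts through the diff Meta. On the resource side such timeouts are declared roughly as follows; the durations are arbitrary:

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceWithTimeouts() *schema.Resource {
	return &schema.Resource{
		// These become the timeouts block users may override in config.
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(20 * time.Minute),
			Delete: schema.DefaultTimeout(10 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
	}
}
```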
+	id := plannedStateVal.GetAttr("id")
+	if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() {
+		requiresNew = append(requiresNew, "id")
+	}
+
+	requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	// convert these to the protocol structures
+	for _, p := range requiresReplace {
+		resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p))
+	}
+
+	return resp, nil
+}
+
+func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) {
+	resp := &proto.ApplyResourceChange_Response{
+		// Start with the existing state as a fallback
+		NewState: req.PriorState,
+	}
+
+	res := s.provider.ResourcesMap[req.TypeName]
+	schemaBlock := s.getResourceSchemaBlock(req.TypeName)
+
+	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	info := &terraform.InstanceInfo{
+		Type: req.TypeName,
+	}
+
+	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	private := make(map[string]interface{})
+	if len(req.PlannedPrivate) > 0 {
+		if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	}
+
+	var diff *terraform.InstanceDiff
+	destroy := false
+
+	// a null state means we are destroying the instance
+	if plannedStateVal.IsNull() {
+		destroy = true
+		diff = &terraform.InstanceDiff{
+			Attributes: make(map[string]*terraform.ResourceAttrDiff),
+			Meta: make(map[string]interface{}),
+			Destroy: true,
+		}
+	} else {
+		diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res))
+		if err != nil {
+			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+			return resp, nil
+		}
+	}
+
+	if diff == nil {
+		diff = &terraform.InstanceDiff{
+			Attributes: make(map[string]*terraform.ResourceAttrDiff),
+			Meta: make(map[string]interface{}),
+		}
+	}
+
+	// add NewExtra fields that may have been stored in the private data
+	if newExtra := private[newExtraKey]; newExtra != nil {
+		for k, v := range newExtra.(map[string]interface{}) {
+			d := diff.Attributes[k]
+
+			if d == nil {
+				d = &terraform.ResourceAttrDiff{}
+			}
+
+			d.NewExtra = v
+			diff.Attributes[k] = d
+		}
+	}
+
+	if private != nil {
+		diff.Meta = private
+	}
+
+	for k, d := range diff.Attributes {
+		// We need to turn off any RequiresNew. There could be attributes
+		// without changes in here inserted by helper/schema, but if they have
+		// RequiresNew then the state will be dropped from the ResourceData.
+		d.RequiresNew = false
+
+		// Drop any "removed" attributes that don't actually exist in the prior
+		// state, or helper/schema will confuse itself
+		if d.NewRemoved {
+			if _, ok := priorState.Attributes[k]; !ok {
+				delete(diff.Attributes, k)
+			}
+		}
+	}
+
+	newInstanceState, err := s.provider.Apply(info, priorState, diff)
+	// we record the error here, but continue processing any returned state.
+ if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + } + newStateVal := cty.NullVal(schemaBlock.ImpliedType()) + + // Always return a null value for destroy. + // While this is usually indicated by a nil state, check for missing ID or + // attributes in the case of a provider failure. + if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" { + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil + } + + // We keep the null val if we destroyed the resource, otherwise build the + // entire object, even if the new state was nil. + newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true) + + newStateVal = copyTimeoutValues(newStateVal, plannedStateVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.NewState = &proto.DynamicValue{ + Msgpack: newStateMP, + } + + meta, err := json.Marshal(newInstanceState.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.Private = meta + + // This is a signal to Terraform Core that we're doing the best we can to + // shim the legacy type system of the SDK onto the Terraform type system + // but we need it to cut us some slack. This setting should not be taken + // forward to any new SDK implementations, since setting it prevents us + // from catching certain classes of provider bug that can lead to + // confusing downstream errors. + resp.LegacyTypeSystem = true + + return resp, nil +} + +func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) { + resp := &proto.ImportResourceState_Response{} + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + newInstanceStates, err := s.provider.ImportState(info, req.Id) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + for _, is := range newInstanceStates { + // copy the ID again just to be sure it wasn't missed + is.Attributes["id"] = is.ID + + resourceType := is.Ephemeral.Type + if resourceType == "" { + resourceType = req.TypeName + } + + schemaBlock := s.getResourceSchemaBlock(resourceType) + newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // Normalize the value and fill in any missing blocks. 
+ newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + meta, err := json.Marshal(is.Meta) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + importedResource := &proto.ImportResourceState_ImportedResource{ + TypeName: resourceType, + State: &proto.DynamicValue{ + Msgpack: newStateMP, + }, + Private: meta, + } + + resp.ImportedResources = append(resp.ImportedResources, importedResource) + } + + return resp, nil +} + +func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) { + resp := &proto.ReadDataSource_Response{} + + schemaBlock := s.getDatasourceSchemaBlock(req.TypeName) + + configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + info := &terraform.InstanceInfo{ + Type: req.TypeName, + } + + // Ensure there are no nulls that will cause helper/schema to panic. + if err := validateConfigNulls(configVal, nil); err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + config := terraform.NewResourceConfigShimmed(configVal, schemaBlock) + + // we need to still build the diff separately with the Read method to match + // the old behavior + diff, err := s.provider.ReadDataDiff(info, config) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + // now we can get the new complete data source + newInstanceState, err := s.provider.ReadDataApply(info, diff) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + + newStateVal = copyTimeoutValues(newStateVal, configVal) + + newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType()) + if err != nil { + resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err) + return resp, nil + } + resp.State = &proto.DynamicValue{ + Msgpack: newStateMP, + } + return resp, nil +} + +func pathToAttributePath(path cty.Path) *proto.AttributePath { + var steps []*proto.AttributePath_Step + + for _, step := range path { + switch s := step.(type) { + case cty.GetAttrStep: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_AttributeName{ + AttributeName: s.Name, + }, + }) + case cty.IndexStep: + ty := s.Key.Type() + switch ty { + case cty.Number: + i, _ := s.Key.AsBigFloat().Int64() + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyInt{ + ElementKeyInt: i, + }, + }) + case cty.String: + steps = append(steps, &proto.AttributePath_Step{ + Selector: &proto.AttributePath_Step_ElementKeyString{ + ElementKeyString: s.Key.AsString(), + }, + }) + } + } + } + + return &proto.AttributePath{Steps: steps} +} + +// helper/schema throws away timeout values from the config and stores them in +// the Private/Meta fields. 
we need to copy those values into the planned state
+// so that core doesn't see a perpetual diff with the timeout block.
+func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value {
+	// if `to` is null we are planning to remove it altogether.
+	if to.IsNull() {
+		return to
+	}
+	toAttrs := to.AsValueMap()
+	// We need to remove the key since the hcl2shims will add a non-null block
+	// because we can't determine if a single block was null from the flatmapped
+	// values. This needs to conform to the correct schema for marshaling, so
+	// change the value to null rather than deleting it from the object map.
+	timeouts, ok := toAttrs[schema.TimeoutsConfigKey]
+	if ok {
+		toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type())
+	}
+
+	// if from is null then there are no timeouts to copy
+	if from.IsNull() {
+		return cty.ObjectVal(toAttrs)
+	}
+
+	fromAttrs := from.AsValueMap()
+	timeouts, ok = fromAttrs[schema.TimeoutsConfigKey]
+
+	// timeouts shouldn't be unknown, but don't copy possibly invalid values either
+	if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() {
+		// no timeouts block to copy
+		return cty.ObjectVal(toAttrs)
+	}
+
+	toAttrs[schema.TimeoutsConfigKey] = timeouts
+
+	return cty.ObjectVal(toAttrs)
+}
+
+// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all
+// StateFuncs and CustomizeDiffs removed. This will be used during apply to
+// create a diff from a planned state where the diff modifications have already
+// been applied.
+func stripResourceModifiers(r *schema.Resource) *schema.Resource {
+	if r == nil {
+		return nil
+	}
+	// start with a shallow copy
+	newResource := new(schema.Resource)
+	*newResource = *r
+
+	newResource.CustomizeDiff = nil
+	newResource.Schema = map[string]*schema.Schema{}
+
+	for k, s := range r.Schema {
+		newResource.Schema[k] = stripSchema(s)
+	}
+
+	return newResource
+}
+
+func stripSchema(s *schema.Schema) *schema.Schema {
+	if s == nil {
+		return nil
+	}
+	// start with a shallow copy
+	newSchema := new(schema.Schema)
+	*newSchema = *s
+
+	newSchema.StateFunc = nil
+
+	switch e := newSchema.Elem.(type) {
+	case *schema.Schema:
+		newSchema.Elem = stripSchema(e)
+	case *schema.Resource:
+		newSchema.Elem = stripResourceModifiers(e)
+	}
+
+	return newSchema
+}
+
+// Zero values and empty containers may be interchanged by the apply process.
+// When there is a discrepancy between src and dst value being null or empty,
+// prefer the src value. This takes a little more liberty with set types, since
+// we can't correlate modified set values. In the case of sets, if the src set
+// was wholly known we assume the value was correctly applied and copy that
+// entirely to the new value.
+// While apply prefers the src value, during plan we prefer dst whenever there
+// is an unknown or a set is involved, since the plan can alter the value
+// however it sees fit. This however means that a CustomizeDiffFunction may not
+// be able to change a null to an empty value or vice versa, but that should be
+// very uncommon, and it was not reliable before 0.12 either.
+func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
+	ty := dst.Type()
+	if !src.IsNull() && !src.IsKnown() {
+		// Return src during plan to retain unknown interpolated placeholders,
+		// which could be lost if we're only updating a resource. If this is a
+		// read scenario, then there shouldn't be any unknowns at all.
+ if dst.IsNull() && !apply { + return src + } + return dst + } + + // Handle null/empty changes for collections during apply. + // A change between null and empty values prefers src to make sure the state + // is consistent between plan and apply. + if ty.IsCollectionType() && apply { + dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0 + srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0 + + if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) { + return src + } + } + + // check the invariants that we need below, to ensure we are working with + // non-null and known values. + if src.IsNull() || !src.IsKnown() || !dst.IsKnown() { + return dst + } + + switch { + case ty.IsMapType(), ty.IsObjectType(): + var dstMap map[string]cty.Value + if !dst.IsNull() { + dstMap = dst.AsValueMap() + } + if dstMap == nil { + dstMap = map[string]cty.Value{} + } + + srcMap := src.AsValueMap() + for key, v := range srcMap { + dstVal, ok := dstMap[key] + if !ok && apply && ty.IsMapType() { + // don't transfer old map values to dst during apply + continue + } + + if dstVal == cty.NilVal { + if !apply && ty.IsMapType() { + // let plan shape this map however it wants + continue + } + dstVal = cty.NullVal(v.Type()) + } + + dstMap[key] = normalizeNullValues(dstVal, v, apply) + } + + // you can't call MapVal/ObjectVal with empty maps, but nothing was + // copied in anyway. If the dst is nil, and the src is known, assume the + // src is correct. + if len(dstMap) == 0 { + if dst.IsNull() && src.IsWhollyKnown() && apply { + return src + } + return dst + } + + if ty.IsMapType() { + // helper/schema will populate an optional+computed map with + // unknowns which we have to fixup here. + // It would be preferable to simply prevent any known value from + // becoming unknown, but concessions have to be made to retain the + // broken legacy behavior when possible. + for k, srcVal := range srcMap { + if !srcVal.IsNull() && srcVal.IsKnown() { + dstVal, ok := dstMap[k] + if !ok { + continue + } + + if !dstVal.IsNull() && !dstVal.IsKnown() { + dstMap[k] = srcVal + } + } + } + + return cty.MapVal(dstMap) + } + + return cty.ObjectVal(dstMap) + + case ty.IsSetType(): + // If the original was wholly known, then we expect that is what the + // provider applied. The apply process loses too much information to + // reliably re-create the set. + if src.IsWhollyKnown() && apply { + return src + } + + case ty.IsListType(), ty.IsTupleType(): + // If the dst is null, and the src is known, then we lost an empty value + // so take the original. + if dst.IsNull() { + if src.IsWhollyKnown() && src.LengthInt() == 0 && apply { + return src + } + + // if dst is null and src only contains unknown values, then we lost + // those during a read or plan. + if !apply && !src.IsNull() { + allUnknown := true + for _, v := range src.AsValueSlice() { + if v.IsKnown() { + allUnknown = false + break + } + } + if allUnknown { + return src + } + } + + return dst + } + + // if the lengths are identical, then iterate over each element in succession. 
+ srcLen := src.LengthInt() + dstLen := dst.LengthInt() + if srcLen == dstLen && srcLen > 0 { + srcs := src.AsValueSlice() + dsts := dst.AsValueSlice() + + for i := 0; i < srcLen; i++ { + dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply) + } + + if ty.IsTupleType() { + return cty.TupleVal(dsts) + } + return cty.ListVal(dsts) + } + + case ty == cty.String: + // The legacy SDK should not be able to remove a value during plan or + // apply, however we are only going to overwrite this if the source was + // an empty string, since that is what is often equated with unset and + // lost in the diff process. + if dst.IsNull() && src.AsString() == "" { + return src + } + } + + return dst +} + +// validateConfigNulls checks a config value for unsupported nulls before +// attempting to shim the value. While null values can mostly be ignored in the +// configuration, since they're not supported in HCL1, the case where a null +// appears in a list-like attribute (list, set, tuple) will present a nil value +// to helper/schema which can panic. Return an error to the user in this case, +// indicating the attribute with the null value. +func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic { + var diags []*proto.Diagnostic + if v.IsNull() || !v.IsKnown() { + return diags + } + + switch { + case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + if ev.IsNull() { + // if this is a set, the kv is also going to be null which + // isn't a valid path element, so we can't append it to the + // diagnostic. + p := path + if !kv.IsNull() { + p = append(p, cty.IndexStep{Key: kv}) + } + + diags = append(diags, &proto.Diagnostic{ + Severity: proto.Diagnostic_ERROR, + Summary: "Null value found in list", + Detail: "Null values are not allowed for this attribute value.", + Attribute: convert.PathToAttributePath(p), + }) + continue + } + + d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv})) + diags = convert.AppendProtoDiag(diags, d) + } + + case v.Type().IsMapType() || v.Type().IsObjectType(): + it := v.ElementIterator() + for it.Next() { + kv, ev := it.Element() + var step cty.PathStep + switch { + case v.Type().IsMapType(): + step = cty.IndexStep{Key: kv} + case v.Type().IsObjectType(): + step = cty.GetAttrStep{Name: kv.AsString()} + } + d := validateConfigNulls(ev, append(path, step)) + diags = convert.AppendProtoDiag(diags, d) + } + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go new file mode 100644 index 00000000..14494e46 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go @@ -0,0 +1,147 @@ +package plugin + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" + ctyconvert "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/msgpack" + context "golang.org/x/net/context" +) + +// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a +// proto.ProvisionerServer implementation. 
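`validateConfigNulls`, completed just above, guards against the situation sketched below: a null element inside a list-like value, which helper/schema would otherwise receive as a bare nil. This loop mirrors (but does not reuse) the vendored check:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	networks := cty.TupleVal([]cty.Value{
		cty.StringVal("net-a"), // hypothetical values
		cty.NullVal(cty.String),
	})

	for it := networks.ElementIterator(); it.Next(); {
		k, v := it.Element()
		if v.IsNull() {
			idx, _ := k.AsBigFloat().Int64()
			fmt.Printf("null value at index %d would be rejected\n", idx)
		}
	}
}
```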
If the provided provisioner is not a
+// *schema.Provisioner, this will return nil.
+func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer {
+	sp, ok := p.(*schema.Provisioner)
+	if !ok {
+		return nil
+	}
+	return &GRPCProvisionerServer{
+		provisioner: sp,
+	}
+}
+
+type GRPCProvisionerServer struct {
+	provisioner *schema.Provisioner
+}
+
+func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) {
+	resp := &proto.GetProvisionerSchema_Response{}
+
+	resp.Provisioner = &proto.Schema{
+		Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()),
+	}
+
+	return resp, nil
+}
+
+func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) {
+	resp := &proto.ValidateProvisionerConfig_Response{}
+
+	cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()
+
+	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType())
+	if err != nil {
+		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+		return resp, nil
+	}
+
+	config := terraform.NewResourceConfigShimmed(configVal, cfgSchema)
+
+	warns, errs := s.provisioner.Validate(config)
+	resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
+
+	return resp, nil
+}
+
+// stringMapFromValue converts a cty.Value to a map[string]string.
+// This will panic if the val is not a cty.Map(cty.String).
+func stringMapFromValue(val cty.Value) map[string]string {
+	m := map[string]string{}
+	if val.IsNull() || !val.IsKnown() {
+		return m
+	}
+
+	for it := val.ElementIterator(); it.Next(); {
+		ak, av := it.Element()
+		name := ak.AsString()
+
+		if !av.IsKnown() || av.IsNull() {
+			continue
+		}
+
+		av, _ = ctyconvert.Convert(av, cty.String)
+		m[name] = av.AsString()
+	}
+
+	return m
+}
+
+// uiOutput implements the terraform.UIOutput interface to adapt the grpc
+// stream to the legacy Provisioner.Apply method.
+type uiOutput struct {
+	srv proto.Provisioner_ProvisionResourceServer
+}
+
+func (o uiOutput) Output(s string) {
+	err := o.srv.Send(&proto.ProvisionResource_Response{
+		Output: s,
+	})
+	if err != nil {
+		log.Printf("[ERROR] %s", err)
+	}
+}
+
+func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error {
+	// We send diagnostics back over the stream if there was a
+	// provisioner-side problem.
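The flattening performed by `stringMapFromValue` can be exercised on its own. This sketch applies the same rules (skip null or unknown elements, convert the rest to strings) to a made-up connection map:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyconvert "github.com/zclconf/go-cty/cty/convert"
)

func main() {
	conn := cty.MapVal(map[string]cty.Value{
		"host": cty.StringVal("10.0.0.4"), // hypothetical connection info
		"user": cty.StringVal("root"),
	})

	m := map[string]string{}
	for it := conn.ElementIterator(); it.Next(); {
		k, v := it.Element()
		if v.IsNull() || !v.IsKnown() {
			continue // same skip rules as stringMapFromValue
		}
		v, _ = ctyconvert.Convert(v, cty.String)
		m[k.AsString()] = v.AsString()
	}
	fmt.Println(m) // map[host:10.0.0.4 user:root]
}
```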
+ srvResp := &proto.ProvisionResource_Response{} + + cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema() + cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType()) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema) + + connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String)) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + return nil + } + + conn := stringMapFromValue(connVal) + + instanceState := &terraform.InstanceState{ + Ephemeral: terraform.EphemeralState{ + ConnInfo: conn, + }, + Meta: make(map[string]interface{}), + } + + err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig) + if err != nil { + srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err) + srv.Send(srvResp) + } + return nil +} + +func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) { + resp := &proto.Stop_Response{} + + err := s.provisioner.Stop() + if err != nil { + resp.Error = err.Error() + } + + return resp, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go new file mode 100644 index 00000000..64a6784e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go @@ -0,0 +1,131 @@ +package plugin + +import ( + "fmt" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// SetUnknowns takes a cty.Value, and compares it to the schema setting any null +// values which are computed to unknown. +func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value { + if !val.IsKnown() { + return val + } + + // If the object was null, we still need to handle the top level attributes + // which might be computed, but we don't need to expand the blocks. + if val.IsNull() { + objMap := map[string]cty.Value{} + allNull := true + for name, attr := range schema.Attributes { + switch { + case attr.Computed: + objMap[name] = cty.UnknownVal(attr.Type) + allNull = false + default: + objMap[name] = cty.NullVal(attr.Type) + } + } + + // If this object has no unknown attributes, then we can leave it null. + if allNull { + return val + } + + return cty.ObjectVal(objMap) + } + + valMap := val.AsValueMap() + newVals := make(map[string]cty.Value) + + for name, attr := range schema.Attributes { + v := valMap[name] + + if attr.Computed && v.IsNull() { + newVals[name] = cty.UnknownVal(attr.Type) + continue + } + + newVals[name] = v + } + + for name, blockS := range schema.BlockTypes { + blockVal := valMap[name] + if blockVal.IsNull() || !blockVal.IsKnown() { + newVals[name] = blockVal + continue + } + + blockValType := blockVal.Type() + blockElementType := blockS.Block.ImpliedType() + + // This switches on the value type here, so we can correctly switch + // between Tuples/Lists and Maps/Objects. 
+ switch { + case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup: + // NestingSingle is the only exception here, where we treat the + // block directly as an object + newVals[name] = SetUnknowns(blockVal, &blockS.Block) + + case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType(): + listVals := blockVal.AsValueSlice() + newListVals := make([]cty.Value, 0, len(listVals)) + + for _, v := range listVals { + newListVals = append(newListVals, SetUnknowns(v, &blockS.Block)) + } + + switch { + case blockValType.IsSetType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.SetValEmpty(blockElementType) + default: + newVals[name] = cty.SetVal(newListVals) + } + case blockValType.IsListType(): + switch len(newListVals) { + case 0: + newVals[name] = cty.ListValEmpty(blockElementType) + default: + newVals[name] = cty.ListVal(newListVals) + } + case blockValType.IsTupleType(): + newVals[name] = cty.TupleVal(newListVals) + } + + case blockValType.IsMapType(), blockValType.IsObjectType(): + mapVals := blockVal.AsValueMap() + newMapVals := make(map[string]cty.Value) + + for k, v := range mapVals { + newMapVals[k] = SetUnknowns(v, &blockS.Block) + } + + switch { + case blockValType.IsMapType(): + switch len(newMapVals) { + case 0: + newVals[name] = cty.MapValEmpty(blockElementType) + default: + newVals[name] = cty.MapVal(newMapVals) + } + case blockValType.IsObjectType(): + if len(newMapVals) == 0 { + // We need to populate empty values to make a valid object. + for attr, ty := range blockElementType.AttributeTypes() { + newMapVals[attr] = cty.NullVal(ty) + } + } + newVals[name] = cty.ObjectVal(newMapVals) + } + + default: + panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType)) + } + } + + return cty.ObjectVal(newVals) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go new file mode 100644 index 00000000..0742e993 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go @@ -0,0 +1,43 @@ +package resource + +import ( + "context" + "net" + "time" + + "github.com/hashicorp/terraform/helper/plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + tfplugin "github.com/hashicorp/terraform/plugin" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC +// shim and starts it in a grpc server using an inmem connection. It returns a +// GRPCClient for this new server to test the shimmed resource provider. 
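`SetUnknowns` is exported, so its effect is easy to demonstrate: a null `Computed` attribute becomes unknown ("known after apply") while configured attributes pass through untouched. The schema and values here are invented:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/helper/plugin"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	block := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"name": {Type: cty.String, Required: true},
			"id":   {Type: cty.String, Computed: true},
		},
	}

	val := cty.ObjectVal(map[string]cty.Value{
		"name": cty.StringVal("lig-1"), // hypothetical resource name
		"id":   cty.NullVal(cty.String),
	})

	planned := plugin.SetUnknowns(val, block)
	fmt.Println(planned.GetAttr("id").IsKnown())   // false: computed, so unknown
	fmt.Println(planned.GetAttr("name").IsKnown()) // true
}
```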
+func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface {
+	listener := bufconn.Listen(256 * 1024)
+	grpcServer := grpc.NewServer()
+
+	p := plugin.NewGRPCProviderServerShim(rp)
+	proto.RegisterProviderServer(grpcServer, p)
+
+	go grpcServer.Serve(listener)
+
+	conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
+		return listener.Dial()
+	}), grpc.WithInsecure())
+	if err != nil {
+		panic(err)
+	}
+
+	var pp tfplugin.GRPCProviderPlugin
+	client, _ := pp.GRPCClient(context.Background(), nil, conn)
+
+	grpcClient := client.(*tfplugin.GRPCProvider)
+	grpcClient.TestServer = grpcServer
+
+	return grpcClient
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
index 629582b3..44949550 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/id.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
@@ -1,39 +1,45 @@
 package resource
 
 import (
-	"crypto/rand"
 	"fmt"
-	"math/big"
+	"strings"
 	"sync"
+	"time"
 )
 
 const UniqueIdPrefix = `terraform-`
 
-// idCounter is a randomly seeded monotonic counter for generating ordered
-// unique ids. It uses a big.Int so we can easily increment a long numeric
-// string. The max possible hex value here with 12 random bytes is
-// "01000000000000000000000000", so there's no chance of rollover during
-// operation.
+// idCounter is a monotonic counter for generating ordered unique ids.
var idMutex sync.Mutex
-var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+var idCounter uint32
 
 // Helper for a resource to generate a unique identifier w/ default prefix
 func UniqueId() string {
 	return PrefixedUniqueId(UniqueIdPrefix)
 }
 
+// UniqueIDSuffixLength is the string length of the suffix generated by
+// PrefixedUniqueId. This can be used by length validation functions to
+// ensure prefixes are the correct length for the target field.
+const UniqueIDSuffixLength = 26
+
 // Helper for a resource to generate a unique identifier w/ given prefix
 //
 // After the prefix, the ID consists of an incrementing 26 digit value (to match
-// previous timestamp output).
+// previous timestamp output). The ID now consists of a timestamp and an
+// incrementing 8 hex digit value. The timestamp means that multiple IDs created
+// with the same prefix will sort in the order of their creation, even across
+// multiple terraform executions, as long as the clock is not turned back
+// between calls, and as long as any given terraform execution generates fewer
+// than 4 billion IDs.
 func PrefixedUniqueId(prefix string) string {
+	// Be precise to 4 digits of fractional seconds, but remove the dot before the
+	// fractional seconds.
+ timestamp := strings.Replace( + time.Now().UTC().Format("20060102150405.0000"), ".", "", 1) + idMutex.Lock() defer idMutex.Unlock() - return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1))) -} - -func randomBytes(n int) []byte { - b := make([]byte, n) - rand.Read(b) - return b + idCounter++ + return fmt.Sprintf("%s%s%08x", prefix, timestamp, idCounter) } diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go index 7473a105..88a83966 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go @@ -2,10 +2,11 @@ package resource import ( "log" - "sync/atomic" "time" ) +var refreshGracePeriod = 30 * time.Second + // StateRefreshFunc is a function type used for StateChangeConf that is // responsible for refreshing the item being watched for a state change. // @@ -37,7 +38,7 @@ type StateChangeConf struct { // specified in the configuration using the specified Refresh() func, // waiting the number of seconds specified in the timeout configuration. // -// If the Refresh function returns a error, exit immediately with that error. +// If the Refresh function returns an error, exit immediately with that error. // // If the Refresh function returns a state other than the Target state or one // listed in Pending, return immediately with an error. @@ -45,7 +46,7 @@ type StateChangeConf struct { // If the Timeout is exceeded before reaching the Target state, return an // error. // -// Otherwise, result the result of the first call to the Refresh function to +// Otherwise, the result is the result of the first call to the Refresh function to // reach the target state. func (conf *StateChangeConf) WaitForState() (interface{}, error) { log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target) @@ -62,58 +63,76 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { conf.ContinuousTargetOccurence = 1 } - // We can't safely read the result values if we timeout, so store them in - // an atomic.Value type Result struct { Result interface{} State string Error error + Done bool } - var lastResult atomic.Value - lastResult.Store(Result{}) - doneCh := make(chan struct{}) + // Read every result from the refresh loop, waiting for a positive result.Done. 
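With the change above, generated IDs embed a UTC timestamp followed by an 8-hex-digit counter, so IDs with the same prefix sort by creation time. Usage is unchanged:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/resource"
)

func main() {
	// e.g. "oneview-20190321154505000100000001": 18 timestamp digits
	// plus an 8 hex digit counter after the prefix.
	fmt.Println(resource.PrefixedUniqueId("oneview-"))
	fmt.Println(resource.UniqueId()) // same, with the "terraform-" prefix
}
```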
+ resCh := make(chan Result, 1) + // cancellation channel for the refresh loop + cancelCh := make(chan struct{}) + + result := Result{} + go func() { - defer close(doneCh) + defer close(resCh) - // Wait for the delay time.Sleep(conf.Delay) - wait := 100 * time.Millisecond + // start with 0 delay for the first loop + var wait time.Duration for { + // store the last result + resCh <- result + + // wait and watch for cancellation + select { + case <-cancelCh: + return + case <-time.After(wait): + // first round had no wait + if wait == 0 { + wait = 100 * time.Millisecond + } + } + res, currentState, err := conf.Refresh() - result := Result{ + result = Result{ Result: res, State: currentState, Error: err, } - lastResult.Store(result) if err != nil { + resCh <- result return } // If we're waiting for the absence of a thing, then return if res == nil && len(conf.Target) == 0 { - targetOccurence += 1 + targetOccurence++ if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result return - } else { - continue } + continue } if res == nil { // If we didn't find the resource, check if we have been // not finding it for awhile, and if so, report an error. - notfoundTick += 1 + notfoundTick++ if notfoundTick > conf.NotFoundChecks { result.Error = &NotFoundError{ LastError: err, Retries: notfoundTick, } - lastResult.Store(result) + resCh <- result return } } else { @@ -124,12 +143,13 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { for _, allowed := range conf.Target { if currentState == allowed { found = true - targetOccurence += 1 + targetOccurence++ if conf.ContinuousTargetOccurence == targetOccurence { + result.Done = true + resCh <- result return - } else { - continue } + continue } } @@ -147,11 +167,17 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { State: result.State, ExpectedState: conf.Target, } - lastResult.Store(result) + resCh <- result return } } + // Wait between refreshes using exponential backoff, except when + // waiting for the target state to reoccur. + if targetOccurence == 0 { + wait *= 2 + } + // If a poll interval has been specified, choose that interval. // Otherwise bound the default value. if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { @@ -165,27 +191,69 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { } log.Printf("[TRACE] Waiting %s before next try", wait) - time.Sleep(wait) - - // Wait between refreshes using exponential backoff, except when - // waiting for the target state to reoccur. 
- if targetOccurence == 0 { - wait *= 2 - } } }() - select { - case <-doneCh: - r := lastResult.Load().(Result) - return r.Result, r.Error - case <-time.After(conf.Timeout): - r := lastResult.Load().(Result) - return nil, &TimeoutError{ - LastError: r.Error, - LastState: r.State, - Timeout: conf.Timeout, - ExpectedState: conf.Target, + // store the last result from the refresh loop + lastResult := Result{} + + timeout := time.After(conf.Timeout) + for { + select { + case r, ok := <-resCh: + // channel closed, so return the last result + if !ok { + return lastResult.Result, lastResult.Error + } + + // we reached the intended state + if r.Done { + return r.Result, r.Error + } + + // still waiting, store the last result + lastResult = r + + case <-timeout: + log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout) + log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod) + + // cancel the goroutine and start our grace period timer + close(cancelCh) + timeout := time.After(refreshGracePeriod) + + // we need a for loop and a label to break on, because we may have + // an extra response value to read, but still want to wait for the + // channel to close. + forSelect: + for { + select { + case r, ok := <-resCh: + if r.Done { + // the last refresh loop reached the desired state + return r.Result, r.Error + } + + if !ok { + // the goroutine returned + break forSelect + } + + // target state not reached, save the result for the + // TimeoutError and wait for the channel to close + lastResult = r + case <-timeout: + log.Println("[ERROR] WaitForState exceeded refresh grace period") + break forSelect + } + } + + return nil, &TimeoutError{ + LastError: lastResult.Error, + LastState: lastResult.State, + Timeout: conf.Timeout, + ExpectedState: conf.Target, + } } } } diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go new file mode 100644 index 00000000..f4882075 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go @@ -0,0 +1,187 @@ +package resource + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/helper/schema" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/terraform" +) + +// shimNewState takes a new *states.State and reverts it to a legacy state for the provider ACC tests +func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) { + state := terraform.NewState() + + // in the odd case of a nil state, let the helper packages handle it + if newState == nil { + return nil, nil + } + + for _, newMod := range newState.Modules { + mod := state.AddModule(newMod.Addr) + + for name, out := range newMod.OutputValues { + outputType := "" + val := hcl2shim.ConfigValueFromHCL2(out.Value) + ty := out.Value.Type() + switch { + case ty == cty.String: + outputType = "string" + case ty.IsTupleType() || ty.IsListType(): + outputType = "list" + case ty.IsMapType(): + outputType = "map" + } + + mod.Outputs[name] = &terraform.OutputState{ + Type: outputType, + Value: val, + Sensitive: out.Sensitive, + } + } + + for _, res := range newMod.Resources { + resType := res.Addr.Type + providerType := res.ProviderConfig.ProviderConfig.Type + +
resource := getResource(providers, providerType, res.Addr) + + for key, i := range res.Instances { + resState := &terraform.ResourceState{ + Type: resType, + Provider: res.ProviderConfig.String(), + } + + // We should always have a Current instance here, but be safe about checking. + if i.Current != nil { + flatmap, err := shimmedAttributes(i.Current, resource) + if err != nil { + return nil, fmt.Errorf("error decoding state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if i.Current.Private != nil { + err := json.Unmarshal(i.Current.Private, &meta) + if err != nil { + return nil, err + } + } + + resState.Primary = &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: i.Current.Status == states.ObjectTainted, + Meta: meta, + } + + if i.Current.SchemaVersion != 0 { + resState.Primary.Meta = map[string]interface{}{ + "schema_version": i.Current.SchemaVersion, + } + } + + for _, dep := range i.Current.Dependencies { + resState.Dependencies = append(resState.Dependencies, dep.String()) + } + + // convert the indexes to the old style flatmap indexes + idx := "" + switch key.(type) { + case addrs.IntKey: + // don't add numeric index values to resources with a count of 0 + if len(res.Instances) > 1 { + idx = fmt.Sprintf(".%d", key) + } + case addrs.StringKey: + idx = "." + key.String() + } + + mod.Resources[res.Addr.String()+idx] = resState + } + + // add any deposed instances + for _, dep := range i.Deposed { + flatmap, err := shimmedAttributes(dep, resource) + if err != nil { + return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err) + } + + var meta map[string]interface{} + if dep.Private != nil { + err := json.Unmarshal(dep.Private, &meta) + if err != nil { + return nil, err + } + } + + deposed := &terraform.InstanceState{ + ID: flatmap["id"], + Attributes: flatmap, + Tainted: dep.Status == states.ObjectTainted, + Meta: meta, + } + if dep.SchemaVersion != 0 { + deposed.Meta = map[string]interface{}{ + "schema_version": dep.SchemaVersion, + } + } + + resState.Deposed = append(resState.Deposed, deposed) + } + } + } + } + + return state, nil +} + +func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource { + p := providers[providerName] + if p == nil { + panic(fmt.Sprintf("provider %q not found in test step", providerName)) + } + + // this is only for tests, so should only see schema.Providers + provider := p.(*schema.Provider) + + switch addr.Mode { + case addrs.ManagedResourceMode: + resource := provider.ResourcesMap[addr.Type] + if resource != nil { + return resource + } + case addrs.DataResourceMode: + resource := provider.DataSourcesMap[addr.Type] + if resource != nil { + return resource + } + } + + panic(fmt.Sprintf("resource %s not found in test step", addr.Type)) +} + +func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) { + flatmap := instance.AttrsFlat + if flatmap != nil { + return flatmap, nil + } + + // if we have json attrs, they need to be decoded + rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType()) + if err != nil { + return nil, err + } + + instanceState, err := res.ShimInstanceStateFromValue(rio.Value) + if err != nil { + return nil, err + } + + return instanceState.Attributes, nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go index 
9557207c..aa7454d4 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go @@ -1,6 +1,8 @@ package resource import ( + "bytes" + "flag" "fmt" "io" "io/ioutil" @@ -10,16 +12,174 @@ import ( "reflect" "regexp" "strings" + "syscall" "testing" "github.com/davecgh/go-spew/spew" - "github.com/hashicorp/go-getter" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/logutils" + "github.com/mitchellh/colorstring" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/command/format" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/internal/initwd" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) +// flagSweep is a flag available when running tests on the command line. It +// contains a comma-separated list of regions for the sweeper functions to +// run in. This flag bypasses the normal Test path and instead runs functions designed to +// clean up any leaked resources a testing environment could have created. It is +// a best effort attempt, and relies on Provider authors to implement "Sweeper" +// methods for resources. + +// Adding Sweeper methods with AddTestSweepers will +// construct a list of sweeper funcs to be called here. We iterate through +// regions provided by the sweep flag, and for each region we iterate through the +// tests, and exit on any errors. At time of writing, sweepers are run +// sequentially; however, they can list dependencies to be run first. We track +// the sweepers that have been run, so as to not run a sweeper twice for a given +// region. +// +// WARNING: +// Sweepers are designed to be destructive. You should not use the -sweep flag +// in any environment that is not strictly a test environment. Resources will be +// destroyed. + +var flagSweep = flag.String("sweep", "", "List of Regions to run available Sweepers") +var flagSweepRun = flag.String("sweep-run", "", "Comma-separated list of Sweeper Tests to run") +var sweeperFuncs map[string]*Sweeper + +// map of sweepers that have run, and the success/fail status based on any error +// raised +var sweeperRunList map[string]bool + +// SweeperFunc is a signature for a function that acts as a sweeper. It +// accepts a string for the region that the sweeper is to be run in. This +// function must be able to construct a valid client for that region. +type SweeperFunc func(r string) error + +type Sweeper struct { + // Name for sweeper. Must be unique to be run by the Sweeper Runner + Name string + + // Dependencies list the const names of other Sweeper functions that must be run + // prior to running this Sweeper. This is an ordered list that will be invoked + // recursively at the helper/resource level + Dependencies []string + + // Sweeper function that when invoked sweeps the Provider of specific + // resources + F SweeperFunc +} + +func init() { + sweeperFuncs = make(map[string]*Sweeper) +} + +// AddTestSweepers function adds a given name and Sweeper configuration +// pair to the internal sweeperFuncs map. 
Invoke this function to register a +// resource sweeper to be available for running when the -sweep flag is used +// with `go test`. Sweeper names must be unique to help ensure a given sweeper +// is only run once per run. +func AddTestSweepers(name string, s *Sweeper) { + if _, ok := sweeperFuncs[name]; ok { + log.Fatalf("[ERR] Error adding (%s) to sweeperFuncs: function already exists in map", name) + } + + sweeperFuncs[name] = s +} + +func TestMain(m *testing.M) { + flag.Parse() + if *flagSweep != "" { + // parse flagSweep contents for regions to run + regions := strings.Split(*flagSweep, ",") + + // get filtered list of sweepers to run based on sweep-run flag + sweepers := filterSweepers(*flagSweepRun, sweeperFuncs) + for _, region := range regions { + region = strings.TrimSpace(region) + // reset sweeperRunList for each region + sweeperRunList = map[string]bool{} + + log.Printf("[DEBUG] Running Sweepers for region (%s):\n", region) + for _, sweeper := range sweepers { + if err := runSweeperWithRegion(region, sweeper); err != nil { + log.Fatalf("[ERR] error running (%s): %s", sweeper.Name, err) + } + } + + log.Printf("Sweeper Tests ran:\n") + for s := range sweeperRunList { + fmt.Printf("\t- %s\n", s) + } + } + } else { + os.Exit(m.Run()) + } +} + +// filterSweepers takes a comma-separated string listing the names of sweepers +// to be run, and returns a filtered set from the list of all sweepers to +// run, based on the names given. +func filterSweepers(f string, source map[string]*Sweeper) map[string]*Sweeper { + filterSlice := strings.Split(strings.ToLower(f), ",") + if len(filterSlice) == 1 && filterSlice[0] == "" { + // if the filter slice is a single element of "" then no sweeper list was + // given, so just return the full list + return source + } + + sweepers := make(map[string]*Sweeper) + for name, sweeper := range source { + for _, s := range filterSlice { + if strings.Contains(strings.ToLower(name), s) { + sweepers[name] = sweeper + } + } + } + return sweepers +} + +// runSweeperWithRegion receives a sweeper and a region, and recursively calls +// itself with that region for every dependency found for that sweeper. If there +// are no dependencies, invoke the contained sweeper func with the region, and +// add the success/fail status to the sweeperRunList. +func runSweeperWithRegion(region string, s *Sweeper) error { + for _, dep := range s.Dependencies { + if depSweeper, ok := sweeperFuncs[dep]; ok { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), running...", s.Name, dep) + if err := runSweeperWithRegion(region, depSweeper); err != nil { + return err + } + } else { + log.Printf("[DEBUG] Sweeper (%s) has dependency (%s), but that sweeper was not found", s.Name, dep) + } + } + + if _, ok := sweeperRunList[s.Name]; ok { + log.Printf("[DEBUG] Sweeper (%s) already ran in region (%s)", s.Name, region) + return nil + } + + runE := s.F(region) + if runE == nil { + sweeperRunList[s.Name] = true + } else { + sweeperRunList[s.Name] = false + } + + return runE +} + const TestEnvVar = "TF_ACC" // TestProvider can be implemented by any ResourceProvider to provide custom @@ -38,6 +198,10 @@ type TestCheckFunc func(*terraform.State) error // ImportStateCheckFunc is the check function for ImportState tests type ImportStateCheckFunc func([]*terraform.InstanceState) error +// ImportStateIdFunc is an ID generation function to help with complex ID +// generation for ImportState tests. 
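Wiring the sweeper machinery above together, a provider's test package would typically register its sweepers roughly as follows; the resource names and the sweepExampleInstances helper are hypothetical:

```go
package example_test

import (
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
)

// TestMain delegates to the sweeper-aware runner, so that
// `go test -sweep=us-west-1,us-east-1` runs sweepers instead of tests.
func TestMain(m *testing.M) {
	resource.TestMain(m)
}

func init() {
	// Hypothetical sweeper cleaning up leaked example_instance resources.
	resource.AddTestSweepers("example_instance", &resource.Sweeper{
		Name: "example_instance",
		// The example_volume sweeper runs first, before this one.
		Dependencies: []string{"example_volume"},
		F:            sweepExampleInstances,
	})
}

// sweepExampleInstances would build a client for the region and delete any
// test-created instances; the body is elided in this sketch.
func sweepExampleInstances(region string) error {
	return nil
}
```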
+type ImportStateIdFunc func(*terraform.State) (string, error) + // TestCase is a single acceptance test case used to test the apply/destroy // lifecycle of a resource in a specific configuration. // @@ -112,6 +276,15 @@ type TestStep struct { // below. PreConfig func() + // Taint is a list of resource addresses to taint prior to the execution of + // the step. Be sure to only include this at a step where the referenced + // address will be present in state, as it will fail the test if the resource + // is missing. + // + // This option is ignored on ImportState tests, and currently only works for + // resources in the root module path. + Taint []string + //--------------------------------------------------------------- // Test modes. One of the following groups of settings must be // set to determine what the test step will do. Ideally we would've @@ -156,10 +329,19 @@ type TestStep struct { // no-op plans PlanOnly bool + // PreventDiskCleanup can be set to true for testing terraform modules which + // require access to disk at runtime. Note that this will leave files in the + // temp folder + PreventDiskCleanup bool + // PreventPostDestroyRefresh can be set to true for cases where data sources // are tested alongside real resources PreventPostDestroyRefresh bool + // SkipFunc is called before applying config, but after PreConfig + // This is useful for defining test steps with platform-dependent checks + SkipFunc func() (bool, error) + //--------------------------------------------------------------- // ImportState testing //--------------------------------------------------------------- @@ -174,6 +356,19 @@ type TestStep struct { // determined by inspecting the state for ResourceName's ID. ImportStateId string + // ImportStateIdPrefix is the prefix added in front of ImportStateId. + // This can be useful in complex import cases, where more than one + // attribute needs to be passed on as the Import ID. Mainly in cases + // where the ID is not known, and a known prefix needs to be added to + // the unset ImportStateId field. + ImportStateIdPrefix string + + // ImportStateIdFunc is a function that can be used to dynamically generate + // the ID for the ImportState tests. It is sent the state, which can be + // checked to derive the attributes necessary and generate the string in the + // desired format. + ImportStateIdFunc ImportStateIdFunc + // ImportStateCheck checks the results of ImportState. It should be // used to verify that the resulting value of ImportState has the // proper resources, IDs, and attributes. @@ -188,6 +383,64 @@ type TestStep struct { // be refreshed and don't matter. 
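As a sketch of the ImportStateIdFunc hook described above, a test can derive a composite import ID from attributes already recorded in state; the resource address and attribute names here are hypothetical:

```go
package example_test

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

// testAccExampleImportStateIdFunc builds a "group:id" import ID from the
// hypothetical example_thing.foo resource in the root module's state.
func testAccExampleImportStateIdFunc(s *terraform.State) (string, error) {
	rs, ok := s.RootModule().Resources["example_thing.foo"]
	if !ok {
		return "", fmt.Errorf("example_thing.foo not found in state")
	}
	return fmt.Sprintf("%s:%s", rs.Primary.Attributes["group"], rs.Primary.ID), nil
}
```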
ImportStateVerify bool ImportStateVerifyIgnore []string + + // providers is used internally to maintain a reference to the + // underlying providers during the tests + providers map[string]terraform.ResourceProvider +} + +// Set to a file mask in sprintf format where %s is test name +const EnvLogPathMask = "TF_LOG_PATH_MASK" + +func LogOutput(t TestT) (logOutput io.Writer, err error) { + logOutput = ioutil.Discard + + logLevel := logging.LogLevel() + if logLevel == "" { + return + } + + logOutput = os.Stderr + + if logPath := os.Getenv(logging.EnvLogFile); logPath != "" { + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + if logPathMask := os.Getenv(EnvLogPathMask); logPathMask != "" { + // Escape special characters which may appear if we have subtests + testName := strings.Replace(t.Name(), "/", "__", -1) + + logPath := fmt.Sprintf(logPathMask, testName) + var err error + logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666) + if err != nil { + return nil, err + } + } + + // This was the default since the beginning + logOutput = &logutils.LevelFilter{ + Levels: logging.ValidLevels, + MinLevel: logutils.LogLevel(logLevel), + Writer: logOutput, + } + + return +} + +// ParallelTest performs an acceptance test on a resource, allowing concurrency +// with other ParallelTest. +// +// Tests will fail if they do not properly handle conditions to allow multiple +// tests to occur against the same resource or service (e.g. random naming). +// All other requirements of the Test function also apply to this function. +func ParallelTest(t TestT, c TestCase) { + t.Parallel() + Test(t, c) } // Test performs an acceptance test on a resource. @@ -211,7 +464,7 @@ func Test(t TestT, c TestCase) { return } - logWriter, err := logging.LogOutput() + logWriter, err := LogOutput(t) if err != nil { t.Error(fmt.Errorf("error setting up logging: %s", err)) } @@ -228,11 +481,23 @@ func Test(t TestT, c TestCase) { c.PreCheck() } - ctxProviders, err := testProviderFactories(c) + // get instances of all providers, so we can use the individual + // resources to shim the state during the tests. 
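For reference, ParallelTest is invoked exactly like Test; a minimal acceptance test using it might look as follows, where the provider map, config, and resource address are placeholders rather than real fixtures:

```go
package example_test

import (
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// testAccProviders would normally map the provider name to the provider
// under test; it is left empty here as a placeholder.
var testAccProviders = map[string]terraform.ResourceProvider{}

const testAccExampleThingConfig = `
resource "example_thing" "foo" {
  name = "acc-test"
}
`

// TestAccExampleThing_basic runs concurrently with other ParallelTest cases.
func TestAccExampleThing_basic(t *testing.T) {
	resource.ParallelTest(t, resource.TestCase{
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{
				Config: testAccExampleThingConfig,
				Check:  resource.TestCheckResourceAttrSet("example_thing.foo", "id"),
			},
		},
	})
}
```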
+ providers := make(map[string]terraform.ResourceProvider) + for name, pf := range testProviderFactories(c) { + p, err := pf() + if err != nil { + t.Fatal(err) + } + providers[name] = p + } + + providerResolver, err := testProviderResolver(c) if err != nil { t.Fatal(err) } - opts := terraform.ContextOpts{Providers: ctxProviders} + + opts := terraform.ContextOpts{ProviderResolver: providerResolver} // A single state variable to track the lifecycle, starting with no state var state *terraform.State @@ -242,18 +507,49 @@ func Test(t TestT, c TestCase) { idRefresh := c.IDRefreshName != "" errored := false for i, step := range c.Steps { + // insert the providers into the step so we can get the resources for + // shimming the state + step.providers = providers + var err error - log.Printf("[WARN] Test: Executing step %d", i) + log.Printf("[DEBUG] Test: Executing step %d", i) - // Determine the test mode to execute - if step.Config != "" { - state, err = testStepConfig(opts, state, step) - } else if step.ImportState { - state, err = testStepImportState(opts, state, step) - } else { + if step.SkipFunc != nil { + skip, err := step.SkipFunc() + if err != nil { + t.Fatal(err) + } + if skip { + log.Printf("[WARN] Skipping step %d", i) + continue + } + } + + if step.Config == "" && !step.ImportState { err = fmt.Errorf( "unknown test mode for step. Please see TestStep docs\n\n%#v", step) + } else { + if step.ImportState { + if step.Config == "" { + step.Config = testProviderConfig(c) + } + + // Can optionally set step.Config in addition to + // step.ImportState, to provide config for the import. + state, err = testStepImportState(opts, state, step) + } else { + state, err = testStepConfig(opts, state, step) + } + } + + // If we expected an error, but did not get one, fail + if err == nil && step.ExpectError != nil { + errored = true + t.Error(fmt.Sprintf( + "Step %d, no error received, but expected a match to:\n\n%s\n\n", + i, step.ExpectError)) + break } // If there was an error, exit @@ -269,8 +565,7 @@ func Test(t TestT, c TestCase) { } } else { errored = true - t.Error(fmt.Sprintf( - "Step %d error: %s", i, err)) + t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err))) break } } @@ -323,7 +618,9 @@ func Test(t TestT, c TestCase) { Config: lastStep.Config, Check: c.CheckDestroy, Destroy: true, + PreventDiskCleanup: lastStep.PreventDiskCleanup, PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, + providers: providers, } log.Printf("[WARN] Test: Executing destroy step") @@ -341,41 +638,62 @@ func Test(t TestT, c TestCase) { } } -// testProviderFactories is a helper to build the ResourceProviderFactory map -// with pre instantiated ResourceProviders, so that we can reset them for the -// test, while only calling the factory function once. -// Any errors are stored so that they can be returned by the factory in -// terraform to match non-test behavior. -func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) { +// testProviderConfig takes the list of Providers in a TestCase and returns a +// config with only empty provider blocks. This is useful for Import, where no +// config is provided, but the providers must be defined. 
+func testProviderConfig(c TestCase) string { + var lines []string + for p := range c.Providers { + lines = append(lines, fmt.Sprintf("provider %q {}\n", p)) + } + + return strings.Join(lines, "") +} + +// testProviderFactories combines the fixed Providers and +// ResourceProviderFactory functions into a single map of +// ResourceProviderFactory functions. +func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory { ctxProviders := make(map[string]terraform.ResourceProviderFactory) + for k, pf := range c.ProviderFactories { + ctxProviders[k] = pf + } // add any fixed providers for k, p := range c.Providers { ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) } + return ctxProviders +} - // call any factory functions and store the result. - for k, pf := range c.ProviderFactories { - p, err := pf() - ctxProviders[k] = func() (terraform.ResourceProvider, error) { - return p, err - } - } +// testProviderResolver is a helper to build a ResourceProviderResolver +// with pre instantiated ResourceProviders, so that we can reset them for the +// test, while only calling the factory function once. +// Any errors are stored so that they can be returned by the factory in +// terraform to match non-test behavior. +func testProviderResolver(c TestCase) (providers.Resolver, error) { + ctxProviders := testProviderFactories(c) + + // wrap the old provider factories in the test grpc server so they can be + // called from terraform. + newProviders := make(map[string]providers.Factory) - // reset the providers if needed for k, pf := range ctxProviders { - // we can ignore any errors here, if we don't have a provider to reset - // the error will be handled later - p, _ := pf() - if p, ok := p.(TestProvider); ok { - err := p.TestReset() + factory := pf // must copy to ensure each closure sees its own value + newProviders[k] = func() (providers.Interface, error) { + p, err := factory() if err != nil { - return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err) + return nil, err } + + // The provider is wrapped in a GRPCTestProvider so that it can be + // passed back to terraform core as a providers.Interface, rather + // than the legacy ResourceProvider. + return GRPCTestProvider(p), nil } } - return ctxProviders, nil + return providers.ResolverFixed(newProviders), nil } // UnitTest is a helper to force the acceptance testing harness to run in the @@ -393,63 +711,64 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r return nil } - name := fmt.Sprintf("%s.foo", r.Type) + addr := addrs.Resource{ + Mode: addrs.ManagedResourceMode, + Type: r.Type, + Name: "foo", + }.Instance(addrs.NoKey) + absAddr := addr.Absolute(addrs.RootModuleInstance) // Build the state. The state is just the resource with an ID. There // are no attributes. We only set what is needed to perform a refresh. - state := terraform.NewState() - state.RootModule().Resources[name] = &terraform.ResourceState{ - Type: r.Type, - Primary: &terraform.InstanceState{ - ID: r.Primary.ID, + state := states.NewState() + state.RootModule().SetResourceInstanceCurrent( + addr, + &states.ResourceInstanceObjectSrc{ + AttrsFlat: r.Primary.Attributes, + Status: states.ObjectReady, }, - } + addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance), + ) // Create the config module. We use the full config because Refresh // doesn't have access to it and we may need things like provider // configurations. 
The initial implementation of id-only checks used // an empty config module, but that caused the aforementioned problems. - mod, err := testModule(opts, step) + cfg, err := testConfig(opts, step) if err != nil { return err } // Initialize the context - opts.Module = mod + opts.Config = cfg opts.State = state - ctx, err := terraform.NewContext(&opts) - if err != nil { - return err + ctx, ctxDiags := terraform.NewContext(&opts) + if ctxDiags.HasErrors() { + return ctxDiags.Err() } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + if diags := ctx.Validate(); len(diags) > 0 { + if diags.HasErrors() { + return errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + log.Printf("[WARN] Config warnings:\n%s", diags.Err().Error()) } // Refresh! - state, err = ctx.Refresh() - if err != nil { - return fmt.Errorf("Error refreshing: %s", err) + state, refreshDiags := ctx.Refresh() + if refreshDiags.HasErrors() { + return refreshDiags.Err() } // Verify attribute equivalence. - actualR := state.RootModule().Resources[name] + actualR := state.ResourceInstance(absAddr) if actualR == nil { return fmt.Errorf("Resource gone!") } - if actualR.Primary == nil { + if actualR.Current == nil { return fmt.Errorf("Resource has no primary instance") } - actual := actualR.Primary.Attributes + actual := actualR.Current.AttrsFlat expected := r.Primary.Attributes // Remove fields we're ignoring for _, v := range c.IDRefreshIgnore { @@ -485,51 +804,54 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r return nil } -func testModule( - opts terraform.ContextOpts, - step TestStep) (*module.Tree, error) { +func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) { if step.PreConfig != nil { step.PreConfig() } cfgPath, err := ioutil.TempDir("", "tf-test") if err != nil { - return nil, fmt.Errorf( - "Error creating temporary directory for config: %s", err) + return nil, fmt.Errorf("Error creating temporary directory for config: %s", err) } - defer os.RemoveAll(cfgPath) - // Write the configuration - cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) - if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) + if step.PreventDiskCleanup { + log.Printf("[INFO] Skipping defer os.RemoveAll call") + } else { + defer os.RemoveAll(cfgPath) } - _, err = io.Copy(cfgF, strings.NewReader(step.Config)) - cfgF.Close() + // Write the main configuration file + err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm) if err != nil { - return nil, fmt.Errorf( - "Error creating temporary file for config: %s", err) + return nil, fmt.Errorf("Error creating temporary file for config: %s", err) } - // Parse the configuration - mod, err := module.NewTreeModule("", cfgPath) + // Create directory for our child modules, if any. 
+ modulesDir := filepath.Join(cfgPath, ".modules") + err = os.Mkdir(modulesDir, os.ModePerm) if err != nil { - return nil, fmt.Errorf( - "Error loading configuration: %s", err) + return nil, fmt.Errorf("Error creating child modules directory: %s", err) } - // Load the modules - modStorage := &getter.FolderStorage{ - StorageDir: filepath.Join(cfgPath, ".tfmodules"), + inst := initwd.NewModuleInstaller(modulesDir, nil) + _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{}) + if installDiags.HasErrors() { + return nil, installDiags.Err() } - err = mod.Load(modStorage, module.GetModeGet) + + loader, err := configload.NewLoader(&configload.Config{ + ModulesDir: modulesDir, + }) if err != nil { - return nil, fmt.Errorf("Error downloading modules: %s", err) + return nil, fmt.Errorf("failed to create config loader: %s", err) + } + + config, configDiags := loader.LoadConfig(cfgPath) + if configDiags.HasErrors() { + return nil, configDiags } - return mod, nil + return config, nil } func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { @@ -599,12 +921,30 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc { return err } - if val, ok := is.Attributes[key]; ok && val != "" { - return nil + return testCheckResourceAttrSet(is, name, key) + } +} + +// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with +// support for non-root modules +func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err } + return testCheckResourceAttrSet(is, name, key) + } +} + +func testCheckResourceAttrSet(is *terraform.InstanceState, name string, key string) error { + if val, ok := is.Attributes[key]; !ok || val == "" { return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key) } + + return nil } // TestCheckResourceAttr is a TestCheckFunc which validates @@ -616,23 +956,52 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc { return err } - if v, ok := is.Attributes[key]; !ok || v != value { - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", name, key) - } + return testCheckResourceAttr(is, name, key, value) + } +} - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - name, - key, - value, - v) +// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with +// support for non-root modules +func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err } - return nil + return testCheckResourceAttr(is, name, key, value) } } +func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { + // Empty containers may be elided from the state. + // If the intent here is to check for an empty container, allow the key to + // also be non-existent. 
+ emptyCheck := false + if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) { + emptyCheck = true + } + + if v, ok := is.Attributes[key]; !ok || v != value { + if emptyCheck && !ok { + return nil + } + + if !ok { + return fmt.Errorf("%s: Attribute '%s' not found", name, key) + } + + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + name, + key, + value, + v) + } + return nil +} + // TestCheckNoResourceAttr is a TestCheckFunc which ensures that // NO value exists in state for the given name/key combination. func TestCheckNoResourceAttr(name, key string) TestCheckFunc { @@ -642,12 +1011,43 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc { return err } - if _, ok := is.Attributes[key]; ok { - return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + return testCheckNoResourceAttr(is, name, key) + } +} + +// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with +// support for non-root modules +func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err } + return testCheckNoResourceAttr(is, name, key) + } +} + +func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { + // Empty containers may sometimes be included in the state. + // If the intent here is to check for an empty container, allow the value to + // also be "0". + emptyCheck := false + if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") { + emptyCheck = true + } + + val, exists := is.Attributes[key] + if emptyCheck && val == "0" { return nil } + + if exists { + return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) + } + + return nil } // TestMatchResourceAttr is a TestCheckFunc which checks that the value @@ -659,17 +1059,35 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc { return err } - if !r.MatchString(is.Attributes[key]) { - return fmt.Errorf( - "%s: Attribute '%s' didn't match %q, got %#v", - name, - key, - r.String(), - is.Attributes[key]) + return testMatchResourceAttr(is, name, key, r) + } +} + +// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with +// support for non-root modules +func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { + mpt := addrs.Module(mp).UnkeyedInstanceShim() + return func(s *terraform.State) error { + is, err := modulePathPrimaryInstanceState(s, mpt, name) + if err != nil { + return err } - return nil + return testMatchResourceAttr(is, name, key, r) + } +} + +func testMatchResourceAttr(is *terraform.InstanceState, name string, key string, r *regexp.Regexp) error { + if !r.MatchString(is.Attributes[key]) { + return fmt.Errorf( + "%s: Attribute '%s' didn't match %q, got %#v", + name, + key, + r.String(), + is.Attributes[key]) } + + return nil } // TestCheckResourceAttrPtr is like TestCheckResourceAttr except the @@ -681,6 +1099,14 @@ func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckF } } +// TestCheckModuleResourceAttrPtr - as per TestCheckResourceAttrPtr but with +// support for non-root modules +func TestCheckModuleResourceAttrPtr(mp []string, name string, key string, value *string) TestCheckFunc { + return func(s *terraform.State) error { + return TestCheckModuleResourceAttr(mp, name, key, *value)(s) + 
} +} + // TestCheckResourceAttrPair is a TestCheckFunc which validates that the values // in state for a pair of name/key combinations are equal. func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc { @@ -689,31 +1115,75 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string if err != nil { return err } - vFirst, ok := isFirst.Attributes[keyFirst] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) - } isSecond, err := primaryInstanceState(s, nameSecond) if err != nil { return err } - vSecond, ok := isSecond.Attributes[keySecond] - if !ok { - return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) + + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with +// support for non-root modules +func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { + mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim() + mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim() + return func(s *terraform.State) error { + isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst) + if err != nil { + return err } - if vFirst != vSecond { - return fmt.Errorf( - "%s: Attribute '%s' expected %#v, got %#v", - nameFirst, - keyFirst, - vSecond, - vFirst) + isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond) + if err != nil { + return err } + return testCheckResourceAttrPair(isFirst, nameFirst, keyFirst, isSecond, nameSecond, keySecond) + } +} + +func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { + vFirst, okFirst := isFirst.Attributes[keyFirst] + vSecond, okSecond := isSecond.Attributes[keySecond] + + // Container count values of 0 should not be relied upon, and not reliably + // maintained by helper/schema. For the purpose of tests, consider unset and + // 0 to be equal. + if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] && + (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) { + // they have the same suffix, and it is a collection count key. + if vFirst == "0" || vFirst == "" { + okFirst = false + } + if vSecond == "0" || vSecond == "" { + okSecond = false + } + } + + if okFirst != okSecond { + if !okFirst { + return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond) + } + return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond) + } + if !(okFirst || okSecond) { + // If they both don't exist then they are equally unset, so that's okay. return nil } + + if vFirst != vSecond { + return fmt.Errorf( + "%s: Attribute '%s' expected %#v, got %#v", + nameFirst, + keyFirst, + vSecond, + vFirst) + } + + return nil } // TestCheckOutput checks an output in the Terraform configuration @@ -764,23 +1234,87 @@ type TestT interface { Error(args ...interface{}) Fatal(args ...interface{}) Skip(args ...interface{}) + Name() string + Parallel() } // This is set to true by unit tests to alter some behavior var testTesting = false -// primaryInstanceState returns the primary instance state for the given resource name. 
-func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { - ms := s.RootModule() +// modulePrimaryInstanceState returns the instance state for the given resource +// name in a ModuleState +func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, name string) (*terraform.InstanceState, error) { rs, ok := ms.Resources[name] if !ok { - return nil, fmt.Errorf("Not found: %s", name) + return nil, fmt.Errorf("Not found: %s in %s", name, ms.Path) } is := rs.Primary if is == nil { - return nil, fmt.Errorf("No primary instance: %s", name) + return nil, fmt.Errorf("No primary instance: %s in %s", name, ms.Path) } return is, nil } + +// modulePathPrimaryInstanceState returns the primary instance state for the +// given resource name in a given module path. +func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) { + ms := s.ModuleByPath(mp) + if ms == nil { + return nil, fmt.Errorf("No module found at: %s", mp) + } + + return modulePrimaryInstanceState(s, ms, name) +} + +// primaryInstanceState returns the primary instance state for the given +// resource name in the root module. +func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) { + ms := s.RootModule() + return modulePrimaryInstanceState(s, ms, name) +} + +// operationError is a specialized implementation of error used to describe +// failures during one of the several operations performed for a particular +// test case. +type operationError struct { + OpName string + Diags tfdiags.Diagnostics +} + +func newOperationError(opName string, diags tfdiags.Diagnostics) error { + return operationError{opName, diags} +} + +// Error returns a terse error string containing just the basic diagnostic +// messages, for situations where normal Go error behavior is appropriate. +func (err operationError) Error() string { + return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error()) +} + +// ErrorDetail is like Error except it includes verbosely-rendered diagnostics +// similar to what would come from a normal Terraform run, which include +// additional context not included in Error(). +func (err operationError) ErrorDetail() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "errors during %s:", err.OpName) + clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors} + for _, diag := range err.Diags { + diagStr := format.Diagnostic(diag, nil, clr, 78) + buf.WriteByte('\n') + buf.WriteString(diagStr) + } + return buf.String() +} + +// detailedErrorMessage is a helper for calling ErrorDetail on an error if +// it is an operationError or just taking Error otherwise. 
+func detailedErrorMessage(err error) string { + switch tErr := err.(type) { + case operationError: + return tErr.ErrorDetail() + default: + return err.Error() + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go index 537a11c3..311fdb6e 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go @@ -1,11 +1,23 @@ package resource import ( + "bufio" + "bytes" + "errors" "fmt" "log" + "sort" "strings" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform/plans" "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/tfdiags" ) // testStepConfig runs a config-mode test step @@ -16,66 +28,79 @@ func testStepConfig( return testStep(opts, state, step) } -func testStep( - opts terraform.ContextOpts, - state *terraform.State, - step TestStep) (*terraform.State, error) { - mod, err := testModule(opts, step) +func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + if !step.Destroy { + if err := testStepTaint(state, step); err != nil { + return state, err + } + } + + cfg, err := testConfig(opts, step) if err != nil { return state, err } + var stepDiags tfdiags.Diagnostics + // Build the context - opts.Module = mod - opts.State = state - opts.Destroy = step.Destroy - ctx, err := terraform.NewContext(&opts) + opts.Config = cfg + opts.State, err = terraform.ShimLegacyState(state) if err != nil { - return state, fmt.Errorf("Error initializing context: %s", err) + return nil, err } - if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { - if len(es) > 0 { - estrs := make([]string, len(es)) - for i, e := range es { - estrs[i] = e.Error() - } - return state, fmt.Errorf( - "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v", - ws, estrs) + + opts.Destroy = step.Destroy + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err()) + } + if stepDiags := ctx.Validate(); len(stepDiags) > 0 { + if stepDiags.HasErrors() { + return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err()) } - log.Printf("[WARN] Config warnings: %#v", ws) + + log.Printf("[WARN] Config warnings:\n%s", stepDiags) } // Refresh! - state, err = ctx.Refresh() + newState, stepDiags := ctx.Refresh() + // shim the state first so the test can check the state on errors + + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf( - "Error refreshing: %s", err) + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("refresh", stepDiags) } // If this step is a PlanOnly step, skip over this first Plan and subsequent // Apply, and use the follow up Plan that checks for perpetual diffs if !step.PlanOnly { // Plan! 
- if p, err := ctx.Plan(); err != nil { - return state, fmt.Errorf( - "Error planning: %s", err) + if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("plan", stepDiags) } else { - log.Printf("[WARN] Test: Step plan: %s", p) + log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes)) } // We need to keep a copy of the state prior to destroying - // such that destroy steps can verify their behaviour in the check + // such that destroy steps can verify their behavior in the check // function stateBeforeApplication := state.DeepCopy() - // Apply! - state, err = ctx.Apply() + // Apply the diff, creating real resources. + newState, stepDiags = ctx.Apply() + // shim the state first so the test can check the state on errors + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf("Error applying: %s", err) + return nil, err + } + if stepDiags.HasErrors() { + return state, newOperationError("apply", stepDiags) } - // Check! Excitement! + // Run any configured checks if step.Check != nil { if step.Destroy { if err := step.Check(stateBeforeApplication); err != nil { @@ -91,31 +116,35 @@ func testStep( // Now, verify that Plan is now empty and we don't have a perpetual diff issue // We do this with TWO plans. One without a refresh. - var p *terraform.Plan - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on follow-up plan: %s", err) + var p *plans.Plan + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("follow-up plan", stepDiags) } - if p.Diff != nil && !p.Diff.Empty() { + if !p.Changes.Empty() { if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } else { return state, fmt.Errorf( - "After applying this step, the plan was not empty:\n\n%s", p) + "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } } // And another after a Refresh. if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { - state, err = ctx.Refresh() + newState, stepDiags = ctx.Refresh() + if stepDiags.HasErrors() { + return state, newOperationError("follow-up refresh", stepDiags) + } + + state, err = shimNewState(newState, step.providers) if err != nil { - return state, fmt.Errorf( - "Error on follow-up refresh: %s", err) + return nil, err } } - if p, err = ctx.Plan(); err != nil { - return state, fmt.Errorf("Error on second follow-up plan: %s", err) + if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() { + return state, newOperationError("second follow-up plan", stepDiags) } - empty := p.Diff == nil || p.Diff.Empty() + empty := p.Changes.Empty() // Data resources are tricky because they legitimately get instantiated // during refresh so that they will be already populated during the @@ -123,38 +152,254 @@ func testStep( // config we'll end up wanting to destroy them again here. This is // acceptable and expected, and we'll treat it as "empty" for the // sake of this testing. 
- if step.Destroy { + if step.Destroy && !empty { empty = true - - for _, moduleDiff := range p.Diff.Modules { - for k, instanceDiff := range moduleDiff.Resources { - if !strings.HasPrefix(k, "data.") { - empty = false - break - } - - if !instanceDiff.Destroy { - empty = false - } + for _, change := range p.Changes.Resources { + if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode { + empty = false + break } } } if !empty { if step.ExpectNonEmptyPlan { - log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) + log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } else { return state, fmt.Errorf( "After applying this step and refreshing, "+ - "the plan was not empty:\n\n%s", p) + "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes)) } } // Made it here, but expected a non-empty plan, fail! - if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) { + if step.ExpectNonEmptyPlan && empty { return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") } // Made it here? Good job test step! return state, nil } + +// legacyPlanComparisonString produces a string representation of the changes +// from a plan and a given state together, as was formerly produced by the +// String method of terraform.Plan. +// +// This is here only for compatibility with existing tests that predate our +// new plan and state types, and should not be used in new tests. Instead, use +// a library like "cmp" to do a deep equality and diff on the two +// data structures. +func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string { + return fmt.Sprintf( + "DIFF:\n\n%s\n\nSTATE:\n\n%s", + legacyDiffComparisonString(changes), + state.String(), + ) +} + +// legacyDiffComparisonString produces a string representation of the changes +// from a planned changes object, as was formerly produced by the String method +// of terraform.Diff. +// +// This is here only for compatibility with existing tests that predate our +// new plan types, and should not be used in new tests. Instead, use a library +// like "cmp" to do a deep equality check and diff on the two data structures. +func legacyDiffComparisonString(changes *plans.Changes) string { + // The old string representation of a plan was grouped by module, but + // our new plan structure is not grouped in that way and so we'll need + // to preprocess it in order to produce that grouping. + type ResourceChanges struct { + Current *plans.ResourceInstanceChangeSrc + Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc + } + byModule := map[string]map[string]*ResourceChanges{} + resourceKeys := map[string][]string{} + requiresReplace := map[string][]string{} + var moduleKeys []string + for _, rc := range changes.Resources { + if rc.Action == plans.NoOp { + // We won't mention no-op changes here at all, since the old plan + // model we are emulating here didn't have such a concept. 
+ continue + } + moduleKey := rc.Addr.Module.String() + if _, exists := byModule[moduleKey]; !exists { + moduleKeys = append(moduleKeys, moduleKey) + byModule[moduleKey] = make(map[string]*ResourceChanges) + } + resourceKey := rc.Addr.Resource.String() + if _, exists := byModule[moduleKey][resourceKey]; !exists { + resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey) + byModule[moduleKey][resourceKey] = &ResourceChanges{ + Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc), + } + } + + if rc.DeposedKey == states.NotDeposed { + byModule[moduleKey][resourceKey].Current = rc + } else { + byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc + } + + rr := []string{} + for _, p := range rc.RequiredReplace.List() { + rr = append(rr, hcl2shim.FlatmapKeyFromPath(p)) + } + requiresReplace[resourceKey] = rr + } + sort.Strings(moduleKeys) + for _, ks := range resourceKeys { + sort.Strings(ks) + } + + var buf bytes.Buffer + + for _, moduleKey := range moduleKeys { + rcs := byModule[moduleKey] + var mBuf bytes.Buffer + + for _, resourceKey := range resourceKeys[moduleKey] { + rc := rcs[resourceKey] + + forceNewAttrs := requiresReplace[resourceKey] + + crud := "UPDATE" + if rc.Current != nil { + switch rc.Current.Action { + case plans.DeleteThenCreate: + crud = "DESTROY/CREATE" + case plans.CreateThenDelete: + crud = "CREATE/DESTROY" + case plans.Delete: + crud = "DESTROY" + case plans.Create: + crud = "CREATE" + } + } else { + // We must be working on a deposed object then, in which + // case destroying is the only possible action. + crud = "DESTROY" + } + + extra := "" + if rc.Current == nil && len(rc.Deposed) > 0 { + extra = " (deposed only)" + } + + fmt.Fprintf( + &mBuf, "%s: %s%s\n", + crud, resourceKey, extra, + ) + + attrNames := map[string]bool{} + var oldAttrs map[string]string + var newAttrs map[string]string + if rc.Current != nil { + if before := rc.Current.Before; before != nil { + ty, err := before.ImpliedType() + if err == nil { + val, err := before.Decode(ty) + if err == nil { + oldAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range oldAttrs { + attrNames[k] = true + } + } + } + } + if after := rc.Current.After; after != nil { + ty, err := after.ImpliedType() + if err == nil { + val, err := after.Decode(ty) + if err == nil { + newAttrs = hcl2shim.FlatmapValueFromHCL2(val) + for k := range newAttrs { + attrNames[k] = true + } + } + } + } + } + if oldAttrs == nil { + oldAttrs = make(map[string]string) + } + if newAttrs == nil { + newAttrs = make(map[string]string) + } + + attrNamesOrder := make([]string, 0, len(attrNames)) + keyLen := 0 + for n := range attrNames { + attrNamesOrder = append(attrNamesOrder, n) + if len(n) > keyLen { + keyLen = len(n) + } + } + sort.Strings(attrNamesOrder) + + for _, attrK := range attrNamesOrder { + v := newAttrs[attrK] + u := oldAttrs[attrK] + + if v == config.UnknownVariableValue { + v = "<computed>" + } + // NOTE: we don't support <sensitive> here because we would + // need schema to do that. Excluding sensitive values + // is now done at the UI layer, and so should not be tested + // at the core layer. + + updateMsg := "" + + // This may not be as precise as in the old diff, as it matches + // everything under the attribute that was originally marked as + // ForceNew, but should help make it easier to determine what + // caused replacement here. 
+ for _, k := range forceNewAttrs { + if strings.HasPrefix(attrK, k) { + updateMsg = " (forces new resource)" + break + } + } + + fmt.Fprintf( + &mBuf, " %s:%s %#v => %#v%s\n", + attrK, + strings.Repeat(" ", keyLen-len(attrK)), + u, v, + updateMsg, + ) + } + } + + if moduleKey == "" { // root module + buf.Write(mBuf.Bytes()) + buf.WriteByte('\n') + continue + } + + fmt.Fprintf(&buf, "%s:\n", moduleKey) + s := bufio.NewScanner(&mBuf) + for s.Scan() { + buf.WriteString(fmt.Sprintf(" %s\n", s.Text())) + } + } + + return buf.String() +} + +func testStepTaint(state *terraform.State, step TestStep) error { + for _, p := range step.Taint { + m := state.RootModule() + if m == nil { + return errors.New("no state") + } + rs, ok := m.Resources[p] + if !ok { + return fmt.Errorf("resource %q not found in state", p) + } + log.Printf("[WARN] Test: Explicitly tainting resource %q", p) + rs.Taint() + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go index d5c57962..e1b7aea8 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go @@ -7,6 +7,12 @@ import ( "strings" "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/states" "github.com/hashicorp/terraform/terraform" ) @@ -15,46 +21,80 @@ func testStepImportState( opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) { + // Determine the ID to import - importId := step.ImportStateId - if importId == "" { + var importId string + switch { + case step.ImportStateIdFunc != nil: + var err error + importId, err = step.ImportStateIdFunc(state) + if err != nil { + return state, err + } + case step.ImportStateId != "": + importId = step.ImportStateId + default: resource, err := testResource(step, state) if err != nil { return state, err } - importId = resource.Primary.ID } + importPrefix := step.ImportStateIdPrefix + if importPrefix != "" { + importId = fmt.Sprintf("%s%s", importPrefix, importId) + } + // Setup the context. We initialize with an empty state. We use the // full config for provider configurations. - mod, err := testModule(opts, step) + cfg, err := testConfig(opts, step) if err != nil { return state, err } - opts.Module = mod - opts.State = terraform.NewState() - ctx, err := terraform.NewContext(&opts) - if err != nil { - return state, err + opts.Config = cfg + + // import tests start with empty state + opts.State = states.NewState() + + ctx, stepDiags := terraform.NewContext(&opts) + if stepDiags.HasErrors() { + return state, stepDiags.Err() } - // Do the import! - newState, err := ctx.Import(&terraform.ImportOpts{ + // The test step provides the resource address as a string, so we need + // to parse it to get an addrs.AbsResourceAddress to pass in to the + // import method. 
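The new import-ID resolution order (ImportStateIdFunc first, then ImportStateId, then the resource's primary ID) is driven from the test step. A hedged sketch of a step exercising the first case; the resource type, config, and provider map are hypothetical fixtures assumed to exist in the test suite:

```go
package example

import (
	"fmt"
	"testing"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// Assumed to be defined elsewhere in the test suite.
var (
	testAccProviders          map[string]terraform.ResourceProvider
	testAccExampleThingConfig string
)

func TestAccExampleThing_importBasic(t *testing.T) {
	resource.Test(t, resource.TestCase{
		Providers: testAccProviders,
		Steps: []resource.TestStep{
			{Config: testAccExampleThingConfig},
			{
				ResourceName:      "example_thing.foo",
				ImportState:       true,
				ImportStateVerify: true,
				// Derive the import ID from prior state rather than
				// hard-coding it; this hits the ImportStateIdFunc case
				// in the switch above.
				ImportStateIdFunc: func(s *terraform.State) (string, error) {
					rs, ok := s.RootModule().Resources["example_thing.foo"]
					if !ok {
						return "", fmt.Errorf("example_thing.foo not found in state")
					}
					return rs.Primary.ID, nil
				},
			},
		},
	})
}
```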
+ traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{}) + if hclDiags.HasErrors() { + return nil, hclDiags + } + importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal) + if stepDiags.HasErrors() { + return nil, stepDiags.Err() + } + + // Do the import + importedState, stepDiags := ctx.Import(&terraform.ImportOpts{ // Set the module so that any provider config is loaded - Module: mod, + Config: cfg, Targets: []*terraform.ImportTarget{ &terraform.ImportTarget{ - Addr: step.ResourceName, + Addr: importAddr, ID: importId, }, }, }) + if stepDiags.HasErrors() { + log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err()) + return state, stepDiags.Err() + } + + newState, err := shimNewState(importedState, step.providers) if err != nil { - log.Printf("[ERROR] Test: ImportState failure: %s", err) - return state, err + return nil, err } // Go through the new state and verify @@ -62,7 +102,9 @@ func testStepImportState( var states []*terraform.InstanceState for _, r := range newState.RootModule().Resources { if r.Primary != nil { - states = append(states, r.Primary) + is := r.Primary.DeepCopy() + is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type + states = append(states, is) } } if err := step.ImportStateCheck(states); err != nil { @@ -89,30 +131,84 @@ func testStepImportState( r.Primary.ID) } + // We'll try our best to find the schema for this resource type + // so we can ignore Removed fields during validation. If we fail + // to find the schema then we won't ignore them and so the test + // will need to rely on explicit ImportStateVerifyIgnore, though + // this shouldn't happen in any reasonable case. + var rsrcSchema *schema.Resource + if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() { + providerType := providerAddr.ProviderConfig.Type + if provider, ok := step.providers[providerType]; ok { + if provider, ok := provider.(*schema.Provider); ok { + rsrcSchema = provider.ResourcesMap[r.Type] + } + } + } + + // don't add empty flatmapped containers, so we can more easily + // compare the attributes + skipEmpty := func(k, v string) bool { + if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") { + if v == "0" { + return true + } + } + return false + } + // Compare their attributes actual := make(map[string]string) for k, v := range r.Primary.Attributes { + if skipEmpty(k, v) { + continue + } actual[k] = v } + expected := make(map[string]string) for k, v := range oldR.Primary.Attributes { + if skipEmpty(k, v) { + continue + } expected[k] = v } // Remove fields we're ignoring for _, v := range step.ImportStateVerifyIgnore { - for k, _ := range actual { + for k := range actual { if strings.HasPrefix(k, v) { delete(actual, k) } } - for k, _ := range expected { + for k := range expected { if strings.HasPrefix(k, v) { delete(expected, k) } } } + // Also remove any attributes that are marked as "Removed" in the + // schema, if we have a schema to check that against. 
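The two-step parse above (an HCL traversal, then a structured address) can be seen in isolation. A minimal sketch using the same calls with a hard-coded address string:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// Parse "module.net.aws_vpc.main" into an absolute resource instance
	// address, mirroring how testStepImportState handles ResourceName.
	src := []byte("module.net.aws_vpc.main")
	traversal, diags := hclsyntax.ParseTraversalAbs(src, "", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}
	addr, moreDiags := addrs.ParseAbsResourceInstance(traversal)
	if moreDiags.HasErrors() {
		panic(moreDiags.Err())
	}
	fmt.Println(addr.String()) // module.net.aws_vpc.main
}
```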
+ if rsrcSchema != nil { + for k := range actual { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(actual, k) + break + } + } + } + for k := range expected { + for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) { + if schema.Removed != "" { + delete(expected, k) + break + } + } + } + } + if !reflect.DeepEqual(actual, expected) { // Determine only the different attributes for k, v := range expected { diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go index ca50e292..e56a5155 100644 --- a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go +++ b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go @@ -74,7 +74,7 @@ func RetryableError(err error) *RetryError { return &RetryError{Err: err, Retryable: true} } -// NonRetryableError is a helper to create a RetryError that's _not)_ retryable +// NonRetryableError is a helper to create a RetryError that's _not_ retryable // from a given error. func NonRetryableError(err error) *RetryError { if err == nil { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go index 33fe2c19..c8d8ae28 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go @@ -2,8 +2,15 @@ package schema import ( "context" + "fmt" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" + ctyconvert "github.com/zclconf/go-cty/cty/convert" ) // Backend represents a partial backend.Backend implementation and simplifies @@ -28,8 +35,8 @@ type Backend struct { config *ResourceData } -const ( - backendConfigKey = iota +var ( + backendConfigKey = contextKey("backend config") ) // FromContextBackendConfig extracts a ResourceData with the configuration @@ -38,41 +45,123 @@ func FromContextBackendConfig(ctx context.Context) *ResourceData { return ctx.Value(backendConfigKey).(*ResourceData) } -func (b *Backend) Input( - input terraform.UIInput, - c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { +func (b *Backend) ConfigSchema() *configschema.Block { + // This is an alias of CoreConfigSchema just to implement the + // backend.Backend interface. + return b.CoreConfigSchema() +} + +func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) { if b == nil { - return c, nil + return configVal, nil } + var diags tfdiags.Diagnostics + var err error - return schemaMap(b.Schema).Input(input, c) -} + // In order to use Transform below, this needs to be filled out completely + // according the schema. + configVal, err = b.CoreConfigSchema().CoerceValue(configVal) + if err != nil { + return configVal, diags.Append(err) + } -func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { - if b == nil { - return nil, nil + // lookup any required, top-level attributes that are Null, and see if we + // have a Default value available. 
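The doc-comment fix below touches the retry helpers, whose intended split between transient and permanent failures is easy to get backwards. A sketch of the pattern with a stubbed API call; the failing-twice behavior is purely illustrative:

```go
package main

import (
	"errors"
	"time"

	"github.com/hashicorp/terraform/helper/resource"
)

// fetch stands in for a provider API call; here it fails transiently
// twice before succeeding.
var attempts int

func fetch() error {
	attempts++
	if attempts < 3 {
		return errors.New("throttled") // pretend this is transient
	}
	return nil
}

func main() {
	err := resource.Retry(2*time.Minute, func() *resource.RetryError {
		if err := fetch(); err != nil {
			// Transient: keep retrying until the timeout elapses.
			return resource.RetryableError(err)
		}
		// Success. A permanent failure would instead return
		// resource.NonRetryableError(err) to abort immediately.
		return nil
	})
	_ = err
}
```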
+ configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) { + // we're only looking for top-level attributes + if len(path) != 1 { + return val, nil + } + + // nothing to do if we already have a value + if !val.IsNull() { + return val, nil + } + + // get the Schema definition for this attribute + getAttr, ok := path[0].(cty.GetAttrStep) + // these should all exist, but just ignore anything strange + if !ok { + return val, nil + } + + attrSchema := b.Schema[getAttr.Name] + // continue to ignore anything that doesn't match + if attrSchema == nil { + return val, nil + } + + // this is deprecated, so don't set it + if attrSchema.Deprecated != "" || attrSchema.Removed != "" { + return val, nil + } + + // find a default value if it exists + def, err := attrSchema.DefaultValue() + if err != nil { + diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err)) + return val, err + } + + // no default + if def == nil { + return val, nil + } + + // create a cty.Value and make sure it's the correct type + tmpVal := hcl2shim.HCL2ValueFromConfigValue(def) + + // helper/schema used to allow setting "" to a bool + if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) { + // return a warning about the conversion + diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name) + tmpVal = cty.False + } + + val, err = ctyconvert.Convert(tmpVal, val.Type()) + if err != nil { + diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err)) + } + + return val, err + }) + if err != nil { + // any error here was already added to the diagnostics + return configVal, diags } - return schemaMap(b.Schema).Validate(c) + shimRC := b.shimConfig(configVal) + warns, errs := schemaMap(b.Schema).Validate(shimRC) + for _, warn := range warns { + diags = diags.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + diags = diags.Append(err) + } + return configVal, diags } -func (b *Backend) Configure(c *terraform.ResourceConfig) error { +func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics { if b == nil { return nil } + var diags tfdiags.Diagnostics sm := schemaMap(b.Schema) + shimRC := b.shimConfig(obj) // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) + diff, err := sm.Diff(nil, shimRC, nil, nil, true) if err != nil { - return err + diags = diags.Append(err) + return diags } data, err := sm.Data(nil, diff) if err != nil { - return err + diags = diags.Append(err) + return diags } b.config = data @@ -80,11 +169,28 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error { err = b.ConfigureFunc(context.WithValue( context.Background(), backendConfigKey, data)) if err != nil { - return err + diags = diags.Append(err) + return diags } } - return nil + return diags +} + +// shimConfig turns a new-style cty.Value configuration (which must be of +// an object type) into a minimal old-style *terraform.ResourceConfig object +// that should be populated enough to appease the not-yet-updated functionality +// in this package. This should be removed once everything is updated. +func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig { + shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{}) + if !ok { + // If the configVal was nil, we still want a non-nil map here. 
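The default-filling walk in PrepareConfig above is built on `cty.Transform`, which visits every value in a structure and may replace leaves. A minimal standalone sketch of the same traversal shape, with a hypothetical default value:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	config := cty.ObjectVal(map[string]cty.Value{
		"region": cty.NullVal(cty.String),
	})

	// Walk the object and fill a null top-level string attribute with a
	// default, the same traversal PrepareConfig performs above.
	config, err := cty.Transform(config, func(p cty.Path, v cty.Value) (cty.Value, error) {
		if len(p) == 1 && v.IsNull() && v.Type() == cty.String {
			return cty.StringVal("us-east-1"), nil // hypothetical default
		}
		return v, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(config.GetAttr("region").AsString()) // us-east-1
}
```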
+ shimMap = map[string]interface{}{} + } + return &terraform.ResourceConfig{ + Config: shimMap, + Raw: shimMap, + } } // Config returns the configuration. This is available after Configure is diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go new file mode 100644 index 00000000..875677eb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go @@ -0,0 +1,309 @@ +package schema + +import ( + "fmt" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// The functions and methods in this file are concerned with the conversion +// of this package's schema model into the slightly-lower-level schema model +// used by Terraform core for configuration parsing. + +// CoreConfigSchema lowers the receiver to the schema model expected by +// Terraform core. +// +// This lower-level model has fewer features than the schema in this package, +// describing only the basic structure of configuration and state values we +// expect. The full schemaMap from this package is still required for full +// validation, handling of default values, etc. +// +// This method presumes a schema that passes InternalValidate, and so may +// panic or produce an invalid result if given an invalid schemaMap. +func (m schemaMap) CoreConfigSchema() *configschema.Block { + if len(m) == 0 { + // We return an actual (empty) object here, rather than a nil, + // because a nil result would mean that we don't have a schema at + // all, rather than that we have an empty one. + return &configschema.Block{} + } + + ret := &configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + BlockTypes: map[string]*configschema.NestedBlock{}, + } + + for name, schema := range m { + if schema.Elem == nil { + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + if schema.Type == TypeMap { + // For TypeMap in particular, it isn't valid for Elem to be a + // *Resource (since that would be ambiguous in flatmap) and + // so Elem is treated as a TypeString schema if so. This matches + // how the field readers treat this situation, for compatibility + // with configurations targeting Terraform 0.11 and earlier. + if _, isResource := schema.Elem.(*Resource); isResource { + sch := *schema // shallow copy + sch.Elem = &Schema{ + Type: TypeString, + } + ret.Attributes[name] = sch.coreConfigSchemaAttribute() + continue + } + } + switch schema.ConfigMode { + case SchemaConfigModeAttr: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case SchemaConfigModeBlock: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: // SchemaConfigModeAuto, or any other invalid value + if schema.Computed && !schema.Optional { + // Computed-only schemas are always handled as attributes, + // because they never appear in configuration. + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + continue + } + switch schema.Elem.(type) { + case *Schema, ValueType: + ret.Attributes[name] = schema.coreConfigSchemaAttribute() + case *Resource: + ret.BlockTypes[name] = schema.coreConfigSchemaBlock() + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) + } + } + } + + return ret +} + +// coreConfigSchemaAttribute prepares a configschema.Attribute representation +// of a schema. 
This is appropriate only for primitives or collections whose +// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections +// whose elem is a whole resource. +func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { + // The Schema.DefaultFunc capability adds some extra weirdness here since + // it can be combined with "Required: true" to create a sitution where + // required-ness is conditional. Terraform Core doesn't share this concept, + // so we must sniff for this possibility here and conditionally turn + // off the "Required" flag if it looks like the DefaultFunc is going + // to provide a value. + // This is not 100% true to the original interface of DefaultFunc but + // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc + // situations, which are the main cases we care about. + // + // Note that this also has a consequence for commands that return schema + // information for documentation purposes: running those for certain + // providers will produce different results depending on which environment + // variables are set. We accept that weirdness in order to keep this + // interface to core otherwise simple. + reqd := s.Required + opt := s.Optional + if reqd && s.DefaultFunc != nil { + v, err := s.DefaultFunc() + // We can't report errors from here, so we'll instead just force + // "Required" to false and let the provider try calling its + // DefaultFunc again during the validate step, where it can then + // return the error. + if err != nil || (err == nil && v != nil) { + reqd = false + opt = true + } + } + + return &configschema.Attribute{ + Type: s.coreConfigSchemaType(), + Optional: opt, + Required: reqd, + Computed: s.Computed, + Sensitive: s.Sensitive, + Description: s.Description, + } +} + +// coreConfigSchemaBlock prepares a configschema.NestedBlock representation of +// a schema. This is appropriate only for collections whose Elem is an instance +// of Resource, and will panic otherwise. +func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { + ret := &configschema.NestedBlock{} + if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil { + ret.Block = *nested + } + switch s.Type { + case TypeList: + ret.Nesting = configschema.NestingList + case TypeSet: + ret.Nesting = configschema.NestingSet + case TypeMap: + ret.Nesting = configschema.NestingMap + default: + // Should never happen for a valid schema + panic(fmt.Errorf("invalid s.Type %s for s.Elem being resource", s.Type)) + } + + ret.MinItems = s.MinItems + ret.MaxItems = s.MaxItems + + if s.Required && s.MinItems == 0 { + // configschema doesn't have a "required" representation for nested + // blocks, but we can fake it by requiring at least one item. + ret.MinItems = 1 + } + if s.Optional && s.MinItems > 0 { + // Historically helper/schema would ignore MinItems if Optional were + // set, so we must mimic this behavior here to ensure that providers + // relying on that undocumented behavior can continue to operate as + // they did before. + ret.MinItems = 0 + } + if s.Computed && !s.Optional { + // MinItems/MaxItems are meaningless for computed nested blocks, since + // they are never set by the user anyway. This ensures that we'll never + // generate weird errors about them. + ret.MinItems = 0 + ret.MaxItems = 0 + } + + return ret +} + +// coreConfigSchemaType determines the core config schema type that corresponds +// to a particular schema's type. 
+func (s *Schema) coreConfigSchemaType() cty.Type { + switch s.Type { + case TypeString: + return cty.String + case TypeBool: + return cty.Bool + case TypeInt, TypeFloat: + // configschema doesn't distinguish int and float, so helper/schema + // will deal with this as an additional validation step after + // configuration has been parsed and decoded. + return cty.Number + case TypeList, TypeSet, TypeMap: + var elemType cty.Type + switch set := s.Elem.(type) { + case *Schema: + elemType = set.coreConfigSchemaType() + case ValueType: + // This represents a mistake in the provider code, but it's a + // common one so we'll just shim it. + elemType = (&Schema{Type: set}).coreConfigSchemaType() + case *Resource: + // By default we construct a NestedBlock in this case, but this + // behavior is selected either for computed-only schemas or + // when ConfigMode is explicitly SchemaConfigModeBlock. + // See schemaMap.CoreConfigSchema for the exact rules. + elemType = set.coreConfigSchema().ImpliedType() + default: + if set != nil { + // Should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", s.Elem)) + } + // Some pre-existing schemas assume string as default, so we need + // to be compatible with them. + elemType = cty.String + } + switch s.Type { + case TypeList: + return cty.List(elemType) + case TypeSet: + return cty.Set(elemType) + case TypeMap: + return cty.Map(elemType) + default: + // can never get here in practice, due to the case we're inside + panic("invalid collection type") + } + default: + // should never happen for a valid schema + panic(fmt.Errorf("invalid Schema.Type %s", s.Type)) + } +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on +// the resource's schema. CoreConfigSchema adds the implicitly required "id" +// attribute for top level resources if it doesn't exist. +func (r *Resource) CoreConfigSchema() *configschema.Block { + block := r.coreConfigSchema() + + if block.Attributes == nil { + block.Attributes = map[string]*configschema.Attribute{} + } + + // Add the implicitly required "id" field if it doesn't exist + if block.Attributes["id"] == nil { + block.Attributes["id"] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + Computed: true, + } + } + + _, timeoutsAttr := block.Attributes[TimeoutsConfigKey] + _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey] + + // Insert configured timeout values into the schema, as long as the schema + // didn't define anything else by that name. 
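Putting the lowering above together: a hedged sketch of converting a small helper/schema resource to the core model and inspecting its implied cty type. The attribute names are illustrative; note that the implicit "id" attribute added by `Resource.CoreConfigSchema` appears in the result:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	r := &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
			"tags": {
				Type:     schema.TypeMap,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
		},
	}

	// Lower the helper/schema model to the core configschema model; the
	// implied type is an object including the injected "id" attribute.
	block := r.CoreConfigSchema()
	fmt.Println(block.ImpliedType().FriendlyName())
}
```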
+ if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock { + timeouts := configschema.Block{ + Attributes: map[string]*configschema.Attribute{}, + } + + if r.Timeouts.Create != nil { + timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Read != nil { + timeouts.Attributes[TimeoutRead] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Update != nil { + timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Delete != nil { + timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + if r.Timeouts.Default != nil { + timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{ + Type: cty.String, + Optional: true, + } + } + + block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{ + Nesting: configschema.NestingSingle, + Block: timeouts, + } + } + + return block +} + +func (r *Resource) coreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} + +// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema +// on the backends's schema. +func (r *Backend) CoreConfigSchema() *configschema.Block { + return schemaMap(r.Schema).CoreConfigSchema() +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go index 5a03d2d8..8d93750a 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go @@ -32,7 +32,7 @@ func DataSourceResourceShim(name string, dataSource *Resource) *Resource { // FIXME: Link to some further docs either on the website or in the // changelog, once such a thing exists. - dataSource.deprecationMessage = fmt.Sprintf( + dataSource.DeprecationMessage = fmt.Sprintf( "using %s as a resource is deprecated; consider using the data source instead", name, ) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go index 1660a670..2a66a068 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go @@ -3,6 +3,7 @@ package schema import ( "fmt" "strconv" + "strings" ) // FieldReaders are responsible for decoding fields out of data into @@ -41,6 +42,13 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} { return s.ZeroValue() } +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through. +func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema { + parts := strings.Split(path, ".") + return addrToSchema(parts, schemaMap) +} + // addrToSchema finds the final element schema for the given address // and the given schema. It returns all the schemas that led to the final // schema. These are in order of the address (out to in). @@ -126,6 +134,8 @@ func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema { switch v := current.Elem.(type) { case ValueType: current = &Schema{Type: v} + case *Schema: + current, _ = current.Elem.(*Schema) default: // maps default to string values. 
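For context on the "timeouts" nested block assembled above, this is roughly the provider-side declaration that triggers it; the durations and attribute are illustrative:

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

// exampleResource declares per-operation timeouts; CoreConfigSchema then
// synthesizes the corresponding "timeouts" nested block seen above.
func exampleResource() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(30 * time.Minute),
		},
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
	}
}
```

Users can then override these in configuration (for example `create = "20m"` inside a `timeouts` block), and CRUD functions read the effective value via `d.Timeout(schema.TimeoutCreate)`.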
This is all we can have // if this is nested in another list or map. @@ -249,11 +259,10 @@ func readObjectField( } // convert map values to the proper primitive type based on schema.Elem -func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error { - - elemType := TypeString - if et, ok := schema.Elem.(ValueType); ok { - elemType = et +func mapValuesToPrimitive(k string, m map[string]interface{}, schema *Schema) error { + elemType, err := getValueType(k, schema) + if err != nil { + return err } switch elemType { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go index f958bbcb..6ad3f13c 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go @@ -2,6 +2,7 @@ package schema import ( "fmt" + "log" "strconv" "strings" "sync" @@ -93,6 +94,22 @@ func (r *ConfigFieldReader) readField( } } + if protoVersion5 { + switch schema.Type { + case TypeList, TypeSet, TypeMap, typeObject: + // Check if the value itself is unknown. + // The new protocol shims will add unknown values to this list of + // ComputedKeys. This is the only way we have to indicate that a + // collection is unknown in the config + for _, unknown := range r.Config.ComputedKeys { + if k == unknown { + log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k) + return FieldReadResult{Computed: true, Exists: true}, nil + } + } + } + } + switch schema.Type { case TypeBool, TypeFloat, TypeInt, TypeString: return r.readPrimitive(k, schema) @@ -202,11 +219,14 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, v, _ := r.Config.Get(key) result[ik] = v } + case nil: + // the map may have been empty on the configuration, so we leave the + // empty result default: panic(fmt.Sprintf("unknown type: %#v", mraw)) } - err := mapValuesToPrimitive(result, schema) + err := mapValuesToPrimitive(k, result, schema) if err != nil { return FieldReadResult{}, nil } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go index 16bbae29..3e70acf0 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go @@ -29,29 +29,59 @@ type DiffFieldReader struct { Diff *terraform.InstanceDiff Source FieldReader Schema map[string]*Schema + + // cache for memoizing ReadField calls. + cache map[string]cachedFieldReadResult +} + +type cachedFieldReadResult struct { + val FieldReadResult + err error } func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) { + if r.cache == nil { + r.cache = make(map[string]cachedFieldReadResult) + } + + // Create the cache key by joining around a value that isn't a valid part + // of an address. This assumes that the Source and Schema are not changed + // for the life of this DiffFieldReader. 
+ cacheKey := strings.Join(address, "|") + if cached, ok := r.cache[cacheKey]; ok { + return cached.val, cached.err + } + schemaList := addrToSchema(address, r.Schema) if len(schemaList) == 0 { + r.cache[cacheKey] = cachedFieldReadResult{} return FieldReadResult{}, nil } + var res FieldReadResult + var err error + schema := schemaList[len(schemaList)-1] switch schema.Type { case TypeBool, TypeInt, TypeFloat, TypeString: - return r.readPrimitive(address, schema) + res, err = r.readPrimitive(address, schema) case TypeList: - return readListField(r, address, schema) + res, err = readListField(r, address, schema) case TypeMap: - return r.readMap(address, schema) + res, err = r.readMap(address, schema) case TypeSet: - return r.readSet(address, schema) + res, err = r.readSet(address, schema) case typeObject: - return readObjectField(r, address, schema.Elem.(map[string]*Schema)) + res, err = readObjectField(r, address, schema.Elem.(map[string]*Schema)) default: panic(fmt.Sprintf("Unknown type: %#v", schema.Type)) } + + r.cache[cacheKey] = cachedFieldReadResult{ + val: res, + err: err, + } + return res, err } func (r *DiffFieldReader) readMap( @@ -65,7 +95,9 @@ func (r *DiffFieldReader) readMap( return FieldReadResult{}, err } if source.Exists { - result = source.Value.(map[string]interface{}) + // readMap may return a nil value, or an unknown value placeholder in + // some cases, causing the type assertion to panic if we don't assign the ok value + result, _ = source.Value.(map[string]interface{}) resultSet = true } @@ -92,7 +124,8 @@ func (r *DiffFieldReader) readMap( result[k] = v.New } - err = mapValuesToPrimitive(result, schema) + key := address[len(address)-1] + err = mapValuesToPrimitive(key, result, schema) if err != nil { return FieldReadResult{}, nil } @@ -143,6 +176,9 @@ func (r *DiffFieldReader) readPrimitive( func (r *DiffFieldReader) readSet( address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) + prefix := strings.Join(address, ".") + "." // Create the set that will be our result diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go index 95339810..53f73b71 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go @@ -61,7 +61,7 @@ func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, err return true }) - err := mapValuesToPrimitive(result, schema) + err := mapValuesToPrimitive(k, result, schema) if err != nil { return FieldReadResult{}, nil } @@ -98,6 +98,9 @@ func (r *MapFieldReader) readPrimitive( func (r *MapFieldReader) readSet( address []string, schema *Schema) (FieldReadResult, error) { + // copy address to ensure we don't modify the argument + address = append([]string(nil), address...) 
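The "copy address to ensure we don't modify the argument" comments above guard against a classic Go `append` aliasing hazard: when the caller's slice has spare capacity, `append` reuses its backing array. A standalone demonstration of the hazard and the defensive copy used in `readSet`:

```go
package main

import "fmt"

func main() {
	addr := make([]string, 2, 4) // spare capacity, like many callers
	addr[0], addr[1] = "set", "0"

	grown := append(addr, "#") // reuses addr's backing array
	grown[0] = "mutated"

	fmt.Println(addr[0]) // "mutated": the caller's data changed

	// The defensive copy used above allocates a fresh backing array,
	// so later appends cannot reach back into the caller's slice.
	safe := append([]string(nil), addr...)
	_ = safe
}
```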
+ // Get the number of elements in the list countRaw, err := r.readPrimitive( append(address, "#"), &Schema{Type: TypeInt}) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go index 689ed8d1..c09358b1 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go @@ -39,6 +39,19 @@ func (w *MapFieldWriter) unsafeWriteField(addr string, value string) { w.result[addr] = value } +// clearTree clears a field and any sub-fields of the given address out of the +// map. This should be used to reset some kind of complex structures (namely +// sets) before writing to make sure that any conflicting data is removed (for +// example, if the set was previously written to the writer's layer). +func (w *MapFieldWriter) clearTree(addr []string) { + prefix := strings.Join(addr, ".") + "." + for k := range w.result { + if strings.HasPrefix(k, prefix) { + delete(w.result, k) + } + } +} + func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error { w.lock.Lock() defer w.lock.Unlock() @@ -115,6 +128,14 @@ func (w *MapFieldWriter) setList( return fmt.Errorf("%s: %s", k, err) } + // Wipe the set from the current writer prior to writing if it exists. + // Multiple writes to the same layer is a lot safer for lists than sets due + // to the fact that indexes are always deterministic and the length will + // always be updated with the current length on the last write, but making + // sure we have a clean namespace removes any chance for edge cases to pop up + // and ensures that the last write to the set is the correct value. + w.clearTree(addr) + // Set the entire list. var err error for i, elem := range vs { @@ -162,6 +183,10 @@ func (w *MapFieldWriter) setMap( vs[mk.String()] = mv.Interface() } + // Wipe this address tree. The contents of the map should always reflect the + // last write made to it. + w.clearTree(addr) + // Remove the pure key since we're setting the full map value delete(w.result, k) @@ -272,13 +297,14 @@ func (w *MapFieldWriter) setSet( // we get the proper order back based on the hash code. if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { // Build a temp *ResourceData to use for the conversion + tempAddr := addr[len(addr)-1:] tempSchema := *schema tempSchema.Type = TypeList - tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} + tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema} tempW := &MapFieldWriter{Schema: tempSchemaMap} // Set the entire list, this lets us get sane values out of it - if err := tempW.WriteField(addr, value); err != nil { + if err := tempW.WriteField(tempAddr, value); err != nil { return err } @@ -294,7 +320,7 @@ func (w *MapFieldWriter) setSet( } for i := 0; i < v.Len(); i++ { is := strconv.FormatInt(int64(i), 10) - result, err := tempR.ReadField(append(addrCopy, is)) + result, err := tempR.ReadField(append(tempAddr, is)) if err != nil { return err } @@ -308,6 +334,18 @@ func (w *MapFieldWriter) setSet( value = s } + // Clear any keys that match the set address first. This is necessary because + // it's always possible and sometimes may be necessary to write to a certain + // writer layer more than once with different set data each time, which will + // lead to different keys being inserted, which can lead to determinism + // problems when the old data isn't wiped first. 
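The `clearTree` helper introduced in the field-writer hunk below wipes every key nested under a dotted address so a rewrite starts from a clean namespace. A minimal sketch of its behavior on stale set entries; the keys are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// clearTree mirrors the helper added below: delete every key nested
// under a dotted address prefix from a flatmap result.
func clearTree(result map[string]string, addr []string) {
	prefix := strings.Join(addr, ".") + "."
	for k := range result {
		if strings.HasPrefix(k, prefix) {
			delete(result, k)
		}
	}
}

func main() {
	result := map[string]string{
		"ports.#":    "2",
		"ports.1234": "80",
		"ports.5678": "443",
		"name":       "web",
	}
	clearTree(result, []string{"ports"})
	fmt.Println(result) // map[name:web]; stale set entries are gone
}
```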
+ w.clearTree(addr) + + if value.(*Set) == nil { + w.result[k+".#"] = "0" + return nil + } + for code, elem := range value.(*Set).m { if err := w.set(append(addrCopy, code), elem); err != nil { return err diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go index 3a976293..0184d7b0 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go @@ -2,7 +2,19 @@ package schema -import "fmt" +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[getSourceState-1] + _ = x[getSourceConfig-2] + _ = x[getSourceDiff-4] + _ = x[getSourceSet-8] + _ = x[getSourceExact-16] + _ = x[getSourceLevelMask-15] +} const ( _getSource_name_0 = "getSourceStategetSourceConfig" @@ -13,8 +25,6 @@ const ( var ( _getSource_index_0 = [...]uint8{0, 14, 29} - _getSource_index_1 = [...]uint8{0, 13} - _getSource_index_2 = [...]uint8{0, 12} _getSource_index_3 = [...]uint8{0, 18, 32} ) @@ -31,6 +41,6 @@ func (i getSource) String() string { i -= 15 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] default: - return fmt.Sprintf("getSource(%d)", i) + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go index d52d2f5f..97024479 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go @@ -8,6 +8,8 @@ import ( "sync" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -57,11 +59,13 @@ type Provider struct { meta interface{} - // a mutex is required because TestReset can directly repalce the stopCtx + // a mutex is required because TestReset can directly replace the stopCtx stopMu sync.Mutex stopCtx context.Context stopCtxCancel context.CancelFunc stopOnce sync.Once + + TerraformVersion string } // ConfigureFunc is the function used to configure a Provider. @@ -89,6 +93,13 @@ func (p *Provider) InternalValidate() error { validationErrors = multierror.Append(validationErrors, err) } + // Provider-specific checks + for k, _ := range sm { + if isReservedProviderFieldName(k) { + return fmt.Errorf("%s is a reserved field name for a provider", k) + } + } + for k, r := range p.ResourcesMap { if err := r.InternalValidate(nil, true); err != nil { validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err)) @@ -104,6 +115,15 @@ func (p *Provider) InternalValidate() error { return validationErrors } +func isReservedProviderFieldName(name string) bool { + for _, reservedName := range config.ReservedProviderFields { + if name == reservedName { + return true + } + } + return false +} + // Meta returns the metadata associated with this provider that was // returned by the Configure call. It will be nil until Configure is called. 
func (p *Provider) Meta() interface{} { @@ -168,6 +188,29 @@ func (p *Provider) TestReset() error { return nil } +// GetSchema implementation of terraform.ResourceProvider interface +func (p *Provider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + resourceTypes := map[string]*configschema.Block{} + dataSources := map[string]*configschema.Block{} + + for _, name := range req.ResourceTypes { + if r, exists := p.ResourcesMap[name]; exists { + resourceTypes[name] = r.CoreConfigSchema() + } + } + for _, name := range req.DataSources { + if r, exists := p.DataSourcesMap[name]; exists { + dataSources[name] = r.CoreConfigSchema() + } + } + + return &terraform.ProviderSchema{ + Provider: schemaMap(p.Schema).CoreConfigSchema(), + ResourceTypes: resourceTypes, + DataSources: dataSources, + }, nil +} + // Input implementation of terraform.ResourceProvider interface. func (p *Provider) Input( input terraform.UIInput, @@ -210,7 +253,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error { // Get a ResourceData for this configuration. To do this, we actually // generate an intermediary "diff" although that is never exposed. - diff, err := sm.Diff(nil, c) + diff, err := sm.Diff(nil, c, nil, p.meta, true) if err != nil { return err } @@ -252,7 +295,21 @@ func (p *Provider) Diff( return nil, fmt.Errorf("unknown resource type: %s", info.Type) } - return r.Diff(s, c) + return r.Diff(s, c, p.meta) +} + +// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't +// attempt to calculate ignore_changes. +func (p *Provider) SimpleDiff( + info *terraform.InstanceInfo, + s *terraform.InstanceState, + c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + r, ok := p.ResourcesMap[info.Type] + if !ok { + return nil, fmt.Errorf("unknown resource type: %s", info.Type) + } + + return r.simpleDiff(s, c, p.meta) } // Refresh implementation of terraform.ResourceProvider interface. @@ -270,7 +327,7 @@ func (p *Provider) Refresh( // Resources implementation of terraform.ResourceProvider interface. func (p *Provider) Resources() []terraform.ResourceType { keys := make([]string, 0, len(p.ResourcesMap)) - for k, _ := range p.ResourcesMap { + for k := range p.ResourcesMap { keys = append(keys, k) } sort.Strings(keys) @@ -288,6 +345,10 @@ func (p *Provider) Resources() []terraform.ResourceType { result = append(result, terraform.ResourceType{ Name: k, Importable: resource.Importer != nil, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. + SchemaAvailable: true, }) } @@ -365,7 +426,7 @@ func (p *Provider) ReadDataDiff( return nil, fmt.Errorf("unknown data source: %s", info.Type) } - return r.Diff(nil, c) + return r.Diff(nil, c, p.meta) } // RefreshData implementation of terraform.ResourceProvider interface. @@ -393,6 +454,10 @@ func (p *Provider) DataSources() []terraform.DataSource { for _, k := range keys { result = append(result, terraform.DataSource{ Name: k, + + // Indicates that a provider is compiled against a new enough + // version of core to support the GetSchema method. 
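A hedged sketch of calling the new `GetSchema` entry point added above; the resource and data source names in the request are hypothetical:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// dumpSchema asks a provider for the core schema blocks of the named
// resource types, the lowering performed by CoreConfigSchema.
func dumpSchema(p *schema.Provider) {
	resp, err := p.GetSchema(&terraform.ProviderSchemaRequest{
		ResourceTypes: []string{"example_thing"},
		DataSources:   []string{"example_datum"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Provider.ImpliedType().FriendlyName())
	for name := range resp.ResourceTypes {
		fmt.Println("resource:", name)
	}
}
```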
+ SchemaAvailable: true, }) } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go index 6ac3fc1b..637e221e 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) @@ -41,30 +42,34 @@ type Provisioner struct { // information. ApplyFunc func(ctx context.Context) error + // ValidateFunc is a function for extended validation. This is optional + // and should be used when individual field validation is not enough. + ValidateFunc func(*terraform.ResourceConfig) ([]string, []error) + stopCtx context.Context stopCtxCancel context.CancelFunc stopOnce sync.Once } -// These constants are the keys that can be used to access data in -// the context parameters for Provisioners. -const ( - connDataInvalid int = iota +// Keys that can be used to access data in the context parameters for +// Provisioners. +var ( + connDataInvalid = contextKey("data invalid") // This returns a *ResourceData for the connection information. // Guaranteed to never be nil. - ProvConnDataKey + ProvConnDataKey = contextKey("provider conn data") // This returns a *ResourceData for the config information. // Guaranteed to never be nil. - ProvConfigDataKey + ProvConfigDataKey = contextKey("provider config data") // This returns a terraform.UIOutput. Guaranteed to never be nil. - ProvOutputKey + ProvOutputKey = contextKey("provider output") // This returns the raw InstanceState passed to Apply. Guaranteed to // be set, but may be nil. - ProvRawStateKey + ProvRawStateKey = contextKey("provider raw state") ) // InternalValidate should be called to validate the structure @@ -117,8 +122,9 @@ func (p *Provisioner) Stop() error { return nil } -func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { - return schemaMap(p.Schema).Validate(c) +// GetConfigSchema implementation of terraform.ResourceProvisioner interface. +func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) { + return schemaMap(p.Schema).CoreConfigSchema(), nil } // Apply implementation of terraform.ResourceProvisioner interface. @@ -146,7 +152,7 @@ func (p *Provisioner) Apply( } sm := schemaMap(p.ConnSchema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true) if err != nil { return err } @@ -160,7 +166,7 @@ func (p *Provisioner) Apply( // Build the configuration data. Doing this requires making a "diff" // even though that's never used. We use that just to get the correct types. configMap := schemaMap(p.Schema) - diff, err := configMap.Diff(nil, c) + diff, err := configMap.Diff(nil, c, nil, nil, true) if err != nil { return err } @@ -178,3 +184,27 @@ func (p *Provisioner) Apply( ctx = context.WithValue(ctx, ProvRawStateKey, s) return p.ApplyFunc(ctx) } + +// Validate implements the terraform.ResourceProvisioner interface. +func (p *Provisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { + if err := p.InternalValidate(); err != nil { + return nil, []error{fmt.Errorf( + "Internal validation of the provisioner failed! 
This is always a bug\n"+ + "with the provisioner itself, and not a user issue. Please report\n"+ + "this bug:\n\n%s", err)} + } + + if p.Schema != nil { + w, e := schemaMap(p.Schema).Validate(c) + ws = append(ws, w...) + es = append(es, e...) + } + + if p.ValidateFunc != nil { + w, e := p.ValidateFunc(c) + ws = append(ws, w...) + es = append(es, e...) + } + + return ws, es +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go index c8105588..bcfe5666 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go @@ -6,7 +6,9 @@ import ( "log" "strconv" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" + "github.com/zclconf/go-cty/cty" ) // Resource represents a thing in Terraform that has a set of configurable @@ -43,6 +45,12 @@ type Resource struct { // their Versioning at any integer >= 1 SchemaVersion int + // MigrateState is deprecated and any new changes to a resource's schema + // should be handled by StateUpgraders. Existing MigrateState implementations + // should remain for compatibility with existing state. MigrateState will + // still be called if the stored SchemaVersion is less than the + // first version of the StateUpgraders. + // // MigrateState is responsible for updating an InstanceState with an old // version to the format expected by the current version of the Schema. // @@ -55,6 +63,18 @@ type Resource struct { // needs to make any remote API calls. MigrateState StateMigrateFunc + // StateUpgraders contains the functions responsible for upgrading an + // existing state with an old schema version to a newer schema. It is + // called specifically by Terraform when the stored schema version is less + // than the current SchemaVersion of the Resource. + // + // StateUpgraders map specific schema versions to a StateUpgrader + // function. The registered versions are expected to be ordered, + // consecutive values. The initial value may be greater than 0 to account + // for legacy schemas that weren't recorded and can be handled by + // MigrateState. + StateUpgraders []StateUpgrader + // The functions below are the CRUD operations for this resource. // // The only optional operation is Update. If Update is not implemented, @@ -84,6 +104,37 @@ type Resource struct { Delete DeleteFunc Exists ExistsFunc + // CustomizeDiff is a custom function for working with the diff that + // Terraform has created for this resource - it can be used to customize the + // diff that has been created, diff values not controlled by configuration, + // or even veto the diff altogether and abort the plan. It is passed a + // *ResourceDiff, a structure similar to ResourceData but lacking most write + // functions like Set, while introducing new functions that work with the + // diff such as SetNew, SetNewComputed, and ForceNew. 
+ // + // The phases Terraform runs this in, and the state available via functions + // like Get and GetChange, are as follows: + // + // * New resource: One run with no state + // * Existing resource: One run with state + // * Existing resource, forced new: One run with state (before ForceNew), + // then one run without state (as if new resource) + // * Tainted resource: No runs (custom diff logic is skipped) + // * Destroy: No runs (standard diff logic is skipped on destroy diffs) + // + // This function needs to be resilient to support all scenarios. + // + // If this function needs to access external API resources, remember to flag + // the RequiresRefresh attribute mentioned below to ensure that + // -refresh=false is blocked when running plan or apply, as this means that + // this resource requires refresh-like behaviour to work effectively. + // + // For the most part, only computed fields can be customized by this + // function. + // + // This function is only allowed on regular resources (not data sources). + CustomizeDiff CustomizeDiffFunc + // Importer is the ResourceImporter implementation for this resource. // If this is nil, then this resource does not support importing. If // this is non-nil, then it supports importing and ResourceImporter @@ -92,9 +143,7 @@ type Resource struct { Importer *ResourceImporter // If non-empty, this string is emitted as a warning during Validate. - // This is a private interface for now, for use by DataSourceResourceShim, - // and not for general use. (But maybe later...) - deprecationMessage string + DeprecationMessage string // Timeouts allow users to specify specific time durations in which an // operation should time out, to allow them to extend an action to suit their @@ -106,6 +155,27 @@ type Resource struct { Timeouts *ResourceTimeout } +// ShimInstanceStateFromValue converts a cty.Value to a +// terraform.InstanceState. +func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) { + // Get the raw shimmed value. While this is correct, the set hashes don't + // match those from the Schema. + s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion) + + // We now rebuild the state through the ResourceData, so that the set indexes + // match what helper/schema expects. + data, err := schemaMap(r.Schema).Data(s, nil) + if err != nil { + return nil, err + } + + s = data.State() + if s == nil { + s = &terraform.InstanceState{} + } + return s, nil +} + // See Resource documentation. type CreateFunc func(*ResourceData, interface{}) error @@ -125,6 +195,30 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error) type StateMigrateFunc func( int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) +type StateUpgrader struct { + // Version is the version schema that this Upgrader will handle, converting + // it to Version+1. + Version int + + // Type describes the schema that this function can upgrade. Type is + // required to decode the schema if the state was stored in a legacy + // flatmap format. + Type cty.Type + + // Upgrade takes the JSON encoded state and the provider meta value, and + // upgrades the state one single schema version. The provided state is + // deocded into the default json types using a map[string]interface{}. It + // is up to the StateUpgradeFunc to ensure that the returned value can be + // encoded using the new schema. 
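A sketch of a hook in the shape the CustomizeDiff contract above describes; the attribute names are hypothetical. It forces replacement for one change and marks a computed field unknown for another:

```go
package example

import (
	"github.com/hashicorp/terraform/helper/schema"
)

// exampleCustomizeDiff would be wired up as the resource's CustomizeDiff
// field. Attribute names are illustrative.
func exampleCustomizeDiff(d *schema.ResourceDiff, meta interface{}) error {
	if d.HasChange("engine_version") {
		// Veto in-place updates for an immutable attribute by forcing
		// the resource to be replaced.
		if err := d.ForceNew("engine_version"); err != nil {
			return err
		}
	}
	if d.HasChange("name") {
		// The server recomputes this value, so mark it unknown in the
		// plan rather than guessing.
		return d.SetNewComputed("fingerprint")
	}
	return nil
}
```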
+ Upgrade StateUpgradeFunc +} + +// See StateUpgrader +type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) + +// See Resource documentation. +type CustomizeDiffFunc func(*ResourceDiff, interface{}) error + // Apply creates, updates, and/or deletes a resource. func (r *Resource) Apply( s *terraform.InstanceState, @@ -142,6 +236,12 @@ func (r *Resource) Apply( if err := rt.DiffDecode(d); err != nil { log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) } + } else if s != nil { + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } } else { log.Printf("[DEBUG] No meta timeoutkey found in Apply()") } @@ -195,11 +295,11 @@ func (r *Resource) Apply( return r.recordCurrentSchemaVersion(data.State()), err } -// Diff returns a diff of this resource and is API compatible with the -// ResourceProvider interface. +// Diff returns a diff of this resource. func (r *Resource) Diff( s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { t := &ResourceTimeout{} err := t.ConfigDecode(r, c) @@ -208,7 +308,7 @@ func (r *Resource) Diff( return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) } - instanceDiff, err := schemaMap(r.Schema).Diff(s, c) + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true) if err != nil { return instanceDiff, err } @@ -224,12 +324,40 @@ func (r *Resource) Diff( return instanceDiff, err } +func (r *Resource) simpleDiff( + s *terraform.InstanceState, + c *terraform.ResourceConfig, + meta interface{}) (*terraform.InstanceDiff, error) { + + instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false) + if err != nil { + return instanceDiff, err + } + + if instanceDiff == nil { + instanceDiff = terraform.NewInstanceDiff() + } + + // Make sure the old value is set in each of the instance diffs. + // This was done by the RequiresNew logic in the full legacy Diff. + for k, attr := range instanceDiff.Attributes { + if attr == nil { + continue + } + if s != nil { + attr.Old = s.Attributes[k] + } + } + + return instanceDiff, nil +} + // Validate validates the resource configuration against the schema. func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { warns, errs := schemaMap(r.Schema).Validate(c) - if r.deprecationMessage != "" { - warns = append(warns, r.deprecationMessage) + if r.DeprecationMessage != "" { + warns = append(warns, r.DeprecationMessage) } return warns, errs @@ -241,7 +369,6 @@ func (r *Resource) ReadDataApply( d *terraform.InstanceDiff, meta interface{}, ) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch // on each read, so the source state is always nil. data, err := schemaMap(r.Schema).Data(nil, d) @@ -262,8 +389,11 @@ func (r *Resource) ReadDataApply( return r.recordCurrentSchemaVersion(state), err } -// Refresh refreshes the state of the resource. -func (r *Resource) Refresh( +// RefreshWithoutUpgrade reads the instance state, but does not call +// MigrateState or the StateUpgraders, since those are now invoked in a +// separate API call. +// RefreshWithoutUpgrade is part of the new plugin shims. 
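A registration sketch matching the StateUpgrader contract above: the single upgrader handles version 0 and produces version 1, satisfying the consecutive-version rules that InternalValidate enforces later in this diff. Field names are illustrative:

```go
package example

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/zclconf/go-cty/cty"
)

func exampleResource() *schema.Resource {
	return &schema.Resource{
		SchemaVersion: 1,
		StateUpgraders: []schema.StateUpgrader{
			{
				Version: 0,
				// Type describes the v0 schema so legacy flatmap state
				// can be decoded before upgrading.
				Type: cty.Object(map[string]cty.Type{
					"id":   cty.String,
					"name": cty.String,
				}),
				Upgrade: func(raw map[string]interface{}, meta interface{}) (map[string]interface{}, error) {
					// v1 renamed "name" to "display_name".
					raw["display_name"] = raw["name"]
					delete(raw, "name")
					return raw, nil
				},
			},
		},
		Schema: map[string]*schema.Schema{
			"display_name": {Type: schema.TypeString, Required: true},
		},
	}
}
```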
+func (r *Resource) RefreshWithoutUpgrade( s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { // If the ID is already somehow blank, it doesn't exist @@ -297,12 +427,60 @@ func (r *Resource) Refresh( } } - needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) - if needsMigration && r.MigrateState != nil { - s, err := r.MigrateState(stateSchemaVersion, s, meta) + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + if err != nil { + return s, err + } + + err = r.Read(data, meta) + state := data.State() + if state != nil && state.ID == "" { + state = nil + } + + return r.recordCurrentSchemaVersion(state), err +} + +// Refresh refreshes the state of the resource. +func (r *Resource) Refresh( + s *terraform.InstanceState, + meta interface{}) (*terraform.InstanceState, error) { + // If the ID is already somehow blank, it doesn't exist + if s.ID == "" { + return nil, nil + } + + rt := ResourceTimeout{} + if _, ok := s.Meta[TimeoutKey]; ok { + if err := rt.StateDecode(s); err != nil { + log.Printf("[ERR] Error decoding ResourceTimeout: %s", err) + } + } + + if r.Exists != nil { + // Make a copy of data so that if it is modified it doesn't + // affect our Read later. + data, err := schemaMap(r.Schema).Data(s, nil) + data.timeouts = &rt + + if err != nil { + return s, err + } + + exists, err := r.Exists(data, meta) if err != nil { return s, err } + if !exists { + return nil, nil + } + } + + // there may be new StateUpgraders that need to be run + s, err := r.upgradeState(s, meta) + if err != nil { + return s, err } data, err := schemaMap(r.Schema).Data(s, nil) @@ -320,6 +498,71 @@ func (r *Resource) Refresh( return r.recordCurrentSchemaVersion(state), err } +func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + var err error + + needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) + migrate := needsMigration && r.MigrateState != nil + + if migrate { + s, err = r.MigrateState(stateSchemaVersion, s, meta) + if err != nil { + return s, err + } + } + + if len(r.StateUpgraders) == 0 { + return s, nil + } + + // If we ran MigrateState, then the stateSchemaVersion value is no longer + // correct. We can expect the first upgrade function to be the correct + // schema type version. + if migrate { + stateSchemaVersion = r.StateUpgraders[0].Version + } + + schemaType := r.CoreConfigSchema().ImpliedType() + // find the expected type to convert the state + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion == upgrader.Version { + schemaType = upgrader.Type + } + } + + // StateUpgraders only operate on the new JSON format state, so the state + // need to be converted. + stateVal, err := StateValueFromInstanceState(s, schemaType) + if err != nil { + return nil, err + } + + jsonState, err := StateValueToJSONMap(stateVal, schemaType) + if err != nil { + return nil, err + } + + for _, upgrader := range r.StateUpgraders { + if stateSchemaVersion != upgrader.Version { + continue + } + + jsonState, err = upgrader.Upgrade(jsonState, meta) + if err != nil { + return nil, err + } + stateSchemaVersion++ + } + + // now we need to re-flatmap the new state + stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema()) + if err != nil { + return nil, err + } + + return r.ShimInstanceStateFromValue(stateVal) +} + // InternalValidate should be called to validate the structure // of the resource. 
// @@ -339,6 +582,11 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error if r.Create != nil || r.Update != nil || r.Delete != nil { return fmt.Errorf("must not implement Create, Update or Delete") } + + // CustomizeDiff cannot be defined for read-only resources + if r.CustomizeDiff != nil { + return fmt.Errorf("cannot implement CustomizeDiff") + } } tsm := topSchemaMap @@ -386,11 +634,76 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error return err } } + + for k, f := range tsm { + if isReservedResourceFieldName(k, f) { + return fmt.Errorf("%s is a reserved field name", k) + } + } + } + + lastVersion := -1 + for _, u := range r.StateUpgraders { + if lastVersion >= 0 && u.Version-lastVersion > 1 { + return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version) + } + + if u.Version >= r.SchemaVersion { + return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion) + } + + if !u.Type.IsObjectType() { + return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version) + } + + if u.Upgrade == nil { + return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version) + } + + lastVersion = u.Version + } + + if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 { + return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion) + } + + // Data source + if r.isTopLevel() && !writable { + tsm = schemaMap(r.Schema) + for k, _ := range tsm { + if isReservedDataSourceFieldName(k) { + return fmt.Errorf("%s is a reserved field name", k) + } + } } return schemaMap(r.Schema).InternalValidate(tsm) } +func isReservedDataSourceFieldName(name string) bool { + for _, reservedName := range config.ReservedDataSourceFields { + if name == reservedName { + return true + } + } + return false +} + +func isReservedResourceFieldName(name string, s *Schema) bool { + // Allow phasing out "id" + // See https://github.com/terraform-providers/terraform-provider-aws/pull/1626#issuecomment-328881415 + if name == "id" && (s.Deprecated != "" || s.Removed != "") { + return false + } + + for _, reservedName := range config.ReservedResourceFields { + if name == reservedName { + return true + } + } + return false +} + // Data returns a ResourceData struct for this Resource. Each return value // is a separate copy and can be safely modified differently. // @@ -407,6 +720,12 @@ func (r *Resource) Data(s *terraform.InstanceState) *ResourceData { panic(err) } + // load the Resource timeouts + result.timeouts = r.Timeouts + if result.timeouts == nil { + result.timeouts = &ResourceTimeout{} + } + // Set the schema version to latest by default result.meta = map[string]interface{}{ "schema_version": strconv.Itoa(r.SchemaVersion), @@ -424,10 +743,17 @@ func (r *Resource) TestResourceData() *ResourceData { } } +// SchemasForFlatmapPath tries its best to find a sequence of schemas that +// the given dot-delimited attribute path traverses through in the schema +// of the receiving Resource. +func (r *Resource) SchemasForFlatmapPath(path string) []*Schema { + return SchemasForFlatmapPath(path, r.Schema) +} + // Returns true if the resource is "top level" i.e. not a sub-resource. func (r *Resource) isTopLevel() bool { // TODO: This is a heuristic; replace with a definitive attribute? 
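A sketch of how a provider unit test can exercise the new InternalValidate checks. It assumes "count" appears in config.ReservedDataSourceFields; the data source itself is hypothetical.

```go
package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
)

// TestReservedFieldName expects InternalValidate to reject the schema,
// assuming "count" is listed in config.ReservedDataSourceFields.
func TestReservedFieldName(t *testing.T) {
	ds := &schema.Resource{
		Read: func(d *schema.ResourceData, meta interface{}) error { return nil },
		Schema: map[string]*schema.Schema{
			"count": {Type: schema.TypeInt, Optional: true},
		},
	}
	if err := ds.InternalValidate(nil, false); err == nil {
		t.Fatal("expected a reserved field name error, got nil")
	}
}
```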
- return r.Create != nil + return (r.Create != nil || r.Read != nil) } // Determines if a given InstanceState needs to be migrated by checking the @@ -449,7 +775,15 @@ func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) { } stateSchemaVersion, _ := strconv.Atoi(rawString) - return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion + + // Don't run MigrateState if the version is handled by a StateUpgrader, + // since StateMigrateFuncs are not required to handle unknown versions + maxVersion := r.SchemaVersion + if len(r.StateUpgraders) > 0 { + maxVersion = r.StateUpgraders[0].Version + } + + return stateSchemaVersion < maxVersion, stateSchemaVersion } func (r *Resource) recordCurrentSchemaVersion( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go index b2bc8f6c..1c390709 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go @@ -35,6 +35,8 @@ type ResourceData struct { partialMap map[string]struct{} once sync.Once isNew bool + + panicOnError bool } // getResult is the internal structure that is generated when a Get @@ -50,6 +52,8 @@ type getResult struct { // UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary // values, bypassing schema. This MUST NOT be used in normal circumstances - // it exists only to support the remote_state data source. +// +// Deprecated: Fully define schema attributes and use Set() instead. func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { d.once.Do(d.init) @@ -104,6 +108,22 @@ func (d *ResourceData) GetOk(key string) (interface{}, bool) { return r.Value, exists } +// GetOkExists returns the data for a given key and whether or not the key +// has been set to a non-zero value. This is only useful for determining +// if boolean attributes have been set, if they are Optional but do not +// have a Default value. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. +// This should only be used if absolutely required/needed. 
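An illustrative sketch, not from this diff, of the situation the GetOkExists accessor documented above (and implemented just below) is meant for: an Optional boolean with no Default, where an explicit `enabled = false` must be distinguished from the attribute being omitted. Names are hypothetical.

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// resourceExampleCreate distinguishes `enabled = false` written in config
// from "enabled" being left unset; GetOk alone cannot, because false is the
// zero value for bool.
func resourceExampleCreate(d *schema.ResourceData, meta interface{}) error {
	req := map[string]interface{}{}
	if v, ok := d.GetOkExists("enabled"); ok {
		// The user assigned the attribute explicitly, possibly to false.
		req["enabled"] = v.(bool)
	}
	_ = req // hand off to the (hypothetical) API client here
	d.SetId("example")
	return nil
}
```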
+func (d *ResourceData) GetOkExists(key string) (interface{}, bool) { + r := d.getRaw(key, getSourceSet) + exists := r.Exists && !r.Computed + return r.Value, exists +} + func (d *ResourceData) getRaw(key string, level getSource) getResult { var parts []string if key != "" { @@ -168,7 +188,11 @@ func (d *ResourceData) Set(key string, value interface{}) error { } } - return d.setWriter.WriteField(strings.Split(key, "."), value) + err := d.setWriter.WriteField(strings.Split(key, "."), value) + if err != nil && d.panicOnError { + panic(err) + } + return err } // SetPartial adds the key to the final state output while @@ -197,10 +221,16 @@ func (d *ResourceData) Id() string { if d.state != nil { result = d.state.ID + if result == "" { + result = d.state.Attributes["id"] + } } if d.newState != nil { result = d.newState.ID + if result == "" { + result = d.newState.Attributes["id"] + } } return result @@ -224,6 +254,18 @@ func (d *ResourceData) ConnInfo() map[string]string { func (d *ResourceData) SetId(v string) { d.once.Do(d.init) d.newState.ID = v + + // once we transition away from the legacy state types, "id" will no longer + // be a special field, and will become a normal attribute. + // set the attribute normally + d.setWriter.unsafeWriteField("id", v) + + // Make sure the newState is also set, otherwise the old value + // may get precedence. + if d.newState.Attributes == nil { + d.newState.Attributes = map[string]string{} + } + d.newState.Attributes["id"] = v } // SetConnInfo sets the connection info for a resource. @@ -293,6 +335,7 @@ func (d *ResourceData) State() *terraform.InstanceState { mapW := &MapFieldWriter{Schema: d.schema} if err := mapW.WriteField(nil, rawMap); err != nil { + log.Printf("[ERR] Error writing fields: %s", err) return nil } @@ -344,6 +387,13 @@ func (d *ResourceData) State() *terraform.InstanceState { func (d *ResourceData) Timeout(key string) time.Duration { key = strings.ToLower(key) + // System default of 20 minutes + defaultTimeout := 20 * time.Minute + + if d.timeouts == nil { + return defaultTimeout + } + var timeout *time.Duration switch key { case TimeoutCreate: @@ -364,8 +414,7 @@ func (d *ResourceData) Timeout(key string) time.Duration { return *d.timeouts.Default } - // Return system default of 20 minutes - return 20 * time.Minute + return defaultTimeout } func (d *ResourceData) init() { @@ -423,7 +472,7 @@ func (d *ResourceData) init() { } func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool) { + k string) (interface{}, interface{}, bool, bool, bool) { // Get the change between the state and the config. 
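With the reworked Timeout accessor above, a nil timeouts struct now falls back to the same 20-minute system default. A hedged sketch of the provider-side usage follows; the resource wiring is hypothetical, and it assumes the package's DefaultTimeout helper for building the struct.

```go
package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

// exampleTimeoutsResource declares which actions accept a timeouts block;
// unset actions fall back to these values, then to the 20-minute default.
func exampleTimeoutsResource() *schema.Resource {
	return &schema.Resource{
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(30 * time.Minute),
			Delete: schema.DefaultTimeout(10 * time.Minute),
		},
	}
}

// waitForCreate reads the effective create timeout inside a CRUD function.
func waitForCreate(d *schema.ResourceData) error {
	deadline := time.Now().Add(d.Timeout(schema.TimeoutCreate))
	_ = deadline // poll the (hypothetical) API until done or past deadline
	return nil
}
```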
o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) if !o.Exists { @@ -434,7 +483,7 @@ func (d *ResourceData) diffChange( } // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false } func (d *ResourceData) getChange( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go new file mode 100644 index 00000000..47b54810 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go @@ -0,0 +1,559 @@ +package schema + +import ( + "errors" + "fmt" + "reflect" + "strings" + "sync" + + "github.com/hashicorp/terraform/terraform" +) + +// newValueWriter is a minor re-implementation of MapFieldWriter to include +// keys that should be marked as computed, to represent the new part of a +// pseudo-diff. +type newValueWriter struct { + *MapFieldWriter + + // A list of keys that should be marked as computed. + computedKeys map[string]bool + + // A lock to prevent races on writes. The underlying writer will have one as + // well - this is for computed keys. + lock sync.Mutex + + // To be used with init. + once sync.Once +} + +// init performs any initialization tasks for the newValueWriter. +func (w *newValueWriter) init() { + if w.computedKeys == nil { + w.computedKeys = make(map[string]bool) + } +} + +// WriteField overrides MapValueWriter's WriteField, adding the ability to flag +// the address as computed. +func (w *newValueWriter) WriteField(address []string, value interface{}, computed bool) error { + // Fail the write if we have a non-nil value and computed is true. + // NewComputed values should not have a value when written. + if value != nil && computed { + return errors.New("Non-nil value with computed set") + } + + if err := w.MapFieldWriter.WriteField(address, value); err != nil { + return err + } + + w.once.Do(w.init) + + w.lock.Lock() + defer w.lock.Unlock() + if computed { + w.computedKeys[strings.Join(address, ".")] = true + } + return nil +} + +// ComputedKeysMap returns the underlying computed keys map. +func (w *newValueWriter) ComputedKeysMap() map[string]bool { + w.once.Do(w.init) + return w.computedKeys +} + +// newValueReader is a minor re-implementation of MapFieldReader and is the +// read counterpart to MapValueWriter, allowing the read of keys flagged as +// computed to accommodate the diff override logic in ResourceDiff. +type newValueReader struct { + *MapFieldReader + + // The list of computed keys from a newValueWriter. + computedKeys map[string]bool +} + +// ReadField reads the values from the underlying writer, returning the +// computed value if it is found as well. +func (r *newValueReader) ReadField(address []string) (FieldReadResult, error) { + addrKey := strings.Join(address, ".") + v, err := r.MapFieldReader.ReadField(address) + if err != nil { + return FieldReadResult{}, err + } + for computedKey := range r.computedKeys { + if childAddrOf(addrKey, computedKey) { + if strings.HasSuffix(addrKey, ".#") { + // This is a count value for a list or set that has been marked as + // computed, or a sub-list/sub-set of a complex resource that has + // been marked as computed. We need to pass through to other readers + // so that an accurate previous count can be fetched for the diff. 
+ v.Exists = false + } + v.Computed = true + } + } + + return v, nil +} + +// ResourceDiff is used to query and make custom changes to an in-flight diff. +// It can be used to veto particular changes in the diff, customize the diff +// that has been created, or diff values not controlled by config. +// +// The object functions similar to ResourceData, however most notably lacks +// Set, SetPartial, and Partial, as it should be used to change diff values +// only. Most other first-class ResourceData functions exist, namely Get, +// GetOk, HasChange, and GetChange exist. +// +// All functions in ResourceDiff, save for ForceNew, can only be used on +// computed fields. +type ResourceDiff struct { + // The schema for the resource being worked on. + schema map[string]*Schema + + // The current config for this resource. + config *terraform.ResourceConfig + + // The state for this resource as it exists post-refresh, after the initial + // diff. + state *terraform.InstanceState + + // The diff created by Terraform. This diff is used, along with state, + // config, and custom-set diff data, to provide a multi-level reader + // experience similar to ResourceData. + diff *terraform.InstanceDiff + + // The internal reader structure that contains the state, config, the default + // diff, and the new diff. + multiReader *MultiLevelFieldReader + + // A writer that writes overridden new fields. + newWriter *newValueWriter + + // Tracks which keys have been updated by ResourceDiff to ensure that the + // diff does not get re-run on keys that were not touched, or diffs that were + // just removed (re-running on the latter would just roll back the removal). + updatedKeys map[string]bool + + // Tracks which keys were flagged as forceNew. These keys are not saved in + // newWriter, but we need to track them so that they can be re-diffed later. + forcedNewKeys map[string]bool +} + +// newResourceDiff creates a new ResourceDiff instance. +func newResourceDiff(schema map[string]*Schema, config *terraform.ResourceConfig, state *terraform.InstanceState, diff *terraform.InstanceDiff) *ResourceDiff { + d := &ResourceDiff{ + config: config, + state: state, + diff: diff, + schema: schema, + } + + d.newWriter = &newValueWriter{ + MapFieldWriter: &MapFieldWriter{Schema: d.schema}, + } + readers := make(map[string]FieldReader) + var stateAttributes map[string]string + if d.state != nil { + stateAttributes = d.state.Attributes + readers["state"] = &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(stateAttributes), + } + } + if d.config != nil { + readers["config"] = &ConfigFieldReader{ + Schema: d.schema, + Config: d.config, + } + } + if d.diff != nil { + readers["diff"] = &DiffFieldReader{ + Schema: d.schema, + Diff: d.diff, + Source: &MultiLevelFieldReader{ + Levels: []string{"state", "config"}, + Readers: readers, + }, + } + } + readers["newDiff"] = &newValueReader{ + MapFieldReader: &MapFieldReader{ + Schema: d.schema, + Map: BasicMapReader(d.newWriter.Map()), + }, + computedKeys: d.newWriter.ComputedKeysMap(), + } + d.multiReader = &MultiLevelFieldReader{ + Levels: []string{ + "state", + "config", + "diff", + "newDiff", + }, + + Readers: readers, + } + + d.updatedKeys = make(map[string]bool) + d.forcedNewKeys = make(map[string]bool) + + return d +} + +// UpdatedKeys returns the keys that were updated by this ResourceDiff run. +// These are the only keys that a diff should be re-calculated for. 
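A minimal sketch, assuming a hypothetical resource with a computed "version" attribute and an integer "size" attribute, of a CustomizeDiff hook built on the ResourceDiff API introduced above:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// customizeExampleDiff would be wired up as Resource.CustomizeDiff.
func customizeExampleDiff(d *schema.ResourceDiff, meta interface{}) error {
	// A new engine invalidates the server-assigned version: blank it out and
	// mark it unknown. SetNewComputed is only legal on computed keys.
	if d.HasChange("engine") {
		if err := d.SetNewComputed("version"); err != nil {
			return err
		}
	}

	// Veto a change the (hypothetical) API is known to reject.
	old, new := d.GetChange("size")
	if old.(int) > new.(int) {
		return fmt.Errorf("size cannot shrink (%d to %d)", old.(int), new.(int))
	}
	return nil
}
```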
+// +// This is the combined result of both keys for which diff values were updated +// for or cleared, and also keys that were flagged to be re-diffed as a result +// of ForceNew. +func (d *ResourceDiff) UpdatedKeys() []string { + var s []string + for k := range d.updatedKeys { + s = append(s, k) + } + for k := range d.forcedNewKeys { + for _, l := range s { + if k == l { + break + } + } + s = append(s, k) + } + return s +} + +// Clear wipes the diff for a particular key. It is called by ResourceDiff's +// functionality to remove any possibility of conflicts, but can be called on +// its own to just remove a specific key from the diff completely. +// +// Note that this does not wipe an override. This function is only allowed on +// computed keys. +func (d *ResourceDiff) Clear(key string) error { + if err := d.checkKey(key, "Clear", true); err != nil { + return err + } + + return d.clear(key) +} + +func (d *ResourceDiff) clear(key string) error { + // Check the schema to make sure that this key exists first. + schemaL := addrToSchema(strings.Split(key, "."), d.schema) + if len(schemaL) == 0 { + return fmt.Errorf("%s is not a valid key", key) + } + + for k := range d.diff.Attributes { + if strings.HasPrefix(k, key) { + delete(d.diff.Attributes, k) + } + } + return nil +} + +// GetChangedKeysPrefix helps to implement Resource.CustomizeDiff +// where we need to act on all nested fields +// without calling out each one separately +func (d *ResourceDiff) GetChangedKeysPrefix(prefix string) []string { + keys := make([]string, 0) + for k := range d.diff.Attributes { + if strings.HasPrefix(k, prefix) { + keys = append(keys, k) + } + } + return keys +} + +// diffChange helps to implement resourceDiffer and derives its change values +// from ResourceDiff's own change data, in addition to existing diff, config, and state. +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) + + if !old.Exists { + old.Value = nil + } + if !new.Exists || d.removed(key) { + new.Value = nil + } + + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized +} + +// SetNew is used to set a new diff value for the mentioned key. The value must +// be correct for the attribute's schema (mostly relevant for maps, lists, and +// sets). The original value from the state is used as the old value. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNew(key string, value interface{}) error { + if err := d.checkKey(key, "SetNew", false); err != nil { + return err + } + + return d.setDiff(key, value, false) +} + +// SetNewComputed functions like SetNew, except that it blanks out a new value +// and marks it as computed. +// +// This function is only allowed on computed attributes. +func (d *ResourceDiff) SetNewComputed(key string) error { + if err := d.checkKey(key, "SetNewComputed", false); err != nil { + return err + } + + return d.setDiff(key, nil, true) +} + +// setDiff performs common diff setting behaviour. 
+func (d *ResourceDiff) setDiff(key string, new interface{}, computed bool) error { + if err := d.clear(key); err != nil { + return err + } + + if err := d.newWriter.WriteField(strings.Split(key, "."), new, computed); err != nil { + return fmt.Errorf("Cannot set new diff value for key %s: %s", key, err) + } + + d.updatedKeys[key] = true + + return nil +} + +// ForceNew force-flags ForceNew in the schema for a specific key, and +// re-calculates its diff, effectively causing this attribute to force a new +// resource. +// +// Keep in mind that forcing a new resource will force a second run of the +// resource's CustomizeDiff function (with a new ResourceDiff) once the current +// one has completed. This second run is performed without state. This behavior +// will be the same as if a new resource is being created and is performed to +// ensure that the diff looks like the diff for a new resource as much as +// possible. CustomizeDiff should expect such a scenario and act correctly. +// +// This function is a no-op/error if there is no diff. +// +// Note that the change to schema is permanent for the lifecycle of this +// specific ResourceDiff instance. +func (d *ResourceDiff) ForceNew(key string) error { + if !d.HasChange(key) { + return fmt.Errorf("ForceNew: No changes for %s", key) + } + + keyParts := strings.Split(key, ".") + var schema *Schema + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } else { + return fmt.Errorf("ForceNew: %s is not a valid key", key) + } + + schema.ForceNew = true + + // Flag this for a re-diff. Don't save any values to guarantee that existing + // diffs aren't messed with, as this gets messy when dealing with complex + // structures, zero values, etc. + d.forcedNewKeys[keyParts[0]] = true + + return nil +} + +// Get hands off to ResourceData.Get. +func (d *ResourceDiff) Get(key string) interface{} { + r, _ := d.GetOk(key) + return r +} + +// GetChange gets the change between the state and diff, checking first to see +// if an overridden diff exists. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { + old, new, _ := d.getChange(key) + return old.Value, new.Value +} + +// GetOk functions the same way as ResourceData.GetOk, but it also checks the +// new diff levels to provide data consistent with the current state of the +// customized diff. +func (d *ResourceDiff) GetOk(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + if exists { + // If it exists, we also want to verify it is not the zero-value. + value := r.Value + zero := r.Schema.Type.Zero() + + if eq, ok := value.(Equal); ok { + exists = !eq.Equal(zero) + } else { + exists = !reflect.DeepEqual(value, zero) + } + } + + return r.Value, exists +} + +// GetOkExists functions the same way as GetOkExists within ResourceData, but +// it also checks the new diff levels to provide data consistent with the +// current state of the customized diff. +// +// This is nearly the same function as GetOk, yet it does not check +// for the zero value of the attribute's type. This allows for attributes +// without a default, to fully check for a literal assignment, regardless +// of the zero-value for that type. 
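A short sketch of the ForceNew helper described above; the "zone" attribute is hypothetical. The HasChange guard matters because ForceNew returns an error when the key has no diff:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// diffZone forces replacement when "zone" changes, without marking the field
// ForceNew in the schema for every change.
func diffZone(d *schema.ResourceDiff, meta interface{}) error {
	if d.HasChange("zone") {
		return d.ForceNew("zone")
	}
	return nil
}
```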
+func (d *ResourceDiff) GetOkExists(key string) (interface{}, bool) { + r := d.get(strings.Split(key, "."), "newDiff") + exists := r.Exists && !r.Computed + return r.Value, exists +} + +// NewValueKnown returns true if the new value for the given key is available +// as its final value at diff time. If the return value is false, this means +// either the value is based on interpolation that was unavailable at diff +// time, or that the value was explicitly marked as computed by SetNewComputed. +func (d *ResourceDiff) NewValueKnown(key string) bool { + r := d.get(strings.Split(key, "."), "newDiff") + return !r.Computed +} + +// HasChange checks to see if there is a change between state and the diff, or +// in the overridden diff. +func (d *ResourceDiff) HasChange(key string) bool { + old, new := d.GetChange(key) + + // If the type implements the Equal interface, then call that + // instead of just doing a reflect.DeepEqual. An example where this is + // needed is *Set + if eq, ok := old.(Equal); ok { + return !eq.Equal(new) + } + + return !reflect.DeepEqual(old, new) +} + +// Id returns the ID of this resource. +// +// Note that technically, ID does not change during diffs (it either has +// already changed in the refresh, or will change on update), hence we do not +// support updating the ID or fetching it from anything other than state. +func (d *ResourceDiff) Id() string { + var result string + + if d.state != nil { + result = d.state.ID + } + return result +} + +// getChange gets values from two different levels, designed for use in +// diffChange, HasChange, and GetChange. +// +// This implementation differs from ResourceData's in the way that we first get +// results from the exact levels for the new diff, then from state and diff as +// per normal. +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { + old := d.get(strings.Split(key, "."), "state") + var new getResult + for p := range d.updatedKeys { + if childAddrOf(key, p) { + new = d.getExact(strings.Split(key, "."), "newDiff") + return old, new, true + } + } + new = d.get(strings.Split(key, "."), "newDiff") + return old, new, false +} + +// removed checks to see if the key is present in the existing, pre-customized +// diff and if it was marked as NewRemoved. +func (d *ResourceDiff) removed(k string) bool { + diff, ok := d.diff.Attributes[k] + if !ok { + return false + } + return diff.NewRemoved +} + +// get performs the appropriate multi-level reader logic for ResourceDiff, +// starting at source. Refer to newResourceDiff for the level order. +func (d *ResourceDiff) get(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldMerge(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// getExact gets an attribute from the exact level referenced by source. +func (d *ResourceDiff) getExact(addr []string, source string) getResult { + result, err := d.multiReader.ReadFieldExact(addr, source) + if err != nil { + panic(err) + } + + return d.finalizeResult(addr, result) +} + +// finalizeResult does some post-processing of the result produced by get and getExact.
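A sketch of NewValueKnown in a CustomizeDiff hook, assuming a hypothetical "name" attribute: plan-time checks must be skipped while the value is still an unresolved interpolation:

```go
package example

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/schema"
)

// checkNameLength validates "name" at plan time, but only once its final
// value is known; unknown values are left for apply time to resolve.
func checkNameLength(d *schema.ResourceDiff, meta interface{}) error {
	if !d.NewValueKnown("name") {
		return nil
	}
	if len(d.Get("name").(string)) > 63 {
		return fmt.Errorf("name must be at most 63 characters")
	}
	return nil
}
```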
+func (d *ResourceDiff) finalizeResult(addr []string, result FieldReadResult) getResult { + // If the result doesn't exist, then we set the value to the zero value + var schema *Schema + if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + + if result.Value == nil && schema != nil { + result.Value = result.ValueOrZero(schema) + } + + // Transform the FieldReadResult into a getResult. It might be worth + // merging these two structures one day. + return getResult{ + Value: result.Value, + ValueProcessed: result.ValueProcessed, + Computed: result.Computed, + Exists: result.Exists, + Schema: schema, + } +} + +// childAddrOf does a comparison of two addresses to see if one is the child of +// the other. +func childAddrOf(child, parent string) bool { + cs := strings.Split(child, ".") + ps := strings.Split(parent, ".") + if len(ps) > len(cs) { + return false + } + return reflect.DeepEqual(ps, cs[:len(ps)]) +} + +// checkKey checks the key to make sure it exists and is computed. +func (d *ResourceDiff) checkKey(key, caller string, nested bool) error { + var schema *Schema + if nested { + keyParts := strings.Split(key, ".") + schemaL := addrToSchema(keyParts, d.schema) + if len(schemaL) > 0 { + schema = schemaL[len(schemaL)-1] + } + } else { + s, ok := d.schema[key] + if ok { + schema = s + } + } + if schema == nil { + return fmt.Errorf("%s: invalid key: %s", caller, key) + } + if !schema.Computed { + return fmt.Errorf("%s only operates on computed keys - %s is not one", caller, key) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go index 445819f0..9e422c1a 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go @@ -5,6 +5,7 @@ import ( "log" "time" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/copystructure" ) @@ -62,55 +63,70 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) } if raw, ok := c.Config[TimeoutsConfigKey]; ok { - if configTimeouts, ok := raw.([]map[string]interface{}); ok { - for _, timeoutValues := range configTimeouts { - // loop through each Timeout given in the configuration and validate they - // the Timeout defined in the resource - for timeKey, timeValue := range timeoutValues { - // validate that we're dealing with the normal CRUD actions - var found bool - for _, key := range timeoutKeys() { - if timeKey == key { - found = true - break - } - } + var rawTimeouts []map[string]interface{} + switch raw := raw.(type) { + case map[string]interface{}: + rawTimeouts = append(rawTimeouts, raw) + case []map[string]interface{}: + rawTimeouts = raw + case string: + if raw == config.UnknownVariableValue { + // Timeout is not defined in the config + // Defaults will be used instead + return nil + } else { + log.Printf("[ERROR] Invalid timeout value: %q", raw) + return fmt.Errorf("Invalid Timeout value found") + } + default: + log.Printf("[ERROR] Invalid timeout structure: %#v", raw) + return fmt.Errorf("Invalid Timeout structure found") + } - if !found { - return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + for _, timeoutValues := range rawTimeouts { + for timeKey, timeValue := range timeoutValues { + // validate that we're 
dealing with the normal CRUD actions + var found bool + for _, key := range timeoutKeys() { + if timeKey == key { + found = true + break } + } - // Get timeout - rt, err := time.ParseDuration(timeValue.(string)) - if err != nil { - return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err) - } + if !found { + return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) + } - var timeout *time.Duration - switch timeKey { - case TimeoutCreate: - timeout = t.Create - case TimeoutUpdate: - timeout = t.Update - case TimeoutRead: - timeout = t.Read - case TimeoutDelete: - timeout = t.Delete - case TimeoutDefault: - timeout = t.Default - } + // Get timeout + rt, err := time.ParseDuration(timeValue.(string)) + if err != nil { + return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err) + } - // If the resource has not delcared this in the definition, then error - // with an unsupported message - if timeout == nil { - return unsupportedTimeoutKeyError(timeKey) - } + var timeout *time.Duration + switch timeKey { + case TimeoutCreate: + timeout = t.Create + case TimeoutUpdate: + timeout = t.Update + case TimeoutRead: + timeout = t.Read + case TimeoutDelete: + timeout = t.Delete + case TimeoutDefault: + timeout = t.Default + } - *timeout = rt + // If the resource has not declared this in the definition, then error + // with an unsupported message + if timeout == nil { + return unsupportedTimeoutKeyError(timeKey) } + + *timeout = rt } - } else { - log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts") + return nil } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go index f62c4d12..26b180e0 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -12,17 +12,49 @@ package schema import ( + "context" "fmt" "os" "reflect" + "regexp" "sort" "strconv" "strings" + "sync" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/terraform" + "github.com/mitchellh/copystructure" "github.com/mitchellh/mapstructure" ) +// Name of ENV variable which (if not empty) prefers panic over error +const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR" + +// type used for schema package context keys +type contextKey string + +var ( + protoVersionMu sync.Mutex + protoVersion5 = false +) + +func isProto5() bool { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + return protoVersion5 + +} + +// SetProto5 enables a feature flag for any internal changes required +// to work with the new plugin protocol. This should not be called by +// providers. +func SetProto5() { + protoVersionMu.Lock() + defer protoVersionMu.Unlock() + protoVersion5 = true +} + // Schema is used to describe the structure of a value. // // Read the documentation of the struct elements for important details. @@ -43,6 +75,26 @@ type Schema struct { // Type ValueType + // ConfigMode allows for overriding the default behaviors for mapping + // schema entries onto configuration constructs. + // + // By default, the Elem field is used to choose whether a particular + // schema is represented in configuration as an attribute or as a nested + // block; if Elem is a *schema.Resource then it's a block and it's an + // attribute otherwise.
+ // + // If Elem is *schema.Resource then setting ConfigMode to + // SchemaConfigModeAttr will force it to be represented in configuration + // as an attribute, which means that the Computed flag can be used to + // provide default elements when the argument isn't set at all, while still + // allowing the user to force zero elements by explicitly assigning an + // empty list. + // + // When Computed is set without Optional, the attribute is not settable + // in configuration at all and so SchemaConfigModeAttr is the automatic + // behavior, and SchemaConfigModeBlock is not permitted. + ConfigMode SchemaConfigMode + // If one of these is set, then this item can come from the configuration. // Both cannot be set. If Optional is set, the value is optional. If // Required is set, the value is required. @@ -62,10 +114,20 @@ type Schema struct { DiffSuppressFunc SchemaDiffSuppressFunc // If this is non-nil, then this will be a default value that is used - // when this item is not set in the configuration/state. + // when this item is not set in the configuration. // - // DefaultFunc can be specified if you want a dynamic default value. - // Only one of Default or DefaultFunc can be set. + // DefaultFunc can be specified to compute a dynamic default. + // Only one of Default or DefaultFunc can be set. If DefaultFunc is + // used then its return value should be stable to avoid generating + // confusing/perpetual diffs. + // + // Changing either Default or the return value of DefaultFunc can be + // a breaking change, especially if the attribute in question has + // ForceNew set. If a default needs to change to align with changing + // assumptions in an upstream API then it may be necessary to also use + // the MigrateState function on the resource to change the state to match, + // or have the Read function adjust the state value to align with the + // new default. // // If Required is true above, then Default cannot be set. DefaultFunc // can be set with Required. If the DefaultFunc returns nil, then there @@ -102,12 +164,17 @@ type Schema struct { ForceNew bool StateFunc SchemaStateFunc - // The following fields are only set for a TypeList or TypeSet Type. + // The following fields are only set for a TypeList, TypeSet, or TypeMap. // - // Elem must be either a *Schema or a *Resource only if the Type is - // TypeList, and represents what the element type is. If it is *Schema, - // the element type is just a simple value. If it is *Resource, the - // element type is a complex structure, potentially with its own lifecycle. + // Elem represents the element type. For a TypeMap, it must be a *Schema + // with a Type that is one of the primitives: TypeString, TypeBool, + // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a + // *Resource. If it is *Schema, the element type is just a simple value. + // If it is *Resource, the element type is a complex structure, + // potentially with its own lifecycle. + Elem interface{} + + // The following fields are only set for a TypeList or TypeSet. // // MaxItems defines a maximum amount of items that can exist within a // TypeSet or TypeList. Specific use cases would be if a TypeSet is being @@ -119,14 +186,17 @@ type Schema struct { // used to wrap a complex structure, however less than one instance would // cause instability. // - // PromoteSingle, if true, will allow single elements to be standalone - // and promote them to a list. For example "foo" would be promoted to - // ["foo"] automatically. 
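An illustrative schema fragment, with hypothetical attribute names, for the Optional-plus-Computed case the ConfigMode documentation above calls out:

```go
package example

import "github.com/hashicorp/terraform/helper/schema"

// In Auto mode an Elem of *schema.Resource would be a nested block, which
// cannot be Computed; attribute mode lets the provider default the value
// while users can still pin it, including to an explicit empty list.
var exampleConfigModeSchema = map[string]*schema.Schema{
	"rule": {
		Type:       schema.TypeList,
		Optional:   true,
		Computed:   true,
		ConfigMode: schema.SchemaConfigModeAttr,
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"port": {Type: schema.TypeInt, Required: true},
			},
		},
	},
}
```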
This is primarily for legacy reasons and the - // ambiguity is not recommended for new usage. Promotion is only allowed - // for primitive element types. - Elem interface{} - MaxItems int - MinItems int + // If the field Optional is set to true then MinItems is ignored and thus + // effectively zero. + MaxItems int + MinItems int + + // PromoteSingle originally allowed for a single element to be assigned + // where a primitive list was expected, but this no longer works from + // Terraform v0.12 onwards (Terraform Core will require a list to be set + // regardless of what this is set to) and so only applies to Terraform v0.11 + // and earlier, and so should be used only to retain this functionality + // for those still using v0.11 with a provider that formerly used this. PromoteSingle bool // The following fields are only valid for a TypeSet type. @@ -168,7 +238,8 @@ type Schema struct { // guaranteed to be of the proper Schema type, and it can yield warnings or // errors based on inspection of that value. // - // ValidateFunc currently only works for primitive types. + // ValidateFunc is honored only when the schema's Type is set to TypeInt, + // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types. ValidateFunc SchemaValidateFunc // Sensitive ensures that the attribute's value does not get displayed in @@ -178,7 +249,18 @@ type Schema struct { Sensitive bool } -// SchemaDiffSuppresFunc is a function which can be used to determine +// SchemaConfigMode is used to influence how a schema item is mapped into a +// corresponding configuration construct, using the ConfigMode field of +// Schema. +type SchemaConfigMode int + +const ( + SchemaConfigModeAuto SchemaConfigMode = iota + SchemaConfigModeAttr + SchemaConfigModeBlock +) + +// SchemaDiffSuppressFunc is a function which can be used to determine // whether a detected diff on a schema element is "valid" or not, and // suppress it from the plan if necessary. // @@ -275,8 +357,7 @@ func (s *Schema) ZeroValue() interface{} { } } -func (s *Schema) finalizeDiff( - d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { if d == nil { return d } @@ -317,13 +398,20 @@ func (s *Schema) finalizeDiff( } if s.Computed { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } } - if d.New == "" { + if d.New == "" && !d.NewComputed { // Computed attribute without a new value set d.NewComputed = true } @@ -337,9 +425,21 @@ func (s *Schema) finalizeDiff( return d } +// InternalMap is used to aid in the transition to the new schema types and +// protocol. The name is not meant to convey any usefulness, as this is not to +// be used directly by any providers. +type InternalMap = schemaMap + // schemaMap is a wrapper that adds nice functions on top of schemas. 
type schemaMap map[string]*Schema +func (m schemaMap) panicOnError() bool { + if os.Getenv(PanicOnErr) != "" { + return true + } + return false +} + // Data returns a ResourceData for the given schema, state, and diff. // // The diff is optional. @@ -347,17 +447,31 @@ func (m schemaMap) Data( s *terraform.InstanceState, d *terraform.InstanceDiff) (*ResourceData, error) { return &ResourceData{ - schema: m, - state: s, - diff: d, + schema: m, + state: s, + diff: d, + panicOnError: m.panicOnError(), }, nil } +// DeepCopy returns a copy of this schemaMap. The copy can be safely modified +// without affecting the original. +func (m *schemaMap) DeepCopy() schemaMap { + copy, err := copystructure.Config{Lock: true}.Copy(m) + if err != nil { + panic(err) + } + return *copy.(*schemaMap) +} + // Diff returns the diff for a resource given the schema map, // state, and configuration. func (m schemaMap) Diff( s *terraform.InstanceState, - c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) { + c *terraform.ResourceConfig, + customizeDiff CustomizeDiffFunc, + meta interface{}, + handleRequiresNew bool) (*terraform.InstanceDiff, error) { result := new(terraform.InstanceDiff) result.Attributes = make(map[string]*terraform.ResourceAttrDiff) @@ -367,9 +481,10 @@ func (m schemaMap) Diff( } d := &ResourceData{ - schema: m, - state: s, - config: c, + schema: m, + state: s, + config: c, + panicOnError: m.panicOnError(), } for k, schema := range m { @@ -379,74 +494,108 @@ func (m schemaMap) Diff( } } - // If the diff requires a new resource, then we recompute the diff - // so we have the complete new resource diff, and preserve the - // RequiresNew fields where necessary so the user knows exactly what - // caused that. - if result.RequiresNew() { - // Create the new diff - result2 := new(terraform.InstanceDiff) - result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - - // Preserve the DestroyTainted flag - result2.DestroyTainted = result.DestroyTainted - - // Reset the data to not contain state. We have to call init() - // again in order to reset the FieldReaders. - d.state = nil - d.init() + // Remove any nil diffs just to keep things clean + for k, v := range result.Attributes { + if v == nil { + delete(result.Attributes, k) + } + } - // Perform the diff again - for k, schema := range m { - err := m.diff(k, schema, result2, d, false) + // If this is a non-destroy diff, call any custom diff logic that has been + // defined. + if !result.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, s, result) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result, rd, false) if err != nil { return nil, err } } + } - // Force all the fields to not force a new since we know what we - // want to force new. - for k, attr := range result2.Attributes { - if attr == nil { - continue - } + if handleRequiresNew { + // If the diff requires a new resource, then we recompute the diff + // so we have the complete new resource diff, and preserve the + // RequiresNew fields where necessary so the user knows exactly what + // caused that. 
+ if result.RequiresNew() { + // Create the new diff + result2 := new(terraform.InstanceDiff) + result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) - if attr.RequiresNew { - attr.RequiresNew = false - } + // Preserve the DestroyTainted flag + result2.DestroyTainted = result.DestroyTainted - if s != nil { - attr.Old = s.Attributes[k] - } - } + // Reset the data to not contain state. We have to call init() + // again in order to reset the FieldReaders. + d.state = nil + d.init() - // Now copy in all the requires new diffs... - for k, attr := range result.Attributes { - if attr == nil { - continue + // Perform the diff again + for k, schema := range m { + err := m.diff(k, schema, result2, d, false) + if err != nil { + return nil, err + } } - newAttr, ok := result2.Attributes[k] - if !ok { - newAttr = attr + // Re-run customization + if !result2.DestroyTainted && customizeDiff != nil { + mc := m.DeepCopy() + rd := newResourceDiff(mc, c, d.state, result2) + if err := customizeDiff(rd, meta); err != nil { + return nil, err + } + for _, k := range rd.UpdatedKeys() { + err := m.diff(k, mc[k], result2, rd, false) + if err != nil { + return nil, err + } + } } - if attr.RequiresNew { - newAttr.RequiresNew = true + // Force all the fields to not force a new since we know what we + // want to force new. + for k, attr := range result2.Attributes { + if attr == nil { + continue + } + + if attr.RequiresNew { + attr.RequiresNew = false + } + + if s != nil { + attr.Old = s.Attributes[k] + } } - result2.Attributes[k] = newAttr - } + // Now copy in all the requires new diffs... + for k, attr := range result.Attributes { + if attr == nil { + continue + } - // And set the diff! - result = result2 - } + newAttr, ok := result2.Attributes[k] + if !ok { + newAttr = attr + } - // Remove any nil diffs just to keep things clean - for k, v := range result.Attributes { - if v == nil { - delete(result.Attributes, k) + if attr.RequiresNew { + newAttr.RequiresNew = true + } + + result2.Attributes[k] = newAttr + } + + // And set the diff! + result = result2 } + } // Go through and detect all of the ComputedWhens now that we've @@ -532,6 +681,10 @@ func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) { // from a unit test (and not in user-path code) to verify that a schema // is properly built. func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { + return m.internalValidate(topSchemaMap, false) +} + +func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error { if topSchemaMap == nil { topSchemaMap = m } @@ -552,6 +705,34 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { return fmt.Errorf("%s: One of optional, required, or computed must be set", k) } + computedOnly := v.Computed && !v.Optional + + switch v.ConfigMode { + case SchemaConfigModeBlock: + if _, ok := v.Elem.(*Resource); !ok { + return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k) + } + if attrsOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k) + } + if computedOnly { + return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k) + } + case SchemaConfigModeAttr: + // anything goes + case SchemaConfigModeAuto: + // Since "Auto" for Elem: *Resource would create a nested block, + // and that's impossible inside an attribute, we require it to be + // explicitly overridden as mode "Attr" for clarity. 
+ if _, ok := v.Elem.(*Resource); ok { + if attrsOnly { + return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k) + } + } + default: + return fmt.Errorf("%s: invalid ConfigMode value", k) + } + if v.Computed && v.Default != nil { return fmt.Errorf("%s: Default must be nil if computed", k) } @@ -616,7 +797,9 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { switch t := v.Elem.(type) { case *Resource: - if err := t.InternalValidate(topSchemaMap, true); err != nil { + attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr + + if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil { return err } case *Schema: @@ -632,10 +815,29 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { } } + // Computed-only field + if v.Computed && !v.Optional { + if v.ValidateFunc != nil { + return fmt.Errorf("%s: ValidateFunc is for validating user input, "+ + "there's nothing to validate on computed-only field", k) + } + if v.DiffSuppressFunc != nil { + return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+ + " between config and state representation. "+ + "There is no config for computed-only field, nothing to compare.", k) + } + } + if v.ValidateFunc != nil { switch v.Type { case TypeList, TypeSet: - return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.") + return fmt.Errorf("%s: ValidateFunc is not yet supported on lists or sets.", k) + } + } + + if v.Deprecated == "" && v.Removed == "" { + if !isValidFieldName(k) { + return fmt.Errorf("%s: Field name may only contain lowercase alphanumeric characters & underscores.", k) } } } @@ -643,24 +845,28 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { return nil } -func (m schemaMap) markAsRemoved(k string, schema *Schema, diff *terraform.InstanceDiff) { - existingDiff, ok := diff.Attributes[k] - if ok { - existingDiff.NewRemoved = true - diff.Attributes[k] = schema.finalizeDiff(existingDiff) - return - } +func isValidFieldName(name string) bool { + re := regexp.MustCompile("^[a-z0-9_]+$") + return re.MatchString(name) +} - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - NewRemoved: true, - }) +// resourceDiffer is an interface that is used by the private diff functions. +// This helps facilitate diff logic for both ResourceData and ResourceDiff with +// minimal divergence in code. +type resourceDiffer interface { + diffChange(string) (interface{}, interface{}, bool, bool, bool) + Get(string) interface{} + GetChange(string) (interface{}, interface{}) + GetOk(string) (interface{}, bool) + HasChange(string) bool + Id() string } func (m schemaMap) diff( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { unsupressedDiff := new(terraform.InstanceDiff) @@ -681,12 +887,23 @@ func (m schemaMap) diff( } for attrK, attrV := range unsupressedDiff.Attributes { - if schema.DiffSuppressFunc != nil && - attrV != nil && - schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) { - continue - } + switch rd := d.(type) { + case *ResourceData: + if schema.DiffSuppressFunc != nil && attrV != nil && + schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { + // If this attr diff is suppressed, we may still need it in the + // overall diff if it's contained within a set. Rather than + // dropping the diff, make it a NOOP.
+ if !all { + continue + } + attrV = &terraform.ResourceAttrDiff{ + Old: attrV.Old, + New: attrV.Old, + } + } + } diff.Attributes[attrK] = attrV } @@ -697,9 +914,9 @@ func (m schemaMap) diffList( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { - o, n, _, computedList := d.diffChange(k) + o, n, _, computedList, customized := d.diffChange(k) if computedList { n = nil } @@ -744,6 +961,7 @@ func (m schemaMap) diffList( diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{ Old: oldStr, NewComputed: true, + RequiresNew: schema.ForceNew, } return nil } @@ -765,10 +983,13 @@ func (m schemaMap) diffList( oldStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Figure out the maximum @@ -779,7 +1000,6 @@ func (m schemaMap) diffList( switch t := schema.Elem.(type) { case *Resource: - countDiff, cOk := diff.GetAttribute(k + ".#") // This is a complex resource for i := 0; i < maxLen; i++ { for k2, schema := range t.Schema { @@ -788,15 +1008,6 @@ func (m schemaMap) diffList( if err != nil { return err } - - // If parent list is being removed - // remove all subfields which were missed by the diff func - // We process these separately because type-specific diff functions - // lack the context (hierarchy of fields) - subKeyIsCount := strings.HasSuffix(subK, ".#") - if cOk && countDiff.New == "0" && !subKeyIsCount { - m.markAsRemoved(subK, schema, diff) - } } } case *Schema: @@ -825,13 +1036,13 @@ func (m schemaMap) diffMap( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { prefix := k + "." 
// First get all the values from the state var stateMap, configMap map[string]string - o, n, _, nComputed := d.diffChange(k) + o, n, _, nComputed, customized := d.diffChange(k) if err := mapstructure.WeakDecode(o, &stateMap); err != nil { return fmt.Errorf("%s: %s", k, err) } @@ -883,6 +1094,7 @@ func (m schemaMap) diffMap( Old: oldStr, New: newStr, }, + customized, ) } @@ -900,16 +1112,22 @@ func (m schemaMap) diffMap( continue } - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: old, - New: v, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) } for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) } return nil @@ -919,10 +1137,10 @@ func (m schemaMap) diffSet( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { - o, n, _, computedSet := d.diffChange(k) + o, n, _, computedSet, customized := d.diffChange(k) if computedSet { n = nil } @@ -981,20 +1199,26 @@ func (m schemaMap) diffSet( countStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) return nil } // If the counts are not the same, then record that diff changed := oldLen != newLen if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Build the list of codes that will make up our set. This is the @@ -1006,7 +1230,6 @@ func (m schemaMap) diffSet( for _, code := range list { switch t := schema.Elem.(type) { case *Resource: - countDiff, cOk := diff.GetAttribute(k + ".#") // This is a complex resource for k2, schema := range t.Schema { subK := fmt.Sprintf("%s.%s.%s", k, code, k2) @@ -1014,17 +1237,7 @@ func (m schemaMap) diffSet( if err != nil { return err } - - // If parent set is being removed - // remove all subfields which were missed by the diff func - // We process these separately because type-specific diff functions - // lack the context (hierarchy of fields) - subKeyIsCount := strings.HasSuffix(subK, ".#") - if cOk && countDiff.New == "0" && !subKeyIsCount { - m.markAsRemoved(subK, schema, diff) - } } - case *Schema: // Copy the schema so that we can set Computed/ForceNew from // the parent schema (the TypeSet). @@ -1051,11 +1264,11 @@ func (m schemaMap) diffString( k string, schema *Schema, diff *terraform.InstanceDiff, - d *ResourceData, + d resourceDiffer, all bool) error { var originalN interface{} var os, ns string - o, n, _, computed := d.diffChange(k) + o, n, _, computed, customized := d.diffChange(k) if schema.StateFunc != nil && n != nil { originalN = n n = schema.StateFunc(n) @@ -1071,7 +1284,7 @@ func (m schemaMap) diffString( return fmt.Errorf("%s: %s", k, err) } - if os == ns && !all { + if os == ns && !all && !computed { // They're the same value. If there old value is not blank or we // have an ID, then return right away since we're already setup. 
if os != "" || d.Id() != "" { @@ -1079,26 +1292,29 @@ func (m schemaMap) diffString( } // Otherwise, only continue if we're computed - if !schema.Computed && !computed { + if !schema.Computed { return nil } } removed := false - if o != nil && n == nil { + if o != nil && n == nil && !computed { removed = true } if removed && schema.Computed { return nil } - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }) + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) return nil } @@ -1107,7 +1323,7 @@ func (m schemaMap) inputString( input terraform.UIInput, k string, schema *Schema) (interface{}, error) { - result, err := input.Input(&terraform.InputOpts{ + result, err := input.Input(context.Background(), &terraform.InputOpts{ Id: k, Query: k, Description: schema.Description, @@ -1149,6 +1365,15 @@ func (m schemaMap) validate( "%q: this field cannot be set", k)} } + // If the value is unknown then we can't validate it yet. + // In particular, this avoids spurious type errors where downstream + // validation code sees UnknownVariableValue as being just a string. + // The SDK has to allow the unknown value through initially, so that + // Required fields set via an interpolated value are accepted. + if !isWhollyKnown(raw) { + return nil, nil + } + err := m.validateConflictingAttributes(k, schema, c) if err != nil { return nil, []error{err} @@ -1157,6 +1382,28 @@ func (m schemaMap) validate( return m.validateType(k, raw, schema, c) } +// isWhollyKnown returns false if the argument contains an UnknownVariableValue +func isWhollyKnown(raw interface{}) bool { + switch raw := raw.(type) { + case string: + if raw == config.UnknownVariableValue { + return false + } + case []interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + case map[string]interface{}: + for _, v := range raw { + if !isWhollyKnown(v) { + return false + } + } + } + return true +} func (m schemaMap) validateConflictingAttributes( k string, schema *Schema, @@ -1166,10 +1413,15 @@ func (m schemaMap) validateConflictingAttributes( return nil } - for _, conflicting_key := range schema.ConflictsWith { - if value, ok := c.Get(conflicting_key); ok { + for _, conflictingKey := range schema.ConflictsWith { + if raw, ok := c.Get(conflictingKey); ok { + if raw == config.UnknownVariableValue { + // An unknown value might become unset (null) once known, so + // we must defer validation until it's known. + continue + } return fmt.Errorf( - "%q: conflicts with %s (%#v)", k, conflicting_key, value) + "%q: conflicts with %s", k, conflictingKey) } } @@ -1181,6 +1433,13 @@ func (m schemaMap) validateList( raw interface{}, schema *Schema, c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == config.UnknownVariableValue { + return nil, nil + } + } + // We use reflection to verify the slice because you can't // case to []interface{} unless the slice is exactly that type. 
rawV := reflect.ValueOf(raw) @@ -1252,6 +1511,13 @@ func (m schemaMap) validateMap( raw interface{}, schema *Schema, c *terraform.ResourceConfig) ([]string, []error) { + // first check if the list is wholly unknown + if s, ok := raw.(string); ok { + if s == config.UnknownVariableValue { + return nil, nil + } + } + // We use reflection to verify the slice because you can't // case to []interface{} unless the slice is exactly that type. rawV := reflect.ValueOf(raw) @@ -1358,13 +1624,10 @@ func getValueType(k string, schema *Schema) (ValueType, error) { return vt, nil } + // If a Schema is provided to a Map, we use the Type of that schema + // as the type for each element in the Map. if s, ok := schema.Elem.(*Schema); ok { - if s.Elem == nil { - return TypeString, nil - } - if vt, ok := s.Elem.(ValueType); ok { - return vt, nil - } + return s.Type, nil } if _, ok := schema.Elem.(*Resource); ok { @@ -1380,8 +1643,8 @@ func (m schemaMap) validateObject( k string, schema map[string]*Schema, c *terraform.ResourceConfig) ([]string, []error) { - raw, _ := c.GetRaw(k) - if _, ok := raw.(map[string]interface{}); !ok { + raw, _ := c.Get(k) + if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) { return nil, []error{fmt.Errorf( "%s: expected object, got %s", k, reflect.ValueOf(raw).Kind())} @@ -1425,7 +1688,6 @@ func (m schemaMap) validatePrimitive( raw interface{}, schema *Schema, c *terraform.ResourceConfig) ([]string, []error) { - // Catch if the user gave a complex type where a primitive was // expected, so we can return a friendly error message that // doesn't contain Go type system terminology. @@ -1457,12 +1719,25 @@ func (m schemaMap) validatePrimitive( } decoded = n case TypeInt: - // Verify that we can parse this as an int - var n int - if err := mapstructure.WeakDecode(raw, &n); err != nil { - return nil, []error{fmt.Errorf("%s: %s", k, err)} + switch { + case isProto5(): + // We need to verify the type precisely, because WeakDecode will + // decode a float as an integer. + + // the config shims only use int for integral number values + if v, ok := raw.(int); ok { + decoded = v + } else { + return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)} + } + default: + // Verify that we can parse this as an int + var n int + if err := mapstructure.WeakDecode(raw, &n); err != nil { + return nil, []error{fmt.Errorf("%s: %s", k, err)} + } + decoded = n } - decoded = n case TypeFloat: // Verify that we can parse this as an int var n float64 diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go index 3eb2d007..fe6d7504 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go @@ -85,6 +85,9 @@ func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) { // to hash complex substructures when used in sets, and so the serialization // is not reversible. 
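The `TypeInt` branch above now distinguishes protocol v5, where the shimmed config always carries exact `int` values, from the legacy path, which weakly decodes string-ish input. A rough standalone approximation using only the standard library; the vendored code uses `mapstructure.WeakDecode` for the legacy case, so this is a sketch of the shape of the split, not the exact coercion rules:

```go
package sketch

import (
	"fmt"
	"strconv"
)

// validateInt is a toy version of the TypeInt validation split: under
// proto5 anything other than an int is rejected outright; the legacy
// path attempts a loose, WeakDecode-like coercion instead.
func validateInt(raw interface{}, proto5 bool) (int, error) {
	if proto5 {
		v, ok := raw.(int)
		if !ok {
			return 0, fmt.Errorf("must be a whole number, got %v", raw)
		}
		return v, nil
	}
	return strconv.Atoi(fmt.Sprintf("%v", raw))
}
```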
func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) { + if val == nil { + return + } sm := resource.Schema m := val.(map[string]interface{}) var keys []string diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go index de05f40e..8ee89e47 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go @@ -17,6 +17,12 @@ func HashString(v interface{}) int { return hashcode.String(v.(string)) } +// HashInt hashes integers. If you want a Set of integers, this is the +// SchemaSetFunc you want. +func HashInt(v interface{}) int { + return hashcode.String(strconv.Itoa(v.(int))) +} + // HashResource hashes complex structures that are described using // a *Resource. This is the default set implementation used when a set's // element type is a full resource. @@ -153,6 +159,31 @@ func (s *Set) Equal(raw interface{}) bool { return reflect.DeepEqual(s.m, other.m) } +// HashEqual simply checks to the keys the top-level map to the keys in the +// other set's top-level map to see if they are equal. This obviously assumes +// you have a properly working hash function - use HashResource if in doubt. +func (s *Set) HashEqual(raw interface{}) bool { + other, ok := raw.(*Set) + if !ok { + return false + } + + ks1 := make([]string, 0) + ks2 := make([]string, 0) + + for k := range s.m { + ks1 = append(ks1, k) + } + for k := range other.m { + ks2 = append(ks2, k) + } + + sort.Strings(ks1) + sort.Strings(ks2) + + return reflect.DeepEqual(ks1, ks2) +} + func (s *Set) GoString() string { return fmt.Sprintf("*Set(%#v)", s.m) } @@ -167,6 +198,16 @@ func (s *Set) add(item interface{}, computed bool) string { code := s.hash(item) if computed { code = "~" + code + + if isProto5() { + tmpCode := code + count := 0 + for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] { + count++ + tmpCode = fmt.Sprintf("%s%d", code, count) + } + code = tmpCode + } } if _, ok := s.m[code]; !ok { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go new file mode 100644 index 00000000..203d0170 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go @@ -0,0 +1,115 @@ +package schema + +import ( + "encoding/json" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/terraform" +) + +// DiffFromValues takes the current state and desired state as cty.Values and +// derives a terraform.InstanceDiff to give to the legacy providers. This is +// used to take the states provided by the new ApplyResourceChange method and +// convert them to a state+diff required for the legacy Apply method. +func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) { + return diffFromValues(prior, planned, res, nil) +} + +// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our +// test fixtures from the legacy tests. In the new provider protocol the diff +// only needs to be created for the apply operation, and any customizations +// have already been done. 
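`HashEqual` above compares only the two sets' hash codes, not the stored values, so it is only as trustworthy as the hash function, which is exactly why its comment points to `HashResource` when in doubt. A toy equivalent over plain maps:

```go
package sketch

import (
	"reflect"
	"sort"
)

// hashKeysEqual mirrors (*Set).HashEqual: two sets are "hash equal"
// when their sorted code keys match, regardless of the values stored
// under those codes.
func hashKeysEqual(a, b map[string]interface{}) bool {
	ka := make([]string, 0, len(a))
	for k := range a {
		ka = append(ka, k)
	}
	kb := make([]string, 0, len(b))
	for k := range b {
		kb = append(kb, k)
	}
	sort.Strings(ka)
	sort.Strings(kb)
	return reflect.DeepEqual(ka, kb)
}
```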
+func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) { + instanceState, err := res.ShimInstanceStateFromValue(prior) + if err != nil { + return nil, err + } + + configSchema := res.CoreConfigSchema() + + cfg := terraform.NewResourceConfigShimmed(planned, configSchema) + removeConfigUnknowns(cfg.Config) + removeConfigUnknowns(cfg.Raw) + + diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false) + if err != nil { + return nil, err + } + + return diff, err +} + +// During apply the only unknown values are those which are to be computed by +// the resource itself. These may have been marked as unknown config values, and +// need to be removed to prevent the UnknownVariableValue from appearing the diff. +func removeConfigUnknowns(cfg map[string]interface{}) { + for k, v := range cfg { + switch v := v.(type) { + case string: + if v == config.UnknownVariableValue { + delete(cfg, k) + } + case []interface{}: + for _, i := range v { + if m, ok := i.(map[string]interface{}); ok { + removeConfigUnknowns(m) + } + } + case map[string]interface{}: + removeConfigUnknowns(v) + } + } +} + +// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to +// get a new cty.Value state. This is used to convert the diff returned from +// the legacy provider Diff method to the state required for the new +// PlanResourceChange method. +func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) { + return d.ApplyToValue(base, schema) +} + +// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON +// encoding. +func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) { + js, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + var m map[string]interface{} + if err := json.Unmarshal(js, &m); err != nil { + return nil, err + } + + return m, nil +} + +// JSONMapToStateValue takes a generic json map[string]interface{} and converts it +// to the specific type, ensuring that the values conform to the schema. +func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) { + var val cty.Value + + js, err := json.Marshal(m) + if err != nil { + return val, err + } + + val, err = ctyjson.Unmarshal(js, block.ImpliedType()) + if err != nil { + return val, err + } + + return block.CoerceValue(val) +} + +// StateValueFromInstanceState converts a terraform.InstanceState to a +// cty.Value as described by the provided cty.Type, and maintains the resource +// ID as the "id" attribute. +func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) { + return is.AttrsAsObjectValue(ty) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go index 9765bdbc..a367a1fd 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go @@ -10,13 +10,15 @@ import ( // TestResourceDataRaw creates a ResourceData from a raw configuration map. 
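The two JSON shims above are inverses up to schema coercion: `JSONMapToStateValue` ends with `CoerceValue`, so a round trip normalizes the value to the block's implied type. A hedged usage sketch (hypothetical helper name; assumes the same `cty` and `configschema` imports shims.go already uses):

```go
// roundTripState pushes a state value through JSON and back, letting
// CoerceValue (inside JSONMapToStateValue) normalize the result to the
// schema's implied type.
func roundTripState(val cty.Value, block *configschema.Block) (cty.Value, error) {
	m, err := StateValueToJSONMap(val, block.ImpliedType())
	if err != nil {
		return cty.NilVal, err
	}
	return JSONMapToStateValue(m, block)
}
```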
func TestResourceDataRaw( t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData { + t.Helper() + c, err := config.NewRawConfig(raw) if err != nil { t.Fatalf("err: %s", err) } sm := schemaMap(schema) - diff, err := sm.Diff(nil, terraform.NewResourceConfig(c)) + diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true) if err != nil { t.Fatalf("err: %s", err) } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go index 1610cec2..914ca32c 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go @@ -2,7 +2,22 @@ package schema -import "fmt" +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypeBool-1] + _ = x[TypeInt-2] + _ = x[TypeFloat-3] + _ = x[TypeString-4] + _ = x[TypeList-5] + _ = x[TypeMap-6] + _ = x[TypeSet-7] + _ = x[typeObject-8] +} const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" @@ -10,7 +25,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" } return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/helper/slowmessage/slowmessage.go b/vendor/github.com/hashicorp/terraform/helper/slowmessage/slowmessage.go new file mode 100644 index 00000000..e4e14710 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/slowmessage/slowmessage.go @@ -0,0 +1,34 @@ +package slowmessage + +import ( + "time" +) + +// SlowFunc is the function that could be slow. Usually, you'll have to +// wrap an existing function in a lambda to make it match this type signature. +type SlowFunc func() error + +// CallbackFunc is the function that is triggered when the threshold is reached. +type CallbackFunc func() + +// Do calls sf. If threshold time has passed, cb is called. Note that this +// call will be made concurrently to sf still running. 
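`Do` (defined just below) races the slow function against a timer: the callback fires at most once when the threshold passes, and the caller still blocks until the work actually finishes. A usage sketch with hypothetical work:

```go
package sketch

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/slowmessage"
)

func example() error {
	return slowmessage.Do(2*time.Second, func() error {
		time.Sleep(3 * time.Second) // the potentially slow operation
		return nil
	}, func() {
		fmt.Println("Still working...") // printed once, after 2s
	})
}
```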
+func Do(threshold time.Duration, sf SlowFunc, cb CallbackFunc) error { + // Call the slow function + errCh := make(chan error, 1) + go func() { + errCh <- sf() + }() + + // Wait for it to complete or the threshold to pass + select { + case err := <-errCh: + return err + case <-time.After(threshold): + // Threshold reached, call the callback + cb() + } + + // Wait an indefinite amount of time for it to finally complete + return <-errCh +} diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go new file mode 100644 index 00000000..b3eb90fd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go @@ -0,0 +1,11 @@ +package structure + +import "encoding/json" + +func ExpandJsonFromString(jsonString string) (map[string]interface{}, error) { + var result map[string]interface{} + + err := json.Unmarshal([]byte(jsonString), &result) + + return result, err +} diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go new file mode 100644 index 00000000..578ad2ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go @@ -0,0 +1,16 @@ +package structure + +import "encoding/json" + +func FlattenJsonToString(input map[string]interface{}) (string, error) { + if len(input) == 0 { + return "", nil + } + + result, err := json.Marshal(input) + if err != nil { + return "", err + } + + return string(result), nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go new file mode 100644 index 00000000..3256b476 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go @@ -0,0 +1,24 @@ +package structure + +import "encoding/json" + +// Takes a value containing JSON string and passes it through +// the JSON parser to normalize it, returns either a parsing +// error or normalized JSON string. 
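A usage sketch for the `structure` helpers above, with a hypothetical input string. Because Go's `json.Marshal` writes map keys in sorted order, an expand/flatten round trip also normalizes key order, which is what makes these helpers useful alongside `NormalizeJsonString` (defined just below) for diff suppression:

```go
package sketch

import "github.com/hashicorp/terraform/helper/structure"

func example() (string, error) {
	m, err := structure.ExpandJsonFromString(`{"b": [2, 3], "a": 1}`)
	if err != nil {
		return "", err
	}
	return structure.FlattenJsonToString(m) // {"a":1,"b":[2,3]}
}
```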
+func NormalizeJsonString(jsonString interface{}) (string, error) { + var j interface{} + + if jsonString == nil || jsonString.(string) == "" { + return "", nil + } + + s := jsonString.(string) + + err := json.Unmarshal([]byte(s), &j) + if err != nil { + return s, err + } + + bytes, _ := json.Marshal(j) + return string(bytes[:]), nil +} diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go new file mode 100644 index 00000000..46f794a7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go @@ -0,0 +1,21 @@ +package structure + +import ( + "reflect" + + "github.com/hashicorp/terraform/helper/schema" +) + +func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool { + oldMap, err := ExpandJsonFromString(old) + if err != nil { + return false + } + + newMap, err := ExpandJsonFromString(new) + if err != nil { + return false + } + + return reflect.DeepEqual(oldMap, newMap) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go new file mode 100644 index 00000000..efded892 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go @@ -0,0 +1,341 @@ +package validation + +import ( + "bytes" + "fmt" + "net" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/helper/structure" +) + +// All returns a SchemaValidateFunc which tests if the provided value +// passes all provided SchemaValidateFunc +func All(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) + } + return allWarnings, allErrors + } +} + +// Any returns a SchemaValidateFunc which tests if the provided value +// passes any of the provided SchemaValidateFunc +func Any(validators ...schema.SchemaValidateFunc) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + var allErrors []error + var allWarnings []string + for _, validator := range validators { + validatorWarnings, validatorErrors := validator(i, k) + if len(validatorWarnings) == 0 && len(validatorErrors) == 0 { + return []string{}, []error{} + } + allWarnings = append(allWarnings, validatorWarnings...) + allErrors = append(allErrors, validatorErrors...) 
+ } + return allWarnings, allErrors + } +} + +// IntBetween returns a SchemaValidateFunc which tests if the provided value +// is of type int and is between min and max (inclusive) +func IntBetween(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v < min || v > max { + es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v)) + return + } + + return + } +} + +// IntAtLeast returns a SchemaValidateFunc which tests if the provided value +// is of type int and is at least min (inclusive) +func IntAtLeast(min int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v < min { + es = append(es, fmt.Errorf("expected %s to be at least (%d), got %d", k, min, v)) + return + } + + return + } +} + +// IntAtMost returns a SchemaValidateFunc which tests if the provided value +// is of type int and is at most max (inclusive) +func IntAtMost(max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be int", k)) + return + } + + if v > max { + es = append(es, fmt.Errorf("expected %s to be at most (%d), got %d", k, max, v)) + return + } + + return + } +} + +// IntInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type int and matches the value of an element in the valid slice +func IntInSlice(valid []int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(int) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be an integer", k)) + return + } + + for _, validInt := range valid { + if v == validInt { + return + } + } + + es = append(es, fmt.Errorf("expected %s to be one of %v, got %d", k, valid, v)) + return + } +} + +// StringInSlice returns a SchemaValidateFunc which tests if the provided value +// is of type string and matches the value of an element in the valid slice +// will test with in lower case if ignoreCase is true +func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + for _, str := range valid { + if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) { + return + } + } + + es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v)) + return + } +} + +// StringLenBetween returns a SchemaValidateFunc which tests if the provided value +// is of type string and has length between min and max (inclusive) +func StringLenBetween(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + if len(v) < min || len(v) > max { + es = append(es, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v)) + } + return + } +} + +// StringMatch returns a SchemaValidateFunc which tests if the provided value +// matches a given regexp. 
Optionally an error message can be provided to +// return something friendlier than "must match some globby regexp". +func StringMatch(r *regexp.Regexp, message string) schema.SchemaValidateFunc { + return func(i interface{}, k string) ([]string, []error) { + v, ok := i.(string) + if !ok { + return nil, []error{fmt.Errorf("expected type of %s to be string", k)} + } + + if ok := r.MatchString(v); !ok { + if message != "" { + return nil, []error{fmt.Errorf("invalid value for %s (%s)", k, message)} + + } + return nil, []error{fmt.Errorf("expected value of %s to match regular expression %q", k, r)} + } + return nil, nil + } +} + +// NoZeroValues is a SchemaValidateFunc which tests if the provided value is +// not a zero value. It's useful in situations where you want to catch +// explicit zero values on things like required fields during validation. +func NoZeroValues(i interface{}, k string) (s []string, es []error) { + if reflect.ValueOf(i).Interface() == reflect.Zero(reflect.TypeOf(i)).Interface() { + switch reflect.TypeOf(i).Kind() { + case reflect.String: + es = append(es, fmt.Errorf("%s must not be empty", k)) + case reflect.Int, reflect.Float64: + es = append(es, fmt.Errorf("%s must not be zero", k)) + default: + // this validator should only ever be applied to TypeString, TypeInt and TypeFloat + panic(fmt.Errorf("can't use NoZeroValues with %T attribute %s", i, k)) + } + } + return +} + +// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value +// is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive) +func CIDRNetwork(min, max int) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + _, ipnet, err := net.ParseCIDR(v) + if err != nil { + es = append(es, fmt.Errorf( + "expected %s to contain a valid CIDR, got: %s with err: %s", k, v, err)) + return + } + + if ipnet == nil || v != ipnet.String() { + es = append(es, fmt.Errorf( + "expected %s to contain a valid network CIDR, expected %s, got %s", + k, ipnet, v)) + } + + sigbits, _ := ipnet.Mask.Size() + if sigbits < min || sigbits > max { + es = append(es, fmt.Errorf( + "expected %q to contain a network CIDR with between %d and %d significant bits, got: %d", + k, min, max, sigbits)) + } + + return + } +} + +// SingleIP returns a SchemaValidateFunc which tests if the provided value +// is of type string, and in valid single IP notation +func SingleIP() schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + ip := net.ParseIP(v) + if ip == nil { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP, got: %s", k, v)) + } + return + } +} + +// IPRange returns a SchemaValidateFunc which tests if the provided value +// is of type string, and in valid IP range notation +func IPRange() schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(string) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be string", k)) + return + } + + ips := strings.Split(v, "-") + if len(ips) != 2 { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP range, got: %s", k, v)) + return + } + ip1 := net.ParseIP(ips[0]) + ip2 := net.ParseIP(ips[1]) + if ip1 == nil || ip2 == nil || 
bytes.Compare(ip1, ip2) > 0 { + es = append(es, fmt.Errorf( + "expected %s to contain a valid IP range, got: %s", k, v)) + } + return + } +} + +// ValidateJsonString is a SchemaValidateFunc which tests to make sure the +// supplied string is valid JSON. +func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) { + if _, err := structure.NormalizeJsonString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + return +} + +// ValidateListUniqueStrings is a ValidateFunc that ensures a list has no +// duplicate items in it. It's useful for when a list is needed over a set +// because order matters, yet the items still need to be unique. +func ValidateListUniqueStrings(v interface{}, k string) (ws []string, errors []error) { + for n1, v1 := range v.([]interface{}) { + for n2, v2 := range v.([]interface{}) { + if v1.(string) == v2.(string) && n1 != n2 { + errors = append(errors, fmt.Errorf("%q: duplicate entry - %s", k, v1.(string))) + } + } + } + return +} + +// ValidateRegexp returns a SchemaValidateFunc which tests to make sure the +// supplied string is a valid regular expression. +func ValidateRegexp(v interface{}, k string) (ws []string, errors []error) { + if _, err := regexp.Compile(v.(string)); err != nil { + errors = append(errors, fmt.Errorf("%q: %s", k, err)) + } + return +} + +// ValidateRFC3339TimeString is a ValidateFunc that ensures a string parses +// as time.RFC3339 format +func ValidateRFC3339TimeString(v interface{}, k string) (ws []string, errors []error) { + if _, err := time.Parse(time.RFC3339, v.(string)); err != nil { + errors = append(errors, fmt.Errorf("%q: invalid RFC3339 timestamp", k)) + } + return +} + +// FloatBetween returns a SchemaValidateFunc which tests if the provided value +// is of type float64 and is between min and max (inclusive). +func FloatBetween(min, max float64) schema.SchemaValidateFunc { + return func(i interface{}, k string) (s []string, es []error) { + v, ok := i.(float64) + if !ok { + es = append(es, fmt.Errorf("expected type of %s to be float64", k)) + return + } + + if v < min || v > max { + es = append(es, fmt.Errorf("expected %s to be in the range (%f - %f), got %f", k, min, max, v)) + return + } + + return + } +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline.go b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline.go new file mode 100644 index 00000000..86d7b921 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline.go @@ -0,0 +1,77 @@ +// wrappedreadline is a package that has helpers for interacting with +// readline from a panicwrap executable. +// +// panicwrap overrides the standard file descriptors so that the child process +// no longer looks like a TTY. The helpers here access the extra file descriptors +// passed by panicwrap to fix that. +// +// panicwrap should be checked for with panicwrap.Wrapped before using this +// librar, since this library won't adapt if the binary is not wrapped. +package wrappedreadline + +import ( + "runtime" + + "github.com/chzyer/readline" + + "github.com/hashicorp/terraform/helper/wrappedstreams" +) + +// Override overrides the values in readline.Config that need to be +// set with wrapped values. 
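The validators in this file compose: `All` and `Any` fold several `SchemaValidateFunc`s into one. A hypothetical schema field using them (the field and its ranges are illustrative, not from this provider):

```go
package sketch

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/helper/validation"
)

// portField accepts either the sentinel 0 or a port in the usual
// unprivileged range, demonstrating Any over two independent checks.
var portField = &schema.Schema{
	Type:     schema.TypeInt,
	Optional: true,
	ValidateFunc: validation.Any(
		validation.IntInSlice([]int{0}),
		validation.IntBetween(1024, 65535),
	),
}
```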
+func Override(cfg *readline.Config) *readline.Config { + cfg.Stdin = wrappedstreams.Stdin() + cfg.Stdout = wrappedstreams.Stdout() + cfg.Stderr = wrappedstreams.Stderr() + + cfg.FuncGetWidth = TerminalWidth + cfg.FuncIsTerminal = IsTerminal + + rm := RawMode{StdinFd: int(wrappedstreams.Stdin().Fd())} + cfg.FuncMakeRaw = rm.Enter + cfg.FuncExitRaw = rm.Exit + + return cfg +} + +// IsTerminal determines if this process is attached to a TTY. +func IsTerminal() bool { + // Windows is always a terminal + if runtime.GOOS == "windows" { + return true + } + + // Same implementation as readline but with our custom fds + return readline.IsTerminal(int(wrappedstreams.Stdin().Fd())) && + (readline.IsTerminal(int(wrappedstreams.Stdout().Fd())) || + readline.IsTerminal(int(wrappedstreams.Stderr().Fd()))) +} + +// TerminalWidth gets the terminal width in characters. +func TerminalWidth() int { + if runtime.GOOS == "windows" { + return readline.GetScreenWidth() + } + + return getWidth() +} + +// RawMode is a helper for entering and exiting raw mode. +type RawMode struct { + StdinFd int + + state *readline.State +} + +func (r *RawMode) Enter() (err error) { + r.state, err = readline.MakeRaw(r.StdinFd) + return err +} + +func (r *RawMode) Exit() error { + if r.state == nil { + return nil + } + + return readline.Restore(r.StdinFd, r.state) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_unix.go b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_unix.go new file mode 100644 index 00000000..4e410c7b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_unix.go @@ -0,0 +1,46 @@ +// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd + +package wrappedreadline + +import ( + "syscall" + "unsafe" + + "github.com/hashicorp/terraform/helper/wrappedstreams" +) + +// getWidth impl for Unix +func getWidth() int { + stdoutFd := int(wrappedstreams.Stdout().Fd()) + stderrFd := int(wrappedstreams.Stderr().Fd()) + + w := getWidthFd(stdoutFd) + if w < 0 { + w = getWidthFd(stderrFd) + } + + return w +} + +type winsize struct { + Row uint16 + Col uint16 + Xpixel uint16 + Ypixel uint16 +} + +// get width of the terminal +func getWidthFd(stdoutFd int) int { + ws := &winsize{} + retCode, _, errno := syscall.Syscall(syscall.SYS_IOCTL, + uintptr(stdoutFd), + uintptr(syscall.TIOCGWINSZ), + uintptr(unsafe.Pointer(ws))) + + if int(retCode) == -1 { + _ = errno + return -1 + } + + return int(ws.Col) +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_windows.go b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_windows.go new file mode 100644 index 00000000..110ee519 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedreadline/wrappedreadline_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package wrappedreadline + +// getWidth impl for other +func getWidth() int { + return 0 +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams.go b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams.go new file mode 100644 index 00000000..d3e5cf1c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams.go @@ -0,0 +1,52 @@ +// Package wrappedstreams provides access to the standard OS streams +// (stdin, stdout, stderr) even if wrapped under panicwrap. 
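`RawMode` above pairs terminal raw-mode entry and exit around the wrapped stdin descriptor, and `Exit` is deliberately a no-op when `Enter` never succeeded. A usage sketch (hypothetical helper, in-package so no extra imports are needed):

```go
// withRawMode runs fn with the terminal in raw mode and restores the
// previous state afterwards, even if fn returns an error.
func withRawMode(stdinFd int, fn func() error) error {
	rm := RawMode{StdinFd: stdinFd}
	if err := rm.Enter(); err != nil {
		return err
	}
	defer rm.Exit()
	return fn()
}
```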
+package wrappedstreams + +import ( + "os" + + "github.com/mitchellh/panicwrap" +) + +// Stdin returns the true stdin of the process. +func Stdin() *os.File { + stdin := os.Stdin + if panicwrap.Wrapped(nil) { + stdin = wrappedStdin + } + + return stdin +} + +// Stdout returns the true stdout of the process. +func Stdout() *os.File { + stdout := os.Stdout + if panicwrap.Wrapped(nil) { + stdout = wrappedStdout + } + + return stdout +} + +// Stderr returns the true stderr of the process. +func Stderr() *os.File { + stderr := os.Stderr + if panicwrap.Wrapped(nil) { + stderr = wrappedStderr + } + + return stderr +} + +// These are the wrapped standard streams. These are setup by the +// platform specific code in initPlatform. +var ( + wrappedStdin *os.File + wrappedStdout *os.File + wrappedStderr *os.File +) + +func init() { + // Initialize the platform-specific code + initPlatform() +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_other.go b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_other.go new file mode 100644 index 00000000..5ffa413b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_other.go @@ -0,0 +1,14 @@ +// +build !windows + +package wrappedstreams + +import ( + "os" +) + +func initPlatform() { + // The standard streams are passed in via extra file descriptors. + wrappedStdin = os.NewFile(uintptr(3), "stdin") + wrappedStdout = os.NewFile(uintptr(4), "stdout") + wrappedStderr = os.NewFile(uintptr(5), "stderr") +} diff --git a/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_windows.go b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_windows.go new file mode 100644 index 00000000..2709cc0c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/helper/wrappedstreams/streams_windows.go @@ -0,0 +1,52 @@ +// +build windows + +package wrappedstreams + +import ( + "log" + "os" + "syscall" +) + +func initPlatform() { + wrappedStdin = openConsole("CONIN$", os.Stdin) + wrappedStdout = openConsole("CONOUT$", os.Stdout) + wrappedStderr = wrappedStdout +} + +// openConsole opens a console handle, using a backup if it fails. +// This is used to get the exact console handle instead of the redirected +// handles from panicwrap. 
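The non-Windows `initPlatform` above reopens file descriptors 3, 4, and 5, which is the numbering `os/exec` produces when a parent process passes extra files: entry i of `ExtraFiles` becomes descriptor 3+i in the child. A sketch of the parent side of that convention, as a panicwrap-style wrapper might do it (hypothetical helper, not part of the vendored code):

```go
package sketch

import (
	"os"
	"os/exec"
)

// spawnWrapped hands the parent's real streams to a child as fds 3-5,
// so the child can recover them even though its own stdio is redirected.
func spawnWrapped(path string) error {
	cmd := exec.Command(path)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{os.Stdin, os.Stdout, os.Stderr}
	return cmd.Run()
}
```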
+func openConsole(name string, backup *os.File) *os.File { + // Convert to UTF16 + path, err := syscall.UTF16PtrFromString(name) + if err != nil { + log.Printf("[ERROR] wrappedstreams: %s", err) + return backup + } + + // Determine the share mode + var shareMode uint32 + switch name { + case "CONIN$": + shareMode = syscall.FILE_SHARE_READ + case "CONOUT$": + shareMode = syscall.FILE_SHARE_WRITE + } + + // Get the file + h, err := syscall.CreateFile( + path, + syscall.GENERIC_READ|syscall.GENERIC_WRITE, + shareMode, + nil, + syscall.OPEN_EXISTING, + 0, 0) + if err != nil { + log.Printf("[ERROR] wrappedstreams: %s", err) + return backup + } + + // Create the Go file + return os.NewFile(uintptr(h), name) +} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/client.go b/vendor/github.com/hashicorp/terraform/httpclient/client.go new file mode 100644 index 00000000..bb06beb4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/httpclient/client.go @@ -0,0 +1,18 @@ +package httpclient + +import ( + "net/http" + + cleanhttp "github.com/hashicorp/go-cleanhttp" +) + +// New returns the DefaultPooledClient from the cleanhttp +// package that will also send a Terraform User-Agent string. +func New() *http.Client { + cli := cleanhttp.DefaultPooledClient() + cli.Transport = &userAgentRoundTripper{ + userAgent: UserAgentString(), + inner: cli.Transport, + } + return cli +} diff --git a/vendor/github.com/hashicorp/terraform/httpclient/useragent.go b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go new file mode 100644 index 00000000..5e280176 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/httpclient/useragent.go @@ -0,0 +1,40 @@ +package httpclient + +import ( + "fmt" + "log" + "net/http" + "os" + "strings" + + "github.com/hashicorp/terraform/version" +) + +const userAgentFormat = "Terraform/%s" +const uaEnvVar = "TF_APPEND_USER_AGENT" + +func UserAgentString() string { + ua := fmt.Sprintf(userAgentFormat, version.Version) + + if add := os.Getenv(uaEnvVar); add != "" { + add = strings.TrimSpace(add) + if len(add) > 0 { + ua += " " + add + log.Printf("[DEBUG] Using modified User-Agent: %s", ua) + } + } + + return ua +} + +type userAgentRoundTripper struct { + inner http.RoundTripper + userAgent string +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if _, ok := req.Header["User-Agent"]; !ok { + req.Header.Set("User-Agent", rt.userAgent) + } + return rt.inner.RoundTrip(req) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go new file mode 100644 index 00000000..a9b8f988 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go @@ -0,0 +1,123 @@ +package earlyconfig + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/tfdiags" +) + +// A Config is a node in the tree of modules within a configuration. +// +// The module tree is constructed by following ModuleCall instances recursively +// through the root module transitively into descendent modules. 
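`ProviderDependencies` (shown below) folds each provider's constraint strings into a single `version.Constraints` value before wrapping it for the discovery package. The core parsing step in isolation, assuming `github.com/hashicorp/go-version` is imported as `version`:

```go
// mergeConstraints parses and concatenates constraint strings, skipping
// empties, exactly as the loop in ProviderDependencies does.
func mergeConstraints(reqs []string) (version.Constraints, error) {
	var all version.Constraints
	for _, r := range reqs {
		if r == "" {
			continue
		}
		c, err := version.NewConstraint(r)
		if err != nil {
			return nil, err
		}
		all = append(all, c...)
	}
	return all, nil
}
```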
+type Config struct { + // RootModule points to the Config for the root module within the same + // module tree as this module. If this module _is_ the root module then + // this is self-referential. + Root *Config + + // ParentModule points to the Config for the module that directly calls + // this module. If this is the root module then this field is nil. + Parent *Config + + // Path is a sequence of module logical names that traverse from the root + // module to this config. Path is empty for the root module. + // + // This should only be used to display paths to the end-user in rare cases + // where we are talking about the static module tree, before module calls + // have been resolved. In most cases, an addrs.ModuleInstance describing + // a node in the dynamic module tree is better, since it will then include + // any keys resulting from evaluating "count" and "for_each" arguments. + Path addrs.Module + + // ChildModules points to the Config for each of the direct child modules + // called from this module. The keys in this map match the keys in + // Module.ModuleCalls. + Children map[string]*Config + + // Module points to the object describing the configuration for the + // various elements (variables, resources, etc) defined by this module. + Module *tfconfig.Module + + // CallPos is the source position for the header of the module block that + // requested this module. + // + // This field is meaningless for the root module, where its contents are undefined. + CallPos tfconfig.SourcePos + + // SourceAddr is the source address that the referenced module was requested + // from, as specified in configuration. + // + // This field is meaningless for the root module, where its contents are undefined. + SourceAddr string + + // Version is the specific version that was selected for this module, + // based on version constraints given in configuration. + // + // This field is nil if the module was loaded from a non-registry source, + // since versions are not supported for other sources. + // + // This field is meaningless for the root module, where it will always + // be nil. + Version *version.Version +} + +// ProviderDependencies returns the provider dependencies for the recieving +// config, including all of its descendent modules. +func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var name string + if len(c.Path) > 0 { + name = c.Path[len(c.Path)-1] + } + + ret := &moduledeps.Module{ + Name: name, + } + + providers := make(moduledeps.Providers) + for name, reqs := range c.Module.RequiredProviders { + inst := moduledeps.ProviderInstance(name) + var constraints version.Constraints + for _, reqStr := range reqs { + if reqStr != "" { + constraint, err := version.NewConstraint(reqStr) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name), + })) + continue + } + constraints = append(constraints, constraint...) 
+ } + } + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.NewConstraints(constraints), + Reason: moduledeps.ProviderDependencyExplicit, + } + } + ret.Providers = providers + + childNames := make([]string, 0, len(c.Children)) + for name := range c.Children { + childNames = append(childNames, name) + } + sort.Strings(childNames) + + for _, name := range childNames { + child, childDiags := c.Children[name].ProviderDependencies() + ret.Children = append(ret.Children, child) + diags = diags.Append(childDiags) + } + + return ret, diags +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go new file mode 100644 index 00000000..770d5dfb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go @@ -0,0 +1,144 @@ +package earlyconfig + +import ( + "fmt" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" +) + +// BuildConfig constructs a Config from a root module by loading all of its +// descendent modules via the given ModuleWalker. +func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + cfg := &Config{ + Module: root, + } + cfg.Root = cfg // Root module is self-referential. + cfg.Children, diags = buildChildModules(cfg, walker) + return cfg, diags +} + +func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + ret := map[string]*Config{} + calls := parent.Module.ModuleCalls + + // We'll sort the calls by their local names so that they'll appear in a + // predictable order in any logging that's produced during the walk. + callNames := make([]string, 0, len(calls)) + for k := range calls { + callNames = append(callNames, k) + } + sort.Strings(callNames) + + for _, callName := range callNames { + call := calls[callName] + path := make([]string, len(parent.Path)+1) + copy(path, parent.Path) + path[len(path)-1] = call.Name + + var vc version.Constraints + if strings.TrimSpace(call.Version) != "" { + var err error + vc, err = version.NewConstraint(call.Version) + if err != nil { + diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{ + Severity: tfconfig.DiagError, + Summary: "Invalid version constraint", + Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err), + })) + continue + } + } + + req := ModuleRequest{ + Name: call.Name, + Path: path, + SourceAddr: call.Source, + VersionConstraints: vc, + Parent: parent, + CallPos: call.Pos, + } + + mod, ver, modDiags := walker.LoadModule(&req) + diags = append(diags, modDiags...) + if mod == nil { + // nil can be returned if the source address was invalid and so + // nothing could be loaded whatsoever. LoadModule should've + // returned at least one error diagnostic in that case. 
+ continue + } + + child := &Config{ + Parent: parent, + Root: parent.Root, + Path: path, + Module: mod, + CallPos: call.Pos, + SourceAddr: call.Source, + Version: ver, + } + + child.Children, modDiags = buildChildModules(child, walker) + diags = diags.Append(modDiags) + + ret[call.Name] = child + } + + return ret, diags +} + +// ModuleRequest is used as part of the ModuleWalker interface used with +// function BuildConfig. +type ModuleRequest struct { + // Name is the "logical name" of the module call within configuration. + // This is provided in case the name is used as part of a storage key + // for the module, but implementations must otherwise treat it as an + // opaque string. It is guaranteed to have already been validated as an + // HCL identifier and UTF-8 encoded. + Name string + + // Path is a list of logical names that traverse from the root module to + // this module. This can be used, for example, to form a lookup key for + // each distinct module call in a configuration, allowing for multiple + // calls with the same name at different points in the tree. + Path addrs.Module + + // SourceAddr is the source address string provided by the user in + // configuration. + SourceAddr string + + // VersionConstraint is the version constraint applied to the module in + // configuration. + VersionConstraints version.Constraints + + // Parent is the partially-constructed module tree node that the loaded + // module will be added to. Callers may refer to any field of this + // structure except Children, which is still under construction when + // ModuleRequest objects are created and thus has undefined content. + // The main reason this is provided is so that full module paths can + // be constructed for uniqueness. + Parent *Config + + // CallRange is the source position for the header of the "module" block + // in configuration that prompted this request. + CallPos tfconfig.SourcePos +} + +// ModuleWalker is an interface used with BuildConfig. +type ModuleWalker interface { + LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) +} + +// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps +// a callback function, for more convenient use of that interface. 
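`ModuleWalkerFunc` (declared just below) lets a plain function satisfy `ModuleWalker`. A hedged usage sketch inside this package, with a hypothetical walker that treats every source address as a local directory; real callers must resolve registry and remote sources as well:

```go
// buildFromDisk wires BuildConfig to a walker backed by LoadModule.
func buildFromDisk(root *tfconfig.Module) (*Config, tfdiags.Diagnostics) {
	walker := ModuleWalkerFunc(func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
		mod, diags := LoadModule(req.SourceAddr) // treats the source as a local path
		return mod, nil, diags                   // nil version: not a registry module
	})
	return BuildConfig(root, walker)
}
```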
+type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) + +func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + return f(req) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go new file mode 100644 index 00000000..9b2fd7f7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go @@ -0,0 +1,78 @@ +package earlyconfig + +import ( + "fmt" + + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics { + ret := make(tfdiags.Diagnostics, len(diags)) + for i, diag := range diags { + ret[i] = wrapDiagnostic(diag) + } + return ret +} + +func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic { + return wrappedDiagnostic{ + d: diag, + } +} + +type wrappedDiagnostic struct { + d tfconfig.Diagnostic +} + +func (d wrappedDiagnostic) Severity() tfdiags.Severity { + switch d.d.Severity { + case tfconfig.DiagError: + return tfdiags.Error + case tfconfig.DiagWarning: + return tfdiags.Warning + default: + // Should never happen since there are no other severities + return 0 + } +} + +func (d wrappedDiagnostic) Description() tfdiags.Description { + // Since the inspect library doesn't produce precise source locations, + // we include the position information as part of the error message text. + // See the comment inside method "Source" for more information. + switch { + case d.d.Pos == nil: + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: d.d.Detail, + } + case d.d.Detail != "": + return tfdiags.Description{ + Summary: d.d.Summary, + Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail), + } + default: + return tfdiags.Description{ + Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line), + } + } +} + +func (d wrappedDiagnostic) Source() tfdiags.Source { + // Since the inspect library is constrained by the lowest common denominator + // between legacy HCL and modern HCL, it only returns ranges at whole-line + // granularity, and that isn't sufficient to populate a tfdiags.Source + // and so we'll just omit ranges altogether and include the line number in + // the Description text. + // + // Callers that want to return nicer errors should consider reacting to + // earlyconfig errors by attempting a follow-up parse with the normal + // config loader, which can produce more precise source location + // information. + return tfdiags.Source{} +} + +func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr { + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go new file mode 100644 index 00000000..a9cf10f3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go @@ -0,0 +1,20 @@ +// Package earlyconfig is a specialized alternative to the top-level "configs" +// package that does only shallow processing of configuration and is therefore +// able to be much more liberal than the full config loader in what it accepts. +// +// In particular, it can accept both current and legacy HCL syntax, and it +// ignores top-level blocks that it doesn't recognize. 
These two characteristics +// make this package ideal for dependency-checking use-cases so that we are +// more likely to be able to return an error message about an explicit +// incompatibility than to return a less-actionable message about a construct +// not being supported. +// +// However, its liberal approach also means it should be used sparingly. It +// exists primarily for "terraform init", so that it is able to detect +// incompatibilities more robustly when installing dependencies. For most +// other use-cases, use the "configs" and "configs/configload" packages. +// +// Package earlyconfig is a wrapper around the terraform-config-inspect +// codebase, adding to it just some helper functionality for Terraform's own +// use-cases. +package earlyconfig diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go new file mode 100644 index 00000000..d2d62879 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go @@ -0,0 +1,13 @@ +package earlyconfig + +import ( + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadModule loads some top-level metadata for the module in the given +// directory. +func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) { + mod, diags := tfconfig.LoadModule(dir) + return mod, wrapDiagnostics(diags) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go new file mode 100644 index 00000000..7096ff74 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go @@ -0,0 +1,125 @@ +package initwd + +import ( + "io" + "os" + "path/filepath" + "strings" +) + +// copyDir copies the src directory contents into dst. Both directories +// should already exist. +func copyDir(dst, src string) error { + src, err := filepath.EvalSymlinks(src) + if err != nil { + return err + } + + walkFn := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == src { + return nil + } + + if strings.HasPrefix(filepath.Base(path), ".") { + // Skip any dot files + if info.IsDir() { + return filepath.SkipDir + } else { + return nil + } + } + + // The "path" has the src prefixed to it. We need to join our + // destination with the path without the src on it. + dstPath := filepath.Join(dst, path[len(src):]) + + // we don't want to try and copy the same file over itself. + if eq, err := sameFile(path, dstPath); eq { + return nil + } else if err != nil { + return err + } + + // If we have a directory, make that subdirectory, then continue + // the walk. + if info.IsDir() { + if path == filepath.Join(src, dst) { + // dst is in src; don't walk it. + return nil + } + + if err := os.MkdirAll(dstPath, 0755); err != nil { + return err + } + + return nil + } + + // If the current path is a symlink, recreate the symlink relative to + // the dst directory + if info.Mode()&os.ModeSymlink == os.ModeSymlink { + target, err := os.Readlink(path) + if err != nil { + return err + } + + return os.Symlink(target, dstPath) + } + + // If we have a file, copy the contents. 
+ srcF, err := os.Open(path) + if err != nil { + return err + } + defer srcF.Close() + + dstF, err := os.Create(dstPath) + if err != nil { + return err + } + defer dstF.Close() + + if _, err := io.Copy(dstF, srcF); err != nil { + return err + } + + // Chmod it + return os.Chmod(dstPath, info.Mode()) + } + + return filepath.Walk(src, walkFn) +} + +// sameFile tried to determine if to paths are the same file. +// If the paths don't match, we lookup the inode on supported systems. +func sameFile(a, b string) (bool, error) { + if a == b { + return true, nil + } + + aIno, err := inode(a) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + bIno, err := inode(b) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if aIno > 0 && aIno == bIno { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go new file mode 100644 index 00000000..b9d938db --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go @@ -0,0 +1,7 @@ +// Package initwd contains various helper functions used by the "terraform init" +// command to initialize a working directory. +// +// These functions may also be used from testing code to simulate the behaviors +// of "terraform init" against test fixtures, but should not be used elsewhere +// in the main code. +package initwd diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go new file mode 100644 index 00000000..6b40d08d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go @@ -0,0 +1,363 @@ +package initwd + +import ( + "fmt" + "github.com/hashicorp/terraform/internal/earlyconfig" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strings" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/tfdiags" +) + +const initFromModuleRootCallName = "root" +const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "." + +// DirFromModule populates the given directory (which must exist and be +// empty) with the contents of the module at the given source address. +// +// It does this by installing the given module and all of its descendent +// modules in a temporary root directory and then copying the installed +// files into suitable locations. As a consequence, any diagnostics it +// generates will reveal the location of this temporary directory to the +// user. +// +// This rather roundabout installation approach is taken to ensure that +// installation proceeds in a manner identical to normal module installation. +// +// If the given source address specifies a sub-directory of the given +// package then only the sub-directory and its descendents will be copied +// into the given root directory, which will cause any relative module +// references using ../ from that module to be unresolvable. Error diagnostics +// are produced in that case, to prompt the user to rewrite the source strings +// to be absolute references to the original remote module. 
+func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // The way this function works is pretty ugly, but we accept it because + // -from-module is a less important case than normal module installation + // and so it's better to keep this ugly complexity out here rather than + // adding even more complexity to the normal module installer. + + // The target directory must exist but be empty. + { + entries, err := ioutil.ReadDir(rootDir) + if err != nil { + if os.IsNotExist(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Target directory does not exist", + fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read target directory", + fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err), + )) + } + return diags + } + haveEntries := false + for _, entry := range entries { + if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" { + continue + } + haveEntries = true + } + if haveEntries { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Can't populate non-empty directory", + fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir), + )) + return diags + } + } + + instDir := filepath.Join(rootDir, ".terraform/init-from-module") + inst := NewModuleInstaller(instDir, reg) + log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr) + os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too + err := os.MkdirAll(instDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create temporary directory", + fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err), + )) + return diags + } + + instManifest := make(modsdir.Manifest) + retManifest := make(modsdir.Manifest) + + fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr) + fakePos := tfconfig.SourcePos{ + Filename: fakeFilename, + Line: 1, + } + + // -from-module allows relative paths but it's different than a normal + // module address where it'd be resolved relative to the module call + // (which is synthetic, here.) To address this, we'll just patch up any + // relative paths to be absolute paths before we run, ensuring we'll + // get the right result. This also, as an important side-effect, ensures + // that the result will be "downloaded" with go-getter (copied from the + // source location), rather than just recorded as a relative path. + { + maybePath := filepath.ToSlash(sourceAddr) + if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") { + if wd, err := os.Getwd(); err == nil { + sourceAddr = filepath.Join(wd, sourceAddr) + log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr) + } + } + } + + // Now we need to create an artificial root module that will seed our + // installation process. + fakeRootModule := &tfconfig.Module{ + ModuleCalls: map[string]*tfconfig.ModuleCall{ + initFromModuleRootCallName: { + Name: initFromModuleRootCallName, + Source: sourceAddr, + Pos: fakePos, + }, + }, + } + + // wrapHooks filters hook notifications to only include Download calls + // and to trim off the initFromModuleRootCallName prefix. 
We'll produce + // our own Install notifications directly below. + wrapHooks := installHooksInitDir{ + Wrapped: hooks, + } + getter := reusingGetter{} + _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter) + diags = append(diags, instDiags...) + if instDiags.HasErrors() { + return diags + } + + // If all of that succeeded then we'll now migrate what was installed + // into the final directory structure. + err = os.MkdirAll(modulesDir, os.ModePerm) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to create local modules directory", + fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err), + )) + return diags + } + + recordKeys := make([]string, 0, len(instManifest)) + for k := range instManifest { + recordKeys = append(recordKeys, k) + } + sort.Strings(recordKeys) + + for _, recordKey := range recordKeys { + record := instManifest[recordKey] + + if record.Key == initFromModuleRootCallName { + // We've found the module the user requested, which we must + // now copy into rootDir so it can be used directly. + log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir) + err := copyDir(rootDir, record.Dir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to copy root module", + fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err), + )) + continue + } + + // We'll try to load the newly-copied module here just so we can + // sniff for any module calls that ../ out of the root directory + // and must thus be rewritten to be absolute addresses again. + // For now we can't do this rewriting automatically, but we'll + // generate an error to help the user do it manually. + mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway + if mod != nil { + for _, mc := range mod.ModuleCalls { + if pathTraversesUp(mc.Source) { + packageAddr, givenSubdir := splitAddrSubdir(sourceAddr) + newSubdir := filepath.Join(givenSubdir, mc.Source) + if pathTraversesUp(newSubdir) { + // This should never happen in any reasonable + // configuration since this suggests a path that + // traverses up out of the package root. We'll just + // ignore this, since we'll fail soon enough anyway + // trying to resolve this path when this module is + // loaded. + continue + } + + var newAddr = packageAddr + if newSubdir != "" { + newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir)) + } + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Root module references parent directory", + fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr), + )) + continue + } + } + } + + retManifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + continue + } + + if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) { + // Ignore the *real* root module, whose key is empty, since + // we're only interested in the module named "root" and its + // descendents. 
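+			// (The synthetic root call is named by initFromModuleRootCallName,
+			// so every key we care about here starts with "root.".)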
+			continue
+		}
+
+		newKey := record.Key[len(initFromModuleRootKeyPrefix):]
+		instPath := filepath.Join(modulesDir, newKey)
+		tempPath := filepath.Join(instDir, record.Key)
+
+		// tempPath won't be present for a module that was installed from
+		// a relative path, so in that case we just record the installation
+		// directory and assume it was already copied into place as part
+		// of its parent.
+		if _, err := os.Stat(tempPath); err != nil {
+			if !os.IsNotExist(err) {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Failed to stat temporary module install directory",
+					fmt.Sprintf("Error from stat %s for module %s: %s.", tempPath, newKey, err),
+				))
+				continue
+			}
+
+			var parentKey string
+			if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
+				parentKey = newKey[:lastDot]
+			} else {
+				parentKey = "" // parent is the root module
+			}
+
+			parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
+			parentNew := retManifest[parentKey]
+
+			// We need to figure out which portion of our directory is the
+			// parent package path and which portion is the subdirectory
+			// under that.
+			baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
+			if err != nil {
+				// Should never happen, because we constructed both directories
+				// from the same base and so they must have a common prefix.
+				panic(err)
+			}
+
+			newDir := filepath.Join(parentNew.Dir, baseDirRel)
+			log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
+			newRecord := record // shallow copy
+			newRecord.Dir = newDir
+			newRecord.Key = newKey
+			retManifest[newKey] = newRecord
+			hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
+			continue
+		}
+
+		err = os.MkdirAll(instPath, os.ModePerm)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to create module install directory",
+				fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
+			))
+			continue
+		}
+
+		// We copy rather than "rename" here because renaming between directories
+		// can be tricky in edge-cases like network filesystems, etc.
+		log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
+		err = copyDir(instPath, tempPath)
+		if err != nil {
+			diags = diags.Append(tfdiags.Sourceless(
+				tfdiags.Error,
+				"Failed to copy descendent module",
+				fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, instPath, err),
+			))
+			continue
+		}
+
+		subDir, err := filepath.Rel(tempPath, record.Dir)
+		if err != nil {
+			// Should never happen, because we constructed both directories
+			// from the same base and so they must have a common prefix.
+			panic(err)
+		}
+
+		newRecord := record // shallow copy
+		newRecord.Dir = filepath.Join(instPath, subDir)
+		newRecord.Key = newKey
+		retManifest[newKey] = newRecord
+		hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
+	}
+
+	err = retManifest.WriteSnapshotToDir(modulesDir)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to write module manifest",
+			fmt.Sprintf("Error writing module manifest: %s.", err),
+		))
+	}
+
+	if !diags.HasErrors() {
+		// Try to clean up our temporary directory, but don't worry if we don't
+		// succeed since it shouldn't hurt anything.
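+		// (instDir is under the target directory's own .terraform directory,
+		// so removing it cannot affect anything outside the working directory.)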
+ os.RemoveAll(instDir) + } + + return diags +} + +func pathTraversesUp(path string) bool { + return strings.HasPrefix(filepath.ToSlash(path), "../") +} + +// installHooksInitDir is an adapter wrapper for an InstallHooks that +// does some fakery to make downloads look like they are happening in their +// final locations, rather than in the temporary loader we use. +// +// It also suppresses "Install" calls entirely, since InitDirFromModule +// does its own installation steps after the initial installation pass +// has completed. +type installHooksInitDir struct { + Wrapped ModuleInstallHooks + ModuleInstallHooksImpl +} + +func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) { + if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) { + // We won't announce the root module, since hook implementations + // don't expect to see that and the caller will usually have produced + // its own user-facing notification about what it's doing anyway. + return + } + + trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):] + h.Wrapped.Download(trimAddr, packageAddr, version) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go new file mode 100644 index 00000000..2f306be7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go @@ -0,0 +1,212 @@ +package initwd + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/registry/regsrc" +) + +// We configure our own go-getter detector and getter sets here, because +// the set of sources we support is part of Terraform's documentation and +// so we don't want any new sources introduced in go-getter to sneak in here +// and work even though they aren't documented. This also insulates us from +// any meddling that might be done by other go-getter callers linked into our +// executable. + +var goGetterDetectors = []getter.Detector{ + new(getter.GitHubDetector), + new(getter.BitBucketDetector), + new(getter.GCSDetector), + new(getter.S3Detector), + new(getter.FileDetector), +} + +var goGetterNoDetectors = []getter.Detector{} + +var goGetterDecompressors = map[string]getter.Decompressor{ + "bz2": new(getter.Bzip2Decompressor), + "gz": new(getter.GzipDecompressor), + "xz": new(getter.XzDecompressor), + "zip": new(getter.ZipDecompressor), + + "tar.bz2": new(getter.TarBzip2Decompressor), + "tar.tbz2": new(getter.TarBzip2Decompressor), + + "tar.gz": new(getter.TarGzipDecompressor), + "tgz": new(getter.TarGzipDecompressor), + + "tar.xz": new(getter.TarXzDecompressor), + "txz": new(getter.TarXzDecompressor), +} + +var goGetterGetters = map[string]getter.Getter{ + "file": new(getter.FileGetter), + "gcs": new(getter.GCSGetter), + "git": new(getter.GitGetter), + "hg": new(getter.HgGetter), + "s3": new(getter.S3Getter), + "http": getterHTTPGetter, + "https": getterHTTPGetter, +} + +var getterHTTPClient = cleanhttp.DefaultClient() + +var getterHTTPGetter = &getter.HttpGetter{ + Client: getterHTTPClient, + Netrc: true, +} + +// A reusingGetter is a helper for the module installer that remembers +// the final resolved addresses of all of the sources it has already been +// asked to install, and will copy from a prior installation directory if +// it has the same resolved source address. 
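+//
+// For example, if two module calls resolve to the same remote package
+// address, the second install is satisfied by copying the directory that
+// the first install produced, rather than by fetching the package again.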
+// +// The keys in a reusingGetter are resolved and trimmed source addresses +// (with a scheme always present, and without any "subdir" component), +// and the values are the paths where each source was previously installed. +type reusingGetter map[string]string + +// getWithGoGetter retrieves the package referenced in the given address +// into the installation path and then returns the full path to any subdir +// indicated in the address. +// +// The errors returned by this function are those surfaced by the underlying +// go-getter library, which have very inconsistent quality as +// end-user-actionable error messages. At this time we do not have any +// reasonable way to improve these error messages at this layer because +// the underlying errors are not separately recognizable. +func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) { + packageAddr, subDir := splitAddrSubdir(addr) + + log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath) + + realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors) + if err != nil { + return "", err + } + + if isMaybeRelativeLocalPath(realAddr) { + return "", &MaybeRelativePathErr{addr} + } + + var realSubDir string + realAddr, realSubDir = splitAddrSubdir(realAddr) + if realSubDir != "" { + subDir = filepath.Join(realSubDir, subDir) + } + + if realAddr != packageAddr { + log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr) + } + + if prevDir, exists := g[realAddr]; exists { + log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath) + err := os.Mkdir(instPath, os.ModePerm) + if err != nil { + return "", fmt.Errorf("failed to create directory %s: %s", instPath, err) + } + err = copyDir(instPath, prevDir) + if err != nil { + return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err) + } + } else { + log.Printf("[TRACE] fetching %q to %q", realAddr, instPath) + client := getter.Client{ + Src: realAddr, + Dst: instPath, + Pwd: instPath, + + Mode: getter.ClientModeDir, + + Detectors: goGetterNoDetectors, // we already did detection above + Decompressors: goGetterDecompressors, + Getters: goGetterGetters, + } + err = client.Get() + if err != nil { + return "", err + } + // Remember where we installed this so we might reuse this directory + // on subsequent calls to avoid re-downloading. + g[realAddr] = instPath + } + + // Our subDir string can contain wildcards until this point, so that + // e.g. a subDir of * can expand to one top-level directory in a .tar.gz + // archive. Now that we've expanded the archive successfully we must + // resolve that into a concrete path. + var finalDir string + if subDir != "" { + finalDir, err = getter.SubdirGlob(instPath, subDir) + log.Printf("[TRACE] expanded %q to %q", subDir, finalDir) + if err != nil { + return "", err + } + } else { + finalDir = instPath + } + + // If we got this far then we have apparently succeeded in downloading + // the requested object! + return filepath.Clean(finalDir), nil +} + +// splitAddrSubdir splits the given address (which is assumed to be a +// registry address or go-getter-style address) into a package portion +// and a sub-directory portion. +// +// The package portion defines what should be downloaded and then the +// sub-directory portion, if present, specifies a sub-directory within +// the downloaded object (an archive, VCS repository, etc) that contains +// the module's configuration files. 
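+//
+// For example, "git::https://example.com/network.git//modules/vpc" names the
+// package "git::https://example.com/network.git" and the sub-directory
+// "modules/vpc" within it.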
+// +// The subDir portion will be returned as empty if no subdir separator +// ("//") is present in the address. +func splitAddrSubdir(addr string) (packageAddr, subDir string) { + return getter.SourceDirSubdir(addr) +} + +var localSourcePrefixes = []string{ + "./", + "../", + ".\\", + "..\\", +} + +func isLocalSourceAddr(addr string) bool { + for _, prefix := range localSourcePrefixes { + if strings.HasPrefix(addr, prefix) { + return true + } + } + return false +} + +func isRegistrySourceAddr(addr string) bool { + _, err := regsrc.ParseModuleSource(addr) + return err == nil +} + +type MaybeRelativePathErr struct { + Addr string +} + +func (e *MaybeRelativePathErr) Error() string { + return fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr) +} + +func isMaybeRelativeLocalPath(addr string) bool { + if strings.HasPrefix(addr, "file://") { + _, err := os.Stat(addr[7:]) + if err != nil { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go new file mode 100644 index 00000000..1150b093 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go @@ -0,0 +1,21 @@ +// +build linux darwin openbsd netbsd solaris dragonfly + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return st.Ino, nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go new file mode 100644 index 00000000..30532f54 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go @@ -0,0 +1,21 @@ +// +build freebsd + +package initwd + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go new file mode 100644 index 00000000..3ed58e4b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package initwd + +// no syscall.Stat_t on windows, return 0 for inodes +func inode(path string) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go new file mode 100644 index 00000000..6f77dcd8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go @@ -0,0 +1,56 @@ +package initwd + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-config-inspect/tfconfig" + "github.com/hashicorp/terraform/internal/earlyconfig" + "github.com/hashicorp/terraform/internal/modsdir" + "github.com/hashicorp/terraform/tfdiags" +) 
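The initwd entry points are meant to be used in sequence: install first, then load. A rough sketch of that pairing follows (hypothetical caller code, ignoring Go's internal-package restriction for the sake of illustration; LoadConfig is defined just below, and ModuleInstaller appears later in this diff):

```go
package main

import (
	"log"

	"github.com/hashicorp/terraform/internal/initwd"
	"github.com/hashicorp/terraform/registry"
)

func main() {
	// Install all direct and transitive module dependencies of the root
	// module in the current directory.
	inst := initwd.NewModuleInstaller(".terraform/modules", registry.NewClient(nil, nil))
	_, diags := inst.InstallModules(".", false, initwd.ModuleInstallHooksImpl{})
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}

	// Then load the early configuration tree from what was installed.
	cfg, moreDiags := initwd.LoadConfig(".", ".terraform/modules")
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
	log.Printf("loaded early config for module %q", cfg.Module.Path)
}
```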
+
+// LoadConfig loads a full configuration tree that has previously had all of
+// its dependent modules installed to the given modulesDir using a
+// ModuleInstaller.
+//
+// This uses the early configuration loader and thus only reads top-level
+// metadata from the modules in the configuration. Most callers should use
+// the configs/configload package to fully load a configuration.
+func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) {
+	rootMod, diags := earlyconfig.LoadModule(rootDir)
+	if rootMod == nil {
+		return nil, diags
+	}
+
+	manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir)
+	if err != nil {
+		diags = diags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Failed to read module manifest",
+			fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err),
+		))
+		return nil, diags
+	}
+
+	return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
+		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
+			var diags tfdiags.Diagnostics
+
+			key := manifest.ModuleKey(req.Path)
+			record, exists := manifest[key]
+			if !exists {
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Module not installed",
+					fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()),
+				))
+				return nil, nil, diags
+			}
+
+			mod, mDiags := earlyconfig.LoadModule(record.Dir)
+			diags = diags.Append(mDiags)
+			return mod, record.Version, diags
+		},
+	))
+}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
new file mode 100644
index 00000000..531310ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
@@ -0,0 +1,558 @@
+package initwd
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform-config-inspect/tfconfig"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/internal/earlyconfig"
+	"github.com/hashicorp/terraform/internal/modsdir"
+	"github.com/hashicorp/terraform/registry"
+	"github.com/hashicorp/terraform/registry/regsrc"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+type ModuleInstaller struct {
+	modsDir string
+	reg     *registry.Client
+}
+
+func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller {
+	return &ModuleInstaller{
+		modsDir: modsDir,
+		reg:     reg,
+	}
+}
+
+// InstallModules analyses the root module in the given directory and installs
+// all of its direct and transitive dependencies into the given modules
+// directory, which must already exist.
+//
+// Since InstallModules makes possibly-time-consuming calls to remote services,
+// a hook interface is supported to allow the caller to be notified when
+// each module is installed and, for remote modules, when downloading begins.
+// InstallModules guarantees that two hook calls will not happen concurrently
+// but it does not guarantee any particular ordering of hook calls. This
+// mechanism is for UI feedback only and does not give the caller any control
+// over the process.
+//
+// If modules are already installed in the target directory, they will be
+// skipped unless their source address or version has changed or unless
+// the upgrade flag is set.
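+//
+// For example, a module already recorded at version 1.2.0 still satisfies a
+// constraint of ">= 1.0.0" and is left in place, but tightening the
+// constraint to ">= 2.0.0" causes it to be reinstalled on the next run.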
+// +// InstallModules never deletes any directory, except in the case where it +// needs to replace a directory that is already present with a newly-extracted +// package. +// +// If the returned diagnostics contains errors then the module installation +// may have wholly or partially completed. Modules must be loaded in order +// to find their dependencies, so this function does many of the same checks +// as LoadConfig as a side-effect. +// +// If successful (the returned diagnostics contains no errors) then the +// first return value is the early configuration tree that was constructed by +// the installation process. +func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) { + log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir) + + rootMod, diags := earlyconfig.LoadModule(rootDir) + if rootMod == nil { + return nil, diags + } + + manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read modules manifest file", + fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err), + )) + return nil, diags + } + + getter := reusingGetter{} + cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter) + diags = append(diags, instDiags...) + + return cfg, diags +} + +func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if hooks == nil { + // Use our no-op implementation as a placeholder + hooks = ModuleInstallHooksImpl{} + } + + // Create a manifest record for the root module. This will be used if + // there are any relative-pathed modules in the root. + manifest[""] = modsdir.Record{ + Key: "", + Dir: rootDir, + } + + cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc( + func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + + key := manifest.ModuleKey(req.Path) + instPath := i.packageInstallPath(req.Path) + + log.Printf("[DEBUG] Module installer: begin %s", key) + + // First we'll check if we need to upgrade/replace an existing + // installed module, and delete it out of the way if so. + replace := upgrade + if !replace { + record, recorded := manifest[key] + switch { + case !recorded: + log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key) + replace = true + case record.SourceAddr != req.SourceAddr: + log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr) + replace = true + case record.Version != nil && !req.VersionConstraints.Check(record.Version): + log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints) + replace = true + } + } + + // If we _are_ planning to replace this module, then we'll remove + // it now so our installation code below won't conflict with any + // existing remnants. + if replace { + if _, recorded := manifest[key]; recorded { + log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key) + } + delete(manifest, key) + // Deleting a module invalidates all of its descendent modules too. + keyPrefix := key + "." 
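+				// Matching on the dotted prefix (rather than on the raw key)
+				// ensures that discarding "a" also discards "a.b" and "a.b.c"
+				// while leaving a sibling like "ab" untouched.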
+ for subKey := range manifest { + if strings.HasPrefix(subKey, keyPrefix) { + if _, recorded := manifest[subKey]; recorded { + log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey) + } + delete(manifest, subKey) + } + } + } + + record, recorded := manifest[key] + if !recorded { + // Clean up any stale cache directory that might be present. + // If this is a local (relative) source then the dir will + // not exist, but we'll ignore that. + log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key) + err := os.RemoveAll(instPath) + if err != nil && !os.IsNotExist(err) { + log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to remove local module cache", + fmt.Sprintf( + "Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s", + instPath, err, + ), + )) + return nil, nil, diags + } + } else { + // If this module is already recorded and its root directory + // exists then we will just load what's already there and + // keep our existing record. + info, err := os.Stat(record.Dir) + if err == nil && info.IsDir() { + mod, mDiags := earlyconfig.LoadModule(record.Dir) + diags = diags.Append(mDiags) + + log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir) + return mod, record.Version, diags + } + } + + // If we get down here then it's finally time to actually install + // the module. There are some variants to this process depending + // on what type of module source address we have. + switch { + + case isLocalSourceAddr(req.SourceAddr): + log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr) + mod, mDiags := i.installLocalModule(req, key, manifest, hooks) + diags = append(diags, mDiags...) + return mod, nil, diags + + case isRegistrySourceAddr(req.SourceAddr): + addr, err := regsrc.ParseModuleSource(req.SourceAddr) + if err != nil { + // Should never happen because isRegistrySourceAddr already validated + panic(err) + } + log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr) + + mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter) + diags = append(diags, mDiags...) + return mod, v, diags + + default: + log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr) + + mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter) + diags = append(diags, mDiags...) + return mod, nil, diags + } + + }, + )) + diags = append(diags, cDiags...) 
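+
+	// Note that the manifest is written below even when the walk above
+	// produced errors, so partial progress is still recorded on disk.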
+ + err := manifest.WriteSnapshotToDir(i.modsDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to update module manifest", + fmt.Sprintf("Unable to write the module manifest file: %s", err), + )) + } + + return cfg, diags +} + +func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + parentKey := manifest.ModuleKey(req.Parent.Path) + parentRecord, recorded := manifest[parentKey] + if !recorded { + // This is indicative of a bug rather than a user-actionable error + panic(fmt.Errorf("missing manifest record for parent module %s", parentKey)) + } + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + + // For local sources we don't actually need to modify the + // filesystem at all because the parent already wrote + // the files we need, and so we just load up what's already here. + newDir := filepath.Join(parentRecord.Dir, req.SourceAddr) + + log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir) + // it is possible that the local directory is a symlink + newDir, err := filepath.EvalSymlinks(newDir) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()), + )) + } + + mod, mDiags := earlyconfig.LoadModule(newDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(mDiags) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Dir: newDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir) + hooks.Install(key, nil, newDir) + + return mod, diags +} + +func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + hostname, err := addr.SvcHost() + if err != nil { + // If it looks like the user was trying to use punycode then we'll generate + // a specialized error for that case. We require the unicode form of + // hostname so that hostnames are always human-readable in configuration + // and punycode can't be used to hide a malicious module hostname. + if strings.HasPrefix(addr.RawHost.Raw, "xn--") { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. 
Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid module registry hostname", + fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + } + return nil, nil, diags + } + + reg := i.reg + + log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname) + resp, err := reg.ModuleVersions(addr) + if err != nil { + if registry.IsModuleNotFound(err) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + } else { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Error accessing remote module registry", + fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err), + )) + } + return nil, nil, diags + } + + // The response might contain information about dependencies to allow us + // to potentially optimize future requests, but we don't currently do that + // and so for now we'll just take the first item which is guaranteed to + // be the address we requested. + if len(resp.Modules) < 1 { + // Should never happen, but since this is a remote service that may + // be implemented by third-parties we will handle it gracefully. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, nil, diags + } + + modMeta := resp.Modules[0] + + var latestMatch *version.Version + var latestVersion *version.Version + for _, mv := range modMeta.Versions { + v, err := version.NewVersion(mv.Version) + if err != nil { + // Should never happen if the registry server is compliant with + // the protocol, but we'll warn if not to assist someone who + // might be developing a module registry server. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Warning, + "Invalid response from remote module registry", + fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + continue + } + + // If we've found a pre-release version then we'll ignore it unless + // it was exactly requested. 
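+		// For example, "1.2.0-beta1" is considered only when the constraint
+		// string is exactly "1.2.0-beta1".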
+ if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() { + log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v) + continue + } + + if latestVersion == nil || v.GreaterThan(latestVersion) { + latestVersion = v + } + + if req.VersionConstraints.Check(v) { + if latestMatch == nil || v.GreaterThan(latestMatch) { + latestMatch = v + } + } + } + + if latestVersion == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module has no versions", + fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname), + )) + return nil, nil, diags + } + + if latestMatch == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unresolvable module version constraint", + fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion), + )) + return nil, nil, diags + } + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, latestMatch) + + // If we manage to get down here then we've found a suitable version to + // install, so we need to ask the registry where we should download it from. + // The response to this is a go-getter-style address string. + dlAddr, err := reg.ModuleLocation(addr, latestMatch.String()) + if err != nil { + log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid response from remote module registry", + fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr) + + modDir, err := getter.getWithGoGetter(instPath, dlAddr) + if err != nil { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err), + )) + return nil, nil, diags + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir) + + if addr.RawSubmodule != "" { + // Append the user's requested subdirectory to any subdirectory that + // was implied by any of the nested layers we expanded within go-getter. + modDir = filepath.Join(modDir, addr.RawSubmodule) + } + + log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir) + + // Finally we are ready to try actually loading the module. + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For registry modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. 
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. + manifest[key] = modsdir.Record{ + Key: key, + Version: latestMatch, + Dir: modDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, latestMatch, modDir) + + return mod, latestMatch, diags +} + +func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Report up to the caller that we're about to start downloading. + packageAddr, _ := splitAddrSubdir(req.SourceAddr) + hooks.Download(key, packageAddr, nil) + + if len(req.VersionConstraints) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid version constraint", + fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line), + )) + return nil, diags + } + + modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr) + if err != nil { + if _, ok := err.(*MaybeRelativePathErr); ok { + log.Printf( + "[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../", + req.SourceAddr, + ) + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Module not found", + fmt.Sprintf( + "The module address %q could not be resolved.\n\n"+ + "If you intended this as a path relative to the current "+ + "module, use \"./%s\" instead. The \"./\" prefix "+ + "indicates that the address is a relative filesystem path.", + req.SourceAddr, req.SourceAddr, + ), + )) + } else { + // Errors returned by go-getter have very inconsistent quality as + // end-user error messages, but for now we're accepting that because + // we have no way to recognize any specific errors to improve them + // and masking the error entirely would hide valuable diagnostic + // information from the user. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to download module", + fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err), + )) + } + return nil, diags + + } + + log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir) + + mod, mDiags := earlyconfig.LoadModule(modDir) + if mod == nil { + // nil indicates missing or unreadable directory, so we'll + // discard the returned diags and return a more specific + // error message here. For go-getter modules this actually + // indicates a bug in the code above, since it's not the + // user's responsibility to create the directory in this case. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unreadable module directory", + fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir), + )) + } else { + diags = append(diags, mDiags...) + } + + // Note the local location in our manifest. 
+ manifest[key] = modsdir.Record{ + Key: key, + Dir: modDir, + SourceAddr: req.SourceAddr, + } + log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir) + hooks.Install(key, nil, modDir) + + return mod, diags +} + +func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string { + return filepath.Join(i.modsDir, strings.Join(modulePath, ".")) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go new file mode 100644 index 00000000..817a6dc8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go @@ -0,0 +1,36 @@ +package initwd + +import ( + version "github.com/hashicorp/go-version" +) + +// ModuleInstallHooks is an interface used to provide notifications about the +// installation process being orchestrated by InstallModules. +// +// This interface may have new methods added in future, so implementers should +// embed InstallHooksImpl to get no-op implementations of any unimplemented +// methods. +type ModuleInstallHooks interface { + // Download is called for modules that are retrieved from a remote source + // before that download begins, to allow a caller to give feedback + // on progress through a possibly-long sequence of downloads. + Download(moduleAddr, packageAddr string, version *version.Version) + + // Install is called for each module that is installed, even if it did + // not need to be downloaded from a remote source. + Install(moduleAddr string, version *version.Version, localPath string) +} + +// ModuleInstallHooksImpl is a do-nothing implementation of InstallHooks that +// can be embedded in another implementation struct to allow only partial +// implementation of the interface. +type ModuleInstallHooksImpl struct { +} + +func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) { +} + +func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) { +} + +var _ ModuleInstallHooks = ModuleInstallHooksImpl{} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go new file mode 100644 index 00000000..8cef80a3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go @@ -0,0 +1,73 @@ +package initwd + +import ( + "github.com/hashicorp/terraform/registry" + "testing" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/tfdiags" +) + +// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests, +// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows +// a test configuration to be loaded in a single step. +// +// If module installation fails, t.Fatal (or similar) is called to halt +// execution of the test, under the assumption that installation failures are +// not expected. If installation failures _are_ expected then use +// NewLoaderForTests and work with the loader object directly. If module +// installation succeeds but generates warnings, these warnings are discarded. +// +// If installation succeeds but errors are detected during loading then a +// possibly-incomplete config is returned along with error diagnostics. 
The +// test run is not aborted in this case, so that the caller can make assertions +// against the returned diagnostics. +// +// As with NewLoaderForTests, a cleanup function is returned which must be +// called before the test completes in order to remove the temporary +// modules directory. +func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) { + t.Helper() + + var diags tfdiags.Diagnostics + + loader, cleanup := configload.NewLoaderForTests(t) + inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil)) + + _, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{}) + diags = diags.Append(moreDiags) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + return nil, nil, func() {}, diags + } + + // Since module installer has modified the module manifest on disk, we need + // to refresh the cache of it in the loader. + if err := loader.RefreshModules(); err != nil { + t.Fatalf("failed to refresh modules after installation: %s", err) + } + + config, hclDiags := loader.LoadConfig(rootDir) + diags = diags.Append(hclDiags) + return config, loader, cleanup, diags +} + +// MustLoadConfigForTests is a variant of LoadConfigForTests which calls +// t.Fatal (or similar) if there are any errors during loading, and thus +// does not return diagnostics at all. +// +// This is useful for concisely writing tests that don't expect errors at +// all. For tests that expect errors and need to assert against them, use +// LoadConfigForTests instead. +func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) { + t.Helper() + + config, loader, cleanup, diags := LoadConfigForTests(t, rootDir) + if diags.HasErrors() { + cleanup() + t.Fatal(diags.Err()) + } + return config, loader, cleanup +} diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go new file mode 100644 index 00000000..104840b9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go @@ -0,0 +1,83 @@ +package initwd + +import ( + "fmt" + + version "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/internal/earlyconfig" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" +) + +// CheckCoreVersionRequirements visits each of the modules in the given +// early configuration tree and verifies that any given Core version constraints +// match with the version of Terraform Core that is being used. +// +// The returned diagnostics will contain errors if any constraints do not match. +// The returned diagnostics might also return warnings, which should be +// displayed to the user. +func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics { + if earlyConfig == nil { + return nil + } + + var diags tfdiags.Diagnostics + module := earlyConfig.Module + + var constraints version.Constraints + for _, constraintStr := range module.RequiredCore { + constraint, err := version.NewConstraint(constraintStr) + if err != nil { + // Unfortunately the early config parser doesn't preserve a source + // location for this, so we're unable to indicate a specific + // location where this constraint came from, but we can at least + // say which module set it. 
+ switch { + case len(earlyConfig.Path) == 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider version constraint", + fmt.Sprintf("Invalid version core constraint %q in the root module.", constraintStr), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid provider version constraint", + fmt.Sprintf("Invalid version core constraint %q in %s.", constraintStr, earlyConfig.Path), + )) + } + continue + } + constraints = append(constraints, constraint...) + } + + if !constraints.Check(tfversion.SemVer) { + switch { + case len(earlyConfig.Path) == 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported Terraform Core version", + fmt.Sprintf( + "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + tfversion.String(), + ), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Unsupported Terraform Core version", + fmt.Sprintf( + "Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(), + ), + )) + } + } + + for _, c := range earlyConfig.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go new file mode 100644 index 00000000..0d7d664f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go @@ -0,0 +1,3 @@ +// Package modsdir is an internal package containing the model types used to +// represent the manifest of modules in a local modules cache directory. +package modsdir diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go new file mode 100644 index 00000000..36f6c033 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go @@ -0,0 +1,138 @@ +package modsdir + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/addrs" +) + +// Record represents some metadata about an installed module, as part +// of a ModuleManifest. +type Record struct { + // Key is a unique identifier for this particular module, based on its + // position within the static module tree. + Key string `json:"Key"` + + // SourceAddr is the source address given for this module in configuration. + // This is used only to detect if the source was changed in configuration + // since the module was last installed, which means that the installer + // must re-install it. + SourceAddr string `json:"Source"` + + // Version is the exact version of the module, which results from parsing + // VersionStr. nil for un-versioned modules. + Version *version.Version `json:"-"` + + // VersionStr is the version specifier string. 
This is used only for + // serialization in snapshots and should not be accessed or updated + // by any other codepaths; use "Version" instead. + VersionStr string `json:"Version,omitempty"` + + // Dir is the path to the local directory where the module is installed. + Dir string `json:"Dir"` +} + +// Manifest is a map used to keep track of the filesystem locations +// and other metadata about installed modules. +// +// The configuration loader refers to this, while the module installer updates +// it to reflect any changes to the installed modules. +type Manifest map[string]Record + +func (m Manifest) ModuleKey(path addrs.Module) string { + return path.String() +} + +// manifestSnapshotFile is an internal struct used only to assist in our JSON +// serialization of manifest snapshots. It should not be used for any other +// purpose. +type manifestSnapshotFile struct { + Records []Record `json:"Modules"` +} + +func ReadManifestSnapshot(r io.Reader) (Manifest, error) { + src, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + if len(src) == 0 { + // This should never happen, but we'll tolerate it as if it were + // a valid empty JSON object. + return make(Manifest), nil + } + + var read manifestSnapshotFile + err = json.Unmarshal(src, &read) + + new := make(Manifest) + for _, record := range read.Records { + if record.VersionStr != "" { + record.Version, err = version.NewVersion(record.VersionStr) + if err != nil { + return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err) + } + } + if _, exists := new[record.Key]; exists { + // This should never happen in any valid file, so we'll catch it + // and report it to avoid confusing/undefined behavior if the + // snapshot file was edited incorrectly outside of Terraform. + return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key) + } + new[record.Key] = record + } + return new, nil +} + +func ReadManifestSnapshotForDir(dir string) (Manifest, error) { + fn := filepath.Join(dir, ManifestSnapshotFilename) + r, err := os.Open(fn) + if err != nil { + if os.IsNotExist(err) { + return make(Manifest), nil // missing file is okay and treated as empty + } + return nil, err + } + return ReadManifestSnapshot(r) +} + +func (m Manifest) WriteSnapshot(w io.Writer) error { + var write manifestSnapshotFile + + for _, record := range m { + // Make sure VersionStr is in sync with Version, since we encourage + // callers to manipulate Version and ignore VersionStr. 
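+		// A serialized record looks like, for example:
+		//   {"Key":"vpc","Source":"terraform-aws-modules/vpc/aws","Version":"1.2.0","Dir":".terraform/modules/vpc"}
+		// with "Version" omitted entirely for unversioned (e.g. local) modules.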
+ if record.Version != nil { + record.VersionStr = record.Version.String() + } else { + record.VersionStr = "" + } + write.Records = append(write.Records, record) + } + + src, err := json.Marshal(write) + if err != nil { + return err + } + + _, err = w.Write(src) + return err +} + +func (m Manifest) WriteSnapshotToDir(dir string) error { + fn := filepath.Join(dir, ManifestSnapshotFilename) + log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn) + w, err := os.Create(fn) + if err != nil { + return err + } + return m.WriteSnapshot(w) +} diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go new file mode 100644 index 00000000..9ebb5243 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go @@ -0,0 +1,3 @@ +package modsdir + +const ManifestSnapshotFilename = "modules.json" diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh new file mode 100644 index 00000000..de1d693c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# We do not run protoc under go:generate because we want to ensure that all +# dependencies of go:generate are "go get"-able for general dev environment +# usability. To compile all protobuf files in this repository, run +# "make protobuf" at the top-level. + +set -eu + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +cd "$DIR" + +protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./ diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go new file mode 100644 index 00000000..b2bdf888 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go @@ -0,0 +1,3529 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: tfplugin5.proto + +package tfplugin5 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Diagnostic_Severity int32 + +const ( + Diagnostic_INVALID Diagnostic_Severity = 0 + Diagnostic_ERROR Diagnostic_Severity = 1 + Diagnostic_WARNING Diagnostic_Severity = 2 +) + +var Diagnostic_Severity_name = map[int32]string{ + 0: "INVALID", + 1: "ERROR", + 2: "WARNING", +} + +var Diagnostic_Severity_value = map[string]int32{ + "INVALID": 0, + "ERROR": 1, + "WARNING": 2, +} + +func (x Diagnostic_Severity) String() string { + return proto.EnumName(Diagnostic_Severity_name, int32(x)) +} + +func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1, 0} +} + +type Schema_NestedBlock_NestingMode int32 + +const ( + Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0 + Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1 + Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2 + Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3 + Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4 + Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5 +) + +var Schema_NestedBlock_NestingMode_name = map[int32]string{ + 0: "INVALID", + 1: "SINGLE", + 2: "LIST", + 3: "SET", + 4: "MAP", + 5: "GROUP", +} + +var Schema_NestedBlock_NestingMode_value = map[string]int32{ + "INVALID": 0, + "SINGLE": 1, + "LIST": 2, + "SET": 3, + "MAP": 4, + "GROUP": 5, +} + +func (x Schema_NestedBlock_NestingMode) String() string { + return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x)) +} + +func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2, 0} +} + +// DynamicValue is an opaque encoding of terraform data, with the field name +// indicating the encoding scheme used. 
+type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{0} +} + +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (m *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(m, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +func (m *DynamicValue) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +type Diagnostic struct { + Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` + Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Diagnostic) Reset() { *m = Diagnostic{} } +func (m *Diagnostic) String() string { return proto.CompactTextString(m) } +func (*Diagnostic) ProtoMessage() {} +func (*Diagnostic) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{1} +} + +func (m *Diagnostic) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Diagnostic.Unmarshal(m, b) +} +func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic) +} +func (m *Diagnostic) XXX_Merge(src proto.Message) { + xxx_messageInfo_Diagnostic.Merge(m, src) +} +func (m *Diagnostic) XXX_Size() int { + return xxx_messageInfo_Diagnostic.Size(m) +} +func (m *Diagnostic) XXX_DiscardUnknown() { + xxx_messageInfo_Diagnostic.DiscardUnknown(m) +} + +var xxx_messageInfo_Diagnostic proto.InternalMessageInfo + +func (m *Diagnostic) GetSeverity() Diagnostic_Severity { + if m != nil { + return m.Severity + } + return Diagnostic_INVALID +} + +func (m *Diagnostic) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Diagnostic) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *Diagnostic) GetAttribute() *AttributePath { + if m != nil { + return m.Attribute + } + return nil +} + +type AttributePath struct { + Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*AttributePath) Reset() { *m = AttributePath{} } +func (m *AttributePath) String() string { return proto.CompactTextString(m) } +func (*AttributePath) ProtoMessage() {} +func (*AttributePath) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2} +} + +func (m *AttributePath) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath.Unmarshal(m, b) +} +func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic) +} +func (m *AttributePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath.Merge(m, src) +} +func (m *AttributePath) XXX_Size() int { + return xxx_messageInfo_AttributePath.Size(m) +} +func (m *AttributePath) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath proto.InternalMessageInfo + +func (m *AttributePath) GetSteps() []*AttributePath_Step { + if m != nil { + return m.Steps + } + return nil +} + +type AttributePath_Step struct { + // Types that are valid to be assigned to Selector: + // *AttributePath_Step_AttributeName + // *AttributePath_Step_ElementKeyString + // *AttributePath_Step_ElementKeyInt + Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} } +func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) } +func (*AttributePath_Step) ProtoMessage() {} +func (*AttributePath_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{2, 0} +} + +func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b) +} +func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic) +} +func (m *AttributePath_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributePath_Step.Merge(m, src) +} +func (m *AttributePath_Step) XXX_Size() int { + return xxx_messageInfo_AttributePath_Step.Size(m) +} +func (m *AttributePath_Step) XXX_DiscardUnknown() { + xxx_messageInfo_AttributePath_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo + +type isAttributePath_Step_Selector interface { + isAttributePath_Step_Selector() +} + +type AttributePath_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyString struct { + ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"` +} + +type AttributePath_Step_ElementKeyInt struct { + ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"` +} + +func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {} + +func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {} + +func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *AttributePath_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (m 
*AttributePath_Step) GetElementKeyString() string { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok { + return x.ElementKeyString + } + return "" +} + +func (m *AttributePath_Step) GetElementKeyInt() int64 { + if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok { + return x.ElementKeyInt + } + return 0 +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*AttributePath_Step) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AttributePath_Step_OneofMarshaler, _AttributePath_Step_OneofUnmarshaler, _AttributePath_Step_OneofSizer, []interface{}{ + (*AttributePath_Step_AttributeName)(nil), + (*AttributePath_Step_ElementKeyString)(nil), + (*AttributePath_Step_ElementKeyInt)(nil), + } +} + +func _AttributePath_Step_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AttributePath_Step) + // selector + switch x := m.Selector.(type) { + case *AttributePath_Step_AttributeName: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AttributeName) + case *AttributePath_Step_ElementKeyString: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.ElementKeyString) + case *AttributePath_Step_ElementKeyInt: + b.EncodeVarint(3<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.ElementKeyInt)) + case nil: + default: + return fmt.Errorf("AttributePath_Step.Selector has unexpected type %T", x) + } + return nil +} + +func _AttributePath_Step_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AttributePath_Step) + switch tag { + case 1: // selector.attribute_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Selector = &AttributePath_Step_AttributeName{x} + return true, err + case 2: // selector.element_key_string + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Selector = &AttributePath_Step_ElementKeyString{x} + return true, err + case 3: // selector.element_key_int + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Selector = &AttributePath_Step_ElementKeyInt{int64(x)} + return true, err + default: + return false, nil + } +} + +func _AttributePath_Step_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AttributePath_Step) + // selector + switch x := m.Selector.(type) { + case *AttributePath_Step_AttributeName: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.AttributeName))) + n += len(x.AttributeName) + case *AttributePath_Step_ElementKeyString: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.ElementKeyString))) + n += len(x.ElementKeyString) + case *AttributePath_Step_ElementKeyInt: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.ElementKeyInt)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Stop struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop) Reset() { *m = Stop{} } +func (m *Stop) String() string { return proto.CompactTextString(m) } +func (*Stop) ProtoMessage() {} +func (*Stop) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3} +} + +func (m *Stop) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Stop.Unmarshal(m, b) +} +func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop.Marshal(b, m, deterministic) +} +func (m *Stop) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop.Merge(m, src) +} +func (m *Stop) XXX_Size() int { + return xxx_messageInfo_Stop.Size(m) +} +func (m *Stop) XXX_DiscardUnknown() { + xxx_messageInfo_Stop.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop proto.InternalMessageInfo + +type Stop_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Request) Reset() { *m = Stop_Request{} } +func (m *Stop_Request) String() string { return proto.CompactTextString(m) } +func (*Stop_Request) ProtoMessage() {} +func (*Stop_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 0} +} + +func (m *Stop_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Request.Unmarshal(m, b) +} +func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic) +} +func (m *Stop_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Request.Merge(m, src) +} +func (m *Stop_Request) XXX_Size() int { + return xxx_messageInfo_Stop_Request.Size(m) +} +func (m *Stop_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Request proto.InternalMessageInfo + +type Stop_Response struct { + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Stop_Response) Reset() { *m = Stop_Response{} } +func (m *Stop_Response) String() string { return proto.CompactTextString(m) } +func (*Stop_Response) ProtoMessage() {} +func (*Stop_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{3, 1} +} + +func (m *Stop_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Stop_Response.Unmarshal(m, b) +} +func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic) +} +func (m *Stop_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Stop_Response.Merge(m, src) +} +func (m *Stop_Response) XXX_Size() int { + return xxx_messageInfo_Stop_Response.Size(m) +} +func (m *Stop_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Stop_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Stop_Response proto.InternalMessageInfo + +func (m *Stop_Response) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// RawState holds the stored state for a resource to be upgraded by the +// provider. It can be in one of two formats, the current json encoded format +// in bytes, or the legacy flatmap format as a map of strings. 
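// Illustrative sketch, not part of the generated file: normalizing the
// RawState message declared just below into a generic map. As the comment
// above says, stored state arrives either as current JSON bytes or as the
// legacy flatmap of string keys to string values. Requires only
// encoding/json; the helper name is hypothetical.
//
//	func rawStateToMap(rs *RawState) (map[string]interface{}, error) {
//		if len(rs.Json) > 0 {
//			var out map[string]interface{}
//			if err := json.Unmarshal(rs.Json, &out); err != nil {
//				return nil, err
//			}
//			return out, nil
//		}
//		// Legacy flatmap: every value is a string; real upgrade code must
//		// reinterpret these against the resource's older schema.
//		out := make(map[string]interface{}, len(rs.Flatmap))
//		for k, v := range rs.Flatmap {
//			out[k] = v
//		}
//		return out, nil
//	}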
+type RawState struct { + Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"` + Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RawState) Reset() { *m = RawState{} } +func (m *RawState) String() string { return proto.CompactTextString(m) } +func (*RawState) ProtoMessage() {} +func (*RawState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{4} +} + +func (m *RawState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RawState.Unmarshal(m, b) +} +func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RawState.Marshal(b, m, deterministic) +} +func (m *RawState) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawState.Merge(m, src) +} +func (m *RawState) XXX_Size() int { + return xxx_messageInfo_RawState.Size(m) +} +func (m *RawState) XXX_DiscardUnknown() { + xxx_messageInfo_RawState.DiscardUnknown(m) +} + +var xxx_messageInfo_RawState proto.InternalMessageInfo + +func (m *RawState) GetJson() []byte { + if m != nil { + return m.Json + } + return nil +} + +func (m *RawState) GetFlatmap() map[string]string { + if m != nil { + return m.Flatmap + } + return nil +} + +// Schema is the configuration schema for a Resource, Provider, or Provisioner. +type Schema struct { + // The version of the schema. + // Schemas are versioned, so that providers can upgrade a saved resource + // state when the schema is changed. + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // Block is the top level configuration block for this schema. 
+ Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo + +func (m *Schema) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +type Schema_Block struct { + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` + BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Block) Reset() { *m = Schema_Block{} } +func (m *Schema_Block) String() string { return proto.CompactTextString(m) } +func (*Schema_Block) ProtoMessage() {} +func (*Schema_Block) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 0} +} + +func (m *Schema_Block) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Block.Unmarshal(m, b) +} +func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic) +} +func (m *Schema_Block) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Block.Merge(m, src) +} +func (m *Schema_Block) XXX_Size() int { + return xxx_messageInfo_Schema_Block.Size(m) +} +func (m *Schema_Block) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Block.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Block proto.InternalMessageInfo + +func (m *Schema_Block) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Schema_Block) GetAttributes() []*Schema_Attribute { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock { + if m != nil { + return m.BlockTypes + } + return nil +} + +type Schema_Attribute struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"` + Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" 
json:"sensitive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} } +func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) } +func (*Schema_Attribute) ProtoMessage() {} +func (*Schema_Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 1} +} + +func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b) +} +func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic) +} +func (m *Schema_Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_Attribute.Merge(m, src) +} +func (m *Schema_Attribute) XXX_Size() int { + return xxx_messageInfo_Schema_Attribute.Size(m) +} +func (m *Schema_Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo + +func (m *Schema_Attribute) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Schema_Attribute) GetType() []byte { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema_Attribute) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema_Attribute) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *Schema_Attribute) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + +func (m *Schema_Attribute) GetComputed() bool { + if m != nil { + return m.Computed + } + return false +} + +func (m *Schema_Attribute) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +type Schema_NestedBlock struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"` + Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"` + MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema_NestedBlock) Reset() { *m = Schema_NestedBlock{} } +func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) } +func (*Schema_NestedBlock) ProtoMessage() {} +func (*Schema_NestedBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{5, 2} +} + +func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b) +} +func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic) +} +func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema_NestedBlock.Merge(m, src) +} +func (m *Schema_NestedBlock) XXX_Size() int { + return xxx_messageInfo_Schema_NestedBlock.Size(m) +} +func (m *Schema_NestedBlock) XXX_DiscardUnknown() { + xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo + +func (m *Schema_NestedBlock) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *Schema_NestedBlock) GetBlock() *Schema_Block { + if m != nil { + return m.Block + } + return nil +} + +func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode { + if m != nil { + return m.Nesting + } + return Schema_NestedBlock_INVALID +} + +func (m *Schema_NestedBlock) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema_NestedBlock) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +type GetProviderSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema) Reset() { *m = GetProviderSchema{} } +func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema) ProtoMessage() {} +func (*GetProviderSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6} +} + +func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b) +} +func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema.Merge(m, src) +} +func (m *GetProviderSchema) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema.Size(m) +} +func (m *GetProviderSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo + +type GetProviderSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Request) Reset() { *m = GetProviderSchema_Request{} } +func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Request) ProtoMessage() {} +func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 0} +} + +func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b) +} +func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Request.Merge(m, src) +} +func (m *GetProviderSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Request.Size(m) +} +func (m *GetProviderSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo + +type GetProviderSchema_Response struct { + Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"` + ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" 
json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProviderSchema_Response) Reset() { *m = GetProviderSchema_Response{} } +func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProviderSchema_Response) ProtoMessage() {} +func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{6, 1} +} + +func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b) +} +func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProviderSchema_Response.Merge(m, src) +} +func (m *GetProviderSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProviderSchema_Response.Size(m) +} +func (m *GetProviderSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo + +func (m *GetProviderSchema_Response) GetProvider() *Schema { + if m != nil { + return m.Provider + } + return nil +} + +func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema { + if m != nil { + return m.ResourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema { + if m != nil { + return m.DataSourceSchemas + } + return nil +} + +func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type PrepareProviderConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig) Reset() { *m = PrepareProviderConfig{} } +func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig) ProtoMessage() {} +func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7} +} + +func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b) +} +func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig.Merge(m, src) +} +func (m *PrepareProviderConfig) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig.Size(m) +} +func (m *PrepareProviderConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo + +type PrepareProviderConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Request) Reset() { *m = 
PrepareProviderConfig_Request{} } +func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Request) ProtoMessage() {} +func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 0} +} + +func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src) +} +func (m *PrepareProviderConfig_Request) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Request.Size(m) +} +func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type PrepareProviderConfig_Response struct { + PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrepareProviderConfig_Response) Reset() { *m = PrepareProviderConfig_Response{} } +func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) } +func (*PrepareProviderConfig_Response) ProtoMessage() {} +func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{7, 1} +} + +func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b) +} +func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic) +} +func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src) +} +func (m *PrepareProviderConfig_Response) XXX_Size() int { + return xxx_messageInfo_PrepareProviderConfig_Response.Size(m) +} +func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo + +func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { + if m != nil { + return m.PreparedConfig + } + return nil +} + +func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type UpgradeResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState) Reset() { *m = UpgradeResourceState{} } +func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState) ProtoMessage() {} +func (*UpgradeResourceState) Descriptor() ([]byte, []int) { + 
return fileDescriptor_17ae6090ff270234, []int{8} +} + +func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b) +} +func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState.Merge(m, src) +} +func (m *UpgradeResourceState) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState.Size(m) +} +func (m *UpgradeResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo + +type UpgradeResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + // version is the schema_version number recorded in the state file + Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + // raw_state is the raw states as stored for the resource. Core does + // not have access to the schema of prior_version, so it's the + // provider's responsibility to interpret this value using the + // appropriate older schema. The raw_state will be the json encoded + // state, or a legacy flat-mapped format. + RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Request) Reset() { *m = UpgradeResourceState_Request{} } +func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Request) ProtoMessage() {} +func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 0} +} + +func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src) +} +func (m *UpgradeResourceState_Request) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Request.Size(m) +} +func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo + +func (m *UpgradeResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *UpgradeResourceState_Request) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *UpgradeResourceState_Request) GetRawState() *RawState { + if m != nil { + return m.RawState + } + return nil +} + +type UpgradeResourceState_Response struct { + // new_state is a msgpack-encoded data structure that, when interpreted with + // the _current_ schema for this resource type, is functionally equivalent to + // that which was given in prior_state_raw. 
+ UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"` + // diagnostics describes any errors encountered during migration that could not + // be safely resolved, and warnings about any possibly-risky assumptions made + // in the upgrade process. + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpgradeResourceState_Response) Reset() { *m = UpgradeResourceState_Response{} } +func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*UpgradeResourceState_Response) ProtoMessage() {} +func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{8, 1} +} + +func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b) +} +func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic) +} +func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src) +} +func (m *UpgradeResourceState_Response) XXX_Size() int { + return xxx_messageInfo_UpgradeResourceState_Response.Size(m) +} +func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo + +func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { + if m != nil { + return m.UpgradedState + } + return nil +} + +func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateResourceTypeConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig) Reset() { *m = ValidateResourceTypeConfig{} } +func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig) ProtoMessage() {} +func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9} +} + +func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src) +} +func (m *ValidateResourceTypeConfig) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig.Size(m) +} +func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo + +type ValidateResourceTypeConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Request) Reset() { *m = ValidateResourceTypeConfig_Request{} } +func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 0} +} + +func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m) +} +func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateResourceTypeConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateResourceTypeConfig_Response) Reset() { *m = ValidateResourceTypeConfig_Response{} } +func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} +func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{9, 1} +} + +func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src) +} +func (m *ValidateResourceTypeConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m) +} +func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo + +func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ValidateDataSourceConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig) Reset() { *m = 
ValidateDataSourceConfig{} } +func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig) ProtoMessage() {} +func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10} +} + +func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src) +} +func (m *ValidateDataSourceConfig) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig.Size(m) +} +func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo + +type ValidateDataSourceConfig_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Request) Reset() { *m = ValidateDataSourceConfig_Request{} } +func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Request) ProtoMessage() {} +func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 0} +} + +func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m) +} +func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateDataSourceConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateDataSourceConfig_Response) Reset() { *m = ValidateDataSourceConfig_Response{} } +func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateDataSourceConfig_Response) ProtoMessage() {} +func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{10, 1} 
+} + +func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b) +} +func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src) +} +func (m *ValidateDataSourceConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m) +} +func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo + +func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type Configure struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure) Reset() { *m = Configure{} } +func (m *Configure) String() string { return proto.CompactTextString(m) } +func (*Configure) ProtoMessage() {} +func (*Configure) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11} +} + +func (m *Configure) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure.Unmarshal(m, b) +} +func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure.Marshal(b, m, deterministic) +} +func (m *Configure) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure.Merge(m, src) +} +func (m *Configure) XXX_Size() int { + return xxx_messageInfo_Configure.Size(m) +} +func (m *Configure) XXX_DiscardUnknown() { + xxx_messageInfo_Configure.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure proto.InternalMessageInfo + +type Configure_Request struct { + TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Request) Reset() { *m = Configure_Request{} } +func (m *Configure_Request) String() string { return proto.CompactTextString(m) } +func (*Configure_Request) ProtoMessage() {} +func (*Configure_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 0} +} + +func (m *Configure_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Request.Unmarshal(m, b) +} +func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic) +} +func (m *Configure_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Request.Merge(m, src) +} +func (m *Configure_Request) XXX_Size() int { + return xxx_messageInfo_Configure_Request.Size(m) +} +func (m *Configure_Request) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Request proto.InternalMessageInfo + +func (m *Configure_Request) GetTerraformVersion() string { + if m != nil { + return m.TerraformVersion + } + return "" +} + +func (m *Configure_Request) GetConfig() *DynamicValue { + 
if m != nil { + return m.Config + } + return nil +} + +type Configure_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Configure_Response) Reset() { *m = Configure_Response{} } +func (m *Configure_Response) String() string { return proto.CompactTextString(m) } +func (*Configure_Response) ProtoMessage() {} +func (*Configure_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{11, 1} +} + +func (m *Configure_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Configure_Response.Unmarshal(m, b) +} +func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic) +} +func (m *Configure_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Configure_Response.Merge(m, src) +} +func (m *Configure_Response) XXX_Size() int { + return xxx_messageInfo_Configure_Response.Size(m) +} +func (m *Configure_Response) XXX_DiscardUnknown() { + xxx_messageInfo_Configure_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Configure_Response proto.InternalMessageInfo + +func (m *Configure_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource) Reset() { *m = ReadResource{} } +func (m *ReadResource) String() string { return proto.CompactTextString(m) } +func (*ReadResource) ProtoMessage() {} +func (*ReadResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12} +} + +func (m *ReadResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource.Unmarshal(m, b) +} +func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic) +} +func (m *ReadResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource.Merge(m, src) +} +func (m *ReadResource) XXX_Size() int { + return xxx_messageInfo_ReadResource.Size(m) +} +func (m *ReadResource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource proto.InternalMessageInfo + +type ReadResource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Request) Reset() { *m = ReadResource_Request{} } +func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Request) ProtoMessage() {} +func (*ReadResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 0} +} + +func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b) +} +func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic) +} +func (m *ReadResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Request.Merge(m, src) +} +func (m *ReadResource_Request) XXX_Size() int { + return xxx_messageInfo_ReadResource_Request.Size(m) +} +func (m *ReadResource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo + +func (m *ReadResource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadResource_Request) GetCurrentState() *DynamicValue { + if m != nil { + return m.CurrentState + } + return nil +} + +func (m *ReadResource_Request) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type ReadResource_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResource_Response) Reset() { *m = ReadResource_Response{} } +func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadResource_Response) ProtoMessage() {} +func (*ReadResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{12, 1} +} + +func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b) +} +func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic) +} +func (m *ReadResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResource_Response.Merge(m, src) +} +func (m *ReadResource_Response) XXX_Size() int { + return xxx_messageInfo_ReadResource_Response.Size(m) +} +func (m *ReadResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo + +func (m *ReadResource_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ReadResource_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type PlanResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange) Reset() { *m = PlanResourceChange{} } +func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange) ProtoMessage() {} +func (*PlanResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13} +} + +func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b) +} +func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PlanResourceChange.Merge(m, src) +} +func (m *PlanResourceChange) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange.Size(m) +} +func (m *PlanResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo + +type PlanResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Request) Reset() { *m = PlanResourceChange_Request{} } +func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Request) ProtoMessage() {} +func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 0} +} + +func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b) +} +func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Request.Merge(m, src) +} +func (m *PlanResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Request.Size(m) +} +func (m *PlanResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo + +func (m *PlanResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue { + if m != nil { + return m.ProposedNewState + } + return nil +} + +func (m *PlanResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *PlanResourceChange_Request) GetPriorPrivate() []byte { + if m != nil { + return m.PriorPrivate + } + return nil +} + +type PlanResourceChange_Response struct { + PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional 
+ // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. + // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PlanResourceChange_Response) Reset() { *m = PlanResourceChange_Response{} } +func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*PlanResourceChange_Response) ProtoMessage() {} +func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{13, 1} +} + +func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b) +} +func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlanResourceChange_Response.Merge(m, src) +} +func (m *PlanResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_PlanResourceChange_Response.Size(m) +} +func (m *PlanResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo + +func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath { + if m != nil { + return m.RequiresReplace + } + return nil +} + +func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ApplyResourceChange struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange) Reset() { *m = ApplyResourceChange{} } +func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange) ProtoMessage() {} +func (*ApplyResourceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14} +} + +func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b) +} +func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange.Merge(m, src) +} +func (m *ApplyResourceChange) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange.Size(m) +} +func (m 
*ApplyResourceChange) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo + +type ApplyResourceChange_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"` + PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"` + Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Request) Reset() { *m = ApplyResourceChange_Request{} } +func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Request) ProtoMessage() {} +func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 0} +} + +func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src) +} +func (m *ApplyResourceChange_Request) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Request.Size(m) +} +func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo + +func (m *ApplyResourceChange_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue { + if m != nil { + return m.PriorState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue { + if m != nil { + return m.PlannedState + } + return nil +} + +func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte { + if m != nil { + return m.PlannedPrivate + } + return nil +} + +type ApplyResourceChange_Response struct { + NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"` + Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + // This may be set only by the helper/schema "SDK" in the main Terraform + // repository, to request that Terraform Core >=0.12 permit additional + // inconsistencies that can result from the legacy SDK type system + // and its imprecise mapping to the >=0.12 type system. + // The change in behavior implied by this flag makes sense only for the + // specific details of the legacy SDK type system, and are not a general + // mechanism to avoid proper type handling in providers. 
+ // + // ==== DO NOT USE THIS ==== + // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ==== + // ==== DO NOT USE THIS ==== + LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplyResourceChange_Response) Reset() { *m = ApplyResourceChange_Response{} } +func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) } +func (*ApplyResourceChange_Response) ProtoMessage() {} +func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{14, 1} +} + +func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b) +} +func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic) +} +func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src) +} +func (m *ApplyResourceChange_Response) XXX_Size() int { + return xxx_messageInfo_ApplyResourceChange_Response.Size(m) +} +func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo + +func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue { + if m != nil { + return m.NewState + } + return nil +} + +func (m *ApplyResourceChange_Response) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool { + if m != nil { + return m.LegacyTypeSystem + } + return false +} + +type ImportResourceState struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState) Reset() { *m = ImportResourceState{} } +func (m *ImportResourceState) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState) ProtoMessage() {} +func (*ImportResourceState) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15} +} + +func (m *ImportResourceState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState.Unmarshal(m, b) +} +func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic) +} +func (m *ImportResourceState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState.Merge(m, src) +} +func (m *ImportResourceState) XXX_Size() int { + return xxx_messageInfo_ImportResourceState.Size(m) +} +func (m *ImportResourceState) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo + +type ImportResourceState_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache 
int32 `json:"-"` +} + +func (m *ImportResourceState_Request) Reset() { *m = ImportResourceState_Request{} } +func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Request) ProtoMessage() {} +func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 0} +} + +func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b) +} +func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Request.Merge(m, src) +} +func (m *ImportResourceState_Request) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Request.Size(m) +} +func (m *ImportResourceState_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo + +func (m *ImportResourceState_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_Request) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +type ImportResourceState_ImportedResource struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"` + Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} } +func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_ImportedResource) ProtoMessage() {} +func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 1} +} + +func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b) +} +func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src) +} +func (m *ImportResourceState_ImportedResource) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m) +} +func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo + +func (m *ImportResourceState_ImportedResource) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ImportResourceState_ImportedResource) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +type 
ImportResourceState_Response struct { + ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ImportResourceState_Response) Reset() { *m = ImportResourceState_Response{} } +func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) } +func (*ImportResourceState_Response) ProtoMessage() {} +func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{15, 2} +} + +func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b) +} +func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic) +} +func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImportResourceState_Response.Merge(m, src) +} +func (m *ImportResourceState_Response) XXX_Size() int { + return xxx_messageInfo_ImportResourceState_Response.Size(m) +} +func (m *ImportResourceState_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo + +func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { + if m != nil { + return m.ImportedResources + } + return nil +} + +func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ReadDataSource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource) Reset() { *m = ReadDataSource{} } +func (m *ReadDataSource) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource) ProtoMessage() {} +func (*ReadDataSource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16} +} + +func (m *ReadDataSource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource.Unmarshal(m, b) +} +func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic) +} +func (m *ReadDataSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource.Merge(m, src) +} +func (m *ReadDataSource) XXX_Size() int { + return xxx_messageInfo_ReadDataSource.Size(m) +} +func (m *ReadDataSource) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo + +type ReadDataSource_Request struct { + TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Request) Reset() { *m = ReadDataSource_Request{} } +func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Request) 
ProtoMessage() {} +func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 0} +} + +func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b) +} +func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Request.Merge(m, src) +} +func (m *ReadDataSource_Request) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Request.Size(m) +} +func (m *ReadDataSource_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo + +func (m *ReadDataSource_Request) GetTypeName() string { + if m != nil { + return m.TypeName + } + return "" +} + +func (m *ReadDataSource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ReadDataSource_Response struct { + State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadDataSource_Response) Reset() { *m = ReadDataSource_Response{} } +func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) } +func (*ReadDataSource_Response) ProtoMessage() {} +func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{16, 1} +} + +func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b) +} +func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic) +} +func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadDataSource_Response.Merge(m, src) +} +func (m *ReadDataSource_Response) XXX_Size() int { + return xxx_messageInfo_ReadDataSource_Response.Size(m) +} +func (m *ReadDataSource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo + +func (m *ReadDataSource_Response) GetState() *DynamicValue { + if m != nil { + return m.State + } + return nil +} + +func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type GetProvisionerSchema struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema) Reset() { *m = GetProvisionerSchema{} } +func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema) ProtoMessage() {} +func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17} +} + +func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b) +} +func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, 
deterministic) +} +func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema.Merge(m, src) +} +func (m *GetProvisionerSchema) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema.Size(m) +} +func (m *GetProvisionerSchema) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo + +type GetProvisionerSchema_Request struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Request) Reset() { *m = GetProvisionerSchema_Request{} } +func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Request) ProtoMessage() {} +func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 0} +} + +func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src) +} +func (m *GetProvisionerSchema_Request) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Request.Size(m) +} +func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo + +type GetProvisionerSchema_Response struct { + Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetProvisionerSchema_Response) Reset() { *m = GetProvisionerSchema_Response{} } +func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) } +func (*GetProvisionerSchema_Response) ProtoMessage() {} +func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{17, 1} +} + +func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b) +} +func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic) +} +func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src) +} +func (m *GetProvisionerSchema_Response) XXX_Size() int { + return xxx_messageInfo_GetProvisionerSchema_Response.Size(m) +} +func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() { + xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo + +func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema { + if m != nil { + return m.Provisioner + } + return nil +} + +func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } 
+ return nil +} + +type ValidateProvisionerConfig struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig) Reset() { *m = ValidateProvisionerConfig{} } +func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig) ProtoMessage() {} +func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18} +} + +func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src) +} +func (m *ValidateProvisionerConfig) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig.Size(m) +} +func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo + +type ValidateProvisionerConfig_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Request) Reset() { *m = ValidateProvisionerConfig_Request{} } +func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Request) ProtoMessage() {} +func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{18, 0} +} + +func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Request) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m) +} +func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +type ValidateProvisionerConfig_Response struct { + Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidateProvisionerConfig_Response) Reset() { *m = ValidateProvisionerConfig_Response{} } +func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) } +func (*ValidateProvisionerConfig_Response) ProtoMessage() {} +func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { + return 
fileDescriptor_17ae6090ff270234, []int{18, 1} +} + +func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b) +} +func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic) +} +func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src) +} +func (m *ValidateProvisionerConfig_Response) XXX_Size() int { + return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m) +} +func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo + +func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +type ProvisionResource struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource) Reset() { *m = ProvisionResource{} } +func (m *ProvisionResource) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource) ProtoMessage() {} +func (*ProvisionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19} +} + +func (m *ProvisionResource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource.Unmarshal(m, b) +} +func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic) +} +func (m *ProvisionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource.Merge(m, src) +} +func (m *ProvisionResource) XXX_Size() int { + return xxx_messageInfo_ProvisionResource.Size(m) +} +func (m *ProvisionResource) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo + +type ProvisionResource_Request struct { + Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Request) Reset() { *m = ProvisionResource_Request{} } +func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Request) ProtoMessage() {} +func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 0} +} + +func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b) +} +func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Request.Merge(m, src) +} +func (m *ProvisionResource_Request) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Request.Size(m) +} +func (m *ProvisionResource_Request) XXX_DiscardUnknown() { + 
xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo + +func (m *ProvisionResource_Request) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *ProvisionResource_Request) GetConnection() *DynamicValue { + if m != nil { + return m.Connection + } + return nil +} + +type ProvisionResource_Response struct { + Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProvisionResource_Response) Reset() { *m = ProvisionResource_Response{} } +func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) } +func (*ProvisionResource_Response) ProtoMessage() {} +func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { + return fileDescriptor_17ae6090ff270234, []int{19, 1} +} + +func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b) +} +func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic) +} +func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProvisionResource_Response.Merge(m, src) +} +func (m *ProvisionResource_Response) XXX_Size() int { + return xxx_messageInfo_ProvisionResource_Response.Size(m) +} +func (m *ProvisionResource_Response) XXX_DiscardUnknown() { + xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo + +func (m *ProvisionResource_Response) GetOutput() string { + if m != nil { + return m.Output + } + return "" +} + +func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { + if m != nil { + return m.Diagnostics + } + return nil +} + +func init() { + proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value) + proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value) + proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue") + proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic") + proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath") + proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step") + proto.RegisterType((*Stop)(nil), "tfplugin5.Stop") + proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request") + proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response") + proto.RegisterType((*RawState)(nil), "tfplugin5.RawState") + proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry") + proto.RegisterType((*Schema)(nil), "tfplugin5.Schema") + proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block") + proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute") + proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock") + proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema") + proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request") + proto.RegisterType((*GetProviderSchema_Response)(nil), 
"tfplugin5.GetProviderSchema.Response") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry") + proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry") + proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig") + proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request") + proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response") + proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState") + proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request") + proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response") + proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig") + proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request") + proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response") + proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig") + proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request") + proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response") + proto.RegisterType((*Configure)(nil), "tfplugin5.Configure") + proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request") + proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response") + proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource") + proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request") + proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response") + proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange") + proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request") + proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response") + proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange") + proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request") + proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response") + proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState") + proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request") + proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource") + proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response") + proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource") + proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request") + proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response") + proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema") + proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request") + proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response") + proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig") + 
proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request") + proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response") + proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource") + proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request") + proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response") +} + +func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) } + +var fileDescriptor_17ae6090ff270234 = []byte{ + // 1880 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49, + 0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79, + 0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66, + 0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75, + 0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0, + 0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5, + 0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf, + 0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21, + 0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2, + 0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97, + 0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b, + 0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65, + 0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38, + 0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0, + 0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9, + 0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23, + 0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1, + 0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec, + 0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43, + 0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8, + 0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1, + 0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0, + 0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0, + 0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab, + 0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7, + 0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38, + 0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31, + 0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3, + 0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90, + 0x46, 0x5a, 0xb7, 0x8c, 
0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e, + 0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52, + 0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4, + 0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8, + 0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6, + 0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80, + 0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35, + 0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9, + 0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23, + 0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34, + 0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87, + 0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a, + 0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e, + 0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba, + 0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3, + 0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5, + 0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f, + 0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48, + 0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2, + 0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda, + 0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa, + 0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e, + 0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1, + 0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42, + 0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25, + 0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea, + 0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27, + 0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09, + 0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2, + 0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c, + 0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb, + 0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76, + 0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e, + 0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0, + 0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d, + 0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64, + 0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 
0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde, + 0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2, + 0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4, + 0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79, + 0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39, + 0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3, + 0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7, + 0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9, + 0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60, + 0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54, + 0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b, + 0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39, + 0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00, + 0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1, + 0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2, + 0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa, + 0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71, + 0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00, + 0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd, + 0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7, + 0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe, + 0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87, + 0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96, + 0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c, + 0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5, + 0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1, + 0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e, + 0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c, + 0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6, + 0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb, + 0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9, + 0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96, + 0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42, + 0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf, + 0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d, + 0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5, + 0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 
0x8c, 0xa1, 0x11, 0x6d, + 0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0, + 0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8, + 0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2, + 0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8, + 0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80, + 0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a, + 0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86, + 0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27, + 0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86, + 0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9, + 0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30, + 0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5, + 0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d, + 0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc, + 0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ProviderClient is the client API for Provider service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
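The generated `ProviderClient` declared next exposes one typed method per RPC in the tfplugin5 protocol, all funneled through a shared `*grpc.ClientConn`. Below is a minimal sketch of driving it by hand; note that Terraform itself obtains this connection through go-plugin's handshake, so the hard-coded address, the insecure dial, and the import path are assumptions made purely for illustration:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	// Assumed import path for the generated tfplugin5 package.
	tfplugin5 "github.com/hashicorp/terraform/internal/tfplugin5"
)

func main() {
	// Hypothetical address; go-plugin normally negotiates the transport
	// during its handshake rather than using a fixed port.
	conn, err := grpc.Dial("127.0.0.1:1234", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial provider: %v", err)
	}
	defer conn.Close()

	client := tfplugin5.NewProviderClient(conn)

	// GetSchema is the usual first call: it describes every resource and
	// data source type the provider supports.
	resp, err := client.GetSchema(context.Background(), &tfplugin5.GetProviderSchema_Request{})
	if err != nil {
		log.Fatalf("GetSchema: %v", err)
	}
	for name := range resp.ResourceSchemas {
		fmt.Println("resource type:", name)
	}

	// Stop requests a graceful shutdown of the provider process.
	if _, err := client.Stop(context.Background(), &tfplugin5.Stop_Request{}); err != nil {
		log.Printf("Stop: %v", err)
	}
}
```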
+type ProviderClient interface { + // ////// Information about what a provider supports/expects + GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) + PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) + PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) + ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) + ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) + ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // ////// Graceful Shutdown + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type providerClient struct { + cc *grpc.ClientConn +} + +func NewProviderClient(cc *grpc.ClientConn) ProviderClient { + return &providerClient{cc} +} + +func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) { + out := new(GetProviderSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) { + out := new(PrepareProviderConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) { + out := new(ValidateResourceTypeConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) { + out := new(ValidateDataSourceConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) { + out := new(UpgradeResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) { + out := new(Configure_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) { + out := new(ReadResource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) { + out := new(PlanResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) { + out := new(ApplyResourceChange_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) { + out := new(ImportResourceState_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) { + out := new(ReadDataSource_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProviderServer is the server API for Provider service. 
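For orientation, here is a minimal sketch of driving the generated client stub above. The endpoint and dial options are illustrative assumptions: `tfplugin5` is an internal package, so code like this only compiles inside the Terraform module itself, and real provider connections are brokered by the hashicorp/go-plugin handshake over a local socket rather than dialed by hand.

```go
package main

import (
	"context"
	"log"

	"github.com/hashicorp/terraform/internal/tfplugin5"
	"google.golang.org/grpc"
)

func main() {
	// Illustrative only: go-plugin normally supplies this connection.
	conn, err := grpc.Dial("127.0.0.1:10000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := tfplugin5.NewProviderClient(conn)
	resp, err := client.GetSchema(context.Background(), &tfplugin5.GetProviderSchema_Request{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("provider exposes %d resource schemas", len(resp.ResourceSchemas))
}
```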
+type ProviderServer interface { + // ////// Information about what a provider supports/expects + GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error) + PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error) + ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error) + ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error) + UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error) + // ////// One-time initialization, called before other functions below + Configure(context.Context, *Configure_Request) (*Configure_Response, error) + // ////// Managed Resource Lifecycle + ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error) + PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error) + ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) + ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) + ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // ////// Graceful Shutdown + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +func RegisterProviderServer(s *grpc.Server, srv ProviderServer) { + s.RegisterService(&_Provider_serviceDesc, srv) +} + +func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProviderSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareProviderConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PrepareProviderConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PrepareProviderConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateResourceTypeConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateDataSourceConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).UpgradeResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/UpgradeResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Configure_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Configure(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Configure", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadResource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlanResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).PlanResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/PlanResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyResourceChange_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ApplyResourceChange(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ApplyResourceChange", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportResourceState_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ImportResourceState(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ImportResourceState", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadDataSource_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).ReadDataSource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/ReadDataSource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provider/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provider_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provider", + HandlerType: (*ProviderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provider_GetSchema_Handler, + }, + { + MethodName: "PrepareProviderConfig", + Handler: _Provider_PrepareProviderConfig_Handler, + }, + { + MethodName: "ValidateResourceTypeConfig", + Handler: _Provider_ValidateResourceTypeConfig_Handler, + }, + { + MethodName: "ValidateDataSourceConfig", + Handler: _Provider_ValidateDataSourceConfig_Handler, + }, + { + MethodName: "UpgradeResourceState", + Handler: _Provider_UpgradeResourceState_Handler, + }, + { + MethodName: "Configure", + Handler: _Provider_Configure_Handler, + }, + { + MethodName: "ReadResource", + Handler: _Provider_ReadResource_Handler, + }, + { + MethodName: "PlanResourceChange", + Handler: _Provider_PlanResourceChange_Handler, + }, + { + 
MethodName: "ApplyResourceChange", + Handler: _Provider_ApplyResourceChange_Handler, + }, + { + MethodName: "ImportResourceState", + Handler: _Provider_ImportResourceState_Handler, + }, + { + MethodName: "ReadDataSource", + Handler: _Provider_ReadDataSource_Handler, + }, + { + MethodName: "Stop", + Handler: _Provider_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tfplugin5.proto", +} + +// ProvisionerClient is the client API for Provisioner service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ProvisionerClient interface { + GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) + Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) +} + +type provisionerClient struct { + cc *grpc.ClientConn +} + +func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient { + return &provisionerClient{cc} +} + +func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) { + out := new(GetProvisionerSchema_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) { + out := new(ValidateProvisionerConfig_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) { + stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...) + if err != nil { + return nil, err + } + x := &provisionerProvisionResourceClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Provisioner_ProvisionResourceClient interface { + Recv() (*ProvisionResource_Response, error) + grpc.ClientStream +} + +type provisionerProvisionResourceClient struct { + grpc.ClientStream +} + +func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) { + m := new(ProvisionResource_Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { + out := new(Stop_Response) + err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ProvisionerServer is the server API for Provisioner service. 
+type ProvisionerServer interface { + GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error) + ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error) + ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error + Stop(context.Context, *Stop_Request) (*Stop_Response, error) +} + +func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) { + s.RegisterService(&_Provisioner_serviceDesc, srv) +} + +func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetProvisionerSchema_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).GetSchema(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/GetSchema", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateProvisionerConfig_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ProvisionResource_Request) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream}) +} + +type Provisioner_ProvisionResourceServer interface { + Send(*ProvisionResource_Response) error + grpc.ServerStream +} + +type provisionerProvisionResourceServer struct { + grpc.ServerStream +} + +func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error { + return x.ServerStream.SendMsg(m) +} + +func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Stop_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProvisionerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tfplugin5.Provisioner/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request)) + } + return interceptor(ctx, in, info, handler) +} + +var _Provisioner_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tfplugin5.Provisioner", + HandlerType: (*ProvisionerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetSchema", + Handler: _Provisioner_GetSchema_Handler, + }, + { + MethodName: "ValidateProvisionerConfig", + Handler: _Provisioner_ValidateProvisionerConfig_Handler, + }, 
+ { + MethodName: "Stop", + Handler: _Provisioner_Stop_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ProvisionResource", + Handler: _Provisioner_ProvisionResource_Handler, + ServerStreams: true, + }, + }, + Metadata: "tfplugin5.proto", +} diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto new file mode 100644 index 00000000..0837e1d4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto @@ -0,0 +1 @@ +../../docs/plugin-protocol/tfplugin5.1.proto \ No newline at end of file diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go new file mode 100644 index 00000000..8f89909c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go @@ -0,0 +1,5 @@ +// Package blocktoattr includes some helper functions that can perform +// preprocessing on a HCL body where a configschema.Block schema is available +// in order to allow list and set attributes defined in the schema to be +// optionally written by the user as block syntax. +package blocktoattr diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go new file mode 100644 index 00000000..d8c2e775 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go @@ -0,0 +1,187 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization +// functionality to allow attributes that are specified as having list or set +// type in the schema to be written with HCL block syntax as multiple nested +// blocks with the attribute name as the block type. +// +// This partially restores some of the block/attribute confusion from HCL 1 +// so that existing patterns that depended on that confusion can continue to +// be used in the short term while we settle on a longer-term strategy. +// +// Most of the fixup work is actually done when the returned body is +// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual +// decode of the body might not, if the content of the body is so ambiguous +// that there's no safe way to map it to the schema. +func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body { + // The schema should never be nil, but in practice it seems to be sometimes + // in the presence of poorly-configured test mocks, so we'll be robust + // by synthesizing an empty one. + if schema == nil { + schema = &configschema.Block{} + } + + return &fixupBody{ + original: body, + schema: schema, + names: ambiguousNames(schema), + } +} + +type fixupBody struct { + original hcl.Body + schema *configschema.Block + names map[string]struct{} +} + +// Content decodes content from the body. The given schema must be the lower-level +// representation of the same schema that was previously passed to FixUpBlockAttrs, +// or else the result is undefined. 
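To make the fixup concrete, here is a minimal, self-contained sketch (the "rule" schema, file name, and configuration are invented for illustration) showing HCL block syntax decoding against an attribute that the schema declares as a list of objects:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/hcl2/hcldec"
	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/lang/blocktoattr"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	// "rule" is declared as a list-of-objects attribute, but written below
	// in nested block syntax, as HCL 1 tolerated.
	src := []byte("rule {\n  port = 80\n}\nrule {\n  port = 443\n}\n")

	schema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"rule": {
				Type:     cty.List(cty.Object(map[string]cty.Type{"port": cty.Number})),
				Optional: true,
			},
		},
	}

	f, diags := hclsyntax.ParseConfig(src, "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}

	// Without the fixup this decode would fail: the body contains blocks
	// where the schema expects an attribute.
	body := blocktoattr.FixUpBlockAttrs(f.Body, schema)
	val, diags := hcldec.Decode(body, schema.DecoderSpec(), nil)
	if diags.HasErrors() {
		panic(diags)
	}
	fmt.Printf("%#v\n", val) // object with "rule" as a two-element list
}
```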
+func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, diags := b.original.Content(schema) + return b.fixupContent(content), diags +} + +func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) { + schema = b.effectiveSchema(schema) + content, remain, diags := b.original.PartialContent(schema) + remain = &fixupBody{ + original: remain, + schema: b.schema, + names: b.names, + } + return b.fixupContent(content), remain, diags +} + +func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) { + // FixUpBlockAttrs is not intended to be used in situations where we'd use + // JustAttributes, so we just pass this through verbatim to complete our + // implementation of hcl.Body. + return b.original.JustAttributes() +} + +func (b *fixupBody) MissingItemRange() hcl.Range { + return b.original.MissingItemRange() +} + +// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's +// content to determine whether the author has used attribute or block syntax +// for each of the ambiguous attributes where both are permitted. +// +// The resulting schema will always contain all of the same names that are +// in the given schema, but some attribute schemas may instead be replaced by +// block header schemas. +func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema { + return effectiveSchema(given, b.original, b.names, true) +} + +func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent { + var ret hcl.BodyContent + ret.Attributes = make(hcl.Attributes) + for name, attr := range content.Attributes { + ret.Attributes[name] = attr + } + blockAttrVals := make(map[string][]*hcl.Block) + for _, block := range content.Blocks { + if _, exists := b.names[block.Type]; exists { + // If we get here then we've found a block type whose instances need + // to be re-interpreted as a list-of-objects attribute. We'll gather + // those up and fix them up below. + blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block) + continue + } + + // We need to now re-wrap our inner body so it will be subject to the + // same attribute-as-block fixup when recursively decoded. + retBlock := *block // shallow copy + if blockS, ok := b.schema.BlockTypes[block.Type]; ok { + // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then + retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block) + } + + ret.Blocks = append(ret.Blocks, &retBlock) + } + // Now we'll install synthetic attributes for each of our fixups. We can't + // do this exactly because HCL's information model expects an attribute + // to be a single decl but we have multiple separate blocks. We'll + // approximate things, then, by using only our first block for the source + // location information. (We are guaranteed at least one by the above logic.)
+ for name, blocks := range blockAttrVals { + ret.Attributes[name] = &hcl.Attribute{ + Name: name, + Expr: &fixupBlocksExpr{ + blocks: blocks, + ety: b.schema.Attributes[name].Type.ElementType(), + }, + + Range: blocks[0].DefRange, + NameRange: blocks[0].TypeRange, + } + } + return &ret +} + +type fixupBlocksExpr struct { + blocks hcl.Blocks + ety cty.Type +} + +func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { + // In order to produce a suitable value for our expression we need to + // now decode the whole descendent block structure under each of our block + // bodies. + // + // That requires us to do something rather strange: we must construct a + // synthetic block type schema derived from the element type of the + // attribute, thus inverting our usual direction of lowering a schema + // into an implied type. Because a type is less detailed than a schema, + // the result is imprecise and in particular will just consider all + // the attributes to be optional and let the provider eventually decide + // whether to return errors if they turn out to be null when required. + schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety + spec := schema.DecoderSpec() + + vals := make([]cty.Value, len(e.blocks)) + var diags hcl.Diagnostics + for i, block := range e.blocks { + body := FixUpBlockAttrs(block.Body, schema) + val, blockDiags := hcldec.Decode(body, spec, ctx) + diags = append(diags, blockDiags...) + if val == cty.NilVal { + val = cty.UnknownVal(e.ety) + } + vals[i] = val + } + if len(vals) == 0 { + return cty.ListValEmpty(e.ety), diags + } + return cty.ListVal(vals), diags +} + +func (e *fixupBlocksExpr) Variables() []hcl.Traversal { + var ret []hcl.Traversal + schema := SchemaForCtyElementType(e.ety) + spec := schema.DecoderSpec() + for _, block := range e.blocks { + ret = append(ret, hcldec.Variables(block.Body, spec)...) + } + return ret +} + +func (e *fixupBlocksExpr) Range() hcl.Range { + // This is not really an appropriate range for the expression but it's + // the best we can do from here. + return e.blocks[0].DefRange +} + +func (e *fixupBlocksExpr) StartRange() hcl.Range { + return e.blocks[0].DefRange +} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go new file mode 100644 index 00000000..47a02565 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go @@ -0,0 +1,146 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +func ambiguousNames(schema *configschema.Block) map[string]struct{} { + if schema == nil { + return nil + } + ambiguousNames := make(map[string]struct{}) + for name, attrS := range schema.Attributes { + aty := attrS.Type + if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() { + ambiguousNames[name] = struct{}{} + } + } + return ambiguousNames +} + +func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema { + ret := &hcl.BodySchema{} + + appearsAsBlock := make(map[string]struct{}) + { + // We'll construct some throwaway schemas here just to probe for + // whether each of our ambiguous names seems to be being used as + // an attribute or a block. 
We need to check both because in JSON + // syntax we rely on the schema to decide between attribute or block + // interpretation and so JSON will always answer yes to both of + // these questions and we want to prefer the attribute interpretation + // in that case. + var probeSchema hcl.BodySchema + + for name := range ambiguousNames { + probeSchema = hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: name, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + if _, exists := content.Attributes[name]; exists { + // Can decode as an attribute, so we'll go with that. + continue + } + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: name, + }, + }, + } + content, _, _ = body.PartialContent(&probeSchema) + if len(content.Blocks) > 0 || dynamicExpanded { + // A dynamic block with an empty iterator returns nothing. + // If there's no attribute and we have either a block or a + // dynamic expansion, we need to rewrite this one as a + // block for a successful result. + appearsAsBlock[name] = struct{}{} + } + } + if !dynamicExpanded { + // If we're deciding for a context where dynamic blocks haven't + // been expanded yet then we need to probe for those too. + probeSchema = hcl.BodySchema{ + Blocks: []hcl.BlockHeaderSchema{ + { + Type: "dynamic", + LabelNames: []string{"type"}, + }, + }, + } + content, _, _ := body.PartialContent(&probeSchema) + for _, block := range content.Blocks { + if _, exists := ambiguousNames[block.Labels[0]]; exists { + appearsAsBlock[block.Labels[0]] = struct{}{} + } + } + } + } + + for _, attrS := range given.Attributes { + if _, exists := appearsAsBlock[attrS.Name]; exists { + ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{ + Type: attrS.Name, + }) + } else { + ret.Attributes = append(ret.Attributes, attrS) + } + } + + // Anything that is specified as a block type in the input schema remains + // that way by just passing through verbatim. + ret.Blocks = append(ret.Blocks, given.Blocks...) + + return ret +} + +// SchemaForCtyElementType converts a cty object type into an +// approximately-equivalent configschema.Block representing the element of +// a list or set. If the given type is not an object type then this +// function will panic. +func SchemaForCtyElementType(ty cty.Type) *configschema.Block { + atys := ty.AttributeTypes() + ret := &configschema.Block{ + Attributes: make(map[string]*configschema.Attribute, len(atys)), + } + for name, aty := range atys { + ret.Attributes[name] = &configschema.Attribute{ + Type: aty, + Optional: true, + } + } + return ret +} + +// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type +// into an approximately-equivalent configschema.NestedBlock. If the given type +// is not of the expected kind then this function will panic. +func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock { + var nesting configschema.NestingMode + switch { + case ty.IsListType(): + nesting = configschema.NestingList + case ty.IsSetType(): + nesting = configschema.NestingSet + default: + panic("unsuitable type") + } + nested := SchemaForCtyElementType(ty.ElementType()) + return &configschema.NestedBlock{ + Nesting: nesting, + Block: *nested, + } +} + +// TypeCanBeBlocks returns true if the given type is a list-of-object or +// set-of-object type, and would thus be subject to the blocktoattr fixup +// if used as an attribute type. 
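A quick sketch of the schema-from-type inversion these helpers perform (the object type is invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs/configschema"
	"github.com/hashicorp/terraform/lang/blocktoattr"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	ety := cty.Object(map[string]cty.Type{
		"name": cty.String,
		"port": cty.Number,
	})

	// Every attribute in the synthesized block is Optional, because a bare
	// cty type carries no required/optional information.
	block := blocktoattr.SchemaForCtyElementType(ety)
	fmt.Println(block.ImpliedType().Equals(ety)) // true: the round trip is lossless

	nested := blocktoattr.SchemaForCtyContainerType(cty.List(ety))
	fmt.Println(nested.Nesting == configschema.NestingList) // true
}
```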
+func TypeCanBeBlocks(ty cty.Type) bool { + return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType() +} diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go new file mode 100644 index 00000000..e123b8aa --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go @@ -0,0 +1,43 @@ +package blocktoattr + +import ( + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/configs/configschema" +) + +// ExpandedVariables finds all of the global variables referenced in the +// given body with the given schema while taking into account the possibilities +// both of "dynamic" blocks being expanded and the possibility of certain +// attributes being written instead as nested blocks as allowed by the +// FixUpBlockAttrs function. +// +// This function exists to allow variables to be analyzed prior to dynamic +// block expansion while also dealing with the fact that dynamic block expansion +// might in turn produce nested blocks that are subject to FixUpBlockAttrs. +// +// This is intended as a drop-in replacement for dynblock.VariablesHCLDec, +// which is itself a drop-in replacement for hcldec.Variables. +func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal { + rootNode := dynblock.WalkVariables(body) + return walkVariables(rootNode, body, schema) +} + +func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal { + givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec()) + ambiguousNames := ambiguousNames(schema) + effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false) + vars, children := node.Visit(effectiveRawSchema) + + for _, child := range children { + if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists { + vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...) + } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists { + synthSchema := SchemaForCtyElementType(attrS.Type.ElementType()) + vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...) + } + } + + return vars +} diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go new file mode 100644 index 00000000..80313d6c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/data.go @@ -0,0 +1,33 @@ +package lang + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Data is an interface whose implementations can provide cty.Value +// representations of objects identified by referenceable addresses from +// the addrs package. +// +// This interface will grow each time a new type of reference is added, and so +// implementations outside of the Terraform codebases are not advised. +// +// Each method returns a suitable value and optionally some diagnostics. If the +// returned diagnostics contains errors then the type of the returned value is +// used to construct an unknown value of the same type which is then used in +// place of the requested object so that type checking can still proceed. 
In +// cases where it's not possible to even determine a suitable result type, +// cty.DynamicVal is returned along with errors describing the problem. +type Data interface { + StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics + + GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) + GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go new file mode 100644 index 00000000..af5c5cac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/doc.go @@ -0,0 +1,5 @@ +// Package lang deals with the runtime aspects of Terraform's configuration +// language, with concerns such as expression evaluation. It is closely related +// to sibling package "configs", which is responsible for configuration +// parsing and static validation. +package lang diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go new file mode 100644 index 00000000..a3fb363e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/eval.go @@ -0,0 +1,477 @@ +package lang + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/hcl2/ext/dynblock" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang/blocktoattr" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" +) + +// ExpandBlock expands any "dynamic" blocks present in the given body. The +// result is a body with those blocks expanded, ready to be evaluated with +// EvalBlock. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. +func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + traversals := dynblock.ExpandVariablesHCLDec(body, spec) + refs, diags := References(traversals) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + + return dynblock.Expand(body, ctx), diags +} + +// EvalBlock evaluates the given body using the given block schema and returns +// a cty object value representing its contents. The type of the result conforms +// to the implied type of the given schema. +// +// This function does not automatically expand "dynamic" blocks within the +// body. If that is desired, first call the ExpandBlock method to obtain +// an expanded body to pass to this method. +// +// If the returned diagnostics contains errors then the result may be +// incomplete or invalid. 
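For a feel of this evaluation surface end to end, here is a minimal sketch using the EvalExpr helper defined a little further below. A zero-value Scope suffices only because the expression contains no references; evaluating anything that mentions resources, variables, or locals requires a real lang.Data implementation behind the Scope.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/terraform/lang"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	expr, diags := hclsyntax.ParseExpression([]byte(`upper("oneview")`), "inline", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags)
	}

	scope := &lang.Scope{} // no Data: fine only for reference-free expressions
	val, valDiags := scope.EvalExpr(expr, cty.String)
	if valDiags.HasErrors() {
		panic(valDiags)
	}
	fmt.Println(val.AsString()) // ONEVIEW
}
```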
+func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) { + spec := schema.DecoderSpec() + + refs, diags := ReferencesInBlock(body, schema) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // HACK: In order to remain compatible with some assumptions made in + // Terraform v0.11 and earlier about the approximate equivalence of + // attribute vs. block syntax, we do a just-in-time fixup here to allow + // any attribute in the schema that has a list-of-objects or set-of-objects + // kind to potentially be populated instead by one or more nested blocks + // whose type is the attribute name. + body = blocktoattr.FixUpBlockAttrs(body, schema) + + val, evalDiags := hcldec.Decode(body, spec, ctx) + diags = diags.Append(evalDiags) + + return val, diags +} + +// EvalExpr evaluates a single expression in the receiving context and returns +// the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + refs, diags := ReferencesInExpr(expr) + + ctx, ctxDiags := s.EvalContext(refs) + diags = diags.Append(ctxDiags) + if diags.HasErrors() { + // We'll stop early if we found problems in the references, because + // it's likely evaluation will produce redundant copies of the same errors. + return cty.UnknownVal(wantType), diags + } + + val, evalDiags := expr.Value(ctx) + diags = diags.Append(evalDiags) + + if wantType != cty.DynamicPseudoType { + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: expr.Range().Ptr(), + }) + } + } + + return val, diags +} + +// EvalReference evaluates the given reference in the receiving scope and +// returns the resulting value. The value will be converted to the given type before +// it is returned if possible, or else an error diagnostic will be produced +// describing the conversion error. +// +// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion +// and just obtain the returned value directly. +// +// If the returned diagnostics contains errors then the result may be +// incomplete, but will always be of the requested type. +func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // We cheat a bit here and just build an EvalContext for our requested + // reference with the "self" address overridden, and then pull the "self" + // result out of it to return. 
+ ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject) + diags = diags.Append(ctxDiags) + val := ctx.Variables["self"] + if val == cty.NilVal { + val = cty.DynamicVal + } + + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + val = cty.UnknownVal(wantType) + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Incorrect value type", + Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + + return val, diags +} + +// EvalContext constructs a HCL expression evaluation context whose variable +// scope contains sufficient values to satisfy the given set of references. +// +// Most callers should prefer to use the evaluation helper methods that +// this type offers, but this is here for less common situations where the +// caller will handle the evaluation calls itself. +func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) { + return s.evalContext(refs, s.SelfAddr) +} + +func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) { + if s == nil { + panic("attempt to construct EvalContext for nil Scope") + } + + var diags tfdiags.Diagnostics + vals := make(map[string]cty.Value) + funcs := s.Functions() + ctx := &hcl.EvalContext{ + Variables: vals, + Functions: funcs, + } + + if len(refs) == 0 { + // Easy path for common case where there are no references at all. + return ctx, diags + } + + // First we'll do static validation of the references. This catches things + // early that might otherwise not get caught due to unknown values being + // present in the scope during planning. + if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() { + diags = diags.Append(staticDiags) + return ctx, diags + } + + // The reference set we are given has not been de-duped, and so there can + // be redundant requests in it for two reasons: + // - The same item is referenced multiple times + // - Both an item and that item's container are separately referenced. + // We will still visit every reference here and ask our data source for + // it, since that allows us to gather a full set of any errors and + // warnings, but once we've gathered all the data we'll then skip anything + // that's redundant in the process of populating our values map. + dataResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + managedResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{} + wholeModules := map[string]map[addrs.InstanceKey]cty.Value{} + moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{} + inputVariables := map[string]cty.Value{} + localValues := map[string]cty.Value{} + pathAttrs := map[string]cty.Value{} + terraformAttrs := map[string]cty.Value{} + countAttrs := map[string]cty.Value{} + var self cty.Value + + for _, ref := range refs { + rng := ref.SourceRange + isSelf := false + + rawSubj := ref.Subject + if rawSubj == addrs.Self { + if selfAddr == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. 
This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + continue + } + + // Treat "self" as an alias for the configured self address. + rawSubj = selfAddr + isSelf = true + + if rawSubj == addrs.Self { + // Programming error: the self address cannot alias itself. + panic("scope SelfAddr attempting to alias itself") + } + } + + // This type switch must cover all of the "Referenceable" implementations + // in package addrs. + switch subj := rawSubj.(type) { + + case addrs.ResourceInstance: + var into map[string]map[string]map[addrs.InstanceKey]cty.Value + switch subj.Resource.Mode { + case addrs.ManagedResourceMode: + into = managedResources + case addrs.DataResourceMode: + into = dataResources + default: + panic(fmt.Errorf("unsupported ResourceMode %s", subj.Resource.Mode)) + } + + val, valDiags := normalizeRefValue(s.Data.GetResourceInstance(subj, rng)) + diags = diags.Append(valDiags) + + r := subj.Resource + if into[r.Type] == nil { + into[r.Type] = make(map[string]map[addrs.InstanceKey]cty.Value) + } + if into[r.Type][r.Name] == nil { + into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value) + } + into[r.Type][r.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallInstance: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng)) + diags = diags.Append(valDiags) + + if wholeModules[subj.Call.Name] == nil { + wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value) + } + wholeModules[subj.Call.Name][subj.Key] = val + if isSelf { + self = val + } + + case addrs.ModuleCallOutput: + val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng)) + diags = diags.Append(valDiags) + + callName := subj.Call.Call.Name + callKey := subj.Call.Key + if moduleOutputs[callName] == nil { + moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value) + } + if moduleOutputs[callName][callKey] == nil { + moduleOutputs[callName][callKey] = make(map[string]cty.Value) + } + moduleOutputs[callName][callKey][subj.Name] = val + if isSelf { + self = val + } + + case addrs.InputVariable: + val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng)) + diags = diags.Append(valDiags) + inputVariables[subj.Name] = val + if isSelf { + self = val + } + + case addrs.LocalValue: + val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng)) + diags = diags.Append(valDiags) + localValues[subj.Name] = val + if isSelf { + self = val + } + + case addrs.PathAttr: + val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng)) + diags = diags.Append(valDiags) + pathAttrs[subj.Name] = val + if isSelf { + self = val + } + + case addrs.TerraformAttr: + val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng)) + diags = diags.Append(valDiags) + terraformAttrs[subj.Name] = val + if isSelf { + self = val + } + + case addrs.CountAttr: + val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng)) + diags = diags.Append(valDiags) + countAttrs[subj.Name] = val + if isSelf { + self = val + } + + default: + // Should never happen + panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj)) + } + } + + for k, v := range buildResourceObjects(managedResources) { + vals[k] = v + } + vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources)) + vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs)) + vals["var"] = cty.ObjectVal(inputVariables) + vals["local"] = cty.ObjectVal(localValues) + 
vals["path"] = cty.ObjectVal(pathAttrs) + vals["terraform"] = cty.ObjectVal(terraformAttrs) + vals["count"] = cty.ObjectVal(countAttrs) + if self != cty.NilVal { + vals["self"] = self + } + + return ctx, diags +} + +func buildResourceObjects(resources map[string]map[string]map[addrs.InstanceKey]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + for typeName, names := range resources { + nameVals := make(map[string]cty.Value) + for name, keys := range names { + nameVals[name] = buildInstanceObjects(keys) + } + vals[typeName] = cty.ObjectVal(nameVals) + } + return vals +} + +func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value { + vals := make(map[string]cty.Value) + + for name, keys := range wholeModules { + vals[name] = buildInstanceObjects(keys) + } + + for name, keys := range moduleOutputs { + if _, exists := wholeModules[name]; exists { + // If we also have a whole module value for this name then we'll + // skip this since the individual outputs are embedded in that result. + continue + } + + // The shape of this collection isn't compatible with buildInstanceObjects, + // but rather than replicating most of the buildInstanceObjects logic + // here we'll instead first transform the structure to be what that + // function expects and then use it. This is a little wasteful, but + // we do not expect this these maps to be large and so the extra work + // here should not hurt too much. + flattened := make(map[addrs.InstanceKey]cty.Value, len(keys)) + for k, vals := range keys { + flattened[k] = cty.ObjectVal(vals) + } + vals[name] = buildInstanceObjects(flattened) + } + + return vals +} + +func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value { + if val, exists := keys[addrs.NoKey]; exists { + // If present, a "no key" value supersedes all other values, + // since they should be embedded inside it. + return val + } + + // If we only have individual values then we need to construct + // either a list or a map, depending on what sort of keys we + // have. + haveInt := false + haveString := false + maxInt := 0 + + for k := range keys { + switch tk := k.(type) { + case addrs.IntKey: + haveInt = true + if int(tk) > maxInt { + maxInt = int(tk) + } + case addrs.StringKey: + haveString = true + } + } + + // We should either have ints or strings and not both, but + // if we have both then we'll prefer strings and let the + // language interpreter try to convert the int keys into + // strings in a map. + switch { + case haveString: + vals := make(map[string]cty.Value) + for k, v := range keys { + switch tk := k.(type) { + case addrs.StringKey: + vals[string(tk)] = v + case addrs.IntKey: + sk := strconv.Itoa(int(tk)) + vals[sk] = v + } + } + return cty.ObjectVal(vals) + case haveInt: + // We'll make a tuple that is long enough for our maximum + // index value. It doesn't matter if we end up shorter than + // the number of instances because if length(...) were + // being evaluated we would've got a NoKey reference and + // thus not ended up in this codepath at all. + vals := make([]cty.Value, maxInt+1) + for i := range vals { + if v, exists := keys[addrs.IntKey(i)]; exists { + vals[i] = v + } else { + // Just a placeholder, since nothing will access this anyway + vals[i] = cty.DynamicVal + } + } + return cty.TupleVal(vals) + default: + // Should never happen because there are no other key types. 
+ log.Printf("[ERROR] strange buildInstanceObjects call with no supported key types") + return cty.EmptyObjectVal + } +} + +func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) { + if diags.HasErrors() { + // If there are errors then we will force an unknown result so that + // we can still evaluate and catch type errors but we'll avoid + // producing redundant re-statements of the same errors we've already + // dealt with here. + return cty.UnknownVal(val.Type()), diags + } + return val, diags +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go new file mode 100644 index 00000000..6ce8aa9f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go @@ -0,0 +1,129 @@ +package funcs + +import ( + "fmt" + "net" + + "github.com/apparentlymart/go-cidr/cidr" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" +) + +// CidrHostFunc constructs a function that calculates a full host IP address +// within a given IP network address prefix. +var CidrHostFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "hostnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var hostNum int + if err := gocty.FromCtyValue(args[1], &hostNum); err != nil { + return cty.UnknownVal(cty.String), err + } + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + ip, err := cidr.Host(network, hostNum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(ip.String()), nil + }, +}) + +// CidrNetmaskFunc constructs a function that converts an IPv4 address prefix given +// in CIDR notation into a subnet mask address. +var CidrNetmaskFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + return cty.StringVal(net.IP(network.Mask).String()), nil + }, +}) + +// CidrSubnetFunc constructs a function that calculates a subnet address within +// a given IP network address prefix.
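The convenience wrappers defined after these specs mirror Terraform's cidrhost, cidrnetmask, and cidrsubnet functions. A short sketch of the expected arithmetic (prefix and numbers invented for illustration; error handling elided for brevity):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/lang/funcs"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	prefix := cty.StringVal("10.12.0.0/16")

	host, _ := funcs.CidrHost(prefix, cty.NumberIntVal(16))
	mask, _ := funcs.CidrNetmask(prefix)
	sub, _ := funcs.CidrSubnet(prefix, cty.NumberIntVal(4), cty.NumberIntVal(2))

	fmt.Println(host.AsString()) // 10.12.0.16
	fmt.Println(mask.AsString()) // 255.255.0.0
	fmt.Println(sub.AsString())  // 10.12.32.0/20
}
```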
+var CidrSubnetFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "prefix", + Type: cty.String, + }, + { + Name: "newbits", + Type: cty.Number, + }, + { + Name: "netnum", + Type: cty.Number, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var newbits int + if err := gocty.FromCtyValue(args[1], &newbits); err != nil { + return cty.UnknownVal(cty.String), err + } + var netnum int + if err := gocty.FromCtyValue(args[2], &netnum); err != nil { + return cty.UnknownVal(cty.String), err + } + + _, network, err := net.ParseCIDR(args[0].AsString()) + if err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err) + } + + // For portability with 32-bit systems where the subnet number + // will be a 32-bit int, we only allow extension of 32 bits in + // one call even if we're running on a 64-bit machine. + // (Of course, this is significant only for IPv6.) + if newbits > 32 { + return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits") + } + + newNetwork, err := cidr.Subnet(network, newbits, netnum) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + return cty.StringVal(newNetwork.String()), nil + }, +}) + +// CidrHost calculates a full host IP address within a given IP network address prefix. +func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) { + return CidrHostFunc.Call([]cty.Value{prefix, hostnum}) +} + +// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address. +func CidrNetmask(prefix cty.Value) (cty.Value, error) { + return CidrNetmaskFunc.Call([]cty.Value{prefix}) +} + +// CidrSubnet calculates a subnet address within a given IP network address prefix. +func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) { + return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum}) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go new file mode 100644 index 00000000..fd0de9ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go @@ -0,0 +1,1521 @@ +package funcs + +import ( + "errors" + "fmt" + "sort" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/function/stdlib" + "github.com/zclconf/go-cty/cty/gocty" +) + +var ElementFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "index", + Type: cty.Number, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + list := args[0] + listTy := list.Type() + switch { + case listTy.IsListType(): + return listTy.ElementType(), nil + case listTy.IsTupleType(): + if !args[1].IsKnown() { + // If the index isn't known yet then we can't predict the + // result type since each tuple element can have its own type. + return cty.DynamicPseudoType, nil + } + + etys := listTy.TupleElementTypes() + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // e.g. 
fractional number where whole number is required + return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err) + } + if len(etys) == 0 { + return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list") + } + index = index % len(etys) + return etys[index], nil + default: + return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + var index int + err := gocty.FromCtyValue(args[1], &index) + if err != nil { + // can't happen because we checked this in the Type function above + return cty.DynamicVal, fmt.Errorf("invalid index: %s", err) + } + + if !args[0].IsKnown() { + return cty.UnknownVal(retType), nil + } + + l := args[0].LengthInt() + if l == 0 { + return cty.DynamicVal, errors.New("cannot use element function with an empty list") + } + index = index % l + + // We did all the necessary type checks in the type function above, + // so this is guaranteed not to fail. + return args[0].Index(cty.NumberIntVal(int64(index))), nil + }, +}) + +var LengthFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "value", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + collTy := args[0].Type() + switch { + case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType: + return cty.Number, nil + default: + return cty.Number, errors.New("argument must be a string, a collection type, or a structural type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + coll := args[0] + collTy := args[0].Type() + switch { + case collTy == cty.DynamicPseudoType: + return cty.UnknownVal(cty.Number), nil + case collTy.IsTupleType(): + l := len(collTy.TupleElementTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy.IsObjectType(): + l := len(collTy.AttributeTypes()) + return cty.NumberIntVal(int64(l)), nil + case collTy == cty.String: + // We'll delegate to the cty stdlib strlen function here, because + // it deals with all of the complexities of tokenizing unicode + // grapheme clusters. + return stdlib.Strlen(coll) + case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType(): + return coll.Length(), nil + default: + // Should never happen, because of the checks in our Type func above + return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)") + } + }, +}) + +// CoalesceFunc constructs a function that takes any number of arguments and +// returns the first one that isn't empty. This function was copied from go-cty +// stdlib and modified so that it returns the first *non-empty* non-null element +// from a sequence, instead of merely the first non-null. 
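+//
+// For example (an illustrative sketch using the Coalesce wrapper defined
+// later in this file):
+//
+//	v, err := Coalesce(cty.StringVal(""), cty.NullVal(cty.String), cty.StringVal("fallback"))
+//	// on success, v is cty.StringVal("fallback"): the empty string and the
+//	// null value are both skipped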
+var CoalesceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "vals",
+		Type:             cty.DynamicPseudoType,
+		AllowUnknown:     true,
+		AllowDynamicType: true,
+		AllowNull:        true,
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		argTypes := make([]cty.Type, len(args))
+		for i, val := range args {
+			argTypes[i] = val.Type()
+		}
+		retType, _ := convert.UnifyUnsafe(argTypes)
+		if retType == cty.NilType {
+			return cty.NilType, errors.New("all arguments must have the same type")
+		}
+		return retType, nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		for _, argVal := range args {
+			// We already know this will succeed because of the checks in our Type func above
+			argVal, _ = convert.Convert(argVal, retType)
+			if !argVal.IsKnown() {
+				return cty.UnknownVal(retType), nil
+			}
+			if argVal.IsNull() {
+				continue
+			}
+			if retType == cty.String && argVal.RawEquals(cty.StringVal("")) {
+				continue
+			}
+
+			return argVal, nil
+		}
+		return cty.NilVal, errors.New("no non-null, non-empty-string arguments")
+	},
+})
+
+// CoalesceListFunc constructs a function that takes any number of list arguments
+// and returns the first one that isn't empty.
+var CoalesceListFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	VarParam: &function.Parameter{
+		Name:             "vals",
+		Type:             cty.DynamicPseudoType,
+		AllowUnknown:     true,
+		AllowDynamicType: true,
+		AllowNull:        true,
+	},
+	Type: func(args []cty.Value) (ret cty.Type, err error) {
+		if len(args) == 0 {
+			return cty.NilType, errors.New("at least one argument is required")
+		}
+
+		argTypes := make([]cty.Type, len(args))
+
+		for i, arg := range args {
+			// if any argument is unknown, we can't be certain which type we will return
+			if !arg.IsKnown() {
+				return cty.DynamicPseudoType, nil
+			}
+			ty := arg.Type()
+
+			if !ty.IsListType() && !ty.IsTupleType() {
+				return cty.NilType, errors.New("coalescelist arguments must be lists or tuples")
+			}
+
+			argTypes[i] = arg.Type()
+		}
+
+		last := argTypes[0]
+		// If there are mixed types, we have to return a dynamic type.
+		for _, next := range argTypes[1:] {
+			if !next.Equals(last) {
+				return cty.DynamicPseudoType, nil
+			}
+		}
+
+		return last, nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		for _, arg := range args {
+			if !arg.IsKnown() {
+				// If we run into an unknown list at some point, we can't
+				// predict the final result yet. (If there's a known, non-empty
+				// arg before this then we won't get here.)
+				return cty.UnknownVal(retType), nil
+			}
+
+			if arg.LengthInt() > 0 {
+				return arg, nil
+			}
+		}
+
+		return cty.NilVal, errors.New("no non-empty list arguments")
+	},
+})
+
+// CompactFunc constructs a function that takes a list of strings and returns a new list
+// with any empty string elements removed.
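+//
+// For example (illustrative, via the Compact wrapper defined later in this file):
+//
+//	v, err := Compact(cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal(""), cty.StringVal("b")}))
+//	// on success, v is a list holding just "a" and "b"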
+var CompactFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.List(cty.String), + }, + }, + Type: function.StaticReturnType(cty.List(cty.String)), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + listVal := args[0] + if !listVal.IsWhollyKnown() { + // If some of the element values aren't known yet then we + // can't yet return a compacted list + return cty.UnknownVal(retType), nil + } + + var outputList []cty.Value + + for it := listVal.ElementIterator(); it.Next(); { + _, v := it.Element() + if v.AsString() == "" { + continue + } + outputList = append(outputList, v) + } + + if len(outputList) == 0 { + return cty.ListValEmpty(cty.String), nil + } + + return cty.ListVal(outputList), nil + }, +}) + +// ContainsFunc constructs a function that determines whether a given list or +// set contains a given single value as one of its elements. +var ContainsFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Bool), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + arg := args[0] + ty := arg.Type() + + if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() { + return cty.NilVal, errors.New("argument must be list, tuple, or set") + } + + _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1]) + if err != nil { + return cty.False, nil + } + + return cty.True, nil + }, +}) + +// IndexFunc constructs a function that finds the element index for a given value in a list. +var IndexFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + { + Name: "value", + Type: cty.DynamicPseudoType, + }, + }, + Type: function.StaticReturnType(cty.Number), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) { + return cty.NilVal, errors.New("argument must be a list or tuple") + } + + if !args[0].IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + + if args[0].LengthInt() == 0 { // Easy path + return cty.NilVal, errors.New("cannot search an empty list") + } + + for it := args[0].ElementIterator(); it.Next(); { + i, v := it.Element() + eq, err := stdlib.Equal(v, args[1]) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.UnknownVal(cty.Number), nil + } + if eq.True() { + return i, nil + } + } + return cty.NilVal, errors.New("item not found") + + }, +}) + +// DistinctFunc constructs a function that takes a list and returns a new list +// with any duplicate elements removed. 
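+//
+// For example (illustrative, via the Distinct wrapper defined later in this file):
+//
+//	v, err := Distinct(cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("a"), cty.StringVal("b")}))
+//	// on success, v is a list holding "a" and "b", preserving first-seen order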
+var DistinctFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		return args[0].Type(), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		listVal := args[0]
+
+		if !listVal.IsWhollyKnown() {
+			return cty.UnknownVal(retType), nil
+		}
+		var list []cty.Value
+
+		for it := listVal.ElementIterator(); it.Next(); {
+			_, v := it.Element()
+			list, err = appendIfMissing(list, v)
+			if err != nil {
+				return cty.NilVal, err
+			}
+		}
+
+		if len(list) == 0 {
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+		return cty.ListVal(list), nil
+	},
+})
+
+// ChunklistFunc constructs a function that splits a single list into fixed-size chunks,
+// returning a list of lists.
+var ChunklistFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.List(cty.DynamicPseudoType),
+		},
+		{
+			Name: "size",
+			Type: cty.Number,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		return cty.List(args[0].Type()), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		listVal := args[0]
+		if !listVal.IsKnown() {
+			return cty.UnknownVal(retType), nil
+		}
+
+		var size int
+		err = gocty.FromCtyValue(args[1], &size)
+		if err != nil {
+			return cty.NilVal, fmt.Errorf("invalid size: %s", err)
+		}
+
+		if size < 0 {
+			return cty.NilVal, errors.New("the size argument must not be negative")
+		}
+
+		output := make([]cty.Value, 0)
+
+		// if size is 0, return a list containing the initial list
+		if size == 0 {
+			output = append(output, listVal)
+			return cty.ListVal(output), nil
+		}
+
+		chunk := make([]cty.Value, 0)
+
+		l := args[0].LengthInt()
+		i := 0
+
+		for it := listVal.ElementIterator(); it.Next(); {
+			_, v := it.Element()
+			chunk = append(chunk, v)
+
+			// Close the chunk when it reaches the requested size, or at the end of the list
+			if (i+1)%size == 0 || (i+1) == l {
+				output = append(output, cty.ListVal(chunk))
+				chunk = make([]cty.Value, 0)
+			}
+			i++
+		}
+
+		return cty.ListVal(output), nil
+	},
+})
+
+// FlattenFunc constructs a function that takes a list and replaces any elements
+// that are lists with a flattened sequence of the list contents.
+var FlattenFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.DynamicPseudoType,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		if !args[0].IsWhollyKnown() {
+			return cty.DynamicPseudoType, nil
+		}
+
+		argTy := args[0].Type()
+		if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() {
+			return cty.NilType, errors.New("can only flatten lists, sets and tuples")
+		}
+
+		retVal, known := flattener(args[0])
+		if !known {
+			return cty.DynamicPseudoType, nil
+		}
+
+		tys := make([]cty.Type, len(retVal))
+		for i, ty := range retVal {
+			tys[i] = ty.Type()
+		}
+		return cty.Tuple(tys), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		inputList := args[0]
+		if inputList.LengthInt() == 0 {
+			return cty.EmptyTupleVal, nil
+		}
+
+		out, known := flattener(inputList)
+		if !known {
+			return cty.UnknownVal(retType), nil
+		}
+
+		return cty.TupleVal(out), nil
+	},
+})
+
+// Flatten until it's not a cty.List, and return whether the value is known.
+// We can flatten lists with unknown values, as long as they are not
+// lists themselves.
+func flattener(flattenList cty.Value) ([]cty.Value, bool) { + out := make([]cty.Value, 0) + for it := flattenList.ElementIterator(); it.Next(); { + _, val := it.Element() + if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() { + if !val.IsKnown() { + return out, false + } + + res, known := flattener(val) + if !known { + return res, known + } + out = append(out, res...) + } else { + out = append(out, val) + } + } + return out, true +} + +// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys. +var KeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty := args[0].Type() + switch { + case ty.IsMapType(): + return cty.List(cty.String), nil + case ty.IsObjectType(): + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + // All of our result elements will be strings, and atys just + // decides how many there are. + etys := make([]cty.Type, len(atys)) + for i := range etys { + etys[i] = cty.String + } + return cty.Tuple(etys), nil + default: + return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + m := args[0] + var keys []cty.Value + + switch { + case m.Type().IsObjectType(): + // In this case we allow unknown values so we must work only with + // the attribute _types_, not with the value itself. + var names []string + for name := range m.Type().AttributeTypes() { + names = append(names, name) + } + sort.Strings(names) // same ordering guaranteed by cty's ElementIterator + if len(names) == 0 { + return cty.EmptyTupleVal, nil + } + keys = make([]cty.Value, len(names)) + for i, name := range names { + keys[i] = cty.StringVal(name) + } + return cty.TupleVal(keys), nil + default: + if !m.IsKnown() { + return cty.UnknownVal(retType), nil + } + + // cty guarantees that ElementIterator will iterate in lexicographical + // order by key. + for it := args[0].ElementIterator(); it.Next(); { + k, _ := it.Element() + keys = append(keys, k) + } + if len(keys) == 0 { + return cty.ListValEmpty(cty.String), nil + } + return cty.ListVal(keys), nil + } + }, +}) + +// ListFunc constructs a function that takes an arbitrary number of arguments +// and returns a list containing those values in the same order. 
+// +// This function is deprecated in Terraform v0.12 +var ListFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) == 0 { + return cty.NilType, errors.New("at least one argument is required") + } + + argTypes := make([]cty.Type, len(args)) + + for i, arg := range args { + argTypes[i] = arg.Type() + } + + retType, _ := convert.UnifyUnsafe(argTypes) + if retType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.List(retType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + newList := make([]cty.Value, 0, len(args)) + + for _, arg := range args { + // We already know this will succeed because of the checks in our Type func above + arg, _ = convert.Convert(arg, retType.ElementType()) + newList = append(newList, arg) + } + + return cty.ListVal(newList), nil + }, +}) + +// LookupFunc constructs a function that performs dynamic lookups of map types. +var LookupFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "inputMap", + Type: cty.DynamicPseudoType, + }, + { + Name: "key", + Type: cty.String, + }, + }, + VarParam: &function.Parameter{ + Name: "default", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 1 || len(args) > 3 { + return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args)) + } + + ty := args[0].Type() + + switch { + case ty.IsObjectType(): + if !args[1].IsKnown() { + return cty.DynamicPseudoType, nil + } + + key := args[1].AsString() + if ty.HasAttribute(key) { + return args[0].GetAttr(key).Type(), nil + } else if len(args) == 3 { + // if the key isn't found but a default is provided, + // return the default type + return args[2].Type(), nil + } + return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key) + case ty.IsMapType(): + return ty.ElementType(), nil + default: + return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var defaultVal cty.Value + defaultValueSet := false + + if len(args) == 3 { + defaultVal = args[2] + defaultValueSet = true + } + + mapVar := args[0] + lookupKey := args[1].AsString() + + if !mapVar.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + if mapVar.Type().IsObjectType() { + if mapVar.Type().HasAttribute(lookupKey) { + return mapVar.GetAttr(lookupKey), nil + } + } else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True { + v := mapVar.Index(cty.StringVal(lookupKey)) + if ty := v.Type(); !ty.Equals(cty.NilType) { + switch { + case ty.Equals(cty.String): + return cty.StringVal(v.AsString()), nil + case ty.Equals(cty.Number): + return cty.NumberVal(v.AsBigFloat()), nil + case ty.Equals(cty.Bool): + return cty.BoolVal(v.True()), nil + default: + return cty.NilVal, errors.New("lookup() can only be used with maps of primitive types") + } + } + } + + if defaultValueSet { + defaultVal, err = convert.Convert(defaultVal, retType) + if err != nil { + return cty.NilVal, err + } + return defaultVal, nil + } + + return cty.UnknownVal(cty.DynamicPseudoType), 
fmt.Errorf( + "lookup failed to find '%s'", lookupKey) + }, +}) + +// MapFunc constructs a function that takes an even number of arguments and +// returns a map whose elements are constructed from consecutive pairs of arguments. +// +// This function is deprecated in Terraform v0.12 +var MapFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "vals", + Type: cty.DynamicPseudoType, + AllowUnknown: true, + AllowDynamicType: true, + AllowNull: true, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + if len(args) < 2 || len(args)%2 != 0 { + return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args)) + } + + argTypes := make([]cty.Type, len(args)/2) + index := 0 + + for i := 0; i < len(args); i += 2 { + argTypes[index] = args[i+1].Type() + index++ + } + + valType, _ := convert.UnifyUnsafe(argTypes) + if valType == cty.NilType { + return cty.NilType, errors.New("all arguments must have the same type") + } + + return cty.Map(valType), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + } + + outputMap := make(map[string]cty.Value) + + for i := 0; i < len(args); i += 2 { + + key := args[i].AsString() + + err := gocty.FromCtyValue(args[i], &key) + if err != nil { + return cty.NilVal, err + } + + val := args[i+1] + + var variable cty.Value + err = gocty.FromCtyValue(val, &variable) + if err != nil { + return cty.NilVal, err + } + + // We already know this will succeed because of the checks in our Type func above + variable, _ = convert.Convert(variable, retType.ElementType()) + + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return cty.MapVal(outputMap), nil + }, +}) + +// MatchkeysFunc constructs a function that constructs a new list by taking a +// subset of elements from one list whose indexes match the corresponding +// indexes of values in another list. +var MatchkeysFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "keys", + Type: cty.List(cty.DynamicPseudoType), + }, + { + Name: "searchset", + Type: cty.List(cty.DynamicPseudoType), + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + if ty == cty.NilType { + return cty.NilType, errors.New("keys and searchset must be of the same type") + } + + // the return type is based on args[0] (values) + return args[0].Type(), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + if !args[0].IsKnown() { + return cty.UnknownVal(cty.List(retType.ElementType())), nil + } + + if args[0].LengthInt() != args[1].LengthInt() { + return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal") + } + + output := make([]cty.Value, 0) + values := args[0] + + // Keys and searchset must be the same type. + // We can skip error checking here because we've already verified that + // they can be unified in the Type function + ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()}) + keys, _ := convert.Convert(args[1], ty) + searchset, _ := convert.Convert(args[2], ty) + + // if searchset is empty, return an empty list. 
+ if searchset.LengthInt() == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + + if !values.IsWhollyKnown() || !keys.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, key := it.Element() + for iter := searchset.ElementIterator(); iter.Next(); { + _, search := iter.Element() + eq, err := stdlib.Equal(key, search) + if err != nil { + return cty.NilVal, err + } + if !eq.IsKnown() { + return cty.ListValEmpty(retType.ElementType()), nil + } + if eq.True() { + v := values.Index(cty.NumberIntVal(int64(i))) + output = append(output, v) + break + } + } + i++ + } + + // if we haven't matched any key, then output is an empty list. + if len(output) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(output), nil + }, +}) + +// MergeFunc constructs a function that takes an arbitrary number of maps and +// returns a single map that contains a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +var MergeFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "maps", + Type: cty.DynamicPseudoType, + AllowDynamicType: true, + }, + Type: function.StaticReturnType(cty.DynamicPseudoType), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + outputMap := make(map[string]cty.Value) + + for _, arg := range args { + if !arg.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + if !arg.Type().IsObjectType() && !arg.Type().IsMapType() { + return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName()) + } + for it := arg.ElementIterator(); it.Next(); { + k, v := it.Element() + outputMap[k.AsString()] = v + } + } + return cty.ObjectVal(outputMap), nil + }, +}) + +// ReverseFunc takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. +var ReverseFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "list", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + argTy := args[0].Type() + switch { + case argTy.IsTupleType(): + argTys := argTy.TupleElementTypes() + retTys := make([]cty.Type, len(argTys)) + for i, ty := range argTys { + retTys[len(retTys)-i-1] = ty + } + return cty.Tuple(retTys), nil + case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list + return cty.List(argTy.ElementType()), nil + default: + return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName()) + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + in := args[0].AsValueSlice() + outVals := make([]cty.Value, len(in)) + for i, v := range in { + outVals[len(outVals)-i-1] = v + } + switch { + case retType.IsTupleType(): + return cty.TupleVal(outVals), nil + default: + if len(outVals) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(outVals), nil + } + }, +}) + +// SetProductFunc calculates the cartesian product of two or more sets or +// sequences. If the arguments are all lists then the result is a list of tuples, +// preserving the ordering of all of the input lists. Otherwise the result is a +// set of tuples. 
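+//
+// For example (illustrative, via the SetProduct wrapper defined later in this file):
+//
+//	v, err := SetProduct(
+//		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
+//		cty.ListVal([]cty.Value{cty.StringVal("x")}),
+//	)
+//	// since both arguments are lists, on success v is a list of tuples:
+//	// [["a","x"], ["b","x"]]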
+var SetProductFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + VarParam: &function.Parameter{ + Name: "sets", + Type: cty.DynamicPseudoType, + }, + Type: func(args []cty.Value) (retType cty.Type, err error) { + if len(args) < 2 { + return cty.NilType, errors.New("at least two arguments are required") + } + + listCount := 0 + elemTys := make([]cty.Type, len(args)) + for i, arg := range args { + aty := arg.Type() + switch { + case aty.IsSetType(): + elemTys[i] = aty.ElementType() + case aty.IsListType(): + elemTys[i] = aty.ElementType() + listCount++ + case aty.IsTupleType(): + // We can accept a tuple type only if there's some common type + // that all of its elements can be converted to. + allEtys := aty.TupleElementTypes() + if len(allEtys) == 0 { + elemTys[i] = cty.DynamicPseudoType + listCount++ + break + } + ety, _ := convert.UnifyUnsafe(allEtys) + if ety == cty.NilType { + return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type") + } + elemTys[i] = ety + listCount++ + default: + return cty.NilType, function.NewArgErrorf(i, "a set or a list is required") + } + } + + if listCount == len(args) { + return cty.List(cty.Tuple(elemTys)), nil + } + return cty.Set(cty.Tuple(elemTys)), nil + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + ety := retType.ElementType() + + total := 1 + for _, arg := range args { + // Because of our type checking function, we are guaranteed that + // all of the arguments are known, non-null values of types that + // support LengthInt. + total *= arg.LengthInt() + } + + if total == 0 { + // If any of the arguments was an empty collection then our result + // is also an empty collection, which we'll short-circuit here. + if retType.IsListType() { + return cty.ListValEmpty(ety), nil + } + return cty.SetValEmpty(ety), nil + } + + subEtys := ety.TupleElementTypes() + product := make([][]cty.Value, total) + + b := make([]cty.Value, total*len(args)) + n := make([]int, len(args)) + s := 0 + argVals := make([][]cty.Value, len(args)) + for i, arg := range args { + argVals[i] = arg.AsValueSlice() + } + + for i := range product { + e := s + len(args) + pi := b[s:e] + product[i] = pi + s = e + + for j, n := range n { + val := argVals[j][n] + ty := subEtys[j] + if !val.Type().Equals(ty) { + var err error + val, err = convert.Convert(val, ty) + if err != nil { + // Should never happen since we checked this in our + // type-checking function. + return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName()) + } + } + pi[j] = val + } + + for j := len(n) - 1; j >= 0; j-- { + n[j]++ + if n[j] < len(argVals[j]) { + break + } + n[j] = 0 + } + } + + productVals := make([]cty.Value, total) + for i, vals := range product { + productVals[i] = cty.TupleVal(vals) + } + + if retType.IsListType() { + return cty.ListVal(productVals), nil + } + return cty.SetVal(productVals), nil + }, +}) + +// SliceFunc constructs a function that extracts some consecutive elements +// from within a list. 
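+//
+// For example (illustrative, via the Slice wrapper defined later in this file;
+// the start index is inclusive and the end index is exclusive):
+//
+//	list := cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c")})
+//	v, err := Slice(list, cty.NumberIntVal(1), cty.NumberIntVal(3))
+//	// on success, v is a list holding "b" and "c"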
+var SliceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.DynamicPseudoType,
+		},
+		{
+			Name: "start_index",
+			Type: cty.Number,
+		},
+		{
+			Name: "end_index",
+			Type: cty.Number,
+		},
+	},
+	Type: func(args []cty.Value) (cty.Type, error) {
+		arg := args[0]
+		argTy := arg.Type()
+
+		if argTy.IsSetType() {
+			return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important")
+		}
+		if !argTy.IsListType() && !argTy.IsTupleType() {
+			return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value")
+		}
+
+		startIndex, endIndex, idxsKnown, err := sliceIndexes(args)
+		if err != nil {
+			return cty.NilType, err
+		}
+
+		if argTy.IsListType() {
+			return argTy, nil
+		}
+
+		if !idxsKnown {
+			// If we don't know our start/end indices then we can't predict
+			// the result type if we're planning to return a tuple.
+			return cty.DynamicPseudoType, nil
+		}
+		return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil
+	},
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		inputList := args[0]
+
+		if retType == cty.DynamicPseudoType {
+			return cty.DynamicVal, nil
+		}
+
+		// we ignore idxsKnown return value here because the indices are always
+		// known here, or else the call would've short-circuited.
+		startIndex, endIndex, _, err := sliceIndexes(args)
+		if err != nil {
+			return cty.NilVal, err
+		}
+
+		if endIndex-startIndex == 0 {
+			if retType.IsTupleType() {
+				return cty.EmptyTupleVal, nil
+			}
+			return cty.ListValEmpty(retType.ElementType()), nil
+		}
+
+		outputList := inputList.AsValueSlice()[startIndex:endIndex]
+
+		if retType.IsTupleType() {
+			return cty.TupleVal(outputList), nil
+		}
+
+		return cty.ListVal(outputList), nil
+	},
+})
+
+func sliceIndexes(args []cty.Value) (int, int, bool, error) {
+	var startIndex, endIndex, length int
+	var startKnown, endKnown, lengthKnown bool
+
+	if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known
+		length = args[0].LengthInt()
+		lengthKnown = true
+	}
+
+	if args[1].IsKnown() {
+		if err := gocty.FromCtyValue(args[1], &startIndex); err != nil {
+			return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err)
+		}
+		if startIndex < 0 {
+			return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero")
+		}
+		if lengthKnown && startIndex > length {
+			return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list")
+		}
+		startKnown = true
+	}
+	if args[2].IsKnown() {
+		if err := gocty.FromCtyValue(args[2], &endIndex); err != nil {
+			return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err)
+		}
+		if endIndex < 0 {
+			return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero")
+		}
+		if lengthKnown && endIndex > length {
+			return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list")
+		}
+		endKnown = true
+	}
+	if startKnown && endKnown {
+		if startIndex > endIndex {
+			return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index")
+		}
+	}
+	return startIndex, endIndex, startKnown && endKnown, nil
+}
+
+// TransposeFunc constructs a function that takes a map of lists of strings
and +// swaps the keys and values to produce a new map of lists of strings. +var TransposeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.Map(cty.List(cty.String)), + }, + }, + Type: function.StaticReturnType(cty.Map(cty.List(cty.String))), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + inputMap := args[0] + if !inputMap.IsWhollyKnown() { + return cty.UnknownVal(retType), nil + } + + outputMap := make(map[string]cty.Value) + tmpMap := make(map[string][]string) + + for it := inputMap.ElementIterator(); it.Next(); { + inKey, inVal := it.Element() + for iter := inVal.ElementIterator(); iter.Next(); { + _, val := iter.Element() + if !val.Type().Equals(cty.String) { + return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings") + } + + outKey := val.AsString() + if _, ok := tmpMap[outKey]; !ok { + tmpMap[outKey] = make([]string, 0) + } + outVal := tmpMap[outKey] + outVal = append(outVal, inKey.AsString()) + sort.Strings(outVal) + tmpMap[outKey] = outVal + } + } + + for outKey, outVal := range tmpMap { + values := make([]cty.Value, 0) + for _, v := range outVal { + values = append(values, cty.StringVal(v)) + } + outputMap[outKey] = cty.ListVal(values) + } + + return cty.MapVal(outputMap), nil + }, +}) + +// ValuesFunc constructs a function that returns a list of the map values, +// in the order of the sorted keys. +var ValuesFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + ty := args[0].Type() + if ty.IsMapType() { + return cty.List(ty.ElementType()), nil + } else if ty.IsObjectType() { + // The result is a tuple type with all of the same types as our + // object type's attributes, sorted in lexicographical order by the + // keys. (This matches the sort order guaranteed by ElementIterator + // on a cty object value.) + atys := ty.AttributeTypes() + if len(atys) == 0 { + return cty.EmptyTuple, nil + } + attrNames := make([]string, 0, len(atys)) + for name := range atys { + attrNames = append(attrNames, name) + } + sort.Strings(attrNames) + + tys := make([]cty.Type, len(attrNames)) + for i, name := range attrNames { + tys[i] = atys[name] + } + return cty.Tuple(tys), nil + } + return cty.NilType, errors.New("values() requires a map as the first argument") + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + mapVar := args[0] + + // We can just iterate the map/object value here because cty guarantees + // that these types always iterate in key lexicographical order. + var values []cty.Value + for it := mapVar.ElementIterator(); it.Next(); { + _, val := it.Element() + values = append(values, val) + } + + if retType.IsTupleType() { + return cty.TupleVal(values), nil + } + if len(values) == 0 { + return cty.ListValEmpty(retType.ElementType()), nil + } + return cty.ListVal(values), nil + }, +}) + +// ZipmapFunc constructs a function that constructs a map from a list of keys +// and a corresponding list of values. 
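+//
+// For example (illustrative, via the Zipmap wrapper defined later in this file):
+//
+//	v, err := Zipmap(
+//		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
+//		cty.ListVal([]cty.Value{cty.NumberIntVal(1), cty.NumberIntVal(2)}),
+//	)
+//	// on success, v is a map of numbers: {"a" = 1, "b" = 2}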
+var ZipmapFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "keys", + Type: cty.List(cty.String), + }, + { + Name: "values", + Type: cty.DynamicPseudoType, + }, + }, + Type: func(args []cty.Value) (ret cty.Type, err error) { + keys := args[0] + values := args[1] + valuesTy := values.Type() + + switch { + case valuesTy.IsListType(): + return cty.Map(values.Type().ElementType()), nil + case valuesTy.IsTupleType(): + if !keys.IsWhollyKnown() { + // Since zipmap with a tuple produces an object, we need to know + // all of the key names before we can predict our result type. + return cty.DynamicPseudoType, nil + } + + keysRaw := keys.AsValueSlice() + valueTypesRaw := valuesTy.TupleElementTypes() + if len(keysRaw) != len(valueTypesRaw) { + return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw)) + } + atys := make(map[string]cty.Type, len(valueTypesRaw)) + for i, keyVal := range keysRaw { + if keyVal.IsNull() { + return cty.NilType, fmt.Errorf("keys list has null value at index %d", i) + } + key := keyVal.AsString() + atys[key] = valueTypesRaw[i] + } + return cty.Object(atys), nil + + default: + return cty.NilType, errors.New("values argument must be a list or tuple value") + } + }, + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + keys := args[0] + values := args[1] + + if !keys.IsWhollyKnown() { + // Unknown map keys and object attributes are not supported, so + // our entire result must be unknown in this case. + return cty.UnknownVal(retType), nil + } + + // both keys and values are guaranteed to be shallowly-known here, + // because our declared params above don't allow unknown or null values. + if keys.LengthInt() != values.LengthInt() { + return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt()) + } + + output := make(map[string]cty.Value) + + i := 0 + for it := keys.ElementIterator(); it.Next(); { + _, v := it.Element() + val := values.Index(cty.NumberIntVal(int64(i))) + output[v.AsString()] = val + i++ + } + + switch { + case retType.IsMapType(): + if len(output) == 0 { + return cty.MapValEmpty(retType.ElementType()), nil + } + return cty.MapVal(output), nil + case retType.IsObjectType(): + return cty.ObjectVal(output), nil + default: + // Should never happen because the type-check function should've + // caught any other case. + return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName()) + } + }, +}) + +// helper function to add an element to a list, if it does not already exist +func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) { + for _, ele := range slice { + eq, err := stdlib.Equal(ele, element) + if err != nil { + return slice, err + } + if eq.True() { + return slice, nil + } + } + return append(slice, element), nil +} + +// Element returns a single element from a given list at the given index. If +// index is greater than the length of the list then it is wrapped modulo +// the list length. +func Element(list, index cty.Value) (cty.Value, error) { + return ElementFunc.Call([]cty.Value{list, index}) +} + +// Length returns the number of elements in the given collection or number of +// Unicode characters in the given string. 
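+//
+// For example (illustrative):
+//
+//	n, err := Length(cty.ListVal([]cty.Value{cty.True, cty.False}))
+//	// on success, n is cty.NumberIntVal(2)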
+func Length(collection cty.Value) (cty.Value, error) { + return LengthFunc.Call([]cty.Value{collection}) +} + +// Coalesce takes any number of arguments and returns the first one that isn't empty. +func Coalesce(args ...cty.Value) (cty.Value, error) { + return CoalesceFunc.Call(args) +} + +// CoalesceList takes any number of list arguments and returns the first one that isn't empty. +func CoalesceList(args ...cty.Value) (cty.Value, error) { + return CoalesceListFunc.Call(args) +} + +// Compact takes a list of strings and returns a new list +// with any empty string elements removed. +func Compact(list cty.Value) (cty.Value, error) { + return CompactFunc.Call([]cty.Value{list}) +} + +// Contains determines whether a given list contains a given single value +// as one of its elements. +func Contains(list, value cty.Value) (cty.Value, error) { + return ContainsFunc.Call([]cty.Value{list, value}) +} + +// Index finds the element index for a given value in a list. +func Index(list, value cty.Value) (cty.Value, error) { + return IndexFunc.Call([]cty.Value{list, value}) +} + +// Distinct takes a list and returns a new list with any duplicate elements removed. +func Distinct(list cty.Value) (cty.Value, error) { + return DistinctFunc.Call([]cty.Value{list}) +} + +// Chunklist splits a single list into fixed-size chunks, returning a list of lists. +func Chunklist(list, size cty.Value) (cty.Value, error) { + return ChunklistFunc.Call([]cty.Value{list, size}) +} + +// Flatten takes a list and replaces any elements that are lists with a flattened +// sequence of the list contents. +func Flatten(list cty.Value) (cty.Value, error) { + return FlattenFunc.Call([]cty.Value{list}) +} + +// Keys takes a map and returns a sorted list of the map keys. +func Keys(inputMap cty.Value) (cty.Value, error) { + return KeysFunc.Call([]cty.Value{inputMap}) +} + +// List takes any number of list arguments and returns a list containing those +// values in the same order. +func List(args ...cty.Value) (cty.Value, error) { + return ListFunc.Call(args) +} + +// Lookup performs a dynamic lookup into a map. +// There are two required arguments, map and key, plus an optional default, +// which is a value to return if no key is found in map. +func Lookup(args ...cty.Value) (cty.Value, error) { + return LookupFunc.Call(args) +} + +// Map takes an even number of arguments and returns a map whose elements are constructed +// from consecutive pairs of arguments. +func Map(args ...cty.Value) (cty.Value, error) { + return MapFunc.Call(args) +} + +// Matchkeys constructs a new list by taking a subset of elements from one list +// whose indexes match the corresponding indexes of values in another list. +func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) { + return MatchkeysFunc.Call([]cty.Value{values, keys, searchset}) +} + +// Merge takes an arbitrary number of maps and returns a single map that contains +// a merged set of elements from all of the maps. +// +// If more than one given map defines the same key then the one that is later in +// the argument sequence takes precedence. +func Merge(maps ...cty.Value) (cty.Value, error) { + return MergeFunc.Call(maps) +} + +// Reverse takes a sequence and produces a new sequence of the same length +// with all of the same elements as the given sequence but in reverse order. +func Reverse(list cty.Value) (cty.Value, error) { + return ReverseFunc.Call([]cty.Value{list}) +} + +// SetProduct computes the cartesian product of sets or sequences. 
+func SetProduct(sets ...cty.Value) (cty.Value, error) { + return SetProductFunc.Call(sets) +} + +// Slice extracts some consecutive elements from within a list. +func Slice(list, start, end cty.Value) (cty.Value, error) { + return SliceFunc.Call([]cty.Value{list, start, end}) +} + +// Transpose takes a map of lists of strings and swaps the keys and values to +// produce a new map of lists of strings. +func Transpose(values cty.Value) (cty.Value, error) { + return TransposeFunc.Call([]cty.Value{values}) +} + +// Values returns a list of the map values, in the order of the sorted keys. +// This function only works on flat maps. +func Values(values cty.Value) (cty.Value, error) { + return ValuesFunc.Call([]cty.Value{values}) +} + +// Zipmap constructs a map from a list of keys and a corresponding list of values. +func Zipmap(keys, values cty.Value) (cty.Value, error) { + return ZipmapFunc.Call([]cty.Value{keys, values}) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go new file mode 100644 index 00000000..83f85979 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go @@ -0,0 +1,87 @@ +package funcs + +import ( + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeToFunc constructs a "to..." function, like "tostring", which converts +// its argument to a specific type or type kind. +// +// The given type wantTy can be any type constraint that cty's "convert" package +// would accept. In particular, this means that you can pass +// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which +// will then cause cty to attempt to unify all of the element types when given +// a tuple. +func MakeToFunc(wantTy cty.Type) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "v", + // We use DynamicPseudoType rather than wantTy here so that + // all values will pass through the function API verbatim and + // we can handle the conversion logic within the Type and + // Impl functions. This allows us to customize the error + // messages to be more appropriate for an explicit type + // conversion, whereas the cty function system produces + // messages aimed at _implicit_ type conversions. + Type: cty.DynamicPseudoType, + AllowNull: true, + }, + }, + Type: func(args []cty.Value) (cty.Type, error) { + gotTy := args[0].Type() + if gotTy.Equals(wantTy) { + return wantTy, nil + } + conv := convert.GetConversionUnsafe(args[0].Type(), wantTy) + if conv == nil { + // We'll use some specialized errors for some trickier cases, + // but most we can handle in a simple way. + switch { + case gotTy.IsTupleType() && wantTy.IsTupleType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + case gotTy.IsObjectType() && wantTy.IsObjectType(): + return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy)) + default: + return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + // If a conversion is available then everything is fine. 
+ return wantTy, nil + }, + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + // We didn't set "AllowUnknown" on our argument, so it is guaranteed + // to be known here but may still be null. + ret, err := convert.Convert(args[0], retType) + if err != nil { + // Because we used GetConversionUnsafe above, conversion can + // still potentially fail in here. For example, if the user + // asks to convert the string "a" to bool then we'll + // optimistically permit it during type checking but fail here + // once we note that the value isn't either "true" or "false". + gotTy := args[0].Type() + switch { + case gotTy == cty.String && wantTy == cty.Bool: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what) + case gotTy == cty.String && wantTy == cty.Number: + what := "string" + if !args[0].IsNull() { + what = strconv.Quote(args[0].AsString()) + } + return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what) + default: + return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint()) + } + } + return ret, nil + }, + }) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go new file mode 100644 index 00000000..be006f82 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go @@ -0,0 +1,325 @@ +package funcs + +import ( + "crypto/md5" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/base64" + "encoding/hex" + "encoding/pem" + "fmt" + "hash" + + uuid "github.com/hashicorp/go-uuid" + uuidv5 "github.com/satori/go.uuid" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" + "github.com/zclconf/go-cty/cty/gocty" + "golang.org/x/crypto/bcrypt" +) + +var UUIDFunc = function.New(&function.Spec{ + Params: []function.Parameter{}, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + result, err := uuid.GenerateUUID() + if err != nil { + return cty.UnknownVal(cty.String), err + } + return cty.StringVal(result), nil + }, +}) + +var UUIDV5Func = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "namespace", + Type: cty.String, + }, + { + Name: "name", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) { + var namespace uuidv5.UUID + switch { + case args[0].AsString() == "dns": + namespace = uuidv5.NamespaceDNS + case args[0].AsString() == "url": + namespace = uuidv5.NamespaceURL + case args[0].AsString() == "oid": + namespace = uuidv5.NamespaceOID + case args[0].AsString() == "x500": + namespace = uuidv5.NamespaceX500 + default: + if namespace, err = uuidv5.FromString(args[0].AsString()); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err) + } + } + val := args[1].AsString() + return cty.StringVal(uuidv5.NewV5(namespace, val).String()), nil + }, +}) + +// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string +// and encodes it with Base64. 
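+//
+// For example (an illustrative sketch; since a SHA256 digest is 32 bytes,
+// the base64-encoded result is always 44 characters long):
+//
+//	v, err := Base64Sha256(cty.StringVal("hello"))
+//	// on success, v is a cty.String holding the 44-character base64
+//	// encoding of the 32-byte SHA256 digest of "hello"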
+var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)

+// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileBase64Sha256Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString)
+}
+
+// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with Base64.
+var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)
+
+// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileBase64Sha512Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString)
+}
+
+// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
+var BcryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "cost",
+		Type: cty.Number,
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		defaultCost := 10
+
+		if len(args) > 1 {
+			var val int
+			if err := gocty.FromCtyValue(args[1], &val); err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+			defaultCost = val
+		}
+
+		if len(args) > 2 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
+		}
+
+		input := args[0].AsString()
+		out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("error occurred generating password: %s", err.Error())
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)
+
+// MakeFileMd5Func constructs a function that is like Md5Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileMd5Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString)
+}
+
+// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted ciphertext.
+var RsaDecryptFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "ciphertext",
+			Type: cty.String,
+		},
+		{
+			Name: "privatekey",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		s := args[0].AsString()
+		key := args[1].AsString()
+
+		b, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s)
+		}
+
+		block, _ := pem.Decode([]byte(key))
+		if block == nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found")
+		}
+		if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
+			return cty.UnknownVal(cty.String), fmt.Errorf(
+				"failed to parse key: password protected keys are not supported.
Please decrypt the key prior to use",
+			)
+		}
+
+		x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(string(out)), nil
+	},
+})
+
+// Sha1Func constructs a function that computes the SHA1 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)
+
+// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha1Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString)
+}
+
+// Sha256Func constructs a function that computes the SHA256 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)
+
+// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha256Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString)
+}
+
+// Sha512Func constructs a function that computes the SHA512 hash of a given string
+// and encodes it with hexadecimal digits.
+var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)
+
+// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
+// contents of a file rather than hashing a given literal string.
+func MakeFileSha512Func(baseDir string) function.Function {
+	return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString)
+}
+
+func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "str",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			s := args[0].AsString()
+			h := hf()
+			h.Write([]byte(s))
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.String),
+		Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+			path := args[0].AsString()
+			src, err := readFileBytes(baseDir, path)
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			h := hf()
+			h.Write(src)
+			rv := enc(h.Sum(nil))
+			return cty.StringVal(rv), nil
+		},
+	})
+}
+
+// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
+// format.
+//
+// This is not a pure function: it will generate a different result for each
+// call. It must therefore be registered as an impure function in the function
+// table in the "lang" package.
+func UUID() (cty.Value, error) {
+	return UUIDFunc.Call(nil)
+}
+
+// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
+// format.
+func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
+	return UUIDV5Func.Call([]cty.Value{namespace, name})
+}
+
+// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
+// Base64.
+
+// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
+// format.
+//
+// This is not a pure function: it will generate a different result for each
+// call. It must therefore be registered as an impure function in the function
+// table in the "lang" package.
+func UUID() (cty.Value, error) {
+	return UUIDFunc.Call(nil)
+}
+
+// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
+// format.
+func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
+	return UUIDV5Func.Call([]cty.Value{namespace, name})
+}
+
+// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha256(str cty.Value) (cty.Value, error) {
+	return Base64Sha256Func.Call([]cty.Value{str})
+}
+
+// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
+// Base64.
+//
+// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
+// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
+// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
+func Base64Sha512(str cty.Value) (cty.Value, error) {
+	return Base64Sha512Func.Call([]cty.Value{str})
+}
+
+// Bcrypt computes a hash of the given string using the Blowfish cipher,
+// returning a string in the Modular Crypt Format
+// usually expected in the shadow password file on many Unix systems.
+func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(cost)+1)
+	args[0] = str
+	copy(args[1:], cost)
+	return BcryptFunc.Call(args)
+}
+
+// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
+func Md5(str cty.Value) (cty.Value, error) {
+	return Md5Func.Call([]cty.Value{str})
+}
+
+// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
+// cleartext.
+func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
+	return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
+}
+
+// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
+func Sha1(str cty.Value) (cty.Value, error) {
+	return Sha1Func.Call([]cty.Value{str})
+}
+
+// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
+func Sha256(str cty.Value) (cty.Value, error) {
+	return Sha256Func.Call([]cty.Value{str})
+}
+
+// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
+func Sha512(str cty.Value) (cty.Value, error) {
+	return Sha512Func.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
new file mode 100644
index 00000000..5dae1987
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
@@ -0,0 +1,70 @@
+package funcs
+
+import (
+	"time"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// TimestampFunc constructs a function that returns a string representation of the current date and time.
+var TimestampFunc = function.New(&function.Spec{
+	Params: []function.Parameter{},
+	Type:   function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
+	},
+})
+
+// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
+var TimeAddFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "timestamp",
+			Type: cty.String,
+		},
+		{
+			Name: "duration",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		ts, err := time.Parse(time.RFC3339, args[0].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		duration, err := time.ParseDuration(args[1].AsString())
+		if err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
+	},
+})
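+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// adding a Go-style duration string to an RFC 3339 timestamp via TimeAddFunc.
+// The helper name exampleTimeAdd is hypothetical.
+func exampleTimeAdd() (string, error) {
+	// "2017-11-22T00:00:00Z" plus "10m" yields "2017-11-22T00:10:00Z".
+	v, err := TimeAddFunc.Call([]cty.Value{
+		cty.StringVal("2017-11-22T00:00:00Z"),
+		cty.StringVal("10m"),
+	})
+	if err != nil {
+		return "", err
+	}
+	return v.AsString(), nil
+}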
+
+// Timestamp returns a string representation of the current date and time.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
+// returns a string in this format.
+func Timestamp() (cty.Value, error) {
+	return TimestampFunc.Call([]cty.Value{})
+}
+
+// TimeAdd adds a duration to a timestamp, returning a new timestamp.
+//
+// In the Terraform language, timestamps are conventionally represented as
+// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
+// the timestamp argument to be a string conforming to this syntax.
+//
+// `duration` is a string representation of a time difference, consisting of
+// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
+// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
+// number may be negative to indicate a negative duration, like `"-2h5m"`.
+//
+// The result is a string, also in RFC 3339 format, representing the result
+// of adding the given duration to the given timestamp.
+func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
+	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
new file mode 100644
index 00000000..af93f08d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
@@ -0,0 +1,140 @@
+package funcs
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"net/url"
+	"unicode/utf8"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+)
+
+// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
+var Base64DecodeFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		s := args[0].AsString()
+		sDec, err := base64.StdEncoding.DecodeString(s)
+		if err != nil {
+			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
+		}
+		if !utf8.Valid([]byte(sDec)) {
+			log.Printf("[DEBUG] the result of decoding the provided string is not valid UTF-8: %s", sDec)
+			return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the provided string is not valid UTF-8")
+		}
+		return cty.StringVal(string(sDec)), nil
+	},
+})
+
+// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
+var Base64EncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil + }, +}) + +// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in +// Base64 encoding. +var Base64GzipFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + s := args[0].AsString() + + var b bytes.Buffer + gz := gzip.NewWriter(&b) + if _, err := gz.Write([]byte(s)); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s) + } + if err := gz.Flush(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s) + } + if err := gz.Close(); err != nil { + return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s) + } + return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil + }, +}) + +// URLEncodeFunc constructs a function that applies URL encoding to a given string. +var URLEncodeFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "str", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + return cty.StringVal(url.QueryEscape(args[0].AsString())), nil + }, +}) + +// Base64Decode decodes a string containing a base64 sequence. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will also interpret the resulting bytes as +// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function +// produces an error. +func Base64Decode(str cty.Value) (cty.Value, error) { + return Base64DecodeFunc.Call([]cty.Value{str}) +} + +// Base64Encode applies Base64 encoding to a string. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, and then apply Base64 encoding to the result. +func Base64Encode(str cty.Value) (cty.Value, error) { + return Base64EncodeFunc.Call([]cty.Value{str}) +} + +// Base64Gzip compresses a string with gzip and then encodes the result in +// Base64 encoding. +// +// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4. +// +// Strings in the Terraform language are sequences of unicode characters rather +// than bytes, so this function will first encode the characters from the string +// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding. +func Base64Gzip(str cty.Value) (cty.Value, error) { + return Base64GzipFunc.Call([]cty.Value{str}) +} + +// URLEncode applies URL encoding to a given string. +// +// This function identifies characters in the given string that would have a +// special meaning when included as a query string argument in a URL and +// escapes them using RFC 3986 "percent encoding". 
+// +// If the given string contains non-ASCII characters, these are first encoded as +// UTF-8 and then percent encoding is applied separately to each UTF-8 byte. +func URLEncode(str cty.Value) (cty.Value, error) { + return URLEncodeFunc.Call([]cty.Value{str}) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go new file mode 100644 index 00000000..016b102d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go @@ -0,0 +1,360 @@ +package funcs + +import ( + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "unicode/utf8" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + homedir "github.com/mitchellh/go-homedir" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/function" +) + +// MakeFileFunc constructs a function that takes a file path and returns the +// contents of that file, either directly as a string (where valid UTF-8 is +// required) or as a string containing base64 bytes. +func MakeFileFunc(baseDir string, encBase64 bool) function.Function { + return function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + path := args[0].AsString() + src, err := readFileBytes(baseDir, path) + if err != nil { + return cty.UnknownVal(cty.String), err + } + + switch { + case encBase64: + enc := base64.StdEncoding.EncodeToString(src) + return cty.StringVal(enc), nil + default: + if !utf8.Valid(src) { + return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path) + } + return cty.StringVal(string(src)), nil + } + }, + }) +} + +// MakeTemplateFileFunc constructs a function that takes a file path and +// an arbitrary object of named values and attempts to render the referenced +// file as a template using HCL template syntax. +// +// The template itself may recursively call other functions so a callback +// must be provided to get access to those functions. The template cannot, +// however, access any variables defined in the scope: it is restricted only to +// those variables provided in the second function argument, to ensure that all +// dependencies on other graph nodes can be seen before executing this function. +// +// As a special exception, a referenced template file may not recursively call +// the templatefile function, since that would risk the same file being +// included into itself indefinitely. +func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function { + + params := []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + { + Name: "vars", + Type: cty.DynamicPseudoType, + }, + } + + loadTmpl := func(fn string) (hcl.Expression, error) { + // We re-use File here to ensure the same filename interpretation + // as it does, along with its other safety checks. 
+		tmplVal, err := File(baseDir, cty.StringVal(fn))
+		if err != nil {
+			return nil, err
+		}
+
+		expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
+		if diags.HasErrors() {
+			return nil, diags
+		}
+
+		return expr, nil
+	}
+
+	renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
+		if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
+			return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
+		}
+
+		ctx := &hcl.EvalContext{
+			Variables: varsVal.AsValueMap(),
+		}
+
+		// We'll pre-check references in the template here so we can give a
+		// more specialized error message than HCL would by default, so it's
+		// clearer that this problem is coming from a templatefile call.
+		for _, traversal := range expr.Variables() {
+			root := traversal.RootName()
+			if _, ok := ctx.Variables[root]; !ok {
+				return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
+			}
+		}
+
+		givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
+		funcs := make(map[string]function.Function, len(givenFuncs))
+		for name, fn := range givenFuncs {
+			if name == "templatefile" {
+				// We stub this one out to prevent recursive calls.
+				funcs[name] = function.New(&function.Spec{
+					Params: params,
+					Type: func(args []cty.Value) (cty.Type, error) {
+						return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
+					},
+				})
+				continue
+			}
+			funcs[name] = fn
+		}
+		ctx.Functions = funcs
+
+		val, diags := expr.Value(ctx)
+		if diags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+		return val, nil
+	}
+
+	return function.New(&function.Spec{
+		Params: params,
+		Type: func(args []cty.Value) (cty.Type, error) {
+			if !(args[0].IsKnown() && args[1].IsKnown()) {
+				return cty.DynamicPseudoType, nil
+			}
+
+			// We'll render our template now to see what result type it produces.
+			// A template consisting only of a single interpolation can potentially
+			// return any type.
+			expr, err := loadTmpl(args[0].AsString())
+			if err != nil {
+				return cty.DynamicPseudoType, err
+			}
+
+			// This is safe even if args[1] contains unknowns because the HCL
+			// template renderer itself knows how to short-circuit those.
+			val, err := renderTmpl(expr, args[1])
+			return val.Type(), err
+		},
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			expr, err := loadTmpl(args[0].AsString())
+			if err != nil {
+				return cty.DynamicVal, err
+			}
+			return renderTmpl(expr, args[1])
+		},
+	})
+
+}
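+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// building and calling the templatefile function for a base directory. The
+// file name "greeting.tmpl" and its assumed content `Hello, ${name}!` are
+// purely illustrative.
+func exampleTemplateFile() (cty.Value, error) {
+	// No other functions are exposed to the template in this sketch.
+	fn := MakeTemplateFileFunc(".", func() map[string]function.Function {
+		return map[string]function.Function{}
+	})
+	// Renders the template with the given vars, so ${name} becomes "world".
+	return fn.Call([]cty.Value{
+		cty.StringVal("greeting.tmpl"),
+		cty.ObjectVal(map[string]cty.Value{
+			"name": cty.StringVal("world"),
+		}),
+	})
+}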
+
+// MakeFileExistsFunc constructs a function that takes a path
+// and determines whether a file exists at that path.
+func MakeFileExistsFunc(baseDir string) function.Function {
+	return function.New(&function.Spec{
+		Params: []function.Parameter{
+			{
+				Name: "path",
+				Type: cty.String,
+			},
+		},
+		Type: function.StaticReturnType(cty.Bool),
+		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+			path := args[0].AsString()
+			path, err := homedir.Expand(path)
+			if err != nil {
+				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
+			}
+
+			if !filepath.IsAbs(path) {
+				path = filepath.Join(baseDir, path)
+			}
+
+			// Ensure that the path is canonical for the host OS
+			path = filepath.Clean(path)
+
+			fi, err := os.Stat(path)
+			if err != nil {
+				if os.IsNotExist(err) {
+					return cty.False, nil
+				}
+				return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
+			}
+
+			if fi.Mode().IsRegular() {
+				return cty.True, nil
+			}
+
+			return cty.False, fmt.Errorf("%s is not a regular file, but %q",
+				path, fi.Mode().String())
+		},
+	})
+}
+
+// BasenameFunc constructs a function that takes a string containing a filesystem path
+// and removes all except the last portion from it.
+var BasenameFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(filepath.Base(args[0].AsString())), nil
+	},
+})
+
+// DirnameFunc constructs a function that takes a string containing a filesystem path
+// and removes the last portion from it.
+var DirnameFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		return cty.StringVal(filepath.Dir(args[0].AsString())), nil
+	},
+})
+
+// AbsPathFunc constructs a function that converts a filesystem path to an absolute path.
+var AbsPathFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "path",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		absPath, err := filepath.Abs(args[0].AsString())
+		return cty.StringVal(filepath.ToSlash(absPath)), err
+	},
+})
+
+// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
+var PathExpandFunc = function.New(&function.Spec{ + Params: []function.Parameter{ + { + Name: "path", + Type: cty.String, + }, + }, + Type: function.StaticReturnType(cty.String), + Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) { + + homePath, err := homedir.Expand(args[0].AsString()) + return cty.StringVal(homePath), err + }, +}) + +func readFileBytes(baseDir, path string) ([]byte, error) { + path, err := homedir.Expand(path) + if err != nil { + return nil, fmt.Errorf("failed to expand ~: %s", err) + } + + if !filepath.IsAbs(path) { + path = filepath.Join(baseDir, path) + } + + // Ensure that the path is canonical for the host OS + path = filepath.Clean(path) + + src, err := ioutil.ReadFile(path) + if err != nil { + // ReadFile does not return Terraform-user-friendly error + // messages, so we'll provide our own. + if os.IsNotExist(err) { + return nil, fmt.Errorf("no file exists at %s", path) + } + return nil, fmt.Errorf("failed to read %s", path) + } + + return src, nil +} + +// File reads the contents of the file at the given path. +// +// The file must contain valid UTF-8 bytes, or this function will return an error. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func File(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, false) + return fn.Call([]cty.Value{path}) +} + +// FileExists determines whether a file exists at the given path. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileExists(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileExistsFunc(baseDir) + return fn.Call([]cty.Value{path}) +} + +// FileBase64 reads the contents of the file at the given path. +// +// The bytes from the file are encoded as base64 before returning. +// +// The underlying function implementation works relative to a particular base +// directory, so this wrapper takes a base directory string and uses it to +// construct the underlying function before calling it. +func FileBase64(baseDir string, path cty.Value) (cty.Value, error) { + fn := MakeFileFunc(baseDir, true) + return fn.Call([]cty.Value{path}) +} + +// Basename takes a string containing a filesystem path and removes all except the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. +func Basename(path cty.Value) (cty.Value, error) { + return BasenameFunc.Call([]cty.Value{path}) +} + +// Dirname takes a string containing a filesystem path and removes the last portion from it. +// +// The underlying function implementation works only with the path string and does not access the filesystem itself. +// It is therefore unable to take into account filesystem features such as symlinks. +// +// If the path is empty then the result is ".", representing the current working directory. 
+func Dirname(path cty.Value) (cty.Value, error) {
+	return DirnameFunc.Call([]cty.Value{path})
+}
+
+// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
+// the current user's home directory path.
+//
+// The underlying function implementation works only with the path string and does not access the filesystem itself.
+// It is therefore unable to take into account filesystem features such as symlinks.
+//
+// If the leading segment in the path is not `~` then the given path is returned unmodified.
+func Pathexpand(path cty.Value) (cty.Value, error) {
+	return PathExpandFunc.Call([]cty.Value{path})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
new file mode 100644
index 00000000..15cfe718
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
@@ -0,0 +1,155 @@
+package funcs
+
+import (
+	"math"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// CeilFunc constructs a function that returns the closest whole number greater
+// than or equal to the given value.
+var CeilFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Ceil(val))), nil
+	},
+})
+
+// FloorFunc constructs a function that returns the closest whole number lesser
+// than or equal to the given value.
+var FloorFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var val float64
+		if err := gocty.FromCtyValue(args[0], &val); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		return cty.NumberIntVal(int64(math.Floor(val))), nil
+	},
+})
+
+// LogFunc constructs a function that returns the logarithm of a given number in a given base.
+var LogFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "base",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var base float64
+		if err := gocty.FromCtyValue(args[1], &base); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
+	},
+})
+
+// PowFunc constructs a function that returns the given number raised to the given power.
+var PowFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+		{
+			Name: "power",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num float64
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		var power float64
+		if err := gocty.FromCtyValue(args[1], &power); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+
+		return cty.NumberFloatVal(math.Pow(num, power)), nil
+	},
+})
+
+// SignumFunc constructs a function that determines the sign of a number, returning a
+// number between -1 and 1 to represent the sign.
+var SignumFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "num",
+			Type: cty.Number,
+		},
+	},
+	Type: function.StaticReturnType(cty.Number),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var num int
+		if err := gocty.FromCtyValue(args[0], &num); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		switch {
+		case num < 0:
+			return cty.NumberIntVal(-1), nil
+		case num > 0:
+			return cty.NumberIntVal(+1), nil
+		default:
+			return cty.NumberIntVal(0), nil
+		}
+	},
+})
+
+// Ceil returns the closest whole number greater than or equal to the given value.
+func Ceil(num cty.Value) (cty.Value, error) {
+	return CeilFunc.Call([]cty.Value{num})
+}
+
+// Floor returns the closest whole number lesser than or equal to the given value.
+func Floor(num cty.Value) (cty.Value, error) {
+	return FloorFunc.Call([]cty.Value{num})
+}
+
+// Log returns the logarithm of a given number in a given base.
+func Log(num, base cty.Value) (cty.Value, error) {
+	return LogFunc.Call([]cty.Value{num, base})
+}
+
+// Pow returns the given number raised to the given power.
+func Pow(num, power cty.Value) (cty.Value, error) {
+	return PowFunc.Call([]cty.Value{num, power})
+}
+
+// Signum determines the sign of a number, returning a number between -1 and
+// 1 to represent the sign.
+func Signum(num cty.Value) (cty.Value, error) {
+	return SignumFunc.Call([]cty.Value{num})
+}
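+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// the wrappers above simply funnel through Function.Call, so Pow(2, 10) is
+// equivalent to calling PowFunc directly. The helper name examplePow is
+// hypothetical.
+func examplePow() (float64, error) {
+	// 2 raised to the power 10 is 1024.
+	v, err := Pow(cty.NumberIntVal(2), cty.NumberIntVal(10))
+	if err != nil {
+		return 0, err
+	}
+	f, _ := v.AsBigFloat().Float64()
+	return f, nil
+}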
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
new file mode 100644
index 00000000..c9ddf19e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
@@ -0,0 +1,280 @@
+package funcs
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/gocty"
+)
+
+// JoinFunc constructs a function that concatenates together the string elements
+// of one or more lists with a given separator.
+var JoinFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "separator",
+			Type: cty.String,
+		},
+	},
+	VarParam: &function.Parameter{
+		Name: "lists",
+		Type: cty.List(cty.String),
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		sep := args[0].AsString()
+		listVals := args[1:]
+		if len(listVals) < 1 {
+			return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
+		}
+
+		l := 0
+		for _, list := range listVals {
+			if !list.IsWhollyKnown() {
+				return cty.UnknownVal(cty.String), nil
+			}
+			l += list.LengthInt()
+		}
+
+		items := make([]string, 0, l)
+		for ai, list := range listVals {
+			ei := 0
+			for it := list.ElementIterator(); it.Next(); {
+				_, val := it.Element()
+				if val.IsNull() {
+					if len(listVals) > 1 {
+						return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
+					}
+					return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
+				}
+				items = append(items, val.AsString())
+				ei++
+			}
+		}
+
+		return cty.StringVal(strings.Join(items, sep)), nil
+	},
+})
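+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// JoinFunc accepts one or more lists after the separator, so joining two
+// lists concatenates their elements. The helper name exampleJoin is
+// hypothetical.
+func exampleJoin() (string, error) {
+	// Produces "a-b-c".
+	v, err := JoinFunc.Call([]cty.Value{
+		cty.StringVal("-"),
+		cty.ListVal([]cty.Value{cty.StringVal("a"), cty.StringVal("b")}),
+		cty.ListVal([]cty.Value{cty.StringVal("c")}),
+	})
+	if err != nil {
+		return "", err
+	}
+	return v.AsString(), nil
+}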
+
+// SortFunc constructs a function that re-orders the elements of a given list
+// of strings so that they are in ascending lexicographical order.
+var SortFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "list",
+			Type: cty.List(cty.String),
+		},
+	},
+	Type: function.StaticReturnType(cty.List(cty.String)),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		listVal := args[0]
+
+		if !listVal.IsWhollyKnown() {
+			// If some of the element values aren't known yet then we
+			// can't yet predict the order of the result.
+			return cty.UnknownVal(retType), nil
+		}
+		if listVal.LengthInt() == 0 { // Easy path
+			return listVal, nil
+		}
+
+		list := make([]string, 0, listVal.LengthInt())
+		for it := listVal.ElementIterator(); it.Next(); {
+			iv, v := it.Element()
+			if v.IsNull() {
+				return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
+			}
+			list = append(list, v.AsString())
+		}
+
+		sort.Strings(list)
+		retVals := make([]cty.Value, len(list))
+		for i, s := range list {
+			retVals[i] = cty.StringVal(s)
+		}
+		return cty.ListVal(retVals), nil
+	},
+})
+
+// SplitFunc constructs a function that divides a given string by a given separator,
+// returning a list of strings containing the characters between the separator sequences.
+var SplitFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "separator",
+			Type: cty.String,
+		},
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.List(cty.String)),
+	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+		sep := args[0].AsString()
+		str := args[1].AsString()
+		elems := strings.Split(str, sep)
+		elemVals := make([]cty.Value, len(elems))
+		for i, s := range elems {
+			elemVals[i] = cty.StringVal(s)
+		}
+		if len(elemVals) == 0 {
+			return cty.ListValEmpty(cty.String), nil
+		}
+		return cty.ListVal(elemVals), nil
+	},
+})
+
+// ChompFunc constructs a function that removes newline characters at the end of a string.
+var ChompFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+		return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
+	},
+})
+
+// IndentFunc constructs a function that adds a given number of spaces to the
+// beginnings of all but the first line in a given multi-line string.
+var IndentFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "spaces",
+			Type: cty.Number,
+		},
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		var spaces int
+		if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
+			return cty.UnknownVal(cty.String), err
+		}
+		data := args[1].AsString()
+		pad := strings.Repeat(" ", spaces)
+		return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
+	},
+})
+
+// ReplaceFunc constructs a function that searches a given string for another
+// given substring, and replaces each occurrence with a given replacement string.
+var ReplaceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+		{
+			Name: "substr",
+			Type: cty.String,
+		},
+		{
+			Name: "replace",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		str := args[0].AsString()
+		substr := args[1].AsString()
+		replace := args[2].AsString()
+
+		// We search/replace using a regexp if the string is surrounded
+		// in forward slashes.
+		if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
+			re, err := regexp.Compile(substr[1 : len(substr)-1])
+			if err != nil {
+				return cty.UnknownVal(cty.String), err
+			}
+
+			return cty.StringVal(re.ReplaceAllString(str, replace)), nil
+		}
+
+		return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
+	},
+})
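+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// when the search string is wrapped in forward slashes, ReplaceFunc compiles
+// it as a regular expression, so capture groups may be referenced in the
+// replacement. The helper name exampleRegexReplace is hypothetical.
+func exampleRegexReplace() (string, error) {
+	// Produces "hello, world" by rewriting "hello world" via the pattern.
+	v, err := ReplaceFunc.Call([]cty.Value{
+		cty.StringVal("hello world"),
+		cty.StringVal(`/(\w+) (\w+)/`),
+		cty.StringVal("$1, $2"),
+	})
+	if err != nil {
+		return "", err
+	}
+	return v.AsString(), nil
+}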
+
+// TitleFunc constructs a function that converts the first letter of each word
+// in the given string to uppercase.
+var TitleFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return cty.StringVal(strings.Title(args[0].AsString())), nil
+	},
+})
+
+// TrimSpaceFunc constructs a function that removes any space characters from
+// the start and end of the given string.
+var TrimSpaceFunc = function.New(&function.Spec{
+	Params: []function.Parameter{
+		{
+			Name: "str",
+			Type: cty.String,
+		},
+	},
+	Type: function.StaticReturnType(cty.String),
+	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+		return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
+	},
+})
+
+// Join concatenates together the string elements of one or more lists with a
+// given separator.
+func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
+	args := make([]cty.Value, len(lists)+1)
+	args[0] = sep
+	copy(args[1:], lists)
+	return JoinFunc.Call(args)
+}
+
+// Sort re-orders the elements of a given list of strings so that they are
+// in ascending lexicographical order.
+func Sort(list cty.Value) (cty.Value, error) {
+	return SortFunc.Call([]cty.Value{list})
+}
+
+// Split divides a given string by a given separator, returning a list of
+// strings containing the characters between the separator sequences.
+func Split(sep, str cty.Value) (cty.Value, error) {
+	return SplitFunc.Call([]cty.Value{sep, str})
+}
+
+// Chomp removes newline characters at the end of a string.
+func Chomp(str cty.Value) (cty.Value, error) {
+	return ChompFunc.Call([]cty.Value{str})
+}
+
+// Indent adds a given number of spaces to the beginnings of all but the first
+// line in a given multi-line string.
+func Indent(spaces, str cty.Value) (cty.Value, error) {
+	return IndentFunc.Call([]cty.Value{spaces, str})
+}
+
+// Replace searches a given string for another given substring,
+// and replaces all occurrences with a given replacement string.
+func Replace(str, substr, replace cty.Value) (cty.Value, error) {
+	return ReplaceFunc.Call([]cty.Value{str, substr, replace})
+}
+
+// Title converts the first letter of each word in the given string to uppercase.
+func Title(str cty.Value) (cty.Value, error) {
+	return TitleFunc.Call([]cty.Value{str})
+}
+
+// TrimSpace removes any space characters from the start and end of the given string.
+func TrimSpace(str cty.Value) (cty.Value, error) {
+	return TrimSpaceFunc.Call([]cty.Value{str})
+}
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
new file mode 100644
index 00000000..b77a55fd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/functions.go
@@ -0,0 +1,153 @@
+package lang
+
+import (
+	"fmt"
+
+	ctyyaml "github.com/zclconf/go-cty-yaml"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/function"
+	"github.com/zclconf/go-cty/cty/function/stdlib"
+
+	"github.com/hashicorp/terraform/lang/funcs"
+)
+
+var impureFunctions = []string{
+	"bcrypt",
+	"timestamp",
+	"uuid",
+}
+
+// Functions returns the set of functions that should be used when evaluating
+// expressions in the receiving scope.
+func (s *Scope) Functions() map[string]function.Function {
+	s.funcsLock.Lock()
+	if s.funcs == nil {
+		// Some of our functions are just directly the cty stdlib functions.
+		// Others are implemented in the subdirectory "funcs" here in this
+		// repository. New functions should generally start out their lives
+		// in the "funcs" directory and potentially graduate to cty stdlib
+		// later if the functionality seems to be something domain-agnostic
+		// that would be useful to all applications using cty functions.
+
+		s.funcs = map[string]function.Function{
+			"abs":              stdlib.AbsoluteFunc,
+			"abspath":          funcs.AbsPathFunc,
+			"basename":         funcs.BasenameFunc,
+			"base64decode":     funcs.Base64DecodeFunc,
+			"base64encode":     funcs.Base64EncodeFunc,
+			"base64gzip":       funcs.Base64GzipFunc,
+			"base64sha256":     funcs.Base64Sha256Func,
+			"base64sha512":     funcs.Base64Sha512Func,
+			"bcrypt":           funcs.BcryptFunc,
+			"ceil":             funcs.CeilFunc,
+			"chomp":            funcs.ChompFunc,
+			"cidrhost":         funcs.CidrHostFunc,
+			"cidrnetmask":      funcs.CidrNetmaskFunc,
+			"cidrsubnet":       funcs.CidrSubnetFunc,
+			"coalesce":         funcs.CoalesceFunc,
+			"coalescelist":     funcs.CoalesceListFunc,
+			"compact":          funcs.CompactFunc,
+			"concat":           stdlib.ConcatFunc,
+			"contains":         funcs.ContainsFunc,
+			"csvdecode":        stdlib.CSVDecodeFunc,
+			"dirname":          funcs.DirnameFunc,
+			"distinct":         funcs.DistinctFunc,
+			"element":          funcs.ElementFunc,
+			"chunklist":        funcs.ChunklistFunc,
+			"file":             funcs.MakeFileFunc(s.BaseDir, false),
+			"fileexists":       funcs.MakeFileExistsFunc(s.BaseDir),
+			"filebase64":       funcs.MakeFileFunc(s.BaseDir, true),
+			"filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir),
+			"filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir),
+			"filemd5":          funcs.MakeFileMd5Func(s.BaseDir),
+			"filesha1":         funcs.MakeFileSha1Func(s.BaseDir),
+			"filesha256":       funcs.MakeFileSha256Func(s.BaseDir),
+			"filesha512":       funcs.MakeFileSha512Func(s.BaseDir),
+			"flatten":          funcs.FlattenFunc,
+			"floor":            funcs.FloorFunc,
+			"format":           stdlib.FormatFunc,
+			"formatdate":       stdlib.FormatDateFunc,
+			"formatlist":       stdlib.FormatListFunc,
+			"indent":           funcs.IndentFunc,
+			"index":            funcs.IndexFunc,
+			"join":             funcs.JoinFunc,
+			"jsondecode":       stdlib.JSONDecodeFunc,
+			"jsonencode":       stdlib.JSONEncodeFunc,
+			"keys":             funcs.KeysFunc,
+			"length":           funcs.LengthFunc,
+			"list":             funcs.ListFunc,
+			"log":              funcs.LogFunc,
+			"lookup":           funcs.LookupFunc,
+			"lower":            stdlib.LowerFunc,
+			"map":              funcs.MapFunc,
+			"matchkeys":        funcs.MatchkeysFunc,
+			"max":              stdlib.MaxFunc,
+			"md5":              funcs.Md5Func,
+			"merge":            funcs.MergeFunc,
+			"min":              stdlib.MinFunc,
+			"pathexpand":       funcs.PathExpandFunc,
+			"pow":              funcs.PowFunc,
+			"range":            stdlib.RangeFunc,
+			"replace":          funcs.ReplaceFunc,
+			"reverse":          funcs.ReverseFunc,
+			"rsadecrypt":       funcs.RsaDecryptFunc,
+			"setintersection":  stdlib.SetIntersectionFunc,
+			"setproduct":       funcs.SetProductFunc,
+			"setunion":         stdlib.SetUnionFunc,
+			"sha1":             funcs.Sha1Func,
+			"sha256":           funcs.Sha256Func,
+			"sha512":           funcs.Sha512Func,
+			"signum":           funcs.SignumFunc,
+			"slice":            funcs.SliceFunc,
+			"sort":             funcs.SortFunc,
+			"split":            funcs.SplitFunc,
+			"strrev":           stdlib.ReverseFunc,
+			"substr":           stdlib.SubstrFunc,
+			"timestamp":        funcs.TimestampFunc,
+			"timeadd":          funcs.TimeAddFunc,
+			"title":            funcs.TitleFunc,
+			"tostring":         funcs.MakeToFunc(cty.String),
+			"tonumber":         funcs.MakeToFunc(cty.Number),
+			"tobool":           funcs.MakeToFunc(cty.Bool),
+			"toset":            funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)),
+			"tolist":           funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)),
+			"tomap":            funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)),
+			"transpose":        funcs.TransposeFunc,
+			"trimspace":        funcs.TrimSpaceFunc,
+			"upper":            stdlib.UpperFunc,
+			"urlencode":        funcs.URLEncodeFunc,
+			"uuid":             funcs.UUIDFunc,
+			"uuidv5":           funcs.UUIDV5Func,
+			"values":           funcs.ValuesFunc,
+			"yamldecode":       ctyyaml.YAMLDecodeFunc,
+			"yamlencode":       ctyyaml.YAMLEncodeFunc,
+			"zipmap":           funcs.ZipmapFunc,
+		}
+
+		s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function {
+			// The templatefile function prevents recursive calls to itself
+			// by copying this map and overwriting the "templatefile" entry.
+			return s.funcs
+		})
+
+		if s.PureOnly {
+			// Force our few impure functions to return unknown so that we
+			// can defer evaluating them until a later pass.
+			for _, name := range impureFunctions {
+				s.funcs[name] = function.Unpredictable(s.funcs[name])
+			}
+		}
+	}
+	s.funcsLock.Unlock()
+
+	return s.funcs
+}
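+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// with PureOnly set, impure functions such as "timestamp" are wrapped with
+// function.Unpredictable and yield unknown values instead of executing.
+// The helper name examplePureOnly is hypothetical.
+func examplePureOnly() (bool, error) {
+	s := &Scope{PureOnly: true}
+	ts := s.Functions()["timestamp"]
+	v, err := ts.Call(nil)
+	if err != nil {
+		return false, err
+	}
+	// v.IsKnown() reports false here, deferring evaluation to a later pass.
+	return v.IsKnown(), nil
+}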
+
+var unimplFunc = function.New(&function.Spec{
+	Type: func([]cty.Value) (cty.Type, error) {
+		return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented")
+	},
+	Impl: func([]cty.Value, cty.Type) (cty.Value, error) {
+		return cty.DynamicVal, fmt.Errorf("function not yet implemented")
+	},
+})
diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go
new file mode 100644
index 00000000..d688477a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/references.go
@@ -0,0 +1,81 @@
+package lang
+
+import (
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/lang/blocktoattr"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// References finds all of the references in the given set of traversals,
+// returning diagnostics if any of the traversals cannot be interpreted as a
+// reference.
+//
+// This function does not do any de-duplication of references, since references
+// have source location information embedded in them and so any invalid
+// references that are duplicated should have errors reported for each
+// occurrence.
+//
+// If the returned diagnostics contains errors then the result may be
+// incomplete or invalid. Otherwise, the returned slice has one reference per
+// given traversal, though it is not guaranteed that the references will
+// appear in the same order as the given traversals.
+func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) { + if len(traversals) == 0 { + return nil, nil + } + + var diags tfdiags.Diagnostics + refs := make([]*addrs.Reference, 0, len(traversals)) + + for _, traversal := range traversals { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if ref == nil { + continue + } + refs = append(refs, ref) + } + + return refs, diags +} + +// ReferencesInBlock is a helper wrapper around References that first searches +// the given body for traversals, before converting those traversals to +// references. +// +// A block schema must be provided so that this function can determine where in +// the body variables are expected. +func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) { + if body == nil { + return nil, nil + } + + // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or + // dynblock.VariablesHCLDec here because when we evaluate a block we'll + // first apply the dynamic block extension and _then_ the blocktoattr + // transform, and so blocktoattr.ExpandedVariables takes into account + // both of those transforms when it analyzes the body to ensure we find + // all of the references as if they'd already moved into their final + // locations, even though we can't expand dynamic blocks yet until we + // already know which variables are required. + // + // The set of cases we want to detect here is covered by the tests for + // the plan graph builder in the main 'terraform' package, since it's + // in a better position to test this due to having mock providers etc + // available. + traversals := blocktoattr.ExpandedVariables(body, schema) + return References(traversals) +} + +// ReferencesInExpr is a helper wrapper around References that first searches +// the given expression for traversals, before converting those traversals +// to references. +func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) { + if expr == nil { + return nil, nil + } + traversals := expr.Variables() + return References(traversals) +} diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go new file mode 100644 index 00000000..98fca6ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/lang/scope.go @@ -0,0 +1,34 @@ +package lang + +import ( + "sync" + + "github.com/zclconf/go-cty/cty/function" + + "github.com/hashicorp/terraform/addrs" +) + +// Scope is the main type in this package, allowing dynamic evaluation of +// blocks and expressions based on some contextual information that informs +// which variables and functions will be available. +type Scope struct { + // Data is used to resolve references in expressions. + Data Data + + // SelfAddr is the address that the "self" object should be an alias of, + // or nil if the "self" object should not be available at all. + SelfAddr addrs.Referenceable + + // BaseDir is the base directory used by any interpolation functions that + // accept filesystem paths as arguments. + BaseDir string + + // PureOnly can be set to true to request that any non-pure functions + // produce unknown value results rather than actually executing. This is + // important during a plan phase to avoid generating results that could + // then differ during apply. 
+ PureOnly bool + + funcs map[string]function.Function + funcsLock sync.Mutex +} diff --git a/vendor/github.com/hashicorp/terraform/main.go b/vendor/github.com/hashicorp/terraform/main.go index 23758120..c9340780 100644 --- a/vendor/github.com/hashicorp/terraform/main.go +++ b/vendor/github.com/hashicorp/terraform/main.go @@ -6,18 +6,23 @@ import ( "io/ioutil" "log" "os" + "path/filepath" "runtime" "strings" "sync" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/command/format" "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/svchost/disco" "github.com/mattn/go-colorable" "github.com/mattn/go-shellwords" "github.com/mitchellh/cli" + "github.com/mitchellh/colorstring" "github.com/mitchellh/panicwrap" "github.com/mitchellh/prefixedio" + + backendInit "github.com/hashicorp/terraform/backend/init" ) const ( @@ -95,9 +100,18 @@ func realMain() int { return wrappedMain() } +func init() { + Ui = &cli.PrefixedUi{ + AskPrefix: OutputPrefix, + OutputPrefix: OutputPrefix, + InfoPrefix: OutputPrefix, + ErrorPrefix: ErrorPrefix, + Ui: &cli.BasicUi{Writer: os.Stdout}, + } +} + func wrappedMain() int { - // We always need to close the DebugInfo before we exit. - defer terraform.CloseDebugInfo() + var err error log.SetOutput(os.Stderr) log.Printf( @@ -106,38 +120,50 @@ func wrappedMain() int { log.Printf("[INFO] Go runtime version: %s", runtime.Version()) log.Printf("[INFO] CLI args: %#v", os.Args) - // Load the configuration - config := BuiltinConfig - if err := config.Discover(Ui); err != nil { - Ui.Error(fmt.Sprintf("Error discovering plugins: %s", err)) - return 1 + config, diags := LoadConfig() + if len(diags) > 0 { + // Since we haven't instantiated a command.Meta yet, we need to do + // some things manually here and use some "safe" defaults for things + // that command.Meta could otherwise figure out in smarter ways. + Ui.Error("There are some problems with the CLI configuration:") + for _, diag := range diags { + earlyColor := &colorstring.Colorize{ + Colors: colorstring.DefaultColors, + Disable: true, // Disable color to be conservative until we know better + Reset: true, + } + // We don't currently have access to the source code cache for + // the parser used to load the CLI config, so we can't show + // source code snippets in early diagnostics. + Ui.Error(format.Diagnostic(diag, nil, earlyColor, 78)) + } + if diags.HasErrors() { + Ui.Error("As a result of the above problems, Terraform may not behave as intended.\n\n") + // We continue to run anyway, since Terraform has reasonable defaults. + } } - // Load the configuration file if we have one, that can be used to - // define extra providers and provisioners. - clicfgFile, err := cliConfigFile() - if err != nil { - Ui.Error(fmt.Sprintf("Error loading CLI configuration: \n\n%s", err)) - return 1 - } + // Get any configured credentials from the config and initialize + // a service discovery object. + credsSrc := credentialsSource(config) + services := disco.NewWithCredentialsSource(credsSrc) - if clicfgFile != "" { - usrcfg, err := LoadConfig(clicfgFile) - if err != nil { - Ui.Error(fmt.Sprintf("Error loading CLI configuration: \n\n%s", err)) - return 1 - } + // Initialize the backends. 
+ backendInit.Init(services) - config = *config.Merge(usrcfg) + // In tests, Commands may already be set to provide mock commands + if Commands == nil { + initCommands(config, services) } // Run checkpoint - go runCheckpoint(&config) + go runCheckpoint(config) // Make sure we clean up any managed plugins at the end of this defer plugin.CleanupClients() // Get the command line args. + binName := filepath.Base(os.Args[0]) args := os.Args[1:] // Build the CLI so far, we do this so we can query the subcommand. @@ -179,15 +205,20 @@ func wrappedMain() int { // Rebuild the CLI with any modified args. log.Printf("[INFO] CLI command args: %#v", args) cliRunner = &cli.CLI{ + Name: binName, Args: args, Commands: Commands, HelpFunc: helpFunc, HelpWriter: os.Stdout, + + Autocomplete: true, + AutocompleteInstall: "install-autocomplete", + AutocompleteUninstall: "uninstall-autocomplete", } - // Initialize the TFConfig settings for the commands... - ContextOpts.Providers = config.ProviderFactories() - ContextOpts.Provisioners = config.ProvisionerFactories() + // Pass in the overriding plugin paths from config + PluginOverrides.Providers = config.Providers + PluginOverrides.Provisioners = config.Provisioners exitCode, err := cliRunner.Run() if err != nil { @@ -200,7 +231,12 @@ func wrappedMain() int { func cliConfigFile() (string, error) { mustExist := true - configFilePath := os.Getenv("TERRAFORM_CONFIG") + + configFilePath := os.Getenv("TF_CLI_CONFIG_FILE") + if configFilePath == "" { + configFilePath = os.Getenv("TERRAFORM_CONFIG") + } + if configFilePath == "" { var err error configFilePath, err = ConfigFile() diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go new file mode 100644 index 00000000..87c8431e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/moduledeps/dependencies.go @@ -0,0 +1,43 @@ +package moduledeps + +import ( + "github.com/hashicorp/terraform/plugin/discovery" +) + +// Providers describes a set of provider dependencies for a given module. +// +// Each named provider instance can have one version constraint. +type Providers map[ProviderInstance]ProviderDependency + +// ProviderDependency describes the dependency for a particular provider +// instance, including both the set of allowed versions and the reason for +// the dependency. +type ProviderDependency struct { + Constraints discovery.Constraints + Reason ProviderDependencyReason +} + +// ProviderDependencyReason is an enumeration of reasons why a dependency might be +// present. +type ProviderDependencyReason int + +const ( + // ProviderDependencyExplicit means that there is an explicit "provider" + // block in the configuration for this module. + ProviderDependencyExplicit ProviderDependencyReason = iota + + // ProviderDependencyImplicit means that there is no explicit "provider" + // block but there is at least one resource that uses this provider. + ProviderDependencyImplicit + + // ProviderDependencyInherited is a special case of + // ProviderDependencyImplicit where a parent module has defined a + // configuration for the provider that has been inherited by at least one + // resource in this module. + ProviderDependencyInherited + + // ProviderDependencyFromState means that this provider is not currently + // referenced by configuration at all, but some existing instances in + // the state still depend on it. 
+	ProviderDependencyFromState
+)
diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/doc.go b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go
new file mode 100644
index 00000000..7eff0831
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/moduledeps/doc.go
@@ -0,0 +1,7 @@
+// Package moduledeps contains types that can be used to describe the
+// providers required for all of the modules in a module tree.
+//
+// It does not itself contain the functionality for populating such
+// data structures; that's in Terraform core, since this package intentionally
+// does not depend on terraform core to avoid package dependency cycles.
+package moduledeps
diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/module.go b/vendor/github.com/hashicorp/terraform/moduledeps/module.go
new file mode 100644
index 00000000..d6cbaf5c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/moduledeps/module.go
@@ -0,0 +1,204 @@
+package moduledeps
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/terraform/plugin/discovery"
+)
+
+// Module represents the dependencies of a single module, as well as being
+// a node in a tree of such structures representing the dependencies of
+// an entire configuration.
+type Module struct {
+	Name      string
+	Providers Providers
+	Children  []*Module
+}
+
+// WalkFunc is a callback type for use with Module.WalkTree
+type WalkFunc func(path []string, parent *Module, current *Module) error
+
+// WalkTree calls the given callback once for the receiver and then
+// once for each descendent, in an order such that parents are called
+// before their children and siblings are called in the order they
+// appear in the Children slice.
+//
+// When calling the callback, parent will be nil for the first call
+// for the receiving module, and then set to the direct parent of
+// each module for the subsequent calls.
+//
+// The path given to the callback is valid only until the callback
+// returns, after which it will be mutated and reused. Callbacks must
+// therefore copy the path slice if they wish to retain it.
+//
+// If the given callback returns an error, the walk will be aborted at
+// that point and that error returned to the caller.
+//
+// This function is not thread-safe for concurrent modifications of the
+// data structure, so it's the caller's responsibility to arrange for that
+// should it be needed.
+//
+// It is safe for a callback to modify the descendents of the "current"
+// module, including the ordering of the Children slice itself, but the
+// callback MUST NOT modify the parent module.
+func (m *Module) WalkTree(cb WalkFunc) error {
+	return walkModuleTree(make([]string, 0, 1), nil, m, cb)
+}
+
+func walkModuleTree(path []string, parent *Module, current *Module, cb WalkFunc) error {
+	path = append(path, current.Name)
+	err := cb(path, parent, current)
+	if err != nil {
+		return err
+	}
+
+	for _, child := range current.Children {
+		err := walkModuleTree(path, current, child, cb)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SortChildren sorts the Children slice into lexicographic order by
+// name, in-place.
+//
+// This is primarily useful prior to calling WalkTree so that the walk
+// will proceed in a consistent order.
+func (m *Module) SortChildren() {
+	sort.Sort(sortModules{m.Children})
+}
+
+// SortDescendents is a convenience wrapper for calling SortChildren on
+// the receiver and all of its descendent modules.
+func (m *Module) SortDescendents() {
+	m.WalkTree(func(path []string, parent *Module, current *Module) error {
+		current.SortChildren()
+		return nil
+	})
+}
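+
+// Example (editor's illustrative sketch, not part of the upstream source):
+// collecting the path of every module in a tree. Note the copy: as documented
+// on WalkTree, the path slice is only valid during the callback.
+func exampleWalkPaths(root *Module) ([][]string, error) {
+	var paths [][]string
+	err := root.WalkTree(func(path []string, parent *Module, current *Module) error {
+		p := make([]string, len(path))
+		copy(p, path)
+		paths = append(paths, p)
+		return nil
+	})
+	return paths, err
+}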
+
+type sortModules struct {
+	modules []*Module
+}
+
+func (s sortModules) Len() int {
+	return len(s.modules)
+}
+
+func (s sortModules) Less(i, j int) bool {
+	cmp := strings.Compare(s.modules[i].Name, s.modules[j].Name)
+	return cmp < 0
+}
+
+func (s sortModules) Swap(i, j int) {
+	s.modules[i], s.modules[j] = s.modules[j], s.modules[i]
+}
+
+// PluginRequirements produces a PluginRequirements structure that can
+// be used with discovery.PluginMetaSet.ConstrainVersions to identify
+// suitable plugins to satisfy the module's provider dependencies.
+//
+// This method only considers the direct requirements of the receiver.
+// Use AllPluginRequirements to flatten the dependencies for the
+// entire tree of modules.
+//
+// Requirements returned by this method include only version constraints,
+// and apply no particular SHA256 hash constraint.
+func (m *Module) PluginRequirements() discovery.PluginRequirements {
+	ret := make(discovery.PluginRequirements)
+	for inst, dep := range m.Providers {
+		// m.Providers is keyed on provider names, such as "aws.foo".
+		// A PluginRequirements wants keys to be provider *types*, such
+		// as "aws". If there are multiple aliases for the same
+		// provider then we will flatten them into a single requirement
+		// by combining their constraint sets.
+		pty := inst.Type()
+		if existing, exists := ret[pty]; exists {
+			ret[pty].Versions = existing.Versions.Append(dep.Constraints)
+		} else {
+			ret[pty] = &discovery.PluginConstraints{
+				Versions: dep.Constraints,
+			}
+		}
+	}
+	return ret
+}
+
+// AllPluginRequirements calls PluginRequirements for the receiver and all
+// of its descendents, and merges the result into a single PluginRequirements
+// structure that would satisfy all of the modules together.
+//
+// Requirements returned by this method include only version constraints,
+// and apply no particular SHA256 hash constraint.
+func (m *Module) AllPluginRequirements() discovery.PluginRequirements {
+	var ret discovery.PluginRequirements
+	m.WalkTree(func(path []string, parent *Module, current *Module) error {
+		ret = ret.Merge(current.PluginRequirements())
+		return nil
+	})
+	return ret
+}
+
+// Equal returns true if the receiver is the root of an identical tree
+// to the other given Module. This is a deep comparison that considers
+// the equality of all downstream modules too.
+//
+// The children are considered to be ordered, so callers may wish to use
+// SortDescendents first to normalize the order of the slices of child nodes.
+//
+// The implementation of this function is not optimized since it is provided
+// primarily for use in tests.
+func (m *Module) Equal(other *Module) bool {
+	// take care of nils first
+	if m == nil && other == nil {
+		return true
+	} else if (m == nil && other != nil) || (m != nil && other == nil) {
+		return false
+	}
+
+	if m.Name != other.Name {
+		return false
+	}
+
+	if len(m.Providers) != len(other.Providers) {
+		return false
+	}
+	if len(m.Children) != len(other.Children) {
+		return false
+	}
+
+	// Can't use reflect.DeepEqual on this provider structure because
+	// the nested Constraints objects contain function pointers that
+	// never compare as equal. So we'll need to walk it the long way.
+ for inst, dep := range m.Providers { + if _, exists := other.Providers[inst]; !exists { + return false + } + + if dep.Reason != other.Providers[inst].Reason { + return false + } + + // Constraints are not too easy to compare robustly, so + // we'll just use their string representations as a proxy + // for now. + if dep.Constraints.String() != other.Providers[inst].Constraints.String() { + return false + } + } + + // Above we already checked that we have the same number of children + // in each module, so now we just need to check that they are + // recursively equal. + for i := range m.Children { + if !m.Children[i].Equal(other.Children[i]) { + return false + } + } + + // If we fall out here then they are equal + return true +} diff --git a/vendor/github.com/hashicorp/terraform/moduledeps/provider.go b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go new file mode 100644 index 00000000..89ceefb2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/moduledeps/provider.go @@ -0,0 +1,30 @@ +package moduledeps + +import ( + "strings" +) + +// ProviderInstance describes a particular provider instance by its full name, +// like "null" or "aws.foo". +type ProviderInstance string + +// Type returns the provider type of this instance. For example, for an instance +// named "aws.foo" the type is "aws". +func (p ProviderInstance) Type() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + t = t[:dotPos] + } + return t +} + +// Alias returns the alias of this provider, if any. An instance named "aws.foo" +// has the alias "foo", while an instance named just "docker" has no alias, +// so the empty string would be returned. +func (p ProviderInstance) Alias() string { + t := string(p) + if dotPos := strings.Index(t, "."); dotPos != -1 { + return t[dotPos+1:] + } + return "" +} diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go new file mode 100644 index 00000000..c3e6a32a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/action.go @@ -0,0 +1,22 @@ +package plans + +type Action rune + +const ( + NoOp Action = 0 + Create Action = '+' + Read Action = '←' + Update Action = '~' + DeleteThenCreate Action = '∓' + CreateThenDelete Action = '±' + Delete Action = '-' +) + +//go:generate stringer -type Action + +// IsReplace returns true if the action is one of the two actions that +// represents replacing an existing object with a new object: +// DeleteThenCreate or CreateThenDelete. +func (a Action) IsReplace() bool { + return a == DeleteThenCreate || a == CreateThenDelete +} diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go new file mode 100644 index 00000000..be43ab17 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/action_string.go @@ -0,0 +1,49 @@ +// Code generated by "stringer -type Action"; DO NOT EDIT. + +package plans + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
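+	// For example, the expression x[Create-43] below compiles only while
+	// Create ('+') still has rune value 43; changing the constant makes the
+	// index non-zero and produces the "invalid array index" error.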
+ var x [1]struct{} + _ = x[NoOp-0] + _ = x[Create-43] + _ = x[Read-8592] + _ = x[Update-126] + _ = x[DeleteThenCreate-8723] + _ = x[CreateThenDelete-177] + _ = x[Delete-45] +} + +const ( + _Action_name_0 = "NoOp" + _Action_name_1 = "Create" + _Action_name_2 = "Delete" + _Action_name_3 = "Update" + _Action_name_4 = "CreateThenDelete" + _Action_name_5 = "Read" + _Action_name_6 = "DeleteThenCreate" +) + +func (i Action) String() string { + switch { + case i == 0: + return _Action_name_0 + case i == 43: + return _Action_name_1 + case i == 45: + return _Action_name_2 + case i == 126: + return _Action_name_3 + case i == 177: + return _Action_name_4 + case i == 8592: + return _Action_name_5 + case i == 8723: + return _Action_name_6 + default: + return "Action(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go new file mode 100644 index 00000000..d7e0dcdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes.go @@ -0,0 +1,308 @@ +package plans + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +// Changes describes various actions that Terraform will attempt to take if +// the corresponding plan is applied. +// +// A Changes object can be rendered into a visual diff (by the caller, using +// code in another package) for display to the user. +type Changes struct { + // Resources tracks planned changes to resource instance objects. + Resources []*ResourceInstanceChangeSrc + + // Outputs tracks planned changes to output values. + // + // Note that although an in-memory plan contains planned changes for + // outputs throughout the configuration, a plan serialized + // to disk retains only the root outputs because they are + // externally-visible, while other outputs are implementation details and + // can be easily re-calculated during the apply phase. Therefore only root + // module outputs will survive a round-trip through a plan file. + Outputs []*OutputChangeSrc +} + +// NewChanges returns a valid Changes object that describes no changes. +func NewChanges() *Changes { + return &Changes{} +} + +func (c *Changes) Empty() bool { + for _, res := range c.Resources { + if res.Action != NoOp { + return false + } + } + return true +} + +// ResourceInstance returns the planned change for the current object of the +// resource instance of the given address, if any. Returns nil if no change is +// planned. +func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc { + addrStr := addr.String() + for _, rc := range c.Resources { + if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed { + return rc + } + } + + return nil +} + +// ResourceInstanceDeposed returns the plan change of a deposed object of +// the resource instance of the given address, if any. Returns nil if no change +// is planned. +func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc { + addrStr := addr.String() + for _, rc := range c.Resources { + if rc.Addr.String() == addrStr && rc.DeposedKey == key { + return rc + } + } + + return nil +} + +// OutputValue returns the planned change for the output value with the +// given address, if any. Returns nil if no change is planned.
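+//
+// A hedged sketch of a lookup (the output name is illustrative only):
+//
+//	addr := addrs.OutputValue{Name: "vpc_id"}.Absolute(addrs.RootModuleInstance)
+//	if oc := c.OutputValue(addr); oc != nil {
+//		// the plan records a change for this root module output
+//	}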
+func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc { + addrStr := addr.String() + for _, oc := range c.Outputs { + if oc.Addr.String() == addrStr { + return oc + } + } + + return nil +} + +// SyncWrapper returns a wrapper object around the receiver that can be used +// to make certain changes to the receiver in a concurrency-safe way, as long +// as all callers share the same wrapper object. +func (c *Changes) SyncWrapper() *ChangesSync { + return &ChangesSync{ + changes: c, + } +} + +// ResourceInstanceChange describes a change to a particular resource instance +// object. +type ResourceInstanceChange struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. + Addr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // Change is an embedded description of the change. + Change + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Encode produces a variant of the receiver that has its change values +// serialized so it can be written to a plan file. Pass the implied type of the +// corresponding resource type schema for correct operation. +func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) { + cs, err := rc.Change.Encode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChangeSrc{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + ProviderAddr: rc.ProviderAddr, + ChangeSrc: *cs, + RequiredReplace: rc.RequiredReplace, + Private: rc.Private, + }, err +} + +// Simplify will, where possible, produce a change with a simpler action than +// the receiver, given a flag indicating whether the caller is dealing with +// a normal apply or a destroy. This flag deals with the fact that Terraform +// Core uses a specialized graph node type for destroying; only that +// specialized node should set "destroying" to true. +// +// The following table shows the simplification behavior: +// +// Action Destroying? New Action +// --------+-------------+----------- +// Create true NoOp +// Delete false NoOp +// Replace true Delete +// Replace false Create +// +// For any combination not in the above table, Simplify just returns the +// receiver as-is.
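+//
+// For example (a sketch): a planned replacement seen by the specialized
+// destroy node collapses to a plain delete:
+//
+//	simple := rc.Simplify(true) // with rc.Action == DeleteThenCreate
+//	// simple.Action == Delete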
+func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange { + if destroying { + switch rc.Action { + case Delete: + // We'll fall out and just return rc verbatim, then. + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Delete, + Before: rc.Before, + After: cty.NullVal(rc.Before.Type()), + }, + } + default: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + }, + } + } + } else { + switch rc.Action { + case Delete: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: NoOp, + Before: rc.Before, + After: rc.Before, + }, + } + case CreateThenDelete, DeleteThenCreate: + return &ResourceInstanceChange{ + Addr: rc.Addr, + DeposedKey: rc.DeposedKey, + Private: rc.Private, + ProviderAddr: rc.ProviderAddr, + Change: Change{ + Action: Create, + Before: cty.NullVal(rc.After.Type()), + After: rc.After, + }, + } + } + } + + // If we fall out here then our change is already simple enough. + return rc +} + +// OutputChange describes a change to an output value. +type OutputChange struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // Change is an embedded description of the change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + Change + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. + Sensitive bool +} + +// Encode produces a variant of the receiver that has its change values +// serialized so it can be written to a plan file. +func (oc *OutputChange) Encode() (*OutputChangeSrc, error) { + cs, err := oc.Change.Encode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChangeSrc{ + Addr: oc.Addr, + ChangeSrc: *cs, + Sensitive: oc.Sensitive, + }, err +} + +// Change describes a single change with a given action. +type Change struct { + // Action defines what kind of change is being made. + Action Action + + // Interpretation of Before and After depend on Action: + // + // NoOp Before and After are the same, unchanged value + // Create Before is nil, and After is the expected value after create. + // Read Before is any prior value (nil if no prior), and After is the + // value that was or will be read. + // Update Before is the value prior to update, and After is the expected + // value after update. + // Replace As with Update. + // Delete Before is the value prior to delete, and After is always nil. + // + // Unknown values may appear anywhere within the Before and After values, + // either as the values themselves or as nested elements within known + // collections/structures. + Before, After cty.Value +} + +// Encode produces a variant of the receiver that has its change values +// serialized so it can be written to a plan file.
Pass the type constraint +// that the values are expected to conform to; to properly decode the values +// later, an identical type constraint must be provided at that time. +// +// Where a Change is embedded in some other struct, it's generally better +// to call the corresponding Encode method of that struct rather than working +// directly with its embedded Change. +func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) { + beforeDV, err := NewDynamicValue(c.Before, ty) + if err != nil { + return nil, err + } + afterDV, err := NewDynamicValue(c.After, ty) + if err != nil { + return nil, err + } + + return &ChangeSrc{ + Action: c.Action, + Before: beforeDV, + After: afterDV, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go new file mode 100644 index 00000000..90153ea7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes_src.go @@ -0,0 +1,190 @@ +package plans + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/zclconf/go-cty/cty" +) + +// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange. +// Pass the associated resource type's schema type to method Decode to +// obtain a ResourceInstanceChange. +type ResourceInstanceChangeSrc struct { + // Addr is the absolute address of the resource instance that the change + // will apply to. + Addr addrs.AbsResourceInstance + + // DeposedKey is the identifier for a deposed object associated with the + // given instance, or states.NotDeposed if this change applies to the + // current object. + // + // A Replace change for a resource with create_before_destroy set will + // create a new DeposedKey temporarily during replacement. In that case, + // DeposedKey in the plan is always states.NotDeposed, representing that + // the current object is being replaced with the deposed. + DeposedKey states.DeposedKey + + // Provider is the address of the provider configuration that was used + // to plan this change, and thus the configuration that must also be + // used to apply it. + ProviderAddr addrs.AbsProviderConfig + + // ChangeSrc is an embedded description of the not-yet-decoded change. + ChangeSrc + + // RequiredReplace is a set of paths that caused the change action to be + // Replace rather than Update. Always nil if the change action is not + // Replace. + // + // This is retained only for UI-plan-rendering purposes and so it does not + // currently survive a round-trip through a saved plan file. + RequiredReplace cty.PathSet + + // Private allows a provider to stash any extra data that is opaque to + // Terraform that relates to this change. Terraform will save this + // byte-for-byte and return it to the provider in the apply call. + Private []byte +} + +// Decode unmarshals the raw representation of the instance object being +// changed. Pass the implied type of the corresponding resource type schema +// for correct operation.
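+//
+// A minimal sketch, assuming the resource type's schema implies an object
+// type with a single string attribute:
+//
+//	ty := cty.Object(map[string]cty.Type{"id": cty.String})
+//	change, err := rcs.Decode(ty)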
+func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) { + change, err := rcs.ChangeSrc.Decode(ty) + if err != nil { + return nil, err + } + return &ResourceInstanceChange{ + Addr: rcs.Addr, + DeposedKey: rcs.DeposedKey, + ProviderAddr: rcs.ProviderAddr, + Change: *change, + RequiredReplace: rcs.RequiredReplace, + Private: rcs.Private, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc { + if rcs == nil { + return nil + } + ret := *rcs + + ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...) + + if len(ret.Private) != 0 { + private := make([]byte, len(ret.Private)) + copy(private, ret.Private) + ret.Private = private + } + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// OutputChangeSrc describes a change to an output value. +type OutputChangeSrc struct { + // Addr is the absolute address of the output value that the change + // will apply to. + Addr addrs.AbsOutputValue + + // ChangeSrc is an embedded description of the not-yet-decoded change. + // + // For output value changes, the type constraint for the DynamicValue + // instances is always cty.DynamicPseudoType. + ChangeSrc + + // Sensitive, if true, indicates that either the old or new value in the + // change is sensitive and so a rendered version of the plan in the UI + // should elide the actual values while still indicating the action of the + // change. + Sensitive bool +} + +// Decode unmarshals the raw representation of the output value being +// changed. +func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) { + change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType) + if err != nil { + return nil, err + } + return &OutputChange{ + Addr: ocs.Addr, + Change: *change, + Sensitive: ocs.Sensitive, + }, nil +} + +// DeepCopy creates a copy of the receiver where any pointers to nested mutable +// values are also copied, thus ensuring that future mutations of the receiver +// will not affect the copy. +// +// Some types used within a resource change are immutable by convention even +// though the Go language allows them to be mutated, such as the types from +// the addrs package. These are _not_ copied by this method, under the +// assumption that callers will behave themselves. +func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc { + if ocs == nil { + return nil + } + ret := *ocs + + ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy() + ret.ChangeSrc.After = ret.ChangeSrc.After.Copy() + + return &ret +} + +// ChangeSrc is a not-yet-decoded Change. +type ChangeSrc struct { + // Action defines what kind of change is being made. + Action Action + + // Before and After correspond to the fields of the same name in Change, + // but have not yet been decoded from the serialized value used for + // storage. + Before, After DynamicValue +} + +// Decode unmarshals the raw representations of the before and after values +// to produce a Change object. 
Pass the type constraint that the result must +// conform to. +// +// Where a ChangeSrc is embedded in some other struct, it's generally better +// to call the corresponding Decode method of that struct rather than working +// directly with its embedded Change. +func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) { + var err error + before := cty.NullVal(ty) + after := cty.NullVal(ty) + + if len(cs.Before) > 0 { + before, err = cs.Before.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'before' value: %s", err) + } + } + if len(cs.After) > 0 { + after, err = cs.After.Decode(ty) + if err != nil { + return nil, fmt.Errorf("error decoding 'after' value: %s", err) + } + } + return &Change{ + Action: cs.Action, + Before: before, + After: after, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go new file mode 100644 index 00000000..543e6c2b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes_state.go @@ -0,0 +1,15 @@ +package plans + +import ( + "github.com/hashicorp/terraform/states" +) + +// PlannedState merges the set of changes described by the receiver into the +// given prior state to produce the planned result state. +// +// The result is an approximation of the state as it would exist after +// applying these changes, omitting any values that cannot be determined until +// the changes are actually applied. +func (c *Changes) PlannedState(prior *states.State) (*states.State, error) { + panic("Changes.PlannedState not yet implemented") +} diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go new file mode 100644 index 00000000..6b4ff98f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go @@ -0,0 +1,144 @@ +package plans + +import ( + "fmt" + "sync" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" +) + +// ChangesSync is a wrapper around a Changes that provides a concurrency-safe +// interface to insert new changes and retrieve copies of existing changes. +// +// Each ChangesSync is independent of all others, so all concurrent writers +// to a particular Changes must share a single ChangesSync. Behavior is +// undefined if any other caller makes changes to the underlying Changes +// object or its nested objects concurrently with any of the methods of a +// particular ChangesSync. +type ChangesSync struct { + lock sync.Mutex + changes *Changes +} + +// AppendResourceInstanceChange records the given resource instance change in +// the set of planned resource changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) { + if cs == nil { + panic("AppendResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Resources = append(cs.changes.Resources, s) +} + +// GetResourceInstanceChange searches the set of resource instance changes for +// one matching the given address and generation, returning it if it exists. +// +// If no such change exists, nil is returned. 
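+//
+// (Pass states.CurrentGen as the generation to look up the change for the
+// instance's current object, or a specific states.DeposedKey to look up the
+// change for a deposed object.)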
+// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc { + if cs == nil { + panic("GetResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + if gen == states.CurrentGen { + return cs.changes.ResourceInstance(addr).DeepCopy() + } + if dk, ok := gen.(states.DeposedKey); ok { + return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy() + } + panic(fmt.Sprintf("unsupported generation value %#v", gen)) +} + +// RemoveResourceInstanceChange searches the set of resource instance changes +// for one matching the given address and generation, and removes it from the +// set if it exists. +func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) { + if cs == nil { + panic("RemoveResourceInstanceChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + dk := states.NotDeposed + if realDK, ok := gen.(states.DeposedKey); ok { + dk = realDK + } + + addrStr := addr.String() + for i, r := range cs.changes.Resources { + if r.Addr.String() != addrStr || r.DeposedKey != dk { + continue + } + copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:]) + cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1] + return + } +} + +// AppendOutputChange records the given output value change in the set of +// planned value changes. +// +// The caller must ensure that there are no concurrent writes to the given +// change while this method is running, but it is safe to resume mutating +// it after this method returns without affecting the saved change. +func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) { + if cs == nil { + panic("AppendOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + s := changeSrc.DeepCopy() + cs.changes.Outputs = append(cs.changes.Outputs, s) +} + +// GetOutputChange searches the set of output value changes for one matching +// the given address, returning it if it exists. +// +// If no such change exists, nil is returned. +// +// The returned object is a deep copy of the change recorded in the plan, so +// callers may mutate it although it's generally better (less confusing) to +// treat planned changes as immutable after they've been initially constructed. +func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc { + if cs == nil { + panic("GetOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + return cs.changes.OutputValue(addr).DeepCopy() +} + +// RemoveOutputChange searches the set of output value changes for one matching +// the given address, and removes it from the set if it exists.
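+//
+// Sketch of the overall ChangesSync round trip (names are illustrative):
+// all concurrent writers share the one wrapper returned by SyncWrapper:
+//
+//	sync := changes.SyncWrapper()
+//	sync.AppendOutputChange(ocs)
+//	copied := sync.GetOutputChange(ocs.Addr) // an independent deep copy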
+func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) { + if cs == nil { + panic("RemoveOutputChange on nil ChangesSync") + } + cs.lock.Lock() + defer cs.lock.Unlock() + + addrStr := addr.String() + for i, o := range cs.changes.Outputs { + if o.Addr.String() != addrStr { + continue + } + copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:]) + cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1] + return + } +} diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go new file mode 100644 index 00000000..01ca3892 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/doc.go @@ -0,0 +1,5 @@ +// Package plans contains the types that are used to represent Terraform plans. +// +// A plan describes a set of changes that Terraform will make to update remote +// objects to match with changes to the configuration. +package plans diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go new file mode 100644 index 00000000..51fbb24c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go @@ -0,0 +1,96 @@ +package plans + +import ( + "github.com/zclconf/go-cty/cty" + ctymsgpack "github.com/zclconf/go-cty/cty/msgpack" +) + +// DynamicValue is the representation in the plan of a value whose type cannot +// be determined at compile time, such as because it comes from a schema +// defined in a plugin. +// +// This type is used as an indirection so that the overall plan structure can +// be decoded without schema available, and then the dynamic values accessed +// at a later time once the appropriate schema has been determined. +// +// Internally, DynamicValue is a serialized version of a cty.Value created +// against a particular type constraint. Callers should not directly access +// the serialized form, whose format may change in future. Values of this +// type must always be created by calling NewDynamicValue. +// +// The zero value of DynamicValue is nil, and represents the absence of a +// value within the Go type system. This is distinct from a cty.NullVal +// result, which represents the absence of a value within the cty type system. +type DynamicValue []byte + +// NewDynamicValue creates a DynamicValue by serializing the given value +// against the given type constraint. The value must conform to the type +// constraint, or the result is undefined. +// +// If the value to be encoded has no predefined schema (for example, for +// module output values and input variables), set the type constraint to +// cty.DynamicPseudoType in order to save type information as part of the +// value, and then also pass cty.DynamicPseudoType to method Decode to recover +// the original value. +// +// cty.NilVal can be used to represent the absence of a value, but callers +// must be careful to distinguish values that are absent at the Go layer +// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal +// results). +func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) { + // If we're given cty.NilVal (the zero value of cty.Value, which is + // distinct from a typed null value created by cty.NullVal) then we'll + // assume the caller is trying to represent the _absence_ of a value, + // and so we'll return a nil DynamicValue.
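+	// For example, NewDynamicValue(cty.NilVal, cty.String) returns a nil
+	// DynamicValue, whereas NewDynamicValue(cty.NullVal(cty.String), cty.String)
+	// returns a non-nil DynamicValue that decodes back to a typed null.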
+ if val == cty.NilVal { + return DynamicValue(nil), nil + } + + // Currently our internal encoding is msgpack, via ctymsgpack. + buf, err := ctymsgpack.Marshal(val, ty) + if err != nil { + return nil, err + } + + return DynamicValue(buf), nil +} + +// Decode retrieves the effective value from the receiver by interpreting the +// serialized form against the given type constraint. For correct results, +// the type constraint must match (or be consistent with) the one that was +// used to create the receiver. +// +// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and +// instead represents the absence of a value. +func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) { + if v == nil { + return cty.NilVal, nil + } + + return ctymsgpack.Unmarshal([]byte(v), ty) +} + +// ImpliedType returns the type implied by the serialized structure of the +// receiving value. +// +// This will not necessarily be exactly the type that was given when the +// value was encoded, and in particular must not be used for values that +// were encoded with their static type given as cty.DynamicPseudoType. +// It is however safe to use this method for values that were encoded using +// their runtime type as the conforming type, with the result being +// semantically equivalent but with all lists and sets represented as tuples, +// and maps as objects, due to ambiguities of the serialization. +func (v DynamicValue) ImpliedType() (cty.Type, error) { + return ctymsgpack.ImpliedType([]byte(v)) +} + +// Copy produces a copy of the receiver with a distinct backing array. +func (v DynamicValue) Copy() DynamicValue { + if v == nil { + return nil + } + + ret := make(DynamicValue, len(v)) + copy(ret, v) + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/plans/internal/planproto/doc.go b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/doc.go new file mode 100644 index 00000000..d6ea0f78 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/doc.go @@ -0,0 +1,7 @@ +// Package planproto is home to the Go stubs generated from the tfplan protobuf +// schema. +// +// This is an internal package to be used only by Terraform's planfile package. +// From elsewhere in Terraform, use the API exported by the planfile package +// itself. +package planproto diff --git a/vendor/github.com/hashicorp/terraform/plans/internal/planproto/generate.sh b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/generate.sh new file mode 100644 index 00000000..8603edfb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/generate.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +# We do not run protoc under go:generate because we want to ensure that all +# dependencies of go:generate are "go get"-able for general dev environment +# usability. To compile all protobuf files in this repository, run +# "make protobuf" at the top-level. + +set -eu + +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" + +cd "$DIR" + +protoc --go_out=paths=source_relative:.
planfile.proto diff --git a/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.pb.go b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.pb.go new file mode 100644 index 00000000..6feb0930 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.pb.go @@ -0,0 +1,965 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: planfile.proto + +package planproto // import "github.com/hashicorp/terraform/plans/internal/planproto" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Action describes the type of action planned for an object. +// Not all action values are valid for all object types. +type Action int32 + +const ( + Action_NOOP Action = 0 + Action_CREATE Action = 1 + Action_READ Action = 2 + Action_UPDATE Action = 3 + Action_DELETE Action = 5 + Action_DELETE_THEN_CREATE Action = 6 + Action_CREATE_THEN_DELETE Action = 7 +) + +var Action_name = map[int32]string{ + 0: "NOOP", + 1: "CREATE", + 2: "READ", + 3: "UPDATE", + 5: "DELETE", + 6: "DELETE_THEN_CREATE", + 7: "CREATE_THEN_DELETE", +} +var Action_value = map[string]int32{ + "NOOP": 0, + "CREATE": 1, + "READ": 2, + "UPDATE": 3, + "DELETE": 5, + "DELETE_THEN_CREATE": 6, + "CREATE_THEN_DELETE": 7, +} + +func (x Action) String() string { + return proto.EnumName(Action_name, int32(x)) +} +func (Action) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{0} +} + +type ResourceInstanceChange_ResourceMode int32 + +const ( + ResourceInstanceChange_managed ResourceInstanceChange_ResourceMode = 0 + ResourceInstanceChange_data ResourceInstanceChange_ResourceMode = 1 +) + +var ResourceInstanceChange_ResourceMode_name = map[int32]string{ + 0: "managed", + 1: "data", +} +var ResourceInstanceChange_ResourceMode_value = map[string]int32{ + "managed": 0, + "data": 1, +} + +func (x ResourceInstanceChange_ResourceMode) String() string { + return proto.EnumName(ResourceInstanceChange_ResourceMode_name, int32(x)) +} +func (ResourceInstanceChange_ResourceMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{3, 0} +} + +// Plan is the root message type for the tfplan file +type Plan struct { + // Version is incremented whenever there is a breaking change to + // the serialization format. Programs reading serialized plans should + // verify that version is set to the expected value and abort processing + // if not. A breaking change is any change that may cause an older + // consumer to interpret the structure incorrectly. This number will + // not be incremented if an existing consumer can either safely ignore + // changes to the format or if an existing consumer would fail to process + // the file for another message- or field-specific reason. + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + // The variables that were set when creating the plan. Each value is + // a msgpack serialization of an HCL value. 
+ Variables map[string]*DynamicValue `protobuf:"bytes,2,rep,name=variables,proto3" json:"variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // An unordered set of proposed changes to resources throughout the + // configuration, including any nested modules. Use the address of + // each resource to determine which module it belongs to. + ResourceChanges []*ResourceInstanceChange `protobuf:"bytes,3,rep,name=resource_changes,json=resourceChanges,proto3" json:"resource_changes,omitempty"` + // An unordered set of proposed changes to outputs in the root module + // of the configuration. This set also includes "no action" changes for + // outputs that are not changing, as context for detecting inconsistencies + // at apply time. + OutputChanges []*OutputChange `protobuf:"bytes,4,rep,name=output_changes,json=outputChanges,proto3" json:"output_changes,omitempty"` + // An unordered set of target addresses to include when applying. If no + // target addresses are present, the plan applies to the whole + // configuration. + TargetAddrs []string `protobuf:"bytes,5,rep,name=target_addrs,json=targetAddrs,proto3" json:"target_addrs,omitempty"` + // The version string for the Terraform binary that created this plan. + TerraformVersion string `protobuf:"bytes,14,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"` + // SHA256 digests of all of the provider plugin binaries that were used + // in the creation of this plan. + ProviderHashes map[string]*Hash `protobuf:"bytes,15,rep,name=provider_hashes,json=providerHashes,proto3" json:"provider_hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Backend is a description of the backend configuration and other related + // settings at the time the plan was created. 
+ Backend *Backend `protobuf:"bytes,13,opt,name=backend,proto3" json:"backend,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Plan) Reset() { *m = Plan{} } +func (m *Plan) String() string { return proto.CompactTextString(m) } +func (*Plan) ProtoMessage() {} +func (*Plan) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{0} +} +func (m *Plan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Plan.Unmarshal(m, b) +} +func (m *Plan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Plan.Marshal(b, m, deterministic) +} +func (dst *Plan) XXX_Merge(src proto.Message) { + xxx_messageInfo_Plan.Merge(dst, src) +} +func (m *Plan) XXX_Size() int { + return xxx_messageInfo_Plan.Size(m) +} +func (m *Plan) XXX_DiscardUnknown() { + xxx_messageInfo_Plan.DiscardUnknown(m) +} + +var xxx_messageInfo_Plan proto.InternalMessageInfo + +func (m *Plan) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Plan) GetVariables() map[string]*DynamicValue { + if m != nil { + return m.Variables + } + return nil +} + +func (m *Plan) GetResourceChanges() []*ResourceInstanceChange { + if m != nil { + return m.ResourceChanges + } + return nil +} + +func (m *Plan) GetOutputChanges() []*OutputChange { + if m != nil { + return m.OutputChanges + } + return nil +} + +func (m *Plan) GetTargetAddrs() []string { + if m != nil { + return m.TargetAddrs + } + return nil +} + +func (m *Plan) GetTerraformVersion() string { + if m != nil { + return m.TerraformVersion + } + return "" +} + +func (m *Plan) GetProviderHashes() map[string]*Hash { + if m != nil { + return m.ProviderHashes + } + return nil +} + +func (m *Plan) GetBackend() *Backend { + if m != nil { + return m.Backend + } + return nil +} + +// Backend is a description of backend configuration and other related settings. 
+type Backend struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Workspace string `protobuf:"bytes,3,opt,name=workspace,proto3" json:"workspace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Backend) Reset() { *m = Backend{} } +func (m *Backend) String() string { return proto.CompactTextString(m) } +func (*Backend) ProtoMessage() {} +func (*Backend) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{1} +} +func (m *Backend) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Backend.Unmarshal(m, b) +} +func (m *Backend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Backend.Marshal(b, m, deterministic) +} +func (dst *Backend) XXX_Merge(src proto.Message) { + xxx_messageInfo_Backend.Merge(dst, src) +} +func (m *Backend) XXX_Size() int { + return xxx_messageInfo_Backend.Size(m) +} +func (m *Backend) XXX_DiscardUnknown() { + xxx_messageInfo_Backend.DiscardUnknown(m) +} + +var xxx_messageInfo_Backend proto.InternalMessageInfo + +func (m *Backend) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Backend) GetConfig() *DynamicValue { + if m != nil { + return m.Config + } + return nil +} + +func (m *Backend) GetWorkspace() string { + if m != nil { + return m.Workspace + } + return "" +} + +// Change represents a change made to some object, transforming it from an old +// state to a new state. +type Change struct { + // Not all action values are valid for all object types. Consult + // the documentation for any message that embeds Change. + Action Action `protobuf:"varint,1,opt,name=action,proto3,enum=tfplan.Action" json:"action,omitempty"` + // msgpack-encoded HCL values involved in the change. + // - For update and replace, two values are provided that give the old and new values, + // respectively. + // - For create, one value is provided that gives the new value to be created + // - For delete, one value is provided that describes the value being deleted + // - For read, two values are provided that give the prior value for this object + // (or null, if no prior value exists) and the value that was or will be read, + // respectively. + // - For no-op, one value is provided that is left unmodified by this non-change. 
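+	// For example (sketch), an update would be encoded with two entries:
+	//
+	//	Values: [oldValue, newValue]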
+ Values []*DynamicValue `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Change) Reset() { *m = Change{} } +func (m *Change) String() string { return proto.CompactTextString(m) } +func (*Change) ProtoMessage() {} +func (*Change) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{2} +} +func (m *Change) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Change.Unmarshal(m, b) +} +func (m *Change) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Change.Marshal(b, m, deterministic) +} +func (dst *Change) XXX_Merge(src proto.Message) { + xxx_messageInfo_Change.Merge(dst, src) +} +func (m *Change) XXX_Size() int { + return xxx_messageInfo_Change.Size(m) +} +func (m *Change) XXX_DiscardUnknown() { + xxx_messageInfo_Change.DiscardUnknown(m) +} + +var xxx_messageInfo_Change proto.InternalMessageInfo + +func (m *Change) GetAction() Action { + if m != nil { + return m.Action + } + return Action_NOOP +} + +func (m *Change) GetValues() []*DynamicValue { + if m != nil { + return m.Values + } + return nil +} + +type ResourceInstanceChange struct { + // module_path is an address to the module that defined this resource. + // module_path is omitted for resources in the root module. For descendent modules + // it is a string like module.foo.module.bar as would be seen at the beginning of a + // resource address. The format of this string is not yet frozen and so external + // callers should treat it as an opaque key for filtering purposes. + ModulePath string `protobuf:"bytes,1,opt,name=module_path,json=modulePath,proto3" json:"module_path,omitempty"` + // mode is the resource mode. + Mode ResourceInstanceChange_ResourceMode `protobuf:"varint,2,opt,name=mode,proto3,enum=tfplan.ResourceInstanceChange_ResourceMode" json:"mode,omitempty"` + // type is the resource type name, like "aws_instance". + Type string `protobuf:"bytes,3,opt,name=type,proto3" json:"type,omitempty"` + // name is the logical name of the resource as defined in configuration. + // For example, in aws_instance.foo this would be "foo". + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // instance_key is either an integer index or a string key, depending on which iteration + // attributes ("count" or "for_each") are being used for this resource. If none + // are in use, this field is omitted. + // + // Types that are valid to be assigned to InstanceKey: + // *ResourceInstanceChange_Str + // *ResourceInstanceChange_Int + InstanceKey isResourceInstanceChange_InstanceKey `protobuf_oneof:"instance_key"` + // deposed_key, if set, indicates that this change applies to a deposed + // object for the indicated instance with the given deposed key. If not + // set, the change applies to the instance's current object. + DeposedKey string `protobuf:"bytes,7,opt,name=deposed_key,json=deposedKey,proto3" json:"deposed_key,omitempty"` + // provider is the address of the provider configuration that this change + // was planned with, and thus the configuration that must be used to + // apply it. + Provider string `protobuf:"bytes,8,opt,name=provider,proto3" json:"provider,omitempty"` + // Description of the proposed change. May use "create", "read", "update", + // "replace" and "delete" actions. 
"no-op" changes are not currently used here + // but consumers must accept and discard them to allow for future expansion. + Change *Change `protobuf:"bytes,9,opt,name=change,proto3" json:"change,omitempty"` + // raw blob value provided by the provider as additional context for the + // change. Must be considered an opaque value for any consumer other than + // the provider that generated it, and will be returned verbatim to the + // provider during the subsequent apply operation. + Private []byte `protobuf:"bytes,10,opt,name=private,proto3" json:"private,omitempty"` + // An unordered set of paths that prompted the change action to be + // "replace" rather than "update". Empty for any action other than + // "replace". + RequiredReplace []*Path `protobuf:"bytes,11,rep,name=required_replace,json=requiredReplace,proto3" json:"required_replace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResourceInstanceChange) Reset() { *m = ResourceInstanceChange{} } +func (m *ResourceInstanceChange) String() string { return proto.CompactTextString(m) } +func (*ResourceInstanceChange) ProtoMessage() {} +func (*ResourceInstanceChange) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{3} +} +func (m *ResourceInstanceChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResourceInstanceChange.Unmarshal(m, b) +} +func (m *ResourceInstanceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResourceInstanceChange.Marshal(b, m, deterministic) +} +func (dst *ResourceInstanceChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceInstanceChange.Merge(dst, src) +} +func (m *ResourceInstanceChange) XXX_Size() int { + return xxx_messageInfo_ResourceInstanceChange.Size(m) +} +func (m *ResourceInstanceChange) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceInstanceChange.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceInstanceChange proto.InternalMessageInfo + +type isResourceInstanceChange_InstanceKey interface { + isResourceInstanceChange_InstanceKey() +} + +type ResourceInstanceChange_Str struct { + Str string `protobuf:"bytes,5,opt,name=str,proto3,oneof"` +} +type ResourceInstanceChange_Int struct { + Int int64 `protobuf:"varint,6,opt,name=int,proto3,oneof"` +} + +func (*ResourceInstanceChange_Str) isResourceInstanceChange_InstanceKey() {} +func (*ResourceInstanceChange_Int) isResourceInstanceChange_InstanceKey() {} + +func (m *ResourceInstanceChange) GetInstanceKey() isResourceInstanceChange_InstanceKey { + if m != nil { + return m.InstanceKey + } + return nil +} + +func (m *ResourceInstanceChange) GetModulePath() string { + if m != nil { + return m.ModulePath + } + return "" +} + +func (m *ResourceInstanceChange) GetMode() ResourceInstanceChange_ResourceMode { + if m != nil { + return m.Mode + } + return ResourceInstanceChange_managed +} + +func (m *ResourceInstanceChange) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ResourceInstanceChange) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ResourceInstanceChange) GetStr() string { + if x, ok := m.GetInstanceKey().(*ResourceInstanceChange_Str); ok { + return x.Str + } + return "" +} + +func (m *ResourceInstanceChange) GetInt() int64 { + if x, ok := m.GetInstanceKey().(*ResourceInstanceChange_Int); ok { + return x.Int + } + return 0 +} + +func (m *ResourceInstanceChange) GetDeposedKey() string { + if m != 
nil { + return m.DeposedKey + } + return "" +} + +func (m *ResourceInstanceChange) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *ResourceInstanceChange) GetChange() *Change { + if m != nil { + return m.Change + } + return nil +} + +func (m *ResourceInstanceChange) GetPrivate() []byte { + if m != nil { + return m.Private + } + return nil +} + +func (m *ResourceInstanceChange) GetRequiredReplace() []*Path { + if m != nil { + return m.RequiredReplace + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResourceInstanceChange) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResourceInstanceChange_OneofMarshaler, _ResourceInstanceChange_OneofUnmarshaler, _ResourceInstanceChange_OneofSizer, []interface{}{ + (*ResourceInstanceChange_Str)(nil), + (*ResourceInstanceChange_Int)(nil), + } +} + +func _ResourceInstanceChange_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResourceInstanceChange) + // instance_key + switch x := m.InstanceKey.(type) { + case *ResourceInstanceChange_Str: + b.EncodeVarint(5<<3 | proto.WireBytes) + b.EncodeStringBytes(x.Str) + case *ResourceInstanceChange_Int: + b.EncodeVarint(6<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.Int)) + case nil: + default: + return fmt.Errorf("ResourceInstanceChange.InstanceKey has unexpected type %T", x) + } + return nil +} + +func _ResourceInstanceChange_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResourceInstanceChange) + switch tag { + case 5: // instance_key.str + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.InstanceKey = &ResourceInstanceChange_Str{x} + return true, err + case 6: // instance_key.int + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.InstanceKey = &ResourceInstanceChange_Int{int64(x)} + return true, err + default: + return false, nil + } +} + +func _ResourceInstanceChange_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResourceInstanceChange) + // instance_key + switch x := m.InstanceKey.(type) { + case *ResourceInstanceChange_Str: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.Str))) + n += len(x.Str) + case *ResourceInstanceChange_Int: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.Int)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type OutputChange struct { + // Name of the output as defined in the root module. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Description of the proposed change. May use "no-op", "create", + // "update" and "delete" actions. + Change *Change `protobuf:"bytes,2,opt,name=change,proto3" json:"change,omitempty"` + // Sensitive, if true, indicates that one or more of the values given + // in "change" is sensitive and should not be shown directly in any + // rendered plan. 
+ Sensitive bool `protobuf:"varint,3,opt,name=sensitive,proto3" json:"sensitive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OutputChange) Reset() { *m = OutputChange{} } +func (m *OutputChange) String() string { return proto.CompactTextString(m) } +func (*OutputChange) ProtoMessage() {} +func (*OutputChange) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{4} +} +func (m *OutputChange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OutputChange.Unmarshal(m, b) +} +func (m *OutputChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OutputChange.Marshal(b, m, deterministic) +} +func (dst *OutputChange) XXX_Merge(src proto.Message) { + xxx_messageInfo_OutputChange.Merge(dst, src) +} +func (m *OutputChange) XXX_Size() int { + return xxx_messageInfo_OutputChange.Size(m) +} +func (m *OutputChange) XXX_DiscardUnknown() { + xxx_messageInfo_OutputChange.DiscardUnknown(m) +} + +var xxx_messageInfo_OutputChange proto.InternalMessageInfo + +func (m *OutputChange) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *OutputChange) GetChange() *Change { + if m != nil { + return m.Change + } + return nil +} + +func (m *OutputChange) GetSensitive() bool { + if m != nil { + return m.Sensitive + } + return false +} + +// DynamicValue represents a value whose type is not decided until runtime, +// often based on schema information obtained from a plugin. +// +// At present dynamic values are always encoded as msgpack, with extension +// id 0 used to represent the special "unknown" value indicating results +// that won't be known until after apply. +// +// In future other serialization formats may be used, possibly with a +// transitional period of including both as separate attributes of this type. +// Consumers must ignore attributes they don't support and fail if no supported +// attribute is present. The top-level format version will not be incremented +// for changes to the set of dynamic serialization formats. +type DynamicValue struct { + Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DynamicValue) Reset() { *m = DynamicValue{} } +func (m *DynamicValue) String() string { return proto.CompactTextString(m) } +func (*DynamicValue) ProtoMessage() {} +func (*DynamicValue) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{5} +} +func (m *DynamicValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DynamicValue.Unmarshal(m, b) +} +func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic) +} +func (dst *DynamicValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DynamicValue.Merge(dst, src) +} +func (m *DynamicValue) XXX_Size() int { + return xxx_messageInfo_DynamicValue.Size(m) +} +func (m *DynamicValue) XXX_DiscardUnknown() { + xxx_messageInfo_DynamicValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DynamicValue proto.InternalMessageInfo + +func (m *DynamicValue) GetMsgpack() []byte { + if m != nil { + return m.Msgpack + } + return nil +} + +// Hash represents a hash value. +// +// At present hashes always use the SHA256 algorithm. 
In future other hash +// algorithms may be used, possibly with a transitional period of including +// both as separate attributes of this type. Consumers must ignore attributes +// they don't support and fail if no supported attribute is present. The +// top-level format version will not be incremented for changes to the set of +// hash algorithms. +type Hash struct { + Sha256 []byte `protobuf:"bytes,1,opt,name=sha256,proto3" json:"sha256,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Hash) Reset() { *m = Hash{} } +func (m *Hash) String() string { return proto.CompactTextString(m) } +func (*Hash) ProtoMessage() {} +func (*Hash) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{6} +} +func (m *Hash) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Hash.Unmarshal(m, b) +} +func (m *Hash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Hash.Marshal(b, m, deterministic) +} +func (dst *Hash) XXX_Merge(src proto.Message) { + xxx_messageInfo_Hash.Merge(dst, src) +} +func (m *Hash) XXX_Size() int { + return xxx_messageInfo_Hash.Size(m) +} +func (m *Hash) XXX_DiscardUnknown() { + xxx_messageInfo_Hash.DiscardUnknown(m) +} + +var xxx_messageInfo_Hash proto.InternalMessageInfo + +func (m *Hash) GetSha256() []byte { + if m != nil { + return m.Sha256 + } + return nil +} + +// Path represents a set of steps to traverse into a data structure. It is +// used to refer to a sub-structure within a dynamic data structure presented +// separately. +type Path struct { + Steps []*Path_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{7} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path.Unmarshal(m, b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) +} +func (dst *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(dst, src) +} +func (m *Path) XXX_Size() int { + return xxx_messageInfo_Path.Size(m) +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetSteps() []*Path_Step { + if m != nil { + return m.Steps + } + return nil +} + +type Path_Step struct { + // Types that are valid to be assigned to Selector: + // *Path_Step_AttributeName + // *Path_Step_ElementKey + Selector isPath_Step_Selector `protobuf_oneof:"selector"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path_Step) Reset() { *m = Path_Step{} } +func (m *Path_Step) String() string { return proto.CompactTextString(m) } +func (*Path_Step) ProtoMessage() {} +func (*Path_Step) Descriptor() ([]byte, []int) { + return fileDescriptor_planfile_f1de017ed03cb7aa, []int{7, 0} +} +func (m *Path_Step) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path_Step.Unmarshal(m, b) +} +func (m *Path_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_Path_Step.Marshal(b, m, deterministic) +} +func (dst *Path_Step) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path_Step.Merge(dst, src) +} +func (m *Path_Step) XXX_Size() int { + return xxx_messageInfo_Path_Step.Size(m) +} +func (m *Path_Step) XXX_DiscardUnknown() { + xxx_messageInfo_Path_Step.DiscardUnknown(m) +} + +var xxx_messageInfo_Path_Step proto.InternalMessageInfo + +type isPath_Step_Selector interface { + isPath_Step_Selector() +} + +type Path_Step_AttributeName struct { + AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"` +} +type Path_Step_ElementKey struct { + ElementKey *DynamicValue `protobuf:"bytes,2,opt,name=element_key,json=elementKey,proto3,oneof"` +} + +func (*Path_Step_AttributeName) isPath_Step_Selector() {} +func (*Path_Step_ElementKey) isPath_Step_Selector() {} + +func (m *Path_Step) GetSelector() isPath_Step_Selector { + if m != nil { + return m.Selector + } + return nil +} + +func (m *Path_Step) GetAttributeName() string { + if x, ok := m.GetSelector().(*Path_Step_AttributeName); ok { + return x.AttributeName + } + return "" +} + +func (m *Path_Step) GetElementKey() *DynamicValue { + if x, ok := m.GetSelector().(*Path_Step_ElementKey); ok { + return x.ElementKey + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Path_Step) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Path_Step_OneofMarshaler, _Path_Step_OneofUnmarshaler, _Path_Step_OneofSizer, []interface{}{ + (*Path_Step_AttributeName)(nil), + (*Path_Step_ElementKey)(nil), + } +} + +func _Path_Step_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Path_Step) + // selector + switch x := m.Selector.(type) { + case *Path_Step_AttributeName: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.AttributeName) + case *Path_Step_ElementKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ElementKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Path_Step.Selector has unexpected type %T", x) + } + return nil +} + +func _Path_Step_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Path_Step) + switch tag { + case 1: // selector.attribute_name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Selector = &Path_Step_AttributeName{x} + return true, err + case 2: // selector.element_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DynamicValue) + err := b.DecodeMessage(msg) + m.Selector = &Path_Step_ElementKey{msg} + return true, err + default: + return false, nil + } +} + +func _Path_Step_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Path_Step) + // selector + switch x := m.Selector.(type) { + case *Path_Step_AttributeName: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.AttributeName))) + n += len(x.AttributeName) + case *Path_Step_ElementKey: + s := proto.Size(x.ElementKey) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*Plan)(nil), "tfplan.Plan") + proto.RegisterMapType((map[string]*Hash)(nil), "tfplan.Plan.ProviderHashesEntry") + 
proto.RegisterMapType((map[string]*DynamicValue)(nil), "tfplan.Plan.VariablesEntry") + proto.RegisterType((*Backend)(nil), "tfplan.Backend") + proto.RegisterType((*Change)(nil), "tfplan.Change") + proto.RegisterType((*ResourceInstanceChange)(nil), "tfplan.ResourceInstanceChange") + proto.RegisterType((*OutputChange)(nil), "tfplan.OutputChange") + proto.RegisterType((*DynamicValue)(nil), "tfplan.DynamicValue") + proto.RegisterType((*Hash)(nil), "tfplan.Hash") + proto.RegisterType((*Path)(nil), "tfplan.Path") + proto.RegisterType((*Path_Step)(nil), "tfplan.Path.Step") + proto.RegisterEnum("tfplan.Action", Action_name, Action_value) + proto.RegisterEnum("tfplan.ResourceInstanceChange_ResourceMode", ResourceInstanceChange_ResourceMode_name, ResourceInstanceChange_ResourceMode_value) +} + +func init() { proto.RegisterFile("planfile.proto", fileDescriptor_planfile_f1de017ed03cb7aa) } + +var fileDescriptor_planfile_f1de017ed03cb7aa = []byte{ + // 893 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x55, 0xe1, 0x6e, 0xe3, 0x44, + 0x10, 0xae, 0x63, 0xc7, 0x49, 0x26, 0xa9, 0x9b, 0x5b, 0x50, 0x65, 0x95, 0xd3, 0x11, 0x2c, 0xc1, + 0x85, 0x3b, 0x94, 0x4a, 0x41, 0x50, 0x0e, 0x7e, 0xa0, 0xf6, 0x1a, 0x29, 0xd5, 0x41, 0x1b, 0x2d, + 0xa5, 0x3f, 0xf8, 0x81, 0xb5, 0xb1, 0xa7, 0x89, 0x55, 0xc7, 0x36, 0xbb, 0x9b, 0xa0, 0x3c, 0x10, + 0x0f, 0xc1, 0x4b, 0xf0, 0x4c, 0x68, 0x77, 0x6d, 0x27, 0x95, 0x7a, 0xfd, 0x95, 0x9d, 0x6f, 0x66, + 0x3e, 0xcf, 0x7e, 0x33, 0xb3, 0x01, 0xaf, 0x48, 0x59, 0x76, 0x9f, 0xa4, 0x38, 0x2a, 0x78, 0x2e, + 0x73, 0xe2, 0xca, 0x7b, 0x85, 0x04, 0xff, 0x39, 0xe0, 0xcc, 0x52, 0x96, 0x11, 0x1f, 0x5a, 0x1b, + 0xe4, 0x22, 0xc9, 0x33, 0xdf, 0x1a, 0x58, 0x43, 0x87, 0x56, 0x26, 0x79, 0x07, 0x9d, 0x0d, 0xe3, + 0x09, 0x9b, 0xa7, 0x28, 0xfc, 0xc6, 0xc0, 0x1e, 0x76, 0xc7, 0x9f, 0x8d, 0x4c, 0xfa, 0x48, 0xa5, + 0x8e, 0xee, 0x2a, 0xef, 0x24, 0x93, 0x7c, 0x4b, 0x77, 0xd1, 0xe4, 0x0a, 0xfa, 0x1c, 0x45, 0xbe, + 0xe6, 0x11, 0x86, 0xd1, 0x92, 0x65, 0x0b, 0x14, 0xbe, 0xad, 0x19, 0x5e, 0x55, 0x0c, 0xb4, 0xf4, + 0x5f, 0x65, 0x42, 0xb2, 0x2c, 0xc2, 0xf7, 0x3a, 0x8c, 0x1e, 0x55, 0x79, 0xc6, 0x16, 0xe4, 0x27, + 0xf0, 0xf2, 0xb5, 0x2c, 0xd6, 0xb2, 0x26, 0x72, 0x34, 0xd1, 0xa7, 0x15, 0xd1, 0x8d, 0xf6, 0x96, + 0xe9, 0x87, 0xf9, 0x9e, 0x25, 0xc8, 0x17, 0xd0, 0x93, 0x8c, 0x2f, 0x50, 0x86, 0x2c, 0x8e, 0xb9, + 0xf0, 0x9b, 0x03, 0x7b, 0xd8, 0xa1, 0x5d, 0x83, 0x9d, 0x2b, 0x88, 0xbc, 0x85, 0x17, 0x12, 0x39, + 0x67, 0xf7, 0x39, 0x5f, 0x85, 0x95, 0x12, 0xde, 0xc0, 0x1a, 0x76, 0x68, 0xbf, 0x76, 0xdc, 0x95, + 0x92, 0x5c, 0xc1, 0x51, 0xc1, 0xf3, 0x4d, 0x12, 0x23, 0x0f, 0x97, 0x4c, 0x2c, 0x51, 0xf8, 0x47, + 0xba, 0x9a, 0xc1, 0x23, 0x61, 0x66, 0x65, 0xcc, 0x54, 0x87, 0x18, 0x75, 0xbc, 0xe2, 0x11, 0x48, + 0xbe, 0x86, 0xd6, 0x9c, 0x45, 0x0f, 0x98, 0xc5, 0xfe, 0xe1, 0xc0, 0x1a, 0x76, 0xc7, 0x47, 0x15, + 0xc5, 0x85, 0x81, 0x69, 0xe5, 0x3f, 0xa1, 0xe0, 0x3d, 0x96, 0x9a, 0xf4, 0xc1, 0x7e, 0xc0, 0xad, + 0x6e, 0x58, 0x87, 0xaa, 0x23, 0x79, 0x03, 0xcd, 0x0d, 0x4b, 0xd7, 0xe8, 0x37, 0x34, 0x59, 0xad, + 0xce, 0xe5, 0x36, 0x63, 0xab, 0x24, 0xba, 0x53, 0x3e, 0x6a, 0x42, 0x7e, 0x6c, 0xfc, 0x60, 0x9d, + 0xdc, 0xc0, 0x27, 0x4f, 0x54, 0xf9, 0x04, 0x71, 0xf0, 0x98, 0xb8, 0x57, 0x11, 0xab, 0xac, 0x3d, + 0xc2, 0x20, 0x81, 0x56, 0x59, 0x38, 0x21, 0xe0, 0xc8, 0x6d, 0x81, 0x25, 0x8b, 0x3e, 0x93, 0x6f, + 0xc0, 0x8d, 0xf2, 0xec, 0x3e, 0x59, 0x3c, 0x5b, 0x60, 0x19, 0x43, 0x5e, 0x42, 0xe7, 0xef, 0x9c, + 0x3f, 0x88, 0x82, 0x45, 0xe8, 0xdb, 0x9a, 0x66, 0x07, 0x04, 0x7f, 0x82, 0x6b, 0x1a, 0x4c, 0xbe, + 0x02, 
0x97, 0x45, 0xb2, 0x9a, 0x5d, 0x6f, 0xec, 0x55, 0xac, 0xe7, 0x1a, 0xa5, 0xa5, 0x57, 0x7d, + 0x5d, 0x57, 0x5a, 0xcd, 0xf1, 0x47, 0xbe, 0x6e, 0x62, 0x82, 0x7f, 0x6d, 0x38, 0x7e, 0x7a, 0x3c, + 0xc9, 0xe7, 0xd0, 0x5d, 0xe5, 0xf1, 0x3a, 0xc5, 0xb0, 0x60, 0x72, 0x59, 0xde, 0x10, 0x0c, 0x34, + 0x63, 0x72, 0x49, 0x7e, 0x06, 0x67, 0x95, 0xc7, 0x46, 0x2d, 0x6f, 0xfc, 0xf6, 0xf9, 0x69, 0xaf, + 0xe1, 0x5f, 0xf3, 0x18, 0xa9, 0x4e, 0xac, 0xc5, 0xb3, 0xf7, 0xc4, 0x23, 0xe0, 0x64, 0x6c, 0x85, + 0xbe, 0x63, 0x30, 0x75, 0x26, 0x04, 0x6c, 0x21, 0xb9, 0xdf, 0x54, 0xd0, 0xf4, 0x80, 0x2a, 0x43, + 0x61, 0x49, 0x26, 0x7d, 0x77, 0x60, 0x0d, 0x6d, 0x85, 0x25, 0x99, 0x54, 0x15, 0xc7, 0x58, 0xe4, + 0x02, 0xe3, 0x50, 0x75, 0xb6, 0x65, 0x2a, 0x2e, 0xa1, 0x0f, 0xb8, 0x25, 0x27, 0xd0, 0xae, 0x46, + 0xd3, 0x6f, 0x6b, 0x6f, 0x6d, 0x2b, 0x7d, 0xcd, 0xd6, 0xf9, 0x1d, 0xdd, 0xb5, 0x5a, 0xdf, 0x72, + 0xdd, 0x4a, 0xaf, 0x7a, 0x44, 0x0a, 0x9e, 0x6c, 0x98, 0x44, 0x1f, 0x06, 0xd6, 0xb0, 0x47, 0x2b, + 0x93, 0x9c, 0xa9, 0x97, 0xe0, 0xaf, 0x75, 0xc2, 0x31, 0x0e, 0x39, 0x16, 0xa9, 0x6a, 0x68, 0x57, + 0xf7, 0xa0, 0x9e, 0x24, 0xa5, 0x9b, 0xda, 0x7b, 0x13, 0x45, 0x4d, 0x50, 0xf0, 0x25, 0xf4, 0xf6, + 0xd5, 0x21, 0x5d, 0x68, 0xad, 0x58, 0xc6, 0x16, 0x18, 0xf7, 0x0f, 0x48, 0x1b, 0x9c, 0x98, 0x49, + 0xd6, 0xb7, 0x2e, 0x3c, 0xe8, 0x25, 0xa5, 0xa6, 0xea, 0x7e, 0xc1, 0x12, 0x7a, 0xfb, 0x0f, 0x42, + 0x2d, 0x9d, 0xb5, 0x27, 0xdd, 0xee, 0x56, 0x8d, 0x67, 0x6f, 0xf5, 0x12, 0x3a, 0x02, 0x33, 0x91, + 0xc8, 0x64, 0x63, 0xfa, 0xd1, 0xa6, 0x3b, 0x20, 0x18, 0x42, 0x6f, 0x7f, 0x7a, 0x94, 0x06, 0x2b, + 0xb1, 0x28, 0x58, 0xf4, 0xa0, 0x3f, 0xd6, 0xa3, 0x95, 0x19, 0xbc, 0x02, 0x47, 0x6d, 0x0b, 0x39, + 0x06, 0x57, 0x2c, 0xd9, 0xf8, 0xbb, 0xef, 0xcb, 0x80, 0xd2, 0x0a, 0xfe, 0xb1, 0xc0, 0xd1, 0xc3, + 0xf3, 0x1a, 0x9a, 0x42, 0x62, 0x21, 0x7c, 0x4b, 0x2b, 0xf4, 0x62, 0x5f, 0xa1, 0xd1, 0x6f, 0x12, + 0x0b, 0x6a, 0xfc, 0x27, 0x12, 0x1c, 0x65, 0x92, 0xd7, 0xe0, 0x31, 0x29, 0x79, 0x32, 0x5f, 0x4b, + 0x0c, 0x77, 0xf7, 0x9c, 0x1e, 0xd0, 0xc3, 0x1a, 0xbf, 0x56, 0x57, 0x3e, 0x83, 0x2e, 0xa6, 0xb8, + 0xc2, 0x4c, 0xea, 0x29, 0x78, 0x66, 0x07, 0xa7, 0x07, 0x14, 0xca, 0xd0, 0x0f, 0xb8, 0xbd, 0x00, + 0x68, 0x0b, 0x4c, 0x31, 0x92, 0x39, 0x7f, 0x53, 0x80, 0x6b, 0xf6, 0x4a, 0xe9, 0x7f, 0x7d, 0x73, + 0x33, 0xeb, 0x1f, 0x10, 0x00, 0xf7, 0x3d, 0x9d, 0x9c, 0xdf, 0x4e, 0xfa, 0x96, 0x42, 0xe9, 0xe4, + 0xfc, 0xb2, 0xdf, 0x50, 0xe8, 0xef, 0xb3, 0x4b, 0x85, 0xda, 0xea, 0x7c, 0x39, 0xf9, 0x65, 0x72, + 0x3b, 0xe9, 0x37, 0xc9, 0x31, 0x10, 0x73, 0x0e, 0x6f, 0xa7, 0x93, 0xeb, 0xb0, 0xcc, 0x74, 0x15, + 0x6e, 0xce, 0x06, 0x2f, 0xe3, 0x5b, 0x17, 0xef, 0xfe, 0x38, 0x5b, 0x24, 0x72, 0xb9, 0x9e, 0x8f, + 0xa2, 0x7c, 0x75, 0xaa, 0x5e, 0xdc, 0x24, 0xca, 0x79, 0x71, 0x5a, 0x3f, 0xcc, 0xa7, 0xaa, 0x7e, + 0x71, 0x9a, 0x64, 0x12, 0x79, 0xc6, 0x52, 0x6d, 0xea, 0x3f, 0xba, 0xb9, 0xab, 0x7f, 0xbe, 0xfd, + 0x3f, 0x00, 0x00, 0xff, 0xff, 0x30, 0x3e, 0x4e, 0x33, 0x01, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.proto b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.proto new file mode 100644 index 00000000..f45c3005 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/internal/planproto/planfile.proto @@ -0,0 +1,205 @@ +syntax = "proto3"; +package tfplan; + +// For Terraform's own parsing, the proto stub types go into an internal Go +// package. The public API is in github.com/hashicorp/terraform/plans/planfile . 
+option go_package = "github.com/hashicorp/terraform/plans/internal/planproto";
+
+// Plan is the root message type for the tfplan file
+message Plan {
+    // Version is incremented whenever there is a breaking change to
+    // the serialization format. Programs reading serialized plans should
+    // verify that version is set to the expected value and abort processing
+    // if not. A breaking change is any change that may cause an older
+    // consumer to interpret the structure incorrectly. This number will
+    // not be incremented if an existing consumer can either safely ignore
+    // changes to the format or if an existing consumer would fail to process
+    // the file for another message- or field-specific reason.
+    uint64 version = 1;
+
+    // The variables that were set when creating the plan. Each value is
+    // a msgpack serialization of an HCL value.
+    map<string, DynamicValue> variables = 2;
+
+    // An unordered set of proposed changes to resources throughout the
+    // configuration, including any nested modules. Use the address of
+    // each resource to determine which module it belongs to.
+    repeated ResourceInstanceChange resource_changes = 3;
+
+    // An unordered set of proposed changes to outputs in the root module
+    // of the configuration. This set also includes "no action" changes for
+    // outputs that are not changing, as context for detecting inconsistencies
+    // at apply time.
+    repeated OutputChange output_changes = 4;
+
+    // An unordered set of target addresses to include when applying. If no
+    // target addresses are present, the plan applies to the whole
+    // configuration.
+    repeated string target_addrs = 5;
+
+    // The version string for the Terraform binary that created this plan.
+    string terraform_version = 14;
+
+    // SHA256 digests of all of the provider plugin binaries that were used
+    // in the creation of this plan.
+    map<string, Hash> provider_hashes = 15;
+
+    // Backend is a description of the backend configuration and other related
+    // settings at the time the plan was created.
+    Backend backend = 13;
+}
+
+// Backend is a description of backend configuration and other related settings.
+message Backend {
+    string type = 1;
+    DynamicValue config = 2;
+    string workspace = 3;
+}
+
+// Action describes the type of action planned for an object.
+// Not all action values are valid for all object types.
enum Action {
+    NOOP = 0;
+    CREATE = 1;
+    READ = 2;
+    UPDATE = 3;
+    DELETE = 5;
+    DELETE_THEN_CREATE = 6;
+    CREATE_THEN_DELETE = 7;
+}
+
+// Change represents a change made to some object, transforming it from an old
+// state to a new state.
+message Change {
+    // Not all action values are valid for all object types. Consult
+    // the documentation for any message that embeds Change.
+    Action action = 1;
+
+    // msgpack-encoded HCL values involved in the change.
+    // - For update and replace, two values are provided that give the old and new values,
+    //   respectively.
+    // - For create, one value is provided that gives the new value to be created
+    // - For delete, one value is provided that describes the value being deleted
+    // - For read, two values are provided that give the prior value for this object
+    //   (or null, if no prior value exists) and the value that was or will be read,
+    //   respectively.
+    // - For no-op, one value is provided that is left unmodified by this non-change.
+    repeated DynamicValue values = 2;
+}
+
+message ResourceInstanceChange {
+    // module_path is an address to the module that defined this resource.
+    // module_path is omitted for resources in the root module.
For descendent modules + // it is a string like module.foo.module.bar as would be seen at the beginning of a + // resource address. The format of this string is not yet frozen and so external + // callers should treat it as an opaque key for filtering purposes. + string module_path = 1; + + // mode is the resource mode. + ResourceMode mode = 2; + enum ResourceMode { + managed = 0; // for "resource" blocks in configuration + data = 1; // for "data" blocks in configuration + } + + // type is the resource type name, like "aws_instance". + string type = 3; + + // name is the logical name of the resource as defined in configuration. + // For example, in aws_instance.foo this would be "foo". + string name = 4; + + // instance_key is either an integer index or a string key, depending on which iteration + // attributes ("count" or "for_each") are being used for this resource. If none + // are in use, this field is omitted. + oneof instance_key { + string str = 5; + int64 int = 6; + }; + + // deposed_key, if set, indicates that this change applies to a deposed + // object for the indicated instance with the given deposed key. If not + // set, the change applies to the instance's current object. + string deposed_key = 7; + + // provider is the address of the provider configuration that this change + // was planned with, and thus the configuration that must be used to + // apply it. + string provider = 8; + + // Description of the proposed change. May use "create", "read", "update", + // "replace" and "delete" actions. "no-op" changes are not currently used here + // but consumers must accept and discard them to allow for future expansion. + Change change = 9; + + // raw blob value provided by the provider as additional context for the + // change. Must be considered an opaque value for any consumer other than + // the provider that generated it, and will be returned verbatim to the + // provider during the subsequent apply operation. + bytes private = 10; + + // An unordered set of paths that prompted the change action to be + // "replace" rather than "update". Empty for any action other than + // "replace". + repeated Path required_replace = 11; +} + +message OutputChange { + // Name of the output as defined in the root module. + string name = 1; + + // Description of the proposed change. May use "no-op", "create", + // "update" and "delete" actions. + Change change = 2; + + // Sensitive, if true, indicates that one or more of the values given + // in "change" is sensitive and should not be shown directly in any + // rendered plan. + bool sensitive = 3; +} + +// DynamicValue represents a value whose type is not decided until runtime, +// often based on schema information obtained from a plugin. +// +// At present dynamic values are always encoded as msgpack, with extension +// id 0 used to represent the special "unknown" value indicating results +// that won't be known until after apply. +// +// In future other serialization formats may be used, possibly with a +// transitional period of including both as separate attributes of this type. +// Consumers must ignore attributes they don't support and fail if no supported +// attribute is present. The top-level format version will not be incremented +// for changes to the set of dynamic serialization formats. +message DynamicValue { + bytes msgpack = 1; +} + +// Hash represents a hash value. +// +// At present hashes always use the SHA256 algorithm. 
In future other hash +// algorithms may be used, possibly with a transitional period of including +// both as separate attributes of this type. Consumers must ignore attributes +// they don't support and fail if no supported attribute is present. The +// top-level format version will not be incremented for changes to the set of +// hash algorithms. +message Hash { + bytes sha256 = 1; +} + +// Path represents a set of steps to traverse into a data structure. It is +// used to refer to a sub-structure within a dynamic data structure presented +// separately. +message Path { + message Step { + oneof selector { + // Set "attribute_name" to represent looking up an attribute + // in the current object value. + string attribute_name = 1; + + // Set "element_key" to represent looking up an element in + // an indexable collection type. + DynamicValue element_key = 2; + } + } + repeated Step steps = 1; +} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go new file mode 100644 index 00000000..18a7e99a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go @@ -0,0 +1,18 @@ +package objchange + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// AllAttributesNull constructs a non-null cty.Value of the object type implied +// by the given schema that has all of its leaf attributes set to null and all +// of its nested block collections set to zero-length. +// +// This simulates what would result from decoding an empty configuration block +// with the given schema, except that it does not produce errors +func AllAttributesNull(schema *configschema.Block) cty.Value { + // "All attributes null" happens to be the definition of EmptyValue for + // a Block, so we can just delegate to that. + return schema.EmptyValue() +} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go new file mode 100644 index 00000000..8a74c07f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go @@ -0,0 +1,437 @@ +package objchange + +import ( + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/configs/configschema" +) + +// AssertObjectCompatible checks whether the given "actual" value is a valid +// completion of the possibly-partially-unknown "planned" value. +// +// This means that any known leaf value in "planned" must be equal to the +// corresponding value in "actual", and various other similar constraints. +// +// Any inconsistencies are reported by returning a non-zero number of errors. +// These errors are usually (but not necessarily) cty.PathError values +// referring to a particular nested value within the "actual" value. +// +// The two values must have types that conform to the given schema's implied +// type, or this function will panic. 
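For orientation, here is a minimal, hypothetical sketch of how a caller might use `AssertObjectCompatible`; the schema and values are invented for illustration and are not part of the vendored file:

```go
schema := &configschema.Block{
	Attributes: map[string]*configschema.Attribute{
		"name": {Type: cty.String, Required: true},
		"id":   {Type: cty.String, Computed: true},
	},
}

// "planned" may contain unknowns; "actual" is the value observed after apply.
planned := cty.ObjectVal(map[string]cty.Value{
	"name": cty.StringVal("example"),
	"id":   cty.UnknownVal(cty.String), // not known until apply
})
actual := cty.ObjectVal(map[string]cty.Value{
	"name": cty.StringVal("example"),
	"id":   cty.StringVal("i-12345"), // a valid completion of the unknown
})

errs := objchange.AssertObjectCompatible(schema, planned, actual)
// errs is empty here; if "name" had changed between plan and apply, errs
// would contain a cty.PathError pointing at the "name" attribute.
```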
+func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error { + return assertObjectCompatible(schema, planned, actual, nil) +} + +func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error { + var errs []error + if planned.IsNull() && !actual.IsNull() { + errs = append(errs, path.NewErrorf("was absent, but now present")) + return errs + } + if actual.IsNull() && !planned.IsNull() { + errs = append(errs, path.NewErrorf("was present, but now absent")) + return errs + } + if planned.IsNull() { + // No further checks possible if both values are null + return errs + } + + for name, attrS := range schema.Attributes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + path := append(path, cty.GetAttrStep{Name: name}) + moreErrs := assertValueCompatible(plannedV, actualV, path) + if attrS.Sensitive { + if len(moreErrs) > 0 { + // Use a vague placeholder message instead, to avoid disclosing + // sensitive information. + errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute")) + } + } else { + errs = append(errs, moreErrs...) + } + } + for name, blockS := range schema.BlockTypes { + plannedV := planned.GetAttr(name) + actualV := actual.GetAttr(name) + + // As a special case, if there were any blocks whose leaf attributes + // are all unknown then we assume (possibly incorrectly) that the + // HCL dynamic block extension is in use with an unknown for_each + // argument, and so we will do looser validation here that allows + // for those blocks to have expanded into a different number of blocks + // if the for_each value is now known. + maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false) + + path := append(path, cty.GetAttrStep{Name: name}) + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + // If an unknown block placeholder was present then the placeholder + // may have expanded out into zero blocks, which is okay. + if maybeUnknownBlocks && actualV.IsNull() { + continue + } + moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path) + errs = append(errs, moreErrs...) + case configschema.NestingList: + // A NestingList might either be a list or a tuple, depending on + // whether there are dynamically-typed attributes inside. However, + // both support a similar-enough API that we can treat them the + // same for our purposes here. + if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() { + continue + } + + if maybeUnknownBlocks { + // When unknown blocks are present the final blocks may be + // at different indices than the planned blocks, so unfortunately + // we can't do our usual checks in this case without generating + // false negatives. + continue + } + + plannedL := plannedV.LengthInt() + actualL := actualV.LengthInt() + if plannedL != actualL { + errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + if !actualV.HasIndex(idx).True() { + continue + } + actualEV := actualV.Index(idx) + moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx})) + errs = append(errs, moreErrs...) 
+			}
+		case configschema.NestingMap:
+			// A NestingMap might either be a map or an object, depending on
+			// whether there are dynamically-typed attributes inside, but
+			// that's decided statically and so both values will have the same
+			// kind.
+			if plannedV.Type().IsObjectType() {
+				plannedAtys := plannedV.Type().AttributeTypes()
+				actualAtys := actualV.Type().AttributeTypes()
+				for k := range plannedAtys {
+					if _, ok := actualAtys[k]; !ok {
+						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
+						continue
+					}
+
+					plannedEV := plannedV.GetAttr(k)
+					actualEV := actualV.GetAttr(k)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
+					errs = append(errs, moreErrs...)
+				}
+				if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
+					for k := range actualAtys {
+						if _, ok := plannedAtys[k]; !ok {
+							errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
+							continue
+						}
+					}
+				}
+			} else {
+				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+					continue
+				}
+				plannedL := plannedV.LengthInt()
+				actualL := actualV.LengthInt()
+				if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
+					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
+					continue
+				}
+				for it := plannedV.ElementIterator(); it.Next(); {
+					idx, plannedEV := it.Element()
+					if !actualV.HasIndex(idx).True() {
+						continue
+					}
+					actualEV := actualV.Index(idx)
+					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
+					errs = append(errs, moreErrs...)
+				}
+			}
+		case configschema.NestingSet:
+			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+				continue
+			}
+
+			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
+				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
+				return len(errs) == 0
+			})
+			errs = append(errs, setErrs...)
+
+			// There can be fewer elements in a set after its elements are all
+			// known (values that turn out to be equal will coalesce) but the
+			// number of elements must never get larger.
+			plannedL := plannedV.LengthInt()
+			actualL := actualV.LengthInt()
+			if plannedL < actualL {
+				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
+			}
+		default:
+			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
+		}
+	}
+	return errs
+}
+
+func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
+	// NOTE: We don't normally use the GoString rendering of cty.Value in
+	// user-facing error messages as a rule, but we make an exception
+	// for this function because we expect the user to pass this message on
+	// verbatim to the provider development team and so more detail is better.
+
+	var errs []error
+	if planned.Type() == cty.DynamicPseudoType {
+		// Anything goes, then
+		return errs
+	}
+	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
+		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
+		// If the types don't match then we can't do any other comparisons,
+		// so we bail early.
+		return errs
+	}
+
+	if !planned.IsKnown() {
+		// We didn't know what we were going to end up with during plan, so
+		// anything goes during apply.
+		return errs
+	}
+
+	if actual.IsNull() {
+		if planned.IsNull() {
+			return nil
+		}
+		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
+		return errs
+	}
+	if planned.IsNull() {
+		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
+		return errs
+	}
+
+	ty := planned.Type()
+	switch {
+
+	case !actual.IsKnown():
+		errs = append(errs, path.NewErrorf("was known, but now unknown"))
+
+	case ty.IsPrimitiveType():
+		if !actual.Equals(planned).True() {
+			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
+		}
+
+	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
+		for it := planned.ElementIterator(); it.Next(); {
+			k, plannedV := it.Element()
+			if !actual.HasIndex(k).True() {
+				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
+				continue
+			}
+
+			actualV := actual.Index(k)
+			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
+			errs = append(errs, moreErrs...)
+		}
+
+		for it := actual.ElementIterator(); it.Next(); {
+			k, _ := it.Element()
+			if !planned.HasIndex(k).True() {
+				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
+			}
+		}
+
+	case ty.IsObjectType():
+		atys := ty.AttributeTypes()
+		for name := range atys {
+			// Because we already tested that the two values have the same type,
+			// we can assume that the same attributes are present in both and
+			// focus just on testing their values.
+			plannedV := planned.GetAttr(name)
+			actualV := actual.GetAttr(name)
+			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
+			errs = append(errs, moreErrs...)
+		}
+
+	case ty.IsSetType():
+		// We can't really do anything useful for sets here because changing
+		// an unknown element to known changes the identity of the element, and
+		// so we can't correlate them properly. However, we will at least check
+		// to ensure that the number of elements is consistent, along with
+		// the general type-match checks we ran earlier in this function.
+		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {
+
+			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
+				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
+				return len(errs) == 0
+			})
+			errs = append(errs, setErrs...)
+
+			// There can be fewer elements in a set after its elements are all
+			// known (values that turn out to be equal will coalesce) but the
+			// number of elements must never get larger.
+
+			plannedL := planned.LengthInt()
+			actualL := actual.LengthInt()
+			if plannedL < actualL {
+				errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
+			}
+		}
+	}
+
+	return errs
+}
+
+func indexStrForErrors(v cty.Value) string {
+	switch v.Type() {
+	case cty.Number:
+		return v.AsBigFloat().Text('f', -1)
+	case cty.String:
+		return strconv.Quote(v.AsString())
+	default:
+		// Should be impossible, since no other index types are allowed!
+		return fmt.Sprintf("%#v", v)
+	}
+}
+
+// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the
+// HCL dynamic block extension behaves when it's asked to expand a block whose
+// for_each argument is unknown. In such cases, it generates a single placeholder
+// block with all leaf attribute values unknown, and once the for_each
+// expression becomes known the placeholder may be replaced with any number
+// of blocks, so object compatibility checks would need to be more liberal.
+//
+// Set "nested" if testing a block that is nested inside a candidate block
+// placeholder; this changes the interpretation of there being no blocks of
+// a type to allow for there being zero nested blocks.
+func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool {
+	switch blockS.Nesting {
+	case configschema.NestingSingle, configschema.NestingGroup:
+		if nested && v.IsNull() {
+			return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder
+		}
+		return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block)
+	default:
+		// These situations should be impossible for correct providers, but
+		// we permit the legacy SDK to produce some incorrect outcomes
+		// for compatibility with its existing logic, and so we must be
+		// tolerant here.
+		if !v.IsKnown() {
+			return true
+		}
+		if v.IsNull() {
+			return false // treated as if the list were empty, so we would see zero iterations below
+		}
+
+		// For all other nesting modes, our value should be something iterable.
+		for it := v.ElementIterator(); it.Next(); {
+			_, ev := it.Element()
+			if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) {
+				return true
+			}
+		}
+
+		// Our default changes depending on whether we're testing the candidate
+		// block itself or something nested inside of it: zero blocks of a type
+		// can never contain a dynamic block placeholder, but a dynamic block
+		// placeholder might contain zero blocks of one of its own nested block
+		// types, if none were set in the config at all.
+		return nested
+	}
+}
+
+func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool {
+	if v.IsNull() {
+		return false // null value can never be a placeholder element
+	}
+	if !v.IsKnown() {
+		return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs
+	}
+	for name := range schema.Attributes {
+		av := v.GetAttr(name)
+
+		// Unknown block placeholders contain only unknown or null attribute
+		// values, depending on whether or not a particular attribute was set
+		// explicitly inside the content block. Note that this is imprecise:
+		// non-placeholders can also match this, so this function can generate
+		// false positives.
+		if av.IsKnown() && !av.IsNull() {
+			return false
+		}
+	}
+	for name, blockS := range schema.BlockTypes {
+		if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) {
+			return false
+		}
+	}
+	return true
+}
+
+// assertSetValuesCompatible checks that each of the elements in a can
+// be correlated with at least one equivalent element in b and vice-versa,
+// using the given correlation function.
+//
+// This allows the number of elements in the sets to change as long as all
+// elements in both sets can be correlated, making this function safe to use
+// with sets that may contain unknown values as long as the unknown case is
+// addressed in some reasonable way in the callback function.
+//
+// The callback always receives values from set a as its first argument and
+// values from set b in its second argument, so it is safe to use with
+// non-commutative functions.
+//
+// As with assertValueCompatible, we assume that the target audience of error
+// messages here is a provider developer (via a bug report from a user) and so
+// we intentionally violate our usual rule of keeping cty implementation
+// details out of error messages.
+func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
+	a := planned
+	b := actual
+
+	// Our methodology here is a little tricky, to deal with the fact that
+	// it's impossible to directly correlate two non-equal set elements because
+	// they don't have identities separate from their values.
+	// The approach is to count the number of equivalent elements each element
+	// of a has in b and vice-versa, and then return true only if each element
+	// in both sets has at least one equivalent.
+	as := a.AsValueSlice()
+	bs := b.AsValueSlice()
+	aeqs := make([]bool, len(as))
+	beqs := make([]bool, len(bs))
+	for ai, av := range as {
+		for bi, bv := range bs {
+			if f(av, bv) {
+				aeqs[ai] = true
+				beqs[bi] = true
+			}
+		}
+	}
+
+	var errs []error
+	for i, eq := range aeqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
+		}
+	}
+	if len(errs) > 0 {
+		// Exit early since otherwise we're likely to generate duplicate
+		// error messages from the other perspective in the subsequent loop.
+		return errs
+	}
+	for i, eq := range beqs {
+		if !eq {
+			errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
+		}
+	}
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
new file mode 100644
index 00000000..2c18a010
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
@@ -0,0 +1,4 @@
+// Package objchange deals with the business logic of taking a prior state
+// value and a config value and producing a proposed new merged value, along
+// with other related rules in this domain.
+package objchange
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
new file mode 100644
index 00000000..cbfefddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
@@ -0,0 +1,104 @@
+package objchange
+
+import (
+	"github.com/zclconf/go-cty/cty"
+)
+
+// LongestCommonSubsequence finds a sequence of values that are common to both
+// xs and ys, with the same relative ordering as in both collections. This
+// result is useful as a first step towards computing a diff showing
+// added/removed elements in a sequence.
+//
+// The approach used here is a "naive" one, assuming that both xs and ys will
+// generally be small in most reasonable Terraform configurations. For larger
+// lists the time/space usage may be sub-optimal.
+//
+// A pair of lists may have multiple longest common subsequences. In that
+// case, the one selected by this function is undefined.
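As an illustration of the documented behavior, a small hypothetical usage (not part of the vendored file):

```go
xs := []cty.Value{cty.StringVal("a"), cty.StringVal("b"), cty.StringVal("c")}
ys := []cty.Value{cty.StringVal("b"), cty.StringVal("c"), cty.StringVal("d")}

lcs := objchange.LongestCommonSubsequence(xs, ys)
// lcs is ["b", "c"]: the values common to both slices, in their shared
// relative order. The remaining elements ("a" and "d") are the candidates
// for "removed" and "added" entries when rendering a sequence diff.
```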
+func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value { + if len(xs) == 0 || len(ys) == 0 { + return make([]cty.Value, 0) + } + + c := make([]int, len(xs)*len(ys)) + eqs := make([]bool, len(xs)*len(ys)) + w := len(xs) + + for y := 0; y < len(ys); y++ { + for x := 0; x < len(xs); x++ { + eqV := xs[x].Equals(ys[y]) + eq := false + if eqV.IsKnown() && eqV.True() { + eq = true + eqs[(w*y)+x] = true // equality tests can be expensive, so cache it + } + if eq { + // Sequence gets one longer than for the cell at top left, + // since we'd append a new item to the sequence here. + if x == 0 || y == 0 { + c[(w*y)+x] = 1 + } else { + c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1 + } + } else { + // We follow the longest of the sequence above and the sequence + // to the left of us in the matrix. + l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + c[(w*y)+x] = l + } else { + c[(w*y)+x] = u + } + } + } + } + + // The bottom right cell tells us how long our longest sequence will be + seq := make([]cty.Value, c[len(c)-1]) + + // Now we will walk back from the bottom right cell, finding again all + // of the equal pairs to construct our sequence. + x := len(xs) - 1 + y := len(ys) - 1 + i := len(seq) - 1 + + for x > -1 && y > -1 { + if eqs[(w*y)+x] { + // Add the value to our result list and then walk diagonally + // up and to the left. + seq[i] = xs[x] + x-- + y-- + i-- + } else { + // Take the path with the greatest sequence length in the matrix. + l := 0 + u := 0 + if x > 0 { + l = c[(w*y)+(x-1)] + } + if y > 0 { + u = c[(w*(y-1))+x] + } + if l > u { + x-- + } else { + y-- + } + } + } + + if i > -1 { + // should never happen if the matrix was constructed properly + panic("not enough elements in sequence") + } + + return seq +} diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go new file mode 100644 index 00000000..c23f44da --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go @@ -0,0 +1,132 @@ +package objchange + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// NormalizeObjectFromLegacySDK takes an object that may have been generated +// by the legacy Terraform SDK (i.e. returned from a provider with the +// LegacyTypeSystem opt-out set) and does its best to normalize it for the +// assumptions we would normally enforce if the provider had not opted out. +// +// In particular, this function guarantees that a value representing a nested +// block will never itself be unknown or null, instead representing that as +// a non-null value that may contain null/unknown values. +// +// The input value must still conform to the implied type of the given schema, +// or else this function may produce garbage results or panic. This is usually +// okay because type consistency is enforced when deserializing the value +// returned from the provider over the RPC wire protocol anyway. +func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value { + if val == cty.NilVal || val.IsNull() { + // This should never happen in reasonable use, but we'll allow it + // and normalize to a null of the expected type rather than panicking + // below. 
+ return cty.NullVal(schema.ImpliedType()) + } + + vals := make(map[string]cty.Value) + for name := range schema.Attributes { + // No normalization for attributes, since them being type-conformant + // is all that we require. + vals[name] = val.GetAttr(name) + } + for name, blockS := range schema.BlockTypes { + lv := val.GetAttr(name) + + // Legacy SDK never generates dynamically-typed attributes and so our + // normalization code doesn't deal with them, but we need to make sure + // we still pass them through properly so that we don't interfere with + // objects generated by other SDKs. + if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() { + vals[name] = lv + continue + } + + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + if lv.IsKnown() { + if lv.IsNull() && blockS.Nesting == configschema.NestingGroup { + vals[name] = blockS.EmptyValue() + } else { + vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block) + } + } else { + vals[name] = unknownBlockStub(&blockS.Block) + } + case configschema.NestingList: + switch { + case !lv.IsKnown(): + vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.ListVal(subVals) + } + case configschema.NestingSet: + switch { + case !lv.IsKnown(): + vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)}) + case lv.IsNull() || lv.LengthInt() == 0: + vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType()) + default: + subVals := make([]cty.Value, 0, lv.LengthInt()) + for it := lv.ElementIterator(); it.Next(); { + _, subVal := it.Element() + subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block)) + } + vals[name] = cty.SetVal(subVals) + } + default: + // The legacy SDK doesn't support NestingMap, so we just assume + // maps are always okay. (If not, we would've detected and returned + // an error to the user before we got here.) + vals[name] = lv + } + } + return cty.ObjectVal(vals) +} + +// unknownBlockStub constructs an object value that approximates an unknown +// block by producing a known block object with all of its leaf attribute +// values set to unknown. +// +// Blocks themselves cannot be unknown, so if the legacy SDK tries to return +// such a thing, we'll use this result instead. This convention mimics how +// the dynamic block feature deals with being asked to iterate over an unknown +// value, because our value-checking functions already accept this convention +// as a special case. +func unknownBlockStub(schema *configschema.Block) cty.Value { + vals := make(map[string]cty.Value) + for name, attrS := range schema.Attributes { + vals[name] = cty.UnknownVal(attrS.Type) + } + for name, blockS := range schema.BlockTypes { + switch blockS.Nesting { + case configschema.NestingSingle, configschema.NestingGroup: + vals[name] = unknownBlockStub(&blockS.Block) + case configschema.NestingList: + // In principle we may be expected to produce a tuple value here, + // if there are any dynamically-typed attributes in our nested block, + // but the legacy SDK doesn't support that, so we just assume it'll + // never be necessary to normalize those. 
(Incorrect usage in any
+			// other SDK would be caught and returned as an error before we
+			// get here.)
+			vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingSet:
+			vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
+		case configschema.NestingMap:
+			// A nesting map can never be unknown since we then wouldn't know
+			// what the keys are. (Legacy SDK doesn't support NestingMap anyway,
+			// so this should never arise.)
+			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
+		}
+	}
+	return cty.ObjectVal(vals)
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
new file mode 100644
index 00000000..5a8af148
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
@@ -0,0 +1,390 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
+
+// ProposedNewObject constructs a proposed new object value by combining the
+// computed attribute values from "prior" with the configured attribute values
+// from "config".
+//
+// Both values must conform to the given schema's implied type, or this
+// function will panic.
+//
+// The prior value must be wholly known, but the config value may be unknown
+// or have nested unknown values.
+//
+// The merging of the two objects includes the attributes of any nested blocks,
+// which will be correlated in a manner appropriate for their nesting mode.
+// Note in particular that the correlation for blocks backed by sets is a
+// heuristic based on matching non-computed attribute values and so it may
+// produce strange results with more "extreme" cases, such as a nested set
+// block where _all_ attributes are computed.
+func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
+	// If the config and prior are both null, return early here before
+	// populating the prior block. This prevents non-null blocks from
+	// appearing in the proposed state value.
+	if config.IsNull() && prior.IsNull() {
+		return prior
+	}
+
+	if prior.IsNull() {
+		// In this case, we will construct a synthetic prior value that is
+		// similar to the result of decoding an empty configuration block,
+		// which simplifies our handling of the top-level attributes/blocks
+		// below by giving us one non-null level of object to pull values from.
+		prior = AllAttributesNull(schema)
+	}
+	return proposedNewObject(schema, prior, config)
+}
+
+// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
+// planning data resources in particular. Specifically, it replaces the values
+// of any Computed attributes not set in the configuration with an unknown
+// value, which serves as a placeholder for a value to be filled in by the
+// provider when the data resource is finally read.
+//
+// Data resources are different because the planning of them is handled
+// entirely within Terraform Core and not subject to customization by the
+// provider. This function is, in effect, producing an equivalent result to
+// passing the ProposedNewObject result into a provider's PlanResourceChange
+// function, assuming a fixed implementation of PlanResourceChange that just
+// fills in unknown values as needed.
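To make the merging rules concrete, here is a hypothetical sketch of `ProposedNewObject` (the schema and values are invented for illustration): config wins for arguments that are set, while prior values are preserved for computed attributes left null in config:

```go
schema := &configschema.Block{
	Attributes: map[string]*configschema.Attribute{
		"ami": {Type: cty.String, Optional: true},
		"id":  {Type: cty.String, Computed: true},
	},
}

prior := cty.ObjectVal(map[string]cty.Value{
	"ami": cty.StringVal("ami-old"),
	"id":  cty.StringVal("i-12345"), // computed during the last apply
})
config := cty.ObjectVal(map[string]cty.Value{
	"ami": cty.StringVal("ami-new"), // set in config, so it overrides prior
	"id":  cty.NullVal(cty.String),  // computed and unset, so prior is kept
})

proposed := objchange.ProposedNewObject(schema, prior, config)
// proposed is {ami: "ami-new", id: "i-12345"}.
```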
+func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value { + // Our trick here is to run the ProposedNewObject logic with an + // entirely-unknown prior value. Because of cty's unknown short-circuit + // behavior, any operation on prior returns another unknown, and so + // unknown values propagate into all of the parts of the resulting value + // that would normally be filled in by preserving the prior state. + prior := cty.UnknownVal(schema.ImpliedType()) + return proposedNewObject(schema, prior, config) +} + +func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value { + if config.IsNull() || !config.IsKnown() { + // This is a weird situation, but we'll allow it anyway to free + // callers from needing to specifically check for these cases. + return prior + } + if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) { + panic("ProposedNewObject only supports object-typed values") + } + + // From this point onwards, we can assume that both values are non-null + // object types, and that the config value itself is known (though it + // may contain nested values that are unknown.) + + newAttrs := map[string]cty.Value{} + for name, attr := range schema.Attributes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch { + case attr.Computed && attr.Optional: + // This is the trickiest scenario: we want to keep the prior value + // if the config isn't overriding it. Note that due to some + // ambiguity here, setting an optional+computed attribute from + // config and then later switching the config to null in a + // subsequent change causes the initial config value to be "sticky" + // unless the provider specifically overrides it during its own + // plan customization step. + if configV.IsNull() { + newV = priorV + } else { + newV = configV + } + case attr.Computed: + // configV will always be null in this case, by definition. + // priorV may also be null, but that's okay. + newV = priorV + default: + // For non-computed attributes, we always take the config value, + // even if it is null. If it's _required_ then null values + // should've been caught during an earlier validation step, and + // so we don't really care about that here. + newV = configV + } + newAttrs[name] = newV + } + + // Merging nested blocks is a little more complex, since we need to + // correlate blocks between both objects and then recursively propose + // a new object for each. The correlation logic depends on the nesting + // mode for each block type. + for name, blockType := range schema.BlockTypes { + priorV := prior.GetAttr(name) + configV := config.GetAttr(name) + var newV cty.Value + switch blockType.Nesting { + + case configschema.NestingSingle, configschema.NestingGroup: + newV = ProposedNewObject(&blockType.Block, priorV, configV) + + case configschema.NestingList: + // Nested blocks are correlated by index. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. 
+ newVals = append(newVals, configEV) + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. + if configV.Type().IsTupleType() { + newV = cty.TupleVal(newVals) + } else { + newV = cty.ListVal(newVals) + } + } else { + // Despite the name, a NestingList might also be a tuple, if + // its nested schema contains dynamically-typed attributes. + if configV.Type().IsTupleType() { + newV = cty.EmptyTupleVal + } else { + newV = cty.ListValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingMap: + // Despite the name, a NestingMap may produce either a map or + // object value, depending on whether the nested schema contains + // dynamically-typed attributes. + if configV.Type().IsObjectType() { + // Nested blocks are correlated by key. + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + atys := configV.Type().AttributeTypes() + for name := range atys { + configEV := configV.GetAttr(name) + if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals[name] = configEV + continue + } + priorEV := priorV.GetAttr(name) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[name] = newEV + } + // Although we call the nesting mode "map", we actually use + // object values so that elements might have different types + // in case of dynamically-typed attributes. + newV = cty.ObjectVal(newVals) + } else { + newV = cty.EmptyObjectVal + } + } else { + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + newVals := make(map[string]cty.Value, configVLen) + for it := configV.ElementIterator(); it.Next(); { + idx, configEV := it.Element() + k := idx.AsString() + if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) { + // If there is no corresponding prior element then + // we just take the config value as-is. + newVals[k] = configEV + continue + } + priorEV := priorV.Index(idx) + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals[k] = newEV + } + newV = cty.MapVal(newVals) + } else { + newV = cty.MapValEmpty(blockType.ImpliedType()) + } + } + + case configschema.NestingSet: + if !configV.Type().IsSetType() { + panic("configschema.NestingSet value is not a set as expected") + } + + // Nested blocks are correlated by comparing the element values + // after eliminating all of the computed attributes. In practice, + // this means that any config change produces an entirely new + // nested object, and we only propagate prior computed values + // if the non-computed attribute values are identical. 
+ var cmpVals [][2]cty.Value + if priorV.IsKnown() && !priorV.IsNull() { + cmpVals = setElementCompareValues(&blockType.Block, priorV, false) + } + configVLen := 0 + if configV.IsKnown() && !configV.IsNull() { + configVLen = configV.LengthInt() + } + if configVLen > 0 { + used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value + newVals := make([]cty.Value, 0, configVLen) + for it := configV.ElementIterator(); it.Next(); { + _, configEV := it.Element() + var priorEV cty.Value + for i, cmp := range cmpVals { + if used[i] { + continue + } + if cmp[1].RawEquals(configEV) { + priorEV = cmp[0] + used[i] = true // we can't use this value on a future iteration + break + } + } + if priorEV == cty.NilVal { + priorEV = cty.NullVal(blockType.ImpliedType()) + } + + newEV := ProposedNewObject(&blockType.Block, priorEV, configEV) + newVals = append(newVals, newEV) + } + newV = cty.SetVal(newVals) + } else { + newV = cty.SetValEmpty(blockType.Block.ImpliedType()) + } + + default: + // Should never happen, since the above cases are comprehensive. + panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting)) + } + + newAttrs[name] = newV + } + + return cty.ObjectVal(newAttrs) +} + +// setElementCompareValues takes a known, non-null value of a cty.Set type and +// returns a table -- constructed of two-element arrays -- that maps original +// set element values to corresponding values that have all of the computed +// values removed, making them suitable for comparison with values obtained +// from configuration. The element type of the set must conform to the implied +// type of the given schema, or this function will panic. +// +// In the resulting slice, the zeroth element of each array is the original +// value and the one-indexed element is the corresponding "compare value". +// +// This is intended to help correlate prior elements with configured elements +// in ProposedNewObject. The result is a heuristic rather than an exact science, +// since e.g. two separate elements may reduce to the same value through this +// process. The caller must therefore be ready to deal with duplicates. +func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value { + ret := make([][2]cty.Value, 0, set.LengthInt()) + for it := set.ElementIterator(); it.Next(); { + _, ev := it.Element() + ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)}) + } + return ret +} + +// setElementCompareValue creates a new value that has all of the same +// non-computed attribute values as the one given but has all computed +// attribute values forced to null. +// +// If isConfig is true then non-null Optional+Computed attribute values will +// be preserved. Otherwise, they will also be set to null. +// +// The input value must conform to the schema's implied type, and the return +// value is guaranteed to conform to it. 
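A hypothetical example of the "compare value" produced for one set element (since the helper is unexported, code like this would have to live inside the objchange package; the schema and values are invented):

```go
schema := &configschema.Block{
	Attributes: map[string]*configschema.Attribute{
		"cidr": {Type: cty.String, Required: true},
		"id":   {Type: cty.String, Computed: true},
	},
}

elem := cty.ObjectVal(map[string]cty.Value{
	"cidr": cty.StringVal("10.0.0.0/24"),
	"id":   cty.StringVal("rule-1"), // computed, so excluded from comparison
})

cmp := setElementCompareValue(schema, elem, false)
// cmp is {cidr: "10.0.0.0/24", id: null}: with the computed "id" nulled
// out, the element can be correlated with a configured element that has
// no id assigned yet.
```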
+func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
+	if v.IsNull() || !v.IsKnown() {
+		return v
+	}
+
+	attrs := map[string]cty.Value{}
+	for name, attr := range schema.Attributes {
+		switch {
+		case attr.Computed && attr.Optional:
+			if isConfig {
+				attrs[name] = v.GetAttr(name)
+			} else {
+				attrs[name] = cty.NullVal(attr.Type)
+			}
+		case attr.Computed:
+			attrs[name] = cty.NullVal(attr.Type)
+		default:
+			attrs[name] = v.GetAttr(name)
+		}
+	}
+
+	for name, blockType := range schema.BlockTypes {
+		switch blockType.Nesting {
+
+		case configschema.NestingSingle, configschema.NestingGroup:
+			attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
+
+		case configschema.NestingList, configschema.NestingSet:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			if l := cv.LengthInt(); l > 0 {
+				elems := make([]cty.Value, 0, l)
+				for it := cv.ElementIterator(); it.Next(); {
+					_, ev := it.Element()
+					elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
+				}
+				if blockType.Nesting == configschema.NestingSet {
+					// cty.SetVal would panic if given elements that are not
+					// all of the same type, but that's guaranteed not to
+					// happen here because our input value was _already_ a
+					// set and we've not changed the types of any elements here.
+					attrs[name] = cty.SetVal(elems)
+				} else {
+					attrs[name] = cty.TupleVal(elems)
+				}
+			} else {
+				if blockType.Nesting == configschema.NestingSet {
+					attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
+				} else {
+					attrs[name] = cty.EmptyTupleVal
+				}
+			}
+
+		case configschema.NestingMap:
+			cv := v.GetAttr(name)
+			if cv.IsNull() || !cv.IsKnown() {
+				attrs[name] = cv
+				continue
+			}
+			elems := make(map[string]cty.Value)
+			for it := cv.ElementIterator(); it.Next(); {
+				kv, ev := it.Element()
+				elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
+			}
+			attrs[name] = cty.ObjectVal(elems)
+
+		default:
+			// Should never happen, since the above cases are comprehensive.
+			panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
+		}
+	}
+
+	return cty.ObjectVal(attrs)
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
new file mode 100644
index 00000000..69acb897
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
@@ -0,0 +1,267 @@
+package objchange
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
+
+// AssertPlanValid checks whether a planned new state returned by a
+// provider's PlanResourceChange method is suitable to achieve a change
+// from priorState to config. It returns a slice with nonzero length if
+// any problems are detected. Because problems here indicate bugs in the
+// provider that generated the plannedState, they are written with provider
+// developers as an audience, rather than end-users.
+//
+// All of the given values must have the same type and must conform to the
+// implied type of the given schema, or this function may panic or produce
+// garbage results.
+//
+// During planning, a provider may only make changes to attributes that are
+// null (unset) in the configuration and are marked as "computed" in the
+// resource type schema, in order to insert any default values the provider
+// may know about.
+// If the default value cannot be determined until apply time, the provider
+// can return an unknown value. Providers are forbidden from planning a
+// change that disagrees with any non-null argument in the configuration.
+//
+// As a special exception, providers _are_ allowed to provide attribute values
+// conflicting with configuration if and only if the planned value exactly
+// matches the corresponding attribute value in the prior state. The provider
+// can use this to signal that the new value is functionally equivalent to
+// the old and thus no change is required.
+func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
+	return assertPlanValid(schema, priorState, config, plannedState, nil)
+}
+
+func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
+	var errs []error
+	if plannedState.IsNull() && !config.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for absence but config wants existence"))
+		return errs
+	}
+	if config.IsNull() && !plannedState.IsNull() {
+		errs = append(errs, path.NewErrorf("planned for existence but config wants absence"))
+		return errs
+	}
+	if plannedState.IsNull() {
+		// No further checks possible if the planned value is null
+		return errs
+	}
+
+	impTy := schema.ImpliedType()
+
+	for name, attrS := range schema.Attributes {
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(attrS.Type)
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+
+		path := append(path, cty.GetAttrStep{Name: name})
+		moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
+		errs = append(errs, moreErrs...)
+	}
+	for name, blockS := range schema.BlockTypes {
+		path := append(path, cty.GetAttrStep{Name: name})
+		plannedV := plannedState.GetAttr(name)
+		configV := config.GetAttr(name)
+		priorV := cty.NullVal(impTy.AttributeType(name))
+		if !priorState.IsNull() {
+			priorV = priorState.GetAttr(name)
+		}
+		if plannedV.RawEquals(configV) {
+			// Easy path: nothing has changed at all
+			continue
+		}
+		if !plannedV.IsKnown() {
+			errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
+			continue
+		}
+
+		switch blockS.Nesting {
+		case configschema.NestingSingle, configschema.NestingGroup:
+			moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
+			errs = append(errs, moreErrs...)
+		case configschema.NestingList:
+			// A NestingList might either be a list or a tuple, depending on
+			// whether there are dynamically-typed attributes inside. However,
+			// both support a similar-enough API that we can treat them the
+			// same for our purposes here.
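To see why the comment above distinguishes lists from tuples, recall cty's type rules: `cty.ListVal` requires every element to share a single type, while `cty.TupleVal` records a type per position, which is what a block containing a dynamically-typed attribute degrades to. A minimal standalone illustration (attribute names and values are invented for the example; the import uses the upstream go-cty path); the list-of-blocks validation then continues below.

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Homogeneous elements: a true list type, list(object({...})).
	list := cty.ListVal([]cty.Value{
		cty.ObjectVal(map[string]cty.Value{"port": cty.NumberIntVal(80)}),
		cty.ObjectVal(map[string]cty.Value{"port": cty.NumberIntVal(443)}),
	})
	fmt.Println(list.Type().IsListType()) // true

	// A dynamically-typed attribute lets element types diverge, so the
	// container must be a tuple instead.
	tuple := cty.TupleVal([]cty.Value{
		cty.ObjectVal(map[string]cty.Value{"value": cty.StringVal("a")}),
		cty.ObjectVal(map[string]cty.Value{"value": cty.NumberIntVal(1)}),
	})
	fmt.Println(tuple.Type().IsTupleType()) // true
	// cty.ListVal would panic on those same two elements, which is why the
	// surrounding code checks the value's type before constructing results.
}
```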
+ if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + if !configV.HasIndex(idx).True() { + continue // should never happen since we checked the lengths above + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + case configschema.NestingMap: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // A NestingMap might either be a map or an object, depending on + // whether there are dynamically-typed attributes inside, but + // that's decided statically and so all values will have the same + // kind. + if plannedV.Type().IsObjectType() { + plannedAtys := plannedV.Type().AttributeTypes() + configAtys := configV.Type().AttributeTypes() + for k := range plannedAtys { + if _, ok := configAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + path := append(path, cty.GetAttrStep{Name: k}) + + plannedEV := plannedV.GetAttr(k) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + configEV := configV.GetAttr(k) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.Type().HasAttribute(k) { + priorEV = priorV.GetAttr(k) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) 
+ } + for k := range configAtys { + if _, ok := plannedAtys[k]; !ok { + errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k)) + continue + } + } + } else { + plannedL := plannedV.LengthInt() + configL := configV.LengthInt() + if plannedL != configL { + errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL)) + continue + } + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + k := idx.AsString() + if !configV.HasIndex(idx).True() { + errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k)) + continue + } + configEV := configV.Index(idx) + priorEV := cty.NullVal(blockS.ImpliedType()) + if !priorV.IsNull() && priorV.HasIndex(idx).True() { + priorEV = priorV.Index(idx) + } + moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path) + errs = append(errs, moreErrs...) + } + for it := configV.ElementIterator(); it.Next(); { + idx, _ := it.Element() + if !plannedV.HasIndex(idx).True() { + errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString())) + continue + } + } + } + case configschema.NestingSet: + if plannedV.IsNull() { + errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null")) + continue + } + + // Because set elements have no identifier with which to correlate + // them, we can't robustly validate the plan for a nested block + // backed by a set, and so unfortunately we need to just trust the + // provider to do the right thing. :( + // + // (In principle we could correlate elements by matching the + // subset of attributes explicitly set in config, except for the + // special diff suppression rule which allows for there to be a + // planned value that is constructed by mixing part of a prior + // value with part of a config value, creating an entirely new + // element that is not present in either prior nor config.) + for it := plannedV.ElementIterator(); it.Next(); { + idx, plannedEV := it.Element() + path := append(path, cty.IndexStep{Key: idx}) + if !plannedEV.IsKnown() { + errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead")) + continue + } + } + + default: + panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting)) + } + } + + return errs +} + +func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error { + var errs []error + if plannedV.RawEquals(configV) { + // This is the easy path: provider didn't change anything at all. + return errs + } + if plannedV.RawEquals(priorV) && !priorV.IsNull() { + // Also pretty easy: there is a prior value and the provider has + // returned it unchanged. This indicates that configV and plannedV + // are functionally equivalent and so the provider wishes to disregard + // the configuration value in favor of the prior. + return errs + } + if attrS.Computed && configV.IsNull() { + // The provider is allowed to change the value of any computed + // attribute that isn't explicitly set in the config. 
+ return errs + } + + // If none of the above conditions match, the provider has made an invalid + // change to this attribute. + if priorV.IsNull() { + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV)) + } + return errs + } + if attrS.Sensitive { + errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value")) + } else { + errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV)) + } + return errs +} diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go new file mode 100644 index 00000000..5a3e4548 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/plan.go @@ -0,0 +1,92 @@ +package plans + +import ( + "sort" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// Plan is the top-level type representing a planned set of changes. +// +// A plan is a summary of the set of changes required to move from a current +// state to a goal state derived from configuration. The described changes +// are not applied directly, but contain an approximation of the final +// result that will be completed during apply by resolving any values that +// cannot be predicted. +// +// A plan must always be accompanied by the state and configuration it was +// built from, since the plan does not itself include all of the information +// required to make the changes indicated. +type Plan struct { + VariableValues map[string]DynamicValue + Changes *Changes + TargetAddrs []addrs.Targetable + ProviderSHA256s map[string][]byte + Backend Backend +} + +// Backend represents the backend-related configuration and other data as it +// existed when a plan was created. +type Backend struct { + // Type is the type of backend that the plan will apply against. + Type string + + // Config is the configuration of the backend, whose schema is decided by + // the backend Type. + Config DynamicValue + + // Workspace is the name of the workspace that was active when the plan + // was created. It is illegal to apply a plan created for one workspace + // to the state of another workspace. + // (This constraint is already enforced by the statefile lineage mechanism, + // but storing this explicitly allows us to return a better error message + // in the situation where the user has the wrong workspace selected.) + Workspace string +} + +func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) { + dv, err := NewDynamicValue(config, configSchema.ImpliedType()) + if err != nil { + return nil, err + } + + return &Backend{ + Type: typeName, + Config: dv, + Workspace: workspaceName, + }, nil +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving plan. +// +// The result is de-duplicated so that each distinct address appears only once. 
+func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
+	if p == nil || p.Changes == nil {
+		return nil
+	}
+
+	m := map[string]addrs.AbsProviderConfig{}
+	for _, rc := range p.Changes.Resources {
+		m[rc.ProviderAddr.String()] = rc.ProviderAddr
+	}
+	if len(m) == 0 {
+		return nil
+	}
+
+	// This is mainly just so we'll get stable results for testing purposes.
+	keys := make([]string, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	ret := make([]addrs.AbsProviderConfig, len(keys))
+	for i, key := range keys {
+		ret[i] = m[key]
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/plans/planfile/config_snapshot.go b/vendor/github.com/hashicorp/terraform/plans/planfile/config_snapshot.go
new file mode 100644
index 00000000..a78a99b3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/planfile/config_snapshot.go
@@ -0,0 +1,218 @@
+package planfile
+
+import (
+	"archive/zip"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path"
+	"sort"
+	"strings"
+	"time"
+
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/terraform/configs/configload"
+)
+
+const configSnapshotPrefix = "tfconfig/"
+const configSnapshotManifestFile = configSnapshotPrefix + "modules.json"
+const configSnapshotModulePrefix = configSnapshotPrefix + "m-"
+
+type configSnapshotModuleRecord struct {
+	Key        string `json:"Key"`
+	SourceAddr string `json:"Source,omitempty"`
+	VersionStr string `json:"Version,omitempty"`
+	Dir        string `json:"Dir"`
+}
+type configSnapshotModuleManifest []configSnapshotModuleRecord
+
+func readConfigSnapshot(z *zip.Reader) (*configload.Snapshot, error) {
+	// Errors from this function are expected to be reported with some
+	// additional prefix context about them being in a config snapshot,
+	// so they should not themselves refer to the config snapshot.
+	// They are also generally indicative of an invalid file, and so since
+	// plan files should not be hand-constructed we don't need to worry
+	// about making the messages user-actionable.
+
+	snap := &configload.Snapshot{
+		Modules: map[string]*configload.SnapshotModule{},
+	}
+	var manifestSrc []byte
+
+	// For processing our source files, we'll just sweep over all the files
+	// and react to them one-by-one to start, and then clean up afterwards
+	// when we'll presumably have found the manifest file.
+	for _, file := range z.File {
+		switch {
+
+		case file.Name == configSnapshotManifestFile:
+			// It's the manifest file, so we'll just read it raw into
+			// manifestSrc for now and process it below.
+			r, err := file.Open()
+			if err != nil {
+				return nil, fmt.Errorf("failed to open module manifest: %s", err)
+			}
+			manifestSrc, err = ioutil.ReadAll(r)
+			if err != nil {
+				return nil, fmt.Errorf("failed to read module manifest: %s", err)
+			}
+
+		case strings.HasPrefix(file.Name, configSnapshotModulePrefix):
+			relName := file.Name[len(configSnapshotModulePrefix):]
+			moduleKey, fileName := path.Split(relName)
+
+			// moduleKey should currently have a trailing slash on it, which we
+			// can use to recognize the difference between the root module
+			// (just a trailing slash) and no module path at all (empty string).
+			if moduleKey == "" {
+				// ignore invalid config entry
+				continue
+			}
+			moduleKey = moduleKey[:len(moduleKey)-1] // trim trailing slash
+
+			r, err := file.Open()
+			if err != nil {
+				return nil, fmt.Errorf("failed to open snapshot of %s from module %q: %s", fileName, moduleKey, err)
+			}
+			fileSrc, err := ioutil.ReadAll(r)
+			if err != nil {
+				return nil, fmt.Errorf("failed to read snapshot of %s from module %q: %s", fileName, moduleKey, err)
+			}
+
+			if _, exists := snap.Modules[moduleKey]; !exists {
+				snap.Modules[moduleKey] = &configload.SnapshotModule{
+					Files: map[string][]byte{},
+					// Will fill in everything else afterwards, when we
+					// process the manifest.
+				}
+			}
+			snap.Modules[moduleKey].Files[fileName] = fileSrc
+		}
+	}
+
+	if manifestSrc == nil {
+		return nil, fmt.Errorf("config snapshot does not have manifest file")
+	}
+
+	var manifest configSnapshotModuleManifest
+	err := json.Unmarshal(manifestSrc, &manifest)
+	if err != nil {
+		return nil, fmt.Errorf("invalid module manifest: %s", err)
+	}
+
+	for _, record := range manifest {
+		modSnap, exists := snap.Modules[record.Key]
+		if !exists {
+			// We'll allow this, assuming that it's a module with no files.
+			// This is still weird, since we generally reject modules with
+			// no files, but we'll allow it because downstream errors will
+			// catch it in that case.
+			modSnap = &configload.SnapshotModule{
+				Files: map[string][]byte{},
+			}
+			snap.Modules[record.Key] = modSnap
+		}
+		modSnap.SourceAddr = record.SourceAddr
+		modSnap.Dir = record.Dir
+		if record.VersionStr != "" {
+			v, err := version.NewVersion(record.VersionStr)
+			if err != nil {
+				return nil, fmt.Errorf("manifest has invalid version string %q for module %q", record.VersionStr, record.Key)
+			}
+			modSnap.Version = v
+		}
+	}
+
+	// Finally, we'll make sure we don't have any errant files for modules that
+	// aren't in the manifest.
+	for k := range snap.Modules {
+		found := false
+		for _, record := range manifest {
+			if record.Key == k {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return nil, fmt.Errorf("found files for module %q that isn't recorded in the manifest", k)
+		}
+	}
+
+	return snap, nil
+}
+
+// writeConfigSnapshot adds to the given zip.Writer one or more files
+// representing the given snapshot.
+//
+// This function creates new files in the writer, so any already-open writer
+// for an earlier file will be invalidated by this call. The writer remains
+// open when this function returns.
+func writeConfigSnapshot(snap *configload.Snapshot, z *zip.Writer) error {
+	// Errors from this function are expected to be reported with some
+	// additional prefix context about them being in a config snapshot,
+	// so they should not themselves refer to the config snapshot.
+	// They are also indicative of a bug in the caller, so they do not
+	// need to be user-actionable.
+
+	var manifest configSnapshotModuleManifest
+	keys := make([]string, 0, len(snap.Modules))
+	for k := range snap.Modules {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
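Before the loop below assembles these records, it may help to see the JSON shape this ultimately writes to the `tfconfig/modules.json` entry, with module source files stored alongside it under `tfconfig/m-<key>/`. A hedged sketch using a local copy of the record type (all field values are invented for the example):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// moduleRecord mirrors configSnapshotModuleRecord from the vendored code.
type moduleRecord struct {
	Key        string `json:"Key"`
	SourceAddr string `json:"Source,omitempty"`
	VersionStr string `json:"Version,omitempty"`
	Dir        string `json:"Dir"`
}

func main() {
	manifest := []moduleRecord{
		{Key: "", Dir: "."}, // the root module has an empty key
		{
			Key:        "vpc",
			SourceAddr: "terraform-aws-modules/vpc/aws", // example source address
			VersionStr: "1.0.0",
			Dir:        ".terraform/modules/vpc",
		},
	}
	out, err := json.MarshalIndent(manifest, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the shape stored at tfconfig/modules.json
}
```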
+ + for _, k := range keys { + snapMod := snap.Modules[k] + record := configSnapshotModuleRecord{ + Dir: snapMod.Dir, + Key: k, + SourceAddr: snapMod.SourceAddr, + } + if snapMod.Version != nil { + record.VersionStr = snapMod.Version.String() + } + manifest = append(manifest, record) + + pathPrefix := fmt.Sprintf("%s%s/", configSnapshotModulePrefix, k) + for filename, src := range snapMod.Files { + zh := &zip.FileHeader{ + Name: pathPrefix + filename, + Method: zip.Deflate, + Modified: time.Now(), + } + w, err := z.CreateHeader(zh) + if err != nil { + return fmt.Errorf("failed to create snapshot of %s from module %q: %s", zh.Name, k, err) + } + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write snapshot of %s from module %q: %s", zh.Name, k, err) + } + } + } + + // Now we'll write our manifest + { + zh := &zip.FileHeader{ + Name: configSnapshotManifestFile, + Method: zip.Deflate, + Modified: time.Now(), + } + src, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return fmt.Errorf("failed to serialize module manifest: %s", err) + } + w, err := z.CreateHeader(zh) + if err != nil { + return fmt.Errorf("failed to create module manifest: %s", err) + } + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write module manifest: %s", err) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plans/planfile/doc.go b/vendor/github.com/hashicorp/terraform/plans/planfile/doc.go new file mode 100644 index 00000000..edd16af2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/planfile/doc.go @@ -0,0 +1,6 @@ +// Package planfile deals with the file format used to serialize plans to disk +// and then deserialize them back into memory later. +// +// A plan file contains the planned changes along with the configuration and +// state snapshot that they are based on. +package planfile diff --git a/vendor/github.com/hashicorp/terraform/plans/planfile/reader.go b/vendor/github.com/hashicorp/terraform/plans/planfile/reader.go new file mode 100644 index 00000000..579e2859 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/planfile/reader.go @@ -0,0 +1,148 @@ +package planfile + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" +) + +const tfstateFilename = "tfstate" + +// Reader is the main type used to read plan files. Create a Reader by calling +// Open. +// +// A plan file is a random-access file format, so methods of Reader must +// be used to access the individual portions of the file for further +// processing. +type Reader struct { + zip *zip.ReadCloser +} + +// Open creates a Reader for the file at the given filename, or returns an +// error if the file doesn't seem to be a planfile. +func Open(filename string) (*Reader, error) { + r, err := zip.OpenReader(filename) + if err != nil { + // To give a better error message, we'll sniff to see if this looks + // like our old plan format from versions prior to 0.12. 
+ if b, sErr := ioutil.ReadFile(filename); sErr == nil { + if bytes.HasPrefix(b, []byte("tfplan")) { + return nil, fmt.Errorf("the given plan file was created by an earlier version of Terraform; plan files cannot be shared between different Terraform versions") + } + } + return nil, err + } + + // Sniff to make sure this looks like a plan file, as opposed to any other + // random zip file the user might have around. + var planFile *zip.File + for _, file := range r.File { + if file.Name == tfplanFilename { + planFile = file + break + } + } + if planFile == nil { + return nil, fmt.Errorf("the given file is not a valid plan file") + } + + // For now, we'll just accept the presence of the tfplan file as enough, + // and wait to validate the version when the caller requests the plan + // itself. + + return &Reader{ + zip: r, + }, nil +} + +// ReadPlan reads the plan embedded in the plan file. +// +// Errors can be returned for various reasons, including if the plan file +// is not of an appropriate format version, if it was created by a different +// version of Terraform, if it is invalid, etc. +func (r *Reader) ReadPlan() (*plans.Plan, error) { + var planFile *zip.File + for _, file := range r.zip.File { + if file.Name == tfplanFilename { + planFile = file + break + } + } + if planFile == nil { + // This should never happen because we checked for this file during + // Open, but we'll check anyway to be safe. + return nil, fmt.Errorf("the plan file is invalid") + } + + pr, err := planFile.Open() + if err != nil { + return nil, fmt.Errorf("failed to retrieve plan from plan file: %s", err) + } + defer pr.Close() + + return readTfplan(pr) +} + +// ReadStateFile reads the state file embedded in the plan file. +// +// If the plan file contains no embedded state file, the returned error is +// statefile.ErrNoState. +func (r *Reader) ReadStateFile() (*statefile.File, error) { + for _, file := range r.zip.File { + if file.Name == tfstateFilename { + r, err := file.Open() + if err != nil { + return nil, fmt.Errorf("failed to extract state from plan file: %s", err) + } + return statefile.Read(r) + } + } + return nil, statefile.ErrNoState +} + +// ReadConfigSnapshot reads the configuration snapshot embedded in the plan +// file. +// +// This is a lower-level alternative to ReadConfig that just extracts the +// source files, without attempting to parse them. +func (r *Reader) ReadConfigSnapshot() (*configload.Snapshot, error) { + return readConfigSnapshot(&r.zip.Reader) +} + +// ReadConfig reads the configuration embedded in the plan file. +// +// Internally this function delegates to the configs/configload package to +// parse the embedded configuration and so it returns diagnostics (rather than +// a native Go error as with other methods on Reader). +func (r *Reader) ReadConfig() (*configs.Config, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + snap, err := r.ReadConfigSnapshot() + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read configuration from plan file", + fmt.Sprintf("The configuration file snapshot in the plan file could not be read: %s.", err), + )) + return nil, diags + } + + loader := configload.NewLoaderFromSnapshot(snap) + rootDir := snap.Modules[""].Dir // Root module base directory + config, configDiags := loader.LoadConfig(rootDir) + diags = diags.Append(configDiags) + + return config, diags +} + +// Close closes the file, after which no other operations may be performed. 
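Together with `Close`, defined next, the Reader supports a short end-to-end read flow. A hedged usage sketch, assuming the package is importable at its upstream module path rather than from this repository's vendor directory; `example.tfplan` is a placeholder filename:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/plans/planfile"
)

func main() {
	// Open sniffs the file and fails cleanly on pre-0.12 plan formats.
	r, err := planfile.Open("example.tfplan") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	// The plan, state snapshot, and config snapshot are independently
	// addressable; here we only decode the plan itself.
	plan, err := r.ReadPlan()
	if err != nil {
		log.Fatal(err)
	}
	for _, rc := range plan.Changes.Resources {
		fmt.Println(rc.Addr, rc.ChangeSrc.Action)
	}
}
```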
+func (r *Reader) Close() error { + return r.zip.Close() +} diff --git a/vendor/github.com/hashicorp/terraform/plans/planfile/tfplan.go b/vendor/github.com/hashicorp/terraform/plans/planfile/tfplan.go new file mode 100644 index 00000000..e1deeb08 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/planfile/tfplan.go @@ -0,0 +1,474 @@ +package planfile + +import ( + "fmt" + "io" + "io/ioutil" + + "github.com/golang/protobuf/proto" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/plans/internal/planproto" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + "github.com/hashicorp/terraform/version" +) + +const tfplanFormatVersion = 3 +const tfplanFilename = "tfplan" + +// --------------------------------------------------------------------------- +// This file deals with the internal structure of the "tfplan" sub-file within +// the plan file format. It's all private API, wrapped by methods defined +// elsewhere. This is the only file that should import the +// ../internal/planproto package, which contains the ugly stubs generated +// by the protobuf compiler. +// --------------------------------------------------------------------------- + +// readTfplan reads a protobuf-encoded description from the plan portion of +// a plan file, which is stored in a special file in the archive called +// "tfplan". +func readTfplan(r io.Reader) (*plans.Plan, error) { + src, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + var rawPlan planproto.Plan + err = proto.Unmarshal(src, &rawPlan) + if err != nil { + return nil, fmt.Errorf("parse error: %s", err) + } + + if rawPlan.Version != tfplanFormatVersion { + return nil, fmt.Errorf("unsupported plan file format version %d; only version %d is supported", rawPlan.Version, tfplanFormatVersion) + } + + if rawPlan.TerraformVersion != version.String() { + return nil, fmt.Errorf("plan file was created by Terraform %s, but this is %s; plan files cannot be transferred between different Terraform versions", rawPlan.TerraformVersion, version.String()) + } + + plan := &plans.Plan{ + VariableValues: map[string]plans.DynamicValue{}, + Changes: &plans.Changes{ + Outputs: []*plans.OutputChangeSrc{}, + Resources: []*plans.ResourceInstanceChangeSrc{}, + }, + + ProviderSHA256s: map[string][]byte{}, + } + + for _, rawOC := range rawPlan.OutputChanges { + name := rawOC.Name + change, err := changeFromTfplan(rawOC.Change) + if err != nil { + return nil, fmt.Errorf("invalid plan for output %q: %s", name, err) + } + + plan.Changes.Outputs = append(plan.Changes.Outputs, &plans.OutputChangeSrc{ + // All output values saved in the plan file are root module outputs, + // since we don't retain others. (They can be easily recomputed + // during apply). 
+ Addr: addrs.OutputValue{Name: name}.Absolute(addrs.RootModuleInstance), + ChangeSrc: *change, + Sensitive: rawOC.Sensitive, + }) + } + + for _, rawRC := range rawPlan.ResourceChanges { + change, err := resourceChangeFromTfplan(rawRC) + if err != nil { + // errors from resourceChangeFromTfplan already include context + return nil, err + } + + plan.Changes.Resources = append(plan.Changes.Resources, change) + } + + for _, rawTargetAddr := range rawPlan.TargetAddrs { + target, diags := addrs.ParseTargetStr(rawTargetAddr) + if diags.HasErrors() { + return nil, fmt.Errorf("plan contains invalid target address %q: %s", target, diags.Err()) + } + plan.TargetAddrs = append(plan.TargetAddrs, target.Subject) + } + + for name, rawHashObj := range rawPlan.ProviderHashes { + if len(rawHashObj.Sha256) == 0 { + return nil, fmt.Errorf("no SHA256 hash for provider %q plugin", name) + } + + plan.ProviderSHA256s[name] = rawHashObj.Sha256 + } + + for name, rawVal := range rawPlan.Variables { + val, err := valueFromTfplan(rawVal) + if err != nil { + return nil, fmt.Errorf("invalid value for input variable %q: %s", name, err) + } + plan.VariableValues[name] = val + } + + if rawBackend := rawPlan.Backend; rawBackend == nil { + return nil, fmt.Errorf("plan file has no backend settings; backend settings are required") + } else { + config, err := valueFromTfplan(rawBackend.Config) + if err != nil { + return nil, fmt.Errorf("plan file has invalid backend configuration: %s", err) + } + plan.Backend = plans.Backend{ + Type: rawBackend.Type, + Config: config, + Workspace: rawBackend.Workspace, + } + } + + return plan, nil +} + +func resourceChangeFromTfplan(rawChange *planproto.ResourceInstanceChange) (*plans.ResourceInstanceChangeSrc, error) { + if rawChange == nil { + // Should never happen in practice, since protobuf can't represent + // a nil value in a list. 
+ return nil, fmt.Errorf("resource change object is absent") + } + + ret := &plans.ResourceInstanceChangeSrc{} + + moduleAddr := addrs.RootModuleInstance + if rawChange.ModulePath != "" { + var diags tfdiags.Diagnostics + moduleAddr, diags = addrs.ParseModuleInstanceStr(rawChange.ModulePath) + if diags.HasErrors() { + return nil, diags.Err() + } + } + + providerAddr, diags := addrs.ParseAbsProviderConfigStr(rawChange.Provider) + if diags.HasErrors() { + return nil, diags.Err() + } + ret.ProviderAddr = providerAddr + + var mode addrs.ResourceMode + switch rawChange.Mode { + case planproto.ResourceInstanceChange_managed: + mode = addrs.ManagedResourceMode + case planproto.ResourceInstanceChange_data: + mode = addrs.DataResourceMode + default: + return nil, fmt.Errorf("resource has invalid mode %s", rawChange.Mode) + } + + typeName := rawChange.Type + name := rawChange.Name + + resAddr := addrs.Resource{ + Mode: mode, + Type: typeName, + Name: name, + } + + var instKey addrs.InstanceKey + switch rawTk := rawChange.InstanceKey.(type) { + case nil: + case *planproto.ResourceInstanceChange_Int: + instKey = addrs.IntKey(rawTk.Int) + case *planproto.ResourceInstanceChange_Str: + instKey = addrs.StringKey(rawTk.Str) + default: + return nil, fmt.Errorf("instance of %s has invalid key type %T", resAddr.Absolute(moduleAddr), rawChange.InstanceKey) + } + + ret.Addr = resAddr.Instance(instKey).Absolute(moduleAddr) + + if rawChange.DeposedKey != "" { + if len(rawChange.DeposedKey) != 8 { + return nil, fmt.Errorf("deposed object for %s has invalid deposed key %q", ret.Addr, rawChange.DeposedKey) + } + ret.DeposedKey = states.DeposedKey(rawChange.DeposedKey) + } + + change, err := changeFromTfplan(rawChange.Change) + if err != nil { + return nil, fmt.Errorf("invalid plan for resource %s: %s", ret.Addr, err) + } + + ret.ChangeSrc = *change + + if len(rawChange.Private) != 0 { + ret.Private = rawChange.Private + } + + return ret, nil +} + +func changeFromTfplan(rawChange *planproto.Change) (*plans.ChangeSrc, error) { + if rawChange == nil { + return nil, fmt.Errorf("change object is absent") + } + + ret := &plans.ChangeSrc{} + + // -1 indicates that there is no index. We'll customize these below + // depending on the change action, and then decode. 
+	beforeIdx, afterIdx := -1, -1
+
+	switch rawChange.Action {
+	case planproto.Action_NOOP:
+		ret.Action = plans.NoOp
+		beforeIdx = 0
+		afterIdx = 0
+	case planproto.Action_CREATE:
+		ret.Action = plans.Create
+		afterIdx = 0
+	case planproto.Action_READ:
+		ret.Action = plans.Read
+		beforeIdx = 0
+		afterIdx = 1
+	case planproto.Action_UPDATE:
+		ret.Action = plans.Update
+		beforeIdx = 0
+		afterIdx = 1
+	case planproto.Action_DELETE:
+		ret.Action = plans.Delete
+		beforeIdx = 0
+	case planproto.Action_CREATE_THEN_DELETE:
+		ret.Action = plans.CreateThenDelete
+		beforeIdx = 0
+		afterIdx = 1
+	case planproto.Action_DELETE_THEN_CREATE:
+		ret.Action = plans.DeleteThenCreate
+		beforeIdx = 0
+		afterIdx = 1
+	default:
+		return nil, fmt.Errorf("invalid change action %s", rawChange.Action)
+	}
+
+	if beforeIdx != -1 {
+		if l := len(rawChange.Values); l <= beforeIdx {
+			return nil, fmt.Errorf("incorrect number of values (%d) for %s change", l, rawChange.Action)
+		}
+		var err error
+		ret.Before, err = valueFromTfplan(rawChange.Values[beforeIdx])
+		if err != nil {
+			return nil, fmt.Errorf("invalid \"before\" value: %s", err)
+		}
+		if ret.Before == nil {
+			return nil, fmt.Errorf("missing \"before\" value: %s", err)
+		}
+	}
+	if afterIdx != -1 {
+		if l := len(rawChange.Values); l <= afterIdx {
+			return nil, fmt.Errorf("incorrect number of values (%d) for %s change", l, rawChange.Action)
+		}
+		var err error
+		ret.After, err = valueFromTfplan(rawChange.Values[afterIdx])
+		if err != nil {
+			return nil, fmt.Errorf("invalid \"after\" value: %s", err)
+		}
+		if ret.After == nil {
+			return nil, fmt.Errorf("missing \"after\" value: %s", err)
+		}
+	}
+
+	return ret, nil
+}
+
+func valueFromTfplan(rawV *planproto.DynamicValue) (plans.DynamicValue, error) {
+	if len(rawV.Msgpack) == 0 { // len(0) because that's the default value for a "bytes" in protobuf
+		return nil, fmt.Errorf("dynamic value does not have msgpack serialization")
+	}
+
+	return plans.DynamicValue(rawV.Msgpack), nil
+}
+
+// writeTfplan serializes the given plan into the protobuf-based format used
+// for the "tfplan" portion of a plan file.
+func writeTfplan(plan *plans.Plan, w io.Writer) error {
+	if plan == nil {
+		return fmt.Errorf("cannot write plan file for nil plan")
+	}
+	if plan.Changes == nil {
+		return fmt.Errorf("cannot write plan file with nil changeset")
+	}
+
+	rawPlan := &planproto.Plan{
+		Version:          tfplanFormatVersion,
+		TerraformVersion: version.String(),
+		ProviderHashes:   map[string]*planproto.Hash{},
+
+		Variables:       map[string]*planproto.DynamicValue{},
+		OutputChanges:   []*planproto.OutputChange{},
+		ResourceChanges: []*planproto.ResourceInstanceChange{},
+	}
+
+	for _, oc := range plan.Changes.Outputs {
+		// When serializing a plan we only retain the root outputs, since
+		// changes to these are externally-visible side effects (e.g. via
+		// terraform_remote_state).
+		if !oc.Addr.Module.IsRoot() {
+			continue
+		}
+
+		name := oc.Addr.OutputValue.Name
+
+		// Writing outputs as cty.DynamicPseudoType forces the stored values
+		// to also contain dynamic type information, so we can recover the
+		// original type when we read the values back in readTfplan.
+ protoChange, err := changeToTfplan(&oc.ChangeSrc) + if err != nil { + return fmt.Errorf("cannot write output value %q: %s", name, err) + } + + rawPlan.OutputChanges = append(rawPlan.OutputChanges, &planproto.OutputChange{ + Name: name, + Change: protoChange, + Sensitive: oc.Sensitive, + }) + } + + for _, rc := range plan.Changes.Resources { + rawRC, err := resourceChangeToTfplan(rc) + if err != nil { + return err + } + rawPlan.ResourceChanges = append(rawPlan.ResourceChanges, rawRC) + } + + for _, targetAddr := range plan.TargetAddrs { + rawPlan.TargetAddrs = append(rawPlan.TargetAddrs, targetAddr.String()) + } + + for name, hash := range plan.ProviderSHA256s { + rawPlan.ProviderHashes[name] = &planproto.Hash{ + Sha256: hash, + } + } + + for name, val := range plan.VariableValues { + rawPlan.Variables[name] = valueToTfplan(val) + } + + if plan.Backend.Type == "" || plan.Backend.Config == nil { + // This suggests a bug in the code that created the plan, since it + // ought to always have a backend populated, even if it's the default + // "local" backend with a local state file. + return fmt.Errorf("plan does not have a backend configuration") + } + + rawPlan.Backend = &planproto.Backend{ + Type: plan.Backend.Type, + Config: valueToTfplan(plan.Backend.Config), + Workspace: plan.Backend.Workspace, + } + + src, err := proto.Marshal(rawPlan) + if err != nil { + return fmt.Errorf("serialization error: %s", err) + } + + _, err = w.Write(src) + if err != nil { + return fmt.Errorf("failed to write plan to plan file: %s", err) + } + + return nil +} + +func resourceChangeToTfplan(change *plans.ResourceInstanceChangeSrc) (*planproto.ResourceInstanceChange, error) { + ret := &planproto.ResourceInstanceChange{} + + ret.ModulePath = change.Addr.Module.String() + + relAddr := change.Addr.Resource + + switch relAddr.Resource.Mode { + case addrs.ManagedResourceMode: + ret.Mode = planproto.ResourceInstanceChange_managed + case addrs.DataResourceMode: + ret.Mode = planproto.ResourceInstanceChange_data + default: + return nil, fmt.Errorf("resource %s has unsupported mode %s", relAddr, relAddr.Resource.Mode) + } + + ret.Type = relAddr.Resource.Type + ret.Name = relAddr.Resource.Name + + switch tk := relAddr.Key.(type) { + case nil: + // Nothing to do, then. 
+ case addrs.IntKey: + ret.InstanceKey = &planproto.ResourceInstanceChange_Int{ + Int: int64(tk), + } + case addrs.StringKey: + ret.InstanceKey = &planproto.ResourceInstanceChange_Str{ + Str: string(tk), + } + default: + return nil, fmt.Errorf("resource %s has unsupported instance key type %T", relAddr, relAddr.Key) + } + + ret.DeposedKey = string(change.DeposedKey) + ret.Provider = change.ProviderAddr.String() + + valChange, err := changeToTfplan(&change.ChangeSrc) + if err != nil { + return nil, fmt.Errorf("failed to serialize resource %s change: %s", relAddr, err) + } + ret.Change = valChange + + if len(change.Private) > 0 { + ret.Private = change.Private + } + + return ret, nil +} + +func changeToTfplan(change *plans.ChangeSrc) (*planproto.Change, error) { + ret := &planproto.Change{} + + before := valueToTfplan(change.Before) + after := valueToTfplan(change.After) + + switch change.Action { + case plans.NoOp: + ret.Action = planproto.Action_NOOP + ret.Values = []*planproto.DynamicValue{before} // before and after should be identical + case plans.Create: + ret.Action = planproto.Action_CREATE + ret.Values = []*planproto.DynamicValue{after} + case plans.Read: + ret.Action = planproto.Action_READ + ret.Values = []*planproto.DynamicValue{before, after} + case plans.Update: + ret.Action = planproto.Action_UPDATE + ret.Values = []*planproto.DynamicValue{before, after} + case plans.Delete: + ret.Action = planproto.Action_DELETE + ret.Values = []*planproto.DynamicValue{before} + case plans.DeleteThenCreate: + ret.Action = planproto.Action_DELETE_THEN_CREATE + ret.Values = []*planproto.DynamicValue{before, after} + case plans.CreateThenDelete: + ret.Action = planproto.Action_CREATE_THEN_DELETE + ret.Values = []*planproto.DynamicValue{before, after} + default: + return nil, fmt.Errorf("invalid change action %s", change.Action) + } + + return ret, nil +} + +func valueToTfplan(val plans.DynamicValue) *planproto.DynamicValue { + if val == nil { + // protobuf can't represent nil, so we'll represent it as a + // DynamicValue that has no serializations at all. + return &planproto.DynamicValue{} + } + return &planproto.DynamicValue{ + Msgpack: []byte(val), + } +} diff --git a/vendor/github.com/hashicorp/terraform/plans/planfile/writer.go b/vendor/github.com/hashicorp/terraform/plans/planfile/writer.go new file mode 100644 index 00000000..7759463b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plans/planfile/writer.go @@ -0,0 +1,72 @@ +package planfile + +import ( + "archive/zip" + "fmt" + "os" + "time" + + "github.com/hashicorp/terraform/configs/configload" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states/statefile" +) + +// Create creates a new plan file with the given filename, overwriting any +// file that might already exist there. +// +// A plan file contains both a snapshot of the configuration and of the latest +// state file in addition to the plan itself, so that Terraform can detect +// if the world has changed since the plan was created and thus refuse to +// apply it. 
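Stepping back from the decode/encode pair above: both directions fix the same compact convention, in which the protobuf `values` list carries only the operands an action actually needs (`Create` stores just "after", `Delete` just "before", `Update` and the two replace variants both). The toy encoder below restates that convention in isolation; the `Action` type and the `operands` helper are local stand-ins invented for the example, not the real planproto types. The `Create` function that follows then assembles the whole archive.

```go
package main

import "fmt"

type Action string

const (
	NoOp   Action = "no-op"
	Create Action = "create"
	Read   Action = "read"
	Update Action = "update"
	Delete Action = "delete"
)

// operands reports which of (before, after) a change of the given action
// serializes, mirroring the beforeIdx/afterIdx bookkeeping above.
func operands(a Action, before, after string) []string {
	switch a {
	case NoOp:
		return []string{before} // before and after should be identical
	case Create:
		return []string{after}
	case Delete:
		return []string{before}
	case Read, Update:
		return []string{before, after}
	default:
		panic(fmt.Sprintf("invalid change action %s", a))
	}
}

func main() {
	fmt.Println(operands(Create, "nil", `{"name":"web"}`)) // [{"name":"web"}]
	fmt.Println(operands(Update, `{"v":1}`, `{"v":2}`))    // [{"v":1} {"v":2}]
}
```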
+func Create(filename string, configSnap *configload.Snapshot, stateFile *statefile.File, plan *plans.Plan) error { + f, err := os.Create(filename) + if err != nil { + return err + } + defer f.Close() + + zw := zip.NewWriter(f) + defer zw.Close() + + // tfplan file + { + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: tfplanFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create tfplan file: %s", err) + } + err = writeTfplan(plan, w) + if err != nil { + return fmt.Errorf("failed to write plan: %s", err) + } + } + + // tfstate file + { + w, err := zw.CreateHeader(&zip.FileHeader{ + Name: tfstateFilename, + Method: zip.Deflate, + Modified: time.Now(), + }) + if err != nil { + return fmt.Errorf("failed to create embedded tfstate file: %s", err) + } + err = statefile.Write(stateFile, w) + if err != nil { + return fmt.Errorf("failed to write state snapshot: %s", err) + } + } + + // tfconfig directory + { + err := writeConfigSnapshot(configSnap, zw) + if err != nil { + return fmt.Errorf("failed to write config snapshot: %s", err) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go new file mode 100644 index 00000000..0eab5385 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/client.go @@ -0,0 +1,35 @@ +package plugin + +import ( + "os" + "os/exec" + + hclog "github.com/hashicorp/go-hclog" + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/plugin/discovery" +) + +// ClientConfig returns a configuration object that can be used to instantiate +// a client for the plugin described by the given metadata. +func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig { + logger := hclog.New(&hclog.LoggerOptions{ + Name: "plugin", + Level: hclog.Trace, + Output: os.Stderr, + }) + + return &plugin.ClientConfig{ + Cmd: exec.Command(m.Path), + HandshakeConfig: Handshake, + VersionedPlugins: VersionedPlugins, + Managed: true, + Logger: logger, + AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, + AutoMTLS: true, + } +} + +// Client returns a plugin client for the plugin described by the given metadata. +func Client(m discovery.PluginMeta) *plugin.Client { + return plugin.NewClient(ClientConfig(m)) +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go new file mode 100644 index 00000000..51cb2fe2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go @@ -0,0 +1,132 @@ +package convert + +import ( + proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// WarnsAndErrorsToProto converts the warnings and errors return by the legacy +// provider to protobuf diagnostics. +func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) { + for _, w := range warns { + diags = AppendProtoDiag(diags, w) + } + + for _, e := range errs { + diags = AppendProtoDiag(diags, e) + } + + return diags +} + +// AppendProtoDiag appends a new diagnostic from a warning string or an error. +// This panics if d is not a string or error. 
+func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
+	switch d := d.(type) {
+	case cty.PathError:
+		ap := PathToAttributePath(d.Path)
+		diags = append(diags, &proto.Diagnostic{
+			Severity:  proto.Diagnostic_ERROR,
+			Summary:   d.Error(),
+			Attribute: ap,
+		})
+	case error:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_ERROR,
+			Summary:  d.Error(),
+		})
+	case string:
+		diags = append(diags, &proto.Diagnostic{
+			Severity: proto.Diagnostic_WARNING,
+			Summary:  d,
+		})
+	case *proto.Diagnostic:
+		diags = append(diags, d)
+	case []*proto.Diagnostic:
+		diags = append(diags, d...)
+	}
+	return diags
+}
+
+// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics.
+func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+	for _, d := range ds {
+		var severity tfdiags.Severity
+
+		switch d.Severity {
+		case proto.Diagnostic_ERROR:
+			severity = tfdiags.Error
+		case proto.Diagnostic_WARNING:
+			severity = tfdiags.Warning
+		}
+
+		var newDiag tfdiags.Diagnostic
+
+		// if there's an attribute path, we need to create an AttributeValue diagnostic
+		if d.Attribute != nil {
+			path := AttributePathToPath(d.Attribute)
+			newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
+		} else {
+			newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
+		}
+
+		diags = diags.Append(newDiag)
+	}
+
+	return diags
+}
+
+// AttributePathToPath takes the proto encoded path and converts it to a cty.Path
+func AttributePathToPath(ap *proto.AttributePath) cty.Path {
+	var p cty.Path
+	for _, step := range ap.Steps {
+		switch selector := step.Selector.(type) {
+		case *proto.AttributePath_Step_AttributeName:
+			p = p.GetAttr(selector.AttributeName)
+		case *proto.AttributePath_Step_ElementKeyString:
+			p = p.Index(cty.StringVal(selector.ElementKeyString))
+		case *proto.AttributePath_Step_ElementKeyInt:
+			p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
+		}
+	}
+	return p
+}
+
+// PathToAttributePath takes a cty.Path and converts it to a proto-encoded path.
+func PathToAttributePath(p cty.Path) *proto.AttributePath {
+	ap := &proto.AttributePath{}
+	for _, step := range p {
+		switch selector := step.(type) {
+		case cty.GetAttrStep:
+			ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+				Selector: &proto.AttributePath_Step_AttributeName{
+					AttributeName: selector.Name,
+				},
+			})
+		case cty.IndexStep:
+			key := selector.Key
+			switch key.Type() {
+			case cty.String:
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyString{
+						ElementKeyString: key.AsString(),
+					},
+				})
+			case cty.Number:
+				v, _ := key.AsBigFloat().Int64()
+				ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
+					Selector: &proto.AttributePath_Step_ElementKeyInt{
+						ElementKeyInt: v,
+					},
+				})
+			default:
+				// We'll bail early if we encounter anything else, and just
+				// return the valid prefix.
+				return ap
+			}
+		}
+	}
+	return ap
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
new file mode 100644
index 00000000..6a45f54c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
@@ -0,0 +1,154 @@
+package convert
+
+import (
+	"encoding/json"
+	"reflect"
+	"sort"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/providers"
+)
+
+// ConfigSchemaToProto takes a *configschema.Block and converts it to a
+// proto.Schema_Block for a grpc response.
+func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
+	block := &proto.Schema_Block{}
+
+	for _, name := range sortedKeys(b.Attributes) {
+		a := b.Attributes[name]
+		attr := &proto.Schema_Attribute{
+			Name:        name,
+			Description: a.Description,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Required:    a.Required,
+			Sensitive:   a.Sensitive,
+		}
+
+		ty, err := json.Marshal(a.Type)
+		if err != nil {
+			panic(err)
+		}
+
+		attr.Type = ty
+
+		block.Attributes = append(block.Attributes, attr)
+	}
+
+	for _, name := range sortedKeys(b.BlockTypes) {
+		b := b.BlockTypes[name]
+		block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
+	}
+
+	return block
+}
+
+func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
+	var nesting proto.Schema_NestedBlock_NestingMode
+	switch b.Nesting {
+	case configschema.NestingSingle:
+		nesting = proto.Schema_NestedBlock_SINGLE
+	case configschema.NestingGroup:
+		nesting = proto.Schema_NestedBlock_GROUP
+	case configschema.NestingList:
+		nesting = proto.Schema_NestedBlock_LIST
+	case configschema.NestingSet:
+		nesting = proto.Schema_NestedBlock_SET
+	case configschema.NestingMap:
+		nesting = proto.Schema_NestedBlock_MAP
+	default:
+		nesting = proto.Schema_NestedBlock_INVALID
+	}
+	return &proto.Schema_NestedBlock{
+		TypeName: name,
+		Block:    ConfigSchemaToProto(&b.Block),
+		Nesting:  nesting,
+		MinItems: int64(b.MinItems),
+		MaxItems: int64(b.MaxItems),
+	}
+}
+
+// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
+func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
+	return providers.Schema{
+		Version: s.Version,
+		Block:   ProtoToConfigSchema(s.Block),
+	}
+}
+
+// ProtoToConfigSchema takes the Schema_Block from a grpc response and converts it
+// to a terraform *configschema.Block.
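The JSON-encoded attribute types that `ConfigSchemaToProto` produces with `json.Marshal(a.Type)` are ordinary cty type serializations. A quick standalone demonstration (the import uses the upstream go-cty path); `ProtoToConfigSchema`, which follows, reverses this with `json.Unmarshal` into a `cty.Type`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Attribute types cross the plugin wire as JSON-serialized cty types.
	for _, ty := range []cty.Type{cty.String, cty.List(cty.String), cty.DynamicPseudoType} {
		b, err := json.Marshal(ty)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", b) // "string", ["list","string"], "dynamic"
	}
}
```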
+func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
+	block := &configschema.Block{
+		Attributes: make(map[string]*configschema.Attribute),
+		BlockTypes: make(map[string]*configschema.NestedBlock),
+	}
+
+	for _, a := range b.Attributes {
+		attr := &configschema.Attribute{
+			Description: a.Description,
+			Required:    a.Required,
+			Optional:    a.Optional,
+			Computed:    a.Computed,
+			Sensitive:   a.Sensitive,
+		}
+
+		if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
+			panic(err)
+		}
+
+		block.Attributes[a.Name] = attr
+	}
+
+	for _, b := range b.BlockTypes {
+		block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
+	}
+
+	return block
+}
+
+func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
+	var nesting configschema.NestingMode
+	switch b.Nesting {
+	case proto.Schema_NestedBlock_SINGLE:
+		nesting = configschema.NestingSingle
+	case proto.Schema_NestedBlock_GROUP:
+		nesting = configschema.NestingGroup
+	case proto.Schema_NestedBlock_LIST:
+		nesting = configschema.NestingList
+	case proto.Schema_NestedBlock_MAP:
+		nesting = configschema.NestingMap
+	case proto.Schema_NestedBlock_SET:
+		nesting = configschema.NestingSet
+	default:
+		// In all other cases we'll leave it as the zero value (invalid) and
+		// let the caller validate it and deal with this.
+	}
+
+	nb := &configschema.NestedBlock{
+		Nesting:  nesting,
+		MinItems: int(b.MinItems),
+		MaxItems: int(b.MaxItems),
+	}
+
+	nested := ProtoToConfigSchema(b.Block)
+	nb.Block = *nested
+	return nb
+}
+
+// sortedKeys returns the lexically sorted keys from the given map. This is
+// used to make schema conversions deterministic. This panics if the map keys
+// are not strings.
+func sortedKeys(m interface{}) []string {
+	v := reflect.ValueOf(m)
+	keys := make([]string, v.Len())
+
+	mapKeys := v.MapKeys()
+	for i, k := range mapKeys {
+		keys[i] = k.Interface().(string)
+	}
+
+	sort.Strings(keys)
+	return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
new file mode 100644
index 00000000..729e9709
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
@@ -0,0 +1,64 @@
+package discovery
+
+// Error is a type used to describe situations that the caller must handle
+// since they indicate some form of user error.
+//
+// The functions and methods that return these specialized errors indicate so
+// in their documentation. The Error type should not itself be used directly,
+// but rather errors should be compared using the == operator with the
+// error constants in this package.
+//
+// Values of this type are _not_ used when the error being reported is an
+// operational error (server unavailable, etc) or indicative of a bug in
+// this package or its caller.
+type Error string
+
+// ErrorNoSuitableVersion indicates that a suitable version (meeting given
+// constraints) is not available.
+const ErrorNoSuitableVersion = Error("no suitable version is available")
+
+// ErrorNoVersionCompatible indicates that all of the available versions
+// that otherwise met constraints are not compatible with the current
+// version of Terraform.
+const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
+
+// ErrorVersionIncompatible indicates that all of the versions within the
+// constraints are not compatible with the current version of Terraform, though
+// there does exist a version outside of the constraints that is compatible.
+const ErrorVersionIncompatible = Error("incompatible provider version")
+
+// ErrorNoSuchProvider indicates that no provider exists with the given name.
+const ErrorNoSuchProvider = Error("no provider exists with the given name")
+
+// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
+// versions that otherwise met constraints are not compatible with the
+// requested platform
+const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform")
+
+// ErrorMissingChecksumVerification indicates that either the provider
+// distribution is missing the SHA256SUMS file or the checksum file does
+// not contain a checksum for the binary plugin
+const ErrorMissingChecksumVerification = Error("unable to verify checksum")
+
+// ErrorChecksumVerification indicates that the current checksum of the
+// provider plugin has changed since the initial release and is not trusted
+// to download
+const ErrorChecksumVerification = Error("unexpected plugin checksum")
+
+// ErrorSignatureVerification indicates that the digital signature for a
+// provider distribution could not be verified for one of the following
+// reasons: missing signature file, missing public key, or the signature
+// was not signed by any known key for the publisher
+const ErrorSignatureVerification = Error("unable to verify signature")
+
+// ErrorServiceUnreachable indicates that the network was unable to connect
+// to the registry service
+const ErrorServiceUnreachable = Error("registry service is unreachable")
+
+// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
+// to the public registry in particular, so we can show a link to the statuspage
+const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
+
+func (err Error) Error() string {
+	return string(err)
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
new file mode 100644
index 00000000..f053312b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/find.go
@@ -0,0 +1,191 @@
+package discovery
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// FindPlugins looks in the given directories for files whose filenames
+// suggest that they are plugins of the given kind (e.g. "provider") and
+// returns a PluginMetaSet representing the discovered potential-plugins.
+//
+// Currently this supports two different naming schemes. The current
+// standard naming scheme is a subdirectory called $GOOS_$GOARCH containing
+// files named terraform-$KIND-$NAME_v$VERSION. The legacy naming scheme is
+// files directly in the given directory whose names are like
+// terraform-$KIND-$NAME.
+//
+// Only one plugin will be returned for each unique plugin (name, version)
+// pair, with preference given to files found in earlier directories.
+//
+// This is a convenience wrapper around FindPluginPaths and ResolvePluginPaths.
+func FindPlugins(kind string, dirs []string) PluginMetaSet { + return ResolvePluginPaths(FindPluginPaths(kind, dirs)) +} + +// FindPluginPaths looks in the given directories for files whose filenames +// suggest that they are plugins of the given kind (e.g. "provider"). +// +// The return value is a list of absolute paths that appear to refer to +// plugins in the given directories, based only on what can be inferred +// from the naming scheme. The paths returned are ordered such that files +// in later dirs appear after files in earlier dirs in the given directory +// list. Within the same directory plugins are returned in a consistent but +// undefined order. +func FindPluginPaths(kind string, dirs []string) []string { + // This is just a thin wrapper around findPluginPaths so that we can + // use the latter in tests with a fake machineName so we can use our + // test fixtures. + return findPluginPaths(kind, dirs) +} + +func findPluginPaths(kind string, dirs []string) []string { + prefix := "terraform-" + kind + "-" + + ret := make([]string, 0, len(dirs)) + + for _, dir := range dirs { + items, err := ioutil.ReadDir(dir) + if err != nil { + // Ignore missing dirs, non-dirs, etc + continue + } + + log.Printf("[DEBUG] checking for %s in %q", kind, dir) + + for _, item := range items { + fullName := item.Name() + + if !strings.HasPrefix(fullName, prefix) { + continue + } + + // New-style paths must have a version segment in filename + if strings.Contains(strings.ToLower(fullName), "_v") { + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[DEBUG] found %s %q", kind, fullName) + ret = append(ret, filepath.Clean(absPath)) + continue + } + + // Legacy style with files directly in the base directory + absPath, err := filepath.Abs(filepath.Join(dir, fullName)) + if err != nil { + log.Printf("[ERROR] plugin filepath error: %s", err) + continue + } + + // Check that the file we found is usable + if !pathIsFile(absPath) { + log.Printf("[ERROR] ignoring non-file %s", absPath) + continue + } + + log.Printf("[WARN] found legacy %s %q", kind, fullName) + + ret = append(ret, filepath.Clean(absPath)) + } + } + + return ret +} + +// Returns true if and only if the given path refers to a file or a symlink +// to a file. +func pathIsFile(path string) bool { + info, err := os.Stat(path) + if err != nil { + return false + } + + return !info.IsDir() +} + +// ResolvePluginPaths takes a list of paths to plugin executables (as returned +// by e.g. FindPluginPaths) and produces a PluginMetaSet describing the +// referenced plugins. +// +// If the same combination of plugin name and version appears multiple times, +// the earlier reference will be preferred. Several different versions of +// the same plugin name may be returned, in which case the methods of +// PluginMetaSet can be used to filter down. 
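+//
+// A minimal sketch (the path is hypothetical) of how a filename such as
+// terraform-provider-oneview_v1.2.0_x4 decomposes under the rules below:
+//
+//	s := ResolvePluginPaths([]string{"/plugins/terraform-provider-oneview_v1.2.0_x4"})
+//	// s contains one meta: Name "oneview", Version "1.2.0" (the "_x4"
+//	// protocol suffix is trimmed), Path as given.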
+func ResolvePluginPaths(paths []string) PluginMetaSet { + s := make(PluginMetaSet) + + type nameVersion struct { + Name string + Version string + } + found := make(map[nameVersion]struct{}) + + for _, path := range paths { + baseName := strings.ToLower(filepath.Base(path)) + if !strings.HasPrefix(baseName, "terraform-") { + // Should never happen with reasonable input + continue + } + + baseName = baseName[10:] + firstDash := strings.Index(baseName, "-") + if firstDash == -1 { + // Should never happen with reasonable input + continue + } + + baseName = baseName[firstDash+1:] + if baseName == "" { + // Should never happen with reasonable input + continue + } + + // Trim the .exe suffix used on Windows before we start wrangling + // the remainder of the path. + if strings.HasSuffix(baseName, ".exe") { + baseName = baseName[:len(baseName)-4] + } + + parts := strings.SplitN(baseName, "_v", 2) + name := parts[0] + version := VersionZero + if len(parts) == 2 { + version = parts[1] + } + + // Auto-installed plugins contain an extra name portion representing + // the expected plugin version, which we must trim off. + if underX := strings.Index(version, "_x"); underX != -1 { + version = version[:underX] + } + + if _, ok := found[nameVersion{name, version}]; ok { + // Skip duplicate versions of the same plugin + // (We do this during this step because after this we will be + // dealing with sets and thus lose our ordering with which to + // decide preference.) + continue + } + + s.Add(PluginMeta{ + Name: name, + Version: VersionStr(version), + Path: path, + }) + found[nameVersion{name, version}] = struct{}{} + } + + return s +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go new file mode 100644 index 00000000..b1d01fb9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go @@ -0,0 +1,689 @@ +package discovery + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/hashicorp/errwrap" + getter "github.com/hashicorp/go-getter" + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/registry" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" + "github.com/mitchellh/cli" +) + +// Releases are located by querying the terraform registry. + +const protocolVersionHeader = "x-terraform-protocol-version" + +var httpClient *http.Client + +var errVersionNotFound = errors.New("version not found") + +func init() { + httpClient = httpclient.New() + + httpGetter := &getter.HttpGetter{ + Client: httpClient, + Netrc: true, + } + + getter.Getters["http"] = httpGetter + getter.Getters["https"] = httpGetter +} + +// An Installer maintains a local cache of plugins by downloading plugins +// from an online repository. 
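+//
+// A hedged sketch of typical use (the field values are placeholders, and a
+// working installer also needs its Ui and Services fields populated):
+//
+//	inst := &ProviderInstaller{Dir: ".terraform/plugins/linux_amd64"}
+//	meta, _, err := inst.Get("oneview", AllVersions)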
+type Installer interface { + Get(name string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) + PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error) +} + +// ProviderInstaller is an Installer implementation that knows how to +// download Terraform providers from the official HashiCorp releases service +// into a local directory. The files downloaded are compliant with the +// naming scheme expected by FindPlugins, so the target directory of a +// provider installer can be used as one of several plugin discovery sources. +type ProviderInstaller struct { + Dir string + + // Cache is used to access and update a local cache of plugins if non-nil. + // Can be nil to disable caching. + Cache PluginCache + + PluginProtocolVersion uint + + // OS and Arch specify the OS and architecture that should be used when + // installing plugins. These use the same labels as the runtime.GOOS and + // runtime.GOARCH variables respectively, and indeed the values of these + // are used as defaults if either of these is the empty string. + OS string + Arch string + + // Skip checksum and signature verification + SkipVerify bool + + Ui cli.Ui // Ui for output + + // Services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + Services *disco.Disco + + // registry client + registry *registry.Client +} + +// Get is part of an implementation of type Installer, and attempts to download +// and install a Terraform provider matching the given constraints. +// +// This method may return one of a number of sentinel errors from this +// package to indicate issues that are likely to be resolvable via user action: +// +// ErrorNoSuchProvider: no provider with the given name exists in the repository. +// ErrorNoSuitableVersion: the provider exists but no available version matches constraints. +// ErrorNoVersionCompatible: a plugin was found within the constraints but it is +// incompatible with the current Terraform version. +// +// These errors should be recognized and handled as special cases by the caller +// to present a suitable user-oriented error message. +// +// All other errors indicate an internal problem that is likely _not_ solvable +// through user action, or at least not within Terraform's scope. Error messages +// are produced under the assumption that if presented to the user they will +// be presented alongside context about what is being installed, and thus the +// error messages do not redundantly include such information. +func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) { + var diags tfdiags.Diagnostics + + // a little bit of initialization. 
+	if i.OS == "" {
+		i.OS = runtime.GOOS
+	}
+	if i.Arch == "" {
+		i.Arch = runtime.GOARCH
+	}
+	if i.registry == nil {
+		i.registry = registry.NewClient(i.Services, nil)
+	}
+
+	// get a full listing of versions for the requested provider
+	allVersions, err := i.listProviderVersions(provider)
+
+	// TODO: return multiple errors
+	if err != nil {
+		log.Printf("[DEBUG] %s", err)
+		if registry.IsServiceUnreachable(err) {
+			registryHost, err := i.hostname()
+			if err == nil && registryHost == regsrc.PublicRegistryHost.Raw {
+				return PluginMeta{}, diags, ErrorPublicRegistryUnreachable
+			}
+			return PluginMeta{}, diags, ErrorServiceUnreachable
+		}
+		if registry.IsServiceNotProvided(err) {
+			return PluginMeta{}, diags, err
+		}
+		return PluginMeta{}, diags, ErrorNoSuchProvider
+	}
+
+	// Add any warnings from the response to diags
+	for _, warning := range allVersions.Warnings {
+		hostname, err := i.hostname()
+		if err != nil {
+			return PluginMeta{}, diags, err
+		}
+		diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning))
+		diags = diags.Append(diag)
+	}
+
+	if len(allVersions.Versions) == 0 {
+		return PluginMeta{}, diags, ErrorNoSuitableVersion
+	}
+	providerSource := allVersions.ID
+
+	// Filter the list of plugin versions to those which meet the version constraints
+	versions := allowedVersions(allVersions, req)
+	if len(versions) == 0 {
+		return PluginMeta{}, diags, ErrorNoSuitableVersion
+	}
+
+	// sort them newest to oldest. The newest version wins!
+	response.ProviderVersionCollection(versions).Sort()
+
+	// if the chosen provider version does not support the requested platform,
+	// filter the list of acceptable versions to those that support that platform
+	if err := i.checkPlatformCompatibility(versions[0]); err != nil {
+		versions = i.platformCompatibleVersions(versions)
+		if len(versions) == 0 {
+			return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform
+		}
+	}
+
+	// we now have a winning platform-compatible version
+	versionMeta := versions[0]
+	v := VersionStr(versionMeta.Version).MustParse()
+
+	// check protocol compatibility
+	if err := i.checkPluginProtocol(versionMeta); err != nil {
+		closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions)
+		if err != nil {
+			// No operation here if we can't find a version with compatible protocol
+			return PluginMeta{}, diags, err
+		}
+
+		// Prompt version suggestion to UI based on closest protocol match
+		var errMsg string
+		closestVersion := VersionStr(closestMatch.Version).MustParse()
+		if v.NewerThan(closestVersion) {
+			errMsg = providerProtocolTooNew
+		} else {
+			errMsg = providerProtocolTooOld
+		}
+
+		constraintStr := req.String()
+		if constraintStr == "" {
+			constraintStr = "(any version)"
+		}
+
+		return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf(
+			errMsg, provider, v.String(), tfversion.String(),
+			closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr)))
+	}
+
+	downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
+	// Check the error before dereferencing: listProviderDownloadURLs can
+	// return a nil result alongside an error.
+	if err != nil {
+		return PluginMeta{}, diags, err
+	}
+	providerURL := downloadURLs.DownloadURL
+
+	if !i.SkipVerify {
+		// Terraform verifies the integrity of a provider release before downloading
+		// the plugin binary. The digital signature (SHA256SUMS.sig) on the
+		// release distribution (SHA256SUMS) is verified with the public key of the
+		// publisher provided in the Terraform Registry response, ensuring that
+		// everything is as intended by the publisher.
+		// The checksum of the provider plugin is expected in the SHA256SUMS
+		// file and is double checked to match the checksum of the original
+		// published release to the Registry. This enforces immutability of
+		// releases between the Registry and the plugin's host location.
+		// Lastly, the integrity of the binary is verified on download against
+		// that Registry-provided, signature-verified checksum.
+		sha256, err := i.getProviderChecksum(downloadURLs)
+		if err != nil {
+			return PluginMeta{}, diags, err
+		}
+
+		// add the checksum parameter for go-getter to verify the download for us.
+		if sha256 != "" {
+			providerURL = providerURL + "?checksum=sha256:" + sha256
+		}
+	}
+
+	printedProviderName := fmt.Sprintf("%q (%s)", provider, providerSource)
+	i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version))
+	log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version)
+	err = i.install(provider, v, providerURL)
+	if err != nil {
+		return PluginMeta{}, diags, err
+	}
+
+	// Find what we just installed
+	// (This is weird, because go-getter doesn't directly return
+	// information about what was extracted, and we just extracted
+	// the archive directly into a shared dir here.)
+	log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, versionMeta.Version)
+	metas := FindPlugins("provider", []string{i.Dir})
+	log.Printf("[DEBUG] all plugins found %#v", metas)
+	metas, _ = metas.ValidateVersions()
+	metas = metas.WithName(provider).WithVersion(v)
+	log.Printf("[DEBUG] filtered plugins %#v", metas)
+	if metas.Count() == 0 {
+		// This should never happen. Suggests that the release archive
+		// contains an executable file whose name doesn't match the
+		// expected convention.
+		return PluginMeta{}, diags, fmt.Errorf(
+			"failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
+			versionMeta.Version,
+		)
+	}
+
+	if metas.Count() > 1 {
+		// This should also never happen, and suggests that a
+		// particular version was re-released with a different
+		// executable filename. We consider releases as immutable, so
+		// this is an error.
+		return PluginMeta{}, diags, fmt.Errorf(
+			"multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
+			versionMeta.Version,
+		)
+	}
+
+	// By now we know we have exactly one meta, and so "Newest" will
+	// return that one.
+	return metas.Newest(), diags, nil
+}
+
+func (i *ProviderInstaller) install(provider string, version Version, url string) error {
+	if i.Cache != nil {
+		log.Printf("[DEBUG] looking for provider %s %s in plugin cache", provider, version)
+		cached := i.Cache.CachedPluginPath("provider", provider, version)
+		if cached == "" {
+			log.Printf("[DEBUG] %s %s not yet in cache, so downloading %s", provider, version, url)
+			err := getter.Get(i.Cache.InstallDir(), url)
+			if err != nil {
+				return err
+			}
+			// should now be in cache
+			cached = i.Cache.CachedPluginPath("provider", provider, version)
+			if cached == "" {
+				// should never happen if the getter is behaving properly
+				// and the plugins are packaged properly.
+				return fmt.Errorf("failed to find downloaded plugin in cache %s", i.Cache.InstallDir())
+			}
+		}
+
+		// Link or copy the cached binary into our install dir so the
+		// normal resolution machinery can find it.
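+		// (The steps below try a hard link first, then a symlink, and fall
+		// back to a full file copy; on Windows the copy path is always used.)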
+ filename := filepath.Base(cached) + targetPath := filepath.Join(i.Dir, filename) + // check if the target dir exists, and create it if not + var err error + if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) { + err = os.MkdirAll(i.Dir, 0700) + } + if err != nil { + return err + } + + log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) + + // Delete if we can. If there's nothing there already then no harm done. + // This is important because we can't create a link if there's + // already a file of the same name present. + // (any other error here we'll catch below when we try to write here) + os.Remove(targetPath) + + // We don't attempt linking on Windows because links are not + // comprehensively supported by all tools/apps in Windows and + // so we choose to be conservative to avoid creating any + // weird issues for Windows users. + linkErr := errors.New("link not supported for Windows") // placeholder error, never actually returned + if runtime.GOOS != "windows" { + // Try hard linking first. Hard links are preferable because this + // creates a self-contained directory that doesn't depend on the + // cache after install. + linkErr = os.Link(cached, targetPath) + + // If that failed, try a symlink. This _does_ depend on the cache + // after install, so the user must manage the cache more carefully + // in this case, but avoids creating redundant copies of the + // plugins on disk. + if linkErr != nil { + linkErr = os.Symlink(cached, targetPath) + } + } + + // If we still have an error then we'll try a copy as a fallback. + // In this case either the OS is Windows or the target filesystem + // can't support symlinks. + if linkErr != nil { + srcFile, err := os.Open(cached) + if err != nil { + return fmt.Errorf("failed to open cached plugin %s: %s", cached, err) + } + defer srcFile.Close() + + destFile, err := os.OpenFile(targetPath, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create %s: %s", targetPath, err) + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + destFile.Close() + return fmt.Errorf("failed to copy cached plugin from %s to %s: %s", cached, targetPath, err) + } + + err = destFile.Close() + if err != nil { + return fmt.Errorf("error creating %s: %s", targetPath, err) + } + } + + // One way or another, by the time we get here we should have either + // a link or a copy of the cached plugin within i.Dir, as expected. + } else { + log.Printf("[DEBUG] plugin cache is disabled, so downloading %s %s from %s", provider, version, url) + err := getter.Get(i.Dir, url) + if err != nil { + return err + } + } + return nil +} + +func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaSet, error) { + purge := make(PluginMetaSet) + + present := FindPlugins("provider", []string{i.Dir}) + for meta := range present { + chosen, ok := used[meta.Name] + if !ok { + purge.Add(meta) + } + if chosen.Path != meta.Path { + purge.Add(meta) + } + } + + removed := make(PluginMetaSet) + var errs error + for meta := range purge { + path := meta.Path + err := os.Remove(path) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "failed to remove unused provider plugin %s: %s", + path, err, + )) + } else { + removed.Add(meta) + } + } + + return removed, errs +} + +func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) { + // Get SHA256SUMS file. 
+ shasums, err := getFile(resp.ShasumsURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err) + return "", ErrorMissingChecksumVerification + } + + // Get SHA256SUMS.sig file. + signature, err := getFile(resp.ShasumsSignatureURL) + if err != nil { + log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err) + return "", ErrorSignatureVerification + } + + // Verify the GPG signature returned from the Registry. + asciiArmor := resp.SigningKeys.GPGASCIIArmor() + signer, err := verifySig(shasums, signature, asciiArmor) + if err != nil { + log.Printf("[ERROR] error verifying signature: %s", err) + return "", ErrorSignatureVerification + } + + // Also verify the GPG signature against the HashiCorp public key. This is + // a temporary additional check until a more robust key verification + // process is added in a future release. + _, err = verifySig(shasums, signature, HashicorpPublicKey) + if err != nil { + log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err) + return "", ErrorSignatureVerification + } + + // Display identity for GPG key which succeeded verifying the signature. + // This could also be used to display to the user with i.Ui.Info(). + identities := []string{} + for k := range signer.Identities { + identities = append(identities, k) + } + identity := strings.Join(identities, ", ") + log.Printf("[DEBUG] verified GPG signature with key from %s", identity) + + // Extract checksum for this os/arch platform binary and verify against Registry + checksum := checksumForFile(shasums, resp.Filename) + if checksum == "" { + log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL) + return "", ErrorMissingChecksumVerification + } else if checksum != resp.Shasum { + log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL) + return "", ErrorChecksumVerification + } + + return checksum, nil +} + +func (i *ProviderInstaller) hostname() (string, error) { + provider := regsrc.NewTerraformProvider("", i.OS, i.Arch) + svchost, err := provider.SvcHost() + if err != nil { + return "", err + } + + return svchost.ForDisplay(), nil +} + +// list all versions available for the named provider +func (i *ProviderInstaller) listProviderVersions(name string) (*response.TerraformProviderVersions, error) { + provider := regsrc.NewTerraformProvider(name, i.OS, i.Arch) + versions, err := i.registry.TerraformProviderVersions(provider) + return versions, err +} + +func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) { + urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version) + if urls == nil { + return nil, fmt.Errorf("No download urls found for provider %s", name) + } + return urls, err +} + +// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match. +// Prerelease versions are filtered. 
+func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
+	// Loop through all the provider versions to find the earliest and latest
+	// versions that match the installer protocol to then select the closest of the two
+	var latest, earliest *response.TerraformProviderVersion
+	for _, version := range versions {
+		// Prereleases are filtered and will not be suggested
+		v, err := VersionStr(version.Version).Parse()
+		if err != nil || v.IsPrerelease() {
+			continue
+		}
+
+		if err := i.checkPluginProtocol(version); err == nil {
+			if earliest == nil {
+				// Found the first provider version with compatible protocol
+				earliest = version
+			}
+			// Update the latest protocol compatible version
+			latest = version
+		}
+	}
+	if earliest == nil {
+		// No compatible protocol was found for any version
+		return nil, ErrorNoVersionCompatible
+	}
+
+	// Convert protocols to comparable types
+	protoString := strconv.Itoa(int(i.PluginProtocolVersion))
+	protocolVersion, err := VersionStr(protoString).Parse()
+	if err != nil {
+		return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
+	}
+
+	earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse()
+	if err != nil {
+		return nil, err
+	}
+
+	// Compare installer protocol version with the first protocol listed of the earliest match
+	// [A, B] where A is assumed the earliest compatible major version of the protocol pair
+	if protocolVersion.NewerThan(earliestVersionProtocol) {
+		// Provider protocols are too old, the closest version is the earliest compatible version
+		return earliest, nil
+	}
+
+	// Provider protocols are too new, the closest version is the latest compatible version
+	return latest, nil
+}
+
+func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error {
+	// TODO: should this be a different error? We should probably differentiate between
+	// no compatible versions and no protocol versions listed at all
+	if len(versionMeta.Protocols) == 0 {
+		return fmt.Errorf("no plugin protocol versions listed")
+	}
+
+	protoString := strconv.Itoa(int(i.PluginProtocolVersion))
+	protocolVersion, err := VersionStr(protoString).Parse()
+	if err != nil {
+		return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
+	}
+	protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse()
+	if err != nil {
+		// This should not fail if the preceding function succeeded.
+		return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String())
+	}
+
+	for _, p := range versionMeta.Protocols {
+		proPro, err := VersionStr(p).Parse()
+		if err != nil {
+			// invalid protocol reported by the registry. Move along.
+			log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version)
+			continue
+		}
+		// success!
+		if protocolConstraint.Allows(proPro) {
+			return nil
+		}
+	}
+
+	return ErrorNoVersionCompatible
+}
+
+// REVIEWER QUESTION (again): this ends up swallowing a bunch of errors from
+// checkPluginProtocol. Do they need to be percolated up better, or would
+// debug messages suffice in these situations?
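+//
+// (Platform compatibility below simply means that the version's Platforms
+// list contains an entry whose OS and Arch match the installer's.)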
+func (i *ProviderInstaller) findPlatformCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) { + for _, version := range versions { + if err := i.checkPlatformCompatibility(version); err == nil { + return version, nil + } + } + + return nil, ErrorNoVersionCompatibleWithPlatform +} + +// platformCompatibleVersions returns a list of provider versions that are +// compatible with the requested platform. +func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion { + var v []*response.TerraformProviderVersion + for _, version := range versions { + if err := i.checkPlatformCompatibility(version); err == nil { + v = append(v, version) + } + } + return v +} + +func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error { + if len(versionMeta.Platforms) == 0 { + return fmt.Errorf("no supported provider platforms listed") + } + for _, p := range versionMeta.Platforms { + if p.Arch == i.Arch && p.OS == i.OS { + return nil + } + } + return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch) +} + +// take the list of available versions for a plugin, and filter out those that +// don't fit the constraints. +func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion { + var allowed []*response.TerraformProviderVersion + + for _, v := range available.Versions { + version, err := VersionStr(v.Version).Parse() + if err != nil { + log.Printf("[WARN] invalid version found for %q: %s", available.ID, err) + continue + } + if required.Allows(version) { + allowed = append(allowed, v) + } + } + return allowed +} + +func checksumForFile(sums []byte, name string) string { + for _, line := range strings.Split(string(sums), "\n") { + parts := strings.Fields(line) + if len(parts) > 1 && parts[1] == name { + return parts[0] + } + } + return "" +} + +func getFile(url string) ([]byte, error) { + resp, err := httpClient.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s", resp.Status) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return data, err + } + return data, nil +} + +// providerProtocolTooOld is a message sent to the CLI UI if the provider's +// supported protocol versions are too old for the user's version of terraform, +// but an older version of the provider is compatible. +const providerProtocolTooOld = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the earliest compatible version. Select it with +the following version constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. +` + +// providerProtocolTooNew is a message sent to the CLI UI if the provider's +// supported protocol versions are too new for the user's version of terraform, +// and the user could either upgrade terraform or choose an older version of the +// provider +const providerProtocolTooNew = ` +[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red] + +Provider version %s is the latest compatible version. 
Select it with +the following constraint: + + version = %q + +Terraform checked all of the plugin versions matching the given constraint: + %s + +Consult the documentation for this provider for more information on +compatibility between provider and Terraform versions. + +Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases. +` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go new file mode 100644 index 00000000..1a100426 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get_cache.go @@ -0,0 +1,48 @@ +package discovery + +// PluginCache is an interface implemented by objects that are able to maintain +// a cache of plugins. +type PluginCache interface { + // CachedPluginPath returns a path where the requested plugin is already + // cached, or an empty string if the requested plugin is not yet cached. + CachedPluginPath(kind string, name string, version Version) string + + // InstallDir returns the directory that new plugins should be installed into + // in order to populate the cache. This directory should be used as the + // first argument to getter.Get when downloading plugins with go-getter. + // + // After installing into this directory, use CachedPluginPath to obtain the + // path where the plugin was installed. + InstallDir() string +} + +// NewLocalPluginCache returns a PluginCache that caches plugins in a +// given local directory. +func NewLocalPluginCache(dir string) PluginCache { + return &pluginCache{ + Dir: dir, + } +} + +type pluginCache struct { + Dir string +} + +func (c *pluginCache) CachedPluginPath(kind string, name string, version Version) string { + allPlugins := FindPlugins(kind, []string{c.Dir}) + plugins := allPlugins.WithName(name).WithVersion(version) + + if plugins.Count() == 0 { + // nothing cached + return "" + } + + // There should generally be only one plugin here; if there's more than + // one match for some reason then we'll just choose one arbitrarily. 
+ plugin := plugins.Newest() + return plugin.Path +} + +func (c *pluginCache) InstallDir() string { + return c.Dir +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go new file mode 100644 index 00000000..4622ca05 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go @@ -0,0 +1,34 @@ +package discovery + +// HashicorpPublicKey is the HashiCorp public key, also available at +// https://www.hashicorp.com/security +const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f +W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq +fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA +3drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca +KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k +SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1 +cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG +CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n +Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i +SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi +psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w +sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO +klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW +WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9 +wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j +2tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM +skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo +mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y +0H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA +CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc +z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP +0BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG +unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ +EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ +oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C +=LYpS +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go new file mode 100644 index 00000000..bdcebcb9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta.go @@ -0,0 +1,41 @@ +package discovery + +import ( + "crypto/sha256" + "io" + "os" +) + +// PluginMeta is metadata about a plugin, useful for launching the plugin +// and for understanding which plugins are available. +type PluginMeta struct { + // Name is the name of the plugin, e.g. as inferred from the plugin + // binary's filename, or by explicit configuration. + Name string + + // Version is the semver version of the plugin, expressed as a string + // that might not be semver-valid. + Version VersionStr + + // Path is the absolute path of the executable that can be launched + // to provide the RPC server for this plugin. + Path string +} + +// SHA256 returns a SHA256 hash of the content of the referenced executable +// file, or an error if the file's contents cannot be read. 
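+//
+// A short sketch of pinning a plugin by digest (reqd and the plugin name
+// are hypothetical):
+//
+//	digest, err := meta.SHA256()
+//	if err == nil {
+//		reqd["oneview"].SHA256 = digest
+//	}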
+func (m PluginMeta) SHA256() ([]byte, error) { + f, err := os.Open(m.Path) + if err != nil { + return nil, err + } + defer f.Close() + + h := sha256.New() + _, err = io.Copy(h, f) + if err != nil { + return nil, err + } + + return h.Sum(nil), nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go new file mode 100644 index 00000000..3a992892 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go @@ -0,0 +1,195 @@ +package discovery + +// A PluginMetaSet is a set of PluginMeta objects meeting a certain criteria. +// +// Methods on this type allow filtering of the set to produce subsets that +// meet more restrictive criteria. +type PluginMetaSet map[PluginMeta]struct{} + +// Add inserts the given PluginMeta into the receiving set. This is a no-op +// if the given meta is already present. +func (s PluginMetaSet) Add(p PluginMeta) { + s[p] = struct{}{} +} + +// Remove removes the given PluginMeta from the receiving set. This is a no-op +// if the given meta is not already present. +func (s PluginMetaSet) Remove(p PluginMeta) { + delete(s, p) +} + +// Has returns true if the given meta is in the receiving set, or false +// otherwise. +func (s PluginMetaSet) Has(p PluginMeta) bool { + _, ok := s[p] + return ok +} + +// Count returns the number of metas in the set +func (s PluginMetaSet) Count() int { + return len(s) +} + +// ValidateVersions returns two new PluginMetaSets, separating those with +// versions that have syntax-valid semver versions from those that don't. +// +// Eliminating invalid versions from consideration (and possibly warning about +// them) is usually the first step of working with a meta set after discovery +// has completed. +func (s PluginMetaSet) ValidateVersions() (valid, invalid PluginMetaSet) { + valid = make(PluginMetaSet) + invalid = make(PluginMetaSet) + for p := range s { + if _, err := p.Version.Parse(); err == nil { + valid.Add(p) + } else { + invalid.Add(p) + } + } + return +} + +// WithName returns the subset of metas that have the given name. +func (s PluginMetaSet) WithName(name string) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + if p.Name == name { + ns.Add(p) + } + } + return ns +} + +// WithVersion returns the subset of metas that have the given version. +// +// This should be used only with the "valid" result from ValidateVersions; +// it will ignore any plugin metas that have invalid version strings. +func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { + ns := make(PluginMetaSet) + for p := range s { + gotVersion, err := p.Version.Parse() + if err != nil { + continue + } + if gotVersion.Equal(version) { + ns.Add(p) + } + } + return ns +} + +// ByName groups the metas in the set by their Names, returning a map. +func (s PluginMetaSet) ByName() map[string]PluginMetaSet { + ret := make(map[string]PluginMetaSet) + for p := range s { + if _, ok := ret[p.Name]; !ok { + ret[p.Name] = make(PluginMetaSet) + } + ret[p.Name].Add(p) + } + return ret +} + +// Newest returns the one item from the set that has the newest Version value. +// +// The result is meaningful only if the set is already filtered such that +// all of the metas have the same Name. +// +// If there isn't at least one meta in the set then this function will panic. +// Use Count() to ensure that there is at least one value before calling. 
+//
+// If any of the metas have invalid version strings then this function will
+// panic. Use ValidateVersions() first to filter out metas with invalid
+// versions.
+//
+// If two metas have the same Version then one is arbitrarily chosen. This
+// situation should be avoided by pre-filtering the set.
+func (s PluginMetaSet) Newest() PluginMeta {
+	if len(s) == 0 {
+		panic("can't call Newest on empty PluginMetaSet")
+	}
+
+	var first = true
+	var winner PluginMeta
+	var winnerVersion Version
+	for p := range s {
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+
+		if first || version.NewerThan(winnerVersion) {
+			winner = p
+			winnerVersion = version
+			first = false
+		}
+	}
+
+	return winner
+}
+
+// ConstrainVersions takes a set of requirements and attempts to
+// return a map from name to a set of metas that have the matching
+// name and an appropriate version.
+//
+// If any of the given requirements match *no* plugins then its PluginMetaSet
+// in the returned map will be empty.
+//
+// All viable metas are returned, so the caller can apply any desired filtering
+// to reduce down to a single option. For example, calling Newest() to obtain
+// the highest available version.
+//
+// If any of the metas in the set have invalid version strings then this
+// function will panic. Use ValidateVersions() first to filter out metas with
+// invalid versions.
+func (s PluginMetaSet) ConstrainVersions(reqd PluginRequirements) map[string]PluginMetaSet {
+	ret := make(map[string]PluginMetaSet)
+	for p := range s {
+		name := p.Name
+		allowedVersions, ok := reqd[name]
+		if !ok {
+			continue
+		}
+		if _, ok := ret[p.Name]; !ok {
+			ret[p.Name] = make(PluginMetaSet)
+		}
+		version, err := p.Version.Parse()
+		if err != nil {
+			panic(err)
+		}
+		if allowedVersions.Allows(version) {
+			ret[p.Name].Add(p)
+		}
+	}
+	return ret
+}
+
+// OverridePaths returns a new set where any existing plugins with the given
+// names are removed and replaced with the single path given in the map.
+//
+// This is here only to continue to support the legacy way of overriding
+// plugin binaries in the .terraformrc file. It treats all given plugins
+// as pre-versioning (version 0.0.0). This mechanism will eventually be
+// phased out, with vendor directories being the intended replacement.
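+//
+// Sketch (the path is hypothetical): pointing the "oneview" provider at a
+// locally built binary:
+//
+//	s = s.OverridePaths(map[string]string{
+//		"oneview": "/usr/local/bin/terraform-provider-oneview",
+//	})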
+func (s PluginMetaSet) OverridePaths(paths map[string]string) PluginMetaSet {
+	ret := make(PluginMetaSet)
+	for p := range s {
+		if _, ok := paths[p.Name]; ok {
+			// Skip plugins that we're overriding
+			continue
+		}
+
+		ret.Add(p)
+	}
+
+	// Now add the metadata for overriding plugins
+	for name, path := range paths {
+		ret.Add(PluginMeta{
+			Name:    name,
+			Version: VersionZero,
+			Path:    path,
+		})
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
new file mode 100644
index 00000000..0466ab25
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
@@ -0,0 +1,111 @@
+package discovery
+
+import (
+	"bytes"
+)
+
+// PluginInstallProtocolVersion is the protocol version TF-core
+// supports to communicate with servers, and is used to resolve
+// plugin discovery with terraform registry, in addition to
+// any specified plugin version constraints
+const PluginInstallProtocolVersion = 5
+
+// PluginRequirements describes a set of plugins (assumed to be of a consistent
+// kind) that are required to exist and have versions within the given
+// corresponding sets.
+type PluginRequirements map[string]*PluginConstraints
+
+// PluginConstraints represents an element of PluginRequirements describing
+// the constraints for a single plugin.
+type PluginConstraints struct {
+	// Specifies that the plugin's version must be within the given
+	// constraints.
+	Versions Constraints
+
+	// If non-nil, the hash of the on-disk plugin executable must exactly
+	// match the SHA256 hash given here.
+	SHA256 []byte
+}
+
+// Allows returns true if the given version is within the receiver's version
+// constraints.
+func (s *PluginConstraints) Allows(v Version) bool {
+	return s.Versions.Allows(v)
+}
+
+// AcceptsSHA256 returns true if the given executable SHA256 hash is acceptable,
+// either because it matches the constraint or because there is no such
+// constraint.
+func (s *PluginConstraints) AcceptsSHA256(digest []byte) bool {
+	if s.SHA256 == nil {
+		return true
+	}
+	return bytes.Equal(s.SHA256, digest)
+}
+
+// Merge takes the contents of the receiver and the other given requirements
+// object and merges them together into a single requirements structure
+// that satisfies both sets of requirements.
+//
+// Note that it doesn't make sense to merge two PluginRequirements with
+// differing required plugin SHA256 hashes, since the result will never
+// match any plugin.
+func (r PluginRequirements) Merge(other PluginRequirements) PluginRequirements {
+	ret := make(PluginRequirements)
+	for n, c := range r {
+		ret[n] = &PluginConstraints{
+			Versions: Constraints{}.Append(c.Versions),
+			SHA256:   c.SHA256,
+		}
+	}
+	for n, c := range other {
+		if existing, exists := ret[n]; exists {
+			ret[n].Versions = ret[n].Versions.Append(c.Versions)
+
+			if existing.SHA256 != nil {
+				if c.SHA256 != nil && !bytes.Equal(c.SHA256, existing.SHA256) {
+					// If we've been asked to merge two constraints with
+					// different SHA256 hashes then we'll produce a dummy value
+					// that can never match anything. This is a silly edge case
+					// that no reasonable caller should hit.
+ ret[n].SHA256 = []byte(invalidProviderHash) + } + } else { + ret[n].SHA256 = c.SHA256 // might still be nil + } + } else { + ret[n] = &PluginConstraints{ + Versions: Constraints{}.Append(c.Versions), + SHA256: c.SHA256, + } + } + } + return ret +} + +// LockExecutables applies additional constraints to the receiver that +// require plugin executables with specific SHA256 digests. This modifies +// the receiver in-place, since it's intended to be applied after +// version constraints have been resolved. +// +// The given map must include a key for every plugin that is already +// required. If not, any missing keys will cause the corresponding plugin +// to never match, though the direct caller doesn't necessarily need to +// guarantee this as long as the downstream code _applying_ these constraints +// is able to deal with the non-match in some way. +func (r PluginRequirements) LockExecutables(sha256s map[string][]byte) { + for name, cons := range r { + digest := sha256s[name] + + if digest == nil { + // Prevent any match, which will then presumably cause the + // downstream consumer of this requirements to report an error. + cons.SHA256 = []byte(invalidProviderHash) + continue + } + + cons.SHA256 = digest + } +} + +const invalidProviderHash = "" diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go new file mode 100644 index 00000000..7bbae50c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go @@ -0,0 +1,19 @@ +package discovery + +import ( + "bytes" + "strings" + + "golang.org/x/crypto/openpgp" +) + +// Verify the data using the provided openpgp detached signature and the +// embedded hashicorp public key. +func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) { + el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor)) + if err != nil { + return nil, err + } + + return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go new file mode 100644 index 00000000..4311d510 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go @@ -0,0 +1,77 @@ +package discovery + +import ( + "fmt" + "sort" + + version "github.com/hashicorp/go-version" +) + +const VersionZero = "0.0.0" + +// A VersionStr is a string containing a possibly-invalid representation +// of a semver version number. Call Parse on it to obtain a real Version +// object, or discover that it is invalid. +type VersionStr string + +// Parse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then an error is returned instead. +func (s VersionStr) Parse() (Version, error) { + raw, err := version.NewVersion(string(s)) + if err != nil { + return Version{}, err + } + return Version{raw}, nil +} + +// MustParse transforms a VersionStr into a Version if it is +// syntactically valid. If it isn't then it panics. +func (s VersionStr) MustParse() Version { + ret, err := s.Parse() + if err != nil { + panic(err) + } + return ret +} + +// Version represents a version number that has been parsed from +// a semver string and known to be valid. 
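+//
+// A brief sketch of the VersionStr to Version flow:
+//
+//	v, err := VersionStr("1.2.0").Parse()
+//	if err == nil && v.NewerThan(VersionStr("1.1.0").MustParse()) {
+//		// 1.2.0 is the newer of the two
+//	}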
+type Version struct {
+	// We wrap this here just because it avoids a proliferation of
+	// direct go-version imports all over the place, and keeps the
+	// version-processing details within this package.
+	raw *version.Version
+}
+
+func (v Version) String() string {
+	return v.raw.String()
+}
+
+func (v Version) NewerThan(other Version) bool {
+	return v.raw.GreaterThan(other.raw)
+}
+
+func (v Version) Equal(other Version) bool {
+	return v.raw.Equal(other.raw)
+}
+
+// IsPrerelease determines if version is a prerelease
+func (v Version) IsPrerelease() bool {
+	return v.raw.Prerelease() != ""
+}
+
+// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
+// minor upgrades relative to the receiving version.
+func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
+	segments := v.raw.Segments()
+	return ConstraintStr(fmt.Sprintf("~> %d.%d", segments[0], segments[1]))
+}
+
+type Versions []Version
+
+// Sort sorts versions from newest to oldest.
+func (v Versions) Sort() {
+	sort.Slice(v, func(i, j int) bool {
+		return v[i].NewerThan(v[j])
+	})
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
new file mode 100644
index 00000000..de02f5ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
@@ -0,0 +1,89 @@
+package discovery
+
+import (
+	"sort"
+
+	version "github.com/hashicorp/go-version"
+)
+
+// A ConstraintStr is a string containing a possibly-invalid representation
+// of a version constraint provided in configuration. Call Parse on it to
+// obtain a real Constraint object, or discover that it is invalid.
+type ConstraintStr string
+
+// Parse transforms a ConstraintStr into a Constraints if it is
+// syntactically valid. If it isn't then an error is returned instead.
+func (s ConstraintStr) Parse() (Constraints, error) {
+	raw, err := version.NewConstraint(string(s))
+	if err != nil {
+		return Constraints{}, err
+	}
+	return Constraints{raw}, nil
+}
+
+// MustParse is like Parse but it panics if the constraint string is invalid.
+func (s ConstraintStr) MustParse() Constraints {
+	ret, err := s.Parse()
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
+
+// Constraints represents a set of versions which any given Version is either
+// a member of or not.
+type Constraints struct {
+	raw version.Constraints
+}
+
+// NewConstraints creates a Constraints based on a version.Constraints.
+func NewConstraints(c version.Constraints) Constraints {
+	return Constraints{c}
+}
+
+// AllVersions is a Constraints containing all versions
+var AllVersions Constraints
+
+func init() {
+	AllVersions = Constraints{
+		raw: make(version.Constraints, 0),
+	}
+}
+
+// Allows returns true if the given version is permitted by the receiving
+// constraints set.
+func (s Constraints) Allows(v Version) bool {
+	return s.raw.Check(v.raw)
+}
+
+// Append combines the receiving set with the given other set to produce
+// a set that is the intersection of both sets, which is to say that resulting
+// constraints contain only the versions that are members of both.
+func (s Constraints) Append(other Constraints) Constraints {
+	raw := make(version.Constraints, 0, len(s.raw)+len(other.raw))
+
+	// Since "raw" is a list of constraints that remove versions from the set,
+	// "Intersection" is implemented by concatenating together those lists,
+	// thus leaving behind only the versions not removed by either list.
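+	// For example, appending {">= 1.0"} to {"< 2.0"} yields the combined
+	// list {"< 2.0", ">= 1.0"}, i.e. the 1.x series.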
+	raw = append(raw, s.raw...)
+	raw = append(raw, other.raw...)
+
+	// while the set is unordered, we sort these lexically for consistent output
+	sort.Slice(raw, func(i, j int) bool {
+		return raw[i].String() < raw[j].String()
+	})
+
+	return Constraints{raw}
+}
+
+// String returns a string representation of the set members as a set
+// of range constraints.
+func (s Constraints) String() string {
+	return s.raw.String()
+}
+
+// Unconstrained returns true if and only if the receiver is an empty
+// constraint set.
+func (s Constraints) Unconstrained() bool {
+	return len(s.raw) == 0
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
new file mode 100644
index 00000000..5b190e2c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
@@ -0,0 +1,564 @@
+package plugin
+
+import (
+	"context"
+	"errors"
+	"log"
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+
+	plugin "github.com/hashicorp/go-plugin"
+	proto "github.com/hashicorp/terraform/internal/tfplugin5"
+	"github.com/hashicorp/terraform/plugin/convert"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/version"
+	"github.com/zclconf/go-cty/cty/msgpack"
+	"google.golang.org/grpc"
+)
+
+// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
+type GRPCProviderPlugin struct {
+	plugin.Plugin
+	GRPCProvider func() proto.ProviderServer
+}
+
+func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
+	return &GRPCProvider{
+		client: proto.NewProviderClient(c),
+		ctx:    ctx,
+	}, nil
+}
+
+func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
+	proto.RegisterProviderServer(s, p.GRPCProvider())
+	return nil
+}
+
+// GRPCProvider handles the client, or core side of the plugin rpc connection.
+// The GRPCProvider methods are mostly a translation layer between the
+// terraform provider types and the grpc proto types, directly converting
+// between the two.
+type GRPCProvider struct {
+	// PluginClient provides a reference to the plugin.Client which controls the plugin process.
+	// This allows the GRPCProvider a way to shutdown the plugin process.
+	PluginClient *plugin.Client
+
+	// TestServer contains a grpc.Server to close when the GRPCProvider is being
+	// used in an end to end test of a provider.
+	TestServer *grpc.Server
+
+	// Proto client used to make the grpc service calls.
+	client proto.ProviderClient
+
+	// this context is created by the plugin package, and is canceled when the
+	// plugin process ends.
+	ctx context.Context
+
+	// schema stores the schema for this provider. This is used to properly
+	// serialize the state for requests.
+	mu      sync.Mutex
+	schemas providers.GetSchemaResponse
+}
+
+// getSchema is used internally to get the saved provider schema. The schema
+// should have already been fetched from the provider, but we have to
+// synchronize access to avoid being called concurrently with GetSchema.
+func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
+	p.mu.Lock()
+	// unlock inline in case GetSchema needs to be called
+	if p.schemas.Provider.Block != nil {
+		p.mu.Unlock()
+		return p.schemas
+	}
+	p.mu.Unlock()
+
+	// the schema should have been fetched already, but give it another shot
+	// just in case things are being called out of order.
+	// This may happen for tests.
+	schemas := p.GetSchema()
+	if schemas.Diagnostics.HasErrors() {
+		panic(schemas.Diagnostics.Err())
+	}
+
+	return schemas
+}
+
+// getResourceSchema is a helper to extract the schema for a resource, and
+// panics if the schema is not available.
+func (p *GRPCProvider) getResourceSchema(name string) providers.Schema {
+	schema := p.getSchema()
+	resSchema, ok := schema.ResourceTypes[name]
+	if !ok {
+		panic("unknown resource type " + name)
+	}
+	return resSchema
+}
+
+// getDatasourceSchema is a helper to extract the schema for a datasource, and
+// panics if that schema is not available.
+func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema {
+	schema := p.getSchema()
+	dataSchema, ok := schema.DataSources[name]
+	if !ok {
+		panic("unknown data source " + name)
+	}
+	return dataSchema
+}
+
+func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) {
+	log.Printf("[TRACE] GRPCProvider: GetSchema")
+	p.mu.Lock()
+	defer p.mu.Unlock()
+
+	if p.schemas.Provider.Block != nil {
+		return p.schemas
+	}
+
+	resp.ResourceTypes = make(map[string]providers.Schema)
+	resp.DataSources = make(map[string]providers.Schema)
+
+	// Some providers may generate quite large schemas, and the internal default
+	// grpc response size limit is 4MB. 64MB should cover most any use case, and
+	// if we get providers nearing that we may want to consider a finer-grained
+	// API to fetch individual resource schemas.
+	// Note: this option is marked as EXPERIMENTAL in the grpc API.
+	const maxRecvSize = 64 << 20
+	protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
+
+	if protoResp.Provider == nil {
+		resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
+		return resp
+	}
+
+	resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
+
+	for name, res := range protoResp.ResourceSchemas {
+		resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
+	}
+
+	for name, data := range protoResp.DataSourceSchemas {
+		resp.DataSources[name] = convert.ProtoToProviderSchema(data)
+	}
+
+	p.schemas = resp
+
+	return resp
+}
+
+func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) {
+	log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig")
+
+	schema := p.getSchema()
+	ty := schema.Provider.Block.ImpliedType()
+
+	mp, err := msgpack.Marshal(r.Config, ty)
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	protoReq := &proto.PrepareProviderConfig_Request{
+		Config: &proto.DynamicValue{Msgpack: mp},
+	}
+
+	protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq)
+	if err != nil {
+		resp.Diagnostics = resp.Diagnostics.Append(err)
+		return resp
+	}
+
+	config := cty.NullVal(ty)
+	if protoResp.PreparedConfig != nil {
+		config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty)
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+			return resp
+		}
+	}
+	resp.PreparedConfig = config
+
+	resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
+	return resp
+}
+
+func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp
providers.ValidateResourceTypeConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig") + resourceSchema := p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateResourceTypeConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) { + log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateDataSourceConfig_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{Msgpack: mp}, + } + + protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: UpgradeResourceState") + + resSchema := p.getResourceSchema(r.TypeName) + + protoReq := &proto.UpgradeResourceState_Request{ + TypeName: r.TypeName, + Version: int64(r.Version), + RawState: &proto.RawState{ + Json: r.RawStateJSON, + Flatmap: r.RawStateFlatmap, + }, + } + + protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.UpgradedState != nil { + state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + + resp.UpgradedState = state + return resp +} + +func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) { + log.Printf("[TRACE] GRPCProvider: Configure") + + schema := p.getSchema() + + var mp []byte + + // we don't have anything to marshal if there's no config + mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.Configure_Request{ + TerraformVersion: version.Version, + Config: &proto.DynamicValue{ + Msgpack: mp, + }, + } + + protoResp, err := p.client.Configure(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvider) Stop() error { + log.Printf("[TRACE] GRPCProvider: Stop") + + resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request)) + if err != nil 
{ + return err + } + + if resp.Error != "" { + return errors.New(resp.Error) + } + return nil +} + +func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadResource") + + resSchema := p.getResourceSchema(r.TypeName) + + mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadResource_Request{ + TypeName: r.TypeName, + CurrentState: &proto.DynamicValue{Msgpack: mp}, + Private: r.Private, + } + + protoResp, err := p.client.ReadResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + resp.Private = protoResp.Private + + return resp +} + +func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: PlanResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.PlanResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + ProposedNewState: &proto.DynamicValue{Msgpack: propMP}, + PriorPrivate: r.PriorPrivate, + } + + protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.PlannedState != nil { + state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.PlannedState = state + + for _, p := range protoResp.RequiresReplace { + resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p)) + } + + resp.PlannedPrivate = protoResp.PlannedPrivate + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) { + log.Printf("[TRACE] GRPCProvider: ApplyResourceChange") + + resSchema := p.getResourceSchema(r.TypeName) + + priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType()) + if err != nil { + 
resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ApplyResourceChange_Request{ + TypeName: r.TypeName, + PriorState: &proto.DynamicValue{Msgpack: priorMP}, + PlannedState: &proto.DynamicValue{Msgpack: plannedMP}, + Config: &proto.DynamicValue{Msgpack: configMP}, + PlannedPrivate: r.PlannedPrivate, + } + + protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + resp.Private = protoResp.Private + + state := cty.NullVal(resSchema.Block.ImpliedType()) + if protoResp.NewState != nil { + state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.NewState = state + + resp.LegacyTypeSystem = protoResp.LegacyTypeSystem + + return resp +} + +func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) { + log.Printf("[TRACE] GRPCProvider: ImportResourceState") + + protoReq := &proto.ImportResourceState_Request{ + TypeName: r.TypeName, + Id: r.ID, + } + + protoResp, err := p.client.ImportResourceState(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + for _, imported := range protoResp.ImportedResources { + resource := providers.ImportedResource{ + TypeName: imported.TypeName, + Private: imported.Private, + } + + resSchema := p.getResourceSchema(resource.TypeName) + state := cty.NullVal(resSchema.Block.ImpliedType()) + if imported.State != nil { + state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resource.State = state + resp.ImportedResources = append(resp.ImportedResources, resource) + } + + return resp +} + +func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) { + log.Printf("[TRACE] GRPCProvider: ReadDataSource") + + dataSchema := p.getDatasourceSchema(r.TypeName) + + config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ReadDataSource_Request{ + TypeName: r.TypeName, + Config: &proto.DynamicValue{ + Msgpack: config, + }, + } + + protoResp, err := p.client.ReadDataSource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + state := cty.NullVal(dataSchema.Block.ImpliedType()) + if protoResp.State != nil { + state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + } + resp.State = state + + return resp +} + +// closing the grpc connection is final, and terraform will call it at the end of every phase. 
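+// +// As an illustrative sketch only (not from the upstream docs; the variable +// names here are assumptions): a caller holding the provider as a +// providers.Interface would typically pair each successful plugin start with +// a deferred shutdown, e.g. +// +//	var p providers.Interface = &GRPCProvider{PluginClient: pluginClient} +//	defer p.Close() // kills the plugin process via PluginClient +//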
+func (p *GRPCProvider) Close() error { + log.Printf("[TRACE] GRPCProvider: Close") + + // Make sure to stop the server if we're not running within go-plugin. + if p.TestServer != nil { + p.TestServer.Stop() + } + + // Check this since it's not automatically inserted during plugin creation. + // It's currently only inserted by the command package, because that is + // where the factory is built and is the only point with access to the + // plugin.Client. + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go new file mode 100644 index 00000000..136c88d6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go @@ -0,0 +1,178 @@ +package plugin + +import ( + "context" + "errors" + "io" + "log" + "sync" + + plugin "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/configs/configschema" + proto "github.com/hashicorp/terraform/internal/tfplugin5" + "github.com/hashicorp/terraform/plugin/convert" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/msgpack" + "google.golang.org/grpc" +) + +// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation. +type GRPCProvisionerPlugin struct { + plugin.Plugin + GRPCProvisioner func() proto.ProvisionerServer +} + +func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return &GRPCProvisioner{ + client: proto.NewProvisionerClient(c), + ctx: ctx, + }, nil +} + +func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterProvisionerServer(s, p.GRPCProvisioner()) + return nil +} + +// GRPCProvisioner is the grpc implementation of provisioners.Interface. +type GRPCProvisioner struct { + // PluginClient provides a reference to the plugin.Client which controls the plugin process. + // This allows the GRPCProvisioner a way to shut down the plugin process. + PluginClient *plugin.Client + + client proto.ProvisionerClient + ctx context.Context + + // Cache the schema since we need it for serialization in each method call.
+ mu sync.Mutex + schema *configschema.Block +} + +func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) { + p.mu.Lock() + defer p.mu.Unlock() + + if p.schema != nil { + return provisioners.GetSchemaResponse{ + Provisioner: p.schema, + } + } + + protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + + if protoResp.Provisioner == nil { + resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema")) + return resp + } + + resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block) + + p.schema = resp.Provisioner + + return resp +} + +func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ValidateProvisionerConfig_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + } + protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics)) + return resp +} + +func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) { + schema := p.GetSchema() + if schema.Diagnostics.HasErrors() { + resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics) + return resp + } + + mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType()) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + // connection is always assumed to be a simple string map + connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String)) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + protoReq := &proto.ProvisionResource_Request{ + Config: &proto.DynamicValue{Msgpack: mp}, + Connection: &proto.DynamicValue{Msgpack: connMP}, + } + + outputClient, err := p.client.ProvisionResource(p.ctx, protoReq) + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + + for { + rcv, err := outputClient.Recv() + if rcv != nil { + r.UIOutput.Output(rcv.Output) + } + if err != nil { + if err != io.EOF { + resp.Diagnostics = resp.Diagnostics.Append(err) + } + break + } + + if len(rcv.Diagnostics) > 0 { + resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics)) + break + } + } + + return resp +} + +func (p *GRPCProvisioner) Stop() error { + protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{}) + if err != nil { + return err + } + if protoResp.Error != "" { + return errors.New(protoResp.Error) + } + return nil +} + +func (p *GRPCProvisioner) Close() error { + // check this since it's not automatically inserted during plugin creation + if p.PluginClient == nil { + log.Println("[DEBUG] provider has no plugin.Client") + return nil + } + + p.PluginClient.Kill() + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go 
b/vendor/github.com/hashicorp/terraform/plugin/plugin.go index 00fa7b29..e4fb5776 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go +++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go @@ -6,8 +6,9 @@ import ( // See serve.go for serving plugins -// PluginMap should be used by clients for the map of plugins. -var PluginMap = map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{}, - "provisioner": &ResourceProvisionerPlugin{}, +var VersionedPlugins = map[int]plugin.PluginSet{ + 5: { + "provider": &GRPCProviderPlugin{}, + "provisioner": &GRPCProvisionerPlugin{}, + }, } diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go index 473f7860..459661a5 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go @@ -9,11 +9,14 @@ import ( // ResourceProviderPlugin is the plugin.Plugin implementation. type ResourceProviderPlugin struct { - F func() terraform.ResourceProvider + ResourceProvider func() terraform.ResourceProvider } func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil + return &ResourceProviderServer{ + Broker: b, + Provider: p.ResourceProvider(), + }, nil } func (p *ResourceProviderPlugin) Client( @@ -41,6 +44,24 @@ func (p *ResourceProvider) Stop() error { return err } +func (p *ResourceProvider) GetSchema(req *terraform.ProviderSchemaRequest) (*terraform.ProviderSchema, error) { + var result ResourceProviderGetSchemaResponse + args := &ResourceProviderGetSchemaArgs{ + Req: req, + } + + err := p.Client.Call("Plugin.GetSchema", args, &result) + if err != nil { + return nil, err + } + + if result.Error != nil { + err = result.Error + } + + return result.Schema, err +} + func (p *ResourceProvider) Input( input terraform.UIInput, c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { @@ -312,6 +333,15 @@ type ResourceProviderStopResponse struct { Error *plugin.BasicError } +type ResourceProviderGetSchemaArgs struct { + Req *terraform.ProviderSchemaRequest +} + +type ResourceProviderGetSchemaResponse struct { + Schema *terraform.ProviderSchema + Error *plugin.BasicError +} + type ResourceProviderConfigureResponse struct { Error *plugin.BasicError } @@ -418,6 +448,18 @@ func (s *ResourceProviderServer) Stop( return nil } +func (s *ResourceProviderServer) GetSchema( + args *ResourceProviderGetSchemaArgs, + result *ResourceProviderGetSchemaResponse, +) error { + schema, err := s.Provider.GetSchema(args.Req) + result.Schema = schema + if err != nil { + result.Error = plugin.NewBasicError(err) + } + return nil +} + func (s *ResourceProviderServer) Input( args *ResourceProviderInputArgs, reply *ResourceProviderInputResponse) error { diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go index 8fce9d8a..f0cc341f 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go @@ -4,16 +4,20 @@ import ( "net/rpc" "github.com/hashicorp/go-plugin" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/terraform" ) // ResourceProvisionerPlugin is 
the plugin.Plugin implementation. type ResourceProvisionerPlugin struct { - F func() terraform.ResourceProvisioner + ResourceProvisioner func() terraform.ResourceProvisioner } func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { - return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil + return &ResourceProvisionerServer{ + Broker: b, + Provisioner: p.ResourceProvisioner(), + }, nil } func (p *ResourceProvisionerPlugin) Client( @@ -28,6 +32,11 @@ type ResourceProvisioner struct { Client *rpc.Client } +func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) { + panic("not implemented") + return nil, nil +} + func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { var resp ResourceProvisionerValidateResponse args := ResourceProvisionerValidateArgs{ diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go index 2028a613..8d056c59 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/serve.go +++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go @@ -2,14 +2,23 @@ package plugin import ( "github.com/hashicorp/go-plugin" + grpcplugin "github.com/hashicorp/terraform/helper/plugin" + proto "github.com/hashicorp/terraform/internal/tfplugin5" "github.com/hashicorp/terraform/terraform" ) -// The constants below are the names of the plugins that can be dispensed -// from the plugin server. const ( + // The constants below are the names of the plugins that can be dispensed + // from the plugin server. ProviderPluginName = "provider" ProvisionerPluginName = "provisioner" + + // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify + // a particular version during their handshake. This is the version used when Terraform 0.10 + // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must + // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and + // 0.11. + DefaultProtocolVersion = 4 ) // Handshake is the HandshakeConfig used to configure clients and servers. @@ -19,7 +28,7 @@ var Handshake = plugin.HandshakeConfig{ // one or the other that makes it so that they can't safely communicate. // This could be adding a new interface value, it could be how // helper/schema computes diffs, etc. - ProtocolVersion: 4, + ProtocolVersion: DefaultProtocolVersion, // The magic cookie values should NEVER be changed. MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", @@ -28,27 +37,85 @@ type ProviderFunc func() terraform.ResourceProvider type ProvisionerFunc func() terraform.ResourceProvisioner +type GRPCProviderFunc func() proto.ProviderServer +type GRPCProvisionerFunc func() proto.ProvisionerServer // ServeOpts are the configurations to serve a plugin. type ServeOpts struct { ProviderFunc ProviderFunc ProvisionerFunc ProvisionerFunc + + // Wrapped versions of the above plugins will automatically be shimmed and + // added to the GRPC functions when possible. + GRPCProviderFunc GRPCProviderFunc + GRPCProvisionerFunc GRPCProvisionerFunc } // Serve serves a plugin. This function never returns and should be the final // function called in the main function of the plugin. func Serve(opts *ServeOpts) { + // since the plugins may not yet be aware of the new protocol, we + // automatically wrap the plugins in the grpc shims.
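+ // (Illustrative note, not part of this change: a provider's main() would + // typically call plugin.Serve(&plugin.ServeOpts{ProviderFunc: Provider}) + // with a hypothetical Provider constructor, and rely on this wrapping to + // also serve protocol 5.)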
+ if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil { + provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc()) + // this is almost always going to be a *schema.Provider, but check that + // we got back a valid provider just in case. + if provider != nil { + opts.GRPCProviderFunc = func() proto.ProviderServer { + return provider + } + } + } + if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil { + provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc()) + if provisioner != nil { + opts.GRPCProvisionerFunc = func() proto.ProvisionerServer { + return provisioner + } + } + } + plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - Plugins: pluginMap(opts), + HandshakeConfig: Handshake, + VersionedPlugins: pluginSet(opts), + GRPCServer: plugin.DefaultGRPCServer, }) } -// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin -// server or client. -func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { +// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring +// a plugin server or client. +func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin { return map[string]plugin.Plugin{ - "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, - "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, + "provider": &ResourceProviderPlugin{ + ResourceProvider: opts.ProviderFunc, + }, + "provisioner": &ResourceProvisionerPlugin{ + ResourceProvisioner: opts.ProvisionerFunc, + }, + } +} + +func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet { + // Set the legacy netrpc plugins at version 4. + // The oldest version is returned when executed by a legacy go-plugin + // client. + plugins := map[int]plugin.PluginSet{ + 4: legacyPluginMap(opts), + } + + // add the new protocol versions if they're configured + if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil { + plugins[5] = plugin.PluginSet{} + if opts.GRPCProviderFunc != nil { + plugins[5]["provider"] = &GRPCProviderPlugin{ + GRPCProvider: opts.GRPCProviderFunc, + } + } + if opts.GRPCProvisionerFunc != nil { + plugins[5]["provisioner"] = &GRPCProvisionerPlugin{ + GRPCProvisioner: opts.GRPCProvisionerFunc, + } + } } + return plugins } diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go index 493efc0a..3469e6a9 100644 --- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go +++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go @@ -1,19 +1,20 @@ package plugin import ( + "context" "net/rpc" "github.com/hashicorp/go-plugin" "github.com/hashicorp/terraform/terraform" ) -// UIInput is an implementatin of terraform.UIInput that communicates +// UIInput is an implementation of terraform.UIInput that communicates // over RPC.
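+// +// A hypothetical usage sketch (the rpcClient name and prompt values are +// assumptions for illustration): +// +//	var in terraform.UIInput = &UIInput{Client: rpcClient} +//	v, err := in.Input(context.Background(), &terraform.InputOpts{Id: "region", Query: "Which region?"}) +//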
type UIInput struct { Client *rpc.Client } -func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { +func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) { var resp UIInputInputResponse err := i.Client.Call("Plugin.Input", opts, &resp) if err != nil { @@ -41,7 +42,7 @@ type UIInputServer struct { func (s *UIInputServer) Input( opts *terraform.InputOpts, reply *UIInputInputResponse) error { - value, err := s.UIInput.Input(opts) + value, err := s.UIInput.Input(context.Background(), opts) *reply = UIInputInputResponse{ Value: value, Error: plugin.NewBasicError(err), diff --git a/vendor/github.com/hashicorp/terraform/plugins.go b/vendor/github.com/hashicorp/terraform/plugins.go new file mode 100644 index 00000000..cf2d5425 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/plugins.go @@ -0,0 +1,29 @@ +package main + +import ( + "fmt" + "log" + "path/filepath" + "runtime" +) + +// globalPluginDirs returns directories that should be searched for +// globally-installed plugins (not specific to the current configuration). +// +// Earlier entries in this slice get priority over later when multiple copies +// of the same plugin version are found, but newer versions always override +// older versions where both satisfy the provider version constraints. +func globalPluginDirs() []string { + var ret []string + // Look in ~/.terraform.d/plugins/ , or its equivalent on non-UNIX + dir, err := ConfigDir() + if err != nil { + log.Printf("[ERROR] Error finding global config directory: %s", err) + } else { + machineDir := fmt.Sprintf("%s_%s", runtime.GOOS, runtime.GOARCH) + ret = append(ret, filepath.Join(dir, "plugins")) + ret = append(ret, filepath.Join(dir, "plugins", machineDir)) + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go new file mode 100644 index 00000000..7ed523f1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go @@ -0,0 +1,47 @@ +package providers + +import ( + "sort" + + "github.com/hashicorp/terraform/addrs" +) + +// AddressedTypes is a helper that extracts all of the distinct provider +// types from the given list of relative provider configuration addresses. +func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} + +// AddressedTypesAbs is a helper that extracts all of the distinct provider +// types from the given list of absolute provider configuration addresses. 
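+// +// For example (illustrative only), addresses for two instances of the same +// provider type collapse to a single entry: +// +//	// given addrA and addrB both of provider type "oneview": +//	AddressedTypesAbs([]addrs.AbsProviderConfig{addrA, addrB}) // -> []string{"oneview"} +//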
+func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string { + if len(providerAddrs) == 0 { + return nil + } + m := map[string]struct{}{} + for _, addr := range providerAddrs { + m[addr.ProviderConfig.Type] = struct{}{} + } + + names := make([]string, 0, len(m)) + for typeName := range m { + names = append(names, typeName) + } + + sort.Strings(names) // Stable result for tests + return names +} diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go new file mode 100644 index 00000000..39aa1de6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/providers/doc.go @@ -0,0 +1,3 @@ +// Package providers contains the interface and primary types required to +// implement a Terraform resource provider. +package providers diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go new file mode 100644 index 00000000..7e0a74c5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/providers/provider.go @@ -0,0 +1,359 @@ +package providers + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Interface represents the set of methods required for a complete resource +// provider plugin. +type Interface interface { + // GetSchema returns the complete schema for the provider. + GetSchema() GetSchemaResponse + + // PrepareProviderConfig allows the provider to validate the configuration + // values, and set or override any values with defaults. + PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse + + // ValidateResourceTypeConfig allows the provider to validate the resource + // configuration values. + ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse + + // ValidateDataSourceConfig allows the provider to validate the data source + // configuration values. + ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse + + // UpgradeResourceState is called when the state loader encounters an + // instance state whose schema version is less than the one reported by the + // currently-used version of the corresponding provider, and the upgraded + // result is used for any further processing. + UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse + + // Configure configures and initializes the provider. + Configure(ConfigureRequest) ConfigureResponse + + // Stop is called when the provider should halt any in-flight actions. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provider after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // ReadResource refreshes a resource and returns its current state. + ReadResource(ReadResourceRequest) ReadResourceResponse + + // PlanResourceChange takes the current state and proposed state of a + // resource, and returns the planned final state.
+ PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse + + // ApplyResourceChange takes the planned state for a resource, which may + // yet contain unknown computed values, and applies the changes returning + // the final state. + ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse + + // ImportResourceState requests that the given resource be imported. + ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse + + // ReadDataSource returns the data source's current state. + ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provider is the schema for the provider itself. + Provider Schema + + // ResourceTypes maps the resource type name to that type's schema. + ResourceTypes map[string]Schema + + // DataSources maps the data source name to that data source's schema. + DataSources map[string]Schema + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// Schema pairs a provider or resource schema with that schema's version. +// This is used to be able to upgrade the schema in UpgradeResourceState. +type Schema struct { + Version int64 + Block *configschema.Block +} + +type PrepareProviderConfigRequest struct { + // Config is the raw configuration value for the provider. + Config cty.Value +} + +type PrepareProviderConfigResponse struct { + // PreparedConfig is the configuration as prepared by the provider. + PreparedConfig cty.Value + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateResourceTypeConfigRequest struct { + // TypeName is the name of the resource type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateResourceTypeConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ValidateDataSourceConfigRequest struct { + // TypeName is the name of the data source type to validate. + TypeName string + + // Config is the configuration value to validate, which may contain unknown + // values. + Config cty.Value +} + +type ValidateDataSourceConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type UpgradeResourceStateRequest struct { + // TypeName is the name of the resource type being upgraded. + TypeName string + + // Version is the version of the schema that created the current state. + Version int64 + + // RawStateJSON and RawStateFlatmap contain the state that needs to be + // upgraded to match the current schema version. Because the schema is + // unknown, this contains only the raw data as stored in the state. + // RawStateJSON is the current json state encoding. + // RawStateFlatmap is the legacy flatmap encoding. + // Only one of these fields may be set for the upgrade request. + RawStateJSON []byte + RawStateFlatmap map[string]string +} + +type UpgradeResourceStateResponse struct { + // UpgradedState is the newly upgraded resource state. + UpgradedState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ConfigureRequest struct { + // TerraformVersion is the version string from the running instance of + // terraform.
Providers can use TerraformVersion to verify compatibility, + and to store for informational purposes. + TerraformVersion string + + // Config is the complete configuration value for the provider. + Config cty.Value +} + +type ConfigureResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ReadResourceRequest struct { + // TypeName is the name of the resource type being read. + TypeName string + + // PriorState contains the previously saved state value for this resource. + PriorState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type ReadResourceResponse struct { + // NewState contains the current state of the resource. + NewState cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +type PlanResourceChangeRequest struct { + // TypeName is the name of the resource type to plan. + TypeName string + + // PriorState is the previously saved state value for this resource. + PriorState cty.Value + + // ProposedNewState is the expected state after the new configuration is + // applied. This is created by directly applying the configuration to the + // PriorState. The provider is then responsible for applying any further + // changes required to create the proposed final state. + ProposedNewState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the ProposedNewState in most circumstances. + Config cty.Value + + // PriorPrivate is the previously saved private data returned from the + // provider during the last apply. + PriorPrivate []byte +} + +type PlanResourceChangeResponse struct { + // PlannedState is the expected state of the resource once the current + // configuration is applied. + PlannedState cty.Value + + // RequiresReplace is the list of the attributes that require + // resource replacement. + RequiresReplace []cty.Path + + // PlannedPrivate is an opaque blob that is not interpreted by terraform + // core. This will be saved and relayed back to the provider during + // ApplyResourceChange. + PlannedPrivate []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ApplyResourceChangeRequest struct { + // TypeName is the name of the resource type being applied. + TypeName string + + // PriorState is the current state of the resource. + PriorState cty.Value + + // PlannedState is the state returned from PlanResourceChange, and should + // represent the new state, minus any remaining computed attributes.
+ PlannedState cty.Value + + // Config is the resource configuration, before being merged with the + // PriorState. Any value not explicitly set in the configuration will be + // null. Config is supplied for reference, but Provider implementations + // should prefer the PlannedState in most circumstances. + Config cty.Value + + // PlannedPrivate is the same value as returned by PlanResourceChange. + PlannedPrivate []byte +} + +type ApplyResourceChangeResponse struct { + // NewState is the new complete state after applying the planned change. + // In the event of an error, NewState should represent the most recent + // known state of the resource, if it exists. + NewState cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics + + // LegacyTypeSystem is set only if the provider is using the legacy SDK + // whose type system cannot be precisely mapped into the Terraform type + // system. We use this to bypass certain consistency checks that would + // otherwise fail due to this imprecise mapping. No other provider or SDK + // implementation is permitted to set this. + LegacyTypeSystem bool +} + +type ImportResourceStateRequest struct { + // TypeName is the name of the resource type to be imported. + TypeName string + + // ID is a string with which the provider can identify the resource to be + // imported. + ID string +} + +type ImportResourceStateResponse struct { + // ImportedResources contains one or more state values related to the + // imported resource. It is not required that these be complete, only that + // there is enough identifying information for the provider to successfully + // update the states in ReadResource. + ImportedResources []ImportedResource + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// ImportedResource represents an object being imported into Terraform with the +// help of a provider. An ImportedObject is a RemoteObject that has been read +// by the provider's import handler but hasn't yet been committed to state. +type ImportedResource struct { + // TypeName is the name of the resource type associated with the + // returned state. It's possible for providers to import multiple related + // types with a single import request. + TypeName string + + // State is the state of the remote object being imported. This may not be + // complete, but must contain enough information to uniquely identify the + // resource. + State cty.Value + + // Private is an opaque blob that will be stored in state along with the + // resource. It is intended only for interpretation by the provider itself. + Private []byte +} + +// AsInstanceObject converts the receiving ImportedObject into a +// ResourceInstanceObject that has status ObjectReady. +// +// The returned object does not know its own resource type, so the caller must +// retain the ResourceType value from the source object if this information is +// needed. +// +// The returned object also has no dependency addresses, but the caller may +// freely modify the direct fields of the returned object without affecting +// the receiver. 
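+// +// A minimal sketch of the intended call pattern (hypothetical; the imported +// variable is assumed): +// +//	obj := imported.AsInstanceObject() +//	// obj.Status is states.ObjectReady; obj.Value and obj.Private carry +//	// over from the ImportedResource. +//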
+func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject { + return &states.ResourceInstanceObject{ + Status: states.ObjectReady, + Value: ir.State, + Private: ir.Private, + } +} + +type ReadDataSourceRequest struct { + // TypeName is the name of the data source type to Read. + TypeName string + + // Config is the complete configuration for the requested data source. + Config cty.Value +} + +type ReadDataSourceResponse struct { + // State is the current state of the requested data source. + State cty.Value + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform/providers/resolver.go b/vendor/github.com/hashicorp/terraform/providers/resolver.go new file mode 100644 index 00000000..4de8e0ac --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/providers/resolver.go @@ -0,0 +1,112 @@ +package providers + +import ( + "fmt" + + "github.com/hashicorp/terraform/plugin/discovery" +) + +// Resolver is an interface implemented by objects that are able to resolve +// a given set of resource provider version constraints into Factory +// callbacks. +type Resolver interface { + // Given a constraint map, return a Factory for each requested provider. + // If some or all of the constraints cannot be satisfied, return a non-nil + // slice of errors describing the problems. + ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) +} + +// ResolverFunc wraps a callback function and turns it into a Resolver +// implementation, for convenience in situations where a function and its +// associated closure are sufficient as a resolver implementation. +type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error) + +// ResolveProviders implements Resolver by calling the +// wrapped function. +func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + return f(reqd) +} + +// ResolverFixed returns a Resolver that has a fixed set of provider factories +// provided by the caller. The returned resolver ignores version constraints +// entirely and just returns the given factory for each requested provider +// name. +// +// This function is primarily used in tests, to provide mock providers or +// in-process providers under test. +func ResolverFixed(factories map[string]Factory) Resolver { + return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) { + ret := make(map[string]Factory, len(reqd)) + var errs []error + for name := range reqd { + if factory, exists := factories[name]; exists { + ret[name] = factory + } else { + errs = append(errs, fmt.Errorf("provider %q is not available", name)) + } + } + return ret, errs + }) +} + +// Factory is a function type that creates a new instance of a resource +// provider, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provider. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. 
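+// +// Illustrative test wiring (assumed names, not from this changeset): +// +//	factories := map[string]Factory{"oneview": FactoryFixed(mockProvider)} +//	resolver := ResolverFixed(factories) +//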
+func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} + +// ProviderHasResource is a helper that requests schema from the given provider +// and checks if it has a resource type of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the resource type would fail +// anyway. +func ProviderHasResource(provider Interface, typeName string) bool { + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.ResourceTypes[typeName] + return exists +} + +// ProviderHasDataSource is a helper that requests schema from the given +// provider and checks if it has a data source of the given name. +// +// This function is more expensive than it may first appear since it must +// retrieve the entire schema from the underlying provider, and so it should +// be used sparingly and especially not in tight loops. +// +// Since retrieving the provider may fail (e.g. if the provider is accessed +// over an RPC channel that has operational problems), this function will +// return false if the schema cannot be retrieved, under the assumption that +// a subsequent call to do anything with the data source would fail +// anyway. +func ProviderHasDataSource(provider Interface, dataSourceName string) bool { + resp := provider.GetSchema() + if resp.Diagnostics.HasErrors() { + return false + } + + _, exists := resp.DataSources[dataSourceName] + return exists +} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go new file mode 100644 index 00000000..b03ba9a1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/doc.go @@ -0,0 +1,3 @@ +// Package provisioners contains the interface and primary types to implement a +// Terraform resource provisioner. +package provisioners diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go new file mode 100644 index 00000000..590b97a8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/factory.go @@ -0,0 +1,19 @@ +package provisioners + +// Factory is a function type that creates a new instance of a resource +// provisioner, or returns an error if that is impossible. +type Factory func() (Interface, error) + +// FactoryFixed is a helper that creates a Factory that just returns some given +// single provisioner. +// +// Unlike usual factories, the exact same instance is returned for each call +// to the factory and so this must be used in only specialized situations where +// the caller can take care to either not mutate the given provider at all +// or to mutate it in ways that will not cause unexpected behavior for others +// holding the same reference. 
+func FactoryFixed(p Interface) Factory { + return func() (Interface, error) { + return p, nil + } +} diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go new file mode 100644 index 00000000..e53c8848 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go @@ -0,0 +1,82 @@ +package provisioners + +import ( + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" +) + +// Interface is the set of methods required for a resource provisioner plugin. +type Interface interface { + // GetSchema returns the schema for the provisioner configuration. + GetSchema() GetSchemaResponse + + // ValidateProvisionerConfig allows the provisioner to validate the + // configuration values. + ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse + + // ProvisionResource runs the provisioner with provided configuration. + // ProvisionResource blocks until the execution is complete. + // If the returned diagnostics contain any errors, the resource will be + // left in a tainted state. + ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse + + // Stop is called to interrupt the provisioner. + // + // Stop should not block waiting for in-flight actions to complete. It + // should take any action it wants and return immediately acknowledging it + // has received the stop request. Terraform will not make any further API + // calls to the provisioner after Stop is called. + // + // The error returned, if non-nil, is assumed to mean that signaling the + // stop somehow failed and that the user should expect potentially waiting + // a longer period of time. + Stop() error + + // Close shuts down the plugin process if applicable. + Close() error +} + +type GetSchemaResponse struct { + // Provisioner contains the schema for this provisioner. + Provisioner *configschema.Block + + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +// UIOutput provides the Output method for resource provisioner +// plugins to write any output to the UI. +// +// Provisioners may call the Output method multiple times while Apply is in +// progress. It is invalid to call Output after Apply returns. +type UIOutput interface { + Output(string) +} + +type ValidateProvisionerConfigRequest struct { + // Config is the complete configuration to be used for the provisioner. + Config cty.Value +} + +type ValidateProvisionerConfigResponse struct { + // Diagnostics contains any warnings or errors from the method call. + Diagnostics tfdiags.Diagnostics +} + +type ProvisionResourceRequest struct { + // Config is the complete provisioner configuration. + Config cty.Value + + // Connection contains any information required to access the resource + // instance. + Connection cty.Value + + // UIOutput is used to return output during the Apply operation. + UIOutput UIOutput +} + +type ProvisionResourceResponse struct { + // Diagnostics contains any warnings or errors from the method call. 
+ Diagnostics tfdiags.Diagnostics +} diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go new file mode 100644 index 00000000..93424d17 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/client.go @@ -0,0 +1,343 @@ +package registry + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/response" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/disco" + "github.com/hashicorp/terraform/version" +) + +const ( + xTerraformGet = "X-Terraform-Get" + xTerraformVersion = "X-Terraform-Version" + requestTimeout = 10 * time.Second + modulesServiceID = "modules.v1" + providersServiceID = "providers.v1" +) + +var tfVersion = version.String() + +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. + client *http.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco +} + +// NewClient returns a new initialized registry client. +func NewClient(services *disco.Disco, client *http.Client) *Client { + if services == nil { + services = disco.New() + } + + if client == nil { + client = httpclient.New() + client.Timeout = requestTimeout + } + + services.Transport = client.Transport + + return &Client{ + client: client, + services: services, + } +} + +// Discover queries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) { + service, err := c.services.DiscoverServiceURL(host, serviceID) + if err != nil { + return nil, &ServiceUnreachableError{err} + } + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service, nil +} + +// ModuleVersions queries the registry for a module, and returns the available versions. 
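+// +// A sketch of a typical lookup (illustrative; the module source string and +// the regsrc.ParseModuleSource helper are assumptions here): +// +//	client := NewClient(nil, nil) // default discovery and HTTP client +//	mod, _ := regsrc.ParseModuleSource("hashicorp/consul/aws") +//	versions, err := client.ModuleVersions(mod) +//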
+func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { + host, err := module.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(module.Module(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching module versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errModuleNotFound{addr: module} + default: + return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) + } + + var versions response.ModuleVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + for _, mod := range versions.Modules { + for _, v := range mod.Versions { + log.Printf("[DEBUG] found available version %q for %s", v.Version, mod.Source) + } + } + + return &versions, nil +} + +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + creds, err := c.services.CredentialsForHost(host) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// ModuleLocation finds the download location for a specific module version. +// This returns a string, because the final location may contain special go-getter syntax. +func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) { + host, err := module.SvcHost() + if err != nil { + return "", err + } + + service, err := c.Discover(host, modulesServiceID) + if err != nil { + return "", err + } + + var p *url.URL + if version == "" { + p, err = url.Parse(path.Join(module.Module(), "download")) + } else { + p, err = url.Parse(path.Join(module.Module(), version, "download")) + } + if err != nil { + return "", err + } + download := service.ResolveReference(p) + + log.Printf("[DEBUG] looking up module location from %q", download) + + req, err := http.NewRequest("GET", download.String(), nil) + if err != nil { + return "", err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + // there should be no body, but save it for logging + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", fmt.Errorf("error reading response body from registry: %s", err) + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return "", fmt.Errorf("module %q version %q not found", module, version) + default: + // anything else is an error: + return "", fmt.Errorf("error getting download location for %q: %s resp:%s", module, resp.Status, body) + } + + // the download location is in the X-Terraform-Get header + location := resp.Header.Get(xTerraformGet) + if location == "" { + return "", fmt.Errorf("failed to get download URL for %q: %s resp:%s", module, resp.Status, body) + } + + // If location looks like it's trying to be a relative URL, treat it as + // one.
+ // + // We don't do this for just _any_ location, since the X-Terraform-Get + // header is a go-getter location rather than a URL, and so not all + // possible values will parse reasonably as URLs. + // + // When used in conjunction with go-getter we normally require this header + // to be an absolute URL, but we are more liberal here because third-party + // registry implementations may not "know" their own absolute URLs if + // e.g. they are running behind a reverse proxy frontend, or such. + if strings.HasPrefix(location, "/") || strings.HasPrefix(location, "./") || strings.HasPrefix(location, "../") { + locationURL, err := url.Parse(location) + if err != nil { + return "", fmt.Errorf("invalid relative URL for %q: %s", module, err) + } + locationURL = download.ResolveReference(locationURL) + location = locationURL.String() + } + + return location, nil +} + +// TerraformProviderVersions queries the registry for a provider, and returns the available versions. +func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions")) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider versions from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // OK + case http.StatusNotFound: + return nil, &errProviderNotFound{addr: provider} + default: + return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status) + } + + var versions response.TerraformProviderVersions + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&versions); err != nil { + return nil, err + } + + return &versions, nil +} + +// TerraformProviderLocation queries the registry for provider download metadata. +func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) { + host, err := provider.SvcHost() + if err != nil { + return nil, err + } + + service, err := c.Discover(host, providersServiceID) + if err != nil { + return nil, err + } + + p, err := url.Parse(path.Join( + provider.TerraformProvider(), + version, + "download", + provider.OS, + provider.Arch, + )) + if err != nil { + return nil, err + } + + service = service.ResolveReference(p) + + log.Printf("[DEBUG] fetching provider location from %q", service) + + req, err := http.NewRequest("GET", service.String(), nil) + if err != nil { + return nil, err + } + + c.addRequestCreds(host, req) + req.Header.Set(xTerraformVersion, tfVersion) + + resp, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var loc response.TerraformProviderPlatformLocation + + dec := json.NewDecoder(resp.Body) + if err := dec.Decode(&loc); err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusNoContent: + // OK + case http.StatusNotFound: + return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version) +
default: + // anything else is an error: + return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status) + } + + return &loc, nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go new file mode 100644 index 00000000..5a6a31b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/errors.go @@ -0,0 +1,63 @@ +package registry + +import ( + "fmt" + + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/svchost/disco" +) + +type errModuleNotFound struct { + addr *regsrc.Module +} + +func (e *errModuleNotFound) Error() string { + return fmt.Sprintf("module %s not found", e.addr) +} + +// IsModuleNotFound returns true only if the given error is a "module not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsModuleNotFound(err error) bool { + _, ok := err.(*errModuleNotFound) + return ok +} + +type errProviderNotFound struct { + addr *regsrc.TerraformProvider +} + +func (e *errProviderNotFound) Error() string { + return fmt.Sprintf("provider %s not found", e.addr) +} + +// IsProviderNotFound returns true only if the given error is a "provider not found" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsProviderNotFound(err error) bool { + _, ok := err.(*errProviderNotFound) + return ok +} + +// IsServiceNotProvided returns true only if the given error is a "service not provided" +// error. This allows callers to recognize this particular error condition +// as distinct from operational errors such as poor network connectivity. +func IsServiceNotProvided(err error) bool { + _, ok := err.(*disco.ErrServiceNotProvided) + return ok +} + +// ServiceUnreachableError indicates that the registry service is unreachable. +type ServiceUnreachableError struct { + err error +} + +func (e *ServiceUnreachableError) Error() string { + return e.err.Error() +} + +// IsServiceUnreachable returns true if the registry/discovery service was unreachable +func IsServiceUnreachable(err error) bool { + _, ok := err.(*ServiceUnreachableError) + return ok +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go new file mode 100644 index 00000000..14b4dce9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/friendly_host.go @@ -0,0 +1,140 @@ +package regsrc + +import ( + "regexp" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + // InvalidHostString is a placeholder returned when a raw host can't be + // converted by IDNA spec. It will never be returned for any host for which + // Valid() is true. + InvalidHostString = "" + + // urlLabelEndSubRe is a sub-expression that matches any character that's + // allowed at the start or end of a URL label according to RFC1123. + urlLabelEndSubRe = "[0-9A-Za-z]" + + // urlLabelMidSubRe is a sub-expression that matches any character that's + // allowed in a non-start, non-end position of a URL label according to RFC1123. + urlLabelMidSubRe = "[0-9A-Za-z-]" + + // urlLabelUnicodeSubRe is a sub-expression that matches any non-ascii char + // in an IDN (Unicode) display URL.
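The predicate helpers above let callers separate a definitive "not found" from transient infrastructure failures. A hedged fragment, reusing the client and module values from the earlier sketch:

```go
_, err := client.ModuleVersions(module)
switch {
case err == nil:
	// proceed normally
case registry.IsModuleNotFound(err):
	log.Printf("module is not present in the registry: %s", err)
case registry.IsServiceUnreachable(err):
	log.Printf("registry or discovery service unreachable; retry later: %s", err)
default:
	log.Printf("lookup failed: %s", err)
}
```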
It's not strict - there are only ~15k + // valid Unicode points in IDN RFC (some with conditions). We are just going + // with being liberal with matching and then erroring if we fail to convert + // to punycode later (which validates chars fully). This at least ensures + // ascii chars disallowed by the RFC1123 parts above don't become legal + // again. + urlLabelUnicodeSubRe = "[^[:ascii:]]" + + // hostLabelSubRe is the sub-expression that matches a valid hostname label. + // It does not anchor the start or end so it can be composed into more + // complex RegExps below. Note that for sanity we don't handle disallowing + // raw punycode in this regexp (esp. since re2 doesn't support negative + // lookbehind, but we can capture its presence here to check later). + hostLabelSubRe = "" + + // Match valid initial char, or unicode char + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + // Optionally, match 0 to 61 valid URL or Unicode chars, + // followed by one valid end char or unicode char + "(?:" + + "(?:" + urlLabelMidSubRe + "|" + urlLabelUnicodeSubRe + "){0,61}" + + "(?:" + urlLabelEndSubRe + "|" + urlLabelUnicodeSubRe + ")" + + ")?" + + // hostSubRe is the sub-expression that matches a valid host prefix. + // Allows custom port. + hostSubRe = hostLabelSubRe + "(?:\\." + hostLabelSubRe + ")+(?::\\d+)?" + + // hostRe is a regexp that matches a valid host prefix. Additional + // validation of unicode strings is needed for matches. + hostRe = regexp.MustCompile("^" + hostSubRe + "$") +) + +// FriendlyHost describes a registry instance identified in source strings by a +// simple bare hostname like registry.terraform.io. +type FriendlyHost struct { + Raw string +} + +func NewFriendlyHost(host string) *FriendlyHost { + return &FriendlyHost{Raw: host} +} + +// ParseFriendlyHost attempts to parse a valid "friendly host" prefix from the +// given string. If no valid prefix is found, host will be nil and rest will +// contain the full source string. The host prefix must terminate at the end of +// the input or at the first / character. If one or more characters exist after +// the first /, they will be returned as rest (without the / delimiter). +// Hostnames containing punycode WILL be parsed successfully since they may have +// come from an internal normalized source string, however should be considered +// invalid if the string came from a user directly. This must be checked +// explicitly for user-input strings by calling Valid() on the +// returned host. +func ParseFriendlyHost(source string) (host *FriendlyHost, rest string) { + parts := strings.SplitN(source, "/", 2) + + if hostRe.MatchString(parts[0]) { + host = &FriendlyHost{Raw: parts[0]} + if len(parts) == 2 { + rest = parts[1] + } + return + } + + // No match, return whole string as rest along with nil host + rest = source + return +} + +// Valid returns whether the host prefix is considered valid in any case. +// Examples of invalid prefixes might include ones that don't conform to the host +// name specifications. Note that IDN prefixes containing punycode are not valid +// input which we expect to always be in user-input or normalised display form. +func (h *FriendlyHost) Valid() bool { + return svchost.IsValid(h.Raw) +} + +// Display returns the host formatted for display to the user in CLI or web +// output. +func (h *FriendlyHost) Display() string { + return svchost.ForDisplay(h.Raw) +} + +// Normalized returns the host formatted for internal reference or comparison.
+func (h *FriendlyHost) Normalized() string { + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return InvalidHostString + } + return string(host) +} + +// String returns the host formatted as the user originally typed it assuming it +// was parsed from user input. +func (h *FriendlyHost) String() string { + return h.Raw +} + +// Equal compares the FriendlyHost against another instance taking normalization +// into account. Invalid hosts cannot be compared and will always return false. +func (h *FriendlyHost) Equal(other *FriendlyHost) bool { + if other == nil { + return false + } + + otherHost, err := svchost.ForComparison(other.Raw) + if err != nil { + return false + } + + host, err := svchost.ForComparison(h.Raw) + if err != nil { + return false + } + + return otherHost == host +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go new file mode 100644 index 00000000..325706ec --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/module.go @@ -0,0 +1,205 @@ +package regsrc + +import ( + "errors" + "fmt" + "regexp" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + ErrInvalidModuleSource = errors.New("not a valid registry module source") + + // nameSubRe is the sub-expression that matches a valid module namespace or + // name. It's strictly a super-set of what GitHub allows for user/org and + // repo names respectively, but more restrictive than our original repo-name + // regex which allowed periods but could cause ambiguity with hostname + // prefixes. It does not anchor the start or end so it can be composed into + // more complex RegExps below. Alphanumeric with - and _ allowed in non + // leading or trailing positions. Max length 64 chars. (GitHub username is + // 38 max.) + nameSubRe = "[0-9A-Za-z](?:[0-9A-Za-z-_]{0,62}[0-9A-Za-z])?" + + // providerSubRe is the sub-expression that matches a valid provider. It + // does not anchor the start or end so it can be composed into more complex + // RegExps below. Only lowercase chars and digits are supported in practice. + // Max length 64 chars. + providerSubRe = "[0-9a-z]{1,64}" + + // moduleSourceRe is a regular expression that matches the basic + // namespace/name/provider[//...] format for registry sources. It assumes + // any FriendlyHost prefix has already been removed if present. + moduleSourceRe = regexp.MustCompile( + fmt.Sprintf("^(%s)\\/(%s)\\/(%s)(?:\\/\\/(.*))?$", + nameSubRe, nameSubRe, providerSubRe)) + + // NameRe is a regular expression defining the format allowed for namespace + // or name fields in module registry implementations. + NameRe = regexp.MustCompile("^" + nameSubRe + "$") + + // ProviderRe is a regular expression defining the format allowed for + // provider fields in module registry implementations. + ProviderRe = regexp.MustCompile("^" + providerSubRe + "$") + + // these hostnames are not allowed as registry sources, because they are + // already special case module sources in terraform. + disallowed = map[string]bool{ + "github.com": true, + "bitbucket.org": true, + } +) + +// Module describes a Terraform Registry Module source. +type Module struct { + // RawHost is the friendly host prefix if one was present. 
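The ParseFriendlyHost contract documented above can be made concrete with a short, hedged fragment; the expected values follow from the documented behavior:

```go
host, rest := regsrc.ParseFriendlyHost("registry.terraform.io/hashicorp/consul/aws")
// host.Raw == "registry.terraform.io"
// rest    == "hashicorp/consul/aws"
if host != nil && host.Valid() {
	fmt.Println(host.Display())    // "registry.terraform.io"
	fmt.Println(host.Normalized()) // "registry.terraform.io"
}
```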
It might be nil + // if the original source had no host prefix which implies + // PublicRegistryHost but is distinct from having an actual pointer to + // PublicRegistryHost since it encodes the fact the original string didn't + // include a host prefix at all which is significant for recovering actual + // input not just normalized form. Most callers should access it with Host() + // which will return the public registry host instance if it's nil. + RawHost *FriendlyHost + RawNamespace string + RawName string + RawProvider string + RawSubmodule string +} + +// NewModule constructs a new module source from separate parts. Pass empty +// string if host or submodule are not needed. +func NewModule(host, namespace, name, provider, submodule string) (*Module, error) { + m := &Module{ + RawNamespace: namespace, + RawName: name, + RawProvider: provider, + RawSubmodule: submodule, + } + if host != "" { + h := NewFriendlyHost(host) + if h != nil { + if !h.Valid() || disallowed[h.Display()] { + return nil, ErrInvalidModuleSource + } + } + m.RawHost = h + } + return m, nil +} + +// ParseModuleSource attempts to parse source as a Terraform registry module +// source. If the string is not found to be in a valid format, +// ErrInvalidModuleSource is returned. Note that this can only be used on +// "input" strings, e.g. either ones supplied by the user or potentially +// normalised but in Display form (unicode). It will fail to parse a source with +// a punycoded domain since this is not permitted input from a user. If you have +// an already normalized string internally, you can compare it without parsing +// by comparing with the normalized version of the subject with the normal +// string equality operator. +func ParseModuleSource(source string) (*Module, error) { + // See if there is a friendly host prefix. + host, rest := ParseFriendlyHost(source) + if host != nil { + if !host.Valid() || disallowed[host.Display()] { + return nil, ErrInvalidModuleSource + } + } + + matches := moduleSourceRe.FindStringSubmatch(rest) + if len(matches) < 4 { + return nil, ErrInvalidModuleSource + } + + m := &Module{ + RawHost: host, + RawNamespace: matches[1], + RawName: matches[2], + RawProvider: matches[3], + } + + if len(matches) == 5 { + m.RawSubmodule = matches[4] + } + + return m, nil +} + +// Display returns the source formatted for display to the user in CLI or web +// output. +func (m *Module) Display() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Display()), false) +} + +// Normalized returns the source formatted for internal reference or comparison. +func (m *Module) Normalized() string { + return m.formatWithPrefix(m.normalizedHostPrefix(m.Host().Normalized()), false) +} + +// String returns the source formatted as the user originally typed it assuming +// it was parsed from user input. +func (m *Module) String() string { + // Don't normalize public registry hostname - leave it exactly like the user + // input it. + hostPrefix := "" + if m.RawHost != nil { + hostPrefix = m.RawHost.String() + "/" + } + return m.formatWithPrefix(hostPrefix, true) +} + +// Equal compares the module source against another instance taking +// normalization into account. +func (m *Module) Equal(other *Module) bool { + return m.Normalized() == other.Normalized() +} + +// Host returns the FriendlyHost object describing which registry this module is +// in. If the original source string had no host component this will return the +// PublicRegistryHost.
+func (m *Module) Host() *FriendlyHost { + if m.RawHost == nil { + return PublicRegistryHost + } + return m.RawHost +} + +func (m *Module) normalizedHostPrefix(host string) string { + if m.Host().Equal(PublicRegistryHost) { + return "" + } + return host + "/" +} + +func (m *Module) formatWithPrefix(hostPrefix string, preserveCase bool) string { + suffix := "" + if m.RawSubmodule != "" { + suffix = "//" + m.RawSubmodule + } + str := fmt.Sprintf("%s%s/%s/%s%s", hostPrefix, m.RawNamespace, m.RawName, + m.RawProvider, suffix) + + // lower case by default + if !preserveCase { + return strings.ToLower(str) + } + return str +} + +// Module returns just the registry ID of the module, without a hostname or +// suffix. +func (m *Module) Module() string { + return fmt.Sprintf("%s/%s/%s", m.RawNamespace, m.RawName, m.RawProvider) +} + +// SvcHost returns the svchost.Hostname for this module. Since FriendlyHost may +// contain an invalid hostname, this also returns an error indicating if it +// could be converted to a svchost.Hostname. If no host is specified, the +// default PublicRegistryHost is returned. +func (m *Module) SvcHost() (svchost.Hostname, error) { + if m.RawHost == nil { + return svchost.ForComparison(PublicRegistryHost.Raw) + } + return svchost.ForComparison(m.RawHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go new file mode 100644 index 00000000..c430bf14 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/regsrc.go @@ -0,0 +1,8 @@ +// Package regsrc provides helpers for working with source strings that identify +// resources within a Terraform registry. +package regsrc + +var ( + // PublicRegistryHost is a FriendlyHost that represents the public registry. + PublicRegistryHost = NewFriendlyHost("registry.terraform.io") +) diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go new file mode 100644 index 00000000..58dedee5 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go @@ -0,0 +1,60 @@ +package regsrc + +import ( + "fmt" + "runtime" + "strings" + + "github.com/hashicorp/terraform/svchost" +) + +var ( + // DefaultProviderNamespace represents the namespace for canonical + // HashiCorp-controlled providers. + DefaultProviderNamespace = "-" +) + +// TerraformProvider describes a Terraform Registry Provider source. +type TerraformProvider struct { + RawHost *FriendlyHost + RawNamespace string + RawName string + OS string + Arch string +} + +// NewTerraformProvider constructs a new provider source. +func NewTerraformProvider(name, os, arch string) *TerraformProvider { + if os == "" { + os = runtime.GOOS + } + if arch == "" { + arch = runtime.GOARCH + } + + // separate namespace if included + namespace := DefaultProviderNamespace + if names := strings.SplitN(name, "/", 2); len(names) == 2 { + namespace, name = names[0], names[1] + } + p := &TerraformProvider{ + RawHost: PublicRegistryHost, + RawNamespace: namespace, + RawName: name, + OS: os, + Arch: arch, + } + + return p +} + +// TerraformProvider returns just the registry ID of the provider +func (p *TerraformProvider) TerraformProvider() string { + return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName) +} + +// SvcHost returns the svchost.Hostname for this provider.
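A hedged sketch of parsing a registry module source with an explicit host and submodule path (the address is hypothetical; note how Module() preserves the raw casing while Normalized() lower-cases):

```go
m, err := regsrc.ParseModuleSource("Example.com/Acme/network/aws//modules/vpc")
if err != nil {
	log.Fatal(err)
}
fmt.Println(m.Module())     // "Acme/network/aws"
fmt.Println(m.Normalized()) // "example.com/acme/network/aws//modules/vpc"
```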
The +// default PublicRegistryHost is returned. +func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) { + return svchost.ForComparison(PublicRegistryHost.Raw) +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module.go b/vendor/github.com/hashicorp/terraform/registry/response/module.go new file mode 100644 index 00000000..3bd2b3df --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module.go @@ -0,0 +1,93 @@ +package response + +import ( + "time" +) + +// Module is the response structure with the data for a single module version. +type Module struct { + ID string `json:"id"` + + //--------------------------------------------------------------- + // Metadata about the overall module. + + Owner string `json:"owner"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Version string `json:"version"` + Provider string `json:"provider"` + Description string `json:"description"` + Source string `json:"source"` + PublishedAt time.Time `json:"published_at"` + Downloads int `json:"downloads"` + Verified bool `json:"verified"` +} + +// ModuleDetail represents a module in full detail. +type ModuleDetail struct { + Module + + //--------------------------------------------------------------- + // Metadata about the overall module. This is only available when + // requesting the specific module (not in list responses). + + // Root is the root module. + Root *ModuleSubmodule `json:"root"` + + // Submodules are the other submodules that are available within + // this module. + Submodules []*ModuleSubmodule `json:"submodules"` + + //--------------------------------------------------------------- + // The fields below are only set when requesting this specific + // module. They are available to easily know all available versions + // and providers without multiple API calls. + + Providers []string `json:"providers"` // All available providers + Versions []string `json:"versions"` // All versions +} + +// ModuleSubmodule is the metadata about a specific submodule within +// a module. This includes the root module as a special case. +type ModuleSubmodule struct { + Path string `json:"path"` + Readme string `json:"readme"` + Empty bool `json:"empty"` + + Inputs []*ModuleInput `json:"inputs"` + Outputs []*ModuleOutput `json:"outputs"` + Dependencies []*ModuleDep `json:"dependencies"` + Resources []*ModuleResource `json:"resources"` +} + +// ModuleInput is an input for a module. +type ModuleInput struct { + Name string `json:"name"` + Description string `json:"description"` + Default string `json:"default"` +} + +// ModuleOutput is an output for a module. +type ModuleOutput struct { + Name string `json:"name"` + Description string `json:"description"` +} + +// ModuleDep describes a dependency of a module. +type ModuleDep struct { + Name string `json:"name"` + Source string `json:"source"` + Version string `json:"version"` +} + +// ModuleProviderDep is the output for a provider dependency +type ModuleProviderDep struct { + Name string `json:"name"` + Version string `json:"version"` +} + +// ModuleResource describes a resource managed by a module.
+type ModuleResource struct { + Name string `json:"name"` + Type string `json:"type"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_list.go b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go new file mode 100644 index 00000000..97837482 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_list.go @@ -0,0 +1,7 @@ +package response + +// ModuleList is the response structure for a pageable list of modules. +type ModuleList struct { + Meta PaginationMeta `json:"meta"` + Modules []*Module `json:"modules"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go new file mode 100644 index 00000000..e48499dc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_provider.go @@ -0,0 +1,14 @@ +package response + +// ModuleProvider represents a single provider for modules. +type ModuleProvider struct { + Name string `json:"name"` + Downloads int `json:"downloads"` + ModuleCount int `json:"module_count"` +} + +// ModuleProviderList is the response structure for a pageable list of ModuleProviders. +type ModuleProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*ModuleProvider `json:"providers"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go new file mode 100644 index 00000000..f69e9750 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/module_versions.go @@ -0,0 +1,32 @@ +package response + +// ModuleVersions is the response format that contains all metadata about module +// versions needed for terraform CLI to resolve version constraints. See RFC +// TF-042 for details on this format. +type ModuleVersions struct { + Modules []*ModuleProviderVersions `json:"modules"` +} + +// ModuleProviderVersions is the response format for a single module instance, +// containing metadata about all versions and their dependencies. +type ModuleProviderVersions struct { + Source string `json:"source"` + Versions []*ModuleVersion `json:"versions"` +} + +// ModuleVersion is the output metadata for a given version needed by CLI to +// resolve candidate versions to satisfy requirements. +type ModuleVersion struct { + Version string `json:"version"` + Root VersionSubmodule `json:"root"` + Submodules []*VersionSubmodule `json:"submodules"` +} + +// VersionSubmodule is the output metadata for a submodule within a given +// version needed by CLI to resolve candidate versions to satisfy requirements. +// When representing the Root in JSON the path is omitted. +type VersionSubmodule struct { + Path string `json:"path,omitempty"` + Providers []*ModuleProviderDep `json:"providers"` + Dependencies []*ModuleDep `json:"dependencies"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/pagination.go b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go new file mode 100644 index 00000000..75a92549 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/pagination.go @@ -0,0 +1,65 @@ +package response + +import ( + "net/url" + "strconv" +) + +// PaginationMeta is a structure included in responses for pagination. 
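To make the version-resolution format concrete, here is a hedged sketch decoding an illustrative (not real) registry payload into the structs above:

```go
const sample = `{
  "modules": [
    {
      "source": "hashicorp/consul/aws",
      "versions": [
        {"version": "0.1.0"},
        {"version": "0.2.0"}
      ]
    }
  ]
}`

var mv response.ModuleVersions
if err := json.Unmarshal([]byte(sample), &mv); err != nil {
	log.Fatal(err)
}
fmt.Println(mv.Modules[0].Versions[1].Version) // "0.2.0"
```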
+type PaginationMeta struct { + Limit int `json:"limit"` + CurrentOffset int `json:"current_offset"` + NextOffset *int `json:"next_offset,omitempty"` + PrevOffset *int `json:"prev_offset,omitempty"` + NextURL string `json:"next_url,omitempty"` + PrevURL string `json:"prev_url,omitempty"` +} + +// NewPaginationMeta populates pagination meta data from result parameters +func NewPaginationMeta(offset, limit int, hasMore bool, currentURL string) PaginationMeta { + pm := PaginationMeta{ + Limit: limit, + CurrentOffset: offset, + } + + // Calculate next/prev offsets, leave nil if not valid pages + nextOffset := offset + limit + if hasMore { + pm.NextOffset = &nextOffset + } + + prevOffset := offset - limit + if prevOffset < 0 { + prevOffset = 0 + } + if prevOffset < offset { + pm.PrevOffset = &prevOffset + } + + // If URL format provided, populate URLs. Intentionally swallow URL errors for now, API should + // catch missing URLs if we call with bad URL arg (and we care about them being present). + if currentURL != "" && pm.NextOffset != nil { + pm.NextURL, _ = setQueryParam(currentURL, "offset", *pm.NextOffset, 0) + } + if currentURL != "" && pm.PrevOffset != nil { + pm.PrevURL, _ = setQueryParam(currentURL, "offset", *pm.PrevOffset, 0) + } + + return pm +} + +func setQueryParam(baseURL, key string, val, defaultVal int) (string, error) { + u, err := url.Parse(baseURL) + if err != nil { + return "", err + } + q := u.Query() + if val == defaultVal { + // elide param if it's the default value + q.Del(key) + } else { + q.Set(key, strconv.Itoa(val)) + } + u.RawQuery = q.Encode() + return u.String(), nil +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider.go b/vendor/github.com/hashicorp/terraform/registry/response/provider.go new file mode 100644 index 00000000..5e8bae35 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/provider.go @@ -0,0 +1,36 @@ +package response + +import ( + "time" +) + +// Provider is the response structure with the data for a single provider +// version. This is just the metadata. A full provider response will be +// ProviderDetail. +type Provider struct { + ID string `json:"id"` + + //--------------------------------------------------------------- + // Metadata about the overall provider. + + Owner string `json:"owner"` + Namespace string `json:"namespace"` + Name string `json:"name"` + Version string `json:"version"` + Description string `json:"description"` + Source string `json:"source"` + PublishedAt time.Time `json:"published_at"` + Downloads int `json:"downloads"` +} + +// ProviderDetail represents a Provider with full detail. +type ProviderDetail struct { + Provider + + //--------------------------------------------------------------- + // The fields below are only set when requesting this specific + // module. They are available to easily know all available versions + // without multiple API calls. + + Versions []string `json:"versions"` // All versions +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go new file mode 100644 index 00000000..1dc7d237 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go @@ -0,0 +1,7 @@ +package response + +// ProviderList is the response structure for a pageable list of providers. 
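A worked example of NewPaginationMeta, assuming offset=10 and limit=10 with more results available (the URL is hypothetical); the expected fields follow from the code above:

```go
meta := response.NewPaginationMeta(10, 10, true, "https://registry.example.com/v1/modules?offset=10")
// meta.CurrentOffset == 10
// *meta.NextOffset   == 20, and meta.NextURL carries "offset=20"
// *meta.PrevOffset   == 0,  and meta.PrevURL elides the offset param (0 is the default)
```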
+type ProviderList struct { + Meta PaginationMeta `json:"meta"` + Providers []*Provider `json:"providers"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/redirect.go b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go new file mode 100644 index 00000000..d5eb49ba --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/redirect.go @@ -0,0 +1,6 @@ +package response + +// Redirect causes the frontend to perform a window redirect. +type Redirect struct { + URL string `json:"url"` +} diff --git a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go new file mode 100644 index 00000000..64e454a6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go @@ -0,0 +1,96 @@ +package response + +import ( + "sort" + "strings" + + version "github.com/hashicorp/go-version" +) + +// TerraformProvider is the response structure for all required information for +// Terraform to choose a download URL. It must include all versions and all +// platforms for Terraform to perform version and os/arch constraint matching +// locally. +type TerraformProvider struct { + ID string `json:"id"` + Verified bool `json:"verified"` + + Versions []*TerraformProviderVersion `json:"versions"` +} + +// TerraformProviderVersion is the Terraform-specific response structure for a +// provider version. +type TerraformProviderVersion struct { + Version string `json:"version"` + Protocols []string `json:"protocols"` + + Platforms []*TerraformProviderPlatform `json:"platforms"` +} + +// TerraformProviderVersions is the Terraform-specific response structure for an +// array of provider versions +type TerraformProviderVersions struct { + ID string `json:"id"` + Versions []*TerraformProviderVersion `json:"versions"` + Warnings []string `json:"warnings"` +} + +// TerraformProviderPlatform is the Terraform-specific response structure for a +// provider platform. +type TerraformProviderPlatform struct { + OS string `json:"os"` + Arch string `json:"arch"` +} + +// TerraformProviderPlatformLocation is the Terraform-specific response +// structure for a provider platform with all details required to perform a +// download. +type TerraformProviderPlatformLocation struct { + Protocols []string `json:"protocols"` + OS string `json:"os"` + Arch string `json:"arch"` + Filename string `json:"filename"` + DownloadURL string `json:"download_url"` + ShasumsURL string `json:"shasums_url"` + ShasumsSignatureURL string `json:"shasums_signature_url"` + Shasum string `json:"shasum"` + + SigningKeys SigningKeyList `json:"signing_keys"` +} + +// SigningKeyList is the response structure for a list of signing keys. +type SigningKeyList struct { + GPGKeys []*GPGKey `json:"gpg_public_keys"` +} + +// GPGKey is the response structure for a GPG key. +type GPGKey struct { + ASCIIArmor string `json:"ascii_armor"` + Source string `json:"source"` + SourceURL *string `json:"source_url"` +} + +// Collection type for TerraformProviderVersion +type ProviderVersionCollection []*TerraformProviderVersion + +// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg +// keys in the response. 
+func (signingKeys *SigningKeyList) GPGASCIIArmor() string { + keys := []string{} + + for _, gpgKey := range signingKeys.GPGKeys { + keys = append(keys, gpgKey.ASCIIArmor) + } + + return strings.Join(keys, "\n") +} + +// Sort sorts versions from newest to oldest. +func (v ProviderVersionCollection) Sort() { + sort.Slice(v, func(i, j int) bool { + versionA, _ := version.NewVersion(v[i].Version) + versionB, _ := version.NewVersion(v[j].Version) + + return versionA.GreaterThan(versionB) + }) +} diff --git a/vendor/github.com/hashicorp/terraform/repl/format.go b/vendor/github.com/hashicorp/terraform/repl/format.go new file mode 100644 index 00000000..d8f95ea3 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/repl/format.go @@ -0,0 +1,116 @@ +package repl + +import ( + "bufio" + "bytes" + "fmt" + "sort" + "strconv" + "strings" +) + +// FormatResult formats the given result value for human-readable output. +// +// The value must currently be a string, list, map, and any nested values +// with those same types. +func FormatResult(value interface{}) (string, error) { + return formatResult(value, false) +} + +func formatResult(value interface{}, nested bool) (string, error) { + if value == nil { + return "null", nil + } + switch output := value.(type) { + case string: + if nested { + return fmt.Sprintf("%q", output), nil + } + return output, nil + case int: + return strconv.Itoa(output), nil + case float64: + return fmt.Sprintf("%g", output), nil + case bool: + switch { + case output == true: + return "true", nil + default: + return "false", nil + } + case []interface{}: + return formatListResult(output) + case map[string]interface{}: + return formatMapResult(output) + default: + return "", fmt.Errorf("unknown value type: %T", value) + } +} + +func formatListResult(value []interface{}) (string, error) { + var outputBuf bytes.Buffer + outputBuf.WriteString("[") + if len(value) > 0 { + outputBuf.WriteString("\n") + } + + for _, v := range value { + raw, err := formatResult(v, true) + if err != nil { + return "", err + } + + outputBuf.WriteString(indent(raw)) + outputBuf.WriteString(",\n") + } + + outputBuf.WriteString("]") + return outputBuf.String(), nil +} + +func formatMapResult(value map[string]interface{}) (string, error) { + ks := make([]string, 0, len(value)) + for k, _ := range value { + ks = append(ks, k) + } + sort.Strings(ks) + + var outputBuf bytes.Buffer + outputBuf.WriteString("{") + if len(value) > 0 { + outputBuf.WriteString("\n") + } + + for _, k := range ks { + v := value[k] + rawK, err := formatResult(k, true) + if err != nil { + return "", err + } + rawV, err := formatResult(v, true) + if err != nil { + return "", err + } + + outputBuf.WriteString(indent(fmt.Sprintf("%s = %s", rawK, rawV))) + outputBuf.WriteString("\n") + } + + outputBuf.WriteString("}") + return outputBuf.String(), nil +} + +func indent(value string) string { + var outputBuf bytes.Buffer + s := bufio.NewScanner(strings.NewReader(value)) + newline := false + for s.Scan() { + if newline { + outputBuf.WriteByte('\n') + } + outputBuf.WriteString(" " + s.Text()) + newline = true + } + + return outputBuf.String() +} diff --git a/vendor/github.com/hashicorp/terraform/repl/repl.go b/vendor/github.com/hashicorp/terraform/repl/repl.go new file mode 100644 index 00000000..92df9c3d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/repl/repl.go @@ -0,0 +1,4 @@ +// Package repl provides the structs and functions necessary to run +// REPL for Terraform. 
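The formatters above produce the console's familiar rendering. A hedged fragment showing FormatResult on a nested value, with the output it yields:

```go
out, err := repl.FormatResult(map[string]interface{}{
	"name":  "web",
	"ports": []interface{}{80, 443},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out)
// {
//   "name" = "web"
//   "ports" = [
//     80,
//     443,
//   ]
// }
```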
The REPL allows experimentation with Terraform +// interpolations without having to run a Terraform configuration. +package repl diff --git a/vendor/github.com/hashicorp/terraform/repl/session.go b/vendor/github.com/hashicorp/terraform/repl/session.go new file mode 100644 index 00000000..56e11b39 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/repl/session.go @@ -0,0 +1,99 @@ +package repl + +import ( + "errors" + "fmt" + "strings" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" +) + +// ErrSessionExit is a special error result that should be checked for +// from Handle to signal a graceful exit. +var ErrSessionExit = errors.New("session exit") + +// Session represents the state for a single REPL session. +type Session struct { + // Scope is the evaluation scope where expressions will be evaluated. + Scope *lang.Scope +} + +// Handle handles a single line of input from the REPL. +// +// This is a stateful operation if a command is given (such as setting +// a variable). This function should not be called in parallel. +// +// The return value is the output and the error to show. +func (s *Session) Handle(line string) (string, bool, tfdiags.Diagnostics) { + switch { + case strings.TrimSpace(line) == "": + return "", false, nil + case strings.TrimSpace(line) == "exit": + return "", true, nil + case strings.TrimSpace(line) == "help": + ret, diags := s.handleHelp() + return ret, false, diags + default: + ret, diags := s.handleEval(line) + return ret, false, diags + } +} + +func (s *Session) handleEval(line string) (string, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Parse the given line as an expression + expr, parseDiags := hclsyntax.ParseExpression([]byte(line), "", hcl.Pos{Line: 1, Column: 1}) + diags = diags.Append(parseDiags) + if parseDiags.HasErrors() { + return "", diags + } + + val, valDiags := s.Scope.EvalExpr(expr, cty.DynamicPseudoType) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return "", diags + } + + if !val.IsWhollyKnown() { + // FIXME: In future, once we've updated the result formatter to be + // cty-aware, we should just include unknown values as "(not yet known)" + // in the serialized result, allowing the rest (if any) to be seen. + diags = diags.Append(fmt.Errorf("Result depends on values that cannot be determined until after \"terraform apply\".")) + return "", diags + } + + // Our formatter still wants an old-style raw interface{} value, so + // for now we'll just shim it. + // FIXME: Port the formatter to work with cty.Value directly. + legacyVal := hcl2shim.ConfigValueFromHCL2(val) + result, err := FormatResult(legacyVal) + if err != nil { + diags = diags.Append(err) + return "", diags + } + + return result, diags +} + +func (s *Session) handleHelp() (string, tfdiags.Diagnostics) { + text := ` +The Terraform console allows you to experiment with Terraform interpolations. +You may access resources in the state (if you have one) just as you would +from a configuration. For example: "aws_instance.foo.id" would evaluate +to the ID of "aws_instance.foo" if it exists in your state. + +Type in the interpolation to test and hit <enter> to see the result. + +To exit the console, type "exit" and hit <enter>, or use Control-C or +Control-D.
+` + + return strings.TrimSpace(text), nil +} diff --git a/vendor/github.com/hashicorp/terraform/state/backup.go b/vendor/github.com/hashicorp/terraform/state/backup.go index 047258f4..0cac3b6c 100644 --- a/vendor/github.com/hashicorp/terraform/state/backup.go +++ b/vendor/github.com/hashicorp/terraform/state/backup.go @@ -3,7 +3,8 @@ package state import ( "sync" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" ) // BackupState wraps a State that backs up the state on the first time that @@ -18,7 +19,7 @@ type BackupState struct { done bool } -func (s *BackupState) State() *terraform.State { +func (s *BackupState) State() *states.State { return s.Real.State() } @@ -26,7 +27,7 @@ func (s *BackupState) RefreshState() error { return s.Real.RefreshState() } -func (s *BackupState) WriteState(state *terraform.State) error { +func (s *BackupState) WriteState(state *states.State) error { s.mu.Lock() defer s.mu.Unlock() @@ -74,7 +75,7 @@ func (s *BackupState) backup() error { // purposes, but we don't need a backup or lock if the state is empty, so // skip this with a nil state. if state != nil { - ls := &LocalState{Path: s.Path} + ls := statemgr.NewFilesystem(s.Path) if err := ls.WriteState(state); err != nil { return err } diff --git a/vendor/github.com/hashicorp/terraform/state/inmem.go b/vendor/github.com/hashicorp/terraform/state/inmem.go index 4e031896..36fa3414 100644 --- a/vendor/github.com/hashicorp/terraform/state/inmem.go +++ b/vendor/github.com/hashicorp/terraform/state/inmem.go @@ -32,8 +32,18 @@ func (s *InmemState) WriteState(state *terraform.State) error { s.mu.Lock() defer s.mu.Unlock() - state.IncrementSerialMaybe(s.state) + state = state.DeepCopy() + + if s.state != nil { + state.Serial = s.state.Serial + + if !s.state.MarshalEqual(state) { + state.Serial++ + } + } + s.state = state + return nil } diff --git a/vendor/github.com/hashicorp/terraform/state/local.go b/vendor/github.com/hashicorp/terraform/state/local.go index 5ce02ce5..100feea1 100644 --- a/vendor/github.com/hashicorp/terraform/state/local.go +++ b/vendor/github.com/hashicorp/terraform/state/local.go @@ -48,8 +48,8 @@ func (s *LocalState) SetState(state *terraform.State) { s.mu.Lock() defer s.mu.Unlock() - s.state = state - s.readState = state + s.state = state.DeepCopy() + s.readState = state.DeepCopy() } // StateReader impl. @@ -74,9 +74,16 @@ func (s *LocalState) WriteState(state *terraform.State) error { } defer s.stateFileOut.Sync() - s.state = state + s.state = state.DeepCopy() // don't want mutations before we actually get this written to disk - if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil { + if s.readState != nil && s.state != nil { + // We don't trust callers to properly manage serials. Instead, we assume + // that a WriteState is always for the next version after what was + // most recently read. 
+ s.state.Serial = s.readState.Serial + } + + if _, err := s.stateFileOut.Seek(0, io.SeekStart); err != nil { return err } if err := s.stateFileOut.Truncate(0); err != nil { @@ -88,8 +95,9 @@ func (s *LocalState) WriteState(state *terraform.State) error { return nil } - s.state.IncrementSerialMaybe(s.readState) - s.readState = s.state + if !s.state.MarshalEqual(s.readState) { + s.state.Serial++ + } if err := terraform.WriteState(s.state, s.stateFileOut); err != nil { return err @@ -111,8 +119,20 @@ func (s *LocalState) RefreshState() error { s.mu.Lock() defer s.mu.Unlock() + if s.PathOut == "" { + s.PathOut = s.Path + } + var reader io.Reader - if !s.written { + + // The s.Path file is only OK to read if we have not written any state out + // (in which case the same state needs to be read in), and no state output file + // has been opened (possibly via a lock) or the input path is different + // than the output path. + // This is important for Windows, as if the input file is the same as the + // output file, and the output file has been locked already, we can't open + // the file again. + if !s.written && (s.stateFileOut == nil || s.Path != s.PathOut) { // we haven't written a state file yet, so load from Path f, err := os.Open(s.Path) if err != nil { @@ -136,7 +156,7 @@ func (s *LocalState) RefreshState() error { } // we have a state file, make sure we're at the start - s.stateFileOut.Seek(0, os.SEEK_SET) + s.stateFileOut.Seek(0, io.SeekStart) reader = s.stateFileOut } @@ -147,7 +167,7 @@ func (s *LocalState) RefreshState() error { } s.state = state - s.readState = state + s.readState = s.state.DeepCopy() return nil } diff --git a/vendor/github.com/hashicorp/terraform/state/local_lock_unix.go b/vendor/github.com/hashicorp/terraform/state/local_lock_unix.go index ccadeaba..470b3149 100644 --- a/vendor/github.com/hashicorp/terraform/state/local_lock_unix.go +++ b/vendor/github.com/hashicorp/terraform/state/local_lock_unix.go @@ -3,7 +3,7 @@ package state import ( - "os" + "io" "syscall" ) @@ -12,7 +12,7 @@ import ( func (s *LocalState) lock() error { flock := &syscall.Flock_t{ Type: syscall.F_RDLCK | syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } @@ -24,7 +24,7 @@ func (s *LocalState) lock() error { func (s *LocalState) unlock() error { flock := &syscall.Flock_t{ Type: syscall.F_UNLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } diff --git a/vendor/github.com/hashicorp/terraform/state/lock.go b/vendor/github.com/hashicorp/terraform/state/lock.go index b3a03b3e..4839df2a 100644 --- a/vendor/github.com/hashicorp/terraform/state/lock.go +++ b/vendor/github.com/hashicorp/terraform/state/lock.go @@ -1,7 +1,7 @@ package state import ( - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/states" ) // LockDisabled implements State and Locker but disables state locking. 
@@ -13,11 +13,11 @@ type LockDisabled struct { Inner State } -func (s *LockDisabled) State() *terraform.State { +func (s *LockDisabled) State() *states.State { return s.Inner.State() } -func (s *LockDisabled) WriteState(v *terraform.State) error { +func (s *LockDisabled) WriteState(v *states.State) error { return s.Inner.WriteState(v) } diff --git a/vendor/github.com/hashicorp/terraform/state/remote/remote.go b/vendor/github.com/hashicorp/terraform/state/remote/remote.go new file mode 100644 index 00000000..58f2578d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/remote.go @@ -0,0 +1,47 @@ +package remote + +import ( + "fmt" + + "github.com/hashicorp/terraform/state" +) + +// Client is the interface that must be implemented for a remote state +// driver. It supports dumb put/get/delete, and the higher level structs +// handle persisting the state properly here. +type Client interface { + Get() (*Payload, error) + Put([]byte) error + Delete() error +} + +// ClientLocker is an optional interface that allows a remote state +// backend to enable state lock/unlock. +type ClientLocker interface { + Client + state.Locker +} + +// Payload is the return value from the remote state storage. +type Payload struct { + MD5 []byte + Data []byte +} + +// Factory is the factory function to create a remote client. +type Factory func(map[string]string) (Client, error) + +// NewClient returns a new Client with the given type and configuration. +// The client is looked up in the BuiltinClients variable. +func NewClient(t string, conf map[string]string) (Client, error) { + f, ok := BuiltinClients[t] + if !ok { + return nil, fmt.Errorf("unknown remote client type: %s", t) + } + + return f(conf) +} + +// BuiltinClients is the list of built-in clients that can be used with +// NewClient. +var BuiltinClients = map[string]Factory{} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/state.go b/vendor/github.com/hashicorp/terraform/state/remote/state.go new file mode 100644 index 00000000..6d701da8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/state.go @@ -0,0 +1,214 @@ +package remote + +import ( + "bytes" + "fmt" + "sync" + + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/states/statemgr" +) + +// State implements the State interfaces in the state package to handle +// reading and writing the remote state. This State on its own does no +// local caching so every persist will go to the remote storage and local +// writes will go to memory. +type State struct { + mu sync.Mutex + + Client Client + + lineage string + serial uint64 + state, readState *states.State + disableLocks bool +} + +var _ statemgr.Full = (*State)(nil) +var _ statemgr.Migrator = (*State)(nil) + +// statemgr.Reader impl. +func (s *State) State() *states.State { + s.mu.Lock() + defer s.mu.Unlock() + + return s.state.DeepCopy() +} + +// StateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) StateForMigration() *statefile.File { + s.mu.Lock() + defer s.mu.Unlock() + + return statefile.New(s.state.DeepCopy(), s.lineage, s.serial) +} + +// statemgr.Writer impl. 
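The Client interface above is deliberately small. A minimal in-memory sketch (illustrative only, assuming crypto/md5 for the Payload checksum) shows the full surface a storage backend has to cover:

```go
package remote // hypothetical placement alongside the interface above

import "crypto/md5"

// memClient is an illustrative in-memory Client implementation.
type memClient struct {
	data []byte
}

func (c *memClient) Get() (*Payload, error) {
	if c.data == nil {
		return nil, nil // no remote state stored yet
	}
	sum := md5.Sum(c.data)
	return &Payload{MD5: sum[:], Data: c.data}, nil
}

func (c *memClient) Put(data []byte) error {
	c.data = append([]byte(nil), data...) // copy, so callers may reuse their buffer
	return nil
}

func (c *memClient) Delete() error {
	c.data = nil
	return nil
}
```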
+func (s *State) WriteState(state *states.State) error { + s.mu.Lock() + defer s.mu.Unlock() + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = state.DeepCopy() + + return nil +} + +// WriteStateForMigration is part of our implementation of statemgr.Migrator. +func (s *State) WriteStateForMigration(f *statefile.File, force bool) error { + s.mu.Lock() + defer s.mu.Unlock() + + checkFile := statefile.New(s.state, s.lineage, s.serial) + if !force { + if err := statemgr.CheckValidImport(f, checkFile); err != nil { + return err + } + } + + // We create a deep copy of the state here, because the caller also has + // a reference to the given object and can potentially go on to mutate + // it after we return, but we want the snapshot at this point in time. + s.state = f.State.DeepCopy() + s.lineage = f.Lineage + s.serial = f.Serial + + return nil +} + +// statemgr.Refresher impl. +func (s *State) RefreshState() error { + s.mu.Lock() + defer s.mu.Unlock() + return s.refreshState() +} + +// refreshState is the main implementation of RefreshState, but split out so +// that we can make internal calls to it from methods that are already holding +// the s.mu lock. +func (s *State) refreshState() error { + payload, err := s.Client.Get() + if err != nil { + return err + } + + // no remote state is OK + if payload == nil { + s.readState = nil + s.lineage = "" + s.serial = 0 + return nil + } + + stateFile, err := statefile.Read(bytes.NewReader(payload.Data)) + if err != nil { + return err + } + + s.lineage = stateFile.Lineage + s.serial = stateFile.Serial + s.state = stateFile.State + s.readState = s.state.DeepCopy() // our states must be separate instances so we can track changes + return nil +} + +// statemgr.Persister impl. +func (s *State) PersistState() error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.readState != nil { + if statefile.StatesMarshalEqual(s.state, s.readState) { + // If the state hasn't changed at all then we have nothing to do. + return nil + } + s.serial++ + } else { + // We might be writing a new state altogether, but before we do that + // we'll check to make sure there isn't already a snapshot present + // that we ought to be updating. + err := s.refreshState() + if err != nil { + return fmt.Errorf("failed checking for existing remote state: %s", err) + } + if s.lineage == "" { // indicates that no state snapshot is present yet + lineage, err := uuid.GenerateUUID() + if err != nil { + return fmt.Errorf("failed to generate initial lineage: %v", err) + } + s.lineage = lineage + s.serial = 0 + } + } + + f := statefile.New(s.state, s.lineage, s.serial) + + var buf bytes.Buffer + err := statefile.Write(f, &buf) + if err != nil { + return err + } + + err = s.Client.Put(buf.Bytes()) + if err != nil { + return err + } + + // After we've successfully persisted, what we just wrote is our new + // reference state until someone calls RefreshState again. + s.readState = s.state.DeepCopy() + return nil +} + +// Lock calls the Client's Lock method if it's implemented. +func (s *State) Lock(info *state.LockInfo) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return "", nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Lock(info) + } + return "", nil +} + +// Unlock calls the Client's Unlock method if it's implemented. 
+func (s *State) Unlock(id string) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.disableLocks { + return nil + } + + if c, ok := s.Client.(ClientLocker); ok { + return c.Unlock(id) + } + return nil +} + +// DisableLocks turns the Lock and Unlock methods into no-ops. This is intended +// to be called during initialization of a state manager and should not be +// called after any of the statemgr.Full interface methods have been called. +func (s *State) DisableLocks() { + s.disableLocks = true +} + +// StateSnapshotMeta returns the metadata from the most recently persisted +// or refreshed persistent state snapshot. +// +// This is an implementation of statemgr.PersistentMeta. +func (s *State) StateSnapshotMeta() statemgr.SnapshotMeta { + return statemgr.SnapshotMeta{ + Lineage: s.lineage, + Serial: s.serial, + } +} diff --git a/vendor/github.com/hashicorp/terraform/state/remote/testing.go b/vendor/github.com/hashicorp/terraform/state/remote/testing.go new file mode 100644 index 00000000..002f5038 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/state/remote/testing.go @@ -0,0 +1,99 @@ +package remote + +import ( + "bytes" + "testing" + + "github.com/hashicorp/terraform/state" + "github.com/hashicorp/terraform/states/statefile" +) + +// TestClient is a generic function to test any client. +func TestClient(t *testing.T, c Client) { + var buf bytes.Buffer + s := state.TestStateInitial() + sf := statefile.New(s, "stub-lineage", 2) + err := statefile.Write(sf, &buf) + if err != nil { + t.Fatalf("err: %s", err) + } + data := buf.Bytes() + + if err := c.Put(data); err != nil { + t.Fatalf("put: %s", err) + } + + p, err := c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if !bytes.Equal(p.Data, data) { + t.Fatalf("expected full state %q\n\ngot: %q", string(data), string(p.Data)) + } + + if err := c.Delete(); err != nil { + t.Fatalf("delete: %s", err) + } + + p, err = c.Get() + if err != nil { + t.Fatalf("get: %s", err) + } + if p != nil { + t.Fatalf("expected empty state, got: %q", string(p.Data)) + } +} + +// Test the lock implementation for a remote.Client. +// This test requires 2 client instances, in order to have multiple remote +// clients since some implementations may tie the client to the lock, or may +// have reentrant locks. +func TestRemoteLocks(t *testing.T, a, b Client) { + lockerA, ok := a.(state.Locker) + if !ok { + t.Fatal("client A not a state.Locker") + } + + lockerB, ok := b.(state.Locker) + if !ok { + t.Fatal("client B not a state.Locker") + } + + infoA := state.NewLockInfo() + infoA.Operation = "test" + infoA.Who = "clientA" + + infoB := state.NewLockInfo() + infoB.Operation = "test" + infoB.Who = "clientB" + + lockIDA, err := lockerA.Lock(infoA) + if err != nil { + t.Fatal("unable to get initial lock:", err) + } + + _, err = lockerB.Lock(infoB) + if err == nil { + lockerA.Unlock(lockIDA) + t.Fatal("client B obtained lock while held by client A") + } + + if err := lockerA.Unlock(lockIDA); err != nil { + t.Fatal("error unlocking client A", err) + } + + lockIDB, err := lockerB.Lock(infoB) + if err != nil { + t.Fatal("unable to obtain lock from client B:", err) + } + + if lockIDB == lockIDA { + t.Fatalf("duplicate lock IDs: %q", lockIDB) + } + + if err = lockerB.Unlock(lockIDB); err != nil { + t.Fatal("error unlocking client B:", err) + } + + // TODO: Should we enforce that Unlock requires the correct ID?
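Putting the pieces together, a hedged sketch wiring the remote state manager to a Client (reusing the memClient sketch above; states.NewState() is assumed here to construct an empty state):

```go
mgr := &remote.State{Client: &memClient{}}

if err := mgr.RefreshState(); err != nil { // an absent remote snapshot is OK
	log.Fatal(err)
}

st := states.NewState() // assumption: empty-state constructor in the states package
if err := mgr.WriteState(st); err != nil { // snapshot held in memory only
	log.Fatal(err)
}
if err := mgr.PersistState(); err != nil { // assigns lineage/serial and uploads via Client.Put
	log.Fatal(err)
}
```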
+} diff --git a/vendor/github.com/hashicorp/terraform/state/state.go b/vendor/github.com/hashicorp/terraform/state/state.go index 45163852..21cd9038 100644 --- a/vendor/github.com/hashicorp/terraform/state/state.go +++ b/vendor/github.com/hashicorp/terraform/state/state.go @@ -1,78 +1,35 @@ package state import ( - "bytes" "context" - "encoding/json" - "errors" "fmt" - "math/rand" "os" "os/user" - "strings" - "text/template" "time" uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/terraform/terraform" -) -var rngSource *rand.Rand + "github.com/hashicorp/terraform/states/statemgr" + "github.com/hashicorp/terraform/version" +) -func init() { - rngSource = rand.New(rand.NewSource(time.Now().UnixNano())) -} +// State is a deprecated alias for statemgr.Full +type State = statemgr.Full -// State is the collection of all state interfaces. -type State interface { - StateReader - StateWriter - StateRefresher - StatePersister - Locker -} +// StateReader is a deprecated alias for statemgr.Reader +type StateReader = statemgr.Reader -// StateReader is the interface for things that can return a state. Retrieving -// the state here must not error. Loading the state fresh (an operation that -// can likely error) should be implemented by RefreshState. If a state hasn't -// been loaded yet, it is okay for State to return nil. -type StateReader interface { - State() *terraform.State -} +// StateWriter is a deprecated alias for statemgr.Writer +type StateWriter = statemgr.Writer -// StateWriter is the interface that must be implemented by something that -// can write a state. Writing the state can be cached or in-memory, as -// full persistence should be implemented by StatePersister. -type StateWriter interface { - WriteState(*terraform.State) error -} +// StateRefresher is a deprecated alias for statemgr.Refresher +type StateRefresher = statemgr.Refresher -// StateRefresher is the interface that is implemented by something that -// can load a state. This might be refreshing it from a remote location or -// it might simply be reloading it from disk. -type StateRefresher interface { - RefreshState() error -} +// StatePersister is a deprecated alias for statemgr.Persister +type StatePersister = statemgr.Persister -// StatePersister is implemented to truly persist a state. Whereas StateWriter -// is allowed to perhaps be caching in memory, PersistState must write the -// state to some durable storage. -type StatePersister interface { - PersistState() error -} - -// Locker is implemented to lock state during command execution. -// The info parameter can be recorded with the lock, but the -// implementation should not depend in its value. The string returned by Lock -// is an ID corresponding to the lock acquired, and must be passed to Unlock to -// ensure that the correct lock is being released. -// -// Lock and Unlock may return an error value of type LockError which in turn -// can contain the LockInfo of a conflicting lock. -type Locker interface { - Lock(info *LockInfo) (string, error) - Unlock(id string) error -} +// Locker is a deprecated alias for statemgr.Locker +type Locker = statemgr.Locker // test hook to verify that LockWithContext has attempted a lock var postLockHook func() @@ -118,13 +75,7 @@ func LockWithContext(ctx context.Context, s State, info *LockInfo) (string, erro // Generate a LockInfo structure, populating the required fields. 
func NewLockInfo() *LockInfo { - // this doesn't need to be cryptographically secure, just unique. - // Using math/rand alleviates the need to check handle the read error. - // Use a uuid format to match other IDs used throughout Terraform. - buf := make([]byte, 16) - rngSource.Read(buf) - - id, err := uuid.FormatUUID(buf) + id, err := uuid.GenerateUUID() if err != nil { // this of course shouldn't happen panic(err) } @@ -140,84 +91,14 @@ func NewLockInfo() *LockInfo { info := &LockInfo{ ID: id, Who: fmt.Sprintf("%s@%s", userName, host), - Version: terraform.Version, + Version: version.Version, Created: time.Now().UTC(), } return info } -// LockInfo stores lock metadata. -// -// Only Operation and Info are required to be set by the caller of Lock. -type LockInfo struct { - // Unique ID for the lock. NewLockInfo provides a random ID, but this may - // be overridden by the lock implementation. The final value if ID will be - // returned by the call to Lock. - ID string - - // Terraform operation, provided by the caller. - Operation string - // Extra information to store with the lock, provided by the caller. - Info string - - // user@hostname when available - Who string - // Terraform version - Version string - // Time that the lock was taken. - Created time.Time - - // Path to the state file when applicable. Set by the Lock implementation. - Path string -} +// LockInfo is a deprecated alias for statemgr.LockInfo +type LockInfo = statemgr.LockInfo -// Err returns the lock info formatted in an error -func (l *LockInfo) Err() error { - return errors.New(l.String()) -} - -// Marshal returns a string json representation of the LockInfo -func (l *LockInfo) Marshal() []byte { - js, err := json.Marshal(l) - if err != nil { - panic(err) - } - return js -} - -// String return a multi-line string representation of LockInfo -func (l *LockInfo) String() string { - tmpl := `Lock Info: - ID: {{.ID}} - Path: {{.Path}} - Operation: {{.Operation}} - Who: {{.Who}} - Version: {{.Version}} - Created: {{.Created}} - Info: {{.Info}} -` - - t := template.Must(template.New("LockInfo").Parse(tmpl)) - var out bytes.Buffer - if err := t.Execute(&out, l); err != nil { - panic(err) - } - return out.String() -} - -type LockError struct { - Info *LockInfo - Err error -} - -func (e *LockError) Error() string { - var out []string - if e.Err != nil { - out = append(out, e.Err.Error()) - } - - if e.Info != nil { - out = append(out, e.Info.String()) - } - return strings.Join(out, "\n") -} +// LockError is a deprecated alias for statemgr.LockError +type LockError = statemgr.LockError diff --git a/vendor/github.com/hashicorp/terraform/state/testing.go b/vendor/github.com/hashicorp/terraform/state/testing.go index 61f972ca..c0c1fbbc 100644 --- a/vendor/github.com/hashicorp/terraform/state/testing.go +++ b/vendor/github.com/hashicorp/terraform/state/testing.go @@ -1,150 +1,22 @@ package state import ( - "reflect" "testing" - "github.com/hashicorp/terraform/terraform" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statemgr" ) // TestState is a helper for testing state implementations. It is expected // that the given implementation is pre-loaded with the TestStateInitial // state.
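Because the deprecation above uses Go type aliases rather than newly defined types, code written against the old `state` package and code written against `statemgr` see the exact same types, so no adapters are needed. A minimal illustrative check:

```go
// Illustrative only: a statemgr.Full implementation can be used anywhere
// the deprecated state.State interface is expected, because the alias
// makes them the same type.
var mgr statemgr.Full        // e.g. a *remote.State
var legacy state.State = mgr // no conversion or wrapper required
_ = legacy
```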
-func TestState(t *testing.T, s interface{}) { - reader, ok := s.(StateReader) - if !ok { - t.Fatalf("must at least be a StateReader") - } - - // If it implements refresh, refresh - if rs, ok := s.(StateRefresher); ok { - if err := rs.RefreshState(); err != nil { - t.Fatalf("err: %s", err) - } - } - - // current will track our current state - current := TestStateInitial() - - // Check that the initial state is correct - if state := reader.State(); !current.Equal(state) { - t.Fatalf("not initial:\n%#v\n\n%#v", state, current) - } - - // Write a new state and verify that we have it - if ws, ok := s.(StateWriter); ok { - current.AddModuleState(&terraform.ModuleState{ - Path: []string{"root"}, - Outputs: map[string]*terraform.OutputState{ - "bar": &terraform.OutputState{ - Type: "string", - Sensitive: false, - Value: "baz", - }, - }, - }) - - if err := ws.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - - if actual := reader.State(); !actual.Equal(current) { - t.Fatalf("bad:\n%#v\n\n%#v", actual, current) - } - } - - // Test persistence - if ps, ok := s.(StatePersister); ok { - if err := ps.PersistState(); err != nil { - t.Fatalf("err: %s", err) - } - - // Refresh if we got it - if rs, ok := s.(StateRefresher); ok { - if err := rs.RefreshState(); err != nil { - t.Fatalf("err: %s", err) - } - } - - // Just set the serials the same... Then compare. - actual := reader.State() - if !actual.Equal(current) { - t.Fatalf("bad: %#v\n\n%#v", actual, current) - } - } - - // If we can write and persist then verify that the serial - // is only implemented on change. - writer, writeOk := s.(StateWriter) - persister, persistOk := s.(StatePersister) - if writeOk && persistOk { - // Same serial - serial := current.Serial - if err := writer.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - if err := persister.PersistState(); err != nil { - t.Fatalf("err: %s", err) - } - - if reader.State().Serial != serial { - t.Fatalf("bad: expected %d, got %d", serial, reader.State().Serial) - } - - // Change the serial - current = current.DeepCopy() - current.Modules = []*terraform.ModuleState{ - &terraform.ModuleState{ - Path: []string{"root", "somewhere"}, - Outputs: map[string]*terraform.OutputState{ - "serialCheck": &terraform.OutputState{ - Type: "string", - Sensitive: false, - Value: "true", - }, - }, - }, - } - if err := writer.WriteState(current); err != nil { - t.Fatalf("err: %s", err) - } - if err := persister.PersistState(); err != nil { - t.Fatalf("err: %s", err) - } - - if reader.State().Serial <= serial { - t.Fatalf("bad: expected %d, got %d", serial, reader.State().Serial) - } - - // Check that State() returns a copy by modifying the copy and comparing - // to the current state. - stateCopy := reader.State() - stateCopy.Serial++ - if reflect.DeepEqual(stateCopy, current) { - t.Fatal("State() should return a copy") - } - } +func TestState(t *testing.T, s State) { + t.Helper() + statemgr.TestFull(t, s) } // TestStateInitial is the initial state that a State should have // for TestState. 
-func TestStateInitial() *terraform.State { - initial := &terraform.State{ - Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ - Path: []string{"root", "child"}, - Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ - Type: "string", - Sensitive: false, - Value: "bar", - }, - }, - }, - }, - } - - initial.Init() - - return initial +func TestStateInitial() *states.State { + return statemgr.TestFullInitialState() } diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go new file mode 100644 index 00000000..7dd74ac7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/doc.go @@ -0,0 +1,3 @@ +// Package states contains the types that are used to represent Terraform +// states. +package states diff --git a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go new file mode 100644 index 00000000..0dc73499 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go @@ -0,0 +1,35 @@ +// Code generated by "stringer -type EachMode"; DO NOT EDIT. + +package states + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[NoEach-0] + _ = x[EachList-76] + _ = x[EachMap-77] +} + +const ( + _EachMode_name_0 = "NoEach" + _EachMode_name_1 = "EachListEachMap" +) + +var ( + _EachMode_index_1 = [...]uint8{0, 8, 15} +) + +func (i EachMode) String() string { + switch { + case i == 0: + return _EachMode_name_0 + case 76 <= i && i <= 77: + i -= 76 + return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]] + default: + return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go new file mode 100644 index 00000000..617ad4ea --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/instance_generation.go @@ -0,0 +1,24 @@ +package states + +// Generation is used to represent multiple objects in a succession of objects +// represented by a single resource instance address. A resource instance can +// have multiple generations over its lifetime due to object replacement +// (when a change can't be applied without destroying and re-creating), and +// multiple generations can exist at the same time when create_before_destroy +// is used. +// +// A Generation value can either be the value of the variable "CurrentGen" or +// a value of type DeposedKey. Generation values can be compared for equality +// using "==" and used as map keys. The zero value of Generation (nil) is not +// a valid generation and must not be used. +type Generation interface { + generation() +} + +// CurrentGen is the Generation representing the currently-active object for +// a resource instance. 
+var CurrentGen Generation + +type currentGen struct{} + +func (g currentGen) generation() {} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go new file mode 100644 index 00000000..1374c59d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/instance_object.go @@ -0,0 +1,120 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" +) + +// ResourceInstanceObject is the local representation of a specific remote +// object associated with a resource instance. In practice not all remote +// objects are actually remote in the sense of being accessed over the network, +// but this is the most common case. +// +// It is not valid to mutate a ResourceInstanceObject once it has been created. +// Instead, create a new object and replace the existing one. +type ResourceInstanceObject struct { + // Value is the object-typed value representing the remote object within + // Terraform. + Value cty.Value + + // Private is an opaque value set by the provider when this object was + // last created or updated. Terraform Core does not use this value in + // any way and it is not exposed anywhere in the user interface, so + // a provider can use it for retaining any necessary private state. + Private []byte + + // Status represents the "readiness" of the object as of the last time + // it was updated. + Status ObjectStatus + + // Dependencies is a set of other addresses in the same module which + // this instance depended on when the given attributes were evaluated. + // This is used to construct the dependency relationships for an object + // whose configuration is no longer available, such as if it has been + // removed from configuration altogether, or is now deposed. + Dependencies []addrs.Referenceable +} + +// ObjectStatus represents the status of a RemoteObject. +type ObjectStatus rune + +//go:generate stringer -type ObjectStatus + +const ( + // ObjectReady is an object status for an object that is ready to use. + ObjectReady ObjectStatus = 'R' + + // ObjectTainted is an object status representing an object that is in + // an unrecoverable bad state due to a partial failure during a create, + // update, or delete operation. Since it cannot be moved into the + // ObjectReady state, a tainted object must be replaced. + ObjectTainted ObjectStatus = 'T' + + // ObjectPlanned is a special object status used only for the transient + // placeholder objects we place into state during the refresh and plan + // walks to stand in for objects that will be created during apply. + // + // Any object of this status must have a corresponding change recorded + // in the current plan, whose value must then be used in preference to + // the value stored in state when evaluating expressions. A planned + // object stored in state will be incomplete if any of its attributes are + // not yet known, and the plan must be consulted in order to "see" those + // unknown values, because the state is not able to represent them. + ObjectPlanned ObjectStatus = 'P' +) + +// Encode marshals the value within the receiver to produce a +// ResourceInstanceObjectSrc ready to be written to a state file. +// +// The given type must be the implied type of the resource type schema, and +// the given value must conform to it.
It is important to pass the schema +// type and not the object's own type so that dynamically-typed attributes +// will be stored correctly. The caller must also provide the version number +// of the schema that the given type was derived from, which will be recorded +// in the source object so it can be used to detect when schema migration is +// required on read. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once this +// method is called. +func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + // Our state serialization can't represent unknown values, so we convert + // them to nulls here. This is lossy, but nobody should be writing unknown + // values here and expecting to get them out again later. + // + // We get unknown values here while we're building out a "planned state" + // during the plan phase, but the value stored in the plan takes precedence + // for expression evaluation. The apply step should never produce unknown + // values, but if it does it's the responsibility of the caller to detect + // and raise an error about that. + val := cty.UnknownAsNull(o.Value) + + src, err := ctyjson.Marshal(val, ty) + if err != nil { + return nil, err + } + + return &ResourceInstanceObjectSrc{ + SchemaVersion: schemaVersion, + AttrsJSON: src, + Private: o.Private, + Status: o.Status, + Dependencies: o.Dependencies, + }, nil +} + +// AsTainted returns a deep copy of the receiver with the status updated to +// ObjectTainted. +func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject { + if o == nil { + // A nil object can't be tainted, but we'll allow this anyway to + // avoid a crash, since we presumably intend to eventually record + // the object as having been deleted anyway. + return nil + } + ret := o.DeepCopy() + ret.Status = ObjectTainted + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go new file mode 100644 index 00000000..6cb3c27e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go @@ -0,0 +1,113 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config/hcl2shim" +) + +// ResourceInstanceObjectSrc is a not-fully-decoded version of +// ResourceInstanceObject. Decoding of it can be completed by first handling +// any schema migration steps to get to the latest schema version and then +// calling method Decode with the implied type of the latest schema. +type ResourceInstanceObjectSrc struct { + // SchemaVersion is the resource-type-specific schema version number that + // was current when either AttrsJSON or AttrsFlat was encoded. Migration + // steps are required if this is less than the current version number + // reported by the corresponding provider. + SchemaVersion uint64 + + // AttrsJSON is a JSON-encoded representation of the object attributes, + // encoding the value (of the object type implied by the associated resource + // type schema) that represents this remote object in Terraform Language + // expressions, and is compared with configuration when producing a diff.
+ // + // This is retained in JSON format here because it may require preprocessing + // before decoding if, for example, the stored attributes are for an older + // schema version which the provider must upgrade before use. If the + // version is current, it is valid to simply decode this using the + // type implied by the current schema, without the need for the provider + // to perform an upgrade first. + // + // When writing a ResourceInstanceObject into the state, AttrsJSON should + // always be conformant to the current schema version and the current + // schema version should be recorded in the SchemaVersion field. + AttrsJSON []byte + + // AttrsFlat is a legacy form of attributes used in older state file + // formats, and in the new state format for objects that haven't yet been + // upgraded. This attribute is mutually exclusive with Attrs: for any + // ResourceInstanceObject, only one of these attributes may be populated + // and the other must be nil. + + // An instance object with this field populated should be upgraded to use + // Attrs at the earliest opportunity, since this legacy flatmap-based + // format will be phased out over time. AttrsFlat should not be used when + // writing new or updated objects to state; instead, callers must follow + // the recommendations in the AttrsJSON documentation above. + AttrsFlat map[string]string + + // These fields all correspond to the fields of the same name on + // ResourceInstanceObject. + Private []byte + Status ObjectStatus + Dependencies []addrs.Referenceable +} + +// Decode unmarshals the raw representation of the object attributes. Pass the +// implied type of the corresponding resource type schema for correct operation. +// +// Before calling Decode, the caller must check that the SchemaVersion field +// exactly equals the version number of the schema whose implied type is being +// passed, or else the result is undefined. +// +// The returned object may share internal references with the receiver and +// so the caller must not mutate the receiver any further once this +// method is called. +func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) { + var val cty.Value + var err error + if os.AttrsFlat != nil { + // Legacy mode. We'll do our best to unpick this from the flatmap. + val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty) + if err != nil { + return nil, err + } + } else { + val, err = ctyjson.Unmarshal(os.AttrsJSON, ty) + if err != nil { + return nil, err + } + } + + return &ResourceInstanceObject{ + Value: val, + Status: os.Status, + Dependencies: os.Dependencies, + Private: os.Private, + }, nil +} + +// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the +// metadata from the receiver and writing in the given new schema version +// and attribute value that are presumed to have resulted from upgrading +// from an older schema version. +func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) { + new := os.DeepCopy() + new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap + + // This is the same principle as ResourceInstanceObject.Encode, but + // avoiding a decode/re-encode cycle because we don't have type info + // available for the "old" attributes.
+ newAttrs = cty.UnknownAsNull(newAttrs) + src, err := ctyjson.Marshal(newAttrs, newType) + if err != nil { + return nil, err + } + + new.AttrsJSON = src + new.SchemaVersion = newSchemaVersion + return new, nil +} diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go new file mode 100644 index 00000000..d89e7878 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/module.go @@ -0,0 +1,285 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" +) + +// Module is a container for the states of objects within a particular module. +type Module struct { + Addr addrs.ModuleInstance + + // Resources contains the state for each resource. The keys in this map are + // an implementation detail and must not be used by outside callers. + Resources map[string]*Resource + + // OutputValues contains the state for each output value. The keys in this + // map are output value names. + OutputValues map[string]*OutputValue + + // LocalValues contains the value for each named local value. The keys + // in this map are local value names. + LocalValues map[string]cty.Value +} + +// NewModule constructs an empty module state for the given module address. +func NewModule(addr addrs.ModuleInstance) *Module { + return &Module{ + Addr: addr, + Resources: map[string]*Resource{}, + OutputValues: map[string]*OutputValue{}, + LocalValues: map[string]cty.Value{}, + } +} + +// Resource returns the state for the resource with the given address within +// the receiving module state, or nil if the requested resource is not tracked +// in the state. +func (ms *Module) Resource(addr addrs.Resource) *Resource { + return ms.Resources[addr.String()] +} + +// ResourceInstance returns the state for the resource instance with the given +// address within the receiving module state, or nil if the requested instance +// is not tracked in the state. +func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance { + rs := ms.Resource(addr.Resource) + if rs == nil { + return nil + } + return rs.Instance(addr.Key) +} + +// SetResourceMeta updates the resource-level metadata for the resource +// with the given address, creating the resource state for it if it doesn't +// already exist. +func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) { + rs := ms.Resource(addr) + if rs == nil { + rs = &Resource{ + Addr: addr, + Instances: map[addrs.InstanceKey]*ResourceInstance{}, + } + ms.Resources[addr.String()] = rs + } + + rs.EachMode = eachMode + rs.ProviderConfig = provider +} + +// RemoveResource removes the entire state for the given resource, taking with +// it any instances associated with the resource. This should generally be +// called only for resource objects whose instances have all been destroyed. +func (ms *Module) RemoveResource(addr addrs.Resource) { + delete(ms.Resources, addr.String()) +} + +// SetResourceInstanceCurrent saves the given instance object as the current +// generation of the resource instance with the given address, simultaneously +// updating the recorded provider configuration address, dependencies, and +// resource EachMode. +// +// Any existing current instance object for the given resource is overwritten. +// Set obj to nil to remove the primary generation object altogether. If there +// are no deposed objects then the instance will be removed altogether.
+// +// The provider address and "each mode" are resource-wide settings and so they +// are updated for all other instances of the same resource as a side-effect of +// this call. +func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + + is.Current = obj + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider) + + rs := ms.Resource(addr.Resource) + is := rs.EnsureInstance(addr.Key) + if obj != nil { + is.Deposed[key] = obj + } else { + delete(is.Deposed, key) + } + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceAll removes the record of all objects associated with +// the specified resource instance, if present. If not present, this is a no-op. +func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + delete(rs.Instances, addr.Key) + + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// ForgetResourceInstanceDeposed removes the record of the deposed object with +// the given address and key, if present. If not present, this is a no-op. 
+func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) { + rs := ms.Resource(addr.Resource) + if rs == nil { + return + } + is := rs.Instance(addr.Key) + if is == nil { + return + } + delete(is.Deposed, key) + + if !is.HasObjects() { + // If we have no objects at all then we'll clean up. + delete(rs.Instances, addr.Key) + } + if rs.EachMode == NoEach && len(rs.Instances) == 0 { + // Also clean up if we only expect to have one instance anyway + // and there are none. We leave the resource behind if an each mode + // is active because an empty list or map of instances is a valid state. + delete(ms.Resources, addr.Resource.String()) + } +} + +// deposeResourceInstanceObject is the real implementation of +// SyncState.DeposeResourceInstanceObject. +func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey { + is := ms.ResourceInstance(addr) + if is == nil { + return NotDeposed + } + return is.deposeCurrentObject(forceKey) +} + +// maybeRestoreResourceInstanceDeposed is the real implementation of +// SyncState.MaybeRestoreResourceInstanceDeposed. +func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool { + rs := ms.Resource(addr.Resource) + if rs == nil { + return false + } + is := rs.Instance(addr.Key) + if is == nil { + return false + } + if is.Current != nil { + return false + } + obj := is.Deposed[key] + if obj == nil { + // Restoring requires the requested deposed object to actually exist. + return false + } + is.Current = obj + delete(is.Deposed, key) + return true +} + +// SetOutputValue writes an output value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue { + os := &OutputValue{ + Value: value, + Sensitive: sensitive, + } + ms.OutputValues[name] = os + return os +} + +// RemoveOutputValue removes the output value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveOutputValue(name string) { + delete(ms.OutputValues, name) +} + +// SetLocalValue writes a local value into the state, overwriting any +// existing value of the same name. +func (ms *Module) SetLocalValue(name string, value cty.Value) { + ms.LocalValues[name] = value +} + +// RemoveLocalValue removes the local value of the given name from the state, +// if it exists. This method is a no-op if there is no value of the given +// name. +func (ms *Module) RemoveLocalValue(name string) { + delete(ms.LocalValues, name) +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// You probably shouldn't call this! See the method of the same name on +// type State for more information on what this is for and the rare situations +// where it is safe to use. +func (ms *Module) PruneResourceHusks() { + for _, rs := range ms.Resources { + if len(rs.Instances) == 0 { + ms.RemoveResource(rs.Addr) + } + } +} + +// empty returns true if the receiving module state is contributing nothing +// to the state. In other words, it returns true if the module could be +// removed from the state altogether without changing the meaning of the state. +// +// In practice a module containing no objects is the same as a non-existent +// module, and so we can opportunistically clean up once a module becomes +// empty on the assumption that it will be re-added if needed later.
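Putting these pieces together, the sketch below encodes an object against its schema's implied type and records it as the current generation for an instance, using `Encode` from instance_object.go and the `Module` methods above. It is illustrative only: the resource type, attribute names, and the `addrs.ProviderConfig{...}.Absolute(...)` construction are assumptions, not part of this change.

```go
// Build the decoded form of a remote object (names are hypothetical).
obj := &states.ResourceInstanceObject{
	Value: cty.ObjectVal(map[string]cty.Value{
		"name":    cty.StringVal("net1"),
		"vlan_id": cty.NumberIntVal(100),
	}),
	Status: states.ObjectReady,
}

// The schema's implied type, passed explicitly so dynamically-typed
// attributes are stored correctly.
ty := cty.Object(map[string]cty.Type{
	"name":    cty.String,
	"vlan_id": cty.Number,
})

src, err := obj.Encode(ty, 0) // schema version 0
if err != nil {
	log.Fatal(err)
}

// Record it as the current object for oneview_ethernet_network.net.
ms := states.NewModule(addrs.RootModuleInstance)
ms.SetResourceInstanceCurrent(
	addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "oneview_ethernet_network",
		Name: "net",
	}.Instance(addrs.NoKey),
	src,
	addrs.ProviderConfig{Type: "oneview"}.Absolute(addrs.RootModuleInstance),
)

// Decode reverses Encode when given the same implied type.
back, err := src.Decode(ty)
if err != nil {
	log.Fatal(err)
}
fmt.Println(back.Value.RawEquals(obj.Value)) // true
```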
+func (ms *Module) empty() bool { + if ms == nil { + return true + } + + // This must be updated to cover any new collections added to Module + // in future. + return (len(ms.Resources) == 0 && + len(ms.OutputValues) == 0 && + len(ms.LocalValues) == 0) +} diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go new file mode 100644 index 00000000..96a6db2f --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go @@ -0,0 +1,33 @@ +// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT. + +package states + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ObjectReady-82] + _ = x[ObjectTainted-84] + _ = x[ObjectPlanned-80] +} + +const ( + _ObjectStatus_name_0 = "ObjectPlanned" + _ObjectStatus_name_1 = "ObjectReady" + _ObjectStatus_name_2 = "ObjectTainted" +) + +func (i ObjectStatus) String() string { + switch { + case i == 80: + return _ObjectStatus_name_0 + case i == 82: + return _ObjectStatus_name_1 + case i == 84: + return _ObjectStatus_name_2 + default: + return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go new file mode 100644 index 00000000..d232b76d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/output_value.go @@ -0,0 +1,14 @@ +package states + +import ( + "github.com/zclconf/go-cty/cty" +) + +// OutputValue represents the state of a particular output value. +// +// It is not valid to mutate an OutputValue object once it has been created. +// Instead, create an entirely new OutputValue to replace the previous one. +type OutputValue struct { + Value cty.Value + Sensitive bool +} diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go new file mode 100644 index 00000000..e2a2b858 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/resource.go @@ -0,0 +1,239 @@ +package states + +import ( + "fmt" + "math/rand" + "time" + + "github.com/hashicorp/terraform/addrs" +) + +// Resource represents the state of a resource. +type Resource struct { + // Addr is the module-relative address for the resource this state object + // belongs to. + Addr addrs.Resource + + // EachMode is the multi-instance mode currently in use for this resource, + // or NoEach if this is a single-instance resource. This dictates what + // type of value is returned when accessing this resource via expressions + // in the Terraform language. + EachMode EachMode + + // Instances contains the potentially-multiple instances associated with + // this resource. This map can contain a mixture of different key types, + // but only the ones of InstanceKeyType are considered current. + Instances map[addrs.InstanceKey]*ResourceInstance + + // ProviderConfig is the absolute address for the provider configuration that + // most recently managed this resource. This is used to connect a resource + // with a provider configuration when the resource configuration block is + // not available, such as if it has been removed from configuration + // altogether. 
+ ProviderConfig addrs.AbsProviderConfig +} + +// Instance returns the state for the instance with the given key, or nil +// if no such instance is tracked within the state. +func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance { + return rs.Instances[key] +} + +// EnsureInstance returns the state for the instance with the given key, +// creating a new empty state for it if one doesn't already exist. +// +// Because this may create and save a new state, it is considered to be +// a write operation. +func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance { + ret := rs.Instance(key) + if ret == nil { + ret = NewResourceInstance() + rs.Instances[key] = ret + } + return ret +} + +// ResourceInstance represents the state of a particular instance of a resource. +type ResourceInstance struct { + // Current, if non-nil, is the remote object that is currently represented + // by the corresponding resource instance. + Current *ResourceInstanceObjectSrc + + // Deposed, if len > 0, contains any remote objects that were previously + // represented by the corresponding resource instance but have been + // replaced and are pending destruction due to the create_before_destroy + // lifecycle mode. + Deposed map[DeposedKey]*ResourceInstanceObjectSrc +} + +// NewResourceInstance constructs and returns a new ResourceInstance, ready to +// use. +func NewResourceInstance() *ResourceInstance { + return &ResourceInstance{ + Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{}, + } +} + +// HasCurrent returns true if this resource instance has a "current"-generation +// object. Most instances do, but this can briefly be false during a +// create-before-destroy replace operation when the current has been deposed +// but its replacement has not yet been created. +func (i *ResourceInstance) HasCurrent() bool { + return i != nil && i.Current != nil +} + +// HasDeposed returns true if this resource instance has a deposed object +// with the given key. +func (i *ResourceInstance) HasDeposed(key DeposedKey) bool { + return i != nil && i.Deposed[key] != nil +} + +// HasAnyDeposed returns true if this resource instance has one or more +// deposed objects. +func (i *ResourceInstance) HasAnyDeposed() bool { + return i != nil && len(i.Deposed) > 0 +} + +// HasObjects returns true if this resource has any objects at all, whether +// current or deposed. +func (i *ResourceInstance) HasObjects() bool { + return i.Current != nil || len(i.Deposed) != 0 +} + +// deposeCurrentObject is part of the real implementation of +// SyncState.DeposeResourceInstanceObject. The exported method uses a lock +// to ensure that we can safely allocate an unused deposed key without +// collision. +func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey { + if !i.HasCurrent() { + return NotDeposed + } + + key := forceKey + if key == NotDeposed { + key = i.findUnusedDeposedKey() + } else { + if _, exists := i.Deposed[key]; exists { + panic(fmt.Sprintf("forced key %s is already in use", forceKey)) + } + } + i.Deposed[key] = i.Current + i.Current = nil + return key +} + +// GetGeneration retrieves the object of the given generation from the +// ResourceInstance, or returns nil if there is no such object. +// +// If the given generation is nil or invalid, this method will panic. 
+func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc { + if gen == CurrentGen { + return i.Current + } + if dk, ok := gen.(DeposedKey); ok { + return i.Deposed[dk] + } + if gen == nil { + panic("get with nil Generation") + } + // Should never fall out here, since the above covers all possible + // Generation values. + panic(fmt.Sprintf("get invalid Generation %#v", gen)) +} + +// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance at the time of the call. +// +// Note that the validity of this result may change if new deposed keys are +// allocated before it is used. To avoid this risk, instead use the +// DeposeResourceInstanceObject method on the SyncState wrapper type, which +// allocates a key and uses it atomically. +func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey { + return i.findUnusedDeposedKey() +} + +// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to +// already be in use for this instance. +func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey { + for { + key := NewDeposedKey() + if _, exists := i.Deposed[key]; !exists { + return key + } + // Spin until we find a unique one. This shouldn't take long, because + // we have a 32-bit keyspace and there's rarely more than one deposed + // instance. + } +} + +// EachMode specifies the multi-instance mode for a resource. +type EachMode rune + +const ( + NoEach EachMode = 0 + EachList EachMode = 'L' + EachMap EachMode = 'M' +) + +//go:generate stringer -type EachMode + +func eachModeForInstanceKey(key addrs.InstanceKey) EachMode { + switch key.(type) { + case addrs.IntKey: + return EachList + case addrs.StringKey: + return EachMap + default: + if key == addrs.NoKey { + return NoEach + } + panic(fmt.Sprintf("don't know an each mode for instance key %#v", key)) + } +} + +// DeposedKey is an 8-character hex string used to uniquely identify deposed +// instance objects in the state. +type DeposedKey string + +// NotDeposed is a special invalid value of DeposedKey that is used to represent +// the absence of a deposed key. It must not be used as an actual deposed key. +const NotDeposed = DeposedKey("") + +var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano())) + +// NewDeposedKey generates a pseudo-random deposed key. Because of the short +// length of these keys, uniqueness is not a natural consequence and so the +// caller should test to see if the generated key is already in use and generate +// another if so, until a unique key is found. +func NewDeposedKey() DeposedKey { + v := deposedKeyRand.Uint32() + return DeposedKey(fmt.Sprintf("%08x", v)) +} + +func (k DeposedKey) String() string { + return string(k) +} + +func (k DeposedKey) GoString() string { + ks := string(k) + switch { + case ks == "": + return "states.NotDeposed" + default: + return fmt.Sprintf("states.DeposedKey(%s)", ks) + } +} + +// Generation is a helper method to convert a DeposedKey into a Generation. +// If the receiver is anything other than NotDeposed then the result is +// just the same value as a Generation. If the receiver is NotDeposed then +// the result is CurrentGen. +func (k DeposedKey) Generation() Generation { + if k == NotDeposed { + return CurrentGen + } + return k +} + +// generation is an implementation of Generation.
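The deposed-key mechanics above can be seen end to end with a small sketch. It is illustrative only; real callers go through the SyncState wrapper so that key allocation and the move happen atomically.

```go
is := states.NewResourceInstance()
is.Current = &states.ResourceInstanceObjectSrc{Status: states.ObjectReady}

// Move the current object aside, as deposeCurrentObject does under
// SyncState's lock: pick an unused key and file the object under it.
key := is.FindUnusedDeposedKey()
is.Deposed[key] = is.Current
is.Current = nil

fmt.Println(is.HasCurrent())    // false
fmt.Println(is.HasDeposed(key)) // true

// A DeposedKey is itself a Generation, so it can be used to address
// the object it identifies.
obj := is.GetGeneration(key.Generation())
fmt.Println(obj != nil) // true
```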
+func (k DeposedKey) generation() {} diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go new file mode 100644 index 00000000..1f842359 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state.go @@ -0,0 +1,229 @@ +package states + +import ( + "sort" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" +) + +// State is the top-level type of a Terraform state. +// +// A state should be mutated only via its accessor methods, to ensure that +// invariants are preserved. +// +// Access to State and the nested values within it is not concurrency-safe, +// so when accessing a State object concurrently it is the caller's +// responsibility to ensure that only one write is in progress at a time +// and that reads only occur when no write is in progress. The most common +// way to achieve this is to wrap the State in a SyncState and use the +// higher-level atomic operations supported by that type. +type State struct { + // Modules contains the state for each module. The keys in this map are + // an implementation detail and must not be used by outside callers. + Modules map[string]*Module +} + +// NewState constructs a minimal empty state, containing an empty root module. +func NewState() *State { + modules := map[string]*Module{} + modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance) + return &State{ + Modules: modules, + } +} + +// BuildState is a helper -- primarily intended for tests -- to build a state +// using imperative code against the SyncState type while still acting as +// an expression of type *State to assign into a containing struct. +func BuildState(cb func(*SyncState)) *State { + s := NewState() + cb(s.SyncWrapper()) + return s +} + +// Empty returns true if there are no resources or populated output values +// in the receiver. In other words, if this state could be safely replaced +// with the return value of NewState and be functionally equivalent. +func (s *State) Empty() bool { + if s == nil { + return true + } + for _, ms := range s.Modules { + if len(ms.Resources) != 0 { + return false + } + if len(ms.OutputValues) != 0 { + return false + } + } + return true +} + +// Module returns the state for the module with the given address, or nil if +// the requested module is not tracked in the state. +func (s *State) Module(addr addrs.ModuleInstance) *Module { + if s == nil { + panic("State.Module on nil *State") + } + return s.Modules[addr.String()] +} + +// RemoveModule removes the module with the given address from the state, +// unless it is the root module. The root module cannot be deleted, and so +// this method will panic if that is attempted. +// +// Removing a module implicitly discards all of the resources, outputs and +// local values within it, and so this should usually be done only for empty +// modules. For callers accessing the state through a SyncState wrapper, modules +// are automatically pruned if they are empty after one of their contained +// elements is removed. +func (s *State) RemoveModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + panic("attempted to remove root module") + } + + delete(s.Modules, addr.String()) +} + +// RootModule is a convenient alias for Module(addrs.RootModuleInstance).
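As a quick orientation to this top-level API, the sketch below builds a state and demonstrates the `Empty` semantics documented above, using the `RootModule` convenience alias named just above and the `Module.SetOutputValue` method from module.go. The output name and value are made up for illustration.

```go
s := states.NewState()
fmt.Println(s.Empty()) // true: a fresh state holds only an empty root module

s.RootModule().SetOutputValue(
	"appliance_url", cty.StringVal("https://oneview.example.net"), false,
)
fmt.Println(s.Empty()) // false: a populated output value counts as content
```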
+func (s *State) RootModule() *Module { + if s == nil { + panic("RootModule called on nil State") + } + return s.Modules[addrs.RootModuleInstance.String()] +} + +// EnsureModule returns the state for the module with the given address, +// creating and adding a new one if necessary. +// +// Since this might modify the state to add a new instance, it is considered +// to be a write operation. +func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module { + ms := s.Module(addr) + if ms == nil { + ms = NewModule(addr) + s.Modules[addr.String()] = ms + } + return ms +} + +// HasResources returns true if there is at least one resource (of any mode) +// present in the receiving state. +func (s *State) HasResources() bool { + if s == nil { + return false + } + for _, ms := range s.Modules { + if len(ms.Resources) > 0 { + return true + } + } + return false +} + +// Resource returns the state for the resource with the given address, or nil +// if no such resource is tracked in the state. +func (s *State) Resource(addr addrs.AbsResource) *Resource { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.Resource(addr.Resource) +} + +// ResourceInstance returns the state for the resource instance with the given +// address, or nil if no such resource is tracked in the state. +func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance { + if s == nil { + panic("State.ResourceInstance on nil *State") + } + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.ResourceInstance(addr.Resource) +} + +// OutputValue returns the state for the output value with the given address, +// or nil if no such output value is tracked in the state. +func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue { + ms := s.Module(addr.Module) + if ms == nil { + return nil + } + return ms.OutputValues[addr.OutputValue.Name] +} + +// LocalValue returns the value of the named local value with the given address, +// or cty.NilVal if no such value is tracked in the state. +func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value { + ms := s.Module(addr.Module) + if ms == nil { + return cty.NilVal + } + return ms.LocalValues[addr.LocalValue.Name] +} + +// ProviderAddrs returns a list of all of the provider configuration addresses +// referenced throughout the receiving state. +// +// The result is de-duplicated so that each distinct address appears only once. +func (s *State) ProviderAddrs() []addrs.AbsProviderConfig { + if s == nil { + return nil + } + + m := map[string]addrs.AbsProviderConfig{} + for _, ms := range s.Modules { + for _, rc := range ms.Resources { + m[rc.ProviderConfig.String()] = rc.ProviderConfig + } + } + if len(m) == 0 { + return nil + } + + // This is mainly just so we'll get stable results for testing purposes. + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + ret := make([]addrs.AbsProviderConfig, len(keys)) + for i, key := range keys { + ret[i] = m[key] + } + + return ret +} + +// PruneResourceHusks is a specialized method that will remove any Resource +// objects that do not contain any instances, even if they have an EachMode. +// +// This should generally be used only after a "terraform destroy" operation, +// to finalize the cleanup of the state. 
It is not correct to use this after +// other operations because if a resource has "count = 0" or "for_each" over +// an empty collection then we want to retain it in the state so that references +// to it, particularly in "strange" contexts like "terraform console", can be +// properly resolved. +// +// This method MUST NOT be called concurrently with other readers and writers +// of the receiving state. +func (s *State) PruneResourceHusks() { + for _, m := range s.Modules { + m.PruneResourceHusks() + if len(m.Resources) == 0 && !m.Addr.IsRoot() { + s.RemoveModule(m.Addr) + } + } +} + +// SyncWrapper returns a SyncState object wrapping the receiver. +func (s *State) SyncWrapper() *SyncState { + return &SyncState{ + state: s, + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go new file mode 100644 index 00000000..8664f3be --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go @@ -0,0 +1,221 @@ +package states + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" +) + +// Taking deep copies of states is an important operation because state is +// otherwise a mutable data structure that is challenging to share across +// many separate callers. It is important that the DeepCopy implementations +// in this file comprehensively copy all parts of the state data structure +// that could be mutated via pointers. + +// DeepCopy returns a new state that contains equivalent data to the receiver +// but shares no backing memory in common. +// +// As with all methods on State, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (s *State) DeepCopy() *State { + if s == nil { + return nil + } + + modules := make(map[string]*Module, len(s.Modules)) + for k, m := range s.Modules { + modules[k] = m.DeepCopy() + } + return &State{ + Modules: modules, + } +} + +// DeepCopy returns a new module state that contains equivalent data to the +// receiver but shares no backing memory in common. +// +// As with all methods on Module, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (ms *Module) DeepCopy() *Module { + if ms == nil { + return nil + } + + resources := make(map[string]*Resource, len(ms.Resources)) + for k, r := range ms.Resources { + resources[k] = r.DeepCopy() + } + outputValues := make(map[string]*OutputValue, len(ms.OutputValues)) + for k, v := range ms.OutputValues { + outputValues[k] = v.DeepCopy() + } + localValues := make(map[string]cty.Value, len(ms.LocalValues)) + for k, v := range ms.LocalValues { + // cty.Value is immutable, so we don't need to copy these.
+ localValues[k] = v + } + + return &Module{ + Addr: ms.Addr, // technically mutable, but immutable by convention + Resources: resources, + OutputValues: outputValues, + LocalValues: localValues, + } +} + +// DeepCopy returns a new resource state that contains equivalent data to the +// receiver but shares no backing memory in common. +// +// As with all methods on Resource, this method is not safe to use concurrently +// with writing to any portion of the receiving data structure. It is the +// caller's responsibility to ensure mutual exclusion for the duration of the +// operation, but may then freely modify the receiver and the returned copy +// independently once this method returns. +func (rs *Resource) DeepCopy() *Resource { + if rs == nil { + return nil + } + + instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances)) + for k, i := range rs.Instances { + instances[k] = i.DeepCopy() + } + + return &Resource{ + Addr: rs.Addr, + EachMode: rs.EachMode, + Instances: instances, + ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention + } +} + +// DeepCopy returns a new resource instance state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstance, this method is not safe to use +// concurrently with writing to any portion of the receiving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (is *ResourceInstance) DeepCopy() *ResourceInstance { + if is == nil { + return nil + } + + deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed)) + for k, obj := range is.Deposed { + deposed[k] = obj.DeepCopy() + } + + return &ResourceInstance{ + Current: is.Current.DeepCopy(), + Deposed: deposed, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObjectSrc, this method is not safe to +// use concurrently with writing to any portion of the receiving data structure. +// It is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc { + if obj == nil { + return nil + } + + var attrsFlat map[string]string + if obj.AttrsFlat != nil { + attrsFlat = make(map[string]string, len(obj.AttrsFlat)) + for k, v := range obj.AttrsFlat { + attrsFlat[k] = v + } + } + + var attrsJSON []byte + if obj.AttrsJSON != nil { + attrsJSON = make([]byte, len(obj.AttrsJSON)) + copy(attrsJSON, obj.AttrsJSON) + } + + var private []byte + if obj.Private != nil { + private = make([]byte, len(obj.Private)) + copy(private, obj.Private) + } + + // Some addrs.Referenceable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here.
+ dependencies := make([]addrs.Referenceable, len(obj.Dependencies)) + copy(dependencies, obj.Dependencies) + + return &ResourceInstanceObjectSrc{ + Status: obj.Status, + SchemaVersion: obj.SchemaVersion, + Private: private, + AttrsFlat: attrsFlat, + AttrsJSON: attrsJSON, + Dependencies: dependencies, + } +} + +// DeepCopy returns a new resource instance object that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on ResourceInstanceObject, this method is not safe to use +// concurrently with writing to any portion of the recieving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject { + if obj == nil { + return nil + } + + var private []byte + if obj.Private != nil { + private = make([]byte, len(obj.Private)) + copy(private, obj.Private) + } + + // Some addrs.Referenceable implementations are technically mutable, but + // we treat them as immutable by convention and so we don't deep-copy here. + var dependencies []addrs.Referenceable + if obj.Dependencies != nil { + dependencies = make([]addrs.Referenceable, len(obj.Dependencies)) + copy(dependencies, obj.Dependencies) + } + + return &ResourceInstanceObject{ + Value: obj.Value, + Status: obj.Status, + Private: private, + Dependencies: dependencies, + } +} + +// DeepCopy returns a new output value state that contains equivalent data +// to the receiver but shares no backing memory in common. +// +// As with all methods on OutputValue, this method is not safe to use +// concurrently with writing to any portion of the recieving data structure. It +// is the caller's responsibility to ensure mutual exclusion for the duration +// of the operation, but may then freely modify the receiver and the returned +// copy independently once this method returns. +func (os *OutputValue) DeepCopy() *OutputValue { + if os == nil { + return nil + } + + return &OutputValue{ + Value: os.Value, + Sensitive: os.Sensitive, + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go new file mode 100644 index 00000000..ea20967e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_equal.go @@ -0,0 +1,18 @@ +package states + +import ( + "reflect" +) + +// Equal returns true if the receiver is functionally equivalent to other, +// including any ephemeral portions of the state that would not be included +// if the state were saved to files. +// +// To test only the persistent portions of two states for equality, instead +// use statefile.StatesMarshalEqual. +func (s *State) Equal(other *State) bool { + // For the moment this is sufficient, but we may need to do something + // more elaborate in future if we have any portions of state that require + // more sophisticated comparisons. 
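For readers unfamiliar with `reflect.DeepEqual`, here is a small standalone sketch of its semantics, using an invented `state` struct rather than the real `states.State`. The nil-versus-empty caveat at the end is one reason a more sophisticated comparison might eventually be needed:

```go
package main

import (
	"fmt"
	"reflect"
)

type state struct {
	Modules map[string]map[string]string
}

func main() {
	a := &state{Modules: map[string]map[string]string{
		"root": {"output": "ok"},
	}}
	b := &state{Modules: map[string]map[string]string{
		"root": {"output": "ok"},
	}}
	// DeepEqual follows pointers and compares contents, not identities.
	fmt.Println(reflect.DeepEqual(a, b)) // true

	// Caveat: DeepEqual distinguishes a nil map from an empty map.
	c := &state{Modules: map[string]map[string]string{}}
	d := &state{}
	fmt.Println(reflect.DeepEqual(c, d)) // false
}
```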
+ return reflect.DeepEqual(s, other) +} diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go new file mode 100644 index 00000000..bca4581c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/state_string.go @@ -0,0 +1,279 @@ +package states + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "sort" + "strings" + + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config/hcl2shim" +) + +// String returns a rather-odd string representation of the entire state. +// +// This is intended to match the behavior of the older terraform.State.String +// method that is used in lots of existing tests. It should not be used in +// new tests: instead, use "cmp" to directly compare the state data structures +// and print out a diff if they do not match. +// +// This method should never be used in non-test code, whether directly by call +// or indirectly via a %s or %q verb in package fmt. +func (s *State) String() string { + if s == nil { + return "" + } + + // sort the modules by name for consistent output + modules := make([]string, 0, len(s.Modules)) + for m := range s.Modules { + modules = append(modules, m) + } + sort.Strings(modules) + + var buf bytes.Buffer + for _, name := range modules { + m := s.Modules[name] + mStr := m.testString() + + // If we're the root module, we just write the output directly. + if m.Addr.IsRoot() { + buf.WriteString(mStr + "\n") + continue + } + + // We need to build out a string that resembles the not-quite-standard + // format that terraform.State.String used to use, where there's a + // "module." prefix but then just a chain of all of the module names + // without any further "module." portions. + buf.WriteString("module") + for _, step := range m.Addr { + buf.WriteByte('.') + buf.WriteString(step.Name) + if step.InstanceKey != addrs.NoKey { + buf.WriteByte('[') + buf.WriteString(step.InstanceKey.String()) + buf.WriteByte(']') + } + } + buf.WriteString(":\n") + + s := bufio.NewScanner(strings.NewReader(mStr)) + for s.Scan() { + text := s.Text() + if text != "" { + text = " " + text + } + + buf.WriteString(fmt.Sprintf("%s\n", text)) + } + } + + return strings.TrimSpace(buf.String()) +} + +// testString is used to produce part of the output of State.String. It should +// never be used directly. +func (m *Module) testString() string { + var buf bytes.Buffer + + if len(m.Resources) == 0 { + buf.WriteString("") + } + + // We use AbsResourceInstance here, even though everything belongs to + // the same module, just because we have a sorting behavior defined + // for those but not for just ResourceInstance. + addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources)) + for _, rs := range m.Resources { + for ik := range rs.Instances { + addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance)) + } + } + + sort.Slice(addrsOrder, func(i, j int) bool { + return addrsOrder[i].Less(addrsOrder[j]) + }) + + for _, fakeAbsAddr := range addrsOrder { + addr := fakeAbsAddr.Resource + rs := m.Resource(addr.ContainingResource()) + is := m.ResourceInstance(addr) + + // Here we need to fake up a legacy-style address as the old state + // types would've used, since that's what our tests against those + // old types expect. The significant difference is that instancekey + // is dot-separated rather than using index brackets. 
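The dot-separated legacy form can be illustrated with a short sketch; `legacyKey` is a hypothetical helper written for illustration, not part of the vendored code:

```go
package main

import "fmt"

// legacyKey renders an instance address the way the pre-0.12 state string
// did: integer keys are dot-separated ("aws_instance.foo.0") instead of
// using the modern bracket syntax ("aws_instance.foo[0]").
func legacyKey(resource string, key interface{}) string {
	switch k := key.(type) {
	case int:
		return fmt.Sprintf("%s.%d", resource, k)
	case string:
		// No legacy form existed for string keys, so modern syntax is kept.
		return fmt.Sprintf("%s[%q]", resource, k)
	default:
		return resource // no key at all
	}
}

func main() {
	fmt.Println(legacyKey("aws_instance.foo", 0))     // aws_instance.foo.0
	fmt.Println(legacyKey("aws_instance.foo", "bar")) // aws_instance.foo["bar"]
	fmt.Println(legacyKey("aws_instance.foo", nil))   // aws_instance.foo
}
```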
+ k := addr.ContainingResource().String() + if addr.Key != addrs.NoKey { + switch tk := addr.Key.(type) { + case addrs.IntKey: + k = fmt.Sprintf("%s.%d", k, tk) + default: + // No other key types existed for the legacy types, so we + // can do whatever we want here. We'll just use our standard + // syntax for these. + k = k + tk.String() + } + } + + id := LegacyInstanceObjectID(is.Current) + + taintStr := "" + if is.Current != nil && is.Current.Status == ObjectTainted { + taintStr = " (tainted)" + } + + deposedStr := "" + if len(is.Deposed) > 0 { + deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed)) + } + + buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr)) + buf.WriteString(fmt.Sprintf(" ID = %s\n", id)) + buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String())) + + // Attributes were a flatmap before, but are not anymore. To preserve + // our old output as closely as possible we need to do a conversion + // to flatmap. Normally we'd want to do this with schema for + // accuracy, but for our purposes here it only needs to be approximate. + // This should produce an identical result for most cases, though + // in particular will differ in a few cases: + // - The keys used for elements in a set will be different + // - Values for attributes of type cty.DynamicPseudoType will be + // misinterpreted (but these weren't possible in old world anyway) + var attributes map[string]string + if obj := is.Current; obj != nil { + switch { + case obj.AttrsFlat != nil: + // Easy (but increasingly unlikely) case: the state hasn't + // actually been upgraded to the new form yet. + attributes = obj.AttrsFlat + case obj.AttrsJSON != nil: + ty, err := ctyjson.ImpliedType(obj.AttrsJSON) + if err == nil { + val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty) + if err == nil { + attributes = hcl2shim.FlatmapValueFromHCL2(val) + } + } + } + } + attrKeys := make([]string, 0, len(attributes)) + for ak, val := range attributes { + if ak == "id" { + continue + } + + // don't show empty containers in the output + if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) { + continue + } + + attrKeys = append(attrKeys, ak) + } + + sort.Strings(attrKeys) + + for _, ak := range attrKeys { + av := attributes[ak] + buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av)) + } + + // CAUTION: Since deposed keys are now random strings instead of + // incrementing integers, this result will not be deterministic + // if there is more than one deposed object. 
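The attribute filtering shown above (skip `id`, skip zero counts of empty containers, sort the rest) can be reproduced in isolation; `renderAttrs` below is a hypothetical helper written for illustration:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderAttrs mimics the filtering in testString: drop "id", drop counts of
// empty containers, then emit the remaining attributes in sorted order.
func renderAttrs(attrs map[string]string) []string {
	keys := make([]string, 0, len(attrs))
	for k, v := range attrs {
		if k == "id" {
			continue
		}
		// "foo.# = 0" and "foo.% = 0" describe empty lists/maps; skip them.
		if v == "0" && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)

	lines := make([]string, 0, len(keys))
	for _, k := range keys {
		lines = append(lines, fmt.Sprintf("  %s = %s", k, attrs[k]))
	}
	return lines
}

func main() {
	attrs := map[string]string{
		"id":        "i-1234",
		"tags.%":    "0",
		"ami":       "ami-abc",
		"subnets.#": "2",
	}
	// ami and subnets.# survive; id and the empty tags.% do not.
	for _, line := range renderAttrs(attrs) {
		fmt.Println(line)
	}
}
```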
+ i := 1 + for _, t := range is.Deposed { + id := LegacyInstanceObjectID(t) + taintStr := "" + if t.Status == ObjectTainted { + taintStr = " (tainted)" + } + buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr)) + i++ + } + + if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 { + buf.WriteString(fmt.Sprintf("\n Dependencies:\n")) + for _, dep := range obj.Dependencies { + buf.WriteString(fmt.Sprintf(" %s\n", dep.String())) + } + } + } + + if len(m.OutputValues) > 0 { + buf.WriteString("\nOutputs:\n\n") + + ks := make([]string, 0, len(m.OutputValues)) + for k := range m.OutputValues { + ks = append(ks, k) + } + sort.Strings(ks) + + for _, k := range ks { + v := m.OutputValues[k] + lv := hcl2shim.ConfigValueFromHCL2(v.Value) + switch vTyped := lv.(type) { + case string: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case []interface{}: + buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped)) + case map[string]interface{}: + var mapKeys []string + for key := range vTyped { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + var mapBuf bytes.Buffer + mapBuf.WriteString("{") + for _, key := range mapKeys { + mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key])) + } + mapBuf.WriteString("}") + + buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String())) + default: + buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv)) + } + } + } + + return buf.String() +} + +// LegacyInstanceObjectID is a helper for extracting an object id value from +// an instance object in a way that approximates how we used to do this +// for the old state types. ID is no longer first-class, so this is preserved +// only for compatibility with old tests that include the id as part of their +// expected value. +func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string { + if obj == nil { + return "" + } + + if obj.AttrsJSON != nil { + type WithID struct { + ID string `json:"id"` + } + var withID WithID + err := json.Unmarshal(obj.AttrsJSON, &withID) + if err == nil { + return withID.ID + } + } else if obj.AttrsFlat != nil { + if flatID, exists := obj.AttrsFlat["id"]; exists { + return flatID + } + } + + // For resource types created after we removed id as special there may + // not actually be one at all. This is okay because older tests won't + // encounter this, and new tests shouldn't be using ids. + return "" +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go new file mode 100644 index 00000000..a6d88ecd --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go @@ -0,0 +1,62 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" +) + +const invalidFormat = "Invalid state file format" + +// jsonUnmarshalDiags is a helper that translates errors returned from +// json.Unmarshal into hopefully-more-helpful diagnostics messages. +func jsonUnmarshalDiags(err error) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + if err == nil { + return diags + } + + switch tErr := err.(type) { + case *json.SyntaxError: + // We've usually already successfully parsed a source file as JSON at + // least once before we'd use jsonUnmarshalDiags with it (to sniff + // the version number) so this particular error should not appear much + // in practice. 
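The error-type dispatch used by `jsonUnmarshalDiags` is plain `encoding/json` behavior, sketched here standalone; `describe` is an invented stand-in that returns strings instead of diagnostics:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// describe converts a json.Unmarshal error into a user-facing message,
// branching on the concrete error type the same way jsonUnmarshalDiags does.
func describe(err error) string {
	if err == nil {
		return "no error"
	}
	switch tErr := err.(type) {
	case *json.SyntaxError:
		return fmt.Sprintf("syntax error at byte offset %d", tErr.Offset)
	case *json.UnmarshalTypeError:
		return fmt.Sprintf("field %q: cannot use JSON %s", tErr.Field, tErr.Value)
	default:
		return err.Error()
	}
}

func main() {
	var v struct {
		Version uint64 `json:"version"`
	}
	// A type mismatch produces *json.UnmarshalTypeError...
	fmt.Println(describe(json.Unmarshal([]byte(`{"version": "four"}`), &v)))
	// ...and malformed input produces *json.SyntaxError.
	fmt.Println(describe(json.Unmarshal([]byte(`{"version":`), &v)))
}
```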
+ diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + // This is likely to be the most common area, describing a + // non-conformance between the file and the expected file format + // at a semantic level. + if tErr.Field != "" { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value), + )) + break + } else { + // Without a field name, we can't really say anything helpful. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + "The state file does not conform to the expected JSON data structure.", + )) + } + default: + // Fallback for all other types of errors. This can happen only for + // custom UnmarshalJSON implementations, so should be encountered + // only rarely. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + invalidFormat, + fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()), + )) + } + + return diags +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go new file mode 100644 index 00000000..625d0cf4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go @@ -0,0 +1,3 @@ +// Package statefile deals with the file format used to serialize states for +// persistent storage and then deserialize them into memory again later. +package statefile diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go new file mode 100644 index 00000000..6e202401 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/file.go @@ -0,0 +1,62 @@ +package statefile + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/states" + tfversion "github.com/hashicorp/terraform/version" +) + +// File is the in-memory representation of a state file. It includes the state +// itself along with various metadata used to track changing state files for +// the same configuration over time. +type File struct { + // TerraformVersion is the version of Terraform that wrote this state file. + TerraformVersion *version.Version + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial uint64 + + // Lineage is set when a new, blank state file is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string + + // State is the actual state represented by this file. + State *states.State +} + +func New(state *states.State, lineage string, serial uint64) *File { + // To make life easier on callers, we'll accept a nil state here and just + // allocate an empty one, which is required for this file to be successfully + // written out. 
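The Serial and Lineage fields above exist to answer one question: can this snapshot safely replace that one? Below is a hedged sketch of how a caller might use them; the `supersedes` rule and the lineage strings are illustrative, not an API of this package:

```go
package main

import "fmt"

// file is a trimmed stand-in for statefile.File: just the metadata used to
// detect conflicting updates. Lineage identifies a line of descent; Serial
// orders snapshots within one lineage.
type file struct {
	Lineage string
	Serial  uint64
}

// supersedes reports whether b can safely replace a: same lineage, and a
// serial that is not older. Comparing serials across lineages is meaningless.
func supersedes(a, b file) bool {
	return a.Lineage == b.Lineage && b.Serial >= a.Serial
}

func main() {
	current := file{Lineage: "3f8a9c21", Serial: 7}
	update := file{Lineage: "3f8a9c21", Serial: 8}
	fmt.Println(supersedes(current, update)) // true: same lineage, newer serial

	foreign := file{Lineage: "9c21aa04", Serial: 100}
	fmt.Println(supersedes(current, foreign)) // false: different lineage
}
```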
+ if state == nil { + state = states.NewState() + } + + return &File{ + TerraformVersion: tfversion.SemVer, + State: state, + Lineage: lineage, + Serial: serial, + } +} + +// DeepCopy is a convenience method to create a new File object whose state +// is a deep copy of the receiver's, as implemented by states.State.DeepCopy. +func (f *File) DeepCopy() *File { + if f == nil { + return nil + } + return &File{ + TerraformVersion: f.TerraformVersion, + Serial: f.Serial, + Lineage: f.Lineage, + State: f.State.DeepCopy(), + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go new file mode 100644 index 00000000..4948b39b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go @@ -0,0 +1,40 @@ +package statefile + +import ( + "bytes" + + "github.com/hashicorp/terraform/states" +) + +// StatesMarshalEqual returns true if and only if the two given states have +// an identical (byte-for-byte) statefile representation. +// +// This function compares only the portions of the state that are persisted +// in state files, so for example it will not return false if the only +// differences between the two states are local values or descendent module +// outputs. +func StatesMarshalEqual(a, b *states.State) bool { + var aBuf bytes.Buffer + var bBuf bytes.Buffer + + // nil states are not valid states, and so they can never martial equal. + if a == nil || b == nil { + return false + } + + // We write here some temporary files that have no header information + // populated, thus ensuring that we're only comparing the state itself + // and not any metadata. + err := Write(&File{State: a}, &aBuf) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + err = Write(&File{State: b}, &bBuf) + if err != nil { + // Should never happen, because we're writing to an in-memory buffer + panic(err) + } + + return bytes.Equal(aBuf.Bytes(), bBuf.Bytes()) +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go new file mode 100644 index 00000000..d691c029 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/read.go @@ -0,0 +1,209 @@ +package statefile + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/tfdiags" + tfversion "github.com/hashicorp/terraform/version" +) + +// ErrNoState is returned by ReadState when the state file is empty. +var ErrNoState = errors.New("no state") + +// Read reads a state from the given reader. +// +// Legacy state format versions 1 through 3 are supported, but the result will +// contain object attributes in the deprecated "flatmap" format and so must +// be upgraded by the caller before use. +// +// If the state file is empty, the special error value ErrNoState is returned. +// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics +// potentially describing multiple errors. +func Read(r io.Reader) (*File, error) { + // Some callers provide us a "typed nil" *os.File here, which would + // cause us to panic below if we tried to use it. 
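The "typed nil" guard in the comment above protects against a classic Go pitfall: a nil `*os.File` stored in an `io.Reader` interface makes the interface value itself non-nil. A standalone demonstration:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// read guards against a "typed nil": a nil *os.File stored in a non-nil
// io.Reader interface value, exactly the case described above.
func read(r io.Reader) error {
	if f, ok := r.(*os.File); ok && f == nil {
		return fmt.Errorf("no state")
	}
	// ... would read from r here ...
	return nil
}

func main() {
	var f *os.File // nil pointer
	var r io.Reader = f

	// The interface itself is NOT nil, because it carries a type (*os.File).
	fmt.Println(r == nil) // false

	// A plain nil check would sail past the bad reader; the type assertion
	// above is what actually catches it.
	fmt.Println(read(r)) // no state
}
```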
+ if f, ok := r.(*os.File); ok && f == nil { + return nil, ErrNoState + } + + var diags tfdiags.Diagnostics + + // We actually just buffer the whole thing in memory, because states are + // generally not huge and we need to do be able to sniff for a version + // number before full parsing. + src, err := ioutil.ReadAll(r) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to read state file", + fmt.Sprintf("The state file could not be read: %s", err), + )) + return nil, diags.Err() + } + + if len(src) == 0 { + return nil, ErrNoState + } + + state, diags := readState(src) + if diags.HasErrors() { + return nil, diags.Err() + } + + if state == nil { + // Should never happen + panic("readState returned nil state with no errors") + } + + if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) { + return state, fmt.Errorf( + "state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state", + state.TerraformVersion, + tfversion.SemVer, + state.TerraformVersion, + ) + } + + return state, diags.Err() +} + +func readState(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + if looksLikeVersion0(src) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.", + )) + return nil, diags + } + + version, versionDiags := sniffJSONStateVersion(src) + diags = diags.Append(versionDiags) + if versionDiags.HasErrors() { + return nil, diags + } + + switch version { + case 0: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.", + )) + return nil, diags + case 1: + return readStateV1(src) + case 2: + return readStateV2(src) + case 3: + return readStateV3(src) + case 4: + return readStateV4(src) + default: + thisVersion := tfversion.SemVer.String() + creatingVersion := sniffJSONStateTerraformVersion(src) + switch { + case creatingVersion != "": + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. 
This state file may have been created by a newer version of Terraform.", version, thisVersion), + )) + } + return nil, diags + } +} + +func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + type VersionSniff struct { + Version *uint64 `json:"version"` + } + var sniff VersionSniff + err := json.Unmarshal(src, &sniff) + if err != nil { + switch tErr := err.(type) { + case *json.SyntaxError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset), + )) + case *json.UnmarshalTypeError: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value), + )) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file could not be parsed as JSON.", + )) + } + } + + if sniff.Version == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + unsupportedFormat, + "The state file does not have a \"version\" attribute, which is required to identify the format version.", + )) + return 0, diags + } + + return *sniff.Version, diags +} + +// sniffJSONStateTerraformVersion attempts to sniff the Terraform version +// specification from the given state file source code. The result is either +// a version string or an empty string if no version number could be extracted. +// +// This is a best-effort function intended to produce nicer error messages. It +// should not be used for any real processing. +func sniffJSONStateTerraformVersion(src []byte) string { + type VersionSniff struct { + Version string `json:"terraform_version"` + } + var sniff VersionSniff + + err := json.Unmarshal(src, &sniff) + if err != nil { + return "" + } + + // Attempt to parse the string as a version so we won't report garbage + // as a version number. + _, err = version.NewVersion(sniff.Version) + if err != nil { + return "" + } + + return sniff.Version +} + +// unsupportedFormat is a diagnostic summary message for when the state file +// seems to not be a state file at all, or is not a supported version. +// +// Use invalidFormat instead for the subtly-different case of "this looks like +// it's intended to be a state file but it's not structured correctly". +const unsupportedFormat = "Unsupported state file format" + +const upgradeFailed = "State format upgrade failed" diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go new file mode 100644 index 00000000..9b533317 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go @@ -0,0 +1,23 @@ +package statefile + +// looksLikeVersion0 sniffs for the signature indicating a version 0 state +// file. +// +// Version 0 was the number retroactively assigned to Terraform's initial +// (unversioned) binary state file format, which was later superseded by the +// version 1 format in JSON. +// +// Version 0 is no longer supported, so this is used only to detect it and +// return a nice error to the user. +func looksLikeVersion0(src []byte) bool { + // Version 0 files begin with the magic prefix "tfstate". 
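Putting the two sniffing stages together (binary magic first, then a JSON `version` attribute), here is a compressed standalone sketch; `sniffVersion` merges logic that the vendored code spreads across `looksLikeVersion0` and `sniffJSONStateVersion`:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// sniffVersion applies the same two-stage sniff as readState: first check
// for the legacy binary magic, then decode only the "version" attribute.
func sniffVersion(src []byte) (uint64, error) {
	if bytes.HasPrefix(src, []byte("tfstate")) {
		return 0, fmt.Errorf("legacy binary (version 0) state is not supported")
	}
	var sniff struct {
		Version *uint64 `json:"version"`
	}
	if err := json.Unmarshal(src, &sniff); err != nil {
		return 0, fmt.Errorf("not parseable as JSON: %v", err)
	}
	if sniff.Version == nil {
		return 0, fmt.Errorf("no \"version\" attribute")
	}
	return *sniff.Version, nil
}

func main() {
	v, err := sniffVersion([]byte(`{"version": 4, "serial": 1}`))
	fmt.Println(v, err) // 4 <nil>

	_, err = sniffVersion([]byte("tfstate\x00"))
	fmt.Println(err) // legacy binary (version 0) state is not supported
}
```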
+ const magic = "tfstate" + if len(src) < len(magic) { + // Not even long enough to have the magic prefix + return false + } + if string(src[0:len(magic)]) == magic { + return true + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go new file mode 100644 index 00000000..80d711bc --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go @@ -0,0 +1,174 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV1(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV1 := &stateV1{} + err := json.Unmarshal(src, sV1) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV1(sV1) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2, err := upgradeStateV1ToV2(sV1) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV1 is a representation of the legacy JSON state format version 1. +// +// It is only used to read version 1 JSON files prior to upgrading them to +// the current format. +type stateV1 struct { + // Version is the protocol version. "1" for a StateV1. + Version int `json:"version"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV1 `json:"remote,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV1 `json:"modules"` +} + +type remoteStateV1 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type moduleStateV1 struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]string `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV1 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. 
+ // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` +} + +type resourceStateV1 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on,omitempty"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV1 `json:"primary"` + + // Tainted is used to track any underlying instances that + // have been created but are in a bad or unknown state and + // need to be cleaned up subsequently. In the + // standard case, there is only at most a single instance. + // However, in pathological cases, it is possible for the number + // of instances to accumulate. + Tainted []*instanceStateV1 `json:"tainted,omitempty"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. If there were problems creating the + // replacement, the instance remains in the Deposed list so it can be + // destroyed in a future run. Functionally, Deposed instances are very + // similar to Tainted instances in that Terraform is only tracking them in + // order to remember to destroy them. + Deposed []*instanceStateV1 `json:"deposed,omitempty"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider,omitempty"` +} + +type instanceStateV1 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes,omitempty"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. 
+ Meta map[string]string `json:"meta,omitempty"` +} + +type ephemeralStateV1 struct { + // ConnInfo is used for the providers to export information which is + // used to connect to the resource for provisioning. For example, + // this could contain SSH or WinRM credentials. + ConnInfo map[string]string `json:"-"` +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go new file mode 100644 index 00000000..0b417e1c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go @@ -0,0 +1,172 @@ +package statefile + +import ( + "fmt" + "log" + + "github.com/mitchellh/copystructure" +) + +// upgradeStateV1ToV2 is used to upgrade a V1 state representation +// into a V2 state representation +func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) { + log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2") + if old == nil { + return nil, nil + } + + remote, err := old.Remote.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + + modules := make([]*moduleStateV2, len(old.Modules)) + for i, module := range old.Modules { + upgraded, err := module.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading State V1: %v", err) + } + modules[i] = upgraded + } + if len(modules) == 0 { + modules = nil + } + + newState := &stateV2{ + Version: 2, + Serial: old.Serial, + Remote: remote, + Modules: modules, + } + + return newState, nil +} + +func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) { + if old == nil { + return nil, nil + } + + config, err := copystructure.Copy(old.Config) + if err != nil { + return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err) + } + + return &remoteStateV2{ + Type: old.Type, + Config: config.(map[string]string), + }, nil +} + +func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) { + if old == nil { + return nil, nil + } + + pathRaw, err := copystructure.Copy(old.Path) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + path, ok := pathRaw.([]string) + if !ok { + return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings") + } + if len(path) == 0 { + // We found some V1 states with a nil path. Assume root. 
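The output upgrade performed a few lines below is mechanical enough to show in isolation; this sketch reuses only the v2 output shape and is not the vendored function itself:

```go
package main

import "fmt"

// outputStateV2 mirrors the v2 output structure the upgrade code targets:
// v1 stored outputs as bare strings, v2 wraps them with a type and a
// sensitivity flag.
type outputStateV2 struct {
	Type      string
	Value     interface{}
	Sensitive bool
}

// upgradeOutputs performs the same mechanical wrap as moduleStateV1.upgradeToV2:
// every v1 output becomes a non-sensitive, string-typed v2 output.
func upgradeOutputs(old map[string]string) map[string]*outputStateV2 {
	outputs := make(map[string]*outputStateV2, len(old))
	for key, value := range old {
		outputs[key] = &outputStateV2{
			Type:      "string",
			Value:     value,
			Sensitive: false,
		}
	}
	return outputs
}

func main() {
	v1 := map[string]string{"instance_ip": "10.0.0.4"}
	v2 := upgradeOutputs(v1)
	fmt.Printf("instance_ip: %+v\n", *v2["instance_ip"])
	// instance_ip: {Type:string Value:10.0.0.4 Sensitive:false}
}
```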
+ path = []string{"root"} + } + + // Outputs needs upgrading to use the new structure + outputs := make(map[string]*outputStateV2) + for key, output := range old.Outputs { + outputs[key] = &outputStateV2{ + Type: "string", + Value: output, + Sensitive: false, + } + } + + resources := make(map[string]*resourceStateV2) + for key, oldResource := range old.Resources { + upgraded, err := oldResource.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + resources[key] = upgraded + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err) + } + + return &moduleStateV2{ + Path: path, + Outputs: outputs, + Resources: resources, + Dependencies: dependencies.([]string), + }, nil +} + +func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) { + if old == nil { + return nil, nil + } + + dependencies, err := copystructure.Copy(old.Dependencies) + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + primary, err := old.Primary.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + + deposed := make([]*instanceStateV2, len(old.Deposed)) + for i, v := range old.Deposed { + upgraded, err := v.upgradeToV2() + if err != nil { + return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err) + } + deposed[i] = upgraded + } + if len(deposed) == 0 { + deposed = nil + } + + return &resourceStateV2{ + Type: old.Type, + Dependencies: dependencies.([]string), + Primary: primary, + Deposed: deposed, + Provider: old.Provider, + }, nil +} + +func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) { + if old == nil { + return nil, nil + } + + attributes, err := copystructure.Copy(old.Attributes) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + meta, err := copystructure.Copy(old.Meta) + if err != nil { + return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err) + } + + newMeta := make(map[string]interface{}) + for k, v := range meta.(map[string]string) { + newMeta[k] = v + } + + return &instanceStateV2{ + ID: old.ID, + Attributes: attributes.(map[string]string), + Meta: newMeta, + }, nil +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go new file mode 100644 index 00000000..be93924a --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go @@ -0,0 +1,209 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV2(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV2 := &stateV2{} + err := json.Unmarshal(src, sV2) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV2(sV2) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3, err := upgradeStateV2ToV3(sV2) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags 
+} + +// stateV2 is a representation of the legacy JSON state format version 2. +// +// It is only used to read version 2 JSON files prior to upgrading them to +// the current format. +type stateV2 struct { + // Version is the state file protocol version. + Version int `json:"version"` + + // TFVersion is the version of Terraform that wrote this state. + TFVersion string `json:"terraform_version,omitempty"` + + // Serial is incremented on any operation that modifies + // the State file. It is used to detect potentially conflicting + // updates. + Serial int64 `json:"serial"` + + // Lineage is set when a new, blank state is created and then + // never updated. This allows us to determine whether the serials + // of two states can be meaningfully compared. + // Apart from the guarantee that collisions between two lineages + // are very unlikely, this value is opaque and external callers + // should only compare lineage strings byte-for-byte for equality. + Lineage string `json:"lineage"` + + // Remote is used to track the metadata required to + // pull and push state files from a remote storage endpoint. + Remote *remoteStateV2 `json:"remote,omitempty"` + + // Backend tracks the configuration for the backend in use with + // this state. This is used to track any changes in the backend + // configuration. + Backend *backendStateV2 `json:"backend,omitempty"` + + // Modules contains all the modules in a breadth-first order + Modules []*moduleStateV2 `json:"modules"` +} + +type remoteStateV2 struct { + // Type controls the client we use for the remote state + Type string `json:"type"` + + // Config is used to store arbitrary configuration that + // is type specific + Config map[string]string `json:"config"` +} + +type outputStateV2 struct { + // Sensitive describes whether the output is considered sensitive, + // which may lead to masking the value on screen in some cases. + Sensitive bool `json:"sensitive"` + // Type describes the structure of Value. Valid values are "string", + // "map" and "list" + Type string `json:"type"` + // Value contains the value of the output, in the structure described + // by the Type field. + Value interface{} `json:"value"` + + mu sync.Mutex +} + +type moduleStateV2 struct { + // Path is the import path from the root module. Modules imports are + // always disjoint, so the path represents amodule tree + Path []string `json:"path"` + + // Locals are kept only transiently in-memory, because we can always + // re-compute them. + Locals map[string]interface{} `json:"-"` + + // Outputs declared by the module and maintained for each module + // even though only the root module technically needs to be kept. + // This allows operators to inspect values at the boundaries. + Outputs map[string]*outputStateV2 `json:"outputs"` + + // Resources is a mapping of the logically named resource to + // the state of the resource. Each resource may actually have + // N instances underneath, although a user only needs to think + // about the 1:1 case. + Resources map[string]*resourceStateV2 `json:"resources"` + + // Dependencies are a list of things that this module relies on + // existing to remain intact. For example: an module may depend + // on a VPC ID given by an aws_vpc resource. + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a module that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. 
If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` +} + +type resourceStateV2 struct { + // This is filled in and managed by Terraform, and is the resource + // type itself such as "mycloud_instance". If a resource provider sets + // this value, it won't be persisted. + Type string `json:"type"` + + // Dependencies are a list of things that this resource relies on + // existing to remain intact. For example: an AWS instance might + // depend on a subnet (which itself might depend on a VPC, and so + // on). + // + // Terraform uses this information to build valid destruction + // orders and to warn the user if they're destroying a resource that + // another resource depends on. + // + // Things can be put into this list that may not be managed by + // Terraform. If Terraform doesn't find a matching ID in the + // overall state, then it assumes it isn't managed and doesn't + // worry about it. + Dependencies []string `json:"depends_on"` + + // Primary is the current active instance for this resource. + // It can be replaced but only after a successful creation. + // This is the instances on which providers will act. + Primary *instanceStateV2 `json:"primary"` + + // Deposed is used in the mechanics of CreateBeforeDestroy: the existing + // Primary is Deposed to get it out of the way for the replacement Primary to + // be created by Apply. If the replacement Primary creates successfully, the + // Deposed instance is cleaned up. + // + // If there were problems creating the replacement Primary, the Deposed + // instance and the (now tainted) replacement Primary will be swapped so the + // tainted replacement will be cleaned up instead. + // + // An instance will remain in the Deposed list until it is successfully + // destroyed and purged. + Deposed []*instanceStateV2 `json:"deposed"` + + // Provider is used when a resource is connected to a provider with an alias. + // If this string is empty, the resource is connected to the default provider, + // e.g. "aws_instance" goes with the "aws" provider. + // If the resource block contained a "provider" key, that value will be set here. + Provider string `json:"provider"` + + mu sync.Mutex +} + +type instanceStateV2 struct { + // A unique ID for this resource. This is opaque to Terraform + // and is only meant as a lookup mechanism for the providers. + ID string `json:"id"` + + // Attributes are basic information about the resource. Any keys here + // are accessible in variable format within Terraform configurations: + // ${resourcetype.name.attribute}. + Attributes map[string]string `json:"attributes"` + + // Meta is a simple K/V map that is persisted to the State but otherwise + // ignored by Terraform core. It's meant to be used for accounting by + // external client code. The value here must only contain Go primitives + // and collections. + Meta map[string]interface{} `json:"meta"` + + // Tainted is used to mark a resource for recreation. 
+ Tainted bool `json:"tainted"` +} + +type backendStateV2 struct { + Type string `json:"type"` // Backend type + ConfigRaw json.RawMessage `json:"config"` // Backend raw config + Hash uint64 `json:"hash"` // Hash of portion of configuration from config files +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go new file mode 100644 index 00000000..2d03c07c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go @@ -0,0 +1,145 @@ +package statefile + +import ( + "fmt" + "log" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/mitchellh/copystructure" +) + +func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) { + if old == nil { + return (*stateV3)(nil), nil + } + + var new *stateV3 + { + copy, err := copystructure.Config{Lock: true}.Copy(old) + if err != nil { + panic(err) + } + newWrongType := copy.(*stateV2) + newRightType := (stateV3)(*newWrongType) + new = &newRightType + } + + // Set the new version number + new.Version = 3 + + // Change the counts for things which look like maps to use the % + // syntax. Remove counts for empty collections - they will be added + // back in later. + for _, module := range new.Modules { + for _, resource := range module.Resources { + // Upgrade Primary + if resource.Primary != nil { + upgradeAttributesV2ToV3(resource.Primary) + } + + // Upgrade Deposed + for _, deposed := range resource.Deposed { + upgradeAttributesV2ToV3(deposed) + } + } + } + + return new, nil +} + +func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error { + collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`) + collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`) + + // Identify the key prefix of anything which is a collection + var collectionKeyPrefixes []string + for key := range instanceState.Attributes { + if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1]) + } + } + sort.Strings(collectionKeyPrefixes) + + log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes) + + // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not + // run very often. + for _, prefix := range collectionKeyPrefixes { + // First get the actual keys that belong to this prefix + var potentialKeysMatching []string + for key := range instanceState.Attributes { + if strings.HasPrefix(key, prefix) { + potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix)) + } + } + sort.Strings(potentialKeysMatching) + + var actualKeysMatching []string + for _, key := range potentialKeysMatching { + if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 { + actualKeysMatching = append(actualKeysMatching, submatches[0][1]) + } else { + if key != "#" { + actualKeysMatching = append(actualKeysMatching, key) + } + } + } + actualKeysMatching = uniqueSortedStrings(actualKeysMatching) + + // Now inspect the keys in order to determine whether this is most likely to be + // a map, list or set. There is room for error here, so we log in each case. If + // there is no method of telling, we remove the key from the InstanceState in + // order that it will be recreated. Again, this could be rolled into fewer loops + // but we prefer clarity. 
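The map-detection rule that follows (any non-numeric element key means the collection is a map, so its count key moves from `.#` to `.%`) can be exercised standalone; `fixCountKey` is a simplified stand-in for the corresponding part of `upgradeAttributesV2ToV3`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// fixCountKey applies the V2->V3 rule for one collection prefix: if any
// element key is non-numeric the collection is treated as a map, so its
// count moves from "prefix.#" to "prefix.%".
func fixCountKey(attrs map[string]string, prefix string, elemKeys []string) {
	hasNonNumericKeys := false
	for _, key := range elemKeys {
		if _, err := strconv.Atoi(key); err != nil {
			hasNonNumericKeys = true
		}
	}
	if hasNonNumericKeys {
		old := prefix + "#"
		attrs[prefix+"%"] = attrs[old]
		delete(attrs, old)
		fmt.Printf("detected %s as a map\n", strings.TrimSuffix(prefix, "."))
	}
}

func main() {
	attrs := map[string]string{
		"tags.#":    "2",
		"tags.Name": "web",
		"tags.Env":  "prod",
	}
	fixCountKey(attrs, "tags.", []string{"Name", "Env"})
	fmt.Println(attrs) // the count now lives under tags.% instead of tags.#
}
```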
+ + oldCountKey := fmt.Sprintf("%s#", prefix) + + // First, detect "obvious" maps - which have non-numeric keys (mostly). + hasNonNumericKeys := false + for _, key := range actualKeysMatching { + if _, err := strconv.Atoi(key); err != nil { + hasNonNumericKeys = true + } + } + if hasNonNumericKeys { + newCountKey := fmt.Sprintf("%s%%", prefix) + + instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey] + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s", + strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey]) + } + + // Now detect empty collections and remove them from state. + if len(actualKeysMatching) == 0 { + delete(instanceState.Attributes, oldCountKey) + log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.", + strings.TrimSuffix(prefix, ".")) + } + } + + return nil +} + +// uniqueSortedStrings removes duplicates from a slice of strings and returns +// a sorted slice of the unique strings. +func uniqueSortedStrings(input []string) []string { + uniquemap := make(map[string]struct{}) + for _, str := range input { + uniquemap[str] = struct{}{} + } + + output := make([]string, len(uniquemap)) + + i := 0 + for key := range uniquemap { + output[i] = key + i = i + 1 + } + + sort.Strings(output) + return output +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go new file mode 100644 index 00000000..ab6414b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go @@ -0,0 +1,50 @@ +package statefile + +import ( + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV3(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV3 := &stateV3{} + err := json.Unmarshal(src, sV3) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV3(sV3) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4, err := upgradeStateV3ToV4(sV3) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + upgradeFailed, + fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err), + )) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +// stateV2 is a representation of the legacy JSON state format version 3. +// +// It is only used to read version 3 JSON files prior to upgrading them to +// the current format. +// +// The differences between version 2 and version 3 are only in the data and +// not in the structure, so stateV3 actually shares the same structs as +// stateV2. Type stateV3 represents that the data within is formatted as +// expected by the V3 format, rather than the V2 format. 
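The declaration just below relies on Go's conversion rule for named types with identical underlying structure; here is a tiny self-contained illustration using invented two-field structs:

```go
package main

import "fmt"

type stateV2 struct {
	Version int
	Serial  int64
}

// stateV3 shares stateV2's structure; the distinct name records which
// format's semantics the data follows, just like the vendored declaration.
type stateV3 stateV2

func main() {
	old := &stateV2{Version: 2, Serial: 9}

	// Types with the same underlying structure convert directly; no field
	// copying is needed. This is the same trick upgradeStateV2ToV3 uses
	// after deep-copying the V2 value.
	new3 := stateV3(*old)
	new3.Version = 3

	fmt.Printf("%+v -> %+v\n", *old, new3) // {Version:2 Serial:9} -> {Version:3 Serial:9}
}
```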
+type stateV3 stateV2 diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go new file mode 100644 index 00000000..2cbe8a53 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go @@ -0,0 +1,431 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) { + + if old.Serial < 0 { + // The new format is using uint64 here, which should be fine for any + // real state (we only used positive integers in practice) but we'll + // catch this explicitly here to avoid weird behavior if a state file + // has been tampered with in some way. + return nil, fmt.Errorf("state has serial less than zero, which is invalid") + } + + new := &stateV4{ + TerraformVersion: old.TFVersion, + Serial: uint64(old.Serial), + Lineage: old.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + if new.TerraformVersion == "" { + // Older formats considered this to be optional, but now it's required + // and so we'll stub it out with something that's definitely older + // than the version that really created this state. + new.TerraformVersion = "0.0.0" + } + + for _, msOld := range old.Modules { + if len(msOld.Path) < 1 || msOld.Path[0] != "root" { + return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path) + } + + // Convert legacy-style module address into our newer address type. + // Since these old formats are only generated by versions of Terraform + // that don't support count and for_each on modules, we can just assume + // all of the modules are unkeyed. + moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1) + for i, name := range msOld.Path[1:] { + moduleAddr[i] = addrs.ModuleInstanceStep{ + Name: name, + InstanceKey: addrs.NoKey, + } + } + + // In a v3 state file, a "resource state" is actually an instance + // state, so we need to fill in a missing level of heirarchy here + // by lazily creating resource states as we encounter them. + // We'll track them in here, keyed on the string representation of + // the resource address. + resourceStates := map[string]*resourceStateV4{} + + for legacyAddr, rsOld := range msOld.Resources { + instAddr, err := parseLegacyResourceAddress(legacyAddr) + if err != nil { + return nil, err + } + + resAddr := instAddr.Resource + rs, exists := resourceStates[resAddr.String()] + if !exists { + var modeStr string + switch resAddr.Mode { + case addrs.ManagedResourceMode: + modeStr = "managed" + case addrs.DataResourceMode: + modeStr = "data" + default: + return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr) + } + + // In state versions prior to 4 we allowed each instance of a + // resource to have its own provider configuration address, + // which makes no real sense in practice because providers + // are associated with resources in the configuration. We + // elevate that to the resource level during this upgrade, + // implicitly taking the provider address of the first instance + // we encounter for each resource. 
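The path conversion described above can be sketched without the `addrs` package; `moduleAddr` below is an invented helper that renders the result as a plain string rather than an `addrs.ModuleInstance`:

```go
package main

import (
	"fmt"
	"strings"
)

// moduleAddr reproduces the legacy path conversion: a V3 path like
// ["root","network","subnets"] drops the leading "root" and each remaining
// name becomes an unkeyed module instance step.
func moduleAddr(path []string) (string, error) {
	if len(path) < 1 || path[0] != "root" {
		return "", fmt.Errorf("state contains invalid module path %#v", path)
	}
	steps := make([]string, 0, len(path)-1)
	for _, name := range path[1:] {
		// No instance key: these formats predate count/for_each on modules.
		steps = append(steps, "module."+name)
	}
	return strings.Join(steps, "."), nil
}

func main() {
	addr, _ := moduleAddr([]string{"root", "network", "subnets"})
	fmt.Println(addr) // module.network.module.subnets

	_, err := moduleAddr([]string{"network"})
	fmt.Println(err) // state contains invalid module path []string{"network"}
}
```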
While this is lossy in + // theory, in practice there is no reason for these values to + // differ between instances. + var providerAddr addrs.AbsProviderConfig + oldProviderAddr := rsOld.Provider + if strings.Contains(oldProviderAddr, "provider.") { + // Smells like a new-style provider address, but we'll test it. + var diags tfdiags.Diagnostics + providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + } else { + // Smells like an old-style module-local provider address, + // which we'll need to migrate. We'll assume it's referring + // to the same module the resource is in, which might be + // incorrect but it'll get fixed up next time any updates + // are made to an instance. + if oldProviderAddr != "" { + localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr) + if diags.HasErrors() { + return nil, diags.Err() + } + providerAddr = localAddr.Absolute(moduleAddr) + } else { + providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr) + } + } + + rs = &resourceStateV4{ + Module: moduleAddr.String(), + Mode: modeStr, + Type: resAddr.Type, + Name: resAddr.Name, + Instances: []instanceObjectStateV4{}, + ProviderConfig: providerAddr.String(), + } + resourceStates[resAddr.String()] = rs + } + + // Now we'll deal with the instance itself, which may either be + // the first instance in a resource we just created or an additional + // instance for a resource added on a prior loop. + instKey := instAddr.Key + if isOld := rsOld.Primary; isOld != nil { + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed) + if err != nil { + return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + for i, isOld := range rsOld.Deposed { + // When we migrate old instances we'll use sequential deposed + // keys just so that the upgrade result is deterministic. New + // deposed keys allocated moving forward will be pseudorandomly + // selected, but we check for collisions and so these + // non-random ones won't hurt. + deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1)) + isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey) + if err != nil { + return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err) + } + rs.Instances = append(rs.Instances, *isNew) + } + + if instKey != addrs.NoKey && rs.EachMode == "" { + rs.EachMode = "list" + } + } + + for _, rs := range resourceStates { + new.Resources = append(new.Resources, *rs) + } + + if len(msOld.Path) == 1 && msOld.Path[0] == "root" { + // We'll migrate the outputs for this module too, then. + for name, oldOS := range msOld.Outputs { + newOS := outputStateV4{ + Sensitive: oldOS.Sensitive, + } + + valRaw := oldOS.Value + valSrc, err := json.Marshal(valRaw) + if err != nil { + // Should never happen, because this value came from JSON + // in the first place and so we're just round-tripping here. + return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err) + } + + // The "type" field in state V2 wasn't really that useful + // since it was only able to capture string vs. list vs. map. + // For this reason, during upgrade we'll just discard it + // altogether and use cty's idea of the implied type of + // turning our old value into JSON. 
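The JSON round-trip used here can be tried directly against the upstream `go-cty` module; the sketch below imports it by its canonical path rather than the vendored mirror:

```go
package main

import (
	"encoding/json"
	"fmt"

	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// A legacy output value, as it might appear in a V3 state file.
	var valRaw interface{} = []interface{}{"10.0.0.4", "10.0.0.5"}

	// Round-trip through JSON, then let cty infer a type from the bytes,
	// which is the same path the upgrade takes for root outputs.
	valSrc, err := json.Marshal(valRaw)
	if err != nil {
		panic(err)
	}
	ty, err := ctyjson.ImpliedType(valSrc)
	if err != nil {
		panic(err)
	}

	// ImpliedType favors structural types: a JSON array becomes a tuple,
	// which is why the vendored code then simplifies to a collection type.
	fmt.Println(ty.FriendlyName()) // tuple
}
```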
+ ty, err := ctyjson.ImpliedType(valSrc) + if err != nil { + // REALLY should never happen, because we literally just + // encoded this as JSON above! + return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err) + } + + // ImpliedType tends to produce structural types, but since older + // version of Terraform didn't support those a collection type + // is probably what was intended, so we'll see if we can + // interpret our value as one. + ty = simplifyImpliedValueType(ty) + + tySrc, err := ctyjson.MarshalType(ty) + if err != nil { + return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err) + } + + newOS.ValueRaw = json.RawMessage(valSrc) + newOS.ValueTypeRaw = json.RawMessage(tySrc) + + new.RootOutputs[name] = newOS + } + } + } + + new.normalize() + + return new, nil +} + +func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) { + + // Schema versions were, in prior formats, a private concern of the provider + // SDK, and not a first-class concept in the state format. Here we're + // sniffing for the pre-0.12 SDK's way of representing schema versions + // and promoting it to our first-class field if we find it. We'll ignore + // it if it doesn't look like what the SDK would've written. If this + // sniffing fails then we'll assume schema version 0. + var schemaVersion uint64 + migratedSchemaVersion := false + if raw, exists := isOld.Meta["schema_version"]; exists { + switch tv := raw.(type) { + case string: + v, err := strconv.ParseUint(tv, 10, 64) + if err == nil { + schemaVersion = v + migratedSchemaVersion = true + } + case int: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + case float64: + schemaVersion = uint64(tv) + migratedSchemaVersion = true + } + } + + private := map[string]interface{}{} + for k, v := range isOld.Meta { + if k == "schema_version" && migratedSchemaVersion { + // We're gonna promote this into our first-class schema version field + continue + } + private[k] = v + } + var privateJSON []byte + if len(private) != 0 { + var err error + privateJSON, err = json.Marshal(private) + if err != nil { + // This shouldn't happen, because the Meta values all came from JSON + // originally anyway. + return nil, fmt.Errorf("cannot serialize private instance object data: %s", err) + } + } + + var status string + if isOld.Tainted { + status = "tainted" + } + + var instKeyRaw interface{} + switch tk := instKey.(type) { + case addrs.IntKey: + instKeyRaw = int(tk) + case addrs.StringKey: + instKeyRaw = string(tk) + default: + if instKeyRaw != nil { + return nil, fmt.Errorf("insupported instance key: %#v", instKey) + } + } + + var attributes map[string]string + if isOld.Attributes != nil { + attributes = make(map[string]string, len(isOld.Attributes)) + for k, v := range isOld.Attributes { + attributes[k] = v + } + } + if isOld.ID != "" { + // As a special case, if we don't already have an "id" attribute and + // yet there's a non-empty first-class ID on the old object then we'll + // create a synthetic id attribute to avoid losing that first-class id. + // In practice this generally arises only in tests where state literals + // are hand-written in a non-standard way; real code prior to 0.12 + // would always force the first-class ID to be copied into the + // id attribute before storing. 
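+	// For example, a hand-written v3 fixture like this (illustrative only):
+	//
+	//	"primary": {
+	//	    "id": "i-abc123",
+	//	    "attributes": {}
+	//	}
+	//
+	// would come out of this upgrade with attributes["id"] set to "i-abc123".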
+ if attributes == nil { + attributes = make(map[string]string, len(isOld.Attributes)) + } + if idVal := attributes["id"]; idVal == "" { + attributes["id"] = isOld.ID + } + } + + dependencies := make([]string, len(rsOld.Dependencies)) + for i, v := range rsOld.Dependencies { + dependencies[i] = parseLegacyDependency(v) + } + + return &instanceObjectStateV4{ + IndexKey: instKeyRaw, + Status: status, + Deposed: string(deposedKey), + AttributesFlat: attributes, + Dependencies: dependencies, + SchemaVersion: schemaVersion, + PrivateRaw: privateJSON, + }, nil +} + +// parseLegacyResourceAddress parses the different identifier format used +// state formats before version 4, like "instance.name.0". +func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) { + var ret addrs.ResourceInstance + + // Split based on ".". Every resource address should have at least two + // elements (type and name). + parts := strings.Split(s, ".") + if len(parts) < 2 || len(parts) > 4 { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Data resource if we have at least 3 parts and the first one is data + ret.Resource.Mode = addrs.ManagedResourceMode + if len(parts) > 2 && parts[0] == "data" { + ret.Resource.Mode = addrs.DataResourceMode + parts = parts[1:] + } + + // If we're not a data resource and we have more than 3, then it is an error + if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode { + return ret, fmt.Errorf("invalid internal resource address format: %s", s) + } + + // Build the parts of the resource address that are guaranteed to exist + ret.Resource.Type = parts[0] + ret.Resource.Name = parts[1] + ret.Key = addrs.NoKey + + // If we have more parts, then we have an index. Parse that. + if len(parts) > 2 { + idx, err := strconv.ParseInt(parts[2], 0, 0) + if err != nil { + return ret, fmt.Errorf("error parsing resource address %q: %s", s, err) + } + + ret.Key = addrs.IntKey(idx) + } + + return ret, nil +} + +// simplifyImpliedValueType attempts to heuristically simplify a value type +// derived from a legacy stored output value into something simpler that +// is closer to what would've fitted into the pre-v0.12 value type system. +func simplifyImpliedValueType(ty cty.Type) cty.Type { + switch { + case ty.IsTupleType(): + // If all of the element types are the same then we'll make this + // a list instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyTuple) { + // Don't know what the element type would be, then. + return ty + } + + etys := ty.TupleElementTypes() + ety := etys[0] + for _, other := range etys[1:] { + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.List(ety) + + case ty.IsObjectType(): + // If all of the attribute types are the same then we'll make this + // a map instead. This is very likely to be true, since prior versions + // of Terraform did not officially support mixed-type collections. + + if ty.Equals(cty.EmptyObject) { + // Don't know what the element type would be, then. 
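+			// For example (illustrative values): {"a":"x","b":"y"} implies
+			// a cty object type with two string attributes, which simplifies
+			// below to cty.Map(cty.String), while cty.EmptyObject has no
+			// attributes from which to infer an element type and so is
+			// returned unchanged here.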
+ return ty + } + + atys := ty.AttributeTypes() + var ety cty.Type + for _, other := range atys { + if ety == cty.NilType { + ety = other + continue + } + if !other.Equals(ety) { + // inconsistent types + return ty + } + } + ety = simplifyImpliedValueType(ety) + return cty.Map(ety) + + default: + // No other normalizations are possible + return ty + } +} + +func parseLegacyDependency(s string) string { + parts := strings.Split(s, ".") + ret := parts[0] + for _, part := range parts[1:] { + if part == "*" { + break + } + if i, err := strconv.Atoi(part); err == nil { + ret = ret + fmt.Sprintf("[%d]", i) + break + } + ret = ret + "." + part + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go new file mode 100644 index 00000000..ee8b6523 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go @@ -0,0 +1,604 @@ +package statefile + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + version "github.com/hashicorp/go-version" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +func readStateV4(src []byte) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + sV4 := &stateV4{} + err := json.Unmarshal(src, sV4) + if err != nil { + diags = diags.Append(jsonUnmarshalDiags(err)) + return nil, diags + } + + file, prepDiags := prepareStateV4(sV4) + diags = diags.Append(prepDiags) + return file, diags +} + +func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var tfVersion *version.Version + if sV4.TerraformVersion != "" { + var err error + tfVersion, err = version.NewVersion(sV4.TerraformVersion) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid Terraform version string", + fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion), + )) + } + } + + file := &File{ + TerraformVersion: tfVersion, + Serial: sV4.Serial, + Lineage: sV4.Lineage, + } + + state := states.NewState() + + for _, rsV4 := range sV4.Resources { + rAddr := addrs.Resource{ + Type: rsV4.Type, + Name: rsV4.Name, + } + switch rsV4.Mode { + case "managed": + rAddr.Mode = addrs.ManagedResourceMode + case "data": + rAddr.Mode = addrs.DataResourceMode + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource mode in state", + fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name), + )) + continue + } + + moduleAddr := addrs.RootModuleInstance + if rsV4.Module != "" { + var addrDiags tfdiags.Diagnostics + moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module) + diags = diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + } + + providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig) + diags.Append(addrDiags) + if addrDiags.HasErrors() { + continue + } + + var eachMode states.EachMode + switch rsV4.EachMode { + case "": + eachMode = states.NoEach + case "list": + eachMode = states.EachList + case "map": + eachMode = states.EachMap + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource metadata in state", + fmt.Sprintf("Resource %s has invalid \"each\" 
value %q in state.", rAddr.Absolute(moduleAddr), eachMode), + )) + continue + } + + ms := state.EnsureModule(moduleAddr) + + // Ensure the resource container object is present in the state. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + + for _, isV4 := range rsV4.Instances { + keyRaw := isV4.IndexKey + var key addrs.InstanceKey + switch tk := keyRaw.(type) { + case int: + key = addrs.IntKey(tk) + case float64: + // Since JSON only has one number type, reading from encoding/json + // gives us a float64 here even if the number is whole. + // float64 has a smaller integer range than int, but in practice + // we rarely have more than a few tens of instances and so + // it's unlikely that we'll exhaust the 52 bits in a float64. + key = addrs.IntKey(int(tk)) + case string: + key = addrs.StringKey(tk) + default: + if keyRaw != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw), + )) + continue + } + key = addrs.NoKey + } + + instAddr := rAddr.Instance(key) + + obj := &states.ResourceInstanceObjectSrc{ + SchemaVersion: isV4.SchemaVersion, + } + + { + // Instance attributes + switch { + case isV4.AttributesRaw != nil: + obj.AttrsJSON = isV4.AttributesRaw + case isV4.AttributesFlat != nil: + obj.AttrsFlat = isV4.AttributesFlat + default: + // This is odd, but we'll accept it and just treat the + // object has being empty. In practice this should arise + // only from the contrived sort of state objects we tend + // to hand-write inline in tests. + obj.AttrsJSON = []byte{'{', '}'} + } + } + + { + // Status + raw := isV4.Status + switch raw { + case "": + obj.Status = states.ObjectReady + case "tainted": + obj.Status = states.ObjectTainted + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw), + )) + continue + } + } + + if raw := isV4.PrivateRaw; len(raw) > 0 { + obj.Private = raw + } + + { + depsRaw := isV4.Dependencies + deps := make([]addrs.Referenceable, 0, len(depsRaw)) + for _, depRaw := range depsRaw { + ref, refDiags := addrs.ParseRefStr(depRaw) + diags = diags.Append(refDiags) + if refDiags.HasErrors() { + continue + } + if len(ref.Remaining) != 0 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw), + )) + } + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr))) + } + deps = append(deps, ref.Subject) + } + obj.Dependencies = deps + } + + switch { + case isV4.Deposed != "": + dk := states.DeposedKey(isV4.Deposed) + if len(dk) != 8 { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource instance metadata in state", + fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed), + )) + continue + } + is := ms.ResourceInstance(instAddr) + if is.HasDeposed(dk) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state 
file.", instAddr.Absolute(moduleAddr), dk), + )) + continue + } + + ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr) + default: + is := ms.ResourceInstance(instAddr) + if is.HasCurrent() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Duplicate resource instance in state", + fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)), + )) + continue + } + + ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr) + } + } + + // We repeat this after creating the instances because + // SetResourceInstanceCurrent automatically resets this metadata based + // on the incoming objects. That behavior is useful when we're making + // piecemeal updates to the state during an apply, but when we're + // reading the state file we want to reflect its contents exactly. + ms.SetResourceMeta(rAddr, eachMode, providerAddr) + } + + // The root module is special in that we persist its attributes and thus + // need to reload them now. (For descendent modules we just re-calculate + // them based on the latest configuration on each run.) + { + rootModule := state.RootModule() + for name, fos := range sV4.RootOutputs { + os := &states.OutputValue{} + os.Sensitive = fos.Sensitive + + ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw)) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value type in state", + fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err), + )) + continue + } + + val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid output value saved in state", + fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err), + )) + continue + } + + os.Value = val + rootModule.OutputValues[name] = os + } + } + + file.State = state + return file, diags +} + +func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics { + // Here we'll convert back from the "File" representation to our + // stateV4 struct representation and write that. + // + // While we support legacy state formats for reading, we only support the + // latest for writing and so if a V5 is added in future then this function + // should be deleted and replaced with a writeStateV5, even though the + // read/prepare V4 functions above would stick around. 
+ + var diags tfdiags.Diagnostics + if file == nil || file.State == nil { + panic("attempt to write nil state to file") + } + + var terraformVersion string + if file.TerraformVersion != nil { + terraformVersion = file.TerraformVersion.String() + } + + sV4 := &stateV4{ + TerraformVersion: terraformVersion, + Serial: file.Serial, + Lineage: file.Lineage, + RootOutputs: map[string]outputStateV4{}, + Resources: []resourceStateV4{}, + } + + for name, os := range file.State.RootModule().OutputValues { + src, err := ctyjson.Marshal(os.Value, os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err), + )) + continue + } + + typeSrc, err := ctyjson.MarshalType(os.Value.Type()) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize output value in state", + fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err), + )) + continue + } + + sV4.RootOutputs[name] = outputStateV4{ + Sensitive: os.Sensitive, + ValueRaw: json.RawMessage(src), + ValueTypeRaw: json.RawMessage(typeSrc), + } + } + + for _, ms := range file.State.Modules { + moduleAddr := ms.Addr + for _, rs := range ms.Resources { + resourceAddr := rs.Addr + + var mode string + switch resourceAddr.Mode { + case addrs.ManagedResourceMode: + mode = "managed" + case addrs.DataResourceMode: + mode = "data" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode), + )) + continue + } + + var eachMode string + switch rs.EachMode { + case states.NoEach: + eachMode = "" + case states.EachList: + eachMode = "list" + case states.EachMap: + eachMode = "map" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource in state", + fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode), + )) + continue + } + + sV4.Resources = append(sV4.Resources, resourceStateV4{ + Module: moduleAddr.String(), + Mode: mode, + Type: resourceAddr.Type, + Name: resourceAddr.Name, + EachMode: eachMode, + ProviderConfig: rs.ProviderConfig.String(), + Instances: []instanceObjectStateV4{}, + }) + rsV4 := &(sV4.Resources[len(sV4.Resources)-1]) + + for key, is := range rs.Instances { + if is.HasCurrent() { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, is.Current, states.NotDeposed, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + for dk, obj := range is.Deposed { + var objDiags tfdiags.Diagnostics + rsV4.Instances, objDiags = appendInstanceObjectStateV4( + rs, is, key, obj, dk, + rsV4.Instances, + ) + diags = diags.Append(objDiags) + } + } + } + } + + sV4.normalize() + + src, err := json.MarshalIndent(sV4, "", " ") + if err != nil { + // Shouldn't happen if we do our conversion to *stateV4 correctly above. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize state", + fmt.Sprintf("An error occured while serializing the state to save it. 
This is a bug in Terraform and should be reported: %s.", err), + )) + return diags + } + src = append(src, '\n') + + _, err = w.Write(src) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to write state", + fmt.Sprintf("An error occured while writing the serialized state: %s.", err), + )) + return diags + } + + return diags +} + +func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + var status string + switch obj.Status { + case states.ObjectReady: + status = "" + case states.ObjectTainted: + status = "tainted" + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status), + )) + } + + var privateRaw []byte + if len(obj.Private) > 0 { + privateRaw = obj.Private + } + + deps := make([]string, len(obj.Dependencies)) + for i, depAddr := range obj.Dependencies { + deps[i] = depAddr.String() + } + + var rawKey interface{} + switch tk := key.(type) { + case addrs.IntKey: + rawKey = int(tk) + case addrs.StringKey: + rawKey = string(tk) + default: + if key != addrs.NoKey { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to serialize resource instance in state", + fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key), + )) + } + } + + return append(isV4s, instanceObjectStateV4{ + IndexKey: rawKey, + Deposed: string(deposed), + Status: status, + SchemaVersion: obj.SchemaVersion, + AttributesFlat: obj.AttrsFlat, + AttributesRaw: obj.AttrsJSON, + PrivateRaw: privateRaw, + Dependencies: deps, + }), diags +} + +type stateV4 struct { + Version stateVersionV4 `json:"version"` + TerraformVersion string `json:"terraform_version"` + Serial uint64 `json:"serial"` + Lineage string `json:"lineage"` + RootOutputs map[string]outputStateV4 `json:"outputs"` + Resources []resourceStateV4 `json:"resources"` +} + +// normalize makes some in-place changes to normalize the way items are +// stored to ensure that two functionally-equivalent states will be stored +// identically. 
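+// For example, under this normalization a "data" mode resource always sorts
+// before a "managed" resource of the same type and name (the comparison
+// below is lexical on the mode string), so repeated writes of an equivalent
+// state produce identical files regardless of map iteration order.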
+func (s *stateV4) normalize() { + sort.Stable(sortResourcesV4(s.Resources)) + for _, rs := range s.Resources { + sort.Stable(sortInstancesV4(rs.Instances)) + } +} + +type outputStateV4 struct { + ValueRaw json.RawMessage `json:"value"` + ValueTypeRaw json.RawMessage `json:"type"` + Sensitive bool `json:"sensitive,omitempty"` +} + +type resourceStateV4 struct { + Module string `json:"module,omitempty"` + Mode string `json:"mode"` + Type string `json:"type"` + Name string `json:"name"` + EachMode string `json:"each,omitempty"` + ProviderConfig string `json:"provider"` + Instances []instanceObjectStateV4 `json:"instances"` +} + +type instanceObjectStateV4 struct { + IndexKey interface{} `json:"index_key,omitempty"` + Status string `json:"status,omitempty"` + Deposed string `json:"deposed,omitempty"` + + SchemaVersion uint64 `json:"schema_version"` + AttributesRaw json.RawMessage `json:"attributes,omitempty"` + AttributesFlat map[string]string `json:"attributes_flat,omitempty"` + + PrivateRaw []byte `json:"private,omitempty"` + + Dependencies []string `json:"depends_on,omitempty"` +} + +// stateVersionV4 is a weird special type we use to produce our hard-coded +// "version": 4 in the JSON serialization. +type stateVersionV4 struct{} + +func (sv stateVersionV4) MarshalJSON() ([]byte, error) { + return []byte{'4'}, nil +} + +func (sv stateVersionV4) UnmarshalJSON([]byte) error { + // Nothing to do: we already know we're version 4 + return nil +} + +type sortResourcesV4 []resourceStateV4 + +func (sr sortResourcesV4) Len() int { return len(sr) } +func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] } +func (sr sortResourcesV4) Less(i, j int) bool { + switch { + case sr[i].Mode != sr[j].Mode: + return sr[i].Mode < sr[j].Mode + case sr[i].Type != sr[j].Type: + return sr[i].Type < sr[j].Type + case sr[i].Name != sr[j].Name: + return sr[i].Name < sr[j].Name + default: + return false + } +} + +type sortInstancesV4 []instanceObjectStateV4 + +func (si sortInstancesV4) Len() int { return len(si) } +func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] } +func (si sortInstancesV4) Less(i, j int) bool { + ki := si[i].IndexKey + kj := si[j].IndexKey + if ki != kj { + if (ki == nil) != (kj == nil) { + return ki == nil + } + if kii, isInt := ki.(int); isInt { + if kji, isInt := kj.(int); isInt { + return kii < kji + } + return true + } + if kis, isStr := ki.(string); isStr { + if kjs, isStr := kj.(string); isStr { + return kis < kjs + } + return true + } + } + if si[i].Deposed != si[j].Deposed { + return si[i].Deposed < si[j].Deposed + } + return false +} diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go new file mode 100644 index 00000000..548ba8a8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statefile/write.go @@ -0,0 +1,17 @@ +package statefile + +import ( + "io" + + tfversion "github.com/hashicorp/terraform/version" +) + +// Write writes the given state to the given writer in the current state +// serialization format. +func Write(s *File, w io.Writer) error { + // Always record the current terraform version in the state. 
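+	// A minimal caller-side sketch of using Write (assuming f is a *File
+	// obtained from Read or built via the statemgr helpers):
+	//
+	//	var buf bytes.Buffer
+	//	if err := statefile.Write(f, &buf); err != nil {
+	//	    // handle err
+	//	}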
+ s.TerraformVersion = tfversion.SemVer + + diags := writeStateV4(s, w) + return diags.Err() +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/doc.go b/vendor/github.com/hashicorp/terraform/states/statemgr/doc.go new file mode 100644 index 00000000..058b36d9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/doc.go @@ -0,0 +1,21 @@ +// Package statemgr defines the interfaces and some supporting functionality +// for "state managers", which are components responsible for writing state +// to some persistent storage and then later retrieving it. +// +// State managers will usually (but not necessarily) use the state file formats +// implemented in the sibling directory "statefile" to serialize the persistent +// parts of state for storage. +// +// State managers are responsible for ensuring that stored state can be updated +// safely across multiple, possibly-concurrent Terraform runs (with reasonable +// constraints and limitations). The rest of Terraform considers state to be +// a mutable data structure, with state managers preserving that illusion +// by creating snapshots of the state and updating them over time. +// +// From the perspective of callers of the general state manager API, a state +// manager is able to return the latest snapshot and to replace that snapshot +// with a new one. Some state managers may also preserve historical snapshots +// using facilities offered by their storage backend, but this is always an +// implementation detail: the historical versions are not visible to a user +// of these interfaces. +package statemgr diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem.go b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem.go new file mode 100644 index 00000000..8338e574 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem.go @@ -0,0 +1,516 @@ +package statemgr + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "sync" + "time" + + multierror "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" +) + +// Filesystem is a full state manager that uses a file in the local filesystem +// for persistent storage. +// +// The transient storage for Filesystem is always in-memory. +type Filesystem struct { + mu sync.Mutex + + // path is the location where a file will be created or replaced for + // each persistent snapshot. + path string + + // readPath is read by RefreshState instead of "path" until the first + // call to PersistState, after which it is ignored. + // + // The file at readPath must never be written to by this manager. + readPath string + + // backupPath is an optional extra path which, if non-empty, will be + // created or overwritten with the first state snapshot we read if there + // is a subsequent call to write a different state. + backupPath string + + // the file handle corresponding to PathOut + stateFileOut *os.File + + // While the stateFileOut will correspond to the lock directly, + // store and check the lock ID to maintain a strict state.Locker + // implementation. + lockID string + + // created is set to true if stateFileOut didn't exist before we created it. + // This is mostly so we can clean up empty files during tests, but doesn't + // hurt to remove file we never wrote to. 
+ created bool + + file *statefile.File + readFile *statefile.File + backupFile *statefile.File + writtenBackup bool +} + +var ( + _ Full = (*Filesystem)(nil) + _ PersistentMeta = (*Filesystem)(nil) + _ Migrator = (*Filesystem)(nil) +) + +// NewFilesystem creates a filesystem-based state manager that reads and writes +// state snapshots at the given filesystem path. +// +// This is equivalent to calling NewFileSystemBetweenPaths with statePath as +// both of the path arguments. +func NewFilesystem(statePath string) *Filesystem { + return &Filesystem{ + path: statePath, + readPath: statePath, + } +} + +// NewFilesystemBetweenPaths creates a filesystem-based state manager that +// reads an initial snapshot from readPath and then writes all new snapshots to +// writePath. +func NewFilesystemBetweenPaths(readPath, writePath string) *Filesystem { + return &Filesystem{ + path: writePath, + readPath: readPath, + } +} + +// SetBackupPath configures the receiever so that it will create a local +// backup file of the next state snapshot it reads (in State) if a different +// snapshot is subsequently written (in WriteState). Only one backup is +// written for the lifetime of the object, unless reset as described below. +// +// For correct operation, this must be called before any other state methods +// are called. If called multiple times, each call resets the backup +// function so that the next read will become the backup snapshot and a +// following write will save a backup of it. +func (s *Filesystem) SetBackupPath(path string) { + s.backupPath = path + s.backupFile = nil + s.writtenBackup = false +} + +// BackupPath returns the manager's backup path if backup files are enabled, +// or an empty string otherwise. +func (s *Filesystem) BackupPath() string { + return s.backupPath +} + +// State is an implementation of Reader. +func (s *Filesystem) State() *states.State { + defer s.mutex()() + if s.file == nil { + return nil + } + return s.file.DeepCopy().State +} + +// WriteState is an incorrect implementation of Writer that actually also +// persists. +func (s *Filesystem) WriteState(state *states.State) error { + // TODO: this should use a more robust method of writing state, by first + // writing to a temp file on the same filesystem, and renaming the file over + // the original. + + defer s.mutex()() + + if s.readFile == nil { + err := s.refreshState() + if err != nil { + return err + } + } + + return s.writeState(state, nil) +} + +func (s *Filesystem) writeState(state *states.State, meta *SnapshotMeta) error { + if s.stateFileOut == nil { + if err := s.createStateFiles(); err != nil { + return nil + } + } + defer s.stateFileOut.Sync() + + // We'll try to write our backup first, so we can be sure we've created + // it successfully before clobbering the original file it came from. 
+ if !s.writtenBackup && s.backupFile != nil && s.backupPath != "" { + if !statefile.StatesMarshalEqual(state, s.backupFile.State) { + log.Printf("[TRACE] statemgr.Filesystem: creating backup snapshot at %s", s.backupPath) + bfh, err := os.Create(s.backupPath) + if err != nil { + return fmt.Errorf("failed to create local state backup file: %s", err) + } + defer bfh.Close() + + err = statefile.Write(s.backupFile, bfh) + if err != nil { + return fmt.Errorf("failed to write to local state backup file: %s", err) + } + + s.writtenBackup = true + } else { + log.Print("[TRACE] statemgr.Filesystem: not making a backup, because the new snapshot is identical to the old") + } + } else { + // This branch is all just logging, to help understand why we didn't make a backup. + switch { + case s.backupPath == "": + log.Print("[TRACE] statemgr.Filesystem: state file backups are disabled") + case s.writtenBackup: + log.Printf("[TRACE] statemgr.Filesystem: have already backed up original %s to %s on a previous write", s.path, s.backupPath) + case s.backupFile == nil: + log.Printf("[TRACE] statemgr.Filesystem: no original state snapshot to back up") + default: + log.Printf("[TRACE] statemgr.Filesystem: not creating a backup for an unknown reason") + } + } + + s.file = s.file.DeepCopy() + if s.file == nil { + s.file = NewStateFile() + } + s.file.State = state.DeepCopy() + + if _, err := s.stateFileOut.Seek(0, os.SEEK_SET); err != nil { + return err + } + if err := s.stateFileOut.Truncate(0); err != nil { + return err + } + + if state == nil { + // if we have no state, don't write anything else. + log.Print("[TRACE] statemgr.Filesystem: state is nil, so leaving the file empty") + return nil + } + + if meta == nil { + if s.readFile == nil || !statefile.StatesMarshalEqual(s.file.State, s.readFile.State) { + s.file.Serial++ + log.Printf("[TRACE] statemgr.Filesystem: state has changed since last snapshot, so incrementing serial to %d", s.file.Serial) + } else { + log.Print("[TRACE] statemgr.Filesystem: no state changes since last snapshot") + } + } else { + // Force new metadata + s.file.Lineage = meta.Lineage + s.file.Serial = meta.Serial + log.Printf("[TRACE] statemgr.Filesystem: forcing lineage %q serial %d for migration/import", s.file.Lineage, s.file.Serial) + } + + log.Printf("[TRACE] statemgr.Filesystem: writing snapshot at %s", s.path) + if err := statefile.Write(s.file, s.stateFileOut); err != nil { + return err + } + + // Any future reads must come from the file we've now updated + s.readPath = s.path + return nil +} + +// PersistState is an implementation of Persister that does nothing because +// this type's Writer implementation does its own persistence. +func (s *Filesystem) PersistState() error { + return nil +} + +// RefreshState is an implementation of Refresher. +func (s *Filesystem) RefreshState() error { + defer s.mutex()() + return s.refreshState() +} + +func (s *Filesystem) refreshState() error { + var reader io.Reader + + // The s.readPath file is only OK to read if we have not written any state out + // (in which case the same state needs to be read in), and no state output file + // has been opened (possibly via a lock) or the input path is different + // than the output path. + // This is important for Windows, as if the input file is the same as the + // output file, and the output file has been locked already, we can't open + // the file again. 
+	if s.stateFileOut == nil || s.readPath != s.path {
+		// we haven't written a state file yet, so load from readPath
+		log.Printf("[TRACE] statemgr.Filesystem: reading initial snapshot from %s", s.readPath)
+		f, err := os.Open(s.readPath)
+		if err != nil {
+			// It is okay if the file doesn't exist; we'll treat that as a nil state.
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			// we need a non-nil reader for ReadState and an empty buffer works
+			// to return EOF immediately
+			reader = bytes.NewBuffer(nil)
+
+		} else {
+			defer f.Close()
+			reader = f
+		}
+	} else {
+		log.Printf("[TRACE] statemgr.Filesystem: reading latest snapshot from %s", s.path)
+		// no state to refresh
+		if s.stateFileOut == nil {
+			return nil
+		}
+
+		// we have a state file, make sure we're at the start
+		s.stateFileOut.Seek(0, os.SEEK_SET)
+		reader = s.stateFileOut
+	}
+
+	f, err := statefile.Read(reader)
+	// if there's no state then a nil file is fine
+	if err != nil {
+		if err != statefile.ErrNoState {
+			return err
+		}
+		log.Printf("[TRACE] statemgr.Filesystem: snapshot file has nil snapshot, but that's okay")
+	}
+
+	s.file = f
+	s.readFile = s.file.DeepCopy()
+	if s.file != nil {
+		log.Printf("[TRACE] statemgr.Filesystem: read snapshot with lineage %q serial %d", s.file.Lineage, s.file.Serial)
+	} else {
+		log.Print("[TRACE] statemgr.Filesystem: read nil snapshot")
+	}
+	return nil
+}
+
+// Lock implements Locker using filesystem discretionary locks.
+func (s *Filesystem) Lock(info *LockInfo) (string, error) {
+	defer s.mutex()()
+
+	if s.stateFileOut == nil {
+		if err := s.createStateFiles(); err != nil {
+			return "", err
+		}
+	}
+
+	if s.lockID != "" {
+		return "", fmt.Errorf("state %q already locked", s.stateFileOut.Name())
+	}
+
+	if err := s.lock(); err != nil {
+		info, infoErr := s.lockInfo()
+		if infoErr != nil {
+			err = multierror.Append(err, infoErr)
+		}
+
+		lockErr := &LockError{
+			Info: info,
+			Err:  err,
+		}
+
+		return "", lockErr
+	}
+
+	s.lockID = info.ID
+	return s.lockID, s.writeLockInfo(info)
+}
+
+// Unlock is the companion to Lock, completing the implementation of Locker.
+func (s *Filesystem) Unlock(id string) error {
+	defer s.mutex()()
+
+	if s.lockID == "" {
+		return fmt.Errorf("LocalState not locked")
+	}
+
+	if id != s.lockID {
+		idErr := fmt.Errorf("invalid lock id: %q. current id: %q", id, s.lockID)
+		info, err := s.lockInfo()
+		if err != nil {
+			err = multierror.Append(idErr, err)
+		}
+
+		return &LockError{
+			Err:  idErr,
+			Info: info,
+		}
+	}
+
+	lockInfoPath := s.lockInfoPath()
+	log.Printf("[TRACE] statemgr.Filesystem: removing lock metadata file %s", lockInfoPath)
+	os.Remove(lockInfoPath)
+
+	fileName := s.stateFileOut.Name()
+
+	unlockErr := s.unlock()
+
+	s.stateFileOut.Close()
+	s.stateFileOut = nil
+	s.lockID = ""
+
+	// clean up the state file if we created it and never wrote to it
+	stat, err := os.Stat(fileName)
+	if err == nil && stat.Size() == 0 && s.created {
+		os.Remove(fileName)
+	}
+
+	return unlockErr
+}
+
+// StateSnapshotMeta returns the metadata from the most recently persisted
+// or refreshed persistent state snapshot.
+//
+// This is an implementation of PersistentMeta.
+func (s *Filesystem) StateSnapshotMeta() SnapshotMeta {
+	if s.file == nil {
+		return SnapshotMeta{} // placeholder
+	}
+
+	return SnapshotMeta{
+		Lineage: s.file.Lineage,
+		Serial:  s.file.Serial,
+
+		TerraformVersion: s.file.TerraformVersion,
+	}
+}
+
+// StateForMigration is part of our implementation of Migrator.
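+// A sketch of the intended pairing (assuming src and dst are two state
+// managers created elsewhere, e.g. *Filesystem values):
+//
+//	f := src.StateForMigration()                // snapshot plus lineage/serial
+//	err := dst.WriteStateForMigration(f, false) // checked import into dst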
+func (s *Filesystem) StateForMigration() *statefile.File { + return s.file.DeepCopy() +} + +// WriteStateForMigration is part of our implementation of Migrator. +func (s *Filesystem) WriteStateForMigration(f *statefile.File, force bool) error { + defer s.mutex()() + + if s.readFile == nil { + err := s.refreshState() + if err != nil { + return err + } + } + + if !force { + err := CheckValidImport(f, s.readFile) + if err != nil { + return err + } + } + + if s.readFile != nil { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d over snapshot with lineage %q serial %d at %s", + f.Lineage, f.Serial, + s.readFile.Lineage, s.readFile.Serial, + s.path, + ) + } else { + log.Printf( + "[TRACE] statemgr.Filesystem: Importing snapshot with lineage %q serial %d as the initial state snapshot at %s", + f.Lineage, f.Serial, + s.path, + ) + } + + err := s.writeState(f.State, &SnapshotMeta{Lineage: f.Lineage, Serial: f.Serial}) + if err != nil { + return err + } + + return nil +} + +// Open the state file, creating the directories and file as needed. +func (s *Filesystem) createStateFiles() error { + log.Printf("[TRACE] statemgr.Filesystem: preparing to manage state snapshots at %s", s.path) + + // This could race, but we only use it to clean up empty files + if _, err := os.Stat(s.path); os.IsNotExist(err) { + s.created = true + } + + // Create all the directories + if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { + return err + } + + f, err := os.OpenFile(s.path, os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return err + } + + s.stateFileOut = f + + // If the file already existed with content then that'll be the content + // of our backup file if we write a change later. + s.backupFile, err = statefile.Read(s.stateFileOut) + if err != nil { + if err != statefile.ErrNoState { + return err + } + log.Printf("[TRACE] statemgr.Filesystem: no previously-stored snapshot exists") + } else { + log.Printf("[TRACE] statemgr.Filesystem: existing snapshot has lineage %q serial %d", s.backupFile.Lineage, s.backupFile.Serial) + } + + // Refresh now, to load in the snapshot if the file already existed + return nil +} + +// return the path for the lockInfo metadata. +func (s *Filesystem) lockInfoPath() string { + stateDir, stateName := filepath.Split(s.path) + if stateName == "" { + panic("empty state file path") + } + + if stateName[0] == '.' 
{ + stateName = stateName[1:] + } + + return filepath.Join(stateDir, fmt.Sprintf(".%s.lock.info", stateName)) +} + +// lockInfo returns the data in a lock info file +func (s *Filesystem) lockInfo() (*LockInfo, error) { + path := s.lockInfoPath() + infoData, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + + info := LockInfo{} + err = json.Unmarshal(infoData, &info) + if err != nil { + return nil, fmt.Errorf("state file %q locked, but could not unmarshal lock info: %s", s.readPath, err) + } + return &info, nil +} + +// write a new lock info file +func (s *Filesystem) writeLockInfo(info *LockInfo) error { + path := s.lockInfoPath() + info.Path = s.readPath + info.Created = time.Now().UTC() + + log.Printf("[TRACE] statemgr.Filesystem: writing lock metadata to %s", path) + err := ioutil.WriteFile(path, info.Marshal(), 0600) + if err != nil { + return fmt.Errorf("could not write lock info for %q: %s", s.readPath, err) + } + return nil +} + +func (s *Filesystem) mutex() func() { + s.mu.Lock() + return s.mu.Unlock +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_unix.go b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_unix.go new file mode 100644 index 00000000..4c4f571e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_unix.go @@ -0,0 +1,37 @@ +// +build !windows + +package statemgr + +import ( + "log" + "os" + "syscall" +) + +// use fcntl POSIX locks for the most consistent behavior across platforms, and +// hopefully some campatibility over NFS and CIFS. +func (s *Filesystem) lock() error { + log.Printf("[TRACE] statemgr.Filesystem: locking %s using fcntl flock", s.path) + flock := &syscall.Flock_t{ + Type: syscall.F_RDLCK | syscall.F_WRLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + } + + fd := s.stateFileOut.Fd() + return syscall.FcntlFlock(fd, syscall.F_SETLK, flock) +} + +func (s *Filesystem) unlock() error { + log.Printf("[TRACE] statemgr.Filesystem: unlocking %s using fcntl flock", s.path) + flock := &syscall.Flock_t{ + Type: syscall.F_UNLCK, + Whence: int16(os.SEEK_SET), + Start: 0, + Len: 0, + } + + fd := s.stateFileOut.Fd() + return syscall.FcntlFlock(fd, syscall.F_SETLK, flock) +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_windows.go b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_windows.go new file mode 100644 index 00000000..91b4a2a6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/filesystem_lock_windows.go @@ -0,0 +1,113 @@ +// +build windows + +package statemgr + +import ( + "log" + "math" + "syscall" + "unsafe" +) + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procCreateEventW = modkernel32.NewProc("CreateEventW") +) + +const ( + // dwFlags defined for LockFileEx + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + _LOCKFILE_FAIL_IMMEDIATELY = 1 + _LOCKFILE_EXCLUSIVE_LOCK = 2 +) + +func (s *Filesystem) lock() error { + log.Printf("[TRACE] statemgr.Filesystem: locking %s using LockFileEx", s.path) + + // even though we're failing immediately, an overlapped event structure is + // required + ol, err := newOverlapped() + if err != nil { + return err + } + defer syscall.CloseHandle(ol.HEvent) + + return lockFileEx( + syscall.Handle(s.stateFileOut.Fd()), + _LOCKFILE_EXCLUSIVE_LOCK|_LOCKFILE_FAIL_IMMEDIATELY, + 0, // 
reserved + 0, // bytes low + math.MaxUint32, // bytes high + ol, + ) +} + +func (s *Filesystem) unlock() error { + log.Printf("[TRACE] statemgr.Filesystem: unlocked by closing %s", s.path) + + // the file is closed in Unlock + return nil +} + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6( + procLockFileEx.Addr(), + 6, + uintptr(h), + uintptr(flags), + uintptr(reserved), + uintptr(locklow), + uintptr(lockhigh), + uintptr(unsafe.Pointer(ol)), + ) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +// newOverlapped creates a structure used to track asynchronous +// I/O requests that have been issued. +func newOverlapped() (*syscall.Overlapped, error) { + event, err := createEvent(nil, true, false, nil) + if err != nil { + return nil, err + } + return &syscall.Overlapped{HEvent: event}, nil +} + +func createEvent(sa *syscall.SecurityAttributes, manualReset bool, initialState bool, name *uint16) (handle syscall.Handle, err error) { + var _p0 uint32 + if manualReset { + _p0 = 1 + } + var _p1 uint32 + if initialState { + _p1 = 1 + } + + r0, _, e1 := syscall.Syscall6( + procCreateEventW.Addr(), + 4, + uintptr(unsafe.Pointer(sa)), + uintptr(_p0), + uintptr(_p1), + uintptr(unsafe.Pointer(name)), + 0, + 0, + ) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/helper.go b/vendor/github.com/hashicorp/terraform/states/statemgr/helper.go new file mode 100644 index 00000000..7eef70ef --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/helper.go @@ -0,0 +1,53 @@ +package statemgr + +// The functions in this file are helper wrappers for common sequences of +// operations done against full state managers. + +import ( + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/version" +) + +// NewStateFile creates a new statefile.File object, with a newly-minted +// lineage identifier and serial 0, and returns a pointer to it. +func NewStateFile() *statefile.File { + return &statefile.File{ + Lineage: NewLineage(), + TerraformVersion: version.SemVer, + State: states.NewState(), + } +} + +// RefreshAndRead refreshes the persistent snapshot in the given state manager +// and then returns it. +// +// This is a wrapper around calling RefreshState and then State on the given +// manager. +func RefreshAndRead(mgr Storage) (*states.State, error) { + err := mgr.RefreshState() + if err != nil { + return nil, err + } + return mgr.State(), nil +} + +// WriteAndPersist writes a snapshot of the given state to the given state +// manager's transient store and then immediately persists it. +// +// The caller must ensure that the given state is not concurrently modified +// while this function is running, but it is safe to modify it after this +// function has returned. +// +// If an error is returned, it is undefined whether the state has been saved +// to the transient store or not, and so the only safe response is to bail +// out quickly with a user-facing error. In situations where more control +// is required, call WriteState and PersistState on the state manager directly +// and handle their errors. 
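+// A typical read-modify-write sketch (the state file path and variable
+// names are illustrative):
+//
+//	mgr := NewFilesystem("terraform.tfstate")
+//	st, err := RefreshAndRead(mgr)
+//	if err != nil { /* handle err */ }
+//	// ... modify st ...
+//	if err := WriteAndPersist(mgr, st); err != nil { /* handle err */ }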
+func WriteAndPersist(mgr Storage, state *states.State) error {
+	err := mgr.WriteState(state)
+	if err != nil {
+		return err
+	}
+	return mgr.PersistState()
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/lineage.go b/vendor/github.com/hashicorp/terraform/states/statemgr/lineage.go
new file mode 100644
index 00000000..b06b12c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statemgr/lineage.go
@@ -0,0 +1,20 @@
+package statemgr
+
+import (
+	"fmt"
+
+	uuid "github.com/hashicorp/go-uuid"
+)
+
+// NewLineage generates a new lineage identifier string. A lineage identifier
+// is an opaque string that is intended to be unique in space and time, chosen
+// when state is recorded at a location for the first time and then preserved
+// afterwards to allow Terraform to recognize when one state snapshot is a
+// predecessor or successor of another.
+func NewLineage() string {
+	lineage, err := uuid.GenerateUUID()
+	if err != nil {
+		panic(fmt.Errorf("Failed to generate lineage: %v", err))
+	}
+	return lineage
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/locker.go b/vendor/github.com/hashicorp/terraform/states/statemgr/locker.go
new file mode 100644
index 00000000..312eb02d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statemgr/locker.go
@@ -0,0 +1,224 @@
+package statemgr
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/rand"
+	"os"
+	"os/user"
+	"strings"
+	"text/template"
+	"time"
+
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/terraform/version"
+)
+
+var rngSource = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// Locker is the interface for state managers that are able to manage
+// mutual-exclusion locks for state.
+//
+// Implementing Locker alongside Persistent relaxes some of the usual
+// implementation constraints for implementations of Refresher and Persister,
+// under the assumption that the locking mechanism effectively prevents
+// multiple Terraform processes from reading and writing state concurrently.
+// In particular, a type that implements both Locker and Persistent is only
+// required to ensure that the Persistent implementation is concurrency-safe
+// within a single Terraform process.
+//
+// A Locker implementation must ensure that another process with a
+// similarly-configured state manager cannot successfully obtain a lock while
+// the current process is holding it, or vice-versa, assuming that both
+// processes agree on the locking mechanism.
+//
+// A Locker is not required to prevent non-cooperating processes from
+// concurrently modifying the state, but is free to do so as an extra
+// protection. If a mandatory locking mechanism of this sort is implemented,
+// the state manager must ensure that RefreshState and PersistState calls
+// can succeed if made through the same manager instance that is holding the
+// lock, such as by retaining some sort of lock token that the Persistent
+// methods can then use.
+type Locker interface {
+	// Lock attempts to obtain a lock, using the given lock information.
+	//
+	// The result is an opaque id that can be passed to Unlock to release
+	// the lock, or an error if the lock cannot be acquired. Lock returns
+	// an instance of LockError immediately if the lock is already held,
+	// and the helper function LockWithContext uses this to automatically
+	// retry lock acquisition periodically until a timeout is reached.
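+	// A sketch of that retry pattern from the caller's side (names and
+	// timeout are illustrative):
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	//	defer cancel()
+	//	id, err := LockWithContext(ctx, mgr, NewLockInfo())
+	//	if err == nil {
+	//	    defer mgr.Unlock(id)
+	//	}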
+ Lock(info *LockInfo) (string, error) + + // Unlock releases a lock previously acquired by Lock. + // + // If the lock cannot be released -- for example, if it was stolen by + // another user with some sort of administrative override privilege -- + // then an error is returned explaining the situation in a way that + // is suitable for returning to an end-user. + Unlock(id string) error +} + +// test hook to verify that LockWithContext has attempted a lock +var postLockHook func() + +// LockWithContext locks the given state manager using the provided context +// for both timeout and cancellation. +// +// This method has a built-in retry/backoff behavior up to the context's +// timeout. +func LockWithContext(ctx context.Context, s Locker, info *LockInfo) (string, error) { + delay := time.Second + maxDelay := 16 * time.Second + for { + id, err := s.Lock(info) + if err == nil { + return id, nil + } + + le, ok := err.(*LockError) + if !ok { + // not a lock error, so we can't retry + return "", err + } + + if le == nil || le.Info == nil || le.Info.ID == "" { + // If we don't have a complete LockError then there's something + // wrong with the lock. + return "", err + } + + if postLockHook != nil { + postLockHook() + } + + // there's an existing lock, wait and try again + select { + case <-ctx.Done(): + // return the last lock error with the info + return "", err + case <-time.After(delay): + if delay < maxDelay { + delay *= 2 + } + } + } +} + +// LockInfo stores lock metadata. +// +// Only Operation and Info are required to be set by the caller of Lock. +// Most callers should use NewLockInfo to create a LockInfo value with many +// of the fields populated with suitable default values. +type LockInfo struct { + // Unique ID for the lock. NewLockInfo provides a random ID, but this may + // be overridden by the lock implementation. The final value if ID will be + // returned by the call to Lock. + ID string + + // Terraform operation, provided by the caller. + Operation string + + // Extra information to store with the lock, provided by the caller. + Info string + + // user@hostname when available + Who string + + // Terraform version + Version string + + // Time that the lock was taken. + Created time.Time + + // Path to the state file when applicable. Set by the Lock implementation. + Path string +} + +// NewLockInfo creates a LockInfo object and populates many of its fields +// with suitable default values. +func NewLockInfo() *LockInfo { + // this doesn't need to be cryptographically secure, just unique. + // Using math/rand alleviates the need to check handle the read error. + // Use a uuid format to match other IDs used throughout Terraform. 
+ buf := make([]byte, 16) + rngSource.Read(buf) + + id, err := uuid.FormatUUID(buf) + if err != nil { + // this of course shouldn't happen + panic(err) + } + + // don't error out on user and hostname, as we don't require them + userName := "" + if userInfo, err := user.Current(); err == nil { + userName = userInfo.Username + } + host, _ := os.Hostname() + + info := &LockInfo{ + ID: id, + Who: fmt.Sprintf("%s@%s", userName, host), + Version: version.Version, + Created: time.Now().UTC(), + } + return info +} + +// Err returns the lock info formatted in an error +func (l *LockInfo) Err() error { + return errors.New(l.String()) +} + +// Marshal returns a string json representation of the LockInfo +func (l *LockInfo) Marshal() []byte { + js, err := json.Marshal(l) + if err != nil { + panic(err) + } + return js +} + +// String return a multi-line string representation of LockInfo +func (l *LockInfo) String() string { + tmpl := `Lock Info: + ID: {{.ID}} + Path: {{.Path}} + Operation: {{.Operation}} + Who: {{.Who}} + Version: {{.Version}} + Created: {{.Created}} + Info: {{.Info}} +` + + t := template.Must(template.New("LockInfo").Parse(tmpl)) + var out bytes.Buffer + if err := t.Execute(&out, l); err != nil { + panic(err) + } + return out.String() +} + +// LockError is a specialization of type error that is returned by Locker.Lock +// to indicate that the lock is already held by another process and that +// retrying may be productive to take the lock once the other process releases +// it. +type LockError struct { + Info *LockInfo + Err error +} + +func (e *LockError) Error() string { + var out []string + if e.Err != nil { + out = append(out, e.Err.Error()) + } + + if e.Info != nil { + out = append(out, e.Info.String()) + } + return strings.Join(out, "\n") +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/migrate.go b/vendor/github.com/hashicorp/terraform/states/statemgr/migrate.go new file mode 100644 index 00000000..8f0d6799 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/migrate.go @@ -0,0 +1,212 @@ +package statemgr + +import ( + "fmt" + + "github.com/hashicorp/terraform/states/statefile" +) + +// Migrator is an optional interface implemented by state managers that +// are capable of direct migration of state snapshots with their associated +// metadata unchanged. +// +// This interface is used when available by function Migrate. See that +// function for more information on how it is used. +type Migrator interface { + PersistentMeta + + // StateForMigration returns a full statefile representing the latest + // snapshot (as would be returned by Reader.State) and the associated + // snapshot metadata (as would be returned by + // PersistentMeta.StateSnapshotMeta). + // + // Just as with Reader.State, this must not fail. + StateForMigration() *statefile.File + + // WriteStateForMigration accepts a full statefile including associated + // snapshot metadata, and atomically updates the stored file (as with + // Writer.WriteState) and the metadata. + // + // If "force" is not set, the manager must call CheckValidImport with + // the given file and the current file and complete the update only if + // that function returns nil. If force is set this may override such + // checks, but some backends do not support forcing and so will act + // as if force is always false. 
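+	// For example (sketch): with an existing snapshot at serial 5, importing
+	// a same-lineage snapshot at serial 4 fails the CheckValidImport test
+	// unless force is set:
+	//
+	//	err := mgr.WriteStateForMigration(older, false) // rejected as stale
+	//	err = mgr.WriteStateForMigration(older, true)   // allowed, where supported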
+	WriteStateForMigration(f *statefile.File, force bool) error
+}
+
+// Migrate writes the latest transient state snapshot from src into dst,
+// preserving snapshot metadata (serial and lineage) where possible.
+//
+// If both managers implement the optional interface Migrator then it will
+// be used to copy the snapshot and its associated metadata. Otherwise,
+// the normal Reader and Writer interfaces will be used instead.
+//
+// If the destination manager refuses the new state or fails to write it then
+// its error is returned directly.
+//
+// For state managers that also implement Persistent, it is the caller's
+// responsibility to persist the newly-written state after a successful result,
+// just as with calls to Writer.WriteState.
+//
+// This function doesn't do any locking of its own, so if the state managers
+// also implement Locker the caller should hold a lock on both managers
+// for the duration of this call.
+func Migrate(dst, src Transient) error {
+	if dstM, ok := dst.(Migrator); ok {
+		if srcM, ok := src.(Migrator); ok {
+			// Full-fidelity migration, then.
+			s := srcM.StateForMigration()
+			return dstM.WriteStateForMigration(s, true)
+		}
+	}
+
+	// Managers do not support full-fidelity migration, so migration will not
+	// preserve serial/lineage.
+	s := src.State()
+	return dst.WriteState(s)
+}
+
+// Import loads the given state snapshot into the given manager, preserving
+// its metadata (serial and lineage) if the target manager supports metadata.
+//
+// A state manager must implement the optional interface Migrator to get
+// access to the full metadata.
+//
+// Unless "force" is true, Import will check first that the metadata given
+// in the file matches the current snapshot metadata for the manager, if the
+// manager supports metadata. Some managers do not support forcing, so a
+// write with an unsuitable lineage or serial may still be rejected even if
+// "force" is set. "force" has no effect for managers that do not support
+// snapshot metadata.
+//
+// For state managers that also implement Persistent, it is the caller's
+// responsibility to persist the newly-written state after a successful result,
+// just as with calls to Writer.WriteState.
+//
+// This function doesn't do any locking of its own, so if the state manager
+// also implements Locker the caller should hold a lock on it for the
+// duration of this call.
+func Import(f *statefile.File, mgr Transient, force bool) error {
+	if mgrM, ok := mgr.(Migrator); ok {
+		return mgrM.WriteStateForMigration(f, force)
+	}
+
+	// For managers that don't implement Migrator, this is just a normal write
+	// of the state contained in the given file.
+	return mgr.WriteState(f.State)
+}
+
+// Export retrieves the latest state snapshot from the given manager, including
+// its metadata (serial and lineage) where possible.
+//
+// A state manager must also implement either Migrator or PersistentMeta
+// for the metadata to be included. Otherwise, the relevant fields will have
+// zero value in the returned object.
+//
+// For state managers that also implement Persistent, it is the caller's
+// responsibility to refresh from persistent storage first if needed.
+//
+// This function doesn't do any locking of its own, so if the state manager
+// also implements Locker the caller should hold a lock on it for the
+// duration of this call.
+func Export(mgr Reader) *statefile.File { + switch mgrT := mgr.(type) { + case Migrator: + return mgrT.StateForMigration() + case PersistentMeta: + s := mgr.State() + meta := mgrT.StateSnapshotMeta() + return statefile.New(s, meta.Lineage, meta.Serial) + default: + s := mgr.State() + return statefile.New(s, "", 0) + } +} + +// SnapshotMetaRel describes a relationship between two SnapshotMeta values, +// returned from the SnapshotMeta.Compare method where the "first" value +// is the receiver of that method and the "second" is the given argument. +type SnapshotMetaRel rune + +//go:generate stringer -type=SnapshotMetaRel + +const ( + // SnapshotOlder indicates that two snapshots have a common lineage and + // that the first has a lower serial value. + SnapshotOlder SnapshotMetaRel = '<' + + // SnapshotNewer indicates that two snapshots have a common lineage and + // that the first has a higher serial value. + SnapshotNewer SnapshotMetaRel = '>' + + // SnapshotEqual indicates that two snapshots have a common lineage and + // the same serial value. + SnapshotEqual SnapshotMetaRel = '=' + + // SnapshotUnrelated indicates that two snapshots have different lineage + // and thus cannot be meaningfully compared. + SnapshotUnrelated SnapshotMetaRel = '!' + + // SnapshotLegacy indicates that one or both of the snapshots + // does not have a lineage at all, and thus no comparison is possible. + SnapshotLegacy SnapshotMetaRel = '?' +) + +// Compare determines the relationship, if any, between the given existing +// SnapshotMeta and the potential "new" SnapshotMeta that is the receiver. +func (m SnapshotMeta) Compare(existing SnapshotMeta) SnapshotMetaRel { + switch { + case m.Lineage == "" || existing.Lineage == "": + return SnapshotLegacy + case m.Lineage != existing.Lineage: + return SnapshotUnrelated + case m.Serial > existing.Serial: + return SnapshotNewer + case m.Serial < existing.Serial: + return SnapshotOlder + default: + // both serials are equal, by elimination + return SnapshotEqual + } +} + +// CheckValidImport returns nil if the "new" snapshot can be imported as a +// successor of the "existing" snapshot without forcing. +// +// If not, an error is returned describing why. +func CheckValidImport(newFile, existingFile *statefile.File) error { + if existingFile == nil || existingFile.State.Empty() { + // It's always okay to overwrite an empty state, regardless of + // its lineage/serial. + return nil + } + new := SnapshotMeta{ + Lineage: newFile.Lineage, + Serial: newFile.Serial, + } + existing := SnapshotMeta{ + Lineage: existingFile.Lineage, + Serial: existingFile.Serial, + } + rel := new.Compare(existing) + switch rel { + case SnapshotNewer: + return nil // a newer snapshot is fine + case SnapshotLegacy: + return nil // anything goes for a legacy state + case SnapshotUnrelated: + return fmt.Errorf("cannot import state with lineage %q over unrelated state with lineage %q", new.Lineage, existing.Lineage) + case SnapshotEqual: + if statefile.StatesMarshalEqual(newFile.State, existingFile.State) { + // If lineage, serial, and state all match then this is fine. 
+			return nil
+		}
+		return fmt.Errorf("cannot overwrite existing state with serial %d with a different state that has the same serial", new.Serial)
+	case SnapshotOlder:
+		return fmt.Errorf("cannot import state with serial %d over newer state with serial %d", new.Serial, existing.Serial)
+	default:
+		// Should never happen, but we'll check to make sure for safety
+		return fmt.Errorf("unsupported state snapshot relationship %s", rel)
+	}
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/persistent.go b/vendor/github.com/hashicorp/terraform/states/statemgr/persistent.go
new file mode 100644
index 00000000..c15e84af
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statemgr/persistent.go
@@ -0,0 +1,104 @@
+package statemgr
+
+import (
+	version "github.com/hashicorp/go-version"
+)
+
+// Persistent is a union of the Refresher and Persister interfaces, for types
+// that deal with persistent snapshots.
+//
+// Persistent snapshots are ones that are retained in storage that will
+// outlive a particular Terraform process, and are shared with other Terraform
+// processes that have a similarly-configured state manager.
+//
+// A manager may also choose to retain historical persistent snapshots, but
+// that is an implementation detail and not visible via this API.
+type Persistent interface {
+	Refresher
+	Persister
+}
+
+// Refresher is the interface for managers that can read snapshots from
+// persistent storage.
+//
+// Refresher is usually implemented in conjunction with Reader, with
+// RefreshState copying the latest persistent snapshot into the latest
+// transient snapshot.
+//
+// For a type that implements both Refresher and Persister, RefreshState must
+// return the result of the most recently completed successful call to
+// PersistState, unless another concurrently-running process has persisted
+// another snapshot in the meantime.
+//
+// The Refresher implementation must guarantee that the snapshot is read
+// from persistent storage in a way that is safe under concurrent calls to
+// PersistState that may be happening in other processes.
+type Refresher interface {
+	// RefreshState retrieves a snapshot of state from persistent storage,
+	// returning an error if this is not possible.
+	//
+	// Types that implement RefreshState generally also implement a State
+	// method that returns the result of the latest successful refresh.
+	//
+	// Since only a subset of the data in a state is included when persisting,
+	// a round-trip through PersistState and then RefreshState will often
+	// return only a subset of what was written. Callers must assume that
+	// ephemeral portions of the state may be unpopulated after calling
+	// RefreshState.
+	RefreshState() error
+}
+
+// Persister is the interface for managers that can write snapshots to
+// persistent storage.
+//
+// Persister is usually implemented in conjunction with Writer, with
+// PersistState copying the latest transient snapshot to be the new latest
+// persistent snapshot.
+//
+// A Persister implementation must detect updates made by other processes
+// that may be running concurrently and avoid destroying those changes. This
+// is most commonly achieved by making use of atomic write capabilities on
+// the remote storage backend in conjunction with book-keeping with the
+// Serial and Lineage fields in the standard state file formats.
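For orientation, the following is an editorial sketch (not part of the vendored files) of how a caller typically drives a manager that has both transient and persistent storage; `updateState` and `mutate` are hypothetical names, and `Storage` is the union interface added later in this diff:

```go
package example

import (
	"github.com/hashicorp/terraform/states"
	"github.com/hashicorp/terraform/states/statemgr"
)

// updateState shows the usual call order for a manager with both transient
// and persistent snapshots: refresh, mutate a copy, write, then persist.
// (Illustrative sketch only; updateState and mutate are hypothetical.)
func updateState(mgr statemgr.Storage, mutate func(*states.State)) error {
	if err := mgr.RefreshState(); err != nil { // copy persistent -> transient
		return err
	}
	s := mgr.State() // State returns a distinct copy, safe to mutate
	mutate(s)
	if err := mgr.WriteState(s); err != nil { // update the transient snapshot
		return err
	}
	return mgr.PersistState() // copy transient -> persistent storage
}
```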
+type Persister interface { + PersistState() error +} + +// PersistentMeta is an optional extension to Persistent that allows inspecting +// the metadata associated with the snapshot that was most recently either +// read by RefreshState or written by PersistState. +type PersistentMeta interface { + // StateSnapshotMeta returns metadata about the state snapshot most + // recently created either by a call to PersistState or read by a call + // to RefreshState. + // + // If no persistent snapshot is yet available in the manager then + // the return value is meaningless. This method is primarily available + // for testing and logging purposes, and is of little use otherwise. + StateSnapshotMeta() SnapshotMeta +} + +// SnapshotMeta contains metadata about a persisted state snapshot. +// +// This metadata is usually (but not necessarily) included as part of the +// "header" of a state file, which is then written to a raw blob storage medium +// by a persistent state manager. +// +// Not all state managers will have useful values for all fields in this +// struct, so SnapshotMeta values are of little use beyond testing and logging +// use-cases. +type SnapshotMeta struct { + // Lineage and Serial can be used to understand the relationships between + // snapshots. + // + // If two snapshots both have an identical, non-empty Lineage + // then the one with the higher Serial is newer than the other. + // If the Lineage values are different or empty then the two snapshots + // are unrelated and cannot be compared for relative age. + Lineage string + Serial uint64 + + // TerraformVersion is the number of the version of Terraform that created + // the snapshot. + TerraformVersion *version.Version +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/plan.go b/vendor/github.com/hashicorp/terraform/states/statemgr/plan.go new file mode 100644 index 00000000..b5036030 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/plan.go @@ -0,0 +1,71 @@ +package statemgr + +import ( + "fmt" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" +) + +// PlannedStateUpdate is a special helper to obtain a statefile representation +// of a not-yet-written state snapshot that can be written later by a call +// to the companion function WritePlannedStateUpdate. +// +// The statefile object returned here has an unusual interpretation of its +// metadata that is understood only by WritePlannedStateUpdate, and so the +// returned object should not be used for any other purpose. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. +// It is not safe to modify the given state concurrently while +// PlannedStateUpdate is running. +func PlannedStateUpdate(mgr Transient, planned *states.State) *statefile.File { + ret := &statefile.File{ + State: planned.DeepCopy(), + } + + // If the given manager uses snapshot metadata then we'll save that + // in our file so we can check it again during WritePlannedStateUpdate. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + ret.Lineage = m.Lineage + ret.Serial = m.Serial + } + + return ret +} + +// WritePlannedStateUpdate is a companion to PlannedStateUpdate that attempts +// to apply a state update that was planned earlier to the given state +// manager. 
+// +// An error is returned if this function detects that a new state snapshot +// has been written to the backend since the update was planned, since that +// invalidates the plan. An error is returned also if the manager itself +// rejects the given state when asked to store it. +// +// If the returned error is nil, the given manager's transient state snapshot +// is updated to match what was planned. It is the caller's responsibility +// to then persist that state if the manager also implements Persistent and +// the snapshot should be written to the persistent store. +// +// If the state manager implements Locker then it is the caller's +// responsibility to hold the lock at least for the duration of this call. +func WritePlannedStateUpdate(mgr Transient, planned *statefile.File) error { + // If the given manager uses snapshot metadata then we'll check to make + // sure no new snapshots have been created since we planned to write + // the given state file. + if mr, ok := mgr.(PersistentMeta); ok { + m := mr.StateSnapshotMeta() + if planned.Lineage != "" { + if planned.Lineage != m.Lineage { + return fmt.Errorf("planned state update is from an unrelated state lineage than the current state") + } + if planned.Serial != m.Serial { + return fmt.Errorf("stored state has been changed by another operation since the given update was planned") + } + } + } + + return mgr.WriteState(planned.State) +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/snapshotmetarel_string.go b/vendor/github.com/hashicorp/terraform/states/statemgr/snapshotmetarel_string.go new file mode 100644 index 00000000..a5b66138 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/snapshotmetarel_string.go @@ -0,0 +1,37 @@ +// Code generated by "stringer -type=SnapshotMetaRel"; DO NOT EDIT. + +package statemgr + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SnapshotOlder-60] + _ = x[SnapshotNewer-62] + _ = x[SnapshotEqual-61] + _ = x[SnapshotUnrelated-33] + _ = x[SnapshotLegacy-63] +} + +const ( + _SnapshotMetaRel_name_0 = "SnapshotUnrelated" + _SnapshotMetaRel_name_1 = "SnapshotOlderSnapshotEqualSnapshotNewerSnapshotLegacy" +) + +var ( + _SnapshotMetaRel_index_1 = [...]uint8{0, 13, 26, 39, 53} +) + +func (i SnapshotMetaRel) String() string { + switch { + case i == 33: + return _SnapshotMetaRel_name_0 + case 60 <= i && i <= 63: + i -= 60 + return _SnapshotMetaRel_name_1[_SnapshotMetaRel_index_1[i]:_SnapshotMetaRel_index_1[i+1]] + default: + return "SnapshotMetaRel(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr.go b/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr.go new file mode 100644 index 00000000..672f10a9 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr.go @@ -0,0 +1,26 @@ +package statemgr + +// Storage is the union of Transient and Persistent, for state managers that +// have both transient and persistent storage. +// +// Types implementing this interface coordinate between their Transient +// and Persistent implementations so that the persistent operations read +// or write the transient store. +type Storage interface { + Transient + Persistent +} + +// Full is the union of all of the more-specific state interfaces. 
+// +// This interface may grow over time, so state implementations aiming to +// implement it may need to be modified for future changes. To ensure that +// this need can be detected, always include a statement nearby the declaration +// of the implementing type that will fail at compile time if the interface +// isn't satisfied, such as: +// +// var _ statemgr.Full = (*ImplementingType)(nil) +type Full interface { + Storage + Locker +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr_fake.go b/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr_fake.go new file mode 100644 index 00000000..42d2b9bb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/statemgr_fake.go @@ -0,0 +1,96 @@ +package statemgr + +import ( + "errors" + "sync" + + "github.com/hashicorp/terraform/states" +) + +// NewFullFake returns a full state manager that really only supports transient +// snapshots. This is primarily intended for testing and is not suitable for +// general use. +// +// The persistent part of the interface is stubbed out as an in-memory store, +// and so its snapshots are effectively also transient. +// +// The given Transient implementation is used to implement the transient +// portion of the interface. If nil is given, NewTransientInMemory is +// automatically called to create an in-memory transient manager with no +// initial transient snapshot. +// +// If the given initial state is non-nil then a copy of it will be used as +// the initial persistent snapshot. +// +// The Locker portion of the returned manager uses a local mutex to simulate +// mutually-exclusive access to the fake persistent portion of the object. +func NewFullFake(t Transient, initial *states.State) Full { + if t == nil { + t = NewTransientInMemory(nil) + } + + // The "persistent" part of our manager is actually just another in-memory + // transient used to fake a secondary storage layer. 
+ fakeP := NewTransientInMemory(initial.DeepCopy()) + + return &fakeFull{ + t: t, + fakeP: fakeP, + } +} + +type fakeFull struct { + t Transient + fakeP Transient + + lockLock sync.Mutex + locked bool +} + +var _ Full = (*fakeFull)(nil) + +func (m *fakeFull) State() *states.State { + return m.t.State() +} + +func (m *fakeFull) WriteState(s *states.State) error { + return m.t.WriteState(s) +} + +func (m *fakeFull) RefreshState() error { + return m.t.WriteState(m.fakeP.State()) +} + +func (m *fakeFull) PersistState() error { + return m.fakeP.WriteState(m.t.State()) +} + +func (m *fakeFull) Lock(info *LockInfo) (string, error) { + m.lockLock.Lock() + defer m.lockLock.Unlock() + + if m.locked { + return "", &LockError{ + Err: errors.New("fake state manager is locked"), + Info: info, + } + } + + m.locked = true + return "placeholder", nil +} + +func (m *fakeFull) Unlock(id string) error { + m.lockLock.Lock() + defer m.lockLock.Unlock() + + if !m.locked { + return errors.New("fake state manager is not locked") + } + if id != "placeholder" { + return errors.New("wrong lock id for fake state manager") + } + + m.locked = false + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/testing.go b/vendor/github.com/hashicorp/terraform/states/statemgr/testing.go new file mode 100644 index 00000000..8b2f2cbb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/states/statemgr/testing.go @@ -0,0 +1,157 @@ +package statemgr + +import ( + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" + + "github.com/hashicorp/terraform/states/statefile" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/states" +) + +// TestFull is a helper for testing full state manager implementations. It +// expects that the given implementation is pre-loaded with a snapshot of the +// result from TestFullInitialState. +// +// If the given state manager also implements PersistentMeta, this function +// will test that the snapshot metadata changes as expected between calls +// to the methods of Persistent. +func TestFull(t *testing.T, s Full) { + t.Helper() + + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + // Check that the initial state is correct. + // These do have different Lineages, but we will replace current below. + initial := TestFullInitialState() + if state := s.State(); !state.Equal(initial) { + t.Fatalf("state does not match expected initial state\n\ngot:\n%s\nwant:\n%s", spew.Sdump(state), spew.Sdump(initial)) + } + + var initialMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + initialMeta = sm.StateSnapshotMeta() + } + + // Now we've proven that the state we're starting with is an initial + // state, we'll complete our work here with that state, since otherwise + // further writes would violate the invariant that we only try to write + // states that share the same lineage as what was initially written. 
+ current := s.State() + + // Write a new state and verify that we have it + current.RootModule().SetOutputValue("bar", cty.StringVal("baz"), false) + + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + + if actual := s.State(); !actual.Equal(current) { + t.Fatalf("bad:\n%#v\n\n%#v", actual, current) + } + + // Test persistence + if err := s.PersistState(); err != nil { + t.Fatalf("err: %s", err) + } + + // Refresh if we got it + if err := s.RefreshState(); err != nil { + t.Fatalf("err: %s", err) + } + + var newMeta SnapshotMeta + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if got, want := newMeta.Lineage, initialMeta.Lineage; got != want { + t.Errorf("Lineage changed from %q to %q", want, got) + } + if after, before := newMeta.Serial, initialMeta.Serial; after == before { + t.Errorf("Serial didn't change from %d after new module added", before) + } + } + + // Same serial + serial := newMeta.Serial + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + if newMeta.Serial != serial { + t.Fatalf("serial changed after persisting with no changes: got %d, want %d", newMeta.Serial, serial) + } + } + + if sm, ok := s.(PersistentMeta); ok { + newMeta = sm.StateSnapshotMeta() + } + + // Change the serial + current = current.DeepCopy() + current.EnsureModule(addrs.RootModuleInstance).SetOutputValue( + "serialCheck", cty.StringVal("true"), false, + ) + if err := s.WriteState(current); err != nil { + t.Fatalf("err: %s", err) + } + if err := s.PersistState(); err != nil { + t.Fatalf("err: %s", err) + } + + if sm, ok := s.(PersistentMeta); ok { + oldMeta := newMeta + newMeta = sm.StateSnapshotMeta() + + if newMeta.Serial <= serial { + t.Fatalf("serial incorrect after persisting with changes: got %d, want > %d", newMeta.Serial, serial) + } + + if newMeta.TerraformVersion != oldMeta.TerraformVersion { + t.Fatalf("TFVersion changed from %s to %s", oldMeta.TerraformVersion, newMeta.TerraformVersion) + } + + // verify that Lineage doesn't change along with Serial, or during copying. + if newMeta.Lineage != oldMeta.Lineage { + t.Fatalf("Lineage changed from %q to %q", oldMeta.Lineage, newMeta.Lineage) + } + } + + // Check that State() returns a copy by modifying the copy and comparing + // to the current state. + stateCopy := s.State() + stateCopy.EnsureModule(addrs.RootModuleInstance.Child("another", addrs.NoKey)) + if reflect.DeepEqual(stateCopy, s.State()) { + t.Fatal("State() should return a copy") + } + + // our current expected state should also marshal identically to the persisted state + if !statefile.StatesMarshalEqual(current, s.State()) { + t.Fatalf("Persisted state altered unexpectedly.\n\ngot:\n%s\nwant:\n%s", spew.Sdump(s.State()), spew.Sdump(current)) + } +} + +// TestFullInitialState is a state that should be snapshotted into a +// full state manager before passing it into TestFull. 
+func TestFullInitialState() *states.State {
+	state := states.NewState()
+	childMod := state.EnsureModule(addrs.RootModuleInstance.Child("child", addrs.NoKey))
+	rAddr := addrs.Resource{
+		Mode: addrs.ManagedResourceMode,
+		Type: "null_resource",
+		Name: "foo",
+	}
+	childMod.SetResourceMeta(rAddr, states.EachList, rAddr.DefaultProviderConfig().Absolute(addrs.RootModuleInstance))
+	return state
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/transient.go b/vendor/github.com/hashicorp/terraform/states/statemgr/transient.go
new file mode 100644
index 00000000..087fd278
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statemgr/transient.go
@@ -0,0 +1,66 @@
+package statemgr
+
+import "github.com/hashicorp/terraform/states"
+
+// Transient is a union of the Reader and Writer interfaces, for types that
+// deal with transient snapshots.
+//
+// Transient snapshots are ones that are generally retained only locally and
+// do not create any historical version record when updated. Transient
+// snapshots are not expected to outlive a particular Terraform process,
+// and are not shared with any other process.
+//
+// A state manager type that is primarily concerned with persistent storage
+// may embed type Transient and then call State from its PersistState and
+// WriteState from its RefreshState in order to build on any existing
+// Transient implementation, such as the one returned by NewTransientInMemory.
+type Transient interface {
+	Reader
+	Writer
+}
+
+// Reader is the interface for managers that can return transient snapshots
+// of state.
+//
+// Retrieving the snapshot must not fail, so retrieving a snapshot from remote
+// storage (for example) should be dealt with elsewhere, often in an
+// implementation of Refresher. For a type that implements both Reader
+// and Refresher, it is okay for State to return nil if called before
+// a RefreshState call has completed.
+//
+// For a type that implements both Reader and Writer, State must return the
+// result of the most recently completed call to WriteState, and the state
+// manager must accept concurrent calls to both State and WriteState.
+//
+// Each caller of this function must get a distinct copy of the state, and
+// it must also be distinct from any instance cached inside the reader, to
+// ensure that mutations of the returned state will not affect the values
+// returned to other callers.
+type Reader interface {
+	// State returns the latest state.
+	//
+	// Each call to State returns an entirely-distinct copy of the state, with
+	// no storage shared with any other call, so the caller may freely mutate
+	// the returned object via the state APIs.
+	State() *states.State
+}
+
+// Writer is the interface for managers that can create transient snapshots
+// from state.
+//
+// Writer is the opposite of Reader, and so it must update whatever the State
+// method reads from. It does not write the state to any persistent
+// storage, and (for managers that support historical versions) must not
+// be recorded as a persistent new version of state.
+//
+// Implementations that cache the state in memory must take a deep copy of it,
+// since the caller may continue to modify the given state object after
+// WriteState returns.
+type Writer interface {
+	// WriteState saves a transient snapshot of the given state.
+	//
+	// The caller must ensure that the given state object is not concurrently
+	// modified while a WriteState call is in progress. WriteState itself
+	// will never modify the given state.
+	WriteState(*states.State) error
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/statemgr/transient_inmem.go b/vendor/github.com/hashicorp/terraform/states/statemgr/transient_inmem.go
new file mode 100644
index 00000000..07fd3726
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statemgr/transient_inmem.go
@@ -0,0 +1,41 @@
+package statemgr
+
+import (
+	"sync"
+
+	"github.com/hashicorp/terraform/states"
+)
+
+// NewTransientInMemory returns a Transient implementation that retains
+// transient snapshots only in memory, as part of the object.
+//
+// The given initial state, if any, must not be modified concurrently while
+// this function is running, but may be freely modified once this function
+// returns without affecting the stored transient snapshot.
+func NewTransientInMemory(initial *states.State) Transient {
+	return &transientInMemory{
+		current: initial.DeepCopy(),
+	}
+}
+
+type transientInMemory struct {
+	lock    sync.RWMutex
+	current *states.State
+}
+
+var _ Transient = (*transientInMemory)(nil)
+
+func (m *transientInMemory) State() *states.State {
+	m.lock.RLock()
+	defer m.lock.RUnlock()
+
+	return m.current.DeepCopy()
+}
+
+func (m *transientInMemory) WriteState(new *states.State) error {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+
+	m.current = new.DeepCopy()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go
new file mode 100644
index 00000000..a3774467
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/sync.go
@@ -0,0 +1,537 @@
+package states
+
+import (
+	"log"
+	"sync"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// SyncState is a wrapper around State that provides concurrency-safe access to
+// various common operations that occur during a Terraform graph walk, or other
+// similar concurrent contexts.
+//
+// When a SyncState wrapper is in use, no concurrent direct access to the
+// underlying objects is permitted unless the caller first acquires an explicit
+// lock, using the Lock and Unlock methods. Most callers should _not_
+// explicitly lock, and should instead use the other methods of this type that
+// handle locking automatically.
+//
+// Since SyncState is able to safely consolidate multiple updates into a single
+// atomic operation, many of its methods are at a higher level than those
+// of the underlying types, and operate on the state as a whole rather than
+// on individual sub-structures of the state.
+//
+// SyncState can only protect against races within its own methods. It cannot
+// provide any guarantees about the order in which concurrent operations will
+// be processed, so callers may still need to employ higher-level techniques
+// for ensuring correct operation sequencing, such as building and walking
+// a dependency graph.
+type SyncState struct {
+	state *State
+	lock  sync.RWMutex
+}
+
+// Module returns a snapshot of the state of the module instance with the given
+// address, or nil if no such module is tracked.
+//
+// The return value is a pointer to a copy of the module state, which the
+// caller may then freely access and mutate. However, since the module state
+// tends to be a large data structure with many child objects, where possible
+// callers should prefer to use a more granular accessor to access a child
+// module directly, and thus reduce the amount of copying required.
+func (s *SyncState) Module(addr addrs.ModuleInstance) *Module {
+	s.lock.RLock()
+	ret := s.state.Module(addr).DeepCopy()
+	s.lock.RUnlock()
+	return ret
+}
+
+// RemoveModule removes the entire state for the given module, taking with
+// it any resources associated with the module. This should generally be
+// called only for modules whose resources have all been destroyed, but
+// that is not enforced by this method.
+func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	s.state.RemoveModule(addr)
+}
+
+// OutputValue returns a snapshot of the state of the output value with the
+// given address, or nil if no such output value is tracked.
+//
+// The return value is a pointer to a copy of the output value state, which the
+// caller may then freely access and mutate.
+func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
+	s.lock.RLock()
+	ret := s.state.OutputValue(addr).DeepCopy()
+	s.lock.RUnlock()
+	return ret
+}
+
+// SetOutputValue writes a given output value into the state, overwriting
+// any existing value of the same name.
+//
+// If the module containing the output is not yet tracked in state then it
+// will be added as a side-effect.
+func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.EnsureModule(addr.Module)
+	ms.SetOutputValue(addr.OutputValue.Name, value, sensitive)
+}
+
+// RemoveOutputValue removes the stored value for the output value with the
+// given address.
+//
+// If this results in its containing module being empty, the module will be
+// pruned from the state as a side-effect.
+func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return
+	}
+	ms.RemoveOutputValue(addr.OutputValue.Name)
+	s.maybePruneModule(addr.Module)
+}
+
+// LocalValue returns the current value associated with the given local value
+// address.
+func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value {
+	s.lock.RLock()
+	// cty.Value is immutable, so we don't need any extra copying here.
+	ret := s.state.LocalValue(addr)
+	s.lock.RUnlock()
+	return ret
+}
+
+// SetLocalValue writes a given local value into the state, overwriting
+// any existing value of the same name.
+//
+// If the module containing the local value is not yet tracked in state then it
+// will be added as a side-effect.
+func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.EnsureModule(addr.Module)
+	ms.SetLocalValue(addr.LocalValue.Name, value)
+}
+
+// RemoveLocalValue removes the stored value for the local value with the
+// given address.
+//
+// If this results in its containing module being empty, the module will be
+// pruned from the state as a side-effect.
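As an editorial illustration (not part of the vendored file), the output-value helpers above are typically called with a fully-qualified address; `setRootOutput` is a hypothetical name, and the struct-literal layout of `addrs.AbsOutputValue` is an assumption about the addrs package:

```go
package example

import (
	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/states"
	"github.com/zclconf/go-cty/cty"
)

// setRootOutput records an output value on the root module through the
// concurrency-safe wrapper. (Sketch only; setRootOutput is hypothetical.)
func setRootOutput(ss *states.SyncState, name string, v cty.Value) {
	addr := addrs.AbsOutputValue{
		Module:      addrs.RootModuleInstance,
		OutputValue: addrs.OutputValue{Name: name},
	}
	// The containing module is created automatically if not yet tracked.
	ss.SetOutputValue(addr, v, false)
}
```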
+func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return
+	}
+	ms.RemoveLocalValue(addr.LocalValue.Name)
+	s.maybePruneModule(addr.Module)
+}
+
+// Resource returns a snapshot of the state of the resource with the given
+// address, or nil if no such resource is tracked.
+//
+// The return value is a pointer to a copy of the resource state, which the
+// caller may then freely access and mutate.
+func (s *SyncState) Resource(addr addrs.AbsResource) *Resource {
+	s.lock.RLock()
+	ret := s.state.Resource(addr).DeepCopy()
+	s.lock.RUnlock()
+	return ret
+}
+
+// ResourceInstance returns a snapshot of the state of the resource instance
+// with the given address, or nil if no such instance is tracked.
+//
+// The return value is a pointer to a copy of the instance state, which the
+// caller may then freely access and mutate.
+func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
+	s.lock.RLock()
+	ret := s.state.ResourceInstance(addr).DeepCopy()
+	s.lock.RUnlock()
+	return ret
+}
+
+// ResourceInstanceObject returns a snapshot of the current instance object
+// of the given generation belonging to the instance with the given address,
+// or nil if no such object is tracked.
+//
+// The return value is a pointer to a copy of the object, which the caller may
+// then freely access and mutate.
+func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc {
+	s.lock.RLock()
+	defer s.lock.RUnlock()
+
+	inst := s.state.ResourceInstance(addr)
+	if inst == nil {
+		return nil
+	}
+	return inst.GetGeneration(gen).DeepCopy()
+}
+
+// SetResourceMeta updates the resource-level metadata for the resource at
+// the given address, creating the containing module state and resource state
+// as a side-effect if not already present.
+func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.EnsureModule(addr.Module)
+	ms.SetResourceMeta(addr.Resource, eachMode, provider)
+}
+
+// RemoveResource removes the entire state for the given resource, taking with
+// it any instances associated with the resource. This should generally be
+// called only for resource objects whose instances have all been destroyed,
+// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead
+// to safely check first.)
+func (s *SyncState) RemoveResource(addr addrs.AbsResource) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.EnsureModule(addr.Module)
+	ms.RemoveResource(addr.Resource)
+	s.maybePruneModule(addr.Module)
+}
+
+// RemoveResourceIfEmpty is similar to RemoveResource but first checks to
+// make sure there are no instances or objects left in the resource.
+//
+// Returns true if the resource was removed, or false if remaining child
+// objects prevented its removal. Returns true also if the resource was
+// already absent, and thus no action needed to be taken.
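The check-first removal just described might be used as follows; this is an editorial sketch (not part of the vendored file), with `pruneIfEmpty` and the resource address as hypothetical examples:

```go
package example

import (
	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/states"
)

// pruneIfEmpty removes a resource from state only when it has no instances
// left. (Sketch only; pruneIfEmpty is hypothetical.)
func pruneIfEmpty(ss *states.SyncState) bool {
	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "null_resource",
		Name: "example",
	}.Absolute(addrs.RootModuleInstance)

	// Returns true if the resource was removed or was already absent.
	return ss.RemoveResourceIfEmpty(addr)
}
```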
+func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return true // nothing to do
+	}
+	rs := ms.Resource(addr.Resource)
+	if rs == nil {
+		return true // nothing to do
+	}
+	if len(rs.Instances) != 0 {
+		// We don't check here for the possibility of instances that exist
+		// but don't have any objects because it's the responsibility of the
+		// instance-mutation methods to prune those away automatically.
+		return false
+	}
+	ms.RemoveResource(addr.Resource)
+	s.maybePruneModule(addr.Module)
+	return true
+}
+
+// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a
+// resource has changed from having "count" set to not set, or vice-versa, and
+// so we need to rename the zeroth instance key to no key at all, or vice-versa.
+//
+// Set countEnabled to true if the resource has count set in its new
+// configuration, or false if it does not.
+//
+// The state is modified in-place if necessary, moving a resource instance
+// between the two addresses. The return value is true if a change was made,
+// and false otherwise.
+func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return false
+	}
+
+	relAddr := addr.Resource
+	rs := ms.Resource(relAddr)
+	if rs == nil {
+		return false
+	}
+	huntKey := addrs.NoKey
+	replaceKey := addrs.InstanceKey(addrs.IntKey(0))
+	if !countEnabled {
+		huntKey, replaceKey = replaceKey, huntKey
+	}
+
+	is, exists := rs.Instances[huntKey]
+	if !exists {
+		return false
+	}
+
+	if _, exists := rs.Instances[replaceKey]; exists {
+		// If the replacement key also exists then we'll do nothing and keep both.
+		return false
+	}
+
+	// If we get here then we need to "rename" from hunt to replace
+	rs.Instances[replaceKey] = is
+	delete(rs.Instances, huntKey)
+	return true
+}
+
+// SetResourceInstanceCurrent saves the given instance object as the current
+// generation of the resource instance with the given address, simultaneously
+// updating the recorded provider configuration address, dependencies, and
+// resource EachMode.
+//
+// Any existing current instance object for the given resource is overwritten.
+// Set obj to nil to remove the primary generation object altogether. If there
+// are no deposed objects then the instance as a whole will be removed, which
+// may in turn also remove the containing module if it becomes empty.
+//
+// The caller must ensure that the given ResourceInstanceObject is not
+// concurrently mutated during this call, but may be freely used again once
+// this function returns.
+//
+// The provider address and "each mode" are resource-wide settings and so they
+// are updated for all other instances of the same resource as a side-effect of
+// this call.
+//
+// If the containing module for this resource or the resource itself are not
+// already tracked in state then they will be added as a side-effect.
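To make the count fix-up above concrete, here is an editorial sketch (not part of the vendored file) of calling it after a configuration change; `fixUpCount` and the resource address are hypothetical:

```go
package example

import (
	"log"

	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/states"
)

// fixUpCount renames between the no-key and zero-index instance addresses
// after "count" is added to or removed from a resource block.
// (Sketch only; fixUpCount is hypothetical.)
func fixUpCount(ss *states.SyncState, countNowSet bool) {
	addr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "null_resource",
		Name: "example",
	}.Absolute(addrs.RootModuleInstance)

	if ss.MaybeFixUpResourceInstanceAddressForCount(addr, countNowSet) {
		// e.g. null_resource.example was renamed to null_resource.example[0]
		log.Printf("renamed instance key for %s", addr)
	}
}
```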
+func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// SetResourceInstanceDeposed saves the given instance object as a deposed +// generation of the resource instance with the given address and deposed key. +// +// Call this method only for pre-existing deposed objects that already have +// a known DeposedKey. For example, this method is useful if reloading objects +// that were persisted to a state file. To mark the current object as deposed, +// use DeposeResourceInstanceObject instead. +// +// The caller must ensure that the given ResourceInstanceObject is not +// concurrently mutated during this call, but may be freely used again once +// this function returns. +// +// The resource that contains the given instance must already exist in the +// state, or this method will panic. Use Resource to check first if its +// presence is not already guaranteed. +// +// Any existing current instance object for the given resource and deposed key +// is overwritten. Set obj to nil to remove the deposed object altogether. If +// the instance is left with no objects after this operation then it will +// be removed from its containing resource altogether. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then they will be added as a side-effect. +func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.EnsureModule(addr.Module) + ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider) + s.maybePruneModule(addr.Module) +} + +// DeposeResourceInstanceObject moves the current instance object for the +// given resource instance address into the deposed set, leaving the instance +// without a current object. +// +// The return value is the newly-allocated deposed key, or NotDeposed if the +// given instance is already lacking a current object. +// +// If the containing module for this resource or the resource itself are not +// already tracked in state then there cannot be a current object for the +// given instance, and so NotDeposed will be returned without modifying the +// state at all. +func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey { + s.lock.Lock() + defer s.lock.Unlock() + + ms := s.state.Module(addr.Module) + if ms == nil { + return NotDeposed + } + + return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed) +} + +// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject +// but uses a pre-allocated key. It's the caller's responsibility to ensure +// that there aren't any races to use a particular key; this method will panic +// if the given key is already in use. 
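The depose operation documented above is most often used through DeposeResourceInstanceObject; the following editorial sketch (not part of the vendored file) shows that call, with `deposeCurrent` and the address as hypothetical names:

```go
package example

import (
	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/states"
)

// deposeCurrent moves the current object for one instance into the deposed
// set and returns the allocated key. (Sketch only; deposeCurrent is
// hypothetical.)
func deposeCurrent(ss *states.SyncState) states.DeposedKey {
	instAddr := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "null_resource",
		Name: "example",
	}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance)

	key := ss.DeposeResourceInstanceObject(instAddr)
	// key is states.NotDeposed if the instance had no current object.
	return key
}
```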
+func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if forcedKey == NotDeposed {
+		// Usage error: should use DeposeResourceInstanceObject in this case
+		panic("DeposeResourceInstanceObjectForceKey called without forced key")
+	}
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return // Nothing to do, since there can't be any current object either.
+	}
+
+	ms.deposeResourceInstanceObject(addr.Resource, forcedKey)
+}
+
+// ForgetResourceInstanceAll removes the record of all objects associated with
+// the specified resource instance, if present. If not present, this is a no-op.
+func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return
+	}
+	ms.ForgetResourceInstanceAll(addr.Resource)
+	s.maybePruneModule(addr.Module)
+}
+
+// ForgetResourceInstanceDeposed removes the record of the deposed object with
+// the given address and key, if present. If not present, this is a no-op.
+func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		return
+	}
+	ms.ForgetResourceInstanceDeposed(addr.Resource, key)
+	s.maybePruneModule(addr.Module)
+}
+
+// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the
+// given key on the specified resource as the current object for that instance
+// if and only if that would not cause us to forget an existing current
+// object for that instance.
+//
+// Returns true if the object was restored to current, or false if no change
+// was made at all.
+func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
+	s.lock.Lock()
+	defer s.lock.Unlock()
+
+	if key == NotDeposed {
+		panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
+	}
+
+	ms := s.state.Module(addr.Module)
+	if ms == nil {
+		// Nothing to do, since the specified deposed object cannot exist.
+		return false
+	}
+
+	return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
+}
+
+// RemovePlannedResourceInstanceObjects removes from the state any resource
+// instance objects that have the status ObjectPlanned, indicating that they
+// are just transient placeholders created during planning.
+//
+// Note that this does not restore any "ready" or "tainted" object that might
+// have been present before the planned object was written. The only real use
+// for this method is in preparing the state created during a refresh walk,
+// where we run the planning step for certain instances just to create enough
+// information to allow correct expression evaluation within provider and
+// data resource blocks. Discarding planned instances in that case is okay
+// because the refresh phase only creates planned objects to stand in for
+// objects that don't exist yet, and thus the planned object must have been
+// absent before by definition.
+func (s *SyncState) RemovePlannedResourceInstanceObjects() {
+	// TODO: Merge together the refresh and plan phases into a single walk,
+	// so we can remove the need to create this "partial plan" during refresh
+	// that we then need to clean up before proceeding.
+ + s.lock.Lock() + defer s.lock.Unlock() + + for _, ms := range s.state.Modules { + moduleAddr := ms.Addr + + for _, rs := range ms.Resources { + resAddr := rs.Addr + + for ik, is := range rs.Instances { + instAddr := resAddr.Instance(ik) + + if is.Current != nil && is.Current.Status == ObjectPlanned { + // Setting the current instance to nil removes it from the + // state altogether if there are not also deposed instances. + ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig) + } + + for dk, obj := range is.Deposed { + // Deposed objects should never be "planned", but we'll + // do this anyway for the sake of completeness. + if obj.Status == ObjectPlanned { + ms.ForgetResourceInstanceDeposed(instAddr, dk) + } + } + } + } + + // We may have deleted some objects, which means that we may have + // left a module empty, and so we must prune to preserve the invariant + // that only the root module is allowed to be empty. + s.maybePruneModule(moduleAddr) + } +} + +// Lock acquires an explicit lock on the state, allowing direct read and write +// access to the returned state object. The caller must call Unlock once +// access is no longer needed, and then immediately discard the state pointer +// pointer. +// +// Most callers should not use this. Instead, use the concurrency-safe +// accessors and mutators provided directly on SyncState. +func (s *SyncState) Lock() *State { + s.lock.Lock() + return s.state +} + +// Unlock releases a lock previously acquired by Lock, at which point the +// caller must cease all use of the state pointer that was returned. +// +// Do not call this method except to end an explicit lock acquired by +// Lock. If a caller calls Unlock without first holding the lock, behavior +// is undefined. +func (s *SyncState) Unlock() { + s.lock.Unlock() +} + +// maybePruneModule will remove a module from the state altogether if it is +// empty, unless it's the root module which must always be present. +// +// This helper method is not concurrency-safe on its own, so must only be +// called while the caller is already holding the lock for writing. +func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) { + if addr.IsRoot() { + // We never prune the root. + return + } + + ms := s.state.Module(addr) + if ms == nil { + return + } + + if ms.empty() { + log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr) + s.state.RemoveModule(addr) + } +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go new file mode 100644 index 00000000..4f0d1689 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/cache.go @@ -0,0 +1,45 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// CachingCredentialsSource creates a new credentials source that wraps another +// and caches its results in memory, on a per-hostname basis. +// +// No means is provided for expiration of cached credentials, so a caching +// credentials source should have a limited lifetime (one Terraform operation, +// for example) to ensure that time-limited credentials don't expire before +// their cache entries do. 
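A short editorial sketch (not part of the vendored file) of wrapping another source with this cache; `newCachedCreds` and the token value are hypothetical, and StaticCredentialsSource is defined later in this diff:

```go
package example

import (
	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

// newCachedCreds wraps a static credentials source with per-hostname
// caching. (Sketch only; newCachedCreds and the token are hypothetical.)
func newCachedCreds() auth.CredentialsSource {
	static := auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
		svchost.Hostname("app.terraform.io"): {"token": "example-token"},
	})
	// Repeated ForHost calls for the same hostname now hit the cache.
	return auth.CachingCredentialsSource(static)
}
```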
+func CachingCredentialsSource(source CredentialsSource) CredentialsSource { + return &cachingCredentialsSource{ + source: source, + cache: map[svchost.Hostname]HostCredentials{}, + } +} + +type cachingCredentialsSource struct { + source CredentialsSource + cache map[svchost.Hostname]HostCredentials +} + +// ForHost passes the given hostname on to the wrapped credentials source and +// caches the result to return for future requests with the same hostname. +// +// Both credentials and non-credentials (nil) responses are cached. +// +// No cache entry is created if the wrapped source returns an error, to allow +// the caller to retry the failing operation. +func (s *cachingCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if cache, cached := s.cache[host]; cached { + return cache, nil + } + + result, err := s.source.ForHost(host) + if err != nil { + return result, err + } + + s.cache[host] = result + return result, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go new file mode 100644 index 00000000..0372c160 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/credentials.go @@ -0,0 +1,63 @@ +// Package auth contains types and functions to manage authentication +// credentials for service hosts. +package auth + +import ( + "net/http" + + "github.com/hashicorp/terraform/svchost" +) + +// Credentials is a list of CredentialsSource objects that can be tried in +// turn until one returns credentials for a host, or one returns an error. +// +// A Credentials is itself a CredentialsSource, wrapping its members. +// In principle one CredentialsSource can be nested inside another, though +// there is no good reason to do so. +type Credentials []CredentialsSource + +// NoCredentials is an empty CredentialsSource that always returns nil +// when asked for credentials. +var NoCredentials CredentialsSource = Credentials{} + +// A CredentialsSource is an object that may be able to provide credentials +// for a given host. +// +// Credentials lookups are not guaranteed to be concurrency-safe. Callers +// using these facilities in concurrent code must use external concurrency +// primitives to prevent race conditions. +type CredentialsSource interface { + // ForHost returns a non-nil HostCredentials if the source has credentials + // available for the host, and a nil HostCredentials if it does not. + // + // If an error is returned, progress through a list of CredentialsSources + // is halted and the error is returned to the user. + ForHost(host svchost.Hostname) (HostCredentials, error) +} + +// HostCredentials represents a single set of credentials for a particular +// host. +type HostCredentials interface { + // PrepareRequest modifies the given request in-place to apply the + // receiving credentials. The usual behavior of this method is to + // add some sort of Authorization header to the request. + PrepareRequest(req *http.Request) + + // Token returns the authentication token. + Token() string +} + +// ForHost iterates over the contained CredentialsSource objects and +// tries to obtain credentials for the given host from each one in turn. +// +// If any source returns either a non-nil HostCredentials or a non-nil error +// then this result is returned. Otherwise, the result is nil, nil. 
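For illustration (editorial, not part of the vendored file), a Credentials list composes multiple sources so that each is tried in turn; `lookupToken` is a hypothetical name:

```go
package example

import (
	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/auth"
)

// lookupToken tries each source in order and returns the first token found,
// or "" if no source has credentials for the host.
// (Sketch only; lookupToken is hypothetical.)
func lookupToken(host svchost.Hostname, primary, fallback auth.CredentialsSource) (string, error) {
	creds := auth.Credentials{primary, fallback}
	hc, err := creds.ForHost(host)
	if err != nil || hc == nil {
		return "", err
	}
	return hc.Token(), nil
}
```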
+func (c Credentials) ForHost(host svchost.Hostname) (HostCredentials, error) { + for _, source := range c { + creds, err := source.ForHost(host) + if creds != nil || err != nil { + return creds, err + } + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go new file mode 100644 index 00000000..f91006ae --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/from_map.go @@ -0,0 +1,18 @@ +package auth + +// HostCredentialsFromMap converts a map of key-value pairs from a credentials +// definition provided by the user (e.g. in a config file, or via a credentials +// helper) into a HostCredentials object if possible, or returns nil if +// no credentials could be extracted from the map. +// +// This function ignores map keys it is unfamiliar with, to allow for future +// expansion of the credentials map format for new credential types. +func HostCredentialsFromMap(m map[string]interface{}) HostCredentials { + if m == nil { + return nil + } + if token, ok := m["token"].(string); ok { + return HostCredentialsToken(token) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go new file mode 100644 index 00000000..d72ffe3c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/helper_program.go @@ -0,0 +1,80 @@ +package auth + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "path/filepath" + + "github.com/hashicorp/terraform/svchost" +) + +type helperProgramCredentialsSource struct { + executable string + args []string +} + +// HelperProgramCredentialsSource returns a CredentialsSource that runs the +// given program with the given arguments in order to obtain credentials. +// +// The given executable path must be an absolute path; it is the caller's +// responsibility to validate and process a relative path or other input +// provided by an end-user. If the given path is not absolute, this +// function will panic. +// +// When credentials are requested, the program will be run in a child process +// with the given arguments along with two additional arguments added to the +// end of the list: the literal string "get", followed by the requested +// hostname in ASCII compatibility form (punycode form). 
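To illustrate the helper-program protocol just described, here is a minimal sketch (editorial, not part of the vendored file) of a conforming helper: it is invoked with "get" and the hostname appended to its arguments, and must print a JSON credentials object on stdout. The token value is a placeholder:

```go
package main

import (
	"encoding/json"
	"os"
)

// A minimal credentials helper sketch. Invocation looks like:
//   <helper> [args...] get <hostname>
// On success it prints a JSON object such as {"token":"..."}; on failure it
// writes a message to stderr and exits non-zero.
func main() {
	if len(os.Args) < 3 || os.Args[len(os.Args)-2] != "get" {
		os.Stderr.WriteString("unsupported request")
		os.Exit(1)
	}
	json.NewEncoder(os.Stdout).Encode(map[string]string{
		"token": "example-token", // placeholder credential
	})
}
```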
+func HelperProgramCredentialsSource(executable string, args ...string) CredentialsSource { + if !filepath.IsAbs(executable) { + panic("NewCredentialsSourceHelperProgram requires absolute path to executable") + } + + fullArgs := make([]string, len(args)+1) + fullArgs[0] = executable + copy(fullArgs[1:], args) + + return &helperProgramCredentialsSource{ + executable: executable, + args: fullArgs, + } +} + +func (s *helperProgramCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + args := make([]string, len(s.args), len(s.args)+2) + copy(args, s.args) + args = append(args, "get") + args = append(args, string(host)) + + outBuf := bytes.Buffer{} + errBuf := bytes.Buffer{} + + cmd := exec.Cmd{ + Path: s.executable, + Args: args, + Stdin: nil, + Stdout: &outBuf, + Stderr: &errBuf, + } + err := cmd.Run() + if _, isExitErr := err.(*exec.ExitError); isExitErr { + errText := errBuf.String() + if errText == "" { + // Shouldn't happen for a well-behaved helper program + return nil, fmt.Errorf("error in %s, but it produced no error message", s.executable) + } + return nil, fmt.Errorf("error in %s: %s", s.executable, errText) + } else if err != nil { + return nil, fmt.Errorf("failed to run %s: %s", s.executable, err) + } + + var m map[string]interface{} + err = json.Unmarshal(outBuf.Bytes(), &m) + if err != nil { + return nil, fmt.Errorf("malformed output from %s: %s", s.executable, err) + } + + return HostCredentialsFromMap(m), nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/static.go b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go new file mode 100644 index 00000000..5373fddf --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/static.go @@ -0,0 +1,28 @@ +package auth + +import ( + "github.com/hashicorp/terraform/svchost" +) + +// StaticCredentialsSource is a credentials source that retrieves credentials +// from the provided map. It returns nil if a requested hostname is not +// present in the map. +// +// The caller should not modify the given map after passing it to this function. +func StaticCredentialsSource(creds map[svchost.Hostname]map[string]interface{}) CredentialsSource { + return staticCredentialsSource(creds) +} + +type staticCredentialsSource map[svchost.Hostname]map[string]interface{} + +func (s staticCredentialsSource) ForHost(host svchost.Hostname) (HostCredentials, error) { + if s == nil { + return nil, nil + } + + if m, exists := s[host]; exists { + return HostCredentialsFromMap(m), nil + } + + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go new file mode 100644 index 00000000..9358bcb6 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/auth/token_credentials.go @@ -0,0 +1,25 @@ +package auth + +import ( + "net/http" +) + +// HostCredentialsToken is a HostCredentials implementation that represents a +// single "bearer token", to be sent to the server via an Authorization header +// with the auth type set to "Bearer" +type HostCredentialsToken string + +// PrepareRequest alters the given HTTP request by setting its Authorization +// header to the string "Bearer " followed by the encapsulated authentication +// token. 
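For the bearer-token flow that the other sources in this package feed into, usage reduces to constructing a `HostCredentialsToken` and applying it to a request. A quick illustration (the URL and token are placeholders):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/hashicorp/terraform/svchost/auth"
)

func main() {
	creds := auth.HostCredentialsToken("placeholder-token")

	req, err := http.NewRequest("GET", "https://example.com/.well-known/terraform.json", nil)
	if err != nil {
		panic(err)
	}
	creds.PrepareRequest(req)

	fmt.Println(req.Header.Get("Authorization")) // Bearer placeholder-token
}
```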
+func (tc HostCredentialsToken) PrepareRequest(req *http.Request) { + if req.Header == nil { + req.Header = http.Header{} + } + req.Header.Set("Authorization", "Bearer "+string(tc)) +} + +// Token returns the authentication token. +func (tc HostCredentialsToken) Token() string { + return string(tc) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go new file mode 100644 index 00000000..1963cbd1 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/disco.go @@ -0,0 +1,259 @@ +// Package disco handles Terraform's remote service discovery protocol. +// +// This protocol allows mapping from a service hostname, as produced by the +// svchost package, to a set of services supported by that host and the +// endpoint information for each supported service. +package disco + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "mime" + "net/http" + "net/url" + "time" + + cleanhttp "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/httpclient" + "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" +) + +const ( + // Fixed path to the discovery manifest. + discoPath = "/.well-known/terraform.json" + + // Arbitrary-but-small number to prevent runaway redirect loops. + maxRedirects = 3 + + // Arbitrary-but-small time limit to prevent UI "hangs" during discovery. + discoTimeout = 11 * time.Second + + // 1MB - to prevent abusive services from using loads of our memory. + maxDiscoDocBytes = 1 * 1024 * 1024 +) + +// httpTransport is overridden during tests, to skip TLS verification. +var httpTransport = cleanhttp.DefaultPooledTransport() + +// Disco is the main type in this package, which allows discovery on given +// hostnames and caches the results by hostname to avoid repeated requests +// for the same information. +type Disco struct { + hostCache map[svchost.Hostname]*Host + credsSrc auth.CredentialsSource + + // Transport is a custom http.RoundTripper to use. + Transport http.RoundTripper +} + +// New returns a new initialized discovery object. +func New() *Disco { + return NewWithCredentialsSource(nil) +} + +// NewWithCredentialsSource returns a new discovery object initialized with +// the given credentials source. +func NewWithCredentialsSource(credsSrc auth.CredentialsSource) *Disco { + return &Disco{ + hostCache: make(map[svchost.Hostname]*Host), + credsSrc: credsSrc, + Transport: httpTransport, + } +} + +// SetCredentialsSource provides a credentials source that will be used to +// add credentials to outgoing discovery requests, where available. +// +// If this method is never called, no outgoing discovery requests will have +// credentials. +func (d *Disco) SetCredentialsSource(src auth.CredentialsSource) { + d.credsSrc = src +} + +// CredentialsForHost returns a non-nil HostCredentials if the embedded source has +// credentials available for the host, and a nil HostCredentials if it does not. +func (d *Disco) CredentialsForHost(hostname svchost.Hostname) (auth.HostCredentials, error) { + if d.credsSrc == nil { + return nil, nil + } + return d.credsSrc.ForHost(hostname) +} + +// ForceHostServices provides a pre-defined set of services for a given +// host, which prevents the receiver from attempting network-based discovery +// for the given host. Instead, the given services map will be returned +// verbatim. 
+// +// When providing "forced" services, any relative URLs are resolved against +// the initial discovery URL that would have been used for network-based +// discovery, yielding the same results as if the given map were published +// at the host's default discovery URL, though using absolute URLs is strongly +// recommended to make the configured behavior more explicit. +func (d *Disco) ForceHostServices(hostname svchost.Hostname, services map[string]interface{}) { + if services == nil { + services = map[string]interface{}{} + } + + d.hostCache[hostname] = &Host{ + discoURL: &url.URL{ + Scheme: "https", + Host: string(hostname), + Path: discoPath, + }, + hostname: hostname.ForDisplay(), + services: services, + transport: d.Transport, + } +} + +// Discover runs the discovery protocol against the given hostname (which must +// already have been validated and prepared with svchost.ForComparison) and +// returns an object describing the services available at that host. +// +// If a given hostname supports no Terraform services at all, a non-nil but +// empty Host object is returned. When giving feedback to the end user about +// such situations, we say "host does not provide a service", +// regardless of whether that is due to that service specifically being absent +// or due to the host not providing Terraform services at all, since we don't +// wish to expose the detail of whole-host discovery to an end-user. +func (d *Disco) Discover(hostname svchost.Hostname) (*Host, error) { + if host, cached := d.hostCache[hostname]; cached { + return host, nil + } + + host, err := d.discover(hostname) + if err != nil { + return nil, err + } + d.hostCache[hostname] = host + + return host, nil +} + +// DiscoverServiceURL is a convenience wrapper for discovery on a given +// hostname and then looking up a particular service in the result. +func (d *Disco) DiscoverServiceURL(hostname svchost.Hostname, serviceID string) (*url.URL, error) { + host, err := d.Discover(hostname) + if err != nil { + return nil, err + } + return host.ServiceURL(serviceID) +} + +// discover implements the actual discovery process, with its result cached +// by the public-facing Discover method. +func (d *Disco) discover(hostname svchost.Hostname) (*Host, error) { + discoURL := &url.URL{ + Scheme: "https", + Host: hostname.String(), + Path: discoPath, + } + + client := &http.Client{ + Transport: d.Transport, + Timeout: discoTimeout, + + CheckRedirect: func(req *http.Request, via []*http.Request) error { + log.Printf("[DEBUG] Service discovery redirected to %s", req.URL) + if len(via) > maxRedirects { + return errors.New("too many redirects") // this error will never actually be seen + } + return nil + }, + } + + req := &http.Request{ + Header: make(http.Header), + Method: "GET", + URL: discoURL, + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + creds, err := d.CredentialsForHost(hostname) + if err != nil { + log.Printf("[WARN] Failed to get credentials for %s: %s (ignoring)", hostname, err) + } + if creds != nil { + // Update the request to include credentials. + creds.PrepareRequest(req) + } + + log.Printf("[DEBUG] Service discovery for %s at %s", hostname, discoURL) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request discovery document: %v", err) + } + defer resp.Body.Close() + + host := &Host{ + // Use the discovery URL from resp.Request in + // case the client followed any redirects. 
+ discoURL: resp.Request.URL, + hostname: hostname.ForDisplay(), + transport: d.Transport, + } + + // Return the host without any services. + if resp.StatusCode == 404 { + return host, nil + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request discovery document: %s", resp.Status) + } + + contentType := resp.Header.Get("Content-Type") + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("Discovery URL has a malformed Content-Type %q", contentType) + } + if mediaType != "application/json" { + return nil, fmt.Errorf("Discovery URL returned an unsupported Content-Type %q", mediaType) + } + + // This doesn't catch chunked encoding, because ContentLength is -1 in that case. + if resp.ContentLength > maxDiscoDocBytes { + // Size limit here is not a contractual requirement and so we may + // adjust it over time if we find a different limit is warranted. + return nil, fmt.Errorf( + "Discovery doc response is too large (got %d bytes; limit %d)", + resp.ContentLength, maxDiscoDocBytes, + ) + } + + // If the response is using chunked encoding then we can't predict its + // size, but we'll at least prevent reading the entire thing into memory. + lr := io.LimitReader(resp.Body, maxDiscoDocBytes) + + servicesBytes, err := ioutil.ReadAll(lr) + if err != nil { + return nil, fmt.Errorf("Error reading discovery document body: %v", err) + } + + var services map[string]interface{} + err = json.Unmarshal(servicesBytes, &services) + if err != nil { + return nil, fmt.Errorf("Failed to decode discovery document as a JSON object: %v", err) + } + host.services = services + + return host, nil +} + +// Forget invalidates any cached record of the given hostname. If the host +// has no cache entry then this is a no-op. +func (d *Disco) Forget(hostname svchost.Hostname) { + delete(d.hostCache, hostname) +} + +// ForgetAll is like Forget, but for all of the hostnames that have cache entries. +func (d *Disco) ForgetAll() { + d.hostCache = make(map[svchost.Hostname]*Host) +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/disco/host.go b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go new file mode 100644 index 00000000..ab9514c4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/disco/host.go @@ -0,0 +1,264 @@ +package disco + +import ( + "encoding/json" + "fmt" + "log" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/httpclient" +) + +const versionServiceID = "versions.v1" + +// Host represents a service discovered host. +type Host struct { + discoURL *url.URL + hostname string + services map[string]interface{} + transport http.RoundTripper +} + +// Constraints represents the version constraints of a service. +type Constraints struct { + Service string `json:"service"` + Product string `json:"product"` + Minimum string `json:"minimum"` + Maximum string `json:"maximum"` + Excluding []string `json:"excluding"` +} + +// ErrServiceNotProvided is returned when the service is not provided. +type ErrServiceNotProvided struct { + hostname string + service string +} + +// Error returns a customized error message. 
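Because results are cached per hostname, `ForceHostServices` can pre-seed that cache so the normal lookup path works with no network access at all, which is handy in tests or air-gapped setups. A small sketch with an invented hostname and service map:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	d := disco.New()

	// Pre-seed the services for this host instead of fetching
	// https://example.com/.well-known/terraform.json over the network.
	host, _ := svchost.ForComparison("example.com")
	d.ForceHostServices(host, map[string]interface{}{
		"modules.v1": "https://example.com/api/registry/v1/modules/",
	})

	u, err := d.DiscoverServiceURL(host, "modules.v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // https://example.com/api/registry/v1/modules/
}
```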
+func (e *ErrServiceNotProvided) Error() string {
+	if e.hostname == "" {
+		return fmt.Sprintf("host does not provide a %s service", e.service)
+	}
+	return fmt.Sprintf("host %s does not provide a %s service", e.hostname, e.service)
+}
+
+// ErrVersionNotSupported is returned when the version is not supported.
+type ErrVersionNotSupported struct {
+	hostname string
+	service  string
+	version  string
+}
+
+// Error returns a customized error message.
+func (e *ErrVersionNotSupported) Error() string {
+	if e.hostname == "" {
+		return fmt.Sprintf("host does not support %s version %s", e.service, e.version)
+	}
+	return fmt.Sprintf("host %s does not support %s version %s", e.hostname, e.service, e.version)
+}
+
+// ErrNoVersionConstraints is returned when checkpoint was disabled
+// or the endpoint to query for version constraints was unavailable.
+type ErrNoVersionConstraints struct {
+	disabled bool
+}
+
+// Error returns a customized error message.
+func (e *ErrNoVersionConstraints) Error() string {
+	if e.disabled {
+		return "checkpoint disabled"
+	}
+	return "unable to contact versions service"
+}
+
+// ServiceURL returns the URL associated with the given service identifier,
+// which should be of the form "servicename.vN".
+//
+// A non-nil result is always an absolute URL with a scheme of either HTTPS
+// or HTTP.
+func (h *Host) ServiceURL(id string) (*url.URL, error) {
+	svc, ver, err := parseServiceID(id)
+	if err != nil {
+		return nil, err
+	}
+
+	// No services supported for an empty Host.
+	if h == nil || h.services == nil {
+		return nil, &ErrServiceNotProvided{service: svc}
+	}
+
+	urlStr, ok := h.services[id].(string)
+	if !ok {
+		// See if we have a matching service as that would indicate
+		// the service is supported, but not the requested version.
+		for serviceID := range h.services {
+			if strings.HasPrefix(serviceID, svc+".") {
+				return nil, &ErrVersionNotSupported{
+					hostname: h.hostname,
+					service:  svc,
+					version:  ver.Original(),
+				}
+			}
+		}
+
+		// No discovered services match the requested service.
+		return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc}
+	}
+
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to parse service URL: %v", err)
+	}
+
+	// Make relative URLs absolute using our discovery URL.
+	if !u.IsAbs() {
+		u = h.discoURL.ResolveReference(u)
+	}
+
+	if u.Scheme != "https" && u.Scheme != "http" {
+		return nil, fmt.Errorf("Service URL is using an unsupported scheme: %s", u.Scheme)
+	}
+	if u.User != nil {
+		return nil, fmt.Errorf("Embedded username/password information is not permitted")
+	}
+
+	// Fragment part is irrelevant, since we're not a browser.
+	u.Fragment = ""
+
+	return h.discoURL.ResolveReference(u), nil
+}
+
+// VersionConstraints returns the constraints for a given service identifier
+// (which should be of the form "servicename.vN") and product.
+//
+// When an exact (service and version) match is found, the constraints for
+// that service are returned.
+//
+// When the requested version is not provided but the service is, we will
+// search for all alternative versions. If multiple alternative versions
+// are found, the constraints of the latest available version are returned.
+//
+// When a service is not provided at all an error will be returned instead.
+//
+// When checkpoint is disabled or when a 404 is returned after making the
+// HTTP call, an ErrNoVersionConstraints error will be returned.
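Callers can distinguish "host offers no such service" from "service exists, but not at that version" by type-switching on the error from `ServiceURL`. A sketch, again pre-seeding the host so the example stays offline (`state.v2`/`state.v3` are just example service IDs):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
	"github.com/hashicorp/terraform/svchost/disco"
)

func main() {
	d := disco.New()
	host, _ := svchost.ForComparison("example.com")
	d.ForceHostServices(host, map[string]interface{}{
		"state.v2": "https://example.com/api/state/v2/",
	})

	h, err := d.Discover(host) // served from the pre-seeded cache
	if err != nil {
		panic(err)
	}

	// Requesting a different version of a provided service yields
	// *ErrVersionNotSupported rather than *ErrServiceNotProvided.
	_, err = h.ServiceURL("state.v3")
	switch err.(type) {
	case *disco.ErrVersionNotSupported:
		fmt.Println("state service exists, but not v3:", err)
	case *disco.ErrServiceNotProvided:
		fmt.Println("no state service at all:", err)
	case nil:
		fmt.Println("unexpectedly provided")
	}
}
```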
+func (h *Host) VersionConstraints(id, product string) (*Constraints, error) { + svc, _, err := parseServiceID(id) + if err != nil { + return nil, err + } + + // Return early if checkpoint is disabled. + if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" { + return nil, &ErrNoVersionConstraints{disabled: true} + } + + // No services supported for an empty Host. + if h == nil || h.services == nil { + return nil, &ErrServiceNotProvided{service: svc} + } + + // Try to get the service URL for the version service and + // return early if the service isn't provided by the host. + u, err := h.ServiceURL(versionServiceID) + if err != nil { + return nil, err + } + + // Check if we have an exact (service and version) match. + if _, ok := h.services[id].(string); !ok { + // If we don't have an exact match, we search for all matching + // services and then use the service ID of the latest version. + var services []string + for serviceID := range h.services { + if strings.HasPrefix(serviceID, svc+".") { + services = append(services, serviceID) + } + } + + if len(services) == 0 { + // No discovered services match the requested service. + return nil, &ErrServiceNotProvided{hostname: h.hostname, service: svc} + } + + // Set id to the latest service ID we found. + var latest *version.Version + for _, serviceID := range services { + if _, ver, err := parseServiceID(serviceID); err == nil { + if latest == nil || latest.LessThan(ver) { + id = serviceID + latest = ver + } + } + } + } + + // Set a default timeout of 1 sec for the versions request (in milliseconds) + timeout := 1000 + if v, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil { + timeout = v + } + + client := &http.Client{ + Transport: h.transport, + Timeout: time.Duration(timeout) * time.Millisecond, + } + + // Prepare the service URL by setting the service and product. + v := u.Query() + v.Set("product", product) + u.Path += id + u.RawQuery = v.Encode() + + // Create a new request. + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return nil, fmt.Errorf("Failed to create version constraints request: %v", err) + } + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", httpclient.UserAgentString()) + + log.Printf("[DEBUG] Retrieve version constraints for service %s and product %s", id, product) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to request version constraints: %v", err) + } + defer resp.Body.Close() + + if resp.StatusCode == 404 { + return nil, &ErrNoVersionConstraints{disabled: false} + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("Failed to request version constraints: %s", resp.Status) + } + + // Parse the constraints from the response body. + result := &Constraints{} + if err := json.NewDecoder(resp.Body).Decode(result); err != nil { + return nil, fmt.Errorf("Error parsing version constraints: %v", err) + } + + return result, nil +} + +func parseServiceID(id string) (string, *version.Version, error) { + parts := strings.SplitN(id, ".", 2) + if len(parts) != 2 { + return "", nil, fmt.Errorf("Invalid service ID format (i.e. 
service.vN): %s", id) + } + + version, err := version.NewVersion(parts[1]) + if err != nil { + return "", nil, fmt.Errorf("Invalid service version: %v", err) + } + + return parts[0], version, nil +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/label_iter.go b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go new file mode 100644 index 00000000..af8ccbab --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/label_iter.go @@ -0,0 +1,69 @@ +package svchost + +import ( + "strings" +) + +// A labelIter allows iterating over domain name labels. +// +// This type is copied from golang.org/x/net/idna, where it is used +// to segment hostnames into their separate labels for analysis. We use +// it for the same purpose here, in ForComparison. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} diff --git a/vendor/github.com/hashicorp/terraform/svchost/svchost.go b/vendor/github.com/hashicorp/terraform/svchost/svchost.go new file mode 100644 index 00000000..4eded142 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/svchost/svchost.go @@ -0,0 +1,207 @@ +// Package svchost deals with the representations of the so-called "friendly +// hostnames" that we use to represent systems that provide Terraform-native +// remote services, such as module registry, remote operations, etc. +// +// Friendly hostnames are specified such that, as much as possible, they +// are consistent with how web browsers think of hostnames, so that users +// can bring their intuitions about how hostnames behave when they access +// a Terraform Enterprise instance's web UI (or indeed any other website) +// and have this behave in a similar way. +package svchost + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/net/idna" +) + +// Hostname is specialized name for string that indicates that the string +// has been converted to (or was already in) the storage and comparison form. +// +// Hostname values are not suitable for display in the user-interface. Use +// the ForDisplay method to obtain a form suitable for display in the UI. +// +// Unlike user-supplied hostnames, strings of type Hostname (assuming they +// were constructed by a function within this package) can be compared for +// equality using the standard Go == operator. 
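To make the display/comparison split concrete, here is a short sketch of the package's entry points, defined just below. The expected outputs in the comments follow from the rules in this file: the lookup mapping lowercases, `:443` is dropped as the default port, and literal punycode input is rejected.

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/svchost"
)

func main() {
	// Display form: best-effort normalization, kept human-readable.
	fmt.Println(svchost.ForDisplay("Example.com:443")) // example.com (default port dropped)

	// Comparison form: validated and normalized; safe to compare with ==.
	comp, err := svchost.ForComparison("Example.com:8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(comp) // example.com:8080

	// User-supplied punycode is rejected to keep configuration readable.
	fmt.Println(svchost.IsValid("xn--caf-dma.com")) // false
}
```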
+type Hostname string + +// acePrefix is the ASCII Compatible Encoding prefix, used to indicate that +// a domain name label is in "punycode" form. +const acePrefix = "xn--" + +// displayProfile is a very liberal idna profile that we use to do +// normalization for display without imposing validation rules. +var displayProfile = idna.New( + idna.MapForLookup(), + idna.Transitional(true), +) + +// ForDisplay takes a user-specified hostname and returns a normalized form of +// it suitable for display in the UI. +// +// If the input is so invalid that no normalization can be performed then +// this will return the input, assuming that the caller still wants to +// display _something_. This function is, however, more tolerant than the +// other functions in this package and will make a best effort to prepare +// _any_ given hostname for display. +// +// For validation, use either IsValid (for explicit validation) or +// ForComparison (which implicitly validates, returning an error if invalid). +func ForDisplay(given string) string { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + portPortion, _ = normalizePortPortion(portPortion) + + ascii, err := displayProfile.ToASCII(given) + if err != nil { + return given + portPortion + } + display, err := displayProfile.ToUnicode(ascii) + if err != nil { + return given + portPortion + } + return display + portPortion +} + +// IsValid returns true if the given user-specified hostname is a valid +// service hostname. +// +// Validity is determined by complying with the RFC 5891 requirements for +// names that are valid for domain lookup (section 5), with the additional +// requirement that user-supplied forms must not _already_ contain +// Punycode segments. +func IsValid(given string) bool { + _, err := ForComparison(given) + return err == nil +} + +// ForComparison takes a user-specified hostname and returns a normalized +// form of it suitable for storage and comparison. The result is not suitable +// for display to end-users because it uses Punycode to represent non-ASCII +// characters, and this form is unreadable for non-ASCII-speaking humans. +// +// The result is typed as Hostname -- a specialized name for string -- so that +// other APIs can make it clear within the type system whether they expect a +// user-specified or display-form hostname or a value already normalized for +// comparison. +// +// The returned Hostname is not valid if the returned error is non-nil. +func ForComparison(given string) (Hostname, error) { + var portPortion string + if colonPos := strings.Index(given, ":"); colonPos != -1 { + given, portPortion = given[:colonPos], given[colonPos:] + } + + var err error + portPortion, err = normalizePortPortion(portPortion) + if err != nil { + return Hostname(""), err + } + + if given == "" { + return Hostname(""), fmt.Errorf("empty string is not a valid hostname") + } + + // First we'll apply our additional constraint that Punycode must not + // be given directly by the user. This is not an IDN specification + // requirement, but we prohibit it to force users to use human-readable + // hostname forms within Terraform configuration. 
+	labels := labelIter{orig: given}
+	for ; !labels.done(); labels.next() {
+		label := labels.label()
+		if label == "" {
+			return Hostname(""), fmt.Errorf(
+				"hostname contains empty label (two consecutive periods)",
+			)
+		}
+		if strings.HasPrefix(label, acePrefix) {
+			return Hostname(""), fmt.Errorf(
+				"hostname label %q specified in punycode format; service hostnames must be given in unicode",
+				label,
+			)
+		}
+	}
+
+	result, err := idna.Lookup.ToASCII(given)
+	if err != nil {
+		return Hostname(""), err
+	}
+	return Hostname(result + portPortion), nil
+}
+
+// ForDisplay returns a version of the receiver that is appropriate for display
+// in the UI. This includes converting any punycode labels to their
+// corresponding Unicode characters.
+//
+// A round-trip through ForComparison and this ForDisplay method does not
+// guarantee the same result as calling this package's top-level ForDisplay
+// function, since a round-trip through the Hostname type implies stricter
+// handling than we do when doing basic display-only processing.
+func (h Hostname) ForDisplay() string {
+	given := string(h)
+	var portPortion string
+	if colonPos := strings.Index(given, ":"); colonPos != -1 {
+		given, portPortion = given[:colonPos], given[colonPos:]
+	}
+	// We don't normalize the port portion here because we assume it's
+	// already been normalized on the way in.
+
+	result, err := idna.Lookup.ToUnicode(given)
+	if err != nil {
+		// Should never happen, since type Hostname indicates that a string
+		// passed through our validation rules.
+		panic(fmt.Errorf("ForDisplay called on invalid Hostname: %s", err))
+	}
+	return result + portPortion
+}
+
+func (h Hostname) String() string {
+	return string(h)
+}
+
+func (h Hostname) GoString() string {
+	return fmt.Sprintf("svchost.Hostname(%q)", string(h))
+}
+
+// normalizePortPortion attempts to normalize the "port portion" of a hostname,
+// which begins with the first colon in the hostname and should be followed
+// by a string of decimal digits.
+//
+// If the port portion is valid, a normalized version of it is returned along
+// with a nil error.
+//
+// If the port portion is invalid, the input string is returned verbatim along
+// with a non-nil error.
+//
+// An empty string is a valid port portion representing the absence of a port.
+// If non-empty, the first character must be a colon.
+func normalizePortPortion(s string) (string, error) {
+	if s == "" {
+		return s, nil
+	}
+
+	if s[0] != ':' {
+		// should never happen, since caller tends to guarantee the presence
+		// of a colon due to how it's extracted from the string.
+ return s, errors.New("port portion is missing its initial colon") + } + + numStr := s[1:] + num, err := strconv.Atoi(numStr) + if err != nil { + return s, errors.New("port portion contains non-digit characters") + } + if num == 443 { + return "", nil // ":443" is the default + } + if num > 65535 { + return s, errors.New("port number is greater than 65535") + } + return fmt.Sprintf(":%d", num), nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index 15528bee..afdba99c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -1,18 +1,26 @@ package terraform import ( + "bytes" "context" "fmt" "log" - "sort" "strings" "sync" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/experiment" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/states/statefile" + "github.com/hashicorp/terraform/tfdiags" ) // InputMode defines what sort of input will be asked for when Input @@ -49,19 +57,23 @@ var ( // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { - Meta *ContextMeta - Destroy bool - Diff *Diff - Hooks []Hook - Module *module.Tree - Parallelism int - State *State - StateFutureAllowed bool - Providers map[string]ResourceProviderFactory - Provisioners map[string]ResourceProvisionerFactory - Shadow bool - Targets []string - Variables map[string]interface{} + Config *configs.Config + Changes *plans.Changes + State *states.State + Targets []addrs.Targetable + Variables InputValues + Meta *ContextMeta + Destroy bool + + Hooks []Hook + Parallelism int + ProviderResolver providers.Resolver + Provisioners map[string]ProvisionerFactory + + // If non-nil, will apply as additional constraints on the provider + // plugins that will be requested from the provider resolver. + ProviderSHA256s map[string][]byte + SkipProviderVerify bool UIInput UIInput } @@ -76,32 +88,26 @@ type ContextMeta struct { // Context represents all the context that Terraform needs in order to // perform operations on infrastructure. This structure is built using -// NewContext. See the documentation for that. -// -// Extra functions on Context can be found in context_*.go files. +// NewContext. type Context struct { - // Maintainer note: Anytime this struct is changed, please verify - // that newShadowContext still does the right thing. Tests should - // fail regardless but putting this note here as well. 
+ config *configs.Config + changes *plans.Changes + state *states.State + targets []addrs.Targetable + variables InputValues + meta *ContextMeta + destroy bool - components contextComponentFactory - destroy bool - diff *Diff - diffLock sync.RWMutex hooks []Hook - meta *ContextMeta - module *module.Tree + components contextComponentFactory + schemas *Schemas sh *stopHook - shadow bool - state *State - stateLock sync.RWMutex - targets []string uiInput UIInput - variables map[string]interface{} l sync.Mutex // Lock acquired during any task parallelSem Semaphore - providerInputConfig map[string]map[string]interface{} + providerInputConfig map[string]map[string]cty.Value + providerSHA256s map[string][]byte runLock sync.Mutex runCond *sync.Cond runContext context.Context @@ -109,17 +115,23 @@ type Context struct { shadowErr error } +// (additional methods on Context can be found in context_*.go files.) + // NewContext creates a new Context structure. // -// Once a Context is creator, the pointer values within ContextOpts -// should not be mutated in any way, since the pointers are copied, not -// the values themselves. -func NewContext(opts *ContextOpts) (*Context, error) { - // Validate the version requirement if it is given - if opts.Module != nil { - if err := checkRequiredVersion(opts.Module); err != nil { - return nil, err - } +// Once a Context is created, the caller must not access or mutate any of +// the objects referenced (directly or indirectly) by the ContextOpts fields. +// +// If the returned diagnostics contains errors then the resulting context is +// invalid and must not be used. +func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) { + log.Printf("[TRACE] terraform.NewContext: starting") + diags := CheckCoreVersionRequirements(opts.Config) + // If version constraints are not met then we'll bail early since otherwise + // we're likely to just see a bunch of other errors related to + // incompatibilities, which could be overwhelming for the user. + if diags.HasErrors() { + return nil, diags } // Copy all the hooks and add our stop hook. We don't append directly @@ -131,26 +143,9 @@ func NewContext(opts *ContextOpts) (*Context, error) { state := opts.State if state == nil { - state = new(State) - state.init() - } - - // If our state is from the future, then error. Callers can avoid - // this error by explicitly setting `StateFutureAllowed`. - if !opts.StateFutureAllowed && state.FromFutureTerraform() { - return nil, fmt.Errorf( - "Terraform doesn't allow running any operations against a state\n"+ - "that was written by a future Terraform version. The state is\n"+ - "reporting it is written by Terraform '%s'.\n\n"+ - "Please run at least that version of Terraform to continue.", - state.TFVersion) + state = states.NewState() } - // Explicitly reset our state version to our current version so that - // any operations we do will write out that our latest version - // has run. - state.TFVersion = Version - // Determine parallelism, default to 10. We do this both to limit // CPU pressure but also to have an extra guard against rate throttling // from providers. @@ -165,43 +160,84 @@ func NewContext(opts *ContextOpts) (*Context, error) { // 2 - Take values specified in -var flags, overriding values // set by environment variables if necessary. This includes // values taken from -var-file in addition. - variables := make(map[string]interface{}) + var variables InputValues + if opts.Config != nil { + // Default variables from the configuration seed our map. 
+ variables = DefaultVariableValues(opts.Config.Module.Variables) + } + // Variables provided by the caller (from CLI, environment, etc) can + // override the defaults. + variables = variables.Override(opts.Variables) + + // Bind available provider plugins to the constraints in config + var providerFactories map[string]providers.Factory + if opts.ProviderResolver != nil { + deps := ConfigTreeDependencies(opts.Config, state) + reqd := deps.AllPluginRequirements() + if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { + reqd.LockExecutables(opts.ProviderSHA256s) + } + log.Printf("[TRACE] terraform.NewContext: resolving provider version selections") - if opts.Module != nil { - var err error - variables, err = Variables(opts.Module, opts.Variables) - if err != nil { - return nil, err + var providerDiags tfdiags.Diagnostics + providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd) + diags = diags.Append(providerDiags) + + if diags.HasErrors() { + return nil, diags } + } else { + providerFactories = make(map[string]providers.Factory) + } + + components := &basicComponentFactory{ + providers: providerFactories, + provisioners: opts.Provisioners, + } + + log.Printf("[TRACE] terraform.NewContext: loading provider schemas") + schemas, err := LoadSchemas(opts.Config, opts.State, components) + if err != nil { + diags = diags.Append(err) + return nil, diags } - diff := opts.Diff - if diff == nil { - diff = &Diff{} + changes := opts.Changes + if changes == nil { + changes = plans.NewChanges() } + config := opts.Config + if config == nil { + config = configs.NewEmptyConfig() + } + + log.Printf("[TRACE] terraform.NewContext: complete") + return &Context{ - components: &basicComponentFactory{ - providers: opts.Providers, - provisioners: opts.Provisioners, - }, - destroy: opts.Destroy, - diff: diff, - hooks: hooks, - meta: opts.Meta, - module: opts.Module, - shadow: opts.Shadow, - state: state, - targets: opts.Targets, - uiInput: opts.UIInput, - variables: variables, + components: components, + schemas: schemas, + destroy: opts.Destroy, + changes: changes, + hooks: hooks, + meta: opts.Meta, + config: config, + state: state, + targets: opts.Targets, + uiInput: opts.UIInput, + variables: variables, parallelSem: NewSemaphore(par), - providerInputConfig: make(map[string]map[string]interface{}), + providerInputConfig: make(map[string]map[string]cty.Value), + providerSHA256s: opts.ProviderSHA256s, sh: sh, }, nil } +func (c *Context) Schemas() *Schemas { + return c.schemas +} + type ContextGraphOpts struct { // If true, validates the graph structure (checks for cycles). Validate bool @@ -213,7 +249,7 @@ type ContextGraphOpts struct { // Graph returns the graph used for the given operation type. // // The most extensive or complex graph type is GraphTypePlan. 
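Under the reworked signature, `NewContext` reports problems as `tfdiags.Diagnostics` rather than a plain `error`. A hedged sketch of a caller, assuming a `*configs.Config` obtained elsewhere and ignoring provider resolution for brevity (a real caller would also populate `ProviderResolver`):

```go
package tfdriver

import (
	"log"

	"github.com/hashicorp/terraform/configs"
	"github.com/hashicorp/terraform/terraform"
)

// newContext wraps terraform.NewContext, treating error diagnostics as fatal.
func newContext(cfg *configs.Config) *terraform.Context {
	ctx, diags := terraform.NewContext(&terraform.ContextOpts{
		Config:      cfg,
		Parallelism: 10, // same value the code above defaults to when 0
	})
	if diags.HasErrors() {
		log.Fatal(diags.Err())
	}
	return ctx
}
```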
-func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { +func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) { if opts == nil { opts = &ContextGraphOpts{Validate: true} } @@ -222,65 +258,71 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { switch typ { case GraphTypeApply: return (&ApplyGraphBuilder{ - Module: c.module, - Diff: c.diff, - State: c.state, - Providers: c.components.ResourceProviders(), - Provisioners: c.components.ResourceProvisioners(), - Targets: c.targets, - Destroy: c.destroy, - Validate: opts.Validate, - }).Build(RootModulePath) - - case GraphTypeInput: - // The input graph is just a slightly modified plan graph - fallthrough + Config: c.config, + Changes: c.changes, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Destroy: c.destroy, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + case GraphTypeValidate: // The validate graph is just a slightly modified plan graph fallthrough case GraphTypePlan: // Create the plan graph builder p := &PlanGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, } // Some special cases for other graph types shared with plan currently var b GraphBuilder = p switch typ { - case GraphTypeInput: - b = InputGraphBuilder(p) case GraphTypeValidate: - // We need to set the provisioners so those can be validated - p.Provisioners = c.components.ResourceProvisioners() - b = ValidateGraphBuilder(p) } - return b.Build(RootModulePath) + return b.Build(addrs.RootModuleInstance) case GraphTypePlanDestroy: return (&DestroyPlanGraphBuilder{ - Module: c.module, - State: c.state, - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) case GraphTypeRefresh: return (&RefreshGraphBuilder{ - Module: c.module, - State: c.state, - Providers: c.components.ResourceProviders(), - Targets: c.targets, - Validate: opts.Validate, - }).Build(RootModulePath) - } + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + Targets: c.targets, + Validate: opts.Validate, + }).Build(addrs.RootModuleInstance) + + case GraphTypeEval: + return (&EvalGraphBuilder{ + Config: c.config, + State: c.state, + Components: c.components, + Schemas: c.schemas, + }).Build(addrs.RootModuleInstance) - return nil, fmt.Errorf("unknown graph type: %s", typ) + default: + // Should never happen, because the above is exhaustive for all graph types. + panic(fmt.Errorf("unsupported graph type %s", typ)) + } } // ShadowError returns any errors caught during a shadow operation. @@ -313,158 +355,98 @@ func (c *Context) ShadowError() error { // State returns a copy of the current state associated with this context. // // This cannot safely be called in parallel with any other Context function. -func (c *Context) State() *State { +func (c *Context) State() *states.State { return c.state.DeepCopy() } -// Interpolater returns an Interpolater built on a copy of the state -// that can be used to test interpolation values. 
-func (c *Context) Interpolater() *Interpolater { - var varLock sync.Mutex - var stateLock sync.RWMutex - return &Interpolater{ - Operation: walkApply, - Meta: c.meta, - Module: c.module, - State: c.state.DeepCopy(), - StateLock: &stateLock, - VariableValues: c.variables, - VariableValuesLock: &varLock, - } +// Eval produces a scope in which expressions can be evaluated for +// the given module path. +// +// This method must first evaluate any ephemeral values (input variables, local +// values, and output values) in the configuration. These ephemeral values are +// not included in the persisted state, so they must be re-computed using other +// values in the state before they can be properly evaluated. The updated +// values are retained in the main state associated with the receiving context. +// +// This function takes no action against remote APIs but it does need access +// to all provider and provisioner instances in order to obtain their schemas +// for type checking. +// +// The result is an evaluation scope that can be used to resolve references +// against the root module. If the returned diagnostics contains errors then +// the returned scope may be nil. If it is not nil then it may still be used +// to attempt expression evaluation or other analysis, but some expressions +// may not behave as expected. +func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) { + // This is intended for external callers such as the "terraform console" + // command. Internally, we create an evaluator in c.walk before walking + // the graph, and create scopes in ContextGraphWalker. + + var diags tfdiags.Diagnostics + defer c.acquireRun("eval")() + + // Start with a copy of state so that we don't affect any instances + // that other methods may have already returned. + c.state = c.state.DeepCopy() + var walker *ContextGraphWalker + + graph, graphDiags := c.Graph(GraphTypeEval, nil) + diags = diags.Append(graphDiags) + if !diags.HasErrors() { + var walkDiags tfdiags.Diagnostics + walker, walkDiags = c.walk(graph, walkEval) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + } + + if walker == nil { + // If we skipped walking the graph (due to errors) then we'll just + // use a placeholder graph walker here, which'll refer to the + // unmodified state. + walker = c.graphWalker(walkEval) + } + + // This is a bit weird since we don't normally evaluate outside of + // the context of a walk, but we'll "re-enter" our desired path here + // just to get hold of an EvalContext for it. GraphContextBuiltin + // caches its contexts, so we should get hold of the context that was + // previously used for evaluation here, unless we skipped walking. + evalCtx := walker.EnterPath(path) + return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags } -// Input asks for input to fill variables and provider configurations. -// This modifies the configuration in-place, so asking for Input twice -// may result in different UI output showing different current values. -func (c *Context) Input(mode InputMode) error { - defer c.acquireRun("input")() - - if mode&InputModeVar != 0 { - // Walk the variables first for the root module. We walk them in - // alphabetical order for UX reasons. 
- rootConf := c.module.Config() - names := make([]string, len(rootConf.Variables)) - m := make(map[string]*config.Variable) - for i, v := range rootConf.Variables { - names[i] = v.Name - m[v.Name] = v - } - sort.Strings(names) - for _, n := range names { - // If we only care about unset variables, then if the variable - // is set, continue on. - if mode&InputModeVarUnset != 0 { - if _, ok := c.variables[n]; ok { - continue - } - } - - var valueType config.VariableType - - v := m[n] - switch valueType = v.Type(); valueType { - case config.VariableTypeUnknown: - continue - case config.VariableTypeMap: - // OK - case config.VariableTypeList: - // OK - case config.VariableTypeString: - // OK - default: - panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) - } - - // If the variable is not already set, and the variable defines a - // default, use that for the value. - if _, ok := c.variables[n]; !ok { - if v.Default != nil { - c.variables[n] = v.Default.(string) - continue - } - } - - // this should only happen during tests - if c.uiInput == nil { - log.Println("[WARN] Content.uiInput is nil") - continue - } - - // Ask the user for a value for this variable - var value string - retry := 0 - for { - var err error - value, err = c.uiInput.Input(&InputOpts{ - Id: fmt.Sprintf("var.%s", n), - Query: fmt.Sprintf("var.%s", n), - Description: v.Description, - }) - if err != nil { - return fmt.Errorf( - "Error asking for %s: %s", n, err) - } - - if value == "" && v.Required() { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - return fmt.Errorf("missing required value for %q", n) - } - retry++ - continue - } - - break - } - - // no value provided, so don't set the variable at all - if value == "" { - continue - } - - decoded, err := parseVariableAsHCL(n, value, valueType) - if err != nil { - return err - } - - if decoded != nil { - c.variables[n] = decoded - } - } - } - - if mode&InputModeProvider != 0 { - // Build the graph - graph, err := c.Graph(GraphTypeInput, nil) - if err != nil { - return err - } - - // Do the walk - if _, err := c.walk(graph, nil, walkInput); err != nil { - return err - } - } - - return nil +// Interpolater is no longer used. Use Evaluator instead. +// +// The interpolator returned from this function will return an error on any use. +func (c *Context) Interpolater() *Interpolater { + // FIXME: Remove this once all callers are updated to no longer use it. + return &Interpolater{} } // Apply applies the changes represented by this context and returns // the resulting state. // -// In addition to returning the resulting state, this context is updated -// with the latest state. -func (c *Context) Apply() (*State, error) { +// Even in the case an error is returned, the state may be returned and will +// potentially be partially updated. In addition to returning the resulting +// state, this context is updated with the latest state. +// +// If the state is required after an error, the caller should call +// Context.State, rather than rely on the return value. +// +// TODO: Apply and Refresh should either always return a state, or rely on the +// State() method. Currently the helper/resource testing framework relies +// on the absence of a returned state to determine if Destroy can be +// called, so that will need to be refactored before this can be changed. +func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) { defer c.acquireRun("apply")() // Copy our own state c.state = c.state.DeepCopy() // Build the graph. 
- graph, err := c.Graph(GraphTypeApply, nil) - if err != nil { - return nil, err + graph, diags := c.Graph(GraphTypeApply, nil) + if diags.HasErrors() { + return nil, diags } // Determine the operation @@ -474,15 +456,30 @@ func (c *Context) Apply() (*State, error) { } // Walk the graph - walker, err := c.walk(graph, graph, operation) - if len(walker.ValidationErrors) > 0 { - err = multierror.Append(err, walker.ValidationErrors...) - } - - // Clean out any unused things - c.state.prune() - - return c.state, err + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + + if c.destroy && !diags.HasErrors() { + // If we know we were trying to destroy objects anyway, and we + // completed without any errors, then we'll also prune out any + // leftover empty resource husks (left after all of the instances + // of a resource with "count" or "for_each" are destroyed) to + // help ensure we end up with an _actually_ empty state, assuming + // we weren't destroying with -target here. + // + // (This doesn't actually take into account -target, but that should + // be okay because it doesn't throw away anything we can't recompute + // on a subsequent "terraform plan" run, if the resources are still + // present in the configuration. However, this _will_ cause "count = 0" + // resources to read as unknown during the next refresh walk, which + // may cause some additional churn if used in a data resource or + // provider block, until we remove refreshing as a separate walk and + // just do it as part of the plan walk.) + c.state.PruneResourceHusks() + } + + return c.state, diags } // Plan generates an execution plan for the given context. @@ -492,14 +489,33 @@ func (c *Context) Apply() (*State, error) { // // Plan also updates the diff of this context to be the diff generated // by the plan, so Apply can be called after. -func (c *Context) Plan() (*Plan, error) { +func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) { defer c.acquireRun("plan")() + c.changes = plans.NewChanges() - p := &Plan{ - Module: c.module, - Vars: c.variables, - State: c.state, - Targets: c.targets, + var diags tfdiags.Diagnostics + + varVals := make(map[string]plans.DynamicValue, len(c.variables)) + for k, iv := range c.variables { + // We use cty.DynamicPseudoType here so that we'll save both the + // value _and_ its dynamic type in the plan, so we can recover + // exactly the same value later. + dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType) + if err != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to prepare variable value for plan", + fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err), + )) + continue + } + varVals[k] = dv + } + + p := &plans.Plan{ + VariableValues: varVals, + TargetAddrs: c.targets, + ProviderSHA256s: c.providerSHA256s, } var operation walkOperation @@ -511,8 +527,7 @@ func (c *Context) Plan() (*Plan, error) { // we replace it back with our old state. old := c.state if old == nil { - c.state = &State{} - c.state.init() + c.state = states.NewState() } else { c.state = old.DeepCopy() } @@ -523,86 +538,75 @@ func (c *Context) Plan() (*Plan, error) { operation = walkPlan } - // Setup our diff - c.diffLock.Lock() - c.diff = new(Diff) - c.diff.init() - c.diffLock.Unlock() - // Build the graph. 
graphType := GraphTypePlan if c.destroy { graphType = GraphTypePlanDestroy } - graph, err := c.Graph(graphType, nil) - if err != nil { - return nil, err + graph, graphDiags := c.Graph(graphType, nil) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return nil, diags } // Do the walk - walker, err := c.walk(graph, graph, operation) - if err != nil { - return nil, err + walker, walkDiags := c.walk(graph, operation) + diags = diags.Append(walker.NonFatalDiagnostics) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags } - p.Diff = c.diff - - // If this is true, it means we're running unit tests. In this case, - // we perform a deep copy just to ensure that all context tests also - // test that a diff is copy-able. This will panic if it fails. This - // is enabled during unit tests. - // - // This should never be true during production usage, but even if it is, - // it can't do any real harm. - if contextTestDeepCopyOnPlan { - p.Diff.DeepCopy() - } - - /* - // We don't do the reverification during the new destroy plan because - // it will use a different apply process. - if X_legacyGraph { - // Now that we have a diff, we can build the exact graph that Apply will use - // and catch any possible cycles during the Plan phase. - if _, err := c.Graph(GraphTypeLegacy, nil); err != nil { - return nil, err - } - } - */ + p.Changes = c.changes - var errs error - if len(walker.ValidationErrors) > 0 { - errs = multierror.Append(errs, walker.ValidationErrors...) - } - return p, errs + return p, diags } // Refresh goes through all the resources in the state and refreshes them // to their latest state. This will update the state that this context // works with, along with returning it. // -// Even in the case an error is returned, the state will be returned and +// Even in the case an error is returned, the state may be returned and // will potentially be partially updated. -func (c *Context) Refresh() (*State, error) { +func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) { defer c.acquireRun("refresh")() // Copy our own state c.state = c.state.DeepCopy() + // Refresh builds a partial changeset as part of its work because it must + // create placeholder stubs for any resource instances that'll be created + // in subsequent plan so that provider configurations and data resources + // can interpolate from them. This plan is always thrown away after + // the operation completes, restoring any existing changeset. + oldChanges := c.changes + defer func() { c.changes = oldChanges }() + c.changes = plans.NewChanges() + // Build the graph. - graph, err := c.Graph(GraphTypeRefresh, nil) - if err != nil { - return nil, err + graph, diags := c.Graph(GraphTypeRefresh, nil) + if diags.HasErrors() { + return nil, diags } // Do the walk - if _, err := c.walk(graph, graph, walkRefresh); err != nil { - return nil, err - } - - // Clean out any unused things - c.state.prune() - - return c.state, nil + _, walkDiags := c.walk(graph, walkRefresh) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return nil, diags + } + + // During our walk we will have created planned object placeholders in + // state for resource instances that are in configuration but not yet + // created. These were created only to allow expression evaluation to + // work properly in provider and data blocks during the walk and must + // now be discarded, since a subsequent plan walk is responsible for + // creating these "for real". 
+	// TODO: Consolidate refresh and plan into a single walk, so that the
+	// refresh walk doesn't need to emulate various aspects of the plan
+	// walk in order to properly evaluate provider and data blocks.
+	c.state.SyncWrapper().RemovePlannedResourceInstanceObjects()
+
+	return c.state, diags
 }
 
 // Stop stops the running task.
@@ -628,79 +632,76 @@ func (c *Context) Stop() {
 
 	// Grab the condition var before we exit
 	if cond := c.runCond; cond != nil {
+		log.Printf("[INFO] terraform: waiting for graceful stop to complete")
 		cond.Wait()
 	}
 
 	log.Printf("[WARN] terraform: stop complete")
 }
 
-// Validate validates the configuration and returns any warnings or errors.
-func (c *Context) Validate() ([]string, []error) {
+// Validate performs semantic validation of the configuration, returning
+// any warnings or errors.
+//
+// Syntax and structural checks are performed by the configuration loader,
+// and so are not repeated here.
+func (c *Context) Validate() tfdiags.Diagnostics {
 	defer c.acquireRun("validate")()
 
-	var errs error
+	var diags tfdiags.Diagnostics
 
-	// Validate the configuration itself
-	if err := c.module.Validate(); err != nil {
-		errs = multierror.Append(errs, err)
+	// Validate input variables. We do this only for the values supplied
+	// by the root module, since child module calls are validated when we
+	// visit their graph nodes.
+	if c.config != nil {
+		varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
+		diags = diags.Append(varDiags)
 	}
 
-	// This only needs to be done for the root module, since inter-module
-	// variables are validated in the module tree.
-	if config := c.module.Config(); config != nil {
-		// Validate the user variables
-		if err := smcUserVariables(config, c.variables); len(err) > 0 {
-			errs = multierror.Append(errs, err...)
-		}
-	}
-
-	// If we have errors at this point, the graphing has no chance,
-	// so just bail early.
-	if errs != nil {
-		return nil, []error{errs}
+	// If we have errors at this point then we probably won't be able to
+	// construct a graph without producing redundant errors, so we'll halt early.
+	if diags.HasErrors() {
+		return diags
 	}
 
 	// Build the graph so we can walk it and run Validate on nodes.
 	// We also validate the graph generated here, but this graph doesn't
 	// necessarily match the graph that Plan will generate, so we'll validate the
 	// graph again later after Planning.
-	graph, err := c.Graph(GraphTypeValidate, nil)
-	if err != nil {
-		return nil, []error{err}
+	graph, graphDiags := c.Graph(GraphTypeValidate, nil)
+	diags = diags.Append(graphDiags)
+	if graphDiags.HasErrors() {
+		return diags
	}
 
 	// Walk
-	walker, err := c.walk(graph, graph, walkValidate)
-	if err != nil {
-		return nil, multierror.Append(errs, err).Errors
+	walker, walkDiags := c.walk(graph, walkValidate)
+	diags = diags.Append(walker.NonFatalDiagnostics)
+	diags = diags.Append(walkDiags)
+	if walkDiags.HasErrors() {
+		return diags
 	}
 
-	// Return the result
-	rerrs := multierror.Append(errs, walker.ValidationErrors...)
-
-	sort.Strings(walker.ValidationWarnings)
-	sort.Slice(rerrs.Errors, func(i, j int) bool {
-		return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
-	})
-
-	return walker.ValidationWarnings, rerrs.Errors
+	return diags
 }
 
-// Module returns the module tree associated with this context.
-func (c *Context) Module() *module.Tree {
-	return c.module
+// Config returns the configuration tree associated with this context.
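All of the core operations now share these diagnostics-based signatures, so a driver loop becomes uniform. A sketch of the usual validate → refresh → plan → apply sequence, with deliberately minimal error handling (`ctx` is assumed to come from `NewContext` as above):

```go
package tfdriver

import (
	"github.com/hashicorp/terraform/terraform"
)

// run drives one full cycle against an already-constructed context.
func run(ctx *terraform.Context) error {
	if diags := ctx.Validate(); diags.HasErrors() {
		return diags.Err()
	}
	if _, diags := ctx.Refresh(); diags.HasErrors() {
		return diags.Err()
	}
	// Plan populates the context's changeset, which Apply then executes.
	if _, diags := ctx.Plan(); diags.HasErrors() {
		return diags.Err()
	}
	if _, diags := ctx.Apply(); diags.HasErrors() {
		return diags.Err()
	}
	return nil
}
```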
+func (c *Context) Config() *configs.Config { + return c.config } // Variables will return the mapping of variables that were defined // for this Context. If Input was called, this mapping may be different // than what was given. -func (c *Context) Variables() map[string]interface{} { +func (c *Context) Variables() InputValues { return c.variables } // SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k string, v interface{}) { - c.variables[k] = v +func (c *Context) SetVariable(k string, v cty.Value) { + c.variables[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, + } } func (c *Context) acquireRun(phase string) func() { @@ -717,9 +718,6 @@ func (c *Context) acquireRun(phase string) func() { // Build our lock c.runCond = sync.NewCond(&c.l) - // Setup debugging - dbug.SetPhase(phase) - // Create a new run context c.runContext, c.runContextCancel = context.WithCancel(context.Background()) @@ -737,11 +735,6 @@ func (c *Context) releaseRun() { c.l.Lock() defer c.l.Unlock() - // setting the phase to "INVALID" lets us easily detect if we have - // operations happening outside of a run, or we missed setting the proper - // phase - dbug.SetPhase("INVALID") - // End our run. We check if runContext is non-nil because it can be // set to nil if it was cancelled via Stop() if c.runContextCancel != nil { @@ -757,136 +750,33 @@ func (c *Context) releaseRun() { c.runContext = nil } -func (c *Context) walk( - graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) { - // Keep track of the "real" context which is the context that does - // the real work: talking to real providers, modifying real state, etc. - realCtx := c - - // If we don't want shadowing, remove it - if !experiment.Enabled(experiment.X_shadow) { - shadow = nil - } - - // Just log this so we can see it in a debug log - if !c.shadow { - log.Printf("[WARN] terraform: shadow graph disabled") - shadow = nil - } - - // If we have a shadow graph, walk that as well - var shadowCtx *Context - var shadowCloser Shadow - if shadow != nil { - // Build the shadow context. In the process, override the real context - // with the one that is wrapped so that the shadow context can verify - // the results of the real. - realCtx, shadowCtx, shadowCloser = newShadowContext(c) - } - +func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) { log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) - walker := &ContextGraphWalker{ - Context: realCtx, - Operation: operation, - StopContext: c.runContext, - } + walker := c.graphWalker(operation) // Watch for a stop so we can call the provider Stop() API. watchStop, watchWait := c.watchStop(walker) // Walk the real graph, this will block until it completes - realErr := graph.Walk(walker) + diags := graph.Walk(walker) // Close the channel so the watcher stops, and wait for it to return. close(watchStop) <-watchWait - // If we have a shadow graph and we interrupted the real graph, then - // we just close the shadow and never verify it. It is non-trivial to - // recreate the exact execution state up until an interruption so this - // isn't supported with shadows at the moment. - if shadowCloser != nil && c.sh.Stopped() { - // Ignore the error result, there is nothing we could care about - shadowCloser.CloseShadow() - - // Set it to nil so we don't do anything - shadowCloser = nil - } - - // If we have a shadow graph, wait for that to complete. 
- if shadowCloser != nil { - // Build the graph walker for the shadow. We also wrap this in - // a panicwrap so that panics are captured. For the shadow graph, - // we just want panics to be normal errors rather than to crash - // Terraform. - shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{ - Context: shadowCtx, - Operation: operation, - }) - - // Kick off the shadow walk. This will block on any operations - // on the real walk so it is fine to start first. - log.Printf("[INFO] Starting shadow graph walk: %s", operation.String()) - shadowCh := make(chan error) - go func() { - shadowCh <- shadow.Walk(shadowWalker) - }() - - // Notify the shadow that we're done - if err := shadowCloser.CloseShadow(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Wait for the walk to end - log.Printf("[DEBUG] Waiting for shadow graph to complete...") - shadowWalkErr := <-shadowCh - - // Get any shadow errors - if err := shadowCloser.ShadowError(); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // Verify the contexts (compare) - if err := shadowContextVerify(realCtx, shadowCtx); err != nil { - c.shadowErr = multierror.Append(c.shadowErr, err) - } - - // At this point, if we're supposed to fail on error, then - // we PANIC. Some tests just verify that there is an error, - // so simply appending it to realErr and returning could hide - // shadow problems. - // - // This must be done BEFORE appending shadowWalkErr since the - // shadowWalkErr may include expected errors. - // - // We only do this if we don't have a real error. In the case of - // a real error, we can't guarantee what nodes were and weren't - // traversed in parallel scenarios so we can't guarantee no - // shadow errors. - if c.shadowErr != nil && contextFailOnShadowError && realErr == nil { - panic(multierror.Prefix(c.shadowErr, "shadow graph:")) - } - - // Now, if we have a walk error, we append that through - if shadowWalkErr != nil { - c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr) - } - - if c.shadowErr == nil { - log.Printf("[INFO] Shadow graph success!") - } else { - log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr) + return walker, diags +} - // If we're supposed to fail on shadow errors, then report it - if contextFailOnShadowError { - realErr = multierror.Append(realErr, multierror.Prefix( - c.shadowErr, "shadow graph:")) - } - } +func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker { + return &ContextGraphWalker{ + Context: c, + State: c.state.SyncWrapper(), + Changes: c.changes.SyncWrapper(), + Operation: operation, + StopContext: c.runContext, + RootVariableValues: c.variables, } - - return walker, realErr } // watchStop immediately returns a `stop` and a `wait` chan after dispatching @@ -919,12 +809,13 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s } // If we're here, we're stopped, trigger the call. + log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop") { // Copy the providers so that a misbehaved blocking Stop doesn't // completely hang Terraform. 
walker.providerLock.Lock() - ps := make([]ResourceProvider, 0, len(walker.providerCache)) + ps := make([]providers.Interface, 0, len(walker.providerCache)) for _, p := range walker.providerCache { ps = append(ps, p) } @@ -941,7 +832,7 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s { // Call stop on all the provisioners walker.provisionerLock.Lock() - ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) + ps := make([]provisioners.Interface, 0, len(walker.provisionerCache)) for _, p := range walker.provisionerCache { ps = append(ps, p) } @@ -1011,3 +902,37 @@ func parseVariableAsHCL(name string, input string, targetType config.VariableTyp panic(fmt.Errorf("unknown type %s", targetType.Printable())) } } + +// ShimLegacyState is a helper that takes the legacy state type and +// converts it to the new state type. +// +// This is implemented as a state file upgrade, so it will not preserve +// parts of the state structure that are not included in a serialized state, +// such as the resolved results of any local values, outputs in non-root +// modules, etc. +func ShimLegacyState(legacy *State) (*states.State, error) { + if legacy == nil { + return nil, nil + } + var buf bytes.Buffer + err := WriteState(legacy, &buf) + if err != nil { + return nil, err + } + f, err := statefile.Read(&buf) + if err != nil { + return nil, err + } + return f.State, err +} + +// MustShimLegacyState is a wrapper around ShimLegacyState that panics if +// the conversion does not succeed. This is primarily intended for tests where +// the given legacy state is an object constructed within the test. +func MustShimLegacyState(legacy *State) *states.State { + ret, err := ShimLegacyState(legacy) + if err != nil { + panic(err) + } + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go index 6f507445..26ec9959 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" ) // contextComponentFactory is the interface that Context uses @@ -12,25 +15,25 @@ type contextComponentFactory interface { // ResourceProvider creates a new ResourceProvider with the given // type. The "uid" is a unique identifier for this provider being // initialized that can be used for internal tracking. - ResourceProvider(typ, uid string) (ResourceProvider, error) + ResourceProvider(typ, uid string) (providers.Interface, error) ResourceProviders() []string // ResourceProvisioner creates a new ResourceProvisioner with the // given type. The "uid" is a unique identifier for this provisioner // being initialized that can be used for internal tracking. - ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) + ResourceProvisioner(typ, uid string) (provisioners.Interface, error) ResourceProvisioners() []string } // basicComponentFactory just calls a factory from a map directly. 
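The `ShimLegacyState` helper above upgrades a legacy `*State` by round-tripping it through the serialized state format (`WriteState` followed by `statefile.Read`) rather than converting field by field. A hedged sketch of test-style usage inside the `terraform` package; the helper name `exampleShim` and the legacy state literal are purely illustrative:

```go
// exampleShim is a hypothetical helper exercising ShimLegacyState and
// MustShimLegacyState as defined above. The legacy *State is hand-built
// for illustration; real callers usually have one read from disk.
func exampleShim() (*states.State, error) {
	legacy := &State{
		Modules: []*ModuleState{
			{
				Path:      rootModulePath,
				Resources: map[string]*ResourceState{},
			},
		},
	}

	// The shim round-trips through the serialized format, so anything the
	// state file doesn't record (resolved locals, non-root outputs) is
	// deliberately dropped during conversion.
	newState, err := ShimLegacyState(legacy)
	if err != nil {
		return nil, err
	}

	// In tests, where the input is constructed inline and known-good,
	// the panicking wrapper keeps assertions terse:
	_ = MustShimLegacyState(legacy)

	return newState, nil
}
```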
type basicComponentFactory struct { - providers map[string]ResourceProviderFactory - provisioners map[string]ResourceProvisionerFactory + providers map[string]providers.Factory + provisioners map[string]ProvisionerFactory } func (c *basicComponentFactory) ResourceProviders() []string { result := make([]string, len(c.providers)) - for k, _ := range c.providers { + for k := range c.providers { result = append(result, k) } @@ -39,14 +42,14 @@ func (c *basicComponentFactory) ResourceProviders() []string { func (c *basicComponentFactory) ResourceProvisioners() []string { result := make([]string, len(c.provisioners)) - for k, _ := range c.provisioners { + for k := range c.provisioners { result = append(result, k) } return result } -func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { +func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) { f, ok := c.providers[typ] if !ok { return nil, fmt.Errorf("unknown provider %q", typ) @@ -55,7 +58,7 @@ func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvi return f() } -func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { +func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) { f, ok := c.provisioners[typ] if !ok { return nil, fmt.Errorf("unknown provisioner %q", typ) diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go index 084f0105..0a424a01 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go @@ -14,8 +14,8 @@ const ( GraphTypePlan GraphTypePlanDestroy GraphTypeApply - GraphTypeInput GraphTypeValidate + GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs. ) // GraphTypeMap is a mapping of human-readable string to GraphType. This @@ -23,10 +23,10 @@ const ( // graph types. var GraphTypeMap = map[string]GraphType{ "apply": GraphTypeApply, - "input": GraphTypeInput, "plan": GraphTypePlan, "plan-destroy": GraphTypePlanDestroy, "refresh": GraphTypeRefresh, "legacy": GraphTypeLegacy, "validate": GraphTypeValidate, + "eval": GraphTypeEval, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go index f1d57760..313e9094 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go @@ -1,7 +1,10 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ImportOpts are used as the configuration for Import. @@ -9,23 +12,23 @@ type ImportOpts struct { // Targets are the targets to import Targets []*ImportTarget - // Module is optional, and specifies a config module that is loaded - // into the graph and evaluated. The use case for this is to provide - // provider configuration. - Module *module.Tree + // Config is optional, and specifies a config tree that will be loaded + // into the graph and evaluated. This is the source for provider + // configurations. 
+ Config *configs.Config } // ImportTarget is a single resource to import. type ImportTarget struct { - // Addr is the full resource address of the resource to import. - // Example: "module.foo.aws_instance.bar" - Addr string + // Addr is the address for the resource instance that the new object should + // be imported into. + Addr addrs.AbsResourceInstance // ID is the ID of the resource to import. This is resource-specific. ID string - // Provider string - Provider string + // ProviderAddr is the address of the provider that should handle the import. + ProviderAddr addrs.AbsProviderConfig } // Import takes already-created external resources and brings them @@ -38,7 +41,9 @@ type ImportTarget struct { // Further, this operation also gracefully handles partial state. If during // an import there is a failure, all previously imported resources remain // imported. -func (c *Context) Import(opts *ImportOpts) (*State, error) { +func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + // Hold a lock since we can modify our own state here defer c.acquireRun("import")() @@ -47,31 +52,32 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) { // If no module is given, default to the module configured with // the Context. - module := opts.Module - if module == nil { - module = c.module + config := opts.Config + if config == nil { + config = c.config } // Initialize our graph builder builder := &ImportGraphBuilder{ ImportTargets: opts.Targets, - Module: module, - Providers: c.components.ResourceProviders(), + Config: config, + Components: c.components, + Schemas: c.schemas, } // Build the graph! - graph, err := builder.Build(RootModulePath) - if err != nil { - return c.state, err + graph, graphDiags := builder.Build(addrs.RootModuleInstance) + diags = diags.Append(graphDiags) + if graphDiags.HasErrors() { + return c.state, diags } // Walk it - if _, err := c.walk(graph, nil, walkImport); err != nil { - return c.state, err + _, walkDiags := c.walk(graph, walkImport) + diags = diags.Append(walkDiags) + if walkDiags.HasErrors() { + return c.state, diags } - // Clean the state - c.state.prune() - - return c.state, nil + return c.state, diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go new file mode 100644 index 00000000..6c7be881 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/context_input.go @@ -0,0 +1,251 @@ +package terraform + +import ( + "context" + "fmt" + "log" + "sort" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/tfdiags" +) + +// Input asks for input to fill variables and provider configurations. +// This modifies the configuration in-place, so asking for Input twice +// may result in different UI output showing different current values. +func (c *Context) Input(mode InputMode) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + defer c.acquireRun("input")() + + if c.uiInput == nil { + log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping") + return diags + } + + ctx := context.Background() + + if mode&InputModeVar != 0 { + log.Printf("[TRACE] Context.Input: Prompting for variables") + + // Walk the variables first for the root module. 
We walk them in
+		// alphabetical order for UX reasons.
+		configs := c.config.Module.Variables
+		names := make([]string, 0, len(configs))
+		for name := range configs {
+			names = append(names, name)
+		}
+		sort.Strings(names)
+	Variables:
+		for _, n := range names {
+			v := configs[n]
+
+			// If we only care about unset variables, then we should skip any
+			// variable that is already set.
+			if mode&InputModeVarUnset != 0 {
+				if _, isSet := c.variables[n]; isSet {
+					continue
+				}
+			}
+
+			// this should only happen during tests
+			if c.uiInput == nil {
+				log.Println("[WARN] Context.uiInput is nil during input walk")
+				continue
+			}
+
+			// Ask the user for a value for this variable
+			var rawValue string
+			retry := 0
+			for {
+				var err error
+				rawValue, err = c.uiInput.Input(ctx, &InputOpts{
+					Id:          fmt.Sprintf("var.%s", n),
+					Query:       fmt.Sprintf("var.%s", n),
+					Description: v.Description,
+				})
+				if err != nil {
+					diags = diags.Append(tfdiags.Sourceless(
+						tfdiags.Error,
+						"Failed to request interactive input",
+						fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err),
+					))
+					return diags
+				}
+
+				if rawValue == "" && v.Default == cty.NilVal {
+					// Redo if it is required, but abort if we keep getting
+					// blank entries
+					if retry > 2 {
+						diags = diags.Append(tfdiags.Sourceless(
+							tfdiags.Error,
+							"Required variable not assigned",
+							fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n),
+						))
+						continue Variables
+					}
+					retry++
+					continue
+				}
+
+				break
+			}
+
+			val, valDiags := v.ParsingMode.Parse(n, rawValue)
+			diags = diags.Append(valDiags)
+			if diags.HasErrors() {
+				continue
+			}
+
+			c.variables[n] = &InputValue{
+				Value:      val,
+				SourceType: ValueFromInput,
+			}
+		}
+	}
+
+	if mode&InputModeProvider != 0 {
+		log.Printf("[TRACE] Context.Input: Prompting for provider arguments")
+
+		// We prompt for input only for provider configurations defined in
+		// the root module. At the time of writing that is an arbitrary
+		// restriction, but we have future plans to support "count" and
+		// "for_each" on modules that will then prevent us from supporting
+		// input for child module configurations anyway (since we'd need to
+		// dynamic-expand first), and provider configurations in child modules
+		// are not recommended since v0.11 anyway, so this restriction allows
+		// us to keep this relatively simple without significant hardship.
+
+		pcs := make(map[string]*configs.Provider)
+		pas := make(map[string]addrs.ProviderConfig)
+		for _, pc := range c.config.Module.ProviderConfigs {
+			addr := pc.Addr()
+			pcs[addr.String()] = pc
+			pas[addr.String()] = addr
+			log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange)
+		}
+		// We also need to detect _implied_ provider configs from resources.
+		// These won't have *configs.Provider objects, but they will still
+		// exist in the map and we'll just treat them as empty below.
+		for _, rc := range c.config.Module.ManagedResources {
+			pa := rc.ProviderConfigAddr()
+			if pa.Alias != "" {
+				continue // alias configurations cannot be implied
+			}
+			if _, exists := pcs[pa.String()]; !exists {
+				pcs[pa.String()] = nil
+				pas[pa.String()] = pa
+				log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange)
+			}
+		}
+		for _, rc := range c.config.Module.DataResources {
+			pa := rc.ProviderConfigAddr()
+			if pa.Alias != "" {
+				continue // alias configurations cannot be implied
+			}
+			if _, exists := pcs[pa.String()]; !exists {
+				pcs[pa.String()] = nil
+				pas[pa.String()] = pa
+				log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange)
+			}
+		}
+
+		for pk, pa := range pas {
+			pc := pcs[pk] // will be nil if this is an implied config
+
+			// Wrap the input into a namespace
+			input := &PrefixUIInput{
+				IdPrefix:    pk,
+				QueryPrefix: pk + ".",
+				UIInput:     c.uiInput,
+			}
+
+			schema := c.schemas.ProviderConfig(pa.Type)
+			if schema == nil {
+				// Could either be an incorrect config or just an incomplete
+				// mock in tests. We'll let a later pass decide, and just
+				// ignore this for the purposes of gathering input.
+				log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type)
+				continue
+			}
+
+			// For our purposes here we just want to detect if attributes are
+			// set in config at all, so rather than doing a full decode
+			// (which would require us to prepare an evalcontext, etc) we'll
+			// use the low-level HCL API to process only the top-level
+			// structure.
+			var attrExprs hcl.Attributes // nil if there is no config
+			if pc != nil && pc.Config != nil {
+				lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec()))
+				content, _, diags := pc.Config.PartialContent(lowLevelSchema)
+				if diags.HasErrors() {
+					log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error())
+					continue
+				}
+				attrExprs = content.Attributes
+			}
+
+			keys := make([]string, 0, len(schema.Attributes))
+			for key := range schema.Attributes {
+				keys = append(keys, key)
+			}
+			sort.Strings(keys)
+
+			vals := map[string]cty.Value{}
+			for _, key := range keys {
+				attrS := schema.Attributes[key]
+				if attrS.Optional {
+					continue
+				}
+				if attrExprs != nil {
+					if _, exists := attrExprs[key]; exists {
+						continue
+					}
+				}
+				if !attrS.Type.Equals(cty.String) {
+					continue
+				}
+
+				log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key)
+				rawVal, err := input.Input(ctx, &InputOpts{
+					Id:          key,
+					Query:       key,
+					Description: attrS.Description,
+				})
+				if err != nil {
+					log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err)
+					continue
+				}
+
+				vals[key] = cty.StringVal(rawVal)
+			}
+
+			c.providerInputConfig[pk] = vals
+			log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals)
+		}
+	}
+
+	return diags
+}
+
+// schemaForInputSniffing returns a transformed version of a given schema
+// that marks all attributes as optional, which the Context.Input method can
+// use to detect whether a required argument is set without missing arguments
+// themselves generating errors.
+func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema { + ret := &hcl.BodySchema{ + Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)), + Blocks: schema.Blocks, + } + + for i, attrS := range schema.Attributes { + ret.Attributes[i] = attrS + ret.Attributes[i].Required = false + } + + return ret +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index a9fae6c2..7a6ef3d3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -4,12 +4,20 @@ import ( "bufio" "bytes" "fmt" + "log" "reflect" "regexp" "sort" + "strconv" "strings" "sync" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" + "github.com/mitchellh/copystructure" ) @@ -23,12 +31,18 @@ const ( DiffUpdate DiffDestroy DiffDestroyCreate + + // DiffRefresh is only used in the UI for displaying diffs. + // Managed resource reads never appear in plan, and when data source + // reads appear they are represented as DiffCreate in core before + // transforming to DiffRefresh in the UI layer. + DiffRefresh // TODO: Actually use DiffRefresh in core too, for less confusion ) // multiVal matches the index key to a flatmapped set, list or map var multiVal = regexp.MustCompile(`\.(#|%)$`) -// Diff trackes the changes that are necessary to apply a configuration +// Diff tracks the changes that are necessary to apply a configuration // to an existing infrastructure. type Diff struct { // Modules contains all the modules that have a diff @@ -63,8 +77,24 @@ func (d *Diff) Prune() { // // This should be the preferred method to add module diffs since it // allows us to optimize lookups later as well as control sorting. -func (d *Diff) AddModule(path []string) *ModuleDiff { - m := &ModuleDiff{Path: path} +func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff { + // Lower the new-style address into a legacy-style address. + // This requires that none of the steps have instance keys, which is + // true for all addresses at the time of implementing this because + // "count" and "for_each" are not yet implemented for modules. + legacyPath := make([]string, len(path)) + for i, step := range path { + if step.InstanceKey != addrs.NoKey { + // FIXME: Once the rest of Terraform is ready to use count and + // for_each, remove all of this and just write the addrs.ModuleInstance + // value itself into the ModuleState. + panic("diff cannot represent modules with count or for_each keys") + } + + legacyPath[i] = step.Name + } + + m := &ModuleDiff{Path: legacyPath} m.init() d.Modules = append(d.Modules, m) return m @@ -73,7 +103,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff { // ModuleByPath is used to lookup the module diff for the given path. // This should be the preferred lookup mechanism as it allows for future // lookup optimizations. 
-func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
+func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
 	if d == nil {
 		return nil
 	}
@@ -81,7 +111,8 @@
 		if mod.Path == nil {
 			panic("missing module path")
 		}
-		if reflect.DeepEqual(mod.Path, path) {
+		modPath := normalizeModulePath(mod.Path)
+		if modPath.String() == path.String() {
 			return mod
 		}
 	}
@@ -90,7 +121,7 @@
 
 // RootModule returns the ModuleState for the root module
 func (d *Diff) RootModule() *ModuleDiff {
-	root := d.ModuleByPath(rootModulePath)
+	root := d.ModuleByPath(addrs.RootModuleInstance)
 	if root == nil {
 		panic("missing root module")
 	}
@@ -160,7 +191,8 @@
 	keys := make([]string, 0, len(d.Modules))
 	lookup := make(map[string]*ModuleDiff)
 	for _, m := range d.Modules {
-		key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
+		addr := normalizeModulePath(m.Path)
+		key := addr.String()
 		keys = append(keys, key)
 		lookup[key] = m
 	}
@@ -370,7 +402,7 @@
 
 	// Meta is a simple K/V map that is stored in a diff and persisted to
 	// plans but otherwise is completely ignored by Terraform core. It is
-	// mean to be used for additional data a resource may want to pass through.
+	// meant to be used for additional data a resource may want to pass through.
 	// The value here must only contain Go primitives and collections.
 	Meta map[string]interface{}
 }
@@ -378,6 +410,541 @@
 func (d *InstanceDiff) Lock()   { d.mu.Lock() }
 func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
 
+// ApplyToValue merges the receiver into the given base value, returning a
+// new value that incorporates the planned changes. The given value must
+// conform to the given schema, or this method will panic.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
+	// Create an InstanceState attribute map from our existing state.
+	// We can use this to more easily apply the diff changes.
+	attrs := hcl2shim.FlatmapValueFromHCL2(base)
+	applied, err := d.Apply(attrs, schema)
+	if err != nil {
+		return base, err
+	}
+
+	val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
+	if err != nil {
+		return base, err
+	}
+
+	return schema.CoerceValue(val)
+}
+
+// Apply applies the diff to the provided flatmapped attributes,
+// returning the new instance attributes.
+//
+// This method is intended for shimming old subsystems that still use this
+// legacy diff type to work with the new-style types.
+func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	// We always build a new value here, even if the given diff is "empty",
+	// because we might be planning to create a new instance that happens
+	// to have no attributes set, and so we want to produce an empty object
+	// rather than just echoing back the null old value.
+	if attrs == nil {
+		attrs = map[string]string{}
+	}
+
+	// Rather than applying the diff to mutate the attrs, we'll copy new values
+	// into here to avoid the possibility of leaving stale values.
+	result := map[string]string{}
+
+	if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
+		return result, nil
+	}
+
+	return d.applyBlockDiff(nil, attrs, schema)
+}
+
+func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
+	result := map[string]string{}
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	// localPrefix is used to build the local result map
+	localPrefix := ""
+	if name != "" {
+		localPrefix = name + "."
+	}
+
+	// iterate over the schema rather than the attributes, so we can handle
+	// different block types separately from plain attributes
+	for n, attrSchema := range schema.Attributes {
+		var err error
+		newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
+
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range newAttrs {
+			result[localPrefix+k] = v
+		}
+	}
+
+	blockPrefix := strings.Join(path, ".")
+	if blockPrefix != "" {
+		blockPrefix += "."
+	}
+	for n, block := range schema.BlockTypes {
+		// we need to find the set of all keys that traverse this block
+		candidateKeys := map[string]bool{}
+		blockKey := blockPrefix + n + "."
+		localBlockPrefix := localPrefix + n + "."
+
+		// we can only trust the diff for sets, since the path changes, so don't
+		// count existing values as candidate keys. If it turns out we're
+		// keeping the attributes, we will catch it down below with "keepBlock"
+		// after we check the set count.
+		if block.Nesting != configschema.NestingSet {
+			for k := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					nextDot := strings.Index(k[len(blockKey):], ".")
+					if nextDot < 0 {
+						continue
+					}
+					nextDot += len(blockKey)
+					candidateKeys[k[len(blockKey):nextDot]] = true
+				}
+			}
+		}
+
+		for k, diff := range d.Attributes {
+			if strings.HasPrefix(k, blockKey) {
+				nextDot := strings.Index(k[len(blockKey):], ".")
+				if nextDot < 0 {
+					continue
+				}
+
+				if diff.NewRemoved {
+					continue
+				}
+
+				nextDot += len(blockKey)
+				candidateKeys[k[len(blockKey):nextDot]] = true
+			}
+		}
+
+		// check each set candidate to see if it was removed.
+		// we need to do this, because when entire sets are removed, they may
+		// have the wrong key, and only show diffs going to ""
+		if block.Nesting == configschema.NestingSet {
+			for k := range candidateKeys {
+				indexPrefix := strings.Join(append(path, n, k), ".") + "."
+				keep := false
+				// now check each set element to see if it's a new diff, or one
+				// that we're dropping.
Since we're only applying the "New"
+				// portion of the set, we can ignore diffs that only contain "Old"
+				for attr, diff := range d.Attributes {
+					if !strings.HasPrefix(attr, indexPrefix) {
+						continue
+					}
+
+					// check for empty "count" keys
+					if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
+						continue
+					}
+
+					// removed items don't count either
+					if diff.NewRemoved {
+						continue
+					}
+
+					// this must be a diff to keep
+					keep = true
+					break
+				}
+				if !keep {
+					delete(candidateKeys, k)
+				}
+			}
+		}
+
+		for k := range candidateKeys {
+			newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
+			if err != nil {
+				return result, err
+			}
+
+			for attr, v := range newAttrs {
+				result[localBlockPrefix+attr] = v
+			}
+		}
+
+		keepBlock := true
+		// check this block's count diff directly first, since we may not
+		// have candidates because it was removed and only set to "0"
+		if diff, ok := d.Attributes[blockKey+"#"]; ok {
+			if diff.New == "0" || diff.NewRemoved {
+				keepBlock = false
+			}
+		}
+
+		// if there was no diff at all, then we need to keep the block attributes
+		if len(candidateKeys) == 0 && keepBlock {
+			for k, v := range attrs {
+				if strings.HasPrefix(k, blockKey) {
+					// we need the key relative to this block, so remove the
+					// entire prefix, then re-insert the block name.
+					localKey := localBlockPrefix + k[len(blockKey):]
+					result[localKey] = v
+				}
+			}
+		}
+
+		countAddr := strings.Join(append(path, n, "#"), ".")
+		if countDiff, ok := d.Attributes[countAddr]; ok {
+			if countDiff.NewComputed {
+				result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
+			} else {
+				result[localBlockPrefix+"#"] = countDiff.New
+
+				// While sets are complete, lists are not, and we may not have all the
+				// information to track removals. If the list was truncated, we need to
+				// remove the extra items from the result.
+				if block.Nesting == configschema.NestingList &&
+					countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
+					length, _ := strconv.Atoi(countDiff.New)
+					for k := range result {
+						if !strings.HasPrefix(k, localBlockPrefix) {
+							continue
+						}
+
+						index := k[len(localBlockPrefix):]
+						nextDot := strings.Index(index, ".")
+						if nextDot < 1 {
+							continue
+						}
+						index = index[:nextDot]
+						i, err := strconv.Atoi(index)
+						if err != nil {
+							// this shouldn't happen since we added these
+							// ourselves, but make note of it just in case.
+ log.Printf("[ERROR] bad list index in %q: %s", k, err) + continue + } + if i >= length { + delete(result, k) + } + } + } + } + } else if origCount, ok := attrs[countAddr]; ok && keepBlock { + result[localBlockPrefix+"#"] = origCount + } else { + result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result) + } + } + + return result, nil +} + +func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + ty := attrSchema.Type + switch { + case ty.IsListType(), ty.IsTupleType(), ty.IsMapType(): + return d.applyCollectionDiff(path, attrs, attrSchema) + case ty.IsSetType(): + return d.applySetDiff(path, attrs, attrSchema) + default: + return d.applySingleAttrDiff(path, attrs, attrSchema) + } +} + +func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + currentKey := strings.Join(path, ".") + + attr := path[len(path)-1] + + result := map[string]string{} + diff := d.Attributes[currentKey] + old, exists := attrs[currentKey] + + if diff != nil && diff.NewComputed { + result[attr] = config.UnknownVariableValue + return result, nil + } + + // "id" must exist and not be an empty string, or it must be unknown. + // This only applied to top-level "id" fields. + if attr == "id" && len(path) == 1 { + if old == "" { + result[attr] = config.UnknownVariableValue + } else { + result[attr] = old + } + return result, nil + } + + // attribute diffs are sometimes missed, so assume no diff means keep the + // old value + if diff == nil { + if exists { + result[attr] = old + } else { + // We need required values, so set those with an empty value. It + // must be set in the config, since if it were missing it would have + // failed validation. + if attrSchema.Required { + // we only set a missing string here, since bool or number types + // would have distinct zero value which shouldn't have been + // lost. + if attrSchema.Type == cty.String { + result[attr] = "" + } + } + } + return result, nil + } + + // check for missmatched diff values + if exists && + old != diff.Old && + old != config.UnknownVariableValue && + diff.Old != config.UnknownVariableValue { + return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old) + } + + if diff.NewRemoved { + // don't set anything in the new value + return map[string]string{}, nil + } + + if diff.Old == diff.New && diff.New == "" { + // this can only be a valid empty string + if attrSchema.Type == cty.String { + result[attr] = "" + } + return result, nil + } + + if attrSchema.Computed && diff.NewComputed { + result[attr] = config.UnknownVariableValue + return result, nil + } + + result[attr] = diff.New + + return result, nil +} + +func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) { + result := map[string]string{} + + prefix := "" + if len(path) > 1 { + prefix = strings.Join(path[:len(path)-1], ".") + "." 
+	}
+
+	name := ""
+	if len(path) > 0 {
+		name = path[len(path)-1]
+	}
+
+	currentKey := prefix + name
+
+	// check the index first for special handling
+	for k, diff := range d.Attributes {
+		// check the index value, which can be set, and 0
+		if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
+			if diff.NewRemoved {
+				return result, nil
+			}
+
+			if diff.NewComputed {
+				result[k[len(prefix):]] = config.UnknownVariableValue
+				return result, nil
+			}
+
+			// do what the diff tells us to here, so that it's consistent with applies
+			if diff.New == "0" {
+				result[k[len(prefix):]] = "0"
+				return result, nil
+			}
+		}
+	}
+
+	// collect all the keys from the diff and the old state
+	noDiff := true
+	keys := map[string]bool{}
+	for k := range d.Attributes {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noDiff = false
+		keys[k] = true
+	}
+
+	noAttrs := true
+	for k := range attrs {
+		if !strings.HasPrefix(k, currentKey+".") {
+			continue
+		}
+		noAttrs = false
+		keys[k] = true
+	}
+
+	// If there's no diff and no attrs, then there's no value at all.
+	// This prevents an unexpected zero-count attribute in the attributes.
+	if noDiff && noAttrs {
+		return result, nil
+	}
+
+	idx := "#"
+	if attrSchema.Type.IsMapType() {
+		idx = "%"
+	}
+
+	for k := range keys {
+		// generate a schema placeholder for the values
+		elSchema := &configschema.Attribute{
+			Type: attrSchema.Type.ElementType(),
+		}
+
+		res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
+		if err != nil {
+			return result, err
+		}
+
+		for k, v := range res {
+			result[name+"."+k] = v
+		}
+	}
+
+	// Just like in nested list blocks, for simple lists we may need to fill in
+	// missing empty strings.
+	countKey := name + "." + idx
+	count := result[countKey]
+	length, _ := strconv.Atoi(count)
+
+	if count != "" && count != hcl2shim.UnknownVariableValue &&
+		attrSchema.Type.Equals(cty.List(cty.String)) {
+		// insert empty strings into missing indexes
+		for i := 0; i < length; i++ {
+			key := fmt.Sprintf("%s.%d", name, i)
+			if _, ok := result[key]; !ok {
+				result[key] = ""
+			}
+		}
+	}
+
+	// now check for truncation in any type of list
+	if attrSchema.Type.IsListType() {
+		for key := range result {
+			if key == countKey {
+				continue
+			}
+
+			if len(key) <= len(name)+1 {
+				// not sure what this is, but don't panic
+				continue
+			}
+
+			index := key[len(name)+1:]
+
+			// It is possible to have nested sets or maps, so look for another dot
+			dot := strings.Index(index, ".")
+			if dot > 0 {
+				index = index[:dot]
+			}
+
+			// This shouldn't have any more dots, since the element type is only string.
+			num, err := strconv.Atoi(index)
+			if err != nil {
+				log.Printf("[ERROR] bad list index in %q: %s", currentKey, err)
+				continue
+			}
+
+			if num >= length {
+				delete(result, key)
+			}
+		}
+	}
+
+	// Fill in the count value if it wasn't present in the diff for some reason,
+	// or if there is no count at all.
+	_, countDiff := d.Attributes[countKey]
+	if result[countKey] == "" || (!countDiff && len(keys) != len(result)) {
+		result[countKey] = countFlatmapContainerValues(countKey, result)
+	}
+
+	return result, nil
+}
+
+func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
+	// We only need this special behavior for sets of objects.
+	if !attrSchema.Type.ElementType().IsObjectType() {
+		// The normal collection apply behavior will work okay for this one, then.
+		return d.applyCollectionDiff(path, attrs, attrSchema)
+	}
+
+	// When we're dealing with a set of an object type we actually want to
+	// use our normal _block type_ apply behaviors, so we'll construct ourselves
+	// a synthetic schema that treats the object type as a block type and
+	// then delegate to our block apply method.
+	synthSchema := &configschema.Block{
+		Attributes: make(map[string]*configschema.Attribute),
+	}
+
+	for name, ty := range attrSchema.Type.ElementType().AttributeTypes() {
+		// We can safely make everything into an attribute here because in the
+		// event that there are nested set attributes we'll end up back in
+		// here again recursively and can then deal with the next level of
+		// expansion.
+		synthSchema.Attributes[name] = &configschema.Attribute{
+			Type:     ty,
+			Optional: true,
+		}
+	}
+
+	parentPath := path[:len(path)-1]
+	childName := path[len(path)-1]
+	containerSchema := &configschema.Block{
+		BlockTypes: map[string]*configschema.NestedBlock{
+			childName: {
+				Nesting: configschema.NestingSet,
+				Block:   *synthSchema,
+			},
+		},
+	}
+
+	return d.applyBlockDiff(parentPath, attrs, containerSchema)
+}
+
+// countFlatmapContainerValues returns the number of values in the flatmapped container
+// (set, map, list) indexed by key. The key argument is expected to include the
+// trailing ".#", or ".%".
+func countFlatmapContainerValues(key string, attrs map[string]string) string {
+	if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
+		panic(fmt.Sprintf("invalid index value %q", key))
+	}
+
+	prefix := key[:len(key)-1]
+	items := map[string]int{}
+
+	for k := range attrs {
+		if k == key {
+			continue
+		}
+		if !strings.HasPrefix(k, prefix) {
+			continue
+		}
+
+		suffix := k[len(prefix):]
+		dot := strings.Index(suffix, ".")
+		if dot > 0 {
+			suffix = suffix[:dot]
+		}
+
+		items[suffix]++
+	}
+	return strconv.Itoa(len(items))
+}
+
 // ResourceAttrDiff is the diff of a single attribute of a resource.
 type ResourceAttrDiff struct {
 	Old string // Old Value
@@ -551,7 +1118,7 @@
 }
 
 // These methods are properly locked, for use outside other InstanceDiff
-// methods but everywhere else within in the terraform package.
+// methods but everywhere else within the terraform package.
 // TODO refactor the locking scheme
 func (d *InstanceDiff) SetTainted(b bool) {
 	d.mu.Lock()
@@ -831,7 +1398,14 @@
 		}
 	}
 
-	// TODO: check for the same value if not computed
+	// We don't compare the values because we can't currently actually
+	// guarantee to generate the same value for two diffs created from
+	// the same state+config: we have some pesky interpolation functions
+	// that do not behave as pure functions (uuid, timestamp) and so they
+	// can be different each time a diff is produced.
+	// FIXME: Re-organize our config handling so that we don't re-evaluate
+	// expressions when we produce a second comparison diff during
+	// apply (for EvalCompareDiff).
 }
 
 	// Check for leftover attributes
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 3cb088a2..48ed3533 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -2,7 +2,8 @@ package terraform
 
 import (
 	"log"
-	"strings"
+
+	"github.com/hashicorp/terraform/tfdiags"
 )
 
 // EvalNode is the interface that must be implemented by graph nodes to
@@ -46,15 +47,21 @@ func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
 func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
 	path := "unknown"
 	if ctx != nil {
-		path = strings.Join(ctx.Path(), ".")
+		path = ctx.Path().String()
+	}
+	if path == "" {
+		path = "<root>"
 	}
 
-	log.Printf("[DEBUG] %s: eval: %T", path, n)
+	log.Printf("[TRACE] %s: eval: %T", path, n)
 	output, err := n.Eval(ctx)
 	if err != nil {
-		if _, ok := err.(EvalEarlyExitError); ok {
-			log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
-		} else {
+		switch err.(type) {
+		case EvalEarlyExitError:
+			log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err)
+		case tfdiags.NonFatalError:
+			log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err)
+		default:
 			log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
 		}
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index 2f6a4973..09313f7f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -3,119 +3,316 @@ package terraform
 
 import (
 	"fmt"
 	"log"
-	"strconv"
+	"strings"
 
 	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/plans/objchange"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/provisioners"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
 )
 
 // EvalApply is an EvalNode implementation that writes the diff to
 // the full diff.
 type EvalApply struct {
-	Info      *InstanceInfo
-	State     **InstanceState
-	Diff      **InstanceDiff
-	Provider  *ResourceProvider
-	Output    **InstanceState
-	CreateNew *bool
-	Error     *error
+	Addr           addrs.ResourceInstance
+	Config         *configs.Resource
+	Dependencies   []addrs.Referenceable
+	State          **states.ResourceInstanceObject
+	Change         **plans.ResourceInstanceChange
+	ProviderAddr   addrs.AbsProviderConfig
+	Provider       *providers.Interface
+	ProviderSchema **ProviderSchema
+	Output         **states.ResourceInstanceObject
+	CreateNew      *bool
+	Error          *error
 }
 
 // TODO: test
 func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
-	diff := *n.Diff
+	var diags tfdiags.Diagnostics
+
+	change := *n.Change
 	provider := *n.Provider
 	state := *n.State
+	absAddr := n.Addr.Absolute(ctx.Path())
 
-	// If we have no diff, we have nothing to do!
- if diff.Empty() { - log.Printf( - "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id) - return nil, nil + if state == nil { + state = &states.ResourceInstanceObject{} + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + + if n.CreateNew != nil { + *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace()) } - // Remove any output values from the diff - for k, ad := range diff.CopyAttributes() { - if ad.Type == DiffAttrOutput { - diff.DelAttribute(k) + configVal := cty.NullVal(cty.DynamicPseudoType) + if n.Config != nil { + var configDiags tfdiags.Diagnostics + keyData := EvalDataForInstanceKey(n.Addr.Key) + configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() } } - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) + log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action) + resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: change.Before, + Config: configVal, + PlannedState: change.After, + PlannedPrivate: change.Private, + }) + applyDiags := resp.Diagnostics + if n.Config != nil { + applyDiags = applyDiags.InConfigBody(n.Config.Config) + } + diags = diags.Append(applyDiags) + + // Even if there are errors in the returned diagnostics, the provider may + // have returned a _partial_ state for an object that already exists but + // failed to fully configure, and so the remaining code must always run + // to completion but must be defensive against the new value being + // incomplete. + newVal := resp.NewState + + if newVal == cty.NilVal { + // Providers are supposed to return a partial new value even when errors + // occur, but sometimes they don't and so in that case we'll patch that up + // by just using the prior state, so we'll at least keep track of the + // object for the user to retry. + newVal = change.Before + + // As a special case, we'll set the new value to null if it looks like + // we were trying to execute a delete, because the provider in this case + // probably left the newVal unset intending it to be interpreted as "null". + if change.After.IsNull() { + newVal = cty.NullVal(schema.ImpliedType()) + } + + // Ideally we'd produce an error or warning here if newVal is nil and + // there are no errors in diags, because that indicates a buggy + // provider not properly reporting its result, but unfortunately many + // of our historical test mocks behave in this way and so producing + // a diagnostic here fails hundreds of tests. Instead, we must just + // silently retain the old value for now. Returning a nil value with + // no errors is still always considered a bug in the provider though, + // and should be fixed for any "real" providers that do it. 
 	}
-	state.init()
-
-	// Flag if we're creating a new instance
-	if n.CreateNew != nil {
-		*n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+	var conformDiags tfdiags.Diagnostics
+	for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
+		conformDiags = conformDiags.Append(tfdiags.Sourceless(
+			tfdiags.Error,
+			"Provider produced invalid object",
+			fmt.Sprintf(
+				"Provider %q produced an invalid value after apply for %s. The result cannot be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+				n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
+			),
+		))
+	}
+	diags = diags.Append(conformDiags)
+	if conformDiags.HasErrors() {
+		// Bail early in this particular case, because an object that doesn't
+		// conform to the schema can't be saved in the state anyway -- the
+		// serializer will reject it.
+		return nil, diags.Err()
+	}
 
-	// With the completed diff, apply!
-	log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
-	state, err := provider.Apply(n.Info, state, diff)
-	if state == nil {
-		state = new(InstanceState)
+	// After this point we have a type-conforming result object and so we
+	// must always run to completion to ensure it can be saved. If n.Error
+	// is set then we must not return a non-nil error, in order to allow
+	// evaluation to continue to a later point where our state object will
+	// be saved.
+
+	// By this point there must not be any unknown values remaining in our
+	// object, because we've applied the change and we can't save unknowns
+	// in our persistent state. If any are present then we will indicate an
+	// error (which is always a bug in the provider) but we will also replace
+	// them with nulls so that we can successfully save the portions of the
+	// returned value that are known.
+	if !newVal.IsWhollyKnown() {
+		// To generate better error messages, we'll go for a walk through the
+		// value and make a separate diagnostic for each unknown value we
+		// find.
+		cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) {
+			if !val.IsKnown() {
+				pathStr := tfdiags.FormatCtyPath(path)
+				diags = diags.Append(tfdiags.Sourceless(
+					tfdiags.Error,
+					"Provider returned invalid result object after apply",
+					fmt.Sprintf(
+						"After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.",
+						n.Addr.Absolute(ctx.Path()), pathStr,
+					),
+				))
+			}
+			return true, nil
+		})
+
+		// NOTE: This operation can potentially be lossy if there are multiple
+		// elements in a set that differ only by unknown values: after
+		// replacing with null these will be merged together into a single set
+		// element. Since we can only get here in the presence of a provider
+		// bug, we accept this because storing a result here is always a
+		// best-effort sort of thing.
+		newVal = cty.UnknownAsNull(newVal)
+	}
+
+	if change.Action != plans.Delete && !diags.HasErrors() {
+		// Only values that were marked as unknown in the planned value are allowed
+		// to change during the apply operation. (We do this after the unknown-ness
+		// check above so that we also catch anything that became unknown after
+		// being known during plan.)
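The unknown-value scrub above can be reproduced in isolation with the same go-cty calls (`cty.Walk`, `cty.UnknownAsNull`) that `EvalApply` uses. In this sketch the object value and its attribute names are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// An invented post-apply object in which a buggy provider has left one
	// attribute unknown.
	newVal := cty.ObjectVal(map[string]cty.Value{
		"id":  cty.StringVal("i-abc123"),
		"arn": cty.UnknownVal(cty.String),
	})

	if !newVal.IsWhollyKnown() {
		// Name each offending path, as the EvalApply code above does.
		cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) {
			if !val.IsKnown() {
				fmt.Printf("unknown value remains at: %#v\n", path)
			}
			return true, nil
		})

		// Replace the unknowns with nulls so the rest of the object can
		// still be persisted to state.
		newVal = cty.UnknownAsNull(newVal)
	}

	fmt.Println(newVal.GoString()) // "arn" is now null
}
```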
+ // + // If we are returning other errors anyway then we'll give this + // a pass since the other errors are usually the explanation for + // this one and so it's more helpful to let the user focus on the + // root cause rather than distract with this extra problem. + if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + + // The sort of inconsistency we won't catch here is if a known value + // in the plan is changed during apply. That can cause downstream + // problems because a dependent resource would make its own plan based + // on the planned value, and thus get a different result during the + // apply phase. This will usually lead to a "Provider produced invalid plan" + // error that incorrectly blames the downstream resource for the change. + + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent result after apply", + fmt.Sprintf( + "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + } + } } - state.init() - // Force the "id" attribute to be our ID - if state.ID != "" { - state.Attributes["id"] = state.ID + // If a provider returns a null or non-null object at the wrong time then + // we still want to save that but it often causes some confusing behaviors + // where it seems like Terraform is failing to take any action at all, + // so we'll generate some errors to draw attention to it. + if !diags.HasErrors() { + if change.Action == plans.Delete && !newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.", + change.Action, n.Addr.Absolute(ctx.Path()), + ), + )) + } + if change.Action != plans.Delete && newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider returned invalid result object after apply", + fmt.Sprintf( + "After applying a %s plan, the provider returned a null object for %s. 
Only destroy operations should produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.",
+					change.Action, n.Addr.Absolute(ctx.Path()),
+				),
+			))
+		}
+	}
+
+	// Sometimes providers return a null value when an operation fails for some
+	// reason, but we'd rather keep the prior state so that the error can be
+	// corrected on a subsequent run. We must only do this when the new value
+	// is null, though, or else we may discard partial updates the provider
+	// was able to complete.
+	if diags.HasErrors() && newVal.IsNull() {
+		// We'll continue, but use the prior state as the new value, making
+		// this effectively a no-op. If the item really _has_ been deleted
+		// then our next refresh will detect that and fix it up.
+		// If change.Action is Create then change.Before will also be null,
+		// which is fine.
+		newVal = change.Before
 	}
 
-	// If the value is the unknown variable value, then it is an error.
-	// In this case we record the error and remove it from the state
-	for ak, av := range state.Attributes {
-		if av == config.UnknownVariableValue {
-			err = multierror.Append(err, fmt.Errorf(
-				"Attribute with unknown value: %s", ak))
-			delete(state.Attributes, ak)
+	var newState *states.ResourceInstanceObject
+	if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case
+		newState = &states.ResourceInstanceObject{
+			Status:       states.ObjectReady,
+			Value:        newVal,
+			Private:      resp.Private,
+			Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node
 		}
 	}
 
 	// Write the final state
 	if n.Output != nil {
-		*n.Output = state
+		*n.Output = newState
 	}
 
-	// If there are no errors, then we append it to our output error
-	// if we have one, otherwise we just output it.
-	if err != nil {
+	if diags.HasErrors() {
+		// If the caller provided an error pointer then they are expected to
+		// handle the error some other way and we treat our own result as
+		// success.
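The unknown-value cleanup earlier in this function leans on two go-cty helpers, `cty.Walk` and `cty.UnknownAsNull`. The following is a minimal, self-contained sketch of that pattern, not an excerpt from the vendored code; the object value is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// A hypothetical post-apply result that still contains an unknown
	// value, which the vendored code treats as a provider bug.
	newVal := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-123"),
		"fqdn": cty.UnknownVal(cty.String),
	})

	// Walk the value and report each unknown leaf, mirroring the
	// per-path diagnostics built above.
	cty.Walk(newVal, func(path cty.Path, v cty.Value) (bool, error) {
		if !v.IsKnown() {
			fmt.Printf("unknown value at path %#v\n", path)
		}
		return true, nil
	})

	// Null out whatever is still unknown so the known portions of the
	// object can still be persisted.
	fmt.Println(cty.UnknownAsNull(newVal).GoString())
}
```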
if n.Error != nil { - helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error()) - *n.Error = multierror.Append(*n.Error, helpfulErr) - } else { - return nil, err + err := diags.Err() + *n.Error = err + log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err) + return nil, nil } } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalApplyPre is an EvalNode implementation that does the pre-Apply work type EvalApplyPre struct { - Info *InstanceInfo - State **InstanceState - Diff **InstanceDiff + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject + Change **plans.ResourceInstanceChange } // TODO: test func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { - state := *n.State - diff := *n.Diff + change := *n.Change + absAddr := n.Addr.Absolute(ctx.Path()) - // If the state is nil, make it non-nil - if state == nil { - state = new(InstanceState) + if change == nil { + panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr)) } - state.init() - { - // Call post-apply hook + if resourceHasUserVisibleApply(n.Addr) { + priorState := change.Before + plannedNewState := change.After + err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreApply(n.Info, state, diff) + return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState) }) if err != nil { return nil, err @@ -127,8 +324,9 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { // EvalApplyPost is an EvalNode implementation that does the post-Apply work type EvalApplyPost struct { - Info *InstanceInfo - State **InstanceState + Addr addrs.ResourceInstance + Gen states.Generation + State **states.ResourceInstanceObject Error *error } @@ -136,42 +334,128 @@ type EvalApplyPost struct { func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { state := *n.State - { - // Call post-apply hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostApply(n.Info, state, *n.Error) + if resourceHasUserVisibleApply(n.Addr) { + absAddr := n.Addr.Absolute(ctx.Path()) + var newState cty.Value + if state != nil { + newState = state.Value + } else { + newState = cty.NullVal(cty.DynamicPseudoType) + } + var err error + if n.Error != nil { + err = *n.Error + } + + hookErr := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostApply(absAddr, n.Gen, newState, err) }) - if err != nil { - return nil, err + if hookErr != nil { + return nil, hookErr } } return nil, *n.Error } +// EvalMaybeTainted is an EvalNode that takes the planned change, new value, +// and possible error from an apply operation and produces a new instance +// object marked as tainted if it appears that a create operation has failed. +// +// This EvalNode never returns an error, to ensure that a subsequent EvalNode +// can still record the possibly-tainted object in the state. +type EvalMaybeTainted struct { + Addr addrs.ResourceInstance + Gen states.Generation + Change **plans.ResourceInstanceChange + State **states.ResourceInstanceObject + Error *error + + // If StateOutput is not nil, its referent will be assigned either the same + // pointer as State or a new object with its status set as Tainted, + // depending on whether an error is given and if this was a create action. 
+ StateOutput **states.ResourceInstanceObject +} + +// TODO: test +func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) { + state := *n.State + change := *n.Change + err := *n.Error + + if state != nil && state.Status == states.ObjectTainted { + log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path())) + return nil, nil + } + + if n.StateOutput != nil { + if err != nil && change.Action == plans.Create { + // If there are errors during a _create_ then the object is + // in an undefined state, and so we'll mark it as tainted so + // we can try again on the next run. + // + // We don't do this for other change actions because errors + // during updates will often not change the remote object at all. + // If there _were_ changes prior to the error, it's the provider's + // responsibility to record the effect of those changes in the + // object value it returned. + log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path())) + *n.StateOutput = state.AsTainted() + } else { + *n.StateOutput = state + } + } + + return nil, nil +} + +// resourceHasUserVisibleApply returns true if the given resource is one where +// apply actions should be exposed to the user. +// +// Certain resources do apply actions only as an implementation detail, so +// these should not be advertised to code outside of this package. +func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool { + // Only managed resources have user-visible apply actions. + // In particular, this excludes data resources since we "apply" these + // only as an implementation detail of removing them from state when + // they are destroyed. (When reading, they don't get here at all because + // we present them as "Refresh" actions.) + return addr.ContainingResource().Mode == addrs.ManagedResourceMode +} + // EvalApplyProvisioners is an EvalNode implementation that executes // the provisioners for a resource. // // TODO(mitchellh): This should probably be split up into a more fine-grained // ApplyProvisioner (single) that is looped over. type EvalApplyProvisioners struct { - Info *InstanceInfo - State **InstanceState - Resource *config.Resource - InterpResource *Resource + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject + ResourceConfig *configs.Resource CreateNew *bool Error *error // When is the type of provisioner to run at this point - When config.ProvisionerWhen + When configs.ProvisionerWhen } // TODO: test func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) state := *n.State - - if n.CreateNew != nil && !*n.CreateNew { + if state == nil { + log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr) + return nil, nil + } + if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew { // If we're not creating a new resource, then don't run provisioners + log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr) + return nil, nil + } + if state.Status == states.ObjectTainted { + // No point in provisioning an object that is already tainted, since + // it's going to get recreated on the next apply anyway. 
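EvalMaybeTainted above boils down to one rule: only a failed create taints the object, because a failed update usually leaves a usable remote object behind. A simplified stand-alone sketch of that rule, using a hypothetical stand-in type rather than the real states.ResourceInstanceObject:

```go
package main

import (
	"errors"
	"fmt"
)

// instanceObject is a deliberately simplified stand-in for
// states.ResourceInstanceObject; only the status matters here.
type instanceObject struct {
	status string // "ready" or "tainted"
}

// maybeTaint mirrors the rule above: taint only when an error occurred
// during a create, since the remote object is then in an undefined state.
func maybeTaint(obj instanceObject, action string, applyErr error) instanceObject {
	if applyErr != nil && action == "create" {
		return instanceObject{status: "tainted"}
	}
	return obj
}

func main() {
	obj := instanceObject{status: "ready"}
	fmt.Println(maybeTaint(obj, "create", errors.New("boom"))) // {tainted}
	fmt.Println(maybeTaint(obj, "update", errors.New("boom"))) // {ready}
}
```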
+		log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
 		return nil, nil
 	}
 
@@ -181,14 +465,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 		return nil, nil
 	}
 
-	// taint tells us whether to enable tainting.
-	taint := n.When == config.ProvisionerWhenCreate
-
 	if n.Error != nil && *n.Error != nil {
-		if taint {
-			state.Tainted = true
-		}
-
-		// We're already tainted, so just return out
 		return nil, nil
 	}
 
@@ -196,7 +473,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 	{
 		// Call pre hook
 		err := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PreProvisionResource(n.Info, state)
+			return h.PreProvisionInstance(absAddr, state.Value)
 		})
 		if err != nil {
 			return nil, err
@@ -207,21 +484,19 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 	// if we have one, otherwise we just output it.
 	err := n.apply(ctx, provs)
 	if err != nil {
-		if taint {
-			state.Tainted = true
-		}
-
-		if n.Error != nil {
-			*n.Error = multierror.Append(*n.Error, err)
-		} else {
+		if n.Error == nil {
 			return nil, err
 		}
+		// n.Error is nil-checked above before this dereference; record the
+		// error through the caller's pointer and continue.
+		*n.Error = multierror.Append(*n.Error, err)
+		log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr)
+		return nil, nil
 	}
 
 	{
 		// Call post hook
 		err := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PostProvisionResource(n.Info, state)
+			return h.PostProvisionInstance(absAddr, state.Value)
 		})
 		if err != nil {
 			return nil, err
@@ -233,18 +508,18 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
 // filterProvisioners filters the provisioners on the resource to only
 // the provisioners specified by the "when" option.
-func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
 	// Fast path the zero case
-	if n.Resource == nil {
+	if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
 		return nil
 	}
 
-	if len(n.Resource.Provisioners) == 0 {
+	if len(n.ResourceConfig.Managed.Provisioners) == 0 {
 		return nil
 	}
 
-	result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
-	for _, p := range n.Resource.Provisioners {
+	result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
+	for _, p := range n.ResourceConfig.Managed.Provisioners {
 		if p.When == n.When {
 			result = append(result, p)
 		}
@@ -253,64 +528,71 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
 	return result
 }
 
-func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
-	state := *n.State
-
-	// Store the original connection info, restore later
-	origConnInfo := state.Ephemeral.ConnInfo
-	defer func() {
-		state.Ephemeral.ConnInfo = origConnInfo
-	}()
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
+	var diags tfdiags.Diagnostics
+	instanceAddr := n.Addr
+	absAddr := instanceAddr.Absolute(ctx.Path())
+
+	// If there's a connection block defined directly inside the resource block
+	// then it'll serve as a base connection configuration for all of the
+	// provisioners; the merge with any per-provisioner connection block is
+	// sketched in the aside below.
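The base/override merge described in the comment above is implemented just below with configs.MergeBodies, whose arguments from the base body are masked by same-named arguments in the override. A rough illustration of that masking, assuming the vendored hcl2 and terraform/configs packages; the two HCL snippets are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
	"github.com/hashicorp/terraform/configs"
)

func parseBody(p *hclparse.Parser, name, src string) hcl.Body {
	f, diags := p.ParseHCL([]byte(src), name)
	if diags.HasErrors() {
		panic(diags.Error())
	}
	return f.Body
}

func main() {
	p := hclparse.NewParser()
	// Resource-level connection settings (the base) and
	// provisioner-level settings (the override).
	base := parseBody(p, "base.hcl", "host = \"10.0.0.1\"\nuser = \"root\"\n")
	local := parseBody(p, "local.hcl", "user = \"deploy\"\n")

	merged := configs.MergeBodies(base, local)
	attrs, diags := merged.JustAttributes()
	if diags.HasErrors() {
		panic(diags.Error())
	}
	for name, attr := range attrs {
		val, _ := attr.Expr.Value(nil)
		fmt.Printf("%s = %q\n", name, val.AsString()) // user resolves to "deploy"
	}
}
```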
+ var baseConn hcl.Body + if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil { + baseConn = n.ResourceConfig.Managed.Connection.Config + } for _, prov := range provs { + log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type) + // Get the provisioner provisioner := ctx.Provisioner(prov.Type) + schema := ctx.ProvisionerSchema(prov.Type) - // Interpolate the provisioner config - provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource) - if err != nil { - return err - } + keyData := EvalDataForInstanceKey(instanceAddr.Key) - // Interpolate the conn info, since it may contain variables - connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) - if err != nil { - return err + // Evaluate the main provisioner configuration. + config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData) + diags = diags.Append(configDiags) + + // If the provisioner block contains a connection block of its own then + // it can override the base connection configuration, if any. + var localConn hcl.Body + if prov.Connection != nil { + localConn = prov.Connection.Config } - // Merge the connection information - overlay := make(map[string]string) - if origConnInfo != nil { - for k, v := range origConnInfo { - overlay[k] = v - } + var connBody hcl.Body + switch { + case baseConn != nil && localConn != nil: + // Our standard merging logic applies here, similar to what we do + // with _override.tf configuration files: arguments from the + // base connection block will be masked by any arguments of the + // same name in the local connection block. + connBody = configs.MergeBodies(baseConn, localConn) + case baseConn != nil: + connBody = baseConn + case localConn != nil: + connBody = localConn } - for k, v := range connInfo.Config { - switch vt := v.(type) { - case string: - overlay[k] = vt - case int64: - overlay[k] = strconv.FormatInt(vt, 10) - case int32: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case int: - overlay[k] = strconv.FormatInt(int64(vt), 10) - case float32: - overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) - case float64: - overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64) - case bool: - overlay[k] = strconv.FormatBool(vt) - default: - overlay[k] = fmt.Sprintf("%v", vt) + + // start with an empty connInfo + connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType()) + + if connBody != nil { + var connInfoDiags tfdiags.Diagnostics + connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData) + diags = diags.Append(connInfoDiags) + if diags.HasErrors() { + // "on failure continue" setting only applies to failures of the + // provisioner itself, not to invalid configuration. 
+				return diags.Err()
 			}
 		}
-		state.Ephemeral.ConnInfo = overlay
 
 		{
 			// Call pre hook
 			err := ctx.Hook(func(h Hook) (HookAction, error) {
-				return h.PreProvision(n.Info, prov.Type)
+				return h.PreProvisionInstanceStep(absAddr, prov.Type)
 			})
 			if err != nil {
 				return err
@@ -320,31 +602,37 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
 		// The output function
 		outputFn := func(msg string) {
 			ctx.Hook(func(h Hook) (HookAction, error) {
-				h.ProvisionOutput(n.Info, prov.Type, msg)
+				h.ProvisionOutput(absAddr, prov.Type, msg)
 				return HookActionContinue, nil
 			})
 		}
 
-		// Invoke the Provisioner
 		output := CallbackUIOutput{OutputFn: outputFn}
-		applyErr := provisioner.Apply(&output, state, provConfig)
+		resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
+			Config:     config,
+			Connection: connInfo,
+			UIOutput:   &output,
+		})
+		applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
 
 		// Call post hook
 		hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
-			return h.PostProvision(n.Info, prov.Type, applyErr)
+			return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
 		})
 
-		// Handle the error before we deal with the hook
-		if applyErr != nil {
-			// Determine failure behavior
-			switch prov.OnFailure {
-			case config.ProvisionerOnFailureContinue:
-				log.Printf(
-					"[INFO] apply: %s [%s]: error during provision, continue requested",
-					n.Info.Id, prov.Type)
-
-			case config.ProvisionerOnFailureFail:
-				return applyErr
+		switch prov.OnFailure {
+		case configs.ProvisionerOnFailureContinue:
+			if applyDiags.HasErrors() {
+				log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
+			} else {
+				// Maybe there are warnings that we still want to see
+				diags = diags.Append(applyDiags)
+			}
+		default:
+			diags = diags.Append(applyDiags)
+			if applyDiags.HasErrors() {
+				log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
+				return diags.Err()
 			}
 		}
 
@@ -354,6 +642,5 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
 		}
 	}
 
-	return nil
-
+	return diags.ErrWithWarnings()
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
index 715e79e1..4dff0c84 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -3,33 +3,44 @@ package terraform
 
 import (
 	"fmt"
 
-	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/plans"
+
+	"github.com/hashicorp/hcl2/hcl"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/tfdiags"
 )
 
-// EvalPreventDestroy is an EvalNode implementation that returns an
-// error if a resource has PreventDestroy configured and the diff
-// would destroy the resource.
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the planned
+// change would destroy the resource.
type EvalCheckPreventDestroy struct { - Resource *config.Resource - ResourceId string - Diff **InstanceDiff + Addr addrs.ResourceInstance + Config *configs.Resource + Change **plans.ResourceInstanceChange } func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { - if n.Diff == nil || *n.Diff == nil || n.Resource == nil { + if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil { return nil, nil } - diff := *n.Diff - preventDestroy := n.Resource.Lifecycle.PreventDestroy - - if diff.GetDestroy() && preventDestroy { - resourceId := n.ResourceId - if resourceId == "" { - resourceId = n.Resource.Id() - } - - return nil, fmt.Errorf(preventDestroyErrStr, resourceId) + change := *n.Change + preventDestroy := n.Config.Managed.PreventDestroy + + if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Instance cannot be destroyed", + Detail: fmt.Sprintf( + "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.", + n.Addr.Absolute(ctx.Path()).String(), + ), + Subject: &n.Config.DeclRange, + }) + return nil, diags.Err() } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go index a1f815b7..08f3059e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go @@ -1,9 +1,16 @@ package terraform import ( - "sync" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) // EvalContext is the interface that is given to eval nodes to execute. @@ -13,7 +20,7 @@ type EvalContext interface { Stopped() <-chan struct{} // Path is the current module path. - Path() []string + Path() addrs.ModuleInstance // Hook is used to call hook methods. The callback is called for each // hook and should return the hook action to take and the error. @@ -22,63 +29,105 @@ type EvalContext interface { // Input is the UIInput object for interacting with the UI. Input() UIInput - // InitProvider initializes the provider with the given name and + // InitProvider initializes the provider with the given type and address, and // returns the implementation of the resource provider or an error. // // It is an error to initialize the same provider more than once. - InitProvider(string) (ResourceProvider, error) + InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error) - // Provider gets the provider instance with the given name (already + // Provider gets the provider instance with the given address (already // initialized) or returns nil if the provider isn't initialized. 
- Provider(string) ResourceProvider + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + // InitProvider must've been called on the EvalContext of the module + // that owns the given provider before calling this method. + Provider(addrs.AbsProviderConfig) providers.Interface + + // ProviderSchema retrieves the schema for a particular provider, which + // must have already been initialized with InitProvider. + // + // This method expects an _absolute_ provider configuration address, since + // resources in one module are able to use providers from other modules. + ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema // CloseProvider closes provider connections that aren't needed anymore. - CloseProvider(string) error + CloseProvider(addrs.ProviderConfig) error // ConfigureProvider configures the provider with the given // configuration. This is a separate context call because this call // is used to store the provider configuration for inheritance lookups // with ParentProviderConfig(). - ConfigureProvider(string, *ResourceConfig) error - SetProviderConfig(string, *ResourceConfig) error - ParentProviderConfig(string) *ResourceConfig + ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics // ProviderInput and SetProviderInput are used to configure providers // from user input. - ProviderInput(string) map[string]interface{} - SetProviderInput(string, map[string]interface{}) + ProviderInput(addrs.ProviderConfig) map[string]cty.Value + SetProviderInput(addrs.ProviderConfig, map[string]cty.Value) // InitProvisioner initializes the provisioner with the given name and // returns the implementation of the resource provisioner or an error. // // It is an error to initialize the same provisioner more than once. - InitProvisioner(string) (ResourceProvisioner, error) + InitProvisioner(string) (provisioners.Interface, error) // Provisioner gets the provisioner instance with the given name (already // initialized) or returns nil if the provisioner isn't initialized. - Provisioner(string) ResourceProvisioner + Provisioner(string) provisioners.Interface + + // ProvisionerSchema retrieves the main configuration schema for a + // particular provisioner, which must have already been initialized with + // InitProvisioner. + ProvisionerSchema(string) *configschema.Block // CloseProvisioner closes provisioner connections that aren't needed // anymore. CloseProvisioner(string) error - // Interpolate takes the given raw configuration and completes - // the interpolations, returning the processed ResourceConfig. + // EvaluateBlock takes the given raw configuration block and associated + // schema and evaluates it to produce a value of an object type that + // conforms to the implied type of the schema. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + // + // The "key" argument is also optional. If given, it is the instance key + // of the current object within the multi-instance container it belongs + // to. For example, on a resource block with "count" set this should be + // set to a different addrs.IntKey for each instance created from that + // block. Set this to addrs.NoKey if not appropriate. 
+ // + // The returned body is an expanded version of the given body, with any + // "dynamic" blocks replaced with zero or more static blocks. This can be + // used to extract correct source location information about attributes of + // the returned object value. + EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) + + // EvaluateExpr takes the given HCL expression and evaluates it to produce + // a value. + // + // The "self" argument is optional. If given, it is the referenceable + // address that the name "self" should behave as an alias for when + // evaluating. Set this to nil if the "self" object should not be available. + EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) + + // EvaluationScope returns a scope that can be used to evaluate reference + // addresses in this context. + EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope + + // SetModuleCallArguments defines values for the variables of a particular + // child module call. // - // The resource argument is optional. If given, it is the resource - // that is currently being acted upon. - Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) - - // SetVariables sets the variables for the module within - // this context with the name n. This function call is additive: - // the second parameter is merged with any previous call. - SetVariables(string, map[string]interface{}) - - // Diff returns the global diff as well as the lock that should - // be used to modify that diff. - Diff() (*Diff, *sync.RWMutex) - - // State returns the global state as well as the lock that should - // be used to modify that state. - State() (*State, *sync.RWMutex) + // Calling this function multiple times has merging behavior, keeping any + // previously-set keys that are not present in the new map. + SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value) + + // Changes returns the writer object that can be used to write new proposed + // changes into the global changes set. + Changes() *plans.ChangesSync + + // State returns a wrapper object that provides safe concurrent access to + // the global state. 
+ State() *states.SyncState } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go index 3dcfb227..20b37933 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go @@ -4,10 +4,22 @@ import ( "context" "fmt" "log" - "strings" "sync" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/version" + + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" ) // BuiltinEvalContext is an EvalContext implementation that is used by @@ -17,36 +29,47 @@ type BuiltinEvalContext struct { StopContext context.Context // PathValue is the Path that this context is operating within. - PathValue []string + PathValue addrs.ModuleInstance - // Interpolater setting below affect the interpolation of variables. + // Evaluator is used for evaluating expressions within the scope of this + // eval context. + Evaluator *Evaluator + + // Schemas is a repository of all of the schemas we should need to + // decode configuration blocks and expressions. This must be constructed by + // the caller to include schemas for all of the providers, resource types, + // data sources and provisioners used by the given configuration and + // state. // - // The InterpolaterVars are the exact value for ${var.foo} values. - // The map is shared between all contexts and is a mapping of - // PATH to KEY to VALUE. Because it is shared by all contexts as well - // as the Interpolater itself, it is protected by InterpolaterVarLock - // which must be locked during any access to the map. - Interpolater *Interpolater - InterpolaterVars map[string]map[string]interface{} - InterpolaterVarLock *sync.Mutex + // This must not be mutated during evaluation. + Schemas *Schemas + + // VariableValues contains the variable values across all modules. This + // structure is shared across the entire containing context, and so it + // may be accessed only when holding VariableValuesLock. + // The keys of the first level of VariableValues are the string + // representations of addrs.ModuleInstance values. The second-level keys + // are variable names within each module instance. 
+ VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex Components contextComponentFactory Hooks []Hook InputValue UIInput - ProviderCache map[string]ResourceProvider - ProviderConfigCache map[string]*ResourceConfig - ProviderInputConfig map[string]map[string]interface{} + ProviderCache map[string]providers.Interface + ProviderInputConfig map[string]map[string]cty.Value ProviderLock *sync.Mutex - ProvisionerCache map[string]ResourceProvisioner + ProvisionerCache map[string]provisioners.Interface ProvisionerLock *sync.Mutex - DiffValue *Diff - DiffLock *sync.RWMutex - StateValue *State - StateLock *sync.RWMutex + ChangesValue *plans.ChangesSync + StateValue *states.SyncState once sync.Once } +// BuiltinEvalContext implements EvalContext +var _ EvalContext = (*BuiltinEvalContext)(nil) + func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { // This can happen during tests. During tests, we just block forever. if ctx.StopContext == nil { @@ -80,12 +103,13 @@ func (ctx *BuiltinEvalContext) Input() UIInput { return ctx.InputValue } -func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) { +func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) { ctx.once.Do(ctx.init) + absAddr := addr.Absolute(ctx.Path()) // If we already initialized, it is an error - if p := ctx.Provider(n); p != nil { - return nil, fmt.Errorf("Provider '%s' already initialized", n) + if p := ctx.Provider(absAddr); p != nil { + return nil, fmt.Errorf("%s is already initialized", addr) } // Warning: make sure to acquire these locks AFTER the call to Provider @@ -93,139 +117,102 @@ func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - key := PathCacheKey(providerPath) + key := absAddr.String() - typeName := strings.SplitN(n, ".", 2)[0] p, err := ctx.Components.ResourceProvider(typeName, key) if err != nil { return nil, err } + log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr) ctx.ProviderCache[key] = p + return p, nil } -func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { +func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { ctx.once.Do(ctx.init) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n + return ctx.ProviderCache[addr.String()] +} + +func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + ctx.once.Do(ctx.init) - return ctx.ProviderCache[PathCacheKey(providerPath)] + return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type) } -func (ctx *BuiltinEvalContext) CloseProvider(n string) error { +func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error { ctx.once.Do(ctx.init) ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - var provider interface{} - provider = ctx.ProviderCache[PathCacheKey(providerPath)] + key := addr.Absolute(ctx.Path()).String() + provider := ctx.ProviderCache[key] if provider != nil { - if p, ok := provider.(ResourceProviderCloser); ok { - delete(ctx.ProviderCache, 
PathCacheKey(providerPath)) - return p.Close() - } + delete(ctx.ProviderCache, key) + return provider.Close() } return nil } -func (ctx *BuiltinEvalContext) ConfigureProvider( - n string, cfg *ResourceConfig) error { - p := ctx.Provider(n) +func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + absAddr := addr.Absolute(ctx.Path()) + p := ctx.Provider(absAddr) if p == nil { - return fmt.Errorf("Provider '%s' not initialized", n) + diags = diags.Append(fmt.Errorf("%s not initialized", addr)) + return diags } - if err := ctx.SetProviderConfig(n, cfg); err != nil { - return nil + providerSchema := ctx.ProviderSchema(absAddr) + if providerSchema == nil { + diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr)) + return diags } - return p.Configure(cfg) -} - -func (ctx *BuiltinEvalContext) SetProviderConfig( - n string, cfg *ResourceConfig) error { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n - - // Save the configuration - ctx.ProviderLock.Lock() - ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg - ctx.ProviderLock.Unlock() + req := providers.ConfigureRequest{ + TerraformVersion: version.String(), + Config: cfg, + } - return nil + resp := p.Configure(req) + return resp.Diagnostics } -func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { +func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value { ctx.ProviderLock.Lock() defer ctx.ProviderLock.Unlock() - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree. - for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderInputConfig[k]; ok { - return v - } + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + return nil } - return nil + return ctx.ProviderInputConfig[pc.String()] } -func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { - providerPath := make([]string, len(ctx.Path())+1) - copy(providerPath, ctx.Path()) - providerPath[len(providerPath)-1] = n +func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) { + absProvider := pc.Absolute(ctx.Path()) + + if !ctx.Path().IsRoot() { + // Only root module provider configurations can have input. + log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module") + return + } // Save the configuration ctx.ProviderLock.Lock() - ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c + ctx.ProviderInputConfig[absProvider.String()] = c ctx.ProviderLock.Unlock() } -func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig { - ctx.ProviderLock.Lock() - defer ctx.ProviderLock.Unlock() - - // Make a copy of the path so we can safely edit it - path := ctx.Path() - pathCopy := make([]string, len(path)+1) - copy(pathCopy, path) - - // Go up the tree. 
- for i := len(path) - 1; i >= 0; i-- { - pathCopy[i+1] = n - k := PathCacheKey(pathCopy[:i+2]) - if v, ok := ctx.ProviderConfigCache[k]; ok { - return v - } - } - - return nil -} - -func (ctx *BuiltinEvalContext) InitProvisioner( - n string) (ResourceProvisioner, error) { +func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { ctx.once.Do(ctx.init) // If we already initialized, it is an error @@ -238,10 +225,7 @@ func (ctx *BuiltinEvalContext) InitProvisioner( ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n - key := PathCacheKey(provPath) + key := PathObjectCacheKey(ctx.Path(), n) p, err := ctx.Components.ResourceProvisioner(n, key) if err != nil { @@ -249,20 +233,24 @@ func (ctx *BuiltinEvalContext) InitProvisioner( } ctx.ProvisionerCache[key] = p + return p, nil } -func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { +func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface { ctx.once.Do(ctx.init) ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n + key := PathObjectCacheKey(ctx.Path(), n) + return ctx.ProvisionerCache[key] +} + +func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block { + ctx.once.Do(ctx.init) - return ctx.ProvisionerCache[PathCacheKey(provPath)] + return ctx.Schemas.ProvisionerConfig(n) } func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { @@ -271,76 +259,70 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { ctx.ProvisionerLock.Lock() defer ctx.ProvisionerLock.Unlock() - provPath := make([]string, len(ctx.Path())+1) - copy(provPath, ctx.Path()) - provPath[len(provPath)-1] = n + key := PathObjectCacheKey(ctx.Path(), n) - var prov interface{} - prov = ctx.ProvisionerCache[PathCacheKey(provPath)] + prov := ctx.ProvisionerCache[key] if prov != nil { - if p, ok := prov.(ResourceProvisionerCloser); ok { - delete(ctx.ProvisionerCache, PathCacheKey(provPath)) - return p.Close() - } + return prov.Close() } return nil } -func (ctx *BuiltinEvalContext) Interpolate( - cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { - if cfg != nil { - scope := &InterpolationScope{ - Path: ctx.Path(), - Resource: r, - } +func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + scope := ctx.EvaluationScope(self, keyData) + body, evalDiags := scope.ExpandBlock(body, schema) + diags = diags.Append(evalDiags) + val, evalDiags := scope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + return val, body, diags +} - vs, err := ctx.Interpolater.Values(scope, cfg.Variables) - if err != nil { - return nil, err - } +func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey) + return scope.EvalExpr(expr, wantType) +} - // Do the interpolation - if err := cfg.Interpolate(vs); err != nil { - return nil, err - } +func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope { + data := &evaluationStateData{ + Evaluator: ctx.Evaluator, + ModulePath: ctx.PathValue, 
+ InstanceKeyData: keyData, + Operation: ctx.Evaluator.Operation, } - - result := NewResourceConfig(cfg) - result.interpolateForce() - return result, nil + return ctx.Evaluator.Scope(data, self) } -func (ctx *BuiltinEvalContext) Path() []string { +func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance { return ctx.PathValue } -func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { - ctx.InterpolaterVarLock.Lock() - defer ctx.InterpolaterVarLock.Unlock() +func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) { + ctx.VariableValuesLock.Lock() + defer ctx.VariableValuesLock.Unlock() - path := make([]string, len(ctx.Path())+1) - copy(path, ctx.Path()) - path[len(path)-1] = n - key := PathCacheKey(path) + childPath := n.ModuleInstance(ctx.PathValue) + key := childPath.String() - vars := ctx.InterpolaterVars[key] - if vars == nil { - vars = make(map[string]interface{}) - ctx.InterpolaterVars[key] = vars + args := ctx.VariableValues[key] + if args == nil { + args = make(map[string]cty.Value) + ctx.VariableValues[key] = vals + return } - for k, v := range vs { - vars[k] = v + for k, v := range vals { + args[k] = v } } -func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { - return ctx.DiffValue, ctx.DiffLock +func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync { + return ctx.ChangesValue } -func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { - return ctx.StateValue, ctx.StateLock +func (ctx *BuiltinEvalContext) State() *states.SyncState { + return ctx.StateValue } func (ctx *BuiltinEvalContext) init() { diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go index 4f90d5b1..195ecc5c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go @@ -1,9 +1,20 @@ package terraform import ( - "sync" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcldec" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // MockEvalContext is a mock version of EvalContext that can be used @@ -20,51 +31,84 @@ type MockEvalContext struct { InputInput UIInput InitProviderCalled bool - InitProviderName string - InitProviderProvider ResourceProvider + InitProviderType string + InitProviderAddr addrs.ProviderConfig + InitProviderProvider providers.Interface InitProviderError error ProviderCalled bool - ProviderName string - ProviderProvider ResourceProvider + ProviderAddr addrs.AbsProviderConfig + ProviderProvider providers.Interface + + ProviderSchemaCalled bool + ProviderSchemaAddr addrs.AbsProviderConfig + ProviderSchemaSchema *ProviderSchema CloseProviderCalled bool - CloseProviderName string - CloseProviderProvider ResourceProvider + CloseProviderAddr addrs.ProviderConfig + CloseProviderProvider providers.Interface ProviderInputCalled bool - 
ProviderInputName string - ProviderInputConfig map[string]interface{} + ProviderInputAddr addrs.ProviderConfig + ProviderInputValues map[string]cty.Value SetProviderInputCalled bool - SetProviderInputName string - SetProviderInputConfig map[string]interface{} + SetProviderInputAddr addrs.ProviderConfig + SetProviderInputValues map[string]cty.Value ConfigureProviderCalled bool - ConfigureProviderName string - ConfigureProviderConfig *ResourceConfig - ConfigureProviderError error - - SetProviderConfigCalled bool - SetProviderConfigName string - SetProviderConfigConfig *ResourceConfig - - ParentProviderConfigCalled bool - ParentProviderConfigName string - ParentProviderConfigConfig *ResourceConfig + ConfigureProviderAddr addrs.ProviderConfig + ConfigureProviderConfig cty.Value + ConfigureProviderDiags tfdiags.Diagnostics InitProvisionerCalled bool InitProvisionerName string - InitProvisionerProvisioner ResourceProvisioner + InitProvisionerProvisioner provisioners.Interface InitProvisionerError error ProvisionerCalled bool ProvisionerName string - ProvisionerProvisioner ResourceProvisioner + ProvisionerProvisioner provisioners.Interface + + ProvisionerSchemaCalled bool + ProvisionerSchemaName string + ProvisionerSchemaSchema *configschema.Block CloseProvisionerCalled bool CloseProvisionerName string - CloseProvisionerProvisioner ResourceProvisioner + CloseProvisionerProvisioner provisioners.Interface + + EvaluateBlockCalled bool + EvaluateBlockBody hcl.Body + EvaluateBlockSchema *configschema.Block + EvaluateBlockSelf addrs.Referenceable + EvaluateBlockKeyData InstanceKeyEvalData + EvaluateBlockResultFunc func( + body hcl.Body, + schema *configschema.Block, + self addrs.Referenceable, + keyData InstanceKeyEvalData, + ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateBlockResult cty.Value + EvaluateBlockExpandedBody hcl.Body + EvaluateBlockDiags tfdiags.Diagnostics + + EvaluateExprCalled bool + EvaluateExprExpr hcl.Expression + EvaluateExprWantType cty.Type + EvaluateExprSelf addrs.Referenceable + EvaluateExprResultFunc func( + expr hcl.Expression, + wantType cty.Type, + self addrs.Referenceable, + ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set + EvaluateExprResult cty.Value + EvaluateExprDiags tfdiags.Diagnostics + + EvaluationScopeCalled bool + EvaluationScopeSelf addrs.Referenceable + EvaluationScopeKeyData InstanceKeyEvalData + EvaluationScopeScope *lang.Scope InterpolateCalled bool InterpolateConfig *config.RawConfig @@ -72,22 +116,29 @@ type MockEvalContext struct { InterpolateConfigResult *ResourceConfig InterpolateError error + InterpolateProviderCalled bool + InterpolateProviderConfig *config.ProviderConfig + InterpolateProviderResource *Resource + InterpolateProviderConfigResult *ResourceConfig + InterpolateProviderError error + PathCalled bool - PathPath []string + PathPath addrs.ModuleInstance - SetVariablesCalled bool - SetVariablesModule string - SetVariablesVariables map[string]interface{} + SetModuleCallArgumentsCalled bool + SetModuleCallArgumentsModule addrs.ModuleCallInstance + SetModuleCallArgumentsValues map[string]cty.Value - DiffCalled bool - DiffDiff *Diff - DiffLock *sync.RWMutex + ChangesCalled bool + ChangesChanges *plans.ChangesSync StateCalled bool - StateState *State - StateLock *sync.RWMutex + StateState *states.SyncState } +// MockEvalContext implements EvalContext +var _ EvalContext = (*MockEvalContext)(nil) + func (c *MockEvalContext) Stopped() <-chan struct{} { c.StoppedCalled = 
true return c.StoppedValue @@ -109,75 +160,157 @@ func (c *MockEvalContext) Input() UIInput { return c.InputInput } -func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) { +func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) { c.InitProviderCalled = true - c.InitProviderName = n + c.InitProviderType = t + c.InitProviderAddr = addr return c.InitProviderProvider, c.InitProviderError } -func (c *MockEvalContext) Provider(n string) ResourceProvider { +func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface { c.ProviderCalled = true - c.ProviderName = n + c.ProviderAddr = addr return c.ProviderProvider } -func (c *MockEvalContext) CloseProvider(n string) error { +func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema { + c.ProviderSchemaCalled = true + c.ProviderSchemaAddr = addr + return c.ProviderSchemaSchema +} + +func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error { c.CloseProviderCalled = true - c.CloseProviderName = n + c.CloseProviderAddr = addr return nil } -func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { +func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics { c.ConfigureProviderCalled = true - c.ConfigureProviderName = n + c.ConfigureProviderAddr = addr c.ConfigureProviderConfig = cfg - return c.ConfigureProviderError -} - -func (c *MockEvalContext) SetProviderConfig( - n string, cfg *ResourceConfig) error { - c.SetProviderConfigCalled = true - c.SetProviderConfigName = n - c.SetProviderConfigConfig = cfg - return nil + return c.ConfigureProviderDiags } -func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig { - c.ParentProviderConfigCalled = true - c.ParentProviderConfigName = n - return c.ParentProviderConfigConfig -} - -func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { +func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value { c.ProviderInputCalled = true - c.ProviderInputName = n - return c.ProviderInputConfig + c.ProviderInputAddr = addr + return c.ProviderInputValues } -func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { +func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) { c.SetProviderInputCalled = true - c.SetProviderInputName = n - c.SetProviderInputConfig = cfg + c.SetProviderInputAddr = addr + c.SetProviderInputValues = vals } -func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { +func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) { c.InitProvisionerCalled = true c.InitProvisionerName = n return c.InitProvisionerProvisioner, c.InitProvisionerError } -func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { +func (c *MockEvalContext) Provisioner(n string) provisioners.Interface { c.ProvisionerCalled = true c.ProvisionerName = n return c.ProvisionerProvisioner } +func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block { + c.ProvisionerSchemaCalled = true + c.ProvisionerSchemaName = n + return c.ProvisionerSchemaSchema +} + func (c *MockEvalContext) CloseProvisioner(n string) error { c.CloseProvisionerCalled = true c.CloseProvisionerName = n return nil } +func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) 
(cty.Value, hcl.Body, tfdiags.Diagnostics) { + c.EvaluateBlockCalled = true + c.EvaluateBlockBody = body + c.EvaluateBlockSchema = schema + c.EvaluateBlockSelf = self + c.EvaluateBlockKeyData = keyData + if c.EvaluateBlockResultFunc != nil { + return c.EvaluateBlockResultFunc(body, schema, self, keyData) + } + return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags +} + +func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + c.EvaluateExprCalled = true + c.EvaluateExprExpr = expr + c.EvaluateExprWantType = wantType + c.EvaluateExprSelf = self + if c.EvaluateExprResultFunc != nil { + return c.EvaluateExprResultFunc(expr, wantType, self) + } + return c.EvaluateExprResult, c.EvaluateExprDiags +} + +// installSimpleEval is a helper to install a simple mock implementation of +// both EvaluateBlock and EvaluateExpr into the receiver. +// +// These default implementations will either evaluate the given input against +// the scope in field EvaluationScopeScope or, if it is nil, with no eval +// context at all so that only constant values may be used. +// +// This function overwrites any existing functions installed in fields +// EvaluateBlockResultFunc and EvaluateExprResultFunc. +func (c *MockEvalContext) installSimpleEval() { + c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + var diags tfdiags.Diagnostics + body, diags = scope.ExpandBlock(body, schema) + if diags.HasErrors() { + return cty.DynamicVal, body, diags + } + val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return cty.DynamicVal, body, diags + } + return val, body, diags + } + + // Fallback codepath supporting constant values only. + val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil) + return val, body, tfdiags.Diagnostics(nil).Append(hclDiags) + } + c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) { + if scope := c.EvaluationScopeScope; scope != nil { + // Fully-functional codepath. + return scope.EvalExpr(expr, wantType) + } + + // Fallback codepath supporting constant values only. 
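The constant-only fallback that follows evaluates an expression with a nil HCL evaluation context and then converts the result to the wanted type. In isolation, that pattern looks roughly like this sketch; the input expression is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// Parse a constant expression; evaluating with a nil EvalContext means
	// no variables or functions are available, exactly like the fallback.
	expr, diags := hclsyntax.ParseExpression([]byte(`"3"`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	val, diags := expr.Value(nil) // constant-only evaluation
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// Convert to the wanted type, as the fallback does via convert.Convert.
	n, err := convert.Convert(val, cty.Number)
	if err != nil {
		panic(err)
	}
	fmt.Println(n.GoString()) // a cty number equal to 3
}
```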
+		var diags tfdiags.Diagnostics
+		val, hclDiags := expr.Value(nil)
+		diags = diags.Append(hclDiags)
+		if hclDiags.HasErrors() {
+			return cty.DynamicVal, diags
+		}
+		var err error
+		val, err = convert.Convert(val, wantType)
+		if err != nil {
+			diags = diags.Append(err)
+			return cty.DynamicVal, diags
+		}
+		return val, diags
+	}
+}
+
+func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope {
+	c.EvaluationScopeCalled = true
+	c.EvaluationScopeSelf = self
+	c.EvaluationScopeKeyData = keyData
+	return c.EvaluationScopeScope
+}
+
 func (c *MockEvalContext) Interpolate(
 	config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
 	c.InterpolateCalled = true
@@ -186,23 +319,31 @@ func (c *MockEvalContext) Interpolate(
 	return c.InterpolateConfigResult, c.InterpolateError
 }
 
-func (c *MockEvalContext) Path() []string {
+func (c *MockEvalContext) InterpolateProvider(
+	config *config.ProviderConfig, resource *Resource) (*ResourceConfig, error) {
+	c.InterpolateProviderCalled = true
+	c.InterpolateProviderConfig = config
+	c.InterpolateProviderResource = resource
+	return c.InterpolateProviderConfigResult, c.InterpolateProviderError
+}
+
+func (c *MockEvalContext) Path() addrs.ModuleInstance {
 	c.PathCalled = true
 	return c.PathPath
 }
 
-func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
-	c.SetVariablesCalled = true
-	c.SetVariablesModule = n
-	c.SetVariablesVariables = vs
+func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) {
+	c.SetModuleCallArgumentsCalled = true
+	c.SetModuleCallArgumentsModule = n
+	c.SetModuleCallArgumentsValues = values
 }
 
-func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
-	c.DiffCalled = true
-	return c.DiffDiff, c.DiffLock
+func (c *MockEvalContext) Changes() *plans.ChangesSync {
+	c.ChangesCalled = true
+	return c.ChangesChanges
 }
 
-func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
+func (c *MockEvalContext) State() *states.SyncState {
 	c.StateCalled = true
-	return c.StateState, c.StateLock
+	return c.StateState
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
index 2ae56a75..8083105b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -1,58 +1,120 @@
 package terraform
 
 import (
-	"github.com/hashicorp/terraform/config"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/tfdiags"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
 )
 
-// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
-// when there is a resource count with zero/one boundary, i.e. fixing
-// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
-type EvalCountFixZeroOneBoundary struct {
-	Resource *config.Resource
+// evaluateResourceCountExpression is our standard mechanism for interpreting an
+// expression given for a "count" argument on a resource. This should be called
+// from the DynamicExpand of a node representing a resource in order to
+// determine the final count value.
+//
+// If the result is zero or positive and no error diagnostics are returned, then
+// the result is the literal count value to use.
+// +// If the result is -1, this indicates that the given expression is nil and so +// the "count" behavior should not be enabled for this resource at all. +// +// If error diagnostics are returned then the result is always the meaningless +// placeholder value -1. +func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) { + count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx) + if !known { + // Currently this is a rather bad outcome from a UX standpoint, since we have + // no real mechanism to deal with this situation and all we can do is produce + // an error message. + // FIXME: In future, implement a built-in mechanism for deferring changes that + // can't yet be predicted, and use it to guide the user through several + // plan/apply steps until the desired configuration is eventually reached. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`, + Subject: expr.Range().Ptr(), + }) + } + return count, diags } -// TODO: test -func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { - // Get the count, important for knowing whether we're supposed to - // be adding the zero, or trimming it. - count, err := n.Resource.Count() - if err != nil { - return nil, err +// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression +// except that it handles an unknown result by returning count = 0 and +// a known = false, rather than by reporting the unknown value as an error +// diagnostic. +func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) { + if expr == nil { + return -1, true, nil } - // Figure what to look for and what to replace it with - hunt := n.Resource.Id() - replace := hunt + ".0" - if count < 2 { - hunt, replace = replace, hunt + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return -1, true, diags } - state, lock := ctx.State() - - // Get a lock so we can access this instance and potentially make - // changes to it. - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil + switch { + case countVal.IsNull(): + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags + case !countVal.IsKnown(): + return 0, false, diags } - // Look for the resource state. If we don't have one, then it is okay. 
- rs, ok := mod.Resources[hunt] - if !ok { - return nil, nil + err := gocty.FromCtyValue(countVal, &count) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return -1, true, diags } - - // If the replacement key exists, we just keep both - if _, ok := mod.Resources[replace]; ok { - return nil, nil + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`, + Subject: expr.Range().Ptr(), + }) + return -1, true, diags } - mod.Resources[replace] = rs - delete(mod.Resources, hunt) + return count, true, diags +} - return nil, nil +// fixResourceCountSetTransition is a helper function to fix up the state when a +// resource transitions its "count" from being set to unset or vice-versa, +// treating a 0-key and a no-key instance as aliases for one another across +// the transition. +// +// The correct time to call this function is in the DynamicExpand method for +// a node representing a resource, just after evaluating the count with +// evaluateResourceCountExpression, and before any other analysis of the +// state such as orphan detection. +// +// This function calls methods on the given EvalContext to update the current +// state in-place, if necessary. It is a no-op if there is no count transition +// taking place. +// +// Since the state is modified in-place, this function must take a writer lock +// on the state. The caller must therefore not also be holding a state lock, +// or this function will block forever awaiting the lock. +func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) { + state := ctx.State() + changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled) + if changed { + log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr) + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go index 91e2b904..647c58d1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go @@ -1,7 +1,11 @@ package terraform import ( + "fmt" "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state @@ -9,22 +13,34 @@ import ( // a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. // // This works on the global state. -type EvalCountFixZeroOneBoundaryGlobal struct{} +type EvalCountFixZeroOneBoundaryGlobal struct { + Config *configs.Config +} // TODO: test func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { - // Get the state and lock it since we'll potentially modify it - state, lock := ctx.State() - lock.Lock() - defer lock.Unlock() - - // Prune the state since we require a clean state to work - state.prune() - - // Go through each modules since the boundaries are restricted to a - // module scope. 
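The null, unknown, conversion, and sign checks above are easy to get out of order when reimplementing them. The following self-contained sketch reproduces the same decision sequence on an already-evaluated value, using only the public cty and gocty APIs; `validateCount` is an illustrative helper, not part of this codebase:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/gocty"
)

// validateCount mirrors the checks evaluateResourceCountExpressionKnown
// applies to an already-evaluated value: null and negative values are
// rejected outright, unknown values are reported as "not yet known", and
// anything else must convert cleanly to a Go int.
func validateCount(v cty.Value) (count int, known bool, err error) {
	switch {
	case v.IsNull():
		return -1, true, fmt.Errorf(`the "count" argument value is null; an integer is required`)
	case !v.IsKnown():
		return 0, false, nil // deferred until more values are known
	}
	if convErr := gocty.FromCtyValue(v, &count); convErr != nil {
		return -1, true, convErr
	}
	if count < 0 {
		return -1, true, fmt.Errorf("negative count values are not supported")
	}
	return count, true, nil
}

func main() {
	fmt.Println(validateCount(cty.NumberIntVal(3)))        // 3 true <nil>
	fmt.Println(validateCount(cty.UnknownVal(cty.Number))) // 0 false <nil>
	fmt.Println(validateCount(cty.NullVal(cty.Number)))    // -1 true <error>
}
```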
+ // We'll temporarily lock the state to grab the modules, then work on each + // one separately while taking a lock again for each separate resource. + // This means that if another caller concurrently adds a module here while + // we're working then we won't update it, but that's no worse than the + // concurrent writer blocking for our entire fixup process and _then_ + // adding a new module, and in practice the graph node associated with + // this eval depends on everything else in the graph anyway, so there + // should not be concurrent writers. + state := ctx.State().Lock() + moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules)) for _, m := range state.Modules { - if err := n.fixModule(m); err != nil { + moduleAddrs = append(moduleAddrs, m.Addr) + } + ctx.State().Unlock() + + for _, addr := range moduleAddrs { + cfg := n.Config.DescendentForInstance(addr) + if cfg == nil { + log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr) + continue + } + if err := n.fixModule(ctx, addr); err != nil { return nil, err } } @@ -32,46 +48,29 @@ func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, return nil, nil } -func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { - // Counts keeps track of keys and their counts - counts := make(map[string]int) - for k, _ := range m.Resources { - // Parse the key - key, err := ParseResourceStateKey(k) - if err != nil { - return err - } - - // Set the index to -1 so that we can keep count - key.Index = -1 - - // Increment - counts[key.String()]++ +func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error { + ms := ctx.State().Module(moduleAddr) + cfg := n.Config.DescendentForInstance(moduleAddr) + if ms == nil { + // Theoretically possible for a concurrent writer to delete a module + // while we're running, but in practice the graph node that called us + // depends on everything else in the graph and so there can never + // be a concurrent writer. + return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr) + } + if cfg == nil { + return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr) } - // Go through the counts and do the fixup for each resource - for raw, count := range counts { - // Search and replace this resource - search := raw - replace := raw + ".0" - if count < 2 { - search, replace = replace, search - } - log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace) - - // Look for the resource state. If we don't have one, then it is okay. 
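For readers unfamiliar with the legacy behavior being deleted here, this is roughly what the old zero/one boundary fixup did to a module's flatmap-keyed resource map. The sketch assumes plain string keys and is purely illustrative; the new code delegates the equivalent work to the states package:

```go
package main

import "fmt"

// fixZeroOneBoundary sketches the key swap the deleted code performed:
// with count >= 2 the bare key "aws_instance.foo" must become
// "aws_instance.foo.0", and with count < 2 the indexed key collapses back
// to the bare one.
func fixZeroOneBoundary(resources map[string]string, id string, count int) {
	hunt, replace := id, id+".0"
	if count < 2 {
		hunt, replace = replace, hunt
	}
	rs, ok := resources[hunt]
	if !ok {
		return // nothing recorded under the old key
	}
	if _, ok := resources[replace]; ok {
		return // both keys already exist; keep them as they are
	}
	resources[replace] = rs
	delete(resources, hunt)
}

func main() {
	state := map[string]string{"aws_instance.foo": "i-abc123"}
	fixZeroOneBoundary(state, "aws_instance.foo", 2)
	fmt.Println(state) // map[aws_instance.foo.0:i-abc123]
}
```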
- rs, ok := m.Resources[search]
- if !ok {
- continue
- }
-
- // If the replacement key exists, we just keep both
- if _, ok := m.Resources[replace]; ok {
+ for _, r := range ms.Resources {
+ addr := r.Addr.Absolute(moduleAddr)
+ rCfg := cfg.Module.ResourceByAddr(r.Addr)
+ if rCfg == nil {
+ log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
 continue
 }
-
- m.Resources[replace] = rs
- delete(m.Resources, search)
+ hasCount := rCfg.Count != nil
+ fixResourceCountSetTransition(ctx, addr, hasCount)
 }

 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index 6f09526a..20af9593 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -1,86 +1,114 @@
 package terraform

 import (
+ "bytes"
 "fmt"
 "log"
 "strings"

- "github.com/hashicorp/terraform/config"
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/zclconf/go-cty/cty"
+
+ "github.com/hashicorp/terraform/addrs"
+ "github.com/hashicorp/terraform/configs"
+ "github.com/hashicorp/terraform/plans"
+ "github.com/hashicorp/terraform/plans/objchange"
+ "github.com/hashicorp/terraform/providers"
+ "github.com/hashicorp/terraform/states"
+ "github.com/hashicorp/terraform/tfdiags"
)

-// EvalCompareDiff is an EvalNode implementation that compares two diffs
-// and errors if the diffs are not equal.
-type EvalCompareDiff struct {
- Info *InstanceInfo
- One, Two **InstanceDiff
+// EvalCheckPlannedChange is an EvalNode implementation that produces errors
+// if the _actual_ expected value is not compatible with what was recorded
+// in the plan.
+//
+// Errors here are most often indicative of a bug in the provider, so our
+// error messages will report with that in mind. It's also possible that
+// there's a bug in Terraform Core's own "proposed new value" code in
+// EvalDiff.
+type EvalCheckPlannedChange struct {
+ Addr addrs.ResourceInstance
+ ProviderAddr addrs.AbsProviderConfig
+ ProviderSchema **ProviderSchema
+
+ // We take ResourceInstanceChange objects here just because that's what's
+ // convenient to pass in from the evaltree implementation, but we really
+ // only look at the "After" value of each change.
+ Planned, Actual **plans.ResourceInstanceChange
}

-// TODO: test
-func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
- one, two := *n.One, *n.Two
-
- // If either are nil, let them be empty
- if one == nil {
- one = new(InstanceDiff)
- one.init()
- }
- if two == nil {
- two = new(InstanceDiff)
- two.init()
- }
- oneId, _ := one.GetAttribute("id")
- twoId, _ := two.GetAttribute("id")
- one.DelAttribute("id")
- two.DelAttribute("id")
- defer func() {
- if oneId != nil {
- one.SetAttribute("id", oneId)
- }
- if twoId != nil {
- two.SetAttribute("id", twoId)
- }
- }()
-
- if same, reason := one.Same(two); !same {
- log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
- log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
- log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
- log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
- return nil, fmt.Errorf(
- "%s: diffs didn't match during apply. 
This is a bug with "+ - "Terraform and should be reported as a GitHub Issue.\n"+ - "\n"+ - "Please include the following information in your report:\n"+ - "\n"+ - " Terraform Version: %s\n"+ - " Resource ID: %s\n"+ - " Mismatch reason: %s\n"+ - " Diff One (usually from plan): %#v\n"+ - " Diff Two (usually from apply): %#v\n"+ - "\n"+ - "Also include as much context as you can about your config, state, "+ - "and the steps you performed to trigger this error.\n", - n.Info.Id, Version, n.Info.Id, reason, one, two) +func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) { + providerSchema := *n.ProviderSchema + plannedChange := *n.Planned + actualChange := *n.Actual + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type) + } + + var diags tfdiags.Diagnostics + absAddr := n.Addr.Absolute(ctx.Path()) + + log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action) + + if plannedChange.Action != actualChange.Action { + switch { + case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp: + // It's okay for an update to become a NoOp once we've filled in + // all of the unknown values, since the final values might actually + // match what was there before after all. + log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr) + default: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, + plannedChange.Action, actualChange.Action, + ), + )) + } } - return nil, nil + errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After) + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced inconsistent final plan", + fmt.Sprintf( + "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err), + ), + )) + } + return nil, diags.Err() } -// EvalDiff is an EvalNode implementation that does a refresh for -// a resource. +// EvalDiff is an EvalNode implementation that detects changes for a given +// resource instance. type EvalDiff struct { - Name string - Info *InstanceInfo - Config **ResourceConfig - Provider *ResourceProvider - Diff **InstanceDiff - State **InstanceState - OutputDiff **InstanceDiff - OutputState **InstanceState - - // Resource is needed to fetch the ignore_changes list so we can - // filter user-requested ignored attributes from the diff. 
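The action-mismatch tolerance implemented in EvalCheckPlannedChange above is deliberately narrow. This sketch distills the rule with placeholder action constants (`checkPlannedAction` and the `action` type are hypothetical stand-ins, not the real plans.Action values):

```go
package main

import "fmt"

type action string

const (
	noOp   action = "no-op"
	create action = "create"
	update action = "update"
	remove action = "delete"
)

// checkPlannedAction captures the tolerance rule from EvalCheckPlannedChange:
// the only action drift permitted between plan and apply is Update -> NoOp,
// since filling in unknown values can reveal that nothing changed after all.
// Every other mismatch is treated as a provider bug.
func checkPlannedAction(planned, actual action) error {
	if planned == actual {
		return nil
	}
	if planned == update && actual == noOp {
		return nil // final values matched the prior state after all
	}
	return fmt.Errorf("provider changed the planned action from %s to %s", planned, actual)
}

func main() {
	fmt.Println(checkPlannedAction(update, noOp))   // <nil>
	fmt.Println(checkPlannedAction(update, remove)) // error: provider bug
}
```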
- Resource *config.Resource + Addr addrs.ResourceInstance + Config *configs.Resource + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + PreviousDiff **plans.ResourceInstanceChange + + // CreateBeforeDestroy is set if either the resource's own config sets + // create_before_destroy explicitly or if dependencies have forced the + // resource to be handled as create_before_destroy in order to avoid + // a dependency cycle. + CreateBeforeDestroy bool + + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputState **states.ResourceInstanceObject + + Stub bool } // TODO: test @@ -88,106 +116,454 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { state := *n.State config := *n.Config provider := *n.Provider + providerSchema := *n.ProviderSchema + + if providerSchema == nil { + return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr) + } + if n.ProviderAddr.ProviderConfig.Type == "" { + panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path()))) + } + + var diags tfdiags.Diagnostics + + // Evaluate the configuration + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } + keyData := EvalDataForInstanceKey(n.Addr.Key) + configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + absAddr := n.Addr.Absolute(ctx.Path()) + var priorVal cty.Value + var priorValTainted cty.Value + var priorPrivate []byte + if state != nil { + if state.Status != states.ObjectTainted { + priorVal = state.Value + priorPrivate = state.Private + } else { + // If the prior state is tainted then we'll proceed below like + // we're creating an entirely new object, but then turn it into + // a synthetic "Replace" change at the end, creating the same + // result as if the provider had marked at least one argument + // change as "requires replacement". + priorValTainted = state.Value + priorVal = cty.NullVal(schema.ImpliedType()) + } + } else { + priorVal = cty.NullVal(schema.ImpliedType()) + } + + proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal) // Call pre-diff hook - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) - }) - if err != nil { - return nil, err + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal) + }) + if err != nil { + return nil, err + } } - // The state for the diff must never be nil - diffState := state - if diffState == nil { - diffState = new(InstanceState) + log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path())) + // Allow the provider to validate the final set of values. + // The config was statically validated early on, but there may have been + // unknown values which the provider could not validate at the time. 
+ validateResp := provider.ValidateResourceTypeConfig( + providers.ValidateResourceTypeConfigRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }, + ) + if validateResp.Diagnostics.HasErrors() { + return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err() + } + + // The provider gets an opportunity to customize the proposed new value, + // which in turn produces the _planned_ new value. + resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: priorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: priorPrivate, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + plannedNewVal := resp.PlannedState + plannedPrivate := resp.PlannedPrivate + + if plannedNewVal == cty.NilVal { + // Should never happen. Since real-world providers return via RPC a nil + // is always a bug in the client-side stub. This is more likely caused + // by an incompletely-configured mock provider in tests, though. + panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String())) + } + + // We allow the planned new value to disagree with configuration _values_ + // here, since that allows the provider to do special logic like a + // DiffSuppressFunc, but we still require that the provider produces + // a value whose type conforms to the schema. + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 { + if resp.LegacyTypeSystem { + // The shimming of the old type system in the legacy SDK is not precise + // enough to pass this consistency check, so we'll give it a pass here, + // but we will generate a warning about it so that we are more likely + // to notice in the logs if an inconsistency beyond the type system + // leads to a downstream provider failure. + var buf strings.Builder + fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr) + for _, err := range errs { + fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err)) + } + log.Print(buf.String()) + } else { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + return nil, diags.Err() + } } - diffState.init() - // Diff! 
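The conformance check above relies on cty's type-conformance testing rather than value equality, so a provider may normalize values but never change the shape. A minimal sketch, assuming a hand-built implied type in place of a real provider schema:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// A planned value may disagree with the configuration's *values* (allowing
// provider-side normalization), but its *type* must conform to the schema's
// implied type, which is what the
// plannedNewVal.Type().TestConformance(schema.ImpliedType()) call enforces.
func main() {
	impliedType := cty.Object(map[string]cty.Type{
		"id":  cty.String,
		"ami": cty.String,
	})

	good := cty.ObjectVal(map[string]cty.Value{
		"id":  cty.UnknownVal(cty.String),
		"ami": cty.StringVal("ami-12345"),
	})
	bad := cty.ObjectVal(map[string]cty.Value{
		"id": cty.UnknownVal(cty.String), // "ami" is missing entirely
	})

	fmt.Println(good.Type().TestConformance(impliedType)) // no errors
	fmt.Println(bad.Type().TestConformance(impliedType))  // missing attribute error
}
```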
- diff, err := provider.Diff(n.Info, diffState, config)
- if err != nil {
- return nil, err
- }
- if diff == nil {
- diff = new(InstanceDiff)
+ {
+ var moreDiags tfdiags.Diagnostics
+ plannedNewVal, moreDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
+ diags = diags.Append(moreDiags)
+ if moreDiags.HasErrors() {
+ return nil, diags.Err()
+ }
 }

- // Set DestroyDeposed if we have deposed instances
- _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
- if len(rs.Deposed) > 0 {
- diff.DestroyDeposed = true
- }
+ // The provider produces a list of paths to attributes whose changes mean
+ // that we must replace rather than update an existing remote object.
+ // However, we only need to do that if the identified attributes _have_
+ // actually changed -- particularly after we may have undone some of the
+ // changes in processIgnoreChanges -- so now we'll filter that list to
+ // include only where changes are detected.
+ reqRep := cty.NewPathSet()
+ if len(resp.RequiresReplace) > 0 {
+ for _, path := range resp.RequiresReplace {
+ if priorVal.IsNull() {
+ // If prior is null then we don't expect any RequiresReplace at all,
+ // because this is a Create action.
+ continue
+ }

- return nil, nil
- })
- if err != nil {
- return nil, err
- }
+ priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
+ plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
+ if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
+ // This means the path was invalid in both the prior and new
+ // values, which is an error with the provider itself.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Provider produced invalid plan",
+ fmt.Sprintf(
+ "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
+ n.ProviderAddr.ProviderConfig.Type, absAddr, path,
+ ),
+ ))
+ continue
+ }

- // Preserve the DestroyTainted flag
- if n.Diff != nil {
- diff.SetTainted((*n.Diff).GetDestroyTainted())
- }
+ // Make sure we have valid Values for both values.
+ // Note: if the opposing value was of the type
+ // cty.DynamicPseudoType, the type assigned here may not exactly
+ // match the schema. This is fine here, since we're only going to
+ // check for equality, but if the NullVal is to be used, we need to
+ // check the schema for the true type.
+ switch {
+ case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
+ // this should never happen without ApplyPath errors above
+ panic("requires replace path returned 2 nil values")
+ case priorChangedVal == cty.NilVal:
+ priorChangedVal = cty.NullVal(plannedChangedVal.Type())
+ case plannedChangedVal == cty.NilVal:
+ plannedChangedVal = cty.NullVal(priorChangedVal.Type())
+ }

- // Require a destroy if there is an ID and it requires new.
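The requires-replace filtering above only counts a path whose value genuinely differs between prior and planned. A simplified sketch of that comparison, using cty.Path.Apply in place of hcl.ApplyPath and collapsing the diagnostic handling; `pathActuallyChanged` is an illustrative helper:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// pathActuallyChanged applies one provider-reported "requires replacement"
// path to both the prior and planned objects and reports whether the value
// there really differs; only then does the path deserve a spot in the
// RequiredReplace set.
func pathActuallyChanged(prior, planned cty.Value, path cty.Path) bool {
	priorV, priorErr := path.Apply(prior)
	plannedV, plannedErr := path.Apply(planned)
	if priorErr != nil || plannedErr != nil {
		// Changed only if the path is valid on exactly one side.
		return (priorErr != nil) != (plannedErr != nil)
	}
	eq := plannedV.Equals(priorV)
	return !eq.IsKnown() || eq.False()
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{"ami": cty.StringVal("ami-old")})
	planned := cty.ObjectVal(map[string]cty.Value{"ami": cty.StringVal("ami-old")})
	fmt.Println(pathActuallyChanged(prior, planned, cty.GetAttrPath("ami"))) // false: no replacement
}
```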
- if diff.RequiresNew() && state != nil && state.ID != "" { - diff.SetDestroy(true) + eqV := plannedChangedVal.Equals(priorChangedVal) + if !eqV.IsKnown() || eqV.False() { + reqRep.Add(path) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } } - // If we're creating a new resource, compute its ID - if diff.RequiresNew() || state == nil || state.ID == "" { - var oldID string - if state != nil { - oldID = state.Attributes["id"] + eqV := plannedNewVal.Equals(priorVal) + eq := eqV.IsKnown() && eqV.True() + + var action plans.Action + switch { + case priorVal.IsNull(): + action = plans.Create + case eq: + action = plans.NoOp + case !reqRep.Empty(): + // If there are any "requires replace" paths left _after our filtering + // above_ then this is a replace action. + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate } - - // Add diff to compute new ID - diff.init() - diff.SetAttribute("id", &ResourceAttrDiff{ - Old: oldID, - NewComputed: true, - RequiresNew: true, - Type: DiffAttrOutput, + default: + action = plans.Update + // "Delete" is never chosen here, because deletion plans are always + // created more directly elsewhere, such as in "orphan" handling. + } + + if action.IsReplace() { + // In this strange situation we want to produce a change object that + // shows our real prior object but has a _new_ object that is built + // from a null prior object, since we're going to delete the one + // that has all the computed values on it. + // + // Therefore we'll ask the provider to plan again here, giving it + // a null object for the prior, and then we'll meld that with the + // _actual_ prior state to produce a correctly-shaped replace change. + // The resulting change should show any computed attributes changing + // from known prior values to unknown values, unless the provider is + // able to predict new values for any of these computed attributes. + nullPriorVal := cty.NullVal(schema.ImpliedType()) + + // create a new proposed value from the null state and the config + proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal) + + resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + PriorState: nullPriorVal, + ProposedNewState: proposedNewVal, + PriorPrivate: plannedPrivate, }) + // We need to tread carefully here, since if there are any warnings + // in here they probably also came out of our previous call to + // PlanResourceChange above, and so we don't want to repeat them. + // Consequently, we break from the usual pattern here and only + // append these new diagnostics if there's at least one error inside. 
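The action selection performed earlier in this function reduces to a small decision table. An illustrative sketch with placeholder constants (the real code uses plans.Action values and a cty.PathSet for the requires-replace test):

```go
package main

import "fmt"

type action string

const (
	noOp             action = "no-op"
	create           action = "create"
	update           action = "update"
	createThenDelete action = "create-then-delete"
	deleteThenCreate action = "delete-then-create"
)

// chooseAction condenses EvalDiff's selection switch: a null prior means
// Create, equality means NoOp, a surviving requires-replace path forces one
// of the two replace orderings, and everything else is an in-place Update.
func chooseAction(priorIsNull, equal, requiresReplace, createBeforeDestroy bool) action {
	switch {
	case priorIsNull:
		return create
	case equal:
		return noOp
	case requiresReplace:
		if createBeforeDestroy {
			return createThenDelete
		}
		return deleteThenCreate
	default:
		return update
	}
}

func main() {
	fmt.Println(chooseAction(false, false, true, true)) // create-then-delete
}
```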
+ if resp.Diagnostics.HasErrors() { + diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config)) + return nil, diags.Err() + } + plannedNewVal = resp.PlannedState + plannedPrivate = resp.PlannedPrivate + for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid plan", + fmt.Sprintf( + "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } } - // filter out ignored resources - if err := n.processIgnoreChanges(diff); err != nil { - return nil, err + // If our prior value was tainted then we actually want this to appear + // as a replace change, even though so far we've been treating it as a + // create. + if action == plans.Create && priorValTainted != cty.NilVal { + if n.CreateBeforeDestroy { + action = plans.CreateThenDelete + } else { + action = plans.DeleteThenCreate + } + priorVal = priorValTainted + } + + // As a special case, if we have a previous diff (presumably from the plan + // phases, whereas we're now in the apply phase) and it was for a replace, + // we've already deleted the original object from state by the time we + // get here and so we would've ended up with a _create_ action this time, + // which we now need to paper over to get a result consistent with what + // we originally intended. + if n.PreviousDiff != nil { + prevChange := *n.PreviousDiff + if prevChange.Action.IsReplace() && action == plans.Create { + log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action) + action = prevChange.Action + priorVal = prevChange.Before + } } // Call post-refresh hook - err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) - }) - if err != nil { - return nil, err + if !n.Stub { + err := ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal) + }) + if err != nil { + return nil, err + } } - // Update our output - *n.OutputDiff = diff + // Update our output if we care + if n.OutputChange != nil { + *n.OutputChange = &plans.ResourceInstanceChange{ + Addr: absAddr, + Private: plannedPrivate, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: action, + Before: priorVal, + After: plannedNewVal, + }, + RequiredReplace: reqRep, + } + } + + if n.OutputValue != nil { + *n.OutputValue = configVal + } // Update the state if we care if n.OutputState != nil { - *n.OutputState = state - - // Merge our state so that the state is updated with our plan - if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) + *n.OutputState = &states.ResourceInstanceObject{ + // We use the special "planned" status here to note that this + // object's value is not yet complete. Objects with this status + // cannot be used during expression evaluation, so the caller + // must _also_ record the returned change in the active plan, + // which the expression evaluator will use in preference to this + // incomplete value recorded in the state. 
+ Status: states.ObjectPlanned, + Value: plannedNewVal, + Private: plannedPrivate, } } return nil, nil } -func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { - if diff == nil || n.Resource == nil || n.Resource.Id() == "" { +func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) { + // ignore_changes only applies when an object already exists, since we + // can't ignore changes to a thing we've not created yet. + if prior.IsNull() { + return proposed, nil + } + + ignoreChanges := n.Config.Managed.IgnoreChanges + ignoreAll := n.Config.Managed.IgnoreAllChanges + + if len(ignoreChanges) == 0 && !ignoreAll { + return proposed, nil + } + if ignoreAll { + return prior, nil + } + if prior.IsNull() || proposed.IsNull() { + // Ignore changes doesn't apply when we're creating for the first time. + // Proposed should never be null here, but if it is then we'll just let it be. + return proposed, nil + } + + return processIgnoreChangesIndividual(prior, proposed, ignoreChanges) +} + +func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) { + // When we walk below we will be using cty.Path values for comparison, so + // we'll convert our traversals here so we can compare more easily. + ignoreChangesPath := make([]cty.Path, len(ignoreChanges)) + for i, traversal := range ignoreChanges { + path := make(cty.Path, len(traversal)) + for si, step := range traversal { + switch ts := step.(type) { + case hcl.TraverseRoot: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseAttr: + path[si] = cty.GetAttrStep{ + Name: ts.Name, + } + case hcl.TraverseIndex: + path[si] = cty.IndexStep{ + Key: ts.Key, + } + default: + panic(fmt.Sprintf("unsupported traversal step %#v", step)) + } + } + ignoreChangesPath[i] = path + } + + var diags tfdiags.Diagnostics + ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) { + // First we must see if this is a path that's being ignored at all. + // We're looking for an exact match here because this walk will visit + // leaf values first and then their containers, and we want to do + // the "ignore" transform once we reach the point indicated, throwing + // away any deeper values we already produced at that point. + var ignoreTraversal hcl.Traversal + for i, candidate := range ignoreChangesPath { + if path.Equals(candidate) { + ignoreTraversal = ignoreChanges[i] + } + } + if ignoreTraversal == nil { + return v, nil + } + + // If we're able to follow the same path through the prior value, + // we'll take the value there instead, effectively undoing the + // change that was planned. + priorV, diags := hcl.ApplyPath(prior, path, nil) + if diags.HasErrors() { + // We just ignore the errors and move on here, since we assume it's + // just because the prior value was a slightly-different shape. + // It could potentially also be that the traversal doesn't match + // the schema, but we should've caught that during the validate + // walk if so. 
+ return v, nil
+ }
+ return priorV, nil
+ })
+ return ret, diags
+}
+
+func (n *EvalDiff) processIgnoreChangesOld(diff *InstanceDiff) error {
+ if diff == nil || n.Config == nil || n.Config.Managed == nil {
 return nil
 }
- ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+ ignoreChanges := n.Config.Managed.IgnoreChanges
+ ignoreAll := n.Config.Managed.IgnoreAllChanges

- if len(ignoreChanges) == 0 {
+ if len(ignoreChanges) == 0 && !ignoreAll {
 return nil
 }

@@ -207,9 +583,14 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {

 // get the complete set of keys we want to ignore
 ignorableAttrKeys := make(map[string]bool)
- for _, ignoredKey := range ignoreChanges {
- for k := range attrs {
- if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+ for k := range attrs {
+ if ignoreAll {
+ ignorableAttrKeys[k] = true
+ continue
+ }
+ for _, ignoredTraversal := range ignoreChanges {
+ ignoredKey := legacyFlatmapKeyForTraversal(ignoredTraversal)
+ if k == ignoredKey || strings.HasPrefix(k, ignoredKey+".") {
 ignorableAttrKeys[k] = true
 }
 }
@@ -243,11 +624,15 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
 containers := groupContainers(diff)
 keep := map[string]bool{}
 for _, v := range containers {
- if v.keepDiff() {
+ if v.keepDiff(ignorableAttrKeys) {
 // At least one key has changes, so list all the sibling keys
- // to keep in the diff.
+ // to keep in the diff
 for k := range v {
 keep[k] = true
+ // this key may have been added by the user to ignore, but
+ // if it's a subkey in a container, we need to un-ignore it
+ // to keep the complete container.
 delete(ignorableAttrKeys, k)
 }
 }
 }
@@ -268,21 +653,70 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {

 // If we didn't hit any of our early exit conditions, we can filter the diff.
 for k := range ignorableAttrKeys {
- log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
- n.Resource.Id(), k)
+ log.Printf("[DEBUG] [EvalIgnoreChanges] %s: Ignoring diff attribute: %s", n.Addr.String(), k)
 diff.DelAttribute(k)
 }

 return nil
}

+// legacyFlatmapKeyForTraversal constructs a key string compatible with what
+// the flatmap package would generate for an attribute addressable by the given
+// traversal.
+//
+// This is used only to shim references to attributes within the diff and
+// state structures, which have not (at the time of writing) yet been updated
+// to use the newer HCL-based representations.
+func legacyFlatmapKeyForTraversal(traversal hcl.Traversal) string {
+ var buf bytes.Buffer
+ first := true
+ for _, step := range traversal {
+ if !first {
+ buf.WriteByte('.')
+ }
+ switch ts := step.(type) {
+ case hcl.TraverseRoot:
+ buf.WriteString(ts.Name)
+ case hcl.TraverseAttr:
+ buf.WriteString(ts.Name)
+ case hcl.TraverseIndex:
+ val := ts.Key
+ switch val.Type() {
+ case cty.Number:
+ bf := val.AsBigFloat()
+ buf.WriteString(bf.String())
+ case cty.String:
+ s := val.AsString()
+ buf.WriteString(s)
+ default:
+ // should never happen, since no other types appear in
+ // traversals in practice.
+ buf.WriteByte('?')
+ }
+ default:
+ // should never happen, since we've covered all of the types
+ // that show up in parsed traversals in practice.
+ buf.WriteByte('?') + } + first = false + } + return buf.String() +} + // a group of key-*ResourceAttrDiff pairs from the same flatmapped container type flatAttrDiff map[string]*ResourceAttrDiff -// we need to keep all keys if any of them have a diff -func (f flatAttrDiff) keepDiff() bool { - for _, v := range f { - if !v.Empty() && !v.NewComputed { +// we need to keep all keys if any of them have a diff that's not ignored +func (f flatAttrDiff) keepDiff(ignoreChanges map[string]bool) bool { + for k, v := range f { + ignore := false + for attr := range ignoreChanges { + if strings.HasPrefix(k, attr) { + ignore = true + } + } + + if !v.Empty() && !v.NewComputed && !ignore { return true } } @@ -319,159 +753,214 @@ func groupContainers(d *InstanceDiff) map[string]flatAttrDiff { // EvalDiffDestroy is an EvalNode implementation that returns a plain // destroy diff. type EvalDiffDestroy struct { - Info *InstanceInfo - State **InstanceState - Output **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + State **states.ResourceInstanceObject + ProviderAddr addrs.AbsProviderConfig + + Output **plans.ResourceInstanceChange + OutputState **states.ResourceInstanceObject } // TODO: test func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) state := *n.State - // If there is no state or we don't have an ID, we're already destroyed - if state == nil || state.ID == "" { + if n.ProviderAddr.ProviderConfig.Type == "" { + if n.DeposedKey == "" { + panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr)) + } else { + panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey)) + } + } + + // If there is no state or our attributes object is null then we're already + // destroyed. + if state == nil || state.Value.IsNull() { return nil, nil } // Call pre-diff hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, state) + return h.PreDiff( + absAddr, n.DeposedKey.Generation(), + state.Value, + cty.NullVal(cty.DynamicPseudoType), + ) }) if err != nil { return nil, err } - // The diff - diff := &InstanceDiff{Destroy: true} + // Change is always the same for a destroy. We don't need the provider's + // help for this one. + // TODO: Should we give the provider an opportunity to veto this? + change := &plans.ResourceInstanceChange{ + Addr: absAddr, + DeposedKey: n.DeposedKey, + Change: plans.Change{ + Action: plans.Delete, + Before: state.Value, + After: cty.NullVal(cty.DynamicPseudoType), + }, + Private: state.Private, + ProviderAddr: n.ProviderAddr, + } // Call post-diff hook err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostDiff(n.Info, diff) + return h.PostDiff( + absAddr, + n.DeposedKey.Generation(), + change.Action, + change.Before, + change.After, + ) }) if err != nil { return nil, err } // Update our output - *n.Output = diff + *n.Output = change - return nil, nil -} - -// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to -// the full diff. 
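processIgnoreChangesIndividual above leans on cty.Transform's leaves-first walk to undo planned changes at ignored paths. This stripped-down sketch works with cty.Path values directly, skipping the HCL-traversal conversion and diagnostics; `restoreIgnoredPaths` is an illustrative helper:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// restoreIgnoredPaths walks the proposed value and, wherever the current
// path exactly matches an ignored path, substitutes the value from the
// prior object, effectively undoing the planned change at that point.
func restoreIgnoredPaths(prior, proposed cty.Value, ignored []cty.Path) cty.Value {
	ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
		for _, candidate := range ignored {
			if path.Equals(candidate) {
				if priorV, err := candidate.Apply(prior); err == nil {
					return priorV, nil // keep the old value; ignore the change
				}
			}
		}
		return v, nil
	})
	return ret
}

func main() {
	prior := cty.ObjectVal(map[string]cty.Value{"tags": cty.StringVal("keep-me")})
	proposed := cty.ObjectVal(map[string]cty.Value{"tags": cty.StringVal("overwritten")})
	out := restoreIgnoredPaths(prior, proposed, []cty.Path{cty.GetAttrPath("tags")})
	fmt.Println(out.GetAttr("tags")) // cty.StringVal("keep-me")
}
```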
-type EvalDiffDestroyModule struct {
- Path []string
-}
-
-// TODO: test
-func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
- diff, lock := ctx.Diff()
-
- // Acquire the lock so that we can do this safely concurrently
- lock.Lock()
- defer lock.Unlock()
-
- // Write the diff
- modDiff := diff.ModuleByPath(n.Path)
- if modDiff == nil {
- modDiff = diff.AddModule(n.Path)
+ if n.OutputState != nil {
+ // Record our proposed new state, which is nil because we're destroying.
+ *n.OutputState = nil
 }
- modDiff.Destroy = true

 return nil, nil
}

-// EvalFilterDiff is an EvalNode implementation that filters the diff
-// according to some filter.
-type EvalFilterDiff struct {
- // Input and output
- Diff **InstanceDiff
- Output **InstanceDiff
-
- // Destroy, if true, will only include a destroy diff if it is set.
- Destroy bool
+// EvalReduceDiff is an EvalNode implementation that takes a planned resource
+// instance change as might be produced by EvalDiff or EvalDiffDestroy and
+// "simplifies" it to a single atomic action to be performed by a specific
+// graph node.
+//
+// Callers must specify whether they are a destroy node or a regular apply
+// node. If the result is NoOp then the given change requires no action for
+// the specific graph node calling this and so evaluation of that graph
+// node should exit early and take no action.
+//
+// The object written to OutChange may either be identical to InChange or
+// a new change object derived from InChange. Because of the former case, the
+// caller must not mutate the object returned in OutChange.
+type EvalReduceDiff struct {
+ Addr addrs.ResourceInstance
+ InChange **plans.ResourceInstanceChange
+ Destroy bool
+ OutChange **plans.ResourceInstanceChange
}

-func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
- if *n.Diff == nil {
- return nil, nil
- }
-
- input := *n.Diff
- result := new(InstanceDiff)
-
- if n.Destroy {
- if input.GetDestroy() || input.RequiresNew() {
- result.SetDestroy(true)
+// TODO: test
+func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
+ in := *n.InChange
+ out := in.Simplify(n.Destroy)
+ if n.OutChange != nil {
+ *n.OutChange = out
+ }
+ if out.Action != in.Action {
+ if n.Destroy {
+ log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
+ } else {
+ log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
 }
 }
-
- if n.Output != nil {
- *n.Output = result
- }
-
 return nil, nil
}

-// EvalReadDiff is an EvalNode implementation that writes the diff to
-// the full diff.
+// EvalReadDiff is an EvalNode implementation that retrieves the planned
+// change for a particular resource instance object.
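EvalReduceDiff's job is easier to see with concrete actions: a replace change is handled by two graph nodes, and each reduces it to the single leg it is responsible for. This is a conceptual sketch with placeholder constants only; the precise mapping lives in the plans package's Simplify method and may differ in edge cases:

```go
package main

import "fmt"

type action string

const (
	noOp             action = "no-op"
	create           action = "create"
	remove           action = "delete"
	createThenDelete action = "create-then-delete"
	deleteThenCreate action = "delete-then-create"
)

// reduceForNode sketches the reduction: the destroy node keeps only the
// delete leg of a replace, the apply node keeps only the create leg, and a
// change irrelevant to the calling node collapses to NoOp.
func reduceForNode(in action, destroyNode bool) action {
	switch in {
	case createThenDelete, deleteThenCreate:
		if destroyNode {
			return remove
		}
		return create
	case remove:
		if !destroyNode {
			return noOp // nothing for a regular apply node to do
		}
	default:
		if destroyNode {
			return noOp // nothing for a destroy node to do
		}
	}
	return in
}

func main() {
	fmt.Println(reduceForNode(deleteThenCreate, true))  // delete
	fmt.Println(reduceForNode(deleteThenCreate, false)) // create
}
```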
type EvalReadDiff struct { - Name string - Diff **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange } func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() + providerSchema := *n.ProviderSchema + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } - // Write the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + csrc := changes.GetResourceInstanceChange(addr, gen) + if csrc == nil { + log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr) return nil, nil } - *n.Diff = modDiff.Resources[n.Name] + change, err := csrc.Decode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err) + } + if n.Change != nil { + *n.Change = change + } + + log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr) return nil, nil } -// EvalWriteDiff is an EvalNode implementation that writes the diff to -// the full diff. +// EvalWriteDiff is an EvalNode implementation that saves a planned change +// for an instance object into the set of global planned changes. type EvalWriteDiff struct { - Name string - Diff **InstanceDiff + Addr addrs.ResourceInstance + DeposedKey states.DeposedKey + ProviderSchema **ProviderSchema + Change **plans.ResourceInstanceChange } // TODO: test func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { - diff, lock := ctx.Diff() - - // The diff to write, if its empty it should write nil - var diffVal *InstanceDiff - if n.Diff != nil { - diffVal = *n.Diff + changes := ctx.Changes() + addr := n.Addr.Absolute(ctx.Path()) + if n.Change == nil || *n.Change == nil { + // Caller sets nil to indicate that we need to remove a change from + // the set of changes. + gen := states.CurrentGen + if n.DeposedKey != states.NotDeposed { + gen = n.DeposedKey + } + changes.RemoveResourceInstanceChange(addr, gen) + return nil, nil } - if diffVal.Empty() { - diffVal = nil + + providerSchema := *n.ProviderSchema + change := *n.Change + + if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey { + // Should never happen, and indicates a bug in the caller. 
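Both EvalReadDiff and EvalWriteDiff need the provider schema because changes are stored serialized against the schema's implied type. The same type-directed round trip can be demonstrated with cty's JSON codec (the real changeset uses a different serialization, but the principle of encoding and decoding against the same type is identical):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	ctyjson "github.com/zclconf/go-cty/cty/json"
)

func main() {
	// Stand-in for schema.ImpliedType() from a provider schema.
	ty := cty.Object(map[string]cty.Type{"ami": cty.String})
	val := cty.ObjectVal(map[string]cty.Value{"ami": cty.StringVal("ami-12345")})

	buf, err := ctyjson.Marshal(val, ty) // encode against the implied type
	if err != nil {
		panic(err)
	}

	decoded, err := ctyjson.Unmarshal(buf, ty) // decoding needs the same type
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.RawEquals(val)) // true
}
```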
+ panic("inconsistent address and/or deposed key in EvalWriteDiff") } - // Acquire the lock so that we can do this safely concurrently - lock.Lock() - defer lock.Unlock() + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) + } - // Write the diff - modDiff := diff.ModuleByPath(ctx.Path()) - if modDiff == nil { - modDiff = diff.AddModule(ctx.Path()) + csrc, err := change.Encode(schema.ImpliedType()) + if err != nil { + return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err) } - if diffVal != nil { - modDiff.Resources[n.Name] = diffVal + + changes.AppendResourceInstanceChange(csrc) + if n.DeposedKey == states.NotDeposed { + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr) } else { - delete(modDiff.Resources, n.Name) + log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey) } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go index 62cc581f..a60f4a0a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go @@ -2,47 +2,63 @@ package terraform import ( "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalImportState is an EvalNode implementation that performs an // ImportState operation on a provider. This will return the imported // states but won't modify any actual state. type EvalImportState struct { - Provider *ResourceProvider - Info *InstanceInfo - Id string - Output *[]*InstanceState + Addr addrs.ResourceInstance + Provider *providers.Interface + ID string + Output *[]providers.ImportedResource } // TODO: test func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) provider := *n.Provider + var diags tfdiags.Diagnostics { // Call pre-import hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreImportState(n.Info, n.Id) + return h.PreImportState(absAddr, n.ID) }) if err != nil { return nil, err } } - // Import! 
- state, err := provider.ImportState(n.Info, n.Id) - if err != nil { - return nil, fmt.Errorf( - "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) + resp := provider.ImportResourceState(providers.ImportResourceStateRequest{ + TypeName: n.Addr.Resource.Type, + ID: n.ID, + }) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + imported := resp.ImportedResources + + for _, obj := range imported { + log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName) } if n.Output != nil { - *n.Output = state + *n.Output = imported } { // Call post-import hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostImportState(n.Info, state) + return h.PostImportState(absAddr, imported) }) if err != nil { return nil, err @@ -55,22 +71,25 @@ func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { // EvalImportStateVerify verifies the state after ImportState and // after the refresh to make sure it is non-nil and valid. type EvalImportStateVerify struct { - Info *InstanceInfo - Id string - State **InstanceState + Addr addrs.ResourceInstance + State **states.ResourceInstanceObject } // TODO: test func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + state := *n.State - if state.Empty() { - return nil, fmt.Errorf( - "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ - "exist. Please verify the ID is correct. You cannot import non-existent\n"+ - "resources using Terraform import.", - n.Info.HumanId(), - n.Id) + if state.Value.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Cannot import non-existent remote object", + fmt.Sprintf( + "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.", + n.Addr.String(), + ), + )) } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go new file mode 100644 index 00000000..0c051f76 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go @@ -0,0 +1,61 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/addrs" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/zclconf/go-cty/cty" +) + +// EvalConfigBlock is an EvalNode implementation that takes a raw +// configuration block and evaluates any expressions within it. +// +// ExpandedConfig is populated with the result of expanding any "dynamic" +// blocks in the given body, which can be useful for extracting correct source +// location information for specific attributes in the result. 
+type EvalConfigBlock struct { + Config *hcl.Body + Schema *configschema.Block + SelfAddr addrs.Referenceable + Output *cty.Value + ExpandedConfig *hcl.Body + ContinueOnErr bool +} + +func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) { + val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey) + if diags.HasErrors() && n.ContinueOnErr { + log.Printf("[WARN] Block evaluation failed: %s", diags.Err()) + return nil, EvalEarlyExitError{} + } + + if n.Output != nil { + *n.Output = val + } + if n.ExpandedConfig != nil { + *n.ExpandedConfig = body + } + + return nil, diags.ErrWithWarnings() +} + +// EvalConfigExpr is an EvalNode implementation that takes a raw configuration +// expression and evaluates it. +type EvalConfigExpr struct { + Expr hcl.Expression + SelfAddr addrs.Referenceable + Output *cty.Value +} + +func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) { + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr) + + if n.Output != nil { + *n.Output = val + } + + return nil, diags.ErrWithWarnings() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go new file mode 100644 index 00000000..bad9ac5b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go @@ -0,0 +1,74 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" +) + +// EvalLocal is an EvalNode implementation that evaluates the +// expression for a local value and writes it into a transient part of +// the state. +type EvalLocal struct { + Addr addrs.LocalValue + Expr hcl.Expression +} + +func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + + // We ignore diags here because any problems we might find will be found + // again in EvaluateExpr below. + refs, _ := lang.ReferencesInExpr(n.Expr) + for _, ref := range refs { + if ref.Subject == n.Addr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referencing local value", + Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr), + Subject: ref.SourceRange.ToHCL().Ptr(), + Context: n.Expr.Range().Ptr(), + }) + } + } + if diags.HasErrors() { + return nil, diags.Err() + } + + val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + diags = diags.Append(moreDiags) + if moreDiags.HasErrors() { + return nil, diags.Err() + } + + state := ctx.State() + if state == nil { + return nil, fmt.Errorf("cannot write local value to nil state") + } + + state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val) + + return nil, nil +} + +// EvalDeleteLocal is an EvalNode implementation that deletes a Local value +// from the state. Locals aren't persisted, but we don't need to evaluate them +// during destroy. 
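The self-reference check in EvalLocal above can be approximated directly with HCL's expression API, without the lang and addrs packages. A sketch assuming the hcl2 syntax parser used throughout this vendor tree; `selfReferences` is an illustrative helper:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
)

// selfReferences reports whether an expression for local value `name`
// refers back to itself, the condition EvalLocal rejects. The real code
// compares parsed addrs.Reference values; this version inspects the raw
// hcl.Traversals from Expression.Variables.
func selfReferences(expr hcl.Expression, name string) bool {
	for _, traversal := range expr.Variables() {
		if traversal.RootName() != "local" || len(traversal) < 2 {
			continue
		}
		if attr, ok := traversal[1].(hcl.TraverseAttr); ok && attr.Name == name {
			return true
		}
	}
	return false
}

func main() {
	expr, _ := hclsyntax.ParseExpression([]byte("local.greeting"), "main.tf", hcl.Pos{Line: 1, Column: 1})
	fmt.Println(selfReferences(expr, "greeting")) // true
}
```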
+type EvalDeleteLocal struct { + Addr addrs.LocalValue +} + +func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { + state := ctx.State() + if state == nil { + return nil, nil + } + + state.RemoveLocalValue(n.Addr.Absolute(ctx.Path())) + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go index cf61781e..10573971 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go @@ -4,116 +4,132 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" ) // EvalDeleteOutput is an EvalNode implementation that deletes an output // from the state. type EvalDeleteOutput struct { - Name string + Addr addrs.OutputValue } // TODO: test func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { - state, lock := ctx.State() + state := ctx.State() if state == nil { return nil, nil } - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil - } - - delete(mod.Outputs, n.Name) - + state.RemoveOutputValue(n.Addr.Absolute(ctx.Path())) return nil, nil } // EvalWriteOutput is an EvalNode implementation that writes the output // for the given name to the current state. type EvalWriteOutput struct { - Name string + Addr addrs.OutputValue Sensitive bool - Value *config.RawConfig + Expr hcl.Expression + // ContinueOnErr allows interpolation to fail during Input + ContinueOnErr bool } // TODO: test func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { - cfg, err := ctx.Interpolate(n.Value, nil) - if err != nil { - // Log error but continue anyway - log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err) - } + addr := n.Addr.Absolute(ctx.Path()) - state, lock := ctx.State() + // This has to run before we have a state lock, since evaluation also + // reads the state + val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil) + // We'll handle errors below, after we have loaded the module. + + state := ctx.State() if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") + return nil, nil } - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) + changes := ctx.Changes() // may be nil, if we're not working on a changeset + + // handling the interpolation error + if diags.HasErrors() { + if n.ContinueOnErr || flagWarnOutputErrors { + log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err()) + // if we're continuing, make sure the output is included, and + // marked as unknown. If the evaluator was able to find a type + // for the value in spite of the error then we'll use it. 
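The state-vs-changeset split for outputs described below hinges on cty.UnknownAsNull: state cannot represent unknowns, so they are nulled out there while the changeset keeps the real value. A tiny demonstration of what that normalization does to a partially-unknown object:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	val := cty.ObjectVal(map[string]cty.Value{
		"endpoint": cty.UnknownVal(cty.String), // not known until apply
		"region":   cty.StringVal("us-west-2"),
	})
	// UnknownAsNull replaces every unknown leaf with a null of the same
	// type, producing a value that is safe to persist in the state file.
	fmt.Println(cty.UnknownAsNull(val))
}
```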
+ n.setValue(addr, state, changes, cty.UnknownVal(val.Type())) + return nil, EvalEarlyExitError{} + } + return nil, diags.Err() } - // Get the value from the config - var valueRaw interface{} = config.UnknownVariableValue - if cfg != nil { - var ok bool - valueRaw, ok = cfg.Get("value") - if !ok { - valueRaw = "" - } - if cfg.IsComputed("value") { - valueRaw = config.UnknownVariableValue - } + n.setValue(addr, state, changes, val) + + return nil, nil +} + +func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) { + if val.IsKnown() && !val.IsNull() { + // The state itself doesn't represent unknown values, so we null them + // out here and then we'll save the real unknown value in the planned + // changeset below, if we have one on this graph walk. + log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr) + stateVal := cty.UnknownAsNull(val) + state.SetOutputValue(addr, stateVal, n.Sensitive) + } else { + log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr) + state.RemoveOutputValue(addr) } - switch valueTyped := valueRaw.(type) { - case string: - mod.Outputs[n.Name] = &OutputState{ - Type: "string", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "list", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case map[string]interface{}: - mod.Outputs[n.Name] = &OutputState{ - Type: "map", - Sensitive: n.Sensitive, - Value: valueTyped, - } - case []map[string]interface{}: - // an HCL map is multi-valued, so if this was read out of a config the - // map may still be in a slice. - if len(valueTyped) == 1 { - mod.Outputs[n.Name] = &OutputState{ - Type: "map", + // If we also have an active changeset then we'll replicate the value in + // there. This is used in preference to the state where present, since it + // *is* able to represent unknowns, while the state cannot. + if changes != nil { + // For the moment we are not properly tracking changes to output + // values, and just marking them always as "Create" or "Destroy" + // actions. A future release will rework the output lifecycle so we + // can track their changes properly, in a similar way to how we work + // with resource instances. + + var change *plans.OutputChange + if !val.IsNull() { + change = &plans.OutputChange{ + Addr: addr, Sensitive: n.Sensitive, - Value: valueTyped[0], + Change: plans.Change{ + Action: plans.Create, + Before: cty.NullVal(cty.DynamicPseudoType), + After: val, + }, + } + } else { + change = &plans.OutputChange{ + Addr: addr, + Sensitive: n.Sensitive, + Change: plans.Change{ + // This is just a weird placeholder delete action since + // we don't have an actual prior value to indicate. + // FIXME: Generate real planned changes for output values + // that include the old values. 
+				Action: plans.Delete,
+				Before: cty.NullVal(cty.DynamicPseudoType),
+				After:  cty.NullVal(cty.DynamicPseudoType),
+			},
 			}
-			break
 		}
-		return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
-			n.Name, valueTyped, len(valueTyped))
-	default:
-		return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
-	}
-	return nil, nil
+		cs, err := change.Encode()
+		if err != nil {
+			// Should never happen, since we just constructed this right above
+			panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err))
+		}
+		log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr)
+		changes.RemoveOutputChange(addr) // remove any existing planned change, if present
+		changes.AppendOutputChange(cs)   // add the new planned change
+	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
index 61efcc23..7df6584a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -2,143 +2,145 @@ package terraform

 import (
 	"fmt"
+	"log"

-	"github.com/hashicorp/terraform/config"
-)
-
-// EvalSetProviderConfig sets the parent configuration for a provider
-// without configuring that provider, validating it, etc.
-type EvalSetProviderConfig struct {
-	Provider string
-	Config   **ResourceConfig
-}
-
-func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
-	return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
-}
+	"github.com/hashicorp/hcl2/hcl"

-// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
-// merged with parents and inputs on top of what is configured in the file.
-type EvalBuildProviderConfig struct {
-	Provider string
-	Config   **ResourceConfig
-	Output   **ResourceConfig
-}
-
-func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
-	cfg := *n.Config
-
-	// If we have a configuration set, then merge that in
-	if input := ctx.ProviderInput(n.Provider); input != nil {
-		rc, err := config.NewRawConfig(input)
-		if err != nil {
-			return nil, err
-		}
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/tfdiags"
+)

-		merged := cfg.raw.Merge(rc)
-		cfg = NewResourceConfig(merged)
+func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body {
+	var configBody hcl.Body
+	if config != nil {
+		configBody = config.Config
 	}

-	// Get the parent configuration if there is one
-	if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
-		merged := cfg.raw.Merge(parent.raw)
-		cfg = NewResourceConfig(merged)
+	var inputBody hcl.Body
+	inputConfig := ctx.ProviderInput(addr)
+	if len(inputConfig) > 0 {
+		inputBody = configs.SynthBody("", inputConfig)
 	}

-	*n.Output = cfg
-	return nil, nil
+	switch {
+	case configBody != nil && inputBody != nil:
+		log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr)
+		// Note that the inputBody is the _base_ here, because configs.MergeBodies
+		// expects the base to have all of the required fields, while these are
+		// forced to be optional for the override.
The input process should + // guarantee that we have a value for each of the required arguments and + // that in practice the sets of attributes in each body will be + // disjoint. + return configs.MergeBodies(inputBody, configBody) + case configBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr) + return configBody + case inputBody != nil: + log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr) + return inputBody + default: + log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr) + return hcl.EmptyBody() + } } // EvalConfigProvider is an EvalNode implementation that configures // a provider that is already initialized and retrieved. type EvalConfigProvider struct { - Provider string - Config **ResourceConfig + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider } func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { - return nil, ctx.ConfigureProvider(n.Provider, *n.Config) + if n.Provider == nil { + return nil, fmt.Errorf("EvalConfigProvider Provider is nil") + } + + var diags tfdiags.Diagnostics + provider := *n.Provider + config := n.Config + + configBody := buildProviderConfig(ctx, n.Addr, config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configDiags := ctx.ConfigureProvider(n.Addr, configVal) + configDiags = configDiags.InConfigBody(configBody) + + return nil, configDiags.ErrWithWarnings() } // EvalInitProvider is an EvalNode implementation that initializes a provider // and returns nothing. The provider can be retrieved again with the // EvalGetProvider node. type EvalInitProvider struct { - Name string + TypeName string + Addr addrs.ProviderConfig } func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { - return ctx.InitProvider(n.Name) + return ctx.InitProvider(n.TypeName, n.Addr) } // EvalCloseProvider is an EvalNode implementation that closes provider // connections that aren't needed anymore. type EvalCloseProvider struct { - Name string + Addr addrs.ProviderConfig } func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { - ctx.CloseProvider(n.Name) + ctx.CloseProvider(n.Addr) return nil, nil } // EvalGetProvider is an EvalNode implementation that retrieves an already // initialized provider instance for the given name. +// +// Unlike most eval nodes, this takes an _absolute_ provider configuration, +// because providers can be passed into and inherited between modules. +// Resource nodes must therefore know the absolute path of the provider they +// will use, which is usually accomplished by implementing +// interface GraphNodeProviderConsumer. type EvalGetProvider struct { - Name string - Output *ResourceProvider + Addr addrs.AbsProviderConfig + Output *providers.Interface + + // If non-nil, Schema will be updated after eval to refer to the + // schema of the provider. 
+ Schema **ProviderSchema } func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { - result := ctx.Provider(n.Name) + if n.Addr.ProviderConfig.Type == "" { + // Should never happen + panic("EvalGetProvider used with uninitialized provider configuration address") + } + + result := ctx.Provider(n.Addr) if result == nil { - return nil, fmt.Errorf("provider %s not initialized", n.Name) + return nil, fmt.Errorf("provider %s not initialized", n.Addr) } if n.Output != nil { *n.Output = result } - return nil, nil -} - -// EvalInputProvider is an EvalNode implementation that asks for input -// for the given provider configurations. -type EvalInputProvider struct { - Name string - Provider *ResourceProvider - Config **ResourceConfig -} - -func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) { - // If we already configured this provider, then don't do this again - if v := ctx.ProviderInput(n.Name); v != nil { - return nil, nil - } - - rc := *n.Config - - // Wrap the input into a namespace - input := &PrefixUIInput{ - IdPrefix: fmt.Sprintf("provider.%s", n.Name), - QueryPrefix: fmt.Sprintf("provider.%s.", n.Name), - UIInput: ctx.Input(), - } - - // Go through each provider and capture the input necessary - // to satisfy it. - config, err := (*n.Provider).Input(input, rc) - if err != nil { - return nil, fmt.Errorf( - "Error configuring %s: %s", n.Name, err) - } - - // Set the input that we received so that child modules don't attempt - // to ask for input again. - if config != nil && len(config.Config) > 0 { - ctx.SetProviderInput(n.Name, config.Config) - } else { - ctx.SetProviderInput(n.Name, map[string]interface{}{}) + if n.Schema != nil { + *n.Schema = ctx.ProviderSchema(n.Addr) } return nil, nil diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go index 89579c05..bc6b5cc7 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/provisioners" ) // EvalInitProvisioner is an EvalNode implementation that initializes a provisioner @@ -30,7 +33,8 @@ func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) { // initialized provisioner instance for the given name. 
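A pattern worth noting before EvalGetProvisioner, which continues below: these eval nodes hand results to later nodes through pointers supplied by the graph builder (for example `Output *providers.Interface` above, or a `**ProviderSchema` when the slot itself holds a pointer). A toy sketch of the convention, with hypothetical names not taken from the patch:

```go
package main

import "fmt"

// getNode stands in for an eval node like EvalGetProvider: at eval time it
// fills a slot that the graph builder allocated up front.
type getNode struct {
	output *string
}

func (n *getNode) eval() {
	*n.output = "provider-client" // write through the shared pointer
}

// useNode stands in for a later eval node that reads the same slot.
type useNode struct {
	input *string
}

func (n *useNode) eval() {
	fmt.Println("using:", *n.input)
}

func main() {
	var slot string // allocated once, shared by both nodes
	(&getNode{output: &slot}).eval()
	(&useNode{input: &slot}).eval()
}
```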
 type EvalGetProvisioner struct {
 	Name   string
-	Output *ResourceProvisioner
+	Output *provisioners.Interface
+	Schema **configschema.Block
 }

 func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@@ -43,5 +47,9 @@ func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
 		*n.Output = result
 	}

+	if n.Schema != nil {
+		*n.Schema = ctx.ProvisionerSchema(n.Name)
+	}
+
 	return result, nil
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
index fb85a284..728719a7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -2,105 +2,331 @@ package terraform

 import (
 	"fmt"
+	"log"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/plans/objchange"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
 )

-// EvalReadDataDiff is an EvalNode implementation that executes a data
-// resource's ReadDataDiff method to discover what attributes it exports.
-type EvalReadDataDiff struct {
-	Provider    *ResourceProvider
-	Output      **InstanceDiff
-	OutputState **InstanceState
-	Config      **ResourceConfig
-	Info        *InstanceInfo
-
-	// Set Previous when re-evaluating diff during apply, to ensure that
-	// the "Destroy" flag is preserved.
-	Previous **InstanceDiff
+// EvalReadData is an EvalNode implementation that deals with the main part
+// of the data resource lifecycle: either actually reading from the data source
+// or generating a plan to do so.
+type EvalReadData struct {
+	Addr           addrs.ResourceInstance
+	Config         *configs.Resource
+	Dependencies   []addrs.Referenceable
+	Provider       *providers.Interface
+	ProviderAddr   addrs.AbsProviderConfig
+	ProviderSchema **ProviderSchema
+
+	// Planned is set when dealing with data resources that were deferred to
+	// the apply walk, to let us see what was planned. If this is set, the
+	// evaluation of the config is required to produce a wholly-known
+	// configuration which is consistent with the partial object included
+	// in this planned change.
+	Planned **plans.ResourceInstanceChange
+
+	// ForcePlanRead, if true, overrides the usual behavior of immediately
+	// reading from the data source where possible, instead forcing us to
+	// _always_ generate a plan. This is used during the plan walk, since we
+	// mustn't actually apply anything there. (The resulting state doesn't
+	// get persisted)
+	ForcePlanRead bool
+
+	// The result from this EvalNode has a few different possibilities
+	// depending on the input:
+	// - If Planned is nil then we assume we're aiming to _produce_ the plan,
+	//   and so the following two outcomes are possible:
+	//     - OutputChange.Action is plans.NoOp and OutputState is the complete
+	//       result of reading from the data source. This is the easy path.
+	//     - OutputChange.Action is plans.Read and OutputState is a planned
+	//       object placeholder (states.ObjectPlanned). In this case, the
+	//       returned change must be recorded in the overall changeset and
+	//       eventually passed to another instance of this struct during the
+	//       apply walk.
+ // - If Planned is non-nil then we assume we're aiming to complete a + // planned read from an earlier plan walk. In this case the only possible + // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp + // change and put the complete resulting state in OutputState, ready to + // be saved in the overall state and used for expression evaluation. + OutputChange **plans.ResourceInstanceChange + OutputValue *cty.Value + OutputConfigValue *cty.Value + OutputState **states.ResourceInstanceObject } -func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test +func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadData: working on %s", absAddr) - err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreDiff(n.Info, nil) - }) - if err != nil { - return nil, err + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema not available for %s", n.Addr) + } + + var diags tfdiags.Diagnostics + var change *plans.ResourceInstanceChange + var configVal cty.Value + + // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and + // EvalReadDataApply did, but it seems like we should handle that via a + // separate mechanism since it boils down to just deleting the object from + // the state... and we do that on every plan anyway, forcing the data + // resource to re-read. + + config := *n.Config + provider := *n.Provider + providerSchema := *n.ProviderSchema + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type) + } + + // We'll always start by evaluating the configuration. What we do after + // that will depend on the evaluation result along with what other inputs + // we were given. + objTy := schema.ImpliedType() + priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time + + keyData := EvalDataForInstanceKey(n.Addr.Key) + + var configDiags tfdiags.Diagnostics + configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() } - var diff *InstanceDiff + proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal) - if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { - // If we're re-diffing for a diff that was already planning to - // destroy, then we'll just continue with that plan. - diff = &InstanceDiff{Destroy: true} - } else { - provider := *n.Provider - config := *n.Config + // If our configuration contains any unknown values then we must defer the + // read to the apply phase by producing a "Read" change for this resource, + // and a placeholder value for it in the state. + if n.ForcePlanRead || !configVal.IsWhollyKnown() { + // If the configuration is still unknown when we're applying a planned + // change then that indicates a bug in Terraform, since we should have + // everything resolved by now. 
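The deferral decision just described hinges on `cty.Value.IsWhollyKnown`: a data source can be read at plan time only when every attribute in its evaluated configuration is known. A small sketch of that decision, using the canonical `github.com/zclconf/go-cty` import path (the vendored tree uses the `github.com` mirror) and illustrative attribute names:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

// mustDefer mirrors the check above: read immediately only when the whole
// configuration is known and we're not forced to produce a plan.
func mustDefer(configVal cty.Value, forcePlanRead bool) bool {
	return forcePlanRead || !configVal.IsWhollyKnown()
}

func main() {
	known := cty.ObjectVal(map[string]cty.Value{
		"filter": cty.StringVal("env=prod"),
	})
	unknown := cty.ObjectVal(map[string]cty.Value{
		"filter": cty.UnknownVal(cty.String), // depends on a managed resource
	})

	fmt.Println(mustDefer(known, false))   // false: read now, record a NoOp
	fmt.Println(mustDefer(unknown, false)) // true: emit a plans.Read change
	fmt.Println(mustDefer(known, true))    // true: plan walk forces deferral
}
```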
+		if n.Planned != nil && *n.Planned != nil {
+			return nil, fmt.Errorf(
+				"configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
+				absAddr,
+			)
+		}
+		if n.ForcePlanRead {
+			log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr)
+		} else {
+			log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr)
+		}

-		var err error
-		diff, err = provider.ReadDataDiff(n.Info, config)
+		err := ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
+		})
 		if err != nil {
 			return nil, err
 		}
-		if diff == nil {
-			diff = new(InstanceDiff)
+
+		change = &plans.ResourceInstanceChange{
+			Addr:         absAddr,
+			ProviderAddr: n.ProviderAddr,
+			Change: plans.Change{
+				Action: plans.Read,
+				Before: priorVal,
+				After:  proposedNewVal,
+			},
 		}

-		// if id isn't explicitly set then it's always computed, because we're
-		// always "creating a new resource".
-		diff.init()
-		if _, ok := diff.Attributes["id"]; !ok {
-			diff.SetAttribute("id", &ResourceAttrDiff{
-				Old:         "",
-				NewComputed: true,
-				RequiresNew: true,
-				Type:        DiffAttrOutput,
-			})
+		err = ctx.Hook(func(h Hook) (HookAction, error) {
+			return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal)
+		})
+		if err != nil {
+			return nil, err
+		}
+
+		if n.OutputChange != nil {
+			*n.OutputChange = change
+		}
+		if n.OutputValue != nil {
+			*n.OutputValue = change.After
+		}
+		if n.OutputConfigValue != nil {
+			*n.OutputConfigValue = configVal
 		}
+		if n.OutputState != nil {
+			state := &states.ResourceInstanceObject{
+				Value:        change.After,
+				Status:       states.ObjectPlanned, // because the partial value in the plan must be used for now
+				Dependencies: n.Dependencies,
+			}
+			*n.OutputState = state
+		}
+
+		return nil, diags.ErrWithWarnings()
 	}

-	err = ctx.Hook(func(h Hook) (HookAction, error) {
-		return h.PostDiff(n.Info, diff)
+	if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read {
+		// If any other action gets in here then that's always a bug; this
+		// EvalNode only deals with reading.
+		return nil, fmt.Errorf(
+			"invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)",
+			(*n.Planned).Action, absAddr,
+		)
+	}
+
+	log.Printf("[TRACE] Re-validating config for %s", absAddr)
+	validateResp := provider.ValidateDataSourceConfig(
+		providers.ValidateDataSourceConfigRequest{
+			TypeName: n.Addr.Resource.Type,
+			Config:   configVal,
+		},
+	)
+	if validateResp.Diagnostics.HasErrors() {
+		return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err()
+	}
+
+	// If we get down here then our configuration is complete and we're ready
+	// to actually call the provider to read the data.
+	log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
+
+	err := ctx.Hook(func(h Hook) (HookAction, error) {
+		// We don't have a state yet, so we'll just give the hook an
+		// empty one to work with.
+ return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) }) if err != nil { return nil, err } - *n.Output = diff + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: configVal, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + newVal := resp.State + if newVal == cty.NilVal { + // This can happen with incompletely-configured mocks. We'll allow it + // and treat it as an alias for a properly-typed null value. + newVal = cty.NullVal(schema.ImpliedType()) + } + + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + if newVal.IsNull() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced null object", + fmt.Sprintf( + "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + } + if !newVal.IsWhollyKnown() { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, + ), + )) + + // We'll still save the object, but we need to eliminate any unknown + // values first because we can't serialize them in the state file. + // Note that this may cause set elements to be coalesced if they + // differed only by having unknown values, but we don't worry about + // that here because we're saving the value only for inspection + // purposes; the error we added above will halt the graph walk. + newVal = cty.UnknownAsNull(newVal) + } + // Since we've completed the read, we actually have no change to make, but + // we'll produce a NoOp one anyway to preserve the usual flow of the + // plan phase and allow it to produce a complete plan. + change = &plans.ResourceInstanceChange{ + Addr: absAddr, + ProviderAddr: n.ProviderAddr, + Change: plans.Change{ + Action: plans.NoOp, + Before: newVal, + After: newVal, + }, + } + state := &states.ResourceInstanceObject{ + Value: change.After, + Status: states.ObjectReady, // because we completed the read from the provider + Dependencies: n.Dependencies, + } + + err = ctx.Hook(func(h Hook) (HookAction, error) { + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) + }) + if err != nil { + return nil, err + } + + if n.OutputChange != nil { + *n.OutputChange = change + } + if n.OutputValue != nil { + *n.OutputValue = change.After + } + if n.OutputConfigValue != nil { + *n.OutputConfigValue = configVal + } if n.OutputState != nil { - state := &InstanceState{} *n.OutputState = state - - // Apply the diff to the returned state, so the state includes - // any attribute values that are not computed. 
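The conformance checks above (`newVal.Type().TestConformance(schema.ImpliedType())`) are how a provider's returned object is validated against the schema-implied type before it can be stored. A minimal sketch of that go-cty call, with made-up attribute names and the canonical `github.com/zclconf/go-cty` import path:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The type the schema implies (stand-in for schema.ImpliedType()).
	want := cty.Object(map[string]cty.Type{
		"id":   cty.String,
		"tags": cty.Map(cty.String),
	})

	// A value a buggy provider might return: "tags" has the wrong type.
	got := cty.ObjectVal(map[string]cty.Value{
		"id":   cty.StringVal("i-abc123"),
		"tags": cty.ListValEmpty(cty.String),
	})

	// TestConformance yields one error per mismatch; the code above wraps
	// each into a "Provider produced invalid object" diagnostic.
	for _, err := range got.Type().TestConformance(want) {
		fmt.Println(err)
	}
}
```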
- if !diff.Empty() && n.OutputState != nil { - *n.OutputState = state.MergeDiff(diff) - } } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalReadDataApply is an EvalNode implementation that executes a data // resource's ReadDataApply method to read data from the data source. type EvalReadDataApply struct { - Provider *ResourceProvider - Output **InstanceState - Diff **InstanceDiff - Info *InstanceInfo + Addr addrs.ResourceInstance + Provider *providers.Interface + ProviderAddr addrs.AbsProviderConfig + ProviderSchema **ProviderSchema + Output **states.ResourceInstanceObject + Config *configs.Resource + Change **plans.ResourceInstanceChange + StateReferences []addrs.Referenceable } func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { - // TODO: test provider := *n.Provider - diff := *n.Diff + change := *n.Change + providerSchema := *n.ProviderSchema + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics // If the diff is for *destroying* this resource then we'll // just drop its state and move on, since data resources don't // support an actual "destroy" action. - if diff != nil && diff.GetDestroy() { + if change != nil && change.Action == plans.Delete { if n.Output != nil { *n.Output = nil } @@ -113,27 +339,56 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { err := ctx.Hook(func(h Hook) (HookAction, error) { // We don't have a state yet, so we'll just give the hook an // empty one to work with. - return h.PreRefresh(n.Info, &InstanceState{}) + return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType)) }) if err != nil { return nil, err } - state, err := provider.ReadDataApply(n.Info, diff) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err) + resp := provider.ReadDataSource(providers.ReadDataSourceRequest{ + TypeName: n.Addr.Resource.Type, + Config: change.After, + }) + diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config)) + if diags.HasErrors() { + return nil, diags.Err() + } + + schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type) + } + + newVal := resp.State + for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s. 
The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() } err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) + return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal) }) if err != nil { return nil, err } if n.Output != nil { - *n.Output = state + *n.Output = &states.ResourceInstanceObject{ + Value: newVal, + Status: states.ObjectReady, + Dependencies: n.StateReferences, + } } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go index fa2b8126..4dfb5b4e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go @@ -3,53 +3,104 @@ package terraform import ( "fmt" "log" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // EvalRefresh is an EvalNode implementation that does a refresh for // a resource. type EvalRefresh struct { - Provider *ResourceProvider - State **InstanceState - Info *InstanceInfo - Output **InstanceState + Addr addrs.ResourceInstance + ProviderAddr addrs.AbsProviderConfig + Provider *providers.Interface + ProviderSchema **ProviderSchema + State **states.ResourceInstanceObject + Output **states.ResourceInstanceObject } // TODO: test func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { - provider := *n.Provider state := *n.State + absAddr := n.Addr.Absolute(ctx.Path()) + + var diags tfdiags.Diagnostics // If we have no state, we don't do any refreshing if state == nil { - log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) - return nil, nil + log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path())) + return nil, diags.ErrWithWarnings() + } + + schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Should be caught during validation, so we don't bother with a pretty error here + return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type) } // Call pre-refresh hook err := ctx.Hook(func(h Hook) (HookAction, error) { - return h.PreRefresh(n.Info, state) + return h.PreRefresh(absAddr, states.CurrentGen, state.Value) }) if err != nil { - return nil, err + return nil, diags.ErrWithWarnings() } // Refresh! - state, err = provider.Refresh(n.Info, state) - if err != nil { - return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) + priorVal := state.Value + req := providers.ReadResourceRequest{ + TypeName: n.Addr.Resource.Type, + PriorState: priorVal, + Private: state.Private, } + provider := *n.Provider + resp := provider.ReadResource(req) + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.Err() + } + + if resp.NewState == cty.NilVal { + // This ought not to happen in real cases since it's not possible to + // send NilVal over the plugin RPC channel, but it can come up in + // tests due to sloppy mocking. 
+ panic("new state is cty.NilVal") + } + + for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider produced invalid object", + fmt.Sprintf( + "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.", + n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err), + ), + )) + } + if diags.HasErrors() { + return nil, diags.Err() + } + + newState := state.DeepCopy() + newState.Value = resp.NewState + newState.Private = resp.Private + // Call post-refresh hook err = ctx.Hook(func(h Hook) (HookAction, error) { - return h.PostRefresh(n.Info, state) + return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value) }) if err != nil { return nil, err } if n.Output != nil { - *n.Output = state + *n.Output = newState } - return nil, nil + return nil, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go index 82d81782..3485e4f1 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go @@ -1,22 +1,37 @@ package terraform +import ( + "github.com/hashicorp/terraform/tfdiags" +) + // EvalSequence is an EvalNode that evaluates in sequence. type EvalSequence struct { Nodes []EvalNode } func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + for _, n := range n.Nodes { if n == nil { continue } if _, err := EvalRaw(n, ctx); err != nil { - return nil, err + if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit { + // In this path we abort early, losing any non-error + // diagnostics we saw earlier. + return nil, err + } + diags = diags.Append(err) + if diags.HasErrors() { + // Halt if we get some errors, but warnings are okay. + break + } } } - return nil, nil + return nil, diags.ErrWithWarnings() } // EvalNodeFilterable impl. diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go index 126a0e63..d506ce3f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go @@ -1,90 +1,150 @@ package terraform -import "fmt" +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) // EvalReadState is an EvalNode implementation that reads the -// primary InstanceState for a specific resource out of the state. +// current object for a specific instance in the state. type EvalReadState struct { - Name string - Output **InstanceState + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance + + // ProviderSchema is the schema for the provider given in Provider. + ProviderSchema **ProviderSchema + + // Provider is the provider that will subsequently perform actions on + // the the state object. This is used to perform any schema upgrades + // that might be required to prepare the stored data for use. 
+	Provider *providers.Interface
+
+	// Output will be written with a pointer to the retrieved object.
+	Output **states.ResourceInstanceObject
 }

 func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
-	return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
-		return rs.Primary, nil
-	})
+	if n.Provider == nil || *n.Provider == nil {
+		panic("EvalReadState used with no Provider object")
+	}
+	if n.ProviderSchema == nil || *n.ProviderSchema == nil {
+		panic("EvalReadState used with no ProviderSchema object")
+	}
+
+	absAddr := n.Addr.Absolute(ctx.Path())
+	log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr)
+
+	src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen)
+	if src == nil {
+		// Presumably we only have deposed objects, then.
+		log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr)
+		return nil, nil
+	}
+
+	schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
+	if schema == nil {
+		// Shouldn't happen since we should've failed long ago if no schema is present
+		return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr)
+	}
+	var diags tfdiags.Diagnostics
+	src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion)
+	if diags.HasErrors() {
+		// Note that we don't have any channel to return warnings here. We'll
+		// accept that for now since warnings during a schema upgrade would
+		// be pretty weird anyway, since this operation is supposed to seem
+		// invisible to the user.
+		return nil, diags.Err()
+	}
+
+	obj, err := src.Decode(schema.ImpliedType())
+	if err != nil {
+		return nil, err
+	}
+
+	if n.Output != nil {
+		*n.Output = obj
+	}
+	return obj, nil
 }

 // EvalReadStateDeposed is an EvalNode implementation that reads the
 // deposed InstanceState for a specific resource out of the state
 type EvalReadStateDeposed struct {
-	Name   string
-	Output **InstanceState
-	// Index indicates which instance in the Deposed list to target, or -1 for
-	// the last item.
-	Index int
+	// Addr is the address of the instance to read state for.
+	Addr addrs.ResourceInstance
+
+	// Key identifies which deposed object we will read.
+	Key states.DeposedKey
+
+	// ProviderSchema is the schema for the provider given in Provider.
+	ProviderSchema **ProviderSchema
+
+	// Provider is the provider that will subsequently perform actions on
+	// the state object. This is used to perform any schema upgrades
+	// that might be required to prepare the stored data for use.
+	Provider *providers.Interface
+
+	// Output will be written with a pointer to the retrieved object.
+	Output **states.ResourceInstanceObject
 }

 func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
-	return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
-		// Get the index.
If it is negative, then we get the last one - idx := n.Index - if idx < 0 { - idx = len(rs.Deposed) - 1 - } - if idx >= 0 && idx < len(rs.Deposed) { - return rs.Deposed[idx], nil - } else { - return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs) - } - }) -} + if n.Provider == nil || *n.Provider == nil { + panic("EvalReadStateDeposed used with no Provider object") + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + panic("EvalReadStateDeposed used with no ProviderSchema object") + } -// Does the bulk of the work for the various flavors of ReadState eval nodes. -// Each node just provides a reader function to get from the ResourceState to the -// InstanceState, and this takes care of all the plumbing. -func readInstanceFromState( - ctx EvalContext, - resourceName string, - output **InstanceState, - readerFn func(*ResourceState) (*InstanceState, error), -) (*InstanceState, error) { - state, lock := ctx.State() - - // Get a read lock so we can access this instance - lock.RLock() - defer lock.RUnlock() - - // Look for the module state. If we don't have one, then it doesn't matter. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - return nil, nil + key := n.Key + if key == states.NotDeposed { + return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported") } + absAddr := n.Addr.Absolute(ctx.Path()) + log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key) - // Look for the resource state. If we don't have one, then it is okay. - rs := mod.Resources[resourceName] - if rs == nil { + src := ctx.State().ResourceInstanceObject(absAddr, key) + if src == nil { + // Presumably we only have deposed objects, then. + log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key) return nil, nil } - // Use the delegate function to get the instance state from the resource state - is, err := readerFn(rs) + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // Shouldn't happen since we should've failed long ago if no schema is present + return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr) + } + var diags tfdiags.Diagnostics + src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion) + if diags.HasErrors() { + // Note that we don't have any channel to return warnings here. We'll + // accept that for now since warnings during a schema upgrade would + // be pretty weird anyway, since this operation is supposed to seem + // invisible to the user. + return nil, diags.Err() + } + + obj, err := src.Decode(schema.ImpliedType()) if err != nil { return nil, err } - - // Write the result to the output pointer - if output != nil { - *output = is + if n.Output != nil { + *n.Output = obj } - - return is, nil + return obj, nil } -// EvalRequireState is an EvalNode implementation that early exits -// if the state doesn't have an ID. +// EvalRequireState is an EvalNode implementation that exits early if the given +// object is null. 
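EvalRequireState, declared just below, aborts the remaining steps for an instance by returning `EvalEarlyExitError{}`, which the new EvalSequence code earlier treats as a sentinel rather than a real failure. A self-contained Go sketch that loosely mirrors this pattern using a standard-library sentinel error (the names are hypothetical, not Terraform's):

```go
package main

import (
	"errors"
	"fmt"
)

// errEarlyExit plays the role of EvalEarlyExitError: "stop this node's
// remaining steps, but nothing actually went wrong".
var errEarlyExit = errors.New("early exit")

// runSequence runs steps in order, stopping silently on the sentinel but
// propagating any real error, roughly as EvalSequence and its callers do.
func runSequence(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			if errors.Is(err, errEarlyExit) {
				return nil // the sentinel is not treated as a failure
			}
			return err
		}
	}
	return nil
}

func main() {
	err := runSequence([]func() error{
		func() error { fmt.Println("read state"); return nil },
		func() error { return errEarlyExit }, // e.g. object is null: nothing to do
		func() error { fmt.Println("never reached"); return nil },
	})
	fmt.Println("sequence error:", err) // <nil>
}
```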
 type EvalRequireState struct {
-	State **InstanceState
+	State **states.ResourceInstanceObject
 }

 func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
@@ -93,7 +153,7 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
 	}

 	state := *n.State
-	if state == nil || state.ID == "" {
+	if state == nil || state.Value.IsNull() {
 		return nil, EvalEarlyExitError{}
 	}

@@ -105,12 +165,14 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
 type EvalUpdateStateHook struct{}

 func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
-	state, lock := ctx.State()
-
-	// Get a full lock. Even calling something like WriteState can modify
-	// (prune) the state, so we need the full lock.
-	lock.Lock()
-	defer lock.Unlock()
+	// In principle we could grab the lock here just long enough to take a
+	// deep copy and then pass that to our hooks below, but we'll instead
+	// hold the lock for the duration to avoid the potentially confusing
+	// situation of us racing to call PostStateUpdate concurrently with
+	// different state snapshots.
+	stateSync := ctx.State()
+	state := stateSync.Lock().DeepCopy()
+	defer stateSync.Unlock()

 	// Call the hook
 	err := ctx.Hook(func(h Hook) (HookAction, error) {
@@ -123,202 +185,285 @@ func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
 	return nil, nil
 }

-// EvalWriteState is an EvalNode implementation that writes the
-// primary InstanceState for a specific resource into the state.
+// EvalWriteState is an EvalNode implementation that saves the given object
+// as the current object for the selected resource instance.
 type EvalWriteState struct {
-	Name         string
-	ResourceType string
-	Provider     string
-	Dependencies []string
-	State        **InstanceState
+	// Addr is the address of the instance to read state for.
+	Addr addrs.ResourceInstance
+
+	// State is the object state to save.
+	State **states.ResourceInstanceObject
+
+	// ProviderSchema is the schema for the provider given in ProviderAddr.
+	ProviderSchema **ProviderSchema
+
+	// ProviderAddr is the address of the provider configuration that
+	// produced the given object.
+	ProviderAddr addrs.AbsProviderConfig
 }

 func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
-	return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
-		func(rs *ResourceState) error {
-			rs.Primary = *n.State
-			return nil
-		},
-	)
+	if n.State == nil {
+		// Note that a pointer _to_ nil is valid here, indicating the total
+		// absence of an object as we'd see during destroy.
+		panic("EvalWriteState used with no ResourceInstanceObject")
+	}
+
+	absAddr := n.Addr.Absolute(ctx.Path())
+	state := ctx.State()
+
+	if n.ProviderAddr.ProviderConfig.Type == "" {
+		return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr)
+	}
+
+	obj := *n.State
+	if obj == nil || obj.Value.IsNull() {
+		// No need to encode anything: we'll just write it directly.
+ state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr) + log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr) + return nil, nil + } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + // Should never happen, unless our state object is nil + panic("EvalWriteState used with pointer to nil ProviderSchema object") + } + + if obj != nil { + log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr) + } else { + log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr) + } + + schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource()) + if schema == nil { + // It shouldn't be possible to get this far in any real scenario + // without a schema, but we might end up here in contrived tests that + // fail to set up their world properly. + return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr) + } + src, err := obj.Encode(schema.ImpliedType(), currentVersion) + if err != nil { + return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err) + } + + state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr) + return nil, nil } // EvalWriteStateDeposed is an EvalNode implementation that writes // an InstanceState out to the Deposed list of a resource in the state. type EvalWriteStateDeposed struct { - Name string - ResourceType string - Provider string - Dependencies []string - State **InstanceState - // Index indicates which instance in the Deposed list to target, or -1 to append. - Index int -} + // Addr is the address of the instance to read state for. + Addr addrs.ResourceInstance -func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { - return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, - func(rs *ResourceState) error { - if n.Index == -1 { - rs.Deposed = append(rs.Deposed, *n.State) - } else { - rs.Deposed[n.Index] = *n.State - } - return nil - }, - ) -} + // Key indicates which deposed object to write to. + Key states.DeposedKey -// Pulls together the common tasks of the EvalWriteState nodes. All the args -// are passed directly down from the EvalNode along with a `writer` function -// which is yielded the *ResourceState and is responsible for writing an -// InstanceState to the proper field in the ResourceState. -func writeInstanceToState( - ctx EvalContext, - resourceName string, - resourceType string, - provider string, - dependencies []string, - writerFn func(*ResourceState) error, -) (*InstanceState, error) { - state, lock := ctx.State() - if state == nil { - return nil, fmt.Errorf("cannot write state to nil state") - } - - // Get a write lock so we can access this instance - lock.Lock() - defer lock.Unlock() - - // Look for the module state. If we don't have one, create it. - mod := state.ModuleByPath(ctx.Path()) - if mod == nil { - mod = state.AddModule(ctx.Path()) - } - - // Look for the resource state. - rs := mod.Resources[resourceName] - if rs == nil { - rs = &ResourceState{} - rs.init() - mod.Resources[resourceName] = rs - } - rs.Type = resourceType - rs.Dependencies = dependencies - rs.Provider = provider - - if err := writerFn(rs); err != nil { - return nil, err - } + // State is the object state to save. + State **states.ResourceInstanceObject - return nil, nil -} + // ProviderSchema is the schema for the provider given in ProviderAddr. 
+	ProviderSchema **ProviderSchema

-// EvalClearPrimaryState is an EvalNode implementation that clears the primary
-// instance from a resource state.
-type EvalClearPrimaryState struct {
-	Name string
+	// ProviderAddr is the address of the provider configuration that
+	// produced the given object.
+	ProviderAddr addrs.AbsProviderConfig
 }

-func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
-	state, lock := ctx.State()
+func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+	if n.State == nil {
+		// Note that a pointer _to_ nil is valid here, indicating the total
+		// absence of an object as we'd see during destroy.
+		panic("EvalWriteStateDeposed used with no ResourceInstanceObject")
+	}

-	// Get a read lock so we can access this instance
-	lock.RLock()
-	defer lock.RUnlock()
+	absAddr := n.Addr.Absolute(ctx.Path())
+	key := n.Key
+	state := ctx.State()

-	// Look for the module state. If we don't have one, then it doesn't matter.
-	mod := state.ModuleByPath(ctx.Path())
-	if mod == nil {
-		return nil, nil
+	if key == states.NotDeposed {
+		// should never happen
+		return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr)
 	}

-	// Look for the resource state. If we don't have one, then it is okay.
-	rs := mod.Resources[n.Name]
-	if rs == nil {
+	obj := *n.State
+	if obj == nil {
+		// No need to encode anything: we'll just write it directly.
+		state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr)
+		log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key)
 		return nil, nil
 	}
+	if n.ProviderSchema == nil || *n.ProviderSchema == nil {
+		// Should never happen, unless our state object is nil
+		panic("EvalWriteStateDeposed used with no ProviderSchema object")
+	}

-	// Clear primary from the resource state
-	rs.Primary = nil
+	schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
+	if schema == nil {
+		// It shouldn't be possible to get this far in any real scenario
+		// without a schema, but we might end up here in contrived tests that
+		// fail to set up their world properly.
+		return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
+	}
+	src, err := obj.Encode(schema.ImpliedType(), currentVersion)
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
+	}
+	log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key)
+	state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr)
 	return nil, nil
 }

-// EvalDeposeState is an EvalNode implementation that takes the primary
-// out of a state and makes it Deposed. This is done at the beginning of
-// create-before-destroy calls so that the create can create while preserving
-// the old state of the to-be-destroyed resource.
+// EvalDeposeState is an EvalNode implementation that moves the current object
+// for the given instance to instead be a deposed object, leaving the instance
+// with no current object.
+// This is used at the beginning of a create-before-destroy replace action so
+// that the create can create while preserving the old state of the
+// to-be-destroyed object.
 type EvalDeposeState struct {
-	Name string
+	Addr addrs.ResourceInstance
+
+	// ForceKey, if a value other than states.NotDeposed, will be used as the
+	// key for the newly-created deposed object that results from this action.
+	// If set to states.NotDeposed (the zero value), a new unique key will be
+	// allocated.
+	ForceKey states.DeposedKey
+
+	// OutputKey, if non-nil, will be written with the deposed object key that
+	// was generated for the object. This can then be passed to
+	// EvalUndeposeState.Key so it knows which deposed instance to forget.
+	OutputKey *states.DeposedKey
 }

 // TODO: test
 func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
-	state, lock := ctx.State()
-
-	// Get a read lock so we can access this instance
-	lock.RLock()
-	defer lock.RUnlock()
-
-	// Look for the module state. If we don't have one, then it doesn't matter.
-	mod := state.ModuleByPath(ctx.Path())
-	if mod == nil {
-		return nil, nil
-	}
-
-	// Look for the resource state. If we don't have one, then it is okay.
-	rs := mod.Resources[n.Name]
-	if rs == nil {
-		return nil, nil
+	absAddr := n.Addr.Absolute(ctx.Path())
+	state := ctx.State()
+
+	var key states.DeposedKey
+	if n.ForceKey == states.NotDeposed {
+		key = state.DeposeResourceInstanceObject(absAddr)
+	} else {
+		key = n.ForceKey
+		state.DeposeResourceInstanceObjectForceKey(absAddr, key)
 	}
+	log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)

-	// If we don't have a primary, we have nothing to depose
-	if rs.Primary == nil {
-		return nil, nil
+	if n.OutputKey != nil {
+		*n.OutputKey = key
 	}

-	// Depose
-	rs.Deposed = append(rs.Deposed, rs.Primary)
-	rs.Primary = nil
-
 	return nil, nil
 }

-// EvalUndeposeState is an EvalNode implementation that reads the
-// InstanceState for a specific resource out of the state.
-type EvalUndeposeState struct {
-	Name  string
-	State **InstanceState
+// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
+// restore a particular deposed object of the specified resource instance
+// to be the "current" object if and only if the instance doesn't currently
+// have a current object.
+//
+// This is intended for use when the create leg of a create before destroy
+// fails with no partial new object: if we didn't take any action, the user
+// would be left in the unfortunate situation of having no current object
+// and the previously-working object now deposed. This EvalNode causes a
+// better outcome by restoring things to how they were before the replace
+// operation began.
+//
+// The create operation may have produced a partial result even though it
+// failed and it's important that we don't "forget" that state, so in that
+// situation the prior object remains deposed and the partial new object
+// remains the current object, allowing the situation to hopefully be
+// improved in a subsequent run.
+type EvalMaybeRestoreDeposedObject struct {
+	Addr addrs.ResourceInstance
+
+	// Key is a pointer to the deposed object key that should be forgotten
+	// from the state, which must be non-nil.
+	Key *states.DeposedKey
 }

 // TODO: test
-func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
-	state, lock := ctx.State()
+func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
+	absAddr := n.Addr.Absolute(ctx.Path())
+	dk := *n.Key
+	state := ctx.State()
+
+	restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk)
+	if restored {
+		log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk)
+	} else {
+		log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk)
+	}

-	// Get a read lock so we can access this instance
-	lock.RLock()
-	defer lock.RUnlock()
+	return nil, nil
+}

-	// Look for the module state. If we don't have one, then it doesn't matter.
-	mod := state.ModuleByPath(ctx.Path())
-	if mod == nil {
-		return nil, nil
-	}
+// EvalWriteResourceState is an EvalNode implementation that ensures that
+// a suitable resource-level state record is present in the state, if that's
+// required for the "each mode" of that resource.
+//
+// This is important primarily for the situation where count = 0, since this
+// eval is the only chance we get to set the resource "each mode" to list
+// in that case, allowing expression evaluation to see it as a zero-element
+// list rather than as not set at all.
+type EvalWriteResourceState struct {
+	Addr         addrs.Resource
+	Config       *configs.Resource
+	ProviderAddr addrs.AbsProviderConfig
+}

-	// Look for the resource state. If we don't have one, then it is okay.
-	rs := mod.Resources[n.Name]
-	if rs == nil {
-		return nil, nil
+// TODO: test
+func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
+	var diags tfdiags.Diagnostics
+	absAddr := n.Addr.Absolute(ctx.Path())
+	state := ctx.State()
+
+	count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
+	diags = diags.Append(countDiags)
+	if countDiags.HasErrors() {
+		return nil, diags.Err()
 	}

-	// If we don't have any desposed resource, then we don't have anything to do
-	if len(rs.Deposed) == 0 {
-		return nil, nil
+	// Currently we only support NoEach and EachList, because for_each support
+	// is not fully wired up across Terraform. Once for_each support is added,
+	// we'll need to handle that here too, setting states.EachMap if the
+	// assigned expression is a map.
+	eachMode := states.NoEach
+	if count >= 0 { // -1 signals "count not set"
+		eachMode = states.EachList
 	}

-	// Undepose
-	idx := len(rs.Deposed) - 1
-	rs.Primary = rs.Deposed[idx]
-	rs.Deposed[idx] = *n.State
+	// This method takes care of all of the business logic of updating this
+	// while ensuring that any existing instances are preserved, etc.
+	state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr)
+
+	return nil, nil
+}
+
+// EvalForgetResourceState is an EvalNode implementation that prunes out an
+// empty resource-level state for a given resource address, or produces an
+// error if it isn't empty after all.
+//
+// This should be the last action taken for a resource that has been removed
+// from the configuration altogether, to clean up the leftover husk of the
+// resource in the state after other EvalNodes have destroyed and removed
+// all of the instances and instance objects beneath it.
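The count-to-each-mode mapping in EvalWriteResourceState above is small but easy to misread: the count expression evaluates to -1 when `count` is not set at all, and a count of 0 still selects list mode so expressions see an empty list. A toy sketch of just that decision, with stand-in names rather than the real `states` package types:

```go
package main

import "fmt"

// EachMode stands in for states.NoEach / states.EachList.
type EachMode int

const (
	NoEach EachMode = iota
	EachList
)

// eachModeForCount mirrors the logic above: -1 means "count not set",
// while any non-negative count (including zero) selects list mode.
func eachModeForCount(count int) EachMode {
	if count >= 0 {
		return EachList
	}
	return NoEach
}

func main() {
	fmt.Println(eachModeForCount(-1)) // 0 (NoEach): count not set at all
	fmt.Println(eachModeForCount(0))  // 1 (EachList): zero-element list
	fmt.Println(eachModeForCount(3))  // 1 (EachList)
}
```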
+type EvalForgetResourceState struct { + Addr addrs.Resource +} + +func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) { + absAddr := n.Addr.Absolute(ctx.Path()) + state := ctx.State() + + pruned := state.RemoveResourceIfEmpty(absAddr) + if !pruned { + // If this produces an error, it indicates a bug elsewhere in Terraform + // -- probably missing graph nodes, graph edges, or + // incorrectly-implemented evaluation steps. + return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr) + } + log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr) return nil, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go new file mode 100644 index 00000000..e1940005 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go @@ -0,0 +1,106 @@ +package terraform + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// UpgradeResourceState will, if necessary, run the provider-defined upgrade +// logic against the given state object to make it compliant with the +// current schema version. This is a no-op if the given state object is +// already at the latest version. +// +// If any errors occur during upgrade, error diagnostics are returned. In that +// case it is not safe to proceed with using the original state object. +func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) { + if addr.Resource.Resource.Mode != addrs.ManagedResourceMode { + // We only do state upgrading for managed resources. + return src, nil + } + + stateIsFlatmap := len(src.AttrsJSON) == 0 + + providerType := addr.Resource.Resource.DefaultProviderConfig().Type + if src.SchemaVersion > currentVersion { + log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion) + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource instance managed by newer provider version", + // This is not a very good error message, but we don't retain enough + // information in state to give good feedback on what provider + // version might be required here. :( + fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType), + )) + return nil, diags + } + + // If we get down here then we need to upgrade the state, with the + // provider's help. + // If this state was originally created by a version of Terraform prior to + // v0.12, this also includes translating from legacy flatmap to new-style + // representation, since only the provider has enough information to + // understand a flatmap built against an older schema. 
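The version gate that follows in UpgradeResourceState has three outcomes: a hard error when the state was written by a newer provider (no downgrades), a flatmap-based upgrade request for pre-0.12 state, and a JSON-based request otherwise. A compressed, standalone sketch of that branching, with hypothetical names standing in for the real request fields:

```go
package main

import "fmt"

// planUpgrade condenses the gate above: stateVersion and currentVersion
// play the roles of src.SchemaVersion and the provider's current version,
// and attrsJSON stands in for src.AttrsJSON (empty means legacy flatmap).
func planUpgrade(stateVersion, currentVersion uint64, attrsJSON []byte) (string, error) {
	if stateVersion > currentVersion {
		// State from a newer provider can't be downgraded.
		return "", fmt.Errorf("state schema version %d is newer than provider version %d; upgrade the provider", stateVersion, currentVersion)
	}
	if len(attrsJSON) == 0 {
		// Pre-0.12 flatmap state: only the provider can interpret it.
		return "send RawStateFlatmap to provider.UpgradeResourceState", nil
	}
	if stateVersion != currentVersion {
		return "send RawStateJSON to provider for a real upgrade", nil
	}
	return "send RawStateJSON for minor fixups only", nil
}

func main() {
	fmt.Println(planUpgrade(2, 1, []byte(`{"id":"x"}`))) // error: downgrade
	fmt.Println(planUpgrade(0, 1, nil))                  // flatmap path
	fmt.Println(planUpgrade(1, 1, []byte(`{"id":"x"}`))) // fixups-only path
}
```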
+ if src.SchemaVersion != currentVersion { + log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType) + } else { + log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType) + } + + req := providers.UpgradeResourceStateRequest{ + TypeName: addr.Resource.Resource.Type, + + // TODO: The internal schema version representations are all using + // uint64 instead of int64, but unsigned integers aren't friendly + // to all protobuf target languages so in practice we use int64 + // on the wire. In future we will change all of our internal + // representations to int64 too. + Version: int64(src.SchemaVersion), + } + + if stateIsFlatmap { + req.RawStateFlatmap = src.AttrsFlat + } else { + req.RawStateJSON = src.AttrsJSON + } + + resp := provider.UpgradeResourceState(req) + diags := resp.Diagnostics + if diags.HasErrors() { + return nil, diags + } + + // After upgrading, the new value must conform to the current schema. When + // going over RPC this is actually already ensured by the + // marshaling/unmarshaling of the new value, but we'll check it here + // anyway for robustness, e.g. for in-process providers. + newValue := resp.UpgradedState + if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 { + for _, err := range errs { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid resource state upgrade", + fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)), + )) + } + return nil, diags + } + + new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion)) + if err != nil { + // We already checked for type conformance above, so getting into this + // codepath should be rare and is probably a bug somewhere under CompleteUpgrade. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Failed to encode result of resource state upgrade", + fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)), + )) + } + return new, diags +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go index a2c122d6..3edbee40 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go @@ -2,148 +2,543 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + "github.com/zclconf/go-cty/cty/gocty" ) -// EvalValidateError is the error structure returned if there were -// validation errors. -type EvalValidateError struct { - Warnings []string - Errors []error -} - -func (e *EvalValidateError) Error() string { - return fmt.Sprintf("Warnings: %s. 
Errors: %s", e.Warnings, e.Errors) -} - // EvalValidateCount is an EvalNode implementation that validates // the count of a resource. type EvalValidateCount struct { - Resource *config.Resource + Resource *configs.Resource } // TODO: test func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics var count int - var errs []error var err error - if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil { - errs = append(errs, fmt.Errorf( - "Failed to interpolate count: %s", err)) + + val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { goto RETURN } - - count, err = n.Resource.Count() - if err != nil { - // If we can't get the count during validation, then - // just replace it with the number 1. - c := n.Resource.RawCount.Config() - c[n.Resource.RawCount.Key] = "1" - count = 1 + if val.IsNull() || !val.IsKnown() { + goto RETURN } - err = nil - if count < 0 { - errs = append(errs, fmt.Errorf( - "Count is less than zero: %d", count)) + err = gocty.FromCtyValue(val, &count) + if err != nil { + // The EvaluateExpr call above already guaranteed us a number value, + // so if we end up here then we have something that is out of range + // for an int, and the error message will include a description of + // the valid range. + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err), + Subject: n.Resource.Count.Range().Ptr(), + }) + } else if count < 0 { + rawVal := val.AsBigFloat() + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count value", + Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal), + Subject: n.Resource.Count.Range().Ptr(), + }) } RETURN: - if len(errs) != 0 { - err = &EvalValidateError{ - Errors: errs, - } - } - return nil, err + return nil, diags.NonFatalErr() } // EvalValidateProvider is an EvalNode implementation that validates -// the configuration of a resource. +// a provider configuration. type EvalValidateProvider struct { - Provider *ResourceProvider - Config **ResourceConfig + Addr addrs.ProviderConfig + Provider *providers.Interface + Config *configs.Provider } func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics provider := *n.Provider - config := *n.Config - warns, errs := provider.Validate(config) - if len(warns) == 0 && len(errs) == 0 { - return nil, nil + configBody := buildProviderConfig(ctx, n.Addr, n.Config) + + resp := provider.GetSchema() + diags = diags.Append(resp.Diagnostics) + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } + + configSchema := resp.Provider.Block + if configSchema == nil { + // Should never happen in real code, but often comes up in tests where + // mock schemas are being used that tend to be incomplete. 
+ log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr) + configSchema = &configschema.Block{} } - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, + configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey) + diags = diags.Append(evalDiags) + if evalDiags.HasErrors() { + return nil, diags.NonFatalErr() } + + req := providers.PrepareProviderConfigRequest{ + Config: configVal, + } + + validateResp := provider.PrepareProviderConfig(req) + diags = diags.Append(validateResp.Diagnostics) + + return nil, diags.NonFatalErr() } // EvalValidateProvisioner is an EvalNode implementation that validates -// the configuration of a resource. +// the configuration of a provisioner belonging to a resource. The provisioner +// config is expected to contain the merged connection configurations. type EvalValidateProvisioner struct { - Provisioner *ResourceProvisioner - Config **ResourceConfig + ResourceAddr addrs.Resource + Provisioner *provisioners.Interface + Schema **configschema.Block + Config *configs.Provisioner + ResourceHasCount bool } func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { provisioner := *n.Provisioner config := *n.Config - warns, errs := provisioner.Validate(config) - if len(warns) == 0 && len(errs) == 0 { - return nil, nil + schema := *n.Schema + + var diags tfdiags.Diagnostics + + { + // Validate the provisioner's own config first + + configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema) + diags = diags.Append(configDiags) + if configDiags.HasErrors() { + return nil, diags.Err() + } + + if configVal == cty.NilVal { + // Should never happen for a well-behaved EvaluateBlock implementation + return nil, fmt.Errorf("EvaluateBlock returned nil value") + } + + req := provisioners.ValidateProvisionerConfigRequest{ + Config: configVal, + } + + resp := provisioner.ValidateProvisionerConfig(req) + diags = diags.Append(resp.Diagnostics) + } + + { + // Now validate the connection config, which contains the merged bodies + // of the resource and provisioner connection blocks. + connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr) + diags = diags.Append(connDiags) + } + + return nil, diags.NonFatalErr() +} + +func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics { + // We can't comprehensively validate the connection config since its + // final structure is decided by the communicator and we can't instantiate + // that until we have a complete instance state. However, we *can* catch + // configuration keys that are not valid for *any* communicator, catching + // typos early rather than waiting until we actually try to run one of + // the resource's provisioners. + + var diags tfdiags.Diagnostics + + if config == nil || config.Config == nil { + // No block to validate + return diags } - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, + // We evaluate here just by evaluating the block and returning any + // diagnostics we get, since evaluation alone is enough to check for + // extraneous arguments and incorrectly-typed arguments. 
+ _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema) + diags = diags.Append(configDiags) + + return diags +} + +func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) { + keyData := EvalDataForNoInstanceKey + selfAddr := n.ResourceAddr.Instance(addrs.NoKey) + + if n.ResourceHasCount { + // For a resource that has count, we allow count.index but don't + // know at this stage what it will return. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // "self" can't point to an unknown key, but we'll force it to be + // key 0 here, which should return an unknown value of the + // expected type since none of these elements are known at this + // point anyway. + selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0)) } + + return ctx.EvaluateBlock(body, schema, selfAddr, keyData) +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. Once that is done, we can remove +// this and use a type-specific schema from the communicator to validate +// exactly what is expected for a given connection type. +var connectionBlockSupersetSchema = &configschema.Block{ + Attributes: map[string]*configschema.Attribute{ + // NOTE: "type" is not included here because it's treated special + // by the config loader and stored away in a separate field. + + // Common attributes for both connection types + "host": { + Type: cty.String, + Required: true, + }, + "type": { + Type: cty.String, + Optional: true, + }, + "user": { + Type: cty.String, + Optional: true, + }, + "password": { + Type: cty.String, + Optional: true, + }, + "port": { + Type: cty.String, + Optional: true, + }, + "timeout": { + Type: cty.String, + Optional: true, + }, + "script_path": { + Type: cty.String, + Optional: true, + }, + + // For type=ssh only (enforced in ssh communicator) + "private_key": { + Type: cty.String, + Optional: true, + }, + "certificate": { + Type: cty.String, + Optional: true, + }, + "host_key": { + Type: cty.String, + Optional: true, + }, + "agent": { + Type: cty.Bool, + Optional: true, + }, + "agent_identity": { + Type: cty.String, + Optional: true, + }, + "bastion_host": { + Type: cty.String, + Optional: true, + }, + "bastion_host_key": { + Type: cty.String, + Optional: true, + }, + "bastion_port": { + Type: cty.Number, + Optional: true, + }, + "bastion_user": { + Type: cty.String, + Optional: true, + }, + "bastion_password": { + Type: cty.String, + Optional: true, + }, + "bastion_private_key": { + Type: cty.String, + Optional: true, + }, + + // For type=winrm only (enforced in winrm communicator) + "https": { + Type: cty.Bool, + Optional: true, + }, + "insecure": { + Type: cty.Bool, + Optional: true, + }, + "cacert": { + Type: cty.String, + Optional: true, + }, + "use_ntlm": { + Type: cty.Bool, + Optional: true, + }, + }, +} + +// connectionBlockSupersetSchema is a schema representing the superset of all +// possible arguments for "connection" blocks across all supported connection +// types. +// +// This currently lives here because we've not yet updated our communicator +// subsystem to be aware of schema itself. It's exported only for use in the +// configs/configupgrade package and should not be used from anywhere else. 
+// The caller may not modify any part of the returned schema data structure. +func ConnectionBlockSupersetSchema() *configschema.Block { + return connectionBlockSupersetSchema } // EvalValidateResource is an EvalNode implementation that validates // the configuration of a resource. type EvalValidateResource struct { - Provider *ResourceProvider - Config **ResourceConfig - ResourceName string - ResourceType string - ResourceMode config.ResourceMode + Addr addrs.Resource + Provider *providers.Interface + ProviderSchema **ProviderSchema + Config *configs.Resource // IgnoreWarnings means that warnings will not be passed through. This allows // "just-in-time" passes of validation to continue execution through warnings. IgnoreWarnings bool + + // ConfigVal, if non-nil, will be updated with the value resulting from + // evaluating the given configuration body. Since validation is performed + // very early, this value is likely to contain lots of unknown values, + // but its type will conform to the schema of the resource type associated + // with the resource instance being validated. + ConfigVal *cty.Value } func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr) + } + + var diags tfdiags.Diagnostics provider := *n.Provider cfg := *n.Config - var warns []string - var errs []error + schema := *n.ProviderSchema + mode := cfg.Mode + + keyData := EvalDataForNoInstanceKey + if n.Config.Count != nil { + // If the config block has count, we'll evaluate with an unknown + // number as count.index so we can still type check even though + // we won't expand count until the plan phase. + keyData = InstanceKeyEvalData{ + CountIndex: cty.UnknownVal(cty.Number), + } + + // Basic type-checking of the count argument. More complete validation + // of this will happen when we DynamicExpand during the plan walk. + countDiags := n.validateCount(ctx, n.Config.Count) + diags = diags.Append(countDiags) + } + + for _, traversal := range n.Config.DependsOn { + ref, refDiags := addrs.ParseRef(traversal) + diags = diags.Append(refDiags) + if !refDiags.HasErrors() && len(ref.Remaining) != 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid depends_on reference", + Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.", + Subject: ref.Remaining.SourceRange().Ptr(), + }) + } + + // The ref must also refer to something that exists. To test that, + // we'll just eval it and count on the fact that our evaluator will + // detect references to non-existent objects. + if !diags.HasErrors() { + scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey) + if scope != nil { // sometimes nil in tests, due to incomplete mocks + _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType) + diags = diags.Append(refDiags) + } + } + } + // Provider entry point varies depending on resource mode, because // managed resources and data resources are two distinct concepts // in the provider abstraction. 
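The depends_on loop above enforces that references name whole objects, i.e. ref.Remaining must be empty after parsing. A self-contained sketch of that rule, assuming the pre-0.12 hcl2 and vendored terraform package layout used throughout this diff:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/terraform/addrs"
)

// wholeObject is a hypothetical helper reporting whether a reference names a
// whole object, i.e. no attribute steps remain after parsing the address.
func wholeObject(expr string) bool {
	trav, diags := hclsyntax.ParseTraversalAbs([]byte(expr), "", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		return false
	}
	ref, refDiags := addrs.ParseRef(trav)
	return !refDiags.HasErrors() && len(ref.Remaining) == 0
}

func main() {
	fmt.Println(wholeObject("aws_instance.example"))    // true: whole resource
	fmt.Println(wholeObject("aws_instance.example.id")) // false: attribute
}
```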
- switch n.ResourceMode { - case config.ManagedResourceMode: - warns, errs = provider.ValidateResource(n.ResourceType, cfg) - case config.DataResourceMode: - warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) - } + switch mode { + case addrs.ManagedResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource type", + Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks + for _, traversal := range cfg.Managed.IgnoreChanges { + moreDiags := schema.StaticValidateTraversal(traversal) + diags = diags.Append(moreDiags) + } + } - // If the resource name doesn't match the name regular - // expression, show an error. - if !config.NameRegexp.Match([]byte(n.ResourceName)) { - errs = append(errs, fmt.Errorf( - "%s: resource name can only contain letters, numbers, "+ - "dashes, and underscores.", n.ResourceName)) + req := providers.ValidateResourceTypeConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateResourceTypeConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) + + if n.ConfigVal != nil { + *n.ConfigVal = configVal + } + + case addrs.DataResourceMode: + schema, _ := schema.SchemaForResourceType(mode, cfg.Type) + if schema == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid data source", + Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type), + Subject: &cfg.TypeRange, + }) + return nil, diags.Err() + } + + configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData) + diags = diags.Append(valDiags) + if valDiags.HasErrors() { + return nil, diags.Err() + } + + req := providers.ValidateDataSourceConfigRequest{ + TypeName: cfg.Type, + Config: configVal, + } + + resp := provider.ValidateDataSourceConfig(req) + diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config)) } - if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { + if n.IgnoreWarnings { + // If we _only_ have warnings then we'll return nil. + if diags.HasErrors() { + return nil, diags.NonFatalErr() + } return nil, nil + } else { + // We'll return an error if there are any diagnostics at all, even if + // some of them are warnings. + return nil, diags.NonFatalErr() + } +} + +func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics { + if expr == nil { + return nil + } + + var diags tfdiags.Diagnostics + + countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil) + diags = diags.Append(countDiags) + if diags.HasErrors() { + return diags + } + + if countVal.IsNull() { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is null. 
An integer is required.`, + Subject: expr.Range().Ptr(), + }) + return diags } - return nil, &EvalValidateError{ - Warnings: warns, - Errors: errs, + var err error + countVal, err = convert.Convert(countVal, cty.Number) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags } + + // If the value isn't known then that's the best we can do for now, but + // we'll check more thoroughly during the plan walk. + if !countVal.IsKnown() { + return diags + } + + // If we _do_ know the value, then we can do a few more checks here. + var count int + err = gocty.FromCtyValue(countVal, &count) + if err != nil { + // Isn't a whole number, etc. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err), + Subject: expr.Range().Ptr(), + }) + return diags + } + + if count < 0 { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid count argument", + Detail: `The given "count" argument value is unsuitable: count cannot be negative.`, + Subject: expr.Range().Ptr(), + }) + return diags + } + + return diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go index ae4436a2..edd604fa 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go @@ -3,72 +3,65 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/tfdiags" ) -// EvalValidateResourceSelfRef is an EvalNode implementation that validates that -// a configuration doesn't contain a reference to the resource itself. -// -// This must be done prior to interpolating configuration in order to avoid -// any infinite loop scenarios. -type EvalValidateResourceSelfRef struct { - Addr **ResourceAddress - Config **config.RawConfig +// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that +// expressions within a particular referenceable block do not reference that +// same block. +type EvalValidateSelfRef struct { + Addr addrs.Referenceable + Config hcl.Body + ProviderSchema **ProviderSchema } -func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) { - addr := *n.Addr - conf := *n.Config - - // Go through the variables and find self references - var errs []error - for k, raw := range conf.Variables { - rv, ok := raw.(*config.ResourceVariable) - if !ok { - continue - } - - // Build an address from the variable - varAddr := &ResourceAddress{ - Path: addr.Path, - Mode: rv.Mode, - Type: rv.Type, - Name: rv.Name, - Index: rv.Index, - InstanceType: TypePrimary, - } +func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) { + var diags tfdiags.Diagnostics + addr := n.Addr - // If the variable access is a multi-access (*), then we just - // match the index so that we'll match our own addr if everything - // else matches.
- if rv.Multi && rv.Index == -1 { - varAddr.Index = addr.Index - } + addrStrs := make([]string, 0, 1) + addrStrs = append(addrStrs, addr.String()) + switch tAddr := addr.(type) { + case addrs.ResourceInstance: + // A resource instance may not refer to its containing resource either. + addrStrs = append(addrStrs, tAddr.ContainingResource().String()) + } - // This is a weird thing where ResourceAddres has index "-1" when - // index isn't set at all. This means index "0" for resource access. - // So, if we have this scenario, just set our varAddr to -1 so it - // matches. - if addr.Index == -1 && varAddr.Index == 0 { - varAddr.Index = -1 - } + if n.ProviderSchema == nil || *n.ProviderSchema == nil { + return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr) + } - // If the addresses match, then this is a self reference - if varAddr.Equals(addr) && varAddr.Index == addr.Index { - errs = append(errs, fmt.Errorf( - "%s: self reference not allowed: %q", - addr, k)) - } + providerSchema := *n.ProviderSchema + var schema *configschema.Block + switch tAddr := addr.(type) { + case addrs.Resource: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr) + case addrs.ResourceInstance: + schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource()) } - // If no errors, no errors! - if len(errs) == 0 { - return nil, nil + if schema == nil { + return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr) } - // Wrap the errors in the proper wrapper so we can handle validation - // formatting properly upstream. - return nil, &EvalValidateError{ - Errors: errs, + refs, _ := lang.ReferencesInBlock(n.Config, schema) + for _, ref := range refs { + for _, addrStr := range addrStrs { + if ref.Subject.String() == addrStr { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Self-referential block", + Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + } + } } + + return nil, diags.NonFatalErr() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go index 47bd2ea2..68adf764 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go @@ -4,12 +4,17 @@ import ( "fmt" "log" "reflect" - "strconv" "strings" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/helper/hilmapstructure" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" ) // EvalTypeCheckVariable is an EvalNode which ensures that the variable @@ -93,157 +98,88 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } -// EvalSetVariables is an EvalNode implementation that sets the variables -// explicitly for interpolation later. 
-type EvalSetVariables struct { - Module *string - Variables map[string]interface{} +// EvalSetModuleCallArguments is an EvalNode implementation that sets values +// for arguments of a child module call, for later retrieval during +// expression evaluation. +type EvalSetModuleCallArguments struct { + Module addrs.ModuleCallInstance + Values map[string]cty.Value } // TODO: test -func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { - ctx.SetVariables(*n.Module, n.Variables) +func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) { + ctx.SetModuleCallArguments(n.Module, n.Values) return nil, nil } -// EvalVariableBlock is an EvalNode implementation that evaluates the -// given configuration, and uses the final values as a way to set the -// mapping. -type EvalVariableBlock struct { - Config **ResourceConfig - VariableValues map[string]interface{} +// EvalModuleCallArgument is an EvalNode implementation that produces the value +// for a particular variable as will be used by a child module instance. +// +// The result is written into the map given in Values, with its key +// set to the local name of the variable, disregarding the module instance +// address. Any existing values in that map are deleted first. This weird +// interface is a result of trying to be convenient for use with +// EvalContext.SetModuleCallArguments, which expects a map to merge in with +// any existing arguments. +type EvalModuleCallArgument struct { + Addr addrs.InputVariable + Config *configs.Variable + Expr hcl.Expression + + // If this flag is set, any diagnostics are discarded and this operation + // will always succeed, though may produce an unknown value in the + // event of an error. + IgnoreDiagnostics bool + + Values map[string]cty.Value } -// TODO: test -func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { +func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) { // Clear out the existing mapping - for k, _ := range n.VariableValues { - delete(n.VariableValues, k) - } - - // Get our configuration - rc := *n.Config - for k, v := range rc.Config { - var vString string - if err := hilmapstructure.WeakDecode(v, &vString); err == nil { - n.VariableValues[k] = vString - continue - } - - var vMap map[string]interface{} - if err := hilmapstructure.WeakDecode(v, &vMap); err == nil { - n.VariableValues[k] = vMap - continue - } - - var vSlice []interface{} - if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil { - n.VariableValues[k] = vSlice - continue - } - - return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) - } - - for _, path := range rc.ComputedKeys { - log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path) - err := n.setUnknownVariableValueForPath(path) - if err != nil { - return nil, err - } - } - - return nil, nil -} - -func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { - pathComponents := strings.Split(path, ".") - - if len(pathComponents) < 1 { - return fmt.Errorf("No path comoponents in %s", path) - } - - if len(pathComponents) == 1 { - // Special case the "top level" since we know the type - if _, ok := n.VariableValues[pathComponents[0]]; !ok { - n.VariableValues[pathComponents[0]] = config.UnknownVariableValue - } - return nil - } - - // Otherwise find the correct point in the tree and then set to unknown - var current interface{} = n.VariableValues[pathComponents[0]] - for i := 1; i < len(pathComponents); i++ { - switch 
current.(type) { - case []interface{}, []map[string]interface{}: - tCurrent := current.([]interface{}) - index, err := strconv.Atoi(pathComponents[i]) - if err != nil { - return fmt.Errorf("Cannot convert %s to slice index in path %s", - pathComponents[i], path) - } - current = tCurrent[index] - case map[string]interface{}: - tCurrent := current.(map[string]interface{}) - if val, hasVal := tCurrent[pathComponents[i]]; hasVal { - current = val - continue - } - - tCurrent[pathComponents[i]] = config.UnknownVariableValue - break - } + for k := range n.Values { + delete(n.Values, k) } - return nil -} - -// EvalCoerceMapVariable is an EvalNode implementation that recognizes a -// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a -// bare map literal is indistinguishable from a list of maps w/ one element. -// -// We take all the same inputs as EvalTypeCheckVariable above, since we need -// both the target type and the proposed value in order to properly coerce. -type EvalCoerceMapVariable struct { - Variables map[string]interface{} - ModulePath []string - ModuleTree *module.Tree -} - -// Eval implements the EvalNode interface. See EvalCoerceMapVariable for -// details. -func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { - currentTree := n.ModuleTree - for _, pathComponent := range n.ModulePath[1:] { - currentTree = currentTree.Children()[pathComponent] + wantType := n.Config.Type + name := n.Addr.Name + expr := n.Expr + + if expr == nil { + // Should never happen, but we'll bail out early here rather than + // crash in case it does. We set no value at all in this case, + // making a subsequent call to EvalContext.SetModuleCallArguments + // a no-op. + log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String()) + return nil, nil } - targetConfig := currentTree.Config() - prototypes := make(map[string]config.VariableType) - for _, variable := range targetConfig.Variables { - prototypes[variable.Name] = variable.Type() + val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil) + + // We intentionally passed DynamicPseudoType to EvaluateExpr above because + // now we can do our own local type conversion and produce an error message + // with better context if it fails. + var convErr error + val, convErr = convert.Convert(val, wantType) + if convErr != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for module argument", + Detail: fmt.Sprintf( + "The given value is not suitable for child module variable %q defined at %s: %s.", + name, n.Config.DeclRange.String(), convErr, + ), + Subject: expr.Range().Ptr(), + }) + // We'll return a placeholder unknown value to avoid producing + // redundant downstream errors. 
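The conversion step above for module call arguments is plain cty type conversion; both the success and failure cases can be seen in a minimal program (canonical import paths shown):

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

func main() {
	// A string that looks like a number converts successfully.
	v, err := convert.Convert(cty.StringVal("5"), cty.Number)
	fmt.Printf("%#v, err=%v\n", v, err) // cty.NumberIntVal(5), err=<nil>

	// An incompatible value yields the error that the
	// "Invalid value for module argument" diagnostic wraps.
	_, err = convert.Convert(cty.ListValEmpty(cty.String), cty.Number)
	fmt.Println("conversion failed:", err != nil) // true
}
```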
+ val = cty.UnknownVal(wantType) } - for name, declaredType := range prototypes { - if declaredType != config.VariableTypeMap { - continue - } - - proposedValue, ok := n.Variables[name] - if !ok { - continue - } - - if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { - if m, ok := list[0].(map[string]interface{}); ok { - log.Printf("[DEBUG] EvalCoerceMapVariable: "+ - "Coercing single element list into map: %#v", m) - n.Variables[name] = m - } - } + n.Values[name] = val + if n.IgnoreDiagnostics { + return nil, nil } - - return nil, nil + return nil, diags.ErrWithWarnings() } // hclTypeName returns the name of the type that would represent this value in diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go index 00392efe..6b4df67a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go @@ -1,41 +1,34 @@ package terraform import ( - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/providers" ) // ProviderEvalTree returns the evaluation tree for initializing and // configuring providers. -func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { - var provider ResourceProvider - var resourceConfig *ResourceConfig +func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode { + var provider providers.Interface + + addr := n.Addr + relAddr := addr.ProviderConfig seq := make([]EvalNode, 0, 5) - seq = append(seq, &EvalInitProvider{Name: n}) + seq = append(seq, &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + }) // Input stuff seq = append(seq, &EvalOpFilter{ - Ops: []walkOperation{walkInput, walkImport}, + Ops: []walkOperation{walkImport}, Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Addr: addr, Output: &provider, }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalInputProvider{ - Name: n, - Provider: &provider, - Config: &resourceConfig, - }, }, }, }) @@ -45,25 +38,13 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Addr: addr, Output: &provider, }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, &EvalValidateProvider{ + Addr: relAddr, Provider: &provider, - Config: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, + Config: config, }, }, }, @@ -75,22 +56,9 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n, + Addr: addr, Output: &provider, }, - &EvalInterpolate{ - Config: config, - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n, - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n, - Config: &resourceConfig, - }, }, }, }) @@ -102,8 +70,9 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { Node: &EvalSequence{ Nodes: []EvalNode{ &EvalConfigProvider{ - Provider: n, - 
Config: &resourceConfig, + Addr: relAddr, + Provider: &provider, + Config: config, }, }, }, @@ -114,6 +83,6 @@ func ProviderEvalTree(n string, config *config.RawConfig) EvalNode { // CloseProviderEvalTree returns the evaluation tree for closing // provider connections that aren't needed anymore. -func CloseProviderEvalTree(n string) EvalNode { - return &EvalCloseProvider{Name: n} +func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode { + return &EvalCloseProvider{Addr: addr.ProviderConfig} } diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go new file mode 100644 index 00000000..11a0dac8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go @@ -0,0 +1,933 @@ +package terraform + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strconv" + "sync" + + "github.com/agext/levenshtein" + "github.com/hashicorp/hcl2/hcl" + "github.com/zclconf/go-cty/cty" + "github.com/zclconf/go-cty/cty/convert" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// Evaluator provides the necessary contextual data for evaluating expressions +// for a particular walk operation. +type Evaluator struct { + // Operation defines what type of operation this evaluator is being used + // for. + Operation walkOperation + + // Meta is contextual metadata about the current operation. + Meta *ContextMeta + + // Config is the root node in the configuration tree. + Config *configs.Config + + // VariableValues is a map from variable names to their associated values, + // within the module indicated by ModulePath. VariableValues is modified + // concurrently, and so it must be accessed only while holding + // VariableValuesLock. + // + // The first map level is string representations of addr.ModuleInstance + // values, while the second level is variable names. + VariableValues map[string]map[string]cty.Value + VariableValuesLock *sync.Mutex + + // Schemas is a repository of all of the schemas we should need to + // evaluate expressions. This must be constructed by the caller to + // include schemas for all of the providers, resource types, data sources + // and provisioners used by the given configuration and state. + // + // This must not be mutated during evaluation. + Schemas *Schemas + + // State is the current state, embedded in a wrapper that ensures that + // it can be safely accessed and modified concurrently. + State *states.SyncState + + // Changes is the set of proposed changes, embedded in a wrapper that + // ensures they can be safely accessed and modified concurrently. + Changes *plans.ChangesSync +} + +// Scope creates an evaluation scope for the given module path and optional +// resource. +// +// If the "self" argument is nil then the "self" object is not available +// in evaluated expressions. Otherwise, it behaves as an alias for the given +// address. +func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope { + return &lang.Scope{ + Data: data, + SelfAddr: self, + PureOnly: e.Operation != walkApply && e.Operation != walkDestroy, + BaseDir: ".", // Always current working directory for now. 
+ } +} + +// evaluationStateData is an implementation of lang.Data that resolves +// references primarily (but not exclusively) using information from a State. +type evaluationStateData struct { + Evaluator *Evaluator + + // ModulePath is the path through the dynamic module tree to the module + // that references will be resolved relative to. + ModulePath addrs.ModuleInstance + + // InstanceKeyData describes the values, if any, that are accessible due + // to repetition of a containing object using "count" or "for_each" + // arguments. (It is _not_ used for the for_each inside "dynamic" blocks, + // since the user specifies in that case which variable name to locally + // shadow.) + InstanceKeyData InstanceKeyEvalData + + // Operation records the type of walk the evaluationStateData is being used + // for. + Operation walkOperation +} + +// InstanceKeyEvalData is used during evaluation to specify which values, +// if any, should be produced for count.index, each.key, and each.value. +type InstanceKeyEvalData struct { + // CountIndex is the value for count.index, or cty.NilVal if evaluating + // in a context where the "count" argument is not active. + // + // For correct operation, this should always be of type cty.Number if not + // nil. + CountIndex cty.Value + + // EachKey and EachValue are the values for each.key and each.value + // respectively, or cty.NilVal if evaluating in a context where the + // "for_each" argument is not active. These must either both be set + // or neither set. + // + // For correct operation, EachKey must always be either of type cty.String + // or cty.Number if not nil. + EachKey, EachValue cty.Value +} + +// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for +// evaluating in a context that has the given instance key. +func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData { + // At the moment we don't actually implement for_each, so we only + // ever populate CountIndex. + // (When we implement for_each later we may need to reorganize this some, + // so that we can resolve the ambiguity that an int key may either be + // a count.index or an each.key where for_each is over a list.) + + var countIdx cty.Value + if intKey, ok := key.(addrs.IntKey); ok { + countIdx = cty.NumberIntVal(int64(intKey)) + } + + return InstanceKeyEvalData{ + CountIndex: countIdx, + } +} + +// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance +// key values at all, suitable for use in contexts where no keyed instance +// is relevant. +var EvalDataForNoInstanceKey = InstanceKeyEvalData{} + +// evaluationStateData must implement lang.Data +var _ lang.Data = (*evaluationStateData)(nil) + +func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "index": + idxVal := d.InstanceKeyData.CountIndex + if idxVal == cty.NilVal { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to "count" in non-counted context`, + Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`), + Subject: rng.ToHCL().Ptr(), + }) + return cty.UnknownVal(cty.Number), diags + } + return idxVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "count" attribute`, + Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. 
The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Variables[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Variables { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } else { + suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared input variable`, + Detail: fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + wantType := cty.DynamicPseudoType + if config.Type != cty.NilType { + wantType = config.Type + } + + d.Evaluator.VariableValuesLock.Lock() + defer d.Evaluator.VariableValuesLock.Unlock() + + // During the validate walk, input variables are always unknown so + // that we are validating the configuration for all possible input values + // rather than for a specific set. Checking against a specific set of + // input values then happens during the plan walk. + // + // This is important because otherwise the validation walk will tend to be + // overly strict, requiring expressions throughout the configuration to + // be complicated to accommodate all possible inputs, whereas returning + // known here allows for simpler patterns like using input values as + // guards to broadly enable/disable resources, avoid processing things + // that are disabled, etc. Terraform's static validation leans towards + // being liberal in what it accepts because the subsequent plan walk has + // more information available and so can be more conservative. + if d.Operation == walkValidate { + return cty.UnknownVal(wantType), diags + } + + moduleAddrStr := d.ModulePath.String() + vals := d.Evaluator.VariableValues[moduleAddrStr] + if vals == nil { + return cty.UnknownVal(wantType), diags + } + + val, isSet := vals[addr.Name] + if !isSet { + if config.Default != cty.NilVal { + return config.Default, diags + } + return cty.UnknownVal(wantType), diags + } + + var err error + val, err = convert.Convert(val, wantType) + if err != nil { + // We should never get here because this problem should've been caught + // during earlier validation, but we'll do something reasonable anyway. 
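The validate-walk behavior described above rests on a core cty property: unknown values keep their type and propagate through operations, so configurations can be type-checked without concrete inputs. For example:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// What an input variable looks like during the validate walk.
	count := cty.UnknownVal(cty.Number)

	// Operations on unknowns succeed and stay unknown, but keep their type,
	// which is what lets validation proceed without concrete values.
	sum := count.Add(cty.NumberIntVal(1))
	fmt.Println(sum.IsKnown())                 // false
	fmt.Println(sum.Type().Equals(cty.Number)) // true
}
```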
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Incorrect variable type`, + Detail: fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err), + Subject: &config.DeclRange, + }) + // Stub out our return value so that the semantic checker doesn't + // produce redundant downstream errors. + val = cty.UnknownVal(wantType) + } + + return val, diags +} + +func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // First we'll make sure the requested value is declared in configuration, + // so we can produce a nice message if not. + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath)) + } + + config := moduleConfig.Module.Locals[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Locals { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared local value`, + Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath)) + if val == cty.NilVal { + // Not evaluated yet? + val = cty.DynamicVal + } + + return val, diags +} + +func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + moduleAddr := addr.ModuleInstance(d.ModulePath) + + // We'll consult the configuration to see what output names we are + // expecting, so we can ensure the resulting object is of the expected + // type even if our data is incomplete for some reason. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since this should've been caught during + // static validation. + panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr)) + } + outputConfigs := moduleConfig.Module.Outputs + + vals := map[string]cty.Value{} + for n := range outputConfigs { + addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr) + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err)) + vals[n] = cty.DynamicVal + continue + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. 
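GetModuleInstance above builds a single object value whose attributes are the child module's outputs, preferring a planned "after" value over state. The resulting shape is simply cty.ObjectVal over per-output values; the output names below are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Hypothetical outputs of a child module: one resolved from state, one
	// still unknown because its planned value isn't decided yet.
	vals := map[string]cty.Value{
		"vpc_id":  cty.StringVal("vpc-123"),
		"subnets": cty.UnknownVal(cty.List(cty.String)),
	}

	// This object is what an expression like module.network evaluates to.
	mod := cty.ObjectVal(vals)
	fmt.Println(mod.Type().IsObjectType()) // true
}
```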
+ vals[n] = change.After + } else { + os := d.Evaluator.State.OutputValue(addr) + if os == nil { + // Not evaluated yet? + vals[n] = cty.DynamicVal + continue + } + vals[n] = os.Value + } + } + return cty.ObjectVal(vals), diags +} + +func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Output results live in the module that declares them, which is one of + // the child module instances of our current module path. + absAddr := addr.AbsOutputValue(d.ModulePath) + moduleAddr := absAddr.Module + + // First we'll consult the configuration to see if an output of this + // name is declared at all. + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // this doesn't happen in normal circumstances due to our validation + // pass, but it can turn up in some unusual situations, like in the + // "terraform console" repl where arbitrary expressions can be + // evaluated. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`The configuration contains no %s.`, moduleAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + config := moduleConfig.Module.Outputs[addr.Name] + if config == nil { + var suggestions []string + for k := range moduleConfig.Module.Outputs { + suggestions = append(suggestions, k) + } + suggestion := nameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared output value`, + Detail: fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // If a pending change is present in our current changeset then its value + // takes priority over what's in state. (It will usually be the same but + // will differ if the new value is unknown during planning.) + if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil { + change, err := changeSrc.Decode() + if err != nil { + // This should happen only if someone has tampered with a plan + // file, so we won't bother with a pretty error for it. + diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err)) + return cty.DynamicVal, diags + } + // We care only about the "after" value, which is the value this output + // will take on after the plan is applied. + return change.After, diags + } + + os := d.Evaluator.State.OutputValue(absAddr) + if os == nil { + // Not evaluated yet? 
+ return cty.DynamicVal, diags + } + + return os.Value, diags +} + +func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "cwd": + wd, err := os.Getwd() + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Failed to get working directory`, + Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + return cty.StringVal(filepath.ToSlash(wd)), diags + + case "module": + moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath)) + } + sourceDir := moduleConfig.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + case "root": + sourceDir := d.Evaluator.Config.Module.SourceDir + return cty.StringVal(filepath.ToSlash(sourceDir)), diags + + default: + suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"}) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "path" attribute`, + Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + // Although we are giving a ResourceInstance address here, if it has + // a key of addrs.NoKey then it might actually be a request for all of + // the instances of a particular resource. The reference resolver can't + // resolve the ambiguity itself, so we must do it in here. + + // First we'll consult the configuration to see if a resource of this + // name is declared at all. + moduleAddr := d.ModulePath + moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr) + if moduleConfig == nil { + // should never happen, since we can't be evaluating in a module + // that wasn't mentioned in configuration. + panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr)) + } + + config := moduleConfig.Module.ResourceByAddr(addr.ContainingResource()) + if config == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Resource.Type, addr.Resource.Name, moduleDisplayAddr(moduleAddr)), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + // Next we'll find the state for the resource as a whole, and decide + // from there whether we're going to interpret the given address as a + // resource or a resource instance address.
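The ambiguity handled above comes from the address model itself: an unindexed reference parses as a ResourceInstance with addrs.NoKey, which may mean either the whole resource or its single instance. A small sketch against the vendored addrs package (canonical import path shown; the resource name is hypothetical):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	r := addrs.Resource{
		Mode: addrs.ManagedResourceMode,
		Type: "aws_instance",
		Name: "example",
	}

	// Without an index the instance key is NoKey: ambiguous between the
	// resource as a whole and its single unkeyed instance.
	fmt.Println(r.Instance(addrs.NoKey))     // aws_instance.example
	fmt.Println(r.Instance(addrs.IntKey(2))) // aws_instance.example[2]
}
```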
+ rs := d.Evaluator.State.Resource(addr.ContainingResource().Absolute(d.ModulePath)) + + if rs == nil { + schema := d.getResourceSchema(addr.ContainingResource(), config.ProviderConfigAddr().Absolute(d.ModulePath)) + + // If it doesn't exist at all then we can't reliably determine whether + // single-instance or whole-resource interpretation was intended, but + // we can decide this partially... + if addr.Key != addrs.NoKey { + // If there's an instance key then the user must be intending + // single-instance interpretation, and so we can return a + // properly-typed unknown value to help with type checking. + return cty.UnknownVal(schema.ImpliedType()), diags + } + + // otherwise we must return DynamicVal so that both interpretations + // can proceed without generating errors, and we'll deal with this + // in a later step where more information is gathered. + // (In practice we should only end up here during the validate walk, + // since later walks should have at least partial states populated + // for all resources in the configuration.) + return cty.DynamicVal, diags + } + + // Break out early during validation, because resource may not be expanded + // yet and indexed references may show up as invalid. + if d.Operation == walkValidate { + return cty.DynamicVal, diags + } + + schema := d.getResourceSchema(addr.ContainingResource(), rs.ProviderConfig) + + // If we are able to automatically convert to the "right" type of instance + // key for this each mode then we'll do so, to match with how we generally + // treat values elsewhere in the language. This allows code below to + // assume that any possible conversions have already been dealt with and + // just worry about validation. + key := d.coerceInstanceKey(addr.Key, rs.EachMode) + + multi := false + + switch rs.EachMode { + case states.NoEach: + if key != addrs.NoKey { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s does not have either \"count\" or \"for_each\" set, so it cannot be indexed.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachList: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.IntKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a number value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + case states.EachMap: + multi = key == addrs.NoKey + if _, ok := addr.Key.(addrs.IntKey); !multi && !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource index", + Detail: fmt.Sprintf("Resource %s must be indexed with a string value.", addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + } + + if !multi { + log.Printf("[TRACE] GetResourceInstance: %s is a single instance", addr) + is := rs.Instance(key) + if is == nil { + return cty.UnknownVal(schema.ImpliedType()), diags + } + return d.getResourceInstanceSingle(addr, rng, is, config, rs.ProviderConfig) + } + + log.Printf("[TRACE] GetResourceInstance: %s has multiple keyed instances", addr) + return d.getResourceInstancesAll(addr.ContainingResource(), rng, config, rs, rs.ProviderConfig) +} + +func (d *evaluationStateData) getResourceInstanceSingle(addr addrs.ResourceInstance, rng tfdiags.SourceRange, is *states.ResourceInstance, config 
*configs.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr.ContainingResource(), providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + ty := schema.ImpliedType() + if is == nil || is.Current == nil { + // Assume we're dealing with an instance that hasn't been created yet. + return cty.UnknownVal(ty), diags + } + + if is.Current.Status == states.ObjectPlanned { + // If there's a pending change for this instance in our plan, we'll prefer + // that. This is important because the state can't represent unknown values + // and so its data is inaccurate when changes are pending. + if change := d.Evaluator.Changes.GetResourceInstanceChange(addr.Absolute(d.ModulePath), states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + return val, diags + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err), + Subject: &config.DeclRange, + }) + return cty.UnknownVal(ty), diags + } + + return ios.Value, diags +} + +func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + + schema := d.getResourceSchema(addr, providerAddr) + if schema == nil { + // This shouldn't happen, since validation before we get here should've + // taken care of it, but we'll show a reasonable error message anyway. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource type schema`, + Detail: fmt.Sprintf("No schema is available for %s in %s. 
This is a bug in Terraform and should be reported.", addr, providerAddr), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } + + switch rs.EachMode { + + case states.EachList: + // We need to infer the length of our resulting tuple by searching + // for the max IntKey in our instances map. + length := 0 + for k := range rs.Instances { + if ik, ok := k.(addrs.IntKey); ok { + if int(ik) >= length { + length = int(ik) + 1 + } + } + } + + vals := make([]cty.Value, length) + for i := 0; i < length; i++ { + ty := schema.ImpliedType() + key := addrs.IntKey(i) + is, exists := rs.Instances[key] + if exists && is.Current != nil { + instAddr := addr.Instance(key).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale. + if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[i] = ios.Value + } else { + // There shouldn't normally be "gaps" in our list but we'll + // allow it under the assumption that we're in a weird situation + // where e.g. someone has run "terraform state mv" to reorder + // a list and left a hole behind. + vals[i] = cty.UnknownVal(schema.ImpliedType()) + } + } + + // We use a tuple rather than a list here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.TupleVal(vals), diags + + case states.EachMap: + ty := schema.ImpliedType() + vals := make(map[string]cty.Value, len(rs.Instances)) + for k, is := range rs.Instances { + if sk, ok := k.(addrs.StringKey); ok { + instAddr := addr.Instance(k).Absolute(d.ModulePath) + + // Prefer pending value in plan if present. See getResourceInstanceSingle + // comment for the rationale.
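The EachList branch above infers the result tuple's length as one past the largest integer key, so that holes in the sequence become unknown slots instead of shifting later indices. A minimal sketch of just that rule, under assumed simplified inputs:

```go
package main

import "fmt"

// tupleLength infers the length of the result tuple for a "count" resource
// from the integer instance keys present in state: one past the largest key,
// so a gap (e.g. left behind by "terraform state mv") becomes an unknown
// slot rather than renumbering the instances that follow it.
func tupleLength(keys []int) int {
	length := 0
	for _, k := range keys {
		if k >= length {
			length = k + 1
		}
	}
	return length
}

func main() {
	fmt.Println(tupleLength([]int{0, 1, 2})) // 3: dense list
	fmt.Println(tupleLength([]int{0, 2}))    // 3: index 1 is a gap, filled with an unknown
	fmt.Println(tupleLength(nil))            // 0: no instances at all
}
```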
+ if is.Current.Status == states.ObjectPlanned { + if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil { + val, err := change.After.Decode(ty) + if err != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in plan", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = val + continue + } else { + // If the object is in planned status then we should not + // get here, since we should've found a pending value + // in the plan above instead. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Missing pending object in plan", + Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr), + Subject: &config.DeclRange, + }) + continue + } + } + + ios, err := is.Current.Decode(ty) + if err != nil { + // This shouldn't happen, since by the time we get here + // we should've upgraded the state data already. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid resource instance data in state", + Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err), + Subject: &config.DeclRange, + }) + continue + } + vals[string(sk)] = ios.Value + } + } + + // We use an object rather than a map here because resource schemas may + // include dynamically-typed attributes, which will then cause each + // instance to potentially have a different runtime type even though + // they all conform to the static schema. + return cty.ObjectVal(vals), diags + + default: + // Should never happen since caller should deal with other modes + panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode)) + } +} + +func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block { + providerType := providerAddr.ProviderConfig.Type + schemas := d.Evaluator.Schemas + schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + return schema +} + +// coerceInstanceKey attempts to convert the given key to the type expected +// for the given EachMode. +// +// If the key is already of the correct type or if it cannot be converted then +// it is returned verbatim. If conversion is required and possible, the +// converted value is returned. Callers should not try to determine if +// conversion was possible; they should instead just check if the result is of +// the expected type.
+func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey { + if key == addrs.NoKey { + // An absent key can't be converted + return key + } + + switch mode { + case states.NoEach: + // No conversions possible at all + return key + case states.EachMap: + if intKey, isInt := key.(addrs.IntKey); isInt { + return addrs.StringKey(strconv.Itoa(int(intKey))) + } + return key + case states.EachList: + if strKey, isStr := key.(addrs.StringKey); isStr { + i, err := strconv.Atoi(string(strKey)) + if err != nil { + return key + } + return addrs.IntKey(i) + } + return key + default: + return key + } +} + +func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics + switch addr.Name { + + case "workspace": + workspaceName := d.Evaluator.Meta.Env + return cty.StringVal(workspaceName), diags + + case "env": + // Prior to Terraform 0.12 there was an attribute "env", which was + // an alias name for "workspace". This was deprecated and is now + // removed. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was renamed to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`, + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + + default: + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "terraform" attribute`, + Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name), + Subject: rng.ToHCL().Ptr(), + }) + return cty.DynamicVal, diags + } +} + +// nameSuggestion tries to find a name from the given slice of suggested names +// that is close to the given name and returns it if found. If no suggestion +// is close enough, returns the empty string. +// +// The suggestions are tried in order, so earlier suggestions take precedence +// if the given string is similar to two or more suggestions. +// +// This function is intended to be used with a relatively-small number of +// suggestions. It's not optimized for hundreds or thousands of them. +func nameSuggestion(given string, suggestions []string) string { + for _, suggestion := range suggestions { + dist := levenshtein.Distance(given, suggestion, nil) + if dist < 3 { // threshold determined experimentally + return suggestion + } + } + return "" +} + +// moduleDisplayAddr returns a string describing the given module instance +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise.
+func moduleDisplayAddr(addr addrs.ModuleInstance) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go new file mode 100644 index 00000000..4255102d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go @@ -0,0 +1,299 @@ +package terraform + +import ( + "fmt" + "sort" + + "github.com/hashicorp/hcl2/hcl" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/helper/didyoumean" + "github.com/hashicorp/terraform/tfdiags" +) + +// StaticValidateReferences checks the given references against schemas and +// other statically-checkable rules, producing error diagnostics if any +// problems are found. +// +// If this method returns errors for a particular reference then evaluating +// that reference is likely to generate a very similar error, so callers should +// not run this method and then also evaluate the source expression(s) and +// merge the two sets of diagnostics together, since this will result in +// confusing redundant errors. +// +// This method can find more errors than can be found by evaluating an +// expression with a partially-populated scope, since it checks the referenced +// names directly against the schema rather than relying on evaluation errors. +// +// The result may include warning diagnostics if, for example, deprecated +// features are referenced. +func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + for _, ref := range refs { + moreDiags := d.staticValidateReference(ref, self) + diags = diags.Append(moreDiags) + } + return diags +} + +func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics { + modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath) + if modCfg == nil { + // This is a bug in the caller rather than a problem with the + // reference, but rather than crashing out here in an unhelpful way + // we'll just ignore it and trust a different layer to catch it. + return nil + } + + if ref.Subject == addrs.Self { + // The "self" address is a special alias for the address given as + // our self parameter here, if present. + if self == nil { + var diags tfdiags.Diagnostics + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid "self" reference`, + // This detail message mentions some current practice that + // this codepath doesn't really "know about". If the "self" + // object starts being supported in more contexts later then + // we'll need to adjust this message. + Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`, + Subject: ref.SourceRange.ToHCL().Ptr(), + }) + return diags + } + + synthRef := *ref // shallow copy + synthRef.Subject = self + ref = &synthRef + } + + switch addr := ref.Subject.(type) { + + // For static validation we validate both resource and resource instance references the same way. + // We mostly disregard the index, though we do some simple validation of + // its _presence_ in staticValidateSingleResourceReference and + // staticValidateMultiResourceReference respectively. 
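Both `nameSuggestion` above and the `didyoumean.NameSuggestion` helper used later in this file reduce to an edit-distance check against a small threshold. A self-contained sketch of the idea, with a hand-rolled Levenshtein in place of the vendored agext/levenshtein dependency:

```go
package main

import "fmt"

// levenshtein computes edit distance with a simple two-row dynamic program.
func levenshtein(a, b string) int {
	prev := make([]int, len(b)+1)
	curr := make([]int, len(b)+1)
	for j := range prev {
		prev[j] = j
	}
	for i := 1; i <= len(a); i++ {
		curr[0] = i
		for j := 1; j <= len(b); j++ {
			cost := 1
			if a[i-1] == b[j-1] {
				cost = 0
			}
			curr[j] = min(prev[j]+1, min(curr[j-1]+1, prev[j-1]+cost))
		}
		prev, curr = curr, prev
	}
	return prev[len(b)]
}

func min(x, y int) int {
	if x < y {
		return x
	}
	return y
}

// suggest returns the first candidate within distance 3, the same
// experimentally-chosen threshold the evaluator uses.
func suggest(given string, candidates []string) string {
	for _, c := range candidates {
		if levenshtein(given, c) < 3 {
			return c
		}
	}
	return ""
}

func main() {
	fmt.Println(suggest("modle", []string{"cwd", "module", "root"})) // "module"
}
```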
+ case addrs.Resource: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + return diags + case addrs.ResourceInstance: + var diags tfdiags.Diagnostics + diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange)) + diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange)) + return diags + + // We also handle all module call references the same way, disregarding index. + case addrs.ModuleCall: + return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallInstance: + return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange) + case addrs.ModuleCallOutput: + // This one is a funny one because we will take the output name referenced + // and use it to fake up a "remaining" that would make sense for the + // module call itself, rather than for the specific output, and then + // we can just re-use our static module call validation logic. + remain := make(hcl.Traversal, len(ref.Remaining)+1) + copy(remain[1:], ref.Remaining) + remain[0] = hcl.TraverseAttr{ + Name: addr.Name, + + // Using the whole reference as the source range here doesn't exactly + // match how HCL would normally generate an attribute traversal, + // but is close enough for our purposes. + SrcRange: ref.SourceRange.ToHCL(), + } + return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange) + + default: + // Anything else we'll just permit through without any static validation + // and let it be caught during dynamic evaluation, in evaluate.go. + return nil + } +} + +func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + // If we have at least one step in "remain" and this resource has + // "count" set then we know for sure this is invalid because we have + // something like: + // aws_instance.foo.bar + // ...when we really need + // aws_instance.foo[count.index].bar + + // It is _not_ safe to do this check when remain is empty, because that + // would also match aws_instance.foo[count.index].bar due to `count.index` + // not being statically-resolvable as part of a reference, and match + // direct references to the whole aws_instance.foo tuple. + if len(remain) == 0 { + return nil + } + + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then.
+ return diags + } + + if cfg.Count != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + if cfg.ForEach != nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Missing resource instance key`, + Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr), + Subject: rng.ToHCL().Ptr(), + }) + } + + return diags +} + +func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource()) + if cfg == nil { + // We'll just bail out here and catch this in our subsequent call to + // staticValidateResourceReference, then. + return diags + } + + if addr.Key == addrs.NoKey { + // This is a different path into staticValidateSingleResourceReference + return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng) + } else { + if cfg.Count == nil && cfg.ForEach == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Unexpected resource instance key`, + Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()), + Subject: rng.ToHCL().Ptr(), + }) + } + } + + return diags +} + +func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + var modeAdjective string + switch addr.Mode { + case addrs.ManagedResourceMode: + modeAdjective = "managed" + case addrs.DataResourceMode: + modeAdjective = "data" + default: + // should never happen + modeAdjective = "" + } + + cfg := modCfg.Module.ResourceByAddr(addr) + if cfg == nil { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared resource`, + Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // Normally accessing this directly is wrong because it doesn't take into + // account provider inheritance, etc but it's okay here because we're only + // paying attention to the type anyway. + providerType := cfg.ProviderConfigAddr().Type + schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type) + + if schema == nil { + // Prior validation should've taken care of a resource block with an + // unsupported type, so we should never get here but we'll handle it + // here anyway for robustness. 
+ diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource type`, + Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + // As a special case we'll detect attempts to access an attribute called + // "count" and produce a special error for it, since versions of Terraform + // prior to v0.12 offered this as a weird special case that we can no + // longer support. + if len(remain) > 0 { + if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Invalid resource count attribute`, + Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + } + + // If we got this far then we'll try to validate the remaining traversal + // steps against our schema. + moreDiags := schema.StaticValidateTraversal(remain) + diags = diags.Append(moreDiags) + + return diags +} + +func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + // For now, our focus here is just in testing that the referenced module + // call exists. All other validation is deferred until evaluation time. + _, exists := modCfg.Module.ModuleCalls[addr.Name] + if !exists { + var suggestions []string + for name := range modCfg.Module.ModuleCalls { + suggestions = append(suggestions, name) + } + sort.Strings(suggestions) + suggestion := didyoumean.NameSuggestion(addr.Name, suggestions) + if suggestion != "" { + suggestion = fmt.Sprintf(" Did you mean %q?", suggestion) + } + + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: `Reference to undeclared module`, + Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion), + Subject: rng.ToHCL().Ptr(), + }) + return diags + } + + return diags +} + +// moduleConfigDisplayAddr returns a string describing the given module +// address that is appropriate for returning to users in situations where the +// root module is possible. Specifically, it returns "the root module" if the +// root module instance is given, or a string representation of the module +// address otherwise. 
+func moduleConfigDisplayAddr(addr addrs.Module) string { + switch { + case addr.IsRoot(): + return "the root module" + default: + return addr.String() + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/features.go b/vendor/github.com/hashicorp/terraform/terraform/features.go new file mode 100644 index 00000000..97c77bdb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/features.go @@ -0,0 +1,7 @@ +package terraform + +import "os" + +// This file holds feature flags for the next release + +var flagWarnOutputErrors = os.Getenv("TF_WARN_OUTPUT_ERRORS") != "" diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go index 48ce6a33..58d45a7b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go @@ -3,17 +3,13 @@ package terraform import ( "fmt" "log" - "runtime/debug" - "strings" - "github.com/hashicorp/terraform/dag" -) + "github.com/hashicorp/terraform/tfdiags" -// RootModuleName is the name given to the root module implicitly. -const RootModuleName = "root" + "github.com/hashicorp/terraform/addrs" -// RootModulePath is the path for the root module. -var RootModulePath = []string{RootModuleName} + "github.com/hashicorp/terraform/dag" +) // Graph represents the graph that Terraform uses to represent resources // and their dependencies. @@ -23,9 +19,7 @@ type Graph struct { dag.AcyclicGraph // Path is the path in the module tree that this Graph represents. - // The root is represented by a single element list containing - // RootModuleName - Path []string + Path addrs.ModuleInstance // debugName is a name for reference in the debug output. This is usually // to indicate what the topmost builder was, and if this graph is a shadow or @@ -40,71 +34,42 @@ func (g *Graph) DirectedGraph() dag.Grapher { // Walk walks the graph with the given walker for callbacks. The graph // will be walked with full parallelism, so the walker should expect // to be called concurrently. -func (g *Graph) Walk(walker GraphWalker) error { +func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics { return g.walk(walker) } -func (g *Graph) walk(walker GraphWalker) error { +func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics { // The callbacks for enter/exiting a graph ctx := walker.EnterPath(g.Path) defer walker.ExitPath(g.Path) // Get the path for logs - path := strings.Join(ctx.Path(), ".") - - // Determine if our walker is a panic wrapper - panicwrap, ok := walker.(GraphWalkerPanicwrapper) - if !ok { - panicwrap = nil // just to be sure - } + path := ctx.Path().String() debugName := "walk-graph.json" if g.debugName != "" { debugName = g.debugName + "-" + debugName } - debugBuf := dbug.NewFileWriter(debugName) - g.SetDebugWriter(debugBuf) - defer debugBuf.Close() - // Walk the graph. var walkFn dag.WalkFunc - walkFn = func(v dag.Vertex) (rerr error) { - log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v)) + walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) { + log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v) g.DebugVisitInfo(v, g.debugName) - // If we have a panic wrap GraphWalker and a panic occurs, recover - // and call that. We ensure the return value is an error, however, - // so that future nodes are not called.
defer func() { - // If no panicwrap, do nothing - if panicwrap == nil { - return - } - - // If no panic, do nothing - err := recover() - if err == nil { - return - } - - // Modify the return value to show the error - rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s", - dag.VertexName(v), err, debug.Stack()) - - // Call the panic wrapper - panicwrap.Panic(v, err) + log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v)) }() walker.EnterVertex(v) - defer walker.ExitVertex(v, rerr) + defer walker.ExitVertex(v, diags) // vertexCtx is the context that we use when evaluating. This // is normally the context of our graph but can be overridden // with a GraphNodeSubPath impl. vertexCtx := ctx if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { - vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path())) + vertexCtx = walker.EnterPath(pn.Path()) defer walker.ExitPath(pn.Path()) } @@ -112,60 +77,64 @@ func (g *Graph) walk(walker GraphWalker) error { if ev, ok := v.(GraphNodeEvalable); ok { tree := ev.EvalTree() if tree == nil { - panic(fmt.Sprintf( - "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v)) + panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v)) } // Allow the walker to change our tree if needed. Eval, // then callback with the output. - log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) tree = walker.EnterEvalTree(v, tree) output, err := Eval(tree, vertexCtx) - if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { + diags = diags.Append(walker.ExitEvalTree(v, output, err)) + if diags.HasErrors() { return } } // If the node is dynamically expanded, then expand it if ev, ok := v.(GraphNodeDynamicExpandable); ok { - log.Printf( - "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph", - path, - dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) g, err := ev.DynamicExpand(vertexCtx) if err != nil { - rerr = err + diags = diags.Append(err) return } if g != nil { // Walk the subgraph - if rerr = g.walk(walker); rerr != nil { + log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v)) + subDiags := g.walk(walker) + diags = diags.Append(subDiags) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v)) return } + log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v)) + } else { + log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v)) } } // If the node has a subgraph, then walk the subgraph if sn, ok := v.(GraphNodeSubgraph); ok { - log.Printf( - "[DEBUG] vertex '%s.%s': walking subgraph", - path, - dag.VertexName(v)) + log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v)) g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) - if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { + subDiags := sn.Subgraph().(*Graph).walk(walker) + if subDiags.HasErrors() { + log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v)) return } + log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v)) } - return nil + return } return g.AcyclicGraph.Walk(walkFn) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go 
b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go index 6374bb90..66b21f30 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go @@ -4,6 +4,10 @@ import ( "fmt" "log" "strings" + + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" ) // GraphBuilder is an interface that can be implemented and used with @@ -12,7 +16,7 @@ type GraphBuilder interface { // Build builds the graph for the given module path. It is up to // the interface implementation whether this build should expand // the graph or not. - Build(path []string) (*Graph, error) + Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) } // BasicGraphBuilder is a GraphBuilder that builds a graph out of a @@ -25,21 +29,16 @@ type BasicGraphBuilder struct { Name string } -func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { +func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + var diags tfdiags.Diagnostics g := &Graph{Path: path} - debugName := "graph.json" - if b.Name != "" { - debugName = b.Name + "-" + debugName - } - debugBuf := dbug.NewFileWriter(debugName) - g.SetDebugWriter(debugBuf) - defer debugBuf.Close() - + var lastStepStr string for _, step := range b.Steps { if step == nil { continue } + log.Printf("[TRACE] Executing graph transform %T", step) stepName := fmt.Sprintf("%T", step) dot := strings.LastIndex(stepName, ".") @@ -56,12 +55,20 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { } debugOp.End(errMsg) - log.Printf( - "[TRACE] Graph after step %T:\n\n%s", - step, g.StringWithNodeTypes()) + if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr { + log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr) + lastStepStr = thisStepStr + } else { + log.Printf("[TRACE] Completed graph transform %T (no changes)", step) + } if err != nil { - return g, err + if nf, isNF := err.(tfdiags.NonFatalError); isNF { + diags = diags.Append(nf.Diagnostics) + } else { + diags = diags.Append(err) + return g, diags + } } } @@ -69,9 +76,10 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { if b.Validate { if err := g.Validate(); err != nil { log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) - return nil, err + diags = diags.Append(err) + return nil, diags } } - return g, nil + return g, diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go index 61242586..7182dd7d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go @@ -1,8 +1,12 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ApplyGraphBuilder implements GraphBuilder and is responsible for building @@ -13,26 +17,28 @@ import ( // that aren't explicitly in the diff. 
There are other scenarios where the // diff can deviate, so this is just one layer of protection. type ApplyGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree that the diff was built from. + Config *configs.Config - // Diff is the diff to apply. - Diff *Diff + // Changes describes the changes that we need to apply. + Changes *plans.Changes // State is the current state - State *State + State *states.State - // Providers is the list of providers supported. - Providers []string + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Provisioners is the list of provisioners supported. - Provisioners []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target. This is only required to make sure // unnecessary outputs aren't included in the apply graph. The plan // builder successfully handles targeting resources. In the future, // outputs should go into the diff so that this is unnecessary. - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. DisableReduce bool @@ -45,7 +51,7 @@ type ApplyGraphBuilder struct { } // See GraphBuilder -func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { +func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -68,61 +74,130 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer { } } + concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeDestroyResource{ + NodeAbstractResource: a, + } + } + + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeApplyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + steps := []GraphTransformer{ - // Creates all the nodes represented in the diff. - &DiffTransformer{ + // Creates all the resources represented in the config. During apply, + // we use this just to ensure that the whole-resource metadata is + // updated to reflect things such as whether the count argument is + // set in config, or which provider configuration manages each resource. + &ConfigTransformer{ Concrete: concreteResource, + Config: b.Config, + }, + + // Creates all the resource instances represented in the diff, along + // with dependency edges against the whole-resource nodes added by + // ConfigTransformer above. + &DiffTransformer{ + Concrete: concreteResourceInstance, + State: b.State, + Changes: b.Changes, + }, - Diff: b.Diff, - Module: b.Module, - State: b.State, + // Creates extra cleanup nodes for any entire resources that are + // no longer present in config, so we can make sure we clean up the + // leftover empty resource states after the instances have been + // destroyed. + // (We don't track this particular type of change in the plan because + // it's just cleanup of our own state object, and so doesn't affect + // any real remote objects or consumable outputs.)
+ &OrphanResourceTransformer{ + Concrete: concreteOrphanResource, + Config: b.Config, + State: b.State, }, // Create orphan output nodes - &OrphanOutputTransformer{Module: b.Module, State: b.State}, + &OrphanOutputTransformer{Config: b.Config, State: b.State}, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Attach the state &AttachStateTransformer{State: b.State}, - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, - // Destruction ordering - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, GraphTransformIf( func() bool { return !b.Destroy }, - &CBDEdgeTransformer{Module: b.Module, State: b.State}, + &CBDEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, ), // Provisioner-related transformations - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, &ProvisionerTransformer{}, // Add root variables - &RootVariableTransformer{Module: b.Module}, + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{Config: b.Config}, + + // add providers + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect references so ordering is correct &ReferenceTransformer{}, + // Handle destroy time transformations for output and local values. + // Reverse the edges from outputs and locals, so that + // interpolations don't fail during destroy. + // Create a destroy node for outputs to remove them from the state. + // Prune unreferenced values, which may have interpolations that can't + // be resolved. 
+ GraphTransformIf( + func() bool { return b.Destroy }, + GraphTransformMulti( + &DestroyValueReferenceTransformer{}, + &DestroyOutputTransformer{}, + &PruneUnusedValuesTransformer{}, + ), + ), + // Add the node to fix the state count boundaries - &CountBoundaryTransformer{}, + &CountBoundaryTransformer{ + Config: b.Config, + }, // Target &TargetsTransformer{Targets: b.Targets}, + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, + // Single root &RootTransformer{}, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go index 014b348e..a6047a9b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go @@ -1,8 +1,11 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // DestroyPlanGraphBuilder implements GraphBuilder and is responsible for @@ -11,21 +14,29 @@ import ( // Planning a pure destroy operation is simple because we can ignore most // ordering configuration and simply reverse the state. type DestroyPlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree to build the plan from. + Config *configs.Config // State is the current state - State *State + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // Validate will do structural validation of the graph. Validate bool } // See GraphBuilder -func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { +func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -35,25 +46,44 @@ func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { // See GraphBuilder func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodePlanDestroyableResource{ - NodeAbstractResource: a, + concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlanDestroyableResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeApplyableProvider{ + NodeAbstractProvider: a, } } steps := []GraphTransformer{ - // Creates all the nodes represented in the state. + // Creates nodes for the resource instances tracked in the state. 
&StateTransformer{ - Concrete: concreteResource, - State: b.State, + ConcreteCurrent: concreteResourceInstance, + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, }, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), // Destruction ordering. We require this only so that // targeting below will prune the correct things. - &DestroyEdgeTransformer{Module: b.Module, State: b.State}, + &DestroyEdgeTransformer{ + Config: b.Config, + State: b.State, + Schemas: b.Schemas, + }, // Target. Note we don't set "Destroy: true" here since we already // created proper destroy ordering. diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go new file mode 100644 index 00000000..eb6c897b --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go @@ -0,0 +1,108 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" +) + +// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable +// for evaluating in-memory values (input variables, local values, output +// values) in the state without any other side-effects. +// +// This graph is used only in weird cases, such as the "terraform console" +// CLI command, where we need to evaluate expressions against the state +// without taking any other actions. +// +// The generated graph will include nodes for providers, resources, etc +// just to allow indirect dependencies to be resolved, but these nodes will +// not take any actions themselves since we assume that their parts of the +// state, if any, are already complete. +// +// Although the providers are never configured, they must still be available +// in order to obtain schema information used for type checking, etc. +type EvalGraphBuilder struct { + // Config is the configuration tree. + Config *configs.Config + + // State is the current state + State *states.State + + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { + return (&BasicGraphBuilder{ + Steps: b.Steps(), + Validate: true, + Name: "EvalGraphBuilder", + }).Build(path) +} + +// See GraphBuilder +func (b *EvalGraphBuilder) Steps() []GraphTransformer { + concreteProvider := func(a *NodeAbstractProvider) dag.Vertex { + return &NodeEvalableProvider{ + NodeAbstractProvider: a, + } + } + + steps := []GraphTransformer{ + // Creates all the resources represented in the config, just to allow + // indirect dependencies to be resolved.
+ &ConfigTransformer{ + Concrete: nil, // just use the abstract type + Config: b.Config, + Unique: true, + }, + + // Attach the state + &AttachStateTransformer{State: b.State}, + + // Attach the configuration to any resources + &AttachResourceConfigTransformer{Config: b.Config}, + + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Although we don't configure providers, we do still start them up + // to get their schemas, and so we must shut them down again here. + &CloseProviderTransformer{}, + + // Single root + &RootTransformer{}, + + // Remove redundant edges to simplify the graph. + &TransitiveReductionTransformer{}, + } + + return steps +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go index 7fa76ded..7b0e39f4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go @@ -1,8 +1,10 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // ImportGraphBuilder implements GraphBuilder and is responsible for building @@ -12,15 +14,19 @@ type ImportGraphBuilder struct { // ImportTargets are the list of resources to import. ImportTargets []*ImportTarget - // Module is the module to add to the graph. See ImportOpts.Module. - Module *module.Tree + // Config is the configuration to build the graph from. See ImportOpts.Config. + Config *configs.Config - // Providers is the list of providers supported. - Providers []string + // Components is the factory for our available plugin components. + Components contextComponentFactory + + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas } // Build builds the graph according to the steps returned by Steps. -func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { +func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: true, @@ -33,9 +39,9 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { func (b *ImportGraphBuilder) Steps() []GraphTransformer { // Get the module. If we don't have one, we just use an empty tree // so that the transform still works but does nothing. - mod := b.Module - if mod == nil { - mod = module.NewEmptyTree() + config := b.Config + if config == nil { + config = configs.NewEmptyConfig() } // Custom factory for creating providers.
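Every builder in these files follows the same delegation pattern: contribute an ordered list of transforms, hand them to BasicGraphBuilder, and rely on nil steps being skipped so a transform can be included conditionally (as RefreshGraphBuilder does further below). A stripped-down sketch of that pattern, using hypothetical toy types rather than the real interfaces:

```go
package main

import "fmt"

// graph and transformer are toy stand-ins for terraform.Graph and
// GraphTransformer.
type graph struct{ nodes []string }

type transformer interface{ transform(g *graph) error }

// addNode is a trivial transform that appends one node to the graph.
type addNode string

func (a addNode) transform(g *graph) error {
	g.nodes = append(g.nodes, string(a))
	return nil
}

// basicBuilder mirrors BasicGraphBuilder: apply the steps in order,
// skipping nil entries so callers can include steps conditionally.
type basicBuilder struct{ steps []transformer }

func (b *basicBuilder) build() (*graph, error) {
	g := &graph{}
	for _, step := range b.steps {
		if step == nil {
			continue
		}
		if err := step.transform(g); err != nil {
			return nil, err
		}
	}
	return g, nil
}

func main() {
	b := &basicBuilder{steps: []transformer{addNode("root"), nil, addNode("provider.aws")}}
	g, err := b.build()
	if err != nil {
		panic(err)
	}
	fmt.Println(g.nodes) // [root provider.aws]
}
```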
@@ -47,21 +53,39 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer { steps := []GraphTransformer{ // Create all our resources from the configuration and state - &ConfigTransformer{Module: mod}, + &ConfigTransformer{Config: config}, // Add the import steps &ImportStateTransformer{Targets: b.ImportTargets}, - // Provider-related transformations - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: mod}, + // Add root variables + &RootVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, config), // This validates that the providers only depend on variables &ImportProviderValidateTransformer{}, + // Add the local values + &LocalTransformer{Config: b.Config}, + + // Add the outputs + &OutputTransformer{Config: b.Config}, + + // Add module variables + &ModuleVariableTransformer{Config: b.Config}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, + + // Connect so that the references are ready for targeting. We'll + // have to connect again later for providers and so on. + &ReferenceTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + // Single root &RootTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go index 275cb32f..17adfd27 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go @@ -3,8 +3,11 @@ package terraform import ( "sync" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // PlanGraphBuilder implements GraphBuilder and is responsible for building @@ -19,20 +22,22 @@ import ( // create-before-destroy can be completely ignored. // type PlanGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree to build a plan from. + Config *configs.Config // State is the current state - State *State + State *states.State - // Providers is the list of providers supported. - Providers []string + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Provisioners is the list of provisioners supported. - Provisioners []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. 
DisableReduce bool @@ -46,13 +51,13 @@ type PlanGraphBuilder struct { CustomConcrete bool ConcreteProvider ConcreteProviderNodeFunc ConcreteResource ConcreteResourceNodeFunc - ConcreteResourceOrphan ConcreteResourceNodeFunc + ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc once sync.Once } // See GraphBuilder -func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { +func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -64,57 +69,101 @@ func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { func (b *PlanGraphBuilder) Steps() []GraphTransformer { b.once.Do(b.init) + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + steps := []GraphTransformer{ // Creates all the resources represented in the config &ConfigTransformer{ Concrete: b.ConcreteResource, - Module: b.Module, + Config: b.Config, }, + // Add the local values + &LocalTransformer{Config: b.Config}, + // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add orphan resources - &OrphanResourceTransformer{ + &OrphanResourceInstanceTransformer{ Concrete: b.ConcreteResourceOrphan, State: b.State, - Module: b.Module, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can plan to destroy them. (This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.) + &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, + }, + + // Create orphan output nodes + &OrphanOutputTransformer{ + Config: b.Config, + State: b.State, }, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Attach the state &AttachStateTransformer{State: b.State}, // Add root variables - &RootVariableTransformer{Module: b.Module}, - - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, - - // Provisioner-related transformations. Only add these if requested. - GraphTransformIf( - func() bool { return b.Provisioners != nil }, - GraphTransformMulti( - &MissingProvisionerTransformer{Provisioners: b.Provisioners}, - &ProvisionerTransformer{}, - ), - ), + &RootVariableTransformer{Config: b.Config}, + + &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()}, + &ProvisionerTransformer{}, // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{ + Config: b.Config, + }, + + TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config), + + // Remove modules no longer present in the config + &RemovedModuleTransformer{Config: b.Config, State: b.State}, + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. 
&ReferenceTransformer{}, + // Add the node to fix the state count boundaries + &CountBoundaryTransformer{ + Config: b.Config, + }, + // Target - &TargetsTransformer{Targets: b.Targets}, + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Detect when create_before_destroy must be forced on for a particular + // node due to dependency edges, to avoid graph cycles during apply. + &ForcedCBDTransformer{}, + + // Close opened plugin connections + &CloseProviderTransformer{}, + &CloseProvisionerTransformer{}, // Single root &RootTransformer{}, @@ -143,15 +192,13 @@ func (b *PlanGraphBuilder) init() { b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { return &NodePlannableResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } - b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, + b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, } } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go index 8fed21d6..0342cdbe 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go @@ -1,8 +1,13 @@ package terraform import ( - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "log" + + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" ) @@ -19,17 +24,22 @@ import ( // create-before-destroy can be completely ignored. // type RefreshGraphBuilder struct { - // Module is the root module for the graph to build. - Module *module.Tree + // Config is the configuration tree. + Config *configs.Config + + // State is the prior state + State *states.State - // State is the current state - State *State + // Components is a factory for the plug-in components (providers and + // provisioners) available for use. + Components contextComponentFactory - // Providers is the list of providers supported. - Providers []string + // Schemas is the repository of schemas we will draw from to analyse + // the configuration. + Schemas *Schemas // Targets are resources to target - Targets []string + Targets []addrs.Targetable // DisableReduce, if true, will not reduce the graph. Great for testing. 
DisableReduce bool @@ -39,7 +49,7 @@ type RefreshGraphBuilder struct { } // See GraphBuilder -func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { +func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) { return (&BasicGraphBuilder{ Steps: b.Steps(), Validate: b.Validate, @@ -56,64 +66,119 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer { } } - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - return &NodeRefreshableResource{ + concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { + return &NodeRefreshableManagedResource{ NodeAbstractResource: a, } } + concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex { + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex { + // The "Plan" node type also handles refreshing behavior. + return &NodePlanDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: a, + DeposedKey: key, + } + } + concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { return &NodeRefreshableDataResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } steps := []GraphTransformer{ - // Creates all the resources represented in the state - &StateTransformer{ - Concrete: concreteResource, - State: b.State, - }, - - // Creates all the data resources that aren't in the state + // Creates all the managed resources that aren't in the state, but only if + // we have a state already. No resources in state means there's nothing + // to refresh. + func() GraphTransformer { + if b.State.HasResources() { + return &ConfigTransformer{ + Concrete: concreteManagedResource, + Config: b.Config, + Unique: true, + ModeFilter: true, + Mode: addrs.ManagedResourceMode, + } + } + log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer") + return nil + }(), + + // Creates all the data resources that aren't in the state. This will also + // add any orphans from scaling in as destroy nodes. &ConfigTransformer{ Concrete: concreteDataResource, - Module: b.Module, + Config: b.Config, Unique: true, ModeFilter: true, - Mode: config.DataResourceMode, + Mode: addrs.DataResourceMode, + }, + + // Add any fully-orphaned resources from config (ones that have been + // removed completely, not ones that are just orphaned due to a scaled-in + // count). + &OrphanResourceInstanceTransformer{ + Concrete: concreteManagedResourceInstance, + State: b.State, + Config: b.Config, + }, + + // We also need nodes for any deposed instance objects present in the + // state, so we can check if they still exist. (This intentionally + // skips creating nodes for _current_ objects, since ConfigTransformer + // created nodes that will do that during DynamicExpand.)
+ &StateTransformer{ + ConcreteDeposed: concreteResourceInstanceDeposed, + State: b.State, }, // Attach the state &AttachStateTransformer{State: b.State}, // Attach the configuration to any resources - &AttachResourceConfigTransformer{Module: b.Module}, + &AttachResourceConfigTransformer{Config: b.Config}, // Add root variables - &RootVariableTransformer{Module: b.Module}, + &RootVariableTransformer{Config: b.Config}, - // Create all the providers - &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: b.Module}, + // Add the local values + &LocalTransformer{Config: b.Config}, // Add the outputs - &OutputTransformer{Module: b.Module}, + &OutputTransformer{Config: b.Config}, // Add module variables - &ModuleVariableTransformer{Module: b.Module}, + &ModuleVariableTransformer{Config: b.Config}, + + TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: b.Schemas}, // Connect so that the references are ready for targeting. We'll // have to connect again later for providers and so on. &ReferenceTransformer{}, // Target - &TargetsTransformer{Targets: b.Targets}, + &TargetsTransformer{ + Targets: b.Targets, + + // Resource nodes from config have not yet been expanded for + // "count", so we must apply targeting without indices. Exact + // targeting will be dealt with later when these resources + // DynamicExpand. + IgnoreIndices: true, + }, + + // Close opened plugin connections + &CloseProviderTransformer{}, // Single root &RootTransformer{}, diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go index 645ec7be..1881f95f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go @@ -23,9 +23,7 @@ func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder { p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { return &NodeValidatableResource{ - NodeAbstractCountResource: &NodeAbstractCountResource{ - NodeAbstractResource: a, - }, + NodeAbstractResource: a, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go index 2897eb54..768590fb 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go @@ -1,7 +1,11 @@ package terraform +import ( + "github.com/hashicorp/terraform/addrs" +) + // GraphNodeSubPath says that a node is part of a graph with a // different path, and the context should be adjusted accordingly. 
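// A minimal sketch of the "concrete node factory" pattern the refresh builder
// above relies on (concreteManagedResource, concreteResourceInstanceDeposed,
// ...): the builder owns abstract nodes, and a per-walk closure decides which
// concrete vertex type wraps them. Hypothetical stand-in types, not the real
// terraform ones.
package main

import "fmt"

type Vertex interface{ Name() string }

type AbstractResourceInstance struct{ Addr string }

type RefreshableInstance struct{ AbstractResourceInstance }

func (n *RefreshableInstance) Name() string { return n.Addr + " (refresh)" }

type DeposedInstance struct {
	AbstractResourceInstance
	DeposedKey string
}

func (n *DeposedInstance) Name() string { return n.Addr + " (deposed " + n.DeposedKey + ")" }

func main() {
	// Each walk supplies its own factories; the transformers stay generic.
	concrete := func(a AbstractResourceInstance) Vertex {
		return &RefreshableInstance{a}
	}
	concreteDeposed := func(a AbstractResourceInstance, key string) Vertex {
		return &DeposedInstance{a, key}
	}
	fmt.Println(concrete(AbstractResourceInstance{"aws_instance.foo[1]"}).Name())
	fmt.Println(concreteDeposed(AbstractResourceInstance{"aws_instance.foo[1]"}, "00000001").Name())
}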
type GraphNodeSubPath interface { - Path() []string + Path() addrs.ModuleInstance } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go index 34ce6f64..e980e0c6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go @@ -1,60 +1,32 @@ package terraform import ( + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // GraphWalker is an interface that can be implemented that when used // with Graph.Walk will invoke the given callbacks under certain events. type GraphWalker interface { - EnterPath([]string) EvalContext - ExitPath([]string) + EnterPath(addrs.ModuleInstance) EvalContext + ExitPath(addrs.ModuleInstance) EnterVertex(dag.Vertex) - ExitVertex(dag.Vertex, error) + ExitVertex(dag.Vertex, tfdiags.Diagnostics) EnterEvalTree(dag.Vertex, EvalNode) EvalNode - ExitEvalTree(dag.Vertex, interface{}, error) error + ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics } -// GrpahWalkerPanicwrapper can be optionally implemented to catch panics -// that occur while walking the graph. This is not generally recommended -// since panics should crash Terraform and result in a bug report. However, -// this is particularly useful for situations like the shadow graph where -// you don't ever want to cause a panic. -type GraphWalkerPanicwrapper interface { - GraphWalker - - // Panic is called when a panic occurs. This will halt the panic from - // propogating so if the walker wants it to crash still it should panic - // again. This is called from within a defer so runtime/debug.Stack can - // be used to get the stack trace of the panic. - Panic(dag.Vertex, interface{}) -} - -// GraphWalkerPanicwrap wraps an existing Graphwalker to wrap and swallow -// the panics. This doesn't lose the panics since the panics are still -// returned as errors as part of a graph walk. -func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper { - return &graphWalkerPanicwrapper{ - GraphWalker: w, - } -} - -type graphWalkerPanicwrapper struct { - GraphWalker -} - -func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {} - // NullGraphWalker is a GraphWalker implementation that does nothing. // This can be embedded within other GraphWalker implementations for easily // implementing all the required functions. 
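// NullGraphWalker below exists so that concrete walkers can embed it and
// override only the callbacks they care about. The same embedding idiom,
// reduced to a toy interface (hypothetical types, not the real GraphWalker):
package main

import "fmt"

type Walker interface {
	EnterVertex(name string)
	ExitVertex(name string, err error)
}

// NullWalker provides do-nothing defaults for the whole interface.
type NullWalker struct{}

func (NullWalker) EnterVertex(string)       {}
func (NullWalker) ExitVertex(string, error) {}

// countingWalker embeds NullWalker, so it only implements EnterVertex.
type countingWalker struct {
	NullWalker
	visited int
}

func (w *countingWalker) EnterVertex(name string) { w.visited++ }

func main() {
	var w Walker = &countingWalker{}
	w.EnterVertex("root")
	w.ExitVertex("root", nil) // falls through to the embedded no-op
	fmt.Println(w.(*countingWalker).visited) // 1
}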
type NullGraphWalker struct{} -func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } -func (NullGraphWalker) ExitPath([]string) {} +func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) } +func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {} func (NullGraphWalker) EnterVertex(dag.Vertex) {} -func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} +func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {} func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } -func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { +func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics { return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go index e63b4603..03c192a8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go @@ -2,12 +2,19 @@ package terraform import ( "context" - "fmt" "log" "sync" - "github.com/hashicorp/errwrap" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ContextGraphWalker is the GraphWalker implementation used with the @@ -16,55 +23,56 @@ type ContextGraphWalker struct { NullGraphWalker // Configurable values - Context *Context - Operation walkOperation - StopContext context.Context - - // Outputs, do not set these. Do not read these while the graph - // is being walked. - ValidationWarnings []string - ValidationErrors []error - - errorLock sync.Mutex - once sync.Once - contexts map[string]*BuiltinEvalContext - contextLock sync.Mutex - interpolaterVars map[string]map[string]interface{} - interpolaterVarLock sync.Mutex - providerCache map[string]ResourceProvider - providerConfigCache map[string]*ResourceConfig - providerLock sync.Mutex - provisionerCache map[string]ResourceProvisioner - provisionerLock sync.Mutex + Context *Context + State *states.SyncState // Used for safe concurrent access to state + Changes *plans.ChangesSync // Used for safe concurrent writes to changes + Operation walkOperation + StopContext context.Context + RootVariableValues InputValues + + // This is an output. Do not set this, nor read it while a graph walk + // is in progress. 
+ NonFatalDiagnostics tfdiags.Diagnostics + + errorLock sync.Mutex + once sync.Once + contexts map[string]*BuiltinEvalContext + contextLock sync.Mutex + variableValues map[string]map[string]cty.Value + variableValuesLock sync.Mutex + providerCache map[string]providers.Interface + providerSchemas map[string]*ProviderSchema + providerLock sync.Mutex + provisionerCache map[string]provisioners.Interface + provisionerSchemas map[string]*configschema.Block + provisionerLock sync.Mutex } -func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { +func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext { w.once.Do(w.init) w.contextLock.Lock() defer w.contextLock.Unlock() // If we already have a context for this path cached, use that - key := PathCacheKey(path) + key := path.String() if ctx, ok := w.contexts[key]; ok { return ctx } - // Setup the variables for this interpolater - variables := make(map[string]interface{}) - if len(path) <= 1 { - for k, v := range w.Context.variables { - variables[k] = v - } - } - w.interpolaterVarLock.Lock() - if m, ok := w.interpolaterVars[key]; ok { - for k, v := range m { - variables[k] = v - } + // Our evaluator shares some locks with the main context and the walker + // so that we can safely run multiple evaluations at once across + // different modules. + evaluator := &Evaluator{ + Meta: w.Context.meta, + Config: w.Context.config, + Operation: w.Operation, + State: w.State, + Changes: w.Changes, + Schemas: w.Context.schemas, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, } - w.interpolaterVars[key] = variables - w.interpolaterVarLock.Unlock() ctx := &BuiltinEvalContext{ StopContext: w.StopContext, @@ -72,27 +80,17 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { Hooks: w.Context.hooks, InputValue: w.Context.uiInput, Components: w.Context.components, + Schemas: w.Context.schemas, ProviderCache: w.providerCache, - ProviderConfigCache: w.providerConfigCache, ProviderInputConfig: w.Context.providerInputConfig, ProviderLock: &w.providerLock, ProvisionerCache: w.provisionerCache, ProvisionerLock: &w.provisionerLock, - DiffValue: w.Context.diff, - DiffLock: &w.Context.diffLock, - StateValue: w.Context.state, - StateLock: &w.Context.stateLock, - Interpolater: &Interpolater{ - Operation: w.Operation, - Meta: w.Context.meta, - Module: w.Context.module, - State: w.Context.state, - StateLock: &w.Context.stateLock, - VariableValues: variables, - VariableValuesLock: &w.interpolaterVarLock, - }, - InterpolaterVars: w.interpolaterVars, - InterpolaterVarLock: &w.interpolaterVarLock, + ChangesValue: w.Changes, + StateValue: w.State, + Evaluator: evaluator, + VariableValues: w.variableValues, + VariableValuesLock: &w.variableValuesLock, } w.contexts[key] = ctx @@ -100,8 +98,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { } func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { - log.Printf("[TRACE] [%s] Entering eval tree: %s", - w.Operation, dag.VertexName(v)) + log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v)) // Acquire a lock on the semaphore w.Context.parallelSem.Acquire() @@ -111,10 +108,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return EvalFilter(n, EvalNodeFilterOp(w.Operation)) } -func (w *ContextGraphWalker) ExitEvalTree( - v dag.Vertex, output interface{}, err error) error { - log.Printf("[TRACE] [%s] Exiting eval tree: %s", - w.Operation, dag.VertexName(v)) 
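// EnterPath above memoizes one EvalContext per module path, keyed by the
// path's string form and guarded by a mutex plus a sync.Once for lazy
// initialization. The caching skeleton in isolation, with a hypothetical
// evalContext stand-in:
package main

import (
	"fmt"
	"sync"
)

type evalContext struct{ path string }

type walker struct {
	once     sync.Once
	mu       sync.Mutex
	contexts map[string]*evalContext
}

func (w *walker) init() { w.contexts = make(map[string]*evalContext) }

// EnterPath returns the cached context for a path, creating it on first use.
// Safe for concurrent callers walking different parts of the graph.
func (w *walker) EnterPath(path string) *evalContext {
	w.once.Do(w.init)
	w.mu.Lock()
	defer w.mu.Unlock()
	if ctx, ok := w.contexts[path]; ok {
		return ctx
	}
	ctx := &evalContext{path: path}
	w.contexts[path] = ctx
	return ctx
}

func main() {
	w := &walker{}
	a := w.EnterPath("module.network")
	b := w.EnterPath("module.network")
	fmt.Println(a == b) // true: the same context is reused
}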
+func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics { + log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v)) // Release the semaphore w.Context.parallelSem.Release() @@ -127,31 +122,36 @@ func (w *ContextGraphWalker) ExitEvalTree( w.errorLock.Lock() defer w.errorLock.Unlock() - // Try to get a validation error out of it. If its not a validation - // error, then just record the normal error. - verr, ok := err.(*EvalValidateError) - if !ok { - return err - } - - for _, msg := range verr.Warnings { - w.ValidationWarnings = append( - w.ValidationWarnings, - fmt.Sprintf("%s: %s", dag.VertexName(v), msg)) - } - for _, e := range verr.Errors { - w.ValidationErrors = append( - w.ValidationErrors, - errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e)) + // If the error is non-fatal then we'll accumulate its diagnostics in our + // non-fatal list, rather than returning it directly, so that the graph + // walk can continue. + if nferr, ok := err.(tfdiags.NonFatalError); ok { + log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr) + w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics) + return nil } - return nil + // Otherwise, we'll let our usual diagnostics machinery figure out how to + // unpack this as one or more diagnostic messages and return that. If we + // get down here then the returned diagnostics will contain at least one + // error, causing the graph walk to halt. + var diags tfdiags.Diagnostics + diags = diags.Append(err) + return diags } func (w *ContextGraphWalker) init() { - w.contexts = make(map[string]*BuiltinEvalContext, 5) - w.providerCache = make(map[string]ResourceProvider, 5) - w.providerConfigCache = make(map[string]*ResourceConfig, 5) - w.provisionerCache = make(map[string]ResourceProvisioner, 5) - w.interpolaterVars = make(map[string]map[string]interface{}, 5) + w.contexts = make(map[string]*BuiltinEvalContext) + w.providerCache = make(map[string]providers.Interface) + w.providerSchemas = make(map[string]*ProviderSchema) + w.provisionerCache = make(map[string]provisioners.Interface) + w.provisionerSchemas = make(map[string]*configschema.Block) + w.variableValues = make(map[string]map[string]cty.Value) + + // Populate root module variable values. Other modules will be populated + // during the graph walk. 
+ w.variableValues[""] = make(map[string]cty.Value) + for k, iv := range w.RootVariableValues { + w.variableValues[""][k] = iv.Value + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go index 3fb37481..a3756e76 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go @@ -7,7 +7,6 @@ type walkOperation byte const ( walkInvalid walkOperation = iota - walkInput walkApply walkPlan walkPlanDestroy @@ -15,4 +14,5 @@ const ( walkValidate walkDestroy walkImport + walkEval // used just to prepare EvalContext for expression evaluation, with no other actions ) diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go index e97b4855..b51e1a26 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -2,15 +2,29 @@ package terraform -import "fmt" +import "strconv" -const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[GraphTypeInvalid-0] + _ = x[GraphTypeLegacy-1] + _ = x[GraphTypeRefresh-2] + _ = x[GraphTypePlan-3] + _ = x[GraphTypePlanDestroy-4] + _ = x[GraphTypeApply-5] + _ = x[GraphTypeValidate-6] + _ = x[GraphTypeEval-7] +} + +const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval" -var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} +var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124} func (i GraphType) String() string { if i >= GraphType(len(_GraphType_index)-1) { - return fmt.Sprintf("GraphType(%d)", i) + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" } return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go index ab11e8ee..c0bb23ab 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go @@ -1,5 +1,14 @@ package terraform +import ( + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + // HookAction is an enum of actions that can be taken as a result of a hook // callback. This allows you to modify the behavior of Terraform at runtime. type HookAction byte @@ -21,42 +30,56 @@ const ( // NilHook into your struct, which implements all of the interface but does // nothing. Then, override only the functions you want to implement. type Hook interface { - // PreApply and PostApply are called before and after a single - // resource is applied. 
The error argument in PostApply is the + // PreApply and PostApply are called before and after an action for a + // single instance is applied. The error argument in PostApply is the // error, if any, that was returned from the provider Apply call itself. - PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) - PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) - - // PreDiff and PostDiff are called before and after a single resource - // resource is diffed. - PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) - PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) - - // Provisioning hooks + PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) + + // PreDiff and PostDiff are called before and after a provider is given + // the opportunity to customize the proposed new state to produce the + // planned new state. + PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) + PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) + + // The provisioning hooks signal both the overall start and end of + // provisioning for a particular instance and of each of the individual + // configured provisioners for each instance. The sequence of these + // for a given instance might look something like this: // - // All should be self-explanatory. ProvisionOutput is called with - // output sent back by the provisioners. This will be called multiple - // times as output comes in, but each call should represent a line of - // output. The ProvisionOutput method cannot control whether the - // hook continues running. - PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) - PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) - PreProvision(*InstanceInfo, string) (HookAction, error) - PostProvision(*InstanceInfo, string, error) (HookAction, error) - ProvisionOutput(*InstanceInfo, string, string) + // PreProvisionInstance(aws_instance.foo[1], ...) + // PreProvisionInstanceStep(aws_instance.foo[1], "file") + // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil) + // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...") + // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...") + // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil) + // PostProvisionInstance(aws_instance.foo[1], ...) + // + // ProvisionOutput is called with output sent back by the provisioners. + // This will be called multiple times as output comes in, with each call + // representing one line of output. It cannot control whether the + // provisioner continues running.
+ PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) + PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) + PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) + ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) // PreRefresh and PostRefresh are called before and after a single // resource state is refreshed, respectively. - PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) - PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) - - // PostStateUpdate is called after the state is updated. - PostStateUpdate(*State) (HookAction, error) + PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) + PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) // PreImportState and PostImportState are called before and after - // a single resource's state is being improted. - PreImportState(*InstanceInfo, string) (HookAction, error) - PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) + // (respectively) each state import operation for a given resource address. + PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) + PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) + + // PostStateUpdate is called each time the state is updated. It receives + // a deep copy of the state, which it may therefore access freely without + // any need for locks to protect from concurrent writes from the caller. + PostStateUpdate(new *states.State) (HookAction, error) } // NilHook is a Hook implementation that does nothing. It exists only to @@ -64,59 +87,60 @@ type Hook interface { // and only implement the functions you are interested in. 
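// The provisioning lifecycle documented above reads as a small driver loop:
// instance-level Pre/Post hooks bracket a sequence of per-provisioner step
// hooks, with ProvisionOutput streamed line by line in between. A toy driver
// over a reduced hook interface (hypothetical types, assumed output):
package main

import "fmt"

type Hook interface {
	PreProvisionInstance(addr string)
	PreProvisionInstanceStep(addr, typeName string)
	ProvisionOutput(addr, typeName, line string)
	PostProvisionInstanceStep(addr, typeName string, err error)
	PostProvisionInstance(addr string)
}

type printHook struct{}

func (printHook) PreProvisionInstance(addr string)                   { fmt.Println("begin", addr) }
func (printHook) PreProvisionInstanceStep(addr, t string)            { fmt.Println(" step", t) }
func (printHook) ProvisionOutput(addr, t, line string)               { fmt.Println("  |", line) }
func (printHook) PostProvisionInstanceStep(addr, t string, err error) { fmt.Println(" done", t, err) }
func (printHook) PostProvisionInstance(addr string)                  { fmt.Println("end", addr) }

func main() {
	var h Hook = printHook{}
	addr := "aws_instance.foo[1]"
	h.PreProvisionInstance(addr)
	for _, step := range []string{"file", "remote-exec"} {
		h.PreProvisionInstanceStep(addr, step)
		h.ProvisionOutput(addr, step, "Installing foo...")
		h.PostProvisionInstanceStep(addr, step, nil)
	}
	h.PostProvisionInstance(addr)
}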
type NilHook struct{} -func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { +var _ Hook = (*NilHook)(nil) + +func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { +func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { +func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { +func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { +func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) ProvisionOutput( - *InstanceInfo, string, string) { +func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { } -func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { +func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { +func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { return HookActionContinue, nil } -func (*NilHook) PostStateUpdate(*State) (HookAction, error) { +func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) { return HookActionContinue, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go index 0e464006..6efa3196 100644 --- 
a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go @@ -1,245 +1,274 @@ package terraform -import "sync" +import ( + "sync" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) // MockHook is an implementation of Hook that can be used for tests. // It records all of its function calls. type MockHook struct { sync.Mutex - PreApplyCalled bool - PreApplyInfo *InstanceInfo - PreApplyDiff *InstanceDiff - PreApplyState *InstanceState - PreApplyReturn HookAction - PreApplyError error + PreApplyCalled bool + PreApplyAddr addrs.AbsResourceInstance + PreApplyGen states.Generation + PreApplyAction plans.Action + PreApplyPriorState cty.Value + PreApplyPlannedState cty.Value + PreApplyReturn HookAction + PreApplyError error PostApplyCalled bool - PostApplyInfo *InstanceInfo - PostApplyState *InstanceState + PostApplyAddr addrs.AbsResourceInstance + PostApplyGen states.Generation + PostApplyNewState cty.Value PostApplyError error PostApplyReturn HookAction PostApplyReturnError error - PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) - - PreDiffCalled bool - PreDiffInfo *InstanceInfo - PreDiffState *InstanceState - PreDiffReturn HookAction - PreDiffError error - - PostDiffCalled bool - PostDiffInfo *InstanceInfo - PostDiffDiff *InstanceDiff - PostDiffReturn HookAction - PostDiffError error - - PreProvisionResourceCalled bool - PreProvisionResourceInfo *InstanceInfo - PreProvisionInstanceState *InstanceState - PreProvisionResourceReturn HookAction - PreProvisionResourceError error - - PostProvisionResourceCalled bool - PostProvisionResourceInfo *InstanceInfo - PostProvisionInstanceState *InstanceState - PostProvisionResourceReturn HookAction - PostProvisionResourceError error - - PreProvisionCalled bool - PreProvisionInfo *InstanceInfo - PreProvisionProvisionerId string - PreProvisionReturn HookAction - PreProvisionError error - - PostProvisionCalled bool - PostProvisionInfo *InstanceInfo - PostProvisionProvisionerId string - PostProvisionErrorArg error - PostProvisionReturn HookAction - PostProvisionError error - - ProvisionOutputCalled bool - ProvisionOutputInfo *InstanceInfo - ProvisionOutputProvisionerId string - ProvisionOutputMessage string - - PostRefreshCalled bool - PostRefreshInfo *InstanceInfo - PostRefreshState *InstanceState - PostRefreshReturn HookAction - PostRefreshError error - - PreRefreshCalled bool - PreRefreshInfo *InstanceInfo - PreRefreshState *InstanceState - PreRefreshReturn HookAction - PreRefreshError error + PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error) + + PreDiffCalled bool + PreDiffAddr addrs.AbsResourceInstance + PreDiffGen states.Generation + PreDiffPriorState cty.Value + PreDiffProposedState cty.Value + PreDiffReturn HookAction + PreDiffError error + + PostDiffCalled bool + PostDiffAddr addrs.AbsResourceInstance + PostDiffGen states.Generation + PostDiffAction plans.Action + PostDiffPriorState cty.Value + PostDiffPlannedState cty.Value + PostDiffReturn HookAction + PostDiffError error + + PreProvisionInstanceCalled bool + PreProvisionInstanceAddr addrs.AbsResourceInstance + PreProvisionInstanceState cty.Value + PreProvisionInstanceReturn HookAction + PreProvisionInstanceError error + + 
PostProvisionInstanceCalled bool + PostProvisionInstanceAddr addrs.AbsResourceInstance + PostProvisionInstanceState cty.Value + PostProvisionInstanceReturn HookAction + PostProvisionInstanceError error + + PreProvisionInstanceStepCalled bool + PreProvisionInstanceStepAddr addrs.AbsResourceInstance + PreProvisionInstanceStepProvisionerType string + PreProvisionInstanceStepReturn HookAction + PreProvisionInstanceStepError error + + PostProvisionInstanceStepCalled bool + PostProvisionInstanceStepAddr addrs.AbsResourceInstance + PostProvisionInstanceStepProvisionerType string + PostProvisionInstanceStepErrorArg error + PostProvisionInstanceStepReturn HookAction + PostProvisionInstanceStepError error + + ProvisionOutputCalled bool + ProvisionOutputAddr addrs.AbsResourceInstance + ProvisionOutputProvisionerType string + ProvisionOutputMessage string + + PreRefreshCalled bool + PreRefreshAddr addrs.AbsResourceInstance + PreRefreshGen states.Generation + PreRefreshPriorState cty.Value + PreRefreshReturn HookAction + PreRefreshError error + + PostRefreshCalled bool + PostRefreshAddr addrs.AbsResourceInstance + PostRefreshGen states.Generation + PostRefreshPriorState cty.Value + PostRefreshNewState cty.Value + PostRefreshReturn HookAction + PostRefreshError error PreImportStateCalled bool - PreImportStateInfo *InstanceInfo - PreImportStateId string + PreImportStateAddr addrs.AbsResourceInstance + PreImportStateID string PreImportStateReturn HookAction PreImportStateError error - PostImportStateCalled bool - PostImportStateInfo *InstanceInfo - PostImportStateState []*InstanceState - PostImportStateReturn HookAction - PostImportStateError error + PostImportStateCalled bool + PostImportStateAddr addrs.AbsResourceInstance + PostImportStateNewStates []providers.ImportedResource + PostImportStateReturn HookAction + PostImportStateError error PostStateUpdateCalled bool - PostStateUpdateState *State + PostStateUpdateState *states.State PostStateUpdateReturn HookAction PostStateUpdateError error } -func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { +var _ Hook = (*MockHook)(nil) + +func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreApplyCalled = true - h.PreApplyInfo = n - h.PreApplyDiff = d - h.PreApplyState = s + h.PreApplyAddr = addr + h.PreApplyGen = gen + h.PreApplyAction = action + h.PreApplyPriorState = priorState + h.PreApplyPlannedState = plannedNewState return h.PreApplyReturn, h.PreApplyError } -func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { +func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) { h.Lock() defer h.Unlock() h.PostApplyCalled = true - h.PostApplyInfo = n - h.PostApplyState = s - h.PostApplyError = e + h.PostApplyAddr = addr + h.PostApplyGen = gen + h.PostApplyNewState = newState + h.PostApplyError = err if h.PostApplyFn != nil { - return h.PostApplyFn(n, s, e) + return h.PostApplyFn(addr, gen, newState, err) } return h.PostApplyReturn, h.PostApplyReturnError } -func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreDiffCalled = true - h.PreDiffInfo = n - 
h.PreDiffState = s + h.PreDiffAddr = addr + h.PreDiffGen = gen + h.PreDiffPriorState = priorState + h.PreDiffProposedState = proposedNewState return h.PreDiffReturn, h.PreDiffError } -func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { +func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PostDiffCalled = true - h.PostDiffInfo = n - h.PostDiffDiff = d + h.PostDiffAddr = addr + h.PostDiffGen = gen + h.PostDiffAction = action + h.PostDiffPriorState = priorState + h.PostDiffPlannedState = plannedNewState return h.PostDiffReturn, h.PostDiffError } -func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() - h.PreProvisionResourceCalled = true - h.PreProvisionResourceInfo = n - h.PreProvisionInstanceState = s - return h.PreProvisionResourceReturn, h.PreProvisionResourceError + h.PreProvisionInstanceCalled = true + h.PreProvisionInstanceAddr = addr + h.PreProvisionInstanceState = state + return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError } -func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() - h.PostProvisionResourceCalled = true - h.PostProvisionResourceInfo = n - h.PostProvisionInstanceState = s - return h.PostProvisionResourceReturn, h.PostProvisionResourceError + h.PostProvisionInstanceCalled = true + h.PostProvisionInstanceAddr = addr + h.PostProvisionInstanceState = state + return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError } -func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { +func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { h.Lock() defer h.Unlock() - h.PreProvisionCalled = true - h.PreProvisionInfo = n - h.PreProvisionProvisionerId = provId - return h.PreProvisionReturn, h.PreProvisionError + h.PreProvisionInstanceStepCalled = true + h.PreProvisionInstanceStepAddr = addr + h.PreProvisionInstanceStepProvisionerType = typeName + return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError } -func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { +func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { h.Lock() defer h.Unlock() - h.PostProvisionCalled = true - h.PostProvisionInfo = n - h.PostProvisionProvisionerId = provId - h.PostProvisionErrorArg = err - return h.PostProvisionReturn, h.PostProvisionError + h.PostProvisionInstanceStepCalled = true + h.PostProvisionInstanceStepAddr = addr + h.PostProvisionInstanceStepProvisionerType = typeName + h.PostProvisionInstanceStepErrorArg = err + return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError } -func (h *MockHook) ProvisionOutput( - n *InstanceInfo, - provId string, - msg string) { +func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { h.Lock() defer h.Unlock() h.ProvisionOutputCalled = true - h.ProvisionOutputInfo = n - h.ProvisionOutputProvisionerId = provId - h.ProvisionOutputMessage = msg + 
h.ProvisionOutputAddr = addr + h.ProvisionOutputProvisionerType = typeName + h.ProvisionOutputMessage = line } -func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PreRefreshCalled = true - h.PreRefreshInfo = n - h.PreRefreshState = s + h.PreRefreshAddr = addr + h.PreRefreshGen = gen + h.PreRefreshPriorState = priorState return h.PreRefreshReturn, h.PreRefreshError } -func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { +func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { h.Lock() defer h.Unlock() h.PostRefreshCalled = true - h.PostRefreshInfo = n - h.PostRefreshState = s + h.PostRefreshAddr = addr + h.PostRefreshPriorState = priorState + h.PostRefreshNewState = newState return h.PostRefreshReturn, h.PostRefreshError } -func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { +func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { h.Lock() defer h.Unlock() h.PreImportStateCalled = true - h.PreImportStateInfo = info - h.PreImportStateId = id + h.PreImportStateAddr = addr + h.PreImportStateID = importID return h.PreImportStateReturn, h.PreImportStateError } -func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { +func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { h.Lock() defer h.Unlock() h.PostImportStateCalled = true - h.PostImportStateInfo = info - h.PostImportStateState = s + h.PostImportStateAddr = addr + h.PostImportStateNewStates = imported return h.PostImportStateReturn, h.PostImportStateError } -func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { +func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) { h.Lock() defer h.Unlock() h.PostStateUpdateCalled = true - h.PostStateUpdateState = s + h.PostStateUpdateState = new return h.PostStateUpdateReturn, h.PostStateUpdateError } diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go index 104d0098..811fb337 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go +++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go @@ -2,6 +2,13 @@ package terraform import ( "sync/atomic" + + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" ) // stopHook is a private Hook implementation that Terraform uses to @@ -10,63 +17,69 @@ type stopHook struct { stop uint32 } -func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { +var _ Hook = (*stopHook)(nil) + +func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { +func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err 
error) (HookAction, error) { return h.hook() } -func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { +func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { +func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) { return h.hook() } -func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { +func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) { return h.hook() } -func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { +func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) { } -func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { +func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) { return h.hook() } -func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { +func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) { return h.hook() } -func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { +func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) { return h.hook() } -func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { +func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) { return h.hook() } func (h *stopHook) hook() (HookAction, error) { if h.Stopped() { + // FIXME: This should really return an error since stopping partway + // through is not a successful run-to-completion, but we'll need to + // introduce that cautiously since existing automation solutions may + // be depending on this behavior. 
return HookActionHalt, nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go index f69267cd..95b7a980 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -2,7 +2,17 @@ package terraform -import "fmt" +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[TypeInvalid-0] + _ = x[TypePrimary-1] + _ = x[TypeTainted-2] + _ = x[TypeDeposed-3] +} const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" @@ -10,7 +20,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} func (i InstanceType) String() string { if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 0c5acaa3..26c18575 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -45,63 +45,7 @@ type InterpolationScope struct { func (i *Interpolater) Values( scope *InterpolationScope, vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { - if scope == nil { - scope = &InterpolationScope{} - } - - result := make(map[string]ast.Variable, len(vars)) - - // Copy the default variables - if i.Module != nil && scope != nil { - mod := i.Module - if len(scope.Path) > 1 { - mod = i.Module.Child(scope.Path[1:]) - } - for _, v := range mod.Config().Variables { - // Set default variables - if v.Default == nil { - continue - } - - n := fmt.Sprintf("var.%s", v.Name) - variable, err := hil.InterfaceToVariable(v.Default) - if err != nil { - return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default) - } - - result[n] = variable - } - } - - for n, rawV := range vars { - var err error - switch v := rawV.(type) { - case *config.CountVariable: - err = i.valueCountVar(scope, n, v, result) - case *config.ModuleVariable: - err = i.valueModuleVar(scope, n, v, result) - case *config.PathVariable: - err = i.valuePathVar(scope, n, v, result) - case *config.ResourceVariable: - err = i.valueResourceVar(scope, n, v, result) - case *config.SelfVariable: - err = i.valueSelfVar(scope, n, v, result) - case *config.SimpleVariable: - err = i.valueSimpleVar(scope, n, v, result) - case *config.TerraformVariable: - err = i.valueTerraformVar(scope, n, v, result) - case *config.UserVariable: - err = i.valueUserVar(scope, n, v, result) - default: - err = fmt.Errorf("%s: unknown variable type: %T", n, rawV) - } - - if err != nil { - return nil, err - } - } - - return result, nil + return nil, fmt.Errorf("type Interpolator is no longer supported; use the evaluator API instead") } func (i *Interpolater) valueCountVar( @@ -140,7 +84,6 @@ func (i *Interpolater) valueModuleVar( n string, v *config.ModuleVariable, result map[string]ast.Variable) error { - // Build the path to the child module we want path := make([]string, len(scope.Path), len(scope.Path)+1) 
copy(path, scope.Path) @@ -152,7 +95,7 @@ func (i *Interpolater) valueModuleVar( defer i.StateLock.RUnlock() // Get the module where we're looking for the value - mod := i.State.ModuleByPath(path) + mod := i.State.ModuleByPath(normalizeModulePath(path)) if mod == nil { // If the module doesn't exist, then we can return an empty string. // This happens usually only in Refresh() when we haven't populated @@ -256,13 +199,13 @@ func (i *Interpolater) valueResourceVar( } if variable == nil { - // During the input walk we tolerate missing variables because + // During the refresh walk we tolerate missing variables because // we haven't yet had a chance to refresh state, so dynamic data may // not yet be complete. // If it truly is missing, we'll catch it on a later walk. // This applies only to graph nodes that interpolate during the - // config walk, e.g. providers. - if i.Operation == walkInput || i.Operation == walkRefresh { + // refresh walk, e.g. providers. + if i.Operation == walkRefresh { result[n] = unknownVariable() return nil } @@ -306,10 +249,10 @@ func (i *Interpolater) valueSimpleVar( // relied on this for their template_file data sources. We should // remove this at some point but there isn't any rush. return fmt.Errorf( - "invalid variable syntax: %q. If this is part of inline `template` parameter\n"+ + "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+ "then you must escape the interpolation with two dollar signs. For\n"+ "example: ${a} becomes $${a}.", - n) + n, n) } func (i *Interpolater) valueTerraformVar( @@ -317,9 +260,12 @@ func (i *Interpolater) valueTerraformVar( n string, v *config.TerraformVariable, result map[string]ast.Variable) error { - if v.Field != "env" { + // "env" is supported for backward compatibility, but it's deprecated and + // so we won't advertise it as being allowed in the error message. It will + // be removed in a future version of Terraform. + if v.Field != "workspace" && v.Field != "env" { return fmt.Errorf( - "%s: only supported key for 'terraform.X' interpolations is 'env'", n) + "%s: only supported key for 'terraform.X' interpolations is 'workspace'", n) } if i.Meta == nil { @@ -331,6 +277,59 @@ func (i *Interpolater) valueTerraformVar( return nil } +func (i *Interpolater) valueLocalVar( + scope *InterpolationScope, + n string, + v *config.LocalVariable, + result map[string]ast.Variable, +) error { + i.StateLock.RLock() + defer i.StateLock.RUnlock() + + modTree := i.Module + if len(scope.Path) > 1 { + modTree = i.Module.Child(scope.Path[1:]) + } + + // Get the resource from the configuration so we can verify + // that the resource is in the configuration and so we can access + // the configuration if we need to. + var cl *config.Local + for _, l := range modTree.Config().Locals { + if l.Name == v.Name { + cl = l + break + } + } + + if cl == nil { + return fmt.Errorf("%s: no local value of this name has been declared", n) + } + + // Get the relevant module + module := i.State.ModuleByPath(normalizeModulePath(scope.Path)) + if module == nil { + result[n] = unknownVariable() + return nil + } + + rawV, exists := module.Locals[v.Name] + if !exists { + result[n] = unknownVariable() + return nil + } + + varV, err := hil.InterfaceToVariable(rawV) + if err != nil { + // Should never happen, since interpolation should always produce + // something we can feed back in to interpolation. 
+ return fmt.Errorf("%s: %s", n, err) + } + + result[n] = varV + return nil +} + func (i *Interpolater) valueUserVar( scope *InterpolationScope, n string, @@ -461,6 +460,16 @@ func (i *Interpolater) computeResourceVariable( return &v, err } + // special case for the "id" field which is usually also an attribute + if v.Field == "id" && r.Primary.ID != "" { + // This is usually pulled from the attributes, but is sometimes missing + // during destroy. We can return the ID field in this case. + // FIXME: there should only be one ID to rule them all. + log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId()) + v, err := hil.InterfaceToVariable(r.Primary.ID) + return &v, err + } + // computed list or map attribute _, isList = r.Primary.Attributes[v.Field+".#"] _, isMap = r.Primary.Attributes[v.Field+".%"] @@ -517,10 +526,7 @@ MISSING: // // For a Destroy, we're also fine with computed values, since our goal is // only to get destroy nodes for existing resources. - // - // For an input walk, computed values are okay to return because we're only - // looking for missing variables to prompt the user for. - if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput { + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy { return &unknownVariable, nil } @@ -540,13 +546,6 @@ func (i *Interpolater) computeResourceMultiVariable( unknownVariable := unknownVariable() - // If we're only looking for input, we don't need to expand a - // multi-variable. This prevents us from encountering things that should be - // known but aren't because the state has yet to be refreshed. - if i.Operation == walkInput { - return &unknownVariable, nil - } - // Get the information about this resource variable, and verify // that it exists and such. module, cr, err := i.resourceVariableInfo(scope, v) @@ -594,14 +593,15 @@ func (i *Interpolater) computeResourceMultiVariable( } if singleAttr, ok := r.Primary.Attributes[v.Field]; ok { - if singleAttr == config.UnknownVariableValue { - return &unknownVariable, nil - } - values = append(values, singleAttr) continue } + if v.Field == "id" && r.Primary.ID != "" { + log.Printf("[WARN] resource %s missing 'id' attribute", v.ResourceId()) + values = append(values, r.Primary.ID) + } + // computed list or map attribute _, isList := r.Primary.Attributes[v.Field+".#"] _, isMap := r.Primary.Attributes[v.Field+".%"] @@ -613,10 +613,6 @@ func (i *Interpolater) computeResourceMultiVariable( return nil, err } - if multiAttr == unknownVariable { - return &unknownVariable, nil - } - values = append(values, multiAttr) } @@ -631,7 +627,7 @@ func (i *Interpolater) computeResourceMultiVariable( // // For an input walk, computed values are okay to return because we're only // looking for missing variables to prompt the user for. 
- if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { + if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy { return &unknownVariable, nil } @@ -650,7 +646,6 @@ func (i *Interpolater) computeResourceMultiVariable( func (i *Interpolater) interpolateComplexTypeAttribute( resourceID string, attributes map[string]string) (ast.Variable, error) { - // We can now distinguish between lists and maps in state by the count field: // - lists (and by extension, sets) use the traditional .# notation // - maps use the newer .% notation @@ -713,7 +708,7 @@ func (i *Interpolater) resourceVariableInfo( } // Get the relevant module - module := i.State.ModuleByPath(scope.Path) + module := i.State.ModuleByPath(normalizeModulePath(scope.Path)) return module, cr, nil } @@ -726,7 +721,8 @@ func (i *Interpolater) resourceCountMax( // If we're NOT applying, then we assume we can read the count // from the state. Plan and so on may not have any state yet so // we do a full interpolation. - if i.Operation != walkApply { + // Don't forget walkDestroy, which is a special case of walkApply + if !(i.Operation == walkApply || i.Operation == walkDestroy) { if cr == nil { return 0, nil } @@ -739,12 +735,31 @@ func (i *Interpolater) resourceCountMax( return count, nil } + // If we have no module state in the apply walk, that suggests we've hit + // a rather awkward edge-case: the resource this variable refers to + // has count = 0 and is the only resource processed so far on this walk, + // and so we've ended up not creating any resource states yet. We don't + // create a module state until the first resource is written into it, + // so the module state doesn't exist when we get here. + // + // In this case we act as we would if we had been passed a module + // with an empty resource state map. + if ms == nil { + return 0, nil + } + // We need to determine the list of resource keys to get values from. // This needs to be sorted so the order is deterministic. We used to // use "cr.Count()" but that doesn't work if the count is interpolated // and we can't guarantee that so we instead depend on the state. max := -1 - for k, _ := range ms.Resources { + for k, s := range ms.Resources { + // This resource may have been just removed, in which case the Primary + // may be nil, or just empty. + if s == nil || s.Primary == nil || len(s.Primary.Attributes) == 0 { + continue + } + // Get the index number for this resource index := "" if k == id { diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go new file mode 100644 index 00000000..66a68c7d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go @@ -0,0 +1,202 @@ +package terraform + +import ( + version "github.com/hashicorp/go-version" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/moduledeps" + "github.com/hashicorp/terraform/plugin/discovery" + "github.com/hashicorp/terraform/states" +) + +// ConfigTreeDependencies returns the dependencies of the tree of modules +// described by the given configuration and state. +// +// Both configuration and state are required because there can be resources +// implied by instances in the state that no longer exist in config. 
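+//
+// A rough usage sketch (hypothetical caller code, not part of this package;
+// cfg and state are assumed to come from the configs loader and a state read
+// respectively):
+//
+//	deps := ConfigTreeDependencies(cfg, state)
+//	for inst, dep := range deps.Providers {
+//		// inspect each provider dependency, e.g. dep.Reason
+//		_ = inst
++//		_ = dep
+//	}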
+func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module { + // First we walk the configuration tree to build the overall structure + // and capture the explicit/implicit/inherited provider dependencies. + deps := configTreeConfigDependencies(root, nil) + + // Next we walk over the resources in the state to catch any additional + // dependencies created by existing resources that are no longer in config. + // Most things we find in state will already be present in 'deps', but + // we're interested in the rare thing that isn't. + configTreeMergeStateDependencies(deps, state) + + return deps +} + +func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module { + if root == nil { + // If no config is provided, we'll make a synthetic root. + // This isn't necessarily correct if we're called with a nil that + // *isn't* at the root, but in practice that can never happen. + return &moduledeps.Module{ + Name: "root", + Providers: make(moduledeps.Providers), + } + } + + name := "root" + if len(root.Path) != 0 { + name = root.Path[len(root.Path)-1] + } + + ret := &moduledeps.Module{ + Name: name, + } + + module := root.Module + + // Provider dependencies + { + providers := make(moduledeps.Providers) + + // The main way to declare a provider dependency is explicitly inside + // the "terraform" block, which allows declaring a requirement without + // also creating a configuration. + for fullName, constraints := range module.ProviderRequirements { + inst := moduledeps.ProviderInstance(fullName) + + // The handling here is a bit fiddly because the moduledeps package + // was designed around the legacy (pre-0.12) configuration model + // and hasn't yet been revised to handle the new model. As a result, + // we need to do some translation here. + // FIXME: Eventually we should adjust the underlying model so we + // can also retain the source location of each constraint, for + // more informative output from the "terraform providers" command. + var rawConstraints version.Constraints + for _, constraint := range constraints { + rawConstraints = append(rawConstraints, constraint.Required...) + } + discoConstraints := discovery.NewConstraints(rawConstraints) + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + + // Provider configurations can also include version constraints, + // allowing for more terse declaration in situations where both a + // configuration and a constraint are defined in the same module. + for fullName, pCfg := range module.ProviderConfigs { + inst := moduledeps.ProviderInstance(fullName) + discoConstraints := discovery.AllVersions + if pCfg.Version.Required != nil { + discoConstraints = discovery.NewConstraints(pCfg.Version.Required) + } + if existing, exists := providers[inst]; exists { + existing.Constraints = existing.Constraints.Append(discoConstraints) + } else { + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discoConstraints, + Reason: moduledeps.ProviderDependencyExplicit, + } + } + } + + // Each resource in the configuration creates an *implicit* provider + // dependency, though we'll only record it if there isn't already + // an explicit dependency on the same provider. 
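+		// For example (illustrative only): a module containing just
+		//
+		//	resource "aws_instance" "web" { ... }
+		//
+		// and no explicit requirement still records an implicit dependency
+		// on the "aws" provider, with no version constraint.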
+ for _, rc := range module.ManagedResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.StringCompact()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + for _, rc := range module.DataResources { + addr := rc.ProviderConfigAddr() + inst := moduledeps.ProviderInstance(addr.StringCompact()) + if _, exists := providers[inst]; exists { + // Explicit dependency already present + continue + } + + reason := moduledeps.ProviderDependencyImplicit + if _, inherited := inheritProviders[addr.String()]; inherited { + reason = moduledeps.ProviderDependencyInherited + } + + providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: reason, + } + } + + ret.Providers = providers + } + + childInherit := make(map[string]*configs.Provider) + for k, v := range inheritProviders { + childInherit[k] = v + } + for k, v := range module.ProviderConfigs { + childInherit[k] = v + } + for _, c := range root.Children { + ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit)) + } + + return ret +} + +func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) { + if state == nil { + return + } + + findModule := func(path addrs.ModuleInstance) *moduledeps.Module { + module := root + for _, step := range path { + var next *moduledeps.Module + for _, cm := range module.Children { + if cm.Name == step.Name { + next = cm + break + } + } + + if next == nil { + // If we didn't find a next node, we'll need to make one + next = &moduledeps.Module{ + Name: step.Name, + Providers: make(moduledeps.Providers), + } + module.Children = append(module.Children, next) + } + + module = next + } + return module + } + + for _, ms := range state.Modules { + module := findModule(ms.Addr) + + for _, rs := range ms.Resources { + inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact()) + if _, exists := module.Providers[inst]; !exists { + module.Providers[inst] = moduledeps.ProviderDependency{ + Constraints: discovery.AllVersions, + Reason: moduledeps.ProviderDependencyFromState, + } + } + } + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go index bd32c79f..e4952039 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go @@ -1,14 +1,22 @@ package terraform -// NodeCountBoundary fixes any "count boundarie" in the state: resources -// that are named "foo.0" when they should be named "foo" -type NodeCountBoundary struct{} +import ( + "github.com/hashicorp/terraform/configs" +) + +// NodeCountBoundary fixes up any transitions between "each modes" in objects +// saved in state, such as switching from NoEach to EachInt. 
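+//
+// For example (illustrative): when "count" is first added to an existing
+// resource, the state entry aws_instance.foo must become
+// aws_instance.foo[0], and the reverse rename applies when "count" is
+// removed again.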
+type NodeCountBoundary struct { + Config *configs.Config +} func (n *NodeCountBoundary) Name() string { - return "meta.count-boundary (count boundary fixup)" + return "meta.count-boundary (EachMode fixup)" } // GraphNodeEvalable func (n *NodeCountBoundary) EvalTree() EvalNode { - return &EvalCountFixZeroOneBoundaryGlobal{} + return &EvalCountFixZeroOneBoundaryGlobal{ + Config: n.Config, + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go index e32cea88..6ba39904 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go @@ -1,22 +1,40 @@ package terraform -// NodeDestroyableDataResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. -type NodeDestroyableDataResource struct { - *NodeAbstractResource +import ( + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodeDestroyableDataResourceInstance represents a resource that is "destroyable": +// it is ready to be destroyed. +type NodeDestroyableDataResourceInstance struct { + *NodeAbstractResourceInstance } // GraphNodeEvalable -func (n *NodeDestroyableDataResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() - // stateId is the ID to put into the state - stateId := addr.stateId() + var providerSchema *ProviderSchema + // We don't need the provider, but we're calling EvalGetProvider to load the + // schema. + var provider providers.Interface // Just destroy it. - var state *InstanceState - return &EvalWriteState{ - Name: stateId, - State: &state, // state is nil here + var state *states.ResourceInstanceObject + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalWriteState{ + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + }, + }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go index d504c892..ab821634 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go @@ -2,34 +2,71 @@ package terraform import ( "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" ) -// NodeRefreshableDataResource represents a resource that is "plannable": -// it is ready to be planned in order to create a diff. +// NodeRefreshableDataResource represents a resource that is "refreshable". 
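+//
+// "Refreshable" here means a data resource, e.g. (illustrative)
+//
+//	data "aws_ami" "example" {
+//	  # ...
+//	}
+//
+// whose result can be re-read during the refresh walk whenever its
+// configuration is fully known at that point.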
type NodeRefreshableDataResource struct { - *NodeAbstractCountResource + *NodeAbstractResource } +var ( + _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil) + _ GraphNodeResource = (*NodeRefreshableDataResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil) +) + // GraphNodeDynamicExpandable func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err + var diags tfdiags.Diagnostics + + count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() } + if !countKnown { + // If the count isn't known yet, we'll skip refreshing and try expansion + // again during the plan walk. + return nil, nil + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider return &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, + } + } + + // We also need a destroyable resource for orphans that are a result of a + // scaled-in count. + concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and provider since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: a, } } @@ -38,15 +75,25 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er // Expand the count. &ResourceCountTransformer{ Concrete: concreteResource, + Schema: n.Schema, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans. As these are orphaned refresh nodes, we add them + // directly as NodeDestroyableDataResource. 
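+		// For example (illustrative): if a data resource's count shrank
+		// from 3 to 1, instances [1] and [2] are orphans; the transformer
+		// below gives each one a destroy node so its state entry is removed.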
+ &OrphanResourceCountTransformer{ + Concrete: concreteResourceDestroyable, Count: count, Addr: n.ResourceAddr(), + State: state, }, // Attach the state &AttachStateTransformer{State: state}, // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, + &TargetsTransformer{Targets: n.Targets}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -62,137 +109,118 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er Name: "NodeRefreshableDataResource", } - return b.Build(ctx.Path()) + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() } -// NodeRefreshableDataResourceInstance represents a _single_ resource instance +// NodeRefreshableDataResourceInstance represents a single resource instance // that is refreshable. type NodeRefreshableDataResourceInstance struct { - *NodeAbstractResource + *NodeAbstractResourceInstance } // GraphNodeEvalable func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr + addr := n.ResourceInstanceAddr() - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } - - // Get the state if we have it, if not we build it - rs := n.ResourceState - if rs == nil { - rs = &ResourceState{} - } - - // If the config isn't empty we update the state - if n.Config != nil { - rs = &ResourceState{ - Type: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: n.StateReferences(), - } - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } - - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var config *ResourceConfig - var diff *InstanceDiff - var provider ResourceProvider - var state *InstanceState + // These variables are the state for the eval sequence below, and are + // updated through pointers. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + // Always destroy the existing state first, since we must // make sure that values from a previous read will not // get interpolated if we end up needing to defer our // loading until apply time. &EvalWriteState{ - Name: stateId, - ResourceType: rs.Type, - Provider: rs.Provider, - Dependencies: rs.Dependencies, - State: &state, // state is nil here + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, // a pointer to nil, here + ProviderSchema: &providerSchema, }, - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - // The rest of this pass can proceed only if there are no - // computed values in our config. - // (If there are, we'll deal with this during the plan and - // apply phases.) &EvalIf{ If: func(ctx EvalContext) (bool, error) { - if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 { - return true, EvalEarlyExitError{} - } - // If the config explicitly has a depends_on for this // data source, assume the intention is to prevent - // refreshing ahead of that dependency. 
+				// refreshing ahead of that dependency, and therefore
+				// we need to deal with this resource during the apply
+				// phase.
 				if len(n.Config.DependsOn) > 0 {
 					return true, EvalEarlyExitError{}
 				}
 
 				return true, nil
 			},
-			Then: EvalNoop{},
 		},
 
-		// The remainder of this pass is the same as running
-		// a "plan" pass immediately followed by an "apply" pass,
-		// populating the state early so it'll be available to
-		// provider configurations that need this data during
-		// refresh/plan.
-		&EvalGetProvider{
-			Name:   n.ProvidedBy()[0],
-			Output: &provider,
-		},
-
-		&EvalReadDataDiff{
-			Info:        info,
-			Config:      &config,
-			Provider:    &provider,
-			Output:      &diff,
-			OutputState: &state,
+			// EvalReadData will _attempt_ to read the data source, but may
+			// generate an incomplete planned object if the configuration
+			// includes values that won't be known until apply.
+			&EvalReadData{
+				Addr:              addr.Resource,
+				Config:            n.Config,
+				Dependencies:      n.StateReferences(),
+				Provider:          &provider,
+				ProviderAddr:      n.ResolvedProvider,
+				ProviderSchema:    &providerSchema,
+				OutputChange:      &change,
+				OutputConfigValue: &configVal,
+				OutputState:       &state,
 			},
 
-			&EvalReadDataApply{
-				Info:     info,
-				Diff:     &diff,
-				Provider: &provider,
-				Output:   &state,
-			},
-
-			&EvalWriteState{
-				Name:         stateId,
-				ResourceType: rs.Type,
-				Provider:     rs.Provider,
-				Dependencies: rs.Dependencies,
-				State:        &state,
+			&EvalIf{
+				If: func(ctx EvalContext) (bool, error) {
+					return (*state).Status != states.ObjectPlanned, nil
+				},
+				Then: &EvalSequence{
+					Nodes: []EvalNode{
+						&EvalWriteState{
+							Addr:           addr.Resource,
+							ProviderAddr:   n.ResolvedProvider,
+							State:          &state,
+							ProviderSchema: &providerSchema,
+						},
+						&EvalUpdateStateHook{},
+					},
+				},
+				Else: &EvalSequence{
+					// We can't deal with this yet, so we'll repeat this step
+					// during the plan walk to produce a planned change to read
+					// this during the apply walk. However, we do still need to
+					// save the generated change and partial state so that
+					// results from it can be included in other data resources
+					// or provider configurations during the refresh walk.
+					// (The planned object we save in the state here will be
+					// pruned out at the end of the refresh walk, returning
+					// it back to being unset again for subsequent walks.)
+					Nodes: []EvalNode{
+						&EvalWriteDiff{
+							Addr:           addr.Resource,
+							Change:         &change,
+							ProviderSchema: &providerSchema,
+						},
+						&EvalWriteState{
+							Addr:           addr.Resource,
+							ProviderAddr:   n.ResolvedProvider,
+							State:          &state,
+							ProviderSchema: &providerSchema,
+						},
+					},
+				},
 			},
-
-			&EvalUpdateStateHook{},
 		},
 	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
new file mode 100644
index 00000000..591eb305
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
@@ -0,0 +1,70 @@
+package terraform
+
+import (
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/lang"
+)
+
+// NodeLocal represents a named local value in a particular module.
+//
+// Local value nodes only have one operation, common to all walk types:
+// evaluate the result and place it in state.
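+//
+// For example (illustrative), a configuration block such as
+//
+//	locals {
+//	  instance_name = "web-${var.environment}"
+//	}
+//
+// yields one NodeLocal whose evaluation records the value used by later
+// references to local.instance_name.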
+type NodeLocal struct { + Addr addrs.AbsLocalValue + Config *configs.Local +} + +var ( + _ GraphNodeSubPath = (*NodeLocal)(nil) + _ RemovableIfNotTargeted = (*NodeLocal)(nil) + _ GraphNodeReferenceable = (*NodeLocal)(nil) + _ GraphNodeReferencer = (*NodeLocal)(nil) + _ GraphNodeEvalable = (*NodeLocal)(nil) + _ dag.GraphNodeDotter = (*NodeLocal)(nil) +) + +func (n *NodeLocal) Name() string { + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeLocal) Path() addrs.ModuleInstance { + return n.Addr.Module +} + +// RemovableIfNotTargeted +func (n *NodeLocal) RemoveIfNotTargeted() bool { + return true +} + +// GraphNodeReferenceable +func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.LocalValue} +} + +// GraphNodeReferencer +func (n *NodeLocal) References() []*addrs.Reference { + refs, _ := lang.ReferencesInExpr(n.Config.Expr) + return appendResourceDestroyReferences(refs) +} + +// GraphNodeEvalable +func (n *NodeLocal) EvalTree() EvalNode { + return &EvalLocal{ + Addr: n.Addr.LocalValue, + Expr: n.Config.Expr, + } +} + +// dag.GraphNodeDotter impl. +func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go new file mode 100644 index 00000000..cb55a1a8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go @@ -0,0 +1,81 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" +) + +// NodeModuleRemoved represents a module that is no longer in the +// config. +type NodeModuleRemoved struct { + Addr addrs.ModuleInstance +} + +var ( + _ GraphNodeSubPath = (*NodeModuleRemoved)(nil) + _ GraphNodeEvalable = (*NodeModuleRemoved)(nil) + _ GraphNodeReferencer = (*NodeModuleRemoved)(nil) + _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil) +) + +func (n *NodeModuleRemoved) Name() string { + return fmt.Sprintf("%s (removed)", n.Addr.String()) +} + +// GraphNodeSubPath +func (n *NodeModuleRemoved) Path() addrs.ModuleInstance { + return n.Addr +} + +// GraphNodeEvalable +func (n *NodeModuleRemoved) EvalTree() EvalNode { + return &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, + Node: &EvalCheckModuleRemoved{ + Addr: n.Addr, + }, + } +} + +func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + // Our "References" implementation indicates that this node depends on + // the call to the module it represents, which implicitly depends on + // everything inside the module. That reference must therefore be + // interpreted in terms of our parent module. + return n.Addr, n.Addr.Parent() +} + +func (n *NodeModuleRemoved) References() []*addrs.Reference { + // We depend on the call to the module we represent, because that + // implicitly then depends on everything inside that module. + // Our ReferenceOutside implementation causes this to be interpreted + // within the parent module. + + _, call := n.Addr.CallInstance() + return []*addrs.Reference{ + { + Subject: call, + + // No source range here, because there's nothing reasonable for + // us to return. + }, + } +} + +// EvalCheckModuleRemoved is an EvalNode implementation that verifies that +// a module has been removed from the state as expected. 
+type EvalCheckModuleRemoved struct { + Addr addrs.ModuleInstance +} + +func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) { + mod := ctx.State().Module(n.Addr) + if mod != nil { + // If we get here then that indicates a bug either in the states + // module or in an earlier step of the graph walk, since we should've + // pruned out the module when the last resource was removed from it. + return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr) + } + return nil, nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go index 13fe8fc3..aca5a6a3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go @@ -1,40 +1,43 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" + "github.com/zclconf/go-cty/cty" ) // NodeApplyableModuleVariable represents a module variable input during // the apply step. type NodeApplyableModuleVariable struct { - PathValue []string - Config *config.Variable // Config is the var in the config - Value *config.RawConfig // Value is the value that is set - - Module *module.Tree // Antiquated, want to remove + Addr addrs.AbsInputVariableInstance + Config *configs.Variable // Config is the var in the config + Expr hcl.Expression // Expr is the value expression given in the call } -func (n *NodeApplyableModuleVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +// Ensure that we are implementing all of the interfaces we think we are +// implementing. +var ( + _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil) + _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) - return result +func (n *NodeApplyableModuleVariable) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeApplyableModuleVariable) Path() []string { - // We execute in the parent scope (above our own module) so that - // we can access the proper interpolations. - if len(n.PathValue) > 2 { - return n.PathValue[:len(n.PathValue)-1] - } - - return rootModulePath +func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance { + // We execute in the parent scope (above our own module) because + // expressions in our value are resolved in that context. 
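+	// For example (illustrative): the value for module.network's input
+	// variable "cidr" is written inside the calling module's
+	//
+	//	module "network" { cidr = var.base_cidr }
+	//
+	// block, so it must be resolved in that parent module's scope.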
+ return n.Addr.Module.Parent() } // RemovableIfNotTargeted @@ -44,82 +47,96 @@ func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool { return true } -// GraphNodeReferenceGlobal -func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { - // We have to create fully qualified references because we cross - // boundaries here: our ReferenceableName is in one path and our - // References are from another path. - return true +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + + // Module input variables have their value expressions defined in the + // context of their calling (parent) module, and so references from + // a node of this type should be resolved in the parent module instance. + referencePath = n.Addr.Module.Parent() + + // Input variables are _referenced_ from their own module, though. + selfPath = n.Addr.Module + + return // uses named return values } // GraphNodeReferenceable -func (n *NodeApplyableModuleVariable) ReferenceableName() []string { - return []string{n.Name()} +func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Variable} } // GraphNodeReferencer -func (n *NodeApplyableModuleVariable) References() []string { - // If we have no value set, we depend on nothing - if n.Value == nil { - return nil - } +func (n *NodeApplyableModuleVariable) References() []*addrs.Reference { - // Can't depend on anything if we're in the root - if len(n.PathValue) < 2 { + // If we have no value expression, we cannot depend on anything. + if n.Expr == nil { return nil } - // Otherwise, we depend on anything that is in our value, but - // specifically in the namespace of the parent path. - // Create the prefix based on the path - var prefix string - if p := n.Path(); len(p) > 0 { - prefix = modulePrefixStr(p) + // Variables in the root don't depend on anything, because their values + // are gathered prior to the graph walk and recorded in the context. + if len(n.Addr.Module) == 0 { + return nil } - result := ReferencesFromConfig(n.Value) - return modulePrefixList(result, prefix) + // Otherwise, we depend on anything referenced by our value expression. + // We ignore diagnostics here under the assumption that we'll re-eval + // all these things later and catch them then; for our purposes here, + // we only care about valid references. + // + // Due to our GraphNodeReferenceOutside implementation, the addresses + // returned by this function are interpreted in the _parent_ module from + // where our associated variable was declared, which is correct because + // our value expression is assigned within a "module" block in the parent + // module. + refs, _ := lang.ReferencesInExpr(n.Expr) + return refs } // GraphNodeEvalable func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { // If we have no value, do nothing - if n.Value == nil { + if n.Expr == nil { return &EvalNoop{} } // Otherwise, interpolate the value of this variable and set it // within the variables mapping. 
- var config *ResourceConfig - variables := make(map[string]interface{}) - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Value, - Output: &config, - }, + vals := make(map[string]cty.Value) - &EvalVariableBlock{ - Config: &config, - VariableValues: variables, - }, + _, call := n.Addr.Module.CallInstance() - &EvalCoerceMapVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalOpFilter{ + Ops: []walkOperation{walkRefresh, walkPlan, walkApply, + walkDestroy, walkValidate}, + Node: &EvalModuleCallArgument{ + Addr: n.Addr.Variable, + Config: n.Config, + Expr: n.Expr, + Values: vals, + + IgnoreDiagnostics: false, + }, }, - &EvalTypeCheckVariable{ - Variables: variables, - ModulePath: n.PathValue, - ModuleTree: n.Module, + &EvalSetModuleCallArguments{ + Module: call, + Values: vals, }, + }, + } +} - &EvalSetVariables{ - Module: &n.PathValue[len(n.PathValue)-1], - Variables: variables, - }, +// dag.GraphNodeDotter impl. +func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go index e28e6f02..bb3d0653 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go @@ -2,30 +2,38 @@ package terraform import ( "fmt" - "strings" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" ) // NodeApplyableOutput represents an output that is "applyable": // it is ready to be applied. 
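+//
+// For example (illustrative), a block such as
+//
+//	output "instance_ip" {
+//	  value = aws_instance.web.private_ip
+//	}
+//
+// becomes one NodeApplyableOutput per module instance that declares it.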
type NodeApplyableOutput struct { - PathValue []string - Config *config.Output // Config is the output in the config + Addr addrs.AbsOutputValue + Config *configs.Output // Config is the output in the config } -func (n *NodeApplyableOutput) Name() string { - result := fmt.Sprintf("output.%s", n.Config.Name) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +var ( + _ GraphNodeSubPath = (*NodeApplyableOutput)(nil) + _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil) + _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil) + _ GraphNodeReferencer = (*NodeApplyableOutput)(nil) + _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil) + _ GraphNodeEvalable = (*NodeApplyableOutput)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil) +) - return result +func (n *NodeApplyableOutput) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeApplyableOutput) Path() []string { - return n.PathValue +func (n *NodeApplyableOutput) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -35,42 +43,158 @@ func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool { return true } +// GraphNodeTargetDownstream +func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool { + // If any of the direct dependencies of an output are targeted then + // the output must always be targeted as well, so its value will always + // be up-to-date at the completion of an apply walk. + return true +} + +func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) { + + // Output values have their expressions resolved in the context of the + // module where they are defined. + referencePath = addr.Module + + // ...but they are referenced in the context of their calling module. + selfPath = addr.Module.Parent() + + return // uses named return values + +} + +// GraphNodeReferenceOutside implementation +func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} + +func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable { + // An output in the root module can't be referenced at all. + if addr.Module.IsRoot() { + return nil + } + + // Otherwise, we can be referenced via a reference to our output name + // on the parent module's call, or via a reference to the entire call. + // e.g. module.foo.bar or just module.foo . + // Note that our ReferenceOutside method causes these addresses to be + // relative to the calling module, not the module where the output + // was declared. + _, outp := addr.ModuleCallOutput() + _, call := addr.Module.CallInstance() + return []addrs.Referenceable{outp, call} + +} + // GraphNodeReferenceable -func (n *NodeApplyableOutput) ReferenceableName() []string { - name := fmt.Sprintf("output.%s", n.Config.Name) - return []string{name} +func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) } -// GraphNodeReferencer -func (n *NodeApplyableOutput) References() []string { - var result []string - result = append(result, n.Config.DependsOn...) - result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 
-	for _, v := range result {
-		split := strings.Split(v, "/")
-		for i, s := range split {
-			split[i] = s + ".destroy"
-		}
-
-		result = append(result, strings.Join(split, "/"))
+func referencesForOutput(c *configs.Output) []*addrs.Reference {
+	impRefs, _ := lang.ReferencesInExpr(c.Expr)
+	expRefs, _ := lang.References(c.DependsOn)
+	l := len(impRefs) + len(expRefs)
+	if l == 0 {
+		return nil
 	}
+	refs := make([]*addrs.Reference, 0, l)
+	refs = append(refs, impRefs...)
+	refs = append(refs, expRefs...)
+	return refs
+
+}
 
-	return result
+// GraphNodeReferencer
+func (n *NodeApplyableOutput) References() []*addrs.Reference {
+	return appendResourceDestroyReferences(referencesForOutput(n.Config))
 }
 
 // GraphNodeEvalable
 func (n *NodeApplyableOutput) EvalTree() EvalNode {
-	return &EvalOpFilter{
-		Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
-			walkDestroy, walkInput, walkValidate},
-		Node: &EvalSequence{
-			Nodes: []EvalNode{
-				&EvalWriteOutput{
-					Name:      n.Config.Name,
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalOpFilter{
+				Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
+				Node: &EvalWriteOutput{
+					Addr:      n.Addr.OutputValue,
 					Sensitive: n.Config.Sensitive,
-					Value:     n.Config.RawConfig,
+					Expr:      n.Config.Expr,
 				},
 			},
 		},
 	}
 }
+
+// dag.GraphNodeDotter impl.
+func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+	return &dag.DotNode{
+		Name: name,
+		Attrs: map[string]string{
+			"label": n.Name(),
+			"shape": "note",
+		},
+	}
+}
+
+// NodeDestroyableOutput represents an output that is "destroyable":
+// its application will remove the output from the state.
+type NodeDestroyableOutput struct {
+	Addr   addrs.AbsOutputValue
+	Config *configs.Output // Config is the output in the config
+}
+
+var (
+	_ GraphNodeSubPath          = (*NodeDestroyableOutput)(nil)
+	_ RemovableIfNotTargeted    = (*NodeDestroyableOutput)(nil)
+	_ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil)
+	_ GraphNodeReferencer       = (*NodeDestroyableOutput)(nil)
+	_ GraphNodeEvalable         = (*NodeDestroyableOutput)(nil)
+	_ dag.GraphNodeDotter       = (*NodeDestroyableOutput)(nil)
+)
+
+func (n *NodeDestroyableOutput) Name() string {
+	return fmt.Sprintf("%s (destroy)", n.Addr.String())
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance {
+	return n.Addr.Module
+}
+
+// RemovableIfNotTargeted
+func (n *NodeDestroyableOutput) RemoveIfNotTargeted() bool {
+	// We need to add this so that this node will be removed if
+	// it isn't targeted or a dependency of a target.
+	return true
+}
+
+// This will keep the destroy node in the graph if its corresponding output
+// node is also in the destroy graph.
+func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag.Set) bool {
+	return true
+}
+
+// GraphNodeReferencer
+func (n *NodeDestroyableOutput) References() []*addrs.Reference {
+	return referencesForOutput(n.Config)
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableOutput) EvalTree() EvalNode {
+	return &EvalDeleteOutput{
+		Addr: n.Addr.OutputValue,
+	}
+}
+
+// dag.GraphNodeDotter impl.
+func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go index 636a15df..518b8aa0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go @@ -2,26 +2,39 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/addrs" ) // NodeOutputOrphan represents an output that is an orphan. type NodeOutputOrphan struct { - OutputName string - PathValue []string + Addr addrs.AbsOutputValue } +var ( + _ GraphNodeSubPath = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil) + _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil) + _ GraphNodeEvalable = (*NodeOutputOrphan)(nil) +) + func (n *NodeOutputOrphan) Name() string { - result := fmt.Sprintf("output.%s (orphan)", n.OutputName) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } + return fmt.Sprintf("%s (orphan)", n.Addr.String()) +} + +// GraphNodeReferenceOutside implementation +func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) { + return referenceOutsideForOutput(n.Addr) +} - return result +// GraphNodeReferenceable +func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable { + return referenceableAddrsForOutput(n.Addr) } // GraphNodeSubPath -func (n *NodeOutputOrphan) Path() []string { - return n.PathValue +func (n *NodeOutputOrphan) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable @@ -29,7 +42,7 @@ func (n *NodeOutputOrphan) EvalTree() EvalNode { return &EvalOpFilter{ Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, Node: &EvalDeleteOutput{ - Name: n.OutputName, + Addr: n.Addr.OutputValue, }, } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go index 8e2c176f..2071ab16 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go @@ -7,5 +7,5 @@ type NodeApplyableProvider struct { // GraphNodeEvalable func (n *NodeApplyableProvider) EvalTree() EvalNode { - return ProviderEvalTree(n.NameValue, n.ProviderConfig()) + return ProviderEvalTree(n, n.ProviderConfig()) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go index 6cc83656..a0cdcfe0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go @@ -1,9 +1,10 @@ package terraform import ( - "fmt" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" - "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/dag" ) @@ -14,28 +15,33 @@ type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex // NodeAbstractProvider represents a provider that has no associated operations. 
// It registers all the common interfaces across operations for providers. type NodeAbstractProvider struct { - NameValue string - PathValue []string + Addr addrs.AbsProviderConfig // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly // set if you already have that information. - Config *config.ProviderConfig + Config *configs.Provider + Schema *configschema.Block } -func (n *NodeAbstractProvider) Name() string { - result := fmt.Sprintf("provider.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) - } +var ( + _ GraphNodeSubPath = (*NodeAbstractProvider)(nil) + _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil) + _ GraphNodeReferencer = (*NodeAbstractProvider)(nil) + _ GraphNodeProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil) + _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil) +) - return result +func (n *NodeAbstractProvider) Name() string { + return n.Addr.String() } // GraphNodeSubPath -func (n *NodeAbstractProvider) Path() []string { - return n.PathValue +func (n *NodeAbstractProvider) Path() addrs.ModuleInstance { + return n.Addr.Module } // RemovableIfNotTargeted @@ -46,33 +52,38 @@ func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool { } // GraphNodeReferencer -func (n *NodeAbstractProvider) References() []string { - if n.Config == nil { +func (n *NodeAbstractProvider) References() []*addrs.Reference { + if n.Config == nil || n.Schema == nil { return nil } - return ReferencesFromConfig(n.Config.RawConfig) + return ReferencesFromConfig(n.Config.Config, n.Schema) } // GraphNodeProvider -func (n *NodeAbstractProvider) ProviderName() string { - return n.NameValue +func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.Addr } // GraphNodeProvider -func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig { +func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider { if n.Config == nil { return nil } - return n.Config.RawConfig + return n.Config } // GraphNodeAttachProvider -func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { +func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) { n.Config = c } +// GraphNodeAttachProviderConfigSchema impl. +func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) { + n.Schema = schema +} + // GraphNodeDotter impl. func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { return &dag.DotNode{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go index 25e7e620..30d8813a 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go @@ -2,6 +2,8 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/dag" ) // NodeDisabledProvider represents a provider that is disabled. 
A disabled @@ -11,28 +13,15 @@ type NodeDisabledProvider struct { *NodeAbstractProvider } +var ( + _ GraphNodeSubPath = (*NodeDisabledProvider)(nil) + _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil) + _ GraphNodeReferencer = (*NodeDisabledProvider)(nil) + _ GraphNodeProvider = (*NodeDisabledProvider)(nil) + _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil) + _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil) +) + func (n *NodeDisabledProvider) Name() string { return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) } - -// GraphNodeEvalable -func (n *NodeDisabledProvider) EvalTree() EvalNode { - var resourceConfig *ResourceConfig - return &EvalSequence{ - Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.ProviderConfig(), - Output: &resourceConfig, - }, - &EvalBuildProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - Output: &resourceConfig, - }, - &EvalSetProviderConfig{ - Provider: n.ProviderName(), - Config: &resourceConfig, - }, - }, - } -} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go new file mode 100644 index 00000000..580e60cb --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go @@ -0,0 +1,20 @@ +package terraform + +// NodeEvalableProvider represents a provider during an "eval" walk. +// This special provider node type just initializes a provider and +// fetches its schema, without configuring it or otherwise interacting +// with it. +type NodeEvalableProvider struct { + *NodeAbstractProvider +} + +// GraphNodeEvalable +func (n *NodeEvalableProvider) EvalTree() EvalNode { + addr := n.Addr + relAddr := addr.ProviderConfig + + return &EvalInitProvider{ + TypeName: relAddr.Type, + Addr: addr.ProviderConfig, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go index bb117c1d..31ed1a8c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go @@ -3,6 +3,7 @@ package terraform import ( "fmt" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/config" ) @@ -10,7 +11,7 @@ import ( // It registers all the common interfaces across operations for providers. 
type NodeProvisioner struct { NameValue string - PathValue []string + PathValue addrs.ModuleInstance // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly @@ -19,17 +20,23 @@ type NodeProvisioner struct { Config *config.ProviderConfig } +var ( + _ GraphNodeSubPath = (*NodeProvisioner)(nil) + _ GraphNodeProvisioner = (*NodeProvisioner)(nil) + _ GraphNodeEvalable = (*NodeProvisioner)(nil) +) + func (n *NodeProvisioner) Name() string { result := fmt.Sprintf("provisioner.%s", n.NameValue) - if len(n.PathValue) > 1 { - result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) + if len(n.PathValue) > 0 { + result = fmt.Sprintf("%s.%s", n.PathValue.String(), result) } return result } // GraphNodeSubPath -func (n *NodeProvisioner) Path() []string { +func (n *NodeProvisioner) Path() addrs.ModuleInstance { return n.PathValue } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go index e4577e9d..51484558 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go @@ -2,10 +2,16 @@ package terraform import ( "fmt" - "strings" + "log" + "sort" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/lang" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) // ConcreteResourceNodeFunc is a callback type used to convert an @@ -16,218 +22,430 @@ type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex // The type of operation cannot be assumed, only that this node represents // the given resource. type GraphNodeResource interface { - ResourceAddr() *ResourceAddress + ResourceAddr() addrs.AbsResource +} + +// ConcreteResourceInstanceNodeFunc is a callback type used to convert an +// abstract resource instance to a concrete one of some type. +type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex + +// GraphNodeResourceInstance is implemented by any nodes that represent +// a resource instance. A single resource may have multiple instances if, +// for example, the "count" or "for_each" argument is used for it in +// configuration. +type GraphNodeResourceInstance interface { + ResourceInstanceAddr() addrs.AbsResourceInstance } // NodeAbstractResource represents a resource that has no associated // operations. It registers all the interfaces for a resource that common // across multiple operation types. type NodeAbstractResource struct { - Addr *ResourceAddress // Addr is the address for this resource + Addr addrs.AbsResource // Addr is the address for this resource // The fields below will be automatically set using the Attach // interfaces if you're running those transforms, but also be explicitly // set if you already have that information. 
- Config *config.Resource // Config is the resource in the config - ResourceState *ResourceState // ResourceState is the ResourceState for this + Schema *configschema.Block // Schema for processing the configuration body + SchemaVersion uint64 // Schema version of "Schema", as decided by the provider + Config *configs.Resource // Config is the resource in the config + + ProvisionerSchemas map[string]*configschema.Block + + Targets []addrs.Targetable // Set from GraphNodeTargetable - Targets []ResourceAddress // Set from GraphNodeTargetable + // The address of the provider this resource will use + ResolvedProvider addrs.AbsProviderConfig } -func (n *NodeAbstractResource) Name() string { - return n.Addr.String() +var ( + _ GraphNodeSubPath = (*NodeAbstractResource)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResource)(nil) + _ GraphNodeReferencer = (*NodeAbstractResource)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil) + _ GraphNodeResource = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil) + _ GraphNodeTargetable = (*NodeAbstractResource)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil) +) + +// NewNodeAbstractResource creates an abstract resource graph node for +// the given absolute resource address. +func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource { + return &NodeAbstractResource{ + Addr: addr, + } } -// GraphNodeSubPath -func (n *NodeAbstractResource) Path() []string { - return n.Addr.Path +// NodeAbstractResourceInstance represents a resource instance with no +// associated operations. It embeds NodeAbstractResource but additionally +// contains an instance key, used to identify one of potentially many +// instances that were created from a resource in configuration, e.g. using +// the "count" or "for_each" arguments. +type NodeAbstractResourceInstance struct { + NodeAbstractResource + InstanceKey addrs.InstanceKey + + // The fields below will be automatically set using the Attach + // interfaces if you're running those transforms, but also be explicitly + // set if you already have that information. + + ResourceState *states.Resource } -// GraphNodeReferenceable -func (n *NodeAbstractResource) ReferenceableName() []string { - // We always are referenceable as "type.name" as long as - // we have a config or address. Determine what that value is. 
- var id string - if n.Config != nil { - id = n.Config.Id() - } else if n.Addr != nil { - addrCopy := n.Addr.Copy() - addrCopy.Path = nil // ReferenceTransformer handles paths - addrCopy.Index = -1 // We handle indexes below - id = addrCopy.String() - } else { - // No way to determine our type.name, just return - return nil +var ( + _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil) + _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil) + _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil) +) + +// NewNodeAbstractResourceInstance creates an abstract resource instance graph +// node for the given absolute resource instance address. +func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance { + // Due to the fact that we embed NodeAbstractResource, the given address + // actually ends up split between the resource address in the embedded + // object and the InstanceKey field in our own struct. The + // ResourceInstanceAddr method will stick these back together again on + // request. + return &NodeAbstractResourceInstance{ + NodeAbstractResource: NodeAbstractResource{ + Addr: addr.ContainingResource(), + }, + InstanceKey: addr.Resource.Key, } +} - var result []string +func (n *NodeAbstractResource) Name() string { + return n.ResourceAddr().String() +} - // Always include our own ID. This is primarily for backwards - // compatibility with states that didn't yet support the more - // specific dep string. - result = append(result, id) +func (n *NodeAbstractResourceInstance) Name() string { + return n.ResourceInstanceAddr().String() +} - // We represent all multi-access - result = append(result, fmt.Sprintf("%s.*", id)) +// GraphNodeSubPath +func (n *NodeAbstractResource) Path() addrs.ModuleInstance { + return n.Addr.Module +} - // We represent either a specific number, or all numbers - suffix := "N" - if n.Addr != nil { - idx := n.Addr.Index - if idx == -1 { - idx = 0 - } +// GraphNodeReferenceable +func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr.Resource} +} - suffix = fmt.Sprintf("%d", idx) +// GraphNodeReferenceable +func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + addr := n.ResourceInstanceAddr() + return []addrs.Referenceable{ + addr.Resource, + + // A resource instance can also be referenced by the address of its + // containing resource, so that e.g. a reference to aws_instance.foo + // would match both aws_instance.foo[0] and aws_instance.foo[1]. 
+		addr.ContainingResource().Resource,
 	}
-	result = append(result, fmt.Sprintf("%s.%s", id, suffix))
-
-	return result
 }
 
 // GraphNodeReferencer
-func (n *NodeAbstractResource) References() []string {
-	// If we have a config, that is our source of truth
+func (n *NodeAbstractResource) References() []*addrs.Reference {
+	// If we have a config then we prefer to use that.
 	if c := n.Config; c != nil {
-		// Grab all the references
-		var result []string
-		result = append(result, c.DependsOn...)
-		result = append(result, ReferencesFromConfig(c.RawCount)...)
-		result = append(result, ReferencesFromConfig(c.RawConfig)...)
-		for _, p := range c.Provisioners {
-			if p.When == config.ProvisionerWhenCreate {
-				result = append(result, ReferencesFromConfig(p.ConnInfo)...)
-				result = append(result, ReferencesFromConfig(p.RawConfig)...)
+		var result []*addrs.Reference
+
+		for _, traversal := range c.DependsOn {
+			ref, err := addrs.ParseRef(traversal)
+			if err != nil {
+				// We ignore this here, because this isn't a suitable place to return
+				// errors. This situation should be caught and rejected during
+				// validation.
+				log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
+				continue
 			}
+
+			result = append(result, ref)
+		}
+
+		if n.Schema == nil {
+			// Should never happen, but we'll log if it does so that we can
+			// see this easily when debugging.
+			log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
 		}
+		refs, _ := lang.ReferencesInExpr(c.Count)
+		result = append(result, refs...)
+		refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
+		result = append(result, refs...)
+		if c.Managed != nil {
+			for _, p := range c.Managed.Provisioners {
+				if p.When != configs.ProvisionerWhenCreate {
+					continue
+				}
+				if p.Connection != nil {
+					refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema)
+					result = append(result, refs...)
+				}
+
+				schema := n.ProvisionerSchemas[p.Type]
+				if schema == nil {
+					log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name())
+				}
+				refs, _ = lang.ReferencesInBlock(p.Config, schema)
+				result = append(result, refs...)
+			}
+		}
 
 		return result
 	}
 
-	// If we have state, that is our next source
-	if s := n.ResourceState; s != nil {
-		return s.Dependencies
+	// Otherwise, we have no references.
+	return nil
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
+	// If we have a configuration attached then we'll delegate to our
+	// embedded abstract resource, which knows how to extract dependencies
+	// from configuration.
+	if n.Config != nil {
+		if n.Schema == nil {
+			// We'll produce a log message about this out here so that
+			// we can include the full instance address, since the equivalent
+			// message in NodeAbstractResource.References cannot see it.
+			log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
+			return nil
+		}
+		return n.NodeAbstractResource.References()
+	}
+
+	// Otherwise, if we have state then we'll use the values stored in state
+	// as a fallback.
+	if rs := n.ResourceState; rs != nil {
+		if s := rs.Instance(n.InstanceKey); s != nil {
+			// State is still storing dependencies as old-style strings, so we'll
+			// need to do a little work here to massage this to the form we now
+			// want.
+			var result []*addrs.Reference
+
+			// It is (apparently) possible for s.Current to be nil.
This proved + // difficult to reproduce, so we will fix the symptom here and hope + // to find the root cause another time. + // + // https://github.com/hashicorp/terraform/issues/21407 + if s.Current == nil { + log.Printf("[WARN] no current state found for %s", n.Name()) + } else { + for _, addr := range s.Current.Dependencies { + if addr == nil { + // Should never happen; indicates a bug in the state loader + panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr())) + } + + // This is a little weird: we need to manufacture an addrs.Reference + // with a fake range here because the state isn't something we can + // make source references into. + result = append(result, &addrs.Reference{ + Subject: addr, + SourceRange: tfdiags.SourceRange{ + Filename: "(state file)", + }, + }) + } + } + return result + } } + // If we have neither config nor state then we have no references. return nil } +// converts an instance address to the legacy dotted notation +func dottedInstanceAddr(tr addrs.ResourceInstance) string { + // The legacy state format uses dot-separated instance keys, + // rather than bracketed as in our modern syntax. + var suffix string + switch tk := tr.Key.(type) { + case addrs.IntKey: + suffix = fmt.Sprintf(".%d", int(tk)) + case addrs.StringKey: + suffix = fmt.Sprintf(".%s", string(tk)) + } + return tr.Resource.String() + suffix +} + // StateReferences returns the dependencies to put into the state for // this resource. -func (n *NodeAbstractResource) StateReferences() []string { - self := n.ReferenceableName() - - // Determine what our "prefix" is for checking for references to - // ourself. - addrCopy := n.Addr.Copy() - addrCopy.Index = -1 - selfPrefix := addrCopy.String() + "." +func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable { + selfAddrs := n.ReferenceableAddrs() + + // Since we don't include the source location references in our + // results from this method, we'll also filter out duplicates: + // there's no point in listing the same object twice without + // that additional context. + seen := map[string]struct{}{} + + // Pretend that we've already "seen" all of our own addresses so that we + // won't record self-references in the state. This can arise if, for + // example, a provisioner for a resource refers to the resource itself, + // which is valid (since provisioners always run after apply) but should + // not create an explicit dependency edge. + for _, selfAddr := range selfAddrs { + seen[selfAddr.String()] = struct{}{} + if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok { + seen[riAddr.ContainingResource().String()] = struct{}{} + } + } depsRaw := n.References() - deps := make([]string, 0, len(depsRaw)) + deps := make([]addrs.Referenceable, 0, len(depsRaw)) for _, d := range depsRaw { - // Ignore any variable dependencies - if strings.HasPrefix(d, "var.") { - continue + subj := d.Subject + if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput { + // For state dependencies, we simplify outputs to just refer + // to the module as a whole. It's not really clear why we do this, + // but this logic is preserved from before the 0.12 rewrite of + // this function. + subj = mco.Call } - // If this has a backup ref, ignore those for now. The old state - // file never contained those and I'd rather store the rich types we - // add in the future. 
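The dottedInstanceAddr helper above bridges modern bracketed instance addresses back to the dot-separated keys the legacy state format expects. A self-contained sketch of the same mapping, with simplified stand-ins for the addrs key types (not the vendored implementation):

```go
package main

import "fmt"

// Simplified stand-ins for addrs.IntKey and addrs.StringKey.
type intKey int
type stringKey string

// dotted mirrors the shape of dottedInstanceAddr above:
// aws_instance.foo[3] becomes "aws_instance.foo.3" in the legacy notation.
func dotted(resource string, key interface{}) string {
	switch k := key.(type) {
	case intKey:
		return fmt.Sprintf("%s.%d", resource, int(k))
	case stringKey:
		return fmt.Sprintf("%s.%s", resource, string(k))
	}
	return resource // no key: a single-instance resource keeps its plain address
}

func main() {
	fmt.Println(dotted("aws_instance.foo", intKey(3)))        // aws_instance.foo.3
	fmt.Println(dotted("aws_instance.foo", stringKey("web"))) // aws_instance.foo.web
	fmt.Println(dotted("aws_instance.bar", nil))              // aws_instance.bar
}
```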
- if idx := strings.IndexRune(d, '/'); idx != -1 { - d = d[:idx] - } - - // If we're referencing ourself, then ignore it - found := false - for _, s := range self { - if d == s { - found = true - } - } - if found { + k := subj.String() + if _, exists := seen[k]; exists { continue } - - // If this is a reference to ourself and a specific index, we keep - // it. For example, if this resource is "foo.bar" and the reference - // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. - if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { - d = d[:len(d)-2] + seen[k] = struct{}{} + switch tr := subj.(type) { + case addrs.ResourceInstance: + deps = append(deps, tr) + case addrs.Resource: + deps = append(deps, tr) + case addrs.ModuleCallInstance: + deps = append(deps, tr) + default: + // No other reference types are recorded in the state. } + } - // This is sad. The dependencies are currently in the format of - // "module.foo.bar" (the full field). This strips the field off. - if strings.HasPrefix(d, "module.") { - parts := strings.SplitN(d, ".", 3) - d = strings.Join(parts[0:2], ".") - } + // We'll also sort them, since that'll avoid creating changes in the + // serialized state that make no semantic difference. + sort.Slice(deps, func(i, j int) bool { + // Simple string-based sort because we just care about consistency, + // not user-friendliness. + return deps[i].String() < deps[j].String() + }) + + return deps +} - deps = append(deps, d) +func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) { + n.ResolvedProvider = p +} + +// GraphNodeProviderConsumer +func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // If we have a config we prefer that above all else + if n.Config != nil { + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false } - return deps + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false } // GraphNodeProviderConsumer -func (n *NodeAbstractResource) ProvidedBy() []string { +func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) { // If we have a config we prefer that above all else if n.Config != nil { - return []string{resourceProvider(n.Config.Type, n.Config.Provider)} + relAddr := n.Config.ProviderConfigAddr() + return relAddr.Absolute(n.Path()), false } // If we have state, then we will use the provider from there - if n.ResourceState != nil && n.ResourceState.Provider != "" { - return []string{n.ResourceState.Provider} + if n.ResourceState != nil { + // An address from the state must match exactly, since we must ensure + // we refresh/destroy a resource with the same provider configuration + // that created it. + return n.ResourceState.ProviderConfig, true } - // Use our type - return []string{resourceProvider(n.Addr.Type, "")} + // Use our type and containing module path to guess a provider configuration address + return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false } // GraphNodeProvisionerConsumer func (n *NodeAbstractResource) ProvisionedBy() []string { // If we have no configuration, then we have no provisioners - if n.Config == nil { + if n.Config == nil || n.Config.Managed == nil { return nil } // Build the list of provisioners we need based on the configuration. // It is okay to have duplicates here. 
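StateReferences above filters out self-references, deduplicates via a "seen" set, and sorts before anything is recorded in state. The same pattern in miniature, with plain strings standing in for addrs.Referenceable; the ProvisionedBy body the preceding comment introduces continues right after this sketch:

```go
package main

import (
	"fmt"
	"sort"
)

func stateRefs(self, refs []string) []string {
	seen := map[string]struct{}{}
	for _, s := range self {
		seen[s] = struct{}{} // pretend we've already "seen" ourselves: no self-references in state
	}
	var deps []string
	for _, r := range refs {
		if _, dup := seen[r]; dup {
			continue
		}
		seen[r] = struct{}{}
		deps = append(deps, r)
	}
	sort.Strings(deps) // deterministic order avoids spurious diffs in serialized state
	return deps
}

func main() {
	fmt.Println(stateRefs(
		[]string{"aws_instance.foo[0]"},
		[]string{"aws_vpc.main", "aws_instance.foo[0]", "module.net", "aws_vpc.main"},
	))
	// Output: [aws_vpc.main module.net]
}
```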
-	result := make([]string, len(n.Config.Provisioners))
-	for i, p := range n.Config.Provisioners {
+	result := make([]string, len(n.Config.Managed.Provisioners))
+	for i, p := range n.Config.Managed.Provisioners {
 		result[i] = p.Type
 	}
 
 	return result
 }
 
-// GraphNodeResource, GraphNodeAttachResourceState
-func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
+// GraphNodeProvisionerConsumer
+func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) {
+	if n.ProvisionerSchemas == nil {
+		n.ProvisionerSchemas = make(map[string]*configschema.Block)
+	}
+	n.ProvisionerSchemas[name] = schema
+}
+
+// GraphNodeResource
+func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource {
 	return n.Addr
 }
 
+// GraphNodeResourceInstance
+func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
+	return n.NodeAbstractResource.Addr.Instance(n.InstanceKey)
+}
+
 // GraphNodeAddressable, TODO: remove, used by target, should unify
 func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
-	return n.ResourceAddr()
+	return NewLegacyResourceAddress(n.Addr)
 }
 
 // GraphNodeTargetable
-func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
+func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
 	n.Targets = targets
 }
 
 // GraphNodeAttachResourceState
-func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
+func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) {
 	n.ResourceState = s
 }
 
 // GraphNodeAttachResourceConfig
-func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
+func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
 	n.Config = c
 }
 
+// GraphNodeAttachResourceSchema impl
+func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) {
+	n.Schema = schema
+	n.SchemaVersion = version
+}
+
 // GraphNodeDotter impl.
 func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
 	return &dag.DotNode{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
index 3599782b..3e2fff3a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -1,357 +1,71 @@
 package terraform
 
 import (
-	"fmt"
+	"log"
 
-	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/lang"
 )
 
 // NodeApplyableResource represents a resource that is "applyable":
-// it is ready to be applied and is represented by a diff.
+// it may need to have its record in the state adjusted to match configuration.
+//
+// Unlike in the plan walk, this resource node does not DynamicExpand. Instead,
+// it should be inserted into the same graph as any instances of the nodes
+// with dependency edges ensuring that the resource is evaluated before any
+// of its instances, which will in turn ensure that the whole-resource record
+// in the state is suitably prepared to receive any updates to instances.
type NodeApplyableResource struct { *NodeAbstractResource } -// GraphNodeCreator -func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { - return n.NodeAbstractResource.Addr -} - -// GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeApplyableResource) References() []string { - result := n.NodeAbstractResource.References() - - // The "apply" side of a resource generally also depends on the - // destruction of its dependencies as well. For example, if a LB - // references a set of VMs with ${vm.foo.*.id}, then we must wait for - // the destruction so we get the newly updated list of VMs. - // - // The exception here is CBD. When CBD is set, we don't do this since - // it would create a cycle. By not creating a cycle, we require two - // applies since the first apply the creation step will use the OLD - // values (pre-destroy) and the second step will update. - // - // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x). - // We mimic that behavior here now and can improve upon it in the future. - // - // This behavior is tested in graph_build_apply_test.go to test ordering. - cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy - if !cbd { - // The "apply" side of a resource always depends on the destruction - // of all its dependencies in addition to the creation. - for _, v := range result { - result = append(result, v+".destroy") - } - } +var ( + _ GraphNodeResource = (*NodeApplyableResource)(nil) + _ GraphNodeEvalable = (*NodeApplyableResource)(nil) + _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil) + _ GraphNodeReferencer = (*NodeApplyableResource)(nil) +) - return result +func (n *NodeApplyableResource) Name() string { + return n.NodeAbstractResource.Name() + " (prepare state)" } -// GraphNodeEvalable -func (n *NodeApplyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, +func (n *NodeApplyableResource) References() []*addrs.Reference { + if n.Config == nil { + log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n)) + return nil } - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } + var result []*addrs.Reference - // Determine the dependencies for the state. - stateDeps := n.StateReferences() + // Since this node type only updates resource-level metadata, we only + // need to worry about the parts of the configuration that affect + // our "each mode": the count and for_each meta-arguments. + refs, _ := lang.ReferencesInExpr(n.Config.Count) + result = append(result, refs...) + refs, _ = lang.ReferencesInExpr(n.Config.ForEach) + result = append(result, refs...) 
- // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) - default: - panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) - } + return result } -func (n *NodeApplyableResource) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Stop here if we don't actually have a diff - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diff == nil { - return true, EvalEarlyExitError{} - } - - if diff.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - return true, nil - }, - Then: EvalNoop{}, - }, - - // We need to re-interpolate the config here, rather than - // just using the diff's values directly, because we've - // potentially learned more variable values during the - // apply pass that weren't known when the diff was produced. - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - - // Make a new diff with our newly-interpolated config. - &EvalReadDataDiff{ - Info: info, - Config: &config, - Previous: &diff, - Provider: &provider, - Output: &diff, - }, - - &EvalReadDataApply{ - Info: info, - Diff: &diff, - Provider: &provider, - Output: &state, - }, - - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - - // Clear the diff now that we've applied it, so - // later nodes won't see a diff that's now a no-op. - &EvalWriteDiff{ - Name: stateId, - Diff: nil, - }, - - &EvalUpdateStateHook{}, - }, +// GraphNodeEvalable +func (n *NodeApplyableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + providerAddr := n.ResolvedProvider + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} } -} - -func (n *NodeApplyableResource) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
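The new EvalTree above shows the division of labor: the whole-resource node only writes the resource-level record (via EvalWriteResourceState, returned at the end of this function below), while per-instance work moves to NodeApplyableResourceInstance in the next file. One way to picture the pairing, as a hypothetical mini-graph using the "(prepare state)" name this node reports:

```go
package main

import "fmt"

func main() {
	// Edges run prepare-state -> instance, so the whole-resource record is
	// written before any instance is applied, matching the doc comment above.
	edges := [][2]string{
		{"aws_instance.foo (prepare state)", "aws_instance.foo[0]"},
		{"aws_instance.foo (prepare state)", "aws_instance.foo[1]"},
	}
	for _, e := range edges {
		fmt.Printf("%s -> %s\n", e[0], e[1])
	}
}
```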
- var provider ResourceProvider - var diff, diffApply *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig - var err error - var createNew bool - var createBeforeDestroyEnabled bool - - return &EvalSequence{ - Nodes: []EvalNode{ - // Build the instance info - &EvalInstanceInfo{ - Info: info, - }, - - // Get the saved diff for apply - &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, - }, - - // We don't want to do any destroys - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - if diffApply == nil { - return true, EvalEarlyExitError{} - } - - if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { - return true, EvalEarlyExitError{} - } - - diffApply.SetDestroy(false) - return true, nil - }, - Then: EvalNoop{}, - }, - - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - destroy := false - if diffApply != nil { - destroy = diffApply.GetDestroy() || diffApply.RequiresNew() - } - - createBeforeDestroyEnabled = - n.Config.Lifecycle.CreateBeforeDestroy && - destroy - - return createBeforeDestroyEnabled, nil - }, - Then: &EvalDeposeState{ - Name: stateId, - }, - }, - - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. - &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, - &EvalDiff{ - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - Diff: &diffApply, - State: &state, - OutputDiff: &diffApply, - }, - - // Get the saved diff - &EvalReadDiff{ - Name: stateId, - Diff: &diff, - }, - - // Compare the diffs - &EvalCompareDiff{ - Info: info, - One: &diff, - Two: &diffApply, - }, - - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, - }, - &EvalReadState{ - Name: stateId, - Output: &state, - }, - // Call pre-apply hook - &EvalApplyPre{ - Info: info, - State: &state, - Diff: &diffApply, - }, - &EvalApply{ - Info: info, - State: &state, - Diff: &diffApply, - Provider: &provider, - Output: &state, - Error: &err, - CreateNew: &createNew, - }, - &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - &EvalApplyProvisioners{ - Info: info, - State: &state, - Resource: n.Config, - InterpResource: resource, - CreateNew: &createNew, - Error: &err, - When: config.ProvisionerWhenCreate, - }, - &EvalIf{ - If: func(ctx EvalContext) (bool, error) { - return createBeforeDestroyEnabled && err != nil, nil - }, - Then: &EvalUndeposeState{ - Name: stateId, - State: &state, - }, - Else: &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, - }, - }, - - // We clear the diff out here so that future nodes - // don't see a diff that is already complete. There - // is no longer a diff! 
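Both the legacy eval tree being removed here and its replacement lean on the same short-circuit idiom: a step returns EvalEarlyExitError to abort the rest of an EvalSequence without treating it as a failure. A standalone sketch of that idiom under simplified types (not the vendored EvalNode machinery):

```go
package main

import (
	"errors"
	"fmt"
)

// errEarlyExit plays the role of EvalEarlyExitError: stop, but don't fail.
var errEarlyExit = errors.New("early exit")

func runSequence(steps []func() error) error {
	for _, step := range steps {
		if err := step(); err != nil {
			if errors.Is(err, errEarlyExit) {
				return nil // planned short-circuit, e.g. "no diff, nothing to apply"
			}
			return err
		}
	}
	return nil
}

func main() {
	var change *string // nil models "no saved diff for this instance"
	err := runSequence([]func() error{
		func() error {
			if change == nil {
				return errEarlyExit
			}
			return nil
		},
		func() error { fmt.Println("apply"); return nil }, // never reached here
	})
	fmt.Println("err:", err) // err: <nil>
}
```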
-			&EvalWriteDiff{
-				Name: stateId,
-				Diff: nil,
-			},
-			&EvalApplyPost{
-				Info:  info,
-				State: &state,
-				Error: &err,
-			},
-			&EvalUpdateStateHook{},
-		},
+	return &EvalWriteResourceState{
+		Addr:         addr.Resource,
+		Config:       config,
+		ProviderAddr: providerAddr,
 	}
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
new file mode 100644
index 00000000..dad7bfc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
@@ -0,0 +1,433 @@
+package terraform
+
+import (
+	"fmt"
+
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// NodeApplyableResourceInstance represents a resource instance that is
+// "applyable": it is ready to be applied and is represented by a diff.
+//
+// This node is for a specific instance of a resource. It will usually be
+// accompanied in the graph by a NodeApplyableResource representing its
+// containing resource, and should depend on that node to ensure that the
+// state is properly prepared to receive changes to instances.
+type NodeApplyableResourceInstance struct {
+	*NodeAbstractResourceInstance
+
+	destroyNode      GraphNodeDestroyerCBD
+	graphNodeDeposer // implementation of GraphNodeDeposer
+}
+
+var (
+	_ GraphNodeResource         = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeCreator          = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeReferencer       = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeDeposer          = (*NodeApplyableResourceInstance)(nil)
+	_ GraphNodeEvalable         = (*NodeApplyableResourceInstance)(nil)
+)
+
+// GraphNodeAttachDestroyer
+func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) {
+	n.destroyNode = d
+}
+
+// createBeforeDestroy checks this node's config status and the status of any
+// companion destroy node for CreateBeforeDestroy.
+func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool {
+	cbd := false
+
+	if n.Config != nil && n.Config.Managed != nil {
+		cbd = n.Config.Managed.CreateBeforeDestroy
+	}
+
+	if n.destroyNode != nil {
+		cbd = cbd || n.destroyNode.CreateBeforeDestroy()
+	}
+
+	return cbd
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
+	addr := n.ResourceInstanceAddr()
+	return &addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResourceInstance
+func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
+	// Start with the usual resource instance implementation
+	ret := n.NodeAbstractResourceInstance.References()
+
+	// Applying a resource must also depend on the destruction of any of its
+	// dependencies, since this may for example affect the outcome of
+	// evaluating an entire list of resources with "count" set (by reducing
+	// the count).
+	//
+	// However, we can't do this in create_before_destroy mode because that
+	// would create a dependency cycle. We make a compromise here of requiring
+	// changes to be updated across two applies in this case, since the first
+	// plan will use the old values.
+ if !n.createBeforeDestroy() { + for _, ref := range ret { + switch tr := ref.Subject.(type) { + case addrs.ResourceInstance: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + case addrs.Resource: + newRef := *ref // shallow copy so we can mutate + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + newRef.Remaining = nil // can't access attributes of something being destroyed + ret = append(ret, &newRef) + } + } + } + + return ret +} + +// GraphNodeEvalable +func (n *NodeApplyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + // State still uses legacy-style internal ids, so we need to shim to get + // a suitable key to use. + stateId := NewLegacyResourceInstanceAddress(addr).stateId() + + // Determine the dependencies for the state. + stateDeps := n.StateReferences() + + if n.Config == nil { + // This should not be possible, but we've got here in at least one + // case as discussed in the following issue: + // https://github.com/hashicorp/terraform/issues/21258 + // To avoid an outright crash here, we'll instead return an explicit + // error. + var diags tfdiags.Diagnostics + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource node has no configuration attached", + fmt.Sprintf( + "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!", + addr, + ), + )) + err := diags.Err() + return &EvalReturnError{ + Error: &err, + } + } + + // Eval info is different depending on what kind of resource this is + switch n.Config.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr, stateId, stateDeps) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr, stateId, stateDeps) + default: + panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) + } +} + +func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + // Get the saved diff for apply + &EvalReadDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + + // Stop early if we don't actually have a diff + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if change == nil { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // In this particular call to EvalReadData we include our planned + // change, which signals that we expect this read to complete fully + // with no unknown values; it'll produce an error if not. 
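The References override earlier in this hunk appends a destroy-phase copy of each dependency reference, so that (outside create_before_destroy) applying an instance also waits on the destruction of its dependencies. The shallow-copy-and-rewrite step, reduced to strings purely for illustration; the EvalReadData step the comment just above introduces continues right after this aside:

```go
package main

import "fmt"

type reference struct {
	subject   string
	remaining string // attribute path into the referenced object, if any
}

func withDestroyPhase(refs []*reference) []*reference {
	out := refs
	for _, r := range refs {
		d := *r                              // shallow copy so the original is untouched
		d.subject = r.subject + " (destroy)" // phase-qualified address
		d.remaining = ""                     // can't read attributes of a destroyed object
		out = append(out, &d)
	}
	return out
}

func main() {
	refs := withDestroyPhase([]*reference{{subject: "aws_vpc.main", remaining: "id"}})
	for _, r := range refs {
		fmt.Printf("%s %q\n", r.subject, r.remaining)
	}
	// aws_vpc.main "id"
	// aws_vpc.main (destroy) ""
}
```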
+			&EvalReadData{
+				Addr:           addr.Resource,
+				Config:         n.Config,
+				Dependencies:   n.StateReferences(),
+				Planned:        &change, // setting this indicates that the result must be complete
+				Provider:       &provider,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				OutputState:    &state,
+			},
+
+			&EvalWriteState{
+				Addr:           addr.Resource,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				State:          &state,
+			},
+
+			// Clear the diff now that we've applied it, so
+			// later nodes won't see a diff that's now a no-op.
+			&EvalWriteDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         nil,
+			},
+
+			&EvalUpdateStateHook{},
+		},
+	}
+}
+
+func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+	// Declare a bunch of variables that are used for state during
+	// evaluation. Most of these are written to by address below.
+	var provider providers.Interface
+	var providerSchema *ProviderSchema
+	var diff, diffApply *plans.ResourceInstanceChange
+	var state *states.ResourceInstanceObject
+	var err error
+	var createNew bool
+	var createBeforeDestroyEnabled bool
+	var configVal cty.Value
+	var deposedKey states.DeposedKey
+
+	return &EvalSequence{
+		Nodes: []EvalNode{
+			&EvalGetProvider{
+				Addr:   n.ResolvedProvider,
+				Output: &provider,
+				Schema: &providerSchema,
+			},
+
+			// Get the saved diff for apply
+			&EvalReadDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         &diffApply,
+			},
+
+			// We don't want to do any destroys
+			// (these are handled by NodeDestroyResourceInstance instead)
+			&EvalIf{
+				If: func(ctx EvalContext) (bool, error) {
+					if diffApply == nil {
+						return true, EvalEarlyExitError{}
+					}
+					if diffApply.Action == plans.Delete {
+						return true, EvalEarlyExitError{}
+					}
+					return true, nil
+				},
+				Then: EvalNoop{},
+			},
+
+			&EvalIf{
+				If: func(ctx EvalContext) (bool, error) {
+					destroy := false
+					if diffApply != nil {
+						destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace())
+					}
+					if destroy && n.createBeforeDestroy() {
+						createBeforeDestroyEnabled = true
+					}
+					return createBeforeDestroyEnabled, nil
+				},
+				Then: &EvalDeposeState{
+					Addr:      addr.Resource,
+					ForceKey:  n.PreallocatedDeposedKey,
+					OutputKey: &deposedKey,
+				},
+			},
+
+			&EvalReadState{
+				Addr:           addr.Resource,
+				Provider:       &provider,
+				ProviderSchema: &providerSchema,
+
+				Output: &state,
+			},
+
+			// Get the saved diff
+			&EvalReadDiff{
+				Addr:           addr.Resource,
+				ProviderSchema: &providerSchema,
+				Change:         &diff,
+			},
+
+			// Make a new diff, in case we've learned new values in the state
+			// during apply which we can now incorporate.
+ &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + PreviousDiff: &diff, + OutputChange: &diffApply, + OutputValue: &configVal, + OutputState: &state, + }, + + // Compare the diffs + &EvalCheckPlannedChange{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Planned: &diff, + Actual: &diffApply, + }, + + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &diffApply, + Destroy: false, + OutChange: &diffApply, + }, + + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it only requires destroying, since destroying + // is handled by NodeDestroyResourceInstance. + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if diffApply == nil || diffApply.Action == plans.NoOp { + return true, EvalEarlyExitError{} + } + return true, nil + }, + Then: EvalNoop{}, + }, + + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + }, + &EvalApply{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + State: &state, + Change: &diffApply, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + CreateNew: &createNew, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyProvisioners{ + Addr: addr.Resource, + State: &state, // EvalApplyProvisioners will skip if already tainted + ResourceConfig: n.Config, + CreateNew: &createNew, + Error: &err, + When: configs.ProvisionerWhenCreate, + }, + &EvalMaybeTainted{ + Addr: addr.Resource, + State: &state, + Change: &diffApply, + Error: &err, + StateOutput: &state, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + return createBeforeDestroyEnabled && err != nil, nil + }, + Then: &EvalMaybeRestoreDeposedObject{ + Addr: addr.Resource, + Key: &deposedKey, + }, + }, + + // We clear the diff out here so that future nodes + // don't see a diff that is already complete. There + // is no longer a diff! 
+ &EvalIf{ + If: func(ctx EvalContext) (bool, error) { + if !diff.Action.IsReplace() { + return true, nil + } + if !n.createBeforeDestroy() { + return true, nil + } + return false, nil + }, + Then: &EvalWriteDiff{ + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: nil, + }, + }, + + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go index c2efd2c3..ca2267e4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go @@ -2,81 +2,114 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" ) -// NodeDestroyResource represents a resource that is to be destroyed. -type NodeDestroyResource struct { - *NodeAbstractResource +// NodeDestroyResourceInstance represents a resource instance that is to be +// destroyed. +type NodeDestroyResourceInstance struct { + *NodeAbstractResourceInstance + + // If DeposedKey is set to anything other than states.NotDeposed then + // this node destroys a deposed object of the associated instance + // rather than its current object. + DeposedKey states.DeposedKey + + CreateBeforeDestroyOverride *bool } -func (n *NodeDestroyResource) Name() string { - return n.NodeAbstractResource.Name() + " (destroy)" +var ( + _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil) +) + +func (n *NodeDestroyResourceInstance) Name() string { + if n.DeposedKey != states.NotDeposed { + return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey) + } + return n.ResourceInstanceAddr().String() + " (destroy)" } // GraphNodeDestroyer -func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeDestroyerCBD -func (n *NodeDestroyResource) CreateBeforeDestroy() bool { +func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool { + if n.CreateBeforeDestroyOverride != nil { + return *n.CreateBeforeDestroyOverride + } + // If we have no config, we just assume no - if n.Config == nil { + if n.Config == nil || n.Config.Managed == nil { return false } - return n.Config.Lifecycle.CreateBeforeDestroy + return n.Config.Managed.CreateBeforeDestroy } // GraphNodeDestroyerCBD -func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { - // If we have no config, do nothing since it won't affect the - // create step anyways. 
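NodeDestroyResourceInstance above replaces the old approach of mutating the shared config in place with a CreateBeforeDestroyOverride *bool, and NodeApplyableResourceInstance earlier combines its own config with any companion destroy node. Both decisions condensed into a hypothetical standalone sketch (same logic, simplified signatures):

```go
package main

import "fmt"

// cbdFromOverride mirrors the override pattern above: a *bool distinguishes
// "forced true", "forced false", and "not overridden; fall back to config".
func cbdFromOverride(override *bool, configCBD bool) bool {
	if override != nil {
		return *override
	}
	return configCBD
}

// effectiveCBD mirrors the apply-side combination: either the configuration
// or a companion destroy node can turn create-before-destroy on.
func effectiveCBD(configCBD, destroyNodeCBD bool) bool {
	return configCBD || destroyNodeCBD
}

func main() {
	force := false
	fmt.Println(cbdFromOverride(nil, true))    // true: no override, config wins
	fmt.Println(cbdFromOverride(&force, true)) // false: explicitly forced off
	fmt.Println(effectiveCBD(false, true))     // true: forced by the destroy side
}
```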
- if n.Config == nil { - return nil - } - - // Set CBD to true - n.Config.Lifecycle.CreateBeforeDestroy = true - +func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error { + n.CreateBeforeDestroyOverride = &v return nil } // GraphNodeReferenceable, overriding NodeAbstractResource -func (n *NodeDestroyResource) ReferenceableName() []string { - // We modify our referenceable name to have the suffix of ".destroy" - // since depending on the creation side doesn't necessarilly mean - // depending on destruction. - suffix := ".destroy" - - // If we're CBD, we also append "-cbd". This is because CBD will setup - // its own edges (in CBDEdgeTransformer). Depending on the "destroy" - // side generally doesn't mean depending on CBD as well. See GH-11349 +func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable { + normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs() + destroyAddrs := make([]addrs.Referenceable, len(normalAddrs)) + + phaseType := addrs.ResourceInstancePhaseDestroy if n.CreateBeforeDestroy() { - suffix += "-cbd" + phaseType = addrs.ResourceInstancePhaseDestroyCBD } - result := n.NodeAbstractResource.ReferenceableName() - for i, v := range result { - result[i] = v + suffix + for i, normalAddr := range normalAddrs { + switch ta := normalAddr.(type) { + case addrs.Resource: + destroyAddrs[i] = ta.Phase(phaseType) + case addrs.ResourceInstance: + destroyAddrs[i] = ta.Phase(phaseType) + default: + destroyAddrs[i] = normalAddr + } } - return result + return destroyAddrs } // GraphNodeReferencer, overriding NodeAbstractResource -func (n *NodeDestroyResource) References() []string { +func (n *NodeDestroyResourceInstance) References() []*addrs.Reference { // If we have a config, then we need to include destroy-time dependencies - if c := n.Config; c != nil { - var result []string - for _, p := range c.Provisioners { - // We include conn info and config for destroy time provisioners - // as dependencies that we have. - if p.When == config.ProvisionerWhenDestroy { - result = append(result, ReferencesFromConfig(p.ConnInfo)...) - result = append(result, ReferencesFromConfig(p.RawConfig)...) + if c := n.Config; c != nil && c.Managed != nil { + var result []*addrs.Reference + + // We include conn info and config for destroy time provisioners + // as dependencies that we have. + for _, p := range c.Managed.Provisioners { + schema := n.ProvisionerSchemas[p.Type] + + if p.When == configs.ProvisionerWhenDestroy { + if p.Connection != nil { + result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...) + } + result = append(result, ReferencesFromConfig(p.Config, schema)...) 
} } @@ -86,114 +119,66 @@ func (n *NodeDestroyResource) References() []string { return nil } -// GraphNodeDynamicExpandable -func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // If we have no config we do nothing - if n.Addr == nil { - return nil, nil - } - - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Start creating the steps - steps := make([]GraphTransformer, 0, 5) - - // We want deposed resources in the state to be destroyed - steps = append(steps, &DeposedTransformer{ - State: state, - View: n.Addr.stateId(), - }) - - // Target - steps = append(steps, &TargetsTransformer{ - ParsedTargets: n.Targets, - }) - - // Always end with the root being added - steps = append(steps, &RootTransformer{}) - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Name: "NodeResourceDestroy", - } - return b.Build(ctx.Path()) -} - // GraphNodeEvalable -func (n *NodeDestroyResource) EvalTree() EvalNode { - // stateId is the ID to put into the state - stateId := n.Addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: n.Addr.Type, - uniqueExtra: "destroy", - } - - // Build the resource for eval - addr := n.Addr - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } +func (n *NodeDestroyResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() // Get our state rs := n.ResourceState - if rs == nil { - rs = &ResourceState{} + var is *states.ResourceInstance + if rs != nil { + is = rs.Instance(n.InstanceKey) + } + if is == nil { + log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr) } - var diffApply *InstanceDiff - var provider ResourceProvider - var state *InstanceState + var changeApply *plans.ResourceInstanceChange + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject var err error return &EvalOpFilter{ Ops: []walkOperation{walkApply, walkDestroy}, Node: &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + // Get the saved diff for apply &EvalReadDiff{ - Name: stateId, - Diff: &diffApply, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &changeApply, }, - // Filter the diff so we only get the destroy - &EvalFilterDiff{ - Diff: &diffApply, - Output: &diffApply, - Destroy: true, + &EvalReduceDiff{ + Addr: addr.Resource, + InChange: &changeApply, + Destroy: true, + OutChange: &changeApply, }, - // If we're not destroying, then compare diffs + // EvalReduceDiff may have simplified our planned change + // into a NoOp if it does not require destroying. 
			&EvalIf{
 				If: func(ctx EvalContext) (bool, error) {
-					if diffApply != nil && diffApply.GetDestroy() {
-						return true, nil
+					if changeApply == nil || changeApply.Action == plans.NoOp {
+						return true, EvalEarlyExitError{}
 					}
-
-					return true, EvalEarlyExitError{}
+					return true, nil
 				},
 				Then: EvalNoop{},
 			},
 
-			// Load the instance info so we have the module path set
-			&EvalInstanceInfo{Info: info},
-
-			&EvalGetProvider{
-				Name:   n.ProvidedBy()[0],
-				Output: &provider,
-			},
 			&EvalReadState{
-				Name:   stateId,
-				Output: &state,
+				Addr:           addr.Resource,
+				Output:         &state,
+				Provider:       &provider,
+				ProviderSchema: &providerSchema,
 			},
 			&EvalRequireState{
 				State: &state,
@@ -201,15 +186,15 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
 
 			// Call pre-apply hook
 			&EvalApplyPre{
-				Info:  info,
-				State: &state,
-				Diff:  &diffApply,
+				Addr:   addr.Resource,
+				State:  &state,
+				Change: &changeApply,
 			},
 
 			// Run destroy provisioners if not tainted
 			&EvalIf{
 				If: func(ctx EvalContext) (bool, error) {
-					if state != nil && state.Tainted {
+					if state != nil && state.Status == states.ObjectTainted {
 						return false, nil
 					}
 
@@ -217,12 +202,11 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
 				},
 
 				Then: &EvalApplyProvisioners{
-					Info:           info,
+					Addr:           addr.Resource,
 					State:          &state,
-					Resource:       n.Config,
-					InterpResource: resource,
+					ResourceConfig: n.Config,
 					Error:          &err,
-					When:           config.ProvisionerWhenDestroy,
+					When:           configs.ProvisionerWhenDestroy,
 				},
 			},
 
@@ -234,7 +218,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
 				},
 
 				Then: &EvalApplyPost{
-					Info:  info,
+					Addr:  addr.Resource,
 					State: &state,
 					Error: &err,
 				},
@@ -243,41 +227,38 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
 			// Make sure we handle data sources properly.
 			&EvalIf{
 				If: func(ctx EvalContext) (bool, error) {
-					if n.Addr == nil {
-						return false, fmt.Errorf("nil address")
-					}
-
-					if n.Addr.Mode == config.DataResourceMode {
-						return true, nil
-					}
-
-					return false, nil
+					return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil
 				},
 
 				Then: &EvalReadDataApply{
-					Info:     info,
-					Diff:     &diffApply,
-					Provider: &provider,
-					Output:   &state,
+					Addr:           addr.Resource,
+					Config:         n.Config,
+					Change:         &changeApply,
+					Provider:       &provider,
+					ProviderAddr:   n.ResolvedProvider,
+					ProviderSchema: &providerSchema,
+					Output:         &state,
 				},
 				Else: &EvalApply{
-					Info:     info,
-					State:    &state,
-					Diff:     &diffApply,
-					Provider: &provider,
-					Output:   &state,
-					Error:    &err,
+					Addr:           addr.Resource,
+					Config:         nil, // No configuration because we are destroying
+					State:          &state,
+					Change:         &changeApply,
+					Provider:       &provider,
+					ProviderAddr:   n.ResolvedProvider,
+					ProviderSchema: &providerSchema,
+					Output:         &state,
+					Error:          &err,
 				},
 			},
 			&EvalWriteState{
-				Name:         stateId,
-				ResourceType: n.Addr.Type,
-				Provider:     rs.Provider,
-				Dependencies: rs.Dependencies,
-				State:        &state,
+				Addr:           addr.Resource,
+				ProviderAddr:   n.ResolvedProvider,
+				ProviderSchema: &providerSchema,
+				State:          &state,
			},
 			&EvalApplyPost{
-				Info:  info,
+				Addr:  addr.Resource,
 				State: &state,
 				Error: &err,
 			},
@@ -286,3 +267,55 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
 		},
 	}
 }
+
+// NodeDestroyResource represents a resource that is to be destroyed.
+//
+// Destroying a resource is a state-only operation: it is the individual
+// instances being destroyed that affect remote objects.
During graph +// construction, NodeDestroyResource should always depend on any other node +// related to the given resource, since it's just a final cleanup to avoid +// leaving skeleton resource objects in state after their instances have +// all been destroyed. +type NodeDestroyResource struct { + *NodeAbstractResource +} + +var ( + _ GraphNodeResource = (*NodeDestroyResource)(nil) + _ GraphNodeReferenceable = (*NodeDestroyResource)(nil) + _ GraphNodeReferencer = (*NodeDestroyResource)(nil) + _ GraphNodeEvalable = (*NodeDestroyResource)(nil) +) + +func (n *NodeDestroyResource) Name() string { + return n.ResourceAddr().String() + " (clean up state)" +} + +// GraphNodeReferenceable, overriding NodeAbstractResource +func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeReferencer, overriding NodeAbstractResource +func (n *NodeDestroyResource) References() []*addrs.Reference { + // NodeDestroyResource doesn't participate in references: the graph + // builder that created it should ensure directly that it already depends + // on every other node related to its resource, without relying on + // references. + return nil +} + +// GraphNodeEvalable +func (n *NodeDestroyResource) EvalTree() EvalNode { + // This EvalNode will produce an error if the resource isn't already + // empty by the time it is called, since it should just be pruning the + // leftover husk of a resource in state after all of the child instances + // and their objects were destroyed. + return &EvalForgetResourceState{ + Addr: n.ResourceAddr().Resource, + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go new file mode 100644 index 00000000..67c46913 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go @@ -0,0 +1,313 @@ +package terraform + +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert +// an abstract resource instance to a concrete one of some type that has +// an associated deposed object key. +type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex + +type GraphNodeDeposedResourceInstanceObject interface { + DeposedInstanceObjectKey() states.DeposedKey +} + +// NodePlanDeposedResourceInstanceObject represents deposed resource +// instance objects during plan. These are distinct from the primary object +// for each resource instance since the only valid operation to do with them +// is to destroy them. +// +// This node type is also used during the refresh walk to ensure that the +// record of a deposed object is up-to-date before we plan to destroy it. 
+type NodePlanDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil) +) + +func (n *NodePlanDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey) +} + +func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeEvalable impl. +func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + + seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)} + + // During the refresh walk we will ensure that our record of the deposed + // object is up-to-date. If it was already deleted outside of Terraform + // then this will remove it from state and thus avoid us planning a + // destroy for it during the subsequent plan walk. + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkRefresh}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Key: n.DeposedKey, + Output: &state, + }, + &EvalRefresh{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, + }, + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + }, + }) + + // During the plan walk we always produce a planned destroy change, because + // destroying is the only supported action for deposed objects. 
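The deposed-object machinery in this file is easiest to picture as a side table next to each instance's current object: create-before-destroy moves the old object aside under a generated key, and restores it only if the replacement fails. A toy model of that bookkeeping (an assumed simplification, not the real states API); the plan-walk steps the comment just above introduces follow after this sketch:

```go
package main

import "fmt"

type instance struct {
	Current string            // ID of the live object, "" if none
	Deposed map[string]string // deposed key -> object ID
}

func (i *instance) depose(key string) {
	if i.Current == "" {
		return
	}
	if i.Deposed == nil {
		i.Deposed = map[string]string{}
	}
	i.Deposed[key] = i.Current // old object set aside, still tracked in state
	i.Current = ""
}

func (i *instance) restore(key string) {
	i.Current = i.Deposed[key] // replacement failed: old object becomes current again
	delete(i.Deposed, key)
}

func main() {
	inst := &instance{Current: "i-old"}
	inst.depose("00000001")
	createFailed := false // flip to true to exercise the restore path
	if createFailed {
		inst.restore("00000001")
	} else {
		inst.Current = "i-new" // deposed "i-old" is destroyed later by its own node
	}
	fmt.Printf("%+v\n", *inst) // {Current:i-new Deposed:map[00000001:i-old]}
}
```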
+ var change *plans.ResourceInstanceChange + seq.Nodes = append(seq.Nodes, &EvalOpFilter{ + Ops: []walkOperation{walkPlan, walkPlanDestroy}, + Node: &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + DeposedKey: n.DeposedKey, + State: &state, + Output: &change, + }, + &EvalWriteDiff{ + Addr: addr.Resource, + DeposedKey: n.DeposedKey, + ProviderSchema: &providerSchema, + Change: &change, + }, + // Since deposed objects cannot be referenced by expressions + // elsewhere, we don't need to also record the planned new + // state in this case. + }, + }, + }) + + return seq +} + +// NodeDestroyDeposedResourceInstanceObject represents deposed resource +// instance objects during apply. Nodes of this type are inserted by +// DiffTransformer when the planned changeset contains "delete" changes for +// deposed instance objects, and its only supported operation is to destroy +// and then forget the associated object. +type NodeDestroyDeposedResourceInstanceObject struct { + *NodeAbstractResourceInstance + DeposedKey states.DeposedKey +} + +var ( + _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) + _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil) +) + +func (n *NodeDestroyDeposedResourceInstanceObject) Name() string { + return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey) +} + +func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey { + return n.DeposedKey +} + +// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable { + // Deposed objects don't participate in references. + return nil +} + +// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance +func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference { + // We don't evaluate configuration for deposed objects, so they effectively + // make no references. + return nil +} + +// GraphNodeDestroyer +func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool { + // A deposed instance is always CreateBeforeDestroy by definition, since + // we use deposed only to handle create-before-destroy. 
+ return true +} + +// GraphNodeDestroyerCBD +func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error { + if !v { + // Should never happen: deposed instances are _always_ create_before_destroy. + return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance") + } + return nil +} + +// GraphNodeEvalable impl. +func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject + var change *plans.ResourceInstanceChange + var err error + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + &EvalReadStateDeposed{ + Addr: addr.Resource, + Output: &state, + Key: n.DeposedKey, + Provider: &provider, + ProviderSchema: &providerSchema, + }, + &EvalDiffDestroy{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, + }, + // Call pre-apply hook + &EvalApplyPre{ + Addr: addr.Resource, + State: &state, + Change: &change, + }, + &EvalApply{ + Addr: addr.Resource, + Config: nil, // No configuration because we are destroying + State: &state, + Change: &change, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + Output: &state, + Error: &err, + }, + // Always write the resource back to the state deposed... if it + // was successfully destroyed it will be pruned. If it was not, it will + // be caught on the next run. + &EvalWriteStateDeposed{ + Addr: addr.Resource, + Key: n.DeposedKey, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + &EvalApplyPost{ + Addr: addr.Resource, + State: &state, + Error: &err, + }, + &EvalReturnError{ + Error: &err, + }, + &EvalUpdateStateHook{}, + }, + } +} + +// GraphNodeDeposer is an optional interface implemented by graph nodes that +// might create a single new deposed object for a specific associated resource +// instance, allowing a caller to optionally pre-allocate a DeposedKey for +// it. +type GraphNodeDeposer interface { + // SetPreallocatedDeposedKey will be called during graph construction + // if a particular node must use a pre-allocated deposed key if/when it + // "deposes" the current object of its associated resource instance. + SetPreallocatedDeposedKey(key states.DeposedKey) +} + +// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer. +// Embed it in a node type to get automatic support for it, and then access +// the field PreallocatedDeposedKey to access any pre-allocated key. +type graphNodeDeposer struct { + PreallocatedDeposedKey states.DeposedKey +} + +func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) { + n.PreallocatedDeposedKey = key +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go index 52bbf88a..633c1c46 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go @@ -1,45 +1,119 @@ package terraform import ( + "log" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) // NodePlannableResource represents a resource that is "plannable": // it is ready to be planned in order to create a diff. 
type NodePlannableResource struct { - *NodeAbstractCountResource + *NodeAbstractResource + + // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD + // during graph construction, if dependencies require us to force this + // on regardless of what the configuration says. + ForceCreateBeforeDestroy *bool +} + +var ( + _ GraphNodeSubPath = (*NodePlannableResource)(nil) + _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil) + _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil) + _ GraphNodeReferenceable = (*NodePlannableResource)(nil) + _ GraphNodeReferencer = (*NodePlannableResource)(nil) + _ GraphNodeResource = (*NodePlannableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil) +) + +// GraphNodeEvalable +func (n *NodePlannableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config + + if config == nil { + // Nothing to do, then. + log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr) + return &EvalNoop{} + } + + // this ensures we can reference the resource even if the count is 0 + return &EvalWriteResourceState{ + Addr: addr.Resource, + Config: config, + ProviderAddr: n.ResolvedProvider, + } +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) CreateBeforeDestroy() bool { + if n.ForceCreateBeforeDestroy != nil { + return *n.ForceCreateBeforeDestroy + } + + // If we have no config, we just assume no + if n.Config == nil || n.Config.Managed == nil { + return false + } + + return n.Config.Managed.CreateBeforeDestroy +} + +// GraphNodeDestroyerCBD +func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error { + n.ForceCreateBeforeDestroy = &v + return nil } // GraphNodeDynamicExpandable func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count, err := n.Config.Count() - if err != nil { - return nil, err + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() } + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas return &NodePlannableResourceInstance{ - NodeAbstractResource: a, + NodeAbstractResourceInstance: a, + + // By the time we're walking, we've figured out whether we need + // to force on CreateBeforeDestroy due to dependencies on other + // nodes that have it. 
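`ForceCreateBeforeDestroy` above is a `*bool` rather than a `bool` so the node can distinguish "not forced" (nil) from an explicit override; `CreateBeforeDestroy()` falls back to the configuration only when no override was set. The pattern in isolation, with simplified hypothetical names:

```go
package main

import "fmt"

type node struct {
	// nil means "not forced"; graph construction may point this at a
	// value to override whatever the configuration says.
	force     *bool
	configCBD bool
}

func (n *node) CreateBeforeDestroy() bool {
	if n.force != nil {
		return *n.force
	}
	return n.configCBD
}

func (n *node) ModifyCreateBeforeDestroy(v bool) error {
	n.force = &v
	return nil
}

func main() {
	n := &node{configCBD: false}
	fmt.Println(n.CreateBeforeDestroy()) // false: taken from config

	// A dependency requires CBD, so a transformer forces it on.
	n.ModifyCreateBeforeDestroy(true)
	fmt.Println(n.CreateBeforeDestroy()) // true: forced override wins
}
```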
+ ForceCreateBeforeDestroy: n.CreateBeforeDestroy(), } } - // The concrete resource factory we'll use for oprhans - concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { + // The concrete resource factory we'll use for orphans + concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex { // Add the config and state since we don't do that via transforms a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + a.Schema = n.Schema + a.ProvisionerSchemas = n.ProvisionerSchemas - return &NodePlannableResourceOrphan{ - NodeAbstractResource: a, + return &NodePlannableResourceInstanceOrphan{ + NodeAbstractResourceInstance: a, } } @@ -48,6 +122,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { // Expand the count. &ResourceCountTransformer{ Concrete: concreteResource, + Schema: n.Schema, Count: count, Addr: n.ResourceAddr(), }, @@ -64,7 +139,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { &AttachStateTransformer{State: state}, // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, + &TargetsTransformer{Targets: n.Targets}, // Connect references so ordering is correct &ReferenceTransformer{}, @@ -79,5 +154,6 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { Validate: true, Name: "NodePlannableResource", } - return b.Build(ctx.Path()) + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go index 9b02362b..38746f0d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go @@ -1,52 +1,87 @@ package terraform -// NodePlanDestroyableResource represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodePlanDestroyableResource struct { - *NodeAbstractResource +import ( + "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodePlanDestroyableResourceInstance represents a resource that is ready +// to be planned for destruction. 
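The `var ( _ GraphNodeSubPath = (*NodePlannableResource)(nil) ... )` blocks appearing throughout these files are compile-time interface assertions: assigning a typed nil pointer to a blank variable of the interface type makes the build fail if the type ever stops implementing the interface, at zero runtime cost. A minimal self-contained example:

```go
package main

import "fmt"

type GraphNodeDestroyer interface {
	DestroyAddr() string
}

type destroyNode struct{ addr string }

func (n *destroyNode) DestroyAddr() string { return n.addr }

// Compile-time assertion: if destroyNode ever stops implementing
// GraphNodeDestroyer, this line fails to compile; nothing happens at
// run time and no memory is allocated.
var _ GraphNodeDestroyer = (*destroyNode)(nil)

func main() {
	fmt.Println((&destroyNode{addr: "aws_instance.example"}).DestroyAddr())
}
```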
+type NodePlanDestroyableResourceInstance struct { + *NodeAbstractResourceInstance } +var ( + _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil) + _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil) +) + // GraphNodeDestroyer -func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeEvalable -func (n *NodePlanDestroyableResource) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() - // stateId is the ID to put into the state - stateId := addr.stateId() + // Declare a bunch of variables that are used for state during + // evaluation. These are written to by address in the EvalNodes we + // declare below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, + if n.ResolvedProvider.ProviderConfig.Type == "" { + // Should never happen; indicates that the graph was not constructed + // correctly since we didn't get our provider attached. + panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n))) } - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. 
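The `Output: &provider` / `State: &state` fields visible in every EvalTree share variables by address: the sequence declares each variable once, and each step reads or writes through pointers so later steps see earlier results. Roughly like this (toy step types, not the real EvalNode interface):

```go
package main

import "fmt"

type step interface{ eval() error }

type readState struct{ out *string } // writes its result through out

func (s *readState) eval() error { *s.out = "state-from-disk"; return nil }

type diffDestroy struct {
	state  *string // reads what readState wrote
	change *string // and writes the planned change for later steps
}

func (s *diffDestroy) eval() error {
	*s.change = "destroy " + *s.state
	return nil
}

func main() {
	// Declared once, shared by address across the whole sequence.
	var state, change string

	seq := []step{
		&readState{out: &state},
		&diffDestroy{state: &state, change: &change},
	}
	for _, s := range seq {
		if err := s.eval(); err != nil {
			panic(err)
		}
	}
	fmt.Println(change)
}
```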
- var diff *InstanceDiff - var state *InstanceState - return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + Output: &change, }, &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go index b5295690..75e0bcd3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go @@ -3,187 +3,205 @@ package terraform import ( "fmt" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/terraform/addrs" + "github.com/zclconf/go-cty/cty" ) // NodePlannableResourceInstance represents a _single_ resource // instance that is plannable. This means this represents a single // count index, for example. type NodePlannableResourceInstance struct { - *NodeAbstractResource + *NodeAbstractResourceInstance + ForceCreateBeforeDestroy bool } +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil) +) + // GraphNodeEvalable func (n *NodePlannableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() + addr := n.ResourceInstanceAddr() - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } - - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } + // State still uses legacy-style internal ids, so we need to shim to get + // a suitable key to use. + stateId := NewLegacyResourceInstanceAddress(addr).stateId() // Determine the dependencies for the state. 
stateDeps := n.StateReferences() // Eval info is different depending on what kind of resource this is - switch n.Config.Mode { - case config.ManagedResourceMode: - return n.evalTreeManagedResource( - stateId, info, resource, stateDeps, - ) - case config.DataResourceMode: - return n.evalTreeDataResource( - stateId, info, resource, stateDeps) + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + return n.evalTreeManagedResource(addr, stateId, stateDeps) + case addrs.DataResourceMode: + return n.evalTreeDataResource(addr, stateId, stateDeps) default: panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) } } -func (n *NodePlannableResourceInstance) evalTreeDataResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - var provider ResourceProvider - var config *ResourceConfig - var diff *InstanceDiff - var state *InstanceState +func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var configVal cty.Value return &EvalSequence{ Nodes: []EvalNode{ - &EvalReadState{ - Name: stateId, - Output: &state, + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, }, - // We need to re-interpolate the config here because some - // of the attributes may have become computed during - // earlier planning, due to other resources having - // "requires new resource" diffs. - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, }, + // If we already have a non-planned state then we already dealt + // with this during the refresh walk and so we have nothing to do + // here. &EvalIf{ If: func(ctx EvalContext) (bool, error) { - computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 - - // If the configuration is complete and we - // already have a state then we don't need to - // do any further work during apply, because we - // already populated the state during refresh. - if !computed && state != nil { - return true, EvalEarlyExitError{} + depChanges := false + + // Check and see if any of our dependencies have changes. + changes := ctx.Changes() + for _, d := range n.StateReferences() { + ri, ok := d.(addrs.ResourceInstance) + if !ok { + continue + } + change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen) + if change != nil && change.Action != plans.NoOp { + depChanges = true + break + } } + refreshed := state != nil && state.Status != states.ObjectPlanned + + // If there are no dependency changes, and it's not a forced + // read because there was no Refresh, then we don't need + // to re-read. If any dependencies have changes, it means + // our config may also have changes and we need to Read the + // data source again.
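The `EvalIf` condition above reduces to: re-read the data source when any dependency has a pending non-no-op change, or when there is no refreshed (non-planned) state yet. The same decision extracted into a plain function (`needsRead` is a hypothetical helper, with actions simplified to strings):

```go
package main

import "fmt"

// needsRead reports whether a data source must be read again during
// plan: either a dependency has a real pending change (so our config
// may change too), or we never got a refreshed, non-planned state.
func needsRead(depActions map[string]string, refreshed bool) bool {
	depChanges := false
	for _, action := range depActions {
		if action != "no-op" {
			depChanges = true
			break
		}
	}
	return depChanges || !refreshed
}

func main() {
	deps := map[string]string{
		"aws_instance.example": "update",
		"aws_vpc.main":         "no-op",
	}
	fmt.Println(needsRead(deps, true))                            // true: a dependency changed
	fmt.Println(needsRead(map[string]string{"a": "no-op"}, true)) // false: safe to skip the re-read
}
```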
+ if !depChanges && refreshed { + return false, EvalEarlyExitError{} + } return true, nil }, Then: EvalNoop{}, }, - &EvalGetProvider{ - Name: n.ProvidedBy()[0], - Output: &provider, + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, }, - &EvalReadDataDiff{ - Info: info, - Config: &config, - Provider: &provider, - Output: &diff, - OutputState: &state, + &EvalReadData{ + Addr: addr.Resource, + Config: n.Config, + Dependencies: n.StateReferences(), + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready + OutputChange: &change, + OutputValue: &configVal, + OutputState: &state, }, &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } } -func (n *NodePlannableResourceInstance) evalTreeManagedResource( - stateId string, info *InstanceInfo, - resource *Resource, stateDeps []string) EvalNode { - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var diff *InstanceDiff - var state *InstanceState - var resourceConfig *ResourceConfig +func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode { + config := n.Config + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject return &EvalSequence{ Nodes: []EvalNode{ - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &resourceConfig, - }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, - // Re-run validation to catch any errors we missed, e.g. type - // mismatches on computed values. 
- &EvalValidateResource{ - Provider: &provider, - Config: &resourceConfig, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, - IgnoreWarnings: true, - }, + &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, + + &EvalValidateSelfRef{ + Addr: addr.Resource, + Config: config.Config, + ProviderSchema: &providerSchema, + }, + &EvalDiff{ - Name: stateId, - Info: info, - Config: &resourceConfig, - Resource: n.Config, - Provider: &provider, - State: &state, - OutputDiff: &diff, - OutputState: &state, + Addr: addr.Resource, + Config: n.Config, + CreateBeforeDestroy: n.ForceCreateBeforeDestroy, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, }, &EvalCheckPreventDestroy{ - Resource: n.Config, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteState{ - Name: stateId, - ResourceType: n.Config.Type, - Provider: n.Config.Provider, - Dependencies: stateDeps, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + State: &state, + ProviderSchema: &providerSchema, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go index 73d6e41f..84166949 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go @@ -1,53 +1,83 @@ package terraform -// NodePlannableResourceOrphan represents a resource that is "applyable": +import ( + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/states" +) + +// NodePlannableResourceInstanceOrphan represents a resource that is "applyable": // it is ready to be applied and is represented by a diff. -type NodePlannableResourceOrphan struct { - *NodeAbstractResource +type NodePlannableResourceInstanceOrphan struct { + *NodeAbstractResourceInstance } -func (n *NodePlannableResourceOrphan) Name() string { - return n.NodeAbstractResource.Name() + " (orphan)" -} +var ( + _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil) + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) -// GraphNodeEvalable -func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +var ( + _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil) +) - // stateId is the ID to put into the state - stateId := addr.stateId() +func (n *NodePlannableResourceInstanceOrphan) Name() string { + return n.ResourceInstanceAddr().String() + " (orphan)" +} - // Build the instance info. 
More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - ModulePath: normalizeModulePath(addr.Path), - } +// GraphNodeEvalable +func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. - var diff *InstanceDiff - var state *InstanceState + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + var provider providers.Interface + var providerSchema *ProviderSchema return &EvalSequence{ Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, &EvalDiffDestroy{ - Info: info, - State: &state, - Output: &diff, + Addr: addr.Resource, + State: &state, + ProviderAddr: n.ResolvedProvider, + Output: &change, + OutputState: &state, // Will point to a nil state after this completes, signalling destroyed }, &EvalCheckPreventDestroy{ - Resource: n.Config, - ResourceId: stateId, - Diff: &diff, + Addr: addr.Resource, + Config: n.Config, + Change: &change, }, &EvalWriteDiff{ - Name: stateId, - Diff: &diff, + Addr: addr.Resource, + ProviderSchema: &providerSchema, + Change: &change, + }, + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go index 3a44926c..95060232 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go @@ -2,64 +2,172 @@ package terraform import ( "fmt" + "log" - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/providers" + + "github.com/hashicorp/terraform/states" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) -// NodeRefreshableResource represents a resource that is "applyable": -// it is ready to be applied and is represented by a diff. -type NodeRefreshableResource struct { +// NodeRefreshableManagedResource represents a resource that is expandable into +// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
+type NodeRefreshableManagedResource struct { *NodeAbstractResource } +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil) +) + +// GraphNodeDynamicExpandable +func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + + count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx) + diags = diags.Append(countDiags) + if countDiags.HasErrors() { + return nil, diags.Err() + } + + // Next we need to potentially rename an instance address in the state + // if we're transitioning whether "count" is set at all. + fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1) + + // Our graph transformers require access to the full state, so we'll + // temporarily lock it while we work on this. + state := ctx.State().Lock() + defer ctx.State().Unlock() + + // The concrete resource factory we'll use + concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex { + // Add the config and state since we don't do that via transforms + a.Config = n.Config + a.ResolvedProvider = n.ResolvedProvider + + return &NodeRefreshableManagedResourceInstance{ + NodeAbstractResourceInstance: a, + } + } + + // Start creating the steps + steps := []GraphTransformer{ + // Expand the count. + &ResourceCountTransformer{ + Concrete: concreteResource, + Schema: n.Schema, + Count: count, + Addr: n.ResourceAddr(), + }, + + // Add the count orphans to make sure these resources are accounted for + // during a scale in. + &OrphanResourceCountTransformer{ + Concrete: concreteResource, + Count: count, + Addr: n.ResourceAddr(), + State: state, + }, + + // Attach the state + &AttachStateTransformer{State: state}, + + // Targeting + &TargetsTransformer{Targets: n.Targets}, + + // Connect references so ordering is correct + &ReferenceTransformer{}, + + // Make sure there is a single root + &RootTransformer{}, + } + + // Build the graph + b := &BasicGraphBuilder{ + Steps: steps, + Validate: true, + Name: "NodeRefreshableManagedResource", + } + + graph, diags := b.Build(ctx.Path()) + return graph, diags.ErrWithWarnings() +} + +// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": +// it is ready to be applied and is represented by a diff. 
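`DynamicExpand` above turns the single configured resource into one instance node per index, and `OrphanResourceCountTransformer` adds nodes for state entries at or beyond the new count so a scale-in destroys them. The net effect, sketched with plain strings instead of graph vertices:

```go
package main

import "fmt"

// expand returns instance nodes for indexes 0..count-1 and orphan
// nodes for any state index at or beyond count, mirroring what the
// count and orphan transformers above contribute to the subgraph.
func expand(addr string, count int, stateIndexes []int) []string {
	var nodes []string
	for i := 0; i < count; i++ {
		nodes = append(nodes, fmt.Sprintf("%s[%d]", addr, i))
	}
	for _, i := range stateIndexes {
		if i >= count {
			nodes = append(nodes, fmt.Sprintf("%s[%d] (orphan)", addr, i))
		}
	}
	return nodes
}

func main() {
	// count was 4, now 2: indexes 2 and 3 become orphans to destroy.
	fmt.Println(expand("aws_instance.web", 2, []int{0, 1, 2, 3}))
}
```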
+type NodeRefreshableManagedResourceInstance struct { + *NodeAbstractResourceInstance +} + +var ( + _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil) + _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil) +) + // GraphNodeDestroyer -func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress { - return n.Addr +func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance { + addr := n.ResourceInstanceAddr() + return &addr } // GraphNodeEvalable -func (n *NodeRefreshableResource) EvalTree() EvalNode { +func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { + addr := n.ResourceInstanceAddr() + // Eval info is different depending on what kind of resource this is - switch mode := n.Addr.Mode; mode { - case config.ManagedResourceMode: + switch addr.Resource.Resource.Mode { + case addrs.ManagedResourceMode: + if n.ResourceState == nil { + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr) + return n.evalTreeManagedResourceNoState() + } + log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr) return n.evalTreeManagedResource() - case config.DataResourceMode: + case addrs.DataResourceMode: // Get the data source node. If we don't have a configuration // then it is an orphan so we destroy it (remove it from the state). var dn GraphNodeEvalable if n.Config != nil { dn = &NodeRefreshableDataResourceInstance{ - NodeAbstractResource: n.NodeAbstractResource, + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, } } else { - dn = &NodeDestroyableDataResource{ - NodeAbstractResource: n.NodeAbstractResource, + dn = &NodeDestroyableDataResourceInstance{ + NodeAbstractResourceInstance: n.NodeAbstractResourceInstance, } } return dn.EvalTree() default: - panic(fmt.Errorf("unsupported resource mode %s", mode)) + panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode)) } } -func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode { - addr := n.NodeAbstractResource.Addr - - // stateId is the ID to put into the state - stateId := addr.stateId() - - // Build the instance info. More of this will be populated during eval - info := &InstanceInfo{ - Id: stateId, - Type: addr.Type, - } +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { + addr := n.ResourceInstanceAddr() // Declare a bunch of variables that are used for state during // evaluation. Most of this are written to by-address below. - var provider ResourceProvider - var state *InstanceState + var provider providers.Interface + var providerSchema *ProviderSchema + var state *states.ResourceInstanceObject // This happened during initial development. All known cases were // fixed and tested but as a sanity check let's assert here. 
@@ -75,25 +183,106 @@ func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, + &EvalReadState{ - Name: stateId, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Output: &state, }, + &EvalRefresh{ - Info: info, - Provider: &provider, - State: &state, - Output: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, }, + &EvalWriteState{ - Name: stateId, - ResourceType: n.ResourceState.Type, - Provider: n.ResourceState.Provider, - Dependencies: n.ResourceState.Dependencies, - State: &state, + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + }, + } +} + +// evalTreeManagedResourceNoState produces an EvalSequence for refresh resource +// nodes that don't have state attached. An example of where this functionality +// is useful is when a resource that already exists in state is being scaled +// out, ie: has its resource count increased. In this case, the scaled out node +// needs to be available to other nodes (namely data sources) that may depend +// on it for proper interpolation, or confusing "index out of range" errors can +// occur. +// +// The steps in this sequence are very similar to the steps carried out in +// plan, but nothing is done with the diff after it is created - it is dropped, +// and its changes are not counted in the UI. +func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { + addr := n.ResourceInstanceAddr() + + // Declare a bunch of variables that are used for state during + // evaluation. Most of this are written to by-address below. + var provider providers.Interface + var providerSchema *ProviderSchema + var change *plans.ResourceInstanceChange + var state *states.ResourceInstanceObject + + return &EvalSequence{ + Nodes: []EvalNode{ + &EvalGetProvider{ + Addr: n.ResolvedProvider, + Output: &provider, + Schema: &providerSchema, + }, + + &EvalReadState{ + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + + Output: &state, + }, + + &EvalDiff{ + Addr: addr.Resource, + Config: n.Config, + Provider: &provider, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + OutputChange: &change, + OutputState: &state, + Stub: true, + }, + + &EvalWriteState{ + Addr: addr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, + }, + + // We must also save the planned change, so that expressions in + // other nodes, such as provider configurations and data resources, + // can work with the planned new value. + // + // This depends on the fact that Context.Refresh creates a + // temporary new empty changeset for the duration of its graph + // walk, and so this recorded change will be discarded immediately + // after the refresh walk completes. 
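The comment above depends on the refresh walk using a throwaway changeset: stub diffs are written so that expressions elsewhere can see planned values during the walk, then are discarded together with it. A rough sketch of that lifecycle (the `refreshWalk` helper is invented, with maps standing in for the real changeset and state):

```go
package main

import "fmt"

type changes map[string]string

// refreshWalk runs against a fresh, empty changeset and returns only
// the resulting state; the recorded stub changes go out of scope with
// the walk, which is what makes writing them safe.
func refreshWalk() map[string]string {
	tempChanges := changes{} // empty changeset for this walk only
	state := map[string]string{}

	// A node with no prior state writes both state and a stub change,
	// so data sources evaluated later in this walk can interpolate it.
	tempChanges["aws_instance.web[1]"] = "create (stub)"
	state["aws_instance.web[1]"] = "planned object"

	fmt.Println("during walk:", tempChanges)
	return state // tempChanges is discarded here
}

func main() {
	fmt.Println("after walk:", refreshWalk())
}
```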
+ &EvalWriteDiff{ + Addr: addr.Resource, + Change: &change, + ProviderSchema: &providerSchema, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go index e01518de..734ec9e2 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go @@ -1,145 +1,87 @@ package terraform import ( - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/configs/configschema" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/provisioners" + "github.com/zclconf/go-cty/cty" ) // NodeValidatableResource represents a resource that is used for validation // only. type NodeValidatableResource struct { - *NodeAbstractCountResource -} - -// GraphNodeEvalable -func (n *NodeValidatableResource) EvalTree() EvalNode { - // Ensure we're validating - c := n.NodeAbstractCountResource - c.Validate = true - return c.EvalTree() -} - -// GraphNodeDynamicExpandable -func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { - // Grab the state which we read - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - - // Expand the resource count which must be available by now from EvalTree - count := 1 - if n.Config.RawCount.Value() != unknownValue() { - var err error - count, err = n.Config.Count() - if err != nil { - return nil, err - } - } - - // The concrete resource factory we'll use - concreteResource := func(a *NodeAbstractResource) dag.Vertex { - // Add the config and state since we don't do that via transforms - a.Config = n.Config - - return &NodeValidatableResourceInstance{ - NodeAbstractResource: a, - } - } - - // Start creating the steps - steps := []GraphTransformer{ - // Expand the count. - &ResourceCountTransformer{ - Concrete: concreteResource, - Count: count, - Addr: n.ResourceAddr(), - }, - - // Attach the state - &AttachStateTransformer{State: state}, - - // Targeting - &TargetsTransformer{ParsedTargets: n.Targets}, - - // Connect references so ordering is correct - &ReferenceTransformer{}, - - // Make sure there is a single root - &RootTransformer{}, - } - - // Build the graph - b := &BasicGraphBuilder{ - Steps: steps, - Validate: true, - Name: "NodeValidatableResource", - } - - return b.Build(ctx.Path()) -} - -// This represents a _single_ resource instance to validate. 
-type NodeValidatableResourceInstance struct { *NodeAbstractResource } -// GraphNodeEvalable -func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { - addr := n.NodeAbstractResource.Addr +var ( + _ GraphNodeSubPath = (*NodeValidatableResource)(nil) + _ GraphNodeEvalable = (*NodeValidatableResource)(nil) + _ GraphNodeReferenceable = (*NodeValidatableResource)(nil) + _ GraphNodeReferencer = (*NodeValidatableResource)(nil) + _ GraphNodeResource = (*NodeValidatableResource)(nil) + _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil) +) - // Build the resource for eval - resource := &Resource{ - Name: addr.Name, - Type: addr.Type, - CountIndex: addr.Index, - } - if resource.CountIndex < 0 { - resource.CountIndex = 0 - } +// GraphNodeEvalable +func (n *NodeValidatableResource) EvalTree() EvalNode { + addr := n.ResourceAddr() + config := n.Config - // Declare a bunch of variables that are used for state during - // evaluation. Most of this are written to by-address below. - var config *ResourceConfig - var provider ResourceProvider + // Declare the variables that will be used to pass values along + // the evaluation sequence below. These are written to via pointers + // passed to the EvalNodes. + var provider providers.Interface + var providerSchema *ProviderSchema + var configVal cty.Value seq := &EvalSequence{ Nodes: []EvalNode{ - &EvalValidateResourceSelfRef{ - Addr: &addr, - Config: &n.Config.RawConfig, - }, &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Addr: n.ResolvedProvider, Output: &provider, - }, - &EvalInterpolate{ - Config: n.Config.RawConfig.Copy(), - Resource: resource, - Output: &config, + Schema: &providerSchema, }, &EvalValidateResource{ - Provider: &provider, - Config: &config, - ResourceName: n.Config.Name, - ResourceType: n.Config.Type, - ResourceMode: n.Config.Mode, + Addr: addr.Resource, + Provider: &provider, + ProviderSchema: &providerSchema, + Config: config, + ConfigVal: &configVal, }, }, } - // Validate all the provisioners - for _, p := range n.Config.Provisioners { - var provisioner ResourceProvisioner - seq.Nodes = append(seq.Nodes, &EvalGetProvisioner{ - Name: p.Type, - Output: &provisioner, - }, &EvalInterpolate{ - Config: p.RawConfig.Copy(), - Resource: resource, - Output: &config, - }, &EvalValidateProvisioner{ - Provisioner: &provisioner, - Config: &config, - }) + if managed := n.Config.Managed; managed != nil { + hasCount := n.Config.Count != nil + + // Validate all the provisioners + for _, p := range managed.Provisioners { + var provisioner provisioners.Interface + var provisionerSchema *configschema.Block + + if p.Connection == nil { + p.Connection = config.Managed.Connection + } else if config.Managed.Connection != nil { + p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config) + } + + seq.Nodes = append( + seq.Nodes, + &EvalGetProvisioner{ + Name: p.Type, + Output: &provisioner, + Schema: &provisionerSchema, + }, + &EvalValidateProvisioner{ + ResourceAddr: addr.Resource, + Provisioner: &provisioner, + Schema: &provisionerSchema, + Config: p, + ResourceHasCount: hasCount, + }, + ) + } } return seq diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go index cb61a4e3..1c302903 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go @@ -1,22 +1,44 @@ package
terraform import ( - "fmt" - - "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" ) // NodeRootVariable represents a root variable input. type NodeRootVariable struct { - Config *config.Variable + Addr addrs.InputVariable + Config *configs.Variable } +var ( + _ GraphNodeSubPath = (*NodeRootVariable)(nil) + _ GraphNodeReferenceable = (*NodeRootVariable)(nil) + _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil) +) + func (n *NodeRootVariable) Name() string { - result := fmt.Sprintf("var.%s", n.Config.Name) - return result + return n.Addr.String() +} + +// GraphNodeSubPath +func (n *NodeRootVariable) Path() addrs.ModuleInstance { + return addrs.RootModuleInstance } // GraphNodeReferenceable -func (n *NodeRootVariable) ReferenceableName() []string { - return []string{n.Name()} +func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable { + return []addrs.Referenceable{n.Addr} +} + +// dag.GraphNodeDotter impl. +func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { + return &dag.DotNode{ + Name: name, + Attrs: map[string]string{ + "label": n.Name(), + "shape": "note", + }, + } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go index ca99685a..9757446b 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/path.go +++ b/vendor/github.com/hashicorp/terraform/terraform/path.go @@ -1,24 +1,17 @@ package terraform import ( - "crypto/md5" - "encoding/hex" + "fmt" + + "github.com/hashicorp/terraform/addrs" ) -// PathCacheKey returns a cache key for a module path. +// PathObjectCacheKey is like PathCacheKey but includes an additional name +// to be included in the key, for module-namespaced objects. // -// TODO: test -func PathCacheKey(path []string) string { - // There is probably a better way to do this, but this is working for now. - // We just create an MD5 hash of all the MD5 hashes of all the path - // elements. This gets us the property that it is unique per ordering. - hash := md5.New() - for _, p := range path { - single := md5.Sum([]byte(p)) - if _, err := hash.Write(single[:]); err != nil { - panic(err) - } - } - - return hex.EncodeToString(hash.Sum(nil)) +// The result of this function is guaranteed unique for any distinct pair +// of path and name, but is not guaranteed to be in any particular format +// and in particular should never be shown to end-users. +func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string { + return fmt.Sprintf("%s|%s", path.String(), objectName) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go index ea088450..af04c6cd 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/plan.go +++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go @@ -3,12 +3,13 @@ package terraform import ( "bytes" "encoding/gob" - "errors" "fmt" "io" "sync" - "github.com/hashicorp/terraform/config/module" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/terraform/configs" ) func init() { @@ -25,34 +26,55 @@ func init() { // necessary to make a change: the state, diff, config, backend config, etc. // This is so that it can run alone without any other data. 
type Plan struct { - Diff *Diff - Module *module.Tree - State *State - Vars map[string]interface{} + // Diff describes the resource actions that must be taken when this + // plan is applied. + Diff *Diff + + // Config represents the entire configuration that was present when this + // plan was created. + Config *configs.Config + + // State is the Terraform state that was current when this plan was + // created. + // + // It is not allowed to apply a plan that has a stale state, since its + // diff could be outdated. + State *State + + // Vars retains the variables that were set when creating the plan, so + // that the same variables can be applied during apply. + Vars map[string]cty.Value + + // Targets, if non-empty, contains a set of resource address strings that + // identify graph nodes that were selected as targets for plan. + // + // When targets are set, any graph node that is not directly targeted or + // indirectly targeted via dependencies is excluded from the graph. Targets []string + // TerraformVersion is the version of Terraform that was used to create + // this plan. + // + // It is not allowed to apply a plan created with a different version of + // Terraform, since the other fields of this structure may be interpreted + // in different ways between versions. + TerraformVersion string + + // ProviderSHA256s is a map giving the SHA256 hashes of the exact binaries + // used as plugins for each provider during plan. + // + // These must match between plan and apply to ensure that the diff is + // correctly interpreted, since different provider versions may have + // different attributes or attribute value constraints. + ProviderSHA256s map[string][]byte + // Backend is the backend that this plan should use and store data with. Backend *BackendState - once sync.Once -} + // Destroy indicates that this plan was created for a full destroy operation + Destroy bool -// Context returns a Context with the data encapsulated in this plan. -// -// The following fields in opts are overridden by the plan: Config, -// Diff, State, Variables. -func (p *Plan) Context(opts *ContextOpts) (*Context, error) { - opts.Diff = p.Diff - opts.Module = p.Module - opts.State = p.State - opts.Targets = p.Targets - - opts.Variables = make(map[string]interface{}) - for k, v := range p.Vars { - opts.Variables[k] = v - } - - return NewContext(opts) + once sync.Once } func (p *Plan) String() string { @@ -77,7 +99,7 @@ func (p *Plan) init() { } if p.Vars == nil { - p.Vars = make(map[string]interface{}) + p.Vars = make(map[string]cty.Value) } }) } @@ -86,68 +108,15 @@ func (p *Plan) init() { // the ability in the future to change the file format if we want for any // reason. const planFormatMagic = "tfplan" -const planFormatVersion byte = 1 +const planFormatVersion byte = 2 // ReadPlan reads a plan structure out of a reader in the format that // was written by WritePlan. 
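For reference, the binary framing that the original implementations used (their removed bodies appear in the hunk that follows) was the literal magic bytes `tfplan`, one version byte, then a gob-encoded payload. A minimal standalone reader/writer of that framing, with a toy payload struct in place of the real Plan:

```go
package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

const magic = "tfplan"
const version byte = 2

type payload struct{ Name string }

func write(dst *bytes.Buffer, p *payload) error {
	dst.WriteString(magic) // magic bytes identify the file type
	dst.WriteByte(version) // one byte of format version
	return gob.NewEncoder(dst).Encode(p)
}

func read(src *bytes.Buffer) (*payload, error) {
	head := make([]byte, len(magic)+1)
	if _, err := src.Read(head); err != nil {
		return nil, err
	}
	if string(head[:len(magic)]) != magic {
		return nil, fmt.Errorf("not a valid plan file")
	}
	if head[len(magic)] != version {
		return nil, fmt.Errorf("unknown plan file version: %d", head[len(magic)])
	}
	var p payload
	return &p, gob.NewDecoder(src).Decode(&p)
}

func main() {
	var buf bytes.Buffer
	if err := write(&buf, &payload{Name: "example"}); err != nil {
		panic(err)
	}
	p, err := read(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Name)
}
```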
func ReadPlan(src io.Reader) (*Plan, error) { - var result *Plan - var err error - n := 0 - - // Verify the magic bytes - magic := make([]byte, len(planFormatMagic)) - for n < len(magic) { - n, err = src.Read(magic[n:]) - if err != nil { - return nil, fmt.Errorf("error while reading magic bytes: %s", err) - } - } - if string(magic) != planFormatMagic { - return nil, fmt.Errorf("not a valid plan file") - } - - // Verify the version is something we can read - var formatByte [1]byte - n, err = src.Read(formatByte[:]) - if err != nil { - return nil, err - } - if n != len(formatByte) { - return nil, errors.New("failed to read plan version byte") - } - - if formatByte[0] != planFormatVersion { - return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0]) - } - - dec := gob.NewDecoder(src) - if err := dec.Decode(&result); err != nil { - return nil, err - } - - return result, nil + return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead") } // WritePlan writes a plan somewhere in a binary format. func WritePlan(d *Plan, dst io.Writer) error { - // Write the magic bytes so we can determine the file format later - n, err := dst.Write([]byte(planFormatMagic)) - if err != nil { - return err - } - if n != len(planFormatMagic) { - return errors.New("failed to write plan format magic bytes") - } - - // Write a version byte so we can iterate on version at some point - n, err = dst.Write([]byte{planFormatVersion}) - if err != nil { - return err - } - if n != 1 { - return errors.New("failed to write plan version byte") - } - - return gob.NewEncoder(dst).Encode(d) + return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go new file mode 100644 index 00000000..4ae346d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go @@ -0,0 +1,522 @@ +package terraform + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/zclconf/go-cty/cty" + ctyjson "github.com/zclconf/go-cty/cty/json" + + "github.com/hashicorp/terraform/config" + "github.com/hashicorp/terraform/config/hcl2shim" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" +) + +var _ providers.Interface = (*MockProvider)(nil) + +// MockProvider implements providers.Interface but mocks out all the +// calls for testing purposes. +type MockProvider struct { + sync.Mutex + + // Anything you want, in case you need to store extra data with the mock. 
+ Meta interface{} + + GetSchemaCalled bool + GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests + + PrepareProviderConfigCalled bool + PrepareProviderConfigResponse providers.PrepareProviderConfigResponse + PrepareProviderConfigRequest providers.PrepareProviderConfigRequest + PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse + + ValidateResourceTypeConfigCalled bool + ValidateResourceTypeConfigTypeName string + ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse + ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest + ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse + + ValidateDataSourceConfigCalled bool + ValidateDataSourceConfigTypeName string + ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse + ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest + ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse + + UpgradeResourceStateCalled bool + UpgradeResourceStateTypeName string + UpgradeResourceStateResponse providers.UpgradeResourceStateResponse + UpgradeResourceStateRequest providers.UpgradeResourceStateRequest + UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse + + ConfigureCalled bool + ConfigureResponse providers.ConfigureResponse + ConfigureRequest providers.ConfigureRequest + ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below + + StopCalled bool + StopFn func() error + StopResponse error + + ReadResourceCalled bool + ReadResourceResponse providers.ReadResourceResponse + ReadResourceRequest providers.ReadResourceRequest + ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse + + PlanResourceChangeCalled bool + PlanResourceChangeResponse providers.PlanResourceChangeResponse + PlanResourceChangeRequest providers.PlanResourceChangeRequest + PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse + + ApplyResourceChangeCalled bool + ApplyResourceChangeResponse providers.ApplyResourceChangeResponse + ApplyResourceChangeRequest providers.ApplyResourceChangeRequest + ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse + + ImportResourceStateCalled bool + ImportResourceStateResponse providers.ImportResourceStateResponse + ImportResourceStateRequest providers.ImportResourceStateRequest + ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse + // Legacy return type for existing tests, which will be shimmed into an + // ImportResourceStateResponse if set + ImportStateReturn []*InstanceState + + ReadDataSourceCalled bool + ReadDataSourceResponse providers.ReadDataSourceResponse + ReadDataSourceRequest providers.ReadDataSourceRequest + ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse + + CloseCalled bool + CloseError error + + // Legacy callbacks: if these are set, we will shim incoming calls for + // new-style methods to these old-fashioned terraform.ResourceProvider + // mock callbacks, for the benefit of older tests that were written 
against + // the old mock API. + ValidateFn func(c *ResourceConfig) (ws []string, es []error) + ConfigureFn func(c *ResourceConfig) error + DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) + ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error) +} + +func (p *MockProvider) GetSchema() providers.GetSchemaResponse { + p.Lock() + defer p.Unlock() + p.GetSchemaCalled = true + return p.getSchema() +} + +func (p *MockProvider) getSchema() providers.GetSchemaResponse { + // This version of getSchema doesn't do any locking, so it's suitable to + // call from other methods of this mock as long as they are already + // holding the lock. + + ret := providers.GetSchemaResponse{ + Provider: providers.Schema{}, + DataSources: map[string]providers.Schema{}, + ResourceTypes: map[string]providers.Schema{}, + } + if p.GetSchemaReturn != nil { + ret.Provider.Block = p.GetSchemaReturn.Provider + for n, s := range p.GetSchemaReturn.DataSources { + ret.DataSources[n] = providers.Schema{ + Block: s, + } + } + for n, s := range p.GetSchemaReturn.ResourceTypes { + ret.ResourceTypes[n] = providers.Schema{ + Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]), + Block: s, + } + } + } + + return ret +} + +func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse { + p.Lock() + defer p.Unlock() + + p.PrepareProviderConfigCalled = true + p.PrepareProviderConfigRequest = r + if p.PrepareProviderConfigFn != nil { + return p.PrepareProviderConfigFn(r) + } + return p.PrepareProviderConfigResponse +} + +func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateResourceTypeConfigCalled = true + p.ValidateResourceTypeConfigRequest = r + + if p.ValidateFn != nil { + resp := p.getSchema() + schema := resp.Provider.Block + rc := NewResourceConfigShimmed(r.Config, schema) + warns, errs := p.ValidateFn(rc) + ret := providers.ValidateResourceTypeConfigResponse{} + for _, warn := range warns { + ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn)) + } + for _, err := range errs { + ret.Diagnostics = ret.Diagnostics.Append(err) + } + } + if p.ValidateResourceTypeConfigFn != nil { + return p.ValidateResourceTypeConfigFn(r) + } + + return p.ValidateResourceTypeConfigResponse +} + +func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse { + p.Lock() + defer p.Unlock() + + p.ValidateDataSourceConfigCalled = true + p.ValidateDataSourceConfigRequest = r + + if p.ValidateDataSourceConfigFn != nil { + return p.ValidateDataSourceConfigFn(r) + } + + return p.ValidateDataSourceConfigResponse +} + +func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse { + p.Lock() + defer p.Unlock() + + schemas := p.getSchema() + schema := schemas.ResourceTypes[r.TypeName] + schemaType := schema.Block.ImpliedType() + + p.UpgradeResourceStateCalled = true + p.UpgradeResourceStateRequest = r + + if p.UpgradeResourceStateFn != nil { + return p.UpgradeResourceStateFn(r) + } + + resp := p.UpgradeResourceStateResponse + + if resp.UpgradedState == cty.NilVal { + switch { + case r.RawStateFlatmap != nil: + v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType) + if err != nil { + resp.Diagnostics = 
resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + case len(r.RawStateJSON) > 0: + v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType) + + if err != nil { + resp.Diagnostics = resp.Diagnostics.Append(err) + return resp + } + resp.UpgradedState = v + } + } + return resp +} + +func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse { + p.Lock() + defer p.Unlock() + + p.ConfigureCalled = true + p.ConfigureRequest = r + + if p.ConfigureFn != nil { + resp := p.getSchema() + schema := resp.Provider.Block + rc := NewResourceConfigShimmed(r.Config, schema) + ret := providers.ConfigureResponse{} + + err := p.ConfigureFn(rc) + if err != nil { + ret.Diagnostics = ret.Diagnostics.Append(err) + } + return ret + } + if p.ConfigureNewFn != nil { + return p.ConfigureNewFn(r) + } + + return p.ConfigureResponse +} + +func (p *MockProvider) Stop() error { + // We intentionally don't lock in this one because the whole point of this + // method is to be called concurrently with another operation that can + // be cancelled. The provider itself is responsible for handling + // any concurrency concerns in this case. + + p.StopCalled = true + if p.StopFn != nil { + return p.StopFn() + } + + return p.StopResponse +} + +func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse { + p.Lock() + defer p.Unlock() + + p.ReadResourceCalled = true + p.ReadResourceRequest = r + + if p.ReadResourceFn != nil { + return p.ReadResourceFn(r) + } + + // make sure the NewState fits the schema + newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState) + if err != nil { + panic(err) + } + resp := p.ReadResourceResponse + resp.NewState = newState + + return resp +} + +func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse { + p.Lock() + defer p.Unlock() + + p.PlanResourceChangeCalled = true + p.PlanResourceChangeRequest = r + + if p.DiffFn != nil { + ps := p.getSchema() + if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil { + return providers.PlanResourceChangeResponse{ + Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("mock provider has no schema for resource type %s", r.TypeName)), + } + } + schema := ps.ResourceTypes[r.TypeName].Block + info := &InstanceInfo{ + Type: r.TypeName, + } + priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0) + cfg := NewResourceConfigShimmed(r.Config, schema) + + legacyDiff, err := p.DiffFn(info, priorState, cfg) + + var res providers.PlanResourceChangeResponse + res.PlannedState = r.ProposedNewState + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + if legacyDiff != nil { + newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema) + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + + res.PlannedState = newVal + + var requiresNew []string + for attr, d := range legacyDiff.Attributes { + if d.RequiresNew { + requiresNew = append(requiresNew, attr) + } + } + requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType()) + if err != nil { + res.Diagnostics = res.Diagnostics.Append(err) + } + res.RequiresReplace = requiresReplace + } + return res + } + if p.PlanResourceChangeFn != nil { + return p.PlanResourceChangeFn(r) + } + + return p.PlanResourceChangeResponse +} + +func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
+	p.Lock()
+	p.ApplyResourceChangeCalled = true
+	p.ApplyResourceChangeRequest = r
+	p.Unlock()
+
+	if p.ApplyFn != nil {
+		// ApplyFn is a special callback fashioned after our old provider
+		// interface, which expected to be given an actual diff rather than
+		// separate old/new values to apply. Therefore we need to approximate
+		// a diff here well enough that _most_ of our legacy ApplyFns in old
+		// tests still see the behavior they are expecting. New tests should
+		// not use this, and should instead use ApplyResourceChangeFn directly.
+		providerSchema := p.getSchema()
+		schema, ok := providerSchema.ResourceTypes[r.TypeName]
+		if !ok {
+			return providers.ApplyResourceChangeResponse{
+				Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)),
+			}
+		}
+
+		info := &InstanceInfo{
+			Type: r.TypeName,
+		}
+
+		priorVal := r.PriorState
+		plannedVal := r.PlannedState
+		priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal)
+		plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal)
+		s := NewInstanceStateShimmedFromValue(priorVal, 0)
+		d := &InstanceDiff{
+			Attributes: make(map[string]*ResourceAttrDiff),
+		}
+		if plannedMap == nil { // destroying, then
+			d.Destroy = true
+			// Destroy diffs don't have any attribute diffs
+		} else {
+			if priorMap == nil { // creating, then
+				// We'll just make an empty prior map to make things easier below.
+				priorMap = make(map[string]string)
+			}
+
+			for k, new := range plannedMap {
+				old := priorMap[k]
+				newComputed := false
+				if new == config.UnknownVariableValue {
+					new = ""
+					newComputed = true
+				}
+				d.Attributes[k] = &ResourceAttrDiff{
+					Old:         old,
+					New:         new,
+					NewComputed: newComputed,
+					Type:        DiffAttrInput, // not generally used in tests, so just hard-coded
+				}
+			}
+			// Also need any attributes that were removed in "planned"
+			for k, old := range priorMap {
+				if _, ok := plannedMap[k]; ok {
+					continue
+				}
+				d.Attributes[k] = &ResourceAttrDiff{
+					Old:        old,
+					NewRemoved: true,
+					Type:       DiffAttrInput,
+				}
+			}
+		}
+		newState, err := p.ApplyFn(info, s, d)
+		resp := providers.ApplyResourceChangeResponse{}
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+		}
+		var newVal cty.Value
+		if newState != nil {
+			var err error
+			newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType())
+			if err != nil {
+				resp.Diagnostics = resp.Diagnostics.Append(err)
+			}
+		} else {
+			// If apply returned a nil new state then that's the old way to
+			// indicate that the object was destroyed. Our new interface calls
+			// for that to be signalled as a null value.
+			newVal = cty.NullVal(schema.Block.ImpliedType())
+		}
+		resp.NewState = newVal
+
+		return resp
+	}
+	if p.ApplyResourceChangeFn != nil {
+		return p.ApplyResourceChangeFn(r)
+	}
+
+	return p.ApplyResourceChangeResponse
+}
+
+func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	if p.ImportStateReturn != nil {
+		for _, is := range p.ImportStateReturn {
+			if is.Attributes == nil {
+				is.Attributes = make(map[string]string)
+			}
+			is.Attributes["id"] = is.ID
+
+			typeName := is.Ephemeral.Type
+			// Use the requested type if the resource has no type of its own.
+			// We still return the empty type, which will error, but this prevents a panic.
+			if typeName == "" {
+				typeName = r.TypeName
+			}
+
+			schema := p.GetSchemaReturn.ResourceTypes[typeName]
+			if schema == nil {
+				panic("no schema found for " + typeName)
+			}
+
+			private, err := json.Marshal(is.Meta)
+			if err != nil {
+				panic(err)
+			}
+
+			state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType())
+			if err != nil {
+				panic(err)
+			}
+
+			state, err = schema.CoerceValue(state)
+			if err != nil {
+				panic(err)
+			}
+
+			p.ImportResourceStateResponse.ImportedResources = append(
+				p.ImportResourceStateResponse.ImportedResources,
+				providers.ImportedResource{
+					TypeName: is.Ephemeral.Type,
+					State:    state,
+					Private:  private,
+				})
+		}
+	}
+
+	p.ImportResourceStateCalled = true
+	p.ImportResourceStateRequest = r
+	if p.ImportResourceStateFn != nil {
+		return p.ImportResourceStateFn(r)
+	}
+
+	return p.ImportResourceStateResponse
+}
+
+func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ReadDataSourceCalled = true
+	p.ReadDataSourceRequest = r
+
+	if p.ReadDataSourceFn != nil {
+		return p.ReadDataSourceFn(r)
+	}
+
+	return p.ReadDataSourceResponse
+}
+
+func (p *MockProvider) Close() error {
+	p.CloseCalled = true
+	return p.CloseError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
new file mode 100644
index 00000000..f5958916
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
@@ -0,0 +1,154 @@
+package terraform
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/convert"
+
+	"github.com/hashicorp/terraform/provisioners"
+)
+
+var _ provisioners.Interface = (*MockProvisioner)(nil)
+
+// MockProvisioner implements provisioners.Interface but mocks out all the
+// calls for testing purposes.
+type MockProvisioner struct {
+	sync.Mutex
+	// Anything you want, in case you need to store extra data with the mock.
+	Meta interface{}
+
+	GetSchemaCalled   bool
+	GetSchemaResponse provisioners.GetSchemaResponse
+
+	ValidateProvisionerConfigCalled   bool
+	ValidateProvisionerConfigRequest  provisioners.ValidateProvisionerConfigRequest
+	ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse
+	ValidateProvisionerConfigFn       func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse
+
+	ProvisionResourceCalled   bool
+	ProvisionResourceRequest  provisioners.ProvisionResourceRequest
+	ProvisionResourceResponse provisioners.ProvisionResourceResponse
+	ProvisionResourceFn       func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse
+
+	StopCalled   bool
+	StopResponse error
+	StopFn       func() error
+
+	CloseCalled   bool
+	CloseResponse error
+	CloseFn       func() error
+
+	// Legacy callbacks: if these are set, we will shim incoming calls for
+	// new-style methods to these old-fashioned terraform.ResourceProvider
+	// mock callbacks, for the benefit of older tests that were written against
+	// the old mock API.
+	ApplyFn func(rs *InstanceState, c *ResourceConfig) error
+}
+
+func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.GetSchemaCalled = true
+	return p.getSchema()
+}
+
+// getSchema is the implementation of GetSchema, which can be called from other
+// methods on MockProvisioner that may already be holding the lock.
+func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
+	return p.GetSchemaResponse
+}
+
+func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ValidateProvisionerConfigCalled = true
+	p.ValidateProvisionerConfigRequest = r
+	if p.ValidateProvisionerConfigFn != nil {
+		return p.ValidateProvisionerConfigFn(r)
+	}
+	return p.ValidateProvisionerConfigResponse
+}
+
+func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
+	p.Lock()
+	defer p.Unlock()
+
+	p.ProvisionResourceCalled = true
+	p.ProvisionResourceRequest = r
+	if p.ApplyFn != nil {
+		if !r.Config.IsKnown() {
+			panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config))
+		}
+
+		schema := p.getSchema()
+		rc := NewResourceConfigShimmed(r.Config, schema.Provisioner)
+		connVal := r.Connection
+		connMap := map[string]string{}
+
+		if !connVal.IsNull() && connVal.IsKnown() {
+			for it := connVal.ElementIterator(); it.Next(); {
+				ak, av := it.Element()
+				name := ak.AsString()
+
+				if !av.IsKnown() || av.IsNull() {
+					continue
+				}
+
+				av, _ = convert.Convert(av, cty.String)
+				connMap[name] = av.AsString()
+			}
+		}
+
+		// We no longer pass the full instance state to a provisioner, so we'll
+		// construct a partial one that should be good enough for what existing
+		// test mocks need.
+		is := &InstanceState{
+			Ephemeral: EphemeralState{
+				ConnInfo: connMap,
+			},
+		}
+		var resp provisioners.ProvisionResourceResponse
+		err := p.ApplyFn(is, rc)
+		if err != nil {
+			resp.Diagnostics = resp.Diagnostics.Append(err)
+		}
+		return resp
+	}
+	if p.ProvisionResourceFn != nil {
+		fn := p.ProvisionResourceFn
+		return fn(r)
+	}
+
+	return p.ProvisionResourceResponse
+}
+
+func (p *MockProvisioner) Stop() error {
+	// We intentionally don't lock in this one because the whole point of this
+	// method is to be called concurrently with another operation that can
+	// be cancelled. The provisioner itself is responsible for handling
+	// any concurrency concerns in this case.
+
+	p.StopCalled = true
+	if p.StopFn != nil {
+		return p.StopFn()
+	}
+
+	return p.StopResponse
+}
+
+func (p *MockProvisioner) Close() error {
+	p.Lock()
+	defer p.Unlock()
+
+	p.CloseCalled = true
+	if p.CloseFn != nil {
+		return p.CloseFn()
+	}
+
+	return p.CloseResponse
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
index 0acf0beb..2cd6c5bf 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -7,9 +7,14 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/hashicorp/terraform/config"
 	"github.com/mitchellh/copystructure"
 	"github.com/mitchellh/reflectwalk"
+	"github.com/zclconf/go-cty/cty"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"github.com/hashicorp/terraform/configs/configschema"
 )
 
 // ResourceProvisionerConfig is used to pair a provisioner
@@ -25,9 +30,10 @@ type ResourceProvisionerConfig struct {
 	ConnInfo    *config.RawConfig
 }
 
-// Resource encapsulates a resource, its configuration, its provider,
-// its current state, and potentially a desired diff from the state it
-// wants to reach.
+// Resource is a legacy way to identify a particular resource instance.
+//
+// New code should use addrs.ResourceInstance instead. This is still here
+// only for codepaths that haven't been updated yet.
 type Resource struct {
 	// These are all used by the new EvalNode stuff.
 	Name string
@@ -47,6 +53,31 @@ type Resource struct {
 	Flags ResourceFlag
 }
 
+// NewResource constructs a legacy Resource object from an
+// addrs.ResourceInstance value.
+//
+// This is provided to shim to old codepaths that haven't been updated away
+// from this type yet. Since this old type is not able to represent instances
+// that have string keys, this function will panic if given a resource address
+// that has a string key.
+func NewResource(addr addrs.ResourceInstance) *Resource {
+	ret := &Resource{
+		Name: addr.Resource.Name,
+		Type: addr.Resource.Type,
+	}
+
+	if addr.Key != addrs.NoKey {
+		switch tk := addr.Key.(type) {
+		case addrs.IntKey:
+			ret.CountIndex = int(tk)
+		default:
+			panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key))
+		}
+	}
+
+	return ret
+}
+
 // ResourceKind specifies what kind of instance we're working with, whether
 // its a primary instance, a tainted instance, or an orphan.
 type ResourceFlag byte
@@ -72,34 +103,98 @@ type InstanceInfo struct {
 	uniqueExtra string
 }
 
-// HumanId is a unique Id that is human-friendly and useful for UI elements.
-func (i *InstanceInfo) HumanId() string {
-	if i == nil {
-		return ""
+// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance.
+//
+// InstanceInfo is a legacy type, and uses of it should be gradually replaced
+// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as
+// appropriate.
+//
+// The legacy InstanceInfo type cannot represent module instances with instance
+// keys, so this function will panic if given such a path. Uses of this type
+// should all be removed or replaced before implementing "count" and "for_each"
+// arguments on modules in order to avoid such panics.
+//
+// This legacy type also cannot represent resource instances with string
+// instance keys. It will panic if the given key is not either NoKey or an
+// IntKey.
+func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo {
+	// We need an old-style []string module path for InstanceInfo.
+	path := make([]string, len(addr.Module))
+	for i, step := range addr.Module {
+		if step.InstanceKey != addrs.NoKey {
+			panic("NewInstanceInfo cannot convert module instance with key")
+		}
+		path[i] = step.Name
 	}
 
-	if len(i.ModulePath) <= 1 {
-		return i.Id
+	// This is a funny old meaning of "id" that is no longer current. It should
+	// not be used for anything users might see. Note that it does not include
+	// a representation of the resource mode, and so it's impossible to
+	// determine from an InstanceInfo alone whether it is a managed or data
+	// resource that is being referred to.
+	id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name)
+	if addr.Resource.Resource.Mode == addrs.DataResourceMode {
+		id = "data." + id
+	}
+	if addr.Resource.Key != addrs.NoKey {
+		switch k := addr.Resource.Key.(type) {
+		case addrs.IntKey:
+			id = id + fmt.Sprintf(".%d", int(k))
+		default:
+			panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key))
+		}
 	}
 
-	return fmt.Sprintf(
-		"module.%s.%s",
-		strings.Join(i.ModulePath[1:], "."),
-		i.Id)
+	return &InstanceInfo{
+		Id:         id,
+		ModulePath: path,
+		Type:       addr.Resource.Resource.Type,
+	}
 }
 
-func (i *InstanceInfo) uniqueId() string {
-	prefix := i.HumanId()
-	if v := i.uniqueExtra; v != "" {
-		prefix += " " + v
+// ResourceAddress returns the address of the resource that the receiver is describing.
+func (i *InstanceInfo) ResourceAddress() *ResourceAddress {
+	// GROSS: for tainted and deposed instances, their status gets appended
+	// to i.Id to create a unique id for the graph node. Historically these
+	// ids were displayed to the user, so it's designed to be human-readable:
+	// "aws_instance.bar.0 (deposed #0)"
+	//
+	// So here we detect such suffixes and try to interpret them back to
+	// their original meaning so we can then produce a ResourceAddress
+	// with a suitable InstanceType.
+	id := i.Id
+	instanceType := TypeInvalid
+	if idx := strings.Index(id, " ("); idx != -1 {
+		remain := id[idx:]
+		id = id[:idx]
+
+		switch {
+		case strings.Contains(remain, "tainted"):
+			instanceType = TypeTainted
+		case strings.Contains(remain, "deposed"):
+			instanceType = TypeDeposed
+		}
 	}
 
-	return prefix
+	addr, err := parseResourceAddressInternal(id)
+	if err != nil {
+		// should never happen, since that would indicate a bug in the
+		// code that constructed this InstanceInfo.
+		panic(fmt.Errorf("InstanceInfo has invalid Id %s", id))
+	}
+	if len(i.ModulePath) > 1 {
+		addr.Path = i.ModulePath[1:] // trim off "root" prefix, which is implied
+	}
+	if instanceType != TypeInvalid {
+		addr.InstanceTypeSet = true
+		addr.InstanceType = instanceType
+	}
+	return addr
 }
 
-// ResourceConfig holds the configuration given for a resource. This is
-// done instead of a raw `map[string]interface{}` type so that rich
-// methods can be added to it to make dealing with it easier.
+// ResourceConfig is a legacy type that was formerly used to represent
+// interpolatable configuration blocks. It is now only used to shim to old
+// APIs that still use this type, via NewResourceConfigShimmed.
 type ResourceConfig struct {
 	ComputedKeys []string
 	Raw          map[string]interface{}
@@ -115,6 +210,85 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
 	return result
 }
 
+// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
+// ResourceConfig object, so that it can be passed to older APIs that expect
+// this wrapping.
+//
+// The returned ResourceConfig is already interpolated and cannot be
+// re-interpolated. It is, therefore, useful only to functions that expect
+// an already-populated ResourceConfig which they then treat as read-only.
+//
+// If the given value is not of an object type that conforms to the given
+// schema then this function will panic.
+func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
+	if !val.Type().IsObjectType() {
+		panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
+	}
+	ret := &ResourceConfig{}
+
+	legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema)
+	if legacyVal != nil {
+		ret.Config = legacyVal
+
+		// Now we need to walk through our structure and find any unknown values,
+		// producing the separate list ComputedKeys to represent these. We use the
+		// schema here so that we can preserve the expected invariant
+		// that an attribute is always either wholly known or wholly unknown, while
+		// a child block can be partially unknown.
+		ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "")
+	} else {
+		ret.Config = make(map[string]interface{})
+	}
+	ret.Raw = ret.Config
+
+	return ret
+}
+
+// Record any unknown config values in ComputedKeys. This field had been unused
+// in helper/schema, but in the new protocol we're using this so that the SDK
+// can now handle having an unknown collection. The legacy diff code doesn't
+// properly handle the unknown, because it can't be expressed in the same way
+// between the config and diff.
+func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string {
+	var ret []string
+	ty := val.Type()
+
+	if val.IsNull() {
+		return ret
+	}
+
+	if !val.IsKnown() {
+		// we shouldn't have an entirely unknown resource, but prevent empty
+		// strings just in case
+		if len(path) > 0 {
+			ret = append(ret, path)
+		}
+		return ret
+	}
+
+	if path != "" {
+		path += "."
+	}
+	switch {
+	case ty.IsListType(), ty.IsTupleType(), ty.IsSetType():
+		i := 0
+		for it := val.ElementIterator(); it.Next(); i++ {
+			_, subVal := it.Element()
+			keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i))
+			ret = append(ret, keys...)
+		}
+
+	case ty.IsMapType(), ty.IsObjectType():
+		for it := val.ElementIterator(); it.Next(); {
+			subK, subVal := it.Element()
+			keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString()))
+			ret = append(ret, keys...)
+		}
+	}
+
+	return ret
+}
+
 // DeepCopy performs a deep copy of the configuration. This makes it safe
 // to modify any of the structures that are part of the resource config without
 // affecting the original configuration.
@@ -306,7 +480,7 @@ func (c *ResourceConfig) get(
 			if err != nil {
 				return nil, false
 			}
-			if i >= int64(cv.Len()) {
+			if int(i) < 0 || int(i) >= cv.Len() {
 				return nil, false
 			}
 			current = cv.Index(int(i)).Interface()
@@ -334,6 +508,14 @@
 // refactor is complete.
 func (c *ResourceConfig) interpolateForce() {
 	if c.raw == nil {
+		// If we don't have a lowercase "raw" but we _do_ have the uppercase
+		// Raw populated then this indicates that we're receiving a shim
+		// ResourceConfig created by NewResourceConfigShimmed, which is already
+		// fully evaluated and thus this function doesn't need to do anything.
+		if c.Raw != nil {
+			return
+		}
+
 		var err error
 		c.raw, err = config.NewRawConfig(make(map[string]interface{}))
 		if err != nil {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
index a8a0c955..156ecf5c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -7,7 +7,10 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/hashicorp/terraform/addrs"
+
 	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/configs"
 )
 
 // ResourceAddress is a way of identifying an individual resource (or,
@@ -41,9 +44,9 @@ func (r *ResourceAddress) Copy() *ResourceAddress {
 		Type:  r.Type,
 		Mode:  r.Mode,
 	}
-	for _, p := range r.Path {
-		n.Path = append(n.Path, p)
-	}
+
+	n.Path = append(n.Path, r.Path...)
+
 	return n
 }
 
@@ -89,6 +92,68 @@ func (r *ResourceAddress) String() string {
 	return strings.Join(result, ".")
 }
 
+// HasResourceSpec returns true if the address has a resource spec, as
+// defined in the documentation:
+// https://www.terraform.io/docs/internals/resource-addressing.html
+// In particular, this returns false if the address contains only
+// a module path, thus addressing the entire module.
+func (r *ResourceAddress) HasResourceSpec() bool {
+	return r.Type != "" && r.Name != ""
+}
+
+// WholeModuleAddress returns the resource address that refers to all
+// resources in the same module as the receiver address.
+func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress {
+	return &ResourceAddress{
+		Path:            r.Path,
+		Index:           -1,
+		InstanceTypeSet: false,
+	}
+}
+
+// MatchesResourceConfig returns true if the receiver matches the given
+// configuration resource within the given _static_ module path. Note that
+// the module path in a resource address is a _dynamic_ module path, and
+// multiple dynamic resource paths may map to a single static path if
+// count and for_each are in use on module calls.
+//
+// Since resource configuration blocks represent all of the instances of
+// a multi-instance resource, the index of the address (if any) is not
+// considered.
+func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool {
+	if r.HasResourceSpec() {
+		// FIXME: Some ugliness while we are between worlds. Functionality
+		// in "addrs" should eventually replace this ResourceAddress idea
+		// completely, but for now we'll need to translate to the old
+		// way of representing resource modes.
+		switch r.Mode {
+		case config.ManagedResourceMode:
+			if rc.Mode != addrs.ManagedResourceMode {
+				return false
+			}
+		case config.DataResourceMode:
+			if rc.Mode != addrs.DataResourceMode {
+				return false
+			}
+		}
+		if r.Type != rc.Type || r.Name != rc.Name {
+			return false
+		}
+	}
+
+	addrPath := r.Path
+
+	// normalize
+	if len(addrPath) == 0 {
+		addrPath = nil
+	}
+	if len(path) == 0 {
+		path = nil
+	}
+	rawPath := []string(path)
+	return reflect.DeepEqual(addrPath, rawPath)
+}
+
 // stateId returns the ID that this resource should be entered with
 // in the state. This is also used for diffs. In the future, we'd like to
 // move away from this string field so I don't export this.
@@ -185,7 +250,10 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) {
 	// not allowed to say "data." without a type following
 	if mode == config.DataResourceMode && matches["type"] == "" {
-		return nil, fmt.Errorf("must target specific data instance")
+		return nil, fmt.Errorf(
+			"invalid resource address %q: must target specific data instance",
+			s,
+		)
 	}
 
 	return &ResourceAddress{
@@ -199,6 +267,213 @@ func ParseResourceAddress(s string) (*ResourceAddress, error) {
 	}, nil
 }
 
+// ParseResourceAddressForInstanceDiff creates a ResourceAddress for a
+// resource name as described in a module diff.
+//
+// For historical reasons a different addressing format is used in this
+// context. The internal format should not be shown in the UI and instead
+// this function should be used to translate to a ResourceAddress and
+// then, where appropriate, use the String method to produce a canonical
+// resource address string for display in the UI.
+//
+// The given path slice must be empty (or nil) for the root module, and
+// otherwise consist of a sequence of module names traversing down into
+// the module tree. If a non-nil path is provided, the caller must not
+// modify its underlying array after passing it to this function.
+func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAddress, error) {
+	addr, err := parseResourceAddressInternal(key)
+	if err != nil {
+		return nil, err
+	}
+	addr.Path = path
+	return addr, nil
+}
+
+// NewLegacyResourceAddress creates a ResourceAddress from a new-style
+// addrs.AbsResource value.
+//
+// This is provided for shimming purposes so that we can still easily call into
+// older functions that expect the ResourceAddress type.
+func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
+	ret := &ResourceAddress{
+		Type: addr.Resource.Type,
+		Name: addr.Resource.Name,
+	}
+
+	switch addr.Resource.Mode {
+	case addrs.ManagedResourceMode:
+		ret.Mode = config.ManagedResourceMode
+	case addrs.DataResourceMode:
+		ret.Mode = config.DataResourceMode
+	default:
+		panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Mode))
+	}
+
+	path := make([]string, len(addr.Module))
+	for i, step := range addr.Module {
+		if step.InstanceKey != addrs.NoKey {
+			// At the time of writing this can't happen because we don't
+			// yet generate keyed module instances. This legacy codepath must
+			// be removed before we can support "count" and "for_each" for
+			// modules.
+			panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
+		}
+
+		path[i] = step.Name
+	}
+	ret.Path = path
+	ret.Index = -1
+
+	return ret
+}
+
+// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
+// addrs.AbsResourceInstance value.
+//
+// This is provided for shimming purposes so that we can still easily call into
+// older functions that expect the ResourceAddress type.
+func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
+	ret := &ResourceAddress{
+		Type: addr.Resource.Resource.Type,
+		Name: addr.Resource.Resource.Name,
+	}
+
+	switch addr.Resource.Resource.Mode {
+	case addrs.ManagedResourceMode:
+		ret.Mode = config.ManagedResourceMode
+	case addrs.DataResourceMode:
+		ret.Mode = config.DataResourceMode
+	default:
+		panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Resource.Mode))
+	}
+
+	path := make([]string, len(addr.Module))
+	for i, step := range addr.Module {
+		if step.InstanceKey != addrs.NoKey {
+			// At the time of writing this can't happen because we don't
+			// yet generate keyed module instances. This legacy codepath must
+			// be removed before we can support "count" and "for_each" for
+			// modules.
+			panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
+		}
+
+		path[i] = step.Name
+	}
+	ret.Path = path
+
+	if addr.Resource.Key == addrs.NoKey {
+		ret.Index = -1
+	} else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
+		ret.Index = int(ik)
+	} else {
+		panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
+	}
+
+	return ret
+}
+
+// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to
+// the new resource address type addrs.AbsResourceInstance.
+//
+// This method can be used only on an address that has a resource specification.
+// It will panic if called on a module-path-only ResourceAddress. Use
+// method HasResourceSpec to check before calling, in contexts where it is
+// unclear.
+//
+// addrs.AbsResourceInstance does not represent the "tainted" and "deposed"
+// states, and so if these are present on the receiver then they are discarded.
+//
+// This is provided for shimming purposes so that we can easily adapt functions
+// that are returning the legacy ResourceAddress type, for situations where
+// the new type is required.
+func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance {
+	if !addr.HasResourceSpec() {
+		panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec")
+	}
+
+	ret := addrs.AbsResourceInstance{
+		Module: addr.ModuleInstanceAddr(),
+		Resource: addrs.ResourceInstance{
+			Resource: addrs.Resource{
+				Type: addr.Type,
+				Name: addr.Name,
+			},
+		},
+	}
+
+	switch addr.Mode {
+	case config.ManagedResourceMode:
+		ret.Resource.Resource.Mode = addrs.ManagedResourceMode
+	case config.DataResourceMode:
+		ret.Resource.Resource.Mode = addrs.DataResourceMode
+	default:
+		panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode))
+	}
+
+	if addr.Index != -1 {
+		ret.Resource.Key = addrs.IntKey(addr.Index)
+	}
+
+	return ret
+}
+
+// ModuleInstanceAddr returns the module path portion of the receiver as an
+// addrs.ModuleInstance value.
+func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance {
+	path := make(addrs.ModuleInstance, len(addr.Path))
+	for i, name := range addr.Path {
+		path[i] = addrs.ModuleInstanceStep{Name: name}
+	}
+	return path
+}
+
+// Contains returns true if and only if the given node is contained within
+// the receiver.
+//
+// Containment is defined in terms of the module and resource hierarchy:
+// a resource is contained within its module and any ancestor modules,
+// an indexed resource instance is contained within the unindexed resource, etc.
+func (addr *ResourceAddress) Contains(other *ResourceAddress) bool {
+	ourPath := addr.Path
+	givenPath := other.Path
+	if len(givenPath) < len(ourPath) {
+		return false
+	}
+	for i := range ourPath {
+		if ourPath[i] != givenPath[i] {
+			return false
+		}
+	}
+
+	// If the receiver is a whole-module address then the path prefix
+	// matching is all we need.
+	if !addr.HasResourceSpec() {
+		return true
+	}
+
+	if addr.Type != other.Type || addr.Name != other.Name || addr.Mode != other.Mode {
+		return false
+	}
+
+	if addr.Index != -1 && addr.Index != other.Index {
+		return false
+	}
+
+	if addr.InstanceTypeSet && (addr.InstanceTypeSet != other.InstanceTypeSet || addr.InstanceType != other.InstanceType) {
+		return false
+	}
+
+	return true
+}
+
+// Equals returns true if the receiver matches the given address.
+//
+// The name of this method is a misnomer, since it doesn't test for exact
+// equality. Instead, it tests that the _specified_ parts of each
+// address match, treating any unspecified parts as wildcards.
+//
+// See also Contains, which takes a more hierarchical approach to comparing
+// addresses.
 func (addr *ResourceAddress) Equals(raw interface{}) bool {
 	other, ok := raw.(*ResourceAddress)
 	if !ok {
@@ -233,6 +508,59 @@ func (addr *ResourceAddress) Equals(raw interface{}) bool {
 		modeMatch
 }
 
+// Less returns true if and only if the receiver should be sorted before
+// the given address when presenting a list of resource addresses to
+// an end-user.
+//
+// This sort uses lexicographic sorting for most components, but uses
+// numeric sort for indices, thus causing index 10 to sort after
+// index 9, rather than after index 1.
+func (addr *ResourceAddress) Less(other *ResourceAddress) bool {
+
+	switch {
+
+	case len(addr.Path) != len(other.Path):
+		return len(addr.Path) < len(other.Path)
+
+	case !reflect.DeepEqual(addr.Path, other.Path):
+		// If the two paths are the same length but don't match, we'll just
+		// cheat and compare the string forms since it's easier than
+		// comparing all of the path segments in turn, and lexicographic
+		// comparison is correct for the module path portion.
+		addrStr := addr.String()
+		otherStr := other.String()
+		return addrStr < otherStr
+
+	case addr.Mode != other.Mode:
+		return addr.Mode == config.DataResourceMode
+
+	case addr.Type != other.Type:
+		return addr.Type < other.Type
+
+	case addr.Name != other.Name:
+		return addr.Name < other.Name
+
+	case addr.Index != other.Index:
+		// Since "Index" is -1 for an un-indexed address, this also conveniently
+		// sorts unindexed addresses before indexed ones, should they both
+		// appear for some reason.
+		return addr.Index < other.Index
+
+	case addr.InstanceTypeSet != other.InstanceTypeSet:
+		return !addr.InstanceTypeSet
+
+	case addr.InstanceType != other.InstanceType:
+		// InstanceType is actually an enum, so this is just an arbitrary
+		// sort based on the enum numeric values, and thus not particularly
+		// meaningful.
+		return addr.InstanceType < other.InstanceType
+
+	default:
+		return false
+
+	}
+}
+
 func ParseResourceIndex(s string) (int, error) {
 	if s == "" {
 		return -1, nil
@@ -275,7 +603,7 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
 	// string "aws_instance.web.tainted[1]"
 	re := regexp.MustCompile(`\A` +
 		// "module.foo.module.bar" (optional)
-		`(?P<modulepath>(?:module\.[^.]+\.?)*)` +
+		`(?P<modulepath>(?:module\.(?P<module_name>[^.]+)\.?)*)` +
 		// possibly "data.", if targeting is a data resource
 		`(?P<data_prefix>(?:data\.)?)` +
 		// "aws_instance.web" (optional when module path specified)
@@ -289,7 +617,7 @@ func tokenizeResourceAddress(s string) (map[string]string, error) {
 	groupNames := re.SubexpNames()
 	rawMatches := re.FindAllStringSubmatch(s, -1)
 	if len(rawMatches) != 1 {
-		return nil, fmt.Errorf("Problem parsing address: %q", s)
+		return nil, fmt.Errorf("invalid resource address %q", s)
 	}
 
 	matches := make(map[string]string)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 1a68c869..3455ad88 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -1,5 +1,14 @@
 package terraform
 
+import (
+	"fmt"
+
+	"github.com/hashicorp/terraform/tfdiags"
+
+	"github.com/hashicorp/terraform/plugin/discovery"
+	"github.com/hashicorp/terraform/providers"
+)
+
 // ResourceProvider is an interface that must be implemented by any
 // resource provider: the thing that creates and manages the resources in
 // a Terraform configuration.
@@ -14,13 +23,21 @@ type ResourceProvider interface {
 	 * Functions related to the provider
 	 *********************************************************************/
 
-	// Input is called to ask the provider to ask the user for input
-	// for completing the configuration if necesarry.
+	// ProviderSchema returns the config schema for the main provider
+	// configuration, as would appear in a "provider" block in the
+	// configuration files.
 	//
-	// This may or may not be called, so resource provider writers shouldn't
-	// rely on this being available to set some default values for validate
-	// later. Example of a situation where this wouldn't be called is if
-	// the user is not using a TTY.
+	// Currently not all providers support schema. Callers must therefore
+	// first call Resources and DataSources and ensure that at least one
+	// resource or data source has the SchemaAvailable flag set.
+	GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
+
+	// Input was used prior to v0.12 to ask the provider to prompt the user
+	// for input to complete the configuration.
+	//
+	// From v0.12 onwards this method is never called because Terraform Core
+	// is able to handle the necessary input logic itself based on the
+	// schema returned from GetSchema.
 	Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
 
 	// Validate is called once at the beginning with the raw configuration
@@ -164,11 +181,69 @@ type ResourceProviderCloser interface {
 type ResourceType struct {
 	Name       string // Name of the resource, example "instance" (no provider prefix)
 	Importable bool   // Whether this resource supports importing
+
+	// SchemaAvailable is set if the provider supports the ProviderSchema,
+	// ResourceTypeSchema and DataSourceSchema methods. Although it is
+	// included on each resource type, it's actually a provider-wide setting
+	// that's smuggled here only because that avoids a breaking change to
+	// the plugin protocol.
+	SchemaAvailable bool
 }
 
 // DataSource is a data source that a resource provider implements.
 type DataSource struct {
 	Name string
+
+	// SchemaAvailable is set if the provider supports the ProviderSchema,
+	// ResourceTypeSchema and DataSourceSchema methods. Although it is
+	// included on each resource type, it's actually a provider-wide setting
+	// that's smuggled here only because that avoids a breaking change to
+	// the plugin protocol.
+	SchemaAvailable bool
+}
+
+// ResourceProviderResolver is an interface implemented by objects that are
+// able to resolve a given set of resource provider version constraints
+// into ResourceProviderFactory callbacks.
+type ResourceProviderResolver interface {
+	// Given a constraint map, return a ResourceProviderFactory for each
+	// requested provider. If some or all of the constraints cannot be
+	// satisfied, return a non-nil slice of errors describing the problems.
+	ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)
+}
+
+// ResourceProviderResolverFunc wraps a callback function and turns it into
+// a ResourceProviderResolver implementation, for convenience in situations
+// where a function and its associated closure are sufficient as a resolver
+// implementation.
+type ResourceProviderResolverFunc func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error)
+
+// ResolveProviders implements ResourceProviderResolver by calling the
+// wrapped function.
+func (f ResourceProviderResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {
+	return f(reqd)
+}
+
+// ResourceProviderResolverFixed returns a ResourceProviderResolver that
+// has a fixed set of provider factories provided by the caller. The returned
+// resolver ignores version constraints entirely and just returns the given
+// factory for each requested provider name.
+//
+// This function is primarily used in tests, to provide mock providers or
+// in-process providers under test.
+func ResourceProviderResolverFixed(factories map[string]ResourceProviderFactory) ResourceProviderResolver {
+	return ResourceProviderResolverFunc(func(reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, []error) {
+		ret := make(map[string]ResourceProviderFactory, len(reqd))
+		var errs []error
+		for name := range reqd {
+			if factory, exists := factories[name]; exists {
+				ret[name] = factory
+			} else {
+				errs = append(errs, fmt.Errorf("provider %q is not available", name))
+			}
+		}
+		return ret, errs
+	})
+}
 
 // ResourceProviderFactory is a function type that creates a new instance
@@ -202,3 +277,43 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool {
 
 	return false
 }
+
+// resourceProviderFactories matches available plugins to the given version
+// requirements to produce a map of compatible provider plugins if possible,
+// or an error if the currently-available plugins are insufficient.
+//
+// This should be called only with configurations that have passed calls
+// to config.Validate(), which ensures that all of the given version
+// constraints are valid. It will panic if any invalid constraints are present.
+func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) {
+	var diags tfdiags.Diagnostics
+	ret, errs := resolver.ResolveProviders(reqd)
+	if errs != nil {
+		diags = diags.Append(
+			tfdiags.Sourceless(tfdiags.Error,
+				"Could not satisfy plugin requirements",
+				errPluginInit,
+			),
+		)
+
+		for _, err := range errs {
+			diags = diags.Append(err)
+		}
+
+		return nil, diags
+	}
+
+	return ret, nil
+}
+
+const errPluginInit = `
+Plugin reinitialization required. Please run "terraform init".
+
+Plugins are external binaries that Terraform uses to access and manipulate
+resources. The configuration provided requires plugins which can't be located,
+don't satisfy the version constraints, or are otherwise incompatible.
+
+Terraform automatically discovers provider requirements from your
+configuration, including providers used in child modules. To see the
+requirements and constraints from each module, run "terraform providers".
+`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
index f5315339..4000e3d2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
@@ -1,6 +1,8 @@
 package terraform
 
-import "sync"
+import (
+	"sync"
+)
 
 // MockResourceProvider implements ResourceProvider but mocks out all the
 // calls for testing purposes.
@@ -12,6 +14,10 @@ type MockResourceProvider struct {
 
 	CloseCalled bool
 	CloseError  error
+	GetSchemaCalled      bool
+	GetSchemaRequest     *ProviderSchemaRequest
+	GetSchemaReturn      *ProviderSchema
+	GetSchemaReturnError error
 	InputCalled bool
 	InputInput  UIInput
 	InputConfig *ResourceConfig
@@ -92,8 +98,19 @@ func (p *MockResourceProvider) Close() error {
 	return p.CloseError
 }
 
+func (p *MockResourceProvider) GetSchema(req *ProviderSchemaRequest) (*ProviderSchema, error) {
+	p.Lock()
+	defer p.Unlock()
+
+	p.GetSchemaCalled = true
+	p.GetSchemaRequest = req
+	return p.GetSchemaReturn, p.GetSchemaReturnError
+}
+
 func (p *MockResourceProvider) Input(
 	input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+	p.Lock()
+	defer p.Unlock()
 	p.InputCalled = true
 	p.InputInput = input
 	p.InputConfig = c
@@ -186,6 +203,7 @@ func (p *MockResourceProvider) Diff(
 	p.DiffInfo = info
 	p.DiffState = state
 	p.DiffDesired = desired
+
 	if p.DiffFn != nil {
 		return p.DiffFn(info, state, desired)
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
index 361ec1ec..2743dd7e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -1,9 +1,21 @@
 package terraform
 
+import (
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/provisioners"
+)
+
 // ResourceProvisioner is an interface that must be implemented by any
 // resource provisioner: the thing that initializes resources in
 // a Terraform configuration.
 type ResourceProvisioner interface {
+	// GetConfigSchema returns the schema for the provisioner type's main
+	// configuration block. This is called prior to Validate to enable some
+	// basic structural validation to be performed automatically and to allow
+	// the configuration to be properly extracted from potentially-ambiguous
+	// configuration file formats.
+	GetConfigSchema() (*configschema.Block, error)
+
 	// Validate is called once at the beginning with the raw
 	// configuration (no interpolation done) and can return a list of warnings
 	// and/or errors.
@@ -52,3 +64,7 @@ type ResourceProvisionerCloser interface {
 // ResourceProvisionerFactory is a function type that creates a new instance
 // of a resource provisioner.
 type ResourceProvisionerFactory func() (ResourceProvisioner, error)
+
+// ProvisionerFactory is a function type that creates a new instance
+// of a provisioners.Interface.
+type ProvisionerFactory = provisioners.Factory
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
index f471a518..7b88cf73 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -1,6 +1,10 @@
 package terraform
 
-import "sync"
+import (
+	"sync"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+)
 
 // MockResourceProvisioner implements ResourceProvisioner but mocks out all the
 // calls for testing purposes.
@@ -9,6 +13,10 @@ type MockResourceProvisioner struct {
 	// Anything you want, in case you need to store extra data with the mock.
 	Meta interface{}
 
+	GetConfigSchemaCalled       bool
+	GetConfigSchemaReturnSchema *configschema.Block
+	GetConfigSchemaReturnError  error
+
 	ApplyCalled bool
 	ApplyOutput UIOutput
 	ApplyState  *InstanceState
@@ -27,6 +35,13 @@ type MockResourceProvisioner struct {
 	StopReturnError error
 }
 
+var _ ResourceProvisioner = (*MockResourceProvisioner)(nil)
+
+func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
+	p.GetConfigSchemaCalled = true
+	return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError
+}
+
 func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
 	p.Lock()
 	defer p.Unlock()
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
new file mode 100644
index 00000000..62991c82
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
@@ -0,0 +1,278 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/providers"
+	"github.com/hashicorp/terraform/states"
+	"github.com/hashicorp/terraform/tfdiags"
+)
+
+// Schemas is a container for various kinds of schema that Terraform needs
+// during processing.
+type Schemas struct {
+	Providers    map[string]*ProviderSchema
+	Provisioners map[string]*configschema.Block
+}
+
+// ProviderSchema returns the entire ProviderSchema object that was produced
+// by the plugin for the given provider, or nil if no such schema is available.
+//
+// It's usually better to use the more precise methods offered by type
+func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema { + if ss.Providers == nil { + return nil + } + return ss.Providers[typeName] +} + +// ProviderConfig returns the schema for the provider configuration of the +// given provider type, or nil if no such schema is available. +func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block { + ps := ss.ProviderSchema(typeName) + if ps == nil { + return nil + } + return ps.Provider +} + +// ResourceTypeConfig returns the schema for the configuration of a given +// resource type belonging to a given provider type, or nil of no such +// schema is available. +// +// In many cases the provider type is inferrable from the resource type name, +// but this is not always true because users can override the provider for +// a resource using the "provider" meta-argument. Therefore it's important to +// always pass the correct provider name, even though it many cases it feels +// redundant. +func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) { + ps := ss.ProviderSchema(providerType) + if ps == nil || ps.ResourceTypes == nil { + return nil, 0 + } + return ps.SchemaForResourceType(resourceMode, resourceType) +} + +// ProvisionerConfig returns the schema for the configuration of a given +// provisioner, or nil of no such schema is available. +func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block { + return ss.Provisioners[name] +} + +// LoadSchemas searches the given configuration, state and plan (any of which +// may be nil) for constructs that have an associated schema, requests the +// necessary schemas from the given component factory (which must _not_ be nil), +// and returns a single object representing all of the necessary schemas. +// +// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing +// errors across multiple separate objects. Errors here will usually indicate +// either misbehavior on the part of one of the providers or of the provider +// protocol itself. When returned with errors, the returned schemas object is +// still valid but may be incomplete. +func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) { + schemas := &Schemas{ + Providers: map[string]*ProviderSchema{}, + Provisioners: map[string]*configschema.Block{}, + } + var diags tfdiags.Diagnostics + + newDiags := loadProviderSchemas(schemas.Providers, config, state, components) + diags = diags.Append(newDiags) + newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components) + diags = diags.Append(newDiags) + + return schemas, diags.Err() +} + +func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics { + var diags tfdiags.Diagnostics + + ensure := func(typeName string) { + if _, exists := schemas[typeName]; exists { + return + } + + log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName) + provider, err := components.ResourceProvider(typeName, "early/"+typeName) + if err != nil { + // We'll put a stub in the map so we won't re-attempt this on + // future calls. 
+			schemas[typeName] = &ProviderSchema{}
+			diags = diags.Append(
+				fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err),
+			)
+			return
+		}
+		defer func() {
+			provider.Close()
+		}()
+
+		resp := provider.GetSchema()
+		if resp.Diagnostics.HasErrors() {
+			// We'll put a stub in the map so we won't re-attempt this on
+			// future calls.
+			schemas[typeName] = &ProviderSchema{}
+			diags = diags.Append(
+				fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()),
+			)
+			return
+		}
+
+		s := &ProviderSchema{
+			Provider:      resp.Provider.Block,
+			ResourceTypes: make(map[string]*configschema.Block),
+			DataSources:   make(map[string]*configschema.Block),
+
+			ResourceTypeSchemaVersions: make(map[string]uint64),
+		}
+
+		if resp.Provider.Version < 0 {
+			// We're not using the version numbers here yet, but we'll check
+			// for validity anyway in case we start using them in future.
+			diags = diags.Append(
+				fmt.Errorf("invalid negative schema version in provider configuration for provider %q", typeName),
+			)
+		}
+
+		for t, r := range resp.ResourceTypes {
+			s.ResourceTypes[t] = r.Block
+			s.ResourceTypeSchemaVersions[t] = uint64(r.Version)
+			if r.Version < 0 {
+				diags = diags.Append(
+					fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName),
+				)
+			}
+		}
+
+		for t, d := range resp.DataSources {
+			s.DataSources[t] = d.Block
+			if d.Version < 0 {
+				// We're not using the version numbers here yet, but we'll check
+				// for validity anyway in case we start using them in future.
+				diags = diags.Append(
+					fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName),
+				)
+			}
+		}
+
+		schemas[typeName] = s
+	}
+
+	if config != nil {
+		for _, typeName := range config.ProviderTypes() {
+			ensure(typeName)
+		}
+	}
+
+	if state != nil {
+		needed := providers.AddressedTypesAbs(state.ProviderAddrs())
+		for _, typeName := range needed {
+			ensure(typeName)
+		}
+	}
+
+	return diags
+}
+
+func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics {
+	var diags tfdiags.Diagnostics
+
+	ensure := func(name string) {
+		if _, exists := schemas[name]; exists {
+			return
+		}
+
+		log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name)
+		provisioner, err := components.ResourceProvisioner(name, "early/"+name)
+		if err != nil {
+			// We'll put a stub in the map so we won't re-attempt this on
+			// future calls.
+			schemas[name] = &configschema.Block{}
+			diags = diags.Append(
+				fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err),
+			)
+			return
+		}
+		defer func() {
+			if closer, ok := provisioner.(ResourceProvisionerCloser); ok {
+				closer.Close()
+			}
+		}()
+
+		resp := provisioner.GetSchema()
+		if resp.Diagnostics.HasErrors() {
+			// We'll put a stub in the map so we won't re-attempt this on
+			// future calls.
+			schemas[name] = &configschema.Block{}
+			diags = diags.Append(
+				fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()),
+			)
+			return
+		}
+
+		schemas[name] = resp.Provisioner
+	}
+
+	if config != nil {
+		for _, rc := range config.Module.ManagedResources {
+			for _, pc := range rc.Managed.Provisioners {
+				ensure(pc.Type)
+			}
+		}
+
+		// Must also visit our child modules, recursively.
+		for _, cc := range config.Children {
+			childDiags := loadProvisionerSchemas(schemas, cc, components)
+			diags = diags.Append(childDiags)
+		}
+	}
+
+	return diags
+}
+
+// ProviderSchema represents the schema for a provider's own configuration
+// and the configuration for some or all of its resources and data sources.
+//
+// The completeness of this structure depends on how it was constructed.
+// When constructed for a configuration, it will generally include only
+// resource types and data sources used by that configuration.
+type ProviderSchema struct {
+	Provider      *configschema.Block
+	ResourceTypes map[string]*configschema.Block
+	DataSources   map[string]*configschema.Block
+
+	ResourceTypeSchemaVersions map[string]uint64
+}
+
+// SchemaForResourceType attempts to find a schema for the given mode and type.
+// Returns nil if no such schema is available.
+func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) {
+	switch mode {
+	case addrs.ManagedResourceMode:
+		return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName]
+	case addrs.DataResourceMode:
+		// Data resources don't have schema versions right now, since state is discarded for each refresh
+		return ps.DataSources[typeName], 0
+	default:
+		// Shouldn't happen, because the above cases are comprehensive.
+		return nil, 0
+	}
+}
+
+// SchemaForResourceAddr attempts to find a schema for the mode and type from
+// the given resource address. Returns nil if no such schema is available.
+func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
+	return ps.SchemaForResourceType(addr.Mode, addr.Type)
+}
+
+// ProviderSchemaRequest is used to describe to a ResourceProvider which
+// aspects of schema are required, when calling the GetSchema method.
+type ProviderSchemaRequest struct {
+	ResourceTypes []string
+	DataSources   []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 905ec3dc..092b6907 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -9,18 +9,31 @@ import (
 	"io"
 	"io/ioutil"
 	"log"
+	"os"
 	"reflect"
-	"regexp"
 	"sort"
 	"strconv"
 	"strings"
 	"sync"
 
-	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/go-version"
-	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/errwrap"
+	multierror "github.com/hashicorp/go-multierror"
+	uuid "github.com/hashicorp/go-uuid"
+	version "github.com/hashicorp/go-version"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/hcl2/hcl/hclsyntax"
 	"github.com/mitchellh/copystructure"
-	"github.com/satori/go.uuid"
+	"github.com/zclconf/go-cty/cty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/config"
+	"github.com/hashicorp/terraform/config/hcl2shim"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/plans"
+	"github.com/hashicorp/terraform/tfdiags"
+	tfversion "github.com/hashicorp/terraform/version"
 )
 
 const (
@@ -31,26 +44,38 @@ const (
 // rootModulePath is the path of the root module
 var rootModulePath = []string{"root"}
 
+// normalizeModulePath transforms a legacy module path (which may or may not
+// have a redundant "root" label at the start of it) into an
+// addrs.ModuleInstance representing the same module.
+//
+// For legacy reasons, different parts of Terraform disagree about whether the
+// root module has the path []string{} or []string{"root"}, and so this
+// function accepts both and trims off the "root". An implication of this is
+// that it's not possible to actually have a module call in the root module
+// that is itself named "root", since that would be ambiguous.
+//
 // normalizeModulePath takes a raw module path and returns a path that
 // has the rootModulePath prepended to it. If I could go back in time I
 // would've never had a rootModulePath (empty path would be root). We can
 // still fix this but thats a big refactor that my branch doesn't make sense
 // for. Instead, this function normalizes paths.
-func normalizeModulePath(p []string) []string {
-	k := len(rootModulePath)
+func normalizeModulePath(p []string) addrs.ModuleInstance {
+	// FIXME: Remove this once everyone is using addrs.ModuleInstance.
 
-	// If we already have a root module prefix, we're done
-	if len(p) >= len(rootModulePath) {
-		if reflect.DeepEqual(p[:k], rootModulePath) {
-			return p
-		}
+	if len(p) > 0 && p[0] == "root" {
+		p = p[1:]
 	}
 
-	// None? Prefix it
-	result := make([]string, len(rootModulePath)+len(p))
-	copy(result, rootModulePath)
-	copy(result[k:], p)
-	return result
+	ret := make(addrs.ModuleInstance, len(p))
+	for i, name := range p {
+		// For now we don't actually support modules with multiple instances
+		// identified by keys, so we just treat every path element as a
+		// step with no key.
 
 // State keeps track of a snapshot state-of-the-world that Terraform
@@ -136,21 +161,43 @@ func (s *State) children(path []string) []*ModuleState {
 //
 // This should be the preferred method to add module states since it
 // allows us to optimize lookups later as well as control sorting.
-func (s *State) AddModule(path []string) *ModuleState {
+func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState {
 	s.Lock()
 	defer s.Unlock()
 
 	return s.addModule(path)
 }
 
-func (s *State) addModule(path []string) *ModuleState {
+func (s *State) addModule(path addrs.ModuleInstance) *ModuleState {
 	// check if the module exists first
 	m := s.moduleByPath(path)
 	if m != nil {
 		return m
 	}
 
-	m = &ModuleState{Path: path}
+	// Lower the new-style address into a legacy-style address.
+	// This requires that none of the steps have instance keys, which is
+	// true for all addresses at the time of implementing this because
+	// "count" and "for_each" are not yet implemented for modules.
+	// For the purposes of state, the legacy address format also includes
+	// a redundant extra prefix element "root". It is important to include
+	// this because the "prune" method will remove any module that has a
+	// path length less than one, and other parts of the state code will
+	// trim off the first element indiscriminately.
+	legacyPath := make([]string, len(path)+1)
+	legacyPath[0] = "root"
+	for i, step := range path {
+		if step.InstanceKey != addrs.NoKey {
+			// FIXME: Once the rest of Terraform is ready to use count and
+			// for_each, remove all of this and just write the addrs.ModuleInstance
+			// value itself into the ModuleState.
+			panic("state cannot represent modules with count or for_each keys")
+		}
+
+		legacyPath[i+1] = step.Name
+	}
+
+	m = &ModuleState{Path: legacyPath}
 	m.init()
 	s.Modules = append(s.Modules, m)
 	s.sort()
@@ -160,7 +207,7 @@ func (s *State) addModule(path []string) *ModuleState {
 // ModuleByPath is used to lookup the module state for the given path.
 // This should be the preferred lookup mechanism as it allows for future
 // lookup optimizations.
-func (s *State) ModuleByPath(path []string) *ModuleState {
+func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState {
 	if s == nil {
 		return nil
 	}
@@ -170,7 +217,7 @@ func (s *State) ModuleByPath(path []string) *ModuleState {
 	return s.moduleByPath(path)
 }
 
-func (s *State) moduleByPath(path []string) *ModuleState {
+func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState {
 	for _, mod := range s.Modules {
 		if mod == nil {
 			continue
@@ -178,97 +225,14 @@ func (s *State) moduleByPath(path []string) *ModuleState {
 		if mod.Path == nil {
 			panic("missing module path")
 		}
-		if reflect.DeepEqual(mod.Path, path) {
+		modPath := normalizeModulePath(mod.Path)
+		if modPath.String() == path.String() {
 			return mod
 		}
 	}
 	return nil
 }
 
-// ModuleOrphans returns all the module orphans in this state by
-// returning their full paths. These paths can be used with ModuleByPath
-// to return the actual state.
-func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
-	s.Lock()
-	defer s.Unlock()
-
-	return s.moduleOrphans(path, c)
-
-}
-
-func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
-	// direct keeps track of what direct children we have both in our config
-	// and in our state. childrenKeys keeps track of what isn't an orphan.
-	direct := make(map[string]struct{})
-	childrenKeys := make(map[string]struct{})
-	if c != nil {
-		for _, m := range c.Modules {
-			childrenKeys[m.Name] = struct{}{}
-			direct[m.Name] = struct{}{}
-		}
-	}
-
-	// Go over the direct children and find any that aren't in our keys.
-	var orphans [][]string
-	for _, m := range s.children(path) {
-		key := m.Path[len(m.Path)-1]
-
-		// Record that we found this key as a direct child. We use this
-		// later to find orphan nested modules.
-		direct[key] = struct{}{}
-
-		// If we have a direct child still in our config, it is not an orphan
-		if _, ok := childrenKeys[key]; ok {
-			continue
-		}
-
-		orphans = append(orphans, m.Path)
-	}
-
-	// Find the orphans that are nested...
-	for _, m := range s.Modules {
-		if m == nil {
-			continue
-		}
-
-		// We only want modules that are at least grandchildren
-		if len(m.Path) < len(path)+2 {
-			continue
-		}
-
-		// If it isn't part of our tree, continue
-		if !reflect.DeepEqual(path, m.Path[:len(path)]) {
-			continue
-		}
-
-		// If we have the direct child, then just skip it.
-		key := m.Path[len(path)]
-		if _, ok := direct[key]; ok {
-			continue
-		}
-
-		orphanPath := m.Path[:len(path)+1]
-
-		// Don't double-add if we've already added this orphan (which can happen if
-		// there are multiple nested sub-modules that get orphaned together).
-		alreadyAdded := false
-		for _, o := range orphans {
-			if reflect.DeepEqual(o, orphanPath) {
-				alreadyAdded = true
-				break
-			}
-		}
-		if alreadyAdded {
-			continue
-		}
-
-		// Add this orphan
-		orphans = append(orphans, orphanPath)
-	}
-
-	return orphans
-}
-
 // Empty returns true if the state is empty.
 func (s *State) Empty() bool {
 	if s == nil {
@@ -441,7 +405,7 @@ func (s *State) removeModule(path []string, v *ModuleState) {
 func (s *State) removeResource(path []string, v *ResourceState) {
 	// Get the module this resource lives in. If it doesn't exist, we're done.
-	mod := s.moduleByPath(path)
+	mod := s.moduleByPath(normalizeModulePath(path))
 	if mod == nil {
 		return
 	}
@@ -485,7 +449,7 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState
 // RootModule returns the ModuleState for the root module
 func (s *State) RootModule() *ModuleState {
-	root := s.ModuleByPath(rootModulePath)
+	root := s.ModuleByPath(addrs.RootModuleInstance)
 	if root == nil {
 		panic("missing root module")
 	}
@@ -520,7 +484,7 @@ func (s *State) equal(other *State) bool {
 	}
 	for _, m := range s.Modules {
 		// This isn't very optimal currently but works.
-		otherM := other.moduleByPath(m.Path)
+		otherM := other.moduleByPath(normalizeModulePath(m.Path))
 		if otherM == nil {
 			return false
 		}
@@ -534,6 +498,43 @@ func (s *State) equal(other *State) bool {
 	return true
 }
+// MarshalEqual is similar to Equal but provides a stronger definition of
+// "equal", where two states are equal if and only if their serialized form
+// is byte-for-byte identical.
+//
+// This is primarily useful for callers that are trying to save snapshots
+// of state to persistent storage, allowing them to detect when a new
+// snapshot must be taken.
+//
+// Note that the serial number and lineage are included in the serialized form,
+// so it's the caller's responsibility to properly manage these attributes
+// so that this method is only called on two states that have the same
+// serial and lineage, unless detecting such differences is desired.
+func (s *State) MarshalEqual(other *State) bool {
+	if s == nil && other == nil {
+		return true
+	} else if s == nil || other == nil {
+		return false
+	}
+
+	recvBuf := &bytes.Buffer{}
+	otherBuf := &bytes.Buffer{}
+
+	err := WriteState(s, recvBuf)
+	if err != nil {
+		// should never happen, since we're writing to a buffer
+		panic(err)
+	}
+
+	err = WriteState(other, otherBuf)
+	if err != nil {
+		// should never happen, since we're writing to a buffer
+		panic(err)
+	}
+
+	return bytes.Equal(recvBuf.Bytes(), otherBuf.Bytes())
+}
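Editor's note: a short sketch of the intended use (not part of the diff; the `current` and `lastSaved` variables are assumptions for illustration). A caller persisting snapshots only writes when the serialized forms actually differ:

```go
// current and lastSaved are *State values managed by the caller.
if !current.MarshalEqual(lastSaved) {
    var buf bytes.Buffer
    if err := WriteState(current, &buf); err != nil {
        return err // hypothetical surrounding function returns an error
    }
    // ...persist buf.Bytes() to durable storage, then:
    lastSaved = current.DeepCopy()
}
```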
+
 type StateAgeComparison int
 
 const (
@@ -604,6 +605,10 @@ func (s *State) SameLineage(other *State) bool {
 // DeepCopy performs a deep copy of the state structure and returns
 // a new structure.
 func (s *State) DeepCopy() *State {
+	if s == nil {
+		return nil
+	}
+
 	copy, err := copystructure.Config{Lock: true}.Copy(s)
 	if err != nil {
 		panic(err)
@@ -612,30 +617,6 @@ func (s *State) DeepCopy() *State {
 	return copy.(*State)
 }
 
-// IncrementSerialMaybe increments the serial number of this state
-// if it different from the other state.
-func (s *State) IncrementSerialMaybe(other *State) {
-	if s == nil {
-		return
-	}
-	if other == nil {
-		return
-	}
-	s.Lock()
-	defer s.Unlock()
-
-	if s.Serial > other.Serial {
-		return
-	}
-	if other.TFVersion != s.TFVersion || !s.equal(other) {
-		if other.Serial > s.Serial {
-			s.Serial = other.Serial
-		}
-
-		s.Serial++
-	}
-}
-
 // FromFutureTerraform checks if this state was written by a Terraform
 // version from the future.
 func (s *State) FromFutureTerraform() bool {
@@ -648,7 +629,7 @@ func (s *State) FromFutureTerraform() bool {
 	}
 
 	v := version.Must(version.NewVersion(s.TFVersion))
-	return SemVersion.LessThan(v)
+	return tfversion.SemVer.LessThan(v)
 }
 
 func (s *State) Init() {
@@ -661,8 +642,9 @@ func (s *State) init() {
 	if s.Version == 0 {
 		s.Version = StateVersion
 	}
-	if s.moduleByPath(rootModulePath) == nil {
-		s.addModule(rootModulePath)
+
+	if s.moduleByPath(addrs.RootModuleInstance) == nil {
+		s.addModule(addrs.RootModuleInstance)
 	}
 	s.ensureHasLineage()
@@ -687,7 +669,11 @@ func (s *State) EnsureHasLineage() {
 func (s *State) ensureHasLineage() {
 	if s.Lineage == "" {
-		s.Lineage = uuid.NewV4().String()
+		lineage, err := uuid.GenerateUUID()
+		if err != nil {
+			panic(fmt.Errorf("Failed to generate lineage: %v", err))
+		}
+		s.Lineage = lineage
 		log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
 	} else {
 		log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
@@ -787,13 +773,9 @@ func (s *State) String() string {
 // BackendState stores the configuration to connect to a remote backend.
 type BackendState struct {
-	Type   string                 `json:"type"`   // Backend type
-	Config map[string]interface{} `json:"config"` // Backend raw config
-
-	// Hash is the hash code to uniquely identify the original source
-	// configuration. We use this to detect when there is a change in
-	// configuration even when "type" isn't changed.
-	Hash uint64 `json:"hash"`
+	Type      string          `json:"type"`   // Backend type
+	ConfigRaw json.RawMessage `json:"config"` // Backend raw config
+	Hash      uint64          `json:"hash"`   // Hash of portion of configuration from config files
 }
 
 // Empty returns true if BackendState has no state.
@@ -801,25 +783,50 @@ func (s *BackendState) Empty() bool {
 	return s == nil || s.Type == ""
 }
 
-// Rehash returns a unique content hash for this backend's configuration
-// as a uint64 value.
-// The Hash stored in the backend state needs to match the config itself, but
-// we need to compare the backend config after it has been combined with all
-// options.
-// This function must match the implementation used by config.Backend.
-func (s *BackendState) Rehash() uint64 {
+// Config decodes the type-specific configuration object using the provided
+// schema and returns the result as a cty.Value.
+//
+// An error is returned if the stored configuration does not conform to the
+// given schema.
+func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) {
+	ty := schema.ImpliedType()
 	if s == nil {
-		return 0
+		return cty.NullVal(ty), nil
 	}
+	return ctyjson.Unmarshal(s.ConfigRaw, ty)
+}
 
-	cfg := config.Backend{
-		Type: s.Type,
-		RawConfig: &config.RawConfig{
-			Raw: s.Config,
-		},
+// SetConfig replaces (in-place) the type-specific configuration object using
+// the provided value and associated schema.
+//
+// An error is returned if the given value does not conform to the implied
+// type of the schema.
+func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error {
+	ty := schema.ImpliedType()
+	buf, err := ctyjson.Marshal(val, ty)
+	if err != nil {
+		return err
 	}
+	s.ConfigRaw = buf
+	return nil
+}
 
-	return cfg.Rehash()
+// ForPlan produces an alternative representation of the receiver that is
+// suitable for storing in a plan. The current workspace must additionally
+// be provided, to be stored alongside the backend configuration.
+//
+// The backend configuration schema is required in order to properly
+// encode the backend-specific configuration settings.
+func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) {
+	if s == nil {
+		return nil, nil
+	}
+
+	configVal, err := s.Config(schema)
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err)
+	}
+	return plans.NewBackend(s.Type, configVal, schema, workspaceName)
 }
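Editor's note: a minimal sketch of round-tripping a backend configuration through the two methods above (not part of the diff; the `local` backend schema and path value are assumptions):

```go
schema := &configschema.Block{
    Attributes: map[string]*configschema.Attribute{
        "path": {Type: cty.String, Optional: true},
    },
}
bs := &BackendState{Type: "local"}
err := bs.SetConfig(cty.ObjectVal(map[string]cty.Value{
    "path": cty.StringVal("terraform.tfstate"),
}), schema)
if err != nil {
    // the value did not conform to the schema's implied object type
}
val, err := bs.Config(schema) // decodes ConfigRaw back into a cty.Value
_, _ = val, err
```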
 
 // RemoteState is used to track the information about a remote
@@ -960,6 +967,10 @@ type ModuleState struct {
 	// always disjoint, so the path represents amodule tree
 	Path []string `json:"path"`
 
+	// Locals are kept only transiently in-memory, because we can always
+	// re-compute them.
+	Locals map[string]interface{} `json:"-"`
+
 	// Outputs declared by the module and maintained for each module
 	// even though only the root module technically needs to be kept.
 	// This allows operators to inspect values at the boundaries.
@@ -1061,33 +1072,64 @@ func (m *ModuleState) IsDescendent(other *ModuleState) bool {
 // Orphans returns a list of keys of resources that are in the State
 // but aren't present in the configuration itself. Hence, these keys
 // represent the state of resources that are orphans.
-func (m *ModuleState) Orphans(c *config.Config) []string {
+func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance {
 	m.Lock()
 	defer m.Unlock()
 
-	keys := make(map[string]struct{})
-	for k, _ := range m.Resources {
-		keys[k] = struct{}{}
+	inConfig := make(map[string]struct{})
+	if c != nil {
+		for _, r := range c.ManagedResources {
+			inConfig[r.Addr().String()] = struct{}{}
+		}
+		for _, r := range c.DataResources {
+			inConfig[r.Addr().String()] = struct{}{}
+		}
 	}
 
-	if c != nil {
-		for _, r := range c.Resources {
-			delete(keys, r.Id())
+	var result []addrs.ResourceInstance
+	for k := range m.Resources {
+		// Since we've not yet updated state to use our new address format,
+		// we need to do some shimming here.
+		legacyAddr, err := parseResourceAddressInternal(k)
+		if err != nil {
+			// Suggests that the user tampered with the state, since we always
+			// generate valid internal addresses.
+			log.Printf("ModuleState has invalid resource key %q. Ignoring.", k)
+			continue
+		}
 
-			for k, _ := range keys {
-				if strings.HasPrefix(k, r.Id()+".") {
-					delete(keys, k)
-				}
-			}
+		addr := legacyAddr.AbsResourceInstanceAddr().Resource
+		compareKey := addr.Resource.String() // compare by resource address, ignoring instance key
+		if _, exists := inConfig[compareKey]; !exists {
+			result = append(result, addr)
 		}
 	}
+	return result
+}
 
-	result := make([]string, 0, len(keys))
-	for k, _ := range keys {
-		result = append(result, k)
+// RemovedOutputs returns a list of outputs that are in the State but aren't
+// present in the configuration itself.
+func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue {
+	if outputs == nil {
+		// If we got no output map at all then we'll just treat our set of
+		// configured outputs as empty, since that suggests that they've all
+		// been removed by removing their containing module.
+		outputs = make(map[string]*configs.Output)
 	}
 
-	return result
+	s.Lock()
+	defer s.Unlock()
+
+	var ret []addrs.OutputValue
+	for n := range s.Outputs {
+		if _, declared := outputs[n]; !declared {
+			ret = append(ret, addrs.OutputValue{
+				Name: n,
+			})
+		}
+	}
+
+	return ret
 }
 
 // View returns a view with the given resource prefix.
@@ -1163,6 +1205,8 @@ func (m *ModuleState) prune() {
 			delete(m.Outputs, k)
 		}
 	}
+
+	m.Dependencies = uniqueStrings(m.Dependencies)
 }
 
 func (m *ModuleState) sort() {
@@ -1289,6 +1333,10 @@ func (m *ModuleState) String() string {
 	return buf.String()
 }
 
+func (m *ModuleState) Empty() bool {
+	return len(m.Locals) == 0 && len(m.Outputs) == 0 && len(m.Resources) == 0
+}
+
 // ResourceStateKey is a structured representation of the key used for the
 // ModuleState.Resources mapping
 type ResourceStateKey struct {
@@ -1484,6 +1532,24 @@ func (s *ResourceState) Untaint() {
 	}
 }
 
+// ProviderAddr returns the provider address for the receiver, by parsing the
+// string representation saved in state. An error can be returned if the
+// value in state is corrupt.
+func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) {
+	var diags tfdiags.Diagnostics
+
+	str := s.Provider
+	traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
+	diags = diags.Append(travDiags)
+	if travDiags.HasErrors() {
+		return addrs.AbsProviderConfig{}, diags.Err()
+	}
+
+	addr, addrDiags := addrs.ParseAbsProviderConfig(traversal)
+	diags = diags.Append(addrDiags)
+	return addr, diags.Err()
+}
+
 func (s *ResourceState) init() {
 	s.Lock()
 	defer s.Unlock()
@@ -1526,8 +1592,9 @@ func (s *ResourceState) prune() {
 			i--
 		}
 	}
-	s.Deposed = s.Deposed[:n]
+
+	s.Dependencies = uniqueStrings(s.Dependencies)
 }
 
 func (s *ResourceState) sort() {
@@ -1591,6 +1658,51 @@ func (s *InstanceState) init() {
 	s.Ephemeral.init()
 }
+// NewInstanceStateShimmedFromValue is a shim method to lower a new-style
+// object value representing the attributes of an instance object into the
+// legacy InstanceState representation.
+//
+// This is for shimming to old components only and should not be used in new code.
+func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState {
+	attrs := hcl2shim.FlatmapValueFromHCL2(state)
+	return &InstanceState{
+		ID:         attrs["id"],
+		Attributes: attrs,
+		Meta: map[string]interface{}{
+			"schema_version": schemaVersion,
+		},
+	}
+}
+
+// AttrsAsObjectValue shims from the legacy InstanceState representation to
+// a new-style cty object value representation of the state attributes, using
+// the given type for guidance.
+//
+// The given type must be the implied type of the schema of the resource type
+// of the object whose state is being converted, or the result is undefined.
+//
+// This is for shimming from old components only and should not be used in
+// new code.
+func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) {
+	if s == nil {
+		// if the state is nil, we need to construct a complete cty.Value with
+		// null attributes, rather than a single cty.NullVal(ty)
+		s = &InstanceState{}
+	}
+
+	if s.Attributes == nil {
+		s.Attributes = map[string]string{}
+	}
+
+	// make sure ID is included in the attributes. The InstanceState.ID value
+	// takes precedence.
+	if s.ID != "" {
+		s.Attributes["id"] = s.ID
+	}
+
+	return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty)
+}
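Editor's note: a minimal sketch of shimming a cty object value down to the legacy flatmap form and back (not part of the diff; the object type and values are assumptions, and this presumes code inside the `terraform` package):

```go
ty := cty.Object(map[string]cty.Type{
    "id":  cty.String,
    "ami": cty.String,
})
obj := cty.ObjectVal(map[string]cty.Value{
    "id":  cty.StringVal("i-abc123"),
    "ami": cty.StringVal("ami-0123"),
})
is := NewInstanceStateShimmedFromValue(obj, 1)
// is.Attributes is now the flatmap {"id": "i-abc123", "ami": "ami-0123"}.
back, err := is.AttrsAsObjectValue(ty)
if err == nil {
    _ = back.RawEquals(obj) // true for a simple flat object like this
}
```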
+
 // Copy all the Fields from another InstanceState
 func (s *InstanceState) Set(from *InstanceState) {
 	s.Lock()
@@ -1661,7 +1773,20 @@ func (s *InstanceState) Equal(other *InstanceState) bool {
 		// We only do the deep check if both are non-nil. If one is nil
 		// we treat it as equal since their lengths are both zero (check
 		// above).
-		if !reflect.DeepEqual(s.Meta, other.Meta) {
+		//
+		// Since this can contain numeric values that may change types during
+		// serialization, let's compare the serialized values.
+		sMeta, err := json.Marshal(s.Meta)
+		if err != nil {
+			// marshaling primitives shouldn't ever error out
+			panic(err)
+		}
+		otherMeta, err := json.Marshal(other.Meta)
+		if err != nil {
+			panic(err)
+		}
+
+		if !bytes.Equal(sMeta, otherMeta) {
 			return false
 		}
 	}
@@ -1710,43 +1835,23 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
 		}
 	}
 
-	// Remove any now empty array, maps or sets because a parent structure
-	// won't include these entries in the count value.
-	isCount := regexp.MustCompile(`\.[%#]$`).MatchString
-	var deleted []string
-
-	for k, v := range result.Attributes {
-		if isCount(k) && v == "0" {
-			delete(result.Attributes, k)
-			deleted = append(deleted, k)
-		}
-	}
-
-	for _, k := range deleted {
-		// Sanity check for invalid structures.
-		// If we removed the primary count key, there should have been no
-		// other keys left with this prefix.
-
-		// this must have a "#" or "%" which we need to remove
-		base := k[:len(k)-1]
-		for k, _ := range result.Attributes {
-			if strings.HasPrefix(k, base) {
-				panic(fmt.Sprintf("empty structure %q has entry %q", base, k))
-			}
-		}
-	}
-
 	return result
 }
 
 func (s *InstanceState) String() string {
+	notCreated := "<not created>"
+
+	if s == nil {
+		return notCreated
+	}
+
 	s.Lock()
 	defer s.Unlock()
 
 	var buf bytes.Buffer
 
-	if s == nil || s.ID == "" {
-		return "<not created>"
+	if s.ID == "" {
+		return notCreated
 	}
 
 	buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
@@ -1830,11 +1935,19 @@ var ErrNoState = errors.New("no state")
 // ReadState reads a state structure out of a reader in the format that
 // was written by WriteState.
 func ReadState(src io.Reader) (*State, error) {
+	// check for a nil file specifically, since that produces a platform
+	// specific error if we try to use it in a bufio.Reader.
+	if f, ok := src.(*os.File); ok && f == nil {
+		return nil, ErrNoState
+	}
+
 	buf := bufio.NewReader(src)
+
 	if _, err := buf.Peek(1); err != nil {
-		// the error is either io.EOF or "invalid argument", and both are from
-		// an empty state.
-		return nil, ErrNoState
+		if err == io.EOF {
+			return nil, ErrNoState
+		}
+		return nil, err
 	}
 
 	if err := testForV0State(buf); err != nil {
@@ -1897,7 +2010,7 @@ func ReadState(src io.Reader) (*State, error) {
 		result = v3State
 	default:
 		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
-			SemVersion.String(), versionIdentifier.Version)
+			tfversion.SemVer.String(), versionIdentifier.Version)
 	}
 
 	// If we reached this place we must have a result set
@@ -1941,7 +2054,7 @@ func ReadStateV2(jsonBytes []byte) (*State, error) {
 	// version that we don't understand
 	if state.Version > StateVersion {
 		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
-			SemVersion.String(), state.Version)
+			tfversion.SemVer.String(), state.Version)
 	}
 
 	// Make sure the version is semantic
@@ -1957,12 +2070,12 @@ func ReadStateV2(jsonBytes []byte) (*State, error) {
 		}
 	}
 
-	// Sort it
-	state.sort()
-
 	// catch any unitialized fields in the state
 	state.init()
 
+	// Sort it
+	state.sort()
+
 	return state, nil
 }
 
@@ -1976,7 +2089,7 @@ func ReadStateV3(jsonBytes []byte) (*State, error) {
 	// version that we don't understand
 	if state.Version > StateVersion {
 		return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
-			SemVersion.String(), state.Version)
+			tfversion.SemVer.String(), state.Version)
 	}
 
 	// Make sure the version is semantic
@@ -1992,12 +2105,12 @@ func ReadStateV3(jsonBytes []byte) (*State, error) {
 		}
 	}
 
-	// Sort it
-	state.sort()
-
 	// catch any unitialized fields in the state
 	state.init()
 
+	// Sort it
+	state.sort()
+
 	// Now we write the state back out to detect any changes in normaliztion.
 	// If our state is now written out differently, bump the serial number to
 	// prevent conflicts.
@@ -2017,12 +2130,17 @@ func ReadStateV3(jsonBytes []byte) (*State, error) {
 
 // WriteState writes a state somewhere in a binary format.
 func WriteState(d *State, dst io.Writer) error {
-	// Make sure it is sorted
-	d.sort()
+	// writing a nil state is a noop.
+	if d == nil {
+		return nil
+	}
 
 	// make sure we have no uninitialized fields
 	d.init()
 
+	// Make sure it is sorted
+	d.sort()
+
 	// Ensure the version is set
 	d.Version = StateVersion
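Editor's note: with the nil checks added above, a write/read round trip of an empty (nil) state is well-defined. A minimal sketch, grounded directly in the code shown:

```go
var buf bytes.Buffer
if err := WriteState(nil, &buf); err != nil { // no-op: writes nothing
    panic(err)
}
_, err := ReadState(&buf) // empty reader, so err == ErrNoState
_ = err
```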
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
index f4a431a6..fd3f5c7d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -1,6 +1,8 @@
 package terraform
 
 import (
+	"log"
+
 	"github.com/hashicorp/terraform/dag"
 )
 
@@ -36,10 +38,18 @@ type graphTransformerMulti struct {
 }
 
 func (t *graphTransformerMulti) Transform(g *Graph) error {
+	var lastStepStr string
 	for _, t := range t.Transforms {
+		log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t)
 		if err := t.Transform(g); err != nil {
 			return err
 		}
+		if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
+			log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr)
+			lastStepStr = thisStepStr
+		} else {
+			log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t)
+		}
 	}
 
 	return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
index 10506ea0..897a7e79 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -1,10 +1,8 @@
 package terraform
 
 import (
-	"log"
-
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
 )
 
 // GraphNodeAttachProvider is an interface that must be implemented by nodes
@@ -14,67 +12,8 @@ type GraphNodeAttachProvider interface {
 	GraphNodeSubPath
 
 	// ProviderName with no module prefix. Example: "aws".
-	ProviderName() string
+	ProviderAddr() addrs.AbsProviderConfig
 
 	// Sets the configuration
-	AttachProvider(*config.ProviderConfig)
-}
-
-// AttachProviderConfigTransformer goes through the graph and attaches
-// provider configuration structures to nodes that implement the interfaces
-// above.
-//
-// The attached configuration structures are directly from the configuration.
-// If they're going to be modified, a copy should be made.
-type AttachProviderConfigTransformer struct {
-	Module *module.Tree // Module is the root module for the config
-}
-
-func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
-	if err := t.attachProviders(g); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
-	// Go through and find GraphNodeAttachProvider
-	for _, v := range g.Vertices() {
-		// Only care about GraphNodeAttachProvider implementations
-		apn, ok := v.(GraphNodeAttachProvider)
-		if !ok {
-			continue
-		}
-
-		// Determine what we're looking for
-		path := normalizeModulePath(apn.Path())
-		path = path[1:]
-		name := apn.ProviderName()
-		log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
-
-		// Get the configuration.
-		tree := t.Module.Child(path)
-		if tree == nil {
-			continue
-		}
-
-		// Go through the provider configs to find the matching config
-		for _, p := range tree.Config().ProviderConfigs {
-			// Build the name, which is "name.alias" if an alias exists
-			current := p.Name
-			if p.Alias != "" {
-				current += "." + p.Alias
-			}
-
-			// If the configs match then attach!
-			if current == name {
-				log.Printf("[TRACE] Attaching provider config: %#v", p)
-				apn.AttachProvider(p)
-				break
-			}
-		}
-	}
-
-	return nil
+	AttachProvider(*configs.Provider)
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
index f2ee37e5..03f8564d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -1,35 +1,32 @@
 package terraform
 
 import (
-	"fmt"
 	"log"
 
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/configs"
+	"github.com/hashicorp/terraform/dag"
 )
 
 // GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
 // that want resource configurations attached.
 type GraphNodeAttachResourceConfig interface {
-	// ResourceAddr is the address to the resource
-	ResourceAddr() *ResourceAddress
+	GraphNodeResource
 
 	// Sets the configuration
-	AttachResourceConfig(*config.Resource)
+	AttachResourceConfig(*configs.Resource)
 }
 
 // AttachResourceConfigTransformer goes through the graph and attaches
-// resource configuration structures to nodes that implement the interfaces
-// above.
+// resource configuration structures to nodes that implement
+// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig.
 //
 // The attached configuration structures are directly from the configuration.
 // If they're going to be modified, a copy should be made.
 type AttachResourceConfigTransformer struct {
-	Module *module.Tree // Module is the root module for the config
+	Config *configs.Config // Config is the root node in the config tree
 }
 
 func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
-	log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
 
 	// Go through and find GraphNodeAttachResource
 	for _, v := range g.Vertices() {
@@ -41,36 +38,35 @@ func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
 
 		// Determine what we're looking for
 		addr := arn.ResourceAddr()
-		log.Printf(
-			"[TRACE] AttachResourceConfigTransformer: Attach resource "+
-				"config request: %s", addr)
 
 		// Get the configuration.
-		path := normalizeModulePath(addr.Path)
-		path = path[1:]
-		tree := t.Module.Child(path)
-		if tree == nil {
+		config := t.Config.DescendentForInstance(addr.Module)
+		if config == nil {
+			log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v)
 			continue
 		}
 
-		// Go through the resource configs to find the matching config
-		for _, r := range tree.Config().Resources {
-			// Get a resource address so we can compare
-			a, err := parseResourceAddressConfig(r)
-			if err != nil {
-				panic(fmt.Sprintf(
-					"Error parsing config address, this is a bug: %#v", r))
+		for _, r := range config.Module.ManagedResources {
+			rAddr := r.Addr()
+
+			if rAddr != addr.Resource {
+				// Not the same resource
+				continue
 			}
-			a.Path = addr.Path
 
-			// If this is not the same resource, then continue
-			if !a.Equals(addr) {
+			log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange)
+			arn.AttachResourceConfig(r)
+		}
+		for _, r := range config.Module.DataResources {
+			rAddr := r.Addr()
+
+			if rAddr != addr.Resource {
+				// Not the same resource
 				continue
 			}
 
-			log.Printf("[TRACE] Attaching resource config: %#v", r)
+			log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange)
 			arn.AttachResourceConfig(r)
-			break
 		}
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
new file mode 100644
index 00000000..c7695dd4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
@@ -0,0 +1,99 @@
+package terraform
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeAttachResourceSchema is an interface implemented by node types
+// that need a resource schema attached.
+type GraphNodeAttachResourceSchema interface {
+	GraphNodeResource
+	GraphNodeProviderConsumer
+
+	AttachResourceSchema(schema *configschema.Block, version uint64)
+}
+
+// GraphNodeAttachProviderConfigSchema is an interface implemented by node types
+// that need a provider configuration schema attached.
+type GraphNodeAttachProviderConfigSchema interface {
+	GraphNodeProvider
+
+	AttachProviderConfigSchema(*configschema.Block)
+}
+
+// GraphNodeAttachProvisionerSchema is an interface implemented by node types
+// that need one or more provisioner schemas attached.
+type GraphNodeAttachProvisionerSchema interface {
+	ProvisionedBy() []string
+
+	// AttachProvisionerSchema is called during transform for each provisioner
+	// type returned from ProvisionedBy, providing the configuration schema
+	// for each provisioner in turn. The implementer should save these for
+	// later use in evaluating provisioner configuration blocks.
+	AttachProvisionerSchema(name string, schema *configschema.Block)
+}
+
+// AttachSchemaTransformer finds nodes that implement
+// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or
+// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each
+// and then passes them to a method implemented by the node.
+type AttachSchemaTransformer struct {
+	Schemas *Schemas
+}
+
+func (t *AttachSchemaTransformer) Transform(g *Graph) error {
+	if t.Schemas == nil {
+		// Should never happen with a reasonable caller, but we'll return a
+		// proper error here anyway so that we'll fail gracefully.
+		return fmt.Errorf("AttachSchemaTransformer used with nil Schemas")
+	}
+
+	for _, v := range g.Vertices() {
+
+		if tv, ok := v.(GraphNodeAttachResourceSchema); ok {
+			addr := tv.ResourceAddr()
+			mode := addr.Resource.Mode
+			typeName := addr.Resource.Type
+			providerAddr, _ := tv.ProvidedBy()
+			providerType := providerAddr.ProviderConfig.Type
+
+			schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName)
+			if schema == nil {
+				log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr)
+				continue
+			}
+			log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v))
+			tv.AttachResourceSchema(schema, version)
+		}
+
+		if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok {
+			providerAddr := tv.ProviderAddr()
+			schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type)
+			if schema == nil {
+				log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr)
+				continue
+			}
+			log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v))
+			tv.AttachProviderConfigSchema(schema)
+		}
+
+		if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok {
+			names := tv.ProvisionedBy()
+			for _, name := range names {
+				schema := t.Schemas.ProvisionerConfig(name)
+				if schema == nil {
+					log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v))
+					continue
+				}
+				log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v))
+				tv.AttachProvisionerSchema(name, schema)
+			}
+		}
+	}
+
+	return nil
+}
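Editor's note: a minimal sketch of the shape of a node that would satisfy `GraphNodeAttachProvisionerSchema` (not part of the diff; the node type and the `local-exec` provisioner name are illustrative assumptions):

```go
type exampleNode struct {
    provisionerSchemas map[string]*configschema.Block
}

func (n *exampleNode) ProvisionedBy() []string {
    return []string{"local-exec"}
}

func (n *exampleNode) AttachProvisionerSchema(name string, schema *configschema.Block) {
    if n.provisionerSchemas == nil {
        n.provisionerSchemas = make(map[string]*configschema.Block)
    }
    // saved for later use when evaluating provisioner config blocks
    n.provisionerSchemas[name] = schema
}
```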
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
index 564ff08f..3af7b989 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -4,64 +4,64 @@ import (
 	"log"
 
 	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/states"
 )
 
 // GraphNodeAttachResourceState is an interface that can be implemented
 // to request that a ResourceState is attached to the node.
+//
+// Due to a historical naming inconsistency, the type ResourceState actually
+// represents the state for a particular _instance_, while InstanceState
+// represents the values for that instance during a particular phase
+// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState
+// is supported only for nodes that represent resource instances, even though
+// the name might suggest it is for containing resources.
 type GraphNodeAttachResourceState interface {
-	// The address to the resource for the state
-	ResourceAddr() *ResourceAddress
+	GraphNodeResourceInstance
 
 	// Sets the state
-	AttachResourceState(*ResourceState)
+	AttachResourceState(*states.Resource)
 }
 
 // AttachStateTransformer goes through the graph and attaches
 // state to nodes that implement the interfaces above.
 type AttachStateTransformer struct {
-	State *State // State is the root state
+	State *states.State // State is the root state
 }
 
 func (t *AttachStateTransformer) Transform(g *Graph) error {
 	// If no state, then nothing to do
 	if t.State == nil {
-		log.Printf("[DEBUG] Not attaching any state: state is nil")
+		log.Printf("[DEBUG] Not attaching any node states: overall state is nil")
 		return nil
 	}
 
-	filter := &StateFilter{State: t.State}
 	for _, v := range g.Vertices() {
-		// Only care about nodes requesting we're adding state
+		// Nodes implement this interface to request state attachment.
 		an, ok := v.(GraphNodeAttachResourceState)
 		if !ok {
 			continue
 		}
-		addr := an.ResourceAddr()
+		addr := an.ResourceInstanceAddr()
 
-		// Get the module state
-		results, err := filter.Filter(addr.String())
-		if err != nil {
-			return err
+		rs := t.State.Resource(addr.ContainingResource())
+		if rs == nil {
+			log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr)
+			continue
 		}
 
-		// Attach the first resource state we get
-		found := false
-		for _, result := range results {
-			if rs, ok := result.Value.(*ResourceState); ok {
-				log.Printf(
-					"[DEBUG] Attaching resource state to %q: %#v",
-					dag.VertexName(v), rs)
-				an.AttachResourceState(rs)
-				found = true
-				break
-			}
+		is := rs.Instance(addr.Resource.Key)
+		if is == nil {
+			// We don't actually need this here, since we'll attach the whole
+			// resource state, but we still check because it'd be weird
+			// for the specific instance we're attaching to not to exist.
+			log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr)
+			continue
 		}
 
-		if !found {
-			log.Printf(
-				"[DEBUG] Resource state not found for %q: %s",
-				dag.VertexName(v), addr)
-		}
+		// make sure to attach a copy of the state, so that nodes don't
+		// all share (and mutate) the same ResourceState value.
+		an.AttachResourceState(rs.DeepCopy())
 	}
 
 	return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
index 61bce853..9d3b6f4b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -1,13 +1,11 @@
 package terraform
 
 import (
-	"errors"
-	"fmt"
 	"log"
 	"sync"
 
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
 )
 
@@ -26,14 +24,14 @@ type ConfigTransformer struct {
 	Concrete ConcreteResourceNodeFunc
 
 	// Module is the module to add resources from.
-	Module *module.Tree
+	Config *configs.Config
 
 	// Unique will only add resources that aren't already present in the graph.
 	Unique bool
 
 	// Mode will only add resources that match the given mode
 	ModeFilter bool
-	Mode       config.ResourceMode
+	Mode       addrs.ResourceMode
 
 	l         sync.Mutex
 	uniqueMap map[string]struct{}
@@ -44,16 +42,11 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
 	t.l.Lock()
 	defer t.l.Unlock()
 
-	// If no module is given, we don't do anything
-	if t.Module == nil {
+	// If no configuration is available, we don't do anything
+	if t.Config == nil {
 		return nil
 	}
 
-	// If the module isn't loaded, that is simply an error
-	if !t.Module.Loaded() {
-		return errors.New("module must be loaded for ConfigTransformer")
-	}
-
 	// Reset the uniqueness map. If we're tracking uniques, then populate
 	// it with addresses.
 	t.uniqueMap = make(map[string]struct{})
@@ -67,22 +60,22 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
 	}
 
 	// Start the transformation process
-	return t.transform(g, t.Module)
+	return t.transform(g, t.Config)
 }
 
-func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
+func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error {
 	// If no config, do nothing
-	if m == nil {
+	if config == nil {
 		return nil
 	}
 
 	// Add our resources
-	if err := t.transformSingle(g, m); err != nil {
+	if err := t.transformSingle(g, config); err != nil {
 		return err
 	}
 
 	// Transform all the children.
-	for _, c := range m.Children() {
+	for _, c := range config.Children {
 		if err := t.transform(g, c); err != nil {
 			return err
 		}
@@ -91,43 +84,48 @@ func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
 	return nil
 }
 
-func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
-	log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
-
-	// Get the configuration for this module
-	conf := m.Config()
-
-	// Build the path we're at
-	path := m.Path()
+func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error {
+	path := config.Path
+	module := config.Module
+	log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path)
+
+	// For now we assume that each module call produces only one module
+	// instance with no key, since we don't yet support "count" and "for_each"
+	// on modules.
+	// FIXME: As part of supporting "count" and "for_each" on modules, rework
+	// this so that we'll "expand" the module call first and then create graph
+	// nodes for each module instance separately.
+	instPath := path.UnkeyedInstanceShim()
+
+	allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources))
+	for _, r := range module.ManagedResources {
+		allResources = append(allResources, r)
+	}
+	for _, r := range module.DataResources {
+		allResources = append(allResources, r)
+	}
 
-	// Write all the resources out
-	for _, r := range conf.Resources {
-		// Build the resource address
-		addr, err := parseResourceAddressConfig(r)
-		if err != nil {
-			panic(fmt.Sprintf(
-				"Error parsing config address, this is a bug: %#v", r))
-		}
-		addr.Path = path
+	for _, r := range allResources {
+		relAddr := r.Addr()
 
-		// If this is already in our uniqueness map, don't add it again
-		if _, ok := t.uniqueMap[addr.String()]; ok {
+		if t.ModeFilter && relAddr.Mode != t.Mode {
+			// Skip non-matching modes
 			continue
 		}
 
-		// Remove non-matching modes
-		if t.ModeFilter && addr.Mode != t.Mode {
+		addr := relAddr.Absolute(instPath)
+		if _, ok := t.uniqueMap[addr.String()]; ok {
+			// We've already seen a resource with this address. This should
+			// never happen, because we enforce uniqueness in the config loader.
 			continue
 		}
 
-		// Build the abstract node and the concrete one
 		abstract := &NodeAbstractResource{Addr: addr}
 		var node dag.Vertex = abstract
 		if f := t.Concrete; f != nil {
 			node = f(abstract)
 		}
 
-		// Add it to the graph
 		g.Add(node)
 	}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
index 92f9888d..866c9175 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -1,9 +1,7 @@
 package terraform
 
 import (
-	"errors"
-
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
 )
 
@@ -20,54 +18,47 @@ import (
 type FlatConfigTransformer struct {
 	Concrete ConcreteResourceNodeFunc // What to turn resources into
 
-	Module *module.Tree
+	Config *configs.Config
 }
 
 func (t *FlatConfigTransformer) Transform(g *Graph) error {
-	// If no module, we do nothing
-	if t.Module == nil {
+	// We have nothing to do if there is no configuration.
+	if t.Config == nil {
 		return nil
 	}
 
-	// If the module is not loaded, that is an error
-	if !t.Module.Loaded() {
-		return errors.New("module must be loaded")
-	}
-
-	return t.transform(g, t.Module)
+	return t.transform(g, t.Config)
 }
 
-func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
-	// If no module, no problem
-	if m == nil {
+func (t *FlatConfigTransformer) transform(g *Graph, config *configs.Config) error {
+	// If we have no configuration then there's nothing to do.
+	if config == nil {
 		return nil
 	}
 
 	// Transform all the children.
-	for _, c := range m.Children() {
+	for _, c := range config.Children {
 		if err := t.transform(g, c); err != nil {
 			return err
 		}
 	}
 
-	// Get the configuration for this module
-	config := m.Config()
-
-	// Write all the resources out
-	for _, r := range config.Resources {
-		// Grab the address for this resource
-		addr, err := parseResourceAddressConfig(r)
-		if err != nil {
-			return err
-		}
-		addr.Path = m.Path()
+	module := config.Module
+	// For now we assume that each module call produces only one module
+	// instance with no key, since we don't yet support "count" and "for_each"
+	// on modules.
+	// FIXME: As part of supporting "count" and "for_each" on modules, rework
+	// this so that we'll "expand" the module call first and then create graph
+	// nodes for each module instance separately.
+	instPath := config.Path.UnkeyedInstanceShim()
 
-		// Build the abstract resource. We have the config already so
-		// we'll just pre-populate that.
+	for _, r := range module.ManagedResources {
+		addr := r.Addr().Absolute(instPath)
 		abstract := &NodeAbstractResource{
 			Addr:   addr,
 			Config: r,
 		}
+		// Grab the address for this resource
 		var node dag.Vertex = abstract
 		if f := t.Concrete; f != nil {
 			node = f(abstract)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
index 83415f35..01601bdd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -1,16 +1,21 @@
 package terraform
 
 import (
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
 )
 
 // CountBoundaryTransformer adds a node that depends on everything else
 // so that it runs last in order to clean up the state for nodes that
 // are on the "count boundary": "foo.0" when only one exists becomes "foo"
-type CountBoundaryTransformer struct{}
+type CountBoundaryTransformer struct {
+	Config *configs.Config
+}
 
 func (t *CountBoundaryTransformer) Transform(g *Graph) error {
-	node := &NodeCountBoundary{}
+	node := &NodeCountBoundary{
+		Config: t.Config,
+	}
 	g.Add(node)
 
 	// Depends on everything
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
index edfb460b..2f4d5ede 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -4,15 +4,15 @@ import (
 	"fmt"
 	"log"
 
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/states"
 )
 
 // GraphNodeDestroyerCBD must be implemented by nodes that might be
-// create-before-destroy destroyers.
+// create-before-destroy destroyers, or might plan a create-before-destroy
+// action.
 type GraphNodeDestroyerCBD interface {
-	GraphNodeDestroyer
-
 	// CreateBeforeDestroy returns true if this node represents a node
 	// that is doing a CBD.
 	CreateBeforeDestroy() bool
@@ -23,6 +23,89 @@ type GraphNodeDestroyerCBD interface {
 	ModifyCreateBeforeDestroy(bool) error
 }
 
+// GraphNodeAttachDestroyer is implemented by applyable nodes that have a
+// companion destroy node. This allows the creation node to look up the status
+// of the destroy node and determine if it needs to depose the existing state,
+// or replace it.
+// If a node is not marked as create-before-destroy in the configuration, but a
+// dependency forces that status, only the destroy node will be aware of that
+// status.
+type GraphNodeAttachDestroyer interface {
+	// AttachDestroyNode takes a destroy node and saves a reference to that
+	// node in the receiver, so it can later check the status of
+	// CreateBeforeDestroy().
+	AttachDestroyNode(n GraphNodeDestroyerCBD)
+}
+
+// ForcedCBDTransformer detects when a particular CBD-able graph node has
+// dependencies with another that has create_before_destroy set that require
+// it to be forced on, and forces it on.
+//
+// This must be used in the plan graph builder to ensure that
+// create_before_destroy settings are properly propagated before constructing
+// the planned changes. This requires that the plannable resource nodes
+// implement GraphNodeDestroyerCBD.
+type ForcedCBDTransformer struct {
+}
+
+func (t *ForcedCBDTransformer) Transform(g *Graph) error {
+	for _, v := range g.Vertices() {
+		dn, ok := v.(GraphNodeDestroyerCBD)
+		if !ok {
+			continue
+		}
+
+		if !dn.CreateBeforeDestroy() {
+			// If there are no CBD descendents (dependent nodes), then we
+			// do nothing here.
+			if !t.hasCBDDescendent(g, v) {
+				log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v)
+				continue
+			}
+
+			// If this isn't naturally a CBD node, this means that a descendent is
+			// and we need to auto-upgrade this node to CBD. We do this because
+			// a CBD node depending on non-CBD will result in cycles. To avoid this,
+			// we always attempt to upgrade it.
+			log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v)
+			if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+				return fmt.Errorf(
+					"%s: must have create before destroy enabled because "+
+						"a dependent resource has CBD enabled. However, when "+
+						"attempting to automatically do this, an error occurred: %s",
+					dag.VertexName(v), err)
+			}
+		} else {
+			log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v)
+		}
+	}
+	return nil
+}
+
+// hasCBDDescendent returns true if any descendent (node that depends on this)
+// has CBD set.
+func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool {
+	s, _ := g.Descendents(v)
+	if s == nil {
+		return true
+	}
+
+	for _, ov := range s.List() {
+		dn, ok := ov.(GraphNodeDestroyerCBD)
+		if !ok {
+			continue
+		}
+
+		if dn.CreateBeforeDestroy() {
+			// some descendent is CreateBeforeDestroy, so we need to follow suit
+			log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov))
+			return true
+		}
+	}
+
+	return false
+}
+
 // CBDEdgeTransformer modifies the edges of CBD nodes that went through
 // the DestroyEdgeTransformer to have the right dependencies. There are
 // two real tasks here:
@@ -35,16 +118,25 @@ type GraphNodeDestroyerCBD interface {
 //    update to A. Example: adding a web server updates the load balancer
 //    before deleting the old web server.
 //
+// This transformer requires that a previous transformer has already forced
+// create_before_destroy on for nodes that are depended on by explicit CBD
+// nodes. This is the logic in ForcedCBDTransformer, though in practice we
+// will get here by recording the CBD-ness of each change in the plan during
+// the plan walk and then forcing the nodes into the appropriate setting during
+// DiffTransformer when building the apply graph.
 type CBDEdgeTransformer struct {
 	// Module and State are only needed to look up dependencies in
 	// any way possible. Either can be nil if not availabile.
-	Module *module.Tree
-	State  *State
+	Config *configs.Config
+	State  *states.State
+
+	// If configuration is present then Schemas is required in order to
+	// obtain schema information from providers and provisioners so we can
+	// properly resolve implicit dependencies.
+	Schemas *Schemas
 }
 
 func (t *CBDEdgeTransformer) Transform(g *Graph) error {
-	log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
-
 	// Go through and reverse any destroy edges
 	destroyMap := make(map[string][]dag.Vertex)
 	for _, v := range g.Vertices() {
@@ -52,25 +144,13 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 		if !ok {
 			continue
 		}
+		dern, ok := v.(GraphNodeDestroyer)
+		if !ok {
+			continue
+		}
 
 		if !dn.CreateBeforeDestroy() {
-			// If there are no CBD ancestors (dependent nodes), then we
-			// do nothing here.
-			if !t.hasCBDAncestor(g, v) {
-				continue
-			}
-
-			// If this isn't naturally a CBD node, this means that an ancestor is
-			// and we need to auto-upgrade this node to CBD. We do this because
-			// a CBD node depending on non-CBD will result in cycles. To avoid this,
-			// we always attempt to upgrade it.
-			if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
-				return fmt.Errorf(
-					"%s: must have create before destroy enabled because "+
-						"a dependent resource has CBD enabled. However, when "+
-						"attempting to automatically do this, an error occurred: %s",
-					dag.VertexName(v), err)
-			}
+			continue
 		}
 
 		// Find the destroy edge. There should only be one.
@@ -86,7 +166,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 
 			// Found it! Invert.
 			g.RemoveEdge(de)
-			g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+			applyNode := de.Source()
+			destroyNode := de.Target()
+			g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
 		}
 
 		// If the address has an index, we strip that. Our depMap creation
@@ -94,15 +176,11 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 		// dependencies. One day when we limit dependencies more exactly
 		// this will have to change. We have a test case covering this
 		// (depNonCBDCountBoth) so it'll be caught.
-		addr := dn.DestroyAddr()
-		if addr.Index >= 0 {
-			addr = addr.Copy() // Copy so that we don't modify any pointers
-			addr.Index = -1
-		}
+		addr := dern.DestroyAddr()
+		key := addr.ContainingResource().String()
 
 		// Add this to the list of nodes that we need to fix up
 		// the edges for (step 2 above in the docs).
-		key := addr.String()
 		destroyMap[key] = append(destroyMap[key], v)
 	}
 
@@ -151,13 +229,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 		// dependencies. One day when we limit dependencies more exactly
 		// this will have to change. We have a test case covering this
 		// (depNonCBDCount) so it'll be caught.
-		if addr.Index >= 0 {
-			addr = addr.Copy() // Copy so that we don't modify any pointers
-			addr.Index = -1
-		}
+		key := addr.ContainingResource().String()
 
 		// If there is nothing this resource should depend on, ignore it
-		key := addr.String()
 		dns, ok := depMap[key]
 		if !ok {
 			continue
@@ -174,21 +248,21 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
 	return nil
 }
 
-func (t *CBDEdgeTransformer) depMap(
-	destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+func (t *CBDEdgeTransformer) depMap(destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
 	// Build the graph of our config, this ensures that all resources
 	// are present in the graph.
-	g, err := (&BasicGraphBuilder{
+	g, diags := (&BasicGraphBuilder{
 		Steps: []GraphTransformer{
-			&FlatConfigTransformer{Module: t.Module},
-			&AttachResourceConfigTransformer{Module: t.Module},
+			&FlatConfigTransformer{Config: t.Config},
+			&AttachResourceConfigTransformer{Config: t.Config},
 			&AttachStateTransformer{State: t.State},
+			&AttachSchemaTransformer{Schemas: t.Schemas},
 			&ReferenceTransformer{},
 		},
 		Name: "CBDEdgeTransformer",
	}).Build(nil)
-	if err != nil {
-		return nil, err
+	if diags.HasErrors() {
+		return nil, diags.Err()
 	}
 
 	// Using this graph, build the list of destroy nodes that each resource
@@ -232,26 +306,3 @@ func (t *CBDEdgeTransformer) depMap(
 
 	return depMap, nil
 }
-
-// hasCBDAncestor returns true if any ancestor (node that depends on this)
-// has CBD set.
-func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
-	s, _ := g.Ancestors(v)
-	if s == nil {
-		return true
-	}
-
-	for _, v := range s.List() {
-		dn, ok := v.(GraphNodeDestroyerCBD)
-		if !ok {
-			continue
-		}
-
-		if dn.CreateBeforeDestroy() {
-			// some ancestor is CreateBeforeDestroy, so we need to follow suit
-			return true
-		}
-	}
-
-	return false
-}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
index 22be1ab6..7fb415bd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -3,7 +3,10 @@ package terraform
 import (
 	"log"
 
-	"github.com/hashicorp/terraform/config/module"
+	"github.com/hashicorp/terraform/addrs"
+	"github.com/hashicorp/terraform/states"
+
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
 )
 
@@ -11,16 +14,16 @@ import (
 type GraphNodeDestroyer interface {
 	dag.Vertex
 
-	// ResourceAddr is the address of the resource that is being
+	// DestroyAddr is the address of the resource that is being
 	// destroyed by this node. If this returns nil, then this node
 	// is not destroying anything.
-	DestroyAddr() *ResourceAddress
+	DestroyAddr() *addrs.AbsResourceInstance
 }
 
 // GraphNodeCreator must be implemented by nodes that create OR update resources.
 type GraphNodeCreator interface {
-	// ResourceAddr is the address of the resource being created or updated
-	CreateAddr() *ResourceAddress
+	// CreateAddr is the address of the resource being created or updated
+	CreateAddr() *addrs.AbsResourceInstance
 }
 
 // DestroyEdgeTransformer is a GraphTransformer that creates the proper
@@ -40,33 +43,37 @@ type GraphNodeCreator interface {
 type DestroyEdgeTransformer struct {
 	// These are needed to properly build the graph of dependencies
 	// to determine what a destroy node depends on. Any of these can be nil.
-	Module *module.Tree
-	State  *State
+	Config *configs.Config
+	State  *states.State
+
+	// If configuration is present then Schemas is required in order to
+	// obtain schema information from providers and provisioners in order
+	// to properly resolve implicit dependencies.
+	Schemas *Schemas
 }
Usually there will be at most one destroyer + // per node, but we allow multiple if present for completeness. destroyers := make(map[string][]GraphNodeDestroyer) + destroyerAddrs := make(map[string]addrs.AbsResourceInstance) for _, v := range g.Vertices() { dn, ok := v.(GraphNodeDestroyer) if !ok { continue } - addr := dn.DestroyAddr() - if addr == nil { + addrP := dn.DestroyAddr() + if addrP == nil { continue } + addr := *addrP key := addr.String() - log.Printf( - "[TRACE] DestroyEdgeTransformer: %s destroying %q", - dag.VertexName(dn), key) + log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key) destroyers[key] = append(destroyers[key], dn) + destroyerAddrs[key] = addr } // If we aren't destroying anything, there will be no edges to make @@ -100,10 +107,20 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { a := v log.Printf( - "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s", + "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q", dag.VertexName(a), dag.VertexName(a_d)) g.Connect(&DestroyEdge{S: a, T: a_d}) + + // Attach the destroy node to the creator + // There really shouldn't be more than one destroyer, but even if + // there are, any of them will represent the correct + // CreateBeforeDestroy status. + if n, ok := cn.(GraphNodeAttachDestroyer); ok { + if d, ok := d.(GraphNodeDestroyerCBD); ok { + n.AttachDestroyNode(d) + } + } } } @@ -119,23 +136,25 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { return &NodeApplyableProvider{NodeAbstractProvider: a} } steps := []GraphTransformer{ + // Add the local values + &LocalTransformer{Config: t.Config}, + // Add outputs and metadata - &OutputTransformer{Module: t.Module}, - &AttachResourceConfigTransformer{Module: t.Module}, + &OutputTransformer{Config: t.Config}, + &AttachResourceConfigTransformer{Config: t.Config}, &AttachStateTransformer{State: t.State}, - // Add providers since they can affect destroy order as well - &MissingProviderTransformer{AllowAny: true, Concrete: providerFn}, - &ProviderTransformer{}, - &DisableProviderTransformer{}, - &ParentProviderTransformer{}, - &AttachProviderConfigTransformer{Module: t.Module}, - // Add all the variables. We can depend on resources through // variables due to module parameters, and we need to properly // determine that. - &RootVariableTransformer{Module: t.Module}, - &ModuleVariableTransformer{Module: t.Module}, + &RootVariableTransformer{Config: t.Config}, + &ModuleVariableTransformer{Config: t.Config}, + + TransformProviders(nil, providerFn, t.Config), + + // Must attach schemas before ReferenceTransformer so that we can + // analyze the configuration to find references. + &AttachSchemaTransformer{Schemas: t.Schemas}, &ReferenceTransformer{}, } @@ -148,37 +167,36 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // var tempG Graph var tempDestroyed []dag.Vertex - for d, _ := range destroyers { - // d is what is being destroyed. We parse the resource address - // which it came from it is a panic if this fails. - addr, err := ParseResourceAddress(d) - if err != nil { - panic(err) - } + for d := range destroyers { + // d is the string key for the resource being destroyed. We actually + // want the address value, which we stashed earlier. + addr := destroyerAddrs[d] // This part is a little bit weird but is the best way to // find the dependencies we need to: build a graph and use the // attach config and state transformers then ask for references. 
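// A small stand-in sketch (simplified types only) of why the temporary graph
// above is built: creation-time references are computed once, then inverted
// to order the destroy nodes, since a dependency must outlive its dependents.
package main

import "fmt"

func main() {
	// Suppose B's configuration references A, so A is created before B.
	creationRefs := map[string][]string{
		"aws_instance.b": {"aws_instance.a"},
	}

	// For destroy, the ordering inverts: destroy B before destroying A.
	for dependent, deps := range creationRefs {
		for _, dep := range deps {
			fmt.Printf("destroy %s before destroy %s\n", dependent, dep)
		}
	}
}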
- abstract := &NodeAbstractResource{Addr: addr} + abstract := NewNodeAbstractResourceInstance(addr) tempG.Add(abstract) tempDestroyed = append(tempDestroyed, abstract) // We also add the destroy version here since the destroy can // depend on things that the creation doesn't (destroy provisioners). - destroy := &NodeDestroyResource{NodeAbstractResource: abstract} + destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract} tempG.Add(destroy) tempDestroyed = append(tempDestroyed, destroy) } // Run the graph transforms so we have the information we need to // build references. + log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes()) for _, s := range steps { + log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s) if err := s.Transform(&tempG); err != nil { + log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err) return err } } - - log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String()) + log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String()) // Go through all the nodes in the graph and determine what they // depend on. @@ -209,16 +227,13 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // Get the destroy node for this. In the example of our struct, // we are currently at B and we're looking for B_d. - rn, ok := v.(GraphNodeResource) + rn, ok := v.(GraphNodeResourceInstance) if !ok { + log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v)) continue } - addr := rn.ResourceAddr() - if addr == nil { - continue - } - + addr := rn.ResourceInstanceAddr() dns := destroyers[addr.String()] // We have dependencies, check if any are being destroyed @@ -233,16 +248,12 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { // to see if A_d exists. var depDestroyers []dag.Vertex for _, v := range refs { - rn, ok := v.(GraphNodeResource) + rn, ok := v.(GraphNodeResourceInstance) if !ok { continue } - addr := rn.ResourceAddr() - if addr == nil { - continue - } - + addr := rn.ResourceInstanceAddr() key := addr.String() if ds, ok := destroyers[key]; ok { for _, d := range ds { @@ -259,6 +270,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error { for _, a_d := range dns { for _, b_d := range depDestroyers { if b_d != a_d { + log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d)) g.Connect(dag.BasicEdge(b_d, a_d)) } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go index ad46d3c6..6fb915f8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go @@ -4,83 +4,189 @@ import ( "fmt" "log" - "github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/plans" + "github.com/hashicorp/terraform/states" + "github.com/hashicorp/terraform/tfdiags" ) -// DiffTransformer is a GraphTransformer that adds the elements of -// the diff to the graph. -// -// This transform is used for example by the ApplyGraphBuilder to ensure -// that only resources that are being modified are represented in the graph. 
-// -// Module and State is still required for the DiffTransformer for annotations -// since the Diff doesn't contain all the information required to build the -// complete graph (such as create-before-destroy information). The graph -// is built based on the diff first, though, ensuring that only resources -// that are being modified are present in the graph. +// DiffTransformer is a GraphTransformer that adds graph nodes representing +// each of the resource changes described in the given Changes object. type DiffTransformer struct { - Concrete ConcreteResourceNodeFunc - - Diff *Diff - Module *module.Tree - State *State + Concrete ConcreteResourceInstanceNodeFunc + State *states.State + Changes *plans.Changes } func (t *DiffTransformer) Transform(g *Graph) error { - // If the diff is nil or empty (nil is empty) then do nothing - if t.Diff.Empty() { + if t.Changes == nil || len(t.Changes.Resources) == 0 { + // Nothing to do! return nil } // Go through all the modules in the diff. - log.Printf("[TRACE] DiffTransformer: starting") - var nodes []dag.Vertex - for _, m := range t.Diff.Modules { - log.Printf("[TRACE] DiffTransformer: Module: %s", m) - // TODO: If this is a destroy diff then add a module destroy node - - // Go through all the resources in this module. - for name, inst := range m.Resources { - log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) - - // We have changes! This is a create or update operation. - // First grab the address so we have a unique way to - // reference this resource. - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } + log.Printf("[TRACE] DiffTransformer starting") + + var diags tfdiags.Diagnostics + state := t.State + changes := t.Changes + + // DiffTransformer creates resource _instance_ nodes. If there are any + // whole-resource nodes already in the graph, we must ensure that they + // get evaluated before any of the corresponding instances by creating + // dependency edges, so we'll do some prep work here to ensure we'll only + // create connections to nodes that existed before we started here. + resourceNodes := map[string][]GraphNodeResource{} + for _, node := range g.Vertices() { + rn, ok := node.(GraphNodeResource) + if !ok { + continue + } + // We ignore any instances that _also_ implement + // GraphNodeResourceInstance, since in the unlikely event that they + // do exist we'd probably end up creating cycles by connecting them. + if _, ok := node.(GraphNodeResourceInstance); ok { + continue + } + + addr := rn.ResourceAddr().String() + resourceNodes[addr] = append(resourceNodes[addr], rn) + } + + for _, rc := range changes.Resources { + addr := rc.Addr + dk := rc.DeposedKey + + log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk) + + // Depending on the action we'll need some different combinations of + // nodes, because destroying uses a special node type separate from + // other actions. 
+ var update, delete, createBeforeDestroy bool + switch rc.Action { + case plans.NoOp: + continue + case plans.Delete: + delete = true + case plans.DeleteThenCreate, plans.CreateThenDelete: + update = true + delete = true + createBeforeDestroy = (rc.Action == plans.CreateThenDelete) + default: + update = true + } + + if dk != states.NotDeposed && update { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change for deposed object", + fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk), + )) + continue + } - // Very important: add the module path for this resource to - // the address. Remove "root" from it. - addr.Path = m.Path[1:] + // If we're going to do a create_before_destroy Replace operation then + // we need to allocate a DeposedKey to use to retain the + // not-yet-destroyed prior object, so that the delete node can destroy + // _that_ rather than the newly-created node, which will be current + // by the time the delete node is visited. + if update && delete && createBeforeDestroy { + // In this case, variable dk will be the _pre-assigned_ DeposedKey + // that must be used if the update graph node deposes the current + // instance, which will then align with the same key we pass + // into the destroy node to ensure we destroy exactly the deposed + // object we expect. + if state != nil { + ris := state.ResourceInstance(addr) + if ris == nil { + // Should never happen, since we don't plan to replace an + // instance that doesn't exist yet. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid planned change", + fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr), + )) + continue + } + + // Allocating a deposed key separately from using it can be racy + // in general, but we assume here that nothing except the apply + // node we instantiate below will actually make new deposed objects + // in practice, and so the set of already-used keys will not change + // between now and then. + dk = ris.FindUnusedDeposedKey() + } else { + // If we have no state at all yet then we can use _any_ + // DeposedKey. + dk = states.NewDeposedKey() + } + } - // If we're destroying, add the destroy node - if inst.Destroy || inst.GetDestroyDeposed() { - abstract := &NodeAbstractResource{Addr: addr} - g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) + if update { + // All actions except destroying the node type chosen by t.Concrete + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) } - // If we have changes, then add the applyable version - if len(inst.Attributes) > 0 { - // Add the resource to the graph - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) + if createBeforeDestroy { + // We'll attach our pre-allocated DeposedKey to the node if + // it supports that. NodeApplyableResourceInstance is the + // specific concrete node type we are looking for here really, + // since that's the only node type that might depose objects. 
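// A simplified illustration (hypothetical state layout, not the states
// package API) of the pre-allocated deposed key discussed above: the new
// object becomes "current" before the old one is destroyed, so the old
// object is filed under a deposed key that the apply node and the destroy
// node agree on in advance.
package main

import "fmt"

func main() {
	objects := map[string]string{"current": "old-object"}
	dk := "00000001" // pre-allocated deposed key; normally random and unused

	// Apply step: depose the current object under dk, then create the new one.
	objects["deposed:"+dk] = objects["current"]
	objects["current"] = "new-object"

	// Destroy step: destroy exactly the deposed object, never the new one.
	fmt.Println("destroying", objects["deposed:"+dk])
	delete(objects, "deposed:"+dk)
	fmt.Println("current is still", objects["current"])
}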
+ if dn, ok := node.(GraphNodeDeposer); ok { + dn.SetPreallocatedDeposedKey(dk) } + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk) + } else { + log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node)) + } - nodes = append(nodes, node) + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + g.Connect(dag.BasicEdge(node, rsrcNode)) + } + } + + if delete { + // Destroying always uses a destroy-specific node type, though + // which one depends on whether we're destroying a current object + // or a deposed object. + var node GraphNodeResourceInstance + abstract := NewNodeAbstractResourceInstance(addr) + if dk == states.NotDeposed { + node = &NodeDestroyResourceInstance{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy) + } else { + node = &NodeDestroyDeposedResourceInstanceObject{ + NodeAbstractResourceInstance: abstract, + DeposedKey: dk, + } + } + if dk == states.NotDeposed { + log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node)) + } else { + log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node)) + } + g.Add(node) + rsrcAddr := addr.ContainingResource().String() + for _, rsrcNode := range resourceNodes[rsrcAddr] { + // We connect this edge "forwards" (even though destroy dependencies + // are often inverted) because evaluating the resource node + // after the destroy node could cause an unnecessary husk of + // a resource state to be re-added. + g.Connect(dag.BasicEdge(node, rsrcNode)) } } - } - // Add all the nodes to the graph - for _, n := range nodes { - g.Add(n) } - return nil + log.Printf("[TRACE] DiffTransformer complete") + + return diags.Err() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go index 3673771c..c1945f02 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go @@ -2,7 +2,10 @@ package terraform import ( "fmt" - "strings" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/tfdiags" ) // ImportProviderValidateTransformer is a GraphTransformer that goes through @@ -10,6 +13,8 @@ import ( type ImportProviderValidateTransformer struct{} func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { + var diags tfdiags.Diagnostics + for _, v := range g.Vertices() { // We only care about providers pv, ok := v.(GraphNodeProvider) @@ -24,15 +29,16 @@ func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { } for _, ref := range rn.References() { - if !strings.HasPrefix(ref, "var.") { - return fmt.Errorf( - "Provider %q depends on non-var %q. Providers for import can currently\n"+ - "only depend on variables or must be hardcoded. 
You can stop import\n"+ - "from loading configurations by specifying `-config=\"\"`.", - pv.ProviderName(), ref) + if _, ok := ref.Subject.(addrs.InputVariable); !ok { + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid provider dependency for import", + Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()), + Subject: ref.SourceRange.ToHCL().Ptr(), + }) } } } - return nil + return diags.Err() } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go index 081df2f8..ab0ecae0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go @@ -2,6 +2,10 @@ package terraform import ( "fmt" + + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/providers" + "github.com/hashicorp/terraform/tfdiags" ) // ImportStateTransformer is a GraphTransformer that adds nodes to the @@ -11,59 +15,68 @@ type ImportStateTransformer struct { } func (t *ImportStateTransformer) Transform(g *Graph) error { - nodes := make([]*graphNodeImportState, 0, len(t.Targets)) for _, target := range t.Targets { - addr, err := ParseResourceAddress(target.Addr) - if err != nil { - return fmt.Errorf( - "failed to parse resource address '%s': %s", - target.Addr, err) + // The ProviderAddr may not be supplied for non-aliased providers. + // This will be populated if the targets come from the cli, but tests + // may not specify implied provider addresses. 
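// A sketch of the implied-provider rule mentioned above: when no provider
// address is supplied, one is derived from the resource type's prefix
// (everything before the first underscore). impliedProvider is a
// hypothetical helper, not the real addrs API.
package main

import (
	"fmt"
	"strings"
)

func impliedProvider(resourceType string) string {
	if i := strings.Index(resourceType, "_"); i != -1 {
		return resourceType[:i]
	}
	return resourceType // a type with no underscore implies a same-named provider
}

func main() {
	fmt.Println(impliedProvider("aws_instance"))        // aws
	fmt.Println(impliedProvider("google_compute_disk")) // google
}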
+ providerAddr := target.ProviderAddr + if providerAddr.ProviderConfig.Type == "" { + providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module) } - nodes = append(nodes, &graphNodeImportState{ - Addr: addr, - ID: target.ID, - Provider: target.Provider, - }) - } - - // Build the graph vertices - for _, n := range nodes { - g.Add(n) + node := &graphNodeImportState{ + Addr: target.Addr, + ID: target.ID, + ProviderAddr: providerAddr, + } + g.Add(node) } - return nil } type graphNodeImportState struct { - Addr *ResourceAddress // Addr is the resource address to import to - ID string // ID is the ID to import as - Provider string // Provider string + Addr addrs.AbsResourceInstance // Addr is the resource address to import into + ID string // ID is the ID to import as + ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type + ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution - states []*InstanceState + states []providers.ImportedResource } +var ( + _ GraphNodeSubPath = (*graphNodeImportState)(nil) + _ GraphNodeEvalable = (*graphNodeImportState)(nil) + _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil) + _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil) +) + func (n *graphNodeImportState) Name() string { - return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) + return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID) } -func (n *graphNodeImportState) ProvidedBy() []string { - return []string{resourceProvider(n.Addr.Type, n.Provider)} +// GraphNodeProviderConsumer +func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) { + // We assume that n.ProviderAddr has been properly populated here. + // It's the responsibility of the code creating a graphNodeImportState + // to populate this, possibly by calling DefaultProviderConfig() on the + // resource address to infer an implied provider from the resource type + // name. + return n.ProviderAddr, false +} + +// GraphNodeProviderConsumer +func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) { + n.ResolvedProvider = addr } // GraphNodeSubPath -func (n *graphNodeImportState) Path() []string { - return normalizeModulePath(n.Addr.Path) +func (n *graphNodeImportState) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable impl. func (n *graphNodeImportState) EvalTree() EvalNode { - var provider ResourceProvider - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name), - ModulePath: n.Path(), - Type: n.Addr.Type, - } + var provider providers.Interface // Reset our states n.states = nil @@ -72,13 +85,13 @@ func (n *graphNodeImportState) EvalTree() EvalNode { return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: n.ProvidedBy()[0], + Addr: n.ResolvedProvider, Output: &provider, }, &EvalImportState{ + Addr: n.Addr.Resource, Provider: &provider, - Info: info, - Id: n.ID, + ID: n.ID, Output: &n.states, }, }, @@ -92,6 +105,8 @@ func (n *graphNodeImportState) EvalTree() EvalNode { // resources they don't depend on anything else and refreshes are isolated // so this is nearly a perfect use case for dynamic expand. func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { + var diags tfdiags.Diagnostics + g := &Graph{Path: ctx.Path()} // nameCounter is used to de-dup names in the state. 
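// A runnable sketch of the de-duplication scheme noted above: repeated
// addresses produced by a single import get "-1", "-2", ... suffixes so each
// imported object lands at a distinct address. Stand-in strings only.
package main

import "fmt"

func main() {
	nameCounter := map[string]int{}
	imported := []string{"aws_instance.web", "aws_instance.web", "aws_instance.web"}

	for _, key := range imported {
		name := key
		count, ok := nameCounter[key]
		if ok {
			count++
			name = fmt.Sprintf("%s-%d", key, count)
		}
		nameCounter[key] = count
		fmt.Println(name) // aws_instance.web, aws_instance.web-1, aws_instance.web-2
	}
}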
@@ -100,11 +115,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // Compile the list of addresses that we'll be inserting into the state. // We do this ahead of time so we can verify that we aren't importing // something that already exists. - addrs := make([]*ResourceAddress, len(n.states)) + addrs := make([]addrs.AbsResourceInstance, len(n.states)) for i, state := range n.states { - addr := *n.Addr - if t := state.Ephemeral.Type; t != "" { - addr.Type = t + addr := n.Addr + if t := state.TypeName; t != "" { + addr.Resource.Resource.Type = t } // Determine if we need to suffix the name to de-dup @@ -112,36 +127,31 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { count, ok := nameCounter[key] if ok { count++ - addr.Name += fmt.Sprintf("-%d", count) + addr.Resource.Resource.Name += fmt.Sprintf("-%d", count) } nameCounter[key] = count // Add it to our list - addrs[i] = &addr + addrs[i] = addr } // Verify that all the addresses are clear - state, lock := ctx.State() - lock.RLock() - defer lock.RUnlock() - filter := &StateFilter{State: state} + state := ctx.State() for _, addr := range addrs { - result, err := filter.Filter(addr.String()) - if err != nil { - return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) - } - - // Go through the filter results and it is an error if we find - // a matching InstanceState, meaning that we would have a collision. - for _, r := range result { - if _, ok := r.Value.(*InstanceState); ok { - return nil, fmt.Errorf( - "Can't import %s, would collide with an existing resource.\n\n"+ - "Please remove or rename this resource before continuing.", - addr) - } + existing := state.ResourceInstance(addr) + if existing != nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Resource already managed by Terraform", + fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr), + )) + continue } } + if diags.HasErrors() { + // Bail out early, then. + return nil, diags.Err() + } // For each of the states, we add a node to handle the refresh/add to state. // "n.states" is populated by our own EvalTree with the result of @@ -149,10 +159,9 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { // is safe. for i, state := range n.states { g.Add(&graphNodeImportStateSub{ - Target: addrs[i], - Path_: n.Path(), - State: state, - Provider: n.Provider, + TargetAddr: addrs[i], + State: state, + ResolvedProvider: n.ResolvedProvider, }) } @@ -163,78 +172,67 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { } // Done! - return g, nil + return g, diags.Err() } // graphNodeImportStateSub is the sub-node of graphNodeImportState // and is part of the subgraph. This node is responsible for refreshing // and adding a resource to the state once it is imported. 
type graphNodeImportStateSub struct { - Target *ResourceAddress - State *InstanceState - Path_ []string - Provider string + TargetAddr addrs.AbsResourceInstance + State providers.ImportedResource + ResolvedProvider addrs.AbsProviderConfig } +var ( + _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil) + _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil) +) + func (n *graphNodeImportStateSub) Name() string { - return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) + return fmt.Sprintf("import %s result", n.TargetAddr) } -func (n *graphNodeImportStateSub) Path() []string { - return n.Path_ +func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance { + return n.TargetAddr.Module } // GraphNodeEvalable impl. func (n *graphNodeImportStateSub) EvalTree() EvalNode { // If the Ephemeral type isn't set, then it is an error - if n.State.Ephemeral.Type == "" { - err := fmt.Errorf( - "import of %s didn't set type for %s", - n.Target.String(), n.State.ID) + if n.State.TypeName == "" { + err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String()) return &EvalReturnError{Error: &err} } - // DeepCopy so we're only modifying our local copy - state := n.State.DeepCopy() + state := n.State.AsInstanceObject() - // Build the resource info - info := &InstanceInfo{ - Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name), - ModulePath: n.Path_, - Type: n.State.Ephemeral.Type, - } - - // Key is the resource key - key := &ResourceStateKey{ - Name: n.Target.Name, - Type: info.Type, - Index: n.Target.Index, - } - - // The eval sequence - var provider ResourceProvider + var provider providers.Interface + var providerSchema *ProviderSchema return &EvalSequence{ Nodes: []EvalNode{ &EvalGetProvider{ - Name: resourceProvider(info.Type, n.Provider), + Addr: n.ResolvedProvider, Output: &provider, + Schema: &providerSchema, }, &EvalRefresh{ - Provider: &provider, - State: &state, - Info: info, - Output: &state, + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + Provider: &provider, + ProviderSchema: &providerSchema, + State: &state, + Output: &state, }, &EvalImportStateVerify{ - Info: info, - Id: n.State.ID, + Addr: n.TargetAddr.Resource, State: &state, }, &EvalWriteState{ - Name: key.String(), - ResourceType: info.Type, - Provider: resourceProvider(info.Type, n.Provider), - State: &state, + Addr: n.TargetAddr.Resource, + ProviderAddr: n.ResolvedProvider, + ProviderSchema: &providerSchema, + State: &state, }, }, } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go new file mode 100644 index 00000000..84eb26b2 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go @@ -0,0 +1,48 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/configs" +) + +// LocalTransformer is a GraphTransformer that adds all the local values +// from the configuration to the graph. 
+type LocalTransformer struct { + Config *configs.Config +} + +func (t *LocalTransformer) Transform(g *Graph) error { + return t.transformModule(g, t.Config) +} + +func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error { + if c == nil { + // Can't have any locals if there's no config + return nil + } + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + + for _, local := range c.Module.Locals { + addr := path.LocalValue(local.Name) + node := &NodeLocal{ + Addr: addr, + Config: local, + } + g.Add(node) + } + + // Also populate locals for child modules + for _, cc := range c.Children { + if err := t.transformModule(g, cc); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go index 467950bd..a994bd4f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go @@ -1,46 +1,54 @@ package terraform import ( - "log" + "fmt" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/hcl2/hcl/hclsyntax" + "github.com/hashicorp/terraform/tfdiags" + "github.com/zclconf/go-cty/cty" + + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/configs" ) // ModuleVariableTransformer is a GraphTransformer that adds all the variables // in the configuration to the graph. // -// This only adds variables that are referenced by other things in the graph. -// If a module variable is not referenced, it won't be added to the graph. +// Any "variable" block present in any non-root module is included here, even +// if a particular variable is not referenced from anywhere. +// +// The transform will produce errors if a call to a module does not conform +// to the expected set of arguments, but this transformer is not in a good +// position to return errors and so the validate walk should include specific +// steps for validating module blocks, separate from this transform. type ModuleVariableTransformer struct { - Module *module.Tree - - DisablePrune bool // True if pruning unreferenced should be disabled + Config *configs.Config } func (t *ModuleVariableTransformer) Transform(g *Graph) error { - return t.transform(g, nil, t.Module) + return t.transform(g, nil, t.Config) } -func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { - // If no config, no variables - if m == nil { +func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error { + // We can have no variables if we have no configuration. + if c == nil { return nil } - // Transform all the children. This must be done BEFORE the transform - // above since child module variables can reference parent module variables. 
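// A self-contained sketch of the recursive walk that LocalTransformer
// (earlier in this diff) performs: add one graph node per local value, then
// recurse into child modules. moduleConfig is a stand-in for *configs.Config.
package main

import "fmt"

type moduleConfig struct {
	Path     string
	Locals   []string
	Children []*moduleConfig
}

func addLocals(c *moduleConfig) {
	if c == nil {
		return // no config means no locals
	}
	for _, name := range c.Locals {
		fmt.Printf("add node local.%s in module %q\n", name, c.Path)
	}
	for _, child := range c.Children {
		addLocals(child)
	}
}

func main() {
	addLocals(&moduleConfig{
		Path:   "root",
		Locals: []string{"common_tags"},
		Children: []*moduleConfig{
			{Path: "network", Locals: []string{"cidr_block"}},
		},
	})
}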
- for _, c := range m.Children() { - if err := t.transform(g, m, c); err != nil { + // Transform all the children first. + for _, cc := range c.Children { + if err := t.transform(g, c, cc); err != nil { return err } } + // If we're processing anything other than the root module then we'll + // add graph nodes for variables defined inside. (Variables for the root + // module are dealt with in RootVariableTransformer). // If we have a parent, we can determine if a module variable is being // used, so we transform this. if parent != nil { - if err := t.transformSingle(g, parent, m); err != nil { + if err := t.transformSingle(g, parent, c); err != nil { return err } } @@ -48,71 +56,69 @@ func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) return nil } -func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { - // If we have no vars, we're done! - vars := m.Config().Variables - if len(vars) == 0 { - log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) - return nil +func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error { + + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() + _, call := path.Call() + + // Find the call in the parent module configuration, so we can get the + // expressions given for each input variable at the call site. + callConfig, exists := parent.Module.ModuleCalls[call.Name] + if !exists { + // This should never happen, since it indicates an improperly-constructed + // configuration tree. + panic(fmt.Errorf("no module call block found for %s", path)) } - // Look for usage of this module - var mod *config.Module - for _, modUse := range parent.Config().Modules { - if modUse.Name == m.Name() { - mod = modUse - break - } + // We need to construct a schema for the expected call arguments based on + // the configured variables in our config, which we can then use to + // decode the content of the call block. + schema := &hcl.BodySchema{} + for _, v := range c.Module.Variables { + schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{ + Name: v.Name, + Required: v.Default == cty.NilVal, + }) } - if mod == nil { - log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) - return nil + + content, contentDiags := callConfig.Config.Content(schema) + if contentDiags.HasErrors() { + // Validation code elsewhere should deal with any errors before we + // get in here, but we'll report them out here just in case, to + // avoid crashes. + var diags tfdiags.Diagnostics + diags = diags.Append(contentDiags) + return diags.Err() } - // Build the reference map so we can determine if we're referencing things. - refMap := NewReferenceMap(g.Vertices()) - - // Add all variables here - for _, v := range vars { - // Determine the value of the variable. If it isn't in the - // configuration then it was never set and that's not a problem. 
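// A sketch of the schema derivation above: every module input variable
// becomes a call-site attribute that is required exactly when the variable
// has no default. The types here are simplified stand-ins for the
// hcl.BodySchema construction in the real code.
package main

import "fmt"

type variable struct {
	Name    string
	Default interface{} // nil plays the role of cty.NilVal ("no default")
}

type attrSchema struct {
	Name     string
	Required bool
}

func main() {
	vars := []variable{
		{Name: "instance_type", Default: "t2.micro"},
		{Name: "vpc_id"}, // no default, so callers must set it
	}

	var schema []attrSchema
	for _, v := range vars {
		schema = append(schema, attrSchema{Name: v.Name, Required: v.Default == nil})
	}
	fmt.Printf("%+v\n", schema)
}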
- var value *config.RawConfig - if raw, ok := mod.RawConfig.Raw[v.Name]; ok { - var err error - value, err = config.NewRawConfig(map[string]interface{}{ - v.Name: raw, - }) - if err != nil { - // This shouldn't happen because it is already in - // a RawConfig above meaning it worked once before. - panic(err) + for _, v := range c.Module.Variables { + var expr hcl.Expression + if attr := content.Attributes[v.Name]; attr != nil { + expr = attr.Expr + } else { + // No expression provided for this variable, so we'll make a + // synthetic one using the variable's default value. + expr = &hclsyntax.LiteralValueExpr{ + Val: v.Default, + SrcRange: v.DeclRange, // This is not exact, but close enough } } - // Build the node. - // - // NOTE: For now this is just an "applyable" variable. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. + // For now we treat all module variables as "applyable", even though + // such nodes are valid to use on other walks too. We may specialize + // this in future if we find reasons to employ different behaviors + // in different scenarios. node := &NodeApplyableModuleVariable{ - PathValue: normalizeModulePath(m.Path()), - Config: v, - Value: value, - Module: t.Module, + Addr: path.InputVariable(v.Name), + Config: v, + Expr: expr, } - - if !t.DisablePrune { - // If the node is not referenced by anything, then we don't need - // to include it since it won't be used. - if matches := refMap.ReferencedBy(node); len(matches) == 0 { - log.Printf( - "[INFO] Not including %q in graph, nothing depends on it", - dag.VertexName(node)) - continue - } - } - - // Add it! g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go index b256a25b..eec762e5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go @@ -3,7 +3,9 @@ package terraform import ( "log" + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // OrphanResourceCountTransformer is a GraphTransformer that adds orphans @@ -14,95 +16,106 @@ import ( // This transform assumes that if an element in the state is within the count // bounds given, that it is not an orphan. type OrphanResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc + Concrete ConcreteResourceInstanceNodeFunc - Count int // Actual count of the resource - Addr *ResourceAddress // Addr of the resource to look for orphans - State *State // Full global state + Count int // Actual count of the resource, or -1 if count is not set at all + Addr addrs.AbsResource // Addr of the resource to look for orphans + State *states.State // Full global state } func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { - log.Printf("[TRACE] OrphanResourceCount: Starting...") + rs := t.State.Resource(t.Addr) + if rs == nil { + return nil // Resource doesn't exist in state, so nothing to do! 
+ } - // Grab the module in the state just for this resource address - ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) - if ms == nil { - // If no state, there can't be orphans - return nil + haveKeys := make(map[addrs.InstanceKey]struct{}) + for key := range rs.Instances { + haveKeys[key] = struct{}{} } - orphanIndex := -1 - if t.Count == 1 { - orphanIndex = 0 + if t.Count < 0 { + return t.transformNoCount(haveKeys, g) + } + if t.Count == 0 { + return t.transformZeroCount(haveKeys, g) } + return t.transformCount(haveKeys, g) +} + +func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Due to the logic in Transform, we only get in here if our count is + // at least one. - // Go through the orphans and add them all to the state - for key, _ := range ms.Resources { - // Build the address - addr, err := parseResourceAddressInternal(key) - if err != nil { - return err + _, have0Key := haveKeys[addrs.IntKey(0)] + + for key := range haveKeys { + if key == addrs.NoKey && !have0Key { + // If we have no 0-key then we will accept a no-key instance + // as an alias for it. + continue } - addr.Path = ms.Path[1:] - // Copy the address for comparison. If we aren't looking at - // the same resource, then just ignore it. - addrCopy := addr.Copy() - addrCopy.Index = -1 - if !addrCopy.Equals(t.Addr) { + i, isInt := key.(addrs.IntKey) + if isInt && int(i) < t.Count { continue } - log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node) + g.Add(node) + } + + return nil +} - idx := addr.Index +func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // This case is easy: we need to orphan any keys we have at all. - // If we have zero and the index here is 0 or 1, then we - // change the index to a high number so that we treat it as - // an orphan. - if t.Count <= 0 && idx <= 0 { - idx = t.Count + 1 + for key := range haveKeys { + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) } + log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node) + g.Add(node) + } - // If we have a count greater than 0 and we're at the zero index, - // we do a special case check to see if our state also has a - // -1 index value. If so, this is an orphan because our rules are - // that if both a -1 and 0 are in the state, the 0 is destroyed. - if t.Count > 0 && idx == orphanIndex { - // This is a piece of cleverness (beware), but its simple: - // if orphanIndex is 0, then check -1, else check 0. - checkIndex := (orphanIndex + 1) * -1 - - key := &ResourceStateKey{ - Name: addr.Name, - Type: addr.Type, - Mode: addr.Mode, - Index: checkIndex, - } - - if _, ok := ms.Resources[key.String()]; ok { - // We have a -1 index, too. Make an arbitrarily high - // index so that we always mark this as an orphan. 
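// A compact sketch (plain ints standing in for addrs.InstanceKey) of the
// count-orphan rule implemented above: instance keys at or beyond the
// configured count are orphans, while a keyless instance may be accepted as
// an alias for index 0.
package main

import "fmt"

func main() {
	count := 2
	haveKeys := []int{0, 1, 2, 3} // integer instance keys present in state

	for _, k := range haveKeys {
		if k < count {
			continue // still covered by count
		}
		fmt.Printf("instance [%d] is an orphan; add a destroy-capable node\n", k)
	}
}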
- log.Printf( - "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d", - addr, orphanIndex) - idx = t.Count + 1 - } - } + return nil +} - // If the index is within the count bounds, it is not an orphan - if idx < t.Count { +func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error { + // Negative count indicates that count is not set at all, in which + // case we expect to have a single instance with no key set at all. + // However, we'll also accept an instance with key 0 set as an alias + // for it, in case the user has just deleted the "count" argument and + // so wants to keep the first instance in the set. + + _, haveNoKey := haveKeys[addrs.NoKey] + _, have0Key := haveKeys[addrs.IntKey(0)] + keepKey := addrs.NoKey + if have0Key && !haveNoKey { + // If we don't have a no-key instance then we can use the 0-key instance + // instead. + keepKey = addrs.IntKey(0) + } + + for key := range haveKeys { + if key == keepKey { continue } - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} + abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key)) var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } - - // Add it to the graph + log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node) g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go index 49568d5b..c6754093 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go @@ -3,16 +3,17 @@ package terraform import ( "log" - "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" ) // OrphanOutputTransformer finds the outputs that aren't present // in the given config that are in the state and adds them to the graph // for deletion. type OrphanOutputTransformer struct { - Module *module.Tree // Root module - State *State // State is the root state + Config *configs.Config // Root of config tree + State *states.State // State is the root state } func (t *OrphanOutputTransformer) Transform(g *Graph) error { @@ -21,43 +22,38 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error { return nil } - return t.transform(g, t.Module) -} - -func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error { - // Get our configuration, and recurse into children - var c *config.Config - if m != nil { - c = m.Config() - for _, child := range m.Children() { - if err := t.transform(g, child); err != nil { - return err - } + for _, ms := range t.State.Modules { + if err := t.transform(g, ms); err != nil { + return err } } + return nil +} - // Get the state. If there is no state, then we have no orphans! 
-	path := normalizeModulePath(m.Path())
-	state := t.State.ModuleByPath(path)
-	if state == nil {
+func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error {
+	if ms == nil {
 		return nil
 	}

-	// Make a map of the valid outputs
-	valid := make(map[string]struct{})
-	for _, o := range c.Outputs {
-		valid[o.Name] = struct{}{}
+	moduleAddr := ms.Addr
+
+	// Get the config for this path, which is nil if the entire module has been
+	// removed.
+	var outputs map[string]*configs.Output
+	if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
+		outputs = c.Module.Outputs
 	}

-	// Go through the outputs and find the ones that aren't in our config.
-	for n, _ := range state.Outputs {
-		// If it is in the valid map, then ignore
-		if _, ok := valid[n]; ok {
+	// An output is "orphaned" if it's present in the state but not declared
+	// in the configuration.
+	for name := range ms.OutputValues {
+		if _, exists := outputs[name]; exists {
 			continue
 		}

-		// Orphan!
-		g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
+		g.Add(&NodeOutputOrphan{
+			Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr),
+		})
 	}

 	return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
index e42d3c84..50df1781 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -1,34 +1,43 @@
 package terraform

 import (
-	"github.com/hashicorp/terraform/config"
-	"github.com/hashicorp/terraform/config/module"
+	"log"
+
+	"github.com/hashicorp/terraform/configs"
 	"github.com/hashicorp/terraform/dag"
+	"github.com/hashicorp/terraform/states"
 )

-// OrphanResourceTransformer is a GraphTransformer that adds resource
-// orphans to the graph. A resource orphan is a resource that is
-// represented in the state but not in the configuration.
-//
-// This only adds orphans that have no representation at all in the
+// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned
+// resource instances to the graph. An "orphan" is an instance that is present
+// in the state but belongs to a resource that is no longer present in the
 // configuration.
-type OrphanResourceTransformer struct {
-	Concrete ConcreteResourceNodeFunc
+//
+// This is not the transformer that deals with "count orphans" (instances that
+// are no longer covered by a resource's "count" or "for_each" setting); that's
+// handled instead by OrphanResourceCountTransformer.
+type OrphanResourceInstanceTransformer struct {
+	Concrete ConcreteResourceInstanceNodeFunc

 	// State is the global state. We require the global state to
 	// properly find module orphans at our path.
-	State *State
+	State *states.State

-	// Module is the root module. We'll look up the proper configuration
-	// using the graph path.
-	Module *module.Tree
+	// Config is the root node in the configuration tree. We'll look up
+	// the appropriate node in this tree using the path in each node.
+ Config *configs.Config } -func (t *OrphanResourceTransformer) Transform(g *Graph) error { +func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error { if t.State == nil { // If the entire state is nil, there can't be any orphans return nil } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceInstanceTransformer used without setting Config") + } // Go through the modules and for each module transform in order // to add the orphan. @@ -41,38 +50,130 @@ func (t *OrphanResourceTransformer) Transform(g *Graph) error { return nil } -func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { +func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error { if ms == nil { return nil } - // Get the configuration for this path. The configuration might be + moduleAddr := ms.Addr + + // Get the configuration for this module. The configuration might be // nil if the module was removed from the configuration. This is okay, // this just means that every resource is an orphan. - var c *config.Config - if m := t.Module.Child(ms.Path[1:]); m != nil { - c = m.Config() + var m *configs.Module + if c := t.Config.DescendentForInstance(moduleAddr); c != nil { + m = c.Module } - // Go through the orphans and add them all to the state - for _, key := range ms.Orphans(c) { - // Build the abstract resource - addr, err := parseResourceAddressInternal(key) - if err != nil { - return err + // An "orphan" is a resource that is in the state but not the configuration, + // so we'll walk the state resources and try to correlate each of them + // with a configuration block. Each orphan gets a node in the graph whose + // type is decided by t.Concrete. + // + // We don't handle orphans related to changes in the "count" and "for_each" + // pseudo-arguments here. They are handled by OrphanResourceCountTransformer. + for _, rs := range ms.Resources { + if m != nil { + if r := m.ResourceByAddr(rs.Addr); r != nil { + continue + } } - addr.Path = ms.Path[1:] - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} - var node dag.Vertex = abstract - if f := t.Concrete; f != nil { - node = f(abstract) + for key := range rs.Instances { + addr := rs.Addr.Instance(key).Absolute(moduleAddr) + abstract := NewNodeAbstractResourceInstance(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr) + g.Add(node) } + } + + return nil +} + +// OrphanResourceTransformer is a GraphTransformer that adds orphaned +// resources to the graph. An "orphan" is a resource that is present in +// the state but no longer present in the config. +// +// This is separate to OrphanResourceInstanceTransformer in that it deals with +// whole resources, rather than individual instances of resources. Orphan +// resource nodes are only used during apply to clean up leftover empty +// resource state skeletons, after all of the instances inside have been +// removed. +// +// This transformer will also create edges in the graph to any pre-existing +// node that creates or destroys the entire orphaned resource or any of its +// instances, to ensure that the "orphan-ness" of a resource is always dealt +// with after all other aspects of it. 
+type OrphanResourceTransformer struct { + Concrete ConcreteResourceNodeFunc + + // State is the global state. + State *states.State - // Add it to the graph - g.Add(node) + // Config is the root node in the configuration tree. + Config *configs.Config +} + +func (t *OrphanResourceTransformer) Transform(g *Graph) error { + if t.State == nil { + // If the entire state is nil, there can't be any orphans + return nil + } + if t.Config == nil { + // Should never happen: we can't be doing any Terraform operations + // without at least an empty configuration. + panic("OrphanResourceTransformer used without setting Config") + } + + // We'll first collect up the existing nodes for each resource so we can + // create dependency edges for any new nodes we create. + deps := map[string][]dag.Vertex{} + for _, v := range g.Vertices() { + switch tv := v.(type) { + case GraphNodeResourceInstance: + k := tv.ResourceInstanceAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + case GraphNodeResource: + k := tv.ResourceAddr().String() + deps[k] = append(deps[k], v) + case GraphNodeDestroyer: + k := tv.DestroyAddr().ContainingResource().String() + deps[k] = append(deps[k], v) + } + } + + for _, ms := range t.State.Modules { + moduleAddr := ms.Addr + + mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed + + for _, rs := range ms.Resources { + if mc != nil { + if r := mc.Module.ResourceByAddr(rs.Addr); r != nil { + // It's in the config, so nothing to do for this one. + continue + } + } + + addr := rs.Addr.Absolute(moduleAddr) + abstract := NewNodeAbstractResource(addr) + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr) + g.Add(node) + for _, dn := range deps[addr.String()] { + log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn)) + g.Connect(dag.BasicEdge(node, dn)) + } + } } return nil + } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go index b260f4ca..ed93cdb8 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go @@ -1,7 +1,10 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "log" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/dag" ) // OutputTransformer is a GraphTransformer that adds all the outputs @@ -11,49 +14,82 @@ import ( // aren't changing since there is no downside: the state will be available // even if the dependent items aren't changing. type OutputTransformer struct { - Module *module.Tree + Config *configs.Config } func (t *OutputTransformer) Transform(g *Graph) error { - return t.transform(g, t.Module) + return t.transform(g, t.Config) } -func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { - // If no config, no outputs - if m == nil { +func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error { + // If we have no config then there can be no outputs. + if c == nil { return nil } // Transform all the children. We must do this first because // we can reference module outputs and they must show up in the // reference map. 
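// A stand-in sketch of the dependency-edge collection in
// OrphanResourceTransformer earlier in this diff: existing per-instance nodes
// are indexed by their containing resource, so a new whole-resource orphan
// node can be wired to run only after all of them have been dealt with.
package main

import "fmt"

func main() {
	// Vertex name -> containing resource address (illustrative values).
	vertices := map[string]string{
		"aws_instance.web[0] (destroy)": "aws_instance.web",
		"aws_instance.web[1] (destroy)": "aws_instance.web",
	}

	deps := map[string][]string{}
	for v, rsrc := range vertices {
		deps[rsrc] = append(deps[rsrc], v)
	}

	orphan := "aws_instance.web (clean up state skeleton)"
	for _, d := range deps["aws_instance.web"] {
		fmt.Printf("%s depends on %s\n", orphan, d)
	}
}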
- for _, c := range m.Children() { - if err := t.transform(g, c); err != nil { + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { return err } } - // If we have no outputs, we're done! - os := m.Config().Outputs - if len(os) == 0 { - return nil - } + // Our addressing system distinguishes between modules and module instances, + // but we're not yet ready to make that distinction here (since we don't + // support "count"/"for_each" on modules) and so we just do a naive + // transform of the module path into a module instance path, assuming that + // no keys are in use. This should be removed when "count" and "for_each" + // are implemented for modules. + path := c.Path.UnkeyedInstanceShim() - // Add all outputs here - for _, o := range os { - // Build the node. - // - // NOTE: For now this is just an "applyable" output. As we build - // new graph builders for the other operations I suspect we'll - // find a way to parameterize this, require new transforms, etc. + for _, o := range c.Module.Outputs { + addr := path.OutputValue(o.Name) node := &NodeApplyableOutput{ - PathValue: normalizeModulePath(m.Path()), - Config: o, + Addr: addr, + Config: o, } - - // Add it! g.Add(node) } return nil } + +// DestroyOutputTransformer is a GraphTransformer that adds nodes to delete +// outputs during destroy. We need to do this to ensure that no stale outputs +// are ever left in the state. +type DestroyOutputTransformer struct { +} + +func (t *DestroyOutputTransformer) Transform(g *Graph) error { + for _, v := range g.Vertices() { + output, ok := v.(*NodeApplyableOutput) + if !ok { + continue + } + + // create the destroy node for this output + node := &NodeDestroyableOutput{ + Addr: output.Addr, + Config: output.Config, + } + + log.Printf("[TRACE] creating %s", node.Name()) + g.Add(node) + + deps, err := g.Descendents(v) + if err != nil { + return err + } + + // the destroy node must depend on the eval node + deps.Add(v) + + for _, d := range deps.List() { + log.Printf("[TRACE] %s depends on %s", node.Name(), dag.VertexName(d)) + g.Connect(dag.BasicEdge(node, d)) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go index b9695d52..6a4fb47c 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -3,60 +3,201 @@ package terraform import ( "fmt" "log" - "strings" - "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/tfdiags" ) +func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer { + return GraphTransformMulti( + // Add providers from the config + &ProviderConfigTransformer{ + Config: config, + Providers: providers, + Concrete: concrete, + }, + // Add any remaining missing providers + &MissingProviderTransformer{ + Providers: providers, + Concrete: concrete, + }, + // Connect the providers + &ProviderTransformer{ + Config: config, + }, + // Remove unused providers and proxies + &PruneProviderTransformer{}, + // Connect provider to their parent provider nodes + &ParentProviderTransformer{}, + ) +} + // GraphNodeProvider is an interface 
that nodes that can be a provider
-// must implement. The ProviderName returned is the name of the provider
-// they satisfy.
+// must implement.
+//
+// ProviderAddr returns the address of the provider configuration this
+// satisfies, which is relative to the path returned by method Path().
+//
+// Name returns the full name of the provider in the config.
type GraphNodeProvider interface {
-	ProviderName() string
+	GraphNodeSubPath
+	ProviderAddr() addrs.AbsProviderConfig
+	Name() string
}

// GraphNodeCloseProvider is an interface that nodes that can be a close
// provider must implement. The CloseProviderName returned is the name of
// the provider they satisfy.
type GraphNodeCloseProvider interface {
-	CloseProviderName() string
+	GraphNodeSubPath
+	CloseProviderAddr() addrs.AbsProviderConfig
}

// GraphNodeProviderConsumer is an interface that nodes that require
-// a provider must implement. ProvidedBy must return the name of the provider
-// to use.
+// a provider must implement. ProvidedBy must return the address of the provider
+// to use, which will be resolved to a configuration either in the same module
+// or in an ancestor module, with the resulting absolute address passed to
+// SetProvider.
type GraphNodeProviderConsumer interface {
+	// ProvidedBy returns the address of the provider configuration the node
+	// refers to. If the returned "exact" value is true, this address will
+	// be taken exactly. If "exact" is false, a provider configuration from
+	// an ancestor module may be selected instead.
-	ProvidedBy() []string
+	ProvidedBy() (addr addrs.AbsProviderConfig, exact bool)
+	// Set the resolved provider address for this resource.
+	SetProvider(addrs.AbsProviderConfig)
}

// ProviderTransformer is a GraphTransformer that maps resources to
// providers within the graph. This will error if there are any resources
// that don't map to proper resources.
-type ProviderTransformer struct{}
+type ProviderTransformer struct {
+	Config *configs.Config
+}

func (t *ProviderTransformer) Transform(g *Graph) error {
-	// Go through the other nodes and match them to providers they need
-	var err error
-	m := providerVertexMap(g)
+	// We need to find a provider configuration address for each resource
+	// either directly represented by a node or referenced by a node in
+	// the graph, and then create graph edges from provider to provider user
+	// so that the providers will get initialized first.
+
+	var diags tfdiags.Diagnostics
+
+	// To start, we'll collect the _requested_ provider addresses for each
+	// node, which we'll then resolve (handling provider inheritance, etc) in
+	// the next step.
+	// Our "requested" map is from graph vertices to string representations of
+	// provider config addresses (for deduping) to requests.
+	type ProviderRequest struct {
+		Addr  addrs.AbsProviderConfig
+		Exact bool // If true, inheritance from parent modules is not attempted
+	}
+	requested := map[dag.Vertex]map[string]ProviderRequest{}
+	needConfigured := map[string]addrs.AbsProviderConfig{}
	for _, v := range g.Vertices() {
+
+		// Does the vertex _directly_ use a provider?
if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - target := m[providerMapKey(p, pv)] - if target == nil { - println(fmt.Sprintf("%#v\n\n%#v", m, providerMapKey(p, pv))) - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found", - dag.VertexName(v), p)) - continue + requested[v] = make(map[string]ProviderRequest) + + p, exact := pv.ProvidedBy() + if exact { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p) + } else { + log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p) + } + + requested[v][p.String()] = ProviderRequest{ + Addr: p, + Exact: exact, + } + + // Direct references need the provider configured as well as initialized + needConfigured[p.String()] = p + } + } + + // Now we'll go through all the requested addresses we just collected and + // figure out which _actual_ config address each belongs to, after resolving + // for provider inheritance and passing. + m := providerVertexMap(g) + for v, reqs := range requested { + for key, req := range reqs { + p := req.Addr + target := m[key] + + _, ok := v.(GraphNodeSubPath) + if !ok && target == nil { + // No target and no path to traverse up from + diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p)) + continue + } + + if target != nil { + log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v)) + } + + // if we don't have a provider at this level, walk up the path looking for one, + // unless we were told to be exact. + if target == nil && !req.Exact { + for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() { + key := pp.String() + target = m[key] + if target != nil { + log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp) + break + } + log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v)) } + } + + // If this provider doesn't need to be configured then we can just + // stub it out with an init-only provider node, which will just + // start up the provider and fetch its schema. + if _, exists := needConfigured[key]; target == nil && !exists { + stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance) + stub := &NodeEvalableProvider{ + &NodeAbstractProvider{ + Addr: stubAddr, + }, + } + m[stubAddr.String()] = stub + log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr) + target = stub + g.Add(target) + } + + if target == nil { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Provider configuration not present", + fmt.Sprintf( + "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. 
Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.", + dag.VertexName(v), p, dag.VertexName(v), + ), + )) + break + } + + // see if this in an inherited provider + if p, ok := target.(*graphNodeProxyProvider); ok { + g.Remove(p) + target = p.Target() + key = target.(GraphNodeProvider).ProviderAddr().String() + } - g.Connect(dag.BasicEdge(v, target)) + log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target)) + if pv, ok := v.(GraphNodeProviderConsumer); ok { + pv.SetProvider(target.ProviderAddr()) } + g.Connect(dag.BasicEdge(v, target)) } } - return err + return diags.Err() } // CloseProviderTransformer is a GraphTransformer that adds nodes to the @@ -67,36 +208,33 @@ type CloseProviderTransformer struct{} func (t *CloseProviderTransformer) Transform(g *Graph) error { pm := providerVertexMap(g) - cpm := closeProviderVertexMap(g) + cpm := make(map[string]*graphNodeCloseProvider) var err error - for _, v := range g.Vertices() { - if pv, ok := v.(GraphNodeProviderConsumer); ok { - for _, p := range pv.ProvidedBy() { - key := p - source := cpm[key] - - if source == nil { - // Create a new graphNodeCloseProvider and add it to the graph - source = &graphNodeCloseProvider{ProviderNameValue: p} - g.Add(source) - - // Close node needs to depend on provider - provider, ok := pm[key] - if !ok { - err = multierror.Append(err, fmt.Errorf( - "%s: provider %s couldn't be found for closing", - dag.VertexName(v), p)) - continue - } - g.Connect(dag.BasicEdge(source, provider)) - // Make sure we also add the new graphNodeCloseProvider to the map - // so we don't create and add any duplicate graphNodeCloseProviders. - cpm[key] = source - } + for _, v := range pm { + p := v.(GraphNodeProvider) + key := p.ProviderAddr().String() - // Close node depends on all nodes provided by the provider - g.Connect(dag.BasicEdge(source, v)) + // get the close provider of this type if we alread created it + closer := cpm[key] + + if closer == nil { + // create a closer for this provider type + closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()} + g.Add(closer) + cpm[key] = closer + } + + // Close node depends on the provider itself + // this is added unconditionally, so it will connect to all instances + // of the provider. Extra edges will be removed by transitive + // reduction. + g.Connect(dag.BasicEdge(closer, p)) + + // connect all the provider's resources to the close node + for _, s := range g.UpEdges(p).List() { + if _, ok := s.(GraphNodeProviderConsumer); ok { + g.Connect(dag.BasicEdge(closer, s)) } } } @@ -104,18 +242,24 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { return err } -// MissingProviderTransformer is a GraphTransformer that adds nodes -// for missing providers into the graph. Specifically, it creates provider -// configuration nodes for all the providers that we support. These are -// pruned later during an optimization pass. +// MissingProviderTransformer is a GraphTransformer that adds to the graph +// a node for each default provider configuration that is referenced by another +// node but not already present in the graph. +// +// These "default" nodes are always added to the root module, regardless of +// where they are requested. 
This is important because our inheritance +// resolution behavior in ProviderTransformer will then treat these as a +// last-ditch fallback after walking up the tree, rather than preferring them +// as it would if they were placed in the same module as the requester. +// +// This transformer may create extra nodes that are not needed in practice, +// due to overriding provider configurations in child modules. +// PruneProviderTransformer can then remove these once ProviderTransformer +// has resolved all of the inheritence, etc. type MissingProviderTransformer struct { // Providers is the list of providers we support. Providers []string - // AllowAny will not check that a provider is supported before adding - // it to the graph. - AllowAny bool - // Concrete, if set, overrides how the providers are made. Concrete ConcreteProviderNodeFunc } @@ -128,191 +272,137 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error { } } - // Create a set of our supported providers - supported := make(map[string]struct{}, len(t.Providers)) - for _, v := range t.Providers { - supported[v] = struct{}{} - } - - // Get the map of providers we already have in our graph + var err error m := providerVertexMap(g) - - // Go through all the provider consumers and make sure we add - // that provider if it is missing. We use a for loop here instead - // of "range" since we'll modify check as we go to add more to check. - check := g.Vertices() - for i := 0; i < len(check); i++ { - v := check[i] - + for _, v := range g.Vertices() { pv, ok := v.(GraphNodeProviderConsumer) if !ok { continue } - // If this node has a subpath, then we use that as a prefix - // into our map to check for an existing provider. - var path []string - if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } + // For our work here we actually care only about the provider type and + // we plan to place all default providers in the root module, and so + // it's safe for us to rely on ProvidedBy here rather than waiting for + // the later proper resolution of provider inheritance done by + // ProviderTransformer. + p, _ := pv.ProvidedBy() + if p.ProviderConfig.Alias != "" { + // We do not create default aliased configurations. + log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p) + continue } - for _, p := range pv.ProvidedBy() { - key := providerMapKey(p, pv) - if _, ok := m[key]; ok { - // This provider already exists as a configure node - continue - } + // We're going to create an implicit _default_ configuration for the + // referenced provider type in the _root_ module, ignoring all other + // aspects of the resource's declared provider address. + defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type) + key := defaultAddr.String() + provider := m[key] - // If the provider has an alias in it, we just want the type - ptype := p - if idx := strings.IndexRune(p, '.'); idx != -1 { - ptype = p[:idx] - } + if provider != nil { + // There's already an explicit default configuration for this + // provider type in the root module, so we have nothing to do. + continue + } - if !t.AllowAny { - if _, ok := supported[ptype]; !ok { - // If we don't support the provider type, skip it. - // Validation later will catch this as an error. 
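For intuition, here is a minimal standalone sketch of that fallback order, with plain string keys standing in for addrs.AbsProviderConfig (all names here are illustrative, not the vendored API): the consumer's module path is walked upward until a configured provider is found, with the implicit root default as the last resort.

    package main

    import (
        "fmt"
        "strings"
    )

    // key builds a lookup key for a provider config in a given module path,
    // e.g. "module.a.module.b|aws". The root module path is empty.
    func key(modulePath []string, provider string) string {
        parts := make([]string, 0, len(modulePath))
        for _, m := range modulePath {
            parts = append(parts, "module."+m)
        }
        return strings.Join(parts, ".") + "|" + provider
    }

    // resolve walks from the requesting module up toward the root, returning
    // the first key that has a configuration for the provider. This mirrors
    // the "walk up the path looking for one" behavior, with the implicit
    // root default acting as the final fallback.
    func resolve(configured map[string]bool, modulePath []string, provider string) (string, bool) {
        for i := len(modulePath); i >= 0; i-- {
            k := key(modulePath[:i], provider)
            if configured[k] {
                return k, true
            }
        }
        return "", false
    }

    func main() {
        configured := map[string]bool{
            key(nil, "aws"): true, // implicit default at the root
        }
        k, ok := resolve(configured, []string{"net", "subnets"}, "aws")
        fmt.Println(k, ok) // |aws true
    }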
- continue - } - } + log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v)) - // Add the missing provider node to the graph - v := t.Concrete(&NodeAbstractProvider{ - NameValue: p, - PathValue: path, - }).(dag.Vertex) - if len(path) > 0 { - // We'll need the parent provider as well, so let's - // add a dummy node to check to make sure that we add - // that parent provider. - check = append(check, &graphNodeProviderConsumerDummy{ - ProviderValue: p, - PathValue: path[:len(path)-1], - }) - } + // create the missing top-level provider + provider = t.Concrete(&NodeAbstractProvider{ + Addr: defaultAddr, + }).(GraphNodeProvider) - m[key] = g.Add(v) - } + g.Add(provider) + m[key] = provider } - return nil + return err } // ParentProviderTransformer connects provider nodes to their parents. // // This works by finding nodes that are both GraphNodeProviders and // GraphNodeSubPath. It then connects the providers to their parent -// path. +// path. The parent provider is always at the root level. type ParentProviderTransformer struct{} func (t *ParentProviderTransformer) Transform(g *Graph) error { - // Make a mapping of path to dag.Vertex, where path is: "path.name" - m := make(map[string]dag.Vertex) - - // Also create a map that maps a provider to its parent - parentMap := make(map[dag.Vertex]string) - for _, raw := range g.Vertices() { - // If it is the flat version, then make it the non-flat version. - // We eventually want to get rid of the flat version entirely so - // this is a stop-gap while it still exists. - var v dag.Vertex = raw - + pm := providerVertexMap(g) + for _, v := range g.Vertices() { // Only care about providers pn, ok := v.(GraphNodeProvider) - if !ok || pn.ProviderName() == "" { + if !ok { continue } - // Also require a subpath, if there is no subpath then we - // just totally ignore it. The expectation of this transform is - // that it is used with a graph builder that is already flattened. - var path []string - if pn, ok := raw.(GraphNodeSubPath); ok { - path = pn.Path() - } - path = normalizeModulePath(path) - - // Build the key with path.name i.e. "child.subchild.aws" - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - m[key] = raw - - // Determine the parent if we're non-root. This is length 1 since - // the 0 index should be "root" since we normalize above. - if len(path) > 1 { - path = path[:len(path)-1] - key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName()) - parentMap[raw] = key + // Also require non-empty path, since otherwise we're in the root + // module and so cannot have a parent. + if len(pn.Path()) <= 1 { + continue } - } - // Connect! - for v, key := range parentMap { - if parent, ok := m[key]; ok { - g.Connect(dag.BasicEdge(v, parent)) + // this provider may be disabled, but we can only get it's name from + // the ProviderName string + addr := pn.ProviderAddr() + parentAddr, ok := addr.Inherited() + if ok { + parent := pm[parentAddr.String()] + if parent != nil { + g.Connect(dag.BasicEdge(v, parent)) + } } } - return nil } -// PruneProviderTransformer is a GraphTransformer that prunes all the -// providers that aren't needed from the graph. A provider is unneeded if -// no resource or module is using that provider. +// PruneProviderTransformer removes any providers that are not actually used by +// anything, and provider proxies. This avoids the provider being initialized +// and configured. 
This both saves resources but also avoids errors since +// configuration may imply initialization which may require auth. type PruneProviderTransformer struct{} func (t *PruneProviderTransformer) Transform(g *Graph) error { for _, v := range g.Vertices() { - // We only care about the providers - if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" { + // We only care about providers + _, ok := v.(GraphNodeProvider) + if !ok { continue } - // Does anything depend on this? If not, then prune it. - if s := g.UpEdges(v); s.Len() == 0 { - if nv, ok := v.(dag.NamedVertex); ok { - log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name()) - } + + // ProxyProviders will have up edges, but we're now done with them in the graph + if _, ok := v.(*graphNodeProxyProvider); ok { + log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v)) g.Remove(v) } - } - - return nil -} -// providerMapKey is a helper that gives us the key to use for the -// maps returned by things such as providerVertexMap. -func providerMapKey(k string, v dag.Vertex) string { - pathPrefix := "" - if sp, ok := v.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - pathPrefix = modulePrefixStr(raw) + "." + // Remove providers with no dependencies. + if g.UpEdges(v).Len() == 0 { + log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v)) + g.Remove(v) } } - return pathPrefix + k + return nil } -func providerVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) +func providerVertexMap(g *Graph) map[string]GraphNodeProvider { + m := make(map[string]GraphNodeProvider) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeProvider); ok { - key := providerMapKey(pv.ProviderName(), v) - m[key] = v + addr := pv.ProviderAddr() + m[addr.String()] = pv } } return m } -func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { - m := make(map[string]dag.Vertex) +func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider { + m := make(map[string]GraphNodeCloseProvider) for _, v := range g.Vertices() { if pv, ok := v.(GraphNodeCloseProvider); ok { - m[pv.CloseProviderName()] = v + addr := pv.CloseProviderAddr() + m[addr.String()] = pv } } @@ -320,16 +410,25 @@ func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { } type graphNodeCloseProvider struct { - ProviderNameValue string + Addr addrs.AbsProviderConfig } +var ( + _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil) +) + func (n *graphNodeCloseProvider) Name() string { - return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) + return n.Addr.String() + " (close)" +} + +// GraphNodeSubPath impl. +func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance { + return n.Addr.Module } // GraphNodeEvalable impl. func (n *graphNodeCloseProvider) EvalTree() EvalNode { - return CloseProviderEvalTree(n.ProviderNameValue) + return CloseProviderEvalTree(n.Addr) } // GraphNodeDependable impl. @@ -337,8 +436,8 @@ func (n *graphNodeCloseProvider) DependableName() []string { return []string{n.Name()} } -func (n *graphNodeCloseProvider) CloseProviderName() string { - return n.ProviderNameValue +func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig { + return n.Addr } // GraphNodeDotter impl. @@ -362,19 +461,257 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool { return true } -// graphNodeProviderConsumerDummy is a struct that never enters the real -// graph (though it could to no ill effect). 
It implements -// GraphNodeProviderConsumer and GraphNodeSubpath as a way to force -// certain transformations. -type graphNodeProviderConsumerDummy struct { - ProviderValue string - PathValue []string +// graphNodeProxyProvider is a GraphNodeProvider implementation that is used to +// store the name and value of a provider node for inheritance between modules. +// These nodes are only used to store the data while loading the provider +// configurations, and are removed after all the resources have been connected +// to their providers. +type graphNodeProxyProvider struct { + addr addrs.AbsProviderConfig + target GraphNodeProvider } -func (n *graphNodeProviderConsumerDummy) Path() []string { - return n.PathValue +var ( + _ GraphNodeProvider = (*graphNodeProxyProvider)(nil) +) + +func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig { + return n.addr +} + +func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance { + return n.addr.Module +} + +func (n *graphNodeProxyProvider) Name() string { + return n.addr.String() + " (proxy)" } -func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string { - return []string{n.ProviderValue} +// find the concrete provider instance +func (n *graphNodeProxyProvider) Target() GraphNodeProvider { + switch t := n.target.(type) { + case *graphNodeProxyProvider: + return t.Target() + default: + return n.target + } +} + +// ProviderConfigTransformer adds all provider nodes from the configuration and +// attaches the configs. +type ProviderConfigTransformer struct { + Providers []string + Concrete ConcreteProviderNodeFunc + + // each provider node is stored here so that the proxy nodes can look up + // their targets by name. + providers map[string]GraphNodeProvider + // record providers that can be overriden with a proxy + proxiable map[string]bool + + // Config is the root node of the configuration tree to add providers from. + Config *configs.Config +} + +func (t *ProviderConfigTransformer) Transform(g *Graph) error { + // If no configuration is given, we don't do anything + if t.Config == nil { + return nil + } + + t.providers = make(map[string]GraphNodeProvider) + t.proxiable = make(map[string]bool) + + // Start the transformation process + if err := t.transform(g, t.Config); err != nil { + return err + } + + // finally attach the configs to the new nodes + return t.attachProviderConfigs(g) +} + +func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error { + // If no config, do nothing + if c == nil { + return nil + } + + // Add our resources + if err := t.transformSingle(g, c); err != nil { + return err + } + + // Transform all the children. + for _, cc := range c.Children { + if err := t.transform(g, cc); err != nil { + return err + } + } + return nil +} + +func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error { + // Get the module associated with this configuration tree node + mod := c.Module + staticPath := c.Path + + // We actually need a dynamic module path here, but we've not yet updated + // our graph builders enough to support expansion of module calls with + // "count" and "for_each" set, so for now we'll shim this by converting to + // a dynamic path with no keys. At the time of writing this is the only + // possible kind of dynamic path anyway. 
+ path := make(addrs.ModuleInstance, len(staticPath)) + for i, name := range staticPath { + path[i] = addrs.ModuleInstanceStep{ + Name: name, + } + } + + // add all providers from the configuration + for _, p := range mod.ProviderConfigs { + relAddr := p.Addr() + addr := relAddr.Absolute(path) + + abstract := &NodeAbstractProvider{ + Addr: addr, + } + var v dag.Vertex + if t.Concrete != nil { + v = t.Concrete(abstract) + } else { + v = abstract + } + + // Add it to the graph + g.Add(v) + key := addr.String() + t.providers[key] = v.(GraphNodeProvider) + + // A provider configuration is "proxyable" if its configuration is + // entirely empty. This means it's standing in for a provider + // configuration that must be passed in from the parent module. + // We decide this by evaluating the config with an empty schema; + // if this succeeds, then we know there's nothing in the body. + _, diags := p.Config.Content(&hcl.BodySchema{}) + t.proxiable[key] = !diags.HasErrors() + } + + // Now replace the provider nodes with proxy nodes if a provider was being + // passed in, and create implicit proxies if there was no config. Any extra + // proxies will be removed in the prune step. + return t.addProxyProviders(g, c) +} + +func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error { + path := c.Path + + // can't add proxies at the root + if len(path) == 0 { + return nil + } + + parentPath, callAddr := path.Call() + parent := c.Parent + if parent == nil { + return nil + } + + callName := callAddr.Name + var parentCfg *configs.ModuleCall + for name, mod := range parent.Module.ModuleCalls { + if name == callName { + parentCfg = mod + break + } + } + + // We currently don't support count/for_each for modules and so we must + // shim our path and parentPath into module instances here so that the + // rest of Terraform can behave as if we do. This shimming should be + // removed later as part of implementing count/for_each for modules. + instPath := make(addrs.ModuleInstance, len(path)) + for i, name := range path { + instPath[i] = addrs.ModuleInstanceStep{Name: name} + } + parentInstPath := make(addrs.ModuleInstance, len(parentPath)) + for i, name := range parentPath { + parentInstPath[i] = addrs.ModuleInstanceStep{Name: name} + } + + if parentCfg == nil { + // this can't really happen during normal execution. + return fmt.Errorf("parent module config not found for %s", c.Path.String()) + } + + // Go through all the providers the parent is passing in, and add proxies to + // the parent provider nodes. + for _, pair := range parentCfg.Providers { + fullAddr := pair.InChild.Addr().Absolute(instPath) + fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath) + fullName := fullAddr.String() + fullParentName := fullParentAddr.String() + + parentProvider := t.providers[fullParentName] + + if parentProvider == nil { + return fmt.Errorf("missing provider %s", fullParentName) + } + + proxy := &graphNodeProxyProvider{ + addr: fullAddr, + target: parentProvider, + } + + concreteProvider := t.providers[fullName] + + // replace the concrete node with the provider passed in + if concreteProvider != nil && t.proxiable[fullName] { + g.Replace(concreteProvider, proxy) + t.providers[fullName] = proxy + continue + } + + // aliased configurations can't be implicitly passed in + if fullAddr.ProviderConfig.Alias != "" { + continue + } + + // There was no concrete provider, so add this as an implicit provider. + // The extra proxy will be pruned later if it's unused. 
+ g.Add(proxy) + t.providers[fullName] = proxy + } + return nil +} + +func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error { + for _, v := range g.Vertices() { + // Only care about GraphNodeAttachProvider implementations + apn, ok := v.(GraphNodeAttachProvider) + if !ok { + continue + } + + // Determine what we're looking for + addr := apn.ProviderAddr() + + // Get the configuration. + mc := t.Config.DescendentForInstance(addr.Module) + if mc == nil { + log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String()) + continue + } + + // Go through the provider configs to find the matching config + for _, p := range mc.Module.ProviderConfigs { + if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias { + log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange) + apn.AttachProvider(p) + break + } + } + } + + return nil } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go index f49d8241..fe4cf0e9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go @@ -2,6 +2,9 @@ package terraform import ( "fmt" + "log" + + "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/dag" @@ -22,8 +25,8 @@ type GraphNodeCloseProvisioner interface { } // GraphNodeProvisionerConsumer is an interface that nodes that require -// a provisioner must implement. ProvisionedBy must return the name of the -// provisioner to use. +// a provisioner must implement. ProvisionedBy must return the names of the +// provisioners to use. type GraphNodeProvisionerConsumer interface { ProvisionedBy() []string } @@ -48,6 +51,7 @@ func (t *ProvisionerTransformer) Transform(g *Graph) error { continue } + log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key])) g.Connect(dag.BasicEdge(v, m[key])) } } @@ -83,12 +87,9 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { // If this node has a subpath, then we use that as a prefix // into our map to check for an existing provider. - var path []string + path := addrs.RootModuleInstance if sp, ok := pv.(GraphNodeSubPath); ok { - raw := normalizeModulePath(sp.Path()) - if len(raw) > len(rootModulePath) { - path = raw - } + path = sp.Path() } for _, p := range pv.ProvisionedBy() { @@ -101,7 +102,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error { } if _, ok := supported[p]; !ok { - // If we don't support the provisioner type, skip it. + // If we don't support the provisioner type, we skip it. // Validation later will catch this as an error. 
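The provisionerMapKey change just below follows the same addressing shift; as a rough standalone sketch of path-prefixed keying (plain strings and a hypothetical helper name, illustrative only), the same provisioner name in two modules yields two distinct keys:

    package main

    import "fmt"

    // provisionerKey mirrors path-prefixed keying, keeping provisioners
    // distinct per module: "module.net.remote-exec" vs. plain "remote-exec".
    func provisionerKey(modulePath, name string) string {
        if modulePath == "" {
            return name
        }
        return modulePath + "." + name
    }

    func main() {
        fmt.Println(provisionerKey("", "remote-exec"))           // remote-exec
        fmt.Println(provisionerKey("module.net", "remote-exec")) // module.net.remote-exec
    }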
continue
}

@@ -114,6 +115,7 @@
		// Add the missing provisioner node to the graph
		m[key] = g.Add(newV)
+		log.Printf("[TRACE] MissingProvisionerTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v))
	}
}

@@ -156,10 +158,7 @@ func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
func provisionerMapKey(k string, v dag.Vertex) string {
	pathPrefix := ""
	if sp, ok := v.(GraphNodeSubPath); ok {
-		raw := normalizeModulePath(sp.Path())
-		if len(raw) > len(rootModulePath) {
-			pathPrefix = modulePrefixStr(raw) + "."
-		}
+		pathPrefix = sp.Path().String() + "."
	}

	return pathPrefix + k
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
index c5452354..23bc8cd2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -3,8 +3,12 @@ package terraform
import (
	"fmt"
	"log"
-	"strings"
+	"github.com/hashicorp/hcl2/hcl"
+	"github.com/hashicorp/terraform/configs/configschema"
+	"github.com/hashicorp/terraform/lang"
+
+	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/dag"
)

@@ -17,35 +21,46 @@ import (
// be referenced and other methods of referencing may still be possible (such
// as by path!)
type GraphNodeReferenceable interface {
-	// ReferenceableName is the name by which this can be referenced.
-	// This can be either just the type, or include the field. Example:
-	// "aws_instance.bar" or "aws_instance.bar.id".
-	ReferenceableName() []string
+	GraphNodeSubPath
+
+	// ReferenceableAddrs returns a list of addresses through which this can be
+	// referenced.
+	ReferenceableAddrs() []addrs.Referenceable
}

// GraphNodeReferencer must be implemented by nodes that reference other
// Terraform items and therefore depend on them.
type GraphNodeReferencer interface {
-	// References are the list of things that this node references. This
-	// can include fields or just the type, just like GraphNodeReferenceable
-	// above.
-	References() []string
+	GraphNodeSubPath
+
+	// References returns a list of references made by this node, which
+	// include both a referenced address and source location information for
+	// the reference.
+	References() []*addrs.Reference
}

-// GraphNodeReferenceGlobal is an interface that can optionally be
-// implemented. If ReferenceGlobal returns true, then the References()
-// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
-// etc.
+// GraphNodeReferenceOutside is an interface that can optionally be implemented.
+// A node that implements it can specify that its own referenceable addresses
+// and/or the addresses it references are in a different module than the
+// node itself.
//
-// This allows a node to reference and be referenced by a specific name
-// that may cross module boundaries. This can be very dangerous so use
-// this wisely.
+// Any referenceable addresses returned by ReferenceableAddrs are interpreted
+// relative to the returned selfPath.
//
-// The primary use case for this is module boundaries (variables coming in).
-type GraphNodeReferenceGlobal interface {
-	// Set to true to signal that references and name are fully
-	// qualified. See the above docs for more information.
-	ReferenceGlobal() bool
+// Any references returned by References are interpreted relative to the
+// returned referencePath.
+//
+// It is valid but not required for either of these paths to match what is
+// returned by method Path, though if both match the main Path then there
+// is no reason to implement this method.
+//
+// The primary use-case for this is the nodes representing module input
+// variables, since their expressions are resolved in terms of their calling
+// module, but they are still referenced from their own module.
+type GraphNodeReferenceOutside interface {
+	// ReferenceOutside returns a path in which any references from this node
+	// are resolved.
+	ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
}

// ReferenceTransformer is a GraphTransformer that connects all the
@@ -76,82 +91,173 @@ func (t *ReferenceTransformer) Transform(g *Graph) error {
	return nil
}

+// DestroyValueReferenceTransformer is a GraphTransformer that reverses the
+// edges for locals and outputs that depend on other nodes which will be
+// removed during destroy. If a destroy node is evaluated before the local or
+// output value, it will be removed from the state, and the later interpolation
+// will fail.
+type DestroyValueReferenceTransformer struct{}
+
+func (t *DestroyValueReferenceTransformer) Transform(g *Graph) error {
+	vs := g.Vertices()
+	for _, v := range vs {
+		switch v.(type) {
+		case *NodeApplyableOutput, *NodeLocal:
+			// OK
+		default:
+			continue
+		}
+
+		// reverse any outgoing edges so that the value is evaluated first.
+		for _, e := range g.EdgesFrom(v) {
+			target := e.Target()
+
+			// only destroy nodes will be evaluated in reverse
+			if _, ok := target.(GraphNodeDestroyer); !ok {
+				continue
+			}
+
+			log.Printf("[TRACE] output dep: %s", dag.VertexName(target))
+
+			g.RemoveEdge(e)
+			g.Connect(&DestroyEdge{S: target, T: v})
+		}
+	}
+
+	return nil
+}
+
+// PruneUnusedValuesTransformer is a GraphTransformer that removes local and
+// output values which are not referenced in the graph. Since outputs and
+// locals always need to be evaluated, if they reference a resource that is not
+// available in the state the interpolation could fail.
+type PruneUnusedValuesTransformer struct{}
+
+func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
+	// this might need multiple runs in order to ensure that pruning a value
+	// doesn't affect a previously checked value.
+	for removed := 0; ; removed = 0 {
+		for _, v := range g.Vertices() {
+			switch v.(type) {
+			case *NodeApplyableOutput, *NodeLocal:
+				// OK
+			default:
+				continue
+			}
+
+			dependants := g.UpEdges(v)
+
+			switch dependants.Len() {
+			case 0:
+				// nothing at all depends on this
+				g.Remove(v)
+				removed++
+			case 1:
+				// because an output's destroy node always depends on the output,
+				// we need to check for the case of a single destroy node.
+				d := dependants.List()[0]
+				if _, ok := d.(*NodeDestroyableOutput); ok {
+					g.Remove(v)
+					removed++
+				}
+			}
+		}
+		if removed == 0 {
+			break
+		}
+	}
+
+	return nil
+}
+
// ReferenceMap is a structure that can be used to efficiently check
// for references on a graph.
type ReferenceMap struct {
-	// m is the mapping of referenceable name to list of verticies that
-	// implement that name. This is built on initialization.
-	references   map[string][]dag.Vertex
-	referencedBy map[string][]dag.Vertex
+	// vertices is a map from internal reference keys (as produced by the
+	// mapKey method) to one or more vertices that are identified by each key.
+	//
+	// A particular reference key might actually identify multiple vertices,
+	// e.g. in situations where one object is contained inside another.
+	vertices map[string][]dag.Vertex
+
+	// edges is a map whose keys are a subset of the internal reference keys
+	// from "vertices", and whose values are the nodes that refer to each
+	// key. The values in this map are the referrers, while values in
+	// "vertices" are the referents. The keys in both cases are referents.
+	edges map[string][]dag.Vertex
}

-// References returns the list of vertices that this vertex
-// references along with any missing references.
-func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+// References returns the set of vertices that the given vertex refers to,
+// and any referenced addresses that do not have corresponding vertices.
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
	rn, ok := v.(GraphNodeReferencer)
	if !ok {
		return nil, nil
	}
+	if _, ok := v.(GraphNodeSubPath); !ok {
+		return nil, nil
+	}

	var matches []dag.Vertex
-	var missing []string
-	prefix := m.prefix(v)
-	for _, ns := range rn.References() {
-		found := false
-		for _, n := range strings.Split(ns, "/") {
-			n = prefix + n
-			parents, ok := m.references[n]
-			if !ok {
-				continue
+	var missing []addrs.Referenceable
+
+	for _, ref := range rn.References() {
+		subject := ref.Subject
+
+		key := m.referenceMapKey(v, subject)
+		if _, exists := m.vertices[key]; !exists {
+			// If what we were looking for was a ResourceInstance then we
+			// might be in a resource-oriented graph rather than an
+			// instance-oriented graph, and so we'll see if we have the
+			// resource itself instead.
+			switch ri := subject.(type) {
+			case addrs.ResourceInstance:
+				subject = ri.ContainingResource()
+			case addrs.ResourceInstancePhase:
+				subject = ri.ContainingResource()
			}
+			key = m.referenceMapKey(v, subject)
+		}

-			// Mark that we found a match
-			found = true
-
-			// Make sure this isn't a self reference, which isn't included
-			selfRef := false
-			for _, p := range parents {
-				if p == v {
-					selfRef = true
-					break
-				}
-			}
-			if selfRef {
+		vertices := m.vertices[key]
+		for _, rv := range vertices {
+			// don't include self-references
+			if rv == v {
				continue
			}
-
-			matches = append(matches, parents...)
-			break
+			matches = append(matches, rv)
		}
-
-		if !found {
-			missing = append(missing, ns)
+		if len(vertices) == 0 {
+			missing = append(missing, ref.Subject)
		}
	}

	return matches, missing
}

-// ReferencedBy returns the list of vertices that reference the
-// vertex passed in.
-func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+// Referrers returns the set of vertices that refer to the given vertex.
+func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
	rn, ok := v.(GraphNodeReferenceable)
	if !ok {
		return nil
	}
+	sp, ok := v.(GraphNodeSubPath)
+	if !ok {
		return nil
	}

	var matches []dag.Vertex
-	prefix := m.prefix(v)
-	for _, n := range rn.ReferenceableName() {
-		n = prefix + n
-		children, ok := m.referencedBy[n]
+	for _, addr := range rn.ReferenceableAddrs() {
+		key := m.mapKey(sp.Path(), addr)
+		referrers, ok := m.edges[key]
		if !ok {
			continue
		}

-		// Make sure this isn't a self reference, which isn't included
+		// If the referrer set includes our own given vertex then we skip,
+		// since we don't want to return self-references.
selfRef := false - for _, p := range children { + for _, p := range referrers { if p == v { selfRef = true break @@ -161,28 +267,77 @@ func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex { continue } - matches = append(matches, children...) + matches = append(matches, referrers...) } return matches } -func (m *ReferenceMap) prefix(v dag.Vertex) string { - // If the node is stating it is already fully qualified then - // we don't have to create the prefix! - if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { - return "" +func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string { + return fmt.Sprintf("%s|%s", path.String(), addr.String()) +} + +// vertexReferenceablePath returns the path in which the given vertex can be +// referenced. This is the path that its results from ReferenceableAddrs +// are considered to be relative to. +// +// Only GraphNodeSubPath implementations can be referenced, so this method will +// panic if the given vertex does not implement that interface. +func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance { + sp, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp)) } - // Create the prefix based on the path - var prefix string - if pn, ok := v.(GraphNodeSubPath); ok { - if path := normalizeModulePath(pn.Path()); len(path) > 1 { - prefix = modulePrefixStr(path) + "." - } + if outside, ok := v.(GraphNodeReferenceOutside); ok { + // Vertex is referenced from a different module than where it was + // declared. + path, _ := outside.ReferenceOutside() + return path } - return prefix + // Vertex is referenced from the same module as where it was declared. + return sp.Path() +} + +// vertexReferencePath returns the path in which references _from_ the given +// vertex must be interpreted. +// +// Only GraphNodeSubPath implementations can have references, so this method +// will panic if the given vertex does not implement that interface. +func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance { + sp, ok := referrer.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp)) + } + + var path addrs.ModuleInstance + if outside, ok := referrer.(GraphNodeReferenceOutside); ok { + // Vertex makes references to objects in a different module than where + // it was declared. + _, path = outside.ReferenceOutside() + return path + } + + // Vertex makes references to objects in the same module as where it + // was declared. + return sp.Path() +} + +// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex +// that the reference is from, and "addr" is the address of the object being +// referenced. +// +// The result is an opaque string that includes both the address of the given +// object and the address of the module instance that object belongs to. +// +// Only GraphNodeSubPath implementations can be referrers, so this method will +// panic if the given vertex does not implement that interface. 
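A minimal standalone sketch of that "module path plus address" keying, with plain strings and a hypothetical vertex type instead of the vendored addrs and dag packages:

    package main

    import "fmt"

    // vertex is a minimal stand-in for a graph node that lives in a module.
    type vertex struct {
        name string // e.g. "aws_instance.web"
        path string // module instance path, "" for the root module
    }

    // mapKey mirrors the "path|address" scheme: the same address declared in
    // two different module instances must produce two distinct keys.
    func mapKey(path, addr string) string { return path + "|" + addr }

    func main() {
        byKey := map[string][]vertex{}
        web := vertex{name: "aws_instance.web", path: "module.app"}
        k := mapKey(web.path, web.name)
        byKey[k] = append(byKey[k], web)

        // A reference resolved in module.app finds the vertex; the same
        // address resolved in the root module does not.
        fmt.Println(len(byKey[mapKey("module.app", "aws_instance.web")])) // 1
        fmt.Println(len(byKey[mapKey("", "aws_instance.web")]))           // 0
    }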
+func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string { + path := vertexReferencePath(referrer) + return m.mapKey(path, addr) } // NewReferenceMap is used to create a new reference map for the @@ -191,83 +346,82 @@ func NewReferenceMap(vs []dag.Vertex) *ReferenceMap { var m ReferenceMap // Build the lookup table - refMap := make(map[string][]dag.Vertex) + vertices := make(map[string][]dag.Vertex) for _, v := range vs { + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + // We're only looking for referenceable nodes rn, ok := v.(GraphNodeReferenceable) if !ok { continue } + path := m.vertexReferenceablePath(v) + // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.ReferenceableName() { - n = prefix + n - refMap[n] = append(refMap[n], v) + for _, addr := range rn.ReferenceableAddrs() { + key := m.mapKey(path, addr) + vertices[key] = append(vertices[key], v) } - // If there is a path, it is always referenceable by that. For - // example, if this is a referenceable thing at path []string{"foo"}, - // then it can be referenced at "module.foo" - if pn, ok := v.(GraphNodeSubPath); ok { - for _, p := range ReferenceModulePath(pn.Path()) { - refMap[p] = append(refMap[p], v) - } + // Any node can be referenced by the address of the module it belongs + // to or any of that module's ancestors. + for _, addr := range path.Ancestors()[1:] { + // Can be referenced either as the specific call instance (with + // an instance key) or as the bare module call itself (the "module" + // block in the parent module that created the instance). + callPath, call := addr.Call() + callInstPath, callInst := addr.CallInstance() + callKey := m.mapKey(callPath, call) + callInstKey := m.mapKey(callInstPath, callInst) + vertices[callKey] = append(vertices[callKey], v) + vertices[callInstKey] = append(vertices[callInstKey], v) } } // Build the lookup table for referenced by - refByMap := make(map[string][]dag.Vertex) + edges := make(map[string][]dag.Vertex) for _, v := range vs { - // We're only looking for referenceable nodes + _, ok := v.(GraphNodeSubPath) + if !ok { + // Only nodes with paths can participate in a reference map. + continue + } + rn, ok := v.(GraphNodeReferencer) if !ok { + // We're only looking for referenceable nodes continue } // Go through and cache them - prefix := m.prefix(v) - for _, n := range rn.References() { - n = prefix + n - refByMap[n] = append(refByMap[n], v) + for _, ref := range rn.References() { + if ref.Subject == nil { + // Should never happen + panic(fmt.Sprintf("%T.References returned reference with nil subject", rn)) + } + key := m.referenceMapKey(v, ref.Subject) + edges[key] = append(edges[key], v) } } - m.references = refMap - m.referencedBy = refByMap + m.vertices = vertices + m.edges = edges return &m } -// Returns the reference name for a module path. The path "foo" would return -// "module.foo". If this is a deeply nested module, it will be every parent -// as well. 
For example: ["foo", "bar"] would return both "module.foo" and -// "module.foo.module.bar" -func ReferenceModulePath(p []string) []string { - p = normalizeModulePath(p) - if len(p) == 1 { - // Root, no name - return nil - } - - result := make([]string, 0, len(p)-1) - for i := len(p); i > 1; i-- { - result = append(result, modulePrefixStr(p[:i])) - } - - return result -} - // ReferencesFromConfig returns the references that a configuration has // based on the interpolated variables in a configuration. -func ReferencesFromConfig(c *config.RawConfig) []string { - var result []string - for _, v := range c.Variables { - if r := ReferenceFromInterpolatedVar(v); len(r) > 0 { - result = append(result, r...) - } +func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference { + if body == nil { + return nil } - - return result + refs, _ := lang.ReferencesInBlock(body, schema) + return refs } // ReferenceFromInterpolatedVar returns the reference from this variable, @@ -296,18 +450,38 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string { return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)} case *config.UserVariable: return []string{fmt.Sprintf("var.%s", v.Name)} + case *config.LocalVariable: + return []string{fmt.Sprintf("local.%s", v.Name)} default: return nil } } -func modulePrefixStr(p []string) string { - parts := make([]string, 0, len(p)*2) - for _, p := range p[1:] { - parts = append(parts, "module", p) +// appendResourceDestroyReferences identifies resource and resource instance +// references in the given slice and appends to it the "destroy-phase" +// equivalents of those references, returning the result. +// +// This can be used in the References implementation for a node which must also +// depend on the destruction of anything it references. +func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference { + given := refs + for _, ref := range given { + switch tr := ref.Subject.(type) { + case addrs.Resource: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + case addrs.ResourceInstance: + newRef := *ref // shallow copy + newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy) + refs = append(refs, &newRef) + } } + return refs +} - return strings.Join(parts, ".") +func modulePrefixStr(p addrs.ModuleInstance) string { + return p.String() } func modulePrefixList(result []string, prefix string) []string { diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go new file mode 100644 index 00000000..ee71387e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go @@ -0,0 +1,33 @@ +package terraform + +import ( + "log" + + "github.com/hashicorp/terraform/configs" + "github.com/hashicorp/terraform/states" +) + +// RemovedModuleTransformer implements GraphTransformer to add nodes indicating +// when a module was removed from the configuration. +type RemovedModuleTransformer struct { + Config *configs.Config // root node in the config tree + State *states.State +} + +func (t *RemovedModuleTransformer) Transform(g *Graph) error { + // nothing to remove if there's no state! 
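Conceptually this transform is a set difference between the modules recorded in state and those still present in configuration; a rough standalone sketch with plain string module addresses (illustrative only, not the vendored states/configs API):

    package main

    import "fmt"

    // removedModules returns the module addresses that exist in state but
    // are no longer present in the configuration.
    func removedModules(stateModules, configModules []string) []string {
        inConfig := map[string]bool{}
        for _, m := range configModules {
            inConfig[m] = true
        }
        var removed []string
        for _, m := range stateModules {
            if !inConfig[m] {
                removed = append(removed, m)
            }
        }
        return removed
    }

    func main() {
        state := []string{"", "module.net", "module.legacy"}
        config := []string{"", "module.net"}
        fmt.Println(removedModules(state, config)) // [module.legacy]
    }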
+ if t.State == nil { + return nil + } + + for _, m := range t.State.Modules { + cc := t.Config.DescendentForInstance(m.Addr) + if cc != nil { + continue + } + + log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr) + g.Add(&NodeModuleRemoved{Addr: m.Addr}) + } + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go index cda35cb7..11237909 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go @@ -1,8 +1,8 @@ package terraform import ( - "fmt" - + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" ) @@ -11,39 +11,44 @@ import ( // // This assumes that the count is already interpolated. type ResourceCountTransformer struct { - Concrete ConcreteResourceNodeFunc + Concrete ConcreteResourceInstanceNodeFunc + Schema *configschema.Block + // Count is either the number of indexed instances to create, or -1 to + // indicate that count is not set at all and thus a no-key instance should + // be created. Count int - Addr *ResourceAddress + Addr addrs.AbsResource } func (t *ResourceCountTransformer) Transform(g *Graph) error { - // Don't allow the count to be negative if t.Count < 0 { - return fmt.Errorf("negative count: %d", t.Count) + // Negative count indicates that count is not set at all. + addr := t.Addr.Instance(addrs.NoKey) + + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema + var node dag.Vertex = abstract + if f := t.Concrete; f != nil { + node = f(abstract) + } + + g.Add(node) + return nil } // For each count, build and add the node for i := 0; i < t.Count; i++ { - // Set the index. If our count is 1 we special case it so that - // we handle the "resource.0" and "resource" boundary properly. - index := i - if t.Count == 1 { - index = -1 - } - - // Build the resource address - addr := t.Addr.Copy() - addr.Index = index + key := addrs.IntKey(i) + addr := t.Addr.Instance(key) - // Build the abstract node and the concrete one - abstract := &NodeAbstractResource{Addr: addr} + abstract := NewNodeAbstractResourceInstance(addr) + abstract.Schema = t.Schema var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } - // Add it to the graph g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go index 471cd746..0b52347d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go @@ -1,10 +1,9 @@ package terraform import ( - "fmt" "log" - "github.com/hashicorp/terraform/dag" + "github.com/hashicorp/terraform/states" ) // StateTransformer is a GraphTransformer that adds the elements of @@ -13,53 +12,63 @@ import ( // This transform is used for example by the DestroyPlanGraphBuilder to ensure // that only resources that are in the state are represented in the graph. type StateTransformer struct { - Concrete ConcreteResourceNodeFunc + // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract + // resource instance nodes that this transformer will create. 
+ // + // If either of these is nil, the objects of that type will be skipped and + // not added to the graph at all. It doesn't make sense to use this + // transformer without setting at least one of these, since that would + // skip everything and thus be a no-op. + ConcreteCurrent ConcreteResourceInstanceNodeFunc + ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc - State *State + State *states.State } func (t *StateTransformer) Transform(g *Graph) error { - // If the state is nil or empty (nil is empty) then do nothing - if t.State.Empty() { + if !t.State.HasResources() { + log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do") return nil } - // Go through all the modules in the diff. - log.Printf("[TRACE] StateTransformer: starting") - var nodes []dag.Vertex + switch { + case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects") + case t.ConcreteCurrent != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only") + case t.ConcreteDeposed != nil: + log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only") + default: + log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all") + } + for _, ms := range t.State.Modules { - log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) + moduleAddr := ms.Addr - // Go through all the resources in this module. - for name, rs := range ms.Resources { - log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs) + for _, rs := range ms.Resources { + resourceAddr := rs.Addr.Absolute(moduleAddr) - // Add the resource to the graph - addr, err := parseResourceAddressInternal(name) - if err != nil { - panic(fmt.Sprintf( - "Error parsing internal name, this is a bug: %q", name)) - } + for key, is := range rs.Instances { + addr := resourceAddr.Instance(key) - // Very important: add the module path for this resource to - // the address. Remove "root" from it. 
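The replacement walk that follows creates one graph node per current instance object plus one per deposed object; a rough standalone sketch of that shape (hypothetical Instance type and plain string addresses, illustrative only):

    package main

    import "fmt"

    // Instance is a simplified stand-in for a resource instance in state:
    // one optional current object plus zero or more deposed objects by key.
    type Instance struct {
        HasCurrent bool
        Deposed    []string
    }

    func main() {
        instances := map[string]Instance{
            "aws_instance.web[0]": {HasCurrent: true, Deposed: []string{"00000001"}},
            "aws_instance.web[1]": {HasCurrent: true},
        }
        // Mirrors the nested walk: each current object gets a node, and
        // each deposed object gets its own node as well. (Map iteration
        // order is not deterministic.)
        for addr, is := range instances {
            if is.HasCurrent {
                fmt.Printf("node for %s (current)\n", addr)
            }
            for _, dk := range is.Deposed {
                fmt.Printf("node for %s (deposed %s)\n", addr, dk)
            }
        }
    }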
-			addr.Path = ms.Path[1:]
+				if obj := is.Current; obj != nil && t.ConcreteCurrent != nil {
+					abstract := NewNodeAbstractResourceInstance(addr)
+					node := t.ConcreteCurrent(abstract)
+					g.Add(node)
+					log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr)
+				}

-			// Add the resource to the graph
-			abstract := &NodeAbstractResource{Addr: addr}
-			var node dag.Vertex = abstract
-			if f := t.Concrete; f != nil {
-				node = f(abstract)
+				if t.ConcreteDeposed != nil {
+					for dk := range is.Deposed {
+						abstract := NewNodeAbstractResourceInstance(addr)
+						node := t.ConcreteDeposed(abstract, dk)
+						g.Add(node)
+						log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk)
+					}
+				}
			}
-
-			nodes = append(nodes, node)
		}
	}

-	// Add all the nodes to the graph
-	for _, n := range nodes {
-		g.Add(n)
-	}
-
	return nil
}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
index 225ac4b4..d25274e6 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -3,6 +3,7 @@ package terraform
import (
	"log"

+	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/dag"
)

@@ -12,7 +13,22 @@ import (
// provided will contain every target provided, and each implementing graph
// node must filter this list to targets considered relevant.
type GraphNodeTargetable interface {
-	SetTargets([]ResourceAddress)
+	SetTargets([]addrs.Targetable)
+}
+
+// GraphNodeTargetDownstream is an interface for graph nodes that need to
+// remain present under targeting if any of their dependencies are targeted.
+// TargetDownstream is called with the set of vertices that are direct
+// dependencies for the node, and it should return true if the node must remain
+// in the graph in support of those dependencies.
+//
+// This is used in situations where the dependency edges are representing an
+// ordering relationship but the dependency must still be visited if its
+// dependencies are visited. This is true for outputs, for example, since
+// they must get updated if any of their dependent resources get updated,
+// which would not normally be true if one of their dependencies were targeted.
+type GraphNodeTargetDownstream interface {
+	TargetDownstream(targeted, untargeted *dag.Set) bool
}

// TargetsTransformer is a GraphTransformer that, when the user specifies a
@@ -20,11 +36,13 @@ type GraphNodeTargetable interface {
// their dependencies.
type TargetsTransformer struct {
	// List of targeted resource names specified by the user
-	Targets []string
+	Targets []addrs.Targetable

-	// List of parsed targets, provided by callers like ResourceCountTransform
-	// that already have the targets parsed
-	ParsedTargets []ResourceAddress
+	// If set, the index portions of resource addresses will be ignored
+	// for comparison. This is used when transforming a graph where
+	// counted resources have not yet been expanded, since otherwise
+	// the unexpanded nodes (which never have indices) would not match.
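As a standalone illustration of that index-insensitive comparison (plain string addresses rather than addrs.Targetable; the helper names are hypothetical):

    package main

    import (
        "fmt"
        "strings"
    )

    // containingResource drops a trailing "[index]" from an instance
    // address, e.g. "aws_instance.web[2]" -> "aws_instance.web".
    func containingResource(addr string) string {
        if i := strings.IndexByte(addr, '['); i >= 0 {
            return addr[:i]
        }
        return addr
    }

    // matches reports whether target selects addr, optionally ignoring the
    // index portion as the IgnoreIndices option does for unexpanded graphs.
    func matches(target, addr string, ignoreIndices bool) bool {
        if ignoreIndices {
            target = containingResource(target)
            addr = containingResource(addr)
        }
        return target == addr
    }

    func main() {
        fmt.Println(matches("aws_instance.web[0]", "aws_instance.web", true))  // true
        fmt.Println(matches("aws_instance.web[0]", "aws_instance.web", false)) // false
    }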
+ IgnoreIndices bool

 // Set to true when we're in a `terraform destroy` or a
 // `terraform plan -destroy`
@@ -32,17 +50,8 @@ type TargetsTransformer struct {
 }

 func (t *TargetsTransformer) Transform(g *Graph) error {
- if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
- addrs, err := t.parseTargetAddresses()
- if err != nil {
- return err
- }
-
- t.ParsedTargets = addrs
- }
-
- if len(t.ParsedTargets) > 0 {
- targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+ if len(t.Targets) > 0 {
+ targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
 if err != nil {
 return err
 }
@@ -52,9 +61,11 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
 if _, ok := v.(GraphNodeResource); ok {
 removable = true
 }
+
 if vr, ok := v.(RemovableIfNotTargeted); ok {
 removable = vr.RemoveIfNotTargeted()
 }
+
 if removable && !targetedNodes.Include(v) {
 log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
 g.Remove(v)
@@ -65,26 +76,15 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
 return nil
 }

-func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
- addrs := make([]ResourceAddress, len(t.Targets))
- for i, target := range t.Targets {
- ta, err := ParseResourceAddress(target)
- if err != nil {
- return nil, err
- }
- addrs[i] = *ta
- }
+// Returns a set of targeted nodes. A targeted node is either addressed
+// directly, addressed indirectly via its container, or it's a dependency of a
+// targeted node. Destroy mode keeps dependents instead of dependencies.
+func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
+ targetedNodes := new(dag.Set)

- return addrs, nil
-}
+ vertices := g.Vertices()

-// Returns the list of targeted nodes. A targeted node is either addressed
-// directly, or is an Ancestor of a targeted node. Destroy mode keeps
-// Descendents instead of Ancestors.
-func (t *TargetsTransformer) selectTargetedNodes(
- g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
- targetedNodes := new(dag.Set)
- for _, v := range g.Vertices() {
+ for _, v := range vertices {
 if t.nodeIsTarget(v, addrs) {
 targetedNodes.Add(v)

@@ -111,20 +111,143 @@ func (t *TargetsTransformer) selectTargetedNodes(
 }
 }
 }
+ return t.addDependencies(targetedNodes, g)
+}
+
+func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (*dag.Set, error) {
+ // Handle nodes that need to be included if their dependencies are included.
+ // This requires multiple passes since we need to catch transitive
+ // dependencies if and only if they are via other nodes that also
+ // support TargetDownstream. For example:
+ // output -> output -> targeted-resource: both outputs need to be targeted
+ // output -> non-targeted-resource -> targeted-resource: output not targeted
+ //
+ // We'll keep looping until we stop targeting more nodes.
+ queue := targetedNodes.List()
+ for len(queue) > 0 {
+ vertices := queue
+ queue = nil // ready to append for next iteration if necessary
+ for _, v := range vertices {
+ // providers don't cause transitive dependencies, so don't target
+ // downstream from them.
+ if _, ok := v.(GraphNodeProvider); ok {
+ continue
+ }
+
+ dependers := g.UpEdges(v)
+ if dependers == nil {
+ // indicates that there are no up edges for this node, so
+ // we have nothing to do here.
+ continue + } + + dependers = dependers.Filter(func(dv interface{}) bool { + _, ok := dv.(GraphNodeTargetDownstream) + return ok + }) + + if dependers.Len() == 0 { + continue + } + + for _, dv := range dependers.List() { + if targetedNodes.Include(dv) { + // Already present, so nothing to do + continue + } + + // We'll give the node some information about what it's + // depending on in case that informs its decision about whether + // it is safe to be targeted. + deps := g.DownEdges(v) + + depsTargeted := deps.Intersection(targetedNodes) + depsUntargeted := deps.Difference(depsTargeted) + + if dv.(GraphNodeTargetDownstream).TargetDownstream(depsTargeted, depsUntargeted) { + targetedNodes.Add(dv) + // Need to visit this node on the next pass to see if it + // has any transitive dependers. + queue = append(queue, dv) + } + } + } + } + + return targetedNodes.Filter(func(dv interface{}) bool { + return filterPartialOutputs(dv, targetedNodes, g) + }), nil +} + +// Outputs may have been included transitively, but if any of their +// dependencies have been pruned they won't be resolvable. +// If nothing depends on the output, and the output is missing any +// dependencies, remove it from the graph. +// This essentially maintains the previous behavior where interpolation in +// outputs would fail silently, but can now surface errors where the output +// is required. +func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool { + // should this just be done with TargetDownstream? + if _, ok := v.(*NodeApplyableOutput); !ok { + return true + } + + dependers := g.UpEdges(v) + for _, d := range dependers.List() { + if _, ok := d.(*NodeCountBoundary); ok { + continue + } + + if !targetedNodes.Include(d) { + // this one is going to be removed, so it doesn't count + continue + } + + // as soon as we see a real dependency, we mark this as + // non-removable + return true + } + + depends := g.DownEdges(v) - return targetedNodes, nil + for _, d := range depends.List() { + if !targetedNodes.Include(d) { + log.Printf("[WARN] %s missing targeted dependency %s, removing from the graph", + dag.VertexName(v), dag.VertexName(d)) + return false + } + } + return true } -func (t *TargetsTransformer) nodeIsTarget( - v dag.Vertex, addrs []ResourceAddress) bool { - r, ok := v.(GraphNodeResource) +func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool { + var vertexAddr addrs.Targetable + switch r := v.(type) { + case GraphNodeResourceInstance: + vertexAddr = r.ResourceInstanceAddr() + case GraphNodeResource: + vertexAddr = r.ResourceAddr() + default: + // Only resource and resource instance nodes can be targeted. + return false + } + _, ok := v.(GraphNodeResource) if !ok { return false } - addr := r.ResourceAddr() - for _, targetAddr := range addrs { - if targetAddr.Equals(addr) { + for _, targetAddr := range targets { + if t.IgnoreIndices { + // If we're ignoring indices then we'll convert any resource instance + // addresses into resource addresses. We don't need to convert + // vertexAddr because instance addresses are contained within + // their associated resources, and so .TargetContains will take + // care of this for us. 
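+ // For example, with IgnoreIndices set, a target of
+ // aws_instance.foo[1] is reduced here to aws_instance.foo so that it
+ // can match the corresponding unexpanded node. (The address is
+ // illustrative, not from the original code.)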
+ if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance { + targetAddr = instance.ContainingResource() + } + } + if targetAddr.TargetContains(vertexAddr) { return true } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go index b31e2c76..05daa513 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go @@ -1,7 +1,8 @@ package terraform import ( - "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/addrs" + "github.com/hashicorp/terraform/configs" ) // RootVariableTransformer is a GraphTransformer that adds all the root @@ -11,28 +12,27 @@ import ( // graph since downstream things that depend on them must be able to // reach them. type RootVariableTransformer struct { - Module *module.Tree + Config *configs.Config } func (t *RootVariableTransformer) Transform(g *Graph) error { - // If no config, no variables - if t.Module == nil { + // We can have no variables if we have no config. + if t.Config == nil { return nil } - // If we have no vars, we're done! - vars := t.Module.Config().Variables - if len(vars) == 0 { - return nil - } + // We're only considering root module variables here, since child + // module variables are handled by ModuleVariableTransformer. + vars := t.Config.Module.Variables // Add all variables here for _, v := range vars { node := &NodeRootVariable{ + Addr: addrs.InputVariable{ + Name: v.Name, + }, Config: v, } - - // Add it! g.Add(node) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go index 7c874592..f6790d9e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go @@ -1,10 +1,12 @@ package terraform +import "context" + // UIInput is the interface that must be implemented to ask for input // from this user. This should forward the request to wherever the user // inputs things to ask for values. type UIInput interface { - Input(*InputOpts) (string, error) + Input(context.Context, *InputOpts) (string, error) } // InputOpts are options for asking for input. diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go index e3a07efa..e2d9c384 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go @@ -1,5 +1,7 @@ package terraform +import "context" + // MockUIInput is an implementation of UIInput that can be used for tests. 
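+//
+// A minimal usage sketch (test code, not part of this package; the
+// "approve" id and the canned answer are illustrative):
+//
+//	mock := &MockUIInput{InputFn: func(opts *InputOpts) (string, error) {
+//		return "yes", nil
+//	}}
+//	got, err := mock.Input(context.Background(), &InputOpts{Id: "approve"})
+//	// got == "yes", err == nil, and mock.InputCalled is now true.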
type MockUIInput struct { InputCalled bool @@ -10,7 +12,7 @@ type MockUIInput struct { InputFn func(*InputOpts) (string, error) } -func (i *MockUIInput) Input(opts *InputOpts) (string, error) { +func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { i.InputCalled = true i.InputOpts = opts if i.InputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go index 2207d1d0..b5d32b1e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go @@ -1,6 +1,7 @@ package terraform import ( + "context" "fmt" ) @@ -12,8 +13,8 @@ type PrefixUIInput struct { UIInput UIInput } -func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { +func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) { opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) - return i.UIInput.Input(opts) + return i.UIInput.Input(ctx, opts) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go index 7852bc42..d828c921 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go @@ -1,13 +1,18 @@ package terraform +import "sync" + // MockUIOutput is an implementation of UIOutput that can be used for tests. type MockUIOutput struct { + sync.Mutex OutputCalled bool OutputMessage string OutputFn func(string) } func (o *MockUIOutput) Output(v string) { + o.Lock() + defer o.Unlock() o.OutputCalled = true o.OutputMessage = v if o.OutputFn != nil { diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go index 878a0312..fff964f4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go +++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go @@ -1,15 +1,19 @@ package terraform +import ( + "github.com/hashicorp/terraform/addrs" +) + // ProvisionerUIOutput is an implementation of UIOutput that calls a hook // for the output so that the hooks can handle it. type ProvisionerUIOutput struct { - Info *InstanceInfo - Type string - Hooks []Hook + InstanceAddr addrs.AbsResourceInstance + ProvisionerType string + Hooks []Hook } func (o *ProvisionerUIOutput) Output(msg string) { for _, h := range o.Hooks { - h.ProvisionOutput(o.Info, o.Type, msg) + h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg) } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/user_agent.go b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go new file mode 100644 index 00000000..a42613e8 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/user_agent.go @@ -0,0 +1,13 @@ +package terraform + +import ( + "github.com/hashicorp/terraform/httpclient" +) + +// Generate a UserAgent string +// +// Deprecated: Use httpclient.UserAgentString if you are setting your +// own User-Agent header. 
+func UserAgentString() string { + return httpclient.UserAgentString() +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go index e1d951c0..752241af 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/util.go +++ b/vendor/github.com/hashicorp/terraform/terraform/util.go @@ -1,7 +1,9 @@ package terraform import ( - "strings" + "sort" + + "github.com/hashicorp/terraform/config" ) // Semaphore is a wrapper around a channel to provide @@ -46,21 +48,8 @@ func (s Semaphore) Release() { } } -// resourceProvider returns the provider name for the given type. -func resourceProvider(t, alias string) string { - if alias != "" { - return alias - } - - idx := strings.IndexRune(t, '_') - if idx == -1 { - // If no underscores, the resource name is assumed to be - // also the provider name, e.g. if the provider exposes - // only a single resource of each type. - return t - } - - return t[:idx] +func resourceProvider(resourceType, explicitProvider string) string { + return config.ResourceProviderFullName(resourceType, explicitProvider) } // strSliceContains checks if a given string is contained in a slice @@ -73,3 +62,20 @@ func strSliceContains(haystack []string, needle string) bool { } return false } + +// deduplicate a slice of strings +func uniqueStrings(s []string) []string { + if len(s) < 2 { + return s + } + + sort.Strings(s) + result := make([]string, 1, len(s)) + result[0] = s[0] + for i := 1; i < len(s); i++ { + if s[i] != result[len(result)-1] { + result = append(result, s[i]) + } + } + return result +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go new file mode 100644 index 00000000..627593d7 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go @@ -0,0 +1,59 @@ +// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT. + +package terraform + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{}
+ _ = x[ValueFromUnknown-0]
+ _ = x[ValueFromConfig-67]
+ _ = x[ValueFromAutoFile-70]
+ _ = x[ValueFromNamedFile-78]
+ _ = x[ValueFromCLIArg-65]
+ _ = x[ValueFromEnvVar-69]
+ _ = x[ValueFromInput-73]
+ _ = x[ValueFromPlan-80]
+ _ = x[ValueFromCaller-83]
+}
+
+const (
+ _ValueSourceType_name_0 = "ValueFromUnknown"
+ _ValueSourceType_name_1 = "ValueFromCLIArg"
+ _ValueSourceType_name_2 = "ValueFromConfig"
+ _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile"
+ _ValueSourceType_name_4 = "ValueFromInput"
+ _ValueSourceType_name_5 = "ValueFromNamedFile"
+ _ValueSourceType_name_6 = "ValueFromPlan"
+ _ValueSourceType_name_7 = "ValueFromCaller"
+)
+
+var (
+ _ValueSourceType_index_3 = [...]uint8{0, 15, 32}
+)
+
+func (i ValueSourceType) String() string {
+ switch {
+ case i == 0:
+ return _ValueSourceType_name_0
+ case i == 65:
+ return _ValueSourceType_name_1
+ case i == 67:
+ return _ValueSourceType_name_2
+ case 69 <= i && i <= 70:
+ i -= 69
+ return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]]
+ case i == 73:
+ return _ValueSourceType_name_4
+ case i == 78:
+ return _ValueSourceType_name_5
+ case i == 80:
+ return _ValueSourceType_name_6
+ case i == 83:
+ return _ValueSourceType_name_7
+ default:
+ return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
index 300f2adb..75531b2b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/variables.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -2,165 +2,312 @@ package terraform

 import (
 "fmt"
- "os"
- "strings"

- "github.com/hashicorp/terraform/config"
- "github.com/hashicorp/terraform/config/module"
- "github.com/hashicorp/terraform/helper/hilmapstructure"
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/zclconf/go-cty/cty"
+ "github.com/zclconf/go-cty/cty/convert"
+
+ "github.com/hashicorp/terraform/configs"
+ "github.com/hashicorp/terraform/tfdiags"
+)
+
+// InputValue represents a value for a variable in the root module, provided
+// as part of the definition of an operation.
+type InputValue struct {
+ Value cty.Value
+ SourceType ValueSourceType
+
+ // SourceRange provides source location information for values whose
+ // SourceType is either ValueFromConfig, ValueFromAutoFile, or
+ // ValueFromNamedFile. It is not populated for other source types, and
+ // so should not be used.
+ SourceRange tfdiags.SourceRange
+}
+
+// ValueSourceType describes what broad category of source location provided
+// a particular value.
+type ValueSourceType rune
+
+const (
+ // ValueFromUnknown is the zero value of ValueSourceType and is not valid.
+ ValueFromUnknown ValueSourceType = 0
+
+ // ValueFromConfig indicates that a value came from a .tf or .tf.json file,
+ // e.g. the default value defined for a variable.
+ ValueFromConfig ValueSourceType = 'C'
+
+ // ValueFromAutoFile indicates that a value came from a "values file", like
+ // a .tfvars file, that was implicitly loaded by naming convention.
+ ValueFromAutoFile ValueSourceType = 'F'
+
+ // ValueFromNamedFile indicates that a value came from a named "values file",
+ // like a .tfvars file, that was passed explicitly on the command line (e.g.
+ // -var-file=foo.tfvars).
+ ValueFromNamedFile ValueSourceType = 'N' + + // ValueFromCLIArg indicates that the value was provided directly in + // a CLI argument. The name of this argument is not recorded and so it must + // be inferred from context. + ValueFromCLIArg ValueSourceType = 'A' + + // ValueFromEnvVar indicates that the value was provided via an environment + // variable. The name of the variable is not recorded and so it must be + // inferred from context. + ValueFromEnvVar ValueSourceType = 'E' + + // ValueFromInput indicates that the value was provided at an interactive + // input prompt. + ValueFromInput ValueSourceType = 'I' + + // ValueFromPlan indicates that the value was retrieved from a stored plan. + ValueFromPlan ValueSourceType = 'P' + + // ValueFromCaller indicates that the value was explicitly overridden by + // a caller to Context.SetVariable after the context was constructed. + ValueFromCaller ValueSourceType = 'S' ) -// Variables returns the fully loaded set of variables to use with -// ContextOpts and NewContext, loading any additional variables from -// the environment or any other sources. +func (v *InputValue) GoString() string { + if (v.SourceRange != tfdiags.SourceRange{}) { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange) + } else { + return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType) + } +} + +func (v ValueSourceType) GoString() string { + return fmt.Sprintf("terraform.%s", v) +} + +//go:generate stringer -type ValueSourceType + +// InputValues is a map of InputValue instances. +type InputValues map[string]*InputValue + +// InputValuesFromCaller turns the given map of naked values into an +// InputValues that attributes each value to "a caller", using the source +// type ValueFromCaller. This is primarily useful for testing purposes. // -// The given module tree doesn't need to be loaded. -func Variables( - m *module.Tree, - override map[string]interface{}) (map[string]interface{}, error) { - result := make(map[string]interface{}) - - // Variables are loaded in the following sequence. Each additional step - // will override conflicting variable keys from prior steps: - // - // * Take default values from config - // * Take values from TF_VAR_x env vars - // * Take values specified in the "override" param which is usually - // from -var, -var-file, etc. - // - - // First load from the config - for _, v := range m.Config().Variables { - // If the var has no default, ignore - if v.Default == nil { - continue +// This should not be used as a general way to convert map[string]cty.Value +// into InputValues, since in most real cases we want to set a suitable +// other SourceType and possibly SourceRange value. +func InputValuesFromCaller(vals map[string]cty.Value) InputValues { + ret := make(InputValues, len(vals)) + for k, v := range vals { + ret[k] = &InputValue{ + Value: v, + SourceType: ValueFromCaller, } + } + return ret +} - // If the type isn't a string, we use it as-is since it is a rich type - if v.Type() != config.VariableTypeString { - result[v.Name] = v.Default - continue +// Override merges the given value maps with the receiver, overriding any +// conflicting keys so that the latest definition wins. +func (vv InputValues) Override(others ...InputValues) InputValues { + // FIXME: This should check to see if any of the values are maps and + // merge them if so, in order to preserve the behavior from prior to + // Terraform 0.12. 
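+ // Illustration (not in the original code): for vv.Override(fileVals, cliVals),
+ // a key present in all three maps ends up with the value from cliVals,
+ // since later arguments win over earlier ones and over the receiver.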
+ ret := make(InputValues)
+ for k, v := range vv {
+ ret[k] = v
+ }
+ for _, other := range others {
+ for k, v := range other {
+ ret[k] = v
 }
+ }
+ return ret
+}

- // v.Default has already been parsed as HCL but it may be an int type
- switch typedDefault := v.Default.(type) {
- case string:
- if typedDefault == "" {
- continue
- }
- result[v.Name] = typedDefault
- case int, int64:
- result[v.Name] = fmt.Sprintf("%d", typedDefault)
- case float32, float64:
- result[v.Name] = fmt.Sprintf("%f", typedDefault)
- case bool:
- result[v.Name] = fmt.Sprintf("%t", typedDefault)
- default:
- panic(fmt.Sprintf(
- "Unknown default var type: %T\n\n"+
- "THIS IS A BUG. Please report it.",
- v.Default))
- }
+// JustValues returns a map that just includes the values, discarding the
+// source information.
+func (vv InputValues) JustValues() map[string]cty.Value {
+ ret := make(map[string]cty.Value, len(vv))
+ for k, v := range vv {
+ ret[k] = v.Value
 }
+ return ret
+}

- // Load from env vars
- for _, v := range os.Environ() {
- if !strings.HasPrefix(v, VarEnvPrefix) {
+// DefaultVariableValues returns an InputValues map representing the default
+// values specified for variables in the given configuration map.
+func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
+ ret := make(InputValues)
+ for k, c := range configs {
+ if c.Default == cty.NilVal {
 continue
 }
+ ret[k] = &InputValue{
+ Value: c.Default,
+ SourceType: ValueFromConfig,
+ SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
+ }
+ }
+ return ret
+}

- // Strip off the prefix and get the value after the first "="
- idx := strings.Index(v, "=")
- k := v[len(VarEnvPrefix):idx]
- v = v[idx+1:]
-
- // Override the configuration-default values. Note that *not* finding the variable
- // in configuration is OK, as we don't want to preclude people from having multiple
- // sets of TF_VAR_whatever in their environment even if it is a little weird.
- for _, schema := range m.Config().Variables {
- if schema.Name != k {
- continue
- }
-
- varType := schema.Type()
- varVal, err := parseVariableAsHCL(k, v, varType)
- if err != nil {
- return nil, err
- }
+// SameValues returns true if the given InputValues has the same values as
+// the receiver, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) SameValues(other InputValues) bool {
+ if len(vv) != len(other) {
+ return false
+ }

- switch varType {
- case config.VariableTypeMap:
- if err := varSetMap(result, k, varVal); err != nil {
- return nil, err
- }
- default:
- result[k] = varVal
- }
+ for k, v := range vv {
+ ov, exists := other[k]
+ if !exists {
+ return false
+ }
+ if !v.Value.RawEquals(ov.Value) {
+ return false
 }
 }

- // Load from overrides
- for k, v := range override {
- for _, schema := range m.Config().Variables {
- if schema.Name != k {
- continue
- }
+ return true
+}

- switch schema.Type() {
- case config.VariableTypeList:
- result[k] = v
- case config.VariableTypeMap:
- if err := varSetMap(result, k, v); err != nil {
- return nil, err
- }
- case config.VariableTypeString:
- // Convert to a string and set. We don't catch any errors
- // here because the validation step later should catch
- // any type errors.
- var strVal string
- if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
- result[k] = strVal
- } else {
- result[k] = v
- }
- default:
- panic(fmt.Sprintf(
- "Unhandled var type: %T\n\n"+
- "THIS IS A BUG. Please report it.",
- schema.Type()))
- }
+// HasValues returns true if the receiver has the same values as in the given
+// map, disregarding the source types and source ranges.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
+ if len(vv) != len(vals) {
+ return false
+ }
+
+ for k, v := range vv {
+ oVal, exists := vals[k]
+ if !exists {
+ return false
+ }
+ if !v.Value.RawEquals(oVal) {
+ return false
 }
 }

- return result, nil
+ return true
 }

-// varSetMap sets or merges the map in "v" with the key "k" in the
-// "current" set of variables. This is just a private function to remove
-// duplicate logic in Variables
-func varSetMap(current map[string]interface{}, k string, v interface{}) error {
- existing, ok := current[k]
- if !ok {
- current[k] = v
- return nil
+// Identical returns true if the given InputValues has the same values,
+// source types, and source ranges as the receiver.
+//
+// Values are compared using the cty "RawEquals" method, which means that
+// unknown values can be considered equal to one another if they are of the
+// same type.
+//
+// This method is primarily for testing. For most practical purposes, it's
+// better to use SameValues or HasValues.
+func (vv InputValues) Identical(other InputValues) bool {
+ if len(vv) != len(other) {
+ return false
 }

- existingMap, ok := existing.(map[string]interface{})
- if !ok {
- panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+ for k, v := range vv {
+ ov, exists := other[k]
+ if !exists {
+ return false
+ }
+ if !v.Value.RawEquals(ov.Value) {
+ return false
+ }
+ if v.SourceType != ov.SourceType {
+ return false
+ }
+ if v.SourceRange != ov.SourceRange {
+ return false
+ }
 }

- switch typedV := v.(type) {
- case []map[string]interface{}:
- for newKey, newVal := range typedV[0] {
- existingMap[newKey] = newVal
+ return true
+}
+
+// checkInputVariables ensures that variable values supplied at the UI conform
+// to their corresponding declarations in configuration.
+//
+// The set of values is considered valid only if the returned diagnostics
+// does not contain errors. A valid set of values may still produce warnings,
+// which should be returned to the user.
+func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
+ var diags tfdiags.Diagnostics
+
+ for name, vc := range vcs {
+ val, isSet := vs[name]
+ if !isSet {
+ // Always an error, since the caller should already have included
+ // default values from the configuration in the values map.
+ diags = diags.Append(tfdiags.Sourceless(
+ tfdiags.Error,
+ "Unassigned variable",
+ fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
+ ))
+ continue
 }
- case map[string]interface{}:
- for newKey, newVal := range typedV {
- existingMap[newKey] = newVal
+
+ wantType := vc.Type
+
+ // A given value is valid if it can convert to the desired type.
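+ // For example, cty.StringVal("5") can convert to cty.Number, while a
+ // list value has no conversion to cty.String and would produce an error
+ // here. (These examples are illustrative, not from the original code.)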
+ _, err := convert.Convert(val.Value, wantType) + if err != nil { + switch val.SourceType { + case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile: + // We have source location information for these. + diags = diags.Append(&hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid value for input variable", + Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err), + Subject: val.SourceRange.ToHCL().Ptr(), + }) + case ValueFromEnvVar: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromCLIArg: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err), + )) + case ValueFromInput: + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err), + )) + default: + // The above gets us good coverage for the situations users + // are likely to encounter with their own inputs. The other + // cases are generally implementation bugs, so we'll just + // use a generic error for these. + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Invalid value for input variable", + fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err), + )) + } } - default: - return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v)) } - return nil + + // Check for any variables that are assigned without being configured. + // This is always an implementation error in the caller, because we + // expect undefined variables to be caught during context construction + // where there is better context to report it well. + for name := range vs { + if _, defined := vcs[name]; !defined { + diags = diags.Append(tfdiags.Sourceless( + tfdiags.Error, + "Value assigned to undeclared variable", + fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name), + )) + } + } + + return diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go index ada5dcc3..ac730154 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/version.go +++ b/vendor/github.com/hashicorp/terraform/terraform/version.go @@ -1,31 +1,10 @@ package terraform import ( - "fmt" - - "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform/version" ) -// The main version number that is being run at the moment. -const Version = "0.9.3" - -// A pre-release marker for the version. If this is "" (empty string) -// then it means that it is a final release. Otherwise, this is a pre-release -// such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" - -// SemVersion is an instance of version.Version. This has the secondary -// benefit of verifying during tests and init time that our version is a -// proper semantic version, which should always be the case. -var SemVersion = version.Must(version.NewVersion(Version)) - -// VersionHeader is the header name used to send the current terraform version -// in http requests. 
-const VersionHeader = "Terraform-Version"
-
+// TODO: update providers to use the version package directly
 func VersionString() string {
- if VersionPrerelease != "" {
- return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
- }
- return Version
+ return version.String()
 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
index 3cbbf560..61423c21 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -3,67 +3,60 @@ package terraform
 import (
 "fmt"

- "github.com/hashicorp/go-version"
- "github.com/hashicorp/terraform/config"
- "github.com/hashicorp/terraform/config/module"
-)
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/hashicorp/terraform/tfdiags"

-// checkRequiredVersion verifies that any version requirements specified by
-// the configuration are met.
-//
-// This checks the root module as well as any additional version requirements
-// from child modules.
-//
-// This is tested in context_test.go.
-func checkRequiredVersion(m *module.Tree) error {
- // Check any children
- for _, c := range m.Children() {
- if err := checkRequiredVersion(c); err != nil {
- return err
- }
- }
+ "github.com/hashicorp/terraform/configs"

- var tf *config.Terraform
- if c := m.Config(); c != nil {
- tf = c.Terraform
- }
+ tfversion "github.com/hashicorp/terraform/version"
+)

- // If there is no Terraform config or the required version isn't set,
- // we move on.
- if tf == nil || tf.RequiredVersion == "" {
+// CheckCoreVersionRequirements visits each of the modules in the given
+// configuration tree and verifies that any given Core version constraints
+// match the version of Terraform Core that is being used.
+//
+// The returned diagnostics will contain errors if any constraints do not match.
+// The returned diagnostics might also include warnings, which should be
+// displayed to the user.
+func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics {
+ if config == nil {
 return nil
 }

- // Path for errors
- module := "root"
- if path := normalizeModulePath(m.Path()); len(path) > 1 {
- module = modulePrefixStr(path)
- }
-
- // Check this version requirement of this module
- cs, err := version.NewConstraint(tf.RequiredVersion)
- if err != nil {
- return fmt.Errorf(
- "%s: terraform.required_version %q syntax error: %s",
- module,
- tf.RequiredVersion, err)
+ var diags tfdiags.Diagnostics
+ module := config.Module
+
+ for _, constraint := range module.CoreVersionConstraints {
+ if !constraint.Required.Check(tfversion.SemVer) {
+ switch {
+ case len(config.Path) == 0:
+ diags = diags.Append(&hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unsupported Terraform Core version",
+ Detail: fmt.Sprintf(
+ "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
+ tfversion.String(),
+ ),
+ Subject: &constraint.DeclRange,
+ })
+ default:
+ diags = diags.Append(&hcl.Diagnostic{
+ Severity: hcl.DiagError,
+ Summary: "Unsupported Terraform Core version",
+ Detail: fmt.Sprintf(
+ "Module %s (from %s) does not support Terraform version %s. 
To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.", + config.Path, config.SourceAddr, tfversion.String(), + ), + Subject: &constraint.DeclRange, + }) + } + } } - if !cs.Check(SemVersion) { - return fmt.Errorf( - "The currently running version of Terraform doesn't meet the\n"+ - "version requirements explicitly specified by the configuration.\n"+ - "Please use the required version or update the configuration.\n"+ - "Note that version requirements are usually set for a reason, so\n"+ - "we recommend verifying with whoever set the version requirements\n"+ - "prior to making any manual changes.\n\n"+ - " Module: %s\n"+ - " Required version: %s\n"+ - " Current version: %s", - module, - tf.RequiredVersion, - SemVersion) + for _, c := range config.Children { + childDiags := CheckCoreVersionRequirements(c) + diags = diags.Append(childDiags) } - return nil + return diags } diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index cbd78dd9..0666aa5f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -2,15 +2,30 @@ package terraform -import "fmt" +import "strconv" -const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[walkInvalid-0] + _ = x[walkApply-1] + _ = x[walkPlan-2] + _ = x[walkPlanDestroy-3] + _ = x[walkRefresh-4] + _ = x[walkValidate-5] + _ = x[walkDestroy-6] + _ = x[walkImport-7] + _ = x[walkEval-8] +} + +const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval" -var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} +var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" } return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go new file mode 100644 index 00000000..8e41f46e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go @@ -0,0 +1,68 @@ +package tfdiags + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/zclconf/go-cty/cty" +) + +// FormatCtyPath is a helper function to produce a user-friendly string +// representation of a cty.Path. The result uses a syntax similar to the +// HCL expression language in the hope of it being familiar to users. 
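+//
+// For example (an illustration, not from the original code), a path made of
+// cty.GetAttrStep{Name: "foo"} followed by cty.IndexStep{Key: cty.NumberIntVal(0)}
+// renders as ".foo[0]".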
+func FormatCtyPath(path cty.Path) string {
+ var buf bytes.Buffer
+ for _, step := range path {
+ switch ts := step.(type) {
+ case cty.GetAttrStep:
+ fmt.Fprintf(&buf, ".%s", ts.Name)
+ case cty.IndexStep:
+ buf.WriteByte('[')
+ key := ts.Key
+ keyTy := key.Type()
+ switch {
+ case key.IsNull():
+ buf.WriteString("null")
+ case !key.IsKnown():
+ buf.WriteString("(not yet known)")
+ case keyTy == cty.Number:
+ bf := key.AsBigFloat()
+ buf.WriteString(bf.Text('g', -1))
+ case keyTy == cty.String:
+ buf.WriteString(strconv.Quote(key.AsString()))
+ default:
+ buf.WriteString("...")
+ }
+ buf.WriteByte(']')
+ }
+ }
+ return buf.String()
+}
+
+// FormatError is a helper function to produce a user-friendly string
+// representation of certain special error types that we might want to
+// include in diagnostic messages.
+//
+// This currently has special behavior only for cty.PathError, where a
+// non-empty path is rendered in an HCL-like syntax as context.
+func FormatError(err error) string {
+ perr, ok := err.(cty.PathError)
+ if !ok || len(perr.Path) == 0 {
+ return err.Error()
+ }
+
+ return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
+}
+
+// FormatErrorPrefixed is like FormatError except that it presents any path
+// information after the given prefix string, which is assumed to contain
+// an HCL syntax representation of the value that errors are relative to.
+func FormatErrorPrefixed(err error, prefix string) string {
+ perr, ok := err.(cty.PathError)
+ if !ok || len(perr.Path) == 0 {
+ return fmt.Sprintf("%s: %s", prefix, err.Error())
+ }
+
+ return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
new file mode 100644
index 00000000..25b21403
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
@@ -0,0 +1,372 @@
+package tfdiags
+
+import (
+ "github.com/hashicorp/hcl2/hcl"
+ "github.com/zclconf/go-cty/cty"
+ "github.com/zclconf/go-cty/cty/gocty"
+)
+
+// The "contextual" family of diagnostics are designed to allow separating
+// the detection of a problem from placing that problem in context. For
+// example, some code that is validating an object extracted from configuration
+// may not have access to the configuration that generated it, but can still
+// report problems within that object which the caller can then place in
+// context by calling InConfigBody on the returned diagnostics.
+//
+// When contextual diagnostics are used, the documentation for a method must
+// be very explicit about what context is implied for any diagnostics returned,
+// to help ensure the expected result.
+
+// contextualFromConfigBody is an interface type implemented by diagnostic types
+// that can elaborate themselves when given information about the configuration
+// body they are embedded in.
+//
+// Usually this entails extracting source location information in order to
+// populate the "Subject" range.
+type contextualFromConfigBody interface {
+ ElaborateFromConfigBody(hcl.Body) Diagnostic
+}
+
+// InConfigBody returns a copy of the receiver with any config-contextual
+// diagnostics elaborated in the context of the given body.
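+//
+// A sketch of the intended call pattern (the validate function here is
+// illustrative, not part of this package):
+//
+//	diags := validateObject(obj)     // diagnostics with no source info yet
+//	diags = diags.InConfigBody(body) // now elaborated with source locations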
+func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
+ if len(d) == 0 {
+ return nil
+ }
+
+ ret := make(Diagnostics, len(d))
+ for i, srcDiag := range d {
+ if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
+ ret[i] = cd.ElaborateFromConfigBody(body)
+ } else {
+ ret[i] = srcDiag
+ }
+ }
+
+ return ret
+}
+
+// AttributeValue returns a diagnostic about an attribute value in an implied current
+// configuration context. This should be returned only from functions whose
+// interface specifies a clear configuration context that this will be
+// resolved in.
+//
+// The given path is relative to the implied configuration context. To describe
+// a top-level attribute, it should be a single-element cty.Path with a
+// cty.GetAttrStep. It's assumed that the path refers into a structure
+// that would be produced by our conventions in the configschema package; it
+// may return unexpected results for structures that can't be represented by
+// configschema.
+//
+// Since mapping attribute paths back onto configuration is an imprecise
+// operation (e.g. dynamic block generation may cause the same block to be
+// evaluated multiple times) the diagnostic detail should include the attribute
+// name and other context required to help the user understand what is being
+// referenced in case the identified source range is not unique.
+//
+// The returned diagnostic will not have source location information until
+// context is applied to the containing diagnostics using diags.InConfigBody.
+// After context is applied, the source location is the value assigned to the
+// named attribute, or the containing body's "missing item range" if no
+// value is present.
+func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic {
+ return &attributeDiagnostic{
+ diagnosticBase: diagnosticBase{
+ severity: severity,
+ summary: summary,
+ detail: detail,
+ },
+ attrPath: attrPath,
+ }
+}
+
+// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains
+// one. Normally this is not accessed directly, and instead the config body is
+// added to the Diagnostic to create a more complete message for the user. In
+// some cases however, we may want to know just the name of the attribute that
+// generated the Diagnostic message.
+// This returns a nil cty.Path if it does not exist in the Diagnostic.
+func GetAttribute(d Diagnostic) cty.Path {
+ if d, ok := d.(*attributeDiagnostic); ok {
+ return d.attrPath
+ }
+ return nil
+}
+
+type attributeDiagnostic struct {
+ diagnosticBase
+ attrPath cty.Path
+ subject *SourceRange // populated only after ElaborateFromConfigBody
+}
+
+// ElaborateFromConfigBody finds the most accurate possible source location
+// for a diagnostic's attribute path within the given body.
+//
+// Backing out from a path back to a source location is not always entirely
+// possible because we lose some information in the decoding process, so
+// if an exact position cannot be found then the returned diagnostic will
+// refer to a position somewhere within the containing body, which is assumed
+// to be better than no location at all.
+//
+// If possible it is generally better to report an error at a layer where
+// source location information is still available, for more accuracy. This
+// is not always possible due to system architecture, so this serves as a
+// "best effort" fallback behavior for such situations.
+func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic { + if len(d.attrPath) < 1 { + // Should never happen, but we'll allow it rather than crashing. + return d + } + + if d.subject != nil { + // Don't modify an already-elaborated diagnostic. + return d + } + + ret := *d + + // This function will often end up re-decoding values that were already + // decoded by an earlier step. This is non-ideal but is architecturally + // more convenient than arranging for source location information to be + // propagated to every place in Terraform, and this happens only in the + // presence of errors where performance isn't a concern. + + traverse := d.attrPath[:] + final := d.attrPath[len(d.attrPath)-1] + + // Index should never be the first step + // as indexing of top blocks (such as resources & data sources) + // is handled elsewhere + if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep { + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + return &ret + } + + // Process index separately + idxStep, hasIdx := final.(cty.IndexStep) + if hasIdx { + final = d.attrPath[len(d.attrPath)-2] + traverse = d.attrPath[:len(d.attrPath)-1] + } + + // If we have more than one step after removing index + // then we'll first try to traverse to a child body + // corresponding to the requested path. + if len(traverse) > 1 { + body = traversePathSteps(traverse, body) + } + + // Default is to indicate a missing item in the deepest body we reached + // while traversing. + subject := SourceRangeFromHCL(body.MissingItemRange()) + ret.subject = &subject + + // Once we get here, "final" should be a GetAttr step that maps to an + // attribute in our current body. + finalStep, isAttr := final.(cty.GetAttrStep) + if !isAttr { + return &ret + } + + content, _, contentDiags := body.PartialContent(&hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: finalStep.Name, + Required: true, + }, + }, + }) + if contentDiags.HasErrors() { + return &ret + } + + if attr, ok := content.Attributes[finalStep.Name]; ok { + hclRange := attr.Expr.Range() + if hasIdx { + // Try to be more precise by finding index range + hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr) + } + subject = SourceRangeFromHCL(hclRange) + ret.subject = &subject + } + + return &ret +} + +func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body { + for i := 0; i < len(traverse); i++ { + step := traverse[i] + + switch tStep := step.(type) { + case cty.GetAttrStep: + + var next cty.PathStep + if i < (len(traverse) - 1) { + next = traverse[i+1] + } + + // Will be indexing into our result here? + var indexType cty.Type + var indexVal cty.Value + if nextIndex, ok := next.(cty.IndexStep); ok { + indexVal = nextIndex.Key + indexType = indexVal.Type() + i++ // skip over the index on subsequent iterations + } + + var blockLabelNames []string + if indexType == cty.String { + // Map traversal means we expect one label for the key. + blockLabelNames = []string{"key"} + } + + // For intermediate steps we expect to be referring to a child + // block, so we'll attempt decoding under that assumption. 
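+ // (For example, a path like .ingress[0].cidr arrives here with
+ // tStep.Name == "ingress", so we decode the "ingress" blocks and let
+ // the index step select among them. The names are illustrative.)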
+ content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
+ Blocks: []hcl.BlockHeaderSchema{
+ {
+ Type: tStep.Name,
+ LabelNames: blockLabelNames,
+ },
+ },
+ })
+ if contentDiags.HasErrors() {
+ return body
+ }
+ filtered := make([]*hcl.Block, 0, len(content.Blocks))
+ for _, block := range content.Blocks {
+ if block.Type == tStep.Name {
+ filtered = append(filtered, block)
+ }
+ }
+ if len(filtered) == 0 {
+ // Step doesn't refer to a block
+ continue
+ }
+
+ switch indexType {
+ case cty.NilType: // no index at all
+ if len(filtered) != 1 {
+ return body
+ }
+ body = filtered[0].Body
+ case cty.Number:
+ var idx int
+ err := gocty.FromCtyValue(indexVal, &idx)
+ if err != nil || idx >= len(filtered) {
+ return body
+ }
+ body = filtered[idx].Body
+ case cty.String:
+ key := indexVal.AsString()
+ var block *hcl.Block
+ for _, candidate := range filtered {
+ if candidate.Labels[0] == key {
+ block = candidate
+ break
+ }
+ }
+ if block == nil {
+ // No block with this key, so we'll just indicate a
+ // missing item in the containing block.
+ return body
+ }
+ body = block.Body
+ default:
+ // Should never happen, because only string and numeric indices
+ // are supported by cty collections.
+ return body
+ }
+
+ default:
+ // For any other kind of step, we'll just return our current body
+ // as the subject and accept that this is a little inaccurate.
+ return body
+ }
+ }
+ return body
+}
+
+func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range {
+ switch idxStep.Key.Type() {
+ case cty.Number:
+ var idx int
+ err := gocty.FromCtyValue(idxStep.Key, &idx)
+ items, diags := hcl.ExprList(attr.Expr)
+ if diags.HasErrors() {
+ return attr.Expr.Range()
+ }
+ if err != nil || idx >= len(items) {
+ return attr.NameRange
+ }
+ return items[idx].Range()
+ case cty.String:
+ pairs, diags := hcl.ExprMap(attr.Expr)
+ if diags.HasErrors() {
+ return attr.Expr.Range()
+ }
+ stepKey := idxStep.Key.AsString()
+ for _, kvPair := range pairs {
+ key, err := kvPair.Key.Value(nil)
+ if err != nil {
+ return attr.Expr.Range()
+ }
+ if key.AsString() == stepKey {
+ startRng := kvPair.Value.StartRange()
+ return startRng
+ }
+ }
+ return attr.NameRange
+ }
+ return attr.Expr.Range()
+}
+
+func (d *attributeDiagnostic) Source() Source {
+ return Source{
+ Subject: d.subject,
+ }
+}
+
+// WholeContainingBody returns a diagnostic about the body that is an implied
+// current configuration context. This should be returned only from
+// functions whose interface specifies a clear configuration context that this
+// will be resolved in.
+//
+// The returned diagnostic will not have source location information until
+// context is applied to the containing diagnostics using diags.InConfigBody.
+// After context is applied, the source location is currently the missing item
+// range of the body. In future, this may change to some other suitable
+// part of the containing body.
+func WholeContainingBody(severity Severity, summary, detail string) Diagnostic {
+ return &wholeBodyDiagnostic{
+ diagnosticBase: diagnosticBase{
+ severity: severity,
+ summary: summary,
+ detail: detail,
+ },
+ }
+}
+
+type wholeBodyDiagnostic struct {
+ diagnosticBase
+ subject *SourceRange // populated only after ElaborateFromConfigBody
+}
+
+func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic {
+ if d.subject != nil {
+ // Don't modify an already-elaborated diagnostic.
+ return d
+ }
+
+ ret := *d
+ rng := SourceRangeFromHCL(body.MissingItemRange())
+ ret.subject = &rng
+ return &ret
+}
+
+func (d *wholeBodyDiagnostic) Source() Source {
+ return Source{
+ Subject: d.subject,
+ }
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
new file mode 100644
index 00000000..c91ba9a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -0,0 +1,40 @@
+package tfdiags
+
+import (
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+type Diagnostic interface {
+ Severity() Severity
+ Description() Description
+ Source() Source
+
+ // FromExpr returns the expression-related context for the diagnostic, if
+ // available. Returns nil if the diagnostic is not related to an
+ // expression evaluation.
+ FromExpr() *FromExpr
+}
+
+type Severity rune
+
+//go:generate stringer -type=Severity
+
+const (
+ Error Severity = 'E'
+ Warning Severity = 'W'
+)
+
+type Description struct {
+ Summary string
+ Detail string
+}
+
+type Source struct {
+ Subject *SourceRange
+ Context *SourceRange
+}
+
+type FromExpr struct {
+ Expression hcl.Expression
+ EvalContext *hcl.EvalContext
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
new file mode 100644
index 00000000..50bf9d8e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
+package tfdiags
+
+// diagnosticBase can be embedded in other diagnostic structs to get
+// default implementations of Severity and Description. This type also
+// has default implementations of Source and FromExpr that return no source
+// location or expression-related information, so embedders should generally
+// override those methods to return more useful results where possible.
+type diagnosticBase struct {
+ severity Severity
+ summary string
+ detail string
+}
+
+func (d diagnosticBase) Severity() Severity {
+ return d.severity
+}
+
+func (d diagnosticBase) Description() Description {
+ return Description{
+ Summary: d.summary,
+ Detail: d.detail,
+ }
+}
+
+func (d diagnosticBase) Source() Source {
+ return Source{}
+}
+
+func (d diagnosticBase) FromExpr() *FromExpr {
+ return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
new file mode 100644
index 00000000..465b230f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -0,0 +1,330 @@
+package tfdiags
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/hashicorp/errwrap"
+ multierror "github.com/hashicorp/go-multierror"
+ "github.com/hashicorp/hcl2/hcl"
+)
+
+// Diagnostics is a list of diagnostics. Diagnostics is intended to be used
+// where a Go "error" might normally be used, allowing richer information
+// to be conveyed (more context, support for warnings).
+//
+// A nil Diagnostics is a valid, empty diagnostics list, thus allowing
+// heap allocation to be avoided in the common case where there are no
+// diagnostics to report at all.
+type Diagnostics []Diagnostic
+
+// Append is the main interface for constructing Diagnostics lists, taking
+// an existing list (which may be nil) and appending the new objects to it
+// after normalizing them to be implementations of Diagnostic.
+// +// The usual pattern for a function that natively "speaks" diagnostics is: +// +// // Create a nil Diagnostics at the start of the function +// var diags diag.Diagnostics +// +// // At later points, build on it if errors / warnings occur: +// foo, err := DoSomethingRisky() +// if err != nil { +// diags = diags.Append(err) +// } +// +// // Eventually return the result and diagnostics in place of error +// return result, diags +// +// Append accepts a variety of different diagnostic-like types, including +// native Go errors and HCL diagnostics. It also knows how to unwrap +// a multierror.Error into separate error diagnostics. It can be passed +// another Diagnostics to concatenate the two lists. If given something +// it cannot handle, this function will panic. +func (diags Diagnostics) Append(new ...interface{}) Diagnostics { + for _, item := range new { + if item == nil { + continue + } + + switch ti := item.(type) { + case Diagnostic: + diags = append(diags, ti) + case Diagnostics: + diags = append(diags, ti...) // flatten + case diagnosticsAsError: + diags = diags.Append(ti.Diagnostics) // unwrap + case NonFatalError: + diags = diags.Append(ti.Diagnostics) // unwrap + case hcl.Diagnostics: + for _, hclDiag := range ti { + diags = append(diags, hclDiagnostic{hclDiag}) + } + case *hcl.Diagnostic: + diags = append(diags, hclDiagnostic{ti}) + case *multierror.Error: + for _, err := range ti.Errors { + diags = append(diags, nativeError{err}) + } + case error: + switch { + case errwrap.ContainsType(ti, Diagnostics(nil)): + // If we have an errwrap wrapper with a Diagnostics hiding + // inside then we'll unpick it here to get access to the + // individual diagnostics. + diags = diags.Append(errwrap.GetType(ti, Diagnostics(nil))) + case errwrap.ContainsType(ti, hcl.Diagnostics(nil)): + // Likewise, if we have HCL diagnostics we'll unpick that too. + diags = diags.Append(errwrap.GetType(ti, hcl.Diagnostics(nil))) + default: + diags = append(diags, nativeError{ti}) + } + default: + panic(fmt.Errorf("can't construct diagnostic(s) from %T", item)) + } + } + + // Given the above, we should never end up with a non-nil empty slice + // here, but we'll make sure of that so callers can rely on empty == nil + if len(diags) == 0 { + return nil + } + + return diags +} + +// HasErrors returns true if any of the diagnostics in the list have +// a severity of Error. +func (diags Diagnostics) HasErrors() bool { + for _, diag := range diags { + if diag.Severity() == Error { + return true + } + } + return false +} + +// ForRPC returns a version of the receiver that has been simplified so that +// it is friendly to RPC protocols. +// +// Currently this means that it can be serialized with encoding/gob and +// subsequently re-inflated. It may later grow to include other serialization +// formats. +// +// Note that this loses information about the original objects used to +// construct the diagnostics, so e.g. the errwrap API will not work as +// expected on an error-wrapped Diagnostics that came from ForRPC. +func (diags Diagnostics) ForRPC() Diagnostics { + ret := make(Diagnostics, len(diags)) + for i := range diags { + ret[i] = makeRPCFriendlyDiag(diags[i]) + } + return ret +} + +// Err flattens a diagnostics list into a single Go error, or to nil +// if the diagnostics list does not include any error-level diagnostics. 
+// +// This can be used to smuggle diagnostics through an API that deals in +// native errors, but unfortunately it will lose naked warnings (warnings +// that aren't accompanied by at least one error) since such APIs have no +// mechanism through which to report these. +// +// return result, diags.Error() +func (diags Diagnostics) Err() error { + if !diags.HasErrors() { + return nil + } + return diagnosticsAsError{diags} +} + +// ErrWithWarnings is similar to Err except that it will also return a non-nil +// error if the receiver contains only warnings. +// +// In the warnings-only situation, the result is guaranteed to be of dynamic +// type NonFatalError, allowing diagnostics-aware callers to type-assert +// and unwrap it, treating it as non-fatal. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) ErrWithWarnings() error { + if len(diags) == 0 { + return nil + } + if diags.HasErrors() { + return diags.Err() + } + return NonFatalError{diags} +} + +// NonFatalErr is similar to Err except that it always returns either nil +// (if there are no diagnostics at all) or NonFatalError. +// +// This allows diagnostics to be returned over an error return channel while +// being explicit that the diagnostics should not halt processing. +// +// This should be used only in contexts where the caller is able to recognize +// and handle NonFatalError. For normal callers that expect a lack of errors +// to be signaled by nil, use just Diagnostics.Err. +func (diags Diagnostics) NonFatalErr() error { + if len(diags) == 0 { + return nil + } + return NonFatalError{diags} +} + +// Sort applies an ordering to the diagnostics in the receiver in-place. +// +// The ordering is: warnings before errors, sourceless before sourced, +// short source paths before long source paths, and then ordering by +// position within each file. +// +// Diagnostics that do not differ by any of these sortable characteristics +// will remain in the same relative order after this method returns. +func (diags Diagnostics) Sort() { + sort.Stable(sortDiagnostics(diags)) +} + +type diagnosticsAsError struct { + Diagnostics +} + +func (dae diagnosticsAsError) Error() string { + diags := dae.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + for _, diag := range dae.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// WrappedErrors is an implementation of errwrap.Wrapper so that an error-wrapped +// diagnostics object can be picked apart by errwrap-aware code. 
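+//
+// For example (an illustrative sketch, not from the upstream docs), an
+// errwrap traversal such as:
+//
+//     errwrap.Walk(diags.Err(), func(err error) {
+//         // visits each wrapped native error in turn
+//     })
+//
+// will be handed the individual errors returned by this method.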
+func (dae diagnosticsAsError) WrappedErrors() []error { + var errs []error + for _, diag := range dae.Diagnostics { + if wrapper, isErr := diag.(nativeError); isErr { + errs = append(errs, wrapper.err) + } + } + return errs +} + +// NonFatalError is a special error type, returned by +// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr, +// that indicates that the wrapped diagnostics should be treated as non-fatal. +// Callers can conditionally type-assert an error to this type in order to +// detect the non-fatal scenario and handle it in a different way. +type NonFatalError struct { + Diagnostics +} + +func (woe NonFatalError) Error() string { + diags := woe.Diagnostics + switch { + case len(diags) == 0: + // should never happen, since we don't create this wrapper if + // there are no diagnostics in the list. + return "no errors or warnings" + case len(diags) == 1: + desc := diags[0].Description() + if desc.Detail == "" { + return desc.Summary + } + return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail) + default: + var ret bytes.Buffer + if diags.HasErrors() { + fmt.Fprintf(&ret, "%d problems:\n", len(diags)) + } else { + fmt.Fprintf(&ret, "%d warnings:\n", len(diags)) + } + for _, diag := range woe.Diagnostics { + desc := diag.Description() + if desc.Detail == "" { + fmt.Fprintf(&ret, "\n- %s", desc.Summary) + } else { + fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail) + } + } + return ret.String() + } +} + +// sortDiagnostics is an implementation of sort.Interface +type sortDiagnostics []Diagnostic + +var _ sort.Interface = sortDiagnostics(nil) + +func (sd sortDiagnostics) Len() int { + return len(sd) +} + +func (sd sortDiagnostics) Less(i, j int) bool { + iD, jD := sd[i], sd[j] + iSev, jSev := iD.Severity(), jD.Severity() + iSrc, jSrc := iD.Source(), jD.Source() + + switch { + + case iSev != jSev: + return iSev == Warning + + case (iSrc.Subject == nil) != (jSrc.Subject == nil): + return iSrc.Subject == nil + + case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject: + iSubj := iSrc.Subject + jSubj := jSrc.Subject + switch { + case iSubj.Filename != jSubj.Filename: + // Path with fewer segments goes first if they are different lengths + sep := string(filepath.Separator) + iCount := strings.Count(iSubj.Filename, sep) + jCount := strings.Count(jSubj.Filename, sep) + if iCount != jCount { + return iCount < jCount + } + return iSubj.Filename < jSubj.Filename + case iSubj.Start.Byte != jSubj.Start.Byte: + return iSubj.Start.Byte < jSubj.Start.Byte + case iSubj.End.Byte != jSubj.End.Byte: + return iSubj.End.Byte < jSubj.End.Byte + } + fallthrough + + default: + // The remaining properties do not have a defined ordering, so + // we'll leave it unspecified. Since we use sort.Stable in + // the caller of this, the ordering of remaining items will + // be preserved. + return false + } +} + +func (sd sortDiagnostics) Swap(i, j int) { + sd[i], sd[j] = sd[j], sd[i] +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/doc.go b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go new file mode 100644 index 00000000..c427879e --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/doc.go @@ -0,0 +1,16 @@ +// Package tfdiags is a utility package for representing errors and +// warnings in a manner that allows us to produce good messages for the +// user. +// +// "diag" is short for "diagnostics", and is meant as a general word for +// feedback to a user about potential or actual problems. 
+// +// A design goal for this package is for it to be able to provide rich +// messaging where possible but to also be pragmatic about dealing with +// generic errors produced by system components that _can't_ provide +// such rich messaging. As a consequence, the main types in this package -- +// Diagnostics and Diagnostic -- are designed so that they can be "smuggled" +// over an error channel and then be unpacked at the other end, so that +// error diagnostics (at least) can transit through APIs that are not +// aware of this package. +package tfdiags diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go new file mode 100644 index 00000000..13f7a714 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go @@ -0,0 +1,28 @@ +package tfdiags + +// nativeError is a Diagnostic implementation that wraps a normal Go error +type nativeError struct { + err error +} + +var _ Diagnostic = nativeError{} + +func (e nativeError) Severity() Severity { + return Error +} + +func (e nativeError) Description() Description { + return Description{ + Summary: FormatError(e.err), + } +} + +func (e nativeError) Source() Source { + // No source information available for a native error + return Source{} +} + +func (e nativeError) FromExpr() *FromExpr { + // Native errors are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go new file mode 100644 index 00000000..f9aec41c --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go @@ -0,0 +1,87 @@ +package tfdiags + +import ( + "github.com/hashicorp/hcl2/hcl" +) + +// hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic +type hclDiagnostic struct { + diag *hcl.Diagnostic +} + +var _ Diagnostic = hclDiagnostic{} + +func (d hclDiagnostic) Severity() Severity { + switch d.diag.Severity { + case hcl.DiagWarning: + return Warning + default: + return Error + } +} + +func (d hclDiagnostic) Description() Description { + return Description{ + Summary: d.diag.Summary, + Detail: d.diag.Detail, + } +} + +func (d hclDiagnostic) Source() Source { + var ret Source + if d.diag.Subject != nil { + rng := SourceRangeFromHCL(*d.diag.Subject) + ret.Subject = &rng + } + if d.diag.Context != nil { + rng := SourceRangeFromHCL(*d.diag.Context) + ret.Context = &rng + } + return ret +} + +func (d hclDiagnostic) FromExpr() *FromExpr { + if d.diag.Expression == nil || d.diag.EvalContext == nil { + return nil + } + return &FromExpr{ + Expression: d.diag.Expression, + EvalContext: d.diag.EvalContext, + } +} + +// SourceRangeFromHCL constructs a SourceRange from the corresponding range +// type within the HCL package. +func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { + return SourceRange{ + Filename: hclRange.Filename, + Start: SourcePos{ + Line: hclRange.Start.Line, + Column: hclRange.Start.Column, + Byte: hclRange.Start.Byte, + }, + End: SourcePos{ + Line: hclRange.End.Line, + Column: hclRange.End.Column, + Byte: hclRange.End.Byte, + }, + } +} + +// ToHCL constructs a HCL Range from the receiving SourceRange. This is the +// opposite of SourceRangeFromHCL. 
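+//
+// Because both types carry the same filename and position fields, converting
+// a range to HCL and back is lossless (illustrative):
+//
+//     rng2 := SourceRangeFromHCL(rng.ToHCL()) // rng2 == rng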
+func (r SourceRange) ToHCL() hcl.Range { + return hcl.Range{ + Filename: r.Filename, + Start: hcl.Pos{ + Line: r.Start.Line, + Column: r.Start.Column, + Byte: r.Start.Byte, + }, + End: hcl.Pos{ + Line: r.End.Line, + Column: r.End.Column, + Byte: r.End.Byte, + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go new file mode 100644 index 00000000..485063b0 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go @@ -0,0 +1,59 @@ +package tfdiags + +import ( + "encoding/gob" +) + +type rpcFriendlyDiag struct { + Severity_ Severity + Summary_ string + Detail_ string + Subject_ *SourceRange + Context_ *SourceRange +} + +// rpcFriendlyDiag transforms a given diagnostic so that is more friendly to +// RPC. +// +// In particular, it currently returns an object that can be serialized and +// later re-inflated using gob. This definition may grow to include other +// serializations later. +func makeRPCFriendlyDiag(diag Diagnostic) Diagnostic { + desc := diag.Description() + source := diag.Source() + return &rpcFriendlyDiag{ + Severity_: diag.Severity(), + Summary_: desc.Summary, + Detail_: desc.Detail, + Subject_: source.Subject, + Context_: source.Context, + } +} + +func (d *rpcFriendlyDiag) Severity() Severity { + return d.Severity_ +} + +func (d *rpcFriendlyDiag) Description() Description { + return Description{ + Summary: d.Summary_, + Detail: d.Detail_, + } +} + +func (d *rpcFriendlyDiag) Source() Source { + return Source{ + Subject: d.Subject_, + Context: d.Context_, + } +} + +func (d rpcFriendlyDiag) FromExpr() *FromExpr { + // RPC-friendly diagnostics cannot preserve expression information because + // expressions themselves are not RPC-friendly. + return nil +} + +func init() { + gob.Register((*rpcFriendlyDiag)(nil)) +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go new file mode 100644 index 00000000..78a72106 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go @@ -0,0 +1,29 @@ +// Code generated by "stringer -type=Severity"; DO NOT EDIT. + +package tfdiags + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[Error-69] + _ = x[Warning-87] +} + +const ( + _Severity_name_0 = "Error" + _Severity_name_1 = "Warning" +) + +func (i Severity) String() string { + switch { + case i == 69: + return _Severity_name_0 + case i == 87: + return _Severity_name_1 + default: + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go new file mode 100644 index 00000000..b0f1ecd4 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go @@ -0,0 +1,30 @@ +package tfdiags + +type simpleWarning string + +var _ Diagnostic = simpleWarning("") + +// SimpleWarning constructs a simple (summary-only) warning diagnostic. 
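+//
+// For example (illustrative):
+//
+//     diags = diags.Append(tfdiags.SimpleWarning("this field is deprecated"))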
+func SimpleWarning(msg string) Diagnostic { + return simpleWarning(msg) +} + +func (e simpleWarning) Severity() Severity { + return Warning +} + +func (e simpleWarning) Description() Description { + return Description{ + Summary: string(e), + } +} + +func (e simpleWarning) Source() Source { + // No source information available for a simple warning + return Source{} +} + +func (e simpleWarning) FromExpr() *FromExpr { + // Simple warnings are not expression-related + return nil +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go new file mode 100644 index 00000000..3031168d --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/source_range.go @@ -0,0 +1,35 @@ +package tfdiags + +import ( + "fmt" + "os" + "path/filepath" +) + +type SourceRange struct { + Filename string + Start, End SourcePos +} + +type SourcePos struct { + Line, Column, Byte int +} + +// StartString returns a string representation of the start of the range, +// including the filename and the line and column numbers. +func (r SourceRange) StartString() string { + filename := r.Filename + + // We'll try to relative-ize our filename here so it's less verbose + // in the common case of being in the current working directory. If not, + // we'll just show the full path. + wd, err := os.Getwd() + if err == nil { + relFn, err := filepath.Rel(wd, filename) + if err == nil { + filename = relFn + } + } + + return fmt.Sprintf("%s:%d,%d", filename, r.Start.Line, r.Start.Column) +} diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go new file mode 100644 index 00000000..eaa27373 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go @@ -0,0 +1,13 @@ +package tfdiags + +// Sourceless creates and returns a diagnostic with no source location +// information. This is generally used for operational-type errors that are +// caused by or relate to the environment where Terraform is running rather +// than to the provided configuration. +func Sourceless(severity Severity, summary, detail string) Diagnostic { + return diagnosticBase{ + severity: severity, + summary: summary, + detail: detail, + } +} diff --git a/vendor/github.com/hashicorp/terraform/version.go b/vendor/github.com/hashicorp/terraform/version.go index 4959e0fe..36d16cb2 100644 --- a/vendor/github.com/hashicorp/terraform/version.go +++ b/vendor/github.com/hashicorp/terraform/version.go @@ -1,10 +1,12 @@ package main -import "github.com/hashicorp/terraform/terraform" +import ( + "github.com/hashicorp/terraform/version" +) // The git commit that was compiled. This will be filled in by the compiler. var GitCommit string -const Version = terraform.Version +var Version = version.Version -var VersionPrerelease = terraform.VersionPrerelease +var VersionPrerelease = version.Prerelease diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go new file mode 100644 index 00000000..1c07e272 --- /dev/null +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -0,0 +1,40 @@ +// The version package provides a location to set the release versions for all +// packages to consume, without creating import cycles. +// +// This package should not import any other terraform packages. 
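+//
+// Callers typically read the package-level variables or use String, which
+// joins Version and Prerelease, e.g. (illustrative):
+//
+//     fmt.Printf("Terraform v%s\n", version.String()) // e.g. "0.12.4-dev"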
+package version + +import ( + "fmt" + + version "github.com/hashicorp/go-version" +) + +// The main version number that is being run at the moment. +var Version = "0.12.4" + +// A pre-release marker for the version. If this is "" (empty string) +// then it means that it is a final release. Otherwise, this is a pre-release +// such as "dev" (in development), "beta", "rc1", etc. +var Prerelease = "dev" + +// SemVer is an instance of version.Version. This has the secondary +// benefit of verifying during tests and init time that our version is a +// proper semantic version, which should always be the case. +var SemVer *version.Version + +func init() { + SemVer = version.Must(version.NewVersion(Version)) +} + +// Header is the header name used to send the current terraform version +// in http requests. +const Header = "Terraform-Version" + +// String returns the complete version string, including prerelease +func String() string { + if Prerelease != "" { + return fmt.Sprintf("%s-%s", Version, Prerelease) + } + return Version +} diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/api/auth.go b/vendor/github.com/hashicorp/vault/api/auth.go new file mode 100644 index 00000000..da870c11 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth.go @@ -0,0 +1,11 @@ +package api + +// Auth is used to perform credential backend related operations. +type Auth struct { + c *Client +} + +// Auth is used to return the client for credential-backend API calls. 
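+//
+// For example (illustrative):
+//
+//     secret, err := client.Auth().Token().LookupSelf()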
+func (c *Client) Auth() *Auth { + return &Auth{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go new file mode 100644 index 00000000..6807c89c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -0,0 +1,276 @@ +package api + +import "context" + +// TokenAuth is used to perform token backend operations on Vault +type TokenAuth struct { + c *Client +} + +// Token is used to return the client for token-backend API calls +func (a *Auth) Token() *TokenAuth { + return &TokenAuth{c: a.c} +} + +func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create-orphan") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/create/"+roleName) + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Lookup(token string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/lookup") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { + r := c.c.NewRequest("POST", "/v1/auth/token/lookup-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) LookupSelf() (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/auth/token/lookup-self") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + "increment": increment, + }); err != nil { + return nil, err + } + + ctx, 
cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided +// token instead of the token attached to the client. +func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self") + r.ClientToken = token + + body := map[string]interface{}{"increment": increment} + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// RevokeAccessor revokes a token associated with the given accessor +// along with all the child tokens. +func (c *TokenAuth) RevokeAccessor(accessor string) error { + r := c.c.NewRequest("POST", "/v1/auth/token/revoke-accessor") + if err := r.SetJSONBody(map[string]interface{}{ + "accessor": accessor, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeOrphan revokes a token without revoking the tree underneath it (so +// child tokens are orphaned rather than revoked) +func (c *TokenAuth) RevokeOrphan(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-orphan") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeSelf revokes the token making the call. The `token` parameter is kept +// for backwards compatibility but is ignored; only the client's set token has +// an effect. +func (c *TokenAuth) RevokeSelf(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// RevokeTree is the "normal" revoke operation that revokes the given token and +// the entire tree underneath -- all of its child tokens, their child tokens, +// etc. 
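+//
+// For example (illustrative):
+//
+//     if err := client.Auth().Token().RevokeTree(token); err != nil {
+//         // the token (and its children) may not have been revoked
+//     }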
+func (c *TokenAuth) RevokeTree(token string) error { + r := c.c.NewRequest("PUT", "/v1/auth/token/revoke") + if err := r.SetJSONBody(map[string]interface{}{ + "token": token, + }); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +// TokenCreateRequest is the options structure for creating a token. +type TokenCreateRequest struct { + ID string `json:"id,omitempty"` + Policies []string `json:"policies,omitempty"` + Metadata map[string]string `json:"meta,omitempty"` + Lease string `json:"lease,omitempty"` + TTL string `json:"ttl,omitempty"` + ExplicitMaxTTL string `json:"explicit_max_ttl,omitempty"` + Period string `json:"period,omitempty"` + NoParent bool `json:"no_parent,omitempty"` + NoDefaultPolicy bool `json:"no_default_policy,omitempty"` + DisplayName string `json:"display_name"` + NumUses int `json:"num_uses"` + Renewable *bool `json:"renewable,omitempty"` + Type string `json:"type"` + EntityAlias string `json:"entity_alias"` +} diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go new file mode 100644 index 00000000..f1f076a9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -0,0 +1,837 @@ +package api + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + retryablehttp "github.com/hashicorp/go-retryablehttp" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/helper/parseutil" + "golang.org/x/net/http2" + "golang.org/x/time/rate" +) + +const EnvVaultAddress = "VAULT_ADDR" +const EnvVaultAgentAddr = "VAULT_AGENT_ADDR" +const EnvVaultCACert = "VAULT_CACERT" +const EnvVaultCAPath = "VAULT_CAPATH" +const EnvVaultClientCert = "VAULT_CLIENT_CERT" +const EnvVaultClientKey = "VAULT_CLIENT_KEY" +const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT" +const EnvVaultSkipVerify = "VAULT_SKIP_VERIFY" +const EnvVaultNamespace = "VAULT_NAMESPACE" +const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME" +const EnvVaultWrapTTL = "VAULT_WRAP_TTL" +const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" +const EnvVaultToken = "VAULT_TOKEN" +const EnvVaultMFA = "VAULT_MFA" +const EnvRateLimit = "VAULT_RATE_LIMIT" + +// WrappingLookupFunc is a function that, given an HTTP verb and a path, +// returns an optional string duration to be used for response wrapping (e.g. +// "15s", or simply "15"). The path will not begin with "/v1/" or "v1/" or "/", +// however, end-of-path forward slashes are not trimmed, so must match your +// called path precisely. +type WrappingLookupFunc func(operation, path string) string + +// Config is used to configure the creation of the client. +type Config struct { + modifyLock sync.RWMutex + + // Address is the address of the Vault server. This should be a complete + // URL such as "http://vault.example.com". If you need a custom SSL + // cert or want to enable insecure mode, you need to specify a custom + // HttpClient. + Address string + + // AgentAddress is the address of the local Vault agent. This should be a + // complete URL such as "http://vault.example.com". 
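+	// ReadEnvironment populates this field from the VAULT_AGENT_ADDR
+	// environment variable, and NewClient uses it in preference to Address
+	// when it is non-empty.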
+ AgentAddress string + + // HttpClient is the HTTP client to use. Vault sets sane defaults for the + // http.Client and its associated http.Transport created in DefaultConfig. + // If you must modify Vault's defaults, it is suggested that you start with + // that client and modify as needed rather than start with an empty client + // (or http.DefaultClient). + HttpClient *http.Client + + // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). + MaxRetries int + + // Timeout is for setting custom timeout parameter in the HttpClient + Timeout time.Duration + + // If there is an error when creating the configuration, this will be the + // error + Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent blocking all events. + Limiter *rate.Limiter + + // OutputCurlString causes the actual request to return an error of type + // *OutputStringError. Type asserting the error message will allow + // fetching a cURL-compatible string for the operation. + // + // Note: It is not thread-safe to set this and make concurrent requests + // with the same client. Cloning a client will not clone this value. + OutputCurlString bool +} + +// TLSConfig contains the parameters needed to configure TLS on the HTTP client +// used to communicate with Vault. +type TLSConfig struct { + // CACert is the path to a PEM-encoded CA cert file to use to verify the + // Vault server SSL certificate. + CACert string + + // CAPath is the path to a directory of PEM-encoded CA cert files to verify + // the Vault server SSL certificate. + CAPath string + + // ClientCert is the path to the certificate for Vault communication + ClientCert string + + // ClientKey is the path to the private key for Vault communication + ClientKey string + + // TLSServerName, if set, is used to set the SNI host when connecting via + // TLS. + TLSServerName string + + // Insecure enables or disables SSL verification + Insecure bool +} + +// DefaultConfig returns a default configuration for the client. It is +// safe to modify the return value of this function. +// +// The default Address is https://127.0.0.1:8200, but this can be overridden by +// setting the `VAULT_ADDR` environment variable. +// +// If an error is encountered, this will return nil. +func DefaultConfig() *Config { + config := &Config{ + Address: "https://127.0.0.1:8200", + HttpClient: cleanhttp.DefaultPooledClient(), + } + config.HttpClient.Timeout = time.Second * 60 + + transport := config.HttpClient.Transport.(*http.Transport) + transport.TLSHandshakeTimeout = 10 * time.Second + transport.TLSClientConfig = &tls.Config{ + MinVersion: tls.VersionTLS12, + } + if err := http2.ConfigureTransport(transport); err != nil { + config.Error = err + return config + } + + if err := config.ReadEnvironment(); err != nil { + config.Error = err + return config + } + + // Ensure redirects are not automatically followed + // Note that this is sane for the API client as it has its own + // redirect handling logic (and thus also for command/meta), + // but in e.g. 
http_test actual redirect handling is necessary + config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { + // Returning this value causes the Go net library to not close the + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this + // function (to prevent redirects) passing through to it. + return http.ErrUseLastResponse + } + + config.Backoff = retryablehttp.LinearJitterBackoff + config.MaxRetries = 2 + + return config +} + +// ConfigureTLS takes a set of TLS configurations and applies those to the the +// HTTP client. +func (c *Config) ConfigureTLS(t *TLSConfig) error { + if c.HttpClient == nil { + c.HttpClient = DefaultConfig().HttpClient + } + clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig + + var clientCert tls.Certificate + foundClientCert := false + + switch { + case t.ClientCert != "" && t.ClientKey != "": + var err error + clientCert, err = tls.LoadX509KeyPair(t.ClientCert, t.ClientKey) + if err != nil { + return err + } + foundClientCert = true + case t.ClientCert != "" || t.ClientKey != "": + return fmt.Errorf("both client cert and client key must be provided") + } + + if t.CACert != "" || t.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: t.CACert, + CAPath: t.CAPath, + } + if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil { + return err + } + } + + if t.Insecure { + clientTLSConfig.InsecureSkipVerify = true + } + + if foundClientCert { + // We use this function to ignore the server's preferential list of + // CAs, otherwise any CA used for the cert auth backend must be in the + // server's CA pool + clientTLSConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return &clientCert, nil + } + } + + if t.TLSServerName != "" { + clientTLSConfig.ServerName = t.TLSServerName + } + + return nil +} + +// ReadEnvironment reads configuration information from the environment. If +// there is an error, no configuration value is updated. 
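+//
+// For example (illustrative), with the environment set before construction:
+//
+//     os.Setenv("VAULT_ADDR", "https://vault.example.com:8200")
+//     os.Setenv("VAULT_SKIP_VERIFY", "true")
+//     cfg := api.DefaultConfig() // DefaultConfig calls ReadEnvironment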
+func (c *Config) ReadEnvironment() error { + var envAddress string + var envAgentAddress string + var envCACert string + var envCAPath string + var envClientCert string + var envClientKey string + var envClientTimeout time.Duration + var envInsecure bool + var envTLSServerName string + var envMaxRetries *uint64 + var limit *rate.Limiter + + // Parse the environment variables + if v := os.Getenv(EnvVaultAddress); v != "" { + envAddress = v + } + if v := os.Getenv(EnvVaultAgentAddr); v != "" { + envAgentAddress = v + } + if v := os.Getenv(EnvVaultMaxRetries); v != "" { + maxRetries, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return err + } + envMaxRetries = &maxRetries + } + if v := os.Getenv(EnvVaultCACert); v != "" { + envCACert = v + } + if v := os.Getenv(EnvVaultCAPath); v != "" { + envCAPath = v + } + if v := os.Getenv(EnvVaultClientCert); v != "" { + envClientCert = v + } + if v := os.Getenv(EnvVaultClientKey); v != "" { + envClientKey = v + } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } + if t := os.Getenv(EnvVaultClientTimeout); t != "" { + clientTimeout, err := parseutil.ParseDurationSecond(t) + if err != nil { + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) + } + envClientTimeout = clientTimeout + } + if v := os.Getenv(EnvVaultSkipVerify); v != "" { + var err error + envInsecure, err = strconv.ParseBool(v) + if err != nil { + return fmt.Errorf("could not parse VAULT_SKIP_VERIFY") + } + } + if v := os.Getenv(EnvVaultTLSServerName); v != "" { + envTLSServerName = v + } + + // Configure the HTTP clients TLS configuration. + t := &TLSConfig{ + CACert: envCACert, + CAPath: envCAPath, + ClientCert: envClientCert, + ClientKey: envClientKey, + TLSServerName: envTLSServerName, + Insecure: envInsecure, + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.Limiter = limit + + if err := c.ConfigureTLS(t); err != nil { + return err + } + + if envAddress != "" { + c.Address = envAddress + } + + if envAgentAddress != "" { + c.AgentAddress = envAgentAddress + } + + if envMaxRetries != nil { + c.MaxRetries = int(*envMaxRetries) + } + + if envClientTimeout != 0 { + c.Timeout = envClientTimeout + } + + return nil +} + +func parseRateLimit(val string) (rate float64, burst int, err error) { + + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err + +} + +// Client is the client to the Vault API. Create a client with NewClient. +type Client struct { + modifyLock sync.RWMutex + addr *url.URL + config *Config + token string + headers http.Header + wrappingLookupFunc WrappingLookupFunc + mfaCreds []string + policyOverride bool +} + +// NewClient returns a new client for the given configuration. +// +// If the configuration is nil, Vault will use configuration from +// DefaultConfig(), which is the recommended starting configuration. +// +// If the environment variable `VAULT_TOKEN` is present, the token will be +// automatically added to the client. Otherwise, you must manually call +// `SetToken()`. 
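+//
+// For example (illustrative; the token value is a placeholder):
+//
+//     client, err := api.NewClient(nil) // nil selects DefaultConfig()
+//     if err != nil {
+//         return nil, err
+//     }
+//     client.SetToken("s.xxxxxxxx")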
+func NewClient(c *Config) (*Client, error) { + def := DefaultConfig() + if def == nil { + return nil, fmt.Errorf("could not create/read default configuration") + } + if def.Error != nil { + return nil, errwrap.Wrapf("error encountered setting up default configuration: {{err}}", def.Error) + } + + if c == nil { + c = def + } + + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.HttpClient == nil { + c.HttpClient = def.HttpClient + } + if c.HttpClient.Transport == nil { + c.HttpClient.Transport = def.HttpClient.Transport + } + + address := c.Address + if c.AgentAddress != "" { + address = c.AgentAddress + } + + u, err := url.Parse(address) + if err != nil { + return nil, err + } + + if strings.HasPrefix(address, "unix://") { + socket := strings.TrimPrefix(address, "unix://") + transport := c.HttpClient.Transport.(*http.Transport) + transport.DialContext = func(context.Context, string, string) (net.Conn, error) { + return net.Dial("unix", socket) + } + + // Since the address points to a unix domain socket, the scheme in the + // *URL would be set to `unix`. The *URL in the client is expected to + // be pointing to the protocol used in the application layer and not to + // the transport layer. Hence, setting the fields accordingly. + u.Scheme = "http" + u.Host = socket + u.Path = "" + } + + client := &Client{ + addr: u, + config: c, + } + + if token := os.Getenv(EnvVaultToken); token != "" { + client.token = token + } + + if namespace := os.Getenv(EnvVaultNamespace); namespace != "" { + client.setNamespace(namespace) + } + + return client, nil +} + +// Sets the address of Vault in the client. The format of address should be +// "://:". Setting this on a client will override the +// value of VAULT_ADDR environment variable. +func (c *Client) SetAddress(addr string) error { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + parsedAddr, err := url.Parse(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) + } + + c.addr = parsedAddr + return nil +} + +// Address returns the Vault URL the client is configured to connect to +func (c *Client) Address() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.addr.String() +} + +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. 
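+// Any limiter configured previously (including one read from
+// VAULT_RATE_LIMIT) is replaced.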
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + +// SetMaxRetries sets the number of retries that will be used in the case of certain errors +func (c *Client) SetMaxRetries(retries int) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.MaxRetries = retries +} + +// SetClientTimeout sets the client request timeout +func (c *Client) SetClientTimeout(timeout time.Duration) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.Timeout = timeout +} + +func (c *Client) OutputCurlString() bool { + c.modifyLock.RLock() + c.config.modifyLock.RLock() + defer c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + + return c.config.OutputCurlString +} + +func (c *Client) SetOutputCurlString(curl bool) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.OutputCurlString = curl +} + +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.wrappingLookupFunc +} + +// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path +func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.wrappingLookupFunc = lookupFunc +} + +// SetMFACreds sets the MFA credentials supplied either via the environment +// variable or via the command line. +func (c *Client) SetMFACreds(creds []string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.mfaCreds = creds +} + +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + c.setNamespace(namespace) +} + +func (c *Client) setNamespace(namespace string) { + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(consts.NamespaceHeaderName, namespace) +} + +// Token returns the access token being used by this client. It will +// return the empty string if there is no token set. +func (c *Client) Token() string { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.token +} + +// SetToken sets the token directly. This won't perform any auth +// verification, it simply sets the token properly for future requests. +func (c *Client) SetToken(v string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.token = v +} + +// ClearToken deletes the token if it is set or does nothing otherwise. +func (c *Client) ClearToken() { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.token = "" +} + +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it make modifications locally and use SetHeaders. 
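+//
+// For example (illustrative; the header name is a placeholder), noting that
+// a nil map is returned when no headers have been set:
+//
+//     h := client.Headers()
+//     if h == nil {
+//         h = make(http.Header)
+//     }
+//     h.Set("X-Custom-Header", "some-value")
+//     client.SetHeaders(h)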
+func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + +// SetHeaders sets the headers to be used for future requests. +func (c *Client) SetHeaders(headers http.Header) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.headers = headers +} + +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.Backoff = backoff +} + +// Clone creates a new client with the same configuration. Note that the same +// underlying http.Client is used; modifying the client from more than one +// goroutine at once may not be safe, so modify the client as needed and then +// clone. +// +// Also, only the client's config is currently copied; this means items not in +// the api.Config struct, such as policy override and wrapping function +// behavior, must currently then be set as desired on the new client. +func (c *Client) Clone() (*Client, error) { + c.modifyLock.RLock() + c.config.modifyLock.RLock() + config := c.config + c.modifyLock.RUnlock() + + newConfig := &Config{ + Address: config.Address, + HttpClient: config.HttpClient, + MaxRetries: config.MaxRetries, + Timeout: config.Timeout, + Backoff: config.Backoff, + Limiter: config.Limiter, + } + config.modifyLock.RUnlock() + + return NewClient(newConfig) +} + +// SetPolicyOverride sets whether requests should be sent with the policy +// override flag to request overriding soft-mandatory Sentinel policies (both +// RGPs and EGPs) +func (c *Client) SetPolicyOverride(override bool) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + c.policyOverride = override +} + +// NewRequest creates a new raw request object to query the Vault server +// configured for this client. This is an advanced method and generally +// doesn't need to be called externally. 
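+//
+// A sketch of issuing a request against an endpoint the typed helpers do not
+// cover (client is an initialized *Client; the path is illustrative):
+//
+//	req := client.NewRequest("GET", "/v1/sys/health")
+//	resp, err := client.RawRequest(req)
+//	if err != nil {
+//		return err
+//	}
+//	defer resp.Body.Close()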
+func (c *Client) NewRequest(method, requestPath string) *Request { + c.modifyLock.RLock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + headers := c.headers + policyOverride := c.policyOverride + c.modifyLock.RUnlock() + + // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV + // record and take the highest match; this is not designed for high-availability, just discovery + var host string = addr.Host + if addr.Port() == "" { + // Internet Draft specifies that the SRV record is ignored if a port is given + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) + if err == nil && len(addrs) > 0 { + host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) + } + } + + req := &Request{ + Method: method, + URL: &url.URL{ + User: addr.User, + Scheme: addr.Scheme, + Host: host, + Path: path.Join(addr.Path, requestPath), + }, + ClientToken: token, + Params: make(map[string][]string), + } + + var lookupPath string + switch { + case strings.HasPrefix(requestPath, "/v1/"): + lookupPath = strings.TrimPrefix(requestPath, "/v1/") + case strings.HasPrefix(requestPath, "v1/"): + lookupPath = strings.TrimPrefix(requestPath, "v1/") + default: + lookupPath = requestPath + } + + req.MFAHeaderVals = mfaCreds + + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) + } else { + req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) + } + + if headers != nil { + req.Headers = headers + } + + req.PolicyOverride = policyOverride + + return req +} + +// RawRequest performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. 
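+//
+// A sketch of bounding a raw request with a caller-owned deadline (req is a
+// *Request built with NewRequest; the timeout value is illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	resp, err := client.RawRequestWithContext(ctx, req)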
+func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) {
+	c.modifyLock.RLock()
+	token := c.token
+
+	c.config.modifyLock.RLock()
+	limiter := c.config.Limiter
+	maxRetries := c.config.MaxRetries
+	backoff := c.config.Backoff
+	httpClient := c.config.HttpClient
+	timeout := c.config.Timeout
+	outputCurlString := c.config.OutputCurlString
+	c.config.modifyLock.RUnlock()
+
+	c.modifyLock.RUnlock()
+
+	if limiter != nil {
+		limiter.Wait(ctx)
+	}
+
+	// Sanity check the token before potentially erroring from the API
+	idx := strings.IndexFunc(token, func(c rune) bool {
+		return !unicode.IsPrint(c)
+	})
+	if idx != -1 {
+		return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used")
+	}
+
+	redirectCount := 0
+START:
+	req, err := r.toRetryableHTTP()
+	if err != nil {
+		return nil, err
+	}
+	if req == nil {
+		return nil, fmt.Errorf("nil request created")
+	}
+
+	if outputCurlString {
+		LastOutputStringError = &OutputStringError{Request: req}
+		return nil, LastOutputStringError
+	}
+
+	if timeout != 0 {
+		// The cancel function is intentionally dropped: the caller reads the
+		// response body after this function returns, and cancelling the
+		// context here would abort that read. The timer is released when the
+		// timeout fires.
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	req.Request = req.Request.WithContext(ctx)
+
+	if backoff == nil {
+		backoff = retryablehttp.LinearJitterBackoff
+	}
+
+	client := &retryablehttp.Client{
+		HTTPClient:   httpClient,
+		RetryWaitMin: 1000 * time.Millisecond,
+		RetryWaitMax: 1500 * time.Millisecond,
+		RetryMax:     maxRetries,
+		CheckRetry:   retryablehttp.DefaultRetryPolicy,
+		Backoff:      backoff,
+		ErrorHandler: retryablehttp.PassthroughErrorHandler,
+	}
+
+	var result *Response
+	resp, err := client.Do(req)
+	if resp != nil {
+		result = &Response{Response: resp}
+	}
+	if err != nil {
+		if strings.Contains(err.Error(), "tls: oversized") {
+			err = errwrap.Wrapf(
+				"{{err}}\n\n"+
+					"This error usually means that the server is running with TLS disabled\n"+
+					"but the client is configured to use TLS. Please either enable TLS\n"+
+					"on the server or run the client with -address set to an address\n"+
+					"that uses the http protocol:\n\n"+
+					"    vault <command> -address http://<address>
\n\n"+ + "You can also set the VAULT_ADDR environment variable:\n\n\n"+ + " VAULT_ADDR=http://
<address> vault <command>\n\n"+ + "where <address>
is replaced by the actual address to the server.", + err) + } + return result, err + } + + // Check for a redirect, only allowing for a single redirect + if (resp.StatusCode == 301 || resp.StatusCode == 302 || resp.StatusCode == 307) && redirectCount == 0 { + // Parse the updated location + respLoc, err := resp.Location() + if err != nil { + return result, err + } + + // Ensure a protocol downgrade doesn't happen + if req.URL.Scheme == "https" && respLoc.Scheme != "https" { + return result, fmt.Errorf("redirect would cause protocol downgrade") + } + + // Update the request + r.URL = respLoc + + // Reset the request body if any + if err := r.ResetJSONBody(); err != nil { + return result, err + } + + // Retry the request + redirectCount++ + goto START + } + + if err := result.Error(); err != nil { + return result, err + } + + return result, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/go.mod b/vendor/github.com/hashicorp/vault/api/go.mod new file mode 100644 index 00000000..db0b2a5a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/go.mod @@ -0,0 +1,19 @@ +module github.com/hashicorp/vault/api + +go 1.12 + +replace github.com/hashicorp/vault/sdk => ../sdk + +require ( + github.com/hashicorp/errwrap v1.0.0 + github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-multierror v1.0.0 + github.com/hashicorp/go-retryablehttp v0.5.4 + github.com/hashicorp/go-rootcerts v1.0.1 + github.com/hashicorp/hcl v1.0.0 + github.com/hashicorp/vault/sdk v0.1.12-0.20190709075428-f03d40b2913b + github.com/mitchellh/mapstructure v1.1.2 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 + gopkg.in/square/go-jose.v2 v2.3.1 +) diff --git a/vendor/github.com/hashicorp/vault/api/go.sum b/vendor/github.com/hashicorp/vault/api/go.sum new file mode 100644 index 00000000..4e55eb8c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/go.sum @@ -0,0 +1,111 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= +github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= +github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= +github.com/hashicorp/go-retryablehttp v0.5.4 h1:1BZvpawXoJCWX6pNtow9+rpEj+3itIlutiqnntI6jOE= +github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= +github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= +github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/copystructure v1.0.0/go.mod 
h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db h1:6/JqlYfC1CCaLnGceQTI+sDGhC9UBSPAsBqI0Gun6kU= +golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= +gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go new file mode 100644 index 00000000..321bd597 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -0,0 +1,30 @@ +package api + +import ( + "context" + "fmt" +) + +// Help reads the help information for the given path. 
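+//
+// A sketch of looking up help for a mount (client is an initialized *Client;
+// "secret" is an illustrative path):
+//
+//	help, err := client.Help("secret")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(help.Help)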
+func (c *Client) Help(path string) (*Help, error) { + r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) + r.Params.Add("help", "1") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result Help + err = resp.DecodeJSON(&result) + return &result, err +} + +type Help struct { + Help string `json:"help"` + SeeAlso []string `json:"see_also"` + OpenAPI map[string]interface{} `json:"openapi"` +} diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go new file mode 100644 index 00000000..d212066d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -0,0 +1,267 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "io" + "net/url" + "os" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +const ( + wrappedResponseLocation = "cubbyhole/response" +) + +var ( + // The default TTL that will be used with `sys/wrapping/wrap`, can be + // changed + DefaultWrappingTTL = "5m" + + // The default function used if no other function is set, which honors the + // env var and wraps `sys/wrapping/wrap` + DefaultWrappingLookupFunc = func(operation, path string) string { + if os.Getenv(EnvVaultWrapTTL) != "" { + return os.Getenv(EnvVaultWrapTTL) + } + + if (operation == "PUT" || operation == "POST") && path == "sys/wrapping/wrap" { + return DefaultWrappingTTL + } + + return "" + } +) + +// Logical is used to perform logical backend operations on Vault. +type Logical struct { + c *Client +} + +// Logical is used to return the client for logical-backend API calls. 
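+//
+// A sketch of a write followed by a read (client is an initialized *Client;
+// the path and data are illustrative):
+//
+//	logical := client.Logical()
+//	_, err := logical.Write("secret/foo", map[string]interface{}{"value": "bar"})
+//	if err != nil {
+//		return err
+//	}
+//	secret, err := logical.Read("secret/foo")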
+func (c *Client) Logical() *Logical { + return &Logical{c: c} +} + +func (c *Logical) Read(path string) (*Secret, error) { + return c.ReadWithData(path, nil) +} + +func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { + r := c.c.NewRequest("GET", "/v1/"+path) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) List(path string) (*Secret, error) { + r := c.c.NewRequest("LIST", "/v1/"+path) + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = "GET" + r.Params.Set("list", "true") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + return nil, nil + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/"+path) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Delete(path string) (*Secret, error) { + r := c.c.NewRequest("DELETE", "/v1/"+path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } + if err != nil { + return nil, err + } + + return ParseSecret(resp.Body) +} + +func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { + var data map[string]interface{} + if wrappingToken != "" { + if c.c.Token() == "" { + c.c.SetToken(wrappingToken) + } else if wrappingToken != c.c.Token() { + data 
= map[string]interface{}{ + "token": wrappingToken, + } + } + } + + r := c.c.NewRequest("PUT", "/v1/sys/wrapping/unwrap") + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + } + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) + } + + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } + + // Otherwise this might be an old-style wrapping token so attempt the old + // method + if wrappingToken != "" { + origToken := c.c.Token() + defer c.c.SetToken(origToken) + c.c.SetToken(wrappingToken) + } + + secret, err = c.Read(wrappedResponseLocation) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) + } + if secret == nil { + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) + } + if secret.Data == nil { + return nil, fmt.Errorf("\"data\" not found in wrapping response") + } + if _, ok := secret.Data["response"]; !ok { + return nil, fmt.Errorf("\"response\" not found in wrapping response \"data\" map") + } + + wrappedSecret := new(Secret) + buf := bytes.NewBufferString(secret.Data["response"].(string)) + if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { + return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) + } + + return wrappedSecret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/output_string.go b/vendor/github.com/hashicorp/vault/api/output_string.go new file mode 100644 index 00000000..b836b77a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/output_string.go @@ -0,0 +1,71 @@ +package api + +import ( + "fmt" + "strings" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +const ( + ErrOutputStringRequest = "output a string, please" +) + +var ( + LastOutputStringError *OutputStringError +) + +type OutputStringError struct { + *retryablehttp.Request + parsingError error + parsedCurlString string +} + +func (d *OutputStringError) Error() string { + if d.parsedCurlString == "" { + d.parseRequest() + if d.parsingError != nil { + return d.parsingError.Error() + } + } + + return ErrOutputStringRequest +} + +func (d *OutputStringError) parseRequest() { + body, err := d.Request.BodyBytes() + if err != nil { + d.parsingError = err + return + } + + // Build cURL string + d.parsedCurlString = "curl " + if d.Request.Method != "GET" { + d.parsedCurlString = fmt.Sprintf("%s-X %s ", d.parsedCurlString, d.Request.Method) + } + for k, v := range d.Request.Header { + for _, h := range v { + if strings.ToLower(k) == "x-vault-token" { + h = `$(vault print token)` + } + d.parsedCurlString = fmt.Sprintf("%s-H \"%s: %s\" ", d.parsedCurlString, k, h) + } + } + + if len(body) > 0 { + // We need to escape single quotes since that's what we're using to + // quote the body + escapedBody := strings.Replace(string(body), "'", "'\"'\"'", -1) + d.parsedCurlString = fmt.Sprintf("%s-d '%s' ", d.parsedCurlString, escapedBody) + } + + d.parsedCurlString = fmt.Sprintf("%s%s", 
d.parsedCurlString, d.Request.URL.String()) +} + +func (d *OutputStringError) CurlString() string { + if d.parsedCurlString == "" { + d.parseRequest() + } + return d.parsedCurlString +} diff --git a/vendor/github.com/hashicorp/vault/api/plugin_helpers.go b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go new file mode 100644 index 00000000..e664d5eb --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/plugin_helpers.go @@ -0,0 +1,186 @@ +package api + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "flag" + "net/url" + "os" + + squarejwt "gopkg.in/square/go-jose.v2/jwt" + + "github.com/hashicorp/errwrap" +) + +var ( + // PluginMetadataModeEnv is an ENV name used to disable TLS communication + // to bootstrap mounting plugins. + PluginMetadataModeEnv = "VAULT_PLUGIN_METADATA_MODE" + + // PluginUnwrapTokenEnv is the ENV name used to pass unwrap tokens to the + // plugin. + PluginUnwrapTokenEnv = "VAULT_UNWRAP_TOKEN" +) + +// PluginAPIClientMeta is a helper that plugins can use to configure TLS connections +// back to Vault. +type PluginAPIClientMeta struct { + // These are set by the command line flags. + flagCACert string + flagCAPath string + flagClientCert string + flagClientKey string + flagInsecure bool +} + +// FlagSet returns the flag set for configuring the TLS connection +func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet { + fs := flag.NewFlagSet("vault plugin settings", flag.ContinueOnError) + + fs.StringVar(&f.flagCACert, "ca-cert", "", "") + fs.StringVar(&f.flagCAPath, "ca-path", "", "") + fs.StringVar(&f.flagClientCert, "client-cert", "", "") + fs.StringVar(&f.flagClientKey, "client-key", "", "") + fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "") + + return fs +} + +// GetTLSConfig will return a TLSConfig based off the values from the flags +func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig { + // If we need custom TLS configuration, then set it + if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure { + t := &TLSConfig{ + CACert: f.flagCACert, + CAPath: f.flagCAPath, + ClientCert: f.flagClientCert, + ClientKey: f.flagClientKey, + TLSServerName: "", + Insecure: f.flagInsecure, + } + + return t + } + + return nil +} + +// VaultPluginTLSProvider is run inside a plugin and retrieves the response +// wrapped TLS certificate from vault. It returns a configured TLS Config. 
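+//
+// A sketch of typical wiring inside a plugin's main function (flag values
+// come from the command line that Vault passes to the plugin):
+//
+//	apiClientMeta := &PluginAPIClientMeta{}
+//	flags := apiClientMeta.FlagSet()
+//	flags.Parse(os.Args[1:])
+//	tlsProviderFunc := VaultPluginTLSProvider(apiClientMeta.GetTLSConfig())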
+func VaultPluginTLSProvider(apiTLSConfig *TLSConfig) func() (*tls.Config, error) { + if os.Getenv(PluginMetadataModeEnv) == "true" { + return nil + } + + return func() (*tls.Config, error) { + unwrapToken := os.Getenv(PluginUnwrapTokenEnv) + + parsedJWT, err := squarejwt.ParseSigned(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error parsing wrapping token: {{err}}", err) + } + + var allClaims = make(map[string]interface{}) + if err = parsedJWT.UnsafeClaimsWithoutVerification(&allClaims); err != nil { + return nil, errwrap.Wrapf("error parsing claims from wrapping token: {{err}}", err) + } + + addrClaimRaw, ok := allClaims["addr"] + if !ok { + return nil, errors.New("could not validate addr claim") + } + vaultAddr, ok := addrClaimRaw.(string) + if !ok { + return nil, errors.New("could not parse addr claim") + } + if vaultAddr == "" { + return nil, errors.New(`no vault api_addr found`) + } + + // Sanity check the value + if _, err := url.Parse(vaultAddr); err != nil { + return nil, errwrap.Wrapf("error parsing the vault api_addr: {{err}}", err) + } + + // Unwrap the token + clientConf := DefaultConfig() + clientConf.Address = vaultAddr + if apiTLSConfig != nil { + err := clientConf.ConfigureTLS(apiTLSConfig) + if err != nil { + return nil, errwrap.Wrapf("error configuring api client {{err}}", err) + } + } + client, err := NewClient(clientConf) + if err != nil { + return nil, errwrap.Wrapf("error during api client creation: {{err}}", err) + } + + secret, err := client.Logical().Unwrap(unwrapToken) + if err != nil { + return nil, errwrap.Wrapf("error during token unwrap request: {{err}}", err) + } + if secret == nil { + return nil, errors.New("error during token unwrap request: secret is nil") + } + + // Retrieve and parse the server's certificate + serverCertBytesRaw, ok := secret.Data["ServerCert"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverCertBytes, err := base64.StdEncoding.DecodeString(serverCertBytesRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverCert, err := x509.ParseCertificate(serverCertBytes) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Retrieve and parse the server's private key + serverKeyB64, ok := secret.Data["ServerKey"].(string) + if !ok { + return nil, errors.New("error unmarshalling certificate") + } + + serverKeyRaw, err := base64.StdEncoding.DecodeString(serverKeyB64) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + serverKey, err := x509.ParseECPrivateKey(serverKeyRaw) + if err != nil { + return nil, errwrap.Wrapf("error parsing certificate: {{err}}", err) + } + + // Add CA cert to the cert pool + caCertPool := x509.NewCertPool() + caCertPool.AddCert(serverCert) + + // Build a certificate object out of the server's cert and private key. 
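+		// Populating Leaf up front spares the TLS stack a re-parse of the
+		// DER bytes on first use.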
+ cert := tls.Certificate{ + Certificate: [][]byte{serverCertBytes}, + PrivateKey: serverKey, + Leaf: serverCert, + } + + // Setup TLS config + tlsConfig := &tls.Config{ + ClientCAs: caCertPool, + RootCAs: caCertPool, + ClientAuth: tls.RequireAndVerifyClientCert, + // TLS 1.2 minimum + MinVersion: tls.VersionTLS12, + Certificates: []tls.Certificate{cert}, + ServerName: serverCert.Subject.CommonName, + } + tlsConfig.BuildNameToCertificate() + + return tlsConfig, nil + } +} diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go new file mode 100644 index 00000000..1d37a193 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/renewer.go @@ -0,0 +1,349 @@ +package api + +import ( + "errors" + "math/rand" + "sync" + "time" +) + +var ( + ErrRenewerMissingInput = errors.New("missing input to renewer") + ErrRenewerMissingSecret = errors.New("missing secret to renew") + ErrRenewerNotRenewable = errors.New("secret is not renewable") + ErrRenewerNoSecretData = errors.New("returned empty secret data") + + // DefaultRenewerRenewBuffer is the default size of the buffer for renew + // messages on the channel. + DefaultRenewerRenewBuffer = 5 +) + +// Renewer is a process for renewing a secret. +// +// renewer, err := client.NewRenewer(&RenewerInput{ +// Secret: mySecret, +// }) +// go renewer.Renew() +// defer renewer.Stop() +// +// for { +// select { +// case err := <-renewer.DoneCh(): +// if err != nil { +// log.Fatal(err) +// } +// +// // Renewal is now over +// case renewal := <-renewer.RenewCh(): +// log.Printf("Successfully renewed: %#v", renewal) +// } +// } +// +// +// The `DoneCh` will return if renewal fails or if the remaining lease duration +// after a renewal is less than or equal to the grace (in number of seconds). In +// both cases, the caller should attempt a re-read of the secret. Clients should +// check the return value of the channel to see if renewal was successful. +type Renewer struct { + l sync.Mutex + + client *Client + secret *Secret + grace time.Duration + random *rand.Rand + increment int + doneCh chan error + renewCh chan *RenewOutput + + stopped bool + stopCh chan struct{} +} + +// RenewerInput is used as input to the renew function. +type RenewerInput struct { + // Secret is the secret to renew + Secret *Secret + + // DEPRECATED: this does not do anything. + Grace time.Duration + + // Rand is the randomizer to use for underlying randomization. If not + // provided, one will be generated and seeded automatically. If provided, it + // is assumed to have already been seeded. + Rand *rand.Rand + + // RenewBuffer is the size of the buffered channel where renew messages are + // dispatched. + RenewBuffer int + + // The new TTL, in seconds, that should be set on the lease. The TTL set + // here may or may not be honored by the vault server, based on Vault + // configuration or any associated max TTL values. + Increment int +} + +// RenewOutput is the metadata returned to the client (if it's listening) to +// renew messages. +type RenewOutput struct { + // RenewedAt is the timestamp when the renewal took place (UTC). + RenewedAt time.Time + + // Secret is the underlying renewal data. It's the same struct as all data + // that is returned from Vault, but since this is renewal data, it will not + // usually include the secret itself. + Secret *Secret +} + +// NewRenewer creates a new renewer from the given input. 
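+//
+// For example (mySecret is a *Secret previously read from Vault; Increment
+// asks for a one-hour TTL, which the server is free to clamp):
+//
+//	renewer, err := client.NewRenewer(&RenewerInput{
+//		Secret:    mySecret,
+//		Increment: 3600,
+//	})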
+func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) { + if i == nil { + return nil, ErrRenewerMissingInput + } + + secret := i.Secret + if secret == nil { + return nil, ErrRenewerMissingSecret + } + + random := i.Rand + if random == nil { + random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + } + + renewBuffer := i.RenewBuffer + if renewBuffer == 0 { + renewBuffer = DefaultRenewerRenewBuffer + } + + return &Renewer{ + client: c, + secret: secret, + increment: i.Increment, + random: random, + doneCh: make(chan error, 1), + renewCh: make(chan *RenewOutput, renewBuffer), + + stopped: false, + stopCh: make(chan struct{}), + }, nil +} + +// DoneCh returns the channel where the renewer will publish when renewal stops. +// If there is an error, this will be an error. +func (r *Renewer) DoneCh() <-chan error { + return r.doneCh +} + +// RenewCh is a channel that receives a message when a successful renewal takes +// place and includes metadata about the renewal. +func (r *Renewer) RenewCh() <-chan *RenewOutput { + return r.renewCh +} + +// Stop stops the renewer. +func (r *Renewer) Stop() { + r.l.Lock() + if !r.stopped { + close(r.stopCh) + r.stopped = true + } + r.l.Unlock() +} + +// Renew starts a background process for renewing this secret. When the secret +// has auth data, this attempts to renew the auth (token). When the secret has +// a lease, this attempts to renew the lease. +func (r *Renewer) Renew() { + var result error + if r.secret.Auth != nil { + result = r.renewAuth() + } else { + result = r.renewLease() + } + + r.doneCh <- result +} + +// renewAuth is a helper for renewing authentication. +func (r *Renewer) renewAuth() error { + if !r.secret.Auth.Renewable || r.secret.Auth.ClientToken == "" { + return ErrRenewerNotRenewable + } + + priorDuration := time.Duration(r.secret.Auth.LeaseDuration) * time.Second + r.calculateGrace(priorDuration) + + client, token := r.client, r.secret.Auth.ClientToken + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + // Renew the auth. + renewal, err := client.Auth().Token().RenewTokenAsSelf(token, r.increment) + if err != nil { + return err + } + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Somehow, sometimes, this happens. + if renewal == nil || renewal.Auth == nil { + return ErrRenewerNoSecretData + } + + // Do nothing if we are not renewable + if !renewal.Auth.Renewable { + return ErrRenewerNotRenewable + } + + // Grab the lease duration + leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second + + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if leaseDuration > priorDuration { + r.calculateGrace(leaseDuration) + } + priorDuration = leaseDuration + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. 
This helps with short + // tokens; for example, you don't want a current lease duration of 4 + // seconds, a grace period of 3 seconds, and end up sleeping for more + // than three of those seconds and having a very small budget of time + // to renew. + if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// renewLease is a helper for renewing a lease. +func (r *Renewer) renewLease() error { + if !r.secret.Renewable || r.secret.LeaseID == "" { + return ErrRenewerNotRenewable + } + + priorDuration := time.Duration(r.secret.LeaseDuration) * time.Second + r.calculateGrace(priorDuration) + + client, leaseID := r.client, r.secret.LeaseID + + for { + // Check if we are stopped. + select { + case <-r.stopCh: + return nil + default: + } + + // Renew the lease. + renewal, err := client.Sys().Renew(leaseID, r.increment) + if err != nil { + return err + } + + // Push a message that a renewal took place. + select { + case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}: + default: + } + + // Somehow, sometimes, this happens. + if renewal == nil { + return ErrRenewerNoSecretData + } + + // Do nothing if we are not renewable + if !renewal.Renewable { + return ErrRenewerNotRenewable + } + + // Grab the lease duration + leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second + + // We keep evaluating a new grace period so long as the lease is + // extending. Once it stops extending, we've hit the max and need to + // rely on the grace duration. + if leaseDuration > priorDuration { + r.calculateGrace(leaseDuration) + } + priorDuration = leaseDuration + + // The sleep duration is set to 2/3 of the current lease duration plus + // 1/3 of the current grace period, which adds jitter. + sleepDuration := time.Duration(float64(leaseDuration.Nanoseconds())*2/3 + float64(r.grace.Nanoseconds())/3) + + // If we are within grace, return now; or, if the amount of time we + // would sleep would land us in the grace period. This helps with short + // tokens; for example, you don't want a current lease duration of 4 + // seconds, a grace period of 3 seconds, and end up sleeping for more + // than three of those seconds and having a very small budget of time + // to renew. + if leaseDuration <= r.grace || leaseDuration-sleepDuration <= r.grace { + return nil + } + + select { + case <-r.stopCh: + return nil + case <-time.After(sleepDuration): + continue + } + } +} + +// sleepDuration calculates the time to sleep given the base lease duration. The +// base is the resulting lease duration. It will be reduced to 1/3 and +// multiplied by a random float between 0.0 and 1.0. This extra randomness +// prevents multiple clients from all trying to renew simultaneously. +func (r *Renewer) sleepDuration(base time.Duration) time.Duration { + sleep := float64(base) + + // Renew at 1/3 the remaining lease. This will give us an opportunity to retry + // at least one more time should the first renewal fail. + sleep = sleep / 3.0 + + // Use a randomness so many clients do not hit Vault simultaneously. + sleep = sleep * (r.random.Float64() + 1) / 2.0 + + return time.Duration(sleep) +} + +// calculateGrace calculates the grace period based on a reasonable set of +// assumptions given the total lease time; it also adds some jitter to not have +// clients be in sync. 
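+//
+// For example, a 60m lease gives a jitterMax of 6m, so the grace lands in
+// [6m, 12m); the renew loops above stop once the remaining lease duration
+// drops to the grace (or would after sleeping), leaving the caller time to
+// re-read the secret.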
+func (r *Renewer) calculateGrace(leaseDuration time.Duration) { + if leaseDuration == 0 { + r.grace = 0 + return + } + + leaseNanos := float64(leaseDuration.Nanoseconds()) + jitterMax := 0.1 * leaseNanos + + // For a given lease duration, we want to allow 80-90% of that to elapse, + // so the remaining amount is the grace period + r.grace = time.Duration(jitterMax) + time.Duration(uint64(r.random.Int63())%uint64(jitterMax)) +} diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go new file mode 100644 index 00000000..0ed04220 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -0,0 +1,147 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/hashicorp/vault/sdk/helper/consts" + + retryablehttp "github.com/hashicorp/go-retryablehttp" +) + +// Request is a raw request configuration structure used to initiate +// API requests to the Vault server. +type Request struct { + Method string + URL *url.URL + Params url.Values + Headers http.Header + ClientToken string + MFAHeaderVals []string + WrapTTL string + Obj interface{} + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 + + // Whether to request overriding soft-mandatory Sentinel policies (RGPs and + // EGPs). If set, the override flag will take effect for all policies + // evaluated during the request. + PolicyOverride bool +} + +// SetJSONBody is used to set a request body that is a JSON-encoded value. +func (r *Request) SetJSONBody(val interface{}) error { + buf, err := json.Marshal(val) + if err != nil { + return err + } + + r.Obj = val + r.BodyBytes = buf + return nil +} + +// ResetJSONBody is used to reset the body for a redirect +func (r *Request) ResetJSONBody() error { + if r.BodyBytes == nil { + return nil + } + return r.SetJSONBody(r.Obj) +} + +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. 
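+//
+// A sketch for callers that still need a plain *http.Request, for example to
+// inspect or hand off the request (r is a *Request built with NewRequest):
+//
+//	httpReq, err := r.ToHTTP()
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(httpReq.Method, httpReq.URL.String())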
+func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { + // Encode the query parameters + r.URL.RawQuery = r.Params.Encode() + + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) + if err != nil { + return nil, err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + } + + return req, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go new file mode 100644 index 00000000..aed2a52e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -0,0 +1,120 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/jsonutil" +) + +// Response is a raw response that wraps an HTTP response. +type Response struct { + *http.Response +} + +// DecodeJSON will decode the response body to a JSON structure. This +// will consume the response body, but will not close it. Close must +// still be called. +func (r *Response) DecodeJSON(out interface{}) error { + return jsonutil.DecodeJSONFromReader(r.Body, out) +} + +// Error returns an error response if there is one. If there is an error, +// this will fully consume the response body, but will not close it. The +// body must still be closed manually. +func (r *Response) Error() error { + // 200 to 399 are okay status codes. 429 is the code for health status of + // standby nodes. + if (r.StatusCode >= 200 && r.StatusCode < 400) || r.StatusCode == 429 { + return nil + } + + // We have an error. Let's copy the body into our own buffer first, + // so that if we can't decode JSON, we can at least copy it raw. + bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { + return err + } + + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + + // Build up the error object + respErr := &ResponseError{ + HTTPMethod: r.Request.Method, + URL: r.Request.URL.String(), + StatusCode: r.StatusCode, + } + + // Decode the error response if we can. 
Note that we wrap the bodyBuf + // in a bytes.Reader here so that the JSON decoder doesn't move the + // read pointer for the original buffer. + var resp ErrorResponse + if err := jsonutil.DecodeJSON(bodyBuf.Bytes(), &resp); err != nil { + // Store the fact that we couldn't decode the errors + respErr.RawError = true + respErr.Errors = []string{bodyBuf.String()} + } else { + // Store the decoded errors + respErr.Errors = resp.Errors + } + + return respErr +} + +// ErrorResponse is the raw structure of errors when they're returned by the +// HTTP API. +type ErrorResponse struct { + Errors []string +} + +// ResponseError is the error returned when Vault responds with an error or +// non-success HTTP status code. If a request to Vault fails because of a +// network error a different error message will be returned. ResponseError gives +// access to the underlying errors and status code. +type ResponseError struct { + // HTTPMethod is the HTTP method for the request (PUT, GET, etc). + HTTPMethod string + + // URL is the URL of the request. + URL string + + // StatusCode is the HTTP status code. + StatusCode int + + // RawError marks that the underlying error messages returned by Vault were + // not parsable. The Errors slice will contain the raw response body as the + // first and only error string if this value is set to true. + RawError bool + + // Errors are the underlying errors returned by Vault. + Errors []string +} + +// Error returns a human-readable error string for the response error. +func (r *ResponseError) Error() string { + errString := "Errors" + if r.RawError { + errString = "Raw Message" + } + + var errBody bytes.Buffer + errBody.WriteString(fmt.Sprintf( + "Error making API request.\n\n"+ + "URL: %s %s\n"+ + "Code: %d. %s:\n\n", + r.HTTPMethod, r.URL, r.StatusCode, errString)) + + if r.RawError && len(r.Errors) == 1 { + errBody.WriteString(r.Errors[0]) + } else { + for _, err := range r.Errors { + errBody.WriteString(fmt.Sprintf("* %s", err)) + } + } + + return errBody.String() +} diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go new file mode 100644 index 00000000..d5b9ce97 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -0,0 +1,322 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/jsonutil" + "github.com/hashicorp/vault/sdk/helper/parseutil" +) + +// Secret is the structure returned for every secret within Vault. +type Secret struct { + // The request ID that generated this response + RequestID string `json:"request_id"` + + LeaseID string `json:"lease_id"` + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` + + // Data is the actual contents of the secret. The format of the data + // is arbitrary and up to the secret backend. + Data map[string]interface{} `json:"data"` + + // Warnings contains any warnings related to the operation. These + // are not issues that caused the command to fail, but that the + // client should be aware of. + Warnings []string `json:"warnings"` + + // Auth, if non-nil, means that there was authentication information + // attached to this response. 
+ Auth *SecretAuth `json:"auth,omitempty"` + + // WrapInfo, if non-nil, means that the initial response was wrapped in the + // cubbyhole of the given token (which has a TTL of the given number of + // seconds) + WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"` +} + +// TokenID returns the standardized token ID (token) for the given secret. +func (s *Secret) TokenID() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.ClientToken) > 0 { + return s.Auth.ClientToken, nil + } + + if s.Data == nil || s.Data["id"] == nil { + return "", nil + } + + id, ok := s.Data["id"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return id, nil +} + +// TokenAccessor returns the standardized token accessor for the given secret. +// If the secret is nil or does not contain an accessor, this returns the empty +// string. +func (s *Secret) TokenAccessor() (string, error) { + if s == nil { + return "", nil + } + + if s.Auth != nil && len(s.Auth.Accessor) > 0 { + return s.Auth.Accessor, nil + } + + if s.Data == nil || s.Data["accessor"] == nil { + return "", nil + } + + accessor, ok := s.Data["accessor"].(string) + if !ok { + return "", fmt.Errorf("token found but in the wrong format") + } + + return accessor, nil +} + +// TokenRemainingUses returns the standardized remaining uses for the given +// secret. If the secret is nil or does not contain the "num_uses", this +// returns -1. On error, this will return -1 and a non-nil error. +func (s *Secret) TokenRemainingUses() (int, error) { + if s == nil || s.Data == nil || s.Data["num_uses"] == nil { + return -1, nil + } + + uses, err := parseutil.ParseInt(s.Data["num_uses"]) + if err != nil { + return 0, err + } + + return int(uses), nil +} + +// TokenPolicies returns the standardized list of policies for the given secret. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. +func (s *Secret) TokenPolicies() ([]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Policies) > 0 { + return s.Auth.Policies, nil + } + + if s.Data == nil || s.Data["policies"] == nil { + return nil, nil + } + + var tokenPolicies []string + + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } + } + +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + _, ok := s.Data["identity_policies"] + if !ok { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) + } + } + +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) 
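+	// Cache the split and combined views on the secret's Auth block so later
+	// callers do not need to re-parse the raw data.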
+ + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + + return policies, nil +} + +// TokenMetadata returns the map of metadata associated with this token, if any +// exists. If the secret is nil or does not contain the "metadata" key, this +// returns nil. +func (s *Secret) TokenMetadata() (map[string]string, error) { + if s == nil { + return nil, nil + } + + if s.Auth != nil && len(s.Auth.Metadata) > 0 { + return s.Auth.Metadata, nil + } + + if s.Data == nil || (s.Data["metadata"] == nil && s.Data["meta"] == nil) { + return nil, nil + } + + data, ok := s.Data["metadata"].(map[string]interface{}) + if !ok { + data, ok = s.Data["meta"].(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert metadata field to expected format") + } + } + + metadata := make(map[string]string, len(data)) + for k, v := range data { + typed, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert metadata value %v to string", v) + } + metadata[k] = typed + } + + return metadata, nil +} + +// TokenIsRenewable returns the standardized token renewability for the given +// secret. If the secret is nil or does not contain the "renewable" key, this +// returns false. +func (s *Secret) TokenIsRenewable() (bool, error) { + if s == nil { + return false, nil + } + + if s.Auth != nil && s.Auth.Renewable { + return s.Auth.Renewable, nil + } + + if s.Data == nil || s.Data["renewable"] == nil { + return false, nil + } + + renewable, err := parseutil.ParseBool(s.Data["renewable"]) + if err != nil { + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) + } + + return renewable, nil +} + +// TokenTTL returns the standardized remaining token TTL for the given secret. +// If the secret is nil or does not contain a TTL, this returns 0. +func (s *Secret) TokenTTL() (time.Duration, error) { + if s == nil { + return 0, nil + } + + if s.Auth != nil && s.Auth.LeaseDuration > 0 { + return time.Duration(s.Auth.LeaseDuration) * time.Second, nil + } + + if s.Data == nil || s.Data["ttl"] == nil { + return 0, nil + } + + ttl, err := parseutil.ParseDurationSecond(s.Data["ttl"]) + if err != nil { + return 0, err + } + + return ttl, nil +} + +// SecretWrapInfo contains wrapping information if we have it. If what is +// contained is an authentication token, the accessor for the token will be +// available in WrappedAccessor. +type SecretWrapInfo struct { + Token string `json:"token"` + Accessor string `json:"accessor"` + TTL int `json:"ttl"` + CreationTime time.Time `json:"creation_time"` + CreationPath string `json:"creation_path"` + WrappedAccessor string `json:"wrapped_accessor"` +} + +// SecretAuth is the structure containing auth information if we have it. +type SecretAuth struct { + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies"` + IdentityPolicies []string `json:"identity_policies"` + Metadata map[string]string `json:"metadata"` + Orphan bool `json:"orphan"` + EntityID string `json:"entity_id"` + + LeaseDuration int `json:"lease_duration"` + Renewable bool `json:"renewable"` +} + +// ParseSecret is used to parse a secret value from JSON from an io.Reader. +func ParseSecret(r io.Reader) (*Secret, error) { + // First read the data into a buffer. Not super efficient but we want to + // know if we actually have a body or not. 
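To make the Token* helpers above concrete, here is a small sketch that feeds ParseSecret a hand-written body (the JSON is illustrative, not a captured Vault response) and reads the standardized fields back:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Illustrative body only; in real use the reader is resp.Body.
	body := `{
		"renewable": true,
		"data": {
			"ttl": 300,
			"policies": ["default", "dev"],
			"metadata": {"team": "infra"}
		}
	}`

	secret, err := api.ParseSecret(strings.NewReader(body))
	if err != nil {
		panic(err)
	}

	ttl, _ := secret.TokenTTL()           // 5m0s, parsed from data.ttl
	policies, _ := secret.TokenPolicies() // [default dev]
	meta, _ := secret.TokenMetadata()     // map[team:infra]
	fmt.Println(ttl, policies, meta)
}
```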
+ var buf bytes.Buffer + _, err := buf.ReadFrom(r) + if err != nil { + return nil, err + } + if buf.Len() == 0 { + return nil, nil + } + + // First decode the JSON into a map[string]interface{} + var secret Secret + if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { + return nil, err + } + + return &secret, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go new file mode 100644 index 00000000..837eac4f --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -0,0 +1,62 @@ +package api + +import ( + "context" + "fmt" +) + +// SSH is used to return a client to invoke operations on SSH backend. +type SSH struct { + c *Client + MountPoint string +} + +// SSH returns the client for logical-backend API calls. +func (c *Client) SSH() *SSH { + return c.SSHWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHWithMountPoint returns the client with specific SSH mount point. +func (c *Client) SSHWithMountPoint(mountPoint string) *SSH { + return &SSH{ + c: c, + MountPoint: mountPoint, + } +} + +// Credential invokes the SSH backend API to create a credential to establish an SSH session. +func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/creds/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +// SignKey signs the given public key and returns a signed public key to pass +// along with the SSH request. +func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role)) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go new file mode 100644 index 00000000..a4348ca2 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -0,0 +1,234 @@ +package api + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "os" + + "github.com/hashicorp/errwrap" + cleanhttp "github.com/hashicorp/go-cleanhttp" + multierror "github.com/hashicorp/go-multierror" + rootcerts "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/sdk/helper/hclutil" + "github.com/mitchellh/mapstructure" +) + +const ( + // SSHHelperDefaultMountPoint is the default path at which SSH backend will be + // mounted in the Vault server. + SSHHelperDefaultMountPoint = "ssh" + + // VerifyEchoRequest is the echo request message sent as OTP by the helper. + VerifyEchoRequest = "verify-echo-request" + + // VerifyEchoResponse is the echo response message sent as a response to OTP + // matching echo request. 
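The Credential and SignKey wrappers above simply PUT to the mount's creds and sign endpoints. A sketch of requesting an OTP credential; the role name, target IP, and the "key" data field are assumptions based on how OTP-type roles typically respond:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// "otp_key_role" and the target IP are placeholders for values
	// defined by the SSH secrets mount in your environment.
	secret, err := client.SSH().Credential("otp_key_role", map[string]interface{}{
		"ip": "10.0.0.5",
	})
	if err != nil {
		log.Fatal(err)
	}
	// For OTP-type roles Vault returns the credential under "key".
	fmt.Println("otp:", secret.Data["key"])
}
```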
+ VerifyEchoResponse = "verify-echo-response" +) + +// SSHHelper is a structure representing a vault-ssh-helper which can talk to vault server +// in order to verify the OTP entered by the user. It contains the path at which +// SSH backend is mounted at the server. +type SSHHelper struct { + c *Client + MountPoint string +} + +// SSHVerifyResponse is a structure representing the fields in Vault server's +// response. +type SSHVerifyResponse struct { + // Usually empty. If the request OTP is echo request message, this will + // be set to the corresponding echo response message. + Message string `json:"message" mapstructure:"message"` + + // Username associated with the OTP + Username string `json:"username" mapstructure:"username"` + + // IP associated with the OTP + IP string `json:"ip" mapstructure:"ip"` + + // Name of the role against which the OTP was issued + RoleName string `json:"role_name" mapstructure:"role_name"` +} + +// SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. +type SSHHelperConfig struct { + VaultAddr string `hcl:"vault_addr"` + SSHMountPoint string `hcl:"ssh_mount_point"` + CACert string `hcl:"ca_cert"` + CAPath string `hcl:"ca_path"` + AllowedCidrList string `hcl:"allowed_cidr_list"` + AllowedRoles string `hcl:"allowed_roles"` + TLSSkipVerify bool `hcl:"tls_skip_verify"` + TLSServerName string `hcl:"tls_server_name"` +} + +// SetTLSParameters sets the TLS parameters for this SSH agent. +func (c *SSHHelperConfig) SetTLSParameters(clientConfig *Config, certPool *x509.CertPool) { + tlsConfig := &tls.Config{ + InsecureSkipVerify: c.TLSSkipVerify, + MinVersion: tls.VersionTLS12, + RootCAs: certPool, + ServerName: c.TLSServerName, + } + + transport := cleanhttp.DefaultTransport() + transport.TLSClientConfig = tlsConfig + clientConfig.HttpClient.Transport = transport +} + +// Returns true if any of the following conditions are true: +// * CA cert is configured +// * CA path is configured +// * configured to skip certificate verification +// * TLS server name is configured +// +func (c *SSHHelperConfig) shouldSetTLSParameters() bool { + return c.CACert != "" || c.CAPath != "" || c.TLSServerName != "" || c.TLSSkipVerify +} + +// NewClient returns a new client for the configuration. This client will be used by the +// vault-ssh-helper to communicate with Vault server and verify the OTP entered by user. +// If the configuration supplies Vault SSL certificates, then the client will +// have TLS configured in its transport. +func (c *SSHHelperConfig) NewClient() (*Client, error) { + // Creating a default client configuration for communicating with vault server. + clientConfig := DefaultConfig() + + // Pointing the client to the actual address of vault server. + clientConfig.Address = c.VaultAddr + + // Check if certificates are provided via config file. + if c.shouldSetTLSParameters() { + rootConfig := &rootcerts.Config{ + CAFile: c.CACert, + CAPath: c.CAPath, + } + certPool, err := rootcerts.LoadCACerts(rootConfig) + if err != nil { + return nil, err + } + // Enable TLS on the HTTP client information + c.SetTLSParameters(clientConfig, certPool) + } + + // Creating the client object for the given configuration + client, err := NewClient(clientConfig) + if err != nil { + return nil, err + } + + return client, nil +} + +// LoadSSHHelperConfig loads ssh-helper's configuration from the file and populates the corresponding +// in-memory structure. +// +// Vault address is a required parameter. 
+// Mount point defaults to "ssh". +func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { + contents, err := ioutil.ReadFile(path) + if err != nil && !os.IsNotExist(err) { + return nil, multierror.Prefix(err, "ssh_helper:") + } + return ParseSSHHelperConfig(string(contents)) +} + +// ParseSSHHelperConfig parses the given contents as a string for the SSHHelper +// configuration. +func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { + root, err := hcl.Parse(contents) + if err != nil { + return nil, errwrap.Wrapf("error parsing config: {{err}}", err) + } + + list, ok := root.Node.(*ast.ObjectList) + if !ok { + return nil, fmt.Errorf("error parsing config: file doesn't contain a root object") + } + + valid := []string{ + "vault_addr", + "ssh_mount_point", + "ca_cert", + "ca_path", + "allowed_cidr_list", + "allowed_roles", + "tls_skip_verify", + "tls_server_name", + } + if err := hclutil.CheckHCLKeys(list, valid); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + var c SSHHelperConfig + c.SSHMountPoint = SSHHelperDefaultMountPoint + if err := hcl.DecodeObject(&c, list); err != nil { + return nil, multierror.Prefix(err, "ssh_helper:") + } + + if c.VaultAddr == "" { + return nil, fmt.Errorf(`missing config "vault_addr"`) + } + return &c, nil +} + +// SSHHelper creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at default path ("ssh"). +func (c *Client) SSHHelper() *SSHHelper { + return c.SSHHelperWithMountPoint(SSHHelperDefaultMountPoint) +} + +// SSHHelperWithMountPoint creates an SSHHelper object which can talk to Vault server with SSH backend +// mounted at a specific mount point. +func (c *Client) SSHHelperWithMountPoint(mountPoint string) *SSHHelper { + return &SSHHelper{ + c: c, + MountPoint: mountPoint, + } +} + +// Verify verifies that the OTP provided by the user is present in the Vault server. The response +// will contain the IP address and username associated with the OTP. In case the +// OTP matches the echo request message, instead of searching an entry for the OTP, +// an echo response message is returned. This feature is used by ssh-helper to verify if +// it's configured correctly. +func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { + data := map[string]interface{}{ + "otp": otp, + } + verifyPath := fmt.Sprintf("/v1/%s/verify", c.MountPoint) + r := c.c.NewRequest("PUT", verifyPath) + if err := r.SetJSONBody(data); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + + if secret.Data == nil { + return nil, nil + } + + var verifyResp SSHVerifyResponse + err = mapstructure.Decode(secret.Data, &verifyResp) + if err != nil { + return nil, err + } + return &verifyResp, nil +}
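Putting the helper pieces together: a sketch that parses an inline configuration (the address is a placeholder), builds a client from it, and uses the echo-request constants above to check the setup without consuming a real OTP:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Illustrative config; vault.example.com is a placeholder address.
	config, err := api.ParseSSHHelperConfig(`
vault_addr = "https://vault.example.com:8200"
ssh_mount_point = "ssh"
tls_skip_verify = false
`)
	if err != nil {
		log.Fatal(err)
	}

	client, err := config.NewClient()
	if err != nil {
		log.Fatal(err)
	}

	// Sending the echo request verifies connectivity and mount setup
	// without consuming a real OTP.
	resp, err := client.SSHHelperWithMountPoint(config.SSHMountPoint).Verify(api.VerifyEchoRequest)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("echo response:", resp.Message) // expect "verify-echo-response"
}
```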
diff --git a/vendor/github.com/hashicorp/vault/api/sys.go b/vendor/github.com/hashicorp/vault/api/sys.go new file mode 100644 index 00000000..5fb11188 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys.go @@ -0,0 +1,11 @@ +package api + +// Sys is used to perform system-related operations on Vault. +type Sys struct { + c *Client +} + +// Sys is used to return the client for sys-related API calls. +func (c *Client) Sys() *Sys { + return &Sys{c: c} +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go new file mode 100644 index 00000000..2448c036 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -0,0 +1,136 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) AuditHash(path string, input string) (string, error) { + body := map[string]interface{}{ + "input": input, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit-hash/%s", path)) + if err := r.SetJSONBody(body); err != nil { + return "", err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return "", err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } + + return hashStr, nil +} + +func (c *Sys) ListAudit() (map[string]*Audit, error) { + r := c.c.NewRequest("GET", "/v1/sys/audit") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*Audit{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuditWithOptions instead +func (c *Sys) EnableAudit( + path string, auditType string, desc string, opts map[string]string) error { + return c.EnableAuditWithOptions(path, &EnableAuditOptions{ + Type: auditType, + Description: desc, + Options: opts, + }) +} + +func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAudit(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Structures for the requests/response are all down here. They aren't +// individually documented because they map almost directly to the raw HTTP API +// documentation. Please refer to that documentation for more details.
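As a usage sketch for the audit API above (the device path, description, and log file location are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// The mount path and log file location are illustrative.
	err = client.Sys().EnableAuditWithOptions("file-audit", &api.EnableAuditOptions{
		Type:        "file",
		Description: "plain-file audit device",
		Options:     map[string]string{"file_path": "/var/log/vault_audit.log"},
	})
	if err != nil {
		log.Fatal(err)
	}

	audits, err := client.Sys().ListAudit()
	if err != nil {
		log.Fatal(err)
	}
	for path, a := range audits {
		fmt.Printf("%s -> type=%s\n", path, a.Type)
	}
}
```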
+ +type EnableAuditOptions struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` +} + +type Audit struct { + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` + Path string `json:"path" mapstructure:"path"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go new file mode 100644 index 00000000..e7a9c222 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -0,0 +1,80 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListAuth() (map[string]*AuthMount, error) { + r := c.c.NewRequest("GET", "/v1/sys/auth") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*AuthMount{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +// DEPRECATED: Use EnableAuthWithOptions instead +func (c *Sys) EnableAuth(path, authType, desc string) error { + return c.EnableAuthWithOptions(path, &EnableAuthOptions{ + Type: authType, + Description: desc, + }) +} + +func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path)) + if err := r.SetJSONBody(options); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DisableAuth(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// Rather than duplicate, we can use modern Go's type aliasing +type EnableAuthOptions = MountInput +type AuthConfigInput = MountConfigInput +type AuthMount = MountOutput +type AuthConfigOutput = MountConfigOutput diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go new file mode 100644 index 00000000..64b3951d --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -0,0 +1,64 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { + return c.Capabilities(c.c.Token(), path) +} + +func (c *Sys) Capabilities(token, path string) ([]string, error) { + body := map[string]string{ + "token": token, + "path": path, + } + + reqPath := "/v1/sys/capabilities" + if token == c.c.Token() { + reqPath = 
fmt.Sprintf("%s-self", reqPath) + } + + r := c.c.NewRequest("POST", reqPath) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err + } + + if len(res) == 0 { + _, ok := secret.Data["capabilities"] + if ok { + err = mapstructure.Decode(secret.Data["capabilities"], &res) + if err != nil { + return nil, err + } + } + } + + return res, nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go new file mode 100644 index 00000000..d153a47c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -0,0 +1,105 @@ +package api + +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) CORSStatus() (*CORSResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/config/cors") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/config/cors") + if err := r.SetJSONBody(req); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) DisableCORS() (*CORSResponse, error) { + r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result CORSResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +type CORSRequest struct { + AllowedOrigins string `json:"allowed_origins" mapstructure:"allowed_origins"` + Enabled bool `json:"enabled" mapstructure:"enabled"` +} + +type CORSResponse struct { + AllowedOrigins string `json:"allowed_origins" mapstructure:"allowed_origins"` + 
Enabled bool `json:"enabled" mapstructure:"enabled"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go new file mode 100644 index 00000000..66f72dff --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ -0,0 +1,124 @@ +package api + +import "context" + +func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommon("/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, error) { + return c.generateRootStatusCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { + r := c.c.NewRequest("GET", path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/generate-root/attempt", otp, pgpKey) +} + +func (c *Sys) GenerateDROperationTokenInit(otp, pgpKey string) (*GenerateRootStatusResponse, error) { + return c.generateRootInitCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt", otp, pgpKey) +} + +func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "otp": otp, + "pgp_key": pgpKey, + } + + r := c.c.NewRequest("PUT", path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) GenerateRootCancel() error { + return c.generateRootCancelCommon("/v1/sys/generate-root/attempt") +} + +func (c *Sys) GenerateDROperationTokenCancel() error { + return c.generateRootCancelCommon("/v1/sys/replication/dr/secondary/generate-operation-token/attempt") +} + +func (c *Sys) generateRootCancelCommon(path string) error { + r := c.c.NewRequest("DELETE", path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) GenerateRootUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/generate-root/update", shard, nonce) +} + +func (c *Sys) GenerateDROperationTokenUpdate(shard, nonce string) (*GenerateRootStatusResponse, error) { + return c.generateRootUpdateCommon("/v1/sys/replication/dr/secondary/generate-operation-token/update", shard, nonce) +} + +func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRootStatusResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", path) + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := 
c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result GenerateRootStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type GenerateRootStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + Progress int `json:"progress"` + Required int `json:"required"` + Complete bool `json:"complete"` + EncodedToken string `json:"encoded_token"` + EncodedRootToken string `json:"encoded_root_token"` + PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go new file mode 100644 index 00000000..d5d77960 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -0,0 +1,41 @@ +package api + +import "context" + +func (c *Sys) Health() (*HealthResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/health") + // If the code is 400 or above it will automatically turn into an error, + // but the sys/health API defaults to returning 5xx when not sealed or + // inited, so we force this code to be something else so we parse correctly + r.Params.Add("uninitcode", "299") + r.Params.Add("sealedcode", "299") + r.Params.Add("standbycode", "299") + r.Params.Add("drsecondarycode", "299") + r.Params.Add("performancestandbycode", "299") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result HealthResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type HealthResponse struct { + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + Standby bool `json:"standby"` + PerformanceStandby bool `json:"performance_standby"` + ReplicationPerformanceMode string `json:"replication_performance_mode"` + ReplicationDRMode string `json:"replication_dr_mode"` + ServerTimeUTC int64 `json:"server_time_utc"` + Version string `json:"version"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + LastWAL uint64 `json:"last_wal,omitempty"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go new file mode 100644 index 00000000..0e499c6e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -0,0 +1,61 @@ +package api + +import "context" + +func (c *Sys) InitStatus() (bool, error) { + r := c.c.NewRequest("GET", "/v1/sys/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return false, err + } + defer resp.Body.Close() + + var result InitStatusResponse + err = resp.DecodeJSON(&result) + return result.Initialized, err +} + +func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/init") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result InitResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type InitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int 
`json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + RecoveryShares int `json:"recovery_shares"` + RecoveryThreshold int `json:"recovery_threshold"` + RecoveryPGPKeys []string `json:"recovery_pgp_keys"` + RootTokenPGPKey string `json:"root_token_pgp_key"` +} + +type InitStatusResponse struct { + Initialized bool +} + +type InitResponse struct { + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + RecoveryKeys []string `json:"recovery_keys"` + RecoveryKeysB64 []string `json:"recovery_keys_base64"` + RootToken string `json:"root_token"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go new file mode 100644 index 00000000..8846dcdf --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -0,0 +1,29 @@ +package api + +import "context" + +func (c *Sys) Leader() (*LeaderResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/leader") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result LeaderResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type LeaderResponse struct { + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` + LastWAL uint64 `json:"last_wal"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go new file mode 100644 index 00000000..09c9642a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -0,0 +1,105 @@ +package api + +import ( + "context" + "errors" +) + +func (c *Sys) Renew(id string, increment int) (*Secret, error) { + r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") + + body := map[string]interface{}{ + "increment": increment, + "lease_id": id, + } + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + return ParseSecret(resp.Body) +} + +func (c *Sys) Revoke(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokePrefix(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeForce(id string) error { + r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + if opts == nil { + return 
errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest("PUT", path) + if !opts.Force { + body := map[string]interface{}{ + "sync": opts.Sync, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type RevokeOptions struct { + LeaseID string + Force bool + Prefix bool + Sync bool +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go new file mode 100644 index 00000000..354b1ee9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -0,0 +1,185 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListMounts() (map[string]*MountOutput, error) { + r := c.c.NewRequest("GET", "/v1/sys/mounts") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + mounts := map[string]*MountOutput{} + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err + } + + return mounts, nil +} + +func (c *Sys) Mount(path string, mountInfo *MountInput) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) + if err := r.SetJSONBody(mountInfo); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) Unmount(path string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) Remount(from, to string) error { + body := map[string]interface{}{ + "from": from, + "to": to, + } + + r := c.c.NewRequest("POST", "/v1/sys/remount") + if err := r.SetJSONBody(body); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) TuneMount(path string, config MountConfigInput) error { + r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + if err := r.SetJSONBody(config); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + 
defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result MountConfigOutput + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +type MountInput struct { + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + Options map[string]string `json:"options"` + + // Deprecated: Newer server responses should be returning this information in the + // Type field (json: "type") instead. + PluginName string `json:"plugin_name,omitempty"` +} + +type MountConfigInput struct { + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + + // Deprecated: This field will always be blank for newer server responses. + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} + +type MountOutput struct { + UUID string `json:"uuid"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` +} + +type MountConfigOutput struct { + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` + AllowedResponseHeaders []string `json:"allowed_response_headers,omitempty" mapstructure:"allowed_response_headers"` + TokenType string `json:"token_type,omitempty" mapstructure:"token_type"` + + // Deprecated: This field will always be blank for newer server responses. 
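A sketch tying the mount management calls above together; the mount path, description, and TTL values are illustrative, and "version": "2" is the conventional option for a v2 kv engine:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Mount a kv engine at an illustrative path.
	err = client.Sys().Mount("kv-v2", &api.MountInput{
		Type:        "kv",
		Description: "team scratch space",
		Options:     map[string]string{"version": "2"},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Tune the mount after the fact; string TTLs accept Go-style durations.
	err = client.Sys().TuneMount("kv-v2", api.MountConfigInput{
		DefaultLeaseTTL: "168h",
		MaxLeaseTTL:     "720h",
	})
	if err != nil {
		log.Fatal(err)
	}
}
```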
+ PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go new file mode 100644 index 00000000..0ab022ba --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -0,0 +1,238 @@ +package api + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/mitchellh/mapstructure" +) + +// ListPluginsInput is used as input to the ListPlugins function. +type ListPluginsInput struct { + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// ListPluginsResponse is the response from the ListPlugins call. +type ListPluginsResponse struct { + // PluginsByType is the list of plugins by type. + PluginsByType map[consts.PluginType][]string `json:"types"` + + // Names is the list of names of the plugins. + // + // Deprecated: Newer server responses should be returning PluginsByType (json: + // "types") instead. + Names []string `json:"names"` +} + +// ListPlugins lists all plugins in the catalog and returns their names as a +// list of strings. +func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { + path := "" + method := "" + if i.Type == consts.PluginTypeUnknown { + path = "/v1/sys/plugins/catalog" + method = "GET" + } else { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Type) + method = "LIST" + } + + req := c.c.NewRequest(method, path) + if method == "LIST" { + // Set this for broader compatibility, but we use LIST above to be able + // to handle the wrapping lookup function + req.Method = "GET" + req.Params.Set("list", "true") + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil && resp == nil { + return nil, err + } + if resp == nil { + return nil, nil + } + defer resp.Body.Close() + + // We received an Unsupported Operation response from Vault, indicating + // Vault of an older version that doesn't support the GET method yet; + // switch it to a LIST. 
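Before the version fallback that follows, a sketch of how ListPlugins is typically driven; consts.PluginTypeUnknown requests every plugin type at once:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
	"github.com/hashicorp/vault/sdk/helper/consts"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// PluginTypeUnknown asks for every type; older servers take the
	// LIST-based fallback path handled below.
	resp, err := client.Sys().ListPlugins(&api.ListPluginsInput{
		Type: consts.PluginTypeUnknown,
	})
	if err != nil {
		log.Fatal(err)
	}
	if resp == nil {
		return
	}

	for pluginType, names := range resp.PluginsByType {
		fmt.Printf("%s: %d plugins\n", pluginType, len(names))
	}
}
```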
+ if resp.StatusCode == 405 { + req.Params.Set("list", "true") + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + var result struct { + Data struct { + Keys []string `json:"keys"` + } `json:"data"` + } + if err := resp.DecodeJSON(&result); err != nil { + return nil, err + } + return &ListPluginsResponse{Names: result.Data.Keys}, nil + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + result := &ListPluginsResponse{ + PluginsByType: make(map[consts.PluginType][]string), + } + if i.Type == consts.PluginTypeUnknown { + for pluginTypeStr, pluginsRaw := range secret.Data { + pluginType, err := consts.ParsePluginType(pluginTypeStr) + if err != nil { + return nil, err + } + + pluginsIfc, ok := pluginsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to parse plugins for %q type", pluginTypeStr) + } + + plugins := make([]string, len(pluginsIfc)) + for i, nameIfc := range pluginsIfc { + name, ok := nameIfc.(string) + if !ok { + return nil, fmt.Errorf("unable to parse plugin name at index %d for %q type", i, pluginTypeStr) + } + plugins[i] = name + } + result.PluginsByType[pluginType] = plugins + } + } else { + var respKeys []string + if err := mapstructure.Decode(secret.Data["keys"], &respKeys); err != nil { + return nil, err + } + result.PluginsByType[i.Type] = respKeys + } + + return result, nil +} + +// GetPluginInput is used as input to the GetPlugin function. +type GetPluginInput struct { + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// GetPluginResponse is the response from the GetPlugin call. +type GetPluginResponse struct { + Args []string `json:"args"` + Builtin bool `json:"builtin"` + Command string `json:"command"` + Name string `json:"name"` + SHA256 string `json:"sha256"` +} + +// GetPlugin retrieves information about the plugin. +func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodGet, path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data *GetPluginResponse + } + err = resp.DecodeJSON(&result) + if err != nil { + return nil, err + } + return result.Data, err +} + +// RegisterPluginInput is used as input to the RegisterPlugin function. +type RegisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` + + // Args is the list of args to spawn the process with. + Args []string `json:"args,omitempty"` + + // Command is the command to run. + Command string `json:"command,omitempty"` + + // SHA256 is the shasum of the plugin. + SHA256 string `json:"sha256,omitempty"` +} + +// RegisterPlugin registers the plugin with the given information.
+func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodPut, path) + + if err := req.SetJSONBody(i); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// DeregisterPluginInput is used as input to the DeregisterPlugin function. +type DeregisterPluginInput struct { + // Name is the name of the plugin. Required. + Name string `json:"-"` + + // Type of the plugin. Required. + Type consts.PluginType `json:"type"` +} + +// DeregisterPlugin removes the plugin with the given name from the plugin +// catalog. +func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { + path := catalogPathByType(i.Type, i.Name) + req := c.c.NewRequest(http.MethodDelete, path) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) + if err == nil { + defer resp.Body.Close() + } + return err +} + +// catalogPathByType is a helper to construct the proper API path by plugin type +func catalogPathByType(pluginType consts.PluginType, name string) string { + path := fmt.Sprintf("/v1/sys/plugins/catalog/%s/%s", pluginType, name) + + // Backwards compat, if type is not provided then use old path + if pluginType == consts.PluginTypeUnknown { + path = fmt.Sprintf("/v1/sys/plugins/catalog/%s", name) + } + + return path +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go new file mode 100644 index 00000000..c0c239f9 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -0,0 +1,113 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) ListPolicies() ([]string, error) { + r := c.c.NewRequest("LIST", "/v1/sys/policies/acl") + // Set this for broader compatibility, but we use LIST above to be able to + // handle the wrapping lookup function + r.Method = "GET" + r.Params.Set("list", "true") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result []string + err = mapstructure.Decode(secret.Data["keys"], &result) + if err != nil { + return nil, err + } + + return result, err +} + +func (c *Sys) GetPolicy(name string) (string, error) { + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil { + defer resp.Body.Close() + if resp.StatusCode == 404 { + return "", nil + } + } + if err != nil { + return "", err + } + + secret, err := ParseSecret(resp.Body) + if err != nil { + return "", err + } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + if policyRaw, ok := secret.Data["policy"]; ok { + return policyRaw.(string), nil + } + + return "", fmt.Errorf("no policy found in response") +} + +func (c *Sys) PutPolicy(name, rules string) 
error { + body := map[string]string{ + "policy": rules, + } + + r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + if err := r.SetJSONBody(body); err != nil { + return err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func (c *Sys) DeletePolicy(name string) error { + r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +type getPoliciesResp struct { + Rules string `json:"rules"` +} + +type listPoliciesResp struct { + Policies []string `json:"policies"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_raft.go b/vendor/github.com/hashicorp/vault/api/sys_raft.go new file mode 100644 index 00000000..6897dc0a --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_raft.go @@ -0,0 +1,130 @@ +package api + +import ( + "context" + "io" + "net/http" + + "github.com/hashicorp/vault/sdk/helper/consts" +) + +// RaftJoinResponse represents the response of the raft join API +type RaftJoinResponse struct { + Joined bool `json:"joined"` +} + +// RaftJoinRequest represents the parameters consumed by the raft join API +type RaftJoinRequest struct { + LeaderAPIAddr string `json:"leader_api_addr"` + LeaderCACert string `json:"leader_ca_cert"` + LeaderClientCert string `json:"leader_client_cert"` + LeaderClientKey string `json:"leader_client_key"` + Retry bool `json:"retry"` +} + +// RaftJoin adds the node from which this call is invoked to the raft +// cluster represented by the leader address in the parameter. +func (c *Sys) RaftJoin(opts *RaftJoinRequest) (*RaftJoinResponse, error) { + r := c.c.NewRequest("POST", "/v1/sys/storage/raft/join") + + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RaftJoinResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +// RaftSnapshot invokes the API that takes the snapshot of the raft cluster and +// writes it to the supplied io.Writer. +func (c *Sys) RaftSnapshot(snapWriter io.Writer) error { + r := c.c.NewRequest("GET", "/v1/sys/storage/raft/snapshot") + r.URL.RawQuery = r.Params.Encode() + + req, err := http.NewRequest(http.MethodGet, r.URL.RequestURI(), nil) + if err != nil { + return err + } + + req.URL.User = r.URL.User + req.URL.Scheme = r.URL.Scheme + req.URL.Host = r.URL.Host + req.Host = r.URL.Host + + if r.Headers != nil { + for header, vals := range r.Headers { + for _, val := range vals { + req.Header.Add(header, val) + } + } + } + + if len(r.ClientToken) != 0 { + req.Header.Set(consts.AuthHeaderName, r.ClientToken) + } + + if len(r.WrapTTL) != 0 { + req.Header.Set("X-Vault-Wrap-TTL", r.WrapTTL) + } + + if len(r.MFAHeaderVals) != 0 { + for _, mfaHeaderVal := range r.MFAHeaderVals { + req.Header.Add("X-Vault-MFA", mfaHeaderVal) + } + } + + if r.PolicyOverride { + req.Header.Set("X-Vault-Policy-Override", "true") + }
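Since RaftSnapshot only needs an io.Writer, streaming a snapshot to disk is a short exercise once a file is open. A sketch (the destination path is a placeholder):

```go
package main

import (
	"log"
	"os"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Destination path is illustrative; any io.Writer works.
	f, err := os.Create("vault-raft.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Streams the snapshot body directly into the file.
	if err := client.Sys().RaftSnapshot(f); err != nil {
		log.Fatal(err)
	}
}
```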
+ + // Avoiding the use of RawRequestWithContext, which reads the response body + // to determine whether the body contains an error message. + var result *Response + resp, err := c.c.config.HttpClient.Do(req) + if err != nil { + return err + } + if resp == nil { + return nil + } + + result = &Response{Response: resp} + if err := result.Error(); err != nil { + return err + } + + _, err = io.Copy(snapWriter, resp.Body) + if err != nil { + return err + } + + return nil +} + +// RaftSnapshotRestore reads the snapshot from the io.Reader and installs that +// snapshot, returning the cluster to the state defined by it. +func (c *Sys) RaftSnapshotRestore(snapReader io.Reader, force bool) error { + path := "/v1/sys/storage/raft/snapshot" + if force { + path = "/v1/sys/storage/raft/snapshot-force" + } + r := c.c.NewRequest("POST", path) + + r.Body = snapReader + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go new file mode 100644 index 00000000..153e486c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -0,0 +1,388 @@ +package api + +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + +func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys)
RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/init") + if err := r.SetJSONBody(config); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/update") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } 
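+	// When the rekey was initialized with PGP keys and a backup was
+	// requested, the returned Keys and KeysB64 maps are grouped by PGP key
+	// fingerprint (see RekeyRetrieveResponse below).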
+ + return &result, err +} + +func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-key-backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result RekeyRetrieveResponse + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + + return &result, err +} + +func (c *Sys) RekeyDeleteBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyDeleteRecoveryBackup() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-key-backup") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + + return err +} + +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type RekeyInitRequest struct { + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` +} + +type RekeyStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` +} + +type RekeyUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool 
`json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` +} + +type RekeyRetrieveResponse struct { + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go new file mode 100644 index 00000000..c525feb0 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -0,0 +1,77 @@ +package api + +import ( + "context" + "encoding/json" + "errors" + "time" +) + +func (c *Sys) Rotate() error { + r := c.c.NewRequest("POST", "/v1/sys/rotate") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) KeyStatus() (*KeyStatus, error) { + r := c.c.NewRequest("GET", "/v1/sys/key-status") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") + } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + return &result, err +} + +type KeyStatus struct { + Term int `json:"term"` + InstallTime time.Time `json:"install_time"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go new file mode 100644 index 00000000..301d3f26 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -0,0 +1,86 @@ +package api + +import "context" + +func (c *Sys) SealStatus() (*SealStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/seal-status") + return sealStatusRequest(c, r) +} + +func (c *Sys) Seal() error { + r := c.c.NewRequest("PUT", "/v1/sys/seal") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) ResetUnsealProcess() (*SealStatusResponse, error) { + body := map[string]interface{}{"reset": true} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := 
r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { + body := map[string]interface{}{"key": shard} + + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func (c *Sys) UnsealWithOptions(opts *UnsealOpts) (*SealStatusResponse, error) { + r := c.c.NewRequest("PUT", "/v1/sys/unseal") + if err := r.SetJSONBody(opts); err != nil { + return nil, err + } + + return sealStatusRequest(c, r) +} + +func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result SealStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +type SealStatusResponse struct { + Type string `json:"type"` + Initialized bool `json:"initialized"` + Sealed bool `json:"sealed"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Nonce string `json:"nonce"` + Version string `json:"version"` + Migration bool `json:"migration"` + ClusterName string `json:"cluster_name,omitempty"` + ClusterID string `json:"cluster_id,omitempty"` + RecoverySeal bool `json:"recovery_seal"` +} + +type UnsealOpts struct { + Key string `json:"key"` + Reset bool `json:"reset"` + Migrate bool `json:"migrate"` +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go new file mode 100644 index 00000000..55dc6fbc --- /dev/null +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -0,0 +1,15 @@ +package api + +import "context" + +func (c *Sys) StepDown() error { + r := c.c.NewRequest("PUT", "/v1/sys/step-down") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() + } + return err +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go new file mode 100644 index 00000000..356d4548 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/compressutil/compress.go @@ -0,0 +1,207 @@ +package compressutil + +import ( + "bytes" + "compress/gzip" + "compress/lzw" + "fmt" + "io" + + "github.com/golang/snappy" + "github.com/hashicorp/errwrap" + "github.com/pierrec/lz4" +) + +const ( + // A byte value used as a canary prefix for the compressed information + // which is used to distinguish if a JSON input is compressed or not. + // The value of this constant should not be a first character of any + // valid JSON string. + + CompressionTypeGzip = "gzip" + CompressionCanaryGzip byte = 'G' + + CompressionTypeLZW = "lzw" + CompressionCanaryLZW byte = 'L' + + CompressionTypeSnappy = "snappy" + CompressionCanarySnappy byte = 'S' + + CompressionTypeLZ4 = "lz4" + CompressionCanaryLZ4 byte = '4' +) + +// SnappyReadCloser embeds the snappy reader which implements the io.Reader +// interface. The decompress procedure in this utility expects an +// io.ReadCloser. This type implements the io.Closer interface to retain the +// generic way of decompression. 
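+// Neither the snappy reader nor the lz4 reader implements io.Closer itself,
+// which is why both are wrapped in this type with a no-op Close below.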
+type CompressUtilReadCloser struct {
+	io.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (c *CompressUtilReadCloser) Close() error {
+	return nil
+}
+
+// CompressionConfig is used to select a compression type to be performed by
+// Compress and Decompress utilities.
+// Supported types are:
+// * CompressionTypeLZW
+// * CompressionTypeGzip
+// * CompressionTypeSnappy
+// * CompressionTypeLZ4
+//
+// When using CompressionTypeGzip, the compression levels can also be chosen:
+// * gzip.DefaultCompression
+// * gzip.BestSpeed
+// * gzip.BestCompression
+type CompressionConfig struct {
+	// Type of the compression algorithm to be used
+	Type string
+
+	// When using Gzip format, the compression level to employ
+	GzipCompressionLevel int
+}
+
+// Compress places the canary byte in a buffer and uses the same buffer to fill
+// in the compressed information of the given input. The configuration supports
+// four types of compression: LZW, Gzip, Snappy, and LZ4. When using the Gzip
+// compression format, if GzipCompressionLevel is not specified,
+// 'gzip.DefaultCompression' will be assumed.
+func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
+	var buf bytes.Buffer
+	var writer io.WriteCloser
+	var err error
+
+	if config == nil {
+		return nil, fmt.Errorf("config is nil")
+	}
+
+	// Write the canary into the buffer and create a writer to compress the
+	// input data based on the configured type
+	switch config.Type {
+	case CompressionTypeLZW:
+		buf.Write([]byte{CompressionCanaryLZW})
+		writer = lzw.NewWriter(&buf, lzw.LSB, 8)
+
+	case CompressionTypeGzip:
+		buf.Write([]byte{CompressionCanaryGzip})
+
+		switch {
+		case config.GzipCompressionLevel == gzip.BestCompression,
+			config.GzipCompressionLevel == gzip.BestSpeed,
+			config.GzipCompressionLevel == gzip.DefaultCompression:
+			// These are valid compression levels
+		default:
+			// If compression level is set to NoCompression or to
+			// any invalid value, fall back to DefaultCompression
+			config.GzipCompressionLevel = gzip.DefaultCompression
+		}
+		writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+
+	case CompressionTypeSnappy:
+		buf.Write([]byte{CompressionCanarySnappy})
+		writer = snappy.NewBufferedWriter(&buf)
+
+	case CompressionTypeLZ4:
+		buf.Write([]byte{CompressionCanaryLZ4})
+		writer = lz4.NewWriter(&buf)
+
+	default:
+		return nil, fmt.Errorf("unsupported compression type")
+	}
+
+	if err != nil {
+		return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err)
+	}
+
+	if writer == nil {
+		return nil, fmt.Errorf("failed to create a compression writer")
+	}
+
+	// Compress the input and place it in the same buffer containing the
+	// canary byte.
+	if _, err = writer.Write(data); err != nil {
+		return nil, errwrap.Wrapf("failed to compress input data: {{err}}", err)
+	}
+
+	// Close the io.WriteCloser
+	if err = writer.Close(); err != nil {
+		return nil, err
+	}
+
+	// Return the compressed bytes with canary byte at the start
+	return buf.Bytes(), nil
+}
+
+// Decompress checks if the first byte in the input matches the canary byte.
+// If the first byte is a canary byte, then the input past the canary byte
+// will be decompressed using the method indicated by that canary byte.
+// If the first byte isn't a canary byte, then the utility returns a boolean
+// value indicating that the input was not compressed.
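+//
+// A minimal round-trip sketch using the helpers defined in this file:
+//
+//	blob, err := Compress([]byte(`{"k":"v"}`), &CompressionConfig{Type: CompressionTypeSnappy})
+//	if err == nil {
+//		raw, notCompressed, _ := Decompress(blob)
+//		// notCompressed is false and raw holds the original JSON bytes.
+//	}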
+func Decompress(data []byte) ([]byte, bool, error) {
+	var err error
+	var reader io.ReadCloser
+	if len(data) == 0 {
+		return nil, false, fmt.Errorf("'data' being decompressed is empty")
+	}
+
+	canary := data[0]
+	cData := data[1:]
+
+	switch canary {
+	// If the first byte matches the canary byte, remove the canary
+	// byte and try to decompress the data that is after the canary.
+	case CompressionCanaryGzip:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader, err = gzip.NewReader(bytes.NewReader(cData))
+
+	case CompressionCanaryLZW:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = lzw.NewReader(bytes.NewReader(cData), lzw.LSB, 8)
+
+	case CompressionCanarySnappy:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: snappy.NewReader(bytes.NewReader(cData)),
+		}
+
+	case CompressionCanaryLZ4:
+		if len(data) < 2 {
+			return nil, false, fmt.Errorf("invalid 'data' after the canary")
+		}
+		reader = &CompressUtilReadCloser{
+			Reader: lz4.NewReader(bytes.NewReader(cData)),
+		}
+
+	default:
+		// If the first byte doesn't match the canary byte, it means
+		// that the content was not compressed at all. Indicate to the
+		// caller that the input was not compressed.
+		return nil, true, nil
+	}
+	if err != nil {
+		return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err)
+	}
+	if reader == nil {
+		return nil, false, fmt.Errorf("failed to create a compression reader")
+	}
+
+	// Close the io.ReadCloser
+	defer reader.Close()
+
+	// Read all the compressed data into a buffer
+	var buf bytes.Buffer
+	if _, err = io.Copy(&buf, reader); err != nil {
+		return nil, false, err
+	}
+
+	return buf.Bytes(), false, nil
+}
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go
new file mode 100644
index 00000000..b62962e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/agent.go
@@ -0,0 +1,5 @@
+package consts
+
+// AgentPathCacheClear is the path that the agent will use as its cache-clear
+// endpoint.
+const AgentPathCacheClear = "/agent/v1/cache-clear"
diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go
new file mode 100644
index 00000000..769a7858
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/consts.go
@@ -0,0 +1,28 @@
+package consts
+
+const (
+	// ExpirationRestoreWorkerCount specifies the number of workers to use while
+	// restoring leases into the expiration manager
+	ExpirationRestoreWorkerCount = 64
+
+	// NamespaceHeaderName is the header set to specify which namespace the
+	// request is intended for.
+	NamespaceHeaderName = "X-Vault-Namespace"
+
+	// AuthHeaderName is the name of the header containing the token.
+	AuthHeaderName = "X-Vault-Token"
+
+	// PerformanceReplicationALPN is the negotiated protocol used for
+	// performance replication.
+	PerformanceReplicationALPN = "replication_v1"
+
+	// DRReplicationALPN is the negotiated protocol used for
+	// dr replication.
+ DRReplicationALPN = "replication_dr_v1" + + PerfStandbyALPN = "perf_standby_v1" + + RequestForwardingALPN = "req_fw_sb-act_v1" + + RaftStorageALPN = "raft_storage_v1" +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go new file mode 100644 index 00000000..d4e60e54 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/error.go @@ -0,0 +1,21 @@ +package consts + +import "errors" + +var ( + // ErrSealed is returned if an operation is performed on a sealed barrier. + // No operation is expected to succeed before unsealing + ErrSealed = errors.New("Vault is sealed") + + // ErrStandby is returned if an operation is performed on a standby Vault. + // No operation is expected to succeed until active. + ErrStandby = errors.New("Vault is in standby mode") + + // ErrPathContainsParentReferences is returned when a path contains parent + // references. + ErrPathContainsParentReferences = errors.New("path cannot contain parent references") + + // ErrInvalidWrappingToken is returned when checking for the validity of + // a wrapping token that turns out to be invalid. + ErrInvalidWrappingToken = errors.New("wrapping token is not valid or does not exist") +) diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go new file mode 100644 index 00000000..e0a00e48 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/plugin_types.go @@ -0,0 +1,59 @@ +package consts + +import "fmt" + +var PluginTypes = []PluginType{ + PluginTypeUnknown, + PluginTypeCredential, + PluginTypeDatabase, + PluginTypeSecrets, +} + +type PluginType uint32 + +// This is a list of PluginTypes used by Vault. +// If we need to add any in the future, it would +// be best to add them to the _end_ of the list below +// because they resolve to incrementing numbers, +// which may be saved in state somewhere. Thus if +// the name for one of those numbers changed because +// a value were added to the middle, that could cause +// the wrong plugin types to be read from storage +// for a given underlying number. Example of the problem +// here: https://play.golang.org/p/YAaPw5ww3er +const ( + PluginTypeUnknown PluginType = iota + PluginTypeCredential + PluginTypeDatabase + PluginTypeSecrets +) + +func (p PluginType) String() string { + switch p { + case PluginTypeUnknown: + return "unknown" + case PluginTypeCredential: + return "auth" + case PluginTypeDatabase: + return "database" + case PluginTypeSecrets: + return "secret" + default: + return "unsupported" + } +} + +func ParsePluginType(pluginType string) (PluginType, error) { + switch pluginType { + case "unknown": + return PluginTypeUnknown, nil + case "auth": + return PluginTypeCredential, nil + case "database": + return PluginTypeDatabase, nil + case "secret": + return PluginTypeSecrets, nil + default: + return PluginTypeUnknown, fmt.Errorf("%q is not a supported plugin type", pluginType) + } +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go new file mode 100644 index 00000000..f3cafe75 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/consts/replication.go @@ -0,0 +1,150 @@ +package consts + +import "time" + +const ( + //N.B. 
This needs to be excluded from replication despite the name; it's + //merely saying that this is cluster information for the replicated + //cluster. + CoreReplicatedClusterPrefix = "core/cluster/replicated/" + CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/" + + CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info" + CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/" + CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info" + CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/" + + // This is an identifier for the current secondary in the replicated paths + // manager. It should contain a character that is not allowed in secondary + // ids to ensure it doesn't collide. + CurrentReplicatedSecondaryIdentifier = ".current" +) + +type ReplicationState uint32 + +var ReplicationStaleReadTimeout = 2 * time.Second + +const ( + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + // Don't add anything here. Adding anything to this Old block would cause + // the rest of the values to change below. This was done originally to + // ensure no overlap between old and new values. + + ReplicationUnknown ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! + ReplicationPerformanceSecondary + OldSplitReplicationBootstrapping + ReplicationDRPrimary + ReplicationDRSecondary + ReplicationPerformanceBootstrapping + ReplicationDRBootstrapping + ReplicationPerformanceDisabled + ReplicationDRDisabled + ReplicationPerformanceStandby +) + +// We verify no change to the above values are made +func init() { + + if OldReplicationBootstrapping != 3 { + panic("Replication Constants have changed") + } + + if ReplicationPerformancePrimary != 1<<5 { + panic("Replication Constants have changed") + } +} + +func (r ReplicationState) string() string { + switch r { + case ReplicationPerformanceSecondary: + return "secondary" + case ReplicationPerformancePrimary: + return "primary" + case ReplicationPerformanceBootstrapping: + return "bootstrapping" + case ReplicationPerformanceDisabled: + return "disabled" + case ReplicationDRPrimary: + return "primary" + case ReplicationDRSecondary: + return "secondary" + case ReplicationDRBootstrapping: + return "bootstrapping" + case ReplicationDRDisabled: + return "disabled" + } + + return "unknown" +} + +func (r ReplicationState) StateStrings() []string { + var ret []string + if r.HasState(ReplicationPerformanceSecondary) { + ret = append(ret, "perf-secondary") + } + if r.HasState(ReplicationPerformancePrimary) { + ret = append(ret, "perf-primary") + } + if r.HasState(ReplicationPerformanceBootstrapping) { + ret = append(ret, "perf-bootstrapping") + } + if r.HasState(ReplicationPerformanceDisabled) { + ret = append(ret, "perf-disabled") + } + if r.HasState(ReplicationDRPrimary) { + ret = append(ret, "dr-primary") + } + if r.HasState(ReplicationDRSecondary) { + ret = append(ret, "dr-secondary") + } + if r.HasState(ReplicationDRBootstrapping) { + ret = append(ret, "dr-bootstrapping") + } + if r.HasState(ReplicationDRDisabled) { + ret = append(ret, "dr-disabled") + } + if r.HasState(ReplicationPerformanceStandby) { + ret = append(ret, "perfstandby") + } + + return ret +} + +func (r ReplicationState) GetDRString() string { + switch { + case r.HasState(ReplicationDRBootstrapping): + return ReplicationDRBootstrapping.string() + case r.HasState(ReplicationDRPrimary): + 
return ReplicationDRPrimary.string() + case r.HasState(ReplicationDRSecondary): + return ReplicationDRSecondary.string() + case r.HasState(ReplicationDRDisabled): + return ReplicationDRDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) GetPerformanceString() string { + switch { + case r.HasState(ReplicationPerformanceBootstrapping): + return ReplicationPerformanceBootstrapping.string() + case r.HasState(ReplicationPerformancePrimary): + return ReplicationPerformancePrimary.string() + case r.HasState(ReplicationPerformanceSecondary): + return ReplicationPerformanceSecondary.string() + case r.HasState(ReplicationPerformanceDisabled): + return ReplicationPerformanceDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } +func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } +func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } +func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go new file mode 100644 index 00000000..0b120367 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/hclutil/hcl.go @@ -0,0 +1,36 @@ +package hclutil + +import ( + "fmt" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/hcl/ast" +) + +// CheckHCLKeys checks whether the keys in the AST list contains any of the valid keys provided. +func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go new file mode 100644 index 00000000..c03a4f8c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/jsonutil/json.go @@ -0,0 +1,100 @@ +package jsonutil + +import ( + "bytes" + "compress/gzip" + "encoding/json" + "fmt" + "io" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/sdk/helper/compressutil" +) + +// Encodes/Marshals the given object into JSON +func EncodeJSON(in interface{}) ([]byte, error) { + if in == nil { + return nil, fmt.Errorf("input for encoding is nil") + } + var buf bytes.Buffer + enc := json.NewEncoder(&buf) + if err := enc.Encode(in); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// EncodeJSONAndCompress encodes the given input into JSON and compresses the +// encoded value (using Gzip format BestCompression level, by default). A +// canary byte is placed at the beginning of the returned bytes for the logic +// in decompression method to identify compressed input. 
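+//
+// A minimal sketch of the intended round trip, decoding with DecodeJSON from
+// this package (which transparently detects and decompresses the canary):
+//
+//	blob, err := EncodeJSONAndCompress(map[string]string{"k": "v"}, nil)
+//	if err == nil {
+//		var out map[string]string
+//		err = DecodeJSON(blob, &out)
+//	}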
+func EncodeJSONAndCompress(in interface{}, config *compressutil.CompressionConfig) ([]byte, error) { + if in == nil { + return nil, fmt.Errorf("input for encoding is nil") + } + + // First JSON encode the given input + encodedBytes, err := EncodeJSON(in) + if err != nil { + return nil, err + } + + if config == nil { + config = &compressutil.CompressionConfig{ + Type: compressutil.CompressionTypeGzip, + GzipCompressionLevel: gzip.BestCompression, + } + } + + return compressutil.Compress(encodedBytes, config) +} + +// DecodeJSON tries to decompress the given data. The call to decompress, fails +// if the content was not compressed in the first place, which is identified by +// a canary byte before the compressed data. If the data is not compressed, it +// is JSON decoded directly. Otherwise the decompressed data will be JSON +// decoded. +func DecodeJSON(data []byte, out interface{}) error { + if data == nil || len(data) == 0 { + return fmt.Errorf("'data' being decoded is nil") + } + if out == nil { + return fmt.Errorf("output parameter 'out' is nil") + } + + // Decompress the data if it was compressed in the first place + decompressedBytes, uncompressed, err := compressutil.Decompress(data) + if err != nil { + return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) + } + if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { + return fmt.Errorf("decompressed data being decoded is invalid") + } + + // If the input supplied failed to contain the compression canary, it + // will be notified by the compression utility. Decode the decompressed + // input. + if !uncompressed { + data = decompressedBytes + } + + return DecodeJSONFromReader(bytes.NewReader(data), out) +} + +// Decodes/Unmarshals the given io.Reader pointing to a JSON, into a desired object +func DecodeJSONFromReader(r io.Reader, out interface{}) error { + if r == nil { + return fmt.Errorf("'io.Reader' being decoded is nil") + } + if out == nil { + return fmt.Errorf("output parameter 'out' is nil") + } + + dec := json.NewDecoder(r) + + // While decoding JSON values, interpret the integer values as `json.Number`s instead of `float64`. 
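+	// This matters because float64 can only represent integers exactly up
+	// to 2^53; larger values (e.g. nanosecond timestamps) would otherwise
+	// be silently rounded.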
+ dec.UseNumber() + + // Since 'out' is an interface representing a pointer, pass it to the decoder without an '&' + return dec.Decode(out) +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/parseutil/parseutil.go new file mode 100644 index 00000000..ea8289b4 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/parseutil/parseutil.go @@ -0,0 +1,174 @@ +package parseutil + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/hashicorp/vault/sdk/helper/strutil" + "github.com/mitchellh/mapstructure" +) + +func ParseDurationSecond(in interface{}) (time.Duration, error) { + var dur time.Duration + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch inp := in.(type) { + case nil: + // return default of zero + case string: + if inp == "" { + return dur, nil + } + var err error + // Look for a suffix otherwise its a plain second value + if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") || strings.HasSuffix(inp, "ms") { + dur, err = time.ParseDuration(inp) + if err != nil { + return dur, err + } + } else { + // Plain integer + secs, err := strconv.ParseInt(inp, 10, 64) + if err != nil { + return dur, err + } + dur = time.Duration(secs) * time.Second + } + case int: + dur = time.Duration(inp) * time.Second + case int32: + dur = time.Duration(inp) * time.Second + case int64: + dur = time.Duration(inp) * time.Second + case uint: + dur = time.Duration(inp) * time.Second + case uint32: + dur = time.Duration(inp) * time.Second + case uint64: + dur = time.Duration(inp) * time.Second + case float32: + dur = time.Duration(inp) * time.Second + case float64: + dur = time.Duration(inp) * time.Second + case time.Duration: + dur = inp + default: + return 0, errors.New("could not parse duration from input") + } + + return dur, nil +} + +func ParseInt(in interface{}) (int64, error) { + var ret int64 + jsonIn, ok := in.(json.Number) + if ok { + in = jsonIn.String() + } + switch in.(type) { + case string: + inp := in.(string) + if inp == "" { + return 0, nil + } + var err error + left, err := strconv.ParseInt(inp, 10, 64) + if err != nil { + return ret, err + } + ret = left + case int: + ret = int64(in.(int)) + case int32: + ret = int64(in.(int32)) + case int64: + ret = in.(int64) + case uint: + ret = int64(in.(uint)) + case uint32: + ret = int64(in.(uint32)) + case uint64: + ret = int64(in.(uint64)) + default: + return 0, errors.New("could not parse value from input") + } + + return ret, nil +} + +func ParseBool(in interface{}) (bool, error) { + var result bool + if err := mapstructure.WeakDecode(in, &result); err != nil { + return false, err + } + return result, nil +} + +func ParseCommaStringSlice(in interface{}) ([]string, error) { + rawString, ok := in.(string) + if ok && rawString == "" { + return []string{}, nil + } + var result []string + config := &mapstructure.DecoderConfig{ + Result: &result, + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToSliceHookFunc(","), + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return nil, err + } + if err := decoder.Decode(in); err != nil { + return nil, err + } + return strutil.TrimStrings(result), nil +} + +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + out := 
make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return nil, fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing %v as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + } + + default: + return nil, fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error parsing address %q: {{err}}", addr), err) + } + out = append(out, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return out, nil +} diff --git a/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go new file mode 100644 index 00000000..0069a137 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/sdk/helper/strutil/strutil.go @@ -0,0 +1,447 @@ +package strutil + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/hashicorp/errwrap" + glob "github.com/ryanuber/go-glob" +) + +// StrListContainsGlob looks for a string in a list of strings and allows +// globs. +func StrListContainsGlob(haystack []string, needle string) bool { + for _, item := range haystack { + if glob.Glob(item, needle) { + return true + } + } + return false +} + +// StrListContains looks for a string in a list of strings. +func StrListContains(haystack []string, needle string) bool { + for _, item := range haystack { + if item == needle { + return true + } + } + return false +} + +// StrListSubset checks if a given list is a subset +// of another set +func StrListSubset(super, sub []string) bool { + for _, item := range sub { + if !StrListContains(super, item) { + return false + } + } + return true +} + +// ParseDedupAndSortStrings parses a comma separated list of strings +// into a slice of strings. The return slice will be sorted and will +// not contain duplicate or empty items. +func ParseDedupAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), false) +} + +// ParseDedupLowercaseAndSortStrings parses a comma separated list of +// strings into a slice of strings. The return slice will be sorted and +// will not contain duplicate or empty items. The values will be converted +// to lower case. +func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { + input = strings.TrimSpace(input) + parsed := []string{} + if input == "" { + // Don't return nil + return parsed + } + return RemoveDuplicates(strings.Split(input, sep), true) +} + +// ParseKeyValues parses a comma separated list of `=` tuples +// into a map[string]string. 
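+//
+// For example (a sketch; note that keys and values are lowercased):
+//
+//	out := map[string]string{}
+//	err := ParseKeyValues("ttl=5m,max_ttl=1h", out, ",")
+//	// out == map[string]string{"ttl": "5m", "max_ttl": "1h"}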
+func ParseKeyValues(input string, out map[string]string, sep string) error {
+	if out == nil {
+		return fmt.Errorf("'out' is nil")
+	}
+
+	keyValues := ParseDedupLowercaseAndSortStrings(input, sep)
+	if len(keyValues) == 0 {
+		return nil
+	}
+
+	for _, keyValue := range keyValues {
+		shards := strings.Split(keyValue, "=")
+		if len(shards) != 2 {
+			return fmt.Errorf("invalid format")
+		}
+
+		key := strings.TrimSpace(shards[0])
+		value := strings.TrimSpace(shards[1])
+		if key == "" || value == "" {
+			return fmt.Errorf("invalid pair: key: %q value: %q", key, value)
+		}
+		out[key] = value
+	}
+	return nil
+}
+
+// ParseArbitraryKeyValues parses arbitrary <key,value> tuples. The input
+// can be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * Comma separated list of `<key>=<value>` pairs
+// * Base64 encoded string containing comma separated list of
+//   `<key>=<value>` pairs
+//
+// Input will be parsed into the output parameter, which should
+// be a non-nil map[string]string.
+func ParseArbitraryKeyValues(input string, out map[string]string, sep string) error {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return nil
+	}
+	if out == nil {
+		return fmt.Errorf("'out' is nil")
+	}
+
+	// Try to base64 decode the input. If successful, consider the decoded
+	// value as input.
+	inputBytes, err := base64.StdEncoding.DecodeString(input)
+	if err == nil {
+		input = string(inputBytes)
+	}
+
+	// Try to JSON unmarshal the input. If successful, consider that the
+	// metadata was supplied as JSON input.
+	err = json.Unmarshal([]byte(input), &out)
+	if err != nil {
+		// If JSON unmarshalling fails, consider that the input was
+		// supplied as a comma separated string of 'key=value' pairs.
+		if err = ParseKeyValues(input, out, sep); err != nil {
+			return errwrap.Wrapf("failed to parse the input: {{err}}", err)
+		}
+	}
+
+	// Validate the parsed input
+	for key, value := range out {
+		if key != "" && value == "" {
+			return fmt.Errorf("invalid value for key %q", key)
+		}
+	}
+
+	return nil
+}
+
+// ParseStringSlice parses a `sep`-separated list of strings into a
+// []string with surrounding whitespace removed.
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseStringSlice(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return []string{}
+	}
+
+	splitStr := strings.Split(input, sep)
+	ret := make([]string, len(splitStr))
+	for i, val := range splitStr {
+		ret[i] = strings.TrimSpace(val)
+	}
+
+	return ret
+}
+
+// ParseArbitraryStringSlice parses an arbitrary string slice. The input
+// can be one of the following:
+// * JSON string
+// * Base64 encoded JSON string
+// * `sep` separated list of values
+// * Base64-encoded string containing a `sep` separated list of values
+//
+// Note that the separator is ignored if the input is found to already be in a
+// structured format (e.g., JSON)
+//
+// The output will always be a valid slice but may be of length zero.
+func ParseArbitraryStringSlice(input string, sep string) []string {
+	input = strings.TrimSpace(input)
+	if input == "" {
+		return []string{}
+	}
+
+	// Try to base64 decode the input. If successful, consider the decoded
+	// value as input.
+	inputBytes, err := base64.StdEncoding.DecodeString(input)
+	if err == nil {
+		input = string(inputBytes)
+	}
+
+	ret := []string{}
+
+	// Try to JSON unmarshal the input. If successful, consider that the
+	// metadata was supplied as JSON input.
+ err = json.Unmarshal([]byte(input), &ret) + if err != nil { + // If JSON unmarshalling fails, consider that the input was + // supplied as a separated string of values. + return ParseStringSlice(input, sep) + } + + if ret == nil { + return []string{} + } + + return ret +} + +// TrimStrings takes a slice of strings and returns a slice of strings +// with trimmed spaces +func TrimStrings(items []string) []string { + ret := make([]string, len(items)) + for i, item := range items { + ret[i] = strings.TrimSpace(item) + } + return ret +} + +// RemoveDuplicates removes duplicate and empty elements from a slice of +// strings. This also may convert the items in the slice to lower case and +// returns a sorted slice. +func RemoveDuplicates(items []string, lowercase bool) []string { + itemsMap := map[string]bool{} + for _, item := range items { + item = strings.TrimSpace(item) + if lowercase { + item = strings.ToLower(item) + } + if item == "" { + continue + } + itemsMap[item] = true + } + items = make([]string, 0, len(itemsMap)) + for item := range itemsMap { + items = append(items, item) + } + sort.Strings(items) + return items +} + +// RemoveDuplicatesStable removes duplicate and empty elements from a slice of +// strings, preserving order (and case) of the original slice. +// In all cases, strings are compared after trimming whitespace +// If caseInsensitive, strings will be compared after ToLower() +func RemoveDuplicatesStable(items []string, caseInsensitive bool) []string { + itemsMap := make(map[string]bool, len(items)) + deduplicated := make([]string, 0, len(items)) + + for _, item := range items { + key := strings.TrimSpace(item) + if caseInsensitive { + key = strings.ToLower(key) + } + if key == "" || itemsMap[key] { + continue + } + itemsMap[key] = true + deduplicated = append(deduplicated, item) + } + return deduplicated +} + +// RemoveEmpty removes empty elements from a slice of +// strings +func RemoveEmpty(items []string) []string { + if len(items) == 0 { + return items + } + itemsSlice := make([]string, 0, len(items)) + for _, item := range items { + if item == "" { + continue + } + itemsSlice = append(itemsSlice, item) + } + return itemsSlice +} + +// EquivalentSlices checks whether the given string sets are equivalent, as in, +// they contain the same values. +func EquivalentSlices(a, b []string) bool { + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + // First we'll build maps to ensure unique values + mapA := map[string]bool{} + mapB := map[string]bool{} + for _, keyA := range a { + mapA[keyA] = true + } + for _, keyB := range b { + mapB[keyB] = true + } + + // Now we'll build our checking slices + var sortedA, sortedB []string + for keyA := range mapA { + sortedA = append(sortedA, keyA) + } + for keyB := range mapB { + sortedB = append(sortedB, keyB) + } + sort.Strings(sortedA) + sort.Strings(sortedB) + + // Finally, compare + if len(sortedA) != len(sortedB) { + return false + } + + for i := range sortedA { + if sortedA[i] != sortedB[i] { + return false + } + } + + return true +} + +// EqualStringMaps tests whether two map[string]string objects are equal. +// Equal means both maps have the same sets of keys and values. This function +// is 6-10x faster than a call to reflect.DeepEqual(). 
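+// The speedup comes from avoiding reflection entirely: the comparison below
+// is a single length check plus one map lookup per key.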
+func EqualStringMaps(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + + for k := range a { + v, ok := b[k] + if !ok || a[k] != v { + return false + } + } + + return true +} + +// StrListDelete removes the first occurrence of the given item from the slice +// of strings if the item exists. +func StrListDelete(s []string, d string) []string { + if s == nil { + return s + } + + for index, element := range s { + if element == d { + return append(s[:index], s[index+1:]...) + } + } + + return s +} + +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. +func GlobbedStringsMatch(item, val string) bool { + if len(item) < 2 { + return val == item + } + + hasPrefix := strings.HasPrefix(item, "*") + hasSuffix := strings.HasSuffix(item, "*") + + if hasPrefix && hasSuffix { + return strings.Contains(val, item[1:len(item)-1]) + } else if hasPrefix { + return strings.HasSuffix(val, item[1:]) + } else if hasSuffix { + return strings.HasPrefix(val, item[:len(item)-1]) + } + + return val == item +} + +// AppendIfMissing adds a string to a slice if the given string is not present +func AppendIfMissing(slice []string, i string) []string { + if StrListContains(slice, i) { + return slice + } + return append(slice, i) +} + +// MergeSlices adds an arbitrary number of slices together, uniquely +func MergeSlices(args ...[]string) []string { + all := map[string]struct{}{} + for _, slice := range args { + for _, v := range slice { + all[v] = struct{}{} + } + } + + result := make([]string, 0, len(all)) + for k := range all { + result = append(result, k) + } + sort.Strings(result) + return result +} + +// Difference returns the set difference (A - B) of the two given slices. The +// result will also remove any duplicated values in set A regardless of whether +// that matches any values in set B. +func Difference(a, b []string, lowercase bool) []string { + if len(a) == 0 { + return a + } + if len(b) == 0 { + if !lowercase { + return a + } + newA := make([]string, len(a)) + for i, v := range a { + newA[i] = strings.ToLower(v) + } + return newA + } + + a = RemoveDuplicates(a, lowercase) + b = RemoveDuplicates(b, lowercase) + + itemsMap := map[string]bool{} + for _, aVal := range a { + itemsMap[aVal] = true + } + + // Perform difference calculation + for _, bVal := range b { + if _, ok := itemsMap[bVal]; ok { + itemsMap[bVal] = false + } + } + + items := []string{} + for item, exists := range itemsMap { + if exists { + items = append(items, item) + } + } + sort.Strings(items) + return items +} diff --git a/vendor/github.com/hashicorp/yamux/go.mod b/vendor/github.com/hashicorp/yamux/go.mod new file mode 100644 index 00000000..672a0e58 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/yamux diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go index 7abc7c74..18a078c8 100644 --- a/vendor/github.com/hashicorp/yamux/mux.go +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -3,6 +3,7 @@ package yamux import ( "fmt" "io" + "log" "os" "time" ) @@ -30,8 +31,13 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 - // LogOutput is used to control the log destination + // LogOutput is used to control the log destination. Either Logger or + // LogOutput can be set, not both. LogOutput io.Writer + + // Logger is used to pass in the logger to be used. 
Either Logger or + // LogOutput can be set, not both. + Logger *log.Logger } // DefaultConfig is used to return a default configuration @@ -57,6 +63,11 @@ func VerifyConfig(config *Config) error { if config.MaxStreamWindowSize < initialStreamWindow { return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) } + if config.LogOutput != nil && config.Logger != nil { + return fmt.Errorf("both Logger and LogOutput may not be set, select one") + } else if config.LogOutput == nil && config.Logger == nil { + return fmt.Errorf("one of Logger or LogOutput must be set, select one") + } return nil } diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go index e1798183..a80ddec3 100644 --- a/vendor/github.com/hashicorp/yamux/session.go +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -86,9 +86,14 @@ type sendReady struct { // newSession is used to construct a new session func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + logger := config.Logger + if logger == nil { + logger = log.New(config.LogOutput, "", log.LstdFlags) + } + s := &Session{ config: config, - logger: log.New(config.LogOutput, "", log.LstdFlags), + logger: logger, conn: conn, bufRead: bufio.NewReader(conn), pings: make(map[uint32]chan struct{}), @@ -123,6 +128,12 @@ func (s *Session) IsClosed() bool { } } +// CloseChan returns a read-only channel which is closed as +// soon as the session is closed. +func (s *Session) CloseChan() <-chan struct{} { + return s.shutdownCh +} + // NumStreams returns the number of currently open streams func (s *Session) NumStreams() int { s.streamLock.Lock() @@ -303,8 +314,10 @@ func (s *Session) keepalive() { case <-time.After(s.config.KeepAliveInterval): _, err := s.Ping() if err != nil { - s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) - s.exitErr(ErrKeepAliveTimeout) + if err != ErrSessionShutdown { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + } return } case <-s.shutdownCh: @@ -323,8 +336,17 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { - timer := time.NewTimer(s.config.ConnectionWriteTimeout) - defer timer.Stop() + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() ready := sendReady{Hdr: hdr, Body: body, Err: errCh} select { @@ -349,8 +371,17 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e // the send happens right here, we enforce the connection write timeout if we // can't queue the header to be sent. 
func (s *Session) sendNoWait(hdr header) error { - timer := time.NewTimer(s.config.ConnectionWriteTimeout) - defer timer.Stop() + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + select { + case <-timer.C: + default: + } + timerPool.Put(t) + }() select { case s.sendCh <- sendReady{Hdr: hdr}: @@ -408,11 +439,20 @@ func (s *Session) recv() { } } +// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + // recvLoop continues to receive data until a fatal error is encountered func (s *Session) recvLoop() error { defer close(s.recvDoneCh) hdr := header(make([]byte, headerSize)) - var handler func(header) error for { // Read the header if _, err := io.ReadFull(s.bufRead, hdr); err != nil { @@ -428,22 +468,12 @@ func (s *Session) recvLoop() error { return ErrInvalidVersion } - // Switch on the type - switch hdr.MsgType() { - case typeData: - handler = s.handleStreamMessage - case typeWindowUpdate: - handler = s.handleStreamMessage - case typeGoAway: - handler = s.handleGoAway - case typePing: - handler = s.handlePing - default: + mt := hdr.MsgType() + if mt < typeData || mt > typeGoAway { return ErrInvalidMsgType } - // Invoke the handler - if err := handler(hdr); err != nil { + if err := handlers[mt](s, hdr); err != nil { return err } } diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go index d216e281..aa239197 100644 --- a/vendor/github.com/hashicorp/yamux/stream.go +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -47,8 +47,8 @@ type Stream struct { recvNotifyCh chan struct{} sendNotifyCh chan struct{} - readDeadline time.Time - writeDeadline time.Time + readDeadline atomic.Value // time.Time + writeDeadline atomic.Value // time.Time } // newStream is used to construct a new stream within @@ -67,6 +67,8 @@ func newStream(session *Session, id uint32, state streamState) *Stream { recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), } + s.readDeadline.Store(time.Time{}) + s.writeDeadline.Store(time.Time{}) return s } @@ -122,8 +124,9 @@ START: WAIT: var timeout <-chan time.Time var timer *time.Timer - if !s.readDeadline.IsZero() { - delay := s.readDeadline.Sub(time.Now()) + readDeadline := s.readDeadline.Load().(time.Time) + if !readDeadline.IsZero() { + delay := readDeadline.Sub(time.Now()) timer = time.NewTimer(delay) timeout = timer.C } @@ -188,7 +191,7 @@ START: // Send the header s.sendHdr.encode(typeData, flags, s.id, max) - if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { return 0, err } @@ -200,8 +203,9 @@ START: WAIT: var timeout <-chan time.Time - if !s.writeDeadline.IsZero() { - delay := s.writeDeadline.Sub(time.Now()) + writeDeadline := s.writeDeadline.Load().(time.Time) + if !writeDeadline.IsZero() { + delay := writeDeadline.Sub(time.Now()) timeout = time.After(delay) } select { @@ -238,18 +242,25 @@ func (s *Stream) sendWindowUpdate() error { // Determine the delta update max := s.session.config.MaxStreamWindowSize - delta := max - atomic.LoadUint32(&s.recvWindow) + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { 
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
index d216e281..aa239197 100644
--- a/vendor/github.com/hashicorp/yamux/stream.go
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -47,8 +47,8 @@ type Stream struct {
 	recvNotifyCh chan struct{}
 	sendNotifyCh chan struct{}
 
-	readDeadline  time.Time
-	writeDeadline time.Time
+	readDeadline  atomic.Value // time.Time
+	writeDeadline atomic.Value // time.Time
 }
 
 // newStream is used to construct a new stream within
@@ -67,6 +67,8 @@ func newStream(session *Session, id uint32, state streamState) *Stream {
 		recvNotifyCh: make(chan struct{}, 1),
 		sendNotifyCh: make(chan struct{}, 1),
 	}
+	s.readDeadline.Store(time.Time{})
+	s.writeDeadline.Store(time.Time{})
 	return s
 }
 
@@ -122,8 +124,9 @@ START:
 WAIT:
 	var timeout <-chan time.Time
 	var timer *time.Timer
-	if !s.readDeadline.IsZero() {
-		delay := s.readDeadline.Sub(time.Now())
+	readDeadline := s.readDeadline.Load().(time.Time)
+	if !readDeadline.IsZero() {
+		delay := readDeadline.Sub(time.Now())
 		timer = time.NewTimer(delay)
 		timeout = timer.C
 	}
@@ -188,7 +191,7 @@ START:
 
 	// Send the header
 	s.sendHdr.encode(typeData, flags, s.id, max)
-	if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+	if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
 		return 0, err
 	}
 
@@ -200,8 +203,9 @@ START:
 WAIT:
 	var timeout <-chan time.Time
-	if !s.writeDeadline.IsZero() {
-		delay := s.writeDeadline.Sub(time.Now())
+	writeDeadline := s.writeDeadline.Load().(time.Time)
+	if !writeDeadline.IsZero() {
+		delay := writeDeadline.Sub(time.Now())
 		timeout = time.After(delay)
 	}
 	select {
@@ -238,18 +242,25 @@ func (s *Stream) sendWindowUpdate() error {
 	// Determine the delta update
 	max := s.session.config.MaxStreamWindowSize
-	delta := max - atomic.LoadUint32(&s.recvWindow)
+	var bufLen uint32
+	s.recvLock.Lock()
+	if s.recvBuf != nil {
+		bufLen = uint32(s.recvBuf.Len())
+	}
+	delta := (max - bufLen) - s.recvWindow
 
 	// Determine the flags if any
 	flags := s.sendFlags()
 
 	// Check if we can omit the update
 	if delta < (max/2) && flags == 0 {
+		s.recvLock.Unlock()
 		return nil
 	}
 
 	// Update our window
-	atomic.AddUint32(&s.recvWindow, delta)
+	s.recvWindow += delta
+	s.recvLock.Unlock()
 
 	// Send the header
 	s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
@@ -392,16 +403,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	if length == 0 {
 		return nil
 	}
-	if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
-		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
-		return ErrRecvWindowExceeded
-	}
 
 	// Wrap in a limited reader
 	conn = &io.LimitedReader{R: conn, N: int64(length)}
 
 	// Copy into buffer
 	s.recvLock.Lock()
+
+	if length > s.recvWindow {
+		s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
+		return ErrRecvWindowExceeded
+	}
+
 	if s.recvBuf == nil {
 		// Allocate the receive buffer just-in-time to fit the full data frame.
 		// This way we can read in the whole packet without further allocations.
@@ -414,7 +427,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
 	}
 
 	// Decrement the receive window
-	atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+	s.recvWindow -= length
 	s.recvLock.Unlock()
 
 	// Unblock any readers
@@ -435,13 +448,13 @@ func (s *Stream) SetDeadline(t time.Time) error {
 
 // SetReadDeadline sets the deadline for future Read calls.
 func (s *Stream) SetReadDeadline(t time.Time) error {
-	s.readDeadline = t
+	s.readDeadline.Store(t)
 	return nil
 }
 
 // SetWriteDeadline sets the deadline for future Write calls
 func (s *Stream) SetWriteDeadline(t time.Time) error {
-	s.writeDeadline = t
+	s.writeDeadline.Store(t)
 	return nil
 }
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
index 5fe45afc..8a73e924 100644
--- a/vendor/github.com/hashicorp/yamux/util.go
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -1,5 +1,20 @@
 package yamux
 
+import (
+	"sync"
+	"time"
+)
+
+var (
+	timerPool = &sync.Pool{
+		New: func() interface{} {
+			timer := time.NewTimer(time.Hour * 1e6)
+			timer.Stop()
+			return timer
+		},
+	}
+)
+
 // asyncSendErr is used to try an async send of an error
 func asyncSendErr(ch chan error, err error) {
 	if ch == nil {
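The `timerPool` added to util.go above lets sendNoWait reuse a stopped timer instead of allocating a fresh one for every write with a timeout. A minimal, self-contained sketch of the reuse pattern follows; the function name and channel plumbing are illustrative, not part of yamux's API. The drain before `Put` matters: a pooled timer that already fired must have its channel emptied, or the next user would see a stale expiry.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// A pool of stopped timers, mirroring the pattern added in util.go above.
var timerPool = &sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour * 1e6) // arbitrary far-future duration
		t.Stop()
		return t
	},
}

func waitWithTimeout(done <-chan struct{}, d time.Duration) error {
	t := timerPool.Get().(*time.Timer)
	t.Reset(d)
	defer func() {
		// Stop the timer and drain its channel so the next user
		// does not observe a stale expiry.
		if !t.Stop() {
			select {
			case <-t.C:
			default:
			}
		}
		timerPool.Put(t)
	}()

	select {
	case <-done:
		return nil
	case <-t.C:
		return fmt.Errorf("timeout after %s", d)
	}
}

func main() {
	done := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(done) }()
	fmt.Println(waitWithTimeout(done, time.Second)) // <nil>
}
```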
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
index 9cfa988b..8e26ffee 100644
--- a/vendor/github.com/jmespath/go-jmespath/api.go
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -2,7 +2,7 @@ package jmespath
 
 import "strconv"
 
-// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is
+// JMESPath is the representation of a compiled JMES path query. A JMESPath is
 // safe for concurrent use by multiple goroutines.
 type JMESPath struct {
 	ast ASTNode
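Since the comment advertises that a compiled `JMESPath` is safe for concurrent use, a short sketch of the intended pattern (compile once, then share across goroutines) may help; the query expression and JSON document here are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"sync"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Compile once; per the comment above, the compiled query may then
	// be shared by multiple goroutines.
	jp, err := jmespath.Compile("instances[?state=='running'].name")
	if err != nil {
		panic(err)
	}

	var data interface{}
	_ = json.Unmarshal([]byte(`{"instances":[
		{"name":"a","state":"running"},
		{"name":"b","state":"stopped"}]}`), &data)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			result, _ := jp.Search(data) // concurrent Search on the shared query
			fmt.Println(result)          // [a]
		}()
	}
	wg.Wait()
}
```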
diff --git a/vendor/github.com/joyent/triton-go/CHANGELOG.md b/vendor/github.com/joyent/triton-go/CHANGELOG.md
new file mode 100644
index 00000000..35636cdb
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/CHANGELOG.md
@@ -0,0 +1,111 @@
+## Unreleased
+
+## 1.6.1 (June 26 2019)
+
+- compute/networks: support network objects for AddNIC [#169]
+
+## 1.6.0 (June 24 2019)
+
+- compute/networks: added support for network objects [#158]
+- compute/instances: added instances().get support for deleted instances [#167]
+- storage: added support for multipart upload [#160]
+- storage: fixed directory list marker filtering [#156]
+
+## 1.3.1 (April 27 2018)
+
+- client: Fixing an issue where private Triton installations were marked as an invalid DC [#152]
+
+## 1.3.0 (April 17 2018)
+
+- identity/roles: Add support for SetRoleTags [#112]
+- Add support for Triton Service Groups endpoint [#148]
+
+## 1.2.0 (March 20 2018)
+
+- compute/instance: Instance Deletion status now included in the GET instance response [#138]
+
+## 1.1.1 (March 13 2018)
+
+- client: Adding the rbac user support to the SSHAgentSigner [BUG!]
+
+## 1.1.0 (March 13 2018)
+
+- client: Add support for Manta RBAC http signatures
+
+## 1.0.0 (February 28 2018)
+
+- client: Add support for querystring in client/ExecuteRequestRaw [#121]
+- client: Introduce SetHeader for overriding API request header [#125]
+- compute/instances: Add support for passing a list of tags to filter List instances [#116]
+- compute/instances: Add support for getting a count of current instances from the CloudAPI [#119]
+- compute/instances: Add ability to support name-prefix [#129]
+- compute/instances: Add support for Instance Deletion Protection [#131]
+- identity/user: Add support for ChangeUserPassword [#111]
+- expose GetTritonEnv as a root level func [#126]
+
+## 0.9.0 (January 23 2018)
+
+**Please Note:** This is a precursor release to marking triton-go as 1.0.0. We are going to wait and fix any bugs that arise from the large set of changes that have landed since 0.5.2.
+
+- Add support for managing volumes in Triton [#100]
+- identity/policies: Add support for managing policies in Triton [#86]
+- Addition of a triton-go errors package to expose unwrapping of internal errors
+- Migration from hashicorp/errwrap to pkg/errors
+- Using path.Join() for URL structures rather than fmt.Sprintf()
+
+## 0.5.2 (December 28 2017)
+
+- Standardise the API SSH Signers input casing and naming
+
+## 0.5.1 (December 28 2017)
+
+- Include leading '/' when working with SSH Agent signers
+
+## 0.5.0 (December 28 2017)
+
+- Add support for RBAC in triton-go [#82]
+This is a breaking change. No longer do we pass individual parameters to the SSH Signer funcs; we now pass an input struct. This will guard against additional parameter changes in the future.
+We also now add support for using `SDC_*` and `TRITON_*` env vars when working with the Default agent signer
+
+## 0.4.2 (December 22 2017)
+
+- Fixing a panic when the user loses network connectivity while making a GET request to an instance [#81]
+
+## 0.4.1 (December 15 2017)
+
+- Clean up the handling of directory sanitization. Use abs paths everywhere [#79]
+
+## 0.4.0 (December 15 2017)
+
+- Fix an issue where Manta HEAD requests do not return an error resp body [#77]
+- Add support for recursively creating child directories [#78]
+
+## 0.3.0 (December 14 2017)
+
+- Introduce CloudAPI's ListRulesMachines under networking
+- Enable HTTP KeepAlives by default in the client. 15s idle timeout, 2x
+  connections per host, total of 10x connections per client.
+- Expose an optional Headers attribute to clients to allow them to customize
+  HTTP headers when making Object requests.
+- Fix a bug in Directory ListIndex [#69](https://github.com/joyent/issues/69)
+- Inputs to Object operations have been relaxed to `io.Reader` (formerly an
+  `io.ReadSeeker`) [#73](https://github.com/joyent/issues/73).
+- Add support for ForceDelete of all children of a directory [#71](https://github.com/joyent/issues/71)
+- storage: Introduce `Objects.GetInfo` and `Objects.IsDir` using HEAD requests [#74](https://github.com/joyent/triton-go/issues/74)
+
+## 0.2.1 (November 8 2017)
+
+- Fixing a bug where CreateUser and UpdateUser didn't return the UserID
+
+## 0.2.0 (November 7 2017)
+
+- Introduce CloudAPI's Ping under compute
+- Introduce CloudAPI's RebootMachine under compute instances
+- Introduce CloudAPI's ListUsers, GetUser, CreateUser, UpdateUser and DeleteUser under identity package
+- Introduce CloudAPI's ListMachineSnapshots, GetMachineSnapshot, CreateSnapshot, DeleteMachineSnapshot and StartMachineFromSnapshot under compute package
+- tools: Introduce unit testing and scripts for linting, etc.
+- bug: Fix the `compute.ListMachineRules` endpoint
+
+## 0.1.0 (November 2 2017)
+
+- Initial release of a versioned SDK
diff --git a/vendor/github.com/joyent/triton-go/Gopkg.lock b/vendor/github.com/joyent/triton-go/Gopkg.lock
new file mode 100644
index 00000000..2adc4249
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/Gopkg.lock
@@ -0,0 +1,308 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + branch = "master" + digest = "1:6847d6b1a326b6b135f825de3de8775e93b42ed04b7dfd363db7b8c0bad2a987" + name = "github.com/abdullin/seq" + packages = ["."] + pruneopts = "" + revision = "d5467c17e7afe8d8f08f556c6c811a50c3feb28d" + +[[projects]] + digest = "1:982e2547680f9fd2212c6443ab73ea84eef40ee1cdcecb61d997de838445214c" + name = "github.com/cpuguy83/go-md2man" + packages = ["md2man"] + pruneopts = "" + revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1" + version = "v1.0.8" + +[[projects]] + branch = "master" + digest = "1:ae162f9b5c46f6d5ff4bd53a3d78f72e2eb6676c11c5d33b8b106c36f87ddb31" + name = "github.com/dustin/go-humanize" + packages = ["."] + pruneopts = "" + revision = "bb3d318650d48840a39aa21a027c6630e198e626" + +[[projects]] + digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd" + name = "github.com/fsnotify/fsnotify" + packages = ["."] + pruneopts = "" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" + +[[projects]] + branch = "master" + digest = "1:147d671753effde6d3bcd58fc74c1d67d740196c84c280c762a5417319499972" + name = "github.com/hashicorp/hcl" + packages = [ + ".", + "hcl/ast", + "hcl/parser", + "hcl/printer", + "hcl/scanner", + "hcl/strconv", + "hcl/token", + "json/parser", + "json/scanner", + "json/token", + ] + pruneopts = "" + revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8" + +[[projects]] + digest = "1:23bc0b496ba341c6e3ba24d6358ff4a40a704d9eb5f9a3bd8e8fbd57ad869013" + name = "github.com/imdario/mergo" + packages = ["."] + pruneopts = "" + revision = "163f41321a19dd09362d4c63cc2489db2015f1f4" + version = "0.3.2" + +[[projects]] + digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be" + name = "github.com/inconshreveable/mousetrap" + packages = ["."] + pruneopts = "" + revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" + version = "v1.0" + +[[projects]] + digest = "1:2de1791b9e43f26c696e36950e42676565e7da7499a870bc02213da4b59b1d14" + name = "github.com/jackc/pgx" + packages = [ + ".", + "chunkreader", + "internal/sanitize", + "pgio", + "pgproto3", + "pgtype", + ] + pruneopts = "" + revision = "da3231b0b66e2e74cdb779f1d46c5e958ba8be27" + version = "v3.1.0" + +[[projects]] + digest = "1:739b2038a38cebb50e922d18f4b042c042256320fea2db094814aeef8891e0c1" + name = "github.com/magiconair/properties" + packages = ["."] + pruneopts = "" + revision = "d419a98cdbed11a922bf76f257b7c4be79b50e73" + version = "v1.7.4" + +[[projects]] + branch = "master" + digest = "1:3140e04675a6a91d2a20ea9d10bdadf6072085502e6def6768361260aee4b967" + name = "github.com/mattn/go-isatty" + packages = ["."] + pruneopts = "" + revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c" + +[[projects]] + digest = "1:81e673df85e765593a863f67cba4544cf40e8919590f04d67664940786c2b61a" + name = "github.com/mattn/go-runewidth" + packages = ["."] + pruneopts = "" + revision = "9e777a8366cce605130a531d2cd6363d07ad7317" + version = "v0.0.2" + +[[projects]] + branch = "master" + digest = "1:c7089cbc147665e7bf85929fe3d5c1bae8fd08eb4344f917ba988b435aa92ed4" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "" + revision = "a4e142e9c047c904fa2f1e144d9a84e6133024bc" + +[[projects]] + branch = "master" + digest = "1:6ad32168a9cdb3585e45f44b2f914b4cefb0da0c23bac8482ed56d0942090be9" + name = "github.com/olekukonko/tablewriter" + packages = ["."] + pruneopts = "" + revision = 
"b8a9be070da40449e501c3c4730a889e42d87a9e" + +[[projects]] + digest = "1:d60cfeee185019d4fcd35e8c89c83aff576e4723b6100300bf67b05be961388f" + name = "github.com/pelletier/go-toml" + packages = ["."] + pruneopts = "" + revision = "acdc4509485b587f5e675510c4f2c63e90ff68a8" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:df48fb76fb2a40edea0c9b3d960bc95e326660d82ff1114e1f88001f7a236b40" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "" + revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc" + +[[projects]] + digest = "1:927086a7077c4f8c611d6148bb1ba509391c4ecda281465f64f4c9ec79f841f2" + name = "github.com/rs/zerolog" + packages = [ + ".", + "internal/json", + "log", + ] + pruneopts = "" + revision = "b53826c57a6a1d8833443ebeacf1cfb62b229c64" + version = "v1.4.0" + +[[projects]] + digest = "1:225a0e6d1256727d55bd805e808b41bab40d7bbd91967f82b8b278b892846786" + name = "github.com/russross/blackfriday" + packages = ["."] + pruneopts = "" + revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c" + version = "v1.5" + +[[projects]] + branch = "master" + digest = "1:810e774994adcfadeb4020a8dfe2ba0249f28016aca19f13852953c9e0560b57" + name = "github.com/sean-/conswriter" + packages = ["."] + pruneopts = "" + revision = "f5ae3917a627f1aab86eedaac41b153e4097e4e8" + +[[projects]] + branch = "master" + digest = "1:c8e042e524b64f3e040c0c50ed4f498a5af4a4c8ff600c1f6b0e49e69addc220" + name = "github.com/sean-/pager" + packages = ["."] + pruneopts = "" + revision = "666be9bf53b5257ca07b4a16227df91e104c0519" + +[[projects]] + branch = "master" + digest = "1:6ee36f2cea425916d81fdaaf983469fc18f91b3cf090cfe90fa0a9d85b8bfab7" + name = "github.com/sean-/seed" + packages = ["."] + pruneopts = "" + revision = "e2103e2c35297fb7e17febb81e49b312087a2372" + +[[projects]] + digest = "1:dae0d7dd55563fd389e7263a32d2030022ef29cceff941336e53f6520e0308c0" + name = "github.com/spf13/afero" + packages = [ + ".", + "mem", + ] + pruneopts = "" + revision = "bb8f1927f2a9d3ab41c9340aa034f6b803f4359c" + version = "v1.0.2" + +[[projects]] + digest = "1:6ff9b74bfea2625f805edec59395dc37e4a06458dd3c14e3372337e3d35a2ed6" + name = "github.com/spf13/cast" + packages = ["."] + pruneopts = "" + revision = "acbeb36b902d72a7a4c18e8f3241075e7ab763e4" + version = "v1.1.0" + +[[projects]] + branch = "master" + digest = "1:986f79b56adb90fa2dc621bea27f75548370833c3a26b9eeff4241bf5928dad1" + name = "github.com/spf13/cobra" + packages = [ + ".", + "doc", + ] + pruneopts = "" + revision = "be77323fc05148ef091e83b3866c0d47c8e74a8b" + +[[projects]] + branch = "master" + digest = "1:104517520aab91164020ab6524a5d6b7cafc641b2e42ac6236f6ac1deac4f66a" + name = "github.com/spf13/jwalterweatherman" + packages = ["."] + pruneopts = "" + revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" + +[[projects]] + digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000" + name = "github.com/spf13/pflag" + packages = ["."] + pruneopts = "" + revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" + version = "v1.0.0" + +[[projects]] + branch = "master" + digest = "1:50a17f77f59a4a904bc5d51ac6cde6c693c3ab0687e49c12bc10f974c7175ef8" + name = "github.com/spf13/viper" + packages = ["."] + pruneopts = "" + revision = "aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5" + +[[projects]] + branch = "master" + digest = "1:ff7dea5ad362112e8e360ee0dbfeace7b000e93947ae1f39634e7d41daef15a1" + name = "golang.org/x/crypto" + packages = [ 
+ "curve25519", + "ed25519", + "ed25519/internal/edwards25519", + "ssh", + "ssh/agent", + ] + pruneopts = "" + revision = "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8" + +[[projects]] + branch = "master" + digest = "1:407b5f905024dd94ee08c1777fabb380fb3d380f92a7f7df2592be005337eeb3" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "" + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[[projects]] + branch = "master" + digest = "1:31985a0ed491dba5ba7fe92e18be008acd92ca9435ed9b35b06f3e6c00fd82cb" + name = "golang.org/x/text" + packages = [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm", + ] + pruneopts = "" + revision = "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1" + +[[projects]] + branch = "v2" + digest = "1:4b4e5848dfe7f316f95f754df071bebfb40cf4482da62e17e7e1aebdf11f4918" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "" + revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/abdullin/seq", + "github.com/dustin/go-humanize", + "github.com/imdario/mergo", + "github.com/jackc/pgx", + "github.com/mattn/go-isatty", + "github.com/olekukonko/tablewriter", + "github.com/pkg/errors", + "github.com/rs/zerolog", + "github.com/rs/zerolog/log", + "github.com/sean-/conswriter", + "github.com/sean-/seed", + "github.com/spf13/cobra", + "github.com/spf13/cobra/doc", + "github.com/spf13/pflag", + "github.com/spf13/viper", + "golang.org/x/crypto/ssh", + "golang.org/x/crypto/ssh/agent", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/joyent/triton-go/Gopkg.toml b/vendor/github.com/joyent/triton-go/Gopkg.toml new file mode 100644 index 00000000..24d72dab --- /dev/null +++ b/vendor/github.com/joyent/triton-go/Gopkg.toml @@ -0,0 +1,74 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/abdullin/seq" + +[[constraint]] + branch = "master" + name = "github.com/sean-/seed" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" + +[[constraint]] + branch = "master" + name = "github.com/pkg/errors" + +[[constraint]] + branch = "master" + name = "github.com/olekukonko/tablewriter" + +[[constraint]] + name = "github.com/rs/zerolog" + version = "1.4.0" + +[[constraint]] + name = "github.com/spf13/cobra" + branch = "master" + +[[constraint]] + branch = "master" + name = "github.com/spf13/viper" + +[[constraint]] + branch = "master" + name = "github.com/mattn/go-isatty" + +[[constraint]] + branch = "master" + name = "github.com/sean-/conswriter" + +[[constraint]] + branch = "master" + name = "github.com/sean-/pager" + +[[constraint]] + name = "github.com/imdario/mergo" + version = "0.3.2" + +[[constraint]] + name = "github.com/dustin/go-humanize" + branch = "master" diff --git a/vendor/github.com/joyent/triton-go/LICENSE b/vendor/github.com/joyent/triton-go/LICENSE new file mode 100644 index 00000000..14e2f777 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. 
You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. 
No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. 
+ +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/joyent/triton-go/Makefile b/vendor/github.com/joyent/triton-go/Makefile new file mode 100644 index 00000000..8dae92f3 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/Makefile @@ -0,0 +1,47 @@ +# +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +# + +# +# Copyright 2019 Joyent, Inc. +# + +TEST?=$$(go list ./... |grep -Ev 'vendor|examples|testutils') +GOFMT_FILES?=$$(find . -name '*.go' |grep -v vendor) + +.PHONY: all +all: + +.PHONY: tools +tools: ## Download and install all dev/code tools + @echo "==> Installing dev tools" + go get -u github.com/golang/dep/cmd/dep + +.PHONY: build +build: + @govvv build + +.PHONY: install +install: + @govvv install + +.PHONY: test +test: ## Run unit tests + @echo "==> Running unit test with coverage" + @./scripts/go-test-with-coverage.sh + +.PHONY: testacc +testacc: ## Run acceptance tests + @echo "==> Running acceptance tests" + TRITON_TEST=1 go test $(TEST) -v $(TESTARGS) -run -timeout 120m + +.PHONY: check +check: + scripts/gofmt-check.sh + +.PHONY: help +help: ## Display this help message + @echo "GNU make(1) targets:" + @grep -E '^[a-zA-Z_.-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}' diff --git a/vendor/github.com/joyent/triton-go/README.md b/vendor/github.com/joyent/triton-go/README.md new file mode 100644 index 00000000..d7adfa89 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/README.md @@ -0,0 +1,253 @@ +# triton-go + +`triton-go` is a client SDK for Go applications using Joyent's Triton Compute +and Object Storage (Manta) APIs. + +The Triton Go SDK is used in the following open source projects. + +- [Consul](https://www.consul.io/docs/agent/cloud-auto-join.html#joyent-triton) +- [Packer](http://github.com/hashicorp/packer) +- [Vault](http://github.com/hashicorp/vault) +- [Terraform](http://github.com/hashicorp/terraform) +- [Terraform Triton Provider](https://github.com/terraform-providers/terraform-provider-triton) +- [Docker Machine](https://github.com/joyent/docker-machine-driver-triton) +- [Triton Kubernetes](https://github.com/joyent/triton-kubernetes) +- [HashiCorp go-discover](https://github.com/hashicorp/go-discover) + +## Usage + +Triton uses [HTTP Signature][4] to sign the Date header in each HTTP request +made to the Triton API. 
+Currently, requests can be signed using either a private key file loaded from
+disk (using an [`authentication.PrivateKeySigner`][5]), or using a key stored
+with the local SSH Agent (using an [`SSHAgentSigner`][6]).
+
+To construct a Signer, use the `New*` range of methods in the `authentication`
+package. In the case of `authentication.NewSSHAgentSigner`, the parameters are
+the fingerprint of the key with which to sign, and the account name (normally
+stored in the `TRITON_ACCOUNT` environment variable). There is also support for
+passing in a username, which allows you to use an account other than the main
+Triton account. For example:
+
+```go
+input := authentication.SSHAgentSignerInput{
+	KeyID:       "a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11",
+	AccountName: "AccountName",
+	Username:    "Username",
+}
+sshKeySigner, err := authentication.NewSSHAgentSigner(input)
+if err != nil {
+	log.Fatalf("NewSSHAgentSigner: %s", err)
+}
+```
+
+An appropriate key fingerprint can be generated using `ssh-keygen`.
+
+```
+ssh-keygen -Emd5 -lf ~/.ssh/id_rsa.pub | cut -d " " -f 2 | sed 's/MD5://'
+```
+
+Each top-level package (`account`, `compute`, `identity`, `network`) has its
+own separate client. To initialize a package client, pass the global
+`triton.ClientConfig` struct into the client's constructor function.
+
+```go
+config := &triton.ClientConfig{
+	TritonURL:   os.Getenv("TRITON_URL"),
+	MantaURL:    os.Getenv("MANTA_URL"),
+	AccountName: accountName,
+	Username:    os.Getenv("TRITON_USER"),
+	Signers:     []authentication.Signer{sshKeySigner},
+}
+
+c, err := compute.NewClient(config)
+if err != nil {
+	log.Fatalf("compute.NewClient: %s", err)
+}
+```
+
+Constructing `compute.Client` returns an interface which exposes `compute` API
+resources. The same goes for all other packages. See each package's
+documentation for more information.
+
+The same `triton.ClientConfig` will initialize the Manta `storage` client as
+well...
+
+```go
+c, err := storage.NewClient(config)
+if err != nil {
+	log.Fatalf("storage.NewClient: %s", err)
+}
+```
+
+## Error Handling
+
+If an error is returned by the HTTP API, the `error` returned from the function
+will contain an instance of `errors.APIError` in the chain. Error wrapping
+is performed using the [pkg/errors][7] library.
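To make the unwrapping concrete, here is a minimal, self-contained sketch using pkg/errors; the `APIError` type below is a local stand-in, and its `Code`/`Message` fields are assumptions for illustration rather than the exact shape of the triton-go `errors` type:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// APIError is a stand-in for triton-go's errors.APIError; the field names
// here are illustrative assumptions, not the library's exact definition.
type APIError struct {
	Code    string
	Message string
}

func (e *APIError) Error() string {
	return fmt.Sprintf("%s: %s", e.Code, e.Message)
}

func main() {
	// Simulate a wrapped error as a client call might return it.
	err := errors.Wrap(
		&APIError{Code: "ResourceNotFound", Message: "no such instance"},
		"compute.Instances.Get",
	)

	// errors.Cause walks the wrap chain back to the innermost error.
	if apiErr, ok := errors.Cause(err).(*APIError); ok {
		fmt.Println("API error code:", apiErr.Code)
	}
}
```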
+## Acceptance Tests
+
+_**NOTE:** The tests are not currently well-structured, and depend on many
+hard-coded specifics of the Joyent Public Cloud (JPC) which is being shut down
+in November 2019. It is likely not possible to run the test suite against a
+local Triton installation at this time, though that is definitely the intended
+test suite target environment for future development._
+
+Acceptance Tests run directly against the Triton API, so you will need either a
+local installation of Triton or an account with Joyent's Public Cloud offering
+in order to run them. The tests create real resources (and thus cost real
+money)!
+
+In order to run acceptance tests, the following environment variables must be
+set:
+
+- `TRITON_TEST` - must be set to any value in order to indicate desire to create
+  resources
+- `TRITON_URL` - the base endpoint for the Triton API
+- `TRITON_ACCOUNT` - the account name for the Triton API
+- `TRITON_KEY_ID` - the fingerprint of the SSH key used for signing
+
+Additionally, you may set `TRITON_KEY_MATERIAL` to the contents of an unencrypted
+private key. If this is set, the PrivateKeySigner (see above) will be used; if
+not, the SSHAgentSigner will be used. You can also set `TRITON_USER` to run the
+tests against an account other than the main Triton account.
+
+### Example Run
+
+The verbose output has been removed for brevity here.
+
+```
+$ HTTP_PROXY=http://localhost:8888 \
+	TRITON_TEST=1 \
+	TRITON_URL=https://us-sw-1.api.joyent.com \
+	TRITON_ACCOUNT=AccountName \
+	TRITON_KEY_ID=a4:c6:f3:75:80:27:e0:03:a9:98:79:ef:c5:0a:06:11 \
+	go test -v -run "TestAccKey"
+=== RUN   TestAccKey_Create
+--- PASS: TestAccKey_Create (12.46s)
+=== RUN   TestAccKey_Get
+--- PASS: TestAccKey_Get (4.30s)
+=== RUN   TestAccKey_Delete
+--- PASS: TestAccKey_Delete (15.08s)
+PASS
+ok      github.com/joyent/triton-go    31.861s
+```
+
+## Example API
+
+There's an `examples/` directory available with sample code set up for many of
+the APIs within this library. Most of these can be run using `go run` and
+referencing the SSH key file used by your active `triton` CLI profile.
+
+```sh
+$ eval "$(triton env us-sw-1)"
+$ TRITON_KEY_FILE=~/.ssh/triton-id_rsa go run examples/compute/instances.go
+```
+
+The following is a complete example of how to initialize the `compute` package
+client and list all instances under an account. More detailed usage of this
+library follows.
+
+```go
+package main
+
+import (
+	"context"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+
+	triton "github.com/joyent/triton-go"
+	"github.com/joyent/triton-go/authentication"
+	"github.com/joyent/triton-go/compute"
+)
+
+func main() {
+	keyID := os.Getenv("TRITON_KEY_ID")
+	accountName := os.Getenv("TRITON_ACCOUNT")
+	keyMaterial := os.Getenv("TRITON_KEY_MATERIAL")
+	userName := os.Getenv("TRITON_USER")
+
+	var signer authentication.Signer
+	var err error
+
+	if keyMaterial == "" {
+		input := authentication.SSHAgentSignerInput{
+			KeyID:       keyID,
+			AccountName: accountName,
+			Username:    userName,
+		}
+		signer, err = authentication.NewSSHAgentSigner(input)
+		if err != nil {
+			log.Fatalf("Error Creating SSH Agent Signer: %s", err)
+		}
+	} else {
+		var keyBytes []byte
+		if _, err = os.Stat(keyMaterial); err == nil {
+			keyBytes, err = ioutil.ReadFile(keyMaterial)
+			if err != nil {
+				log.Fatalf("Error reading key material from %s: %s",
+					keyMaterial, err)
+			}
+			block, _ := pem.Decode(keyBytes)
+			if block == nil {
+				log.Fatalf(
+					"Failed to read key material '%s': no key found", keyMaterial)
+			}
+
+			if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
+				log.Fatalf(
+					"Failed to read key '%s': password protected keys are\n"+
+						"not currently supported. Please decrypt the key prior to use.", keyMaterial)
+			}
+
+		} else {
+			keyBytes = []byte(keyMaterial)
+		}
+
+		input := authentication.PrivateKeySignerInput{
+			KeyID:              keyID,
+			PrivateKeyMaterial: keyBytes,
+			AccountName:        accountName,
+			Username:           userName,
+		}
+		signer, err = authentication.NewPrivateKeySigner(input)
+		if err != nil {
+			log.Fatalf("Error Creating SSH Private Key Signer: %s", err)
+		}
+	}
+
+	config := &triton.ClientConfig{
+		TritonURL:   os.Getenv("TRITON_URL"),
+		AccountName: accountName,
+		Username:    userName,
+		Signers:     []authentication.Signer{signer},
+	}
+
+	c, err := compute.NewClient(config)
+	if err != nil {
+		log.Fatalf("compute.NewClient: %s", err)
+	}
+
+	listInput := &compute.ListInstancesInput{}
+	instances, err := c.Instances().List(context.Background(), listInput)
+	if err != nil {
+		log.Fatalf("compute.Instances.List: %v", err)
+	}
+	numInstances := 0
+	for _, instance := range instances {
+		numInstances++
+		fmt.Println(fmt.Sprintf("-- Instance: %v", instance.Name))
+	}
+}
+```
+
+[4]: https://github.com/joyent/node-http-signature/blob/master/http_signing.md
+[5]: https://godoc.org/github.com/joyent/triton-go/authentication
+[6]: https://godoc.org/github.com/joyent/triton-go/authentication
+[7]: https://github.com/pkg/errors
diff --git a/vendor/github.com/joyent/triton-go/authentication/agent_key_identifier.go b/vendor/github.com/joyent/triton-go/authentication/agent_key_identifier.go
new file mode 100644
index 00000000..5c316363
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/authentication/agent_key_identifier.go
@@ -0,0 +1,25 @@
+package authentication
+
+import "path"
+
+type KeyID struct {
+	UserName    string
+	AccountName string
+	Fingerprint string
+	IsManta     bool
+}
+
+func (input *KeyID) generate() string {
+	var keyID string
+	if input.UserName != "" {
+		if input.IsManta {
+			keyID = path.Join("/", input.AccountName, input.UserName, "keys", input.Fingerprint)
+		} else {
+			keyID = path.Join("/", input.AccountName, "users", input.UserName, "keys", input.Fingerprint)
+		}
+	} else {
+		keyID = path.Join("/", input.AccountName, "keys", input.Fingerprint)
+	}
+
+	return keyID
+}
diff --git a/vendor/github.com/joyent/triton-go/authentication/dummy.go b/vendor/github.com/joyent/triton-go/authentication/dummy.go
new file mode 100644
index 00000000..797e0047
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/authentication/dummy.go
@@ -0,0 +1,80 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package authentication
+
+// DON'T USE THIS OUTSIDE TESTING ~ This key was only created for use in
+// internal unit testing. It should never be used for acceptance testing either.
+//
+// This is just a randomly generated key pair.
+var Dummy = struct { + Fingerprint string + PrivateKey []byte + PublicKey []byte + Signer Signer +}{ + "9f:d6:65:fc:d6:60:dc:d0:4e:db:2d:75:f7:92:8c:31", + []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIJKAIBAAKCAgEAui9lNjCJahHeFSFC6HXi/CNX588C/L2gJUx65bnNphVC98hW +1wzoRvPXHx5aWnb7lEbpNhP6B0UoCBDTaPgt9hHfD/oNQ+6HT1QpDIGfZmXI91/t +cjGVSBbxN7WaYt/HsPrGjbalwvQPChN53sMVmFkMTEDR5G3zOBOAGrOimlCT80wI +2S5Xg0spd8jjKM5I1swDR0xtuDWnHTR1Ohin+pEQIE6glLTfYq7oQx6nmMXXBNmk ++SaPD1FAyjkF/81im2EHXBygNEwraVrDcAxK2mKlU2XMJiogQKNYWlm3UkbNB6WP +Le12+Ka02rmIVsSqIpc/ZCBraAlCaSWlYCkU+vJ2hH/+ypy5bXNlbaTiWZK+vuI7 +PC87T50yLNeXVuNZAynzDpBCvsjiiHrB/ZFRfVfF6PviV8CV+m7GTzfAwJhVeSbl +rR6nts16K0HTD48v57DU0b0t5VOvC7cWPShs+afdSL3Z8ReL5EWMgU1wfvtycRKe +hiDVGj3Ms2cf83RIANr387G+1LcTQYP7JJuB7Svy5j+R6+HjI0cgu4EMUPdWfCNG +GyrlxwJNtPmUSfasH1xUKpqr7dC+0sN4/gfJw75WTAYrATkPzexoYNaMsGDfhuoh +kYa3Tn2q1g3kqhsX/R0Fd5d8d5qc137qcRCxiZYz9f3bVkXQbhYmO9da3KsCAwEA +AQKCAgAeEAURqOinPddUJhi9nDtYZwSMo3piAORY4W5+pW+1P32esLSE6MqgmkLD +/YytSsT4fjKtzq/yeJIsKztXmasiLmSMGd4Gd/9VKcuu/0cTq5+1gcG/TI5EI6Az +VJlnGacOxo9E1pcRUYMUJ2zoMSvNe6NmtJivf6lkBpIKvbKlpBkfkclj9/2db4d0 +lfVH43cTZ8Gnw4l70v320z+Sb+S/qqil7swy9rmTH5bVL5/0JQ3A9LuUl0tGN+J0 +RJzZXvprCFG958leaGYiDsu7zeBQPtlfC/LYvriSd02O2SmmmVQFxg/GZK9vGsvc +/VQsXnjyOOW9bxaop8YXYELBsiB21ipTHzOwoqHT8wFnjgU9Y/7iZIv7YbZKQsCS +DrwdlZ/Yw90wiif+ldYryIVinWfytt6ERv4Dgezc98+1XPi1Z/WB74/lIaDXFl3M +3ypjtvLYbKew2IkIjeAwjvZJg/QpC/50RrrPtVDgeAI1Ni01ikixUhMYsHJ1kRih +0tqLvLqSPoHmr6luFlaoKdc2eBqb+8U6K/TrXhKtT7BeUFiSbvnVfdbrH9r+AY/2 +zYtG6llzkE5DH8ZR3Qp+dx7QEDtvYhGftWhx9uasd79AN7CuGYnL54YFLKGRrWKN +ylysqfUyOQYiitdWdNCw9PP2vGRx5JAsMMSy+ft18jjTJvNQ0QKCAQEA28M11EE6 +MpnHxfyP00Dl1+3wl2lRyNXZnZ4hgkk1f83EJGpoB2amiMTF8P1qJb7US1fXtf7l +gkJMMk6t6iccexV1/NBh/7tDZHH/v4HPirFTXQFizflaghD8dEADy9DY4BpQYFRe +8zGsv4/4U0txCXkUIfKcENt/FtXv2T9blJT6cDV0yTx9IAyd4Kor7Ly2FIYroSME +uqnOQt5PwB+2qkE+9hdg4xBhFs9sW5dvyBvQvlBfX/xOmMw2ygH6vsaJlNfZ5VPa +EP/wFP/qHyhDlCfbHdL6qF2//wUoM2QM9RgBdZNhcKU7zWuf7Ev199tmlLC5O14J +PkQxUGftMfmWxQKCAQEA2OLKD8dwOzpwGJiPQdBmGpwCamfcCY4nDwqEaCu4vY1R +OJR+rpYdC2hgl5PTXWH7qzJVdT/ZAz2xUQOgB1hD3Ltk7DQ+EZIA8+vJdaicQOme +vfpMPNDxCEX9ee0AXAmAC3aET82B4cMFnjXjl1WXLLTowF/Jp/hMorm6tl2m15A2 +oTyWlB/i/W/cxHl2HFWK7o8uCNoKpKJjheNYn+emEcH1bkwrk8sxQ78cBNmqe/gk +MLgu8qfXQ0LLKIL7wqmIUHeUpkepOod8uXcTmmN2X9saCIwFKx4Jal5hh5v5cy0G +MkyZcUIhhnmzr7lXbepauE5V2Sj5Qp040AfRVjZcrwKCAQANe8OwuzPL6P2F20Ij +zwaLIhEx6QdYkC5i6lHaAY3jwoc3SMQLODQdjh0q9RFvMW8rFD+q7fG89T5hk8w9 +4ppvvthXY52vqBixcAEmCdvnAYxA15XtV1BDTLGAnHDfL3gu/85QqryMpU6ZDkdJ +LQbJcwFWN+F1c1Iv335w0N9YlW9sNQtuUWTH8544K5i4VLfDOJwyrchbf5GlLqir +/AYkGg634KVUKSwbzywxzm/QUkyTcLD5Xayg2V6/NDHjRKEqXbgDxwpJIrrjPvRp +ZvoGfA+Im+o/LElcZz+ZL5lP7GIiiaFf3PN3XhQY1mxIAdEgbFthFhrxFBQGf+ng +uBSVAoIBAHl12K8pg8LHoUtE9MVoziWMxRWOAH4ha+JSg4BLK/SLlbbYAnIHg1CG +LcH1eWNMokJnt9An54KXJBw4qYAzgB23nHdjcncoivwPSg1oVclMjCfcaqGMac+2 +UpPblF32vAyvXL3MWzZxn03Q5Bo2Rqk0zzwc6LP2rARdeyDyJaOHEfEOG03s5ZQE +91/YnbqUdW/QI3m1kkxM3Ot4PIOgmTJMqwQQCD+GhZppBmn49C7k8m+OVkxyjm0O +lPOlFxUXGE3oCgltDGrIwaKj+wh1Ny/LZjLvJ13UPnWhUYE+al6EEnpMx4nT/S5w +LZ71bu8RVajtxcoN1jnmDpECL8vWOeUCggEBAIEuKoY7pVHfs5gr5dXfQeVZEtqy +LnSdsd37/aqQZRlUpVmBrPNl1JBLiEVhk2SL3XJIDU4Er7f0idhtYLY3eE7wqZ4d +38Iaj5tv3zBc/wb1bImPgOgXCH7QrrbW7uTiYMLScuUbMR4uSpfubLaV8Zc9WHT8 +kTJ2pKKtA1GPJ4V7HCIxuTjD2iyOK1CRkaqSC+5VUuq5gHf92CEstv9AIvvy5cWg +gnfBQoS89m3aO035henSfRFKVJkHaEoasj8hB3pwl9FGZUJp1c2JxiKzONqZhyGa +6tcIAM3od0QtAfDJ89tWJ5D31W8KNNysobFSQxZ62WgLUUtXrkN1LGodxGQ= +-----END RSA PRIVATE KEY-----`), + []byte(`ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC6L2U2MIlqEd4VIULodeL8I1fnzwL8vaAlTHrluc2mFUL3yFbXDOhG89cfHlpadvuURuk2E/oHRSgIENNo+C32Ed8P+g1D7odPVCkMgZ9mZcj3X+1yMZVIFvE3tZpi38ew+saNtqXC9A8KE3newxWYWQxMQNHkbfM4E4Aas6KaUJPzTAjZLleDSyl3yOMozkjWzANHTG24NacdNHU6GKf6kRAgTqCUtN9iruhDHqeYxdcE2aT5Jo8PUUDKOQX/zWKbYQdcHKA0TCtpWsNwDEraYqVTZcwmKiBAo1haWbdSRs0HpY8t7Xb4prTauYhWxKoilz9kIGtoCUJpJaVgKRT68naEf/7KnLltc2VtpOJZkr6+4js8LztPnTIs15dW41kDKfMOkEK+yOKIesH9kVF9V8Xo++JXwJX6bsZPN8DAmFV5JuWtHqe2zXorQdMPjy/nsNTRvS3lU68LtxY9KGz5p91IvdnxF4vkRYyBTXB++3JxEp6GINUaPcyzZx/zdEgA2vfzsb7UtxNBg/skm4HtK/LmP5Hr4eMjRyC7gQxQ91Z8I0YbKuXHAk20+ZRJ9qwfXFQqmqvt0L7Sw3j+B8nDvlZMBisBOQ/N7Ghg1oywYN+G6iGRhrdOfarWDeSqGxf9HQV3l3x3mpzXfupxELGJljP1/dtWRdBuFiY711rcqw== test-dummy-20171002140848`), + nil, +} + +func init() { + testSigner, _ := NewTestSigner() + Dummy.Signer = testSigner +} diff --git a/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go b/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go new file mode 100644 index 00000000..7fb5a5ab --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/ecdsa_signature.go @@ -0,0 +1,74 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +import ( + "encoding/asn1" + "encoding/base64" + "fmt" + "math/big" + + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" +) + +type ecdsaSignature struct { + hashAlgorithm string + R *big.Int + S *big.Int +} + +func (s *ecdsaSignature) SignatureType() string { + return fmt.Sprintf("ecdsa-%s", s.hashAlgorithm) +} + +func (s *ecdsaSignature) String() string { + toEncode := struct { + R *big.Int + S *big.Int + }{ + R: s.R, + S: s.S, + } + + signatureBytes, err := asn1.Marshal(toEncode) + if err != nil { + panic(fmt.Sprintf("Error marshaling signature: %s", err)) + } + + return base64.StdEncoding.EncodeToString(signatureBytes) +} + +func newECDSASignature(signatureBlob []byte) (*ecdsaSignature, error) { + var ecSig struct { + R *big.Int + S *big.Int + } + + if err := ssh.Unmarshal(signatureBlob, &ecSig); err != nil { + return nil, errors.Wrap(err, "unable to unmarshall signature") + } + + rValue := ecSig.R.Bytes() + var hashAlgorithm string + switch len(rValue) { + case 31, 32: + hashAlgorithm = "sha256" + case 65, 66: + hashAlgorithm = "sha512" + default: + return nil, fmt.Errorf("Unsupported key length: %d", len(rValue)) + } + + return &ecdsaSignature{ + hashAlgorithm: hashAlgorithm, + R: ecSig.R, + S: ecSig.S, + }, nil +} diff --git a/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go b/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go new file mode 100644 index 00000000..9b21d631 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/private_key_signer.go @@ -0,0 +1,131 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+// + +package authentication + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "strings" + + "github.com/pkg/errors" + "golang.org/x/crypto/ssh" +) + +type PrivateKeySigner struct { + formattedKeyFingerprint string + keyFingerprint string + algorithm string + accountName string + userName string + hashFunc crypto.Hash + + privateKey *rsa.PrivateKey +} + +type PrivateKeySignerInput struct { + KeyID string + PrivateKeyMaterial []byte + AccountName string + Username string +} + +func NewPrivateKeySigner(input PrivateKeySignerInput) (*PrivateKeySigner, error) { + keyFingerprintMD5 := strings.Replace(input.KeyID, ":", "", -1) + + block, _ := pem.Decode(input.PrivateKeyMaterial) + if block == nil { + return nil, errors.New("Error PEM-decoding private key material: nil block received") + } + + rsakey, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, errors.Wrap(err, "unable to parse private key") + } + + sshPublicKey, err := ssh.NewPublicKey(rsakey.Public()) + if err != nil { + return nil, errors.Wrap(err, "unable to parse SSH key from private key") + } + + matchKeyFingerprint := formatPublicKeyFingerprint(sshPublicKey, false) + displayKeyFingerprint := formatPublicKeyFingerprint(sshPublicKey, true) + if matchKeyFingerprint != keyFingerprintMD5 { + return nil, errors.New("Private key file does not match public key fingerprint") + } + + signer := &PrivateKeySigner{ + formattedKeyFingerprint: displayKeyFingerprint, + keyFingerprint: input.KeyID, + accountName: input.AccountName, + + hashFunc: crypto.SHA1, + privateKey: rsakey, + } + + if input.Username != "" { + signer.userName = input.Username + } + + _, algorithm, err := signer.SignRaw("HelloWorld") + if err != nil { + return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err) + } + signer.algorithm = algorithm + + return signer, nil +} + +func (s *PrivateKeySigner) Sign(dateHeader string, isManta bool) (string, error) { + const headerName = "date" + + hash := s.hashFunc.New() + hash.Write([]byte(fmt.Sprintf("%s: %s", headerName, dateHeader))) + digest := hash.Sum(nil) + + signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest) + if err != nil { + return "", errors.Wrap(err, "unable to sign date header") + } + signedBase64 := base64.StdEncoding.EncodeToString(signed) + + key := &KeyID{ + UserName: s.userName, + AccountName: s.accountName, + Fingerprint: s.formattedKeyFingerprint, + IsManta: isManta, + } + + return fmt.Sprintf(authorizationHeaderFormat, key.generate(), "rsa-sha1", headerName, signedBase64), nil +} + +func (s *PrivateKeySigner) SignRaw(toSign string) (string, string, error) { + hash := s.hashFunc.New() + hash.Write([]byte(toSign)) + digest := hash.Sum(nil) + + signed, err := rsa.SignPKCS1v15(rand.Reader, s.privateKey, s.hashFunc, digest) + if err != nil { + return "", "", errors.Wrap(err, "unable to sign date header") + } + signedBase64 := base64.StdEncoding.EncodeToString(signed) + return signedBase64, "rsa-sha1", nil +} + +func (s *PrivateKeySigner) KeyFingerprint() string { + return s.formattedKeyFingerprint +} + +func (s *PrivateKeySigner) DefaultAlgorithm() string { + return s.algorithm +} diff --git a/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go b/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go new file mode 100644 index 00000000..81b735eb --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/rsa_signature.go @@ -0,0 
+1,33 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +import ( + "encoding/base64" +) + +type rsaSignature struct { + hashAlgorithm string + signature []byte +} + +func (s *rsaSignature) SignatureType() string { + return s.hashAlgorithm +} + +func (s *rsaSignature) String() string { + return base64.StdEncoding.EncodeToString(s.signature) +} + +func newRSASignature(signatureBlob []byte) (*rsaSignature, error) { + return &rsaSignature{ + hashAlgorithm: "rsa-sha1", + signature: signatureBlob, + }, nil +} diff --git a/vendor/github.com/joyent/triton-go/authentication/signature.go b/vendor/github.com/joyent/triton-go/authentication/signature.go new file mode 100644 index 00000000..8cc18d96 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/signature.go @@ -0,0 +1,35 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +import ( + "fmt" + "regexp" +) + +type httpAuthSignature interface { + SignatureType() string + String() string +} + +func keyFormatToKeyType(keyFormat string) (string, error) { + if keyFormat == "ssh-rsa" { + return "rsa", nil + } + + if keyFormat == "ssh-ed25519" { + return "ed25519", nil + } + + if regexp.MustCompile("^ecdsa-sha2-*").Match([]byte(keyFormat)) { + return "ecdsa", nil + } + + return "", fmt.Errorf("Unknown key format: %s", keyFormat) +} diff --git a/vendor/github.com/joyent/triton-go/authentication/signer.go b/vendor/github.com/joyent/triton-go/authentication/signer.go new file mode 100644 index 00000000..40b71d08 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/signer.go @@ -0,0 +1,18 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +const authorizationHeaderFormat = `Signature keyId="%s",algorithm="%s",headers="%s",signature="%s"` + +type Signer interface { + DefaultAlgorithm() string + KeyFingerprint() string + Sign(dateHeader string, isManta bool) (string, error) + SignRaw(toSign string) (string, string, error) +} diff --git a/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go b/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go new file mode 100644 index 00000000..c2121829 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/ssh_agent_signer.go @@ -0,0 +1,194 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+// + +package authentication + +import ( + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "fmt" + "net" + "os" + "strings" + + pkgerrors "github.com/pkg/errors" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +var ( + ErrUnsetEnvVar = pkgerrors.New("environment variable SSH_AUTH_SOCK not set") +) + +type SSHAgentSigner struct { + formattedKeyFingerprint string + keyFingerprint string + algorithm string + accountName string + userName string + keyIdentifier string + + agent agent.Agent + key ssh.PublicKey +} + +type SSHAgentSignerInput struct { + KeyID string + AccountName string + Username string +} + +func NewSSHAgentSigner(input SSHAgentSignerInput) (*SSHAgentSigner, error) { + sshAgentAddress, agentOk := os.LookupEnv("SSH_AUTH_SOCK") + if !agentOk { + return nil, ErrUnsetEnvVar + } + + conn, err := net.Dial("unix", sshAgentAddress) + if err != nil { + return nil, pkgerrors.Wrap(err, "unable to dial SSH agent") + } + + ag := agent.NewClient(conn) + + signer := &SSHAgentSigner{ + keyFingerprint: input.KeyID, + accountName: input.AccountName, + agent: ag, + } + + if input.Username != "" { + signer.userName = input.Username + } + + matchingKey, err := signer.MatchKey() + if err != nil { + return nil, err + } + signer.key = matchingKey + signer.formattedKeyFingerprint = formatPublicKeyFingerprint(signer.key, true) + + _, algorithm, err := signer.SignRaw("HelloWorld") + if err != nil { + return nil, fmt.Errorf("Cannot sign using ssh agent: %s", err) + } + signer.algorithm = algorithm + + return signer, nil +} + +func (s *SSHAgentSigner) MatchKey() (ssh.PublicKey, error) { + keys, err := s.agent.List() + if err != nil { + return nil, pkgerrors.Wrap(err, "unable to list keys in SSH Agent") + } + + keyFingerprintStripped := strings.TrimPrefix(s.keyFingerprint, "MD5:") + keyFingerprintStripped = strings.TrimPrefix(keyFingerprintStripped, "SHA256:") + keyFingerprintStripped = strings.Replace(keyFingerprintStripped, ":", "", -1) + + var matchingKey ssh.PublicKey + for _, key := range keys { + keyMD5 := md5.New() + keyMD5.Write(key.Marshal()) + finalizedMD5 := fmt.Sprintf("%x", keyMD5.Sum(nil)) + + keySHA256 := sha256.New() + keySHA256.Write(key.Marshal()) + finalizedSHA256 := base64.RawStdEncoding.EncodeToString(keySHA256.Sum(nil)) + + if keyFingerprintStripped == finalizedMD5 || keyFingerprintStripped == finalizedSHA256 { + matchingKey = key + } + } + + if matchingKey == nil { + return nil, fmt.Errorf("No key in the SSH Agent matches fingerprint: %s", s.keyFingerprint) + } + + return matchingKey, nil +} + +func (s *SSHAgentSigner) Sign(dateHeader string, isManta bool) (string, error) { + const headerName = "date" + + signature, err := s.agent.Sign(s.key, []byte(fmt.Sprintf("%s: %s", headerName, dateHeader))) + if err != nil { + return "", pkgerrors.Wrap(err, "unable to sign date header") + } + + keyFormat, err := keyFormatToKeyType(signature.Format) + if err != nil { + return "", pkgerrors.Wrap(err, "unable to format signature") + } + + key := &KeyID{ + UserName: s.userName, + AccountName: s.accountName, + Fingerprint: s.formattedKeyFingerprint, + IsManta: isManta, + } + + var authSignature httpAuthSignature + switch keyFormat { + case "rsa": + authSignature, err = newRSASignature(signature.Blob) + if err != nil { + return "", pkgerrors.Wrap(err, "unable to read RSA signature") + } + case "ecdsa": + authSignature, err = newECDSASignature(signature.Blob) + if err != nil { + return "", pkgerrors.Wrap(err, "unable to read ECDSA signature") + } + default: + return "", 
fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format) + } + + return fmt.Sprintf(authorizationHeaderFormat, key.generate(), + authSignature.SignatureType(), headerName, authSignature.String()), nil +} + +func (s *SSHAgentSigner) SignRaw(toSign string) (string, string, error) { + signature, err := s.agent.Sign(s.key, []byte(toSign)) + if err != nil { + return "", "", pkgerrors.Wrap(err, "unable to sign string") + } + + keyFormat, err := keyFormatToKeyType(signature.Format) + if err != nil { + return "", "", pkgerrors.Wrap(err, "unable to format key") + } + + var authSignature httpAuthSignature + switch keyFormat { + case "rsa": + authSignature, err = newRSASignature(signature.Blob) + if err != nil { + return "", "", pkgerrors.Wrap(err, "unable to read RSA signature") + } + case "ecdsa": + authSignature, err = newECDSASignature(signature.Blob) + if err != nil { + return "", "", pkgerrors.Wrap(err, "unable to read ECDSA signature") + } + default: + return "", "", fmt.Errorf("Unsupported algorithm from SSH agent: %s", signature.Format) + } + + return authSignature.String(), authSignature.SignatureType(), nil +} + +func (s *SSHAgentSigner) KeyFingerprint() string { + return s.formattedKeyFingerprint +} + +func (s *SSHAgentSigner) DefaultAlgorithm() string { + return s.algorithm +} diff --git a/vendor/github.com/joyent/triton-go/authentication/test_signer.go b/vendor/github.com/joyent/triton-go/authentication/test_signer.go new file mode 100644 index 00000000..b34f28bf --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/test_signer.go @@ -0,0 +1,35 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +// TestSigner represents an authentication key signer which we can use for +// testing purposes only. This will largely be a stub to send through client +// unit tests. +type TestSigner struct{} + +// NewTestSigner constructs a new instance of test signer +func NewTestSigner() (Signer, error) { + return &TestSigner{}, nil +} + +func (s *TestSigner) DefaultAlgorithm() string { + return "" +} + +func (s *TestSigner) KeyFingerprint() string { + return "" +} + +func (s *TestSigner) Sign(dateHeader string, isManta bool) (string, error) { + return "", nil +} + +func (s *TestSigner) SignRaw(toSign string) (string, string, error) { + return "", "", nil +} diff --git a/vendor/github.com/joyent/triton-go/authentication/util.go b/vendor/github.com/joyent/triton-go/authentication/util.go new file mode 100644 index 00000000..95918439 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/authentication/util.go @@ -0,0 +1,37 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package authentication + +import ( + "crypto/md5" + "fmt" + "strings" + + "golang.org/x/crypto/ssh" +) + +// formatPublicKeyFingerprint produces the MD5 fingerprint of the given SSH +// public key. If display is true, the fingerprint is formatted with colons +// between each byte, as per the output of OpenSSL. 
+func formatPublicKeyFingerprint(key ssh.PublicKey, display bool) string { + publicKeyFingerprint := md5.New() + publicKeyFingerprint.Write(key.Marshal()) + publicKeyFingerprintString := fmt.Sprintf("%x", publicKeyFingerprint.Sum(nil)) + + if !display { + return publicKeyFingerprintString + } + + formatted := "" + for i := 0; i < len(publicKeyFingerprintString); i = i + 2 { + formatted = fmt.Sprintf("%s%s:", formatted, publicKeyFingerprintString[i:i+2]) + } + + return strings.TrimSuffix(formatted, ":") +} diff --git a/vendor/github.com/joyent/triton-go/client/client.go b/vendor/github.com/joyent/triton-go/client/client.go new file mode 100644 index 00000000..38ffb98e --- /dev/null +++ b/vendor/github.com/joyent/triton-go/client/client.go @@ -0,0 +1,609 @@ +// +// Copyright 2019 Joyent, Inc. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. +// + +package client + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net" + "net/http" + "net/url" + "regexp" + "strings" + "time" + + triton "github.com/joyent/triton-go" + "github.com/joyent/triton-go/authentication" + "github.com/joyent/triton-go/errors" + pkgerrors "github.com/pkg/errors" +) + +var ( + ErrDefaultAuth = pkgerrors.New("default SSH agent authentication requires SDC_KEY_ID / TRITON_KEY_ID and SSH_AUTH_SOCK") + ErrAccountName = pkgerrors.New("missing account name") + ErrMissingURL = pkgerrors.New("missing API URL") + + InvalidTritonURL = "invalid format of Triton URL" + InvalidMantaURL = "invalid format of Manta URL" + InvalidServicesURL = "invalid format of Triton Service Groups URL" + InvalidDCInURL = "invalid data center in URL" + + knownDCFormats = []string{ + `https?://(.*).api.joyent.com`, + `https?://(.*).api.joyentcloud.com`, + `https?://(.*).api.samsungcloud.io`, + } + + jpcFormatURL = "https://tsg.%s.svc.joyent.zone" + spcFormatURL = "https://tsg.%s.svc.samsungcloud.zone" +) + +// Client represents a connection to the Triton Compute or Object Storage APIs. +type Client struct { + HTTPClient *http.Client + RequestHeader *http.Header + Authorizers []authentication.Signer + TritonURL url.URL + MantaURL url.URL + ServicesURL url.URL + AccountName string + Username string +} + +func isPrivateInstall(url string) bool { + for _, pattern := range knownDCFormats { + re := regexp.MustCompile(pattern) + matches := re.FindStringSubmatch(url) + if len(matches) > 1 { + return false + } + } + + return true +} + +// parseDC parses out the data center commonly found in Triton URLs. Returns an +// error if the Triton URL does not include a known data center name, in which +// case a URL override (TRITON_TSG_URL) must be provided. +func parseDC(url string) (string, bool, error) { + isSamsung := false + if strings.Contains(url, "samsung") { + isSamsung = true + } + + for _, pattern := range knownDCFormats { + re := regexp.MustCompile(pattern) + matches := re.FindStringSubmatch(url) + if len(matches) > 1 { + return matches[1], isSamsung, nil + } + } + + return "", isSamsung, fmt.Errorf("failed to parse datacenter from '%s'", url) +} + +// New is used to construct a Client in order to make API +// requests to the Triton API. +// +// At least one signer must be provided - example signers include +// authentication.PrivateKeySigner and authentication.SSHAgentSigner. 
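+//
+// A minimal usage sketch (the endpoint, account name, and key fingerprint
+// below are illustrative placeholders, not defaults):
+//
+//	signer, _ := authentication.NewSSHAgentSigner(authentication.SSHAgentSignerInput{
+//		KeyID:       "69:e2:f6:7a:cf:33:...",
+//		AccountName: "myaccount",
+//	})
+//	c, err := client.New("https://us-east-1.api.joyent.com", "", "myaccount", signer)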
+func New(tritonURL string, mantaURL string, accountName string, signers ...authentication.Signer) (*Client, error) {
+	if accountName == "" {
+		return nil, ErrAccountName
+	}
+
+	if tritonURL == "" && mantaURL == "" {
+		return nil, ErrMissingURL
+	}
+
+	cloudURL, err := url.Parse(tritonURL)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, InvalidTritonURL)
+	}
+
+	storageURL, err := url.Parse(mantaURL)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, InvalidMantaURL)
+	}
+
+	// Generate the Services URL (TSG) based on the current datacenter used in
+	// the Triton URL (if TritonURL is available). If the TRITON_TSG_URL
+	// environment variable is available, then override using that value instead.
+	tsgURL := triton.GetEnv("TSG_URL")
+	if tsgURL == "" && tritonURL != "" && !isPrivateInstall(tritonURL) {
+		currentDC, isSamsung, err := parseDC(tritonURL)
+		if err != nil {
+			return nil, pkgerrors.Wrapf(err, InvalidDCInURL)
+		}
+
+		tsgURL = fmt.Sprintf(jpcFormatURL, currentDC)
+		if isSamsung {
+			tsgURL = fmt.Sprintf(spcFormatURL, currentDC)
+		}
+	}
+
+	servicesURL, err := url.Parse(tsgURL)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, InvalidServicesURL)
+	}
+
+	authorizers := make([]authentication.Signer, 0)
+	for _, key := range signers {
+		if key != nil {
+			authorizers = append(authorizers, key)
+		}
+	}
+
+	newClient := &Client{
+		HTTPClient: &http.Client{
+			Transport:     httpTransport(false),
+			CheckRedirect: doNotFollowRedirects,
+		},
+		Authorizers: authorizers,
+		TritonURL:   *cloudURL,
+		MantaURL:    *storageURL,
+		ServicesURL: *servicesURL,
+		AccountName: accountName,
+	}
+
+	// Default to constructing an SSHAgentSigner if there are no other signers
+	// passed into NewClient and there's a TRITON_KEY_ID and SSH_AUTH_SOCK
+	// available in the user's environ(7).
+	if len(newClient.Authorizers) == 0 {
+		if err := newClient.DefaultAuth(); err != nil {
+			return nil, err
+		}
+	}
+
+	return newClient, nil
+}
+
+// DefaultAuth provides a default key signer for a client. This should only
+// be used internally if the client has no other key signer for authenticating
+// with Triton. We first look for both `SDC_KEY_ID` and `SSH_AUTH_SOCK` in the
+// user's environ(7). If so we default to the SSH agent key signer.
+func (c *Client) DefaultAuth() error {
+	tritonKeyId := triton.GetEnv("KEY_ID")
+	if tritonKeyId == "" {
+		return ErrDefaultAuth
+	}
+
+	input := authentication.SSHAgentSignerInput{
+		KeyID:       tritonKeyId,
+		AccountName: c.AccountName,
+		Username:    c.Username,
+	}
+	defaultSigner, err := authentication.NewSSHAgentSigner(input)
+	if err != nil {
+		return pkgerrors.Wrapf(err, "unable to initialize NewSSHAgentSigner")
+	}
+	c.Authorizers = append(c.Authorizers, defaultSigner)
+
+	return nil
+}
+
+// InsecureSkipTLSVerify turns off TLS verification for the client connection. This
+// allows connection to an endpoint with a certificate which was signed by a non-
+// trusted CA, such as self-signed certificates. This can be useful when connecting
+// to temporary Triton installations such as Triton Cloud-On-A-Laptop.
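+//
+// Sketch (not a recommendation for production use):
+//
+//	c, err := client.New(tritonURL, "", "myaccount", signer)
+//	if err == nil {
+//		c.InsecureSkipTLSVerify() // e.g. against a self-signed lab endpoint
+//	}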
+func (c *Client) InsecureSkipTLSVerify() { + if c.HTTPClient == nil { + return + } + + c.HTTPClient.Transport = httpTransport(true) +} + +// httpTransport is responsible for setting up our HTTP client's transport +// settings +func httpTransport(insecureSkipTLSVerify bool) *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + MaxIdleConns: 10, + IdleConnTimeout: 15 * time.Second, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: insecureSkipTLSVerify, + }, + } +} + +func doNotFollowRedirects(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse +} + +// DecodeError decodes a backend Triton error into a more usable Go error type +func (c *Client) DecodeError(resp *http.Response, requestMethod string, consumeBody bool) error { + err := &errors.APIError{ + StatusCode: resp.StatusCode, + } + + if requestMethod != http.MethodHead && resp.Body != nil && consumeBody { + errorDecoder := json.NewDecoder(resp.Body) + if err := errorDecoder.Decode(err); err != nil { + return pkgerrors.Wrapf(err, "unable to decode error response") + } + } + + if err.Message == "" { + err.Message = fmt.Sprintf("HTTP response returned status code %d", err.StatusCode) + } + + return err +} + +// overrideHeader overrides the header of the passed in HTTP request +func (c *Client) overrideHeader(req *http.Request) { + if c.RequestHeader != nil { + for k := range *c.RequestHeader { + req.Header.Set(k, c.RequestHeader.Get(k)) + } + } +} + +// resetHeader will reset the struct field that stores custom header +// information +func (c *Client) resetHeader() { + c.RequestHeader = nil +} + +// ----------------------------------------------------------------------------- + +type RequestInput struct { + Method string + Path string + Query *url.Values + Headers *http.Header + Body interface{} + + // If the response has the HTTP status code 410 (i.e., "Gone"), should we preserve the contents of the body for the caller? + PreserveGone bool +} + +func (c *Client) ExecuteRequestURIParams(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) { + defer c.resetHeader() + + method := inputs.Method + path := inputs.Path + body := inputs.Body + query := inputs.Query + + var requestBody io.Reader + if body != nil { + marshaled, err := json.MarshalIndent(body, "", " ") + if err != nil { + return nil, err + } + requestBody = bytes.NewReader(marshaled) + } + + endpoint := c.TritonURL + endpoint.Path = path + if query != nil { + endpoint.RawQuery = query.Encode() + } + + req, err := http.NewRequest(method, endpoint.String(), requestBody) + if err != nil { + return nil, pkgerrors.Wrapf(err, "unable to construct HTTP request") + } + + dateHeader := time.Now().UTC().Format(time.RFC1123) + req.Header.Set("date", dateHeader) + + // NewClient ensures there's always an authorizer (unless this is called + // outside that constructor). 
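+	// The signed value becomes an HTTP Signature authorization header; with an
+	// RSA key it looks roughly like (illustrative values only):
+	//   Signature keyId="/myaccount/keys/69:e2:...",algorithm="rsa-sha1",headers="date",signature="MC4C..."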
+	authHeader, err := c.Authorizers[0].Sign(dateHeader, false)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
+	}
+	req.Header.Set("Authorization", authHeader)
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
+	req.Header.Set("User-Agent", triton.UserAgent())
+
+	if body != nil {
+		req.Header.Set("Content-Type", "application/json")
+	}
+
+	c.overrideHeader(req)
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
+	}
+
+	// We will only return a response from the API if it is in the HTTP StatusCode
+	// 2xx range
+	// StatusMultipleChoices is StatusCode 300
+	if resp.StatusCode >= http.StatusOK &&
+		resp.StatusCode < http.StatusMultipleChoices {
+		return resp.Body, nil
+	}
+
+	return nil, c.DecodeError(resp, req.Method, true)
+}
+
+func (c *Client) ExecuteRequest(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) {
+	return c.ExecuteRequestURIParams(ctx, inputs)
+}
+
+func (c *Client) ExecuteRequestRaw(ctx context.Context, inputs RequestInput) (*http.Response, error) {
+	defer c.resetHeader()
+
+	method := inputs.Method
+	path := inputs.Path
+	body := inputs.Body
+	query := inputs.Query
+
+	var requestBody io.Reader
+	if body != nil {
+		marshaled, err := json.MarshalIndent(body, "", " ")
+		if err != nil {
+			return nil, err
+		}
+		requestBody = bytes.NewReader(marshaled)
+	}
+
+	endpoint := c.TritonURL
+	endpoint.Path = path
+	if query != nil {
+		endpoint.RawQuery = query.Encode()
+	}
+
+	req, err := http.NewRequest(method, endpoint.String(), requestBody)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
+	}
+
+	dateHeader := time.Now().UTC().Format(time.RFC1123)
+	req.Header.Set("date", dateHeader)
+
+	// NewClient ensures there's always an authorizer (unless this is called
+	// outside that constructor).
+	authHeader, err := c.Authorizers[0].Sign(dateHeader, false)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
+	}
+	req.Header.Set("Authorization", authHeader)
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
+	req.Header.Set("User-Agent", triton.UserAgent())
+
+	if body != nil {
+		req.Header.Set("Content-Type", "application/json")
+	}
+
+	c.overrideHeader(req)
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
+	}
+
+	// We will only return a response from the API if it is in the HTTP StatusCode
+	// 2xx range
+	// StatusMultipleChoices is StatusCode 300
+	if resp.StatusCode >= http.StatusOK &&
+		resp.StatusCode < http.StatusMultipleChoices {
+		return resp, nil
+	}
+
+	// GetMachine returns an HTTP 410 response for deleted instances, but the
+	// body of the response is still a valid machine object with a State value
+	// of "deleted". Return the object to the caller as well as an error.
+	if inputs.PreserveGone && resp.StatusCode == http.StatusGone {
+		// Do not consume the response body.
+		return resp, c.DecodeError(resp, req.Method, false)
+	}
+
+	return nil, c.DecodeError(resp, req.Method, true)
+}
+
+func (c *Client) ExecuteRequestStorage(ctx context.Context, inputs RequestInput) (io.ReadCloser, http.Header, error) {
+	defer c.resetHeader()
+
+	method := inputs.Method
+	path := inputs.Path
+	query := inputs.Query
+	headers := inputs.Headers
+	body := inputs.Body
+
+	endpoint := c.MantaURL
+	endpoint.Path = path
+
+	var requestBody io.Reader
+	if body != nil {
+		marshaled, err := json.MarshalIndent(body, "", " ")
+		if err != nil {
+			return nil, nil, err
+		}
+		requestBody = bytes.NewReader(marshaled)
+	}
+
+	req, err := http.NewRequest(method, endpoint.String(), requestBody)
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
+	}
+
+	if body != nil && (headers == nil || headers.Get("Content-Type") == "") {
+		req.Header.Set("Content-Type", "application/json")
+	}
+	if headers != nil {
+		for key, values := range *headers {
+			for _, value := range values {
+				req.Header.Set(key, value)
+			}
+		}
+	}
+
+	dateHeader := time.Now().UTC().Format(time.RFC1123)
+	req.Header.Set("date", dateHeader)
+
+	authHeader, err := c.Authorizers[0].Sign(dateHeader, true)
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
+	}
+	req.Header.Set("Authorization", authHeader)
+	req.Header.Set("Accept", "*/*")
+	req.Header.Set("User-Agent", triton.UserAgent())
+
+	if query != nil {
+		req.URL.RawQuery = query.Encode()
+	}
+
+	c.overrideHeader(req)
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
+	}
+
+	// We will only return a response from the API if it is in the HTTP StatusCode
+	// 2xx range
+	// StatusMultipleChoices is StatusCode 300
+	if resp.StatusCode >= http.StatusOK &&
+		resp.StatusCode < http.StatusMultipleChoices {
+		return resp.Body, resp.Header, nil
+	}
+
+	return nil, nil, c.DecodeError(resp, req.Method, true)
+}
+
+type RequestNoEncodeInput struct {
+	Method  string
+	Path    string
+	Query   *url.Values
+	Headers *http.Header
+	Body    io.Reader
+}
+
+func (c *Client) ExecuteRequestNoEncode(ctx context.Context, inputs RequestNoEncodeInput) (io.ReadCloser, http.Header, error) {
+	defer c.resetHeader()
+
+	method := inputs.Method
+	path := inputs.Path
+	query := inputs.Query
+	headers := inputs.Headers
+	body := inputs.Body
+
+	endpoint := c.MantaURL
+	endpoint.Path = path
+
+	req, err := http.NewRequest(method, endpoint.String(), body)
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
+	}
+
+	if headers != nil {
+		for key, values := range *headers {
+			for _, value := range values {
+				req.Header.Set(key, value)
+			}
+		}
+	}
+
+	dateHeader := time.Now().UTC().Format(time.RFC1123)
+	req.Header.Set("date", dateHeader)
+
+	authHeader, err := c.Authorizers[0].Sign(dateHeader, true)
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
+	}
+	req.Header.Set("Authorization", authHeader)
+	req.Header.Set("Accept", "*/*")
+	req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
+	req.Header.Set("User-Agent", triton.UserAgent())
+
+	if query != nil {
+		req.URL.RawQuery = query.Encode()
+	}
+
+	c.overrideHeader(req)
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
+	}
+
+	// We will only return a response from the API if it is in the HTTP StatusCode
+	// 2xx range
+	// StatusMultipleChoices is StatusCode 300
+	if resp.StatusCode >= http.StatusOK &&
+		resp.StatusCode < http.StatusMultipleChoices {
+		return resp.Body, resp.Header, nil
+	}
+
+	return nil, nil, c.DecodeError(resp, req.Method, true)
+}
+
+func (c *Client) ExecuteRequestTSG(ctx context.Context, inputs RequestInput) (io.ReadCloser, error) {
+	defer c.resetHeader()
+
+	method := inputs.Method
+	path := inputs.Path
+	body := inputs.Body
+	query := inputs.Query
+
+	var requestBody io.Reader
+	if body != nil {
+		marshaled, err := json.MarshalIndent(body, "", " ")
+		if err != nil {
+			return nil, err
+		}
+		requestBody = bytes.NewReader(marshaled)
+	}
+
+	endpoint := c.ServicesURL
+	endpoint.Path = path
+	if query != nil {
+		endpoint.RawQuery = query.Encode()
+	}
+
+	req, err := http.NewRequest(method, endpoint.String(), requestBody)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to construct HTTP request")
+	}
+
+	dateHeader := time.Now().UTC().Format(time.RFC1123)
+	req.Header.Set("date", dateHeader)
+
+	// NewClient ensures there's always an authorizer (unless this is called
+	// outside that constructor).
+	authHeader, err := c.Authorizers[0].Sign(dateHeader, false)
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to sign HTTP request")
+	}
+	req.Header.Set("Authorization", authHeader)
+	req.Header.Set("Accept", "application/json")
+	req.Header.Set("Accept-Version", triton.CloudAPIMajorVersion)
+	req.Header.Set("User-Agent", triton.UserAgent())
+
+	if body != nil {
+		req.Header.Set("Content-Type", "application/json")
+	}
+
+	c.overrideHeader(req)
+
+	resp, err := c.HTTPClient.Do(req.WithContext(ctx))
+	if err != nil {
+		return nil, pkgerrors.Wrapf(err, "unable to execute HTTP request")
+	}
+
+	if resp.StatusCode >= http.StatusOK &&
+		resp.StatusCode < http.StatusMultipleChoices {
+		return resp.Body, nil
+	}
+
+	return nil, fmt.Errorf("could not process backend TSG request")
+}
diff --git a/vendor/github.com/joyent/triton-go/errors/errors.go b/vendor/github.com/joyent/triton-go/errors/errors.go
new file mode 100644
index 00000000..8f271973
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/errors/errors.go
@@ -0,0 +1,297 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package errors
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// APIError represents an error code and message along with
+// the status code of the HTTP request which resulted in the error
+// message. Error codes used by the Triton API are listed at
+// https://apidocs.joyent.com/cloudapi/#cloudapi-http-responses
+// Error codes used by the Manta API are listed at
+// https://apidocs.joyent.com/manta/api.html#errors
+type APIError struct {
+	StatusCode int
+	Code       string `json:"code"`
+	Message    string `json:"message"`
+}
+
+// Error implements interface Error on the APIError type.
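+// The rendered form is the code followed by the message, e.g. (illustrative):
+// "ResourceNotFound: VM not found".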
+func (e APIError) Error() string { + return strings.Trim(fmt.Sprintf("%+q", e.Code), `"`) + ": " + strings.Trim(fmt.Sprintf("%+q", e.Message), `"`) +} + +// ClientError represents an error code and message returned +// when connecting to the triton-go client +type ClientError struct { + StatusCode int + Code string `json:"code"` + Message string `json:"message"` +} + +// Error implements interface Error on the ClientError type. +func (e ClientError) Error() string { + return strings.Trim(fmt.Sprintf("%+q", e.Code), `"`) + ": " + strings.Trim(fmt.Sprintf("%+q", e.Message), `"`) +} + +func IsAuthSchemeError(err error) bool { + return IsSpecificError(err, "AuthScheme") +} + +func IsAuthorizationError(err error) bool { + return IsSpecificError(err, "Authorization") +} + +func IsBadRequestError(err error) bool { + return IsSpecificError(err, "BadRequest") +} + +func IsChecksumError(err error) bool { + return IsSpecificError(err, "Checksum") +} + +func IsConcurrentRequestError(err error) bool { + return IsSpecificError(err, "ConcurrentRequest") +} + +func IsContentLengthError(err error) bool { + return IsSpecificError(err, "ContentLength") +} + +func IsContentMD5MismatchError(err error) bool { + return IsSpecificError(err, "ContentMD5Mismatch") +} + +func IsEntityExistsError(err error) bool { + return IsSpecificError(err, "EntityExists") +} + +func IsInvalidArgumentError(err error) bool { + return IsSpecificError(err, "InvalidArgument") +} + +func IsInvalidAuthTokenError(err error) bool { + return IsSpecificError(err, "InvalidAuthToken") +} + +func IsInvalidCredentialsError(err error) bool { + return IsSpecificError(err, "InvalidCredentials") +} + +func IsInvalidDurabilityLevelError(err error) bool { + return IsSpecificError(err, "InvalidDurabilityLevel") +} + +func IsInvalidKeyIdError(err error) bool { + return IsSpecificError(err, "InvalidKeyId") +} + +func IsInvalidJobError(err error) bool { + return IsSpecificError(err, "InvalidJob") +} + +func IsInvalidLinkError(err error) bool { + return IsSpecificError(err, "InvalidLink") +} + +func IsInvalidLimitError(err error) bool { + return IsSpecificError(err, "InvalidLimit") +} + +func IsInvalidSignatureError(err error) bool { + return IsSpecificError(err, "InvalidSignature") +} + +func IsInvalidUpdateError(err error) bool { + return IsSpecificError(err, "InvalidUpdate") +} + +func IsDirectoryDoesNotExistError(err error) bool { + return IsSpecificError(err, "DirectoryDoesNotExist") +} + +func IsDirectoryExistsError(err error) bool { + return IsSpecificError(err, "DirectoryExists") +} + +func IsDirectoryNotEmptyError(err error) bool { + return IsSpecificError(err, "DirectoryNotEmpty") +} + +func IsDirectoryOperationError(err error) bool { + return IsSpecificError(err, "DirectoryOperation") +} + +func IsInternalError(err error) bool { + return IsSpecificError(err, "Internal") +} + +func IsJobNotFoundError(err error) bool { + return IsSpecificError(err, "JobNotFound") +} + +func IsJobStateError(err error) bool { + return IsSpecificError(err, "JobState") +} + +func IsKeyDoesNotExistError(err error) bool { + return IsSpecificError(err, "KeyDoesNotExist") +} + +func IsNotAcceptableError(err error) bool { + return IsSpecificError(err, "NotAcceptable") +} + +func IsNotEnoughSpaceError(err error) bool { + return IsSpecificError(err, "NotEnoughSpace") +} + +func IsLinkNotFoundError(err error) bool { + return IsSpecificError(err, "LinkNotFound") +} + +func IsLinkNotObjectError(err error) bool { + return IsSpecificError(err, "LinkNotObject") +} + +func 
IsLinkRequiredError(err error) bool { + return IsSpecificError(err, "LinkRequired") +} + +func IsParentNotDirectoryError(err error) bool { + return IsSpecificError(err, "ParentNotDirectory") +} + +func IsPreconditionFailedError(err error) bool { + return IsSpecificError(err, "PreconditionFailed") +} + +func IsPreSignedRequestError(err error) bool { + return IsSpecificError(err, "PreSignedRequest") +} + +func IsRequestEntityTooLargeError(err error) bool { + return IsSpecificError(err, "RequestEntityTooLarge") +} + +func IsResourceNotFoundError(err error) bool { + return IsSpecificError(err, "ResourceNotFound") +} + +func IsRootDirectoryError(err error) bool { + return IsSpecificError(err, "RootDirectory") +} + +func IsServiceUnavailableError(err error) bool { + return IsSpecificError(err, "ServiceUnavailable") +} + +func IsSSLRequiredError(err error) bool { + return IsSpecificError(err, "SSLRequired") +} + +func IsUploadTimeoutError(err error) bool { + return IsSpecificError(err, "UploadTimeout") +} + +func IsUserDoesNotExistError(err error) bool { + return IsSpecificError(err, "UserDoesNotExist") +} + +func IsBadRequest(err error) bool { + return IsSpecificError(err, "BadRequest") +} + +func IsInUseError(err error) bool { + return IsSpecificError(err, "InUseError") +} + +func IsInvalidArgument(err error) bool { + return IsSpecificError(err, "InvalidArgument") +} + +func IsInvalidCredentials(err error) bool { + return IsSpecificError(err, "InvalidCredentials") +} + +func IsInvalidHeader(err error) bool { + return IsSpecificError(err, "InvalidHeader") +} + +func IsInvalidVersion(err error) bool { + return IsSpecificError(err, "InvalidVersion") +} + +func IsMissingParameter(err error) bool { + return IsSpecificError(err, "MissingParameter") +} + +func IsNotAuthorized(err error) bool { + return IsSpecificError(err, "NotAuthorized") +} + +func IsRequestThrottled(err error) bool { + return IsSpecificError(err, "RequestThrottled") +} + +func IsRequestTooLarge(err error) bool { + return IsSpecificError(err, "RequestTooLarge") +} + +func IsRequestMoved(err error) bool { + return IsSpecificError(err, "RequestMoved") +} + +func IsResourceFound(err error) bool { + return IsSpecificError(err, "ResourceFound") +} + +func IsResourceNotFound(err error) bool { + return IsSpecificError(err, "ResourceNotFound") +} + +func IsUnknownError(err error) bool { + return IsSpecificError(err, "UnknownError") +} + +func IsEmptyResponse(err error) bool { + return IsSpecificError(err, "EmptyResponse") +} + +func IsStatusNotFoundCode(err error) bool { + return IsSpecificStatusCode(err, http.StatusNotFound) +} + +func IsSpecificError(myError error, errorCode string) bool { + switch err := errors.Cause(myError).(type) { + case *APIError: + if err.Code == errorCode { + return true + } + } + + return false +} + +func IsSpecificStatusCode(myError error, statusCode int) bool { + switch err := errors.Cause(myError).(type) { + case *APIError: + if err.StatusCode == statusCode { + return true + } + } + + return false +} diff --git a/vendor/github.com/joyent/triton-go/go.mod b/vendor/github.com/joyent/triton-go/go.mod new file mode 100644 index 00000000..984a08c0 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/go.mod @@ -0,0 +1,175 @@ +module github.com/joyent/triton-go + +require ( + collectd.org v0.3.0 // indirect + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 // indirect + github.com/DataDog/datadog-go v2.2.0+incompatible // indirect + 
github.com/VividCortex/gohistogram v1.0.0 // indirect + github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af + github.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee // indirect + github.com/aclements/go-moremath v0.0.0-20190506201756-286cc0be6f75 // indirect + github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5 // indirect + github.com/ajstarks/deck v0.0.0-20190526003814-edf08d731d5a // indirect + github.com/ajstarks/svgo v0.0.0-20181006003313-6ce6a3bcf6cd // indirect + github.com/apache/arrow/go/arrow v0.0.0-20190626211233-e980b2024a28 // indirect + github.com/apache/thrift v0.12.0 // indirect + github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a // indirect + github.com/aws/aws-sdk-go v1.20.9 // indirect + github.com/aws/aws-sdk-go-v2 v0.9.0 // indirect + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 // indirect + github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 // indirect + github.com/boltdb/bolt v1.3.1 // indirect + github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 // indirect + github.com/cactus/go-statsd-client/statsd v0.0.0-20190501063751-9a7692639588 // indirect + github.com/casbin/casbin v1.8.3 // indirect + github.com/cenkalti/backoff v2.1.1+incompatible // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect + github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect + github.com/circonus-labs/circonusllhist v0.1.3 // indirect + github.com/clbanning/x2j v0.0.0-20180326210544-5e605d46809c // indirect + github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect + github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect + github.com/codegangsta/negroni v1.0.0 // indirect + github.com/coreos/bbolt v1.3.3 // indirect + github.com/coreos/etcd v3.3.13+incompatible // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect + github.com/cznic/cc v0.0.0-20181122101902-d673e9b70d4d // indirect + github.com/cznic/fileutil v0.0.0-20181122101858-4d67cfea8c87 // indirect + github.com/cznic/golex v0.0.0-20181122101858-9c343928389c // indirect + github.com/cznic/internal v0.0.0-20181122101858-3279554c546e // indirect + github.com/cznic/ir v0.0.0-20181122101859-da7ba2ecce8b // indirect + github.com/cznic/lex v0.0.0-20181122101858-ce0fb5e9bb1b // indirect + github.com/cznic/lexer v0.0.0-20181122101858-e884d4bd112e // indirect + github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect + github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect + github.com/cznic/xc v0.0.0-20181122101856-45b06973881e // indirect + github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 // indirect + github.com/disintegration/gift v1.2.0 // indirect + github.com/djherbis/buffer v1.0.0 // indirect + github.com/djherbis/nio v2.0.3+incompatible // indirect + github.com/dustin/go-humanize v1.0.0 + github.com/edsrzf/mmap-go v1.0.0 // indirect + github.com/fogleman/gg v1.3.0 // indirect + github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 // indirect + 
github.com/garyburd/redigo v1.6.0 // indirect + github.com/gin-gonic/gin v1.4.0 // indirect + github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 // indirect + github.com/go-chi/chi v4.0.2+incompatible // indirect + github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab // indirect + github.com/gobuffalo/buffalo v0.14.6 // indirect + github.com/gobuffalo/shoulders v1.1.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/godbus/dbus v4.1.0+incompatible // indirect + github.com/goji/param v0.0.0-20160927210335-d7f49fd7d1ed // indirect + github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect + github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect + github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 // indirect + github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 // indirect + github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 // indirect + github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 // indirect + github.com/google/pprof v0.0.0-20190515194954-54271f7e092f // indirect + github.com/googleapis/gax-go v2.0.2+incompatible // indirect + github.com/grpc-ecosystem/grpc-gateway v1.9.2 // indirect + github.com/hashicorp/consul/api v1.1.0 // indirect + github.com/hashicorp/go-retryablehttp v0.5.4 // indirect + github.com/hashicorp/go-version v1.2.0 // indirect + github.com/hudl/fargo v1.2.0 // indirect + github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 // indirect + github.com/imdario/mergo v0.3.6 + github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec // indirect + github.com/influxdata/flux v0.34.1 // indirect + github.com/influxdata/influxdb v1.7.6 // indirect + github.com/influxdata/influxql v1.0.0 // indirect + github.com/influxdata/roaring v0.4.12 // indirect + github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368 // indirect + github.com/jackc/pgx v3.3.0+incompatible + github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 // indirect + github.com/jsternberg/zap-logfmt v1.2.0 // indirect + github.com/jung-kurt/gofpdf v1.5.1 // indirect + github.com/justinas/alice v0.0.0-20171023064455-03f45bd4b7da // indirect + github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef // indirect + github.com/klauspost/compress v1.7.1 // indirect + github.com/klauspost/cpuid v1.2.1 // indirect + github.com/klauspost/pgzip v1.2.1 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/lib/pq v1.1.1 // indirect + github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743 // indirect + github.com/lightstep/lightstep-tracer-go v0.16.0 // indirect + github.com/markbates/going v1.0.3 // indirect + github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 // indirect + github.com/mattn/go-isatty v0.0.8 + github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/nats-io/go-nats v1.7.2 // indirect + github.com/nats-io/nkeys v0.0.2 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect + 
github.com/oklog/oklog v0.3.2 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/olekukonko/tablewriter v0.0.0-20180130162743-b8a9be070da4 + github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect + github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect + github.com/opentracing/basictracer-go v1.0.0 // indirect + github.com/opentracing/opentracing-go v1.1.0 // indirect + github.com/openzipkin-contrib/zipkin-go-opentracing v0.3.5 // indirect + github.com/openzipkin/zipkin-go v0.1.6 // indirect + github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c // indirect + github.com/paulbellamy/ratecounter v0.2.0 // indirect + github.com/pborman/uuid v1.2.0 // indirect + github.com/performancecopilot/speed v3.0.0+incompatible // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/peterh/liner v1.1.0 // indirect + github.com/philhofer/fwd v1.0.0 // indirect + github.com/pkg/errors v0.8.1 + github.com/pkg/profile v1.3.0 // indirect + github.com/pkg/sftp v1.10.0 // indirect + github.com/pressly/chi v4.0.2+incompatible // indirect + github.com/prometheus/client_golang v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20190512091148-babf20351dd7 // indirect + github.com/retailnext/hllpp v1.0.0 // indirect + github.com/rs/cors v1.6.0 // indirect + github.com/rs/xid v1.2.1 // indirect + github.com/rs/zerolog v1.4.0 + github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd // indirect + github.com/samuel/go-zookeeper v0.0.0-20180130194729-c4fab1ac1bec // indirect + github.com/sean-/conswriter v0.0.0-20180208195008-f5ae3917a627 + github.com/sean-/pager v0.0.0-20180208200047-666be9bf53b5 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 + github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect + github.com/sony/gobreaker v0.4.1 // indirect + github.com/spf13/afero v1.2.1 // indirect + github.com/spf13/cobra v0.0.5 + github.com/spf13/pflag v1.0.3 + github.com/spf13/viper v1.4.0 + github.com/stathat/go v1.0.0 // indirect + github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94 // indirect + github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a // indirect + github.com/syndtr/goleveldb v1.0.0 // indirect + github.com/tinylib/msgp v1.1.0 // indirect + github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 // indirect + github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b // indirect + github.com/tj/go-spin v1.1.0 // indirect + github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 // indirect + github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect + github.com/urfave/cli v1.20.0 // indirect + github.com/urfave/negroni v1.0.0 // indirect + github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca // indirect + github.com/zenazn/goji v0.9.0 // indirect + go.etcd.io/bbolt v1.3.3 // indirect + go.etcd.io/etcd v3.3.13+incompatible // indirect + go.opencensus.io v0.22.0 // indirect + golang.org/x/build v0.0.0-20190626175840-54405f243e45 // indirect + golang.org/x/crypto 
v0.0.0-20190611184440-5c40567a22f8 + golang.org/x/image v0.0.0-20190622003408-7e034cad6442 // indirect + golang.org/x/mobile v0.0.0-20190607214518-6fa95d984e88 // indirect + golang.org/x/tools v0.0.0-20190626204024-7ef8a99cf38d // indirect + google.golang.org/api v0.7.0 // indirect + google.golang.org/grpc v1.21.1 // indirect + gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect + gopkg.in/gcfg.v1 v1.2.3 // indirect + gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec // indirect + gopkg.in/urfave/cli.v1 v1.20.0 // indirect + gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 // indirect + sourcegraph.com/sourcegraph/appdash v0.0.0-20190107175209-d9ea5c54f7dc // indirect + sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 // indirect +) diff --git a/vendor/github.com/joyent/triton-go/go.sum b/vendor/github.com/joyent/triton-go/go.sum new file mode 100644 index 00000000..789f7676 --- /dev/null +++ b/vendor/github.com/joyent/triton-go/go.sum @@ -0,0 +1,1663 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40= +cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +collectd.org v0.3.0 h1:iNBHGw1VvPJxH2B6RiFWFZ+vsjo1lCdRszBeOuwGi00= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= +dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU= +dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= +dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/OneOfOne/xxhash v1.2.2 
h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee h1:KJgh99JlYRhfgHtb7XyhAZSJMdfkjVmo3PP7XO1/HO8= +github.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes= +github.com/aclements/go-moremath v0.0.0-20190506201756-286cc0be6f75 h1:OMm3VB/wrXTgQaFh/2z7xTH6fqUy3Q77ybHjmAJ/Ca4= +github.com/aclements/go-moremath v0.0.0-20190506201756-286cc0be6f75/go.mod h1:idZL3yvz4kzx1dsBOAC+oYv6L92P1oFEhUXUB1A/lwQ= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5 h1:rFw4nCn9iMW+Vajsk51NtYIcwSTkXr+JGrMd36kTDJw= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= +github.com/ajstarks/deck v0.0.0-20190526003814-edf08d731d5a h1:7ctgSibBC8i8TYI42mPgFZXORy8RvI1Yu/oAyYVU9iU= +github.com/ajstarks/deck v0.0.0-20190526003814-edf08d731d5a/go.mod h1:j3f/59diR4DorW5A78eDYvRkdrkh+nps4p5LA1Tl05U= +github.com/ajstarks/svgo v0.0.0-20181006003313-6ce6a3bcf6cd h1:JdtityihAc6A+gVfYh6vGXfZQg+XOLyBvla/7NbXFCg= +github.com/ajstarks/svgo v0.0.0-20181006003313-6ce6a3bcf6cd/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI= +github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 
h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/apache/arrow/go/arrow v0.0.0-20190426170622-338c62a2a205/go.mod h1:W8yIftLTH1FLJvxuZc4tFnIlZ2tWg7RCoJR1HcETAso= +github.com/apache/arrow/go/arrow v0.0.0-20190626211233-e980b2024a28 h1:ok4+ldBL+yIgXxnvIqcbfnBXe7d1dxENeJ6VslcuZa0= +github.com/apache/arrow/go/arrow v0.0.0-20190626211233-e980b2024a28/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= +github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apex/log v1.1.0 h1:J5rld6WVFi6NxA6m8GJ1LJqu3+GiTFIt3mYv27gdQWI= +github.com/apex/log v1.1.0/go.mod h1:yA770aXIDQrhVOIGurT/pVdfCpSq1GQV/auzMN5fzvY= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aws/aws-sdk-go v1.15.64/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= +github.com/aws/aws-sdk-go v1.20.9 h1:ji83wMPlO81uzfAQZjIcpS04EhdOF5joPUFC1VeHGjc= +github.com/aws/aws-sdk-go v1.20.9/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v0.9.0 h1:dWtJKGRFv3UZkMBQaIzMsF0/y4ge3iQPWTzeC4r/vl4= +github.com/aws/aws-sdk-go-v2 v0.9.0/go.mod h1:sa1GePZ/LfBGI4dSq30f6uR4Tthll8axxtEPvlpXZ8U= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy 
v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= +github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= +github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625 h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA= +github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668/go.mod h1:H0wQNHz2YrLsuXOZozoeDmnHXkNCRmMW0gwFWDfEZDA= +github.com/c-bata/go-prompt v0.2.2 h1:uyKRz6Z6DUyj49QVijyM339UJV9yhbr70gESwbNU3e0= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/caarlos0/ctrlc v1.0.0 h1:2DtF8GSIcajgffDFJzyG15vO+1PuBWOMUdFut7NnXhw= +github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= +github.com/cactus/go-statsd-client/statsd v0.0.0-20190501063751-9a7692639588 h1:6yVhh6P5OsW6HutPt7z2ggDgZczgUtSl2kGRe+DslPU= +github.com/cactus/go-statsd-client/statsd v0.0.0-20190501063751-9a7692639588/go.mod h1:3/sdo8I67TaOslRGJ6FqQC/ynu+wg7H6IE4WYtr51hk= +github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e h1:V9a67dfYqPLAvzk5hMQOXYJlZ4SLIXgyKIE+ZiHzgGQ= +github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/casbin/casbin v1.8.3 h1:tIrXu7UGj0ECpV4tQhQy9sE2gJZyKNG0tQDEKQiuTYQ= +github.com/casbin/casbin v1.8.3/go.mod h1:z8uPsfBJGUsnkagrt3G8QvjgTKFMBJ32UP8HpZllfog= +github.com/cenkalti/backoff v2.1.1+incompatible h1:tKJnvO2kl0zmb/jA5UKAt4VoEVw1qxKWjE/Bpp46npY= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY= +github.com/circonus-labs/circonus-gometrics 
v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20180326210544-5e605d46809c h1:gZ7YRTPnL5fRduIw4unEfQWLB/DInK29N2zYza16wfQ= +github.com/clbanning/x2j v0.0.0-20180326210544-5e605d46809c/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= +github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/codegangsta/negroni v1.0.0 h1:+aYywywx4bnKXWvoWtRfJ91vC59NbEhEY03sZjQhbVY= +github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a h1:W8b4lQ4tFF21aspRGoBuCNV6V2fFJBF+pm1J6OY8Lys= +github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f 
h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M= +github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cznic/cc v0.0.0-20181122101902-d673e9b70d4d h1:AePLLLsGE1yOEDAmaJlQ9zd/9qiaEVskYukZ1f2srAA= +github.com/cznic/cc v0.0.0-20181122101902-d673e9b70d4d/go.mod h1:m3fD/V+XTB35Kh9zw6dzjMY+We0Q7PMf6LLIC4vuG9k= +github.com/cznic/fileutil v0.0.0-20181122101858-4d67cfea8c87 h1:94XgeeTZ+3Xi9zsdgBjP1Byx/wywCImjF8FzQ7OaKdU= +github.com/cznic/fileutil v0.0.0-20181122101858-4d67cfea8c87/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c h1:G8zTsaqyVfIHpgMFcGgdbhHSFhlNc77rAKkhVbQ9kQg= +github.com/cznic/golex v0.0.0-20181122101858-9c343928389c/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc= +github.com/cznic/internal v0.0.0-20181122101858-3279554c546e h1:58AcyflCe84EONph4gkyo3eDOEQcW5HIPfQBrD76W68= +github.com/cznic/internal v0.0.0-20181122101858-3279554c546e/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4= +github.com/cznic/ir v0.0.0-20181122101859-da7ba2ecce8b h1:GelTfvbS1tZtnyCTx3aMIHbRw5euyrfHd6H3rLqLlHU= +github.com/cznic/ir v0.0.0-20181122101859-da7ba2ecce8b/go.mod h1:bctvsSxTD8Lpaj5RRQ0OrAAu4+0mD4KognDQItBNMn0= +github.com/cznic/lex v0.0.0-20181122101858-ce0fb5e9bb1b h1:KJtZdP0G3jUnpgEWZdJ7326WvTbREwcwlDSOpkpNZGY= +github.com/cznic/lex v0.0.0-20181122101858-ce0fb5e9bb1b/go.mod h1:LcYbbl1tn/c31gGxe2EOWyzr7EaBcdQOoIVGvJMc7Dc= +github.com/cznic/lexer v0.0.0-20181122101858-e884d4bd112e h1:K5kIaw68kxYw40mp8YKuwKrb63R0BPCR1iEGvBR6Mfs= +github.com/cznic/lexer v0.0.0-20181122101858-e884d4bd112e/go.mod h1:YNGh5qsZlhFHDfWBp/3DrJ37Uy4pRqlwxtL+LS7a/Qw= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= +github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 h1:MZRmHqDBd0vxNwenEbKSQqRVT24d3C05ft8kduSwlqM= +github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= +github.com/cznic/xc v0.0.0-20181122101856-45b06973881e h1:U9mUTtTukbCdFuphv3QiJBjtImXsUTHcX5toZZi4OzY= +github.com/cznic/xc v0.0.0-20181122101856-45b06973881e/go.mod h1:3oFoiOvCDBYH+swwf5+k/woVmWy7h1Fcyu8Qig/jjX0= +github.com/dave/jennifer v1.2.0 h1:S15ZkFMRoJ36mGAQgWL1tnr0NQJh9rZ8qatseX/VbBc= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go 
v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8 h1:akOQj8IVgoeFfBTzGOEQakCYshWD6RNo1M5pivFXt70= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/disintegration/gift v1.2.0 h1:VMQeei2F+ZtsHjMgP6Sdt1kFjRhs2lGz8ljEOPeIR50= +github.com/disintegration/gift v1.2.0/go.mod h1:Jh2i7f7Q2BM7Ezno3PhfezbR1xpUg9dUg3/RlKGr4HI= +github.com/djherbis/buffer v1.0.0 h1:6rqC+Sqzr+bnbjlUzLf3+9Tz8CT8YVbdqGRGMZM0H3o= +github.com/djherbis/buffer v1.0.0/go.mod h1:VwN8VdFkMY0DCALdY8o00d3IZ6Amz/UNVMWcSaJT44o= +github.com/djherbis/nio v2.0.3+incompatible h1:CidFHoR25he4511AIQ3RW9LH9XkLMOoNML8xd7R7Irc= +github.com/djherbis/nio v2.0.3+incompatible/go.mod h1:v74owXPROGWsr1y28T13rlXf5Hn/bWJ1bbX8M+BqyPo= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/emirpasic/gods v1.9.0 h1:rUF4PuzEjMChMiNsVjdI+SyLu7rEqpQ5reNFnhC7oFo= +github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= 
+github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8 h1:a9ENSRDFBUPkJ5lCgVZh26+ZbGyoVJG7yb5SSzF5H54= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/garyburd/redigo v1.6.0 h1:0VruCpn7yAIIu7pWVClQC8wxCJEcG3nyzpMSHKi1PQc= +github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= +github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= +github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ= +github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= +github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2 h1:Ujru1hufTHVb++eG6OuNDKMxZnGIvF6o/u8q/8h2+I4= +github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/go-chi/chi v4.0.2+incompatible h1:maB6vn6FqCxrpz4FqWdh4+lwpyZIQS7YEAUcHlgXVRs= +github.com/go-chi/chi v4.0.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/attrs v0.0.0-20190219185331-f338c9388485/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= +github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= 
+github.com/gobuffalo/attrs v0.1.0 h1:LY6/rbhPD/hfa+AfviaMXBmZBGb0fGHF12yg7f3dPQA= +github.com/gobuffalo/attrs v0.1.0/go.mod h1:fmNpaWyHM0tRm8gCZWKx8yY9fvaNLo2PyzBNSrBZ5Hw= +github.com/gobuffalo/buffalo v0.12.8-0.20181004233540-fac9bb505aa8/go.mod h1:sLyT7/dceRXJUxSsE813JTQtA3Eb1vjxWfo/N//vXIY= +github.com/gobuffalo/buffalo v0.13.0/go.mod h1:Mjn1Ba9wpIbpbrD+lIDMy99pQ0H0LiddMIIDGse7qT4= +github.com/gobuffalo/buffalo v0.13.1/go.mod h1:K9c22KLfDz7obgxvHv1amvJtCQEZNiox9+q6FDJ1Zcs= +github.com/gobuffalo/buffalo v0.13.2/go.mod h1:vA8I4Dwcfkx7RAzIRHVDZxfS3QJR7muiOjX4r8P2/GE= +github.com/gobuffalo/buffalo v0.13.4/go.mod h1:y2jbKkO0k49OrNIOAkbWQiPBqxAFpHn5OKnkc7BDh+I= +github.com/gobuffalo/buffalo v0.13.5/go.mod h1:hPcP12TkFSZmT3gUVHZ24KRhTX3deSgu6QSgn0nbWf4= +github.com/gobuffalo/buffalo v0.13.6/go.mod h1:/Pm0MPLusPhWDayjRD+/vKYnelScIiv0sX9YYek0wpg= +github.com/gobuffalo/buffalo v0.13.7/go.mod h1:3gQwZhI8DSbqmDqlFh7kfwuv/wd40rqdVxXtFWlCQHw= +github.com/gobuffalo/buffalo v0.13.9/go.mod h1:vIItiQkTHq46D1p+bw8mFc5w3BwrtJhMvYjSIYK3yjE= +github.com/gobuffalo/buffalo v0.13.12/go.mod h1:Y9e0p0cdo/eI+lHm7EFzlkc9YzjwGo5QeDj+FbsyqVA= +github.com/gobuffalo/buffalo v0.13.13/go.mod h1:WAL36xBN8OkU71lNjuYv6llmgl0o8twjlY+j7oGUmYw= +github.com/gobuffalo/buffalo v0.14.0/go.mod h1:A9JI3juErlXNrPBeJ/0Pdky9Wz+GffEg7ZN0d1B5h48= +github.com/gobuffalo/buffalo v0.14.2/go.mod h1:VNMTzddg7bMnkVxCUXzqTS4PvUo6cDOs/imtG8Cnt/k= +github.com/gobuffalo/buffalo v0.14.3/go.mod h1:3O9vB/a4UNb16TevehTvDCaPnb98NWvYz0msJQ6ZlVA= +github.com/gobuffalo/buffalo v0.14.5/go.mod h1:RWK6evR4hY4nRVfw9xie9V/LYK3j0U9wU2oKgQUFZ88= +github.com/gobuffalo/buffalo v0.14.6 h1:Y2ptQaXoTrsgPGlvfcrWvum1sUZ2C7azUndT1e9n9Xg= +github.com/gobuffalo/buffalo v0.14.6/go.mod h1:71Un+T2JGgwXLjBqYFdGSooz/OUjw15BJM0nbbcAM0o= +github.com/gobuffalo/buffalo-docker v1.0.5/go.mod h1:NZ3+21WIdqOUlOlM2onCt7cwojYp4Qwlsngoolw8zlE= +github.com/gobuffalo/buffalo-docker v1.0.6/go.mod h1:UlqKHJD8CQvyIb+pFq+m/JQW2w2mXuhxsaKaTj1X1XI= +github.com/gobuffalo/buffalo-docker v1.0.7 h1:kj+AfChcev54v4N8N6PzNFWyiVSenzu6djrgxTBvbTk= +github.com/gobuffalo/buffalo-docker v1.0.7/go.mod h1:BdB8AhcmjwR6Lo3rDPMzyh/+eNjYnZ1TAO0eZeLkhig= +github.com/gobuffalo/buffalo-plugins v1.0.2/go.mod h1:pOp/uF7X3IShFHyobahTkTLZaeUXwb0GrUTb9ngJWTs= +github.com/gobuffalo/buffalo-plugins v1.0.4/go.mod h1:pWS1vjtQ6uD17MVFWf7i3zfThrEKWlI5+PYLw/NaDB4= +github.com/gobuffalo/buffalo-plugins v1.4.3/go.mod h1:uCzTY0woez4nDMdQjkcOYKanngeUVRO2HZi7ezmAjWY= +github.com/gobuffalo/buffalo-plugins v1.5.1/go.mod h1:jbmwSZK5+PiAP9cC09VQOrGMZFCa/P0UMlIS3O12r5w= +github.com/gobuffalo/buffalo-plugins v1.6.1/go.mod h1:/XZt7UuuDnx5P4v3cStK0+XoYiNOA2f0wDIsm1oLJQA= +github.com/gobuffalo/buffalo-plugins v1.6.4/go.mod h1:/+N1aophkA2jZ1ifB2O3Y9yGwu6gKOVMtUmJnbg+OZI= +github.com/gobuffalo/buffalo-plugins v1.6.5/go.mod h1:0HVkbgrVs/MnPZ/FOseDMVanCTm2RNcdM0PuXcL1NNI= +github.com/gobuffalo/buffalo-plugins v1.6.6/go.mod h1:hSWAEkJyL9RENJlmanMivgnNkrQ9RC4xJARz8dQryi0= +github.com/gobuffalo/buffalo-plugins v1.6.7/go.mod h1:ZGZRkzz2PiKWHs0z7QsPBOTo2EpcGRArMEym6ghKYgk= +github.com/gobuffalo/buffalo-plugins v1.6.9/go.mod h1:yYlYTrPdMCz+6/+UaXg5Jm4gN3xhsvsQ2ygVatZV5vw= +github.com/gobuffalo/buffalo-plugins v1.6.10/go.mod h1:HxzPZjAEzh9H0gnHelObxxrut9O+1dxydf7U93SYsc8= 
+github.com/gobuffalo/buffalo-plugins v1.6.11/go.mod h1:eAA6xJIL8OuynJZ8amXjRmHND6YiusVAaJdHDN1Lu8Q= +github.com/gobuffalo/buffalo-plugins v1.7.2/go.mod h1:vEbx30cLFeeZ48gBA/rkhbqC2M/2JpsKs5CoESWhkPw= +github.com/gobuffalo/buffalo-plugins v1.8.1/go.mod h1:vu71J3fD4b7KKywJQ1tyaJGtahG837Cj6kgbxX0e4UI= +github.com/gobuffalo/buffalo-plugins v1.8.2/go.mod h1:9te6/VjEQ7pKp7lXlDIMqzxgGpjlKoAcAANdCgoR960= +github.com/gobuffalo/buffalo-plugins v1.8.3/go.mod h1:IAWq6vjZJVXebIq2qGTLOdlXzmpyTZ5iJG5b59fza5U= +github.com/gobuffalo/buffalo-plugins v1.9.3/go.mod h1:BNRunDThMZKjqx6R+n14Rk3sRSOWgbMuzCKXLqbd7m0= +github.com/gobuffalo/buffalo-plugins v1.9.4/go.mod h1:grCV6DGsQlVzQwk6XdgcL3ZPgLm9BVxlBmXPMF8oBHI= +github.com/gobuffalo/buffalo-plugins v1.10.0/go.mod h1:4osg8d9s60txLuGwXnqH+RCjPHj9K466cDFRl3PErHI= +github.com/gobuffalo/buffalo-plugins v1.11.0/go.mod h1:rtIvAYRjYibgmWhnjKmo7OadtnxuMG5ZQLr25ozAzjg= +github.com/gobuffalo/buffalo-plugins v1.12.0/go.mod h1:kw4Mj2vQXqe4X5TI36PEQgswbL30heGQwJEeDKd1v+4= +github.com/gobuffalo/buffalo-plugins v1.13.0/go.mod h1:Y9nH2VwHVkeKhmdM380ulNXmhhD5On81nRVeD+WlDTQ= +github.com/gobuffalo/buffalo-plugins v1.13.1/go.mod h1:VcvhrgWcZLhOp8JPLckHBDtv05KepY/MxHsT2+06xX4= +github.com/gobuffalo/buffalo-plugins v1.14.0/go.mod h1:r2lykSXBT79c3T5JK1ouivVDrHvvCZUdZBmn+lPMHU8= +github.com/gobuffalo/buffalo-plugins v1.14.1 h1:ZL22sNZif+k/0I9X7LB8cpVMWh7zcVjfpiqxFlH4xSY= +github.com/gobuffalo/buffalo-plugins v1.14.1/go.mod h1:9BRBvXuKxR0m4YttVFRtuUcAP9Rs97mGq6OleyDbIuo= +github.com/gobuffalo/buffalo-pop v1.0.5/go.mod h1:Fw/LfFDnSmB/vvQXPvcXEjzP98Tc+AudyNWUBWKCwQ8= +github.com/gobuffalo/buffalo-pop v1.1.2/go.mod h1:czNLXcYbg5/fjr+uht0NyjZaQ0V2W23H1jzyORgCzQ4= +github.com/gobuffalo/buffalo-pop v1.1.5/go.mod h1:H01JIg42XwOHS4gRMhSeDZqBovNVlfBUsVXckU617s4= +github.com/gobuffalo/buffalo-pop v1.1.8/go.mod h1:1uaxOFzzVud/zR5f1OEBr21tMVLQS3OZpQ1A5cr0svE= +github.com/gobuffalo/buffalo-pop v1.1.13/go.mod h1:47GQoBjCMcl5Pw40iCWHQYJvd0HsT9kdaOPWgnzHzk4= +github.com/gobuffalo/buffalo-pop v1.1.14/go.mod h1:sAMh6+s7wytCn5cHqZIuItJbAqzvs6M7FemLexl+pwc= +github.com/gobuffalo/buffalo-pop v1.1.15/go.mod h1:vnvvxhbEFAaEbac9E2ZPjsBeL7WHkma2UyKNVA4y9Wo= +github.com/gobuffalo/buffalo-pop v1.2.1/go.mod h1:SHqojN0bVzaAzCbQDdWtsib202FDIxqwmCO8VDdweF4= +github.com/gobuffalo/buffalo-pop v1.3.0/go.mod h1:P0PhA225dRGyv0WkgYjYKqgoxPdDPDFZDvHj60AGF5w= +github.com/gobuffalo/buffalo-pop v1.6.0/go.mod h1:vrEVNOBKe042HjSNMj72J4FgER/VG6lt4xW6WMpTdlY= +github.com/gobuffalo/buffalo-pop v1.7.0/go.mod h1:UB5HHeFucJG7esTPUPjinBaJTEpVoREJHfSJJELnyeI= +github.com/gobuffalo/buffalo-pop v1.9.0/go.mod h1:MfrkBg0iN9+RdlxdHHAqqGFAC/iyCfTiKqH7Jvt+vhE= +github.com/gobuffalo/buffalo-pop v1.10.0/go.mod h1:C3/cFXB8Zd38XiGgHFdE7dw3Wu9MOKeD7bfELQicGPI= +github.com/gobuffalo/buffalo-pop v1.12.0/go.mod h1:pO2ONSJOCjyroGp4BwVHfMkfd7sLg1U9BvMJqRy6Otk= +github.com/gobuffalo/buffalo-pop v1.13.0 h1:9fnVfkkzgDZ/T8ADZnDp0GAGxbzhO1O6a5QWZ9GLslk= +github.com/gobuffalo/buffalo-pop v1.13.0/go.mod h1:h+zfyXCUFwihFqz6jmo9xsdsZ1Tm9n7knYpQjW0gv18= +github.com/gobuffalo/clara v0.4.1/go.mod h1:3QgAPqYgPqAzhfGbNLlp4UztaZRi2SOg+ZrZwaq9L94= +github.com/gobuffalo/clara v0.6.0 h1:rThH7BmFnjgc1R2I2ePjaWhwsVWyakqIvmn48cNeNP8= +github.com/gobuffalo/clara v0.6.0/go.mod 
h1:RKZxkcH80pLykRi2hLkoxGMxA8T06Dc9fN/pFvutMFY= +github.com/gobuffalo/depgen v0.0.0-20190219190223-ba8c93fa0c2c/go.mod h1:CE/HUV4vDCXtJayRf6WoMWgezb1yH4QHg8GNK8FL0JI= +github.com/gobuffalo/depgen v0.0.0-20190315122043-8442b3fa16db/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.0.0-20190315124901-e02f65b90669/go.mod h1:yTQe8xo5pGIDOApkeO95DjePS4ZOSSSx+ItkqJHxUG4= +github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= +github.com/gobuffalo/depgen v0.1.1/go.mod h1:65EOv3g7CMe4kc8J1Ds+l2bjcwrWKGXkE4/vpRRLPWY= +github.com/gobuffalo/depgen v0.2.0 h1:CYuqsR8sq+L9G9+A6uUcTEuaK8AGenAjtYOm238fN3M= +github.com/gobuffalo/depgen v0.2.0/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= +github.com/gobuffalo/envy v1.6.4/go.mod h1:Abh+Jfw475/NWtYMEt+hnJWRiC8INKWibIMyNt1w2Mc= +github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.6/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.7/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.8/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.9/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= +github.com/gobuffalo/envy v1.6.10/go.mod h1:X0CFllQjTV5ogsnUrg+Oks2yTI+PU2dGYBJOEI2D1Uo= +github.com/gobuffalo/envy v1.6.11/go.mod h1:Fiq52W7nrHGDggFPhn2ZCcHw4u/rqXkqo+i7FB6EAcg= +github.com/gobuffalo/envy v1.6.12/go.mod h1:qJNrJhKkZpEW0glh5xP2syQHH5kgdmgsKss2Kk8PTP0= +github.com/gobuffalo/envy v1.6.13/go.mod h1:w9DJppgl51JwUFWWd/M/6/otrPtWV3WYMa+NNLunqKA= +github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= +github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= +github.com/gobuffalo/events v1.0.3/go.mod h1:Txo8WmqScapa7zimEQIwgiJBvMECMe9gJjsKNPN3uZw= +github.com/gobuffalo/events v1.0.7/go.mod h1:z8txf6H9jWhQ5Scr7YPLWg/cgXBRj8Q4uYI+rsVCCSQ= +github.com/gobuffalo/events v1.0.8/go.mod h1:A5KyqT1sA+3GJiBE4QKZibse9mtOcI9nw8gGrDdqYGs= +github.com/gobuffalo/events v1.1.1/go.mod h1:Ia9OgHMco9pEhJaPrPQJ4u4+IZlkxYVco2VbJ2XgnAE= +github.com/gobuffalo/events v1.1.3/go.mod h1:9yPGWYv11GENtzrIRApwQRMYSbUgCsZ1w6R503fCfrk= +github.com/gobuffalo/events v1.1.4/go.mod h1:09/YRRgZHEOts5Isov+g9X2xajxdvOAcUuAHIX/O//A= +github.com/gobuffalo/events v1.1.5/go.mod h1:3YUSzgHfYctSjEjLCWbkXP6djH2M+MLaVRzb4ymbAK0= +github.com/gobuffalo/events v1.1.6/go.mod h1:H/3ZB9BA+WorMb/0F79UvU6u0Cyo2hU97WA51bG2ONY= +github.com/gobuffalo/events v1.1.7/go.mod h1:6fGqxH2ing5XMb3EYRq9LEkVlyPGs4oO/eLzh+S8CxY= +github.com/gobuffalo/events v1.1.8/go.mod h1:UFy+W6X6VbCWS8k2iT81HYX65dMtiuVycMy04cplt/8= +github.com/gobuffalo/events v1.1.9/go.mod h1:/0nf8lMtP5TkgNbzYxR6Bl4GzBy5s5TebgNTdRfRbPM= +github.com/gobuffalo/events v1.2.0/go.mod h1:pxvpvsKXKZNPtHuIxUV3K+g+KP5o4forzaeFj++bh68= +github.com/gobuffalo/events v1.3.1 h1:DiyepPJWOx+6LPQj7gxWpaMRlS4/DnbDgUdmlwVYqjg= +github.com/gobuffalo/events v1.3.1/go.mod 
h1:9JOkQVoyRtailYVE/JJ2ZQ/6i4gTjM5t2HsZK4C1cSA= +github.com/gobuffalo/fizz v1.0.12/go.mod h1:C0sltPxpYK8Ftvf64kbsQa2yiCZY4RZviurNxXdAKwc= +github.com/gobuffalo/fizz v1.0.15/go.mod h1:EI3mEpjImuji6Bwu++N2uXhljQwOhwtimZQJ89zwyF4= +github.com/gobuffalo/fizz v1.0.16/go.mod h1:EI3mEpjImuji6Bwu++N2uXhljQwOhwtimZQJ89zwyF4= +github.com/gobuffalo/fizz v1.1.2/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc= +github.com/gobuffalo/fizz v1.1.3/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc= +github.com/gobuffalo/fizz v1.3.0/go.mod h1:THqzNTlNxNaF5hq3ddp16SnEcl2m83bTeTzJEoD+kqc= +github.com/gobuffalo/fizz v1.5.0/go.mod h1:Uu3ch14M4S7LDU7LAP1GQ+KNCRmZYd05Gqasc96XLa0= +github.com/gobuffalo/fizz v1.6.0/go.mod h1:V4V6EA8eYu/L43y6gZj7mjmPkigl9m+Eu3Pe+SWQRRg= +github.com/gobuffalo/fizz v1.6.1/go.mod h1:V4V6EA8eYu/L43y6gZj7mjmPkigl9m+Eu3Pe+SWQRRg= +github.com/gobuffalo/fizz v1.8.0/go.mod h1:2LqJOOGUp1JpN9m54ac5jMQ1MpbNvSVbFi9BY+AEXOo= +github.com/gobuffalo/fizz v1.9.0 h1:dkDLayojbcmHyKnIKmOJlOF0dcMSaAh2kVwJpylUkpY= +github.com/gobuffalo/fizz v1.9.0/go.mod h1:2LqJOOGUp1JpN9m54ac5jMQ1MpbNvSVbFi9BY+AEXOo= +github.com/gobuffalo/flect v0.0.0-20180907193754-dc14d8acaf9f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181002182613-4571df4b1daf/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181007231023-ae7ed6bfe683/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181018182602-fd24a256709f/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181019110701-3d6f0b585514/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181024204909-8f6be1a8c6c2/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181104133451-1f6e9779237a/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181108195648-8fe1b44cfe32/go.mod h1:rCiQgmAE4axgBNl3jZWzS5rETRYTGOsrixTRaCPzNdA= +github.com/gobuffalo/flect v0.0.0-20181109221320-179d36177b5b/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181114183036-47375f6d8328/go.mod h1:0HvNbHdfh+WOvDSIASqJOSxTOWSxCCUF++k/Y53v9rI= +github.com/gobuffalo/flect v0.0.0-20181210151238-24a2b68e0316/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190104192022-4af577e09bf2/go.mod h1:en58vff74S9b99Eg42Dr+/9yPu437QjlNsO/hBYPuOk= +github.com/gobuffalo/flect v0.0.0-20190117212819-a62e61d96794/go.mod h1:397QT6v05LkZkn07oJXXT6y9FCfwC8Pug0WA2/2mE9k= +github.com/gobuffalo/flect v0.0.0-20190205211104-b2cb381e56e0/go.mod h1:397QT6v05LkZkn07oJXXT6y9FCfwC8Pug0WA2/2mE9k= +github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= +github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= +github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= +github.com/gobuffalo/genny v0.0.0-20180924032338-7af3a40f2252/go.mod 
h1:tUTQOogrr7tAQnhajMSH6rv1BVev34H2sa1xNHMy94g= +github.com/gobuffalo/genny v0.0.0-20181003150629-3786a0744c5d/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181005145118-318a41a134cc/go.mod h1:WAd8HmjMVrnkAZbmfgH5dLBUchsZfqzp/WS5sQz+uTM= +github.com/gobuffalo/genny v0.0.0-20181007153042-b8de7d566757/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181012161047-33e5f43d83a6/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181017160347-90a774534246/go.mod h1:+oG5Ljrw04czAHbPXREwaFojJbpUvcIy4DiOnbEJFTA= +github.com/gobuffalo/genny v0.0.0-20181019144442-df0a36fdd146/go.mod h1:IyRrGrQb/sbHu/0z9i5mbpZroIsdxjCYfj+zFiFiWZQ= +github.com/gobuffalo/genny v0.0.0-20181024195656-51392254bf53/go.mod h1:o9GEH5gn5sCKLVB5rHFC4tq40rQ3VRUzmx6WwmaqISE= +github.com/gobuffalo/genny v0.0.0-20181025145300-af3f81d526b8/go.mod h1:uZ1fFYvdcP8mu0B/Ynarf6dsGvp7QFIpk/QACUuFUVI= +github.com/gobuffalo/genny v0.0.0-20181027191429-94d6cfb5c7fc/go.mod h1:x7SkrQQBx204Y+O9EwRXeszLJDTaWN0GnEasxgLrQTA= +github.com/gobuffalo/genny v0.0.0-20181027195209-3887b7171c4f/go.mod h1:JbKx8HSWICu5zyqWOa0dVV1pbbXOHusrSzQUprW6g+w= +github.com/gobuffalo/genny v0.0.0-20181030163439-ed103521b8ec/go.mod h1:3Xm9z7/2oRxlB7PSPLxvadZ60/0UIek1YWmcC7QSaVs= +github.com/gobuffalo/genny v0.0.0-20181106193839-7dcb0924caf1/go.mod h1:x61yHxvbDCgQ/7cOAbJCacZQuHgB0KMSzoYcw5debjU= +github.com/gobuffalo/genny v0.0.0-20181107223128-f18346459dbe/go.mod h1:utQD3aKKEsdb03oR+Vi/6ztQb1j7pO10N3OBoowRcSU= +github.com/gobuffalo/genny v0.0.0-20181109163038-9539921b620f/go.mod h1:118bnhJR2oviiji++mZj0IH/IaFBCzwkWHaI4OQq5hQ= +github.com/gobuffalo/genny v0.0.0-20181110202416-7b7d8756a9e2/go.mod h1:118bnhJR2oviiji++mZj0IH/IaFBCzwkWHaI4OQq5hQ= +github.com/gobuffalo/genny v0.0.0-20181111200257-599b33630ab4/go.mod h1:w+iD/cdtIpPDFax6LlUFuCdXFD0DLRUXsfp3IeT/Doc= +github.com/gobuffalo/genny v0.0.0-20181114215459-0a4decd77f5d/go.mod h1:kN2KZ8VgXF9VIIOj/GM0Eo7YK+un4Q3tTreKOf0q1ng= +github.com/gobuffalo/genny v0.0.0-20181119162812-e8ff4adce8bb/go.mod h1:BA9htSe4bZwBDJLe8CUkoqkypq3hn3+CkoHqVOW718E= +github.com/gobuffalo/genny v0.0.0-20181127225641-2d959acc795b/go.mod h1:l54xLXNkteX/PdZ+HlgPk1qtcrgeOr3XUBBPDbH+7CQ= +github.com/gobuffalo/genny v0.0.0-20181128191930-77e34f71ba2a/go.mod h1:FW/D9p7cEEOqxYA71/hnrkOWm62JZ5ZNxcNIVJEaWBU= +github.com/gobuffalo/genny v0.0.0-20181203165245-fda8bcce96b1/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181203201232-849d2c9534ea/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181206121324-d6fb8a0dbe36/go.mod h1:wpNSANu9UErftfiaAlz1pDZclrYzLtO5lALifODyjuM= +github.com/gobuffalo/genny v0.0.0-20181207164119-84844398a37d/go.mod h1:y0ysCHGGQf2T3vOhCrGHheYN54Y/REj0ayd0Suf4C/8= +github.com/gobuffalo/genny v0.0.0-20181211165820-e26c8466f14d/go.mod h1:sHnK+ZSU4e2feXP3PA29ouij6PUEiN+RCwECjCTB3yM= +github.com/gobuffalo/genny v0.0.0-20190104222617-a71664fc38e7/go.mod h1:QPsQ1FnhEsiU8f+O0qKWXz2RE4TiDqLVChWkBuh1WaY= +github.com/gobuffalo/genny v0.0.0-20190112155932-f31a84fcacf5/go.mod h1:CIaHCrSIuJ4il6ka3Hub4DR4adDrGoXGEEt2FbBxoIo= +github.com/gobuffalo/genny v0.0.0-20190124191459-3310289fa4b4/go.mod 
h1:yIRqxhZV2sAzb+B3iPUMLauTRrYP8tJUlZ1zV9teKik= +github.com/gobuffalo/genny v0.0.0-20190131150032-1045e97d19fb/go.mod h1:yIRqxhZV2sAzb+B3iPUMLauTRrYP8tJUlZ1zV9teKik= +github.com/gobuffalo/genny v0.0.0-20190131190646-008a76242145/go.mod h1:NJvPZJxb9M4z790P6N2SMZKSUYpASpEvLuUWnHGKzb4= +github.com/gobuffalo/genny v0.0.0-20190219203444-c95082806342/go.mod h1:3BLT+Vs94EEz3fKR8WWOkYpL6c1tdJcZUNCe3LZAnvQ= +github.com/gobuffalo/genny v0.0.0-20190315121735-8b38fb089e88/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190315124720-e16e52a93c79/go.mod h1:nKeefjbhYowo36ys9nG9VUvD9FRIS0p3BC2JFfcOucM= +github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= +github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= +github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= +github.com/gobuffalo/genny v0.2.0 h1:4srOvuxvdxKsc6izHdHkS8KpQCtUHp6QZm5EUc5vksU= +github.com/gobuffalo/genny v0.2.0/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= +github.com/gobuffalo/gitgen v0.0.0-20190219185555-91c2c5f0aad5/go.mod h1:ZzGIrxBvCJEluaU4i3CN0GFlu1Qmb3yK8ziV02evJ1E= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211 h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8= +github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= +github.com/gobuffalo/github_flavored_markdown v1.0.4/go.mod h1:uRowCdK+q8d/RF0Kt3/DSalaIXbb0De/dmTqMQdkQ4I= +github.com/gobuffalo/github_flavored_markdown v1.0.5/go.mod h1:U0643QShPF+OF2tJvYNiYDLDGDuQmJZXsf/bHOJPsMY= +github.com/gobuffalo/github_flavored_markdown v1.0.7 h1:Vjvz4wqOnviiLEfTh5bh270b3lhpJiwwQEWOWmHMwY8= +github.com/gobuffalo/github_flavored_markdown v1.0.7/go.mod h1:w93Pd9Lz6LvyQXEG6DktTPHkOtCbr+arAD5mkwMzXLI= +github.com/gobuffalo/gogen v0.0.0-20190219194924-d32a17ad9761/go.mod h1:v47C8sid+ZM2qK+YpQ2MGJKssKAqyTsH1wl/pTCPdz8= +github.com/gobuffalo/gogen v0.0.0-20190224213239-1c6076128bbc/go.mod h1:tQqPADZKflmJCR4FHRHYNPP79cXPICyxUiUHyhuXtqg= +github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= +github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= +github.com/gobuffalo/gogen v0.2.0 h1:Xx7NCe+/y++eII2aWAFZ09/81MhDCsZwvMzIFJoQRnU= +github.com/gobuffalo/gogen v0.2.0/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= +github.com/gobuffalo/helpers v0.0.0-20190422082217-384f90c6579f/go.mod h1:g0I3qKQEyJxwnHtEmLugD75eoOiWxEtibcV62tYah9w= +github.com/gobuffalo/helpers v0.0.0-20190506214229-8e6f634af7c3/go.mod h1:HlNpmw2+Rjx882VUf6hJfNJs5wpNRzX02KcqCXDlLGc= +github.com/gobuffalo/helpers v0.2.1/go.mod h1:5UhA1EfGvyPZfzo9PqhKkSgmLolaTpnWYDbqCJcmiAE= +github.com/gobuffalo/helpers v0.2.2 h1:AAu+rUINhzIii77xzuih26Q0ZJgOcLkf37FaBcv2diU= +github.com/gobuffalo/helpers v0.2.2/go.mod h1:xYbzUdCUpVzLwLnqV8HIjT6hmG0Cs7YIBCJkNM597jw= +github.com/gobuffalo/httptest v1.0.2/go.mod 
h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/httptest v1.0.3/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/httptest v1.0.4/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/httptest v1.0.5/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/httptest v1.0.6/go.mod h1:7T1IbSrg60ankme0aDLVnEY0h056g9M1/ZvpVThtB7E= +github.com/gobuffalo/httptest v1.1.0/go.mod h1:BIfCgiqCOotRc5xYwCcZN7IFYag4277Ynqjar/Ra3iI= +github.com/gobuffalo/httptest v1.2.0/go.mod h1:0KfourZCsapuvkljDMSr7YM+66kCt/rXv54YcWRWwhc= +github.com/gobuffalo/httptest v1.3.0 h1:/ykFRXJYWYLnxlYons2eL7p6B5vuz22DoySTFfd4wkY= +github.com/gobuffalo/httptest v1.3.0/go.mod h1:Y4qebOsMH91XdB0cZuS8OUdAKHGV7hVDcjgzGupoYlk= +github.com/gobuffalo/licenser v0.0.0-20180924033006-eae28e638a42/go.mod h1:Ubo90Np8gpsSZqNScZZkVXXAo5DGhTb+WYFIjlnog8w= +github.com/gobuffalo/licenser v0.0.0-20181025145548-437d89de4f75/go.mod h1:x3lEpYxkRG/XtGCUNkio+6RZ/dlOvLzTI9M1auIwFcw= +github.com/gobuffalo/licenser v0.0.0-20181027200154-58051a75da95/go.mod h1:BzhaaxGd1tq1+OLKObzgdCV9kqVhbTulxOpYbvMQWS0= +github.com/gobuffalo/licenser v0.0.0-20181109171355-91a2a7aac9a7/go.mod h1:m+Ygox92pi9bdg+gVaycvqE8RVSjZp7mWw75+K5NPHk= +github.com/gobuffalo/licenser v0.0.0-20181116224424-1b7fd3f9cbb4/go.mod h1:icHYfF2FVDi6CpI8BK9Sy1ChkSijz/0GNN7Qzzdk6JE= +github.com/gobuffalo/licenser v0.0.0-20181128165715-cc7305f8abed/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181128170751-82cc989582b9/go.mod h1:oU9F9UCE+AzI/MueCKZamsezGOOHfSirltllOVeRTAE= +github.com/gobuffalo/licenser v0.0.0-20181203160806-fe900bbede07/go.mod h1:ph6VDNvOzt1CdfaWC+9XwcBnlSTBz2j49PBwum6RFaU= +github.com/gobuffalo/licenser v0.0.0-20181211173111-f8a311c51159/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/licenser v0.0.0-20190224205124-37799bc2ebf6/go.mod h1:ve/Ue99DRuvnTaLq2zKa6F4KtHiYf7W046tDjuGYPfM= +github.com/gobuffalo/licenser v0.0.0-20190329153211-c35c0a2813b2/go.mod h1:ZVWE6uKUE3rGf7sedUHWVjNWrEgxaUQLVFL+pQiWpfY= +github.com/gobuffalo/licenser v1.1.0 h1:xAHoWgZ8vbRkxC8a+SBVL7Y7atJBRZfXM9H9MmPXx1k= +github.com/gobuffalo/licenser v1.1.0/go.mod h1:ZVWE6uKUE3rGf7sedUHWVjNWrEgxaUQLVFL+pQiWpfY= +github.com/gobuffalo/logger v0.0.0-20181022175615-46cfb361fc27/go.mod h1:8sQkgyhWipz1mIctHF4jTxmJh1Vxhp7mP8IqbljgJZo= +github.com/gobuffalo/logger v0.0.0-20181027144941-73d08d2bb969/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181027193913-9cf4dd0efe46/go.mod h1:7uGg2duHKpWnN4+YmyKBdLXfhopkAdVM6H3nKbyFbz8= +github.com/gobuffalo/logger v0.0.0-20181109185836-3feeab578c17/go.mod h1:oNErH0xLe+utO+OW8ptXMSA5DkiSEDW1u3zGIt8F9Ew= +github.com/gobuffalo/logger v0.0.0-20181117211126-8e9b89b7c264/go.mod h1:5etB91IE0uBlw9k756fVKZJdS+7M7ejVhmpXXiSFj0I= +github.com/gobuffalo/logger v0.0.0-20181127160119-5b956e21995c/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/logger v0.0.0-20190224201004-be78ebfea0fa/go.mod h1:+HxKANrR9VGw9yN3aOAppJKvhO05ctDi63w4mDnKv2U= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2 h1:8thhT+kUJMTMy3HlX4+y9Da+BNJck+p109tqqKp7WDs= +github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod 
h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= +github.com/gobuffalo/makr v1.1.5 h1:lOlpv2iz0dNa4qse0ZYQgbtT+ybwVxWEAcOZbcPmeYc= +github.com/gobuffalo/makr v1.1.5/go.mod h1:Y+o0btAH1kYAMDJW/TX3+oAXEu0bmSLLoC9mIFxtzOw= +github.com/gobuffalo/mapi v1.0.0/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= +github.com/gobuffalo/mapi v1.1.0 h1:VEhxtd2aoPXFqVmliLXGSmqPh541OprxYYZFwgNcjn4= +github.com/gobuffalo/mapi v1.1.0/go.mod h1:pqQ1XAqvpy/JYtRwoieNps2yU8MFiMxBUpAm2FBtQ50= +github.com/gobuffalo/meta v0.0.0-20181018155829-df62557efcd3/go.mod h1:XTTOhwMNryif3x9LkTTBO/Llrveezd71u3quLd0u7CM= +github.com/gobuffalo/meta v0.0.0-20181018192820-8c6cef77dab3/go.mod h1:E94EPzx9NERGCY69UWlcj6Hipf2uK/vnfrF4QD0plVE= +github.com/gobuffalo/meta v0.0.0-20181025145500-3a985a084b0a/go.mod h1:YDAKBud2FP7NZdruCSlmTmDOZbVSa6bpK7LJ/A/nlKg= +github.com/gobuffalo/meta v0.0.0-20181109154556-f76929ccd5fa/go.mod h1:1rYI5QsanV6cLpT1BlTAkrFi9rtCZrGkvSK8PglwfS8= +github.com/gobuffalo/meta v0.0.0-20181114191255-b130ebedd2f7/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181116202903-8850e47774f5/go.mod h1:K6cRZ29ozr4Btvsqkjvg5nDFTLOgTqf03KA70Ks0ypE= +github.com/gobuffalo/meta v0.0.0-20181127070345-0d7e59dd540b/go.mod h1:RLO7tMvE0IAKAM8wny1aN12pvEKn7EtkBLkUZR00Qf8= +github.com/gobuffalo/meta v0.0.0-20190120163247-50bbb1fa260d/go.mod h1:KKsH44nIK2gA8p0PJmRT9GvWJUdphkDUA8AJEvFWiqM= +github.com/gobuffalo/meta v0.0.0-20190121163014-ecaa953cbfb3/go.mod h1:KLfkGnS+Tucc+iTkUcAUBtxpwOJGfhw2pHRLddPxMQY= +github.com/gobuffalo/meta v0.0.0-20190126124307-c8fb6f4eb5a9/go.mod h1:zoh6GLgkk9+iI/62dST4amAuVAczZrBXoAk/t64n7Ew= +github.com/gobuffalo/meta v0.0.0-20190207205153-50a99e08b8cf/go.mod h1:+VGfK9Jm9I7oJyFeJzIT6omCPvrDktzAtpHJKaieugY= +github.com/gobuffalo/meta v0.0.0-20190320152240-a5320142224a/go.mod h1:+VGfK9Jm9I7oJyFeJzIT6omCPvrDktzAtpHJKaieugY= +github.com/gobuffalo/meta v0.0.0-20190329152330-e161e8a93e3b h1:clEiBk0NcX6wX7jKQ4MZzw/ihe3NWupFF3eDj8BO++c= +github.com/gobuffalo/meta v0.0.0-20190329152330-e161e8a93e3b/go.mod h1:mCRSy5F47tjK8yaIDcJad4oe9fXxY5gLrx3Xx2spK+0= +github.com/gobuffalo/mw-basicauth v1.0.3/go.mod h1:dg7+ilMZOKnQFHDefUzUHufNyTswVUviCBgF244C1+0= +github.com/gobuffalo/mw-basicauth v1.0.6/go.mod h1:RFyeGeDLZlVgp/eBflqu2eavFqyv0j0fVVP87WPYFwY= +github.com/gobuffalo/mw-basicauth v1.0.7/go.mod h1:xJ9/OSiOWl+kZkjaSun62srODr3Cx8OB4AKr+G4FlS4= +github.com/gobuffalo/mw-contenttype v0.0.0-20180802152300-74f5a47f4d56/go.mod h1:7EvcmzBbeCvFtQm5GqF9ys6QnCxz2UM1x0moiWLq1No= +github.com/gobuffalo/mw-contenttype v0.0.0-20190129203934-2554e742333b/go.mod h1:7x87+mDrr9Peh7AqhOtESyJLanMd2zQNz2Hts+vtBoE= +github.com/gobuffalo/mw-csrf v0.0.0-20180802151833-446ff26e108b/go.mod h1:sbGtb8DmDZuDUQoxjr8hG1ZbLtZboD9xsn6p77ppcHo= +github.com/gobuffalo/mw-csrf v0.0.0-20190129204204-25460a055517/go.mod h1:o5u+nnN0Oa7LBeDYH9QP36qeMPnXV9qbVnbZ4D+Kb0Q= +github.com/gobuffalo/mw-forcessl v0.0.0-20180802152810-73921ae7a130/go.mod h1:JvNHRj7bYNAMUr/5XMkZaDcw3jZhUZpsmzhd//FFWmQ= +github.com/gobuffalo/mw-i18n v0.0.0-20180802152014-e3060b7e13d6/go.mod 
h1:91AQfukc52A6hdfIfkxzyr+kpVYDodgAeT5cjX1UIj4= +github.com/gobuffalo/mw-i18n v0.0.0-20181027200759-09e0c99be4d3/go.mod h1:1PpGPgqP8VsfUppgBA9FrTOXjI6X9gjqhh/8dmg48lg= +github.com/gobuffalo/mw-i18n v0.0.0-20190129204410-552713a3ebb4/go.mod h1:rBg2eHxsyxVjtYra6fGy4GSF5C8NysOvz+Znnzk42EM= +github.com/gobuffalo/mw-paramlogger v0.0.0-20181005191442-d6ee392ec72e/go.mod h1:6OJr6VwSzgJMqWMj7TYmRUqzNe2LXu/W1rRW4MAz/ME= +github.com/gobuffalo/mw-paramlogger v0.0.0-20190129202837-395da1998525/go.mod h1:gEo/ABCsKqvpp/KCxN2AIzDEe0OJUXbJ9293FYrXw+w= +github.com/gobuffalo/mw-tokenauth v0.0.0-20181001105134-8545f626c189/go.mod h1:UqBF00IfKvd39ni5+yI5MLMjAf4gX7cDKN/26zDOD6c= +github.com/gobuffalo/mw-tokenauth v0.0.0-20190129201951-95847f29c5c8/go.mod h1:n2oa93LHGD94hGI+PoJO+6cf60DNrXrAIv9L/Ke3GXc= +github.com/gobuffalo/nulls v0.0.0-20190305142546-85f3c9250d87 h1:5crg1Kr62BIj/RVGneCwJtOD7/fHegH71kJD0D0ugp0= +github.com/gobuffalo/nulls v0.0.0-20190305142546-85f3c9250d87/go.mod h1:KhaLCW+kFA/G97tZkmVkIxhRw3gvZszJn7JjPLI3gtI= +github.com/gobuffalo/packd v0.0.0-20181027182251-01ad393492c8/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027190505-aafc0d02c411/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181027194105-7ae579e6d213/go.mod h1:SmdBdhj6uhOsg1Ui4SFAyrhuc7U4VCildosO5IDJ3lc= +github.com/gobuffalo/packd v0.0.0-20181028162033-6d52e0eabf41/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181029140631-cf76bd87a5a6/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181031195726-c82734870264/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181103221656-16c4ed88b296/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181104210303-d376b15f8e96/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181111195323-b2e760a5f0ff/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181114190715-f25c5d2471d7/go.mod h1:Yf2toFaISlyQrr5TfO3h6DB9pl9mZRmyvBGQb/aQ/pI= +github.com/gobuffalo/packd v0.0.0-20181124090624-311c6248e5fb/go.mod h1:Foenia9ZvITEvG05ab6XpiD5EfBHPL8A6hush8SJ0o8= +github.com/gobuffalo/packd v0.0.0-20181207120301-c49825f8f6f4/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20181212173646-eca3b8fd6687/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20190224160250-d04dd98aca5b/go.mod h1:LYc0TGKFBBFTRC9dg2pcRcMqGCTMD7T2BIMP7OBuQAA= +github.com/gobuffalo/packd v0.0.0-20190315122247-83d601d65093/go.mod h1:LpEu7OkoplvlhztyAEePkS6JwcGgANdgGL5pB4Knxaw= +github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= +github.com/gobuffalo/packd v0.2.0 h1:9Mu1TYbSfH75zQIQtZEgHEaYeRR4485U4T9N1OQYrXk= +github.com/gobuffalo/packd v0.2.0/go.mod h1:k2CkHP3bjbqL2GwxwhxUy1DgnlbW644hkLC9iIUvZwY= +github.com/gobuffalo/packr v1.13.7/go.mod h1:KkinLIn/n6+3tVXMwg6KkNvWwVsrRAz4ph+jgpk3Z24= +github.com/gobuffalo/packr v1.15.0/go.mod 
h1:t5gXzEhIviQwVlNx/+3SfS07GS+cZ2hn76WLzPp6MGI= +github.com/gobuffalo/packr v1.15.1/go.mod h1:IeqicJ7jm8182yrVmNbM6PR4g79SjN9tZLH8KduZZwE= +github.com/gobuffalo/packr v1.16.0/go.mod h1:Yx/lcR/7mDLXhuJSzsz2MauD/HUwSc+EK6oigMRGGsM= +github.com/gobuffalo/packr v1.19.0/go.mod h1:MstrNkfCQhd5o+Ct4IJ0skWlxN8emOq8DsoT1G98VIU= +github.com/gobuffalo/packr v1.20.0/go.mod h1:JDytk1t2gP+my1ig7iI4NcVaXr886+N0ecUga6884zw= +github.com/gobuffalo/packr v1.21.0/go.mod h1:H00jGfj1qFKxscFJSw8wcL4hpQtPe1PfU2wa6sg/SR0= +github.com/gobuffalo/packr v1.21.5/go.mod h1:zCvDxrZzFmq5Xd7Jw4vaGe/OYwzuXnma31D2EbTHMWk= +github.com/gobuffalo/packr v1.21.7/go.mod h1:73tmYjwi4Cvb1eNiAwpmrzZ0gxVA4KBqVSZ2FNeJodM= +github.com/gobuffalo/packr v1.21.9/go.mod h1:GC76q6nMzRtR+AEN/VV4w0z2/4q7SOaEmXh3Ooa8sOE= +github.com/gobuffalo/packr v1.22.0/go.mod h1:Qr3Wtxr3+HuQEwWqlLnNW4t1oTvK+7Gc/Rnoi/lDFvA= +github.com/gobuffalo/packr v1.24.0/go.mod h1:p9Sgang00I1hlr1ub+tgI9AQdFd4f+WH1h62jYpzetM= +github.com/gobuffalo/packr v1.24.1/go.mod h1:absPnW/XUUa4DmIh5ga7AipGXXg0DOcd5YWKk5RZs8Y= +github.com/gobuffalo/packr v1.25.0 h1:NtPK45yOKFdTKHTvRGKL+UIKAKmJVWIVJOZBDI/qEdY= +github.com/gobuffalo/packr v1.25.0/go.mod h1:NqsGg8CSB2ZD+6RBIRs18G7aZqdYDlYNNvsSqP6T4/U= +github.com/gobuffalo/packr/v2 v2.0.0-rc.5/go.mod h1:e6gmOfhf3KmT4zl2X/NDRSfBXk2oV4TXZ+NNOM0xwt8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.7/go.mod h1:BzhceHWfF3DMAkbPUONHYWs63uacCZxygFY1b4H9N2A= +github.com/gobuffalo/packr/v2 v2.0.0-rc.8/go.mod h1:y60QCdzwuMwO2R49fdQhsjCPv7tLQFR0ayzxxla9zes= +github.com/gobuffalo/packr/v2 v2.0.0-rc.9/go.mod h1:fQqADRfZpEsgkc7c/K7aMew3n4aF1Kji7+lIZeR98Fc= +github.com/gobuffalo/packr/v2 v2.0.0-rc.10/go.mod h1:4CWWn4I5T3v4c1OsJ55HbHlUEKNWMITG5iIkdr4Px4w= +github.com/gobuffalo/packr/v2 v2.0.0-rc.11/go.mod h1:JoieH/3h3U4UmatmV93QmqyPUdf4wVM9HELaHEu+3fk= +github.com/gobuffalo/packr/v2 v2.0.0-rc.12/go.mod h1:FV1zZTsVFi1DSCboO36Xgs4pzCZBjB/tDV9Cz/lSaR8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.13/go.mod h1:2Mp7GhBFMdJlOK8vGfl7SYtfMP3+5roE39ejlfjw0rA= +github.com/gobuffalo/packr/v2 v2.0.0-rc.14/go.mod h1:06otbrNvDKO1eNQ3b8hst+1010UooI2MFg+B2Ze4MV8= +github.com/gobuffalo/packr/v2 v2.0.0-rc.15/go.mod h1:IMe7H2nJvcKXSF90y4X1rjYIRlNMJYCxEhssBXNZwWs= +github.com/gobuffalo/packr/v2 v2.0.0/go.mod h1:7McfLpSxaPUoSQm7gYpTZRQSK63mX8EKzzYSEFKvfkM= +github.com/gobuffalo/packr/v2 v2.0.1/go.mod h1:tp5/5A2e67F1lUGTiNadtA2ToP045+mvkWzaqMCsZr4= +github.com/gobuffalo/packr/v2 v2.0.2/go.mod h1:6Y+2NY9cHDlrz96xkJG8bfPwLlCdJVS/irhNJmwD7kM= +github.com/gobuffalo/packr/v2 v2.0.6/go.mod h1:/TYKOjadT7P9jRWZtj4BRTgeXy2tIYntifGkD+aM2KY= +github.com/gobuffalo/packr/v2 v2.0.7/go.mod h1:1SBFAIr3YnxYdJRyrceR7zhOrhV/YhHzOjDwA9LLZ5Y= +github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= +github.com/gobuffalo/packr/v2 v2.0.10/go.mod h1:n90ZuXIc2KN2vFAOQascnPItp9A2g9QYSvYvS3AjQEM= +github.com/gobuffalo/packr/v2 v2.1.0/go.mod h1:n90ZuXIc2KN2vFAOQascnPItp9A2g9QYSvYvS3AjQEM= +github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= +github.com/gobuffalo/packr/v2 v2.3.2/go.mod h1:93elRVdDhpUgox9GnXswWK5dzpVBQsnlQjnnncSLoiU= +github.com/gobuffalo/packr/v2 v2.4.0 h1:FB8CdysPlcECofv3tpH21T4aJDWmv7NFq8m0X6JZMGc= +github.com/gobuffalo/packr/v2 v2.4.0/go.mod 
h1:ra341gygw9/61nSjAbfwcwh8IrYL4WmR4IsPkPBhQiY=
[go.sum diff truncated: several hundred added checksum lines for new and updated transitive dependencies (gobuffalo, golang, google, gorilla, hashicorp, influxdata, markbates, mattn, onsi, opentracing, prometheus, sirupsen, spf13, and others), one `module version[/go.mod] hash` entry per line]
+github.com/stretchr/testify
v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= +github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= +github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4= +github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/unrolled/secure v0.0.0-20180918153822-f340ee86eb8b/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181005190816-ff9db2ff917f/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20181022170031-4b6b7cf51606/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/unrolled/secure v0.0.0-20190103195806-76e6d4e9b90c/go.mod h1:mnPT77IAdsi/kV7+Es7y+pXALeV3h7G6dQF6mNYjcLA= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= 
+github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/xanzy/ssh-agent v0.2.0 h1:Adglfbi5p9Z0BmK2oKU9nTG+zKfniSfnaMYB+ULd+Ro= +github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.2 h1:VBfFXTpEwLq2hzs42qCHOyKw5AqEm9DYGqBuINmzUZY= +github.com/xordataexchange/crypt v0.0.2/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/zenazn/goji v0.9.0 h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v3.3.13+incompatible h1:jCejD5EMnlGxFvcGRyEV4VGlENZc7oPQX6o0t7n3xbw= +go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20180809161055-417644f6feb5 h1:+hE86LblG4AyDgwMCLTE6FOlM9+qjHSYS+rKqxUVdsM= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= +golang.org/x/build v0.0.0-20190626175840-54405f243e45 h1:/S+KN5W4Lv7MKbWNOj2LtQZ5YE7zCC6jKLbQbZGTdNM= +golang.org/x/build v0.0.0-20190626175840-54405f243e45/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= +golang.org/x/crypto v0.0.0-20171231215028-0fcca4842a8d h1:GrqEEc3+MtHKTsZrdIGVoYDgLpbSRzW1EF+nLu0PcHE= +golang.org/x/crypto v0.0.0-20171231215028-0fcca4842a8d/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181001203147-e3636079e1a4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181024171144-74cb1d3d52f4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025113841-85e1b3f9139a/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190102171810-8d7daa0c54b3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190103213133-ff983b9c42bc/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190122013713-64072686203f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190130090550-b01c7a725664/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190403202508-8e1b8d32e692/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d h1:adrbvkTDn9rGnXg2IJDKozEpXXLZN89pdIA+Syt4/u0= +golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20181112044915-a3060d491354/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8 h1:idBdZTd9UioThJp8KpM/rTSinK/ChZFBE43/WtIy8zg= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190507092727-e4e5bf290fec/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190622003408-7e034cad6442 h1:KHkZ9SH+rcDwdj29lcYkks2agONXqNa8YkEGwGhY6Sg= +golang.org/x/image v0.0.0-20190622003408-7e034cad6442/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mobile v0.0.0-20190607214518-6fa95d984e88 h1:H6DkDrMSuEE2MQR7DgGwkzbXSY1lvMpEN5MDE1bo/5U= +golang.org/x/mobile v0.0.0-20190607214518-6fa95d984e88/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180816102801-aaf60122140d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180926154720-4dfa2610cdf3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181017193950-04a2e542c03f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181207154023-610586996380/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181213202711-891ebc4b82d6/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190119204137-ed066c81e75e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c h1:uOCk1iQW6Vc18bnC13MfzScl+wdKBmM9Y9kU7Z83/lw= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190514140710-3ec191127204/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852 h1:xYq6+9AtI+xP3M4r0N1hCkHrInHDBohhquRgx9Kk6gI= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5 h1:MF92a0wJ3gzSUVBpjcwdrDr5+klMFRNEEu6Mev4n00I= +golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180906133057-8cf3aee42992/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180921163948-d47a0f339242/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180927150500-dad3d9fb7b6e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181005133103-4497e2df6f9e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181011152604-fa43e7bc11ba/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181019084534-8f1d3d21f81b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181022134430-8a28ead16f52/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181024145615-5cd93ef61a7c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181025063200-d989b31c8746/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026064943-731415f00dce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181030150119-7e31e0c00fa0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181106135930-3a76605856fd/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181206074257-70b957f3b65e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213150753-586ba8c9bb14/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190116161447-11f53e031339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190220154126-629670e5acc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438 h1:khxRGsvPk4n2y8I/mLLjp7e5dMTJmH75wvqS6nMwUtY= +golang.org/x/sys v0.0.0-20190515120540-06a5c4944438/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20180208041248-4e4a3210bb54 h1:a5WocgxWTnjG0C4hZblDx+yonFbQMMbv8yJGhHMz/nY= +golang.org/x/text v0.0.0-20180208041248-4e4a3210bb54/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181003024731-2f84ea8ef872/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20181006002542-f60d9635b16a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181013182035-5e66757b835f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181017214349-06f26fdaaa28/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181019005945-6adeb8aab2de/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181024171208-a2dc47679d30/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181026183834-f60e5f99f081/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030151751-bb28844c46df/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181102223251-96e9e165b75e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181105230042-78dc5bac0cac/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181107215632-34b416bd17b3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181109152631-138c20b93253/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181109202920-92d8274bd7b8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181111003725-6d71ab8aade0/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181114190951-94339b83286c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181119130350-139d099f6620/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181120060634-fc4f04983f62/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181122213734-04b5d21e00f1/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127195227-b4e97c0ed882/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181127232545-e782529d0ddd/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181201035826-d0ca3933b724/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181203210056-e5f3ab76ea4b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181205224935-3576414c54a4/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181206194817-bcd4e47d0288/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181207183836-8bc39b988060/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181212172921-837e80568c09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181213190329-bbccd8cae4a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181221154417-3ad2d988d5e2/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190102213336-ca9055ed7d04/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190104182027-498d95493402/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190111214448-fc1d57b08d7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190118193359-16909d206f00/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190124004107-78ee07aa9465/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190131142011-8dbcc66f33bb/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206221403-44bcb96178d3/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190214204934-8dcb7bc8c7fe/go.mod h1:E6PF97AdD6v0s+fPshSmumCW1S1Ne85RbPQxELkKa44= +golang.org/x/tools v0.0.0-20190219135230-f000d56b39dc/go.mod h1:E6PF97AdD6v0s+fPshSmumCW1S1Ne85RbPQxELkKa44= +golang.org/x/tools v0.0.0-20190219185102-9394956cfdc5/go.mod h1:E6PF97AdD6v0s+fPshSmumCW1S1Ne85RbPQxELkKa44= +golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190315044204-8b67d361bba2/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190318200714-bb1270c20edf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190404132500-923d25813098/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190407030857-0fdf0c73855b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190603152906-08e0b306e832/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190603231351-8aaa1484dc10/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190613204242-ed0dc450797f/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190626204024-7ef8a99cf38d h1:i9jnTEK1CvXQ2V4rSf5pFujeIdQbr8YgFTCSi/QRP3Y= +golang.org/x/tools v0.0.0-20190626204024-7ef8a99cf38d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca h1:PupagGYwj8+I4ubCxcmcBRk3VlUWtTg5huQpZR9flmE= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6 h1:4WsZyVtkthqrHTbDCJfiTs8IWNYE4uvsSDgaV6xpp+o= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.1.0/go.mod 
h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0 h1:9sdfJOzWlkqPltHAuzT2Cp+yrBeY1KRVYgms8soxMwM= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0 h1:Tfd7cKwKbFRsI8RMAD3oqqw7JPFRrvFlOsfbgVkjOOw= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101 h1:wuGevabY6r+ivPNagjUXGGxF+GqgMd+dBhjsxW4q9u4= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= +gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/mail.v2 v2.0.0-20180731213649-a0242b2233b4/go.mod h1:htwXN1Qh09vZJ1NVKxQqHPBaCBbzKhp5GzuJEA4VJWw= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/src-d/go-billy.v4 v4.2.1 h1:omN5CrMrMcQ+4I8bJ0wEhOBPanIRWzFC953IiXKdYzo= +gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= +gopkg.in/src-d/go-git-fixtures.v3 v3.1.1 h1:XWW/s5W18RaJpmo1l0IYGqXKuJITWRFuA45iOf1dKJs= +gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.8.1 h1:aAyBmkdE1QUUEHcP4YFCGKmsMQRAuRmUcPEQR7lOAa0= +gopkg.in/src-d/go-git.v4 v4.8.1/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= +gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod 
h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0 h1:POO/ycCATvegFmVuPpQzZFJ+pGZeX22Ufu6fibxDVjU= +gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.0.0 h1:uUkhRGrsEyx/laRdeS6YIQKIys8pg+lRSRdVMTYjivs= +gopkg.in/yaml.v2 v2.0.0/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20181108184350-ae8f1f9103cc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099 h1:XJP7lxbSxWLOMNdBE4B/STaqVy6L73o0knwj2vIlxnw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190107175209-d9ea5c54f7dc h1:lmf242UNy8ucQUSUse9oXtyxHb6kaF82XRLqeVDXXhA= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190107175209-d9ea5c54f7dc/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI= +sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k= +sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/joyent/triton-go/storage/client.go b/vendor/github.com/joyent/triton-go/storage/client.go new file mode 100644 index 00000000..91aeacff --- /dev/null +++ b/vendor/github.com/joyent/triton-go/storage/client.go @@ -0,0 +1,72 @@ +// +// Copyright (c) 2018, Joyent, Inc. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+//
+
+package storage
+
+import (
+	"net/http"
+
+	triton "github.com/joyent/triton-go"
+	"github.com/joyent/triton-go/client"
+)
+
+type StorageClient struct {
+	Client *client.Client
+}
+
+func newStorageClient(client *client.Client) *StorageClient {
+	return &StorageClient{
+		Client: client,
+	}
+}
+
+// NewClient returns a new client for working with Storage endpoints and
+// resources within CloudAPI.
+func NewClient(config *triton.ClientConfig) (*StorageClient, error) {
+	// TODO: Utilize config interface within the function itself
+	client, err := client.New(
+		config.TritonURL,
+		config.MantaURL,
+		config.AccountName,
+		config.Signers...,
+	)
+	if err != nil {
+		return nil, err
+	}
+	return newStorageClient(client), nil
+}
+
+// SetHeader allows a consumer of the current client to set a custom header for
+// the next backend HTTP request sent to CloudAPI.
+func (c *StorageClient) SetHeader(header *http.Header) {
+	c.Client.RequestHeader = header
+}
+
+// Dir returns a DirectoryClient used for accessing functions pertaining to
+// Directories functionality of the Manta API.
+func (c *StorageClient) Dir() *DirectoryClient {
+	return &DirectoryClient{c.Client}
+}
+
+// Jobs returns a JobClient used for accessing functions pertaining to Jobs
+// functionality of the Triton Object Storage API.
+func (c *StorageClient) Jobs() *JobClient {
+	return &JobClient{c.Client}
+}
+
+// Objects returns an ObjectsClient used for accessing functions pertaining to
+// Objects functionality of the Triton Object Storage API.
+func (c *StorageClient) Objects() *ObjectsClient {
+	return &ObjectsClient{c.Client}
+}
+
+// SnapLinks returns a SnapLinksClient used for accessing functions pertaining to
+// SnapLinks functionality of the Triton Object Storage API.
+func (c *StorageClient) SnapLinks() *SnapLinksClient {
+	return &SnapLinksClient{c.Client}
+}
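The storage client above is the entry point for the rest of this vendored package: `NewClient` builds the underlying transport from a `triton.ClientConfig`, and each API area hangs off its own accessor (`Dir`, `Jobs`, `Objects`, `SnapLinks`). A minimal sketch of how a consumer might wire it up; the endpoint URLs and account name are placeholders, and a real configuration would also have to populate `Signers`, since Manta requests must be signed:

```go
package main

import (
	"fmt"
	"log"

	triton "github.com/joyent/triton-go"
	"github.com/joyent/triton-go/storage"
)

func main() {
	// Placeholder endpoints and account name; real code would also set
	// Signers with a configured authentication signer.
	config := &triton.ClientConfig{
		TritonURL:   "https://us-east-1.api.joyent.com",
		MantaURL:    "https://us-east.manta.joyent.com",
		AccountName: "my-account",
	}

	client, err := storage.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}

	// Each API area is reached through its own sub-client.
	fmt.Println(client.Dir(), client.Jobs(), client.Objects(), client.SnapLinks())
}
```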
diff --git a/vendor/github.com/joyent/triton-go/storage/directory.go b/vendor/github.com/joyent/triton-go/storage/directory.go
new file mode 100644
index 00000000..cb35de66
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/storage/directory.go
@@ -0,0 +1,208 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package storage
+
+import (
+	"bufio"
+	"context"
+	"encoding/json"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"time"
+
+	"github.com/joyent/triton-go/client"
+	"github.com/pkg/errors"
+)
+
+type DirectoryClient struct {
+	client *client.Client
+}
+
+// DirectoryEntry represents an object or directory in Manta.
+type DirectoryEntry struct {
+	ETag         string    `json:"etag"`
+	ModifiedTime time.Time `json:"mtime"`
+	Name         string    `json:"name"`
+	Size         uint64    `json:"size"`
+	Type         string    `json:"type"`
+}
+
+// ListDirectoryInput represents parameters to a List operation.
+type ListDirectoryInput struct {
+	DirectoryName string
+	Limit         uint64
+	Marker        string
+}
+
+// ListDirectoryOutput contains the outputs of a List operation.
+type ListDirectoryOutput struct {
+	Entries       []*DirectoryEntry
+	ResultSetSize uint64
+}
+
+// List lists the contents of a directory on the Triton Object Storage service.
+func (s *DirectoryClient) List(ctx context.Context, input *ListDirectoryInput) (*ListDirectoryOutput, error) {
+	absPath := absFileInput(s.client.AccountName, input.DirectoryName)
+	query := &url.Values{}
+	if input.Limit != 0 {
+		query.Set("limit", strconv.FormatUint(input.Limit, 10))
+	}
+
+	if input.Marker != "" {
+		query.Set("marker", input.Marker)
+	}
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   string(absPath),
+		Query:  query,
+	}
+	respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to list directory")
+	}
+	defer respBody.Close()
+
+	var results []*DirectoryEntry
+	scanner := bufio.NewScanner(respBody)
+	for scanner.Scan() {
+		current := &DirectoryEntry{}
+		if err := json.Unmarshal(scanner.Bytes(), current); err != nil {
+			return nil, errors.Wrap(err, "unable to decode list directories response")
+		}
+
+		results = append(results, current)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, errors.Wrap(err, "unable to decode list directories response")
+	}
+
+	output := &ListDirectoryOutput{
+		Entries: results,
+	}
+
+	resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
+	if err == nil {
+		output.ResultSetSize = resultSetSize
+	}
+
+	return output, nil
+}
+
+// PutDirectoryInput represents parameters to a Put operation.
+type PutDirectoryInput struct {
+	DirectoryName string
+}
+
+// Put puts a directory into the Triton Object Storage service. It is an
+// idempotent create-or-update operation. Your private namespace starts at
+// /:login, and you can create any nested set of directories or objects within it.
+func (s *DirectoryClient) Put(ctx context.Context, input *PutDirectoryInput) error {
+	absPath := absFileInput(s.client.AccountName, input.DirectoryName)
+
+	headers := &http.Header{}
+	headers.Set("Content-Type", "application/json; type=directory")
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodPut,
+		Path:    string(absPath),
+		Headers: headers,
+	}
+	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to put directory")
+	}
+
+	return nil
+}
+
+// DeleteDirectoryInput represents parameters to a Delete operation.
+type DeleteDirectoryInput struct {
+	DirectoryName string
+	ForceDelete   bool // Will recursively delete all child directories and objects
+}
+
+// Delete deletes a directory on the Triton Object Storage service. The
+// directory must be empty unless ForceDelete is set, in which case its
+// children are removed first.
+func (s *DirectoryClient) Delete(ctx context.Context, input *DeleteDirectoryInput) error {
+	absPath := absFileInput(s.client.AccountName, input.DirectoryName)
+
+	if input.ForceDelete {
+		// deleteAll clears the directory's contents, after which the
+		// now-empty directory itself can be removed below.
+		if err := deleteAll(*s, ctx, absPath); err != nil {
+			return err
+		}
+	}
+
+	if err := deleteDirectory(*s, ctx, absPath); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func deleteAll(c DirectoryClient, ctx context.Context, directoryPath _AbsCleanPath) error {
+	objs, err := c.List(ctx, &ListDirectoryInput{
+		DirectoryName: string(directoryPath),
+	})
+	if err != nil {
+		return err
+	}
+	for _, obj := range objs.Entries {
+		newPath := absFileInput(c.client.AccountName, path.Join(string(directoryPath), obj.Name))
+		if obj.Type == "directory" {
+			// Empty the child directory first, then remove the
+			// directory itself.
+			if err := deleteAll(c, ctx, newPath); err != nil {
+				return err
+			}
+			if err := deleteDirectory(c, ctx, newPath); err != nil {
+				return err
+			}
+		} else {
+			// Keep iterating rather than returning after the first
+			// object, so that every entry gets removed.
+			if err := deleteObject(c, ctx, newPath); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func deleteDirectory(c DirectoryClient, ctx context.Context, directoryPath _AbsCleanPath) error {
+	reqInput := client.RequestInput{
+		Method: http.MethodDelete,
+		Path:   string(directoryPath),
+	}
+	respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to delete directory")
+	}
+
+	return nil
+}
+
+func deleteObject(c DirectoryClient, ctx context.Context, path _AbsCleanPath) error {
+	objClient := &ObjectsClient{
+		client: c.client,
+	}
+
+	err := objClient.Delete(ctx, &DeleteObjectInput{
+		ObjectPath: string(path),
+	})
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
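`Put`, `List`, and `Delete` above cover the full directory lifecycle: `Put` is an idempotent create-or-update, `List` pages through entries with `Limit`/`Marker`, and `Delete` with `ForceDelete` clears children before removing the directory. A rough round-trip sketch, assuming a `*storage.StorageClient` built as in the earlier example; the `stor/example` path is a placeholder:

```go
package example

import (
	"context"
	"fmt"

	"github.com/joyent/triton-go/storage"
)

// directoryRoundTrip creates a directory, lists it, and force-deletes it.
func directoryRoundTrip(ctx context.Context, client *storage.StorageClient) error {
	dir := client.Dir()

	// Create-or-update, so repeating this call is harmless.
	if err := dir.Put(ctx, &storage.PutDirectoryInput{DirectoryName: "stor/example"}); err != nil {
		return err
	}

	out, err := dir.List(ctx, &storage.ListDirectoryInput{DirectoryName: "stor/example"})
	if err != nil {
		return err
	}
	for _, entry := range out.Entries {
		fmt.Printf("%s\t%s\t%d bytes\n", entry.Type, entry.Name, entry.Size)
	}

	// ForceDelete removes children first instead of failing on a
	// non-empty directory.
	return dir.Delete(ctx, &storage.DeleteDirectoryInput{
		DirectoryName: "stor/example",
		ForceDelete:   true,
	})
}
```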
diff --git a/vendor/github.com/joyent/triton-go/storage/job.go b/vendor/github.com/joyent/triton-go/storage/job.go
new file mode 100644
index 00000000..51084a43
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/storage/job.go
@@ -0,0 +1,448 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package storage
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/joyent/triton-go/client"
+	"github.com/pkg/errors"
+)
+
+type JobClient struct {
+	client *client.Client
+}
+
+const (
+	JobStateDone    = "done"
+	JobStateRunning = "running"
+)
+
+// JobPhase represents the specification for a map or reduce phase of a Manta
+// job.
+type JobPhase struct {
+	// Type is the type of phase. Must be `map` or `reduce`.
+	Type string `json:"type,omitempty"`
+
+	// Assets is an array of objects to be placed in your compute zones.
+	Assets []string `json:"assets,omitempty"`
+
+	// Exec is the shell statement to execute. It may be any valid shell
+	// command, including pipelines and other shell syntax. You can also
+	// execute programs stored in the service by including them in "assets"
+	// and referencing them as /assets/$manta_path.
+	Exec string `json:"exec"`
+
+	// Init is a shell statement to execute in each compute zone before
+	// any tasks are executed. The same constraints apply as to Exec.
+	Init string `json:"init"`
+
+	// ReducerCount is an optional number of reducers for this phase. The
+	// default value if not specified is 1. The maximum value is 1024.
+	ReducerCount uint `json:"count,omitempty"`
+
+	// Memory is the amount of DRAM in MB to be allocated to the compute
+	// zone. Valid values are 256, 512, 1024, 2048, 4096 or 8192.
+	Memory uint64 `json:"memory,omitempty"`
+
+	// Disk is the amount of disk space in GB to be allocated to the compute
+	// zone. Valid values are 2, 4, 8, 16, 32, 64, 128, 256, 512 or 1024.
+	Disk uint64 `json:"disk,omitempty"`
+}
+
+// JobSummary represents the summary of a compute job in Manta.
+type JobSummary struct {
+	ModifiedTime time.Time `json:"mtime"`
+	ID           string    `json:"name"`
+}
+
+// Job represents a compute job in Manta.
+type Job struct {
+	ID          string      `json:"id"`
+	Name        string      `json:"name"`
+	Phases      []*JobPhase `json:"phases"`
+	State       string      `json:"state"`
+	Cancelled   bool        `json:"cancelled"`
+	InputDone   bool        `json:"inputDone"`
+	CreatedTime time.Time   `json:"timeCreated"`
+	DoneTime    time.Time   `json:"timeDone"`
+	Transient   bool        `json:"transient"`
+	Stats       *JobStats   `json:"stats"`
+}
+
+// JobStats represents statistics for a compute job in Manta.
+type JobStats struct {
+	Errors    uint64 `json:"errors"`
+	Outputs   uint64 `json:"outputs"`
+	Retries   uint64 `json:"retries"`
+	Tasks     uint64 `json:"tasks"`
+	TasksDone uint64 `json:"tasksDone"`
+}
+
+// CreateJobInput represents parameters to a CreateJob operation.
+type CreateJobInput struct {
+	Name   string      `json:"name"`
+	Phases []*JobPhase `json:"phases"`
+}
+
+// CreateJobOutput contains the outputs of a CreateJob operation.
+type CreateJobOutput struct {
+	JobID string
+}
+
+// Create submits a new job to be executed. This call is not
+// idempotent, so calling it twice will create two jobs.
+func (s *JobClient) Create(ctx context.Context, input *CreateJobInput) (*CreateJobOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs")
+
+	reqInput := client.RequestInput{
+		Method: http.MethodPost,
+		Path:   fullPath,
+		Body:   input,
+	}
+	respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to create job")
+	}
+
+	jobURI := respHeaders.Get("Location")
+	parts := strings.Split(jobURI, "/")
+	jobID := parts[len(parts)-1]
+
+	response := &CreateJobOutput{
+		JobID: jobID,
+	}
+
+	return response, nil
+}
+
+// AddJobInputsInput represents parameters to an AddInputs operation.
+type AddJobInputsInput struct {
+	JobID       string
+	ObjectPaths []string
+}
+
+// AddInputs submits inputs to an already created job.
+func (s *JobClient) AddInputs(ctx context.Context, input *AddJobInputsInput) error {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in")
+	headers := &http.Header{}
+	headers.Set("Content-Type", "text/plain")
+
+	reader := strings.NewReader(strings.Join(input.ObjectPaths, "\n"))
+
+	reqInput := client.RequestNoEncodeInput{
+		Method:  http.MethodPost,
+		Path:    fullPath,
+		Headers: headers,
+		Body:    reader,
+	}
+	respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to add job inputs")
+	}
+
+	return nil
+}
+
+// EndJobInputInput represents parameters to an EndInput operation.
+type EndJobInputInput struct {
+	JobID string
+}
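Taken together, the types above describe a complete job lifecycle: define phases, create the job, stream in object paths, then close the input. A hedged sketch under stated assumptions — the `*storage.JobClient` comes from an accessor on the storage client that is not part of this hunk, and the shell commands are purely illustrative:

```go
package example

import (
	"context"

	"github.com/joyent/triton-go/storage"
)

// runWordCount creates a two-phase map/reduce job, feeds it inputs and
// closes the input stream so the job can run to completion.
func runWordCount(ctx context.Context, jobs *storage.JobClient, inputs []string) (string, error) {
	created, err := jobs.Create(ctx, &storage.CreateJobInput{
		Name: "wordcount",
		Phases: []*storage.JobPhase{
			{Type: "map", Exec: "wc -w"},
			{Type: "reduce", Exec: "awk '{s += $1} END {print s}'"},
		},
	})
	if err != nil {
		return "", err
	}
	if err := jobs.AddInputs(ctx, &storage.AddJobInputsInput{
		JobID:       created.JobID,
		ObjectPaths: inputs,
	}); err != nil {
		return "", err
	}
	// EndInput tells Manta that no further inputs are coming.
	err = jobs.EndInput(ctx, &storage.EndJobInputInput{JobID: created.JobID})
	return created.JobID, err
}
```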
+
+// EndInput signals that no further inputs will be submitted to an already
+// created job.
+func (s *JobClient) EndInput(ctx context.Context, input *EndJobInputInput) error {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in", "end")
+
+	reqInput := client.RequestNoEncodeInput{
+		Method: http.MethodPost,
+		Path:   fullPath,
+	}
+	respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to end job inputs")
+	}
+
+	return nil
+}
+
+// CancelJobInput represents parameters to a CancelJob operation.
+type CancelJobInput struct {
+	JobID string
+}
+
+// Cancel cancels a job from doing any further work. Cancellation
+// is asynchronous and "best effort"; there is no guarantee the job
+// will actually stop. For example, short jobs where input is already
+// closed will likely still run to completion.
+//
+// This is however useful when:
+// - input is still open
+// - you have a long-running job
+func (s *JobClient) Cancel(ctx context.Context, input *CancelJobInput) error {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "cancel")
+
+	reqInput := client.RequestNoEncodeInput{
+		Method: http.MethodPost,
+		Path:   fullPath,
+	}
+	respBody, _, err := s.client.ExecuteRequestNoEncode(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to cancel job")
+	}
+
+	return nil
+}
+
+// ListJobsInput represents parameters to a ListJobs operation.
+type ListJobsInput struct {
+	RunningOnly bool
+	Limit       uint64
+	Marker      string
+}
+
+// ListJobsOutput contains the outputs of a ListJobs operation.
+type ListJobsOutput struct {
+	Jobs          []*JobSummary
+	ResultSetSize uint64
+}
+
+// List returns the list of jobs you currently have.
+func (s *JobClient) List(ctx context.Context, input *ListJobsInput) (*ListJobsOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs")
+	query := &url.Values{}
+	if input.RunningOnly {
+		query.Set("state", "running")
+	}
+	if input.Limit != 0 {
+		query.Set("limit", strconv.FormatUint(input.Limit, 10))
+	}
+	if input.Marker != "" {
+		query.Set("manta_path", input.Marker)
+	}
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   fullPath,
+		Query:  query,
+	}
+	respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to list jobs")
+	}
+
+	var results []*JobSummary
+	for {
+		current := &JobSummary{}
+		decoder := json.NewDecoder(respBody)
+		if err = decoder.Decode(&current); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return nil, errors.Wrap(err, "unable to decode list jobs response")
+		}
+		results = append(results, current)
+	}
+
+	output := &ListJobsOutput{
+		Jobs: results,
+	}
+
+	resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
+	if err == nil {
+		output.ResultSetSize = resultSetSize
+	}
+
+	return output, nil
+}
+
+// GetJobInput represents parameters to a GetJob operation.
+type GetJobInput struct {
+	JobID string
+}
+
+// GetJobOutput contains the outputs of a GetJob operation.
+type GetJobOutput struct {
+	Job *Job
+}
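A short usage sketch for the listing call above, filtering to running jobs; only APIs shown in this file are used, and the output format is illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/joyent/triton-go/storage"
)

// printRunningJobs lists only the jobs currently in the "running" state.
func printRunningJobs(ctx context.Context, jobs *storage.JobClient) error {
	out, err := jobs.List(ctx, &storage.ListJobsInput{RunningOnly: true})
	if err != nil {
		return err
	}
	for _, j := range out.Jobs {
		fmt.Printf("%s (modified %s)\n", j.ID, j.ModifiedTime)
	}
	return nil
}
```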
+
+// Get returns the current state of the job identified by JobID.
+func (s *JobClient) Get(ctx context.Context, input *GetJobInput) (*GetJobOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "status")
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   fullPath,
+	}
+	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get job")
+	}
+
+	job := &Job{}
+	decoder := json.NewDecoder(respBody)
+	if err = decoder.Decode(&job); err != nil {
+		return nil, errors.Wrap(err, "unable to decode get job response")
+	}
+
+	return &GetJobOutput{
+		Job: job,
+	}, nil
+}
+
+// GetJobOutputInput represents parameters to a GetJobOutput operation.
+type GetJobOutputInput struct {
+	JobID string
+}
+
+// GetJobOutputOutput contains the outputs for a GetJobOutput operation. It is your
+// responsibility to ensure that the io.ReadCloser Items is closed.
+type GetJobOutputOutput struct {
+	ResultSetSize uint64
+	Items         io.ReadCloser
+}
+
+// GetOutput returns the current "live" set of outputs from a job. Think of
+// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
+// your responsibility to close the io.ReadCloser named Items in the output.
+func (s *JobClient) GetOutput(ctx context.Context, input *GetJobOutputInput) (*GetJobOutputOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "out")
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   fullPath,
+	}
+	respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get job output")
+	}
+
+	// The response body is handed to the caller as Items; do not close it here.
+	output := &GetJobOutputOutput{
+		Items: respBody,
+	}
+
+	resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
+	if err == nil {
+		output.ResultSetSize = resultSetSize
+	}
+
+	return output, nil
+}
+
+// GetJobInputInput represents parameters to a GetJobInput operation.
+type GetJobInputInput struct {
+	JobID string
+}
+
+// GetJobInputOutput contains the outputs for a GetJobInput operation. It is your
+// responsibility to ensure that the io.ReadCloser Items is closed.
+type GetJobInputOutput struct {
+	ResultSetSize uint64
+	Items         io.ReadCloser
+}
+
+// GetInput returns the current "live" set of inputs from a job. Think of
+// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
+// your responsibility to close the io.ReadCloser named Items in the output.
+func (s *JobClient) GetInput(ctx context.Context, input *GetJobInputInput) (*GetJobInputOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "in")
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   fullPath,
+	}
+	respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get job input")
+	}
+
+	// The response body is handed to the caller as Items; do not close it here.
+	output := &GetJobInputOutput{
+		Items: respBody,
+	}
+
+	resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
+	if err == nil {
+		output.ResultSetSize = resultSetSize
+	}
+
+	return output, nil
+}
+
+// GetJobFailuresInput represents parameters to a GetJobFailures operation.
+type GetJobFailuresInput struct {
+	JobID string
+}
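The streaming getters (GetOutput and GetInput above, GetFailures below) all hand ownership of Items to the caller. A sketch of draining one of them, assuming the stream is a newline-separated list of object paths as Manta documents:

```go
package example

import (
	"bufio"
	"context"

	"github.com/joyent/triton-go/storage"
)

// collectOutputs reads the job's current output set and returns the object
// paths it contains. The caller-owned reader is closed before returning.
func collectOutputs(ctx context.Context, jobs *storage.JobClient, jobID string) ([]string, error) {
	out, err := jobs.GetOutput(ctx, &storage.GetJobOutputInput{JobID: jobID})
	if err != nil {
		return nil, err
	}
	defer out.Items.Close()

	var paths []string
	scanner := bufio.NewScanner(out.Items)
	for scanner.Scan() {
		paths = append(paths, scanner.Text())
	}
	return paths, scanner.Err()
}
```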
+
+// GetJobFailuresOutput contains the outputs for a GetJobFailures operation. It is your
+// responsibility to ensure that the io.ReadCloser Items is closed.
+type GetJobFailuresOutput struct {
+	ResultSetSize uint64
+	Items         io.ReadCloser
+}
+
+// GetFailures returns the current "live" set of failures from a job. Think of
+// this like `tail -f`. If error is nil (i.e. the operation is successful), it is
+// your responsibility to close the io.ReadCloser named Items in the output.
+func (s *JobClient) GetFailures(ctx context.Context, input *GetJobFailuresInput) (*GetJobFailuresOutput, error) {
+	fullPath := path.Join("/", s.client.AccountName, "jobs", input.JobID, "live", "fail")
+
+	reqInput := client.RequestInput{
+		Method: http.MethodGet,
+		Path:   fullPath,
+	}
+	respBody, respHeader, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get job failures")
+	}
+
+	// The response body is handed to the caller as Items; do not close it here.
+	output := &GetJobFailuresOutput{
+		Items: respBody,
+	}
+
+	resultSetSize, err := strconv.ParseUint(respHeader.Get("Result-Set-Size"), 10, 64)
+	if err == nil {
+		output.ResultSetSize = resultSetSize
+	}
+
+	return output, nil
+}
diff --git a/vendor/github.com/joyent/triton-go/storage/objects.go b/vendor/github.com/joyent/triton-go/storage/objects.go
new file mode 100644
index 00000000..87f4b9b8
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/storage/objects.go
@@ -0,0 +1,717 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package storage
+
+import (
+	"context"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/joyent/triton-go/client"
+	tt "github.com/joyent/triton-go/errors"
+	"github.com/pkg/errors"
+)
+
+type ObjectsClient struct {
+	client *client.Client
+}
+
+// AbortMpuInput represents parameters to an AbortMpu operation
+type AbortMpuInput struct {
+	PartsDirectoryPath string
+}
+
+func (s *ObjectsClient) AbortMultipartUpload(ctx context.Context, input *AbortMpuInput) error {
+	return abortMpu(*s, ctx, input)
+}
+
+// CommitMpuInput represents parameters to a CommitMpu operation
+type CommitMpuInput struct {
+	Id      string
+	Headers map[string]string
+	Body    CommitMpuBody
+}
+
+// CommitMpuBody represents the body of a CommitMpu request
+type CommitMpuBody struct {
+	Parts []string `json:"parts"`
+}
+
+func (s *ObjectsClient) CommitMultipartUpload(ctx context.Context, input *CommitMpuInput) error {
+	return commitMpu(*s, ctx, input)
+}
+
+// CreateMpuInput represents parameters to a CreateMpu operation.
+type CreateMpuInput struct {
+	Body            CreateMpuBody
+	ContentLength   uint64
+	ContentMD5      string
+	DurabilityLevel uint64
+	ForceInsert     bool // Force the creation of the directory tree
+}
+
+// CreateMpuOutput represents the response from a CreateMpu operation
+type CreateMpuOutput struct {
+	Id             string `json:"id"`
+	PartsDirectory string `json:"partsDirectory"`
+}
+
+// CreateMpuBody represents the body of a CreateMpu request.
+type CreateMpuBody struct {
+	ObjectPath string            `json:"objectPath"`
+	Headers    map[string]string `json:"headers,omitempty"`
+}
+
+func (s *ObjectsClient) CreateMultipartUpload(ctx context.Context, input *CreateMpuInput) (*CreateMpuOutput, error) {
+	return createMpu(*s, ctx, input)
+}
+
+// GetInfoInput represents parameters to a GetInfo operation.
+type GetInfoInput struct {
+	ObjectPath string
+	Headers    map[string]string
+}
+
+// GetInfoOutput contains the outputs for a GetInfo operation.
+type GetInfoOutput struct {
+	ContentLength uint64
+	ContentType   string
+	LastModified  time.Time
+	ContentMD5    string
+	ETag          string
+	Metadata      map[string]string
+}
+
+// GetInfo sends a HEAD request to an object in the Manta service. This function
+// does not return a response body.
+func (s *ObjectsClient) GetInfo(ctx context.Context, input *GetInfoInput) (*GetInfoOutput, error) {
+	absPath := absFileInput(s.client.AccountName, input.ObjectPath)
+
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodHead,
+		Path:    string(absPath),
+		Headers: headers,
+	}
+	_, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get info")
+	}
+
+	response := &GetInfoOutput{
+		ContentType: respHeaders.Get("Content-Type"),
+		ContentMD5:  respHeaders.Get("Content-MD5"),
+		ETag:        respHeaders.Get("Etag"),
+	}
+
+	lastModified, err := time.Parse(time.RFC1123, respHeaders.Get("Last-Modified"))
+	if err == nil {
+		response.LastModified = lastModified
+	}
+
+	contentLength, err := strconv.ParseUint(respHeaders.Get("Content-Length"), 10, 64)
+	if err == nil {
+		response.ContentLength = contentLength
+	}
+
+	metadata := map[string]string{}
+	for key, values := range respHeaders {
+		if strings.HasPrefix(key, "m-") {
+			metadata[key] = strings.Join(values, ", ")
+		}
+	}
+	response.Metadata = metadata
+
+	return response, nil
+}
+
+// IsDir is a convenience wrapper around the GetInfo function which takes an
+// ObjectPath and returns a boolean whether or not the object is a directory
+// type in Manta. Returns an error if GetInfo failed upstream for some reason.
+func (s *ObjectsClient) IsDir(ctx context.Context, objectPath string) (bool, error) {
+	info, err := s.GetInfo(ctx, &GetInfoInput{
+		ObjectPath: objectPath,
+	})
+	if err != nil {
+		return false, err
+	}
+	if info != nil {
+		return strings.HasSuffix(info.ContentType, "type=directory"), nil
+	}
+	return false, nil
+}
+
+// GetObjectInput represents parameters to a GetObject operation.
+type GetObjectInput struct {
+	ObjectPath string
+	Headers    map[string]string
+}
+
+// GetObjectOutput contains the outputs for a GetObject operation. It is your
+// responsibility to ensure that the io.ReadCloser ObjectReader is closed.
+type GetObjectOutput struct {
+	ContentLength uint64
+	ContentType   string
+	LastModified  time.Time
+	ContentMD5    string
+	ETag          string
+	Metadata      map[string]string
+	ObjectReader  io.ReadCloser
+}
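GetInfo and IsDir above cover the common "stat before acting" pattern. A minimal sketch using only the APIs defined in this file; the ObjectsClient is assumed to come from the storage client's accessor defined elsewhere in the package:

```go
package example

import (
	"context"
	"fmt"

	"github.com/joyent/triton-go/storage"
)

// describe prints whether a path is a directory, or the object's size and
// content type if it is a regular object.
func describe(ctx context.Context, obj *storage.ObjectsClient, p string) error {
	isDir, err := obj.IsDir(ctx, p)
	if err != nil {
		return err
	}
	if isDir {
		fmt.Printf("%s is a directory\n", p)
		return nil
	}
	info, err := obj.GetInfo(ctx, &storage.GetInfoInput{ObjectPath: p})
	if err != nil {
		return err
	}
	fmt.Printf("%s: %d bytes, %s\n", p, info.ContentLength, info.ContentType)
	return nil
}
```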
+
+// Get retrieves an object from the Manta service. If error is nil (i.e. the
+// call returns successfully), it is your responsibility to close the
+// io.ReadCloser named ObjectReader in the operation output.
+func (s *ObjectsClient) Get(ctx context.Context, input *GetObjectInput) (*GetObjectOutput, error) {
+	absPath := absFileInput(s.client.AccountName, input.ObjectPath)
+
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodGet,
+		Path:    string(absPath),
+		Headers: headers,
+	}
+	respBody, respHeaders, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get object")
+	}
+
+	response := &GetObjectOutput{
+		ContentType:  respHeaders.Get("Content-Type"),
+		ContentMD5:   respHeaders.Get("Content-MD5"),
+		ETag:         respHeaders.Get("Etag"),
+		ObjectReader: respBody,
+	}
+
+	lastModified, err := time.Parse(time.RFC1123, respHeaders.Get("Last-Modified"))
+	if err == nil {
+		response.LastModified = lastModified
+	}
+
+	contentLength, err := strconv.ParseUint(respHeaders.Get("Content-Length"), 10, 64)
+	if err == nil {
+		response.ContentLength = contentLength
+	}
+
+	metadata := map[string]string{}
+	for key, values := range respHeaders {
+		if strings.HasPrefix(key, "m-") {
+			metadata[key] = strings.Join(values, ", ")
+		}
+	}
+	response.Metadata = metadata
+
+	return response, nil
+}
+
+// DeleteObjectInput represents parameters to a DeleteObject operation.
+type DeleteObjectInput struct {
+	ObjectPath string
+	Headers    map[string]string
+}
+
+// DeleteObject deletes an object.
+func (s *ObjectsClient) Delete(ctx context.Context, input *DeleteObjectInput) error {
+	absPath := absFileInput(s.client.AccountName, input.ObjectPath)
+
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodDelete,
+		Path:    string(absPath),
+		Headers: headers,
+	}
+	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to delete object")
+	}
+
+	return nil
+}
+
+// GetMpuInput represents parameters to a GetMpu operation
+type GetMpuInput struct {
+	PartsDirectoryPath string
+}
+
+type GetMpuHeaders struct {
+	ContentLength int64  `json:"content-length"`
+	ContentMd5    string `json:"content-md5"`
+}
+
+type GetMpuOutput struct {
+	Id             string        `json:"id"`
+	State          string        `json:"state"`
+	PartsDirectory string        `json:"partsDirectory"`
+	TargetObject   string        `json:"targetObject"`
+	Headers        GetMpuHeaders `json:"headers"`
+	NumCopies      int64         `json:"numCopies"`
+	CreationTimeMs int64         `json:"creationTimeMs"`
+}
+
+func (s *ObjectsClient) GetMultipartUpload(ctx context.Context, input *GetMpuInput) (*GetMpuOutput, error) {
+	return getMpu(*s, ctx, input)
+}
+
+type ListMpuPartsInput struct {
+	Id string
+}
+
+type ListMpuPart struct {
+	ETag       string
+	PartNumber int
+	Size       int64
+}
+
+type ListMpuPartsOutput struct {
+	Parts []ListMpuPart
+}
+
+func (s *ObjectsClient) ListMultipartUploadParts(ctx context.Context, input *ListMpuPartsInput) (*ListMpuPartsOutput, error) {
+	return listMpuParts(*s, ctx, input)
+}
+
+// PutObjectMetadataInput represents parameters to a PutObjectMetadata operation.
+type PutObjectMetadataInput struct {
+	ObjectPath  string
+	ContentType string
+	Metadata    map[string]string
+}
+
+// PutMetadata allows you to overwrite the HTTP headers for an already
+// existing object, without changing the data. Note this is an idempotent "replace"
+// operation, so you must specify the complete set of HTTP headers you want
+// stored on each request.
+//
+// You cannot change "critical" headers:
+// - Content-Length
+// - Content-MD5
+// - Durability-Level
+func (s *ObjectsClient) PutMetadata(ctx context.Context, input *PutObjectMetadataInput) error {
+	absPath := absFileInput(s.client.AccountName, input.ObjectPath)
+	query := &url.Values{}
+	query.Set("metadata", "true")
+
+	headers := &http.Header{}
+	headers.Set("Content-Type", input.ContentType)
+	for key, value := range input.Metadata {
+		headers.Set(key, value)
+	}
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodPut,
+		Path:    string(absPath),
+		Query:   query,
+		Headers: headers,
+	}
+	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to put metadata")
+	}
+
+	return nil
+}
+
+// PutObjectInput represents parameters to a PutObject operation.
+type PutObjectInput struct {
+	ObjectPath       string
+	DurabilityLevel  uint64
+	ContentType      string
+	ContentMD5       string
+	IfMatch          string
+	IfModifiedSince  *time.Time
+	ContentLength    uint64
+	MaxContentLength uint64
+	ObjectReader     io.Reader
+	Headers          map[string]string
+	ForceInsert      bool // Force the creation of the directory tree
+}
+
+func (s *ObjectsClient) Put(ctx context.Context, input *PutObjectInput) error {
+	absPath := absFileInput(s.client.AccountName, input.ObjectPath)
+	if input.ForceInsert {
+		absDirName := _AbsCleanPath(path.Dir(string(absPath)))
+		exists, err := checkDirectoryTreeExists(*s, ctx, absDirName)
+		if err != nil {
+			return err
+		}
+		if !exists {
+			err := createDirectory(*s, ctx, absDirName)
+			if err != nil {
+				return err
+			}
+			return putObject(*s, ctx, input, absPath)
+		}
+	}
+
+	return putObject(*s, ctx, input, absPath)
+}
+
+// UploadPartInput represents parameters to an UploadPart operation.
+type UploadPartInput struct {
+	Id           string
+	PartNum      uint64
+	ContentMD5   string
+	Headers      map[string]string
+	ObjectReader io.Reader
+}
+
+// UploadPartOutput represents the response from an UploadPart operation.
+type UploadPartOutput struct {
+	Part string `json:"part"`
+}
+
+func (s *ObjectsClient) UploadPart(ctx context.Context, input *UploadPartInput) (*UploadPartOutput, error) {
+	return uploadPart(*s, ctx, input)
+}
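Put with ForceInsert set creates any missing parent directories before writing (see putObject and createDirectory below for the mechanics). A small upload sketch, with the illustrative assumption that the payload fits in memory:

```go
package example

import (
	"bytes"
	"context"

	"github.com/joyent/triton-go/storage"
)

// upload writes a small in-memory payload, creating parent directories on
// demand via ForceInsert.
func upload(ctx context.Context, obj *storage.ObjectsClient, p string, data []byte) error {
	return obj.Put(ctx, &storage.PutObjectInput{
		ObjectPath:    p,
		ContentType:   "text/plain",
		ContentLength: uint64(len(data)),
		ObjectReader:  bytes.NewReader(data),
		ForceInsert:   true,
	})
}
```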
+
+// _AbsCleanPath is an internal type that means the input has been
+// path.Clean()'ed and is an absolute path.
+type _AbsCleanPath string
+
+func absFileInput(accountName, objPath string) _AbsCleanPath {
+	cleanInput := path.Clean(objPath)
+	if strings.HasPrefix(cleanInput, path.Join("/", accountName, "/")) {
+		return _AbsCleanPath(cleanInput)
+	}
+
+	cleanAbs := path.Clean(path.Join("/", accountName, objPath))
+	return _AbsCleanPath(cleanAbs)
+}
+
+func putObject(c ObjectsClient, ctx context.Context, input *PutObjectInput, absPath _AbsCleanPath) error {
+	if input.MaxContentLength != 0 && input.ContentLength != 0 {
+		return errors.New("ContentLength and MaxContentLength may not both be set to non-zero values.")
+	}
+
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+	if input.DurabilityLevel != 0 {
+		headers.Set("Durability-Level", strconv.FormatUint(input.DurabilityLevel, 10))
+	}
+	if input.ContentType != "" {
+		headers.Set("Content-Type", input.ContentType)
+	}
+	if input.ContentMD5 != "" {
+		headers.Set("Content-MD5", input.ContentMD5)
+	}
+	if input.IfMatch != "" {
+		headers.Set("If-Match", input.IfMatch)
+	}
+	if input.IfModifiedSince != nil {
+		headers.Set("If-Modified-Since", input.IfModifiedSince.Format(time.RFC1123))
+	}
+	if input.ContentLength != 0 {
+		headers.Set("Content-Length", strconv.FormatUint(input.ContentLength, 10))
+	}
+	if input.MaxContentLength != 0 {
+		headers.Set("Max-Content-Length", strconv.FormatUint(input.MaxContentLength, 10))
+	}
+
+	reqInput := client.RequestNoEncodeInput{
+		Method:  http.MethodPut,
+		Path:    string(absPath),
+		Headers: headers,
+		Body:    input.ObjectReader,
+	}
+	respBody, _, err := c.client.ExecuteRequestNoEncode(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrap(err, "unable to put object")
+	}
+
+	return nil
+}
+
+func createDirectory(c ObjectsClient, ctx context.Context, absPath _AbsCleanPath) error {
+	dirClient := &DirectoryClient{
+		client: c.client,
+	}
+
+	// An abspath starts w/ a leading "/" which gets added to the slice as an
+	// empty string. Start all array math at 1.
+	parts := strings.Split(string(absPath), "/")
+	if len(parts) < 2 {
+		return errors.New("no path components to create directory")
+	}
+
+	folderPath := parts[1]
+	// Don't attempt to create a manta account as a directory
+	for i := 2; i < len(parts); i++ {
+		part := parts[i]
+		folderPath = path.Clean(path.Join("/", folderPath, part))
+		err := dirClient.Put(ctx, &PutDirectoryInput{
+			DirectoryName: folderPath,
+		})
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func abortMpu(c ObjectsClient, ctx context.Context, input *AbortMpuInput) error {
+	reqInput := client.RequestInput{
+		Method:  http.MethodPost,
+		Path:    input.PartsDirectoryPath + "/abort",
+		Headers: &http.Header{},
+		Body:    nil,
+	}
+	respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return errors.Wrap(err, "unable to abort mpu")
+	}
+
+	if respBody != nil {
+		defer respBody.Close()
+	}
+
+	return nil
+}
+
+func commitMpu(c ObjectsClient, ctx context.Context, input *CommitMpuInput) error {
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+
+	// The mpu directory prefix length is derived from the final character
+	// in the mpu identifier which we'll call P. The mpu prefix itself is
+	// the first P characters of the mpu identifier. In order to derive the
+	// correct directory structure we need to parse this information from
+	// the mpu identifier.
+	id := input.Id
+	idLength := len(id)
+	prefixLen, err := strconv.Atoi(id[idLength-1 : idLength])
+	if err != nil {
+		return errors.Wrap(err, "unable to commit mpu due to invalid mpu prefix length")
+	}
+	prefix := id[:prefixLen]
+	partPath := "/" + c.client.AccountName + "/uploads/" + prefix + "/" + input.Id + "/commit"
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodPost,
+		Path:    partPath,
+		Headers: headers,
+		Body:    input.Body,
+	}
+	respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return errors.Wrap(err, "unable to commit mpu")
+	}
+
+	if respBody != nil {
+		defer respBody.Close()
+	}
+
+	return nil
+}
+
+func createMpu(c ObjectsClient, ctx context.Context, input *CreateMpuInput) (*CreateMpuOutput, error) {
+	absPath := absFileInput(c.client.AccountName, input.Body.ObjectPath)
+
+	// Because some clients will be treating Manta like S3, they will
+	// include slashes in object names which we'll need to convert to
+	// directories.
+	if input.ForceInsert {
+		absDirName := _AbsCleanPath(path.Dir(string(absPath)))
+		exists, _ := checkDirectoryTreeExists(c, ctx, absDirName)
+		if !exists {
+			err := createDirectory(c, ctx, absDirName)
+			if err != nil {
+				return nil, errors.Wrap(err, "unable to create directory for create mpu operation")
+			}
+		}
+	}
+	headers := &http.Header{}
+	for key, value := range input.Body.Headers {
+		headers.Set(key, value)
+	}
+	if input.DurabilityLevel != 0 {
+		headers.Set("Durability-Level", strconv.FormatUint(input.DurabilityLevel, 10))
+	}
+	if input.ContentLength != 0 {
+		headers.Set("Content-Length", strconv.FormatUint(input.ContentLength, 10))
+	}
+	if input.ContentMD5 != "" {
+		headers.Set("Content-MD5", input.ContentMD5)
+	}
+
+	input.Body.ObjectPath = string(absPath)
+	reqInput := client.RequestInput{
+		Method:  http.MethodPost,
+		Path:    "/" + c.client.AccountName + "/uploads",
+		Headers: headers,
+		Body:    input.Body,
+	}
+	respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to create mpu")
+	}
+	if respBody != nil {
+		defer respBody.Close()
+	}
+
+	response := &CreateMpuOutput{}
+	decoder := json.NewDecoder(respBody)
+	if err = decoder.Decode(&response); err != nil {
+		return nil, errors.Wrap(err, "unable to decode create mpu response")
+	}
+
+	return response, nil
+}
+
+func getMpu(c ObjectsClient, ctx context.Context, input *GetMpuInput) (*GetMpuOutput, error) {
+	headers := &http.Header{}
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodGet,
+		Path:    input.PartsDirectoryPath + "/state",
+		Headers: headers,
+	}
+	respBody, _, err := c.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to get mpu")
+	}
+
+	response := &GetMpuOutput{}
+	decoder := json.NewDecoder(respBody)
+	if err = decoder.Decode(&response); err != nil {
+		return nil, errors.Wrap(err, "unable to decode get mpu response")
+	}
+
+	return response, nil
+}
+
+func listMpuParts(c ObjectsClient, ctx context.Context, input *ListMpuPartsInput) (*ListMpuPartsOutput, error) {
+	id := input.Id
+	idLength := len(id)
+	prefixLen, err := strconv.Atoi(id[idLength-1 : idLength])
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to list mpu parts due to invalid mpu prefix length")
+	}
+	prefix := id[:prefixLen]
+	partPath := "/" + c.client.AccountName + "/uploads/" + prefix + "/" + input.Id + "/"
+	listDirInput := ListDirectoryInput{
+		DirectoryName: partPath,
+	}
+
+	dirClient := &DirectoryClient{
+		client: c.client,
+	}
+
+	listDirOutput, err := dirClient.List(ctx, &listDirInput)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to list mpu parts")
+	}
+
+	var parts []ListMpuPart
+	for num, part := range listDirOutput.Entries {
+		parts = append(parts, ListMpuPart{
+			ETag:       part.ETag,
+			PartNumber: num,
+			Size:       int64(part.Size),
+		})
+	}
+
+	listMpuPartsOutput := &ListMpuPartsOutput{
+		Parts: parts,
+	}
+
+	return listMpuPartsOutput, nil
+}
+
+func uploadPart(c ObjectsClient, ctx context.Context, input *UploadPartInput) (*UploadPartOutput, error) {
+	headers := &http.Header{}
+	for key, value := range input.Headers {
+		headers.Set(key, value)
+	}
+
+	if input.ContentMD5 != "" {
+		headers.Set("Content-MD5", input.ContentMD5)
+	}
+
+	// The mpu directory prefix length is derived from the final character
+	// in the mpu identifier which we'll call P. The mpu prefix itself is
+	// the first P characters of the mpu identifier. In order to derive the
+	// correct directory structure we need to parse this information from
+	// the mpu identifier.
+	id := input.Id
+	idLength := len(id)
+	partNum := strconv.FormatUint(input.PartNum, 10)
+	prefixLen, err := strconv.Atoi(id[idLength-1 : idLength])
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to upload part due to invalid mpu prefix length")
+	}
+	prefix := id[:prefixLen]
+	partPath := "/" + c.client.AccountName + "/uploads/" + prefix + "/" + input.Id + "/" + partNum
+
+	reqInput := client.RequestNoEncodeInput{
+		Method:  http.MethodPut,
+		Path:    partPath,
+		Headers: headers,
+		Body:    input.ObjectReader,
+	}
+	respBody, respHeader, err := c.client.ExecuteRequestNoEncode(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to upload part")
+	}
+
+	uploadPartOutput := &UploadPartOutput{
+		Part: respHeader.Get("Etag"),
+	}
+	return uploadPartOutput, nil
+}
+
+func checkDirectoryTreeExists(c ObjectsClient, ctx context.Context, absPath _AbsCleanPath) (bool, error) {
+	exists, err := c.IsDir(ctx, string(absPath))
+	if err != nil {
+		if tt.IsResourceNotFoundError(err) || tt.IsStatusNotFoundCode(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	if exists {
+		return true, nil
+	}
+
+	return false, nil
+}
diff --git a/vendor/github.com/joyent/triton-go/storage/signing.go b/vendor/github.com/joyent/triton-go/storage/signing.go
new file mode 100644
index 00000000..81111c32
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/storage/signing.go
@@ -0,0 +1,90 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package storage
+
+import (
+	"bytes"
+	"fmt"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// SignURLInput represents parameters to a SignURL operation.
+type SignURLInput struct {
+	ValidityPeriod time.Duration
+	Method         string
+	ObjectPath     string
+}
+
+// SignURLOutput contains the outputs of a SignURL operation. To access the
+// signed URL, use the SignedURL method.
+type SignURLOutput struct {
+	host       string
+	objectPath string
+	Method     string
+	Algorithm  string
+	Signature  string
+	Expires    string
+	KeyID      string
+}
+
+// SignedURL returns a signed URL for the given scheme. Valid schemes are
+// `http` and `https`.
+func (output *SignURLOutput) SignedURL(scheme string) string {
+	query := &url.Values{}
+	query.Set("algorithm", output.Algorithm)
+	query.Set("expires", output.Expires)
+	query.Set("keyId", output.KeyID)
+	query.Set("signature", output.Signature)
+
+	sUrl := url.URL{}
+	sUrl.Scheme = scheme
+	sUrl.Host = output.host
+	sUrl.Path = output.objectPath
+	sUrl.RawQuery = query.Encode()
+
+	return sUrl.String()
+}
+
+// SignURL creates a time-expiring URL that can be shared with others.
+// This is useful to generate HTML links, for example.
+func (s *StorageClient) SignURL(input *SignURLInput) (*SignURLOutput, error) {
+	output := &SignURLOutput{
+		host:       s.Client.MantaURL.Host,
+		objectPath: fmt.Sprintf("/%s%s", s.Client.AccountName, input.ObjectPath),
+		Method:     input.Method,
+		Algorithm:  strings.ToUpper(s.Client.Authorizers[0].DefaultAlgorithm()),
+		Expires:    strconv.FormatInt(time.Now().Add(input.ValidityPeriod).Unix(), 10),
+		KeyID:      path.Join("/", s.Client.AccountName, "keys", s.Client.Authorizers[0].KeyFingerprint()),
+	}
+
+	toSign := bytes.Buffer{}
+	toSign.WriteString(input.Method + "\n")
+	toSign.WriteString(s.Client.MantaURL.Host + "\n")
+	toSign.WriteString(fmt.Sprintf("/%s%s\n", s.Client.AccountName, input.ObjectPath))
+
+	query := &url.Values{}
+	query.Set("algorithm", output.Algorithm)
+	query.Set("expires", output.Expires)
+	query.Set("keyId", output.KeyID)
+	toSign.WriteString(query.Encode())
+
+	signature, _, err := s.Client.Authorizers[0].SignRaw(toSign.String())
+	if err != nil {
+		return nil, errors.Wrapf(err, "error signing string")
+	}
+
+	output.Signature = signature
+	return output, nil
+}
diff --git a/vendor/github.com/joyent/triton-go/storage/snaplink.go b/vendor/github.com/joyent/triton-go/storage/snaplink.go
new file mode 100644
index 00000000..ec8510a6
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/storage/snaplink.go
@@ -0,0 +1,54 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package storage
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/joyent/triton-go/client"
+	"github.com/pkg/errors"
+)
+
+type SnapLinksClient struct {
+	client *client.Client
+}
+
+// PutSnapLinkInput represents parameters to a PutSnapLink operation.
+type PutSnapLinkInput struct {
+	LinkPath   string
+	SourcePath string
+}
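SignURL (defined in signing.go above) produces a time-limited link without any further network calls. A sketch, assuming the `*storage.StorageClient` has been constructed elsewhere with a configured signer; the object path is only an example:

```go
package example

import (
	"net/http"
	"time"

	"github.com/joyent/triton-go/storage"
)

// shareObject returns a pre-signed HTTPS GET link valid for 15 minutes.
func shareObject(sc *storage.StorageClient) (string, error) {
	signed, err := sc.SignURL(&storage.SignURLInput{
		ValidityPeriod: 15 * time.Minute,
		Method:         http.MethodGet,
		ObjectPath:     "/stor/books/treasure_island.txt",
	})
	if err != nil {
		return "", err
	}
	// Only the scheme is supplied here; host, path, expiry and signature
	// are already embedded in the output.
	return signed.SignedURL("https"), nil
}
```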
+
+// Put creates a SnapLink to an object.
+func (s *SnapLinksClient) Put(ctx context.Context, input *PutSnapLinkInput) error {
+	linkPath := fmt.Sprintf("/%s%s", s.client.AccountName, input.LinkPath)
+	sourcePath := fmt.Sprintf("/%s%s", s.client.AccountName, input.SourcePath)
+	headers := &http.Header{}
+	headers.Set("Content-Type", "application/json; type=link")
+	headers.Set("location", sourcePath)
+	headers.Set("Accept", "application/json, */*")
+	headers.Set("Accept-Version", "~1.0")
+
+	reqInput := client.RequestInput{
+		Method:  http.MethodPut,
+		Path:    linkPath,
+		Headers: headers,
+	}
+	respBody, _, err := s.client.ExecuteRequestStorage(ctx, reqInput)
+	if respBody != nil {
+		defer respBody.Close()
+	}
+	if err != nil {
+		return errors.Wrapf(err, "unable to put snaplink")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/joyent/triton-go/triton.go b/vendor/github.com/joyent/triton-go/triton.go
new file mode 100644
index 00000000..f499e9a6
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/triton.go
@@ -0,0 +1,46 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package triton
+
+import (
+	"os"
+
+	"github.com/joyent/triton-go/authentication"
+)
+
+// Universal package used for defining configuration used across all client
+// constructors.
+
+// ClientConfig is a placeholder/input struct around the behavior of configuring
+// a client constructor through the implementation's runtime environment
+// (SDC/MANTA env vars).
+type ClientConfig struct {
+	TritonURL   string
+	MantaURL    string
+	AccountName string
+	Username    string
+	Signers     []authentication.Signer
+}
+
+var envPrefixes = []string{"TRITON", "SDC"}
+
+// GetEnv looks up environment variables using the preferred "TRITON" prefix,
+// but falls back to the retired "SDC" prefix. For example, looking up "USER"
+// will search for "TRITON_USER" followed by "SDC_USER". If the environment
+// variable is not set, an empty string is returned. GetEnv() is used to aid in
+// the transition and deprecation of the "SDC_*" environment variables.
+func GetEnv(name string) string {
+	for _, prefix := range envPrefixes {
+		if val, found := os.LookupEnv(prefix + "_" + name); found {
+			return val
+		}
+	}
+
+	return ""
+}
diff --git a/vendor/github.com/joyent/triton-go/version.go b/vendor/github.com/joyent/triton-go/version.go
new file mode 100644
index 00000000..5fc6091b
--- /dev/null
+++ b/vendor/github.com/joyent/triton-go/version.go
@@ -0,0 +1,42 @@
+//
+// Copyright (c) 2018, Joyent, Inc. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+package triton
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Version represents the main version number of the current release
+// of the Triton-go SDK.
+const Version = "1.7.0"
+
+// Prerelease adds a pre-release marker to the version.
+//
+// If this is "" (empty string) then it means that it is a final release.
+// Otherwise, this is a pre-release such as "dev" (in development), "beta",
+// "rc1", etc.
+var Prerelease = "" + +// UserAgent returns a Triton-go characteristic string that allows the +// network protocol peers to identify the version, release and runtime +// of the Triton-go client from which the requests originate. +func UserAgent() string { + if Prerelease != "" { + return fmt.Sprintf("triton-go/%s-%s (%s-%s; %s)", Version, Prerelease, + runtime.GOARCH, runtime.GOOS, runtime.Version()) + } + + return fmt.Sprintf("triton-go/%s (%s-%s; %s)", Version, runtime.GOARCH, + runtime.GOOS, runtime.Version()) +} + +// CloudAPIMajorVersion specifies the CloudAPI version compatibility +// for current release of the Triton-go SDK. +const CloudAPIMajorVersion = "8" diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..c8a9fbb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..313a0f88 --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 00000000..50d56ffb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,87 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json"
+
+You can also use thrift-like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The result depends heavily on the data input.
+
+# Usage
+
+100% compatibility with standard lib
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin)
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, email taowen@gmail.com, or reach us via [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 00000000..92d2cc4a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+	"bytes"
+	"io"
+)
+
+// RawMessage mirrors encoding/json's RawMessage so jsoniter can serve as a
+// drop-in replacement.
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API.
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information.
+func Unmarshal(data []byte, v interface{}) error {
+	return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenient method to read from a string instead of []byte.
+func UnmarshalFromString(str string, v interface{}) error {
+	return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick method to get a value from a deeply nested JSON structure.
+func Get(data []byte, path ...interface{}) Any {
+	return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API.
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information.
+func Marshal(v interface{}) ([]byte, error) {
+	return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenient method to write as a string instead of []byte.
+func MarshalToString(v interface{}) (string, error) {
+	return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information.
+func NewDecoder(reader io.Reader) *Decoder {
+	return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides APIs identical to encoding/json's Decoder (Token() and UseNumber() are in progress).
+type Decoder struct {
+	iter *Iterator
+}
+
+// Decode decodes the next JSON value from the stream into obj.
+func (adapter *Decoder) Decode(obj interface{}) error {
+	if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+		if !adapter.iter.loadMore() {
+			return io.EOF
+		}
+	}
+	adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return adapter.iter.Error
+}
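Decode above returns nil at a clean EOF, which pairs with More (defined immediately below) to drain a stream of concatenated JSON documents. A self-contained sketch using only the adapter APIs shown in this file:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/json-iterator/go"
)

// main decodes a stream of whitespace-separated JSON objects until More
// reports that no further values remain.
func main() {
	input := strings.NewReader(`{"name":"a"} {"name":"b"}`)
	dec := jsoniter.NewDecoder(input)

	for dec.More() {
		var doc map[string]string
		if err := dec.Decode(&doc); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		fmt.Println(doc["name"])
	}
}
```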
+
+// More reports whether there is another element in the current stream.
+func (adapter *Decoder) More() bool {
+	iter := adapter.iter
+	if iter.Error != nil {
+		return false
+	}
+	c := iter.nextToken()
+	if c == 0 {
+		return false
+	}
+	iter.unreadByte()
+	return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the Decoder's buffer.
+func (adapter *Decoder) Buffered() io.Reader {
+	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+	return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.UseNumber = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.DisallowUnknownFields = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+	return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+	stream *Stream
+}
+
+// Encode encodes val as JSON and writes it to the underlying io.Writer.
+func (adapter *Encoder) Encode(val interface{}) error {
+	adapter.stream.WriteVal(val)
+	adapter.stream.WriteRaw("\n")
+	adapter.stream.Flush()
+	return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. Prefix is not supported.
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.IndentionStep = len(indent)
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML toggles HTML escaping, which is enabled by default; set it to
+// false to disable.
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.EscapeHTML = escapeHTML
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+	return ConfigDefault.Valid(data)
+}
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 00000000..f6b8aeab
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+	"errors"
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy json implementation holds []byte and parses lazily.
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := 
any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any 
*floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) 
{ + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000..9d1e901a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any 
*numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + 
return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := 
map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000..a4b93c78 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} 
{ + return any.val +} diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000..b45ef688 --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000..8c58fcba --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + 
cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = 
append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { 
+ iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 00000000..3095662b --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string|
+| --- | --- | --- | --- |--|--|
+| number | positive => true <br/> negative => true <br/> zero => false| 23.2 => 23 <br/> -32.1 => -32| 12.1 => 12 <br/> -12.1 => 0|as normal|same as origin|
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32| 13.2 => 13 <br/> -1.1 => 0 |12.1 => 12.1 <br/> -12.3 => -12.3 <br/> 12.4xxa => 12.4 <br/> +1.1e2 =>110 |same as origin|
+| bool | true => true <br/> false => false| true => 1 <br/> false => 0 | true => 1 <br/> false => 0 |true => 1 <br/> false => 0|true => "true" <br/> false => "false"|
+| object | true | 0 | 0 |0|original json|
+| array | empty array => false <br/> nonempty array => true| [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 |[] => 0 <br/>
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 00000000..e05c42ff --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 00000000..d778b5a1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 00000000..95ae54fb --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,322 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i 
:= 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
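Before the error-reporting helper below, a short sketch of driving an Iterator by hand with the constructors and peek/read methods just introduced; the input literal and expected output are invented for illustration:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"n":1.5,"ok":true}`)

	// WhatIsNext peeks at the next token and unreads it, so the value
	// is still available for the actual read.
	if iter.WhatIsNext() == jsoniter.ObjectValue {
		obj := iter.Read().(map[string]interface{})
		fmt.Println(obj["n"], obj["ok"]) // 1.5 true
	}

	// Errors are never returned directly; they accumulate on iter.Error.
	if iter.Error != nil {
		fmt.Println("decode failed:", iter.Error)
	}
}
```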
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 00000000..6188cb45 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
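ReadArray is the callback-free counterpart of ReadArrayCB and reads naturally in a loop. A minimal sketch under the same assumptions as above (the array literal is made up; ReadInt comes from the package's integer readers, which this hunk uses but does not define):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1,2,3]`)
	sum := 0
	// ReadArray returns true while elements remain, consuming the
	// '[' ',' and ']' tokens as it goes.
	for iter.ReadArray() {
		sum += iter.ReadInt()
	}
	fmt.Println(sum) // 6
}
```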
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000..b9754638 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail 
{ + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + 
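+ // a delimiter arrived before any dot: commit the new read position and return the digits parsed so far as a float64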
iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000..21423203 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := 
iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + 
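+ // the ninth character is not a digit, so the eight digits consumed above already form the complete value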
iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", 
"overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000..1c575767 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,251 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if 
!callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000..e91eefb1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as 
BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000..8fcdc3b6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster 
implementation; it does not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipArray", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, decrease level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of the string. +// Supports strings that contain escaped quote symbols. 
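+// A quote only terminates the string when it is preceded by an even number of +// backslashes: the backwards scan below counts them, so \" stays inside the +// string while \\" ends it.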
+func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000..6cf66d04 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) 
ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
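+// A minimal usage sketch (the JSON literal is hypothetical; ParseString and +// ConfigDefault are defined elsewhere in this package): +// +// iter := ParseString(ConfigDefault, `"abc"`) +// b := iter.ReadStringAsSlice() // b aliases the iterator's internal buffer +// s := string(b) // copy the bytes out before the next Read* call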
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects ", but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000..c2934f91 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces whose syntax is identical to that of the +// standard library's encoding/json. +// Converting from encoding/json to jsoniter requires no more than replacing the imported package +// and variable type declarations (if any). +// jsoniter's interfaces give 100% compatibility with code using the standard library. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// In addition, jsoniter.Iterator provides a different set of interfaces +// for iterating over given bytes/string/reader +// and yielding parsed elements one by one. 
+// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000..e2389b56 --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool is a thread-safe pool of iterators that share the same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool is a thread-safe pool of streams that share the same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000..4459e203 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,332 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on the type is used to create decoders, which are then cached. +// Reflection on values is avoided where we can, as reflect.Value itself allocates, with the following exceptions: +// 1. creating an instance of a new value, for example *int will need an int to be allocated +// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New +// 3. assignment to a map, where both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
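+// A minimal sketch of supplying a custom ValEncoder through this package's +// RegisterTypeEncoderFunc (defined in reflect_extension.go); the Celsius type +// and its "mypkg.Celsius" key are hypothetical, the key being the type's +// String() form, and Stream.WriteFloat64 is part of the Stream API elsewhere +// in this package: +// +// type Celsius float64 +// +// RegisterTypeEncoderFunc("mypkg.Celsius", +// func(ptr unsafe.Pointer, stream *Stream) { +// stream.WriteFloat64(float64(*(*Celsius)(ptr))) +// }, +// func(ptr unsafe.Pointer) bool { return *(*Celsius)(ptr) == 0 })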
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := 
typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else 
if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000..13a0b7b0 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + 
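+ // elements beyond the array's fixed length were skipped above, so anything but ']' here means malformed input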
iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000..8b6bc8b4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000..05e8fbf1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
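+// A minimal field-renaming sketch (the extension type and the field names are +// hypothetical; DummyExtension, StructDescriptor and RegisterExtension are all +// defined in this file): +// +// type renameExtension struct{ DummyExtension } +// +// func (e *renameExtension) UpdateStructDescriptor(sd *StructDescriptor) { +// if binding := sd.GetField("OldName"); binding != nil { +// binding.FromNames = []string{"new_name"} // names accepted when decoding +// binding.ToNames = []string{"new_name"} // name emitted when encoding +// } +// } +// +// RegisterExtension(&renameExtension{})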
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
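+ // The inherited binding is wrapped below so that encode and decode
+ // dereference the embedded *struct pointer before touching the field.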
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
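+ // e.g. `json:"nick,omitempty"` renames the field to "nick", while an empty
+ // first tag part such as `json:",omitempty"` keeps the original Go field name.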
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000..98d45c1e --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go 
b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 00000000..f2619936 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000..547b4421 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,338 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case 
reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c =
iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + subStream.buf = make([]byte, 0, 64) + key, elem := mapIter.UnsafeNext() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer() + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer(), + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { 
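+ // The pairs arrive here sorted by decoded key, so with SortMapKeys enabled
+ // map[string]int{"b": 2, "a": 1} always encodes as {"a":1,"b":2}.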
+ if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000..fea50719 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,217 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := 
encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + bytes, err := json.Marshal(obj) + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000..f88722d1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package 
jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + 
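+ // As in createEncoderOfNative above, plain int and uint decode through the
+ // 32- or 64-bit codec chosen from strconv.IntSize for the target platform
+ // (and uintptr from ptrSize).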
case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return 
*((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode 
base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000..43ec71d6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,133 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder 
*dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000..9441d79d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + 
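+ // A literal "[]" produces an empty, non-nil slice below; only JSON null
+ // leaves the slice nil (handled above via UnsafeSetNil).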
sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000..932641ac --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1048 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return 
&generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = 
fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + 
fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash 
+ fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + 
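These fixed-arity decoders all share one trick: each field name is hashed once when the decoder is built, so per-token dispatch becomes an integer switch rather than a string map lookup, with `generalStructDecoder` as the fallback whenever two hashes collide. A standalone sketch of the idea, using FNV-1a purely for illustration (the vendored code uses its own `calcHash`):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// fieldHash stands in for calcHash: reduce a JSON field name to an
// integer once, so decoding can switch on integers per token.
func fieldHash(name string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(name))
	return h.Sum64()
}

func main() {
	// Precomputed at "decoder construction" time.
	idHash := fieldHash("id")
	nameHash := fieldHash("name")

	// Per-token dispatch, analogous to twoFieldsStructDecoder.Decode.
	for _, field := range []string{"id", "name", "unknown"} {
		switch fieldHash(field) {
		case idHash:
			fmt.Println("decode into the ID field")
		case nameHash:
			fmt.Println("decode into the Name field")
		default:
			fmt.Println("skip the value") // mirrors iter.Skip()
		}
	}
}
```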
default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case 
decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder 
+ fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, 
iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000..d0759cf6 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder 
ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000..17662fde --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. 
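Looking back at the struct encoder above: `structEncoder.Encode` is where the familiar tag options take effect, `omitempty` fields are skipped when their encoder reports `IsEmpty`, and `stringModeNumberEncoder` implements the `,string` option by quoting numeric output. A small sketch against the public jsoniter API (the `User` type is illustrative); the `NewStream` documentation then continues below:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type User struct {
	Name  string `json:"name"`
	Email string `json:"email,omitempty"` // dropped when empty (structEncoder + IsEmpty)
	ID    int64  `json:"id,string"`       // quoted via stringModeNumberEncoder
}

func main() {
	out, err := jsoniter.Marshal(User{Name: "a", ID: 7})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"a","id":"7"}
}
```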
+// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + n, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[n:] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) 
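The `Stream` methods above only touch the underlying `io.Writer` in `Write` and `Flush`; everything else accumulates in the internal buffer, which is what makes the fine-grained `WriteXxx` helpers cheap to compose. A minimal usage sketch relying only on the API shown in this file:

```go
package main

import (
	"os"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// 64 is the initial buffer size; writes accumulate in the internal
	// buffer until Flush pushes them to os.Stdout.
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 64)
	stream.WriteObjectStart()
	stream.WriteObjectField("count")
	stream.WriteInt(42)
	stream.WriteObjectEnd()
	if err := stream.Flush(); err != nil {
		panic(err)
	}
	// Output: {"count":42}
}
```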
+} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) + stream.Flush() +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000..826aa594 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
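One note before `WriteFloat32` continues below: `writeIndention` above is what turns the same `WriteObjectStart`/`WriteMore`/`WriteObjectEnd` calls into pretty-printed output once the frozen config carries a non-zero `indentionStep`. A sketch using jsoniter's public `Config.IndentionStep` knob, which maps to that internal field:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	pretty := jsoniter.Config{IndentionStep: 2}.Froze()
	out, err := pretty.Marshal(map[string]int{"a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {
	//   "a": 1
	// }
}
```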
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000..d1059ee4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } 
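The lossy float writers just shown trade exactness for speed: scale by 10^6, round once, then emit the integer and fractional parts manually, zero-padding via the `pow10` table and trimming trailing zeros. A self-contained sketch of that fixed-point path, assuming a non-negative input below the fast-path cutoff (the integer writers resume below):

```go
package main

import "fmt"

// lossy6 sketches the fixed-point path of WriteFloat64Lossy: scale by 1e6,
// round once, then print integer and fractional parts, trimming zeros.
// Assumes 0 <= val and val small enough not to overflow uint64.
func lossy6(val float64) string {
	const exp = 1000000
	lval := uint64(val*exp + 0.5) // round to 6 decimal places
	out := fmt.Sprintf("%d", lval/exp)
	frac := lval % exp
	if frac == 0 {
		return out
	}
	s := fmt.Sprintf("%06d", frac) // left-pad with zeros, mirroring the pow10 loop
	for len(s) > 0 && s[len(s)-1] == '0' {
		s = s[:len(s)-1] // trim trailing zeros, like the buffer trim above
	}
	return out + "." + s
}

func main() {
	fmt.Println(lossy6(3.14))     // 3.14
	fmt.Println(lossy6(0.000125)) // 0.000125
	fmt.Println(lossy6(2))        // 2
}
```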
else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) 
WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000..54c2ba0b --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML " that closes the next token. If + // non-empty, the subsequent call to Next will return a raw or RCDATA text + // token: one that treats "
<p>" as text instead of an element. + // rawTag's contents are lower-cased. + rawTag string + // textIsRaw is whether the current text token's data is not escaped. + textIsRaw bool + // convertNUL is whether NUL bytes in the current token's data should + // be converted into \ufffd replacement characters. + convertNUL bool + // allowCDATA is whether CDATA sections are allowed in the current context. + allowCDATA bool +} + +// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as +// the text "foo". The default value is false, which means to recognize it as +// a bogus comment "<!-- [CDATA[foo]] -->" instead. +// +// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and +// only if tokenizing foreign content, such as MathML and SVG. However, +// tracking foreign-contentness is difficult to do purely in the tokenizer, +// as opposed to the parser, due to HTML integration points: an <svg> element +// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to- +// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call AllowCDATA as appropriate. +// In practice, if using the tokenizer without caring whether MathML or SVG +// CDATA is text or comments, such as tokenizing HTML to find all the anchor +// text, it is acceptable to ignore this responsibility. +func (z *Tokenizer) AllowCDATA(allowCDATA bool) { + z.allowCDATA = allowCDATA +} + +// NextIsNotRawText instructs the tokenizer that the next token should not be +// considered as 'raw text'. Some elements, such as script and title elements, +// normally require the next token after the opening tag to be 'raw text' that +// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>" +// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and +// an end tag token for "</title>". There are no distinct start tag or end tag +// tokens for the "<b>" and "</b>". +// +// This tokenizer implementation will generally look for raw text at the right +// times. Strictly speaking, an HTML5 compliant tokenizer should not look for +// raw text if in foreign content: <title> generally needs raw text, but a +// <title> inside an <svg> does not. Another example is that a <textarea> +// generally needs raw text, but a <textarea> is not allowed as an immediate +// child of a <select>; in normal parsing, a <textarea> implies </select>, but +// one cannot close the implicit element when parsing a <select>'s InnerHTML. +// Similarly to AllowCDATA, tracking the correct moment to override raw-text- +// ness is difficult to do purely in the tokenizer, as opposed to the parser. +// For strict compliance with the HTML5 tokenization algorithm, it is the +// responsibility of the user of a tokenizer to call NextIsNotRawText as +// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this +// responsibility for basic usage. +// +// Note that this 'raw text' concept is different from the one offered by the +// Tokenizer.Raw method. +func (z *Tokenizer) NextIsNotRawText() { + z.rawTag = "" +} + +// Err returns the error associated with the most recent ErrorToken token. +// This is typically io.EOF, meaning the end of tokenization. +func (z *Tokenizer) Err() error { + if z.tt != ErrorToken { + return nil + } + return z.err +} + +// readByte returns the next byte from the input stream, doing a buffered read +// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a contiguous byte +// slice that holds all the bytes read so far for the current token.
+// It sets z.err if the underlying reader returns an error. +// Pre-condition: z.err == nil. +func (z *Tokenizer) readByte() byte { + if z.raw.end >= len(z.buf) { + // Our buffer is exhausted and we have to read from z.r. Check if the + // previous read resulted in an error. + if z.readErr != nil { + z.err = z.readErr + return 0 + } + // We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length + // z.raw.end - z.raw.start is more than half the capacity of z.buf, then we + // allocate a new buffer before the copy. + c := cap(z.buf) + d := z.raw.end - z.raw.start + var buf1 []byte + if 2*d > c { + buf1 = make([]byte, d, 2*c) + } else { + buf1 = z.buf[:d] + } + copy(buf1, z.buf[z.raw.start:z.raw.end]) + if x := z.raw.start; x != 0 { + // Adjust the data/attr spans to refer to the same contents after the copy. + z.data.start -= x + z.data.end -= x + z.pendingAttr[0].start -= x + z.pendingAttr[0].end -= x + z.pendingAttr[1].start -= x + z.pendingAttr[1].end -= x + for i := range z.attr { + z.attr[i][0].start -= x + z.attr[i][0].end -= x + z.attr[i][1].start -= x + z.attr[i][1].end -= x + } + } + z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d] + // Now that we have copied the live bytes to the start of the buffer, + // we read from z.r into the remainder. + var n int + n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)]) + if n == 0 { + z.err = z.readErr + return 0 + } + z.buf = buf1[:d+n] + } + x := z.buf[z.raw.end] + z.raw.end++ + if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf { + z.err = ErrBufferExceeded + return 0 + } + return x +} + +// Buffered returns a slice containing data buffered but not yet tokenized. +func (z *Tokenizer) Buffered() []byte { + return z.buf[z.raw.end:] +} + +// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil). +// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil) +// too many times in succession. +func readAtLeastOneByte(r io.Reader, b []byte) (int, error) { + for i := 0; i < 100; i++ { + n, err := r.Read(b) + if n != 0 || err != nil { + return n, err + } + } + return 0, io.ErrNoProgress +} + +// skipWhiteSpace skips past any white space. +func (z *Tokenizer) skipWhiteSpace() { + if z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + // No-op. + default: + z.raw.end-- + return + } + } +} + +// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and +// is typically something like "script" or "textarea". +func (z *Tokenizer) readRawOrRCDATA() { + if z.rawTag == "script" { + z.readScript() + z.textIsRaw = true + z.rawTag = "" + return + } +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + c = z.readByte() + if z.err != nil { + break loop + } + if c != '/' { + continue loop + } + if z.readRawEndTag() || z.err != nil { + break loop + } + } + z.data.end = z.raw.end + // A textarea's or title's RCDATA can contain escaped entities. + z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title" + z.rawTag = "" +} + +// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag. +// If it succeeds, it backs up the input position to reconsume the tag and +// returns true. Otherwise it returns false. The opening "</" has already been +// consumed. 
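`readRawOrRCDATA` above is why `<script>`, `<style>`, `<title>`, and `<textarea>` contents come back as a single text token rather than as nested elements. That behavior is easy to confirm through the public tokenizer API:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// "a<b>c</b>d" inside <title> is RCDATA: one text token, no <b> elements.
	z := html.NewTokenizer(strings.NewReader("<title>a<b>c</b>d</title>"))
	for tt := z.Next(); tt != html.ErrorToken; tt = z.Next() {
		fmt.Printf("%v %q\n", tt, z.Token().Data)
	}
	// Output:
	// StartTag "title"
	// Text "a<b>c</b>d"
	// EndTag "title"
}
```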
+func (z *Tokenizer) readRawEndTag() bool { + for i := 0; i < len(z.rawTag); i++ { + c := z.readByte() + if z.err != nil { + return false + } + if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') { + z.raw.end-- + return false + } + } + c := z.readByte() + if z.err != nil { + return false + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + // The 3 is 2 for the leading "</" plus 1 for the trailing character c. + z.raw.end -= 3 + len(z.rawTag) + return true + } + z.raw.end-- + return false +} + +// readScript reads until the next </script> tag, following the byzantine +// rules for escaping/hiding the closing tag. +func (z *Tokenizer) readScript() { + defer func() { + z.data.end = z.raw.end + }() + var c byte + +scriptData: + c = z.readByte() + if z.err != nil { + return + } + if c == '<' { + goto scriptDataLessThanSign + } + goto scriptData + +scriptDataLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '/': + goto scriptDataEndTagOpen + case '!': + goto scriptDataEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptData + +scriptDataEscapeStart: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapeStartDash + } + z.raw.end-- + goto scriptData + +scriptDataEscapeStartDash: + c = z.readByte() + if z.err != nil { + return + } + if c == '-' { + goto scriptDataEscapedDashDash + } + z.raw.end-- + goto scriptData + +scriptDataEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + } + goto scriptDataEscaped + +scriptDataEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataEscapedDashDash + case '<': + goto scriptDataEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataEscaped + +scriptDataEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataEscapedEndTagOpen + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + goto scriptDataDoubleEscapeStart + } + z.raw.end-- + goto scriptData + +scriptDataEscapedEndTagOpen: + if z.readRawEndTag() || z.err != nil { + return + } + goto scriptDataEscaped + +scriptDataDoubleEscapeStart: + z.raw.end-- + for i := 0; i < len("script"); i++ { + c = z.readByte() + if z.err != nil { + return + } + if c != "script"[i] && c != "SCRIPT"[i] { + z.raw.end-- + goto scriptDataEscaped + } + } + c = z.readByte() + if z.err != nil { + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/', '>': + goto scriptDataDoubleEscaped + } + z.raw.end-- + goto scriptDataEscaped + +scriptDataDoubleEscaped: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedDashDash: + c = z.readByte() + if z.err != nil { + return + } + switch c { + case '-': + goto 
scriptDataDoubleEscapedDashDash + case '<': + goto scriptDataDoubleEscapedLessThanSign + case '>': + goto scriptData + } + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapedLessThanSign: + c = z.readByte() + if z.err != nil { + return + } + if c == '/' { + goto scriptDataDoubleEscapeEnd + } + z.raw.end-- + goto scriptDataDoubleEscaped + +scriptDataDoubleEscapeEnd: + if z.readRawEndTag() { + z.raw.end += len("</script>") + goto scriptDataEscaped + } + if z.err != nil { + return + } + goto scriptDataDoubleEscaped +} + +// readComment reads the next comment token starting with "<!--". The opening +// "<!--" has already been consumed. +func (z *Tokenizer) readComment() { + z.data.start = z.raw.end + defer func() { + if z.data.end < z.data.start { + // It's a comment with no data, like <!-->. + z.data.end = z.data.start + } + }() + for dashCount := 2; ; { + c := z.readByte() + if z.err != nil { + // Ignore up to two dashes at EOF. + if dashCount > 2 { + dashCount = 2 + } + z.data.end = z.raw.end - dashCount + return + } + switch c { + case '-': + dashCount++ + continue + case '>': + if dashCount >= 2 { + z.data.end = z.raw.end - len("-->") + return + } + case '!': + if dashCount >= 2 { + c = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len("--!>") + return + } + } + } + dashCount = 0 + } +} + +// readUntilCloseAngle reads until the next ">". +func (z *Tokenizer) readUntilCloseAngle() { + z.data.start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + if c == '>' { + z.data.end = z.raw.end - len(">") + return + } + } +} + +// readMarkupDeclaration reads the next token starting with "<!". It might be +// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or +// "<!a bogus comment". The opening "<!" has already been consumed. +func (z *Tokenizer) readMarkupDeclaration() TokenType { + z.data.start = z.raw.end + var c [2]byte + for i := 0; i < 2; i++ { + c[i] = z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return CommentToken + } + } + if c[0] == '-' && c[1] == '-' { + z.readComment() + return CommentToken + } + z.raw.end -= 2 + if z.readDoctype() { + return DoctypeToken + } + if z.allowCDATA && z.readCDATA() { + z.convertNUL = true + return TextToken + } + // It's a bogus comment. + z.readUntilCloseAngle() + return CommentToken +} + +// readDoctype attempts to read a doctype declaration and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readDoctype() bool { + const s = "DOCTYPE" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] && c != s[i]+('a'-'A') { + // Back up to read the fragment of "DOCTYPE" again. + z.raw.end = z.data.start + return false + } + } + if z.skipWhiteSpace(); z.err != nil { + z.data.start = z.raw.end + z.data.end = z.raw.end + return true + } + z.readUntilCloseAngle() + return true +} + +// readCDATA attempts to read a CDATA section and returns true if +// successful. The opening "<!" has already been consumed. +func (z *Tokenizer) readCDATA() bool { + const s = "[CDATA[" + for i := 0; i < len(s); i++ { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return false + } + if c != s[i] { + // Back up to read the fragment of "[CDATA[" again. 
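`readMarkupDeclaration` above routes `<!` sequences to comments, doctypes, or, when enabled, CDATA sections, and the `AllowCDATA` switch documented earlier is directly observable (`readCDATA` continues below):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader("<![CDATA[hello]]>"))
	z.AllowCDATA(true) // without this, the input is tokenized as a bogus comment
	fmt.Println(z.Next())         // Text
	fmt.Println(string(z.Text())) // hello
}
```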
+ z.raw.end = z.data.start + return false + } + } + z.data.start = z.raw.end + brackets := 0 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return true + } + switch c { + case ']': + brackets++ + case '>': + if brackets >= 2 { + z.data.end = z.raw.end - len("]]>") + return true + } + brackets = 0 + default: + brackets = 0 + } + } +} + +// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end] +// case-insensitively matches any element of ss. +func (z *Tokenizer) startTagIn(ss ...string) bool { +loop: + for _, s := range ss { + if z.data.end-z.data.start != len(s) { + continue loop + } + for i := 0; i < len(s); i++ { + c := z.buf[z.data.start+i] + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + if c != s[i] { + continue loop + } + } + return true + } + return false +} + +// readStartTag reads the next start tag token. The opening "<a" has already +// been consumed, where 'a' means anything in [A-Za-z]. +func (z *Tokenizer) readStartTag() TokenType { + z.readTag(true) + if z.err != nil { + return ErrorToken + } + // Several tags flag the tokenizer's next token as raw. + c, raw := z.buf[z.data.start], false + if 'A' <= c && c <= 'Z' { + c += 'a' - 'A' + } + switch c { + case 'i': + raw = z.startTagIn("iframe") + case 'n': + raw = z.startTagIn("noembed", "noframes", "noscript") + case 'p': + raw = z.startTagIn("plaintext") + case 's': + raw = z.startTagIn("script", "style") + case 't': + raw = z.startTagIn("textarea", "title") + case 'x': + raw = z.startTagIn("xmp") + } + if raw { + z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) + } + // Look for a self-closing token like "<br/>". + if z.err == nil && z.buf[z.raw.end-2] == '/' { + return SelfClosingTagToken + } + return StartTagToken +} + +// readTag reads the next tag token and its attributes. If saveAttr, those +// attributes are saved in z.attr, otherwise z.attr is set to an empty slice. +// The opening "<a" or "</a" has already been consumed, where 'a' means anything +// in [A-Za-z]. +func (z *Tokenizer) readTag(saveAttr bool) { + z.attr = z.attr[:0] + z.nAttrReturned = 0 + // Read the tag name and attribute key/value pairs. + z.readTagName() + if z.skipWhiteSpace(); z.err != nil { + return + } + for { + c := z.readByte() + if z.err != nil || c == '>' { + break + } + z.raw.end-- + z.readTagAttrKey() + z.readTagAttrVal() + // Save pendingAttr if saveAttr and that attribute has a non-empty key. + if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end { + z.attr = append(z.attr, z.pendingAttr) + } + if z.skipWhiteSpace(); z.err != nil { + break + } + } +} + +// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end) +// is positioned such that the first byte of the tag name (the "d" in "<div") +// has already been consumed. +func (z *Tokenizer) readTagName() { + z.data.start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.data.end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.data.end = z.raw.end - 1 + return + case '/', '>': + z.raw.end-- + z.data.end = z.raw.end + return + } + } +} + +// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>". +// Precondition: z.err == nil. 
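The `readTag*` helpers only record byte spans into `z.buf`; the allocation-light way to consume them is through the `TagName` and `TagAttr` accessors defined further down, rather than the convenience `Token` method. For example:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	z := html.NewTokenizer(strings.NewReader(`<img src="a.png" alt=logo>`))
	if z.Next() == html.StartTagToken {
		name, hasAttr := z.TagName()
		fmt.Println(string(name)) // img
		for hasAttr {
			var key, val []byte
			key, val, hasAttr = z.TagAttr()
			fmt.Printf("%s=%q\n", key, val) // src="a.png", then alt="logo"
		}
	}
}
```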
+func (z *Tokenizer) readTagAttrKey() { + z.pendingAttr[0].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[0].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f', '/': + z.pendingAttr[0].end = z.raw.end - 1 + return + case '=', '>': + z.raw.end-- + z.pendingAttr[0].end = z.raw.end + return + } + } +} + +// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>". +func (z *Tokenizer) readTagAttrVal() { + z.pendingAttr[1].start = z.raw.end + z.pendingAttr[1].end = z.raw.end + if z.skipWhiteSpace(); z.err != nil { + return + } + c := z.readByte() + if z.err != nil { + return + } + if c != '=' { + z.raw.end-- + return + } + if z.skipWhiteSpace(); z.err != nil { + return + } + quote := z.readByte() + if z.err != nil { + return + } + switch quote { + case '>': + z.raw.end-- + return + + case '\'', '"': + z.pendingAttr[1].start = z.raw.end + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + if c == quote { + z.pendingAttr[1].end = z.raw.end - 1 + return + } + } + + default: + z.pendingAttr[1].start = z.raw.end - 1 + for { + c := z.readByte() + if z.err != nil { + z.pendingAttr[1].end = z.raw.end + return + } + switch c { + case ' ', '\n', '\r', '\t', '\f': + z.pendingAttr[1].end = z.raw.end - 1 + return + case '>': + z.raw.end-- + z.pendingAttr[1].end = z.raw.end + return + } + } + } +} + +// Next scans the next token and returns its type. +func (z *Tokenizer) Next() TokenType { + z.raw.start = z.raw.end + z.data.start = z.raw.end + z.data.end = z.raw.end + if z.err != nil { + z.tt = ErrorToken + return z.tt + } + if z.rawTag != "" { + if z.rawTag == "plaintext" { + // Read everything up to EOF. + for z.err == nil { + z.readByte() + } + z.data.end = z.raw.end + z.textIsRaw = true + } else { + z.readRawOrRCDATA() + } + if z.data.end > z.data.start { + z.tt = TextToken + z.convertNUL = true + return z.tt + } + } + z.textIsRaw = false + z.convertNUL = false + +loop: + for { + c := z.readByte() + if z.err != nil { + break loop + } + if c != '<' { + continue loop + } + + // Check if the '<' we have just read is part of a tag, comment + // or doctype. If not, it's part of the accumulated text token. + c = z.readByte() + if z.err != nil { + break loop + } + var tokenType TokenType + switch { + case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z': + tokenType = StartTagToken + case c == '/': + tokenType = EndTagToken + case c == '!' || c == '?': + // We use CommentToken to mean any of "<!--actual comments-->", + // "<!DOCTYPE declarations>" and "<?xml processing instructions?>". + tokenType = CommentToken + default: + // Reconsume the current character. + z.raw.end-- + continue + } + + // We have a non-text token, but we might have accumulated some text + // before that. If so, we return the text first, and return the non- + // text token on the subsequent call to Next. + if x := z.raw.end - len("<a"); z.raw.start < x { + z.raw.end = x + z.data.end = x + z.tt = TextToken + return z.tt + } + switch tokenType { + case StartTagToken: + z.tt = z.readStartTag() + return z.tt + case EndTagToken: + c = z.readByte() + if z.err != nil { + break loop + } + if c == '>' { + // "</>" does not generate a token at all. Generate an empty comment + // to allow passthrough clients to pick up the data using Raw. + // Reset the tokenizer state and start again. 
+ z.tt = CommentToken + return z.tt + } + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' { + z.readTag(false) + if z.err != nil { + z.tt = ErrorToken + } else { + z.tt = EndTagToken + } + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + case CommentToken: + if c == '!' { + z.tt = z.readMarkupDeclaration() + return z.tt + } + z.raw.end-- + z.readUntilCloseAngle() + z.tt = CommentToken + return z.tt + } + } + if z.raw.start < z.raw.end { + z.data.end = z.raw.end + z.tt = TextToken + return z.tt + } + z.tt = ErrorToken + return z.tt +} + +// Raw returns the unmodified text of the current token. Calling Next, Token, +// Text, TagName or TagAttr may change the contents of the returned slice. +func (z *Tokenizer) Raw() []byte { + return z.buf[z.raw.start:z.raw.end] +} + +// convertNewlines converts "\r" and "\r\n" in s to "\n". +// The conversion happens in place, but the resulting slice may be shorter. +func convertNewlines(s []byte) []byte { + for i, c := range s { + if c != '\r' { + continue + } + + src := i + 1 + if src >= len(s) || s[src] != '\n' { + s[i] = '\n' + continue + } + + dst := i + for src < len(s) { + if s[src] == '\r' { + if src+1 < len(s) && s[src+1] == '\n' { + src++ + } + s[dst] = '\n' + } else { + s[dst] = s[src] + } + src++ + dst++ + } + return s[:dst] + } + return s +} + +var ( + nul = []byte("\x00") + replacement = []byte("\ufffd") +) + +// Text returns the unescaped text of a text, comment or doctype token. The +// contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) Text() []byte { + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + s = convertNewlines(s) + if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) { + s = bytes.Replace(s, nul, replacement, -1) + } + if !z.textIsRaw { + s = unescape(s, false) + } + return s + } + return nil +} + +// TagName returns the lower-cased name of a tag token (the `img` out of +// `<IMG SRC="foo">`) and whether the tag has attributes. +// The contents of the returned slice may change on the next call to Next. +func (z *Tokenizer) TagName() (name []byte, hasAttr bool) { + if z.data.start < z.data.end { + switch z.tt { + case StartTagToken, EndTagToken, SelfClosingTagToken: + s := z.buf[z.data.start:z.data.end] + z.data.start = z.raw.end + z.data.end = z.raw.end + return lower(s), z.nAttrReturned < len(z.attr) + } + } + return nil, false +} + +// TagAttr returns the lower-cased key and unescaped value of the next unparsed +// attribute for the current tag token and whether there are more attributes. +// The contents of the returned slices may change on the next call to Next. +func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { + if z.nAttrReturned < len(z.attr) { + switch z.tt { + case StartTagToken, SelfClosingTagToken: + x := z.attr[z.nAttrReturned] + z.nAttrReturned++ + key = z.buf[x[0].start:x[0].end] + val = z.buf[x[1].start:x[1].end] + return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr) + } + } + return nil, nil, false +} + +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. 
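`Token`, whose body follows, copies the current token's data into Go strings, so unlike `Raw`, `Text`, or `TagName` its result survives later `Next` calls. That makes it the natural fit for the canonical scan loop, e.g. pulling anchor hrefs out of a page:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	const page = `<p>see <a href="/docs">the docs</a></p>`
	z := html.NewTokenizer(strings.NewReader(page))
	for {
		if z.Next() == html.ErrorToken {
			return // z.Err() is io.EOF at the normal end of input
		}
		t := z.Token()
		if t.Type == html.StartTagToken && t.Data == "a" {
			for _, attr := range t.Attr {
				if attr.Key == "href" {
					fmt.Println("href:", attr.Val) // href: /docs
				}
			}
		}
	}
}
```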
+func (z *Tokenizer) Token() Token { + t := Token{Type: z.tt} + switch z.tt { + case TextToken, CommentToken, DoctypeToken: + t.Data = string(z.Text()) + case StartTagToken, SelfClosingTagToken, EndTagToken: + name, moreAttr := z.TagName() + for moreAttr { + var key, val []byte + key, val, moreAttr = z.TagAttr() + t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)}) + } + if a := atom.Lookup(name); a != 0 { + t.DataAtom, t.Data = a, a.String() + } else { + t.DataAtom, t.Data = 0, string(name) + } + } + return t +} + +// SetMaxBuf sets a limit on the amount of data buffered during tokenization. +// A value of 0 means unlimited. +func (z *Tokenizer) SetMaxBuf(n int) { + z.maxBuf = n +} + +// NewTokenizer returns a new HTML Tokenizer for the given Reader. +// The input is assumed to be UTF-8 encoded. +func NewTokenizer(r io.Reader) *Tokenizer { + return NewTokenizerFragment(r, "") +} + +// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for +// tokenizing an existing element's InnerHTML fragment. contextTag is that +// element's tag, such as "div" or "iframe". +// +// For example, how the InnerHTML "a<b" is tokenized depends on whether it is +// for a <p> tag or a <script> tag. +// +// The input is assumed to be UTF-8 encoded. +func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer { + z := &Tokenizer{ + r: r, + buf: make([]byte, 0, 4096), + } + if contextTag != "" { + switch s := strings.ToLower(contextTag); s { + case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp": + z.rawTag = s + } + } + return z +} diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 00000000..e6cd0ced --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,50 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/http/httpguts/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go new file mode 100644 index 00000000..e7de24ee --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -0,0 +1,346 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpguts
+
+import (
+	"net"
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/net/idna"
+)
+
+var isTokenTable = [127]bool{
+	'!':  true,
+	'#':  true,
+	'$':  true,
+	'%':  true,
+	'&':  true,
+	'\'': true,
+	'*':  true,
+	'+':  true,
+	'-':  true,
+	'.':  true,
+	'0':  true,
+	'1':  true,
+	'2':  true,
+	'3':  true,
+	'4':  true,
+	'5':  true,
+	'6':  true,
+	'7':  true,
+	'8':  true,
+	'9':  true,
+	'A':  true,
+	'B':  true,
+	'C':  true,
+	'D':  true,
+	'E':  true,
+	'F':  true,
+	'G':  true,
+	'H':  true,
+	'I':  true,
+	'J':  true,
+	'K':  true,
+	'L':  true,
+	'M':  true,
+	'N':  true,
+	'O':  true,
+	'P':  true,
+	'Q':  true,
+	'R':  true,
+	'S':  true,
+	'T':  true,
+	'U':  true,
+	'V':  true,
+	'W':  true,
+	'X':  true,
+	'Y':  true,
+	'Z':  true,
+	'^':  true,
+	'_':  true,
+	'`':  true,
+	'a':  true,
+	'b':  true,
+	'c':  true,
+	'd':  true,
+	'e':  true,
+	'f':  true,
+	'g':  true,
+	'h':  true,
+	'i':  true,
+	'j':  true,
+	'k':  true,
+	'l':  true,
+	'm':  true,
+	'n':  true,
+	'o':  true,
+	'p':  true,
+	'q':  true,
+	'r':  true,
+	's':  true,
+	't':  true,
+	'u':  true,
+	'v':  true,
+	'w':  true,
+	'x':  true,
+	'y':  true,
+	'z':  true,
+	'|':  true,
+	'~':  true,
+}
+
+func IsTokenRune(r rune) bool {
+	i := int(r)
+	return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+	return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+	for _, v := range values {
+		if headerValueContainsToken(v, token) {
+			return true
+		}
+	}
+	return false
+}
+
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+	// TODO: consider using strings.Trim(x, " \t") instead,
+	// if and when it's fast enough. See issue 10292.
+	// But this ASCII-only code will probably always beat UTF-8
+	// aware code.
+	for len(x) > 0 && isOWS(x[0]) {
+		x = x[1:]
+	}
+	for len(x) > 0 && isOWS(x[len(x)-1]) {
+		x = x[:len(x)-1]
+	}
+	return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+	v = trimOWS(v)
+	if comma := strings.IndexByte(v, ','); comma != -1 {
+		return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+	}
+	return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+	if 'A' <= b && b <= 'Z' {
+		return b + ('a' - 'A')
+	}
+	return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+	if len(t1) != len(t2) {
+		return false
+	}
+	for i, b := range t1 {
+		if b >= utf8.RuneSelf {
+			// No UTF-8 or non-ASCII allowed in tokens.
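+			// (Ranging over a string yields runes, so any non-ASCII
+			// byte sequence surfaces here as a rune >= utf8.RuneSelf;
+			// in the all-ASCII case the byte index i lines up with t2.)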
+ return false + } + if lowerASCII(byte(b)) != lowerASCII(t2[i]) { + return false + } + } + return true +} + +// isLWS reports whether b is linear white space, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// LWS = [CRLF] 1*( SP | HT ) +func isLWS(b byte) bool { return b == ' ' || b == '\t' } + +// isCTL reports whether b is a control byte, according +// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 +// CTL = <any US-ASCII control character +// (octets 0 - 31) and DEL (127)> +func isCTL(b byte) bool { + const del = 0x7f // a CTL + return b < ' ' || b == del +} + +// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. +// HTTP/2 imposes the additional restriction that uppercase ASCII +// letters are not allowed. +// +// RFC 7230 says: +// header-field = field-name ":" OWS field-value OWS +// field-name = token +// token = 1*tchar +// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / +// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA +func ValidHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !IsTokenRune(r) { + return false + } + } + return true +} + +// ValidHostHeader reports whether h is a valid host header. +func ValidHostHeader(h string) bool { + // The latest spec is actually this: + // + // http://tools.ietf.org/html/rfc7230#section-5.4 + // Host = uri-host [ ":" port ] + // + // Where uri-host is: + // http://tools.ietf.org/html/rfc3986#section-3.2.2 + // + // But we're going to be much more lenient for now and just + // search for any byte that's not a valid byte in any of those + // expressions. + for i := 0; i < len(h); i++ { + if !validHostByte[h[i]] { + return false + } + } + return true +} + +// See the validHostHeader comment. 
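+//
+// A quick sketch of what the table accepts (illustrative):
+//
+//	ValidHostHeader("example.com:8080") // true
+//	ValidHostHeader("bad host")         // false: ' ' is not a valid host byte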
+var validHostByte = [256]bool{ + '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, + '8': true, '9': true, + + 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, + 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, + 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, + 'y': true, 'z': true, + + 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, + 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, + 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, + 'Y': true, 'Z': true, + + '!': true, // sub-delims + '$': true, // sub-delims + '%': true, // pct-encoded (and used in IPv6 zones) + '&': true, // sub-delims + '(': true, // sub-delims + ')': true, // sub-delims + '*': true, // sub-delims + '+': true, // sub-delims + ',': true, // sub-delims + '-': true, // unreserved + '.': true, // unreserved + ':': true, // IPv6address + Host expression's optional port + ';': true, // sub-delims + '=': true, // sub-delims + '[': true, + '\'': true, // sub-delims + ']': true, + '_': true, // unreserved + '~': true, // unreserved +} + +// ValidHeaderFieldValue reports whether v is a valid "field-value" according to +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : +// +// message-header = field-name ":" [ field-value ] +// field-value = *( field-content | LWS ) +// field-content = <the OCTETs making up the field-value +// and consisting of either *TEXT or combinations +// of token, separators, and quoted-string> +// +// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : +// +// TEXT = <any OCTET except CTLs, +// but including LWS> +// LWS = [CRLF] 1*( SP | HT ) +// CTL = <any US-ASCII control character +// (octets 0 - 31) and DEL (127)> +// +// RFC 7230 says: +// field-value = *( field-content / obs-fold ) +// obj-fold = N/A to http2, and deprecated +// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +// field-vchar = VCHAR / obs-text +// obs-text = %x80-FF +// VCHAR = "any visible [USASCII] character" +// +// http2 further says: "Similarly, HTTP/2 allows header field values +// that are not valid. While most of the values that can be encoded +// will not alter header field parsing, carriage return (CR, ASCII +// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII +// 0x0) might be exploited by an attacker if they are translated +// verbatim. Any request or response that contains a character not +// permitted in a header field value MUST be treated as malformed +// (Section 8.1.2.6). Valid characters are defined by the +// field-content ABNF rule in Section 3.2 of [RFC7230]." +// +// This function does not (yet?) properly handle the rejection of +// strings that begin or end with SP or HTAB. +func ValidHeaderFieldValue(v string) bool { + for i := 0; i < len(v); i++ { + b := v[i] + if isCTL(b) && !isLWS(b) { + return false + } + } + return true +} + +func isASCII(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} + +// PunycodeHostPort returns the IDNA Punycode version +// of the provided "host" or "host:port" string. 
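+//
+// For example (an illustrative sketch; the exact ASCII form comes from
+// the idna package):
+//
+//	p, _ := PunycodeHostPort("bücher.example:443")
+//	// p == "xn--bcher-kva.example:443"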
+func PunycodeHostPort(v string) (string, error) {
+	if isASCII(v) {
+		return v, nil
+	}
+
+	host, port, err := net.SplitHostPort(v)
+	if err != nil {
+		// The input 'v' argument was just a "host" argument,
+		// without a port. This error should not be returned
+		// to the caller.
+		host = v
+		port = ""
+	}
+	host, err = idna.ToASCII(host)
+	if err != nil {
+		// Non-UTF-8? Not representable in Punycode, in any
+		// case.
+		return "", err
+	}
+	if port == "" {
+		return host, nil
+	}
+	return net.JoinHostPort(host, port), nil
+}
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 00000000..53fc5257
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image are found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+    apt-get upgrade -y && \
+    apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+       autotools-dev libtool pkg-config zlib1g-dev \
+       libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+       automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+        autoconf automake autotools-dev \
+        libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+        libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+        cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 00000000..55fd826f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+	docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 00000000..360d5aa3
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+  but are being worked on.
+* The client work has just started but, since it shares a lot of
+  code with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+ +Contributing: https://golang.org/doc/contribute.html +Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 00000000..c9a0cf3b --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA 
uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + 
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + 
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + 
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go new file mode 100644 index 00000000..f4d9b5ec --- /dev/null +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -0,0 +1,282 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Transport code's client connection pooling. + +package http2 + +import ( + "crypto/tls" + "net/http" + "sync" +) + +// ClientConnPool manages a pool of HTTP/2 client connections. +type ClientConnPool interface { + GetClientConn(req *http.Request, addr string) (*ClientConn, error) + MarkDead(*ClientConn) +} + +// clientConnPoolIdleCloser is the interface implemented by ClientConnPool +// implementations which can close their idle connections. +type clientConnPoolIdleCloser interface { + ClientConnPool + closeIdleConnections() +} + +var ( + _ clientConnPoolIdleCloser = (*clientConnPool)(nil) + _ clientConnPoolIdleCloser = noDialClientConnPool{} +) + +// TODO: use singleflight for dialing and addConnCalls? +type clientConnPool struct { + t *Transport + + mu sync.Mutex // TODO: maybe switch to RWMutex + // TODO: add support for sharing conns based on cert names + // (e.g. 
share conn for googleapis.com and appspot.com) + conns map[string][]*ClientConn // key is host:port + dialing map[string]*dialCall // currently in-flight dials + keys map[*ClientConn][]string + addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls +} + +func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, dialOnMiss) +} + +const ( + dialOnMiss = true + noDialOnMiss = false +) + +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + +func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { + if isConnectionCloseRequest(req) && dialOnMiss { + // It gets its own connection. + traceGetConn(req, addr) + const singleUse = true + cc, err := p.t.dialClientConn(addr, singleUse) + if err != nil { + return nil, err + } + return cc, nil + } + p.mu.Lock() + for _, cc := range p.conns[addr] { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } + p.mu.Unlock() + return cc, nil + } + } + if !dialOnMiss { + p.mu.Unlock() + return nil, ErrNoCachedConn + } + traceGetConn(req, addr) + call := p.getStartDialLocked(addr) + p.mu.Unlock() + <-call.done + return call.res, call.err +} + +// dialCall is an in-flight Transport dial call to a host. +type dialCall struct { + p *clientConnPool + done chan struct{} // closed when done + res *ClientConn // valid after done is closed + err error // valid after done is closed +} + +// requires p.mu is held. +func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { + if call, ok := p.dialing[addr]; ok { + // A dial is already in-flight. Don't start another. + return call + } + call := &dialCall{p: p, done: make(chan struct{})} + if p.dialing == nil { + p.dialing = make(map[string]*dialCall) + } + p.dialing[addr] = call + go call.dial(addr) + return call +} + +// run in its own goroutine. +func (c *dialCall) dial(addr string) { + const singleUse = false // shared conn + c.res, c.err = c.p.t.dialClientConn(addr, singleUse) + close(c.done) + + c.p.mu.Lock() + delete(c.p.dialing, addr) + if c.err == nil { + c.p.addConnLocked(addr, c.res) + } + c.p.mu.Unlock() +} + +// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't +// already exist. It coalesces concurrent calls with the same key. +// This is used by the http1 Transport code when it creates a new connection. 
Because +// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know +// the protocol), it can get into a situation where it has multiple TLS connections. +// This code decides which ones live or die. +// The return value used is whether c was used. +// c is never closed. +func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { + p.mu.Lock() + for _, cc := range p.conns[key] { + if cc.CanTakeNewRequest() { + p.mu.Unlock() + return false, nil + } + } + call, dup := p.addConnCalls[key] + if !dup { + if p.addConnCalls == nil { + p.addConnCalls = make(map[string]*addConnCall) + } + call = &addConnCall{ + p: p, + done: make(chan struct{}), + } + p.addConnCalls[key] = call + go call.run(t, key, c) + } + p.mu.Unlock() + + <-call.done + if call.err != nil { + return false, call.err + } + return !dup, nil +} + +type addConnCall struct { + p *clientConnPool + done chan struct{} // closed when done + err error +} + +func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { + cc, err := t.NewClientConn(tc) + + p := c.p + p.mu.Lock() + if err != nil { + c.err = err + } else { + p.addConnLocked(key, cc) + } + delete(p.addConnCalls, key) + p.mu.Unlock() + close(c.done) +} + +func (p *clientConnPool) addConn(key string, cc *ClientConn) { + p.mu.Lock() + p.addConnLocked(key, cc) + p.mu.Unlock() +} + +// p.mu must be held +func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { + for _, v := range p.conns[key] { + if v == cc { + return + } + } + if p.conns == nil { + p.conns = make(map[string][]*ClientConn) + } + if p.keys == nil { + p.keys = make(map[*ClientConn][]string) + } + p.conns[key] = append(p.conns[key], cc) + p.keys[cc] = append(p.keys[cc], key) +} + +func (p *clientConnPool) MarkDead(cc *ClientConn) { + p.mu.Lock() + defer p.mu.Unlock() + for _, key := range p.keys[cc] { + vv, ok := p.conns[key] + if !ok { + continue + } + newList := filterOutClientConn(vv, cc) + if len(newList) > 0 { + p.conns[key] = newList + } else { + delete(p.conns, key) + } + } + delete(p.keys, cc) +} + +func (p *clientConnPool) closeIdleConnections() { + p.mu.Lock() + defer p.mu.Unlock() + // TODO: don't close a cc if it was just added to the pool + // milliseconds ago and has never been used. There's currently + // a small race window with the HTTP/1 Transport's integration + // where it can add an idle conn just before using it, and + // somebody else can concurrently call CloseIdleConns and + // break some caller's RoundTrip. + for _, vv := range p.conns { + for _, cc := range vv { + cc.closeIfIdle() + } + } +} + +func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { + out := in[:0] + for _, v := range in { + if v != exclude { + out = append(out, v) + } + } + // If we filtered it out, zero out the last item to prevent + // the GC from seeing it. + if len(in) != len(out) { + in[len(in)-1] = nil + } + return out +} + +// noDialClientConnPool is an implementation of http2.ClientConnPool +// which never dials. We let the HTTP/1.1 client dial and use its TLS +// connection instead. 
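+//
+// In effect (an illustrative note): getClientConn with noDialOnMiss
+// returns ErrNoCachedConn when no pooled connection can take the
+// request, which signals net/http to dial over HTTP/1.1 and hand the
+// finished TLS connection back through addConnIfNeeded.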
+type noDialClientConnPool struct{ *clientConnPool } + +func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { + return p.getClientConn(req, addr, noDialOnMiss) +} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 00000000..a3067f8d --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. 
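+		// (The consumed chunk is returned to its pool via
+		// putDataBufferChunk, and the remaining chunks are shifted
+		// down, so chunks[0] always holds the current read position.)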
+ if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). + want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go new file mode 100644 index 00000000..71f2c463 --- /dev/null +++ b/vendor/golang.org/x/net/http2/errors.go @@ -0,0 +1,133 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" +) + +// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. +type ErrCode uint32 + +const ( + ErrCodeNo ErrCode = 0x0 + ErrCodeProtocol ErrCode = 0x1 + ErrCodeInternal ErrCode = 0x2 + ErrCodeFlowControl ErrCode = 0x3 + ErrCodeSettingsTimeout ErrCode = 0x4 + ErrCodeStreamClosed ErrCode = 0x5 + ErrCodeFrameSize ErrCode = 0x6 + ErrCodeRefusedStream ErrCode = 0x7 + ErrCodeCancel ErrCode = 0x8 + ErrCodeCompression ErrCode = 0x9 + ErrCodeConnect ErrCode = 0xa + ErrCodeEnhanceYourCalm ErrCode = 0xb + ErrCodeInadequateSecurity ErrCode = 0xc + ErrCodeHTTP11Required ErrCode = 0xd +) + +var errCodeName = map[ErrCode]string{ + ErrCodeNo: "NO_ERROR", + ErrCodeProtocol: "PROTOCOL_ERROR", + ErrCodeInternal: "INTERNAL_ERROR", + ErrCodeFlowControl: "FLOW_CONTROL_ERROR", + ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", + ErrCodeStreamClosed: "STREAM_CLOSED", + ErrCodeFrameSize: "FRAME_SIZE_ERROR", + ErrCodeRefusedStream: "REFUSED_STREAM", + ErrCodeCancel: "CANCEL", + ErrCodeCompression: "COMPRESSION_ERROR", + ErrCodeConnect: "CONNECT_ERROR", + ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", + ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", + ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", +} + +func (e ErrCode) String() string { + if s, ok := errCodeName[e]; ok { + return s + } + return fmt.Sprintf("unknown error code 0x%x", uint32(e)) +} + +// ConnectionError is an error that results in the termination of the +// entire connection. +type ConnectionError ErrCode + +func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } + +// StreamError is an error that only affects one stream within an +// HTTP/2 connection. 
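+//
+// Callers that care about stream-level failures can type-assert the
+// error they get back (hypothetical caller-side sketch, not part of
+// this file):
+//
+//	if se, ok := err.(StreamError); ok {
+//		log.Printf("stream %d reset: %v", se.StreamID, se.Code)
+//	}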
+type StreamError struct { + StreamID uint32 + Code ErrCode + Cause error // optional additional detail +} + +func streamError(id uint32, code ErrCode) StreamError { + return StreamError{StreamID: id, Code: code} +} + +func (e StreamError) Error() string { + if e.Cause != nil { + return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) + } + return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) +} + +// 6.9.1 The Flow Control Window +// "If a sender receives a WINDOW_UPDATE that causes a flow control +// window to exceed this maximum it MUST terminate either the stream +// or the connection, as appropriate. For streams, [...]; for the +// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." +type goAwayFlowError struct{} + +func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } + +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// +// Errors of this type are only returned by the frame parser functions +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. +type connError struct { + Code ErrCode // the ConnectionError error code + Reason string // additional reason +} + +func (e connError) Error() string { + return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) +} + +type pseudoHeaderError string + +func (e pseudoHeaderError) Error() string { + return fmt.Sprintf("invalid pseudo-header %q", string(e)) +} + +type duplicatePseudoHeaderError string + +func (e duplicatePseudoHeaderError) Error() string { + return fmt.Sprintf("duplicate pseudo-header %q", string(e)) +} + +type headerFieldNameError string + +func (e headerFieldNameError) Error() string { + return fmt.Sprintf("invalid header field name %q", string(e)) +} + +type headerFieldValueError string + +func (e headerFieldValueError) Error() string { + return fmt.Sprintf("invalid header field value %q", string(e)) +} + +var ( + errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") + errPseudoAfterRegular = errors.New("pseudo header field after regular") +) diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go new file mode 100644 index 00000000..cea601fc --- /dev/null +++ b/vendor/golang.org/x/net/http2/flow.go @@ -0,0 +1,50 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Flow control + +package http2 + +// flow is the flow control window's size. +type flow struct { + // n is the number of DATA bytes we're allowed to send. + // A flow is kept both on a conn and a per-stream. + n int32 + + // conn points to the shared connection-level flow that is + // shared by all streams on that conn. It is nil for the flow + // that's on the conn directly. + conn *flow +} + +func (f *flow) setConnFlow(cf *flow) { f.conn = cf } + +func (f *flow) available() int32 { + n := f.n + if f.conn != nil && f.conn.n < n { + n = f.conn.n + } + return n +} + +func (f *flow) take(n int32) { + if n > f.available() { + panic("internal error: took too much") + } + f.n -= n + if f.conn != nil { + f.conn.n -= n + } +} + +// add adds n bytes (positive or negative) to the flow control window. +// It returns false if the sum would exceed 2^31-1. 
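+//
+// The check below relies on two's-complement wraparound: if f.n and n
+// are both positive and the true sum exceeds 2^31-1, sum wraps negative,
+// so (sum > n) stops agreeing with (f.n > 0) and the add is refused.
+// For example, f.n = 2^31-1 with n = 1 wraps to sum = -2^31.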
+func (f *flow) add(n int32) bool { + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true + } + return false +} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go new file mode 100644 index 00000000..514c126c --- /dev/null +++ b/vendor/golang.org/x/net/http2/frame.go @@ -0,0 +1,1614 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const frameHeaderLen = 9 + +var padZeros = make([]byte, 255) // zeros for padding + +// A FrameType is a registered frame type as defined in +// http://http2.github.io/http2-spec/#rfc.section.11.2 +type FrameType uint8 + +const ( + FrameData FrameType = 0x0 + FrameHeaders FrameType = 0x1 + FramePriority FrameType = 0x2 + FrameRSTStream FrameType = 0x3 + FrameSettings FrameType = 0x4 + FramePushPromise FrameType = 0x5 + FramePing FrameType = 0x6 + FrameGoAway FrameType = 0x7 + FrameWindowUpdate FrameType = 0x8 + FrameContinuation FrameType = 0x9 +) + +var frameName = map[FrameType]string{ + FrameData: "DATA", + FrameHeaders: "HEADERS", + FramePriority: "PRIORITY", + FrameRSTStream: "RST_STREAM", + FrameSettings: "SETTINGS", + FramePushPromise: "PUSH_PROMISE", + FramePing: "PING", + FrameGoAway: "GOAWAY", + FrameWindowUpdate: "WINDOW_UPDATE", + FrameContinuation: "CONTINUATION", +} + +func (t FrameType) String() string { + if s, ok := frameName[t]; ok { + return s + } + return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) +} + +// Flags is a bitmask of HTTP/2 flags. +// The meaning of flags varies depending on the frame type. +type Flags uint8 + +// Has reports whether f contains all (0 or more) flags in v. +func (f Flags) Has(v Flags) bool { + return (f & v) == v +} + +// Frame-specific FrameHeader flag bits. +const ( + // Data Frame + FlagDataEndStream Flags = 0x1 + FlagDataPadded Flags = 0x8 + + // Headers Frame + FlagHeadersEndStream Flags = 0x1 + FlagHeadersEndHeaders Flags = 0x4 + FlagHeadersPadded Flags = 0x8 + FlagHeadersPriority Flags = 0x20 + + // Settings Frame + FlagSettingsAck Flags = 0x1 + + // Ping Frame + FlagPingAck Flags = 0x1 + + // Continuation Frame + FlagContinuationEndHeaders Flags = 0x4 + + FlagPushPromiseEndHeaders Flags = 0x4 + FlagPushPromisePadded Flags = 0x8 +) + +var flagName = map[FrameType]map[Flags]string{ + FrameData: { + FlagDataEndStream: "END_STREAM", + FlagDataPadded: "PADDED", + }, + FrameHeaders: { + FlagHeadersEndStream: "END_STREAM", + FlagHeadersEndHeaders: "END_HEADERS", + FlagHeadersPadded: "PADDED", + FlagHeadersPriority: "PRIORITY", + }, + FrameSettings: { + FlagSettingsAck: "ACK", + }, + FramePing: { + FlagPingAck: "ACK", + }, + FrameContinuation: { + FlagContinuationEndHeaders: "END_HEADERS", + }, + FramePushPromise: { + FlagPushPromiseEndHeaders: "END_HEADERS", + FlagPushPromisePadded: "PADDED", + }, +} + +// a frameParser parses a frame given its FrameHeader and payload +// bytes. The length of payload will always equal fh.Length (which +// might be 0). 
+type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) + +var frameParsers = map[FrameType]frameParser{ + FrameData: parseDataFrame, + FrameHeaders: parseHeadersFrame, + FramePriority: parsePriorityFrame, + FrameRSTStream: parseRSTStreamFrame, + FrameSettings: parseSettingsFrame, + FramePushPromise: parsePushPromise, + FramePing: parsePingFrame, + FrameGoAway: parseGoAwayFrame, + FrameWindowUpdate: parseWindowUpdateFrame, + FrameContinuation: parseContinuationFrame, +} + +func typeFrameParser(t FrameType) frameParser { + if f := frameParsers[t]; f != nil { + return f + } + return parseUnknownFrame +} + +// A FrameHeader is the 9 byte header of all HTTP/2 frames. +// +// See http://http2.github.io/http2-spec/#FrameHeader +type FrameHeader struct { + valid bool // caller can access []byte fields in the Frame + + // Type is the 1 byte frame type. There are ten standard frame + // types, but extension frame types may be written by WriteRawFrame + // and will be returned by ReadFrame (as UnknownFrame). + Type FrameType + + // Flags are the 1 byte of 8 potential bit flags per frame. + // They are specific to the frame type. + Flags Flags + + // Length is the length of the frame, not including the 9 byte header. + // The maximum size is one byte less than 16MB (uint24), but only + // frames up to 16KB are allowed without peer agreement. + Length uint32 + + // StreamID is which stream this frame is for. Certain frames + // are not stream-specific, in which case this field is 0. + StreamID uint32 +} + +// Header returns h. It exists so FrameHeaders can be embedded in other +// specific frame types and implement the Frame interface. +func (h FrameHeader) Header() FrameHeader { return h } + +func (h FrameHeader) String() string { + var buf bytes.Buffer + buf.WriteString("[FrameHeader ") + h.writeDebug(&buf) + buf.WriteByte(']') + return buf.String() +} + +func (h FrameHeader) writeDebug(buf *bytes.Buffer) { + buf.WriteString(h.Type.String()) + if h.Flags != 0 { + buf.WriteString(" flags=") + set := 0 + for i := uint8(0); i < 8; i++ { + if h.Flags&(1<<i) == 0 { + continue + } + set++ + if set > 1 { + buf.WriteByte('|') + } + name := flagName[h.Type][Flags(1<<i)] + if name != "" { + buf.WriteString(name) + } else { + fmt.Fprintf(buf, "0x%x", 1<<i) + } + } + } + if h.StreamID != 0 { + fmt.Fprintf(buf, " stream=%d", h.StreamID) + } + fmt.Fprintf(buf, " len=%d", h.Length) +} + +func (h *FrameHeader) checkValid() { + if !h.valid { + panic("Frame accessor called on non-owned Frame") + } +} + +func (h *FrameHeader) invalidate() { h.valid = false } + +// frame header bytes. +// Used only by ReadFrameHeader. +var fhBytes = sync.Pool{ + New: func() interface{} { + buf := make([]byte, frameHeaderLen) + return &buf + }, +} + +// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. +// Most users should use Framer.ReadFrame instead. +func ReadFrameHeader(r io.Reader) (FrameHeader, error) { + bufp := fhBytes.Get().(*[]byte) + defer fhBytes.Put(bufp) + return readFrameHeader(*bufp, r) +} + +func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) { + _, err := io.ReadFull(r, buf[:frameHeaderLen]) + if err != nil { + return FrameHeader{}, err + } + return FrameHeader{ + Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])), + Type: FrameType(buf[3]), + Flags: Flags(buf[4]), + StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1), + valid: true, + }, nil +} + +// A Frame is the base interface implemented by all frame types. 
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+	Header() FrameHeader
+
+	// invalidate is called by Framer.ReadFrame to mark this
+	// frame's buffers as invalid, since the subsequent
+	// frame will reuse them.
+	invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+	r         io.Reader
+	lastFrame Frame
+	errDetail error
+
+	// lastHeaderStream is non-zero if the last frame was an
+	// unfinished HEADERS/CONTINUATION.
+	lastHeaderStream uint32
+
+	maxReadSize uint32
+	headerBuf   [frameHeaderLen]byte
+
+	// TODO: let getReadBuf be configurable, and use a less memory-pinning
+	// allocator in server.go to minimize memory pinned for many idle conns.
+	// Will probably also need to make frame invalidation have a hook too.
+	getReadBuf func(size uint32) []byte
+	readBuf    []byte // cache for default getReadBuf
+
+	maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+	w    io.Writer
+	wbuf []byte
+
+	// AllowIllegalWrites permits the Framer's Write methods to
+	// write frames that do not conform to the HTTP/2 spec. This
+	// permits using the Framer to test other HTTP/2
+	// implementations' conformance to the spec.
+	// If false, the Write methods will prefer to return an error
+	// rather than comply.
+	AllowIllegalWrites bool
+
+	// AllowIllegalReads permits the Framer's ReadFrame method
+	// to return non-compliant frames or frame orders.
+	// This is for testing and permits using the Framer to test
+	// other HTTP/2 implementations' conformance to the spec.
+	// It is not compatible with ReadMetaHeaders.
+	AllowIllegalReads bool
+
+	// ReadMetaHeaders if non-nil causes ReadFrame to merge
+	// HEADERS and CONTINUATION frames together and return
+	// MetaHeadersFrame instead.
+	ReadMetaHeaders *hpack.Decoder
+
+	// MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+	// It's used only if ReadMetaHeaders is set; 0 means a sane default
+	// (currently 16MB).
+	// If the limit is hit, MetaHeadersFrame.Truncated is set true.
+	MaxHeaderListSize uint32
+
+	// TODO: track which type of frame & with which flags was sent
+	// last. Then return an error (unless AllowIllegalWrites) if
+	// we're in the middle of a header block and a
+	// non-Continuation or Continuation on a different stream is
+	// attempted to be written.
+
+	logReads, logWrites bool
+
+	debugFramer       *Framer // only used for logging written writes
+	debugFramerBuf    *bytes.Buffer
+	debugReadLoggerf  func(string, ...interface{})
+	debugWriteLoggerf func(string, ...interface{})
+
+	frameCache *frameCache // nil if frames aren't reused (default)
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+	if fr.MaxHeaderListSize == 0 {
+		return 16 << 20 // sane default, per docs
+	}
+	return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+	// Write the FrameHeader.
+	f.wbuf = append(f.wbuf[:0],
+		0, // 3 bytes of length, filled in in endWrite
+		0,
+		0,
+		byte(ftype),
+		byte(flags),
+		byte(streamID>>24),
+		byte(streamID>>16),
+		byte(streamID>>8),
+		byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+	// Now that we know the final size, fill in the FrameHeader in
+	// the space previously reserved for it. Abuse append.
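+	// (Explanatory note: appending into f.wbuf[:0] rewrites the three
+	// reserved length bytes in place; the backing array is unchanged,
+	// so the slice returned by append can be discarded.)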
+ length := len(f.wbuf) - frameHeaderLen + if length >= (1 << 24) { + return ErrFrameTooLarge + } + _ = append(f.wbuf[:0], + byte(length>>16), + byte(length>>8), + byte(length)) + if f.logWrites { + f.logWrite() + } + + n, err := f.w.Write(f.wbuf) + if err == nil && n != len(f.wbuf) { + err = io.ErrShortWrite + } + return err +} + +func (f *Framer) logWrite() { + if f.debugFramer == nil { + f.debugFramerBuf = new(bytes.Buffer) + f.debugFramer = NewFramer(nil, f.debugFramerBuf) + f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below + // Let us read anything, even if we accidentally wrote it + // in the wrong order: + f.debugFramer.AllowIllegalReads = true + } + f.debugFramerBuf.Write(f.wbuf) + fr, err := f.debugFramer.ReadFrame() + if err != nil { + f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f) + return + } + f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) +} + +func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } +func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } +func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } +func (f *Framer) writeUint32(v uint32) { + f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +const ( + minMaxFrameSize = 1 << 14 + maxFrameSize = 1<<24 - 1 +) + +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + +// NewFramer returns a Framer that writes frames to w and reads them from r. +func NewFramer(w io.Writer, r io.Reader) *Framer { + fr := &Framer{ + w: w, + r: r, + logReads: logFrameReads, + logWrites: logFrameWrites, + debugReadLoggerf: log.Printf, + debugWriteLoggerf: log.Printf, + } + fr.getReadBuf = func(size uint32) []byte { + if cap(fr.readBuf) >= int(size) { + return fr.readBuf[:size] + } + fr.readBuf = make([]byte, size) + return fr.readBuf + } + fr.SetMaxReadFrameSize(maxFrameSize) + return fr +} + +// SetMaxReadFrameSize sets the maximum size of a frame +// that will be read by a subsequent call to ReadFrame. +// It is the caller's responsibility to advertise this +// limit with a SETTINGS frame. +func (fr *Framer) SetMaxReadFrameSize(v uint32) { + if v > maxFrameSize { + v = maxFrameSize + } + fr.maxReadSize = v +} + +// ErrorDetail returns a more detailed error of the last error +// returned by Framer.ReadFrame. For instance, if ReadFrame +// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail +// will say exactly what was invalid. ErrorDetail is not guaranteed +// to return a non-nil value and like the rest of the http2 package, +// its return value is not protected by an API compatibility promise. +// ErrorDetail is reset after the next call to ReadFrame. +func (fr *Framer) ErrorDetail() error { + return fr.errDetail +} + +// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer +// sends a frame that is larger than declared with SetMaxReadFrameSize. +var ErrFrameTooLarge = errors.New("http2: frame too large") + +// terminalReadFrameError reports whether err is an unrecoverable +// error from ReadFrame and no other frames should be read. 
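+// (StreamError is the one recoverable case: it poisons only a single
+// stream, so the connection can keep reading subsequent frames.)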
+func terminalReadFrameError(err error) bool {
+	if _, ok := err.(StreamError); ok {
+		return false
+	}
+	return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+	fr.errDetail = nil
+	if fr.lastFrame != nil {
+		fr.lastFrame.invalidate()
+	}
+	fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+	if err != nil {
+		return nil, err
+	}
+	if fh.Length > fr.maxReadSize {
+		return nil, ErrFrameTooLarge
+	}
+	payload := fr.getReadBuf(fh.Length)
+	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		return nil, err
+	}
+	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+	if err != nil {
+		if ce, ok := err.(connError); ok {
+			return nil, fr.connError(ce.Code, ce.Reason)
+		}
+		return nil, err
+	}
+	if err := fr.checkFrameOrder(f); err != nil {
+		return nil, err
+	}
+	if fr.logReads {
+		fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+	}
+	if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+		return fr.readMetaFrame(f.(*HeadersFrame))
+	}
+	return f, nil
+}
+
+// connError returns ConnectionError(code) but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+	fr.errDetail = errors.New(reason)
+	return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+	last := fr.lastFrame
+	fr.lastFrame = f
+	if fr.AllowIllegalReads {
+		return nil
+	}
+
+	fh := f.Header()
+	if fr.lastHeaderStream != 0 {
+		if fh.Type != FrameContinuation {
+			return fr.connError(ErrCodeProtocol,
+				fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+					fh.Type, fh.StreamID,
+					last.Header().Type, fr.lastHeaderStream))
+		}
+		if fh.StreamID != fr.lastHeaderStream {
+			return fr.connError(ErrCodeProtocol,
+				fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+					fh.StreamID, fr.lastHeaderStream))
+		}
+	} else if fh.Type == FrameContinuation {
+		return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+	}
+
+	switch fh.Type {
+	case FrameHeaders, FrameContinuation:
+		if fh.Flags.Has(FlagHeadersEndHeaders) {
+			fr.lastHeaderStream = 0
+		} else {
+			fr.lastHeaderStream = fh.StreamID
+		}
+	}
+
+	return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+	FrameHeader
+	data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
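+//
+// Callers that need the bytes beyond that point must copy them first
+// (illustrative sketch):
+//
+//	saved := append([]byte(nil), f.Data()...)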
+func (f *DataFrame) Data() []byte { + f.checkValid() + return f.data +} + +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if fh.StreamID == 0 { + // DATA frames MUST be associated with a stream. If a + // DATA frame is received whose stream identifier + // field is 0x0, the recipient MUST respond with a + // connection error (Section 5.4.1) of type + // PROTOCOL_ERROR. + return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} + } + f := fc.getDataFrame() + f.FrameHeader = fh + + var padSize byte + if fh.Flags.Has(FlagDataPadded) { + var err error + payload, padSize, err = readByte(payload) + if err != nil { + return nil, err + } + } + if int(padSize) > len(payload) { + // If the length of the padding is greater than the + // length of the frame payload, the recipient MUST + // treat this as a connection error. + // Filed: https://github.com/http2/http2-spec/issues/610 + return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} + } + f.data = payload[:len(payload)-int(padSize)] + return f, nil +} + +var ( + errStreamID = errors.New("invalid stream ID") + errDepStreamID = errors.New("invalid dependent stream ID") + errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") +) + +func validStreamIDOrZero(streamID uint32) bool { + return streamID&(1<<31) == 0 +} + +func validStreamID(streamID uint32) bool { + return streamID != 0 && streamID&(1<<31) == 0 +} + +// WriteData writes a DATA frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { + return f.WriteDataPadded(streamID, endStream, data, nil) +} + +// WriteDataPadded writes a DATA frame with optional padding. +// +// If pad is nil, the padding bit is not sent. +// The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility not to violate the maximum frame size +// and to not call other Write methods concurrently. +func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } + } + var flags Flags + if endStream { + flags |= FlagDataEndStream + } + if pad != nil { + flags |= FlagDataPadded + } + f.startWrite(FrameData, flags, streamID) + if pad != nil { + f.wbuf = append(f.wbuf, byte(len(pad))) + } + f.wbuf = append(f.wbuf, data...) + f.wbuf = append(f.wbuf, pad...) + return f.endWrite() +} + +// A SettingsFrame conveys configuration parameters that affect how +// endpoints communicate, such as preferences and constraints on peer +// behavior. 
+// +// See http://http2.github.io/http2-spec/#SETTINGS +type SettingsFrame struct { + FrameHeader + p []byte +} + +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { + // When this (ACK 0x1) bit is set, the payload of the + // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame with the ACK flag set and a length + // field value other than 0 MUST be treated as a + // connection error (Section 5.4.1) of type + // FRAME_SIZE_ERROR. + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + // SETTINGS frames always apply to a connection, + // never a single stream. The stream identifier for a + // SETTINGS frame MUST be zero (0x0). If an endpoint + // receives a SETTINGS frame whose stream identifier + // field is anything other than 0x0, the endpoint MUST + // respond with a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR. + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p)%6 != 0 { + // Expecting even number of 6 byte settings. + return nil, ConnectionError(ErrCodeFrameSize) + } + f := &SettingsFrame{FrameHeader: fh, p: p} + if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { + // Values above the maximum flow control window size of 2^31 - 1 MUST + // be treated as a connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + return nil, ConnectionError(ErrCodeFlowControl) + } + return f, nil +} + +func (f *SettingsFrame) IsAck() bool { + return f.FrameHeader.Flags.Has(FlagSettingsAck) +} + +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true + } + } + return 0, false +} + +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + +// ForeachSetting runs fn for each setting. +// It stops and returns the first error. +func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { + f.checkValid() + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { + return err + } + } + return nil +} + +// WriteSettings writes a SETTINGS frame with zero or more settings +// specified and the ACK bit not set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. 
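+//
+// For example, a connection preface might advertise limits like these
+// (hypothetical values):
+//
+//	fr.WriteSettings(
+//		Setting{ID: SettingMaxFrameSize, Val: 1 << 20},
+//		Setting{ID: SettingMaxConcurrentStreams, Val: 250},
+//	)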
+func (f *Framer) WriteSettings(settings ...Setting) error { + f.startWrite(FrameSettings, 0, 0) + for _, s := range settings { + f.writeUint16(uint16(s.ID)) + f.writeUint32(s.Val) + } + return f.endWrite() +} + +// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteSettingsAck() error { + f.startWrite(FrameSettings, FlagSettingsAck, 0) + return f.endWrite() +} + +// A PingFrame is a mechanism for measuring a minimal round trip time +// from the sender, as well as determining whether an idle connection +// is still functional. +// See http://http2.github.io/http2-spec/#rfc.section.6.7 +type PingFrame struct { + FrameHeader + Data [8]byte +} + +func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } + +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { + if len(payload) != 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + f := &PingFrame{FrameHeader: fh} + copy(f.Data[:], payload) + return f, nil +} + +func (f *Framer) WritePing(ack bool, data [8]byte) error { + var flags Flags + if ack { + flags = FlagPingAck + } + f.startWrite(FramePing, flags, 0) + f.writeBytes(data[:]) + return f.endWrite() +} + +// A GoAwayFrame informs the remote peer to stop creating streams on this connection. +// See http://http2.github.io/http2-spec/#rfc.section.6.8 +type GoAwayFrame struct { + FrameHeader + LastStreamID uint32 + ErrCode ErrCode + debugData []byte +} + +// DebugData returns any debug data in the GOAWAY frame. Its contents +// are not defined. +// The caller must not retain the returned memory past the next +// call to ReadFrame. +func (f *GoAwayFrame) DebugData() []byte { + f.checkValid() + return f.debugData +} + +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID != 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + if len(p) < 8 { + return nil, ConnectionError(ErrCodeFrameSize) + } + return &GoAwayFrame{ + FrameHeader: fh, + LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), + ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), + debugData: p[8:], + }, nil +} + +func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { + f.startWrite(FrameGoAway, 0, 0) + f.writeUint32(maxStreamID & (1<<31 - 1)) + f.writeUint32(uint32(code)) + f.writeBytes(debugData) + return f.endWrite() +} + +// An UnknownFrame is the frame type returned when the frame type is unknown +// or no specific frame type parser exists. +type UnknownFrame struct { + FrameHeader + p []byte +} + +// Payload returns the frame's payload (after the header). It is not +// valid to call this method after a subsequent call to +// Framer.ReadFrame, nor is it valid to retain the returned slice. +// The memory is owned by the Framer and is invalidated when the next +// frame is read. +func (f *UnknownFrame) Payload() []byte { + f.checkValid() + return f.p +} + +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + return &UnknownFrame{fh, p}, nil +} + +// A WindowUpdateFrame is used to implement flow control. 
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+	FrameHeader
+	Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+	if len(p) != 4 {
+		return nil, ConnectionError(ErrCodeFrameSize)
+	}
+	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+	if inc == 0 {
+		// A receiver MUST treat the receipt of a
+		// WINDOW_UPDATE frame with a flow control window
+		// increment of 0 as a stream error (Section 5.4.2) of
+		// type PROTOCOL_ERROR; errors on the connection flow
+		// control window MUST be treated as a connection
+		// error (Section 5.4.1).
+		if fh.StreamID == 0 {
+			return nil, ConnectionError(ErrCodeProtocol)
+		}
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
+	}
+	return &WindowUpdateFrame{
+		FrameHeader: fh,
+		Increment:   inc,
+	}, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+	// "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+	if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+		return errors.New("illegal window increment value")
+	}
+	f.startWrite(FrameWindowUpdate, 0, streamID)
+	f.writeUint32(incr)
+	return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+	FrameHeader
+
+	// Priority is set if FlagHeadersPriority is set in the FrameHeader.
+	Priority PriorityParam
+
+	headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+	f.checkValid()
+	return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+	hf := &HeadersFrame{
+		FrameHeader: fh,
+	}
+	if fh.StreamID == 0 {
+		// HEADERS frames MUST be associated with a stream. If a HEADERS frame
+		// is received whose stream identifier field is 0x0, the recipient MUST
+		// respond with a connection error (Section 5.4.1) of type
+		// PROTOCOL_ERROR.
+		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+	}
+	var padLength uint8
+	if fh.Flags.Has(FlagHeadersPadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+	if fh.Flags.Has(FlagHeadersPriority) {
+		var v uint32
+		p, v, err = readUint32(p)
+		if err != nil {
+			return nil, err
+		}
+		hf.Priority.StreamDep = v & 0x7fffffff
+		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+		p, hf.Priority.Weight, err = readByte(p)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if len(p)-int(padLength) <= 0 {
+		return nil, streamError(fh.StreamID, ErrCodeProtocol)
+	}
+	hf.headerFragBuf = p[:len(p)-int(padLength)]
+	return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndStream indicates that the header block is the last that
+	// the endpoint will send for the identified stream. Setting
+	// this flag causes the stream to enter one of "half closed"
+	// states.
+	EndStream bool
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+
+	// Priority, if non-zero, includes stream priority information
+	// in the HEADERS frame.
+	Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagHeadersPadded
+	}
+	if p.EndStream {
+		flags |= FlagHeadersEndStream
+	}
+	if p.EndHeaders {
+		flags |= FlagHeadersEndHeaders
+	}
+	if !p.Priority.IsZero() {
+		flags |= FlagHeadersPriority
+	}
+	f.startWrite(FrameHeaders, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !p.Priority.IsZero() {
+		v := p.Priority.StreamDep
+		if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+			return errDepStreamID
+		}
+		if p.Priority.Exclusive {
+			v |= 1 << 31
+		}
+		f.writeUint32(v)
+		f.writeByte(p.Priority.Weight)
+	}
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+	FrameHeader
+	PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+	// StreamDep is a 31-bit stream identifier for the
+	// stream that this stream depends on. Zero means no
+	// dependency.
+	StreamDep uint32
+
+	// Exclusive is whether the dependency is exclusive.
+	Exclusive bool
+
+	// Weight is the stream's zero-indexed weight. It should be
+	// set together with StreamDep, or neither should be set. Per
+	// the spec, "Add one to the value to obtain a weight between
+	// 1 and 256."
+	Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+	return p == PriorityParam{}
+}
+
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+	if fh.StreamID == 0 {
+		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+	}
+	if len(payload) != 5 {
+		return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+	}
+	v := binary.BigEndian.Uint32(payload[:4])
+	streamID := v & 0x7fffffff // mask off high bit
+	return &PriorityFrame{
+		FrameHeader: fh,
+		PriorityParam: PriorityParam{
+			Weight:    payload[4],
+			StreamDep: streamID,
+			Exclusive: streamID != v, // was high bit set?
+		},
+	}, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
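+//
+// Weight is the wire value, one less than the effective weight. For
+// example, to make stream 5 depend on stream 3 with an effective
+// weight of 16 (hypothetical stream IDs):
+//
+//	fr.WritePriority(5, PriorityParam{StreamDep: 3, Weight: 15})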
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + if !validStreamIDOrZero(p.StreamDep) { + return errDepStreamID + } + f.startWrite(FramePriority, 0, streamID) + v := p.StreamDep + if p.Exclusive { + v |= 1 << 31 + } + f.writeUint32(v) + f.writeByte(p.Weight) + return f.endWrite() +} + +// A RSTStreamFrame allows for abnormal termination of a stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.4 +type RSTStreamFrame struct { + FrameHeader + ErrCode ErrCode +} + +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if len(p) != 4 { + return nil, ConnectionError(ErrCodeFrameSize) + } + if fh.StreamID == 0 { + return nil, ConnectionError(ErrCodeProtocol) + } + return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil +} + +// WriteRSTStream writes a RST_STREAM frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + f.startWrite(FrameRSTStream, 0, streamID) + f.writeUint32(uint32(code)) + return f.endWrite() +} + +// A ContinuationFrame is used to continue a sequence of header block fragments. +// See http://http2.github.io/http2-spec/#rfc.section.6.10 +type ContinuationFrame struct { + FrameHeader + headerFragBuf []byte +} + +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { + if fh.StreamID == 0 { + return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} + } + return &ContinuationFrame{fh, p}, nil +} + +func (f *ContinuationFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *ContinuationFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) +} + +// WriteContinuation writes a CONTINUATION frame. +// +// It will perform exactly one Write to the underlying Writer. +// It is the caller's responsibility to not call other Write methods concurrently. +func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { + if !validStreamID(streamID) && !f.AllowIllegalWrites { + return errStreamID + } + var flags Flags + if endHeaders { + flags |= FlagContinuationEndHeaders + } + f.startWrite(FrameContinuation, flags, streamID) + f.wbuf = append(f.wbuf, headerBlockFragment...) + return f.endWrite() +} + +// A PushPromiseFrame is used to initiate a server stream. +// See http://http2.github.io/http2-spec/#rfc.section.6.6 +type PushPromiseFrame struct { + FrameHeader + PromiseID uint32 + headerFragBuf []byte // not owned +} + +func (f *PushPromiseFrame) HeaderBlockFragment() []byte { + f.checkValid() + return f.headerFragBuf +} + +func (f *PushPromiseFrame) HeadersEnded() bool { + return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) +} + +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { + pp := &PushPromiseFrame{ + FrameHeader: fh, + } + if pp.StreamID == 0 { + // PUSH_PROMISE frames MUST be associated with an existing, + // peer-initiated stream. The stream identifier of a + // PUSH_PROMISE frame indicates the stream it is associated + // with. 
If the stream identifier field specifies the value
+		// 0x0, a recipient MUST respond with a connection error
+		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	// The PUSH_PROMISE frame includes optional padding.
+	// Padding fields and flags are identical to those defined for DATA frames.
+	var padLength uint8
+	if fh.Flags.Has(FlagPushPromisePadded) {
+		if p, padLength, err = readByte(p); err != nil {
+			return
+		}
+	}
+
+	p, pp.PromiseID, err = readUint32(p)
+	if err != nil {
+		return
+	}
+	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+	if int(padLength) > len(p) {
+		// like the DATA frame, error out if padding is longer than the body.
+		return nil, ConnectionError(ErrCodeProtocol)
+	}
+	pp.headerFragBuf = p[:len(p)-int(padLength)]
+	return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+	// StreamID is the required Stream ID to initiate.
+	StreamID uint32
+
+	// PromiseID is the required Stream ID that this
+	// Push Promise announces.
+	PromiseID uint32
+
+	// BlockFragment is part (or all) of a Header Block.
+	BlockFragment []byte
+
+	// EndHeaders indicates that this frame contains an entire
+	// header block and is not followed by any
+	// CONTINUATION frames.
+	EndHeaders bool
+
+	// PadLength is the optional number of bytes of zeros to add
+	// to this frame.
+	PadLength uint8
+}
+
+// WritePushPromise writes a single PushPromise Frame.
+//
+// As with HEADERS frames, this is the low-level call for writing
+// individual frames. Continuation frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+	if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	var flags Flags
+	if p.PadLength != 0 {
+		flags |= FlagPushPromisePadded
+	}
+	if p.EndHeaders {
+		flags |= FlagPushPromiseEndHeaders
+	}
+	f.startWrite(FramePushPromise, flags, p.StreamID)
+	if p.PadLength != 0 {
+		f.writeByte(p.PadLength)
+	}
+	if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+		return errStreamID
+	}
+	f.writeUint32(p.PromiseID)
+	f.wbuf = append(f.wbuf, p.BlockFragment...)
+	f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+	return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+	f.startWrite(t, flags, streamID)
+	f.writeBytes(payload)
+	return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+	if len(p) == 0 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+	if len(p) < 4 {
+		return nil, 0, io.ErrUnexpectedEOF
+	}
+	return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+	StreamEnded() bool
+}
+
+type headersEnder interface {
+	HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+	headersEnder
+	HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
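+//
+// A reader typically pulls the pseudo headers out first and then walks
+// the regular fields (illustrative sketch):
+//
+//	path := mh.PseudoValue("path")
+//	for _, hf := range mh.RegularFields() {
+//		fmt.Printf("%s: %s\n", hf.Name, hf.Value)
+//	}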
+type MetaHeadersFrame struct {
+	*HeadersFrame
+
+	// Fields are the fields contained in the HEADERS and
+	// CONTINUATION frames. The underlying slice is owned by the
+	// Framer and must not be retained after the next call to
+	// ReadFrame.
+	//
+	// Fields are guaranteed to be in the correct http2 order and
+	// not have unknown pseudo header fields or invalid header
+	// field names or values. Required pseudo header fields may be
+	// missing, however. Use the MetaHeadersFrame.PseudoValue
+	// accessor method to access pseudo headers.
+	Fields []hpack.HeaderField
+
+	// Truncated is whether the max header list size limit was hit
+	// and Fields is incomplete. The hpack decoder state is still
+	// valid, however.
+	Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+	for _, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return ""
+		}
+		if hf.Name[1:] == pseudo {
+			return hf.Value
+		}
+	}
+	return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+	for i, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return mh.Fields[i:]
+		}
+	}
+	return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+	for i, hf := range mh.Fields {
+		if !hf.IsPseudo() {
+			return mh.Fields[:i]
+		}
+	}
+	return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+	var isRequest, isResponse bool
+	pf := mh.PseudoFields()
+	for i, hf := range pf {
+		switch hf.Name {
+		case ":method", ":path", ":scheme", ":authority":
+			isRequest = true
+		case ":status":
+			isResponse = true
+		default:
+			return pseudoHeaderError(hf.Name)
+		}
+		// Check for duplicates.
+		// This would be a bad algorithm, but N is 4.
+		// And this doesn't allocate.
+		for _, hf2 := range pf[:i] {
+			if hf.Name == hf2.Name {
+				return duplicatePseudoHeaderError(hf.Name)
+			}
+		}
+	}
+	if isRequest && isResponse {
+		return errMixPseudoHeaderTypes
+	}
+	return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+	v := fr.maxHeaderListSize()
+	if uint32(int(v)) == v {
+		return int(v)
+	}
+	// They had a crazy big number for MaxHeaderBytes anyway,
+	// so give them unlimited header lengths:
+	return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded HPACK values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { + if fr.AllowIllegalReads { + return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") + } + mh := &MetaHeadersFrame{ + HeadersFrame: hf, + } + var remainSize = fr.maxHeaderListSize() + var sawRegular bool + + var invalid error // pseudo header field errors + hdec := fr.ReadMetaHeaders + hdec.SetEmitEnabled(true) + hdec.SetMaxStringLength(fr.maxHeaderStringLen()) + hdec.SetEmitFunc(func(hf hpack.HeaderField) { + if VerboseLogs && fr.logReads { + fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) + } + if !httpguts.ValidHeaderFieldValue(hf.Value) { + invalid = headerFieldValueError(hf.Value) + } + isPseudo := strings.HasPrefix(hf.Name, ":") + if isPseudo { + if sawRegular { + invalid = errPseudoAfterRegular + } + } else { + sawRegular = true + if !validWireHeaderFieldName(hf.Name) { + invalid = headerFieldNameError(hf.Name) + } + } + + if invalid != nil { + hdec.SetEmitEnabled(false) + return + } + + size := hf.Size() + if size > remainSize { + hdec.SetEmitEnabled(false) + mh.Truncated = true + return + } + remainSize -= size + + mh.Fields = append(mh.Fields, hf) + }) + // Lose reference to MetaHeadersFrame: + defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) + + var hc headersOrContinuation = hf + for { + frag := hc.HeaderBlockFragment() + if _, err := hdec.Write(frag); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + + if hc.HeadersEnded() { + break + } + if f, err := fr.ReadFrame(); err != nil { + return nil, err + } else { + hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder + } + } + + mh.HeadersFrame.headerFragBuf = nil + mh.HeadersFrame.invalidate() + + if err := hdec.Close(); err != nil { + return nil, ConnectionError(ErrCodeCompression) + } + if invalid != nil { + fr.errDetail = invalid + if VerboseLogs { + log.Printf("http2: invalid header: %v", invalid) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} + } + if err := mh.checkPseudos(); err != nil { + fr.errDetail = err + if VerboseLogs { + log.Printf("http2: invalid pseudo headers: %v", err) + } + return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} + } + return mh, nil +} + +func summarizeFrame(f Frame) string { + var buf bytes.Buffer + f.Header().writeDebug(&buf) + switch f := f.(type) { + case *SettingsFrame: + n := 0 + f.ForeachSetting(func(s Setting) error { + n++ + if n == 1 { + buf.WriteString(", settings:") + } + fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) + return nil + }) + if n > 0 { + buf.Truncate(buf.Len() - 1) // remove trailing comma + } + case *DataFrame: + data := f.Data() + const max = 256 + if len(data) > max { + data = data[:max] + } + fmt.Fprintf(&buf, " data=%q", data) + if len(f.Data()) > max { + fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) + } + case *WindowUpdateFrame: + if f.StreamID == 0 { + buf.WriteString(" (conn)") + } + fmt.Fprintf(&buf, " incr=%v", f.Increment) + case *PingFrame: + fmt.Fprintf(&buf, " ping=%q", f.Data[:]) + case *GoAwayFrame: + fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", + f.LastStreamID, f.ErrCode, f.debugData) + case *RSTStreamFrame: + fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) + } + return buf.String() +} diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 00000000..3a131016 --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go new file mode 100644 index 00000000..9933c9f8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -0,0 +1,170 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Defensive debug-only utility to track that functions run on the +// goroutine that they're supposed to. + +package http2 + +import ( + "bytes" + "errors" + "fmt" + "os" + "runtime" + "strconv" + "sync" +) + +var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" + +type goroutineLock uint64 + +func newGoroutineLock() goroutineLock { + if !DebugGoroutines { + return 0 + } + return goroutineLock(curGoroutineID()) +} + +func (g goroutineLock) check() { + if !DebugGoroutines { + return + } + if curGoroutineID() != uint64(g) { + panic("running on the wrong goroutine") + } +} + +func (g goroutineLock) checkNotOn() { + if !DebugGoroutines { + return + } + if curGoroutineID() == uint64(g) { + panic("running on the wrong goroutine") + } +} + +var goroutineSpace = []byte("goroutine ") + +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := parseUintBytes(b, 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} + +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// parseUintBytes is like strconv.ParseUint, but using a []byte. +func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { + var cutoff, maxVal uint64 + + if bitSize == 0 { + bitSize = int(strconv.IntSize) + } + + s0 := s + switch { + case len(s) < 1: + err = strconv.ErrSyntax + goto Error + + case 2 <= base && base <= 36: + // valid base; nothing to do + + case base == 0: + // Look for octal, hex prefix. 
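+		// e.g. "0x1f" selects base 16, "017" base 8, and "42" base 10.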
+ switch { + case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): + base = 16 + s = s[2:] + if len(s) < 1 { + err = strconv.ErrSyntax + goto Error + } + case s[0] == '0': + base = 8 + default: + base = 10 + } + + default: + err = errors.New("invalid base " + strconv.Itoa(base)) + goto Error + } + + n = 0 + cutoff = cutoff64(base) + maxVal = 1<<uint(bitSize) - 1 + + for i := 0; i < len(s); i++ { + var v byte + d := s[i] + switch { + case '0' <= d && d <= '9': + v = d - '0' + case 'a' <= d && d <= 'z': + v = d - 'a' + 10 + case 'A' <= d && d <= 'Z': + v = d - 'A' + 10 + default: + n = 0 + err = strconv.ErrSyntax + goto Error + } + if int(v) >= base { + n = 0 + err = strconv.ErrSyntax + goto Error + } + + if n >= cutoff { + // n*base overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n *= uint64(base) + + n1 := n + uint64(v) + if n1 < n || n1 > maxVal { + // n+v overflows + n = 1<<64 - 1 + err = strconv.ErrRange + goto Error + } + n = n1 + } + + return n, nil + +Error: + return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} +} + +// Return the first number n such that n*base >= 1<<64. +func cutoff64(base int) uint64 { + if base < 2 { + return 0 + } + return (1<<64-1)/uint64(base) + 1 +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go new file mode 100644 index 00000000..c3ff3fa1 --- /dev/null +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -0,0 +1,88 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "net/http" + "strings" + "sync" +) + +var ( + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case +) + +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ + "accept", + "accept-charset", + "accept-encoding", + "accept-language", + "accept-ranges", + "age", + "access-control-allow-origin", + "allow", + "authorization", + "cache-control", + "content-disposition", + "content-encoding", + "content-language", + "content-length", + "content-location", + "content-range", + "content-type", + "cookie", + "date", + "etag", + "expect", + "expires", + "from", + "host", + "if-match", + "if-modified-since", + "if-none-match", + "if-unmodified-since", + "last-modified", + "link", + "location", + "max-forwards", + "proxy-authenticate", + "proxy-authorization", + "range", + "referer", + "refresh", + "retry-after", + "server", + "set-cookie", + "strict-transport-security", + "trailer", + "transfer-encoding", + "user-agent", + "vary", + "via", + "www-authenticate", + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { + chk := http.CanonicalHeaderKey(v) + commonLowerHeader[chk] = v + commonCanonHeader[v] = chk + } +} + +func lowerHeader(v string) string { + buildCommonHeaderMapsOnce() + if s, ok := commonLowerHeader[v]; ok { + return s + } + return strings.ToLower(v) +} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go new file mode 100644 index 00000000..1565cf27 --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -0,0 +1,240 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+	"io"
+)
+
+const (
+	uint32Max              = ^uint32(0)
+	initialHeaderTableSize = 4096
+)
+
+type Encoder struct {
+	dynTab dynamicTable
+	// minSize is the minimum table size set by
+	// SetMaxDynamicTableSize after the previous Header Table Size
+	// Update.
+	minSize uint32
+	// maxSizeLimit is the maximum table size this encoder
+	// supports. This protects the encoder from an overly large
+	// size.
+	maxSizeLimit uint32
+	// tableSizeUpdate indicates whether a "Header Table Size
+	// Update" is required.
+	tableSizeUpdate bool
+	w   io.Writer
+	buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding. The
+// encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+	e := &Encoder{
+		minSize:         uint32Max,
+		maxSizeLimit:    initialHeaderTableSize,
+		tableSizeUpdate: false,
+		w:               w,
+	}
+	e.dynTab.table.init()
+	e.dynTab.setMaxSize(initialHeaderTableSize)
+	return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for a "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+	e.buf = e.buf[:0]
+
+	if e.tableSizeUpdate {
+		e.tableSizeUpdate = false
+		if e.minSize < e.dynTab.maxSize {
+			e.buf = appendTableSize(e.buf, e.minSize)
+		}
+		e.minSize = uint32Max
+		e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+	}
+
+	idx, nameValueMatch := e.searchTable(f)
+	if nameValueMatch {
+		e.buf = appendIndexed(e.buf, idx)
+	} else {
+		indexing := e.shouldIndex(f)
+		if indexing {
+			e.dynTab.add(f)
+		}
+
+		if idx == 0 {
+			e.buf = appendNewName(e.buf, f, indexing)
+		} else {
+			e.buf = appendIndexedName(e.buf, f, idx, indexing)
+		}
+	}
+	n, err := e.w.Write(e.buf)
+	if err == nil && n != len(e.buf) {
+		err = io.ErrShortWrite
+	}
+	return err
+}
+
+// searchTable searches f in both the static and dynamic header tables.
+// The static header table is searched first. The dynamic header table
+// is searched only when there is no exact match for both name and
+// value. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only the name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+	i, nameValueMatch = staticTable.search(f)
+	if nameValueMatch {
+		return i, true
+	}
+
+	j, nameValueMatch := e.dynTab.table.search(f)
+	if nameValueMatch || (i == 0 && j != 0) {
+		return j + uint64(staticTable.len()), nameValueMatch
+	}
+
+	return i, false
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+	if v > e.maxSizeLimit {
+		v = e.maxSizeLimit
+	}
+	if v < e.minSize {
+		e.minSize = v
+	}
+	e.tableSizeUpdate = true
+	e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, which is the same as the default dynamic header table
+// size described in the HPACK specification.
If the current maximum
+// dynamic header table size is strictly greater than v, a "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+	e.maxSizeLimit = v
+	if e.dynTab.maxSize > v {
+		e.tableSizeUpdate = true
+		e.dynTab.setMaxSize(v)
+	}
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+	return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 7, i)
+	dst[first] |= 0x80
+	return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header Field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+	dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+	dst = appendHpackString(dst, f.Name)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring to the indexed name
+// entry, as encoded in one of "Literal Header Field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+	first := len(dst)
+	var n byte
+	if indexing {
+		n = 6
+	} else {
+		n = 4
+	}
+	dst = appendVarInt(dst, n, i)
+	dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+	return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+	first := len(dst)
+	dst = appendVarInt(dst, 5, uint64(v))
+	dst[first] |= 0x20
+	return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+	k := uint64((1 << n) - 1)
+	if i < k {
+		return append(dst, byte(i))
+	}
+	dst = append(dst, byte(k))
+	i -= k
+	for ; i >= 128; i >>= 7 {
+		dst = append(dst, byte(0x80|(i&0x7f)))
+	}
+	return append(dst, byte(i))
+}
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when it produces a strictly
+// shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+	huffmanLength := HuffmanEncodeLength(s)
+	if huffmanLength < uint64(len(s)) {
+		first := len(dst)
+		dst = appendVarInt(dst, 7, huffmanLength)
+		dst = AppendHuffmanString(dst, s)
+		dst[first] |= 0x80
+	} else {
+		dst = appendVarInt(dst, 7, uint64(len(s)))
+		dst = append(dst, s...)
+	}
+	return dst
+}
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned.
If sensitive is +// false and indexing is true, type byte for "Incremental Indexing" +// representation is returned. Otherwise, type byte for "Without +// Indexing" is returned. +func encodeTypeByte(indexing, sensitive bool) byte { + if sensitive { + return 0x10 + } + if indexing { + return 0x40 + } + return 0 +} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go new file mode 100644 index 00000000..85f18a2b --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -0,0 +1,504 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package hpack implements HPACK, a compression format for +// efficiently representing HTTP header fields in the context of HTTP/2. +// +// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 +package hpack + +import ( + "bytes" + "errors" + "fmt" +) + +// A DecodingError is something the spec defines as a decoding error. +type DecodingError struct { + Err error +} + +func (de DecodingError) Error() string { + return fmt.Sprintf("decoding error: %v", de.Err) +} + +// An InvalidIndexError is returned when an encoder references a table +// entry before the static table or after the end of the dynamic table. +type InvalidIndexError int + +func (e InvalidIndexError) Error() string { + return fmt.Sprintf("invalid indexed representation index %d", int(e)) +} + +// A HeaderField is a name-value pair. Both the name and value are +// treated as opaque sequences of octets. +type HeaderField struct { + Name, Value string + + // Sensitive means that this header field should never be + // indexed. + Sensitive bool +} + +// IsPseudo reports whether the header field is an http2 pseudo header. +// That is, it reports whether it starts with a colon. +// It is not otherwise guaranteed to be a valid pseudo header field, +// though. +func (hf HeaderField) IsPseudo() bool { + return len(hf.Name) != 0 && hf.Name[0] == ':' +} + +func (hf HeaderField) String() string { + var suffix string + if hf.Sensitive { + suffix = " (sensitive)" + } + return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) +} + +// Size returns the size of an entry per RFC 7541 section 4.1. +func (hf HeaderField) Size() uint32 { + // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 + // "The size of the dynamic table is the sum of the size of + // its entries. The size of an entry is the sum of its name's + // length in octets (as defined in Section 5.2), its value's + // length in octets (see Section 5.2), plus 32. The size of + // an entry is calculated using the length of the name and + // value without any Huffman encoding applied." + + // This can overflow if somebody makes a large HeaderField + // Name and/or Value by hand, but we don't care, because that + // won't happen on the wire because the encoding doesn't allow + // it. + return uint32(len(hf.Name) + len(hf.Value) + 32) +} + +// A Decoder is the decoding context for incremental processing of +// header blocks. +type Decoder struct { + dynTab dynamicTable + emit func(f HeaderField) + + emitEnabled bool // whether calls to emit are enabled + maxStrLen int // 0 means unlimited + + // buf is the unparsed buffer. It's only written to + // saveBuf if it was truncated in the middle of a header + // block. Because it's usually not owned, we can only + // process it under Write. 
+ buf []byte // not owned; only valid during Write + + // saveBuf is previous data passed to Write which we weren't able + // to fully parse before. Unlike buf, we own this data. + saveBuf bytes.Buffer + + firstField bool // processing the first field of the header block +} + +// NewDecoder returns a new decoder with the provided maximum dynamic +// table size. The emitFunc will be called for each valid field +// parsed, in the same goroutine as calls to Write, before Write returns. +func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { + d := &Decoder{ + emit: emitFunc, + emitEnabled: true, + firstField: true, + } + d.dynTab.table.init() + d.dynTab.allowedMaxSize = maxDynamicTableSize + d.dynTab.setMaxSize(maxDynamicTableSize) + return d +} + +// ErrStringLength is returned by Decoder.Write when the max string length +// (as configured by Decoder.SetMaxStringLength) would be violated. +var ErrStringLength = errors.New("hpack: string too long") + +// SetMaxStringLength sets the maximum size of a HeaderField name or +// value string. If a string exceeds this length (even after any +// decompression), Write will return ErrStringLength. +// A value of 0 means unlimited and is the default from NewDecoder. +func (d *Decoder) SetMaxStringLength(n int) { + d.maxStrLen = n +} + +// SetEmitFunc changes the callback used when new header fields +// are decoded. +// It must be non-nil. It does not affect EmitEnabled. +func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { + d.emit = emitFunc +} + +// SetEmitEnabled controls whether the emitFunc provided to NewDecoder +// should be called. The default is true. +// +// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE +// while still decoding and keeping in-sync with decoder state, but +// without doing unnecessary decompression or generating unnecessary +// garbage for header fields past the limit. +func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } + +// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder +// are currently enabled. The default is true. +func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } + +// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their +// underlying buffers for garbage reasons. + +func (d *Decoder) SetMaxDynamicTableSize(v uint32) { + d.dynTab.setMaxSize(v) +} + +// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded +// stream (via dynamic table size updates) may set the maximum size +// to. +func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { + d.dynTab.allowedMaxSize = v +} + +type dynamicTable struct { + // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 + table headerFieldTable + size uint32 // in bytes + maxSize uint32 // current maxSize + allowedMaxSize uint32 // maxSize may go up to this, inclusive +} + +func (dt *dynamicTable) setMaxSize(v uint32) { + dt.maxSize = v + dt.evict() +} + +func (dt *dynamicTable) add(f HeaderField) { + dt.table.addEntry(f) + dt.size += f.Size() + dt.evict() +} + +// If we're too big, evict old stuff. +func (dt *dynamicTable) evict() { + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ + } + dt.table.evictOldest(n) +} + +func (d *Decoder) maxTableIndex() int { + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. 
Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() +} + +func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { + // See Section 2.3.3. + if i == 0 { + return + } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } + if i > uint64(d.maxTableIndex()) { + return + } + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true +} + +// Decode decodes an entire block. +// +// TODO: remove this method and make it incremental later? This is +// easier for debugging now. +func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { + var hf []HeaderField + saveFunc := d.emit + defer func() { d.emit = saveFunc }() + d.emit = func(f HeaderField) { hf = append(hf, f) } + if _, err := d.Write(p); err != nil { + return nil, err + } + if err := d.Close(); err != nil { + return nil, err + } + return hf, nil +} + +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. +func (d *Decoder) Close() error { + if d.saveBuf.Len() > 0 { + d.saveBuf.Reset() + return DecodingError{errors.New("truncated headers")} + } + d.firstField = true + return nil +} + +func (d *Decoder) Write(p []byte) (n int, err error) { + if len(p) == 0 { + // Prevent state machine CPU attacks (making us redo + // work up to the point of finding out we don't have + // enough data) + return + } + // Only copy the data if we have to. Optimistically assume + // that p will contain a complete header block. + if d.saveBuf.Len() == 0 { + d.buf = p + } else { + d.saveBuf.Write(p) + d.buf = d.saveBuf.Bytes() + d.saveBuf.Reset() + } + + for len(d.buf) > 0 { + err = d.parseHeaderFieldRepr() + if err == errNeedMore { + // Extra paranoia, making sure saveBuf won't + // get too large. All the varint and string + // reading code earlier should already catch + // overlong things and return ErrStringLength, + // but keep this as a last resort. + const varIntOverhead = 8 // conservative + if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { + return 0, ErrStringLength + } + d.saveBuf.Write(d.buf) + return len(p), nil + } + d.firstField = false + if err != nil { + break + } + } + return len(p), err +} + +// errNeedMore is an internal sentinel error value that means the +// buffer is truncated and we need to read more data before we can +// continue parsing. +var errNeedMore = errors.New("need more data") + +type indexType int + +const ( + indexedTrue indexType = iota + indexedFalse + indexedNever +) + +func (v indexType) indexed() bool { return v == indexedTrue } +func (v indexType) sensitive() bool { return v == indexedNever } + +// returns errNeedMore if there isn't enough data available. +// any other error is fatal. +// consumes d.buf iff it returns nil. +// precondition: must be called with len(d.buf) > 0 +func (d *Decoder) parseHeaderFieldRepr() error { + b := d.buf[0] + switch { + case b&128 != 0: + // Indexed representation. + // High bit set? 
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+		return d.parseFieldIndexed()
+	case b&192 == 64:
+		// 6.2.1 Literal Header Field with Incremental Indexing
+		// 0b01xxxxxx: top two bits are 01
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+		return d.parseFieldLiteral(6, indexedTrue)
+	case b&240 == 0:
+		// 6.2.2 Literal Header Field without Indexing
+		// 0b0000xxxx: top four bits are 0000
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+		return d.parseFieldLiteral(4, indexedFalse)
+	case b&240 == 16:
+		// 6.2.3 Literal Header Field never Indexed
+		// 0b0001xxxx: top four bits are 0001
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+		return d.parseFieldLiteral(4, indexedNever)
+	case b&224 == 32:
+		// 6.3 Dynamic Table Size Update
+		// Top three bits are '001'.
+		// http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+		return d.parseDynamicTableSizeUpdate()
+	}
+
+	return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+	buf := d.buf
+	idx, buf, err := readVarInt(7, buf)
+	if err != nil {
+		return err
+	}
+	hf, ok := d.at(idx)
+	if !ok {
+		return DecodingError{InvalidIndexError(idx)}
+	}
+	d.buf = buf
+	return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+	buf := d.buf
+	nameIdx, buf, err := readVarInt(n, buf)
+	if err != nil {
+		return err
+	}
+
+	var hf HeaderField
+	wantStr := d.emitEnabled || it.indexed()
+	if nameIdx > 0 {
+		ihf, ok := d.at(nameIdx)
+		if !ok {
+			return DecodingError{InvalidIndexError(nameIdx)}
+		}
+		hf.Name = ihf.Name
+	} else {
+		hf.Name, buf, err = d.readString(buf, wantStr)
+		if err != nil {
+			return err
+		}
+	}
+	hf.Value, buf, err = d.readString(buf, wantStr)
+	if err != nil {
+		return err
+	}
+	d.buf = buf
+	if it.indexed() {
+		d.dynTab.add(hf)
+	}
+	hf.Sensitive = it.sensitive()
+	return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+	if d.maxStrLen != 0 {
+		if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+			return ErrStringLength
+		}
+	}
+	if d.emitEnabled {
+		d.emit(hf)
+	}
+	return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+	// RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
+	// beginning of the first header block following the change to the dynamic table size.
+	if !d.firstField && d.dynTab.size > 0 {
+		return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
+	}
+
+	buf := d.buf
+	size, buf, err := readVarInt(5, buf)
+	if err != nil {
+		return err
+	}
+	if size > uint64(d.dynTab.allowedMaxSize) {
+		return DecodingError{errors.New("dynamic table size update too large")}
+	}
+	d.dynTab.setMaxSize(uint32(size))
+	d.buf = buf
+	return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
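+//
+// As a worked example (RFC 7541, Appendix C.1.2): the value 1337 with a
+// 5-bit prefix is encoded as the three bytes 0b00011111 (prefix filled
+// with 31), 0b10011010 (1306%128 with the continuation bit set), and
+// 0b00001010 (1306/128); readVarInt reverses exactly this process.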
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { + if n < 1 || n > 8 { + panic("bad n") + } + if len(p) == 0 { + return 0, p, errNeedMore + } + i = uint64(p[0]) + if n < 8 { + i &= (1 << uint64(n)) - 1 + } + if i < (1<<uint64(n))-1 { + return i, p[1:], nil + } + + origP := p + p = p[1:] + var m uint64 + for len(p) > 0 { + b := p[0] + p = p[1:] + i += uint64(b&127) << m + if b&128 == 0 { + return i, p, nil + } + m += 7 + if m >= 63 { // TODO: proper overflow check. making this up. + return 0, origP, errVarintOverflow + } + } + return 0, origP, errNeedMore +} + +// readString decodes an hpack string from p. +// +// wantStr is whether s will be used. If false, decompression and +// []byte->string garbage are skipped if s will be ignored +// anyway. This does mean that huffman decoding errors for non-indexed +// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server +// is returning an error anyway, and because they're not indexed, the error +// won't affect the decoding state. +func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { + if len(p) == 0 { + return "", p, errNeedMore + } + isHuff := p[0]&128 != 0 + strLen, p, err := readVarInt(7, p) + if err != nil { + return "", p, err + } + if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { + return "", nil, ErrStringLength + } + if uint64(len(p)) < strLen { + return "", p, errNeedMore + } + if !isHuff { + if wantStr { + s = string(p[:strLen]) + } + return s, p[strLen:], nil + } + + if wantStr { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() // don't trust others + defer bufPool.Put(buf) + if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { + buf.Reset() + return "", nil, err + } + s = buf.String() + buf.Reset() // be nice to GC + } + return s, p[strLen:], nil +} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go new file mode 100644 index 00000000..b412a96c --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -0,0 +1,222 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "bytes" + "errors" + "io" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +// HuffmanDecode decodes the string in v and writes the expanded +// result to w, returning the number of bytes written to w and the +// Write call's return value. At most one Write call is made. +func HuffmanDecode(w io.Writer, v []byte) (int, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return 0, err + } + return w.Write(buf.Bytes()) +} + +// HuffmanDecodeToString decodes the string in v. +func HuffmanDecodeToString(v []byte) (string, error) { + buf := bufPool.Get().(*bytes.Buffer) + buf.Reset() + defer bufPool.Put(buf) + if err := huffmanDecode(buf, 0, v); err != nil { + return "", err + } + return buf.String(), nil +} + +// ErrInvalidHuffman is returned for errors found decoding +// Huffman-encoded strings. +var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") + +// huffmanDecode decodes v to buf. +// If maxLen is greater than 0, attempts to write more to buf than +// maxLen bytes will return ErrStringLength. 
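+//
+// For example, the 12-byte sequence f1e3 c2e5 f23a 6ba0 ab90 f4ff
+// decodes to "www.example.com" (RFC 7541, Appendix C.4.1).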
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + rootHuffmanNode := getRootHuffmanNode() + n := rootHuffmanNode + // cur is the bit buffer that has not been fed into n. + // cbits is the number of low order bits in cur that are valid. + // sbits is the number of bits of the symbol prefix being decoded. + cur, cbits, sbits := uint(0), uint8(0), uint8(0) + for _, b := range v { + cur = cur<<8 | uint(b) + cbits += 8 + sbits += 8 + for cbits >= 8 { + idx := byte(cur >> (cbits - 8)) + n = n.children[idx] + if n == nil { + return ErrInvalidHuffman + } + if n.children == nil { + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } else { + cbits -= 8 + } + } + } + for cbits > 0 { + n = n.children[byte(cur<<(8-cbits))] + if n == nil { + return ErrInvalidHuffman + } + if n.children != nil || n.codeLen > cbits { + break + } + if maxLen != 0 && buf.Len() == maxLen { + return ErrStringLength + } + buf.WriteByte(n.sym) + cbits -= n.codeLen + n = rootHuffmanNode + sbits = cbits + } + if sbits > 7 { + // Either there was an incomplete symbol, or overlong padding. + // Both are decoding errors per RFC 7541 section 5.2. + return ErrInvalidHuffman + } + if mask := uint(1<<cbits - 1); cur&mask != mask { + // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2. + return ErrInvalidHuffman + } + + return nil +} + +type node struct { + // children is non-nil for internal nodes + children *[256]*node + + // The following are only valid if children is nil: + codeLen uint8 // number of bits that led to the output of sym + sym byte // output symbol +} + +func newInternalNode() *node { + return &node{children: new([256]*node)} +} + +var ( + buildRootOnce sync.Once + lazyRootHuffmanNode *node +) + +func getRootHuffmanNode() *node { + buildRootOnce.Do(buildRootHuffmanNode) + return lazyRootHuffmanNode +} + +func buildRootHuffmanNode() { + if len(huffmanCodes) != 256 { + panic("unexpected size") + } + lazyRootHuffmanNode = newInternalNode() + for i, code := range huffmanCodes { + addDecoderNode(byte(i), code, huffmanCodeLen[i]) + } +} + +func addDecoderNode(sym byte, code uint32, codeLen uint8) { + cur := lazyRootHuffmanNode + for codeLen > 8 { + codeLen -= 8 + i := uint8(code >> codeLen) + if cur.children[i] == nil { + cur.children[i] = newInternalNode() + } + cur = cur.children[i] + } + shift := 8 - codeLen + start, end := int(uint8(code<<shift)), int(1<<shift) + for i := start; i < start+end; i++ { + cur.children[i] = &node{sym: sym, codeLen: codeLen} + } +} + +// AppendHuffmanString appends s, as encoded in Huffman codes, to dst +// and returns the extended buffer. +func AppendHuffmanString(dst []byte, s string) []byte { + rembits := uint8(8) + + for i := 0; i < len(s); i++ { + if rembits == 8 { + dst = append(dst, 0) + } + dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i]) + } + + if rembits < 8 { + // special EOS symbol + code := uint32(0x3fffffff) + nbits := uint8(30) + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + } + + return dst +} + +// HuffmanEncodeLength returns the number of bytes required to encode +// s in Huffman codes. The result is round up to byte boundary. 
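+// For example, HuffmanEncodeLength("no-cache") is 6 (the encoding
+// a8eb 1064 9cbf), versus 8 bytes unencoded (RFC 7541, Appendix C.4.2).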
+func HuffmanEncodeLength(s string) uint64 { + n := uint64(0) + for i := 0; i < len(s); i++ { + n += uint64(huffmanCodeLen[s[i]]) + } + return (n + 7) / 8 +} + +// appendByteToHuffmanCode appends Huffman code for c to dst and +// returns the extended buffer and the remaining bits in the last +// element. The appending is not byte aligned and the remaining bits +// in the last element of dst is given in rembits. +func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { + code := huffmanCodes[c] + nbits := huffmanCodeLen[c] + + for { + if rembits > nbits { + t := uint8(code << (rembits - nbits)) + dst[len(dst)-1] |= t + rembits -= nbits + break + } + + t := uint8(code >> (nbits - rembits)) + dst[len(dst)-1] |= t + + nbits -= rembits + rembits = 8 + + if nbits == 0 { + break + } + + dst = append(dst, 0) + } + + return dst, rembits +} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go new file mode 100644 index 00000000..a66cfbea --- /dev/null +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -0,0 +1,479 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package hpack + +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. + // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. 
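+// Unique ids remain stable across evictions: for example (illustrative),
+// a table holding entries with ids 1, 2 and 3 that evicts its two oldest
+// entries keeps the surviving entry at id 3, with evictCount now 2.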
+func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. +func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 +} + +// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: 
"max-forwards"}, + {Name: "proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t +} + +var huffmanCodes = [256]uint32{ + 0x1ff8, + 0x7fffd8, + 0xfffffe2, + 0xfffffe3, + 0xfffffe4, + 0xfffffe5, + 0xfffffe6, + 0xfffffe7, + 0xfffffe8, + 0xffffea, + 0x3ffffffc, + 0xfffffe9, + 0xfffffea, + 0x3ffffffd, + 0xfffffeb, + 0xfffffec, + 0xfffffed, + 0xfffffee, + 0xfffffef, + 0xffffff0, + 0xffffff1, + 0xffffff2, + 0x3ffffffe, + 0xffffff3, + 0xffffff4, + 0xffffff5, + 0xffffff6, + 0xffffff7, + 0xffffff8, + 0xffffff9, + 0xffffffa, + 0xffffffb, + 0x14, + 0x3f8, + 0x3f9, + 0xffa, + 0x1ff9, + 0x15, + 0xf8, + 0x7fa, + 0x3fa, + 0x3fb, + 0xf9, + 0x7fb, + 0xfa, + 0x16, + 0x17, + 0x18, + 0x0, + 0x1, + 0x2, + 0x19, + 0x1a, + 0x1b, + 0x1c, + 0x1d, + 0x1e, + 0x1f, + 0x5c, + 0xfb, + 0x7ffc, + 0x20, + 0xffb, + 0x3fc, + 0x1ffa, + 0x21, + 0x5d, + 0x5e, + 0x5f, + 0x60, + 0x61, + 0x62, + 0x63, + 0x64, + 0x65, + 0x66, + 0x67, + 0x68, + 0x69, + 0x6a, + 0x6b, + 0x6c, + 0x6d, + 0x6e, + 0x6f, + 0x70, + 0x71, + 0x72, + 0xfc, + 0x73, + 0xfd, + 0x1ffb, + 0x7fff0, + 0x1ffc, + 0x3ffc, + 0x22, + 0x7ffd, + 0x3, + 0x23, + 0x4, + 0x24, + 0x5, + 0x25, + 0x26, + 0x27, + 0x6, + 0x74, + 0x75, + 0x28, + 0x29, + 0x2a, + 0x7, + 0x2b, + 0x76, + 0x2c, + 0x8, + 0x9, + 0x2d, + 0x77, + 0x78, + 0x79, + 0x7a, + 0x7b, + 0x7ffe, + 0x7fc, + 0x3ffd, + 0x1ffd, + 0xffffffc, + 0xfffe6, + 0x3fffd2, + 0xfffe7, + 0xfffe8, + 0x3fffd3, + 0x3fffd4, + 0x3fffd5, + 0x7fffd9, + 0x3fffd6, + 0x7fffda, + 0x7fffdb, + 0x7fffdc, + 0x7fffdd, + 0x7fffde, + 0xffffeb, + 0x7fffdf, + 0xffffec, + 0xffffed, + 0x3fffd7, + 0x7fffe0, + 0xffffee, + 0x7fffe1, + 0x7fffe2, + 0x7fffe3, + 0x7fffe4, + 0x1fffdc, + 0x3fffd8, + 0x7fffe5, + 0x3fffd9, + 0x7fffe6, + 0x7fffe7, + 0xffffef, + 0x3fffda, + 0x1fffdd, + 0xfffe9, + 0x3fffdb, + 0x3fffdc, + 0x7fffe8, + 0x7fffe9, + 0x1fffde, + 0x7fffea, + 0x3fffdd, + 0x3fffde, + 0xfffff0, + 0x1fffdf, + 0x3fffdf, + 0x7fffeb, + 0x7fffec, + 0x1fffe0, + 0x1fffe1, + 0x3fffe0, + 0x1fffe2, + 0x7fffed, + 0x3fffe1, + 0x7fffee, + 0x7fffef, + 0xfffea, + 0x3fffe2, + 0x3fffe3, + 0x3fffe4, + 0x7ffff0, + 0x3fffe5, + 0x3fffe6, + 0x7ffff1, + 0x3ffffe0, + 0x3ffffe1, + 0xfffeb, + 0x7fff1, + 0x3fffe7, + 0x7ffff2, + 0x3fffe8, + 0x1ffffec, + 0x3ffffe2, + 0x3ffffe3, + 0x3ffffe4, + 0x7ffffde, + 0x7ffffdf, + 0x3ffffe5, + 0xfffff1, + 0x1ffffed, + 0x7fff2, + 0x1fffe3, + 0x3ffffe6, + 0x7ffffe0, + 0x7ffffe1, + 0x3ffffe7, + 0x7ffffe2, + 0xfffff2, + 0x1fffe4, + 0x1fffe5, + 0x3ffffe8, + 0x3ffffe9, + 0xffffffd, + 0x7ffffe3, + 0x7ffffe4, + 0x7ffffe5, + 0xfffec, + 0xfffff3, + 0xfffed, + 0x1fffe6, + 0x3fffe9, + 0x1fffe7, + 0x1fffe8, + 0x7ffff3, + 0x3fffea, + 0x3fffeb, + 0x1ffffee, + 0x1ffffef, + 0xfffff4, + 0xfffff5, + 0x3ffffea, + 0x7ffff4, + 0x3ffffeb, + 0x7ffffe6, + 0x3ffffec, + 0x3ffffed, + 0x7ffffe7, + 0x7ffffe8, + 0x7ffffe9, + 0x7ffffea, + 0x7ffffeb, + 0xffffffe, + 0x7ffffec, + 0x7ffffed, + 0x7ffffee, + 0x7ffffef, + 0x7fffff0, + 0x3ffffee, +} + +var huffmanCodeLen = [256]uint8{ + 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, + 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, + 6, 10, 10, 12, 13, 6, 8, 11, 10, 
10, 8, 11, 8, 6, 6, 6, + 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, + 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, + 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, + 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, + 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, + 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, + 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, + 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, + 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, + 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, + 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, + 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go new file mode 100644 index 00000000..bdaba1d4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/http2.go @@ -0,0 +1,384 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http2 implements the HTTP/2 protocol. +// +// This package is low-level and intended to be used directly by very +// few people. Most users will use it indirectly through the automatic +// use by the net/http package (from Go 1.6 and later). +// For use in earlier Go versions see ConfigureServer. (Transport support +// requires Go 1.6 or later) +// +// See https://http2.github.io/ for more information on HTTP/2. +// +// See https://http2.golang.org/ for a test server running this code. +// +package http2 // import "golang.org/x/net/http2" + +import ( + "bufio" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "os" + "sort" + "strconv" + "strings" + "sync" + + "golang.org/x/net/http/httpguts" +) + +var ( + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool +) + +func init() { + e := os.Getenv("GODEBUG") + if strings.Contains(e, "http2debug=1") { + VerboseLogs = true + } + if strings.Contains(e, "http2debug=2") { + VerboseLogs = true + logFrameWrites = true + logFrameReads = true + } +} + +const ( + // ClientPreface is the string that must be sent by new + // connections from clients. + ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" + + // SETTINGS_MAX_FRAME_SIZE default + // http://http2.github.io/http2-spec/#rfc.section.6.5.2 + initialMaxFrameSize = 16384 + + // NextProtoTLS is the NPN/ALPN protocol negotiated during + // HTTP/2's TLS setup. + NextProtoTLS = "h2" + + // http://http2.github.io/http2-spec/#SettingValues + initialHeaderTableSize = 4096 + + initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size + + defaultMaxReadFrameSize = 1 << 20 +) + +var ( + clientPreface = []byte(ClientPreface) +) + +type streamState int + +// HTTP/2 stream states. +// +// See http://tools.ietf.org/html/rfc7540#section-5.1. +// +// For simplicity, the server code merges "reserved (local)" into +// "half-closed (remote)". This is one less state transition to track. +// The only downside is that we send PUSH_PROMISEs slightly less +// liberally than allowable. More discussion here: +// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html +// +// "reserved (remote)" is omitted since the client code does not +// support server push. 
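+//
+// An illustrative server-side lifecycle: a stream is stateIdle until a
+// HEADERS frame arrives (stateOpen), becomes stateHalfClosedRemote when
+// the client sends END_STREAM, and reaches stateClosed once the
+// response is complete.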
+const ( + stateIdle streamState = iota + stateOpen + stateHalfClosedLocal + stateHalfClosedRemote + stateClosed +) + +var stateName = [...]string{ + stateIdle: "Idle", + stateOpen: "Open", + stateHalfClosedLocal: "HalfClosedLocal", + stateHalfClosedRemote: "HalfClosedRemote", + stateClosed: "Closed", +} + +func (st streamState) String() string { + return stateName[st] +} + +// Setting is a setting parameter: which setting it is, and its value. +type Setting struct { + // ID is which setting is being set. + // See http://http2.github.io/http2-spec/#SettingValues + ID SettingID + + // Val is the value. + Val uint32 +} + +func (s Setting) String() string { + return fmt.Sprintf("[%v = %d]", s.ID, s.Val) +} + +// Valid reports whether the setting is valid. +func (s Setting) Valid() error { + // Limits and error codes from 6.5.2 Defined SETTINGS Parameters + switch s.ID { + case SettingEnablePush: + if s.Val != 1 && s.Val != 0 { + return ConnectionError(ErrCodeProtocol) + } + case SettingInitialWindowSize: + if s.Val > 1<<31-1 { + return ConnectionError(ErrCodeFlowControl) + } + case SettingMaxFrameSize: + if s.Val < 16384 || s.Val > 1<<24-1 { + return ConnectionError(ErrCodeProtocol) + } + } + return nil +} + +// A SettingID is an HTTP/2 setting as defined in +// http://http2.github.io/http2-spec/#iana-settings +type SettingID uint16 + +const ( + SettingHeaderTableSize SettingID = 0x1 + SettingEnablePush SettingID = 0x2 + SettingMaxConcurrentStreams SettingID = 0x3 + SettingInitialWindowSize SettingID = 0x4 + SettingMaxFrameSize SettingID = 0x5 + SettingMaxHeaderListSize SettingID = 0x6 +) + +var settingName = map[SettingID]string{ + SettingHeaderTableSize: "HEADER_TABLE_SIZE", + SettingEnablePush: "ENABLE_PUSH", + SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", + SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", + SettingMaxFrameSize: "MAX_FRAME_SIZE", + SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", +} + +func (s SettingID) String() string { + if v, ok := settingName[s]; ok { + return v + } + return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) +} + +var ( + errInvalidHeaderFieldName = errors.New("http2: invalid header field name") + errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") +) + +// validWireHeaderFieldName reports whether v is a valid header field +// name (key). See httpguts.ValidHeaderName for the base rules. +// +// Further, http2 says: +// "Just as in HTTP/1.x, header field names are strings of ASCII +// characters that are compared in a case-insensitive +// fashion. However, header field names MUST be converted to +// lowercase prior to their encoding in HTTP/2. " +func validWireHeaderFieldName(v string) bool { + if len(v) == 0 { + return false + } + for _, r := range v { + if !httpguts.IsTokenRune(r) { + return false + } + if 'A' <= r && r <= 'Z' { + return false + } + } + return true +} + +func httpCodeString(code int) string { + switch code { + case 200: + return "200" + case 404: + return "404" + } + return strconv.Itoa(code) +} + +// from pkg io +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// A gate lets two goroutines coordinate their activities. +type gate chan struct{} + +func (g gate) Done() { g <- struct{}{} } +func (g gate) Wait() { <-g } + +// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). +type closeWaiter chan struct{} + +// Init makes a closeWaiter usable. 
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and have its memory in the same
+// allocation.
+func (cw *closeWaiter) Init() {
+	*cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+	close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+	<-cw
+}
+
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+	w  io.Writer     // immutable
+	bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+	return &bufferedWriter{w: w}
+}
+
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
+var bufWriterPool = sync.Pool{
+	New: func() interface{} {
+		return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
+	},
+}
+
+func (w *bufferedWriter) Available() int {
+	if w.bw == nil {
+		return bufWriterPoolBufferSize
+	}
+	return w.bw.Available()
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+	if w.bw == nil {
+		bw := bufWriterPool.Get().(*bufio.Writer)
+		bw.Reset(w.w)
+		w.bw = bw
+	}
+	return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+	bw := w.bw
+	if bw == nil {
+		return nil
+	}
+	err := bw.Flush()
+	bw.Reset(nil)
+	bufWriterPool.Put(bw)
+	w.bw = nil
+	return err
+}
+
+func mustUint31(v int32) uint32 {
+	if v < 0 || v > 2147483647 {
+		panic("out of range")
+	}
+	return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 7230, section 3.3.
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+type httpError struct {
+	msg     string
+	timeout bool
+}
+
+func (e *httpError) Error() string   { return e.msg }
+func (e *httpError) Timeout() bool   { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+	ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+	v []string // owned by sorter
+}
+
+func (s *sorter) Len() int           { return len(s.v) }
+func (s *sorter) Swap(i, j int)      { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+	keys := s.v[:0]
+	for k := range h {
+		keys = append(keys, k)
+	}
+	s.v = keys
+	sort.Sort(s)
+	return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+	// Our sorter works on s.v, which sorter owns, so
+	// stash it away while we sort the user's buffer.
+	save := s.v
+	s.v = ss
+	sort.Sort(s)
+	s.v = save
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value.
It must be either:
+//
+//  *) a non-empty string starting with '/'
+//  *) the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/') || v == "*"
+}
diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go
new file mode 100644
index 00000000..161bca7c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go111.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.11
+
+package http2
+
+import (
+	"net/http/httptrace"
+	"net/textproto"
+)
+
+func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false }
+
+func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {}
+
+func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
+	return nil
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 00000000..a6140099
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,163 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"errors"
+	"io"
+	"sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+	mu       sync.Mutex
+	c        sync.Cond     // c.L lazily initialized to &p.mu
+	b        pipeBuffer    // nil when done reading
+	err      error         // read error once empty. non-nil means closed.
+	breakErr error         // immediate read error (caller doesn't see rest of b)
+	donec    chan struct{} // closed on error
+	readFn   func()        // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+	Len() int
+	io.Writer
+	io.Reader
+}
+
+func (p *pipe) Len() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.b == nil {
+		return 0
+	}
+	return p.b.Len()
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	for {
+		if p.breakErr != nil {
+			return 0, p.breakErr
+		}
+		if p.b != nil && p.b.Len() > 0 {
+			return p.b.Read(d)
+		}
+		if p.err != nil {
+			if p.readFn != nil {
+				p.readFn()     // e.g. copy trailers
+				p.readFn = nil // not sticky like p.err
+			}
+			p.b = nil
+			return 0, p.err
+		}
+		p.c.Wait()
+	}
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
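+// Write itself never blocks; in this package the buffer behind a pipe
+// is bounded elsewhere (for example, by HTTP/2 flow control), so
+// callers are expected to stay within its capacity.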
+func (p *pipe) Write(d []byte) (n int, err error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	defer p.c.Signal()
+	if p.err != nil {
+		return 0, errClosedPipeWrite
+	}
+	if p.breakErr != nil {
+		return len(d), nil // discard when there is no reader
+	}
+	return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+	if err == nil {
+		panic("err must be non-nil")
+	}
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.c.L == nil {
+		p.c.L = &p.mu
+	}
+	defer p.c.Signal()
+	if *dst != nil {
+		// Already been done.
+		return
+	}
+	p.readFn = fn
+	if dst == &p.breakErr {
+		p.b = nil
+	}
+	*dst = err
+	p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+	if p.donec == nil {
+		return
+	}
+	// Close if unclosed. This isn't racy since we always
+	// hold p.mu while closing.
+	select {
+	case <-p.donec:
+	default:
+		close(p.donec)
+	}
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.breakErr != nil {
+		return p.breakErr
+	}
+	return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.donec == nil {
+		p.donec = make(chan struct{})
+		if p.err != nil || p.breakErr != nil {
+			// Already hit an error.
+			p.closeDoneLocked()
+		}
+	}
+	return p.donec
+}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 00000000..57334dc7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2931 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body.
This would be a somewhat semantic change from HTTP/1 +// (or at least what we expose in net/http), so I'd probably want to +// add it there too. For now, this package says that returning from +// the Handler ServeHTTP function means you're both done reading and +// done writing, without a way to stop just one or the other. + +package http2 + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "math" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +const ( + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? +) + +var ( + errClientDisconnected = errors.New("client disconnected") + errClosedBody = errors.New("body closed by handler") + errHandlerComplete = errors.New("http2: request body closed due to handler exiting") + errStreamClosed = errors.New("http2: stream closed") +) + +var responseWriterStatePool = sync.Pool{ + New: func() interface{} { + rws := &responseWriterState{} + rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) + return rws + }, +} + +// Test hooks. +var ( + testHookOnConn func() + testHookGetServerConn func(*serverConn) + testHookOnPanicMu *sync.Mutex // nil except in tests + testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) +) + +// Server is an HTTP/2 server. +type Server struct { + // MaxHandlers limits the number of http.Handler ServeHTTP goroutines + // which may run at a time over all connections. + // Negative or zero means no limit. + // TODO: implement + MaxHandlers int + + // MaxConcurrentStreams optionally specifies the number of + // concurrent streams that each client may have open at a + // time. This is unrelated to the number of http.Handler goroutines + // which may be active globally, which is MaxHandlers. + // If zero, MaxConcurrentStreams defaults to at least 100, per + // the HTTP/2 spec's recommendations. + MaxConcurrentStreams uint32 + + // MaxReadFrameSize optionally specifies the largest frame + // this server is willing to read. A valid value is between + // 16k and 16M, inclusive. If zero or otherwise invalid, a + // default value is used. + MaxReadFrameSize uint32 + + // PermitProhibitedCipherSuites, if true, permits the use of + // cipher suites prohibited by the HTTP/2 spec. + PermitProhibitedCipherSuites bool + + // IdleTimeout specifies how long until idle clients should be + // closed with a GOAWAY frame. PING frames are not considered + // activity for the purposes of IdleTimeout. + IdleTimeout time.Duration + + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connection. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + + // NewWriteScheduler constructs a write scheduler for a connection. + // If nil, a default scheduler is chosen.
+ NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. + state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 +} + +func (s *Server) maxReadFrameSize() uint32 { + if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { + return v + } + return defaultMaxReadFrameSize +} + +func (s *Server) maxConcurrentStreams() uint32 { + if v := s.MaxConcurrentStreams; v > 0 { + return v + } + return defaultMaxStreams +} + +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + +// ConfigureServer adds HTTP/2 support to a net/http Server. +// +// The configuration conf may be nil. +// +// ConfigureServer must be called before s begins serving. +func ConfigureServer(s *http.Server, conf *Server) error { + if s == nil { + panic("nil *http.Server") + } + if conf == nil { + conf = new(Server) + } + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } + } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) + + if s.TLSConfig == nil { + s.TLSConfig = new(tls.Config) + } else if s.TLSConfig.CipherSuites != nil { + // If they already provided a CipherSuite list, return + // an error if it has a bad order or is missing + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. + haveRequired := false + sawBad := false + for i, cs := range s.TLSConfig.CipherSuites { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: + haveRequired = true + } + if isBadCipher(cs) { + sawBad = true + } else if sawBad { + return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. 
With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) + } + } + if !haveRequired { + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") + } + } + + // Note: not setting MinVersion to tls.VersionTLS12, + // as we don't want to interfere with HTTP/1.1 traffic + // on the user's server. We enforce TLS 1.2 later once + // we accept a connection. Ideally this should be done + // during next-proto selection, but using TLS <1.2 with + // HTTP/2 is still the client's bug. + + s.TLSConfig.PreferServerCipherSuites = true + + haveNPN := false + for _, p := range s.TLSConfig.NextProtos { + if p == NextProtoTLS { + haveNPN = true + break + } + } + if !haveNPN { + s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) + } + + if s.TLSNextProto == nil { + s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} + } + protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { + if testHookOnConn != nil { + testHookOnConn() + } + // The TLSNextProto interface predates contexts, so + // the net/http package passes down its per-connection + // base context via an exported but unadvertised + // method on the Handler. This is for internal + // net/http<=>http2 use only. + var ctx context.Context + type baseContexter interface { + BaseContext() context.Context + } + if bc, ok := h.(baseContexter); ok { + ctx = bc.BaseContext() + } + conf.ServeConn(c, &ServeConnOpts{ + Context: ctx, + Handler: h, + BaseConfig: hs, + }) + } + s.TLSNextProto[NextProtoTLS] = protoHandler + return nil +} + +// ServeConnOpts are options for the Server.ServeConn method. +type ServeConnOpts struct { + // Context is the base context to use. + // If nil, context.Background is used. + Context context.Context + + // BaseConfig optionally sets the base configuration + // for values. If nil, defaults are used. + BaseConfig *http.Server + + // Handler specifies which handler to use for processing + // requests. If nil, BaseConfig.Handler is used. If BaseConfig + // or BaseConfig.Handler is nil, http.DefaultServeMux is used. + Handler http.Handler +} + +func (o *ServeConnOpts) context() context.Context { + if o.Context != nil { + return o.Context + } + return context.Background() +} + +func (o *ServeConnOpts) baseConfig() *http.Server { + if o != nil && o.BaseConfig != nil { + return o.BaseConfig + } + return new(http.Server) +} + +func (o *ServeConnOpts) handler() http.Handler { + if o != nil { + if o.Handler != nil { + return o.Handler + } + if o.BaseConfig != nil && o.BaseConfig.Handler != nil { + return o.BaseConfig.Handler + } + } + return http.DefaultServeMux +} + +// ServeConn serves HTTP/2 requests on the provided connection and +// blocks until the connection is no longer readable. +// +// ServeConn starts speaking HTTP/2 assuming that c has not had any +// reads or writes. It writes its initial settings frame and expects +// to be able to read the preface and settings frame from the +// client. If c has a ConnectionState method like a *tls.Conn, the +// ConnectionState is used to verify the TLS ciphersuite and to set +// the Request.TLS field in Handlers. +// +// ServeConn does not support h2c by itself. Any h2c support must be +// implemented in terms of providing a suitably-behaving net.Conn. +// +// The opts parameter is optional. If nil, default values are used. 
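// Editor's illustrative sketch (not part of the vendored diff): wiring this
// package into a standard net/http server. ConfigureServer installs the
// TLSNextProto hook described above so the TLS listener upgrades to HTTP/2
// via ALPN. The certificate and key paths are placeholders.
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			// r.Proto is "HTTP/2.0" for requests served by this package.
			w.Write([]byte("served over " + r.Proto))
		}),
	}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}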
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + baseCtx, cancel := serverConnBaseContext(c, opts) + defer cancel() + + sc := &serverConn{ + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) + + // The net/http package sets the write deadline from the + // http.Server.WriteTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. + if sc.hs.WriteTimeout != 0 { + sc.conn.SetWriteDeadline(time.Time{}) + } + + if s.NewWriteScheduler != nil { + sc.writeSched = s.NewWriteScheduler() + } else { + sc.writeSched = NewRandomWriteScheduler() + } + + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. + sc.flow.add(initialWindowSize) + sc.inflow.add(initialWindowSize) + sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) + + fr := NewFramer(sc.bw, c) + fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + fr.MaxHeaderListSize = sc.maxHeaderListSize() + fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + sc.framer = fr + + if tc, ok := c.(connectionStater); ok { + sc.tlsState = new(tls.ConnectionState) + *sc.tlsState = tc.ConnectionState() + // 9.2 Use of TLS Features + // An implementation of HTTP/2 over TLS MUST use TLS + // 1.2 or higher with the restrictions on feature set + // and cipher suite described in this section. Due to + // implementation limitations, it might not be + // possible to fail TLS negotiation. An endpoint MUST + // immediately terminate an HTTP/2 connection that + // does not meet the TLS requirements described in + // this section with a connection error (Section + // 5.4.1) of type INADEQUATE_SECURITY. + if sc.tlsState.Version < tls.VersionTLS12 { + sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") + return + } + + if sc.tlsState.ServerName == "" { + // Client must use SNI, but we don't enforce that anymore, + // since it was causing problems when connecting to bare IP + // addresses during development. + // + // TODO: optionally enforce? Or enforce at the time we receive + // a new request, and verify the ServerName matches the :authority? + // But that precludes proxy situations, perhaps. + // + // So for now, do nothing here again. 
+ } + + if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + // "Endpoints MAY choose to generate a connection error + // (Section 5.4.1) of type INADEQUATE_SECURITY if one of + // the prohibited cipher suites are negotiated." + // + // We choose that. In my opinion, the spec is weak + // here. It also says both parties must support at least + // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no + // excuses here. If we really must, we could allow an + // "AllowInsecureWeakCiphers" option on the server later. + // Let's see how it plays out first. + sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) + return + } + } + + if hook := testHookGetServerConn; hook != nil { + hook(sc) + } + sc.serve() +} + +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(opts.context()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + +func (sc *serverConn) rejectConn(err ErrCode, debug string) { + sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) + // ignoring errors. hanging up anyway. + sc.framer.WriteGoAway(0, err, []byte(debug)) + sc.bw.Flush() + sc.conn.Close() +} + +type serverConn struct { + // Immutable: + srv *Server + hs *http.Server + conn net.Conn + bw *bufferedWriter // writing to conn + handler http.Handler + baseCtx context.Context + framer *Framer + doneServing chan struct{} // closed when serverConn.serve ends + readFrameCh chan readFrameResult // written by serverConn.readFrames + wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve + wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes + bodyReadCh chan bodyReadMsg // from handlers -> serve + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop + flow flow // conn-wide (not stream-specific) outbound flow control + inflow flow // conn-wide inbound flow control + tlsState *tls.ConnectionState // shared by all handlers, like net/http + remoteAddrStr string + writeSched WriteScheduler + + // Everything following is owned by the serve loop; use serveG.check(): + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? 
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused + + // Owned by the writeFrameAsync goroutine: + headerWriteBuf bytes.Buffer + hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. + shutdownOnce sync.Once +} + +func (sc *serverConn) maxHeaderListSize() uint32 { + n := sc.hs.MaxHeaderBytes + if n <= 0 { + n = http.DefaultMaxHeaderBytes + } + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. + const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return uint32(n + typicalHeaders*perFieldOverhead) +} + +func (sc *serverConn) curOpenStreams() uint32 { + sc.serveG.check() + return sc.curClientStreams + sc.curPushedStreams +} + +// stream represents a stream. This is the minimal metadata needed by +// the serve goroutine. Most of the actual stream state is owned by +// the http.Handler's goroutine in the responseWriter. Because the +// responseWriter's responseWriterState is recycled at the end of a +// handler, this struct intentionally has no pointer to the +// *responseWriter{,State} itself, as the Handler ending nils out the +// responseWriter's state field.
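// Editor's illustrative sketch (not part of the vendored diff): the
// close-once broadcast idiom behind fields such as cw (closeWaiter) below and
// donec in pipe.go — a channel closed exactly once so any number of
// goroutines can select on "this stream/connection is done". Type and method
// names here are hypothetical; assumes import "sync".
type closeBroadcast struct {
	once sync.Once
	ch   chan struct{}
}

func newCloseBroadcast() *closeBroadcast {
	return &closeBroadcast{ch: make(chan struct{})}
}

// Close is safe to call from multiple goroutines; only the first call closes.
func (b *closeBroadcast) Close() { b.once.Do(func() { close(b.ch) }) }

// Done returns a channel that is closed once Close has been called.
func (b *closeBroadcast) Done() <-chan struct{} { return b.ch }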
+type stream struct { + // immutable: + sc *serverConn + id uint32 + body *pipe // non-nil if expecting DATA frames + cw closeWaiter // closed when the stream transitions to the closed state + ctx context.Context + cancelCtx func() + + // owned by serverConn's serve loop: + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 + state streamState + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused + + trailer http.Header // accumulated trailers + reqTrailer http.Header // handler's Request.Trailer +} + +func (sc *serverConn) Framer() *Framer { return sc.framer } +func (sc *serverConn) CloseConn() error { return sc.conn.Close() } +func (sc *serverConn) Flush() error { return sc.bw.Flush() } +func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { + return sc.hpackEncoder, &sc.headerWriteBuf +} + +func (sc *serverConn) state(streamID uint32) (streamState, *stream) { + sc.serveG.check() + // http://tools.ietf.org/html/rfc7540#section-5.1 + if st, ok := sc.streams[streamID]; ok { + return st.state, st + } + // "The first use of a new stream identifier implicitly closes all + // streams in the "idle" state that might have been initiated by + // that peer with a lower-valued stream identifier. For example, if + // a client sends a HEADERS frame on stream 7 without ever sending a + // frame on stream 5, then stream 5 transitions to the "closed" + // state when the first frame for stream 7 is sent or received." + if streamID%2 == 1 { + if streamID <= sc.maxClientStreamID { + return stateClosed, nil + } + } else { + if streamID <= sc.maxPushPromiseID { + return stateClosed, nil + } + } + return stateIdle, nil +} + +// setConnState calls the net/http ConnState hook for this connection, if configured. +// Note that the net/http package does StateNew and StateClosed for us. +// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. +func (sc *serverConn) setConnState(state http.ConnState) { + if sc.hs.ConnState != nil { + sc.hs.ConnState(sc.conn, state) + } +} + +func (sc *serverConn) vlogf(format string, args ...interface{}) { + if VerboseLogs { + sc.logf(format, args...) + } +} + +func (sc *serverConn) logf(format string, args ...interface{}) { + if lg := sc.hs.ErrorLog; lg != nil { + lg.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// errno returns v's underlying uintptr, else 0. +// +// TODO: remove this helper function once http2 can use build +// tags. See comment in isClosedConnError. +func errno(v error) uintptr { + if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { + return uintptr(rv.Uint()) + } + return 0 +} + +// isClosedConnError reports whether err is an error from use of a closed +// network connection. +func isClosedConnError(err error) bool { + if err == nil { + return false + } + + // TODO: remove this string search and be more like the Windows + // case below. That might involve modifying the standard library + // to return better error types.
+ str := err.Error() + if strings.Contains(str, "use of closed network connection") { + return true + } + + // TODO(bradfitz): x/tools/cmd/bundle doesn't really support + // build tags, so I can't make an http2_windows.go file with + // Windows-specific stuff. Fix that and move this, once we + // have a way to bundle this into std's net/http somehow. + if runtime.GOOS == "windows" { + if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { + if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { + const WSAECONNABORTED = 10053 + const WSAECONNRESET = 10054 + if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { + return true + } + } + } + } + return false +} + +func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { + if err == nil { + return + } + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { + // Boring, expected errors. + sc.vlogf(format, args...) + } else { + sc.logf(format, args...) + } +} + +func (sc *serverConn) canonicalHeader(v string) string { + sc.serveG.check() + buildCommonHeaderMapsOnce() + cv, ok := commonCanonHeader[v] + if ok { + return cv + } + cv, ok = sc.canonHeader[v] + if ok { + return cv + } + if sc.canonHeader == nil { + sc.canonHeader = make(map[string]string) + } + cv = http.CanonicalHeaderKey(v) + sc.canonHeader[v] = cv + return cv +} + +type readFrameResult struct { + f Frame // valid until readMore is called + err error + + // readMore should be called once the consumer no longer needs or + // retains f. After readMore, f is invalid and more frames can be + // read. + readMore func() +} + +// readFrames is the loop that reads incoming frames. +// It takes care to only read one frame at a time, blocking until the +// consumer is done with the frame. +// It's run on its own goroutine. +func (sc *serverConn) readFrames() { + gate := make(gate) + gateDone := gate.Done + for { + f, err := sc.framer.ReadFrame() + select { + case sc.readFrameCh <- readFrameResult{f, err, gateDone}: + case <-sc.doneServing: + return + } + select { + case <-gate: + case <-sc.doneServing: + return + } + if terminalReadFrameError(err) { + return + } + } +} + +// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. +type frameWriteResult struct { + wr FrameWriteRequest // what was written (or attempted) + err error // result of the writeFrame call +} + +// writeFrameAsync runs in its own goroutine and writes a single frame +// and then reports when it's done. +// At most one goroutine can be running writeFrameAsync at a time per +// serverConn. +func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) { + err := wr.write.writeFrame(sc) + sc.wroteFrameCh <- frameWriteResult{wr, err} +} + +func (sc *serverConn) closeAllStreamsOnConnClose() { + sc.serveG.check() + for _, st := range sc.streams { + sc.closeStream(st, errClientDisconnected) + } +} + +func (sc *serverConn) stopShutdownTimer() { + sc.serveG.check() + if t := sc.shutdownTimer; t != nil { + t.Stop() + } +} + +func (sc *serverConn) notePanic() { + // Note: this is for serverConn.serve panicking, not http.Handler code. 
+ if testHookOnPanicMu != nil { + testHookOnPanicMu.Lock() + defer testHookOnPanicMu.Unlock() + } + if testHookOnPanic != nil { + if e := recover(); e != nil { + if testHookOnPanic(sc, e) { + panic(e) + } + } + } +} + +func (sc *serverConn) serve() { + sc.serveG.check() + defer sc.notePanic() + defer sc.conn.Close() + defer sc.closeAllStreamsOnConnClose() + defer sc.stopShutdownTimer() + defer close(sc.doneServing) // unblocks handlers trying to send + + if VerboseLogs { + sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) + } + + sc.writeFrame(FrameWriteRequest{ + write: writeSettings{ + {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxConcurrentStreams, sc.advMaxStreams}, + {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + }, + }) + sc.unackedSettings++ + + // Each connection starts with initialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + + if err := sc.readPreface(); err != nil { + sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) + return + } + // Now that we've got the preface, get us out of the + // "StateNew" state. We can't go directly to idle, though. + // Active means we read some data and anticipate a request. We'll + // do another Active when we get a HEADERS frame. + sc.setConnState(http.StateActive) + sc.setConnState(http.StateIdle) + + if sc.srv.IdleTimeout != 0 { + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + defer sc.idleTimer.Stop() + } + + go sc.readFrames() // closed by defer sc.conn.Close above + + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + + loopNum := 0 + for { + loopNum++ + select { + case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } + sc.writeFrame(wr) + case res := <-sc.wroteFrameCh: + sc.wroteFrame(res) + case res := <-sc.readFrameCh: + if !sc.processFrameFromReader(res) { + return + } + res.readMore() + if settingsTimer != nil { + settingsTimer.Stop() + settingsTimer = nil + } + case m := <-sc.bodyReadCh: + sc.noteBodyRead(m.st, m.n) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } + } + + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed.
+ sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) + } + } +} + +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. +var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. +func (sc *serverConn) readPreface() error { + errc := make(chan error, 1) + go func() { + // Read the client preface + buf := make([]byte, len(ClientPreface)) + if _, err := io.ReadFull(sc.conn, buf); err != nil { + errc <- err + } else if !bytes.Equal(buf, clientPreface) { + errc <- fmt.Errorf("bogus greeting %q", buf) + } else { + errc <- nil + } + }() + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? + defer timer.Stop() + select { + case <-timer.C: + return errPrefaceTimeout + case err := <-errc: + if err == nil { + if VerboseLogs { + sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) + } + } + return err + } +} + +var errChanPool = sync.Pool{ + New: func() interface{} { return make(chan error, 1) }, +} + +var writeDataPool = sync.Pool{ + New: func() interface{} { return new(writeData) }, +} + +// writeDataFromHandler writes DATA response frames from a handler on +// the given stream. +func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { + ch := errChanPool.Get().(chan error) + writeArg := writeDataPool.Get().(*writeData) + *writeArg = writeData{stream.id, data, endStream} + err := sc.writeFrameFromHandler(FrameWriteRequest{ + write: writeArg, + stream: stream, + done: ch, + }) + if err != nil { + return err + } + var frameWriteDone bool // the frame write is done (successfully or not) + select { + case err = <-ch: + frameWriteDone = true + case <-sc.doneServing: + return errClientDisconnected + case <-stream.cw: + // If both ch and stream.cw were ready (as might + // happen on the final Write after an http.Handler + // ends), prefer the write result. Otherwise this + // might just be us successfully closing the stream. + // The writeFrameAsync and serve goroutines guarantee + // that the ch send will happen before the stream.cw + // close. + select { + case err = <-ch: + frameWriteDone = true + default: + return errStreamClosed + } + } + errChanPool.Put(ch) + if frameWriteDone { + writeDataPool.Put(writeArg) + } + return err +} + +// writeFrameFromHandler sends wr to sc.wantWriteFrameCh, but aborts +// if the connection has gone away. 
+// +// This must not be run from the serve goroutine itself, else it might +// deadlock writing to sc.wantWriteFrameCh (which is only mildly +// buffered and is read by serve itself). If you're on the serve +// goroutine, call writeFrame instead. +func (sc *serverConn) writeFrameFromHandler(wr FrameWriteRequest) error { + sc.serveG.checkNotOn() // NOT + select { + case sc.wantWriteFrameCh <- wr: + return nil + case <-sc.doneServing: + // Serve loop is gone. + // Client has closed their connection to the server. + return errClientDisconnected + } +} + +// writeFrame schedules a frame to write and sends it if there's nothing +// already being written. +// +// There is no pushback here (the serve goroutine never blocks). It's +// the http.Handlers that block, waiting for their previous frames to +// make it onto the wire +// +// If you're not on the serve goroutine, use writeFrameFromHandler instead. +func (sc *serverConn) writeFrame(wr FrameWriteRequest) { + sc.serveG.check() + + // If true, wr will not be written and wr.done will not be signaled. + var ignoreWrite bool + + // We are not allowed to write frames on closed streams. RFC 7540 Section + // 5.1.1 says: "An endpoint MUST NOT send frames other than PRIORITY on + // a closed stream." Our server never sends PRIORITY, so that exception + // does not apply. + // + // The serverConn might close an open stream while the stream's handler + // is still running. For example, the server might close a stream when it + // receives bad data from the client. If this happens, the handler might + // attempt to write a frame after the stream has been closed (since the + // handler hasn't yet been notified of the close). In this case, we simply + // ignore the frame. The handler will notice that the stream is closed when + // it waits for the frame to be written. + // + // As an exception to this rule, we allow sending RST_STREAM after close. + // This allows us to immediately reject new streams without tracking any + // state for those streams (except for the queued RST_STREAM frame). This + // may result in duplicate RST_STREAMs in some cases, but the client should + // ignore those. + if wr.StreamID() != 0 { + _, isReset := wr.write.(StreamError) + if state, _ := sc.state(wr.StreamID()); state == stateClosed && !isReset { + ignoreWrite = true + } + } + + // Don't send a 100-continue response if we've already sent headers. + // See golang.org/issue/14030. + switch wr.write.(type) { + case *writeResHeaders: + wr.stream.wroteHeaders = true + case write100ContinueHeadersFrame: + if wr.stream.wroteHeaders { + // We do not need to notify wr.done because this frame is + // never written with wr.done != nil. + if wr.done != nil { + panic("wr.done != nil for write100ContinueHeadersFrame") + } + ignoreWrite = true + } + } + + if !ignoreWrite { + sc.writeSched.Push(wr) + } + sc.scheduleFrameWrite() +} + +// startFrameWrite starts a goroutine to write wr (in a separate +// goroutine since that might block on the network), and updates the +// serve goroutine's state about the world, updated from info in wr. +func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { + sc.serveG.check() + if sc.writingFrame { + panic("internal error: can only be writing one frame at a time") + } + + st := wr.stream + if st != nil { + switch st.state { + case stateHalfClosedLocal: + switch wr.write.(type) { + case StreamError, handlerPanicRST, writeWindowUpdate: + // RFC 7540 Section 5.1 allows sending RST_STREAM, PRIORITY, and WINDOW_UPDATE + // in this state. 
(We never send PRIORITY from the server, so that is not checked.) + default: + panic(fmt.Sprintf("internal error: attempt to send frame on a half-closed-local stream: %v", wr)) + } + case stateClosed: + panic(fmt.Sprintf("internal error: attempt to send frame on a closed stream: %v", wr)) + } + } + if wpp, ok := wr.write.(*writePushPromise); ok { + var err error + wpp.promisedID, err = wpp.allocatePromisedID() + if err != nil { + sc.writingFrameAsync = false + wr.replyToWriter(err) + return + } + } + + sc.writingFrame = true + sc.needsFrameFlush = true + if wr.write.staysWithinBuffer(sc.bw.Available()) { + sc.writingFrameAsync = false + err := wr.write.writeFrame(sc) + sc.wroteFrame(frameWriteResult{wr, err}) + } else { + sc.writingFrameAsync = true + go sc.writeFrameAsync(wr) + } +} + +// errHandlerPanicked is the error given to any callers blocked in a read from +// Request.Body when the main goroutine panics. Since most handlers read in the +// main ServeHTTP goroutine, this will show up rarely. +var errHandlerPanicked = errors.New("http2: handler panicked") + +// wroteFrame is called on the serve goroutine with the result of +// whatever happened on writeFrameAsync. +func (sc *serverConn) wroteFrame(res frameWriteResult) { + sc.serveG.check() + if !sc.writingFrame { + panic("internal error: expected to be already writing a frame") + } + sc.writingFrame = false + sc.writingFrameAsync = false + + wr := res.wr + + if writeEndsStream(wr.write) { + st := wr.stream + if st == nil { + panic("internal error: expecting non-nil stream") + } + switch st.state { + case stateOpen: + // Here we would go to stateHalfClosedLocal in + // theory, but since our handler is done and + // the net/http package provides no mechanism + // for closing a ResponseWriter while still + // reading data (see possible TODO at top of + // this file), we go into closed state here + // anyway, after telling the peer we're + // hanging up on them. We'll transition to + // stateClosed after the RST_STREAM frame is + // written. + st.state = stateHalfClosedLocal + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) + case stateHalfClosedRemote: + sc.closeStream(st, errHandlerComplete) + } + } else { + switch v := wr.write.(type) { + case StreamError: + // st may be unknown if the RST_STREAM was generated to reject bad input. + if st, ok := sc.streams[v.StreamID]; ok { + sc.closeStream(st, v) + } + case handlerPanicRST: + sc.closeStream(wr.stream, errHandlerPanicked) + } + } + + // Reply (if requested) to unblock the ServeHTTP goroutine. + wr.replyToWriter(res.err) + + sc.scheduleFrameWrite() +} + +// scheduleFrameWrite tickles the frame writing scheduler. +// +// If a frame is already being written, nothing happens. This will be called again +// when the frame is done being written. +// +// If a frame isn't being written we need to send one, the best frame +// to send is selected, preferring first things that aren't +// stream-specific (e.g. ACKing settings), and then finding the +// highest priority stream. +// +// If a frame isn't being written and there's nothing else to send, we +// flush the write buffer. 
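// Editor's illustrative sketch (not part of the vendored diff): the loop
// below pops frames from the WriteScheduler chosen in ServeConn. A caller who
// wants RFC 7540 priority-tree scheduling instead of the default random
// scheduler can supply one via Server.NewWriteScheduler
// (NewPriorityWriteScheduler is exported by this package). Assumes imports
// "net/http" and "golang.org/x/net/http2" at the call site; the function
// name is hypothetical.
func configureWithPriorityScheduler(srv *http.Server) error {
	return http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			// A nil config selects the package's default priority parameters.
			return http2.NewPriorityWriteScheduler(nil)
		},
	})
}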
+func (sc *serverConn) scheduleFrameWrite() { + sc.serveG.check() + if sc.writingFrame || sc.inFrameScheduleLoop { + return + } + sc.inFrameScheduleLoop = true + for !sc.writingFrameAsync { + if sc.needToSendGoAway { + sc.needToSendGoAway = false + sc.startFrameWrite(FrameWriteRequest{ + write: &writeGoAway{ + maxStreamID: sc.maxClientStreamID, + code: sc.goAwayCode, + }, + }) + continue + } + if sc.needToSendSettingsAck { + sc.needToSendSettingsAck = false + sc.startFrameWrite(FrameWriteRequest{write: writeSettingsAck{}}) + continue + } + if !sc.inGoAway || sc.goAwayCode == ErrCodeNo { + if wr, ok := sc.writeSched.Pop(); ok { + sc.startFrameWrite(wr) + continue + } + } + if sc.needsFrameFlush { + sc.startFrameWrite(FrameWriteRequest{write: flushFrameWriter{}}) + sc.needsFrameFlush = false // after startFrameWrite, since it sets this true + continue + } + break + } + sc.inFrameScheduleLoop = false +} + +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. +func (sc *serverConn) startGracefulShutdown() { + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) +} + +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests use +// the loopback interface, making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) +} + +func (sc *serverConn) goAway(code ErrCode) { + sc.serveG.check() + if sc.inGoAway { + return + } + sc.inGoAway = true + sc.needToSendGoAway = true + sc.goAwayCode = code + sc.scheduleFrameWrite() +} + +func (sc *serverConn) shutDownIn(d time.Duration) { + sc.serveG.check() + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) +} + +func (sc *serverConn) resetStream(se StreamError) { + sc.serveG.check() + sc.writeFrame(FrameWriteRequest{write: se}) + if st, ok := sc.streams[se.StreamID]; ok { + st.resetQueued = true + } +} + +// processFrameFromReader processes the serve loop's read from readFrameCh from the +// frame-reading goroutine. +// processFrameFromReader returns whether the connection should be kept open. +func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { + sc.serveG.check() + err := res.err + if err != nil { + if err == ErrFrameTooLarge { + sc.goAway(ErrCodeFrameSize) + return true // goAway will close the loop + } + clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) + if clientGone { + // TODO: could we also get into this state if + // the peer does a half close + // (e.g.
CloseWrite) because they're done + // sending frames but they're still wanting + // our open replies? Investigate. + // TODO: add CloseWrite to crypto/tls.Conn first + // so we have a way to test this? I suppose + // just for testing we could have a non-TLS mode. + return false + } + } else { + f := res.f + if VerboseLogs { + sc.vlogf("http2: server read frame %v", summarizeFrame(f)) + } + err = sc.processFrame(f) + if err == nil { + return true + } + } + + switch ev := err.(type) { + case StreamError: + sc.resetStream(ev) + return true + case goAwayFlowError: + sc.goAway(ErrCodeFlowControl) + return true + case ConnectionError: + sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) + sc.goAway(ErrCode(ev)) + return true // goAway will handle shutdown + default: + if res.err != nil { + sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) + } else { + sc.logf("http2: server closing client connection: %v", err) + } + return false + } +} + +func (sc *serverConn) processFrame(f Frame) error { + sc.serveG.check() + + // First frame received must be SETTINGS. + if !sc.sawFirstSettings { + if _, ok := f.(*SettingsFrame); !ok { + return ConnectionError(ErrCodeProtocol) + } + sc.sawFirstSettings = true + } + + switch f := f.(type) { + case *SettingsFrame: + return sc.processSettings(f) + case *MetaHeadersFrame: + return sc.processHeaders(f) + case *WindowUpdateFrame: + return sc.processWindowUpdate(f) + case *PingFrame: + return sc.processPing(f) + case *DataFrame: + return sc.processData(f) + case *RSTStreamFrame: + return sc.processResetStream(f) + case *PriorityFrame: + return sc.processPriority(f) + case *GoAwayFrame: + return sc.processGoAway(f) + case *PushPromiseFrame: + // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE + // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + default: + sc.vlogf("http2: server ignoring frame: %v", f.Header()) + return nil + } +} + +func (sc *serverConn) processPing(f *PingFrame) error { + sc.serveG.check() + if f.IsAck() { + // 6.7 PING: " An endpoint MUST NOT respond to PING frames + // containing this flag." + return nil + } + if f.StreamID != 0 { + // "PING frames are not associated with any individual + // stream. If a PING frame is received with a stream + // identifier field value other than 0x0, the recipient MUST + // respond with a connection error (Section 5.4.1) of type + // PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + sc.writeFrame(FrameWriteRequest{write: writePingAck{f}}) + return nil +} + +func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { + sc.serveG.check() + switch { + case f.StreamID != 0: // stream-level flow control + state, st := sc.state(f.StreamID) + if state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil { + // "WINDOW_UPDATE can be sent by a peer that has sent a + // frame bearing the END_STREAM flag. This means that a + // receiver could receive a WINDOW_UPDATE frame on a "half + // closed (remote)" or "closed" stream. A receiver MUST + // NOT treat this as an error, see Section 5.1." 
+ return nil + } + if !st.flow.add(int32(f.Increment)) { + return streamError(f.StreamID, ErrCodeFlowControl) + } + default: // connection-level flow control + if !sc.flow.add(int32(f.Increment)) { + return goAwayFlowError{} + } + } + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { + sc.serveG.check() + + state, st := sc.state(f.StreamID) + if state == stateIdle { + // 6.4 "RST_STREAM frames MUST NOT be sent for a + // stream in the "idle" state. If a RST_STREAM frame + // identifying an idle stream is received, the + // recipient MUST treat this as a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + return ConnectionError(ErrCodeProtocol) + } + if st != nil { + st.cancelCtx() + sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) + } + return nil +} + +func (sc *serverConn) closeStream(st *stream, err error) { + sc.serveG.check() + if st.state == stateIdle || st.state == stateClosed { + panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) + } + st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } + if st.isPushed() { + sc.curPushedStreams-- + } else { + sc.curClientStreams-- + } + delete(sc.streams, st.id) + if len(sc.streams) == 0 { + sc.setConnState(http.StateIdle) + if sc.srv.IdleTimeout != 0 { + sc.idleTimer.Reset(sc.srv.IdleTimeout) + } + if h1ServerKeepAlivesDisabled(sc.hs) { + sc.startGracefulShutdownInternal() + } + } + if p := st.body; p != nil { + // Return any buffered unread bytes worth of conn-level flow control. + // See golang.org/issue/16481 + sc.sendWindowUpdate(nil, p.Len()) + + p.CloseWithError(err) + } + st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc + sc.writeSched.CloseStream(st.id) +} + +func (sc *serverConn) processSettings(f *SettingsFrame) error { + sc.serveG.check() + if f.IsAck() { + sc.unackedSettings-- + if sc.unackedSettings < 0 { + // Why is the peer ACKing settings we never sent? + // The spec doesn't mention this case, but + // hang up on them anyway. + return ConnectionError(ErrCodeProtocol) + } + return nil + } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } + if err := f.ForeachSetting(sc.processSetting); err != nil { + return err + } + sc.needToSendSettingsAck = true + sc.scheduleFrameWrite() + return nil +} + +func (sc *serverConn) processSetting(s Setting) error { + sc.serveG.check() + if err := s.Valid(); err != nil { + return err + } + if VerboseLogs { + sc.vlogf("http2: server processing setting %v", s) + } + switch s.ID { + case SettingHeaderTableSize: + sc.headerTableSize = s.Val + sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) + case SettingEnablePush: + sc.pushEnabled = s.Val != 0 + case SettingMaxConcurrentStreams: + sc.clientMaxStreams = s.Val + case SettingInitialWindowSize: + return sc.processSettingInitialWindowSize(s.Val) + case SettingMaxFrameSize: + sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31 + case SettingMaxHeaderListSize: + sc.peerMaxHeaderListSize = s.Val + default: + // Unknown setting: "An endpoint that receives a SETTINGS + // frame with any unknown or unsupported identifier MUST + // ignore that setting." 
+ if VerboseLogs { + sc.vlogf("http2: server ignoring unknown setting %v", s) + } + } + return nil +} + +func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { + sc.serveG.check() + // Note: val already validated to be within range by + // processSetting's Valid call. + + // "A SETTINGS frame can alter the initial flow control window + // size for all current streams. When the value of + // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST + // adjust the size of all stream flow control windows that it + // maintains by the difference between the new value and the + // old value." + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative + for _, st := range sc.streams { + if !st.flow.add(growth) { + // 6.9.2 Initial Flow Control Window Size + // "An endpoint MUST treat a change to + // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow + // control window to exceed the maximum size as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR." + return ConnectionError(ErrCodeFlowControl) + } + } + return nil +} + +func (sc *serverConn) processData(f *DataFrame) error { + sc.serveG.check() + if sc.inGoAway && sc.goAwayCode != ErrCodeNo { + return nil + } + data := f.Data() + + // "If a DATA frame is received whose stream is not in "open" + // or "half closed (local)" state, the recipient MUST respond + // with a stream error (Section 5.4.2) of type STREAM_CLOSED." + id := f.Header().StreamID + state, st := sc.state(id) + if id == 0 || state == stateIdle { + // Section 5.1: "Receiving any frame other than HEADERS + // or PRIORITY on a stream in this state MUST be + // treated as a connection error (Section 5.4.1) of + // type PROTOCOL_ERROR." + return ConnectionError(ErrCodeProtocol) + } + if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { + // This includes sending a RST_STREAM if the stream is + // in stateHalfClosedLocal (which currently means that + // the http.Handler returned, so it's done reading & + // done writing). Try to stop the client from sending + // more DATA. + + // But still enforce their connection-level flow control, + // and return any flow control bytes since we're not going + // to consume them. + if sc.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + // Deduct the flow control from inflow, since we're + // going to immediately add it back in + // sendWindowUpdate, which also schedules sending the + // frames. + sc.inflow.take(int32(f.Length)) + sc.sendWindowUpdate(nil, int(f.Length)) // conn-level + + if st != nil && st.resetQueued { + // Already have a stream error in flight. Don't send another. + return nil + } + return streamError(id, ErrCodeStreamClosed) + } + if st.body == nil { + panic("internal error: should have a body in this state") + } + + // Sender sending more than they'd declared? + if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { + st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) + } + if f.Length > 0 { + // Check whether the client has flow control quota. 
+ if st.inflow.available() < int32(f.Length) { + return streamError(id, ErrCodeFlowControl) + } + st.inflow.take(int32(f.Length)) + + if len(data) > 0 { + wrote, err := st.body.Write(data) + if err != nil { + return streamError(id, ErrCodeStreamClosed) + } + if wrote != len(data) { + panic("internal error: bad Writer") + } + st.bodyBytes += int64(len(data)) + } + + // Return any padded flow control now, since we won't + // refund it later on body reads. + if pad := int32(f.Length) - int32(len(data)); pad > 0 { + sc.sendWindowUpdate32(nil, pad) + sc.sendWindowUpdate32(st, pad) + } + } + if f.StreamEnded() { + st.endStream() + } + return nil +} + +func (sc *serverConn) processGoAway(f *GoAwayFrame) error { + sc.serveG.check() + if f.ErrCode != ErrCodeNo { + sc.logf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } else { + sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) + } + sc.startGracefulShutdownInternal() + // http://tools.ietf.org/html/rfc7540#section-6.8 + // We should not create any new streams, which means we should disable push. + sc.pushEnabled = false + return nil +} + +// isPushed reports whether the stream is server-initiated. +func (st *stream) isPushed() bool { + return st.id%2 == 0 +} + +// endStream closes a Request.Body's pipe. It is called when a DATA +// frame says a request body is over (or after trailers). +func (st *stream) endStream() { + sc := st.sc + sc.serveG.check() + + if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { + st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", + st.declBodyBytes, st.bodyBytes)) + } else { + st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) + st.body.CloseWithError(io.EOF) + } + st.state = stateHalfClosedRemote +} + +// copyTrailersToHandlerRequest is run in the Handler's goroutine in +// its Request.Body.Read just before it gets io.EOF. +func (st *stream) copyTrailersToHandlerRequest() { + for k, vv := range st.trailer { + if _, ok := st.reqTrailer[k]; ok { + // Only copy it over if it was pre-declared. + st.reqTrailer[k] = vv + } + } +} + +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + +func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { + sc.serveG.check() + id := f.StreamID + if sc.inGoAway { + // Ignore. + return nil + } + // http://tools.ietf.org/html/rfc7540#section-5.1.1 + // Streams initiated by a client MUST use odd-numbered stream + // identifiers. [...] An endpoint that receives an unexpected + // stream identifier MUST respond with a connection error + // (Section 5.4.1) of type PROTOCOL_ERROR. + if id%2 != 1 { + return ConnectionError(ErrCodeProtocol) + } + // A HEADERS frame can be used to create a new stream or + // send a trailer for an open one. If we already have a stream + // open, let it process its own HEADERS frame (trailers at this + // point, if it's valid). + if st := sc.streams[f.StreamID]; st != nil { + if st.resetQueued { + // We're sending RST_STREAM to close the stream, so don't bother + // processing this frame. + return nil + } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED.
+ if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } + return st.processTrailerHeaders(f) + } + + // [...] The identifier of a newly established stream MUST be + // numerically greater than all streams that the initiating + // endpoint has opened or reserved. [...] An endpoint that + // receives an unexpected stream identifier MUST respond with + // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. + if id <= sc.maxClientStreamID { + return ConnectionError(ErrCodeProtocol) + } + sc.maxClientStreamID = id + + if sc.idleTimer != nil { + sc.idleTimer.Stop() + } + + // http://tools.ietf.org/html/rfc7540#section-5.1.2 + // [...] Endpoints MUST NOT exceed the limit set by their peer. An + // endpoint that receives a HEADERS frame that causes their + // advertised concurrent stream limit to be exceeded MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR + // or REFUSED_STREAM. + if sc.curClientStreams+1 > sc.advMaxStreams { + if sc.unackedSettings == 0 { + // They should know better. + return streamError(id, ErrCodeProtocol) + } + // Assume it's a network race, where they just haven't + // received our last SETTINGS update. But actually + // this can't happen yet, because we don't yet provide + // a way for users to adjust server parameters at + // runtime. + return streamError(id, ErrCodeRefusedStream) + } + + initialState := stateOpen + if f.StreamEnded() { + initialState = stateHalfClosedRemote + } + st := sc.newStream(id, 0, initialState) + + if f.HasPriority() { + if err := checkPriority(f.StreamID, f.Priority); err != nil { + return err + } + sc.writeSched.AdjustStream(st.id, f.Priority) + } + + rw, req, err := sc.newWriterAndRequest(st, f) + if err != nil { + return err + } + st.reqTrailer = req.Trailer + if st.reqTrailer != nil { + st.trailer = make(http.Header) + } + st.body = req.Body.(*requestBody).pipe // may be nil + st.declBodyBytes = req.ContentLength + + handler := sc.handler.ServeHTTP + if f.Truncated { + // Their header list was too long. Send a 431 error. + handler = handleHeaderListTooLong + } else if err := checkValidHTTP2RequestHeaders(req.Header); err != nil { + handler = new400Handler(err) + } + + // The net/http package sets the read deadline from the + // http.Server.ReadTimeout during the TLS handshake, but then + // passes the connection off to us with the deadline already + // set. Disarm it here after the request headers are read, + // similar to how the http1 server works. Here it's + // technically more like the http1 Server's ReadHeaderTimeout + // (in Go 1.8), though. That's a more sane option anyway. + if sc.hs.ReadTimeout != 0 { + sc.conn.SetReadDeadline(time.Time{}) + } + + go sc.runHandler(rw, req, handler) + return nil +} + +func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { + sc := st.sc + sc.serveG.check() + if st.gotTrailerHeader { + return ConnectionError(ErrCodeProtocol) + } + st.gotTrailerHeader = true + if !f.StreamEnded() { + return streamError(st.id, ErrCodeProtocol) + } + + if len(f.PseudoFields()) > 0 { + return streamError(st.id, ErrCodeProtocol) + } + if st.trailer != nil { + for _, hf := range f.RegularFields() { + key := sc.canonicalHeader(hf.Name) + if !httpguts.ValidTrailerHeader(key) { + // TODO: send more details to the peer somehow. But http2 has + // no way to send debug data at a stream level. Discuss with + // HTTP folk. 
+ return streamError(st.id, ErrCodeProtocol) + } + st.trailer[key] = append(st.trailer[key], hf.Value) + } + } + st.endStream() + return nil +} + +func checkPriority(streamID uint32, p PriorityParam) error { + if streamID == p.StreamDep { + // Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat + // this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR." + // Section 5.3.3 says that a stream can depend on one of its dependencies, + // so it's only self-dependencies that are forbidden. + return streamError(streamID, ErrCodeProtocol) + } + return nil +} + +func (sc *serverConn) processPriority(f *PriorityFrame) error { + if sc.inGoAway { + return nil + } + if err := checkPriority(f.StreamID, f.PriorityParam); err != nil { + return err + } + sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam) + return nil +} + +func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream { + sc.serveG.check() + if id == 0 { + panic("internal error: cannot create stream with id 0") + } + + ctx, cancelCtx := context.WithCancel(sc.baseCtx) + st := &stream{ + sc: sc, + id: id, + state: state, + ctx: ctx, + cancelCtx: cancelCtx, + } + st.cw.Init() + st.flow.conn = &sc.flow // link to conn-level counter + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } + + sc.streams[id] = st + sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) + if st.isPushed() { + sc.curPushedStreams++ + } else { + sc.curClientStreams++ + } + if sc.curOpenStreams() == 1 { + sc.setConnState(http.StateActive) + } + + return st +} + +func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + rp := requestParam{ + method: f.PseudoValue("method"), + scheme: f.PseudoValue("scheme"), + authority: f.PseudoValue("authority"), + path: f.PseudoValue("path"), + } + + isConnect := rp.method == "CONNECT" + if isConnect { + if rp.path != "" || rp.scheme != "" || rp.authority == "" { + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + // See 8.1.2.6 Malformed Requests and Responses: + // + // Malformed requests or responses that are detected + // MUST be treated as a stream error (Section 5.4.2) + // of type PROTOCOL_ERROR." 
+ // + // 8.1.2.3 Request Pseudo-Header Fields + // "All HTTP/2 requests MUST include exactly one valid + // value for the :method, :scheme, and :path + // pseudo-header fields" + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + bodyOpen := !f.StreamEnded() + if rp.method == "HEAD" && bodyOpen { + // HEAD requests can't have bodies + return nil, nil, streamError(f.StreamID, ErrCodeProtocol) + } + + rp.header = make(http.Header) + for _, hf := range f.RegularFields() { + rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + } + if rp.authority == "" { + rp.authority = rp.header.Get("Host") + } + + rw, req, err := sc.newWriterAndRequestNoBody(st, rp) + if err != nil { + return nil, nil, err + } + if bodyOpen { + if vv, ok := rp.header["Content-Length"]; ok { + req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) + } else { + req.ContentLength = -1 + } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } + } + return rw, req, nil +} + +type requestParam struct { + method string + scheme, authority, path string + header http.Header +} + +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { + sc.serveG.check() + + var tlsState *tls.ConnectionState // nil if not scheme https + if rp.scheme == "https" { + tlsState = sc.tlsState + } + + needsContinue := rp.header.Get("Expect") == "100-continue" + if needsContinue { + rp.header.Del("Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.header["Cookie"]; len(cookies) > 1 { + rp.header.Set("Cookie", strings.Join(cookies, "; ")) + } + + // Setup Trailers + var trailer http.Header + for _, v := range rp.header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = http.CanonicalHeaderKey(strings.TrimSpace(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(http.Header) + } + trailer[key] = nil + } + } + } + delete(rp.header, "Trailer") + + var url_ *url.URL + var requestURI string + if rp.method == "CONNECT" { + url_ = &url.URL{Host: rp.authority} + requestURI = rp.authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.path) + if err != nil { + return nil, nil, streamError(st.id, ErrCodeProtocol) + } + requestURI = rp.path + } + + body := &requestBody{ + conn: sc, + stream: st, + needsContinue: needsContinue, + } + req := &http.Request{ + Method: rp.method, + URL: url_, + RemoteAddr: sc.remoteAddrStr, + Header: rp.header, + RequestURI: requestURI, + Proto: "HTTP/2.0", + ProtoMajor: 2, + ProtoMinor: 0, + TLS: tlsState, + Host: rp.authority, + Body: body, + Trailer: trailer, + } + req = req.WithContext(st.ctx) + + rws := responseWriterStatePool.Get().(*responseWriterState) + bwSave := rws.bw + *rws = responseWriterState{} // zero all the fields + rws.conn = sc + rws.bw = bwSave + rws.bw.Reset(chunkWriter{rws}) + rws.stream = st + rws.req = req + rws.body = body + + rw := &responseWriter{rws: rws} + return rw, req, nil +} + +// Run on its own goroutine. 
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+	didPanic := true
+	defer func() {
+		rw.rws.stream.cancelCtx()
+		if didPanic {
+			e := recover()
+			sc.writeFrameFromHandler(FrameWriteRequest{
+				write:  handlerPanicRST{rw.rws.stream.id},
+				stream: rw.rws.stream,
+			})
+			// Same as net/http:
+			if e != nil && e != http.ErrAbortHandler {
+				const size = 64 << 10
+				buf := make([]byte, size)
+				buf = buf[:runtime.Stack(buf, false)]
+				sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+			}
+			return
+		}
+		rw.handlerDone()
+	}()
+	handler(rw, req)
+	didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+	// 10.5.1 Limits on Header Block Size:
+	// .. "A server that receives a larger header block than it is
+	// willing to handle can send an HTTP 431 (Request Header Fields Too
+	// Large) status code"
+	const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+	w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+	io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+	sc.serveG.checkNotOn() // NOT on
+	var errc chan error
+	if headerData.h != nil {
+		// If there's a header map (which we don't own), we have to block on
+		// this frame being written so that an http.Flush mid-handler writes
+		// out the correct value of keys, before a handler later potentially
+		// mutates it.
+		errc = errChanPool.Get().(chan error)
+	}
+	if err := sc.writeFrameFromHandler(FrameWriteRequest{
+		write:  headerData,
+		stream: st,
+		done:   errc,
+	}); err != nil {
+		return err
+	}
+	if errc != nil {
+		select {
+		case err := <-errc:
+			errChanPool.Put(errc)
+			return err
+		case <-sc.doneServing:
+			return errClientDisconnected
+		case <-st.cw:
+			return errStreamClosed
+		}
+	}
+	return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+	sc.writeFrameFromHandler(FrameWriteRequest{
+		write:  write100ContinueHeadersFrame{st.id},
+		stream: st,
+	})
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+	st *stream
+	n  int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) {
+	sc.serveG.checkNotOn() // NOT on
+	if n > 0 {
+		select {
+		case sc.bodyReadCh <- bodyReadMsg{st, n}:
+		case <-sc.doneServing:
+		}
+	}
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+	sc.serveG.check()
+	sc.sendWindowUpdate(nil, n) // conn-level
+	if st.state != stateHalfClosedRemote && st.state != stateClosed {
+		// Don't send this WINDOW_UPDATE if the stream is closed
+		// remotely.
+		sc.sendWindowUpdate(st, n)
+	}
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+	sc.serveG.check()
+	// "The legal range for the increment to the flow control
+	// window is 1 to 2^31-1 (2,147,483,647) octets."
+	// A Go Read call on 64-bit machines could in theory read
+	// a larger Read than this. Very unlikely, but we handle it here
+	// rather than elsewhere for now.
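+	// An illustrative (hypothetical) split on a 64-bit platform: a single
+	// Read of n = 3<<30 bytes would be refunded as two updates,
+	//
+	//	sc.sendWindowUpdate32(st, 1<<31-1)         // maxUint31
+	//	sc.sendWindowUpdate32(st, 3<<30-(1<<31-1)) // remainder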
+ const maxUint31 = 1<<31 - 1 + for n >= maxUint31 { + sc.sendWindowUpdate32(st, maxUint31) + n -= maxUint31 + } + sc.sendWindowUpdate32(st, int32(n)) +} + +// st may be nil for conn-level +func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { + sc.serveG.check() + if n == 0 { + return + } + if n < 0 { + panic("negative update") + } + var streamID uint32 + if st != nil { + streamID = st.id + } + sc.writeFrame(FrameWriteRequest{ + write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, + stream: st, + }) + var ok bool + if st == nil { + ok = sc.inflow.add(n) + } else { + ok = st.inflow.add(n) + } + if !ok { + panic("internal error; sent too many window updates without decrements?") + } +} + +// requestBody is the Handler's Request.Body type. +// Read and Close may be called concurrently. +type requestBody struct { + stream *stream + conn *serverConn + closed bool // for use by Close only + sawEOF bool // for use by Read only + pipe *pipe // non-nil if we have a HTTP entity message body + needsContinue bool // need to send a 100-continue +} + +func (b *requestBody) Close() error { + if b.pipe != nil && !b.closed { + b.pipe.BreakWithError(errClosedBody) + } + b.closed = true + return nil +} + +func (b *requestBody) Read(p []byte) (n int, err error) { + if b.needsContinue { + b.needsContinue = false + b.conn.write100ContinueHeaders(b.stream) + } + if b.pipe == nil || b.sawEOF { + return 0, io.EOF + } + n, err = b.pipe.Read(p) + if err == io.EOF { + b.sawEOF = true + } + if b.conn == nil && inTests { + return + } + b.conn.noteBodyReadFromHandler(b.stream, n, err) + return +} + +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriterState pointer inside is zeroed at the end of a +// request (in handlerDone) and calls on the responseWriter thereafter +// simply crash (caller's mistake), but the much larger responseWriterState +// and buffers are reused between multiple requests. +type responseWriter struct { + rws *responseWriterState +} + +// Optional http.ResponseWriter interfaces implemented. +var ( + _ http.CloseNotifier = (*responseWriter)(nil) + _ http.Flusher = (*responseWriter)(nil) + _ stringWriter = (*responseWriter)(nil) +) + +type responseWriterState struct { + // immutable within a request: + stream *stream + req *http.Request + body *requestBody // to close at end of request, if DATA frames didn't + conn *serverConn + + // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc + bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} + + // mutated by http.Handler goroutine: + handlerHeader http.Header // nil until called + snapHeader http.Header // snapshot of handlerHeader at WriteHeader time + trailers []string // set in writeChunk + status int // status code passed to WriteHeader + wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. + sentHeader bool // have we sent the header frame? 
+ handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState + + sentContentLen int64 // non-zero if handler set a Content-Length header + wroteBytes int64 + + closeNotifierMu sync.Mutex // guards closeNotifierCh + closeNotifierCh chan bool // nil until first used +} + +type chunkWriter struct{ rws *responseWriterState } + +func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } + +func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) > 0 } + +func (rws *responseWriterState) hasNonemptyTrailers() bool { + for _, trailer := range rws.trailers { + if _, ok := rws.handlerHeader[trailer]; ok { + return true + } + } + return false +} + +// declareTrailer is called for each Trailer header when the +// response header is written. It notes that a header will need to be +// written in the trailers at the end of the response. +func (rws *responseWriterState) declareTrailer(k string) { + k = http.CanonicalHeaderKey(k) + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. + rws.conn.logf("ignoring invalid trailer %q", k) + return + } + if !strSliceContains(rws.trailers, k) { + rws.trailers = append(rws.trailers, k) + } +} + +// writeChunk writes chunks from the bufio.Writer. But because +// bufio.Writer may bypass its chunking, sometimes p may be +// arbitrarily large. +// +// writeChunk is also responsible (on the first chunk) for sending the +// HEADER response. +func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { + if !rws.wroteHeader { + rws.writeHeader(200) + } + + isHeadResp := rws.req.Method == "HEAD" + if !rws.sentHeader { + rws.sentHeader = true + var ctype, clen string + if clen = rws.snapHeader.Get("Content-Length"); clen != "" { + rws.snapHeader.Del("Content-Length") + clen64, err := strconv.ParseInt(clen, 10, 64) + if err == nil && clen64 >= 0 { + rws.sentContentLen = clen64 + } else { + clen = "" + } + } + if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { + clen = strconv.Itoa(len(p)) + } + _, hasContentType := rws.snapHeader["Content-Type"] + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + ctype = http.DetectContentType(p) + } + var date string + if _, ok := rws.snapHeader["Date"]; !ok { + // TODO(bradfitz): be faster here, like net/http? measure. + date = time.Now().UTC().Format(http.TimeFormat) + } + + for _, v := range rws.snapHeader["Trailer"] { + foreachHeaderElement(v, rws.declareTrailer) + } + + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". 
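+		// A hedged usage sketch: a handler served over HTTP/2 can still
+		// request connection teardown the HTTP/1 way, and the code below
+		// translates it into a graceful GOAWAY:
+		//
+		//	func handler(w http.ResponseWriter, r *http.Request) {
+		//		w.Header().Set("Connection", "close") // stripped from the response; triggers shutdown
+		//		io.WriteString(w, "bye")
+		//	}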
+		if _, ok := rws.snapHeader["Connection"]; ok {
+			v := rws.snapHeader.Get("Connection")
+			delete(rws.snapHeader, "Connection")
+			if v == "close" {
+				rws.conn.startGracefulShutdown()
+			}
+		}
+
+		endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:      rws.stream.id,
+			httpResCode:   rws.status,
+			h:             rws.snapHeader,
+			endStream:     endStream,
+			contentType:   ctype,
+			contentLength: clen,
+			date:          date,
+		})
+		if err != nil {
+			rws.dirty = true
+			return 0, err
+		}
+		if endStream {
+			return 0, nil
+		}
+	}
+	if isHeadResp {
+		return len(p), nil
+	}
+	if len(p) == 0 && !rws.handlerDone {
+		return 0, nil
+	}
+
+	if rws.handlerDone {
+		rws.promoteUndeclaredTrailers()
+	}
+
+	// only send trailers if they have actually been defined by the
+	// server handler.
+	hasNonemptyTrailers := rws.hasNonemptyTrailers()
+	endStream := rws.handlerDone && !hasNonemptyTrailers
+	if len(p) > 0 || endStream {
+		// only send a 0 byte DATA frame if we're ending the stream.
+		if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+			rws.dirty = true
+			return 0, err
+		}
+	}
+
+	if rws.handlerDone && hasNonemptyTrailers {
+		err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+			streamID:  rws.stream.id,
+			h:         rws.handlerHeader,
+			trailers:  rws.trailers,
+			endStream: true,
+		})
+		if err != nil {
+			rws.dirty = true
+		}
+		return len(p), err
+	}
+	return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 7230
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
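+//
+// A minimal usage sketch (handler and trailer names hypothetical): once the
+// response has started streaming, a prefixed key set mid-handler is promoted
+// to a real trailer when the handler returns:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		w.(http.Flusher).Flush()                         // response headers go out
+//		w.Header().Set(TrailerPrefix+"Grpc-Status", "0") // sent as trailer "Grpc-Status"
+//	}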
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+	for k, vv := range rws.handlerHeader {
+		if !strings.HasPrefix(k, TrailerPrefix) {
+			continue
+		}
+		trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+		rws.declareTrailer(trailerKey)
+		rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+	}
+
+	if len(rws.trailers) > 1 {
+		sorter := sorterPool.Get().(*sorter)
+		sorter.SortStrings(rws.trailers)
+		sorterPool.Put(sorter)
+	}
+}
+
+func (w *responseWriter) Flush() {
+	rws := w.rws
+	if rws == nil {
+		panic("Flush called after Handler finished")
+	}
+	if rws.bw.Buffered() > 0 {
+		if err := rws.bw.Flush(); err != nil {
+			// Ignore the error. The frame writer already knows.
+			return
+		}
+	} else {
+		// The bufio.Writer won't call chunkWriter.Write
+		// (writeChunk) with zero bytes, so we have to do it
+		// ourselves to force the HTTP response header and/or
+		// final DATA frame (with END_STREAM) to be sent.
+		rws.writeChunk(nil)
+	}
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+	rws := w.rws
+	if rws == nil {
+		panic("CloseNotify called after Handler finished")
+	}
+	rws.closeNotifierMu.Lock()
+	ch := rws.closeNotifierCh
+	if ch == nil {
+		ch = make(chan bool, 1)
+		rws.closeNotifierCh = ch
+		cw := rws.stream.cw
+		go func() {
+			cw.Wait() // wait for close
+			ch <- true
+		}()
+	}
+	rws.closeNotifierMu.Unlock()
+	return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+	rws := w.rws
+	if rws == nil {
+		panic("Header called after Handler finished")
+	}
+	if rws.handlerHeader == nil {
+		rws.handlerHeader = make(http.Header)
+	}
+	return rws.handlerHeader
+}
+
+// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode.
+func checkWriteHeaderCode(code int) {
+	// Issue 22880: require valid WriteHeader status codes.
+	// For now we only enforce that it's three digits.
+	// In the future we might block things over 599 (600 and above aren't defined
+	// at http://httpwg.org/specs/rfc7231.html#status.codes)
+	// and we might block under 200 (once we have more mature 1xx support).
+	// But for now any three digits.
+	//
+	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
+	// no equivalent bogus thing we can realistically send in HTTP/2,
+	// so we'll consistently panic instead and help people find their bugs
+	// early. (We can't return an error from WriteHeader even if we wanted to.)
+ if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + +func (w *responseWriter) WriteHeader(code int) { + rws := w.rws + if rws == nil { + panic("WriteHeader called after Handler finished") + } + rws.writeHeader(code) +} + +func (rws *responseWriterState) writeHeader(code int) { + if !rws.wroteHeader { + checkWriteHeaderCode(code) + rws.wroteHeader = true + rws.status = code + if len(rws.handlerHeader) > 0 { + rws.snapHeader = cloneHeader(rws.handlerHeader) + } + } +} + +func cloneHeader(h http.Header) http.Header { + h2 := make(http.Header, len(h)) + for k, vv := range h { + vv2 := make([]string, len(vv)) + copy(vv2, vv) + h2[k] = vv2 + } + return h2 +} + +// The Life Of A Write is like this: +// +// * Handler calls w.Write or w.WriteString -> +// * -> rws.bw (*bufio.Writer) -> +// * (Handler might call Flush) +// * -> chunkWriter{rws} +// * -> responseWriterState.writeChunk(p []byte) +// * -> responseWriterState.writeChunk (most of the magic; see comment there) +func (w *responseWriter) Write(p []byte) (n int, err error) { + return w.write(len(p), p, "") +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + return w.write(len(s), nil, s) +} + +// either dataB or dataS is non-zero. +func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { + rws := w.rws + if rws == nil { + panic("Write called after Handler finished") + } + if !rws.wroteHeader { + w.WriteHeader(200) + } + if !bodyAllowedForStatus(rws.status) { + return 0, http.ErrBodyNotAllowed + } + rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set + if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { + // TODO: send a RST_STREAM + return 0, errors.New("http2: handler wrote more than declared Content-Length") + } + + if dataB != nil { + return rws.bw.Write(dataB) + } else { + return rws.bw.WriteString(dataS) + } +} + +func (w *responseWriter) handlerDone() { + rws := w.rws + dirty := rws.dirty + rws.handlerDone = true + w.Flush() + w.rws = nil + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } +} + +// Push errors. +var ( + ErrRecursivePush = errors.New("http2: recursive push not allowed") + ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") +) + +var _ http.Pusher = (*responseWriter)(nil) + +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { + st := w.rws.stream + sc := st.sc + sc.serveG.checkNotOn() + + // No recursive pushes: "PUSH_PROMISE frames MUST only be sent on a peer-initiated stream." + // http://tools.ietf.org/html/rfc7540#section-6.6 + if st.isPushed() { + return ErrRecursivePush + } + + if opts == nil { + opts = new(http.PushOptions) + } + + // Default options. + if opts.Method == "" { + opts.Method = "GET" + } + if opts.Header == nil { + opts.Header = http.Header{} + } + wantScheme := "http" + if w.rws.req.TLS != nil { + wantScheme = "https" + } + + // Validate the request. 
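+	// A hedged caller-side sketch (the pushed path is hypothetical):
+	//
+	//	if p, ok := w.(http.Pusher); ok {
+	//		_ = p.Push("/static/app.css", nil) // validated by the code below
+	//	}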
+	u, err := url.Parse(target)
+	if err != nil {
+		return err
+	}
+	if u.Scheme == "" {
+		if !strings.HasPrefix(target, "/") {
+			return fmt.Errorf("target must be an absolute URL or an absolute path: %q", target)
+		}
+		u.Scheme = wantScheme
+		u.Host = w.rws.req.Host
+	} else {
+		if u.Scheme != wantScheme {
+			return fmt.Errorf("cannot push URL with scheme %q from request with scheme %q", u.Scheme, wantScheme)
+		}
+		if u.Host == "" {
+			return errors.New("URL must have a host")
+		}
+	}
+	for k := range opts.Header {
+		if strings.HasPrefix(k, ":") {
+			return fmt.Errorf("promised request headers cannot include pseudo header %q", k)
+		}
+		// These headers are meaningful only if the request has a body,
+		// but PUSH_PROMISE requests cannot have a body.
+		// http://tools.ietf.org/html/rfc7540#section-8.2
+		// Also disallow Host, since the promised URL must be absolute.
+		switch strings.ToLower(k) {
+		case "content-length", "content-encoding", "trailer", "te", "expect", "host":
+			return fmt.Errorf("promised request headers cannot include %q", k)
+		}
+	}
+	if err := checkValidHTTP2RequestHeaders(opts.Header); err != nil {
+		return err
+	}
+
+	// The RFC effectively limits promised requests to GET and HEAD:
+	// "Promised requests MUST be cacheable [GET, HEAD, or POST], and MUST be safe [GET or HEAD]"
+	// http://tools.ietf.org/html/rfc7540#section-8.2
+	if opts.Method != "GET" && opts.Method != "HEAD" {
+		return fmt.Errorf("method %q must be GET or HEAD", opts.Method)
+	}
+
+	msg := &startPushRequest{
+		parent: st,
+		method: opts.Method,
+		url:    u,
+		header: cloneHeader(opts.Header),
+		done:   errChanPool.Get().(chan error),
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case sc.serveMsgCh <- msg:
+	}
+
+	select {
+	case <-sc.doneServing:
+		return errClientDisconnected
+	case <-st.cw:
+		return errStreamClosed
+	case err := <-msg.done:
+		errChanPool.Put(msg.done)
+		return err
+	}
+}
+
+type startPushRequest struct {
+	parent *stream
+	method string
+	url    *url.URL
+	header http.Header
+	done   chan error
+}
+
+func (sc *serverConn) startPush(msg *startPushRequest) {
+	sc.serveG.check()
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	// PUSH_PROMISE frames MUST only be sent on a peer-initiated stream that
+	// is in either the "open" or "half-closed (remote)" state.
+	if msg.parent.state != stateOpen && msg.parent.state != stateHalfClosedRemote {
+		// responseWriter.Push checks that the stream is peer-initiated.
+		msg.done <- errStreamClosed
+		return
+	}
+
+	// http://tools.ietf.org/html/rfc7540#section-6.6.
+	if !sc.pushEnabled {
+		msg.done <- http.ErrNotSupported
+		return
+	}
+
+	// PUSH_PROMISE frames must be sent in increasing order by stream ID, so
+	// we allocate an ID for the promised stream lazily, when the PUSH_PROMISE
+	// is written. Once the ID is allocated, we start the request handler.
+	allocatePromisedID := func() (uint32, error) {
+		sc.serveG.check()
+
+		// Check this again, just in case. Technically, we might have received
+		// an updated SETTINGS by the time we got around to writing this frame.
+		if !sc.pushEnabled {
+			return 0, http.ErrNotSupported
+		}
+		// http://tools.ietf.org/html/rfc7540#section-6.5.2.
+		if sc.curPushedStreams+1 > sc.clientMaxStreams {
+			return 0, ErrPushLimitReached
+		}
+
+		// http://tools.ietf.org/html/rfc7540#section-5.1.1.
+		// Streams initiated by the server MUST use even-numbered identifiers.
+ // A server that is unable to establish a new stream identifier can send a GOAWAY + // frame so that the client is forced to open a new connection for new streams. + if sc.maxPushPromiseID+2 >= 1<<31 { + sc.startGracefulShutdownInternal() + return 0, ErrPushLimitReached + } + sc.maxPushPromiseID += 2 + promisedID := sc.maxPushPromiseID + + // http://tools.ietf.org/html/rfc7540#section-8.2. + // Strictly speaking, the new stream should start in "reserved (local)", then + // transition to "half closed (remote)" after sending the initial HEADERS, but + // we start in "half closed (remote)" for simplicity. + // See further comments at the definition of stateHalfClosedRemote. + promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) + rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ + method: msg.method, + scheme: msg.url.Scheme, + authority: msg.url.Host, + path: msg.url.RequestURI(), + header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + }) + if err != nil { + // Should not happen, since we've already validated msg.url. + panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) + } + + go sc.runHandler(rw, req, sc.handler.ServeHTTP) + return promisedID, nil + } + + sc.writeFrame(FrameWriteRequest{ + write: &writePushPromise{ + streamID: msg.parent.id, + method: msg.method, + url: msg.url, + h: msg.header, + allocatePromisedID: allocatePromisedID, + }, + stream: msg.parent, + done: msg.done, + }) +} + +// foreachHeaderElement splits v according to the "#rule" construction +// in RFC 7230 section 7 and calls fn for each non-empty element. +func foreachHeaderElement(v string, fn func(string)) { + v = textproto.TrimString(v) + if v == "" { + return + } + if !strings.Contains(v, ",") { + fn(v) + return + } + for _, f := range strings.Split(v, ",") { + if f = textproto.TrimString(f); f != "" { + fn(f) + } + } +} + +// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 +var connHeaders = []string{ + "Connection", + "Keep-Alive", + "Proxy-Connection", + "Transfer-Encoding", + "Upgrade", +} + +// checkValidHTTP2RequestHeaders checks whether h is a valid HTTP/2 request, +// per RFC 7540 Section 8.1.2.2. +// The returned error is reported to users. +func checkValidHTTP2RequestHeaders(h http.Header) error { + for _, k := range connHeaders { + if _, ok := h[k]; ok { + return fmt.Errorf("request header %q is not valid in HTTP/2", k) + } + } + te := h["Te"] + if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { + return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) + } + return nil +} + +func new400Handler(err error) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + http.Error(w, err.Error(), http.StatusBadRequest) + } +} + +// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives +// disabled. See comments on h1ServerShutdownChan above for why +// the code is written this way. +func h1ServerKeepAlivesDisabled(hs *http.Server) bool { + var x interface{} = hs + type I interface { + doKeepAlives() bool + } + if hs, ok := x.(I); ok { + return !hs.doKeepAlives() + } + return false +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go new file mode 100644 index 00000000..c0c80d89 --- /dev/null +++ b/vendor/golang.org/x/net/http2/transport.go @@ -0,0 +1,2610 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"context"
+	"crypto/rand"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"math"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"net/http/httptrace"
+	"net/textproto"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/http/httpguts"
+	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/idna"
+)
+
+const (
+	// transportDefaultConnFlow is how many connection-level flow control
+	// tokens we give the server at start-up, past the default 64k.
+	transportDefaultConnFlow = 1 << 30
+
+	// transportDefaultStreamFlow is how many stream-level flow
+	// control tokens we announce to the peer, and how many bytes
+	// we buffer per stream.
+	transportDefaultStreamFlow = 4 << 20
+
+	// transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+	// a stream-level WINDOW_UPDATE for at a time.
+	transportDefaultStreamMinRefresh = 4 << 10
+
+	defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+	// DialTLS specifies an optional dial function for creating
+	// TLS connections for requests.
+	//
+	// If DialTLS is nil, tls.Dial is used.
+	//
+	// If the returned net.Conn has a ConnectionState method like tls.Conn,
+	// it will be used to set http.Response.TLS.
+	DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+	// TLSClientConfig specifies the TLS configuration to use with
+	// tls.Client. If nil, the default configuration is used.
+	TLSClientConfig *tls.Config
+
+	// ConnPool optionally specifies an alternate connection pool to use.
+	// If nil, the default is used.
+	ConnPool ClientConnPool
+
+	// DisableCompression, if true, prevents the Transport from
+	// requesting compression with an "Accept-Encoding: gzip"
+	// request header when the Request contains no existing
+	// Accept-Encoding value. If the Transport requests gzip on
+	// its own and gets a gzipped response, it's transparently
+	// decoded in the Response.Body. However, if the user
+	// explicitly requested gzip it is not automatically
+	// uncompressed.
+	DisableCompression bool
+
+	// AllowHTTP, if true, permits HTTP/2 requests using the insecure,
+	// plain-text "http" scheme. Note that this does not enable h2c support.
+	AllowHTTP bool
+
+	// MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+	// send in the initial settings frame. It is how many bytes
+	// of response headers are allowed. Unlike the http2 spec, zero here
+	// means to use a default limit (currently 10MB). If you actually
+	// want to advertise an unlimited value to the peer, Transport
+	// interprets the highest possible value here (0xffffffff or 1<<32-1)
+	// to mean no limit.
+	MaxHeaderListSize uint32
+
+	// StrictMaxConcurrentStreams controls whether the server's
+	// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
+	// globally. If false, new TCP connections are created to the
+	// server as needed to keep each under the per-connection
+	// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
+	// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
+	// a global limit and callers of RoundTrip block when needed,
+	// waiting for their turn.
+ StrictMaxConcurrentStreams bool + + // t1, if non-nil, is the standard library Transport using + // this transport. Its settings are used (but not its + // RoundTrip method, etc). + t1 *http.Transport + + connPoolOnce sync.Once + connPoolOrDef ClientConnPool // non-nil version of ConnPool +} + +func (t *Transport) maxHeaderListSize() uint32 { + if t.MaxHeaderListSize == 0 { + return 10 << 20 + } + if t.MaxHeaderListSize == 0xffffffff { + return 0 + } + return t.MaxHeaderListSize +} + +func (t *Transport) disableCompression() bool { + return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) +} + +// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. +// It returns an error if t1 has already been HTTP/2-enabled. +func ConfigureTransport(t1 *http.Transport) error { + _, err := configureTransport(t1) + return err +} + +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + +func (t *Transport) connPool() ClientConnPool { + t.connPoolOnce.Do(t.initConnPool) + return t.connPoolOrDef +} + +func (t *Transport) initConnPool() { + if t.ConnPool != nil { + t.connPoolOrDef = t.ConnPool + } else { + t.connPoolOrDef = &clientConnPool{t: t} + } +} + +// ClientConn is the state of a single HTTP/2 client connection to an +// HTTP/2 server. 
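+//
+// A minimal wiring sketch for obtaining such connections via the
+// ConfigureTransport path above (URL hypothetical; error handling elided):
+//
+//	t1 := &http.Transport{}
+//	if err := ConfigureTransport(t1); err != nil {
+//		log.Fatal(err)
+//	}
+//	client := &http.Client{Transport: t1}
+//	res, _ := client.Get("https://example.com/")
+//	defer res.Body.Close()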
+type ClientConn struct {
+	t         *Transport
+	tconn     net.Conn             // usually *tls.Conn, except specialized impls
+	tlsState  *tls.ConnectionState // nil only for specialized impls
+	reused    uint32               // whether conn is being reused; atomic
+	singleUse bool                 // whether being used for a single http.Request
+
+	// readLoop goroutine fields:
+	readerDone chan struct{} // closed on error
+	readerErr  error         // set before readerDone is closed
+
+	idleTimeout time.Duration // or 0 for never
+	idleTimer   *time.Timer
+
+	mu              sync.Mutex // guards following
+	cond            *sync.Cond // hold mu; broadcast on flow/closed changes
+	flow            flow       // our conn-level flow control quota (cs.flow is per stream)
+	inflow          flow       // peer's conn-level flow control
+	closing         bool
+	closed          bool
+	wantSettingsAck bool                     // we sent a SETTINGS frame and haven't heard back
+	goAway          *GoAwayFrame             // if non-nil, the GoAwayFrame we received
+	goAwayDebug     string                   // goAway frame's debug data, retained as a string
+	streams         map[uint32]*clientStream // client-initiated
+	nextStreamID    uint32
+	pendingRequests int                       // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+	pings           map[[8]byte]chan struct{} // in flight ping data to notification channel
+	bw              *bufio.Writer
+	br              *bufio.Reader
+	fr              *Framer
+	lastActive      time.Time
+	// Settings from peer: (also guarded by mu)
+	maxFrameSize          uint32
+	maxConcurrentStreams  uint32
+	peerMaxHeaderListSize uint64
+	initialWindowSize     uint32
+
+	hbuf    bytes.Buffer // HPACK encoder writes into this
+	henc    *hpack.Encoder
+	freeBuf [][]byte
+
+	wmu  sync.Mutex // held while writing; acquire AFTER mu if holding both
+	werr error      // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+	cc            *ClientConn
+	req           *http.Request
+	trace         *httptrace.ClientTrace // or nil
+	ID            uint32
+	resc          chan resAndError
+	bufPipe       pipe // buffered pipe with the flow-controlled response payload
+	startedWrite  bool // started request body write; guarded by cc.mu
+	requestedGzip bool
+	on100         func() // optional code to run if we get a 100 continue response
+
+	flow        flow  // guarded by cc.mu
+	inflow      flow  // guarded by cc.mu
+	bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+	readErr     error // sticky read error; owned by transportResponseBody.Read
+	stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+	didReset    bool  // whether we sent a RST_STREAM to the server; guarded by cc.mu
+
+	peerReset chan struct{} // closed on peer reset
+	resetErr  error         // populated before peerReset is closed
+
+	done chan struct{} // closed when the stream is removed from the cc.streams map; close calls guarded by cc.mu
+
+	// owned by clientConnReadLoop:
+	firstByte    bool  // got the first response byte
+	pastHeaders  bool  // got first MetaHeadersFrame (actual headers)
+	pastTrailers bool  // got optional second MetaHeadersFrame (trailers)
+	num1xx       uint8 // number of 1xx responses seen
+
+	trailer    http.Header  // accumulated trailers
+	resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel waits for the user to cancel a request or for the done
+// channel to be signaled. A non-nil error is returned only if the request was
+// canceled.
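+//
+// A hedged caller-side sketch of one cancellation path it watches
+// (timeout value hypothetical):
+//
+//	ctx, cancel := context.WithTimeout(req.Context(), 5*time.Second)
+//	defer cancel()
+//	req = req.WithContext(ctx) // ctx.Done() is one of the channels selected on below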
+func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
+	ctx := req.Context()
+	if req.Cancel == nil && ctx.Done() == nil {
+		return nil
+	}
+	select {
+	case <-req.Cancel:
+		return errRequestCanceled
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-done:
+		return nil
+	}
+}
+
+var got1xxFuncForTests func(int, textproto.MIMEHeader) error
+
+// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
+// if any. It returns nil if not set or if the Go version is too old.
+func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
+	if fn := got1xxFuncForTests; fn != nil {
+		return fn
+	}
+	return traceGot1xxResponseFunc(cs.trace)
+}
+
+// awaitRequestCancel waits for the user to cancel a request, its context to
+// expire, or for the request to be done (any way it might be removed from the
+// cc.streams map: peer reset, successful completion, TCP connection breakage,
+// etc). If the request is canceled, then cs will be canceled and closed.
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+	if err := awaitRequestCancel(req, cs.done); err != nil {
+		cs.cancelStream()
+		cs.bufPipe.CloseWithError(err)
+	}
+}
+
+func (cs *clientStream) cancelStream() {
+	cc := cs.cc
+	cc.mu.Lock()
+	didReset := cs.didReset
+	cs.didReset = true
+	cc.mu.Unlock()
+
+	if !didReset {
+		cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+		cc.forgetStreamID(cs.ID)
+	}
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+	select {
+	case <-cs.peerReset:
+		return cs.resetErr
+	case <-cs.done:
+		return errStreamClosed
+	default:
+		return nil
+	}
+}
+
+func (cs *clientStream) getStartedWrite() bool {
+	cc := cs.cc
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	return cs.startedWrite
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+	if err == nil {
+		panic("nil error")
+	}
+	cc := cs.cc
+	cc.mu.Lock()
+	cs.stopReqBody = err
+	cc.cond.Broadcast()
+	cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+	w   io.Writer
+	err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+	if *sew.err != nil {
+		return 0, *sew.err
+	}
+	n, err = sew.w.Write(p)
+	*sew.err = err
+	return
+}
+
+// noCachedConnError is the concrete type of ErrNoCachedConn, which
+// needs to be detected by net/http regardless of whether it's its
+// bundled version (in h2_bundle.go with a rewritten type name) or
+// from a user's x/net/http2. As such, it has a unique method name
+// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
+// isNoCachedConnError.
+type noCachedConnError struct{}
+
+func (noCachedConnError) IsHTTP2NoCachedConnError() {}
+func (noCachedConnError) Error() string             { return "http2: no cached connection was available" }
+
+// isNoCachedConnError reports whether err is of type noCachedConnError
+// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
+// may coexist in the same running program.
+func isNoCachedConnError(err error) bool {
+	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
+	return ok
+}
+
+var ErrNoCachedConn error = noCachedConnError{}
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+	// OnlyCachedConn controls whether RoundTripOpt may
+	// create a new TCP connection. If set true and
+	// no cached connection is available, RoundTripOpt
+	// will return ErrNoCachedConn.
+	OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr takes a given authority (a host/IP, or host:port / ip:port)
+// and returns a host:port. The port 443 is added if needed.
+func authorityAddr(scheme string, authority string) (addr string) {
+	host, port, err := net.SplitHostPort(authority)
+	if err != nil { // authority didn't have a port
+		port = "443"
+		if scheme == "http" {
+			port = "80"
+		}
+		host = authority
+	}
+	if a, err := idna.ToASCII(host); err == nil {
+		host = a
+	}
+	// IPv6 address literal, without a port:
+	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
+		return host + ":" + port
+	}
+	return net.JoinHostPort(host, port)
+}
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+	if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+		return nil, errors.New("http2: unsupported scheme")
+	}
+
+	addr := authorityAddr(req.URL.Scheme, req.URL.Host)
+	for retry := 0; ; retry++ {
+		cc, err := t.connPool().GetClientConn(req, addr)
+		if err != nil {
+			t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+			return nil, err
+		}
+		reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+		traceGotConn(req, cc, reused)
+		res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req)
+		if err != nil && retry <= 6 {
+			if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil {
+				// After the first retry, do exponential backoff with 10% jitter.
+				if retry == 0 {
+					continue
+				}
+				backoff := float64(uint(1) << (uint(retry) - 1))
+				backoff += backoff * (0.1 * mathrand.Float64())
+				select {
+				case <-time.After(time.Second * time.Duration(backoff)):
+					continue
+				case <-req.Context().Done():
+					return nil, req.Context().Err()
+				}
+			}
+		}
+		if err != nil {
+			t.vlogf("RoundTrip failure: %v", err)
+			return nil, err
+		}
+		return res, nil
+	}
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+	if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+		cp.closeIdleConnections()
+	}
+}
+
+var (
+	errClientConnClosed    = errors.New("http2: client conn is closed")
+	errClientConnUnusable  = errors.New("http2: client conn not usable")
+	errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+)
+
+// shouldRetryRequest is called by RoundTrip when a request fails to get
+// response headers. It is always called with a non-nil error.
+// It returns either a request to retry (either the same request, or a
+// modified clone), or an error if the request can't be replayed.
+func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) {
+	if !canRetryError(err) {
+		return nil, err
+	}
+	// If the Body is nil (or http.NoBody), it's safe to reuse
+	// this request and its Body.
+	if req.Body == nil || req.Body == http.NoBody {
+		return req, nil
+	}
+
+	// If the request body can be reset back to its original
+	// state via the optional req.GetBody, do that.
+	if req.GetBody != nil {
+		// TODO: consider a req.Body.Close here? or audit that all caller paths do?
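+		// A hedged caller-side sketch of a replayable body that makes this
+		// branch usable (payload and URL hypothetical; http.NewRequest already
+		// sets GetBody for common body types, shown explicitly for clarity):
+		//
+		//	payload := []byte(`{"k":"v"}`)
+		//	req, _ := http.NewRequest("POST", "https://example.com/api", bytes.NewReader(payload))
+		//	req.GetBody = func() (io.ReadCloser, error) {
+		//		return ioutil.NopCloser(bytes.NewReader(payload)), nil
+		//	}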
+ body, err := req.GetBody() + if err != nil { + return nil, err + } + newReq := *req + newReq.Body = body + return &newReq, nil + } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false +} + +func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) + if err != nil { + return nil, err + } + return t.newClientConn(tconn, singleUse) +} + +func (t *Transport) newTLSConfig(host string) *tls.Config { + cfg := new(tls.Config) + if t.TLSClientConfig != nil { + *cfg = *t.TLSClientConfig.Clone() + } + if !strSliceContains(cfg.NextProtos, NextProtoTLS) { + cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) + } + if cfg.ServerName == "" { + cfg.ServerName = host + } + return cfg +} + +func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { + if t.DialTLS != nil { + return t.DialTLS + } + return t.dialTLSDefault +} + +func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { + cn, err := tls.Dial(network, addr, cfg) + if err != nil { + return nil, err + } + if err := cn.Handshake(); err != nil { + return nil, err + } + if !cfg.InsecureSkipVerify { + if err := cn.VerifyHostname(cfg.ServerName); err != nil { + return nil, err + } + } + state := cn.ConnectionState() + if p := state.NegotiatedProtocol; p != NextProtoTLS { + return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) + } + if !state.NegotiatedProtocolIsMutual { + return nil, errors.New("http2: could not negotiate protocol mutually") + } + return cn, nil +} + +// disableKeepAlives reports whether connections should be closed as +// soon as possible after handling the first request. +func (t *Transport) disableKeepAlives() bool { + return t.t1 != nil && t.t1.DisableKeepAlives +} + +func (t *Transport) expectContinueTimeout() time.Duration { + if t.t1 == nil { + return 0 + } + return t.t1.ExpectContinueTimeout +} + +func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { + return t.newClientConn(c, false) +} + +func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + cc := &ClientConn{ + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), + } + if d := t.idleConnTimeout(); d != 0 { + cc.idleTimeout = d + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) + } + if VerboseLogs { + t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) + } + + cc.cond = sync.NewCond(&cc.mu) + cc.flow.add(int32(initialWindowSize)) + + // TODO: adjust this writer size to account for frame size + + // MTU + crypto/tls record padding. + cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) + cc.br = bufio.NewReader(c) + cc.fr = NewFramer(cc.bw, cc.br) + cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) + cc.fr.MaxHeaderListSize = t.maxHeaderListSize() + + // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on + // henc in response to SETTINGS frames? + cc.henc = hpack.NewEncoder(&cc.hbuf) + + if t.AllowHTTP { + cc.nextStreamID = 3 + } + + if cs, ok := c.(connectionStater); ok { + state := cs.ConnectionState() + cc.tlsState = &state + } + + initialSettings := []Setting{ + {ID: SettingEnablePush, Val: 0}, + {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, + } + if max := t.maxHeaderListSize(); max != 0 { + initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) + } + + cc.bw.Write(clientPreface) + cc.fr.WriteSettings(initialSettings...) + cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) + cc.inflow.add(transportDefaultConnFlow + initialWindowSize) + cc.bw.Flush() + if cc.werr != nil { + return nil, cc.werr + } + + go cc.readLoop() + return cc, nil +} + +func (cc *ClientConn) setGoAway(f *GoAwayFrame) { + cc.mu.Lock() + defer cc.mu.Unlock() + + old := cc.goAway + cc.goAway = f + + // Merge the previous and current GoAway error frames. + if cc.goAwayDebug == "" { + cc.goAwayDebug = string(f.DebugData()) + } + if old != nil && old.ErrCode != ErrCodeNo { + cc.goAway.ErrCode = old.ErrCode + } + last := f.LastStreamID + for streamID, cs := range cc.streams { + if streamID > last { + select { + case cs.resc <- resAndError{err: errClientConnGotGoAway}: + default: + } + } + } +} + +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. +func (cc *ClientConn) CanTakeNewRequest() bool { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.canTakeNewRequestLocked() +} + +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { + if cc.singleUse && cc.nextStreamID > 1 { + return + } + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. 
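+		// Illustrative numbers (hypothetical): with a server-advertised
+		// SETTINGS_MAX_CONCURRENT_STREAMS of 100 and 100 streams already
+		// in flight, strict mode still reports the conn usable and the
+		// request queues (pendingRequests), while non-strict mode reports
+		// it unusable so the pool dials another TCP connection.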
+		maxConcurrentOkay = true
+	} else {
+		maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
+	}
+
+	st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
+		int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
+	st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
+	return
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+	st := cc.idleStateLocked()
+	return st.canTakeNewRequest
+}
+
+// onIdleTimeout is called from a time.AfterFunc goroutine. It will
+// only be called when we're idle, but because we're coming from a new
+// goroutine, there could be a new request coming in at the same time,
+// so this simply calls the synchronized closeIfIdle to shut down this
+// connection. The timer could just call closeIfIdle, but this is more
+// clear.
+func (cc *ClientConn) onIdleTimeout() {
+	cc.closeIfIdle()
+}
+
+func (cc *ClientConn) closeIfIdle() {
+	cc.mu.Lock()
+	if len(cc.streams) > 0 {
+		cc.mu.Unlock()
+		return
+	}
+	cc.closed = true
+	nextID := cc.nextStreamID
+	// TODO: do clients send GOAWAY too? maybe? Just Close:
+	cc.mu.Unlock()
+
+	if VerboseLogs {
+		cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2)
+	}
+	cc.tconn.Close()
+}
+
+var shutdownEnterWaitStateHook = func() {}
+
+// Shutdown gracefully closes the client connection, waiting for running streams to complete.
+func (cc *ClientConn) Shutdown(ctx context.Context) error {
+	if err := cc.sendGoAway(); err != nil {
+		return err
+	}
+	// Wait for all in-flight streams to complete or connection to close
+	done := make(chan error, 1)
+	cancelled := false // guarded by cc.mu
+	go func() {
+		cc.mu.Lock()
+		defer cc.mu.Unlock()
+		for {
+			if len(cc.streams) == 0 || cc.closed {
+				cc.closed = true
+				done <- cc.tconn.Close()
+				break
+			}
+			if cancelled {
+				break
+			}
+			cc.cond.Wait()
+		}
+	}()
+	shutdownEnterWaitStateHook()
+	select {
+	case err := <-done:
+		return err
+	case <-ctx.Done():
+		cc.mu.Lock()
+		// Free the goroutine above
+		cancelled = true
+		cc.cond.Broadcast()
+		cc.mu.Unlock()
+		return ctx.Err()
+	}
+}
+
+func (cc *ClientConn) sendGoAway() error {
+	cc.mu.Lock()
+	defer cc.mu.Unlock()
+	cc.wmu.Lock()
+	defer cc.wmu.Unlock()
+	if cc.closing {
+		// GOAWAY sent already
+		return nil
+	}
+	// Send a graceful shutdown frame to server
+	maxStreamID := cc.nextStreamID
+	if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
+		return err
+	}
+	if err := cc.bw.Flush(); err != nil {
+		return err
+	}
+	// Prevent new requests
+	cc.closing = true
+	return nil
+}
+
+// Close closes the client connection immediately.
+//
+// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
+func (cc *ClientConn) Close() error {
+	cc.mu.Lock()
+	defer cc.cond.Broadcast()
+	defer cc.mu.Unlock()
+	err := errors.New("http2: client connection force closed via ClientConn.Close")
+	for id, cs := range cc.streams {
+		select {
+		case cs.resc <- resAndError{err: err}:
+		default:
+		}
+		cs.bufPipe.CloseWithError(err)
+		delete(cc.streams, id)
+	}
+	cc.closed = true
+	return cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameScratchBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
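+//
+// Illustrative sizes: with the spec-default 16KB SETTINGS_MAX_FRAME_SIZE the
+// buffer is 16KB, while a peer advertising 1MB is clamped to maxAllocFrameSize.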
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+// Per RFC 7540 Section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
+ }
+ if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("http2: invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+// actualContentLength returns a sanitized version of
+// req.ContentLength, where 0 actually means zero (not unknown) and -1
+// means unknown.
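+// A req.Body of http.NoBody is treated the same as a nil body: zero.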
+func actualContentLength(req *http.Request) int64 { + if req.Body == nil || req.Body == http.NoBody { + return 0 + } + if req.ContentLength != 0 { + return req.ContentLength + } + return -1 +} + +func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { + if err := checkConnHeaders(req); err != nil { + return nil, false, err + } + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + trailers, err := commaSeparatedTrailers(req) + if err != nil { + return nil, false, err + } + hasTrailers := trailers != "" + + cc.mu.Lock() + if err := cc.awaitOpenSlotForRequest(req); err != nil { + cc.mu.Unlock() + return nil, false, err + } + + body := req.Body + contentLen := actualContentLength(req) + hasBody := contentLen != 0 + + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + var requestedGzip bool + if !cc.t.disableCompression() && + req.Header.Get("Accept-Encoding") == "" && + req.Header.Get("Range") == "" && + req.Method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + requestedGzip = true + } + + // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is + // sent by writeRequestBody below, along with any Trailers, + // again in form HEADERS{1}, CONTINUATION{0,}) + hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) + if err != nil { + cc.mu.Unlock() + return nil, false, err + } + + cs := cc.newStream() + cs.req = req + cs.trace = httptrace.ContextClientTrace(req.Context()) + cs.requestedGzip = requestedGzip + bodyWriter := cc.t.getBodyWriterState(cs, body) + cs.on100 = bodyWriter.on100 + + cc.wmu.Lock() + endStream := !hasBody && !hasTrailers + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) + cc.wmu.Unlock() + traceWroteHeaders(cs.trace) + cc.mu.Unlock() + + if werr != nil { + if hasBody { + req.Body.Close() // per RoundTripper contract + bodyWriter.cancel() + } + cc.forgetStreamID(cs.ID) + // Don't bother sending a RST_STREAM (our write already failed; + // no need to keep writing) + traceWroteRequest(cs.trace, werr) + return nil, false, werr + } + + var respHeaderTimer <-chan time.Time + if hasBody { + bodyWriter.scheduleBodyWrite() + } else { + traceWroteRequest(cs.trace, nil) + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + + readLoopResCh := cs.resc + bodyWritten := false + ctx := req.Context() + + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { + res := re.res + if re.err != nil || res.StatusCode > 299 { + // On error or status code 3xx, 4xx, 5xx, etc abort any + // ongoing write, assuming that the server doesn't care + // about our request body. If the server replied with 1xx or + // 2xx, however, then assume the server DOES potentially + // want our body (e.g. full-duplex streaming: + // golang.org/issue/13444). 
If it turns out the server + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully + // we can keep it. + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWrite) + } + if re.err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), re.err + } + res.Request = req + res.TLS = cc.tlsState + return res, false, nil + } + + for { + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + case <-respHeaderTimer: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: + if !hasBody || bodyWritten { + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + } else { + bodyWriter.cancel() + cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) + } + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled + case <-cs.peerReset: + // processResetStream already removed the + // stream from the streams map; no need for + // forgetStreamID. + return nil, cs.getStartedWrite(), cs.resetErr + case err := <-bodyWriter.resc: + // Prefer the read loop's response, if available. Issue 16102. + select { + case re := <-readLoopResCh: + return handleReadLoopResponse(re) + default: + } + if err != nil { + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), err + } + bodyWritten = true + if d := cc.responseHeaderTimeout(); d != 0 { + timer := time.NewTimer(d) + defer timer.Stop() + respHeaderTimer = timer.C + } + } + } +} + +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
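+ // The goroutine is started at most once per call; both return
+ // paths above close(waitingForConn) to unblock it.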
+ if waitingForConn == nil {
+ waitingForConn = make(chan struct{})
+ go func() {
+ if err := awaitRequestCancel(req, waitingForConn); err != nil {
+ cc.mu.Lock()
+ waitingForConnErr = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+ }
+ }()
+ }
+ cc.pendingRequests++
+ cc.cond.Wait()
+ cc.pendingRequests--
+ if waitingForConnErr != nil {
+ return waitingForConnErr
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > maxFrameSize {
+ chunk = chunk[:maxFrameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but send a RST_STREAM with error code CANCEL
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or
+ // opt-out? Use some heuristic on the body type? Nagle-like
+ // timers? Based on 'n'? Only last chunk of this for loop,
+ // unless flow control tokens are low? For now, always.
+ // If we change this, see comment below.
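+ // (cc.wmu is still held here, so this flush is serialized with
+ // any other frame writes on the connection.)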
+ err = cc.bw.Flush() + } + cc.wmu.Unlock() + } + if err != nil { + return err + } + } + + if sentEnd { + // Already sent END_STREAM (which implies we have no + // trailers) and flushed, because currently all + // WriteData frames above get a flush. So we're done. + return nil + } + + var trls []byte + if hasTrailers { + cc.mu.Lock() + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } + } + + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + + cc.wmu.Lock() + defer cc.wmu.Unlock() + + // Two ways to send END_STREAM: either with trailers, or + // with an empty DATA frame. + if len(trls) > 0 { + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) + } else { + err = cc.fr.WriteData(cs.ID, true, nil) + } + if ferr := cc.bw.Flush(); ferr != nil && err == nil { + err = ferr + } + return err +} + +// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow +// control tokens from the server. +// It returns either the non-zero number of tokens taken or an error +// if the stream is dead. +func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if cc.closed { + return 0, errClientConnClosed + } + if cs.stopReqBody != nil { + return 0, cs.stopReqBody + } + if err := cs.checkResetOrDone(); err != nil { + return 0, err + } + if a := cs.flow.available(); a > 0 { + take := a + if int(take) > maxBytes { + + take = int32(maxBytes) // can't truncate int; take is int32 + } + if take > int32(cc.maxFrameSize) { + take = int32(cc.maxFrameSize) + } + cs.flow.take(take) + return take, nil + } + cc.cond.Wait() + } +} + +type badStringError struct { + what string + str string +} + +func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } + +// requires cc.mu be held. +func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { + cc.hbuf.Reset() + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return nil, err + } + + var path string + if req.Method != "CONNECT" { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return nil, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + for k, vv := range req.Header { + if !httpguts.ValidHeaderFieldName(k) { + return nil, fmt.Errorf("invalid HTTP header name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) + } + } + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). 
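+ // Pseudo-header fields must appear before all regular header
+ // fields (RFC 7540 Section 8.1.2.1).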
+ f(":authority", host) + m := req.Method + if m == "" { + m = http.MethodGet + } + f(":method", m) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + + return cc.hbuf.Bytes(), nil +} + +// shouldSendReqContentLength reports whether the http2.Transport should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// requires cc.mu be held. +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { + cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + + for k, vv := range req.Trailer { + // Transfer-Encoding, etc.. 
have already been filtered at the + // start of RoundTrip + lowKey := strings.ToLower(k) + for _, v := range vv { + cc.writeHeader(lowKey, v) + } + } + return cc.hbuf.Bytes(), nil +} + +func (cc *ClientConn) writeHeader(name, value string) { + if VerboseLogs { + log.Printf("http2: Transport encoding header %q = %q", name, value) + } + cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) +} + +type resAndError struct { + res *http.Response + err error +} + +// requires cc.mu be held. +func (cc *ClientConn) newStream() *clientStream { + cs := &clientStream{ + cc: cc, + ID: cc.nextStreamID, + resc: make(chan resAndError, 1), + peerReset: make(chan struct{}), + done: make(chan struct{}), + } + cs.flow.add(int32(cc.initialWindowSize)) + cs.flow.setConnFlow(&cc.flow) + cs.inflow.add(transportDefaultStreamFlow) + cs.inflow.setConnFlow(&cc.inflow) + cc.nextStreamID += 2 + cc.streams[cs.ID] = cs + return cs +} + +func (cc *ClientConn) forgetStreamID(id uint32) { + cc.streamByID(id, true) +} + +func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { + cc.mu.Lock() + defer cc.mu.Unlock() + cs := cc.streams[id] + if andRemove && cs != nil && !cc.closed { + cc.lastActive = time.Now() + delete(cc.streams, id) + if len(cc.streams) == 0 && cc.idleTimer != nil { + cc.idleTimer.Reset(cc.idleTimeout) + } + close(cs.done) + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() + } + return cs +} + +// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. +type clientConnReadLoop struct { + cc *ClientConn + closeWhenIdle bool +} + +// readLoop runs in its own goroutine and reads and dispatches frames. +func (cc *ClientConn) readLoop() { + rl := &clientConnReadLoop{cc: cc} + defer rl.cleanup() + cc.readerErr = rl.run() + if ce, ok := cc.readerErr.(ConnectionError); ok { + cc.wmu.Lock() + cc.fr.WriteGoAway(0, ErrCode(ce), nil) + cc.wmu.Unlock() + } +} + +// GoAwayError is returned by the Transport when the server closes the +// TCP connection after sending a GOAWAY frame. +type GoAwayError struct { + LastStreamID uint32 + ErrCode ErrCode + DebugData string +} + +func (e GoAwayError) Error() string { + return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", + e.LastStreamID, e.ErrCode, e.DebugData) +} + +func isEOFOrNetReadError(err error) bool { + if err == io.EOF { + return true + } + ne, ok := err.(*net.OpError) + return ok && ne.Op == "read" +} + +func (rl *clientConnReadLoop) cleanup() { + cc := rl.cc + defer cc.tconn.Close() + defer cc.t.connPool().MarkDead(cc) + defer close(cc.readerDone) + + if cc.idleTimer != nil { + cc.idleTimer.Stop() + } + + // Close any response bodies if the server closes prematurely. + // TODO: also do this if we've written the headers but not + // gotten a response yet. 
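+ // Translate the raw reader error into something more descriptive
+ // before fanning it out to every open stream below.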
+ err := cc.readerErr + cc.mu.Lock() + if cc.goAway != nil && isEOFOrNetReadError(err) { + err = GoAwayError{ + LastStreamID: cc.goAway.LastStreamID, + ErrCode: cc.goAway.ErrCode, + DebugData: cc.goAwayDebug, + } + } else if err == io.EOF { + err = io.ErrUnexpectedEOF + } + for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed + select { + case cs.resc <- resAndError{err: err}: + default: + } + close(cs.done) + } + cc.closed = true + cc.cond.Broadcast() + cc.mu.Unlock() +} + +func (rl *clientConnReadLoop) run() error { + cc := rl.cc + rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse + gotReply := false // ever saw a HEADERS reply + gotSettings := false + for { + f, err := cc.fr.ReadFrame() + if err != nil { + cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) + } + if se, ok := err.(StreamError); ok { + if cs := cc.streamByID(se.StreamID, false); cs != nil { + cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) + if se.Cause == nil { + se.Cause = cc.fr.errDetail + } + rl.endStreamError(cs, se) + } + continue + } else if err != nil { + return err + } + if VerboseLogs { + cc.vlogf("http2: Transport received %s", summarizeFrame(f)) + } + if !gotSettings { + if _, ok := f.(*SettingsFrame); !ok { + cc.logf("protocol error: received %T before a SETTINGS frame", f) + return ConnectionError(ErrCodeProtocol) + } + gotSettings = true + } + maybeIdle := false // whether frame might transition us to idle + + switch f := f.(type) { + case *MetaHeadersFrame: + err = rl.processHeaders(f) + maybeIdle = true + gotReply = true + case *DataFrame: + err = rl.processData(f) + maybeIdle = true + case *GoAwayFrame: + err = rl.processGoAway(f) + maybeIdle = true + case *RSTStreamFrame: + err = rl.processResetStream(f) + maybeIdle = true + case *SettingsFrame: + err = rl.processSettings(f) + case *PushPromiseFrame: + err = rl.processPushPromise(f) + case *WindowUpdateFrame: + err = rl.processWindowUpdate(f) + case *PingFrame: + err = rl.processPing(f) + default: + cc.logf("Transport: unhandled response frame type %T", f) + } + if err != nil { + if VerboseLogs { + cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) + } + return err + } + if rl.closeWhenIdle && gotReply && maybeIdle { + cc.closeIfIdle() + } + } +} + +func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, false) + if cs == nil { + // We'd get here if we canceled a request while the + // server had its response still in flight. So if this + // was just something we canceled, ignore it. + return nil + } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. 
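+ // Hence: defer the close only when a request body may still be
+ // in flight; otherwise close the stream immediately.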
+ if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } + if !cs.firstByte { + if cs.trace != nil { + // TODO(bradfitz): move first response byte earlier, + // when we first read the 9 byte header, not waiting + // until all the HEADERS+CONTINUATION frames have been + // merged. This works for now. + traceFirstResponseByte(cs.trace) + } + cs.firstByte = true + } + if !cs.pastHeaders { + cs.pastHeaders = true + } else { + return rl.processTrailers(cs, f) + } + + res, err := rl.handleResponse(cs, f) + if err != nil { + if _, ok := err.(ConnectionError); ok { + return err + } + // Any other error type is a stream error. + cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) + cs.resc <- resAndError{err: err} + return nil // return nil from process* funcs to keep conn alive + } + if res == nil { + // (nil, nil) special case. See handleResponse docs. + return nil + } + cs.resTrailer = &res.Trailer + cs.resc <- resAndError{res: res} + return nil +} + +// may return error types nil, or ConnectionError. Any other error value +// is a StreamError of type ErrCodeProtocol. The returned error in that case +// is the detail. +// +// As a special case, handleResponse may return (nil, nil) to skip the +// frame (currently only used for 1xx responses). +func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { + if f.Truncated { + return nil, errResponseHeaderListSize + } + + status := f.PseudoValue("status") + if status == "" { + return nil, errors.New("malformed response from server: missing status pseudo header") + } + statusCode, err := strconv.Atoi(status) + if err != nil { + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") + } + + header := make(http.Header) + res := &http.Response{ + Proto: "HTTP/2.0", + ProtoMajor: 2, + Header: header, + StatusCode: statusCode, + Status: status + " " + http.StatusText(statusCode), + } + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + if key == "Trailer" { + t := res.Trailer + if t == nil { + t = make(http.Header) + res.Trailer = t + } + foreachHeaderElement(hf.Value, func(v string) { + t[http.CanonicalHeaderKey(v)] = nil + }) + } else { + header[key] = append(header[key], hf.Value) + } + } + + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + + streamEnded := f.StreamEnded() + isHead := cs.req.Method == "HEAD" + if !streamEnded || isHead { + res.ContentLength = -1 + if clens := res.Header["Content-Length"]; len(clens) == 1 { + if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { + res.ContentLength = clen64 + } else { + // TODO: care? unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } else if len(clens) > 1 { + // TODO: care? 
unlike http/1, it won't mess up our framing, so it's + // more safe smuggling-wise to ignore. + } + } + + if streamEnded || isHead { + res.Body = noBody + return res, nil + } + + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} + cs.bytesRemain = res.ContentLength + res.Body = transportResponseBody{cs} + go cs.awaitRequestCancel(cs.req) + + if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { + res.Header.Del("Content-Encoding") + res.Header.Del("Content-Length") + res.ContentLength = -1 + res.Body = &gzipReader{body: res.Body} + res.Uncompressed = true + } + return res, nil +} + +func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { + if cs.pastTrailers { + // Too many HEADERS frames for this stream. + return ConnectionError(ErrCodeProtocol) + } + cs.pastTrailers = true + if !f.StreamEnded() { + // We expect that any headers for trailers also + // has END_STREAM. + return ConnectionError(ErrCodeProtocol) + } + if len(f.PseudoFields()) > 0 { + // No pseudo header fields are defined for trailers. + // TODO: ConnectionError might be overly harsh? Check. + return ConnectionError(ErrCodeProtocol) + } + + trailer := make(http.Header) + for _, hf := range f.RegularFields() { + key := http.CanonicalHeaderKey(hf.Name) + trailer[key] = append(trailer[key], hf.Value) + } + cs.trailer = trailer + + rl.endStream(cs) + return nil +} + +// transportResponseBody is the concrete type of Transport.RoundTrip's +// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. +// On Close it sends RST_STREAM if EOF wasn't already seen. +type transportResponseBody struct { + cs *clientStream +} + +func (b transportResponseBody) Read(p []byte) (n int, err error) { + cs := b.cs + cc := cs.cc + + if cs.readErr != nil { + return 0, cs.readErr + } + n, err = b.cs.bufPipe.Read(p) + if cs.bytesRemain != -1 { + if int64(n) > cs.bytesRemain { + n = int(cs.bytesRemain) + if err == nil { + err = errors.New("net/http: server replied with more than declared Content-Length; truncated") + cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) + } + cs.readErr = err + return int(cs.bytesRemain), err + } + cs.bytesRemain -= int64(n) + if err == io.EOF && cs.bytesRemain > 0 { + err = io.ErrUnexpectedEOF + cs.readErr = err + return n, err + } + } + if n == 0 { + // No flow control tokens to send back. + return + } + + cc.mu.Lock() + defer cc.mu.Unlock() + + var connAdd, streamAdd int32 + // Check the conn-level first, before the stream-level. + if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { + connAdd = transportDefaultConnFlow - v + cc.inflow.add(connAdd) + } + if err == nil { // No need to refresh if the stream is over or failed. + // Consider any buffered body data (read from the conn but not + // consumed by the client) when computing flow control for this + // stream. 
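+ // Only refresh the stream window once it drops below the refresh
+ // threshold, so small Reads don't each cost a WINDOW_UPDATE frame.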
+ v := int(cs.inflow.available()) + cs.bufPipe.Len() + if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { + streamAdd = int32(transportDefaultStreamFlow - v) + cs.inflow.add(streamAdd) + } + } + if connAdd != 0 || streamAdd != 0 { + cc.wmu.Lock() + defer cc.wmu.Unlock() + if connAdd != 0 { + cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) + } + if streamAdd != 0 { + cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) + } + cc.bw.Flush() + } + return +} + +var errClosedResponseBody = errors.New("http2: response body closed") + +func (b transportResponseBody) Close() error { + cs := b.cs + cc := cs.cc + + serverSentStreamEnd := cs.bufPipe.Err() == io.EOF + unread := cs.bufPipe.Len() + + if unread > 0 || !serverSentStreamEnd { + cc.mu.Lock() + cc.wmu.Lock() + if !serverSentStreamEnd { + cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true + } + // Return connection-level flow control. + if unread > 0 { + cc.inflow.add(int32(unread)) + cc.fr.WriteWindowUpdate(0, uint32(unread)) + } + cc.bw.Flush() + cc.wmu.Unlock() + cc.mu.Unlock() + } + + cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) + return nil +} + +func (rl *clientConnReadLoop) processData(f *DataFrame) error { + cc := rl.cc + cs := cc.streamByID(f.StreamID, f.StreamEnded()) + data := f.Data() + if cs == nil { + cc.mu.Lock() + neverSent := cc.nextStreamID + cc.mu.Unlock() + if f.StreamID >= neverSent { + // We never asked for this. + cc.logf("http2: Transport received unsolicited DATA frame; closing connection") + return ConnectionError(ErrCodeProtocol) + } + // We probably did ask for this, but canceled. Just ignore it. + // TODO: be stricter here? only silently ignore things which + // we canceled, but not things which were closed normally + // by the peer? Tough without accumulating too much state. + + // But at least return their flow control: + if f.Length > 0 { + cc.mu.Lock() + cc.inflow.add(int32(f.Length)) + cc.mu.Unlock() + + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(f.Length)) + cc.bw.Flush() + cc.wmu.Unlock() + } + return nil + } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + if f.Length > 0 { + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } + // Check connection-level flow control. + cc.mu.Lock() + if cs.inflow.available() >= int32(f.Length) { + cs.inflow.take(int32(f.Length)) + } else { + cc.mu.Unlock() + return ConnectionError(ErrCodeFlowControl) + } + // Return any padded flow control now, since we won't + // refund it later on body reads. + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. 
+ didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) + cc.wmu.Lock() + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } + cc.bw.Flush() + cc.wmu.Unlock() + } + cc.mu.Unlock() + + if len(data) > 0 && !didReset { + if _, err := cs.bufPipe.Write(data); err != nil { + rl.endStreamError(cs, err) + return err + } + } + } + + if f.StreamEnded() { + rl.endStream(cs) + } + return nil +} + +var errInvalidTrailers = errors.New("http2: invalid trailers") + +func (rl *clientConnReadLoop) endStream(cs *clientStream) { + // TODO: check that any declared content-length matches, like + // server.go's (*stream).endStream method. + rl.endStreamError(cs, nil) +} + +func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { + var code func() + if err == nil { + err = io.EOF + code = cs.copyTrailers + } + if isConnectionCloseRequest(cs.req) { + rl.closeWhenIdle = true + } + cs.bufPipe.closeWithErrorAndCode(err, code) + + select { + case cs.resc <- resAndError{err: err}: + default: + } +} + +func (cs *clientStream) copyTrailers() { + for k, vv := range cs.trailer { + t := cs.resTrailer + if *t == nil { + *t = make(http.Header) + } + (*t)[k] = vv + } +} + +func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { + cc := rl.cc + cc.t.connPool().MarkDead(cc) + if f.ErrCode != 0 { + // TODO: deal with GOAWAY more. particularly the error code + cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) + } + cc.setGoAway(f) + return nil +} + +func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { + cc := rl.cc + cc.mu.Lock() + defer cc.mu.Unlock() + + if f.IsAck() { + if cc.wantSettingsAck { + cc.wantSettingsAck = false + return nil + } + return ConnectionError(ErrCodeProtocol) + } + + err := f.ForeachSetting(func(s Setting) error { + switch s.ID { + case SettingMaxFrameSize: + cc.maxFrameSize = s.Val + case SettingMaxConcurrentStreams: + cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) + case SettingInitialWindowSize: + // Values above the maximum flow-control + // window size of 2^31-1 MUST be treated as a + // connection error (Section 5.4.1) of type + // FLOW_CONTROL_ERROR. + if s.Val > math.MaxInt32 { + return ConnectionError(ErrCodeFlowControl) + } + + // Adjust flow control of currently-open + // frames by the difference of the old initial + // window size and this one. + delta := int32(s.Val) - int32(cc.initialWindowSize) + for _, cs := range cc.streams { + cs.flow.add(delta) + } + cc.cond.Broadcast() + + cc.initialWindowSize = s.Val + default: + // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. 
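+ // Unknown or unsupported settings must be ignored (RFC 7540
+ // Section 6.5.2), so just log and continue.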
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+ return cc.werr
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := streamError(cs.ID, f.ErrCode)
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ return nil
+}
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+ c := make(chan struct{})
+ // Generate a random payload
+ var p [8]byte
+ for {
+ if _, err := rand.Read(p[:]); err != nil {
+ return err
+ }
+ cc.mu.Lock()
+ // check for dup before insert
+ if _, found := cc.pings[p]; !found {
+ cc.pings[p] = c
+ cc.mu.Unlock()
+ break
+ }
+ cc.mu.Unlock()
+ }
+ cc.wmu.Lock()
+ if err := cc.fr.WritePing(false, p); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ if err := cc.bw.Flush(); err != nil {
+ cc.wmu.Unlock()
+ return err
+ }
+ cc.wmu.Unlock()
+ select {
+ case <-c:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.readerDone:
+ // connection closed
+ return cc.readerErr
+ }
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ // If ack, notify listener if any
+ if c, ok := cc.pings[f.Data]; ok {
+ close(c)
+ delete(cc.pings, f.Data)
+ }
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: map err to more interesting error codes, once the
+ // HTTP community comes up with some. But currently for
+ // RST_STREAM there's no equivalent to GOAWAY frame's debug
+ // data, and the error codes are all pretty vague ("cancel").
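+ // Best effort: write errors are ignored here. cc.werr is sticky,
+ // so a failed write will surface on the next serialized write.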
+ cc.wmu.Lock() + cc.fr.WriteRSTStream(streamID, code) + cc.bw.Flush() + cc.wmu.Unlock() +} + +var ( + errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") +) + +func (cc *ClientConn) logf(format string, args ...interface{}) { + cc.t.logf(format, args...) +} + +func (cc *ClientConn) vlogf(format string, args ...interface{}) { + cc.t.vlogf(format, args...) +} + +func (t *Transport) vlogf(format string, args ...interface{}) { + if VerboseLogs { + t.logf(format, args...) + } +} + +func (t *Transport) logf(format string, args ...interface{}) { + log.Printf(format, args...) +} + +var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) + +func strSliceContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +type erringRoundTripper struct{ err error } + +func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } + +// gzipReader wraps a response body so it can lazily +// call gzip.NewReader on the first call to Read +type gzipReader struct { + body io.ReadCloser // underlying Response.Body + zr *gzip.Reader // lazily-initialized gzip reader + zerr error // sticky error +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + if gz.zerr != nil { + return 0, gz.zerr + } + if gz.zr == nil { + gz.zr, err = gzip.NewReader(gz.body) + if err != nil { + gz.zerr = err + return 0, err + } + } + return gz.zr.Read(p) +} + +func (gz *gzipReader) Close() error { + return gz.body.Close() +} + +type errorReader struct{ err error } + +func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } + +// bodyWriterState encapsulates various state around the Transport's writing +// of the request body, particularly regarding doing delayed writes of the body +// when the request contains "Expect: 100-continue". +type bodyWriterState struct { + cs *clientStream + timer *time.Timer // if non-nil, we're doing a delayed write + fnonce *sync.Once // to call fn with + fn func() // the code to run in the goroutine, writing the body + resc chan error // result of fn's execution + delay time.Duration // how long we should delay a delayed write for +} + +func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { + s.cs = cs + if body == nil { + return + } + resc := make(chan error, 1) + s.resc = resc + s.fn = func() { + cs.cc.mu.Lock() + cs.startedWrite = true + cs.cc.mu.Unlock() + resc <- cs.writeRequestBody(body, cs.req.Body) + } + s.delay = t.expectContinueTimeout() + if s.delay == 0 || + !httpguts.HeaderValuesContainsToken( + cs.req.Header["Expect"], + "100-continue") { + return + } + s.fnonce = new(sync.Once) + + // Arm the timer with a very large duration, which we'll + // intentionally lower later. It has to be large now because + // we need a handle to it before writing the headers, but the + // s.delay value is defined to not start until after the + // request headers were written. 
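+ // A year is effectively "never": scheduleBodyWrite re-arms the
+ // timer with the real s.delay once the headers have been written.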
+ const hugeDuration = 365 * 24 * time.Hour
+ s.timer = time.AfterFunc(hugeDuration, func() {
+ s.fnonce.Do(s.fn)
+ })
+ return
+}
+
+func (s bodyWriterState) cancel() {
+ if s.timer != nil {
+ s.timer.Stop()
+ }
+}
+
+func (s bodyWriterState) on100() {
+ if s.timer == nil {
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
+ return
+ }
+ s.timer.Stop()
+ go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+ if s.timer == nil {
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
+ go s.fn()
+ return
+ }
+ traceWait100Continue(s.cs.trace)
+ if s.timer.Stop() {
+ s.timer.Reset(s.delay)
+ }
+}
+
+// isConnectionCloseRequest reports whether req should use its own
+// connection for a single request and then close the connection.
+func isConnectionCloseRequest(req *http.Request) bool {
+ return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol but
+// converts panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there's already a cached connection to the host.
+// (The field is exported so it can be accessed via reflect from net/http; tested
+// by TestNoDialH2RoundTripperType)
+type noDialH2RoundTripper struct{ *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.Transport.RoundTrip(req)
+ if isNoCachedConnError(err) {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
+
+func (t *Transport) idleConnTimeout() time.Duration {
+ if t.t1 != nil {
+ return t.t1.IdleConnTimeout
+ }
+ return 0
+}
+
+func traceGetConn(req *http.Request, hostPort string) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GetConn == nil {
+ return
+ }
+ trace.GetConn(hostPort)
+}
+
+func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ ci.Reused = reused
+ cc.mu.Lock()
+ ci.WasIdle = len(cc.streams) == 0 && reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Now().Sub(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *httptrace.ClientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *httptrace.ClientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *httptrace.ClientTrace) {
+ if trace != nil &&
trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } +} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go new file mode 100644 index 00000000..3849bc26 --- /dev/null +++ b/vendor/golang.org/x/net/http2/write.go @@ -0,0 +1,365 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/url" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +// writeFramer is implemented by any type that is used to write frames. +type writeFramer interface { + writeFrame(writeContext) error + + // staysWithinBuffer reports whether this writer promises that + // it will only write less than or equal to size bytes, and it + // won't Flush the write context. + staysWithinBuffer(size int) bool +} + +// writeContext is the interface needed by the various frame writer +// types below. All the writeFrame methods below are scheduled via the +// frame writing scheduler (see writeScheduler in writesched.go). +// +// This interface is implemented by *serverConn. +// +// TODO: decide whether to a) use this in the client code (which didn't +// end up using this yet, because it has a simpler design, not +// currently implementing priorities), or b) delete this and +// make the server code a bit more concrete. +type writeContext interface { + Framer() *Framer + Flush() error + CloseConn() error + // HeaderEncoder returns an HPACK encoder that writes to the + // returned buffer. + HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) +} + +// writeEndsStream reports whether w writes a frame that will transition +// the stream to a half-closed local state. This returns false for RST_STREAM, +// which closes the entire stream (not just the local half). +func writeEndsStream(w writeFramer) bool { + switch v := w.(type) { + case *writeData: + return v.endStream + case *writeResHeaders: + return v.endStream + case nil: + // This can only happen if the caller reuses w after it's + // been intentionally nil'ed out to prevent use. Keep this + // here to catch future refactoring breaking it. + panic("writeEndsStream called on nil writeFramer") + } + return false +} + +type flushFrameWriter struct{} + +func (flushFrameWriter) writeFrame(ctx writeContext) error { + return ctx.Flush() +} + +func (flushFrameWriter) staysWithinBuffer(max int) bool { return false } + +type writeSettings []Setting + +func (s writeSettings) staysWithinBuffer(max int) bool { + const settingSize = 6 // uint16 + uint32 + return frameHeaderLen+settingSize*len(s) <= max + +} + +func (s writeSettings) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettings([]Setting(s)...) 
+} + +type writeGoAway struct { + maxStreamID uint32 + code ErrCode +} + +func (p *writeGoAway) writeFrame(ctx writeContext) error { + err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) + ctx.Flush() // ignore error: we're hanging up on them anyway + return err +} + +func (*writeGoAway) staysWithinBuffer(max int) bool { return false } // flushes + +type writeData struct { + streamID uint32 + p []byte + endStream bool +} + +func (w *writeData) String() string { + return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) +} + +func (w *writeData) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) +} + +func (w *writeData) staysWithinBuffer(max int) bool { + return frameHeaderLen+len(w.p) <= max +} + +// handlerPanicRST is the message sent from handler goroutines when +// the handler panics. +type handlerPanicRST struct { + StreamID uint32 +} + +func (hp handlerPanicRST) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) +} + +func (hp handlerPanicRST) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (se StreamError) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) +} + +func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +type writePingAck struct{ pf *PingFrame } + +func (w writePingAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(true, w.pf.Data) +} + +func (w writePingAck) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.pf.Data) <= max } + +type writeSettingsAck struct{} + +func (writeSettingsAck) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteSettingsAck() +} + +func (writeSettingsAck) staysWithinBuffer(max int) bool { return frameHeaderLen <= max } + +// splitHeaderBlock splits headerBlock into fragments so that each fragment fits +// in a single frame, then calls fn for each fragment. firstFrag/lastFrag are true +// for the first/last fragment, respectively. +func splitHeaderBlock(ctx writeContext, headerBlock []byte, fn func(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error) error { + // For now we're lazy and just pick the minimum MAX_FRAME_SIZE + // that all peers must support (16KB). Later we could care + // more and send larger frames if the peer advertised it, but + // there's little point. Most headers are small anyway (so we + // generally won't have CONTINUATION frames), and extra frames + // only waste 9 bytes anyway. + const maxFrameSize = 16384 + + first := true + for len(headerBlock) > 0 { + frag := headerBlock + if len(frag) > maxFrameSize { + frag = frag[:maxFrameSize] + } + headerBlock = headerBlock[len(frag):] + if err := fn(ctx, frag, first, len(headerBlock) == 0); err != nil { + return err + } + first = false + } + return nil +} + +// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames +// for HTTP response headers or trailers from a server handler. +type writeResHeaders struct { + streamID uint32 + httpResCode int // 0 means no ":status" line + h http.Header // may be nil + trailers []string // if non-nil, which keys of h to write. nil means all. 
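+ // endStream is whether the HEADERS frame should also end the stream.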
+ endStream bool + + date string + contentType string + contentLength string +} + +func encKV(enc *hpack.Encoder, k, v string) { + if VerboseLogs { + log.Printf("http2: server encoding header %q = %q", k, v) + } + enc.WriteField(hpack.HeaderField{Name: k, Value: v}) +} + +func (w *writeResHeaders) staysWithinBuffer(max int) bool { + // TODO: this is a common one. It'd be nice to return true + // here and get into the fast path if we could be clever and + // calculate the size fast enough, or at least a conservative + // upper bound that usually fires. (Maybe if w.h and + // w.trailers are nil, so we don't need to enumerate it.) + // Otherwise I'm afraid that just calculating the length to + // answer this question would be slower than the ~2µs benefit. + return false +} + +func (w *writeResHeaders) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + if w.httpResCode != 0 { + encKV(enc, ":status", httpCodeString(w.httpResCode)) + } + + encodeHeaders(enc, w.h, w.trailers) + + if w.contentType != "" { + encKV(enc, "content-type", w.contentType) + } + if w.contentLength != "" { + encKV(enc, "content-length", w.contentLength) + } + if w.date != "" { + encKV(enc, "date", w.date) + } + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 && w.trailers == nil { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writeResHeaders) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: frag, + EndStream: w.endStream, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +// writePushPromise is a request to write a PUSH_PROMISE and 0+ CONTINUATION frames. +type writePushPromise struct { + streamID uint32 // pusher stream + method string // for :method + url *url.URL // for :scheme, :authority, :path + h http.Header + + // Creates an ID for a pushed stream. This runs on serveG just before + // the frame is written. The returned ID is copied to promisedID. 
+ allocatePromisedID func() (uint32, error) + promisedID uint32 +} + +func (w *writePushPromise) staysWithinBuffer(max int) bool { + // TODO: see writeResHeaders.staysWithinBuffer + return false +} + +func (w *writePushPromise) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + + encKV(enc, ":method", w.method) + encKV(enc, ":scheme", w.url.Scheme) + encKV(enc, ":authority", w.url.Host) + encKV(enc, ":path", w.url.RequestURI()) + encodeHeaders(enc, w.h, nil) + + headerBlock := buf.Bytes() + if len(headerBlock) == 0 { + panic("unexpected empty hpack") + } + + return splitHeaderBlock(ctx, headerBlock, w.writeHeaderBlock) +} + +func (w *writePushPromise) writeHeaderBlock(ctx writeContext, frag []byte, firstFrag, lastFrag bool) error { + if firstFrag { + return ctx.Framer().WritePushPromise(PushPromiseParam{ + StreamID: w.streamID, + PromiseID: w.promisedID, + BlockFragment: frag, + EndHeaders: lastFrag, + }) + } else { + return ctx.Framer().WriteContinuation(w.streamID, lastFrag, frag) + } +} + +type write100ContinueHeadersFrame struct { + streamID uint32 +} + +func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { + enc, buf := ctx.HeaderEncoder() + buf.Reset() + encKV(enc, ":status", "100") + return ctx.Framer().WriteHeaders(HeadersFrameParam{ + StreamID: w.streamID, + BlockFragment: buf.Bytes(), + EndStream: false, + EndHeaders: true, + }) +} + +func (w write100ContinueHeadersFrame) staysWithinBuffer(max int) bool { + // Sloppy but conservative: + return 9+2*(len(":status")+len("100")) <= max +} + +type writeWindowUpdate struct { + streamID uint32 // or 0 for conn-level + n uint32 +} + +func (wu writeWindowUpdate) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } + +func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { + return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) +} + +// encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) +// is encoded only if k is in keys. +func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { + if keys == nil { + sorter := sorterPool.Get().(*sorter) + // Using defer here, since the returned keys from the + // sorter.Keys method is only valid until the sorter + // is returned: + defer sorterPool.Put(sorter) + keys = sorter.Keys(h) + } + for _, k := range keys { + vv := h[k] + k = lowerHeader(k) + if !validWireHeaderFieldName(k) { + // Skip it as backup paranoia. Per + // golang.org/issue/14048, these should + // already be rejected at a higher level. + continue + } + isTE := k == "transfer-encoding" + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // TODO: return an error? golang.org/issue/14048 + // For now just omit it. + continue + } + // TODO: more of "8.1.2.2 Connection-Specific Header Fields" + if isTE && v != "trailers" { + continue + } + encKV(enc, k, v) + } + } +} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go new file mode 100644 index 00000000..4fe30730 --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -0,0 +1,242 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "fmt" + +// WriteScheduler is the interface implemented by HTTP/2 write schedulers. +// Methods are never called concurrently. +type WriteScheduler interface { + // OpenStream opens a new stream in the write scheduler. 
+ // It is illegal to call this with streamID=0 or with a streamID that is + // already open -- the call may panic. + OpenStream(streamID uint32, options OpenStreamOptions) + + // CloseStream closes a stream in the write scheduler. Any frames queued on + // this stream should be discarded. It is illegal to call this on a stream + // that is not open -- the call may panic. + CloseStream(streamID uint32) + + // AdjustStream adjusts the priority of the given stream. This may be called + // on a stream that has not yet been opened or has been closed. Note that + // RFC 7540 allows PRIORITY frames to be sent on streams in any state. See: + // https://tools.ietf.org/html/rfc7540#section-5.1 + AdjustStream(streamID uint32, priority PriorityParam) + + // Push queues a frame in the scheduler. In most cases, this will not be + // called with wr.StreamID()!=0 unless that stream is currently open. The one + // exception is RST_STREAM frames, which may be sent on idle or closed streams. + Push(wr FrameWriteRequest) + + // Pop dequeues the next frame to write. Returns false if no frames can + // be written. Frames with a given wr.StreamID() are Pop'd in the same + // order they are Push'd. + Pop() (wr FrameWriteRequest, ok bool) +} + +// OpenStreamOptions specifies extra options for WriteScheduler.OpenStream. +type OpenStreamOptions struct { + // PusherID is zero if the stream was initiated by the client. Otherwise, + // PusherID names the stream that pushed the newly opened stream. + PusherID uint32 +} + +// FrameWriteRequest is a request to write a frame. +type FrameWriteRequest struct { + // write is the interface value that does the writing, once the + // WriteScheduler has selected this frame to write. The write + // functions are all defined in write.go. + write writeFramer + + // stream is the stream on which this frame will be written. + // nil for non-stream frames like PING and SETTINGS. + stream *stream + + // done, if non-nil, must be a buffered channel with space for + // 1 message and is sent the return value from write (or an + // earlier error) when the frame has been written. + done chan error +} + +// StreamID returns the id of the stream this frame will be written to. +// 0 is used for non-stream frames such as PING and SETTINGS. +func (wr FrameWriteRequest) StreamID() uint32 { + if wr.stream == nil { + if se, ok := wr.write.(StreamError); ok { + // (*serverConn).resetStream doesn't set + // stream because it doesn't necessarily have + // one. So special case this type of write + // message. + return se.StreamID + } + return 0 + } + return wr.stream.id +} + +// DataSize returns the number of flow control bytes that must be consumed +// to write this entire frame. This is 0 for non-DATA frames. +func (wr FrameWriteRequest) DataSize() int { + if wd, ok := wr.write.(*writeData); ok { + return len(wd.p) + } + return 0 +} + +// Consume consumes min(n, available) bytes from this frame, where available +// is the number of flow control bytes available on the stream. Consume returns +// 0, 1, or 2 frames, where the integer return value gives the number of frames +// returned. +// +// If flow control prevents consuming any bytes, this returns (_, _, 0). If +// the entire frame was consumed, this returns (wr, _, 1). Otherwise, this +// returns (consumed, rest, 2), where 'consumed' contains the consumed bytes and +// 'rest' contains the remaining bytes. The consumed bytes are deducted from the +// underlying stream's flow control budget. 
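Before the implementation that follows, a minimal standalone sketch of the split semantics just described; `splitPayload` is an illustrative name, not part of the vendored package, and it ignores the max-frame-size clamp for brevity:

```go
package main

import "fmt"

// splitPayload mirrors the Consume contract for a DATA payload: it returns
// the bytes that fit within the flow-control allowance, the remainder, and
// how many frames result (0, 1, or 2).
func splitPayload(p []byte, allowed int32) (consumed, rest []byte, n int) {
	if allowed <= 0 {
		return nil, nil, 0 // flow control blocks the write entirely
	}
	if int32(len(p)) <= allowed {
		return p, nil, 1 // the whole frame fits
	}
	return p[:allowed], p[allowed:], 2 // split: send a prefix, keep the rest
}

func main() {
	c, r, n := splitPayload([]byte("hello world"), 5)
	fmt.Printf("%q %q %d\n", c, r, n) // "hello" " world" 2
}
```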
+func (wr FrameWriteRequest) Consume(n int32) (FrameWriteRequest, FrameWriteRequest, int) { + var empty FrameWriteRequest + + // Non-DATA frames are always consumed whole. + wd, ok := wr.write.(*writeData) + if !ok || len(wd.p) == 0 { + return wr, empty, 1 + } + + // Might need to split after applying limits. + allowed := wr.stream.flow.available() + if n < allowed { + allowed = n + } + if wr.stream.sc.maxFrameSize < allowed { + allowed = wr.stream.sc.maxFrameSize + } + if allowed <= 0 { + return empty, empty, 0 + } + if len(wd.p) > int(allowed) { + wr.stream.flow.take(allowed) + consumed := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[:allowed], + // Even if the original had endStream set, there + // are bytes remaining because len(wd.p) > allowed, + // so we know endStream is false. + endStream: false, + }, + // Our caller is blocking on the final DATA frame, not + // this intermediate frame, so no need to wait. + done: nil, + } + rest := FrameWriteRequest{ + stream: wr.stream, + write: &writeData{ + streamID: wd.streamID, + p: wd.p[allowed:], + endStream: wd.endStream, + }, + done: wr.done, + } + return consumed, rest, 2 + } + + // The frame is consumed whole. + // NB: This cast cannot overflow because allowed is <= math.MaxInt32. + wr.stream.flow.take(int32(len(wd.p))) + return wr, empty, 1 +} + +// String is for debugging only. +func (wr FrameWriteRequest) String() string { + var des string + if s, ok := wr.write.(fmt.Stringer); ok { + des = s.String() + } else { + des = fmt.Sprintf("%T", wr.write) + } + return fmt.Sprintf("[FrameWriteRequest stream=%d, ch=%v, writer=%v]", wr.StreamID(), wr.done != nil, des) +} + +// replyToWriter sends err to wr.done and panics if the send must block +// This does nothing if wr.done is nil. +func (wr *FrameWriteRequest) replyToWriter(err error) { + if wr.done == nil { + return + } + select { + case wr.done <- err: + default: + panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wr.write)) + } + wr.write = nil // prevent use (assume it's tainted after wr.done send) +} + +// writeQueue is used by implementations of WriteScheduler. +type writeQueue struct { + s []FrameWriteRequest +} + +func (q *writeQueue) empty() bool { return len(q.s) == 0 } + +func (q *writeQueue) push(wr FrameWriteRequest) { + q.s = append(q.s, wr) +} + +func (q *writeQueue) shift() FrameWriteRequest { + if len(q.s) == 0 { + panic("invalid use of queue") + } + wr := q.s[0] + // TODO: less copy-happy queue. + copy(q.s, q.s[1:]) + q.s[len(q.s)-1] = FrameWriteRequest{} + q.s = q.s[:len(q.s)-1] + return wr +} + +// consume consumes up to n bytes from q.s[0]. If the frame is +// entirely consumed, it is removed from the queue. If the frame +// is partially consumed, the frame is kept with the consumed +// bytes removed. Returns true iff any bytes were consumed. +func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { + if len(q.s) == 0 { + return FrameWriteRequest{}, false + } + consumed, rest, numresult := q.s[0].Consume(n) + switch numresult { + case 0: + return FrameWriteRequest{}, false + case 1: + q.shift() + case 2: + q.s[0] = rest + } + return consumed, true +} + +type writeQueuePool []*writeQueue + +// put inserts an unused writeQueue into the pool. +func (p *writeQueuePool) put(q *writeQueue) { + for i := range q.s { + q.s[i] = FrameWriteRequest{} + } + q.s = q.s[:0] + *p = append(*p, q) +} + +// get returns an empty writeQueue. 
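The put/get pair above implements a simple LIFO free list. A standalone sketch of the same pattern, with `queue` and `pool` as illustrative stand-ins for the package's unexported `writeQueue` and `writeQueuePool`:

```go
package main

import "fmt"

// queue is a stand-in for the package's writeQueue.
type queue struct{ s []int }

// pool mirrors writeQueuePool: a LIFO free list of empty queues.
type pool []*queue

// put returns an exhausted queue to the pool, scrubbing it first (the
// real pool zeroes each FrameWriteRequest so the GC can reclaim anything
// the requests still reference).
func (p *pool) put(q *queue) {
	q.s = q.s[:0]
	*p = append(*p, q)
}

// get pops a recycled queue, or allocates one if the pool is empty.
func (p *pool) get() *queue {
	ln := len(*p)
	if ln == 0 {
		return new(queue)
	}
	q := (*p)[ln-1]
	(*p)[ln-1] = nil // drop the reference so the backing array doesn't pin it
	*p = (*p)[:ln-1]
	return q
}

func main() {
	var p pool
	q := p.get()
	q.s = append(q.s, 1, 2, 3)
	p.put(q)
	fmt.Println(len(p.get().s)) // 0: the recycled queue comes back empty
}
```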
+func (p *writeQueuePool) get() *writeQueue { + ln := len(*p) + if ln == 0 { + return new(writeQueue) + } + x := ln - 1 + q := (*p)[x] + (*p)[x] = nil + *p = (*p)[:x] + return q +} diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go new file mode 100644 index 00000000..848fed6e --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -0,0 +1,452 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" + "sort" +) + +// RFC 7540, Section 5.3.5: the default weight is 16. +const priorityDefaultWeight = 15 // 16 = 15 + 1 + +// PriorityWriteSchedulerConfig configures a priorityWriteScheduler. +type PriorityWriteSchedulerConfig struct { + // MaxClosedNodesInTree controls the maximum number of closed streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // "It is possible for a stream to become closed while prioritization + // information ... is in transit. ... This potentially creates suboptimal + // prioritization, since the stream could be given a priority that is + // different from what is intended. To avoid these problems, an endpoint + // SHOULD retain stream prioritization state for a period after streams + // become closed. The longer state is retained, the lower the chance that + // streams are assigned incorrect or default priority values." + MaxClosedNodesInTree int + + // MaxIdleNodesInTree controls the maximum number of idle streams to + // retain in the priority tree. Setting this to zero saves a small amount + // of memory at the cost of performance. + // + // See RFC 7540, Section 5.3.4: + // Similarly, streams that are in the "idle" state can be assigned + // priority or become a parent of other streams. This allows for the + // creation of a grouping node in the dependency tree, which enables + // more flexible expressions of priority. Idle streams begin with a + // default priority (Section 5.3.5). + MaxIdleNodesInTree int + + // ThrottleOutOfOrderWrites enables write throttling to help ensure that + // data is delivered in priority order. This works around a race where + // stream B depends on stream A and both streams are about to call Write + // to queue DATA frames. If B wins the race, a naive scheduler would eagerly + // write as much data from B as possible, but this is suboptimal because A + // is a higher-priority stream. With throttling enabled, we write a small + // amount of data from B to minimize the amount of bandwidth that B can + // steal from A. + ThrottleOutOfOrderWrites bool +} + +// NewPriorityWriteScheduler constructs a WriteScheduler that schedules +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. +// If cfg is nil, default options are used. 
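For context, this is roughly how a server opts in to the priority scheduler through this package's exported API (`ConfigureServer` and the `Server.NewWriteScheduler` hook); the certificate paths are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	// Select the RFC 7540 priority scheduler with explicit limits; passing
	// nil to NewPriorityWriteScheduler uses the defaults shown above
	// (10 closed nodes, 10 idle nodes, no write throttling).
	err := http2.ConfigureServer(srv, &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
				MaxClosedNodesInTree:     10,
				MaxIdleNodesInTree:       10,
				ThrottleOutOfOrderWrites: true,
			})
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem")) // placeholder paths
}
```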
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteScheduler{
+		nodes:                make(map[uint32]*priorityNode),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
+
+type priorityNodeState int
+
+const (
+	priorityNodeOpen priorityNodeState = iota
+	priorityNodeClosed
+	priorityNodeIdle
+)
+
+// priorityNode is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNode struct {
+	q            writeQueue        // queue of pending frames to write
+	id           uint32            // id of the stream, or 0 for the root of the tree
+	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeState // open | closed | idle
+	bytes        int64             // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNode
+	kids       *priorityNode // start of the kids list
+	prev, next *priorityNode // doubly-linked list of siblings
+}
+
+func (n *priorityNode) setParent(parent *priorityNode) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNode) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpen)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
+ w := n.kids.weight + needSort := false + for k := n.kids.next; k != nil; k = k.next { + if k.weight != w { + needSort = true + break + } + } + if !needSort { + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false + } + + // Uncommon case: sort the child nodes. We remove the kids from the parent, + // then re-insert after sorting so we can reuse tmp for future sort calls. + *tmp = (*tmp)[:0] + for n.kids != nil { + *tmp = append(*tmp, n.kids) + n.kids.setParent(nil) + } + sort.Sort(sortPriorityNodeSiblings(*tmp)) + for i := len(*tmp) - 1; i >= 0; i-- { + (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids + } + for k := n.kids; k != nil; k = k.next { + if k.walkReadyInOrder(openParent, tmp, f) { + return true + } + } + return false +} + +type sortPriorityNodeSiblings []*priorityNode + +func (z sortPriorityNodeSiblings) Len() int { return len(z) } +func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblings) Less(i, k int) bool { + // Prefer the subtree that has sent fewer bytes relative to its weight. + // See sections 5.3.2 and 5.3.4. + wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + if bi == 0 && bk == 0 { + return wi >= wk + } + if bk == 0 { + return false + } + return bi/bk <= wi/wk +} + +type priorityWriteScheduler struct { + // root is the root of the priority tree, where root.id = 0. + // The root queues control frames that are not associated with any stream. + root priorityNode + + // nodes maps stream ids to priority tree nodes. + nodes map[uint32]*priorityNode + + // maxID is the maximum stream id in nodes. + maxID uint32 + + // lists of nodes that have been closed or are idle, but are kept in + // the tree for improved prioritization. When the lengths exceed either + // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. + closedNodes, idleNodes []*priorityNode + + // From the config. + maxClosedNodesInTree int + maxIdleNodesInTree int + writeThrottleLimit int32 + enableWriteThrottle bool + + // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. + tmp []*priorityNode + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // The stream may be currently idle but cannot be opened or closed. + if curr := ws.nodes[streamID]; curr != nil { + if curr.state != priorityNodeIdle { + panic(fmt.Sprintf("stream %d already opened", streamID)) + } + curr.state = priorityNodeOpen + return + } + + // RFC 7540, Section 5.3.5: + // "All streams are initially assigned a non-exclusive dependency on stream 0x0. + // Pushed streams initially depend on their associated stream. In both cases, + // streams are assigned a default weight of 16." 
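Before returning to OpenStream below, a standalone illustration of the `Less` rule defined above: among siblings, the subtree that has sent fewer bytes per unit of effective weight (stored weight + 1) sorts first. The `node` type here is a stand-in, not the package's `priorityNode`:

```go
package main

import (
	"fmt"
	"sort"
)

type node struct {
	name         string
	weight       uint8 // stored weight; the effective RFC 7540 weight is weight+1
	subtreeBytes int64
}

func main() {
	kids := []node{
		{"a", 31, 6400}, // effective weight 32: 200 bytes sent per weight unit
		{"b", 15, 1600}, // effective weight 16: 100 bytes sent per weight unit
	}
	sort.Slice(kids, func(i, k int) bool {
		wi, bi := float64(kids[i].weight+1), float64(kids[i].subtreeBytes)
		wk, bk := float64(kids[k].weight+1), float64(kids[k].subtreeBytes)
		if bi == 0 && bk == 0 {
			return wi >= wk // nothing sent yet: heavier weight goes first
		}
		if bk == 0 {
			return false
		}
		return bi/bk <= wi/wk // fewer bytes relative to weight goes first
	})
	fmt.Println(kids[0].name) // "b": it is behind on its fair share of bandwidth
}
```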
+ parent := ws.nodes[options.PusherID] + if parent == nil { + parent = &ws.root + } + n := &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeOpen, + } + n.setParent(parent) + ws.nodes[streamID] = n + if streamID > ws.maxID { + ws.maxID = streamID + } +} + +func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { + if streamID == 0 { + panic("violation of WriteScheduler interface: cannot close stream 0") + } + if ws.nodes[streamID] == nil { + panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) + } + if ws.nodes[streamID].state != priorityNodeOpen { + panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) + } + + n := ws.nodes[streamID] + n.state = priorityNodeClosed + n.addBytes(-n.bytes) + + q := n.q + ws.queuePool.put(&q) + n.q.s = nil + if ws.maxClosedNodesInTree > 0 { + ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) + } else { + ws.removeNode(n) + } +} + +func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + if streamID == 0 { + panic("adjustPriority on root") + } + + // If streamID does not exist, there are two cases: + // - A closed stream that has been removed (this will have ID <= maxID) + // - An idle stream that is being used for "grouping" (this will have ID > maxID) + n := ws.nodes[streamID] + if n == nil { + if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 { + return + } + ws.maxID = streamID + n = &priorityNode{ + q: *ws.queuePool.get(), + id: streamID, + weight: priorityDefaultWeight, + state: priorityNodeIdle, + } + n.setParent(&ws.root) + ws.nodes[streamID] = n + ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n) + } + + // Section 5.3.1: A dependency on a stream that is not currently in the tree + // results in that stream being given a default priority (Section 5.3.5). + parent := ws.nodes[priority.StreamDep] + if parent == nil { + n.setParent(&ws.root) + n.weight = priorityDefaultWeight + return + } + + // Ignore if the client tries to make a node its own parent. + if n == parent { + return + } + + // Section 5.3.3: + // "If a stream is made dependent on one of its own dependencies, the + // formerly dependent stream is first moved to be dependent on the + // reprioritized stream's previous parent. The moved dependency retains + // its weight." + // + // That is: if parent depends on n, move parent to depend on n.parent. + for x := parent.parent; x != nil; x = x.parent { + if x == n { + parent.setParent(n.parent) + break + } + } + + // Section 5.3.3: The exclusive flag causes the stream to become the sole + // dependency of its parent stream, causing other dependencies to become + // dependent on the exclusive stream. + if priority.Exclusive { + k := parent.kids + for k != nil { + next := k.next + if k != n { + k.setParent(n) + } + k = next + } + } + + n.setParent(parent) + n.weight = priority.Weight +} + +func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { + var n *priorityNode + if id := wr.StreamID(); id == 0 { + n = &ws.root + } else { + n = ws.nodes[id] + if n == nil { + // id is an idle or closed stream. wr should not be a HEADERS or + // DATA frame. However, wr can be a RST_STREAM. In this case, we + // push wr onto the root, rather than creating a new priorityNode, + // since RST_STREAM is tiny and the stream's priority is unknown + // anyway. See issue #17919. 
+ if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + n = &ws.root + } + } + n.q.push(wr) +} + +func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { + limit := int32(math.MaxInt32) + if openParent { + limit = ws.writeThrottleLimit + } + wr, ok = n.q.consume(limit) + if !ok { + return false + } + n.addBytes(int64(wr.DataSize())) + // If B depends on A and B continuously has data available but A + // does not, gradually increase the throttling limit to allow B to + // steal more and more bandwidth from A. + if openParent { + ws.writeThrottleLimit += 1024 + if ws.writeThrottleLimit < 0 { + ws.writeThrottleLimit = math.MaxInt32 + } + } else if ws.enableWriteThrottle { + ws.writeThrottleLimit = 1024 + } + return true + }) + return wr, ok +} + +func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { + if maxSize == 0 { + return + } + if len(*list) == maxSize { + // Remove the oldest node, then shift left. + ws.removeNode((*list)[0]) + x := (*list)[1:] + copy(*list, x) + *list = (*list)[:len(x)] + } + *list = append(*list, n) +} + +func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { + for k := n.kids; k != nil; k = k.next { + k.setParent(n.parent) + } + n.setParent(nil) + delete(ws.nodes, n.id) +} diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go new file mode 100644 index 00000000..36d7919f --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -0,0 +1,72 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import "math" + +// NewRandomWriteScheduler constructs a WriteScheduler that ignores HTTP/2 +// priorities. Control frames like SETTINGS and PING are written before DATA +// frames, but if no control frames are queued and multiple streams have queued +// HEADERS or DATA frames, Pop selects a ready stream arbitrarily. +func NewRandomWriteScheduler() WriteScheduler { + return &randomWriteScheduler{sq: make(map[uint32]*writeQueue)} +} + +type randomWriteScheduler struct { + // zero are frames not associated with a specific stream. + zero writeQueue + + // sq contains the stream-specific queues, keyed by stream ID. + // When a stream is idle or closed, it's deleted from the map. + sq map[uint32]*writeQueue + + // pool of empty queues for reuse. + queuePool writeQueuePool +} + +func (ws *randomWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { + // no-op: idle streams are not tracked +} + +func (ws *randomWriteScheduler) CloseStream(streamID uint32) { + q, ok := ws.sq[streamID] + if !ok { + return + } + delete(ws.sq, streamID) + ws.queuePool.put(q) +} + +func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { + // no-op: priorities are ignored +} + +func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) { + id := wr.StreamID() + if id == 0 { + ws.zero.push(wr) + return + } + q, ok := ws.sq[id] + if !ok { + q = ws.queuePool.get() + ws.sq[id] = q + } + q.push(wr) +} + +func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { + // Control frames first. + if !ws.zero.empty() { + return ws.zero.shift(), true + } + // Iterate over all non-idle streams until finding one that can be consumed. 
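The "arbitrary" stream selection in the loop below simply falls out of Go's randomized map iteration order, as this standalone snippet (with hypothetical queue contents) illustrates:

```go
package main

import "fmt"

func main() {
	queues := map[uint32][]string{
		1: {"HEADERS s1"},
		3: {"HEADERS s3"},
		5: {"HEADERS s5"},
	}
	// Range order over a Go map is deliberately randomized, so which
	// stream gets popped first can differ from run to run.
	for id, q := range queues {
		fmt.Printf("popping %q from stream %d\n", q[0], id)
		break
	}
}
```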
+	for _, q := range ws.sq {
+		if wr, ok := q.consume(math.MaxInt32); ok {
+			return wr, true
+		}
+	}
+	return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/net/idna/idna10.0.0.go b/vendor/golang.org/x/net/idna/idna10.0.0.go
new file mode 100644
index 00000000..a98a31f4
--- /dev/null
+++ b/vendor/golang.org/x/net/idna/idna10.0.0.go
@@ -0,0 +1,734 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.10
+
+// Package idna implements IDNA2008 using the compatibility processing
+// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
+// deal with the transition from IDNA2003.
+//
+// IDNA2008 (Internationalized Domain Names for Applications) is defined in RFC
+// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
+// UTS #46 is defined in https://www.unicode.org/reports/tr46.
+// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the
+// differences between these two standards.
+package idna // import "golang.org/x/net/idna"
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"golang.org/x/text/secure/bidirule"
+	"golang.org/x/text/unicode/bidi"
+	"golang.org/x/text/unicode/norm"
+)
+
+// NOTE: Unlike common practice in Go APIs, the functions will return a
+// sanitized domain name in case of errors. Browsers sometimes use a partially
+// evaluated string as lookup.
+// TODO: the current error handling is, in my opinion, the least opinionated.
+// Other strategies are also viable, though:
+// Option 1) Return an empty string in case of error, but allow the user to
+// specify explicitly which errors to ignore.
+// Option 2) Return the partially evaluated string if it is itself a valid
+// string, otherwise return the empty string in case of error.
+// Option 3) Option 1 and 2.
+// Option 4) Always return an empty string for now and implement Option 1 as
+// needed, and document that the return string may not be empty in case of
+// error in the future.
+// I think Option 1 is best, but it is quite opinionated.
+
+// ToASCII is a wrapper for Punycode.ToASCII.
+func ToASCII(s string) (string, error) {
+	return Punycode.process(s, true)
+}
+
+// ToUnicode is a wrapper for Punycode.ToUnicode.
+func ToUnicode(s string) (string, error) {
+	return Punycode.process(s, false)
+}
+
+// An Option configures a Profile at creation time.
+type Option func(*options)
+
+// Transitional sets a Profile to use the Transitional mapping as defined in UTS
+// #46. This will cause, for example, "ß" to be mapped to "ss". Using the
+// transitional mapping provides a compromise between IDNA2003 and IDNA2008
+// compatibility. It is used by most browsers when resolving domain names. This
+// option is only meaningful if combined with MapForLookup.
+func Transitional(transitional bool) Option {
+	return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+	return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by the UTS #46 and is adopted by some
+// browsers.
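A usage sketch of these options through the package's exported API; the expected outputs (shown as comments) follow the examples given in the ToASCII/ToUnicode documentation further down in this file:

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Compose a custom lookup profile from the exported options.
	p := idna.New(idna.MapForLookup(), idna.BidiRule())
	a, err := p.ToASCII("bücher.example.com")
	fmt.Println(a, err) // xn--bcher-kva.example.com <nil>

	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	fmt.Println(u, err) // bücher.example.com <nil>
}
```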
+func RemoveLeadingDots(remove bool) Option { + return func(o *options) { o.removeLeadingDots = remove } +} + +// ValidateLabels sets whether to check the mandatory label validation criteria +// as defined in Section 5.4 of RFC 5891. This includes testing for correct use +// of hyphens ('-'), normalization, validity of runes, and the context rules. +func ValidateLabels(enable bool) Option { + return func(o *options) { + // Don't override existing mappings, but set one that at least checks + // normalization if it is not set. + if o.mapping == nil && enable { + o.mapping = normalize + } + o.trie = trie + o.validateLabels = enable + o.fromPuny = validateFromPunycode + } +} + +// StrictDomainName limits the set of permissible ASCII characters to those +// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the +// hyphen). This is set by default for MapForLookup and ValidateForRegistration. +// +// This option is useful, for instance, for browsers that allow characters +// outside this range, for example a '_' (U+005F LOW LINE). See +// http://www.rfc-editor.org/std/std3.txt for more details This option +// corresponds to the UseSTD3ASCIIRules option in UTS #46. +func StrictDomainName(use bool) Option { + return func(o *options) { + o.trie = trie + o.useSTD3Rules = use + o.fromPuny = validateFromPunycode + } +} + +// NOTE: the following options pull in tables. The tables should not be linked +// in as long as the options are not used. + +// BidiRule enables the Bidi rule as defined in RFC 5893. Any application +// that relies on proper validation of labels should include this rule. +func BidiRule() Option { + return func(o *options) { o.bidirule = bidirule.ValidString } +} + +// ValidateForRegistration sets validation options to verify that a given IDN is +// properly formatted for registration as defined by Section 4 of RFC 5891. +func ValidateForRegistration() Option { + return func(o *options) { + o.mapping = validateRegistration + StrictDomainName(true)(o) + ValidateLabels(true)(o) + VerifyDNSLength(true)(o) + BidiRule()(o) + } +} + +// MapForLookup sets validation and mapping options such that a given IDN is +// transformed for domain name lookup according to the requirements set out in +// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894, +// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option +// to add this check. +// +// The mappings include normalization and mapping case, width and other +// compatibility mappings. +func MapForLookup() Option { + return func(o *options) { + o.mapping = validateAndMap + StrictDomainName(true)(o) + ValidateLabels(true)(o) + } +} + +type options struct { + transitional bool + useSTD3Rules bool + validateLabels bool + verifyDNSLength bool + removeLeadingDots bool + + trie *idnaTrie + + // fromPuny calls validation rules when converting A-labels to U-labels. + fromPuny func(p *Profile, s string) error + + // mapping implements a validation and mapping step as defined in RFC 5895 + // or UTS 46, tailored to, for example, domain registration or lookup. + mapping func(p *Profile, s string) (mapped string, isBidi bool, err error) + + // bidirule, if specified, checks whether s conforms to the Bidi Rule + // defined in RFC 5893. + bidirule func(s string) bool +} + +// A Profile defines the configuration of an IDNA mapper. +type Profile struct { + options +} + +func apply(o *options, opts []Option) { + for _, f := range opts { + f(o) + } +} + +// New creates a new Profile. 
+// +// With no options, the returned Profile is the most permissive and equals the +// Punycode Profile. Options can be passed to further restrict the Profile. The +// MapForLookup and ValidateForRegistration options set a collection of options, +// for lookup and registration purposes respectively, which can be tailored by +// adding more fine-grained options, where later options override earlier +// options. +func New(o ...Option) *Profile { + p := &Profile{} + apply(&p.options, o) + return p +} + +// ToASCII converts a domain or domain label to its ASCII form. For example, +// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and +// ToASCII("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToASCII(s string) (string, error) { + return p.process(s, true) +} + +// ToUnicode converts a domain or domain label to its Unicode form. For example, +// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and +// ToUnicode("golang") is "golang". If an error is encountered it will return +// an error and a (partially) processed result. +func (p *Profile) ToUnicode(s string) (string, error) { + pp := *p + pp.transitional = false + return pp.process(s, false) +} + +// String reports a string with a description of the profile for debugging +// purposes. The string format may change with different versions. +func (p *Profile) String() string { + s := "" + if p.transitional { + s = "Transitional" + } else { + s = "NonTransitional" + } + if p.useSTD3Rules { + s += ":UseSTD3Rules" + } + if p.validateLabels { + s += ":ValidateLabels" + } + if p.verifyDNSLength { + s += ":VerifyDNSLength" + } + return s +} + +var ( + // Punycode is a Profile that does raw punycode processing with a minimum + // of validation. + Punycode *Profile = punycode + + // Lookup is the recommended profile for looking up domain names, according + // to Section 5 of RFC 5891. The exact configuration of this profile may + // change over time. + Lookup *Profile = lookup + + // Display is the recommended profile for displaying domain names. + // The configuration of this profile may change over time. + Display *Profile = display + + // Registration is the recommended profile for checking whether a given + // IDN is valid for registration, according to Section 4 of RFC 5891. + Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. 
+) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + var isBidi bool + if p.mapping != nil { + s, isBidi, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // TODO: allow for a quick check of the tables data. + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + isBidi = isBidi || bidirule.DirectionString(u) != bidi.LeftToRight + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if isBidi && p.bidirule != nil && err == nil { + for labels.reset(); !labels.done(); labels.next() { + if !p.bidirule(labels.label()) { + err = &labelError{s, "B"} + break + } + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' { + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (mapped string, isBidi bool, err error) { + // TODO: consider first doing a quick check to see if any of these checks + // need to be done. This will make it slower in the general case, but + // faster in the common case. + mapped = norm.NFC.String(s) + isBidi = bidirule.DirectionString(mapped) == bidi.RightToLeft + return mapped, isBidi, nil +} + +func validateRegistration(p *Profile, s string) (idem string, bidi bool, err error) { + // TODO: filter need for normalization in loop below. 
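Before the loop below, a sketch of how this registration check differs from lookup, using the exported profiles; per the category switch that follows, "mapped" runes such as uppercase ASCII are rejected rather than case-folded (the exact error text is indicative only):

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Registration validates without mapping: uppercase input contains
	// "mapped" runes and is rejected outright.
	_, err := idna.Registration.ToASCII("Example.COM")
	fmt.Println(err) // non-nil, e.g. idna: disallowed rune U+0045

	// Lookup maps first, so the same input is simply case-folded.
	a, err := idna.Lookup.ToASCII("Example.COM")
	fmt.Println(a, err) // example.com <nil>
}
```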
+ if !norm.NFC.IsNormalString(s) { + return s, false, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return s, bidi, runeError(utf8.RuneError) + } + bidi = bidi || info(v).isBidi(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, bidi, runeError(r) + } + i += sz + } + return s, bidi, nil +} + +func (c info) isBidi(s string) bool { + if !c.isMapped() { + return c&attributesMask == rtl + } + // TODO: also store bidi info for mapped data. This is possible, but a bit + // cumbersome and not for the common case. + p, _ := bidi.LookupString(s) + switch p.Class() { + case bidi.R, bidi.AL, bidi.AN: + return true + } + return false +} + +func validateAndMap(p *Profile, s string) (vm string, bidi bool, err error) { + var ( + b []byte + k int + ) + // combinedInfoBits contains the or-ed bits of all runes. We use this + // to derive the mayNeedNorm bit later. This may trigger normalization + // overeagerly, but it will not do so in the common case. The end result + // is another 10% saving on BenchmarkProfile for the common case. + var combinedInfoBits info + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + b = append(b, s[k:i]...) + b = append(b, "\ufffd"...) + k = len(s) + if err == nil { + err = runeError(utf8.RuneError) + } + break + } + combinedInfoBits |= info(v) + bidi = bidi || info(v).isBidi(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + if combinedInfoBits&mayNeedNorm != 0 { + s = norm.NFC.String(s) + } + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, bidi, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' 
{ + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. +const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + // TODO: detect whether string may have to be normalized in the following + // loop. + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if sz == 0 { + return runeError(utf8.RuneError) + } + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) (err error) { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
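A sketch of this joiner rule exercised through the exported profiles: `Display` is non-transitional, so a deviation character like U+200C ZERO WIDTH NON-JOINER survives mapping and must satisfy the state machine above, while the transitional `Lookup` profile in this version maps the ZWNJ away before validation ever sees it (expected results are my reading of the code, shown as comments):

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// A ZWNJ between two Latin letters has no valid joining context.
	_, err := idna.Display.ToASCII("a\u200cb.example")
	fmt.Println(err) // non-nil: the label fails the ContextJ rule (code "C")

	// Transitional processing removes the ZWNJ during mapping instead.
	a, err := idna.Lookup.ToASCII("a\u200cb.example")
	fmt.Println(a, err) // ab.example <nil>
}
```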
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/idna9.0.0.go b/vendor/golang.org/x/net/idna/idna9.0.0.go new file mode 100644 index 00000000..8842146b --- /dev/null +++ b/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -0,0 +1,682 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.10 + +// Package idna implements IDNA2008 using the compatibility processing +// defined by UTS (Unicode Technical Standard) #46, which defines a standard to +// deal with the transition from IDNA2003. +// +// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC +// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894. +// UTS #46 is defined in https://www.unicode.org/reports/tr46. +// See https://unicode.org/cldr/utility/idna.jsp for a visualization of the +// differences between these two standards. +package idna // import "golang.org/x/net/idna" + +import ( + "fmt" + "strings" + "unicode/utf8" + + "golang.org/x/text/secure/bidirule" + "golang.org/x/text/unicode/norm" +) + +// NOTE: Unlike common practice in Go APIs, the functions will return a +// sanitized domain name in case of errors. Browsers sometimes use a partially +// evaluated string as lookup. +// TODO: the current error handling is, in my opinion, the least opinionated. +// Other strategies are also viable, though: +// Option 1) Return an empty string in case of error, but allow the user to +// specify explicitly which errors to ignore. +// Option 2) Return the partially evaluated string if it is itself a valid +// string, otherwise return the empty string in case of error. +// Option 3) Option 1 and 2. +// Option 4) Always return an empty string for now and implement Option 1 as +// needed, and document that the return string may not be empty in case of +// error in the future. +// I think Option 1 is best, but it is quite opinionated. + +// ToASCII is a wrapper for Punycode.ToASCII. +func ToASCII(s string) (string, error) { + return Punycode.process(s, true) +} + +// ToUnicode is a wrapper for Punycode.ToUnicode. +func ToUnicode(s string) (string, error) { + return Punycode.process(s, false) +} + +// An Option configures a Profile at creation time. +type Option func(*options) + +// Transitional sets a Profile to use the Transitional mapping as defined in UTS +// #46. This will cause, for example, "ß" to be mapped to "ss". Using the +// transitional mapping provides a compromise between IDNA2003 and IDNA2008 +// compatibility. It is used by most browsers when resolving domain names. This +// option is only meaningful if combined with MapForLookup. 
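The deviation-character behavior this option controls, sketched with the exported profiles; the expected outputs follow the standard UTS #46 "ß" example:

```go
package main

import (
	"fmt"

	"golang.org/x/net/idna"
)

func main() {
	// Transitional mapping (used by Lookup in this version): ß -> ss.
	a, err := idna.Lookup.ToASCII("straße.de")
	fmt.Println(a, err) // strasse.de <nil>

	// Non-transitional processing keeps the ß and punycode-encodes it.
	a, err = idna.Display.ToASCII("straße.de")
	fmt.Println(a, err) // xn--strae-oqa.de <nil>
}
```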
+func Transitional(transitional bool) Option {
+	return func(o *options) { o.transitional = transitional }
+}
+
+// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
+// are longer than allowed by the RFC.
+func VerifyDNSLength(verify bool) Option {
+	return func(o *options) { o.verifyDNSLength = verify }
+}
+
+// RemoveLeadingDots removes leading label separators. Leading runes that map to
+// dots, such as U+3002 IDEOGRAPHIC FULL STOP, are removed as well.
+//
+// This is the behavior suggested by the UTS #46 and is adopted by some
+// browsers.
+func RemoveLeadingDots(remove bool) Option {
+	return func(o *options) { o.removeLeadingDots = remove }
+}
+
+// ValidateLabels sets whether to check the mandatory label validation criteria
+// as defined in Section 5.4 of RFC 5891. This includes testing for correct use
+// of hyphens ('-'), normalization, validity of runes, and the context rules.
+func ValidateLabels(enable bool) Option {
+	return func(o *options) {
+		// Don't override existing mappings, but set one that at least checks
+		// normalization if it is not set.
+		if o.mapping == nil && enable {
+			o.mapping = normalize
+		}
+		o.trie = trie
+		o.validateLabels = enable
+		o.fromPuny = validateFromPunycode
+	}
+}
+
+// StrictDomainName limits the set of permissible ASCII characters to those
+// allowed in domain names as defined in RFC 1034 (A-Z, a-z, 0-9 and the
+// hyphen). This is set by default for MapForLookup and ValidateForRegistration.
+//
+// This option is useful, for instance, for browsers that allow characters
+// outside this range, for example a '_' (U+005F LOW LINE). See
+// http://www.rfc-editor.org/std/std3.txt for more details. This option
+// corresponds to the UseSTD3ASCIIRules option in UTS #46.
+func StrictDomainName(use bool) Option {
+	return func(o *options) {
+		o.trie = trie
+		o.useSTD3Rules = use
+		o.fromPuny = validateFromPunycode
+	}
+}
+
+// NOTE: the following options pull in tables. The tables should not be linked
+// in as long as the options are not used.
+
+// BidiRule enables the Bidi rule as defined in RFC 5893. Any application
+// that relies on proper validation of labels should include this rule.
+func BidiRule() Option {
+	return func(o *options) { o.bidirule = bidirule.ValidString }
+}
+
+// ValidateForRegistration sets validation options to verify that a given IDN is
+// properly formatted for registration as defined by Section 4 of RFC 5891.
+func ValidateForRegistration() Option {
+	return func(o *options) {
+		o.mapping = validateRegistration
+		StrictDomainName(true)(o)
+		ValidateLabels(true)(o)
+		VerifyDNSLength(true)(o)
+		BidiRule()(o)
+	}
+}
+
+// MapForLookup sets validation and mapping options such that a given IDN is
+// transformed for domain name lookup according to the requirements set out in
+// Section 5 of RFC 5891. The mappings follow the recommendations of RFC 5894,
+// RFC 5895 and UTS 46. It does not add the Bidi Rule. Use the BidiRule option
+// to add this check.
+//
+// The mappings include normalization and mapping case, width and other
+// compatibility mappings.
+func MapForLookup() Option {
+	return func(o *options) {
+		o.mapping = validateAndMap
+		StrictDomainName(true)(o)
+		ValidateLabels(true)(o)
+		RemoveLeadingDots(true)(o)
+	}
+}
+
+type options struct {
+	transitional      bool
+	useSTD3Rules      bool
+	validateLabels    bool
+	verifyDNSLength   bool
+	removeLeadingDots bool
+
+	trie *idnaTrie
+
+	// fromPuny calls validation rules when converting A-labels to U-labels.
+	fromPuny func(p *Profile, s string) error
+
+	// mapping implements a validation and mapping step as defined in RFC 5895
+	// or UTS 46, tailored to, for example, domain registration or lookup.
+	mapping func(p *Profile, s string) (string, error)
+
+	// bidirule, if specified, checks whether s conforms to the Bidi Rule
+	// defined in RFC 5893.
+	bidirule func(s string) bool
+}
+
+// A Profile defines the configuration of an IDNA mapper.
+type Profile struct {
+	options
+}
+
+func apply(o *options, opts []Option) {
+	for _, f := range opts {
+		f(o)
+	}
+}
+
+// New creates a new Profile.
+//
+// With no options, the returned Profile is the most permissive and equals the
+// Punycode Profile. Options can be passed to further restrict the Profile. The
+// MapForLookup and ValidateForRegistration options set a collection of options,
+// for lookup and registration purposes respectively, which can be tailored by
+// adding more fine-grained options, where later options override earlier
+// options.
+func New(o ...Option) *Profile {
+	p := &Profile{}
+	apply(&p.options, o)
+	return p
+}
+
+// ToASCII converts a domain or domain label to its ASCII form. For example,
+// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
+// ToASCII("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToASCII(s string) (string, error) {
+	return p.process(s, true)
+}
+
+// ToUnicode converts a domain or domain label to its Unicode form. For example,
+// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
+// ToUnicode("golang") is "golang". If an error is encountered it will return
+// an error and a (partially) processed result.
+func (p *Profile) ToUnicode(s string) (string, error) {
+	pp := *p
+	pp.transitional = false
+	return pp.process(s, false)
+}
+
+// String reports a string with a description of the profile for debugging
+// purposes. The string format may change with different versions.
+func (p *Profile) String() string {
+	s := ""
+	if p.transitional {
+		s = "Transitional"
+	} else {
+		s = "NonTransitional"
+	}
+	if p.useSTD3Rules {
+		s += ":UseSTD3Rules"
+	}
+	if p.validateLabels {
+		s += ":ValidateLabels"
+	}
+	if p.verifyDNSLength {
+		s += ":VerifyDNSLength"
+	}
+	return s
+}
+
+var (
+	// Punycode is a Profile that does raw punycode processing with a minimum
+	// of validation.
+	Punycode *Profile = punycode
+
+	// Lookup is the recommended profile for looking up domain names, according
+	// to Section 5 of RFC 5891. The exact configuration of this profile may
+	// change over time.
+	Lookup *Profile = lookup
+
+	// Display is the recommended profile for displaying domain names.
+	// The configuration of this profile may change over time.
+	Display *Profile = display
+
+	// Registration is the recommended profile for checking whether a given
+	// IDN is valid for registration, according to Section 4 of RFC 5891.
+ Registration *Profile = registration + + punycode = &Profile{} + lookup = &Profile{options{ + transitional: true, + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + display = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + removeLeadingDots: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateAndMap, + bidirule: bidirule.ValidString, + }} + registration = &Profile{options{ + useSTD3Rules: true, + validateLabels: true, + verifyDNSLength: true, + trie: trie, + fromPuny: validateFromPunycode, + mapping: validateRegistration, + bidirule: bidirule.ValidString, + }} + + // TODO: profiles + // Register: recommended for approving domain names: don't do any mappings + // but rather reject on invalid input. Bundle or block deviation characters. +) + +type labelError struct{ label, code_ string } + +func (e labelError) code() string { return e.code_ } +func (e labelError) Error() string { + return fmt.Sprintf("idna: invalid label %q", e.label) +} + +type runeError rune + +func (e runeError) code() string { return "P1" } +func (e runeError) Error() string { + return fmt.Sprintf("idna: disallowed rune %U", e) +} + +// process implements the algorithm described in section 4 of UTS #46, +// see https://www.unicode.org/reports/tr46. +func (p *Profile) process(s string, toASCII bool) (string, error) { + var err error + if p.mapping != nil { + s, err = p.mapping(p, s) + } + // Remove leading empty labels. + if p.removeLeadingDots { + for ; len(s) > 0 && s[0] == '.'; s = s[1:] { + } + } + // It seems like we should only create this error on ToASCII, but the + // UTS 46 conformance tests suggests we should always check this. + if err == nil && p.verifyDNSLength && s == "" { + err = &labelError{s, "A4"} + } + labels := labelIter{orig: s} + for ; !labels.done(); labels.next() { + label := labels.label() + if label == "" { + // Empty labels are not okay. The label iterator skips the last + // label if it is empty. + if err == nil && p.verifyDNSLength { + err = &labelError{s, "A4"} + } + continue + } + if strings.HasPrefix(label, acePrefix) { + u, err2 := decode(label[len(acePrefix):]) + if err2 != nil { + if err == nil { + err = err2 + } + // Spec says keep the old label. + continue + } + labels.set(u) + if err == nil && p.validateLabels { + err = p.fromPuny(p, u) + } + if err == nil { + // This should be called on NonTransitional, according to the + // spec, but that currently does not have any effect. Use the + // original profile to preserve options. + err = p.validateLabel(u) + } + } else if err == nil { + err = p.validateLabel(label) + } + } + if toASCII { + for labels.reset(); !labels.done(); labels.next() { + label := labels.label() + if !ascii(label) { + a, err2 := encode(acePrefix, label) + if err == nil { + err = err2 + } + label = a + labels.set(a) + } + n := len(label) + if p.verifyDNSLength && err == nil && (n == 0 || n > 63) { + err = &labelError{label, "A4"} + } + } + } + s = labels.result() + if toASCII && p.verifyDNSLength && err == nil { + // Compute the length of the domain name minus the root label and its dot. + n := len(s) + if n > 0 && s[n-1] == '.' 
{ + n-- + } + if len(s) < 1 || n > 253 { + err = &labelError{s, "A4"} + } + } + return s, err +} + +func normalize(p *Profile, s string) (string, error) { + return norm.NFC.String(s), nil +} + +func validateRegistration(p *Profile, s string) (string, error) { + if !norm.NFC.IsNormalString(s) { + return s, &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + // TODO: handle the NV8 defined in the Unicode idna data set to allow + // for strict conformance to IDNA2008. + case valid, deviation: + case disallowed, mapped, unknown, ignored: + r, _ := utf8.DecodeRuneInString(s[i:]) + return s, runeError(r) + } + i += sz + } + return s, nil +} + +func validateAndMap(p *Profile, s string) (string, error) { + var ( + err error + b []byte + k int + ) + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + start := i + i += sz + // Copy bytes not copied so far. + switch p.simplify(info(v).category()) { + case valid: + continue + case disallowed: + if err == nil { + r, _ := utf8.DecodeRuneInString(s[start:]) + err = runeError(r) + } + continue + case mapped, deviation: + b = append(b, s[k:start]...) + b = info(v).appendMapping(b, s[start:i]) + case ignored: + b = append(b, s[k:start]...) + // drop the rune + case unknown: + b = append(b, s[k:start]...) + b = append(b, "\ufffd"...) + } + k = i + } + if k == 0 { + // No changes so far. + s = norm.NFC.String(s) + } else { + b = append(b, s[k:]...) + if norm.NFC.QuickSpan(b) != len(b) { + b = norm.NFC.Bytes(b) + } + // TODO: the punycode converters require strings as input. + s = string(b) + } + return s, err +} + +// A labelIter allows iterating over domain name labels. +type labelIter struct { + orig string + slice []string + curStart int + curEnd int + i int +} + +func (l *labelIter) reset() { + l.curStart = 0 + l.curEnd = 0 + l.i = 0 +} + +func (l *labelIter) done() bool { + return l.curStart >= len(l.orig) +} + +func (l *labelIter) result() string { + if l.slice != nil { + return strings.Join(l.slice, ".") + } + return l.orig +} + +func (l *labelIter) label() string { + if l.slice != nil { + return l.slice[l.i] + } + p := strings.IndexByte(l.orig[l.curStart:], '.') + l.curEnd = l.curStart + p + if p == -1 { + l.curEnd = len(l.orig) + } + return l.orig[l.curStart:l.curEnd] +} + +// next sets the value to the next label. It skips the last label if it is empty. +func (l *labelIter) next() { + l.i++ + if l.slice != nil { + if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" { + l.curStart = len(l.orig) + } + } else { + l.curStart = l.curEnd + 1 + if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' { + l.curStart = len(l.orig) + } + } +} + +func (l *labelIter) set(s string) { + if l.slice == nil { + l.slice = strings.Split(l.orig, ".") + } + l.slice[l.i] = s +} + +// acePrefix is the ASCII Compatible Encoding prefix. 
+const acePrefix = "xn--" + +func (p *Profile) simplify(cat category) category { + switch cat { + case disallowedSTD3Mapped: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = mapped + } + case disallowedSTD3Valid: + if p.useSTD3Rules { + cat = disallowed + } else { + cat = valid + } + case deviation: + if !p.transitional { + cat = valid + } + case validNV8, validXV8: + // TODO: handle V2008 + cat = valid + } + return cat +} + +func validateFromPunycode(p *Profile, s string) error { + if !norm.NFC.IsNormalString(s) { + return &labelError{s, "V1"} + } + for i := 0; i < len(s); { + v, sz := trie.lookupString(s[i:]) + if c := p.simplify(info(v).category()); c != valid && c != deviation { + return &labelError{s, "V6"} + } + i += sz + } + return nil +} + +const ( + zwnj = "\u200c" + zwj = "\u200d" +) + +type joinState int8 + +const ( + stateStart joinState = iota + stateVirama + stateBefore + stateBeforeVirama + stateAfter + stateFAIL +) + +var joinStates = [][numJoinTypes]joinState{ + stateStart: { + joiningL: stateBefore, + joiningD: stateBefore, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateVirama, + }, + stateVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + }, + stateBefore: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + joinZWNJ: stateAfter, + joinZWJ: stateFAIL, + joinVirama: stateBeforeVirama, + }, + stateBeforeVirama: { + joiningL: stateBefore, + joiningD: stateBefore, + joiningT: stateBefore, + }, + stateAfter: { + joiningL: stateFAIL, + joiningD: stateBefore, + joiningT: stateAfter, + joiningR: stateStart, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateAfter, // no-op as we can't accept joiners here + }, + stateFAIL: { + 0: stateFAIL, + joiningL: stateFAIL, + joiningD: stateFAIL, + joiningT: stateFAIL, + joiningR: stateFAIL, + joinZWNJ: stateFAIL, + joinZWJ: stateFAIL, + joinVirama: stateFAIL, + }, +} + +// validateLabel validates the criteria from Section 4.1. Item 1, 4, and 6 are +// already implicitly satisfied by the overall implementation. +func (p *Profile) validateLabel(s string) error { + if s == "" { + if p.verifyDNSLength { + return &labelError{s, "A4"} + } + return nil + } + if p.bidirule != nil && !p.bidirule(s) { + return &labelError{s, "B"} + } + if !p.validateLabels { + return nil + } + trie := p.trie // p.validateLabels is only set if trie is set. + if len(s) > 4 && s[2] == '-' && s[3] == '-' { + return &labelError{s, "V2"} + } + if s[0] == '-' || s[len(s)-1] == '-' { + return &labelError{s, "V3"} + } + // TODO: merge the use of this in the trie. + v, sz := trie.lookupString(s) + x := info(v) + if x.isModifier() { + return &labelError{s, "V5"} + } + // Quickly return in the absence of zero-width (non) joiners. 
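+	// If either joiner is present, the walk below feeds each character's
+	// joining type through the joinStates table (a tabular sketch of the
+	// contextual restrictions on U+200C/U+200D) and reports error "C" when
+	// the final state is stateFAIL or stateAfter.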
+ if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 { + return nil + } + st := stateStart + for i := 0; ; { + jt := x.joinType() + if s[i:i+sz] == zwj { + jt = joinZWJ + } else if s[i:i+sz] == zwnj { + jt = joinZWNJ + } + st = joinStates[st][jt] + if x.isViramaModifier() { + st = joinStates[st][joinVirama] + } + if i += sz; i == len(s) { + break + } + v, sz = trie.lookupString(s[i:]) + x = info(v) + } + if st == stateFAIL || st == stateAfter { + return &labelError{s, "C"} + } + return nil +} + +func ascii(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] >= utf8.RuneSelf { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/net/idna/punycode.go b/vendor/golang.org/x/net/idna/punycode.go new file mode 100644 index 00000000..02c7d59a --- /dev/null +++ b/vendor/golang.org/x/net/idna/punycode.go @@ -0,0 +1,203 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// This file implements the Punycode algorithm from RFC 3492. + +import ( + "math" + "strings" + "unicode/utf8" +) + +// These parameter values are specified in section 5. +// +// All computation is done with int32s, so that overflow behavior is identical +// regardless of whether int is 32-bit or 64-bit. +const ( + base int32 = 36 + damp int32 = 700 + initialBias int32 = 72 + initialN int32 = 128 + skew int32 = 38 + tmax int32 = 26 + tmin int32 = 1 +) + +func punyError(s string) error { return &labelError{s, "A3"} } + +// decode decodes a string as specified in section 6.2. +func decode(encoded string) (string, error) { + if encoded == "" { + return "", nil + } + pos := 1 + strings.LastIndex(encoded, "-") + if pos == 1 { + return "", punyError(encoded) + } + if pos == len(encoded) { + return encoded[:len(encoded)-1], nil + } + output := make([]rune, 0, len(encoded)) + if pos != 0 { + for _, r := range encoded[:pos-1] { + output = append(output, r) + } + } + i, n, bias := int32(0), initialN, initialBias + for pos < len(encoded) { + oldI, w := i, int32(1) + for k := base; ; k += base { + if pos == len(encoded) { + return "", punyError(encoded) + } + digit, ok := decodeDigit(encoded[pos]) + if !ok { + return "", punyError(encoded) + } + pos++ + i += digit * w + if i < 0 { + return "", punyError(encoded) + } + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if digit < t { + break + } + w *= base - t + if w >= math.MaxInt32/base { + return "", punyError(encoded) + } + } + x := int32(len(output) + 1) + bias = adapt(i-oldI, x, oldI == 0) + n += i / x + i %= x + if n > utf8.MaxRune || len(output) >= 1024 { + return "", punyError(encoded) + } + output = append(output, 0) + copy(output[i+1:], output[i:]) + output[i] = n + i++ + } + return string(output), nil +} + +// encode encodes a string as specified in section 6.3 and prepends prefix to +// the result. +// +// The "while h < length(input)" line in the specification becomes "for +// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes. 
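+//
+// As a hedged illustration (values assumed from the commonly cited IDNA
+// example, not computed here): encode("xn--", "bücher") should yield
+// "xn--bcher-kva", the inverse of the decode step used by process above.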
+func encode(prefix, s string) (string, error) { + output := make([]byte, len(prefix), len(prefix)+1+2*len(s)) + copy(output, prefix) + delta, n, bias := int32(0), initialN, initialBias + b, remaining := int32(0), int32(0) + for _, r := range s { + if r < 0x80 { + b++ + output = append(output, byte(r)) + } else { + remaining++ + } + } + h := b + if b > 0 { + output = append(output, '-') + } + for remaining != 0 { + m := int32(0x7fffffff) + for _, r := range s { + if m > r && r >= n { + m = r + } + } + delta += (m - n) * (h + 1) + if delta < 0 { + return "", punyError(s) + } + n = m + for _, r := range s { + if r < n { + delta++ + if delta < 0 { + return "", punyError(s) + } + continue + } + if r > n { + continue + } + q := delta + for k := base; ; k += base { + t := k - bias + if t < tmin { + t = tmin + } else if t > tmax { + t = tmax + } + if q < t { + break + } + output = append(output, encodeDigit(t+(q-t)%(base-t))) + q = (q - t) / (base - t) + } + output = append(output, encodeDigit(q)) + bias = adapt(delta, h+1, h == b) + delta = 0 + h++ + remaining-- + } + delta++ + n++ + } + return string(output), nil +} + +func decodeDigit(x byte) (digit int32, ok bool) { + switch { + case '0' <= x && x <= '9': + return int32(x - ('0' - 26)), true + case 'A' <= x && x <= 'Z': + return int32(x - 'A'), true + case 'a' <= x && x <= 'z': + return int32(x - 'a'), true + } + return 0, false +} + +func encodeDigit(digit int32) byte { + switch { + case 0 <= digit && digit < 26: + return byte(digit + 'a') + case 26 <= digit && digit < 36: + return byte(digit + ('0' - 26)) + } + panic("idna: internal error in punycode encoding") +} + +// adapt is the bias adaptation function specified in section 6.1. +func adapt(delta, numPoints int32, firstTime bool) int32 { + if firstTime { + delta /= damp + } else { + delta /= 2 + } + delta += delta / numPoints + k := int32(0) + for delta > ((base-tmin)*tmax)/2 { + delta /= base - tmin + k += base + } + return k + (base-tmin+1)*delta/(delta+skew) +} diff --git a/vendor/golang.org/x/net/idna/tables10.0.0.go b/vendor/golang.org/x/net/idna/tables10.0.0.go new file mode 100644 index 00000000..54fddb4b --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -0,0 +1,4559 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.10,!go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "10.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07</\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" + + "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" + + "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" + + "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + 
"\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + 
"\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c<?\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" + + "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" + + "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. 
\x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 29052 bytes (28.37 KiB). Checksum: ef06e7ecc26f36dd. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 125: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 125 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 127 blocks, 8128 entries, 16256 bytes +// The third block is the zero block. 
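+// Each block spans 64 entries, so lookupValue above indexes this array as
+// idnaValues[n<<6+uint32(b)] for block numbers below 125 and falls back to
+// idnaSparse for the rest; the ASCII fast path in the lookup functions reads
+// idnaValues[c0] directly.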
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x0040, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0040, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0040, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0040, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0040, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0040, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x126, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 
0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x127, 0x401: 0x128, 0x402: 0x129, 0x403: 0x12a, 0x404: 0x12b, 0x405: 0x12c, 0x406: 0x12d, 0x407: 0x12e, + 0x408: 0x12f, 0x409: 0xba, 0x40a: 0x130, 0x40b: 0x131, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x132, 0x411: 0x133, 0x412: 0x134, 0x413: 0x135, 0x414: 0xba, 0x415: 0xba, 0x416: 0x136, 0x417: 0x137, + 0x418: 0x138, 0x419: 0x139, 0x41a: 0x13a, 0x41b: 0x13b, 0x41c: 0x13c, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13d, 0x423: 0x13e, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x13f, 0x429: 0x140, 0x42a: 0x141, 0x42b: 0x142, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x143, 0x431: 0x144, 0x432: 0x145, 0x433: 0xba, 0x434: 0x146, 0x435: 0x147, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x148, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x149, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x14a, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x14b, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x14c, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 
0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x142, 0x529: 0x14d, 0x52a: 0xba, 0x52b: 0x14e, 0x52c: 0x14f, 0x52d: 0x150, 0x52e: 0x151, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x152, 0x53e: 0x153, 0x53f: 0x154, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x155, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x156, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x157, 0x585: 0x158, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x159, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x15a, 0x5b2: 0x15b, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x15c, 0x5c4: 0x15d, 0x5c5: 0x15e, 0x5c6: 0x15f, 0x5c7: 0x160, + 0x5c8: 0x9b, 0x5c9: 0x161, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x162, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x163, 0x5e9: 0x164, 0x5ea: 0x165, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 
0xba, + // Block 0x18, offset 0x600 + 0x600: 0x166, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x167, 0x624: 0x6f, 0x625: 0x168, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x169, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x16a, 0x641: 0x9b, 0x642: 0x16b, 0x643: 0x16c, 0x644: 0x73, 0x645: 0x74, 0x646: 0x16d, 0x647: 0x16e, + 0x648: 0x75, 0x649: 0x16f, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x170, 0x65c: 0x9b, 0x65d: 0x171, 0x65e: 0x9b, 0x65f: 0x172, + 0x660: 0x173, 0x661: 0x174, 0x662: 0x175, 0x663: 0xba, 0x664: 0x176, 0x665: 0x177, 0x666: 0x178, 0x667: 0x179, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x17a, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x17b, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x17c, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 
0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x17d, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x17e, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x17f, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x180, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 0x0b, 
0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, + 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, + 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, + 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, + 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, + 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, + // Block 0x21, offset 0x840 + 0x840: 0x181, 0x841: 0x182, 0x842: 0xba, 0x843: 0xba, 0x844: 0x183, 0x845: 0x183, 0x846: 0x183, 0x847: 0x184, + 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, + 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, + 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, + 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, + 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, + 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, + 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, + 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, + 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, + 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, + 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, + 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, + 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, + 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, +} + +// idnaSparseOffset: 264 entries, 528 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x8a, 0x93, 0xa3, 0xb1, 0xbd, 0xc9, 0xda, 0xe4, 0xeb, 0xf8, 0x109, 0x110, 0x11b, 0x12a, 0x138, 0x142, 0x144, 0x149, 0x14c, 0x14f, 0x151, 0x15d, 0x168, 0x170, 0x176, 0x17c, 0x181, 0x186, 0x189, 0x18d, 0x193, 0x198, 0x1a4, 0x1ae, 0x1b4, 0x1c5, 0x1cf, 0x1d2, 0x1da, 0x1dd, 0x1ea, 0x1f2, 0x1f6, 0x1fd, 0x205, 0x215, 0x221, 0x223, 0x22d, 0x239, 0x245, 0x251, 0x259, 0x25e, 0x268, 0x279, 0x27d, 0x288, 0x28c, 0x295, 0x29d, 0x2a3, 0x2a8, 0x2ab, 0x2af, 0x2b5, 0x2b9, 0x2bd, 0x2c3, 0x2ca, 0x2d0, 0x2d8, 0x2df, 0x2ea, 0x2f4, 0x2f8, 0x2fb, 0x301, 0x305, 0x307, 0x30a, 0x30c, 0x30f, 0x319, 0x31c, 0x32b, 0x32f, 0x334, 0x337, 0x33b, 0x340, 0x345, 0x34b, 0x351, 0x360, 0x366, 0x36a, 0x379, 0x37e, 0x386, 0x390, 0x39b, 0x3a3, 0x3b4, 0x3bd, 0x3cd, 0x3da, 0x3e4, 0x3e9, 
0x3f6, 0x3fa, 0x3ff, 0x401, 0x405, 0x407, 0x40b, 0x414, 0x41a, 0x41e, 0x42e, 0x438, 0x43d, 0x440, 0x446, 0x44d, 0x452, 0x456, 0x45c, 0x461, 0x46a, 0x46f, 0x475, 0x47c, 0x483, 0x48a, 0x48e, 0x493, 0x496, 0x49b, 0x4a7, 0x4ad, 0x4b2, 0x4b9, 0x4c1, 0x4c6, 0x4ca, 0x4da, 0x4e1, 0x4e5, 0x4e9, 0x4f0, 0x4f2, 0x4f5, 0x4f8, 0x4fc, 0x500, 0x506, 0x50f, 0x51b, 0x522, 0x52b, 0x533, 0x53a, 0x548, 0x555, 0x562, 0x56b, 0x56f, 0x57d, 0x585, 0x590, 0x599, 0x59f, 0x5a7, 0x5b0, 0x5ba, 0x5bd, 0x5c9, 0x5cc, 0x5d1, 0x5de, 0x5e7, 0x5f3, 0x5f6, 0x600, 0x609, 0x615, 0x622, 0x62a, 0x62d, 0x632, 0x635, 0x638, 0x63b, 0x642, 0x649, 0x64d, 0x658, 0x65b, 0x661, 0x666, 0x66a, 0x66d, 0x670, 0x673, 0x676, 0x679, 0x67e, 0x688, 0x68b, 0x68f, 0x69e, 0x6aa, 0x6ae, 0x6b3, 0x6b8, 0x6bc, 0x6c1, 0x6ca, 0x6d5, 0x6db, 0x6e3, 0x6e7, 0x6eb, 0x6f1, 0x6f7, 0x6fc, 0x6ff, 0x70f, 0x716, 0x719, 0x71c, 0x720, 0x726, 0x72b, 0x730, 0x735, 0x738, 0x73d, 0x740, 0x743, 0x747, 0x74b, 0x74e, 0x75e, 0x76f, 0x774, 0x776, 0x778} + +// idnaSparseValues: 1915 entries, 7660 bytes +var idnaSparseValues = [1915]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, 
lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0xf, offset 0x8a + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x10, 
offset 0x93 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x11, offset 0xa3 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x3b08, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbd + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xc9 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x15, offset 0xda + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x16, offset 0xe4 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // 
Block 0x17, offset 0xeb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x18, offset 0xf8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x19, offset 0x109 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1a, offset 0x110 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1b, offset 0x11b + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1d, offset 0x138 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1e, offset 
0x142 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x1f, offset 0x144 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x20, offset 0x149 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x21, offset 0x14c + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x22, offset 0x14f + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x23, offset 0x151 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x24, offset 0x15d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x25, offset 0x168 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x26, offset 0x170 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x176 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x28, offset 0x17c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x29, offset 0x181 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2a, offset 0x186 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2b, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2c, offset 0x18d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2d, offset 0x193 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 
0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2e, offset 0x198 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x2f, offset 0x1a4 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x30, offset 0x1ae + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x31, offset 0x1b4 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x32, offset 0x1c5 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x33, offset 0x1cf + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x34, offset 0x1d2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x35, offset 0x1da + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x36, offset 0x1dd + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x37, offset 0x1ea + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 
0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x38, offset 0x1f2 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x39, offset 0x1f6 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3a, offset 0x1fd + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3b, offset 0x205 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x215 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x221 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3e, offset 0x223 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x3f, offset 0x22d + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x239 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + 
{value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x41, offset 0x245 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x42, offset 0x251 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x43, offset 0x259 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x44, offset 0x25e + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x45, offset 0x268 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x46, offset 0x279 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x47, offset 0x27d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x48, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x49, offset 0x28c + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4a, offset 0x295 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + 
{value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4b, offset 0x29d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4c, offset 0x2a3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4d, offset 0x2a8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x4e, offset 0x2ab + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x4f, offset 0x2af + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x50, offset 0x2b5 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x51, offset 0x2b9 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x52, offset 0x2bd + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x53, offset 0x2c3 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x54, offset 0x2ca + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x55, offset 0x2d0 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2d8 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x57, offset 0x2df + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, 
+ {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x59, offset 0x2f4 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5a, offset 0x2f8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0x5b, offset 0x2fb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5c, offset 0x301 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5d, offset 0x305 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x5e, offset 0x307 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x5f, offset 0x30a + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x30c + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x61, offset 0x30f + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x62, offset 0x319 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x63, offset 0x31c + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x65, offset 0x32f + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x66, offset 0x334 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0x67, offset 0x337 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x68, offset 0x33b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 
0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x69, offset 0x340 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6a, offset 0x345 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6b, offset 0x34b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6c, offset 0x351 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6d, offset 0x360 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6e, offset 0x366 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x6f, offset 0x36a + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x70, offset 0x379 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x71, offset 0x37e + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x72, offset 0x386 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 
0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x74, offset 0x39b + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x75, offset 0x3a3 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x76, offset 0x3b4 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x77, offset 0x3bd + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x78, offset 0x3cd + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x79, offset 0x3da + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7a, offset 0x3e4 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3e9 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 
0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fa + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x7e, offset 0x3ff + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x7f, offset 0x401 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x80, offset 0x405 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x407 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x82, offset 0x40b + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x83, offset 0x414 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x84, offset 0x41a + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x85, offset 0x41e + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x86, offset 0x42e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x87, offset 0x438 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x88, offset 0x43d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, 
hi: 0xbf}, + // Block 0x89, offset 0x440 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8a, offset 0x446 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8b, offset 0x44d + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8c, offset 0x452 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8d, offset 0x456 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x8e, offset 0x45c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xbf}, + // Block 0x8f, offset 0x461 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x90, offset 0x46a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x91, offset 0x46f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x92, offset 0x475 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x93, offset 0x47c + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x94, offset 0x483 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x95, offset 0x48a + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x96, offset 0x48e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x97, offset 
0x493 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x98, offset 0x496 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x99, offset 0x49b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9a, offset 0x4a7 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9c, offset 0x4b2 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9d, offset 0x4b9 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0x9e, offset 0x4c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0x9f, offset 0x4c6 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa0, offset 0x4ca + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa1, offset 0x4da + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa2, offset 0x4e1 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa3, offset 0x4e5 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + 
{value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa4, offset 0x4e9 + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa5, offset 0x4f0 + {value: 0x0000, lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa6, offset 0x4f2 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa7, offset 0x4f5 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xa8, offset 0x4f8 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xa9, offset 0x4fc + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xaa, offset 0x500 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xab, offset 0x506 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x50f + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xad, offset 0x51b + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xae, offset 0x522 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xaf, offset 0x52b + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb0, offset 0x533 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb1, offset 0x53a + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, 
+ {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb2, offset 0x548 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x555 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb4, offset 0x562 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb5, offset 0x56b + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb6, offset 0x56f + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xb8, offset 0x585 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xb9, offset 0x590 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + 
{value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x599 + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbb, offset 0x59f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5a7 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbd, offset 0x5b0 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xbe, offset 0x5ba + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xbf, offset 0x5bd + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc0, offset 0x5c9 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc1, offset 0x5cc + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc2, offset 0x5d1 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x3b08, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0xbf}, + // Block 0xc4, offset 0x5e7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 
0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x98}, + {value: 0x3b08, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xbf}, + // Block 0xc5, offset 0x5f3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc6, offset 0x5f6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc7, offset 0x600 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc8, offset 0x609 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc9, offset 0x615 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xca, offset 0x622 + {value: 0x0000, lo: 0x07}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcb, offset 0x62a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xcc, offset 0x62d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xcd, offset 0x632 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xce, offset 0x635 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcf, offset 0x638 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd0, offset 0x63b + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 
0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd1, offset 0x642 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd2, offset 0x649 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd3, offset 0x64d + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd4, offset 0x658 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd5, offset 0x65b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd6, offset 0x661 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd7, offset 0x666 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xd8, offset 0x66a + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd9, offset 0x66d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xda, offset 0x670 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xdb, offset 0x673 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xdc, offset 0x676 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xdd, offset 0x679 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xde, offset 0x67e + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xdf, offset 0x688 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe0, offset 0x68b + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe1, offset 0x68f + {value: 0x0000, lo: 0x0e}, 
+ {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x69e + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xe3, offset 0x6aa + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xe4, offset 0x6ae + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe5, offset 0x6b3 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe6, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe7, offset 0x6bc + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6c1 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6ca + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xea, offset 0x6d5 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xeb, offset 0x6db + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xec, offset 0x6e3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, 
lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xed, offset 0x6e7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xee, offset 0x6eb + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xef, offset 0x6f1 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xf0, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xf1, offset 0x6fc + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xf2, offset 0x6ff + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xf3, offset 0x70f + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xf4, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf5, offset 0x719 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf6, offset 0x71c + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf7, offset 0x720 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf8, offset 0x726 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf9, offset 0x72b + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xfa, offset 0x730 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0xfb, offset 0x735 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xbf}, + // Block 0xfc, offset 0x738 + 
{value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0xfd, offset 0x73d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xfe, offset 0x740 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xff, offset 0x743 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x100, offset 0x747 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x101, offset 0x74b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x102, offset 0x74e + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x103, offset 0x75e + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x104, offset 0x76f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x105, offset 0x774 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x106, offset 0x776 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x107, offset 0x778 + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42114 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go new file mode 100644 index 00000000..c515d7ad --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -0,0 +1,4653 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "11.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07</\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" + + "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" + + "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" + + "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + 
"\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + 
"\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c<?\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" + + "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" + + "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. 
\x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return idnaValues[c0]
+	}
+	i := idnaIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *idnaTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return idnaValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = idnaIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := idnaIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = idnaIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = idnaIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *idnaTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return idnaValues[c0]
+	}
+	i := idnaIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = idnaIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// idnaTrie. Total size: 29404 bytes (28.71 KiB). Checksum: 848c45acb5f7991c.
+type idnaTrie struct{}
+
+func newIdnaTrie(i int) *idnaTrie {
+	return &idnaTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	case n < 125:
+		return uint16(idnaValues[n<<6+uint32(b)])
+	default:
+		n -= 125
+		return uint16(idnaSparse.lookup(n, b))
+	}
+}
+
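+// classifyAll is a minimal usage sketch added for illustration only; it is
+// not emitted by the table generator. It assumes nothing beyond the
+// lookupString method defined above: it walks s, reading one (value, size)
+// pair per UTF-8 encoding, and stops early if the input ends in the middle
+// of an encoding (size == 0). Illegal bytes still advance by one byte, so
+// the loop always terminates. The name classifyAll is hypothetical.
+func classifyAll(t *idnaTrie, s string) []uint16 {
+	var vs []uint16
+	for i := 0; i < len(s); {
+		v, sz := t.lookupString(s[i:])
+		if sz == 0 {
+			break // s ends with an incomplete UTF-8 encoding
+		}
+		vs = append(vs, v)
+		i += sz
+	}
+	return vs
+}
+
+// idnaValues: 127 blocks, 8128 entries, 16256 bytes
+// The third block is the zero block.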
+var idnaValues = [8128]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, + 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, + 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, + 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, + 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, + 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, + 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, + 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, + 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, + 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 0x0040, 0x57b: 0x0040, + 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, + // Block 0x16, offset 0x580 + 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, + 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, + 
0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, + 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, + 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, + 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, + 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, + 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, + 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, + 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, + 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, + 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, + 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, + 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, + 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, + // Block 0x18, offset 0x600 + 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, + 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, + 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, + 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, + 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, + 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, + 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, + 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, + // Block 0x19, offset 0x640 + 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, + 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, + 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, + 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, + 0x658: 0x0008, 0x659: 0x0008, 0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, + 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, + 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, + 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, + // Block 0x1a, offset 0x680 + 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, + 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, + 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, + 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, + 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, + 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, + 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, + 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, + 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, + 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, + 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, + 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, + 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, + 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, + 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, + // Block 0x1c, offset 0x700 + 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, + 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, + 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, + 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, + 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, + 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, + 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 0x3308, + 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, + 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, + 0x74c: 0x0008, 
0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, + 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, + 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, + 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, + 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, + // Block 0x1e, offset 0x780 + 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, + 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, + 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, + 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, + 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, + 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, + 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, + 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, + 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, + 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, + 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, + 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, + 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, + 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, + 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, + 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, + // Block 0x20, offset 0x800 + 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, + 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, + 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, + 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, + 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, + 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, + 0x836: 0x0040, 0x837: 0x0040, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, + 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, + // Block 0x21, offset 0x840 + 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, + 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, + 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, + 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, + 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, + 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, + 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, + 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, + 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, + 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, + 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, + 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, + 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, + 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, + 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, + 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, + 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, + 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, + 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, + 0x8fc: 0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, + // Block 0x24, offset 0x900 + 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, + 0x906: 0x0040, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0040, 0x90a: 0x0008, 0x90b: 0x0040, + 0x90c: 0x0040, 0x90d: 0x0008, 
0x90e: 0x0040, 0x90f: 0x0040, 0x910: 0x0040, 0x911: 0x0040, + 0x912: 0x0040, 0x913: 0x0040, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, + 0x918: 0x0040, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0040, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0040, 0x929: 0x0040, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0040, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, + 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, + 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x0040, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, + // Block 0x25, offset 0x940 + 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, + 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, + 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, + 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, + 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, + 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, + 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, + 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, + 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, + // Block 0x26, offset 0x980 + 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, + 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, + 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, + 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, + 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, + 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, + 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, + 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, + 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, + 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, + 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, + 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, + 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, + 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 0x9dc: 0x0008, 0x9dd: 0x0008, + 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, + 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, + 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 
0x0008, + 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, + 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, + 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, + // Block 0x28, offset 0xa00 + 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, + 0xa06: 0x059d, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, + 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, + 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05b5, 0xa15: 0x05b5, 0xa16: 0x0f99, 0xa17: 0x0fa9, + 0xa18: 0x0fb9, 0xa19: 0x059d, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05cd, 0xa1d: 0x1099, + 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, + 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, + 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, + 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, + 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, + 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, + // Block 0x29, offset 0xa40 + 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, + 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, + 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, + 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, + 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, + 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, + 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05e5, 0xa68: 0x1239, 0xa69: 0x1251, + 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, + 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, + 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x05fd, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, + 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, + 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, + 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, + 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, + 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 
0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, + 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x0615, 0xadb: 0x0635, 0xadc: 0x0008, 0xadd: 0x0008, + 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, + 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, + 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, + 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, + 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, + 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, + 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, + 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, + 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, + 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, + 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, + 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, + 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, + 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, + 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, + 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, + // Block 0x2d, offset 0xb40 + 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, + 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, + 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, + 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, + 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, + 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, + 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, + 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, + 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x064d, 0xb7b: 0x1459, + 0xb7c: 0x19b1, 0xb7d: 0x0666, 0xb7e: 0x1a31, 0xb7f: 0x0686, + // Block 0x2e, offset 0xb80 + 0xb80: 0x06a6, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, + 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06c5, 0xb89: 0x1471, 0xb8a: 0x06dd, 0xb8b: 0x1489, + 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, + 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, + 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x06f5, 0xb9b: 0x14a1, 0xb9c: 0x0040, 0xb9d: 0x1bd2, + 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, + 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, + 0xbaa: 0x070d, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, + 0xbb0: 
0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, + 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x0725, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, + 0xbbc: 0x1ce9, 0xbbd: 0x073e, 0xbbe: 0x075e, 0xbbf: 0x0040, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, + 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, + 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x077e, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, + 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, + 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, + 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, + 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x079e, 0xbff: 0x0018, + // Block 0x30, offset 0xc00 + 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, + 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, + 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, + 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, + 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, + 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, + 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, + 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, + 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, + 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07bd, + 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, + // Block 0x31, offset 0xc40 + 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, + 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07d5, + 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, + 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, + 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, + 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, + 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, + 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, + 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, + 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, + 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 0x0018, 0xc7f: 0x0018, + // Block 0x32, offset 0xc80 + 0xc80: 0x07ee, 0xc81: 0x080e, 0xc82: 0x1159, 0xc83: 0x082d, 0xc84: 0x0018, 0xc85: 0x084e, + 0xc86: 0x086e, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x088d, 0xc8a: 0x0f31, 0xc8b: 0x0249, + 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 
0xc90: 0x0f41, 0xc91: 0x0f41, + 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, + 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, + 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08ad, 0xca2: 0x2061, 0xca3: 0x0018, + 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, + 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, + 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, + 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08cd, + 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x08ed, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, + 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, + 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, + 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, + 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, + 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x0905, 0xce3: 0x2439, + 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x0925, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, + 0xcea: 0x24a9, 0xceb: 0x0945, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, + 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x0965, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, + 0xcf6: 0x0985, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09a5, + 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, + // Block 0x34, offset 0xd00 + 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, + 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, + 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, + 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, + 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, + 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a06, 0xd35: 0x0a26, + 0xd36: 0x0a46, 0xd37: 0x0a66, 0xd38: 0x0a86, 0xd39: 0x0aa6, 0xd3a: 0x0ac6, 0xd3b: 0x0ae6, + 0xd3c: 0x0b06, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, + // Block 0x35, offset 0xd40 + 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, + 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, + 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, + 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, + 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b26, 0xd5d: 0x0b46, + 0xd5e: 0x0b66, 0xd5f: 0x0b86, 0xd60: 0x0ba6, 0xd61: 0x0bc6, 0xd62: 0x0be6, 0xd63: 0x0c06, + 0xd64: 0x0c26, 0xd65: 0x0c46, 0xd66: 0x0c66, 0xd67: 0x0c86, 0xd68: 0x0ca6, 0xd69: 0x0cc6, + 0xd6a: 0x0ce6, 0xd6b: 0x0d06, 0xd6c: 0x0d26, 0xd6d: 0x0d46, 0xd6e: 0x0d66, 0xd6f: 0x0d86, + 0xd70: 0x0da6, 0xd71: 
0x0dc6, 0xd72: 0x0de6, 0xd73: 0x0e06, 0xd74: 0x0e26, 0xd75: 0x0e46, + 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, + 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, + // Block 0x36, offset 0xd80 + 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, + 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, + 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, + 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, + 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, + 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, + 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, + 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, + 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, + 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, + 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, + // Block 0x37, offset 0xdc0 + 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, + 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, + 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, + 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, + 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, + 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ebd, + 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, + 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, + 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, + 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, + 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, + // Block 0x38, offset 0xe00 + 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, + 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, + 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, + 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, + 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, + 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, + 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, + 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, + 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, + 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 0x0018, + // Block 0x39, offset 0xe40 + 0xe40: 0x26fd, 0xe41: 0x271d, 0xe42: 0x273d, 0xe43: 0x275d, 0xe44: 0x277d, 0xe45: 0x279d, + 0xe46: 0x27bd, 0xe47: 0x27dd, 0xe48: 0x27fd, 0xe49: 0x281d, 0xe4a: 0x283d, 0xe4b: 0x285d, + 0xe4c: 0x287d, 0xe4d: 0x289d, 0xe4e: 0x28bd, 0xe4f: 0x28dd, 0xe50: 0x28fd, 
0xe51: 0x291d, + 0xe52: 0x293d, 0xe53: 0x295d, 0xe54: 0x297d, 0xe55: 0x299d, 0xe56: 0x0040, 0xe57: 0x0040, + 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, + 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, + 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, + 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, + 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, + 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, + 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, + // Block 0x3a, offset 0xe80 + 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, + 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, + 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, + 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, + 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, + 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, + 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, + 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, + 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, + 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29bd, 0xeb9: 0x29dd, 0xeba: 0x29fd, 0xebb: 0x0018, + 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2b3d, 0xec1: 0x2b5d, 0xec2: 0x2b7d, 0xec3: 0x2b9d, 0xec4: 0x2bbd, 0xec5: 0x2bdd, + 0xec6: 0x2bdd, 0xec7: 0x2bdd, 0xec8: 0x2bfd, 0xec9: 0x2bfd, 0xeca: 0x2bfd, 0xecb: 0x2bfd, + 0xecc: 0x2c1d, 0xecd: 0x2c1d, 0xece: 0x2c1d, 0xecf: 0x2c3d, 0xed0: 0x2c5d, 0xed1: 0x2c5d, + 0xed2: 0x2a7d, 0xed3: 0x2a7d, 0xed4: 0x2c5d, 0xed5: 0x2c5d, 0xed6: 0x2c7d, 0xed7: 0x2c7d, + 0xed8: 0x2c5d, 0xed9: 0x2c5d, 0xeda: 0x2a7d, 0xedb: 0x2a7d, 0xedc: 0x2c5d, 0xedd: 0x2c5d, + 0xede: 0x2c3d, 0xedf: 0x2c3d, 0xee0: 0x2c9d, 0xee1: 0x2c9d, 0xee2: 0x2cbd, 0xee3: 0x2cbd, + 0xee4: 0x0040, 0xee5: 0x2cdd, 0xee6: 0x2cfd, 0xee7: 0x2d1d, 0xee8: 0x2d1d, 0xee9: 0x2d3d, + 0xeea: 0x2d5d, 0xeeb: 0x2d7d, 0xeec: 0x2d9d, 0xeed: 0x2dbd, 0xeee: 0x2ddd, 0xeef: 0x2dfd, + 0xef0: 0x2e1d, 0xef1: 0x2e3d, 0xef2: 0x2e3d, 0xef3: 0x2e5d, 0xef4: 0x2e7d, 0xef5: 0x2e7d, + 0xef6: 0x2e9d, 0xef7: 0x2ebd, 0xef8: 0x2e5d, 0xef9: 0x2edd, 0xefa: 0x2efd, 0xefb: 0x2edd, + 0xefc: 0x2e5d, 0xefd: 0x2f1d, 0xefe: 0x2f3d, 0xeff: 0x2f5d, + // Block 0x3c, offset 0xf00 + 0xf00: 0x2f7d, 0xf01: 0x2f9d, 0xf02: 0x2cfd, 0xf03: 0x2cdd, 0xf04: 0x2fbd, 0xf05: 0x2fdd, + 0xf06: 0x2ffd, 0xf07: 0x301d, 0xf08: 0x303d, 0xf09: 0x305d, 0xf0a: 0x307d, 0xf0b: 0x309d, + 0xf0c: 0x30bd, 0xf0d: 0x30dd, 0xf0e: 0x30fd, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, + 0xf12: 0x311d, 0xf13: 0x313d, 0xf14: 0x315d, 0xf15: 0x317d, 0xf16: 0x319d, 0xf17: 0x31bd, + 0xf18: 0x31dd, 0xf19: 0x31fd, 0xf1a: 0x321d, 0xf1b: 0x323d, 0xf1c: 0x315d, 0xf1d: 0x325d, + 0xf1e: 0x327d, 0xf1f: 0x329d, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, + 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, + 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, + 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 
0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, + 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, + 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32bd, 0xf45: 0x32dd, + 0xf46: 0x32fd, 0xf47: 0x331d, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, + 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x333d, 0xf51: 0x3761, + 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, + 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, + 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x335d, 0xf61: 0x337d, 0xf62: 0x339d, 0xf63: 0x33bd, + 0xf64: 0x33dd, 0xf65: 0x33dd, 0xf66: 0x33fd, 0xf67: 0x341d, 0xf68: 0x343d, 0xf69: 0x345d, + 0xf6a: 0x347d, 0xf6b: 0x349d, 0xf6c: 0x34bd, 0xf6d: 0x34dd, 0xf6e: 0x34fd, 0xf6f: 0x351d, + 0xf70: 0x353d, 0xf71: 0x355d, 0xf72: 0x357d, 0xf73: 0x359d, 0xf74: 0x35bd, 0xf75: 0x35dd, + 0xf76: 0x35fd, 0xf77: 0x361d, 0xf78: 0x363d, 0xf79: 0x365d, 0xf7a: 0x367d, 0xf7b: 0x369d, + 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36bd, 0xf7f: 0x0018, + // Block 0x3e, offset 0xf80 + 0xf80: 0x36dd, 0xf81: 0x36fd, 0xf82: 0x371d, 0xf83: 0x373d, 0xf84: 0x375d, 0xf85: 0x377d, + 0xf86: 0x379d, 0xf87: 0x37bd, 0xf88: 0x37dd, 0xf89: 0x37fd, 0xf8a: 0x381d, 0xf8b: 0x383d, + 0xf8c: 0x385d, 0xf8d: 0x387d, 0xf8e: 0x389d, 0xf8f: 0x38bd, 0xf90: 0x38dd, 0xf91: 0x38fd, + 0xf92: 0x391d, 0xf93: 0x393d, 0xf94: 0x395d, 0xf95: 0x397d, 0xf96: 0x399d, 0xf97: 0x39bd, + 0xf98: 0x39dd, 0xf99: 0x39fd, 0xf9a: 0x3a1d, 0xf9b: 0x3a3d, 0xf9c: 0x3a5d, 0xf9d: 0x3a7d, + 0xf9e: 0x3a9d, 0xf9f: 0x3abd, 0xfa0: 0x3add, 0xfa1: 0x3afd, 0xfa2: 0x3b1d, 0xfa3: 0x3b3d, + 0xfa4: 0x3b5d, 0xfa5: 0x3b7d, 0xfa6: 0x127d, 0xfa7: 0x3b9d, 0xfa8: 0x3bbd, 0xfa9: 0x3bdd, + 0xfaa: 0x3bfd, 0xfab: 0x3c1d, 0xfac: 0x3c3d, 0xfad: 0x3c5d, 0xfae: 0x239d, 0xfaf: 0x3c7d, + 0xfb0: 0x3c9d, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, + 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, + 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, + 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, + 0xfcc: 0x3c99, 0xfcd: 0x3cbd, 0xfce: 0x3cb1, 0xfcf: 0x3cdd, 0xfd0: 0x3cfd, 0xfd1: 0x3d15, + 0xfd2: 0x3d2d, 0xfd3: 0x3d45, 0xfd4: 0x3d5d, 0xfd5: 0x3d5d, 0xfd6: 0x3d45, 0xfd7: 0x3d75, + 0xfd8: 0x07bd, 0xfd9: 0x3d8d, 0xfda: 0x3da5, 0xfdb: 0x3dbd, 0xfdc: 0x3dd5, 0xfdd: 0x3ded, + 0xfde: 0x3e05, 0xfdf: 0x3e1d, 0xfe0: 0x3e35, 0xfe1: 0x3e4d, 0xfe2: 0x3e65, 0xfe3: 0x3e7d, + 0xfe4: 0x3e95, 0xfe5: 0x3e95, 0xfe6: 0x3ead, 0xfe7: 0x3ead, 0xfe8: 0x3ec5, 0xfe9: 0x3ec5, + 0xfea: 0x3edd, 0xfeb: 0x3ef5, 0xfec: 0x3f0d, 0xfed: 0x3f25, 0xfee: 0x3f3d, 0xfef: 0x3f3d, + 0xff0: 0x3f55, 0xff1: 0x3f55, 0xff2: 0x3f55, 0xff3: 0x3f6d, 0xff4: 0x3f85, 0xff5: 0x3f9d, + 0xff6: 0x3fb5, 0xff7: 0x3f9d, 0xff8: 0x3fcd, 0xff9: 0x3fe5, 0xffa: 0x3f6d, 0xffb: 0x3ffd, + 0xffc: 0x4015, 0xffd: 0x4015, 0xffe: 0x4015, 0xfff: 0x0040, + // Block 0x40, offset 0x1000 + 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, + 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, + 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 
0x1011: 0x42d9, + 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, + 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, + 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, + 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, + 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, + 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, + 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, + 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, + // Block 0x41, offset 0x1040 + 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, + 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, + 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, + 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, + 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, + 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, + 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, + 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, + 0x1070: 0x6009, 0x1071: 0x402d, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x404d, 0x1075: 0x6069, + 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x406d, 0x1079: 0x406d, 0x107a: 0x60b1, 0x107b: 0x60c9, + 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, + // Block 0x42, offset 0x1080 + 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x408d, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, + 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40ad, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, + 0x108c: 0x40cd, 0x108d: 0x40cd, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x40ed, + 0x1092: 0x410d, 0x1093: 0x412d, 0x1094: 0x414d, 0x1095: 0x416d, 0x1096: 0x6359, 0x1097: 0x6371, + 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x418d, 0x109c: 0x63d1, 0x109d: 0x63e9, + 0x109e: 0x6401, 0x109f: 0x41ad, 0x10a0: 0x41cd, 0x10a1: 0x6419, 0x10a2: 0x41ed, 0x10a3: 0x420d, + 0x10a4: 0x422d, 0x10a5: 0x6431, 0x10a6: 0x424d, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, + 0x10aa: 0x426d, 0x10ab: 0x428d, 0x10ac: 0x42ad, 0x10ad: 0x42cd, 0x10ae: 0x64b1, 0x10af: 0x64f1, + 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x42ed, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, + 0x10b6: 0x430d, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, + 0x10bc: 0x432d, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, + // Block 0x43, offset 0x10c0 + 0x10c0: 0x434d, 0x10c1: 0x436d, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, + 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, + 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, + 0x10d2: 0x438d, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43ad, 0x10d6: 0x43cd, 0x10d7: 0x67b1, + 0x10d8: 0x0040, 0x10d9: 0x43ed, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, + 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, + 0x10e4: 
0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, + 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, + 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, + 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, + 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x440d, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, + 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, + 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, + 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, + 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, + 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, + 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, + 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, + 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, + 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, + 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, + 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, + 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, + 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, + 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, + 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, + 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, + 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, + 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, + 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, + 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, + 0x11b6: 0xe00d, 0x11b7: 0x0008, 
0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, + 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, + 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, + 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, + 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, + 0x11fc: 0x0008, 0x11fd: 0x442d, 0x11fe: 0xe00d, 0x11ff: 0x0008, + // Block 0x48, offset 0x1200 + 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, + 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, + 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, + 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, + 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, + 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, + 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, + 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, + 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x444d, 0x1234: 0xe00d, 0x1235: 0x0008, + 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0x0040, 0x1239: 0x0008, 0x123a: 0x0040, 0x123b: 0x0040, + 0x123c: 0x0040, 0x123d: 0x0040, 0x123e: 0x0040, 0x123f: 0x0040, + // Block 0x49, offset 0x1240 + 0x1240: 0x64d5, 0x1241: 0x64f5, 0x1242: 0x6515, 0x1243: 0x6535, 0x1244: 0x6555, 0x1245: 0x6575, + 0x1246: 0x6595, 0x1247: 0x65b5, 0x1248: 0x65d5, 0x1249: 0x65f5, 0x124a: 0x6615, 0x124b: 0x6635, + 0x124c: 0x6655, 0x124d: 0x6675, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x6695, 0x1251: 0x0008, + 0x1252: 0x66b5, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x66d5, 0x1256: 0x66f5, 0x1257: 0x6715, + 0x1258: 0x6735, 0x1259: 0x6755, 0x125a: 0x6775, 0x125b: 0x6795, 0x125c: 0x67b5, 0x125d: 0x67d5, + 0x125e: 0x67f5, 0x125f: 0x0008, 0x1260: 0x6815, 0x1261: 0x0008, 0x1262: 0x6835, 0x1263: 0x0008, + 0x1264: 0x0008, 0x1265: 0x6855, 0x1266: 0x6875, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, + 0x126a: 0x6895, 0x126b: 0x68b5, 0x126c: 0x68d5, 0x126d: 0x68f5, 0x126e: 0x6915, 0x126f: 0x6935, + 0x1270: 0x6955, 0x1271: 0x6975, 0x1272: 0x6995, 0x1273: 0x69b5, 0x1274: 0x69d5, 0x1275: 0x69f5, + 0x1276: 0x6a15, 0x1277: 0x6a35, 0x1278: 0x6a55, 0x1279: 0x6a75, 0x127a: 0x6a95, 0x127b: 0x6ab5, + 0x127c: 0x6ad5, 0x127d: 0x6af5, 0x127e: 0x6b15, 0x127f: 0x6b35, + // Block 0x4a, offset 0x1280 + 0x1280: 0x7a95, 0x1281: 0x7ab5, 0x1282: 0x7ad5, 0x1283: 0x7af5, 0x1284: 0x7b15, 0x1285: 0x7b35, + 0x1286: 0x7b55, 0x1287: 0x7b75, 0x1288: 0x7b95, 0x1289: 
0x7bb5, 0x128a: 0x7bd5, 0x128b: 0x7bf5, + 0x128c: 0x7c15, 0x128d: 0x7c35, 0x128e: 0x7c55, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, + 0x1292: 0x7c75, 0x1293: 0x7c95, 0x1294: 0x7cb5, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, + 0x1298: 0x7cd5, 0x1299: 0x7cf5, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, + 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, + 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, + 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, + 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, + 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, + 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d15, 0x12c4: 0x7d35, 0x12c5: 0x7001, + 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, + 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, + 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, + 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, + 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, + 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, + 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, + 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, + 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, + 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, + // Block 0x4c, offset 0x1300 + 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, + 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, + 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, + 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, + 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, + 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, + 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, + 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, + 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, + 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, + 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, + // Block 0x4d, offset 0x1340 + 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, + 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 0x134b: 0x78b9, + 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, + 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, + 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 
0x135d: 0x7931, + 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, + 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, + 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, + 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, + 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, + 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, + // Block 0x4e, offset 0x1380 + 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, + 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, + 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, + 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, + 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, + 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, + 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, + 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, + 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, + 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, + 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, + 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, + 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, + 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, + 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, + 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, + 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, + 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, + 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, + 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, + 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, + // Block 0x50, offset 0x1400 + 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, + 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, + 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, + 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, + 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, + 0x141e: 0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, + 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, + 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, + 0x1430: 
0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, + 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, + 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, + // Block 0x51, offset 0x1440 + 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, + 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, + 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, + 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, + 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, + 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, + 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, + 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, + 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, + 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, + 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, + // Block 0x52, offset 0x1480 + 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, + 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, + 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, + 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, + 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, + 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, + 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, + 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, + 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, + 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, + 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, + 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, + 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, + 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, + 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, + 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, + 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, + 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 0x14f2: 0x9b11, 0x14f3: 0x9b49, 0x14f4: 0x9b81, 0x14f5: 0x9bb9, + 0x14f6: 0x9bf1, 0x14f7: 0x9bf1, 0x14f8: 0x9c29, 0x14f9: 0x9c61, 0x14fa: 0x9c99, 0x14fb: 0x9cd1, + 0x14fc: 0x9d09, 0x14fd: 0x9d09, 0x14fe: 0x9d41, 0x14ff: 0x9d79, + // Block 0x54, offset 0x1500 + 0x1500: 0xa949, 0x1501: 0xa981, 
0x1502: 0xa9b9, 0x1503: 0xa8a1, 0x1504: 0x9bb9, 0x1505: 0x9989, + 0x1506: 0xa9f1, 0x1507: 0xaa29, 0x1508: 0x0040, 0x1509: 0x0040, 0x150a: 0x0040, 0x150b: 0x0040, + 0x150c: 0x0040, 0x150d: 0x0040, 0x150e: 0x0040, 0x150f: 0x0040, 0x1510: 0x0040, 0x1511: 0x0040, + 0x1512: 0x0040, 0x1513: 0x0040, 0x1514: 0x0040, 0x1515: 0x0040, 0x1516: 0x0040, 0x1517: 0x0040, + 0x1518: 0x0040, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x0040, 0x1521: 0x0040, 0x1522: 0x0040, 0x1523: 0x0040, + 0x1524: 0x0040, 0x1525: 0x0040, 0x1526: 0x0040, 0x1527: 0x0040, 0x1528: 0x0040, 0x1529: 0x0040, + 0x152a: 0x0040, 0x152b: 0x0040, 0x152c: 0x0040, 0x152d: 0x0040, 0x152e: 0x0040, 0x152f: 0x0040, + 0x1530: 0xaa61, 0x1531: 0xaa99, 0x1532: 0xaad1, 0x1533: 0xab19, 0x1534: 0xab61, 0x1535: 0xaba9, + 0x1536: 0xabf1, 0x1537: 0xac39, 0x1538: 0xac81, 0x1539: 0xacc9, 0x153a: 0xad02, 0x153b: 0xae12, + 0x153c: 0xae91, 0x153d: 0x0018, 0x153e: 0x0040, 0x153f: 0x0040, + // Block 0x55, offset 0x1540 + 0x1540: 0x33c0, 0x1541: 0x33c0, 0x1542: 0x33c0, 0x1543: 0x33c0, 0x1544: 0x33c0, 0x1545: 0x33c0, + 0x1546: 0x33c0, 0x1547: 0x33c0, 0x1548: 0x33c0, 0x1549: 0x33c0, 0x154a: 0x33c0, 0x154b: 0x33c0, + 0x154c: 0x33c0, 0x154d: 0x33c0, 0x154e: 0x33c0, 0x154f: 0x33c0, 0x1550: 0xaeda, 0x1551: 0x7d55, + 0x1552: 0x0040, 0x1553: 0xaeea, 0x1554: 0x03c2, 0x1555: 0xaefa, 0x1556: 0xaf0a, 0x1557: 0x7d75, + 0x1558: 0x7d95, 0x1559: 0x0040, 0x155a: 0x0040, 0x155b: 0x0040, 0x155c: 0x0040, 0x155d: 0x0040, + 0x155e: 0x0040, 0x155f: 0x0040, 0x1560: 0x3308, 0x1561: 0x3308, 0x1562: 0x3308, 0x1563: 0x3308, + 0x1564: 0x3308, 0x1565: 0x3308, 0x1566: 0x3308, 0x1567: 0x3308, 0x1568: 0x3308, 0x1569: 0x3308, + 0x156a: 0x3308, 0x156b: 0x3308, 0x156c: 0x3308, 0x156d: 0x3308, 0x156e: 0x3308, 0x156f: 0x3308, + 0x1570: 0x0040, 0x1571: 0x7db5, 0x1572: 0x7dd5, 0x1573: 0xaf1a, 0x1574: 0xaf1a, 0x1575: 0x1fd2, + 0x1576: 0x1fe2, 0x1577: 0xaf2a, 0x1578: 0xaf3a, 0x1579: 0x7df5, 0x157a: 0x7e15, 0x157b: 0x7e35, + 0x157c: 0x7df5, 0x157d: 0x7e55, 0x157e: 0x7e75, 0x157f: 0x7e55, + // Block 0x56, offset 0x1580 + 0x1580: 0x7e95, 0x1581: 0x7eb5, 0x1582: 0x7ed5, 0x1583: 0x7eb5, 0x1584: 0x7ef5, 0x1585: 0x0018, + 0x1586: 0x0018, 0x1587: 0xaf4a, 0x1588: 0xaf5a, 0x1589: 0x7f16, 0x158a: 0x7f36, 0x158b: 0x7f56, + 0x158c: 0x7f76, 0x158d: 0xaf1a, 0x158e: 0xaf1a, 0x158f: 0xaf1a, 0x1590: 0xaeda, 0x1591: 0x7f95, + 0x1592: 0x0040, 0x1593: 0x0040, 0x1594: 0x03c2, 0x1595: 0xaeea, 0x1596: 0xaf0a, 0x1597: 0xaefa, + 0x1598: 0x7fb5, 0x1599: 0x1fd2, 0x159a: 0x1fe2, 0x159b: 0xaf2a, 0x159c: 0xaf3a, 0x159d: 0x7e95, + 0x159e: 0x7ef5, 0x159f: 0xaf6a, 0x15a0: 0xaf7a, 0x15a1: 0xaf8a, 0x15a2: 0x1fb2, 0x15a3: 0xaf99, + 0x15a4: 0xafaa, 0x15a5: 0xafba, 0x15a6: 0x1fc2, 0x15a7: 0x0040, 0x15a8: 0xafca, 0x15a9: 0xafda, + 0x15aa: 0xafea, 0x15ab: 0xaffa, 0x15ac: 0x0040, 0x15ad: 0x0040, 0x15ae: 0x0040, 0x15af: 0x0040, + 0x15b0: 0x7fd6, 0x15b1: 0xb009, 0x15b2: 0x7ff6, 0x15b3: 0x0808, 0x15b4: 0x8016, 0x15b5: 0x0040, + 0x15b6: 0x8036, 0x15b7: 0xb031, 0x15b8: 0x8056, 0x15b9: 0xb059, 0x15ba: 0x8076, 0x15bb: 0xb081, + 0x15bc: 0x8096, 0x15bd: 0xb0a9, 0x15be: 0x80b6, 0x15bf: 0xb0d1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb0f9, 0x15c1: 0xb111, 0x15c2: 0xb111, 0x15c3: 0xb129, 0x15c4: 0xb129, 0x15c5: 0xb141, + 0x15c6: 0xb141, 0x15c7: 0xb159, 0x15c8: 0xb159, 0x15c9: 0xb171, 0x15ca: 0xb171, 0x15cb: 0xb171, + 0x15cc: 0xb171, 0x15cd: 0xb189, 0x15ce: 0xb189, 0x15cf: 0xb1a1, 0x15d0: 0xb1a1, 0x15d1: 0xb1a1, + 0x15d2: 0xb1a1, 0x15d3: 0xb1b9, 0x15d4: 0xb1b9, 0x15d5: 
0xb1d1, 0x15d6: 0xb1d1, 0x15d7: 0xb1d1, + 0x15d8: 0xb1d1, 0x15d9: 0xb1e9, 0x15da: 0xb1e9, 0x15db: 0xb1e9, 0x15dc: 0xb1e9, 0x15dd: 0xb201, + 0x15de: 0xb201, 0x15df: 0xb201, 0x15e0: 0xb201, 0x15e1: 0xb219, 0x15e2: 0xb219, 0x15e3: 0xb219, + 0x15e4: 0xb219, 0x15e5: 0xb231, 0x15e6: 0xb231, 0x15e7: 0xb231, 0x15e8: 0xb231, 0x15e9: 0xb249, + 0x15ea: 0xb249, 0x15eb: 0xb261, 0x15ec: 0xb261, 0x15ed: 0xb279, 0x15ee: 0xb279, 0x15ef: 0xb291, + 0x15f0: 0xb291, 0x15f1: 0xb2a9, 0x15f2: 0xb2a9, 0x15f3: 0xb2a9, 0x15f4: 0xb2a9, 0x15f5: 0xb2c1, + 0x15f6: 0xb2c1, 0x15f7: 0xb2c1, 0x15f8: 0xb2c1, 0x15f9: 0xb2d9, 0x15fa: 0xb2d9, 0x15fb: 0xb2d9, + 0x15fc: 0xb2d9, 0x15fd: 0xb2f1, 0x15fe: 0xb2f1, 0x15ff: 0xb2f1, + // Block 0x58, offset 0x1600 + 0x1600: 0xb2f1, 0x1601: 0xb309, 0x1602: 0xb309, 0x1603: 0xb309, 0x1604: 0xb309, 0x1605: 0xb321, + 0x1606: 0xb321, 0x1607: 0xb321, 0x1608: 0xb321, 0x1609: 0xb339, 0x160a: 0xb339, 0x160b: 0xb339, + 0x160c: 0xb339, 0x160d: 0xb351, 0x160e: 0xb351, 0x160f: 0xb351, 0x1610: 0xb351, 0x1611: 0xb369, + 0x1612: 0xb369, 0x1613: 0xb369, 0x1614: 0xb369, 0x1615: 0xb381, 0x1616: 0xb381, 0x1617: 0xb381, + 0x1618: 0xb381, 0x1619: 0xb399, 0x161a: 0xb399, 0x161b: 0xb399, 0x161c: 0xb399, 0x161d: 0xb3b1, + 0x161e: 0xb3b1, 0x161f: 0xb3b1, 0x1620: 0xb3b1, 0x1621: 0xb3c9, 0x1622: 0xb3c9, 0x1623: 0xb3c9, + 0x1624: 0xb3c9, 0x1625: 0xb3e1, 0x1626: 0xb3e1, 0x1627: 0xb3e1, 0x1628: 0xb3e1, 0x1629: 0xb3f9, + 0x162a: 0xb3f9, 0x162b: 0xb3f9, 0x162c: 0xb3f9, 0x162d: 0xb411, 0x162e: 0xb411, 0x162f: 0x7ab1, + 0x1630: 0x7ab1, 0x1631: 0xb429, 0x1632: 0xb429, 0x1633: 0xb429, 0x1634: 0xb429, 0x1635: 0xb441, + 0x1636: 0xb441, 0x1637: 0xb469, 0x1638: 0xb469, 0x1639: 0xb491, 0x163a: 0xb491, 0x163b: 0xb4b9, + 0x163c: 0xb4b9, 0x163d: 0x0040, 0x163e: 0x0040, 0x163f: 0x03c0, + // Block 0x59, offset 0x1640 + 0x1640: 0x0040, 0x1641: 0xaefa, 0x1642: 0xb4e2, 0x1643: 0xaf6a, 0x1644: 0xafda, 0x1645: 0xafea, + 0x1646: 0xaf7a, 0x1647: 0xb4f2, 0x1648: 0x1fd2, 0x1649: 0x1fe2, 0x164a: 0xaf8a, 0x164b: 0x1fb2, + 0x164c: 0xaeda, 0x164d: 0xaf99, 0x164e: 0x29d1, 0x164f: 0xb502, 0x1650: 0x1f41, 0x1651: 0x00c9, + 0x1652: 0x0069, 0x1653: 0x0079, 0x1654: 0x1f51, 0x1655: 0x1f61, 0x1656: 0x1f71, 0x1657: 0x1f81, + 0x1658: 0x1f91, 0x1659: 0x1fa1, 0x165a: 0xaeea, 0x165b: 0x03c2, 0x165c: 0xafaa, 0x165d: 0x1fc2, + 0x165e: 0xafba, 0x165f: 0xaf0a, 0x1660: 0xaffa, 0x1661: 0x0039, 0x1662: 0x0ee9, 0x1663: 0x1159, + 0x1664: 0x0ef9, 0x1665: 0x0f09, 0x1666: 0x1199, 0x1667: 0x0f31, 0x1668: 0x0249, 0x1669: 0x0f41, + 0x166a: 0x0259, 0x166b: 0x0f51, 0x166c: 0x0359, 0x166d: 0x0f61, 0x166e: 0x0f71, 0x166f: 0x00d9, + 0x1670: 0x0f99, 0x1671: 0x2039, 0x1672: 0x0269, 0x1673: 0x01d9, 0x1674: 0x0fa9, 0x1675: 0x0fb9, + 0x1676: 0x1089, 0x1677: 0x0279, 0x1678: 0x0369, 0x1679: 0x0289, 0x167a: 0x13d1, 0x167b: 0xaf4a, + 0x167c: 0xafca, 0x167d: 0xaf5a, 0x167e: 0xb512, 0x167f: 0xaf1a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x1caa, 0x1681: 0x0039, 0x1682: 0x0ee9, 0x1683: 0x1159, 0x1684: 0x0ef9, 0x1685: 0x0f09, + 0x1686: 0x1199, 0x1687: 0x0f31, 0x1688: 0x0249, 0x1689: 0x0f41, 0x168a: 0x0259, 0x168b: 0x0f51, + 0x168c: 0x0359, 0x168d: 0x0f61, 0x168e: 0x0f71, 0x168f: 0x00d9, 0x1690: 0x0f99, 0x1691: 0x2039, + 0x1692: 0x0269, 0x1693: 0x01d9, 0x1694: 0x0fa9, 0x1695: 0x0fb9, 0x1696: 0x1089, 0x1697: 0x0279, + 0x1698: 0x0369, 0x1699: 0x0289, 0x169a: 0x13d1, 0x169b: 0xaf2a, 0x169c: 0xb522, 0x169d: 0xaf3a, + 0x169e: 0xb532, 0x169f: 0x80d5, 0x16a0: 0x80f5, 0x16a1: 0x29d1, 0x16a2: 0x8115, 0x16a3: 0x8115, + 0x16a4: 0x8135, 0x16a5: 0x8155, 0x16a6: 0x8175, 0x16a7: 0x8195, 0x16a8: 0x81b5, 
0x16a9: 0x81d5, + 0x16aa: 0x81f5, 0x16ab: 0x8215, 0x16ac: 0x8235, 0x16ad: 0x8255, 0x16ae: 0x8275, 0x16af: 0x8295, + 0x16b0: 0x82b5, 0x16b1: 0x82d5, 0x16b2: 0x82f5, 0x16b3: 0x8315, 0x16b4: 0x8335, 0x16b5: 0x8355, + 0x16b6: 0x8375, 0x16b7: 0x8395, 0x16b8: 0x83b5, 0x16b9: 0x83d5, 0x16ba: 0x83f5, 0x16bb: 0x8415, + 0x16bc: 0x81b5, 0x16bd: 0x8435, 0x16be: 0x8455, 0x16bf: 0x8215, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x8475, 0x16c1: 0x8495, 0x16c2: 0x84b5, 0x16c3: 0x84d5, 0x16c4: 0x84f5, 0x16c5: 0x8515, + 0x16c6: 0x8535, 0x16c7: 0x8555, 0x16c8: 0x84d5, 0x16c9: 0x8575, 0x16ca: 0x84d5, 0x16cb: 0x8595, + 0x16cc: 0x8595, 0x16cd: 0x85b5, 0x16ce: 0x85b5, 0x16cf: 0x85d5, 0x16d0: 0x8515, 0x16d1: 0x85f5, + 0x16d2: 0x8615, 0x16d3: 0x85f5, 0x16d4: 0x8635, 0x16d5: 0x8615, 0x16d6: 0x8655, 0x16d7: 0x8655, + 0x16d8: 0x8675, 0x16d9: 0x8675, 0x16da: 0x8695, 0x16db: 0x8695, 0x16dc: 0x8615, 0x16dd: 0x8115, + 0x16de: 0x86b5, 0x16df: 0x86d5, 0x16e0: 0x0040, 0x16e1: 0x86f5, 0x16e2: 0x8715, 0x16e3: 0x8735, + 0x16e4: 0x8755, 0x16e5: 0x8735, 0x16e6: 0x8775, 0x16e7: 0x8795, 0x16e8: 0x87b5, 0x16e9: 0x87b5, + 0x16ea: 0x87d5, 0x16eb: 0x87d5, 0x16ec: 0x87f5, 0x16ed: 0x87f5, 0x16ee: 0x87d5, 0x16ef: 0x87d5, + 0x16f0: 0x8815, 0x16f1: 0x8835, 0x16f2: 0x8855, 0x16f3: 0x8875, 0x16f4: 0x8895, 0x16f5: 0x88b5, + 0x16f6: 0x88b5, 0x16f7: 0x88b5, 0x16f8: 0x88d5, 0x16f9: 0x88d5, 0x16fa: 0x88d5, 0x16fb: 0x88d5, + 0x16fc: 0x87b5, 0x16fd: 0x87b5, 0x16fe: 0x87b5, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0040, 0x1701: 0x0040, 0x1702: 0x8715, 0x1703: 0x86f5, 0x1704: 0x88f5, 0x1705: 0x86f5, + 0x1706: 0x8715, 0x1707: 0x86f5, 0x1708: 0x0040, 0x1709: 0x0040, 0x170a: 0x8915, 0x170b: 0x8715, + 0x170c: 0x8935, 0x170d: 0x88f5, 0x170e: 0x8935, 0x170f: 0x8715, 0x1710: 0x0040, 0x1711: 0x0040, + 0x1712: 0x8955, 0x1713: 0x8975, 0x1714: 0x8875, 0x1715: 0x8935, 0x1716: 0x88f5, 0x1717: 0x8935, + 0x1718: 0x0040, 0x1719: 0x0040, 0x171a: 0x8995, 0x171b: 0x89b5, 0x171c: 0x8995, 0x171d: 0x0040, + 0x171e: 0x0040, 0x171f: 0x0040, 0x1720: 0xb541, 0x1721: 0xb559, 0x1722: 0xb571, 0x1723: 0x89d6, + 0x1724: 0xb589, 0x1725: 0xb5a1, 0x1726: 0x89f5, 0x1727: 0x0040, 0x1728: 0x8a15, 0x1729: 0x8a35, + 0x172a: 0x8a55, 0x172b: 0x8a35, 0x172c: 0x8a75, 0x172d: 0x8a95, 0x172e: 0x8ab5, 0x172f: 0x0040, + 0x1730: 0x0040, 0x1731: 0x0040, 0x1732: 0x0040, 0x1733: 0x0040, 0x1734: 0x0040, 0x1735: 0x0040, + 0x1736: 0x0040, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0340, 0x173a: 0x0340, 0x173b: 0x0340, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0a08, 0x1742: 0x0a08, 0x1743: 0x0a08, 0x1744: 0x0a08, 0x1745: 0x0c08, + 0x1746: 0x0808, 0x1747: 0x0c08, 0x1748: 0x0818, 0x1749: 0x0c08, 0x174a: 0x0c08, 0x174b: 0x0808, + 0x174c: 0x0808, 0x174d: 0x0908, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0c08, 0x1751: 0x0c08, + 0x1752: 0x0c08, 0x1753: 0x0a08, 0x1754: 0x0a08, 0x1755: 0x0a08, 0x1756: 0x0a08, 0x1757: 0x0908, + 0x1758: 0x0a08, 0x1759: 0x0a08, 0x175a: 0x0a08, 0x175b: 0x0a08, 0x175c: 0x0a08, 0x175d: 0x0c08, + 0x175e: 0x0a08, 0x175f: 0x0a08, 0x1760: 0x0a08, 0x1761: 0x0c08, 0x1762: 0x0808, 0x1763: 0x0808, + 0x1764: 0x0c08, 0x1765: 0x3308, 0x1766: 0x3308, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0040, + 0x176a: 0x0040, 0x176b: 0x0a18, 0x176c: 0x0a18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0c18, + 0x1770: 0x0818, 0x1771: 0x0818, 0x1772: 0x0818, 0x1773: 0x0818, 0x1774: 0x0818, 0x1775: 0x0818, + 0x1776: 0x0818, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x0a08, 0x1781: 0x0c08, 0x1782: 0x0a08, 0x1783: 0x0c08, 0x1784: 0x0c08, 0x1785: 0x0c08, + 0x1786: 0x0a08, 0x1787: 0x0a08, 0x1788: 0x0a08, 0x1789: 0x0c08, 0x178a: 0x0a08, 0x178b: 0x0a08, + 0x178c: 0x0c08, 0x178d: 0x0a08, 0x178e: 0x0c08, 0x178f: 0x0c08, 0x1790: 0x0a08, 0x1791: 0x0c08, + 0x1792: 0x0040, 0x1793: 0x0040, 0x1794: 0x0040, 0x1795: 0x0040, 0x1796: 0x0040, 0x1797: 0x0040, + 0x1798: 0x0040, 0x1799: 0x0818, 0x179a: 0x0818, 0x179b: 0x0818, 0x179c: 0x0818, 0x179d: 0x0040, + 0x179e: 0x0040, 0x179f: 0x0040, 0x17a0: 0x0040, 0x17a1: 0x0040, 0x17a2: 0x0040, 0x17a3: 0x0040, + 0x17a4: 0x0040, 0x17a5: 0x0040, 0x17a6: 0x0040, 0x17a7: 0x0040, 0x17a8: 0x0040, 0x17a9: 0x0c18, + 0x17aa: 0x0c18, 0x17ab: 0x0c18, 0x17ac: 0x0c18, 0x17ad: 0x0a18, 0x17ae: 0x0a18, 0x17af: 0x0818, + 0x17b0: 0x0040, 0x17b1: 0x0040, 0x17b2: 0x0040, 0x17b3: 0x0040, 0x17b4: 0x0040, 0x17b5: 0x0040, + 0x17b6: 0x0040, 0x17b7: 0x0040, 0x17b8: 0x0040, 0x17b9: 0x0040, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x0040, 0x17bd: 0x0040, 0x17be: 0x0040, 0x17bf: 0x0040, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3308, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x0040, 0x17c5: 0x0008, + 0x17c6: 0x0008, 0x17c7: 0x0008, 0x17c8: 0x0008, 0x17c9: 0x0008, 0x17ca: 0x0008, 0x17cb: 0x0008, + 0x17cc: 0x0008, 0x17cd: 0x0040, 0x17ce: 0x0040, 0x17cf: 0x0008, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0008, 0x17d4: 0x0008, 0x17d5: 0x0008, 0x17d6: 0x0008, 0x17d7: 0x0008, + 0x17d8: 0x0008, 0x17d9: 0x0008, 0x17da: 0x0008, 0x17db: 0x0008, 0x17dc: 0x0008, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x0008, 0x17e3: 0x0008, + 0x17e4: 0x0008, 0x17e5: 0x0008, 0x17e6: 0x0008, 0x17e7: 0x0008, 0x17e8: 0x0008, 0x17e9: 0x0040, + 0x17ea: 0x0008, 0x17eb: 0x0008, 0x17ec: 0x0008, 0x17ed: 0x0008, 0x17ee: 0x0008, 0x17ef: 0x0008, + 0x17f0: 0x0008, 0x17f1: 0x0040, 0x17f2: 0x0008, 0x17f3: 0x0008, 0x17f4: 0x0040, 0x17f5: 0x0008, + 0x17f6: 0x0008, 0x17f7: 0x0008, 0x17f8: 0x0008, 0x17f9: 0x0008, 0x17fa: 0x0040, 0x17fb: 0x3308, + 0x17fc: 0x3308, 0x17fd: 0x0008, 0x17fe: 0x3008, 0x17ff: 0x3008, + // Block 0x60, offset 0x1800 + 0x1800: 0x3308, 0x1801: 0x3008, 0x1802: 0x3008, 0x1803: 0x3008, 0x1804: 0x3008, 0x1805: 0x0040, + 0x1806: 0x0040, 0x1807: 0x3008, 0x1808: 0x3008, 0x1809: 0x0040, 0x180a: 0x0040, 0x180b: 0x3008, + 0x180c: 0x3008, 0x180d: 0x3808, 0x180e: 0x0040, 0x180f: 0x0040, 0x1810: 0x0008, 0x1811: 0x0040, + 0x1812: 0x0040, 0x1813: 0x0040, 0x1814: 0x0040, 0x1815: 0x0040, 0x1816: 0x0040, 0x1817: 0x3008, + 0x1818: 0x0040, 0x1819: 0x0040, 0x181a: 0x0040, 0x181b: 0x0040, 0x181c: 0x0040, 0x181d: 0x0008, + 0x181e: 0x0008, 0x181f: 0x0008, 0x1820: 0x0008, 0x1821: 0x0008, 0x1822: 0x3008, 0x1823: 0x3008, + 0x1824: 0x0040, 0x1825: 0x0040, 0x1826: 0x3308, 0x1827: 0x3308, 0x1828: 0x3308, 0x1829: 0x3308, + 0x182a: 0x3308, 0x182b: 0x3308, 0x182c: 0x3308, 0x182d: 0x0040, 0x182e: 0x0040, 0x182f: 0x0040, + 0x1830: 0x3308, 0x1831: 0x3308, 0x1832: 0x3308, 0x1833: 0x3308, 0x1834: 0x3308, 0x1835: 0x0040, + 0x1836: 0x0040, 0x1837: 0x0040, 0x1838: 0x0040, 0x1839: 0x0040, 0x183a: 0x0040, 0x183b: 0x0040, + 0x183c: 0x0040, 0x183d: 0x0040, 0x183e: 0x0040, 0x183f: 0x0040, + // Block 0x61, offset 0x1840 + 0x1840: 0x0039, 0x1841: 0x0ee9, 0x1842: 0x1159, 0x1843: 0x0ef9, 0x1844: 0x0f09, 0x1845: 0x1199, + 0x1846: 0x0f31, 0x1847: 0x0249, 0x1848: 0x0f41, 0x1849: 0x0259, 0x184a: 0x0f51, 0x184b: 0x0359, + 0x184c: 0x0f61, 0x184d: 0x0f71, 
0x184e: 0x00d9, 0x184f: 0x0f99, 0x1850: 0x2039, 0x1851: 0x0269, + 0x1852: 0x01d9, 0x1853: 0x0fa9, 0x1854: 0x0fb9, 0x1855: 0x1089, 0x1856: 0x0279, 0x1857: 0x0369, + 0x1858: 0x0289, 0x1859: 0x13d1, 0x185a: 0x0039, 0x185b: 0x0ee9, 0x185c: 0x1159, 0x185d: 0x0ef9, + 0x185e: 0x0f09, 0x185f: 0x1199, 0x1860: 0x0f31, 0x1861: 0x0249, 0x1862: 0x0f41, 0x1863: 0x0259, + 0x1864: 0x0f51, 0x1865: 0x0359, 0x1866: 0x0f61, 0x1867: 0x0f71, 0x1868: 0x00d9, 0x1869: 0x0f99, + 0x186a: 0x2039, 0x186b: 0x0269, 0x186c: 0x01d9, 0x186d: 0x0fa9, 0x186e: 0x0fb9, 0x186f: 0x1089, + 0x1870: 0x0279, 0x1871: 0x0369, 0x1872: 0x0289, 0x1873: 0x13d1, 0x1874: 0x0039, 0x1875: 0x0ee9, + 0x1876: 0x1159, 0x1877: 0x0ef9, 0x1878: 0x0f09, 0x1879: 0x1199, 0x187a: 0x0f31, 0x187b: 0x0249, + 0x187c: 0x0f41, 0x187d: 0x0259, 0x187e: 0x0f51, 0x187f: 0x0359, + // Block 0x62, offset 0x1880 + 0x1880: 0x0f61, 0x1881: 0x0f71, 0x1882: 0x00d9, 0x1883: 0x0f99, 0x1884: 0x2039, 0x1885: 0x0269, + 0x1886: 0x01d9, 0x1887: 0x0fa9, 0x1888: 0x0fb9, 0x1889: 0x1089, 0x188a: 0x0279, 0x188b: 0x0369, + 0x188c: 0x0289, 0x188d: 0x13d1, 0x188e: 0x0039, 0x188f: 0x0ee9, 0x1890: 0x1159, 0x1891: 0x0ef9, + 0x1892: 0x0f09, 0x1893: 0x1199, 0x1894: 0x0f31, 0x1895: 0x0040, 0x1896: 0x0f41, 0x1897: 0x0259, + 0x1898: 0x0f51, 0x1899: 0x0359, 0x189a: 0x0f61, 0x189b: 0x0f71, 0x189c: 0x00d9, 0x189d: 0x0f99, + 0x189e: 0x2039, 0x189f: 0x0269, 0x18a0: 0x01d9, 0x18a1: 0x0fa9, 0x18a2: 0x0fb9, 0x18a3: 0x1089, + 0x18a4: 0x0279, 0x18a5: 0x0369, 0x18a6: 0x0289, 0x18a7: 0x13d1, 0x18a8: 0x0039, 0x18a9: 0x0ee9, + 0x18aa: 0x1159, 0x18ab: 0x0ef9, 0x18ac: 0x0f09, 0x18ad: 0x1199, 0x18ae: 0x0f31, 0x18af: 0x0249, + 0x18b0: 0x0f41, 0x18b1: 0x0259, 0x18b2: 0x0f51, 0x18b3: 0x0359, 0x18b4: 0x0f61, 0x18b5: 0x0f71, + 0x18b6: 0x00d9, 0x18b7: 0x0f99, 0x18b8: 0x2039, 0x18b9: 0x0269, 0x18ba: 0x01d9, 0x18bb: 0x0fa9, + 0x18bc: 0x0fb9, 0x18bd: 0x1089, 0x18be: 0x0279, 0x18bf: 0x0369, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0289, 0x18c1: 0x13d1, 0x18c2: 0x0039, 0x18c3: 0x0ee9, 0x18c4: 0x1159, 0x18c5: 0x0ef9, + 0x18c6: 0x0f09, 0x18c7: 0x1199, 0x18c8: 0x0f31, 0x18c9: 0x0249, 0x18ca: 0x0f41, 0x18cb: 0x0259, + 0x18cc: 0x0f51, 0x18cd: 0x0359, 0x18ce: 0x0f61, 0x18cf: 0x0f71, 0x18d0: 0x00d9, 0x18d1: 0x0f99, + 0x18d2: 0x2039, 0x18d3: 0x0269, 0x18d4: 0x01d9, 0x18d5: 0x0fa9, 0x18d6: 0x0fb9, 0x18d7: 0x1089, + 0x18d8: 0x0279, 0x18d9: 0x0369, 0x18da: 0x0289, 0x18db: 0x13d1, 0x18dc: 0x0039, 0x18dd: 0x0040, + 0x18de: 0x1159, 0x18df: 0x0ef9, 0x18e0: 0x0040, 0x18e1: 0x0040, 0x18e2: 0x0f31, 0x18e3: 0x0040, + 0x18e4: 0x0040, 0x18e5: 0x0259, 0x18e6: 0x0f51, 0x18e7: 0x0040, 0x18e8: 0x0040, 0x18e9: 0x0f71, + 0x18ea: 0x00d9, 0x18eb: 0x0f99, 0x18ec: 0x2039, 0x18ed: 0x0040, 0x18ee: 0x01d9, 0x18ef: 0x0fa9, + 0x18f0: 0x0fb9, 0x18f1: 0x1089, 0x18f2: 0x0279, 0x18f3: 0x0369, 0x18f4: 0x0289, 0x18f5: 0x13d1, + 0x18f6: 0x0039, 0x18f7: 0x0ee9, 0x18f8: 0x1159, 0x18f9: 0x0ef9, 0x18fa: 0x0040, 0x18fb: 0x1199, + 0x18fc: 0x0040, 0x18fd: 0x0249, 0x18fe: 0x0f41, 0x18ff: 0x0259, + // Block 0x64, offset 0x1900 + 0x1900: 0x0f51, 0x1901: 0x0359, 0x1902: 0x0f61, 0x1903: 0x0f71, 0x1904: 0x0040, 0x1905: 0x0f99, + 0x1906: 0x2039, 0x1907: 0x0269, 0x1908: 0x01d9, 0x1909: 0x0fa9, 0x190a: 0x0fb9, 0x190b: 0x1089, + 0x190c: 0x0279, 0x190d: 0x0369, 0x190e: 0x0289, 0x190f: 0x13d1, 0x1910: 0x0039, 0x1911: 0x0ee9, + 0x1912: 0x1159, 0x1913: 0x0ef9, 0x1914: 0x0f09, 0x1915: 0x1199, 0x1916: 0x0f31, 0x1917: 0x0249, + 0x1918: 0x0f41, 0x1919: 0x0259, 0x191a: 0x0f51, 0x191b: 0x0359, 0x191c: 0x0f61, 0x191d: 0x0f71, + 0x191e: 0x00d9, 0x191f: 0x0f99, 0x1920: 0x2039, 0x1921: 
0x0269, 0x1922: 0x01d9, 0x1923: 0x0fa9, + 0x1924: 0x0fb9, 0x1925: 0x1089, 0x1926: 0x0279, 0x1927: 0x0369, 0x1928: 0x0289, 0x1929: 0x13d1, + 0x192a: 0x0039, 0x192b: 0x0ee9, 0x192c: 0x1159, 0x192d: 0x0ef9, 0x192e: 0x0f09, 0x192f: 0x1199, + 0x1930: 0x0f31, 0x1931: 0x0249, 0x1932: 0x0f41, 0x1933: 0x0259, 0x1934: 0x0f51, 0x1935: 0x0359, + 0x1936: 0x0f61, 0x1937: 0x0f71, 0x1938: 0x00d9, 0x1939: 0x0f99, 0x193a: 0x2039, 0x193b: 0x0269, + 0x193c: 0x01d9, 0x193d: 0x0fa9, 0x193e: 0x0fb9, 0x193f: 0x1089, + // Block 0x65, offset 0x1940 + 0x1940: 0x0279, 0x1941: 0x0369, 0x1942: 0x0289, 0x1943: 0x13d1, 0x1944: 0x0039, 0x1945: 0x0ee9, + 0x1946: 0x0040, 0x1947: 0x0ef9, 0x1948: 0x0f09, 0x1949: 0x1199, 0x194a: 0x0f31, 0x194b: 0x0040, + 0x194c: 0x0040, 0x194d: 0x0259, 0x194e: 0x0f51, 0x194f: 0x0359, 0x1950: 0x0f61, 0x1951: 0x0f71, + 0x1952: 0x00d9, 0x1953: 0x0f99, 0x1954: 0x2039, 0x1955: 0x0040, 0x1956: 0x01d9, 0x1957: 0x0fa9, + 0x1958: 0x0fb9, 0x1959: 0x1089, 0x195a: 0x0279, 0x195b: 0x0369, 0x195c: 0x0289, 0x195d: 0x0040, + 0x195e: 0x0039, 0x195f: 0x0ee9, 0x1960: 0x1159, 0x1961: 0x0ef9, 0x1962: 0x0f09, 0x1963: 0x1199, + 0x1964: 0x0f31, 0x1965: 0x0249, 0x1966: 0x0f41, 0x1967: 0x0259, 0x1968: 0x0f51, 0x1969: 0x0359, + 0x196a: 0x0f61, 0x196b: 0x0f71, 0x196c: 0x00d9, 0x196d: 0x0f99, 0x196e: 0x2039, 0x196f: 0x0269, + 0x1970: 0x01d9, 0x1971: 0x0fa9, 0x1972: 0x0fb9, 0x1973: 0x1089, 0x1974: 0x0279, 0x1975: 0x0369, + 0x1976: 0x0289, 0x1977: 0x13d1, 0x1978: 0x0039, 0x1979: 0x0ee9, 0x197a: 0x0040, 0x197b: 0x0ef9, + 0x197c: 0x0f09, 0x197d: 0x1199, 0x197e: 0x0f31, 0x197f: 0x0040, + // Block 0x66, offset 0x1980 + 0x1980: 0x0f41, 0x1981: 0x0259, 0x1982: 0x0f51, 0x1983: 0x0359, 0x1984: 0x0f61, 0x1985: 0x0040, + 0x1986: 0x00d9, 0x1987: 0x0040, 0x1988: 0x0040, 0x1989: 0x0040, 0x198a: 0x01d9, 0x198b: 0x0fa9, + 0x198c: 0x0fb9, 0x198d: 0x1089, 0x198e: 0x0279, 0x198f: 0x0369, 0x1990: 0x0289, 0x1991: 0x0040, + 0x1992: 0x0039, 0x1993: 0x0ee9, 0x1994: 0x1159, 0x1995: 0x0ef9, 0x1996: 0x0f09, 0x1997: 0x1199, + 0x1998: 0x0f31, 0x1999: 0x0249, 0x199a: 0x0f41, 0x199b: 0x0259, 0x199c: 0x0f51, 0x199d: 0x0359, + 0x199e: 0x0f61, 0x199f: 0x0f71, 0x19a0: 0x00d9, 0x19a1: 0x0f99, 0x19a2: 0x2039, 0x19a3: 0x0269, + 0x19a4: 0x01d9, 0x19a5: 0x0fa9, 0x19a6: 0x0fb9, 0x19a7: 0x1089, 0x19a8: 0x0279, 0x19a9: 0x0369, + 0x19aa: 0x0289, 0x19ab: 0x13d1, 0x19ac: 0x0039, 0x19ad: 0x0ee9, 0x19ae: 0x1159, 0x19af: 0x0ef9, + 0x19b0: 0x0f09, 0x19b1: 0x1199, 0x19b2: 0x0f31, 0x19b3: 0x0249, 0x19b4: 0x0f41, 0x19b5: 0x0259, + 0x19b6: 0x0f51, 0x19b7: 0x0359, 0x19b8: 0x0f61, 0x19b9: 0x0f71, 0x19ba: 0x00d9, 0x19bb: 0x0f99, + 0x19bc: 0x2039, 0x19bd: 0x0269, 0x19be: 0x01d9, 0x19bf: 0x0fa9, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0fb9, 0x19c1: 0x1089, 0x19c2: 0x0279, 0x19c3: 0x0369, 0x19c4: 0x0289, 0x19c5: 0x13d1, + 0x19c6: 0x0039, 0x19c7: 0x0ee9, 0x19c8: 0x1159, 0x19c9: 0x0ef9, 0x19ca: 0x0f09, 0x19cb: 0x1199, + 0x19cc: 0x0f31, 0x19cd: 0x0249, 0x19ce: 0x0f41, 0x19cf: 0x0259, 0x19d0: 0x0f51, 0x19d1: 0x0359, + 0x19d2: 0x0f61, 0x19d3: 0x0f71, 0x19d4: 0x00d9, 0x19d5: 0x0f99, 0x19d6: 0x2039, 0x19d7: 0x0269, + 0x19d8: 0x01d9, 0x19d9: 0x0fa9, 0x19da: 0x0fb9, 0x19db: 0x1089, 0x19dc: 0x0279, 0x19dd: 0x0369, + 0x19de: 0x0289, 0x19df: 0x13d1, 0x19e0: 0x0039, 0x19e1: 0x0ee9, 0x19e2: 0x1159, 0x19e3: 0x0ef9, + 0x19e4: 0x0f09, 0x19e5: 0x1199, 0x19e6: 0x0f31, 0x19e7: 0x0249, 0x19e8: 0x0f41, 0x19e9: 0x0259, + 0x19ea: 0x0f51, 0x19eb: 0x0359, 0x19ec: 0x0f61, 0x19ed: 0x0f71, 0x19ee: 0x00d9, 0x19ef: 0x0f99, + 0x19f0: 0x2039, 0x19f1: 0x0269, 0x19f2: 0x01d9, 0x19f3: 0x0fa9, 0x19f4: 0x0fb9, 
0x19f5: 0x1089, + 0x19f6: 0x0279, 0x19f7: 0x0369, 0x19f8: 0x0289, 0x19f9: 0x13d1, 0x19fa: 0x0039, 0x19fb: 0x0ee9, + 0x19fc: 0x1159, 0x19fd: 0x0ef9, 0x19fe: 0x0f09, 0x19ff: 0x1199, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x0f31, 0x1a01: 0x0249, 0x1a02: 0x0f41, 0x1a03: 0x0259, 0x1a04: 0x0f51, 0x1a05: 0x0359, + 0x1a06: 0x0f61, 0x1a07: 0x0f71, 0x1a08: 0x00d9, 0x1a09: 0x0f99, 0x1a0a: 0x2039, 0x1a0b: 0x0269, + 0x1a0c: 0x01d9, 0x1a0d: 0x0fa9, 0x1a0e: 0x0fb9, 0x1a0f: 0x1089, 0x1a10: 0x0279, 0x1a11: 0x0369, + 0x1a12: 0x0289, 0x1a13: 0x13d1, 0x1a14: 0x0039, 0x1a15: 0x0ee9, 0x1a16: 0x1159, 0x1a17: 0x0ef9, + 0x1a18: 0x0f09, 0x1a19: 0x1199, 0x1a1a: 0x0f31, 0x1a1b: 0x0249, 0x1a1c: 0x0f41, 0x1a1d: 0x0259, + 0x1a1e: 0x0f51, 0x1a1f: 0x0359, 0x1a20: 0x0f61, 0x1a21: 0x0f71, 0x1a22: 0x00d9, 0x1a23: 0x0f99, + 0x1a24: 0x2039, 0x1a25: 0x0269, 0x1a26: 0x01d9, 0x1a27: 0x0fa9, 0x1a28: 0x0fb9, 0x1a29: 0x1089, + 0x1a2a: 0x0279, 0x1a2b: 0x0369, 0x1a2c: 0x0289, 0x1a2d: 0x13d1, 0x1a2e: 0x0039, 0x1a2f: 0x0ee9, + 0x1a30: 0x1159, 0x1a31: 0x0ef9, 0x1a32: 0x0f09, 0x1a33: 0x1199, 0x1a34: 0x0f31, 0x1a35: 0x0249, + 0x1a36: 0x0f41, 0x1a37: 0x0259, 0x1a38: 0x0f51, 0x1a39: 0x0359, 0x1a3a: 0x0f61, 0x1a3b: 0x0f71, + 0x1a3c: 0x00d9, 0x1a3d: 0x0f99, 0x1a3e: 0x2039, 0x1a3f: 0x0269, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x01d9, 0x1a41: 0x0fa9, 0x1a42: 0x0fb9, 0x1a43: 0x1089, 0x1a44: 0x0279, 0x1a45: 0x0369, + 0x1a46: 0x0289, 0x1a47: 0x13d1, 0x1a48: 0x0039, 0x1a49: 0x0ee9, 0x1a4a: 0x1159, 0x1a4b: 0x0ef9, + 0x1a4c: 0x0f09, 0x1a4d: 0x1199, 0x1a4e: 0x0f31, 0x1a4f: 0x0249, 0x1a50: 0x0f41, 0x1a51: 0x0259, + 0x1a52: 0x0f51, 0x1a53: 0x0359, 0x1a54: 0x0f61, 0x1a55: 0x0f71, 0x1a56: 0x00d9, 0x1a57: 0x0f99, + 0x1a58: 0x2039, 0x1a59: 0x0269, 0x1a5a: 0x01d9, 0x1a5b: 0x0fa9, 0x1a5c: 0x0fb9, 0x1a5d: 0x1089, + 0x1a5e: 0x0279, 0x1a5f: 0x0369, 0x1a60: 0x0289, 0x1a61: 0x13d1, 0x1a62: 0x0039, 0x1a63: 0x0ee9, + 0x1a64: 0x1159, 0x1a65: 0x0ef9, 0x1a66: 0x0f09, 0x1a67: 0x1199, 0x1a68: 0x0f31, 0x1a69: 0x0249, + 0x1a6a: 0x0f41, 0x1a6b: 0x0259, 0x1a6c: 0x0f51, 0x1a6d: 0x0359, 0x1a6e: 0x0f61, 0x1a6f: 0x0f71, + 0x1a70: 0x00d9, 0x1a71: 0x0f99, 0x1a72: 0x2039, 0x1a73: 0x0269, 0x1a74: 0x01d9, 0x1a75: 0x0fa9, + 0x1a76: 0x0fb9, 0x1a77: 0x1089, 0x1a78: 0x0279, 0x1a79: 0x0369, 0x1a7a: 0x0289, 0x1a7b: 0x13d1, + 0x1a7c: 0x0039, 0x1a7d: 0x0ee9, 0x1a7e: 0x1159, 0x1a7f: 0x0ef9, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x0f09, 0x1a81: 0x1199, 0x1a82: 0x0f31, 0x1a83: 0x0249, 0x1a84: 0x0f41, 0x1a85: 0x0259, + 0x1a86: 0x0f51, 0x1a87: 0x0359, 0x1a88: 0x0f61, 0x1a89: 0x0f71, 0x1a8a: 0x00d9, 0x1a8b: 0x0f99, + 0x1a8c: 0x2039, 0x1a8d: 0x0269, 0x1a8e: 0x01d9, 0x1a8f: 0x0fa9, 0x1a90: 0x0fb9, 0x1a91: 0x1089, + 0x1a92: 0x0279, 0x1a93: 0x0369, 0x1a94: 0x0289, 0x1a95: 0x13d1, 0x1a96: 0x0039, 0x1a97: 0x0ee9, + 0x1a98: 0x1159, 0x1a99: 0x0ef9, 0x1a9a: 0x0f09, 0x1a9b: 0x1199, 0x1a9c: 0x0f31, 0x1a9d: 0x0249, + 0x1a9e: 0x0f41, 0x1a9f: 0x0259, 0x1aa0: 0x0f51, 0x1aa1: 0x0359, 0x1aa2: 0x0f61, 0x1aa3: 0x0f71, + 0x1aa4: 0x00d9, 0x1aa5: 0x0f99, 0x1aa6: 0x2039, 0x1aa7: 0x0269, 0x1aa8: 0x01d9, 0x1aa9: 0x0fa9, + 0x1aaa: 0x0fb9, 0x1aab: 0x1089, 0x1aac: 0x0279, 0x1aad: 0x0369, 0x1aae: 0x0289, 0x1aaf: 0x13d1, + 0x1ab0: 0x0039, 0x1ab1: 0x0ee9, 0x1ab2: 0x1159, 0x1ab3: 0x0ef9, 0x1ab4: 0x0f09, 0x1ab5: 0x1199, + 0x1ab6: 0x0f31, 0x1ab7: 0x0249, 0x1ab8: 0x0f41, 0x1ab9: 0x0259, 0x1aba: 0x0f51, 0x1abb: 0x0359, + 0x1abc: 0x0f61, 0x1abd: 0x0f71, 0x1abe: 0x00d9, 0x1abf: 0x0f99, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2039, 0x1ac1: 0x0269, 0x1ac2: 0x01d9, 0x1ac3: 0x0fa9, 0x1ac4: 0x0fb9, 0x1ac5: 0x1089, + 0x1ac6: 
0x0279, 0x1ac7: 0x0369, 0x1ac8: 0x0289, 0x1ac9: 0x13d1, 0x1aca: 0x0039, 0x1acb: 0x0ee9, + 0x1acc: 0x1159, 0x1acd: 0x0ef9, 0x1ace: 0x0f09, 0x1acf: 0x1199, 0x1ad0: 0x0f31, 0x1ad1: 0x0249, + 0x1ad2: 0x0f41, 0x1ad3: 0x0259, 0x1ad4: 0x0f51, 0x1ad5: 0x0359, 0x1ad6: 0x0f61, 0x1ad7: 0x0f71, + 0x1ad8: 0x00d9, 0x1ad9: 0x0f99, 0x1ada: 0x2039, 0x1adb: 0x0269, 0x1adc: 0x01d9, 0x1add: 0x0fa9, + 0x1ade: 0x0fb9, 0x1adf: 0x1089, 0x1ae0: 0x0279, 0x1ae1: 0x0369, 0x1ae2: 0x0289, 0x1ae3: 0x13d1, + 0x1ae4: 0xba81, 0x1ae5: 0xba99, 0x1ae6: 0x0040, 0x1ae7: 0x0040, 0x1ae8: 0xbab1, 0x1ae9: 0x1099, + 0x1aea: 0x10b1, 0x1aeb: 0x10c9, 0x1aec: 0xbac9, 0x1aed: 0xbae1, 0x1aee: 0xbaf9, 0x1aef: 0x1429, + 0x1af0: 0x1a31, 0x1af1: 0xbb11, 0x1af2: 0xbb29, 0x1af3: 0xbb41, 0x1af4: 0xbb59, 0x1af5: 0xbb71, + 0x1af6: 0xbb89, 0x1af7: 0x2109, 0x1af8: 0x1111, 0x1af9: 0x1429, 0x1afa: 0xbba1, 0x1afb: 0xbbb9, + 0x1afc: 0xbbd1, 0x1afd: 0x10e1, 0x1afe: 0x10f9, 0x1aff: 0xbbe9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x2079, 0x1b01: 0xbc01, 0x1b02: 0xbab1, 0x1b03: 0x1099, 0x1b04: 0x10b1, 0x1b05: 0x10c9, + 0x1b06: 0xbac9, 0x1b07: 0xbae1, 0x1b08: 0xbaf9, 0x1b09: 0x1429, 0x1b0a: 0x1a31, 0x1b0b: 0xbb11, + 0x1b0c: 0xbb29, 0x1b0d: 0xbb41, 0x1b0e: 0xbb59, 0x1b0f: 0xbb71, 0x1b10: 0xbb89, 0x1b11: 0x2109, + 0x1b12: 0x1111, 0x1b13: 0xbba1, 0x1b14: 0xbba1, 0x1b15: 0xbbb9, 0x1b16: 0xbbd1, 0x1b17: 0x10e1, + 0x1b18: 0x10f9, 0x1b19: 0xbbe9, 0x1b1a: 0x2079, 0x1b1b: 0xbc21, 0x1b1c: 0xbac9, 0x1b1d: 0x1429, + 0x1b1e: 0xbb11, 0x1b1f: 0x10e1, 0x1b20: 0x1111, 0x1b21: 0x2109, 0x1b22: 0xbab1, 0x1b23: 0x1099, + 0x1b24: 0x10b1, 0x1b25: 0x10c9, 0x1b26: 0xbac9, 0x1b27: 0xbae1, 0x1b28: 0xbaf9, 0x1b29: 0x1429, + 0x1b2a: 0x1a31, 0x1b2b: 0xbb11, 0x1b2c: 0xbb29, 0x1b2d: 0xbb41, 0x1b2e: 0xbb59, 0x1b2f: 0xbb71, + 0x1b30: 0xbb89, 0x1b31: 0x2109, 0x1b32: 0x1111, 0x1b33: 0x1429, 0x1b34: 0xbba1, 0x1b35: 0xbbb9, + 0x1b36: 0xbbd1, 0x1b37: 0x10e1, 0x1b38: 0x10f9, 0x1b39: 0xbbe9, 0x1b3a: 0x2079, 0x1b3b: 0xbc01, + 0x1b3c: 0xbab1, 0x1b3d: 0x1099, 0x1b3e: 0x10b1, 0x1b3f: 0x10c9, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbac9, 0x1b41: 0xbae1, 0x1b42: 0xbaf9, 0x1b43: 0x1429, 0x1b44: 0x1a31, 0x1b45: 0xbb11, + 0x1b46: 0xbb29, 0x1b47: 0xbb41, 0x1b48: 0xbb59, 0x1b49: 0xbb71, 0x1b4a: 0xbb89, 0x1b4b: 0x2109, + 0x1b4c: 0x1111, 0x1b4d: 0xbba1, 0x1b4e: 0xbba1, 0x1b4f: 0xbbb9, 0x1b50: 0xbbd1, 0x1b51: 0x10e1, + 0x1b52: 0x10f9, 0x1b53: 0xbbe9, 0x1b54: 0x2079, 0x1b55: 0xbc21, 0x1b56: 0xbac9, 0x1b57: 0x1429, + 0x1b58: 0xbb11, 0x1b59: 0x10e1, 0x1b5a: 0x1111, 0x1b5b: 0x2109, 0x1b5c: 0xbab1, 0x1b5d: 0x1099, + 0x1b5e: 0x10b1, 0x1b5f: 0x10c9, 0x1b60: 0xbac9, 0x1b61: 0xbae1, 0x1b62: 0xbaf9, 0x1b63: 0x1429, + 0x1b64: 0x1a31, 0x1b65: 0xbb11, 0x1b66: 0xbb29, 0x1b67: 0xbb41, 0x1b68: 0xbb59, 0x1b69: 0xbb71, + 0x1b6a: 0xbb89, 0x1b6b: 0x2109, 0x1b6c: 0x1111, 0x1b6d: 0x1429, 0x1b6e: 0xbba1, 0x1b6f: 0xbbb9, + 0x1b70: 0xbbd1, 0x1b71: 0x10e1, 0x1b72: 0x10f9, 0x1b73: 0xbbe9, 0x1b74: 0x2079, 0x1b75: 0xbc01, + 0x1b76: 0xbab1, 0x1b77: 0x1099, 0x1b78: 0x10b1, 0x1b79: 0x10c9, 0x1b7a: 0xbac9, 0x1b7b: 0xbae1, + 0x1b7c: 0xbaf9, 0x1b7d: 0x1429, 0x1b7e: 0x1a31, 0x1b7f: 0xbb11, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0xbb29, 0x1b81: 0xbb41, 0x1b82: 0xbb59, 0x1b83: 0xbb71, 0x1b84: 0xbb89, 0x1b85: 0x2109, + 0x1b86: 0x1111, 0x1b87: 0xbba1, 0x1b88: 0xbba1, 0x1b89: 0xbbb9, 0x1b8a: 0xbbd1, 0x1b8b: 0x10e1, + 0x1b8c: 0x10f9, 0x1b8d: 0xbbe9, 0x1b8e: 0x2079, 0x1b8f: 0xbc21, 0x1b90: 0xbac9, 0x1b91: 0x1429, + 0x1b92: 0xbb11, 0x1b93: 0x10e1, 0x1b94: 0x1111, 0x1b95: 0x2109, 0x1b96: 0xbab1, 0x1b97: 0x1099, + 0x1b98: 0x10b1, 0x1b99: 0x10c9, 
0x1b9a: 0xbac9, 0x1b9b: 0xbae1, 0x1b9c: 0xbaf9, 0x1b9d: 0x1429, + 0x1b9e: 0x1a31, 0x1b9f: 0xbb11, 0x1ba0: 0xbb29, 0x1ba1: 0xbb41, 0x1ba2: 0xbb59, 0x1ba3: 0xbb71, + 0x1ba4: 0xbb89, 0x1ba5: 0x2109, 0x1ba6: 0x1111, 0x1ba7: 0x1429, 0x1ba8: 0xbba1, 0x1ba9: 0xbbb9, + 0x1baa: 0xbbd1, 0x1bab: 0x10e1, 0x1bac: 0x10f9, 0x1bad: 0xbbe9, 0x1bae: 0x2079, 0x1baf: 0xbc01, + 0x1bb0: 0xbab1, 0x1bb1: 0x1099, 0x1bb2: 0x10b1, 0x1bb3: 0x10c9, 0x1bb4: 0xbac9, 0x1bb5: 0xbae1, + 0x1bb6: 0xbaf9, 0x1bb7: 0x1429, 0x1bb8: 0x1a31, 0x1bb9: 0xbb11, 0x1bba: 0xbb29, 0x1bbb: 0xbb41, + 0x1bbc: 0xbb59, 0x1bbd: 0xbb71, 0x1bbe: 0xbb89, 0x1bbf: 0x2109, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x1111, 0x1bc1: 0xbba1, 0x1bc2: 0xbba1, 0x1bc3: 0xbbb9, 0x1bc4: 0xbbd1, 0x1bc5: 0x10e1, + 0x1bc6: 0x10f9, 0x1bc7: 0xbbe9, 0x1bc8: 0x2079, 0x1bc9: 0xbc21, 0x1bca: 0xbac9, 0x1bcb: 0x1429, + 0x1bcc: 0xbb11, 0x1bcd: 0x10e1, 0x1bce: 0x1111, 0x1bcf: 0x2109, 0x1bd0: 0xbab1, 0x1bd1: 0x1099, + 0x1bd2: 0x10b1, 0x1bd3: 0x10c9, 0x1bd4: 0xbac9, 0x1bd5: 0xbae1, 0x1bd6: 0xbaf9, 0x1bd7: 0x1429, + 0x1bd8: 0x1a31, 0x1bd9: 0xbb11, 0x1bda: 0xbb29, 0x1bdb: 0xbb41, 0x1bdc: 0xbb59, 0x1bdd: 0xbb71, + 0x1bde: 0xbb89, 0x1bdf: 0x2109, 0x1be0: 0x1111, 0x1be1: 0x1429, 0x1be2: 0xbba1, 0x1be3: 0xbbb9, + 0x1be4: 0xbbd1, 0x1be5: 0x10e1, 0x1be6: 0x10f9, 0x1be7: 0xbbe9, 0x1be8: 0x2079, 0x1be9: 0xbc01, + 0x1bea: 0xbab1, 0x1beb: 0x1099, 0x1bec: 0x10b1, 0x1bed: 0x10c9, 0x1bee: 0xbac9, 0x1bef: 0xbae1, + 0x1bf0: 0xbaf9, 0x1bf1: 0x1429, 0x1bf2: 0x1a31, 0x1bf3: 0xbb11, 0x1bf4: 0xbb29, 0x1bf5: 0xbb41, + 0x1bf6: 0xbb59, 0x1bf7: 0xbb71, 0x1bf8: 0xbb89, 0x1bf9: 0x2109, 0x1bfa: 0x1111, 0x1bfb: 0xbba1, + 0x1bfc: 0xbba1, 0x1bfd: 0xbbb9, 0x1bfe: 0xbbd1, 0x1bff: 0x10e1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0x10f9, 0x1c01: 0xbbe9, 0x1c02: 0x2079, 0x1c03: 0xbc21, 0x1c04: 0xbac9, 0x1c05: 0x1429, + 0x1c06: 0xbb11, 0x1c07: 0x10e1, 0x1c08: 0x1111, 0x1c09: 0x2109, 0x1c0a: 0xbc41, 0x1c0b: 0xbc41, + 0x1c0c: 0x0040, 0x1c0d: 0x0040, 0x1c0e: 0x1f41, 0x1c0f: 0x00c9, 0x1c10: 0x0069, 0x1c11: 0x0079, + 0x1c12: 0x1f51, 0x1c13: 0x1f61, 0x1c14: 0x1f71, 0x1c15: 0x1f81, 0x1c16: 0x1f91, 0x1c17: 0x1fa1, + 0x1c18: 0x1f41, 0x1c19: 0x00c9, 0x1c1a: 0x0069, 0x1c1b: 0x0079, 0x1c1c: 0x1f51, 0x1c1d: 0x1f61, + 0x1c1e: 0x1f71, 0x1c1f: 0x1f81, 0x1c20: 0x1f91, 0x1c21: 0x1fa1, 0x1c22: 0x1f41, 0x1c23: 0x00c9, + 0x1c24: 0x0069, 0x1c25: 0x0079, 0x1c26: 0x1f51, 0x1c27: 0x1f61, 0x1c28: 0x1f71, 0x1c29: 0x1f81, + 0x1c2a: 0x1f91, 0x1c2b: 0x1fa1, 0x1c2c: 0x1f41, 0x1c2d: 0x00c9, 0x1c2e: 0x0069, 0x1c2f: 0x0079, + 0x1c30: 0x1f51, 0x1c31: 0x1f61, 0x1c32: 0x1f71, 0x1c33: 0x1f81, 0x1c34: 0x1f91, 0x1c35: 0x1fa1, + 0x1c36: 0x1f41, 0x1c37: 0x00c9, 0x1c38: 0x0069, 0x1c39: 0x0079, 0x1c3a: 0x1f51, 0x1c3b: 0x1f61, + 0x1c3c: 0x1f71, 0x1c3d: 0x1f81, 0x1c3e: 0x1f91, 0x1c3f: 0x1fa1, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xe115, 0x1c41: 0xe115, 0x1c42: 0xe135, 0x1c43: 0xe135, 0x1c44: 0xe115, 0x1c45: 0xe115, + 0x1c46: 0xe175, 0x1c47: 0xe175, 0x1c48: 0xe115, 0x1c49: 0xe115, 0x1c4a: 0xe135, 0x1c4b: 0xe135, + 0x1c4c: 0xe115, 0x1c4d: 0xe115, 0x1c4e: 0xe1f5, 0x1c4f: 0xe1f5, 0x1c50: 0xe115, 0x1c51: 0xe115, + 0x1c52: 0xe135, 0x1c53: 0xe135, 0x1c54: 0xe115, 0x1c55: 0xe115, 0x1c56: 0xe175, 0x1c57: 0xe175, + 0x1c58: 0xe115, 0x1c59: 0xe115, 0x1c5a: 0xe135, 0x1c5b: 0xe135, 0x1c5c: 0xe115, 0x1c5d: 0xe115, + 0x1c5e: 0x8b05, 0x1c5f: 0x8b05, 0x1c60: 0x04b5, 0x1c61: 0x04b5, 0x1c62: 0x0a08, 0x1c63: 0x0a08, + 0x1c64: 0x0a08, 0x1c65: 0x0a08, 0x1c66: 0x0a08, 0x1c67: 0x0a08, 0x1c68: 0x0a08, 0x1c69: 0x0a08, + 0x1c6a: 0x0a08, 0x1c6b: 0x0a08, 0x1c6c: 0x0a08, 0x1c6d: 
0x0a08, 0x1c6e: 0x0a08, 0x1c6f: 0x0a08, + 0x1c70: 0x0a08, 0x1c71: 0x0a08, 0x1c72: 0x0a08, 0x1c73: 0x0a08, 0x1c74: 0x0a08, 0x1c75: 0x0a08, + 0x1c76: 0x0a08, 0x1c77: 0x0a08, 0x1c78: 0x0a08, 0x1c79: 0x0a08, 0x1c7a: 0x0a08, 0x1c7b: 0x0a08, + 0x1c7c: 0x0a08, 0x1c7d: 0x0a08, 0x1c7e: 0x0a08, 0x1c7f: 0x0a08, + // Block 0x72, offset 0x1c80 + 0x1c80: 0xb189, 0x1c81: 0xb1a1, 0x1c82: 0xb201, 0x1c83: 0xb249, 0x1c84: 0x0040, 0x1c85: 0xb411, + 0x1c86: 0xb291, 0x1c87: 0xb219, 0x1c88: 0xb309, 0x1c89: 0xb429, 0x1c8a: 0xb399, 0x1c8b: 0xb3b1, + 0x1c8c: 0xb3c9, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0xb369, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0xb279, 0x1c94: 0xb2c1, 0x1c95: 0xb1d1, 0x1c96: 0xb1e9, 0x1c97: 0xb231, + 0x1c98: 0xb261, 0x1c99: 0xb2f1, 0x1c9a: 0xb321, 0x1c9b: 0xb351, 0x1c9c: 0xbc59, 0x1c9d: 0x7949, + 0x1c9e: 0xbc71, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0x0040, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0xb3b1, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0x0040, 0x1cbb: 0xb351, + 0x1cbc: 0x0040, 0x1cbd: 0x0040, 0x1cbe: 0x0040, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0x0040, 0x1cc1: 0x0040, 0x1cc2: 0xb201, 0x1cc3: 0x0040, 0x1cc4: 0x0040, 0x1cc5: 0x0040, + 0x1cc6: 0x0040, 0x1cc7: 0xb219, 0x1cc8: 0x0040, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0x0040, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0x0040, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0x0040, 0x1cd4: 0xb2c1, 0x1cd5: 0x0040, 0x1cd6: 0x0040, 0x1cd7: 0xb231, + 0x1cd8: 0x0040, 0x1cd9: 0xb2f1, 0x1cda: 0x0040, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x7949, + 0x1cde: 0x0040, 0x1cdf: 0xbc89, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0x0040, + 0x1ce4: 0xb3f9, 0x1ce5: 0x0040, 0x1ce6: 0x0040, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0xb399, 0x1ceb: 0x0040, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0x0040, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0x0040, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0xbc59, 0x1cfd: 0x0040, 0x1cfe: 0xbc71, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0xb189, 0x1d01: 0xb1a1, 0x1d02: 0xb201, 0x1d03: 0xb249, 0x1d04: 0xb3f9, 0x1d05: 0xb411, + 0x1d06: 0xb291, 0x1d07: 0xb219, 0x1d08: 0xb309, 0x1d09: 0xb429, 0x1d0a: 0x0040, 0x1d0b: 0xb3b1, + 0x1d0c: 0xb3c9, 0x1d0d: 0xb3e1, 0x1d0e: 0xb2a9, 0x1d0f: 0xb339, 0x1d10: 0xb369, 0x1d11: 0xb2d9, + 0x1d12: 0xb381, 0x1d13: 0xb279, 0x1d14: 0xb2c1, 0x1d15: 0xb1d1, 0x1d16: 0xb1e9, 0x1d17: 0xb231, + 0x1d18: 0xb261, 0x1d19: 0xb2f1, 0x1d1a: 0xb321, 0x1d1b: 0xb351, 0x1d1c: 0x0040, 0x1d1d: 0x0040, + 0x1d1e: 0x0040, 0x1d1f: 0x0040, 0x1d20: 0x0040, 0x1d21: 0xb1a1, 0x1d22: 0xb201, 0x1d23: 0xb249, + 0x1d24: 0x0040, 0x1d25: 0xb411, 0x1d26: 0xb291, 0x1d27: 0xb219, 0x1d28: 0xb309, 0x1d29: 0xb429, + 0x1d2a: 0x0040, 0x1d2b: 0xb3b1, 0x1d2c: 0xb3c9, 0x1d2d: 0xb3e1, 0x1d2e: 0xb2a9, 0x1d2f: 0xb339, + 0x1d30: 0xb369, 0x1d31: 0xb2d9, 0x1d32: 0xb381, 0x1d33: 0xb279, 0x1d34: 0xb2c1, 0x1d35: 0xb1d1, + 0x1d36: 0xb1e9, 0x1d37: 0xb231, 0x1d38: 0xb261, 0x1d39: 0xb2f1, 0x1d3a: 0xb321, 0x1d3b: 0xb351, + 0x1d3c: 0x0040, 0x1d3d: 0x0040, 0x1d3e: 0x0040, 0x1d3f: 0x0040, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x0040, 0x1d41: 0xbca2, 0x1d42: 0xbcba, 0x1d43: 0xbcd2, 0x1d44: 0xbcea, 0x1d45: 0xbd02, + 0x1d46: 0xbd1a, 0x1d47: 0xbd32, 0x1d48: 0xbd4a, 0x1d49: 0xbd62, 0x1d4a: 0xbd7a, 0x1d4b: 0x0018, + 0x1d4c: 0x0018, 0x1d4d: 0x0040, 0x1d4e: 0x0040, 0x1d4f: 0x0040, 0x1d50: 0xbd92, 0x1d51: 0xbdb2, + 0x1d52: 0xbdd2, 0x1d53: 0xbdf2, 0x1d54: 0xbe12, 0x1d55: 0xbe32, 0x1d56: 0xbe52, 0x1d57: 0xbe72, + 0x1d58: 0xbe92, 0x1d59: 0xbeb2, 0x1d5a: 0xbed2, 0x1d5b: 0xbef2, 0x1d5c: 0xbf12, 0x1d5d: 0xbf32, + 0x1d5e: 0xbf52, 0x1d5f: 0xbf72, 0x1d60: 0xbf92, 0x1d61: 0xbfb2, 0x1d62: 0xbfd2, 0x1d63: 0xbff2, + 0x1d64: 0xc012, 0x1d65: 0xc032, 0x1d66: 0xc052, 0x1d67: 0xc072, 0x1d68: 0xc092, 0x1d69: 0xc0b2, + 0x1d6a: 0xc0d1, 0x1d6b: 0x1159, 0x1d6c: 0x0269, 0x1d6d: 0x6671, 0x1d6e: 0xc111, 0x1d6f: 0x0018, + 0x1d70: 0x0039, 0x1d71: 0x0ee9, 0x1d72: 0x1159, 0x1d73: 0x0ef9, 0x1d74: 0x0f09, 0x1d75: 0x1199, + 0x1d76: 0x0f31, 0x1d77: 0x0249, 0x1d78: 0x0f41, 0x1d79: 0x0259, 0x1d7a: 0x0f51, 0x1d7b: 0x0359, + 0x1d7c: 0x0f61, 0x1d7d: 0x0f71, 0x1d7e: 0x00d9, 0x1d7f: 0x0f99, + // Block 0x76, offset 0x1d80 + 0x1d80: 0x2039, 0x1d81: 0x0269, 0x1d82: 0x01d9, 0x1d83: 0x0fa9, 0x1d84: 0x0fb9, 0x1d85: 0x1089, + 0x1d86: 0x0279, 0x1d87: 0x0369, 0x1d88: 0x0289, 0x1d89: 0x13d1, 0x1d8a: 0xc129, 0x1d8b: 0x65b1, + 0x1d8c: 0xc141, 0x1d8d: 0x1441, 0x1d8e: 0xc159, 0x1d8f: 0xc179, 0x1d90: 0x0018, 0x1d91: 0x0018, + 0x1d92: 0x0018, 0x1d93: 0x0018, 0x1d94: 0x0018, 0x1d95: 0x0018, 0x1d96: 0x0018, 0x1d97: 0x0018, + 0x1d98: 0x0018, 0x1d99: 0x0018, 0x1d9a: 0x0018, 0x1d9b: 0x0018, 0x1d9c: 0x0018, 0x1d9d: 0x0018, + 0x1d9e: 0x0018, 0x1d9f: 0x0018, 0x1da0: 0x0018, 0x1da1: 0x0018, 0x1da2: 0x0018, 0x1da3: 0x0018, + 0x1da4: 0x0018, 0x1da5: 0x0018, 0x1da6: 0x0018, 0x1da7: 0x0018, 0x1da8: 0x0018, 0x1da9: 0x0018, + 0x1daa: 0xc191, 0x1dab: 0xc1a9, 0x1dac: 0x0040, 0x1dad: 0x0040, 0x1dae: 0x0040, 0x1daf: 0x0040, + 0x1db0: 0x0018, 0x1db1: 0x0018, 0x1db2: 0x0018, 0x1db3: 0x0018, 0x1db4: 0x0018, 0x1db5: 0x0018, + 0x1db6: 0x0018, 0x1db7: 0x0018, 0x1db8: 0x0018, 0x1db9: 0x0018, 0x1dba: 0x0018, 0x1dbb: 0x0018, + 0x1dbc: 0x0018, 0x1dbd: 0x0018, 0x1dbe: 0x0018, 0x1dbf: 0x0018, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xc1d9, 0x1dc1: 0xc211, 0x1dc2: 0xc249, 0x1dc3: 0x0040, 0x1dc4: 0x0040, 0x1dc5: 0x0040, + 0x1dc6: 0x0040, 0x1dc7: 0x0040, 0x1dc8: 0x0040, 0x1dc9: 0x0040, 0x1dca: 0x0040, 0x1dcb: 0x0040, + 0x1dcc: 0x0040, 0x1dcd: 0x0040, 0x1dce: 0x0040, 0x1dcf: 0x0040, 0x1dd0: 0xc269, 0x1dd1: 0xc289, + 0x1dd2: 0xc2a9, 0x1dd3: 0xc2c9, 0x1dd4: 0xc2e9, 0x1dd5: 0xc309, 0x1dd6: 0xc329, 0x1dd7: 0xc349, + 0x1dd8: 0xc369, 0x1dd9: 0xc389, 0x1dda: 0xc3a9, 0x1ddb: 0xc3c9, 0x1ddc: 0xc3e9, 0x1ddd: 0xc409, + 0x1dde: 0xc429, 0x1ddf: 0xc449, 0x1de0: 0xc469, 0x1de1: 0xc489, 0x1de2: 0xc4a9, 0x1de3: 0xc4c9, + 0x1de4: 0xc4e9, 0x1de5: 0xc509, 0x1de6: 0xc529, 0x1de7: 0xc549, 0x1de8: 0xc569, 0x1de9: 0xc589, + 0x1dea: 0xc5a9, 0x1deb: 0xc5c9, 0x1dec: 0xc5e9, 0x1ded: 0xc609, 0x1dee: 0xc629, 0x1def: 0xc649, + 0x1df0: 0xc669, 0x1df1: 0xc689, 0x1df2: 0xc6a9, 0x1df3: 0xc6c9, 0x1df4: 0xc6e9, 0x1df5: 0xc709, + 0x1df6: 0xc729, 0x1df7: 0xc749, 0x1df8: 0xc769, 0x1df9: 0xc789, 0x1dfa: 0xc7a9, 0x1dfb: 0xc7c9, + 0x1dfc: 0x0040, 0x1dfd: 0x0040, 0x1dfe: 0x0040, 0x1dff: 0x0040, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xcaf9, 0x1e01: 0xcb19, 0x1e02: 0xcb39, 0x1e03: 0x8b1d, 0x1e04: 0xcb59, 0x1e05: 0xcb79, + 0x1e06: 0xcb99, 0x1e07: 0xcbb9, 0x1e08: 0xcbd9, 0x1e09: 0xcbf9, 0x1e0a: 0xcc19, 0x1e0b: 0xcc39, + 0x1e0c: 0xcc59, 0x1e0d: 0x8b3d, 0x1e0e: 0xcc79, 0x1e0f: 0xcc99, 0x1e10: 0xccb9, 0x1e11: 0xccd9, + 0x1e12: 
0x8b5d, 0x1e13: 0xccf9, 0x1e14: 0xcd19, 0x1e15: 0xc429, 0x1e16: 0x8b7d, 0x1e17: 0xcd39, + 0x1e18: 0xcd59, 0x1e19: 0xcd79, 0x1e1a: 0xcd99, 0x1e1b: 0xcdb9, 0x1e1c: 0x8b9d, 0x1e1d: 0xcdd9, + 0x1e1e: 0xcdf9, 0x1e1f: 0xce19, 0x1e20: 0xce39, 0x1e21: 0xce59, 0x1e22: 0xc789, 0x1e23: 0xce79, + 0x1e24: 0xce99, 0x1e25: 0xceb9, 0x1e26: 0xced9, 0x1e27: 0xcef9, 0x1e28: 0xcf19, 0x1e29: 0xcf39, + 0x1e2a: 0xcf59, 0x1e2b: 0xcf79, 0x1e2c: 0xcf99, 0x1e2d: 0xcfb9, 0x1e2e: 0xcfd9, 0x1e2f: 0xcff9, + 0x1e30: 0xd019, 0x1e31: 0xd039, 0x1e32: 0xd039, 0x1e33: 0xd039, 0x1e34: 0x8bbd, 0x1e35: 0xd059, + 0x1e36: 0xd079, 0x1e37: 0xd099, 0x1e38: 0x8bdd, 0x1e39: 0xd0b9, 0x1e3a: 0xd0d9, 0x1e3b: 0xd0f9, + 0x1e3c: 0xd119, 0x1e3d: 0xd139, 0x1e3e: 0xd159, 0x1e3f: 0xd179, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd199, 0x1e41: 0xd1b9, 0x1e42: 0xd1d9, 0x1e43: 0xd1f9, 0x1e44: 0xd219, 0x1e45: 0xd239, + 0x1e46: 0xd239, 0x1e47: 0xd259, 0x1e48: 0xd279, 0x1e49: 0xd299, 0x1e4a: 0xd2b9, 0x1e4b: 0xd2d9, + 0x1e4c: 0xd2f9, 0x1e4d: 0xd319, 0x1e4e: 0xd339, 0x1e4f: 0xd359, 0x1e50: 0xd379, 0x1e51: 0xd399, + 0x1e52: 0xd3b9, 0x1e53: 0xd3d9, 0x1e54: 0xd3f9, 0x1e55: 0xd419, 0x1e56: 0xd439, 0x1e57: 0xd459, + 0x1e58: 0xd479, 0x1e59: 0x8bfd, 0x1e5a: 0xd499, 0x1e5b: 0xd4b9, 0x1e5c: 0xd4d9, 0x1e5d: 0xc309, + 0x1e5e: 0xd4f9, 0x1e5f: 0xd519, 0x1e60: 0x8c1d, 0x1e61: 0x8c3d, 0x1e62: 0xd539, 0x1e63: 0xd559, + 0x1e64: 0xd579, 0x1e65: 0xd599, 0x1e66: 0xd5b9, 0x1e67: 0xd5d9, 0x1e68: 0x2040, 0x1e69: 0xd5f9, + 0x1e6a: 0xd619, 0x1e6b: 0xd619, 0x1e6c: 0x8c5d, 0x1e6d: 0xd639, 0x1e6e: 0xd659, 0x1e6f: 0xd679, + 0x1e70: 0xd699, 0x1e71: 0x8c7d, 0x1e72: 0xd6b9, 0x1e73: 0xd6d9, 0x1e74: 0x2040, 0x1e75: 0xd6f9, + 0x1e76: 0xd719, 0x1e77: 0xd739, 0x1e78: 0xd759, 0x1e79: 0xd779, 0x1e7a: 0xd799, 0x1e7b: 0x8c9d, + 0x1e7c: 0xd7b9, 0x1e7d: 0x8cbd, 0x1e7e: 0xd7d9, 0x1e7f: 0xd7f9, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xd819, 0x1e81: 0xd839, 0x1e82: 0xd859, 0x1e83: 0xd879, 0x1e84: 0xd899, 0x1e85: 0xd8b9, + 0x1e86: 0xd8d9, 0x1e87: 0xd8f9, 0x1e88: 0xd919, 0x1e89: 0x8cdd, 0x1e8a: 0xd939, 0x1e8b: 0xd959, + 0x1e8c: 0xd979, 0x1e8d: 0xd999, 0x1e8e: 0xd9b9, 0x1e8f: 0x8cfd, 0x1e90: 0xd9d9, 0x1e91: 0x8d1d, + 0x1e92: 0x8d3d, 0x1e93: 0xd9f9, 0x1e94: 0xda19, 0x1e95: 0xda19, 0x1e96: 0xda39, 0x1e97: 0x8d5d, + 0x1e98: 0x8d7d, 0x1e99: 0xda59, 0x1e9a: 0xda79, 0x1e9b: 0xda99, 0x1e9c: 0xdab9, 0x1e9d: 0xdad9, + 0x1e9e: 0xdaf9, 0x1e9f: 0xdb19, 0x1ea0: 0xdb39, 0x1ea1: 0xdb59, 0x1ea2: 0xdb79, 0x1ea3: 0xdb99, + 0x1ea4: 0x8d9d, 0x1ea5: 0xdbb9, 0x1ea6: 0xdbd9, 0x1ea7: 0xdbf9, 0x1ea8: 0xdc19, 0x1ea9: 0xdbf9, + 0x1eaa: 0xdc39, 0x1eab: 0xdc59, 0x1eac: 0xdc79, 0x1ead: 0xdc99, 0x1eae: 0xdcb9, 0x1eaf: 0xdcd9, + 0x1eb0: 0xdcf9, 0x1eb1: 0xdd19, 0x1eb2: 0xdd39, 0x1eb3: 0xdd59, 0x1eb4: 0xdd79, 0x1eb5: 0xdd99, + 0x1eb6: 0xddb9, 0x1eb7: 0xddd9, 0x1eb8: 0x8dbd, 0x1eb9: 0xddf9, 0x1eba: 0xde19, 0x1ebb: 0xde39, + 0x1ebc: 0xde59, 0x1ebd: 0xde79, 0x1ebe: 0x8ddd, 0x1ebf: 0xde99, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xe599, 0x1ec1: 0xe5b9, 0x1ec2: 0xe5d9, 0x1ec3: 0xe5f9, 0x1ec4: 0xe619, 0x1ec5: 0xe639, + 0x1ec6: 0x8efd, 0x1ec7: 0xe659, 0x1ec8: 0xe679, 0x1ec9: 0xe699, 0x1eca: 0xe6b9, 0x1ecb: 0xe6d9, + 0x1ecc: 0xe6f9, 0x1ecd: 0x8f1d, 0x1ece: 0xe719, 0x1ecf: 0xe739, 0x1ed0: 0x8f3d, 0x1ed1: 0x8f5d, + 0x1ed2: 0xe759, 0x1ed3: 0xe779, 0x1ed4: 0xe799, 0x1ed5: 0xe7b9, 0x1ed6: 0xe7d9, 0x1ed7: 0xe7f9, + 0x1ed8: 0xe819, 0x1ed9: 0xe839, 0x1eda: 0xe859, 0x1edb: 0x8f7d, 0x1edc: 0xe879, 0x1edd: 0x8f9d, + 0x1ede: 0xe899, 0x1edf: 0x2040, 0x1ee0: 0xe8b9, 0x1ee1: 0xe8d9, 0x1ee2: 0xe8f9, 0x1ee3: 0x8fbd, + 0x1ee4: 0xe919, 0x1ee5: 0xe939, 
0x1ee6: 0x8fdd, 0x1ee7: 0x8ffd, 0x1ee8: 0xe959, 0x1ee9: 0xe979, + 0x1eea: 0xe999, 0x1eeb: 0xe9b9, 0x1eec: 0xe9d9, 0x1eed: 0xe9d9, 0x1eee: 0xe9f9, 0x1eef: 0xea19, + 0x1ef0: 0xea39, 0x1ef1: 0xea59, 0x1ef2: 0xea79, 0x1ef3: 0xea99, 0x1ef4: 0xeab9, 0x1ef5: 0x901d, + 0x1ef6: 0xead9, 0x1ef7: 0x903d, 0x1ef8: 0xeaf9, 0x1ef9: 0x905d, 0x1efa: 0xeb19, 0x1efb: 0x907d, + 0x1efc: 0x909d, 0x1efd: 0x90bd, 0x1efe: 0xeb39, 0x1eff: 0xeb59, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xeb79, 0x1f01: 0x90dd, 0x1f02: 0x90fd, 0x1f03: 0x911d, 0x1f04: 0x913d, 0x1f05: 0xeb99, + 0x1f06: 0xebb9, 0x1f07: 0xebb9, 0x1f08: 0xebd9, 0x1f09: 0xebf9, 0x1f0a: 0xec19, 0x1f0b: 0xec39, + 0x1f0c: 0xec59, 0x1f0d: 0x915d, 0x1f0e: 0xec79, 0x1f0f: 0xec99, 0x1f10: 0xecb9, 0x1f11: 0xecd9, + 0x1f12: 0x917d, 0x1f13: 0xecf9, 0x1f14: 0x919d, 0x1f15: 0x91bd, 0x1f16: 0xed19, 0x1f17: 0xed39, + 0x1f18: 0xed59, 0x1f19: 0xed79, 0x1f1a: 0xed99, 0x1f1b: 0xedb9, 0x1f1c: 0x91dd, 0x1f1d: 0x91fd, + 0x1f1e: 0x921d, 0x1f1f: 0x2040, 0x1f20: 0xedd9, 0x1f21: 0x923d, 0x1f22: 0xedf9, 0x1f23: 0xee19, + 0x1f24: 0xee39, 0x1f25: 0x925d, 0x1f26: 0xee59, 0x1f27: 0xee79, 0x1f28: 0xee99, 0x1f29: 0xeeb9, + 0x1f2a: 0xeed9, 0x1f2b: 0x927d, 0x1f2c: 0xeef9, 0x1f2d: 0xef19, 0x1f2e: 0xef39, 0x1f2f: 0xef59, + 0x1f30: 0xef79, 0x1f31: 0xef99, 0x1f32: 0x929d, 0x1f33: 0x92bd, 0x1f34: 0xefb9, 0x1f35: 0x92dd, + 0x1f36: 0xefd9, 0x1f37: 0x92fd, 0x1f38: 0xeff9, 0x1f39: 0xf019, 0x1f3a: 0xf039, 0x1f3b: 0x931d, + 0x1f3c: 0x933d, 0x1f3d: 0xf059, 0x1f3e: 0x935d, 0x1f3f: 0xf079, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xf6b9, 0x1f41: 0xf6d9, 0x1f42: 0xf6f9, 0x1f43: 0xf719, 0x1f44: 0xf739, 0x1f45: 0x951d, + 0x1f46: 0xf759, 0x1f47: 0xf779, 0x1f48: 0xf799, 0x1f49: 0xf7b9, 0x1f4a: 0xf7d9, 0x1f4b: 0x953d, + 0x1f4c: 0x955d, 0x1f4d: 0xf7f9, 0x1f4e: 0xf819, 0x1f4f: 0xf839, 0x1f50: 0xf859, 0x1f51: 0xf879, + 0x1f52: 0xf899, 0x1f53: 0x957d, 0x1f54: 0xf8b9, 0x1f55: 0xf8d9, 0x1f56: 0xf8f9, 0x1f57: 0xf919, + 0x1f58: 0x959d, 0x1f59: 0x95bd, 0x1f5a: 0xf939, 0x1f5b: 0xf959, 0x1f5c: 0xf979, 0x1f5d: 0x95dd, + 0x1f5e: 0xf999, 0x1f5f: 0xf9b9, 0x1f60: 0x6815, 0x1f61: 0x95fd, 0x1f62: 0xf9d9, 0x1f63: 0xf9f9, + 0x1f64: 0xfa19, 0x1f65: 0x961d, 0x1f66: 0xfa39, 0x1f67: 0xfa59, 0x1f68: 0xfa79, 0x1f69: 0xfa99, + 0x1f6a: 0xfab9, 0x1f6b: 0xfad9, 0x1f6c: 0xfaf9, 0x1f6d: 0x963d, 0x1f6e: 0xfb19, 0x1f6f: 0xfb39, + 0x1f70: 0xfb59, 0x1f71: 0x965d, 0x1f72: 0xfb79, 0x1f73: 0xfb99, 0x1f74: 0xfbb9, 0x1f75: 0xfbd9, + 0x1f76: 0x7b35, 0x1f77: 0x967d, 0x1f78: 0xfbf9, 0x1f79: 0xfc19, 0x1f7a: 0xfc39, 0x1f7b: 0x969d, + 0x1f7c: 0xfc59, 0x1f7d: 0x96bd, 0x1f7e: 0xfc79, 0x1f7f: 0xfc79, + // Block 0x7e, offset 0x1f80 + 0x1f80: 0xfc99, 0x1f81: 0x96dd, 0x1f82: 0xfcb9, 0x1f83: 0xfcd9, 0x1f84: 0xfcf9, 0x1f85: 0xfd19, + 0x1f86: 0xfd39, 0x1f87: 0xfd59, 0x1f88: 0xfd79, 0x1f89: 0x96fd, 0x1f8a: 0xfd99, 0x1f8b: 0xfdb9, + 0x1f8c: 0xfdd9, 0x1f8d: 0xfdf9, 0x1f8e: 0xfe19, 0x1f8f: 0xfe39, 0x1f90: 0x971d, 0x1f91: 0xfe59, + 0x1f92: 0x973d, 0x1f93: 0x975d, 0x1f94: 0x977d, 0x1f95: 0xfe79, 0x1f96: 0xfe99, 0x1f97: 0xfeb9, + 0x1f98: 0xfed9, 0x1f99: 0xfef9, 0x1f9a: 0xff19, 0x1f9b: 0xff39, 0x1f9c: 0xff59, 0x1f9d: 0x979d, + 0x1f9e: 0x0040, 0x1f9f: 0x0040, 0x1fa0: 0x0040, 0x1fa1: 0x0040, 0x1fa2: 0x0040, 0x1fa3: 0x0040, + 0x1fa4: 0x0040, 0x1fa5: 0x0040, 0x1fa6: 0x0040, 0x1fa7: 0x0040, 0x1fa8: 0x0040, 0x1fa9: 0x0040, + 0x1faa: 0x0040, 0x1fab: 0x0040, 0x1fac: 0x0040, 0x1fad: 0x0040, 0x1fae: 0x0040, 0x1faf: 0x0040, + 0x1fb0: 0x0040, 0x1fb1: 0x0040, 0x1fb2: 0x0040, 0x1fb3: 0x0040, 0x1fb4: 0x0040, 0x1fb5: 0x0040, + 0x1fb6: 0x0040, 0x1fb7: 0x0040, 0x1fb8: 0x0040, 0x1fb9: 
0x0040, 0x1fba: 0x0040, 0x1fbb: 0x0040, + 0x1fbc: 0x0040, 0x1fbd: 0x0040, 0x1fbe: 0x0040, 0x1fbf: 0x0040, +} + +// idnaIndex: 36 blocks, 2304 entries, 4608 bytes +// Block 0 is the zero block. +var idnaIndex = [2304]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7d, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7e, 0xca: 0x7f, 0xcb: 0x07, 0xcc: 0x80, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x81, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x82, 0xd6: 0x83, 0xd7: 0x84, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x85, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x86, 0xde: 0x87, 0xdf: 0x88, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1d, 0xf1: 0x1e, 0xf2: 0x1e, 0xf3: 0x20, 0xf4: 0x21, + // Block 0x4, offset 0x100 + 0x120: 0x89, 0x121: 0x13, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x14, 0x126: 0x15, 0x127: 0x16, + 0x128: 0x17, 0x129: 0x18, 0x12a: 0x19, 0x12b: 0x1a, 0x12c: 0x1b, 0x12d: 0x1c, 0x12e: 0x1d, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1e, 0x132: 0x1f, 0x133: 0x20, 0x134: 0x8f, 0x135: 0x21, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x22, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x23, 0x13e: 0x24, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x25, 0x175: 0x26, 0x176: 0x27, 0x177: 0xc3, + 0x178: 0x28, 0x179: 0x28, 0x17a: 0x29, 0x17b: 0x28, 0x17c: 0xc4, 0x17d: 0x2a, 0x17e: 0x2b, 0x17f: 0x2c, + // Block 0x6, offset 0x180 + 0x180: 0x2d, 0x181: 0x2e, 0x182: 0x2f, 0x183: 0xc5, 0x184: 0x30, 0x185: 0x31, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0x9b, + 0x190: 0xca, 0x191: 0x32, 0x192: 0x33, 0x193: 0x34, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcb, 0x1a9: 0xcc, 0x1aa: 0x9b, 0x1ab: 0xcd, 0x1ac: 0x9b, 0x1ad: 0xce, 0x1ae: 0xcf, 0x1af: 0xd0, + 0x1b0: 0xd1, 0x1b1: 0x35, 0x1b2: 0x28, 0x1b3: 0x36, 0x1b4: 0xd2, 0x1b5: 0xd3, 0x1b6: 0xd4, 0x1b7: 0xd5, + 0x1b8: 0xd6, 0x1b9: 0xd7, 0x1ba: 0xd8, 0x1bb: 0xd9, 0x1bc: 0xda, 0x1bd: 0xdb, 0x1be: 0xdc, 0x1bf: 0x37, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x38, 0x1c1: 0xdd, 0x1c2: 0xde, 0x1c3: 0xdf, 0x1c4: 0xe0, 0x1c5: 0x39, 0x1c6: 0x3a, 0x1c7: 0xe1, + 0x1c8: 0xe2, 0x1c9: 0x3b, 0x1ca: 0x3c, 0x1cb: 0x3d, 0x1cc: 0x3e, 0x1cd: 0x3f, 0x1ce: 0x40, 0x1cf: 0x41, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 
0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe3, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe4, 0x2d3: 0xe5, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe6, 0x2d9: 0x42, 0x2da: 0x43, 0x2db: 0xe7, 0x2dc: 0x44, 0x2dd: 0x45, 
0x2de: 0x46, 0x2df: 0xe8, + 0x2e0: 0xe9, 0x2e1: 0xea, 0x2e2: 0xeb, 0x2e3: 0xec, 0x2e4: 0xed, 0x2e5: 0xee, 0x2e6: 0xef, 0x2e7: 0xf0, + 0x2e8: 0xf1, 0x2e9: 0xf2, 0x2ea: 0xf3, 0x2eb: 0xf4, 0x2ec: 0xf5, 0x2ed: 0xf6, 0x2ee: 0xf7, 0x2ef: 0xf8, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xf9, 0x31f: 0xfa, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfb, 0x3a5: 0xfc, 0x3a6: 0xfd, 0x3a7: 0xfe, + 0x3a8: 0x47, 0x3a9: 0xff, 0x3aa: 0x100, 0x3ab: 0x48, 0x3ac: 0x49, 0x3ad: 0x4a, 0x3ae: 0x4b, 0x3af: 0x4c, + 0x3b0: 0x101, 0x3b1: 0x4d, 0x3b2: 0x4e, 0x3b3: 0x4f, 0x3b4: 0x50, 0x3b5: 0x51, 0x3b6: 0x102, 0x3b7: 0x52, + 0x3b8: 0x53, 0x3b9: 0x54, 0x3ba: 0x55, 0x3bb: 0x56, 0x3bc: 0x57, 0x3bd: 0x58, 0x3be: 0x59, 0x3bf: 0x5a, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x103, 0x3c1: 0x104, 0x3c2: 0x9f, 0x3c3: 0x105, 0x3c4: 0x106, 0x3c5: 0x9b, 0x3c6: 0x107, 0x3c7: 0x108, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x109, 0x3cb: 0x10a, 0x3cc: 0x10b, 0x3cd: 0x10c, 0x3ce: 0x10d, 0x3cf: 0x10e, + 0x3d0: 0x10f, 0x3d1: 0x9f, 0x3d2: 0x110, 0x3d3: 0x111, 0x3d4: 0x112, 0x3d5: 0x113, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x114, 0x3dd: 0x115, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x116, 0x3e1: 0x117, 0x3e2: 0x118, 0x3e3: 0x119, 0x3e4: 0x11a, 0x3e5: 0xba, 0x3e6: 0x11b, 0x3e7: 0x11c, + 0x3e8: 0x11d, 0x3e9: 0x11e, 0x3ea: 0x11f, 0x3eb: 0x5b, 0x3ec: 0x120, 0x3ed: 0x121, 0x3ee: 0x5c, 0x3ef: 0xba, + 0x3f0: 0x122, 0x3f1: 0x123, 0x3f2: 0x124, 0x3f3: 0x125, 0x3f4: 0x126, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0x128, 0x3fd: 0x129, 
0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x12a, 0x401: 0x12b, 0x402: 0x12c, 0x403: 0x12d, 0x404: 0x12e, 0x405: 0x12f, 0x406: 0x130, 0x407: 0x131, + 0x408: 0x132, 0x409: 0xba, 0x40a: 0x133, 0x40b: 0x134, 0x40c: 0x5d, 0x40d: 0x5e, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x135, 0x411: 0x136, 0x412: 0x137, 0x413: 0x138, 0x414: 0xba, 0x415: 0xba, 0x416: 0x139, 0x417: 0x13a, + 0x418: 0x13b, 0x419: 0x13c, 0x41a: 0x13d, 0x41b: 0x13e, 0x41c: 0x13f, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0x140, 0x421: 0xba, 0x422: 0x141, 0x423: 0x142, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0x143, 0x429: 0x144, 0x42a: 0x145, 0x42b: 0x146, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x147, 0x431: 0x148, 0x432: 0x149, 0x433: 0xba, 0x434: 0x14a, 0x435: 0x14b, 0x436: 0x14c, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0x14d, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x14e, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x14f, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x150, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x151, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x152, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 
0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x146, 0x529: 0x153, 0x52a: 0xba, 0x52b: 0x154, 0x52c: 0x155, 0x52d: 0x156, 0x52e: 0x157, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0x158, 0x53a: 0x159, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x15a, 0x53e: 0x15b, 0x53f: 0x15c, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x15d, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x15e, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x9f, 0x581: 0x9f, 0x582: 0x9f, 0x583: 0x9f, 0x584: 0x15f, 0x585: 0x160, 0x586: 0x9f, 0x587: 0x9f, + 0x588: 0x9f, 0x589: 0x9f, 0x58a: 0x9f, 0x58b: 0x161, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x162, 0x5b2: 0x163, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x164, 0x5c4: 0x165, 0x5c5: 0x166, 0x5c6: 0x167, 0x5c7: 0x168, + 0x5c8: 0x9b, 0x5c9: 0x169, 0x5ca: 0xba, 0x5cb: 0x16a, 0x5cc: 0x9b, 0x5cd: 0x16b, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5f, 0x5d1: 0x60, 0x5d2: 0x61, 0x5d3: 0x62, 0x5d4: 0x63, 0x5d5: 0x64, 0x5d6: 0x65, 0x5d7: 0x66, + 0x5d8: 0x67, 0x5d9: 0x68, 0x5da: 0x69, 0x5db: 0x6a, 0x5dc: 0x6b, 0x5dd: 0x6c, 0x5de: 0x6d, 0x5df: 0x6e, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x16c, 0x5e9: 0x16d, 0x5ea: 0x16e, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 
0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x16f, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x122, 0x621: 0x122, 0x622: 0x122, 0x623: 0x170, 0x624: 0x6f, 0x625: 0x171, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0x172, 0x632: 0x173, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x70, 0x639: 0x71, 0x63a: 0x72, 0x63b: 0x174, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x175, 0x641: 0x9b, 0x642: 0x176, 0x643: 0x177, 0x644: 0x73, 0x645: 0x74, 0x646: 0x178, 0x647: 0x179, + 0x648: 0x75, 0x649: 0x17a, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x17b, 0x65c: 0x9b, 0x65d: 0x17c, 0x65e: 0x9b, 0x65f: 0x17d, + 0x660: 0x17e, 0x661: 0x17f, 0x662: 0x180, 0x663: 0xba, 0x664: 0x181, 0x665: 0x182, 0x666: 0x183, 0x667: 0x184, + 0x668: 0xba, 0x669: 0x185, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x186, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x187, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x188, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, 
+ // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x189, 0x73b: 0x9f, 0x73c: 0x9f, 0x73d: 0x9f, 0x73e: 0x9f, 0x73f: 0x9f, + // Block 0x1d, offset 0x740 + 0x740: 0x9f, 0x741: 0x9f, 0x742: 0x9f, 0x743: 0x9f, 0x744: 0x9f, 0x745: 0x9f, 0x746: 0x9f, 0x747: 0x9f, + 0x748: 0x9f, 0x749: 0x9f, 0x74a: 0x9f, 0x74b: 0x9f, 0x74c: 0x9f, 0x74d: 0x9f, 0x74e: 0x9f, 0x74f: 0x9f, + 0x750: 0x9f, 0x751: 0x9f, 0x752: 0x9f, 0x753: 0x9f, 0x754: 0x9f, 0x755: 0x9f, 0x756: 0x9f, 0x757: 0x9f, + 0x758: 0x9f, 0x759: 0x9f, 0x75a: 0x9f, 0x75b: 0x9f, 0x75c: 0x9f, 0x75d: 0x9f, 0x75e: 0x9f, 0x75f: 0x9f, + 0x760: 0x9f, 0x761: 0x9f, 0x762: 0x9f, 0x763: 0x9f, 0x764: 0x9f, 0x765: 0x9f, 0x766: 0x9f, 0x767: 0x9f, + 0x768: 0x9f, 0x769: 0x9f, 0x76a: 0x9f, 0x76b: 0x9f, 0x76c: 0x9f, 0x76d: 0x9f, 0x76e: 0x9f, 0x76f: 0x18a, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x780: 0xba, 0x781: 0xba, 0x782: 0xba, 0x783: 0xba, 0x784: 0xba, 0x785: 0xba, 0x786: 0xba, 0x787: 0xba, + 0x788: 0xba, 0x789: 0xba, 0x78a: 0xba, 0x78b: 0xba, 0x78c: 0xba, 0x78d: 0xba, 0x78e: 0xba, 0x78f: 0xba, + 0x790: 0xba, 0x791: 0xba, 0x792: 0xba, 0x793: 0xba, 0x794: 0xba, 0x795: 0xba, 0x796: 0xba, 0x797: 0xba, + 0x798: 0xba, 0x799: 0xba, 0x79a: 0xba, 0x79b: 0xba, 0x79c: 0xba, 0x79d: 0xba, 0x79e: 0xba, 0x79f: 0xba, + 0x7a0: 0x76, 0x7a1: 0x77, 0x7a2: 0x78, 0x7a3: 0x18b, 0x7a4: 0x79, 0x7a5: 0x7a, 0x7a6: 0x18c, 0x7a7: 0x7b, + 0x7a8: 0x7c, 0x7a9: 0xba, 0x7aa: 0xba, 0x7ab: 0xba, 0x7ac: 0xba, 0x7ad: 0xba, 0x7ae: 0xba, 0x7af: 0xba, + 0x7b0: 0xba, 0x7b1: 0xba, 0x7b2: 0xba, 0x7b3: 0xba, 0x7b4: 0xba, 0x7b5: 0xba, 0x7b6: 0xba, 0x7b7: 0xba, + 0x7b8: 0xba, 0x7b9: 0xba, 0x7ba: 0xba, 0x7bb: 0xba, 0x7bc: 0xba, 0x7bd: 0xba, 0x7be: 0xba, 0x7bf: 0xba, + // Block 0x1f, offset 0x7c0 + 0x7d0: 0x0d, 0x7d1: 0x0e, 0x7d2: 0x0f, 0x7d3: 0x10, 0x7d4: 0x11, 0x7d5: 0x0b, 0x7d6: 0x12, 0x7d7: 0x07, + 0x7d8: 0x13, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x14, 0x7dc: 0x0b, 0x7dd: 0x15, 0x7de: 0x16, 0x7df: 0x17, + 0x7e0: 0x07, 0x7e1: 0x07, 0x7e2: 0x07, 0x7e3: 0x07, 0x7e4: 0x07, 0x7e5: 0x07, 0x7e6: 0x07, 0x7e7: 0x07, + 0x7e8: 0x07, 0x7e9: 0x07, 0x7ea: 0x18, 0x7eb: 0x19, 0x7ec: 0x1a, 0x7ed: 0x07, 0x7ee: 0x1b, 0x7ef: 0x1c, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, + 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, + 0x810: 
[A long run of machine-generated Unicode IDNA lookup-table lines is elided here. This hunk, evidently the vendored golang.org/x/net/idna tables.go, adds the tail of the block-index table (rows of 0x0b and 0xba entries for Blocks 0x21 through 0x23) followed by the two declarations below; both table bodies are elided, and the idnaSparseValues dump continues past the end of this excerpt.]

+// idnaSparseOffset: 276 entries, 552 bytes
+var idnaSparseOffset = []uint16{ /* 276 block start offsets elided */ }
+
+// idnaSparseValues: 1997 entries, 7988 bytes
+var idnaSparseValues = [1997]valueRange{ /* 1997 {value, lo, hi} ranges elided */ }
hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xce, offset 0x642 + {value: 0x0000, lo: 0x0c}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xbf}, + // Block 0xcf, offset 0x64f + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x3008, lo: 0x93, hi: 0x94}, + {value: 0x3308, lo: 0x95, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x96}, + {value: 0x3b08, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xbf}, + // Block 0xd0, offset 0x65d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xd1, offset 0x664 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xd2, offset 0x667 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xd3, offset 0x66c + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xd4, offset 0x66f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xd5, offset 0x672 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xd6, offset 0x675 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xd7, offset 0x67c + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xd8, offset 0x683 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd9, offset 0x687 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, 
hi: 0xbf}, + // Block 0xda, offset 0x692 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xdb, offset 0x695 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xdc, offset 0x698 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0xdd, offset 0x69b + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x6a1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xdf, offset 0x6a6 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xe0, offset 0x6aa + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x6ad + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xe2, offset 0x6b0 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe3, offset 0x6b3 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x6b6 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0xe5, offset 0x6b9 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xe6, offset 0x6be + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xe7, offset 0x6c8 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xe8, offset 0x6cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xe9, offset 0x6cf + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xea, offset 0x6de + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + 
{value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xeb, offset 0x6ea + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xec, offset 0x6ee + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xed, offset 0x6f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xee, offset 0x6f7 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xef, offset 0x6fc + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xf0, offset 0x700 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xf1, offset 0x705 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xf2, offset 0x70e + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xf3, offset 0x719 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf4, offset 0x71f + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xf5, offset 0x727 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xb0}, + {value: 0x0818, lo: 0xb1, hi: 0xbf}, + // Block 0xf6, offset 0x72a + {value: 0x0000, lo: 0x02}, + {value: 0x0818, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xf7, offset 0x72d + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xf8, offset 0x731 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xf9, offset 0x735 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, 
lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xfa, offset 0x73b + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xfb, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xfc, offset 0x746 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xfd, offset 0x749 + {value: 0x0000, lo: 0x0f}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xbf}, + // Block 0xfe, offset 0x759 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xff, offset 0x760 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x100, offset 0x763 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0xbf}, + // Block 0x101, offset 0x766 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x102, offset 0x76a + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x103, offset 0x770 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x104, offset 0x775 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x105, offset 0x77a + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x106, offset 0x782 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xa2}, + {value: 0x0040, lo: 0xa3, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x107, offset 0x787 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // 
Block 0x108, offset 0x78b + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0x109, offset 0x78f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0x10a, offset 0x792 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x10b, offset 0x795 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x10c, offset 0x799 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x10d, offset 0x79d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x10e, offset 0x7a0 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0x10f, offset 0x7b0 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0x110, offset 0x7c1 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0x111, offset 0x7c6 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x112, offset 0x7c8 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x113, offset 0x7ca + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 42466 bytes (41KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/tables9.0.0.go b/vendor/golang.org/x/net/idna/tables9.0.0.go new file mode 100644 index 00000000..8b65fa16 --- /dev/null +++ b/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -0,0 +1,4486 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build !go1.10 + +package idna + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. 
+const UnicodeVersion = "9.0.0" + +var mappings string = "" + // Size: 8175 bytes + "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + + "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + + "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + + "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + + "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + + "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + + "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + + "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + + "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + + "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + + "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + + "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + + "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + + "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + + "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + + "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + + "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" + + "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + + "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + + "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + + "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + + "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + + "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + + "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + + "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + + "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + + ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + + "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + + "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + + "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + + "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + + "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + + "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + + "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + + "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + + "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + + "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + + "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + + "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + + "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + + "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + + "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + + "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + + "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + + 
"\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + + "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + + "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + + "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + + "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + + "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + + "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + + "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + + "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + + "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + + "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + + "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + + "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + + "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + + "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + + "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + + "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + + "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + + "\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + + "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + + "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + + "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + + "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + + "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + + "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + + "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + + "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + + "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + + "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + + " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + + "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + + "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + + "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + + "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + + "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + + "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + + "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + + "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + + "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + + "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + + "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + + "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + + "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + + "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + + "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + + "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل 
جلاله\x08ریال\x01,\x01:\x01!" + + "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + + "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + + "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + + "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + + "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + + "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + + "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + + "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + + "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + + "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + + "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + + "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + + "c\x02mc\x02md\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多\x03解" + + "\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販\x03声" + + "\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打\x03禁" + + "\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕\x09〔安" + + "〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你\x03" + + "侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內\x03" + + "冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉\x03" + + "勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟\x03" + + "叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙\x03" + + "喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型\x03" + + "堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮\x03" + + "嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍\x03" + + "嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰\x03" + + "庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹\x03" + + "悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞\x03" + + "懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢\x03" + + "揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙\x03" + + "暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓\x03" + + "㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛\x03" + + "㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派\x03" + + "海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆\x03" + + "瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀\x03" + + "犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾\x03" + + "異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌\x03" + + "磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒\x03" + + "䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺\x03" + + "者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋\x03" + + "芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著\x03" + + "荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜\x03" + + "虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠\x03" + + "衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁\x03" + + "贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘\x03" + + "鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲\x03" + + 
"頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭\x03" + + "鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" + +var xorData string = "" + // Size: 4855 bytes + "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + + "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + + "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + + "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + + "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + + "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + + "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + + "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + + "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + + "\x03\x037 \x03\x0b+\x03\x02\x01\x04\x02\x01\x02\x02\x019\x02\x03\x1c\x02" + + "\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03\xc1r\x02" + + "\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<\x03\xc1s*" + + "\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03\x83\xab" + + "\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96\xe1\xcd" + + "\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03\x9a\xec" + + "\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c!\x03" + + "\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03ʦ\x93" + + "\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7\x03" + + "\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca\xfa" + + "\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e\x03" + + "\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca\xe3" + + "\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99\x03" + + "\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca\xe8" + + "\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03\x0b" + + "\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06\x05" + + "\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03\x0786" + + "\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/\x03" + + "\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f\x03" + + "\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-\x03" + + "\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03\x07" + + "\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03\x07" + + "\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03\x07" + + "\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b\x0a" + + "\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03\x07" + + "\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+\x03" + + "\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03\x04" + + "4\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03\x04+ " + + "\x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!\x22" + + "\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04\x03" + + "\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>\x03" + + "\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03\x054" + + "\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03\x05)" + + ":\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$\x1e" + + 
"\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226\x03" + + "\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05\x1b" + + "\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05\x03" + + "\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03\x06" + + "\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08\x03" + + "\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03\x0a6" + + "\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a\x1f" + + "\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03\x0a" + + "\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f\x02" + + "\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/\x03" + + "\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a\x00" + + "\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+\x10" + + "\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#<" + + "\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!\x00" + + "\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18.\x03" + + "\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15\x22" + + "\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b\x12" + + "\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05<" + + "\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + + "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + + "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + + "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + + "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + + "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + + "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + + "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + + "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + + "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + + "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + + "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + + "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + + "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + + "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + + "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + + "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + + "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + + "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + + "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + + "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ + "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + + "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + + "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + + "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + + "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + + "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + + "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + + "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + + "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + + "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + + "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + + ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + + "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + + "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + + "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + + "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + + "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + + "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + + "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + + "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + + "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + + "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + + "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + + "(\x04\x023 \x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!\x10\x03\x0b!0" + + "\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b\x03\x09\x1f" + + "\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14\x03\x0a\x01" + + "\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03\x08='\x03" + + "\x08\x1a\x0a\x03\x07</\x03\x07:+\x03\x07\x07*\x03\x06&\x1c\x03\x09\x0c" + + "\x16\x03\x09\x10\x0e\x03\x08'\x0f\x03\x08+\x09\x03\x074%\x03\x06!3\x03" + + "\x06\x03+\x03\x0b\x1e\x19\x03\x0a))\x03\x09\x08\x19\x03\x08,\x05\x03\x07" + + "<2\x03\x06\x1c>\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07\x01\x00" + + "\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03\x09\x11" + + "\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03\x0a/1" + + "\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03\x07<3" + + "\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06\x13\x00" + + "\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(;\x03" + + "\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08\x14$" + + "\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03\x0a" + + "\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19\x01" + + "\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18\x03" + + "\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03\x07" + + "\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03\x0a" + + "\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03\x0b" + + "\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03\x08" + + "\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05\x03" + + 
"\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11\x03" + + "\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03\x09" + + "\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a." + + "\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + + "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + + "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + + "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + + "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + + "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + + "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + + "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + + "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + + "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + + "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + + "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + + "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + + "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + + "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + + "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + + "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + + "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + + "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + + "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + + "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + + "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + + "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + + "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + + "\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + + "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + + "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + + "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + + "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + + "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + + "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + + "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + + "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + + "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + + "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + + "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + + "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + + "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + + "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + + "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + + "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + + "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + + 
"\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + + "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + + "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + + "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + + "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + + "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + + "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + + "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + + "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + + "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + + "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + + "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + + "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + + "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + + "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + + "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + + "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + + "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + + "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + + "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + + "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + + "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + + "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + + "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + + "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + + "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + + "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + + "\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + + "\x04\x03\x0c?\x05\x03\x0c<?\x03\x0c=\x00\x03\x0c=\x06\x03\x0c=\x05\x03" + + "\x0c=\x0c\x03\x0c=\x0f\x03\x0c=\x0d\x03\x0c=\x0b\x03\x0c=\x07\x03\x0c=" + + "\x19\x03\x0c=\x15\x03\x0c=\x11\x03\x0c=1\x03\x0c=3\x03\x0c=0\x03\x0c=>" + + "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + + "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + + "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + + "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + + "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + + "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + + "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + + "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + + "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. 
\x03" + + "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + + "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + + "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + + "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + + "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + + "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + + "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + + "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + + "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + + "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + + "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + + "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + + "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + + "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + + "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + + "\x05\x22\x05\x03\x050\x1d" + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return idnaValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := idnaIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = idnaIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = idnaIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return idnaValues[c0] + } + i := idnaIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = idnaIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// idnaTrie. Total size: 28600 bytes (27.93 KiB). Checksum: 95575047b5d8fff. +type idnaTrie struct{} + +func newIdnaTrie(i int) *idnaTrie { + return &idnaTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 124: + return uint16(idnaValues[n<<6+uint32(b)]) + default: + n -= 124 + return uint16(idnaSparse.lookup(n, b)) + } +} + +// idnaValues: 126 blocks, 8064 entries, 16128 bytes +// The third block is the zero block. 
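+// A minimal usage sketch of the lookup API defined above, assuming
+// package-internal access (newIdnaTrie, lookupString, and the tables are
+// all unexported) and a hypothetical helper name; "é" (U+00E9) encodes as
+// two UTF-8 bytes, so the returned width here is 2:
+//
+//	func exampleLookup() (uint16, int) {
+//		t := newIdnaTrie(0)
+//		v, sz := t.lookupString("é") // v: 16-bit property value; sz: 2
+//		return v, sz
+//	}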
+var idnaValues = [8064]uint16{ + // Block 0x0, offset 0x0 + 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, + 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, + 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, + 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, + 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, + 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, + 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, + 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, + 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, + 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, + 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, + // Block 0x1, offset 0x40 + 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, + 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, + 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, + 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, + 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, + 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, + 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, + 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, + 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, + 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, + 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, + 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, + 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, + 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, + 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, + 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, + 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, + 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, + 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, + 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, + 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, + // Block 0x4, offset 0x100 + 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, + 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, + 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, + 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 0x0008, 0x116: 0xe00d, 0x117: 0x0008, + 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, + 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, + 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 
0x129: 0x0008, + 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, + 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, + 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, + 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, + // Block 0x5, offset 0x140 + 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, + 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, + 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, + 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, + 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, + 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, + 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, + 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, + 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, + 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, + 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, + // Block 0x6, offset 0x180 + 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, + 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, + 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, + 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, + 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, + 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, + 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, + 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, + 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, + 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, + 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, + 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, + 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, + 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, + 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, + 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, + 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, + 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, + 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, + 0x1f6: 0x02d5, 0x1f7: 0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, + 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, + // Block 0x8, offset 0x200 + 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, + 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 
0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, + 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, + 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, + 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, + 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, + 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, + 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, + 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, + 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, + 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, + // Block 0x9, offset 0x240 + 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, + 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, + 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, + 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, + 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, + 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, + 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, + 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, + 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, + 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, + 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, + // Block 0xa, offset 0x280 + 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, + 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, + 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, + 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, + 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, + 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, + 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, + 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, + 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, + 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, + 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, + 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, + 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, + 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 0xe105, + 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, + 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, + 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, + 0x2ea: 0x049d, 
0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, + 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, + 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, + 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, + // Block 0xc, offset 0x300 + 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, + 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, + 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, + 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, + 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, + 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, + 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, + 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, + 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, + 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, + 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, + // Block 0xd, offset 0x340 + 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, + 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, + 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, + 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, + 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, + 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, + 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, + 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, + 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, + 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, + 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, + // Block 0xe, offset 0x380 + 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, + 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, + 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, + 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, + 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, + 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, + 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, + 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, + 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, + 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, + 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, + 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 
0x3cb: 0xe03d, + 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, + 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, + 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, + 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, + 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, + 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, + 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, + 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, + 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, + // Block 0x10, offset 0x400 + 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, + 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, + 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, + 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, + 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, + 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, + 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, + 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, + 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, + 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, + 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, + // Block 0x11, offset 0x440 + 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, + 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, + 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, + 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, + 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, + 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, + 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, + 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, + 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, + 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, + 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, + // Block 0x12, offset 0x480 + 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, + 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, + 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, + 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, + 0x498: 0x3308, 0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, + 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, + 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, + 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 
0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, + 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, + 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, + 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, + 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, + 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, + 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, + 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, + 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, + 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, + 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, + 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, + 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, + 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, + // Block 0x14, offset 0x500 + 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, + 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, + 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, + 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, + 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, + 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, + 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, + 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, + 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, + 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, + 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, + // Block 0x15, offset 0x540 + 0x540: 0x3008, 0x541: 0x3308, 0x542: 0x3308, 0x543: 0x3308, 0x544: 0x3308, 0x545: 0x3308, + 0x546: 0x3308, 0x547: 0x3308, 0x548: 0x3308, 0x549: 0x3008, 0x54a: 0x3008, 0x54b: 0x3008, + 0x54c: 0x3008, 0x54d: 0x3b08, 0x54e: 0x3008, 0x54f: 0x3008, 0x550: 0x0008, 0x551: 0x3308, + 0x552: 0x3308, 0x553: 0x3308, 0x554: 0x3308, 0x555: 0x3308, 0x556: 0x3308, 0x557: 0x3308, + 0x558: 0x04c9, 0x559: 0x0501, 0x55a: 0x0539, 0x55b: 0x0571, 0x55c: 0x05a9, 0x55d: 0x05e1, + 0x55e: 0x0619, 0x55f: 0x0651, 0x560: 0x0008, 0x561: 0x0008, 0x562: 0x3308, 0x563: 0x3308, + 0x564: 0x0018, 0x565: 0x0018, 0x566: 0x0008, 0x567: 0x0008, 0x568: 0x0008, 0x569: 0x0008, + 0x56a: 0x0008, 0x56b: 0x0008, 0x56c: 0x0008, 0x56d: 0x0008, 0x56e: 0x0008, 0x56f: 0x0008, + 0x570: 0x0018, 0x571: 0x0008, 0x572: 0x0008, 0x573: 0x0008, 0x574: 0x0008, 0x575: 0x0008, + 0x576: 0x0008, 0x577: 0x0008, 0x578: 0x0008, 0x579: 0x0008, 0x57a: 0x0008, 0x57b: 0x0008, + 0x57c: 0x0008, 0x57d: 0x0008, 0x57e: 0x0008, 0x57f: 0x0008, + // Block 0x16, offset 0x580 + 0x580: 0x0008, 0x581: 0x3308, 0x582: 0x3008, 0x583: 0x3008, 0x584: 0x0040, 0x585: 0x0008, + 0x586: 0x0008, 0x587: 0x0008, 0x588: 0x0008, 0x589: 0x0008, 0x58a: 0x0008, 0x58b: 0x0008, + 
0x58c: 0x0008, 0x58d: 0x0040, 0x58e: 0x0040, 0x58f: 0x0008, 0x590: 0x0008, 0x591: 0x0040, + 0x592: 0x0040, 0x593: 0x0008, 0x594: 0x0008, 0x595: 0x0008, 0x596: 0x0008, 0x597: 0x0008, + 0x598: 0x0008, 0x599: 0x0008, 0x59a: 0x0008, 0x59b: 0x0008, 0x59c: 0x0008, 0x59d: 0x0008, + 0x59e: 0x0008, 0x59f: 0x0008, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x0008, 0x5a3: 0x0008, + 0x5a4: 0x0008, 0x5a5: 0x0008, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0040, + 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, + 0x5b0: 0x0008, 0x5b1: 0x0040, 0x5b2: 0x0008, 0x5b3: 0x0040, 0x5b4: 0x0040, 0x5b5: 0x0040, + 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0040, 0x5bb: 0x0040, + 0x5bc: 0x3308, 0x5bd: 0x0008, 0x5be: 0x3008, 0x5bf: 0x3008, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3008, 0x5c1: 0x3308, 0x5c2: 0x3308, 0x5c3: 0x3308, 0x5c4: 0x3308, 0x5c5: 0x0040, + 0x5c6: 0x0040, 0x5c7: 0x3008, 0x5c8: 0x3008, 0x5c9: 0x0040, 0x5ca: 0x0040, 0x5cb: 0x3008, + 0x5cc: 0x3008, 0x5cd: 0x3b08, 0x5ce: 0x0008, 0x5cf: 0x0040, 0x5d0: 0x0040, 0x5d1: 0x0040, + 0x5d2: 0x0040, 0x5d3: 0x0040, 0x5d4: 0x0040, 0x5d5: 0x0040, 0x5d6: 0x0040, 0x5d7: 0x3008, + 0x5d8: 0x0040, 0x5d9: 0x0040, 0x5da: 0x0040, 0x5db: 0x0040, 0x5dc: 0x0689, 0x5dd: 0x06c1, + 0x5de: 0x0040, 0x5df: 0x06f9, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x3308, 0x5e3: 0x3308, + 0x5e4: 0x0040, 0x5e5: 0x0040, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0008, + 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, + 0x5f0: 0x0008, 0x5f1: 0x0008, 0x5f2: 0x0018, 0x5f3: 0x0018, 0x5f4: 0x0018, 0x5f5: 0x0018, + 0x5f6: 0x0018, 0x5f7: 0x0018, 0x5f8: 0x0018, 0x5f9: 0x0018, 0x5fa: 0x0018, 0x5fb: 0x0018, + 0x5fc: 0x0040, 0x5fd: 0x0040, 0x5fe: 0x0040, 0x5ff: 0x0040, + // Block 0x18, offset 0x600 + 0x600: 0x0040, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3008, 0x604: 0x0040, 0x605: 0x0008, + 0x606: 0x0008, 0x607: 0x0008, 0x608: 0x0008, 0x609: 0x0008, 0x60a: 0x0008, 0x60b: 0x0040, + 0x60c: 0x0040, 0x60d: 0x0040, 0x60e: 0x0040, 0x60f: 0x0008, 0x610: 0x0008, 0x611: 0x0040, + 0x612: 0x0040, 0x613: 0x0008, 0x614: 0x0008, 0x615: 0x0008, 0x616: 0x0008, 0x617: 0x0008, + 0x618: 0x0008, 0x619: 0x0008, 0x61a: 0x0008, 0x61b: 0x0008, 0x61c: 0x0008, 0x61d: 0x0008, + 0x61e: 0x0008, 0x61f: 0x0008, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x0008, 0x623: 0x0008, + 0x624: 0x0008, 0x625: 0x0008, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0040, + 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, + 0x630: 0x0008, 0x631: 0x0040, 0x632: 0x0008, 0x633: 0x0731, 0x634: 0x0040, 0x635: 0x0008, + 0x636: 0x0769, 0x637: 0x0040, 0x638: 0x0008, 0x639: 0x0008, 0x63a: 0x0040, 0x63b: 0x0040, + 0x63c: 0x3308, 0x63d: 0x0040, 0x63e: 0x3008, 0x63f: 0x3008, + // Block 0x19, offset 0x640 + 0x640: 0x3008, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x0040, 0x644: 0x0040, 0x645: 0x0040, + 0x646: 0x0040, 0x647: 0x3308, 0x648: 0x3308, 0x649: 0x0040, 0x64a: 0x0040, 0x64b: 0x3308, + 0x64c: 0x3308, 0x64d: 0x3b08, 0x64e: 0x0040, 0x64f: 0x0040, 0x650: 0x0040, 0x651: 0x3308, + 0x652: 0x0040, 0x653: 0x0040, 0x654: 0x0040, 0x655: 0x0040, 0x656: 0x0040, 0x657: 0x0040, + 0x658: 0x0040, 0x659: 0x07a1, 0x65a: 0x07d9, 0x65b: 0x0811, 0x65c: 0x0008, 0x65d: 0x0040, + 0x65e: 0x0849, 0x65f: 0x0040, 0x660: 0x0040, 0x661: 0x0040, 0x662: 0x0040, 0x663: 0x0040, + 0x664: 0x0040, 0x665: 0x0040, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0008, + 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 
0x0008, 0x66e: 0x0008, 0x66f: 0x0008, + 0x670: 0x3308, 0x671: 0x3308, 0x672: 0x0008, 0x673: 0x0008, 0x674: 0x0008, 0x675: 0x3308, + 0x676: 0x0040, 0x677: 0x0040, 0x678: 0x0040, 0x679: 0x0040, 0x67a: 0x0040, 0x67b: 0x0040, + 0x67c: 0x0040, 0x67d: 0x0040, 0x67e: 0x0040, 0x67f: 0x0040, + // Block 0x1a, offset 0x680 + 0x680: 0x0040, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x3008, 0x684: 0x0040, 0x685: 0x0008, + 0x686: 0x0008, 0x687: 0x0008, 0x688: 0x0008, 0x689: 0x0008, 0x68a: 0x0008, 0x68b: 0x0008, + 0x68c: 0x0008, 0x68d: 0x0008, 0x68e: 0x0040, 0x68f: 0x0008, 0x690: 0x0008, 0x691: 0x0008, + 0x692: 0x0040, 0x693: 0x0008, 0x694: 0x0008, 0x695: 0x0008, 0x696: 0x0008, 0x697: 0x0008, + 0x698: 0x0008, 0x699: 0x0008, 0x69a: 0x0008, 0x69b: 0x0008, 0x69c: 0x0008, 0x69d: 0x0008, + 0x69e: 0x0008, 0x69f: 0x0008, 0x6a0: 0x0008, 0x6a1: 0x0008, 0x6a2: 0x0008, 0x6a3: 0x0008, + 0x6a4: 0x0008, 0x6a5: 0x0008, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0040, + 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, + 0x6b0: 0x0008, 0x6b1: 0x0040, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0040, 0x6b5: 0x0008, + 0x6b6: 0x0008, 0x6b7: 0x0008, 0x6b8: 0x0008, 0x6b9: 0x0008, 0x6ba: 0x0040, 0x6bb: 0x0040, + 0x6bc: 0x3308, 0x6bd: 0x0008, 0x6be: 0x3008, 0x6bf: 0x3008, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3008, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3308, 0x6c4: 0x3308, 0x6c5: 0x3308, + 0x6c6: 0x0040, 0x6c7: 0x3308, 0x6c8: 0x3308, 0x6c9: 0x3008, 0x6ca: 0x0040, 0x6cb: 0x3008, + 0x6cc: 0x3008, 0x6cd: 0x3b08, 0x6ce: 0x0040, 0x6cf: 0x0040, 0x6d0: 0x0008, 0x6d1: 0x0040, + 0x6d2: 0x0040, 0x6d3: 0x0040, 0x6d4: 0x0040, 0x6d5: 0x0040, 0x6d6: 0x0040, 0x6d7: 0x0040, + 0x6d8: 0x0040, 0x6d9: 0x0040, 0x6da: 0x0040, 0x6db: 0x0040, 0x6dc: 0x0040, 0x6dd: 0x0040, + 0x6de: 0x0040, 0x6df: 0x0040, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x3308, 0x6e3: 0x3308, + 0x6e4: 0x0040, 0x6e5: 0x0040, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0008, + 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, + 0x6f0: 0x0018, 0x6f1: 0x0018, 0x6f2: 0x0040, 0x6f3: 0x0040, 0x6f4: 0x0040, 0x6f5: 0x0040, + 0x6f6: 0x0040, 0x6f7: 0x0040, 0x6f8: 0x0040, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, + 0x6fc: 0x0040, 0x6fd: 0x0040, 0x6fe: 0x0040, 0x6ff: 0x0040, + // Block 0x1c, offset 0x700 + 0x700: 0x0040, 0x701: 0x3308, 0x702: 0x3008, 0x703: 0x3008, 0x704: 0x0040, 0x705: 0x0008, + 0x706: 0x0008, 0x707: 0x0008, 0x708: 0x0008, 0x709: 0x0008, 0x70a: 0x0008, 0x70b: 0x0008, + 0x70c: 0x0008, 0x70d: 0x0040, 0x70e: 0x0040, 0x70f: 0x0008, 0x710: 0x0008, 0x711: 0x0040, + 0x712: 0x0040, 0x713: 0x0008, 0x714: 0x0008, 0x715: 0x0008, 0x716: 0x0008, 0x717: 0x0008, + 0x718: 0x0008, 0x719: 0x0008, 0x71a: 0x0008, 0x71b: 0x0008, 0x71c: 0x0008, 0x71d: 0x0008, + 0x71e: 0x0008, 0x71f: 0x0008, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x0008, 0x723: 0x0008, + 0x724: 0x0008, 0x725: 0x0008, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0040, + 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, + 0x730: 0x0008, 0x731: 0x0040, 0x732: 0x0008, 0x733: 0x0008, 0x734: 0x0040, 0x735: 0x0008, + 0x736: 0x0008, 0x737: 0x0008, 0x738: 0x0008, 0x739: 0x0008, 0x73a: 0x0040, 0x73b: 0x0040, + 0x73c: 0x3308, 0x73d: 0x0008, 0x73e: 0x3008, 0x73f: 0x3308, + // Block 0x1d, offset 0x740 + 0x740: 0x3008, 0x741: 0x3308, 0x742: 0x3308, 0x743: 0x3308, 0x744: 0x3308, 0x745: 0x0040, + 0x746: 0x0040, 0x747: 0x3008, 0x748: 0x3008, 0x749: 0x0040, 0x74a: 0x0040, 0x74b: 0x3008, + 0x74c: 0x3008, 
0x74d: 0x3b08, 0x74e: 0x0040, 0x74f: 0x0040, 0x750: 0x0040, 0x751: 0x0040, + 0x752: 0x0040, 0x753: 0x0040, 0x754: 0x0040, 0x755: 0x0040, 0x756: 0x3308, 0x757: 0x3008, + 0x758: 0x0040, 0x759: 0x0040, 0x75a: 0x0040, 0x75b: 0x0040, 0x75c: 0x0881, 0x75d: 0x08b9, + 0x75e: 0x0040, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x3308, 0x763: 0x3308, + 0x764: 0x0040, 0x765: 0x0040, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0008, + 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, + 0x770: 0x0018, 0x771: 0x0008, 0x772: 0x0018, 0x773: 0x0018, 0x774: 0x0018, 0x775: 0x0018, + 0x776: 0x0018, 0x777: 0x0018, 0x778: 0x0040, 0x779: 0x0040, 0x77a: 0x0040, 0x77b: 0x0040, + 0x77c: 0x0040, 0x77d: 0x0040, 0x77e: 0x0040, 0x77f: 0x0040, + // Block 0x1e, offset 0x780 + 0x780: 0x0040, 0x781: 0x0040, 0x782: 0x3308, 0x783: 0x0008, 0x784: 0x0040, 0x785: 0x0008, + 0x786: 0x0008, 0x787: 0x0008, 0x788: 0x0008, 0x789: 0x0008, 0x78a: 0x0008, 0x78b: 0x0040, + 0x78c: 0x0040, 0x78d: 0x0040, 0x78e: 0x0008, 0x78f: 0x0008, 0x790: 0x0008, 0x791: 0x0040, + 0x792: 0x0008, 0x793: 0x0008, 0x794: 0x0008, 0x795: 0x0008, 0x796: 0x0040, 0x797: 0x0040, + 0x798: 0x0040, 0x799: 0x0008, 0x79a: 0x0008, 0x79b: 0x0040, 0x79c: 0x0008, 0x79d: 0x0040, + 0x79e: 0x0008, 0x79f: 0x0008, 0x7a0: 0x0040, 0x7a1: 0x0040, 0x7a2: 0x0040, 0x7a3: 0x0008, + 0x7a4: 0x0008, 0x7a5: 0x0040, 0x7a6: 0x0040, 0x7a7: 0x0040, 0x7a8: 0x0008, 0x7a9: 0x0008, + 0x7aa: 0x0008, 0x7ab: 0x0040, 0x7ac: 0x0040, 0x7ad: 0x0040, 0x7ae: 0x0008, 0x7af: 0x0008, + 0x7b0: 0x0008, 0x7b1: 0x0008, 0x7b2: 0x0008, 0x7b3: 0x0008, 0x7b4: 0x0008, 0x7b5: 0x0008, + 0x7b6: 0x0008, 0x7b7: 0x0008, 0x7b8: 0x0008, 0x7b9: 0x0008, 0x7ba: 0x0040, 0x7bb: 0x0040, + 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x3008, 0x7bf: 0x3008, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x3308, 0x7c1: 0x3008, 0x7c2: 0x3008, 0x7c3: 0x3008, 0x7c4: 0x3008, 0x7c5: 0x0040, + 0x7c6: 0x3308, 0x7c7: 0x3308, 0x7c8: 0x3308, 0x7c9: 0x0040, 0x7ca: 0x3308, 0x7cb: 0x3308, + 0x7cc: 0x3308, 0x7cd: 0x3b08, 0x7ce: 0x0040, 0x7cf: 0x0040, 0x7d0: 0x0040, 0x7d1: 0x0040, + 0x7d2: 0x0040, 0x7d3: 0x0040, 0x7d4: 0x0040, 0x7d5: 0x3308, 0x7d6: 0x3308, 0x7d7: 0x0040, + 0x7d8: 0x0008, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0040, 0x7dd: 0x0040, + 0x7de: 0x0040, 0x7df: 0x0040, 0x7e0: 0x0008, 0x7e1: 0x0008, 0x7e2: 0x3308, 0x7e3: 0x3308, + 0x7e4: 0x0040, 0x7e5: 0x0040, 0x7e6: 0x0008, 0x7e7: 0x0008, 0x7e8: 0x0008, 0x7e9: 0x0008, + 0x7ea: 0x0008, 0x7eb: 0x0008, 0x7ec: 0x0008, 0x7ed: 0x0008, 0x7ee: 0x0008, 0x7ef: 0x0008, + 0x7f0: 0x0040, 0x7f1: 0x0040, 0x7f2: 0x0040, 0x7f3: 0x0040, 0x7f4: 0x0040, 0x7f5: 0x0040, + 0x7f6: 0x0040, 0x7f7: 0x0040, 0x7f8: 0x0018, 0x7f9: 0x0018, 0x7fa: 0x0018, 0x7fb: 0x0018, + 0x7fc: 0x0018, 0x7fd: 0x0018, 0x7fe: 0x0018, 0x7ff: 0x0018, + // Block 0x20, offset 0x800 + 0x800: 0x0008, 0x801: 0x3308, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x0040, 0x805: 0x0008, + 0x806: 0x0008, 0x807: 0x0008, 0x808: 0x0008, 0x809: 0x0008, 0x80a: 0x0008, 0x80b: 0x0008, + 0x80c: 0x0008, 0x80d: 0x0040, 0x80e: 0x0008, 0x80f: 0x0008, 0x810: 0x0008, 0x811: 0x0040, + 0x812: 0x0008, 0x813: 0x0008, 0x814: 0x0008, 0x815: 0x0008, 0x816: 0x0008, 0x817: 0x0008, + 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 0x81b: 0x0008, 0x81c: 0x0008, 0x81d: 0x0008, + 0x81e: 0x0008, 0x81f: 0x0008, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x0008, 0x823: 0x0008, + 0x824: 0x0008, 0x825: 0x0008, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0040, + 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 
0x0008, 0x82f: 0x0008, + 0x830: 0x0008, 0x831: 0x0008, 0x832: 0x0008, 0x833: 0x0008, 0x834: 0x0040, 0x835: 0x0008, + 0x836: 0x0008, 0x837: 0x0008, 0x838: 0x0008, 0x839: 0x0008, 0x83a: 0x0040, 0x83b: 0x0040, + 0x83c: 0x3308, 0x83d: 0x0008, 0x83e: 0x3008, 0x83f: 0x3308, + // Block 0x21, offset 0x840 + 0x840: 0x3008, 0x841: 0x3008, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x3008, 0x845: 0x0040, + 0x846: 0x3308, 0x847: 0x3008, 0x848: 0x3008, 0x849: 0x0040, 0x84a: 0x3008, 0x84b: 0x3008, + 0x84c: 0x3308, 0x84d: 0x3b08, 0x84e: 0x0040, 0x84f: 0x0040, 0x850: 0x0040, 0x851: 0x0040, + 0x852: 0x0040, 0x853: 0x0040, 0x854: 0x0040, 0x855: 0x3008, 0x856: 0x3008, 0x857: 0x0040, + 0x858: 0x0040, 0x859: 0x0040, 0x85a: 0x0040, 0x85b: 0x0040, 0x85c: 0x0040, 0x85d: 0x0040, + 0x85e: 0x0008, 0x85f: 0x0040, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x3308, 0x863: 0x3308, + 0x864: 0x0040, 0x865: 0x0040, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0008, + 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, + 0x870: 0x0040, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0040, 0x874: 0x0040, 0x875: 0x0040, + 0x876: 0x0040, 0x877: 0x0040, 0x878: 0x0040, 0x879: 0x0040, 0x87a: 0x0040, 0x87b: 0x0040, + 0x87c: 0x0040, 0x87d: 0x0040, 0x87e: 0x0040, 0x87f: 0x0040, + // Block 0x22, offset 0x880 + 0x880: 0x3008, 0x881: 0x3308, 0x882: 0x3308, 0x883: 0x3308, 0x884: 0x3308, 0x885: 0x0040, + 0x886: 0x3008, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, + 0x88c: 0x3008, 0x88d: 0x3b08, 0x88e: 0x0008, 0x88f: 0x0018, 0x890: 0x0040, 0x891: 0x0040, + 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0008, 0x895: 0x0008, 0x896: 0x0008, 0x897: 0x3008, + 0x898: 0x0018, 0x899: 0x0018, 0x89a: 0x0018, 0x89b: 0x0018, 0x89c: 0x0018, 0x89d: 0x0018, + 0x89e: 0x0018, 0x89f: 0x0008, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, + 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, + 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, + 0x8b0: 0x0018, 0x8b1: 0x0018, 0x8b2: 0x0018, 0x8b3: 0x0018, 0x8b4: 0x0018, 0x8b5: 0x0018, + 0x8b6: 0x0018, 0x8b7: 0x0018, 0x8b8: 0x0018, 0x8b9: 0x0018, 0x8ba: 0x0008, 0x8bb: 0x0008, + 0x8bc: 0x0008, 0x8bd: 0x0008, 0x8be: 0x0008, 0x8bf: 0x0008, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x0040, 0x8c1: 0x0008, 0x8c2: 0x0008, 0x8c3: 0x0040, 0x8c4: 0x0008, 0x8c5: 0x0040, + 0x8c6: 0x0040, 0x8c7: 0x0008, 0x8c8: 0x0008, 0x8c9: 0x0040, 0x8ca: 0x0008, 0x8cb: 0x0040, + 0x8cc: 0x0040, 0x8cd: 0x0008, 0x8ce: 0x0040, 0x8cf: 0x0040, 0x8d0: 0x0040, 0x8d1: 0x0040, + 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x0008, + 0x8d8: 0x0040, 0x8d9: 0x0008, 0x8da: 0x0008, 0x8db: 0x0008, 0x8dc: 0x0008, 0x8dd: 0x0008, + 0x8de: 0x0008, 0x8df: 0x0008, 0x8e0: 0x0040, 0x8e1: 0x0008, 0x8e2: 0x0008, 0x8e3: 0x0008, + 0x8e4: 0x0040, 0x8e5: 0x0008, 0x8e6: 0x0040, 0x8e7: 0x0008, 0x8e8: 0x0040, 0x8e9: 0x0040, + 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0040, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, + 0x8f0: 0x0008, 0x8f1: 0x3308, 0x8f2: 0x0008, 0x8f3: 0x0929, 0x8f4: 0x3308, 0x8f5: 0x3308, + 0x8f6: 0x3308, 0x8f7: 0x3308, 0x8f8: 0x3308, 0x8f9: 0x3308, 0x8fa: 0x0040, 0x8fb: 0x3308, + 0x8fc: 0x3308, 0x8fd: 0x0008, 0x8fe: 0x0040, 0x8ff: 0x0040, + // Block 0x24, offset 0x900 + 0x900: 0x0008, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x09d1, 0x904: 0x0008, 0x905: 0x0008, + 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0040, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0008, + 0x90c: 0x0008, 0x90d: 0x0a09, 
0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, + 0x912: 0x0a41, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0a79, + 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0ab1, 0x91d: 0x0008, + 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, + 0x924: 0x0008, 0x925: 0x0008, 0x926: 0x0008, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0ae9, + 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0040, 0x92e: 0x0040, 0x92f: 0x0040, + 0x930: 0x0040, 0x931: 0x3308, 0x932: 0x3308, 0x933: 0x0b21, 0x934: 0x3308, 0x935: 0x0b59, + 0x936: 0x0b91, 0x937: 0x0bc9, 0x938: 0x0c19, 0x939: 0x0c51, 0x93a: 0x3308, 0x93b: 0x3308, + 0x93c: 0x3308, 0x93d: 0x3308, 0x93e: 0x3308, 0x93f: 0x3008, + // Block 0x25, offset 0x940 + 0x940: 0x3308, 0x941: 0x0ca1, 0x942: 0x3308, 0x943: 0x3308, 0x944: 0x3b08, 0x945: 0x0018, + 0x946: 0x3308, 0x947: 0x3308, 0x948: 0x0008, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, + 0x94c: 0x0008, 0x94d: 0x3308, 0x94e: 0x3308, 0x94f: 0x3308, 0x950: 0x3308, 0x951: 0x3308, + 0x952: 0x3308, 0x953: 0x0cd9, 0x954: 0x3308, 0x955: 0x3308, 0x956: 0x3308, 0x957: 0x3308, + 0x958: 0x0040, 0x959: 0x3308, 0x95a: 0x3308, 0x95b: 0x3308, 0x95c: 0x3308, 0x95d: 0x0d11, + 0x95e: 0x3308, 0x95f: 0x3308, 0x960: 0x3308, 0x961: 0x3308, 0x962: 0x0d49, 0x963: 0x3308, + 0x964: 0x3308, 0x965: 0x3308, 0x966: 0x3308, 0x967: 0x0d81, 0x968: 0x3308, 0x969: 0x3308, + 0x96a: 0x3308, 0x96b: 0x3308, 0x96c: 0x0db9, 0x96d: 0x3308, 0x96e: 0x3308, 0x96f: 0x3308, + 0x970: 0x3308, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x3308, 0x974: 0x3308, 0x975: 0x3308, + 0x976: 0x3308, 0x977: 0x3308, 0x978: 0x3308, 0x979: 0x0df1, 0x97a: 0x3308, 0x97b: 0x3308, + 0x97c: 0x3308, 0x97d: 0x0040, 0x97e: 0x0018, 0x97f: 0x0018, + // Block 0x26, offset 0x980 + 0x980: 0x0008, 0x981: 0x0008, 0x982: 0x0008, 0x983: 0x0008, 0x984: 0x0008, 0x985: 0x0008, + 0x986: 0x0008, 0x987: 0x0008, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, + 0x98c: 0x0008, 0x98d: 0x0008, 0x98e: 0x0008, 0x98f: 0x0008, 0x990: 0x0008, 0x991: 0x0008, + 0x992: 0x0008, 0x993: 0x0008, 0x994: 0x0008, 0x995: 0x0008, 0x996: 0x0008, 0x997: 0x0008, + 0x998: 0x0008, 0x999: 0x0008, 0x99a: 0x0008, 0x99b: 0x0008, 0x99c: 0x0008, 0x99d: 0x0008, + 0x99e: 0x0008, 0x99f: 0x0008, 0x9a0: 0x0008, 0x9a1: 0x0008, 0x9a2: 0x0008, 0x9a3: 0x0008, + 0x9a4: 0x0008, 0x9a5: 0x0008, 0x9a6: 0x0008, 0x9a7: 0x0008, 0x9a8: 0x0008, 0x9a9: 0x0008, + 0x9aa: 0x0008, 0x9ab: 0x0008, 0x9ac: 0x0039, 0x9ad: 0x0ed1, 0x9ae: 0x0ee9, 0x9af: 0x0008, + 0x9b0: 0x0ef9, 0x9b1: 0x0f09, 0x9b2: 0x0f19, 0x9b3: 0x0f31, 0x9b4: 0x0249, 0x9b5: 0x0f41, + 0x9b6: 0x0259, 0x9b7: 0x0f51, 0x9b8: 0x0359, 0x9b9: 0x0f61, 0x9ba: 0x0f71, 0x9bb: 0x0008, + 0x9bc: 0x00d9, 0x9bd: 0x0f81, 0x9be: 0x0f99, 0x9bf: 0x0269, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x0fa9, 0x9c1: 0x0fb9, 0x9c2: 0x0279, 0x9c3: 0x0039, 0x9c4: 0x0fc9, 0x9c5: 0x0fe1, + 0x9c6: 0x059d, 0x9c7: 0x0ee9, 0x9c8: 0x0ef9, 0x9c9: 0x0f09, 0x9ca: 0x0ff9, 0x9cb: 0x1011, + 0x9cc: 0x1029, 0x9cd: 0x0f31, 0x9ce: 0x0008, 0x9cf: 0x0f51, 0x9d0: 0x0f61, 0x9d1: 0x1041, + 0x9d2: 0x00d9, 0x9d3: 0x1059, 0x9d4: 0x05b5, 0x9d5: 0x05b5, 0x9d6: 0x0f99, 0x9d7: 0x0fa9, + 0x9d8: 0x0fb9, 0x9d9: 0x059d, 0x9da: 0x1071, 0x9db: 0x1089, 0x9dc: 0x05cd, 0x9dd: 0x1099, + 0x9de: 0x10b1, 0x9df: 0x10c9, 0x9e0: 0x10e1, 0x9e1: 0x10f9, 0x9e2: 0x0f41, 0x9e3: 0x0269, + 0x9e4: 0x0fb9, 0x9e5: 0x1089, 0x9e6: 0x1099, 0x9e7: 0x10b1, 0x9e8: 0x1111, 0x9e9: 0x10e1, + 0x9ea: 0x10f9, 0x9eb: 0x0008, 0x9ec: 0x0008, 0x9ed: 0x0008, 0x9ee: 0x0008, 0x9ef: 
0x0008, + 0x9f0: 0x0008, 0x9f1: 0x0008, 0x9f2: 0x0008, 0x9f3: 0x0008, 0x9f4: 0x0008, 0x9f5: 0x0008, + 0x9f6: 0x0008, 0x9f7: 0x0008, 0x9f8: 0x1129, 0x9f9: 0x0008, 0x9fa: 0x0008, 0x9fb: 0x0008, + 0x9fc: 0x0008, 0x9fd: 0x0008, 0x9fe: 0x0008, 0x9ff: 0x0008, + // Block 0x28, offset 0xa00 + 0xa00: 0x0008, 0xa01: 0x0008, 0xa02: 0x0008, 0xa03: 0x0008, 0xa04: 0x0008, 0xa05: 0x0008, + 0xa06: 0x0008, 0xa07: 0x0008, 0xa08: 0x0008, 0xa09: 0x0008, 0xa0a: 0x0008, 0xa0b: 0x0008, + 0xa0c: 0x0008, 0xa0d: 0x0008, 0xa0e: 0x0008, 0xa0f: 0x0008, 0xa10: 0x0008, 0xa11: 0x0008, + 0xa12: 0x0008, 0xa13: 0x0008, 0xa14: 0x0008, 0xa15: 0x0008, 0xa16: 0x0008, 0xa17: 0x0008, + 0xa18: 0x0008, 0xa19: 0x0008, 0xa1a: 0x0008, 0xa1b: 0x1141, 0xa1c: 0x1159, 0xa1d: 0x1169, + 0xa1e: 0x1181, 0xa1f: 0x1029, 0xa20: 0x1199, 0xa21: 0x11a9, 0xa22: 0x11c1, 0xa23: 0x11d9, + 0xa24: 0x11f1, 0xa25: 0x1209, 0xa26: 0x1221, 0xa27: 0x05e5, 0xa28: 0x1239, 0xa29: 0x1251, + 0xa2a: 0xe17d, 0xa2b: 0x1269, 0xa2c: 0x1281, 0xa2d: 0x1299, 0xa2e: 0x12b1, 0xa2f: 0x12c9, + 0xa30: 0x12e1, 0xa31: 0x12f9, 0xa32: 0x1311, 0xa33: 0x1329, 0xa34: 0x1341, 0xa35: 0x1359, + 0xa36: 0x1371, 0xa37: 0x1389, 0xa38: 0x05fd, 0xa39: 0x13a1, 0xa3a: 0x13b9, 0xa3b: 0x13d1, + 0xa3c: 0x13e1, 0xa3d: 0x13f9, 0xa3e: 0x1411, 0xa3f: 0x1429, + // Block 0x29, offset 0xa40 + 0xa40: 0xe00d, 0xa41: 0x0008, 0xa42: 0xe00d, 0xa43: 0x0008, 0xa44: 0xe00d, 0xa45: 0x0008, + 0xa46: 0xe00d, 0xa47: 0x0008, 0xa48: 0xe00d, 0xa49: 0x0008, 0xa4a: 0xe00d, 0xa4b: 0x0008, + 0xa4c: 0xe00d, 0xa4d: 0x0008, 0xa4e: 0xe00d, 0xa4f: 0x0008, 0xa50: 0xe00d, 0xa51: 0x0008, + 0xa52: 0xe00d, 0xa53: 0x0008, 0xa54: 0xe00d, 0xa55: 0x0008, 0xa56: 0xe00d, 0xa57: 0x0008, + 0xa58: 0xe00d, 0xa59: 0x0008, 0xa5a: 0xe00d, 0xa5b: 0x0008, 0xa5c: 0xe00d, 0xa5d: 0x0008, + 0xa5e: 0xe00d, 0xa5f: 0x0008, 0xa60: 0xe00d, 0xa61: 0x0008, 0xa62: 0xe00d, 0xa63: 0x0008, + 0xa64: 0xe00d, 0xa65: 0x0008, 0xa66: 0xe00d, 0xa67: 0x0008, 0xa68: 0xe00d, 0xa69: 0x0008, + 0xa6a: 0xe00d, 0xa6b: 0x0008, 0xa6c: 0xe00d, 0xa6d: 0x0008, 0xa6e: 0xe00d, 0xa6f: 0x0008, + 0xa70: 0xe00d, 0xa71: 0x0008, 0xa72: 0xe00d, 0xa73: 0x0008, 0xa74: 0xe00d, 0xa75: 0x0008, + 0xa76: 0xe00d, 0xa77: 0x0008, 0xa78: 0xe00d, 0xa79: 0x0008, 0xa7a: 0xe00d, 0xa7b: 0x0008, + 0xa7c: 0xe00d, 0xa7d: 0x0008, 0xa7e: 0xe00d, 0xa7f: 0x0008, + // Block 0x2a, offset 0xa80 + 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, + 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, + 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, + 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0x0008, 0xa97: 0x0008, + 0xa98: 0x0008, 0xa99: 0x0008, 0xa9a: 0x0615, 0xa9b: 0x0635, 0xa9c: 0x0008, 0xa9d: 0x0008, + 0xa9e: 0x1441, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, + 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, + 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, + 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, + 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, + 0xabc: 0xe00d, 0xabd: 0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0008, 0xac1: 0x0008, 0xac2: 0x0008, 0xac3: 0x0008, 0xac4: 0x0008, 0xac5: 0x0008, + 0xac6: 0x0040, 0xac7: 0x0040, 0xac8: 0xe045, 0xac9: 0xe045, 0xaca: 0xe045, 0xacb: 0xe045, + 0xacc: 0xe045, 0xacd: 0xe045, 0xace: 0x0040, 
0xacf: 0x0040, 0xad0: 0x0008, 0xad1: 0x0008, + 0xad2: 0x0008, 0xad3: 0x0008, 0xad4: 0x0008, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, + 0xad8: 0x0040, 0xad9: 0xe045, 0xada: 0x0040, 0xadb: 0xe045, 0xadc: 0x0040, 0xadd: 0xe045, + 0xade: 0x0040, 0xadf: 0xe045, 0xae0: 0x0008, 0xae1: 0x0008, 0xae2: 0x0008, 0xae3: 0x0008, + 0xae4: 0x0008, 0xae5: 0x0008, 0xae6: 0x0008, 0xae7: 0x0008, 0xae8: 0xe045, 0xae9: 0xe045, + 0xaea: 0xe045, 0xaeb: 0xe045, 0xaec: 0xe045, 0xaed: 0xe045, 0xaee: 0xe045, 0xaef: 0xe045, + 0xaf0: 0x0008, 0xaf1: 0x1459, 0xaf2: 0x0008, 0xaf3: 0x1471, 0xaf4: 0x0008, 0xaf5: 0x1489, + 0xaf6: 0x0008, 0xaf7: 0x14a1, 0xaf8: 0x0008, 0xaf9: 0x14b9, 0xafa: 0x0008, 0xafb: 0x14d1, + 0xafc: 0x0008, 0xafd: 0x14e9, 0xafe: 0x0040, 0xaff: 0x0040, + // Block 0x2c, offset 0xb00 + 0xb00: 0x1501, 0xb01: 0x1531, 0xb02: 0x1561, 0xb03: 0x1591, 0xb04: 0x15c1, 0xb05: 0x15f1, + 0xb06: 0x1621, 0xb07: 0x1651, 0xb08: 0x1501, 0xb09: 0x1531, 0xb0a: 0x1561, 0xb0b: 0x1591, + 0xb0c: 0x15c1, 0xb0d: 0x15f1, 0xb0e: 0x1621, 0xb0f: 0x1651, 0xb10: 0x1681, 0xb11: 0x16b1, + 0xb12: 0x16e1, 0xb13: 0x1711, 0xb14: 0x1741, 0xb15: 0x1771, 0xb16: 0x17a1, 0xb17: 0x17d1, + 0xb18: 0x1681, 0xb19: 0x16b1, 0xb1a: 0x16e1, 0xb1b: 0x1711, 0xb1c: 0x1741, 0xb1d: 0x1771, + 0xb1e: 0x17a1, 0xb1f: 0x17d1, 0xb20: 0x1801, 0xb21: 0x1831, 0xb22: 0x1861, 0xb23: 0x1891, + 0xb24: 0x18c1, 0xb25: 0x18f1, 0xb26: 0x1921, 0xb27: 0x1951, 0xb28: 0x1801, 0xb29: 0x1831, + 0xb2a: 0x1861, 0xb2b: 0x1891, 0xb2c: 0x18c1, 0xb2d: 0x18f1, 0xb2e: 0x1921, 0xb2f: 0x1951, + 0xb30: 0x0008, 0xb31: 0x0008, 0xb32: 0x1981, 0xb33: 0x19b1, 0xb34: 0x19d9, 0xb35: 0x0040, + 0xb36: 0x0008, 0xb37: 0x1a01, 0xb38: 0xe045, 0xb39: 0xe045, 0xb3a: 0x064d, 0xb3b: 0x1459, + 0xb3c: 0x19b1, 0xb3d: 0x0666, 0xb3e: 0x1a31, 0xb3f: 0x0686, + // Block 0x2d, offset 0xb40 + 0xb40: 0x06a6, 0xb41: 0x1a4a, 0xb42: 0x1a79, 0xb43: 0x1aa9, 0xb44: 0x1ad1, 0xb45: 0x0040, + 0xb46: 0x0008, 0xb47: 0x1af9, 0xb48: 0x06c5, 0xb49: 0x1471, 0xb4a: 0x06dd, 0xb4b: 0x1489, + 0xb4c: 0x1aa9, 0xb4d: 0x1b2a, 0xb4e: 0x1b5a, 0xb4f: 0x1b8a, 0xb50: 0x0008, 0xb51: 0x0008, + 0xb52: 0x0008, 0xb53: 0x1bb9, 0xb54: 0x0040, 0xb55: 0x0040, 0xb56: 0x0008, 0xb57: 0x0008, + 0xb58: 0xe045, 0xb59: 0xe045, 0xb5a: 0x06f5, 0xb5b: 0x14a1, 0xb5c: 0x0040, 0xb5d: 0x1bd2, + 0xb5e: 0x1c02, 0xb5f: 0x1c32, 0xb60: 0x0008, 0xb61: 0x0008, 0xb62: 0x0008, 0xb63: 0x1c61, + 0xb64: 0x0008, 0xb65: 0x0008, 0xb66: 0x0008, 0xb67: 0x0008, 0xb68: 0xe045, 0xb69: 0xe045, + 0xb6a: 0x070d, 0xb6b: 0x14d1, 0xb6c: 0xe04d, 0xb6d: 0x1c7a, 0xb6e: 0x03d2, 0xb6f: 0x1caa, + 0xb70: 0x0040, 0xb71: 0x0040, 0xb72: 0x1cb9, 0xb73: 0x1ce9, 0xb74: 0x1d11, 0xb75: 0x0040, + 0xb76: 0x0008, 0xb77: 0x1d39, 0xb78: 0x0725, 0xb79: 0x14b9, 0xb7a: 0x0515, 0xb7b: 0x14e9, + 0xb7c: 0x1ce9, 0xb7d: 0x073e, 0xb7e: 0x075e, 0xb7f: 0x0040, + // Block 0x2e, offset 0xb80 + 0xb80: 0x000a, 0xb81: 0x000a, 0xb82: 0x000a, 0xb83: 0x000a, 0xb84: 0x000a, 0xb85: 0x000a, + 0xb86: 0x000a, 0xb87: 0x000a, 0xb88: 0x000a, 0xb89: 0x000a, 0xb8a: 0x000a, 0xb8b: 0x03c0, + 0xb8c: 0x0003, 0xb8d: 0x0003, 0xb8e: 0x0340, 0xb8f: 0x0b40, 0xb90: 0x0018, 0xb91: 0xe00d, + 0xb92: 0x0018, 0xb93: 0x0018, 0xb94: 0x0018, 0xb95: 0x0018, 0xb96: 0x0018, 0xb97: 0x077e, + 0xb98: 0x0018, 0xb99: 0x0018, 0xb9a: 0x0018, 0xb9b: 0x0018, 0xb9c: 0x0018, 0xb9d: 0x0018, + 0xb9e: 0x0018, 0xb9f: 0x0018, 0xba0: 0x0018, 0xba1: 0x0018, 0xba2: 0x0018, 0xba3: 0x0018, + 0xba4: 0x0040, 0xba5: 0x0040, 0xba6: 0x0040, 0xba7: 0x0018, 0xba8: 0x0040, 0xba9: 0x0040, + 0xbaa: 0x0340, 0xbab: 0x0340, 0xbac: 0x0340, 0xbad: 0x0340, 0xbae: 0x0340, 0xbaf: 0x000a, + 0xbb0: 
0x0018, 0xbb1: 0x0018, 0xbb2: 0x0018, 0xbb3: 0x1d69, 0xbb4: 0x1da1, 0xbb5: 0x0018, + 0xbb6: 0x1df1, 0xbb7: 0x1e29, 0xbb8: 0x0018, 0xbb9: 0x0018, 0xbba: 0x0018, 0xbbb: 0x0018, + 0xbbc: 0x1e7a, 0xbbd: 0x0018, 0xbbe: 0x079e, 0xbbf: 0x0018, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x0018, 0xbc1: 0x0018, 0xbc2: 0x0018, 0xbc3: 0x0018, 0xbc4: 0x0018, 0xbc5: 0x0018, + 0xbc6: 0x0018, 0xbc7: 0x1e92, 0xbc8: 0x1eaa, 0xbc9: 0x1ec2, 0xbca: 0x0018, 0xbcb: 0x0018, + 0xbcc: 0x0018, 0xbcd: 0x0018, 0xbce: 0x0018, 0xbcf: 0x0018, 0xbd0: 0x0018, 0xbd1: 0x0018, + 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x1ed9, + 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, + 0xbde: 0x0018, 0xbdf: 0x000a, 0xbe0: 0x03c0, 0xbe1: 0x0340, 0xbe2: 0x0340, 0xbe3: 0x0340, + 0xbe4: 0x03c0, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0040, 0xbe8: 0x0040, 0xbe9: 0x0040, + 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x0340, + 0xbf0: 0x1f41, 0xbf1: 0x0f41, 0xbf2: 0x0040, 0xbf3: 0x0040, 0xbf4: 0x1f51, 0xbf5: 0x1f61, + 0xbf6: 0x1f71, 0xbf7: 0x1f81, 0xbf8: 0x1f91, 0xbf9: 0x1fa1, 0xbfa: 0x1fb2, 0xbfb: 0x07bd, + 0xbfc: 0x1fc2, 0xbfd: 0x1fd2, 0xbfe: 0x1fe2, 0xbff: 0x0f71, + // Block 0x30, offset 0xc00 + 0xc00: 0x1f41, 0xc01: 0x00c9, 0xc02: 0x0069, 0xc03: 0x0079, 0xc04: 0x1f51, 0xc05: 0x1f61, + 0xc06: 0x1f71, 0xc07: 0x1f81, 0xc08: 0x1f91, 0xc09: 0x1fa1, 0xc0a: 0x1fb2, 0xc0b: 0x07d5, + 0xc0c: 0x1fc2, 0xc0d: 0x1fd2, 0xc0e: 0x1fe2, 0xc0f: 0x0040, 0xc10: 0x0039, 0xc11: 0x0f09, + 0xc12: 0x00d9, 0xc13: 0x0369, 0xc14: 0x0ff9, 0xc15: 0x0249, 0xc16: 0x0f51, 0xc17: 0x0359, + 0xc18: 0x0f61, 0xc19: 0x0f71, 0xc1a: 0x0f99, 0xc1b: 0x01d9, 0xc1c: 0x0fa9, 0xc1d: 0x0040, + 0xc1e: 0x0040, 0xc1f: 0x0040, 0xc20: 0x0018, 0xc21: 0x0018, 0xc22: 0x0018, 0xc23: 0x0018, + 0xc24: 0x0018, 0xc25: 0x0018, 0xc26: 0x0018, 0xc27: 0x0018, 0xc28: 0x1ff1, 0xc29: 0x0018, + 0xc2a: 0x0018, 0xc2b: 0x0018, 0xc2c: 0x0018, 0xc2d: 0x0018, 0xc2e: 0x0018, 0xc2f: 0x0018, + 0xc30: 0x0018, 0xc31: 0x0018, 0xc32: 0x0018, 0xc33: 0x0018, 0xc34: 0x0018, 0xc35: 0x0018, + 0xc36: 0x0018, 0xc37: 0x0018, 0xc38: 0x0018, 0xc39: 0x0018, 0xc3a: 0x0018, 0xc3b: 0x0018, + 0xc3c: 0x0018, 0xc3d: 0x0018, 0xc3e: 0x0018, 0xc3f: 0x0040, + // Block 0x31, offset 0xc40 + 0xc40: 0x07ee, 0xc41: 0x080e, 0xc42: 0x1159, 0xc43: 0x082d, 0xc44: 0x0018, 0xc45: 0x084e, + 0xc46: 0x086e, 0xc47: 0x1011, 0xc48: 0x0018, 0xc49: 0x088d, 0xc4a: 0x0f31, 0xc4b: 0x0249, + 0xc4c: 0x0249, 0xc4d: 0x0249, 0xc4e: 0x0249, 0xc4f: 0x2009, 0xc50: 0x0f41, 0xc51: 0x0f41, + 0xc52: 0x0359, 0xc53: 0x0359, 0xc54: 0x0018, 0xc55: 0x0f71, 0xc56: 0x2021, 0xc57: 0x0018, + 0xc58: 0x0018, 0xc59: 0x0f99, 0xc5a: 0x2039, 0xc5b: 0x0269, 0xc5c: 0x0269, 0xc5d: 0x0269, + 0xc5e: 0x0018, 0xc5f: 0x0018, 0xc60: 0x2049, 0xc61: 0x08ad, 0xc62: 0x2061, 0xc63: 0x0018, + 0xc64: 0x13d1, 0xc65: 0x0018, 0xc66: 0x2079, 0xc67: 0x0018, 0xc68: 0x13d1, 0xc69: 0x0018, + 0xc6a: 0x0f51, 0xc6b: 0x2091, 0xc6c: 0x0ee9, 0xc6d: 0x1159, 0xc6e: 0x0018, 0xc6f: 0x0f09, + 0xc70: 0x0f09, 0xc71: 0x1199, 0xc72: 0x0040, 0xc73: 0x0f61, 0xc74: 0x00d9, 0xc75: 0x20a9, + 0xc76: 0x20c1, 0xc77: 0x20d9, 0xc78: 0x20f1, 0xc79: 0x0f41, 0xc7a: 0x0018, 0xc7b: 0x08cd, + 0xc7c: 0x2109, 0xc7d: 0x10b1, 0xc7e: 0x10b1, 0xc7f: 0x2109, + // Block 0x32, offset 0xc80 + 0xc80: 0x08ed, 0xc81: 0x0018, 0xc82: 0x0018, 0xc83: 0x0018, 0xc84: 0x0018, 0xc85: 0x0ef9, + 0xc86: 0x0ef9, 0xc87: 0x0f09, 0xc88: 0x0f41, 0xc89: 0x0259, 0xc8a: 0x0018, 0xc8b: 0x0018, + 0xc8c: 0x0018, 0xc8d: 0x0018, 0xc8e: 0x0008, 0xc8f: 0x0018, 
0xc90: 0x2121, 0xc91: 0x2151, + 0xc92: 0x2181, 0xc93: 0x21b9, 0xc94: 0x21e9, 0xc95: 0x2219, 0xc96: 0x2249, 0xc97: 0x2279, + 0xc98: 0x22a9, 0xc99: 0x22d9, 0xc9a: 0x2309, 0xc9b: 0x2339, 0xc9c: 0x2369, 0xc9d: 0x2399, + 0xc9e: 0x23c9, 0xc9f: 0x23f9, 0xca0: 0x0f41, 0xca1: 0x2421, 0xca2: 0x0905, 0xca3: 0x2439, + 0xca4: 0x1089, 0xca5: 0x2451, 0xca6: 0x0925, 0xca7: 0x2469, 0xca8: 0x2491, 0xca9: 0x0369, + 0xcaa: 0x24a9, 0xcab: 0x0945, 0xcac: 0x0359, 0xcad: 0x1159, 0xcae: 0x0ef9, 0xcaf: 0x0f61, + 0xcb0: 0x0f41, 0xcb1: 0x2421, 0xcb2: 0x0965, 0xcb3: 0x2439, 0xcb4: 0x1089, 0xcb5: 0x2451, + 0xcb6: 0x0985, 0xcb7: 0x2469, 0xcb8: 0x2491, 0xcb9: 0x0369, 0xcba: 0x24a9, 0xcbb: 0x09a5, + 0xcbc: 0x0359, 0xcbd: 0x1159, 0xcbe: 0x0ef9, 0xcbf: 0x0f61, + // Block 0x33, offset 0xcc0 + 0xcc0: 0x0018, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0018, + 0xcc6: 0x0018, 0xcc7: 0x0018, 0xcc8: 0x0018, 0xcc9: 0x0018, 0xcca: 0x0018, 0xccb: 0x0040, + 0xccc: 0x0040, 0xccd: 0x0040, 0xcce: 0x0040, 0xccf: 0x0040, 0xcd0: 0x0040, 0xcd1: 0x0040, + 0xcd2: 0x0040, 0xcd3: 0x0040, 0xcd4: 0x0040, 0xcd5: 0x0040, 0xcd6: 0x0040, 0xcd7: 0x0040, + 0xcd8: 0x0040, 0xcd9: 0x0040, 0xcda: 0x0040, 0xcdb: 0x0040, 0xcdc: 0x0040, 0xcdd: 0x0040, + 0xcde: 0x0040, 0xcdf: 0x0040, 0xce0: 0x00c9, 0xce1: 0x0069, 0xce2: 0x0079, 0xce3: 0x1f51, + 0xce4: 0x1f61, 0xce5: 0x1f71, 0xce6: 0x1f81, 0xce7: 0x1f91, 0xce8: 0x1fa1, 0xce9: 0x2601, + 0xcea: 0x2619, 0xceb: 0x2631, 0xcec: 0x2649, 0xced: 0x2661, 0xcee: 0x2679, 0xcef: 0x2691, + 0xcf0: 0x26a9, 0xcf1: 0x26c1, 0xcf2: 0x26d9, 0xcf3: 0x26f1, 0xcf4: 0x0a06, 0xcf5: 0x0a26, + 0xcf6: 0x0a46, 0xcf7: 0x0a66, 0xcf8: 0x0a86, 0xcf9: 0x0aa6, 0xcfa: 0x0ac6, 0xcfb: 0x0ae6, + 0xcfc: 0x0b06, 0xcfd: 0x270a, 0xcfe: 0x2732, 0xcff: 0x275a, + // Block 0x34, offset 0xd00 + 0xd00: 0x2782, 0xd01: 0x27aa, 0xd02: 0x27d2, 0xd03: 0x27fa, 0xd04: 0x2822, 0xd05: 0x284a, + 0xd06: 0x2872, 0xd07: 0x289a, 0xd08: 0x0040, 0xd09: 0x0040, 0xd0a: 0x0040, 0xd0b: 0x0040, + 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, + 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, + 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0b26, 0xd1d: 0x0b46, + 0xd1e: 0x0b66, 0xd1f: 0x0b86, 0xd20: 0x0ba6, 0xd21: 0x0bc6, 0xd22: 0x0be6, 0xd23: 0x0c06, + 0xd24: 0x0c26, 0xd25: 0x0c46, 0xd26: 0x0c66, 0xd27: 0x0c86, 0xd28: 0x0ca6, 0xd29: 0x0cc6, + 0xd2a: 0x0ce6, 0xd2b: 0x0d06, 0xd2c: 0x0d26, 0xd2d: 0x0d46, 0xd2e: 0x0d66, 0xd2f: 0x0d86, + 0xd30: 0x0da6, 0xd31: 0x0dc6, 0xd32: 0x0de6, 0xd33: 0x0e06, 0xd34: 0x0e26, 0xd35: 0x0e46, + 0xd36: 0x0039, 0xd37: 0x0ee9, 0xd38: 0x1159, 0xd39: 0x0ef9, 0xd3a: 0x0f09, 0xd3b: 0x1199, + 0xd3c: 0x0f31, 0xd3d: 0x0249, 0xd3e: 0x0f41, 0xd3f: 0x0259, + // Block 0x35, offset 0xd40 + 0xd40: 0x0f51, 0xd41: 0x0359, 0xd42: 0x0f61, 0xd43: 0x0f71, 0xd44: 0x00d9, 0xd45: 0x0f99, + 0xd46: 0x2039, 0xd47: 0x0269, 0xd48: 0x01d9, 0xd49: 0x0fa9, 0xd4a: 0x0fb9, 0xd4b: 0x1089, + 0xd4c: 0x0279, 0xd4d: 0x0369, 0xd4e: 0x0289, 0xd4f: 0x13d1, 0xd50: 0x0039, 0xd51: 0x0ee9, + 0xd52: 0x1159, 0xd53: 0x0ef9, 0xd54: 0x0f09, 0xd55: 0x1199, 0xd56: 0x0f31, 0xd57: 0x0249, + 0xd58: 0x0f41, 0xd59: 0x0259, 0xd5a: 0x0f51, 0xd5b: 0x0359, 0xd5c: 0x0f61, 0xd5d: 0x0f71, + 0xd5e: 0x00d9, 0xd5f: 0x0f99, 0xd60: 0x2039, 0xd61: 0x0269, 0xd62: 0x01d9, 0xd63: 0x0fa9, + 0xd64: 0x0fb9, 0xd65: 0x1089, 0xd66: 0x0279, 0xd67: 0x0369, 0xd68: 0x0289, 0xd69: 0x13d1, + 0xd6a: 0x1f41, 0xd6b: 0x0018, 0xd6c: 0x0018, 0xd6d: 0x0018, 0xd6e: 0x0018, 0xd6f: 0x0018, + 0xd70: 0x0018, 0xd71: 
0x0018, 0xd72: 0x0018, 0xd73: 0x0018, 0xd74: 0x0018, 0xd75: 0x0018, + 0xd76: 0x0018, 0xd77: 0x0018, 0xd78: 0x0018, 0xd79: 0x0018, 0xd7a: 0x0018, 0xd7b: 0x0018, + 0xd7c: 0x0018, 0xd7d: 0x0018, 0xd7e: 0x0018, 0xd7f: 0x0018, + // Block 0x36, offset 0xd80 + 0xd80: 0x0008, 0xd81: 0x0008, 0xd82: 0x0008, 0xd83: 0x0008, 0xd84: 0x0008, 0xd85: 0x0008, + 0xd86: 0x0008, 0xd87: 0x0008, 0xd88: 0x0008, 0xd89: 0x0008, 0xd8a: 0x0008, 0xd8b: 0x0008, + 0xd8c: 0x0008, 0xd8d: 0x0008, 0xd8e: 0x0008, 0xd8f: 0x0008, 0xd90: 0x0008, 0xd91: 0x0008, + 0xd92: 0x0008, 0xd93: 0x0008, 0xd94: 0x0008, 0xd95: 0x0008, 0xd96: 0x0008, 0xd97: 0x0008, + 0xd98: 0x0008, 0xd99: 0x0008, 0xd9a: 0x0008, 0xd9b: 0x0008, 0xd9c: 0x0008, 0xd9d: 0x0008, + 0xd9e: 0x0008, 0xd9f: 0x0040, 0xda0: 0xe00d, 0xda1: 0x0008, 0xda2: 0x2971, 0xda3: 0x0ebd, + 0xda4: 0x2989, 0xda5: 0x0008, 0xda6: 0x0008, 0xda7: 0xe07d, 0xda8: 0x0008, 0xda9: 0xe01d, + 0xdaa: 0x0008, 0xdab: 0xe03d, 0xdac: 0x0008, 0xdad: 0x0fe1, 0xdae: 0x1281, 0xdaf: 0x0fc9, + 0xdb0: 0x1141, 0xdb1: 0x0008, 0xdb2: 0xe00d, 0xdb3: 0x0008, 0xdb4: 0x0008, 0xdb5: 0xe01d, + 0xdb6: 0x0008, 0xdb7: 0x0008, 0xdb8: 0x0008, 0xdb9: 0x0008, 0xdba: 0x0008, 0xdbb: 0x0008, + 0xdbc: 0x0259, 0xdbd: 0x1089, 0xdbe: 0x29a1, 0xdbf: 0x29b9, + // Block 0x37, offset 0xdc0 + 0xdc0: 0xe00d, 0xdc1: 0x0008, 0xdc2: 0xe00d, 0xdc3: 0x0008, 0xdc4: 0xe00d, 0xdc5: 0x0008, + 0xdc6: 0xe00d, 0xdc7: 0x0008, 0xdc8: 0xe00d, 0xdc9: 0x0008, 0xdca: 0xe00d, 0xdcb: 0x0008, + 0xdcc: 0xe00d, 0xdcd: 0x0008, 0xdce: 0xe00d, 0xdcf: 0x0008, 0xdd0: 0xe00d, 0xdd1: 0x0008, + 0xdd2: 0xe00d, 0xdd3: 0x0008, 0xdd4: 0xe00d, 0xdd5: 0x0008, 0xdd6: 0xe00d, 0xdd7: 0x0008, + 0xdd8: 0xe00d, 0xdd9: 0x0008, 0xdda: 0xe00d, 0xddb: 0x0008, 0xddc: 0xe00d, 0xddd: 0x0008, + 0xdde: 0xe00d, 0xddf: 0x0008, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0xe00d, 0xde3: 0x0008, + 0xde4: 0x0008, 0xde5: 0x0018, 0xde6: 0x0018, 0xde7: 0x0018, 0xde8: 0x0018, 0xde9: 0x0018, + 0xdea: 0x0018, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0xe01d, 0xdee: 0x0008, 0xdef: 0x3308, + 0xdf0: 0x3308, 0xdf1: 0x3308, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0040, 0xdf5: 0x0040, + 0xdf6: 0x0040, 0xdf7: 0x0040, 0xdf8: 0x0040, 0xdf9: 0x0018, 0xdfa: 0x0018, 0xdfb: 0x0018, + 0xdfc: 0x0018, 0xdfd: 0x0018, 0xdfe: 0x0018, 0xdff: 0x0018, + // Block 0x38, offset 0xe00 + 0xe00: 0x26fd, 0xe01: 0x271d, 0xe02: 0x273d, 0xe03: 0x275d, 0xe04: 0x277d, 0xe05: 0x279d, + 0xe06: 0x27bd, 0xe07: 0x27dd, 0xe08: 0x27fd, 0xe09: 0x281d, 0xe0a: 0x283d, 0xe0b: 0x285d, + 0xe0c: 0x287d, 0xe0d: 0x289d, 0xe0e: 0x28bd, 0xe0f: 0x28dd, 0xe10: 0x28fd, 0xe11: 0x291d, + 0xe12: 0x293d, 0xe13: 0x295d, 0xe14: 0x297d, 0xe15: 0x299d, 0xe16: 0x0040, 0xe17: 0x0040, + 0xe18: 0x0040, 0xe19: 0x0040, 0xe1a: 0x0040, 0xe1b: 0x0040, 0xe1c: 0x0040, 0xe1d: 0x0040, + 0xe1e: 0x0040, 0xe1f: 0x0040, 0xe20: 0x0040, 0xe21: 0x0040, 0xe22: 0x0040, 0xe23: 0x0040, + 0xe24: 0x0040, 0xe25: 0x0040, 0xe26: 0x0040, 0xe27: 0x0040, 0xe28: 0x0040, 0xe29: 0x0040, + 0xe2a: 0x0040, 0xe2b: 0x0040, 0xe2c: 0x0040, 0xe2d: 0x0040, 0xe2e: 0x0040, 0xe2f: 0x0040, + 0xe30: 0x0040, 0xe31: 0x0040, 0xe32: 0x0040, 0xe33: 0x0040, 0xe34: 0x0040, 0xe35: 0x0040, + 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0040, 0xe3a: 0x0040, 0xe3b: 0x0040, + 0xe3c: 0x0040, 0xe3d: 0x0040, 0xe3e: 0x0040, 0xe3f: 0x0040, + // Block 0x39, offset 0xe40 + 0xe40: 0x000a, 0xe41: 0x0018, 0xe42: 0x29d1, 0xe43: 0x0018, 0xe44: 0x0018, 0xe45: 0x0008, + 0xe46: 0x0008, 0xe47: 0x0008, 0xe48: 0x0018, 0xe49: 0x0018, 0xe4a: 0x0018, 0xe4b: 0x0018, + 0xe4c: 0x0018, 0xe4d: 0x0018, 0xe4e: 0x0018, 0xe4f: 0x0018, 0xe50: 0x0018, 
0xe51: 0x0018, + 0xe52: 0x0018, 0xe53: 0x0018, 0xe54: 0x0018, 0xe55: 0x0018, 0xe56: 0x0018, 0xe57: 0x0018, + 0xe58: 0x0018, 0xe59: 0x0018, 0xe5a: 0x0018, 0xe5b: 0x0018, 0xe5c: 0x0018, 0xe5d: 0x0018, + 0xe5e: 0x0018, 0xe5f: 0x0018, 0xe60: 0x0018, 0xe61: 0x0018, 0xe62: 0x0018, 0xe63: 0x0018, + 0xe64: 0x0018, 0xe65: 0x0018, 0xe66: 0x0018, 0xe67: 0x0018, 0xe68: 0x0018, 0xe69: 0x0018, + 0xe6a: 0x3308, 0xe6b: 0x3308, 0xe6c: 0x3308, 0xe6d: 0x3308, 0xe6e: 0x3018, 0xe6f: 0x3018, + 0xe70: 0x0018, 0xe71: 0x0018, 0xe72: 0x0018, 0xe73: 0x0018, 0xe74: 0x0018, 0xe75: 0x0018, + 0xe76: 0xe125, 0xe77: 0x0018, 0xe78: 0x29bd, 0xe79: 0x29dd, 0xe7a: 0x29fd, 0xe7b: 0x0018, + 0xe7c: 0x0008, 0xe7d: 0x0018, 0xe7e: 0x0018, 0xe7f: 0x0018, + // Block 0x3a, offset 0xe80 + 0xe80: 0x2b3d, 0xe81: 0x2b5d, 0xe82: 0x2b7d, 0xe83: 0x2b9d, 0xe84: 0x2bbd, 0xe85: 0x2bdd, + 0xe86: 0x2bdd, 0xe87: 0x2bdd, 0xe88: 0x2bfd, 0xe89: 0x2bfd, 0xe8a: 0x2bfd, 0xe8b: 0x2bfd, + 0xe8c: 0x2c1d, 0xe8d: 0x2c1d, 0xe8e: 0x2c1d, 0xe8f: 0x2c3d, 0xe90: 0x2c5d, 0xe91: 0x2c5d, + 0xe92: 0x2a7d, 0xe93: 0x2a7d, 0xe94: 0x2c5d, 0xe95: 0x2c5d, 0xe96: 0x2c7d, 0xe97: 0x2c7d, + 0xe98: 0x2c5d, 0xe99: 0x2c5d, 0xe9a: 0x2a7d, 0xe9b: 0x2a7d, 0xe9c: 0x2c5d, 0xe9d: 0x2c5d, + 0xe9e: 0x2c3d, 0xe9f: 0x2c3d, 0xea0: 0x2c9d, 0xea1: 0x2c9d, 0xea2: 0x2cbd, 0xea3: 0x2cbd, + 0xea4: 0x0040, 0xea5: 0x2cdd, 0xea6: 0x2cfd, 0xea7: 0x2d1d, 0xea8: 0x2d1d, 0xea9: 0x2d3d, + 0xeaa: 0x2d5d, 0xeab: 0x2d7d, 0xeac: 0x2d9d, 0xead: 0x2dbd, 0xeae: 0x2ddd, 0xeaf: 0x2dfd, + 0xeb0: 0x2e1d, 0xeb1: 0x2e3d, 0xeb2: 0x2e3d, 0xeb3: 0x2e5d, 0xeb4: 0x2e7d, 0xeb5: 0x2e7d, + 0xeb6: 0x2e9d, 0xeb7: 0x2ebd, 0xeb8: 0x2e5d, 0xeb9: 0x2edd, 0xeba: 0x2efd, 0xebb: 0x2edd, + 0xebc: 0x2e5d, 0xebd: 0x2f1d, 0xebe: 0x2f3d, 0xebf: 0x2f5d, + // Block 0x3b, offset 0xec0 + 0xec0: 0x2f7d, 0xec1: 0x2f9d, 0xec2: 0x2cfd, 0xec3: 0x2cdd, 0xec4: 0x2fbd, 0xec5: 0x2fdd, + 0xec6: 0x2ffd, 0xec7: 0x301d, 0xec8: 0x303d, 0xec9: 0x305d, 0xeca: 0x307d, 0xecb: 0x309d, + 0xecc: 0x30bd, 0xecd: 0x30dd, 0xece: 0x30fd, 0xecf: 0x0040, 0xed0: 0x0018, 0xed1: 0x0018, + 0xed2: 0x311d, 0xed3: 0x313d, 0xed4: 0x315d, 0xed5: 0x317d, 0xed6: 0x319d, 0xed7: 0x31bd, + 0xed8: 0x31dd, 0xed9: 0x31fd, 0xeda: 0x321d, 0xedb: 0x323d, 0xedc: 0x315d, 0xedd: 0x325d, + 0xede: 0x327d, 0xedf: 0x329d, 0xee0: 0x0008, 0xee1: 0x0008, 0xee2: 0x0008, 0xee3: 0x0008, + 0xee4: 0x0008, 0xee5: 0x0008, 0xee6: 0x0008, 0xee7: 0x0008, 0xee8: 0x0008, 0xee9: 0x0008, + 0xeea: 0x0008, 0xeeb: 0x0008, 0xeec: 0x0008, 0xeed: 0x0008, 0xeee: 0x0008, 0xeef: 0x0008, + 0xef0: 0x0008, 0xef1: 0x0008, 0xef2: 0x0008, 0xef3: 0x0008, 0xef4: 0x0008, 0xef5: 0x0008, + 0xef6: 0x0008, 0xef7: 0x0008, 0xef8: 0x0008, 0xef9: 0x0008, 0xefa: 0x0008, 0xefb: 0x0040, + 0xefc: 0x0040, 0xefd: 0x0040, 0xefe: 0x0040, 0xeff: 0x0040, + // Block 0x3c, offset 0xf00 + 0xf00: 0x36a2, 0xf01: 0x36d2, 0xf02: 0x3702, 0xf03: 0x3732, 0xf04: 0x32bd, 0xf05: 0x32dd, + 0xf06: 0x32fd, 0xf07: 0x331d, 0xf08: 0x0018, 0xf09: 0x0018, 0xf0a: 0x0018, 0xf0b: 0x0018, + 0xf0c: 0x0018, 0xf0d: 0x0018, 0xf0e: 0x0018, 0xf0f: 0x0018, 0xf10: 0x333d, 0xf11: 0x3761, + 0xf12: 0x3779, 0xf13: 0x3791, 0xf14: 0x37a9, 0xf15: 0x37c1, 0xf16: 0x37d9, 0xf17: 0x37f1, + 0xf18: 0x3809, 0xf19: 0x3821, 0xf1a: 0x3839, 0xf1b: 0x3851, 0xf1c: 0x3869, 0xf1d: 0x3881, + 0xf1e: 0x3899, 0xf1f: 0x38b1, 0xf20: 0x335d, 0xf21: 0x337d, 0xf22: 0x339d, 0xf23: 0x33bd, + 0xf24: 0x33dd, 0xf25: 0x33dd, 0xf26: 0x33fd, 0xf27: 0x341d, 0xf28: 0x343d, 0xf29: 0x345d, + 0xf2a: 0x347d, 0xf2b: 0x349d, 0xf2c: 0x34bd, 0xf2d: 0x34dd, 0xf2e: 0x34fd, 0xf2f: 0x351d, + 0xf30: 0x353d, 0xf31: 0x355d, 0xf32: 
0x357d, 0xf33: 0x359d, 0xf34: 0x35bd, 0xf35: 0x35dd, + 0xf36: 0x35fd, 0xf37: 0x361d, 0xf38: 0x363d, 0xf39: 0x365d, 0xf3a: 0x367d, 0xf3b: 0x369d, + 0xf3c: 0x38c9, 0xf3d: 0x3901, 0xf3e: 0x36bd, 0xf3f: 0x0018, + // Block 0x3d, offset 0xf40 + 0xf40: 0x36dd, 0xf41: 0x36fd, 0xf42: 0x371d, 0xf43: 0x373d, 0xf44: 0x375d, 0xf45: 0x377d, + 0xf46: 0x379d, 0xf47: 0x37bd, 0xf48: 0x37dd, 0xf49: 0x37fd, 0xf4a: 0x381d, 0xf4b: 0x383d, + 0xf4c: 0x385d, 0xf4d: 0x387d, 0xf4e: 0x389d, 0xf4f: 0x38bd, 0xf50: 0x38dd, 0xf51: 0x38fd, + 0xf52: 0x391d, 0xf53: 0x393d, 0xf54: 0x395d, 0xf55: 0x397d, 0xf56: 0x399d, 0xf57: 0x39bd, + 0xf58: 0x39dd, 0xf59: 0x39fd, 0xf5a: 0x3a1d, 0xf5b: 0x3a3d, 0xf5c: 0x3a5d, 0xf5d: 0x3a7d, + 0xf5e: 0x3a9d, 0xf5f: 0x3abd, 0xf60: 0x3add, 0xf61: 0x3afd, 0xf62: 0x3b1d, 0xf63: 0x3b3d, + 0xf64: 0x3b5d, 0xf65: 0x3b7d, 0xf66: 0x127d, 0xf67: 0x3b9d, 0xf68: 0x3bbd, 0xf69: 0x3bdd, + 0xf6a: 0x3bfd, 0xf6b: 0x3c1d, 0xf6c: 0x3c3d, 0xf6d: 0x3c5d, 0xf6e: 0x239d, 0xf6f: 0x3c7d, + 0xf70: 0x3c9d, 0xf71: 0x3939, 0xf72: 0x3951, 0xf73: 0x3969, 0xf74: 0x3981, 0xf75: 0x3999, + 0xf76: 0x39b1, 0xf77: 0x39c9, 0xf78: 0x39e1, 0xf79: 0x39f9, 0xf7a: 0x3a11, 0xf7b: 0x3a29, + 0xf7c: 0x3a41, 0xf7d: 0x3a59, 0xf7e: 0x3a71, 0xf7f: 0x3a89, + // Block 0x3e, offset 0xf80 + 0xf80: 0x3aa1, 0xf81: 0x3ac9, 0xf82: 0x3af1, 0xf83: 0x3b19, 0xf84: 0x3b41, 0xf85: 0x3b69, + 0xf86: 0x3b91, 0xf87: 0x3bb9, 0xf88: 0x3be1, 0xf89: 0x3c09, 0xf8a: 0x3c39, 0xf8b: 0x3c69, + 0xf8c: 0x3c99, 0xf8d: 0x3cbd, 0xf8e: 0x3cb1, 0xf8f: 0x3cdd, 0xf90: 0x3cfd, 0xf91: 0x3d15, + 0xf92: 0x3d2d, 0xf93: 0x3d45, 0xf94: 0x3d5d, 0xf95: 0x3d5d, 0xf96: 0x3d45, 0xf97: 0x3d75, + 0xf98: 0x07bd, 0xf99: 0x3d8d, 0xf9a: 0x3da5, 0xf9b: 0x3dbd, 0xf9c: 0x3dd5, 0xf9d: 0x3ded, + 0xf9e: 0x3e05, 0xf9f: 0x3e1d, 0xfa0: 0x3e35, 0xfa1: 0x3e4d, 0xfa2: 0x3e65, 0xfa3: 0x3e7d, + 0xfa4: 0x3e95, 0xfa5: 0x3e95, 0xfa6: 0x3ead, 0xfa7: 0x3ead, 0xfa8: 0x3ec5, 0xfa9: 0x3ec5, + 0xfaa: 0x3edd, 0xfab: 0x3ef5, 0xfac: 0x3f0d, 0xfad: 0x3f25, 0xfae: 0x3f3d, 0xfaf: 0x3f3d, + 0xfb0: 0x3f55, 0xfb1: 0x3f55, 0xfb2: 0x3f55, 0xfb3: 0x3f6d, 0xfb4: 0x3f85, 0xfb5: 0x3f9d, + 0xfb6: 0x3fb5, 0xfb7: 0x3f9d, 0xfb8: 0x3fcd, 0xfb9: 0x3fe5, 0xfba: 0x3f6d, 0xfbb: 0x3ffd, + 0xfbc: 0x4015, 0xfbd: 0x4015, 0xfbe: 0x4015, 0xfbf: 0x0040, + // Block 0x3f, offset 0xfc0 + 0xfc0: 0x3cc9, 0xfc1: 0x3d31, 0xfc2: 0x3d99, 0xfc3: 0x3e01, 0xfc4: 0x3e51, 0xfc5: 0x3eb9, + 0xfc6: 0x3f09, 0xfc7: 0x3f59, 0xfc8: 0x3fd9, 0xfc9: 0x4041, 0xfca: 0x4091, 0xfcb: 0x40e1, + 0xfcc: 0x4131, 0xfcd: 0x4199, 0xfce: 0x4201, 0xfcf: 0x4251, 0xfd0: 0x42a1, 0xfd1: 0x42d9, + 0xfd2: 0x4329, 0xfd3: 0x4391, 0xfd4: 0x43f9, 0xfd5: 0x4431, 0xfd6: 0x44b1, 0xfd7: 0x4549, + 0xfd8: 0x45c9, 0xfd9: 0x4619, 0xfda: 0x4699, 0xfdb: 0x4719, 0xfdc: 0x4781, 0xfdd: 0x47d1, + 0xfde: 0x4821, 0xfdf: 0x4871, 0xfe0: 0x48d9, 0xfe1: 0x4959, 0xfe2: 0x49c1, 0xfe3: 0x4a11, + 0xfe4: 0x4a61, 0xfe5: 0x4ab1, 0xfe6: 0x4ae9, 0xfe7: 0x4b21, 0xfe8: 0x4b59, 0xfe9: 0x4b91, + 0xfea: 0x4be1, 0xfeb: 0x4c31, 0xfec: 0x4cb1, 0xfed: 0x4d01, 0xfee: 0x4d69, 0xfef: 0x4de9, + 0xff0: 0x4e39, 0xff1: 0x4e71, 0xff2: 0x4ea9, 0xff3: 0x4f29, 0xff4: 0x4f91, 0xff5: 0x5011, + 0xff6: 0x5061, 0xff7: 0x50e1, 0xff8: 0x5119, 0xff9: 0x5169, 0xffa: 0x51b9, 0xffb: 0x5209, + 0xffc: 0x5259, 0xffd: 0x52a9, 0xffe: 0x5311, 0xfff: 0x5361, + // Block 0x40, offset 0x1000 + 0x1000: 0x5399, 0x1001: 0x53e9, 0x1002: 0x5439, 0x1003: 0x5489, 0x1004: 0x54f1, 0x1005: 0x5541, + 0x1006: 0x5591, 0x1007: 0x55e1, 0x1008: 0x5661, 0x1009: 0x56c9, 0x100a: 0x5701, 0x100b: 0x5781, + 0x100c: 0x57b9, 0x100d: 0x5821, 0x100e: 0x5889, 0x100f: 0x58d9, 0x1010: 0x5929, 
0x1011: 0x5979, + 0x1012: 0x59e1, 0x1013: 0x5a19, 0x1014: 0x5a69, 0x1015: 0x5ad1, 0x1016: 0x5b09, 0x1017: 0x5b89, + 0x1018: 0x5bd9, 0x1019: 0x5c01, 0x101a: 0x5c29, 0x101b: 0x5c51, 0x101c: 0x5c79, 0x101d: 0x5ca1, + 0x101e: 0x5cc9, 0x101f: 0x5cf1, 0x1020: 0x5d19, 0x1021: 0x5d41, 0x1022: 0x5d69, 0x1023: 0x5d99, + 0x1024: 0x5dc9, 0x1025: 0x5df9, 0x1026: 0x5e29, 0x1027: 0x5e59, 0x1028: 0x5e89, 0x1029: 0x5eb9, + 0x102a: 0x5ee9, 0x102b: 0x5f19, 0x102c: 0x5f49, 0x102d: 0x5f79, 0x102e: 0x5fa9, 0x102f: 0x5fd9, + 0x1030: 0x6009, 0x1031: 0x402d, 0x1032: 0x6039, 0x1033: 0x6051, 0x1034: 0x404d, 0x1035: 0x6069, + 0x1036: 0x6081, 0x1037: 0x6099, 0x1038: 0x406d, 0x1039: 0x406d, 0x103a: 0x60b1, 0x103b: 0x60c9, + 0x103c: 0x6101, 0x103d: 0x6139, 0x103e: 0x6171, 0x103f: 0x61a9, + // Block 0x41, offset 0x1040 + 0x1040: 0x6211, 0x1041: 0x6229, 0x1042: 0x408d, 0x1043: 0x6241, 0x1044: 0x6259, 0x1045: 0x6271, + 0x1046: 0x6289, 0x1047: 0x62a1, 0x1048: 0x40ad, 0x1049: 0x62b9, 0x104a: 0x62e1, 0x104b: 0x62f9, + 0x104c: 0x40cd, 0x104d: 0x40cd, 0x104e: 0x6311, 0x104f: 0x6329, 0x1050: 0x6341, 0x1051: 0x40ed, + 0x1052: 0x410d, 0x1053: 0x412d, 0x1054: 0x414d, 0x1055: 0x416d, 0x1056: 0x6359, 0x1057: 0x6371, + 0x1058: 0x6389, 0x1059: 0x63a1, 0x105a: 0x63b9, 0x105b: 0x418d, 0x105c: 0x63d1, 0x105d: 0x63e9, + 0x105e: 0x6401, 0x105f: 0x41ad, 0x1060: 0x41cd, 0x1061: 0x6419, 0x1062: 0x41ed, 0x1063: 0x420d, + 0x1064: 0x422d, 0x1065: 0x6431, 0x1066: 0x424d, 0x1067: 0x6449, 0x1068: 0x6479, 0x1069: 0x6211, + 0x106a: 0x426d, 0x106b: 0x428d, 0x106c: 0x42ad, 0x106d: 0x42cd, 0x106e: 0x64b1, 0x106f: 0x64f1, + 0x1070: 0x6539, 0x1071: 0x6551, 0x1072: 0x42ed, 0x1073: 0x6569, 0x1074: 0x6581, 0x1075: 0x6599, + 0x1076: 0x430d, 0x1077: 0x65b1, 0x1078: 0x65c9, 0x1079: 0x65b1, 0x107a: 0x65e1, 0x107b: 0x65f9, + 0x107c: 0x432d, 0x107d: 0x6611, 0x107e: 0x6629, 0x107f: 0x6611, + // Block 0x42, offset 0x1080 + 0x1080: 0x434d, 0x1081: 0x436d, 0x1082: 0x0040, 0x1083: 0x6641, 0x1084: 0x6659, 0x1085: 0x6671, + 0x1086: 0x6689, 0x1087: 0x0040, 0x1088: 0x66c1, 0x1089: 0x66d9, 0x108a: 0x66f1, 0x108b: 0x6709, + 0x108c: 0x6721, 0x108d: 0x6739, 0x108e: 0x6401, 0x108f: 0x6751, 0x1090: 0x6769, 0x1091: 0x6781, + 0x1092: 0x438d, 0x1093: 0x6799, 0x1094: 0x6289, 0x1095: 0x43ad, 0x1096: 0x43cd, 0x1097: 0x67b1, + 0x1098: 0x0040, 0x1099: 0x43ed, 0x109a: 0x67c9, 0x109b: 0x67e1, 0x109c: 0x67f9, 0x109d: 0x6811, + 0x109e: 0x6829, 0x109f: 0x6859, 0x10a0: 0x6889, 0x10a1: 0x68b1, 0x10a2: 0x68d9, 0x10a3: 0x6901, + 0x10a4: 0x6929, 0x10a5: 0x6951, 0x10a6: 0x6979, 0x10a7: 0x69a1, 0x10a8: 0x69c9, 0x10a9: 0x69f1, + 0x10aa: 0x6a21, 0x10ab: 0x6a51, 0x10ac: 0x6a81, 0x10ad: 0x6ab1, 0x10ae: 0x6ae1, 0x10af: 0x6b11, + 0x10b0: 0x6b41, 0x10b1: 0x6b71, 0x10b2: 0x6ba1, 0x10b3: 0x6bd1, 0x10b4: 0x6c01, 0x10b5: 0x6c31, + 0x10b6: 0x6c61, 0x10b7: 0x6c91, 0x10b8: 0x6cc1, 0x10b9: 0x6cf1, 0x10ba: 0x6d21, 0x10bb: 0x6d51, + 0x10bc: 0x6d81, 0x10bd: 0x6db1, 0x10be: 0x6de1, 0x10bf: 0x440d, + // Block 0x43, offset 0x10c0 + 0x10c0: 0xe00d, 0x10c1: 0x0008, 0x10c2: 0xe00d, 0x10c3: 0x0008, 0x10c4: 0xe00d, 0x10c5: 0x0008, + 0x10c6: 0xe00d, 0x10c7: 0x0008, 0x10c8: 0xe00d, 0x10c9: 0x0008, 0x10ca: 0xe00d, 0x10cb: 0x0008, + 0x10cc: 0xe00d, 0x10cd: 0x0008, 0x10ce: 0xe00d, 0x10cf: 0x0008, 0x10d0: 0xe00d, 0x10d1: 0x0008, + 0x10d2: 0xe00d, 0x10d3: 0x0008, 0x10d4: 0xe00d, 0x10d5: 0x0008, 0x10d6: 0xe00d, 0x10d7: 0x0008, + 0x10d8: 0xe00d, 0x10d9: 0x0008, 0x10da: 0xe00d, 0x10db: 0x0008, 0x10dc: 0xe00d, 0x10dd: 0x0008, + 0x10de: 0xe00d, 0x10df: 0x0008, 0x10e0: 0xe00d, 0x10e1: 0x0008, 0x10e2: 0xe00d, 0x10e3: 0x0008, + 0x10e4: 
0xe00d, 0x10e5: 0x0008, 0x10e6: 0xe00d, 0x10e7: 0x0008, 0x10e8: 0xe00d, 0x10e9: 0x0008, + 0x10ea: 0xe00d, 0x10eb: 0x0008, 0x10ec: 0xe00d, 0x10ed: 0x0008, 0x10ee: 0x0008, 0x10ef: 0x3308, + 0x10f0: 0x3318, 0x10f1: 0x3318, 0x10f2: 0x3318, 0x10f3: 0x0018, 0x10f4: 0x3308, 0x10f5: 0x3308, + 0x10f6: 0x3308, 0x10f7: 0x3308, 0x10f8: 0x3308, 0x10f9: 0x3308, 0x10fa: 0x3308, 0x10fb: 0x3308, + 0x10fc: 0x3308, 0x10fd: 0x3308, 0x10fe: 0x0018, 0x10ff: 0x0008, + // Block 0x44, offset 0x1100 + 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, + 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, + 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, + 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, + 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0x0ea1, 0x111d: 0x6e11, + 0x111e: 0x3308, 0x111f: 0x3308, 0x1120: 0x0008, 0x1121: 0x0008, 0x1122: 0x0008, 0x1123: 0x0008, + 0x1124: 0x0008, 0x1125: 0x0008, 0x1126: 0x0008, 0x1127: 0x0008, 0x1128: 0x0008, 0x1129: 0x0008, + 0x112a: 0x0008, 0x112b: 0x0008, 0x112c: 0x0008, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x0008, + 0x1130: 0x0008, 0x1131: 0x0008, 0x1132: 0x0008, 0x1133: 0x0008, 0x1134: 0x0008, 0x1135: 0x0008, + 0x1136: 0x0008, 0x1137: 0x0008, 0x1138: 0x0008, 0x1139: 0x0008, 0x113a: 0x0008, 0x113b: 0x0008, + 0x113c: 0x0008, 0x113d: 0x0008, 0x113e: 0x0008, 0x113f: 0x0008, + // Block 0x45, offset 0x1140 + 0x1140: 0x0018, 0x1141: 0x0018, 0x1142: 0x0018, 0x1143: 0x0018, 0x1144: 0x0018, 0x1145: 0x0018, + 0x1146: 0x0018, 0x1147: 0x0018, 0x1148: 0x0018, 0x1149: 0x0018, 0x114a: 0x0018, 0x114b: 0x0018, + 0x114c: 0x0018, 0x114d: 0x0018, 0x114e: 0x0018, 0x114f: 0x0018, 0x1150: 0x0018, 0x1151: 0x0018, + 0x1152: 0x0018, 0x1153: 0x0018, 0x1154: 0x0018, 0x1155: 0x0018, 0x1156: 0x0018, 0x1157: 0x0008, + 0x1158: 0x0008, 0x1159: 0x0008, 0x115a: 0x0008, 0x115b: 0x0008, 0x115c: 0x0008, 0x115d: 0x0008, + 0x115e: 0x0008, 0x115f: 0x0008, 0x1160: 0x0018, 0x1161: 0x0018, 0x1162: 0xe00d, 0x1163: 0x0008, + 0x1164: 0xe00d, 0x1165: 0x0008, 0x1166: 0xe00d, 0x1167: 0x0008, 0x1168: 0xe00d, 0x1169: 0x0008, + 0x116a: 0xe00d, 0x116b: 0x0008, 0x116c: 0xe00d, 0x116d: 0x0008, 0x116e: 0xe00d, 0x116f: 0x0008, + 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0xe00d, 0x1173: 0x0008, 0x1174: 0xe00d, 0x1175: 0x0008, + 0x1176: 0xe00d, 0x1177: 0x0008, 0x1178: 0xe00d, 0x1179: 0x0008, 0x117a: 0xe00d, 0x117b: 0x0008, + 0x117c: 0xe00d, 0x117d: 0x0008, 0x117e: 0xe00d, 0x117f: 0x0008, + // Block 0x46, offset 0x1180 + 0x1180: 0xe00d, 0x1181: 0x0008, 0x1182: 0xe00d, 0x1183: 0x0008, 0x1184: 0xe00d, 0x1185: 0x0008, + 0x1186: 0xe00d, 0x1187: 0x0008, 0x1188: 0xe00d, 0x1189: 0x0008, 0x118a: 0xe00d, 0x118b: 0x0008, + 0x118c: 0xe00d, 0x118d: 0x0008, 0x118e: 0xe00d, 0x118f: 0x0008, 0x1190: 0xe00d, 0x1191: 0x0008, + 0x1192: 0xe00d, 0x1193: 0x0008, 0x1194: 0xe00d, 0x1195: 0x0008, 0x1196: 0xe00d, 0x1197: 0x0008, + 0x1198: 0xe00d, 0x1199: 0x0008, 0x119a: 0xe00d, 0x119b: 0x0008, 0x119c: 0xe00d, 0x119d: 0x0008, + 0x119e: 0xe00d, 0x119f: 0x0008, 0x11a0: 0xe00d, 0x11a1: 0x0008, 0x11a2: 0xe00d, 0x11a3: 0x0008, + 0x11a4: 0xe00d, 0x11a5: 0x0008, 0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, + 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, + 0x11b0: 0xe0fd, 0x11b1: 0x0008, 0x11b2: 0x0008, 0x11b3: 0x0008, 0x11b4: 0x0008, 0x11b5: 0x0008, + 0x11b6: 0x0008, 0x11b7: 0x0008, 
0x11b8: 0x0008, 0x11b9: 0xe01d, 0x11ba: 0x0008, 0x11bb: 0xe03d, + 0x11bc: 0x0008, 0x11bd: 0x442d, 0x11be: 0xe00d, 0x11bf: 0x0008, + // Block 0x47, offset 0x11c0 + 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, + 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0x0008, 0x11c9: 0x0018, 0x11ca: 0x0018, 0x11cb: 0xe03d, + 0x11cc: 0x0008, 0x11cd: 0x11d9, 0x11ce: 0x0008, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, + 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0x0008, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, + 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, + 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, + 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, + 0x11ea: 0x6e29, 0x11eb: 0x1029, 0x11ec: 0x11c1, 0x11ed: 0x6e41, 0x11ee: 0x1221, 0x11ef: 0x0040, + 0x11f0: 0x6e59, 0x11f1: 0x6e71, 0x11f2: 0x1239, 0x11f3: 0x444d, 0x11f4: 0xe00d, 0x11f5: 0x0008, + 0x11f6: 0xe00d, 0x11f7: 0x0008, 0x11f8: 0x0040, 0x11f9: 0x0040, 0x11fa: 0x0040, 0x11fb: 0x0040, + 0x11fc: 0x0040, 0x11fd: 0x0040, 0x11fe: 0x0040, 0x11ff: 0x0040, + // Block 0x48, offset 0x1200 + 0x1200: 0x64d5, 0x1201: 0x64f5, 0x1202: 0x6515, 0x1203: 0x6535, 0x1204: 0x6555, 0x1205: 0x6575, + 0x1206: 0x6595, 0x1207: 0x65b5, 0x1208: 0x65d5, 0x1209: 0x65f5, 0x120a: 0x6615, 0x120b: 0x6635, + 0x120c: 0x6655, 0x120d: 0x6675, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0x6695, 0x1211: 0x0008, + 0x1212: 0x66b5, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x66d5, 0x1216: 0x66f5, 0x1217: 0x6715, + 0x1218: 0x6735, 0x1219: 0x6755, 0x121a: 0x6775, 0x121b: 0x6795, 0x121c: 0x67b5, 0x121d: 0x67d5, + 0x121e: 0x67f5, 0x121f: 0x0008, 0x1220: 0x6815, 0x1221: 0x0008, 0x1222: 0x6835, 0x1223: 0x0008, + 0x1224: 0x0008, 0x1225: 0x6855, 0x1226: 0x6875, 0x1227: 0x0008, 0x1228: 0x0008, 0x1229: 0x0008, + 0x122a: 0x6895, 0x122b: 0x68b5, 0x122c: 0x68d5, 0x122d: 0x68f5, 0x122e: 0x6915, 0x122f: 0x6935, + 0x1230: 0x6955, 0x1231: 0x6975, 0x1232: 0x6995, 0x1233: 0x69b5, 0x1234: 0x69d5, 0x1235: 0x69f5, + 0x1236: 0x6a15, 0x1237: 0x6a35, 0x1238: 0x6a55, 0x1239: 0x6a75, 0x123a: 0x6a95, 0x123b: 0x6ab5, + 0x123c: 0x6ad5, 0x123d: 0x6af5, 0x123e: 0x6b15, 0x123f: 0x6b35, + // Block 0x49, offset 0x1240 + 0x1240: 0x7a95, 0x1241: 0x7ab5, 0x1242: 0x7ad5, 0x1243: 0x7af5, 0x1244: 0x7b15, 0x1245: 0x7b35, + 0x1246: 0x7b55, 0x1247: 0x7b75, 0x1248: 0x7b95, 0x1249: 0x7bb5, 0x124a: 0x7bd5, 0x124b: 0x7bf5, + 0x124c: 0x7c15, 0x124d: 0x7c35, 0x124e: 0x7c55, 0x124f: 0x6ec9, 0x1250: 0x6ef1, 0x1251: 0x6f19, + 0x1252: 0x7c75, 0x1253: 0x7c95, 0x1254: 0x7cb5, 0x1255: 0x6f41, 0x1256: 0x6f69, 0x1257: 0x6f91, + 0x1258: 0x7cd5, 0x1259: 0x7cf5, 0x125a: 0x0040, 0x125b: 0x0040, 0x125c: 0x0040, 0x125d: 0x0040, + 0x125e: 0x0040, 0x125f: 0x0040, 0x1260: 0x0040, 0x1261: 0x0040, 0x1262: 0x0040, 0x1263: 0x0040, + 0x1264: 0x0040, 0x1265: 0x0040, 0x1266: 0x0040, 0x1267: 0x0040, 0x1268: 0x0040, 0x1269: 0x0040, + 0x126a: 0x0040, 0x126b: 0x0040, 0x126c: 0x0040, 0x126d: 0x0040, 0x126e: 0x0040, 0x126f: 0x0040, + 0x1270: 0x0040, 0x1271: 0x0040, 0x1272: 0x0040, 0x1273: 0x0040, 0x1274: 0x0040, 0x1275: 0x0040, + 0x1276: 0x0040, 0x1277: 0x0040, 0x1278: 0x0040, 0x1279: 0x0040, 0x127a: 0x0040, 0x127b: 0x0040, + 0x127c: 0x0040, 0x127d: 0x0040, 0x127e: 0x0040, 0x127f: 0x0040, + // Block 0x4a, offset 0x1280 + 0x1280: 0x6fb9, 0x1281: 0x6fd1, 0x1282: 0x6fe9, 0x1283: 0x7d15, 0x1284: 0x7d35, 0x1285: 0x7001, + 0x1286: 0x7001, 0x1287: 0x0040, 0x1288: 0x0040, 0x1289: 
0x0040, 0x128a: 0x0040, 0x128b: 0x0040, + 0x128c: 0x0040, 0x128d: 0x0040, 0x128e: 0x0040, 0x128f: 0x0040, 0x1290: 0x0040, 0x1291: 0x0040, + 0x1292: 0x0040, 0x1293: 0x7019, 0x1294: 0x7041, 0x1295: 0x7069, 0x1296: 0x7091, 0x1297: 0x70b9, + 0x1298: 0x0040, 0x1299: 0x0040, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x70e1, + 0x129e: 0x3308, 0x129f: 0x7109, 0x12a0: 0x7131, 0x12a1: 0x20a9, 0x12a2: 0x20f1, 0x12a3: 0x7149, + 0x12a4: 0x7161, 0x12a5: 0x7179, 0x12a6: 0x7191, 0x12a7: 0x71a9, 0x12a8: 0x71c1, 0x12a9: 0x1fb2, + 0x12aa: 0x71d9, 0x12ab: 0x7201, 0x12ac: 0x7229, 0x12ad: 0x7261, 0x12ae: 0x7299, 0x12af: 0x72c1, + 0x12b0: 0x72e9, 0x12b1: 0x7311, 0x12b2: 0x7339, 0x12b3: 0x7361, 0x12b4: 0x7389, 0x12b5: 0x73b1, + 0x12b6: 0x73d9, 0x12b7: 0x0040, 0x12b8: 0x7401, 0x12b9: 0x7429, 0x12ba: 0x7451, 0x12bb: 0x7479, + 0x12bc: 0x74a1, 0x12bd: 0x0040, 0x12be: 0x74c9, 0x12bf: 0x0040, + // Block 0x4b, offset 0x12c0 + 0x12c0: 0x74f1, 0x12c1: 0x7519, 0x12c2: 0x0040, 0x12c3: 0x7541, 0x12c4: 0x7569, 0x12c5: 0x0040, + 0x12c6: 0x7591, 0x12c7: 0x75b9, 0x12c8: 0x75e1, 0x12c9: 0x7609, 0x12ca: 0x7631, 0x12cb: 0x7659, + 0x12cc: 0x7681, 0x12cd: 0x76a9, 0x12ce: 0x76d1, 0x12cf: 0x76f9, 0x12d0: 0x7721, 0x12d1: 0x7721, + 0x12d2: 0x7739, 0x12d3: 0x7739, 0x12d4: 0x7739, 0x12d5: 0x7739, 0x12d6: 0x7751, 0x12d7: 0x7751, + 0x12d8: 0x7751, 0x12d9: 0x7751, 0x12da: 0x7769, 0x12db: 0x7769, 0x12dc: 0x7769, 0x12dd: 0x7769, + 0x12de: 0x7781, 0x12df: 0x7781, 0x12e0: 0x7781, 0x12e1: 0x7781, 0x12e2: 0x7799, 0x12e3: 0x7799, + 0x12e4: 0x7799, 0x12e5: 0x7799, 0x12e6: 0x77b1, 0x12e7: 0x77b1, 0x12e8: 0x77b1, 0x12e9: 0x77b1, + 0x12ea: 0x77c9, 0x12eb: 0x77c9, 0x12ec: 0x77c9, 0x12ed: 0x77c9, 0x12ee: 0x77e1, 0x12ef: 0x77e1, + 0x12f0: 0x77e1, 0x12f1: 0x77e1, 0x12f2: 0x77f9, 0x12f3: 0x77f9, 0x12f4: 0x77f9, 0x12f5: 0x77f9, + 0x12f6: 0x7811, 0x12f7: 0x7811, 0x12f8: 0x7811, 0x12f9: 0x7811, 0x12fa: 0x7829, 0x12fb: 0x7829, + 0x12fc: 0x7829, 0x12fd: 0x7829, 0x12fe: 0x7841, 0x12ff: 0x7841, + // Block 0x4c, offset 0x1300 + 0x1300: 0x7841, 0x1301: 0x7841, 0x1302: 0x7859, 0x1303: 0x7859, 0x1304: 0x7871, 0x1305: 0x7871, + 0x1306: 0x7889, 0x1307: 0x7889, 0x1308: 0x78a1, 0x1309: 0x78a1, 0x130a: 0x78b9, 0x130b: 0x78b9, + 0x130c: 0x78d1, 0x130d: 0x78d1, 0x130e: 0x78e9, 0x130f: 0x78e9, 0x1310: 0x78e9, 0x1311: 0x78e9, + 0x1312: 0x7901, 0x1313: 0x7901, 0x1314: 0x7901, 0x1315: 0x7901, 0x1316: 0x7919, 0x1317: 0x7919, + 0x1318: 0x7919, 0x1319: 0x7919, 0x131a: 0x7931, 0x131b: 0x7931, 0x131c: 0x7931, 0x131d: 0x7931, + 0x131e: 0x7949, 0x131f: 0x7949, 0x1320: 0x7961, 0x1321: 0x7961, 0x1322: 0x7961, 0x1323: 0x7961, + 0x1324: 0x7979, 0x1325: 0x7979, 0x1326: 0x7991, 0x1327: 0x7991, 0x1328: 0x7991, 0x1329: 0x7991, + 0x132a: 0x79a9, 0x132b: 0x79a9, 0x132c: 0x79a9, 0x132d: 0x79a9, 0x132e: 0x79c1, 0x132f: 0x79c1, + 0x1330: 0x79d9, 0x1331: 0x79d9, 0x1332: 0x0818, 0x1333: 0x0818, 0x1334: 0x0818, 0x1335: 0x0818, + 0x1336: 0x0818, 0x1337: 0x0818, 0x1338: 0x0818, 0x1339: 0x0818, 0x133a: 0x0818, 0x133b: 0x0818, + 0x133c: 0x0818, 0x133d: 0x0818, 0x133e: 0x0818, 0x133f: 0x0818, + // Block 0x4d, offset 0x1340 + 0x1340: 0x0818, 0x1341: 0x0818, 0x1342: 0x0040, 0x1343: 0x0040, 0x1344: 0x0040, 0x1345: 0x0040, + 0x1346: 0x0040, 0x1347: 0x0040, 0x1348: 0x0040, 0x1349: 0x0040, 0x134a: 0x0040, 0x134b: 0x0040, + 0x134c: 0x0040, 0x134d: 0x0040, 0x134e: 0x0040, 0x134f: 0x0040, 0x1350: 0x0040, 0x1351: 0x0040, + 0x1352: 0x0040, 0x1353: 0x79f1, 0x1354: 0x79f1, 0x1355: 0x79f1, 0x1356: 0x79f1, 0x1357: 0x7a09, + 0x1358: 0x7a09, 0x1359: 0x7a21, 0x135a: 0x7a21, 0x135b: 0x7a39, 0x135c: 0x7a39, 
0x135d: 0x0479, + 0x135e: 0x7a51, 0x135f: 0x7a51, 0x1360: 0x7a69, 0x1361: 0x7a69, 0x1362: 0x7a81, 0x1363: 0x7a81, + 0x1364: 0x7a99, 0x1365: 0x7a99, 0x1366: 0x7a99, 0x1367: 0x7a99, 0x1368: 0x7ab1, 0x1369: 0x7ab1, + 0x136a: 0x7ac9, 0x136b: 0x7ac9, 0x136c: 0x7af1, 0x136d: 0x7af1, 0x136e: 0x7b19, 0x136f: 0x7b19, + 0x1370: 0x7b41, 0x1371: 0x7b41, 0x1372: 0x7b69, 0x1373: 0x7b69, 0x1374: 0x7b91, 0x1375: 0x7b91, + 0x1376: 0x7bb9, 0x1377: 0x7bb9, 0x1378: 0x7bb9, 0x1379: 0x7be1, 0x137a: 0x7be1, 0x137b: 0x7be1, + 0x137c: 0x7c09, 0x137d: 0x7c09, 0x137e: 0x7c09, 0x137f: 0x7c09, + // Block 0x4e, offset 0x1380 + 0x1380: 0x85f9, 0x1381: 0x8621, 0x1382: 0x8649, 0x1383: 0x8671, 0x1384: 0x8699, 0x1385: 0x86c1, + 0x1386: 0x86e9, 0x1387: 0x8711, 0x1388: 0x8739, 0x1389: 0x8761, 0x138a: 0x8789, 0x138b: 0x87b1, + 0x138c: 0x87d9, 0x138d: 0x8801, 0x138e: 0x8829, 0x138f: 0x8851, 0x1390: 0x8879, 0x1391: 0x88a1, + 0x1392: 0x88c9, 0x1393: 0x88f1, 0x1394: 0x8919, 0x1395: 0x8941, 0x1396: 0x8969, 0x1397: 0x8991, + 0x1398: 0x89b9, 0x1399: 0x89e1, 0x139a: 0x8a09, 0x139b: 0x8a31, 0x139c: 0x8a59, 0x139d: 0x8a81, + 0x139e: 0x8aaa, 0x139f: 0x8ada, 0x13a0: 0x8b0a, 0x13a1: 0x8b3a, 0x13a2: 0x8b6a, 0x13a3: 0x8b9a, + 0x13a4: 0x8bc9, 0x13a5: 0x8bf1, 0x13a6: 0x7c71, 0x13a7: 0x8c19, 0x13a8: 0x7be1, 0x13a9: 0x7c99, + 0x13aa: 0x8c41, 0x13ab: 0x8c69, 0x13ac: 0x7d39, 0x13ad: 0x8c91, 0x13ae: 0x7d61, 0x13af: 0x7d89, + 0x13b0: 0x8cb9, 0x13b1: 0x8ce1, 0x13b2: 0x7e29, 0x13b3: 0x8d09, 0x13b4: 0x7e51, 0x13b5: 0x7e79, + 0x13b6: 0x8d31, 0x13b7: 0x8d59, 0x13b8: 0x7ec9, 0x13b9: 0x8d81, 0x13ba: 0x7ef1, 0x13bb: 0x7f19, + 0x13bc: 0x83a1, 0x13bd: 0x83c9, 0x13be: 0x8441, 0x13bf: 0x8469, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x8491, 0x13c1: 0x8531, 0x13c2: 0x8559, 0x13c3: 0x8581, 0x13c4: 0x85a9, 0x13c5: 0x8649, + 0x13c6: 0x8671, 0x13c7: 0x8699, 0x13c8: 0x8da9, 0x13c9: 0x8739, 0x13ca: 0x8dd1, 0x13cb: 0x8df9, + 0x13cc: 0x8829, 0x13cd: 0x8e21, 0x13ce: 0x8851, 0x13cf: 0x8879, 0x13d0: 0x8a81, 0x13d1: 0x8e49, + 0x13d2: 0x8e71, 0x13d3: 0x89b9, 0x13d4: 0x8e99, 0x13d5: 0x89e1, 0x13d6: 0x8a09, 0x13d7: 0x7c21, + 0x13d8: 0x7c49, 0x13d9: 0x8ec1, 0x13da: 0x7c71, 0x13db: 0x8ee9, 0x13dc: 0x7cc1, 0x13dd: 0x7ce9, + 0x13de: 0x7d11, 0x13df: 0x7d39, 0x13e0: 0x8f11, 0x13e1: 0x7db1, 0x13e2: 0x7dd9, 0x13e3: 0x7e01, + 0x13e4: 0x7e29, 0x13e5: 0x8f39, 0x13e6: 0x7ec9, 0x13e7: 0x7f41, 0x13e8: 0x7f69, 0x13e9: 0x7f91, + 0x13ea: 0x7fb9, 0x13eb: 0x7fe1, 0x13ec: 0x8031, 0x13ed: 0x8059, 0x13ee: 0x8081, 0x13ef: 0x80a9, + 0x13f0: 0x80d1, 0x13f1: 0x80f9, 0x13f2: 0x8f61, 0x13f3: 0x8121, 0x13f4: 0x8149, 0x13f5: 0x8171, + 0x13f6: 0x8199, 0x13f7: 0x81c1, 0x13f8: 0x81e9, 0x13f9: 0x8239, 0x13fa: 0x8261, 0x13fb: 0x8289, + 0x13fc: 0x82b1, 0x13fd: 0x82d9, 0x13fe: 0x8301, 0x13ff: 0x8329, + // Block 0x50, offset 0x1400 + 0x1400: 0x8351, 0x1401: 0x8379, 0x1402: 0x83f1, 0x1403: 0x8419, 0x1404: 0x84b9, 0x1405: 0x84e1, + 0x1406: 0x8509, 0x1407: 0x8531, 0x1408: 0x8559, 0x1409: 0x85d1, 0x140a: 0x85f9, 0x140b: 0x8621, + 0x140c: 0x8649, 0x140d: 0x8f89, 0x140e: 0x86c1, 0x140f: 0x86e9, 0x1410: 0x8711, 0x1411: 0x8739, + 0x1412: 0x87b1, 0x1413: 0x87d9, 0x1414: 0x8801, 0x1415: 0x8829, 0x1416: 0x8fb1, 0x1417: 0x88a1, + 0x1418: 0x88c9, 0x1419: 0x8fd9, 0x141a: 0x8941, 0x141b: 0x8969, 0x141c: 0x8991, 0x141d: 0x89b9, + 0x141e: 0x9001, 0x141f: 0x7c71, 0x1420: 0x8ee9, 0x1421: 0x7d39, 0x1422: 0x8f11, 0x1423: 0x7e29, + 0x1424: 0x8f39, 0x1425: 0x7ec9, 0x1426: 0x9029, 0x1427: 0x80d1, 0x1428: 0x9051, 0x1429: 0x9079, + 0x142a: 0x90a1, 0x142b: 0x8531, 0x142c: 0x8559, 0x142d: 0x8649, 0x142e: 0x8829, 0x142f: 0x8fb1, + 0x1430: 
0x89b9, 0x1431: 0x9001, 0x1432: 0x90c9, 0x1433: 0x9101, 0x1434: 0x9139, 0x1435: 0x9171, + 0x1436: 0x9199, 0x1437: 0x91c1, 0x1438: 0x91e9, 0x1439: 0x9211, 0x143a: 0x9239, 0x143b: 0x9261, + 0x143c: 0x9289, 0x143d: 0x92b1, 0x143e: 0x92d9, 0x143f: 0x9301, + // Block 0x51, offset 0x1440 + 0x1440: 0x9329, 0x1441: 0x9351, 0x1442: 0x9379, 0x1443: 0x93a1, 0x1444: 0x93c9, 0x1445: 0x93f1, + 0x1446: 0x9419, 0x1447: 0x9441, 0x1448: 0x9469, 0x1449: 0x9491, 0x144a: 0x94b9, 0x144b: 0x94e1, + 0x144c: 0x9079, 0x144d: 0x9509, 0x144e: 0x9531, 0x144f: 0x9559, 0x1450: 0x9581, 0x1451: 0x9171, + 0x1452: 0x9199, 0x1453: 0x91c1, 0x1454: 0x91e9, 0x1455: 0x9211, 0x1456: 0x9239, 0x1457: 0x9261, + 0x1458: 0x9289, 0x1459: 0x92b1, 0x145a: 0x92d9, 0x145b: 0x9301, 0x145c: 0x9329, 0x145d: 0x9351, + 0x145e: 0x9379, 0x145f: 0x93a1, 0x1460: 0x93c9, 0x1461: 0x93f1, 0x1462: 0x9419, 0x1463: 0x9441, + 0x1464: 0x9469, 0x1465: 0x9491, 0x1466: 0x94b9, 0x1467: 0x94e1, 0x1468: 0x9079, 0x1469: 0x9509, + 0x146a: 0x9531, 0x146b: 0x9559, 0x146c: 0x9581, 0x146d: 0x9491, 0x146e: 0x94b9, 0x146f: 0x94e1, + 0x1470: 0x9079, 0x1471: 0x9051, 0x1472: 0x90a1, 0x1473: 0x8211, 0x1474: 0x8059, 0x1475: 0x8081, + 0x1476: 0x80a9, 0x1477: 0x9491, 0x1478: 0x94b9, 0x1479: 0x94e1, 0x147a: 0x8211, 0x147b: 0x8239, + 0x147c: 0x95a9, 0x147d: 0x95a9, 0x147e: 0x0018, 0x147f: 0x0018, + // Block 0x52, offset 0x1480 + 0x1480: 0x0040, 0x1481: 0x0040, 0x1482: 0x0040, 0x1483: 0x0040, 0x1484: 0x0040, 0x1485: 0x0040, + 0x1486: 0x0040, 0x1487: 0x0040, 0x1488: 0x0040, 0x1489: 0x0040, 0x148a: 0x0040, 0x148b: 0x0040, + 0x148c: 0x0040, 0x148d: 0x0040, 0x148e: 0x0040, 0x148f: 0x0040, 0x1490: 0x95d1, 0x1491: 0x9609, + 0x1492: 0x9609, 0x1493: 0x9641, 0x1494: 0x9679, 0x1495: 0x96b1, 0x1496: 0x96e9, 0x1497: 0x9721, + 0x1498: 0x9759, 0x1499: 0x9759, 0x149a: 0x9791, 0x149b: 0x97c9, 0x149c: 0x9801, 0x149d: 0x9839, + 0x149e: 0x9871, 0x149f: 0x98a9, 0x14a0: 0x98a9, 0x14a1: 0x98e1, 0x14a2: 0x9919, 0x14a3: 0x9919, + 0x14a4: 0x9951, 0x14a5: 0x9951, 0x14a6: 0x9989, 0x14a7: 0x99c1, 0x14a8: 0x99c1, 0x14a9: 0x99f9, + 0x14aa: 0x9a31, 0x14ab: 0x9a31, 0x14ac: 0x9a69, 0x14ad: 0x9a69, 0x14ae: 0x9aa1, 0x14af: 0x9ad9, + 0x14b0: 0x9ad9, 0x14b1: 0x9b11, 0x14b2: 0x9b11, 0x14b3: 0x9b49, 0x14b4: 0x9b81, 0x14b5: 0x9bb9, + 0x14b6: 0x9bf1, 0x14b7: 0x9bf1, 0x14b8: 0x9c29, 0x14b9: 0x9c61, 0x14ba: 0x9c99, 0x14bb: 0x9cd1, + 0x14bc: 0x9d09, 0x14bd: 0x9d09, 0x14be: 0x9d41, 0x14bf: 0x9d79, + // Block 0x53, offset 0x14c0 + 0x14c0: 0xa949, 0x14c1: 0xa981, 0x14c2: 0xa9b9, 0x14c3: 0xa8a1, 0x14c4: 0x9bb9, 0x14c5: 0x9989, + 0x14c6: 0xa9f1, 0x14c7: 0xaa29, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, + 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x0040, 0x14d1: 0x0040, + 0x14d2: 0x0040, 0x14d3: 0x0040, 0x14d4: 0x0040, 0x14d5: 0x0040, 0x14d6: 0x0040, 0x14d7: 0x0040, + 0x14d8: 0x0040, 0x14d9: 0x0040, 0x14da: 0x0040, 0x14db: 0x0040, 0x14dc: 0x0040, 0x14dd: 0x0040, + 0x14de: 0x0040, 0x14df: 0x0040, 0x14e0: 0x0040, 0x14e1: 0x0040, 0x14e2: 0x0040, 0x14e3: 0x0040, + 0x14e4: 0x0040, 0x14e5: 0x0040, 0x14e6: 0x0040, 0x14e7: 0x0040, 0x14e8: 0x0040, 0x14e9: 0x0040, + 0x14ea: 0x0040, 0x14eb: 0x0040, 0x14ec: 0x0040, 0x14ed: 0x0040, 0x14ee: 0x0040, 0x14ef: 0x0040, + 0x14f0: 0xaa61, 0x14f1: 0xaa99, 0x14f2: 0xaad1, 0x14f3: 0xab19, 0x14f4: 0xab61, 0x14f5: 0xaba9, + 0x14f6: 0xabf1, 0x14f7: 0xac39, 0x14f8: 0xac81, 0x14f9: 0xacc9, 0x14fa: 0xad02, 0x14fb: 0xae12, + 0x14fc: 0xae91, 0x14fd: 0x0018, 0x14fe: 0x0040, 0x14ff: 0x0040, + // Block 0x54, offset 0x1500 + 0x1500: 0x33c0, 0x1501: 0x33c0, 
0x1502: 0x33c0, 0x1503: 0x33c0, 0x1504: 0x33c0, 0x1505: 0x33c0, + 0x1506: 0x33c0, 0x1507: 0x33c0, 0x1508: 0x33c0, 0x1509: 0x33c0, 0x150a: 0x33c0, 0x150b: 0x33c0, + 0x150c: 0x33c0, 0x150d: 0x33c0, 0x150e: 0x33c0, 0x150f: 0x33c0, 0x1510: 0xaeda, 0x1511: 0x7d55, + 0x1512: 0x0040, 0x1513: 0xaeea, 0x1514: 0x03c2, 0x1515: 0xaefa, 0x1516: 0xaf0a, 0x1517: 0x7d75, + 0x1518: 0x7d95, 0x1519: 0x0040, 0x151a: 0x0040, 0x151b: 0x0040, 0x151c: 0x0040, 0x151d: 0x0040, + 0x151e: 0x0040, 0x151f: 0x0040, 0x1520: 0x3308, 0x1521: 0x3308, 0x1522: 0x3308, 0x1523: 0x3308, + 0x1524: 0x3308, 0x1525: 0x3308, 0x1526: 0x3308, 0x1527: 0x3308, 0x1528: 0x3308, 0x1529: 0x3308, + 0x152a: 0x3308, 0x152b: 0x3308, 0x152c: 0x3308, 0x152d: 0x3308, 0x152e: 0x3308, 0x152f: 0x3308, + 0x1530: 0x0040, 0x1531: 0x7db5, 0x1532: 0x7dd5, 0x1533: 0xaf1a, 0x1534: 0xaf1a, 0x1535: 0x1fd2, + 0x1536: 0x1fe2, 0x1537: 0xaf2a, 0x1538: 0xaf3a, 0x1539: 0x7df5, 0x153a: 0x7e15, 0x153b: 0x7e35, + 0x153c: 0x7df5, 0x153d: 0x7e55, 0x153e: 0x7e75, 0x153f: 0x7e55, + // Block 0x55, offset 0x1540 + 0x1540: 0x7e95, 0x1541: 0x7eb5, 0x1542: 0x7ed5, 0x1543: 0x7eb5, 0x1544: 0x7ef5, 0x1545: 0x0018, + 0x1546: 0x0018, 0x1547: 0xaf4a, 0x1548: 0xaf5a, 0x1549: 0x7f16, 0x154a: 0x7f36, 0x154b: 0x7f56, + 0x154c: 0x7f76, 0x154d: 0xaf1a, 0x154e: 0xaf1a, 0x154f: 0xaf1a, 0x1550: 0xaeda, 0x1551: 0x7f95, + 0x1552: 0x0040, 0x1553: 0x0040, 0x1554: 0x03c2, 0x1555: 0xaeea, 0x1556: 0xaf0a, 0x1557: 0xaefa, + 0x1558: 0x7fb5, 0x1559: 0x1fd2, 0x155a: 0x1fe2, 0x155b: 0xaf2a, 0x155c: 0xaf3a, 0x155d: 0x7e95, + 0x155e: 0x7ef5, 0x155f: 0xaf6a, 0x1560: 0xaf7a, 0x1561: 0xaf8a, 0x1562: 0x1fb2, 0x1563: 0xaf99, + 0x1564: 0xafaa, 0x1565: 0xafba, 0x1566: 0x1fc2, 0x1567: 0x0040, 0x1568: 0xafca, 0x1569: 0xafda, + 0x156a: 0xafea, 0x156b: 0xaffa, 0x156c: 0x0040, 0x156d: 0x0040, 0x156e: 0x0040, 0x156f: 0x0040, + 0x1570: 0x7fd6, 0x1571: 0xb009, 0x1572: 0x7ff6, 0x1573: 0x0808, 0x1574: 0x8016, 0x1575: 0x0040, + 0x1576: 0x8036, 0x1577: 0xb031, 0x1578: 0x8056, 0x1579: 0xb059, 0x157a: 0x8076, 0x157b: 0xb081, + 0x157c: 0x8096, 0x157d: 0xb0a9, 0x157e: 0x80b6, 0x157f: 0xb0d1, + // Block 0x56, offset 0x1580 + 0x1580: 0xb0f9, 0x1581: 0xb111, 0x1582: 0xb111, 0x1583: 0xb129, 0x1584: 0xb129, 0x1585: 0xb141, + 0x1586: 0xb141, 0x1587: 0xb159, 0x1588: 0xb159, 0x1589: 0xb171, 0x158a: 0xb171, 0x158b: 0xb171, + 0x158c: 0xb171, 0x158d: 0xb189, 0x158e: 0xb189, 0x158f: 0xb1a1, 0x1590: 0xb1a1, 0x1591: 0xb1a1, + 0x1592: 0xb1a1, 0x1593: 0xb1b9, 0x1594: 0xb1b9, 0x1595: 0xb1d1, 0x1596: 0xb1d1, 0x1597: 0xb1d1, + 0x1598: 0xb1d1, 0x1599: 0xb1e9, 0x159a: 0xb1e9, 0x159b: 0xb1e9, 0x159c: 0xb1e9, 0x159d: 0xb201, + 0x159e: 0xb201, 0x159f: 0xb201, 0x15a0: 0xb201, 0x15a1: 0xb219, 0x15a2: 0xb219, 0x15a3: 0xb219, + 0x15a4: 0xb219, 0x15a5: 0xb231, 0x15a6: 0xb231, 0x15a7: 0xb231, 0x15a8: 0xb231, 0x15a9: 0xb249, + 0x15aa: 0xb249, 0x15ab: 0xb261, 0x15ac: 0xb261, 0x15ad: 0xb279, 0x15ae: 0xb279, 0x15af: 0xb291, + 0x15b0: 0xb291, 0x15b1: 0xb2a9, 0x15b2: 0xb2a9, 0x15b3: 0xb2a9, 0x15b4: 0xb2a9, 0x15b5: 0xb2c1, + 0x15b6: 0xb2c1, 0x15b7: 0xb2c1, 0x15b8: 0xb2c1, 0x15b9: 0xb2d9, 0x15ba: 0xb2d9, 0x15bb: 0xb2d9, + 0x15bc: 0xb2d9, 0x15bd: 0xb2f1, 0x15be: 0xb2f1, 0x15bf: 0xb2f1, + // Block 0x57, offset 0x15c0 + 0x15c0: 0xb2f1, 0x15c1: 0xb309, 0x15c2: 0xb309, 0x15c3: 0xb309, 0x15c4: 0xb309, 0x15c5: 0xb321, + 0x15c6: 0xb321, 0x15c7: 0xb321, 0x15c8: 0xb321, 0x15c9: 0xb339, 0x15ca: 0xb339, 0x15cb: 0xb339, + 0x15cc: 0xb339, 0x15cd: 0xb351, 0x15ce: 0xb351, 0x15cf: 0xb351, 0x15d0: 0xb351, 0x15d1: 0xb369, + 0x15d2: 0xb369, 0x15d3: 0xb369, 0x15d4: 0xb369, 0x15d5: 
0xb381, 0x15d6: 0xb381, 0x15d7: 0xb381, + 0x15d8: 0xb381, 0x15d9: 0xb399, 0x15da: 0xb399, 0x15db: 0xb399, 0x15dc: 0xb399, 0x15dd: 0xb3b1, + 0x15de: 0xb3b1, 0x15df: 0xb3b1, 0x15e0: 0xb3b1, 0x15e1: 0xb3c9, 0x15e2: 0xb3c9, 0x15e3: 0xb3c9, + 0x15e4: 0xb3c9, 0x15e5: 0xb3e1, 0x15e6: 0xb3e1, 0x15e7: 0xb3e1, 0x15e8: 0xb3e1, 0x15e9: 0xb3f9, + 0x15ea: 0xb3f9, 0x15eb: 0xb3f9, 0x15ec: 0xb3f9, 0x15ed: 0xb411, 0x15ee: 0xb411, 0x15ef: 0x7ab1, + 0x15f0: 0x7ab1, 0x15f1: 0xb429, 0x15f2: 0xb429, 0x15f3: 0xb429, 0x15f4: 0xb429, 0x15f5: 0xb441, + 0x15f6: 0xb441, 0x15f7: 0xb469, 0x15f8: 0xb469, 0x15f9: 0xb491, 0x15fa: 0xb491, 0x15fb: 0xb4b9, + 0x15fc: 0xb4b9, 0x15fd: 0x0040, 0x15fe: 0x0040, 0x15ff: 0x03c0, + // Block 0x58, offset 0x1600 + 0x1600: 0x0040, 0x1601: 0xaefa, 0x1602: 0xb4e2, 0x1603: 0xaf6a, 0x1604: 0xafda, 0x1605: 0xafea, + 0x1606: 0xaf7a, 0x1607: 0xb4f2, 0x1608: 0x1fd2, 0x1609: 0x1fe2, 0x160a: 0xaf8a, 0x160b: 0x1fb2, + 0x160c: 0xaeda, 0x160d: 0xaf99, 0x160e: 0x29d1, 0x160f: 0xb502, 0x1610: 0x1f41, 0x1611: 0x00c9, + 0x1612: 0x0069, 0x1613: 0x0079, 0x1614: 0x1f51, 0x1615: 0x1f61, 0x1616: 0x1f71, 0x1617: 0x1f81, + 0x1618: 0x1f91, 0x1619: 0x1fa1, 0x161a: 0xaeea, 0x161b: 0x03c2, 0x161c: 0xafaa, 0x161d: 0x1fc2, + 0x161e: 0xafba, 0x161f: 0xaf0a, 0x1620: 0xaffa, 0x1621: 0x0039, 0x1622: 0x0ee9, 0x1623: 0x1159, + 0x1624: 0x0ef9, 0x1625: 0x0f09, 0x1626: 0x1199, 0x1627: 0x0f31, 0x1628: 0x0249, 0x1629: 0x0f41, + 0x162a: 0x0259, 0x162b: 0x0f51, 0x162c: 0x0359, 0x162d: 0x0f61, 0x162e: 0x0f71, 0x162f: 0x00d9, + 0x1630: 0x0f99, 0x1631: 0x2039, 0x1632: 0x0269, 0x1633: 0x01d9, 0x1634: 0x0fa9, 0x1635: 0x0fb9, + 0x1636: 0x1089, 0x1637: 0x0279, 0x1638: 0x0369, 0x1639: 0x0289, 0x163a: 0x13d1, 0x163b: 0xaf4a, + 0x163c: 0xafca, 0x163d: 0xaf5a, 0x163e: 0xb512, 0x163f: 0xaf1a, + // Block 0x59, offset 0x1640 + 0x1640: 0x1caa, 0x1641: 0x0039, 0x1642: 0x0ee9, 0x1643: 0x1159, 0x1644: 0x0ef9, 0x1645: 0x0f09, + 0x1646: 0x1199, 0x1647: 0x0f31, 0x1648: 0x0249, 0x1649: 0x0f41, 0x164a: 0x0259, 0x164b: 0x0f51, + 0x164c: 0x0359, 0x164d: 0x0f61, 0x164e: 0x0f71, 0x164f: 0x00d9, 0x1650: 0x0f99, 0x1651: 0x2039, + 0x1652: 0x0269, 0x1653: 0x01d9, 0x1654: 0x0fa9, 0x1655: 0x0fb9, 0x1656: 0x1089, 0x1657: 0x0279, + 0x1658: 0x0369, 0x1659: 0x0289, 0x165a: 0x13d1, 0x165b: 0xaf2a, 0x165c: 0xb522, 0x165d: 0xaf3a, + 0x165e: 0xb532, 0x165f: 0x80d5, 0x1660: 0x80f5, 0x1661: 0x29d1, 0x1662: 0x8115, 0x1663: 0x8115, + 0x1664: 0x8135, 0x1665: 0x8155, 0x1666: 0x8175, 0x1667: 0x8195, 0x1668: 0x81b5, 0x1669: 0x81d5, + 0x166a: 0x81f5, 0x166b: 0x8215, 0x166c: 0x8235, 0x166d: 0x8255, 0x166e: 0x8275, 0x166f: 0x8295, + 0x1670: 0x82b5, 0x1671: 0x82d5, 0x1672: 0x82f5, 0x1673: 0x8315, 0x1674: 0x8335, 0x1675: 0x8355, + 0x1676: 0x8375, 0x1677: 0x8395, 0x1678: 0x83b5, 0x1679: 0x83d5, 0x167a: 0x83f5, 0x167b: 0x8415, + 0x167c: 0x81b5, 0x167d: 0x8435, 0x167e: 0x8455, 0x167f: 0x8215, + // Block 0x5a, offset 0x1680 + 0x1680: 0x8475, 0x1681: 0x8495, 0x1682: 0x84b5, 0x1683: 0x84d5, 0x1684: 0x84f5, 0x1685: 0x8515, + 0x1686: 0x8535, 0x1687: 0x8555, 0x1688: 0x84d5, 0x1689: 0x8575, 0x168a: 0x84d5, 0x168b: 0x8595, + 0x168c: 0x8595, 0x168d: 0x85b5, 0x168e: 0x85b5, 0x168f: 0x85d5, 0x1690: 0x8515, 0x1691: 0x85f5, + 0x1692: 0x8615, 0x1693: 0x85f5, 0x1694: 0x8635, 0x1695: 0x8615, 0x1696: 0x8655, 0x1697: 0x8655, + 0x1698: 0x8675, 0x1699: 0x8675, 0x169a: 0x8695, 0x169b: 0x8695, 0x169c: 0x8615, 0x169d: 0x8115, + 0x169e: 0x86b5, 0x169f: 0x86d5, 0x16a0: 0x0040, 0x16a1: 0x86f5, 0x16a2: 0x8715, 0x16a3: 0x8735, + 0x16a4: 0x8755, 0x16a5: 0x8735, 0x16a6: 0x8775, 0x16a7: 0x8795, 0x16a8: 0x87b5, 
0x16a9: 0x87b5, + 0x16aa: 0x87d5, 0x16ab: 0x87d5, 0x16ac: 0x87f5, 0x16ad: 0x87f5, 0x16ae: 0x87d5, 0x16af: 0x87d5, + 0x16b0: 0x8815, 0x16b1: 0x8835, 0x16b2: 0x8855, 0x16b3: 0x8875, 0x16b4: 0x8895, 0x16b5: 0x88b5, + 0x16b6: 0x88b5, 0x16b7: 0x88b5, 0x16b8: 0x88d5, 0x16b9: 0x88d5, 0x16ba: 0x88d5, 0x16bb: 0x88d5, + 0x16bc: 0x87b5, 0x16bd: 0x87b5, 0x16be: 0x87b5, 0x16bf: 0x0040, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x0040, 0x16c1: 0x0040, 0x16c2: 0x8715, 0x16c3: 0x86f5, 0x16c4: 0x88f5, 0x16c5: 0x86f5, + 0x16c6: 0x8715, 0x16c7: 0x86f5, 0x16c8: 0x0040, 0x16c9: 0x0040, 0x16ca: 0x8915, 0x16cb: 0x8715, + 0x16cc: 0x8935, 0x16cd: 0x88f5, 0x16ce: 0x8935, 0x16cf: 0x8715, 0x16d0: 0x0040, 0x16d1: 0x0040, + 0x16d2: 0x8955, 0x16d3: 0x8975, 0x16d4: 0x8875, 0x16d5: 0x8935, 0x16d6: 0x88f5, 0x16d7: 0x8935, + 0x16d8: 0x0040, 0x16d9: 0x0040, 0x16da: 0x8995, 0x16db: 0x89b5, 0x16dc: 0x8995, 0x16dd: 0x0040, + 0x16de: 0x0040, 0x16df: 0x0040, 0x16e0: 0xb541, 0x16e1: 0xb559, 0x16e2: 0xb571, 0x16e3: 0x89d6, + 0x16e4: 0xb589, 0x16e5: 0xb5a1, 0x16e6: 0x89f5, 0x16e7: 0x0040, 0x16e8: 0x8a15, 0x16e9: 0x8a35, + 0x16ea: 0x8a55, 0x16eb: 0x8a35, 0x16ec: 0x8a75, 0x16ed: 0x8a95, 0x16ee: 0x8ab5, 0x16ef: 0x0040, + 0x16f0: 0x0040, 0x16f1: 0x0040, 0x16f2: 0x0040, 0x16f3: 0x0040, 0x16f4: 0x0040, 0x16f5: 0x0040, + 0x16f6: 0x0040, 0x16f7: 0x0040, 0x16f8: 0x0040, 0x16f9: 0x0340, 0x16fa: 0x0340, 0x16fb: 0x0340, + 0x16fc: 0x0040, 0x16fd: 0x0040, 0x16fe: 0x0040, 0x16ff: 0x0040, + // Block 0x5c, offset 0x1700 + 0x1700: 0x0a08, 0x1701: 0x0a08, 0x1702: 0x0a08, 0x1703: 0x0a08, 0x1704: 0x0a08, 0x1705: 0x0c08, + 0x1706: 0x0808, 0x1707: 0x0c08, 0x1708: 0x0818, 0x1709: 0x0c08, 0x170a: 0x0c08, 0x170b: 0x0808, + 0x170c: 0x0808, 0x170d: 0x0908, 0x170e: 0x0c08, 0x170f: 0x0c08, 0x1710: 0x0c08, 0x1711: 0x0c08, + 0x1712: 0x0c08, 0x1713: 0x0a08, 0x1714: 0x0a08, 0x1715: 0x0a08, 0x1716: 0x0a08, 0x1717: 0x0908, + 0x1718: 0x0a08, 0x1719: 0x0a08, 0x171a: 0x0a08, 0x171b: 0x0a08, 0x171c: 0x0a08, 0x171d: 0x0c08, + 0x171e: 0x0a08, 0x171f: 0x0a08, 0x1720: 0x0a08, 0x1721: 0x0c08, 0x1722: 0x0808, 0x1723: 0x0808, + 0x1724: 0x0c08, 0x1725: 0x3308, 0x1726: 0x3308, 0x1727: 0x0040, 0x1728: 0x0040, 0x1729: 0x0040, + 0x172a: 0x0040, 0x172b: 0x0a18, 0x172c: 0x0a18, 0x172d: 0x0a18, 0x172e: 0x0a18, 0x172f: 0x0c18, + 0x1730: 0x0818, 0x1731: 0x0818, 0x1732: 0x0818, 0x1733: 0x0818, 0x1734: 0x0818, 0x1735: 0x0818, + 0x1736: 0x0818, 0x1737: 0x0040, 0x1738: 0x0040, 0x1739: 0x0040, 0x173a: 0x0040, 0x173b: 0x0040, + 0x173c: 0x0040, 0x173d: 0x0040, 0x173e: 0x0040, 0x173f: 0x0040, + // Block 0x5d, offset 0x1740 + 0x1740: 0x0a08, 0x1741: 0x0c08, 0x1742: 0x0a08, 0x1743: 0x0c08, 0x1744: 0x0c08, 0x1745: 0x0c08, + 0x1746: 0x0a08, 0x1747: 0x0a08, 0x1748: 0x0a08, 0x1749: 0x0c08, 0x174a: 0x0a08, 0x174b: 0x0a08, + 0x174c: 0x0c08, 0x174d: 0x0a08, 0x174e: 0x0c08, 0x174f: 0x0c08, 0x1750: 0x0a08, 0x1751: 0x0c08, + 0x1752: 0x0040, 0x1753: 0x0040, 0x1754: 0x0040, 0x1755: 0x0040, 0x1756: 0x0040, 0x1757: 0x0040, + 0x1758: 0x0040, 0x1759: 0x0818, 0x175a: 0x0818, 0x175b: 0x0818, 0x175c: 0x0818, 0x175d: 0x0040, + 0x175e: 0x0040, 0x175f: 0x0040, 0x1760: 0x0040, 0x1761: 0x0040, 0x1762: 0x0040, 0x1763: 0x0040, + 0x1764: 0x0040, 0x1765: 0x0040, 0x1766: 0x0040, 0x1767: 0x0040, 0x1768: 0x0040, 0x1769: 0x0c18, + 0x176a: 0x0c18, 0x176b: 0x0c18, 0x176c: 0x0c18, 0x176d: 0x0a18, 0x176e: 0x0a18, 0x176f: 0x0818, + 0x1770: 0x0040, 0x1771: 0x0040, 0x1772: 0x0040, 0x1773: 0x0040, 0x1774: 0x0040, 0x1775: 0x0040, + 0x1776: 0x0040, 0x1777: 0x0040, 0x1778: 0x0040, 0x1779: 0x0040, 0x177a: 0x0040, 0x177b: 0x0040, + 0x177c: 
0x0040, 0x177d: 0x0040, 0x177e: 0x0040, 0x177f: 0x0040, + // Block 0x5e, offset 0x1780 + 0x1780: 0x3308, 0x1781: 0x3308, 0x1782: 0x3008, 0x1783: 0x3008, 0x1784: 0x0040, 0x1785: 0x0008, + 0x1786: 0x0008, 0x1787: 0x0008, 0x1788: 0x0008, 0x1789: 0x0008, 0x178a: 0x0008, 0x178b: 0x0008, + 0x178c: 0x0008, 0x178d: 0x0040, 0x178e: 0x0040, 0x178f: 0x0008, 0x1790: 0x0008, 0x1791: 0x0040, + 0x1792: 0x0040, 0x1793: 0x0008, 0x1794: 0x0008, 0x1795: 0x0008, 0x1796: 0x0008, 0x1797: 0x0008, + 0x1798: 0x0008, 0x1799: 0x0008, 0x179a: 0x0008, 0x179b: 0x0008, 0x179c: 0x0008, 0x179d: 0x0008, + 0x179e: 0x0008, 0x179f: 0x0008, 0x17a0: 0x0008, 0x17a1: 0x0008, 0x17a2: 0x0008, 0x17a3: 0x0008, + 0x17a4: 0x0008, 0x17a5: 0x0008, 0x17a6: 0x0008, 0x17a7: 0x0008, 0x17a8: 0x0008, 0x17a9: 0x0040, + 0x17aa: 0x0008, 0x17ab: 0x0008, 0x17ac: 0x0008, 0x17ad: 0x0008, 0x17ae: 0x0008, 0x17af: 0x0008, + 0x17b0: 0x0008, 0x17b1: 0x0040, 0x17b2: 0x0008, 0x17b3: 0x0008, 0x17b4: 0x0040, 0x17b5: 0x0008, + 0x17b6: 0x0008, 0x17b7: 0x0008, 0x17b8: 0x0008, 0x17b9: 0x0008, 0x17ba: 0x0040, 0x17bb: 0x0040, + 0x17bc: 0x3308, 0x17bd: 0x0008, 0x17be: 0x3008, 0x17bf: 0x3008, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x3308, 0x17c1: 0x3008, 0x17c2: 0x3008, 0x17c3: 0x3008, 0x17c4: 0x3008, 0x17c5: 0x0040, + 0x17c6: 0x0040, 0x17c7: 0x3008, 0x17c8: 0x3008, 0x17c9: 0x0040, 0x17ca: 0x0040, 0x17cb: 0x3008, + 0x17cc: 0x3008, 0x17cd: 0x3808, 0x17ce: 0x0040, 0x17cf: 0x0040, 0x17d0: 0x0008, 0x17d1: 0x0040, + 0x17d2: 0x0040, 0x17d3: 0x0040, 0x17d4: 0x0040, 0x17d5: 0x0040, 0x17d6: 0x0040, 0x17d7: 0x3008, + 0x17d8: 0x0040, 0x17d9: 0x0040, 0x17da: 0x0040, 0x17db: 0x0040, 0x17dc: 0x0040, 0x17dd: 0x0008, + 0x17de: 0x0008, 0x17df: 0x0008, 0x17e0: 0x0008, 0x17e1: 0x0008, 0x17e2: 0x3008, 0x17e3: 0x3008, + 0x17e4: 0x0040, 0x17e5: 0x0040, 0x17e6: 0x3308, 0x17e7: 0x3308, 0x17e8: 0x3308, 0x17e9: 0x3308, + 0x17ea: 0x3308, 0x17eb: 0x3308, 0x17ec: 0x3308, 0x17ed: 0x0040, 0x17ee: 0x0040, 0x17ef: 0x0040, + 0x17f0: 0x3308, 0x17f1: 0x3308, 0x17f2: 0x3308, 0x17f3: 0x3308, 0x17f4: 0x3308, 0x17f5: 0x0040, + 0x17f6: 0x0040, 0x17f7: 0x0040, 0x17f8: 0x0040, 0x17f9: 0x0040, 0x17fa: 0x0040, 0x17fb: 0x0040, + 0x17fc: 0x0040, 0x17fd: 0x0040, 0x17fe: 0x0040, 0x17ff: 0x0040, + // Block 0x60, offset 0x1800 + 0x1800: 0x0039, 0x1801: 0x0ee9, 0x1802: 0x1159, 0x1803: 0x0ef9, 0x1804: 0x0f09, 0x1805: 0x1199, + 0x1806: 0x0f31, 0x1807: 0x0249, 0x1808: 0x0f41, 0x1809: 0x0259, 0x180a: 0x0f51, 0x180b: 0x0359, + 0x180c: 0x0f61, 0x180d: 0x0f71, 0x180e: 0x00d9, 0x180f: 0x0f99, 0x1810: 0x2039, 0x1811: 0x0269, + 0x1812: 0x01d9, 0x1813: 0x0fa9, 0x1814: 0x0fb9, 0x1815: 0x1089, 0x1816: 0x0279, 0x1817: 0x0369, + 0x1818: 0x0289, 0x1819: 0x13d1, 0x181a: 0x0039, 0x181b: 0x0ee9, 0x181c: 0x1159, 0x181d: 0x0ef9, + 0x181e: 0x0f09, 0x181f: 0x1199, 0x1820: 0x0f31, 0x1821: 0x0249, 0x1822: 0x0f41, 0x1823: 0x0259, + 0x1824: 0x0f51, 0x1825: 0x0359, 0x1826: 0x0f61, 0x1827: 0x0f71, 0x1828: 0x00d9, 0x1829: 0x0f99, + 0x182a: 0x2039, 0x182b: 0x0269, 0x182c: 0x01d9, 0x182d: 0x0fa9, 0x182e: 0x0fb9, 0x182f: 0x1089, + 0x1830: 0x0279, 0x1831: 0x0369, 0x1832: 0x0289, 0x1833: 0x13d1, 0x1834: 0x0039, 0x1835: 0x0ee9, + 0x1836: 0x1159, 0x1837: 0x0ef9, 0x1838: 0x0f09, 0x1839: 0x1199, 0x183a: 0x0f31, 0x183b: 0x0249, + 0x183c: 0x0f41, 0x183d: 0x0259, 0x183e: 0x0f51, 0x183f: 0x0359, + // Block 0x61, offset 0x1840 + 0x1840: 0x0f61, 0x1841: 0x0f71, 0x1842: 0x00d9, 0x1843: 0x0f99, 0x1844: 0x2039, 0x1845: 0x0269, + 0x1846: 0x01d9, 0x1847: 0x0fa9, 0x1848: 0x0fb9, 0x1849: 0x1089, 0x184a: 0x0279, 0x184b: 0x0369, + 0x184c: 0x0289, 0x184d: 0x13d1, 
0x184e: 0x0039, 0x184f: 0x0ee9, 0x1850: 0x1159, 0x1851: 0x0ef9, + 0x1852: 0x0f09, 0x1853: 0x1199, 0x1854: 0x0f31, 0x1855: 0x0040, 0x1856: 0x0f41, 0x1857: 0x0259, + 0x1858: 0x0f51, 0x1859: 0x0359, 0x185a: 0x0f61, 0x185b: 0x0f71, 0x185c: 0x00d9, 0x185d: 0x0f99, + 0x185e: 0x2039, 0x185f: 0x0269, 0x1860: 0x01d9, 0x1861: 0x0fa9, 0x1862: 0x0fb9, 0x1863: 0x1089, + 0x1864: 0x0279, 0x1865: 0x0369, 0x1866: 0x0289, 0x1867: 0x13d1, 0x1868: 0x0039, 0x1869: 0x0ee9, + 0x186a: 0x1159, 0x186b: 0x0ef9, 0x186c: 0x0f09, 0x186d: 0x1199, 0x186e: 0x0f31, 0x186f: 0x0249, + 0x1870: 0x0f41, 0x1871: 0x0259, 0x1872: 0x0f51, 0x1873: 0x0359, 0x1874: 0x0f61, 0x1875: 0x0f71, + 0x1876: 0x00d9, 0x1877: 0x0f99, 0x1878: 0x2039, 0x1879: 0x0269, 0x187a: 0x01d9, 0x187b: 0x0fa9, + 0x187c: 0x0fb9, 0x187d: 0x1089, 0x187e: 0x0279, 0x187f: 0x0369, + // Block 0x62, offset 0x1880 + 0x1880: 0x0289, 0x1881: 0x13d1, 0x1882: 0x0039, 0x1883: 0x0ee9, 0x1884: 0x1159, 0x1885: 0x0ef9, + 0x1886: 0x0f09, 0x1887: 0x1199, 0x1888: 0x0f31, 0x1889: 0x0249, 0x188a: 0x0f41, 0x188b: 0x0259, + 0x188c: 0x0f51, 0x188d: 0x0359, 0x188e: 0x0f61, 0x188f: 0x0f71, 0x1890: 0x00d9, 0x1891: 0x0f99, + 0x1892: 0x2039, 0x1893: 0x0269, 0x1894: 0x01d9, 0x1895: 0x0fa9, 0x1896: 0x0fb9, 0x1897: 0x1089, + 0x1898: 0x0279, 0x1899: 0x0369, 0x189a: 0x0289, 0x189b: 0x13d1, 0x189c: 0x0039, 0x189d: 0x0040, + 0x189e: 0x1159, 0x189f: 0x0ef9, 0x18a0: 0x0040, 0x18a1: 0x0040, 0x18a2: 0x0f31, 0x18a3: 0x0040, + 0x18a4: 0x0040, 0x18a5: 0x0259, 0x18a6: 0x0f51, 0x18a7: 0x0040, 0x18a8: 0x0040, 0x18a9: 0x0f71, + 0x18aa: 0x00d9, 0x18ab: 0x0f99, 0x18ac: 0x2039, 0x18ad: 0x0040, 0x18ae: 0x01d9, 0x18af: 0x0fa9, + 0x18b0: 0x0fb9, 0x18b1: 0x1089, 0x18b2: 0x0279, 0x18b3: 0x0369, 0x18b4: 0x0289, 0x18b5: 0x13d1, + 0x18b6: 0x0039, 0x18b7: 0x0ee9, 0x18b8: 0x1159, 0x18b9: 0x0ef9, 0x18ba: 0x0040, 0x18bb: 0x1199, + 0x18bc: 0x0040, 0x18bd: 0x0249, 0x18be: 0x0f41, 0x18bf: 0x0259, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x0f51, 0x18c1: 0x0359, 0x18c2: 0x0f61, 0x18c3: 0x0f71, 0x18c4: 0x0040, 0x18c5: 0x0f99, + 0x18c6: 0x2039, 0x18c7: 0x0269, 0x18c8: 0x01d9, 0x18c9: 0x0fa9, 0x18ca: 0x0fb9, 0x18cb: 0x1089, + 0x18cc: 0x0279, 0x18cd: 0x0369, 0x18ce: 0x0289, 0x18cf: 0x13d1, 0x18d0: 0x0039, 0x18d1: 0x0ee9, + 0x18d2: 0x1159, 0x18d3: 0x0ef9, 0x18d4: 0x0f09, 0x18d5: 0x1199, 0x18d6: 0x0f31, 0x18d7: 0x0249, + 0x18d8: 0x0f41, 0x18d9: 0x0259, 0x18da: 0x0f51, 0x18db: 0x0359, 0x18dc: 0x0f61, 0x18dd: 0x0f71, + 0x18de: 0x00d9, 0x18df: 0x0f99, 0x18e0: 0x2039, 0x18e1: 0x0269, 0x18e2: 0x01d9, 0x18e3: 0x0fa9, + 0x18e4: 0x0fb9, 0x18e5: 0x1089, 0x18e6: 0x0279, 0x18e7: 0x0369, 0x18e8: 0x0289, 0x18e9: 0x13d1, + 0x18ea: 0x0039, 0x18eb: 0x0ee9, 0x18ec: 0x1159, 0x18ed: 0x0ef9, 0x18ee: 0x0f09, 0x18ef: 0x1199, + 0x18f0: 0x0f31, 0x18f1: 0x0249, 0x18f2: 0x0f41, 0x18f3: 0x0259, 0x18f4: 0x0f51, 0x18f5: 0x0359, + 0x18f6: 0x0f61, 0x18f7: 0x0f71, 0x18f8: 0x00d9, 0x18f9: 0x0f99, 0x18fa: 0x2039, 0x18fb: 0x0269, + 0x18fc: 0x01d9, 0x18fd: 0x0fa9, 0x18fe: 0x0fb9, 0x18ff: 0x1089, + // Block 0x64, offset 0x1900 + 0x1900: 0x0279, 0x1901: 0x0369, 0x1902: 0x0289, 0x1903: 0x13d1, 0x1904: 0x0039, 0x1905: 0x0ee9, + 0x1906: 0x0040, 0x1907: 0x0ef9, 0x1908: 0x0f09, 0x1909: 0x1199, 0x190a: 0x0f31, 0x190b: 0x0040, + 0x190c: 0x0040, 0x190d: 0x0259, 0x190e: 0x0f51, 0x190f: 0x0359, 0x1910: 0x0f61, 0x1911: 0x0f71, + 0x1912: 0x00d9, 0x1913: 0x0f99, 0x1914: 0x2039, 0x1915: 0x0040, 0x1916: 0x01d9, 0x1917: 0x0fa9, + 0x1918: 0x0fb9, 0x1919: 0x1089, 0x191a: 0x0279, 0x191b: 0x0369, 0x191c: 0x0289, 0x191d: 0x0040, + 0x191e: 0x0039, 0x191f: 0x0ee9, 0x1920: 0x1159, 0x1921: 
0x0ef9, 0x1922: 0x0f09, 0x1923: 0x1199, + 0x1924: 0x0f31, 0x1925: 0x0249, 0x1926: 0x0f41, 0x1927: 0x0259, 0x1928: 0x0f51, 0x1929: 0x0359, + 0x192a: 0x0f61, 0x192b: 0x0f71, 0x192c: 0x00d9, 0x192d: 0x0f99, 0x192e: 0x2039, 0x192f: 0x0269, + 0x1930: 0x01d9, 0x1931: 0x0fa9, 0x1932: 0x0fb9, 0x1933: 0x1089, 0x1934: 0x0279, 0x1935: 0x0369, + 0x1936: 0x0289, 0x1937: 0x13d1, 0x1938: 0x0039, 0x1939: 0x0ee9, 0x193a: 0x0040, 0x193b: 0x0ef9, + 0x193c: 0x0f09, 0x193d: 0x1199, 0x193e: 0x0f31, 0x193f: 0x0040, + // Block 0x65, offset 0x1940 + 0x1940: 0x0f41, 0x1941: 0x0259, 0x1942: 0x0f51, 0x1943: 0x0359, 0x1944: 0x0f61, 0x1945: 0x0040, + 0x1946: 0x00d9, 0x1947: 0x0040, 0x1948: 0x0040, 0x1949: 0x0040, 0x194a: 0x01d9, 0x194b: 0x0fa9, + 0x194c: 0x0fb9, 0x194d: 0x1089, 0x194e: 0x0279, 0x194f: 0x0369, 0x1950: 0x0289, 0x1951: 0x0040, + 0x1952: 0x0039, 0x1953: 0x0ee9, 0x1954: 0x1159, 0x1955: 0x0ef9, 0x1956: 0x0f09, 0x1957: 0x1199, + 0x1958: 0x0f31, 0x1959: 0x0249, 0x195a: 0x0f41, 0x195b: 0x0259, 0x195c: 0x0f51, 0x195d: 0x0359, + 0x195e: 0x0f61, 0x195f: 0x0f71, 0x1960: 0x00d9, 0x1961: 0x0f99, 0x1962: 0x2039, 0x1963: 0x0269, + 0x1964: 0x01d9, 0x1965: 0x0fa9, 0x1966: 0x0fb9, 0x1967: 0x1089, 0x1968: 0x0279, 0x1969: 0x0369, + 0x196a: 0x0289, 0x196b: 0x13d1, 0x196c: 0x0039, 0x196d: 0x0ee9, 0x196e: 0x1159, 0x196f: 0x0ef9, + 0x1970: 0x0f09, 0x1971: 0x1199, 0x1972: 0x0f31, 0x1973: 0x0249, 0x1974: 0x0f41, 0x1975: 0x0259, + 0x1976: 0x0f51, 0x1977: 0x0359, 0x1978: 0x0f61, 0x1979: 0x0f71, 0x197a: 0x00d9, 0x197b: 0x0f99, + 0x197c: 0x2039, 0x197d: 0x0269, 0x197e: 0x01d9, 0x197f: 0x0fa9, + // Block 0x66, offset 0x1980 + 0x1980: 0x0fb9, 0x1981: 0x1089, 0x1982: 0x0279, 0x1983: 0x0369, 0x1984: 0x0289, 0x1985: 0x13d1, + 0x1986: 0x0039, 0x1987: 0x0ee9, 0x1988: 0x1159, 0x1989: 0x0ef9, 0x198a: 0x0f09, 0x198b: 0x1199, + 0x198c: 0x0f31, 0x198d: 0x0249, 0x198e: 0x0f41, 0x198f: 0x0259, 0x1990: 0x0f51, 0x1991: 0x0359, + 0x1992: 0x0f61, 0x1993: 0x0f71, 0x1994: 0x00d9, 0x1995: 0x0f99, 0x1996: 0x2039, 0x1997: 0x0269, + 0x1998: 0x01d9, 0x1999: 0x0fa9, 0x199a: 0x0fb9, 0x199b: 0x1089, 0x199c: 0x0279, 0x199d: 0x0369, + 0x199e: 0x0289, 0x199f: 0x13d1, 0x19a0: 0x0039, 0x19a1: 0x0ee9, 0x19a2: 0x1159, 0x19a3: 0x0ef9, + 0x19a4: 0x0f09, 0x19a5: 0x1199, 0x19a6: 0x0f31, 0x19a7: 0x0249, 0x19a8: 0x0f41, 0x19a9: 0x0259, + 0x19aa: 0x0f51, 0x19ab: 0x0359, 0x19ac: 0x0f61, 0x19ad: 0x0f71, 0x19ae: 0x00d9, 0x19af: 0x0f99, + 0x19b0: 0x2039, 0x19b1: 0x0269, 0x19b2: 0x01d9, 0x19b3: 0x0fa9, 0x19b4: 0x0fb9, 0x19b5: 0x1089, + 0x19b6: 0x0279, 0x19b7: 0x0369, 0x19b8: 0x0289, 0x19b9: 0x13d1, 0x19ba: 0x0039, 0x19bb: 0x0ee9, + 0x19bc: 0x1159, 0x19bd: 0x0ef9, 0x19be: 0x0f09, 0x19bf: 0x1199, + // Block 0x67, offset 0x19c0 + 0x19c0: 0x0f31, 0x19c1: 0x0249, 0x19c2: 0x0f41, 0x19c3: 0x0259, 0x19c4: 0x0f51, 0x19c5: 0x0359, + 0x19c6: 0x0f61, 0x19c7: 0x0f71, 0x19c8: 0x00d9, 0x19c9: 0x0f99, 0x19ca: 0x2039, 0x19cb: 0x0269, + 0x19cc: 0x01d9, 0x19cd: 0x0fa9, 0x19ce: 0x0fb9, 0x19cf: 0x1089, 0x19d0: 0x0279, 0x19d1: 0x0369, + 0x19d2: 0x0289, 0x19d3: 0x13d1, 0x19d4: 0x0039, 0x19d5: 0x0ee9, 0x19d6: 0x1159, 0x19d7: 0x0ef9, + 0x19d8: 0x0f09, 0x19d9: 0x1199, 0x19da: 0x0f31, 0x19db: 0x0249, 0x19dc: 0x0f41, 0x19dd: 0x0259, + 0x19de: 0x0f51, 0x19df: 0x0359, 0x19e0: 0x0f61, 0x19e1: 0x0f71, 0x19e2: 0x00d9, 0x19e3: 0x0f99, + 0x19e4: 0x2039, 0x19e5: 0x0269, 0x19e6: 0x01d9, 0x19e7: 0x0fa9, 0x19e8: 0x0fb9, 0x19e9: 0x1089, + 0x19ea: 0x0279, 0x19eb: 0x0369, 0x19ec: 0x0289, 0x19ed: 0x13d1, 0x19ee: 0x0039, 0x19ef: 0x0ee9, + 0x19f0: 0x1159, 0x19f1: 0x0ef9, 0x19f2: 0x0f09, 0x19f3: 0x1199, 0x19f4: 0x0f31, 
0x19f5: 0x0249, + 0x19f6: 0x0f41, 0x19f7: 0x0259, 0x19f8: 0x0f51, 0x19f9: 0x0359, 0x19fa: 0x0f61, 0x19fb: 0x0f71, + 0x19fc: 0x00d9, 0x19fd: 0x0f99, 0x19fe: 0x2039, 0x19ff: 0x0269, + // Block 0x68, offset 0x1a00 + 0x1a00: 0x01d9, 0x1a01: 0x0fa9, 0x1a02: 0x0fb9, 0x1a03: 0x1089, 0x1a04: 0x0279, 0x1a05: 0x0369, + 0x1a06: 0x0289, 0x1a07: 0x13d1, 0x1a08: 0x0039, 0x1a09: 0x0ee9, 0x1a0a: 0x1159, 0x1a0b: 0x0ef9, + 0x1a0c: 0x0f09, 0x1a0d: 0x1199, 0x1a0e: 0x0f31, 0x1a0f: 0x0249, 0x1a10: 0x0f41, 0x1a11: 0x0259, + 0x1a12: 0x0f51, 0x1a13: 0x0359, 0x1a14: 0x0f61, 0x1a15: 0x0f71, 0x1a16: 0x00d9, 0x1a17: 0x0f99, + 0x1a18: 0x2039, 0x1a19: 0x0269, 0x1a1a: 0x01d9, 0x1a1b: 0x0fa9, 0x1a1c: 0x0fb9, 0x1a1d: 0x1089, + 0x1a1e: 0x0279, 0x1a1f: 0x0369, 0x1a20: 0x0289, 0x1a21: 0x13d1, 0x1a22: 0x0039, 0x1a23: 0x0ee9, + 0x1a24: 0x1159, 0x1a25: 0x0ef9, 0x1a26: 0x0f09, 0x1a27: 0x1199, 0x1a28: 0x0f31, 0x1a29: 0x0249, + 0x1a2a: 0x0f41, 0x1a2b: 0x0259, 0x1a2c: 0x0f51, 0x1a2d: 0x0359, 0x1a2e: 0x0f61, 0x1a2f: 0x0f71, + 0x1a30: 0x00d9, 0x1a31: 0x0f99, 0x1a32: 0x2039, 0x1a33: 0x0269, 0x1a34: 0x01d9, 0x1a35: 0x0fa9, + 0x1a36: 0x0fb9, 0x1a37: 0x1089, 0x1a38: 0x0279, 0x1a39: 0x0369, 0x1a3a: 0x0289, 0x1a3b: 0x13d1, + 0x1a3c: 0x0039, 0x1a3d: 0x0ee9, 0x1a3e: 0x1159, 0x1a3f: 0x0ef9, + // Block 0x69, offset 0x1a40 + 0x1a40: 0x0f09, 0x1a41: 0x1199, 0x1a42: 0x0f31, 0x1a43: 0x0249, 0x1a44: 0x0f41, 0x1a45: 0x0259, + 0x1a46: 0x0f51, 0x1a47: 0x0359, 0x1a48: 0x0f61, 0x1a49: 0x0f71, 0x1a4a: 0x00d9, 0x1a4b: 0x0f99, + 0x1a4c: 0x2039, 0x1a4d: 0x0269, 0x1a4e: 0x01d9, 0x1a4f: 0x0fa9, 0x1a50: 0x0fb9, 0x1a51: 0x1089, + 0x1a52: 0x0279, 0x1a53: 0x0369, 0x1a54: 0x0289, 0x1a55: 0x13d1, 0x1a56: 0x0039, 0x1a57: 0x0ee9, + 0x1a58: 0x1159, 0x1a59: 0x0ef9, 0x1a5a: 0x0f09, 0x1a5b: 0x1199, 0x1a5c: 0x0f31, 0x1a5d: 0x0249, + 0x1a5e: 0x0f41, 0x1a5f: 0x0259, 0x1a60: 0x0f51, 0x1a61: 0x0359, 0x1a62: 0x0f61, 0x1a63: 0x0f71, + 0x1a64: 0x00d9, 0x1a65: 0x0f99, 0x1a66: 0x2039, 0x1a67: 0x0269, 0x1a68: 0x01d9, 0x1a69: 0x0fa9, + 0x1a6a: 0x0fb9, 0x1a6b: 0x1089, 0x1a6c: 0x0279, 0x1a6d: 0x0369, 0x1a6e: 0x0289, 0x1a6f: 0x13d1, + 0x1a70: 0x0039, 0x1a71: 0x0ee9, 0x1a72: 0x1159, 0x1a73: 0x0ef9, 0x1a74: 0x0f09, 0x1a75: 0x1199, + 0x1a76: 0x0f31, 0x1a77: 0x0249, 0x1a78: 0x0f41, 0x1a79: 0x0259, 0x1a7a: 0x0f51, 0x1a7b: 0x0359, + 0x1a7c: 0x0f61, 0x1a7d: 0x0f71, 0x1a7e: 0x00d9, 0x1a7f: 0x0f99, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x2039, 0x1a81: 0x0269, 0x1a82: 0x01d9, 0x1a83: 0x0fa9, 0x1a84: 0x0fb9, 0x1a85: 0x1089, + 0x1a86: 0x0279, 0x1a87: 0x0369, 0x1a88: 0x0289, 0x1a89: 0x13d1, 0x1a8a: 0x0039, 0x1a8b: 0x0ee9, + 0x1a8c: 0x1159, 0x1a8d: 0x0ef9, 0x1a8e: 0x0f09, 0x1a8f: 0x1199, 0x1a90: 0x0f31, 0x1a91: 0x0249, + 0x1a92: 0x0f41, 0x1a93: 0x0259, 0x1a94: 0x0f51, 0x1a95: 0x0359, 0x1a96: 0x0f61, 0x1a97: 0x0f71, + 0x1a98: 0x00d9, 0x1a99: 0x0f99, 0x1a9a: 0x2039, 0x1a9b: 0x0269, 0x1a9c: 0x01d9, 0x1a9d: 0x0fa9, + 0x1a9e: 0x0fb9, 0x1a9f: 0x1089, 0x1aa0: 0x0279, 0x1aa1: 0x0369, 0x1aa2: 0x0289, 0x1aa3: 0x13d1, + 0x1aa4: 0xba81, 0x1aa5: 0xba99, 0x1aa6: 0x0040, 0x1aa7: 0x0040, 0x1aa8: 0xbab1, 0x1aa9: 0x1099, + 0x1aaa: 0x10b1, 0x1aab: 0x10c9, 0x1aac: 0xbac9, 0x1aad: 0xbae1, 0x1aae: 0xbaf9, 0x1aaf: 0x1429, + 0x1ab0: 0x1a31, 0x1ab1: 0xbb11, 0x1ab2: 0xbb29, 0x1ab3: 0xbb41, 0x1ab4: 0xbb59, 0x1ab5: 0xbb71, + 0x1ab6: 0xbb89, 0x1ab7: 0x2109, 0x1ab8: 0x1111, 0x1ab9: 0x1429, 0x1aba: 0xbba1, 0x1abb: 0xbbb9, + 0x1abc: 0xbbd1, 0x1abd: 0x10e1, 0x1abe: 0x10f9, 0x1abf: 0xbbe9, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x2079, 0x1ac1: 0xbc01, 0x1ac2: 0xbab1, 0x1ac3: 0x1099, 0x1ac4: 0x10b1, 0x1ac5: 0x10c9, + 0x1ac6: 
0xbac9, 0x1ac7: 0xbae1, 0x1ac8: 0xbaf9, 0x1ac9: 0x1429, 0x1aca: 0x1a31, 0x1acb: 0xbb11, + 0x1acc: 0xbb29, 0x1acd: 0xbb41, 0x1ace: 0xbb59, 0x1acf: 0xbb71, 0x1ad0: 0xbb89, 0x1ad1: 0x2109, + 0x1ad2: 0x1111, 0x1ad3: 0xbba1, 0x1ad4: 0xbba1, 0x1ad5: 0xbbb9, 0x1ad6: 0xbbd1, 0x1ad7: 0x10e1, + 0x1ad8: 0x10f9, 0x1ad9: 0xbbe9, 0x1ada: 0x2079, 0x1adb: 0xbc21, 0x1adc: 0xbac9, 0x1add: 0x1429, + 0x1ade: 0xbb11, 0x1adf: 0x10e1, 0x1ae0: 0x1111, 0x1ae1: 0x2109, 0x1ae2: 0xbab1, 0x1ae3: 0x1099, + 0x1ae4: 0x10b1, 0x1ae5: 0x10c9, 0x1ae6: 0xbac9, 0x1ae7: 0xbae1, 0x1ae8: 0xbaf9, 0x1ae9: 0x1429, + 0x1aea: 0x1a31, 0x1aeb: 0xbb11, 0x1aec: 0xbb29, 0x1aed: 0xbb41, 0x1aee: 0xbb59, 0x1aef: 0xbb71, + 0x1af0: 0xbb89, 0x1af1: 0x2109, 0x1af2: 0x1111, 0x1af3: 0x1429, 0x1af4: 0xbba1, 0x1af5: 0xbbb9, + 0x1af6: 0xbbd1, 0x1af7: 0x10e1, 0x1af8: 0x10f9, 0x1af9: 0xbbe9, 0x1afa: 0x2079, 0x1afb: 0xbc01, + 0x1afc: 0xbab1, 0x1afd: 0x1099, 0x1afe: 0x10b1, 0x1aff: 0x10c9, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0xbac9, 0x1b01: 0xbae1, 0x1b02: 0xbaf9, 0x1b03: 0x1429, 0x1b04: 0x1a31, 0x1b05: 0xbb11, + 0x1b06: 0xbb29, 0x1b07: 0xbb41, 0x1b08: 0xbb59, 0x1b09: 0xbb71, 0x1b0a: 0xbb89, 0x1b0b: 0x2109, + 0x1b0c: 0x1111, 0x1b0d: 0xbba1, 0x1b0e: 0xbba1, 0x1b0f: 0xbbb9, 0x1b10: 0xbbd1, 0x1b11: 0x10e1, + 0x1b12: 0x10f9, 0x1b13: 0xbbe9, 0x1b14: 0x2079, 0x1b15: 0xbc21, 0x1b16: 0xbac9, 0x1b17: 0x1429, + 0x1b18: 0xbb11, 0x1b19: 0x10e1, 0x1b1a: 0x1111, 0x1b1b: 0x2109, 0x1b1c: 0xbab1, 0x1b1d: 0x1099, + 0x1b1e: 0x10b1, 0x1b1f: 0x10c9, 0x1b20: 0xbac9, 0x1b21: 0xbae1, 0x1b22: 0xbaf9, 0x1b23: 0x1429, + 0x1b24: 0x1a31, 0x1b25: 0xbb11, 0x1b26: 0xbb29, 0x1b27: 0xbb41, 0x1b28: 0xbb59, 0x1b29: 0xbb71, + 0x1b2a: 0xbb89, 0x1b2b: 0x2109, 0x1b2c: 0x1111, 0x1b2d: 0x1429, 0x1b2e: 0xbba1, 0x1b2f: 0xbbb9, + 0x1b30: 0xbbd1, 0x1b31: 0x10e1, 0x1b32: 0x10f9, 0x1b33: 0xbbe9, 0x1b34: 0x2079, 0x1b35: 0xbc01, + 0x1b36: 0xbab1, 0x1b37: 0x1099, 0x1b38: 0x10b1, 0x1b39: 0x10c9, 0x1b3a: 0xbac9, 0x1b3b: 0xbae1, + 0x1b3c: 0xbaf9, 0x1b3d: 0x1429, 0x1b3e: 0x1a31, 0x1b3f: 0xbb11, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0xbb29, 0x1b41: 0xbb41, 0x1b42: 0xbb59, 0x1b43: 0xbb71, 0x1b44: 0xbb89, 0x1b45: 0x2109, + 0x1b46: 0x1111, 0x1b47: 0xbba1, 0x1b48: 0xbba1, 0x1b49: 0xbbb9, 0x1b4a: 0xbbd1, 0x1b4b: 0x10e1, + 0x1b4c: 0x10f9, 0x1b4d: 0xbbe9, 0x1b4e: 0x2079, 0x1b4f: 0xbc21, 0x1b50: 0xbac9, 0x1b51: 0x1429, + 0x1b52: 0xbb11, 0x1b53: 0x10e1, 0x1b54: 0x1111, 0x1b55: 0x2109, 0x1b56: 0xbab1, 0x1b57: 0x1099, + 0x1b58: 0x10b1, 0x1b59: 0x10c9, 0x1b5a: 0xbac9, 0x1b5b: 0xbae1, 0x1b5c: 0xbaf9, 0x1b5d: 0x1429, + 0x1b5e: 0x1a31, 0x1b5f: 0xbb11, 0x1b60: 0xbb29, 0x1b61: 0xbb41, 0x1b62: 0xbb59, 0x1b63: 0xbb71, + 0x1b64: 0xbb89, 0x1b65: 0x2109, 0x1b66: 0x1111, 0x1b67: 0x1429, 0x1b68: 0xbba1, 0x1b69: 0xbbb9, + 0x1b6a: 0xbbd1, 0x1b6b: 0x10e1, 0x1b6c: 0x10f9, 0x1b6d: 0xbbe9, 0x1b6e: 0x2079, 0x1b6f: 0xbc01, + 0x1b70: 0xbab1, 0x1b71: 0x1099, 0x1b72: 0x10b1, 0x1b73: 0x10c9, 0x1b74: 0xbac9, 0x1b75: 0xbae1, + 0x1b76: 0xbaf9, 0x1b77: 0x1429, 0x1b78: 0x1a31, 0x1b79: 0xbb11, 0x1b7a: 0xbb29, 0x1b7b: 0xbb41, + 0x1b7c: 0xbb59, 0x1b7d: 0xbb71, 0x1b7e: 0xbb89, 0x1b7f: 0x2109, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x1111, 0x1b81: 0xbba1, 0x1b82: 0xbba1, 0x1b83: 0xbbb9, 0x1b84: 0xbbd1, 0x1b85: 0x10e1, + 0x1b86: 0x10f9, 0x1b87: 0xbbe9, 0x1b88: 0x2079, 0x1b89: 0xbc21, 0x1b8a: 0xbac9, 0x1b8b: 0x1429, + 0x1b8c: 0xbb11, 0x1b8d: 0x10e1, 0x1b8e: 0x1111, 0x1b8f: 0x2109, 0x1b90: 0xbab1, 0x1b91: 0x1099, + 0x1b92: 0x10b1, 0x1b93: 0x10c9, 0x1b94: 0xbac9, 0x1b95: 0xbae1, 0x1b96: 0xbaf9, 0x1b97: 0x1429, + 0x1b98: 0x1a31, 0x1b99: 0xbb11, 
0x1b9a: 0xbb29, 0x1b9b: 0xbb41, 0x1b9c: 0xbb59, 0x1b9d: 0xbb71, + 0x1b9e: 0xbb89, 0x1b9f: 0x2109, 0x1ba0: 0x1111, 0x1ba1: 0x1429, 0x1ba2: 0xbba1, 0x1ba3: 0xbbb9, + 0x1ba4: 0xbbd1, 0x1ba5: 0x10e1, 0x1ba6: 0x10f9, 0x1ba7: 0xbbe9, 0x1ba8: 0x2079, 0x1ba9: 0xbc01, + 0x1baa: 0xbab1, 0x1bab: 0x1099, 0x1bac: 0x10b1, 0x1bad: 0x10c9, 0x1bae: 0xbac9, 0x1baf: 0xbae1, + 0x1bb0: 0xbaf9, 0x1bb1: 0x1429, 0x1bb2: 0x1a31, 0x1bb3: 0xbb11, 0x1bb4: 0xbb29, 0x1bb5: 0xbb41, + 0x1bb6: 0xbb59, 0x1bb7: 0xbb71, 0x1bb8: 0xbb89, 0x1bb9: 0x2109, 0x1bba: 0x1111, 0x1bbb: 0xbba1, + 0x1bbc: 0xbba1, 0x1bbd: 0xbbb9, 0x1bbe: 0xbbd1, 0x1bbf: 0x10e1, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x10f9, 0x1bc1: 0xbbe9, 0x1bc2: 0x2079, 0x1bc3: 0xbc21, 0x1bc4: 0xbac9, 0x1bc5: 0x1429, + 0x1bc6: 0xbb11, 0x1bc7: 0x10e1, 0x1bc8: 0x1111, 0x1bc9: 0x2109, 0x1bca: 0xbc41, 0x1bcb: 0xbc41, + 0x1bcc: 0x0040, 0x1bcd: 0x0040, 0x1bce: 0x1f41, 0x1bcf: 0x00c9, 0x1bd0: 0x0069, 0x1bd1: 0x0079, + 0x1bd2: 0x1f51, 0x1bd3: 0x1f61, 0x1bd4: 0x1f71, 0x1bd5: 0x1f81, 0x1bd6: 0x1f91, 0x1bd7: 0x1fa1, + 0x1bd8: 0x1f41, 0x1bd9: 0x00c9, 0x1bda: 0x0069, 0x1bdb: 0x0079, 0x1bdc: 0x1f51, 0x1bdd: 0x1f61, + 0x1bde: 0x1f71, 0x1bdf: 0x1f81, 0x1be0: 0x1f91, 0x1be1: 0x1fa1, 0x1be2: 0x1f41, 0x1be3: 0x00c9, + 0x1be4: 0x0069, 0x1be5: 0x0079, 0x1be6: 0x1f51, 0x1be7: 0x1f61, 0x1be8: 0x1f71, 0x1be9: 0x1f81, + 0x1bea: 0x1f91, 0x1beb: 0x1fa1, 0x1bec: 0x1f41, 0x1bed: 0x00c9, 0x1bee: 0x0069, 0x1bef: 0x0079, + 0x1bf0: 0x1f51, 0x1bf1: 0x1f61, 0x1bf2: 0x1f71, 0x1bf3: 0x1f81, 0x1bf4: 0x1f91, 0x1bf5: 0x1fa1, + 0x1bf6: 0x1f41, 0x1bf7: 0x00c9, 0x1bf8: 0x0069, 0x1bf9: 0x0079, 0x1bfa: 0x1f51, 0x1bfb: 0x1f61, + 0x1bfc: 0x1f71, 0x1bfd: 0x1f81, 0x1bfe: 0x1f91, 0x1bff: 0x1fa1, + // Block 0x70, offset 0x1c00 + 0x1c00: 0xe115, 0x1c01: 0xe115, 0x1c02: 0xe135, 0x1c03: 0xe135, 0x1c04: 0xe115, 0x1c05: 0xe115, + 0x1c06: 0xe175, 0x1c07: 0xe175, 0x1c08: 0xe115, 0x1c09: 0xe115, 0x1c0a: 0xe135, 0x1c0b: 0xe135, + 0x1c0c: 0xe115, 0x1c0d: 0xe115, 0x1c0e: 0xe1f5, 0x1c0f: 0xe1f5, 0x1c10: 0xe115, 0x1c11: 0xe115, + 0x1c12: 0xe135, 0x1c13: 0xe135, 0x1c14: 0xe115, 0x1c15: 0xe115, 0x1c16: 0xe175, 0x1c17: 0xe175, + 0x1c18: 0xe115, 0x1c19: 0xe115, 0x1c1a: 0xe135, 0x1c1b: 0xe135, 0x1c1c: 0xe115, 0x1c1d: 0xe115, + 0x1c1e: 0x8b05, 0x1c1f: 0x8b05, 0x1c20: 0x04b5, 0x1c21: 0x04b5, 0x1c22: 0x0a08, 0x1c23: 0x0a08, + 0x1c24: 0x0a08, 0x1c25: 0x0a08, 0x1c26: 0x0a08, 0x1c27: 0x0a08, 0x1c28: 0x0a08, 0x1c29: 0x0a08, + 0x1c2a: 0x0a08, 0x1c2b: 0x0a08, 0x1c2c: 0x0a08, 0x1c2d: 0x0a08, 0x1c2e: 0x0a08, 0x1c2f: 0x0a08, + 0x1c30: 0x0a08, 0x1c31: 0x0a08, 0x1c32: 0x0a08, 0x1c33: 0x0a08, 0x1c34: 0x0a08, 0x1c35: 0x0a08, + 0x1c36: 0x0a08, 0x1c37: 0x0a08, 0x1c38: 0x0a08, 0x1c39: 0x0a08, 0x1c3a: 0x0a08, 0x1c3b: 0x0a08, + 0x1c3c: 0x0a08, 0x1c3d: 0x0a08, 0x1c3e: 0x0a08, 0x1c3f: 0x0a08, + // Block 0x71, offset 0x1c40 + 0x1c40: 0xb189, 0x1c41: 0xb1a1, 0x1c42: 0xb201, 0x1c43: 0xb249, 0x1c44: 0x0040, 0x1c45: 0xb411, + 0x1c46: 0xb291, 0x1c47: 0xb219, 0x1c48: 0xb309, 0x1c49: 0xb429, 0x1c4a: 0xb399, 0x1c4b: 0xb3b1, + 0x1c4c: 0xb3c9, 0x1c4d: 0xb3e1, 0x1c4e: 0xb2a9, 0x1c4f: 0xb339, 0x1c50: 0xb369, 0x1c51: 0xb2d9, + 0x1c52: 0xb381, 0x1c53: 0xb279, 0x1c54: 0xb2c1, 0x1c55: 0xb1d1, 0x1c56: 0xb1e9, 0x1c57: 0xb231, + 0x1c58: 0xb261, 0x1c59: 0xb2f1, 0x1c5a: 0xb321, 0x1c5b: 0xb351, 0x1c5c: 0xbc59, 0x1c5d: 0x7949, + 0x1c5e: 0xbc71, 0x1c5f: 0xbc89, 0x1c60: 0x0040, 0x1c61: 0xb1a1, 0x1c62: 0xb201, 0x1c63: 0x0040, + 0x1c64: 0xb3f9, 0x1c65: 0x0040, 0x1c66: 0x0040, 0x1c67: 0xb219, 0x1c68: 0x0040, 0x1c69: 0xb429, + 0x1c6a: 0xb399, 0x1c6b: 0xb3b1, 0x1c6c: 0xb3c9, 0x1c6d: 
0xb3e1, 0x1c6e: 0xb2a9, 0x1c6f: 0xb339, + 0x1c70: 0xb369, 0x1c71: 0xb2d9, 0x1c72: 0xb381, 0x1c73: 0x0040, 0x1c74: 0xb2c1, 0x1c75: 0xb1d1, + 0x1c76: 0xb1e9, 0x1c77: 0xb231, 0x1c78: 0x0040, 0x1c79: 0xb2f1, 0x1c7a: 0x0040, 0x1c7b: 0xb351, + 0x1c7c: 0x0040, 0x1c7d: 0x0040, 0x1c7e: 0x0040, 0x1c7f: 0x0040, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x0040, 0x1c81: 0x0040, 0x1c82: 0xb201, 0x1c83: 0x0040, 0x1c84: 0x0040, 0x1c85: 0x0040, + 0x1c86: 0x0040, 0x1c87: 0xb219, 0x1c88: 0x0040, 0x1c89: 0xb429, 0x1c8a: 0x0040, 0x1c8b: 0xb3b1, + 0x1c8c: 0x0040, 0x1c8d: 0xb3e1, 0x1c8e: 0xb2a9, 0x1c8f: 0xb339, 0x1c90: 0x0040, 0x1c91: 0xb2d9, + 0x1c92: 0xb381, 0x1c93: 0x0040, 0x1c94: 0xb2c1, 0x1c95: 0x0040, 0x1c96: 0x0040, 0x1c97: 0xb231, + 0x1c98: 0x0040, 0x1c99: 0xb2f1, 0x1c9a: 0x0040, 0x1c9b: 0xb351, 0x1c9c: 0x0040, 0x1c9d: 0x7949, + 0x1c9e: 0x0040, 0x1c9f: 0xbc89, 0x1ca0: 0x0040, 0x1ca1: 0xb1a1, 0x1ca2: 0xb201, 0x1ca3: 0x0040, + 0x1ca4: 0xb3f9, 0x1ca5: 0x0040, 0x1ca6: 0x0040, 0x1ca7: 0xb219, 0x1ca8: 0xb309, 0x1ca9: 0xb429, + 0x1caa: 0xb399, 0x1cab: 0x0040, 0x1cac: 0xb3c9, 0x1cad: 0xb3e1, 0x1cae: 0xb2a9, 0x1caf: 0xb339, + 0x1cb0: 0xb369, 0x1cb1: 0xb2d9, 0x1cb2: 0xb381, 0x1cb3: 0x0040, 0x1cb4: 0xb2c1, 0x1cb5: 0xb1d1, + 0x1cb6: 0xb1e9, 0x1cb7: 0xb231, 0x1cb8: 0x0040, 0x1cb9: 0xb2f1, 0x1cba: 0xb321, 0x1cbb: 0xb351, + 0x1cbc: 0xbc59, 0x1cbd: 0x0040, 0x1cbe: 0xbc71, 0x1cbf: 0x0040, + // Block 0x73, offset 0x1cc0 + 0x1cc0: 0xb189, 0x1cc1: 0xb1a1, 0x1cc2: 0xb201, 0x1cc3: 0xb249, 0x1cc4: 0xb3f9, 0x1cc5: 0xb411, + 0x1cc6: 0xb291, 0x1cc7: 0xb219, 0x1cc8: 0xb309, 0x1cc9: 0xb429, 0x1cca: 0x0040, 0x1ccb: 0xb3b1, + 0x1ccc: 0xb3c9, 0x1ccd: 0xb3e1, 0x1cce: 0xb2a9, 0x1ccf: 0xb339, 0x1cd0: 0xb369, 0x1cd1: 0xb2d9, + 0x1cd2: 0xb381, 0x1cd3: 0xb279, 0x1cd4: 0xb2c1, 0x1cd5: 0xb1d1, 0x1cd6: 0xb1e9, 0x1cd7: 0xb231, + 0x1cd8: 0xb261, 0x1cd9: 0xb2f1, 0x1cda: 0xb321, 0x1cdb: 0xb351, 0x1cdc: 0x0040, 0x1cdd: 0x0040, + 0x1cde: 0x0040, 0x1cdf: 0x0040, 0x1ce0: 0x0040, 0x1ce1: 0xb1a1, 0x1ce2: 0xb201, 0x1ce3: 0xb249, + 0x1ce4: 0x0040, 0x1ce5: 0xb411, 0x1ce6: 0xb291, 0x1ce7: 0xb219, 0x1ce8: 0xb309, 0x1ce9: 0xb429, + 0x1cea: 0x0040, 0x1ceb: 0xb3b1, 0x1cec: 0xb3c9, 0x1ced: 0xb3e1, 0x1cee: 0xb2a9, 0x1cef: 0xb339, + 0x1cf0: 0xb369, 0x1cf1: 0xb2d9, 0x1cf2: 0xb381, 0x1cf3: 0xb279, 0x1cf4: 0xb2c1, 0x1cf5: 0xb1d1, + 0x1cf6: 0xb1e9, 0x1cf7: 0xb231, 0x1cf8: 0xb261, 0x1cf9: 0xb2f1, 0x1cfa: 0xb321, 0x1cfb: 0xb351, + 0x1cfc: 0x0040, 0x1cfd: 0x0040, 0x1cfe: 0x0040, 0x1cff: 0x0040, + // Block 0x74, offset 0x1d00 + 0x1d00: 0x0040, 0x1d01: 0xbca2, 0x1d02: 0xbcba, 0x1d03: 0xbcd2, 0x1d04: 0xbcea, 0x1d05: 0xbd02, + 0x1d06: 0xbd1a, 0x1d07: 0xbd32, 0x1d08: 0xbd4a, 0x1d09: 0xbd62, 0x1d0a: 0xbd7a, 0x1d0b: 0x0018, + 0x1d0c: 0x0018, 0x1d0d: 0x0040, 0x1d0e: 0x0040, 0x1d0f: 0x0040, 0x1d10: 0xbd92, 0x1d11: 0xbdb2, + 0x1d12: 0xbdd2, 0x1d13: 0xbdf2, 0x1d14: 0xbe12, 0x1d15: 0xbe32, 0x1d16: 0xbe52, 0x1d17: 0xbe72, + 0x1d18: 0xbe92, 0x1d19: 0xbeb2, 0x1d1a: 0xbed2, 0x1d1b: 0xbef2, 0x1d1c: 0xbf12, 0x1d1d: 0xbf32, + 0x1d1e: 0xbf52, 0x1d1f: 0xbf72, 0x1d20: 0xbf92, 0x1d21: 0xbfb2, 0x1d22: 0xbfd2, 0x1d23: 0xbff2, + 0x1d24: 0xc012, 0x1d25: 0xc032, 0x1d26: 0xc052, 0x1d27: 0xc072, 0x1d28: 0xc092, 0x1d29: 0xc0b2, + 0x1d2a: 0xc0d1, 0x1d2b: 0x1159, 0x1d2c: 0x0269, 0x1d2d: 0x6671, 0x1d2e: 0xc111, 0x1d2f: 0x0040, + 0x1d30: 0x0039, 0x1d31: 0x0ee9, 0x1d32: 0x1159, 0x1d33: 0x0ef9, 0x1d34: 0x0f09, 0x1d35: 0x1199, + 0x1d36: 0x0f31, 0x1d37: 0x0249, 0x1d38: 0x0f41, 0x1d39: 0x0259, 0x1d3a: 0x0f51, 0x1d3b: 0x0359, + 0x1d3c: 0x0f61, 0x1d3d: 0x0f71, 0x1d3e: 0x00d9, 0x1d3f: 0x0f99, + // Block 0x75, 
offset 0x1d40 + 0x1d40: 0x2039, 0x1d41: 0x0269, 0x1d42: 0x01d9, 0x1d43: 0x0fa9, 0x1d44: 0x0fb9, 0x1d45: 0x1089, + 0x1d46: 0x0279, 0x1d47: 0x0369, 0x1d48: 0x0289, 0x1d49: 0x13d1, 0x1d4a: 0xc129, 0x1d4b: 0x65b1, + 0x1d4c: 0xc141, 0x1d4d: 0x1441, 0x1d4e: 0xc159, 0x1d4f: 0xc179, 0x1d50: 0x0018, 0x1d51: 0x0018, + 0x1d52: 0x0018, 0x1d53: 0x0018, 0x1d54: 0x0018, 0x1d55: 0x0018, 0x1d56: 0x0018, 0x1d57: 0x0018, + 0x1d58: 0x0018, 0x1d59: 0x0018, 0x1d5a: 0x0018, 0x1d5b: 0x0018, 0x1d5c: 0x0018, 0x1d5d: 0x0018, + 0x1d5e: 0x0018, 0x1d5f: 0x0018, 0x1d60: 0x0018, 0x1d61: 0x0018, 0x1d62: 0x0018, 0x1d63: 0x0018, + 0x1d64: 0x0018, 0x1d65: 0x0018, 0x1d66: 0x0018, 0x1d67: 0x0018, 0x1d68: 0x0018, 0x1d69: 0x0018, + 0x1d6a: 0xc191, 0x1d6b: 0xc1a9, 0x1d6c: 0x0040, 0x1d6d: 0x0040, 0x1d6e: 0x0040, 0x1d6f: 0x0040, + 0x1d70: 0x0018, 0x1d71: 0x0018, 0x1d72: 0x0018, 0x1d73: 0x0018, 0x1d74: 0x0018, 0x1d75: 0x0018, + 0x1d76: 0x0018, 0x1d77: 0x0018, 0x1d78: 0x0018, 0x1d79: 0x0018, 0x1d7a: 0x0018, 0x1d7b: 0x0018, + 0x1d7c: 0x0018, 0x1d7d: 0x0018, 0x1d7e: 0x0018, 0x1d7f: 0x0018, + // Block 0x76, offset 0x1d80 + 0x1d80: 0xc1d9, 0x1d81: 0xc211, 0x1d82: 0xc249, 0x1d83: 0x0040, 0x1d84: 0x0040, 0x1d85: 0x0040, + 0x1d86: 0x0040, 0x1d87: 0x0040, 0x1d88: 0x0040, 0x1d89: 0x0040, 0x1d8a: 0x0040, 0x1d8b: 0x0040, + 0x1d8c: 0x0040, 0x1d8d: 0x0040, 0x1d8e: 0x0040, 0x1d8f: 0x0040, 0x1d90: 0xc269, 0x1d91: 0xc289, + 0x1d92: 0xc2a9, 0x1d93: 0xc2c9, 0x1d94: 0xc2e9, 0x1d95: 0xc309, 0x1d96: 0xc329, 0x1d97: 0xc349, + 0x1d98: 0xc369, 0x1d99: 0xc389, 0x1d9a: 0xc3a9, 0x1d9b: 0xc3c9, 0x1d9c: 0xc3e9, 0x1d9d: 0xc409, + 0x1d9e: 0xc429, 0x1d9f: 0xc449, 0x1da0: 0xc469, 0x1da1: 0xc489, 0x1da2: 0xc4a9, 0x1da3: 0xc4c9, + 0x1da4: 0xc4e9, 0x1da5: 0xc509, 0x1da6: 0xc529, 0x1da7: 0xc549, 0x1da8: 0xc569, 0x1da9: 0xc589, + 0x1daa: 0xc5a9, 0x1dab: 0xc5c9, 0x1dac: 0xc5e9, 0x1dad: 0xc609, 0x1dae: 0xc629, 0x1daf: 0xc649, + 0x1db0: 0xc669, 0x1db1: 0xc689, 0x1db2: 0xc6a9, 0x1db3: 0xc6c9, 0x1db4: 0xc6e9, 0x1db5: 0xc709, + 0x1db6: 0xc729, 0x1db7: 0xc749, 0x1db8: 0xc769, 0x1db9: 0xc789, 0x1dba: 0xc7a9, 0x1dbb: 0xc7c9, + 0x1dbc: 0x0040, 0x1dbd: 0x0040, 0x1dbe: 0x0040, 0x1dbf: 0x0040, + // Block 0x77, offset 0x1dc0 + 0x1dc0: 0xcaf9, 0x1dc1: 0xcb19, 0x1dc2: 0xcb39, 0x1dc3: 0x8b1d, 0x1dc4: 0xcb59, 0x1dc5: 0xcb79, + 0x1dc6: 0xcb99, 0x1dc7: 0xcbb9, 0x1dc8: 0xcbd9, 0x1dc9: 0xcbf9, 0x1dca: 0xcc19, 0x1dcb: 0xcc39, + 0x1dcc: 0xcc59, 0x1dcd: 0x8b3d, 0x1dce: 0xcc79, 0x1dcf: 0xcc99, 0x1dd0: 0xccb9, 0x1dd1: 0xccd9, + 0x1dd2: 0x8b5d, 0x1dd3: 0xccf9, 0x1dd4: 0xcd19, 0x1dd5: 0xc429, 0x1dd6: 0x8b7d, 0x1dd7: 0xcd39, + 0x1dd8: 0xcd59, 0x1dd9: 0xcd79, 0x1dda: 0xcd99, 0x1ddb: 0xcdb9, 0x1ddc: 0x8b9d, 0x1ddd: 0xcdd9, + 0x1dde: 0xcdf9, 0x1ddf: 0xce19, 0x1de0: 0xce39, 0x1de1: 0xce59, 0x1de2: 0xc789, 0x1de3: 0xce79, + 0x1de4: 0xce99, 0x1de5: 0xceb9, 0x1de6: 0xced9, 0x1de7: 0xcef9, 0x1de8: 0xcf19, 0x1de9: 0xcf39, + 0x1dea: 0xcf59, 0x1deb: 0xcf79, 0x1dec: 0xcf99, 0x1ded: 0xcfb9, 0x1dee: 0xcfd9, 0x1def: 0xcff9, + 0x1df0: 0xd019, 0x1df1: 0xd039, 0x1df2: 0xd039, 0x1df3: 0xd039, 0x1df4: 0x8bbd, 0x1df5: 0xd059, + 0x1df6: 0xd079, 0x1df7: 0xd099, 0x1df8: 0x8bdd, 0x1df9: 0xd0b9, 0x1dfa: 0xd0d9, 0x1dfb: 0xd0f9, + 0x1dfc: 0xd119, 0x1dfd: 0xd139, 0x1dfe: 0xd159, 0x1dff: 0xd179, + // Block 0x78, offset 0x1e00 + 0x1e00: 0xd199, 0x1e01: 0xd1b9, 0x1e02: 0xd1d9, 0x1e03: 0xd1f9, 0x1e04: 0xd219, 0x1e05: 0xd239, + 0x1e06: 0xd239, 0x1e07: 0xd259, 0x1e08: 0xd279, 0x1e09: 0xd299, 0x1e0a: 0xd2b9, 0x1e0b: 0xd2d9, + 0x1e0c: 0xd2f9, 0x1e0d: 0xd319, 0x1e0e: 0xd339, 0x1e0f: 0xd359, 0x1e10: 0xd379, 0x1e11: 0xd399, + 0x1e12: 
0xd3b9, 0x1e13: 0xd3d9, 0x1e14: 0xd3f9, 0x1e15: 0xd419, 0x1e16: 0xd439, 0x1e17: 0xd459, + 0x1e18: 0xd479, 0x1e19: 0x8bfd, 0x1e1a: 0xd499, 0x1e1b: 0xd4b9, 0x1e1c: 0xd4d9, 0x1e1d: 0xc309, + 0x1e1e: 0xd4f9, 0x1e1f: 0xd519, 0x1e20: 0x8c1d, 0x1e21: 0x8c3d, 0x1e22: 0xd539, 0x1e23: 0xd559, + 0x1e24: 0xd579, 0x1e25: 0xd599, 0x1e26: 0xd5b9, 0x1e27: 0xd5d9, 0x1e28: 0x2040, 0x1e29: 0xd5f9, + 0x1e2a: 0xd619, 0x1e2b: 0xd619, 0x1e2c: 0x8c5d, 0x1e2d: 0xd639, 0x1e2e: 0xd659, 0x1e2f: 0xd679, + 0x1e30: 0xd699, 0x1e31: 0x8c7d, 0x1e32: 0xd6b9, 0x1e33: 0xd6d9, 0x1e34: 0x2040, 0x1e35: 0xd6f9, + 0x1e36: 0xd719, 0x1e37: 0xd739, 0x1e38: 0xd759, 0x1e39: 0xd779, 0x1e3a: 0xd799, 0x1e3b: 0x8c9d, + 0x1e3c: 0xd7b9, 0x1e3d: 0x8cbd, 0x1e3e: 0xd7d9, 0x1e3f: 0xd7f9, + // Block 0x79, offset 0x1e40 + 0x1e40: 0xd819, 0x1e41: 0xd839, 0x1e42: 0xd859, 0x1e43: 0xd879, 0x1e44: 0xd899, 0x1e45: 0xd8b9, + 0x1e46: 0xd8d9, 0x1e47: 0xd8f9, 0x1e48: 0xd919, 0x1e49: 0x8cdd, 0x1e4a: 0xd939, 0x1e4b: 0xd959, + 0x1e4c: 0xd979, 0x1e4d: 0xd999, 0x1e4e: 0xd9b9, 0x1e4f: 0x8cfd, 0x1e50: 0xd9d9, 0x1e51: 0x8d1d, + 0x1e52: 0x8d3d, 0x1e53: 0xd9f9, 0x1e54: 0xda19, 0x1e55: 0xda19, 0x1e56: 0xda39, 0x1e57: 0x8d5d, + 0x1e58: 0x8d7d, 0x1e59: 0xda59, 0x1e5a: 0xda79, 0x1e5b: 0xda99, 0x1e5c: 0xdab9, 0x1e5d: 0xdad9, + 0x1e5e: 0xdaf9, 0x1e5f: 0xdb19, 0x1e60: 0xdb39, 0x1e61: 0xdb59, 0x1e62: 0xdb79, 0x1e63: 0xdb99, + 0x1e64: 0x8d9d, 0x1e65: 0xdbb9, 0x1e66: 0xdbd9, 0x1e67: 0xdbf9, 0x1e68: 0xdc19, 0x1e69: 0xdbf9, + 0x1e6a: 0xdc39, 0x1e6b: 0xdc59, 0x1e6c: 0xdc79, 0x1e6d: 0xdc99, 0x1e6e: 0xdcb9, 0x1e6f: 0xdcd9, + 0x1e70: 0xdcf9, 0x1e71: 0xdd19, 0x1e72: 0xdd39, 0x1e73: 0xdd59, 0x1e74: 0xdd79, 0x1e75: 0xdd99, + 0x1e76: 0xddb9, 0x1e77: 0xddd9, 0x1e78: 0x8dbd, 0x1e79: 0xddf9, 0x1e7a: 0xde19, 0x1e7b: 0xde39, + 0x1e7c: 0xde59, 0x1e7d: 0xde79, 0x1e7e: 0x8ddd, 0x1e7f: 0xde99, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0xe599, 0x1e81: 0xe5b9, 0x1e82: 0xe5d9, 0x1e83: 0xe5f9, 0x1e84: 0xe619, 0x1e85: 0xe639, + 0x1e86: 0x8efd, 0x1e87: 0xe659, 0x1e88: 0xe679, 0x1e89: 0xe699, 0x1e8a: 0xe6b9, 0x1e8b: 0xe6d9, + 0x1e8c: 0xe6f9, 0x1e8d: 0x8f1d, 0x1e8e: 0xe719, 0x1e8f: 0xe739, 0x1e90: 0x8f3d, 0x1e91: 0x8f5d, + 0x1e92: 0xe759, 0x1e93: 0xe779, 0x1e94: 0xe799, 0x1e95: 0xe7b9, 0x1e96: 0xe7d9, 0x1e97: 0xe7f9, + 0x1e98: 0xe819, 0x1e99: 0xe839, 0x1e9a: 0xe859, 0x1e9b: 0x8f7d, 0x1e9c: 0xe879, 0x1e9d: 0x8f9d, + 0x1e9e: 0xe899, 0x1e9f: 0x2040, 0x1ea0: 0xe8b9, 0x1ea1: 0xe8d9, 0x1ea2: 0xe8f9, 0x1ea3: 0x8fbd, + 0x1ea4: 0xe919, 0x1ea5: 0xe939, 0x1ea6: 0x8fdd, 0x1ea7: 0x8ffd, 0x1ea8: 0xe959, 0x1ea9: 0xe979, + 0x1eaa: 0xe999, 0x1eab: 0xe9b9, 0x1eac: 0xe9d9, 0x1ead: 0xe9d9, 0x1eae: 0xe9f9, 0x1eaf: 0xea19, + 0x1eb0: 0xea39, 0x1eb1: 0xea59, 0x1eb2: 0xea79, 0x1eb3: 0xea99, 0x1eb4: 0xeab9, 0x1eb5: 0x901d, + 0x1eb6: 0xead9, 0x1eb7: 0x903d, 0x1eb8: 0xeaf9, 0x1eb9: 0x905d, 0x1eba: 0xeb19, 0x1ebb: 0x907d, + 0x1ebc: 0x909d, 0x1ebd: 0x90bd, 0x1ebe: 0xeb39, 0x1ebf: 0xeb59, + // Block 0x7b, offset 0x1ec0 + 0x1ec0: 0xeb79, 0x1ec1: 0x90dd, 0x1ec2: 0x90fd, 0x1ec3: 0x911d, 0x1ec4: 0x913d, 0x1ec5: 0xeb99, + 0x1ec6: 0xebb9, 0x1ec7: 0xebb9, 0x1ec8: 0xebd9, 0x1ec9: 0xebf9, 0x1eca: 0xec19, 0x1ecb: 0xec39, + 0x1ecc: 0xec59, 0x1ecd: 0x915d, 0x1ece: 0xec79, 0x1ecf: 0xec99, 0x1ed0: 0xecb9, 0x1ed1: 0xecd9, + 0x1ed2: 0x917d, 0x1ed3: 0xecf9, 0x1ed4: 0x919d, 0x1ed5: 0x91bd, 0x1ed6: 0xed19, 0x1ed7: 0xed39, + 0x1ed8: 0xed59, 0x1ed9: 0xed79, 0x1eda: 0xed99, 0x1edb: 0xedb9, 0x1edc: 0x91dd, 0x1edd: 0x91fd, + 0x1ede: 0x921d, 0x1edf: 0x2040, 0x1ee0: 0xedd9, 0x1ee1: 0x923d, 0x1ee2: 0xedf9, 0x1ee3: 0xee19, + 0x1ee4: 0xee39, 0x1ee5: 0x925d, 
0x1ee6: 0xee59, 0x1ee7: 0xee79, 0x1ee8: 0xee99, 0x1ee9: 0xeeb9, + 0x1eea: 0xeed9, 0x1eeb: 0x927d, 0x1eec: 0xeef9, 0x1eed: 0xef19, 0x1eee: 0xef39, 0x1eef: 0xef59, + 0x1ef0: 0xef79, 0x1ef1: 0xef99, 0x1ef2: 0x929d, 0x1ef3: 0x92bd, 0x1ef4: 0xefb9, 0x1ef5: 0x92dd, + 0x1ef6: 0xefd9, 0x1ef7: 0x92fd, 0x1ef8: 0xeff9, 0x1ef9: 0xf019, 0x1efa: 0xf039, 0x1efb: 0x931d, + 0x1efc: 0x933d, 0x1efd: 0xf059, 0x1efe: 0x935d, 0x1eff: 0xf079, + // Block 0x7c, offset 0x1f00 + 0x1f00: 0xf6b9, 0x1f01: 0xf6d9, 0x1f02: 0xf6f9, 0x1f03: 0xf719, 0x1f04: 0xf739, 0x1f05: 0x951d, + 0x1f06: 0xf759, 0x1f07: 0xf779, 0x1f08: 0xf799, 0x1f09: 0xf7b9, 0x1f0a: 0xf7d9, 0x1f0b: 0x953d, + 0x1f0c: 0x955d, 0x1f0d: 0xf7f9, 0x1f0e: 0xf819, 0x1f0f: 0xf839, 0x1f10: 0xf859, 0x1f11: 0xf879, + 0x1f12: 0xf899, 0x1f13: 0x957d, 0x1f14: 0xf8b9, 0x1f15: 0xf8d9, 0x1f16: 0xf8f9, 0x1f17: 0xf919, + 0x1f18: 0x959d, 0x1f19: 0x95bd, 0x1f1a: 0xf939, 0x1f1b: 0xf959, 0x1f1c: 0xf979, 0x1f1d: 0x95dd, + 0x1f1e: 0xf999, 0x1f1f: 0xf9b9, 0x1f20: 0x6815, 0x1f21: 0x95fd, 0x1f22: 0xf9d9, 0x1f23: 0xf9f9, + 0x1f24: 0xfa19, 0x1f25: 0x961d, 0x1f26: 0xfa39, 0x1f27: 0xfa59, 0x1f28: 0xfa79, 0x1f29: 0xfa99, + 0x1f2a: 0xfab9, 0x1f2b: 0xfad9, 0x1f2c: 0xfaf9, 0x1f2d: 0x963d, 0x1f2e: 0xfb19, 0x1f2f: 0xfb39, + 0x1f30: 0xfb59, 0x1f31: 0x965d, 0x1f32: 0xfb79, 0x1f33: 0xfb99, 0x1f34: 0xfbb9, 0x1f35: 0xfbd9, + 0x1f36: 0x7b35, 0x1f37: 0x967d, 0x1f38: 0xfbf9, 0x1f39: 0xfc19, 0x1f3a: 0xfc39, 0x1f3b: 0x969d, + 0x1f3c: 0xfc59, 0x1f3d: 0x96bd, 0x1f3e: 0xfc79, 0x1f3f: 0xfc79, + // Block 0x7d, offset 0x1f40 + 0x1f40: 0xfc99, 0x1f41: 0x96dd, 0x1f42: 0xfcb9, 0x1f43: 0xfcd9, 0x1f44: 0xfcf9, 0x1f45: 0xfd19, + 0x1f46: 0xfd39, 0x1f47: 0xfd59, 0x1f48: 0xfd79, 0x1f49: 0x96fd, 0x1f4a: 0xfd99, 0x1f4b: 0xfdb9, + 0x1f4c: 0xfdd9, 0x1f4d: 0xfdf9, 0x1f4e: 0xfe19, 0x1f4f: 0xfe39, 0x1f50: 0x971d, 0x1f51: 0xfe59, + 0x1f52: 0x973d, 0x1f53: 0x975d, 0x1f54: 0x977d, 0x1f55: 0xfe79, 0x1f56: 0xfe99, 0x1f57: 0xfeb9, + 0x1f58: 0xfed9, 0x1f59: 0xfef9, 0x1f5a: 0xff19, 0x1f5b: 0xff39, 0x1f5c: 0xff59, 0x1f5d: 0x979d, + 0x1f5e: 0x0040, 0x1f5f: 0x0040, 0x1f60: 0x0040, 0x1f61: 0x0040, 0x1f62: 0x0040, 0x1f63: 0x0040, + 0x1f64: 0x0040, 0x1f65: 0x0040, 0x1f66: 0x0040, 0x1f67: 0x0040, 0x1f68: 0x0040, 0x1f69: 0x0040, + 0x1f6a: 0x0040, 0x1f6b: 0x0040, 0x1f6c: 0x0040, 0x1f6d: 0x0040, 0x1f6e: 0x0040, 0x1f6f: 0x0040, + 0x1f70: 0x0040, 0x1f71: 0x0040, 0x1f72: 0x0040, 0x1f73: 0x0040, 0x1f74: 0x0040, 0x1f75: 0x0040, + 0x1f76: 0x0040, 0x1f77: 0x0040, 0x1f78: 0x0040, 0x1f79: 0x0040, 0x1f7a: 0x0040, 0x1f7b: 0x0040, + 0x1f7c: 0x0040, 0x1f7d: 0x0040, 0x1f7e: 0x0040, 0x1f7f: 0x0040, +} + +// idnaIndex: 35 blocks, 2240 entries, 4480 bytes +// Block 0 is the zero block. 
+var idnaIndex = [2240]uint16{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x7c, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x04, 0xc7: 0x05, + 0xc8: 0x06, 0xc9: 0x7d, 0xca: 0x7e, 0xcb: 0x07, 0xcc: 0x7f, 0xcd: 0x08, 0xce: 0x09, 0xcf: 0x0a, + 0xd0: 0x80, 0xd1: 0x0b, 0xd2: 0x0c, 0xd3: 0x0d, 0xd4: 0x0e, 0xd5: 0x81, 0xd6: 0x82, 0xd7: 0x83, + 0xd8: 0x0f, 0xd9: 0x10, 0xda: 0x84, 0xdb: 0x11, 0xdc: 0x12, 0xdd: 0x85, 0xde: 0x86, 0xdf: 0x87, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, 0xe5: 0x07, 0xe6: 0x07, 0xe7: 0x07, + 0xe8: 0x07, 0xe9: 0x08, 0xea: 0x09, 0xeb: 0x07, 0xec: 0x07, 0xed: 0x0a, 0xee: 0x0b, 0xef: 0x0c, + 0xf0: 0x1c, 0xf1: 0x1d, 0xf2: 0x1d, 0xf3: 0x1f, 0xf4: 0x20, + // Block 0x4, offset 0x100 + 0x120: 0x88, 0x121: 0x89, 0x122: 0x8a, 0x123: 0x8b, 0x124: 0x8c, 0x125: 0x13, 0x126: 0x14, 0x127: 0x15, + 0x128: 0x16, 0x129: 0x17, 0x12a: 0x18, 0x12b: 0x19, 0x12c: 0x1a, 0x12d: 0x1b, 0x12e: 0x1c, 0x12f: 0x8d, + 0x130: 0x8e, 0x131: 0x1d, 0x132: 0x1e, 0x133: 0x1f, 0x134: 0x8f, 0x135: 0x20, 0x136: 0x90, 0x137: 0x91, + 0x138: 0x92, 0x139: 0x93, 0x13a: 0x21, 0x13b: 0x94, 0x13c: 0x95, 0x13d: 0x22, 0x13e: 0x23, 0x13f: 0x96, + // Block 0x5, offset 0x140 + 0x140: 0x97, 0x141: 0x98, 0x142: 0x99, 0x143: 0x9a, 0x144: 0x9b, 0x145: 0x9c, 0x146: 0x9d, 0x147: 0x9e, + 0x148: 0x9f, 0x149: 0xa0, 0x14a: 0xa1, 0x14b: 0xa2, 0x14c: 0xa3, 0x14d: 0xa4, 0x14e: 0xa5, 0x14f: 0xa6, + 0x150: 0xa7, 0x151: 0x9f, 0x152: 0x9f, 0x153: 0x9f, 0x154: 0x9f, 0x155: 0x9f, 0x156: 0x9f, 0x157: 0x9f, + 0x158: 0x9f, 0x159: 0xa8, 0x15a: 0xa9, 0x15b: 0xaa, 0x15c: 0xab, 0x15d: 0xac, 0x15e: 0xad, 0x15f: 0xae, + 0x160: 0xaf, 0x161: 0xb0, 0x162: 0xb1, 0x163: 0xb2, 0x164: 0xb3, 0x165: 0xb4, 0x166: 0xb5, 0x167: 0xb6, + 0x168: 0xb7, 0x169: 0xb8, 0x16a: 0xb9, 0x16b: 0xba, 0x16c: 0xbb, 0x16d: 0xbc, 0x16e: 0xbd, 0x16f: 0xbe, + 0x170: 0xbf, 0x171: 0xc0, 0x172: 0xc1, 0x173: 0xc2, 0x174: 0x24, 0x175: 0x25, 0x176: 0x26, 0x177: 0xc3, + 0x178: 0x27, 0x179: 0x27, 0x17a: 0x28, 0x17b: 0x27, 0x17c: 0xc4, 0x17d: 0x29, 0x17e: 0x2a, 0x17f: 0x2b, + // Block 0x6, offset 0x180 + 0x180: 0x2c, 0x181: 0x2d, 0x182: 0x2e, 0x183: 0xc5, 0x184: 0x2f, 0x185: 0x30, 0x186: 0xc6, 0x187: 0x9b, + 0x188: 0xc7, 0x189: 0xc8, 0x18a: 0x9b, 0x18b: 0x9b, 0x18c: 0xc9, 0x18d: 0x9b, 0x18e: 0x9b, 0x18f: 0xca, + 0x190: 0xcb, 0x191: 0x31, 0x192: 0x32, 0x193: 0x33, 0x194: 0x9b, 0x195: 0x9b, 0x196: 0x9b, 0x197: 0x9b, + 0x198: 0x9b, 0x199: 0x9b, 0x19a: 0x9b, 0x19b: 0x9b, 0x19c: 0x9b, 0x19d: 0x9b, 0x19e: 0x9b, 0x19f: 0x9b, + 0x1a0: 0x9b, 0x1a1: 0x9b, 0x1a2: 0x9b, 0x1a3: 0x9b, 0x1a4: 0x9b, 0x1a5: 0x9b, 0x1a6: 0x9b, 0x1a7: 0x9b, + 0x1a8: 0xcc, 0x1a9: 0xcd, 0x1aa: 0x9b, 0x1ab: 0xce, 0x1ac: 0x9b, 0x1ad: 0xcf, 0x1ae: 0xd0, 0x1af: 0xd1, + 0x1b0: 0xd2, 0x1b1: 0x34, 0x1b2: 0x27, 0x1b3: 0x35, 0x1b4: 0xd3, 0x1b5: 0xd4, 0x1b6: 0xd5, 0x1b7: 0xd6, + 0x1b8: 0xd7, 0x1b9: 0xd8, 0x1ba: 0xd9, 0x1bb: 0xda, 0x1bc: 0xdb, 0x1bd: 0xdc, 0x1be: 0xdd, 0x1bf: 0x36, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x37, 0x1c1: 0xde, 0x1c2: 0xdf, 0x1c3: 0xe0, 0x1c4: 0xe1, 0x1c5: 0x38, 0x1c6: 0x39, 0x1c7: 0xe2, + 0x1c8: 0xe3, 0x1c9: 0x3a, 0x1ca: 0x3b, 0x1cb: 0x3c, 0x1cc: 0x3d, 0x1cd: 0x3e, 0x1ce: 0x3f, 0x1cf: 0x40, + 0x1d0: 0x9f, 0x1d1: 0x9f, 0x1d2: 0x9f, 0x1d3: 0x9f, 0x1d4: 0x9f, 0x1d5: 0x9f, 0x1d6: 0x9f, 0x1d7: 0x9f, + 0x1d8: 0x9f, 0x1d9: 0x9f, 0x1da: 0x9f, 0x1db: 0x9f, 0x1dc: 0x9f, 0x1dd: 0x9f, 0x1de: 0x9f, 0x1df: 0x9f, + 0x1e0: 0x9f, 0x1e1: 0x9f, 0x1e2: 0x9f, 0x1e3: 0x9f, 0x1e4: 0x9f, 0x1e5: 0x9f, 0x1e6: 0x9f, 0x1e7: 0x9f, + 0x1e8: 0x9f, 
0x1e9: 0x9f, 0x1ea: 0x9f, 0x1eb: 0x9f, 0x1ec: 0x9f, 0x1ed: 0x9f, 0x1ee: 0x9f, 0x1ef: 0x9f, + 0x1f0: 0x9f, 0x1f1: 0x9f, 0x1f2: 0x9f, 0x1f3: 0x9f, 0x1f4: 0x9f, 0x1f5: 0x9f, 0x1f6: 0x9f, 0x1f7: 0x9f, + 0x1f8: 0x9f, 0x1f9: 0x9f, 0x1fa: 0x9f, 0x1fb: 0x9f, 0x1fc: 0x9f, 0x1fd: 0x9f, 0x1fe: 0x9f, 0x1ff: 0x9f, + // Block 0x8, offset 0x200 + 0x200: 0x9f, 0x201: 0x9f, 0x202: 0x9f, 0x203: 0x9f, 0x204: 0x9f, 0x205: 0x9f, 0x206: 0x9f, 0x207: 0x9f, + 0x208: 0x9f, 0x209: 0x9f, 0x20a: 0x9f, 0x20b: 0x9f, 0x20c: 0x9f, 0x20d: 0x9f, 0x20e: 0x9f, 0x20f: 0x9f, + 0x210: 0x9f, 0x211: 0x9f, 0x212: 0x9f, 0x213: 0x9f, 0x214: 0x9f, 0x215: 0x9f, 0x216: 0x9f, 0x217: 0x9f, + 0x218: 0x9f, 0x219: 0x9f, 0x21a: 0x9f, 0x21b: 0x9f, 0x21c: 0x9f, 0x21d: 0x9f, 0x21e: 0x9f, 0x21f: 0x9f, + 0x220: 0x9f, 0x221: 0x9f, 0x222: 0x9f, 0x223: 0x9f, 0x224: 0x9f, 0x225: 0x9f, 0x226: 0x9f, 0x227: 0x9f, + 0x228: 0x9f, 0x229: 0x9f, 0x22a: 0x9f, 0x22b: 0x9f, 0x22c: 0x9f, 0x22d: 0x9f, 0x22e: 0x9f, 0x22f: 0x9f, + 0x230: 0x9f, 0x231: 0x9f, 0x232: 0x9f, 0x233: 0x9f, 0x234: 0x9f, 0x235: 0x9f, 0x236: 0xb2, 0x237: 0x9b, + 0x238: 0x9f, 0x239: 0x9f, 0x23a: 0x9f, 0x23b: 0x9f, 0x23c: 0x9f, 0x23d: 0x9f, 0x23e: 0x9f, 0x23f: 0x9f, + // Block 0x9, offset 0x240 + 0x240: 0x9f, 0x241: 0x9f, 0x242: 0x9f, 0x243: 0x9f, 0x244: 0x9f, 0x245: 0x9f, 0x246: 0x9f, 0x247: 0x9f, + 0x248: 0x9f, 0x249: 0x9f, 0x24a: 0x9f, 0x24b: 0x9f, 0x24c: 0x9f, 0x24d: 0x9f, 0x24e: 0x9f, 0x24f: 0x9f, + 0x250: 0x9f, 0x251: 0x9f, 0x252: 0x9f, 0x253: 0x9f, 0x254: 0x9f, 0x255: 0x9f, 0x256: 0x9f, 0x257: 0x9f, + 0x258: 0x9f, 0x259: 0x9f, 0x25a: 0x9f, 0x25b: 0x9f, 0x25c: 0x9f, 0x25d: 0x9f, 0x25e: 0x9f, 0x25f: 0x9f, + 0x260: 0x9f, 0x261: 0x9f, 0x262: 0x9f, 0x263: 0x9f, 0x264: 0x9f, 0x265: 0x9f, 0x266: 0x9f, 0x267: 0x9f, + 0x268: 0x9f, 0x269: 0x9f, 0x26a: 0x9f, 0x26b: 0x9f, 0x26c: 0x9f, 0x26d: 0x9f, 0x26e: 0x9f, 0x26f: 0x9f, + 0x270: 0x9f, 0x271: 0x9f, 0x272: 0x9f, 0x273: 0x9f, 0x274: 0x9f, 0x275: 0x9f, 0x276: 0x9f, 0x277: 0x9f, + 0x278: 0x9f, 0x279: 0x9f, 0x27a: 0x9f, 0x27b: 0x9f, 0x27c: 0x9f, 0x27d: 0x9f, 0x27e: 0x9f, 0x27f: 0x9f, + // Block 0xa, offset 0x280 + 0x280: 0x9f, 0x281: 0x9f, 0x282: 0x9f, 0x283: 0x9f, 0x284: 0x9f, 0x285: 0x9f, 0x286: 0x9f, 0x287: 0x9f, + 0x288: 0x9f, 0x289: 0x9f, 0x28a: 0x9f, 0x28b: 0x9f, 0x28c: 0x9f, 0x28d: 0x9f, 0x28e: 0x9f, 0x28f: 0x9f, + 0x290: 0x9f, 0x291: 0x9f, 0x292: 0x9f, 0x293: 0x9f, 0x294: 0x9f, 0x295: 0x9f, 0x296: 0x9f, 0x297: 0x9f, + 0x298: 0x9f, 0x299: 0x9f, 0x29a: 0x9f, 0x29b: 0x9f, 0x29c: 0x9f, 0x29d: 0x9f, 0x29e: 0x9f, 0x29f: 0x9f, + 0x2a0: 0x9f, 0x2a1: 0x9f, 0x2a2: 0x9f, 0x2a3: 0x9f, 0x2a4: 0x9f, 0x2a5: 0x9f, 0x2a6: 0x9f, 0x2a7: 0x9f, + 0x2a8: 0x9f, 0x2a9: 0x9f, 0x2aa: 0x9f, 0x2ab: 0x9f, 0x2ac: 0x9f, 0x2ad: 0x9f, 0x2ae: 0x9f, 0x2af: 0x9f, + 0x2b0: 0x9f, 0x2b1: 0x9f, 0x2b2: 0x9f, 0x2b3: 0x9f, 0x2b4: 0x9f, 0x2b5: 0x9f, 0x2b6: 0x9f, 0x2b7: 0x9f, + 0x2b8: 0x9f, 0x2b9: 0x9f, 0x2ba: 0x9f, 0x2bb: 0x9f, 0x2bc: 0x9f, 0x2bd: 0x9f, 0x2be: 0x9f, 0x2bf: 0xe4, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x9f, 0x2c1: 0x9f, 0x2c2: 0x9f, 0x2c3: 0x9f, 0x2c4: 0x9f, 0x2c5: 0x9f, 0x2c6: 0x9f, 0x2c7: 0x9f, + 0x2c8: 0x9f, 0x2c9: 0x9f, 0x2ca: 0x9f, 0x2cb: 0x9f, 0x2cc: 0x9f, 0x2cd: 0x9f, 0x2ce: 0x9f, 0x2cf: 0x9f, + 0x2d0: 0x9f, 0x2d1: 0x9f, 0x2d2: 0xe5, 0x2d3: 0xe6, 0x2d4: 0x9f, 0x2d5: 0x9f, 0x2d6: 0x9f, 0x2d7: 0x9f, + 0x2d8: 0xe7, 0x2d9: 0x41, 0x2da: 0x42, 0x2db: 0xe8, 0x2dc: 0x43, 0x2dd: 0x44, 0x2de: 0x45, 0x2df: 0xe9, + 0x2e0: 0xea, 0x2e1: 0xeb, 0x2e2: 0xec, 0x2e3: 0xed, 0x2e4: 0xee, 0x2e5: 0xef, 0x2e6: 0xf0, 0x2e7: 0xf1, + 0x2e8: 0xf2, 0x2e9: 0xf3, 0x2ea: 0xf4, 0x2eb: 0xf5, 0x2ec: 
0xf6, 0x2ed: 0xf7, 0x2ee: 0xf8, 0x2ef: 0xf9, + 0x2f0: 0x9f, 0x2f1: 0x9f, 0x2f2: 0x9f, 0x2f3: 0x9f, 0x2f4: 0x9f, 0x2f5: 0x9f, 0x2f6: 0x9f, 0x2f7: 0x9f, + 0x2f8: 0x9f, 0x2f9: 0x9f, 0x2fa: 0x9f, 0x2fb: 0x9f, 0x2fc: 0x9f, 0x2fd: 0x9f, 0x2fe: 0x9f, 0x2ff: 0x9f, + // Block 0xc, offset 0x300 + 0x300: 0x9f, 0x301: 0x9f, 0x302: 0x9f, 0x303: 0x9f, 0x304: 0x9f, 0x305: 0x9f, 0x306: 0x9f, 0x307: 0x9f, + 0x308: 0x9f, 0x309: 0x9f, 0x30a: 0x9f, 0x30b: 0x9f, 0x30c: 0x9f, 0x30d: 0x9f, 0x30e: 0x9f, 0x30f: 0x9f, + 0x310: 0x9f, 0x311: 0x9f, 0x312: 0x9f, 0x313: 0x9f, 0x314: 0x9f, 0x315: 0x9f, 0x316: 0x9f, 0x317: 0x9f, + 0x318: 0x9f, 0x319: 0x9f, 0x31a: 0x9f, 0x31b: 0x9f, 0x31c: 0x9f, 0x31d: 0x9f, 0x31e: 0xfa, 0x31f: 0xfb, + // Block 0xd, offset 0x340 + 0x340: 0xba, 0x341: 0xba, 0x342: 0xba, 0x343: 0xba, 0x344: 0xba, 0x345: 0xba, 0x346: 0xba, 0x347: 0xba, + 0x348: 0xba, 0x349: 0xba, 0x34a: 0xba, 0x34b: 0xba, 0x34c: 0xba, 0x34d: 0xba, 0x34e: 0xba, 0x34f: 0xba, + 0x350: 0xba, 0x351: 0xba, 0x352: 0xba, 0x353: 0xba, 0x354: 0xba, 0x355: 0xba, 0x356: 0xba, 0x357: 0xba, + 0x358: 0xba, 0x359: 0xba, 0x35a: 0xba, 0x35b: 0xba, 0x35c: 0xba, 0x35d: 0xba, 0x35e: 0xba, 0x35f: 0xba, + 0x360: 0xba, 0x361: 0xba, 0x362: 0xba, 0x363: 0xba, 0x364: 0xba, 0x365: 0xba, 0x366: 0xba, 0x367: 0xba, + 0x368: 0xba, 0x369: 0xba, 0x36a: 0xba, 0x36b: 0xba, 0x36c: 0xba, 0x36d: 0xba, 0x36e: 0xba, 0x36f: 0xba, + 0x370: 0xba, 0x371: 0xba, 0x372: 0xba, 0x373: 0xba, 0x374: 0xba, 0x375: 0xba, 0x376: 0xba, 0x377: 0xba, + 0x378: 0xba, 0x379: 0xba, 0x37a: 0xba, 0x37b: 0xba, 0x37c: 0xba, 0x37d: 0xba, 0x37e: 0xba, 0x37f: 0xba, + // Block 0xe, offset 0x380 + 0x380: 0xba, 0x381: 0xba, 0x382: 0xba, 0x383: 0xba, 0x384: 0xba, 0x385: 0xba, 0x386: 0xba, 0x387: 0xba, + 0x388: 0xba, 0x389: 0xba, 0x38a: 0xba, 0x38b: 0xba, 0x38c: 0xba, 0x38d: 0xba, 0x38e: 0xba, 0x38f: 0xba, + 0x390: 0xba, 0x391: 0xba, 0x392: 0xba, 0x393: 0xba, 0x394: 0xba, 0x395: 0xba, 0x396: 0xba, 0x397: 0xba, + 0x398: 0xba, 0x399: 0xba, 0x39a: 0xba, 0x39b: 0xba, 0x39c: 0xba, 0x39d: 0xba, 0x39e: 0xba, 0x39f: 0xba, + 0x3a0: 0xba, 0x3a1: 0xba, 0x3a2: 0xba, 0x3a3: 0xba, 0x3a4: 0xfc, 0x3a5: 0xfd, 0x3a6: 0xfe, 0x3a7: 0xff, + 0x3a8: 0x46, 0x3a9: 0x100, 0x3aa: 0x101, 0x3ab: 0x47, 0x3ac: 0x48, 0x3ad: 0x49, 0x3ae: 0x4a, 0x3af: 0x4b, + 0x3b0: 0x102, 0x3b1: 0x4c, 0x3b2: 0x4d, 0x3b3: 0x4e, 0x3b4: 0x4f, 0x3b5: 0x50, 0x3b6: 0x103, 0x3b7: 0x51, + 0x3b8: 0x52, 0x3b9: 0x53, 0x3ba: 0x54, 0x3bb: 0x55, 0x3bc: 0x56, 0x3bd: 0x57, 0x3be: 0x58, 0x3bf: 0x59, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x104, 0x3c1: 0x105, 0x3c2: 0x9f, 0x3c3: 0x106, 0x3c4: 0x107, 0x3c5: 0x9b, 0x3c6: 0x108, 0x3c7: 0x109, + 0x3c8: 0xba, 0x3c9: 0xba, 0x3ca: 0x10a, 0x3cb: 0x10b, 0x3cc: 0x10c, 0x3cd: 0x10d, 0x3ce: 0x10e, 0x3cf: 0x10f, + 0x3d0: 0x110, 0x3d1: 0x9f, 0x3d2: 0x111, 0x3d3: 0x112, 0x3d4: 0x113, 0x3d5: 0x114, 0x3d6: 0xba, 0x3d7: 0xba, + 0x3d8: 0x9f, 0x3d9: 0x9f, 0x3da: 0x9f, 0x3db: 0x9f, 0x3dc: 0x115, 0x3dd: 0x116, 0x3de: 0xba, 0x3df: 0xba, + 0x3e0: 0x117, 0x3e1: 0x118, 0x3e2: 0x119, 0x3e3: 0x11a, 0x3e4: 0x11b, 0x3e5: 0xba, 0x3e6: 0x11c, 0x3e7: 0x11d, + 0x3e8: 0x11e, 0x3e9: 0x11f, 0x3ea: 0x120, 0x3eb: 0x5a, 0x3ec: 0x121, 0x3ed: 0x122, 0x3ee: 0x5b, 0x3ef: 0xba, + 0x3f0: 0x123, 0x3f1: 0x124, 0x3f2: 0x125, 0x3f3: 0x126, 0x3f4: 0xba, 0x3f5: 0xba, 0x3f6: 0xba, 0x3f7: 0xba, + 0x3f8: 0xba, 0x3f9: 0x127, 0x3fa: 0xba, 0x3fb: 0xba, 0x3fc: 0xba, 0x3fd: 0xba, 0x3fe: 0xba, 0x3ff: 0xba, + // Block 0x10, offset 0x400 + 0x400: 0x128, 0x401: 0x129, 0x402: 0x12a, 0x403: 0x12b, 0x404: 0x12c, 0x405: 0x12d, 0x406: 0x12e, 0x407: 0x12f, + 0x408: 0x130, 0x409: 0xba, 
0x40a: 0x131, 0x40b: 0x132, 0x40c: 0x5c, 0x40d: 0x5d, 0x40e: 0xba, 0x40f: 0xba, + 0x410: 0x133, 0x411: 0x134, 0x412: 0x135, 0x413: 0x136, 0x414: 0xba, 0x415: 0xba, 0x416: 0x137, 0x417: 0x138, + 0x418: 0x139, 0x419: 0x13a, 0x41a: 0x13b, 0x41b: 0x13c, 0x41c: 0x13d, 0x41d: 0xba, 0x41e: 0xba, 0x41f: 0xba, + 0x420: 0xba, 0x421: 0xba, 0x422: 0x13e, 0x423: 0x13f, 0x424: 0xba, 0x425: 0xba, 0x426: 0xba, 0x427: 0xba, + 0x428: 0xba, 0x429: 0xba, 0x42a: 0xba, 0x42b: 0x140, 0x42c: 0xba, 0x42d: 0xba, 0x42e: 0xba, 0x42f: 0xba, + 0x430: 0x141, 0x431: 0x142, 0x432: 0x143, 0x433: 0xba, 0x434: 0xba, 0x435: 0xba, 0x436: 0xba, 0x437: 0xba, + 0x438: 0xba, 0x439: 0xba, 0x43a: 0xba, 0x43b: 0xba, 0x43c: 0xba, 0x43d: 0xba, 0x43e: 0xba, 0x43f: 0xba, + // Block 0x11, offset 0x440 + 0x440: 0x9f, 0x441: 0x9f, 0x442: 0x9f, 0x443: 0x9f, 0x444: 0x9f, 0x445: 0x9f, 0x446: 0x9f, 0x447: 0x9f, + 0x448: 0x9f, 0x449: 0x9f, 0x44a: 0x9f, 0x44b: 0x9f, 0x44c: 0x9f, 0x44d: 0x9f, 0x44e: 0x144, 0x44f: 0xba, + 0x450: 0x9b, 0x451: 0x145, 0x452: 0x9f, 0x453: 0x9f, 0x454: 0x9f, 0x455: 0x146, 0x456: 0xba, 0x457: 0xba, + 0x458: 0xba, 0x459: 0xba, 0x45a: 0xba, 0x45b: 0xba, 0x45c: 0xba, 0x45d: 0xba, 0x45e: 0xba, 0x45f: 0xba, + 0x460: 0xba, 0x461: 0xba, 0x462: 0xba, 0x463: 0xba, 0x464: 0xba, 0x465: 0xba, 0x466: 0xba, 0x467: 0xba, + 0x468: 0xba, 0x469: 0xba, 0x46a: 0xba, 0x46b: 0xba, 0x46c: 0xba, 0x46d: 0xba, 0x46e: 0xba, 0x46f: 0xba, + 0x470: 0xba, 0x471: 0xba, 0x472: 0xba, 0x473: 0xba, 0x474: 0xba, 0x475: 0xba, 0x476: 0xba, 0x477: 0xba, + 0x478: 0xba, 0x479: 0xba, 0x47a: 0xba, 0x47b: 0xba, 0x47c: 0xba, 0x47d: 0xba, 0x47e: 0xba, 0x47f: 0xba, + // Block 0x12, offset 0x480 + 0x480: 0x9f, 0x481: 0x9f, 0x482: 0x9f, 0x483: 0x9f, 0x484: 0x9f, 0x485: 0x9f, 0x486: 0x9f, 0x487: 0x9f, + 0x488: 0x9f, 0x489: 0x9f, 0x48a: 0x9f, 0x48b: 0x9f, 0x48c: 0x9f, 0x48d: 0x9f, 0x48e: 0x9f, 0x48f: 0x9f, + 0x490: 0x147, 0x491: 0xba, 0x492: 0xba, 0x493: 0xba, 0x494: 0xba, 0x495: 0xba, 0x496: 0xba, 0x497: 0xba, + 0x498: 0xba, 0x499: 0xba, 0x49a: 0xba, 0x49b: 0xba, 0x49c: 0xba, 0x49d: 0xba, 0x49e: 0xba, 0x49f: 0xba, + 0x4a0: 0xba, 0x4a1: 0xba, 0x4a2: 0xba, 0x4a3: 0xba, 0x4a4: 0xba, 0x4a5: 0xba, 0x4a6: 0xba, 0x4a7: 0xba, + 0x4a8: 0xba, 0x4a9: 0xba, 0x4aa: 0xba, 0x4ab: 0xba, 0x4ac: 0xba, 0x4ad: 0xba, 0x4ae: 0xba, 0x4af: 0xba, + 0x4b0: 0xba, 0x4b1: 0xba, 0x4b2: 0xba, 0x4b3: 0xba, 0x4b4: 0xba, 0x4b5: 0xba, 0x4b6: 0xba, 0x4b7: 0xba, + 0x4b8: 0xba, 0x4b9: 0xba, 0x4ba: 0xba, 0x4bb: 0xba, 0x4bc: 0xba, 0x4bd: 0xba, 0x4be: 0xba, 0x4bf: 0xba, + // Block 0x13, offset 0x4c0 + 0x4c0: 0xba, 0x4c1: 0xba, 0x4c2: 0xba, 0x4c3: 0xba, 0x4c4: 0xba, 0x4c5: 0xba, 0x4c6: 0xba, 0x4c7: 0xba, + 0x4c8: 0xba, 0x4c9: 0xba, 0x4ca: 0xba, 0x4cb: 0xba, 0x4cc: 0xba, 0x4cd: 0xba, 0x4ce: 0xba, 0x4cf: 0xba, + 0x4d0: 0x9f, 0x4d1: 0x9f, 0x4d2: 0x9f, 0x4d3: 0x9f, 0x4d4: 0x9f, 0x4d5: 0x9f, 0x4d6: 0x9f, 0x4d7: 0x9f, + 0x4d8: 0x9f, 0x4d9: 0x148, 0x4da: 0xba, 0x4db: 0xba, 0x4dc: 0xba, 0x4dd: 0xba, 0x4de: 0xba, 0x4df: 0xba, + 0x4e0: 0xba, 0x4e1: 0xba, 0x4e2: 0xba, 0x4e3: 0xba, 0x4e4: 0xba, 0x4e5: 0xba, 0x4e6: 0xba, 0x4e7: 0xba, + 0x4e8: 0xba, 0x4e9: 0xba, 0x4ea: 0xba, 0x4eb: 0xba, 0x4ec: 0xba, 0x4ed: 0xba, 0x4ee: 0xba, 0x4ef: 0xba, + 0x4f0: 0xba, 0x4f1: 0xba, 0x4f2: 0xba, 0x4f3: 0xba, 0x4f4: 0xba, 0x4f5: 0xba, 0x4f6: 0xba, 0x4f7: 0xba, + 0x4f8: 0xba, 0x4f9: 0xba, 0x4fa: 0xba, 0x4fb: 0xba, 0x4fc: 0xba, 0x4fd: 0xba, 0x4fe: 0xba, 0x4ff: 0xba, + // Block 0x14, offset 0x500 + 0x500: 0xba, 0x501: 0xba, 0x502: 0xba, 0x503: 0xba, 0x504: 0xba, 0x505: 0xba, 0x506: 0xba, 0x507: 0xba, + 0x508: 0xba, 0x509: 0xba, 0x50a: 0xba, 
0x50b: 0xba, 0x50c: 0xba, 0x50d: 0xba, 0x50e: 0xba, 0x50f: 0xba, + 0x510: 0xba, 0x511: 0xba, 0x512: 0xba, 0x513: 0xba, 0x514: 0xba, 0x515: 0xba, 0x516: 0xba, 0x517: 0xba, + 0x518: 0xba, 0x519: 0xba, 0x51a: 0xba, 0x51b: 0xba, 0x51c: 0xba, 0x51d: 0xba, 0x51e: 0xba, 0x51f: 0xba, + 0x520: 0x9f, 0x521: 0x9f, 0x522: 0x9f, 0x523: 0x9f, 0x524: 0x9f, 0x525: 0x9f, 0x526: 0x9f, 0x527: 0x9f, + 0x528: 0x140, 0x529: 0x149, 0x52a: 0xba, 0x52b: 0x14a, 0x52c: 0x14b, 0x52d: 0x14c, 0x52e: 0x14d, 0x52f: 0xba, + 0x530: 0xba, 0x531: 0xba, 0x532: 0xba, 0x533: 0xba, 0x534: 0xba, 0x535: 0xba, 0x536: 0xba, 0x537: 0xba, + 0x538: 0xba, 0x539: 0xba, 0x53a: 0xba, 0x53b: 0xba, 0x53c: 0x9f, 0x53d: 0x14e, 0x53e: 0x14f, 0x53f: 0x150, + // Block 0x15, offset 0x540 + 0x540: 0x9f, 0x541: 0x9f, 0x542: 0x9f, 0x543: 0x9f, 0x544: 0x9f, 0x545: 0x9f, 0x546: 0x9f, 0x547: 0x9f, + 0x548: 0x9f, 0x549: 0x9f, 0x54a: 0x9f, 0x54b: 0x9f, 0x54c: 0x9f, 0x54d: 0x9f, 0x54e: 0x9f, 0x54f: 0x9f, + 0x550: 0x9f, 0x551: 0x9f, 0x552: 0x9f, 0x553: 0x9f, 0x554: 0x9f, 0x555: 0x9f, 0x556: 0x9f, 0x557: 0x9f, + 0x558: 0x9f, 0x559: 0x9f, 0x55a: 0x9f, 0x55b: 0x9f, 0x55c: 0x9f, 0x55d: 0x9f, 0x55e: 0x9f, 0x55f: 0x151, + 0x560: 0x9f, 0x561: 0x9f, 0x562: 0x9f, 0x563: 0x9f, 0x564: 0x9f, 0x565: 0x9f, 0x566: 0x9f, 0x567: 0x9f, + 0x568: 0x9f, 0x569: 0x9f, 0x56a: 0x9f, 0x56b: 0x152, 0x56c: 0xba, 0x56d: 0xba, 0x56e: 0xba, 0x56f: 0xba, + 0x570: 0xba, 0x571: 0xba, 0x572: 0xba, 0x573: 0xba, 0x574: 0xba, 0x575: 0xba, 0x576: 0xba, 0x577: 0xba, + 0x578: 0xba, 0x579: 0xba, 0x57a: 0xba, 0x57b: 0xba, 0x57c: 0xba, 0x57d: 0xba, 0x57e: 0xba, 0x57f: 0xba, + // Block 0x16, offset 0x580 + 0x580: 0x153, 0x581: 0xba, 0x582: 0xba, 0x583: 0xba, 0x584: 0xba, 0x585: 0xba, 0x586: 0xba, 0x587: 0xba, + 0x588: 0xba, 0x589: 0xba, 0x58a: 0xba, 0x58b: 0xba, 0x58c: 0xba, 0x58d: 0xba, 0x58e: 0xba, 0x58f: 0xba, + 0x590: 0xba, 0x591: 0xba, 0x592: 0xba, 0x593: 0xba, 0x594: 0xba, 0x595: 0xba, 0x596: 0xba, 0x597: 0xba, + 0x598: 0xba, 0x599: 0xba, 0x59a: 0xba, 0x59b: 0xba, 0x59c: 0xba, 0x59d: 0xba, 0x59e: 0xba, 0x59f: 0xba, + 0x5a0: 0xba, 0x5a1: 0xba, 0x5a2: 0xba, 0x5a3: 0xba, 0x5a4: 0xba, 0x5a5: 0xba, 0x5a6: 0xba, 0x5a7: 0xba, + 0x5a8: 0xba, 0x5a9: 0xba, 0x5aa: 0xba, 0x5ab: 0xba, 0x5ac: 0xba, 0x5ad: 0xba, 0x5ae: 0xba, 0x5af: 0xba, + 0x5b0: 0x9f, 0x5b1: 0x154, 0x5b2: 0x155, 0x5b3: 0xba, 0x5b4: 0xba, 0x5b5: 0xba, 0x5b6: 0xba, 0x5b7: 0xba, + 0x5b8: 0xba, 0x5b9: 0xba, 0x5ba: 0xba, 0x5bb: 0xba, 0x5bc: 0xba, 0x5bd: 0xba, 0x5be: 0xba, 0x5bf: 0xba, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x9b, 0x5c1: 0x9b, 0x5c2: 0x9b, 0x5c3: 0x156, 0x5c4: 0x157, 0x5c5: 0x158, 0x5c6: 0x159, 0x5c7: 0x15a, + 0x5c8: 0x9b, 0x5c9: 0x15b, 0x5ca: 0xba, 0x5cb: 0xba, 0x5cc: 0x9b, 0x5cd: 0x15c, 0x5ce: 0xba, 0x5cf: 0xba, + 0x5d0: 0x5e, 0x5d1: 0x5f, 0x5d2: 0x60, 0x5d3: 0x61, 0x5d4: 0x62, 0x5d5: 0x63, 0x5d6: 0x64, 0x5d7: 0x65, + 0x5d8: 0x66, 0x5d9: 0x67, 0x5da: 0x68, 0x5db: 0x69, 0x5dc: 0x6a, 0x5dd: 0x6b, 0x5de: 0x6c, 0x5df: 0x6d, + 0x5e0: 0x9b, 0x5e1: 0x9b, 0x5e2: 0x9b, 0x5e3: 0x9b, 0x5e4: 0x9b, 0x5e5: 0x9b, 0x5e6: 0x9b, 0x5e7: 0x9b, + 0x5e8: 0x15d, 0x5e9: 0x15e, 0x5ea: 0x15f, 0x5eb: 0xba, 0x5ec: 0xba, 0x5ed: 0xba, 0x5ee: 0xba, 0x5ef: 0xba, + 0x5f0: 0xba, 0x5f1: 0xba, 0x5f2: 0xba, 0x5f3: 0xba, 0x5f4: 0xba, 0x5f5: 0xba, 0x5f6: 0xba, 0x5f7: 0xba, + 0x5f8: 0xba, 0x5f9: 0xba, 0x5fa: 0xba, 0x5fb: 0xba, 0x5fc: 0xba, 0x5fd: 0xba, 0x5fe: 0xba, 0x5ff: 0xba, + // Block 0x18, offset 0x600 + 0x600: 0x160, 0x601: 0xba, 0x602: 0xba, 0x603: 0xba, 0x604: 0xba, 0x605: 0xba, 0x606: 0xba, 0x607: 0xba, + 0x608: 0xba, 0x609: 0xba, 0x60a: 0xba, 0x60b: 0xba, 
0x60c: 0xba, 0x60d: 0xba, 0x60e: 0xba, 0x60f: 0xba, + 0x610: 0xba, 0x611: 0xba, 0x612: 0xba, 0x613: 0xba, 0x614: 0xba, 0x615: 0xba, 0x616: 0xba, 0x617: 0xba, + 0x618: 0xba, 0x619: 0xba, 0x61a: 0xba, 0x61b: 0xba, 0x61c: 0xba, 0x61d: 0xba, 0x61e: 0xba, 0x61f: 0xba, + 0x620: 0x123, 0x621: 0x123, 0x622: 0x123, 0x623: 0x161, 0x624: 0x6e, 0x625: 0x162, 0x626: 0xba, 0x627: 0xba, + 0x628: 0xba, 0x629: 0xba, 0x62a: 0xba, 0x62b: 0xba, 0x62c: 0xba, 0x62d: 0xba, 0x62e: 0xba, 0x62f: 0xba, + 0x630: 0xba, 0x631: 0xba, 0x632: 0xba, 0x633: 0xba, 0x634: 0xba, 0x635: 0xba, 0x636: 0xba, 0x637: 0xba, + 0x638: 0x6f, 0x639: 0x70, 0x63a: 0x71, 0x63b: 0x163, 0x63c: 0xba, 0x63d: 0xba, 0x63e: 0xba, 0x63f: 0xba, + // Block 0x19, offset 0x640 + 0x640: 0x164, 0x641: 0x9b, 0x642: 0x165, 0x643: 0x166, 0x644: 0x72, 0x645: 0x73, 0x646: 0x167, 0x647: 0x168, + 0x648: 0x74, 0x649: 0x169, 0x64a: 0xba, 0x64b: 0xba, 0x64c: 0x9b, 0x64d: 0x9b, 0x64e: 0x9b, 0x64f: 0x9b, + 0x650: 0x9b, 0x651: 0x9b, 0x652: 0x9b, 0x653: 0x9b, 0x654: 0x9b, 0x655: 0x9b, 0x656: 0x9b, 0x657: 0x9b, + 0x658: 0x9b, 0x659: 0x9b, 0x65a: 0x9b, 0x65b: 0x16a, 0x65c: 0x9b, 0x65d: 0x16b, 0x65e: 0x9b, 0x65f: 0x16c, + 0x660: 0x16d, 0x661: 0x16e, 0x662: 0x16f, 0x663: 0xba, 0x664: 0x170, 0x665: 0x171, 0x666: 0x172, 0x667: 0x173, + 0x668: 0xba, 0x669: 0xba, 0x66a: 0xba, 0x66b: 0xba, 0x66c: 0xba, 0x66d: 0xba, 0x66e: 0xba, 0x66f: 0xba, + 0x670: 0xba, 0x671: 0xba, 0x672: 0xba, 0x673: 0xba, 0x674: 0xba, 0x675: 0xba, 0x676: 0xba, 0x677: 0xba, + 0x678: 0xba, 0x679: 0xba, 0x67a: 0xba, 0x67b: 0xba, 0x67c: 0xba, 0x67d: 0xba, 0x67e: 0xba, 0x67f: 0xba, + // Block 0x1a, offset 0x680 + 0x680: 0x9f, 0x681: 0x9f, 0x682: 0x9f, 0x683: 0x9f, 0x684: 0x9f, 0x685: 0x9f, 0x686: 0x9f, 0x687: 0x9f, + 0x688: 0x9f, 0x689: 0x9f, 0x68a: 0x9f, 0x68b: 0x9f, 0x68c: 0x9f, 0x68d: 0x9f, 0x68e: 0x9f, 0x68f: 0x9f, + 0x690: 0x9f, 0x691: 0x9f, 0x692: 0x9f, 0x693: 0x9f, 0x694: 0x9f, 0x695: 0x9f, 0x696: 0x9f, 0x697: 0x9f, + 0x698: 0x9f, 0x699: 0x9f, 0x69a: 0x9f, 0x69b: 0x174, 0x69c: 0x9f, 0x69d: 0x9f, 0x69e: 0x9f, 0x69f: 0x9f, + 0x6a0: 0x9f, 0x6a1: 0x9f, 0x6a2: 0x9f, 0x6a3: 0x9f, 0x6a4: 0x9f, 0x6a5: 0x9f, 0x6a6: 0x9f, 0x6a7: 0x9f, + 0x6a8: 0x9f, 0x6a9: 0x9f, 0x6aa: 0x9f, 0x6ab: 0x9f, 0x6ac: 0x9f, 0x6ad: 0x9f, 0x6ae: 0x9f, 0x6af: 0x9f, + 0x6b0: 0x9f, 0x6b1: 0x9f, 0x6b2: 0x9f, 0x6b3: 0x9f, 0x6b4: 0x9f, 0x6b5: 0x9f, 0x6b6: 0x9f, 0x6b7: 0x9f, + 0x6b8: 0x9f, 0x6b9: 0x9f, 0x6ba: 0x9f, 0x6bb: 0x9f, 0x6bc: 0x9f, 0x6bd: 0x9f, 0x6be: 0x9f, 0x6bf: 0x9f, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x9f, 0x6c1: 0x9f, 0x6c2: 0x9f, 0x6c3: 0x9f, 0x6c4: 0x9f, 0x6c5: 0x9f, 0x6c6: 0x9f, 0x6c7: 0x9f, + 0x6c8: 0x9f, 0x6c9: 0x9f, 0x6ca: 0x9f, 0x6cb: 0x9f, 0x6cc: 0x9f, 0x6cd: 0x9f, 0x6ce: 0x9f, 0x6cf: 0x9f, + 0x6d0: 0x9f, 0x6d1: 0x9f, 0x6d2: 0x9f, 0x6d3: 0x9f, 0x6d4: 0x9f, 0x6d5: 0x9f, 0x6d6: 0x9f, 0x6d7: 0x9f, + 0x6d8: 0x9f, 0x6d9: 0x9f, 0x6da: 0x9f, 0x6db: 0x9f, 0x6dc: 0x175, 0x6dd: 0x9f, 0x6de: 0x9f, 0x6df: 0x9f, + 0x6e0: 0x176, 0x6e1: 0x9f, 0x6e2: 0x9f, 0x6e3: 0x9f, 0x6e4: 0x9f, 0x6e5: 0x9f, 0x6e6: 0x9f, 0x6e7: 0x9f, + 0x6e8: 0x9f, 0x6e9: 0x9f, 0x6ea: 0x9f, 0x6eb: 0x9f, 0x6ec: 0x9f, 0x6ed: 0x9f, 0x6ee: 0x9f, 0x6ef: 0x9f, + 0x6f0: 0x9f, 0x6f1: 0x9f, 0x6f2: 0x9f, 0x6f3: 0x9f, 0x6f4: 0x9f, 0x6f5: 0x9f, 0x6f6: 0x9f, 0x6f7: 0x9f, + 0x6f8: 0x9f, 0x6f9: 0x9f, 0x6fa: 0x9f, 0x6fb: 0x9f, 0x6fc: 0x9f, 0x6fd: 0x9f, 0x6fe: 0x9f, 0x6ff: 0x9f, + // Block 0x1c, offset 0x700 + 0x700: 0x9f, 0x701: 0x9f, 0x702: 0x9f, 0x703: 0x9f, 0x704: 0x9f, 0x705: 0x9f, 0x706: 0x9f, 0x707: 0x9f, + 0x708: 0x9f, 0x709: 0x9f, 0x70a: 0x9f, 0x70b: 0x9f, 0x70c: 0x9f, 
0x70d: 0x9f, 0x70e: 0x9f, 0x70f: 0x9f, + 0x710: 0x9f, 0x711: 0x9f, 0x712: 0x9f, 0x713: 0x9f, 0x714: 0x9f, 0x715: 0x9f, 0x716: 0x9f, 0x717: 0x9f, + 0x718: 0x9f, 0x719: 0x9f, 0x71a: 0x9f, 0x71b: 0x9f, 0x71c: 0x9f, 0x71d: 0x9f, 0x71e: 0x9f, 0x71f: 0x9f, + 0x720: 0x9f, 0x721: 0x9f, 0x722: 0x9f, 0x723: 0x9f, 0x724: 0x9f, 0x725: 0x9f, 0x726: 0x9f, 0x727: 0x9f, + 0x728: 0x9f, 0x729: 0x9f, 0x72a: 0x9f, 0x72b: 0x9f, 0x72c: 0x9f, 0x72d: 0x9f, 0x72e: 0x9f, 0x72f: 0x9f, + 0x730: 0x9f, 0x731: 0x9f, 0x732: 0x9f, 0x733: 0x9f, 0x734: 0x9f, 0x735: 0x9f, 0x736: 0x9f, 0x737: 0x9f, + 0x738: 0x9f, 0x739: 0x9f, 0x73a: 0x177, 0x73b: 0xba, 0x73c: 0xba, 0x73d: 0xba, 0x73e: 0xba, 0x73f: 0xba, + // Block 0x1d, offset 0x740 + 0x740: 0xba, 0x741: 0xba, 0x742: 0xba, 0x743: 0xba, 0x744: 0xba, 0x745: 0xba, 0x746: 0xba, 0x747: 0xba, + 0x748: 0xba, 0x749: 0xba, 0x74a: 0xba, 0x74b: 0xba, 0x74c: 0xba, 0x74d: 0xba, 0x74e: 0xba, 0x74f: 0xba, + 0x750: 0xba, 0x751: 0xba, 0x752: 0xba, 0x753: 0xba, 0x754: 0xba, 0x755: 0xba, 0x756: 0xba, 0x757: 0xba, + 0x758: 0xba, 0x759: 0xba, 0x75a: 0xba, 0x75b: 0xba, 0x75c: 0xba, 0x75d: 0xba, 0x75e: 0xba, 0x75f: 0xba, + 0x760: 0x75, 0x761: 0x76, 0x762: 0x77, 0x763: 0x178, 0x764: 0x78, 0x765: 0x79, 0x766: 0x179, 0x767: 0x7a, + 0x768: 0x7b, 0x769: 0xba, 0x76a: 0xba, 0x76b: 0xba, 0x76c: 0xba, 0x76d: 0xba, 0x76e: 0xba, 0x76f: 0xba, + 0x770: 0xba, 0x771: 0xba, 0x772: 0xba, 0x773: 0xba, 0x774: 0xba, 0x775: 0xba, 0x776: 0xba, 0x777: 0xba, + 0x778: 0xba, 0x779: 0xba, 0x77a: 0xba, 0x77b: 0xba, 0x77c: 0xba, 0x77d: 0xba, 0x77e: 0xba, 0x77f: 0xba, + // Block 0x1e, offset 0x780 + 0x790: 0x0d, 0x791: 0x0e, 0x792: 0x0f, 0x793: 0x10, 0x794: 0x11, 0x795: 0x0b, 0x796: 0x12, 0x797: 0x07, + 0x798: 0x13, 0x799: 0x0b, 0x79a: 0x0b, 0x79b: 0x14, 0x79c: 0x0b, 0x79d: 0x15, 0x79e: 0x16, 0x79f: 0x17, + 0x7a0: 0x07, 0x7a1: 0x07, 0x7a2: 0x07, 0x7a3: 0x07, 0x7a4: 0x07, 0x7a5: 0x07, 0x7a6: 0x07, 0x7a7: 0x07, + 0x7a8: 0x07, 0x7a9: 0x07, 0x7aa: 0x18, 0x7ab: 0x19, 0x7ac: 0x1a, 0x7ad: 0x0b, 0x7ae: 0x0b, 0x7af: 0x1b, + 0x7b0: 0x0b, 0x7b1: 0x0b, 0x7b2: 0x0b, 0x7b3: 0x0b, 0x7b4: 0x0b, 0x7b5: 0x0b, 0x7b6: 0x0b, 0x7b7: 0x0b, + 0x7b8: 0x0b, 0x7b9: 0x0b, 0x7ba: 0x0b, 0x7bb: 0x0b, 0x7bc: 0x0b, 0x7bd: 0x0b, 0x7be: 0x0b, 0x7bf: 0x0b, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x0b, 0x7c1: 0x0b, 0x7c2: 0x0b, 0x7c3: 0x0b, 0x7c4: 0x0b, 0x7c5: 0x0b, 0x7c6: 0x0b, 0x7c7: 0x0b, + 0x7c8: 0x0b, 0x7c9: 0x0b, 0x7ca: 0x0b, 0x7cb: 0x0b, 0x7cc: 0x0b, 0x7cd: 0x0b, 0x7ce: 0x0b, 0x7cf: 0x0b, + 0x7d0: 0x0b, 0x7d1: 0x0b, 0x7d2: 0x0b, 0x7d3: 0x0b, 0x7d4: 0x0b, 0x7d5: 0x0b, 0x7d6: 0x0b, 0x7d7: 0x0b, + 0x7d8: 0x0b, 0x7d9: 0x0b, 0x7da: 0x0b, 0x7db: 0x0b, 0x7dc: 0x0b, 0x7dd: 0x0b, 0x7de: 0x0b, 0x7df: 0x0b, + 0x7e0: 0x0b, 0x7e1: 0x0b, 0x7e2: 0x0b, 0x7e3: 0x0b, 0x7e4: 0x0b, 0x7e5: 0x0b, 0x7e6: 0x0b, 0x7e7: 0x0b, + 0x7e8: 0x0b, 0x7e9: 0x0b, 0x7ea: 0x0b, 0x7eb: 0x0b, 0x7ec: 0x0b, 0x7ed: 0x0b, 0x7ee: 0x0b, 0x7ef: 0x0b, + 0x7f0: 0x0b, 0x7f1: 0x0b, 0x7f2: 0x0b, 0x7f3: 0x0b, 0x7f4: 0x0b, 0x7f5: 0x0b, 0x7f6: 0x0b, 0x7f7: 0x0b, + 0x7f8: 0x0b, 0x7f9: 0x0b, 0x7fa: 0x0b, 0x7fb: 0x0b, 0x7fc: 0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, + // Block 0x20, offset 0x800 + 0x800: 0x17a, 0x801: 0x17b, 0x802: 0xba, 0x803: 0xba, 0x804: 0x17c, 0x805: 0x17c, 0x806: 0x17c, 0x807: 0x17d, + 0x808: 0xba, 0x809: 0xba, 0x80a: 0xba, 0x80b: 0xba, 0x80c: 0xba, 0x80d: 0xba, 0x80e: 0xba, 0x80f: 0xba, + 0x810: 0xba, 0x811: 0xba, 0x812: 0xba, 0x813: 0xba, 0x814: 0xba, 0x815: 0xba, 0x816: 0xba, 0x817: 0xba, + 0x818: 0xba, 0x819: 0xba, 0x81a: 0xba, 0x81b: 0xba, 0x81c: 0xba, 0x81d: 0xba, 0x81e: 0xba, 0x81f: 
0xba, + 0x820: 0xba, 0x821: 0xba, 0x822: 0xba, 0x823: 0xba, 0x824: 0xba, 0x825: 0xba, 0x826: 0xba, 0x827: 0xba, + 0x828: 0xba, 0x829: 0xba, 0x82a: 0xba, 0x82b: 0xba, 0x82c: 0xba, 0x82d: 0xba, 0x82e: 0xba, 0x82f: 0xba, + 0x830: 0xba, 0x831: 0xba, 0x832: 0xba, 0x833: 0xba, 0x834: 0xba, 0x835: 0xba, 0x836: 0xba, 0x837: 0xba, + 0x838: 0xba, 0x839: 0xba, 0x83a: 0xba, 0x83b: 0xba, 0x83c: 0xba, 0x83d: 0xba, 0x83e: 0xba, 0x83f: 0xba, + // Block 0x21, offset 0x840 + 0x840: 0x0b, 0x841: 0x0b, 0x842: 0x0b, 0x843: 0x0b, 0x844: 0x0b, 0x845: 0x0b, 0x846: 0x0b, 0x847: 0x0b, + 0x848: 0x0b, 0x849: 0x0b, 0x84a: 0x0b, 0x84b: 0x0b, 0x84c: 0x0b, 0x84d: 0x0b, 0x84e: 0x0b, 0x84f: 0x0b, + 0x850: 0x0b, 0x851: 0x0b, 0x852: 0x0b, 0x853: 0x0b, 0x854: 0x0b, 0x855: 0x0b, 0x856: 0x0b, 0x857: 0x0b, + 0x858: 0x0b, 0x859: 0x0b, 0x85a: 0x0b, 0x85b: 0x0b, 0x85c: 0x0b, 0x85d: 0x0b, 0x85e: 0x0b, 0x85f: 0x0b, + 0x860: 0x1e, 0x861: 0x0b, 0x862: 0x0b, 0x863: 0x0b, 0x864: 0x0b, 0x865: 0x0b, 0x866: 0x0b, 0x867: 0x0b, + 0x868: 0x0b, 0x869: 0x0b, 0x86a: 0x0b, 0x86b: 0x0b, 0x86c: 0x0b, 0x86d: 0x0b, 0x86e: 0x0b, 0x86f: 0x0b, + 0x870: 0x0b, 0x871: 0x0b, 0x872: 0x0b, 0x873: 0x0b, 0x874: 0x0b, 0x875: 0x0b, 0x876: 0x0b, 0x877: 0x0b, + 0x878: 0x0b, 0x879: 0x0b, 0x87a: 0x0b, 0x87b: 0x0b, 0x87c: 0x0b, 0x87d: 0x0b, 0x87e: 0x0b, 0x87f: 0x0b, + // Block 0x22, offset 0x880 + 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, + 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, +} + +// idnaSparseOffset: 258 entries, 516 bytes +var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x34, 0x3f, 0x4b, 0x4f, 0x5e, 0x63, 0x6b, 0x77, 0x85, 0x93, 0x98, 0xa1, 0xb1, 0xbf, 0xcc, 0xd8, 0xe9, 0xf3, 0xfa, 0x107, 0x118, 0x11f, 0x12a, 0x139, 0x147, 0x151, 0x153, 0x158, 0x15b, 0x15e, 0x160, 0x16c, 0x177, 0x17f, 0x185, 0x18b, 0x190, 0x195, 0x198, 0x19c, 0x1a2, 0x1a7, 0x1b3, 0x1bd, 0x1c3, 0x1d4, 0x1de, 0x1e1, 0x1e9, 0x1ec, 0x1f9, 0x201, 0x205, 0x20c, 0x214, 0x224, 0x230, 0x232, 0x23c, 0x248, 0x254, 0x260, 0x268, 0x26d, 0x277, 0x288, 0x28c, 0x297, 0x29b, 0x2a4, 0x2ac, 0x2b2, 0x2b7, 0x2ba, 0x2bd, 0x2c1, 0x2c7, 0x2cb, 0x2cf, 0x2d5, 0x2dc, 0x2e2, 0x2ea, 0x2f1, 0x2fc, 0x306, 0x30a, 0x30d, 0x313, 0x317, 0x319, 0x31c, 0x31e, 0x321, 0x32b, 0x32e, 0x33d, 0x341, 0x346, 0x349, 0x34d, 0x352, 0x357, 0x35d, 0x363, 0x372, 0x378, 0x37c, 0x38b, 0x390, 0x398, 0x3a2, 0x3ad, 0x3b5, 0x3c6, 0x3cf, 0x3df, 0x3ec, 0x3f6, 0x3fb, 0x408, 0x40c, 0x411, 0x413, 0x417, 0x419, 0x41d, 0x426, 0x42c, 0x430, 0x440, 0x44a, 0x44f, 0x452, 0x458, 0x45f, 0x464, 0x468, 0x46e, 0x473, 0x47c, 0x481, 0x487, 0x48e, 0x495, 0x49c, 0x4a0, 0x4a5, 0x4a8, 0x4ad, 0x4b9, 0x4bf, 0x4c4, 0x4cb, 0x4d3, 0x4d8, 0x4dc, 0x4ec, 0x4f3, 0x4f7, 0x4fb, 0x502, 0x504, 0x507, 0x50a, 0x50e, 0x512, 0x518, 0x521, 0x52d, 0x534, 0x53d, 0x545, 0x54c, 0x55a, 0x567, 0x574, 0x57d, 0x581, 0x58f, 0x597, 0x5a2, 0x5ab, 0x5b1, 0x5b9, 0x5c2, 0x5cc, 0x5cf, 0x5db, 0x5de, 0x5e3, 0x5e6, 0x5f0, 0x5f9, 0x605, 0x608, 0x60d, 0x610, 0x613, 0x616, 0x61d, 0x624, 0x628, 0x633, 0x636, 0x63c, 0x641, 0x645, 0x648, 0x64b, 0x64e, 0x653, 0x65d, 0x660, 0x664, 0x673, 0x67f, 0x683, 0x688, 0x68d, 0x691, 0x696, 0x69f, 0x6aa, 0x6b0, 0x6b8, 0x6bc, 0x6c0, 0x6c6, 0x6cc, 0x6d1, 0x6d4, 0x6e2, 0x6e9, 0x6ec, 0x6ef, 0x6f3, 0x6f9, 0x6fe, 0x708, 0x70d, 0x710, 0x713, 0x716, 0x719, 0x71d, 0x720, 0x730, 0x741, 0x746, 0x748, 0x74a} + +// idnaSparseValues: 1869 entries, 7476 bytes +var idnaSparseValues = [1869]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 
0x07}, + {value: 0xe105, lo: 0x80, hi: 0x96}, + {value: 0x0018, lo: 0x97, hi: 0x97}, + {value: 0xe105, lo: 0x98, hi: 0x9e}, + {value: 0x001f, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbf}, + // Block 0x1, offset 0x8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0xe01d, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0335, lo: 0x83, hi: 0x83}, + {value: 0x034d, lo: 0x84, hi: 0x84}, + {value: 0x0365, lo: 0x85, hi: 0x85}, + {value: 0xe00d, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0xe00d, lo: 0x88, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x89}, + {value: 0xe00d, lo: 0x8a, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe00d, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0x8d}, + {value: 0xe00d, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0xbf}, + // Block 0x2, offset 0x19 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x0249, lo: 0xb0, hi: 0xb0}, + {value: 0x037d, lo: 0xb1, hi: 0xb1}, + {value: 0x0259, lo: 0xb2, hi: 0xb2}, + {value: 0x0269, lo: 0xb3, hi: 0xb3}, + {value: 0x034d, lo: 0xb4, hi: 0xb4}, + {value: 0x0395, lo: 0xb5, hi: 0xb5}, + {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, + {value: 0x0279, lo: 0xb7, hi: 0xb7}, + {value: 0x0289, lo: 0xb8, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbf}, + // Block 0x3, offset 0x25 + {value: 0x0000, lo: 0x01}, + {value: 0x3308, lo: 0x80, hi: 0xbf}, + // Block 0x4, offset 0x27 + {value: 0x0000, lo: 0x04}, + {value: 0x03f5, lo: 0x80, hi: 0x8f}, + {value: 0xe105, lo: 0x90, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x5, offset 0x2c + {value: 0x0000, lo: 0x07}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x0545, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x0008, lo: 0x99, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x6, offset 0x34 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0401, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x88}, + {value: 0x0018, lo: 0x89, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x3308, lo: 0x91, hi: 0xbd}, + {value: 0x0818, lo: 0xbe, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x7, offset 0x3f + {value: 0x0000, lo: 0x0b}, + {value: 0x0818, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x82}, + {value: 0x0818, lo: 0x83, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x85}, + {value: 0x0818, lo: 0x86, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0808, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x8, offset 0x4b + {value: 0x0000, lo: 0x03}, + {value: 0x0a08, lo: 0x80, hi: 0x87}, + {value: 0x0c08, lo: 0x88, hi: 0x99}, + {value: 0x0a08, lo: 0x9a, hi: 0xbf}, + // Block 0x9, offset 0x4f + {value: 0x0000, lo: 0x0e}, + {value: 0x3308, lo: 0x80, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8c}, + {value: 0x0c08, lo: 0x8d, hi: 0x8d}, + {value: 0x0a08, lo: 0x8e, hi: 0x98}, + {value: 0x0c08, lo: 0x99, hi: 0x9b}, + {value: 0x0a08, lo: 0x9c, hi: 0xaa}, + {value: 0x0c08, lo: 0xab, hi: 0xac}, + {value: 0x0a08, lo: 0xad, hi: 
0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb1}, + {value: 0x0a08, lo: 0xb2, hi: 0xb2}, + {value: 0x0c08, lo: 0xb3, hi: 0xb4}, + {value: 0x0a08, lo: 0xb5, hi: 0xb7}, + {value: 0x0c08, lo: 0xb8, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbf}, + // Block 0xa, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xb0}, + {value: 0x0808, lo: 0xb1, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xb, offset 0x63 + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0xc, offset 0x6b + {value: 0x0000, lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x99}, + {value: 0x0808, lo: 0x9a, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa3}, + {value: 0x0808, lo: 0xa4, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa7}, + {value: 0x0808, lo: 0xa8, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0818, lo: 0xb0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd, offset 0x77 + {value: 0x0000, lo: 0x0d}, + {value: 0x0c08, lo: 0x80, hi: 0x80}, + {value: 0x0a08, lo: 0x81, hi: 0x85}, + {value: 0x0c08, lo: 0x86, hi: 0x87}, + {value: 0x0a08, lo: 0x88, hi: 0x88}, + {value: 0x0c08, lo: 0x89, hi: 0x89}, + {value: 0x0a08, lo: 0x8a, hi: 0x93}, + {value: 0x0c08, lo: 0x94, hi: 0x94}, + {value: 0x0a08, lo: 0x95, hi: 0x95}, + {value: 0x0808, lo: 0x96, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xe, offset 0x85 + {value: 0x0000, lo: 0x0d}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0a08, lo: 0xa0, hi: 0xa9}, + {value: 0x0c08, lo: 0xaa, hi: 0xac}, + {value: 0x0808, lo: 0xad, hi: 0xad}, + {value: 0x0c08, lo: 0xae, hi: 0xae}, + {value: 0x0a08, lo: 0xaf, hi: 0xb0}, + {value: 0x0c08, lo: 0xb1, hi: 0xb2}, + {value: 0x0a08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0a08, lo: 0xb6, hi: 0xb8}, + {value: 0x0c08, lo: 0xb9, hi: 0xb9}, + {value: 0x0a08, lo: 0xba, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0xf, offset 0x93 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa1}, + {value: 0x0840, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xbf}, + // Block 0x10, offset 0x98 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x11, offset 0xa1 + {value: 0x0000, lo: 0x0f}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x85}, + {value: 0x3008, lo: 0x86, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x3008, lo: 0x8a, hi: 0x8c}, + {value: 0x3b08, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + 
{value: 0x0018, lo: 0xb0, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x12, offset 0xb1 + {value: 0x0000, lo: 0x0d}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xa9}, + {value: 0x0008, lo: 0xaa, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x13, offset 0xbf + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x14, offset 0xcc + {value: 0x0000, lo: 0x0b}, + {value: 0x0040, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x15, offset 0xd8 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x89}, + {value: 0x3b08, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x3008, lo: 0x98, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x16, offset 0xe9 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb2}, + {value: 0x08f1, lo: 0xb3, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb9}, + {value: 0x3b08, lo: 0xba, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x0018, lo: 0xbf, hi: 0xbf}, + // Block 0x17, offset 0xf3 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x8e}, + {value: 0x0018, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0xbf}, + // Block 0x18, offset 0xfa + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0961, lo: 0x9c, hi: 0x9c}, + {value: 0x0999, lo: 0x9d, hi: 0x9d}, + {value: 0x0008, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xbf}, + // Block 0x19, offset 0x107 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8a}, + {value: 0x0008, lo: 0x8b, hi: 0x8b}, + {value: 0xe03d, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x1a, offset 0x118 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0xbf}, + // Block 0x1b, offset 0x11f + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0x12a + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x3008, lo: 0x96, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x3308, lo: 0x9e, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xa1}, + {value: 0x3008, lo: 0xa2, hi: 0xa4}, + {value: 0x0008, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xbf}, + // Block 0x1d, offset 0x139 + {value: 0x0000, lo: 0x0d}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x8c}, + {value: 0x3308, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x8e}, + {value: 0x3008, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x3008, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0x1e, offset 0x147 + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x86}, + {value: 0x055d, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8c}, + {value: 0x055d, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbb}, + {value: 0xe105, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0x1f, offset 0x151 + {value: 0x0000, lo: 0x01}, + {value: 0x0018, lo: 0x80, hi: 0xbf}, + // Block 0x20, offset 0x153 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa0}, + {value: 0x2018, lo: 0xa1, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x21, offset 0x158 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa7}, + {value: 0x2018, lo: 0xa8, hi: 0xbf}, + // Block 0x22, offset 0x15b + {value: 0x0000, lo: 0x02}, + {value: 0x2018, lo: 0x80, 
hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0xbf}, + // Block 0x23, offset 0x15e + {value: 0x0000, lo: 0x01}, + {value: 0x0008, lo: 0x80, hi: 0xbf}, + // Block 0x24, offset 0x160 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x99}, + {value: 0x0008, lo: 0x9a, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x25, offset 0x16c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x26, offset 0x177 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x27, offset 0x17f + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0x0008, lo: 0x92, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbf}, + // Block 0x28, offset 0x185 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x29, offset 0x18b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2a, offset 0x190 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x2b, offset 0x195 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x2c, offset 0x198 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xbf}, + // Block 0x2d, offset 0x19c + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x2e, offset 0x1a2 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0x2f, offset 0x1a7 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8d}, + {value: 0x0008, lo: 0x8e, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x3b08, lo: 0x94, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3b08, lo: 0xb4, hi: 0xb4}, + 
{value: 0x0018, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x30, offset 0x1b3 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x31, offset 0x1bd + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xb3}, + {value: 0x3340, lo: 0xb4, hi: 0xb5}, + {value: 0x3008, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbf}, + // Block 0x32, offset 0x1c3 + {value: 0x0000, lo: 0x10}, + {value: 0x3008, lo: 0x80, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x3008, lo: 0x87, hi: 0x88}, + {value: 0x3308, lo: 0x89, hi: 0x91}, + {value: 0x3b08, lo: 0x92, hi: 0x92}, + {value: 0x3308, lo: 0x93, hi: 0x93}, + {value: 0x0018, lo: 0x94, hi: 0x96}, + {value: 0x0008, lo: 0x97, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x33, offset 0x1d4 + {value: 0x0000, lo: 0x09}, + {value: 0x0018, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x86}, + {value: 0x0218, lo: 0x87, hi: 0x87}, + {value: 0x0018, lo: 0x88, hi: 0x8a}, + {value: 0x33c0, lo: 0x8b, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0208, lo: 0xa0, hi: 0xbf}, + // Block 0x34, offset 0x1de + {value: 0x0000, lo: 0x02}, + {value: 0x0208, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x35, offset 0x1e1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0208, lo: 0x87, hi: 0xa8}, + {value: 0x3308, lo: 0xa9, hi: 0xa9}, + {value: 0x0208, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x36, offset 0x1e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0x37, offset 0x1ec + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb8}, + {value: 0x3308, lo: 0xb9, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x38, offset 0x1f9 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0008, lo: 0x86, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0x39, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x3a, offset 0x205 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 
0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0028, lo: 0x9a, hi: 0x9a}, + {value: 0x0040, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0xbf}, + // Block 0x3b, offset 0x20c + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x3308, lo: 0x97, hi: 0x98}, + {value: 0x3008, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x3c, offset 0x214 + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x94}, + {value: 0x3008, lo: 0x95, hi: 0x95}, + {value: 0x3308, lo: 0x96, hi: 0x96}, + {value: 0x3008, lo: 0x97, hi: 0x97}, + {value: 0x3308, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3b08, lo: 0xa0, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xac}, + {value: 0x3008, lo: 0xad, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0x3d, offset 0x224 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xbd}, + {value: 0x3318, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x3e, offset 0x230 + {value: 0x0000, lo: 0x01}, + {value: 0x0040, lo: 0x80, hi: 0xbf}, + // Block 0x3f, offset 0x232 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x83}, + {value: 0x3008, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x40, offset 0x23c + {value: 0x0000, lo: 0x0b}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x3808, lo: 0x84, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x41, offset 0x248 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3808, lo: 0xaa, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xbf}, + // Block 0x42, offset 0x254 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa9}, + {value: 0x3008, lo: 0xaa, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3808, lo: 
0xb2, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbf}, + // Block 0x43, offset 0x260 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x3008, lo: 0xa4, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbf}, + // Block 0x44, offset 0x268 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x45, offset 0x26d + {value: 0x0000, lo: 0x09}, + {value: 0x0e29, lo: 0x80, hi: 0x80}, + {value: 0x0e41, lo: 0x81, hi: 0x81}, + {value: 0x0e59, lo: 0x82, hi: 0x82}, + {value: 0x0e71, lo: 0x83, hi: 0x83}, + {value: 0x0e89, lo: 0x84, hi: 0x85}, + {value: 0x0ea1, lo: 0x86, hi: 0x86}, + {value: 0x0eb9, lo: 0x87, hi: 0x87}, + {value: 0x057d, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0x46, offset 0x277 + {value: 0x0000, lo: 0x10}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x92}, + {value: 0x0018, lo: 0x93, hi: 0x93}, + {value: 0x3308, lo: 0x94, hi: 0xa0}, + {value: 0x3008, lo: 0xa1, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa8}, + {value: 0x0008, lo: 0xa9, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x0008, lo: 0xae, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x47, offset 0x288 + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0x48, offset 0x28c + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x87}, + {value: 0xe045, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0xe045, lo: 0x98, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0xe045, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb7}, + {value: 0xe045, lo: 0xb8, hi: 0xbf}, + // Block 0x49, offset 0x297 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x3318, lo: 0x90, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbf}, + // Block 0x4a, offset 0x29b + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x88}, + {value: 0x24c1, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x4b, offset 0x2a4 + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x24f1, lo: 0xac, hi: 0xac}, + {value: 0x2529, lo: 0xad, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xae}, + {value: 0x2579, lo: 0xaf, hi: 0xaf}, + {value: 0x25b1, lo: 0xb0, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0x4c, offset 0x2ac + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x9f}, + {value: 0x0080, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xad}, + {value: 0x0080, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x4d, offset 0x2b2 + {value: 0x0000, lo: 0x04}, + 
{value: 0x0018, lo: 0x80, hi: 0xa8}, + {value: 0x09c5, lo: 0xa9, hi: 0xa9}, + {value: 0x09e5, lo: 0xaa, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xbf}, + // Block 0x4e, offset 0x2b7 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x4f, offset 0x2ba + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xbf}, + // Block 0x50, offset 0x2bd + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x28c1, lo: 0x8c, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0xbf}, + // Block 0x51, offset 0x2c1 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0e66, lo: 0xb4, hi: 0xb4}, + {value: 0x292a, lo: 0xb5, hi: 0xb5}, + {value: 0x0e86, lo: 0xb6, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x52, offset 0x2c7 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x9b}, + {value: 0x2941, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0xbf}, + // Block 0x53, offset 0x2cb + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0x54, offset 0x2cf + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0018, lo: 0x98, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbc}, + {value: 0x0018, lo: 0xbd, hi: 0xbf}, + // Block 0x55, offset 0x2d5 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xab}, + {value: 0x0018, lo: 0xac, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x56, offset 0x2dc + {value: 0x0000, lo: 0x05}, + {value: 0xe185, lo: 0x80, hi: 0x8f}, + {value: 0x03f5, lo: 0x90, hi: 0x9f}, + {value: 0x0ea5, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x57, offset 0x2e2 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xa6}, + {value: 0x0008, lo: 0xa7, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xac}, + {value: 0x0008, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x58, offset 0x2ea + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xae}, + {value: 0xe075, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0x59, offset 0x2f1 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x0008, lo: 0xb8, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x5a, offset 0x2fc + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xbf}, + // Block 0x5b, offset 0x306 + {value: 0x0000, lo: 0x03}, 
+ {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0008, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x5c, offset 0x30a + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0xbf}, + // Block 0x5d, offset 0x30d + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9e}, + {value: 0x0edd, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0x5e, offset 0x313 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xb2}, + {value: 0x0efd, lo: 0xb3, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0x5f, offset 0x317 + {value: 0x0020, lo: 0x01}, + {value: 0x0f1d, lo: 0x80, hi: 0xbf}, + // Block 0x60, offset 0x319 + {value: 0x0020, lo: 0x02}, + {value: 0x171d, lo: 0x80, hi: 0x8f}, + {value: 0x18fd, lo: 0x90, hi: 0xbf}, + // Block 0x61, offset 0x31c + {value: 0x0020, lo: 0x01}, + {value: 0x1efd, lo: 0x80, hi: 0xbf}, + // Block 0x62, offset 0x31e + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0xbf}, + // Block 0x63, offset 0x321 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x98}, + {value: 0x3308, lo: 0x99, hi: 0x9a}, + {value: 0x29e2, lo: 0x9b, hi: 0x9b}, + {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, + {value: 0x0008, lo: 0x9d, hi: 0x9e}, + {value: 0x2a31, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0008, lo: 0xa1, hi: 0xbf}, + // Block 0x64, offset 0x32b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xbe}, + {value: 0x2a69, lo: 0xbf, hi: 0xbf}, + // Block 0x65, offset 0x32e + {value: 0x0000, lo: 0x0e}, + {value: 0x0040, lo: 0x80, hi: 0x84}, + {value: 0x0008, lo: 0x85, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xb0}, + {value: 0x2a1d, lo: 0xb1, hi: 0xb1}, + {value: 0x2a3d, lo: 0xb2, hi: 0xb2}, + {value: 0x2a5d, lo: 0xb3, hi: 0xb3}, + {value: 0x2a7d, lo: 0xb4, hi: 0xb4}, + {value: 0x2a5d, lo: 0xb5, hi: 0xb5}, + {value: 0x2a9d, lo: 0xb6, hi: 0xb6}, + {value: 0x2abd, lo: 0xb7, hi: 0xb7}, + {value: 0x2add, lo: 0xb8, hi: 0xb9}, + {value: 0x2afd, lo: 0xba, hi: 0xbb}, + {value: 0x2b1d, lo: 0xbc, hi: 0xbd}, + {value: 0x2afd, lo: 0xbe, hi: 0xbf}, + // Block 0x66, offset 0x33d + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x67, offset 0x341 + {value: 0x0030, lo: 0x04}, + {value: 0x2aa2, lo: 0x80, hi: 0x9d}, + {value: 0x305a, lo: 0x9e, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x30a2, lo: 0xa0, hi: 0xbf}, + // Block 0x68, offset 0x346 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x69, offset 0x349 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0040, lo: 0x8d, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0x6a, offset 0x34d + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0x6b, offset 0x352 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xbf}, + // Block 0x6c, offset 0x357 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 
0x0018, lo: 0xa6, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb1}, + {value: 0x0018, lo: 0xb2, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x6d, offset 0x35d + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0xb6}, + {value: 0x0008, lo: 0xb7, hi: 0xb7}, + {value: 0x2009, lo: 0xb8, hi: 0xb8}, + {value: 0x6e89, lo: 0xb9, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xbf}, + // Block 0x6e, offset 0x363 + {value: 0x0000, lo: 0x0e}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x3308, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x3308, lo: 0x8b, hi: 0x8b}, + {value: 0x0008, lo: 0x8c, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa6}, + {value: 0x3008, lo: 0xa7, hi: 0xa7}, + {value: 0x0018, lo: 0xa8, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x6f, offset 0x372 + {value: 0x0000, lo: 0x05}, + {value: 0x0208, lo: 0x80, hi: 0xb1}, + {value: 0x0108, lo: 0xb2, hi: 0xb2}, + {value: 0x0008, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0x70, offset 0x378 + {value: 0x0000, lo: 0x03}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xbf}, + // Block 0x71, offset 0x37c + {value: 0x0000, lo: 0x0e}, + {value: 0x3008, lo: 0x80, hi: 0x83}, + {value: 0x3b08, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8d}, + {value: 0x0018, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xba}, + {value: 0x0008, lo: 0xbb, hi: 0xbb}, + {value: 0x0018, lo: 0xbc, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x72, offset 0x38b + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x73, offset 0x390 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x3308, lo: 0x87, hi: 0x91}, + {value: 0x3008, lo: 0x92, hi: 0x92}, + {value: 0x3808, lo: 0x93, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0x74, offset 0x398 + {value: 0x0000, lo: 0x09}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x3008, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb9}, + {value: 0x3008, lo: 0xba, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbf}, + // Block 0x75, offset 0x3a2 + {value: 0x0000, lo: 0x0a}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x3ad + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xa8}, + {value: 
0x3308, lo: 0xa9, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb0}, + {value: 0x3308, lo: 0xb1, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x77, offset 0x3b5 + {value: 0x0000, lo: 0x10}, + {value: 0x0008, lo: 0x80, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8c}, + {value: 0x3008, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xb9}, + {value: 0x0008, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbc}, + {value: 0x3008, lo: 0xbd, hi: 0xbd}, + {value: 0x0008, lo: 0xbe, hi: 0xbf}, + // Block 0x78, offset 0x3c6 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb0}, + {value: 0x0008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb4}, + {value: 0x0008, lo: 0xb5, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb8}, + {value: 0x0008, lo: 0xb9, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbf}, + // Block 0x79, offset 0x3cf + {value: 0x0000, lo: 0x0f}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x9a}, + {value: 0x0008, lo: 0x9b, hi: 0x9d}, + {value: 0x0018, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xaa}, + {value: 0x3008, lo: 0xab, hi: 0xab}, + {value: 0x3308, lo: 0xac, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb5}, + {value: 0x3b08, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x7a, offset 0x3df + {value: 0x0000, lo: 0x0c}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x88}, + {value: 0x0008, lo: 0x89, hi: 0x8e}, + {value: 0x0040, lo: 0x8f, hi: 0x90}, + {value: 0x0008, lo: 0x91, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x7b, offset 0x3ec + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x4465, lo: 0x9c, hi: 0x9c}, + {value: 0x447d, lo: 0x9d, hi: 0x9d}, + {value: 0x2971, lo: 0x9e, hi: 0x9e}, + {value: 0xe06d, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa5}, + {value: 0x0040, lo: 0xa6, hi: 0xaf}, + {value: 0x4495, lo: 0xb0, hi: 0xbf}, + // Block 0x7c, offset 0x3f6 + {value: 0x0000, lo: 0x04}, + {value: 0x44b5, lo: 0x80, hi: 0x8f}, + {value: 0x44d5, lo: 0x90, hi: 0x9f}, + {value: 0x44f5, lo: 0xa0, hi: 0xaf}, + {value: 0x44d5, lo: 0xb0, hi: 0xbf}, + // Block 0x7d, offset 0x3fb + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0xa2}, + {value: 0x3008, lo: 0xa3, hi: 0xa4}, + {value: 0x3308, lo: 0xa5, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa7}, + {value: 0x3308, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xaa}, + {value: 0x0018, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3b08, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 
0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0x7e, offset 0x408 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0x7f, offset 0x40c + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x80, offset 0x411 + {value: 0x0020, lo: 0x01}, + {value: 0x4515, lo: 0x80, hi: 0xbf}, + // Block 0x81, offset 0x413 + {value: 0x0020, lo: 0x03}, + {value: 0x4d15, lo: 0x80, hi: 0x94}, + {value: 0x4ad5, lo: 0x95, hi: 0x95}, + {value: 0x4fb5, lo: 0x96, hi: 0xbf}, + // Block 0x82, offset 0x417 + {value: 0x0020, lo: 0x01}, + {value: 0x54f5, lo: 0x80, hi: 0xbf}, + // Block 0x83, offset 0x419 + {value: 0x0020, lo: 0x03}, + {value: 0x5cf5, lo: 0x80, hi: 0x84}, + {value: 0x5655, lo: 0x85, hi: 0x85}, + {value: 0x5d95, lo: 0x86, hi: 0xbf}, + // Block 0x84, offset 0x41d + {value: 0x0020, lo: 0x08}, + {value: 0x6b55, lo: 0x80, hi: 0x8f}, + {value: 0x6d15, lo: 0x90, hi: 0x90}, + {value: 0x6d55, lo: 0x91, hi: 0xab}, + {value: 0x6ea1, lo: 0xac, hi: 0xac}, + {value: 0x70b5, lo: 0xad, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x70d5, lo: 0xb0, hi: 0xbf}, + // Block 0x85, offset 0x426 + {value: 0x0020, lo: 0x05}, + {value: 0x72d5, lo: 0x80, hi: 0xad}, + {value: 0x6535, lo: 0xae, hi: 0xae}, + {value: 0x7895, lo: 0xaf, hi: 0xb5}, + {value: 0x6f55, lo: 0xb6, hi: 0xb6}, + {value: 0x7975, lo: 0xb7, hi: 0xbf}, + // Block 0x86, offset 0x42c + {value: 0x0028, lo: 0x03}, + {value: 0x7c21, lo: 0x80, hi: 0x82}, + {value: 0x7be1, lo: 0x83, hi: 0x83}, + {value: 0x7c99, lo: 0x84, hi: 0xbf}, + // Block 0x87, offset 0x430 + {value: 0x0038, lo: 0x0f}, + {value: 0x9db1, lo: 0x80, hi: 0x83}, + {value: 0x9e59, lo: 0x84, hi: 0x85}, + {value: 0x9e91, lo: 0x86, hi: 0x87}, + {value: 0x9ec9, lo: 0x88, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x91}, + {value: 0xa089, lo: 0x92, hi: 0x97}, + {value: 0xa1a1, lo: 0x98, hi: 0x9c}, + {value: 0xa281, lo: 0x9d, hi: 0xb3}, + {value: 0x9d41, lo: 0xb4, hi: 0xb4}, + {value: 0x9db1, lo: 0xb5, hi: 0xb5}, + {value: 0xa789, lo: 0xb6, hi: 0xbb}, + {value: 0xa869, lo: 0xbc, hi: 0xbc}, + {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, + {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, + // Block 0x88, offset 0x440 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8c}, + {value: 0x0008, lo: 0x8d, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbb}, + {value: 0x0008, lo: 0xbc, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0x89, offset 0x44a + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0x8a, offset 0x44f + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x452 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x82}, + {value: 0x0040, lo: 0x83, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0x8c, offset 0x458 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x8e}, + {value: 
0x0040, lo: 0x8f, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0x8d, offset 0x45f + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x0040, lo: 0xbe, hi: 0xbf}, + // Block 0x8e, offset 0x464 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9c}, + {value: 0x0040, lo: 0x9d, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x8f, offset 0x468 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x90}, + {value: 0x0040, lo: 0x91, hi: 0x9f}, + {value: 0x3308, lo: 0xa0, hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x90, offset 0x46e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x91, offset 0x473 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x81}, + {value: 0x0008, lo: 0x82, hi: 0x89}, + {value: 0x0018, lo: 0x8a, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbf}, + // Block 0x92, offset 0x47c + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0x93, offset 0x481 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0xbf}, + // Block 0x94, offset 0x487 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x97}, + {value: 0x8ad5, lo: 0x98, hi: 0x9f}, + {value: 0x8aed, lo: 0xa0, hi: 0xa7}, + {value: 0x0008, lo: 0xa8, hi: 0xbf}, + // Block 0x95, offset 0x48e + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x8aed, lo: 0xb0, hi: 0xb7}, + {value: 0x8ad5, lo: 0xb8, hi: 0xbf}, + // Block 0x96, offset 0x495 + {value: 0x0000, lo: 0x06}, + {value: 0xe145, lo: 0x80, hi: 0x87}, + {value: 0xe1c5, lo: 0x88, hi: 0x8f}, + {value: 0xe145, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x97}, + {value: 0x0008, lo: 0x98, hi: 0xbb}, + {value: 0x0040, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x49c + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x4a0 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xae}, + {value: 0x0018, lo: 0xaf, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x99, offset 0x4a5 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0x9a, offset 0x4a8 + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xbf}, + // Block 0x9b, offset 0x4ad + {value: 0x0000, 
lo: 0x0b}, + {value: 0x0808, lo: 0x80, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x87}, + {value: 0x0808, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0808, lo: 0x8a, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb6}, + {value: 0x0808, lo: 0xb7, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbb}, + {value: 0x0808, lo: 0xbc, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbe}, + {value: 0x0808, lo: 0xbf, hi: 0xbf}, + // Block 0x9c, offset 0x4b9 + {value: 0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x96}, + {value: 0x0818, lo: 0x97, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb6}, + {value: 0x0818, lo: 0xb7, hi: 0xbf}, + // Block 0x9d, offset 0x4bf + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xa6}, + {value: 0x0818, lo: 0xa7, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0x9e, offset 0x4c4 + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb3}, + {value: 0x0808, lo: 0xb4, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xba}, + {value: 0x0818, lo: 0xbb, hi: 0xbf}, + // Block 0x9f, offset 0x4cb + {value: 0x0000, lo: 0x07}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0818, lo: 0x96, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbe}, + {value: 0x0818, lo: 0xbf, hi: 0xbf}, + // Block 0xa0, offset 0x4d3 + {value: 0x0000, lo: 0x04}, + {value: 0x0808, lo: 0x80, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbb}, + {value: 0x0818, lo: 0xbc, hi: 0xbd}, + {value: 0x0808, lo: 0xbe, hi: 0xbf}, + // Block 0xa1, offset 0x4d8 + {value: 0x0000, lo: 0x03}, + {value: 0x0818, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x0818, lo: 0x92, hi: 0xbf}, + // Block 0xa2, offset 0x4dc + {value: 0x0000, lo: 0x0f}, + {value: 0x0808, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x84}, + {value: 0x3308, lo: 0x85, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x8b}, + {value: 0x3308, lo: 0x8c, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x94}, + {value: 0x0808, lo: 0x95, hi: 0x97}, + {value: 0x0040, lo: 0x98, hi: 0x98}, + {value: 0x0808, lo: 0x99, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xba}, + {value: 0x0040, lo: 0xbb, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xa3, offset 0x4ec + {value: 0x0000, lo: 0x06}, + {value: 0x0818, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0818, lo: 0x90, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xbc}, + {value: 0x0818, lo: 0xbd, hi: 0xbf}, + // Block 0xa4, offset 0x4f3 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0x9c}, + {value: 0x0818, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xa5, offset 0x4f7 + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb8}, + {value: 0x0018, lo: 0xb9, hi: 0xbf}, + // Block 0xa6, offset 0x4fb + {value: 0x0000, lo: 0x06}, + {value: 0x0808, lo: 0x80, hi: 0x95}, + {value: 0x0040, lo: 0x96, hi: 0x97}, + {value: 0x0818, lo: 0x98, hi: 0x9f}, + {value: 0x0808, lo: 0xa0, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb7}, + {value: 0x0818, lo: 0xb8, hi: 0xbf}, + // Block 0xa7, offset 0x502 + {value: 0x0000, 
lo: 0x01}, + {value: 0x0808, lo: 0x80, hi: 0xbf}, + // Block 0xa8, offset 0x504 + {value: 0x0000, lo: 0x02}, + {value: 0x0808, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0xbf}, + // Block 0xa9, offset 0x507 + {value: 0x0000, lo: 0x02}, + {value: 0x03dd, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xaa, offset 0x50a + {value: 0x0000, lo: 0x03}, + {value: 0x0808, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xb9}, + {value: 0x0818, lo: 0xba, hi: 0xbf}, + // Block 0xab, offset 0x50e + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0818, lo: 0xa0, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xac, offset 0x512 + {value: 0x0000, lo: 0x05}, + {value: 0x3008, lo: 0x80, hi: 0x80}, + {value: 0x3308, lo: 0x81, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xad, offset 0x518 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x85}, + {value: 0x3b08, lo: 0x86, hi: 0x86}, + {value: 0x0018, lo: 0x87, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x91}, + {value: 0x0018, lo: 0x92, hi: 0xa5}, + {value: 0x0008, lo: 0xa6, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xae, offset 0x521 + {value: 0x0000, lo: 0x0b}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb6}, + {value: 0x3008, lo: 0xb7, hi: 0xb8}, + {value: 0x3b08, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x0018, lo: 0xbb, hi: 0xbc}, + {value: 0x0340, lo: 0xbd, hi: 0xbd}, + {value: 0x0018, lo: 0xbe, hi: 0xbf}, + // Block 0xaf, offset 0x52d + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb0, offset 0x534 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xb2}, + {value: 0x3b08, lo: 0xb3, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xbf}, + // Block 0xb1, offset 0x53d + {value: 0x0000, lo: 0x07}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb3}, + {value: 0x0018, lo: 0xb4, hi: 0xb5}, + {value: 0x0008, lo: 0xb6, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xb2, offset 0x545 + {value: 0x0000, lo: 0x06}, + {value: 0x3308, lo: 0x80, hi: 0x81}, + {value: 0x3008, lo: 0x82, hi: 0x82}, + {value: 0x0008, lo: 0x83, hi: 0xb2}, + {value: 0x3008, lo: 0xb3, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xbe}, + {value: 0x3008, lo: 0xbf, hi: 0xbf}, + // Block 0xb3, offset 0x54c + {value: 0x0000, lo: 0x0d}, + {value: 0x3808, lo: 0x80, hi: 0x80}, + {value: 0x0008, lo: 0x81, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x89}, + {value: 0x3308, lo: 0x8a, hi: 0x8c}, + {value: 0x0018, lo: 0x8d, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0008, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, 
hi: 0xa0}, + {value: 0x0018, lo: 0xa1, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xb4, offset 0x55a + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xae}, + {value: 0x3308, lo: 0xaf, hi: 0xb1}, + {value: 0x3008, lo: 0xb2, hi: 0xb3}, + {value: 0x3308, lo: 0xb4, hi: 0xb4}, + {value: 0x3808, lo: 0xb5, hi: 0xb5}, + {value: 0x3308, lo: 0xb6, hi: 0xb7}, + {value: 0x0018, lo: 0xb8, hi: 0xbd}, + {value: 0x3308, lo: 0xbe, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xb5, offset 0x567 + {value: 0x0000, lo: 0x0c}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x0008, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0x8d}, + {value: 0x0040, lo: 0x8e, hi: 0x8e}, + {value: 0x0008, lo: 0x8f, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9e}, + {value: 0x0008, lo: 0x9f, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbf}, + // Block 0xb6, offset 0x574 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x3308, lo: 0x9f, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa9}, + {value: 0x3b08, lo: 0xaa, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0040, lo: 0xba, hi: 0xbf}, + // Block 0xb7, offset 0x57d + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x3008, lo: 0xb5, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbf}, + // Block 0xb8, offset 0x581 + {value: 0x0000, lo: 0x0d}, + {value: 0x3008, lo: 0x80, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x84}, + {value: 0x3008, lo: 0x85, hi: 0x85}, + {value: 0x3308, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x8a}, + {value: 0x0018, lo: 0x8b, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0x9b}, + {value: 0x0040, lo: 0x9c, hi: 0x9c}, + {value: 0x0018, lo: 0x9d, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xb9, offset 0x58f + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xb8}, + {value: 0x3008, lo: 0xb9, hi: 0xb9}, + {value: 0x3308, lo: 0xba, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbe}, + {value: 0x3308, lo: 0xbf, hi: 0xbf}, + // Block 0xba, offset 0x597 + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x3008, lo: 0x81, hi: 0x81}, + {value: 0x3b08, lo: 0x82, hi: 0x82}, + {value: 0x3308, lo: 0x83, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x85}, + {value: 0x0018, lo: 0x86, hi: 0x86}, + {value: 0x0008, lo: 0x87, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xbb, offset 0x5a2 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xb7}, + {value: 0x3008, lo: 0xb8, hi: 0xbb}, + {value: 0x3308, lo: 0xbc, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbc, offset 0x5ab + {value: 0x0000, lo: 0x05}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x97}, + {value: 
0x0008, lo: 0x98, hi: 0x9b}, + {value: 0x3308, lo: 0x9c, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0xbf}, + // Block 0xbd, offset 0x5b1 + {value: 0x0000, lo: 0x07}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3008, lo: 0xb0, hi: 0xb2}, + {value: 0x3308, lo: 0xb3, hi: 0xba}, + {value: 0x3008, lo: 0xbb, hi: 0xbc}, + {value: 0x3308, lo: 0xbd, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xbe, offset 0x5b9 + {value: 0x0000, lo: 0x08}, + {value: 0x3308, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x83}, + {value: 0x0008, lo: 0x84, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xbf, offset 0x5c2 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x3308, lo: 0xab, hi: 0xab}, + {value: 0x3008, lo: 0xac, hi: 0xac}, + {value: 0x3308, lo: 0xad, hi: 0xad}, + {value: 0x3008, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb5}, + {value: 0x3808, lo: 0xb6, hi: 0xb6}, + {value: 0x3308, lo: 0xb7, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbf}, + // Block 0xc0, offset 0x5cc + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x89}, + {value: 0x0040, lo: 0x8a, hi: 0xbf}, + // Block 0xc1, offset 0x5cf + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9f}, + {value: 0x3008, lo: 0xa0, hi: 0xa1}, + {value: 0x3308, lo: 0xa2, hi: 0xa5}, + {value: 0x3008, lo: 0xa6, hi: 0xa6}, + {value: 0x3308, lo: 0xa7, hi: 0xaa}, + {value: 0x3b08, lo: 0xab, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xb9}, + {value: 0x0018, lo: 0xba, hi: 0xbf}, + // Block 0xc2, offset 0x5db + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x049d, lo: 0xa0, hi: 0xbf}, + // Block 0xc3, offset 0x5de + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbe}, + {value: 0x0008, lo: 0xbf, hi: 0xbf}, + // Block 0xc4, offset 0x5e3 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb8}, + {value: 0x0040, lo: 0xb9, hi: 0xbf}, + // Block 0xc5, offset 0x5e6 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x89}, + {value: 0x0008, lo: 0x8a, hi: 0xae}, + {value: 0x3008, lo: 0xaf, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xb7}, + {value: 0x3308, lo: 0xb8, hi: 0xbd}, + {value: 0x3008, lo: 0xbe, hi: 0xbe}, + {value: 0x3b08, lo: 0xbf, hi: 0xbf}, + // Block 0xc6, offset 0x5f0 + {value: 0x0000, lo: 0x08}, + {value: 0x0008, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0018, lo: 0x9a, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0008, lo: 0xb2, hi: 0xbf}, + // Block 0xc7, offset 0x5f9 + {value: 0x0000, lo: 0x0b}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x91}, + {value: 0x3308, lo: 0x92, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xa8}, + {value: 0x3008, lo: 0xa9, hi: 0xa9}, + {value: 0x3308, lo: 0xaa, hi: 0xb0}, + {value: 0x3008, lo: 0xb1, hi: 0xb1}, + {value: 0x3308, lo: 0xb2, hi: 0xb3}, + {value: 0x3008, lo: 0xb4, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, 
hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xc8, offset 0x605 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0xbf}, + // Block 0xc9, offset 0x608 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xca, offset 0x60d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0040, lo: 0x84, hi: 0xbf}, + // Block 0xcb, offset 0x610 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xbf}, + // Block 0xcc, offset 0x613 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0xbf}, + // Block 0xcd, offset 0x616 + {value: 0x0000, lo: 0x06}, + {value: 0x0008, lo: 0x80, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa9}, + {value: 0x0040, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xce, offset 0x61d + {value: 0x0000, lo: 0x06}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb4}, + {value: 0x0018, lo: 0xb5, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xcf, offset 0x624 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0xaf}, + {value: 0x3308, lo: 0xb0, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xbf}, + // Block 0xd0, offset 0x628 + {value: 0x0000, lo: 0x0a}, + {value: 0x0008, lo: 0x80, hi: 0x83}, + {value: 0x0018, lo: 0x84, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9a}, + {value: 0x0018, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x0008, lo: 0xa3, hi: 0xb7}, + {value: 0x0040, lo: 0xb8, hi: 0xbc}, + {value: 0x0008, lo: 0xbd, hi: 0xbf}, + // Block 0xd1, offset 0x633 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0xbf}, + // Block 0xd2, offset 0x636 + {value: 0x0000, lo: 0x05}, + {value: 0x0008, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x90}, + {value: 0x3008, lo: 0x91, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xd3, offset 0x63c + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x8e}, + {value: 0x3308, lo: 0x8f, hi: 0x92}, + {value: 0x0008, lo: 0x93, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xd4, offset 0x641 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xa0}, + {value: 0x0040, lo: 0xa1, hi: 0xbf}, + // Block 0xd5, offset 0x645 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xd6, offset 0x648 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb2}, + {value: 0x0040, lo: 0xb3, hi: 0xbf}, + // Block 0xd7, offset 0x64b + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0xbf}, + // Block 0xd8, offset 0x64e + {value: 0x0000, lo: 0x04}, + {value: 0x0008, lo: 0x80, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xaf}, + {value: 0x0008, lo: 0xb0, hi: 0xbc}, + {value: 0x0040, lo: 0xbd, hi: 0xbf}, + // Block 0xd9, offset 0x653 + {value: 0x0000, lo: 0x09}, + {value: 0x0008, lo: 0x80, hi: 0x88}, + {value: 0x0040, lo: 
0x89, hi: 0x8f}, + {value: 0x0008, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9b}, + {value: 0x0018, lo: 0x9c, hi: 0x9c}, + {value: 0x3308, lo: 0x9d, hi: 0x9e}, + {value: 0x0018, lo: 0x9f, hi: 0x9f}, + {value: 0x03c0, lo: 0xa0, hi: 0xa3}, + {value: 0x0040, lo: 0xa4, hi: 0xbf}, + // Block 0xda, offset 0x65d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xdb, offset 0x660 + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xa6}, + {value: 0x0040, lo: 0xa7, hi: 0xa8}, + {value: 0x0018, lo: 0xa9, hi: 0xbf}, + // Block 0xdc, offset 0x664 + {value: 0x0000, lo: 0x0e}, + {value: 0x0018, lo: 0x80, hi: 0x9d}, + {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, + {value: 0xb601, lo: 0x9f, hi: 0x9f}, + {value: 0xb649, lo: 0xa0, hi: 0xa0}, + {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, + {value: 0xb719, lo: 0xa2, hi: 0xa2}, + {value: 0xb781, lo: 0xa3, hi: 0xa3}, + {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, + {value: 0x3018, lo: 0xa5, hi: 0xa6}, + {value: 0x3318, lo: 0xa7, hi: 0xa9}, + {value: 0x0018, lo: 0xaa, hi: 0xac}, + {value: 0x3018, lo: 0xad, hi: 0xb2}, + {value: 0x0340, lo: 0xb3, hi: 0xba}, + {value: 0x3318, lo: 0xbb, hi: 0xbf}, + // Block 0xdd, offset 0x673 + {value: 0x0000, lo: 0x0b}, + {value: 0x3318, lo: 0x80, hi: 0x82}, + {value: 0x0018, lo: 0x83, hi: 0x84}, + {value: 0x3318, lo: 0x85, hi: 0x8b}, + {value: 0x0018, lo: 0x8c, hi: 0xa9}, + {value: 0x3318, lo: 0xaa, hi: 0xad}, + {value: 0x0018, lo: 0xae, hi: 0xba}, + {value: 0xb851, lo: 0xbb, hi: 0xbb}, + {value: 0xb899, lo: 0xbc, hi: 0xbc}, + {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, + {value: 0xb949, lo: 0xbe, hi: 0xbe}, + {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, + // Block 0xde, offset 0x67f + {value: 0x0000, lo: 0x03}, + {value: 0xba19, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0xa8}, + {value: 0x0040, lo: 0xa9, hi: 0xbf}, + // Block 0xdf, offset 0x683 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x81}, + {value: 0x3318, lo: 0x82, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x85}, + {value: 0x0040, lo: 0x86, hi: 0xbf}, + // Block 0xe0, offset 0x688 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe1, offset 0x68d + {value: 0x0000, lo: 0x03}, + {value: 0x3308, lo: 0x80, hi: 0xb6}, + {value: 0x0018, lo: 0xb7, hi: 0xba}, + {value: 0x3308, lo: 0xbb, hi: 0xbf}, + // Block 0xe2, offset 0x691 + {value: 0x0000, lo: 0x04}, + {value: 0x3308, lo: 0x80, hi: 0xac}, + {value: 0x0018, lo: 0xad, hi: 0xb4}, + {value: 0x3308, lo: 0xb5, hi: 0xb5}, + {value: 0x0018, lo: 0xb6, hi: 0xbf}, + // Block 0xe3, offset 0x696 + {value: 0x0000, lo: 0x08}, + {value: 0x0018, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x84}, + {value: 0x0018, lo: 0x85, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xa0}, + {value: 0x3308, lo: 0xa1, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, + // Block 0xe4, offset 0x69f + {value: 0x0000, lo: 0x0a}, + {value: 0x3308, lo: 0x80, hi: 0x86}, + {value: 0x0040, lo: 0x87, hi: 0x87}, + {value: 0x3308, lo: 0x88, hi: 0x98}, + {value: 0x0040, lo: 0x99, hi: 0x9a}, + {value: 0x3308, lo: 0x9b, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xa2}, + {value: 0x3308, lo: 0xa3, hi: 0xa4}, + {value: 0x0040, lo: 0xa5, hi: 0xa5}, + {value: 0x3308, lo: 0xa6, hi: 0xaa}, + {value: 0x0040, lo: 0xab, hi: 0xbf}, + // Block 0xe5, offset 0x6aa + {value: 
0x0000, lo: 0x05}, + {value: 0x0808, lo: 0x80, hi: 0x84}, + {value: 0x0040, lo: 0x85, hi: 0x86}, + {value: 0x0818, lo: 0x87, hi: 0x8f}, + {value: 0x3308, lo: 0x90, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xe6, offset 0x6b0 + {value: 0x0000, lo: 0x07}, + {value: 0x0a08, lo: 0x80, hi: 0x83}, + {value: 0x3308, lo: 0x84, hi: 0x8a}, + {value: 0x0040, lo: 0x8b, hi: 0x8f}, + {value: 0x0808, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9d}, + {value: 0x0818, lo: 0x9e, hi: 0x9f}, + {value: 0x0040, lo: 0xa0, hi: 0xbf}, + // Block 0xe7, offset 0x6b8 + {value: 0x0000, lo: 0x03}, + {value: 0x0040, lo: 0x80, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb1}, + {value: 0x0040, lo: 0xb2, hi: 0xbf}, + // Block 0xe8, offset 0x6bc + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0xab}, + {value: 0x0040, lo: 0xac, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xbf}, + // Block 0xe9, offset 0x6c0 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x93}, + {value: 0x0040, lo: 0x94, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xae}, + {value: 0x0040, lo: 0xaf, hi: 0xb0}, + {value: 0x0018, lo: 0xb1, hi: 0xbf}, + // Block 0xea, offset 0x6c6 + {value: 0x0000, lo: 0x05}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0018, lo: 0x81, hi: 0x8f}, + {value: 0x0040, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xb5}, + {value: 0x0040, lo: 0xb6, hi: 0xbf}, + // Block 0xeb, offset 0x6cc + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8f}, + {value: 0xc1c1, lo: 0x90, hi: 0x90}, + {value: 0x0018, lo: 0x91, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xbf}, + // Block 0xec, offset 0x6d1 + {value: 0x0000, lo: 0x02}, + {value: 0x0040, lo: 0x80, hi: 0xa5}, + {value: 0x0018, lo: 0xa6, hi: 0xbf}, + // Block 0xed, offset 0x6d4 + {value: 0x0000, lo: 0x0d}, + {value: 0xc7e9, lo: 0x80, hi: 0x80}, + {value: 0xc839, lo: 0x81, hi: 0x81}, + {value: 0xc889, lo: 0x82, hi: 0x82}, + {value: 0xc8d9, lo: 0x83, hi: 0x83}, + {value: 0xc929, lo: 0x84, hi: 0x84}, + {value: 0xc979, lo: 0x85, hi: 0x85}, + {value: 0xc9c9, lo: 0x86, hi: 0x86}, + {value: 0xca19, lo: 0x87, hi: 0x87}, + {value: 0xca69, lo: 0x88, hi: 0x88}, + {value: 0x0040, lo: 0x89, hi: 0x8f}, + {value: 0xcab9, lo: 0x90, hi: 0x90}, + {value: 0xcad9, lo: 0x91, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xee, offset 0x6e2 + {value: 0x0000, lo: 0x06}, + {value: 0x0018, lo: 0x80, hi: 0x92}, + {value: 0x0040, lo: 0x93, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xac}, + {value: 0x0040, lo: 0xad, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb6}, + {value: 0x0040, lo: 0xb7, hi: 0xbf}, + // Block 0xef, offset 0x6e9 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0xb3}, + {value: 0x0040, lo: 0xb4, hi: 0xbf}, + // Block 0xf0, offset 0x6ec + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x94}, + {value: 0x0040, lo: 0x95, hi: 0xbf}, + // Block 0xf1, offset 0x6ef + {value: 0x0000, lo: 0x03}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xbf}, + // Block 0xf2, offset 0x6f3 + {value: 0x0000, lo: 0x05}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x99}, + {value: 0x0040, lo: 0x9a, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xbf}, + // Block 0xf3, offset 0x6f9 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x87}, + {value: 0x0040, lo: 0x88, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0xad}, + {value: 0x0040, lo: 0xae, hi: 0xbf}, + // Block 0xf4, 
offset 0x6fe + {value: 0x0000, lo: 0x09}, + {value: 0x0040, lo: 0x80, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0x9f}, + {value: 0x0018, lo: 0xa0, hi: 0xa7}, + {value: 0x0040, lo: 0xa8, hi: 0xaf}, + {value: 0x0018, lo: 0xb0, hi: 0xb0}, + {value: 0x0040, lo: 0xb1, hi: 0xb2}, + {value: 0x0018, lo: 0xb3, hi: 0xbe}, + {value: 0x0040, lo: 0xbf, hi: 0xbf}, + // Block 0xf5, offset 0x708 + {value: 0x0000, lo: 0x04}, + {value: 0x0018, lo: 0x80, hi: 0x8b}, + {value: 0x0040, lo: 0x8c, hi: 0x8f}, + {value: 0x0018, lo: 0x90, hi: 0x9e}, + {value: 0x0040, lo: 0x9f, hi: 0xbf}, + // Block 0xf6, offset 0x70d + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x91}, + {value: 0x0040, lo: 0x92, hi: 0xbf}, + // Block 0xf7, offset 0x710 + {value: 0x0000, lo: 0x02}, + {value: 0x0018, lo: 0x80, hi: 0x80}, + {value: 0x0040, lo: 0x81, hi: 0xbf}, + // Block 0xf8, offset 0x713 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0x96}, + {value: 0x0040, lo: 0x97, hi: 0xbf}, + // Block 0xf9, offset 0x716 + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xb4}, + {value: 0x0040, lo: 0xb5, hi: 0xbf}, + // Block 0xfa, offset 0x719 + {value: 0x0000, lo: 0x03}, + {value: 0x0008, lo: 0x80, hi: 0x9d}, + {value: 0x0040, lo: 0x9e, hi: 0x9f}, + {value: 0x0008, lo: 0xa0, hi: 0xbf}, + // Block 0xfb, offset 0x71d + {value: 0x0000, lo: 0x02}, + {value: 0x0008, lo: 0x80, hi: 0xa1}, + {value: 0x0040, lo: 0xa2, hi: 0xbf}, + // Block 0xfc, offset 0x720 + {value: 0x0020, lo: 0x0f}, + {value: 0xdeb9, lo: 0x80, hi: 0x89}, + {value: 0x8dfd, lo: 0x8a, hi: 0x8a}, + {value: 0xdff9, lo: 0x8b, hi: 0x9c}, + {value: 0x8e1d, lo: 0x9d, hi: 0x9d}, + {value: 0xe239, lo: 0x9e, hi: 0xa2}, + {value: 0x8e3d, lo: 0xa3, hi: 0xa3}, + {value: 0xe2d9, lo: 0xa4, hi: 0xab}, + {value: 0x7ed5, lo: 0xac, hi: 0xac}, + {value: 0xe3d9, lo: 0xad, hi: 0xaf}, + {value: 0x8e5d, lo: 0xb0, hi: 0xb0}, + {value: 0xe439, lo: 0xb1, hi: 0xb6}, + {value: 0x8e7d, lo: 0xb7, hi: 0xb9}, + {value: 0xe4f9, lo: 0xba, hi: 0xba}, + {value: 0x8edd, lo: 0xbb, hi: 0xbb}, + {value: 0xe519, lo: 0xbc, hi: 0xbf}, + // Block 0xfd, offset 0x730 + {value: 0x0020, lo: 0x10}, + {value: 0x937d, lo: 0x80, hi: 0x80}, + {value: 0xf099, lo: 0x81, hi: 0x86}, + {value: 0x939d, lo: 0x87, hi: 0x8a}, + {value: 0xd9f9, lo: 0x8b, hi: 0x8b}, + {value: 0xf159, lo: 0x8c, hi: 0x96}, + {value: 0x941d, lo: 0x97, hi: 0x97}, + {value: 0xf2b9, lo: 0x98, hi: 0xa3}, + {value: 0x943d, lo: 0xa4, hi: 0xa6}, + {value: 0xf439, lo: 0xa7, hi: 0xaa}, + {value: 0x949d, lo: 0xab, hi: 0xab}, + {value: 0xf4b9, lo: 0xac, hi: 0xac}, + {value: 0x94bd, lo: 0xad, hi: 0xad}, + {value: 0xf4d9, lo: 0xae, hi: 0xaf}, + {value: 0x94dd, lo: 0xb0, hi: 0xb1}, + {value: 0xf519, lo: 0xb2, hi: 0xbe}, + {value: 0x2040, lo: 0xbf, hi: 0xbf}, + // Block 0xfe, offset 0x741 + {value: 0x0000, lo: 0x04}, + {value: 0x0040, lo: 0x80, hi: 0x80}, + {value: 0x0340, lo: 0x81, hi: 0x81}, + {value: 0x0040, lo: 0x82, hi: 0x9f}, + {value: 0x0340, lo: 0xa0, hi: 0xbf}, + // Block 0xff, offset 0x746 + {value: 0x0000, lo: 0x01}, + {value: 0x0340, lo: 0x80, hi: 0xbf}, + // Block 0x100, offset 0x748 + {value: 0x0000, lo: 0x01}, + {value: 0x33c0, lo: 0x80, hi: 0xbf}, + // Block 0x101, offset 0x74a + {value: 0x0000, lo: 0x02}, + {value: 0x33c0, lo: 0x80, hi: 0xaf}, + {value: 0x0040, lo: 0xb0, hi: 0xbf}, +} + +// Total table size 41662 bytes (40KiB); checksum: 355A58A4 diff --git a/vendor/golang.org/x/net/idna/trie.go b/vendor/golang.org/x/net/idna/trie.go new file mode 100644 index 00000000..c4ef847e --- 
/dev/null +++ b/vendor/golang.org/x/net/idna/trie.go @@ -0,0 +1,72 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package idna + +// appendMapping appends the mapping for the respective rune. isMapped must be +// true. A mapping is a categorization of a rune as defined in UTS #46. +func (c info) appendMapping(b []byte, s string) []byte { + index := int(c >> indexShift) + if c&xorBit == 0 { + s := mappings[index:] + return append(b, s[1:s[0]+1]...) + } + b = append(b, s...) + if c&inlineXOR == inlineXOR { + // TODO: support and handle two-byte inline masks + b[len(b)-1] ^= byte(index) + } else { + for p := len(b) - int(xorData[index]); p < len(b); p++ { + index++ + b[p] ^= xorData[index] + } + } + return b +} + +// Sparse block handling code. + +type valueRange struct { + value uint16 // header: value:stride + lo, hi byte // header: lo:n +} + +type sparseBlocks struct { + values []valueRange + offset []uint16 +} + +var idnaSparse = sparseBlocks{ + values: idnaSparseValues[:], + offset: idnaSparseOffset[:], +} + +// Don't use newIdnaTrie to avoid unconditional linking in of the table. +var trie = &idnaTrie{} + +// lookup determines the type of block n and looks up the value for b. +// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block +// is a list of ranges with an accompanying value. Given a matching range r, +// the value for b is by r.value + (b - r.lo) * stride. +func (t *sparseBlocks) lookup(n uint32, b byte) uint16 { + offset := t.offset[n] + header := t.values[offset] + lo := offset + 1 + hi := lo + uint16(header.lo) + for lo < hi { + m := lo + (hi-lo)/2 + r := t.values[m] + if r.lo <= b && b <= r.hi { + return r.value + uint16(b-r.lo)*header.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} diff --git a/vendor/golang.org/x/net/idna/trieval.go b/vendor/golang.org/x/net/idna/trieval.go new file mode 100644 index 00000000..7a8cf889 --- /dev/null +++ b/vendor/golang.org/x/net/idna/trieval.go @@ -0,0 +1,119 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package idna + +// This file contains definitions for interpreting the trie value of the idna +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds information from the IDNA mapping table for a single rune. It is +// the value returned by a trie lookup. In most cases, all information fits in +// a 16-bit value. For mappings, this value may contain an index into a slice +// with the mapped string. Such mappings can consist of the actual mapped value +// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the +// input rune. This technique is used by the cases packages and reduces the +// table size significantly. 
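The `sparseBlocks.lookup` in `trie.go` above relies on each block in the `idnaSparseValues` table being laid out as a header entry (range count in `lo`, stride in `value`) followed by sorted, non-overlapping ranges. The standalone sketch below re-implements that binary search over a single hypothetical block so the encoding is easier to follow; the table values here are invented for illustration and are not taken from the real generated tables:

```go
package main

import "fmt"

// valueRange mirrors the struct in trie.go. In a block's header entry,
// value holds the stride and lo holds the number of ranges that follow.
type valueRange struct {
	value  uint16
	lo, hi byte
}

// lookupSparse re-implements sparseBlocks.lookup for one block:
// binary-search the ranges after the header for one containing b.
func lookupSparse(block []valueRange, b byte) uint16 {
	header := block[0]
	lo, hi := 1, 1+int(header.lo)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := block[m]
		if r.lo <= b && b <= r.hi {
			// The value grows by the header's stride for every
			// byte past the start of the matching range.
			return r.value + uint16(b-r.lo)*header.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0 // b is not covered by any range in this block
}

func main() {
	// Hypothetical block in the same shape as the idnaSparseValues data:
	// stride 0, two ranges.
	block := []valueRange{
		{value: 0x0000, lo: 0x02},           // header: stride 0, 2 ranges
		{value: 0x0008, lo: 0x80, hi: 0x8f}, // bytes 0x80..0x8f -> 0x0008
		{value: 0x0040, lo: 0xa0, hi: 0xbf}, // bytes 0xa0..0xbf -> 0x0040
	}
	fmt.Printf("0x%04x\n", lookupSparse(block, 0x85)) // 0x0008
	fmt.Printf("0x%04x\n", lookupSparse(block, 0x99)) // 0x0000 (gap)
}
```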
+// +// The per-rune values have the following format: +// +// if mapped { +// if inlinedXOR { +// 15..13 inline XOR marker +// 12..11 unused +// 10..3 inline XOR mask +// } else { +// 15..3 index into xor or mapping table +// } +// } else { +// 15..14 unused +// 13 mayNeedNorm +// 12..11 attributes +// 10..8 joining type +// 7..3 category type +// } +// 2 use xor pattern +// 1..0 mapped category +// +// See the definitions below for a more detailed description of the various +// bits. +type info uint16 + +const ( + catSmallMask = 0x3 + catBigMask = 0xF8 + indexShift = 3 + xorBit = 0x4 // interpret the index as an xor pattern + inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined. + + joinShift = 8 + joinMask = 0x07 + + // Attributes + attributesMask = 0x1800 + viramaModifier = 0x1800 + modifier = 0x1000 + rtl = 0x0800 + + mayNeedNorm = 0x2000 +) + +// A category corresponds to a category defined in the IDNA mapping table. +type category uint16 + +const ( + unknown category = 0 // not currently defined in unicode. + mapped category = 1 + disallowedSTD3Mapped category = 2 + deviation category = 3 +) + +const ( + valid category = 0x08 + validNV8 category = 0x18 + validXV8 category = 0x28 + disallowed category = 0x40 + disallowedSTD3Valid category = 0x80 + ignored category = 0xC0 +) + +// join types and additional rune information +const ( + joiningL = (iota + 1) + joiningD + joiningT + joiningR + + //the following types are derived during processing + joinZWJ + joinZWNJ + joinVirama + numJoinTypes +) + +func (c info) isMapped() bool { + return c&0x3 != 0 +} + +func (c info) category() category { + small := c & catSmallMask + if small != 0 { + return category(small) + } + return category(c & catBigMask) +} + +func (c info) joinType() info { + if c.isMapped() { + return 0 + } + return (c >> joinShift) & joinMask +} + +func (c info) isModifier() bool { + return c&(modifier|catSmallMask) == modifier +} + +func (c info) isViramaModifier() bool { + return c&(attributesMask|catSmallMask) == viramaModifier +} diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000..685f0e7e --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. 
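The bit layout documented in `trieval.go` above is easiest to verify with a worked decode. The snippet below assembles a hypothetical *unmapped* `info` value (category `valid`, joining type 2, i.e. `joiningD`, with `mayNeedNorm` set) and extracts each field using the same masks and shifts the file defines; the chosen value is invented purely for illustration:

```go
package main

import "fmt"

// Masks and shifts copied from trieval.go above.
const (
	catSmallMask = 0x3
	catBigMask   = 0xF8
	joinShift    = 8
	joinMask     = 0x07
	mayNeedNorm  = 0x2000

	valid = 0x08 // category "valid"
)

func main() {
	// Hypothetical unmapped per-rune value: bits 1..0 are zero, so the
	// category lives in the wider 7..3 field; joining type sits in 10..8.
	v := uint16(valid | 2<<joinShift | mayNeedNorm)

	fmt.Println("mapped:", v&catSmallMask != 0) // false

	cat := v & catSmallMask
	if cat == 0 {
		cat = v & catBigMask // unmapped runes use the big category field
	}
	fmt.Printf("category: 0x%02x\n", cat) // 0x08, i.e. valid

	fmt.Println("join type:", (v>>joinShift)&joinMask) // 2 (joiningD)
	fmt.Println("may need norm:", v&mayNeedNorm != 0)  // true
}
```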
+ CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. +type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. 
+func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. + ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. 
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. +func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? 
+ if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. + overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. 
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000..c646a695 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. 
+ f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. +func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. 
+func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. 
+ el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. +func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` +<html> + <head> + <title>events + + + + +
+		</title>
+	</head>
+	<body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+	{{range $i, $fam := .Families}}
+	<tr>
+		<td class="family">{{$fam}}</td>
+
+		{{range $j, $bucket := $.Buckets}}
+		{{$n := index $.Counts $i $j}}
+		<td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+			{{if $n}}<a href="?fam={{$fam}}&b={{$j}}">{{end}}
+			[{{$n}} {{$bucket.String}}]
+			{{if $n}}</a>{{end}}
+		</td>
+		{{end}}
+
+	</tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+	<tr><th>When</th><th>Elapsed</th></tr>
+	{{range $el := $.EventLogs}}
+	<tr class="first">
+		<td class="when">{{$el.When}}</td>
+		<td class="elapsed">{{$el.ElapsedTime}}</td>
+		<td>{{$el.Title}}</td>
+	</tr>
+	{{if $.Expanded}}
+	<tr>
+		<td class="when"></td>
+		<td class="elapsed"></td>
+		<td><pre>{{$el.Stack|trimSpace}}</pre></td>
+	</tr>
+	{{range $el.Events}}
+	<tr>
+		<td class="when">{{.WhenString}}</td>
+		<td class="elapsed">{{elapsed .Elapsed}}</td>
+		<td>.{{if .IsErr}}<b>E</b>{{else}}.{{end}}. {{.What}}</td>
+	</tr>
+	{{end}}
+	{{end}}
+	{{end}}
+</table>
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000..9bf4286c --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
+<table>
+<tr>
+    <td style="padding:0.25em">Count: {{.Count}}</td>
+    <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+    <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+    <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+  <tr>
+    <td style="padding:0 0 0 0.25em">[</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+    <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+    <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+    <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+    <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}};"></div></td>
+  </tr>
+{{end}}
+{{end}}
+</table>
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000..3ebf6f2d --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1130 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "context" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "net/url" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// HTTP ServeMux paths. +const ( + debugRequestsPath = "/debug/requests" + debugEventsPath = "/debug/events" +) + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. 
+ host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + _, pat := http.DefaultServeMux.Handler(&http.Request{URL: &url.URL{Path: debugRequestsPath}}) + if pat == debugRequestsPath { + panic("/debug/requests is already registered. You may have two independent copies of " + + "golang.org/x/net/trace in your binary, trying to maintain separate state. This may " + + "involve a vendored copy of golang.org/x/net/trace.") + } + + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. + http.HandleFunc(debugRequestsPath, Traces) + http.HandleFunc(debugEventsPath, Events) +} + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. 
+ if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. + data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. 
+ SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) +} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. 
+} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. 
+ b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. +func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. 
+ spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. + tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. 
+	if len(tr.events) == 0 && m > 3 {
+		tr.maxEvents = m
+	}
+	tr.mu.Unlock()
+}
+
+func (tr *trace) ref() {
+	atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+	if atomic.AddInt32(&tr.refs, -1) == 0 {
+		tr.mu.RLock()
+		if tr.recycler != nil {
+			// freeTrace clears tr, so we hold tr.recycler and tr.events here.
+			go func(f func(interface{}), es []event) {
+				for _, e := range es {
+					if e.Recyclable {
+						f(e.What)
+					}
+				}
+			}(tr.recycler, tr.events)
+		}
+		tr.mu.RUnlock()
+
+		freeTrace(tr)
+	}
+}
+
+func (tr *trace) When() string {
+	return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+	tr.mu.RLock()
+	t := tr.Elapsed
+	tr.mu.RUnlock()
+
+	if t == 0 {
+		// Active trace.
+		t = time.Since(tr.Start)
+	}
+	return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+	select {
+	case tr := <-traceFreeList:
+		return tr
+	default:
+		return new(trace)
+	}
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+	if DebugUseAfterFinish {
+		return // never reuse
+	}
+	tr.reset()
+	select {
+	case traceFreeList <- tr:
+	default:
+	}
+}
+
+func elapsed(d time.Duration) string {
+	b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+	// For subsecond durations, blank all zeros before decimal point,
+	// and all zeros between the decimal point and the first non-zero digit.
+	if d < time.Second {
+		dot := bytes.IndexByte(b, '.')
+		for i := 0; i < dot; i++ {
+			b[i] = ' '
+		}
+		for i := dot + 1; i < len(b); i++ {
+			if b[i] == '0' {
+				b[i] = ' '
+			} else {
+				break
+			}
+		}
+	}
+
+	return string(b)
+}
+
+var pageTmplCache *template.Template
+var pageTmplOnce sync.Once
+
+func pageTmpl() *template.Template {
+	pageTmplOnce.Do(func() {
+		pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{
+			"elapsed": elapsed,
+			"add":     func(a, b int) int { return a + b },
+		}).Parse(pageHTML))
+	})
+	return pageTmplCache
+}
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+[pageHTML body elided: the HTML markup of this vendored template was lost during extraction and only scattered Go template directives survived. Recoverable structure: "Prolog" renders the page head, inline CSS, and the /debug/requests heading; "StatusTable" renders one row per trace family with its [{{$n}} active] count, a [{{.Cond}}] link per completed bucket, and [minute], [hour], and [total] latency links; "Epilog" renders the Normal/Traced and Summary/Expanded view links, a "Showing {{len $.Traces}} of {{$.Total}} traces." line, the Active/Completed requests table (When, Elapsed (s), and event columns, printing [redacted] for sensitive events unless $.ShowSensitive), and a "Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}" histogram section whose surviving tail follows.]
+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 00000000..dfbed62c --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 00000000..0f443e69 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,35 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google) + +## Policy for new packages + +We no longer accept new provider-specific packages in this repo. For +defining provider endpoints and provider-specific OAuth2 behavior, we +encourage you to create packages elsewhere. We'll keep the existing +packages for compatibility. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. 
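The README above covers installation; as orientation for the vendored sources that follow, here is a minimal sketch of the package's basic three-legged flow. All endpoint URLs, credentials, and scopes below are placeholders, not values taken from this diff.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	ctx := context.Background()

	// Hypothetical client registration; real values come from your provider.
	conf := &oauth2.Config{
		ClientID:     "my-client-id",
		ClientSecret: "my-client-secret",
		Scopes:       []string{"profile"},
		RedirectURL:  "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/o/oauth2/auth",
			TokenURL: "https://provider.example.com/token",
		},
	}

	// Send the user here to consent; the provider redirects back with a code.
	fmt.Println("Visit:", conf.AuthCodeURL("state", oauth2.AccessTypeOffline))

	var code string
	fmt.Scan(&code)

	// Exchange the authorization code for a token.
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		panic(err)
	}

	// conf.Client returns an *http.Client that injects (and refreshes) the token.
	client := conf.Client(ctx, tok)
	_ = client
}
```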
diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod new file mode 100644 index 00000000..b3457815 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.mod @@ -0,0 +1,10 @@ +module golang.org/x/oauth2 + +go 1.11 + +require ( + cloud.google.com/go v0.34.0 + golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + google.golang.org/appengine v1.4.0 +) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum new file mode 100644 index 00000000..6f0079b0 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.sum @@ -0,0 +1,12 @@ +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go new file mode 100644 index 00000000..feb1157b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "time" + + "golang.org/x/oauth2" +) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error) + +// Set at init time by appengine_gen1.go. If nil, we're not on App Engine standard first generation (<= Go 1.9) or App Engine flexible. +var appengineAppIDFunc func(c context.Context) string + +// AppEngineTokenSource returns a token source that fetches tokens from either +// the current application's service account or from the metadata server, +// depending on the App Engine environment. See below for environment-specific +// details. If you are implementing a 3-legged OAuth 2.0 flow on App Engine that +// involves user accounts, see oauth2.Config instead. +// +// First generation App Engine runtimes (<= Go 1.9): +// AppEngineTokenSource returns a token source that fetches tokens issued to the +// current App Engine application's service account. The provided context must have +// come from appengine.NewContext. +// +// Second generation App Engine runtimes (>= Go 1.11) and App Engine flexible: +// AppEngineTokenSource is DEPRECATED on second generation runtimes and on the +// flexible environment. 
It delegates to ComputeTokenSource, and the provided +// context and scopes are not used. Please use DefaultTokenSource (or ComputeTokenSource, +// which DefaultTokenSource will use in this case) instead. +func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + return appEngineTokenSource(ctx, scope...) +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go new file mode 100644 index 00000000..83dacac3 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -0,0 +1,77 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +// This file applies to App Engine first generation runtimes (<= Go 1.9). + +package google + +import ( + "context" + "sort" + "strings" + "sync" + + "golang.org/x/oauth2" + "google.golang.org/appengine" +) + +func init() { + appengineTokenFunc = appengine.AccessToken + appengineAppIDFunc = appengine.AppID +} + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + scopes := append([]string{}, scope...) + sort.Strings(scopes) + return &gaeTokenSource{ + ctx: ctx, + scopes: scopes, + key: strings.Join(scopes, " "), + } +} + +// aeTokens helps the fetched tokens to be reused until their expiration. +var ( + aeTokensMu sync.Mutex + aeTokens = make(map[string]*tokenLock) // key is space-separated scopes +) + +type tokenLock struct { + mu sync.Mutex // guards t; held while fetching or updating t + t *oauth2.Token +} + +type gaeTokenSource struct { + ctx context.Context + scopes []string + key string // to aeTokens map; space-separated scopes +} + +func (ts *gaeTokenSource) Token() (*oauth2.Token, error) { + aeTokensMu.Lock() + tok, ok := aeTokens[ts.key] + if !ok { + tok = &tokenLock{} + aeTokens[ts.key] = tok + } + aeTokensMu.Unlock() + + tok.mu.Lock() + defer tok.mu.Unlock() + if tok.t.Valid() { + return tok.t, nil + } + access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...) + if err != nil { + return nil, err + } + tok.t = &oauth2.Token{ + AccessToken: access, + Expiry: exp, + } + return tok.t, nil +} diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go new file mode 100644 index 00000000..04c2c221 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +// This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. + +package google + +import ( + "context" + "log" + "sync" + + "golang.org/x/oauth2" +) + +var logOnce sync.Once // only spam about deprecation once + +// See comment on AppEngineTokenSource in appengine.go. +func appEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource { + logOnce.Do(func() { + log.Print("google: AppEngineTokenSource is deprecated on App Engine standard second generation runtimes (>= Go 1.11) and App Engine flexible. 
Please use DefaultTokenSource or ComputeTokenSource.") + }) + return ComputeTokenSource("") +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go new file mode 100644 index 00000000..ad2c0923 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -0,0 +1,154 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + + "cloud.google.com/go/compute/metadata" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. +// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// DefaultClient returns an HTTP Client that uses the +// DefaultTokenSource to obtain authentication credentials. +func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { + ts, err := DefaultTokenSource(ctx, scope...) + if err != nil { + return nil, err + } + return oauth2.NewClient(ctx, ts), nil +} + +// DefaultTokenSource returns the token source for +// "Application Default Credentials". +// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource. +func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) { + creds, err := FindDefaultCredentials(ctx, scope...) + if err != nil { + return nil, err + } + return creds.TokenSource, nil +} + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine standard first generation runtimes (<= Go 1.9) it uses +// the appengine.AccessToken function. +// 4. On Google Compute Engine, Google App Engine standard second generation runtimes +// (>= Go 1.11), and Google App Engine flexible environment, it fetches +// credentials from the metadata server. +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + // First, try the environment variable. + const envVar = "GOOGLE_APPLICATION_CREDENTIALS" + if filename := os.Getenv(envVar); filename != "" { + creds, err := readCredentialsFile(ctx, filename, scopes) + if err != nil { + return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) + } + return creds, nil + } + + // Second, try a well-known file. 
+ filename := wellKnownFile() + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { + return creds, nil + } else if !os.IsNotExist(err) { + return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) + } + + // Third, if we're on a Google App Engine standard first generation runtime (<= Go 1.9) + // use those credentials. App Engine standard second generation runtimes (>= Go 1.11) + // and App Engine flexible use ComputeTokenSource and the metadata server. + if appengineTokenFunc != nil { + return &DefaultCredentials{ + ProjectID: appengineAppIDFunc(ctx), + TokenSource: AppEngineTokenSource(ctx, scopes...), + }, nil + } + + // Fourth, if we're on Google Compute Engine, an App Engine standard second generation runtime, + // or App Engine flexible, use the metadata server. + if metadata.OnGCE() { + id, _ := metadata.ProjectID() + return &DefaultCredentials{ + ProjectID: id, + TokenSource: ComputeTokenSource("", scopes...), + }, nil + } + + // None are found; return helpful error. + const url = "https://developers.google.com/accounts/docs/application-default-credentials" + return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + +func wellKnownFile() string { + const f = "application_default_credentials.json" + if runtime.GOOS == "windows" { + return filepath.Join(os.Getenv("APPDATA"), "gcloud", f) + } + return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f) +} + +func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return CredentialsFromJSON(ctx, b, scopes...) +} diff --git a/vendor/golang.org/x/oauth2/google/doc.go b/vendor/golang.org/x/oauth2/google/doc.go new file mode 100644 index 00000000..73be6290 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc.go @@ -0,0 +1,40 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. 
Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON,
+// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or
+// create an http.Client.
+//
+//
+// Credentials
+//
+// The Credentials type represents Google credentials, including Application Default
+// Credentials.
+//
+// Use FindDefaultCredentials to obtain Application Default Credentials.
+// FindDefaultCredentials looks in some well-known places for a credentials file, and
+// will call AppEngineTokenSource or ComputeTokenSource as needed.
+//
+// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials,
+// then use the credentials to construct an http.Client or an oauth2.TokenSource.
+//
+// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats
+// described in OAuth2 Configs, above. The TokenSource in the returned value is the
+// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or
+// JWTConfigFromJSON, but the Credentials may contain additional information
+// that is useful in some circumstances.
+package google // import "golang.org/x/oauth2/google"
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 00000000..81de32b3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,209 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:   "https://accounts.google.com/o/oauth2/auth",
+	TokenURL:  "https://oauth2.googleapis.com/token",
+	AuthStyle: oauth2.AuthStyleInParams,
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://oauth2.googleapis.com/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from
+// https://console.developers.google.com, under "Credentials". Download the Web
+// application credentials in the JSON format and provide the contents of the
+// file as jsonKey.
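The package comment above describes FindDefaultCredentials, DefaultClient, and CredentialsFromJSON; before the vendored ConfigFromJSON body below, a hedged sketch of how the Application Default Credentials helpers are typically driven. The scope string is illustrative.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	scope := "https://www.googleapis.com/auth/cloud-platform" // illustrative scope

	// Walks the documented lookup order: env var, well-known file,
	// App Engine first generation, then the GCE metadata server.
	creds, err := google.FindDefaultCredentials(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("project: %s", creds.ProjectID)

	// DefaultClient is the one-call shortcut: FindDefaultCredentials
	// followed by oauth2.NewClient.
	client, err := google.DefaultClient(ctx, scope)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```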
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) { + type cred struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + RedirectURIs []string `json:"redirect_uris"` + AuthURI string `json:"auth_uri"` + TokenURI string `json:"token_uri"` + } + var j struct { + Web *cred `json:"web"` + Installed *cred `json:"installed"` + } + if err := json.Unmarshal(jsonKey, &j); err != nil { + return nil, err + } + var c *cred + switch { + case j.Web != nil: + c = j.Web + case j.Installed != nil: + c = j.Installed + default: + return nil, fmt.Errorf("oauth2/google: no credentials found") + } + if len(c.RedirectURIs) < 1 { + return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json") + } + return &oauth2.Config{ + ClientID: c.ClientID, + ClientSecret: c.ClientSecret, + RedirectURL: c.RedirectURIs[0], + Scopes: scope, + Endpoint: oauth2.Endpoint{ + AuthURL: c.AuthURI, + TokenURL: c.TokenURI, + }, + }, nil +} + +// JWTConfigFromJSON uses a Google Developers service account JSON key file to read +// the credentials that authorize and authenticate the requests. +// Create a service account on "Credentials" for your project at +// https://console.developers.google.com to download a JSON key file. +func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { + var f credentialsFile + if err := json.Unmarshal(jsonKey, &f); err != nil { + return nil, err + } + if f.Type != serviceAccountKey { + return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey) + } + scope = append([]string(nil), scope...) // copy + return f.jwtConfig(scope), nil +} + +// JSON key file types. +const ( + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" +) + +// credentialsFile is the unmarshalled representation of a credentials file. +type credentialsFile struct { + Type string `json:"type"` // serviceAccountKey or userCredentialsKey + + // Service Account fields + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + + // User Credential fields + // (These typically come from gcloud auth.) + ClientSecret string `json:"client_secret"` + ClientID string `json:"client_id"` + RefreshToken string `json:"refresh_token"` +} + +func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config { + cfg := &jwt.Config{ + Email: f.ClientEmail, + PrivateKey: []byte(f.PrivateKey), + PrivateKeyID: f.PrivateKeyID, + Scopes: scopes, + TokenURL: f.TokenURL, + } + if cfg.TokenURL == "" { + cfg.TokenURL = JWTTokenURL + } + return cfg +} + +func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) { + switch f.Type { + case serviceAccountKey: + cfg := f.jwtConfig(scopes) + return cfg.TokenSource(ctx), nil + case userCredentialsKey: + cfg := &oauth2.Config{ + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + Scopes: scopes, + Endpoint: Endpoint, + } + tok := &oauth2.Token{RefreshToken: f.RefreshToken} + return cfg.TokenSource(ctx, tok), nil + case "": + return nil, errors.New("missing 'type' field in credentials") + default: + return nil, fmt.Errorf("unknown credential type: %q", f.Type) + } +} + +// ComputeTokenSource returns a token source that fetches access tokens +// from Google Compute Engine (GCE)'s metadata server. 
It's only valid to use +// this token source if your program is running on a GCE instance. +// If no account is specified, "default" is used. +// If no scopes are specified, a set of default scopes are automatically granted. +// Further information about retrieving access tokens from the GCE metadata +// server can be found at https://cloud.google.com/compute/docs/authentication. +func ComputeTokenSource(account string, scope ...string) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, computeSource{account: account, scopes: scope}) +} + +type computeSource struct { + account string + scopes []string +} + +func (cs computeSource) Token() (*oauth2.Token, error) { + if !metadata.OnGCE() { + return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE") + } + acct := cs.account + if acct == "" { + acct = "default" + } + tokenURI := "instance/service-accounts/" + acct + "/token" + if len(cs.scopes) > 0 { + v := url.Values{} + v.Set("scopes", strings.Join(cs.scopes, ",")) + tokenURI = tokenURI + "?" + v.Encode() + } + tokenJSON, err := metadata.Get(tokenURI) + if err != nil { + return nil, err + } + var res struct { + AccessToken string `json:"access_token"` + ExpiresInSec int `json:"expires_in"` + TokenType string `json:"token_type"` + } + err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res) + if err != nil { + return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err) + } + if res.ExpiresInSec == 0 || res.AccessToken == "" { + return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata") + } + tok := &oauth2.Token{ + AccessToken: res.AccessToken, + TokenType: res.TokenType, + Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second), + } + // NOTE(cbro): add hidden metadata about where the token is from. + // This is needed for detection by client libraries to know that credentials come from the metadata server. + // This may be removed in a future version of this library. + return tok.WithExtra(map[string]interface{}{ + "oauth2.google.tokenSource": "compute-metadata", + "oauth2.google.serviceAccount": acct, + }), nil +} diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go new file mode 100644 index 00000000..b0fdb3a8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/jwt.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "crypto/rsa" + "fmt" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON +// key file to read the credentials that authorize and authenticate the +// requests, and returns a TokenSource that does not use any OAuth2 flow but +// instead creates a JWT and sends that as the access token. +// The audience is typically a URL that specifies the scope of the credentials. +// +// Note that this is not a standard OAuth flow, but rather an +// optimization supported by a few Google services. +// Unless you know otherwise, you should use JWTConfigFromJSON instead. 
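Since the comment above points most callers at JWTConfigFromJSON, a short sketch of that recommended path before the JWTAccessTokenSourceFromJSON implementation below; the key-file path and scope are hypothetical.

```go
package main

import (
	"context"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	data, err := ioutil.ReadFile("service-account.json") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		log.Fatal(err)
	}
	// conf.Client runs the two-legged JWT flow against conf.TokenURL.
	client := conf.Client(ctx)
	_ = client
}
```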
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) { + cfg, err := JWTConfigFromJSON(jsonKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse JSON key: %v", err) + } + pk, err := internal.ParseKey(cfg.PrivateKey) + if err != nil { + return nil, fmt.Errorf("google: could not parse key: %v", err) + } + ts := &jwtAccessTokenSource{ + email: cfg.Email, + audience: audience, + pk: pk, + pkID: cfg.PrivateKeyID, + } + tok, err := ts.Token() + if err != nil { + return nil, err + } + return oauth2.ReuseTokenSource(tok, ts), nil +} + +type jwtAccessTokenSource struct { + email, audience string + pk *rsa.PrivateKey + pkID string +} + +func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) { + iat := time.Now() + exp := iat.Add(time.Hour) + cs := &jws.ClaimSet{ + Iss: ts.email, + Sub: ts.email, + Aud: ts.audience, + Iat: iat.Unix(), + Exp: exp.Unix(), + } + hdr := &jws.Header{ + Algorithm: "RS256", + Typ: "JWT", + KeyID: string(ts.pkID), + } + msg, err := jws.Encode(hdr, cs, ts.pk) + if err != nil { + return nil, fmt.Errorf("google: could not encode JWT: %v", err) + } + return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil +} diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go new file mode 100644 index 00000000..456224bc --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/sdk.go @@ -0,0 +1,201 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package google + +import ( + "bufio" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/user" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" +) + +type sdkCredentials struct { + Data []struct { + Credential struct { + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + TokenExpiry *time.Time `json:"token_expiry"` + } `json:"credential"` + Key struct { + Account string `json:"account"` + Scope string `json:"scope"` + } `json:"key"` + } +} + +// An SDKConfig provides access to tokens from an account already +// authorized via the Google Cloud SDK. +type SDKConfig struct { + conf oauth2.Config + initialToken *oauth2.Token +} + +// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK +// account. If account is empty, the account currently active in +// Google Cloud SDK properties is used. +// Google Cloud SDK credentials must be created by running `gcloud auth` +// before using this function. +// The Google Cloud SDK is available at https://cloud.google.com/sdk/. 
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := parseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
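Putting the SDKConfig pieces above together, a hedged sketch; it assumes `gcloud auth login` has already been run, and the account name is hypothetical.

```go
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	// Pass "" to use the account currently active in gcloud properties.
	conf, err := google.NewSDKConfig("user@example.com")
	if err != nil {
		log.Fatal(err)
	}
	// The returned client refreshes the stored token as needed but never
	// writes the new token back to the gcloud credentials file.
	client := conf.Client(context.Background())
	_ = client
	log.Printf("authorized scopes: %v", conf.Scopes())
}
```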
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+func parseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": {}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	// Else, fall back to user.Current:
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go
new file mode 100644
index 00000000..74348718
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import "google.golang.org/appengine/urlfetch"
+
+func init() {
+	appengineClientHook = urlfetch.Client
+}
diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go
new file mode 100644
index 00000000..03265e88
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for the oauth2 package.
+package internal
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 00000000..c0ab196c
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
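To make the PEM-then-PKCS#8-then-PKCS#1 fallback described above concrete before the ParseKey body below, a self-contained sketch using only the standard library: it generates an RSA key, PEM-encodes it as PKCS#1, and parses it back the same way ParseKey does.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

func main() {
	key, _ := rsa.GenerateKey(rand.Reader, 2048) // error ignored for brevity
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// Strip the PEM container if present, as ParseKey does.
	der := pemBytes
	if block, _ := pem.Decode(pemBytes); block != nil {
		der = block.Bytes
	}
	if _, err := x509.ParsePKCS8PrivateKey(der); err != nil {
		// PKCS#8 failed; fall back to PKCS#1, mirroring ParseKey.
		parsed, err := x509.ParsePKCS1PrivateKey(der)
		fmt.Println(parsed != nil, err)
	}
}
```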
+func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 00000000..355c3869 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,294 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context/ctxhttp" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + if len(b) == 0 || string(b) == "null" { + return nil + } + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + if i > math.MaxInt32 { + i = math.MaxInt32 + } + *e = expirationTime(i) + return nil +} + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. 
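As a sketch of the Endpoint.AuthStyle hint that the deprecation note above refers to (URLs and credentials are placeholders): setting it explicitly skips the header-versus-params auto-probe described later in this file.

```go
package main

import "golang.org/x/oauth2"

func main() {
	conf := &oauth2.Config{
		ClientID:     "my-client-id",
		ClientSecret: "my-client-secret",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/auth",
			TokenURL: "https://provider.example.com/token",
			// This provider wants credentials in the POST body,
			// so say so up front and avoid the probe request.
			AuthStyle: oauth2.AuthStyleInParams,
		},
	}
	_ = conf
}
```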
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. +type AuthStyle int + +const ( + AuthStyleUnknown AuthStyle = 0 + AuthStyleInParams AuthStyle = 1 + AuthStyleInHeader AuthStyle = 2 +) + +// authStyleCache is the set of tokenURLs we've successfully used via +// RetrieveToken and which style auth we ended up using. +// It's called a cache, but it doesn't (yet?) shrink. It's expected that +// the set of OAuth2 servers a program contacts over time is fixed and +// small. +var authStyleCache struct { + sync.Mutex + m map[string]AuthStyle // keyed by tokenURL +} + +// ResetAuthCache resets the global authentication style cache used +// for AuthStyleUnknown token requests. +func ResetAuthCache() { + authStyleCache.Lock() + defer authStyleCache.Unlock() + authStyleCache.m = nil +} + +// lookupAuthStyle reports which auth style we last used with tokenURL +// when calling RetrieveToken and whether we have ever done so. +func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + style, ok = authStyleCache.m[tokenURL] + return +} + +// setAuthStyle adds an entry to authStyleCache, documented above. +func setAuthStyle(tokenURL string, v AuthStyle) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + if authStyleCache.m == nil { + authStyleCache.m = make(map[string]AuthStyle) + } + authStyleCache.m[tokenURL] = v +} + +// newTokenRequest returns a new *http.Request to retrieve a new token +// from tokenURL using the provided clientID, clientSecret, and POST +// body parameters. +// +// inParams is whether the clientID & clientSecret should be encoded +// as the POST body. An 'inParams' value of true means to send it in +// the POST body (along with any values in v); false means to send it +// in the Authorization header. +func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { + if authStyle == AuthStyleInParams { + v = cloneURLValues(v) + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if authStyle == AuthStyleInHeader { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + return req, nil +} + +func cloneURLValues(v url.Values) url.Values { + v2 := make(url.Values, len(v)) + for k, vv := range v { + v2[k] = append([]string(nil), vv...) + } + return v2 +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { + needsAuthStyleProbe := authStyle == 0 + if needsAuthStyleProbe { + if style, ok := lookupAuthStyle(tokenURL); ok { + authStyle = style + needsAuthStyleProbe = false + } else { + authStyle = AuthStyleInHeader // the first way we'll try + } + } + req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + if err != nil { + return nil, err + } + token, err := doTokenRoundTrip(ctx, req) + if err != nil && needsAuthStyleProbe { + // If we get an error, assume the server wants the + // clientID & clientSecret in a different form. + // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. 
+ // In summary: + // - Reddit only accepts client secret in the Authorization header + // - Dropbox accepts either it in URL param or Auth header, but not both. + // - Google only accepts URL param (not spec compliant?), not Auth header + // - Stripe only accepts client secret in Auth header with Bearer method, not Basic + // + // We used to maintain a big table in this code of all the sites and which way + // they went, but maintaining it didn't scale & got annoying. + // So just try both ways. + authStyle = AuthStyleInParams // the second way we'll try + req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + token, err = doTokenRoundTrip(ctx, req) + } + if needsAuthStyleProbe && err == nil { + setAuthStyle(tokenURL, authStyle) + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. + if token != nil && token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, err +} + +func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + r.Body.Close() + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil +} + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 00000000..572074a6 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "net/http" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. 
It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+var appengineClientHook func(context.Context) *http.Client
+
+func ContextClient(ctx context.Context) *http.Client {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+			return hc
+		}
+	}
+	if appengineClientHook != nil {
+		return appengineClientHook(ctx)
+	}
+	return http.DefaultClient
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 00000000..683d2d27
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This map is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Reverting time back for machines whose time is not perfectly in sync.
+	// If client machine's time is in the future according
+	// to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims) + if err != nil { + return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims) + } + + // Concatenate public and private claim JSON objects. + if !bytes.HasSuffix(b, []byte{'}'}) { + return "", fmt.Errorf("jws: invalid JSON %s", b) + } + if !bytes.HasPrefix(prv, []byte{'{'}) { + return "", fmt.Errorf("jws: invalid JSON %s", prv) + } + b[len(b)-1] = ',' // Replace closing curly brace with a comma. + b = append(b, prv[1:]...) // Append private claims. + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Header represents the header for the signed JWS payloads. +type Header struct { + // The algorithm used for signature. + Algorithm string `json:"alg"` + + // Represents the token type. + Typ string `json:"typ"` + + // The optional hint of which key is being used. + KeyID string `json:"kid,omitempty"` +} + +func (h *Header) encode() (string, error) { + b, err := json.Marshal(h) + if err != nil { + return "", err + } + return base64.RawURLEncoding.EncodeToString(b), nil +} + +// Decode decodes a claim set from a JWS payload. +func Decode(payload string) (*ClaimSet, error) { + // decode returned id token to get expiry + s := strings.Split(payload, ".") + if len(s) < 2 { + // TODO(jbd): Provide more context about the error. + return nil, errors.New("jws: invalid token received") + } + decoded, err := base64.RawURLEncoding.DecodeString(s[1]) + if err != nil { + return nil, err + } + c := &ClaimSet{} + err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c) + return c, err +} + +// Signer returns a signature for the given data. +type Signer func(data []byte) (sig []byte, err error) + +// EncodeWithSigner encodes a header and claim set with the provided signer. +func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) { + head, err := header.encode() + if err != nil { + return "", err + } + cs, err := c.encode() + if err != nil { + return "", err + } + ss := fmt.Sprintf("%s.%s", head, cs) + sig, err := sg([]byte(ss)) + if err != nil { + return "", err + } + return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil +} + +// Encode encodes a signed JWS with provided header and claim set. +// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key. +func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { + sg := func(data []byte) (sig []byte, err error) { + h := sha256.New() + h.Write(data) + return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil)) + } + return EncodeWithSigner(header, c, sg) +} + +// Verify tests whether the provided JWT token's signature was produced by the private key +// associated with the supplied public key. +func Verify(token string, key *rsa.PublicKey) error { + parts := strings.Split(token, ".") + if len(parts) != 3 { + return errors.New("jws: invalid token received, token must have 3 parts") + } + + signedContent := parts[0] + "." + parts[1] + signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) + if err != nil { + return err + } + + h := sha256.New() + h.Write([]byte(signedContent)) + return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString)) +} diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go new file mode 100644 index 00000000..b2bf1829 --- /dev/null +++ b/vendor/golang.org/x/oauth2/jwt/jwt.go @@ -0,0 +1,185 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly +// known as "two-legged OAuth 2.0". +// +// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12 +package jwt + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/internal" + "golang.org/x/oauth2/jws" +) + +var ( + defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" + defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"} +) + +// Config is the configuration for using JWT to fetch tokens, +// commonly known as "two-legged OAuth 2.0". +type Config struct { + // Email is the OAuth client identifier used when communicating with + // the configured OAuth provider. + Email string + + // PrivateKey contains the contents of an RSA private key or the + // contents of a PEM file that contains a private key. The provided + // private key is used to sign JWT payloads. + // PEM containers with a passphrase are not supported. + // Use the following command to convert a PKCS 12 file into a PEM. + // + // $ openssl pkcs12 -in key.p12 -out key.pem -nodes + // + PrivateKey []byte + + // PrivateKeyID contains an optional hint indicating which key is being + // used. + PrivateKeyID string + + // Subject is the optional user to impersonate. + Subject string + + // Scopes optionally specifies a list of requested permission scopes. + Scopes []string + + // TokenURL is the endpoint required to complete the 2-legged JWT flow. + TokenURL string + + // Expires optionally specifies how long the token is valid for. + Expires time.Duration + + // Audience optionally specifies the intended audience of the + // request. If empty, the value of TokenURL is used as the + // intended audience. + Audience string + + // PrivateClaims optionally specifies custom private claims in the JWT. + // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3 + PrivateClaims map[string]interface{} + + // UseIDToken optionally specifies whether ID token should be used instead + // of access token when the server returns both. + UseIDToken bool +} + +// TokenSource returns a JWT TokenSource using the configuration +// in c and the HTTP client from the provided context. +func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource { + return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c}) +} + +// Client returns an HTTP client wrapping the context's +// HTTP transport and adding Authorization headers with tokens +// obtained from c. +// +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context) *http.Client { + return oauth2.NewClient(ctx, c.TokenSource(ctx)) +} + +// jwtSource is a source that always does a signed JWT request for a token. +// It should typically be wrapped with a reuseTokenSource. +type jwtSource struct { + ctx context.Context + conf *Config +} + +func (js jwtSource) Token() (*oauth2.Token, error) { + pk, err := internal.ParseKey(js.conf.PrivateKey) + if err != nil { + return nil, err + } + hc := oauth2.NewClient(js.ctx, nil) + claimSet := &jws.ClaimSet{ + Iss: js.conf.Email, + Scope: strings.Join(js.conf.Scopes, " "), + Aud: js.conf.TokenURL, + PrivateClaims: js.conf.PrivateClaims, + } + if subject := js.conf.Subject; subject != "" { + claimSet.Sub = subject + // prn is the old name of sub. 
Keep setting it + // to be compatible with legacy OAuth 2.0 providers. + claimSet.Prn = subject + } + if t := js.conf.Expires; t > 0 { + claimSet.Exp = time.Now().Add(t).Unix() + } + if aud := js.conf.Audience; aud != "" { + claimSet.Aud = aud + } + h := *defaultHeader + h.KeyID = js.conf.PrivateKeyID + payload, err := jws.Encode(&h, claimSet, pk) + if err != nil { + return nil, err + } + v := url.Values{} + v.Set("grant_type", defaultGrantType) + v.Set("assertion", payload) + resp, err := hc.PostForm(js.conf.TokenURL, v) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if c := resp.StatusCode; c < 200 || c > 299 { + return nil, &oauth2.RetrieveError{ + Response: resp, + Body: body, + } + } + // tokenRes is the JSON response body. + var tokenRes struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + IDToken string `json:"id_token"` + ExpiresIn int64 `json:"expires_in"` // relative seconds from now + } + if err := json.Unmarshal(body, &tokenRes); err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + token := &oauth2.Token{ + AccessToken: tokenRes.AccessToken, + TokenType: tokenRes.TokenType, + } + raw := make(map[string]interface{}) + json.Unmarshal(body, &raw) // no error checks for optional fields + token = token.WithExtra(raw) + + if secs := tokenRes.ExpiresIn; secs > 0 { + token.Expiry = time.Now().Add(time.Duration(secs) * time.Second) + } + if v := tokenRes.IDToken; v != "" { + // decode returned id token to get expiry + claimSet, err := jws.Decode(v) + if err != nil { + return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err) + } + token.Expiry = time.Unix(claimSet.Exp, 0) + } + if js.conf.UseIDToken { + if tokenRes.IDToken == "" { + return nil, fmt.Errorf("oauth2: response doesn't have JWT token") + } + token.AccessToken = tokenRes.IDToken + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 00000000..291df5c8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,381 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. +// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "context" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. 
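+//
+// For example (illustrative; cfg is an assumed *Config):
+//
+//    cfg.Endpoint.AuthStyle = oauth2.AuthStyleInParams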
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+    // ClientID is the application's ID.
+    ClientID string
+
+    // ClientSecret is the application's secret.
+    ClientSecret string
+
+    // Endpoint contains the resource server's token endpoint
+    // URLs. These are constants specific to each server and are
+    // often available via site-specific packages, such as
+    // google.Endpoint or github.Endpoint.
+    Endpoint Endpoint
+
+    // RedirectURL is the URL to redirect users to after they
+    // complete the authorization step of the OAuth flow.
+    RedirectURL string
+
+    // Scopes specifies optional requested permissions.
+    Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+    // Token returns a token or an error.
+    // Token must be safe for concurrent use by multiple goroutines.
+    // The returned Token must not be modified.
+    Token() (*Token, error)
+}
+
+// Endpoint represents an OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+    AuthURL  string
+    TokenURL string
+
+    // AuthStyle optionally specifies how the endpoint wants the
+    // client ID & client secret sent. The zero value means to
+    // auto-detect.
+    AuthStyle AuthStyle
+}
+
+// AuthStyle represents how requests for tokens are authenticated
+// to the server.
+type AuthStyle int
+
+const (
+    // AuthStyleAutoDetect means to auto-detect which authentication
+    // style the provider wants by trying both ways and caching
+    // the successful way for the future.
+    AuthStyleAutoDetect AuthStyle = 0
+
+    // AuthStyleInParams sends the "client_id" and "client_secret"
+    // in the POST body as application/x-www-form-urlencoded parameters.
+    AuthStyleInParams AuthStyle = 1
+
+    // AuthStyleInHeader sends the client_id and client_secret
+    // using HTTP Basic Authorization. This is an optional style
+    // described in the OAuth2 RFC 6749 section 2.3.1.
+    AuthStyleInHeader AuthStyle = 2
+)
+
+var (
+    // AccessTypeOnline and AccessTypeOffline are options passed
+    // to the Config.AuthCodeURL method. They modify the
+    // "access_type" field that gets sent in the URL returned by
+    // AuthCodeURL.
+    //
+    // Online is the default if neither is specified. If your
+    // application needs to refresh access tokens when the user
+    // is not present at the browser, then use offline. This will
+    // result in your application obtaining a refresh token the
+    // first time your application exchanges an authorization
+    // code for a user.
+    AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+    AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+    // ApprovalForce forces users to view the consent dialog
+    // and confirm the permissions request at the URL returned
+    // from AuthCodeURL, even if they've already done so.
+    ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+    setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
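+//
+// An illustrative use (the "audience" parameter name is only an example):
+//
+//    url := conf.AuthCodeURL("state", oauth2.SetAuthURLParam("audience", "https://api.example.com/"))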
+func SetAuthURLParam(key, value string) AuthCodeOption {
+    return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to the OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-empty string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+// It can also be used to pass the PKCE challenge.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+    var buf bytes.Buffer
+    buf.WriteString(c.Endpoint.AuthURL)
+    v := url.Values{
+        "response_type": {"code"},
+        "client_id":     {c.ClientID},
+    }
+    if c.RedirectURL != "" {
+        v.Set("redirect_uri", c.RedirectURL)
+    }
+    if len(c.Scopes) > 0 {
+        v.Set("scope", strings.Join(c.Scopes, " "))
+    }
+    if state != "" {
+        // TODO(light): Docs say never to omit state; don't allow empty.
+        v.Set("state", state)
+    }
+    for _, opt := range opts {
+        opt.setValue(v)
+    }
+    if strings.Contains(c.Endpoint.AuthURL, "?") {
+        buf.WriteByte('&')
+    } else {
+        buf.WriteByte('?')
+    }
+    buf.WriteString(v.Encode())
+    return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+    v := url.Values{
+        "grant_type": {"password"},
+        "username":   {username},
+        "password":   {password},
+    }
+    if len(c.Scopes) > 0 {
+        v.Set("scope", strings.Join(c.Scopes, " "))
+    }
+    return retrieveToken(ctx, c, v)
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The provided context optionally controls which HTTP client is used. See the HTTPClient variable.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+//
+// Opts may include the PKCE verifier code if previously used in AuthCodeURL.
+// See https://www.oauth.com/oauth2-servers/pkce/ for more info.
+func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) {
+    v := url.Values{
+        "grant_type": {"authorization_code"},
+        "code":       {code},
+    }
+    if c.RedirectURL != "" {
+        v.Set("redirect_uri", c.RedirectURL)
+    }
+    for _, opt := range opts {
+        opt.setValue(v)
+    }
+    return retrieveToken(ctx, c, v)
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
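+//
+// A minimal sketch of the usual flow (the API URL is a placeholder and
+// error handling is elided):
+//
+//    tok, _ := conf.Exchange(ctx, code)
+//    client := conf.Client(ctx, tok)
+//    resp, _ := client.Get("https://api.example.com/resource")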
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+    return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+    tkr := &tokenRefresher{
+        ctx:  ctx,
+        conf: c,
+    }
+    if t != nil {
+        tkr.refreshToken = t.RefreshToken
+    }
+    return &reuseTokenSource{
+        t:   t,
+        new: tkr,
+    }
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+    ctx          context.Context // used to get HTTP requests
+    conf         *Config
+    refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+    if tf.refreshToken == "" {
+        return nil, errors.New("oauth2: token expired and refresh token is not set")
+    }
+
+    tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+        "grant_type":    {"refresh_token"},
+        "refresh_token": {tf.refreshToken},
+    })
+
+    if err != nil {
+        return nil, err
+    }
+    if tf.refreshToken != tk.RefreshToken {
+        tf.refreshToken = tk.RefreshToken
+    }
+    return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+    new TokenSource // called when t is expired.
+
+    mu sync.Mutex // guards t
+    t  *Token
+}
+
+// Token returns the current token if it's still valid, else it will
+// refresh the current token using the wrapped TokenSource and
+// return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+    s.mu.Lock()
+    defer s.mu.Unlock()
+    if s.t.Valid() {
+        return s.t, nil
+    }
+    t, err := s.new.Token()
+    if err != nil {
+        return nil, err
+    }
+    s.t = t
+    return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+    return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+    t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+    return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// Note that if a custom *http.Client is provided via the Context it
+// is used only for token acquisition and is not used to configure the
+// *http.Client returned from NewClient.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
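+//
+// For example, a sketch using a static token (the token value is a
+// placeholder):
+//
+//    src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "placeholder"})
+//    client := oauth2.NewClient(ctx, src)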
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+    if src == nil {
+        return internal.ContextClient(ctx)
+    }
+    return &http.Client{
+        Transport: &Transport{
+            Base:   internal.ContextClient(ctx).Transport,
+            Source: ReuseTokenSource(nil, src),
+        },
+    }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+    // Don't wrap a reuseTokenSource in itself. That would work,
+    // but cause an unnecessary number of mutex operations.
+    // Just build the equivalent one.
+    if rt, ok := src.(*reuseTokenSource); ok {
+        if t == nil {
+            // Just use it directly.
+            return rt
+        }
+        src = rt.new
+    }
+    return &reuseTokenSource{
+        t:   t,
+        new: src,
+    }
+}
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 00000000..82272034
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,178 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+    "context"
+    "fmt"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+    "time"
+
+    "golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+    // AccessToken is the token that authorizes and authenticates
+    // the requests.
+    AccessToken string `json:"access_token"`
+
+    // TokenType is the type of token.
+    // The Type method returns either this or "Bearer", the default.
+    TokenType string `json:"token_type,omitempty"`
+
+    // RefreshToken is a token that's used by the application
+    // (as opposed to the user) to refresh the access token
+    // if it expires.
+    RefreshToken string `json:"refresh_token,omitempty"`
+
+    // Expiry is the optional expiration time of the access token.
+    //
+    // If zero, TokenSource implementations will reuse the same
+    // token forever and RefreshToken or equivalent
+    // mechanisms for that TokenSource will not be used.
+    Expiry time.Time `json:"expiry,omitempty"`
+
+    // raw optionally contains extra metadata from the server
+    // when updating a token.
+    raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
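+// The comparison against the known types ("bearer", "mac", "basic") is
+// case-insensitive.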
+func (t *Token) Type() string {
+    if strings.EqualFold(t.TokenType, "bearer") {
+        return "Bearer"
+    }
+    if strings.EqualFold(t.TokenType, "mac") {
+        return "MAC"
+    }
+    if strings.EqualFold(t.TokenType, "basic") {
+        return "Basic"
+    }
+    if t.TokenType != "" {
+        return t.TokenType
+    }
+    return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+    r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+    t2 := new(Token)
+    *t2 = *t
+    t2.raw = extra
+    return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+    if raw, ok := t.raw.(map[string]interface{}); ok {
+        return raw[key]
+    }
+
+    vals, ok := t.raw.(url.Values)
+    if !ok {
+        return nil
+    }
+
+    v := vals.Get(key)
+    switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+    case 0: // Contains no "."; try to parse as int
+        if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+            return i
+        }
+    case 1: // Contains a single "."; try to parse as float
+        if f, err := strconv.ParseFloat(s, 64); err == nil {
+            return f
+        }
+    }
+
+    return v
+}
+
+// timeNow is time.Now but pulled out as a variable for tests.
+var timeNow = time.Now
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+    if t.Expiry.IsZero() {
+        return false
+    }
+    return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+    return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+    if t == nil {
+        return nil
+    }
+    return &Token{
+        AccessToken:  t.AccessToken,
+        TokenType:    t.TokenType,
+        RefreshToken: t.RefreshToken,
+        Expiry:       t.Expiry,
+        raw:          t.Raw,
+    }
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+    tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle))
+    if err != nil {
+        if rErr, ok := err.(*internal.RetrieveError); ok {
+            return nil, (*RetrieveError)(rErr)
+        }
+        return nil, err
+    }
+    return tokenFromInternal(tk), nil
+}
+
+// RetrieveError is the error returned when the token endpoint returns a
+// non-2XX HTTP status code.
+type RetrieveError struct {
+    Response *http.Response
+    // Body is the body that was consumed by reading Response.Body.
+    // It may be truncated.
+    Body []byte
+}
+
+func (r *RetrieveError) Error() string {
+    return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body)
+}
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..aa0d34f1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,144 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+    "errors"
+    "io"
+    "net/http"
+    "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Source.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+    // Source supplies the token to add to outgoing requests'
+    // Authorization headers.
+    Source TokenSource
+
+    // Base is the base RoundTripper used to make HTTP requests.
+    // If nil, http.DefaultTransport is used.
+    Base http.RoundTripper
+
+    mu     sync.Mutex                      // guards modReq
+    modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token from Transport's Source.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+    reqBodyClosed := false
+    if req.Body != nil {
+        defer func() {
+            if !reqBodyClosed {
+                req.Body.Close()
+            }
+        }()
+    }
+
+    if t.Source == nil {
+        return nil, errors.New("oauth2: Transport's Source is nil")
+    }
+    token, err := t.Source.Token()
+    if err != nil {
+        return nil, err
+    }
+
+    req2 := cloneRequest(req) // per RoundTripper contract
+    token.SetAuthHeader(req2)
+    t.setModReq(req, req2)
+    res, err := t.base().RoundTrip(req2)
+
+    // req.Body is assumed to have been closed by the base RoundTripper.
+    reqBodyClosed = true
+
+    if err != nil {
+        t.setModReq(req, nil)
+        return nil, err
+    }
+    res.Body = &onEOFReader{
+        rc: res.Body,
+        fn: func() { t.setModReq(req, nil) },
+    }
+    return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+    type canceler interface {
+        CancelRequest(*http.Request)
+    }
+    if cr, ok := t.base().(canceler); ok {
+        t.mu.Lock()
+        modReq := t.modReq[req]
+        delete(t.modReq, req)
+        t.mu.Unlock()
+        cr.CancelRequest(modReq)
+    }
+}
+
+func (t *Transport) base() http.RoundTripper {
+    if t.Base != nil {
+        return t.Base
+    }
+    return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+    t.mu.Lock()
+    defer t.mu.Unlock()
+    if t.modReq == nil {
+        t.modReq = make(map[*http.Request]*http.Request)
+    }
+    if mod == nil {
+        delete(t.modReq, orig)
+    } else {
+        t.modReq[orig] = mod
+    }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+    // shallow copy of the struct
+    r2 := new(http.Request)
+    *r2 = *r
+    // deep copy of the Header
+    r2.Header = make(http.Header, len(r.Header))
+    for k, s := range r.Header {
+        r2.Header[k] = append([]string(nil), s...)
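+        // (append to a nil slice allocates fresh storage, so the clone's
+        // header values never alias the original's backing arrays)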
+ } + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/semaphore/semaphore.go b/vendor/golang.org/x/sync/semaphore/semaphore.go new file mode 100644 index 00000000..7f096fef --- /dev/null +++ b/vendor/golang.org/x/sync/semaphore/semaphore.go @@ -0,0 +1,127 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semaphore provides a weighted semaphore implementation. +package semaphore // import "golang.org/x/sync/semaphore" + +import ( + "container/list" + "context" + "sync" +) + +type waiter struct { + n int64 + ready chan<- struct{} // Closed when semaphore acquired. +} + +// NewWeighted creates a new weighted semaphore with the given +// maximum combined weight for concurrent access. +func NewWeighted(n int64) *Weighted { + w := &Weighted{size: n} + return w +} + +// Weighted provides a way to bound concurrent access to a resource. +// The callers can request access with a given weight. +type Weighted struct { + size int64 + cur int64 + mu sync.Mutex + waiters list.List +} + +// Acquire acquires the semaphore with a weight of n, blocking until resources +// are available or ctx is done. On success, returns nil. On failure, returns +// ctx.Err() and leaves the semaphore unchanged. +// +// If ctx is already done, Acquire may still succeed without blocking. +func (s *Weighted) Acquire(ctx context.Context, n int64) error { + s.mu.Lock() + if s.size-s.cur >= n && s.waiters.Len() == 0 { + s.cur += n + s.mu.Unlock() + return nil + } + + if n > s.size { + // Don't make other Acquire calls block on one that's doomed to fail. + s.mu.Unlock() + <-ctx.Done() + return ctx.Err() + } + + ready := make(chan struct{}) + w := waiter{n: n, ready: ready} + elem := s.waiters.PushBack(w) + s.mu.Unlock() + + select { + case <-ctx.Done(): + err := ctx.Err() + s.mu.Lock() + select { + case <-ready: + // Acquired the semaphore after we were canceled. Rather than trying to + // fix up the queue, just pretend we didn't notice the cancelation. + err = nil + default: + s.waiters.Remove(elem) + } + s.mu.Unlock() + return err + + case <-ready: + return nil + } +} + +// TryAcquire acquires the semaphore with a weight of n without blocking. +// On success, returns true. On failure, returns false and leaves the semaphore unchanged. +func (s *Weighted) TryAcquire(n int64) bool { + s.mu.Lock() + success := s.size-s.cur >= n && s.waiters.Len() == 0 + if success { + s.cur += n + } + s.mu.Unlock() + return success +} + +// Release releases the semaphore with a weight of n. +func (s *Weighted) Release(n int64) { + s.mu.Lock() + s.cur -= n + if s.cur < 0 { + s.mu.Unlock() + panic("semaphore: released more than held") + } + for { + next := s.waiters.Front() + if next == nil { + break // No more waiters blocked. + } + + w := next.Value.(waiter) + if s.size-s.cur < w.n { + // Not enough tokens for the next waiter. 
We could keep going (to try to + // find a waiter with a smaller request), but under load that could cause + // starvation for large requests; instead, we leave all remaining waiters + // blocked. + // + // Consider a semaphore used as a read-write lock, with N tokens, N + // readers, and one writer. Each reader can Acquire(1) to obtain a read + // lock. The writer can Acquire(N) to obtain a write lock, excluding all + // of the readers. If we allow the readers to jump ahead in the queue, + // the writer will starve — there is always one token available for every + // reader. + break + } + + s.cur += w.n + s.waiters.Remove(next) + close(w.ready) + } + s.mu.Unlock() +} diff --git a/vendor/golang.org/x/sys/LICENSE b/vendor/golang.org/x/sys/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/sys/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
new file mode 100644
index 00000000..06f84b85
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+//
+// System calls for ppc64, AIX are implemented in runtime/syscall_aix.go
+//
+
+TEXT ·syscall6(SB),NOSPLIT,$0-88
+    JMP syscall·syscall6(SB)
+
+TEXT ·rawSyscall6(SB),NOSPLIT,$0-88
+    JMP syscall·rawSyscall6(SB)
diff --git a/vendor/golang.org/x/sys/cpu/byteorder.go b/vendor/golang.org/x/sys/cpu/byteorder.go
new file mode 100644
index 00000000..da6b9e43
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/byteorder.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+import (
+    "encoding/binary"
+    "runtime"
+)
+
+// hostByteOrder returns binary.LittleEndian on little-endian machines and
+// binary.BigEndian on big-endian machines.
+func hostByteOrder() binary.ByteOrder {
+    switch runtime.GOARCH {
+    case "386", "amd64", "amd64p32",
+        "arm", "arm64",
+        "mipsle", "mips64le", "mips64p32le",
+        "ppc64le",
+        "riscv", "riscv64":
+        return binary.LittleEndian
+    case "armbe", "arm64be",
+        "mips", "mips64", "mips64p32",
+        "ppc", "ppc64",
+        "s390", "s390x",
+        "sparc", "sparc64":
+        return binary.BigEndian
+    }
+    panic("unknown architecture")
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
new file mode 100644
index 00000000..679e78c2
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cpu implements processor feature detection for
+// various CPU architectures.
+package cpu
+
+// Initialized reports whether the CPU features were initialized.
+//
+// For some GOOS/GOARCH combinations initialization of the CPU features depends
+// on reading an operating system specific file, e.g. /proc/self/auxv on linux/arm.
+// Initialized will report false if reading the file fails.
+var Initialized bool
+
+// CacheLinePad is used to pad structs to avoid false sharing.
+type CacheLinePad struct{ _ [cacheLineSize]byte }
+
+// X86 contains the supported CPU features of the
+// current X86/AMD64 platform. If the current platform
+// is not X86/AMD64 then all feature flags are false.
+//
+// X86 is padded to avoid false sharing. Further, HasAVX
+// and HasAVX2 are only set if the OS supports XMM and YMM
+// registers in addition to the CPUID feature bit being set.
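+//
+// An illustrative feature check:
+//
+//    if cpu.X86.HasAVX2 {
+//        // take the AVX2 code path
+//    }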
+var X86 struct { + _ CacheLinePad + HasAES bool // AES hardware implementation (AES NI) + HasADX bool // Multi-precision add-carry instruction extensions + HasAVX bool // Advanced vector extension + HasAVX2 bool // Advanced vector extension 2 + HasBMI1 bool // Bit manipulation instruction set 1 + HasBMI2 bool // Bit manipulation instruction set 2 + HasERMS bool // Enhanced REP for MOVSB and STOSB + HasFMA bool // Fused-multiply-add instructions + HasOSXSAVE bool // OS supports XSAVE/XRESTOR for saving/restoring XMM registers. + HasPCLMULQDQ bool // PCLMULQDQ instruction - most often used for AES-GCM + HasPOPCNT bool // Hamming weight instruction POPCNT. + HasRDRAND bool // RDRAND instruction (on-chip random number generator) + HasRDSEED bool // RDSEED instruction (on-chip random number generator) + HasSSE2 bool // Streaming SIMD extension 2 (always available on amd64) + HasSSE3 bool // Streaming SIMD extension 3 + HasSSSE3 bool // Supplemental streaming SIMD extension 3 + HasSSE41 bool // Streaming SIMD extension 4 and 4.1 + HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + _ CacheLinePad +} + +// ARM64 contains the supported CPU features of the +// current ARMv8(aarch64) platform. If the current platform +// is not arm64 then all feature flags are false. +var ARM64 struct { + _ CacheLinePad + HasFP bool // Floating-point instruction set (always available) + HasASIMD bool // Advanced SIMD (always available) + HasEVTSTRM bool // Event stream support + HasAES bool // AES hardware implementation + HasPMULL bool // Polynomial multiplication instruction set + HasSHA1 bool // SHA1 hardware implementation + HasSHA2 bool // SHA2 hardware implementation + HasCRC32 bool // CRC32 hardware implementation + HasATOMICS bool // Atomic memory operation instruction set + HasFPHP bool // Half precision floating-point instruction set + HasASIMDHP bool // Advanced SIMD half precision instruction set + HasCPUID bool // CPUID identification scheme registers + HasASIMDRDM bool // Rounding double multiply add/subtract instruction set + HasJSCVT bool // Javascript conversion from floating-point to integer + HasFCMA bool // Floating-point multiplication and addition of complex numbers + HasLRCPC bool // Release Consistent processor consistent support + HasDCPOP bool // Persistent memory support + HasSHA3 bool // SHA3 hardware implementation + HasSM3 bool // SM3 hardware implementation + HasSM4 bool // SM4 hardware implementation + HasASIMDDP bool // Advanced SIMD double precision instruction set + HasSHA512 bool // SHA512 hardware implementation + HasSVE bool // Scalable Vector Extensions + HasASIMDFHM bool // Advanced SIMD multiplication FP16 to FP32 + _ CacheLinePad +} + +// PPC64 contains the supported CPU features of the current ppc64/ppc64le platforms. +// If the current platform is not ppc64/ppc64le then all feature flags are false. +// +// For ppc64/ppc64le, it is safe to check only for ISA level starting on ISA v3.00, +// since there are no optional categories. There are some exceptions that also +// require kernel support to work (DARN, SCV), so there are feature bits for +// those as well. The minimum processor requirement is POWER8 (ISA 2.07). +// The struct is padded to avoid false sharing. 
+var PPC64 struct { + _ CacheLinePad + HasDARN bool // Hardware random number generator (requires kernel enablement) + HasSCV bool // Syscall vectored (requires kernel enablement) + IsPOWER8 bool // ISA v2.07 (POWER8) + IsPOWER9 bool // ISA v3.00 (POWER9) + _ CacheLinePad +} + +// S390X contains the supported CPU features of the current IBM Z +// (s390x) platform. If the current platform is not IBM Z then all +// feature flags are false. +// +// S390X is padded to avoid false sharing. Further HasVX is only set +// if the OS supports vector registers in addition to the STFLE +// feature bit being set. +var S390X struct { + _ CacheLinePad + HasZARCH bool // z/Architecture mode is active [mandatory] + HasSTFLE bool // store facility list extended + HasLDISP bool // long (20-bit) displacements + HasEIMM bool // 32-bit immediates + HasDFP bool // decimal floating point + HasETF3EH bool // ETF-3 enhanced + HasMSA bool // message security assist (CPACF) + HasAES bool // KM-AES{128,192,256} functions + HasAESCBC bool // KMC-AES{128,192,256} functions + HasAESCTR bool // KMCTR-AES{128,192,256} functions + HasAESGCM bool // KMA-GCM-AES{128,192,256} functions + HasGHASH bool // KIMD-GHASH function + HasSHA1 bool // K{I,L}MD-SHA-1 functions + HasSHA256 bool // K{I,L}MD-SHA-256 functions + HasSHA512 bool // K{I,L}MD-SHA-512 functions + HasSHA3 bool // K{I,L}MD-SHA3-{224,256,384,512} and K{I,L}MD-SHAKE-{128,256} functions + HasVX bool // vector facility + HasVXE bool // vector-enhancements facility 1 + _ CacheLinePad +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go new file mode 100644 index 00000000..be602722 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_aix_ppc64.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix,ppc64 + +package cpu + +const cacheLineSize = 128 + +const ( + // getsystemcfg constants + _SC_IMPL = 2 + _IMPL_POWER8 = 0x10000 + _IMPL_POWER9 = 0x20000 +) + +func init() { + impl := getsystemcfg(_SC_IMPL) + if impl&_IMPL_POWER8 != 0 { + PPC64.IsPOWER8 = true + } + if impl&_IMPL_POWER9 != 0 { + PPC64.IsPOWER9 = true + } + + Initialized = true +} + +func getsystemcfg(label int) (n uint64) { + r0, _ := callgetsystemcfg(label) + n = uint64(r0) + return +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_arm.go b/vendor/golang.org/x/sys/cpu/cpu_arm.go new file mode 100644 index 00000000..7f2348b7 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_arm.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +const cacheLineSize = 32 + +func doinit() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go new file mode 100644 index 00000000..568bcd03 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !gccgo + +package cpu + +// haveAsmFunctions reports whether the other functions in this file can +// be safely called. +func haveAsmFunctions() bool { return true } + +// The following feature detection functions are defined in cpu_s390x.s. 
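+// (gc toolchain builds only; gccgo builds use the stubs in cpu_gccgo_s390x.go.)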
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList
+func kmQuery() queryResult
+func kmcQuery() queryResult
+func kmctrQuery() queryResult
+func kmaQuery() queryResult
+func kimdQuery() queryResult
+func klmdQuery() queryResult
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
new file mode 100644
index 00000000..f7cb4697
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gc_x86.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build !gccgo
+
+package cpu
+
+// cpuid is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
+
+// xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
+// and in cpu_gccgo.c for gccgo.
+func xgetbv() (eax, edx uint32)
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.c b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
new file mode 100644
index 00000000..e363c7d1
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.c
@@ -0,0 +1,43 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+#include <cpuid.h>
+#include <stdint.h>
+
+// Need to wrap __get_cpuid_count because it's declared as static.
+int
+gccgoGetCpuidCount(uint32_t leaf, uint32_t subleaf,
+                   uint32_t *eax, uint32_t *ebx,
+                   uint32_t *ecx, uint32_t *edx)
+{
+    return __get_cpuid_count(leaf, subleaf, eax, ebx, ecx, edx);
+}
+
+// xgetbv reads the contents of an XCR (Extended Control Register)
+// specified in the ECX register into registers EDX:EAX.
+// Currently, the only supported value for XCR is 0.
+//
+// TODO: Replace with a better alternative:
+//
+//     #include <immintrin.h>
+//
+//     #pragma GCC target("xsave")
+//
+//     void gccgoXgetbv(uint32_t *eax, uint32_t *edx) {
+//       unsigned long long x = _xgetbv(0);
+//       *eax = x & 0xffffffff;
+//       *edx = (x >> 32) & 0xffffffff;
+//     }
+//
+// Note that _xgetbv is defined starting with GCC 8.
+void
+gccgoXgetbv(uint32_t *eax, uint32_t *edx)
+{
+    __asm("  xorl %%ecx, %%ecx\n"
+          "  xgetbv"
+        : "=a"(*eax), "=d"(*edx));
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
new file mode 100644
index 00000000..ba49b91b
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+// +build gccgo
+
+package cpu
+
+//extern gccgoGetCpuidCount
+func gccgoGetCpuidCount(eaxArg, ecxArg uint32, eax, ebx, ecx, edx *uint32)
+
+func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32) {
+    var a, b, c, d uint32
+    gccgoGetCpuidCount(eaxArg, ecxArg, &a, &b, &c, &d)
+    return a, b, c, d
+}
+
+//extern gccgoXgetbv
+func gccgoXgetbv(eax, edx *uint32)
+
+func xgetbv() (eax, edx uint32) {
+    var a, d uint32
+    gccgoXgetbv(&a, &d)
+    return a, d
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
new file mode 100644
index 00000000..aa986f77
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go
@@ -0,0 +1,22 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build gccgo
+
+package cpu
+
+// haveAsmFunctions reports whether the other functions in this file can
+// be safely called.
+func haveAsmFunctions() bool { return false }
+
+// TODO(mundaym): the following feature detection functions are currently
+// stubs. See https://golang.org/cl/162887 for how to fix this.
+// They are likely to be expensive to call so the results should be cached.
+func stfle() facilityList     { panic("not implemented for gccgo") }
+func kmQuery() queryResult    { panic("not implemented for gccgo") }
+func kmcQuery() queryResult   { panic("not implemented for gccgo") }
+func kmctrQuery() queryResult { panic("not implemented for gccgo") }
+func kmaQuery() queryResult   { panic("not implemented for gccgo") }
+func kimdQuery() queryResult  { panic("not implemented for gccgo") }
+func klmdQuery() queryResult  { panic("not implemented for gccgo") }
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux.go b/vendor/golang.org/x/sys/cpu/cpu_linux.go
new file mode 100644
index 00000000..76b5f507
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//+build !amd64,!amd64p32,!386
+
+package cpu
+
+import (
+    "io/ioutil"
+)
+
+const (
+    _AT_HWCAP  = 16
+    _AT_HWCAP2 = 26
+
+    procAuxv = "/proc/self/auxv"
+
+    uintSize = int(32 << (^uint(0) >> 63))
+)
+
+// For platforms that don't have a 'cpuid' equivalent, we use HWCAP/HWCAP2.
+// These are initialized in cpu_$GOARCH.go
+// and should not be changed after they are initialized.
+var hwCap uint
+var hwCap2 uint
+
+func init() {
+    buf, err := ioutil.ReadFile(procAuxv)
+    if err != nil {
+        // e.g. on android /proc/self/auxv is not accessible, so silently
+        // ignore the error and leave Initialized = false
+        return
+    }
+
+    bo := hostByteOrder()
+    for len(buf) >= 2*(uintSize/8) {
+        var tag, val uint
+        switch uintSize {
+        case 32:
+            tag = uint(bo.Uint32(buf[0:]))
+            val = uint(bo.Uint32(buf[4:]))
+            buf = buf[8:]
+        case 64:
+            tag = uint(bo.Uint64(buf[0:]))
+            val = uint(bo.Uint64(buf[8:]))
+            buf = buf[16:]
+        }
+        switch tag {
+        case _AT_HWCAP:
+            hwCap = val
+        case _AT_HWCAP2:
+            hwCap2 = val
+        }
+    }
+    doinit()
+
+    Initialized = true
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
new file mode 100644
index 00000000..fa7fb1bd
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+const cacheLineSize = 64
+
+// HWCAP/HWCAP2 bits. These are exposed by Linux.
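+// Each hwcap_* constant below is assumed to mirror the corresponding
+// HWCAP_* bit in the Linux arm64 uapi headers.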
+const ( + hwcap_FP = 1 << 0 + hwcap_ASIMD = 1 << 1 + hwcap_EVTSTRM = 1 << 2 + hwcap_AES = 1 << 3 + hwcap_PMULL = 1 << 4 + hwcap_SHA1 = 1 << 5 + hwcap_SHA2 = 1 << 6 + hwcap_CRC32 = 1 << 7 + hwcap_ATOMICS = 1 << 8 + hwcap_FPHP = 1 << 9 + hwcap_ASIMDHP = 1 << 10 + hwcap_CPUID = 1 << 11 + hwcap_ASIMDRDM = 1 << 12 + hwcap_JSCVT = 1 << 13 + hwcap_FCMA = 1 << 14 + hwcap_LRCPC = 1 << 15 + hwcap_DCPOP = 1 << 16 + hwcap_SHA3 = 1 << 17 + hwcap_SM3 = 1 << 18 + hwcap_SM4 = 1 << 19 + hwcap_ASIMDDP = 1 << 20 + hwcap_SHA512 = 1 << 21 + hwcap_SVE = 1 << 22 + hwcap_ASIMDFHM = 1 << 23 +) + +func doinit() { + // HWCAP feature bits + ARM64.HasFP = isSet(hwCap, hwcap_FP) + ARM64.HasASIMD = isSet(hwCap, hwcap_ASIMD) + ARM64.HasEVTSTRM = isSet(hwCap, hwcap_EVTSTRM) + ARM64.HasAES = isSet(hwCap, hwcap_AES) + ARM64.HasPMULL = isSet(hwCap, hwcap_PMULL) + ARM64.HasSHA1 = isSet(hwCap, hwcap_SHA1) + ARM64.HasSHA2 = isSet(hwCap, hwcap_SHA2) + ARM64.HasCRC32 = isSet(hwCap, hwcap_CRC32) + ARM64.HasATOMICS = isSet(hwCap, hwcap_ATOMICS) + ARM64.HasFPHP = isSet(hwCap, hwcap_FPHP) + ARM64.HasASIMDHP = isSet(hwCap, hwcap_ASIMDHP) + ARM64.HasCPUID = isSet(hwCap, hwcap_CPUID) + ARM64.HasASIMDRDM = isSet(hwCap, hwcap_ASIMDRDM) + ARM64.HasJSCVT = isSet(hwCap, hwcap_JSCVT) + ARM64.HasFCMA = isSet(hwCap, hwcap_FCMA) + ARM64.HasLRCPC = isSet(hwCap, hwcap_LRCPC) + ARM64.HasDCPOP = isSet(hwCap, hwcap_DCPOP) + ARM64.HasSHA3 = isSet(hwCap, hwcap_SHA3) + ARM64.HasSM3 = isSet(hwCap, hwcap_SM3) + ARM64.HasSM4 = isSet(hwCap, hwcap_SM4) + ARM64.HasASIMDDP = isSet(hwCap, hwcap_ASIMDDP) + ARM64.HasSHA512 = isSet(hwCap, hwcap_SHA512) + ARM64.HasSVE = isSet(hwCap, hwcap_SVE) + ARM64.HasASIMDFHM = isSet(hwCap, hwcap_ASIMDFHM) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go new file mode 100644 index 00000000..6c8d975d --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go @@ -0,0 +1,33 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build ppc64 ppc64le + +package cpu + +const cacheLineSize = 128 + +// HWCAP/HWCAP2 bits. These are exposed by the kernel. +const ( + // ISA Level + _PPC_FEATURE2_ARCH_2_07 = 0x80000000 + _PPC_FEATURE2_ARCH_3_00 = 0x00800000 + + // CPU features + _PPC_FEATURE2_DARN = 0x00200000 + _PPC_FEATURE2_SCV = 0x00100000 +) + +func doinit() { + // HWCAP2 feature bits + PPC64.IsPOWER8 = isSet(hwCap2, _PPC_FEATURE2_ARCH_2_07) + PPC64.IsPOWER9 = isSet(hwCap2, _PPC_FEATURE2_ARCH_3_00) + PPC64.HasDARN = isSet(hwCap2, _PPC_FEATURE2_DARN) + PPC64.HasSCV = isSet(hwCap2, _PPC_FEATURE2_SCV) +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go new file mode 100644 index 00000000..d579eaef --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go @@ -0,0 +1,161 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package cpu
+
+const cacheLineSize = 256
+
+const (
+	// bit mask values from /usr/include/bits/hwcap.h
+	hwcap_ZARCH  = 2
+	hwcap_STFLE  = 4
+	hwcap_MSA    = 8
+	hwcap_LDISP  = 16
+	hwcap_EIMM   = 32
+	hwcap_DFP    = 64
+	hwcap_ETF3EH = 256
+	hwcap_VX     = 2048
+	hwcap_VXE    = 8192
+)
+
+// bitIsSet reports whether the bit at index is set. The bit index
+// is in big endian order, so bit index 0 is the leftmost bit.
+func bitIsSet(bits []uint64, index uint) bool {
+	return bits[index/64]&((1<<63)>>(index%64)) != 0
+}
+
+// function is the code for the named cryptographic function.
+type function uint8
+
+const (
+	// KM{,A,C,CTR} function codes
+	aes128 function = 18 // AES-128
+	aes192 function = 19 // AES-192
+	aes256 function = 20 // AES-256
+
+	// K{I,L}MD function codes
+	sha1     function = 1  // SHA-1
+	sha256   function = 2  // SHA-256
+	sha512   function = 3  // SHA-512
+	sha3_224 function = 32 // SHA3-224
+	sha3_256 function = 33 // SHA3-256
+	sha3_384 function = 34 // SHA3-384
+	sha3_512 function = 35 // SHA3-512
+	shake128 function = 36 // SHAKE-128
+	shake256 function = 37 // SHAKE-256
+
+	// KLMD function codes
+	ghash function = 65 // GHASH
+)
+
+// queryResult contains the result of a Query function
+// call. Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type queryResult struct {
+	bits [2]uint64
+}
+
+// Has reports whether the given functions are present.
+func (q *queryResult) Has(fns ...function) bool {
+	if len(fns) == 0 {
+		panic("no function codes provided")
+	}
+	for _, f := range fns {
+		if !bitIsSet(q.bits[:], uint(f)) {
+			return false
+		}
+	}
+	return true
+}
+
+// facility is a bit index for the named facility.
+type facility uint8
+
+const (
+	// cryptography facilities
+	msa4 facility = 77  // message-security-assist extension 4
+	msa8 facility = 146 // message-security-assist extension 8
+)
+
+// facilityList contains the result of an STFLE call.
+// Bits are numbered in big endian order so the
+// leftmost bit (the MSB) is at index 0.
+type facilityList struct {
+	bits [4]uint64
+}
+
+// Has reports whether the given facilities are present.
+func (s *facilityList) Has(fs ...facility) bool {
+	if len(fs) == 0 {
+		panic("no facility bits provided")
+	}
+	for _, f := range fs {
+		if !bitIsSet(s.bits[:], uint(f)) {
+			return false
+		}
+	}
+	return true
+}
+
+func doinit() {
+	// test HWCAP bit vector
+	has := func(featureMask uint) bool {
+		return hwCap&featureMask == featureMask
+	}
+
+	// mandatory
+	S390X.HasZARCH = has(hwcap_ZARCH)
+
+	// optional
+	S390X.HasSTFLE = has(hwcap_STFLE)
+	S390X.HasLDISP = has(hwcap_LDISP)
+	S390X.HasEIMM = has(hwcap_EIMM)
+	S390X.HasETF3EH = has(hwcap_ETF3EH)
+	S390X.HasDFP = has(hwcap_DFP)
+	S390X.HasMSA = has(hwcap_MSA)
+	S390X.HasVX = has(hwcap_VX)
+	if S390X.HasVX {
+		S390X.HasVXE = has(hwcap_VXE)
+	}
+
+	// We need implementations of stfle, km and so on
+	// to detect cryptographic features.
+	if !haveAsmFunctions() {
+		return
+	}
+
+	// optional cryptographic functions
+	if S390X.HasMSA {
+		aes := []function{aes128, aes192, aes256}
+
+		// cipher message
+		km, kmc := kmQuery(), kmcQuery()
+		S390X.HasAES = km.Has(aes...)
+		S390X.HasAESCBC = kmc.Has(aes...)
+		if S390X.HasSTFLE {
+			facilities := stfle()
+			if facilities.Has(msa4) {
+				kmctr := kmctrQuery()
+				S390X.HasAESCTR = kmctr.Has(aes...)
+			}
+			if facilities.Has(msa8) {
+				kma := kmaQuery()
+				S390X.HasAESGCM = kma.Has(aes...)
+			}
+		}
+
+		// compute message digest
+		kimd := kimdQuery() // intermediate (no padding)
+		klmd := klmdQuery() // last (padding)
+		S390X.HasSHA1 = kimd.Has(sha1) && klmd.Has(sha1)
+		S390X.HasSHA256 = kimd.Has(sha256) && klmd.Has(sha256)
+		S390X.HasSHA512 = kimd.Has(sha512) && klmd.Has(sha512)
+		S390X.HasGHASH = kimd.Has(ghash) // KLMD-GHASH does not exist
+		sha3 := []function{
+			sha3_224, sha3_256, sha3_384, sha3_512,
+			shake128, shake256,
+		}
+		S390X.HasSHA3 = kimd.Has(sha3...) && klmd.Has(sha3...)
+	}
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mips64x.go b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
new file mode 100644
index 00000000..f55e0c82
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_mips64x.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips64 mips64le
+
+package cpu
+
+const cacheLineSize = 32
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_mipsx.go b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
new file mode 100644
index 00000000..cda87b1a
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_mipsx.go
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build mips mipsle
+
+package cpu
+
+const cacheLineSize = 32
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
new file mode 100644
index 00000000..dd1e76dc
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_other_arm64.go
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux,arm64
+
+package cpu
+
+const cacheLineSize = 64
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_s390x.s b/vendor/golang.org/x/sys/cpu/cpu_s390x.s
new file mode 100644
index 00000000..e5037d92
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_s390x.s
@@ -0,0 +1,57 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
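+
+// The *Query functions below all follow the same pattern: executing the
+// underlying instruction with function code 0 ("query") stores a 128-bit
+// mask of supported function codes instead of processing any data.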
+
+// +build !gccgo
+
+#include "textflag.h"
+
+// func stfle() facilityList
+TEXT ·stfle(SB), NOSPLIT|NOFRAME, $0-32
+	MOVD $ret+0(FP), R1
+	MOVD $3, R0          // last doubleword index to store
+	XC   $32, (R1), (R1) // clear 4 doublewords (32 bytes)
+	WORD $0xb2b01000     // store facility list extended (STFLE)
+	RET
+
+// func kmQuery() queryResult
+TEXT ·kmQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KM-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xB92E0024    // cipher message (KM)
+	RET
+
+// func kmcQuery() queryResult
+TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KMC-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xB92F0024    // cipher message with chaining (KMC)
+	RET
+
+// func kmctrQuery() queryResult
+TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KMCTR-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xB92D4024    // cipher message with counter (KMCTR)
+	RET
+
+// func kmaQuery() queryResult
+TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KMA-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xb9296024    // cipher message with authentication (KMA)
+	RET
+
+// func kimdQuery() queryResult
+TEXT ·kimdQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KIMD-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xB93E0024    // compute intermediate message digest (KIMD)
+	RET
+
+// func klmdQuery() queryResult
+TEXT ·klmdQuery(SB), NOSPLIT|NOFRAME, $0-16
+	MOVD $0, R0         // set function code to 0 (KLMD-Query)
+	MOVD $ret+0(FP), R1 // address of 16-byte return value
+	WORD $0xB93F0024    // compute last message digest (KLMD)
+	RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_wasm.go b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
new file mode 100644
index 00000000..bd9bbda0
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_wasm.go
@@ -0,0 +1,15 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build wasm
+
+package cpu
+
+// We're compiling the cpu package for an unknown (software-abstracted) CPU.
+// Make CacheLinePad an empty struct and hope that the usual struct alignment
+// rules are good enough.
+
+const cacheLineSize = 0
+
+func doinit() {}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
new file mode 100644
index 00000000..d70d317f
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32
+
+package cpu
+
+const cacheLineSize = 64
+
+func init() {
+	Initialized = true
+
+	maxID, _, _, _ := cpuid(0, 0)
+
+	if maxID < 1 {
+		return
+	}
+
+	_, _, ecx1, edx1 := cpuid(1, 0)
+	X86.HasSSE2 = isSet(26, edx1)
+
+	X86.HasSSE3 = isSet(0, ecx1)
+	X86.HasPCLMULQDQ = isSet(1, ecx1)
+	X86.HasSSSE3 = isSet(9, ecx1)
+	X86.HasFMA = isSet(12, ecx1)
+	X86.HasSSE41 = isSet(19, ecx1)
+	X86.HasSSE42 = isSet(20, ecx1)
+	X86.HasPOPCNT = isSet(23, ecx1)
+	X86.HasAES = isSet(25, ecx1)
+	X86.HasOSXSAVE = isSet(27, ecx1)
+	X86.HasRDRAND = isSet(30, ecx1)
+
+	osSupportsAVX := false
+	// For XGETBV, OSXSAVE bit is required and sufficient.
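+	// XGETBV with ECX=0 reads XCR0; bit 1 covers the XMM registers and
+	// bit 2 the upper halves of the YMM registers, so AVX is only usable
+	// when the OS preserves both across context switches.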
+	if X86.HasOSXSAVE {
+		eax, _ := xgetbv()
+		// Check if XMM and YMM registers have OS support.
+		osSupportsAVX = isSet(1, eax) && isSet(2, eax)
+	}
+
+	X86.HasAVX = isSet(28, ecx1) && osSupportsAVX
+
+	if maxID < 7 {
+		return
+	}
+
+	_, ebx7, _, _ := cpuid(7, 0)
+	X86.HasBMI1 = isSet(3, ebx7)
+	X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
+	X86.HasBMI2 = isSet(8, ebx7)
+	X86.HasERMS = isSet(9, ebx7)
+	X86.HasRDSEED = isSet(18, ebx7)
+	X86.HasADX = isSet(19, ebx7)
+}
+
+func isSet(bitpos uint, value uint32) bool {
+	return value&(1<<bitpos) != 0
+}
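A minimal sketch (not part of the diff itself) of how dependent code might consult the feature flags these vendored files populate, assuming the standard `golang.org/x/sys/cpu` import path; all symbols used below are exported by the files added above:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Initialized reports whether feature detection ran; on the non-x86
	// Linux ports it is left false when /proc/self/auxv cannot be read
	// (e.g. on Android), in which case every flag below also reads false.
	fmt.Println("detection ran:", cpu.Initialized)

	// Each GOARCH fills in only its own struct; the others stay zero.
	fmt.Println("x86 AVX2:", cpu.X86.HasAVX2)
	fmt.Println("arm64 AES:", cpu.ARM64.HasAES)
	fmt.Println("s390x AES-GCM:", cpu.S390X.HasAESGCM)
}
```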